aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/mach-parrot7/gpu-m400.c152
-rw-r--r--arch/arm/mach-parrot7/gpu.c96
-rw-r--r--arch/arm/mach-parrot7/gpu.h40
-rw-r--r--arch/arm/mach-parrot7/i2cm.c329
-rw-r--r--arch/arm/mach-parrot7/i2cm.h75
-rw-r--r--arch/arm/mach-parrot7/i2cs.c122
-rw-r--r--arch/arm/mach-parrot7/i2cs.h36
-rw-r--r--arch/arm/mach-parrot7/include/mach/clkdev.h30
-rw-r--r--arch/arm/mach-parrot7/include/mach/debug-macro.S162
-rw-r--r--arch/arm/mach-parrot7/include/mach/entry-macro.S104
-rw-r--r--arch/arm/mach-parrot7/include/mach/ether.h37
-rw-r--r--arch/arm/mach-parrot7/include/mach/gpio.h146
-rw-r--r--arch/arm/mach-parrot7/include/mach/gpu-p7.h28
-rw-r--r--arch/arm/mach-parrot7/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-parrot7/include/mach/irqs.h180
-rw-r--r--arch/arm/mach-parrot7/include/mach/memory.h32
-rw-r--r--arch/arm/mach-parrot7/include/mach/p7-adc.h56
-rw-r--r--arch/arm/mach-parrot7/include/mach/p7.h170
-rw-r--r--arch/arm/mach-parrot7/include/mach/pwm.h64
-rw-r--r--arch/arm/mach-parrot7/include/mach/regs-uart.h135
-rw-r--r--arch/arm/mach-parrot7/include/mach/smp.h47
-rw-r--r--arch/arm/mach-parrot7/include/mach/system.h34
-rw-r--r--arch/arm/mach-parrot7/include/mach/timex.h37
-rw-r--r--arch/arm/mach-parrot7/include/mach/uncompress.h80
-rw-r--r--arch/arm/mach-parrot7/include/mach/usb-p7.h49
-rw-r--r--arch/arm/mach-parrot7/ion.c90
-rw-r--r--arch/arm/mach-parrot7/ion.h37
-rw-r--r--arch/arm/mach-parrot7/iopin.c83
-rw-r--r--arch/arm/mach-parrot7/iopin.h104
-rw-r--r--arch/arm/mach-parrot7/lcd-monspecs.c728
-rw-r--r--arch/arm/mach-parrot7/lcd-monspecs.h108
-rw-r--r--arch/arm/mach-parrot7/mpegts.c119
-rw-r--r--arch/arm/mach-parrot7/mpegts.h42
-rw-r--r--arch/arm/mach-parrot7/mpmc.c87
-rw-r--r--arch/arm/mach-parrot7/mpmc.h21
-rw-r--r--arch/arm/mach-parrot7/nand.c94
-rw-r--r--arch/arm/mach-parrot7/nand.h37
-rw-r--r--arch/arm/mach-parrot7/p7-smc.S311
-rw-r--r--arch/arm/mach-parrot7/p7_pwm.c128
-rw-r--r--arch/arm/mach-parrot7/p7_pwm.h34
-rw-r--r--arch/arm/mach-parrot7/p7_temperature.c51
-rw-r--r--arch/arm/mach-parrot7/p7_temperature.h29
-rw-r--r--arch/arm/mach-parrot7/p7mu.c47
-rw-r--r--arch/arm/mach-parrot7/p7mu.h37
-rw-r--r--arch/arm/mach-parrot7/pinctrl.c202
-rw-r--r--arch/arm/mach-parrot7/pinctrl.h806
-rw-r--r--arch/arm/mach-parrot7/pindesc-r1.c808
-rw-r--r--arch/arm/mach-parrot7/pindesc-r2.c855
-rw-r--r--arch/arm/mach-parrot7/pindesc-r3.c855
-rw-r--r--arch/arm/mach-parrot7/pm.c190
-rw-r--r--arch/arm/mach-parrot7/readme.txt38
-rw-r--r--arch/arm/mach-parrot7/sdhci.c145
-rw-r--r--arch/arm/mach-parrot7/sdhci.h39
-rw-r--r--arch/arm/mach-parrot7/sii_platform_data.h37
-rw-r--r--arch/arm/mach-parrot7/sleep.S183
-rw-r--r--arch/arm/mach-parrot7/spi.c399
-rw-r--r--arch/arm/mach-parrot7/spi.h187
-rw-r--r--arch/arm/mach-parrot7/system.h154
-rw-r--r--arch/arm/mach-parrot7/uart.c203
-rw-r--r--drivers/iio/Kconfig81
-rw-r--r--drivers/iio/Makefile29
-rw-r--r--drivers/iio/accel/Kconfig108
-rw-r--r--drivers/iio/accel/Makefile18
-rw-r--r--drivers/iio/accel/bma180.c862
-rw-r--r--drivers/iio/accel/bmc150-accel.c1456
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c430
-rw-r--r--drivers/iio/accel/kxcjk-1013.c1429
-rw-r--r--drivers/iio/accel/kxsd9.c276
-rw-r--r--drivers/iio/accel/mma8452.c451
-rw-r--r--drivers/iio/accel/st_accel.h55
-rw-r--r--drivers/iio/accel/st_accel_buffer.c105
-rw-r--r--drivers/iio/accel/st_accel_core.c540
-rw-r--r--drivers/iio/accel/st_accel_i2c.c129
-rw-r--r--drivers/iio/accel/st_accel_spi.c77
-rw-r--r--drivers/iio/adc/Kconfig324
-rw-r--r--drivers/iio/adc/Makefile36
-rw-r--r--drivers/iio/adc/ad7266.c522
-rw-r--r--drivers/iio/adc/ad7291.c585
-rw-r--r--drivers/iio/adc/ad7298.c391
-rw-r--r--drivers/iio/adc/ad7476.c315
-rw-r--r--drivers/iio/adc/ad7791.c453
-rw-r--r--drivers/iio/adc/ad7793.c865
-rw-r--r--drivers/iio/adc/ad7887.c369
-rw-r--r--drivers/iio/adc/ad7923.c371
-rw-r--r--drivers/iio/adc/ad799x.c905
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c553
-rw-r--r--drivers/iio/adc/at91_adc.c1439
-rw-r--r--drivers/iio/adc/axp288_adc.c261
-rw-r--r--drivers/iio/adc/exynos_adc.c779
-rw-r--r--drivers/iio/adc/lp8788_adc.c254
-rw-r--r--drivers/iio/adc/max1027.c521
-rw-r--r--drivers/iio/adc/max1363.c1701
-rw-r--r--drivers/iio/adc/mcp320x.c401
-rw-r--r--drivers/iio/adc/mcp3422.c426
-rw-r--r--drivers/iio/adc/men_z188_adc.c173
-rw-r--r--drivers/iio/adc/nau7802.c582
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c595
-rw-r--r--drivers/iio/adc/rockchip_saradc.c351
-rw-r--r--drivers/iio/adc/ti-adc081c.c154
-rw-r--r--drivers/iio/adc/ti-adc128s052.c179
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c558
-rw-r--r--drivers/iio/adc/twl4030-madc.c895
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c1009
-rw-r--r--drivers/iio/adc/vf610_adc.c735
-rw-r--r--drivers/iio/adc/viperboard_adc.c157
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c1336
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c248
-rw-r--r--drivers/iio/adc/xilinx-xadc.h209
-rw-r--r--drivers/iio/amplifiers/Kconfig19
-rw-r--r--drivers/iio/amplifiers/Makefile6
-rw-r--r--drivers/iio/amplifiers/ad8366.c213
-rw-r--r--drivers/iio/buffer_cb.c124
-rw-r--r--drivers/iio/common/Kconfig6
-rw-r--r--drivers/iio/common/Makefile11
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig28
-rw-r--r--drivers/iio/common/hid-sensors/Makefile7
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c397
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c139
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h27
-rw-r--r--drivers/iio/common/st_sensors/Kconfig14
-rw-r--r--drivers/iio/common/st_sensors/Makefile10
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c128
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c558
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c112
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c122
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c77
-rw-r--r--drivers/iio/dac/Kconfig184
-rw-r--r--drivers/iio/dac/Makefile22
-rw-r--r--drivers/iio/dac/ad5064.c684
-rw-r--r--drivers/iio/dac/ad5360.c562
-rw-r--r--drivers/iio/dac/ad5380.c654
-rw-r--r--drivers/iio/dac/ad5421.c536
-rw-r--r--drivers/iio/dac/ad5446.c623
-rw-r--r--drivers/iio/dac/ad5449.c369
-rw-r--r--drivers/iio/dac/ad5504.c377
-rw-r--r--drivers/iio/dac/ad5624r.h79
-rw-r--r--drivers/iio/dac/ad5624r_spi.c319
-rw-r--r--drivers/iio/dac/ad5686.c408
-rw-r--r--drivers/iio/dac/ad5755.c622
-rw-r--r--drivers/iio/dac/ad5764.c370
-rw-r--r--drivers/iio/dac/ad5791.c474
-rw-r--r--drivers/iio/dac/ad7303.c303
-rw-r--r--drivers/iio/dac/max517.c222
-rw-r--r--drivers/iio/dac/max5821.c405
-rw-r--r--drivers/iio/dac/mcp4725.c349
-rw-r--r--drivers/iio/dac/mcp4922.c216
-rw-r--r--drivers/iio/frequency/Kconfig42
-rw-r--r--drivers/iio/frequency/Makefile7
-rw-r--r--drivers/iio/frequency/ad9523.c1040
-rw-r--r--drivers/iio/frequency/adf4350.c642
-rw-r--r--drivers/iio/gyro/Kconfig112
-rw-r--r--drivers/iio/gyro/Makefile24
-rw-r--r--drivers/iio/gyro/adis16080.c241
-rw-r--r--drivers/iio/gyro/adis16130.c179
-rw-r--r--drivers/iio/gyro/adis16136.c577
-rw-r--r--drivers/iio/gyro/adis16260.c389
-rw-r--r--drivers/iio/gyro/adxrs450.c468
-rw-r--r--drivers/iio/gyro/bmg160.c1273
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c427
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c153
-rw-r--r--drivers/iio/gyro/itg3200_core.c374
-rw-r--r--drivers/iio/gyro/st_gyro.h52
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c105
-rw-r--r--drivers/iio/gyro/st_gyro_core.c389
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c114
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c74
-rw-r--r--drivers/iio/humidity/Kconfig35
-rw-r--r--drivers/iio/humidity/Makefile7
-rw-r--r--drivers/iio/humidity/dht11.c293
-rw-r--r--drivers/iio/humidity/si7005.c189
-rw-r--r--drivers/iio/humidity/si7020.c161
-rw-r--r--drivers/iio/iio_core.h73
-rw-r--r--drivers/iio/iio_core_trigger.h42
-rw-r--r--drivers/iio/imu/Kconfig42
-rw-r--r--drivers/iio/imu/Makefile16
-rw-r--r--drivers/iio/imu/adis.c440
-rw-r--r--drivers/iio/imu/adis16400.h213
-rw-r--r--drivers/iio/imu/adis16400_buffer.c90
-rw-r--r--drivers/iio/imu/adis16400_core.c939
-rw-r--r--drivers/iio/imu/adis16480.c882
-rw-r--r--drivers/iio/imu/adis_buffer.c171
-rw-r--r--drivers/iio/imu/adis_trigger.c89
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig16
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c790
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h247
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c192
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c155
-rw-r--r--drivers/iio/industrialio-buffer.c1131
-rw-r--r--drivers/iio/industrialio-core.c1284
-rw-r--r--drivers/iio/industrialio-event.c520
-rw-r--r--drivers/iio/industrialio-trigger.c583
-rw-r--r--drivers/iio/industrialio-triggered-buffer.c112
-rw-r--r--drivers/iio/inkern.c636
-rw-r--r--drivers/iio/kfifo_buf.c207
-rw-r--r--drivers/iio/light/Kconfig195
-rw-r--r--drivers/iio/light/Makefile21
-rw-r--r--drivers/iio/light/adjd_s311.c321
-rw-r--r--drivers/iio/light/al3320a.c232
-rw-r--r--drivers/iio/light/apds9300.c531
-rw-r--r--drivers/iio/light/cm32181.c371
-rw-r--r--drivers/iio/light/cm36651.c750
-rw-r--r--drivers/iio/light/gp2ap020a00f.c1654
-rw-r--r--drivers/iio/light/hid-sensor-als.c392
-rw-r--r--drivers/iio/light/hid-sensor-prox.c384
-rw-r--r--drivers/iio/light/isl29125.c347
-rw-r--r--drivers/iio/light/lm3533-als.c927
-rw-r--r--drivers/iio/light/ltr501.c445
-rw-r--r--drivers/iio/light/tcs3414.c405
-rw-r--r--drivers/iio/light/tcs3472.c379
-rw-r--r--drivers/iio/light/tsl2563.c901
-rw-r--r--drivers/iio/light/tsl4531.c261
-rw-r--r--drivers/iio/light/vcnl4000.c198
-rw-r--r--drivers/iio/magnetometer/Kconfig82
-rw-r--r--drivers/iio/magnetometer/Makefile16
-rw-r--r--drivers/iio/magnetometer/ak09911.c326
-rw-r--r--drivers/iio/magnetometer/ak8975.c601
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c541
-rw-r--r--drivers/iio/magnetometer/mag3110.c438
-rw-r--r--drivers/iio/magnetometer/st_magn.h45
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c89
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c439
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c95
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c71
-rw-r--r--drivers/iio/orientation/Kconfig31
-rw-r--r--drivers/iio/orientation/Makefile7
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c448
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c345
-rw-r--r--drivers/iio/pressure/Kconfig94
-rw-r--r--drivers/iio/pressure/Makefile16
-rw-r--r--drivers/iio/pressure/bmp280.c455
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c393
-rw-r--r--drivers/iio/pressure/mpl115.c211
-rw-r--r--drivers/iio/pressure/mpl3115.c329
-rw-r--r--drivers/iio/pressure/st_pressure.h49
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c96
-rw-r--r--drivers/iio/pressure/st_pressure_core.c497
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c94
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c70
-rw-r--r--drivers/iio/pressure/t5403.c275
-rw-r--r--drivers/iio/proximity/Kconfig19
-rw-r--r--drivers/iio/proximity/Makefile6
-rw-r--r--drivers/input/keyreset.c239
-rw-r--r--drivers/input/misc/akm8975.c829
-rw-r--r--drivers/input/misc/gpio_axis.c192
-rw-r--r--drivers/input/misc/gpio_event.c239
-rw-r--r--drivers/input/misc/gpio_input.c376
-rw-r--r--drivers/input/misc/gpio_matrix.c441
-rw-r--r--drivers/input/misc/gpio_output.c97
-rw-r--r--drivers/input/misc/keychord.c387
-rw-r--r--drivers/input/misc/mpu6050.c402
-rw-r--r--drivers/input/misc/mpu6050_accel_gyro.c384
-rw-r--r--drivers/input/misc/mpu6050_i2c.c214
-rw-r--r--drivers/input/misc/mpu6050x.h192
-rw-r--r--drivers/input/touchscreen/synaptics_i2c_rmi.c675
-rw-r--r--drivers/leds/ledtrig-sleep.c80
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_irq.h26
-rw-r--r--drivers/media/video/adv7391.c407
-rw-r--r--drivers/media/video/adv7604.c3281
-rw-r--r--drivers/media/video/ar0330.c1797
-rw-r--r--drivers/media/video/ar1820.c409
-rw-r--r--drivers/media/video/as3636.c792
-rw-r--r--drivers/media/video/galileo1.c2074
-rw-r--r--drivers/media/video/galileo1_reg.h402
-rw-r--r--drivers/media/video/galileo2.c188
-rw-r--r--drivers/media/video/mt9f002.c2901
-rw-r--r--drivers/media/video/mt9f002_regs.h548
-rw-r--r--drivers/media/video/mt9m021.c1120
-rw-r--r--drivers/media/video/mt9m021_reg.h35
-rw-r--r--drivers/media/video/mt9v117.c896
-rw-r--r--drivers/media/video/mt9v117_patches.h85
-rw-r--r--drivers/media/video/ov16825/Kconfig6
-rwxr-xr-xdrivers/media/video/ov16825/Makefile1
-rw-r--r--drivers/media/video/ov16825/ov16825.c360
-rw-r--r--drivers/media/video/ov16825/ov16825_clk.c511
-rw-r--r--drivers/media/video/ov16825/ov16825_clk.h64
-rw-r--r--drivers/media/video/ov16825/ov16825_common.c107
-rw-r--r--drivers/media/video/ov16825/ov16825_common.h75
-rw-r--r--drivers/media/video/ov16825/ov16825_confs.h1283
-rw-r--r--drivers/media/video/ov16825/ov16825_ctrl.c1118
-rw-r--r--drivers/media/video/ov16825/ov16825_ctrl.h35
-rw-r--r--drivers/media/video/ov16825/ov16825_registers.h401
-rw-r--r--drivers/media/video/ov7740.c798
-rw-r--r--drivers/media/video/tc358746a.c751
-rw-r--r--drivers/media/video/tc358746a_reg.h494
-rw-r--r--drivers/media/video/tc358764.c782
-rw-r--r--drivers/media/video/tw8834.c824
-rw-r--r--drivers/media/video/tw8836.c882
-rw-r--r--drivers/media/video/tw9990.c625
-rw-r--r--drivers/misc/akm8975.c732
-rw-r--r--drivers/misc/ti-st/gps_drv.c805
-rw-r--r--drivers/misc/ti-st/tty_hci.c543
-rw-r--r--drivers/misc/uid_stat.c166
-rw-r--r--drivers/misc/wl127x-rfkill.c121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c211
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h74
-rw-r--r--drivers/net/phy/broadcom_br.c294
-rw-r--r--drivers/net/phy/fakephy.c116
-rw-r--r--drivers/net/ppp/pppolac.c449
-rw-r--r--drivers/net/ppp/pppopns.c428
-rw-r--r--drivers/net/wireless/bcmdhd/Kconfig47
-rw-r--r--drivers/net/wireless/bcmdhd/Makefile39
-rw-r--r--drivers/net/wireless/bcmdhd/aiutils.c836
-rw-r--r--drivers/net/wireless/bcmdhd/bcmevent.c148
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh.c726
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_linux.c742
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c1432
-rw-r--r--drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c387
-rw-r--r--drivers/net/wireless/bcmdhd/bcmutils.c2093
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_channels.c936
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_channels.h345
-rw-r--r--drivers/net/wireless/bcmdhd/bcmwifi_rates.h306
-rw-r--r--drivers/net/wireless/bcmdhd/dhd.h778
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.c338
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bta.h39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_bus.h109
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cdc.c2851
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cfg80211.c594
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_cfg80211.h42
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_common.c2323
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_custom_gpio.c293
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_dbg.h110
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux.c5327
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_linux_sched.c39
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_proto.h109
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_sdio.c6939
-rw-r--r--drivers/net/wireless/bcmdhd/dhd_wlfc.h277
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_stats.h43
-rw-r--r--drivers/net/wireless/bcmdhd/dngl_wlhdr.h40
-rw-r--r--drivers/net/wireless/bcmdhd/hndpmu.c197
-rw-r--r--drivers/net/wireless/bcmdhd/include/Makefile53
-rw-r--r--drivers/net/wireless/bcmdhd/include/aidmp.h375
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_android_types.h44
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_cfg.h29
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h361
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmcdc.h126
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdefs.h239
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmdevs.h487
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmendian.h278
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmnvram.h179
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmpcispi.h181
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmperf.h36
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdbus.h131
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh.h238
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h123
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdpcm.h274
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdspi.h135
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsdstd.h248
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmspi.h40
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmspibrcm.h139
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h606
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h874
-rw-r--r--drivers/net/wireless/bcmdhd/include/bcmutils.h775
-rw-r--r--drivers/net/wireless/bcmdhd/include/dbus.h571
-rw-r--r--drivers/net/wireless/bcmdhd/include/dhdioctl.h135
-rw-r--r--drivers/net/wireless/bcmdhd/include/epivers.h48
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndpmu.h36
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h88
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndrte_cons.h67
-rw-r--r--drivers/net/wireless/bcmdhd/include/hndsoc.h235
-rw-r--r--drivers/net/wireless/bcmdhd/include/linux_osl.h422
-rw-r--r--drivers/net/wireless/bcmdhd/include/linuxver.h614
-rw-r--r--drivers/net/wireless/bcmdhd/include/miniopt.h77
-rw-r--r--drivers/net/wireless/bcmdhd/include/msgtrace.h74
-rw-r--r--drivers/net/wireless/bcmdhd/include/osl.h88
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_end.h53
-rw-r--r--drivers/net/wireless/bcmdhd/include/packed_section_start.h60
-rw-r--r--drivers/net/wireless/bcmdhd/include/pcicfg.h90
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11.h2244
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h45
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.11e.h131
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/802.1d.h48
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmeth.h82
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmevent.h328
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmip.h210
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h104
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h441
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/eapol.h193
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/ethernet.h162
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/p2p.h564
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/sdspi.h75
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/vlan.h69
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/wpa.h203
-rw-r--r--drivers/net/wireless/bcmdhd/include/proto/wps.h379
-rw-r--r--drivers/net/wireless/bcmdhd/include/rwl_wifi.h96
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbchipc.h2235
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbconfig.h275
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbhnddma.h370
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbpcmcia.h108
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdio.h187
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h293
-rw-r--r--drivers/net/wireless/bcmdhd/include/sbsocram.h193
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdio.h617
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdioh.h441
-rw-r--r--drivers/net/wireless/bcmdhd/include/sdiovar.h58
-rw-r--r--drivers/net/wireless/bcmdhd/include/siutils.h313
-rw-r--r--drivers/net/wireless/bcmdhd/include/spid.h153
-rw-r--r--drivers/net/wireless/bcmdhd/include/trxhdr.h53
-rw-r--r--drivers/net/wireless/bcmdhd/include/typedefs.h310
-rw-r--r--drivers/net/wireless/bcmdhd/include/usbrdl.h203
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h117
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlfc_proto.h225
-rw-r--r--drivers/net/wireless/bcmdhd/include/wlioctl.h5002
-rw-r--r--drivers/net/wireless/bcmdhd/linux_osl.c1053
-rw-r--r--drivers/net/wireless/bcmdhd/sbutils.c1001
-rw-r--r--drivers/net/wireless/bcmdhd/siutils.c2356
-rw-r--r--drivers/net/wireless/bcmdhd/siutils_priv.h246
-rw-r--r--drivers/net/wireless/bcmdhd/uamp_api.h176
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.c845
-rw-r--r--drivers/net/wireless/bcmdhd/wl_android.h57
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.c7302
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfg80211.h658
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.c1967
-rw-r--r--drivers/net/wireless/bcmdhd/wl_cfgp2p.h284
-rw-r--r--drivers/net/wireless/bcmdhd/wl_dbg.h63
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.c3737
-rw-r--r--drivers/net/wireless/bcmdhd/wl_iw.h161
-rw-r--r--drivers/net/wireless/bcmdhd/wl_linux_mon.c422
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.c368
-rw-r--r--drivers/net/wireless/bcmdhd/wldev_common.h110
-rw-r--r--drivers/parrot/Kconfig30
-rw-r--r--drivers/parrot/Makefile32
-rw-r--r--drivers/parrot/block/Kconfig11
-rw-r--r--drivers/parrot/block/Makefile1
-rw-r--r--drivers/parrot/block/phy-brd.c209
-rw-r--r--drivers/parrot/block/phy-brd.h22
-rw-r--r--drivers/parrot/clk/Makefile6
-rw-r--r--drivers/parrot/clk/clk-avipll.c272
-rw-r--r--drivers/parrot/clk/clk-gbc.c285
-rw-r--r--drivers/parrot/clk/clk-pll.c321
-rw-r--r--drivers/parrot/clk/clk-sd.c120
-rw-r--r--drivers/parrot/clk/clk-xin.c158
-rw-r--r--drivers/parrot/clk/clock.c98
-rw-r--r--drivers/parrot/clk/clock.h38
-rw-r--r--drivers/parrot/clk/p7clk-provider.h146
-rw-r--r--drivers/parrot/cpufreq/Kconfig4
-rw-r--r--drivers/parrot/cpufreq/Makefile1
-rw-r--r--drivers/parrot/cpufreq/cpufreq-p7.c179
-rw-r--r--drivers/parrot/crypto/Kconfig11
-rw-r--r--drivers/parrot/crypto/Makefile6
-rw-r--r--drivers/parrot/crypto/p7-crypto.c1427
-rw-r--r--drivers/parrot/crypto/p7-crypto_regs.h70
-rw-r--r--drivers/parrot/dma/Kconfig7
-rw-r--r--drivers/parrot/dummy.c1
-rw-r--r--drivers/parrot/gator/Kconfig39
-rw-r--r--drivers/parrot/gator/LICENSE339
-rw-r--r--drivers/parrot/gator/Makefile78
-rw-r--r--drivers/parrot/gator/gator.h152
-rw-r--r--drivers/parrot/gator/gator_annotate.c189
-rw-r--r--drivers/parrot/gator/gator_annotate_kernel.c200
-rw-r--r--drivers/parrot/gator/gator_backtrace.c208
-rw-r--r--drivers/parrot/gator/gator_buffer.c171
-rw-r--r--drivers/parrot/gator/gator_buffer_write.c83
-rw-r--r--drivers/parrot/gator/gator_cookies.c446
-rw-r--r--drivers/parrot/gator/gator_events_armv6.c234
-rw-r--r--drivers/parrot/gator/gator_events_armv7.c314
-rw-r--r--drivers/parrot/gator/gator_events_block.c160
-rw-r--r--drivers/parrot/gator/gator_events_irq.c163
-rw-r--r--drivers/parrot/gator/gator_events_l2c-310.c208
-rw-r--r--drivers/parrot/gator/gator_events_mali_4xx.c622
-rw-r--r--drivers/parrot/gator/gator_events_mali_4xx.h18
-rw-r--r--drivers/parrot/gator/gator_events_mali_common.c72
-rw-r--r--drivers/parrot/gator/gator_events_mali_common.h77
-rw-r--r--drivers/parrot/gator/gator_events_mali_midgard.c562
-rw-r--r--drivers/parrot/gator/gator_events_mali_midgard_hw.c974
-rw-r--r--drivers/parrot/gator/gator_events_mali_midgard_hw_test.c55
-rw-r--r--drivers/parrot/gator/gator_events_meminfo.c470
-rw-r--r--drivers/parrot/gator/gator_events_mmapped.c209
-rw-r--r--drivers/parrot/gator/gator_events_net.c172
-rw-r--r--drivers/parrot/gator/gator_events_perf_pmu.c574
-rw-r--r--drivers/parrot/gator/gator_events_sched.c113
-rw-r--r--drivers/parrot/gator/gator_events_scorpion.c674
-rw-r--r--drivers/parrot/gator/gator_fs.c370
-rw-r--r--drivers/parrot/gator/gator_hrtimer_gator.c80
-rw-r--r--drivers/parrot/gator/gator_iks.c197
-rw-r--r--drivers/parrot/gator/gator_main.c1491
-rw-r--r--drivers/parrot/gator/gator_marshaling.c371
-rw-r--r--drivers/parrot/gator/gator_trace_gpu.c321
-rw-r--r--drivers/parrot/gator/gator_trace_power.c192
-rw-r--r--drivers/parrot/gator/gator_trace_sched.c321
-rw-r--r--drivers/parrot/gator/mali/mali_kbase_gator_api.h219
-rw-r--r--drivers/parrot/gator/mali/mali_mjollnir_profiling_gator_api.h159
-rw-r--r--drivers/parrot/gator/mali/mali_utgard_profiling_gator_api.h197
-rw-r--r--drivers/parrot/gator/mali_midgard.mk39
-rw-r--r--drivers/parrot/gpio/Kconfig20
-rw-r--r--drivers/parrot/gpio/Makefile6
-rw-r--r--drivers/parrot/gpio/p7-gpio.c1411
-rw-r--r--drivers/parrot/gpio/p7-gpio.h37
-rw-r--r--drivers/parrot/gpio/p7mu-gpio.c301
-rw-r--r--drivers/parrot/gpu/Kconfig29
-rw-r--r--drivers/parrot/gpu/Makefile18
-rw-r--r--drivers/parrot/gpu/ion/Kconfig7
-rw-r--r--drivers/parrot/gpu/ion/Makefile2
-rw-r--r--drivers/parrot/gpu/ion/p7-ion.c115
-rw-r--r--drivers/parrot/gpu/ion/p7-ion.h15
-rw-r--r--drivers/parrot/gpu/mali200/Kbuild317
-rw-r--r--drivers/parrot/gpu/mali200/Kbuild.original288
-rw-r--r--drivers/parrot/gpu/mali200/Kconfig31
-rw-r--r--drivers/parrot/gpu/mali200/Makefile89
-rw-r--r--drivers/parrot/gpu/mali200/__malidrv_build_info.c1
-rw-r--r--drivers/parrot/gpu/mali200/arch-pb-virtex5-m200/config.h75
-rw-r--r--drivers/parrot/gpu/mali200/arch-pb-virtex5-m400-1-pmu/config.h85
-rw-r--r--drivers/parrot/gpu/mali200/arch-ve-virtex6-m450-8/config.h168
-rw-r--r--drivers/parrot/gpu/mali200/arch/config.h12
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_block_allocator.c391
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_block_allocator.h18
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_cluster.c218
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_cluster.h44
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_device_pause_resume.c46
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_device_pause_resume.h31
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_dlbu.c285
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_dlbu.h45
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp.c693
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp.h46
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp_job.c49
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp_job.h131
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp_scheduler.c438
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_gp_scheduler.h30
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_group.c841
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_group.h146
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_hw_core.c46
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_hw_core.h71
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_common.h183
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_core.c1185
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_core.h38
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.c184
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.h101
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.c346
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.h37
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.c375
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.h151
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_utilization.c218
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_utilization.h44
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_kernel_vsync.c51
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_l2_cache.c398
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_l2_cache.h43
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mem_validation.c71
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mem_validation.h19
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_memory.c1321
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_memory.h80
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mmu.c619
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mmu.h55
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.c459
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.h83
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_osk.h1800
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_osk_bitops.h166
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_osk_list.h184
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_osk_mali.h222
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_osk_profiling.h147
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pm.c547
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pm.h56
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pmu.c199
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pmu.h70
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp.c710
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp.h47
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp_job.c92
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp_job.h255
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp_scheduler.c542
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_pp_scheduler.h38
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_scheduler.c37
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_scheduler.h21
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_session.c47
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_session.h65
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_ukk.h661
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_user_settings_db.c88
-rw-r--r--drivers/parrot/gpu/mali200/common/mali_user_settings_db.h40
-rw-r--r--drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard.h28
-rw-r--r--drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_counters.h264
-rw-r--r--drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_ioctl.h80
-rw-r--r--drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_profiling_events.h121
-rw-r--r--drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_uk_types.h1133
-rw-r--r--drivers/parrot/gpu/mali200/linux/license/gpl/mali_kernel_license.h31
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_linux.c523
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_linux.h43
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_pm.c265
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_pm.h17
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.c1281
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.h30
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_linux_pm.h50
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_linux_pm_testsuite.h32
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_linux_trace.h126
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_atomics.c55
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.c86
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.h48
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_irq.c266
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_locks.c336
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_low_level_mem.c590
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_mali.c38
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_math.c22
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_memory.c61
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_misc.c64
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_notification.c189
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_pm.c83
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_profiling_gator.c261
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_profiling_internal.c308
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_specific.h130
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_time.c51
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_timers.c65
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_osk_wait_queue.c73
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_pmu_power_up_down.c65
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_profiling_events.h17
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_uk_types.h17
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_core.c162
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_gp.c113
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_mem.c259
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_pp.c88
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_profiling.c183
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_vsync.c41
-rw-r--r--drivers/parrot/gpu/mali200/linux/mali_ukk_wrappers.h70
-rw-r--r--drivers/parrot/gpu/mali200/platform/default/mali_platform.c43
-rw-r--r--drivers/parrot/gpu/mali200/platform/mali_platform.h80
-rw-r--r--drivers/parrot/gpu/mali200/platform/mali_platform_pmu_testing/mali_platform.c66
-rw-r--r--drivers/parrot/gpu/mali200/readme.txt28
-rw-r--r--drivers/parrot/gpu/mali200/regs/mali_200_regs.h172
-rw-r--r--drivers/parrot/gpu/mali200/regs/mali_gp_regs.h214
-rw-r--r--drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.h26
-rwxr-xr-xdrivers/parrot/gpu/mali400/Kbuild209
-rwxr-xr-xdrivers/parrot/gpu/mali400/Kconfig94
-rwxr-xr-xdrivers/parrot/gpu/mali400/Makefile170
-rw-r--r--drivers/parrot/gpu/mali400/__malidrv_build_info.c1
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_broadcast.c132
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_broadcast.h52
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_dlbu.c209
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_dlbu.h46
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_dma.c202
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_dma.h190
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp.c355
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp.h93
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp_job.c131
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp_job.h186
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp_scheduler.c703
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_gp_scheduler.h101
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_group.c2048
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_group.h322
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_hw_core.c45
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_hw_core.h100
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_common.h181
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_core.c1455
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_core.h56
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.c173
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.h99
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_utilization.c440
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_utilization.h65
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_kernel_vsync.c49
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_l2_cache.c584
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_l2_cache.h89
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mem_validation.c65
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mem_validation.h19
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mmu.c430
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mmu.h114
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.c437
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.h107
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk.h1346
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk_bitops.h162
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk_list.h273
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk_mali.h66
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk_profiling.h141
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_osk_types.h455
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pm.c122
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pm.h28
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pm_domain.c241
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pm_domain.h74
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pmu.c406
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pmu.h134
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp.c561
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp.h139
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp_job.c278
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp_job.h393
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp_scheduler.c2127
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_pp_scheduler.h130
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_scheduler.c112
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_scheduler.h90
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_scheduler_types.h34
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_session.c81
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_session.h94
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_soft_job.c464
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_soft_job.h196
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.c77
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.h70
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline.c1376
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline.h496
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.c198
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.h67
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.c158
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.h51
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_ukk.h621
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_user_settings_db.c147
-rw-r--r--drivers/parrot/gpu/mali400/common/mali_user_settings_db.h39
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard.h418
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_counters.h261
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_ioctl.h96
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_events.h174
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h197
-rw-r--r--drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_uk_types.h1138
-rwxr-xr-xdrivers/parrot/gpu/mali400/linux/license/gpl/mali_kernel_license.h30
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_device_pause_resume.c38
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_kernel_linux.c816
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_kernel_linux.h28
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.c1390
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.h29
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_linux_trace.h162
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory.c353
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory.h134
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.c319
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.h29
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.c434
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.h40
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_external.c129
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.c578
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.h47
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_types.h100
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_memory_ump.c215
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_atomics.c60
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_irq.c200
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_locks.c281
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_locks.h326
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_low_level_mem.c137
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_mali.c118
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_math.c27
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_memory.c61
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_misc.c74
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_notification.c182
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_pm.c109
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_profiling.c320
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_specific.h92
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_time.c51
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_timers.c76
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_wait_queue.c78
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_osk_wq.c240
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_pmu_power_up_down.c71
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_profiling_events.h17
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_profiling_gator_api.h17
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_profiling_internal.c274
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_profiling_internal.h35
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_sync.c310
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_sync.h118
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_uk_types.h17
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_core.c113
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_gp.c91
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_mem.c237
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_pp.c105
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_profiling.c190
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_soft_job.c86
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_timeline.c88
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_vsync.c39
-rw-r--r--drivers/parrot/gpu/mali400/linux/mali_ukk_wrappers.h76
-rw-r--r--drivers/parrot/gpu/mali400/platform/arm/arm.c228
-rw-r--r--drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.c122
-rw-r--r--drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.h44
-rwxr-xr-xdrivers/parrot/gpu/mali400/readme.txt24
-rw-r--r--drivers/parrot/gpu/mali400/regs/mali_200_regs.h130
-rw-r--r--drivers/parrot/gpu/mali400/regs/mali_gp_regs.h172
-rw-r--r--drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.h26
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/Kbuild199
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/Kconfig57
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/Makefile138
-rw-r--r--drivers/parrot/gpu/mali400_legacy/__malidrv_build_info.c1
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.c392
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.h18
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_broadcast.c129
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_broadcast.h52
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_dlbu.c217
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_dlbu.h46
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp.c364
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp.h96
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp_job.c116
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp_job.h152
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.c593
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.h47
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_group.c2046
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_group.h283
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_hw_core.c47
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_hw_core.h104
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_common.h175
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.c1309
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.h51
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.c184
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.h101
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.c345
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.h37
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.c375
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.h152
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.c397
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.h65
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_kernel_vsync.c51
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.c463
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.h75
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.c71
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.h19
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_memory.c1295
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_memory.h86
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mmu.c452
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mmu.h130
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.c485
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.h100
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_osk.h1811
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_osk_bitops.h166
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_osk_list.h183
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_osk_mali.h269
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_osk_profiling.h141
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pm.c128
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pm.h28
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.c240
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.h73
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pmu.c405
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pmu.h113
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp.c521
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp.h127
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp_job.c169
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp_job.h309
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.c1974
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.h84
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_scheduler.c37
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_scheduler.h47
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_session.c51
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_session.h73
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_ukk.h633
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.c95
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.h40
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard.h390
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_counters.h264
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_ioctl.h91
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_events.h172
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_gator_api.h202
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_uk_types.h1175
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/license/gpl/mali_kernel_license.h31
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_device_pause_resume.c35
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.c480
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.h40
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.c725
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.h37
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.c1707
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.h30
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_linux_trace.h126
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_atomics.c55
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_irq.c139
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_locks.c303
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_low_level_mem.c723
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_mali.c140
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_math.c22
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_memory.c61
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_misc.c64
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_notification.c191
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_pm.c110
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_profiling.c287
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_specific.h37
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_time.c51
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_timers.c77
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_wait_queue.c73
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_osk_wq.c124
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_pmu_power_up_down.c75
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_profiling_events.h17
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_profiling_gator_api.h17
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.c300
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.h36
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_sync.c273
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_sync.h102
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_sync_user.c194
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_uk_types.h17
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_core.c183
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_gp.c88
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_mem.c303
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_pp.c95
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_profiling.c183
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_vsync.c41
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/linux/mali_ukk_wrappers.h75
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/platform/arm/arm.c218
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.c129
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.h42
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/readme.txt24
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/regs/mali_200_regs.h128
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/regs/mali_gp_regs.h173
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.c13
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.h48
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.c13
-rwxr-xr-xdrivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/parrot/gpu/p7gpu-pm.c169
-rw-r--r--drivers/parrot/gpu/p7gpu-pm.h32
-rw-r--r--drivers/parrot/gpu/p7ump.c279
-rw-r--r--drivers/parrot/gpu/p7ump.h44
-rwxr-xr-xdrivers/parrot/gpu/ump/Kbuild102
-rwxr-xr-xdrivers/parrot/gpu/ump/Kbuild.original94
-rwxr-xr-xdrivers/parrot/gpu/ump/Kconfig16
-rwxr-xr-xdrivers/parrot/gpu/ump/Makefile67
-rwxr-xr-xdrivers/parrot/gpu/ump/Makefile.common20
-rw-r--r--drivers/parrot/gpu/ump/arch-default/config.h24
-rw-r--r--drivers/parrot/gpu/ump/arch-pb-virtex5/config.h18
-rw-r--r--drivers/parrot/gpu/ump/arch/config.h8
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_api.c455
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_common.c360
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_common.h125
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.c155
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.h89
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_memory_backend.h48
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_ref_drv.c181
-rw-r--r--drivers/parrot/gpu/ump/common/ump_kernel_types.h51
-rw-r--r--drivers/parrot/gpu/ump/common/ump_osk.h48
-rw-r--r--drivers/parrot/gpu/ump/common/ump_uk_types.h193
-rw-r--r--drivers/parrot/gpu/ump/common/ump_ukk.h60
-rwxr-xr-xdrivers/parrot/gpu/ump/include/ump/ump_kernel_interface.h235
-rwxr-xr-xdrivers/parrot/gpu/ump/include/ump/ump_kernel_interface_ref_drv.h31
-rwxr-xr-xdrivers/parrot/gpu/ump/include/ump/ump_kernel_platform.h48
-rwxr-xr-xdrivers/parrot/gpu/ump/linux/license/gpl/ump_kernel_license.h30
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_ioctl.h53
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_linux.c447
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_linux.h18
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.c271
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.h23
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.c235
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.h23
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.c208
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.h84
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_memory_backend.c65
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_osk_atomics.c27
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_osk_low_level_mem.c314
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_osk_misc.c36
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.c71
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.h34
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.c280
-rw-r--r--drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.h46
l---------drivers/parrot/gpu/ump/mali_files1
-rwxr-xr-xdrivers/parrot/gpu/ump/readme.txt28
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/Kbuild102
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/Kconfig16
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/Makefile67
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/Makefile.common20
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/arch-pb-virtex5/config.h18
-rw-r--r--drivers/parrot/gpu/ump_legacy/arch/config.h10
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_api.c530
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_common.c398
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_common.h128
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.c166
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.h91
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_memory_backend.h50
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_ref_drv.c197
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_kernel_types.h53
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_osk.h49
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_uk_types.h194
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/common/ump_ukk.h61
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface.h236
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface_ref_drv.h31
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_platform.h48
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/license/gpl/ump_kernel_license.h31
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_ioctl.h54
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.c463
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.h18
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.c285
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.h23
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.c255
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.h23
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_memory_backend.c70
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_osk_atomics.c27
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_osk_low_level_mem.c348
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_osk_misc.c37
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.c76
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.h35
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.c306
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.h47
-rwxr-xr-xdrivers/parrot/gpu/ump_legacy/readme.txt28
-rw-r--r--drivers/parrot/i2c/Kconfig49
-rw-r--r--drivers/parrot/i2c/Makefile9
-rw-r--r--drivers/parrot/i2c/muxes/Kconfig7
-rw-r--r--drivers/parrot/i2c/muxes/Makefile1
-rw-r--r--drivers/parrot/i2c/muxes/p7-i2cmux.c211
-rw-r--r--drivers/parrot/i2c/muxes/p7-i2cmux.h19
-rw-r--r--drivers/parrot/i2c/p7-i2cm.h41
-rw-r--r--drivers/parrot/i2c/p7-i2cm_debugfs.c350
-rw-r--r--drivers/parrot/i2c/p7-i2cm_debugfs.h19
-rw-r--r--drivers/parrot/i2c/p7-i2cm_legacy.c1409
-rw-r--r--drivers/parrot/i2c/p7-i2cm_regs.h123
-rw-r--r--drivers/parrot/i2c/p7-i2cs_regs.h43
-rw-r--r--drivers/parrot/i2c/plds_i2cs.c1222
-rw-r--r--drivers/parrot/i2c/plds_i2cs.h25
-rw-r--r--drivers/parrot/i2c/smsc-82514-usb-hub.c321
-rw-r--r--drivers/parrot/i2c/smsc-82514-usb-hub.h39
-rw-r--r--drivers/parrot/i2c/ti_ub925_lvds.c412
-rw-r--r--drivers/parrot/i2c/ti_ub925_lvds.h56
-rw-r--r--drivers/parrot/iio/Kconfig15
-rw-r--r--drivers/parrot/iio/Makefile10
-rw-r--r--drivers/parrot/iio/adc/Kconfig53
-rw-r--r--drivers/parrot/iio/adc/Makefile17
-rw-r--r--drivers/parrot/iio/adc/p7-adc_core.c128
-rw-r--r--drivers/parrot/iio/adc/p7-adc_core.h48
-rw-r--r--drivers/parrot/iio/adc/p7-adc_regs.h36
-rw-r--r--drivers/parrot/iio/adc/p7mu-adc.h109
-rw-r--r--drivers/parrot/iio/adc/p7mu-adc_core.c686
-rw-r--r--drivers/parrot/iio/adc/p7mu-adc_ring.c292
-rw-r--r--drivers/parrot/iio/adc/p7temp.c484
-rw-r--r--drivers/parrot/iio/adc/p7temp_hw08.c557
-rw-r--r--drivers/parrot/iio/bldc/Kconfig20
-rw-r--r--drivers/parrot/iio/bldc/Makefile9
-rw-r--r--drivers/parrot/iio/bldc/bldc_cypress_core.c434
-rw-r--r--drivers/parrot/iio/bldc/bldc_cypress_i2c.c100
-rw-r--r--drivers/parrot/iio/bldc/bldc_cypress_ring.c115
-rw-r--r--drivers/parrot/iio/bldc/bldc_i2c.c55
-rw-r--r--drivers/parrot/iio/imu/Kconfig10
-rw-r--r--drivers/parrot/iio/imu/Makefile8
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/Kconfig16
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/Makefile7
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_core.c1290
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_iio.h345
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_ring.c309
-rw-r--r--drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_trigger.c231
-rw-r--r--drivers/parrot/iio/magnetometer/Kconfig27
-rw-r--r--drivers/parrot/iio/magnetometer/Makefile8
-rw-r--r--drivers/parrot/iio/magnetometer/ak8975.c874
-rw-r--r--drivers/parrot/iio/magnetometer/ak8975_regs.h130
-rw-r--r--drivers/parrot/iio/platform_data/ak8975.h54
-rw-r--r--drivers/parrot/iio/platform_data/bldc_cypress.h18
-rw-r--r--drivers/parrot/iio/platform_data/invensense_mpu6050.h41
-rw-r--r--drivers/parrot/iio/pressure/Kconfig25
-rw-r--r--drivers/parrot/iio/pressure/Makefile6
-rw-r--r--drivers/parrot/iio/pressure/ms5607.c905
-rw-r--r--drivers/parrot/iio/trigger/Kconfig19
-rw-r--r--drivers/parrot/iio/trigger/Makefile6
-rw-r--r--drivers/parrot/iio/trigger/iio-trig-hrtimer.c453
-rw-r--r--drivers/parrot/input/Kconfig3
-rw-r--r--drivers/parrot/input/Makefile2
-rw-r--r--drivers/parrot/input/touchscreen/Kconfig21
-rw-r--r--drivers/parrot/input/touchscreen/Makefile2
-rw-r--r--drivers/parrot/input/touchscreen/antec_ts.c693
-rw-r--r--drivers/parrot/input/touchscreen/atmel_mxt_ts.c3373
-rw-r--r--drivers/parrot/input/touchscreen/atmel_mxt_ts.h30
-rw-r--r--drivers/parrot/input/virtual_wakeup_button.c105
-rw-r--r--drivers/parrot/lttng/Kconfig4
-rw-r--r--drivers/parrot/lttng/Makefile1
-rw-r--r--drivers/parrot/lttng/lttng_debugfs.c141
-rw-r--r--drivers/parrot/media/Kconfig14
-rw-r--r--drivers/parrot/media/Makefile6
-rw-r--r--drivers/parrot/media/hx280enc.c528
-rw-r--r--drivers/parrot/media/hx280enc.h87
-rw-r--r--drivers/parrot/media/memalloc.c180
-rw-r--r--drivers/parrot/media/memalloc.h9
-rw-r--r--drivers/parrot/media/p7-mpegts.c937
-rw-r--r--drivers/parrot/media/p7-mpegts.h36
-rw-r--r--drivers/parrot/media/p7-mpegts_ioctl.h53
-rw-r--r--drivers/parrot/media/p7-mpegts_priv.h87
-rw-r--r--drivers/parrot/media/p7-mpegts_regs.h45
-rw-r--r--drivers/parrot/media/video/Kconfig150
-rw-r--r--drivers/parrot/media/video/Makefile33
-rw-r--r--drivers/parrot/media/video/avi_capture.c1032
-rw-r--r--drivers/parrot/media/video/avi_capture.h151
-rw-r--r--drivers/parrot/media/video/avi_capture_test.c145
-rw-r--r--drivers/parrot/media/video/avi_capture_timings.h14
-rw-r--r--drivers/parrot/media/video/avi_m2m.c2202
-rw-r--r--drivers/parrot/media/video/avi_m2m.h34
-rw-r--r--drivers/parrot/media/video/avi_multicapture.c947
-rw-r--r--drivers/parrot/media/video/avi_multicapture.h72
-rw-r--r--drivers/parrot/media/video/avi_r2r.h256
-rw-r--r--drivers/parrot/media/video/avi_r2r_core.c2351
-rw-r--r--drivers/parrot/media/video/avi_r2r_core.h317
-rw-r--r--drivers/parrot/media/video/avi_r2r_debugfs.c691
-rw-r--r--drivers/parrot/media/video/avi_r2r_test_multi.c3191
-rw-r--r--drivers/parrot/media/video/avi_stats.c618
-rw-r--r--drivers/parrot/media/video/avi_stats.h106
-rw-r--r--drivers/parrot/media/video/avi_v4l2.c584
-rw-r--r--drivers/parrot/media/video/avi_v4l2.h143
-rw-r--r--drivers/parrot/media/video/avi_v4l2_dev.c192
-rw-r--r--drivers/parrot/media/video/avi_v4l2_dev.h19
-rw-r--r--drivers/parrot/media/video/avi_v4l2_isp.c1286
-rw-r--r--drivers/parrot/media/video/avi_v4l2_isp.h80
-rw-r--r--drivers/parrot/media/video/avi_v4l2_isp_ctrls.h649
-rw-r--r--drivers/parrot/media/video/avi_voc.c1582
-rw-r--r--drivers/parrot/media/video/avi_voc.h19
-rw-r--r--drivers/parrot/media/video/avi_voc_core.h145
-rw-r--r--drivers/parrot/media/video/avi_voc_ctrl.c191
-rw-r--r--drivers/parrot/media/video/avi_voc_ctrl.h35
-rw-r--r--drivers/parrot/media/video/avicam.h44
-rw-r--r--drivers/parrot/media/video/avicam_dummy_dev.c210
-rw-r--r--drivers/parrot/media/video/avicam_stats.c348
-rw-r--r--drivers/parrot/media/video/avicam_v4l2.c2023
-rw-r--r--drivers/parrot/media/video/avicam_v4l2.h143
-rw-r--r--drivers/parrot/media/video/lepton.c1076
-rw-r--r--drivers/parrot/media/video/lepton.h18
-rw-r--r--drivers/parrot/media/video/lepton_dev.c886
-rw-r--r--drivers/parrot/mfd/Kconfig33
-rw-r--r--drivers/parrot/mfd/Makefile4
-rw-r--r--drivers/parrot/mfd/dib0700_mfd.c2101
-rw-r--r--drivers/parrot/mfd/dib07x0.h65
-rw-r--r--drivers/parrot/mfd/p7mu-clk.c439
-rw-r--r--drivers/parrot/mfd/p7mu-clk.h50
-rw-r--r--drivers/parrot/mfd/p7mu-core.c1174
-rw-r--r--drivers/parrot/mfd/p7mu-pin.c239
-rw-r--r--drivers/parrot/mfd/p7mu-pin.h45
-rw-r--r--drivers/parrot/mfd/p7mu.h111
-rw-r--r--drivers/parrot/mfd/sicilia-irradiance.c132
-rw-r--r--drivers/parrot/mmc/Kconfig13
-rw-r--r--drivers/parrot/mmc/Makefile2
-rw-r--r--drivers/parrot/mmc/acs3-sdhci.c920
-rw-r--r--drivers/parrot/mmc/acs3-sdhci.h113
-rw-r--r--drivers/parrot/nand/Kconfig6
-rw-r--r--drivers/parrot/nand/Makefile5
-rw-r--r--drivers/parrot/nand/cast-base.c2173
-rw-r--r--drivers/parrot/nand/cast-nand.h36
-rw-r--r--drivers/parrot/nand/cast-regs.h219
-rw-r--r--drivers/parrot/nand/cast-timings.c105
-rw-r--r--drivers/parrot/nand/cast-timings.h67
-rw-r--r--drivers/parrot/net/Kconfig10
-rw-r--r--drivers/parrot/net/Makefile1
-rw-r--r--drivers/parrot/net/ksz8851_hw.h487
-rw-r--r--drivers/parrot/net/ksz8851snl.c1165
-rw-r--r--drivers/parrot/pinctrl/Kconfig7
-rw-r--r--drivers/parrot/pinctrl/Makefile2
-rw-r--r--drivers/parrot/pinctrl/p7-pinctrl.c763
-rw-r--r--drivers/parrot/pinctrl/p7-pinctrl.h204
-rw-r--r--drivers/parrot/power/Kconfig8
-rw-r--r--drivers/parrot/power/Makefile1
-rw-r--r--drivers/parrot/power/fake_battery.c284
-rw-r--r--drivers/parrot/pwm/Kconfig29
-rw-r--r--drivers/parrot/pwm/Makefile9
-rw-r--r--drivers/parrot/pwm/p7_pwm.c690
-rw-r--r--drivers/parrot/pwm/p7_pwm.h44
-rw-r--r--drivers/parrot/pwm/p7mu-pwm.c441
-rw-r--r--drivers/parrot/regulator/Kconfig7
-rw-r--r--drivers/parrot/regulator/Makefile2
-rw-r--r--drivers/parrot/regulator/switch-voltage-regulator.c288
-rw-r--r--drivers/parrot/regulator/switch-voltage-regulator.h75
-rw-r--r--drivers/parrot/rtc/Kconfig10
-rw-r--r--drivers/parrot/rtc/Makefile3
-rw-r--r--drivers/parrot/rtc/rtc-p7mu.c281
-rw-r--r--drivers/parrot/serial/Kconfig25
-rw-r--r--drivers/parrot/serial/Makefile2
-rw-r--r--drivers/parrot/serial/px-uart.c848
-rw-r--r--drivers/parrot/serial/px-uart.h12
-rw-r--r--drivers/parrot/serial/zprint.c185
-rw-r--r--drivers/parrot/serial/zprint.h24
-rw-r--r--drivers/parrot/sound/p7_aai/Kconfig19
-rw-r--r--drivers/parrot/sound/p7_aai/Makefile17
-rw-r--r--drivers/parrot/sound/p7_aai/aai.h236
-rw-r--r--drivers/parrot/sound/p7_aai/aai_alsa.c163
-rw-r--r--drivers/parrot/sound/p7_aai/aai_alsa.h9
-rw-r--r--drivers/parrot/sound/p7_aai/aai_clk.c113
-rw-r--r--drivers/parrot/sound/p7_aai/aai_clk.h10
-rw-r--r--drivers/parrot/sound/p7_aai/aai_group.c182
-rw-r--r--drivers/parrot/sound/p7_aai/aai_group.h11
-rw-r--r--drivers/parrot/sound/p7_aai/aai_hw.c626
-rw-r--r--drivers/parrot/sound/p7_aai/aai_hw.h273
-rw-r--r--drivers/parrot/sound/p7_aai/aai_hw_chan.h559
-rw-r--r--drivers/parrot/sound/p7_aai/aai_hw_struct.h256
-rw-r--r--drivers/parrot/sound/p7_aai/aai_ioctl.c549
-rw-r--r--drivers/parrot/sound/p7_aai/aai_ioctl.h89
-rw-r--r--drivers/parrot/sound/p7_aai/aai_irq.c199
-rw-r--r--drivers/parrot/sound/p7_aai/aai_module.c332
-rw-r--r--drivers/parrot/sound/p7_aai/aai_ops.c322
-rw-r--r--drivers/parrot/sound/p7_aai/aai_platform_data.h69
-rw-r--r--drivers/parrot/sound/p7_aai/aai_regs.c827
-rw-r--r--drivers/parrot/sound/p7_aai/aai_regs.h33
-rw-r--r--drivers/parrot/sound/p7_aai/aai_rules.c958
-rw-r--r--drivers/parrot/sound/p7_aai/aai_rules.h106
-rw-r--r--drivers/parrot/sound/p7_aai/reg_aai.h693
-rw-r--r--drivers/parrot/sound/p7_aai/reg_aai_gbc.h111
-rw-r--r--drivers/parrot/spi/Kconfig51
-rw-r--r--drivers/parrot/spi/Makefile13
-rw-r--r--drivers/parrot/spi/p7-spi.c1522
-rw-r--r--drivers/parrot/spi/p7-spi.h139
-rw-r--r--drivers/parrot/spi/p7-spi_priv.h330
-rw-r--r--drivers/parrot/spi/p7-spi_regs.h188
-rw-r--r--drivers/parrot/spi/p7-spim.c1370
-rw-r--r--drivers/parrot/spi/p7-spim.h24
-rw-r--r--drivers/parrot/spi/p7-spim_test.c175
-rw-r--r--drivers/parrot/spi/p7-spis.c961
-rw-r--r--drivers/parrot/spi/p7-spis.h35
-rw-r--r--drivers/parrot/spi/p7-swb.c317
-rw-r--r--drivers/parrot/spi/p7-swb.h53
-rw-r--r--drivers/parrot/spi/spimtest_loopback.c253
-rw-r--r--drivers/parrot/spi/spistest_loopback.c328
-rw-r--r--drivers/parrot/uio/Kconfig27
-rw-r--r--drivers/parrot/uio/Makefile6
-rw-r--r--drivers/parrot/uio/hx-uio.c904
-rw-r--r--drivers/parrot/uio/hx-uio.h76
-rw-r--r--drivers/parrot/uio/hx270-vdec.c183
-rw-r--r--drivers/parrot/uio/hx270-vdec.h23
-rw-r--r--drivers/parrot/uio/hx280-venc.c149
-rw-r--r--drivers/parrot/uio/hx280-venc.h23
-rw-r--r--drivers/parrot/usb/serial/blackberry/Kconfig6
-rw-r--r--drivers/parrot/usb/serial/blackberry/Makefile1
-rw-r--r--drivers/parrot/usb/serial/blackberry/blackberry.c65
-rw-r--r--drivers/parrot/video/Kconfig60
-rw-r--r--drivers/parrot/video/Makefile20
-rw-r--r--drivers/parrot/video/avi.c1587
-rw-r--r--drivers/parrot/video/avi.h481
-rw-r--r--drivers/parrot/video/avi_cap.c254
-rw-r--r--drivers/parrot/video/avi_cap.h77
-rw-r--r--drivers/parrot/video/avi_chroma.c508
-rw-r--r--drivers/parrot/video/avi_chroma.h60
-rw-r--r--drivers/parrot/video/avi_compat.c1182
-rw-r--r--drivers/parrot/video/avi_compat.h112
-rw-r--r--drivers/parrot/video/avi_debug.c177
-rw-r--r--drivers/parrot/video/avi_debug.h23
-rw-r--r--drivers/parrot/video/avi_debugfs.c1379
-rw-r--r--drivers/parrot/video/avi_dma.c620
-rw-r--r--drivers/parrot/video/avi_dma.h182
-rw-r--r--drivers/parrot/video/avi_isp.c111
-rw-r--r--drivers/parrot/video/avi_isp.h69
-rw-r--r--drivers/parrot/video/avi_limit.c258
-rw-r--r--drivers/parrot/video/avi_limit.h56
-rw-r--r--drivers/parrot/video/avi_logger.c149
-rw-r--r--drivers/parrot/video/avi_logger.h36
-rw-r--r--drivers/parrot/video/avi_pixfmt.c30
-rw-r--r--drivers/parrot/video/avi_pixfmt.h597
-rw-r--r--drivers/parrot/video/avi_regmap/avi_cfg.h664
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_bayer.h55
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_chain_bayer_inter.h40
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_chain_yuv_inter.h35
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_chroma.h163
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_chromatic_aberration.h162
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_color_correction.h163
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_dead_pixel_correction.h100
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_denoising.h198
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_drop.h44
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_edge_enhancement_color_reduction_filter.h93
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_gamma_corrector.h83
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_green_imbalance.h157
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_i3d_lut.h69
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_lens_shading_correction.h199
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_pedestal.h77
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_statistics_bayer.h162
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_statistics_yuv.h227
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_vlformat_32to40.h33
-rw-r--r--drivers/parrot/video/avi_regmap/avi_isp_vlformat_40to32.h33
-rw-r--r--drivers/parrot/video/avi_regmap/avi_lcd.h391
-rw-r--r--drivers/parrot/video/avi_scaler.c656
-rw-r--r--drivers/parrot/video/avi_scaler.h89
-rw-r--r--drivers/parrot/video/avi_segment.c1811
-rw-r--r--drivers/parrot/video/avi_segment.h343
-rw-r--r--drivers/parrot/video/avi_vsync_gen.c461
-rw-r--r--drivers/parrot/video/avi_vsync_gen.h79
-rw-r--r--drivers/parrot/video/avifb.c2575
-rw-r--r--drivers/parrot/video/avifb.h67
-rw-r--r--drivers/parrot/video/reg_avi.h927
-rwxr-xr-xdrivers/parrot/video/scaler-fir-coeffs.pl245
-rw-r--r--drivers/pwm/Kconfig42
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c1126
-rw-r--r--drivers/staging/android/trace_persistent.c242
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl25836
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x13
-rw-r--r--drivers/staging/iio/Documentation/lsiio.c163
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-ad719220
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c1684
-rw-r--r--drivers/staging/iio/adc/spear_adc.c400
-rw-r--r--drivers/staging/iio/frequency/Kconfig26
-rw-r--r--drivers/staging/iio/frequency/Makefile6
-rw-r--r--drivers/staging/iio/frequency/ad9832.c352
-rw-r--r--drivers/staging/iio/frequency/ad9832.h126
-rw-r--r--drivers/staging/iio/frequency/ad9834.c458
-rw-r--r--drivers/staging/iio/frequency/ad9834.h112
-rw-r--r--drivers/staging/iio/frequency/dds.h110
-rw-r--r--drivers/staging/iio/light/isl29028.c562
-rw-r--r--drivers/staging/iio/light/tsl2591.c408
-rw-r--r--drivers/staging/iio/light/tsl2591.h106
-rw-r--r--drivers/staging/iio/light/tsl2x7x.h100
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c2036
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.h59
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843_core.c638
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843_i2c.c104
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843_spi.c100
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.h24
-rw-r--r--drivers/staging/ulogger/Kconfig26
-rw-r--r--drivers/staging/ulogger/Makefile1
-rw-r--r--drivers/staging/ulogger/ulogger.c952
-rw-r--r--drivers/staging/ulogger/ulogger.h90
-rw-r--r--drivers/switch/Kconfig15
-rw-r--r--drivers/switch/Makefile4
-rw-r--r--drivers/switch/switch_class.c174
-rw-r--r--drivers/switch/switch_gpio.c172
-rw-r--r--drivers/usb/chipidea/Kconfig40
-rw-r--r--drivers/usb/chipidea/Makefile14
-rw-r--r--drivers/usb/chipidea/bits.h97
-rw-r--r--drivers/usb/chipidea/ci.h322
-rw-r--r--drivers/usb/chipidea/ci13xxx_msm.c110
-rw-r--r--drivers/usb/chipidea/ci13xxx_pci.c180
-rw-r--r--drivers/usb/chipidea/core.c668
-rw-r--r--drivers/usb/chipidea/debug.c446
-rw-r--r--drivers/usb/chipidea/debug.h31
-rw-r--r--drivers/usb/chipidea/host.c207
-rw-r--r--drivers/usb/chipidea/host.h42
-rw-r--r--drivers/usb/chipidea/udc.c1902
-rw-r--r--drivers/usb/chipidea/udc.h114
-rw-r--r--drivers/usb/gadget/android.c1581
-rw-r--r--drivers/usb/gadget/carplay.c260
-rw-r--r--drivers/usb/gadget/f_accessory.c1180
-rw-r--r--drivers/usb/gadget/f_adb.c622
-rw-r--r--drivers/usb/gadget/f_audio_source.c828
-rw-r--r--drivers/usb/gadget/f_mtp.c1283
-rw-r--r--drivers/usb/misc/ehset.c167
-rw-r--r--drivers/usb/otg/otg-wakelock.c170
-rw-r--r--drivers/video/cea861_modedb.h1
-rw-r--r--drivers/video/hdmi.c436
-rw-r--r--drivers/video/logo/logo_parrot_clut224.ppm592
-rw-r--r--drivers/video/mxc/Kconfig116
-rw-r--r--drivers/video/mxc/Makefile26
-rw-r--r--drivers/video/mxc/siihdmi.c1742
-rw-r--r--drivers/video/mxc/siihdmi.h422
-rw-r--r--fs/yaffs2/Kconfig161
-rw-r--r--fs/yaffs2/Makefile17
-rw-r--r--fs/yaffs2/yaffs_allocator.c396
-rw-r--r--fs/yaffs2/yaffs_allocator.h30
-rw-r--r--fs/yaffs2/yaffs_attribs.c124
-rw-r--r--fs/yaffs2/yaffs_attribs.h28
-rw-r--r--fs/yaffs2/yaffs_bitmap.c98
-rw-r--r--fs/yaffs2/yaffs_bitmap.h33
-rw-r--r--fs/yaffs2/yaffs_checkptrw.c415
-rw-r--r--fs/yaffs2/yaffs_checkptrw.h33
-rw-r--r--fs/yaffs2/yaffs_ecc.c298
-rw-r--r--fs/yaffs2/yaffs_ecc.h44
-rw-r--r--fs/yaffs2/yaffs_getblockinfo.h35
-rw-r--r--fs/yaffs2/yaffs_guts.c5164
-rw-r--r--fs/yaffs2/yaffs_guts.h915
-rw-r--r--fs/yaffs2/yaffs_linux.h41
-rw-r--r--fs/yaffs2/yaffs_mtdif.c54
-rw-r--r--fs/yaffs2/yaffs_mtdif.h23
-rw-r--r--fs/yaffs2/yaffs_mtdif1.c330
-rw-r--r--fs/yaffs2/yaffs_mtdif1.h29
-rw-r--r--fs/yaffs2/yaffs_mtdif2.c225
-rw-r--r--fs/yaffs2/yaffs_mtdif2.h29
-rw-r--r--fs/yaffs2/yaffs_nameval.c201
-rw-r--r--fs/yaffs2/yaffs_nameval.h28
-rw-r--r--fs/yaffs2/yaffs_nand.c127
-rw-r--r--fs/yaffs2/yaffs_nand.h38
-rw-r--r--fs/yaffs2/yaffs_packedtags1.c53
-rw-r--r--fs/yaffs2/yaffs_packedtags1.h39
-rw-r--r--fs/yaffs2/yaffs_packedtags2.c196
-rw-r--r--fs/yaffs2/yaffs_packedtags2.h47
-rw-r--r--fs/yaffs2/yaffs_tagscompat.c422
-rw-r--r--fs/yaffs2/yaffs_tagscompat.h36
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.c27
-rw-r--r--fs/yaffs2/yaffs_tagsvalidity.h23
-rw-r--r--fs/yaffs2/yaffs_trace.h57
-rw-r--r--fs/yaffs2/yaffs_verify.c535
-rw-r--r--fs/yaffs2/yaffs_verify.h43
-rw-r--r--fs/yaffs2/yaffs_vfs.c2792
-rw-r--r--fs/yaffs2/yaffs_yaffs1.c433
-rw-r--r--fs/yaffs2/yaffs_yaffs1.h22
-rw-r--r--fs/yaffs2/yaffs_yaffs2.c1598
-rw-r--r--fs/yaffs2/yaffs_yaffs2.h39
-rw-r--r--fs/yaffs2/yportenv.h70
-rw-r--r--include/linux/akm8975.h87
-rw-r--r--include/linux/android_aid.h28
-rw-r--r--include/linux/cea861.h444
-rwxr-xr-xinclude/linux/earlysuspend.h56
-rw-r--r--include/linux/edid.h373
-rw-r--r--include/linux/gpio_event.h170
-rw-r--r--include/linux/hdmi.h288
-rw-r--r--include/linux/hid-sensor-hub.h232
-rw-r--r--include/linux/hid-sensor-ids.h155
-rw-r--r--include/linux/if_pppolac.h33
-rw-r--r--include/linux/if_pppopns.h32
-rw-r--r--include/linux/iio/Kbuild3
-rw-r--r--include/linux/iio/accel/kxcjk_1013.h22
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h173
-rw-r--r--include/linux/iio/bldc/parrot_bldc_cypress_iio.h31
-rw-r--r--include/linux/iio/bldc/parrot_bldc_iio.h145
-rw-r--r--include/linux/iio/buffer.h253
-rw-r--r--include/linux/iio/common/st_sensors.h290
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h31
-rw-r--r--include/linux/iio/common/st_sensors_spi.h20
-rw-r--r--include/linux/iio/consumer.h199
-rw-r--r--include/linux/iio/dac/ad5421.h28
-rw-r--r--include/linux/iio/dac/ad5504.h16
-rw-r--r--include/linux/iio/dac/ad5791.h25
-rw-r--r--include/linux/iio/dac/max517.h15
-rw-r--r--include/linux/iio/dac/mcp4725.h16
-rw-r--r--include/linux/iio/driver.h31
-rw-r--r--include/linux/iio/events.h87
-rw-r--r--include/linux/iio/frequency/ad9523.h195
-rw-r--r--include/linux/iio/frequency/adf4350.h128
-rw-r--r--include/linux/iio/gyro/itg3200.h154
-rw-r--r--include/linux/iio/iio.h633
-rw-r--r--include/linux/iio/imu/adis.h283
-rw-r--r--include/linux/iio/kfifo_buf.h11
-rw-r--r--include/linux/iio/machine.h31
-rw-r--r--include/linux/iio/sysfs.h127
-rw-r--r--include/linux/iio/trigger.h149
-rw-r--r--include/linux/iio/trigger_consumer.h63
-rw-r--r--include/linux/iio/triggered_buffer.h15
-rw-r--r--include/linux/iio/types.h94
-rw-r--r--include/linux/input/akm8975.h60
-rw-r--r--include/linux/input/mpu6050.h66
-rw-r--r--include/linux/ion.h376
-rw-r--r--include/linux/keychord.h52
-rw-r--r--include/linux/keyreset.h28
-rw-r--r--include/linux/mfd/lm3533.h104
-rw-r--r--include/linux/netfilter/xt_qtaguid.h13
-rw-r--r--include/linux/netfilter/xt_quota2.h25
-rw-r--r--include/linux/persistent_ram.h78
-rw-r--r--include/linux/platform_data/ad5449.h40
-rw-r--r--include/linux/platform_data/ad5755.h103
-rw-r--r--include/linux/platform_data/ad7266.h54
-rw-r--r--include/linux/platform_data/ad7291.h12
-rw-r--r--include/linux/platform_data/ad7298.h20
-rw-r--r--include/linux/platform_data/ad7303.h21
-rw-r--r--include/linux/platform_data/ad7791.h17
-rw-r--r--include/linux/platform_data/ad7793.h112
-rw-r--r--include/linux/platform_data/ad7887.h26
-rw-r--r--include/linux/platform_data/adau17x1.h109
-rw-r--r--include/linux/platform_data/adau1977.h46
-rw-r--r--include/linux/platform_data/ads7828.h29
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h31
-rw-r--r--include/linux/platform_data/tsl2563.h8
-rw-r--r--include/linux/sw_sync.h58
-rw-r--r--include/linux/switch.h53
-rw-r--r--include/linux/synaptics_i2c_rmi.h55
-rw-r--r--include/linux/sync.h427
-rw-r--r--include/linux/uhid.h104
-rw-r--r--include/linux/uid_stat.h29
-rw-r--r--include/linux/usb/chipidea.h50
-rw-r--r--include/linux/usb/f_accessory.h146
-rw-r--r--include/linux/usb/f_mtp.h75
-rw-r--r--include/linux/v4l2-common.h71
-rw-r--r--include/linux/v4l2-dv-timings.h913
-rw-r--r--include/linux/wakelock.h67
-rw-r--r--include/linux/wifi_tiwlan.h27
-rw-r--r--include/linux/wl127x-rfkill.h35
-rw-r--r--include/linux/wlan_plat.h27
-rw-r--r--include/media/adv7391.h64
-rw-r--r--include/media/adv7604.h175
-rw-r--r--include/media/ar0330.h26
-rw-r--r--include/media/ar1820.h18
-rw-r--r--include/media/as3636.h54
-rw-r--r--include/media/galileo1.h28
-rw-r--r--include/media/mt9f002.h43
-rw-r--r--include/media/mt9m021.h20
-rw-r--r--include/media/mt9v117.h20
-rw-r--r--include/media/mx25l6435e.h56
-rw-r--r--include/media/ov16825.h13
-rw-r--r--include/media/ov7740.h16
-rw-r--r--include/media/tc358746a.h36
-rw-r--r--include/media/tc358764.h9
-rw-r--r--include/media/tw8834.h22
-rw-r--r--include/media/tw9990.h47
-rw-r--r--include/net/activity_stats.h25
-rw-r--r--include/parrot/avicam_dummy_dev.h70
-rw-r--r--include/parrot/mfd/dib0700_mfd.h20
-rw-r--r--include/trace/events/cpufreq_interactive.h112
-rw-r--r--include/trace/events/parrot_trace.h61
-rw-r--r--include/trace/events/sync.h82
-rw-r--r--kernel/power/autosleep.c127
-rw-r--r--kernel/power/suspend_time.c111
-rw-r--r--kernel/power/wakelock.c215
-rw-r--r--lucie/Config.in3
-rw-r--r--lucie/linux.mk27
-rw-r--r--net/activity_stats.c115
-rw-r--r--net/ipv4/sysfs_net_ipv4.c88
-rw-r--r--net/netfilter/xt_qtaguid.c2977
-rw-r--r--net/netfilter/xt_qtaguid_internal.h333
-rw-r--r--net/netfilter/xt_qtaguid_print.c564
-rw-r--r--net/netfilter/xt_qtaguid_print.h120
-rw-r--r--net/netfilter/xt_quota2.c382
-rw-r--r--sound/soc/codecs/adau1977.c1119
-rw-r--r--sound/soc/codecs/adau1977.h37
-rw-r--r--sound/soc/codecs/wau8822.c884
-rw-r--r--sound/soc/codecs/wau8822.h81
-rw-r--r--sound/soc/codecs/wm8594.c1124
-rw-r--r--sound/soc/codecs/wm8594.h47
-rw-r--r--sound/soc/parrot/Kconfig9
-rw-r--r--sound/soc/parrot/Makefile8
-rw-r--r--sound/soc/parrot/parrot-fc7100.c92
1465 files changed, 442615 insertions, 0 deletions
diff --git a/arch/arm/mach-parrot7/gpu-m400.c b/arch/arm/mach-parrot7/gpu-m400.c
new file mode 100644
index 00000000000000..7e81f2954d66c1
--- /dev/null
+++ b/arch/arm/mach-parrot7/gpu-m400.c
@@ -0,0 +1,152 @@
+/**
+ * linux/arch/arm/mach-parrot7/gpu-m400.c - Parrot7 MALI400 GPU platform interface
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 28-05-2013
+ *
+ * This file is released under the GPL
+ */
+
+#if defined(CONFIG_MALI400) || \
+ defined(CONFIG_MALI400_MODULE)
+
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include "gpu/mali400/include/linux/mali/mali_utgard_uk_types.h"
+#include "gpu/mali400/include/linux/mali/mali_utgard.h"
+#include "gpu.h"
+#include "common.h"
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+static struct mali_gpu_device_data mali_gpu_data = {
+ /* Mali OS memory limit */
+ .shared_mem_size = 512 * 1024 * 1024, /* 512MB */
+
+ /* Framebuffer memory */
+ /* This will be set after the AVI FB has been set
+	   .fb_start = <base address of contiguous frame buffer memory>,
+ .fb_size = <size of frame buffer memory>
+ */
+
+ /* DVFS */
+ //.utilization_interval = 1000, /* ms */
+ //.utilization_handler = <utilization function>,
+
+};
+
+static struct resource mali_gpu_resources[] = {
+MALI_GPU_RESOURCES_MALI400_MP4(P7_GPU,
+ P7_GPU_GP_IRQ,
+ P7_GPU_GPMMU_IRQ,
+ P7_GPU_PP0_IRQ,
+ P7_GPU_PPMMU0_IRQ,
+ P7_GPU_PP1_IRQ,
+ P7_GPU_PPMMU1_IRQ,
+ P7_GPU_PP2_IRQ,
+ P7_GPU_PPMMU2_IRQ,
+ P7_GPU_PP3_IRQ,
+ P7_GPU_PPMMU3_IRQ)
+};
+
+static struct resource mali_gpu_resources_3pix[] = {
+MALI_GPU_RESOURCES_MALI400_MP3(P7_GPU,
+ P7_GPU_GP_IRQ,
+ P7_GPU_GPMMU_IRQ,
+ P7_GPU_PP0_IRQ,
+ P7_GPU_PPMMU0_IRQ,
+ P7_GPU_PP1_IRQ,
+ P7_GPU_PPMMU1_IRQ,
+ P7_GPU_PP2_IRQ,
+ P7_GPU_PPMMU2_IRQ)
+};
+
+static struct resource mali_gpu_resources_2pix[] = {
+MALI_GPU_RESOURCES_MALI400_MP2(P7_GPU,
+ P7_GPU_GP_IRQ,
+ P7_GPU_GPMMU_IRQ,
+ P7_GPU_PP0_IRQ,
+ P7_GPU_PPMMU0_IRQ,
+ P7_GPU_PP1_IRQ,
+ P7_GPU_PPMMU1_IRQ)
+};
+
+static struct resource mali_gpu_resources_1pix[] = {
+MALI_GPU_RESOURCES_MALI400_MP1(P7_GPU,
+ P7_GPU_GP_IRQ,
+ P7_GPU_GPMMU_IRQ,
+ P7_GPU_PP0_IRQ,
+ P7_GPU_PPMMU0_IRQ)
+};
+
+struct platform_device mali_gpu_device = {
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mali_gpu_resources),
+ .resource = mali_gpu_resources,
+ .dev.platform_data = &mali_gpu_data,
+ .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+};
+
+static int __init p7_init_gpu_m400(void)
+{
+ int ret = 0;
+
+ /* We have Mali400 only on Revision 3. */
+ if (P7_CHIPREV_R3 != p7_chiprev())
+ return 0;
+
+ ret = platform_device_register(&mali_gpu_device);
+ if (ret) {
+ printk(KERN_ERR "Error registering Mali400 GPU device: %d.\n", ret);
+ goto out;
+#ifdef CONFIG_PM_RUNTIME
+ } else {
+ pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+ pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+ pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+ }
+ printk(KERN_INFO"Mali400 GPU registered.\n");
+
+out:
+ return ret;
+}
+
+int __init p7_init_gpu_fb_m400(unsigned long fb_start, unsigned long fb_size, int nb_pixcore)
+{
+ if (nb_pixcore == 1) {
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_1pix);
+ mali_gpu_device.resource = mali_gpu_resources_1pix;
+ }
+ else if (nb_pixcore == 2) {
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_2pix);
+ mali_gpu_device.resource = mali_gpu_resources_2pix;
+ }
+ else if (nb_pixcore == 3) {
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_3pix);
+ mali_gpu_device.resource = mali_gpu_resources_3pix;
+ }
+
+ /* Setup FB start and size according to given resource */
+ if (fb_start && fb_size) {
+ mali_gpu_data.fb_start = fb_start;
+ mali_gpu_data.fb_size = fb_size;
+
+ printk(KERN_INFO"Mali400 GPU Framebuffer: 0x%08lx size 0x%lx\n",
+ mali_gpu_data.fb_start,
+ mali_gpu_data.fb_size);
+ }
+
+ return p7_init_gpu_m400();
+}
+
+#endif /* defined(CONFIG_MALI400) || \
+ defined(CONFIG_MALI400_MODULE) */
+
diff --git a/arch/arm/mach-parrot7/gpu.c b/arch/arm/mach-parrot7/gpu.c
new file mode 100644
index 00000000000000..ee6f29795b87d9
--- /dev/null
+++ b/arch/arm/mach-parrot7/gpu.c
@@ -0,0 +1,96 @@
+/**
+ * linux/arch/arm/mach-parrot7/gpu.c - Parrot7 GPU platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 31-10-2012
+ *
+ * This file is released under the GPL
+ */
+
+#if defined(CONFIG_GPU_PARROT7) || \
+ defined(CONFIG_GPU_PARROT7_MODULE)
+
+#if !( defined(CONFIG_MALI200) || \
+ defined(CONFIG_MALI200_MODULE) ) && \
+ !( defined(CONFIG_MALI400) || \
+ defined(CONFIG_MALI400_MODULE))
+#error You need to select either Mali200 or Mali400 to enable the GPU.
+#endif
+
+#include "common.h"
+#include "gpu.h"
+
+static struct p7gpu_pm_plat_data p7gpu_pdata;
+
+static struct platform_device p7gpu_pm_dev = {
+ .name = "p7gpu",
+ .id = 0
+};
+
+static char* mali200_clocks_names[] = { M200_BUS_CLK, M200_CORE_CLK };
+static char *mali400_clocks_names[] = { M400_CLK,
+ M400_CLK"_pp0", M400_CLK"_pp1", M400_CLK"_pp2", M400_CLK"_pp3" };
+
+
+static int p7_init_gpu_pm(char** clocks_names, int num)
+{
+ int ret = 0;
+
+ /* Setup correct clock names */
+ if (NULL == clocks_names)
+ panic("P7 GPU Clock names not provided.");
+ p7gpu_pdata.clocks_names = clocks_names;
+ p7gpu_pdata.clocks_number = num;
+
+ p7gpu_pm_dev.dev.platform_data = &p7gpu_pdata;
+
+ /* Add power management device */
+ ret = platform_device_register(&p7gpu_pm_dev);
+ if (ret) {
+ printk(KERN_ERR "Error registering P7 GPU device: %d.\n", ret);
+ platform_device_unregister(&p7gpu_pm_dev);
+ goto out;
+ }
+ printk(KERN_INFO"P7 GPU device PM registered.\n");
+
+out:
+ return ret;
+}
+
+/*
+ * Set up the GPU's framebuffer, passing framebuffer information.
+ */
+int __init p7_init_gpu_fb(unsigned long fb_start, unsigned long fb_size, int nb_pixcore)
+{
+ int ret = 0;
+ int chiprev = p7_chiprev();
+
+ /* Initialize Mali200/400 (and register platform device if needed) */
+ switch(chiprev) {
+ case P7_CHIPREV_R1:
+ case P7_CHIPREV_R2:
+ ret = p7_init_gpu_pm(mali200_clocks_names, ARRAY_SIZE(mali200_clocks_names));
+ if (ret == 0)
+ ret = p7_init_gpu_fb_m200(fb_start, fb_size);
+ break;
+ case P7_CHIPREV_R3:
+ if (nb_pixcore < 0 || nb_pixcore > 4)
+ ret = -1;
+ /* clock should be registered before gpu driver */
+ if (ret == 0)
+ ret = p7_init_gpu_pm(mali400_clocks_names, ARRAY_SIZE(mali400_clocks_names) - 4 + nb_pixcore);
+ if (ret == 0)
+ ret = p7_init_gpu_fb_m400(fb_start, fb_size, nb_pixcore);
+ break;
+ default:
+ ret = -1;
+ printk(KERN_ERR "GPU not initialized: Unknown P7 revision.\n");
+ }
+ return ret;
+}
+
+#endif /* defined(CONFIG_GPU_PARROT7) || \
+ defined(CONFIG_GPU_PARROT7_MODULE) */
+
diff --git a/arch/arm/mach-parrot7/gpu.h b/arch/arm/mach-parrot7/gpu.h
new file mode 100644
index 00000000000000..67b419a360783c
--- /dev/null
+++ b/arch/arm/mach-parrot7/gpu.h
@@ -0,0 +1,40 @@
+/**
+ * linux/arch/arm/mach-parrot7/gpu.h - Parrot7 MALI200 GPU platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 01-11-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include "gpu/p7gpu-pm.h"
+
+#if defined(CONFIG_GPU_PARROT7) || \
+ defined(CONFIG_GPU_PARROT7_MODULE)
+int p7_init_gpu_fb(unsigned long fb_start, unsigned long fb_size, int nb_pixcore) __init;
+
+#else
+#define p7_init_gpu_fb(_fb_start, _fb_size, nb_pixcore) \
+ ({ -ENOSYS; })
+#endif
+
+
+#if defined(CONFIG_MALI200) || \
+ defined(CONFIG_MALI200_MODULE)
+int p7_init_gpu_fb_m200(unsigned long fb_start, unsigned long fb_size) __init;
+#else
+#define p7_init_gpu_fb_m200(_fb_start, _fb_size) \
+ ({ -ENOSYS; })
+#endif
+
+
+#if defined(CONFIG_MALI400) || \
+ defined(CONFIG_MALI400_MODULE)
+int p7_init_gpu_fb_m400(unsigned long fb_start, unsigned long fb_size, int nb_pixcore) __init;
+#else
+#define p7_init_gpu_fb_m400(_fb_start, _fb_size, nb_pixcore) \
+ ({ -ENOSYS; })
+#endif
+
diff --git a/arch/arm/mach-parrot7/i2cm.c b/arch/arm/mach-parrot7/i2cm.c
new file mode 100644
index 00000000000000..c2ebb47bd9896c
--- /dev/null
+++ b/arch/arm/mach-parrot7/i2cm.c
@@ -0,0 +1,329 @@
+/**
+ * linux/arch/arm/mach-parrot7/i2cm.c - Parrot7 i2c master controller platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 12-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/clkdev.h>
+#include <i2c/p7-i2cm.h>
+#include <i2c/muxes/p7-i2cmux.h>
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include "pinctrl.h"
+#include "common.h"
+#include "clock.h"
+#include "i2cm.h"
+
+/* Master / bus 0 resources */
+static struct resource p7_i2cm0_res[] = {
+ [0] = {
+ .start = P7_I2CM0,
+ .end = P7_I2CM0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CM0_IRQ,
+ .end = P7_I2CM0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* Master / bus 1 resources */
+static struct resource p7_i2cm1_res[] = {
+ [0] = {
+ .start = P7_I2CM1,
+ .end = P7_I2CM1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CM1_IRQ,
+ .end = P7_I2CM1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* Master / bus 2 resources */
+static struct resource p7_i2cm2_res[] = {
+ [0] = {
+ .start = P7_I2CM2,
+ .end = P7_I2CM2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CM2_IRQ,
+ .end = P7_I2CM2_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* Master / bus 3 resources */
+static struct resource p7_i2cm3_res[] = {
+ [0] = {
+ .start = P7_I2CMS,
+ .end = P7_I2CMS + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CMS_IRQ,
+ .end = P7_I2CMS_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device p7_i2cm_devs[] = {
+ {
+ .name = P7I2CM_DRV_NAME,
+ .id = 0,
+ .resource = p7_i2cm0_res,
+ .num_resources = ARRAY_SIZE(p7_i2cm0_res)
+ },
+ {
+ .name = P7I2CM_DRV_NAME,
+ .id = 1,
+ .resource = p7_i2cm1_res,
+ .num_resources = ARRAY_SIZE(p7_i2cm1_res)
+ },
+ {
+ .name = P7I2CM_DRV_NAME,
+ .id = 2,
+ .resource = p7_i2cm2_res,
+ .num_resources = ARRAY_SIZE(p7_i2cm2_res)
+ },
+ {
+ .name = P7I2CM_DRV_NAME,
+ .id = 3,
+ .resource = p7_i2cm3_res,
+ .num_resources = ARRAY_SIZE(p7_i2cm3_res)
+ },
+};
+
+/**
+ * p7_init_i2cm() - Instantiate I2C master controller identified by @bus
+ * for further driver usage.
+ *
+ * @bus: master controller / bus identifier
+ * @pdata: controller platform specific data
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of elements in the @pins array
+ */
+void __init p7_init_i2cm(int bus,
+ struct p7i2cm_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(bus >= ARRAY_SIZE(p7_i2cm_devs));
+ BUG_ON(! pdata);
+ BUG_ON(pin_cnt && !pins);
+#endif
+ if (bus >= ARRAY_SIZE(p7_i2cm_devs)) {
+ return;
+ }
+ switch(p7_chiprev())
+ {
+ case P7_CHIPREV_R1:
+ pdata->revision = I2CM_REVISION_1;
+ break;
+ case P7_CHIPREV_R2:
+ pdata->revision = I2CM_REVISION_2;
+ break;
+ case P7_CHIPREV_R3:
+ pdata->revision = I2CM_REVISION_3;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /* Assign pins before registering device in cases driver is already loaded. */
+ if (pin_cnt) {
+ char buf[10];
+ snprintf(buf, 10, "p7-i2cm.%d", bus);
+ err = p7_assign_pins(buf, pins, pin_cnt);
+ if (err)
+ goto err;
+ }
+
+ err = p7_init_dev(&p7_i2cm_devs[bus], pdata, NULL, 0);
+ if (! err)
+ return;
+
+ /*
+ * Pinctrl does not provide us with a way to remove registered pin
+ * mappings...
+ */
+
+err:
+ panic("p7: failed to init I2C master controller %d (%d)\n", bus, err);
+}
+
+#if defined(CONFIG_I2C_MUX_PARROT7) || defined(CONFIG_I2C_MUX_PARROT7_MODULE)
+
+/* Only I2CM_2 can be muxed on the P7 */
+static struct platform_device p7_i2cmux_dev = {
+ .name = P7I2CMUX_DRV_NAME,
+ .id = 0,
+};
+
+#include <i2c/muxes/p7-i2cmux.h>
+
+/**
+ * p7_init_i2cm_muxed() - Instantiate I2C master controller @bus handling
+ *                        several physical buses. Only I2CM2 can be used
+ *                        that way on the P7.
+ *
+ * @bus: master controller / bus identifier
+ * @pdata: controller platform specific data (flagged as muxed here)
+ * @pins: array of default pin functions and settings
+ * @pin_cnt: number of elements in the @pins array
+ * @mux_pdata: mux platform data (channel names and count); @mux_pins: per-channel pin maps. Panics on failure.
+ */
+void __init p7_init_i2cm_muxed(int bus,
+ struct p7i2cm_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt,
+ struct p7i2cmux_plat_data* mux_pdata,
+ struct p7i2cmux_pins const* mux_pins)
+{
+ char dname[] = "p7-i2cmux.xx";
+ size_t i;
+ int ret;
+
+#ifdef DEBUG
+ BUG_ON(! pdata);
+ BUG_ON(! mux_pdata);
+ BUG_ON(! mux_pdata->channel_names);
+ BUG_ON(! mux_pdata->nr_channels);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+ BUG_ON(! mux_pins);
+#endif
+
+ /* Register the I2CM without any associated pins */
+ pdata->muxed = 1;
+ p7_init_i2cm(bus, pdata, NULL, 0);
+
+ /* We register the pin configurations first in case the driver is
+ * already loaded */
+
+ /* Register default pin configuration */
+ snprintf(dname, sizeof(dname), "p7-i2cmux.%d", bus);
+ ret = p7_assign_named_pins(dname, NULL, pins, pin_cnt);
+
+ /* Register alternate buses pin configurations */
+ for (i = 0; ! ret && i < mux_pdata->nr_channels; i++)
+ ret = p7_assign_named_pins(dname,
+ mux_pdata->channel_names[i],
+ mux_pins[i].pinmap,
+ mux_pins[i].sz);
+ if (ret)
+ goto err;
+
+ /* Register the mux device */
+ mux_pdata->parent = bus;
+ p7_i2cmux_dev.id = bus;
+
+ ret = p7_init_dev(&p7_i2cmux_dev, mux_pdata, NULL, 0);
+ if (!ret)
+ return;
+
+err:
+ panic("p7: failed to init muxed I2C master controller %d (%d)\n",
+ bus,
+ ret);
+}
+
+#endif /* #if defined(CONFIG_I2C_MUX_PARROT7) || \
+ defined(CONFIG_I2C_MUX_PARROT7_MODULE) */
+
+/**
+ * p7_init_i2cm_slave() - Register I2C slave device to master controller
+ * identified by @bus for further driver usage.
+ *
+ * @bus: master controller / bus identifier
+ * @info: slave device descriptor
+ * @pins: array of pin functions and optional settings
+ * @pin_cnt: number of elements in the @pins array
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ */
+int __init p7_init_i2cm_slave(int bus,
+ struct i2c_board_info const* info,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ struct i2c_adapter* adapt;
+ struct i2c_client* client;
+ char* name = NULL;
+ int err = 0;
+
+#ifdef DEBUG
+ BUG_ON(! info);
+
+ pr_debug("p7: registering %s %d-%04hx...\n", info->type, bus, info->addr);
+#endif
+
+ /*
+ * We need to use dynamic slave registration here since master driver
+ * might already be loaded. All static board info would therefore be
+ * ignored.
+ * Get master assigned to bus...
+ */
+ adapt = i2c_get_adapter(bus);
+ if (! adapt) {
+ printk("failed to get adapter\n");
+ err = -ENODEV;
+ goto err;
+ }
+
+ if (pin_cnt) {
+ name = kasprintf(GFP_KERNEL,
+ "%d-%04x",
+ i2c_adapter_id(adapt),
+ info->addr | ((info->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
+ if (! name)
+ goto put;
+
+ err = p7_assign_named_pins(name, NULL, pins, pin_cnt);
+ if (err) {
+ kfree(name);
+ goto put;
+ }
+ }
+
+ /* Register new slave to master retrieved above... */
+ client = i2c_new_device(adapt, info);
+ if (! client) {
+ err = -ENXIO;
+ /*
+ * Don't free name since we cannot unregister pin mapping (which would
+ * still holds reference to name).
+ */
+ goto put;
+ }
+
+ if (err)
+ i2c_unregister_device(client);
+
+put:
+ i2c_put_adapter(adapt);
+err:
+ WARN(err,
+ "p7: failed to register %s %d-%04hx (%d)\n",
+ info->type,
+ bus,
+ info->addr,
+ err);
+ return err;
+}
diff --git a/arch/arm/mach-parrot7/i2cm.h b/arch/arm/mach-parrot7/i2cm.h
new file mode 100644
index 00000000000000..1338fd14532e39
--- /dev/null
+++ b/arch/arm/mach-parrot7/i2cm.h
@@ -0,0 +1,75 @@
+/**
+ * linux/arch/arm/mach-parrot7/i2cm.h - Parrot7 I2C master controller platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 12-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_I2CM_H
+#define _ARCH_PARROT7_I2CM_H
+
+struct p7i2cm_plat_data;
+struct i2c_board_info;
+struct pinctrl_map;
+
+#if defined(CONFIG_I2CM_PARROT7) || \
+ defined(CONFIG_I2CM_PARROT7_MODULE)
+
+#include <linux/init.h>
+
+extern void p7_init_i2cm(int,
+ struct p7i2cm_plat_data*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+
+extern int p7_init_i2cm_slave(int,
+ struct i2c_board_info const*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+#else /* defined(CONFIG_I2CM_PARROT7) || \
+ defined(CONFIG_I2CM_PARROT7_MODULE) */
+
+#define p7_init_i2cm(_bus, _pdata, _pins, _pins_nr)
+#define p7_init_i2cm_slave(_bus, _info, _pins, _pins_nr) \
+ ({ -ENOSYS; })
+
+#endif /* defined(CONFIG_I2CM_PARROT7) || \
+ defined(CONFIG_I2CM_PARROT7_MODULE) */
+
+#if defined(CONFIG_I2C_MUX_PARROT7) || \
+ defined(CONFIG_I2C_MUX_PARROT7_MODULE)
+
+struct p7i2cmux_plat_data;
+
+struct p7i2cmux_pins {
+ struct pinctrl_map* pinmap;
+ size_t sz;
+};
+
+extern void p7_init_i2cm_muxed(int,
+ struct p7i2cm_plat_data*,
+ struct pinctrl_map*,
+ size_t,
+ struct p7i2cmux_plat_data*,
+ struct p7i2cmux_pins const*) __init;
+
+#else
+
+struct p7i2cmux_plat_data;
+
+struct p7i2cmux_pins {
+ struct pinctrl_map* pinmap;
+ size_t sz;
+};
+
+#endif /* #if defined(CONFIG_I2C_MUX_PARROT7) || \
+ defined(CONFIG_I2C_MUX_PARROT7_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/i2cs.c b/arch/arm/mach-parrot7/i2cs.c
new file mode 100644
index 00000000000000..f4490212ed7f44
--- /dev/null
+++ b/arch/arm/mach-parrot7/i2cs.c
@@ -0,0 +1,122 @@
+/**
+ * linux/arch/arm/mach-parrot7/i2cs.c - Parrot7 i2c slave controller platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 13-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/clkdev.h>
+#include <i2c/plds_i2cs.h>
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include "common.h"
+#include "clock.h"
+
+/* Slave 0 resources */
+static struct resource plds_i2cs0_res[] = {
+ [0] = {
+ .start = P7_I2CS0,
+ .end = P7_I2CS0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CS0_IRQ,
+ .end = P7_I2CS0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* Slave 1 resources */
+static struct resource plds_i2cs1_res[] = {
+ [0] = {
+ .start = P7_I2CS1,
+ .end = P7_I2CS1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CS1_IRQ,
+ .end = P7_I2CS1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+/* Slave 2 resources */
+static struct resource plds_i2cs2_res[] = {
+ [0] = {
+ .start = P7_I2CS2,
+ .end = P7_I2CS2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_I2CS2_IRQ,
+ .end = P7_I2CS2_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct resource *plds_i2cs_res[] = {
+ plds_i2cs0_res,
+ plds_i2cs1_res,
+ plds_i2cs2_res,
+};
+
+static struct platform_device plds_i2cs_dev = {
+ .name = PLDS_I2CS_NAME,
+ .id = 0,
+ .resource = plds_i2cs0_res,
+ .num_resources = ARRAY_SIZE(plds_i2cs0_res)
+};
+
+/**
+ * p7_init_i2cs() - Instantiate the I2C slave controller selected by
+ * for further driver usage.
+ *
+ * @pdata: controller platform specific data; the slave controller
+ *         instantiated is the one selected by pdata->i2c_bus (0..2)
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of elements of @pins array
+ */
+void __init p7_init_i2cs( struct plds_i2cs_pdata *pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! pdata);
+ BUG_ON(pdata->i2c_bus > 2);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+
+ switch(p7_chiprev()) {
+ case P7_CHIPREV_R1:
+ pdata->revision = I2CS_REVISION_1;
+ break;
+ case P7_CHIPREV_R2:
+ pdata->revision = I2CS_REVISION_2;
+ break;
+ case P7_CHIPREV_R3:
+ pdata->revision = I2CS_REVISION_3;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ plds_i2cs_dev.id = pdata->i2c_bus;
+ plds_i2cs_dev.resource = plds_i2cs_res[pdata->i2c_bus];
+ plds_i2cs_dev.num_resources = 2;
+
+ err = p7_init_dev(&plds_i2cs_dev, pdata, pins, pin_cnt);
+ if (err)
+ panic("p7: failed to init I2C slave controller %d (%d)\n",
+ pdata->i2c_bus, err);
+}
diff --git a/arch/arm/mach-parrot7/i2cs.h b/arch/arm/mach-parrot7/i2cs.h
new file mode 100644
index 00000000000000..ac2a526aeada19
--- /dev/null
+++ b/arch/arm/mach-parrot7/i2cs.h
@@ -0,0 +1,36 @@
+/**
+ * linux/arch/arm/mach-parrot7/i2cs.h - Parrot7 i2c slave controller platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 13-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_I2CS_H
+#define _ARCH_PARROT7_I2CS_H
+
+struct plds_i2cs_pdata;
+struct pinctrl_map;
+
+#if defined(CONFIG_PLDS_I2CS) || \
+ defined(CONFIG_PLDS_I2CS_MODULE)
+
+#include <linux/init.h>
+
+extern void p7_init_i2cs(struct plds_i2cs_pdata*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+#else /* defined(CONFIG_PLDS_I2CS) || \
+ defined(CONFIG_PLDS_I2CS_MODULE) */
+
+#define p7_init_i2cs(_pdata, _pins, _pins_nr)
+
+#endif /* defined(CONFIG_PLDS_I2CS) || \
+ defined(CONFIG_PLDS_I2CS_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/clkdev.h b/arch/arm/mach-parrot7/include/mach/clkdev.h
new file mode 100644
index 00000000000000..abe1e8bcb821b9
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/clkdev.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/clkdev.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_CLKDEV_H
+#define _ARCH_PARROT7_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/debug-macro.S b/arch/arm/mach-parrot7/include/mach/debug-macro.S
new file mode 100644
index 00000000000000..c754e3604559b9
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/debug-macro.S
@@ -0,0 +1,162 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/debug-macro.S
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 15-Nov-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <mach/p7.h>
+#include <mach/regs-uart.h>
+#include <asm/memory.h>
+
+#if defined(CONFIG_PARROT7_DEBUG_LL_ZPRINT)
+ .macro addruart,rp,rv,tmp
+ ldr \rp, =P7_ZPRINT @ physical
+ ldr \rv, =__MMIO_P2V(P7_ZPRINT) @ virtual
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #0]
+ dsb
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+ .macro busyuart,rd,rx
+ .endm
+
+#else /* ZPRINT */
+
+ .macro addruart,rp,rv,tmp
+ /* The virtual and physical addresses are stored contiguously in
+ p7_early_uart declared in mach-parrot7/devices.c */
+ ldr \tmp, =p7_early_uart
+
+ /* There's a trick here, we can't access =p7_early_uart directly because
+ it might not be valid if we're not yet remapped into virtual
+ addresses. To make sure this code works in both real and virtual modes I
+ use a trick from mach-omap2 where we use the relative offset between the
+ current PC and a known location (1999f in the code below) to get a
+ correct address each time. */
+ adr \rp, 1999f @ current address of word at label 1999
+ ldr \rv, [\rp] @ absolute (virtual) address of word at label 1999
+ sub \rv, \rv, \rp @ offset between the two. That should be 0 in
+ @ virtual mode
+ sub \tmp, \tmp, \rv @ "patch" the address of p7_early_uart with the
+ @ computed offset
+
+ /* At this point \tmp contains the proper current address of p7_early_uart */
+
+ ldr \rp, [\tmp] @ p7_early_uart[0] (physical addr)
+ ldr \rv, [\tmp, #4] @ p7_early_uart[1] (virtual addr)
+
+ /* If it's non-0 we can use these values directly */
+ cmp \rp, #0
+ cmpne \rv, #0
+ bne 1002f
+
+ /* Otherwise we have to initialize the value of p7_early_uart with the
+ address of the UART specified in the P7 BOOT_MODE */
+ mrc p15, 0, \rv, c1, c0
+ tst \rv, #1 @ is MMU active?
+
+ /* If the MMU is active we can't be sure the SYS block has been properly
+ remapped yet, so we bail out rather than risking a DATA_ABORT */
+ bne 1002f
+
+ ldr \rv, =P7_BOOT_MODE
+ ldr \rp, [\rv] @ Read the BOOT_MODE register value
+ and \rp, \rp, #P7_BOOT_MODE_DIAG_MASK
+
+ cmp \rp, #P7_BOOT_MODE_DIAG_UART0
+ ldreq \rp, =P7_UART0
+ ldreq \rv, =__MMIO_P2V(P7_UART0)
+ beq 1001f
+
+ cmp \rp, #P7_BOOT_MODE_DIAG_UART1
+ ldreq \rp, =P7_UART1
+ ldreq \rv, =__MMIO_P2V(P7_UART1)
+ beq 1001f
+
+ cmp \rp, #P7_BOOT_MODE_DIAG_UART2
+ ldreq \rp, =P7_UART2
+ ldreq \rv, =__MMIO_P2V(P7_UART2)
+ beq 1001f
+
+ cmp \rp, #P7_BOOT_MODE_DIAG_UART3
+ ldreq \rp, =P7_UART3
+ ldreq \rv, =__MMIO_P2V(P7_UART3)
+ beq 1001f
+
+ /* Fallthrough: the BOOT_MODE has an unknown or unsupported value */
+ mov \rp, #0
+ mov \rv, #0
+1001:
+ /* Now we want to store the \rp and \rv values back in p7_early_uart so
+ that they are found the next time this function is called. */
+ str \rp, [\tmp] @ p7_early_uart[0] (physical addr)
+ str \rv, [\tmp, #4] @ p7_early_uart[1] (virtual addr)
+
+ b 1002f
+
+1999:
+ /* This is just for calculating the offset of p7_early_uart above */
+ .word .
+ .ltorg
+1002:
+ /* At this point \rp and \rv contain either correct physical and virtual
+ addresses or 0 if there was a problem and the early printk is unusable. */
+ .endm
+
+
+ .macro senduart,rd,rx
+ /* NOP when rx is 0 (see addruart) */
+ cmp \rx, #0
+ beq 1003f
+ strb \rd, [\rx, #_UART_TRX]
+1003:
+ .endm
+
+
+ .macro waituart,rd,rx
+ /* NOP when rx is 0 (see addruart) */
+ cmp \rx, #0
+ beq 1005f
+1004:
+ ldr \rd, [\rx, #_UART_STATUS]
+ tst \rd, #UART_STATUS_TXFILLED
+ bne 1004b
+1005:
+ .endm
+
+
+ .macro busyuart,rd,rx
+ /* NOP when rx is 0 (see addruart) */
+ cmp \rx, #0
+ beq 1007f
+1006:
+ ldr \rd, [\rx, #_UART_STATUS]
+ tst \rd, #UART_STATUS_TXEMPTY
+ beq 1006b
+1007:
+ .endm
+
+#endif /* ZPRINT */
diff --git a/arch/arm/mach-parrot7/include/mach/entry-macro.S b/arch/arm/mach-parrot7/include/mach/entry-macro.S
new file mode 100644
index 00000000000000..1dd4be06bd2270
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/entry-macro.S
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/entry-macro.S
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/ptrace.h>
+#include <asm/hardware/gic.h>
+#include <mach/p7.h>
+#include <mach/io.h>
+#include <mach/irqs.h>
+
+/* Uncomment this if running on old P7 releases where FIQs are always on... */
+#if 0
+#define P7_FIQ_WORKAROUND
+#endif
+
+ .macro disable_fiq
+#ifdef P7_FIQ_WORKAROUND
+ mrs r8, spsr
+ orr r8, r8, #PSR_F_BIT
+ msr spsr, r8
+#endif
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =__MMIO_P2V(P7_CPU_ICC)
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt. We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+ ldr \tmp, =1021
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #P7_LOCALTIMER_IRQ
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved.. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #P7_LOCALTIMER_IRQ
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
+
+/* vim:set ts=4:sw=4:noet:ft=asm: */
diff --git a/arch/arm/mach-parrot7/include/mach/ether.h b/arch/arm/mach-parrot7/include/mach/ether.h
new file mode 100644
index 00000000000000..aef4a72fc91cc5
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/ether.h
@@ -0,0 +1,37 @@
+/**
+ * linux/arch/arm/mach-parrot7/include/mach/ether.h - Parrot7 Ethernet platform interface
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Jimmy Perchet <jimmy.perchet@parrot.com>
+ * date: 27-08-2013
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_ETH_H
+#define _ARCH_PARROT7_ETH_H
+
+#if defined(CONFIG_STMMAC_ETH) || \
+ defined(CONFIG_STMMAC_ETH_MODULE)
+
+#include <linux/init.h>
+
+enum phy_iface {
+ PHY_IFACE_MII,
+ PHY_IFACE_RGMII,
+ PHY_IFACE_MIILITE,
+ PHY_IFACE_SSMII,
+};
+
+void __init p7_init_ether(enum phy_iface iface,
+ int phy_reset_gpio,
+ unsigned long drive_strength);
+
+#else /*def CONFIG_STMMAC_ETH*/
+
+#define p7_init_ether(_iface,_phy_reset_gpio,_drive_strength) do {} while (0)
+
+#endif /*def CONFIG_STMMAC_ETH*/
+
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/gpio.h b/arch/arm/mach-parrot7/include/mach/gpio.h
new file mode 100644
index 00000000000000..d8da9a77d94ebc
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/gpio.h
@@ -0,0 +1,146 @@
+/**
+ * linux/arch/arm/mach-parrot7/include/mach/gpio.h - Parrot7 gpiolib platform
+ * specific interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Lionel Flandrin <lionel.flandrin@parrot.com>
+ * date: 06-Apr-2012
+ *
+ * This file is released under the GPL
+ *
+ * The whole purpose of this file is to provide gpiolib and Parrot7's common
+ * platform code with GPIO space definitions, i.e., number of GPIO lines to
+ * manage, and, for each GPIO controller present in the system, their respective
+ * first GPIO within the global space (so-called "gpio base").
+ * We want to achieve this in a flexible enough manner to prevent from changing
+ * platform and / or driver code each time a new GPIO controller (or expander)
+ * is added.
+ */
+
+#ifndef _ARCH_PARROT7_GPIO_H_
+#define _ARCH_PARROT7_GPIO_H_
+
+/*
+ * Holds current number of GPIOs declared for the global space.
+ * Don't use this outside of this file, this is for internal purpose only !
+ */
+#define __P7_FIRST_GPIO__ 0
+
+/*
+ * Define gpio base for each controller.
+ * Use enum definition to force preprocessor expansion each time
+ * __P7_FIRST_GPIO__ is re-defined.
+ */
+enum {
+/**********************************
+ * Declare Parrot7 chip GPIO lines
+ **********************************/
+#if defined(CONFIG_GPIO_PARROT7) || defined(CONFIG_GPIO_PARROT7_MODULE)
+ P7_FIRST_GPIO = __P7_FIRST_GPIO__,
+#define P7_GPIOS_MAX 220
+#undef __P7_FIRST_GPIO__
+#define __P7_FIRST_GPIO__ P7_GPIOS_MAX
+
+#define P7_GPIO_NR(_n) (P7_FIRST_GPIO + (_n))
+
+#endif
+
+/******************************************************************
+ * Declare Parrot7 Power Management Unit companion chip GPIO lines
+ ******************************************************************/
+#if defined(CONFIG_GPIO_P7MU) || defined(CONFIG_GPIO_P7MU_MODULE)
+ P7MU_FIRST_GPIO = __P7_FIRST_GPIO__,
+#define P7MU_GPIOS_MAX 5
+#undef __P7_FIRST_GPIO__
+#define __P7_FIRST_GPIO__ (P7MU_FIRST_GPIO + P7MU_GPIOS_MAX)
+
+/* P7MU IOs start at IO1, so we subtract 1 to be 0-indexed */
+#define P7MU_IO_NR(_n) (P7MU_FIRST_GPIO + (_n) - 1)
+
+#endif
+
+/***********************************************
+ * Declare first FC7100 I/O expander GPIO lines
+ ***********************************************/
+#if (defined(CONFIG_GPIO_PCA953X) || \
+ defined(CONFIG_GPIO_PCA953X_MODULE)) && \
+ defined(CONFIG_MACH_PARROT_FC7100_FAMILY)
+ FC7100_IOEXPAND0_FIRST_GPIO = __P7_FIRST_GPIO__,
+#define FC7100_IOEXPAND0_GPIOS_MAX 16
+#undef __P7_FIRST_GPIO__
+#define __P7_FIRST_GPIO__ \
+ (FC7100_IOEXPAND0_FIRST_GPIO + FC7100_IOEXPAND0_GPIOS_MAX)
+
+#define FC7100_IOEXPAND0_GPIO_NR(_n) (FC7100_IOEXPAND0_FIRST_GPIO + (_n))
+#endif
+
+/************************************************
+ * Declare second FC7100 I/O expander GPIO lines
+ ************************************************/
+#if (defined(CONFIG_GPIO_PCA953X) || \
+ defined(CONFIG_GPIO_PCA953X_MODULE)) && \
+ defined(CONFIG_MACH_PARROT_FC7100_FAMILY)
+ FC7100_IOEXPAND1_FIRST_GPIO = __P7_FIRST_GPIO__,
+#define FC7100_IOEXPAND1_GPIOS_MAX 16
+#undef __P7_FIRST_GPIO__
+#define __P7_FIRST_GPIO__ \
+ (FC7100_IOEXPAND1_FIRST_GPIO + FC7100_IOEXPAND1_GPIOS_MAX)
+
+#define FC7100_IOEXPAND1_GPIO_NR(_n) (FC7100_IOEXPAND1_FIRST_GPIO + (_n))
+#endif
+
+/****************************************************
+ * Declare 24 bits FC7100DEV I/O expander GPIO lines
+ ****************************************************/
+#define FC7100DEV_IOEXPAND_GPIO_NR FC7100_IOEXPAND0_GPIO_NR
+};
+
+/* At last, set the total number of available GPIO lines in the global space.
+ We add some headroom for potential "hotplug" GPIO chips (used on Sicilia to
+ add the external GPIOs of the irradiance module on the Hook).
+ */
+#define ARCH_NR_GPIOS (__P7_FIRST_GPIO__ + 50)
+
+#include <asm-generic/gpio.h>
+
+/*
+ * We use the gpiolib framework. Those functions will call the corresponding
+ * callbacks defined in the GPIO driver's struct gpio_chip. (see for instance
+ * drivers/parrot/gpio/p7gpio.c).
+ *
+ * If it turns out that the gpiolib framework is too slow for a given product,
+ * we could inline the code accessing the GPIO here and save a couple cycles
+ * (for instance for heavy bitbanging).
+ */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+/* When using filtering and/or phase measurement, use the following struct.
+ * @start and @stop are GPIO numbers.
+ * @filter is the counter ratio; this ratio divides the IRQ rate.
+ *
+ */
+
+#define GPIO_NO_MEASURE (-1)
+#define GPIO_MEASURE_START 0
+#define GPIO_MEASURE_STOP 1
+#define GPIO_MEASURE_START_AFTER_STOP 2
+#define GPIO_MEASURE_STOP_AFTER_START 3
+
+struct p7gpio_filter_phase{
+ u32 start;
+ u32 stop;
+ u8 filter;
+ s8 mode;
+ u8 export_reset;
+};
+
+/* Custom function for mapping a gpio to an irq:
+ should be called after the p7 gpio is registered, but before gpio_to_irq
+ */
+int p7_gpio_interrupt_register(unsigned gpio);
+int p7_gpio_filter_interrupt_register(struct p7gpio_filter_phase);
+#endif /* _ARCH_PARROT7_GPIO_H_ */
diff --git a/arch/arm/mach-parrot7/include/mach/gpu-p7.h b/arch/arm/mach-parrot7/include/mach/gpu-p7.h
new file mode 100644
index 00000000000000..47f6f91788feec
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/gpu-p7.h
@@ -0,0 +1,28 @@
+/**
+ * linux/arch/arm/mach-parrot7/include/mach/gpu-p7.h - Parrot7 MALI200 GPU
+ * platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 20-10-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef __ARCH_PARROT7_GPU_H__
+#define __ARCH_PARROT7_GPU_H__
+
+#if defined(CONFIG_MALI200) || \
+ defined(CONFIG_MALI200_MODULE)
+
+#include "mali_osk_specific.h"
+#include "mali_osk.h"
+
+extern _mali_osk_resource_t p7mali200_arch_configuration [];
+#define arch_configuration p7mali200_arch_configuration
+
+#endif /* defined(CONFIG_MALI200) || \
+ defined(CONFIG_MALI200_MODULE) */
+
+#endif /* __ARCH_PARROT7_GPU_H__ */
diff --git a/arch/arm/mach-parrot7/include/mach/hardware.h b/arch/arm/mach-parrot7/include/mach/hardware.h
new file mode 100644
index 00000000000000..9e3571bc6e39fe
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/hardware.h
@@ -0,0 +1 @@
+/* Useless. */
diff --git a/arch/arm/mach-parrot7/include/mach/irqs.h b/arch/arm/mach-parrot7/include/mach/irqs.h
new file mode 100644
index 00000000000000..551b6deec20550
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/irqs.h
@@ -0,0 +1,180 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/irqs.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_IRQS_H
+#define _ARCH_PARROT7_IRQS_H
+
+/* Private Peripherals Interrupts. */
+#define P7_LOCALTIMER_IRQ 29
+#define P7_LOCALWDOG_IRQ 30
+
+
+/* Shared Peripherals Interrupts. */
+#define P7_SPIRQ(_irq) (32 + (_irq))
+
+/* GPU IRQ for MALI200: only on P7 R1 & R2 */
+#define P7_GPU_IRQ0 P7_SPIRQ(0)
+#define P7_GPU_IRQ1 P7_SPIRQ(1)
+#define P7_GPU_IRQ2 P7_SPIRQ(2)
+/* GPU IRQ for MALI400: only on P7 R3 */
+#define P7_GPU_PP0_IRQ P7_SPIRQ(85)
+#define P7_GPU_PPMMU0_IRQ P7_SPIRQ(86)
+#define P7_GPU_PP1_IRQ P7_SPIRQ(87)
+#define P7_GPU_PPMMU1_IRQ P7_SPIRQ(88)
+#define P7_GPU_PP2_IRQ P7_SPIRQ(89)
+#define P7_GPU_PPMMU2_IRQ P7_SPIRQ(90)
+#define P7_GPU_PP3_IRQ P7_SPIRQ(91)
+#define P7_GPU_PPMMU3_IRQ P7_SPIRQ(92)
+#define P7_GPU_GP_IRQ P7_SPIRQ(93)
+#define P7_GPU_GPMMU_IRQ P7_SPIRQ(94)
+
+#define P7_NAND_IRQ P7_SPIRQ(3)
+#define P7_SDIO0_IRQ P7_SPIRQ(11)
+#define P7_SDIO1_IRQ P7_SPIRQ(12)
+#define P7_SDIO2_IRQ P7_SPIRQ(13)
+
+/* SPI IRQ: 4 to 7 */
+#define P7_SPI0_IRQ P7_SPIRQ(4)
+#define P7_SPI1_IRQ P7_SPIRQ(5)
+#define P7_SPI2_IRQ P7_SPIRQ(6)
+#define P7_SPI3_IRQ P7_SPIRQ(7)
+
+/* USB IRQ: 8 to 10 */
+#define P7_USB0_IRQ P7_SPIRQ(8)
+#define P7_USB1_IRQ P7_SPIRQ(9)
+
+/* DMAC IRQs on P7 R1: 15 to 21 */
+#define P7_R1_DMA_ABORT_IRQ P7_SPIRQ(15)
+#define P7_R1_DMA0_IRQ P7_SPIRQ(16)
+#define P7_R1_DMA1_IRQ P7_SPIRQ(17)
+#define P7_R1_DMA2_IRQ P7_SPIRQ(18)
+#define P7_R1_DMA3_IRQ P7_SPIRQ(19)
+#define P7_R1_DMA4_IRQ P7_SPIRQ(20)
+#define P7_R1_DMA5_IRQ P7_SPIRQ(21)
+
+/* Ethernet IRQs: 15 to 17 */
+#define P7_ETH0_IRQ P7_SPIRQ(15)
+#define P7_ETH1_IRQ P7_SPIRQ(16)
+#define P7_ETH2_IRQ P7_SPIRQ(17)
+
+/* MPEG_TS IRQs */
+#define P7_MPEGTS0_IRQ P7_SPIRQ(18)
+#define P7_MPEGTS1_IRQ P7_SPIRQ(19)
+
+/* CRYPTO */
+#define P7_CRYPTO_IRQ P7_SPIRQ(20)
+
+/* DMAC IRQs on P7 R2/3 */
+#define P7_DMA_IRQ P7_SPIRQ(21)
+
+/* AVI IRQs: 22 to 56 */
+#define P7_CFG_IRQ P7_SPIRQ(22)
+#define P7_INTER_IRQ P7_SPIRQ(23)
+#define P7_FIFO00_IRQ P7_SPIRQ(24)
+#define P7_FIFO01_IRQ P7_SPIRQ(25)
+#define P7_FIFO02_IRQ P7_SPIRQ(26)
+#define P7_FIFO03_IRQ P7_SPIRQ(27)
+#define P7_FIFO04_IRQ P7_SPIRQ(28)
+#define P7_FIFO05_IRQ P7_SPIRQ(29)
+#define P7_FIFO06_IRQ P7_SPIRQ(30)
+#define P7_FIFO07_IRQ P7_SPIRQ(31)
+#define P7_FIFO08_IRQ P7_SPIRQ(32)
+#define P7_FIFO09_IRQ P7_SPIRQ(33)
+#define P7_FIFO10_IRQ P7_SPIRQ(34)
+#define P7_FIFO11_IRQ P7_SPIRQ(35)
+#define P7_CONV0_IRQ P7_SPIRQ(36)
+#define P7_CONV1_IRQ P7_SPIRQ(37)
+#define P7_CONV2_IRQ P7_SPIRQ(38)
+#define P7_CONV3_IRQ P7_SPIRQ(39)
+#define P7_BLEND0_IRQ P7_SPIRQ(40)
+#define P7_BLEND1_IRQ P7_SPIRQ(41)
+#define P7_LCD0_IRQ P7_SPIRQ(42)
+#define P7_LCD1_IRQ P7_SPIRQ(43)
+#define P7_CAM0_IRQ P7_SPIRQ(44)
+#define P7_CAM1_IRQ P7_SPIRQ(45)
+#define P7_CAM2_IRQ P7_SPIRQ(46)
+#define P7_CAM3_IRQ P7_SPIRQ(47)
+#define P7_CAM4_IRQ P7_SPIRQ(48)
+#define P7_CAM5_IRQ P7_SPIRQ(49)
+#define P7_SCALROT0_IRQ P7_SPIRQ(50)
+#define P7_SCALROT1_IRQ P7_SPIRQ(51)
+#define P7_GAM0_IRQ P7_SPIRQ(52)
+#define P7_GAM1_IRQ P7_SPIRQ(53)
+
+#define P7_VDEC_IRQ P7_SPIRQ(57)
+#define P7_VENC_IRQ P7_SPIRQ(58)
+
+#define P7_UART0_IRQ P7_SPIRQ(59)
+#define P7_UART1_IRQ P7_SPIRQ(60)
+#define P7_UART2_IRQ P7_SPIRQ(61)
+#define P7_UART3_IRQ P7_SPIRQ(62)
+#define P7_UART4_IRQ P7_SPIRQ(63)
+#define P7_UART5_IRQ P7_SPIRQ(64)
+#define P7_UART6_IRQ P7_SPIRQ(65)
+#define P7_UART7_IRQ P7_SPIRQ(66)
+#define P7_I2CM0_IRQ P7_SPIRQ(67)
+#define P7_I2CM1_IRQ P7_SPIRQ(68)
+#define P7_I2CM2_IRQ P7_SPIRQ(69)
+#define P7_I2CMS_IRQ P7_SPIRQ(95)
+#define P7_I2CS0_IRQ P7_SPIRQ(70)
+#define P7_I2CS1_IRQ P7_SPIRQ(71)
+#define P7_I2CS2_IRQ P7_SPIRQ(72)
+#define P7_GPIO_IRQ P7_SPIRQ(73)
+
+#define P7_AXIMON_IRQ P7_SPIRQ(74)
+
+#define P7_TIM0_IRQ P7_SPIRQ(75)
+#define P7_TIM1_IRQ P7_SPIRQ(76)
+#define P7_TIM2_IRQ P7_SPIRQ(77)
+#define P7_TIM3_IRQ P7_SPIRQ(78)
+
+#define P7_AAI_IRQ P7_SPIRQ(79)
+
+#define P7_CCAN0_IRQ P7_SPIRQ(83)
+#define P7_CCAN1_IRQ P7_SPIRQ(84)
+
+
+#define P7_PMU_IRQ0 P7_SPIRQ(115)
+#define P7_PMU_IRQ1 P7_SPIRQ(116)
+
+#define P7_IRQS (160)
+
+/* Parrot GPIO controller may handle up to 20 GPIO lines as interrupts. */
+#if defined(CONFIG_GPIO_PARROT7) || defined(CONFIG_GPIO_PARROT7_MODULE)
+#define P7_GPIO_IRQS (20)
+#else
+#define P7_GPIO_IRQS (0)
+#endif
+
+#define P7MU_GPIO_IRQS (22)
+
+#if defined(CONFIG_MACH_PARROT_FC7100)
+/* IRQs for the two IO expanders */
+#define P7_EXTERNAL_IRQS (2 * 16)
+#else
+#define P7_EXTERNAL_IRQS (0)
+#endif
+
+#define NR_IRQS (P7_IRQS + P7_GPIO_IRQS + P7MU_GPIO_IRQS + P7_EXTERNAL_IRQS)
+
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/memory.h b/arch/arm/mach-parrot7/include/mach/memory.h
new file mode 100644
index 00000000000000..5906fe6292a28b
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/memory.h
@@ -0,0 +1,32 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/memory.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_MEMORY_H
+#define _ARCH_PARROT7_MEMORY_H
+
+/* RAM physical address. */
+#define PLAT_PHYS_OFFSET UL(0x80000000)
+
+/* Required for ION */
+#define ARCH_HAS_SG_CHAIN
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/p7-adc.h b/arch/arm/mach-parrot7/include/mach/p7-adc.h
new file mode 100644
index 00000000000000..69988b99feaef8
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/p7-adc.h
@@ -0,0 +1,56 @@
+/**
+********************************************************************************
+* @file p7-adc.h
+* @brief P7 Analog to Digital Converter driver
+*
+* Copyright (C) 2013 Parrot S.A.
+*
+* @author Karl Leplat <karl.leplat@parrot.com>
+* @date 2013-09-19
+********************************************************************************
+*/
+#ifndef ADC_P7_ADC_H_
+#define ADC_P7_ADC_H_
+
+#include <linux/kernel.h>
+
+/* Max sampling frequency based on clk_adc 8Mhz - do not change */
+#define P7MUADC_MARKER_FREQ 1200000
+
+typedef enum {
+ P7MUADC_IIO_TEMP = 0,
+ P7MUADC_IIO_RING_BUFFER,
+ P7MUADC_IIO_MARKER,
+ P7_ADC_IIO_MAX_CHIP_INFO,
+} p7_adc_type;
+
+struct p7_adc_chan {
+ int type;
+ int channel;
+ u32 freq;
+ u32 samples;
+};
+
+struct p7_adc_chan_data {
+ struct p7_adc_chan *channels;
+ size_t num_channels;
+};
+
+struct p7_temp_chan {
+ int channel;
+ u32 freq;
+ const char *name;
+};
+
+enum {
+ P7_TEMP_FC7100_HW08,
+ P7_TEMP_FC7100_HW04,
+ P7_TEMP_SICILIA,
+};
+struct p7_temp_chan_data {
+ struct p7_temp_chan *channels;
+ size_t num_channels;
+ int temp_mode;
+};
+
+#endif /* ADC_P7_ADC_H_ */
diff --git a/arch/arm/mach-parrot7/include/mach/p7.h b/arch/arm/mach-parrot7/include/mach/p7.h
new file mode 100644
index 00000000000000..73d430073b80f6
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/p7.h
@@ -0,0 +1,170 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/p7.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_P7_H
+#define _ARCH_PARROT7_P7_H
+
+#include <asm/memory.h>
+#ifndef __ASSEMBLY__
+#include <asm/io.h>
+#endif
+
+
+/*
+ * I/O devices static mappings: inside vmalloc region,
+ * 48MB below VMALLOC_END.
+ */
+#define __MMIO_P2V(_phys) (0xff000000 - (3 * SZ_16M) + ((_phys) & 0x03ffffff))
+#define MMIO_P2V(_phys) __typesafe_io(__MMIO_P2V(_phys))
+
+/* Primary input clock frequency (Hertz). */
+#define P7_CLKIN_HZ UL(26000000)
+
+#define P7_INTRAM UL(0x00100000)
+#define P7_GPU UL(0x00200000)
+#define P7_AAI UL(0x00300000)
+#define P7_AVI UL(0x00400000)
+#define P7_VDEC UL(0x00500000)
+#define P7_VENC UL(0x00600000)
+#define P7_DMA UL(0x00700000)
+#define P7_CPU UL(0x00800000)
+#define P7_MPMC UL(0x00900000)
+#define P7_SYS UL(0x00a00000)
+#define P7_NIC UL(0x00e00000)
+
+/*
+ * Core Private Memory Region.
+ */
+#define P7_CPU_PMR UL(0x00f00000)
+/* Snoop Control Unit registers. */
+#define P7_CPU_SCU (P7_CPU_PMR)
+/* Local interrupts controller interface. */
+#define P7_CPU_ICC (P7_CPU_PMR + UL(0x0100))
+/* Core private / local timer. */
+#define P7_CPU_LOCALTIMER (P7_CPU_PMR + UL(0x0600))
+/* Interrupts distributor. */
+#define P7_CPU_ICD (P7_CPU_PMR + UL(0x1000))
+
+/*
+ * Level 2 cache controller (PL310).
+ */
+#define P7_L2C (P7_CPU_PMR + SZ_512K)
+
+/*
+ * MPMC
+ */
+/* gate training */
+#define P7_MPMC_TRAIN_EN (UL(0x4f75 << 2))
+
+/*
+ * High Speed Peripherals.
+ */
+#define P7_HSP UL(0x01000000)
+#define P7_NAND P7_HSP
+#define P7_SPI0 (P7_HSP + UL(0x100000))
+#define P7_SPI1 (P7_HSP + UL(0x101000))
+#define P7_SPI2 (P7_HSP + UL(0x102000))
+#define P7_SPI3 (P7_HSP + UL(0x103000))
+#define P7_MPEGTS0 (P7_HSP + UL(0x104000))
+#define P7_MPEGTS1 (P7_HSP + UL(0x10c000))
+#define P7_SPI (P7_HSP + UL(0x10f000))
+#define P7_SDIO0 (P7_HSP + UL(0x200000))
+#define P7_SDIO1 (P7_HSP + UL(0x300000))
+#define P7_SDIO2 (P7_HSP + UL(0x400000))
+#define P7_USB0 (P7_HSP + UL(0x500000))
+#define P7_USB1 (P7_HSP + UL(0x600000))
+#define P7_USB2 (P7_HSP + UL(0x700000))
+#define P7_ETHER (P7_HSP + UL(0x800000))
+#define P7_CRYPTO (P7_HSP + UL(0x900000))
+
+/*
+ * Low Speed Peripherals.
+ */
+#define P7_LSP UL(0x02000000)
+#define P7_PWM (P7_LSP + UL(0x000000))
+#define P7_UART0 (P7_LSP + UL(0x100000))
+#define P7_UART1 (P7_LSP + UL(0x110000))
+#define P7_UART2 (P7_LSP + UL(0x120000))
+#define P7_UART3 (P7_LSP + UL(0x130000))
+#define P7_UART4 (P7_LSP + UL(0x140000))
+#define P7_UART5 (P7_LSP + UL(0x150000))
+#define P7_UART6 (P7_LSP + UL(0x160000))
+#define P7_UART7 (P7_LSP + UL(0x170000))
+#define P7_I2CM0 (P7_LSP + UL(0x200000))
+#define P7_I2CM1 (P7_LSP + UL(0x210000))
+#define P7_I2CM2 (P7_LSP + UL(0x220000))
+#define P7_I2CMS (P7_LSP + UL(0x800000))
+#define P7_I2CS0 (P7_LSP + UL(0x300000))
+#define P7_I2CS1 (P7_LSP + UL(0x310000))
+#define P7_I2CS2 (P7_LSP + UL(0x320000))
+#define P7_GPIO (P7_LSP + UL(0x400000))
+#define P7_CCAN0 (P7_LSP + UL(0x500000))
+#define P7_CCAN1 (P7_LSP + UL(0x510000))
+
+/*
+ * AXI Monitor
+ */
+#define P7_AXIMON (P7_MPMC + UL(0x80000))
+#define P7_MPMC_GBC (P7_MPMC + UL(0xffc00))
+#define P7_MPMC_GBC_PRI (P7_MPMC_GBC + UL(0x1f4))
+
+
+/*
+ * P7 Boot mode register containing the diagnostic channel to use amongst other
+ * things
+ */
+#define P7_BOOT_MODE (P7_SYS + UL(0x408))
+
+#define P7_BOOT_MODE_DIAG_MASK (0x7 << 5)
+#define P7_BOOT_MODE_DIAG_NONE (0 << 5)
+#define P7_BOOT_MODE_DIAG_USB0 (1 << 5)
+#define P7_BOOT_MODE_DIAG_UART0 (2 << 5)
+#define P7_BOOT_MODE_DIAG_DCC (3 << 5)
+#define P7_BOOT_MODE_DIAG_UART2 (4 << 5)
+#define P7_BOOT_MODE_DIAG_UART1 (5 << 5)
+#define P7_BOOT_MODE_DIAG_UART3 (6 << 5)
+
+/*
+ * System Debug (only present when running emulated design)
+ */
+
+#ifdef CONFIG_MACH_PARROT_ZEBU
+
+#ifdef CONFIG_ARCH_PARROT7_ZEBU_MPW1
+#define P7_SYS_DBG (P7_SYS + UL(0x2000))
+/* MPW2 */
+//#define P7_SYS_DBG (P7_SYS + UL(0x3000))
+
+#define P7_ZPRINT P7_SYS_DBG
+#define P7_SYS_DBG_HALT (P7_SYS_DBG + UL(0x1008))
+#endif
+
+#ifdef CONFIG_ARCH_PARROT7_ZEBU_MPW2
+#define P7_SYS_DBG (P7_SYS + UL(0x3000))
+#define P7_ZPRINT (P7_SYS + UL(0x70000))
+#define P7_SYS_DBG_HALT (P7_SYS + UL(0x71008))
+#endif
+
+#endif /* CONFIG_MACH_PARROT_ZEBU */
+
+#endif
diff --git a/arch/arm/mach-parrot7/include/mach/pwm.h b/arch/arm/mach-parrot7/include/mach/pwm.h
new file mode 100644
index 00000000000000..1f0d84fa8cace0
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/pwm.h
@@ -0,0 +1,64 @@
+/**
+ * linux/arch/arm/mach-parrot7/include/mach/pwm.h - Parrot7 pwm platform
+ * specific interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 22-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_PWM_H_
+#define _ARCH_PARROT7_PWM_H_
+
+/*
+ * Holds current number of PWMs declared for the global space.
+ * Don't use this outside of this file, this is for internal purpose only !
+ */
+#define __P7_FIRST_PWM__ 0
+
+#define P7_PWMS_MAX 16
+#define P7MU_PWMS_MAX 2
+
+/*
+ * Define pwm base for each controller.
+ * Use enum definition to force preprocessor expansion each time
+ * __P7_FIRST_PWM__ is re-defined.
+ */
+enum {
+/**********************************
+ * Declare Parrot7 chip PWM lines
+ **********************************/
+#if defined(CONFIG_PWM_PARROT7) || defined(CONFIG_PWM_PARROT7_MODULE)
+ P7_FIRST_PWM = __P7_FIRST_PWM__,
+#undef __P7_FIRST_PWM__
+#define __P7_FIRST_PWM__ P7_PWMS_MAX
+
+#define P7_PWM_NR(_n) (P7_FIRST_PWM + (_n))
+#else
+#define P7_PWM_NR(_n) (-1)
+#endif
+
+/******************************************************************
+ * Declare Parrot7 Power Management Unit companion chip PWM lines
+ ******************************************************************/
+
+#if defined(CONFIG_PWM_P7MU) || defined(CONFIG_PWM_P7MU_MODULE)
+ P7MU_FIRST_PWM = __P7_FIRST_PWM__,
+#undef __P7_FIRST_PWM__
+#define __P7_FIRST_PWM__ (P7MU_FIRST_PWM + P7MU_PWMS_MAX)
+
+#define P7MU_PWM_NR(_n) (P7MU_FIRST_PWM + (_n))
+
+#else
+#define P7MU_PWM_NR(_n) (-1)
+#endif
+ P7_FIRST_NR = __P7_FIRST_PWM__,
+};
+
+/* At last, set the total number of available PWM lines in the global space. */
+#define ARCH_NR_PWMS __P7_FIRST_PWM__
+
+#endif /* _ARCH_PARROT7_PWM_H_ */
diff --git a/arch/arm/mach-parrot7/include/mach/regs-uart.h b/arch/arm/mach-parrot7/include/mach/regs-uart.h
new file mode 100644
index 00000000000000..e6f08e3860bc5b
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/regs-uart.h
@@ -0,0 +1,135 @@
+/**
+ * @file linux/include/asm-arm/arch-parrot/regs-uart.h
+ * @brief Device I/O - Description of Parrot5/5+/6 serial hardware
+ *
+ * Copyright (C) 2005,2006,2007 Parrot S.A.
+ *
+ * @author yves.lemoine@parrot.com
+ * @author ivan.djelic@parrot.com
+ * @date 2005-09-28
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __ARCH_ARM_PARROT5_UART_REGS_H
+#define __ARCH_ARM_PARROT5_UART_REGS_H
+
+#define PXUART_NR 8
+
+#define _UART_TRX 0x00 /* UART RX/TX Buffers */
+#define _UART_IRX 0x04 /* Rx IRQ Enable Register */
+#define _UART_ITX 0x08 /* Tx IRQ Enable Register */
+#define _UART_RSTBRK 0x0C /* Reset Break Register */
+#define _UART_SETBRK 0x10 /* Set Break Register */
+#define _UART_LCR 0x14 /* Line Command Register */
+#define _UART_SPD 0x18 /* Speed Register */
+#define _UART_STATUS 0x1C /* Status Register */
+
+/* Registers bitwise definitions */
+
+/* Receive register */
+#define UART_TRX_INVALID (1 << 8) /* Byte invalidity flag */
+#define UART_TRX_OVERRUN (1 << 9) /* Over-run bit */
+#define UART_TRX_PARITY_ERROR (1 << 10) /* Parity error bit */
+#define UART_TRX_FRAME_ERROR (1 << 11) /* Frame error bit */
+#define UART_TRX_RXBREAK (1 << 12) /* Break detection bit */
+#define UART_TRX_TXCOLLISION (1 << 13) /* TX flag collision */
+
+/* Error mask - ignore invalid and collision flags */
+#define UART_TRX_ANY_ERROR \
+ (UART_TRX_OVERRUN | \
+ UART_TRX_PARITY_ERROR| \
+ UART_TRX_FRAME_ERROR | \
+ UART_TRX_RXBREAK)
+
+/* Receive Interrupt Enable Register */
+#define UART_IRX_RIM 1 /* RX Interrupt mask bit */
+
+/* Transmit Interrupt Enable Register */
+#define UART_ITX_TIM 1 /* TX Interrupt mask bit */
+
+/* Line Command Register */
+
+#define UART_LCR_LONG_BREAK_2 (0 << 12)
+#define UART_LCR_LONG_BREAK_4 (1 << 12)
+#define UART_LCR_LONG_BREAK_8 (2 << 12)
+#define UART_LCR_LONG_BREAK_16 (3 << 12)
+#define UART_LCR_LONG_BREAK_32 (4 << 12)
+#define UART_LCR_LONG_BREAK_64 (5 << 12)
+#define UART_LCR_LONG_BREAK_128 (6 << 12)
+#define UART_LCR_LONG_BREAK_256 (7 << 12)
+#define UART_LCR_TSTZERO (1 << 11) /* Collision test on level 0*/
+#define UART_LCR_SINGLEWIRE (1 << 10) /* Single Wire mode. */
+#define UART_LCR_TXFIFO (1 << 9) /* TX FIFO utilization */
+#define UART_LCR_RXFIFO (1 << 8) /* RX FIFO utilization */
+#define UART_LCR_FIFO_THRESHOLD_1 (0 << 6) /* RX FIFO threshold */
+#define UART_LCR_FIFO_THRESHOLD_8 (1 << 6)
+#define UART_LCR_FIFO_THRESHOLD_16 (2 << 6)
+#define UART_LCR_FIFO_THRESHOLD_24 (3 << 6)
+#define UART_LCR_CTSMODE (1 << 5) /* CTS validation bit */
+#define UART_LCR_STICKY (1 << 4) /* Fixed Parity bit */
+#define UART_LCR_EVEN (1 << 3) /* Even Parity bit */
+#define UART_LCR_PARITY (1 << 2) /* Parity enable bit */
+#define UART_LCR_TWOSTOP (1 << 1) /* 2 stop bits enable bit */
+#define UART_LCR_EIGHTBITS 1 /* 8 bits mode enable bit */
+
+/* Parrot5+ LCR extensions */
+#define UART_LCR_RTS_CTRL (1 << 15) /* auto(0)/manual(1) nRTS */
+#define UART_LCR_FIFO_RX_THRESHOLD_1 (0 << 6) /* Receive FIFO threshold */
+#define UART_LCR_FIFO_RX_THRESHOLD_16 (1 << 6)
+#define UART_LCR_FIFO_RX_THRESHOLD_32 (2 << 6)
+#define UART_LCR_FIFO_RX_THRESHOLD_48 (3 << 6)
+#define UART_LCR_FIFO_TX_THRESHOLD_0 (0 << 16) /* Transmit FIFO threshold */
+#define UART_LCR_FIFO_TX_THRESHOLD_4 (1 << 16)
+#define UART_LCR_FIFO_TX_THRESHOLD_8 (2 << 16)
+#define UART_LCR_FIFO_TX_THRESHOLD_16 (3 << 16)
+
+/* Status Register */
+#define UART_STATUS_COLLISION (1 << 10) /* Collision flag bit */
+#define UART_STATUS_STOP_BREAK (1 << 9) /* Stop break flag bit */
+#define UART_STATUS_TX_BREAK (1 << 8) /* Transmit Break flag bit */
+#define UART_STATUS_TXFILLED (1 << 7) /* FIFO TX Filled flag bit */
+#define UART_STATUS_TXEMPTY (1 << 6) /* TX empty flag bit */
+#define UART_STATUS_ITTX (1 << 5) /* TX Interrupt flag bit */
+#define UART_STATUS_CTSN (1 << 4) /* CTSN input read-back value */
+#define UART_STATUS_LONG_BREAK (1 << 3) /* Long brk detection flag bit*/
+#define UART_STATUS_RTSN	(1 << 2)	/* RTSN output read-back value*/
+#define UART_STATUS_RXFILLED (1 << 1) /* FIFO RX Filled flag bit */
+#define UART_STATUS_ITRX 1 /* Receive Interrupt flag bit */
+
+#define BAUD_921600 (UART_CLOCK/921600)
+#define BAUD_460800 (UART_CLOCK/460800)
+#define BAUD_115200 (UART_CLOCK/115200)
+#define BAUD_38400 (UART_CLOCK/ 38400)
+#define BAUD_19200 (UART_CLOCK/ 19200)
+#define BAUD_9600 (UART_CLOCK/ 9600)
+
+/* define alternative values for CPUCLK = 156 MHz mode */
+#define BAUD_921600_156MHZ ((3*UART_CLOCK/4)/921600)
+#define BAUD_460800_156MHZ ((3*UART_CLOCK/4)/460800)
+#define BAUD_115200_156MHZ ((3*UART_CLOCK/4)/115200)
+#define BAUD_38400_156MHZ ((3*UART_CLOCK/4)/ 38400)
+#define BAUD_19200_156MHZ ((3*UART_CLOCK/4)/ 19200)
+#define BAUD_9600_156MHZ ((3*UART_CLOCK/4)/ 9600)
+
+/* FIFO sizes */
+#define UART_TX_FIFO_SIZE_P5 (16)
+#define UART_RX_FIFO_SIZE_P5 (32)
+
+/* Parrot5+ fifos are twice deeper */
+#define UART_TX_FIFO_SIZE_P5P (32)
+#define UART_RX_FIFO_SIZE_P5P (64)
+
+#endif /* __ARCH_ARM_PARROT5_UART_REGS_H */
diff --git a/arch/arm/mach-parrot7/include/mach/smp.h b/arch/arm/mach-parrot7/include/mach/smp.h
new file mode 100644
index 00000000000000..a0e9813c103e74
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/smp.h
@@ -0,0 +1,47 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/smp.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_SMP_H
+#define _ARCH_PARROT7_SMP_H
+
+#include <asm/hardware/gic.h>
+
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("mrc p15, 0, %0, c0, c0, 5" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0f; \
+ })
+
+/*
+ * We use IRQ1 as the IPI (Inter-Processor Interrupts).
+ */
+static inline void smp_cross_call(const struct cpumask *mask)
+{
+ gic_raise_softirq(mask, 1);
+}
+
+#endif
+
+/* vim:set ts=4:sw=4:noet:ft=c: */
diff --git a/arch/arm/mach-parrot7/include/mach/system.h b/arch/arm/mach-parrot7/include/mach/system.h
new file mode 100644
index 00000000000000..ece16441394fc6
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/system.h
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/system.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_SYSTEM_H
+#define _ARCH_PARROT7_SYSTEM_H
+
+static inline void arch_idle(void)
+{
+ cpu_do_idle();
+}
+
+#endif
+
+/* vim:set ts=4:sw=4:noet:ft=c: */
diff --git a/arch/arm/mach-parrot7/include/mach/timex.h b/arch/arm/mach-parrot7/include/mach/timex.h
new file mode 100644
index 00000000000000..0ec044bcdf6d35
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/timex.h
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/timex.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_TIMEX_H
+#define _ARCH_PARROT7_TIMEX_H
+
+/*
+ * CLOCK_TICK_RATE is actually a don't-care value. Make it default to
+ * (100 * HZ).
+ */
+#define CLOCK_TICK_RATE (100 * HZ)
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif
+
+/* vim:set ts=4:sw=4:noet:ft=c: */
diff --git a/arch/arm/mach-parrot7/include/mach/uncompress.h b/arch/arm/mach-parrot7/include/mach/uncompress.h
new file mode 100644
index 00000000000000..7ab19a47102ad2
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/uncompress.h
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/arm/mach-parrot7/include/mach/uncompress.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 28-Oct-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <mach/p7.h>
+#include <mach/regs-uart.h>
+
+#if defined(CONFIG_PARROT7_DEBUG_LL_UART0)
+#define UART_TX (*(volatile unsigned char *)(P7_UART0 + _UART_TRX))
+#define UART_STAT (*(volatile unsigned char *)(P7_UART0 + _UART_STATUS))
+#elif defined(CONFIG_PARROT7_DEBUG_LL_UART1)
+#define UART_TX (*(volatile unsigned char *)(P7_UART1 + _UART_TRX))
+#define UART_STAT (*(volatile unsigned char *)(P7_UART1 + _UART_STATUS))
+#elif defined(CONFIG_PARROT7_DEBUG_LL_UART2)
+#define UART_TX (*(volatile unsigned char *)(P7_UART2 + _UART_TRX))
+#define UART_STAT (*(volatile unsigned char *)(P7_UART2 + _UART_STATUS))
+#elif defined(CONFIG_PARROT7_DEBUG_LL_UART3)
+#define UART_TX (*(volatile unsigned char *)(P7_UART3 + _UART_TRX))
+#define UART_STAT (*(volatile unsigned char *)(P7_UART3 + _UART_STATUS))
+#endif
+
+
+#if defined(CONFIG_PARROT7_DEBUG_LL_UART0) \
+ || defined(CONFIG_PARROT7_DEBUG_LL_UART1) \
+ || defined(CONFIG_PARROT7_DEBUG_LL_UART2) \
+ || defined(CONFIG_PARROT7_DEBUG_LL_UART3)
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ while (UART_STAT & UART_STATUS_TXFILLED)
+ barrier();
+
+ UART_TX = c;
+}
+
+static inline void flush(void)
+{
+ while ((UART_STAT & UART_STATUS_TXEMPTY) == 0)
+ barrier();
+}
+#else
+static inline void putc(int c)
+{
+}
+
+static inline void flush(void)
+{
+}
+
+#endif
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+
+/* vim:set ts=4:sw=4:noet:ft=c: */
diff --git a/arch/arm/mach-parrot7/include/mach/usb-p7.h b/arch/arm/mach-parrot7/include/mach/usb-p7.h
new file mode 100644
index 00000000000000..5b29a2a949a94c
--- /dev/null
+++ b/arch/arm/mach-parrot7/include/mach/usb-p7.h
@@ -0,0 +1,49 @@
+/**
+ * @file arch/arm/mach-parrot7/include/mach/usb-p7.h
+ * @brief Parrot7 usb controller definitions
+ *
+ * Copyright (C) 2009 Parrot S.A.
+ *
+ * @author olivier.bienvenu@parrot.com
+ * @date 2009-04-17
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __ARCH_ARM_PARROT_USB_P7_H
+#define __ARCH_ARM_PARROT_USB_P7_H
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/usb/chipidea.h>
+
+#define USB_DRV_NAME "ci_hdrc"
+
+enum p7_usb2_phy_modes {
+ P7_USB2_PHY_NONE,
+ P7_USB2_PHY_ULPI,
+ P7_USB2_PHY_UTMI,
+ P7_USB2_PHY_UTMI_WIDE,
+ P7_USB2_PHY_SERIAL,
+};
+
+struct p7_usb2_platform_data {
+ struct ci13xxx_udc_driver ci_udc;
+	unsigned int phy_enable; // address of the PHY enable register on the p7mu
+ unsigned int gpio_vbus;
+ struct pinctrl *pctl;
+ unsigned int chip_rev;
+};
+
+#endif /* __ARCH_ARM_PARROT_USB_P7_H */
diff --git a/arch/arm/mach-parrot7/ion.c b/arch/arm/mach-parrot7/ion.c
new file mode 100644
index 00000000000000..dd08c78b29be51
--- /dev/null
+++ b/arch/arm/mach-parrot7/ion.c
@@ -0,0 +1,90 @@
+/*
+ * linux/arch/arm/mach-parrot7/ion.c - Parrot7 ION allocator platform
+ * implementation
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 14-Jan-2014
+ *
+ * This file is released under the GPL
+ *
+ * See linux/drivers/parrot/gpu/ion/p7-ion.c
+ */
+
+#include <linux/platform_device.h>
+#include <linux/ion.h>
+#include "ion.h"
+
+#define P7ION_DRV_NAME "p7-ion"
+#define P7_ION_HEAPS_MAX 16
+
+static struct ion_platform_heap p7_ion_pheaps[P7_ION_HEAPS_MAX] = {
+ { /* vmalloc based */
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .name = "vmalloc"
+ },
+ { /* kmalloc based */
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "kmalloc"
+ },
+ { /* global DMA coherent / CMA based */
+ .type = ION_HEAP_TYPE_DMA,
+ .id = ION_HEAP_TYPE_DMA,
+ .name = "cma",
+ .priv = NULL
+ }
+};
+
+static struct ion_platform_data p7_ion_pdata = {
+ .nr = 3,
+ .heaps = p7_ion_pheaps
+};
+
+static struct platform_device p7_ion_dev = {
+ .name = P7ION_DRV_NAME,
+ .id = 0,
+ .num_resources = 0,
+ .dev.platform_data = &p7_ion_pdata
+};
+
+void __init p7_ionize_dev(struct platform_device* pdev, unsigned int id)
+{
+ struct ion_platform_heap* const heap = &p7_ion_pheaps[p7_ion_pdata.nr];
+
+#ifdef DEBUG
+	BUG_ON(! pdev);
+ /* dma_declare_coherent_memory MUST have been called before !! */
+ BUG_ON(! pdev->dev.dma_mem);
+ BUG_ON(id < ION_HEAP_TYPE_CUSTOM);
+ {
+ unsigned int h;
+ for (h = 0; h < p7_ion_pdata.nr; h++)
+ BUG_ON(id == p7_ion_pheaps[h].id);
+ }
+#endif
+
+ heap->type = ION_HEAP_TYPE_DMA;
+ heap->id = id;
+ heap->name = dev_name(&pdev->dev);
+ heap->priv = &pdev->dev;
+
+ p7_ion_pdata.nr++;
+}
+
+void __init p7_init_ion(void)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! p7_ion_pdata.nr);
+#endif
+
+ pr_debug("p7: registering " P7ION_DRV_NAME ".0...\n");
+
+ err = platform_device_register(&p7_ion_dev);
+ if (err)
+ panic("p7: failed to register " P7ION_DRV_NAME ".0 (%d)\n", err);
+}
diff --git a/arch/arm/mach-parrot7/ion.h b/arch/arm/mach-parrot7/ion.h
new file mode 100644
index 00000000000000..29aff52771d1dc
--- /dev/null
+++ b/arch/arm/mach-parrot7/ion.h
@@ -0,0 +1,37 @@
+/**
+ * linux/arch/arm/mach-parrot7/ion.h - Parrot7 ION allocator platform
+ * interface
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 14-Jan-2014
+ *
+ * This file is released under the GPL
+ *
+ * See linux/drivers/parrot/gpu/ion/p7-ion.c
+ */
+
+#ifndef _ARCH_PARROT7_ION_H
+#define _ARCH_PARROT7_ION_H
+
+#if defined(CONFIG_ION) || \
+ defined(CONFIG_ION_MODULE)
+
+#include <linux/init.h>
+
+struct platform_device;
+
+extern void p7_init_ion(void) __init;
+extern void p7_ionize_dev(struct platform_device*, unsigned int) __init;
+
+#else /* defined(CONFIG_ION) || \
+ defined(CONFIG_ION_MODULE) */
+
+#define p7_init_ion()
+#define p7_ionize_dev(_pdev, _id)
+
+#endif /* defined(CONFIG_ION) || \
+ defined(CONFIG_ION_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/iopin.c b/arch/arm/mach-parrot7/iopin.c
new file mode 100644
index 00000000000000..f87eadc245c74f
--- /dev/null
+++ b/arch/arm/mach-parrot7/iopin.c
@@ -0,0 +1,83 @@
+/*
+ * linux/arch/arm/mach-parrot7/iopin.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 10-Feb-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <mach/p7.h>
+#include "system.h"
+#include "iopin.h"
+
+/* Uncomment this if running on old P7 releases where the input enable bit is buggy... */
+#if 0
+#define P7_r1865_IOPIN_WORKAROUND
+#endif
+
+static void __init p7_setup_iopin(union p7_iopin const config)
+{
+ unsigned long const addr = __MMIO_P2V(P7_SYS_PADCTRL_IO) +
+ (config.fields.id * sizeof(u32));
+ union p7_iopin pin = { .word = readl(addr) };
+
+#ifndef P7_r1865_IOPIN_WORKAROUND
+ /* Disable pin at first. */
+ pin.fields.input_en = 0;
+ __raw_writel(pin.word, addr);
+
+ if (config.fields.set_usage)
+ pin.fields.usage = config.fields.usage;
+
+ if (config.fields.set_trigger)
+ pin.fields.trigger = config.fields.trigger;
+
+ if (config.fields.set_pull)
+ pin.fields.pull = config.fields.pull;
+
+ if (config.fields.set_slewrate)
+ pin.fields.slewrate = config.fields.slewrate;
+
+ if (config.fields.set_driver)
+ pin.fields.driver = config.fields.driver;
+
+ /* Write config. */
+ writel(pin.word, addr);
+ /* Re-enable pin. */
+ pin.fields.input_en = 1;
+ writel(pin.word, addr);
+#else /* P7_r1865_IOPIN_WORKAROUND */
+ if (config.fields.set_usage)
+ pin.fields.usage = config.fields.usage;
+
+ __raw_writel(pin.word, addr);
+#endif /* P7_r1865_IOPIN_WORKAROUND */
+}
+
+void __init p7_init_iopin_table(union p7_iopin const* iopins)
+{
+ union p7_iopin const* pin;
+
+ for (pin = iopins; pin->word; pin++)
+ p7_setup_iopin(*pin);
+}
+
+/* vim:set ts=4:sw=4:et:ft=c: */
diff --git a/arch/arm/mach-parrot7/iopin.h b/arch/arm/mach-parrot7/iopin.h
new file mode 100644
index 00000000000000..c58820cf44d609
--- /dev/null
+++ b/arch/arm/mach-parrot7/iopin.h
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/arm/mach-parrot7/iopin.h
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 10-Feb-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_IOPIN_H
+#define _ARCH_PARROT7_IOPIN_H
+
+#include <linux/init.h>
+#include <asm/types.h>
+
+union p7_iopin {
+ u32 word;
+ struct {
+ unsigned usage:2;
+ unsigned unused0:1;
+ unsigned set_usage:1;
+ unsigned set_trigger:1;
+ unsigned set_pull:1;
+ unsigned set_slewrate:1;
+ unsigned set_driver:1;
+ unsigned trigger:1;
+ unsigned pull:3;
+ unsigned input_en:1;
+ unsigned unused1:3;
+ unsigned slewrate:2;
+ unsigned driver:6;
+ unsigned id:8;
+ } fields;
+};
+
+#define P7_IOPIN_USAGE (1 << 3)
+#define P7_IOPIN_TRIGGER (1 << 4)
+#define P7_IOPIN_PULL (1 << 5)
+#define P7_IOPIN_SLEWRATE (1 << 6)
+#define P7_IOPIN_DRIVER (1 << 7)
+
+#define P7_IOPIN_SMT_NONE 0 /* No trigger */
+#define P7_IOPIN_SMT_EN		1	/* Schmitt trigger enabled */
+
+#define P7_IOPIN_PULL_HIGHZ	0	/* High impedance */
+#define P7_IOPIN_PULL_UP 1 /* Pull up */
+#define P7_IOPIN_PULL_DOWN 2 /* Pull down */
+#define P7_IOPIN_PULL_KEEP 4 /* Bus keeper */
+
+#define P7_IOPIN_SLEWRATE_0 0 /* Slowest */
+#define P7_IOPIN_SLEWRATE_1 1
+#define P7_IOPIN_SLEWRATE_2 2
+#define P7_IOPIN_SLEWRATE_3 3 /* Fastest */
+
+#define P7_IOPIN_DRIVER_0 1 /* Weakest */
+#define P7_IOPIN_DRIVER_1 3
+#define P7_IOPIN_DRIVER_2 7
+#define P7_IOPIN_DRIVER_3 0xf
+#define P7_IOPIN_DRIVER_4 0x1f
+#define P7_IOPIN_DRIVER_5	0x3f	/* Strongest */
+
+#define P7_INIT_FULL_IOPIN(_id, \
+ _set, \
+ _trigger, \
+ _pull, \
+ _slewrate, \
+ _driver) \
+ { .fields = { \
+ .usage = (_id) & 0xf, \
+ .set_usage = !! ((_set) & P7_IOPIN_USAGE), \
+ .set_trigger = !! ((_set) & P7_IOPIN_TRIGGER), \
+ .set_pull = !! ((_set) & P7_IOPIN_PULL), \
+ .set_slewrate = !! ((_set) & P7_IOPIN_SLEWRATE), \
+ .set_driver = !! ((_set) & P7_IOPIN_DRIVER), \
+ .trigger = _trigger, \
+ .pull = _pull, \
+ .slewrate = _slewrate, \
+ .driver = _driver, \
+ .id = ((_id) >> 4) \
+ } \
+ }
+
+#define P7_INIT_IOPIN(_id) \
+ P7_INIT_FULL_IOPIN(_id, P7_IOPIN_USAGE, 0, 0, 0, 0)
+
+extern void p7_init_iopin_table(union p7_iopin const*) __init;
+
+#endif
+
+/* vim:set ts=4:sw=4:et:ft=c: */
diff --git a/arch/arm/mach-parrot7/lcd-monspecs.c b/arch/arm/mach-parrot7/lcd-monspecs.c
new file mode 100644
index 00000000000000..6342935bca85ae
--- /dev/null
+++ b/arch/arm/mach-parrot7/lcd-monspecs.c
@@ -0,0 +1,728 @@
+#include "lcd-monspecs.h"
+
+/* The 7" OEM screen */
+/* The screen used in the RNB5. 6.2" with capacitive touchscreen */
+struct avi_videomode fg0700k_video_mode = {
+ .name = "fg0700k4dsswbgt1",
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 33260,
+ .left_margin = 104,/* Hback */
+ .right_margin = 24, /* Hfront */
+ .upper_margin = 12, /* Vback */
+ .lower_margin = 3, /* Vfront */
+ .hsync_len = 80,
+ .vsync_len = 7,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+};
+
+/* The 10" screen */
+struct avi_videomode lt104ac_video_mode = {
+ .name = "lt104ac",
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 65000,
+ .left_margin = 160,
+ .right_margin = 24,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .hsync_len = 136,
+ .vsync_len = 6,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+};
+
+struct avi_videomode lt104ac_no_den_video_mode = {
+ .name = "lt104ac_no_den",
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 65000,
+ .left_margin = 296,
+ .right_margin = 24,
+ .upper_margin = 34,
+ .lower_margin = 3,
+ .hsync_len = 136,
+ .vsync_len = 6,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+};
+
+
+/* The Antec 9" Screen */
+struct avi_videomode chimei_antec_video_mode = {
+ .name = "chimei-antec",
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 33300,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .left_margin = 46, /* Hback */
+ .right_margin = 210, /* Hfront */
+ .upper_margin = 23, /* Vback */
+ .lower_margin = 22, /* Vfront */
+ .hsync_len = 80,
+ .vsync_len = 7,
+};
+
+/* The FC6100 workbench 7' Screen */
+struct avi_videomode tpo_laj07t001a_video_mode = {
+ .name = "tpo_laj07t001a",
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 33230,
+ .flags = 0,
+ .left_margin = 128, /* Hback */
+ .right_margin = 49, /* Hfront */
+ .upper_margin = 34, /* Vback */
+ .lower_margin = 10, /* Vfront */
+ .hsync_len = 19,
+ .vsync_len = 2,
+};
+
+/* OEM FC7100 kyocera screen */
+struct avi_videomode kyocera_video_mode = {
+ .name = "kyocera",
+ .xres = 1280,
+ .yres = 800,
+ .pixclock = 71100,
+ /* The rest of the timings will be calculated using the VESA
+ * generalized timing formula. */
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 100,
+ .left_margin = 30,
+ .right_margin = 30,
+ .vsync_len = 5,
+ .upper_margin = 16,
+ .lower_margin = 2,
+ /* Don't attempt to validate monitor timings */
+};
+
+struct avi_videomode porsche_rse_video_mode = {
+ .name = "porsche_rse",
+ .xres = 1280,
+ .yres = 800,
+ .pixclock = 80500,
+ /* The rest of the timings will be calculated using the VESA
+ * generalized timing formula. */
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 200,
+ .left_margin = 10,
+ .right_margin = 10,
+ .vsync_len = 67,
+ .upper_margin = 5,
+ .lower_margin = 5,
+};
+
+struct avi_videomode drse_video_mode = {
+ .name = "drse",
+ .xres = 1280,
+ .yres = 800,
+ .pixclock = 71100,
+ /* The rest of the timings will be calculated using the VESA
+ * generalized timing formula. */
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 100,
+ .left_margin = 30,
+ .right_margin = 30,
+ .vsync_len = 5,
+ .upper_margin = 16,
+ .lower_margin = 2,
+};
+
+struct avi_videomode rnb6_truly_video_mode = {
+ .name = "rnb6_truly_video_mode",
+ .xres = 720,
+ .yres = 1280,
+ .flags = 0,
+ .hsync_len = 8,
+ .left_margin = 45,
+ .right_margin = 70,
+ .vsync_len = 2,
+ .upper_margin = 15,
+ .lower_margin = 16,
+ .pixclock = 66412,
+};
+
+struct avi_videomode ntsc_720_video_mode = {
+ .name = "ntsc_720",
+ .xres = 720,
+ .yres = 480,
+ .flags = AVI_VIDEOMODE_INTERLACED,
+
+ .hsync_len = 62,
+ .left_margin = 57,
+ .right_margin = 19,
+
+ .vsync_len = 5,
+ .upper_margin = 15,
+ .lower_margin = 2,
+
+ .pixclock = 13500,
+};
+
+struct avi_videomode ntsc_640_video_mode = {
+ .name = "ntsc_640",
+ .xres = 640,
+ .yres = 480,
+ .flags = AVI_VIDEOMODE_INTERLACED,
+
+ .hsync_len = 69,
+ .left_margin = 57,
+ .right_margin = 14,
+
+ .vsync_len = 5,
+ .upper_margin = 15,
+ .lower_margin = 2,
+
+ /* The AVI PLL can not actually generate anything under 12.5MHz, let's
+ * hope this is within tolerance. */
+ .pixclock = 12273,
+};
+
+/* OEM FC7100 Vovo truck screen: TRULY SEMICONDUCTORS 2K31715 */
+struct avi_videomode tft800480_video_mode = {
+ .name = "2K31715",
+ .xres = 800,
+ .yres = 480,
+
+ .pixclock = 30000,
+
+ .flags = 0,
+
+ .hsync_len = 48,
+ .left_margin = 41,
+ .right_margin = 40,
+
+ .vsync_len = 3,
+ .upper_margin = 29,
+ .lower_margin = 13,
+
+};
+
+/*************************************************/
+/********** HDMI OUT section *********************/
+/*************************************************/
+
+/* CEA 861D/TV Formats */
+
+struct avi_videomode hdmi_720x576p50_video_mode = {
+ .name = "hdmi_720x576p50",
+ .xres = 720,
+ .yres = 576,
+ .flags = 0,
+ /* As per CEA-861-D.pdf page 25 720x576p @50 Hz (Formats 17 & 18) */
+ .hsync_len = 64,
+ .left_margin = 68,
+ .right_margin = 12,
+ .vsync_len = 5,
+ .upper_margin = 39,
+ .lower_margin = 5,
+ .pixclock = 27000,
+};
+
+struct avi_videomode hdmi_720x576i50_video_mode = {
+ .name = "hdmi_720x576i50",
+ .xres = 720,
+ .yres = 576,
+ .flags = AVI_VIDEOMODE_INTERLACED,
+ /* As per CEA-861-D.pdf page 26 720(1440)x576i @50 Hz (Formats 21 & 22)
+ * Horizontal timings, horizontal resolution and pixel clock
+ * are divided by two from original values of CEA-861
+ */
+ .hsync_len = 63,
+ .left_margin = 69,
+ .right_margin = 12,
+ .vsync_len = 3,
+ .upper_margin = 19,
+ .lower_margin = 2,
+ .pixclock = 13500,
+};
+
+struct avi_videomode hdmi_720x480p60_video_mode = {
+ .name = "hdmi_720x480p60",
+ .xres = 720,
+ .yres = 480,
+ .flags = 0,
+ /* As per CEA-861-D.pdf page 21 720x480p @59.94/60 Hz (Formats 2 & 3) */
+ .hsync_len = 62,
+ .left_margin = 60,
+ .right_margin = 16,
+ .vsync_len = 6,
+ .upper_margin = 30,
+ .lower_margin = 9,
+ .pixclock = 27027,
+};
+
+struct avi_videomode hdmi_720x480p5994_video_mode = {
+ .name = "hdmi_720x480p59.94",
+ .xres = 720,
+ .yres = 480,
+ .flags = 0,
+ /* As per CEA-861-D.pdf page 21 720x480p @59.94/60 Hz (Formats 2 & 3) */
+ .hsync_len = 62,
+ .left_margin = 60,
+ .right_margin = 16,
+ .vsync_len = 6,
+ .upper_margin = 30,
+ .lower_margin = 9,
+ .pixclock = 27000,
+};
+
+struct avi_videomode hdmi_720x480i60_video_mode = {
+ .name = "hdmi_720x480i60",
+ .xres = 720,
+ .yres = 480,
+ .flags = AVI_VIDEOMODE_INTERLACED,
+ /* As per CEA-861-D.pdf page 22 720(1440)x480i @59.94/60 Hz (Formats 6 & 7)
+ * Horizontal timings, horizontal resolution and pixel clock
+ * are divided by two from original values of CEA-861
+ */
+ .hsync_len = 62,
+ .left_margin = 57,
+ .right_margin = 19,
+ .vsync_len = 3,
+ .upper_margin = 15,
+ .lower_margin = 4,
+ .pixclock = 13513,
+};
+
+struct avi_videomode hdmi_1440x480p60_video_mode = {
+ .name = "hdmi_1440x480p60",
+ .xres = 1440,
+ .yres = 480,
+ .flags = 0,
+ /* As per CEA-861-D.pdf page 32 1440x480p @59.94/60 Hz (Formats 14 & 15) */
+ .hsync_len = 124,
+ .left_margin = 120,
+ .right_margin = 32,
+ .vsync_len = 6,
+ .upper_margin = 30,
+ .lower_margin = 9,
+ .pixclock = 54054,
+};
+
+struct avi_videomode hdmi_1440x576p50_video_mode = {
+ .name = "hdmi_1440x576p50",
+ .xres = 1440,
+ .yres = 576,
+ .flags = 0,
+ /* As per CEA-861-D.pdf 1440x576p @50 Hz (Formats 29 & 30) */
+ .hsync_len = 128,
+ .left_margin = 136,
+ .right_margin = 24,
+ .vsync_len = 5,
+ .upper_margin = 39,
+ .lower_margin = 5,
+ .pixclock = 54000,
+};
+
+struct avi_videomode hdmi_1280x720p24_video_mode = {
+ .name = "hdmi_1280x720p24", /* vic#60 */
+ .xres = 1280,
+ .yres = 720,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-E.pdf page 16/17 1280x720p @24 Hz (Format 60) */
+ .hsync_len = 40,
+ .left_margin = 220,
+ .right_margin = 1760,
+ .vsync_len = 5,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .pixclock = 59400,
+};
+
+struct avi_videomode hdmi_1280x720p25_video_mode = {
+ .name = "hdmi_1280x720p25", /* vic#61 */
+ .xres = 1280,
+ .yres = 720,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-E.pdf page 16/17 1280x720p @25 Hz (Format 61) */
+ .hsync_len = 40,
+ .left_margin = 220,
+ .right_margin = 2420,
+ .vsync_len = 5,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1280x720p30_video_mode = {
+ .name = "hdmi_1280x720p30", /* vic#62 */
+ .xres = 1280,
+ .yres = 720,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-E.pdf page 16/17 1280x720p @30 Hz (Format 62) */
+ .hsync_len = 40,
+ .left_margin = 220,
+ .right_margin = 1760,
+ .vsync_len = 5,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1280x720p50_video_mode = {
+ .name = "hdmi_1280x720p50", /* vic#19 */
+ .xres = 1280,
+ .yres = 720,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf page 23 1280x720p @50 Hz (Format 19) */
+ .hsync_len = 40,
+ .left_margin = 220,
+ .right_margin = 440,
+ .vsync_len = 5,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1280x720p60_video_mode = {
+ .name = "hdmi_1280x720p60", /* vic#4 */
+ .xres = 1280,
+ .yres = 720,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf page 19 1280x720p @59.94/60 Hz (Format 4) */
+ .hsync_len = 40,
+ .left_margin = 220,
+ .right_margin = 110,
+ .vsync_len = 5,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080p30_video_mode = {
+ .name = "hdmi_1920x1080p30", /* vic#34 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf 1920x1080p @ 30Hz (Format 34) */
+ .hsync_len = 44,
+ .left_margin = 148,
+ .right_margin = 88,
+ .vsync_len = 5,
+ .upper_margin = 36,
+ .lower_margin = 4,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080p24_video_mode = {
+ .name = "hdmi_1920x1080p24", /* vic#32 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf 1920x1080p @ 24Hz (Format 32) */
+ .hsync_len = 44,
+ .left_margin = 148,/* Hback */
+ .right_margin = 638,/* Hfront */
+ .vsync_len = 5,
+ .upper_margin = 36,/* Vback */
+ .lower_margin = 4,/* Vfront */
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080p25_video_mode = {
+ .name = "hdmi_1920x1080p25", /* vic#33 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf 1920x1080p @ 25Hz (Format 33) */
+ .hsync_len = 44,
+ .left_margin = 148,/* Hback */
+ .right_margin = 528,/* Hfront */
+ .vsync_len = 5,
+ .upper_margin = 36,/* Vback */
+ .lower_margin = 4,/* Vfront */
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080i50_video_mode = {
+ .name = "hdmi_1920x1080i50", /* vic#20 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH
+ | AVI_VIDEOMODE_INTERLACED,
+ /* As per CEA-861-D.pdf page 24 1920x1080i @50 Hz (Format 20) */
+ .hsync_len = 44,
+ .left_margin = 148,
+ .right_margin = 528,
+ .vsync_len = 5,
+ .upper_margin = 15,
+ .lower_margin = 2,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080p50_video_mode = {
+ .name = "hdmi_1920x1080p50", /* vic#31 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf 1920x1080p @ 50Hz (Format 31) */
+ .hsync_len = 44,
+ .left_margin = 148,/* Hback */
+ .right_margin = 528,/* Hfront */
+ .vsync_len = 5,
+ .upper_margin = 36,/* Vback */
+ .lower_margin = 4,/* Vfront */
+ .pixclock = 148500,
+};
+
+struct avi_videomode hdmi_1920x1080i60_video_mode = {
+ .name = "hdmi_1920x1080i60", /* vic#5 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH
+ | AVI_VIDEOMODE_INTERLACED,
+ /* As per CEA-861-D.pdf page 20 1920x1080i @59.94/60 Hz (Format 5) */
+ .hsync_len = 44,
+ .left_margin = 148,
+ .right_margin = 88,
+ .vsync_len = 5,
+ .upper_margin = 15,
+ .lower_margin = 2,
+ .pixclock = 74250,
+};
+
+struct avi_videomode hdmi_1920x1080i5994_video_mode = {
+ .name = "hdmi_1920x1080i59.94", /* vic#5 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH
+ | AVI_VIDEOMODE_INTERLACED,
+ /* As per CEA-861-D.pdf page 20 1920x1080i @59.94/60 Hz (Format 5) */
+ .hsync_len = 44,
+ .left_margin = 148,
+ .right_margin = 88,
+ .vsync_len = 5,
+ .upper_margin = 15,
+ .lower_margin = 2,
+ .pixclock = 74176,
+};
+
+struct avi_videomode hdmi_1920x1080p60_video_mode = {
+ .name = "hdmi_1920x1080p60", /* vic#16 */
+ .xres = 1920,
+ .yres = 1080,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ /* As per CEA-861-D.pdf page 33 1920x1080p @ 59.94/60Hz (Format 16) */
+ .hsync_len = 44,
+ .left_margin = 148,/* Hback */
+ .right_margin = 88,/* Hfront */
+ .vsync_len = 5,
+ .upper_margin = 36,/* Vback */
+ .lower_margin = 4,/* Vfront */
+ .pixclock = 148500,
+};
+
+/* VESA Formats */
+
+struct avi_videomode hdmi_1024x768p60_video_mode = {
+ .name = "hdmi_1024x768p60", /* vesa */
+ .xres = 1024,
+ .yres = 768,
+ .flags = 0,
+ .hsync_len = 136,
+ .left_margin = 160,
+ .right_margin = 24,
+ .vsync_len = 6,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .pixclock = 65003,
+};
+
+struct avi_videomode hdmi_1024x768p75_video_mode = {
+ .name = "hdmi_1024x768p75", /* vesa */
+ .xres = 1024,
+ .yres = 768,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 96,
+ .left_margin = 176,
+ .right_margin = 16,
+ .vsync_len = 3,
+ .upper_margin = 28,
+ .lower_margin = 1,
+ .pixclock = 78802,
+};
+
+struct avi_videomode hdmi_1024x768p85_video_mode = {
+ .name = "hdmi_1024x768p85", /* vesa */
+ .xres = 1024,
+ .yres = 768,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 96,
+ .left_margin = 208,
+ .right_margin = 48,
+ .vsync_len = 3,
+ .upper_margin = 36,
+ .lower_margin = 1,
+ .pixclock = 94500,
+};
+
+struct avi_videomode hdmi_800x600p75_video_mode = {
+ .name = "hdmi_800x600p75", /* vesa */
+ .xres = 800,
+ .yres = 600,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 80,
+ .left_margin = 160,
+ .right_margin = 16,
+ .vsync_len = 3,
+ .upper_margin = 21,
+ .lower_margin = 1,
+ .pixclock = 49500,
+};
+
+struct avi_videomode hdmi_800x600p85_video_mode = {
+ .name = "hdmi_800x600p85", /* vesa */
+ .xres = 800,
+ .yres = 600,
+ .flags = AVI_VIDEOMODE_SYNC_ACTIVE_HIGH,
+ .hsync_len = 64,
+ .left_margin = 152,
+ .right_margin = 32,
+ .vsync_len = 3,
+ .upper_margin = 27,
+ .lower_margin = 1,
+ .pixclock = 56250,
+};
+
+struct avi_videomode hdmi_640x480p75_video_mode = {
+ .name = "hdmi_640x480p75", /* vesa */
+ .xres = 640,
+ .yres = 480,
+ .flags = 0,
+ .hsync_len = 64,
+ .left_margin = 120,
+ .right_margin = 16,
+ .vsync_len = 3,
+ .upper_margin = 16,
+ .lower_margin = 1,
+ .pixclock = 31500,
+};
+
+struct avi_videomode sfx1_tegra_640x3360p32_video_mode = {
+ .name = "sfx1_tegra_640x3360p32", /* sfx1 */
+ .xres = 640,
+	.yres	       = 3361, // One additional line for metadata
+ .flags = 0,
+ .hsync_len = 32,
+ .left_margin = 16,
+ .right_margin = 16,
+ .vsync_len = 54,
+ .upper_margin = 16,
+ .lower_margin = 1,
+ .pixclock = 77300,
+};
+
+struct avi_videomode sfx1_tegra_640x2401p32_video_mode = {
+ .name = "sfx1_tegra_640x2401p32", /* sfx1 */
+ .xres = 640,
+	.yres	       = 2401, // One additional line for metadata
+ .flags = 0,
+ .hsync_len = 32,
+ .left_margin = 16,
+ .right_margin = 16,
+ .vsync_len = 54,
+ .upper_margin = 16,
+ .lower_margin = 1,
+ .pixclock = 56700,
+};
+
+struct avi_videomode hdmi_640x480p85_video_mode = {
+ .name = "hdmi_640x480p85", /* vesa */
+ .xres = 640,
+ .yres = 480,
+ .flags = 0,
+ .hsync_len = 48,
+ .left_margin = 112,
+ .right_margin = 32,
+ .vsync_len = 3,
+ .upper_margin = 25,
+ .lower_margin = 1,
+ .pixclock = 36000,
+};
+
+struct avi_videomode hdmi_1280x1024p60_video_mode = {
+ .name = "hdmi_1280x1024p60", /* vesa */
+ .xres = 1280,
+ .yres = 1024,
+ .flags = 0,
+ .hsync_len = 112,
+ .left_margin = 248,
+ .right_margin = 48,
+ .vsync_len = 3,
+ .upper_margin = 38,
+ .lower_margin = 1,
+ .pixclock = 108000,
+};
+
+struct avi_videomode hdmi_1280x1024p75_video_mode = {
+ .name = "hdmi_1280x1024p75", /* vesa */
+ .xres = 1280,
+ .yres = 1024,
+ .flags = 0,
+ .hsync_len = 144,
+ .left_margin = 248,
+ .right_margin = 16,
+ .vsync_len = 3,
+ .upper_margin = 38,
+ .lower_margin = 1,
+ .pixclock = 135000,
+};
+
+struct avi_videomode hdmi_1280x1024p85_video_mode = {
+ .name = "hdmi_1280x1024p85", /* vesa */
+ .xres = 1280,
+ .yres = 1024,
+ .flags = 0,
+ .hsync_len = 160,
+ .left_margin = 240,
+ .right_margin = 48,
+ .vsync_len = 3,
+ .upper_margin = 44,
+ .lower_margin = 1,
+ .pixclock = 157500,
+};
+
+const struct avi_videomode *p7_all_video_modes[] = {
+ &fg0700k_video_mode,
+ &lt104ac_video_mode,
+ &lt104ac_no_den_video_mode,
+ &kyocera_video_mode,
+ &porsche_rse_video_mode,
+ &drse_video_mode,
+ &chimei_antec_video_mode,
+ &tpo_laj07t001a_video_mode,
+ &ntsc_720_video_mode,
+ &ntsc_640_video_mode,
+ &tft800480_video_mode,
+ &rnb6_truly_video_mode,
+ &hdmi_720x480p60_video_mode,
+ &hdmi_720x480p5994_video_mode,
+ &hdmi_720x480i60_video_mode,
+ &hdmi_1440x480p60_video_mode,
+ &hdmi_720x576p50_video_mode,
+ &hdmi_720x576i50_video_mode,
+ &hdmi_1440x576p50_video_mode,
+ &hdmi_1280x720p24_video_mode,
+ &hdmi_1280x720p25_video_mode,
+ &hdmi_1280x720p50_video_mode,
+ &hdmi_1280x720p30_video_mode,
+ &hdmi_1280x720p60_video_mode,
+ &hdmi_1920x1080p24_video_mode,
+ &hdmi_1920x1080p25_video_mode,
+ &hdmi_1920x1080i50_video_mode,
+ &hdmi_1920x1080p50_video_mode,
+ &hdmi_1920x1080p30_video_mode,
+ &hdmi_1920x1080i60_video_mode,
+ &hdmi_1920x1080i5994_video_mode,
+ &hdmi_1920x1080p60_video_mode,
+ &hdmi_1280x1024p60_video_mode,
+ &hdmi_1280x1024p75_video_mode,
+ &hdmi_1280x1024p85_video_mode,
+ &hdmi_1024x768p60_video_mode,
+ &hdmi_1024x768p75_video_mode,
+ &hdmi_1024x768p85_video_mode,
+ &hdmi_800x600p75_video_mode,
+ &hdmi_800x600p85_video_mode,
+ &hdmi_640x480p75_video_mode,
+ &hdmi_640x480p85_video_mode,
+ NULL /* end of table */
+};
diff --git a/arch/arm/mach-parrot7/lcd-monspecs.h b/arch/arm/mach-parrot7/lcd-monspecs.h
new file mode 100644
index 00000000000000..dd6ade8481b722
--- /dev/null
+++ b/arch/arm/mach-parrot7/lcd-monspecs.h
@@ -0,0 +1,108 @@
+/**
+ * linux/arch/arm/mach-parrot7/lcd-monspecs.h
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Lionel Flandrin <lionel.flandrin@parrot.com>
+ * date: 08-Oct-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _LCD_MONSPECS_H_
+#define _LCD_MONSPECS_H_
+
+
+#include <video/avi.h>
+
+/* The 7" OEM screen */
+/* The screen used in the RNB5. 6.2" with capacitive touchscreen */
+extern struct avi_videomode fg0700k_video_mode;
+/* The 10" screen */
+extern struct avi_videomode lt104ac_video_mode;
+/* When DEN is not wired */
+extern struct avi_videomode lt104ac_no_den_video_mode;
+/* OEM kyocera */
+extern struct avi_videomode kyocera_video_mode;
+/* The Antec 9" Screen */
+extern struct avi_videomode chimei_antec_video_mode;
+/* PRSE */
+extern struct avi_videomode porsche_rse_video_mode;
+/* DRSE */
+extern struct avi_videomode drse_video_mode;
+
+extern struct avi_videomode ntsc_720_video_mode;
+extern struct avi_videomode ntsc_640_video_mode;
+
+/* OEM FC7100 Volvo truck screen: TRULY SEMICONDUCTORS 2K31715 */
+extern struct avi_videomode tft800480_video_mode;
+
+/* RNB6 screen 720x1280 */
+extern struct avi_videomode rnb6_truly_video_mode;
+
+/**** HDMI Output ****/
+/* CEA 861D/TV Formats */
+/* SD TV */
+/* NTSC */
+extern struct avi_videomode hdmi_720x480p60_video_mode;
+extern struct avi_videomode hdmi_720x480p5994_video_mode;
+extern struct avi_videomode hdmi_720x480i60_video_mode;
+extern struct avi_videomode hdmi_1440x480p60_video_mode;
+
+/* PAL */
+extern struct avi_videomode hdmi_720x576p50_video_mode;
+extern struct avi_videomode hdmi_720x576i50_video_mode;
+extern struct avi_videomode hdmi_1440x576p50_video_mode;
+
+/* HD TV HD Ready */
+/* Cinema */
+extern struct avi_videomode hdmi_1280x720p24_video_mode;
+/* Europe TV */
+extern struct avi_videomode hdmi_1280x720p25_video_mode;
+extern struct avi_videomode hdmi_1280x720p50_video_mode;
+/* America/Japan TV */
+extern struct avi_videomode hdmi_1280x720p30_video_mode;
+extern struct avi_videomode hdmi_1280x720p60_video_mode;
+
+/* HD TV Full HD */
+/* Cinema */
+extern struct avi_videomode hdmi_1920x1080p24_video_mode;
+
+/* Europe TV */
+extern struct avi_videomode hdmi_1920x1080p25_video_mode;
+extern struct avi_videomode hdmi_1920x1080i50_video_mode;
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1920x1080p50_video_mode;
+
+/* America/Japan TV */
+extern struct avi_videomode hdmi_1920x1080p30_video_mode;
+extern struct avi_videomode hdmi_1920x1080i60_video_mode;
+extern struct avi_videomode hdmi_1920x1080i5994_video_mode;
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1920x1080p60_video_mode;
+
+/* VESA Formats */
+/* formats p85 are not supported by HDTV monitor Samsung T22B300EW */
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1280x1024p60_video_mode;
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1280x1024p75_video_mode;
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1280x1024p85_video_mode;
+extern struct avi_videomode hdmi_1024x768p60_video_mode;
+extern struct avi_videomode hdmi_1024x768p75_video_mode;
+/* requires 24 bit data bus separate syncs only */
+extern struct avi_videomode hdmi_1024x768p85_video_mode;
+/* not supported by HDTV monitor Samsung T22B300EW */
+extern struct avi_videomode hdmi_800x600p75_video_mode;
+extern struct avi_videomode hdmi_800x600p85_video_mode;
+extern struct avi_videomode hdmi_640x480p75_video_mode;
+extern struct avi_videomode sfx1_tegra_640x3360p32_video_mode;
+extern struct avi_videomode sfx1_tegra_640x2401p32_video_mode;
+extern struct avi_videomode hdmi_640x480p85_video_mode;
+
+// Null terminated list of all the above modes
+extern const struct avi_videomode *p7_all_video_modes[];
+
+
+#endif /* _LCD_MONSPECS_H_ */
diff --git a/arch/arm/mach-parrot7/mpegts.c b/arch/arm/mach-parrot7/mpegts.c
new file mode 100644
index 00000000000000..909bbad1888a39
--- /dev/null
+++ b/arch/arm/mach-parrot7/mpegts.c
@@ -0,0 +1,119 @@
+/*
+ * Parrot7 MPEG-TS controller platform implementation
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <media/p7-mpegts.h>
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include "common.h"
+#include "dma.h"
+#include "spi.h"
+
+static struct resource p7_mpegts0_res[] = {
+ [0] = {
+ .start = P7_MPEGTS0,
+ .end = P7_MPEGTS0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_MPEGTS0_IRQ,
+ .end = P7_MPEGTS0_IRQ,
+ .flags = IORESOURCE_IRQ
+ },
+ [2] = {
+ .start = P7_MPEGTS0_DMA,
+ .end = P7_MPEGTS0_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static struct resource p7_mpegts1_res[] = {
+ [0] = {
+ .start = P7_MPEGTS1,
+ .end = P7_MPEGTS1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_MPEGTS1_IRQ,
+ .end = P7_MPEGTS1_IRQ,
+ .flags = IORESOURCE_IRQ
+ },
+ [2] = {
+ .start = P7_MPEGTS1_DMA,
+ .end = P7_MPEGTS1_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static u64 p7_mpegts0_dma_mask = DMA_BIT_MASK(32);
+static u64 p7_mpegts1_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device p7_mpegts_dev[] = {
+ {
+ .name = P7MPG_DRV_NAME,
+ .id = 0,
+ .resource = p7_mpegts0_res,
+ .num_resources = ARRAY_SIZE(p7_mpegts0_res),
+ .dev = {
+ .dma_mask = &p7_mpegts0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ },
+ {
+ .name = P7MPG_DRV_NAME,
+ .id = 1,
+ .resource = p7_mpegts1_res,
+ .num_resources = ARRAY_SIZE(p7_mpegts1_res),
+ .dev = {
+ .dma_mask = &p7_mpegts1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ }
+};
+
+static size_t p7_mpegts_dmasz[ARRAY_SIZE(p7_mpegts_dev)] __initdata;
+
+void __init p7_reserve_mpegtsmem(unsigned int core, size_t size)
+{
+#ifdef DEBUG
+ BUG_ON(! size);
+ BUG_ON(core >= ARRAY_SIZE(p7_mpegts_dev));
+#endif
+ p7_mpegts_dmasz[core] = size;
+ p7_reserve_devmem(&p7_mpegts_dev[core], NULL, &p7_mpegts_dmasz[core]);
+}
+
+void __init p7_init_mpegts(int core,
+ struct p7mpg_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ int err;
+ if (p7_chiprev() == P7_CHIPREV_R1) {
+ printk(KERN_ERR "MPEG-TS not available on P7R1\n");
+ return;
+ }
+
+#ifdef DEBUG
+ BUG_ON(core < 0 || core >= ARRAY_SIZE(p7_mpegts_dev));
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+ p7_init_spi();
+
+ pdata->dma_sz = p7_mpegts_dmasz[core];
+
+ err = p7_init_dev(&p7_mpegts_dev[core], pdata, pins, pin_cnt);
+ if (! err)
+ return;
+
+ panic("p7: failed to init MPEG-TS (%d)\n", err);
+}
diff --git a/arch/arm/mach-parrot7/mpegts.h b/arch/arm/mach-parrot7/mpegts.h
new file mode 100644
index 00000000000000..173f6303c1b8ab
--- /dev/null
+++ b/arch/arm/mach-parrot7/mpegts.h
@@ -0,0 +1,42 @@
+/*
+ * Parrot 7 MPEG-TS controller platform interface
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+
+#ifndef _ARCH_PARROT7_MPEGTS_H
+#define _ARCH_PARROT7_MPEGTS_H
+
+#include <linux/init.h>
+#include <media/p7-mpegts.h>
+
+#if defined(CONFIG_MPEGTS_PARROT7) || \
+ defined(CONFIG_MPEGTS_PARROT7_MODULE)
+
+extern void p7_init_mpegts(int core, struct p7mpg_plat_data* pdata,
+ struct pinctrl_map* pins, size_t pin_cnt);
+extern void p7_reserve_mpegtsmem(unsigned int core, size_t size);
+
+#else
+
+static inline void p7_init_mpegts(int core,
+ struct p7mpg_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ return;
+}
+
+static inline void p7_reserve_mpegtsmem(unsigned int core, size_t size)
+{
+ return;
+}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-parrot7/mpmc.c b/arch/arm/mach-parrot7/mpmc.c
new file mode 100644
index 00000000000000..21631dba46578a
--- /dev/null
+++ b/arch/arm/mach-parrot7/mpmc.c
@@ -0,0 +1,87 @@
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/syscore_ops.h>
+
+#include <mach/p7.h>
+
+#include "mpmc.h"
+
+int p7_mpmc_set_prio(enum p7_mpmc_port port, unsigned prio)
+{
+ int off = port * 4;
+ u32 r;
+
+ if (prio > 7)
+ return -ERANGE;
+
+ r = __raw_readl(__MMIO_P2V(P7_MPMC_GBC_PRI));
+ r &= ~(7UL << off);
+ r |= prio << off;
+
+ __raw_writel(r, __MMIO_P2V(P7_MPMC_GBC_PRI));
+
+ return 0;
+}
+
+unsigned p7_mpmc_get_prio(enum p7_mpmc_port port)
+{
+ int off = port * 4;
+ u32 r;
+
+ r = __raw_readl(__MMIO_P2V(P7_MPMC_GBC_PRI));
+ r >>= off;
+ r &= 7UL;
+
+ return r;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+
+static unsigned p7_mpmc_config[P7_MPMC_NR_PORT];
+
+/* Save PRIO values on suspend */
+static int p7_mpmc_suspend(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(p7_mpmc_config); i++)
+		p7_mpmc_config[i] = p7_mpmc_get_prio(i);
+
+	return 0;
+}
+
+/* Restore PRIO values on resume */
+static void p7_mpmc_resume(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(p7_mpmc_config); i++)
+		p7_mpmc_set_prio(i, p7_mpmc_config[i]);
+}
+
+/* Registered from p7_init_mpmc(), same guard (CONFIG_PM_SLEEP) as here */
+static struct syscore_ops p7_mpmc_syscore_ops = {
+	.suspend = p7_mpmc_suspend,
+	.resume  = p7_mpmc_resume,
+};
+
+#endif /* CONFIG_PM_SLEEP */
+
+void __init p7_init_mpmc(void)
+{
+ /* Default MPMC configuration. We set all ports to priority 1 to disable
+ * QoS. */
+ p7_mpmc_set_prio(P7_MPMC_CPU_DMA_PORT, 1);
+ p7_mpmc_set_prio(P7_MPMC_AVI_PORT, 1);
+ p7_mpmc_set_prio(P7_MPMC_VDEC_VENC_PORT, 1);
+ p7_mpmc_set_prio(P7_MPMC_HSP_AAI_PORT, 1);
+ p7_mpmc_set_prio(P7_MPMC_GPU_VENC_PORT, 1);
+
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&p7_mpmc_syscore_ops);
+#endif
+}
diff --git a/arch/arm/mach-parrot7/mpmc.h b/arch/arm/mach-parrot7/mpmc.h
new file mode 100644
index 00000000000000..644e5a267dd511
--- /dev/null
+++ b/arch/arm/mach-parrot7/mpmc.h
@@ -0,0 +1,21 @@
+#ifndef _MPMC_H_
+#define _MPMC_H_
+
+enum p7_mpmc_port
+{
+ /* The value of the enum are in register order (from LSB to MSB) */
+ P7_MPMC_CPU_DMA_PORT = 0,
+ P7_MPMC_AVI_PORT = 1,
+ P7_MPMC_VDEC_VENC_PORT = 2,
+ P7_MPMC_HSP_AAI_PORT = 3,
+ P7_MPMC_GPU_VENC_PORT = 4,
+ P7_MPMC_NR_PORT,
+};
+
+/* Prio is in the range [0, 7] */
+extern int p7_mpmc_set_prio(enum p7_mpmc_port port, unsigned prio);
+extern unsigned p7_mpmc_get_prio(enum p7_mpmc_port port);
+
+void __init p7_init_mpmc(void);
+
+#endif /* _MPMC_H_ */
diff --git a/arch/arm/mach-parrot7/nand.c b/arch/arm/mach-parrot7/nand.c
new file mode 100644
index 00000000000000..e47519dfaf5524
--- /dev/null
+++ b/arch/arm/mach-parrot7/nand.c
@@ -0,0 +1,94 @@
+/**
+ * linux/arch/arm/mach-parrot7/nand.c - Nand controller platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 15-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clkdev.h>
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include <nand/cast-nand.h>
+#include <linux/mtd/nand.h>
+
+#include "common.h"
+#include "clock.h"
+
+/* The NAND controller allocate dma_memory only once in its probe,
+ * so the size is known at compilation time
+ */
+static size_t dma_size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
+static dma_addr_t dma_addr;
+
+static struct resource p7_nand_res[] = {
+ [0] = {
+ .start = P7_NAND,
+ .end = P7_NAND + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_NAND_IRQ,
+ .end = P7_NAND_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static u64 p7_nand_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device p7_nand_dev = {
+ .name = CAST_DRV_NAME,
+ .id = 0,
+ .resource = p7_nand_res,
+ .num_resources = ARRAY_SIZE(p7_nand_res),
+ .dev = {
+ .dma_mask = &p7_nand_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32)
+ }
+};
+
+/**
+ * p7_init_nand() - Instantiate NAND controller for further driver usage.
+ *
+ * @pdata: controller platform specific data
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of element of @pins array
+ */
+void __init p7_init_nand(struct cast_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+#ifdef DEBUG
+ BUG_ON(! pdata);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+
+ p7_init_dev(&p7_nand_dev, pdata, pins, pin_cnt);
+ if (! dma_declare_coherent_memory(&p7_nand_dev.dev,
+ dma_addr,
+ dma_addr,
+ dma_size,
+ DMA_MEMORY_MAP))
+ panic("p7: failed to remap DMA area [%08x:%08x] for device %s\n",
+ dma_addr,
+ dma_addr + dma_size -1,
+ dev_name(&p7_nand_dev.dev));
+}
+
+/**
+ * p7_reserve_nand_mem() - Reserve DMA memory for the NAND controller
+ */
+void __init p7_reserve_nand_mem(void)
+{
+ /* The memory is not allocated in the global pool.
+	 * The driver only does a small allocation that would cause fragmentation of the global pool
+ */
+ p7_reserve_devmem(&p7_nand_dev,&dma_addr,&dma_size);
+}
diff --git a/arch/arm/mach-parrot7/nand.h b/arch/arm/mach-parrot7/nand.h
new file mode 100644
index 00000000000000..fcb19771136f89
--- /dev/null
+++ b/arch/arm/mach-parrot7/nand.h
@@ -0,0 +1,37 @@
+/**
+ * linux/arch/arm/mach-parrot7/nand.h - Nand controller platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 15-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_NAND_H
+#define _ARCH_PARROT7_NAND_H
+
+struct cast_plat_data;
+struct pinctrl_map;
+
+#if defined(CONFIG_MTD_NAND_CAST) || \
+ defined(CONFIG_MTD_NAND_CAST_MODULE)
+
+#include <linux/init.h>
+
+extern void p7_init_nand(struct cast_plat_data*,
+ struct pinctrl_map*,
+ size_t) __init;
+extern void p7_reserve_nand_mem(void);
+#else /* defined(CONFIG_MTD_NAND_CAST) || \
+ defined(CONFIG_MTD_NAND_CAST_MODULE) */
+
+#define p7_init_nand(_pdata, _pins, _pins_nr)
+#define p7_reserve_nand_mem()
+
+#endif /* defined(CONFIG_MTD_NAND_CAST) || \
+ defined(CONFIG_MTD_NAND_CAST_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/p7-smc.S b/arch/arm/mach-parrot7/p7-smc.S
new file mode 100644
index 00000000000000..777bc1b380c812
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7-smc.S
@@ -0,0 +1,311 @@
+/*
+ * Parrot7 secure API file
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * CPU0 only, enable or disable L2 cache
+ * r0 (smc_id) : SMC_L2CACHE (2)
+ * r1 (arg0): PL310 Auxiliary Control Reg value
+ * r2 (arg1): (enable << 31) | (data_ram_ctrl << 16) | tag_ram_ctrl
+ *
+ */
+ENTRY(smc_l2x0_enable)
+ stmfd sp!, {r2-r12, lr}
+ mov r2, r1
+ mov r1, r0
+ mov r0, #0x2
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r2-r12, pc}
+ENDPROC(smc_l2x0_enable)
+
+ENTRY(smc_l2x0_disable)
+ stmfd sp!, {r1-r12, lr}
+ mov r2, #0x0
+ mov r1, #0x0
+ mov r0, #0x2
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_disable)
+
+/*
+ * Write Auxiliary control register
+ * r0 (smc_id): SMC_ACTLR (3)
+ * r1 (arg0): value to be written
+ */
+ENTRY(smc_actlr)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ mov r0, #0x3
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_actlr)
+
+/*
+ * CPU0 only, Write PL301 remap register
+ * r0 (smc_id): SMC_PL301_REMAP (4)
+ * r1 (arg0): value to be written
+ */
+ENTRY(smc_pl301_remap)
+ /*stmfd sp!, {r2-r12, lr}*/
+ mov r1, r0
+ mov r0, #0x4
+ dsb
+ .arch_extension sec
+ smc #0
+ /*ldmfd sp!, {r2-r12, pc}*/
+ mov pc, lr
+ENDPROC(smc_pl301_remap)
+
+/*
+ * ARM_ERRATA_751472: An interrupted ICIALLUIS operation may
+ * prevent the completion of a following broadcasted operation
+ * Present in all r0, r1 and r2 revisions, fixed in r3p0
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_DISABLE_IR_MNT (6)
+ */
+ENTRY(smc_disable_ir_mnt)
+ stmfd sp!, {r1-r12, lr}
+ mov r0, #0x6
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_disable_ir_mnt)
+
+/*
+ * ARM_ERRATA_743622: Faulty logic in the store buffer may lead
+ * to data corruption
+ * Present in all r2 revisions, fixed in r3p0
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_DISABLE_FLU_OPT (7)
+ */
+ENTRY(smc_disable_flu_opt)
+ stmfd sp!, {r1-r12, lr}
+ ldr r0, =0x7
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_disable_flu_opt)
+
+
+/*
+ * Enable debug in non-secure mode
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_ENABLE_DEBUG (8)
+ */
+ENTRY(smc_enable_debug)
+ stmfd sp!, {r1-r12, lr}
+ ldr r0, =0x8
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_enable_debug)
+
+/*
+ * Write access to L2X0 ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_CTRL (9)
+ */
+ENTRY(smc_l2x0_ctrl)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0x9
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_ctrl)
+
+/*
+ * Write access to L2X0 auxiliary ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_AUX (10)
+ */
+ENTRY(smc_l2x0_aux)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xa
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_aux)
+
+/*
+ * Write access to L2X0 tag RAM latency ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_TAG_LAT (11)
+ */
+ENTRY(smc_l2x0_tag_lat)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xb
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_tag_lat)
+
+/*
+ * Write access to L2X0 data RAM latency ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_DAT_LAT (12)
+ */
+ENTRY(smc_l2x0_dat_lat)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xc
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_dat_lat)
+
+/*
+ * Write access to L2X0 debug ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_DBG_CTRL (13)
+ */
+ENTRY(smc_l2x0_dbg_ctrl)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xd
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_dbg_ctrl)
+
+/*
+ * Write access to L2X0 prefetch ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_PFT_CTRL (14)
+ */
+ENTRY(smc_l2x0_pft_ctrl)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xe
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_pft_ctrl)
+
+/*
+ * Write access to L2X0 power ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_PWR_CTRL (15)
+ */
+ENTRY(smc_l2x0_pwr_ctrl)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0xf
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_pwr_ctrl)
+
+/*
+ * Write access to L2X0 addr filtering start reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_ADR_FIL_START (16)
+ */
+ENTRY(smc_l2x0_adr_fil_start)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0x10
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_adr_fil_start)
+
+/*
+ * Write access to L2X0 addr filtering end reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_ADR_FIL_END (17)
+ */
+ENTRY(smc_l2x0_adr_fil_end)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0x11
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_adr_fil_end)
+
+/*
+ * Write access to SCU non-secure access ctrl reg (EXT_WR_NSACR, smc_id 18).
+ * NOTE(review): unlike the other EXT_WR_* services the caller's r0 value is
+ * never moved to r1 before loading the smc_id -- missing "mov r1, r0"?
+ */
+ENTRY(smc_scu_nsacr)
+ stmfd sp!, {r1-r12, lr}
+ ldr r0, =0x12
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_scu_nsacr)
+
+/*
+ * Write access to L2X0 invalidate way reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_L2_INV_WAY (19)
+ */
+ENTRY(smc_l2x0_inv_way)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0x13
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_l2x0_inv_way)
+
+/*
+ * Write access to CPACR CP10/CP11 access ctrl reg
+ * This service is implemented in the library extension
+ * r0 (smc_id): EXT_WR_CPACR (20)
+ */
+ENTRY(smc_scu_cpacr)
+ stmfd sp!, {r1-r12, lr}
+ mov r1, r0
+ ldr r0, =0x14
+ dsb
+ .arch_extension sec
+ smc #0
+ ldmfd sp!, {r1-r12, pc}
+ENDPROC(smc_scu_cpacr)
+/*
+ * Test if CPU is in non-secure state
+ */
+ENTRY(p7_is_nonsecure_)
+ stmfd sp!, {r1-r12, lr}
+ mrs r1, cpsr /*save local irq flags */
+ mrc p15, 0, r2, c1, c0, 1 /* read ACTLR */
+ mov r3, #0x80 /* ACTLR.EXCL mask*/
+ eor r4, r3, r2 /* attempt to toggle ACTLR.EXCL*/
+ mcr p15, 0, r4, c1, c0, 1
+ mrc p15, 0, r5, c1, c0, 1 /* read back ACTLR */
+ mcr p15, 0, r2, c1, c0, 1 /* write back ACTLR */
+	msr	cpsr_c, r1		/* restore local irq flags */
+ eors r2, r5
+ beq 1f
+ mov r0, #0
+ b 2f
+1: mov r0, #1
+2: ldmfd sp!, {r1-r12, pc}
+ENDPROC(p7_is_nonsecure_)
diff --git a/arch/arm/mach-parrot7/p7_pwm.c b/arch/arm/mach-parrot7/p7_pwm.c
new file mode 100644
index 00000000000000..68d4ce39a7de46
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7_pwm.c
@@ -0,0 +1,128 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7pwm.c - Parrot7 PWM platform implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Victor Lambret <victor.lambret.ext@parrot.com>
+ * date: 29-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <pwm/p7_pwm.h>
+#include <mach/pwm.h>
+#include "p7_pwm.h"
+#include <mach/p7.h>
+#include "common.h"
+#include "clock.h"
+#include "pinctrl.h"
+
+/* PWM controller register window (64 KiB at P7_PWM). */
+static struct resource p7pwm_res[] = {
+ [0] = {
+ .start = P7_PWM,
+ .end = P7_PWM + SZ_64K - 1,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+/* PWM platform device; pin mappings target its "p7_pwm.<id>" name. */
+static struct platform_device p7_p7pwm_dev = {
+ .name = P7PWM_DRV_NAME,
+ .id = 0,
+ .resource = p7pwm_res,
+ .num_resources = ARRAY_SIZE(p7pwm_res)
+};
+
+/* This part deduces the used_pwms bitmask from the pinmaps. */
+
+struct p7pwm_pinpwm {
+ uint16_t pin;      /* pin function identifier */
+ uint16_t mask_pwm; /* bit set in pdata->used for that PWM */
+};
+
+/*
+ * Map each PWM pin function to its PWM index bit. P7_PWM_15 and
+ * P7_PWM_15a are two alternate pins for the same PWM 15.
+ * Note: "static const" (not "const static") is the idiomatic specifier
+ * order; the old order triggers -Wold-style-declaration on GCC.
+ */
+static const struct p7pwm_pinpwm p7pwm_numpins[] __initconst = {
+ {.pin=P7_PWM_00, .mask_pwm=(1 << 0)},
+ {.pin=P7_PWM_01, .mask_pwm=(1 << 1)},
+ {.pin=P7_PWM_02, .mask_pwm=(1 << 2)},
+ {.pin=P7_PWM_03, .mask_pwm=(1 << 3)},
+ {.pin=P7_PWM_04, .mask_pwm=(1 << 4)},
+ {.pin=P7_PWM_05, .mask_pwm=(1 << 5)},
+ {.pin=P7_PWM_06, .mask_pwm=(1 << 6)},
+ {.pin=P7_PWM_07, .mask_pwm=(1 << 7)},
+ {.pin=P7_PWM_08, .mask_pwm=(1 << 8)},
+ {.pin=P7_PWM_09, .mask_pwm=(1 << 9)},
+ {.pin=P7_PWM_10, .mask_pwm=(1 << 10)},
+ {.pin=P7_PWM_11, .mask_pwm=(1 << 11)},
+ {.pin=P7_PWM_12, .mask_pwm=(1 << 12)},
+ {.pin=P7_PWM_13, .mask_pwm=(1 << 13)},
+ {.pin=P7_PWM_14, .mask_pwm=(1 << 14)},
+ {.pin=P7_PWM_15, .mask_pwm=(1 << 15)},
+ {.pin=P7_PWM_15a,.mask_pwm=(1 << 15)},
+};
+
+/* PWM default configuration: no precision control, normal mode. */
+static struct p7pwm_conf p7pwm_default_conf = {
+ .period_precision = 100,
+ .duty_precision = 100,
+ .mode = P7PWM_MODE_NORMAL
+};
+
+/**
+ * p7_init_p7pwm() - Instantiate P7 PWM dev for further driver usage.
+ *
+ * @pdata: platform data; pdata->used is computed here and NULL per-PWM
+ *         configurations are replaced by a default one
+ * @pins: array of pinmaps, each pinmap contains function & props for one PWM
+ * @pin_cnt: number of PWM pin entries described in @pins
+ *
+ * Panics on any registration failure.
+ */
+void __init p7_init_p7pwm(struct p7pwm_pdata *pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ int err;
+ unsigned int used = 0;
+ size_t i, j; /* size_t: avoid signed/unsigned compare with pin_cnt */
+
+#ifdef DEBUG
+ BUG_ON(! pins);
+ BUG_ON(! pdata);
+#endif
+
+ /* Compute dynamically the used PWMs from the pin functions. */
+ for (i = 0; i < pin_cnt; i++)
+  for (j = 0; j < ARRAY_SIZE(p7pwm_numpins); j++)
+   if ((unsigned int)(pins[i].data.mux.function) == p7pwm_numpins[j].pin)
+    used |= p7pwm_numpins[j].mask_pwm;
+ pdata->used = used;
+
+ /* Replace NULL conf pointers by the default configuration. */
+ for (i = 0; i < P7PWM_NUMBER; i++)
+  if (NULL == pdata->conf[i])
+   pdata->conf[i] = &p7pwm_default_conf;
+
+ /* Assign pins before registering device in case the driver is
+  * already loaded. */
+ if (pin_cnt) {
+  char buf[10];
+
+  /* Must match p7_p7pwm_dev's "name.id" device name. */
+  snprintf(buf, sizeof(buf), "p7_pwm.%d", p7_p7pwm_dev.id);
+  err = p7_assign_pins(buf, pins, pin_cnt);
+  if (err)
+   goto err;
+ }
+
+ /* Create platform device */
+ err = p7_init_dev(&p7_p7pwm_dev, pdata, NULL, 0);
+ if (err)
+  panic("p7: failed to init P7 PWM %d (%d)\n", 0, err);
+
+ return;
+
+ /*
+  * Pinctrl does not provide us with a way to remove registered pin
+  * mappings...
+  */
+err:
+ panic("p7: failed to init PWM master controller %d (%d)\n",
+       p7_p7pwm_dev.id, err);
+}
diff --git a/arch/arm/mach-parrot7/p7_pwm.h b/arch/arm/mach-parrot7/p7_pwm.h
new file mode 100644
index 00000000000000..d4b0949f66b13b
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7_pwm.h
@@ -0,0 +1,34 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7pwm.h - Parrot7 PWM platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Victor Lambret <victor.lambret.ext@parrot.com>
+ * date: 29-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_P7PWM_H
+#define _ARCH_PARROT7_P7PWM_H
+
+#include <linux/init.h>
+#include <linux/pinctrl/machine.h>
+#include <pwm/p7_pwm.h>
+
+#if defined(CONFIG_PWM_PARROT7) || \
+    defined(CONFIG_PWM_PARROT7_MODULE)
+
+/* Register the P7 PWM controller and assign its pins (see p7_pwm.c). */
+void __init p7_init_p7pwm(struct p7pwm_pdata *pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt);
+
+#else /* defined(CONFIG_PWM_PARROT7) || \
+ defined(CONFIG_PWM_PARROT7_MODULE) */
+
+/* PWM support compiled out: initialization is a no-op. */
+#define p7_init_p7pwm(...)
+
+#endif /* defined(CONFIG_PWM_PARROT7) || \
+ defined(CONFIG_PWM_PARROT7_MODULE) */
+
+#endif /* _ARCH_PARROT7_P7PWM_H */
diff --git a/arch/arm/mach-parrot7/p7_temperature.c b/arch/arm/mach-parrot7/p7_temperature.c
new file mode 100644
index 00000000000000..a03f09e2458991
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7_temperature.c
@@ -0,0 +1,51 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7_temperature.c - Parrot7 TEMPERATURE platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Karl Leplat <karl.leplat@parrot.com>
+ * date: 20-Sept-2013
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <mach/p7-adc.h>
+#include "pinctrl.h"
+#include "p7_temperature.h"
+
+/* ADC channels used by the temperature driver, labelled "p7" and "p7mu". */
+static struct p7_temp_chan p7mu_adc_channels[] = {
+ {
+ .channel = 0,
+ .freq = 160000,
+ .name = "p7",
+ },
+ {
+ .channel = 7,
+ .freq = 160000,
+ .name = "p7mu",
+ }
+};
+
+/* Channel table handed to the driver through platform data. */
+static struct p7_temp_chan_data p7mu_adc_chan_data = {
+ .channels = p7mu_adc_channels,
+ .num_channels = ARRAY_SIZE(p7mu_adc_channels),
+};
+
+/* The "p7-temperature" platform device registered by p7_init_temperature(). */
+static struct platform_device p7_temp_device = {
+ .name = "p7-temperature",
+ .id = -1,
+ .dev.platform_data = &p7mu_adc_chan_data,
+};
+
+/**
+ * p7_init_temperature() - Register the "p7-temperature" platform device.
+ *
+ * Also sets drive strength 5 on pins 49 and 50 first (presumably the
+ * sensor/ADC lines -- confirm against the board schematics).
+ * Logs an error on registration failure instead of panicking.
+ */
+void __init p7_init_temperature(void)
+{
+ int err;
+
+ p7_config_pin(49, P7CTL_DRV_CFG(5));
+ p7_config_pin(50, P7CTL_DRV_CFG(5));
+
+ err = platform_device_register(&p7_temp_device);
+ if (err)
+  /* pr_err() already carries the KERN_ERR level; passing KERN_ERR
+   * in the format string would print the raw level marker. */
+  pr_err("Error registering P7 temperature device: %d.\n", err);
+}
diff --git a/arch/arm/mach-parrot7/p7_temperature.h b/arch/arm/mach-parrot7/p7_temperature.h
new file mode 100644
index 00000000000000..d65c608fff7328
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7_temperature.h
@@ -0,0 +1,29 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7_temperature.h - Parrot7 TEMPERATURE platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Karl Leplat <karl.leplat@parrot.com>
+ * date: 20-Sept-2013
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_P7TEMPERATURE_H
+#define _ARCH_PARROT7_P7TEMPERATURE_H
+
+#if defined(CONFIG_P7_TEMPERATURE) || \
+ defined(CONFIG_P7_TEMPERATURE_MODULE)
+
+/* Register the "p7-temperature" platform device (see p7_temperature.c). */
+extern void p7_init_temperature(void) __init;
+
+#else /* defined(CONFIG_P7_TEMPERATURE) || \
+ defined(CONFIG_P7_TEMPERATURE_MODULE) */
+
+/* Support compiled out: expression evaluates to -ENOSYS. */
+#define p7_init_temperature() \
+ ({ -ENOSYS; })
+
+#endif /* defined(CONFIG_P7_TEMPERATURE) || \
+ defined(CONFIG_P7_TEMPERATURE_MODULE) */
+
+#endif /*_ARCH_PARROT7_P7TEMPERATURE_H*/
diff --git a/arch/arm/mach-parrot7/p7mu.c b/arch/arm/mach-parrot7/p7mu.c
new file mode 100644
index 00000000000000..15646751ba2c5e
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7mu.c
@@ -0,0 +1,47 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7mu.c - Parrot7 power management unit platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 22-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <mfd/p7mu.h>
+#include "common.h"
+#include "pinctrl.h"
+#include "i2cm.h"
+
+/* P7MU board info: fixed I2C address 0x31; platform_data set at init time. */
+static struct i2c_board_info p7mu_dev __initdata = {
+ I2C_BOARD_INFO(P7MU_DRV_NAME, 0x31),
+};
+
+/**
+ * p7_init_p7mu() - Instantiate P7MU device on I2C bus identified by @bus
+ * for further driver usage.
+ *
+ * @bus: master controller / bus identifier
+ * @pdata: controller platform specific data; pdata->gpio must be a valid
+ *         GPIO number (checked under DEBUG)
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of element of @pins array
+ *
+ * Points the board info's platform_data at @pdata, then registers the
+ * device as an I2C slave of bus @bus through p7_init_i2cm_slave().
+ */
+void __init p7_init_p7mu(int bus,
+ struct p7mu_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+#ifdef DEBUG
+ BUG_ON(! pdata);
+ BUG_ON(! gpio_is_valid(pdata->gpio));
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+
+ p7mu_dev.platform_data = pdata;
+ p7_init_i2cm_slave(bus, &p7mu_dev, pins, pin_cnt);
+}
diff --git a/arch/arm/mach-parrot7/p7mu.h b/arch/arm/mach-parrot7/p7mu.h
new file mode 100644
index 00000000000000..4ee960bb6c7457
--- /dev/null
+++ b/arch/arm/mach-parrot7/p7mu.h
@@ -0,0 +1,37 @@
+/**
+ * linux/arch/arm/mach-parrot7/p7mu.h - Parrot7 power management unit platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 26-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_P7MU_H
+#define _ARCH_PARROT7_P7MU_H
+
+/* Forward declarations: a pointer suffices here, avoid heavy includes. */
+struct p7mu_plat_data;
+struct pinctrl_map;
+
+#if defined(CONFIG_MFD_P7MU) || \
+ defined(CONFIG_MFD_P7MU_MODULE)
+
+#include <linux/init.h>
+
+/* Declare the P7MU chip on an I2C bus (see p7mu.c). */
+extern void p7_init_p7mu(int,
+ struct p7mu_plat_data*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+#else /* defined(CONFIG_MFD_P7MU) || \
+ defined(CONFIG_MFD_P7MU_MODULE) */
+
+/* P7MU support compiled out: initialization is a no-op. */
+#define p7_init_p7mu(_bus, _pdata, _pins, _pins_nr)
+
+#endif /* defined(CONFIG_MFD_P7MU) || \
+ defined(CONFIG_MFD_P7MU_MODULE) */
+
+#endif /* _ARCH_PARROT7_P7MU_H */
diff --git a/arch/arm/mach-parrot7/pinctrl.c b/arch/arm/mach-parrot7/pinctrl.c
new file mode 100644
index 00000000000000..7488f658b4106a
--- /dev/null
+++ b/arch/arm/mach-parrot7/pinctrl.c
@@ -0,0 +1,202 @@
+/**
+ * linux/arch/arm/mach-parrot7/pinctrl.c - Parrot7 pin controller platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 06-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <gpio/p7-gpio.h>
+#include "system.h"
+#include "common.h"
+#include "pinctrl.h"
+
+/* Pin configurations per silicon revision, indexed by p7_chiprev(). */
+static const struct p7_pinctrl_config *p7_pctl_chip_configs[] = {
+ [P7_CHIPREV_R1] = &p7_pinctrl_config_r1,
+ [P7_CHIPREV_R2] = &p7_pinctrl_config_r2,
+ [P7_CHIPREV_R3] = &p7_pinctrl_config_r3,
+};
+
+/* Return the pin configuration for the running silicon revision. */
+static inline const struct p7_pinctrl_config *p7_pctl_get_chip_config(void)
+{
+ int const chiprev = p7_chiprev();
+
+ /* A missing table entry means this chip revision is unsupported. */
+ BUG_ON(chiprev >= ARRAY_SIZE(p7_pctl_chip_configs));
+ BUG_ON(p7_pctl_chip_configs[chiprev] == NULL);
+
+ return p7_pctl_chip_configs[chiprev];
+}
+
+
+/**
+ * p7_config_pin() - Configure a pin.
+ *
+ * @pin: pin index (ie gpio)
+ * @config: configuration to apply
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ */
+int p7_config_pin(int pin, int config)
+{
+ const struct p7_pinctrl_config * const chip_cfg = p7_pctl_get_chip_config();
+ const char *desc_name;
+
+ /* Reject out-of-range pin numbers for this chip revision. */
+ if (pin < 0 || pin >= chip_cfg->pin_descs_sz) {
+  pr_err("p7: invalid pin number (%d)\n", pin);
+  return -EINVAL;
+ }
+
+ desc_name = chip_cfg->pin_descs[pin].name;
+ pr_debug("p7: config pin %d (%s)\n", pin, desc_name);
+ return pin_config_set(P7CTL_DRV_NAME ".0", desc_name, config);
+}
+
+
+/**
+ * p7_assign_named_pins() - Assign a set of pins to device for further usage.
+ *
+ * @dev_name: device name
+ * @name: optional pins set name
+ * @pins: array of pin functions and optional settings
+ * @pin_cnt: number of element of @pins array
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ *
+ * Register a set of multiplexed functions / pins to pinctrl core for device
+ * usage. @pins array element is either:
+ *      - one multiplexed function / physical pin mapping initialized using
+ *        P7_INIT_PINMAP(), or
+ *      - one physical pin set of properties (pull up / down, drive strength...)
+ *        initialized using P7_INIT_PINCFG().
+ *
+ * Device drivers will further use registered pins by requesting them from the
+ * pinctrl core.
+ * Keep in mind that PIN_MAP_CONFIGS_GROUPS mappings are not handled
+ * since Parrot7 has no notion of pin groups at hardware level.
+ *
+ * Note:
+ * @pins may be located into initdata section (or on stack) as pinctrl core
+ * (shallow) copies structures content at registering time. However, embedded
+ * pointers are not copied, meaning all pin settings cannot live in kernel's
+ * init sections.
+ */
+int __init p7_assign_named_pins(char const* dev_name,
+                                char const* name,
+                                struct pinctrl_map* pins,
+                                size_t pin_cnt)
+{
+ const struct p7_pinctrl_config *cfg;
+ unsigned int m;
+ int err;
+
+ cfg = p7_pctl_get_chip_config();
+
+#ifdef DEBUG
+ /* Validate arguments BEFORE using dev_name in the trace below
+  * (the original traced first, dereferencing a possibly NULL name). */
+ BUG_ON(! dev_name);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+ pr_debug("p7: registering %s I/O pins...\n", dev_name);
+#endif
+
+ for (m = 0; m < pin_cnt; m++) {
+  enum p7_pin_func func_id;
+
+  switch (pins[m].type) {
+  case PIN_MAP_TYPE_MUX_GROUP:
+   /* Translate the generic function id (stored as a fake
+    * pointer by P7_INIT_PINMAP) to this revision's name. */
+   func_id = (enum p7_pin_func) pins[m].data.mux.function;
+
+   BUG_ON((unsigned int) func_id >= P7_N_FUNCTIONS);
+   /* Make sure the function is available on this chip rev */
+   BUG_ON(cfg->pin_funcs[func_id].name == NULL);
+
+   pins[m].data.mux.function = cfg->pin_funcs[func_id].name;
+
+   pr_debug("p7:\tinstall %s I/O pin function\n",
+            pins[m].data.mux.function);
+   break;
+
+  case PIN_MAP_TYPE_CONFIGS_PIN:
+   func_id = (enum p7_pin_func) pins[m].data.configs.group_or_pin;
+
+   BUG_ON((unsigned int) func_id >= P7_N_FUNCTIONS);
+   /* Make sure the function is available on this chip rev */
+   BUG_ON(cfg->pin_funcs[func_id].name == NULL);
+   BUG_ON((unsigned int) cfg->pin_funcs[func_id].pin_id >=
+          cfg->pin_descs_sz);
+
+   pins[m].data.configs.group_or_pin =
+    cfg->pin_descs[cfg->pin_funcs[func_id].pin_id].name;
+
+   pr_debug("p7:\tinstall %s I/O pin settings\n",
+            pins[m].data.configs.group_or_pin);
+   break;
+
+  default:
+   /* Group mappings are unsupported (no hardware pin groups). */
+   BUG();
+  }
+
+  /* Assign pins to device. */
+  pins[m].dev_name = dev_name;
+
+  /* Do not override default name if none given in argument. */
+  if (name)
+   pins[m].name = name;
+ }
+
+ /* Register pins to pinctrl core. */
+ err = pinctrl_register_mappings(pins, pin_cnt);
+ if (err)
+  pr_err("p7: failed to register %s I/O pins (%d)\n", dev_name, err);
+
+ return err;
+}
+
+/*
+ * Pin controller hardware registers space
+ */
+static struct resource p7_pctl_res = {
+ .start = P7_SYS_PADCTRL_IO,
+ .end = P7_SYS_PADCTRL_IO + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+/*
+ * Pin controller platform specific data, allowing to manage Parrot7 variants
+ * with different set of pins and functions. Revision-dependent fields are
+ * filled in by p7_init_pctl() from the detected chip configuration.
+ */
+static struct p7ctl_plat_data p7_pctl_pdata = {
+ .gpio_drv = P7GPIO_DRV_NAME
+};
+
+/*
+ * Pin controller device
+ */
+static struct platform_device p7_pctl_dev = {
+ .name = P7CTL_DRV_NAME,
+ .id = 0,
+ .dev.platform_data = &p7_pctl_pdata,
+ .num_resources = 1,
+ .resource = &p7_pctl_res
+};
+
+/* Fill platform data for the detected chip rev and register the device. */
+int __init p7_init_pctl(void)
+{
+ const struct p7_pinctrl_config * const rev_cfg = p7_pctl_get_chip_config();
+ struct p7ctl_plat_data * const pdata = &p7_pctl_pdata;
+
+ pdata->funcs    = rev_cfg->pin_funcs;
+ pdata->funcs_nr = P7_N_FUNCTIONS;
+ pdata->pins     = rev_cfg->pin_descs;
+ pdata->pins_nr  = rev_cfg->pin_descs_sz;
+ pdata->gpios    = rev_cfg->gpio_pins;
+ pdata->gpios_nr = rev_cfg->gpio_pins_sz;
+
+ return p7_init_dev(&p7_pctl_dev, NULL, NULL, 0);
+}
diff --git a/arch/arm/mach-parrot7/pinctrl.h b/arch/arm/mach-parrot7/pinctrl.h
new file mode 100644
index 00000000000000..0156c55d60a66b
--- /dev/null
+++ b/arch/arm/mach-parrot7/pinctrl.h
@@ -0,0 +1,806 @@
+/**
+ * linux/arch/arm/mach-parrot7/pinctrl.h - Parrot7 pin controller platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 07-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+/**************************************************
+ * Pinctrl interface usage sample code for BSP...
+ **************************************************/
+
+#ifdef DONT_REMOVE_ME
+
+/*
+ * Properties tables must be declared as static. They cannot live in any
+ * of the kernel's init sections and must not be constant.
+ */
+static unsigned long myboard_sdhci_cmdcfg[] = {
+ P7CTL_SMT_CFG(OFF),
+ P7CTL_PUD_CFG(KEEP),
+ P7CTL_SLR_CFG(1),
+};
+
+static unsigned long myboard_sdhci_datcfg[] = {
+ P7CTL_SMT_CFG(ON),
+ P7CTL_PUD_CFG(UP),
+ P7CTL_SLR_CFG(0),
+ P7CTL_DRV_CFG(TRI)
+};
+
+static unsigned long myboard_sdhci1_clkcfg[] = {
+ P7CTL_PUD_CFG(DOWN),
+ P7CTL_DRV_CFG(5)
+};
+
+/*
+ * Pinmaps tables SHOULD be hold by an __initdata section.
+ */
+static struct pinctrl_map myboard_sdhci0_pins[] __initdata = {
+ /* Just select the right clock function. */
+ P7_INIT_PINMAP(P7_SD_0_CLK),
+
+ /* Command pin is assigned a set of properties shared with sdhci1 command. */
+ P7_INIT_PINMAP(P7_SD_0_CMD),
+ P7_INIT_PINCFG(P7_SD_0_CMD, myboard_sdhci_cmdcfg),
+
+ /* Just select the right functions here. */
+ P7_INIT_PINMAP(P7_SD_0_DAT00),
+ P7_INIT_PINMAP(P7_SD_0_DAT01),
+ P7_INIT_PINMAP(P7_SD_0_DAT02),
+
+ /* Properties also shared with sdhci0 dat00 & dat01. */
+ P7_INIT_PINCFG(P7_SD_0_DAT03, myboard_sdhci_datcfg),
+};
+
+static struct pinctrl_map myboard_sdhci1_pins[] __initdata = {
+ /* Clock pin is assigned its own set of properties. */
+ P7_INIT_PINMAP(P7_SD_1_CLK),
+ P7_INIT_PINCFG(P7_SD_1_CLK, myboard_sdhci1_clkcfg),
+
+ /* Same for command pin. */
+ P7_INIT_PINMAP(P7_SD_1_CMD),
+ P7_INIT_PINCFG(P7_SD_1_CMD, myboard_sdhci_cmdcfg),
+
+ /* A single set of properties is applied to dat00 & dat01 only */
+ P7_INIT_PINMAP(P7_SD_1_DAT00),
+ P7_INIT_PINCFG(P7_SD_1_DAT00, myboard_sdhci_datcfg),
+
+ P7_INIT_PINMAP(P7_SD_1_DAT01),
+ P7_INIT_PINCFG(P7_SD_1_DAT01, myboard_sdhci_datcfg),
+
+ /* Just select the right functions here. */
+ P7_INIT_PINMAP(P7_SD_1_DAT02),
+ P7_INIT_PINMAP(P7_SD_1_DAT03),
+};
+
+/*
+ * Register pinmaps later on using p7_init_dev or controller specific
+ * implementation.
+ */
+p7_init_sdhci(0,
+ &myboard_sdhci0_pdata,
+ myboard_sdhci0_pins,
+ ARRAY_SIZE(myboard_sdhci0_pins));
+p7_init_sdhci(1,
+ &myboard_sdhci1_pdata,
+ myboard_sdhci1_pins,
+ ARRAY_SIZE(myboard_sdhci1_pins));
+
+#endif /* DONT_REMOVE_ME */
+
+#ifndef _ARCH_PARROT7_PINCTRL_H
+#define _ARCH_PARROT7_PINCTRL_H
+
+#ifdef CONFIG_PINCTRL_PARROT7
+
+#include <linux/pinctrl/machine.h>
+#include <pinctrl/p7-pinctrl.h>
+
+extern int p7_config_pin(int pin,
+ int config);
+
+extern int p7_assign_named_pins(char const*,
+ char const*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+/**
+ * p7_assign_pins() - Assign a default set of pins to device for further usage.
+ *
+ * @dev_name: device name
+ * @pins: array of pin functions and optional settings
+ * @pin_cnt: number of element of @pins array
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ *
+ * Note: see p7_assign_named_pins() for more infos.
+ */
+static inline int p7_assign_pins(char const* dev_name,
+                                 struct pinctrl_map* pins,
+                                 size_t pin_cnt)
+{
+ return p7_assign_named_pins(dev_name, NULL, pins, pin_cnt);
+}
+
+/**
+ * P7_INIT_PINMAP() - Define a pin to be used according to multiplexing
+ * function passed in argument.
+ * @_func_id: multiplexing function identifier
+ *
+ * The numeric function id is stored in the map's function-name slot via a
+ * (char*) cast; p7_assign_named_pins() later replaces it with the real
+ * per-revision function name.
+ */
+#define P7_INIT_PINMAP(_func_id) \
+ PIN_MAP_MUX_GROUP_DEFAULT(NULL, \
+ P7CTL_DRV_NAME ".0", \
+ NULL, \
+ (char*) _func_id)
+
+/**
+ * P7_INIT_PINCFG() - Define multiplexed pin settings
+ *
+ * @_func_id: multiplexing function identifier
+ * @_cfg: settings
+ *
+ * @_cfg must be initialized using Parrot7 pin controller driver macros
+ * (see linux/drivers/pinctrl/p7-pinctrl.h);
+ */
+#define P7_INIT_PINCFG(_func_id, _cfg) \
+ PIN_MAP_CONFIGS_PIN_DEFAULT(NULL, \
+ P7CTL_DRV_NAME ".0", \
+ (char*) _func_id, \
+ _cfg)
+
+/* Per-revision pin tables (instances p7_pinctrl_config_r1..r3 below). */
+struct p7_pinctrl_config {
+ const struct pinctrl_pin_desc *pin_descs; /* physical pin descriptors */
+ unsigned pin_descs_sz; /* number of entries in pin_descs */
+ const struct p7ctl_function *pin_funcs; /* function -> pin mapping */
+ unsigned long *gpio_pins; /* presumably gpio-capable pins -- confirm */
+ unsigned gpio_pins_sz;
+};
+
+extern const struct p7_pinctrl_config p7_pinctrl_config_r1;
+extern const struct p7_pinctrl_config p7_pinctrl_config_r2;
+extern const struct p7_pinctrl_config p7_pinctrl_config_r3;
+
+extern int p7_init_pctl(void) __init;
+
+/**
+ * enum p7_pin_func - Multiplexing function identifier.
+ *
+ * This enum list all functions that exist in *any* version of the chip, not all
+ * chips revisions will have the same set of functions.
+ *
+ * Note: identifiers were generated using the genpin.sh script located
+ * into this directory.
+ */
+enum p7_pin_func {
+ P7_CAM_1_DATA02,
+ P7_GPIO_000,
+ P7_SPI_03b,
+ P7_CAM_3_VSa,
+ P7_CAM_1_DATA03,
+ P7_GPIO_001,
+ P7_SPI_04b,
+ P7_CAM_3_HSa,
+ P7_CAM_1_DATA04,
+ P7_GPIO_002,
+ P7_SPI_05b,
+ P7_CAM_4_VSa,
+ P7_CAM_1_DATA05,
+ P7_GPIO_003,
+ P7_SPI_06b,
+ P7_CAM_4_HSa,
+ P7_CAM_1_DATA06,
+ P7_GPIO_004,
+ P7_SPI_07b,
+ P7_CAM_5_VSa,
+ P7_CAM_1_DATA07,
+ P7_GPIO_005,
+ P7_SPI_08b,
+ P7_CAM_5_HSa,
+ P7_CAM_5_CLK,
+ P7_GPIO_006,
+ P7_SD_2_CLK,
+ P7_CAM_5_DATA00,
+ P7_GPIO_007,
+ P7_SD_2_CMD,
+ P7_CAM_5_DATA01,
+ P7_GPIO_008,
+ P7_SD_2_DAT00,
+ P7_CAM_5_DATA02,
+ P7_GPIO_009,
+ P7_SD_2_DAT01,
+ P7_CAM_5_DATA03,
+ P7_GPIO_010,
+ P7_SD_2_DAT02,
+ P7_CAM_5_DATA04,
+ P7_GPIO_011,
+ P7_SD_2_DAT03,
+ P7_CAM_5_DATA05,
+ P7_GPIO_012,
+ P7_CAM_5_DATA06,
+ P7_GPIO_013,
+ P7_CAM_5_DATA07,
+ P7_GPIO_014,
+ P7_SD_1_CLK,
+ P7_UART_5_TX,
+ P7_SD_1_CMD,
+ P7_UART_5_RX,
+ P7_SD_1_DAT00,
+ P7_UART_6_TX,
+ P7_SD_1_DAT02,
+ P7_UART_7_TX,
+ P7_SD_1_DAT01,
+ P7_UART_6_RX,
+ P7_SD_1_DAT03,
+ P7_UART_7_RX,
+ P7_SD_0_CLK,
+ P7_SD_0_CMD,
+ P7_SD_0_DAT00,
+ P7_SD_0_DAT01,
+ P7_SD_0_DAT02,
+ P7_SD_0_DAT03,
+ P7_NAND_NCE,
+ P7_SPI_00a,
+ P7_NAND_NR,
+ P7_SPI_01a,
+ P7_NAND_NW,
+ P7_SPI_02a,
+ P7_NAND_AL,
+ P7_SPI_03a,
+ P7_NAND_CL,
+ P7_SPI_04a,
+ P7_NAND_RNB,
+ P7_SPI_05a,
+ P7_NAND_DQS,
+ P7_GPIO_033,
+ P7_CAN1_RXa,
+ P7_NAND_DATA_00,
+ P7_SPI_06a,
+ P7_NAND_DATA_01,
+ P7_SPI_07a,
+ P7_NAND_DATA_02,
+ P7_SPI_08a,
+ P7_NAND_DATA_03,
+ P7_SPI_09a,
+ P7_NAND_DATA_04,
+ P7_GPIO_038,
+ P7_NAND_DATA_05,
+ P7_GPIO_039,
+ P7_NAND_DATA_06,
+ P7_GPIO_040,
+ P7_NAND_DATA_07,
+ P7_GPIO_041,
+ P7_NAND_WP,
+ P7_GPIO_042,
+ P7_GPIO_049,
+ P7_GPIO_050,
+ P7_CAN1_TXa,
+ P7_FORCE_POWER_ON,
+ P7_TRST_N,
+ P7_TDI,
+ P7_TDO,
+ P7_TCK,
+ P7_TMS,
+ P7_UART_1_RX,
+ P7_UART_1_TX,
+ P7_UART_1_RTS,
+ P7_CAN0_RXb,
+ P7_UART_1_CTS,
+ P7_CAN0_TXb,
+ P7_UART_0_RX,
+ P7_GPIO_055,
+ P7_UART_0_TX,
+ P7_GPIO_056,
+ P7_UART_0_RTS,
+ P7_GPIO_057,
+ P7_UART_0_CTS,
+ P7_GPIO_058,
+ P7_I2C_2_DAT,
+ P7_GPIO_059,
+ P7_I2C_2_SL_DAT,
+ P7_I2C_2_CLK,
+ P7_GPIO_060,
+ P7_I2C_2_SL_CLK,
+ P7_I2C_1_DAT,
+ P7_GPIO_061,
+ P7_I2C_1_SL_DAT,
+ P7_I2C_1_CLK,
+ P7_GPIO_062,
+ P7_I2C_1_SL_CLK,
+ P7_I2C_0_DAT,
+ P7_GPIO_063,
+ P7_I2C_0_SL_DAT,
+ P7_I2C_0_CLK,
+ P7_GPIO_064,
+ P7_I2C_0_SL_CLK,
+ P7_UART_4_RX,
+ P7_GPIO_065,
+ P7_UART_4_TX,
+ P7_GPIO_066,
+ P7_UART_3_RX,
+ P7_GPIO_067,
+ P7_UART_3_TX,
+ P7_GPIO_068,
+ P7_UART_2_RX,
+ P7_CAN1_TXb,
+ P7_UART_2_TX,
+ P7_CAN1_RXb,
+ P7_I2C_SECURE_CLK,
+ P7_SPI_09b,
+ P7_GPIO_071,
+ P7_PWM_15,
+ P7_SPI_10b,
+ P7_GPIO_072,
+ P7_SPI_11b,
+ P7_GPIO_073,
+ P7_I2C_SECURE_CLKa,
+ P7_SPI_12b,
+ P7_GPIO_074,
+ P7_SPI_13b,
+ P7_GPIO_075,
+ P7_SPI_14b,
+ P7_GPIO_076,
+ P7_SPI_00,
+ P7_GPIO_077,
+ P7_AAI_15,
+ P7_CAN1_TX,
+ P7_SPI_01,
+ P7_GPIO_078,
+ P7_AAI_16,
+ P7_CAN1_RX,
+ P7_SPI_02,
+ P7_GPIO_079,
+ P7_AAI_17,
+ P7_CAN0_TX,
+ P7_SPI_03,
+ P7_GPIO_080,
+ P7_AAI_18,
+ P7_CAN0_RX,
+ P7_SPI_04,
+ P7_GPIO_081,
+ P7_AAI_19,
+ P7_SPI_05,
+ P7_GPIO_082,
+ P7_AAI_20,
+ P7_SPI_06,
+ P7_GPIO_083,
+ P7_AAI_21,
+ P7_SPI_07,
+ P7_GPIO_084,
+ P7_AAI_22,
+ P7_SPI_08,
+ P7_GPIO_085,
+ P7_AAI_23,
+ P7_SPI_09,
+ P7_GPIO_086,
+ P7_AAI_24,
+ P7_SPI_10,
+ P7_GPIO_087,
+ P7_AAI_25,
+ P7_SPI_11,
+ P7_GPIO_088,
+ P7_AAI_26,
+ P7_SPI_12,
+ P7_GPIO_089,
+ P7_AAI_27,
+ P7_SPI_13,
+ P7_GPIO_090,
+ P7_AAI_28,
+ P7_SPI_14,
+ P7_GPIO_091,
+ P7_AAI_29,
+ P7_SPI_15,
+ P7_GPIO_092,
+ P7_AAI_30,
+ P7_P7MU_CLK_OUT,
+ P7_PWM_15a,
+ P7_I2C_SECURE_DAT,
+ P7_REBOOT_P7MU,
+ P7_GPIO_094,
+ P7_ULPI_0_DATA00,
+ P7_ULPI_0_DATA01,
+ P7_ULPI_0_DATA02,
+ P7_ULPI_0_DATA03,
+ P7_ULPI_0_DATA04,
+ P7_ULPI_0_DATA05,
+ P7_ULPI_0_DATA06,
+ P7_ULPI_0_DATA07,
+ P7_ULPI_0_NXT,
+ P7_ULPI_0_DIR,
+ P7_ULPI_0_CLK,
+ P7_ULPI_1_DATA00,
+ P7_GPIO_106,
+ P7_I2C_2_CLKa,
+ P7_ULPI_1_DATA01,
+ P7_GPIO_107,
+ P7_I2C_2_DATa,
+ P7_ULPI_1_DATA02,
+ P7_GPIO_108,
+ P7_I2C_2_CLKb,
+ P7_ULPI_1_DATA03,
+ P7_GPIO_109,
+ P7_I2C_2_DATb,
+ P7_ULPI_1_DATA04,
+ P7_GPIO_110,
+ P7_I2C_2_CLKc,
+ P7_ULPI_1_DATA05,
+ P7_GPIO_111,
+ P7_I2C_2_DATc,
+ P7_ULPI_1_DATA06,
+ P7_GPIO_112,
+ P7_I2C_2_CLKd,
+ P7_ULPI_1_DATA07,
+ P7_GPIO_113,
+ P7_I2C_2_DATd,
+ P7_ULPI_1_NXT,
+ P7_GPIO_114,
+ P7_I2C_2_CLKe,
+ P7_ULPI_1_DIR,
+ P7_GPIO_115,
+ P7_ULPI_1_STP,
+ P7_GPIO_116,
+ P7_GPIO_117,
+ P7_I2C_2_DATe,
+ P7_ULPI_0_STP,
+ P7_ULPI_1_CLK,
+ P7_GPIO_118,
+ P7_AAI_00,
+ P7_GPIO_119,
+ P7_PWM_00,
+ P7_AAI_01,
+ P7_GPIO_120,
+ P7_PWM_01,
+ P7_AAI_02,
+ P7_GPIO_121,
+ P7_PWM_02,
+ P7_AAI_03,
+ P7_GPIO_122,
+ P7_PWM_03,
+ P7_AAI_04,
+ P7_GPIO_123,
+ P7_PWM_04,
+ P7_AAI_05,
+ P7_GPIO_124,
+ P7_PWM_05,
+ P7_AAI_06,
+ P7_GPIO_125,
+ P7_PWM_06,
+ P7_AAI_07,
+ P7_GPIO_126,
+ P7_PWM_07,
+ P7_AAI_08,
+ P7_GPIO_127,
+ P7_PWM_08,
+ P7_AAI_09,
+ P7_GPIO_128,
+ P7_PWM_09,
+ P7_AAI_10,
+ P7_GPIO_129,
+ P7_PWM_10,
+ P7_AAI_11,
+ P7_GPIO_130,
+ P7_PWM_11,
+ P7_AAI_12,
+ P7_GPIO_131,
+ P7_PWM_12,
+ P7_AAI_13,
+ P7_GPIO_132,
+ P7_PWM_13,
+ P7_AAI_14,
+ P7_GPIO_133,
+ P7_PWM_14,
+ P7_SPI_16,
+ P7_GPIO_134,
+ P7_ETH_MII_TXER,
+ P7_SPI_17,
+ P7_GPIO_135,
+ P7_ETH_MII_RXER,
+ P7_SPI_18,
+ P7_GPIO_136,
+ P7_ETH_MII_CRS,
+ P7_SPI_19,
+ P7_GPIO_137,
+ P7_ETH_MII_COL,
+ P7_ETH_RGMII_TXC,
+ P7_GPIO_138,
+ P7_ETH_RGMII_TXD_00,
+ P7_GPIO_139,
+ P7_ETH_RGMII_TXD_01,
+ P7_GPIO_140,
+ P7_ETH_RGMII_TXD_02,
+ P7_GPIO_141,
+ P7_ETH_RGMII_TXD_03,
+ P7_GPIO_142,
+ P7_ETH_RGMII_TX_CTL,
+ P7_GPIO_143,
+ P7_ETH_RGMII_RXC,
+ P7_GPIO_144,
+ P7_ETH_RGMII_RXD_00,
+ P7_GPIO_145,
+ P7_ETH_RGMII_RXD_01,
+ P7_GPIO_146,
+ P7_ETH_RGMII_RXD_02,
+ P7_GPIO_147,
+ P7_ETH_RGMII_RXD_03,
+ P7_GPIO_148,
+ P7_ETH_RGMII_RX_CTL,
+ P7_GPIO_149,
+ P7_ETH_MDC,
+ P7_GPIO_150,
+ P7_ETH_MDIO,
+ P7_GPIO_151,
+ P7_LCD_0_DEN,
+ P7_GPIO_152,
+ P7_CAM_1_VS,
+ P7_LCD_0_HS,
+ P7_GPIO_153,
+ P7_CAM_1_HS,
+ P7_LCD_0_DENa,
+ P7_LCD_0_VS,
+ P7_GPIO_154,
+ P7_CAM_0_VS,
+ P7_LCD_0_DATA00,
+ P7_GPIO_155,
+ P7_CAM_0_HS,
+ P7_LCD_0_DATA01,
+ P7_GPIO_156,
+ P7_CAM_2_VS,
+ P7_LCD_0_DATA02,
+ P7_GPIO_157,
+ P7_CAM_2_HS,
+ P7_LCD_0_DATA03,
+ P7_GPIO_158,
+ P7_CAM_3_VS,
+ P7_LCD_0_DATA04,
+ P7_GPIO_159,
+ P7_CAM_3_HS,
+ P7_LCD_0_DATA05,
+ P7_GPIO_160,
+ P7_CAM_5_VS,
+ P7_LCD_0_DATA06,
+ P7_GPIO_161,
+ P7_CAM_5_HS,
+ P7_LCD_0_DATA07,
+ P7_GPIO_162,
+ P7_CAM_0_DATA08,
+ P7_LCD_0_DATA08,
+ P7_GPIO_163,
+ P7_CAM_0_DATA09,
+ P7_CAM_0_DATA08a,
+ P7_LCD_0_DATA09,
+ P7_GPIO_164,
+ P7_CAM_0_DATA10,
+ P7_CAM_0_DATA09a,
+ P7_LCD_0_DATA10,
+ P7_GPIO_165,
+ P7_CAM_0_DATA11,
+ P7_CAM_0_DATA10a,
+ P7_LCD_0_DATA11,
+ P7_GPIO_166,
+ P7_CAM_0_DATA12,
+ P7_CAM_0_DATA11a,
+ P7_LCD_0_DATA12,
+ P7_GPIO_167,
+ P7_CAM_0_DATA13,
+ P7_CAM_0_DATA12a,
+ P7_LCD_0_DATA13,
+ P7_GPIO_168,
+ P7_CAM_0_DATA14,
+ P7_CAM_0_DATA13a,
+ P7_LCD_0_DATA14,
+ P7_GPIO_169,
+ P7_CAM_0_DATA15,
+ P7_CAM_0_DATA14a,
+ P7_LCD_0_DATA15,
+ P7_GPIO_170,
+ P7_CAM_0_DATA16,
+ P7_CAM_0_DATA15a,
+ P7_LCD_0_DATA16,
+ P7_GPIO_171,
+ P7_CAM_0_DATA17,
+ P7_LCD_0_DATA17,
+ P7_GPIO_172,
+ P7_CAM_0_DATA18,
+ P7_LCD_0_DATA18,
+ P7_GPIO_173,
+ P7_CAM_0_DATA19,
+ P7_LCD_0_DATA19,
+ P7_GPIO_174,
+ P7_CAM_0_DATA20,
+ P7_LCD_0_DATA20,
+ P7_GPIO_175,
+ P7_CAM_0_DATA21,
+ P7_CAM_3_DATA09a,
+ P7_LCD_0_DATA21,
+ P7_GPIO_176,
+ P7_CAM_0_DATA22,
+ P7_CAM_3_DATA08a,
+ P7_LCD_0_DATA22,
+ P7_GPIO_177,
+ P7_CAM_0_DATA23,
+ P7_LCD_0_DATA23,
+ P7_GPIO_178,
+ P7_CAM_4_VS,
+ P7_LCD_0_CLK,
+ P7_GPIO_179,
+ P7_CAM_4_HS,
+ P7_CAM_0_CLKa,
+ P7_LCD_1_HS,
+ P7_GPIO_180,
+ P7_CAM_2_CLK,
+ P7_LCD_1_DENa,
+ P7_LCD_1_VS,
+ P7_GPIO_181,
+ P7_CAM_2_DATA00,
+ P7_CPU_TRACECLKOUT,
+ P7_LCD_1_CLK,
+ P7_GPIO_182,
+ P7_CAM_2_DATA01,
+ P7_CPU_TRACECTL,
+ P7_LCD_1_DATA00,
+ P7_GPIO_183,
+ P7_CAM_2_DATA02,
+ P7_CPU_TRACEDATA_00,
+ P7_LCD_1_DATA01,
+ P7_GPIO_184,
+ P7_CAM_2_DATA03,
+ P7_CPU_TRACEDATA_01,
+ P7_LCD_1_DATA02,
+ P7_GPIO_185,
+ P7_CAM_2_DATA04,
+ P7_CPU_TRACEDATA_02,
+ P7_LCD_1_DATA03,
+ P7_GPIO_186,
+ P7_CAM_2_DATA05,
+ P7_CPU_TRACEDATA_03,
+ P7_LCD_1_DATA04,
+ P7_GPIO_187,
+ P7_CAM_2_DATA06,
+ P7_CPU_TRACEDATA_04,
+ P7_LCD_1_DATA05,
+ P7_GPIO_188,
+ P7_CAM_2_DATA07,
+ P7_CPU_TRACEDATA_05,
+ P7_LCD_1_DATA06,
+ P7_GPIO_189,
+ P7_CAM_3_CLK,
+ P7_CPU_TRACEDATA_06,
+ P7_LCD_1_DATA07,
+ P7_GPIO_190,
+ P7_CAM_3_DATA00,
+ P7_CPU_TRACEDATA_07,
+ P7_LCD_1_DATA08,
+ P7_GPIO_191,
+ P7_CAM_3_DATA01,
+ P7_CPU_TRACEDATA_08,
+ P7_LCD_1_DATA09,
+ P7_GPIO_192,
+ P7_CAM_3_DATA02,
+ P7_CPU_TRACEDATA_09,
+ P7_LCD_1_DATA10,
+ P7_GPIO_193,
+ P7_CAM_3_DATA03,
+ P7_CPU_TRACEDATA_10,
+ P7_LCD_1_DATA11,
+ P7_GPIO_194,
+ P7_CAM_3_DATA04,
+ P7_CPU_TRACEDATA_11,
+ P7_LCD_1_DATA12,
+ P7_GPIO_195,
+ P7_CAM_3_DATA05,
+ P7_CPU_TRACEDATA_12,
+ P7_LCD_1_DATA13,
+ P7_GPIO_196,
+ P7_CAM_3_DATA06,
+ P7_CPU_TRACEDATA_13,
+ P7_LCD_1_DATA14,
+ P7_GPIO_197,
+ P7_CAM_3_DATA07,
+ P7_CPU_TRACEDATA_14,
+ P7_LCD_1_DATA15,
+ P7_GPIO_198,
+ P7_CAM_4_CLK,
+ P7_CPU_TRACEDATA_15,
+ P7_LCD_1_DATA16,
+ P7_GPIO_199,
+ P7_CAM_4_DATA00,
+ P7_LCD_1_DATA17,
+ P7_GPIO_200,
+ P7_CAM_4_DATA01,
+ P7_LCD_1_DATA18,
+ P7_GPIO_201,
+ P7_CAM_4_DATA02,
+ P7_LCD_1_DATA19,
+ P7_GPIO_202,
+ P7_CAM_4_DATA03,
+ P7_LCD_1_DATA20,
+ P7_GPIO_203,
+ P7_CAM_4_DATA04,
+ P7_LCD_1_DATA21,
+ P7_GPIO_204,
+ P7_CAM_4_DATA05,
+ P7_LCD_1_DATA22,
+ P7_GPIO_205,
+ P7_CAM_4_DATA06,
+ P7_LCD_1_DATA23,
+ P7_GPIO_206,
+ P7_CAM_4_DATA07,
+ P7_LCD_1_DEN,
+ P7_GPIO_207,
+ P7_CAM_1_HSa,
+ P7_LCD_1_CLKa,
+ P7_CAM_0_CLK,
+ P7_GPIO_208,
+ P7_CAM_1_CLKa,
+ P7_CAM_0_DATA00,
+ P7_GPIO_209,
+ P7_CAM_1_DATA08,
+ P7_CAM_0_DATA01,
+ P7_GPIO_210,
+ P7_CAM_1_DATA09,
+ P7_CAM_0_DATA02,
+ P7_GPIO_211,
+ P7_CAM_1_DATA10,
+ P7_CAM_4_DATA08,
+ P7_CAM_0_DATA03,
+ P7_GPIO_212,
+ P7_CAM_1_DATA11,
+ P7_CAM_4_DATA09,
+ P7_CAM_0_DATA04,
+ P7_GPIO_213,
+ P7_CAM_1_DATA12,
+ P7_CAM_5_DATA08,
+ P7_CAM_0_DATA05,
+ P7_GPIO_214,
+ P7_CAM_1_DATA13,
+ P7_CAM_5_DATA09,
+ P7_CAM_0_DATA06,
+ P7_GPIO_215,
+ P7_CAM_1_DATA14,
+ P7_CAM_2_DATA08,
+ P7_CAM_0_DATA07,
+ P7_GPIO_216,
+ P7_CAM_1_DATA15,
+ P7_CAM_2_DATA09,
+ P7_CAM_1_CLK,
+ P7_GPIO_217,
+ P7_SPI_00b,
+ P7_CAM_1_DATA00,
+ P7_GPIO_218,
+ P7_SPI_01b,
+ P7_CAM_0_VSa,
+ P7_CAM_1_DATA01,
+ P7_GPIO_219,
+ P7_SPI_02b,
+ P7_CAM_0_HSa,
+ P7_PAR_NCS,
+ P7_PAR_NRS,
+ P7_PAR_NRD,
+ P7_PAR_NWR,
+ P7_PAR_D00,
+ P7_PAR_D01,
+ P7_PAR_D02,
+ P7_PAR_D03,
+ P7_PAR_D04,
+ P7_PAR_D05,
+ P7_PAR_D06,
+ P7_PAR_D07,
+ P7_PAR_D08,
+ P7_PAR_D09,
+ P7_PAR_D10,
+ P7_PAR_D11,
+ P7_PAR_D12,
+ P7_PAR_D13,
+ P7_PAR_D14,
+ P7_PAR_D15,
+ P7_N_FUNCTIONS,
+};
+
+#endif /* CONFIG_PINCTRL_PARROT7 */
+
+#endif /*_ARCH_PARROT7_PINCTRL_H */
diff --git a/arch/arm/mach-parrot7/pindesc-r1.c b/arch/arm/mach-parrot7/pindesc-r1.c
new file mode 100644
index 00000000000000..34c8fc80fa11f7
--- /dev/null
+++ b/arch/arm/mach-parrot7/pindesc-r1.c
@@ -0,0 +1,808 @@
+#include "common.h"
+#include "pinctrl.h"
+
+/*
+ * Physical pin descriptors
+ *
+ * Pins identifiers use the GPIO numbering scheme. Names are assigned by
+ * concatenating all possible multiplexing functions a pin may be used with.
+ *
+ * Content was generated with the help of the genpin.sh script located in
+ * this directory.
+ */
+static struct pinctrl_pin_desc const p7_pin_descs_r1[] = { /* one entry per physical pad; index == pin number == GPIO number (0..201) */
+	P7CTL_INIT_PIN(0, "CAM_1_DATA02__GPIO_000__SPI_03b__CAM_3_VSa"),
+	P7CTL_INIT_PIN(1, "CAM_1_DATA03__GPIO_001__SPI_04b__CAM_3_HSa"),
+	P7CTL_INIT_PIN(2, "CAM_1_DATA04__GPIO_002__SPI_05b__CAM_4_VSa"),
+	P7CTL_INIT_PIN(3, "CAM_1_DATA05__GPIO_003__SPI_06b__CAM_4_HSa"),
+	P7CTL_INIT_PIN(4, "CAM_1_DATA06__GPIO_004__SPI_07b__CAM_5_VSa"),
+	P7CTL_INIT_PIN(5, "CAM_1_DATA07__GPIO_005__SPI_08b__CAM_5_HSa"),
+	P7CTL_INIT_PIN(6, "CAM_5_CLK__GPIO_006__SD_2_CLK"),
+	P7CTL_INIT_PIN(7, "CAM_5_DATA00__GPIO_007__SD_2_CMD"),
+	P7CTL_INIT_PIN(8, "CAM_5_DATA01__GPIO_008__SD_2_DAT00"),
+	P7CTL_INIT_PIN(9, "CAM_5_DATA02__GPIO_009__SD_2_DAT01"),
+	P7CTL_INIT_PIN(10, "CAM_5_DATA03__GPIO_010__SD_2_DAT02"),
+	P7CTL_INIT_PIN(11, "CAM_5_DATA04__GPIO_011__SD_2_DAT03"),
+	P7CTL_INIT_PIN(12, "CAM_5_DATA05__GPIO_012"),
+	P7CTL_INIT_PIN(13, "CAM_5_DATA06__GPIO_013"),
+	P7CTL_INIT_PIN(14, "CAM_5_DATA07__GPIO_014"),
+	P7CTL_INIT_PIN(15, "SD_1_CLK__GPIO_015__UART_5_TX"),
+	P7CTL_INIT_PIN(16, "SD_1_CMD__GPIO_016__UART_5_RX"),
+	P7CTL_INIT_PIN(17, "SD_1_DAT00__GPIO_017__UART_6_TX"),
+	P7CTL_INIT_PIN(18, "SD_1_DAT02__GPIO_018__UART_7_TX"), /* NOTE(review): DAT02 on pin 18 and DAT01 on pin 19 looks swapped, but it matches the r1 function table below — presumably intentional; confirm against datasheet */
+	P7CTL_INIT_PIN(19, "SD_1_DAT01__GPIO_019__UART_6_RX"),
+	P7CTL_INIT_PIN(20, "SD_1_DAT03__GPIO_020__UART_7_RX"),
+	P7CTL_INIT_PIN(21, "SD_0_CLK__GPIO_021"),
+	P7CTL_INIT_PIN(22, "SD_0_CMD__GPIO_022"),
+	P7CTL_INIT_PIN(23, "SD_0_DAT00__GPIO_023"),
+	P7CTL_INIT_PIN(24, "SD_0_DAT01__GPIO_024"),
+	P7CTL_INIT_PIN(25, "SD_0_DAT02__GPIO_025"),
+	P7CTL_INIT_PIN(26, "SD_0_DAT03__GPIO_026"),
+	P7CTL_INIT_PIN(27, "NAND_NCE__GPIO_027__SPI_00a"),
+	P7CTL_INIT_PIN(28, "NAND_NR__GPIO_028__SPI_01a"),
+	P7CTL_INIT_PIN(29, "NAND_NW__GPIO_029__SPI_02a"),
+	P7CTL_INIT_PIN(30, "NAND_AL__GPIO_030__SPI_03a"),
+	P7CTL_INIT_PIN(31, "NAND_CL__GPIO_031__SPI_04a"),
+	P7CTL_INIT_PIN(32, "NAND_RNB__GPIO_032__SPI_05a"),
+	P7CTL_INIT_PIN(33, "NAND_DQS__GPIO_033__CAN1_RXa"),
+	P7CTL_INIT_PIN(34, "NAND_DATA_00__GPIO_034__SPI_06a"),
+	P7CTL_INIT_PIN(35, "NAND_DATA_01__GPIO_035__SPI_07a"),
+	P7CTL_INIT_PIN(36, "NAND_DATA_02__GPIO_036__SPI_08a"),
+	P7CTL_INIT_PIN(37, "NAND_DATA_03__GPIO_037__SPI_09a"),
+	P7CTL_INIT_PIN(38, "NAND_DATA_04__GPIO_038"),
+	P7CTL_INIT_PIN(39, "NAND_DATA_05__GPIO_039"),
+	P7CTL_INIT_PIN(40, "NAND_DATA_06__GPIO_040"),
+	P7CTL_INIT_PIN(41, "NAND_DATA_07__GPIO_041"),
+	P7CTL_INIT_PIN(42, "NAND_WP__GPIO_042__CAN1_TXa"),
+	P7CTL_INIT_PIN(43, "FORCE_POWER_ON__GPIO_043"),
+	P7CTL_INIT_PIN(44, "TRST_N__GPIO_044"),
+	P7CTL_INIT_PIN(45, "TDI__GPIO_045"),
+	P7CTL_INIT_PIN(46, "TDO__GPIO_046"),
+	P7CTL_INIT_PIN(47, "TCK__GPIO_047"),
+	P7CTL_INIT_PIN(48, "TMS__GPIO_048"),
+	P7CTL_INIT_PIN(49, "GPIO_049"),
+	P7CTL_INIT_PIN(50, "GPIO_050"),
+	P7CTL_INIT_PIN(51, "UART_1_RX__GPIO_051"),
+	P7CTL_INIT_PIN(52, "UART_1_TX__GPIO_052"),
+	P7CTL_INIT_PIN(53, "UART_1_RTS__GPIO_053__CAN0_RXb"),
+	P7CTL_INIT_PIN(54, "UART_1_CTS__GPIO_054__CAN0_TXb"),
+	P7CTL_INIT_PIN(55, "UART_0_RX__GPIO_055"),
+	P7CTL_INIT_PIN(56, "UART_0_TX__GPIO_056"),
+	P7CTL_INIT_PIN(57, "UART_0_RTS__GPIO_057"),
+	P7CTL_INIT_PIN(58, "UART_0_CTS__GPIO_058"),
+	P7CTL_INIT_PIN(59, "I2C_2_DAT__GPIO_059__I2C_2_SL_DAT"),
+	P7CTL_INIT_PIN(60, "I2C_2_CLK__GPIO_060__I2C_2_SL_CLK"),
+	P7CTL_INIT_PIN(61, "I2C_1_DAT__GPIO_061__I2C_1_SL_DAT"),
+	P7CTL_INIT_PIN(62, "I2C_1_CLK__GPIO_062__I2C_1_SL_CLK"),
+	P7CTL_INIT_PIN(63, "I2C_0_DAT__GPIO_063__I2C_0_SL_DAT"),
+	P7CTL_INIT_PIN(64, "I2C_0_CLK__GPIO_064__I2C_0_SL_CLK"),
+	P7CTL_INIT_PIN(65, "UART_4_RX__GPIO_065"),
+	P7CTL_INIT_PIN(66, "UART_4_TX__GPIO_066"),
+	P7CTL_INIT_PIN(67, "UART_3_RX__GPIO_067"),
+	P7CTL_INIT_PIN(68, "UART_3_TX__GPIO_068"),
+	P7CTL_INIT_PIN(69, "UART_2_RX__GPIO_069__CAN1_TXb"),
+	P7CTL_INIT_PIN(70, "UART_2_TX__GPIO_070__CAN1_RXb"),
+	P7CTL_INIT_PIN(71, "SPI_09b__GPIO_071__PWM_15"),
+	P7CTL_INIT_PIN(72, "SPI_10b__GPIO_072"),
+	P7CTL_INIT_PIN(73, "SPI_11b__GPIO_073"),
+	P7CTL_INIT_PIN(74, "SPI_12b__GPIO_074"),
+	P7CTL_INIT_PIN(75, "SPI_13b__GPIO_075"),
+	P7CTL_INIT_PIN(76, "SPI_14b__GPIO_076"),
+	P7CTL_INIT_PIN(77, "SPI_00__GPIO_077__AAI_15__CAN1_TX"),
+	P7CTL_INIT_PIN(78, "SPI_01__GPIO_078__AAI_16__CAN1_RX"),
+	P7CTL_INIT_PIN(79, "SPI_02__GPIO_079__AAI_17__CAN0_TX"),
+	P7CTL_INIT_PIN(80, "SPI_03__GPIO_080__AAI_18__CAN0_RX"),
+	P7CTL_INIT_PIN(81, "SPI_04__GPIO_081__AAI_19"),
+	P7CTL_INIT_PIN(82, "SPI_05__GPIO_082__AAI_20"),
+	P7CTL_INIT_PIN(83, "SPI_06__GPIO_083__AAI_21"),
+	P7CTL_INIT_PIN(84, "SPI_07__GPIO_084__AAI_22"),
+	P7CTL_INIT_PIN(85, "SPI_08__GPIO_085__AAI_23"),
+	P7CTL_INIT_PIN(86, "SPI_09__GPIO_086__AAI_24"),
+	P7CTL_INIT_PIN(87, "SPI_10__GPIO_087__AAI_25"),
+	P7CTL_INIT_PIN(88, "SPI_11__GPIO_088__AAI_26"),
+	P7CTL_INIT_PIN(89, "SPI_12__GPIO_089__AAI_27"),
+	P7CTL_INIT_PIN(90, "SPI_13__GPIO_090__AAI_28"),
+	P7CTL_INIT_PIN(91, "SPI_14__GPIO_091__AAI_29"),
+	P7CTL_INIT_PIN(92, "SPI_15__GPIO_092__AAI_30"),
+	P7CTL_INIT_PIN(93, "P7MU_CLK_OUT__GPIO_093__PWM_15a"),
+	P7CTL_INIT_PIN(94, "REBOOT_P7MU__GPIO_094"),
+	P7CTL_INIT_PIN(95, "ULPI_0_DATA00__GPIO_095"),
+	P7CTL_INIT_PIN(96, "ULPI_0_DATA01__GPIO_096"),
+	P7CTL_INIT_PIN(97, "ULPI_0_DATA02__GPIO_097"),
+	P7CTL_INIT_PIN(98, "ULPI_0_DATA03__GPIO_098"),
+	P7CTL_INIT_PIN(99, "ULPI_0_DATA04__GPIO_099"),
+	P7CTL_INIT_PIN(100, "ULPI_0_DATA05__GPIO_100"),
+	P7CTL_INIT_PIN(101, "ULPI_0_DATA06__GPIO_101"),
+	P7CTL_INIT_PIN(102, "ULPI_0_DATA07__GPIO_102"),
+	P7CTL_INIT_PIN(103, "ULPI_0_NXT__GPIO_103"),
+	P7CTL_INIT_PIN(104, "ULPI_0_STP__GPIO_104"),
+	P7CTL_INIT_PIN(105, "ULPI_0_DIR__GPIO_105"),
+	P7CTL_INIT_PIN(106, "ULPI_0_CLK__GPIO_106"),
+	P7CTL_INIT_PIN(107, "ULPI_1_DATA00__GPIO_107__I2C_2_CLKa"),
+	P7CTL_INIT_PIN(108, "ULPI_1_DATA01__GPIO_108__I2C_2_DATa"),
+	P7CTL_INIT_PIN(109, "ULPI_1_DATA02__GPIO_109__I2C_2_CLKb"),
+	P7CTL_INIT_PIN(110, "ULPI_1_DATA03__GPIO_110__I2C_2_DATb"),
+	P7CTL_INIT_PIN(111, "ULPI_1_DATA04__GPIO_111__I2C_2_CLKc"),
+	P7CTL_INIT_PIN(112, "ULPI_1_DATA05__GPIO_112__I2C_2_DATc"),
+	P7CTL_INIT_PIN(113, "ULPI_1_DATA06__GPIO_113__I2C_2_CLKd"),
+	P7CTL_INIT_PIN(114, "ULPI_1_DATA07__GPIO_114__I2C_2_DATd"),
+	P7CTL_INIT_PIN(115, "ULPI_1_NXT__GPIO_115__I2C_2_CLKe"),
+	P7CTL_INIT_PIN(116, "ULPI_1_STP__GPIO_116__I2C_2_DATe"),
+	P7CTL_INIT_PIN(117, "ULPI_1_DIR__GPIO_117"),
+	P7CTL_INIT_PIN(118, "ULPI_1_CLK__GPIO_118"),
+	P7CTL_INIT_PIN(119, "AAI_00__GPIO_119__PWM_00"),
+	P7CTL_INIT_PIN(120, "AAI_01__GPIO_120__PWM_01"),
+	P7CTL_INIT_PIN(121, "AAI_02__GPIO_121__PWM_02"),
+	P7CTL_INIT_PIN(122, "AAI_03__GPIO_122__PWM_03"),
+	P7CTL_INIT_PIN(123, "AAI_04__GPIO_123__PWM_04"),
+	P7CTL_INIT_PIN(124, "AAI_05__GPIO_124__PWM_05"),
+	P7CTL_INIT_PIN(125, "AAI_06__GPIO_125__PWM_06"),
+	P7CTL_INIT_PIN(126, "AAI_07__GPIO_126__PWM_07"),
+	P7CTL_INIT_PIN(127, "AAI_08__GPIO_127__PWM_08"),
+	P7CTL_INIT_PIN(128, "AAI_09__GPIO_128__PWM_09"),
+	P7CTL_INIT_PIN(129, "AAI_10__GPIO_129__PWM_10"),
+	P7CTL_INIT_PIN(130, "AAI_11__GPIO_130__PWM_11"),
+	P7CTL_INIT_PIN(131, "AAI_12__GPIO_131__PWM_12"),
+	P7CTL_INIT_PIN(132, "AAI_13__GPIO_132__PWM_13"),
+	P7CTL_INIT_PIN(133, "AAI_14__GPIO_133__PWM_14"),
+	P7CTL_INIT_PIN(134, "LCD_0_DEN__GPIO_134__CAM_1_VS"),
+	P7CTL_INIT_PIN(135, "LCD_0_HS__GPIO_135__CAM_1_HS__LCD_0_DENa"),
+	P7CTL_INIT_PIN(136, "LCD_0_VS__GPIO_136__CAM_0_VS__PAR_NCS"),
+	P7CTL_INIT_PIN(137, "LCD_0_DATA00__GPIO_137__CAM_0_HS__PAR_NRS"),
+	P7CTL_INIT_PIN(138, "LCD_0_DATA01__GPIO_138__CAM_2_VS__PAR_NRD"),
+	P7CTL_INIT_PIN(139, "LCD_0_DATA02__GPIO_139__CAM_2_HS__PAR_NWR"),
+	P7CTL_INIT_PIN(140, "LCD_0_DATA03__GPIO_140__CAM_3_VS__PAR_D00"),
+	P7CTL_INIT_PIN(141, "LCD_0_DATA04__GPIO_141__CAM_3_HS__PAR_D01"),
+	P7CTL_INIT_PIN(142, "LCD_0_DATA05__GPIO_142__CAM_5_VS__PAR_D02"),
+	P7CTL_INIT_PIN(143, "LCD_0_DATA06__GPIO_143__CAM_5_HS__PAR_D03"),
+	P7CTL_INIT_PIN(144, "LCD_0_DATA07__GPIO_144__CAM_0_DATA08__PAR_D04"),
+	P7CTL_INIT_PIN(145, "LCD_0_DATA08__GPIO_145__CAM_0_DATA09__PAR_D05"),
+	P7CTL_INIT_PIN(146, "LCD_0_DATA09__GPIO_146__CAM_0_DATA10__PAR_D06"),
+	P7CTL_INIT_PIN(147, "LCD_0_DATA10__GPIO_147__CAM_0_DATA11__PAR_D07"),
+	P7CTL_INIT_PIN(148, "LCD_0_DATA11__GPIO_148__CAM_0_DATA12__PAR_D08"),
+	P7CTL_INIT_PIN(149, "LCD_0_DATA12__GPIO_149__CAM_0_DATA13__PAR_D09"),
+	P7CTL_INIT_PIN(150, "LCD_0_DATA13__GPIO_150__CAM_0_DATA14__PAR_D10"),
+	P7CTL_INIT_PIN(151, "LCD_0_DATA14__GPIO_151__CAM_0_DATA15__PAR_D11"),
+	P7CTL_INIT_PIN(152, "LCD_0_DATA15__GPIO_152__CAM_0_DATA16__PAR_D12"),
+	P7CTL_INIT_PIN(153, "LCD_0_DATA16__GPIO_153__CAM_0_DATA17__PAR_D13"),
+	P7CTL_INIT_PIN(154, "LCD_0_DATA17__GPIO_154__CAM_0_DATA18__PAR_D14"),
+	P7CTL_INIT_PIN(155, "LCD_0_DATA18__GPIO_155__CAM_0_DATA19__PAR_D15"),
+	P7CTL_INIT_PIN(156, "LCD_0_DATA19__GPIO_156__CAM_0_DATA20"),
+	P7CTL_INIT_PIN(157, "LCD_0_DATA20__GPIO_157__CAM_0_DATA21"),
+	P7CTL_INIT_PIN(158, "LCD_0_DATA21__GPIO_158__CAM_0_DATA22"),
+	P7CTL_INIT_PIN(159, "LCD_0_DATA22__GPIO_159__CAM_0_DATA23"),
+	P7CTL_INIT_PIN(160, "LCD_0_DATA23__GPIO_160__CAM_4_VS"),
+	P7CTL_INIT_PIN(161, "LCD_0_CLK__GPIO_161__CAM_4_HS"),
+	P7CTL_INIT_PIN(162, "LCD_1_HS__GPIO_162__CAM_2_CLK__LCD_1_DENa"),
+	P7CTL_INIT_PIN(163, "LCD_1_VS__GPIO_163__CAM_2_DATA00__CPU_TRACECLKOUT"),
+	P7CTL_INIT_PIN(164, "LCD_1_CLK__GPIO_164__CAM_2_DATA01__CPU_TRACECTL"),
+	P7CTL_INIT_PIN(165, "LCD_1_DATA00__GPIO_165__CAM_2_DATA02__CPU_TRACEDATA_00"),
+	P7CTL_INIT_PIN(166, "LCD_1_DATA01__GPIO_166__CAM_2_DATA03__CPU_TRACEDATA_01"),
+	P7CTL_INIT_PIN(167, "LCD_1_DATA02__GPIO_167__CAM_2_DATA04__CPU_TRACEDATA_02"),
+	P7CTL_INIT_PIN(168, "LCD_1_DATA03__GPIO_168__CAM_2_DATA05__CPU_TRACEDATA_03"),
+	P7CTL_INIT_PIN(169, "LCD_1_DATA04__GPIO_169__CAM_2_DATA06__CPU_TRACEDATA_04"),
+	P7CTL_INIT_PIN(170, "LCD_1_DATA05__GPIO_170__CAM_2_DATA07__CPU_TRACEDATA_05"),
+	P7CTL_INIT_PIN(171, "LCD_1_DATA06__GPIO_171__CAM_3_CLK__CPU_TRACEDATA_06"),
+	P7CTL_INIT_PIN(172, "LCD_1_DATA07__GPIO_172__CAM_3_DATA00__CPU_TRACEDATA_07"),
+	P7CTL_INIT_PIN(173, "LCD_1_DATA08__GPIO_173__CAM_3_DATA01__CPU_TRACEDATA_08"),
+	P7CTL_INIT_PIN(174, "LCD_1_DATA09__GPIO_174__CAM_3_DATA02__CPU_TRACEDATA_09"),
+	P7CTL_INIT_PIN(175, "LCD_1_DATA10__GPIO_175__CAM_3_DATA03__CPU_TRACEDATA_10"),
+	P7CTL_INIT_PIN(176, "LCD_1_DATA11__GPIO_176__CAM_3_DATA04__CPU_TRACEDATA_11"),
+	P7CTL_INIT_PIN(177, "LCD_1_DATA12__GPIO_177__CAM_3_DATA05__CPU_TRACEDATA_12"),
+	P7CTL_INIT_PIN(178, "LCD_1_DATA13__GPIO_178__CAM_3_DATA06__CPU_TRACEDATA_13"),
+	P7CTL_INIT_PIN(179, "LCD_1_DATA14__GPIO_179__CAM_3_DATA07__CPU_TRACEDATA_14"),
+	P7CTL_INIT_PIN(180, "LCD_1_DATA15__GPIO_180__CAM_4_CLK__CPU_TRACEDATA_15"),
+	P7CTL_INIT_PIN(181, "LCD_1_DATA16__GPIO_181__CAM_4_DATA00"),
+	P7CTL_INIT_PIN(182, "LCD_1_DATA17__GPIO_182__CAM_4_DATA01"),
+	P7CTL_INIT_PIN(183, "LCD_1_DATA18__GPIO_183__CAM_4_DATA02"),
+	P7CTL_INIT_PIN(184, "LCD_1_DATA19__GPIO_184__CAM_4_DATA03"),
+	P7CTL_INIT_PIN(185, "LCD_1_DATA20__GPIO_185__CAM_4_DATA04"),
+	P7CTL_INIT_PIN(186, "LCD_1_DATA21__GPIO_186__CAM_4_DATA05"),
+	P7CTL_INIT_PIN(187, "LCD_1_DATA22__GPIO_187__CAM_4_DATA06"),
+	P7CTL_INIT_PIN(188, "LCD_1_DATA23__GPIO_188__CAM_4_DATA07"),
+	P7CTL_INIT_PIN(189, "LCD_1_DEN__GPIO_189__CAM_1_HSa__LCD_1_CLKa"),
+	P7CTL_INIT_PIN(190, "CAM_0_CLK__GPIO_190"),
+	P7CTL_INIT_PIN(191, "CAM_0_DATA00__GPIO_191__CAM_1_DATA08"),
+	P7CTL_INIT_PIN(192, "CAM_0_DATA01__GPIO_192__CAM_1_DATA09"),
+	P7CTL_INIT_PIN(193, "CAM_0_DATA02__GPIO_193__CAM_1_DATA10"),
+	P7CTL_INIT_PIN(194, "CAM_0_DATA03__GPIO_194__CAM_1_DATA11"),
+	P7CTL_INIT_PIN(195, "CAM_0_DATA04__GPIO_195__CAM_1_DATA12"),
+	P7CTL_INIT_PIN(196, "CAM_0_DATA05__GPIO_196__CAM_1_DATA13"),
+	P7CTL_INIT_PIN(197, "CAM_0_DATA06__GPIO_197__CAM_1_DATA14"),
+	P7CTL_INIT_PIN(198, "CAM_0_DATA07__GPIO_198__CAM_1_DATA15"),
+	P7CTL_INIT_PIN(199, "CAM_1_CLK__GPIO_199__SPI_00b"),
+	P7CTL_INIT_PIN(200, "CAM_1_DATA00__GPIO_200__SPI_01b__CAM_0_VSa"),
+	P7CTL_INIT_PIN(201, "CAM_1_DATA01__GPIO_201__SPI_02b__CAM_0_HSa") /* last pad for the r1 silicon revision */
+};
+
+/*
+ * Multiplexing functions
+ *
+ * Each physical pin may be used with up to 4 multiplexing functions.
+ *
+ * Content was generated with the help of the genpin.sh script located in
+ * this directory.
+ */
+static struct p7ctl_function const p7_pin_funcs_r1[P7_N_FUNCTIONS] = {
+ [P7_CAM_1_DATA02] = P7CTL_INIT_FUNC(CAM_1_DATA02, 0, 0),
+ [P7_GPIO_000] = P7CTL_INIT_FUNC(GPIO_000, 0, 1),
+ [P7_SPI_03b] = P7CTL_INIT_FUNC(SPI_03b, 0, 2),
+ [P7_CAM_3_VSa] = P7CTL_INIT_FUNC(CAM_3_VSa, 0, 3),
+ [P7_CAM_1_DATA03] = P7CTL_INIT_FUNC(CAM_1_DATA03, 1, 0),
+ [P7_GPIO_001] = P7CTL_INIT_FUNC(GPIO_001, 1, 1),
+ [P7_SPI_04b] = P7CTL_INIT_FUNC(SPI_04b, 1, 2),
+ [P7_CAM_3_HSa] = P7CTL_INIT_FUNC(CAM_3_HSa, 1, 3),
+ [P7_CAM_1_DATA04] = P7CTL_INIT_FUNC(CAM_1_DATA04, 2, 0),
+ [P7_GPIO_002] = P7CTL_INIT_FUNC(GPIO_002, 2, 1),
+ [P7_SPI_05b] = P7CTL_INIT_FUNC(SPI_05b, 2, 2),
+ [P7_CAM_4_VSa] = P7CTL_INIT_FUNC(CAM_4_VSa, 2, 3),
+ [P7_CAM_1_DATA05] = P7CTL_INIT_FUNC(CAM_1_DATA05, 3, 0),
+ [P7_GPIO_003] = P7CTL_INIT_FUNC(GPIO_003, 3, 1),
+ [P7_SPI_06b] = P7CTL_INIT_FUNC(SPI_06b, 3, 2),
+ [P7_CAM_4_HSa] = P7CTL_INIT_FUNC(CAM_4_HSa, 3, 3),
+ [P7_CAM_1_DATA06] = P7CTL_INIT_FUNC(CAM_1_DATA06, 4, 0),
+ [P7_GPIO_004] = P7CTL_INIT_FUNC(GPIO_004, 4, 1),
+ [P7_SPI_07b] = P7CTL_INIT_FUNC(SPI_07b, 4, 2),
+ [P7_CAM_5_VSa] = P7CTL_INIT_FUNC(CAM_5_VSa, 4, 3),
+ [P7_CAM_1_DATA07] = P7CTL_INIT_FUNC(CAM_1_DATA07, 5, 0),
+ [P7_GPIO_005] = P7CTL_INIT_FUNC(GPIO_005, 5, 1),
+ [P7_SPI_08b] = P7CTL_INIT_FUNC(SPI_08b, 5, 2),
+ [P7_CAM_5_HSa] = P7CTL_INIT_FUNC(CAM_5_HSa, 5, 3),
+ [P7_CAM_5_CLK] = P7CTL_INIT_FUNC(CAM_5_CLK, 6, 0),
+ [P7_GPIO_006] = P7CTL_INIT_FUNC(GPIO_006, 6, 1),
+ [P7_SD_2_CLK] = P7CTL_INIT_FUNC(SD_2_CLK, 6, 2),
+ [P7_CAM_5_DATA00] = P7CTL_INIT_FUNC(CAM_5_DATA00, 7, 0),
+ [P7_GPIO_007] = P7CTL_INIT_FUNC(GPIO_007, 7, 1),
+ [P7_SD_2_CMD] = P7CTL_INIT_FUNC(SD_2_CMD, 7, 2),
+ [P7_CAM_5_DATA01] = P7CTL_INIT_FUNC(CAM_5_DATA01, 8, 0),
+ [P7_GPIO_008] = P7CTL_INIT_FUNC(GPIO_008, 8, 1),
+ [P7_SD_2_DAT00] = P7CTL_INIT_FUNC(SD_2_DAT00, 8, 2),
+ [P7_CAM_5_DATA02] = P7CTL_INIT_FUNC(CAM_5_DATA02, 9, 0),
+ [P7_GPIO_009] = P7CTL_INIT_FUNC(GPIO_009, 9, 1),
+ [P7_SD_2_DAT01] = P7CTL_INIT_FUNC(SD_2_DAT01, 9, 2),
+ [P7_CAM_5_DATA03] = P7CTL_INIT_FUNC(CAM_5_DATA03, 10, 0),
+ [P7_GPIO_010] = P7CTL_INIT_FUNC(GPIO_010, 10, 1),
+ [P7_SD_2_DAT02] = P7CTL_INIT_FUNC(SD_2_DAT02, 10, 2),
+ [P7_CAM_5_DATA04] = P7CTL_INIT_FUNC(CAM_5_DATA04, 11, 0),
+ [P7_GPIO_011] = P7CTL_INIT_FUNC(GPIO_011, 11, 1),
+ [P7_SD_2_DAT03] = P7CTL_INIT_FUNC(SD_2_DAT03, 11, 2),
+ [P7_CAM_5_DATA05] = P7CTL_INIT_FUNC(CAM_5_DATA05, 12, 0),
+ [P7_GPIO_012] = P7CTL_INIT_FUNC(GPIO_012, 12, 1),
+ [P7_CAM_5_DATA06] = P7CTL_INIT_FUNC(CAM_5_DATA06, 13, 0),
+ [P7_GPIO_013] = P7CTL_INIT_FUNC(GPIO_013, 13, 1),
+ [P7_CAM_5_DATA07] = P7CTL_INIT_FUNC(CAM_5_DATA07, 14, 0),
+ [P7_GPIO_014] = P7CTL_INIT_FUNC(GPIO_014, 14, 1),
+ [P7_SD_1_CLK] = P7CTL_INIT_FUNC(SD_1_CLK, 15, 0),
+ [P7_UART_5_TX] = P7CTL_INIT_FUNC(UART_5_TX, 15, 2),
+ [P7_SD_1_CMD] = P7CTL_INIT_FUNC(SD_1_CMD, 16, 0),
+ [P7_UART_5_RX] = P7CTL_INIT_FUNC(UART_5_RX, 16, 2),
+ [P7_SD_1_DAT00] = P7CTL_INIT_FUNC(SD_1_DAT00, 17, 0),
+ [P7_UART_6_TX] = P7CTL_INIT_FUNC(UART_6_TX, 17, 2),
+ [P7_SD_1_DAT02] = P7CTL_INIT_FUNC(SD_1_DAT02, 18, 0),
+ [P7_UART_7_TX] = P7CTL_INIT_FUNC(UART_7_TX, 18, 2),
+ [P7_SD_1_DAT01] = P7CTL_INIT_FUNC(SD_1_DAT01, 19, 0),
+ [P7_UART_6_RX] = P7CTL_INIT_FUNC(UART_6_RX, 19, 2),
+ [P7_SD_1_DAT03] = P7CTL_INIT_FUNC(SD_1_DAT03, 20, 0),
+ [P7_UART_7_RX] = P7CTL_INIT_FUNC(UART_7_RX, 20, 2),
+ [P7_SD_0_CLK] = P7CTL_INIT_FUNC(SD_0_CLK, 21, 0),
+ [P7_SD_0_CMD] = P7CTL_INIT_FUNC(SD_0_CMD, 22, 0),
+ [P7_SD_0_DAT00] = P7CTL_INIT_FUNC(SD_0_DAT00, 23, 0),
+ [P7_SD_0_DAT01] = P7CTL_INIT_FUNC(SD_0_DAT01, 24, 0),
+ [P7_SD_0_DAT02] = P7CTL_INIT_FUNC(SD_0_DAT02, 25, 0),
+ [P7_SD_0_DAT03] = P7CTL_INIT_FUNC(SD_0_DAT03, 26, 0),
+ [P7_NAND_NCE] = P7CTL_INIT_FUNC(NAND_NCE, 27, 0),
+ [P7_SPI_00a] = P7CTL_INIT_FUNC(SPI_00a, 27, 2),
+ [P7_NAND_NR] = P7CTL_INIT_FUNC(NAND_NR, 28, 0),
+ [P7_SPI_01a] = P7CTL_INIT_FUNC(SPI_01a, 28, 2),
+ [P7_NAND_NW] = P7CTL_INIT_FUNC(NAND_NW, 29, 0),
+ [P7_SPI_02a] = P7CTL_INIT_FUNC(SPI_02a, 29, 2),
+ [P7_NAND_AL] = P7CTL_INIT_FUNC(NAND_AL, 30, 0),
+ [P7_SPI_03a] = P7CTL_INIT_FUNC(SPI_03a, 30, 2),
+ [P7_NAND_CL] = P7CTL_INIT_FUNC(NAND_CL, 31, 0),
+ [P7_SPI_04a] = P7CTL_INIT_FUNC(SPI_04a, 31, 2),
+ [P7_NAND_RNB] = P7CTL_INIT_FUNC(NAND_RNB, 32, 0),
+ [P7_SPI_05a] = P7CTL_INIT_FUNC(SPI_05a, 32, 2),
+ [P7_NAND_DQS] = P7CTL_INIT_FUNC(NAND_DQS, 33, 0),
+ [P7_GPIO_033] = P7CTL_INIT_FUNC(GPIO_033, 33, 1),
+ [P7_CAN1_RXa] = P7CTL_INIT_FUNC(CAN1_RXa, 33, 2),
+ [P7_NAND_DATA_00] = P7CTL_INIT_FUNC(NAND_DATA_00, 34, 0),
+ [P7_SPI_06a] = P7CTL_INIT_FUNC(SPI_06a, 34, 2),
+ [P7_NAND_DATA_01] = P7CTL_INIT_FUNC(NAND_DATA_01, 35, 0),
+ [P7_SPI_07a] = P7CTL_INIT_FUNC(SPI_07a, 35, 2),
+ [P7_NAND_DATA_02] = P7CTL_INIT_FUNC(NAND_DATA_02, 36, 0),
+ [P7_SPI_08a] = P7CTL_INIT_FUNC(SPI_08a, 36, 2),
+ [P7_NAND_DATA_03] = P7CTL_INIT_FUNC(NAND_DATA_03, 37, 0),
+ [P7_SPI_09a] = P7CTL_INIT_FUNC(SPI_09a, 37, 2),
+ [P7_NAND_DATA_04] = P7CTL_INIT_FUNC(NAND_DATA_04, 38, 0),
+ [P7_GPIO_038] = P7CTL_INIT_FUNC(GPIO_038, 38, 1),
+ [P7_NAND_DATA_05] = P7CTL_INIT_FUNC(NAND_DATA_05, 39, 0),
+ [P7_GPIO_039] = P7CTL_INIT_FUNC(GPIO_039, 39, 1),
+ [P7_NAND_DATA_06] = P7CTL_INIT_FUNC(NAND_DATA_06, 40, 0),
+ [P7_GPIO_040] = P7CTL_INIT_FUNC(GPIO_040, 40, 1),
+ [P7_NAND_DATA_07] = P7CTL_INIT_FUNC(NAND_DATA_07, 41, 0),
+ [P7_GPIO_041] = P7CTL_INIT_FUNC(GPIO_041, 41, 1),
+ [P7_NAND_WP] = P7CTL_INIT_FUNC(NAND_WP, 42, 0),
+ [P7_GPIO_042] = P7CTL_INIT_FUNC(GPIO_042, 42, 1),
+ [P7_CAN1_TXa] = P7CTL_INIT_FUNC(CAN1_TXa, 42, 2),
+ [P7_FORCE_POWER_ON] = P7CTL_INIT_FUNC(FORCE_POWER_ON, 43, 0),
+ [P7_TRST_N] = P7CTL_INIT_FUNC(TRST_N, 44, 0),
+ [P7_TDI] = P7CTL_INIT_FUNC(TDI, 45, 0),
+ [P7_TDO] = P7CTL_INIT_FUNC(TDO, 46, 0),
+ [P7_TCK] = P7CTL_INIT_FUNC(TCK, 47, 0),
+ [P7_TMS] = P7CTL_INIT_FUNC(TMS, 48, 0),
+ [P7_GPIO_049] = P7CTL_INIT_FUNC(GPIO_049, 49, 1),
+ [P7_GPIO_050] = P7CTL_INIT_FUNC(GPIO_050, 50, 1),
+ [P7_UART_1_RX] = P7CTL_INIT_FUNC(UART_1_RX, 51, 0),
+ [P7_UART_1_TX] = P7CTL_INIT_FUNC(UART_1_TX, 52, 0),
+ [P7_UART_1_RTS] = P7CTL_INIT_FUNC(UART_1_RTS, 53, 0),
+ [P7_CAN0_RXb] = P7CTL_INIT_FUNC(CAN0_RXb, 53, 2),
+ [P7_UART_1_CTS] = P7CTL_INIT_FUNC(UART_1_CTS, 54, 0),
+ [P7_CAN0_TXb] = P7CTL_INIT_FUNC(CAN0_TXb, 54, 2),
+ [P7_UART_0_RX] = P7CTL_INIT_FUNC(UART_0_RX, 55, 0),
+ [P7_UART_0_TX] = P7CTL_INIT_FUNC(UART_0_TX, 56, 0),
+ [P7_UART_0_RTS] = P7CTL_INIT_FUNC(UART_0_RTS, 57, 0),
+ [P7_UART_0_CTS] = P7CTL_INIT_FUNC(UART_0_CTS, 58, 0),
+ [P7_I2C_2_DAT] = P7CTL_INIT_FUNC(I2C_2_DAT, 59, 0),
+ [P7_GPIO_059] = P7CTL_INIT_FUNC(GPIO_059, 59, 1),
+ [P7_I2C_2_SL_DAT] = P7CTL_INIT_FUNC(I2C_2_SL_DAT, 59, 2),
+ [P7_I2C_2_CLK] = P7CTL_INIT_FUNC(I2C_2_CLK, 60, 0),
+ [P7_GPIO_060] = P7CTL_INIT_FUNC(GPIO_060, 60, 1),
+ [P7_I2C_2_SL_CLK] = P7CTL_INIT_FUNC(I2C_2_SL_CLK, 60, 2),
+ [P7_I2C_1_DAT] = P7CTL_INIT_FUNC(I2C_1_DAT, 61, 0),
+ [P7_GPIO_061] = P7CTL_INIT_FUNC(GPIO_061, 61, 1),
+ [P7_I2C_1_SL_DAT] = P7CTL_INIT_FUNC(I2C_1_SL_DAT, 61, 2),
+ [P7_I2C_1_CLK] = P7CTL_INIT_FUNC(I2C_1_CLK, 62, 0),
+ [P7_GPIO_062] = P7CTL_INIT_FUNC(GPIO_062, 62, 1),
+ [P7_I2C_1_SL_CLK] = P7CTL_INIT_FUNC(I2C_1_SL_CLK, 62, 2),
+ [P7_I2C_0_DAT] = P7CTL_INIT_FUNC(I2C_0_DAT, 63, 0),
+ [P7_GPIO_063] = P7CTL_INIT_FUNC(GPIO_063, 63, 1),
+ [P7_I2C_0_SL_DAT] = P7CTL_INIT_FUNC(I2C_0_SL_DAT, 63, 2),
+ [P7_I2C_0_CLK] = P7CTL_INIT_FUNC(I2C_0_CLK, 64, 0),
+ [P7_GPIO_064] = P7CTL_INIT_FUNC(GPIO_064, 64, 1),
+ [P7_I2C_0_SL_CLK] = P7CTL_INIT_FUNC(I2C_0_SL_CLK, 64, 2),
+ [P7_UART_4_RX] = P7CTL_INIT_FUNC(UART_4_RX, 65, 0),
+ [P7_GPIO_065] = P7CTL_INIT_FUNC(GPIO_065, 65, 1),
+ [P7_UART_4_TX] = P7CTL_INIT_FUNC(UART_4_TX, 66, 0),
+ [P7_GPIO_066] = P7CTL_INIT_FUNC(GPIO_066, 66, 1),
+ [P7_UART_3_RX] = P7CTL_INIT_FUNC(UART_3_RX, 67, 0),
+ [P7_GPIO_067] = P7CTL_INIT_FUNC(GPIO_067, 67, 1),
+ [P7_UART_3_TX] = P7CTL_INIT_FUNC(UART_3_TX, 68, 0),
+ [P7_GPIO_068] = P7CTL_INIT_FUNC(GPIO_068, 68, 1),
+ [P7_UART_2_RX] = P7CTL_INIT_FUNC(UART_2_RX, 69, 0),
+ [P7_CAN1_TXb] = P7CTL_INIT_FUNC(CAN1_TXb, 69, 2),
+ [P7_UART_2_TX] = P7CTL_INIT_FUNC(UART_2_TX, 70, 0),
+ [P7_CAN1_RXb] = P7CTL_INIT_FUNC(CAN1_RXb, 70, 2),
+ [P7_SPI_09b] = P7CTL_INIT_FUNC(SPI_09b, 71, 0),
+ [P7_GPIO_071] = P7CTL_INIT_FUNC(GPIO_071, 71, 1),
+ [P7_PWM_15] = P7CTL_INIT_FUNC(PWM_15, 71, 2),
+ [P7_SPI_10b] = P7CTL_INIT_FUNC(SPI_10b, 72, 0),
+ [P7_GPIO_072] = P7CTL_INIT_FUNC(GPIO_072, 72, 1),
+ [P7_SPI_11b] = P7CTL_INIT_FUNC(SPI_11b, 73, 0),
+ [P7_GPIO_073] = P7CTL_INIT_FUNC(GPIO_073, 73, 1),
+ [P7_SPI_12b] = P7CTL_INIT_FUNC(SPI_12b, 74, 0),
+ [P7_GPIO_074] = P7CTL_INIT_FUNC(GPIO_074, 74, 1),
+ [P7_SPI_13b] = P7CTL_INIT_FUNC(SPI_13b, 75, 0),
+ [P7_GPIO_075] = P7CTL_INIT_FUNC(GPIO_075, 75, 1),
+ [P7_SPI_14b] = P7CTL_INIT_FUNC(SPI_14b, 76, 0),
+ [P7_GPIO_076] = P7CTL_INIT_FUNC(GPIO_076, 76, 1),
+ [P7_SPI_00] = P7CTL_INIT_FUNC(SPI_00, 77, 0),
+ [P7_GPIO_077] = P7CTL_INIT_FUNC(GPIO_077, 77, 1),
+ [P7_AAI_15] = P7CTL_INIT_FUNC(AAI_15, 77, 2),
+ [P7_CAN1_TX] = P7CTL_INIT_FUNC(CAN1_TX, 77, 3),
+ [P7_SPI_01] = P7CTL_INIT_FUNC(SPI_01, 78, 0),
+ [P7_GPIO_078] = P7CTL_INIT_FUNC(GPIO_078, 78, 1),
+ [P7_AAI_16] = P7CTL_INIT_FUNC(AAI_16, 78, 2),
+ [P7_CAN1_RX] = P7CTL_INIT_FUNC(CAN1_RX, 78, 3),
+ [P7_SPI_02] = P7CTL_INIT_FUNC(SPI_02, 79, 0),
+ [P7_GPIO_079] = P7CTL_INIT_FUNC(GPIO_079, 79, 1),
+ [P7_AAI_17] = P7CTL_INIT_FUNC(AAI_17, 79, 2),
+ [P7_CAN0_TX] = P7CTL_INIT_FUNC(CAN0_TX, 79, 3),
+ [P7_SPI_03] = P7CTL_INIT_FUNC(SPI_03, 80, 0),
+ [P7_GPIO_080] = P7CTL_INIT_FUNC(GPIO_080, 80, 1),
+ [P7_AAI_18] = P7CTL_INIT_FUNC(AAI_18, 80, 2),
+ [P7_CAN0_RX] = P7CTL_INIT_FUNC(CAN0_RX, 80, 3),
+ [P7_SPI_04] = P7CTL_INIT_FUNC(SPI_04, 81, 0),
+ [P7_GPIO_081] = P7CTL_INIT_FUNC(GPIO_081, 81, 1),
+ [P7_AAI_19] = P7CTL_INIT_FUNC(AAI_19, 81, 2),
+ [P7_SPI_05] = P7CTL_INIT_FUNC(SPI_05, 82, 0),
+ [P7_GPIO_082] = P7CTL_INIT_FUNC(GPIO_082, 82, 1),
+ [P7_AAI_20] = P7CTL_INIT_FUNC(AAI_20, 82, 2),
+ [P7_SPI_06] = P7CTL_INIT_FUNC(SPI_06, 83, 0),
+ [P7_GPIO_083] = P7CTL_INIT_FUNC(GPIO_083, 83, 1),
+ [P7_AAI_21] = P7CTL_INIT_FUNC(AAI_21, 83, 2),
+ [P7_SPI_07] = P7CTL_INIT_FUNC(SPI_07, 84, 0),
+ [P7_GPIO_084] = P7CTL_INIT_FUNC(GPIO_084, 84, 1),
+ [P7_AAI_22] = P7CTL_INIT_FUNC(AAI_22, 84, 2),
+ [P7_SPI_08] = P7CTL_INIT_FUNC(SPI_08, 85, 0),
+ [P7_GPIO_085] = P7CTL_INIT_FUNC(GPIO_085, 85, 1),
+ [P7_AAI_23] = P7CTL_INIT_FUNC(AAI_23, 85, 2),
+ [P7_SPI_09] = P7CTL_INIT_FUNC(SPI_09, 86, 0),
+ [P7_GPIO_086] = P7CTL_INIT_FUNC(GPIO_086, 86, 1),
+ [P7_AAI_24] = P7CTL_INIT_FUNC(AAI_24, 86, 2),
+ [P7_SPI_10] = P7CTL_INIT_FUNC(SPI_10, 87, 0),
+ [P7_GPIO_087] = P7CTL_INIT_FUNC(GPIO_087, 87, 1),
+ [P7_AAI_25] = P7CTL_INIT_FUNC(AAI_25, 87, 2),
+ [P7_SPI_11] = P7CTL_INIT_FUNC(SPI_11, 88, 0),
+ [P7_GPIO_088] = P7CTL_INIT_FUNC(GPIO_088, 88, 1),
+ [P7_AAI_26] = P7CTL_INIT_FUNC(AAI_26, 88, 2),
+ [P7_SPI_12] = P7CTL_INIT_FUNC(SPI_12, 89, 0),
+ [P7_GPIO_089] = P7CTL_INIT_FUNC(GPIO_089, 89, 1),
+ [P7_AAI_27] = P7CTL_INIT_FUNC(AAI_27, 89, 2),
+ [P7_SPI_13] = P7CTL_INIT_FUNC(SPI_13, 90, 0),
+ [P7_GPIO_090] = P7CTL_INIT_FUNC(GPIO_090, 90, 1),
+ [P7_AAI_28] = P7CTL_INIT_FUNC(AAI_28, 90, 2),
+ [P7_SPI_14] = P7CTL_INIT_FUNC(SPI_14, 91, 0),
+ [P7_GPIO_091] = P7CTL_INIT_FUNC(GPIO_091, 91, 1),
+ [P7_AAI_29] = P7CTL_INIT_FUNC(AAI_29, 91, 2),
+ [P7_SPI_15] = P7CTL_INIT_FUNC(SPI_15, 92, 0),
+ [P7_GPIO_092] = P7CTL_INIT_FUNC(GPIO_092, 92, 1),
+ [P7_AAI_30] = P7CTL_INIT_FUNC(AAI_30, 92, 2),
+ [P7_P7MU_CLK_OUT] = P7CTL_INIT_FUNC(P7MU_CLK_OUT, 93, 0),
+ [P7_PWM_15a] = P7CTL_INIT_FUNC(PWM_15a, 93, 2),
+ [P7_REBOOT_P7MU] = P7CTL_INIT_FUNC(REBOOT_P7MU, 94, 0),
+ [P7_GPIO_094] = P7CTL_INIT_FUNC(GPIO_094, 94, 1),
+ [P7_ULPI_0_DATA00] = P7CTL_INIT_FUNC(ULPI_0_DATA00, 95, 0),
+ [P7_ULPI_0_DATA01] = P7CTL_INIT_FUNC(ULPI_0_DATA01, 96, 0),
+ [P7_ULPI_0_DATA02] = P7CTL_INIT_FUNC(ULPI_0_DATA02, 97, 0),
+ [P7_ULPI_0_DATA03] = P7CTL_INIT_FUNC(ULPI_0_DATA03, 98, 0),
+ [P7_ULPI_0_DATA04] = P7CTL_INIT_FUNC(ULPI_0_DATA04, 99, 0),
+ [P7_ULPI_0_DATA05] = P7CTL_INIT_FUNC(ULPI_0_DATA05, 100, 0),
+ [P7_ULPI_0_DATA06] = P7CTL_INIT_FUNC(ULPI_0_DATA06, 101, 0),
+ [P7_ULPI_0_DATA07] = P7CTL_INIT_FUNC(ULPI_0_DATA07, 102, 0),
+ [P7_ULPI_0_NXT] = P7CTL_INIT_FUNC(ULPI_0_NXT, 103, 0),
+ [P7_ULPI_0_STP] = P7CTL_INIT_FUNC(ULPI_0_STP, 104, 0),
+ [P7_ULPI_0_DIR] = P7CTL_INIT_FUNC(ULPI_0_DIR, 105, 0),
+ [P7_ULPI_0_CLK] = P7CTL_INIT_FUNC(ULPI_0_CLK, 106, 0),
+ [P7_ULPI_1_DATA00] = P7CTL_INIT_FUNC(ULPI_1_DATA00, 107, 0),
+ [P7_GPIO_107] = P7CTL_INIT_FUNC(GPIO_107, 107, 1),
+ [P7_I2C_2_CLKa] = P7CTL_INIT_FUNC(I2C_2_CLKa, 107, 2),
+ [P7_ULPI_1_DATA01] = P7CTL_INIT_FUNC(ULPI_1_DATA01, 108, 0),
+ [P7_GPIO_108] = P7CTL_INIT_FUNC(GPIO_108, 108, 1),
+ [P7_I2C_2_DATa] = P7CTL_INIT_FUNC(I2C_2_DATa, 108, 2),
+ [P7_ULPI_1_DATA02] = P7CTL_INIT_FUNC(ULPI_1_DATA02, 109, 0),
+ [P7_GPIO_109] = P7CTL_INIT_FUNC(GPIO_109, 109, 1),
+ [P7_I2C_2_CLKb] = P7CTL_INIT_FUNC(I2C_2_CLKb, 109, 2),
+ [P7_ULPI_1_DATA03] = P7CTL_INIT_FUNC(ULPI_1_DATA03, 110, 0),
+ [P7_GPIO_110] = P7CTL_INIT_FUNC(GPIO_110, 110, 1),
+ [P7_I2C_2_DATb] = P7CTL_INIT_FUNC(I2C_2_DATb, 110, 2),
+ [P7_ULPI_1_DATA04] = P7CTL_INIT_FUNC(ULPI_1_DATA04, 111, 0),
+ [P7_GPIO_111] = P7CTL_INIT_FUNC(GPIO_111, 111, 1),
+ [P7_I2C_2_CLKc] = P7CTL_INIT_FUNC(I2C_2_CLKc, 111, 2),
+ [P7_ULPI_1_DATA05] = P7CTL_INIT_FUNC(ULPI_1_DATA05, 112, 0),
+ [P7_GPIO_112] = P7CTL_INIT_FUNC(GPIO_112, 112, 1),
+ [P7_I2C_2_DATc] = P7CTL_INIT_FUNC(I2C_2_DATc, 112, 2),
+ [P7_ULPI_1_DATA06] = P7CTL_INIT_FUNC(ULPI_1_DATA06, 113, 0),
+ [P7_GPIO_113] = P7CTL_INIT_FUNC(GPIO_113, 113, 1),
+ [P7_I2C_2_CLKd] = P7CTL_INIT_FUNC(I2C_2_CLKd, 113, 2),
+ [P7_ULPI_1_DATA07] = P7CTL_INIT_FUNC(ULPI_1_DATA07, 114, 0),
+ [P7_GPIO_114] = P7CTL_INIT_FUNC(GPIO_114, 114, 1),
+ [P7_I2C_2_DATd] = P7CTL_INIT_FUNC(I2C_2_DATd, 114, 2),
+ [P7_ULPI_1_NXT] = P7CTL_INIT_FUNC(ULPI_1_NXT, 115, 0),
+ [P7_GPIO_115] = P7CTL_INIT_FUNC(GPIO_115, 115, 1),
+ [P7_I2C_2_CLKe] = P7CTL_INIT_FUNC(I2C_2_CLKe, 115, 2),
+ [P7_ULPI_1_STP] = P7CTL_INIT_FUNC(ULPI_1_STP, 116, 0),
+ [P7_GPIO_116] = P7CTL_INIT_FUNC(GPIO_116, 116, 1),
+ [P7_I2C_2_DATe] = P7CTL_INIT_FUNC(I2C_2_DATe, 116, 2),
+ [P7_ULPI_1_DIR] = P7CTL_INIT_FUNC(ULPI_1_DIR, 117, 0),
+ [P7_GPIO_117] = P7CTL_INIT_FUNC(GPIO_117, 117, 1),
+ [P7_ULPI_1_CLK] = P7CTL_INIT_FUNC(ULPI_1_CLK, 118, 0),
+ [P7_GPIO_118] = P7CTL_INIT_FUNC(GPIO_118, 118, 1),
+ [P7_AAI_00] = P7CTL_INIT_FUNC(AAI_00, 119, 0),
+ [P7_GPIO_119] = P7CTL_INIT_FUNC(GPIO_119, 119, 1),
+ [P7_PWM_00] = P7CTL_INIT_FUNC(PWM_00, 119, 2),
+ [P7_AAI_01] = P7CTL_INIT_FUNC(AAI_01, 120, 0),
+ [P7_GPIO_120] = P7CTL_INIT_FUNC(GPIO_120, 120, 1),
+ [P7_PWM_01] = P7CTL_INIT_FUNC(PWM_01, 120, 2),
+ [P7_AAI_02] = P7CTL_INIT_FUNC(AAI_02, 121, 0),
+ [P7_GPIO_121] = P7CTL_INIT_FUNC(GPIO_121, 121, 1),
+ [P7_PWM_02] = P7CTL_INIT_FUNC(PWM_02, 121, 2),
+ [P7_AAI_03] = P7CTL_INIT_FUNC(AAI_03, 122, 0),
+ [P7_GPIO_122] = P7CTL_INIT_FUNC(GPIO_122, 122, 1),
+ [P7_PWM_03] = P7CTL_INIT_FUNC(PWM_03, 122, 2),
+ [P7_AAI_04] = P7CTL_INIT_FUNC(AAI_04, 123, 0),
+ [P7_GPIO_123] = P7CTL_INIT_FUNC(GPIO_123, 123, 1),
+ [P7_PWM_04] = P7CTL_INIT_FUNC(PWM_04, 123, 2),
+ [P7_AAI_05] = P7CTL_INIT_FUNC(AAI_05, 124, 0),
+ [P7_GPIO_124] = P7CTL_INIT_FUNC(GPIO_124, 124, 1),
+ [P7_PWM_05] = P7CTL_INIT_FUNC(PWM_05, 124, 2),
+ [P7_AAI_06] = P7CTL_INIT_FUNC(AAI_06, 125, 0),
+ [P7_GPIO_125] = P7CTL_INIT_FUNC(GPIO_125, 125, 1),
+ [P7_PWM_06] = P7CTL_INIT_FUNC(PWM_06, 125, 2),
+ [P7_AAI_07] = P7CTL_INIT_FUNC(AAI_07, 126, 0),
+ [P7_GPIO_126] = P7CTL_INIT_FUNC(GPIO_126, 126, 1),
+ [P7_PWM_07] = P7CTL_INIT_FUNC(PWM_07, 126, 2),
+ [P7_AAI_08] = P7CTL_INIT_FUNC(AAI_08, 127, 0),
+ [P7_GPIO_127] = P7CTL_INIT_FUNC(GPIO_127, 127, 1),
+ [P7_PWM_08] = P7CTL_INIT_FUNC(PWM_08, 127, 2),
+ [P7_AAI_09] = P7CTL_INIT_FUNC(AAI_09, 128, 0),
+ [P7_GPIO_128] = P7CTL_INIT_FUNC(GPIO_128, 128, 1),
+ [P7_PWM_09] = P7CTL_INIT_FUNC(PWM_09, 128, 2),
+ [P7_AAI_10] = P7CTL_INIT_FUNC(AAI_10, 129, 0),
+ [P7_GPIO_129] = P7CTL_INIT_FUNC(GPIO_129, 129, 1),
+ [P7_PWM_10] = P7CTL_INIT_FUNC(PWM_10, 129, 2),
+ [P7_AAI_11] = P7CTL_INIT_FUNC(AAI_11, 130, 0),
+ [P7_GPIO_130] = P7CTL_INIT_FUNC(GPIO_130, 130, 1),
+ [P7_PWM_11] = P7CTL_INIT_FUNC(PWM_11, 130, 2),
+ [P7_AAI_12] = P7CTL_INIT_FUNC(AAI_12, 131, 0),
+ [P7_GPIO_131] = P7CTL_INIT_FUNC(GPIO_131, 131, 1),
+ [P7_PWM_12] = P7CTL_INIT_FUNC(PWM_12, 131, 2),
+ [P7_AAI_13] = P7CTL_INIT_FUNC(AAI_13, 132, 0),
+ [P7_GPIO_132] = P7CTL_INIT_FUNC(GPIO_132, 132, 1),
+ [P7_PWM_13] = P7CTL_INIT_FUNC(PWM_13, 132, 2),
+ [P7_AAI_14] = P7CTL_INIT_FUNC(AAI_14, 133, 0),
+ [P7_GPIO_133] = P7CTL_INIT_FUNC(GPIO_133, 133, 1),
+ [P7_PWM_14] = P7CTL_INIT_FUNC(PWM_14, 133, 2),
+ [P7_LCD_0_DEN] = P7CTL_INIT_FUNC(LCD_0_DEN, 134, 0),
+ [P7_GPIO_134] = P7CTL_INIT_FUNC(GPIO_134, 134, 1),
+ [P7_CAM_1_VS] = P7CTL_INIT_FUNC(CAM_1_VS, 134, 2),
+ [P7_LCD_0_HS] = P7CTL_INIT_FUNC(LCD_0_HS, 135, 0),
+ [P7_GPIO_135] = P7CTL_INIT_FUNC(GPIO_135, 135, 1),
+ [P7_CAM_1_HS] = P7CTL_INIT_FUNC(CAM_1_HS, 135, 2),
+ [P7_LCD_0_DENa] = P7CTL_INIT_FUNC(LCD_0_DENa, 135, 3),
+ [P7_LCD_0_VS] = P7CTL_INIT_FUNC(LCD_0_VS, 136, 0),
+ [P7_GPIO_136] = P7CTL_INIT_FUNC(GPIO_136, 136, 1),
+ [P7_CAM_0_VS] = P7CTL_INIT_FUNC(CAM_0_VS, 136, 2),
+ [P7_PAR_NCS] = P7CTL_INIT_FUNC(PAR_NCS, 136, 3),
+ [P7_LCD_0_DATA00] = P7CTL_INIT_FUNC(LCD_0_DATA00, 137, 0),
+ [P7_GPIO_137] = P7CTL_INIT_FUNC(GPIO_137, 137, 1),
+ [P7_CAM_0_HS] = P7CTL_INIT_FUNC(CAM_0_HS, 137, 2),
+ [P7_PAR_NRS] = P7CTL_INIT_FUNC(PAR_NRS, 137, 3),
+ [P7_LCD_0_DATA01] = P7CTL_INIT_FUNC(LCD_0_DATA01, 138, 0),
+ [P7_GPIO_138] = P7CTL_INIT_FUNC(GPIO_138, 138, 1),
+ [P7_CAM_2_VS] = P7CTL_INIT_FUNC(CAM_2_VS, 138, 2),
+ [P7_PAR_NRD] = P7CTL_INIT_FUNC(PAR_NRD, 138, 3),
+ [P7_LCD_0_DATA02] = P7CTL_INIT_FUNC(LCD_0_DATA02, 139, 0),
+ [P7_GPIO_139] = P7CTL_INIT_FUNC(GPIO_139, 139, 1),
+ [P7_CAM_2_HS] = P7CTL_INIT_FUNC(CAM_2_HS, 139, 2),
+ [P7_PAR_NWR] = P7CTL_INIT_FUNC(PAR_NWR, 139, 3),
+ [P7_LCD_0_DATA03] = P7CTL_INIT_FUNC(LCD_0_DATA03, 140, 0),
+ [P7_GPIO_140] = P7CTL_INIT_FUNC(GPIO_140, 140, 1),
+ [P7_CAM_3_VS] = P7CTL_INIT_FUNC(CAM_3_VS, 140, 2),
+ [P7_PAR_D00] = P7CTL_INIT_FUNC(PAR_D00, 140, 3),
+ [P7_LCD_0_DATA04] = P7CTL_INIT_FUNC(LCD_0_DATA04, 141, 0),
+ [P7_GPIO_141] = P7CTL_INIT_FUNC(GPIO_141, 141, 1),
+ [P7_CAM_3_HS] = P7CTL_INIT_FUNC(CAM_3_HS, 141, 2),
+ [P7_PAR_D01] = P7CTL_INIT_FUNC(PAR_D01, 141, 3),
+ [P7_LCD_0_DATA05] = P7CTL_INIT_FUNC(LCD_0_DATA05, 142, 0),
+ [P7_GPIO_142] = P7CTL_INIT_FUNC(GPIO_142, 142, 1),
+ [P7_CAM_5_VS] = P7CTL_INIT_FUNC(CAM_5_VS, 142, 2),
+ [P7_PAR_D02] = P7CTL_INIT_FUNC(PAR_D02, 142, 3),
+ [P7_LCD_0_DATA06] = P7CTL_INIT_FUNC(LCD_0_DATA06, 143, 0),
+ [P7_GPIO_143] = P7CTL_INIT_FUNC(GPIO_143, 143, 1),
+ [P7_CAM_5_HS] = P7CTL_INIT_FUNC(CAM_5_HS, 143, 2),
+ [P7_PAR_D03] = P7CTL_INIT_FUNC(PAR_D03, 143, 3),
+ [P7_LCD_0_DATA07] = P7CTL_INIT_FUNC(LCD_0_DATA07, 144, 0),
+ [P7_GPIO_144] = P7CTL_INIT_FUNC(GPIO_144, 144, 1),
+ [P7_CAM_0_DATA08] = P7CTL_INIT_FUNC(CAM_0_DATA08, 144, 2),
+ [P7_PAR_D04] = P7CTL_INIT_FUNC(PAR_D04, 144, 3),
+ [P7_LCD_0_DATA08] = P7CTL_INIT_FUNC(LCD_0_DATA08, 145, 0),
+ [P7_GPIO_145] = P7CTL_INIT_FUNC(GPIO_145, 145, 1),
+ [P7_CAM_0_DATA09] = P7CTL_INIT_FUNC(CAM_0_DATA09, 145, 2),
+ [P7_PAR_D05] = P7CTL_INIT_FUNC(PAR_D05, 145, 3),
+ [P7_LCD_0_DATA09] = P7CTL_INIT_FUNC(LCD_0_DATA09, 146, 0),
+ [P7_GPIO_146] = P7CTL_INIT_FUNC(GPIO_146, 146, 1),
+ [P7_CAM_0_DATA10] = P7CTL_INIT_FUNC(CAM_0_DATA10, 146, 2),
+ [P7_PAR_D06] = P7CTL_INIT_FUNC(PAR_D06, 146, 3),
+ [P7_LCD_0_DATA10] = P7CTL_INIT_FUNC(LCD_0_DATA10, 147, 0),
+ [P7_GPIO_147] = P7CTL_INIT_FUNC(GPIO_147, 147, 1),
+ [P7_CAM_0_DATA11] = P7CTL_INIT_FUNC(CAM_0_DATA11, 147, 2),
+ [P7_PAR_D07] = P7CTL_INIT_FUNC(PAR_D07, 147, 3),
+ [P7_LCD_0_DATA11] = P7CTL_INIT_FUNC(LCD_0_DATA11, 148, 0),
+ [P7_GPIO_148] = P7CTL_INIT_FUNC(GPIO_148, 148, 1),
+ [P7_CAM_0_DATA12] = P7CTL_INIT_FUNC(CAM_0_DATA12, 148, 2),
+ [P7_PAR_D08] = P7CTL_INIT_FUNC(PAR_D08, 148, 3),
+ [P7_LCD_0_DATA12] = P7CTL_INIT_FUNC(LCD_0_DATA12, 149, 0),
+ [P7_GPIO_149] = P7CTL_INIT_FUNC(GPIO_149, 149, 1),
+ [P7_CAM_0_DATA13] = P7CTL_INIT_FUNC(CAM_0_DATA13, 149, 2),
+ [P7_PAR_D09] = P7CTL_INIT_FUNC(PAR_D09, 149, 3),
+ [P7_LCD_0_DATA13] = P7CTL_INIT_FUNC(LCD_0_DATA13, 150, 0),
+ [P7_GPIO_150] = P7CTL_INIT_FUNC(GPIO_150, 150, 1),
+ [P7_CAM_0_DATA14] = P7CTL_INIT_FUNC(CAM_0_DATA14, 150, 2),
+ [P7_PAR_D10] = P7CTL_INIT_FUNC(PAR_D10, 150, 3),
+ [P7_LCD_0_DATA14] = P7CTL_INIT_FUNC(LCD_0_DATA14, 151, 0),
+ [P7_GPIO_151] = P7CTL_INIT_FUNC(GPIO_151, 151, 1),
+ [P7_CAM_0_DATA15] = P7CTL_INIT_FUNC(CAM_0_DATA15, 151, 2),
+ [P7_PAR_D11] = P7CTL_INIT_FUNC(PAR_D11, 151, 3),
+ [P7_LCD_0_DATA15] = P7CTL_INIT_FUNC(LCD_0_DATA15, 152, 0),
+ [P7_GPIO_152] = P7CTL_INIT_FUNC(GPIO_152, 152, 1),
+ [P7_CAM_0_DATA16] = P7CTL_INIT_FUNC(CAM_0_DATA16, 152, 2),
+ [P7_PAR_D12] = P7CTL_INIT_FUNC(PAR_D12, 152, 3),
+ [P7_LCD_0_DATA16] = P7CTL_INIT_FUNC(LCD_0_DATA16, 153, 0),
+ [P7_GPIO_153] = P7CTL_INIT_FUNC(GPIO_153, 153, 1),
+ [P7_CAM_0_DATA17] = P7CTL_INIT_FUNC(CAM_0_DATA17, 153, 2),
+ [P7_PAR_D13] = P7CTL_INIT_FUNC(PAR_D13, 153, 3),
+ [P7_LCD_0_DATA17] = P7CTL_INIT_FUNC(LCD_0_DATA17, 154, 0),
+ [P7_GPIO_154] = P7CTL_INIT_FUNC(GPIO_154, 154, 1),
+ [P7_CAM_0_DATA18] = P7CTL_INIT_FUNC(CAM_0_DATA18, 154, 2),
+ [P7_PAR_D14] = P7CTL_INIT_FUNC(PAR_D14, 154, 3),
+ [P7_LCD_0_DATA18] = P7CTL_INIT_FUNC(LCD_0_DATA18, 155, 0),
+ [P7_GPIO_155] = P7CTL_INIT_FUNC(GPIO_155, 155, 1),
+ [P7_CAM_0_DATA19] = P7CTL_INIT_FUNC(CAM_0_DATA19, 155, 2),
+ [P7_PAR_D15] = P7CTL_INIT_FUNC(PAR_D15, 155, 3),
+ [P7_LCD_0_DATA19] = P7CTL_INIT_FUNC(LCD_0_DATA19, 156, 0),
+ [P7_GPIO_156] = P7CTL_INIT_FUNC(GPIO_156, 156, 1),
+ [P7_CAM_0_DATA20] = P7CTL_INIT_FUNC(CAM_0_DATA20, 156, 2),
+ [P7_LCD_0_DATA20] = P7CTL_INIT_FUNC(LCD_0_DATA20, 157, 0),
+ [P7_GPIO_157] = P7CTL_INIT_FUNC(GPIO_157, 157, 1),
+ [P7_CAM_0_DATA21] = P7CTL_INIT_FUNC(CAM_0_DATA21, 157, 2),
+ [P7_LCD_0_DATA21] = P7CTL_INIT_FUNC(LCD_0_DATA21, 158, 0),
+ [P7_GPIO_158] = P7CTL_INIT_FUNC(GPIO_158, 158, 1),
+ [P7_CAM_0_DATA22] = P7CTL_INIT_FUNC(CAM_0_DATA22, 158, 2),
+ [P7_LCD_0_DATA22] = P7CTL_INIT_FUNC(LCD_0_DATA22, 159, 0),
+ [P7_GPIO_159] = P7CTL_INIT_FUNC(GPIO_159, 159, 1),
+ [P7_CAM_0_DATA23] = P7CTL_INIT_FUNC(CAM_0_DATA23, 159, 2),
+ [P7_LCD_0_DATA23] = P7CTL_INIT_FUNC(LCD_0_DATA23, 160, 0),
+ [P7_GPIO_160] = P7CTL_INIT_FUNC(GPIO_160, 160, 1),
+ [P7_CAM_4_VS] = P7CTL_INIT_FUNC(CAM_4_VS, 160, 2),
+ [P7_LCD_0_CLK] = P7CTL_INIT_FUNC(LCD_0_CLK, 161, 0),
+ [P7_GPIO_161] = P7CTL_INIT_FUNC(GPIO_161, 161, 1),
+ [P7_CAM_4_HS] = P7CTL_INIT_FUNC(CAM_4_HS, 161, 2),
+ [P7_LCD_1_HS] = P7CTL_INIT_FUNC(LCD_1_HS, 162, 0),
+ [P7_GPIO_162] = P7CTL_INIT_FUNC(GPIO_162, 162, 1),
+ [P7_CAM_2_CLK] = P7CTL_INIT_FUNC(CAM_2_CLK, 162, 2),
+ [P7_LCD_1_DENa] = P7CTL_INIT_FUNC(LCD_1_DENa, 162, 3),
+ [P7_LCD_1_VS] = P7CTL_INIT_FUNC(LCD_1_VS, 163, 0),
+ [P7_GPIO_163] = P7CTL_INIT_FUNC(GPIO_163, 163, 1),
+ [P7_CAM_2_DATA00] = P7CTL_INIT_FUNC(CAM_2_DATA00, 163, 2),
+ [P7_CPU_TRACECLKOUT] = P7CTL_INIT_FUNC(CPU_TRACECLKOUT, 163, 3),
+ [P7_LCD_1_CLK] = P7CTL_INIT_FUNC(LCD_1_CLK, 164, 0),
+ [P7_GPIO_164] = P7CTL_INIT_FUNC(GPIO_164, 164, 1),
+ [P7_CAM_2_DATA01] = P7CTL_INIT_FUNC(CAM_2_DATA01, 164, 2),
+ [P7_CPU_TRACECTL] = P7CTL_INIT_FUNC(CPU_TRACECTL, 164, 3),
+ [P7_LCD_1_DATA00] = P7CTL_INIT_FUNC(LCD_1_DATA00, 165, 0),
+ [P7_GPIO_165] = P7CTL_INIT_FUNC(GPIO_165, 165, 1),
+ [P7_CAM_2_DATA02] = P7CTL_INIT_FUNC(CAM_2_DATA02, 165, 2),
+ [P7_CPU_TRACEDATA_00] = P7CTL_INIT_FUNC(CPU_TRACEDATA_00, 165, 3),
+ [P7_LCD_1_DATA01] = P7CTL_INIT_FUNC(LCD_1_DATA01, 166, 0),
+ [P7_GPIO_166] = P7CTL_INIT_FUNC(GPIO_166, 166, 1),
+ [P7_CAM_2_DATA03] = P7CTL_INIT_FUNC(CAM_2_DATA03, 166, 2),
+ [P7_CPU_TRACEDATA_01] = P7CTL_INIT_FUNC(CPU_TRACEDATA_01, 166, 3),
+ [P7_LCD_1_DATA02] = P7CTL_INIT_FUNC(LCD_1_DATA02, 167, 0),
+ [P7_GPIO_167] = P7CTL_INIT_FUNC(GPIO_167, 167, 1),
+ [P7_CAM_2_DATA04] = P7CTL_INIT_FUNC(CAM_2_DATA04, 167, 2),
+ [P7_CPU_TRACEDATA_02] = P7CTL_INIT_FUNC(CPU_TRACEDATA_02, 167, 3),
+ [P7_LCD_1_DATA03] = P7CTL_INIT_FUNC(LCD_1_DATA03, 168, 0),
+ [P7_GPIO_168] = P7CTL_INIT_FUNC(GPIO_168, 168, 1),
+ [P7_CAM_2_DATA05] = P7CTL_INIT_FUNC(CAM_2_DATA05, 168, 2),
+ [P7_CPU_TRACEDATA_03] = P7CTL_INIT_FUNC(CPU_TRACEDATA_03, 168, 3),
+ [P7_LCD_1_DATA04] = P7CTL_INIT_FUNC(LCD_1_DATA04, 169, 0),
+ [P7_GPIO_169] = P7CTL_INIT_FUNC(GPIO_169, 169, 1),
+ [P7_CAM_2_DATA06] = P7CTL_INIT_FUNC(CAM_2_DATA06, 169, 2),
+ [P7_CPU_TRACEDATA_04] = P7CTL_INIT_FUNC(CPU_TRACEDATA_04, 169, 3),
+ [P7_LCD_1_DATA05] = P7CTL_INIT_FUNC(LCD_1_DATA05, 170, 0),
+ [P7_GPIO_170] = P7CTL_INIT_FUNC(GPIO_170, 170, 1),
+ [P7_CAM_2_DATA07] = P7CTL_INIT_FUNC(CAM_2_DATA07, 170, 2),
+ [P7_CPU_TRACEDATA_05] = P7CTL_INIT_FUNC(CPU_TRACEDATA_05, 170, 3),
+ [P7_LCD_1_DATA06] = P7CTL_INIT_FUNC(LCD_1_DATA06, 171, 0),
+ [P7_GPIO_171] = P7CTL_INIT_FUNC(GPIO_171, 171, 1),
+ [P7_CAM_3_CLK] = P7CTL_INIT_FUNC(CAM_3_CLK, 171, 2),
+ [P7_CPU_TRACEDATA_06] = P7CTL_INIT_FUNC(CPU_TRACEDATA_06, 171, 3),
+ [P7_LCD_1_DATA07] = P7CTL_INIT_FUNC(LCD_1_DATA07, 172, 0),
+ [P7_GPIO_172] = P7CTL_INIT_FUNC(GPIO_172, 172, 1),
+ [P7_CAM_3_DATA00] = P7CTL_INIT_FUNC(CAM_3_DATA00, 172, 2),
+ [P7_CPU_TRACEDATA_07] = P7CTL_INIT_FUNC(CPU_TRACEDATA_07, 172, 3),
+ [P7_LCD_1_DATA08] = P7CTL_INIT_FUNC(LCD_1_DATA08, 173, 0),
+ [P7_GPIO_173] = P7CTL_INIT_FUNC(GPIO_173, 173, 1),
+ [P7_CAM_3_DATA01] = P7CTL_INIT_FUNC(CAM_3_DATA01, 173, 2),
+ [P7_CPU_TRACEDATA_08] = P7CTL_INIT_FUNC(CPU_TRACEDATA_08, 173, 3),
+ [P7_LCD_1_DATA09] = P7CTL_INIT_FUNC(LCD_1_DATA09, 174, 0),
+ [P7_GPIO_174] = P7CTL_INIT_FUNC(GPIO_174, 174, 1),
+ [P7_CAM_3_DATA02] = P7CTL_INIT_FUNC(CAM_3_DATA02, 174, 2),
+ [P7_CPU_TRACEDATA_09] = P7CTL_INIT_FUNC(CPU_TRACEDATA_09, 174, 3),
+ [P7_LCD_1_DATA10] = P7CTL_INIT_FUNC(LCD_1_DATA10, 175, 0),
+ [P7_GPIO_175] = P7CTL_INIT_FUNC(GPIO_175, 175, 1),
+ [P7_CAM_3_DATA03] = P7CTL_INIT_FUNC(CAM_3_DATA03, 175, 2),
+ [P7_CPU_TRACEDATA_10] = P7CTL_INIT_FUNC(CPU_TRACEDATA_10, 175, 3),
+ [P7_LCD_1_DATA11] = P7CTL_INIT_FUNC(LCD_1_DATA11, 176, 0),
+ [P7_GPIO_176] = P7CTL_INIT_FUNC(GPIO_176, 176, 1),
+ [P7_CAM_3_DATA04] = P7CTL_INIT_FUNC(CAM_3_DATA04, 176, 2),
+ [P7_CPU_TRACEDATA_11] = P7CTL_INIT_FUNC(CPU_TRACEDATA_11, 176, 3),
+ [P7_LCD_1_DATA12] = P7CTL_INIT_FUNC(LCD_1_DATA12, 177, 0),
+ [P7_GPIO_177] = P7CTL_INIT_FUNC(GPIO_177, 177, 1),
+ [P7_CAM_3_DATA05] = P7CTL_INIT_FUNC(CAM_3_DATA05, 177, 2),
+ [P7_CPU_TRACEDATA_12] = P7CTL_INIT_FUNC(CPU_TRACEDATA_12, 177, 3),
+ [P7_LCD_1_DATA13] = P7CTL_INIT_FUNC(LCD_1_DATA13, 178, 0),
+ [P7_GPIO_178] = P7CTL_INIT_FUNC(GPIO_178, 178, 1),
+ [P7_CAM_3_DATA06] = P7CTL_INIT_FUNC(CAM_3_DATA06, 178, 2),
+ [P7_CPU_TRACEDATA_13] = P7CTL_INIT_FUNC(CPU_TRACEDATA_13, 178, 3),
+ [P7_LCD_1_DATA14] = P7CTL_INIT_FUNC(LCD_1_DATA14, 179, 0),
+ [P7_GPIO_179] = P7CTL_INIT_FUNC(GPIO_179, 179, 1),
+ [P7_CAM_3_DATA07] = P7CTL_INIT_FUNC(CAM_3_DATA07, 179, 2),
+ [P7_CPU_TRACEDATA_14] = P7CTL_INIT_FUNC(CPU_TRACEDATA_14, 179, 3),
+ [P7_LCD_1_DATA15] = P7CTL_INIT_FUNC(LCD_1_DATA15, 180, 0),
+ [P7_GPIO_180] = P7CTL_INIT_FUNC(GPIO_180, 180, 1),
+ [P7_CAM_4_CLK] = P7CTL_INIT_FUNC(CAM_4_CLK, 180, 2),
+ [P7_CPU_TRACEDATA_15] = P7CTL_INIT_FUNC(CPU_TRACEDATA_15, 180, 3),
+ [P7_LCD_1_DATA16] = P7CTL_INIT_FUNC(LCD_1_DATA16, 181, 0),
+ [P7_GPIO_181] = P7CTL_INIT_FUNC(GPIO_181, 181, 1),
+ [P7_CAM_4_DATA00] = P7CTL_INIT_FUNC(CAM_4_DATA00, 181, 2),
+ [P7_LCD_1_DATA17] = P7CTL_INIT_FUNC(LCD_1_DATA17, 182, 0),
+ [P7_GPIO_182] = P7CTL_INIT_FUNC(GPIO_182, 182, 1),
+ [P7_CAM_4_DATA01] = P7CTL_INIT_FUNC(CAM_4_DATA01, 182, 2),
+ [P7_LCD_1_DATA18] = P7CTL_INIT_FUNC(LCD_1_DATA18, 183, 0),
+ [P7_GPIO_183] = P7CTL_INIT_FUNC(GPIO_183, 183, 1),
+ [P7_CAM_4_DATA02] = P7CTL_INIT_FUNC(CAM_4_DATA02, 183, 2),
+ [P7_LCD_1_DATA19] = P7CTL_INIT_FUNC(LCD_1_DATA19, 184, 0),
+ [P7_GPIO_184] = P7CTL_INIT_FUNC(GPIO_184, 184, 1),
+ [P7_CAM_4_DATA03] = P7CTL_INIT_FUNC(CAM_4_DATA03, 184, 2),
+ [P7_LCD_1_DATA20] = P7CTL_INIT_FUNC(LCD_1_DATA20, 185, 0),
+ [P7_GPIO_185] = P7CTL_INIT_FUNC(GPIO_185, 185, 1),
+ [P7_CAM_4_DATA04] = P7CTL_INIT_FUNC(CAM_4_DATA04, 185, 2),
+ [P7_LCD_1_DATA21] = P7CTL_INIT_FUNC(LCD_1_DATA21, 186, 0),
+ [P7_GPIO_186] = P7CTL_INIT_FUNC(GPIO_186, 186, 1),
+ [P7_CAM_4_DATA05] = P7CTL_INIT_FUNC(CAM_4_DATA05, 186, 2),
+ [P7_LCD_1_DATA22] = P7CTL_INIT_FUNC(LCD_1_DATA22, 187, 0),
+ [P7_GPIO_187] = P7CTL_INIT_FUNC(GPIO_187, 187, 1),
+ [P7_CAM_4_DATA06] = P7CTL_INIT_FUNC(CAM_4_DATA06, 187, 2),
+ [P7_LCD_1_DATA23] = P7CTL_INIT_FUNC(LCD_1_DATA23, 188, 0),
+ [P7_GPIO_188] = P7CTL_INIT_FUNC(GPIO_188, 188, 1),
+ [P7_CAM_4_DATA07] = P7CTL_INIT_FUNC(CAM_4_DATA07, 188, 2),
+ [P7_LCD_1_DEN] = P7CTL_INIT_FUNC(LCD_1_DEN, 189, 0),
+ [P7_GPIO_189] = P7CTL_INIT_FUNC(GPIO_189, 189, 1),
+ [P7_CAM_1_HSa] = P7CTL_INIT_FUNC(CAM_1_HSa, 189, 2),
+ [P7_LCD_1_CLKa] = P7CTL_INIT_FUNC(LCD_1_CLKa, 189, 3),
+ [P7_CAM_0_CLK] = P7CTL_INIT_FUNC(CAM_0_CLK, 190, 0),
+ [P7_GPIO_190] = P7CTL_INIT_FUNC(GPIO_190, 190, 1),
+ [P7_CAM_0_DATA00] = P7CTL_INIT_FUNC(CAM_0_DATA00, 191, 0),
+ [P7_GPIO_191] = P7CTL_INIT_FUNC(GPIO_191, 191, 1),
+ [P7_CAM_1_DATA08] = P7CTL_INIT_FUNC(CAM_1_DATA08, 191, 2),
+ [P7_CAM_0_DATA01] = P7CTL_INIT_FUNC(CAM_0_DATA01, 192, 0),
+ [P7_GPIO_192] = P7CTL_INIT_FUNC(GPIO_192, 192, 1),
+ [P7_CAM_1_DATA09] = P7CTL_INIT_FUNC(CAM_1_DATA09, 192, 2),
+ [P7_CAM_0_DATA02] = P7CTL_INIT_FUNC(CAM_0_DATA02, 193, 0),
+ [P7_GPIO_193] = P7CTL_INIT_FUNC(GPIO_193, 193, 1),
+ [P7_CAM_1_DATA10] = P7CTL_INIT_FUNC(CAM_1_DATA10, 193, 2),
+ [P7_CAM_0_DATA03] = P7CTL_INIT_FUNC(CAM_0_DATA03, 194, 0),
+ [P7_GPIO_194] = P7CTL_INIT_FUNC(GPIO_194, 194, 1),
+ [P7_CAM_1_DATA11] = P7CTL_INIT_FUNC(CAM_1_DATA11, 194, 2),
+ [P7_CAM_0_DATA04] = P7CTL_INIT_FUNC(CAM_0_DATA04, 195, 0),
+ [P7_GPIO_195] = P7CTL_INIT_FUNC(GPIO_195, 195, 1),
+ [P7_CAM_1_DATA12] = P7CTL_INIT_FUNC(CAM_1_DATA12, 195, 2),
+ [P7_CAM_0_DATA05] = P7CTL_INIT_FUNC(CAM_0_DATA05, 196, 0),
+ [P7_GPIO_196] = P7CTL_INIT_FUNC(GPIO_196, 196, 1),
+ [P7_CAM_1_DATA13] = P7CTL_INIT_FUNC(CAM_1_DATA13, 196, 2),
+ [P7_CAM_0_DATA06] = P7CTL_INIT_FUNC(CAM_0_DATA06, 197, 0),
+ [P7_GPIO_197] = P7CTL_INIT_FUNC(GPIO_197, 197, 1),
+ [P7_CAM_1_DATA14] = P7CTL_INIT_FUNC(CAM_1_DATA14, 197, 2),
+ [P7_CAM_0_DATA07] = P7CTL_INIT_FUNC(CAM_0_DATA07, 198, 0),
+ [P7_GPIO_198] = P7CTL_INIT_FUNC(GPIO_198, 198, 1),
+ [P7_CAM_1_DATA15] = P7CTL_INIT_FUNC(CAM_1_DATA15, 198, 2),
+ [P7_CAM_1_CLK] = P7CTL_INIT_FUNC(CAM_1_CLK, 199, 0),
+ [P7_GPIO_199] = P7CTL_INIT_FUNC(GPIO_199, 199, 1),
+ [P7_SPI_00b] = P7CTL_INIT_FUNC(SPI_00b, 199, 2),
+ [P7_CAM_1_DATA00] = P7CTL_INIT_FUNC(CAM_1_DATA00, 200, 0),
+ [P7_GPIO_200] = P7CTL_INIT_FUNC(GPIO_200, 200, 1),
+ [P7_SPI_01b] = P7CTL_INIT_FUNC(SPI_01b, 200, 2),
+ [P7_CAM_0_VSa] = P7CTL_INIT_FUNC(CAM_0_VSa, 200, 3),
+ [P7_CAM_1_DATA01] = P7CTL_INIT_FUNC(CAM_1_DATA01, 201, 0),
+ [P7_GPIO_201] = P7CTL_INIT_FUNC(GPIO_201, 201, 1),
+ [P7_SPI_02b] = P7CTL_INIT_FUNC(SPI_02b, 201, 2),
+ [P7_CAM_0_HSa] = P7CTL_INIT_FUNC(CAM_0_HSa, 201, 3)
+};
+
+/*
+ * Bitmap of GPIO enabled pins
+ *
+ * Each physical pin usable as GPIO is indicated with a bit set to 1 in this
+ * bitmap. The bitmap is indexed by pin identifier.
+ *
+ * Content was generated with the help of the genpin.sh script located into
+ * this directory.
+ */
+static unsigned long p7_gpio_pins_r1[] = {
+ 0x00007fff,
+ 0xf80607c2,
+ 0x5fffff9f,
+ 0xfffff800,
+ 0xffffffff,
+ 0xffffffff,
+ 0x000003ff
+};
+
+const struct p7_pinctrl_config p7_pinctrl_config_r1 = {
+ .pin_descs = p7_pin_descs_r1,
+ .pin_descs_sz = ARRAY_SIZE(p7_pin_descs_r1),
+ .pin_funcs = p7_pin_funcs_r1,
+ .gpio_pins = p7_gpio_pins_r1,
+ .gpio_pins_sz = ARRAY_SIZE(p7_gpio_pins_r1),
+};
diff --git a/arch/arm/mach-parrot7/pindesc-r2.c b/arch/arm/mach-parrot7/pindesc-r2.c
new file mode 100644
index 00000000000000..6295a6f385f75e
--- /dev/null
+++ b/arch/arm/mach-parrot7/pindesc-r2.c
@@ -0,0 +1,855 @@
+#include "common.h"
+#include "pinctrl.h"
+
+/*
+ * Physical pin descriptors
+ *
+ * Pin identifiers use the GPIO numbering scheme. Names are assigned by
+ * concatenating all possible multiplexing functions a pin may be used with.
+ *
+ * Content was generated with the help of the genpin.sh script located into
+ * this directory.
+ */
+/* Physical pin descriptors */
+static struct pinctrl_pin_desc const p7_pin_descs_r2[] = {
+ P7CTL_INIT_PIN(0, "CAM_1_DATA02__GPIO_000__SPI_03b__CAM_3_VSa"),
+ P7CTL_INIT_PIN(1, "CAM_1_DATA03__GPIO_001__SPI_04b__CAM_3_HSa"),
+ P7CTL_INIT_PIN(2, "CAM_1_DATA04__GPIO_002__SPI_05b__CAM_4_VSa"),
+ P7CTL_INIT_PIN(3, "CAM_1_DATA05__GPIO_003__SPI_06b__CAM_4_HSa"),
+ P7CTL_INIT_PIN(4, "CAM_1_DATA06__GPIO_004__SPI_07b__CAM_5_VSa"),
+ P7CTL_INIT_PIN(5, "CAM_1_DATA07__GPIO_005__SPI_08b__CAM_5_HSa"),
+ P7CTL_INIT_PIN(6, "CAM_5_CLK__GPIO_006__SD_2_CLK"),
+ P7CTL_INIT_PIN(7, "CAM_5_DATA00__GPIO_007__SD_2_CMD"),
+ P7CTL_INIT_PIN(8, "CAM_5_DATA01__GPIO_008__SD_2_DAT00"),
+ P7CTL_INIT_PIN(9, "CAM_5_DATA02__GPIO_009__SD_2_DAT01"),
+ P7CTL_INIT_PIN(10, "CAM_5_DATA03__GPIO_010__SD_2_DAT02"),
+ P7CTL_INIT_PIN(11, "CAM_5_DATA04__GPIO_011__SD_2_DAT03"),
+ P7CTL_INIT_PIN(12, "CAM_5_DATA05__GPIO_012"),
+ P7CTL_INIT_PIN(13, "CAM_5_DATA06__GPIO_013"),
+ P7CTL_INIT_PIN(14, "CAM_5_DATA07__GPIO_014"),
+ P7CTL_INIT_PIN(15, "SD_1_CLK__GPIO_015__UART_5_TX"),
+ P7CTL_INIT_PIN(16, "SD_1_CMD__GPIO_016__UART_5_RX"),
+ P7CTL_INIT_PIN(17, "SD_1_DAT00__GPIO_017__UART_6_TX"),
+ P7CTL_INIT_PIN(18, "SD_1_DAT02__GPIO_018__UART_7_TX"),
+ P7CTL_INIT_PIN(19, "SD_1_DAT01__GPIO_019__UART_6_RX"),
+ P7CTL_INIT_PIN(20, "SD_1_DAT03__GPIO_020__UART_7_RX"),
+ P7CTL_INIT_PIN(21, "SD_0_CLK__GPIO_021"),
+ P7CTL_INIT_PIN(22, "SD_0_CMD__GPIO_022"),
+ P7CTL_INIT_PIN(23, "SD_0_DAT00__GPIO_023"),
+ P7CTL_INIT_PIN(24, "SD_0_DAT01__GPIO_024"),
+ P7CTL_INIT_PIN(25, "SD_0_DAT02__GPIO_025"),
+ P7CTL_INIT_PIN(26, "SD_0_DAT03__GPIO_026"),
+ P7CTL_INIT_PIN(27, "NAND_NCE__GPIO_027__SPI_00a"),
+ P7CTL_INIT_PIN(28, "NAND_NR__GPIO_028__SPI_01a"),
+ P7CTL_INIT_PIN(29, "NAND_NW__GPIO_029__SPI_02a"),
+ P7CTL_INIT_PIN(30, "NAND_AL__GPIO_030__SPI_03a"),
+ P7CTL_INIT_PIN(31, "NAND_CL__GPIO_031__SPI_04a"),
+ P7CTL_INIT_PIN(32, "NAND_RNB__GPIO_032__SPI_05a"),
+ P7CTL_INIT_PIN(33, "NAND_DQS__GPIO_033__CAN1_RXa"),
+ P7CTL_INIT_PIN(34, "NAND_DATA_00__GPIO_034__SPI_06a"),
+ P7CTL_INIT_PIN(35, "NAND_DATA_01__GPIO_035__SPI_07a"),
+ P7CTL_INIT_PIN(36, "NAND_DATA_02__GPIO_036__SPI_08a"),
+ P7CTL_INIT_PIN(37, "NAND_DATA_03__GPIO_037__SPI_09a"),
+ P7CTL_INIT_PIN(38, "NAND_DATA_04__GPIO_038"),
+ P7CTL_INIT_PIN(39, "NAND_DATA_05__GPIO_039"),
+ P7CTL_INIT_PIN(40, "NAND_DATA_06__GPIO_040"),
+ P7CTL_INIT_PIN(41, "NAND_DATA_07__GPIO_041"),
+ P7CTL_INIT_PIN(42, "NAND_WP__GPIO_042__CAN1_TXa"),
+ P7CTL_INIT_PIN(43, "FORCE_POWER_ON__GPIO_043"),
+ P7CTL_INIT_PIN(44, "TRST_N__GPIO_044"),
+ P7CTL_INIT_PIN(45, "TDI__GPIO_045"),
+ P7CTL_INIT_PIN(46, "TDO__GPIO_046"),
+ P7CTL_INIT_PIN(47, "TCK__GPIO_047"),
+ P7CTL_INIT_PIN(48, "TMS__GPIO_048"),
+ P7CTL_INIT_PIN(49, "GPIO_049"),
+ P7CTL_INIT_PIN(50, "GPIO_050"),
+ P7CTL_INIT_PIN(51, "UART_1_RX__GPIO_051"),
+ P7CTL_INIT_PIN(52, "UART_1_TX__GPIO_052"),
+ P7CTL_INIT_PIN(53, "UART_1_RTS__GPIO_053__CAN0_RXb"),
+ P7CTL_INIT_PIN(54, "UART_1_CTS__GPIO_054__CAN0_TXb"),
+ P7CTL_INIT_PIN(55, "UART_0_RX__GPIO_055"),
+ P7CTL_INIT_PIN(56, "UART_0_TX__GPIO_056"),
+ P7CTL_INIT_PIN(57, "UART_0_RTS__GPIO_057"),
+ P7CTL_INIT_PIN(58, "UART_0_CTS__GPIO_058"),
+ P7CTL_INIT_PIN(59, "I2C_2_DAT__GPIO_059__I2C_2_SL_DAT"),
+ P7CTL_INIT_PIN(60, "I2C_2_CLK__GPIO_060__I2C_2_SL_CLK"),
+ P7CTL_INIT_PIN(61, "I2C_1_DAT__GPIO_061__I2C_1_SL_DAT"),
+ P7CTL_INIT_PIN(62, "I2C_1_CLK__GPIO_062__I2C_1_SL_CLK"),
+ P7CTL_INIT_PIN(63, "I2C_0_DAT__GPIO_063__I2C_0_SL_DAT"),
+ P7CTL_INIT_PIN(64, "I2C_0_CLK__GPIO_064__I2C_0_SL_CLK"),
+ P7CTL_INIT_PIN(65, "UART_4_RX__GPIO_065"),
+ P7CTL_INIT_PIN(66, "UART_4_TX__GPIO_066"),
+ P7CTL_INIT_PIN(67, "UART_3_RX__GPIO_067"),
+ P7CTL_INIT_PIN(68, "UART_3_TX__GPIO_068"),
+ P7CTL_INIT_PIN(69, "UART_2_RX__GPIO_069__CAN1_TXb"),
+ P7CTL_INIT_PIN(70, "UART_2_TX__GPIO_070__CAN1_RXb"),
+ P7CTL_INIT_PIN(71, "SPI_09b__GPIO_071__PWM_15"),
+ P7CTL_INIT_PIN(72, "SPI_10b__GPIO_072"),
+ P7CTL_INIT_PIN(73, "SPI_11b__GPIO_073"),
+ P7CTL_INIT_PIN(74, "SPI_12b__GPIO_074"),
+ P7CTL_INIT_PIN(75, "SPI_13b__GPIO_075"),
+ P7CTL_INIT_PIN(76, "SPI_14b__GPIO_076"),
+ P7CTL_INIT_PIN(77, "SPI_00__GPIO_077__AAI_15__CAN1_TX"),
+ P7CTL_INIT_PIN(78, "SPI_01__GPIO_078__AAI_16__CAN1_RX"),
+ P7CTL_INIT_PIN(79, "SPI_02__GPIO_079__AAI_17__CAN0_TX"),
+ P7CTL_INIT_PIN(80, "SPI_03__GPIO_080__AAI_18__CAN0_RX"),
+ P7CTL_INIT_PIN(81, "SPI_04__GPIO_081__AAI_19"),
+ P7CTL_INIT_PIN(82, "SPI_05__GPIO_082__AAI_20"),
+ P7CTL_INIT_PIN(83, "SPI_06__GPIO_083__AAI_21"),
+ P7CTL_INIT_PIN(84, "SPI_07__GPIO_084__AAI_22"),
+ P7CTL_INIT_PIN(85, "SPI_08__GPIO_085__AAI_23"),
+ P7CTL_INIT_PIN(86, "SPI_09__GPIO_086__AAI_24"),
+ P7CTL_INIT_PIN(87, "SPI_10__GPIO_087__AAI_25"),
+ P7CTL_INIT_PIN(88, "SPI_11__GPIO_088__AAI_26"),
+ P7CTL_INIT_PIN(89, "SPI_12__GPIO_089__AAI_27"),
+ P7CTL_INIT_PIN(90, "SPI_13__GPIO_090__AAI_28"),
+ P7CTL_INIT_PIN(91, "SPI_14__GPIO_091__AAI_29"),
+ P7CTL_INIT_PIN(92, "SPI_15__GPIO_092__AAI_30"),
+ P7CTL_INIT_PIN(93, "P7MU_CLK_OUT__GPIO_093__PWM_15a"),
+ P7CTL_INIT_PIN(94, "REBOOT_P7MU__GPIO_094"),
+ P7CTL_INIT_PIN(95, "ULPI_0_DATA00__GPIO_095"),
+ P7CTL_INIT_PIN(96, "ULPI_0_DATA01__GPIO_096"),
+ P7CTL_INIT_PIN(97, "ULPI_0_DATA02__GPIO_097"),
+ P7CTL_INIT_PIN(98, "ULPI_0_DATA03__GPIO_098"),
+ P7CTL_INIT_PIN(99, "ULPI_0_DATA04__GPIO_099"),
+ P7CTL_INIT_PIN(100, "ULPI_0_DATA05__GPIO_100"),
+ P7CTL_INIT_PIN(101, "ULPI_0_DATA06__GPIO_101"),
+ P7CTL_INIT_PIN(102, "ULPI_0_DATA07__GPIO_102"),
+ P7CTL_INIT_PIN(103, "ULPI_0_NXT__GPIO_103"),
+ P7CTL_INIT_PIN(104, "ULPI_0_DIR__GPIO_104"),
+ P7CTL_INIT_PIN(105, "ULPI_0_CLK__GPIO_105"),
+ P7CTL_INIT_PIN(106, "ULPI_1_DATA00__GPIO_106__I2C_2_CLKa"),
+ P7CTL_INIT_PIN(107, "ULPI_1_DATA01__GPIO_107__I2C_2_DATa"),
+ P7CTL_INIT_PIN(108, "ULPI_1_DATA02__GPIO_108__I2C_2_CLKb"),
+ P7CTL_INIT_PIN(109, "ULPI_1_DATA03__GPIO_109__I2C_2_DATb"),
+ P7CTL_INIT_PIN(110, "ULPI_1_DATA04__GPIO_110__I2C_2_CLKc"),
+ P7CTL_INIT_PIN(111, "ULPI_1_DATA05__GPIO_111__I2C_2_DATc"),
+ P7CTL_INIT_PIN(112, "ULPI_1_DATA06__GPIO_112__I2C_2_CLKd"),
+ P7CTL_INIT_PIN(113, "ULPI_1_DATA07__GPIO_113__I2C_2_DATd"),
+ P7CTL_INIT_PIN(114, "ULPI_1_NXT__GPIO_114__I2C_2_CLKe"),
+ P7CTL_INIT_PIN(115, "ULPI_1_DIR__GPIO_115"),
+ P7CTL_INIT_PIN(116, "ULPI_1_STP__GPIO_116__I2C_2_DATe"),
+ P7CTL_INIT_PIN(117, "ULPI_0_STP__GPIO_117"),
+ P7CTL_INIT_PIN(118, "ULPI_1_CLK__GPIO_118"),
+ P7CTL_INIT_PIN(119, "AAI_00__GPIO_119__PWM_00"),
+ P7CTL_INIT_PIN(120, "AAI_01__GPIO_120__PWM_01"),
+ P7CTL_INIT_PIN(121, "AAI_02__GPIO_121__PWM_02"),
+ P7CTL_INIT_PIN(122, "AAI_03__GPIO_122__PWM_03"),
+ P7CTL_INIT_PIN(123, "AAI_04__GPIO_123__PWM_04"),
+ P7CTL_INIT_PIN(124, "AAI_05__GPIO_124__PWM_05"),
+ P7CTL_INIT_PIN(125, "AAI_06__GPIO_125__PWM_06"),
+ P7CTL_INIT_PIN(126, "AAI_07__GPIO_126__PWM_07"),
+ P7CTL_INIT_PIN(127, "AAI_08__GPIO_127__PWM_08"),
+ P7CTL_INIT_PIN(128, "AAI_09__GPIO_128__PWM_09"),
+ P7CTL_INIT_PIN(129, "AAI_10__GPIO_129__PWM_10"),
+ P7CTL_INIT_PIN(130, "AAI_11__GPIO_130__PWM_11"),
+ P7CTL_INIT_PIN(131, "AAI_12__GPIO_131__PWM_12"),
+ P7CTL_INIT_PIN(132, "AAI_13__GPIO_132__PWM_13"),
+ P7CTL_INIT_PIN(133, "AAI_14__GPIO_133__PWM_14"),
+ P7CTL_INIT_PIN(134, "SPI_16__GPIO_134__ETH_MII_TXER"),
+ P7CTL_INIT_PIN(135, "SPI_17__GPIO_135__ETH_MII_RXER"),
+ P7CTL_INIT_PIN(136, "SPI_18__GPIO_136__ETH_MII_CRS"),
+ P7CTL_INIT_PIN(137, "SPI_19__GPIO_137__ETH_MII_COL"),
+ P7CTL_INIT_PIN(138, "ETH_RGMII_TXC__GPIO_138"),
+ P7CTL_INIT_PIN(139, "ETH_RGMII_TXD_00__GPIO_139"),
+ P7CTL_INIT_PIN(140, "ETH_RGMII_TXD_01__GPIO_140"),
+ P7CTL_INIT_PIN(141, "ETH_RGMII_TXD_02__GPIO_141"),
+ P7CTL_INIT_PIN(142, "ETH_RGMII_TXD_03__GPIO_142"),
+ P7CTL_INIT_PIN(143, "ETH_RGMII_TX_CTL__GPIO_143"),
+ P7CTL_INIT_PIN(144, "ETH_RGMII_RXC__GPIO_144"),
+ P7CTL_INIT_PIN(145, "ETH_RGMII_RXD_00__GPIO_145"),
+ P7CTL_INIT_PIN(146, "ETH_RGMII_RXD_01__GPIO_146"),
+ P7CTL_INIT_PIN(147, "ETH_RGMII_RXD_02__GPIO_147"),
+ P7CTL_INIT_PIN(148, "ETH_RGMII_RXD_03__GPIO_148"),
+ P7CTL_INIT_PIN(149, "ETH_RGMII_RX_CTL__GPIO_149"),
+ P7CTL_INIT_PIN(150, "ETH_MDC__GPIO_150"),
+ P7CTL_INIT_PIN(151, "ETH_MDIO__GPIO_151"),
+ P7CTL_INIT_PIN(152, "LCD_0_DEN__GPIO_152__CAM_1_VS"),
+ P7CTL_INIT_PIN(153, "LCD_0_HS__GPIO_153__CAM_1_HS__LCD_0_DENa"),
+ P7CTL_INIT_PIN(154, "LCD_0_VS__GPIO_154__CAM_0_VS__PAR_NCS"),
+ P7CTL_INIT_PIN(155, "LCD_0_DATA00__GPIO_155__CAM_0_HS__PAR_NRS"),
+ P7CTL_INIT_PIN(156, "LCD_0_DATA01__GPIO_156__CAM_2_VS__PAR_NRD"),
+ P7CTL_INIT_PIN(157, "LCD_0_DATA02__GPIO_157__CAM_2_HS__PAR_NWR"),
+ P7CTL_INIT_PIN(158, "LCD_0_DATA03__GPIO_158__CAM_3_VS__PAR_D00"),
+ P7CTL_INIT_PIN(159, "LCD_0_DATA04__GPIO_159__CAM_3_HS__PAR_D01"),
+ P7CTL_INIT_PIN(160, "LCD_0_DATA05__GPIO_160__CAM_5_VS__PAR_D02"),
+ P7CTL_INIT_PIN(161, "LCD_0_DATA06__GPIO_161__CAM_5_HS__PAR_D03"),
+ P7CTL_INIT_PIN(162, "LCD_0_DATA07__GPIO_162__CAM_0_DATA08__PAR_D04"),
+ P7CTL_INIT_PIN(163, "LCD_0_DATA08__GPIO_163__CAM_0_DATA09__PAR_D05"),
+ P7CTL_INIT_PIN(164, "LCD_0_DATA09__GPIO_164__CAM_0_DATA10__PAR_D06"),
+ P7CTL_INIT_PIN(165, "LCD_0_DATA10__GPIO_165__CAM_0_DATA11__PAR_D07"),
+ P7CTL_INIT_PIN(166, "LCD_0_DATA11__GPIO_166__CAM_0_DATA12__PAR_D08"),
+ P7CTL_INIT_PIN(167, "LCD_0_DATA12__GPIO_167__CAM_0_DATA13__PAR_D09"),
+ P7CTL_INIT_PIN(168, "LCD_0_DATA13__GPIO_168__CAM_0_DATA14__PAR_D10"),
+ P7CTL_INIT_PIN(169, "LCD_0_DATA14__GPIO_169__CAM_0_DATA15__PAR_D11"),
+ P7CTL_INIT_PIN(170, "LCD_0_DATA15__GPIO_170__CAM_0_DATA16__PAR_D12"),
+ P7CTL_INIT_PIN(171, "LCD_0_DATA16__GPIO_171__CAM_0_DATA17__PAR_D13"),
+ P7CTL_INIT_PIN(172, "LCD_0_DATA17__GPIO_172__CAM_0_DATA18__PAR_D14"),
+ P7CTL_INIT_PIN(173, "LCD_0_DATA18__GPIO_173__CAM_0_DATA19__PAR_D15"),
+ P7CTL_INIT_PIN(174, "LCD_0_DATA19__GPIO_174__CAM_0_DATA20"),
+ P7CTL_INIT_PIN(175, "LCD_0_DATA20__GPIO_175__CAM_0_DATA21"),
+ P7CTL_INIT_PIN(176, "LCD_0_DATA21__GPIO_176__CAM_0_DATA22"),
+ P7CTL_INIT_PIN(177, "LCD_0_DATA22__GPIO_177__CAM_0_DATA23"),
+ P7CTL_INIT_PIN(178, "LCD_0_DATA23__GPIO_178__CAM_4_VS"),
+ P7CTL_INIT_PIN(179, "LCD_0_CLK__GPIO_179__CAM_4_HS"),
+ P7CTL_INIT_PIN(180, "LCD_1_HS__GPIO_180__CAM_2_CLK__LCD_1_DENa"),
+ P7CTL_INIT_PIN(181, "LCD_1_VS__GPIO_181__CAM_2_DATA00__CPU_TRACECLKOUT"),
+ P7CTL_INIT_PIN(182, "LCD_1_CLK__GPIO_182__CAM_2_DATA01__CPU_TRACECTL"),
+ P7CTL_INIT_PIN(183, "LCD_1_DATA00__GPIO_183__CAM_2_DATA02__CPU_TRACEDATA_00"),
+ P7CTL_INIT_PIN(184, "LCD_1_DATA01__GPIO_184__CAM_2_DATA03__CPU_TRACEDATA_01"),
+ P7CTL_INIT_PIN(185, "LCD_1_DATA02__GPIO_185__CAM_2_DATA04__CPU_TRACEDATA_02"),
+ P7CTL_INIT_PIN(186, "LCD_1_DATA03__GPIO_186__CAM_2_DATA05__CPU_TRACEDATA_03"),
+ P7CTL_INIT_PIN(187, "LCD_1_DATA04__GPIO_187__CAM_2_DATA06__CPU_TRACEDATA_04"),
+ P7CTL_INIT_PIN(188, "LCD_1_DATA05__GPIO_188__CAM_2_DATA07__CPU_TRACEDATA_05"),
+ P7CTL_INIT_PIN(189, "LCD_1_DATA06__GPIO_189__CAM_3_CLK__CPU_TRACEDATA_06"),
+ P7CTL_INIT_PIN(190, "LCD_1_DATA07__GPIO_190__CAM_3_DATA00__CPU_TRACEDATA_07"),
+ P7CTL_INIT_PIN(191, "LCD_1_DATA08__GPIO_191__CAM_3_DATA01__CPU_TRACEDATA_08"),
+ P7CTL_INIT_PIN(192, "LCD_1_DATA09__GPIO_192__CAM_3_DATA02__CPU_TRACEDATA_09"),
+ P7CTL_INIT_PIN(193, "LCD_1_DATA10__GPIO_193__CAM_3_DATA03__CPU_TRACEDATA_10"),
+ P7CTL_INIT_PIN(194, "LCD_1_DATA11__GPIO_194__CAM_3_DATA04__CPU_TRACEDATA_11"),
+ P7CTL_INIT_PIN(195, "LCD_1_DATA12__GPIO_195__CAM_3_DATA05__CPU_TRACEDATA_12"),
+ P7CTL_INIT_PIN(196, "LCD_1_DATA13__GPIO_196__CAM_3_DATA06__CPU_TRACEDATA_13"),
+ P7CTL_INIT_PIN(197, "LCD_1_DATA14__GPIO_197__CAM_3_DATA07__CPU_TRACEDATA_14"),
+ P7CTL_INIT_PIN(198, "LCD_1_DATA15__GPIO_198__CAM_4_CLK__CPU_TRACEDATA_15"),
+ P7CTL_INIT_PIN(199, "LCD_1_DATA16__GPIO_199__CAM_4_DATA00"),
+ P7CTL_INIT_PIN(200, "LCD_1_DATA17__GPIO_200__CAM_4_DATA01"),
+ P7CTL_INIT_PIN(201, "LCD_1_DATA18__GPIO_201__CAM_4_DATA02"),
+ P7CTL_INIT_PIN(202, "LCD_1_DATA19__GPIO_202__CAM_4_DATA03"),
+ P7CTL_INIT_PIN(203, "LCD_1_DATA20__GPIO_203__CAM_4_DATA04"),
+ P7CTL_INIT_PIN(204, "LCD_1_DATA21__GPIO_204__CAM_4_DATA05"),
+ P7CTL_INIT_PIN(205, "LCD_1_DATA22__GPIO_205__CAM_4_DATA06"),
+ P7CTL_INIT_PIN(206, "LCD_1_DATA23__GPIO_206__CAM_4_DATA07"),
+ P7CTL_INIT_PIN(207, "LCD_1_DEN__GPIO_207__CAM_1_HSa__LCD_1_CLKa"),
+ P7CTL_INIT_PIN(208, "CAM_0_CLK__GPIO_208"),
+ P7CTL_INIT_PIN(209, "CAM_0_DATA00__GPIO_209__CAM_1_DATA08"),
+ P7CTL_INIT_PIN(210, "CAM_0_DATA01__GPIO_210__CAM_1_DATA09"),
+ P7CTL_INIT_PIN(211, "CAM_0_DATA02__GPIO_211__CAM_1_DATA10"),
+ P7CTL_INIT_PIN(212, "CAM_0_DATA03__GPIO_212__CAM_1_DATA11"),
+ P7CTL_INIT_PIN(213, "CAM_0_DATA04__GPIO_213__CAM_1_DATA12"),
+ P7CTL_INIT_PIN(214, "CAM_0_DATA05__GPIO_214__CAM_1_DATA13"),
+ P7CTL_INIT_PIN(215, "CAM_0_DATA06__GPIO_215__CAM_1_DATA14"),
+ P7CTL_INIT_PIN(216, "CAM_0_DATA07__GPIO_216__CAM_1_DATA15"),
+ P7CTL_INIT_PIN(217, "CAM_1_CLK__GPIO_217__SPI_00b"),
+ P7CTL_INIT_PIN(218, "CAM_1_DATA00__GPIO_218__SPI_01b__CAM_0_VSa"),
+ P7CTL_INIT_PIN(219, "CAM_1_DATA01__GPIO_219__SPI_02b__CAM_0_HSa")
+};
+
+/* Pinmux functions */
+static struct p7ctl_function const p7_pin_funcs_r2[P7_N_FUNCTIONS] = {
+ [P7_CAM_1_DATA02] = P7CTL_INIT_FUNC(CAM_1_DATA02, 0, 0),
+ [P7_GPIO_000] = P7CTL_INIT_FUNC(GPIO_000, 0, 1),
+ [P7_SPI_03b] = P7CTL_INIT_FUNC(SPI_03b, 0, 2),
+ [P7_CAM_3_VSa] = P7CTL_INIT_FUNC(CAM_3_VSa, 0, 3),
+ [P7_CAM_1_DATA03] = P7CTL_INIT_FUNC(CAM_1_DATA03, 1, 0),
+ [P7_GPIO_001] = P7CTL_INIT_FUNC(GPIO_001, 1, 1),
+ [P7_SPI_04b] = P7CTL_INIT_FUNC(SPI_04b, 1, 2),
+ [P7_CAM_3_HSa] = P7CTL_INIT_FUNC(CAM_3_HSa, 1, 3),
+ [P7_CAM_1_DATA04] = P7CTL_INIT_FUNC(CAM_1_DATA04, 2, 0),
+ [P7_GPIO_002] = P7CTL_INIT_FUNC(GPIO_002, 2, 1),
+ [P7_SPI_05b] = P7CTL_INIT_FUNC(SPI_05b, 2, 2),
+ [P7_CAM_4_VSa] = P7CTL_INIT_FUNC(CAM_4_VSa, 2, 3),
+ [P7_CAM_1_DATA05] = P7CTL_INIT_FUNC(CAM_1_DATA05, 3, 0),
+ [P7_GPIO_003] = P7CTL_INIT_FUNC(GPIO_003, 3, 1),
+ [P7_SPI_06b] = P7CTL_INIT_FUNC(SPI_06b, 3, 2),
+ [P7_CAM_4_HSa] = P7CTL_INIT_FUNC(CAM_4_HSa, 3, 3),
+ [P7_CAM_1_DATA06] = P7CTL_INIT_FUNC(CAM_1_DATA06, 4, 0),
+ [P7_GPIO_004] = P7CTL_INIT_FUNC(GPIO_004, 4, 1),
+ [P7_SPI_07b] = P7CTL_INIT_FUNC(SPI_07b, 4, 2),
+ [P7_CAM_5_VSa] = P7CTL_INIT_FUNC(CAM_5_VSa, 4, 3),
+ [P7_CAM_1_DATA07] = P7CTL_INIT_FUNC(CAM_1_DATA07, 5, 0),
+ [P7_GPIO_005] = P7CTL_INIT_FUNC(GPIO_005, 5, 1),
+ [P7_SPI_08b] = P7CTL_INIT_FUNC(SPI_08b, 5, 2),
+ [P7_CAM_5_HSa] = P7CTL_INIT_FUNC(CAM_5_HSa, 5, 3),
+ [P7_CAM_5_CLK] = P7CTL_INIT_FUNC(CAM_5_CLK, 6, 0),
+ [P7_GPIO_006] = P7CTL_INIT_FUNC(GPIO_006, 6, 1),
+ [P7_SD_2_CLK] = P7CTL_INIT_FUNC(SD_2_CLK, 6, 2),
+ [P7_CAM_5_DATA00] = P7CTL_INIT_FUNC(CAM_5_DATA00, 7, 0),
+ [P7_GPIO_007] = P7CTL_INIT_FUNC(GPIO_007, 7, 1),
+ [P7_SD_2_CMD] = P7CTL_INIT_FUNC(SD_2_CMD, 7, 2),
+ [P7_CAM_5_DATA01] = P7CTL_INIT_FUNC(CAM_5_DATA01, 8, 0),
+ [P7_GPIO_008] = P7CTL_INIT_FUNC(GPIO_008, 8, 1),
+ [P7_SD_2_DAT00] = P7CTL_INIT_FUNC(SD_2_DAT00, 8, 2),
+ [P7_CAM_5_DATA02] = P7CTL_INIT_FUNC(CAM_5_DATA02, 9, 0),
+ [P7_GPIO_009] = P7CTL_INIT_FUNC(GPIO_009, 9, 1),
+ [P7_SD_2_DAT01] = P7CTL_INIT_FUNC(SD_2_DAT01, 9, 2),
+ [P7_CAM_5_DATA03] = P7CTL_INIT_FUNC(CAM_5_DATA03, 10, 0),
+ [P7_GPIO_010] = P7CTL_INIT_FUNC(GPIO_010, 10, 1),
+ [P7_SD_2_DAT02] = P7CTL_INIT_FUNC(SD_2_DAT02, 10, 2),
+ [P7_CAM_5_DATA04] = P7CTL_INIT_FUNC(CAM_5_DATA04, 11, 0),
+ [P7_GPIO_011] = P7CTL_INIT_FUNC(GPIO_011, 11, 1),
+ [P7_SD_2_DAT03] = P7CTL_INIT_FUNC(SD_2_DAT03, 11, 2),
+ [P7_CAM_5_DATA05] = P7CTL_INIT_FUNC(CAM_5_DATA05, 12, 0),
+ [P7_GPIO_012] = P7CTL_INIT_FUNC(GPIO_012, 12, 1),
+ [P7_CAM_5_DATA06] = P7CTL_INIT_FUNC(CAM_5_DATA06, 13, 0),
+ [P7_GPIO_013] = P7CTL_INIT_FUNC(GPIO_013, 13, 1),
+ [P7_CAM_5_DATA07] = P7CTL_INIT_FUNC(CAM_5_DATA07, 14, 0),
+ [P7_GPIO_014] = P7CTL_INIT_FUNC(GPIO_014, 14, 1),
+ [P7_SD_1_CLK] = P7CTL_INIT_FUNC(SD_1_CLK, 15, 0),
+ [P7_UART_5_TX] = P7CTL_INIT_FUNC(UART_5_TX, 15, 2),
+ [P7_SD_1_CMD] = P7CTL_INIT_FUNC(SD_1_CMD, 16, 0),
+ [P7_UART_5_RX] = P7CTL_INIT_FUNC(UART_5_RX, 16, 2),
+ [P7_SD_1_DAT00] = P7CTL_INIT_FUNC(SD_1_DAT00, 17, 0),
+ [P7_UART_6_TX] = P7CTL_INIT_FUNC(UART_6_TX, 17, 2),
+ [P7_SD_1_DAT02] = P7CTL_INIT_FUNC(SD_1_DAT02, 18, 0),
+ [P7_UART_7_TX] = P7CTL_INIT_FUNC(UART_7_TX, 18, 2),
+ [P7_SD_1_DAT01] = P7CTL_INIT_FUNC(SD_1_DAT01, 19, 0),
+ [P7_UART_6_RX] = P7CTL_INIT_FUNC(UART_6_RX, 19, 2),
+ [P7_SD_1_DAT03] = P7CTL_INIT_FUNC(SD_1_DAT03, 20, 0),
+ [P7_UART_7_RX] = P7CTL_INIT_FUNC(UART_7_RX, 20, 2),
+ [P7_SD_0_CLK] = P7CTL_INIT_FUNC(SD_0_CLK, 21, 0),
+ [P7_SD_0_CMD] = P7CTL_INIT_FUNC(SD_0_CMD, 22, 0),
+ [P7_SD_0_DAT00] = P7CTL_INIT_FUNC(SD_0_DAT00, 23, 0),
+ [P7_SD_0_DAT01] = P7CTL_INIT_FUNC(SD_0_DAT01, 24, 0),
+ [P7_SD_0_DAT02] = P7CTL_INIT_FUNC(SD_0_DAT02, 25, 0),
+ [P7_SD_0_DAT03] = P7CTL_INIT_FUNC(SD_0_DAT03, 26, 0),
+ [P7_NAND_NCE] = P7CTL_INIT_FUNC(NAND_NCE, 27, 0),
+ [P7_SPI_00a] = P7CTL_INIT_FUNC(SPI_00a, 27, 2),
+ [P7_NAND_NR] = P7CTL_INIT_FUNC(NAND_NR, 28, 0),
+ [P7_SPI_01a] = P7CTL_INIT_FUNC(SPI_01a, 28, 2),
+ [P7_NAND_NW] = P7CTL_INIT_FUNC(NAND_NW, 29, 0),
+ [P7_SPI_02a] = P7CTL_INIT_FUNC(SPI_02a, 29, 2),
+ [P7_NAND_AL] = P7CTL_INIT_FUNC(NAND_AL, 30, 0),
+ [P7_SPI_03a] = P7CTL_INIT_FUNC(SPI_03a, 30, 2),
+ [P7_NAND_CL] = P7CTL_INIT_FUNC(NAND_CL, 31, 0),
+ [P7_SPI_04a] = P7CTL_INIT_FUNC(SPI_04a, 31, 2),
+ [P7_NAND_RNB] = P7CTL_INIT_FUNC(NAND_RNB, 32, 0),
+ [P7_SPI_05a] = P7CTL_INIT_FUNC(SPI_05a, 32, 2),
+ [P7_NAND_DQS] = P7CTL_INIT_FUNC(NAND_DQS, 33, 0),
+ [P7_GPIO_033] = P7CTL_INIT_FUNC(GPIO_033, 33, 1),
+ [P7_CAN1_RXa] = P7CTL_INIT_FUNC(CAN1_RXa, 33, 2),
+ [P7_NAND_DATA_00] = P7CTL_INIT_FUNC(NAND_DATA_00, 34, 0),
+ [P7_SPI_06a] = P7CTL_INIT_FUNC(SPI_06a, 34, 2),
+ [P7_NAND_DATA_01] = P7CTL_INIT_FUNC(NAND_DATA_01, 35, 0),
+ [P7_SPI_07a] = P7CTL_INIT_FUNC(SPI_07a, 35, 2),
+ [P7_NAND_DATA_02] = P7CTL_INIT_FUNC(NAND_DATA_02, 36, 0),
+ [P7_SPI_08a] = P7CTL_INIT_FUNC(SPI_08a, 36, 2),
+ [P7_NAND_DATA_03] = P7CTL_INIT_FUNC(NAND_DATA_03, 37, 0),
+ [P7_SPI_09a] = P7CTL_INIT_FUNC(SPI_09a, 37, 2),
+ [P7_NAND_DATA_04] = P7CTL_INIT_FUNC(NAND_DATA_04, 38, 0),
+ [P7_GPIO_038] = P7CTL_INIT_FUNC(GPIO_038, 38, 1),
+ [P7_NAND_DATA_05] = P7CTL_INIT_FUNC(NAND_DATA_05, 39, 0),
+ [P7_GPIO_039] = P7CTL_INIT_FUNC(GPIO_039, 39, 1),
+ [P7_NAND_DATA_06] = P7CTL_INIT_FUNC(NAND_DATA_06, 40, 0),
+ [P7_GPIO_040] = P7CTL_INIT_FUNC(GPIO_040, 40, 1),
+ [P7_NAND_DATA_07] = P7CTL_INIT_FUNC(NAND_DATA_07, 41, 0),
+ [P7_GPIO_041] = P7CTL_INIT_FUNC(GPIO_041, 41, 1),
+ [P7_NAND_WP] = P7CTL_INIT_FUNC(NAND_WP, 42, 0),
+ [P7_GPIO_042] = P7CTL_INIT_FUNC(GPIO_042, 42, 1),
+ [P7_CAN1_TXa] = P7CTL_INIT_FUNC(CAN1_TXa, 42, 2),
+ [P7_FORCE_POWER_ON] = P7CTL_INIT_FUNC(FORCE_POWER_ON, 43, 0),
+ [P7_TRST_N] = P7CTL_INIT_FUNC(TRST_N, 44, 0),
+ [P7_TDI] = P7CTL_INIT_FUNC(TDI, 45, 0),
+ [P7_TDO] = P7CTL_INIT_FUNC(TDO, 46, 0),
+ [P7_TCK] = P7CTL_INIT_FUNC(TCK, 47, 0),
+ [P7_TMS] = P7CTL_INIT_FUNC(TMS, 48, 0),
+ [P7_GPIO_049] = P7CTL_INIT_FUNC(GPIO_049, 49, 1),
+ [P7_GPIO_050] = P7CTL_INIT_FUNC(GPIO_050, 50, 1),
+ [P7_UART_1_RX] = P7CTL_INIT_FUNC(UART_1_RX, 51, 0),
+ [P7_UART_1_TX] = P7CTL_INIT_FUNC(UART_1_TX, 52, 0),
+ [P7_UART_1_RTS] = P7CTL_INIT_FUNC(UART_1_RTS, 53, 0),
+ [P7_CAN0_RXb] = P7CTL_INIT_FUNC(CAN0_RXb, 53, 2),
+ [P7_UART_1_CTS] = P7CTL_INIT_FUNC(UART_1_CTS, 54, 0),
+ [P7_CAN0_TXb] = P7CTL_INIT_FUNC(CAN0_TXb, 54, 2),
+ [P7_UART_0_RX] = P7CTL_INIT_FUNC(UART_0_RX, 55, 0),
+ [P7_GPIO_055] = P7CTL_INIT_FUNC(GPIO_055, 55, 1),
+ [P7_UART_0_TX] = P7CTL_INIT_FUNC(UART_0_TX, 56, 0),
+ [P7_GPIO_056] = P7CTL_INIT_FUNC(GPIO_056, 56, 1),
+ [P7_UART_0_RTS] = P7CTL_INIT_FUNC(UART_0_RTS, 57, 0),
+ [P7_GPIO_057] = P7CTL_INIT_FUNC(GPIO_057, 57, 1),
+ [P7_UART_0_CTS] = P7CTL_INIT_FUNC(UART_0_CTS, 58, 0),
+ [P7_GPIO_058] = P7CTL_INIT_FUNC(GPIO_058, 58, 1),
+ [P7_I2C_2_DAT] = P7CTL_INIT_FUNC(I2C_2_DAT, 59, 0),
+ [P7_GPIO_059] = P7CTL_INIT_FUNC(GPIO_059, 59, 1),
+ [P7_I2C_2_SL_DAT] = P7CTL_INIT_FUNC(I2C_2_SL_DAT, 59, 2),
+ [P7_I2C_2_CLK] = P7CTL_INIT_FUNC(I2C_2_CLK, 60, 0),
+ [P7_GPIO_060] = P7CTL_INIT_FUNC(GPIO_060, 60, 1),
+ [P7_I2C_2_SL_CLK] = P7CTL_INIT_FUNC(I2C_2_SL_CLK, 60, 2),
+ [P7_I2C_1_DAT] = P7CTL_INIT_FUNC(I2C_1_DAT, 61, 0),
+ [P7_GPIO_061] = P7CTL_INIT_FUNC(GPIO_061, 61, 1),
+ [P7_I2C_1_SL_DAT] = P7CTL_INIT_FUNC(I2C_1_SL_DAT, 61, 2),
+ [P7_I2C_1_CLK] = P7CTL_INIT_FUNC(I2C_1_CLK, 62, 0),
+ [P7_GPIO_062] = P7CTL_INIT_FUNC(GPIO_062, 62, 1),
+ [P7_I2C_1_SL_CLK] = P7CTL_INIT_FUNC(I2C_1_SL_CLK, 62, 2),
+ [P7_I2C_0_DAT] = P7CTL_INIT_FUNC(I2C_0_DAT, 63, 0),
+ [P7_GPIO_063] = P7CTL_INIT_FUNC(GPIO_063, 63, 1),
+ [P7_I2C_0_SL_DAT] = P7CTL_INIT_FUNC(I2C_0_SL_DAT, 63, 2),
+ [P7_I2C_0_CLK] = P7CTL_INIT_FUNC(I2C_0_CLK, 64, 0),
+ [P7_GPIO_064] = P7CTL_INIT_FUNC(GPIO_064, 64, 1),
+ [P7_I2C_0_SL_CLK] = P7CTL_INIT_FUNC(I2C_0_SL_CLK, 64, 2),
+ [P7_UART_4_RX] = P7CTL_INIT_FUNC(UART_4_RX, 65, 0),
+ [P7_GPIO_065] = P7CTL_INIT_FUNC(GPIO_065, 65, 1),
+ [P7_UART_4_TX] = P7CTL_INIT_FUNC(UART_4_TX, 66, 0),
+ [P7_GPIO_066] = P7CTL_INIT_FUNC(GPIO_066, 66, 1),
+ [P7_UART_3_RX] = P7CTL_INIT_FUNC(UART_3_RX, 67, 0),
+ [P7_GPIO_067] = P7CTL_INIT_FUNC(GPIO_067, 67, 1),
+ [P7_UART_3_TX] = P7CTL_INIT_FUNC(UART_3_TX, 68, 0),
+ [P7_GPIO_068] = P7CTL_INIT_FUNC(GPIO_068, 68, 1),
+ [P7_UART_2_RX] = P7CTL_INIT_FUNC(UART_2_RX, 69, 0),
+ [P7_CAN1_TXb] = P7CTL_INIT_FUNC(CAN1_TXb, 69, 2),
+ [P7_UART_2_TX] = P7CTL_INIT_FUNC(UART_2_TX, 70, 0),
+ [P7_CAN1_RXb] = P7CTL_INIT_FUNC(CAN1_RXb, 70, 2),
+ [P7_SPI_09b] = P7CTL_INIT_FUNC(SPI_09b, 71, 0),
+ [P7_GPIO_071] = P7CTL_INIT_FUNC(GPIO_071, 71, 1),
+ [P7_PWM_15] = P7CTL_INIT_FUNC(PWM_15, 71, 2),
+ [P7_SPI_10b] = P7CTL_INIT_FUNC(SPI_10b, 72, 0),
+ [P7_GPIO_072] = P7CTL_INIT_FUNC(GPIO_072, 72, 1),
+ [P7_SPI_11b] = P7CTL_INIT_FUNC(SPI_11b, 73, 0),
+ [P7_GPIO_073] = P7CTL_INIT_FUNC(GPIO_073, 73, 1),
+ [P7_SPI_12b] = P7CTL_INIT_FUNC(SPI_12b, 74, 0),
+ [P7_GPIO_074] = P7CTL_INIT_FUNC(GPIO_074, 74, 1),
+ [P7_SPI_13b] = P7CTL_INIT_FUNC(SPI_13b, 75, 0),
+ [P7_GPIO_075] = P7CTL_INIT_FUNC(GPIO_075, 75, 1),
+ [P7_SPI_14b] = P7CTL_INIT_FUNC(SPI_14b, 76, 0),
+ [P7_GPIO_076] = P7CTL_INIT_FUNC(GPIO_076, 76, 1),
+ [P7_SPI_00] = P7CTL_INIT_FUNC(SPI_00, 77, 0),
+ [P7_GPIO_077] = P7CTL_INIT_FUNC(GPIO_077, 77, 1),
+ [P7_AAI_15] = P7CTL_INIT_FUNC(AAI_15, 77, 2),
+ [P7_CAN1_TX] = P7CTL_INIT_FUNC(CAN1_TX, 77, 3),
+ [P7_SPI_01] = P7CTL_INIT_FUNC(SPI_01, 78, 0),
+ [P7_GPIO_078] = P7CTL_INIT_FUNC(GPIO_078, 78, 1),
+ [P7_AAI_16] = P7CTL_INIT_FUNC(AAI_16, 78, 2),
+ [P7_CAN1_RX] = P7CTL_INIT_FUNC(CAN1_RX, 78, 3),
+ [P7_SPI_02] = P7CTL_INIT_FUNC(SPI_02, 79, 0),
+ [P7_GPIO_079] = P7CTL_INIT_FUNC(GPIO_079, 79, 1),
+ [P7_AAI_17] = P7CTL_INIT_FUNC(AAI_17, 79, 2),
+ [P7_CAN0_TX] = P7CTL_INIT_FUNC(CAN0_TX, 79, 3),
+ [P7_SPI_03] = P7CTL_INIT_FUNC(SPI_03, 80, 0),
+ [P7_GPIO_080] = P7CTL_INIT_FUNC(GPIO_080, 80, 1),
+ [P7_AAI_18] = P7CTL_INIT_FUNC(AAI_18, 80, 2),
+ [P7_CAN0_RX] = P7CTL_INIT_FUNC(CAN0_RX, 80, 3),
+ [P7_SPI_04] = P7CTL_INIT_FUNC(SPI_04, 81, 0),
+ [P7_GPIO_081] = P7CTL_INIT_FUNC(GPIO_081, 81, 1),
+ [P7_AAI_19] = P7CTL_INIT_FUNC(AAI_19, 81, 2),
+ [P7_SPI_05] = P7CTL_INIT_FUNC(SPI_05, 82, 0),
+ [P7_GPIO_082] = P7CTL_INIT_FUNC(GPIO_082, 82, 1),
+ [P7_AAI_20] = P7CTL_INIT_FUNC(AAI_20, 82, 2),
+ [P7_SPI_06] = P7CTL_INIT_FUNC(SPI_06, 83, 0),
+ [P7_GPIO_083] = P7CTL_INIT_FUNC(GPIO_083, 83, 1),
+ [P7_AAI_21] = P7CTL_INIT_FUNC(AAI_21, 83, 2),
+ [P7_SPI_07] = P7CTL_INIT_FUNC(SPI_07, 84, 0),
+ [P7_GPIO_084] = P7CTL_INIT_FUNC(GPIO_084, 84, 1),
+ [P7_AAI_22] = P7CTL_INIT_FUNC(AAI_22, 84, 2),
+ [P7_SPI_08] = P7CTL_INIT_FUNC(SPI_08, 85, 0),
+ [P7_GPIO_085] = P7CTL_INIT_FUNC(GPIO_085, 85, 1),
+ [P7_AAI_23] = P7CTL_INIT_FUNC(AAI_23, 85, 2),
+ [P7_SPI_09] = P7CTL_INIT_FUNC(SPI_09, 86, 0),
+ [P7_GPIO_086] = P7CTL_INIT_FUNC(GPIO_086, 86, 1),
+ [P7_AAI_24] = P7CTL_INIT_FUNC(AAI_24, 86, 2),
+ [P7_SPI_10] = P7CTL_INIT_FUNC(SPI_10, 87, 0),
+ [P7_GPIO_087] = P7CTL_INIT_FUNC(GPIO_087, 87, 1),
+ [P7_AAI_25] = P7CTL_INIT_FUNC(AAI_25, 87, 2),
+ [P7_SPI_11] = P7CTL_INIT_FUNC(SPI_11, 88, 0),
+ [P7_GPIO_088] = P7CTL_INIT_FUNC(GPIO_088, 88, 1),
+ [P7_AAI_26] = P7CTL_INIT_FUNC(AAI_26, 88, 2),
+ [P7_SPI_12] = P7CTL_INIT_FUNC(SPI_12, 89, 0),
+ [P7_GPIO_089] = P7CTL_INIT_FUNC(GPIO_089, 89, 1),
+ [P7_AAI_27] = P7CTL_INIT_FUNC(AAI_27, 89, 2),
+ [P7_SPI_13] = P7CTL_INIT_FUNC(SPI_13, 90, 0),
+ [P7_GPIO_090] = P7CTL_INIT_FUNC(GPIO_090, 90, 1),
+ [P7_AAI_28] = P7CTL_INIT_FUNC(AAI_28, 90, 2),
+ [P7_SPI_14] = P7CTL_INIT_FUNC(SPI_14, 91, 0),
+ [P7_GPIO_091] = P7CTL_INIT_FUNC(GPIO_091, 91, 1),
+ [P7_AAI_29] = P7CTL_INIT_FUNC(AAI_29, 91, 2),
+ [P7_SPI_15] = P7CTL_INIT_FUNC(SPI_15, 92, 0),
+ [P7_GPIO_092] = P7CTL_INIT_FUNC(GPIO_092, 92, 1),
+ [P7_AAI_30] = P7CTL_INIT_FUNC(AAI_30, 92, 2),
+ [P7_P7MU_CLK_OUT] = P7CTL_INIT_FUNC(P7MU_CLK_OUT, 93, 0),
+ [P7_PWM_15a] = P7CTL_INIT_FUNC(PWM_15a, 93, 2),
+ [P7_REBOOT_P7MU] = P7CTL_INIT_FUNC(REBOOT_P7MU, 94, 0),
+ [P7_GPIO_094] = P7CTL_INIT_FUNC(GPIO_094, 94, 1),
+ [P7_ULPI_0_DATA00] = P7CTL_INIT_FUNC(ULPI_0_DATA00, 95, 0),
+ [P7_ULPI_0_DATA01] = P7CTL_INIT_FUNC(ULPI_0_DATA01, 96, 0),
+ [P7_ULPI_0_DATA02] = P7CTL_INIT_FUNC(ULPI_0_DATA02, 97, 0),
+ [P7_ULPI_0_DATA03] = P7CTL_INIT_FUNC(ULPI_0_DATA03, 98, 0),
+ [P7_ULPI_0_DATA04] = P7CTL_INIT_FUNC(ULPI_0_DATA04, 99, 0),
+ [P7_ULPI_0_DATA05] = P7CTL_INIT_FUNC(ULPI_0_DATA05, 100, 0),
+ [P7_ULPI_0_DATA06] = P7CTL_INIT_FUNC(ULPI_0_DATA06, 101, 0),
+ [P7_ULPI_0_DATA07] = P7CTL_INIT_FUNC(ULPI_0_DATA07, 102, 0),
+ [P7_ULPI_0_NXT] = P7CTL_INIT_FUNC(ULPI_0_NXT, 103, 0),
+ [P7_ULPI_0_DIR] = P7CTL_INIT_FUNC(ULPI_0_DIR, 104, 0),
+ [P7_ULPI_0_CLK] = P7CTL_INIT_FUNC(ULPI_0_CLK, 105, 0),
+ [P7_ULPI_1_DATA00] = P7CTL_INIT_FUNC(ULPI_1_DATA00, 106, 0),
+ [P7_GPIO_106] = P7CTL_INIT_FUNC(GPIO_106, 106, 1),
+ [P7_I2C_2_CLKa] = P7CTL_INIT_FUNC(I2C_2_CLKa, 106, 2),
+ [P7_ULPI_1_DATA01] = P7CTL_INIT_FUNC(ULPI_1_DATA01, 107, 0),
+ [P7_GPIO_107] = P7CTL_INIT_FUNC(GPIO_107, 107, 1),
+ [P7_I2C_2_DATa] = P7CTL_INIT_FUNC(I2C_2_DATa, 107, 2),
+ [P7_ULPI_1_DATA02] = P7CTL_INIT_FUNC(ULPI_1_DATA02, 108, 0),
+ [P7_GPIO_108] = P7CTL_INIT_FUNC(GPIO_108, 108, 1),
+ [P7_I2C_2_CLKb] = P7CTL_INIT_FUNC(I2C_2_CLKb, 108, 2),
+ [P7_ULPI_1_DATA03] = P7CTL_INIT_FUNC(ULPI_1_DATA03, 109, 0),
+ [P7_GPIO_109] = P7CTL_INIT_FUNC(GPIO_109, 109, 1),
+ [P7_I2C_2_DATb] = P7CTL_INIT_FUNC(I2C_2_DATb, 109, 2),
+ [P7_ULPI_1_DATA04] = P7CTL_INIT_FUNC(ULPI_1_DATA04, 110, 0),
+ [P7_GPIO_110] = P7CTL_INIT_FUNC(GPIO_110, 110, 1),
+ [P7_I2C_2_CLKc] = P7CTL_INIT_FUNC(I2C_2_CLKc, 110, 2),
+ [P7_ULPI_1_DATA05] = P7CTL_INIT_FUNC(ULPI_1_DATA05, 111, 0),
+ [P7_GPIO_111] = P7CTL_INIT_FUNC(GPIO_111, 111, 1),
+ [P7_I2C_2_DATc] = P7CTL_INIT_FUNC(I2C_2_DATc, 111, 2),
+ [P7_ULPI_1_DATA06] = P7CTL_INIT_FUNC(ULPI_1_DATA06, 112, 0),
+ [P7_GPIO_112] = P7CTL_INIT_FUNC(GPIO_112, 112, 1),
+ [P7_I2C_2_CLKd] = P7CTL_INIT_FUNC(I2C_2_CLKd, 112, 2),
+ [P7_ULPI_1_DATA07] = P7CTL_INIT_FUNC(ULPI_1_DATA07, 113, 0),
+ [P7_GPIO_113] = P7CTL_INIT_FUNC(GPIO_113, 113, 1),
+ [P7_I2C_2_DATd] = P7CTL_INIT_FUNC(I2C_2_DATd, 113, 2),
+ [P7_ULPI_1_NXT] = P7CTL_INIT_FUNC(ULPI_1_NXT, 114, 0),
+ [P7_GPIO_114] = P7CTL_INIT_FUNC(GPIO_114, 114, 1),
+ [P7_I2C_2_CLKe] = P7CTL_INIT_FUNC(I2C_2_CLKe, 114, 2),
+ [P7_ULPI_1_DIR] = P7CTL_INIT_FUNC(ULPI_1_DIR, 115, 0),
+ [P7_GPIO_115] = P7CTL_INIT_FUNC(GPIO_115, 115, 1),
+ [P7_ULPI_1_STP] = P7CTL_INIT_FUNC(ULPI_1_STP, 116, 0),
+ [P7_GPIO_116] = P7CTL_INIT_FUNC(GPIO_116, 116, 1),
+ [P7_I2C_2_DATe] = P7CTL_INIT_FUNC(I2C_2_DATe, 116, 2),
+ [P7_ULPI_0_STP] = P7CTL_INIT_FUNC(ULPI_0_STP, 117, 0),
+ [P7_ULPI_1_CLK] = P7CTL_INIT_FUNC(ULPI_1_CLK, 118, 0),
+ [P7_GPIO_118] = P7CTL_INIT_FUNC(GPIO_118, 118, 1),
+ [P7_AAI_00] = P7CTL_INIT_FUNC(AAI_00, 119, 0),
+ [P7_GPIO_119] = P7CTL_INIT_FUNC(GPIO_119, 119, 1),
+ [P7_PWM_00] = P7CTL_INIT_FUNC(PWM_00, 119, 2),
+ [P7_AAI_01] = P7CTL_INIT_FUNC(AAI_01, 120, 0),
+ [P7_GPIO_120] = P7CTL_INIT_FUNC(GPIO_120, 120, 1),
+ [P7_PWM_01] = P7CTL_INIT_FUNC(PWM_01, 120, 2),
+ [P7_AAI_02] = P7CTL_INIT_FUNC(AAI_02, 121, 0),
+ [P7_GPIO_121] = P7CTL_INIT_FUNC(GPIO_121, 121, 1),
+ [P7_PWM_02] = P7CTL_INIT_FUNC(PWM_02, 121, 2),
+ [P7_AAI_03] = P7CTL_INIT_FUNC(AAI_03, 122, 0),
+ [P7_GPIO_122] = P7CTL_INIT_FUNC(GPIO_122, 122, 1),
+ [P7_PWM_03] = P7CTL_INIT_FUNC(PWM_03, 122, 2),
+ [P7_AAI_04] = P7CTL_INIT_FUNC(AAI_04, 123, 0),
+ [P7_GPIO_123] = P7CTL_INIT_FUNC(GPIO_123, 123, 1),
+ [P7_PWM_04] = P7CTL_INIT_FUNC(PWM_04, 123, 2),
+ [P7_AAI_05] = P7CTL_INIT_FUNC(AAI_05, 124, 0),
+ [P7_GPIO_124] = P7CTL_INIT_FUNC(GPIO_124, 124, 1),
+ [P7_PWM_05] = P7CTL_INIT_FUNC(PWM_05, 124, 2),
+ [P7_AAI_06] = P7CTL_INIT_FUNC(AAI_06, 125, 0),
+ [P7_GPIO_125] = P7CTL_INIT_FUNC(GPIO_125, 125, 1),
+ [P7_PWM_06] = P7CTL_INIT_FUNC(PWM_06, 125, 2),
+ [P7_AAI_07] = P7CTL_INIT_FUNC(AAI_07, 126, 0),
+ [P7_GPIO_126] = P7CTL_INIT_FUNC(GPIO_126, 126, 1),
+ [P7_PWM_07] = P7CTL_INIT_FUNC(PWM_07, 126, 2),
+ [P7_AAI_08] = P7CTL_INIT_FUNC(AAI_08, 127, 0),
+ [P7_GPIO_127] = P7CTL_INIT_FUNC(GPIO_127, 127, 1),
+ [P7_PWM_08] = P7CTL_INIT_FUNC(PWM_08, 127, 2),
+ [P7_AAI_09] = P7CTL_INIT_FUNC(AAI_09, 128, 0),
+ [P7_GPIO_128] = P7CTL_INIT_FUNC(GPIO_128, 128, 1),
+ [P7_PWM_09] = P7CTL_INIT_FUNC(PWM_09, 128, 2),
+ [P7_AAI_10] = P7CTL_INIT_FUNC(AAI_10, 129, 0),
+ [P7_GPIO_129] = P7CTL_INIT_FUNC(GPIO_129, 129, 1),
+ [P7_PWM_10] = P7CTL_INIT_FUNC(PWM_10, 129, 2),
+ [P7_AAI_11] = P7CTL_INIT_FUNC(AAI_11, 130, 0),
+ [P7_GPIO_130] = P7CTL_INIT_FUNC(GPIO_130, 130, 1),
+ [P7_PWM_11] = P7CTL_INIT_FUNC(PWM_11, 130, 2),
+ [P7_AAI_12] = P7CTL_INIT_FUNC(AAI_12, 131, 0),
+ [P7_GPIO_131] = P7CTL_INIT_FUNC(GPIO_131, 131, 1),
+ [P7_PWM_12] = P7CTL_INIT_FUNC(PWM_12, 131, 2),
+ [P7_AAI_13] = P7CTL_INIT_FUNC(AAI_13, 132, 0),
+ [P7_GPIO_132] = P7CTL_INIT_FUNC(GPIO_132, 132, 1),
+ [P7_PWM_13] = P7CTL_INIT_FUNC(PWM_13, 132, 2),
+ [P7_AAI_14] = P7CTL_INIT_FUNC(AAI_14, 133, 0),
+ [P7_GPIO_133] = P7CTL_INIT_FUNC(GPIO_133, 133, 1),
+ [P7_PWM_14] = P7CTL_INIT_FUNC(PWM_14, 133, 2),
+ [P7_SPI_16] = P7CTL_INIT_FUNC(SPI_16, 134, 0),
+ [P7_GPIO_134] = P7CTL_INIT_FUNC(GPIO_134, 134, 1),
+ [P7_ETH_MII_TXER] = P7CTL_INIT_FUNC(ETH_MII_TXER, 134, 2),
+ [P7_SPI_17] = P7CTL_INIT_FUNC(SPI_17, 135, 0),
+ [P7_GPIO_135] = P7CTL_INIT_FUNC(GPIO_135, 135, 1),
+ [P7_ETH_MII_RXER] = P7CTL_INIT_FUNC(ETH_MII_RXER, 135, 2),
+ [P7_SPI_18] = P7CTL_INIT_FUNC(SPI_18, 136, 0),
+ [P7_GPIO_136] = P7CTL_INIT_FUNC(GPIO_136, 136, 1),
+ [P7_ETH_MII_CRS] = P7CTL_INIT_FUNC(ETH_MII_CRS, 136, 2),
+ [P7_SPI_19] = P7CTL_INIT_FUNC(SPI_19, 137, 0),
+ [P7_GPIO_137] = P7CTL_INIT_FUNC(GPIO_137, 137, 1),
+ [P7_ETH_MII_COL] = P7CTL_INIT_FUNC(ETH_MII_COL, 137, 2),
+ [P7_ETH_RGMII_TXC] = P7CTL_INIT_FUNC(ETH_RGMII_TXC, 138, 0),
+ [P7_GPIO_138] = P7CTL_INIT_FUNC(GPIO_138, 138, 1),
+ [P7_ETH_RGMII_TXD_00] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_00, 139, 0),
+ [P7_GPIO_139] = P7CTL_INIT_FUNC(GPIO_139, 139, 1),
+ [P7_ETH_RGMII_TXD_01] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_01, 140, 0),
+ [P7_GPIO_140] = P7CTL_INIT_FUNC(GPIO_140, 140, 1),
+ [P7_ETH_RGMII_TXD_02] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_02, 141, 0),
+ [P7_GPIO_141] = P7CTL_INIT_FUNC(GPIO_141, 141, 1),
+ [P7_ETH_RGMII_TXD_03] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_03, 142, 0),
+ [P7_GPIO_142] = P7CTL_INIT_FUNC(GPIO_142, 142, 1),
+ [P7_ETH_RGMII_TX_CTL] = P7CTL_INIT_FUNC(ETH_RGMII_TX_CTL, 143, 0),
+ [P7_GPIO_143] = P7CTL_INIT_FUNC(GPIO_143, 143, 1),
+ [P7_ETH_RGMII_RXC] = P7CTL_INIT_FUNC(ETH_RGMII_RXC, 144, 0),
+ [P7_GPIO_144] = P7CTL_INIT_FUNC(GPIO_144, 144, 1),
+ [P7_ETH_RGMII_RXD_00] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_00, 145, 0),
+ [P7_GPIO_145] = P7CTL_INIT_FUNC(GPIO_145, 145, 1),
+ [P7_ETH_RGMII_RXD_01] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_01, 146, 0),
+ [P7_GPIO_146] = P7CTL_INIT_FUNC(GPIO_146, 146, 1),
+ [P7_ETH_RGMII_RXD_02] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_02, 147, 0),
+ [P7_GPIO_147] = P7CTL_INIT_FUNC(GPIO_147, 147, 1),
+ [P7_ETH_RGMII_RXD_03] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_03, 148, 0),
+ [P7_GPIO_148] = P7CTL_INIT_FUNC(GPIO_148, 148, 1),
+ [P7_ETH_RGMII_RX_CTL] = P7CTL_INIT_FUNC(ETH_RGMII_RX_CTL, 149, 0),
+ [P7_GPIO_149] = P7CTL_INIT_FUNC(GPIO_149, 149, 1),
+ [P7_ETH_MDC] = P7CTL_INIT_FUNC(ETH_MDC, 150, 0),
+ [P7_GPIO_150] = P7CTL_INIT_FUNC(GPIO_150, 150, 1),
+ [P7_ETH_MDIO] = P7CTL_INIT_FUNC(ETH_MDIO, 151, 0),
+ [P7_GPIO_151] = P7CTL_INIT_FUNC(GPIO_151, 151, 1),
+ [P7_LCD_0_DEN] = P7CTL_INIT_FUNC(LCD_0_DEN, 152, 0),
+ [P7_GPIO_152] = P7CTL_INIT_FUNC(GPIO_152, 152, 1),
+ [P7_CAM_1_VS] = P7CTL_INIT_FUNC(CAM_1_VS, 152, 2),
+ [P7_LCD_0_HS] = P7CTL_INIT_FUNC(LCD_0_HS, 153, 0),
+ [P7_GPIO_153] = P7CTL_INIT_FUNC(GPIO_153, 153, 1),
+ [P7_CAM_1_HS] = P7CTL_INIT_FUNC(CAM_1_HS, 153, 2),
+ [P7_LCD_0_DENa] = P7CTL_INIT_FUNC(LCD_0_DENa, 153, 3),
+ [P7_LCD_0_VS] = P7CTL_INIT_FUNC(LCD_0_VS, 154, 0),
+ [P7_GPIO_154] = P7CTL_INIT_FUNC(GPIO_154, 154, 1),
+ [P7_CAM_0_VS] = P7CTL_INIT_FUNC(CAM_0_VS, 154, 2),
+ [P7_PAR_NCS] = P7CTL_INIT_FUNC(PAR_NCS, 154, 3),
+ [P7_LCD_0_DATA00] = P7CTL_INIT_FUNC(LCD_0_DATA00, 155, 0),
+ [P7_GPIO_155] = P7CTL_INIT_FUNC(GPIO_155, 155, 1),
+ [P7_CAM_0_HS] = P7CTL_INIT_FUNC(CAM_0_HS, 155, 2),
+ [P7_PAR_NRS] = P7CTL_INIT_FUNC(PAR_NRS, 155, 3),
+ [P7_LCD_0_DATA01] = P7CTL_INIT_FUNC(LCD_0_DATA01, 156, 0),
+ [P7_GPIO_156] = P7CTL_INIT_FUNC(GPIO_156, 156, 1),
+ [P7_CAM_2_VS] = P7CTL_INIT_FUNC(CAM_2_VS, 156, 2),
+ [P7_PAR_NRD] = P7CTL_INIT_FUNC(PAR_NRD, 156, 3),
+ [P7_LCD_0_DATA02] = P7CTL_INIT_FUNC(LCD_0_DATA02, 157, 0),
+ [P7_GPIO_157] = P7CTL_INIT_FUNC(GPIO_157, 157, 1),
+ [P7_CAM_2_HS] = P7CTL_INIT_FUNC(CAM_2_HS, 157, 2),
+ [P7_PAR_NWR] = P7CTL_INIT_FUNC(PAR_NWR, 157, 3),
+ [P7_LCD_0_DATA03] = P7CTL_INIT_FUNC(LCD_0_DATA03, 158, 0),
+ [P7_GPIO_158] = P7CTL_INIT_FUNC(GPIO_158, 158, 1),
+ [P7_CAM_3_VS] = P7CTL_INIT_FUNC(CAM_3_VS, 158, 2),
+ [P7_PAR_D00] = P7CTL_INIT_FUNC(PAR_D00, 158, 3),
+ [P7_LCD_0_DATA04] = P7CTL_INIT_FUNC(LCD_0_DATA04, 159, 0),
+ [P7_GPIO_159] = P7CTL_INIT_FUNC(GPIO_159, 159, 1),
+ [P7_CAM_3_HS] = P7CTL_INIT_FUNC(CAM_3_HS, 159, 2),
+ [P7_PAR_D01] = P7CTL_INIT_FUNC(PAR_D01, 159, 3),
+ [P7_LCD_0_DATA05] = P7CTL_INIT_FUNC(LCD_0_DATA05, 160, 0),
+ [P7_GPIO_160] = P7CTL_INIT_FUNC(GPIO_160, 160, 1),
+ [P7_CAM_5_VS] = P7CTL_INIT_FUNC(CAM_5_VS, 160, 2),
+ [P7_PAR_D02] = P7CTL_INIT_FUNC(PAR_D02, 160, 3),
+ [P7_LCD_0_DATA06] = P7CTL_INIT_FUNC(LCD_0_DATA06, 161, 0),
+ [P7_GPIO_161] = P7CTL_INIT_FUNC(GPIO_161, 161, 1),
+ [P7_CAM_5_HS] = P7CTL_INIT_FUNC(CAM_5_HS, 161, 2),
+ [P7_PAR_D03] = P7CTL_INIT_FUNC(PAR_D03, 161, 3),
+ [P7_LCD_0_DATA07] = P7CTL_INIT_FUNC(LCD_0_DATA07, 162, 0),
+ [P7_GPIO_162] = P7CTL_INIT_FUNC(GPIO_162, 162, 1),
+ [P7_CAM_0_DATA08] = P7CTL_INIT_FUNC(CAM_0_DATA08, 162, 2),
+ [P7_PAR_D04] = P7CTL_INIT_FUNC(PAR_D04, 162, 3),
+ [P7_LCD_0_DATA08] = P7CTL_INIT_FUNC(LCD_0_DATA08, 163, 0),
+ [P7_GPIO_163] = P7CTL_INIT_FUNC(GPIO_163, 163, 1),
+ [P7_CAM_0_DATA09] = P7CTL_INIT_FUNC(CAM_0_DATA09, 163, 2),
+ [P7_PAR_D05] = P7CTL_INIT_FUNC(PAR_D05, 163, 3),
+ [P7_LCD_0_DATA09] = P7CTL_INIT_FUNC(LCD_0_DATA09, 164, 0),
+ [P7_GPIO_164] = P7CTL_INIT_FUNC(GPIO_164, 164, 1),
+ [P7_CAM_0_DATA10] = P7CTL_INIT_FUNC(CAM_0_DATA10, 164, 2),
+ [P7_PAR_D06] = P7CTL_INIT_FUNC(PAR_D06, 164, 3),
+ [P7_LCD_0_DATA10] = P7CTL_INIT_FUNC(LCD_0_DATA10, 165, 0),
+ [P7_GPIO_165] = P7CTL_INIT_FUNC(GPIO_165, 165, 1),
+ [P7_CAM_0_DATA11] = P7CTL_INIT_FUNC(CAM_0_DATA11, 165, 2),
+ [P7_PAR_D07] = P7CTL_INIT_FUNC(PAR_D07, 165, 3),
+ [P7_LCD_0_DATA11] = P7CTL_INIT_FUNC(LCD_0_DATA11, 166, 0),
+ [P7_GPIO_166] = P7CTL_INIT_FUNC(GPIO_166, 166, 1),
+ [P7_CAM_0_DATA12] = P7CTL_INIT_FUNC(CAM_0_DATA12, 166, 2),
+ [P7_PAR_D08] = P7CTL_INIT_FUNC(PAR_D08, 166, 3),
+ [P7_LCD_0_DATA12] = P7CTL_INIT_FUNC(LCD_0_DATA12, 167, 0),
+ [P7_GPIO_167] = P7CTL_INIT_FUNC(GPIO_167, 167, 1),
+ [P7_CAM_0_DATA13] = P7CTL_INIT_FUNC(CAM_0_DATA13, 167, 2),
+ [P7_PAR_D09] = P7CTL_INIT_FUNC(PAR_D09, 167, 3),
+ [P7_LCD_0_DATA13] = P7CTL_INIT_FUNC(LCD_0_DATA13, 168, 0),
+ [P7_GPIO_168] = P7CTL_INIT_FUNC(GPIO_168, 168, 1),
+ [P7_CAM_0_DATA14] = P7CTL_INIT_FUNC(CAM_0_DATA14, 168, 2),
+ [P7_PAR_D10] = P7CTL_INIT_FUNC(PAR_D10, 168, 3),
+ [P7_LCD_0_DATA14] = P7CTL_INIT_FUNC(LCD_0_DATA14, 169, 0),
+ [P7_GPIO_169] = P7CTL_INIT_FUNC(GPIO_169, 169, 1),
+ [P7_CAM_0_DATA15] = P7CTL_INIT_FUNC(CAM_0_DATA15, 169, 2),
+ [P7_PAR_D11] = P7CTL_INIT_FUNC(PAR_D11, 169, 3),
+ [P7_LCD_0_DATA15] = P7CTL_INIT_FUNC(LCD_0_DATA15, 170, 0),
+ [P7_GPIO_170] = P7CTL_INIT_FUNC(GPIO_170, 170, 1),
+ [P7_CAM_0_DATA16] = P7CTL_INIT_FUNC(CAM_0_DATA16, 170, 2),
+ [P7_PAR_D12] = P7CTL_INIT_FUNC(PAR_D12, 170, 3),
+ [P7_LCD_0_DATA16] = P7CTL_INIT_FUNC(LCD_0_DATA16, 171, 0),
+ [P7_GPIO_171] = P7CTL_INIT_FUNC(GPIO_171, 171, 1),
+ [P7_CAM_0_DATA17] = P7CTL_INIT_FUNC(CAM_0_DATA17, 171, 2),
+ [P7_PAR_D13] = P7CTL_INIT_FUNC(PAR_D13, 171, 3),
+ [P7_LCD_0_DATA17] = P7CTL_INIT_FUNC(LCD_0_DATA17, 172, 0),
+ [P7_GPIO_172] = P7CTL_INIT_FUNC(GPIO_172, 172, 1),
+ [P7_CAM_0_DATA18] = P7CTL_INIT_FUNC(CAM_0_DATA18, 172, 2),
+ [P7_PAR_D14] = P7CTL_INIT_FUNC(PAR_D14, 172, 3),
+ [P7_LCD_0_DATA18] = P7CTL_INIT_FUNC(LCD_0_DATA18, 173, 0),
+ [P7_GPIO_173] = P7CTL_INIT_FUNC(GPIO_173, 173, 1),
+ [P7_CAM_0_DATA19] = P7CTL_INIT_FUNC(CAM_0_DATA19, 173, 2),
+ [P7_PAR_D15] = P7CTL_INIT_FUNC(PAR_D15, 173, 3),
+ [P7_LCD_0_DATA19] = P7CTL_INIT_FUNC(LCD_0_DATA19, 174, 0),
+ [P7_GPIO_174] = P7CTL_INIT_FUNC(GPIO_174, 174, 1),
+ [P7_CAM_0_DATA20] = P7CTL_INIT_FUNC(CAM_0_DATA20, 174, 2),
+ [P7_LCD_0_DATA20] = P7CTL_INIT_FUNC(LCD_0_DATA20, 175, 0),
+ [P7_GPIO_175] = P7CTL_INIT_FUNC(GPIO_175, 175, 1),
+ [P7_CAM_0_DATA21] = P7CTL_INIT_FUNC(CAM_0_DATA21, 175, 2),
+ [P7_LCD_0_DATA21] = P7CTL_INIT_FUNC(LCD_0_DATA21, 176, 0),
+ [P7_GPIO_176] = P7CTL_INIT_FUNC(GPIO_176, 176, 1),
+ [P7_CAM_0_DATA22] = P7CTL_INIT_FUNC(CAM_0_DATA22, 176, 2),
+ [P7_LCD_0_DATA22] = P7CTL_INIT_FUNC(LCD_0_DATA22, 177, 0),
+ [P7_GPIO_177] = P7CTL_INIT_FUNC(GPIO_177, 177, 1),
+ [P7_CAM_0_DATA23] = P7CTL_INIT_FUNC(CAM_0_DATA23, 177, 2),
+ [P7_LCD_0_DATA23] = P7CTL_INIT_FUNC(LCD_0_DATA23, 178, 0),
+ [P7_GPIO_178] = P7CTL_INIT_FUNC(GPIO_178, 178, 1),
+ [P7_CAM_4_VS] = P7CTL_INIT_FUNC(CAM_4_VS, 178, 2),
+ [P7_LCD_0_CLK] = P7CTL_INIT_FUNC(LCD_0_CLK, 179, 0),
+ [P7_GPIO_179] = P7CTL_INIT_FUNC(GPIO_179, 179, 1),
+ [P7_CAM_4_HS] = P7CTL_INIT_FUNC(CAM_4_HS, 179, 2),
+ [P7_LCD_1_HS] = P7CTL_INIT_FUNC(LCD_1_HS, 180, 0),
+ [P7_GPIO_180] = P7CTL_INIT_FUNC(GPIO_180, 180, 1),
+ [P7_CAM_2_CLK] = P7CTL_INIT_FUNC(CAM_2_CLK, 180, 2),
+ [P7_LCD_1_DENa] = P7CTL_INIT_FUNC(LCD_1_DENa, 180, 3),
+ [P7_LCD_1_VS] = P7CTL_INIT_FUNC(LCD_1_VS, 181, 0),
+ [P7_GPIO_181] = P7CTL_INIT_FUNC(GPIO_181, 181, 1),
+ [P7_CAM_2_DATA00] = P7CTL_INIT_FUNC(CAM_2_DATA00, 181, 2),
+ [P7_CPU_TRACECLKOUT] = P7CTL_INIT_FUNC(CPU_TRACECLKOUT, 181, 3),
+ [P7_LCD_1_CLK] = P7CTL_INIT_FUNC(LCD_1_CLK, 182, 0),
+ [P7_GPIO_182] = P7CTL_INIT_FUNC(GPIO_182, 182, 1),
+ [P7_CAM_2_DATA01] = P7CTL_INIT_FUNC(CAM_2_DATA01, 182, 2),
+ [P7_CPU_TRACECTL] = P7CTL_INIT_FUNC(CPU_TRACECTL, 182, 3),
+ [P7_LCD_1_DATA00] = P7CTL_INIT_FUNC(LCD_1_DATA00, 183, 0),
+ [P7_GPIO_183] = P7CTL_INIT_FUNC(GPIO_183, 183, 1),
+ [P7_CAM_2_DATA02] = P7CTL_INIT_FUNC(CAM_2_DATA02, 183, 2),
+ [P7_CPU_TRACEDATA_00] = P7CTL_INIT_FUNC(CPU_TRACEDATA_00, 183, 3),
+ [P7_LCD_1_DATA01] = P7CTL_INIT_FUNC(LCD_1_DATA01, 184, 0),
+ [P7_GPIO_184] = P7CTL_INIT_FUNC(GPIO_184, 184, 1),
+ [P7_CAM_2_DATA03] = P7CTL_INIT_FUNC(CAM_2_DATA03, 184, 2),
+ [P7_CPU_TRACEDATA_01] = P7CTL_INIT_FUNC(CPU_TRACEDATA_01, 184, 3),
+ [P7_LCD_1_DATA02] = P7CTL_INIT_FUNC(LCD_1_DATA02, 185, 0),
+ [P7_GPIO_185] = P7CTL_INIT_FUNC(GPIO_185, 185, 1),
+ [P7_CAM_2_DATA04] = P7CTL_INIT_FUNC(CAM_2_DATA04, 185, 2),
+ [P7_CPU_TRACEDATA_02] = P7CTL_INIT_FUNC(CPU_TRACEDATA_02, 185, 3),
+ [P7_LCD_1_DATA03] = P7CTL_INIT_FUNC(LCD_1_DATA03, 186, 0),
+ [P7_GPIO_186] = P7CTL_INIT_FUNC(GPIO_186, 186, 1),
+ [P7_CAM_2_DATA05] = P7CTL_INIT_FUNC(CAM_2_DATA05, 186, 2),
+ [P7_CPU_TRACEDATA_03] = P7CTL_INIT_FUNC(CPU_TRACEDATA_03, 186, 3),
+ [P7_LCD_1_DATA04] = P7CTL_INIT_FUNC(LCD_1_DATA04, 187, 0),
+ [P7_GPIO_187] = P7CTL_INIT_FUNC(GPIO_187, 187, 1),
+ [P7_CAM_2_DATA06] = P7CTL_INIT_FUNC(CAM_2_DATA06, 187, 2),
+ [P7_CPU_TRACEDATA_04] = P7CTL_INIT_FUNC(CPU_TRACEDATA_04, 187, 3),
+ [P7_LCD_1_DATA05] = P7CTL_INIT_FUNC(LCD_1_DATA05, 188, 0),
+ [P7_GPIO_188] = P7CTL_INIT_FUNC(GPIO_188, 188, 1),
+ [P7_CAM_2_DATA07] = P7CTL_INIT_FUNC(CAM_2_DATA07, 188, 2),
+ [P7_CPU_TRACEDATA_05] = P7CTL_INIT_FUNC(CPU_TRACEDATA_05, 188, 3),
+ [P7_LCD_1_DATA06] = P7CTL_INIT_FUNC(LCD_1_DATA06, 189, 0),
+ [P7_GPIO_189] = P7CTL_INIT_FUNC(GPIO_189, 189, 1),
+ [P7_CAM_3_CLK] = P7CTL_INIT_FUNC(CAM_3_CLK, 189, 2),
+ [P7_CPU_TRACEDATA_06] = P7CTL_INIT_FUNC(CPU_TRACEDATA_06, 189, 3),
+ [P7_LCD_1_DATA07] = P7CTL_INIT_FUNC(LCD_1_DATA07, 190, 0),
+ [P7_GPIO_190] = P7CTL_INIT_FUNC(GPIO_190, 190, 1),
+ [P7_CAM_3_DATA00] = P7CTL_INIT_FUNC(CAM_3_DATA00, 190, 2),
+ [P7_CPU_TRACEDATA_07] = P7CTL_INIT_FUNC(CPU_TRACEDATA_07, 190, 3),
+ [P7_LCD_1_DATA08] = P7CTL_INIT_FUNC(LCD_1_DATA08, 191, 0),
+ [P7_GPIO_191] = P7CTL_INIT_FUNC(GPIO_191, 191, 1),
+ [P7_CAM_3_DATA01] = P7CTL_INIT_FUNC(CAM_3_DATA01, 191, 2),
+ [P7_CPU_TRACEDATA_08] = P7CTL_INIT_FUNC(CPU_TRACEDATA_08, 191, 3),
+ [P7_LCD_1_DATA09] = P7CTL_INIT_FUNC(LCD_1_DATA09, 192, 0),
+ [P7_GPIO_192] = P7CTL_INIT_FUNC(GPIO_192, 192, 1),
+ [P7_CAM_3_DATA02] = P7CTL_INIT_FUNC(CAM_3_DATA02, 192, 2),
+ [P7_CPU_TRACEDATA_09] = P7CTL_INIT_FUNC(CPU_TRACEDATA_09, 192, 3),
+ [P7_LCD_1_DATA10] = P7CTL_INIT_FUNC(LCD_1_DATA10, 193, 0),
+ [P7_GPIO_193] = P7CTL_INIT_FUNC(GPIO_193, 193, 1),
+ [P7_CAM_3_DATA03] = P7CTL_INIT_FUNC(CAM_3_DATA03, 193, 2),
+ [P7_CPU_TRACEDATA_10] = P7CTL_INIT_FUNC(CPU_TRACEDATA_10, 193, 3),
+ [P7_LCD_1_DATA11] = P7CTL_INIT_FUNC(LCD_1_DATA11, 194, 0),
+ [P7_GPIO_194] = P7CTL_INIT_FUNC(GPIO_194, 194, 1),
+ [P7_CAM_3_DATA04] = P7CTL_INIT_FUNC(CAM_3_DATA04, 194, 2),
+ [P7_CPU_TRACEDATA_11] = P7CTL_INIT_FUNC(CPU_TRACEDATA_11, 194, 3),
+ [P7_LCD_1_DATA12] = P7CTL_INIT_FUNC(LCD_1_DATA12, 195, 0),
+ [P7_GPIO_195] = P7CTL_INIT_FUNC(GPIO_195, 195, 1),
+ [P7_CAM_3_DATA05] = P7CTL_INIT_FUNC(CAM_3_DATA05, 195, 2),
+ [P7_CPU_TRACEDATA_12] = P7CTL_INIT_FUNC(CPU_TRACEDATA_12, 195, 3),
+ [P7_LCD_1_DATA13] = P7CTL_INIT_FUNC(LCD_1_DATA13, 196, 0),
+ [P7_GPIO_196] = P7CTL_INIT_FUNC(GPIO_196, 196, 1),
+ [P7_CAM_3_DATA06] = P7CTL_INIT_FUNC(CAM_3_DATA06, 196, 2),
+ [P7_CPU_TRACEDATA_13] = P7CTL_INIT_FUNC(CPU_TRACEDATA_13, 196, 3),
+ [P7_LCD_1_DATA14] = P7CTL_INIT_FUNC(LCD_1_DATA14, 197, 0),
+ [P7_GPIO_197] = P7CTL_INIT_FUNC(GPIO_197, 197, 1),
+ [P7_CAM_3_DATA07] = P7CTL_INIT_FUNC(CAM_3_DATA07, 197, 2),
+ [P7_CPU_TRACEDATA_14] = P7CTL_INIT_FUNC(CPU_TRACEDATA_14, 197, 3),
+ [P7_LCD_1_DATA15] = P7CTL_INIT_FUNC(LCD_1_DATA15, 198, 0),
+ [P7_GPIO_198] = P7CTL_INIT_FUNC(GPIO_198, 198, 1),
+ [P7_CAM_4_CLK] = P7CTL_INIT_FUNC(CAM_4_CLK, 198, 2),
+ [P7_CPU_TRACEDATA_15] = P7CTL_INIT_FUNC(CPU_TRACEDATA_15, 198, 3),
+ [P7_LCD_1_DATA16] = P7CTL_INIT_FUNC(LCD_1_DATA16, 199, 0),
+ [P7_GPIO_199] = P7CTL_INIT_FUNC(GPIO_199, 199, 1),
+ [P7_CAM_4_DATA00] = P7CTL_INIT_FUNC(CAM_4_DATA00, 199, 2),
+ [P7_LCD_1_DATA17] = P7CTL_INIT_FUNC(LCD_1_DATA17, 200, 0),
+ [P7_GPIO_200] = P7CTL_INIT_FUNC(GPIO_200, 200, 1),
+ [P7_CAM_4_DATA01] = P7CTL_INIT_FUNC(CAM_4_DATA01, 200, 2),
+ [P7_LCD_1_DATA18] = P7CTL_INIT_FUNC(LCD_1_DATA18, 201, 0),
+ [P7_GPIO_201] = P7CTL_INIT_FUNC(GPIO_201, 201, 1),
+ [P7_CAM_4_DATA02] = P7CTL_INIT_FUNC(CAM_4_DATA02, 201, 2),
+ [P7_LCD_1_DATA19] = P7CTL_INIT_FUNC(LCD_1_DATA19, 202, 0),
+ [P7_GPIO_202] = P7CTL_INIT_FUNC(GPIO_202, 202, 1),
+ [P7_CAM_4_DATA03] = P7CTL_INIT_FUNC(CAM_4_DATA03, 202, 2),
+ [P7_LCD_1_DATA20] = P7CTL_INIT_FUNC(LCD_1_DATA20, 203, 0),
+ [P7_GPIO_203] = P7CTL_INIT_FUNC(GPIO_203, 203, 1),
+ [P7_CAM_4_DATA04] = P7CTL_INIT_FUNC(CAM_4_DATA04, 203, 2),
+ [P7_LCD_1_DATA21] = P7CTL_INIT_FUNC(LCD_1_DATA21, 204, 0),
+ [P7_GPIO_204] = P7CTL_INIT_FUNC(GPIO_204, 204, 1),
+ [P7_CAM_4_DATA05] = P7CTL_INIT_FUNC(CAM_4_DATA05, 204, 2),
+ [P7_LCD_1_DATA22] = P7CTL_INIT_FUNC(LCD_1_DATA22, 205, 0),
+ [P7_GPIO_205] = P7CTL_INIT_FUNC(GPIO_205, 205, 1),
+ [P7_CAM_4_DATA06] = P7CTL_INIT_FUNC(CAM_4_DATA06, 205, 2),
+ [P7_LCD_1_DATA23] = P7CTL_INIT_FUNC(LCD_1_DATA23, 206, 0),
+ [P7_GPIO_206] = P7CTL_INIT_FUNC(GPIO_206, 206, 1),
+ [P7_CAM_4_DATA07] = P7CTL_INIT_FUNC(CAM_4_DATA07, 206, 2),
+ [P7_LCD_1_DEN] = P7CTL_INIT_FUNC(LCD_1_DEN, 207, 0),
+ [P7_GPIO_207] = P7CTL_INIT_FUNC(GPIO_207, 207, 1),
+ [P7_CAM_1_HSa] = P7CTL_INIT_FUNC(CAM_1_HSa, 207, 2),
+ [P7_LCD_1_CLKa] = P7CTL_INIT_FUNC(LCD_1_CLKa, 207, 3),
+ [P7_CAM_0_CLK] = P7CTL_INIT_FUNC(CAM_0_CLK, 208, 0),
+ [P7_GPIO_208] = P7CTL_INIT_FUNC(GPIO_208, 208, 1),
+ [P7_CAM_0_DATA00] = P7CTL_INIT_FUNC(CAM_0_DATA00, 209, 0),
+ [P7_GPIO_209] = P7CTL_INIT_FUNC(GPIO_209, 209, 1),
+ [P7_CAM_1_DATA08] = P7CTL_INIT_FUNC(CAM_1_DATA08, 209, 2),
+ [P7_CAM_0_DATA01] = P7CTL_INIT_FUNC(CAM_0_DATA01, 210, 0),
+ [P7_GPIO_210] = P7CTL_INIT_FUNC(GPIO_210, 210, 1),
+ [P7_CAM_1_DATA09] = P7CTL_INIT_FUNC(CAM_1_DATA09, 210, 2),
+ [P7_CAM_0_DATA02] = P7CTL_INIT_FUNC(CAM_0_DATA02, 211, 0),
+ [P7_GPIO_211] = P7CTL_INIT_FUNC(GPIO_211, 211, 1),
+ [P7_CAM_1_DATA10] = P7CTL_INIT_FUNC(CAM_1_DATA10, 211, 2),
+ [P7_CAM_0_DATA03] = P7CTL_INIT_FUNC(CAM_0_DATA03, 212, 0),
+ [P7_GPIO_212] = P7CTL_INIT_FUNC(GPIO_212, 212, 1),
+ [P7_CAM_1_DATA11] = P7CTL_INIT_FUNC(CAM_1_DATA11, 212, 2),
+ [P7_CAM_0_DATA04] = P7CTL_INIT_FUNC(CAM_0_DATA04, 213, 0),
+ [P7_GPIO_213] = P7CTL_INIT_FUNC(GPIO_213, 213, 1),
+ [P7_CAM_1_DATA12] = P7CTL_INIT_FUNC(CAM_1_DATA12, 213, 2),
+ [P7_CAM_0_DATA05] = P7CTL_INIT_FUNC(CAM_0_DATA05, 214, 0),
+ [P7_GPIO_214] = P7CTL_INIT_FUNC(GPIO_214, 214, 1),
+ [P7_CAM_1_DATA13] = P7CTL_INIT_FUNC(CAM_1_DATA13, 214, 2),
+ [P7_CAM_0_DATA06] = P7CTL_INIT_FUNC(CAM_0_DATA06, 215, 0),
+ [P7_GPIO_215] = P7CTL_INIT_FUNC(GPIO_215, 215, 1),
+ [P7_CAM_1_DATA14] = P7CTL_INIT_FUNC(CAM_1_DATA14, 215, 2),
+ [P7_CAM_0_DATA07] = P7CTL_INIT_FUNC(CAM_0_DATA07, 216, 0),
+ [P7_GPIO_216] = P7CTL_INIT_FUNC(GPIO_216, 216, 1),
+ [P7_CAM_1_DATA15] = P7CTL_INIT_FUNC(CAM_1_DATA15, 216, 2),
+ [P7_CAM_1_CLK] = P7CTL_INIT_FUNC(CAM_1_CLK, 217, 0),
+ [P7_GPIO_217] = P7CTL_INIT_FUNC(GPIO_217, 217, 1),
+ [P7_SPI_00b] = P7CTL_INIT_FUNC(SPI_00b, 217, 2),
+ [P7_CAM_1_DATA00] = P7CTL_INIT_FUNC(CAM_1_DATA00, 218, 0),
+ [P7_GPIO_218] = P7CTL_INIT_FUNC(GPIO_218, 218, 1),
+ [P7_SPI_01b] = P7CTL_INIT_FUNC(SPI_01b, 218, 2),
+ [P7_CAM_0_VSa] = P7CTL_INIT_FUNC(CAM_0_VSa, 218, 3),
+ [P7_CAM_1_DATA01] = P7CTL_INIT_FUNC(CAM_1_DATA01, 219, 0),
+ [P7_GPIO_219] = P7CTL_INIT_FUNC(GPIO_219, 219, 1),
+ [P7_SPI_02b] = P7CTL_INIT_FUNC(SPI_02b, 219, 2),
+ [P7_CAM_0_HSa] = P7CTL_INIT_FUNC(CAM_0_HSa, 219, 3)
+};
+
+/*
+ * Bitmap of pads usable as GPIOs on "r2" revision chips: one bit per pad,
+ * 32 pads per machine word, bit N of word W corresponding to pad number
+ * (W * 32 + N). A set bit marks a pad that exposes a GPIO multiplexing
+ * function in the r2 pin function table.
+ * NOTE(review): the mask values are assumed to mirror the GPIO_xxx entries
+ * of p7_pin_funcs_r2 above — confirm against the generator output if this
+ * table is ever regenerated.
+ */
+static unsigned long p7_gpio_pins_r2[] = {
+ 0x00007fff,
+ 0xff8007c2,
+ 0x5fffff9f,
+ 0xffdffc00,
+ 0xffffffff,
+ 0xffffffff,
+ 0x0fffffff
+};
+
+/*
+ * Aggregate pin controller description for "r2" revision chips: bundles the
+ * physical pad descriptors, the per-pad multiplexing function table and the
+ * GPIO capability bitmap into a single configuration object.
+ * NOTE(review): non-static and const — presumably declared in pinctrl.h and
+ * consumed by the common pinctrl setup code; verify against that header.
+ */
+const struct p7_pinctrl_config p7_pinctrl_config_r2 = {
+ .pin_descs = p7_pin_descs_r2,
+ .pin_descs_sz = ARRAY_SIZE(p7_pin_descs_r2),
+ .pin_funcs = p7_pin_funcs_r2,
+ .gpio_pins = p7_gpio_pins_r2,
+ .gpio_pins_sz = ARRAY_SIZE(p7_gpio_pins_r2),
+};
diff --git a/arch/arm/mach-parrot7/pindesc-r3.c b/arch/arm/mach-parrot7/pindesc-r3.c
new file mode 100644
index 00000000000000..1c70bfda5f0d13
--- /dev/null
+++ b/arch/arm/mach-parrot7/pindesc-r3.c
@@ -0,0 +1,855 @@
+#include "common.h"
+#include "pinctrl.h"
+
+/*
+ * Physical pin descriptors
+ *
+ * Pins identifiers use the GPIO numbering scheme. Names are assigned by
+ * concatenating all possible multiplexing functions a pin may be used with.
+ *
+ * Content was generated with the help of the genpin.sh script located in
+ * this directory.
+ */
+static struct pinctrl_pin_desc const p7_pin_descs_r3[] = { /* one descriptor per physical pad of r3 silicon, indexed by GPIO number; names concatenate the pad's mux functions (see header comment) */
+ P7CTL_INIT_PIN(0, "CAM_1_DATA02__GPIO_000__SPI_03b__CAM_3_VSa"),
+ P7CTL_INIT_PIN(1, "CAM_1_DATA03__GPIO_001__SPI_04b__CAM_3_HSa"),
+ P7CTL_INIT_PIN(2, "CAM_1_DATA04__GPIO_002__SPI_05b__CAM_4_VSa"),
+ P7CTL_INIT_PIN(3, "CAM_1_DATA05__GPIO_003__SPI_06b__CAM_4_HSa"),
+ P7CTL_INIT_PIN(4, "CAM_1_DATA06__GPIO_004__SPI_07b__CAM_5_VSa"),
+ P7CTL_INIT_PIN(5, "CAM_1_DATA07__GPIO_005__SPI_08b__CAM_5_HSa"),
+ P7CTL_INIT_PIN(6, "CAM_5_CLK__GPIO_006__SD_2_CLK"), /* SD_2 interface pads start here */
+ P7CTL_INIT_PIN(7, "CAM_5_DATA00__GPIO_007__SD_2_CMD"),
+ P7CTL_INIT_PIN(8, "CAM_5_DATA01__GPIO_008__SD_2_DAT00"),
+ P7CTL_INIT_PIN(9, "CAM_5_DATA02__GPIO_009__SD_2_DAT01"),
+ P7CTL_INIT_PIN(10, "CAM_5_DATA03__GPIO_010__SD_2_DAT02"),
+ P7CTL_INIT_PIN(11, "CAM_5_DATA04__GPIO_011__SD_2_DAT03"),
+ P7CTL_INIT_PIN(12, "CAM_5_DATA05__GPIO_012"),
+ P7CTL_INIT_PIN(13, "CAM_5_DATA06__GPIO_013"),
+ P7CTL_INIT_PIN(14, "CAM_5_DATA07__GPIO_014"),
+ P7CTL_INIT_PIN(15, "SD_1_CLK__GPIO_015__UART_5_TX"),
+ P7CTL_INIT_PIN(16, "SD_1_CMD__GPIO_016__UART_5_RX"),
+ P7CTL_INIT_PIN(17, "SD_1_DAT00__GPIO_017__UART_6_TX"),
+ P7CTL_INIT_PIN(18, "SD_1_DAT02__GPIO_018__UART_7_TX"),
+ P7CTL_INIT_PIN(19, "SD_1_DAT01__GPIO_019__UART_6_RX"),
+ P7CTL_INIT_PIN(20, "SD_1_DAT03__GPIO_020__UART_7_RX"),
+ P7CTL_INIT_PIN(21, "SD_0_CLK__GPIO_021"),
+ P7CTL_INIT_PIN(22, "SD_0_CMD__GPIO_022"),
+ P7CTL_INIT_PIN(23, "SD_0_DAT00__GPIO_023"),
+ P7CTL_INIT_PIN(24, "SD_0_DAT01__GPIO_024"),
+ P7CTL_INIT_PIN(25, "SD_0_DAT02__GPIO_025"),
+ P7CTL_INIT_PIN(26, "SD_0_DAT03__GPIO_026"),
+ P7CTL_INIT_PIN(27, "NAND_NCE__GPIO_027__SPI_00a"), /* NAND flash interface pads start here */
+ P7CTL_INIT_PIN(28, "NAND_NR__GPIO_028__SPI_01a"),
+ P7CTL_INIT_PIN(29, "NAND_NW__GPIO_029__SPI_02a"),
+ P7CTL_INIT_PIN(30, "NAND_AL__GPIO_030__SPI_03a"),
+ P7CTL_INIT_PIN(31, "NAND_CL__GPIO_031__SPI_04a"),
+ P7CTL_INIT_PIN(32, "NAND_RNB__GPIO_032__SPI_05a"),
+ P7CTL_INIT_PIN(33, "NAND_DQS__GPIO_033__CAN1_RXa"),
+ P7CTL_INIT_PIN(34, "NAND_DATA_00__GPIO_034__SPI_06a"),
+ P7CTL_INIT_PIN(35, "NAND_DATA_01__GPIO_035__SPI_07a"),
+ P7CTL_INIT_PIN(36, "NAND_DATA_02__GPIO_036__SPI_08a"),
+ P7CTL_INIT_PIN(37, "NAND_DATA_03__GPIO_037__SPI_09a"),
+ P7CTL_INIT_PIN(38, "NAND_DATA_04__GPIO_038"),
+ P7CTL_INIT_PIN(39, "NAND_DATA_05__GPIO_039"),
+ P7CTL_INIT_PIN(40, "NAND_DATA_06__GPIO_040"),
+ P7CTL_INIT_PIN(41, "NAND_DATA_07__GPIO_041"),
+ P7CTL_INIT_PIN(42, "NAND_WP__GPIO_042__CAN1_TXa"),
+ P7CTL_INIT_PIN(43, "FORCE_POWER_ON__GPIO_043"),
+ P7CTL_INIT_PIN(44, "TRST_N__GPIO_044"), /* JTAG pads (TRST/TDI/TDO/TCK/TMS) */
+ P7CTL_INIT_PIN(45, "TDI__GPIO_045"),
+ P7CTL_INIT_PIN(46, "TDO__GPIO_046"),
+ P7CTL_INIT_PIN(47, "TCK__GPIO_047"),
+ P7CTL_INIT_PIN(48, "TMS__GPIO_048"),
+ P7CTL_INIT_PIN(49, "GPIO_049"),
+ P7CTL_INIT_PIN(50, "GPIO_050"),
+ P7CTL_INIT_PIN(51, "UART_1_RX__GPIO_051"),
+ P7CTL_INIT_PIN(52, "UART_1_TX__GPIO_052"),
+ P7CTL_INIT_PIN(53, "UART_1_RTS__GPIO_053__CAN0_RXb"),
+ P7CTL_INIT_PIN(54, "UART_1_CTS__GPIO_054__CAN0_TXb"),
+ P7CTL_INIT_PIN(55, "UART_0_RX__GPIO_055"),
+ P7CTL_INIT_PIN(56, "UART_0_TX__GPIO_056"),
+ P7CTL_INIT_PIN(57, "UART_0_RTS__GPIO_057"),
+ P7CTL_INIT_PIN(58, "UART_0_CTS__GPIO_058"),
+ P7CTL_INIT_PIN(59, "I2C_2_DAT__GPIO_059__I2C_2_SL_DAT"), /* I2C master/slave pads */
+ P7CTL_INIT_PIN(60, "I2C_2_CLK__GPIO_060__I2C_2_SL_CLK"),
+ P7CTL_INIT_PIN(61, "I2C_1_DAT__GPIO_061__I2C_1_SL_DAT"),
+ P7CTL_INIT_PIN(62, "I2C_1_CLK__GPIO_062__I2C_1_SL_CLK"),
+ P7CTL_INIT_PIN(63, "I2C_0_DAT__GPIO_063__I2C_0_SL_DAT"),
+ P7CTL_INIT_PIN(64, "I2C_0_CLK__GPIO_064__I2C_0_SL_CLK"),
+ P7CTL_INIT_PIN(65, "UART_4_RX__GPIO_065"),
+ P7CTL_INIT_PIN(66, "UART_4_TX__GPIO_066"),
+ P7CTL_INIT_PIN(67, "UART_3_RX__GPIO_067"),
+ P7CTL_INIT_PIN(68, "UART_3_TX__GPIO_068"),
+ P7CTL_INIT_PIN(69, "UART_2_RX__GPIO_069__CAN1_TXb"),
+ P7CTL_INIT_PIN(70, "UART_2_TX__GPIO_070__CAN1_RXb__I2C_SECURE_CLK"),
+ P7CTL_INIT_PIN(71, "SPI_09b__GPIO_071__PWM_15"),
+ P7CTL_INIT_PIN(72, "SPI_10b__GPIO_072"),
+ P7CTL_INIT_PIN(73, "SPI_11b__GPIO_073__I2C_SECURE_CLKa"),
+ P7CTL_INIT_PIN(74, "SPI_12b__GPIO_074"),
+ P7CTL_INIT_PIN(75, "SPI_13b__GPIO_075"),
+ P7CTL_INIT_PIN(76, "SPI_14b__GPIO_076"),
+ P7CTL_INIT_PIN(77, "SPI_00__GPIO_077__AAI_15__CAN1_TX"),
+ P7CTL_INIT_PIN(78, "SPI_01__GPIO_078__AAI_16__CAN1_RX"),
+ P7CTL_INIT_PIN(79, "SPI_02__GPIO_079__AAI_17__CAN0_TX"),
+ P7CTL_INIT_PIN(80, "SPI_03__GPIO_080__AAI_18__CAN0_RX"),
+ P7CTL_INIT_PIN(81, "SPI_04__GPIO_081__AAI_19"),
+ P7CTL_INIT_PIN(82, "SPI_05__GPIO_082__AAI_20"),
+ P7CTL_INIT_PIN(83, "SPI_06__GPIO_083__AAI_21"),
+ P7CTL_INIT_PIN(84, "SPI_07__GPIO_084__AAI_22"),
+ P7CTL_INIT_PIN(85, "SPI_08__GPIO_085__AAI_23"),
+ P7CTL_INIT_PIN(86, "SPI_09__GPIO_086__AAI_24"),
+ P7CTL_INIT_PIN(87, "SPI_10__GPIO_087__AAI_25"),
+ P7CTL_INIT_PIN(88, "SPI_11__GPIO_088__AAI_26"),
+ P7CTL_INIT_PIN(89, "SPI_12__GPIO_089__AAI_27"),
+ P7CTL_INIT_PIN(90, "SPI_13__GPIO_090__AAI_28"),
+ P7CTL_INIT_PIN(91, "SPI_14__GPIO_091__AAI_29"),
+ P7CTL_INIT_PIN(92, "SPI_15__GPIO_092__AAI_30"),
+ P7CTL_INIT_PIN(93, "P7MU_CLK_OUT__GPIO_093__PWM_15a__I2C_SECURE_DAT"),
+ P7CTL_INIT_PIN(94, "REBOOT_P7MU__GPIO_094"),
+ P7CTL_INIT_PIN(95, "ULPI_0_DATA00__GPIO_095"), /* ULPI (USB) interface pads start here */
+ P7CTL_INIT_PIN(96, "ULPI_0_DATA01__GPIO_096"),
+ P7CTL_INIT_PIN(97, "ULPI_0_DATA02__GPIO_097"),
+ P7CTL_INIT_PIN(98, "ULPI_0_DATA03__GPIO_098"),
+ P7CTL_INIT_PIN(99, "ULPI_0_DATA04__GPIO_099"),
+ P7CTL_INIT_PIN(100, "ULPI_0_DATA05__GPIO_100"),
+ P7CTL_INIT_PIN(101, "ULPI_0_DATA06__GPIO_101"),
+ P7CTL_INIT_PIN(102, "ULPI_0_DATA07__GPIO_102"),
+ P7CTL_INIT_PIN(103, "ULPI_0_NXT__GPIO_103"),
+ P7CTL_INIT_PIN(104, "ULPI_0_DIR__GPIO_104"),
+ P7CTL_INIT_PIN(105, "ULPI_0_CLK__GPIO_105"),
+ P7CTL_INIT_PIN(106, "ULPI_1_DATA00__GPIO_106__I2C_2_CLKa"),
+ P7CTL_INIT_PIN(107, "ULPI_1_DATA01__GPIO_107__I2C_2_DATa"),
+ P7CTL_INIT_PIN(108, "ULPI_1_DATA02__GPIO_108__I2C_2_CLKb"),
+ P7CTL_INIT_PIN(109, "ULPI_1_DATA03__GPIO_109__I2C_2_DATb"),
+ P7CTL_INIT_PIN(110, "ULPI_1_DATA04__GPIO_110__I2C_2_CLKc"),
+ P7CTL_INIT_PIN(111, "ULPI_1_DATA05__GPIO_111__I2C_2_DATc"),
+ P7CTL_INIT_PIN(112, "ULPI_1_DATA06__GPIO_112__I2C_2_CLKd"),
+ P7CTL_INIT_PIN(113, "ULPI_1_DATA07__GPIO_113__I2C_2_DATd"),
+ P7CTL_INIT_PIN(114, "ULPI_1_NXT__GPIO_114__I2C_2_CLKe"),
+ P7CTL_INIT_PIN(115, "ULPI_1_DIR__GPIO_115"),
+ P7CTL_INIT_PIN(116, "ULPI_1_STP__GPIO_116__I2C_2_DATe"),
+ P7CTL_INIT_PIN(117, "ULPI_0_STP__GPIO_117"),
+ P7CTL_INIT_PIN(118, "ULPI_1_CLK__GPIO_118"),
+ P7CTL_INIT_PIN(119, "AAI_00__GPIO_119__PWM_00"), /* AAI audio / PWM pads start here */
+ P7CTL_INIT_PIN(120, "AAI_01__GPIO_120__PWM_01"),
+ P7CTL_INIT_PIN(121, "AAI_02__GPIO_121__PWM_02"),
+ P7CTL_INIT_PIN(122, "AAI_03__GPIO_122__PWM_03"),
+ P7CTL_INIT_PIN(123, "AAI_04__GPIO_123__PWM_04"),
+ P7CTL_INIT_PIN(124, "AAI_05__GPIO_124__PWM_05"),
+ P7CTL_INIT_PIN(125, "AAI_06__GPIO_125__PWM_06"),
+ P7CTL_INIT_PIN(126, "AAI_07__GPIO_126__PWM_07"),
+ P7CTL_INIT_PIN(127, "AAI_08__GPIO_127__PWM_08"),
+ P7CTL_INIT_PIN(128, "AAI_09__GPIO_128__PWM_09"),
+ P7CTL_INIT_PIN(129, "AAI_10__GPIO_129__PWM_10"),
+ P7CTL_INIT_PIN(130, "AAI_11__GPIO_130__PWM_11"),
+ P7CTL_INIT_PIN(131, "AAI_12__GPIO_131__PWM_12"),
+ P7CTL_INIT_PIN(132, "AAI_13__GPIO_132__PWM_13"),
+ P7CTL_INIT_PIN(133, "AAI_14__GPIO_133__PWM_14"),
+ P7CTL_INIT_PIN(134, "SPI_16__GPIO_134__ETH_MII_TXER"), /* Ethernet MII/RGMII pads start here */
+ P7CTL_INIT_PIN(135, "SPI_17__GPIO_135__ETH_MII_RXER"),
+ P7CTL_INIT_PIN(136, "SPI_18__GPIO_136__ETH_MII_CRS"),
+ P7CTL_INIT_PIN(137, "SPI_19__GPIO_137__ETH_MII_COL"),
+ P7CTL_INIT_PIN(138, "ETH_RGMII_TXC__GPIO_138"),
+ P7CTL_INIT_PIN(139, "ETH_RGMII_TXD_00__GPIO_139"),
+ P7CTL_INIT_PIN(140, "ETH_RGMII_TXD_01__GPIO_140"),
+ P7CTL_INIT_PIN(141, "ETH_RGMII_TXD_02__GPIO_141"),
+ P7CTL_INIT_PIN(142, "ETH_RGMII_TXD_03__GPIO_142"),
+ P7CTL_INIT_PIN(143, "ETH_RGMII_TX_CTL__GPIO_143"),
+ P7CTL_INIT_PIN(144, "ETH_RGMII_RXC__GPIO_144"),
+ P7CTL_INIT_PIN(145, "ETH_RGMII_RXD_00__GPIO_145"),
+ P7CTL_INIT_PIN(146, "ETH_RGMII_RXD_01__GPIO_146"),
+ P7CTL_INIT_PIN(147, "ETH_RGMII_RXD_02__GPIO_147"),
+ P7CTL_INIT_PIN(148, "ETH_RGMII_RXD_03__GPIO_148"),
+ P7CTL_INIT_PIN(149, "ETH_RGMII_RX_CTL__GPIO_149"),
+ P7CTL_INIT_PIN(150, "ETH_MDC__GPIO_150"),
+ P7CTL_INIT_PIN(151, "ETH_MDIO__GPIO_151"),
+ P7CTL_INIT_PIN(152, "LCD_0_DEN__GPIO_152__CAM_1_VS"), /* LCD_0 interface pads start here */
+ P7CTL_INIT_PIN(153, "LCD_0_HS__GPIO_153__CAM_1_HS__LCD_0_DENa"),
+ P7CTL_INIT_PIN(154, "LCD_0_VS__GPIO_154__CAM_0_VS"),
+ P7CTL_INIT_PIN(155, "LCD_0_DATA00__GPIO_155__CAM_0_HS"),
+ P7CTL_INIT_PIN(156, "LCD_0_DATA01__GPIO_156__CAM_2_VS"),
+ P7CTL_INIT_PIN(157, "LCD_0_DATA02__GPIO_157__CAM_2_HS"),
+ P7CTL_INIT_PIN(158, "LCD_0_DATA03__GPIO_158__CAM_3_VS"),
+ P7CTL_INIT_PIN(159, "LCD_0_DATA04__GPIO_159__CAM_3_HS"),
+ P7CTL_INIT_PIN(160, "LCD_0_DATA05__GPIO_160__CAM_5_VS"),
+ P7CTL_INIT_PIN(161, "LCD_0_DATA06__GPIO_161__CAM_5_HS"),
+ P7CTL_INIT_PIN(162, "LCD_0_DATA07__GPIO_162__CAM_0_DATA08"),
+ P7CTL_INIT_PIN(163, "LCD_0_DATA08__GPIO_163__CAM_0_DATA09__CAM_0_DATA08a"),
+ P7CTL_INIT_PIN(164, "LCD_0_DATA09__GPIO_164__CAM_0_DATA10__CAM_0_DATA09a"),
+ P7CTL_INIT_PIN(165, "LCD_0_DATA10__GPIO_165__CAM_0_DATA11__CAM_0_DATA10a"),
+ P7CTL_INIT_PIN(166, "LCD_0_DATA11__GPIO_166__CAM_0_DATA12__CAM_0_DATA11a"),
+ P7CTL_INIT_PIN(167, "LCD_0_DATA12__GPIO_167__CAM_0_DATA13__CAM_0_DATA12a"),
+ P7CTL_INIT_PIN(168, "LCD_0_DATA13__GPIO_168__CAM_0_DATA14__CAM_0_DATA13a"),
+ P7CTL_INIT_PIN(169, "LCD_0_DATA14__GPIO_169__CAM_0_DATA15__CAM_0_DATA14a"),
+ P7CTL_INIT_PIN(170, "LCD_0_DATA15__GPIO_170__CAM_0_DATA16__CAM_0_DATA15a"),
+ P7CTL_INIT_PIN(171, "LCD_0_DATA16__GPIO_171__CAM_0_DATA17"),
+ P7CTL_INIT_PIN(172, "LCD_0_DATA17__GPIO_172__CAM_0_DATA18"),
+ P7CTL_INIT_PIN(173, "LCD_0_DATA18__GPIO_173__CAM_0_DATA19"),
+ P7CTL_INIT_PIN(174, "LCD_0_DATA19__GPIO_174__CAM_0_DATA20"),
+ P7CTL_INIT_PIN(175, "LCD_0_DATA20__GPIO_175__CAM_0_DATA21__CAM_3_DATA09a"),
+ P7CTL_INIT_PIN(176, "LCD_0_DATA21__GPIO_176__CAM_0_DATA22__CAM_3_DATA08a"),
+ P7CTL_INIT_PIN(177, "LCD_0_DATA22__GPIO_177__CAM_0_DATA23"),
+ P7CTL_INIT_PIN(178, "LCD_0_DATA23__GPIO_178__CAM_4_VS"),
+ P7CTL_INIT_PIN(179, "LCD_0_CLK__GPIO_179__CAM_4_HS__CAM_0_CLKa"),
+ P7CTL_INIT_PIN(180, "LCD_1_HS__GPIO_180__CAM_2_CLK__LCD_1_DENa"), /* LCD_1 / CPU trace pads start here */
+ P7CTL_INIT_PIN(181, "LCD_1_VS__GPIO_181__CAM_2_DATA00__CPU_TRACECLKOUT"),
+ P7CTL_INIT_PIN(182, "LCD_1_CLK__GPIO_182__CAM_2_DATA01__CPU_TRACECTL"),
+ P7CTL_INIT_PIN(183, "LCD_1_DATA00__GPIO_183__CAM_2_DATA02__CPU_TRACEDATA_00"),
+ P7CTL_INIT_PIN(184, "LCD_1_DATA01__GPIO_184__CAM_2_DATA03__CPU_TRACEDATA_01"),
+ P7CTL_INIT_PIN(185, "LCD_1_DATA02__GPIO_185__CAM_2_DATA04__CPU_TRACEDATA_02"),
+ P7CTL_INIT_PIN(186, "LCD_1_DATA03__GPIO_186__CAM_2_DATA05__CPU_TRACEDATA_03"),
+ P7CTL_INIT_PIN(187, "LCD_1_DATA04__GPIO_187__CAM_2_DATA06__CPU_TRACEDATA_04"),
+ P7CTL_INIT_PIN(188, "LCD_1_DATA05__GPIO_188__CAM_2_DATA07__CPU_TRACEDATA_05"),
+ P7CTL_INIT_PIN(189, "LCD_1_DATA06__GPIO_189__CAM_3_CLK__CPU_TRACEDATA_06"),
+ P7CTL_INIT_PIN(190, "LCD_1_DATA07__GPIO_190__CAM_3_DATA00__CPU_TRACEDATA_07"),
+ P7CTL_INIT_PIN(191, "LCD_1_DATA08__GPIO_191__CAM_3_DATA01__CPU_TRACEDATA_08"),
+ P7CTL_INIT_PIN(192, "LCD_1_DATA09__GPIO_192__CAM_3_DATA02__CPU_TRACEDATA_09"),
+ P7CTL_INIT_PIN(193, "LCD_1_DATA10__GPIO_193__CAM_3_DATA03__CPU_TRACEDATA_10"),
+ P7CTL_INIT_PIN(194, "LCD_1_DATA11__GPIO_194__CAM_3_DATA04__CPU_TRACEDATA_11"),
+ P7CTL_INIT_PIN(195, "LCD_1_DATA12__GPIO_195__CAM_3_DATA05__CPU_TRACEDATA_12"),
+ P7CTL_INIT_PIN(196, "LCD_1_DATA13__GPIO_196__CAM_3_DATA06__CPU_TRACEDATA_13"),
+ P7CTL_INIT_PIN(197, "LCD_1_DATA14__GPIO_197__CAM_3_DATA07__CPU_TRACEDATA_14"),
+ P7CTL_INIT_PIN(198, "LCD_1_DATA15__GPIO_198__CAM_4_CLK__CPU_TRACEDATA_15"),
+ P7CTL_INIT_PIN(199, "LCD_1_DATA16__GPIO_199__CAM_4_DATA00"),
+ P7CTL_INIT_PIN(200, "LCD_1_DATA17__GPIO_200__CAM_4_DATA01"),
+ P7CTL_INIT_PIN(201, "LCD_1_DATA18__GPIO_201__CAM_4_DATA02"),
+ P7CTL_INIT_PIN(202, "LCD_1_DATA19__GPIO_202__CAM_4_DATA03"),
+ P7CTL_INIT_PIN(203, "LCD_1_DATA20__GPIO_203__CAM_4_DATA04"),
+ P7CTL_INIT_PIN(204, "LCD_1_DATA21__GPIO_204__CAM_4_DATA05"),
+ P7CTL_INIT_PIN(205, "LCD_1_DATA22__GPIO_205__CAM_4_DATA06"),
+ P7CTL_INIT_PIN(206, "LCD_1_DATA23__GPIO_206__CAM_4_DATA07"),
+ P7CTL_INIT_PIN(207, "LCD_1_DEN__GPIO_207__CAM_1_HSa__LCD_1_CLKa"),
+ P7CTL_INIT_PIN(208, "CAM_0_CLK__GPIO_208__CAM_1_CLKa"), /* CAM_0/CAM_1 pads start here */
+ P7CTL_INIT_PIN(209, "CAM_0_DATA00__GPIO_209__CAM_1_DATA08"),
+ P7CTL_INIT_PIN(210, "CAM_0_DATA01__GPIO_210__CAM_1_DATA09"),
+ P7CTL_INIT_PIN(211, "CAM_0_DATA02__GPIO_211__CAM_1_DATA10__CAM_4_DATA08"),
+ P7CTL_INIT_PIN(212, "CAM_0_DATA03__GPIO_212__CAM_1_DATA11__CAM_4_DATA09"),
+ P7CTL_INIT_PIN(213, "CAM_0_DATA04__GPIO_213__CAM_1_DATA12__CAM_5_DATA08"),
+ P7CTL_INIT_PIN(214, "CAM_0_DATA05__GPIO_214__CAM_1_DATA13__CAM_5_DATA09"),
+ P7CTL_INIT_PIN(215, "CAM_0_DATA06__GPIO_215__CAM_1_DATA14__CAM_2_DATA08"),
+ P7CTL_INIT_PIN(216, "CAM_0_DATA07__GPIO_216__CAM_1_DATA15__CAM_2_DATA09"),
+ P7CTL_INIT_PIN(217, "CAM_1_CLK__GPIO_217__SPI_00b"),
+ P7CTL_INIT_PIN(218, "CAM_1_DATA00__GPIO_218__SPI_01b__CAM_0_VSa"),
+ P7CTL_INIT_PIN(219, "CAM_1_DATA01__GPIO_219__SPI_02b__CAM_0_HSa")
+};
+
+/* Pinmux functions */
+static struct p7ctl_function const p7_pin_funcs_r3[P7_N_FUNCTIONS] = {
+ [P7_CAM_1_DATA02] = P7CTL_INIT_FUNC(CAM_1_DATA02, 0, 0),
+ [P7_GPIO_000] = P7CTL_INIT_FUNC(GPIO_000, 0, 1),
+ [P7_SPI_03b] = P7CTL_INIT_FUNC(SPI_03b, 0, 2),
+ [P7_CAM_3_VSa] = P7CTL_INIT_FUNC(CAM_3_VSa, 0, 3),
+ [P7_CAM_1_DATA03] = P7CTL_INIT_FUNC(CAM_1_DATA03, 1, 0),
+ [P7_GPIO_001] = P7CTL_INIT_FUNC(GPIO_001, 1, 1),
+ [P7_SPI_04b] = P7CTL_INIT_FUNC(SPI_04b, 1, 2),
+ [P7_CAM_3_HSa] = P7CTL_INIT_FUNC(CAM_3_HSa, 1, 3),
+ [P7_CAM_1_DATA04] = P7CTL_INIT_FUNC(CAM_1_DATA04, 2, 0),
+ [P7_GPIO_002] = P7CTL_INIT_FUNC(GPIO_002, 2, 1),
+ [P7_SPI_05b] = P7CTL_INIT_FUNC(SPI_05b, 2, 2),
+ [P7_CAM_4_VSa] = P7CTL_INIT_FUNC(CAM_4_VSa, 2, 3),
+ [P7_CAM_1_DATA05] = P7CTL_INIT_FUNC(CAM_1_DATA05, 3, 0),
+ [P7_GPIO_003] = P7CTL_INIT_FUNC(GPIO_003, 3, 1),
+ [P7_SPI_06b] = P7CTL_INIT_FUNC(SPI_06b, 3, 2),
+ [P7_CAM_4_HSa] = P7CTL_INIT_FUNC(CAM_4_HSa, 3, 3),
+ [P7_CAM_1_DATA06] = P7CTL_INIT_FUNC(CAM_1_DATA06, 4, 0),
+ [P7_GPIO_004] = P7CTL_INIT_FUNC(GPIO_004, 4, 1),
+ [P7_SPI_07b] = P7CTL_INIT_FUNC(SPI_07b, 4, 2),
+ [P7_CAM_5_VSa] = P7CTL_INIT_FUNC(CAM_5_VSa, 4, 3),
+ [P7_CAM_1_DATA07] = P7CTL_INIT_FUNC(CAM_1_DATA07, 5, 0),
+ [P7_GPIO_005] = P7CTL_INIT_FUNC(GPIO_005, 5, 1),
+ [P7_SPI_08b] = P7CTL_INIT_FUNC(SPI_08b, 5, 2),
+ [P7_CAM_5_HSa] = P7CTL_INIT_FUNC(CAM_5_HSa, 5, 3),
+ [P7_CAM_5_CLK] = P7CTL_INIT_FUNC(CAM_5_CLK, 6, 0),
+ [P7_GPIO_006] = P7CTL_INIT_FUNC(GPIO_006, 6, 1),
+ [P7_SD_2_CLK] = P7CTL_INIT_FUNC(SD_2_CLK, 6, 2),
+ [P7_CAM_5_DATA00] = P7CTL_INIT_FUNC(CAM_5_DATA00, 7, 0),
+ [P7_GPIO_007] = P7CTL_INIT_FUNC(GPIO_007, 7, 1),
+ [P7_SD_2_CMD] = P7CTL_INIT_FUNC(SD_2_CMD, 7, 2),
+ [P7_CAM_5_DATA01] = P7CTL_INIT_FUNC(CAM_5_DATA01, 8, 0),
+ [P7_GPIO_008] = P7CTL_INIT_FUNC(GPIO_008, 8, 1),
+ [P7_SD_2_DAT00] = P7CTL_INIT_FUNC(SD_2_DAT00, 8, 2),
+ [P7_CAM_5_DATA02] = P7CTL_INIT_FUNC(CAM_5_DATA02, 9, 0),
+ [P7_GPIO_009] = P7CTL_INIT_FUNC(GPIO_009, 9, 1),
+ [P7_SD_2_DAT01] = P7CTL_INIT_FUNC(SD_2_DAT01, 9, 2),
+ [P7_CAM_5_DATA03] = P7CTL_INIT_FUNC(CAM_5_DATA03, 10, 0),
+ [P7_GPIO_010] = P7CTL_INIT_FUNC(GPIO_010, 10, 1),
+ [P7_SD_2_DAT02] = P7CTL_INIT_FUNC(SD_2_DAT02, 10, 2),
+ [P7_CAM_5_DATA04] = P7CTL_INIT_FUNC(CAM_5_DATA04, 11, 0),
+ [P7_GPIO_011] = P7CTL_INIT_FUNC(GPIO_011, 11, 1),
+ [P7_SD_2_DAT03] = P7CTL_INIT_FUNC(SD_2_DAT03, 11, 2),
+ [P7_CAM_5_DATA05] = P7CTL_INIT_FUNC(CAM_5_DATA05, 12, 0),
+ [P7_GPIO_012] = P7CTL_INIT_FUNC(GPIO_012, 12, 1),
+ [P7_CAM_5_DATA06] = P7CTL_INIT_FUNC(CAM_5_DATA06, 13, 0),
+ [P7_GPIO_013] = P7CTL_INIT_FUNC(GPIO_013, 13, 1),
+ [P7_CAM_5_DATA07] = P7CTL_INIT_FUNC(CAM_5_DATA07, 14, 0),
+ [P7_GPIO_014] = P7CTL_INIT_FUNC(GPIO_014, 14, 1),
+ [P7_SD_1_CLK] = P7CTL_INIT_FUNC(SD_1_CLK, 15, 0),
+ [P7_UART_5_TX] = P7CTL_INIT_FUNC(UART_5_TX, 15, 2),
+ [P7_SD_1_CMD] = P7CTL_INIT_FUNC(SD_1_CMD, 16, 0),
+ [P7_UART_5_RX] = P7CTL_INIT_FUNC(UART_5_RX, 16, 2),
+ [P7_SD_1_DAT00] = P7CTL_INIT_FUNC(SD_1_DAT00, 17, 0),
+ [P7_UART_6_TX] = P7CTL_INIT_FUNC(UART_6_TX, 17, 2),
+ [P7_SD_1_DAT02] = P7CTL_INIT_FUNC(SD_1_DAT02, 18, 0),
+ [P7_UART_7_TX] = P7CTL_INIT_FUNC(UART_7_TX, 18, 2),
+ [P7_SD_1_DAT01] = P7CTL_INIT_FUNC(SD_1_DAT01, 19, 0),
+ [P7_UART_6_RX] = P7CTL_INIT_FUNC(UART_6_RX, 19, 2),
+ [P7_SD_1_DAT03] = P7CTL_INIT_FUNC(SD_1_DAT03, 20, 0),
+ [P7_UART_7_RX] = P7CTL_INIT_FUNC(UART_7_RX, 20, 2),
+ [P7_SD_0_CLK] = P7CTL_INIT_FUNC(SD_0_CLK, 21, 0),
+ [P7_SD_0_CMD] = P7CTL_INIT_FUNC(SD_0_CMD, 22, 0),
+ [P7_SD_0_DAT00] = P7CTL_INIT_FUNC(SD_0_DAT00, 23, 0),
+ [P7_SD_0_DAT01] = P7CTL_INIT_FUNC(SD_0_DAT01, 24, 0),
+ [P7_SD_0_DAT02] = P7CTL_INIT_FUNC(SD_0_DAT02, 25, 0),
+ [P7_SD_0_DAT03] = P7CTL_INIT_FUNC(SD_0_DAT03, 26, 0),
+ [P7_NAND_NCE] = P7CTL_INIT_FUNC(NAND_NCE, 27, 0),
+ [P7_SPI_00a] = P7CTL_INIT_FUNC(SPI_00a, 27, 2),
+ [P7_NAND_NR] = P7CTL_INIT_FUNC(NAND_NR, 28, 0),
+ [P7_SPI_01a] = P7CTL_INIT_FUNC(SPI_01a, 28, 2),
+ [P7_NAND_NW] = P7CTL_INIT_FUNC(NAND_NW, 29, 0),
+ [P7_SPI_02a] = P7CTL_INIT_FUNC(SPI_02a, 29, 2),
+ [P7_NAND_AL] = P7CTL_INIT_FUNC(NAND_AL, 30, 0),
+ [P7_SPI_03a] = P7CTL_INIT_FUNC(SPI_03a, 30, 2),
+ [P7_NAND_CL] = P7CTL_INIT_FUNC(NAND_CL, 31, 0),
+ [P7_SPI_04a] = P7CTL_INIT_FUNC(SPI_04a, 31, 2),
+ [P7_NAND_RNB] = P7CTL_INIT_FUNC(NAND_RNB, 32, 0),
+ [P7_SPI_05a] = P7CTL_INIT_FUNC(SPI_05a, 32, 2),
+ [P7_NAND_DQS] = P7CTL_INIT_FUNC(NAND_DQS, 33, 0),
+ [P7_GPIO_033] = P7CTL_INIT_FUNC(GPIO_033, 33, 1),
+ [P7_CAN1_RXa] = P7CTL_INIT_FUNC(CAN1_RXa, 33, 2),
+ [P7_NAND_DATA_00] = P7CTL_INIT_FUNC(NAND_DATA_00, 34, 0),
+ [P7_SPI_06a] = P7CTL_INIT_FUNC(SPI_06a, 34, 2),
+ [P7_NAND_DATA_01] = P7CTL_INIT_FUNC(NAND_DATA_01, 35, 0),
+ [P7_SPI_07a] = P7CTL_INIT_FUNC(SPI_07a, 35, 2),
+ [P7_NAND_DATA_02] = P7CTL_INIT_FUNC(NAND_DATA_02, 36, 0),
+ [P7_SPI_08a] = P7CTL_INIT_FUNC(SPI_08a, 36, 2),
+ [P7_NAND_DATA_03] = P7CTL_INIT_FUNC(NAND_DATA_03, 37, 0),
+ [P7_SPI_09a] = P7CTL_INIT_FUNC(SPI_09a, 37, 2),
+ [P7_NAND_DATA_04] = P7CTL_INIT_FUNC(NAND_DATA_04, 38, 0),
+ [P7_GPIO_038] = P7CTL_INIT_FUNC(GPIO_038, 38, 1),
+ [P7_NAND_DATA_05] = P7CTL_INIT_FUNC(NAND_DATA_05, 39, 0),
+ [P7_GPIO_039] = P7CTL_INIT_FUNC(GPIO_039, 39, 1),
+ [P7_NAND_DATA_06] = P7CTL_INIT_FUNC(NAND_DATA_06, 40, 0),
+ [P7_GPIO_040] = P7CTL_INIT_FUNC(GPIO_040, 40, 1),
+ [P7_NAND_DATA_07] = P7CTL_INIT_FUNC(NAND_DATA_07, 41, 0),
+ [P7_GPIO_041] = P7CTL_INIT_FUNC(GPIO_041, 41, 1),
+ [P7_NAND_WP] = P7CTL_INIT_FUNC(NAND_WP, 42, 0),
+ [P7_GPIO_042] = P7CTL_INIT_FUNC(GPIO_042, 42, 1),
+ [P7_CAN1_TXa] = P7CTL_INIT_FUNC(CAN1_TXa, 42, 2),
+ [P7_FORCE_POWER_ON] = P7CTL_INIT_FUNC(FORCE_POWER_ON, 43, 0),
+ [P7_TRST_N] = P7CTL_INIT_FUNC(TRST_N, 44, 0),
+ [P7_TDI] = P7CTL_INIT_FUNC(TDI, 45, 0),
+ [P7_TDO] = P7CTL_INIT_FUNC(TDO, 46, 0),
+ [P7_TCK] = P7CTL_INIT_FUNC(TCK, 47, 0),
+ [P7_TMS] = P7CTL_INIT_FUNC(TMS, 48, 0),
+ [P7_GPIO_049] = P7CTL_INIT_FUNC(GPIO_049, 49, 1),
+ [P7_GPIO_050] = P7CTL_INIT_FUNC(GPIO_050, 50, 1),
+ [P7_UART_1_RX] = P7CTL_INIT_FUNC(UART_1_RX, 51, 0),
+ [P7_UART_1_TX] = P7CTL_INIT_FUNC(UART_1_TX, 52, 0),
+ [P7_UART_1_RTS] = P7CTL_INIT_FUNC(UART_1_RTS, 53, 0),
+ [P7_CAN0_RXb] = P7CTL_INIT_FUNC(CAN0_RXb, 53, 2),
+ [P7_UART_1_CTS] = P7CTL_INIT_FUNC(UART_1_CTS, 54, 0),
+ [P7_CAN0_TXb] = P7CTL_INIT_FUNC(CAN0_TXb, 54, 2),
+ [P7_UART_0_RX] = P7CTL_INIT_FUNC(UART_0_RX, 55, 0),
+ [P7_GPIO_055] = P7CTL_INIT_FUNC(GPIO_055, 55, 1),
+ [P7_UART_0_TX] = P7CTL_INIT_FUNC(UART_0_TX, 56, 0),
+ [P7_GPIO_056] = P7CTL_INIT_FUNC(GPIO_056, 56, 1),
+ [P7_UART_0_RTS] = P7CTL_INIT_FUNC(UART_0_RTS, 57, 0),
+ [P7_GPIO_057] = P7CTL_INIT_FUNC(GPIO_057, 57, 1),
+ [P7_UART_0_CTS] = P7CTL_INIT_FUNC(UART_0_CTS, 58, 0),
+ [P7_GPIO_058] = P7CTL_INIT_FUNC(GPIO_058, 58, 1),
+ [P7_I2C_2_DAT] = P7CTL_INIT_FUNC(I2C_2_DAT, 59, 0),
+ [P7_GPIO_059] = P7CTL_INIT_FUNC(GPIO_059, 59, 1),
+ [P7_I2C_2_SL_DAT] = P7CTL_INIT_FUNC(I2C_2_SL_DAT, 59, 2),
+ [P7_I2C_2_CLK] = P7CTL_INIT_FUNC(I2C_2_CLK, 60, 0),
+ [P7_GPIO_060] = P7CTL_INIT_FUNC(GPIO_060, 60, 1),
+ [P7_I2C_2_SL_CLK] = P7CTL_INIT_FUNC(I2C_2_SL_CLK, 60, 2),
+ [P7_I2C_1_DAT] = P7CTL_INIT_FUNC(I2C_1_DAT, 61, 0),
+ [P7_GPIO_061] = P7CTL_INIT_FUNC(GPIO_061, 61, 1),
+ [P7_I2C_1_SL_DAT] = P7CTL_INIT_FUNC(I2C_1_SL_DAT, 61, 2),
+ [P7_I2C_1_CLK] = P7CTL_INIT_FUNC(I2C_1_CLK, 62, 0),
+ [P7_GPIO_062] = P7CTL_INIT_FUNC(GPIO_062, 62, 1),
+ [P7_I2C_1_SL_CLK] = P7CTL_INIT_FUNC(I2C_1_SL_CLK, 62, 2),
+ [P7_I2C_0_DAT] = P7CTL_INIT_FUNC(I2C_0_DAT, 63, 0),
+ [P7_GPIO_063] = P7CTL_INIT_FUNC(GPIO_063, 63, 1),
+ [P7_I2C_0_SL_DAT] = P7CTL_INIT_FUNC(I2C_0_SL_DAT, 63, 2),
+ [P7_I2C_0_CLK] = P7CTL_INIT_FUNC(I2C_0_CLK, 64, 0),
+ [P7_GPIO_064] = P7CTL_INIT_FUNC(GPIO_064, 64, 1),
+ [P7_I2C_0_SL_CLK] = P7CTL_INIT_FUNC(I2C_0_SL_CLK, 64, 2),
+ [P7_UART_4_RX] = P7CTL_INIT_FUNC(UART_4_RX, 65, 0),
+ [P7_GPIO_065] = P7CTL_INIT_FUNC(GPIO_065, 65, 1),
+ [P7_UART_4_TX] = P7CTL_INIT_FUNC(UART_4_TX, 66, 0),
+ [P7_GPIO_066] = P7CTL_INIT_FUNC(GPIO_066, 66, 1),
+ [P7_UART_3_RX] = P7CTL_INIT_FUNC(UART_3_RX, 67, 0),
+ [P7_GPIO_067] = P7CTL_INIT_FUNC(GPIO_067, 67, 1),
+ [P7_UART_3_TX] = P7CTL_INIT_FUNC(UART_3_TX, 68, 0),
+ [P7_GPIO_068] = P7CTL_INIT_FUNC(GPIO_068, 68, 1),
+ [P7_UART_2_RX] = P7CTL_INIT_FUNC(UART_2_RX, 69, 0),
+ [P7_CAN1_TXb] = P7CTL_INIT_FUNC(CAN1_TXb, 69, 2),
+ [P7_UART_2_TX] = P7CTL_INIT_FUNC(UART_2_TX, 70, 0),
+ [P7_CAN1_RXb] = P7CTL_INIT_FUNC(CAN1_RXb, 70, 2),
+ [P7_I2C_SECURE_CLK] = P7CTL_INIT_FUNC(I2C_SECURE_CLK, 70, 3),
+ [P7_SPI_09b] = P7CTL_INIT_FUNC(SPI_09b, 71, 0),
+ [P7_GPIO_071] = P7CTL_INIT_FUNC(GPIO_071, 71, 1),
+ [P7_PWM_15] = P7CTL_INIT_FUNC(PWM_15, 71, 2),
+ [P7_SPI_10b] = P7CTL_INIT_FUNC(SPI_10b, 72, 0),
+ [P7_GPIO_072] = P7CTL_INIT_FUNC(GPIO_072, 72, 1),
+ [P7_SPI_11b] = P7CTL_INIT_FUNC(SPI_11b, 73, 0),
+ [P7_GPIO_073] = P7CTL_INIT_FUNC(GPIO_073, 73, 1),
+ [P7_I2C_SECURE_CLKa] = P7CTL_INIT_FUNC(I2C_SECURE_CLKa, 73, 2),
+ [P7_SPI_12b] = P7CTL_INIT_FUNC(SPI_12b, 74, 0),
+ [P7_GPIO_074] = P7CTL_INIT_FUNC(GPIO_074, 74, 1),
+ [P7_SPI_13b] = P7CTL_INIT_FUNC(SPI_13b, 75, 0),
+ [P7_GPIO_075] = P7CTL_INIT_FUNC(GPIO_075, 75, 1),
+ [P7_SPI_14b] = P7CTL_INIT_FUNC(SPI_14b, 76, 0),
+ [P7_GPIO_076] = P7CTL_INIT_FUNC(GPIO_076, 76, 1),
+ [P7_SPI_00] = P7CTL_INIT_FUNC(SPI_00, 77, 0),
+ [P7_GPIO_077] = P7CTL_INIT_FUNC(GPIO_077, 77, 1),
+ [P7_AAI_15] = P7CTL_INIT_FUNC(AAI_15, 77, 2),
+ [P7_CAN1_TX] = P7CTL_INIT_FUNC(CAN1_TX, 77, 3),
+ [P7_SPI_01] = P7CTL_INIT_FUNC(SPI_01, 78, 0),
+ [P7_GPIO_078] = P7CTL_INIT_FUNC(GPIO_078, 78, 1),
+ [P7_AAI_16] = P7CTL_INIT_FUNC(AAI_16, 78, 2),
+ [P7_CAN1_RX] = P7CTL_INIT_FUNC(CAN1_RX, 78, 3),
+ [P7_SPI_02] = P7CTL_INIT_FUNC(SPI_02, 79, 0),
+ [P7_GPIO_079] = P7CTL_INIT_FUNC(GPIO_079, 79, 1),
+ [P7_AAI_17] = P7CTL_INIT_FUNC(AAI_17, 79, 2),
+ [P7_CAN0_TX] = P7CTL_INIT_FUNC(CAN0_TX, 79, 3),
+ [P7_SPI_03] = P7CTL_INIT_FUNC(SPI_03, 80, 0),
+ [P7_GPIO_080] = P7CTL_INIT_FUNC(GPIO_080, 80, 1),
+ [P7_AAI_18] = P7CTL_INIT_FUNC(AAI_18, 80, 2),
+ [P7_CAN0_RX] = P7CTL_INIT_FUNC(CAN0_RX, 80, 3),
+ [P7_SPI_04] = P7CTL_INIT_FUNC(SPI_04, 81, 0),
+ [P7_GPIO_081] = P7CTL_INIT_FUNC(GPIO_081, 81, 1),
+ [P7_AAI_19] = P7CTL_INIT_FUNC(AAI_19, 81, 2),
+ [P7_SPI_05] = P7CTL_INIT_FUNC(SPI_05, 82, 0),
+ [P7_GPIO_082] = P7CTL_INIT_FUNC(GPIO_082, 82, 1),
+ [P7_AAI_20] = P7CTL_INIT_FUNC(AAI_20, 82, 2),
+ [P7_SPI_06] = P7CTL_INIT_FUNC(SPI_06, 83, 0),
+ [P7_GPIO_083] = P7CTL_INIT_FUNC(GPIO_083, 83, 1),
+ [P7_AAI_21] = P7CTL_INIT_FUNC(AAI_21, 83, 2),
+ [P7_SPI_07] = P7CTL_INIT_FUNC(SPI_07, 84, 0),
+ [P7_GPIO_084] = P7CTL_INIT_FUNC(GPIO_084, 84, 1),
+ [P7_AAI_22] = P7CTL_INIT_FUNC(AAI_22, 84, 2),
+ [P7_SPI_08] = P7CTL_INIT_FUNC(SPI_08, 85, 0),
+ [P7_GPIO_085] = P7CTL_INIT_FUNC(GPIO_085, 85, 1),
+ [P7_AAI_23] = P7CTL_INIT_FUNC(AAI_23, 85, 2),
+ [P7_SPI_09] = P7CTL_INIT_FUNC(SPI_09, 86, 0),
+ [P7_GPIO_086] = P7CTL_INIT_FUNC(GPIO_086, 86, 1),
+ [P7_AAI_24] = P7CTL_INIT_FUNC(AAI_24, 86, 2),
+ [P7_SPI_10] = P7CTL_INIT_FUNC(SPI_10, 87, 0),
+ [P7_GPIO_087] = P7CTL_INIT_FUNC(GPIO_087, 87, 1),
+ [P7_AAI_25] = P7CTL_INIT_FUNC(AAI_25, 87, 2),
+ [P7_SPI_11] = P7CTL_INIT_FUNC(SPI_11, 88, 0),
+ [P7_GPIO_088] = P7CTL_INIT_FUNC(GPIO_088, 88, 1),
+ [P7_AAI_26] = P7CTL_INIT_FUNC(AAI_26, 88, 2),
+ [P7_SPI_12] = P7CTL_INIT_FUNC(SPI_12, 89, 0),
+ [P7_GPIO_089] = P7CTL_INIT_FUNC(GPIO_089, 89, 1),
+ [P7_AAI_27] = P7CTL_INIT_FUNC(AAI_27, 89, 2),
+ [P7_SPI_13] = P7CTL_INIT_FUNC(SPI_13, 90, 0),
+ [P7_GPIO_090] = P7CTL_INIT_FUNC(GPIO_090, 90, 1),
+ [P7_AAI_28] = P7CTL_INIT_FUNC(AAI_28, 90, 2),
+ [P7_SPI_14] = P7CTL_INIT_FUNC(SPI_14, 91, 0),
+ [P7_GPIO_091] = P7CTL_INIT_FUNC(GPIO_091, 91, 1),
+ [P7_AAI_29] = P7CTL_INIT_FUNC(AAI_29, 91, 2),
+ [P7_SPI_15] = P7CTL_INIT_FUNC(SPI_15, 92, 0),
+ [P7_GPIO_092] = P7CTL_INIT_FUNC(GPIO_092, 92, 1),
+ [P7_AAI_30] = P7CTL_INIT_FUNC(AAI_30, 92, 2),
+ [P7_P7MU_CLK_OUT] = P7CTL_INIT_FUNC(P7MU_CLK_OUT, 93, 0),
+ [P7_PWM_15a] = P7CTL_INIT_FUNC(PWM_15a, 93, 2),
+ [P7_I2C_SECURE_DAT] = P7CTL_INIT_FUNC(I2C_SECURE_DAT, 93, 3),
+ [P7_REBOOT_P7MU] = P7CTL_INIT_FUNC(REBOOT_P7MU, 94, 0),
+ [P7_GPIO_094] = P7CTL_INIT_FUNC(GPIO_094, 94, 1),
+ [P7_ULPI_0_DATA00] = P7CTL_INIT_FUNC(ULPI_0_DATA00, 95, 0),
+ [P7_ULPI_0_DATA01] = P7CTL_INIT_FUNC(ULPI_0_DATA01, 96, 0),
+ [P7_ULPI_0_DATA02] = P7CTL_INIT_FUNC(ULPI_0_DATA02, 97, 0),
+ [P7_ULPI_0_DATA03] = P7CTL_INIT_FUNC(ULPI_0_DATA03, 98, 0),
+ [P7_ULPI_0_DATA04] = P7CTL_INIT_FUNC(ULPI_0_DATA04, 99, 0),
+ [P7_ULPI_0_DATA05] = P7CTL_INIT_FUNC(ULPI_0_DATA05, 100, 0),
+ [P7_ULPI_0_DATA06] = P7CTL_INIT_FUNC(ULPI_0_DATA06, 101, 0),
+ [P7_ULPI_0_DATA07] = P7CTL_INIT_FUNC(ULPI_0_DATA07, 102, 0),
+ [P7_ULPI_0_NXT] = P7CTL_INIT_FUNC(ULPI_0_NXT, 103, 0),
+ [P7_ULPI_0_DIR] = P7CTL_INIT_FUNC(ULPI_0_DIR, 104, 0),
+ [P7_ULPI_0_CLK] = P7CTL_INIT_FUNC(ULPI_0_CLK, 105, 0),
+ [P7_ULPI_1_DATA00] = P7CTL_INIT_FUNC(ULPI_1_DATA00, 106, 0),
+ [P7_GPIO_106] = P7CTL_INIT_FUNC(GPIO_106, 106, 1),
+ [P7_I2C_2_CLKa] = P7CTL_INIT_FUNC(I2C_2_CLKa, 106, 2),
+ [P7_ULPI_1_DATA01] = P7CTL_INIT_FUNC(ULPI_1_DATA01, 107, 0),
+ [P7_GPIO_107] = P7CTL_INIT_FUNC(GPIO_107, 107, 1),
+ [P7_I2C_2_DATa] = P7CTL_INIT_FUNC(I2C_2_DATa, 107, 2),
+ [P7_ULPI_1_DATA02] = P7CTL_INIT_FUNC(ULPI_1_DATA02, 108, 0),
+ [P7_GPIO_108] = P7CTL_INIT_FUNC(GPIO_108, 108, 1),
+ [P7_I2C_2_CLKb] = P7CTL_INIT_FUNC(I2C_2_CLKb, 108, 2),
+ [P7_ULPI_1_DATA03] = P7CTL_INIT_FUNC(ULPI_1_DATA03, 109, 0),
+ [P7_GPIO_109] = P7CTL_INIT_FUNC(GPIO_109, 109, 1),
+ [P7_I2C_2_DATb] = P7CTL_INIT_FUNC(I2C_2_DATb, 109, 2),
+ [P7_ULPI_1_DATA04] = P7CTL_INIT_FUNC(ULPI_1_DATA04, 110, 0),
+ [P7_GPIO_110] = P7CTL_INIT_FUNC(GPIO_110, 110, 1),
+ [P7_I2C_2_CLKc] = P7CTL_INIT_FUNC(I2C_2_CLKc, 110, 2),
+ [P7_ULPI_1_DATA05] = P7CTL_INIT_FUNC(ULPI_1_DATA05, 111, 0),
+ [P7_GPIO_111] = P7CTL_INIT_FUNC(GPIO_111, 111, 1),
+ [P7_I2C_2_DATc] = P7CTL_INIT_FUNC(I2C_2_DATc, 111, 2),
+ [P7_ULPI_1_DATA06] = P7CTL_INIT_FUNC(ULPI_1_DATA06, 112, 0),
+ [P7_GPIO_112] = P7CTL_INIT_FUNC(GPIO_112, 112, 1),
+ [P7_I2C_2_CLKd] = P7CTL_INIT_FUNC(I2C_2_CLKd, 112, 2),
+ [P7_ULPI_1_DATA07] = P7CTL_INIT_FUNC(ULPI_1_DATA07, 113, 0),
+ [P7_GPIO_113] = P7CTL_INIT_FUNC(GPIO_113, 113, 1),
+ [P7_I2C_2_DATd] = P7CTL_INIT_FUNC(I2C_2_DATd, 113, 2),
+ [P7_ULPI_1_NXT] = P7CTL_INIT_FUNC(ULPI_1_NXT, 114, 0),
+ [P7_GPIO_114] = P7CTL_INIT_FUNC(GPIO_114, 114, 1),
+ [P7_I2C_2_CLKe] = P7CTL_INIT_FUNC(I2C_2_CLKe, 114, 2),
+ [P7_ULPI_1_DIR] = P7CTL_INIT_FUNC(ULPI_1_DIR, 115, 0),
+ [P7_GPIO_115] = P7CTL_INIT_FUNC(GPIO_115, 115, 1),
+ [P7_ULPI_1_STP] = P7CTL_INIT_FUNC(ULPI_1_STP, 116, 0),
+ [P7_GPIO_116] = P7CTL_INIT_FUNC(GPIO_116, 116, 1),
+ [P7_I2C_2_DATe] = P7CTL_INIT_FUNC(I2C_2_DATe, 116, 2),
+ [P7_ULPI_0_STP] = P7CTL_INIT_FUNC(ULPI_0_STP, 117, 0),
+ [P7_ULPI_1_CLK] = P7CTL_INIT_FUNC(ULPI_1_CLK, 118, 0),
+ [P7_GPIO_118] = P7CTL_INIT_FUNC(GPIO_118, 118, 1),
+ [P7_AAI_00] = P7CTL_INIT_FUNC(AAI_00, 119, 0),
+ [P7_GPIO_119] = P7CTL_INIT_FUNC(GPIO_119, 119, 1),
+ [P7_PWM_00] = P7CTL_INIT_FUNC(PWM_00, 119, 2),
+ [P7_AAI_01] = P7CTL_INIT_FUNC(AAI_01, 120, 0),
+ [P7_GPIO_120] = P7CTL_INIT_FUNC(GPIO_120, 120, 1),
+ [P7_PWM_01] = P7CTL_INIT_FUNC(PWM_01, 120, 2),
+ [P7_AAI_02] = P7CTL_INIT_FUNC(AAI_02, 121, 0),
+ [P7_GPIO_121] = P7CTL_INIT_FUNC(GPIO_121, 121, 1),
+ [P7_PWM_02] = P7CTL_INIT_FUNC(PWM_02, 121, 2),
+ [P7_AAI_03] = P7CTL_INIT_FUNC(AAI_03, 122, 0),
+ [P7_GPIO_122] = P7CTL_INIT_FUNC(GPIO_122, 122, 1),
+ [P7_PWM_03] = P7CTL_INIT_FUNC(PWM_03, 122, 2),
+ [P7_AAI_04] = P7CTL_INIT_FUNC(AAI_04, 123, 0),
+ [P7_GPIO_123] = P7CTL_INIT_FUNC(GPIO_123, 123, 1),
+ [P7_PWM_04] = P7CTL_INIT_FUNC(PWM_04, 123, 2),
+ [P7_AAI_05] = P7CTL_INIT_FUNC(AAI_05, 124, 0),
+ [P7_GPIO_124] = P7CTL_INIT_FUNC(GPIO_124, 124, 1),
+ [P7_PWM_05] = P7CTL_INIT_FUNC(PWM_05, 124, 2),
+ [P7_AAI_06] = P7CTL_INIT_FUNC(AAI_06, 125, 0),
+ [P7_GPIO_125] = P7CTL_INIT_FUNC(GPIO_125, 125, 1),
+ [P7_PWM_06] = P7CTL_INIT_FUNC(PWM_06, 125, 2),
+ [P7_AAI_07] = P7CTL_INIT_FUNC(AAI_07, 126, 0),
+ [P7_GPIO_126] = P7CTL_INIT_FUNC(GPIO_126, 126, 1),
+ [P7_PWM_07] = P7CTL_INIT_FUNC(PWM_07, 126, 2),
+ [P7_AAI_08] = P7CTL_INIT_FUNC(AAI_08, 127, 0),
+ [P7_GPIO_127] = P7CTL_INIT_FUNC(GPIO_127, 127, 1),
+ [P7_PWM_08] = P7CTL_INIT_FUNC(PWM_08, 127, 2),
+ [P7_AAI_09] = P7CTL_INIT_FUNC(AAI_09, 128, 0),
+ [P7_GPIO_128] = P7CTL_INIT_FUNC(GPIO_128, 128, 1),
+ [P7_PWM_09] = P7CTL_INIT_FUNC(PWM_09, 128, 2),
+ [P7_AAI_10] = P7CTL_INIT_FUNC(AAI_10, 129, 0),
+ [P7_GPIO_129] = P7CTL_INIT_FUNC(GPIO_129, 129, 1),
+ [P7_PWM_10] = P7CTL_INIT_FUNC(PWM_10, 129, 2),
+ [P7_AAI_11] = P7CTL_INIT_FUNC(AAI_11, 130, 0),
+ [P7_GPIO_130] = P7CTL_INIT_FUNC(GPIO_130, 130, 1),
+ [P7_PWM_11] = P7CTL_INIT_FUNC(PWM_11, 130, 2),
+ [P7_AAI_12] = P7CTL_INIT_FUNC(AAI_12, 131, 0),
+ [P7_GPIO_131] = P7CTL_INIT_FUNC(GPIO_131, 131, 1),
+ [P7_PWM_12] = P7CTL_INIT_FUNC(PWM_12, 131, 2),
+ [P7_AAI_13] = P7CTL_INIT_FUNC(AAI_13, 132, 0),
+ [P7_GPIO_132] = P7CTL_INIT_FUNC(GPIO_132, 132, 1),
+ [P7_PWM_13] = P7CTL_INIT_FUNC(PWM_13, 132, 2),
+ [P7_AAI_14] = P7CTL_INIT_FUNC(AAI_14, 133, 0),
+ [P7_GPIO_133] = P7CTL_INIT_FUNC(GPIO_133, 133, 1),
+ [P7_PWM_14] = P7CTL_INIT_FUNC(PWM_14, 133, 2),
+ [P7_SPI_16] = P7CTL_INIT_FUNC(SPI_16, 134, 0),
+ [P7_GPIO_134] = P7CTL_INIT_FUNC(GPIO_134, 134, 1),
+ [P7_ETH_MII_TXER] = P7CTL_INIT_FUNC(ETH_MII_TXER, 134, 2),
+ [P7_SPI_17] = P7CTL_INIT_FUNC(SPI_17, 135, 0),
+ [P7_GPIO_135] = P7CTL_INIT_FUNC(GPIO_135, 135, 1),
+ [P7_ETH_MII_RXER] = P7CTL_INIT_FUNC(ETH_MII_RXER, 135, 2),
+ [P7_SPI_18] = P7CTL_INIT_FUNC(SPI_18, 136, 0),
+ [P7_GPIO_136] = P7CTL_INIT_FUNC(GPIO_136, 136, 1),
+ [P7_ETH_MII_CRS] = P7CTL_INIT_FUNC(ETH_MII_CRS, 136, 2),
+ [P7_SPI_19] = P7CTL_INIT_FUNC(SPI_19, 137, 0),
+ [P7_GPIO_137] = P7CTL_INIT_FUNC(GPIO_137, 137, 1),
+ [P7_ETH_MII_COL] = P7CTL_INIT_FUNC(ETH_MII_COL, 137, 2),
+ [P7_ETH_RGMII_TXC] = P7CTL_INIT_FUNC(ETH_RGMII_TXC, 138, 0),
+ [P7_GPIO_138] = P7CTL_INIT_FUNC(GPIO_138, 138, 1),
+ [P7_ETH_RGMII_TXD_00] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_00, 139, 0),
+ [P7_GPIO_139] = P7CTL_INIT_FUNC(GPIO_139, 139, 1),
+ [P7_ETH_RGMII_TXD_01] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_01, 140, 0),
+ [P7_GPIO_140] = P7CTL_INIT_FUNC(GPIO_140, 140, 1),
+ [P7_ETH_RGMII_TXD_02] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_02, 141, 0),
+ [P7_GPIO_141] = P7CTL_INIT_FUNC(GPIO_141, 141, 1),
+ [P7_ETH_RGMII_TXD_03] = P7CTL_INIT_FUNC(ETH_RGMII_TXD_03, 142, 0),
+ [P7_GPIO_142] = P7CTL_INIT_FUNC(GPIO_142, 142, 1),
+ [P7_ETH_RGMII_TX_CTL] = P7CTL_INIT_FUNC(ETH_RGMII_TX_CTL, 143, 0),
+ [P7_GPIO_143] = P7CTL_INIT_FUNC(GPIO_143, 143, 1),
+ [P7_ETH_RGMII_RXC] = P7CTL_INIT_FUNC(ETH_RGMII_RXC, 144, 0),
+ [P7_GPIO_144] = P7CTL_INIT_FUNC(GPIO_144, 144, 1),
+ [P7_ETH_RGMII_RXD_00] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_00, 145, 0),
+ [P7_GPIO_145] = P7CTL_INIT_FUNC(GPIO_145, 145, 1),
+ [P7_ETH_RGMII_RXD_01] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_01, 146, 0),
+ [P7_GPIO_146] = P7CTL_INIT_FUNC(GPIO_146, 146, 1),
+ [P7_ETH_RGMII_RXD_02] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_02, 147, 0),
+ [P7_GPIO_147] = P7CTL_INIT_FUNC(GPIO_147, 147, 1),
+ [P7_ETH_RGMII_RXD_03] = P7CTL_INIT_FUNC(ETH_RGMII_RXD_03, 148, 0),
+ [P7_GPIO_148] = P7CTL_INIT_FUNC(GPIO_148, 148, 1),
+ [P7_ETH_RGMII_RX_CTL] = P7CTL_INIT_FUNC(ETH_RGMII_RX_CTL, 149, 0),
+ [P7_GPIO_149] = P7CTL_INIT_FUNC(GPIO_149, 149, 1),
+ [P7_ETH_MDC] = P7CTL_INIT_FUNC(ETH_MDC, 150, 0),
+ [P7_GPIO_150] = P7CTL_INIT_FUNC(GPIO_150, 150, 1),
+ [P7_ETH_MDIO] = P7CTL_INIT_FUNC(ETH_MDIO, 151, 0),
+ [P7_GPIO_151] = P7CTL_INIT_FUNC(GPIO_151, 151, 1),
+ [P7_LCD_0_DEN] = P7CTL_INIT_FUNC(LCD_0_DEN, 152, 0),
+ [P7_GPIO_152] = P7CTL_INIT_FUNC(GPIO_152, 152, 1),
+ [P7_CAM_1_VS] = P7CTL_INIT_FUNC(CAM_1_VS, 152, 2),
+ [P7_LCD_0_HS] = P7CTL_INIT_FUNC(LCD_0_HS, 153, 0),
+ [P7_GPIO_153] = P7CTL_INIT_FUNC(GPIO_153, 153, 1),
+ [P7_CAM_1_HS] = P7CTL_INIT_FUNC(CAM_1_HS, 153, 2),
+ [P7_LCD_0_DENa] = P7CTL_INIT_FUNC(LCD_0_DENa, 153, 3),
+ [P7_LCD_0_VS] = P7CTL_INIT_FUNC(LCD_0_VS, 154, 0),
+ [P7_GPIO_154] = P7CTL_INIT_FUNC(GPIO_154, 154, 1),
+ [P7_CAM_0_VS] = P7CTL_INIT_FUNC(CAM_0_VS, 154, 2),
+ [P7_LCD_0_DATA00] = P7CTL_INIT_FUNC(LCD_0_DATA00, 155, 0),
+ [P7_GPIO_155] = P7CTL_INIT_FUNC(GPIO_155, 155, 1),
+ [P7_CAM_0_HS] = P7CTL_INIT_FUNC(CAM_0_HS, 155, 2),
+ [P7_LCD_0_DATA01] = P7CTL_INIT_FUNC(LCD_0_DATA01, 156, 0),
+ [P7_GPIO_156] = P7CTL_INIT_FUNC(GPIO_156, 156, 1),
+ [P7_CAM_2_VS] = P7CTL_INIT_FUNC(CAM_2_VS, 156, 2),
+ [P7_LCD_0_DATA02] = P7CTL_INIT_FUNC(LCD_0_DATA02, 157, 0),
+ [P7_GPIO_157] = P7CTL_INIT_FUNC(GPIO_157, 157, 1),
+ [P7_CAM_2_HS] = P7CTL_INIT_FUNC(CAM_2_HS, 157, 2),
+ [P7_LCD_0_DATA03] = P7CTL_INIT_FUNC(LCD_0_DATA03, 158, 0),
+ [P7_GPIO_158] = P7CTL_INIT_FUNC(GPIO_158, 158, 1),
+ [P7_CAM_3_VS] = P7CTL_INIT_FUNC(CAM_3_VS, 158, 2),
+ [P7_LCD_0_DATA04] = P7CTL_INIT_FUNC(LCD_0_DATA04, 159, 0),
+ [P7_GPIO_159] = P7CTL_INIT_FUNC(GPIO_159, 159, 1),
+ [P7_CAM_3_HS] = P7CTL_INIT_FUNC(CAM_3_HS, 159, 2),
+ [P7_LCD_0_DATA05] = P7CTL_INIT_FUNC(LCD_0_DATA05, 160, 0),
+ [P7_GPIO_160] = P7CTL_INIT_FUNC(GPIO_160, 160, 1),
+ [P7_CAM_5_VS] = P7CTL_INIT_FUNC(CAM_5_VS, 160, 2),
+ [P7_LCD_0_DATA06] = P7CTL_INIT_FUNC(LCD_0_DATA06, 161, 0),
+ [P7_GPIO_161] = P7CTL_INIT_FUNC(GPIO_161, 161, 1),
+ [P7_CAM_5_HS] = P7CTL_INIT_FUNC(CAM_5_HS, 161, 2),
+ [P7_LCD_0_DATA07] = P7CTL_INIT_FUNC(LCD_0_DATA07, 162, 0),
+ [P7_GPIO_162] = P7CTL_INIT_FUNC(GPIO_162, 162, 1),
+ [P7_CAM_0_DATA08] = P7CTL_INIT_FUNC(CAM_0_DATA08, 162, 2),
+ [P7_LCD_0_DATA08] = P7CTL_INIT_FUNC(LCD_0_DATA08, 163, 0),
+ [P7_GPIO_163] = P7CTL_INIT_FUNC(GPIO_163, 163, 1),
+ [P7_CAM_0_DATA09] = P7CTL_INIT_FUNC(CAM_0_DATA09, 163, 2),
+ [P7_CAM_0_DATA08a] = P7CTL_INIT_FUNC(CAM_0_DATA08a, 163, 3),
+ [P7_LCD_0_DATA09] = P7CTL_INIT_FUNC(LCD_0_DATA09, 164, 0),
+ [P7_GPIO_164] = P7CTL_INIT_FUNC(GPIO_164, 164, 1),
+ [P7_CAM_0_DATA10] = P7CTL_INIT_FUNC(CAM_0_DATA10, 164, 2),
+ [P7_CAM_0_DATA09a] = P7CTL_INIT_FUNC(CAM_0_DATA09a, 164, 3),
+ [P7_LCD_0_DATA10] = P7CTL_INIT_FUNC(LCD_0_DATA10, 165, 0),
+ [P7_GPIO_165] = P7CTL_INIT_FUNC(GPIO_165, 165, 1),
+ [P7_CAM_0_DATA11] = P7CTL_INIT_FUNC(CAM_0_DATA11, 165, 2),
+ [P7_CAM_0_DATA10a] = P7CTL_INIT_FUNC(CAM_0_DATA10a, 165, 3),
+ [P7_LCD_0_DATA11] = P7CTL_INIT_FUNC(LCD_0_DATA11, 166, 0),
+ [P7_GPIO_166] = P7CTL_INIT_FUNC(GPIO_166, 166, 1),
+ [P7_CAM_0_DATA12] = P7CTL_INIT_FUNC(CAM_0_DATA12, 166, 2),
+ [P7_CAM_0_DATA11a] = P7CTL_INIT_FUNC(CAM_0_DATA11a, 166, 3),
+ [P7_LCD_0_DATA12] = P7CTL_INIT_FUNC(LCD_0_DATA12, 167, 0),
+ [P7_GPIO_167] = P7CTL_INIT_FUNC(GPIO_167, 167, 1),
+ [P7_CAM_0_DATA13] = P7CTL_INIT_FUNC(CAM_0_DATA13, 167, 2),
+ [P7_CAM_0_DATA12a] = P7CTL_INIT_FUNC(CAM_0_DATA12a, 167, 3),
+ [P7_LCD_0_DATA13] = P7CTL_INIT_FUNC(LCD_0_DATA13, 168, 0),
+ [P7_GPIO_168] = P7CTL_INIT_FUNC(GPIO_168, 168, 1),
+ [P7_CAM_0_DATA14] = P7CTL_INIT_FUNC(CAM_0_DATA14, 168, 2),
+ [P7_CAM_0_DATA13a] = P7CTL_INIT_FUNC(CAM_0_DATA13a, 168, 3),
+ [P7_LCD_0_DATA14] = P7CTL_INIT_FUNC(LCD_0_DATA14, 169, 0),
+ [P7_GPIO_169] = P7CTL_INIT_FUNC(GPIO_169, 169, 1),
+ [P7_CAM_0_DATA15] = P7CTL_INIT_FUNC(CAM_0_DATA15, 169, 2),
+ [P7_CAM_0_DATA14a] = P7CTL_INIT_FUNC(CAM_0_DATA14a, 169, 3),
+ [P7_LCD_0_DATA15] = P7CTL_INIT_FUNC(LCD_0_DATA15, 170, 0),
+ [P7_GPIO_170] = P7CTL_INIT_FUNC(GPIO_170, 170, 1),
+ [P7_CAM_0_DATA16] = P7CTL_INIT_FUNC(CAM_0_DATA16, 170, 2),
+ [P7_CAM_0_DATA15a] = P7CTL_INIT_FUNC(CAM_0_DATA15a, 170, 3),
+ [P7_LCD_0_DATA16] = P7CTL_INIT_FUNC(LCD_0_DATA16, 171, 0),
+ [P7_GPIO_171] = P7CTL_INIT_FUNC(GPIO_171, 171, 1),
+ [P7_CAM_0_DATA17] = P7CTL_INIT_FUNC(CAM_0_DATA17, 171, 2),
+ [P7_LCD_0_DATA17] = P7CTL_INIT_FUNC(LCD_0_DATA17, 172, 0),
+ [P7_GPIO_172] = P7CTL_INIT_FUNC(GPIO_172, 172, 1),
+ [P7_CAM_0_DATA18] = P7CTL_INIT_FUNC(CAM_0_DATA18, 172, 2),
+ [P7_LCD_0_DATA18] = P7CTL_INIT_FUNC(LCD_0_DATA18, 173, 0),
+ [P7_GPIO_173] = P7CTL_INIT_FUNC(GPIO_173, 173, 1),
+ [P7_CAM_0_DATA19] = P7CTL_INIT_FUNC(CAM_0_DATA19, 173, 2),
+ [P7_LCD_0_DATA19] = P7CTL_INIT_FUNC(LCD_0_DATA19, 174, 0),
+ [P7_GPIO_174] = P7CTL_INIT_FUNC(GPIO_174, 174, 1),
+ [P7_CAM_0_DATA20] = P7CTL_INIT_FUNC(CAM_0_DATA20, 174, 2),
+ [P7_LCD_0_DATA20] = P7CTL_INIT_FUNC(LCD_0_DATA20, 175, 0),
+ [P7_GPIO_175] = P7CTL_INIT_FUNC(GPIO_175, 175, 1),
+ [P7_CAM_0_DATA21] = P7CTL_INIT_FUNC(CAM_0_DATA21, 175, 2),
+ [P7_CAM_3_DATA09a] = P7CTL_INIT_FUNC(CAM_3_DATA09a, 175, 3),
+ [P7_LCD_0_DATA21] = P7CTL_INIT_FUNC(LCD_0_DATA21, 176, 0),
+ [P7_GPIO_176] = P7CTL_INIT_FUNC(GPIO_176, 176, 1),
+ [P7_CAM_0_DATA22] = P7CTL_INIT_FUNC(CAM_0_DATA22, 176, 2),
+ [P7_CAM_3_DATA08a] = P7CTL_INIT_FUNC(CAM_3_DATA08a, 176, 3),
+ [P7_LCD_0_DATA22] = P7CTL_INIT_FUNC(LCD_0_DATA22, 177, 0),
+ [P7_GPIO_177] = P7CTL_INIT_FUNC(GPIO_177, 177, 1),
+ [P7_CAM_0_DATA23] = P7CTL_INIT_FUNC(CAM_0_DATA23, 177, 2),
+ [P7_LCD_0_DATA23] = P7CTL_INIT_FUNC(LCD_0_DATA23, 178, 0),
+ [P7_GPIO_178] = P7CTL_INIT_FUNC(GPIO_178, 178, 1),
+ [P7_CAM_4_VS] = P7CTL_INIT_FUNC(CAM_4_VS, 178, 2),
+ [P7_LCD_0_CLK] = P7CTL_INIT_FUNC(LCD_0_CLK, 179, 0),
+ [P7_GPIO_179] = P7CTL_INIT_FUNC(GPIO_179, 179, 1),
+ [P7_CAM_4_HS] = P7CTL_INIT_FUNC(CAM_4_HS, 179, 2),
+ [P7_CAM_0_CLKa] = P7CTL_INIT_FUNC(CAM_0_CLKa, 179, 3),
+ [P7_LCD_1_HS] = P7CTL_INIT_FUNC(LCD_1_HS, 180, 0),
+ [P7_GPIO_180] = P7CTL_INIT_FUNC(GPIO_180, 180, 1),
+ [P7_CAM_2_CLK] = P7CTL_INIT_FUNC(CAM_2_CLK, 180, 2),
+ [P7_LCD_1_DENa] = P7CTL_INIT_FUNC(LCD_1_DENa, 180, 3),
+ [P7_LCD_1_VS] = P7CTL_INIT_FUNC(LCD_1_VS, 181, 0),
+ [P7_GPIO_181] = P7CTL_INIT_FUNC(GPIO_181, 181, 1),
+ [P7_CAM_2_DATA00] = P7CTL_INIT_FUNC(CAM_2_DATA00, 181, 2),
+ [P7_CPU_TRACECLKOUT] = P7CTL_INIT_FUNC(CPU_TRACECLKOUT, 181, 3),
+ [P7_LCD_1_CLK] = P7CTL_INIT_FUNC(LCD_1_CLK, 182, 0),
+ [P7_GPIO_182] = P7CTL_INIT_FUNC(GPIO_182, 182, 1),
+ [P7_CAM_2_DATA01] = P7CTL_INIT_FUNC(CAM_2_DATA01, 182, 2),
+ [P7_CPU_TRACECTL] = P7CTL_INIT_FUNC(CPU_TRACECTL, 182, 3),
+ [P7_LCD_1_DATA00] = P7CTL_INIT_FUNC(LCD_1_DATA00, 183, 0),
+ [P7_GPIO_183] = P7CTL_INIT_FUNC(GPIO_183, 183, 1),
+ [P7_CAM_2_DATA02] = P7CTL_INIT_FUNC(CAM_2_DATA02, 183, 2),
+ [P7_CPU_TRACEDATA_00] = P7CTL_INIT_FUNC(CPU_TRACEDATA_00, 183, 3),
+ [P7_LCD_1_DATA01] = P7CTL_INIT_FUNC(LCD_1_DATA01, 184, 0),
+ [P7_GPIO_184] = P7CTL_INIT_FUNC(GPIO_184, 184, 1),
+ [P7_CAM_2_DATA03] = P7CTL_INIT_FUNC(CAM_2_DATA03, 184, 2),
+ [P7_CPU_TRACEDATA_01] = P7CTL_INIT_FUNC(CPU_TRACEDATA_01, 184, 3),
+ [P7_LCD_1_DATA02] = P7CTL_INIT_FUNC(LCD_1_DATA02, 185, 0),
+ [P7_GPIO_185] = P7CTL_INIT_FUNC(GPIO_185, 185, 1),
+ [P7_CAM_2_DATA04] = P7CTL_INIT_FUNC(CAM_2_DATA04, 185, 2),
+ [P7_CPU_TRACEDATA_02] = P7CTL_INIT_FUNC(CPU_TRACEDATA_02, 185, 3),
+ [P7_LCD_1_DATA03] = P7CTL_INIT_FUNC(LCD_1_DATA03, 186, 0),
+ [P7_GPIO_186] = P7CTL_INIT_FUNC(GPIO_186, 186, 1),
+ [P7_CAM_2_DATA05] = P7CTL_INIT_FUNC(CAM_2_DATA05, 186, 2),
+ [P7_CPU_TRACEDATA_03] = P7CTL_INIT_FUNC(CPU_TRACEDATA_03, 186, 3),
+ [P7_LCD_1_DATA04] = P7CTL_INIT_FUNC(LCD_1_DATA04, 187, 0),
+ [P7_GPIO_187] = P7CTL_INIT_FUNC(GPIO_187, 187, 1),
+ [P7_CAM_2_DATA06] = P7CTL_INIT_FUNC(CAM_2_DATA06, 187, 2),
+ [P7_CPU_TRACEDATA_04] = P7CTL_INIT_FUNC(CPU_TRACEDATA_04, 187, 3),
+ [P7_LCD_1_DATA05] = P7CTL_INIT_FUNC(LCD_1_DATA05, 188, 0),
+ [P7_GPIO_188] = P7CTL_INIT_FUNC(GPIO_188, 188, 1),
+ [P7_CAM_2_DATA07] = P7CTL_INIT_FUNC(CAM_2_DATA07, 188, 2),
+ [P7_CPU_TRACEDATA_05] = P7CTL_INIT_FUNC(CPU_TRACEDATA_05, 188, 3),
+ [P7_LCD_1_DATA06] = P7CTL_INIT_FUNC(LCD_1_DATA06, 189, 0),
+ [P7_GPIO_189] = P7CTL_INIT_FUNC(GPIO_189, 189, 1),
+ [P7_CAM_3_CLK] = P7CTL_INIT_FUNC(CAM_3_CLK, 189, 2),
+ [P7_CPU_TRACEDATA_06] = P7CTL_INIT_FUNC(CPU_TRACEDATA_06, 189, 3),
+ [P7_LCD_1_DATA07] = P7CTL_INIT_FUNC(LCD_1_DATA07, 190, 0),
+ [P7_GPIO_190] = P7CTL_INIT_FUNC(GPIO_190, 190, 1),
+ [P7_CAM_3_DATA00] = P7CTL_INIT_FUNC(CAM_3_DATA00, 190, 2),
+ [P7_CPU_TRACEDATA_07] = P7CTL_INIT_FUNC(CPU_TRACEDATA_07, 190, 3),
+ [P7_LCD_1_DATA08] = P7CTL_INIT_FUNC(LCD_1_DATA08, 191, 0),
+ [P7_GPIO_191] = P7CTL_INIT_FUNC(GPIO_191, 191, 1),
+ [P7_CAM_3_DATA01] = P7CTL_INIT_FUNC(CAM_3_DATA01, 191, 2),
+ [P7_CPU_TRACEDATA_08] = P7CTL_INIT_FUNC(CPU_TRACEDATA_08, 191, 3),
+ [P7_LCD_1_DATA09] = P7CTL_INIT_FUNC(LCD_1_DATA09, 192, 0),
+ [P7_GPIO_192] = P7CTL_INIT_FUNC(GPIO_192, 192, 1),
+ [P7_CAM_3_DATA02] = P7CTL_INIT_FUNC(CAM_3_DATA02, 192, 2),
+ [P7_CPU_TRACEDATA_09] = P7CTL_INIT_FUNC(CPU_TRACEDATA_09, 192, 3),
+ [P7_LCD_1_DATA10] = P7CTL_INIT_FUNC(LCD_1_DATA10, 193, 0),
+ [P7_GPIO_193] = P7CTL_INIT_FUNC(GPIO_193, 193, 1),
+ [P7_CAM_3_DATA03] = P7CTL_INIT_FUNC(CAM_3_DATA03, 193, 2),
+ [P7_CPU_TRACEDATA_10] = P7CTL_INIT_FUNC(CPU_TRACEDATA_10, 193, 3),
+ [P7_LCD_1_DATA11] = P7CTL_INIT_FUNC(LCD_1_DATA11, 194, 0),
+ [P7_GPIO_194] = P7CTL_INIT_FUNC(GPIO_194, 194, 1),
+ [P7_CAM_3_DATA04] = P7CTL_INIT_FUNC(CAM_3_DATA04, 194, 2),
+ [P7_CPU_TRACEDATA_11] = P7CTL_INIT_FUNC(CPU_TRACEDATA_11, 194, 3),
+ [P7_LCD_1_DATA12] = P7CTL_INIT_FUNC(LCD_1_DATA12, 195, 0),
+ [P7_GPIO_195] = P7CTL_INIT_FUNC(GPIO_195, 195, 1),
+ [P7_CAM_3_DATA05] = P7CTL_INIT_FUNC(CAM_3_DATA05, 195, 2),
+ [P7_CPU_TRACEDATA_12] = P7CTL_INIT_FUNC(CPU_TRACEDATA_12, 195, 3),
+ [P7_LCD_1_DATA13] = P7CTL_INIT_FUNC(LCD_1_DATA13, 196, 0),
+ [P7_GPIO_196] = P7CTL_INIT_FUNC(GPIO_196, 196, 1),
+ [P7_CAM_3_DATA06] = P7CTL_INIT_FUNC(CAM_3_DATA06, 196, 2),
+ [P7_CPU_TRACEDATA_13] = P7CTL_INIT_FUNC(CPU_TRACEDATA_13, 196, 3),
+ [P7_LCD_1_DATA14] = P7CTL_INIT_FUNC(LCD_1_DATA14, 197, 0),
+ [P7_GPIO_197] = P7CTL_INIT_FUNC(GPIO_197, 197, 1),
+ [P7_CAM_3_DATA07] = P7CTL_INIT_FUNC(CAM_3_DATA07, 197, 2),
+ [P7_CPU_TRACEDATA_14] = P7CTL_INIT_FUNC(CPU_TRACEDATA_14, 197, 3),
+ [P7_LCD_1_DATA15] = P7CTL_INIT_FUNC(LCD_1_DATA15, 198, 0),
+ [P7_GPIO_198] = P7CTL_INIT_FUNC(GPIO_198, 198, 1),
+ [P7_CAM_4_CLK] = P7CTL_INIT_FUNC(CAM_4_CLK, 198, 2),
+ [P7_CPU_TRACEDATA_15] = P7CTL_INIT_FUNC(CPU_TRACEDATA_15, 198, 3),
+ [P7_LCD_1_DATA16] = P7CTL_INIT_FUNC(LCD_1_DATA16, 199, 0),
+ [P7_GPIO_199] = P7CTL_INIT_FUNC(GPIO_199, 199, 1),
+ [P7_CAM_4_DATA00] = P7CTL_INIT_FUNC(CAM_4_DATA00, 199, 2),
+ [P7_LCD_1_DATA17] = P7CTL_INIT_FUNC(LCD_1_DATA17, 200, 0),
+ [P7_GPIO_200] = P7CTL_INIT_FUNC(GPIO_200, 200, 1),
+ [P7_CAM_4_DATA01] = P7CTL_INIT_FUNC(CAM_4_DATA01, 200, 2),
+ [P7_LCD_1_DATA18] = P7CTL_INIT_FUNC(LCD_1_DATA18, 201, 0),
+ [P7_GPIO_201] = P7CTL_INIT_FUNC(GPIO_201, 201, 1),
+ [P7_CAM_4_DATA02] = P7CTL_INIT_FUNC(CAM_4_DATA02, 201, 2),
+ [P7_LCD_1_DATA19] = P7CTL_INIT_FUNC(LCD_1_DATA19, 202, 0),
+ [P7_GPIO_202] = P7CTL_INIT_FUNC(GPIO_202, 202, 1),
+ [P7_CAM_4_DATA03] = P7CTL_INIT_FUNC(CAM_4_DATA03, 202, 2),
+ [P7_LCD_1_DATA20] = P7CTL_INIT_FUNC(LCD_1_DATA20, 203, 0),
+ [P7_GPIO_203] = P7CTL_INIT_FUNC(GPIO_203, 203, 1),
+ [P7_CAM_4_DATA04] = P7CTL_INIT_FUNC(CAM_4_DATA04, 203, 2),
+ [P7_LCD_1_DATA21] = P7CTL_INIT_FUNC(LCD_1_DATA21, 204, 0),
+ [P7_GPIO_204] = P7CTL_INIT_FUNC(GPIO_204, 204, 1),
+ [P7_CAM_4_DATA05] = P7CTL_INIT_FUNC(CAM_4_DATA05, 204, 2),
+ [P7_LCD_1_DATA22] = P7CTL_INIT_FUNC(LCD_1_DATA22, 205, 0),
+ [P7_GPIO_205] = P7CTL_INIT_FUNC(GPIO_205, 205, 1),
+ [P7_CAM_4_DATA06] = P7CTL_INIT_FUNC(CAM_4_DATA06, 205, 2),
+ [P7_LCD_1_DATA23] = P7CTL_INIT_FUNC(LCD_1_DATA23, 206, 0),
+ [P7_GPIO_206] = P7CTL_INIT_FUNC(GPIO_206, 206, 1),
+ [P7_CAM_4_DATA07] = P7CTL_INIT_FUNC(CAM_4_DATA07, 206, 2),
+ [P7_LCD_1_DEN] = P7CTL_INIT_FUNC(LCD_1_DEN, 207, 0),
+ [P7_GPIO_207] = P7CTL_INIT_FUNC(GPIO_207, 207, 1),
+ [P7_CAM_1_HSa] = P7CTL_INIT_FUNC(CAM_1_HSa, 207, 2),
+ [P7_LCD_1_CLKa] = P7CTL_INIT_FUNC(LCD_1_CLKa, 207, 3),
+ [P7_CAM_0_CLK] = P7CTL_INIT_FUNC(CAM_0_CLK, 208, 0),
+ [P7_GPIO_208] = P7CTL_INIT_FUNC(GPIO_208, 208, 1),
+ [P7_CAM_1_CLKa] = P7CTL_INIT_FUNC(CAM_1_CLKa, 208, 2),
+ [P7_CAM_0_DATA00] = P7CTL_INIT_FUNC(CAM_0_DATA00, 209, 0),
+ [P7_GPIO_209] = P7CTL_INIT_FUNC(GPIO_209, 209, 1),
+ [P7_CAM_1_DATA08] = P7CTL_INIT_FUNC(CAM_1_DATA08, 209, 2),
+ [P7_CAM_0_DATA01] = P7CTL_INIT_FUNC(CAM_0_DATA01, 210, 0),
+ [P7_GPIO_210] = P7CTL_INIT_FUNC(GPIO_210, 210, 1),
+ [P7_CAM_1_DATA09] = P7CTL_INIT_FUNC(CAM_1_DATA09, 210, 2),
+ [P7_CAM_0_DATA02] = P7CTL_INIT_FUNC(CAM_0_DATA02, 211, 0),
+ [P7_GPIO_211] = P7CTL_INIT_FUNC(GPIO_211, 211, 1),
+ [P7_CAM_1_DATA10] = P7CTL_INIT_FUNC(CAM_1_DATA10, 211, 2),
+ [P7_CAM_4_DATA08] = P7CTL_INIT_FUNC(CAM_4_DATA08, 211, 3),
+ [P7_CAM_0_DATA03] = P7CTL_INIT_FUNC(CAM_0_DATA03, 212, 0),
+ [P7_GPIO_212] = P7CTL_INIT_FUNC(GPIO_212, 212, 1),
+ [P7_CAM_1_DATA11] = P7CTL_INIT_FUNC(CAM_1_DATA11, 212, 2),
+ [P7_CAM_4_DATA09] = P7CTL_INIT_FUNC(CAM_4_DATA09, 212, 3),
+ [P7_CAM_0_DATA04] = P7CTL_INIT_FUNC(CAM_0_DATA04, 213, 0),
+ [P7_GPIO_213] = P7CTL_INIT_FUNC(GPIO_213, 213, 1),
+ [P7_CAM_1_DATA12] = P7CTL_INIT_FUNC(CAM_1_DATA12, 213, 2),
+ [P7_CAM_5_DATA08] = P7CTL_INIT_FUNC(CAM_5_DATA08, 213, 3),
+ [P7_CAM_0_DATA05] = P7CTL_INIT_FUNC(CAM_0_DATA05, 214, 0),
+ [P7_GPIO_214] = P7CTL_INIT_FUNC(GPIO_214, 214, 1),
+ [P7_CAM_1_DATA13] = P7CTL_INIT_FUNC(CAM_1_DATA13, 214, 2),
+ [P7_CAM_5_DATA09] = P7CTL_INIT_FUNC(CAM_5_DATA09, 214, 3),
+ [P7_CAM_0_DATA06] = P7CTL_INIT_FUNC(CAM_0_DATA06, 215, 0),
+ [P7_GPIO_215] = P7CTL_INIT_FUNC(GPIO_215, 215, 1),
+ [P7_CAM_1_DATA14] = P7CTL_INIT_FUNC(CAM_1_DATA14, 215, 2),
+ [P7_CAM_2_DATA08] = P7CTL_INIT_FUNC(CAM_2_DATA08, 215, 3),
+ [P7_CAM_0_DATA07] = P7CTL_INIT_FUNC(CAM_0_DATA07, 216, 0),
+ [P7_GPIO_216] = P7CTL_INIT_FUNC(GPIO_216, 216, 1),
+ [P7_CAM_1_DATA15] = P7CTL_INIT_FUNC(CAM_1_DATA15, 216, 2),
+ [P7_CAM_2_DATA09] = P7CTL_INIT_FUNC(CAM_2_DATA09, 216, 3),
+ [P7_CAM_1_CLK] = P7CTL_INIT_FUNC(CAM_1_CLK, 217, 0),
+ [P7_GPIO_217] = P7CTL_INIT_FUNC(GPIO_217, 217, 1),
+ [P7_SPI_00b] = P7CTL_INIT_FUNC(SPI_00b, 217, 2),
+ [P7_CAM_1_DATA00] = P7CTL_INIT_FUNC(CAM_1_DATA00, 218, 0),
+ [P7_GPIO_218] = P7CTL_INIT_FUNC(GPIO_218, 218, 1),
+ [P7_SPI_01b] = P7CTL_INIT_FUNC(SPI_01b, 218, 2),
+ [P7_CAM_0_VSa] = P7CTL_INIT_FUNC(CAM_0_VSa, 218, 3),
+ [P7_CAM_1_DATA01] = P7CTL_INIT_FUNC(CAM_1_DATA01, 219, 0),
+ [P7_GPIO_219] = P7CTL_INIT_FUNC(GPIO_219, 219, 1),
+ [P7_SPI_02b] = P7CTL_INIT_FUNC(SPI_02b, 219, 2),
+ [P7_CAM_0_HSa] = P7CTL_INIT_FUNC(CAM_0_HSa, 219, 3)
+};
+
+/*
+ * GPIO capability bitmap for chip revision 3: one bit per pad, bit N of
+ * word N/32 set when pad N has a GPIO mux function (matches the GPIO_*
+ * entries of p7_pin_funcs_r3 above) -- assumption from the table layout,
+ * TODO confirm against the pinctrl driver.
+ */
+static unsigned long p7_gpio_pins_r3[] = {
+ 0x00007fff,
+ 0xff8607c2,
+ 0x5fffff9f,
+ 0xffdffc00,
+ 0xffffffff,
+ 0xffffffff,
+ 0x0fffffff
+};
+
+/* Aggregate pinctrl description handed to the pinctrl core for R3 chips. */
+const struct p7_pinctrl_config p7_pinctrl_config_r3 = {
+ .pin_descs = p7_pin_descs_r3,
+ .pin_descs_sz = ARRAY_SIZE(p7_pin_descs_r3),
+ .pin_funcs = p7_pin_funcs_r3,
+ .gpio_pins = p7_gpio_pins_r3,
+ .gpio_pins_sz = ARRAY_SIZE(p7_gpio_pins_r3),
+};
diff --git a/arch/arm/mach-parrot7/pm.c b/arch/arm/mach-parrot7/pm.c
new file mode 100644
index 00000000000000..dd53d2aaad10f6
--- /dev/null
+++ b/arch/arm/mach-parrot7/pm.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2013 Parrot S.A.
+ * Author: Alvaro Moran <alvaro.moran@parrot.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
+#include <mfd/p7mu.h>
+#include <linux/delay.h>
+#include <asm/smp_scu.h>
+#include <linux/mman.h>
+#include <mach/p7.h>
+#include "common.h"
+#include "system.h"
+#include "clock.h"
+#include "dma.h"
+#include "core.h"
+
+#include <mfd/p7mu.h>
+#include <asm/setup.h>
+#include <asm/smp_twd.h>
+
+/*
+ * TODO: Consider that the beginning of INTRAM might be used by the DMA, so we
+ * just want to be sure nobody else is using it.
+ */
+
+/* Count of completed resumes from suspend-to-RAM (printed after resume). */
+int p7_resume_cnt;
+/* Virtual mapping of INTRAM where the final suspend code is copied. */
+static u32 suspend_vaddr;
+/* Low-level suspend finisher and its size, implemented in sleep.S. */
+extern void p7_finish_suspend(void);
+extern int p7_finish_suspend_sz;
+/* Parameter block read by the suspend assembly code (see sleep.S). */
+struct {
+ int p7_ram_size;
+} p7_s2r_param;
+
+/*
+ * cpu_suspend() finisher callback: record the DDR size for the suspend
+ * code, flush the outer cache and jump to the code previously copied to
+ * INTRAM. Must never return.
+ */
+static int __maybe_unused p7_last_jump(unsigned long arg)
+{
+ /* assume only one memory bank : we have one CS */
+ struct membank *bank = &meminfo.bank[0];
+ p7_s2r_param.p7_ram_size = bank->size;
+
+ outer_flush_all();
+
+ soft_restart(P7_INTRAM);
+
+ /* Should never get here. */
+ BUG();
+}
+
+/*
+ * Map INTRAM so the final suspend instructions can be copied there.
+ * Returns 0 on success, -1 if the sleep code exceeds the 8 KiB budget
+ * or the mapping fails. The actual copy happens in p7_enter_pm().
+ */
+static int p7_prepare_suspend(void)
+{
+
+ if (p7_finish_suspend_sz > 8 * 1024) {
+ printk("Error sleep code is too big %d\n", p7_finish_suspend_sz);
+ return -1;
+ }
+ /*
+ * map physical addresses of INTRAM controller to copy the sleep code
+ * and jump there later.
+ */
+ suspend_vaddr = (u32)__arm_ioremap_exec(P7_INTRAM,
+ p7_finish_suspend_sz, false);
+ if (!suspend_vaddr) {
+ printk("Error trying to map 0x%lx\n", P7_INTRAM);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Tear down the INTRAM mapping created by p7_prepare_suspend(). */
+static void p7_unprepare_suspend(void)
+{
+ __iounmap((void*)suspend_vaddr);
+}
+
+/*
+ * suspend_ops.enter handler.
+ * STANDBY: plain WFI via cpu_do_idle().
+ * MEM: copy the sleep.S finisher into INTRAM, suspend through
+ * cpu_suspend(); on resume, disable the bootloader watchdog,
+ * de-reset NEON, re-init SCU/L2 and zero the PL330 microcode RAM.
+ */
+static int p7_enter_pm(suspend_state_t state)
+{
+ int err;
+ u32 scu_ctrl __maybe_unused;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ cpu_do_idle();
+ return 0;
+
+#ifdef CONFIG_PM_SLEEP
+ case PM_SUSPEND_MEM:
+#ifdef CONFIG_ARCH_PARROT7_DEBUG_S2RAM
+ /* debug use rom code and we use p7r3 symbols */
+ if (p7_chiprev() != P7_CHIPREV_R3) {
+ return -EINVAL;
+ }
+ /* keep 1K for stack */
+ if (p7_is_nonsecure()) {
+ return -EINVAL;
+ }
+#endif
+
+ /* warning intram is also used by PL330 ucode */
+ memcpy_toio((void*)suspend_vaddr,
+ p7_finish_suspend, p7_finish_suspend_sz);
+ err = cpu_suspend(0, p7_last_jump);
+
+ /* cpu_suspend() returns 0 only on the resume path. */
+ if (err == 0) {
+ p7_resume_cnt++;
+ }
+ printk("Starting resume #%d... (%d)\n", p7_resume_cnt, err);
+
+ /* stop watchdog set by bootloader on cpu0 */
+ writel(0x12345678, __MMIO_P2V(P7_CPU_LOCALTIMER) + TWD_WDOG_DISABLE);
+ writel(0x87654321, __MMIO_P2V(P7_CPU_LOCALTIMER) + TWD_WDOG_DISABLE);
+ writel(0x0, __MMIO_P2V(P7_CPU_LOCALTIMER) + TWD_WDOG_CONTROL);
+
+ /* De-reset NEON unit. */
+ p7_enable_neonclk(0);
+#ifdef CONFIG_SMP
+ platform_smp_prepare_cpus(num_possible_cpus());
+#endif
+#ifdef CONFIG_CACHE_L2X0
+ outer_resume();
+#endif
+#ifdef CONFIG_DMA_PARROT7
+ /*
+ * PL330 knows that a channel is free when the first
+ * instruction in channel's microcode is DMAEND (0x0).
+ * To simplify, make all memory used for DMA ucode to 0.
+ */
+ memset((void*)__MMIO_P2V(p7_dma_ucode_addr()),
+ 0, p7_dma_ucode_sz());
+#endif
+ break;
+#endif
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/*
+ * suspend_ops.begin handler: runs once at the start of a suspend cycle.
+ * Disables idle HLT, stamps a signature derived from the P7MU OTP reset
+ * value into a P7MU application register, saves the kernel resume entry
+ * point in the P7MU and maps INTRAM for the sleep code.
+ */
+static int p7_begin_pm(suspend_state_t state)
+{
+ u16 reg;
+ int ret;
+
+ disable_hlt();
+
+ p7mu_read16(0xe18, &reg); /* OTP reset value */
+ /* NOTE(review): reads 0xe18 but writes 0xe83 -- looks intentional,
+ * confirm the register pair against the P7MU register map. */
+ reg = (reg >> 8) ^ 'P'; /* xor with 'P' */
+ p7mu_write16(0xe83, reg);
+
+ ret = p7mu_save_cpu_jump(cpu_resume);
+ if (ret)
+ return ret;
+ return p7_prepare_suspend();
+}
+
+/* suspend_ops.end handler: undo what p7_begin_pm() set up. */
+static void p7_end_pm(void)
+{
+ p7_unprepare_suspend();
+ enable_hlt();
+ return;
+}
+
+/* Parrot7 suspend operations; only mem (and standby) states are handled. */
+static struct platform_suspend_ops p7_pm_ops = {
+ .begin = p7_begin_pm,
+ .end = p7_end_pm,
+ .enter = p7_enter_pm,
+ .valid = suspend_valid_only_mem,
+};
+
+/* Register the Parrot7 suspend operations with the PM core. */
+void __init p7_pm_init(void)
+{
+ suspend_set_ops(&p7_pm_ops);
+}
+
diff --git a/arch/arm/mach-parrot7/readme.txt b/arch/arm/mach-parrot7/readme.txt
new file mode 100644
index 00000000000000..32ab3a042345ae
--- /dev/null
+++ b/arch/arm/mach-parrot7/readme.txt
@@ -0,0 +1,38 @@
+Using clocks with the Parrot 7 Linux BSP :
+==========================================
+
+From within drivers you should use :
+
+struct clk* clk_get(struct device*, char const*);
+void clk_put(struct clk*);
+int clk_enable(struct clk*);
+void clk_disable(struct clk*);
+unsigned long clk_get_rate(struct clk*);
+
+Usage sample code:
+------------------
+
+static struct clk* myclk;
+
+void myfunc(void)
+{
+ printk("%lu Hertz\n", clk_get_rate(myclk));
+}
+
+int init(struct platform_device* mydev)
+{
+ /* Get a reference to my clock. */
+ myclk = clk_get(&mydev->dev, "core");
+
+ /* activate clock. */
+ clk_enable(myclk);
+}
+
+void fini(void)
+{
+ /* Turn clock off. */
+ clk_disable(myclk);
+
+ /* Do not forget this! Release clock reference. */
+ clk_put(myclk);
+}
diff --git a/arch/arm/mach-parrot7/sdhci.c b/arch/arm/mach-parrot7/sdhci.c
new file mode 100644
index 00000000000000..45005e8e436072
--- /dev/null
+++ b/arch/arm/mach-parrot7/sdhci.c
@@ -0,0 +1,145 @@
+/**
+ * linux/arch/arm/mach-parrot7/sdhci.c - Parrot7 eMMC / SD / SDIO controller
+ * platform implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 14-Jun-2012
+ *
+ * This file is released under the GPL
+ *
+ * See linux/drivers/parrot/mmc/acs3-sdhci.c file header
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <mach/irqs.h>
+#include <mach/p7.h>
+#include <mmc/acs3-sdhci.h>
+#include "common.h"
+#include "clock.h"
+
+/*****************************
+ * Host controller instances
+ *****************************/
+
+/* Register window and interrupt line of SDIO host controller 0. */
+static struct resource p7_sdhci0_res[] = {
+ [0] = {
+ .start = P7_SDIO0,
+ .end = P7_SDIO0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_SDIO0_IRQ,
+ .end = P7_SDIO0_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+/* Register window and interrupt line of SDIO host controller 1. */
+static struct resource p7_sdhci1_res[] = {
+ [0] = {
+ .start = P7_SDIO1,
+ .end = P7_SDIO1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_SDIO1_IRQ,
+ .end = P7_SDIO1_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+/* Register window and interrupt line of SDIO host controller 2. */
+static struct resource p7_sdhci2_res[] = {
+ [0] = {
+ .start = P7_SDIO2,
+ .end = P7_SDIO2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_SDIO2_IRQ,
+ .end = P7_SDIO2_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+/* The three on-chip ACS3 SDHCI controller instances, indexed by @host. */
+static struct platform_device p7_sdhci_devs[] = {
+ {
+ .name = ACS3_DRV_NAME,
+ .id = 0,
+ .resource = p7_sdhci0_res,
+ .num_resources = ARRAY_SIZE(p7_sdhci0_res)
+ },
+ {
+ .name = ACS3_DRV_NAME,
+ .id = 1,
+ .resource = p7_sdhci1_res,
+ .num_resources = ARRAY_SIZE(p7_sdhci1_res)
+ },
+ {
+ .name = ACS3_DRV_NAME,
+ .id = 2,
+ .resource = p7_sdhci2_res,
+ .num_resources = ARRAY_SIZE(p7_sdhci2_res)
+ }
+};
+
+/* Per-controller GBC tuning/control registers (virtual addresses). */
+static struct acs3_regs const p7_sdhci_regs[] = {
+ { /* SDIO controller 0. */
+ .tdl1 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO0_TDL1_CFG),
+ .tdl2 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO0_TDL2_CFG),
+ .ctrl = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO0_CTRL),
+ .retune = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO0_RETUNE)
+ },
+ { /* SDIO controller 1. */
+ .tdl1 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO1_TDL1_CFG),
+ .tdl2 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO1_TDL2_CFG),
+ .ctrl = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO1_CTRL),
+ .retune = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO1_RETUNE)
+ },
+ { /* SDIO controller 2. */
+ .tdl1 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO2_TDL1_CFG),
+ .tdl2 = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO2_TDL2_CFG),
+ .ctrl = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO2_CTRL),
+ .retune = __MMIO_P2V(P7_HSP_GBC + HSP_GBC_SDIO2_RETUNE)
+ }
+};
+
+/**
+ * p7_init_sdhci() - Instantiate SDHCI host controller identified by @host
+ * for further driver usage.
+ *
+ * @host: SDHCI host controller identifier (0..2, indexes p7_sdhci_devs)
+ * @pdata: controller platform specific data
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of element of @pins array
+ */
+void __init p7_init_sdhci(int host,
+ struct acs3_plat_data* pdata,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+#ifdef DEBUG
+ BUG_ON(host < 0);
+ BUG_ON(host >= ARRAY_SIZE(p7_sdhci_devs));
+ BUG_ON(! pdata);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+
+ /*
+ * Set GBC registers addresses so that driver can easily locate them in a
+ * portable way.
+ */
+ pdata->regs = &p7_sdhci_regs[host];
+
+ /*
+ * Disable the command conflict error, i.e. disable multi-master mode.
+ * This allows us to keep working at high frequencies.
+ */
+ __raw_writel(__raw_readl(pdata->regs->ctrl) | ACS3_CTRL_CMD_CONFLICT_DIS_MASK,
+ pdata->regs->ctrl);
+
+ p7_init_dev(&p7_sdhci_devs[host], pdata, pins, pin_cnt);
+}
diff --git a/arch/arm/mach-parrot7/sdhci.h b/arch/arm/mach-parrot7/sdhci.h
new file mode 100644
index 00000000000000..b6e7c4477bd54c
--- /dev/null
+++ b/arch/arm/mach-parrot7/sdhci.h
@@ -0,0 +1,39 @@
+/**
+ * linux/arch/arm/mach-parrot7/sdhci.h - Parrot7 eMMC / SD / SDIO controller
+ * platform interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 14-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_SDHCI_H
+#define _ARCH_PARROT7_SDHCI_H
+
+struct acs3_plat_data;
+struct pinctrl_map;
+
+#include <linux/init.h>
+#include <mmc/acs3-sdhci.h>
+
+
+#if defined(CONFIG_MMC_SDHCI_ACS3) || \
+ defined(CONFIG_MMC_SDHCI_ACS3_MODULE)
+
+extern void p7_init_sdhci(int,
+ struct acs3_plat_data*,
+ struct pinctrl_map*,
+ size_t) __init;
+
+#else /* !(defined(CONFIG_MMC_SDHCI_ACS3) || \
+ defined(CONFIG_MMC_SDHCI_ACS3_MODULE)) */
+
+/* Stub: compiles away when the ACS3 SDHCI driver is not selected. */
+#define p7_init_sdhci(_host, _pdata, _pins, _pins_nr)
+
+#endif /* defined(CONFIG_MMC_SDHCI_ACS3) || \
+ defined(CONFIG_MMC_SDHCI_ACS3_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/sii_platform_data.h b/arch/arm/mach-parrot7/sii_platform_data.h
new file mode 100644
index 00000000000000..ebc3152dfc2fee
--- /dev/null
+++ b/arch/arm/mach-parrot7/sii_platform_data.h
@@ -0,0 +1,37 @@
+/*
+ * Platform data for the sii5293
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed .as is. WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ */
+
+
+#ifndef __SII_PLATFORM_DATA_H__
+#define __SII_PLATFORM_DATA_H__
+
+/* Bump whenever the layout of struct sii5293_platform_data changes. */
+#define SII5293_PDATA_STRUCT_VERSION 1
+
+struct sii5293_platform_data {
+ /* keep version field in first position */
+ int version;
+
+ int gpio_rst; /* GPIO wired to the chip reset line */
+ int gpio_irq; /* GPIO used as the chip interrupt input */
+ int i2c_bus; /* I2C adapter number the chip sits on */
+
+ int output_format;
+
+ /* enable flags for the RAP/RCP/UCP input devices */
+ int input_dev_rap;
+ int input_dev_rcp;
+ int input_dev_ucp;
+};
+
+#endif
diff --git a/arch/arm/mach-parrot7/sleep.S b/arch/arm/mach-parrot7/sleep.S
new file mode 100644
index 00000000000000..a280617262404d
--- /dev/null
+++ b/arch/arm/mach-parrot7/sleep.S
@@ -0,0 +1,183 @@
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/p7.h>
+#include "gbc.h"
+
+#ifdef CONFIG_ARCH_PARROT7_DEBUG_S2RAM
+#define P7_DDR_CRC_ALL 1
+#else
+#define P7_DDR_CRC_ALL 0
+#endif
+
+#define I2CM_TIP (1 << 1)
+#define I2CM_COMMAND_WR (1 << 1)
+#define I2CM_COMMAND_STOP (1 << 3)
+#define I2CM_COMMAND_START (1 << 4)
+#define I2CM_TX_REG (0x00)
+#define I2CM_STATUS_REG (0x10)
+#define I2CM_COMMAND_REG (0x14)
+#define I2CM_PRESCALE (0x18)
+
+#define P7MU_ADDR (0x31)
+#define P7MU_COMMAND_REG (0x107)
+#define P7MU_SUSPEND_CMD (1 << 1)
+#define P7MU_APP0 (0x900)
+#define P7MU_APP1 (0x901)
+#define P7MU_APP3 (0x903)
+#define P7MU_APP4 (0x904)
+#define P7MU_APP5 (0x905)
+#define P7MU_APP6 (0x906)
+
+@ p7mu_write <reg16>: write the 16-bit value held in r0 to P7MU register
+@ <reg16> over I2C (start + chip address, 16-bit register index, data
+@ high byte, data low byte + stop). Clobbers r0-r2 and lr via send_byte.
+.macro p7mu_write, reg16
+ @ input r0 (data)
+ @ local use r0: cmd byte, r1: byte to transmit, r2
+ mov r2, r0
+ mov r0, #I2CM_COMMAND_START
+ mov r1, #(P7MU_ADDR << 1)
+ bl send_byte
+ mov r0, #0
+ mov r1, #((\reg16 >> 8))
+ bl send_byte
+ mov r1, #(\reg16 & 0xff)
+ bl send_byte
+ mov r1, r2, lsr #8
+ bl send_byte
+ mov r0, #I2CM_COMMAND_STOP
+ and r1, r2, #255
+ bl send_byte
+.endm
+
+ .text
+
+@ p7_finish_suspend: final suspend code, executed from INTRAM (copied
+@ there by pm.c). Optionally CRCs the whole DDR (debug builds), arms a
+@ watchdog, puts the MPMC in self-refresh, then commands the P7MU over
+@ I2C to cut the CPU power domain. Never returns.
+ENTRY(p7_finish_suspend)
+ @set vector at 0. It helps debug in case of abort
+ mrc p15, 0, r0, c1, c0, 0 @ read control register
+ bic r0, r0, #(1 << 13)
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+#if P7_DDR_CRC_ALL
+ ldr r0, p7_s2r_param_ptr
+ ldr r10, [r0]
+ adr r0, rom_bases
+ ldmia r0, {r4 - r9}
+ @we store the mmu table after the stack. We need 32K align
+ mov sp, #(P7_INTRAM+32*1024)
+ stmdb sp!, {r0,r1,r2,r4,r5,r7,r9,r11}
+ blx r4 @cache invalidate
+ ldmia sp!, {r0,r1,r2,r4,r5,r7,r9,r11}
+ blx r5 @mmu_off
+ mov r0, sp
+ mov r1, r10, lsr #20
+ blx r6 @mmu_on
+ blx r7 @dcache
+ blx r8 @icache
+
+
+ @crc of all ddr
+ mov r0, #(0x80000000)
+ mov r1, r10
+ blx r9 @posix_crc
+ mov r10, r0
+#else
+ mov r10, #0
+#endif
+
+#if 0
+ @crc of resume code
+ ldr r0, p7_s2r_param_ptr
+ ldr r0, [r0, #4]
+ mov r1, #4096
+ blx r9 @posix_crc
+ mov r9, r0
+#else
+ mov r9, #0
+#endif
+
+ @ r4: mpmc self refresh register
+ @ r5: mpmc gate training register
+ @ r6: i2cm base address
+ @ r7: i2cm clock register
+ @ r8: watchdog base address
+ adr r0, reg_bases
+ ldmia r0, {r4 - r8}
+
+ @ enable watchdog
+ mov r0, #0xff000 @load register. the timeout is ((load + 1) * (prescaler + 1)) / (cpu_clk/2)
+ @for 780Mhz cpu it should give us 680ms.
+ str r0, [r8, #0x20]
+ movw r0, #0x0000FF09 @ctrl reg : prescaler = 0xff, WD mode, enable
+ str r0, [r8, #0x28]
+
+ @ disable gate training
+ mov r0, #0
+ str r0, [r5]
+
+ @ enable self-refresh
+ mov r0, #1
+ str r0, [r4]
+ @ MPMC is now in self-refresh
+
+ @ enable i2c clock and dereset IP
+ mov r0, #1
+ str r0, [r7]
+ mov r0, #0
+ str r0, [r7, #(LSP_GBC_I2CM0_RESET - LSP_GBC_I2CM0_CLOCK)]
+
+ @ init i2c IP
+ mov r0, #0
+ str r0, [r6, #I2CM_PRESCALE]
+
+ mov r0, #0xd8 @100KHZ
+ str r0, [r6, #I2CM_PRESCALE]
+
+ @crc of ddr
+ mov r0, r10, lsr #16
+ p7mu_write P7MU_APP3
+ mov r0, r10
+ p7mu_write P7MU_APP4
+
+ @ Send the suspend command to the P7MU via I2C
+ mov r0, #P7MU_SUSPEND_CMD
+ p7mu_write P7MU_COMMAND_REG
+
+ @ Wait here for P7MU to shutdown the power domain that
+ @ powers the CPU
+ b .
+
+ @ very simple send byte function for I2CM
+ @ write command and byte to xfer, wait for xfer to be done, return
+ @ there is no error handling
+ @ r6: i2cm base address
+ @ r0: cmd
+ @ r1: byte to write
+ @ ip: tmp
+send_byte:
+ orr r0, r0, #I2CM_COMMAND_WR
+ str r1, [r6, #I2CM_TX_REG]
+ str r0, [r6, #I2CM_COMMAND_REG]
+1: ldr ip, [r6, #I2CM_STATUS_REG]
+ ands ip, ip, #I2CM_TIP
+ bne 1b
+ bx lr
+
+reg_bases:
+ .long P7_MPMC_GBC + MPMC_GBC_SELF_REFRESH
+ .long P7_MPMC + P7_MPMC_TRAIN_EN
+ .long P7_I2CM0
+ .long P7_LSP_GBC + LSP_GBC_I2CM0_CLOCK
+ .long P7_CPU_LOCALTIMER
+
+@ NOTE(review): hard-coded Boot ROM entry points -- presumably only valid
+@ for P7 R3 silicon (cf. the CONFIG_ARCH_PARROT7_DEBUG_S2RAM chip-revision
+@ check in pm.c); confirm before enabling P7_DDR_CRC_ALL on other revs.
+rom_bases:
+ .long 0x00000244 @invalidate_caches_and_tlbs
+ .long 0x000144ac @mmu_off
+ .long 0x0001430c @mmu_on
+ .long 0x000016a0 @dcache_on
+ .long 0x00001670 @icache_on
+ .long 0x00001250 @posix_crc
+
+p7_s2r_param_ptr:
+ .word p7_s2r_param
+
+@ Exported byte size of the code above; pm.c bounds-checks it against 8 KiB.
+ENTRY(p7_finish_suspend_sz)
+ .word . - p7_finish_suspend
diff --git a/arch/arm/mach-parrot7/spi.c b/arch/arm/mach-parrot7/spi.c
new file mode 100644
index 00000000000000..3e39b346f06dde
--- /dev/null
+++ b/arch/arm/mach-parrot7/spi.c
@@ -0,0 +1,399 @@
+/**
+ * linux/arch/arm/mach-parrot7/spi.c - Parrot7 SPI controller platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 27-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clkdev.h>
+#include <linux/spi/spi.h>
+#include <spi/p7-spi.h>
+#include <mach/p7.h>
+#include <mach/irqs.h>
+#include "pinctrl.h"
+#include "dma.h"
+#include "common.h"
+#include "clock.h"
+
+/* SPI Kernel resource */
+static struct resource p7_spi_res = {
+ .start = P7_SPI,
+ .end = P7_SPI + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct p7spi_plat_data p7_spi_pdata = {
+ .max_slave_hz = 10000000,
+ .wcnt = 128,
+};
+
+/* SPI Kernel device */
+static struct platform_device p7_spi_dev = {
+ .name = P7SPI_DRV_NAME,
+ .id = 0,
+ .dev.platform_data = &p7_spi_pdata,
+ .resource = &p7_spi_res,
+ .num_resources = 1,
+};
+
+static enum p7spi_core_type p7spi_core_r1[] = {
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+};
+
+static enum p7spi_core_type p7spi_core_r23[] = {
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_MPEG,
+ P7SPI_CORE_MPEG,
+};
+
+static bool already_init = false;
+
+/**
+ * p7_init_spi() - Instantiate SPI shared resources "controller"
+ * for further driver usage.
+ *
+ * Selects revision-dependent frequency limits / pad counts from the
+ * detected chip revision, then registers the shared SPI kernel device.
+ * Subsequent calls are no-ops; registration failure panics.
+ */
+void __init p7_init_spi(void)
+{
+	int const rev = p7_chiprev();
+	int err;
+
+	if (already_init)
+		return;
+
+	switch (rev) {
+	case P7_CHIPREV_R1:
+		p7_spi_pdata.rev = SPI_REVISION_1;
+		break;
+	case P7_CHIPREV_R2:
+		p7_spi_pdata.rev = SPI_REVISION_2;
+		break;
+	case P7_CHIPREV_R3:
+		p7_spi_pdata.rev = SPI_REVISION_3;
+		break;
+	default:
+		/* Unknown silicon revision: no safe SPI limits to apply. */
+		BUG();
+		break;
+	}
+
+	if (p7_spi_pdata.rev == SPI_REVISION_1) {
+		/* MPW1 is limited to 127kHz - 65MHz. */
+		p7_spi_pdata.min_hz = 127000;
+		p7_spi_pdata.max_master_hz = 65000000;
+		p7_spi_pdata.num_pads = 16;
+		p7_spi_pdata.cores = p7spi_core_r1;
+		p7_spi_pdata.num_cores = ARRAY_SIZE(p7spi_core_r1);
+	}
+	else {
+		/* R2 and R3 to 2kHz - 130 MHz */
+		p7_spi_pdata.min_hz = 2000;
+		p7_spi_pdata.max_master_hz = 130000000;
+		p7_spi_pdata.num_pads = 20;
+		p7_spi_pdata.cores = p7spi_core_r23;
+		p7_spi_pdata.num_cores = ARRAY_SIZE(p7spi_core_r23);
+	}
+
+	err = p7_init_dev(&p7_spi_dev, NULL, NULL, 0);
+	if (! err) {
+		already_init = true;
+		return;
+	}
+
+	panic("p7: failed to init SPI kernel (%d)\n", err);
+}
+
+#if defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE)
+
+/* Bus 0 resources */
+static struct resource p7_spi0_res[] = {
+ [0] = {
+ .start = P7_SPI0,
+ .end = P7_SPI0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_SPI0_IRQ,
+ .end = P7_SPI0_IRQ,
+#ifdef CONFIG_ARCH_VEXPRESS_P7FPGA
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+#else
+ .flags = IORESOURCE_IRQ,
+#endif
+ },
+ [2] = {
+ .start = P7_SPI0_DMA,
+ .end = P7_SPI0_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+/* Bus 1 resources */
+static struct resource p7_spi1_res[] = {
+ [0] = {
+ .start = P7_SPI1,
+ .end = P7_SPI1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_SPI1_IRQ,
+ .end = P7_SPI1_IRQ,
+#ifdef CONFIG_ARCH_VEXPRESS_P7FPGA
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+#else
+ .flags = IORESOURCE_IRQ,
+#endif
+ },
+ [2] = {
+ .start = P7_SPI1_DMA,
+ .end = P7_SPI1_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+/* Bus 2 resources */
+static struct resource p7_spi2_res[] = {
+ [0] = {
+ .start = P7_SPI2,
+ .end = P7_SPI2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_SPI2_IRQ,
+ .end = P7_SPI2_IRQ,
+#ifdef CONFIG_ARCH_VEXPRESS_P7FPGA
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+#else
+ .flags = IORESOURCE_IRQ,
+#endif
+ },
+ [2] = {
+ .start = P7_SPI2_DMA,
+ .end = P7_SPI2_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+/* Bus 3 resources */
+static struct resource p7_spi3_res[] = {
+ [0] = {
+ .start = P7_SPI3,
+ .end = P7_SPI3 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = P7_SPI3_IRQ,
+ .end = P7_SPI3_IRQ,
+#ifdef CONFIG_ARCH_VEXPRESS_P7FPGA
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+#else
+ .flags = IORESOURCE_IRQ,
+#endif
+ },
+ [2] = {
+ .start = P7_SPI3_DMA,
+ .end = P7_SPI3_DMA,
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+/*
+ * Per-bus platform devices. Note: .name is deliberately left unset here;
+ * it is filled in by p7_init_spim() / p7_init_spis() so a bus can be
+ * claimed either as master or as slave (but not both).
+ */
+static struct platform_device p7_spi_devs[] = {
+ {
+ .id = 0,
+ .resource = p7_spi0_res,
+ .num_resources = ARRAY_SIZE(p7_spi0_res),
+ },
+ {
+ .id = 1,
+ .resource = p7_spi1_res,
+ .num_resources = ARRAY_SIZE(p7_spi1_res),
+ },
+ {
+ .id = 2,
+ .resource = p7_spi2_res,
+ .num_resources = ARRAY_SIZE(p7_spi2_res),
+ },
+ {
+ .id = 3,
+ .resource = p7_spi3_res,
+ .num_resources = ARRAY_SIZE(p7_spi3_res),
+ }
+};
+
+#endif /* defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE) */
+
+#if defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE)
+#include <spi/p7-spim.h>
+
+/**
+ * p7_init_spim() - Instantiate SPI master controller identified by @bus
+ * for further driver usage.
+ *
+ * @bus: master controller / bus identifier
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of element of @pins array
+ * @pdata: switchbox (pad routing) platform data for this bus
+ *
+ * Claims bus @bus as a master controller by assigning the master driver
+ * name to its platform device. Panics if device registration fails.
+ * Implicitly initializes the shared SPI kernel via p7_init_spi().
+ */
+void __init p7_init_spim(int bus, struct pinctrl_map* pins, size_t pin_cnt,
+ struct p7spi_swb const * const pdata)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(bus >= ARRAY_SIZE(p7_spi_devs));
+ BUG_ON(p7_spi_devs[bus].name);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+ BUG_ON(! pdata);
+#endif
+ p7_init_spi();
+
+ p7_spi_devs[bus].name = P7SPIM_DRV_NAME;
+ err = p7_init_dev(&p7_spi_devs[bus], (void*)pdata, pins, pin_cnt);
+ if (! err)
+ return;
+
+ panic("p7: failed to init SPI master controller %d (%d)\n",
+ bus,
+ err);
+}
+
+/**
+ * p7_init_spim_slave() - Register SPI slave device to master controller
+ * further driver usage.
+ *
+ * @bus: master controller / bus identifier
+ * @info: slave device descriptor
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ */
+int __init p7_init_spim_slave(int bus, struct spi_board_info* info)
+{
+	int err;
+
+#ifdef DEBUG
+	struct p7spi_ctrl_data* cdata;
+
+	BUG_ON(bus >= ARRAY_SIZE(p7_spi_devs));
+	/* Check @info before dereferencing it. */
+	BUG_ON(! info);
+	cdata = info->controller_data;
+	BUG_ON(! info->modalias);
+	BUG_ON(! cdata);
+	/* Only a single device per bus is supported (chip select 0). */
+	BUG_ON(info->chip_select);
+	BUG_ON(p7_spi_devs[bus].name && strcmp(p7_spi_devs[bus].name, P7SPIM_DRV_NAME));
+
+	pr_debug("p7: registering %s%sspi%d.0...\n",
+	         info->modalias ? info->modalias : "",
+	         info->modalias ? " slave as " : "",
+	         bus);
+#endif
+
+	info->bus_num = bus;
+	err = spi_register_board_info(info, 1);
+	WARN(err,
+	     "p7: failed to register %s%sspi%d.0 (%d)\n",
+	     info->modalias ? info->modalias : "",
+	     info->modalias ? " slave as " : "",
+	     bus,
+	     err);
+
+	return err;
+}
+
+#endif /* defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) */
+
+#if defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE)
+#include <spi/p7-spis.h>
+
+/**
+ * p7_init_spis() - Instantiate SPI slave controller identified by @bus
+ * for further driver usage.
+ *
+ * @bus: slave controller / bus identifier
+ * @pins: array of pin functions and optional settings
+ * @pin_cnt: number of element of @pins array
+ * @pdata: switchbox (pad routing) platform data for this bus
+ *
+ * Claims bus @bus as a slave controller by assigning the slave driver
+ * name to its platform device. Panics if device registration fails.
+ * Implicitly initializes the shared SPI kernel via p7_init_spi().
+ */
+void __init p7_init_spis(int bus, struct pinctrl_map* pins, size_t pin_cnt,
+ struct p7spi_swb const * const pdata)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(bus >= ARRAY_SIZE(p7_spi_devs));
+ BUG_ON(p7_spi_devs[bus].name);
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+ BUG_ON(! pdata);
+#endif
+ p7_init_spi();
+
+ p7_spi_devs[bus].name = P7SPIS_DRV_NAME;
+ err = p7_init_dev(&p7_spi_devs[bus], (void*)pdata, pins, pin_cnt);
+ if (! err)
+ return;
+
+ panic("p7: failed to init SPI slave controller %d (%d)\n",
+ bus,
+ err);
+}
+
+/**
+ * p7_init_spis_master() - Register SPI master device to slave controller
+ * for further driver usage.
+ *
+ * @bus: slave controller / bus identifier
+ * @info: master device descriptor
+ *
+ * Returns: 0 - success, a negative errno like value if failure
+ */
+int __init p7_init_spis_master(int bus, struct spi_board_info* info)
+{
+	int err;
+
+#ifdef DEBUG
+	struct p7spi_ctrl_data* cdata;
+
+	BUG_ON(bus >= ARRAY_SIZE(p7_spi_devs));
+	/* Check @info before dereferencing it. */
+	BUG_ON(! info);
+	cdata = info->controller_data;
+	BUG_ON(! info->modalias);
+	BUG_ON(! cdata);
+	/* Only a single device per bus is supported (chip select 0). */
+	BUG_ON(info->chip_select);
+	/* Bus must have been claimed as slave by p7_init_spis() first
+	 * (guard against strcmp on a NULL name). */
+	BUG_ON(! p7_spi_devs[bus].name ||
+	       strcmp(p7_spi_devs[bus].name, P7SPIS_DRV_NAME));
+
+	pr_debug("p7: registering %s%sspi%d.0...\n",
+	         info->modalias ? info->modalias : "",
+	         info->modalias ? " master as " : "",
+	         bus);
+#endif
+
+	info->bus_num = bus;
+	err = spi_register_board_info(info, 1);
+	WARN(err,
+	     "p7: failed to register %s%sspi%d.0 (%d)\n",
+	     info->modalias ? info->modalias : "",
+	     /* was " slave as ": copy-paste error, this path registers a master */
+	     info->modalias ? " master as " : "",
+	     bus,
+	     err);
+
+	return err;
+}
+
+#endif /* defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE) */
diff --git a/arch/arm/mach-parrot7/spi.h b/arch/arm/mach-parrot7/spi.h
new file mode 100644
index 00000000000000..3e1dba5798d221
--- /dev/null
+++ b/arch/arm/mach-parrot7/spi.h
@@ -0,0 +1,187 @@
+/**
+ * linux/arch/arm/mach-parrot7/spi.h - Parrot7 SPI controller platform
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 28-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ARCH_PARROT7_SPI_H
+#define _ARCH_PARROT7_SPI_H
+
+struct p7spi_plat_data;
+struct p7spi_swb;
+struct spi_board_info;
+struct pinctrl_map;
+
+#if defined(CONFIG_SPI_PARROT7) || \
+ defined(CONFIG_SPI_PARROT7_MODULE)
+
+#include <linux/init.h>
+
+extern void p7_init_spi(void) __init;
+
+#else /* defined(CONFIG_SPI_PARROT7) || \
+ defined(CONFIG_SPI_PARROT7_MODULE) */
+
+#define p7_init_spi()
+
+#endif /* defined(CONFIG_SPI_PARROT7) || \
+ defined(CONFIG_SPI_PARROT7_MODULE) */
+
+#if defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE)
+
+extern void p7_init_spim(int, struct pinctrl_map*, size_t,
+ struct p7spi_swb const * const) __init;
+
+/**
+ * P7_INIT_SPIM_SLAVE() - Init a SPI slave device (connected to master
+ * controller).
+ *
+ * @_mod: name of module drive this slave
+ * @_pdata: platform data assigned to this slave
+ * @_cdata: master controller platform data to use with this slave
+ * @_hz: maximum frequency this slave may work at
+ * @_mode: clock phase / polarity this slave may work with (one of
+ * %SPI_MODE_0, %SPI_MODE_1, %SPI_MODE_2, %SPI_MODE_3, or a
+ * combination of %SPI_CPHA and / or %SPI_CPOL flags)
+ */
+#define P7_INIT_SPIM_SLAVE(_mod, _pdata, _cdata, _hz, _mode)\
+ { \
+ .modalias = _mod, \
+ .platform_data = _pdata, \
+ .controller_data = _cdata, \
+ .irq = -1, \
+ .max_speed_hz = _hz, \
+ .chip_select = 0, \
+ .mode = _mode \
+ }
+
+/**
+ * P7_DECLARE_SPIM_SLAVE() - Declare a SPI slave device (connected to master
+ * controller).
+ *
+ * @_name: slave variable name
+ * @_mod: name of module drive this slave
+ * @_pdata: platform data assigned to this slave
+ * @_cdata: master controller platform data to use with this slave
+ * @_hz: maximum frequency this slave may work at
+ * @_mode: clock phase / polarity this slave may work with (one of
+ * %SPI_MODE_0, %SPI_MODE_1, %SPI_MODE_2, %SPI_MODE_3, or a
+ * combination of %SPI_CPHA and / or %SPI_CPOL flags)
+ */
+#define P7_DECLARE_SPIM_SLAVE(_name, _mod, _pdata, _cdata, _hz, _mode) \
+ struct spi_board_info _name __initdata = P7_INIT_SPIM_SLAVE(_mod, \
+ _pdata, \
+ _cdata, \
+ _hz, \
+ _mode)
+
+extern int p7_init_spim_slave(int, struct spi_board_info*) __init;
+
+#else /* defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) */
+
+static inline void p7_init_spim(int i,
+ struct pinctrl_map *m,
+ size_t s,
+ struct p7spi_swb const * const p)
+{
+ return;
+}
+
+static inline int p7_init_spim_slave(int i, struct spi_board_info *s)
+{
+ return -ENOSYS;
+}
+
+#define P7_DECLARE_SPIM_SLAVE(_name, _mod, _pdata, _cdata, _hz, _mode) \
+ struct spi_board_info _name __initdata = P7_INIT_SPIM_SLAVE(_mod, \
+ _pdata, \
+ _cdata, \
+ _hz, \
+ _mode)
+/**
+ * P7_INIT_SPIM_SLAVE() - Init a SPI slave device (connected to master
+ * controller).
+ *
+ * @_mod: name of module drive this slave
+ * @_pdata: platform data assigned to this slave
+ * @_cdata: master controller platform data to use with this slave
+ * @_hz: maximum frequency this slave may work at
+ * @_mode: clock phase / polarity this slave may work with (one of
+ * %SPI_MODE_0, %SPI_MODE_1, %SPI_MODE_2, %SPI_MODE_3, or a
+ * combination of %SPI_CPHA and / or %SPI_CPOL flags)
+ */
+#define P7_INIT_SPIM_SLAVE(_mod, _pdata, _cdata, _hz, _mode)\
+ { \
+ .modalias = _mod, \
+ .platform_data = _pdata, \
+ .controller_data = _cdata, \
+ .irq = -1, \
+ .max_speed_hz = _hz, \
+ .chip_select = 0, \
+ .mode = _mode \
+ }
+
+#endif /* defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) */
+
+#define P7_DECLARE_SPIS_MASTER(_name, _mod, _pdata, _cdata, _hz, _mode) \
+ struct spi_board_info _name __initdata = { \
+ .modalias = _mod, \
+ .platform_data = _pdata, \
+ .controller_data = _cdata, \
+ .irq = -1, \
+ .max_speed_hz = _hz, \
+ .chip_select = 0, \
+ .mode = _mode \
+ }
+
+#if defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE)
+
+extern void p7_init_spis(int, struct pinctrl_map*, size_t,
+ struct p7spi_swb const * const) __init;
+
+/**
+ * P7_DECLARE_SPIS_MASTER() - Declare a SPI master device (connected to slave
+ * controller).
+ *
+ * @_name: master variable name
+ * @_mod: name of module drive this master
+ * @_pdata: platform data assigned to this master
+ * @_cdata: slave controller platform data to use with this master
+ * @_hz: maximum frequency this master may work at
+ * @_mode: clock phase / polarity this master may work with (one of
+ * %SPI_MODE_0, %SPI_MODE_1, %SPI_MODE_2, %SPI_MODE_3, or a
+ * combination of %SPI_CPHA and / or %SPI_CPOL flags)
+ */
+
+extern int p7_init_spis_master(int, struct spi_board_info*) __init;
+
+#else /* defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE) */
+
+static inline void p7_init_spis(int i,
+ struct pinctrl_map *m,
+ size_t s,
+ struct p7spi_swb const * const p)
+{
+ return;
+}
+
+static inline int p7_init_spis_master(int i, struct spi_board_info *s)
+{
+ return -ENOSYS;
+}
+
+#endif /* defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE) */
+
+#endif
diff --git a/arch/arm/mach-parrot7/system.h b/arch/arm/mach-parrot7/system.h
new file mode 100644
index 00000000000000..d1bb7a8b4e168c
--- /dev/null
+++ b/arch/arm/mach-parrot7/system.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/mach-parrot7/system.h
+ *
+ * Copyright (C) 2010 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 08-Nov-2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ARCH_PARROT7_SYSTEM_H
+#define _ARCH_PARROT7_SYSTEM_H
+
+#include <mach/p7.h>
+
+/* Clocks generation. */
+#define P7_SYS_CLKGEN P7_SYS
+#define P7_SYS_CLKGEN_STATUS P7_SYS_CLKGEN
+#define P7_SYS_CLKGEN_CFG_CPU_DIV (P7_SYS_CLKGEN + UL(0x04))
+#define P7_SYS_CLKGEN_CFG_SYS_DIV (P7_SYS_CLKGEN + UL(0x08))
+#define P7_SYS_CLKGEN_CFG_USB_DIV (P7_SYS_CLKGEN + UL(0x0c))
+#define P7_SYS_CLKGEN_CFG_CAN_DIV (P7_SYS_CLKGEN + UL(0x10))
+#define P7_SYS_CLKGEN_CFG_FAST (P7_SYS_CLKGEN + UL(0x14))
+#define P7_SYS_CLKGEN_CFG_DDR (P7_SYS_CLKGEN + UL(0x18))
+#define P7_SYS_CLKGEN_CFG_AVI00 (P7_SYS_CLKGEN + UL(0x1c))
+#define P7_SYS_CLKGEN_CFG_AVI01 (P7_SYS_CLKGEN + UL(0x20))
+#define P7_SYS_CLKGEN_CFG_AVI10 (P7_SYS_CLKGEN + UL(0x24))
+#define P7_SYS_CLKGEN_CFG_AVI11 (P7_SYS_CLKGEN + UL(0x28))
+#define P7_SYS_CLKGEN_CFG_AAI (P7_SYS_CLKGEN + UL(0x2c))
+#define P7_SYS_CLKGEN_CFG_NAND (P7_SYS_CLKGEN + UL(0x30))
+#define P7_SYS_CLKGEN_CFG_USB (P7_SYS_CLKGEN + UL(0x34))
+#define P7_SYS_CLKGEN_CFG_TRACE (P7_SYS_CLKGEN + UL(0x38))
+#define P7_SYS_CLKGEN_CFG_ETHERNET (P7_SYS_CLKGEN + UL(0x3c))
+
+/* Multiplexed I/O setup. */
+#define P7_SYS_PADCTRL_IO (P7_SYS + UL(0x1000))
+#define P7_SYS_PADCTRL_IO_00 (P7_SYS_PADCTRL_IO + UL(4 * 0))
+#define P7_SYS_PADCTRL_IO_01 (P7_SYS_PADCTRL_IO + UL(4 * 1))
+#define P7_SYS_PADCTRL_IO_02 (P7_SYS_PADCTRL_IO + UL(4 * 2))
+#define P7_SYS_PADCTRL_IO_03 (P7_SYS_PADCTRL_IO + UL(4 * 3))
+#define P7_SYS_PADCTRL_IO_04 (P7_SYS_PADCTRL_IO + UL(4 * 4))
+#define P7_SYS_PADCTRL_IO_05 (P7_SYS_PADCTRL_IO + UL(4 * 5))
+#define P7_SYS_PADCTRL_IO_06 (P7_SYS_PADCTRL_IO + UL(4 * 6))
+#define P7_SYS_PADCTRL_IO_07 (P7_SYS_PADCTRL_IO + UL(4 * 7))
+#define P7_SYS_PADCTRL_IO_08 (P7_SYS_PADCTRL_IO + UL(4 * 8))
+#define P7_SYS_PADCTRL_IO_09 (P7_SYS_PADCTRL_IO + UL(4 * 9))
+#define P7_SYS_PADCTRL_IO_10 (P7_SYS_PADCTRL_IO + UL(4 * 10))
+#define P7_SYS_PADCTRL_IO_11 (P7_SYS_PADCTRL_IO + UL(4 * 11))
+#define P7_SYS_PADCTRL_IO_12 (P7_SYS_PADCTRL_IO + UL(4 * 12))
+#define P7_SYS_PADCTRL_IO_13 (P7_SYS_PADCTRL_IO + UL(4 * 13))
+#define P7_SYS_PADCTRL_IO_14 (P7_SYS_PADCTRL_IO + UL(4 * 14))
+#define P7_SYS_PADCTRL_IO_15 (P7_SYS_PADCTRL_IO + UL(4 * 15))
+#define P7_SYS_PADCTRL_IO_16 (P7_SYS_PADCTRL_IO + UL(4 * 16))
+#define P7_SYS_PADCTRL_IO_17 (P7_SYS_PADCTRL_IO + UL(4 * 17))
+#define P7_SYS_PADCTRL_IO_18 (P7_SYS_PADCTRL_IO + UL(4 * 18))
+#define P7_SYS_PADCTRL_IO_19 (P7_SYS_PADCTRL_IO + UL(4 * 19))
+#define P7_SYS_PADCTRL_IO_20 (P7_SYS_PADCTRL_IO + UL(4 * 20))
+#define P7_SYS_PADCTRL_IO_21 (P7_SYS_PADCTRL_IO + UL(4 * 21))
+#define P7_SYS_PADCTRL_IO_22 (P7_SYS_PADCTRL_IO + UL(4 * 22))
+#define P7_SYS_PADCTRL_IO_23 (P7_SYS_PADCTRL_IO + UL(4 * 23))
+#define P7_SYS_PADCTRL_IO_24 (P7_SYS_PADCTRL_IO + UL(4 * 24))
+#define P7_SYS_PADCTRL_IO_25 (P7_SYS_PADCTRL_IO + UL(4 * 25))
+#define P7_SYS_PADCTRL_IO_26 (P7_SYS_PADCTRL_IO + UL(4 * 26))
+#define P7_SYS_PADCTRL_IO_27 (P7_SYS_PADCTRL_IO + UL(4 * 27))
+#define P7_SYS_PADCTRL_IO_28 (P7_SYS_PADCTRL_IO + UL(4 * 28))
+#define P7_SYS_PADCTRL_IO_29 (P7_SYS_PADCTRL_IO + UL(4 * 29))
+#define P7_SYS_PADCTRL_IO_30 (P7_SYS_PADCTRL_IO + UL(4 * 30))
+#define P7_SYS_PADCTRL_IO_31 (P7_SYS_PADCTRL_IO + UL(4 * 31))
+#define P7_SYS_PADCTRL_IO_32 (P7_SYS_PADCTRL_IO + UL(4 * 32))
+#define P7_SYS_PADCTRL_IO_33 (P7_SYS_PADCTRL_IO + UL(4 * 33))
+#define P7_SYS_PADCTRL_IO_34 (P7_SYS_PADCTRL_IO + UL(4 * 34))
+#define P7_SYS_PADCTRL_IO_35 (P7_SYS_PADCTRL_IO + UL(4 * 35))
+#define P7_SYS_PADCTRL_IO_36 (P7_SYS_PADCTRL_IO + UL(4 * 36))
+#define P7_SYS_PADCTRL_IO_37 (P7_SYS_PADCTRL_IO + UL(4 * 37))
+#define P7_SYS_PADCTRL_IO_38 (P7_SYS_PADCTRL_IO + UL(4 * 38))
+#define P7_SYS_PADCTRL_IO_39 (P7_SYS_PADCTRL_IO + UL(4 * 39))
+#define P7_SYS_PADCTRL_IO_40 (P7_SYS_PADCTRL_IO + UL(4 * 40))
+#define P7_SYS_PADCTRL_IO_41 (P7_SYS_PADCTRL_IO + UL(4 * 41))
+#define P7_SYS_PADCTRL_IO_42 (P7_SYS_PADCTRL_IO + UL(4 * 42))
+#define P7_SYS_PADCTRL_IO_43 (P7_SYS_PADCTRL_IO + UL(4 * 43))
+#define P7_SYS_PADCTRL_IO_44 (P7_SYS_PADCTRL_IO + UL(4 * 44))
+#define P7_SYS_PADCTRL_IO_45 (P7_SYS_PADCTRL_IO + UL(4 * 45))
+#define P7_SYS_PADCTRL_IO_46 (P7_SYS_PADCTRL_IO + UL(4 * 46))
+#define P7_SYS_PADCTRL_IO_47 (P7_SYS_PADCTRL_IO + UL(4 * 47))
+#define P7_SYS_PADCTRL_IO_48 (P7_SYS_PADCTRL_IO + UL(4 * 48))
+#define P7_SYS_PADCTRL_IO_49 (P7_SYS_PADCTRL_IO + UL(4 * 49))
+#define P7_SYS_PADCTRL_IO_50 (P7_SYS_PADCTRL_IO + UL(4 * 50))
+#define P7_SYS_PADCTRL_IO_51 (P7_SYS_PADCTRL_IO + UL(4 * 51))
+#define P7_SYS_PADCTRL_IO_52 (P7_SYS_PADCTRL_IO + UL(4 * 52))
+#define P7_SYS_PADCTRL_IO_53 (P7_SYS_PADCTRL_IO + UL(4 * 53))
+#define P7_SYS_PADCTRL_IO_54 (P7_SYS_PADCTRL_IO + UL(4 * 54))
+#define P7_SYS_PADCTRL_IO_55 (P7_SYS_PADCTRL_IO + UL(4 * 55))
+#define P7_SYS_PADCTRL_IO_56 (P7_SYS_PADCTRL_IO + UL(4 * 56))
+#define P7_SYS_PADCTRL_IO_57 (P7_SYS_PADCTRL_IO + UL(4 * 57))
+
+/* Configuration registers. */
+#define P7_SYS_CHIP_ID (P7_SYS + UL(0x400))
+#define P7_SYS_BOOT_MODE (P7_SYS + UL(0x408))
+#define P7_SYS_SCRATCH_REG0 (P7_SYS + UL(0x40c))
+#define P7_SYS_SCRATCH_REG1 (P7_SYS + UL(0x410))
+#define P7_SYS_WATCHDOG (P7_SYS + UL(0x418))
+
+/* Timers. */
+#define P7_SYS_TIMX_CTL UL(0x0)
+#define P7_SYS_TIMXCTL_ENABLE (1U << 0)
+#define P7_SYS_TIMX_LD UL(0x4)
+#define P7_SYS_TIMX_CNT UL(0x8)
+#define P7_SYS_TIM0_IT (1U << 0)
+#define P7_SYS_TIM1_IT (1U << 1)
+#define P7_SYS_TIM2_IT (1U << 2)
+#define P7_SYS_TIM3_IT (1U << 3)
+
+#define P7_SYS_TIM (P7_SYS + UL(0x800))
+
+/* NOTE(review): was (P7_SYS + UL(0x0)), which aliases the clock
+ * generator status register. TIM1/2/3 are at P7_SYS_TIM + 0x10/0x20/0x30,
+ * so TIM0 belongs at the start of the timer block — confirm against the
+ * P7 register map.
+ */
+#define P7_SYS_TIM0 (P7_SYS_TIM + UL(0x00))
+#define P7_SYS_TIM0_CTL (P7_SYS_TIM0 + P7_SYS_TIMX_CTL)
+#define P7_SYS_TIM0_LD (P7_SYS_TIM0 + P7_SYS_TIMX_LD)
+#define P7_SYS_TIM0_CNT (P7_SYS_TIM0 + P7_SYS_TIMX_CNT)
+
+#define P7_SYS_TIM1 (P7_SYS_TIM + UL(0x10))
+#define P7_SYS_TIM1_CTL (P7_SYS_TIM1 + P7_SYS_TIMX_CTL)
+#define P7_SYS_TIM1_LD (P7_SYS_TIM1 + P7_SYS_TIMX_LD)
+#define P7_SYS_TIM1_CNT (P7_SYS_TIM1 + P7_SYS_TIMX_CNT)
+
+#define P7_SYS_TIM2 (P7_SYS_TIM + UL(0x20))
+#define P7_SYS_TIM2_CTL (P7_SYS_TIM2 + P7_SYS_TIMX_CTL)
+#define P7_SYS_TIM2_LD (P7_SYS_TIM2 + P7_SYS_TIMX_LD)
+#define P7_SYS_TIM2_CNT (P7_SYS_TIM2 + P7_SYS_TIMX_CNT)
+
+#define P7_SYS_TIM3 (P7_SYS_TIM + UL(0x30))
+#define P7_SYS_TIM3_CTL (P7_SYS_TIM3 + P7_SYS_TIMX_CTL)
+#define P7_SYS_TIM3_LD (P7_SYS_TIM3 + P7_SYS_TIMX_LD)
+#define P7_SYS_TIM3_CNT (P7_SYS_TIM3 + P7_SYS_TIMX_CNT)
+
+#define P7_SYS_TIM_STATUS (P7_SYS_TIM + UL(0x100))
+#define P7_SYS_TIM_ITEN (P7_SYS_TIM + UL(0x104))
+#define P7_SYS_TIM_ITACK (P7_SYS_TIM + UL(0x108))
+
+#endif
+
+/* vim:set ts=4:sw=4:noet:ft=c: */
diff --git a/arch/arm/mach-parrot7/uart.c b/arch/arm/mach-parrot7/uart.c
new file mode 100644
index 00000000000000..e1458332284fea
--- /dev/null
+++ b/arch/arm/mach-parrot7/uart.c
@@ -0,0 +1,203 @@
+/**
+ * linux/arch/arm/mach-parrot7/uart.c - Parrot7 serial port platform
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 13-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clkdev.h>
+#include <serial/px-uart.h>
+#include <mach/irqs.h>
+#include <mach/p7.h>
+#include "common.h"
+#include "clock.h"
+
+static struct resource p7_uart0_res[] = {
+ [0] = {
+ .start = P7_UART0,
+ .end = P7_UART0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART0_IRQ,
+ .end = P7_UART0_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart1_res[] = {
+ [0] = {
+ .start = P7_UART1,
+ .end = P7_UART1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART1_IRQ,
+ .end = P7_UART1_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart2_res[] = {
+ [0] = {
+ .start = P7_UART2,
+ .end = P7_UART2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART2_IRQ,
+ .end = P7_UART2_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart3_res[] = {
+ [0] = {
+ .start = P7_UART3,
+ .end = P7_UART3 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART3_IRQ,
+ .end = P7_UART3_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart4_res[] = {
+ [0] = {
+ .start = P7_UART4,
+ .end = P7_UART4 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART4_IRQ,
+ .end = P7_UART4_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart5_res[] = {
+ [0] = {
+ .start = P7_UART5,
+ .end = P7_UART5 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART5_IRQ,
+ .end = P7_UART5_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart6_res[] = {
+ [0] = {
+ .start = P7_UART6,
+ .end = P7_UART6 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART6_IRQ,
+ .end = P7_UART6_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource p7_uart7_res[] = {
+ [0] = {
+ .start = P7_UART7,
+ .end = P7_UART7 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = P7_UART7_IRQ,
+ .end = P7_UART7_IRQ,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device p7_uart_devs[] = {
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 0,
+ .resource = p7_uart0_res,
+ .num_resources = ARRAY_SIZE(p7_uart0_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 1,
+ .resource = p7_uart1_res,
+ .num_resources = ARRAY_SIZE(p7_uart1_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 2,
+ .resource = p7_uart2_res,
+ .num_resources = ARRAY_SIZE(p7_uart2_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 3,
+ .resource = p7_uart3_res,
+ .num_resources = ARRAY_SIZE(p7_uart3_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 4,
+ .resource = p7_uart4_res,
+ .num_resources = ARRAY_SIZE(p7_uart4_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 5,
+ .resource = p7_uart5_res,
+ .num_resources = ARRAY_SIZE(p7_uart5_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 6,
+ .resource = p7_uart6_res,
+ .num_resources = ARRAY_SIZE(p7_uart6_res)
+ },
+ {
+ .name = PXUART_DRV_NAME,
+ .id = 7,
+ .resource = p7_uart7_res,
+ .num_resources = ARRAY_SIZE(p7_uart7_res)
+ },
+};
+
+/**
+ * p7_init_uart() - Instantiate UART controller identified by @port
+ * for further driver usage.
+ *
+ * @port: UART port identifier
+ * @pins: array of pin functions and settings
+ * @pin_cnt: number of element of @pins array
+ *
+ * Panics if device registration fails; returns silently (with an error
+ * log) when @port does not exist.
+ */
+void __init p7_init_uart(int port,
+ struct pinctrl_map* pins,
+ size_t pin_cnt)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(port >= ARRAY_SIZE(p7_uart_devs));
+ BUG_ON(! pins);
+ BUG_ON(! pin_cnt);
+#endif
+ if (port >= ARRAY_SIZE(p7_uart_devs)) {
+ printk(KERN_ERR "p7 Uart%d do not exist\n", port);
+ return;
+ }
+
+ err = p7_init_dev(&p7_uart_devs[port], NULL, pins, pin_cnt);
+ if (err)
+ panic("p7: failed to init UART %d (%d)\n", port, err);
+}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
new file mode 100644
index 00000000000000..345395e9dc6e9f
--- /dev/null
+++ b/drivers/iio/Kconfig
@@ -0,0 +1,81 @@
+#
+# Industrial I/O subsystem configuration
+#
+
+menuconfig IIO
+ tristate "Industrial I/O support"
+ select ANON_INODES
+ help
+ The industrial I/O subsystem provides a unified framework for
+ drivers for many different types of embedded sensors using a
+ number of different physical interfaces (i2c, spi, etc).
+
+if IIO
+
+config IIO_BUFFER
+ bool "Enable buffer support within IIO"
+ help
+ Provide core support for various buffer based data
+ acquisition methods.
+
+if IIO_BUFFER
+
+config IIO_BUFFER_CB
+	bool "IIO callback buffer used for push in-kernel interfaces"
+	help
+	  Should be selected by any drivers that do in-kernel push
+	  usage. That is, those where the data is pushed to the consumer.
+
+config IIO_KFIFO_BUF
+ select IIO_TRIGGER
+ tristate "Industrial I/O buffering based on kfifo"
+ help
+ A simple fifo based on kfifo. Note that this currently provides
+ no buffer events so it is up to userspace to work out how
+ often to read from the buffer.
+
+config IIO_TRIGGERED_BUFFER
+ tristate
+ select IIO_TRIGGER
+ select IIO_KFIFO_BUF
+ help
+ Provides helper functions for setting up triggered buffers.
+
+endif # IIO_BUFFER
+
+config IIO_TRIGGER
+ boolean "Enable triggered sampling support"
+ help
+ Provides IIO core support for triggers. Currently these
+ are used to initialize capture of samples to push into
+ buffers. The triggers are effectively a 'capture
+ data now' interrupt.
+
+config IIO_CONSUMERS_PER_TRIGGER
+ int "Maximum number of consumers per trigger"
+ depends on IIO_TRIGGER
+ default "2"
+ help
+ This value controls the maximum number of consumers that a
+ given trigger may handle. Default is 2.
+
+source "drivers/iio/accel/Kconfig"
+source "drivers/iio/adc/Kconfig"
+source "drivers/iio/amplifiers/Kconfig"
+source "drivers/iio/common/Kconfig"
+source "drivers/iio/dac/Kconfig"
+source "drivers/iio/frequency/Kconfig"
+source "drivers/iio/gyro/Kconfig"
+source "drivers/iio/humidity/Kconfig"
+source "drivers/iio/imu/Kconfig"
+source "drivers/iio/light/Kconfig"
+source "drivers/iio/magnetometer/Kconfig"
+source "drivers/iio/orientation/Kconfig"
+if IIO_TRIGGER
+ source "drivers/iio/trigger/Kconfig"
+endif #IIO_TRIGGER
+source "drivers/iio/pressure/Kconfig"
+source "drivers/iio/proximity/Kconfig"
+source "drivers/iio/temperature/Kconfig"
+
+endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
new file mode 100644
index 00000000000000..698afc2d17ce84
--- /dev/null
+++ b/drivers/iio/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the industrial I/O core.
+#
+
+obj-$(CONFIG_IIO) += industrialio.o
+industrialio-y := industrialio-core.o industrialio-event.o inkern.o
+industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
+industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
+industrialio-$(CONFIG_IIO_BUFFER_CB) += buffer_cb.o
+
+obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
+obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
+
+obj-y += accel/
+obj-y += adc/
+obj-y += amplifiers/
+obj-y += common/
+obj-y += dac/
+obj-y += gyro/
+obj-y += frequency/
+obj-y += humidity/
+obj-y += imu/
+obj-y += light/
+obj-y += magnetometer/
+obj-y += orientation/
+obj-y += pressure/
+obj-y += proximity/
+obj-y += temperature/
+obj-y += trigger/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
new file mode 100644
index 00000000000000..9b9be8725e9d44
--- /dev/null
+++ b/drivers/iio/accel/Kconfig
@@ -0,0 +1,108 @@
+#
+# Accelerometer drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Accelerometers"
+
+config BMA180
+ tristate "Bosch BMA180/BMA250 3-Axis Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for the Bosch BMA180 or
+ BMA250 triaxial acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma180.
+
+config BMC150_ACCEL
+ tristate "Bosch BMC150 Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the following Bosch accelerometers:
+ BMC150, BMI055, BMA250E, BMA222E, BMA255, BMA280.
+
+ Currently this only supports the device via an i2c interface.
+
+ This is a combo module with both accelerometer and magnetometer.
+ This driver is only implementing accelerometer part, which has
+ its own address and register map.
+
+config HID_SENSOR_ACCEL_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Accelerometers 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ accelerometers 3D.
+
+config IIO_ST_ACCEL_3AXIS
+ tristate "STMicroelectronics accelerometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_ACCEL_I2C_3AXIS if (I2C)
+ select IIO_ST_ACCEL_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics accelerometers:
+ LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_accel (core functions for the driver [it is mandatory]);
+ - st_accel_i2c (necessary for the I2C devices [optional*]);
+ - st_accel_spi (necessary for the SPI devices [optional*]);
+
+ (*) one of these is necessary to do something.
+
+config IIO_ST_ACCEL_I2C_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_ACCEL_SPI_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
+config KXSD9
+ tristate "Kionix KXSD9 Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Kionix KXSD9 accelerometer.
+ Currently this only supports the device via an SPI interface.
+
+config MMA8452
+ tristate "Freescale MMA8452Q Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MMA8452Q 3-axis
+ accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mma8452.
+
+config KXCJK1013
+ tristate "Kionix 3-Axis Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for the Kionix KXCJK-1013
+ triaxial acceleration sensor. This driver also supports KXCJ9-1008
+ and KXTJ2-1009.
+
+ To compile this driver as a module, choose M here: the module will
+ be called kxcjk-1013.
+
+endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
new file mode 100644
index 00000000000000..a593996c653922
--- /dev/null
+++ b/drivers/iio/accel/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for industrial I/O accelerometer drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_BMA180) += bma180.o
+obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel.o
+obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
+obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
+obj-$(CONFIG_KXSD9) += kxsd9.o
+obj-$(CONFIG_MMA8452) += mma8452.o
+
+# st_accel is a composite module: the core is always built in, the
+# buffer support object is added only when CONFIG_IIO_BUFFER is set.
+obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
+st_accel-y := st_accel_core.o
+st_accel-$(CONFIG_IIO_BUFFER) += st_accel_buffer.o
+
+obj-$(CONFIG_IIO_ST_ACCEL_I2C_3AXIS) += st_accel_i2c.o
+obj-$(CONFIG_IIO_ST_ACCEL_SPI_3AXIS) += st_accel_spi.o
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
new file mode 100644
index 00000000000000..01003d0050076b
--- /dev/null
+++ b/drivers/iio/accel/bma180.c
@@ -0,0 +1,862 @@
+/*
+ * bma180.c - IIO driver for Bosch BMA180 triaxial acceleration sensor
+ *
+ * Copyright 2013 Oleksandr Kravchenko <x0199363@ti.com>
+ *
+ * Support for BMA250 (c) Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * SPI is not supported by driver
+ * BMA180: 7-bit I2C slave address 0x40 or 0x41
+ * BMA250: 7-bit I2C slave address 0x18 or 0x19
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMA180_DRV_NAME "bma180"
+#define BMA180_IRQ_NAME "bma180_event"
+
+/* Part indices: used both into bma180_part_info[] and as i2c_device_id
+ * driver_data (see bma180_ids at the bottom of the file). */
+enum {
+	BMA180,
+	BMA250,
+};
+
+struct bma180_data;
+
+/*
+ * Per-part description: channel spec, the supported bandwidth/scale
+ * values, the register/bit-mask layout for each controllable feature,
+ * and part-specific config/disable hooks.  Lets the shared helpers
+ * (bma180_set_bits() and friends) drive either chip.
+ */
+struct bma180_part_info {
+	const struct iio_chan_spec *channels;
+	unsigned num_channels;
+	const int *scale_table;
+	unsigned num_scales;
+	const int *bw_table;
+	unsigned num_bw;
+
+	u8 int_reset_reg, int_reset_mask;
+	u8 sleep_reg, sleep_mask;
+	u8 bw_reg, bw_mask;
+	u8 scale_reg, scale_mask;
+	u8 power_reg, power_mask, lowpower_val;
+	u8 int_enable_reg, int_enable_mask;
+	u8 softreset_reg;
+
+	int (*chip_config)(struct bma180_data *data);
+	void (*chip_disable)(struct bma180_data *data);
+};
+
+/* Register set */
+#define BMA180_CHIP_ID 0x00 /* Need to distinguish BMA180 from other */
+#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
+#define BMA180_TEMP 0x08
+#define BMA180_CTRL_REG0 0x0d
+#define BMA180_RESET 0x10
+#define BMA180_BW_TCS 0x20
+#define BMA180_CTRL_REG3 0x21
+#define BMA180_TCO_Z 0x30
+#define BMA180_OFFSET_LSB1 0x35
+
+/* BMA180_CTRL_REG0 bits */
+#define BMA180_DIS_WAKE_UP BIT(0) /* Disable wake up mode */
+#define BMA180_SLEEP BIT(1) /* 1 - chip will sleep */
+#define BMA180_EE_W BIT(4) /* Unlock writing to addr from 0x20 */
+#define BMA180_RESET_INT BIT(6) /* Reset pending interrupts */
+
+/* BMA180_CTRL_REG3 bits */
+#define BMA180_NEW_DATA_INT BIT(1) /* Intr every new accel data is ready */
+
+/* BMA180_OFFSET_LSB1 skipping mode bit */
+#define BMA180_SMP_SKIP BIT(0)
+
+/* Bit masks for registers bit fields */
+#define BMA180_RANGE 0x0e /* Range of measured accel values */
+#define BMA180_BW 0xf0 /* Accel bandwidth */
+#define BMA180_MODE_CONFIG 0x03 /* Config operation modes */
+
+/* We have to write this value in reset register to do soft reset */
+#define BMA180_RESET_VAL 0xb6
+
+#define BMA180_ID_REG_VAL 0x03
+
+/* Chip power modes */
+#define BMA180_LOW_POWER 0x03
+
+#define BMA250_RANGE_REG 0x0f
+#define BMA250_BW_REG 0x10
+#define BMA250_POWER_REG 0x11
+#define BMA250_RESET_REG 0x14
+#define BMA250_INT_ENABLE_REG 0x17
+#define BMA250_INT_MAP_REG 0x1a
+#define BMA250_INT_RESET_REG 0x21
+
+#define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
+#define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
+#define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
+#define BMA250_LOWPOWER_MASK BIT(6)
+#define BMA250_DATA_INTEN_MASK BIT(4)
+#define BMA250_INT1_DATA_MASK BIT(0)
+#define BMA250_INT_RESET_MASK BIT(7) /* Reset pending interrupts */
+
+/* Per-device state; mutex serializes register access and the cached
+ * sleep_state/scale/bw/pmode shadow values. */
+struct bma180_data {
+	struct i2c_client *client;
+	struct iio_trigger *trig;
+	const struct bma180_part_info *part_info;
+	struct mutex mutex;
+	bool sleep_state;
+	int scale;
+	int bw;
+	bool pmode;
+	u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */
+};
+
+/* Channel/scan indices; accel axes double as the offset from
+ * BMA180_ACC_X_LSB in bma180_get_data_reg(). */
+enum bma180_chan {
+	AXIS_X,
+	AXIS_Y,
+	AXIS_Z,
+	TEMP
+};
+
+/* Valid values written via sysfs; scale entries are in micro-units
+ * (IIO_VAL_INT_PLUS_MICRO).  Index into the table is the register
+ * field value programmed by bma180_set_bw()/bma180_set_scale(). */
+static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
+static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
+
+/* Zero entries are reserved/unused register codes; bma180_show_avail()
+ * skips them and bma180_set_scale() can never match them. */
+static int bma250_bw_table[] = { 8, 16, 31, 63, 125, 250 }; /* Hz */
+static int bma250_scale_table[] = { 0, 0, 0, 38344, 0, 76590, 0, 0, 153180, 0,
+	0, 0, 306458 };
+
+/* Read one sample: TEMP is a single byte, accel axes are 16-bit words
+ * starting at BMA180_ACC_X_LSB (2 bytes per axis).  Returns the raw
+ * register value (>= 0) or a negative errno; -EBUSY while asleep. */
+static int bma180_get_data_reg(struct bma180_data *data, enum bma180_chan chan)
+{
+	int ret;
+
+	if (data->sleep_state)
+		return -EBUSY;
+
+	switch (chan) {
+	case TEMP:
+		ret = i2c_smbus_read_byte_data(data->client, BMA180_TEMP);
+		if (ret < 0)
+			dev_err(&data->client->dev, "failed to read temp register\n");
+		break;
+	default:
+		ret = i2c_smbus_read_word_data(data->client,
+			BMA180_ACC_X_LSB + chan * 2);
+		if (ret < 0)
+			dev_err(&data->client->dev,
+				"failed to read accel_%c register\n",
+				'x' + chan);
+	}
+
+	return ret;
+}
+
+/* Read-modify-write a register field; val is shifted to the field's
+ * position (lowest set bit of mask).
+ * NOTE(review): reg_val is computed from ret before the ret < 0 check.
+ * Harmless (the error return happens before the write), but it reads
+ * oddly and uses a negative value in the intermediate expression. */
+static int bma180_set_bits(struct bma180_data *data, u8 reg, u8 mask, u8 val)
+{
+	int ret = i2c_smbus_read_byte_data(data->client, reg);
+	u8 reg_val = (ret & ~mask) | (val << (ffs(mask) - 1));
+
+	if (ret < 0)
+		return ret;
+
+	return i2c_smbus_write_byte_data(data->client, reg, reg_val);
+}
+
+/* Acknowledge/clear any pending interrupt via the part's reset bit. */
+static int bma180_reset_intr(struct bma180_data *data)
+{
+	int ret = bma180_set_bits(data, data->part_info->int_reset_reg,
+		data->part_info->int_reset_mask, 1);
+
+	if (ret)
+		dev_err(&data->client->dev, "failed to reset interrupt\n");
+
+	return ret;
+}
+
+/* Enable/disable the new-data interrupt, then clear anything pending
+ * so a stale edge can't fire immediately. */
+static int bma180_set_new_data_intr_state(struct bma180_data *data, bool state)
+{
+	int ret = bma180_set_bits(data, data->part_info->int_enable_reg,
+			data->part_info->int_enable_mask, state);
+	if (ret)
+		goto err;
+	ret = bma180_reset_intr(data);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(&data->client->dev,
+		"failed to set new data interrupt state %d\n", state);
+	return ret;
+}
+
+/* Put the chip to sleep / wake it; mirrors the result in
+ * data->sleep_state so register reads can be refused while asleep. */
+static int bma180_set_sleep_state(struct bma180_data *data, bool state)
+{
+	int ret = bma180_set_bits(data, data->part_info->sleep_reg,
+		data->part_info->sleep_mask, state);
+
+	if (ret) {
+		dev_err(&data->client->dev,
+			"failed to set sleep state %d\n", state);
+		return ret;
+	}
+	data->sleep_state = state;
+
+	return 0;
+}
+
+/* BMA180 only: unlock (or relock) writes to registers from 0x20 up. */
+static int bma180_set_ee_writing_state(struct bma180_data *data, bool state)
+{
+	int ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_EE_W, state);
+
+	if (ret)
+		dev_err(&data->client->dev,
+			"failed to set ee writing state %d\n", state);
+
+	return ret;
+}
+
+/* Set bandwidth in Hz; val must match a bw_table entry exactly, and the
+ * table index is the register field value.  -EINVAL if unsupported. */
+static int bma180_set_bw(struct bma180_data *data, int val)
+{
+	int ret, i;
+
+	if (data->sleep_state)
+		return -EBUSY;
+
+	for (i = 0; i < data->part_info->num_bw; ++i) {
+		if (data->part_info->bw_table[i] == val) {
+			ret = bma180_set_bits(data, data->part_info->bw_reg,
+				data->part_info->bw_mask, i);
+			if (ret) {
+				dev_err(&data->client->dev,
+					"failed to set bandwidth\n");
+				return ret;
+			}
+			data->bw = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/* Set scale (micro-units); same table-index-to-register scheme as
+ * bma180_set_bw().  Zero placeholders in bma250_scale_table can never
+ * match because val == 0 is rejected by the caller's semantics. */
+static int bma180_set_scale(struct bma180_data *data, int val)
+{
+	int ret, i;
+
+	if (data->sleep_state)
+		return -EBUSY;
+
+	for (i = 0; i < data->part_info->num_scales; ++i)
+		if (data->part_info->scale_table[i] == val) {
+			ret = bma180_set_bits(data, data->part_info->scale_reg,
+				data->part_info->scale_mask, i);
+			if (ret) {
+				dev_err(&data->client->dev,
+					"failed to set scale\n");
+				return ret;
+			}
+			data->scale = val;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+/* Switch between low-noise (mode == false) and low-power operation;
+ * the register value for low power is part-specific (lowpower_val). */
+static int bma180_set_pmode(struct bma180_data *data, bool mode)
+{
+	u8 reg_val = mode ? data->part_info->lowpower_val : 0;
+	int ret = bma180_set_bits(data, data->part_info->power_reg,
+		data->part_info->power_mask, reg_val);
+
+	if (ret) {
+		dev_err(&data->client->dev, "failed to set power mode\n");
+		return ret;
+	}
+	data->pmode = mode;
+
+	return 0;
+}
+
+/* Soft-reset the chip by writing the magic value to the reset register. */
+static int bma180_soft_reset(struct bma180_data *data)
+{
+	int ret = i2c_smbus_write_byte_data(data->client,
+		data->part_info->softreset_reg, BMA180_RESET_VAL);
+
+	if (ret)
+		dev_err(&data->client->dev, "failed to reset the chip\n");
+
+	return ret;
+}
+
+/* Common init for both parts: verify chip id, soft-reset, wait for the
+ * chip to settle, then leave interrupts off and low-noise power mode.
+ * NOTE(review): the id check reads BMA180_CHIP_ID (reg 0x00) and expects
+ * BMA180_ID_REG_VAL on both parts — presumably the BMA250 reports the
+ * same value at that address; confirm against the BMA250 datasheet. */
+static int bma180_chip_init(struct bma180_data *data)
+{
+	/* Try to read chip_id register. It must return 0x03. */
+	int ret = i2c_smbus_read_byte_data(data->client, BMA180_CHIP_ID);
+
+	if (ret < 0)
+		return ret;
+	if (ret != BMA180_ID_REG_VAL)
+		return -ENODEV;
+
+	ret = bma180_soft_reset(data);
+	if (ret)
+		return ret;
+	/*
+	 * No serial transaction should occur within minimum 10 us
+	 * after soft_reset command
+	 */
+	msleep(20);
+
+	ret = bma180_set_new_data_intr_state(data, false);
+	if (ret)
+		return ret;
+
+	return bma180_set_pmode(data, false);
+}
+
+/* BMA180 bring-up: disable wake-up mode, unlock extended registers,
+ * enable sample skipping, and program the 20 Hz / 2 G defaults. */
+static int bma180_chip_config(struct bma180_data *data)
+{
+	int ret = bma180_chip_init(data);
+
+	if (ret)
+		goto err;
+	ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
+	if (ret)
+		goto err;
+	ret = bma180_set_ee_writing_state(data, true);
+	if (ret)
+		goto err;
+	ret = bma180_set_bits(data, BMA180_OFFSET_LSB1, BMA180_SMP_SKIP, 1);
+	if (ret)
+		goto err;
+	ret = bma180_set_bw(data, 20); /* 20 Hz */
+	if (ret)
+		goto err;
+	ret = bma180_set_scale(data, 2452); /* 2 G */
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(&data->client->dev, "failed to config the chip\n");
+	return ret;
+}
+
+/* BMA250 bring-up: 16 Hz / 2 G defaults and route the data interrupt
+ * to the INT1 pin. */
+static int bma250_chip_config(struct bma180_data *data)
+{
+	int ret = bma180_chip_init(data);
+
+	if (ret)
+		goto err;
+	ret = bma180_set_bw(data, 16); /* 16 Hz */
+	if (ret)
+		goto err;
+	ret = bma180_set_scale(data, 38344); /* 2 G */
+	if (ret)
+		goto err;
+	ret = bma180_set_bits(data, BMA250_INT_MAP_REG,
+		BMA250_INT1_DATA_MASK, 1);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(&data->client->dev, "failed to disable the chip\n");
+	return ret;
+}
+
+/* Power-down for BMA180: interrupts off, relock extended registers,
+ * then sleep.  Best effort — failures are only logged. */
+static void bma180_chip_disable(struct bma180_data *data)
+{
+	if (bma180_set_new_data_intr_state(data, false))
+		goto err;
+	if (bma180_set_ee_writing_state(data, false))
+		goto err;
+	if (bma180_set_sleep_state(data, true))
+		goto err;
+
+	return;
+
+err:
+	dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
+/* Power-down for BMA250: same as above minus the EEPROM-lock step. */
+static void bma250_chip_disable(struct bma180_data *data)
+{
+	if (bma180_set_new_data_intr_state(data, false))
+		goto err;
+	if (bma180_set_sleep_state(data, true))
+		goto err;
+
+	return;
+
+err:
+	dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
+/* Format a value table for sysfs, skipping zero (reserved) entries;
+ * micros prints each value as a 0.xxxxxx fraction.
+ * NOTE(review): if every entry were zero, len stays 0 and
+ * buf[len - 1] writes before the buffer.  Both tables passed in have
+ * nonzero entries so it is not hit in practice, but a len == 0 guard
+ * would make this robust. */
+static ssize_t bma180_show_avail(char *buf, const int *vals, unsigned n,
+				 bool micros)
+{
+	size_t len = 0;
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (!vals[i])
+			continue;
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			micros ? "0.%06d " : "%d ", vals[i]);
+	}
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+/* sysfs: list of supported low-pass filter frequencies (Hz). */
+static ssize_t bma180_show_filter_freq_avail(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct bma180_data *data = iio_priv(dev_to_iio_dev(dev));
+
+	return bma180_show_avail(buf, data->part_info->bw_table,
+		data->part_info->num_bw, false);
+}
+
+/* sysfs: list of supported scales, printed as fractional values. */
+static ssize_t bma180_show_scale_avail(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct bma180_data *data = iio_priv(dev_to_iio_dev(dev));
+
+	return bma180_show_avail(buf, data->part_info->scale_table,
+		data->part_info->num_scales, true);
+}
+
+static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+	S_IRUGO, bma180_show_filter_freq_avail, NULL, 0);
+
+static IIO_DEVICE_ATTR(in_accel_scale_available,
+	S_IRUGO, bma180_show_scale_avail, NULL, 0);
+
+static struct attribute *bma180_attributes[] = {
+	&iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.
+		dev_attr.attr,
+	&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group bma180_attrs_group = {
+	.attrs = bma180_attributes,
+};
+
+/*
+ * Read a channel attribute.  RAW reads go to the hardware (refused
+ * with -EBUSY while the triggered buffer owns the device); scale,
+ * filter frequency and offset come from cached/constant values.
+ * chan->scan_index doubles as the enum bma180_chan register selector.
+ */
+static int bma180_read_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan, int *val, int *val2,
+		long mask)
+{
+	struct bma180_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&data->mutex);
+		if (iio_buffer_enabled(indio_dev)) {
+			mutex_unlock(&data->mutex);
+			return -EBUSY;
+		}
+		ret = bma180_get_data_reg(data, chan->scan_index);
+		mutex_unlock(&data->mutex);
+		if (ret < 0)
+			return ret;
+		/* Samples are left-justified in the 16-bit word. */
+		*val = sign_extend32(ret >> chan->scan_type.shift,
+			chan->scan_type.realbits - 1);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		*val = data->bw;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ACCEL:
+			*val = 0;
+			*val2 = data->scale;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			*val = 500;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		*val = 48; /* 0 LSB @ 24 degree C */
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Write scale (micro-units, integer part must be 0) or filter
+ * frequency (whole Hz, fractional part must be 0). */
+static int bma180_write_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	struct bma180_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		if (val)
+			return -EINVAL;
+		mutex_lock(&data->mutex);
+		ret = bma180_set_scale(data, val2);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		if (val2)
+			return -EINVAL;
+		mutex_lock(&data->mutex);
+		ret = bma180_set_bw(data, val);
+		mutex_unlock(&data->mutex);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info bma180_info = {
+	.attrs			= &bma180_attrs_group,
+	.read_raw		= bma180_read_raw,
+	.write_raw		= bma180_write_raw,
+	.driver_module		= THIS_MODULE,
+};
+
+/* Extended-info enum: index 0 = low_noise, 1 = low_power; the index is
+ * passed straight through to bma180_set_pmode(). */
+static const char * const bma180_power_modes[] = { "low_noise", "low_power" };
+
+static int bma180_get_power_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan)
+{
+	struct bma180_data *data = iio_priv(indio_dev);
+
+	return data->pmode;
+}
+
+static int bma180_set_power_mode(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct bma180_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bma180_set_pmode(data, mode);
+	mutex_unlock(&data->mutex);
+
+	return ret;
+}
+
+static const struct iio_enum bma180_power_mode_enum = {
+	.items = bma180_power_modes,
+	.num_items = ARRAY_SIZE(bma180_power_modes),
+	.get = bma180_get_power_mode,
+	.set = bma180_set_power_mode,
+};
+
+static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
+	IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
+	IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
+	{ },
+};
+
+/* Accel channel template: samples are signed, left-justified in 16 bits
+ * (shift = 16 - realbits); _bits is 14 on BMA180, 10 on BMA250. */
+#define BMA180_ACC_CHANNEL(_axis, _bits) {				\
+	.type = IIO_ACCEL,						\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##_axis,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),	\
+	.scan_index = AXIS_##_axis,					\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = _bits,					\
+		.storagebits = 16,					\
+		.shift = 16 - _bits,					\
+	},								\
+	.ext_info = bma180_ext_info,					\
+}
+
+/* 8-bit temperature channel stored in a 16-bit scan slot. */
+#define BMA180_TEMP_CHANNEL {						\
+	.type = IIO_TEMP,						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |			\
+		BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),	\
+	.scan_index = TEMP,						\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 8,						\
+		.storagebits = 16,					\
+	},								\
+}
+
+static const struct iio_chan_spec bma180_channels[] = {
+	BMA180_ACC_CHANNEL(X, 14),
+	BMA180_ACC_CHANNEL(Y, 14),
+	BMA180_ACC_CHANNEL(Z, 14),
+	BMA180_TEMP_CHANNEL,
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec bma250_channels[] = {
+	BMA180_ACC_CHANNEL(X, 10),
+	BMA180_ACC_CHANNEL(Y, 10),
+	BMA180_ACC_CHANNEL(Z, 10),
+	BMA180_TEMP_CHANNEL,
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/* Positional initializers: the order here must track the field order
+ * of struct bma180_part_info exactly. */
+static const struct bma180_part_info bma180_part_info[] = {
+	[BMA180] = {
+		bma180_channels, ARRAY_SIZE(bma180_channels),
+		bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
+		bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
+		BMA180_CTRL_REG0, BMA180_RESET_INT,
+		BMA180_CTRL_REG0, BMA180_SLEEP,
+		BMA180_BW_TCS, BMA180_BW,
+		BMA180_OFFSET_LSB1, BMA180_RANGE,
+		BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
+		BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
+		BMA180_RESET,
+		bma180_chip_config,
+		bma180_chip_disable,
+	},
+	[BMA250] = {
+		bma250_channels, ARRAY_SIZE(bma250_channels),
+		bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
+		bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
+		BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
+		BMA250_POWER_REG, BMA250_SUSPEND_MASK,
+		BMA250_BW_REG, BMA250_BW_MASK,
+		BMA250_RANGE_REG, BMA250_RANGE_MASK,
+		BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
+		BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
+		BMA250_RESET_REG,
+		bma250_chip_config,
+		bma250_chip_disable,
+	},
+};
+
+/*
+ * Pollfunc bottom half: timestamp first, then read each enabled scan
+ * channel under the mutex and pack the samples densely into data->buff
+ * before pushing to the IIO buffer.  On any read failure the partial
+ * sample is dropped (no push) but the trigger is still notified.
+ */
+static irqreturn_t bma180_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct bma180_data *data = iio_priv(indio_dev);
+	int64_t time_ns = iio_get_time_ns();
+	int bit, ret, i = 0;
+
+	mutex_lock(&data->mutex);
+
+	for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+			 indio_dev->masklength) {
+		ret = bma180_get_data_reg(data, bit);
+		if (ret < 0) {
+			mutex_unlock(&data->mutex);
+			goto err;
+		}
+		((s16 *)data->buff)[i++] = ret;
+	}
+
+	mutex_unlock(&data->mutex);
+
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Trigger enable/disable maps directly onto the chip's new-data IRQ. */
+static int bma180_data_rdy_trigger_set_state(struct iio_trigger *trig,
+		bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct bma180_data *data = iio_priv(indio_dev);
+
+	return bma180_set_new_data_intr_state(data, state);
+}
+
+/* Re-arm after each trigger: clear the pending interrupt on the chip. */
+static int bma180_trig_try_reen(struct iio_trigger *trig)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct bma180_data *data = iio_priv(indio_dev);
+
+	return bma180_reset_intr(data);
+}
+
+static const struct iio_trigger_ops bma180_trigger_ops = {
+	.set_trigger_state = bma180_data_rdy_trigger_set_state,
+	.try_reenable = bma180_trig_try_reen,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device, configure the chip (part selected by
+ * id->driver_data), optionally wire up a data-ready trigger when an IRQ
+ * is provided, then set up the triggered buffer and register.
+ * chip_config() runs before mutex_init() — safe, since nothing else can
+ * reach the device yet and chip_config does not take the mutex.
+ */
+static int bma180_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct bma180_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	data->part_info = &bma180_part_info[id->driver_data];
+
+	ret = data->part_info->chip_config(data);
+	if (ret < 0)
+		goto err_chip_disable;
+
+	mutex_init(&data->mutex);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = data->part_info->channels;
+	indio_dev->num_channels = data->part_info->num_channels;
+	indio_dev->name = id->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &bma180_info;
+
+	if (client->irq > 0) {
+		data->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+			indio_dev->id);
+		if (!data->trig) {
+			ret = -ENOMEM;
+			goto err_chip_disable;
+		}
+
+		ret = devm_request_irq(&client->dev, client->irq,
+			iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING,
+			"bma180_event", data->trig);
+		if (ret) {
+			dev_err(&client->dev, "unable to request IRQ\n");
+			goto err_trigger_free;
+		}
+
+		data->trig->dev.parent = &client->dev;
+		data->trig->ops = &bma180_trigger_ops;
+		iio_trigger_set_drvdata(data->trig, indio_dev);
+		indio_dev->trig = iio_trigger_get(data->trig);
+
+		ret = iio_trigger_register(data->trig);
+		if (ret)
+			goto err_trigger_free;
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+			bma180_trigger_handler, NULL);
+	if (ret < 0) {
+		dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+		goto err_trigger_unregister;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "unable to register iio device\n");
+		goto err_buffer_cleanup;
+	}
+
+	return 0;
+
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+	if (data->trig)
+		iio_trigger_unregister(data->trig);
+err_trigger_free:
+	/* NOTE(review): reachable with data->trig == NULL when no IRQ was
+	 * given and buffer setup fails — presumably iio_trigger_free()
+	 * tolerates NULL; verify against the target kernel version. */
+	iio_trigger_free(data->trig);
+err_chip_disable:
+	data->part_info->chip_disable(data);
+
+	return ret;
+}
+
+/* Tear down in reverse probe order, then put the chip to sleep. */
+static int bma180_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct bma180_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (data->trig) {
+		iio_trigger_unregister(data->trig);
+		iio_trigger_free(data->trig);
+	}
+
+	mutex_lock(&data->mutex);
+	data->part_info->chip_disable(data);
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend/resume: just toggle the chip's sleep bit. */
+static int bma180_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bma180_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bma180_set_sleep_state(data, true);
+	mutex_unlock(&data->mutex);
+
+	return ret;
+}
+
+static int bma180_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bma180_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bma180_set_sleep_state(data, false);
+	mutex_unlock(&data->mutex);
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
+#define BMA180_PM_OPS (&bma180_pm_ops)
+#else
+#define BMA180_PM_OPS NULL
+#endif
+
+/* driver_data selects the entry in bma180_part_info[]. */
+static struct i2c_device_id bma180_ids[] = {
+	{ "bma180", BMA180 },
+	{ "bma250", BMA250 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, bma180_ids);
+
+static struct i2c_driver bma180_driver = {
+	.driver = {
+		.name	= "bma180",
+		.owner	= THIS_MODULE,
+		.pm	= BMA180_PM_OPS,
+	},
+	.probe		= bma180_probe,
+	.remove		= bma180_remove,
+	.id_table	= bma180_ids,
+};
+
+module_i2c_driver(bma180_driver);
+
+MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("Bosch BMA180/BMA250 triaxial acceleration sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
new file mode 100644
index 00000000000000..066d0c04072c69
--- /dev/null
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -0,0 +1,1456 @@
+/*
+ * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
+ * - BMC150
+ * - BMI055
+ * - BMA255
+ * - BMA250E
+ * - BMA222E
+ * - BMA280
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMC150_ACCEL_DRV_NAME "bmc150_accel"
+#define BMC150_ACCEL_IRQ_NAME "bmc150_accel_event"
+#define BMC150_ACCEL_GPIO_NAME "bmc150_accel_int"
+
+#define BMC150_ACCEL_REG_CHIP_ID 0x00
+
+#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
+#define BMC150_ACCEL_ANY_MOTION_MASK 0x07
+#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
+#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
+#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
+
+#define BMC150_ACCEL_REG_PMU_LPW 0x11
+#define BMC150_ACCEL_PMU_MODE_MASK 0xE0
+#define BMC150_ACCEL_PMU_MODE_SHIFT 5
+#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK 0x17
+#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT 1
+
+#define BMC150_ACCEL_REG_PMU_RANGE 0x0F
+
+#define BMC150_ACCEL_DEF_RANGE_2G 0x03
+#define BMC150_ACCEL_DEF_RANGE_4G 0x05
+#define BMC150_ACCEL_DEF_RANGE_8G 0x08
+#define BMC150_ACCEL_DEF_RANGE_16G 0x0C
+
+/* Default BW: 125Hz */
+#define BMC150_ACCEL_REG_PMU_BW 0x10
+#define BMC150_ACCEL_DEF_BW 125
+
+#define BMC150_ACCEL_REG_INT_MAP_0 0x19
+#define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2)
+
+#define BMC150_ACCEL_REG_INT_MAP_1 0x1A
+#define BMC150_ACCEL_INT_MAP_1_BIT_DATA BIT(0)
+
+#define BMC150_ACCEL_REG_INT_RST_LATCH 0x21
+#define BMC150_ACCEL_INT_MODE_LATCH_RESET 0x80
+#define BMC150_ACCEL_INT_MODE_LATCH_INT 0x0F
+#define BMC150_ACCEL_INT_MODE_NON_LATCH_INT 0x00
+
+#define BMC150_ACCEL_REG_INT_EN_0 0x16
+#define BMC150_ACCEL_INT_EN_BIT_SLP_X BIT(0)
+#define BMC150_ACCEL_INT_EN_BIT_SLP_Y BIT(1)
+#define BMC150_ACCEL_INT_EN_BIT_SLP_Z BIT(2)
+
+#define BMC150_ACCEL_REG_INT_EN_1 0x17
+#define BMC150_ACCEL_INT_EN_BIT_DATA_EN BIT(4)
+
+#define BMC150_ACCEL_REG_INT_OUT_CTRL 0x20
+#define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL BIT(0)
+
+#define BMC150_ACCEL_REG_INT_5 0x27
+#define BMC150_ACCEL_SLOPE_DUR_MASK 0x03
+
+#define BMC150_ACCEL_REG_INT_6 0x28
+#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
+
+/* Slope duration in terms of number of samples */
+#define BMC150_ACCEL_DEF_SLOPE_DURATION 1
+/* in terms of multiples of g's/LSB, based on range */
+#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
+
+#define BMC150_ACCEL_REG_XOUT_L 0x02
+
+#define BMC150_ACCEL_MAX_STARTUP_TIME_MS 100
+
+/* Sleep Duration values */
+#define BMC150_ACCEL_SLEEP_500_MICRO 0x05
+#define BMC150_ACCEL_SLEEP_1_MS 0x06
+#define BMC150_ACCEL_SLEEP_2_MS 0x07
+#define BMC150_ACCEL_SLEEP_4_MS 0x08
+#define BMC150_ACCEL_SLEEP_6_MS 0x09
+#define BMC150_ACCEL_SLEEP_10_MS 0x0A
+#define BMC150_ACCEL_SLEEP_25_MS 0x0B
+#define BMC150_ACCEL_SLEEP_50_MS 0x0C
+#define BMC150_ACCEL_SLEEP_100_MS 0x0D
+#define BMC150_ACCEL_SLEEP_500_MS 0x0E
+#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
+
+#define BMC150_ACCEL_REG_TEMP 0x08
+#define BMC150_ACCEL_TEMP_CENTER_VAL 24
+
+#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
+#define BMC150_AUTO_SUSPEND_DELAY_MS 2000
+
+enum bmc150_accel_axis {
+	AXIS_X,
+	AXIS_Y,
+	AXIS_Z,
+};
+
+/* Values written into the PMU_LPW mode field (see PMU_MODE_SHIFT). */
+enum bmc150_power_modes {
+	BMC150_ACCEL_SLEEP_MODE_NORMAL,
+	BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
+	BMC150_ACCEL_SLEEP_MODE_LPM,
+	BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
+};
+
+/* One supported full-scale range: micro-unit scale + PMU_RANGE value. */
+struct bmc150_scale_info {
+	int scale;
+	u8 reg_range;
+};
+
+/* Per-variant description: expected chip id, channel spec and the
+ * four supported scale settings. */
+struct bmc150_accel_chip_info {
+	u8 chip_id;
+	const struct iio_chan_spec *channels;
+	int num_channels;
+	const struct bmc150_scale_info scale_table[4];
+};
+
+/* Per-device state; mutex serializes register access and the cached
+ * configuration shadow values. */
+struct bmc150_accel_data {
+	struct i2c_client *client;
+	struct iio_trigger *dready_trig;
+	struct iio_trigger *motion_trig;
+	struct mutex mutex;
+	s16 buffer[8];
+	u8 bw_bits;
+	u32 slope_dur;
+	u32 slope_thres;
+	u32 range;
+	int ev_enable_state;
+	bool dready_trigger_on;
+	bool motion_trigger_on;
+	int64_t timestamp;
+	const struct bmc150_accel_chip_info *chip_info;
+};
+
+/* Sample frequency (val + val2/1e6 Hz) to PMU_BW register value. */
+static const struct {
+	int val;
+	int val2;
+	u8 bw_bits;
+} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
+				     {15, 630000, 0x09},
+				     {31, 250000, 0x0A},
+				     {62, 500000, 0x0B},
+				     {125, 0, 0x0C},
+				     {250, 0, 0x0D},
+				     {500, 0, 0x0E},
+				     {1000, 0, 0x0F} };
+
+/* Per-bandwidth sample update period in milliseconds. */
+static const struct {
+	int bw_bits;
+	int msec;
+} bmc150_accel_sample_upd_time[] = { {0x08, 64},
+				     {0x09, 32},
+				     {0x0A, 16},
+				     {0x0B, 8},
+				     {0x0C, 4},
+				     {0x0D, 2},
+				     {0x0E, 1},
+				     {0x0F, 1} };
+
+/* Sleep duration in microseconds to PMU_LPW sleep-duration field. */
+static const struct {
+	int sleep_dur;
+	u8 reg_value;
+} bmc150_accel_sleep_value_table[] = { {0, 0},
+				       {500, BMC150_ACCEL_SLEEP_500_MICRO},
+				       {1000, BMC150_ACCEL_SLEEP_1_MS},
+				       {2000, BMC150_ACCEL_SLEEP_2_MS},
+				       {4000, BMC150_ACCEL_SLEEP_4_MS},
+				       {6000, BMC150_ACCEL_SLEEP_6_MS},
+				       {10000, BMC150_ACCEL_SLEEP_10_MS},
+				       {25000, BMC150_ACCEL_SLEEP_25_MS},
+				       {50000, BMC150_ACCEL_SLEEP_50_MS},
+				       {100000, BMC150_ACCEL_SLEEP_100_MS},
+				       {500000, BMC150_ACCEL_SLEEP_500_MS},
+				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
+
+
+
+/*
+ * Program the power mode and (optionally) the sleep duration into
+ * PMU_LPW.  dur_us must be one of bmc150_accel_sleep_value_table's
+ * entries; dur_us <= 0 means "no sleep duration" (field written as 0).
+ * Returns -EINVAL for an unsupported duration.
+ */
+static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
+				 enum bmc150_power_modes mode,
+				 int dur_us)
+{
+	int i;
+	int ret;
+	u8 lpw_bits;
+	int dur_val = -1;
+
+	if (dur_us > 0) {
+		for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
+									 ++i) {
+			if (bmc150_accel_sleep_value_table[i].sleep_dur ==
+									 dur_us)
+				dur_val =
+				bmc150_accel_sleep_value_table[i].reg_value;
+		}
+	} else
+		dur_val = 0;
+
+	if (dur_val < 0)
+		return -EINVAL;
+
+	lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
+	lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
+
+	dev_dbg(&data->client->dev, "Set Mode bits %x\n", lpw_bits);
+
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_pmu_lpw\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Set sample frequency (val Hz + val2 uHz fraction); must match one
+ * entry of bmc150_accel_samp_freq_table.  Caches bw_bits on success. */
+static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
+			       int val2)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
+		if (bmc150_accel_samp_freq_table[i].val == val &&
+		    bmc150_accel_samp_freq_table[i].val2 == val2) {
+			ret = i2c_smbus_write_byte_data(
+				data->client,
+				BMC150_ACCEL_REG_PMU_BW,
+				bmc150_accel_samp_freq_table[i].bw_bits);
+			if (ret < 0)
+				return ret;
+
+			data->bw_bits =
+				bmc150_accel_samp_freq_table[i].bw_bits;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Bring-up sequence: verify the variant's chip id, enter normal power
+ * mode, then program default bandwidth, range, slope duration/threshold
+ * for motion detection, and latched interrupt mode.
+ */
+static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_CHIP_ID);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Error: Reading chip id\n");
+		return ret;
+	}
+
+	dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
+	if (ret != data->chip_info->chip_id) {
+		dev_err(&data->client->dev, "Invalid chip %x\n", ret);
+		return -ENODEV;
+	}
+
+	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Set Bandwidth */
+	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Set Default Range */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_PMU_RANGE,
+					BMC150_ACCEL_DEF_RANGE_4G);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+					"Error writing reg_pmu_range\n");
+		return ret;
+	}
+
+	data->range = BMC150_ACCEL_DEF_RANGE_4G;
+
+	/* Set default slope duration */
+	ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_INT_5);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_5\n");
+		return ret;
+	}
+	/* NOTE(review): ORs the default into slope_dur without masking the
+	 * register value read above into it first — the read result in
+	 * "ret" is never merged; presumably intentional, but confirm
+	 * against the datasheet's INT_5 reserved bits. */
+	data->slope_dur |= BMC150_ACCEL_DEF_SLOPE_DURATION;
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_5,
+					data->slope_dur);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_5\n");
+		return ret;
+	}
+	dev_dbg(&data->client->dev, "slope_dur %x\n", data->slope_dur);
+
+	/* Set default slope thresholds */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_6,
+					BMC150_ACCEL_DEF_SLOPE_THRESHOLD);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_6\n");
+		return ret;
+	}
+	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
+	dev_dbg(&data->client->dev, "slope_thres %x\n", data->slope_thres);
+
+	/* Set default as latched interrupts */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_RST_LATCH,
+					BMC150_ACCEL_INT_MODE_LATCH_INT |
+					BMC150_ACCEL_INT_MODE_LATCH_RESET);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Error writing reg_int_rst_latch\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable (status == true) or disable the slope/any-motion interrupt:
+ * map/unmap it to INT1, and when enabling also (re)program the slope
+ * duration/threshold, ensure latched mode if the data-ready trigger is
+ * not active, and finally gate the per-axis slope interrupt enables.
+ */
+static int bmc150_accel_setup_any_motion_interrupt(
+					struct bmc150_accel_data *data,
+					bool status)
+{
+	int ret;
+
+	/* Enable/Disable INT1 mapping */
+	ret = i2c_smbus_read_byte_data(data->client,
+				       BMC150_ACCEL_REG_INT_MAP_0);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_map_0\n");
+		return ret;
+	}
+	if (status)
+		ret |= BMC150_ACCEL_INT_MAP_0_BIT_SLOPE;
+	else
+		ret &= ~BMC150_ACCEL_INT_MAP_0_BIT_SLOPE;
+
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_MAP_0,
+					ret);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_map_0\n");
+		return ret;
+	}
+
+	if (status) {
+		/* Set slope duration (no of samples) */
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMC150_ACCEL_REG_INT_5,
+						data->slope_dur);
+		if (ret < 0) {
+			dev_err(&data->client->dev, "Error write reg_int_5\n");
+			return ret;
+		}
+
+		/* Set slope thresholds */
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMC150_ACCEL_REG_INT_6,
+						data->slope_thres);
+		if (ret < 0) {
+			dev_err(&data->client->dev, "Error write reg_int_6\n");
+			return ret;
+		}
+
+		/*
+		 * New data interrupt is always non-latched,
+		 * which will have higher priority, so no need
+		 * to set latched mode, we will be flooded anyway with INTR
+		 */
+		if (!data->dready_trigger_on) {
+			ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_RST_LATCH,
+					BMC150_ACCEL_INT_MODE_LATCH_INT |
+					BMC150_ACCEL_INT_MODE_LATCH_RESET);
+			if (ret < 0) {
+				dev_err(&data->client->dev,
+					"Error writing reg_int_rst_latch\n");
+				return ret;
+			}
+		}
+
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMC150_ACCEL_REG_INT_EN_0,
+						BMC150_ACCEL_INT_EN_BIT_SLP_X |
+						BMC150_ACCEL_INT_EN_BIT_SLP_Y |
+						BMC150_ACCEL_INT_EN_BIT_SLP_Z);
+	} else
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMC150_ACCEL_REG_INT_EN_0,
+						0);
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_en_0\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * bmc150_accel_setup_new_data_interrupt() - route the data-ready interrupt
+ * to INT1 and enable or disable it.
+ * @data: driver state; callers in this file hold data->mutex
+ * @status: true to enable data-ready interrupts, false to disable them
+ *
+ * Read-modify-writes INT_MAP_1 to preserve unrelated mapping bits. On
+ * enable the interrupt is set non-latched (a new sample fires every
+ * conversion); on disable the default latched mode is restored. Returns 0
+ * on success or a negative errno from the I2C layer.
+ */
+static int bmc150_accel_setup_new_data_interrupt(struct bmc150_accel_data *data,
+ bool status)
+{
+ int ret;
+
+ /* Enable/Disable INT1 mapping */
+ ret = i2c_smbus_read_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_MAP_1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_map_1\n");
+ return ret;
+ }
+ /* ret now holds the current INT_MAP_1 register value */
+ if (status)
+ ret |= BMC150_ACCEL_INT_MAP_1_BIT_DATA;
+ else
+ ret &= ~BMC150_ACCEL_INT_MAP_1_BIT_DATA;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_MAP_1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_int_map_1\n");
+ return ret;
+ }
+
+ if (status) {
+ /*
+ * Set non latched mode interrupt and clear any latched
+ * interrupt
+ */
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_RST_LATCH,
+ BMC150_ACCEL_INT_MODE_NON_LATCH_INT |
+ BMC150_ACCEL_INT_MODE_LATCH_RESET);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing reg_int_rst_latch\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_EN_1,
+ BMC150_ACCEL_INT_EN_BIT_DATA_EN);
+
+ } else {
+ /* Restore default interrupt mode */
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_RST_LATCH,
+ BMC150_ACCEL_INT_MODE_LATCH_INT |
+ BMC150_ACCEL_INT_MODE_LATCH_RESET);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing reg_int_rst_latch\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_EN_1,
+ 0);
+ }
+
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * bmc150_accel_get_bw() - translate the cached bandwidth register bits
+ * back into a sampling frequency.
+ * @data: driver state holding the current bw_bits
+ * @val: integer part of the frequency (output)
+ * @val2: fractional (micro) part of the frequency (output)
+ *
+ * Returns IIO_VAL_INT_PLUS_MICRO on success, -EINVAL if bw_bits does not
+ * match any table entry.
+ */
+static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
+ int *val2)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++idx) {
+ if (bmc150_accel_samp_freq_table[idx].bw_bits != data->bw_bits)
+ continue;
+ *val = bmc150_accel_samp_freq_table[idx].val;
+ *val2 = bmc150_accel_samp_freq_table[idx].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_PM
+/*
+ * bmc150_accel_get_startup_times() - look up the wake-up settling time in
+ * milliseconds for the currently configured bandwidth.
+ *
+ * Falls back to BMC150_ACCEL_MAX_STARTUP_TIME_MS when bw_bits is not in
+ * the table.
+ */
+static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
+ if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
+ return bmc150_accel_sample_upd_time[i].msec;
+ }
+
+ return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
+}
+
+/*
+ * bmc150_accel_set_power_state() - take or release a runtime-PM reference.
+ * @data: driver state
+ * @on: true to resume and hold the device, false to allow autosuspend
+ *
+ * On a failed get the reference is dropped with put_noidle so the runtime
+ * PM usage count stays balanced. Returns 0 on success, negative errno
+ * otherwise.
+ */
+static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
+{
+ int ret;
+
+ if (on)
+ ret = pm_runtime_get_sync(&data->client->dev);
+ else {
+ /* Defer the actual suspend via the autosuspend timer */
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
+ }
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed: bmc150_accel_set_power_state for %d\n", on);
+ if (on)
+ pm_runtime_put_noidle(&data->client->dev);
+
+ return ret;
+ }
+
+ return 0;
+}
+#else
+/* Without CONFIG_PM the device is always powered; nothing to do. */
+static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
+{
+ return 0;
+}
+#endif
+
+/*
+ * bmc150_accel_set_scale() - program the g-range matching a requested
+ * IIO scale value.
+ * @data: driver state; callers hold data->mutex
+ * @val: micro-scale value to match against the chip's scale_table
+ *
+ * On success the written range is cached in data->range. Returns 0 on
+ * success, -EINVAL if val matches no table entry, or an I2C errno.
+ */
+static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
+ if (data->chip_info->scale_table[i].scale == val) {
+ ret = i2c_smbus_write_byte_data(
+ data->client,
+ BMC150_ACCEL_REG_PMU_RANGE,
+ data->chip_info->scale_table[i].reg_range);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing pmu_range\n");
+ return ret;
+ }
+
+ data->range = data->chip_info->scale_table[i].reg_range;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * bmc150_accel_get_temp() - read the raw temperature register.
+ * @data: driver state
+ * @val: sign-extended 8-bit temperature reading (output)
+ *
+ * Returns IIO_VAL_INT on success or a negative errno.
+ */
+static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_TEMP);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_temp\n");
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ /* Temperature is a signed 8-bit value */
+ *val = sign_extend32(ret, 7);
+
+ mutex_unlock(&data->mutex);
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * bmc150_accel_get_axis() - read one acceleration axis.
+ * @data: driver state
+ * @chan: channel whose scan_index selects the axis register
+ * @val: sign-extended sample (output)
+ *
+ * Wakes the device via runtime PM for the duration of the read and drops
+ * the reference afterwards. Returns IIO_VAL_INT or a negative errno.
+ */
+static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int ret;
+ int axis = chan->scan_index;
+
+ mutex_lock(&data->mutex);
+ ret = bmc150_accel_set_power_state(data, true);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_word_data(data->client,
+ BMC150_ACCEL_AXIS_TO_REG(axis));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading axis %d\n", axis);
+ /* Drop the PM reference taken above before bailing out */
+ bmc150_accel_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ /* Samples are left-justified; shift out unused LSBs, then sign-extend */
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ ret = bmc150_accel_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * bmc150_accel_read_raw() - IIO read_raw callback.
+ *
+ * Handles raw axis/temperature reads, the fixed temperature offset and
+ * scale, the acceleration scale looked up from the cached range, and the
+ * current sampling frequency. Raw axis reads are refused with -EBUSY
+ * while the buffer is enabled.
+ */
+static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ return bmc150_accel_get_temp(data, val);
+ case IIO_ACCEL:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ else
+ return bmc150_accel_get_axis(data, chan, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP) {
+ *val = BMC150_ACCEL_TEMP_CENTER_VAL;
+ return IIO_VAL_INT;
+ } else
+ return -EINVAL;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* 0.5 degrees Celsius per LSB */
+ *val2 = 500000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ {
+ int i;
+ const struct bmc150_scale_info *si;
+ int st_size = ARRAY_SIZE(data->chip_info->scale_table);
+
+ /* Map the cached range register value back to a scale */
+ for (i = 0; i < st_size; ++i) {
+ si = &data->chip_info->scale_table[i];
+ if (si->reg_range == data->range) {
+ *val2 = si->scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ }
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&data->mutex);
+ ret = bmc150_accel_get_bw(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * bmc150_accel_write_raw() - IIO write_raw callback.
+ *
+ * Supports writing the sampling frequency and the scale. A scale write
+ * requires the integer part (val) to be zero, since all supported scales
+ * are sub-unit micro values.
+ */
+static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&data->mutex);
+ ret = bmc150_accel_set_bw(data, val, val2);
+ mutex_unlock(&data->mutex);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (val)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bmc150_accel_set_scale(data, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * bmc150_accel_read_event() - report the cached any-motion event
+ * threshold (VALUE) or duration (PERIOD).
+ *
+ * Values come from the driver cache, not the hardware registers.
+ */
+static int bmc150_accel_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ *val2 = 0;
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = data->slope_thres;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ /* Only the duration bits of INT_5 belong to the period */
+ *val = data->slope_dur & BMC150_ACCEL_SLOPE_DUR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * bmc150_accel_write_event() - update the cached any-motion threshold or
+ * duration.
+ *
+ * Refused with -EBUSY while the event is enabled; the new values are only
+ * written to hardware the next time the any-motion interrupt is set up.
+ */
+static int bmc150_accel_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ if (data->ev_enable_state)
+ return -EBUSY;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ data->slope_thres = val;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ /* Replace only the duration bits, keep the rest of INT_5 */
+ data->slope_dur &= ~BMC150_ACCEL_SLOPE_DUR_MASK;
+ data->slope_dur |= val & BMC150_ACCEL_SLOPE_DUR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * bmc150_accel_read_event_config() - report whether the any-motion event
+ * is enabled.
+ *
+ * Returns the cached enable state; no hardware access is required.
+ */
+static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ return data->ev_enable_state;
+}
+
+/*
+ * bmc150_accel_write_event_config() - enable or disable the any-motion
+ * event.
+ *
+ * If the motion trigger is active, disabling only clears the driver flag;
+ * the hardware interrupt stays on for the trigger. Otherwise the runtime
+ * PM state and the slope interrupt are updated together, with the PM
+ * reference dropped again if the interrupt setup fails.
+ */
+static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Already enabled: nothing to do */
+ if (state && data->ev_enable_state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+
+ if (!state && data->motion_trigger_on) {
+ data->ev_enable_state = 0;
+ mutex_unlock(&data->mutex);
+ return 0;
+ }
+
+ /*
+ * We will expect the enable and disable to do operation in
+ * in reverse order. This will happen here anyway as our
+ * resume operation uses sync mode runtime pm calls, the
+ * suspend operation will be delayed by autosuspend delay
+ * So the disable operation will still happen in reverse of
+ * enable operation. When runtime pm is disabled the mode
+ * is always on so sequence doesn't matter
+ */
+
+ ret = bmc150_accel_set_power_state(data, state);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+
+ ret = bmc150_accel_setup_any_motion_interrupt(data, state);
+ if (ret < 0) {
+ /* Undo the PM reference taken above */
+ bmc150_accel_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+
+ data->ev_enable_state = state;
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+/*
+ * bmc150_accel_validate_trigger() - only accept this device's own
+ * data-ready or any-motion trigger for the buffer.
+ */
+static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ if (data->dready_trig != trig && data->motion_trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* sysfs: advertise the discrete sampling frequencies the chip supports */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
+
+static struct attribute *bmc150_accel_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmc150_accel_attrs_group = {
+ .attrs = bmc150_accel_attributes,
+};
+
+/* Any-motion (rate-of-change) event, direction decided at IRQ time */
+static const struct iio_event_spec bmc150_accel_event = {
+ .type = IIO_EV_TYPE_ROC,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD)
+};
+
+/*
+ * One acceleration axis channel; `bits` is the chip's sample resolution.
+ * Samples are stored left-justified in 16 bits, hence shift = 16 - bits.
+ */
+#define BMC150_ACCEL_CHANNEL(_axis, bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = AXIS_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 16 - (bits), \
+ }, \
+ .event_spec = &bmc150_accel_event, \
+ .num_event_specs = 1 \
+}
+
+/*
+ * Full channel set for one chip variant: temperature (not buffered,
+ * scan_index -1), X/Y/Z axes, and the timestamp.
+ */
+#define BMC150_ACCEL_CHANNELS(bits) { \
+ { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = -1, \
+ }, \
+ BMC150_ACCEL_CHANNEL(X, bits), \
+ BMC150_ACCEL_CHANNEL(Y, bits), \
+ BMC150_ACCEL_CHANNEL(Z, bits), \
+ IIO_CHAN_SOFT_TIMESTAMP(3), \
+}
+
+/* Per-variant channel tables, differing only in sample resolution */
+static const struct iio_chan_spec bma222e_accel_channels[] =
+ BMC150_ACCEL_CHANNELS(8);
+static const struct iio_chan_spec bma250e_accel_channels[] =
+ BMC150_ACCEL_CHANNELS(10);
+static const struct iio_chan_spec bmc150_accel_channels[] =
+ BMC150_ACCEL_CHANNELS(12);
+static const struct iio_chan_spec bma280_accel_channels[] =
+ BMC150_ACCEL_CHANNELS(14);
+
+/* Indices into bmc150_accel_chip_info_tbl, used as i2c/ACPI driver_data */
+enum {
+ bmc150,
+ bmi055,
+ bma255,
+ bma250e,
+ bma222e,
+ bma280,
+};
+
+/*
+ * Per-variant data: expected chip id, channel table, and the
+ * {scale, range-register} pairs served via IIO_CHAN_INFO_SCALE.
+ */
+static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
+ [bmc150] = {
+ .chip_id = 0xFA,
+ .channels = bmc150_accel_channels,
+ .num_channels = ARRAY_SIZE(bmc150_accel_channels),
+ .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
+ {19122, BMC150_ACCEL_DEF_RANGE_4G},
+ {38344, BMC150_ACCEL_DEF_RANGE_8G},
+ {76590, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ [bmi055] = {
+ .chip_id = 0xFA,
+ .channels = bmc150_accel_channels,
+ .num_channels = ARRAY_SIZE(bmc150_accel_channels),
+ .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
+ {19122, BMC150_ACCEL_DEF_RANGE_4G},
+ {38344, BMC150_ACCEL_DEF_RANGE_8G},
+ {76590, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ [bma255] = {
+ .chip_id = 0xFA,
+ .channels = bmc150_accel_channels,
+ .num_channels = ARRAY_SIZE(bmc150_accel_channels),
+ .scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
+ {19122, BMC150_ACCEL_DEF_RANGE_4G},
+ {38344, BMC150_ACCEL_DEF_RANGE_8G},
+ {76590, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ [bma250e] = {
+ .chip_id = 0xF9,
+ .channels = bma250e_accel_channels,
+ .num_channels = ARRAY_SIZE(bma250e_accel_channels),
+ .scale_table = { {38344, BMC150_ACCEL_DEF_RANGE_2G},
+ {76590, BMC150_ACCEL_DEF_RANGE_4G},
+ {153277, BMC150_ACCEL_DEF_RANGE_8G},
+ {306457, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ [bma222e] = {
+ .chip_id = 0xF8,
+ .channels = bma222e_accel_channels,
+ .num_channels = ARRAY_SIZE(bma222e_accel_channels),
+ .scale_table = { {153277, BMC150_ACCEL_DEF_RANGE_2G},
+ {306457, BMC150_ACCEL_DEF_RANGE_4G},
+ {612915, BMC150_ACCEL_DEF_RANGE_8G},
+ {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+ [bma280] = {
+ .chip_id = 0xFB,
+ .channels = bma280_accel_channels,
+ .num_channels = ARRAY_SIZE(bma280_accel_channels),
+ .scale_table = { {2392, BMC150_ACCEL_DEF_RANGE_2G},
+ {4785, BMC150_ACCEL_DEF_RANGE_4G},
+ {9581, BMC150_ACCEL_DEF_RANGE_8G},
+ {19152, BMC150_ACCEL_DEF_RANGE_16G} },
+ },
+};
+
+/* IIO core callbacks for this driver */
+static const struct iio_info bmc150_accel_info = {
+ .attrs = &bmc150_accel_attrs_group,
+ .read_raw = bmc150_accel_read_raw,
+ .write_raw = bmc150_accel_write_raw,
+ .read_event_value = bmc150_accel_read_event,
+ .write_event_value = bmc150_accel_write_event,
+ .write_event_config = bmc150_accel_write_event_config,
+ .read_event_config = bmc150_accel_read_event_config,
+ .validate_trigger = bmc150_accel_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * bmc150_accel_trigger_handler() - pollfunc bottom half: read every
+ * enabled axis and push one sample set into the buffer.
+ *
+ * On an I2C failure the partially-filled sample is dropped, but the
+ * trigger is still notified so the trigger core can re-arm.
+ */
+static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+ for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ indio_dev->masklength) {
+ ret = i2c_smbus_read_word_data(data->client,
+ BMC150_ACCEL_AXIS_TO_REG(bit));
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ goto err_read;
+ }
+ data->buffer[i++] = ret;
+ }
+ mutex_unlock(&data->mutex);
+
+ /* data->timestamp was captured in the hard-IRQ handler */
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ data->timestamp);
+err_read:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * bmc150_accel_trig_try_reen() - re-arm the trigger by clearing the
+ * latched interrupt.
+ *
+ * The data-ready interrupt runs non-latched and needs no ack; only the
+ * any-motion (latched) interrupt must be reset here.
+ */
+static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* new data interrupts don't need ack */
+ if (data->dready_trigger_on)
+ return 0;
+
+ mutex_lock(&data->mutex);
+ /* clear any latched interrupt */
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_RST_LATCH,
+ BMC150_ACCEL_INT_MODE_LATCH_INT |
+ BMC150_ACCEL_INT_MODE_LATCH_RESET);
+ mutex_unlock(&data->mutex);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing reg_int_rst_latch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * bmc150_accel_data_rdy_trigger_set_state() - enable/disable whichever of
+ * the two triggers (data-ready or any-motion) `trig` refers to.
+ *
+ * If the any-motion event is still enabled, disabling the motion trigger
+ * only clears the trigger flag and leaves the hardware interrupt on for
+ * the event path.
+ */
+static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ if (!state && data->ev_enable_state && data->motion_trigger_on) {
+ data->motion_trigger_on = false;
+ mutex_unlock(&data->mutex);
+ return 0;
+ }
+
+ /*
+ * Refer to comment in bmc150_accel_write_event_config for
+ * enable/disable operation order
+ */
+ ret = bmc150_accel_set_power_state(data, state);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ if (data->motion_trig == trig)
+ ret = bmc150_accel_setup_any_motion_interrupt(data, state);
+ else
+ ret = bmc150_accel_setup_new_data_interrupt(data, state);
+ if (ret < 0) {
+ /* Undo the PM reference taken above */
+ bmc150_accel_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ if (data->motion_trig == trig)
+ data->motion_trigger_on = state;
+ else
+ data->dready_trigger_on = state;
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+/* Shared ops for both the data-ready and any-motion triggers */
+static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
+ .set_trigger_state = bmc150_accel_data_rdy_trigger_set_state,
+ .try_reenable = bmc150_accel_trig_try_reen,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * bmc150_accel_event_handler() - threaded IRQ handler for any-motion
+ * events.
+ *
+ * Reads INT_STATUS_2 to find which axes fired and whether the slope was
+ * negative (falling) or positive (rising), pushes one IIO event per axis,
+ * then re-arms the latched interrupt unless the non-latched data-ready
+ * trigger is active.
+ */
+static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+ int dir;
+
+ ret = i2c_smbus_read_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_STATUS_2);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_status_2\n");
+ goto ack_intr_status;
+ }
+
+ /* Sign bit of the status byte selects the event direction */
+ if (ret & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
+ dir = IIO_EV_DIR_FALLING;
+ else
+ dir = IIO_EV_DIR_RISING;
+
+ if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+ if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+ if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_ROC,
+ dir),
+ data->timestamp);
+ack_intr_status:
+ if (!data->dready_trigger_on)
+ ret = i2c_smbus_write_byte_data(data->client,
+ BMC150_ACCEL_REG_INT_RST_LATCH,
+ BMC150_ACCEL_INT_MODE_LATCH_INT |
+ BMC150_ACCEL_INT_MODE_LATCH_RESET);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * bmc150_accel_data_rdy_trig_poll() - hard IRQ handler.
+ *
+ * Timestamps the interrupt, polls whichever trigger is active, and wakes
+ * the threaded handler only when the any-motion event is enabled.
+ */
+static irqreturn_t bmc150_accel_data_rdy_trig_poll(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ data->timestamp = iio_get_time_ns();
+
+ if (data->dready_trigger_on)
+ iio_trigger_poll(data->dready_trig);
+ else if (data->motion_trigger_on)
+ iio_trigger_poll(data->motion_trig);
+
+ if (data->ev_enable_state)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
+}
+
+/*
+ * bmc150_accel_match_acpi_device() - match the ACPI id table.
+ * @dev: the i2c client device
+ * @data: on match, receives the table's driver_data (chip variant index)
+ *
+ * Returns the device name to use for the IIO device, or NULL when no
+ * ACPI id matches.
+ */
+static const char *bmc150_accel_match_acpi_device(struct device *dev, int *data)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+
+ if (!id)
+ return NULL;
+
+ *data = (int) id->driver_data;
+
+ return dev_name(dev);
+}
+
+/*
+ * bmc150_accel_gpio_probe() - look up the interrupt GPIO and map it to an
+ * IRQ number.
+ *
+ * Used as a fallback when the i2c core did not provide client->irq.
+ * Returns the IRQ number on success or a negative errno.
+ */
+static int bmc150_accel_gpio_probe(struct i2c_client *client,
+ struct bmc150_accel_data *data)
+{
+ struct device *dev;
+ struct gpio_desc *gpio;
+ int ret;
+
+ if (!client)
+ return -EINVAL;
+
+ dev = &client->dev;
+
+ /* data ready gpio interrupt pin */
+ gpio = devm_gpiod_get_index(dev, BMC150_ACCEL_GPIO_NAME, 0);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Failed: gpio get index\n");
+ return PTR_ERR(gpio);
+ }
+
+ ret = gpiod_direction_input(gpio);
+ if (ret)
+ return ret;
+
+ ret = gpiod_to_irq(gpio);
+
+ dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+ return ret;
+}
+
+/*
+ * bmc150_accel_probe() - i2c probe: identify the chip variant, initialize
+ * the hardware, and register triggers, the triggered buffer, the IIO
+ * device, and runtime PM — in that order. The error paths unwind in
+ * reverse; devm handles the IRQ and trigger allocations.
+ */
+static int bmc150_accel_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bmc150_accel_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+ const char *name = NULL;
+ int chip_id = 0;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ /* Variant comes from the i2c id table, overridden by ACPI if present */
+ if (id) {
+ name = id->name;
+ chip_id = id->driver_data;
+ }
+
+ if (ACPI_HANDLE(&client->dev))
+ name = bmc150_accel_match_acpi_device(&client->dev, &chip_id);
+
+ data->chip_info = &bmc150_accel_chip_info_tbl[chip_id];
+
+ ret = bmc150_accel_chip_init(data);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&data->mutex);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = data->chip_info->channels;
+ indio_dev->num_channels = data->chip_info->num_channels;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &bmc150_accel_info;
+
+ /*
+ * NOTE(review): an unset client->irq is commonly 0, so the `< 0`
+ * test may skip the GPIO fallback on some platforms — confirm.
+ */
+ if (client->irq < 0)
+ client->irq = bmc150_accel_gpio_probe(client, data);
+
+ if (client->irq >= 0) {
+ ret = devm_request_threaded_irq(
+ &client->dev, client->irq,
+ bmc150_accel_data_rdy_trig_poll,
+ bmc150_accel_event_handler,
+ IRQF_TRIGGER_RISING,
+ BMC150_ACCEL_IRQ_NAME,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->dready_trig)
+ return -ENOMEM;
+
+ data->motion_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-any-motion-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->motion_trig)
+ return -ENOMEM;
+
+ data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->ops = &bmc150_accel_trigger_ops;
+ iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+ ret = iio_trigger_register(data->dready_trig);
+ if (ret)
+ return ret;
+
+ data->motion_trig->dev.parent = &client->dev;
+ data->motion_trig->ops = &bmc150_accel_trigger_ops;
+ iio_trigger_set_drvdata(data->motion_trig, indio_dev);
+ ret = iio_trigger_register(data->motion_trig);
+ if (ret) {
+ /* NULL marks it unregistered for the unwind path below */
+ data->motion_trig = NULL;
+ goto err_trigger_unregister;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev,
+ &iio_pollfunc_store_time,
+ bmc150_accel_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed: iio triggered buffer setup\n");
+ goto err_trigger_unregister;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to register iio device\n");
+ goto err_buffer_cleanup;
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ goto err_iio_unregister;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ BMC150_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ return 0;
+
+err_iio_unregister:
+ iio_device_unregister(indio_dev);
+err_buffer_cleanup:
+ /* dready_trig doubles as "IRQ path was set up" on these unwinds */
+ if (data->dready_trig)
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ if (data->dready_trig)
+ iio_trigger_unregister(data->dready_trig);
+ if (data->motion_trig)
+ iio_trigger_unregister(data->motion_trig);
+
+ return ret;
+}
+
+/*
+ * bmc150_accel_remove() - tear down in reverse of probe, then put the
+ * chip into deep suspend.
+ */
+static int bmc150_accel_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Triggers and buffer exist only when an IRQ was available */
+ if (data->dready_trig) {
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->dready_trig);
+ iio_trigger_unregister(data->motion_trig);
+ }
+
+ mutex_lock(&data->mutex);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: unconditionally put the chip into suspend mode */
+static int bmc150_accel_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+/*
+ * System resume: power the chip back up only if a trigger or the
+ * any-motion event was active; otherwise leave it for runtime PM.
+ */
+static int bmc150_accel_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ if (data->dready_trigger_on || data->motion_trigger_on ||
+ data->ev_enable_state)
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Runtime suspend: drop the chip into suspend mode. Returns -EAGAIN on
+ * failure so the PM core can retry.
+ */
+static int bmc150_accel_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* __func__ must be a format argument, never the format string */
+ dev_dbg(&data->client->dev, "%s\n", __func__);
+ ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
+ if (ret < 0)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * Runtime resume: switch the chip to normal mode, then wait out the
+ * bandwidth-dependent startup time before samples are trusted.
+ */
+static int bmc150_accel_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+ int ret;
+ int sleep_val;
+
+ /* __func__ must be a format argument, never the format string */
+ dev_dbg(&data->client->dev, "%s\n", __func__);
+
+ ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ if (ret < 0)
+ return ret;
+
+ sleep_val = bmc150_accel_get_startup_times(data);
+ if (sleep_val < 20)
+ usleep_range(sleep_val * 1000, 20000);
+ else
+ msleep_interruptible(sleep_val);
+
+ return 0;
+}
+#endif
+
+/* System sleep and runtime PM hooks */
+static const struct dev_pm_ops bmc150_accel_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
+ SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
+ bmc150_accel_runtime_resume, NULL)
+};
+
+/* ACPI ids; driver_data is the chip variant enum index */
+static const struct acpi_device_id bmc150_accel_acpi_match[] = {
+ {"BSBA0150", bmc150},
+ {"BMC150A", bmc150},
+ {"BMI055A", bmi055},
+ {"BMA0255", bma255},
+ {"BMA250E", bma250e},
+ {"BMA222E", bma222e},
+ {"BMA0280", bma280},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
+
+/* I2C ids; driver_data is the chip variant enum index */
+static const struct i2c_device_id bmc150_accel_id[] = {
+ {"bmc150_accel", bmc150},
+ {"bmi055_accel", bmi055},
+ {"bma255", bma255},
+ {"bma250e", bma250e},
+ {"bma222e", bma222e},
+ {"bma280", bma280},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
+
+/* Driver registration */
+static struct i2c_driver bmc150_accel_driver = {
+ .driver = {
+ .name = BMC150_ACCEL_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
+ .pm = &bmc150_accel_pm_ops,
+ },
+ .probe = bmc150_accel_probe,
+ .remove = bmc150_accel_remove,
+ .id_table = bmc150_accel_id,
+};
+module_i2c_driver(bmc150_accel_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BMC150 accelerometer driver");
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
new file mode 100644
index 00000000000000..d5d95317003ac8
--- /dev/null
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -0,0 +1,430 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+/* Scan indices for the three acceleration axes. */
+enum accel_3d_channel {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ ACCEL_3D_CHANNEL_MAX,
+};
+
+/* Per-device state: sensor-hub callbacks, per-axis report attributes,
+ * the latest raw samples, and cached scale/offset values. */
+struct accel_3d_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
+ u32 accel_val[ACCEL_3D_CHANNEL_MAX];
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
+/* HID usage IDs, indexed by scan index (X, Y, Z). */
+static const u32 accel_3d_addresses[ACCEL_3D_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_ACCEL_X_AXIS,
+ HID_USAGE_SENSOR_ACCEL_Y_AXIS,
+ HID_USAGE_SENSOR_ACCEL_Z_AXIS
+};
+
+/* Channel definitions */
+/* Channel definitions: scan_type is filled in at probe time from the
+ * report descriptor (see accel_3d_adjust_channel_bit_mask()). */
+static const struct iio_chan_spec accel_3d_channels[] = {
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_X,
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Y,
+ }, {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Z,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void accel_3d_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+static int accel_3d_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret_type;
+ s32 poll_value;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case 0:
+ poll_value = hid_sensor_read_poll_value(
+ &accel_state->common_attributes);
+ if (poll_value < 0)
+ return -EINVAL;
+
+ hid_sensor_power_state(&accel_state->common_attributes, true);
+ msleep_interruptible(poll_value * 2);
+ report_id = accel_state->accel[chan->scan_index].report_id;
+ address = accel_3d_addresses[chan->scan_index];
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ accel_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_ACCEL_3D, address,
+ report_id);
+ else {
+ *val = 0;
+ hid_sensor_power_state(&accel_state->common_attributes,
+ false);
+ return -EINVAL;
+ }
+ hid_sensor_power_state(&accel_state->common_attributes, false);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = accel_state->scale_pre_decml;
+ *val2 = accel_state->scale_post_decml;
+ ret_type = accel_state->scale_precision;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = accel_state->value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret_type = hid_sensor_read_samp_freq_value(
+ &accel_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &accel_state->common_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler */
+static int accel_3d_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &accel_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &accel_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* IIO callbacks for the accel-3d device. */
+static const struct iio_info accel_3d_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &accel_3d_read_raw,
+ .write_raw = &accel_3d_write_raw,
+};
+
+/* Function to push data to buffer */
+/* NOTE(review): 'len' is unused; iio_push_to_buffers() takes the length
+ * from the device's registered scan mask — confirm 'data' covers it. */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+ int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int accel_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "accel_3d_proc_event\n");
+ /* Only push when the trigger marked the buffer ready for data. */
+ if (atomic_read(&accel_state->common_attributes.data_ready))
+ hid_sensor_push_data(indio_dev,
+ accel_state->accel_val,
+ sizeof(accel_state->accel_val));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
+ int offset;
+ int ret = -EINVAL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ACCEL_X_AXIS:
+ case HID_USAGE_SENSOR_ACCEL_Y_AXIS:
+ case HID_USAGE_SENSOR_ACCEL_Z_AXIS:
+ /* Usage IDs for X/Y/Z are consecutive; map to scan index. */
+ offset = usage_id - HID_USAGE_SENSOR_ACCEL_X_AXIS;
+ accel_state->accel_val[CHANNEL_SCAN_INDEX_X + offset] =
+ *(u32 *)raw_data;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Parse report which is specific to an usage id*/
+static int accel_3d_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned usage_id,
+ struct accel_3d_state *st)
+{
+ int ret;
+ int i;
+
+ /* Fetch per-axis input attribute info and size the scan types. */
+ for (i = 0; i <= CHANNEL_SCAN_INDEX_Z; ++i) {
+ ret = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ACCEL_X_AXIS + i,
+ &st->accel[CHANNEL_SCAN_INDEX_X + i]);
+ if (ret < 0)
+ break;
+ accel_3d_adjust_channel_bit_mask(channels,
+ CHANNEL_SCAN_INDEX_X + i,
+ st->accel[CHANNEL_SCAN_INDEX_X + i].size);
+ }
+ dev_dbg(&pdev->dev, "accel_3d %x:%x, %x:%x, %x:%x\n",
+ st->accel[0].index,
+ st->accel[0].report_id,
+ st->accel[1].index, st->accel[1].report_id,
+ st->accel[2].index, st->accel[2].report_id);
+
+ /* Cache the exponent-derived scale once; X's info serves all axes. */
+ st->scale_precision = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_ACCEL_3D,
+ &st->accel[CHANNEL_SCAN_INDEX_X],
+ &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ACCELERATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
+ return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_accel_3d_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "accel_3d";
+ struct iio_dev *indio_dev;
+ struct accel_3d_state *accel_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ /* iio_dev is devm-managed; 'channels' below is kmemdup'd and must be
+ * kfree'd manually on every error path and in remove(). */
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct accel_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ accel_state = iio_priv(indio_dev);
+ accel_state->common_attributes.hsdev = hsdev;
+ accel_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_ACCEL_3D,
+ &accel_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ /* Duplicate the const template so scan types can be adjusted. */
+ channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = accel_3d_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_ACCEL_3D, accel_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &accel_3d_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ atomic_set(&accel_state->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &accel_state->common_attributes);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ /* Register sensor-hub callbacks last, once the device is live. */
+ accel_state->callbacks.send_event = accel_3d_proc_event;
+ accel_state->callbacks.capture_sample = accel_3d_capture_sample;
+ accel_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D,
+ &accel_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+/* Unwind in strict reverse order of the setup above. */
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&accel_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_accel_3d_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct accel_3d_state *accel_state = iio_priv(indio_dev);
+
+ /* Teardown mirrors probe() in reverse order. */
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ACCEL_3D);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&accel_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_device_id hid_accel_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200073",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_accel_3d_ids);
+
+/* Platform driver glue; devices are created by the HID sensor hub. */
+static struct platform_driver hid_accel_3d_platform_driver = {
+ .id_table = hid_accel_3d_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = hid_accel_3d_probe,
+ .remove = hid_accel_3d_remove,
+};
+module_platform_driver(hid_accel_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Accel 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
new file mode 100644
index 00000000000000..da2fe93739a2e4
--- /dev/null
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -0,0 +1,1429 @@
+/*
+ * KXCJK-1013 3-axis accelerometer driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/accel/kxcjk_1013.h>
+
+#define KXCJK1013_DRV_NAME "kxcjk1013"
+#define KXCJK1013_IRQ_NAME "kxcjk1013_event"
+
+#define KXCJK1013_REG_XOUT_L 0x06
+/*
+ * From low byte X axis register, all the other addresses of Y and Z can be
+ * obtained by just applying axis offset. The following axis defines are just
+ * provide clarity, but not used.
+ */
+#define KXCJK1013_REG_XOUT_H 0x07
+#define KXCJK1013_REG_YOUT_L 0x08
+#define KXCJK1013_REG_YOUT_H 0x09
+#define KXCJK1013_REG_ZOUT_L 0x0A
+#define KXCJK1013_REG_ZOUT_H 0x0B
+
+#define KXCJK1013_REG_DCST_RESP 0x0C
+#define KXCJK1013_REG_WHO_AM_I 0x0F
+#define KXCJK1013_REG_INT_SRC1 0x16
+#define KXCJK1013_REG_INT_SRC2 0x17
+#define KXCJK1013_REG_STATUS_REG 0x18
+#define KXCJK1013_REG_INT_REL 0x1A
+#define KXCJK1013_REG_CTRL1 0x1B
+#define KXCJK1013_REG_CTRL2 0x1D
+#define KXCJK1013_REG_INT_CTRL1 0x1E
+#define KXCJK1013_REG_INT_CTRL2 0x1F
+#define KXCJK1013_REG_DATA_CTRL 0x21
+#define KXCJK1013_REG_WAKE_TIMER 0x29
+#define KXCJK1013_REG_SELF_TEST 0x3A
+#define KXCJK1013_REG_WAKE_THRES 0x6A
+
+#define KXCJK1013_REG_CTRL1_BIT_PC1 BIT(7)
+#define KXCJK1013_REG_CTRL1_BIT_RES BIT(6)
+#define KXCJK1013_REG_CTRL1_BIT_DRDY BIT(5)
+#define KXCJK1013_REG_CTRL1_BIT_GSEL1 BIT(4)
+#define KXCJK1013_REG_CTRL1_BIT_GSEL0 BIT(3)
+#define KXCJK1013_REG_CTRL1_BIT_WUFE BIT(1)
+#define KXCJK1013_REG_INT_REG1_BIT_IEA BIT(4)
+#define KXCJK1013_REG_INT_REG1_BIT_IEN BIT(5)
+
+#define KXCJK1013_DATA_MASK_12_BIT 0x0FFF
+#define KXCJK1013_MAX_STARTUP_TIME_US 100000
+
+#define KXCJK1013_SLEEP_DELAY_MS 2000
+
+#define KXCJK1013_REG_INT_SRC2_BIT_ZP BIT(0)
+#define KXCJK1013_REG_INT_SRC2_BIT_ZN BIT(1)
+#define KXCJK1013_REG_INT_SRC2_BIT_YP BIT(2)
+#define KXCJK1013_REG_INT_SRC2_BIT_YN BIT(3)
+#define KXCJK1013_REG_INT_SRC2_BIT_XP BIT(4)
+#define KXCJK1013_REG_INT_SRC2_BIT_XN BIT(5)
+
+#define KXCJK1013_DEFAULT_WAKE_THRES 1
+
+/* Supported Kionix chip variants; index into odr_start_up_times[]. */
+enum kx_chipset {
+ KXCJK1013,
+ KXCJ91008,
+ KXTJ21009,
+ KX_MAX_CHIPS /* this must be last */
+};
+
+/* Per-device state; 'mutex' serializes all register access and the
+ * cached odr_bits/range/threshold fields. */
+struct kxcjk1013_data {
+ struct i2c_client *client;
+ struct iio_trigger *dready_trig;
+ struct iio_trigger *motion_trig;
+ struct mutex mutex;
+ s16 buffer[8];
+ u8 odr_bits;
+ u8 range;
+ int wake_thres;
+ int wake_dur;
+ bool active_high_intr;
+ bool dready_trigger_on;
+ int ev_enable_state;
+ bool motion_trigger_on;
+ int64_t timestamp;
+ enum kx_chipset chipset;
+};
+
+enum kxcjk1013_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+/* PC1 bit clear = STANDBY (registers writable), set = OPERATION. */
+enum kxcjk1013_mode {
+ STANDBY,
+ OPERATION,
+};
+
+enum kxcjk1013_range {
+ KXCJK1013_RANGE_2G,
+ KXCJK1013_RANGE_4G,
+ KXCJK1013_RANGE_8G,
+};
+
+/* Sample frequency (Hz as val.val2 micro) to DATA_CTRL register bits. */
+static const struct {
+ int val;
+ int val2;
+ int odr_bits;
+} samp_freq_table[] = { {0, 781000, 0x08}, {1, 563000, 0x09},
+ {3, 125000, 0x0A}, {6, 250000, 0x0B}, {12, 500000, 0},
+ {25, 0, 0x01}, {50, 0, 0x02}, {100, 0, 0x03},
+ {200, 0, 0x04}, {400, 0, 0x05}, {800, 0, 0x06},
+ {1600, 0, 0x07} };
+
+/* Refer to section 4 of the specification */
+static const struct {
+ int odr_bits;
+ int usec;
+} odr_start_up_times[KX_MAX_CHIPS][12] = {
+ /* KXCJK-1013 */
+ {
+ {0x08, 100000},
+ {0x09, 100000},
+ {0x0A, 100000},
+ {0x0B, 100000},
+ {0, 80000},
+ {0x01, 41000},
+ {0x02, 21000},
+ {0x03, 11000},
+ {0x04, 6400},
+ {0x05, 3900},
+ {0x06, 2700},
+ {0x07, 2100},
+ },
+ /* KXCJ9-1008 */
+ {
+ {0x08, 100000},
+ {0x09, 100000},
+ {0x0A, 100000},
+ {0x0B, 100000},
+ {0, 80000},
+ {0x01, 41000},
+ {0x02, 21000},
+ {0x03, 11000},
+ {0x04, 6400},
+ {0x05, 3900},
+ {0x06, 2700},
+ {0x07, 2100},
+ },
+ /* KXCTJ2-1009 */
+ {
+ {0x08, 1240000},
+ {0x09, 621000},
+ {0x0A, 309000},
+ {0x0B, 151000},
+ {0, 80000},
+ {0x01, 41000},
+ {0x02, 21000},
+ {0x03, 11000},
+ {0x04, 6000},
+ {0x05, 4000},
+ {0x06, 3000},
+ {0x07, 2000},
+ },
+};
+
+/* Scale (micro-g per LSB in 12-bit mode) to CTRL1 GSEL bit values. */
+static const struct {
+ u16 scale;
+ u8 gsel_0;
+ u8 gsel_1;
+} KXCJK1013_scale_table[] = { {9582, 0, 0},
+ {19163, 1, 0},
+ {38326, 0, 1} };
+
+/* Wake-function ODR (CTRL2) closest to the main sample frequency;
+ * 0x06 (100 Hz) is the wake engine's maximum rate. */
+static const struct {
+ int val;
+ int val2;
+ int odr_bits;
+} wake_odr_data_rate_table[] = { {0, 781000, 0x00},
+ {1, 563000, 0x01},
+ {3, 125000, 0x02},
+ {6, 250000, 0x03},
+ {12, 500000, 0x04},
+ {25, 0, 0x05},
+ {50, 0, 0x06},
+ {100, 0, 0x06},
+ {200, 0, 0x06},
+ {400, 0, 0x06},
+ {800, 0, 0x06},
+ {1600, 0, 0x06} };
+
+/* Set the PC1 bit in CTRL1 to select STANDBY or OPERATION mode. */
+static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
+ enum kxcjk1013_mode mode)
+{
+ int ret;
+
+ /* Read-modify-write CTRL1; 'ret' carries the register value. */
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (mode == STANDBY)
+ ret &= ~KXCJK1013_REG_CTRL1_BIT_PC1;
+ else
+ ret |= KXCJK1013_REG_CTRL1_BIT_PC1;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Read CTRL1 and report the current mode via *mode (PC1 bit). */
+static int kxcjk1013_get_mode(struct kxcjk1013_data *data,
+ enum kxcjk1013_mode *mode)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (ret & KXCJK1013_REG_CTRL1_BIT_PC1)
+ *mode = OPERATION;
+ else
+ *mode = STANDBY;
+
+ return 0;
+}
+
+/* Program the G-range (GSEL bits in CTRL1) from the scale table and
+ * cache the index in data->range. Caller must hold the chip in STANDBY. */
+static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ /* Shifts 3 and 4 place gsel_0/gsel_1 at GSEL0/GSEL1 bit positions. */
+ ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
+ KXCJK1013_REG_CTRL1_BIT_GSEL1);
+ ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
+ ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_CTRL1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ data->range = range_index;
+
+ return 0;
+}
+
+/*
+ * One-time chip setup: verify WHO_AM_I is readable, enter STANDBY,
+ * enable 12-bit resolution, set 4G range, cache the current ODR bits,
+ * program INT polarity, then return to OPERATION mode.
+ */
+static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading who_am_i\n");
+ return ret;
+ }
+
+ dev_dbg(&data->client->dev, "KXCJK1013 Chip Id %x\n", ret);
+
+ /* CTRL1 may only be changed in STANDBY per the datasheet. */
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ /* Set 12 bit mode */
+ ret |= KXCJK1013_REG_CTRL1_BIT_RES;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ /* Setting range to 4G */
+ ret = kxcjk1013_set_range(data, KXCJK1013_RANGE_4G);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_DATA_CTRL);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_data_ctrl\n");
+ return ret;
+ }
+
+ data->odr_bits = ret;
+
+ /* Set up INT polarity */
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
+ return ret;
+ }
+
+ if (data->active_high_intr)
+ ret |= KXCJK1013_REG_INT_REG1_BIT_IEA;
+ else
+ ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEA;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
+ return ret;
+ }
+
+ ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret < 0)
+ return ret;
+
+ data->wake_thres = KXCJK1013_DEFAULT_WAKE_THRES;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Look up the startup delay (us) for the current ODR and chip variant;
+ * falls back to the worst-case delay if the ODR bits are unknown. */
+static int kxcjk1013_get_startup_times(struct kxcjk1013_data *data)
+{
+ int i;
+ int idx = data->chipset;
+
+ for (i = 0; i < ARRAY_SIZE(odr_start_up_times[idx]); ++i) {
+ if (odr_start_up_times[idx][i].odr_bits == data->odr_bits)
+ return odr_start_up_times[idx][i].usec;
+ }
+
+ return KXCJK1013_MAX_STARTUP_TIME_US;
+}
+#endif
+
+/* Take (on=true) or drop (on=false) a runtime-PM reference; the drop
+ * path uses autosuspend so the chip stays up between bursts. */
+static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
+{
+ int ret;
+
+ if (on)
+ ret = pm_runtime_get_sync(&data->client->dev);
+ else {
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
+ }
+ /* NOTE(review): a failed pm_runtime_get_sync() still holds a usage
+ * count — consider pm_runtime_put_noidle() here; confirm policy. */
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed: kxcjk1013_set_power_state for %d\n", on);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Write the cached wake duration and wake threshold to the chip. */
+static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_WAKE_TIMER,
+ data->wake_dur);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Error writing reg_wake_timer\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_WAKE_THRES,
+ data->wake_thres);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_wake_thres\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Enable/disable the any-motion (wake) interrupt: update thresholds,
+ * toggle IEN in INT_CTRL1 and WUFE in CTRL1, restoring the prior mode. */
+static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
+ bool status)
+{
+ int ret;
+ enum kxcjk1013_mode store_mode;
+
+ ret = kxcjk1013_get_mode(data, &store_mode);
+ if (ret < 0)
+ return ret;
+
+ /* This is requirement by spec to change state to STANDBY */
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = kxcjk1013_chip_update_thresholds(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ else
+ ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KXCJK1013_REG_CTRL1_BIT_WUFE;
+ else
+ ret &= ~KXCJK1013_REG_CTRL1_BIT_WUFE;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ /* Only return to OPERATION if the chip was running before. */
+ if (store_mode == OPERATION) {
+ ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Enable/disable the data-ready interrupt: toggle IEN in INT_CTRL1 and
+ * DRDY in CTRL1, restoring the prior operating mode afterwards. */
+static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
+ bool status)
+{
+ int ret;
+ enum kxcjk1013_mode store_mode;
+
+ ret = kxcjk1013_get_mode(data, &store_mode);
+ if (ret < 0)
+ return ret;
+
+ /* This is requirement by spec to change state to STANDBY */
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ else
+ ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
+ ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
+ return ret;
+ }
+
+ if (status)
+ ret |= KXCJK1013_REG_CTRL1_BIT_DRDY;
+ else
+ ret &= ~KXCJK1013_REG_CTRL1_BIT_DRDY;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ KXCJK1013_REG_CTRL1, ret);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
+ return ret;
+ }
+
+ /* Only return to OPERATION if the chip was running before. */
+ if (store_mode == OPERATION) {
+ ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Map an exact val.val2 (Hz.microHz) frequency to DATA_CTRL odr bits;
+ * -EINVAL if the frequency is not in the table. */
+static int kxcjk1013_convert_freq_to_bit(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
+ if (samp_freq_table[i].val == val &&
+ samp_freq_table[i].val2 == val2) {
+ return samp_freq_table[i].odr_bits;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Map an exact val.val2 frequency to the wake engine's CTRL2 odr bits;
+ * -EINVAL if the frequency is not in the table. */
+static int kxcjk1013_convert_wake_odr_to_bit(int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wake_odr_data_rate_table); ++i) {
+ if (wake_odr_data_rate_table[i].val == val &&
+ wake_odr_data_rate_table[i].val2 == val2) {
+ return wake_odr_data_rate_table[i].odr_bits;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Program both the main ODR (DATA_CTRL) and the matching wake-engine
+ * ODR (CTRL2), caching the bits; mode is restored afterwards. */
+static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
+{
+ int ret;
+ int odr_bits;
+ enum kxcjk1013_mode store_mode;
+
+ ret = kxcjk1013_get_mode(data, &store_mode);
+ if (ret < 0)
+ return ret;
+
+ odr_bits = kxcjk1013_convert_freq_to_bit(val, val2);
+ if (odr_bits < 0)
+ return odr_bits;
+
+ /* To change ODR, the chip must be set to STANDBY as per spec */
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_DATA_CTRL,
+ odr_bits);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing data_ctrl\n");
+ return ret;
+ }
+
+ data->odr_bits = odr_bits;
+
+ odr_bits = kxcjk1013_convert_wake_odr_to_bit(val, val2);
+ if (odr_bits < 0)
+ return odr_bits;
+
+ ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL2,
+ odr_bits);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
+ return ret;
+ }
+
+ if (store_mode == OPERATION) {
+ ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Translate cached odr_bits back into val.val2 Hz for read_raw;
+ * returns IIO_VAL_INT_PLUS_MICRO or -EINVAL if the bits are unknown. */
+static int kxcjk1013_get_odr(struct kxcjk1013_data *data, int *val, int *val2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
+ if (samp_freq_table[i].odr_bits == data->odr_bits) {
+ *val = samp_freq_table[i].val;
+ *val2 = samp_freq_table[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Read the 16-bit output word for one axis; axis registers are laid
+ * out consecutively two bytes apart starting at XOUT_L. */
+static int kxcjk1013_get_acc_reg(struct kxcjk1013_data *data, int axis)
+{
+ u8 reg = KXCJK1013_REG_XOUT_L + axis * 2;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, reg);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read accel_%c registers\n", 'x' + axis);
+ return ret;
+ }
+
+ return ret;
+}
+
+/* Set the G-range whose scale matches 'val' (micro units); chip must
+ * pass through STANDBY for the range change per the datasheet. */
+static int kxcjk1013_set_scale(struct kxcjk1013_data *data, int val)
+{
+ int ret, i;
+ enum kxcjk1013_mode store_mode;
+
+
+ for (i = 0; i < ARRAY_SIZE(KXCJK1013_scale_table); ++i) {
+ if (KXCJK1013_scale_table[i].scale == val) {
+
+ ret = kxcjk1013_get_mode(data, &store_mode);
+ if (ret < 0)
+ return ret;
+
+ ret = kxcjk1013_set_mode(data, STANDBY);
+ if (ret < 0)
+ return ret;
+
+ ret = kxcjk1013_set_range(data, i);
+ if (ret < 0)
+ return ret;
+
+ if (store_mode == OPERATION) {
+ ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* IIO read_raw: raw samples (power-managed, 12-bit sign-extended from
+ * bits 15:4), scale from the range table, or the sampling frequency. */
+static int kxcjk1013_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->mutex);
+ /* Direct reads are not allowed while buffered capture runs. */
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else {
+ ret = kxcjk1013_set_power_state(data, true);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ ret = kxcjk1013_get_acc_reg(data, chan->scan_index);
+ if (ret < 0) {
+ kxcjk1013_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ /* Sample is left-justified: drop 4 LSBs, sign-extend. */
+ *val = sign_extend32(ret >> 4, 11);
+ ret = kxcjk1013_set_power_state(data, false);
+ }
+ mutex_unlock(&data->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = KXCJK1013_scale_table[data->range].scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&data->mutex);
+ ret = kxcjk1013_get_odr(data, val, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* IIO write_raw: sampling frequency or scale (scale integer part must
+ * be 0; the micro part selects the range table entry). */
+static int kxcjk1013_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ mutex_lock(&data->mutex);
+ ret = kxcjk1013_set_odr(data, val, val2);
+ mutex_unlock(&data->mutex);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (val)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = kxcjk1013_set_scale(data, val2);
+ mutex_unlock(&data->mutex);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Report the cached any-motion threshold (VALUE) or duration (PERIOD);
+ * values are raw register units, not converted. */
+static int kxcjk1013_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ *val2 = 0;
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = data->wake_thres;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ *val = data->wake_dur;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT;
+}
+
+/* Cache a new threshold/duration; written to the chip when the event
+ * is (re)enabled. Rejected while the event is currently enabled. */
+static int kxcjk1013_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ if (data->ev_enable_state)
+ return -EBUSY;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ data->wake_thres = val;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ data->wake_dur = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Report whether the any-motion event is currently enabled. */
+static int kxcjk1013_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ return data->ev_enable_state;
+}
+
+/* Enable or disable the any-motion event: manage the runtime-PM
+ * reference and program the wake interrupt, guarded by data->mutex. */
+static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (state && data->ev_enable_state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+
+ /* The motion trigger shares the interrupt; leave hardware alone. */
+ if (!state && data->motion_trigger_on) {
+ data->ev_enable_state = 0;
+ mutex_unlock(&data->mutex);
+ return 0;
+ }
+
+ /*
+ * We will expect the enable and disable to do operation in
+ * reverse order. This will happen here anyway as our
+ * resume operation uses sync mode runtime pm calls, the
+ * suspend operation will be delayed by autosuspend delay
+ * So the disable operation will still happen in reverse of
+ * enable operation. When runtime pm is disabled the mode
+ * is always on so sequence doesn't matter
+ */
+ ret = kxcjk1013_set_power_state(data, state);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+
+ ret = kxcjk1013_setup_any_motion_interrupt(data, state);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+
+ data->ev_enable_state = state;
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+/* Accept only this device's own data-ready or any-motion trigger. */
+static int kxcjk1013_validate_trigger(struct iio_dev *indio_dev,
+				      struct iio_trigger *trig)
+{
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+	if (trig == data->dready_trig || trig == data->motion_trig)
+		return 0;
+
+	return -EINVAL;
+}
+
+/* Sysfs lists of the fixed ODRs (Hz) and scales the chip supports. */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+	"0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600");
+
+static IIO_CONST_ATTR(in_accel_scale_available, "0.009582 0.019163 0.038326");
+
+static struct attribute *kxcjk1013_attributes[] = {
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group kxcjk1013_attrs_group = {
+	.attrs = kxcjk1013_attributes,
+};
+
+/* One threshold event, shared by all axes, reported in either direction. */
+static const struct iio_event_spec kxcjk1013_event = {
+	.type = IIO_EV_TYPE_THRESH,
+	.dir = IIO_EV_DIR_EITHER,
+	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			 BIT(IIO_EV_INFO_ENABLE) |
+			 BIT(IIO_EV_INFO_PERIOD)
+};
+
+/* 12-bit signed samples stored left-justified in 16 bits (shift 4). */
+#define KXCJK1013_CHANNEL(_axis) {					\
+	.type = IIO_ACCEL,						\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##_axis,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+				BIT(IIO_CHAN_INFO_SAMP_FREQ),		\
+	.scan_index = AXIS_##_axis,					\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 12,						\
+		.storagebits = 16,					\
+		.shift = 4,						\
+		.endianness = IIO_CPU,					\
+	},								\
+	.event_spec = &kxcjk1013_event,					\
+	.num_event_specs = 1						\
+}
+
+static const struct iio_chan_spec kxcjk1013_channels[] = {
+	KXCJK1013_CHANNEL(X),
+	KXCJK1013_CHANNEL(Y),
+	KXCJK1013_CHANNEL(Z),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_info kxcjk1013_info = {
+	.attrs			= &kxcjk1013_attrs_group,
+	.read_raw		= kxcjk1013_read_raw,
+	.write_raw		= kxcjk1013_write_raw,
+	.read_event_value	= kxcjk1013_read_event,
+	.write_event_value	= kxcjk1013_write_event,
+	.write_event_config	= kxcjk1013_write_event_config,
+	.read_event_config	= kxcjk1013_read_event_config,
+	.validate_trigger	= kxcjk1013_validate_trigger,
+	.driver_module		= THIS_MODULE,
+};
+
+/*
+ * Triggered-buffer bottom half: read each enabled axis over i2c and push
+ * the packed samples (plus timestamp) to the buffer. data->mutex protects
+ * the register accesses and data->buffer.
+ */
+static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int bit, ret, i = 0;
+
+	mutex_lock(&data->mutex);
+
+	for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+			 indio_dev->masklength) {
+		ret = kxcjk1013_get_acc_reg(data, bit);
+		if (ret < 0) {
+			mutex_unlock(&data->mutex);
+			/* Skip the push but still notify trigger done. */
+			goto err;
+		}
+		data->buffer[i++] = ret;
+	}
+	mutex_unlock(&data->mutex);
+
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+					   data->timestamp);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Re-arm the interrupt line by reading the chip's INT_REL latch register. */
+static int kxcjk1013_trig_try_reen(struct iio_trigger *trig)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret = i2c_smbus_read_byte_data(data->client,
+					   KXCJK1013_REG_INT_REL);
+
+	if (ret >= 0)
+		return 0;
+
+	dev_err(&data->client->dev, "Error reading reg_int_rel\n");
+	return ret;
+}
+
+/*
+ * Enable/disable one of the two triggers (data-ready or any-motion).
+ * Serialized by data->mutex; the on/off state of each trigger is cached
+ * so the IRQ poll handler knows which one to fire.
+ */
+static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
+						bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+
+	/*
+	 * If the event path still needs the any-motion interrupt, only
+	 * clear the trigger flag and leave the hardware configured.
+	 */
+	if (!state && data->ev_enable_state && data->motion_trigger_on) {
+		data->motion_trigger_on = false;
+		mutex_unlock(&data->mutex);
+		return 0;
+	}
+
+	ret = kxcjk1013_set_power_state(data, state);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+	/* Program the interrupt source matching the trigger being toggled. */
+	if (data->motion_trig == trig)
+		ret = kxcjk1013_setup_any_motion_interrupt(data, state);
+	else
+		ret = kxcjk1013_setup_new_data_interrupt(data, state);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+	if (data->motion_trig == trig)
+		data->motion_trigger_on = state;
+	else
+		data->dready_trigger_on = state;
+
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+/* Shared by both the data-ready and the any-motion trigger. */
+static const struct iio_trigger_ops kxcjk1013_trigger_ops = {
+	.set_trigger_state = kxcjk1013_data_rdy_trigger_set_state,
+	.try_reenable = kxcjk1013_trig_try_reen,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Threaded IRQ handler for the any-motion event: decode which axis and
+ * direction crossed the threshold from INT_SRC2 and push one IIO event
+ * per set bit, then acknowledge the interrupt via INT_REL.
+ */
+static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_SRC1);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_src1\n");
+		goto ack_intr;
+	}
+
+	/* Bit 1 of INT_SRC1 flags a motion interrupt — TODO confirm vs datasheet. */
+	if (ret & 0x02) {
+		ret = i2c_smbus_read_byte_data(data->client,
+					       KXCJK1013_REG_INT_SRC2);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error reading reg_int_src2\n");
+			goto ack_intr;
+		}
+
+		/* Negative-direction bits map to FALLING, positive to RISING. */
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_X,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_FALLING),
+				       data->timestamp);
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_X,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_RISING),
+				       data->timestamp);
+
+
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_Y,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_FALLING),
+				       data->timestamp);
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_Y,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_RISING),
+				       data->timestamp);
+
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_Z,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_FALLING),
+				       data->timestamp);
+		if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP)
+			iio_push_event(indio_dev,
+				       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+				       0,
+				       IIO_MOD_Z,
+				       IIO_EV_TYPE_THRESH,
+				       IIO_EV_DIR_RISING),
+				       data->timestamp);
+	}
+
+ack_intr:
+	/*
+	 * With the data-ready trigger active, try_reenable() performs the
+	 * INT_REL acknowledge; doing it here too would race with it.
+	 */
+	if (data->dready_trigger_on)
+		return IRQ_HANDLED;
+
+	ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_INT_REL);
+	if (ret < 0)
+		dev_err(&data->client->dev, "Error reading reg_int_rel\n");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Hard IRQ half: timestamp the sample, kick whichever trigger is live,
+ * and wake the threaded handler only when events are enabled.
+ */
+static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+	data->timestamp = iio_get_time_ns();
+
+	if (data->dready_trigger_on)
+		iio_trigger_poll(data->dready_trig);
+	else if (data->motion_trigger_on)
+		iio_trigger_poll(data->motion_trig);
+
+	return data->ev_enable_state ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+/*
+ * Look this device up in the driver's ACPI id table; on a match, store
+ * the chip variant and return the device name to use for the IIO device.
+ */
+static const char *kxcjk1013_match_acpi_device(struct device *dev,
+					       enum kx_chipset *chipset)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return NULL;
+
+	*chipset = (enum kx_chipset)id->driver_data;
+	return dev_name(dev);
+}
+
+/*
+ * Look up the interrupt GPIO (ACPI-provided), configure it as an input
+ * and return the corresponding irq number (negative errno on failure).
+ */
+static int kxcjk1013_gpio_probe(struct i2c_client *client,
+				struct kxcjk1013_data *data)
+{
+	struct device *dev;
+	struct gpio_desc *gpio;
+	int ret;
+
+	if (!client)
+		return -EINVAL;
+
+	dev = &client->dev;
+
+	/* data ready gpio interrupt pin */
+	gpio = devm_gpiod_get_index(dev, "kxcjk1013_int", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(dev, "acpi gpio get index failed\n");
+		return PTR_ERR(gpio);
+	}
+
+	ret = gpiod_direction_input(gpio);
+	if (ret)
+		return ret;
+
+	ret = gpiod_to_irq(gpio);
+
+	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+	return ret;
+}
+
+/*
+ * Probe: identify the chip variant (i2c id table or ACPI), initialise the
+ * hardware and register the IIO device. Triggers and the triggered buffer
+ * are set up only when an interrupt line is available.
+ */
+static int kxcjk1013_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct kxcjk1013_data *data;
+	struct iio_dev *indio_dev;
+	struct kxcjk_1013_platform_data *pdata;
+	const char *name;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	pdata = dev_get_platdata(&client->dev);
+	if (pdata)
+		data->active_high_intr = pdata->active_high_intr;
+	else
+		data->active_high_intr = true; /* default polarity */
+
+	/* Chip variant and device name from i2c id table or ACPI match. */
+	if (id) {
+		data->chipset = (enum kx_chipset)(id->driver_data);
+		name = id->name;
+	} else if (ACPI_HANDLE(&client->dev)) {
+		name = kxcjk1013_match_acpi_device(&client->dev,
+						   &data->chipset);
+	} else
+		return -ENODEV;
+
+	ret = kxcjk1013_chip_init(data);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&data->mutex);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = kxcjk1013_channels;
+	indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels);
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &kxcjk1013_info;
+
+	/*
+	 * NOTE(review): irq 0 is treated as valid here; only a negative
+	 * irq falls back to the GPIO lookup — confirm this is intended.
+	 */
+	if (client->irq < 0)
+		client->irq = kxcjk1013_gpio_probe(client, data);
+
+	if (client->irq >= 0) {
+		ret = devm_request_threaded_irq(&client->dev, client->irq,
+						kxcjk1013_data_rdy_trig_poll,
+						kxcjk1013_event_handler,
+						IRQF_TRIGGER_RISING,
+						KXCJK1013_IRQ_NAME,
+						indio_dev);
+		if (ret)
+			return ret;
+
+		data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+							   "%s-dev%d",
+							   indio_dev->name,
+							   indio_dev->id);
+		if (!data->dready_trig)
+			return -ENOMEM;
+
+		data->motion_trig = devm_iio_trigger_alloc(&client->dev,
+							  "%s-any-motion-dev%d",
+							  indio_dev->name,
+							  indio_dev->id);
+		if (!data->motion_trig)
+			return -ENOMEM;
+
+		data->dready_trig->dev.parent = &client->dev;
+		data->dready_trig->ops = &kxcjk1013_trigger_ops;
+		iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+		indio_dev->trig = data->dready_trig;
+		/* Extra reference for indio_dev->trig (default trigger). */
+		iio_trigger_get(indio_dev->trig);
+		ret = iio_trigger_register(data->dready_trig);
+		if (ret)
+			return ret;
+
+		data->motion_trig->dev.parent = &client->dev;
+		data->motion_trig->ops = &kxcjk1013_trigger_ops;
+		iio_trigger_set_drvdata(data->motion_trig, indio_dev);
+		ret = iio_trigger_register(data->motion_trig);
+		if (ret) {
+			/* NULL it so the error path skips unregistering it. */
+			data->motion_trig = NULL;
+			goto err_trigger_unregister;
+		}
+
+		ret = iio_triggered_buffer_setup(indio_dev,
+						 &iio_pollfunc_store_time,
+						 kxcjk1013_trigger_handler,
+						 NULL);
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"iio triggered buffer setup failed\n");
+			goto err_trigger_unregister;
+		}
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "unable to register iio device\n");
+		goto err_buffer_cleanup;
+	}
+
+	/* Runtime PM with autosuspend so short idle gaps keep the chip on. */
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret)
+		goto err_iio_unregister;
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev,
+					 KXCJK1013_SLEEP_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	return 0;
+
+err_iio_unregister:
+	iio_device_unregister(indio_dev);
+err_buffer_cleanup:
+	/* dready_trig non-NULL implies the buffer was set up. */
+	if (data->dready_trig)
+		iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+	if (data->motion_trig)
+		iio_trigger_unregister(data->motion_trig);
+
+	return ret;
+}
+
+/* Unwind probe in reverse order, then leave the chip in standby. */
+static int kxcjk1013_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	iio_device_unregister(indio_dev);
+
+	/* dready_trig non-NULL implies irq/trigger/buffer were set up. */
+	if (data->dready_trig) {
+		iio_triggered_buffer_cleanup(indio_dev);
+		iio_trigger_unregister(data->dready_trig);
+		iio_trigger_unregister(data->motion_trig);
+	}
+
+	mutex_lock(&data->mutex);
+	kxcjk1013_set_mode(data, STANDBY);
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: put the chip in standby regardless of current use. */
+static int kxcjk1013_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = kxcjk1013_set_mode(data, STANDBY);
+	mutex_unlock(&data->mutex);
+
+	return ret;
+}
+
+/* System resume: re-enter operating mode only if something was active. */
+static int kxcjk1013_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret = 0;
+
+	mutex_lock(&data->mutex);
+	/* Check, if the suspend occured while active */
+	if (data->dready_trigger_on || data->motion_trigger_on ||
+	    data->ev_enable_state)
+		ret = kxcjk1013_set_mode(data, OPERATION);
+	mutex_unlock(&data->mutex);
+
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int kxcjk1013_runtime_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+	return kxcjk1013_set_mode(data, STANDBY);
+}
+
+/*
+ * Runtime resume: switch to operating mode, then wait out the chip's
+ * start-up time before callers touch it again.
+ */
+static int kxcjk1013_runtime_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct kxcjk1013_data *data = iio_priv(indio_dev);
+	int ret;
+	int sleep_val;
+
+	ret = kxcjk1013_set_mode(data, OPERATION);
+	if (ret < 0)
+		return ret;
+
+	/* sleep_val is in microseconds; use usleep_range below 20 ms. */
+	sleep_val = kxcjk1013_get_startup_times(data);
+	if (sleep_val < 20000)
+		usleep_range(sleep_val, 20000);
+	else
+		msleep_interruptible(sleep_val/1000);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops kxcjk1013_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(kxcjk1013_suspend, kxcjk1013_resume)
+	SET_RUNTIME_PM_OPS(kxcjk1013_runtime_suspend,
+			   kxcjk1013_runtime_resume, NULL)
+};
+
+/* driver_data carries the chip variant (enum kx_chipset). */
+static const struct acpi_device_id kx_acpi_match[] = {
+	{"KXCJ1013", KXCJK1013},
+	{"KXCJ1008", KXCJ91008},
+	{"KXTJ1009", KXTJ21009},
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
+
+static const struct i2c_device_id kxcjk1013_id[] = {
+	{"kxcjk1013", KXCJK1013},
+	{"kxcj91008", KXCJ91008},
+	{"kxtj21009", KXTJ21009},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, kxcjk1013_id);
+
+static struct i2c_driver kxcjk1013_driver = {
+	.driver = {
+		.name	= KXCJK1013_DRV_NAME,
+		.acpi_match_table = ACPI_PTR(kx_acpi_match),
+		.pm	= &kxcjk1013_pm_ops,
+	},
+	.probe		= kxcjk1013_probe,
+	.remove		= kxcjk1013_remove,
+	.id_table	= kxcjk1013_id,
+};
+module_i2c_driver(kxcjk1013_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("KXCJK1013 accelerometer driver");
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
new file mode 100644
index 00000000000000..98ba761cbb9ce6
--- /dev/null
+++ b/drivers/iio/accel/kxsd9.c
@@ -0,0 +1,276 @@
+/*
+ * kxsd9.c simple support for the Kionix KXSD9 3D
+ * accelerometer.
+ *
+ * Copyright (c) 2008-2009 Jonathan Cameron <jic23@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The i2c interface is very similar, so shouldn't be a problem once
+ * I have a suitable wire made up.
+ *
+ * TODO: Support the motion detector
+ * Uses register address incrementing so could have a
+ * heavily optimized ring buffer access function.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Register map: X/Y/Z and AUX are 16-bit big-endian sample registers. */
+#define KXSD9_REG_X		0x00
+#define KXSD9_REG_Y		0x02
+#define KXSD9_REG_Z		0x04
+#define KXSD9_REG_AUX		0x06
+#define KXSD9_REG_RESET		0x0a
+#define KXSD9_REG_CTRL_C	0x0c
+
+/* Full-scale selection field in CTRL_C. */
+#define KXSD9_FS_MASK		0x03
+
+#define KXSD9_REG_CTRL_B	0x0d
+#define KXSD9_REG_CTRL_A	0x0e
+
+/* Top bit of the address byte selects read; writes use the raw address. */
+#define KXSD9_READ(a)		(0x80 | (a))
+#define KXSD9_WRITE(a)		(a)
+
+#define KXSD9_STATE_RX_SIZE	2
+#define KXSD9_STATE_TX_SIZE	2
+/**
+ * struct kxsd9_state - device related storage
+ * @buf_lock:	protect the rx and tx buffers.
+ * @us:		spi device
+ * @rx:		single rx buffer storage
+ * @tx:		single tx buffer storage
+ **/
+struct kxsd9_state {
+	struct mutex buf_lock;
+	struct spi_device *us;
+	/* ____cacheline_aligned keeps the DMA-able buffers in their own line */
+	u8 rx[KXSD9_STATE_RX_SIZE] ____cacheline_aligned;
+	u8 tx[KXSD9_STATE_TX_SIZE];
+};
+
+#define KXSD9_SCALE_2G "0.011978"
+#define KXSD9_SCALE_4G "0.023927"
+#define KXSD9_SCALE_6G "0.035934"
+#define KXSD9_SCALE_8G "0.047853"
+
+/* reverse order */
+static const int kxsd9_micro_scales[4] = { 47853, 35934, 23927, 11978 };
+
+/*
+ * Program a new full-scale range: match @micro against the supported
+ * micro-scale table, then read-modify-write the FS field of CTRL_C.
+ * Returns 0 on success or a negative errno.
+ */
+static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
+{
+	int ret, i;
+	struct kxsd9_state *st = iio_priv(indio_dev);
+	bool foundit = false;
+
+	for (i = 0; i < 4; i++)
+		if (micro == kxsd9_micro_scales[i]) {
+			foundit = true;
+			break;
+		}
+	if (!foundit)
+		return -EINVAL;
+
+	mutex_lock(&st->buf_lock);
+	ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+	/*
+	 * spi_w8r8() returns the register value (>= 0) on success and a
+	 * negative errno on failure, so only ret < 0 is an error; the old
+	 * "if (ret)" check bailed out on any non-zero register content.
+	 */
+	if (ret < 0)
+		goto error_ret;
+	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+	st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
+
+	ret = spi_write(st->us, st->tx, 2);
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/*
+ * Read one 16-bit sample register over SPI: write the read-address byte,
+ * wait 200 us, then clock in two data bytes. The 12 valid bits are
+ * left-justified, so the low nibble of the second byte is masked off.
+ * Returns the (non-negative) sample or a negative errno.
+ */
+static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
+{
+	int ret;
+	struct kxsd9_state *st = iio_priv(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.bits_per_word = 8,
+			.len = 1,
+			.delay_usecs = 200,
+			.tx_buf = st->tx,
+		}, {
+			.bits_per_word = 8,
+			.len = 2,
+			.rx_buf = st->rx,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = KXSD9_READ(address);
+	ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
+	if (!ret)
+		ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/* Space-separated list of the four selectable scales, lowest first. */
+static IIO_CONST_ATTR(accel_scale_available,
+		      KXSD9_SCALE_2G " "
+		      KXSD9_SCALE_4G " "
+		      KXSD9_SCALE_6G " "
+		      KXSD9_SCALE_8G);
+
+static struct attribute *kxsd9_attributes[] = {
+	&iio_const_attr_accel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+/* Only the scale is writable, and only its micro part may be non-zero. */
+static int kxsd9_write_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int val,
+			   int val2,
+			   long mask)
+{
+	if (mask != IIO_CHAN_INFO_SCALE)
+		return -EINVAL;
+
+	/* Check no integer component */
+	if (val)
+		return -EINVAL;
+
+	return kxsd9_write_scale(indio_dev, val2);
+}
+
+/*
+ * Read a raw sample or the current scale.
+ * Fixes vs. original:
+ *  - RAW case returned the raw sample instead of the IIO_VAL_INT format
+ *    code, so the core could not format the value;
+ *  - SCALE case used "if (ret)" on spi_w8r8(), which returns the register
+ *    value (>= 0) on success, so any non-zero CTRL_C aborted the read;
+ *  - SCALE case never set *val, leaving the integer part uninitialized.
+ */
+static int kxsd9_read_raw(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  int *val, int *val2, long mask)
+{
+	int ret = -EINVAL;
+	struct kxsd9_state *st = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = kxsd9_read(indio_dev, chan->address);
+		if (ret < 0)
+			goto error_ret;
+		*val = ret;
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+		if (ret < 0)
+			goto error_ret;
+		*val = 0;
+		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+	}
+
+error_ret:
+	return ret;
+}
+/* Per-axis channel: raw readout plus the type-shared scale. */
+#define KXSD9_ACCEL_CHAN(axis)						\
+	{								\
+		.type = IIO_ACCEL,					\
+		.modified = 1,						\
+		.channel2 = IIO_MOD_##axis,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.address = KXSD9_REG_##axis,				\
+	}
+
+static const struct iio_chan_spec kxsd9_channels[] = {
+	KXSD9_ACCEL_CHAN(X), KXSD9_ACCEL_CHAN(Y), KXSD9_ACCEL_CHAN(Z),
+	{
+		/* Auxiliary ADC input exposed as a voltage channel. */
+		.type = IIO_VOLTAGE,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.indexed = 1,
+		.address = KXSD9_REG_AUX,
+	}
+};
+
+static const struct attribute_group kxsd9_attribute_group = {
+	.attrs = kxsd9_attributes,
+};
+
+/*
+ * Bring the chip out of reset/standby by programming CTRL_B then CTRL_C.
+ * Uses the register macros instead of the original bare 0x0d/0x0c magic
+ * numbers, and drops the stray ';' after the function body.
+ * The data values (0x40, 0x9b) are kept from the original driver —
+ * presumably enable + default filter/range; confirm against the KXSD9
+ * datasheet before changing them.
+ */
+static int kxsd9_power_up(struct kxsd9_state *st)
+{
+	int ret;
+
+	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_B);
+	st->tx[1] = 0x40;
+	ret = spi_write(st->us, st->tx, 2);
+	if (ret)
+		return ret;
+
+	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+	st->tx[1] = 0x9b;
+	return spi_write(st->us, st->tx, 2);
+}
+
+static const struct iio_info kxsd9_info = {
+	.read_raw = &kxsd9_read_raw,
+	.write_raw = &kxsd9_write_raw,
+	.attrs = &kxsd9_attribute_group,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device, configure the SPI link, power the chip
+ * up and register with the IIO core. Unlike the original, the return
+ * values of spi_setup() and kxsd9_power_up() are checked instead of
+ * being silently discarded.
+ */
+static int kxsd9_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct kxsd9_state *st;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	indio_dev->channels = kxsd9_channels;
+	indio_dev->num_channels = ARRAY_SIZE(kxsd9_channels);
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &kxsd9_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	spi->mode = SPI_MODE_0;
+	ret = spi_setup(spi);
+	if (ret)
+		return ret;
+
+	ret = kxsd9_power_up(st);
+	if (ret)
+		return ret;
+
+	return iio_device_register(indio_dev);
+}
+
+/* Tear-down mirror of probe: only the IIO registration to undo. */
+static int kxsd9_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+	iio_device_unregister(indio_dev);
+	return 0;
+}
+
+static const struct spi_device_id kxsd9_id[] = {
+	{"kxsd9", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, kxsd9_id);
+
+static struct spi_driver kxsd9_driver = {
+	.driver = {
+		.name = "kxsd9",
+		.owner = THIS_MODULE,
+	},
+	.probe = kxsd9_probe,
+	.remove = kxsd9_remove,
+	.id_table = kxsd9_id,
+};
+module_spi_driver(kxsd9_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
new file mode 100644
index 00000000000000..5b80657883bb8f
--- /dev/null
+++ b/drivers/iio/accel/mma8452.c
@@ -0,0 +1,451 @@
+/*
+ * mma8452.c - Support for Freescale MMA8452Q 3-axis 12-bit accelerometer
+ *
+ * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
+ *
+ * TODO: interrupt, thresholding, orientation / freefall events, autosleep
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+/* Register map. */
+#define MMA8452_STATUS 0x00
+#define MMA8452_OUT_X 0x01 /* MSB first, 12-bit */
+#define MMA8452_OUT_Y 0x03
+#define MMA8452_OUT_Z 0x05
+#define MMA8452_WHO_AM_I 0x0d
+#define MMA8452_DATA_CFG 0x0e
+#define MMA8452_OFF_X 0x2f
+#define MMA8452_OFF_Y 0x30
+#define MMA8452_OFF_Z 0x31
+#define MMA8452_CTRL_REG1 0x2a
+#define MMA8452_CTRL_REG2 0x2b
+
+/* All three of the X/Y/Z data-ready bits in STATUS. */
+#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
+
+/* Output data rate field and active/standby bit in CTRL_REG1. */
+#define MMA8452_CTRL_DR_MASK (BIT(5) | BIT(4) | BIT(3))
+#define MMA8452_CTRL_DR_SHIFT 3
+#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
+#define MMA8452_CTRL_ACTIVE BIT(0)
+
+/* Full-scale selection in DATA_CFG. */
+#define MMA8452_DATA_CFG_FS_MASK (BIT(1) | BIT(0))
+#define MMA8452_DATA_CFG_FS_2G 0
+#define MMA8452_DATA_CFG_FS_4G 1
+#define MMA8452_DATA_CFG_FS_8G 2
+
+#define MMA8452_DEVICE_ID 0x2a
+
+struct mma8452_data {
+	struct i2c_client *client;
+	/* Serializes standby/config/active sequences and buffered reads. */
+	struct mutex lock;
+	/* Cached copies of the two config registers written at probe. */
+	u8 ctrl_reg1;
+	u8 data_cfg;
+};
+
+/*
+ * Poll STATUS until all three axes report fresh data (up to ~3 s),
+ * sleeping 20 ms between attempts. 0 on success, negative errno on
+ * i2c failure, -EIO on timeout.
+ */
+static int mma8452_drdy(struct mma8452_data *data)
+{
+	int tries;
+
+	for (tries = 150; tries > 0; tries--) {
+		int ret = i2c_smbus_read_byte_data(data->client,
+						   MMA8452_STATUS);
+		if (ret < 0)
+			return ret;
+		if ((ret & MMA8452_STATUS_DRDY) == MMA8452_STATUS_DRDY)
+			return 0;
+		msleep(20);
+	}
+
+	dev_err(&data->client->dev, "data not ready\n");
+	return -EIO;
+}
+
+/* Wait for a complete X/Y/Z sample, then burst-read all six bytes. */
+static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
+{
+	int ret;
+
+	ret = mma8452_drdy(data);
+	if (ret < 0)
+		return ret;
+
+	return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
+					     3 * sizeof(__be16), (u8 *)buf);
+}
+
+/*
+ * Format a table of {integer, micro} pairs as "%d.%06d" values, iterating
+ * from the last table entry to the first, and end the list with a newline.
+ * Guard against n == 0: the original unconditionally wrote buf[len - 1],
+ * which is buf[-1] for an empty table.
+ */
+static ssize_t mma8452_show_int_plus_micros(char *buf,
+					    const int (*vals)[2], int n)
+{
+	size_t len = 0;
+
+	if (n <= 0)
+		return 0;
+
+	while (n-- > 0)
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"%d.%06d ", vals[n][0], vals[n][1]);
+
+	/* replace trailing space by newline */
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+/*
+ * Find the index of an exact {val, val2} pair in a table of
+ * {integer, micro} entries; -EINVAL if absent.
+ */
+static int mma8452_get_int_plus_micros_index(const int (*vals)[2], int n,
+					     int val, int val2)
+{
+	int i;
+
+	for (i = n - 1; i >= 0; i--)
+		if (vals[i][0] == val && vals[i][1] == val2)
+			return i;
+
+	return -EINVAL;
+}
+
+/* ODR table, indexed by the CTRL_REG1 DR field (descending frequency). */
+static const int mma8452_samp_freq[8][2] = {
+	{800, 0}, {400, 0}, {200, 0}, {100, 0}, {50, 0}, {12, 500000},
+	{6, 250000}, {1, 560000}
+};
+
+/*
+ * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
+ * The userspace interface uses m/s^2 and we declare micro units
+ * So scale factor is given by:
+ *	g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ */
+static const int mma8452_scales[3][2] = {
+	{0, 9577}, {0, 19154}, {0, 38307}
+};
+
+static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	return mma8452_show_int_plus_micros(buf, mma8452_samp_freq,
+					    ARRAY_SIZE(mma8452_samp_freq));
+}
+
+static ssize_t mma8452_show_scale_avail(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return mma8452_show_int_plus_micros(buf, mma8452_scales,
+					    ARRAY_SIZE(mma8452_scales));
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
+static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
+		       mma8452_show_scale_avail, NULL, 0);
+
+/* Map a {val, val2} frequency to its DR-field index; -EINVAL if absent. */
+static int mma8452_get_samp_freq_index(struct mma8452_data *data,
+				       int val, int val2)
+{
+	return mma8452_get_int_plus_micros_index(mma8452_samp_freq,
+		ARRAY_SIZE(mma8452_samp_freq), val, val2);
+}
+
+/* Map a {val, val2} scale to its FS-field index; -EINVAL if absent. */
+static int mma8452_get_scale_index(struct mma8452_data *data,
+				   int val, int val2)
+{
+	return mma8452_get_int_plus_micros_index(mma8452_scales,
+		ARRAY_SIZE(mma8452_scales), val, val2);
+}
+
+/*
+ * Direct-mode readout: raw sample (rejected while the buffer runs),
+ * scale, sample frequency, or per-axis calibration offset.
+ */
+static int mma8452_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct mma8452_data *data = iio_priv(indio_dev);
+	__be16 buffer[3];
+	int i, ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev))
+			return -EBUSY;
+
+		mutex_lock(&data->lock);
+		ret = mma8452_read(data, buffer);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+		/* 12-bit big-endian sample, left-justified in 16 bits. */
+		*val = sign_extend32(
+			be16_to_cpu(buffer[chan->scan_index]) >> 4, 11);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		/* Cached FS field indexes the scale table directly. */
+		i = data->data_cfg & MMA8452_DATA_CFG_FS_MASK;
+		*val = mma8452_scales[i][0];
+		*val2 = mma8452_scales[i][1];
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		i = (data->ctrl_reg1 & MMA8452_CTRL_DR_MASK) >>
+			MMA8452_CTRL_DR_SHIFT;
+		*val = mma8452_samp_freq[i][0];
+		*val2 = mma8452_samp_freq[i][1];
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		/* Offset registers are consecutive, one signed byte each. */
+		ret = i2c_smbus_read_byte_data(data->client, MMA8452_OFF_X +
+			chan->scan_index);
+		if (ret < 0)
+			return ret;
+		*val = sign_extend32(ret, 7);
+		return IIO_VAL_INT;
+	}
+	return -EINVAL;
+}
+
+/* Clear ACTIVE: config registers are writable only in standby. */
+static int mma8452_standby(struct mma8452_data *data)
+{
+	u8 reg = data->ctrl_reg1 & ~MMA8452_CTRL_ACTIVE;
+
+	return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1, reg);
+}
+
+/* Restore the cached CTRL_REG1 (which has ACTIVE set). */
+static int mma8452_active(struct mma8452_data *data)
+{
+	return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
+					 data->ctrl_reg1);
+}
+
+/*
+ * Write one config register using the required standby -> write -> active
+ * sequence, all under data->lock. 0 on success, negative errno otherwise.
+ */
+static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
+{
+	int ret;
+
+	mutex_lock(&data->lock);
+
+	/* config can only be changed when in standby */
+	ret = mma8452_standby(data);
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_smbus_write_byte_data(data->client, reg, val);
+	if (ret < 0)
+		goto fail;
+
+	ret = mma8452_active(data);
+	if (ret < 0)
+		goto fail;
+
+	ret = 0;
+fail:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+/*
+ * Writable attributes: sample frequency, scale and per-axis calibration
+ * offset. All are refused while the triggered buffer is running, since
+ * each write round-trips through standby mode.
+ */
+static int mma8452_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct mma8452_data *data = iio_priv(indio_dev);
+	int i;
+
+	if (iio_buffer_enabled(indio_dev))
+		return -EBUSY;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		i = mma8452_get_samp_freq_index(data, val, val2);
+		if (i < 0)
+			return -EINVAL;
+
+		/* Update the cached register, then push it to the chip. */
+		data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
+		data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
+		return mma8452_change_config(data, MMA8452_CTRL_REG1,
+					     data->ctrl_reg1);
+	case IIO_CHAN_INFO_SCALE:
+		i = mma8452_get_scale_index(data, val, val2);
+		if (i < 0)
+			return -EINVAL;
+		data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
+		data->data_cfg |= i;
+		return mma8452_change_config(data, MMA8452_DATA_CFG,
+					     data->data_cfg);
+	case IIO_CHAN_INFO_CALIBBIAS:
+		/* Offset registers hold one signed byte per axis. */
+		if (val < -128 || val > 127)
+			return -EINVAL;
+		return mma8452_change_config(data, MMA8452_OFF_X +
+					     chan->scan_index, val);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Triggered-buffer bottom half: burst-read all three axes and push them
+ * with a timestamp.
+ * NOTE(review): buffer is a plain u8 array cast to __be16 * — presumably
+ * adequately aligned on the stack here, but not guaranteed by the
+ * declaration; confirm against the alignment requirements of
+ * iio_push_to_buffers_with_timestamp().
+ */
+static irqreturn_t mma8452_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct mma8452_data *data = iio_priv(indio_dev);
+	u8 buffer[16]; /* 3 16-bit channels + padding + ts */
+	int ret;
+
+	ret = mma8452_read(data, (__be16 *) buffer);
+	if (ret < 0)
+		goto done;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+					   iio_get_time_ns());
+
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+	return IRQ_HANDLED;
+}
+
+/* 12-bit signed big-endian samples, left-justified in 16 bits (shift 4). */
+#define MMA8452_CHANNEL(axis, idx) { \
+	.type = IIO_ACCEL, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_##axis, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_CALIBBIAS), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+		BIT(IIO_CHAN_INFO_SCALE), \
+	.scan_index = idx, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = 12, \
+		.storagebits = 16, \
+		.shift = 4, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+static const struct iio_chan_spec mma8452_channels[] = {
+	MMA8452_CHANNEL(X, 0),
+	MMA8452_CHANNEL(Y, 1),
+	MMA8452_CHANNEL(Z, 2),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static struct attribute *mma8452_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group mma8452_group = {
+	.attrs = mma8452_attributes,
+};
+
+static const struct iio_info mma8452_info = {
+	.attrs = &mma8452_group,
+	.read_raw = &mma8452_read_raw,
+	.write_raw = &mma8452_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/* Only all-axes-on (0x7) scans are supported (burst read of X, Y, Z). */
+static const unsigned long mma8452_scan_masks[] = {0x7, 0};
+
+/*
+ * Probe: verify WHO_AM_I, program the default data rate and 2g range,
+ * set up the triggered buffer and register the IIO device.
+ */
+static int mma8452_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct mma8452_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, MMA8452_WHO_AM_I);
+	if (ret < 0)
+		return ret;
+	if (ret != MMA8452_DEVICE_ID)
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->info = &mma8452_info;
+	indio_dev->name = id->name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = mma8452_channels;
+	indio_dev->num_channels = ARRAY_SIZE(mma8452_channels);
+	indio_dev->available_scan_masks = mma8452_scan_masks;
+
+	/* Activate the device at the default 50 Hz rate. */
+	data->ctrl_reg1 = MMA8452_CTRL_ACTIVE |
+		(MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
+	ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG1,
+					data->ctrl_reg1);
+	if (ret < 0)
+		return ret;
+
+	data->data_cfg = MMA8452_DATA_CFG_FS_2G;
+	ret = i2c_smbus_write_byte_data(client, MMA8452_DATA_CFG,
+					data->data_cfg);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 mma8452_trigger_handler, NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto buffer_cleanup;
+	return 0;
+
+buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return ret;
+}
+
+/* Tear down in reverse order of probe, then put the chip in standby. */
+static int mma8452_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct mma8452_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	mma8452_standby(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend puts the part in standby; resume restores the cached config. */
+static int mma8452_suspend(struct device *dev)
+{
+	return mma8452_standby(iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev))));
+}
+
+static int mma8452_resume(struct device *dev)
+{
+	return mma8452_active(iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev))));
+}
+
+static SIMPLE_DEV_PM_OPS(mma8452_pm_ops, mma8452_suspend, mma8452_resume);
+#define MMA8452_PM_OPS (&mma8452_pm_ops)
+#else
+#define MMA8452_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mma8452_id[] = {
+	{ "mma8452", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mma8452_id);
+
+static const struct of_device_id mma8452_dt_ids[] = {
+	{ .compatible = "fsl,mma8452" },
+	{ }
+};
+/*
+ * Export the OF table for module autoloading on device-tree systems;
+ * without it, the i2c core cannot match "fsl,mma8452" to this module.
+ */
+MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
+
+static struct i2c_driver mma8452_driver = {
+	.driver = {
+		.name	= "mma8452",
+		.of_match_table = of_match_ptr(mma8452_dt_ids),
+		.pm	= MMA8452_PM_OPS,
+	},
+	.probe = mma8452_probe,
+	.remove = mma8452_remove,
+	.id_table = mma8452_id,
+};
+module_i2c_driver(mma8452_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MMA8452 accelerometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
new file mode 100644
index 00000000000000..fa964603430501
--- /dev/null
+++ b/drivers/iio/accel/st_accel.h
@@ -0,0 +1,55 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_ACCEL_H
+#define ST_ACCEL_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
+#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
+#define LSM330D_ACCEL_DEV_NAME "lsm330d_accel"
+#define LSM330DL_ACCEL_DEV_NAME "lsm330dl_accel"
+#define LSM330DLC_ACCEL_DEV_NAME "lsm330dlc_accel"
+#define LIS331DLH_ACCEL_DEV_NAME "lis331dlh"
+#define LSM303DL_ACCEL_DEV_NAME "lsm303dl_accel"
+#define LSM303DLH_ACCEL_DEV_NAME "lsm303dlh_accel"
+#define LSM303DLM_ACCEL_DEV_NAME "lsm303dlm_accel"
+#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
+
+/**
+* struct st_sensors_platform_data - default accel platform data
+* @drdy_int_pin: default accel DRDY is available on INT1 pin.
+*/
+static const struct st_sensors_platform_data default_accel_pdata = {
+ .drdy_int_pin = 1,
+};
+
+int st_accel_common_probe(struct iio_dev *indio_dev);
+void st_accel_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_accel_allocate_ring(struct iio_dev *indio_dev);
+void st_accel_deallocate_ring(struct iio_dev *indio_dev);
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_ACCEL_TRIGGER_SET_STATE (&st_accel_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_ACCEL_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_ACCEL_H */
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
new file mode 100644
index 00000000000000..a1e642ee13d6a4
--- /dev/null
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -0,0 +1,105 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+static int st_accel_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return st_sensors_set_enable(indio_dev, true);
+}
+
+static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ adata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (adata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = st_sensors_set_axis_enable(indio_dev,
+ (u8)indio_dev->active_scan_mask[0]);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ return err;
+
+st_accel_buffer_postenable_error:
+ kfree(adata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_accel_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_accel_buffer_predisable_error:
+ kfree(adata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
+ .preenable = &st_accel_buffer_preenable,
+ .postenable = &st_accel_buffer_postenable,
+ .predisable = &st_accel_buffer_predisable,
+};
+
+int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
+}
+
+void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
new file mode 100644
index 00000000000000..53f32629283a0d
--- /dev/null
+++ b/drivers/iio/accel/st_accel_core.c
@@ -0,0 +1,540 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+#define ST_ACCEL_NUMBER_DATA_CHANNELS 3
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_ACCEL_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_ACCEL_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_ACCEL_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_ACCEL_FS_AVL_2G 2
+#define ST_ACCEL_FS_AVL_4G 4
+#define ST_ACCEL_FS_AVL_6G 6
+#define ST_ACCEL_FS_AVL_8G 8
+#define ST_ACCEL_FS_AVL_16G 16
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_ACCEL_1_WAI_EXP 0x33
+#define ST_ACCEL_1_ODR_ADDR 0x20
+#define ST_ACCEL_1_ODR_MASK 0xf0
+#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
+#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
+#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
+#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
+#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
+#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
+#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
+#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
+#define ST_ACCEL_1_FS_ADDR 0x23
+#define ST_ACCEL_1_FS_MASK 0x30
+#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
+#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
+#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
+#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
+#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
+#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
+#define ST_ACCEL_1_BDU_ADDR 0x23
+#define ST_ACCEL_1_BDU_MASK 0x80
+#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
+#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
+#define ST_ACCEL_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_ACCEL_2_WAI_EXP 0x32
+#define ST_ACCEL_2_ODR_ADDR 0x20
+#define ST_ACCEL_2_ODR_MASK 0x18
+#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
+#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
+#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
+#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
+#define ST_ACCEL_2_PW_ADDR 0x20
+#define ST_ACCEL_2_PW_MASK 0xe0
+#define ST_ACCEL_2_FS_ADDR 0x23
+#define ST_ACCEL_2_FS_MASK 0x30
+#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
+#define ST_ACCEL_2_BDU_ADDR 0x23
+#define ST_ACCEL_2_BDU_MASK 0x80
+#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
+#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
+#define ST_ACCEL_2_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 3 */
+#define ST_ACCEL_3_WAI_EXP 0x40
+#define ST_ACCEL_3_ODR_ADDR 0x20
+#define ST_ACCEL_3_ODR_MASK 0xf0
+#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
+#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
+#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
+#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
+#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
+#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
+#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
+#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
+#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
+#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
+#define ST_ACCEL_3_FS_ADDR 0x24
+#define ST_ACCEL_3_FS_MASK 0x38
+#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
+#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
+#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
+#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
+#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
+#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
+#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
+#define ST_ACCEL_3_BDU_ADDR 0x20
+#define ST_ACCEL_3_BDU_MASK 0x08
+#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
+#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
+#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
+#define ST_ACCEL_3_IG1_EN_ADDR 0x23
+#define ST_ACCEL_3_IG1_EN_MASK 0x08
+#define ST_ACCEL_3_MULTIREAD_BIT false
+
+static const struct iio_chan_spec st_accel_12bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 12, 16,
+ ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 12, 16,
+ ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 12, 16,
+ ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_accel_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+ ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+ ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+ ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ {
+ .wai = ST_ACCEL_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3DH_ACCEL_DEV_NAME,
+ [1] = LSM303DLHC_ACCEL_DEV_NAME,
+ [2] = LSM330D_ACCEL_DEV_NAME,
+ [3] = LSM330DL_ACCEL_DEV_NAME,
+ [4] = LSM330DLC_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
+ { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
+ { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
+ { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_1_FS_ADDR,
+ .mask = ST_ACCEL_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_1_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_1_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_1_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_1_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_1_BDU_ADDR,
+ .mask = ST_ACCEL_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS331DLH_ACCEL_DEV_NAME,
+ [1] = LSM303DL_ACCEL_DEV_NAME,
+ [2] = LSM303DLH_ACCEL_DEV_NAME,
+ [3] = LSM303DLM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_2_ODR_ADDR,
+ .mask = ST_ACCEL_2_ODR_MASK,
+ .odr_avl = {
+ { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
+ { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
+ { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_2_PW_ADDR,
+ .mask = ST_ACCEL_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_2_FS_ADDR,
+ .mask = ST_ACCEL_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_2_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_2_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_2_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_2_BDU_ADDR,
+ .mask = ST_ACCEL_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_3_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM330_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .odr_avl = {
+ { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
+ { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
+ { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
+ { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
+ { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
+ { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_3_FS_ADDR,
+ .mask = ST_ACCEL_3_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_3_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_3_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_6G,
+ .value = ST_ACCEL_3_FS_AVL_6_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_3_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ },
+ [4] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_3_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_3_BDU_ADDR,
+ .mask = ST_ACCEL_3_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
+ .ig1 = {
+ .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
+ .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ },
+ },
+ .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = adata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adata->odr;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&indio_dev->mlock);
+ err = st_sensors_set_odr(indio_dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return err;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_accel_scale_available);
+
+static struct attribute *st_accel_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_accel_attribute_group = {
+ .attrs = st_accel_attributes,
+};
+
+static const struct iio_info accel_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_accel_attribute_group,
+ .read_raw = &st_accel_read_raw,
+ .write_raw = &st_accel_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_accel_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+};
+#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
+#else
+#define ST_ACCEL_TRIGGER_OPS NULL
+#endif
+
+int st_accel_common_probe(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+ int irq = adata->get_irq_data_ready(indio_dev);
+ int err;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &accel_info;
+
+ st_sensors_power_enable(indio_dev);
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_accel_sensors_settings),
+ st_accel_sensors_settings);
+ if (err < 0)
+ return err;
+
+ adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
+ adata->multiread_bit = adata->sensor_settings->multi_read_bit;
+ indio_dev->channels = adata->sensor_settings->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ adata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &adata->sensor_settings->fs.fs_avl[0];
+ adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
+
+ if (!adata->dev->platform_data)
+ adata->dev->platform_data =
+ (struct st_sensors_platform_data *)&default_accel_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
+ if (err < 0)
+ return err;
+
+ err = st_accel_allocate_ring(indio_dev);
+ if (err < 0)
+ return err;
+
+ if (irq > 0) {
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_ACCEL_TRIGGER_OPS);
+ if (err < 0)
+ goto st_accel_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_accel_device_register_error;
+
+ dev_info(&indio_dev->dev, "registered accelerometer %s\n",
+ indio_dev->name);
+
+ return 0;
+
+st_accel_device_register_error:
+ if (irq > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_accel_probe_trigger_error:
+ st_accel_deallocate_ring(indio_dev);
+
+ return err;
+}
+EXPORT_SYMBOL(st_accel_common_probe);
+
+void st_accel_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ st_sensors_power_disable(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (adata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+
+ st_accel_deallocate_ring(indio_dev);
+}
+EXPORT_SYMBOL(st_accel_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
new file mode 100644
index 00000000000000..c7246bdd30b953
--- /dev/null
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -0,0 +1,129 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_accel.h"
+
+#ifdef CONFIG_OF
+static const struct of_device_id st_accel_of_match[] = {
+ {
+ .compatible = "st,lsm303dlh-accel",
+ .data = LSM303DLH_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303dlhc-accel",
+ .data = LSM303DLHC_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3dh-accel",
+ .data = LIS3DH_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330d-accel",
+ .data = LSM330D_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dl-accel",
+ .data = LSM330DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dlc-accel",
+ .data = LSM330DLC_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis331dlh-accel",
+ .data = LIS331DLH_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303dl-accel",
+ .data = LSM303DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303dlm-accel",
+ .data = LSM303DLM_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330-accel",
+ .data = LSM330_ACCEL_DEV_NAME,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_accel_of_match);
+#else
+#define st_accel_of_match NULL
+#endif
+
+static int st_accel_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adata));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adata = iio_priv(indio_dev);
+ st_sensors_of_i2c_probe(client, st_accel_of_match);
+
+ st_sensors_i2c_configure(indio_dev, client, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int st_accel_i2c_remove(struct i2c_client *client)
+{
+ st_accel_common_remove(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
+
+static struct i2c_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-i2c",
+ .of_match_table = of_match_ptr(st_accel_of_match),
+ },
+ .probe = st_accel_i2c_probe,
+ .remove = st_accel_i2c_remove,
+ .id_table = st_accel_id_table,
+};
+module_i2c_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
new file mode 100644
index 00000000000000..12ec29389e4ba1
--- /dev/null
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -0,0 +1,77 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_accel.h"
+
+static int st_accel_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adata));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adata = iio_priv(indio_dev);
+
+ st_sensors_spi_configure(indio_dev, spi, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int st_accel_spi_remove(struct spi_device *spi)
+{
+ st_accel_common_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_accel_id_table);
+
+static struct spi_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-spi",
+ },
+ .probe = st_accel_spi_probe,
+ .remove = st_accel_spi_remove,
+ .id_table = st_accel_id_table,
+};
+module_spi_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
new file mode 100644
index 00000000000000..5b2856cb674a69
--- /dev/null
+++ b/drivers/iio/adc/Kconfig
@@ -0,0 +1,324 @@
+#
+# ADC drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Analog to digital converters"
+
+config AD_SIGMA_DELTA
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config AD7266
+ tristate "Analog Devices AD7265/AD7266 ADC driver"
+ depends on SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7265 and AD7266
+ ADCs.
+
+config AD7291
+ tristate "Analog Devices AD7291 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices AD7291
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7291.
+
+config AD7298
+ tristate "Analog Devices AD7298 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7298
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7298.
+
+config AD7476
+ tristate "Analog Devices AD7476 and similar 1-channel ADCs driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7273, AD7274, AD7276,
+ AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
+ AD7495, AD7910, AD7920, AD7920 SPI analog to digital converters (ADC).
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7476.
+
+config AD7791
+ tristate "Analog Devices AD7791 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7787, AD7788, AD7789,
+ AD7790 and AD7791 SPI analog to digital converters (ADC). If unsure, say
+ N (but it is safe to say "Y").
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7791.
+
+config AD7793
+ tristate "Analog Devices AD7793 and similar ADCs driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7785, AD7792, AD7793,
+ AD7794 and AD7795 SPI analog to digital converters (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called AD7793.
+
+config AD7887
+ tristate "Analog Devices AD7887 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices
+ AD7887 SPI analog to digital converter (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7887.
+
+config AD7923
+ tristate "Analog Devices AD7923 and similar ADCs driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices
+ AD7904, AD7914, AD7923, AD7924 4 Channel ADCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7923.
+
+config AD799X
+ tristate "Analog Devices AD799x ADC driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices:
+ ad7991, ad7995, ad7999, ad7992, ad7993, ad7994, ad7997, ad7998
+ i2c analog to digital converters (ADC). Provides direct access
+ via sysfs.
+
+config AT91_ADC
+ tristate "Atmel AT91 ADC"
+ depends on ARCH_AT91
+ depends on INPUT
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select SYSFS
+ help
+ Say yes here to build support for Atmel AT91 ADC.
+
+config AXP288_ADC
+ tristate "X-Powers AXP288 ADC driver"
+ depends on MFD_AXP20X
+ help
+ Say yes here to have support for X-Powers power management IC (PMIC) ADC
+ device. Depending on platform configuration, this general purpose ADC can
+ be used for sampling sensors such as thermal resistors.
+
+config EXYNOS_ADC
+ tristate "Exynos ADC driver support"
+ depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
+ help
+ Core support for the ADC block found in the Samsung EXYNOS series
+ of SoCs for drivers such as the touchscreen and hwmon to use to share
+ this resource.
+
+config LP8788_ADC
+ tristate "LP8788 ADC driver"
+ depends on MFD_LP8788
+ help
+ Say yes here to build support for TI LP8788 ADC.
+
+config MAX1027
+ tristate "Maxim max1027 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Maxim SPI ADC models
+ max1027, max1029 and max1031.
+
+config MAX1363
+ tristate "Maxim max1363 ADC driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for many Maxim i2c analog to digital
+ converters (ADC). (max1361, max1362, max1363, max1364, max1036,
+ max1037, max1038, max1039, max1136, max1136, max1137, max1138,
+ max1139, max1236, max1237, max11238, max1239, max11600, max11601,
+ max11602, max11603, max11604, max11605, max11606, max11607,
+ max11608, max11609, max11610, max11611, max11612, max11613,
+ max11614, max11615, max11616, max11617, max11644, max11645,
+ max11646, max11647) Provides direct access via sysfs and buffered
+ data via the iio dev interface.
+
+config MCP320X
+ tristate "Microchip Technology MCP3204/08"
+ depends on SPI
+ help
+ Say yes here to build support for Microchip Technology's MCP3204 or
+ MCP3208 analog to digital converter.
+
+ This driver can also be built as a module. If so, the module will be
+ called mcp320x.
+
+config MCP3422
+ tristate "Microchip Technology MCP3422/3/4/6/7/8 driver"
+ depends on I2C
+ help
+ Say yes here to build support for Microchip Technology's
+ MCP3422, MCP3423, MCP3424, MCP3426, MCP3427 or MCP3428
+ analog to digital converters.
+
+ This driver can also be built as a module. If so, the module will be
+ called mcp3422.
+
+config MEN_Z188_ADC
+ tristate "MEN 16z188 ADC IP Core support"
+ depends on MCB
+ help
+ Say yes here to enable support for the MEN 16z188 ADC IP-Core on a MCB
+ carrier.
+
+ This driver can also be built as a module. If so, the module will be
+ called men_z188_adc.
+
+config NAU7802
+ tristate "Nuvoton NAU7802 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Nuvoton NAU7802 ADC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nau7802.
+
+config QCOM_SPMI_IADC
+ tristate "Qualcomm SPMI PMIC current ADC"
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+ This is the IIO Current ADC driver for Qualcomm QPNP IADC Chip.
+
+ The driver supports single mode operation to read from one of two
+ channels (external or internal). Hardware have additional
+ channels internally used for gain and offset calibration.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-spmi-iadc.
+
+config ROCKCHIP_SARADC
+ tristate "Rockchip SARADC driver"
+ depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+ help
+ Say yes here to build support for the SARADC found in SoCs from
+ Rockchip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rockchip_saradc.
+
+config TI_ADC081C
+ tristate "Texas Instruments ADC081C021/027"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADC081C021
+ and ADC081C027 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc081c.
+
+config TI_ADC128S052
+ tristate "Texas Instruments ADC128S052"
+ depends on SPI
+ help
+ If you say yes here you get support for Texas Instruments ADC128S052
+ chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc128s052.
+
+config TI_AM335X_ADC
+ tristate "TI's AM335X ADC driver"
+ depends on MFD_TI_AM335X_TSCADC
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to build support for Texas Instruments ADC
+ driver which is also a MFD client.
+
+config TWL4030_MADC
+ tristate "TWL4030 MADC (Monitoring A/D Converter)"
+ depends on TWL4030_CORE
+ help
+ This driver provides support for Triton TWL4030-MADC. The
+ driver supports both RT and SW conversion methods.
+
+ This driver can also be built as a module. If so, the module will be
+ called twl4030-madc.
+
+config TWL6030_GPADC
+ tristate "TWL6030 GPADC (General Purpose A/D Converter) Support"
+ depends on TWL4030_CORE
+ default n
+ help
+ Say yes here if you want support for the TWL6030/TWL6032 General
+ Purpose A/D Converter. This will add support for battery type
+ detection, battery voltage and temperature measurement, die
+ temperature measurement, system supply voltage, audio accessory,
+ USB ID detection.
+
+ This driver can also be built as a module. If so, the module will be
+ called twl6030-gpadc.
+
+config VF610_ADC
+ tristate "Freescale vf610 ADC driver"
+ depends on OF
+ help
+ Say yes here to support for Vybrid board analog-to-digital converter.
+ Since the IP is used for i.MX6SLX, the driver also support i.MX6SLX.
+
+ This driver can also be built as a module. If so, the module will be
+ called vf610_adc.
+
+config VIPERBOARD_ADC
+ tristate "Viperboard ADC support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the ADC part of the Nano River
+ Technologies Viperboard.
+
+config XILINX_XADC
+ tristate "Xilinx XADC driver"
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to have support for the Xilinx XADC. The driver does support
+ both the ZYNQ interface to the XADC as well as the AXI-XADC interface.
+
+ The driver can also be build as a module. If so, the module will be called
+ xilinx-xadc.
+
+endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
new file mode 100644
index 00000000000000..701fdb7c96aa5c
--- /dev/null
+++ b/drivers/iio/adc/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for IIO ADC drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
+obj-$(CONFIG_AD7266) += ad7266.o
+obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7298) += ad7298.o
+obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7791) += ad7791.o
+obj-$(CONFIG_AD7793) += ad7793.o
+obj-$(CONFIG_AD7887) += ad7887.o
+obj-$(CONFIG_AD7923) += ad7923.o
+obj-$(CONFIG_AD799X) += ad799x.o
+obj-$(CONFIG_AT91_ADC) += at91_adc.o
+obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
+obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
+obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
+obj-$(CONFIG_MAX1027) += max1027.o
+obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MCP320X) += mcp320x.o
+obj-$(CONFIG_MCP3422) += mcp3422.o
+obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
+obj-$(CONFIG_NAU7802) += nau7802.o
+obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
+obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
+obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
+obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
+obj-$(CONFIG_VF610_ADC) += vf610_adc.o
+obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+# Composite module: core plus event handling
+xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
+obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
new file mode 100644
index 00000000000000..70f78c3062a718
--- /dev/null
+++ b/drivers/iio/adc/ad7266.c
@@ -0,0 +1,522 @@
+/*
+ * AD7266/65 SPI ADC driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ad7266.h>
+
+/* Driver state; one instance per probed AD7266/65 device. */
+struct ad7266_state {
+ struct spi_device *spi;
+ struct regulator *reg; /* external "vref" supply, if provided */
+ unsigned long vref_mv; /* reference voltage in millivolts */
+
+ /* wakeup / conversion / powerdown transfers used for single reads */
+ struct spi_transfer single_xfer[3];
+ struct spi_message single_msg;
+
+ enum ad7266_range range;
+ enum ad7266_mode mode;
+ bool fixed_addr; /* true: address pins hard-wired, no GPIO muxing */
+ struct gpio gpios[3]; /* AD0..AD2 channel-select lines */
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * The buffer needs to be large enough to hold two samples (4 bytes) and
+ * the naturally aligned timestamp (8 bytes).
+ */
+ struct {
+ __be16 sample[2];
+ s64 timestamp;
+ } data ____cacheline_aligned;
+};
+
+/* A SPI read of two or more bytes brings the part out of power-down. */
+static int ad7266_wakeup(struct ad7266_state *st)
+{
+ /* Any read with >= 2 bytes will wake the device */
+ return spi_read(st->spi, &st->data.sample[0], 2);
+}
+
+/* A SPI read shorter than two bytes powers the part down. */
+static int ad7266_powerdown(struct ad7266_state *st)
+{
+ /* Any read with < 2 bytes will powerdown the device */
+ return spi_read(st->spi, &st->data.sample[0], 1);
+}
+
+/* Buffer pre-enable hook: wake the ADC before capture starts. */
+static int ad7266_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7266_state *st = iio_priv(indio_dev);
+ return ad7266_wakeup(st);
+}
+
+/* Buffer post-disable hook: power the ADC back down. */
+static int ad7266_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7266_state *st = iio_priv(indio_dev);
+ return ad7266_powerdown(st);
+}
+
+static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
+ .preenable = &ad7266_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7266_postdisable,
+};
+
+/*
+ * Triggered-buffer capture: read both channels of the selected pair in
+ * one 4-byte transfer and push them to the buffers together with the
+ * timestamp recorded by iio_pollfunc_store_time.
+ */
+static irqreturn_t ad7266_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7266_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = spi_read(st->spi, st->data.sample, 4);
+ if (ret == 0) {
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
+ pf->timestamp);
+ }
+
+ /* a failed read is silently dropped; the trigger is still released */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Drive the AD0..AD2 address GPIOs so that scan slot @nr is routed to
+ * the converter inputs. No-op when the address pins are hard-wired.
+ */
+static void ad7266_select_input(struct ad7266_state *st, unsigned int nr)
+{
+ unsigned int i;
+
+ if (st->fixed_addr)
+ return;
+
+ /* translate the scan index into the 3-bit pin code for this mode */
+ switch (st->mode) {
+ case AD7266_MODE_SINGLE_ENDED:
+ /* two single-ended inputs share one address */
+ nr >>= 1;
+ break;
+ case AD7266_MODE_PSEUDO_DIFF:
+ /* NOTE(review): odd/even address encoding per mode - confirm
+ * against the AD7266 datasheet */
+ nr |= 1;
+ break;
+ case AD7266_MODE_DIFF:
+ nr &= ~1;
+ break;
+ }
+
+ for (i = 0; i < 3; ++i)
+ gpio_set_value(st->gpios[i].gpio, (bool)(nr & BIT(i)));
+}
+
+/*
+ * Route the first enabled channel pair of @scan_mask to the converter
+ * inputs before buffered capture starts.
+ */
+static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7266_state *st = iio_priv(indio_dev);
+ unsigned int nr = find_first_bit(scan_mask, indio_dev->masklength);
+
+ ad7266_select_input(st, nr);
+
+ return 0;
+}
+
+/*
+ * One-shot conversion: run the wakeup/convert/powerdown SPI message and
+ * pick out the sample for @address. Both channels of a pair are always
+ * converted, so our result sits at sample[address % 2].
+ */
+static int ad7266_read_single(struct ad7266_state *st, int *val,
+ unsigned int address)
+{
+ int ret;
+
+ ad7266_select_input(st, address);
+
+ ret = spi_sync(st->spi, &st->single_msg);
+ *val = be16_to_cpu(st->data.sample[address % 2]);
+
+ return ret;
+}
+
+/*
+ * Direct-mode read. RAW reads are refused while the triggered buffer
+ * owns the bus; SCALE accounts for the doubled span in differential
+ * mode and in the 2xVREF range; OFFSET re-centres the pseudo-bipolar
+ * 2xVREF encoding of non-differential modes.
+ */
+static int ad7266_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long m)
+{
+ struct ad7266_state *st = iio_priv(indio_dev);
+ unsigned long scale_mv;
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ ret = ad7266_read_single(st, val, chan->address);
+ if (ret)
+ return ret;
+
+ /* 12-bit result is left-aligned by two bits in the word */
+ *val = (*val >> 2) & 0xfff;
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, 11);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_mv = st->vref_mv;
+ if (st->mode == AD7266_MODE_DIFF)
+ scale_mv *= 2;
+ if (st->range == AD7266_RANGE_2VREF)
+ scale_mv *= 2;
+
+ *val = scale_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ /* mid-scale offset for the pseudo-bipolar 2xVREF range */
+ if (st->range == AD7266_RANGE_2VREF &&
+ st->mode != AD7266_MODE_DIFF)
+ *val = 2048;
+ else
+ *val = 0;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+/* Single-ended channel: 12-bit sample, left-aligned by 2 bits, big-endian. */
+#define AD7266_CHAN(_chan, _sign) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = (_chan), \
+ .scan_type = { \
+ .sign = (_sign), \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 2, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+/* All twelve single-ended inputs plus a software timestamp. */
+#define AD7266_DECLARE_SINGLE_ENDED_CHANNELS(_name, _sign) \
+const struct iio_chan_spec ad7266_channels_##_name[] = { \
+ AD7266_CHAN(0, (_sign)), \
+ AD7266_CHAN(1, (_sign)), \
+ AD7266_CHAN(2, (_sign)), \
+ AD7266_CHAN(3, (_sign)), \
+ AD7266_CHAN(4, (_sign)), \
+ AD7266_CHAN(5, (_sign)), \
+ AD7266_CHAN(6, (_sign)), \
+ AD7266_CHAN(7, (_sign)), \
+ AD7266_CHAN(8, (_sign)), \
+ AD7266_CHAN(9, (_sign)), \
+ AD7266_CHAN(10, (_sign)), \
+ AD7266_CHAN(11, (_sign)), \
+ IIO_CHAN_SOFT_TIMESTAMP(13), \
+}
+
+/* Only two inputs are reachable when the address pins are hard-wired. */
+#define AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(_name, _sign) \
+const struct iio_chan_spec ad7266_channels_##_name##_fixed[] = { \
+ AD7266_CHAN(0, (_sign)), \
+ AD7266_CHAN(1, (_sign)), \
+ IIO_CHAN_SOFT_TIMESTAMP(2), \
+}
+
+/* Unsigned and signed variants; selection happens in ad7266_init_channels() */
+static AD7266_DECLARE_SINGLE_ENDED_CHANNELS(u, 'u');
+static AD7266_DECLARE_SINGLE_ENDED_CHANNELS(s, 's');
+static AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(u, 'u');
+static AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(s, 's');
+
+/* Differential channel: consecutive input pair, same 12-in-16 layout. */
+#define AD7266_CHAN_DIFF(_chan, _sign) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan) * 2, \
+ .channel2 = (_chan) * 2 + 1, \
+ .address = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = (_chan), \
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 2, \
+ .endianness = IIO_BE, \
+ }, \
+ .differential = 1, \
+}
+
+/* Six differential pairs plus a software timestamp. */
+#define AD7266_DECLARE_DIFF_CHANNELS(_name, _sign) \
+const struct iio_chan_spec ad7266_channels_diff_##_name[] = { \
+ AD7266_CHAN_DIFF(0, (_sign)), \
+ AD7266_CHAN_DIFF(1, (_sign)), \
+ AD7266_CHAN_DIFF(2, (_sign)), \
+ AD7266_CHAN_DIFF(3, (_sign)), \
+ AD7266_CHAN_DIFF(4, (_sign)), \
+ AD7266_CHAN_DIFF(5, (_sign)), \
+ IIO_CHAN_SOFT_TIMESTAMP(6), \
+}
+
+static AD7266_DECLARE_DIFF_CHANNELS(s, 's');
+static AD7266_DECLARE_DIFF_CHANNELS(u, 'u');
+
+/* Two differential pairs when the address pins are hard-wired. */
+#define AD7266_DECLARE_DIFF_CHANNELS_FIXED(_name, _sign) \
+const struct iio_chan_spec ad7266_channels_diff_fixed_##_name[] = { \
+ AD7266_CHAN_DIFF(0, (_sign)), \
+ AD7266_CHAN_DIFF(1, (_sign)), \
+ IIO_CHAN_SOFT_TIMESTAMP(2), \
+}
+
+static AD7266_DECLARE_DIFF_CHANNELS_FIXED(s, 's');
+static AD7266_DECLARE_DIFF_CHANNELS_FIXED(u, 'u');
+
+static const struct iio_info ad7266_info = {
+ .read_raw = &ad7266_read_raw,
+ .update_scan_mode = &ad7266_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Channels are always converted in pairs, so only adjacent bit-pairs
+ * may be enabled in a scan mask.
+ */
+static const unsigned long ad7266_available_scan_masks[] = {
+ 0x003,
+ 0x00c,
+ 0x030,
+ 0x0c0,
+ 0x300,
+ 0xc00,
+ 0x000,
+};
+
+static const unsigned long ad7266_available_scan_masks_diff[] = {
+ 0x003,
+ 0x00c,
+ 0x030,
+ 0x000,
+};
+
+static const unsigned long ad7266_available_scan_masks_fixed[] = {
+ 0x003,
+ 0x000,
+};
+
+/* Channel-table variant descriptor, selected once at probe time. */
+struct ad7266_chan_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const unsigned long *scan_masks;
+};
+
+/* Index into ad7266_chan_infos: (differential << 2) | (signed << 1) | fixed */
+#define AD7266_CHAN_INFO_INDEX(_differential, _signed, _fixed) \
+ (((_differential) << 2) | ((_signed) << 1) | ((_fixed) << 0))
+
+/* All eight channel-table variants, addressed by the index macro above. */
+static const struct ad7266_chan_info ad7266_chan_infos[] = {
+ [AD7266_CHAN_INFO_INDEX(0, 0, 0)] = {
+ .channels = ad7266_channels_u,
+ .num_channels = ARRAY_SIZE(ad7266_channels_u),
+ .scan_masks = ad7266_available_scan_masks,
+ },
+ [AD7266_CHAN_INFO_INDEX(0, 0, 1)] = {
+ .channels = ad7266_channels_u_fixed,
+ .num_channels = ARRAY_SIZE(ad7266_channels_u_fixed),
+ .scan_masks = ad7266_available_scan_masks_fixed,
+ },
+ [AD7266_CHAN_INFO_INDEX(0, 1, 0)] = {
+ .channels = ad7266_channels_s,
+ .num_channels = ARRAY_SIZE(ad7266_channels_s),
+ .scan_masks = ad7266_available_scan_masks,
+ },
+ [AD7266_CHAN_INFO_INDEX(0, 1, 1)] = {
+ .channels = ad7266_channels_s_fixed,
+ .num_channels = ARRAY_SIZE(ad7266_channels_s_fixed),
+ .scan_masks = ad7266_available_scan_masks_fixed,
+ },
+ [AD7266_CHAN_INFO_INDEX(1, 0, 0)] = {
+ .channels = ad7266_channels_diff_u,
+ .num_channels = ARRAY_SIZE(ad7266_channels_diff_u),
+ .scan_masks = ad7266_available_scan_masks_diff,
+ },
+ [AD7266_CHAN_INFO_INDEX(1, 0, 1)] = {
+ .channels = ad7266_channels_diff_fixed_u,
+ .num_channels = ARRAY_SIZE(ad7266_channels_diff_fixed_u),
+ .scan_masks = ad7266_available_scan_masks_fixed,
+ },
+ [AD7266_CHAN_INFO_INDEX(1, 1, 0)] = {
+ .channels = ad7266_channels_diff_s,
+ .num_channels = ARRAY_SIZE(ad7266_channels_diff_s),
+ .scan_masks = ad7266_available_scan_masks_diff,
+ },
+ [AD7266_CHAN_INFO_INDEX(1, 1, 1)] = {
+ .channels = ad7266_channels_diff_fixed_s,
+ .num_channels = ARRAY_SIZE(ad7266_channels_diff_fixed_s),
+ .scan_masks = ad7266_available_scan_masks_fixed,
+ },
+};
+
+/*
+ * Pick the channel table matching the wiring mode, input range and
+ * address configuration chosen at probe time.
+ */
+static void ad7266_init_channels(struct iio_dev *indio_dev)
+{
+ struct ad7266_state *st = iio_priv(indio_dev);
+ bool is_differential, is_signed;
+ const struct ad7266_chan_info *chan_info;
+ int i;
+
+ is_differential = st->mode != AD7266_MODE_SINGLE_ENDED;
+ /* results are signed in the 2xVREF range or true differential mode */
+ is_signed = (st->range == AD7266_RANGE_2VREF) |
+ (st->mode == AD7266_MODE_DIFF);
+
+ i = AD7266_CHAN_INFO_INDEX(is_differential, is_signed, st->fixed_addr);
+ chan_info = &ad7266_chan_infos[i];
+
+ indio_dev->channels = chan_info->channels;
+ indio_dev->num_channels = chan_info->num_channels;
+ indio_dev->available_scan_masks = chan_info->scan_masks;
+ /* the last entry is the software timestamp, not a scan channel */
+ indio_dev->masklength = chan_info->num_channels - 1;
+}
+
+/* Consumer labels for the AD0..AD2 address-select GPIOs */
+static const char * const ad7266_gpio_labels[] = {
+ "AD0", "AD1", "AD2",
+};
+
+/*
+ * Probe: set up the optional external reference, the address-select
+ * GPIOs, the channel layout and the wakeup/convert/powerdown SPI
+ * message used for single-shot reads.
+ */
+static int ad7266_probe(struct spi_device *spi)
+{
+ struct ad7266_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
+ struct ad7266_state *st;
+ unsigned int i;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ /*
+ * The external reference is optional. Only fall back to the
+ * internal 2.5V reference when the supply is genuinely absent
+ * (-ENODEV); propagate real errors, including -EPROBE_DEFER,
+ * instead of silently swallowing them.
+ */
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_mv = ret / 1000;
+ } else {
+ ret = PTR_ERR(st->reg);
+ if (ret != -ENODEV)
+ return ret;
+ /* Use internal reference */
+ st->vref_mv = 2500;
+ }
+
+ if (pdata) {
+ st->fixed_addr = pdata->fixed_addr;
+ st->mode = pdata->mode;
+ st->range = pdata->range;
+
+ if (!st->fixed_addr) {
+ /* claim the three channel-select GPIOs, driven low */
+ for (i = 0; i < ARRAY_SIZE(st->gpios); ++i) {
+ st->gpios[i].gpio = pdata->addr_gpios[i];
+ st->gpios[i].flags = GPIOF_OUT_INIT_LOW;
+ st->gpios[i].label = ad7266_gpio_labels[i];
+ }
+ ret = gpio_request_array(st->gpios,
+ ARRAY_SIZE(st->gpios));
+ if (ret)
+ goto error_disable_reg;
+ }
+ } else {
+ /* conservative defaults without platform data */
+ st->fixed_addr = true;
+ st->range = AD7266_RANGE_VREF;
+ st->mode = AD7266_MODE_DIFF;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad7266_info;
+
+ ad7266_init_channels(indio_dev);
+
+ /* wakeup */
+ st->single_xfer[0].rx_buf = &st->data.sample[0];
+ st->single_xfer[0].len = 2;
+ st->single_xfer[0].cs_change = 1;
+ /* conversion */
+ st->single_xfer[1].rx_buf = st->data.sample;
+ st->single_xfer[1].len = 4;
+ st->single_xfer[1].cs_change = 1;
+ /* powerdown */
+ st->single_xfer[2].tx_buf = &st->data.sample[0];
+ st->single_xfer[2].len = 1;
+
+ spi_message_init(&st->single_msg);
+ spi_message_add_tail(&st->single_xfer[0], &st->single_msg);
+ spi_message_add_tail(&st->single_xfer[1], &st->single_msg);
+ spi_message_add_tail(&st->single_xfer[2], &st->single_msg);
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
+ if (ret)
+ goto error_free_gpios;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_buffer_cleanup;
+
+ return 0;
+
+error_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_gpios:
+ if (!st->fixed_addr)
+ gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int ad7266_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7266_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (!st->fixed_addr)
+ gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
+ if (!IS_ERR_OR_NULL(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+/* Both parts share the same register interface; no per-part data. */
+static const struct spi_device_id ad7266_id[] = {
+ {"ad7265", 0},
+ {"ad7266", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7266_id);
+
+static struct spi_driver ad7266_driver = {
+ .driver = {
+ .name = "ad7266",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7266_probe,
+ .remove = ad7266_remove,
+ .id_table = ad7266_id,
+};
+module_spi_driver(ad7266_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7266/65 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
new file mode 100644
index 00000000000000..7b6436e47b2290
--- /dev/null
+++ b/drivers/iio/adc/ad7291.c
@@ -0,0 +1,585 @@
+/*
+ * AD7291 8-Channel, I2C, 12-Bit SAR ADC with Temperature Sensor
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#include <linux/platform_data/ad7291.h>
+
+/*
+ * Simplified handling
+ *
+ * If no events enabled - single polled channel read
+ * If event enabled direct reads disable unless channel
+ * is in the read mask.
+ *
+ * The noise-delayed bit as per datasheet suggestion is always enabled.
+ */
+
+/*
+ * AD7291 registers definition
+ *
+ * Each input channel owns three consecutive limit registers -
+ * DATA_HIGH, DATA_LOW and HYST - hence the "* 3" stride below.
+ * Bank offset AD7291_VOLTAGE_OFFSET (8) addresses the temperature
+ * sensor's limit registers, after the eight voltage banks.
+ */
+#define AD7291_COMMAND 0x00
+#define AD7291_VOLTAGE 0x01
+#define AD7291_T_SENSE 0x02
+#define AD7291_T_AVERAGE 0x03
+#define AD7291_DATA_HIGH(x) ((x) * 3 + 0x4)
+#define AD7291_DATA_LOW(x) ((x) * 3 + 0x5)
+#define AD7291_HYST(x) ((x) * 3 + 0x6)
+#define AD7291_VOLTAGE_ALERT_STATUS 0x1F
+#define AD7291_T_ALERT_STATUS 0x20
+
+#define AD7291_BITS 12
+#define AD7291_VOLTAGE_LIMIT_COUNT 8
+
+
+/*
+ * AD7291 command
+ */
+#define AD7291_AUTOCYCLE BIT(0)
+#define AD7291_RESET BIT(1)
+#define AD7291_ALERT_CLEAR BIT(2)
+#define AD7291_ALERT_POLARITY BIT(3)
+#define AD7291_EXT_REF BIT(4)
+#define AD7291_NOISE_DELAY BIT(5)
+#define AD7291_T_SENSE_MASK BIT(7)
+#define AD7291_VOLTAGE_MASK GENMASK(15, 8)
+#define AD7291_VOLTAGE_OFFSET 8
+
+/*
+ * AD7291 value masks
+ */
+#define AD7291_VALUE_MASK GENMASK(11, 0)
+
+/*
+ * AD7291 alert register bits: one low/high pair per voltage channel.
+ */
+#define AD7291_T_LOW BIT(0)
+#define AD7291_T_HIGH BIT(1)
+#define AD7291_T_AVG_LOW BIT(2)
+#define AD7291_T_AVG_HIGH BIT(3)
+#define AD7291_V_LOW(x) BIT((x) * 2)
+#define AD7291_V_HIGH(x) BIT((x) * 2 + 1)
+
+
+/* Driver state; one instance per probed AD7291. */
+struct ad7291_chip_info {
+ struct i2c_client *client;
+ struct regulator *reg; /* external reference; NULL when internal */
+ u16 command; /* cached copy of the command register */
+ u16 c_mask; /* Active voltage channels for events */
+ struct mutex state_lock; /* guards command/c_mask and single reads */
+};
+
+/* Read a 16-bit big-endian register; returns 0 or a negative errno. */
+static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
+{
+ struct i2c_client *client = chip->client;
+ int ret = 0;
+
+ ret = i2c_smbus_read_word_swapped(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "I2C read error\n");
+ return ret;
+ }
+
+ *data = ret;
+
+ return 0;
+}
+
+/* Write a 16-bit big-endian register. */
+static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
+{
+ return i2c_smbus_write_word_swapped(chip->client, reg, data);
+}
+
+/*
+ * Threaded handler for the ALERT interrupt: read both alert status
+ * registers, acknowledge the alert by pulsing ALERT_CLEAR in the
+ * command register, then translate the status bits into IIO threshold
+ * events.
+ */
+static irqreturn_t ad7291_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct ad7291_chip_info *chip = iio_priv(private);
+ u16 t_status, v_status;
+ u16 command;
+ int i;
+ s64 timestamp = iio_get_time_ns();
+
+ if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
+ return IRQ_HANDLED;
+
+ if (ad7291_i2c_read(chip, AD7291_VOLTAGE_ALERT_STATUS, &v_status))
+ return IRQ_HANDLED;
+
+ if (!(t_status || v_status))
+ return IRQ_HANDLED;
+
+ /* pulse ALERT_CLEAR to acknowledge the alert */
+ command = chip->command | AD7291_ALERT_CLEAR;
+ ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+ command = chip->command & ~AD7291_ALERT_CLEAR;
+ ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+ /* For now treat t_sense and t_sense_average the same */
+ if ((t_status & AD7291_T_LOW) || (t_status & AD7291_T_AVG_LOW))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+ if ((t_status & AD7291_T_HIGH) || (t_status & AD7291_T_AVG_HIGH))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
+ if (v_status & AD7291_V_LOW(i))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+ i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+ if (v_status & AD7291_V_HIGH(i))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+ i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Map a channel/direction/info triple onto the matching limit register.
+ * Voltage channels use their channel number as the bank offset; the
+ * temperature sensor uses the bank after the eight voltage banks.
+ *
+ * NOTE(review): the falling direction maps to DATA_HIGH and rising to
+ * DATA_LOW, which looks inverted relative to the register names -
+ * verify against the AD7291 datasheet before changing.
+ */
+static unsigned int ad7291_threshold_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir,
+ enum iio_event_info info)
+{
+ unsigned int offset;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ offset = chan->channel;
+ break;
+ case IIO_TEMP:
+ offset = AD7291_VOLTAGE_OFFSET;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (dir == IIO_EV_DIR_FALLING)
+ return AD7291_DATA_HIGH(offset);
+ else
+ return AD7291_DATA_LOW(offset);
+ case IIO_EV_INFO_HYSTERESIS:
+ return AD7291_HYST(offset);
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Read a threshold or hysteresis value. Voltage and hysteresis fields
+ * are unsigned 12-bit; temperature thresholds are 12-bit two's
+ * complement and get sign-extended.
+ */
+static int ad7291_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ int ret;
+ u16 uval;
+
+ ret = ad7291_i2c_read(chip, ad7291_threshold_reg(chan, dir, info),
+ &uval);
+ if (ret < 0)
+ return ret;
+
+ if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE)
+ *val = uval & AD7291_VALUE_MASK;
+
+ else
+ *val = sign_extend32(uval, 11);
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * Write a threshold or hysteresis value after range-checking it:
+ * 0..4095 for unsigned fields, -2048..2047 for signed temperature
+ * thresholds.
+ */
+static int ad7291_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+
+ if (info == IIO_EV_INFO_HYSTERESIS || chan->type == IIO_VOLTAGE) {
+ if (val > AD7291_VALUE_MASK || val < 0)
+ return -EINVAL;
+ } else {
+ if (val > 2047 || val < -2048)
+ return -EINVAL;
+ }
+
+ return ad7291_i2c_write(chip, ad7291_threshold_reg(chan, dir, info),
+ val);
+}
+
+/*
+ * Report whether events are enabled for a channel. Voltage channel i
+ * is tracked as bit (15 - i) of c_mask, mirroring the command-register
+ * layout; temperature events are always on.
+ */
+static int ad7291_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ /*
+ * To be enabled the channel must simply be on. If any are enabled
+ * we are in continuous sampling mode
+ */
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return !!(chip->c_mask & BIT(15 - chan->channel));
+ case IIO_TEMP:
+ /* always on */
+ return 1;
+ default:
+ return -EINVAL;
+ }
+
+}
+
+/*
+ * Enable or disable events on a voltage channel. Enabling any voltage
+ * channel switches the device into autocycle (continuous sampling)
+ * mode; disabling the last one switches autocycle off again.
+ */
+static int ad7291_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ int ret = 0;
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ unsigned int mask;
+ u16 regval;
+
+ mutex_lock(&chip->state_lock);
+ regval = chip->command;
+ /*
+ * To be enabled the channel must simply be on. If any are enabled
+ * use continuous sampling mode.
+ * Possible to disable temp as well but that makes single read tricky.
+ */
+
+ /* command register keeps channel enables in bits 15..8 */
+ mask = BIT(15 - chan->channel);
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if ((!state) && (chip->c_mask & mask))
+ chip->c_mask &= ~mask;
+ else if (state && (!(chip->c_mask & mask)))
+ chip->c_mask |= mask;
+ else
+ break;
+
+ regval &= ~AD7291_AUTOCYCLE;
+ regval |= chip->c_mask;
+ if (chip->c_mask) /* Enable autocycle? */
+ regval |= AD7291_AUTOCYCLE;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
+ if (ret < 0)
+ goto error_ret;
+
+ chip->command = regval;
+ break;
+ default:
+ /* temperature events are always on and cannot be toggled */
+ ret = -EINVAL;
+ }
+
+error_ret:
+ mutex_unlock(&chip->state_lock);
+ return ret;
+}
+
+/*
+ * Single-shot reads. Voltage reads temporarily select the requested
+ * channel in the command register and are refused (-EBUSY) while
+ * autocycle mode is running; temperature reads rely on T_SENSE being
+ * permanently enabled in the command register.
+ */
+static int ad7291_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+ u16 regval;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&chip->state_lock);
+ /* If in autocycle mode drop through */
+ if (chip->command & AD7291_AUTOCYCLE) {
+ mutex_unlock(&chip->state_lock);
+ return -EBUSY;
+ }
+ /* Enable this channel alone */
+ regval = chip->command & (~AD7291_VOLTAGE_MASK);
+ regval |= BIT(15 - chan->channel);
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, regval);
+ if (ret < 0) {
+ mutex_unlock(&chip->state_lock);
+ return ret;
+ }
+ /* Read voltage */
+ ret = i2c_smbus_read_word_swapped(chip->client,
+ AD7291_VOLTAGE);
+ if (ret < 0) {
+ mutex_unlock(&chip->state_lock);
+ return ret;
+ }
+ *val = ret & AD7291_VALUE_MASK;
+ mutex_unlock(&chip->state_lock);
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* Assumes tsense bit of command register always set */
+ ret = i2c_smbus_read_word_swapped(chip->client,
+ AD7291_T_SENSE);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 11);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_AVERAGE_RAW:
+ ret = i2c_smbus_read_word_swapped(chip->client,
+ AD7291_T_AVERAGE);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 11);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chip->reg) {
+ int vref;
+
+ vref = regulator_get_voltage(chip->reg);
+ if (vref < 0)
+ return vref;
+ *val = vref / 1000;
+ } else {
+ /* internal 2.5V reference */
+ *val = 2500;
+ }
+ *val2 = AD7291_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /*
+ * One LSB of the ADC corresponds to 0.25 deg C.
+ * The temperature reading is in 12-bit twos
+ * complement format
+ */
+ *val = 250;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Separate rising/falling thresholds plus a shared hysteresis. */
+static const struct iio_event_spec ad7291_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+#define AD7291_VOLTAGE_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = _chan, \
+ .event_spec = ad7291_events, \
+ .num_event_specs = ARRAY_SIZE(ad7291_events), \
+}
+
+/* Eight voltage inputs plus the on-chip temperature sensor. */
+static const struct iio_chan_spec ad7291_channels[] = {
+ AD7291_VOLTAGE_CHAN(0),
+ AD7291_VOLTAGE_CHAN(1),
+ AD7291_VOLTAGE_CHAN(2),
+ AD7291_VOLTAGE_CHAN(3),
+ AD7291_VOLTAGE_CHAN(4),
+ AD7291_VOLTAGE_CHAN(5),
+ AD7291_VOLTAGE_CHAN(6),
+ AD7291_VOLTAGE_CHAN(7),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = 0,
+ .event_spec = ad7291_events,
+ .num_event_specs = ARRAY_SIZE(ad7291_events),
+ }
+};
+
+static const struct iio_info ad7291_info = {
+ .read_raw = &ad7291_read_raw,
+ .read_event_config = &ad7291_read_event_config,
+ .write_event_config = &ad7291_write_event_config,
+ .read_event_value = &ad7291_read_event_value,
+ .write_event_value = &ad7291_write_event_value,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: optionally enable an external reference, reset the device,
+ * program the default command word and register the IIO device. The
+ * ALERT interrupt is optional.
+ */
+static int ad7291_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad7291_platform_data *pdata = client->dev.platform_data;
+ struct ad7291_chip_info *chip;
+ struct iio_dev *indio_dev;
+ int ret = 0;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+ chip = iio_priv(indio_dev);
+
+ if (pdata && pdata->use_external_ref) {
+ chip->reg = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(chip->reg))
+ return PTR_ERR(chip->reg);
+
+ ret = regulator_enable(chip->reg);
+ if (ret)
+ return ret;
+ }
+
+ mutex_init(&chip->state_lock);
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->client = client;
+
+ chip->command = AD7291_NOISE_DELAY |
+ AD7291_T_SENSE_MASK | /* Tsense always enabled */
+ AD7291_ALERT_POLARITY; /* set irq polarity low level */
+
+ if (pdata && pdata->use_external_ref)
+ chip->command |= AD7291_EXT_REF;
+
+ indio_dev->name = id->name;
+ indio_dev->channels = ad7291_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7291_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /*
+ * Reset the chip, then program the default command word.
+ * Propagate the real I2C error code rather than folding every
+ * failure into -EIO.
+ */
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, AD7291_RESET);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
+ if (ret)
+ goto error_disable_reg;
+
+ if (client->irq > 0) {
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ &ad7291_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ id->name,
+ indio_dev);
+ if (ret)
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_irq;
+
+ return 0;
+
+error_unreg_irq:
+ /* only free what was actually requested (irq > 0 above) */
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
+error_disable_reg:
+ if (chip->reg)
+ regulator_disable(chip->reg);
+
+ return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int ad7291_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ad7291_chip_info *chip = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ if (chip->reg)
+ regulator_disable(chip->reg);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7291_id[] = {
+ { "ad7291", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7291_id);
+
+static struct i2c_driver ad7291_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad7291_probe,
+ .remove = ad7291_remove,
+ .id_table = ad7291_id,
+};
+module_i2c_driver(ad7291_driver);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
new file mode 100644
index 00000000000000..4a8c0a2f49b695
--- /dev/null
+++ b/drivers/iio/adc/ad7298.c
@@ -0,0 +1,391 @@
+/*
+ * AD7298 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ad7298.h>
+
+#define AD7298_WRITE BIT(15) /* write to the control register */
+#define AD7298_REPEAT BIT(14) /* repeated conversion enable */
+#define AD7298_CH(x) BIT(13 - (x)) /* channel select */
+#define AD7298_TSENSE BIT(5) /* temperature conversion enable */
+#define AD7298_EXTREF BIT(2) /* external reference enable */
+#define AD7298_TAVG BIT(1) /* temperature sensor averaging enable */
+#define AD7298_PDD BIT(0) /* partial power down enable */
+
+#define AD7298_MAX_CHAN 8
+#define AD7298_INTREF_mV 2500
+
+#define AD7298_CH_TEMP 9
+
+struct ad7298_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned ext_ref;
+ struct spi_transfer ring_xfer[10];
+ struct spi_transfer scan_single_xfer[3];
+ struct spi_message ring_msg;
+ struct spi_message scan_single_msg;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 rx_buf[12] ____cacheline_aligned;
+ __be16 tx_buf[2];
+};
+
+#define AD7298_V_CHAN(index) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec ad7298_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = AD7298_CH_TEMP,
+ .scan_index = -1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ },
+ },
+ AD7298_V_CHAN(0),
+ AD7298_V_CHAN(1),
+ AD7298_V_CHAN(2),
+ AD7298_V_CHAN(3),
+ AD7298_V_CHAN(4),
+ AD7298_V_CHAN(5),
+ AD7298_V_CHAN(6),
+ AD7298_V_CHAN(7),
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+/**
+ * ad7298_update_scan_mode() setup the spi transfer buffer for the new scan mask
+ *
+ * Builds a single command word selecting every channel enabled in
+ * @active_scan_mask, then chains two 2-byte command frames followed by one
+ * 2-byte read transfer per enabled channel onto the ring message.
+ **/
+static int ad7298_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct ad7298_state *st = iio_priv(indio_dev);
+ int i, m;
+ unsigned short command;
+ int scan_count;
+
+ /* Now compute overall size */
+ scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength);
+
+ command = AD7298_WRITE | st->ext_ref;
+
+ /* AD7298_CH(x) is BIT(13 - x), so walk the select bits downwards */
+ for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
+ if (test_bit(i, active_scan_mask))
+ command |= m;
+
+ st->tx_buf[0] = cpu_to_be16(command);
+
+ /* build spi ring message */
+ st->ring_xfer[0].tx_buf = &st->tx_buf[0];
+ st->ring_xfer[0].len = 2;
+ st->ring_xfer[0].cs_change = 1;
+ st->ring_xfer[1].tx_buf = &st->tx_buf[1];
+ st->ring_xfer[1].len = 2;
+ st->ring_xfer[1].cs_change = 1;
+
+ spi_message_init(&st->ring_msg);
+ spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
+ spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
+
+ for (i = 0; i < scan_count; i++) {
+ st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
+ st->ring_xfer[i + 2].len = 2;
+ st->ring_xfer[i + 2].cs_change = 1;
+ spi_message_add_tail(&st->ring_xfer[i + 2], &st->ring_msg);
+ }
+ /* make sure last transfer cs_change is not set */
+ st->ring_xfer[i + 1].cs_change = 0;
+
+ return 0;
+}
+
+/**
+ * ad7298_trigger_handler() bh of trigger launched polling to ring buffer
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ **/
+static irqreturn_t ad7298_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7298_state *st = iio_priv(indio_dev);
+ int b_sent;
+
+ /* ring_msg was pre-built by ad7298_update_scan_mode() */
+ b_sent = spi_sync(st->spi, &st->ring_msg);
+ if (b_sent)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Polled single conversion of voltage channel @ch via the pre-built
+ * scan_single_msg.  Returns the raw result converted to CPU byte order,
+ * or a negative errno on SPI failure.
+ */
+static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
+{
+ int ret;
+ st->tx_buf[0] = cpu_to_be16(AD7298_WRITE | st->ext_ref |
+ (AD7298_CH(0) >> ch));
+
+ ret = spi_sync(st->spi, &st->scan_single_msg);
+ if (ret)
+ return ret;
+
+ return be16_to_cpu(st->rx_buf[0]);
+}
+
+/*
+ * One-shot averaged temperature conversion: write the TSENSE/TAVG command,
+ * clock out a dummy frame, wait out the conversion time, then read back the
+ * 12-bit two's-complement result (sign-extended into *val).
+ */
+static int ad7298_scan_temp(struct ad7298_state *st, int *val)
+{
+ int ret;
+ __be16 buf;
+
+ buf = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
+ AD7298_TAVG | st->ext_ref);
+
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
+ if (ret)
+ return ret;
+
+ buf = cpu_to_be16(0);
+
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
+ if (ret)
+ return ret;
+
+ usleep_range(101, 1000); /* sleep > 100us */
+
+ ret = spi_read(st->spi, (u8 *)&buf, 2);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(be16_to_cpu(buf), 11);
+
+ return 0;
+}
+
+/*
+ * Reference voltage in millivolts: the external regulator's voltage when
+ * ext_ref is configured, otherwise the internal 2.5V reference.  Returns a
+ * negative errno if the regulator query fails.
+ */
+static int ad7298_get_ref_voltage(struct ad7298_state *st)
+{
+ int vref;
+
+ if (st->ext_ref) {
+ vref = regulator_get_voltage(st->reg);
+ if (vref < 0)
+ return vref;
+
+ return vref / 1000;
+ } else {
+ return AD7298_INTREF_mV;
+ }
+}
+
+/*
+ * RAW reads use the one-shot temperature sequence or the direct voltage
+ * scan (rejected with -EBUSY while the triggered buffer runs); SCALE and
+ * OFFSET are derived from the currently active reference voltage.
+ */
+static int ad7298_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ struct ad7298_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ } else {
+ if (chan->address == AD7298_CH_TEMP)
+ ret = ad7298_scan_temp(st, val);
+ else
+ ret = ad7298_scan_direct(st, chan->address);
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ /* temp result was already written to *val by ad7298_scan_temp() */
+ if (chan->address != AD7298_CH_TEMP)
+ *val = ret & GENMASK(chan->scan_type.realbits - 1, 0);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = ad7298_get_ref_voltage(st);
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = ad7298_get_ref_voltage(st);
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * NOTE(review): constants presumably follow the AD7298
+ * temperature transfer function - verify against datasheet.
+ */
+ *val = 1093 - 2732500 / ad7298_get_ref_voltage(st);
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info ad7298_info = {
+ .read_raw = &ad7298_read_raw,
+ .update_scan_mode = ad7298_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Allocate the IIO device, optionally enable an external "vref" regulator,
+ * pre-build the single-conversion SPI message and register a triggered
+ * buffer plus the device itself.  On failure everything acquired so far is
+ * unwound via the goto ladder.
+ */
+static int ad7298_probe(struct spi_device *spi)
+{
+ struct ad7298_platform_data *pdata = spi->dev.platform_data;
+ struct ad7298_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ if (pdata && pdata->ext_ref)
+ st->ext_ref = AD7298_EXTREF;
+
+ if (st->ext_ref) {
+ st->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7298_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
+ indio_dev->info = &ad7298_info;
+
+ /* Setup default message */
+
+ /* command frame, dummy frame, then the 2-byte conversion result */
+ st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+ st->scan_single_xfer[0].len = 2;
+ st->scan_single_xfer[0].cs_change = 1;
+ st->scan_single_xfer[1].tx_buf = &st->tx_buf[1];
+ st->scan_single_xfer[1].len = 2;
+ st->scan_single_xfer[1].cs_change = 1;
+ st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
+ st->scan_single_xfer[2].len = 2;
+
+ spi_message_init(&st->scan_single_msg);
+ spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
+ spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
+ spi_message_add_tail(&st->scan_single_xfer[2], &st->scan_single_msg);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &ad7298_trigger_handler, NULL);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_ring;
+
+ return 0;
+
+error_cleanup_ring:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+ if (st->ext_ref)
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/* Unwind probe: IIO core, triggered buffer, then the optional vref supply. */
+static int ad7298_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7298_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (st->ext_ref)
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7298_id[] = {
+ {"ad7298", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7298_id);
+
+static struct spi_driver ad7298_driver = {
+ .driver = {
+ .name = "ad7298",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7298_probe,
+ .remove = ad7298_remove,
+ .id_table = ad7298_id,
+};
+module_spi_driver(ad7298_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
new file mode 100644
index 00000000000000..ce400ec176f13e
--- /dev/null
+++ b/drivers/iio/adc/ad7476.c
@@ -0,0 +1,315 @@
+/*
+ * AD7466/7/8 AD7476/5/7/8 (A) SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+struct ad7476_state;
+
+struct ad7476_chip_info {
+ unsigned int int_vref_uv;
+ struct iio_chan_spec channel[2];
+ void (*reset)(struct ad7476_state *);
+};
+
+struct ad7476_state {
+ struct spi_device *spi;
+ const struct ad7476_chip_info *chip_info;
+ struct regulator *reg;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * Make the buffer large enough for one 16 bit sample and one 64 bit
+ * aligned 64 bit timestamp.
+ */
+ unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
+};
+
+enum ad7476_supported_device_ids {
+ ID_AD7091R,
+ ID_AD7276,
+ ID_AD7277,
+ ID_AD7278,
+ ID_AD7466,
+ ID_AD7467,
+ ID_AD7468,
+ ID_AD7495,
+ ID_AD7940,
+};
+
+/*
+ * Triggered-buffer bottom half: run the pre-built single-transfer message
+ * and push the sample (plus timestamp) into the ring.
+ */
+static irqreturn_t ad7476_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7476_state *st = iio_priv(indio_dev);
+ int b_sent;
+
+ b_sent = spi_sync(st->spi, &st->msg);
+ if (b_sent < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+ iio_get_time_ns());
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/* AD7091R-specific reset hook, called once at probe time. */
+static void ad7091_reset(struct ad7476_state *st)
+{
+ /* Any transfers with 8 scl cycles will reset the device */
+ spi_read(st->spi, st->data, 1);
+}
+
+/*
+ * Polled single conversion: the result is a big-endian word at the start
+ * of st->data.  Returns the CPU-order value or a negative errno.
+ */
+static int ad7476_scan_direct(struct ad7476_state *st)
+{
+ int ret;
+
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret)
+ return ret;
+
+ return be16_to_cpup((__be16 *)st->data);
+}
+
+/*
+ * RAW performs a polled conversion (rejected with -EBUSY while the buffer
+ * is enabled) and masks/shifts per the chip's scan_type; SCALE comes from
+ * the internal reference if the part has one, else the vcc regulator.
+ */
+static int ad7476_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ struct ad7476_state *st = iio_priv(indio_dev);
+ int scale_uv;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else
+ ret = ad7476_scan_direct(st);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+ *val = (ret >> st->chip_info->channel[0].scan_type.shift) &
+ GENMASK(st->chip_info->channel[0].scan_type.realbits - 1, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (!st->chip_info->int_vref_uv) {
+ scale_uv = regulator_get_voltage(st->reg);
+ if (scale_uv < 0)
+ return scale_uv;
+ } else {
+ scale_uv = st->chip_info->int_vref_uv;
+ }
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+#define _AD7476_CHAN(bits, _shift, _info_mask_sep) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .info_mask_separate = _info_mask_sep, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD7476_CHAN(bits) _AD7476_CHAN((bits), 13 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
+#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
+#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
+
+static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
+ [ID_AD7091R] = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .reset = ad7091_reset,
+ },
+ [ID_AD7276] = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7277] = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7278] = {
+ .channel[0] = AD7940_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7466] = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7467] = {
+ .channel[0] = AD7476_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7468] = {
+ .channel[0] = AD7476_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+ [ID_AD7495] = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .int_vref_uv = 2500000,
+ },
+ [ID_AD7940] = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ },
+};
+
+static const struct iio_info ad7476_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &ad7476_read_raw,
+};
+
+/*
+ * Look up the chip variant from the SPI device id, enable the vcc supply,
+ * prepare the single read transfer, set up the triggered buffer, run the
+ * optional chip reset hook and register the device.
+ */
+static int ad7476_probe(struct spi_device *spi)
+{
+ struct ad7476_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->chip_info =
+ &ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ /* Establish that the iio_dev is a child of the spi device */
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = 2;
+ indio_dev->info = &ad7476_info;
+ /* Setup default message */
+
+ st->xfer.rx_buf = &st->data;
+ st->xfer.len = st->chip_info->channel[0].scan_type.storagebits / 8;
+
+ spi_message_init(&st->msg);
+ spi_message_add_tail(&st->xfer, &st->msg);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &ad7476_trigger_handler, NULL);
+ if (ret)
+ goto error_disable_reg;
+
+ if (st->chip_info->reset)
+ st->chip_info->reset(st);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_ring_unregister;
+ return 0;
+
+error_ring_unregister:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/* Unwind probe: IIO core, triggered buffer, then the vcc supply. */
+static int ad7476_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7476_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7476_id[] = {
+ {"ad7091r", ID_AD7091R},
+ {"ad7273", ID_AD7277},
+ {"ad7274", ID_AD7276},
+ {"ad7276", ID_AD7276},
+ {"ad7277", ID_AD7277},
+ {"ad7278", ID_AD7278},
+ {"ad7466", ID_AD7466},
+ {"ad7467", ID_AD7467},
+ {"ad7468", ID_AD7468},
+ {"ad7475", ID_AD7466},
+ {"ad7476", ID_AD7466},
+ {"ad7476a", ID_AD7466},
+ {"ad7477", ID_AD7467},
+ {"ad7477a", ID_AD7467},
+ {"ad7478", ID_AD7468},
+ {"ad7478a", ID_AD7468},
+ {"ad7495", ID_AD7495},
+ {"ad7910", ID_AD7467},
+ {"ad7920", ID_AD7466},
+ {"ad7940", ID_AD7940},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7476_id);
+
+static struct spi_driver ad7476_driver = {
+ .driver = {
+ .name = "ad7476",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7476_probe,
+ .remove = ad7476_remove,
+ .id_table = ad7476_id,
+};
+module_spi_driver(ad7476_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7476 and similar 1-channel ADCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
new file mode 100644
index 00000000000000..c19f8fd1b4b7ac
--- /dev/null
+++ b/drivers/iio/adc/ad7791.c
@@ -0,0 +1,453 @@
+/*
+ * AD7787/AD7788/AD7789/AD7790/AD7791 SPI ADC driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/adc/ad_sigma_delta.h>
+
+#include <linux/platform_data/ad7791.h>
+
+#define AD7791_REG_COMM 0x0 /* For writes */
+#define AD7791_REG_STATUS 0x0 /* For reads */
+#define AD7791_REG_MODE 0x1
+#define AD7791_REG_FILTER 0x2
+#define AD7791_REG_DATA 0x3
+
+#define AD7791_MODE_CONTINUOUS 0x00
+#define AD7791_MODE_SINGLE 0x02
+#define AD7791_MODE_POWERDOWN 0x03
+
+#define AD7791_CH_AIN1P_AIN1N 0x00
+#define AD7791_CH_AIN2 0x01
+#define AD7791_CH_AIN1N_AIN1N 0x02
+#define AD7791_CH_AVDD_MONITOR 0x03
+
+#define AD7791_FILTER_CLK_DIV_1 (0x0 << 4)
+#define AD7791_FILTER_CLK_DIV_2 (0x1 << 4)
+#define AD7791_FILTER_CLK_DIV_4 (0x2 << 4)
+#define AD7791_FILTER_CLK_DIV_8 (0x3 << 4)
+#define AD7791_FILTER_CLK_MASK (0x3 << 4)
+#define AD7791_FILTER_RATE_120 0x0
+#define AD7791_FILTER_RATE_100 0x1
+#define AD7791_FILTER_RATE_33_3 0x2
+#define AD7791_FILTER_RATE_20 0x3
+#define AD7791_FILTER_RATE_16_6 0x4
+#define AD7791_FILTER_RATE_16_7 0x5
+#define AD7791_FILTER_RATE_13_3 0x6
+#define AD7791_FILTER_RATE_9_5 0x7
+#define AD7791_FILTER_RATE_MASK 0x7
+
+#define AD7791_MODE_BUFFER BIT(1)
+#define AD7791_MODE_UNIPOLAR BIT(2)
+#define AD7791_MODE_BURNOUT BIT(3)
+#define AD7791_MODE_SEL_MASK (0x3 << 6)
+#define AD7791_MODE_SEL(x) ((x) << 6)
+
+#define DECLARE_AD7787_CHANNELS(name, bits, storagebits) \
+const struct iio_chan_spec name[] = { \
+ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ (bits), (storagebits), 0), \
+ AD_SD_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
+ AD_SD_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
+ (bits), (storagebits), 0), \
+ AD_SD_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
+ (bits), (storagebits), 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DECLARE_AD7791_CHANNELS(name, bits, storagebits) \
+const struct iio_chan_spec name[] = { \
+ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ (bits), (storagebits), 0), \
+ AD_SD_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
+ (bits), (storagebits), 0), \
+ AD_SD_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
+ (bits), (storagebits), 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(3), \
+}
+
+static DECLARE_AD7787_CHANNELS(ad7787_channels, 24, 32);
+static DECLARE_AD7791_CHANNELS(ad7790_channels, 16, 16);
+static DECLARE_AD7791_CHANNELS(ad7791_channels, 24, 32);
+
+enum {
+ AD7787,
+ AD7788,
+ AD7789,
+ AD7790,
+ AD7791,
+};
+
+enum ad7791_chip_info_flags {
+ AD7791_FLAG_HAS_FILTER = (1 << 0),
+ AD7791_FLAG_HAS_BUFFER = (1 << 1),
+ AD7791_FLAG_HAS_UNIPOLAR = (1 << 2),
+ AD7791_FLAG_HAS_BURNOUT = (1 << 3),
+};
+
+struct ad7791_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ enum ad7791_chip_info_flags flags;
+};
+
+static const struct ad7791_chip_info ad7791_chip_infos[] = {
+ [AD7787] = {
+ .channels = ad7787_channels,
+ .num_channels = ARRAY_SIZE(ad7787_channels),
+ .flags = AD7791_FLAG_HAS_FILTER | AD7791_FLAG_HAS_BUFFER |
+ AD7791_FLAG_HAS_UNIPOLAR | AD7791_FLAG_HAS_BURNOUT,
+ },
+ [AD7788] = {
+ .channels = ad7790_channels,
+ .num_channels = ARRAY_SIZE(ad7790_channels),
+ .flags = AD7791_FLAG_HAS_UNIPOLAR,
+ },
+ [AD7789] = {
+ .channels = ad7791_channels,
+ .num_channels = ARRAY_SIZE(ad7791_channels),
+ .flags = AD7791_FLAG_HAS_UNIPOLAR,
+ },
+ [AD7790] = {
+ .channels = ad7790_channels,
+ .num_channels = ARRAY_SIZE(ad7790_channels),
+ .flags = AD7791_FLAG_HAS_FILTER | AD7791_FLAG_HAS_BUFFER |
+ AD7791_FLAG_HAS_BURNOUT,
+ },
+ [AD7791] = {
+ .channels = ad7791_channels,
+ .num_channels = ARRAY_SIZE(ad7791_channels),
+ .flags = AD7791_FLAG_HAS_FILTER | AD7791_FLAG_HAS_BUFFER |
+ AD7791_FLAG_HAS_UNIPOLAR | AD7791_FLAG_HAS_BURNOUT,
+ },
+};
+
+struct ad7791_state {
+ struct ad_sigma_delta sd;
+ uint8_t mode;
+ uint8_t filter;
+
+ struct regulator *reg;
+ const struct ad7791_chip_info *info;
+};
+
+/* Map the embedded ad_sigma_delta back to its containing driver state. */
+static struct ad7791_state *ad_sigma_delta_to_ad7791(struct ad_sigma_delta *sd)
+{
+ return container_of(sd, struct ad7791_state, sd);
+}
+
+/* Channel selection is carried in the communications-register bits. */
+static int ad7791_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+{
+ ad_sd_set_comm(sd, channel);
+
+ return 0;
+}
+
+/*
+ * Translate the generic sigma-delta mode into the AD7791 mode-select bits
+ * and write the cached mode register back.  IDLE has no native equivalent
+ * on this part, so it maps to power-down.
+ */
+static int ad7791_set_mode(struct ad_sigma_delta *sd,
+ enum ad_sigma_delta_mode mode)
+{
+ struct ad7791_state *st = ad_sigma_delta_to_ad7791(sd);
+
+ switch (mode) {
+ case AD_SD_MODE_CONTINUOUS:
+ mode = AD7791_MODE_CONTINUOUS;
+ break;
+ case AD_SD_MODE_SINGLE:
+ mode = AD7791_MODE_SINGLE;
+ break;
+ case AD_SD_MODE_IDLE:
+ case AD_SD_MODE_POWERDOWN:
+ mode = AD7791_MODE_POWERDOWN;
+ break;
+ }
+
+ st->mode &= ~AD7791_MODE_SEL_MASK;
+ st->mode |= AD7791_MODE_SEL(mode);
+
+ return ad_sd_write_reg(sd, AD7791_REG_MODE, sizeof(st->mode), st->mode);
+}
+
+static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
+ .set_channel = ad7791_set_channel,
+ .set_mode = ad7791_set_mode,
+ .has_registers = true,
+ .addr_shift = 4,
+ .read_mask = BIT(3),
+};
+
+/*
+ * RAW triggers a single conversion through the sigma-delta core; OFFSET
+ * and SCALE depend on whether the cached mode register has the part
+ * configured unipolar or bipolar.
+ */
+static int ad7791_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct ad7791_state *st = iio_priv(indio_dev);
+ bool unipolar = !!(st->mode & AD7791_MODE_UNIPOLAR);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_OFFSET:
+ /**
+ * Unipolar: 0 to VREF
+ * Bipolar -VREF to VREF
+ **/
+ if (unipolar)
+ *val = 0;
+ else
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* The monitor channel uses an internal reference. */
+ if (chan->address == AD7791_CH_AVDD_MONITOR) {
+ /*
+ * The signal is attenuated by a factor of 5 and
+ * compared against a 1.17V internal reference.
+ */
+ *val = 1170 * 5;
+ } else {
+ int voltage_uv;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ if (voltage_uv < 0)
+ return voltage_uv;
+
+ *val = voltage_uv / 1000;
+ }
+ /* bipolar mode halves the span, so one fewer fractional bit */
+ if (unipolar)
+ *val2 = chan->scan_type.realbits;
+ else
+ *val2 = chan->scan_type.realbits - 1;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const ad7791_sample_freq_avail[] = {
+ [AD7791_FILTER_RATE_120] = "120",
+ [AD7791_FILTER_RATE_100] = "100",
+ [AD7791_FILTER_RATE_33_3] = "33.3",
+ [AD7791_FILTER_RATE_20] = "20",
+ [AD7791_FILTER_RATE_16_6] = "16.6",
+ [AD7791_FILTER_RATE_16_7] = "16.7",
+ [AD7791_FILTER_RATE_13_3] = "13.3",
+ [AD7791_FILTER_RATE_9_5] = "9.5",
+};
+
+/* sysfs show: report the current filter rate from the cached filter reg. */
+static ssize_t ad7791_read_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7791_state *st = iio_priv(indio_dev);
+ unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
+
+ return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
+}
+
+/*
+ * sysfs store: match the written string against the supported rates and
+ * program the filter register accordingly.
+ *
+ * NOTE(review): the buffer-enabled check releases mlock before the filter
+ * update re-takes it, so a buffer could be enabled in between - confirm
+ * whether this race is acceptable here.
+ */
+static ssize_t ad7791_write_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7791_state *st = iio_priv(indio_dev);
+ int i, ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) {
+ if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) {
+
+ mutex_lock(&indio_dev->mlock);
+ st->filter &= ~AD7791_FILTER_RATE_MASK;
+ st->filter |= i;
+ ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
+ sizeof(st->filter), st->filter);
+ mutex_unlock(&indio_dev->mlock);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ad7791_read_frequency,
+ ad7791_write_frequency);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
+
+static struct attribute *ad7791_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7791_attribute_group = {
+ .attrs = ad7791_attributes,
+};
+
+static const struct iio_info ad7791_info = {
+ .read_raw = &ad7791_read_raw,
+ .attrs = &ad7791_attribute_group,
+ .validate_trigger = ad_sd_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_info ad7791_no_filter_info = {
+ .read_raw = &ad7791_read_raw,
+ .validate_trigger = ad_sd_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Apply platform data on top of the power-on defaults, honouring only the
+ * option bits the detected chip actually supports, then write the mode
+ * register.  With no platform data the defaults are kept and no register
+ * write is needed.
+ */
+static int ad7791_setup(struct ad7791_state *st,
+ struct ad7791_platform_data *pdata)
+{
+ /* Set to poweron-reset default values */
+ st->mode = AD7791_MODE_BUFFER;
+ st->filter = AD7791_FILTER_RATE_16_6;
+
+ if (!pdata)
+ return 0;
+
+ if ((st->info->flags & AD7791_FLAG_HAS_BUFFER) && !pdata->buffered)
+ st->mode &= ~AD7791_MODE_BUFFER;
+
+ if ((st->info->flags & AD7791_FLAG_HAS_BURNOUT) &&
+ pdata->burnout_current)
+ st->mode |= AD7791_MODE_BURNOUT;
+
+ if ((st->info->flags & AD7791_FLAG_HAS_UNIPOLAR) && pdata->unipolar)
+ st->mode |= AD7791_MODE_UNIPOLAR;
+
+ return ad_sd_write_reg(&st->sd, AD7791_REG_MODE, sizeof(st->mode),
+ st->mode);
+}
+
+/*
+ * The sigma-delta core needs the SPI IRQ for conversion-ready signalling,
+ * so probing fails without one.  Otherwise: enable the refin supply,
+ * initialise the ad_sigma_delta core, pick the filter or no-filter info
+ * ops per chip capability, apply platform data and register the device.
+ */
+static int ad7791_probe(struct spi_device *spi)
+{
+ struct ad7791_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
+ struct ad7791_state *st;
+ int ret;
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "Missing IRQ.\n");
+ return -ENXIO;
+ }
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->reg = devm_regulator_get(&spi->dev, "refin");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ st->info = &ad7791_chip_infos[spi_get_device_id(spi)->driver_data];
+ ad_sd_init(&st->sd, indio_dev, spi, &ad7791_sigma_delta_info);
+
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ if (st->info->flags & AD7791_FLAG_HAS_FILTER)
+ indio_dev->info = &ad7791_info;
+ else
+ indio_dev->info = &ad7791_no_filter_info;
+
+ ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad7791_setup(st, pdata);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
+ return 0;
+
+error_remove_trigger:
+ ad_sd_cleanup_buffer_and_trigger(indio_dev);
+error_disable_reg:
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/* Unwind probe: IIO core, sigma-delta buffer/trigger, then refin supply. */
+static int ad7791_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7791_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ ad_sd_cleanup_buffer_and_trigger(indio_dev);
+
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7791_spi_ids[] = {
+ { "ad7787", AD7787 },
+ { "ad7788", AD7788 },
+ { "ad7789", AD7789 },
+ { "ad7790", AD7790 },
+ { "ad7791", AD7791 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7791_spi_ids);
+
+static struct spi_driver ad7791_driver = {
+ .driver = {
+ .name = "ad7791",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7791_probe,
+ .remove = ad7791_remove,
+ .id_table = ad7791_spi_ids,
+};
+module_spi_driver(ad7791_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Device AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
new file mode 100644
index 00000000000000..4dddeabdfbb02a
--- /dev/null
+++ b/drivers/iio/adc/ad7793.c
@@ -0,0 +1,865 @@
+/*
+ * AD7785/AD7792/AD7793/AD7794/AD7795 SPI ADC driver
+ *
+ * Copyright 2011-2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/adc/ad_sigma_delta.h>
+#include <linux/platform_data/ad7793.h>
+
+/* Registers */
+#define AD7793_REG_COMM 0 /* Communications Register (WO, 8-bit) */
+#define AD7793_REG_STAT 0 /* Status Register (RO, 8-bit) */
+#define AD7793_REG_MODE 1 /* Mode Register (RW, 16-bit */
+#define AD7793_REG_CONF 2 /* Configuration Register (RW, 16-bit) */
+#define AD7793_REG_DATA 3 /* Data Register (RO, 16-/24-bit) */
+#define AD7793_REG_ID 4 /* ID Register (RO, 8-bit) */
+#define AD7793_REG_IO 5 /* IO Register (RO, 8-bit) */
+#define AD7793_REG_OFFSET 6 /* Offset Register (RW, 16-bit
+ * (AD7792)/24-bit (AD7793)) */
+#define AD7793_REG_FULLSALE 7 /* Full-Scale Register
+ * (RW, 16-bit (AD7792)/24-bit (AD7793)) */
+
+/* Communications Register Bit Designations (AD7793_REG_COMM) */
+#define AD7793_COMM_WEN (1 << 7) /* Write Enable */
+#define AD7793_COMM_WRITE (0 << 6) /* Write Operation */
+#define AD7793_COMM_READ (1 << 6) /* Read Operation */
+#define AD7793_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
+#define AD7793_COMM_CREAD (1 << 2) /* Continuous Read of Data Register */
+
+/* Status Register Bit Designations (AD7793_REG_STAT) */
+#define AD7793_STAT_RDY (1 << 7) /* Ready */
+#define AD7793_STAT_ERR (1 << 6) /* Error (Overrange, Underrange) */
+#define AD7793_STAT_CH3 (1 << 2) /* Channel 3 */
+#define AD7793_STAT_CH2 (1 << 1) /* Channel 2 */
+#define AD7793_STAT_CH1 (1 << 0) /* Channel 1 */
+
+/* Mode Register Bit Designations (AD7793_REG_MODE) */
+#define AD7793_MODE_SEL(x) (((x) & 0x7) << 13) /* Operation Mode Select */
+#define AD7793_MODE_SEL_MASK (0x7 << 13) /* Operation Mode Select mask */
+#define AD7793_MODE_CLKSRC(x) (((x) & 0x3) << 6) /* ADC Clock Source Select */
+#define AD7793_MODE_RATE(x) ((x) & 0xF) /* Filter Update Rate Select */
+
+#define AD7793_MODE_CONT 0 /* Continuous Conversion Mode */
+#define AD7793_MODE_SINGLE 1 /* Single Conversion Mode */
+#define AD7793_MODE_IDLE 2 /* Idle Mode */
+#define AD7793_MODE_PWRDN 3 /* Power-Down Mode */
+#define AD7793_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */
+#define AD7793_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
+
+#define AD7793_CLK_INT 0 /* Internal 64 kHz Clock not
+ * available at the CLK pin */
+#define AD7793_CLK_INT_CO 1 /* Internal 64 kHz Clock available
+ * at the CLK pin */
+#define AD7793_CLK_EXT 2 /* External 64 kHz Clock */
+#define AD7793_CLK_EXT_DIV2 3 /* External Clock divided by 2 */
+
+/* Configuration Register Bit Designations (AD7793_REG_CONF) */
+#define AD7793_CONF_VBIAS(x) (((x) & 0x3) << 14) /* Bias Voltage
+ * Generator Enable */
+#define AD7793_CONF_BO_EN (1 << 13) /* Burnout Current Enable */
+#define AD7793_CONF_UNIPOLAR (1 << 12) /* Unipolar/Bipolar Enable */
+#define AD7793_CONF_BOOST (1 << 11) /* Boost Enable */
+#define AD7793_CONF_GAIN(x) (((x) & 0x7) << 8) /* Gain Select */
+#define AD7793_CONF_REFSEL(x) ((x) << 6) /* INT/EXT Reference Select */
+#define AD7793_CONF_BUF (1 << 4) /* Buffered Mode Enable */
+#define AD7793_CONF_CHAN(x) ((x) & 0xf) /* Channel select */
+#define AD7793_CONF_CHAN_MASK 0xf /* Channel select mask */
+
+#define AD7793_CH_AIN1P_AIN1M 0 /* AIN1(+) - AIN1(-) */
+#define AD7793_CH_AIN2P_AIN2M 1 /* AIN2(+) - AIN2(-) */
+#define AD7793_CH_AIN3P_AIN3M 2 /* AIN3(+) - AIN3(-) */
+#define AD7793_CH_AIN1M_AIN1M 3 /* AIN1(-) - AIN1(-) */
+#define AD7793_CH_TEMP 6 /* Temp Sensor */
+#define AD7793_CH_AVDD_MONITOR 7 /* AVDD Monitor */
+
+#define AD7795_CH_AIN4P_AIN4M 4 /* AIN4(+) - AIN4(-) */
+#define AD7795_CH_AIN5P_AIN5M 5 /* AIN5(+) - AIN5(-) */
+#define AD7795_CH_AIN6P_AIN6M 6 /* AIN6(+) - AIN6(-) */
+#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */
+
+/* ID Register Bit Designations (AD7793_REG_ID) */
+#define AD7785_ID 0xB
+#define AD7792_ID 0xA
+#define AD7793_ID 0xB
+#define AD7794_ID 0xF
+#define AD7795_ID 0xF
+#define AD7796_ID 0xA
+#define AD7797_ID 0xB
+#define AD7798_ID 0x8
+#define AD7799_ID 0x9
+#define AD7793_ID_MASK 0xF
+
+/* IO (Excitation Current Sources) Register Bit Designations (AD7793_REG_IO) */
+#define AD7793_IO_IEXC1_IOUT1_IEXC2_IOUT2 0 /* IEXC1 connect to IOUT1,
+ * IEXC2 connect to IOUT2 */
+#define AD7793_IO_IEXC1_IOUT2_IEXC2_IOUT1 1 /* IEXC1 connect to IOUT2,
+ * IEXC2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT1 2 /* Both current sources
+ * IEXC1,2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT2 3 /* Both current sources
+ * IEXC1,2 connect to IOUT2 */
+
+#define AD7793_IO_IXCEN_10uA (1 << 0) /* Excitation Current 10uA */
+#define AD7793_IO_IXCEN_210uA (2 << 0) /* Excitation Current 210uA */
+#define AD7793_IO_IXCEN_1mA (3 << 0) /* Excitation Current 1mA */
+
+/* NOTE:
+ * The AD7792/AD7793 features a dual use data out ready DOUT/RDY output.
+ * In order to avoid contentions on the SPI bus, it's therefore necessary
+ * to use spi bus locking.
+ *
+ * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
+ */
+
+#define AD7793_FLAG_HAS_CLKSEL BIT(0)
+#define AD7793_FLAG_HAS_REFSEL BIT(1)
+#define AD7793_FLAG_HAS_VBIAS BIT(2)
+#define AD7793_HAS_EXITATION_CURRENT BIT(3)
+#define AD7793_FLAG_HAS_GAIN BIT(4)
+#define AD7793_FLAG_HAS_BUFFER BIT(5)
+
+/*
+ * ad7793_chip_info - constant per-device-type data
+ * @id: expected value of the ID register (low nibble, see AD779x_ID)
+ * @channels, @num_channels: IIO channel specification for this part
+ * @flags: AD7793_FLAG_* capability bits used to gate optional features
+ * @iio_info: IIO ops/attributes (AD7793-style vs. reduced AD7797-style)
+ * @sample_freq_avail: sample frequency per filter rate code
+ */
+struct ad7793_chip_info {
+	unsigned int id;
+	const struct iio_chan_spec *channels;
+	unsigned int num_channels;
+	unsigned int flags;
+
+	const struct iio_info *iio_info;
+	const u16 *sample_freq_avail;
+};
+
+/*
+ * ad7793_state - driver instance state
+ * @reg: external reference regulator (NULL-equivalent path when the
+ *       internal reference is selected; see probe)
+ * @mode, @conf: cached copies of the MODE and CONF registers
+ * @scale_avail: per-gain {integer, nano} scale pairs, filled in setup
+ * @sd: embedded sigma-delta core state (must allow container_of lookup)
+ */
+struct ad7793_state {
+	const struct ad7793_chip_info *chip_info;
+	struct regulator *reg;
+	u16 int_vref_mv;
+	u16 mode;
+	u16 conf;
+	u32 scale_avail[8][2];
+
+	struct ad_sigma_delta sd;
+
+};
+
+/* Indices into ad7793_chip_info_tbl[]; also used as SPI id driver_data */
+enum ad7793_supported_device_ids {
+	ID_AD7785,
+	ID_AD7792,
+	ID_AD7793,
+	ID_AD7794,
+	ID_AD7795,
+	ID_AD7796,
+	ID_AD7797,
+	ID_AD7798,
+	ID_AD7799,
+};
+
+/* Map the embedded ad_sigma_delta core back to its containing state. */
+static struct ad7793_state *ad_sigma_delta_to_ad7793(struct ad_sigma_delta *sd)
+{
+	return container_of(sd, struct ad7793_state, sd);
+}
+
+/*
+ * Sigma-delta core callback: select the input channel by updating the
+ * cached CONF register and writing it back to the device.
+ */
+static int ad7793_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+{
+	struct ad7793_state *st = ad_sigma_delta_to_ad7793(sd);
+
+	st->conf &= ~AD7793_CONF_CHAN_MASK;
+	st->conf |= AD7793_CONF_CHAN(channel);
+
+	return ad_sd_write_reg(&st->sd, AD7793_REG_CONF, 2, st->conf);
+}
+
+/*
+ * Sigma-delta core callback: select the operating mode (continuous,
+ * single, idle, calibration, ...) via the cached MODE register.
+ */
+static int ad7793_set_mode(struct ad_sigma_delta *sd,
+	enum ad_sigma_delta_mode mode)
+{
+	struct ad7793_state *st = ad_sigma_delta_to_ad7793(sd);
+
+	st->mode &= ~AD7793_MODE_SEL_MASK;
+	st->mode |= AD7793_MODE_SEL(mode);
+
+	return ad_sd_write_reg(&st->sd, AD7793_REG_MODE, 2, st->mode);
+}
+
+/* Callbacks and register access layout for the shared ad_sigma_delta core */
+static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
+	.set_channel = ad7793_set_channel,
+	.set_mode = ad7793_set_mode,
+	.has_registers = true,
+	.addr_shift = 3,
+	.read_mask = BIT(6),
+};
+
+/* Internal zero- and full-scale calibration, run on each of AIN1..AIN3 */
+static const struct ad_sd_calib_data ad7793_calib_arr[6] = {
+	{AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN1P_AIN1M},
+	{AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN1P_AIN1M},
+	{AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN2P_AIN2M},
+	{AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN2P_AIN2M},
+	{AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN3P_AIN3M},
+	{AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN3P_AIN3M}
+};
+
+/* Run the full calibration sequence above; returns 0 or negative errno. */
+static int ad7793_calibrate_all(struct ad7793_state *st)
+{
+	return ad_sd_calibrate_all(&st->sd, ad7793_calib_arr,
+				   ARRAY_SIZE(ad7793_calib_arr));
+}
+
+/*
+ * Validate the platform data against the capabilities of the selected
+ * chip type. Returns -EINVAL if a requested option is not supported.
+ */
+static int ad7793_check_platform_data(struct ad7793_state *st,
+	const struct ad7793_platform_data *pdata)
+{
+	/* both sources on one output pin: only 10uA/210uA are valid */
+	if ((pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT1 ||
+		pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT2) &&
+		((pdata->exitation_current != AD7793_IX_10uA) &&
+		(pdata->exitation_current != AD7793_IX_210uA)))
+		return -EINVAL;
+
+	if (!(st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL) &&
+		pdata->clock_src != AD7793_CLK_SRC_INT)
+		return -EINVAL;
+
+	if (!(st->chip_info->flags & AD7793_FLAG_HAS_REFSEL) &&
+		pdata->refsel != AD7793_REFSEL_REFIN1)
+		return -EINVAL;
+
+	if (!(st->chip_info->flags & AD7793_FLAG_HAS_VBIAS) &&
+		pdata->bias_voltage != AD7793_BIAS_VOLTAGE_DISABLED)
+		return -EINVAL;
+
+	if (!(st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) &&
+		pdata->exitation_current != AD7793_IX_DISABLED)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * ad7793_setup() - reset, identify and configure the ADC
+ * @indio_dev: IIO device with an ad7793_state as private data
+ * @pdata: platform configuration (validated here before use)
+ * @vref_mv: reference voltage in millivolts, used for the scale table
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int ad7793_setup(struct iio_dev *indio_dev,
+	const struct ad7793_platform_data *pdata,
+	unsigned int vref_mv)
+{
+	struct ad7793_state *st = iio_priv(indio_dev);
+	/* clocking out 32 consecutive 1s resets the serial interface */
+	static const u8 reset_buf[4] = { 0xff, 0xff, 0xff, 0xff };
+	int i, ret;
+	unsigned long long scale_uv;
+	u32 id;
+
+	ret = ad7793_check_platform_data(st, pdata);
+	if (ret)
+		return ret;
+
+	/*
+	 * Reset the serial interface. Writing the bytes of 'ret' here, as
+	 * the code previously did, sends zeros (ret is 0 after the check
+	 * above) and does not reset the part.
+	 */
+	ret = spi_write(st->sd.spi, reset_buf, sizeof(reset_buf));
+	if (ret < 0)
+		goto out;
+	usleep_range(500, 2000); /* Wait for at least 500us */
+
+	/* write/read test for device presence */
+	ret = ad_sd_read_reg(&st->sd, AD7793_REG_ID, 1, &id);
+	if (ret)
+		goto out;
+
+	id &= AD7793_ID_MASK;
+
+	if (id != st->chip_info->id) {
+		dev_err(&st->sd.spi->dev, "device ID query failed\n");
+		/* must report failure; ret is 0 here after a good read */
+		ret = -ENODEV;
+		goto out;
+	}
+
+	st->mode = AD7793_MODE_RATE(1);
+	st->conf = 0;
+
+	/* only apply options the detected chip type actually supports */
+	if (st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL)
+		st->mode |= AD7793_MODE_CLKSRC(pdata->clock_src);
+	if (st->chip_info->flags & AD7793_FLAG_HAS_REFSEL)
+		st->conf |= AD7793_CONF_REFSEL(pdata->refsel);
+	if (st->chip_info->flags & AD7793_FLAG_HAS_VBIAS)
+		st->conf |= AD7793_CONF_VBIAS(pdata->bias_voltage);
+	if (pdata->buffered || !(st->chip_info->flags & AD7793_FLAG_HAS_BUFFER))
+		st->conf |= AD7793_CONF_BUF;
+	if (pdata->boost_enable &&
+		(st->chip_info->flags & AD7793_FLAG_HAS_VBIAS))
+		st->conf |= AD7793_CONF_BOOST;
+	if (pdata->burnout_current)
+		st->conf |= AD7793_CONF_BO_EN;
+	if (pdata->unipolar)
+		st->conf |= AD7793_CONF_UNIPOLAR;
+
+	/* parts without a programmable gain run at fixed maximum gain */
+	if (!(st->chip_info->flags & AD7793_FLAG_HAS_GAIN))
+		st->conf |= AD7793_CONF_GAIN(7);
+
+	ret = ad7793_set_mode(&st->sd, AD_SD_MODE_IDLE);
+	if (ret)
+		goto out;
+
+	ret = ad7793_set_channel(&st->sd, 0);
+	if (ret)
+		goto out;
+
+	if (st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) {
+		ret = ad_sd_write_reg(&st->sd, AD7793_REG_IO, 1,
+				pdata->exitation_current |
+				(pdata->current_source_direction << 2));
+		if (ret)
+			goto out;
+	}
+
+	ret = ad7793_calibrate_all(st);
+	if (ret)
+		goto out;
+
+	/* Populate available ADC input ranges, one entry per gain setting */
+	for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+		scale_uv = ((u64)vref_mv * 100000000)
+			>> (st->chip_info->channels[0].scan_type.realbits -
+			(!!(st->conf & AD7793_CONF_UNIPOLAR) ? 0 : 1));
+		scale_uv >>= i;
+
+		st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
+		st->scale_avail[i][0] = scale_uv;
+	}
+
+	return 0;
+out:
+	dev_err(&st->sd.spi->dev, "setup failed\n");
+	return ret;
+}
+
+/* Sample rates in Hz, indexed by the 4-bit filter rate code; 0 = unused */
+static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
+					33, 19, 17, 16, 12, 10, 8, 6, 4};
+
+/* AD7796/AD7797 support only a subset of the rate codes */
+static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
+					33, 0, 17, 16, 12, 10, 8, 6, 4};
+
+/* sysfs: report the current sample rate derived from the cached MODE reg */
+static ssize_t ad7793_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7793_state *st = iio_priv(indio_dev);
+
+	return sprintf(buf, "%d\n",
+	       st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
+}
+
+/*
+ * sysfs: set the sample rate by looking up the matching filter rate code.
+ * Rejected with -EBUSY while a buffered capture is active and with
+ * -EINVAL for frequencies not in the chip's table.
+ */
+static ssize_t ad7793_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7793_state *st = iio_priv(indio_dev);
+	long lval;
+	int i, ret;
+
+	ret = kstrtol(buf, 10, &lval);
+	if (ret)
+		return ret;
+
+	if (lval == 0)
+		return -EINVAL;
+
+	/* map the requested frequency to its rate code */
+	for (i = 0; i < 16; i++)
+		if (lval == st->chip_info->sample_freq_avail[i])
+			break;
+	if (i == 16)
+		return -EINVAL;
+
+	/*
+	 * Hold the lock across both the buffer-enabled check and the
+	 * register update; dropping it in between would let a buffered
+	 * capture start while we reprogram the mode register.
+	 */
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_enabled(indio_dev)) {
+		mutex_unlock(&indio_dev->mlock);
+		return -EBUSY;
+	}
+	st->mode &= ~AD7793_MODE_RATE(-1);
+	st->mode |= AD7793_MODE_RATE(i);
+	ret = ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
+			st->mode);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+/* sampling_frequency: read/write via the handlers above */
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		ad7793_read_frequency,
+		ad7793_write_frequency);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+	"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
+
+/* reduced list for AD7796/AD7797 (see ad7797_sample_freq_avail) */
+static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797,
+	sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4");
+
+/* sysfs: list the selectable scales (one per gain), built in setup */
+static ssize_t ad7793_show_scale_available(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7793_state *st = iio_priv(indio_dev);
+	int i, len = 0;
+
+	for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+		len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
+			       st->scale_avail[i][1]);
+
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+/* exposed as in_voltage-voltage_scale_available */
+static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
+		in_voltage-voltage_scale_available, S_IRUGO,
+		ad7793_show_scale_available, NULL, 0);
+
+/* full attribute set: rate, rate list and per-gain scale list */
+static struct attribute *ad7793_attributes[] = {
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group ad7793_attribute_group = {
+	.attrs = ad7793_attributes,
+};
+
+/* AD7796/AD7797: fixed gain, so no scale_available attribute */
+static struct attribute *ad7797_attributes[] = {
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group ad7797_attribute_group = {
+	.attrs = ad7797_attributes,
+};
+
+/*
+ * IIO read_raw handler.
+ * RAW: single sigma-delta conversion on the channel.
+ * SCALE: for differential voltage channels, look up the per-gain table
+ * built in setup; for single-ended/temperature, compute from constants.
+ * OFFSET: mid-scale offset in bipolar mode, plus Kelvin->Celsius shift
+ * for the temperature channel.
+ */
+static int ad7793_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	struct ad7793_state *st = iio_priv(indio_dev);
+	int ret;
+	unsigned long long scale_uv;
+	bool unipolar = !!(st->conf & AD7793_CONF_UNIPOLAR);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ad_sigma_delta_single_conversion(indio_dev, chan, val);
+		if (ret < 0)
+			return ret;
+
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_VOLTAGE:
+			if (chan->differential) {
+				/* gain bits live at CONF[10:8] */
+				*val = st->
+					scale_avail[(st->conf >> 8) & 0x7][0];
+				*val2 = st->
+					scale_avail[(st->conf >> 8) & 0x7][1];
+				return IIO_VAL_INT_PLUS_NANO;
+			} else {
+				/* 1170mV / 2^23 * 6 */
+				scale_uv = (1170ULL * 1000000000ULL * 6ULL);
+			}
+			break;
+		case IIO_TEMP:
+				/* 1170mV / 0.81 mV/C / 2^23 */
+				scale_uv = 1444444444444444ULL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* bipolar mode loses one bit of range to the sign */
+		scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1));
+		*val = 0;
+		*val2 = scale_uv;
+		return IIO_VAL_INT_PLUS_NANO;
+	case IIO_CHAN_INFO_OFFSET:
+		if (!unipolar)
+			*val = -(1 << (chan->scan_type.realbits - 1));
+		else
+			*val = 0;
+
+		/* Kelvin to Celsius */
+		if (chan->type == IIO_TEMP) {
+			unsigned long long offset;
+			unsigned int shift;
+
+			shift = chan->scan_type.realbits - (unipolar ? 0 : 1);
+			offset = 273ULL << shift;
+			do_div(offset, 1444);
+			*val -= offset;
+		}
+		return IIO_VAL_INT;
+	}
+	return -EINVAL;
+}
+
+/*
+ * IIO write_raw handler. Only SCALE is writable: the requested scale is
+ * matched against the per-gain table and the corresponding gain code is
+ * programmed, followed by a recalibration.
+ */
+static int ad7793_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val,
+			       int val2,
+			       long mask)
+{
+	struct ad7793_state *st = iio_priv(indio_dev);
+	int ret, i;
+	unsigned int tmp;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_enabled(indio_dev)) {
+		mutex_unlock(&indio_dev->mlock);
+		return -EBUSY;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		ret = -EINVAL;
+		/* only the nano part is compared; integer part is 0 here */
+		for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+			if (val2 == st->scale_avail[i][1]) {
+				ret = 0;
+				tmp = st->conf;
+				st->conf &= ~AD7793_CONF_GAIN(-1);
+				st->conf |= AD7793_CONF_GAIN(i);
+
+				/* gain unchanged: skip the register write */
+				if (tmp == st->conf)
+					break;
+
+				/*
+				 * NOTE(review): return values of the write
+				 * and recalibration are ignored here —
+				 * confirm whether failures should propagate.
+				 */
+				ad_sd_write_reg(&st->sd, AD7793_REG_CONF,
+						sizeof(st->conf), st->conf);
+				ad7793_calibrate_all(st);
+				break;
+			}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
+
+/* All writable values (scale) are expressed as integer + nano pairs. */
+static int ad7793_write_raw_get_fmt(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       long mask)
+{
+	return IIO_VAL_INT_PLUS_NANO;
+}
+
+/* IIO ops for the full-featured parts (programmable gain etc.) */
+static const struct iio_info ad7793_info = {
+	.read_raw = &ad7793_read_raw,
+	.write_raw = &ad7793_write_raw,
+	.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+	.attrs = &ad7793_attribute_group,
+	.validate_trigger = ad_sd_validate_trigger,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * IIO ops for AD7796/AD7797. These parts use their own attribute group:
+ * they have no programmable gain, so the scale_available attribute and
+ * the AD7793 sample-rate list must not be exposed for them.
+ */
+static const struct iio_info ad7797_info = {
+	.read_raw = &ad7793_read_raw,
+	.write_raw = &ad7793_write_raw,
+	.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+	.attrs = &ad7797_attribute_group,
+	.validate_trigger = ad_sd_validate_trigger,
+	.driver_module = THIS_MODULE,
+};
+
+/* 3 differential inputs + shorted, temperature and AVDD monitor channels */
+#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
+const struct iio_chan_spec _name##_channels[] = { \
+	AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
+	AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
+	AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
+	AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
+	AD_SD_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
+	AD_SD_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
+	IIO_CHAN_SOFT_TIMESTAMP(6), \
+}
+
+/* AD7794/AD7795 variant with 6 differential inputs */
+#define DECLARE_AD7795_CHANNELS(_name, _b, _sb) \
+const struct iio_chan_spec _name##_channels[] = { \
+	AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
+	AD_SD_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+	AD_SD_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
+	AD_SD_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+	IIO_CHAN_SOFT_TIMESTAMP(9), \
+}
+
+/* AD7796/AD7797 variant with a single differential input */
+#define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \
+const struct iio_chan_spec _name##_channels[] = { \
+	AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+	AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+	AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
+	AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+	IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+/* AD7798/AD7799 variant: no temperature sensor channel */
+#define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \
+const struct iio_chan_spec _name##_channels[] = { \
+	AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+	AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+	AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+	AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+	IIO_CHAN_SOFT_TIMESTAMP(5), \
+}
+
+/* args: name, resolution bits, storage bits[, shift] */
+static DECLARE_AD7793_CHANNELS(ad7785, 20, 32, 4);
+static DECLARE_AD7793_CHANNELS(ad7792, 16, 32, 0);
+static DECLARE_AD7793_CHANNELS(ad7793, 24, 32, 0);
+static DECLARE_AD7795_CHANNELS(ad7794, 16, 32);
+static DECLARE_AD7795_CHANNELS(ad7795, 24, 32);
+static DECLARE_AD7797_CHANNELS(ad7796, 16, 16);
+static DECLARE_AD7797_CHANNELS(ad7797, 24, 32);
+static DECLARE_AD7799_CHANNELS(ad7798, 16, 16);
+static DECLARE_AD7799_CHANNELS(ad7799, 24, 32);
+
+/*
+ * Per-part capability table, indexed by enum ad7793_supported_device_ids
+ * (which is also the SPI id table's driver_data).
+ */
+static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
+	[ID_AD7785] = {
+		.id = AD7785_ID,
+		.channels = ad7785_channels,
+		.num_channels = ARRAY_SIZE(ad7785_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL |
+			AD7793_FLAG_HAS_REFSEL |
+			AD7793_FLAG_HAS_VBIAS |
+			AD7793_HAS_EXITATION_CURRENT |
+			AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7792] = {
+		.id = AD7792_ID,
+		.channels = ad7792_channels,
+		.num_channels = ARRAY_SIZE(ad7792_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL |
+			AD7793_FLAG_HAS_REFSEL |
+			AD7793_FLAG_HAS_VBIAS |
+			AD7793_HAS_EXITATION_CURRENT |
+			AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7793] = {
+		.id = AD7793_ID,
+		.channels = ad7793_channels,
+		.num_channels = ARRAY_SIZE(ad7793_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL |
+			AD7793_FLAG_HAS_REFSEL |
+			AD7793_FLAG_HAS_VBIAS |
+			AD7793_HAS_EXITATION_CURRENT |
+			AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7794] = {
+		.id = AD7794_ID,
+		.channels = ad7794_channels,
+		.num_channels = ARRAY_SIZE(ad7794_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL |
+			AD7793_FLAG_HAS_REFSEL |
+			AD7793_FLAG_HAS_VBIAS |
+			AD7793_HAS_EXITATION_CURRENT |
+			AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7795] = {
+		.id = AD7795_ID,
+		.channels = ad7795_channels,
+		.num_channels = ARRAY_SIZE(ad7795_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL |
+			AD7793_FLAG_HAS_REFSEL |
+			AD7793_FLAG_HAS_VBIAS |
+			AD7793_HAS_EXITATION_CURRENT |
+			AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7796] = {
+		.id = AD7796_ID,
+		.channels = ad7796_channels,
+		.num_channels = ARRAY_SIZE(ad7796_channels),
+		.iio_info = &ad7797_info,
+		.sample_freq_avail = ad7797_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL,
+	},
+	[ID_AD7797] = {
+		.id = AD7797_ID,
+		.channels = ad7797_channels,
+		.num_channels = ARRAY_SIZE(ad7797_channels),
+		.iio_info = &ad7797_info,
+		.sample_freq_avail = ad7797_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_CLKSEL,
+	},
+	[ID_AD7798] = {
+		.id = AD7798_ID,
+		.channels = ad7798_channels,
+		.num_channels = ARRAY_SIZE(ad7798_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+	[ID_AD7799] = {
+		.id = AD7799_ID,
+		.channels = ad7799_channels,
+		.num_channels = ARRAY_SIZE(ad7799_channels),
+		.iio_info = &ad7793_info,
+		.sample_freq_avail = ad7793_sample_freq_avail,
+		.flags = AD7793_FLAG_HAS_GAIN |
+			AD7793_FLAG_HAS_BUFFER,
+	},
+};
+
+/*
+ * Probe: allocate the IIO device, optionally enable an external "refin"
+ * regulator, wire up the sigma-delta core, and register the device.
+ * Requires an interrupt line (DOUT/RDY) and platform data.
+ */
+static int ad7793_probe(struct spi_device *spi)
+{
+	const struct ad7793_platform_data *pdata = spi->dev.platform_data;
+	struct ad7793_state *st;
+	struct iio_dev *indio_dev;
+	int ret, vref_mv = 0;
+
+	if (!pdata) {
+		dev_err(&spi->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	if (!spi->irq) {
+		dev_err(&spi->dev, "no IRQ?\n");
+		return -ENODEV;
+	}
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	ad_sd_init(&st->sd, indio_dev, spi, &ad7793_sigma_delta_info);
+
+	/* external reference: read its voltage to build the scale table */
+	if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
+		st->reg = devm_regulator_get(&spi->dev, "refin");
+		if (IS_ERR(st->reg))
+			return PTR_ERR(st->reg);
+
+		ret = regulator_enable(st->reg);
+		if (ret)
+			return ret;
+
+		vref_mv = regulator_get_voltage(st->reg);
+		if (vref_mv < 0) {
+			ret = vref_mv;
+			goto error_disable_reg;
+		}
+
+		vref_mv /= 1000;
+	} else {
+		vref_mv = 1170; /* Build-in ref */
+	}
+
+	st->chip_info =
+		&ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+	spi_set_drvdata(spi, indio_dev);
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = st->chip_info->channels;
+	indio_dev->num_channels = st->chip_info->num_channels;
+	indio_dev->info = st->chip_info->iio_info;
+
+	ret = ad_sd_setup_buffer_and_trigger(indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad7793_setup(indio_dev, pdata, vref_mv);
+	if (ret)
+		goto error_remove_trigger;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_remove_trigger;
+
+	return 0;
+
+error_remove_trigger:
+	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+error_disable_reg:
+	/* regulator was only enabled on the external-reference path */
+	if (pdata->refsel != AD7793_REFSEL_INTERNAL)
+		regulator_disable(st->reg);
+
+	return ret;
+}
+
+/* Tear down in reverse order of probe; mirrors probe's regulator logic. */
+static int ad7793_remove(struct spi_device *spi)
+{
+	const struct ad7793_platform_data *pdata = spi->dev.platform_data;
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ad7793_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	ad_sd_cleanup_buffer_and_trigger(indio_dev);
+
+	if (pdata->refsel != AD7793_REFSEL_INTERNAL)
+		regulator_disable(st->reg);
+
+	return 0;
+}
+
+/* driver_data indexes ad7793_chip_info_tbl[] */
+static const struct spi_device_id ad7793_id[] = {
+	{"ad7785", ID_AD7785},
+	{"ad7792", ID_AD7792},
+	{"ad7793", ID_AD7793},
+	{"ad7794", ID_AD7794},
+	{"ad7795", ID_AD7795},
+	{"ad7796", ID_AD7796},
+	{"ad7797", ID_AD7797},
+	{"ad7798", ID_AD7798},
+	{"ad7799", ID_AD7799},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad7793_id);
+
+static struct spi_driver ad7793_driver = {
+	.driver = {
+		.name = "ad7793",
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7793_probe,
+	.remove = ad7793_remove,
+	.id_table = ad7793_id,
+};
+module_spi_driver(ad7793_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+/* fixed typo: "simialr" -> "similar" */
+MODULE_DESCRIPTION("Analog Devices AD7793 and similar ADCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
new file mode 100644
index 00000000000000..2fd012ee99f5b3
--- /dev/null
+++ b/drivers/iio/adc/ad7887.c
@@ -0,0 +1,369 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ad7887.h>
+
+#define AD7887_REF_DIS BIT(5) /* on-chip reference disable */
+#define AD7887_DUAL BIT(4) /* dual-channel mode */
+#define AD7887_CH_AIN1 BIT(3) /* convert on channel 1, DUAL=1 */
+#define AD7887_CH_AIN0 0 /* convert on channel 0, DUAL=0,1 */
+#define AD7887_PM_MODE1 0 /* CS based shutdown */
+#define AD7887_PM_MODE2 1 /* full on */
+#define AD7887_PM_MODE3 2 /* auto shutdown after conversion */
+#define AD7887_PM_MODE4 3 /* standby mode */
+
+enum ad7887_channels {
+ AD7887_CH0,
+ AD7887_CH0_CH1,
+ AD7887_CH1,
+};
+
+/**
+ * struct ad7887_chip_info - chip specific information
+ * @int_vref_mv:	the internal reference voltage
+ * @channel:		channel specification
+ */
+struct ad7887_chip_info {
+	u16 int_vref_mv;
+	struct iio_chan_spec channel[3];
+};
+
+/*
+ * ad7887_state - driver instance state.
+ * msg[] is indexed by enum ad7887_channels; each message is pre-built in
+ * probe from the xfer[]/tx_cmd_buf[] entries so that sampling only needs
+ * a spi_sync(). ring_msg points at the message matching the active scan
+ * mask during buffered capture.
+ */
+struct ad7887_state {
+	struct spi_device	*spi;
+	const struct ad7887_chip_info	*chip_info;
+	struct regulator	*reg;
+	struct spi_transfer	xfer[4];
+	struct spi_message	msg[3];
+	struct spi_message	*ring_msg;
+	unsigned char		tx_cmd_buf[4];
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 * Buffer needs to be large enough to hold two 16 bit samples and a
+	 * 64 bit aligned 64 bit timestamp.
+	 */
+	unsigned char data[ALIGN(4, sizeof(s64)) + sizeof(s64)]
+		____cacheline_aligned;
+};
+
+/* SPI id driver_data; index into ad7887_chip_info_tbl[] */
+enum ad7887_supported_device_ids {
+	ID_AD7887
+};
+
+/*
+ * Buffer preenable: pick the pre-built SPI message matching the enabled
+ * scan channels.
+ */
+static int ad7887_ring_preenable(struct iio_dev *indio_dev)
+{
+	struct ad7887_state *st = iio_priv(indio_dev);
+
+	/* We know this is a single long so can 'cheat' */
+	switch (*indio_dev->active_scan_mask) {
+	case (1 << 0):
+		st->ring_msg = &st->msg[AD7887_CH0];
+		break;
+	case (1 << 1):
+		st->ring_msg = &st->msg[AD7887_CH1];
+		/* Dummy read: push CH1 setting down to hardware */
+		spi_sync(st->spi, st->ring_msg);
+		break;
+	case ((1 << 1) | (1 << 0)):
+		st->ring_msg = &st->msg[AD7887_CH0_CH1];
+		break;
+	}
+
+	return 0;
+}
+
+/* Buffer postdisable: restore the default CH0 configuration. */
+static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
+{
+	struct ad7887_state *st = iio_priv(indio_dev);
+
+	/* dummy read: restore default CH0 setting */
+	return spi_sync(st->spi, &st->msg[AD7887_CH0]);
+}
+
+/**
+ * ad7887_trigger_handler() - bottom half of trigger-launched polling to
+ * the ring buffer
+ *
+ * Runs the SPI message selected in preenable and pushes the samples plus
+ * a timestamp to the buffer. Currently there is no option in this driver
+ * to disable the saving of timestamps within the ring.
+ **/
+static irqreturn_t ad7887_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ad7887_state *st = iio_priv(indio_dev);
+	int b_sent;
+
+	b_sent = spi_sync(st->spi, st->ring_msg);
+	/* on SPI failure skip the push but still notify the trigger core */
+	if (b_sent)
+		goto done;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+		iio_get_time_ns());
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Buffer hooks: standard triggered-buffer enable/disable plus ours */
+static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = {
+	.preenable = &ad7887_ring_preenable,
+	.postenable = &iio_triggered_buffer_postenable,
+	.predisable = &iio_triggered_buffer_predisable,
+	.postdisable = &ad7887_ring_postdisable,
+};
+
+/*
+ * Single unbuffered conversion: run the pre-built message for 'ch' and
+ * assemble the big-endian 16-bit result from the rx buffer.
+ */
+static int ad7887_scan_direct(struct ad7887_state *st, unsigned ch)
+{
+	int ret = spi_sync(st->spi, &st->msg[ch]);
+	if (ret)
+		return ret;
+
+	return (st->data[(ch * 2)] << 8) | st->data[(ch * 2) + 1];
+}
+
+/*
+ * IIO read_raw handler.
+ * RAW: direct conversion (rejected with -EBUSY while buffered capture
+ * is running). SCALE: external regulator voltage if present, otherwise
+ * the chip's internal reference, over 2^realbits.
+ */
+static int ad7887_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ad7887_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		if (iio_buffer_enabled(indio_dev))
+			ret = -EBUSY;
+		else
+			ret = ad7887_scan_direct(st, chan->address);
+		mutex_unlock(&indio_dev->mlock);
+
+		if (ret < 0)
+			return ret;
+		*val = ret >> chan->scan_type.shift;
+		*val &= GENMASK(chan->scan_type.realbits - 1, 0);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		if (st->reg) {
+			*val = regulator_get_voltage(st->reg);
+			if (*val < 0)
+				return *val;
+			*val /= 1000;
+		} else {
+			*val = st->chip_info->int_vref_mv;
+		}
+
+		*val2 = chan->scan_type.realbits;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+	return -EINVAL;
+}
+
+
+/* channel[0] = AIN1 (dual mode only), channel[1] = AIN0, [2] = timestamp */
+static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+	/*
+	 * More devices added in future
+	 */
+	[ID_AD7887] = {
+		.channel[0] = {
+			.type = IIO_VOLTAGE,
+			.indexed = 1,
+			.channel = 1,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+			.address = 1,
+			.scan_index = 1,
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
+		},
+		.channel[1] = {
+			.type = IIO_VOLTAGE,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+			.address = 0,
+			.scan_index = 0,
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
+		},
+		.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+		.int_vref_mv = 2500,
+	},
+};
+
+/* read-only device: no write_raw ops */
+static const struct iio_info ad7887_info = {
+	.read_raw = &ad7887_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device, optionally enable an external "vref"
+ * regulator (when the on-chip reference is not used), pre-build the SPI
+ * command messages for each channel combination, and register a
+ * triggered buffer plus the device.
+ */
+static int ad7887_probe(struct spi_device *spi)
+{
+	struct ad7887_platform_data *pdata = spi->dev.platform_data;
+	struct ad7887_state *st;
+	struct iio_dev *indio_dev;
+	uint8_t mode;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	/* without the on-chip reference an external "vref" is required */
+	if (!pdata || !pdata->use_onchip_ref) {
+		st->reg = devm_regulator_get(&spi->dev, "vref");
+		if (IS_ERR(st->reg))
+			return PTR_ERR(st->reg);
+
+		ret = regulator_enable(st->reg);
+		if (ret)
+			return ret;
+	}
+
+	st->chip_info =
+		&ad7887_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+	spi_set_drvdata(spi, indio_dev);
+	st->spi = spi;
+
+	/* Establish that the iio_dev is a child of the spi device */
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->info = &ad7887_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Setup default message */
+
+	mode = AD7887_PM_MODE4;
+	if (!pdata || !pdata->use_onchip_ref)
+		mode |= AD7887_REF_DIS;
+	if (pdata && pdata->en_dual)
+		mode |= AD7887_DUAL;
+
+	/* xfer[0]/msg[CH0]: convert AIN0, result into data[0..1] */
+	st->tx_cmd_buf[0] = AD7887_CH_AIN0 | mode;
+
+	st->xfer[0].rx_buf = &st->data[0];
+	st->xfer[0].tx_buf = &st->tx_cmd_buf[0];
+	st->xfer[0].len = 2;
+
+	spi_message_init(&st->msg[AD7887_CH0]);
+	spi_message_add_tail(&st->xfer[0], &st->msg[AD7887_CH0]);
+
+	if (pdata && pdata->en_dual) {
+		st->tx_cmd_buf[2] = AD7887_CH_AIN1 | mode;
+
+		/*
+		 * CH0_CH1 message: the command clocked out during one
+		 * transfer selects the channel sampled by the next, so
+		 * the AIN1 command precedes the AIN0 command.
+		 */
+		st->xfer[1].rx_buf = &st->data[0];
+		st->xfer[1].tx_buf = &st->tx_cmd_buf[2];
+		st->xfer[1].len = 2;
+
+		st->xfer[2].rx_buf = &st->data[2];
+		st->xfer[2].tx_buf = &st->tx_cmd_buf[0];
+		st->xfer[2].len = 2;
+
+		spi_message_init(&st->msg[AD7887_CH0_CH1]);
+		spi_message_add_tail(&st->xfer[1], &st->msg[AD7887_CH0_CH1]);
+		spi_message_add_tail(&st->xfer[2], &st->msg[AD7887_CH0_CH1]);
+
+		st->xfer[3].rx_buf = &st->data[2];
+		st->xfer[3].tx_buf = &st->tx_cmd_buf[2];
+		st->xfer[3].len = 2;
+
+		spi_message_init(&st->msg[AD7887_CH1]);
+		spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
+
+		indio_dev->channels = st->chip_info->channel;
+		indio_dev->num_channels = 3;
+	} else {
+		/* single-channel mode: expose AIN0 and the timestamp only */
+		indio_dev->channels = &st->chip_info->channel[1];
+		indio_dev->num_channels = 2;
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+			&ad7887_trigger_handler, &ad7887_ring_setup_ops);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_unregister_ring;
+
+	return 0;
+error_unregister_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+	/* st->reg stays NULL on the on-chip-reference path */
+	if (st->reg)
+		regulator_disable(st->reg);
+
+	return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int ad7887_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ad7887_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (st->reg)
+		regulator_disable(st->reg);
+
+	return 0;
+}
+
+/* driver_data indexes ad7887_chip_info_tbl[] */
+static const struct spi_device_id ad7887_id[] = {
+	{"ad7887", ID_AD7887},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad7887_id);
+
+static struct spi_driver ad7887_driver = {
+	.driver = {
+		.name = "ad7887",
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7887_probe,
+	.remove = ad7887_remove,
+	.id_table = ad7887_id,
+};
+module_spi_driver(ad7887_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
new file mode 100644
index 00000000000000..28732c28e8197a
--- /dev/null
+++ b/drivers/iio/adc/ad7923.c
@@ -0,0 +1,371 @@
+/*
+ * AD7904/AD7914/AD7923/AD7924 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc (from AD7923 Driver)
+ * Copyright 2012 CS Systemes d'Information
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AD7923_WRITE_CR (1 << 11) /* write control register */
+#define AD7923_RANGE (1 << 1) /* range to REFin */
+#define AD7923_CODING (1 << 0) /* coding is straight binary */
+#define AD7923_PM_MODE_AS (1) /* auto shutdown */
+#define AD7923_PM_MODE_FS (2) /* full shutdown */
+#define AD7923_PM_MODE_OPS (3) /* normal operation */
+#define AD7923_CHANNEL_0 (0) /* analog input 0 */
+#define AD7923_CHANNEL_1 (1) /* analog input 1 */
+#define AD7923_CHANNEL_2 (2) /* analog input 2 */
+#define AD7923_CHANNEL_3 (3) /* analog input 3 */
+#define AD7923_SEQUENCE_OFF (0) /* no sequence fonction */
+#define AD7923_SEQUENCE_PROTECT (2) /* no interrupt write cycle */
+#define AD7923_SEQUENCE_ON (3) /* continuous sequence */
+
+#define AD7923_MAX_CHAN 4
+
+#define AD7923_PM_MODE_WRITE(mode) (mode << 4) /* write mode */
+#define AD7923_CHANNEL_WRITE(channel) (channel << 6) /* write channel */
+#define AD7923_SEQUENCE_WRITE(sequence) (((sequence & 1) << 3) \
+ + ((sequence & 2) << 9))
+ /* write sequence fonction */
+/* left shift for CR : bit 11 transmit in first */
+#define AD7923_SHIFT_REGISTER 4
+
+/* val = value, dec = left shift, bits = number of bits of the mask */
+#define EXTRACT(val, dec, bits) ((val >> dec) & ((1 << bits) - 1))
+
+struct ad7923_state {
+	struct spi_device	*spi;
+	/*
+	 * ring_msg queues one command-only transfer plus up to four 2-byte
+	 * data transfers (entries 1..4).  Keep one spare entry so that
+	 * clearing cs_change on the entry following the last queued data
+	 * transfer can never write past the end of the array.
+	 */
+	struct spi_transfer	ring_xfer[6];
+	struct spi_transfer	scan_single_xfer[2];
+	struct spi_message	ring_msg;
+	struct spi_message	scan_single_msg;
+
+	struct regulator	*reg;		/* "refin" reference supply */
+
+	unsigned int		settings;	/* CR bits ORed into each command */
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 *
+	 * rx_buf must also leave room for the 64-bit aligned timestamp that
+	 * iio_push_to_buffers_with_timestamp() stores after the (up to four)
+	 * 16-bit samples; 4 samples + padding + s64 timestamp = 16 bytes.
+	 */
+	__be16			rx_buf[8] ____cacheline_aligned;
+	__be16			tx_buf[4];
+};
+
+/* Per-variant channel table; selected via spi_device_id driver_data. */
+struct ad7923_chip_info {
+	const struct iio_chan_spec *channels;
+	unsigned int num_channels;
+};
+
+enum ad7923_id {
+	AD7904,
+	AD7914,
+	AD7924,
+};
+
+#define AD7923_V_CHAN(index, bits)					\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = index,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.address = index,					\
+		.scan_index = index,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = (bits),				\
+			.storagebits = 16,				\
+			.endianness = IIO_BE,				\
+		},							\
+	}
+
+/* All variants share the same 4-channel + software-timestamp layout and
+ * differ only in resolution (8/10/12 bits). */
+#define DECLARE_AD7923_CHANNELS(name, bits) \
+const struct iio_chan_spec name ## _channels[] = { \
+	AD7923_V_CHAN(0, bits), \
+	AD7923_V_CHAN(1, bits), \
+	AD7923_V_CHAN(2, bits), \
+	AD7923_V_CHAN(3, bits), \
+	IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+static DECLARE_AD7923_CHANNELS(ad7904, 8);
+static DECLARE_AD7923_CHANNELS(ad7914, 10);
+static DECLARE_AD7923_CHANNELS(ad7924, 12);
+
+static const struct ad7923_chip_info ad7923_chip_info[] = {
+	[AD7904] = {
+		.channels = ad7904_channels,
+		.num_channels = ARRAY_SIZE(ad7904_channels),
+	},
+	[AD7914] = {
+		.channels = ad7914_channels,
+		.num_channels = ARRAY_SIZE(ad7914_channels),
+	},
+	[AD7924] = {
+		.channels = ad7924_channels,
+		.num_channels = ARRAY_SIZE(ad7924_channels),
+	},
+};
+
+/**
+ * ad7923_update_scan_mode() - setup the spi transfer buffer for the new
+ * scan mask
+ * @indio_dev: the IIO device
+ * @active_scan_mask: bitmap of channels to be captured
+ *
+ * Builds ring_msg as one command-only transfer (all channel-select commands
+ * back to back) followed by one 2-byte read transfer per enabled channel.
+ * Cannot fail; always returns 0.
+ **/
+static int ad7923_update_scan_mode(struct iio_dev *indio_dev,
+	const unsigned long *active_scan_mask)
+{
+	struct ad7923_state *st = iio_priv(indio_dev);
+	int i, cmd, len;
+
+	len = 0;
+	for_each_set_bit(i, active_scan_mask, AD7923_MAX_CHAN) {
+		cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(i) |
+			AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) |
+			st->settings;
+		cmd <<= AD7923_SHIFT_REGISTER;
+		st->tx_buf[len++] = cpu_to_be16(cmd);
+	}
+	/* build spi ring message */
+	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
+	/* spi_transfer.len is in bytes; len counts 16-bit commands */
+	st->ring_xfer[0].len = len * 2;
+	st->ring_xfer[0].cs_change = 1;
+
+	spi_message_init(&st->ring_msg);
+	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
+
+	for (i = 0; i < len; i++) {
+		st->ring_xfer[i + 1].rx_buf = &st->rx_buf[i];
+		st->ring_xfer[i + 1].len = 2;
+		st->ring_xfer[i + 1].cs_change = 1;
+		spi_message_add_tail(&st->ring_xfer[i + 1], &st->ring_msg);
+	}
+	/*
+	 * Make sure the last queued transfer does not toggle chip select.
+	 * The last data transfer is ring_xfer[len]; indexing ring_xfer[i + 1]
+	 * here (i == len after the loop) touched one entry past it and left
+	 * cs_change set on the final transfer.
+	 */
+	st->ring_xfer[len].cs_change = 0;
+
+	return 0;
+}
+
+/**
+ * ad7923_trigger_handler() bh of trigger launched polling to ring buffer
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ **/
+static irqreturn_t ad7923_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ad7923_state *st = iio_priv(indio_dev);
+	int b_sent;
+
+	/* ring_msg was prepared by ad7923_update_scan_mode() */
+	b_sent = spi_sync(st->spi, &st->ring_msg);
+	if (b_sent)
+		goto done;	/* transfer failed: push nothing, just ack */
+
+	iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+		iio_get_time_ns());
+
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Single-shot conversion of channel @ch outside of buffered mode.
+ * Returns the raw 16-bit result (channel id + sample) or a negative errno. */
+static int ad7923_scan_direct(struct ad7923_state *st, unsigned ch)
+{
+	int ret, cmd;
+
+	cmd = AD7923_WRITE_CR | AD7923_CHANNEL_WRITE(ch) |
+		AD7923_SEQUENCE_WRITE(AD7923_SEQUENCE_OFF) |
+		st->settings;
+	/* control register bits sit in the upper 12 bits of the frame */
+	cmd <<= AD7923_SHIFT_REGISTER;
+	st->tx_buf[0] = cpu_to_be16(cmd);
+
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
+
+	return be16_to_cpu(st->rx_buf[0]);
+}
+
+/* Return the full-scale input range in mV, or a negative errno.
+ * With the RANGE bit cleared the analog input span is twice REFin. */
+static int ad7923_get_range(struct ad7923_state *st)
+{
+	int vref;
+
+	vref = regulator_get_voltage(st->reg);
+	if (vref < 0)
+		return vref;
+
+	vref /= 1000;	/* uV -> mV */
+
+	if (!(st->settings & AD7923_RANGE))
+		vref *= 2;
+
+	return vref;
+}
+
+/* IIO read_raw callback: raw single conversions and the voltage scale. */
+static int ad7923_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ad7923_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		if (iio_buffer_enabled(indio_dev))
+			ret = -EBUSY;	/* direct reads forbidden while buffering */
+		else
+			ret = ad7923_scan_direct(st, chan->address);
+		mutex_unlock(&indio_dev->mlock);
+
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * Result frame: channel id in bits 15:12, data in bits 11:0.
+		 * NOTE(review): the fixed 12/4 split presumably only matches
+		 * the 12-bit AD7924; for the 10/8-bit variants the sample
+		 * looks left-aligned within the 12-bit field - confirm
+		 * against the datasheet.
+		 */
+		if (chan->address == EXTRACT(ret, 12, 4))
+			*val = EXTRACT(ret, 0, 12);
+		else
+			return -EIO;	/* readback echoed the wrong channel */
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = ad7923_get_range(st);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+	return -EINVAL;
+}
+
+/* IIO core callbacks for all supported variants. */
+static const struct iio_info ad7923_info = {
+	.read_raw = &ad7923_read_raw,
+	.update_scan_mode = ad7923_update_scan_mode,
+	.driver_module = THIS_MODULE,
+};
+
+/* Probe: allocate the IIO device, prepare the single-scan SPI message,
+ * enable the reference supply and register buffer + device.
+ * Returns 0 on success or a negative errno. */
+static int ad7923_probe(struct spi_device *spi)
+{
+	struct ad7923_state *st;
+	struct iio_dev *indio_dev;
+	const struct ad7923_chip_info *info;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	spi_set_drvdata(spi, indio_dev);
+
+	st->spi = spi;
+	/* straight binary coding, REFin range, normal power mode */
+	st->settings = AD7923_CODING | AD7923_RANGE |
+			AD7923_PM_MODE_WRITE(AD7923_PM_MODE_OPS);
+
+	info = &ad7923_chip_info[spi_get_device_id(spi)->driver_data];
+
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = info->channels;
+	indio_dev->num_channels = info->num_channels;
+	indio_dev->info = &ad7923_info;
+
+	/* Setup default message */
+
+	/* one 2-byte command write (CS toggled), then one 2-byte read */
+	st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+	st->scan_single_xfer[0].len = 2;
+	st->scan_single_xfer[0].cs_change = 1;
+	st->scan_single_xfer[1].rx_buf = &st->rx_buf[0];
+	st->scan_single_xfer[1].len = 2;
+
+	spi_message_init(&st->scan_single_msg);
+	spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
+	spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
+
+	st->reg = devm_regulator_get(&spi->dev, "refin");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
+
+	ret = regulator_enable(st->reg);
+	if (ret)
+		return ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+			&ad7923_trigger_handler, NULL);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_cleanup_ring;
+
+	return 0;
+
+error_cleanup_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+	regulator_disable(st->reg);
+
+	return ret;
+}
+
+/* Undo probe in reverse order; devm frees the iio_dev itself. */
+static int ad7923_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ad7923_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	regulator_disable(st->reg);
+
+	return 0;
+}
+
+static const struct spi_device_id ad7923_id[] = {
+	{"ad7904", AD7904},
+	{"ad7914", AD7914},
+	/* ad7923 and ad7924 are both 12-bit parts and share one config */
+	{"ad7923", AD7924},
+	{"ad7924", AD7924},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad7923_id);
+
+static struct spi_driver ad7923_driver = {
+	.driver = {
+		.name = "ad7923",
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7923_probe,
+	.remove = ad7923_remove,
+	.id_table = ad7923_id,
+};
+module_spi_driver(ad7923_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Patrick Vasseur <patrick.vasseur@c-s.fr>");
+MODULE_DESCRIPTION("Analog Devices AD7904/AD7914/AD7923/AD7924 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
new file mode 100644
index 00000000000000..b99de00e57b86c
--- /dev/null
+++ b/drivers/iio/adc/ad799x.c
@@ -0,0 +1,905 @@
+/*
+ * iio/adc/ad799x.c
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
+ *
+ * based on iio/adc/max1363
+ * Copyright (C) 2008-2010 Jonathan Cameron
+ *
+ * based on linux/drivers/i2c/chips/max123x
+ * Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ * based on linux/drivers/acron/char/pcf8583.c
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ad799x.c
+ *
+ * Support for ad7991, ad7995, ad7999, ad7992, ad7993, ad7994, ad7997,
+ * ad7998 and similar chips.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AD799X_CHANNEL_SHIFT 4
+
+/*
+ * AD7991, AD7995 and AD7999 defines
+ */
+
+#define AD7991_REF_SEL 0x08
+#define AD7991_FLTR 0x04
+#define AD7991_BIT_TRIAL_DELAY 0x02
+#define AD7991_SAMPLE_DELAY 0x01
+
+/*
+ * AD7992, AD7993, AD7994, AD7997 and AD7998 defines
+ */
+
+#define AD7998_FLTR BIT(3)
+#define AD7998_ALERT_EN BIT(2)
+#define AD7998_BUSY_ALERT BIT(1)
+#define AD7998_BUSY_ALERT_POL BIT(0)
+
+#define AD7998_CONV_RES_REG 0x0
+#define AD7998_ALERT_STAT_REG 0x1
+#define AD7998_CONF_REG 0x2
+#define AD7998_CYCLE_TMR_REG 0x3
+
+#define AD7998_DATALOW_REG(x) ((x) * 3 + 0x4)
+#define AD7998_DATAHIGH_REG(x) ((x) * 3 + 0x5)
+#define AD7998_HYST_REG(x) ((x) * 3 + 0x6)
+
+#define AD7998_CYC_MASK GENMASK(2, 0)
+#define AD7998_CYC_DIS 0x0
+#define AD7998_CYC_TCONF_32 0x1
+#define AD7998_CYC_TCONF_64 0x2
+#define AD7998_CYC_TCONF_128 0x3
+#define AD7998_CYC_TCONF_256 0x4
+#define AD7998_CYC_TCONF_512 0x5
+#define AD7998_CYC_TCONF_1024 0x6
+#define AD7998_CYC_TCONF_2048 0x7
+
+#define AD7998_ALERT_STAT_CLEAR 0xFF
+
+/*
+ * AD7997 and AD7997 defines
+ */
+
+#define AD7997_8_READ_SINGLE BIT(7)
+#define AD7997_8_READ_SEQUENCE (BIT(6) | BIT(5) | BIT(4))
+
+enum {
+ ad7991,
+ ad7995,
+ ad7999,
+ ad7992,
+ ad7993,
+ ad7994,
+ ad7997,
+ ad7998
+};
+
+/**
+ * struct ad799x_chip_config - chip specific information
+ * @channel: channel specification
+ * @default_config: device default configuration
+ * @info: pointer to iio_info struct
+ */
+struct ad799x_chip_config {
+ const struct iio_chan_spec channel[9];
+ u16 default_config;
+ const struct iio_info *info;
+};
+
+/**
+ * struct ad799x_chip_info - chip specific information
+ * @num_channels: number of channels
+ * @noirq_config: device configuration w/o IRQ
+ * @irq_config: device configuration w/IRQ
+ */
+struct ad799x_chip_info {
+ int num_channels;
+ const struct ad799x_chip_config noirq_config;
+ const struct ad799x_chip_config irq_config;
+};
+
+/* Per-device driver state. */
+struct ad799x_state {
+	struct i2c_client *client;
+	const struct ad799x_chip_config *chip_config;
+	struct regulator *reg;	/* "vcc" supply */
+	struct regulator *vref;	/* "vref" reference, used for scaling */
+	unsigned id;	/* enum value from i2c_device_id driver_data */
+	u16 config;	/* cached copy of the configuration register */
+
+	u8 *rx_buf;	/* scan buffer, (re)allocated in update_scan_mode */
+	unsigned int transfer_size;	/* bytes read per buffered scan */
+};
+
+/* Write the configuration register.  AD7997/AD7998 have a 16-bit config
+ * register, AD7992/3/4 an 8-bit one; the remaining parts have no config
+ * register at all, so the value is cached and sent with each conversion. */
+static int ad799x_write_config(struct ad799x_state *st, u16 val)
+{
+	switch (st->id) {
+	case ad7997:
+	case ad7998:
+		return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
+			val);
+	case ad7992:
+	case ad7993:
+	case ad7994:
+		return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
+			val);
+	default:
+		/* Will be written when doing a conversion */
+		st->config = val;
+		return 0;
+	}
+}
+
+/* Read back the configuration register (16-bit on AD7997/8, 8-bit on
+ * AD7992/3/4); parts without a config register return the cached value. */
+static int ad799x_read_config(struct ad799x_state *st)
+{
+	switch (st->id) {
+	case ad7997:
+	case ad7998:
+		return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
+	case ad7992:
+	case ad7993:
+	case ad7994:
+		return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+	default:
+		/* No readback support */
+		return st->config;
+	}
+}
+
+/**
+ * ad799x_trigger_handler() bh of trigger launched polling to ring buffer
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ **/
+static irqreturn_t ad799x_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ad799x_state *st = iio_priv(indio_dev);
+	int b_sent;
+	u8 cmd;
+
+	/* Build the "command byte" selecting what the block read returns */
+	switch (st->id) {
+	case ad7991:
+	case ad7995:
+	case ad7999:
+		/* channel selection lives in the address/config byte */
+		cmd = st->config |
+			(*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT);
+		break;
+	case ad7992:
+	case ad7993:
+	case ad7994:
+		cmd = (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT) |
+			AD7998_CONV_RES_REG;
+		break;
+	case ad7997:
+	case ad7998:
+		/* sequence mode: channels were set up in update_scan_mode */
+		cmd = AD7997_8_READ_SEQUENCE | AD7998_CONV_RES_REG;
+		break;
+	default:
+		cmd = 0;
+	}
+
+	b_sent = i2c_smbus_read_i2c_block_data(st->client,
+			cmd, st->transfer_size, st->rx_buf);
+	if (b_sent < 0)
+		goto out;	/* read failed: push nothing, just ack */
+
+	iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
+			iio_get_time_ns());
+out:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Resize the scan buffer for the new channel mask and, on parts with a
+ * config register, program the channel selection bits.
+ * The previous rx_buf is freed first; the final one is freed in remove(). */
+static int ad799x_update_scan_mode(struct iio_dev *indio_dev,
+	const unsigned long *scan_mask)
+{
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	kfree(st->rx_buf);
+	st->rx_buf = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+	if (!st->rx_buf)
+		return -ENOMEM;
+
+	/* two bytes per enabled channel */
+	st->transfer_size = bitmap_weight(scan_mask, indio_dev->masklength) * 2;
+
+	switch (st->id) {
+	case ad7992:
+	case ad7993:
+	case ad7994:
+	case ad7997:
+	case ad7998:
+		st->config &= ~(GENMASK(7, 0) << AD799X_CHANNEL_SHIFT);
+		st->config |= (*scan_mask << AD799X_CHANNEL_SHIFT);
+		return ad799x_write_config(st, st->config);
+	default:
+		return 0;
+	}
+}
+
+/* Single-shot conversion of channel @ch outside of buffered mode.
+ * Returns the raw 16-bit conversion word or a negative errno. */
+static int ad799x_scan_direct(struct ad799x_state *st, unsigned ch)
+{
+	u8 cmd;
+
+	switch (st->id) {
+	case ad7991:
+	case ad7995:
+	case ad7999:
+		/* channel select is a one-hot bitmap merged with the config */
+		cmd = st->config | (BIT(ch) << AD799X_CHANNEL_SHIFT);
+		break;
+	case ad7992:
+	case ad7993:
+	case ad7994:
+		cmd = BIT(ch) << AD799X_CHANNEL_SHIFT;
+		break;
+	case ad7997:
+	case ad7998:
+		/* these parts take a binary channel number, not a bitmap */
+		cmd = (ch << AD799X_CHANNEL_SHIFT) | AD7997_8_READ_SINGLE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return i2c_smbus_read_word_swapped(st->client, cmd);
+}
+
+/* IIO read_raw callback: raw single conversions and the vref-based scale. */
+static int ad799x_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		if (iio_buffer_enabled(indio_dev))
+			ret = -EBUSY;	/* no direct reads while buffering */
+		else
+			ret = ad799x_scan_direct(st, chan->scan_index);
+		mutex_unlock(&indio_dev->mlock);
+
+		if (ret < 0)
+			return ret;
+		/* strip channel id bits; keep only the sample */
+		*val = (ret >> chan->scan_type.shift) &
+			GENMASK(chan->scan_type.realbits - 1, 0);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_get_voltage(st->vref);
+		if (ret < 0)
+			return ret;
+		*val = ret / 1000;	/* uV -> mV */
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+	return -EINVAL;
+}
+/*
+ * Sampling frequencies (Hz) selected by the CYC field of the cycle timer
+ * register.  Must stay in sync with the sampling_frequency_available
+ * attribute string.  The AD7998_CYC_TCONF_256 entry was missing, which
+ * left index 4 reading as 0 Hz and made the advertised 1953 Hz setting
+ * impossible to select via ad799x_write_frequency().
+ */
+static const unsigned int ad7998_frequencies[] = {
+	[AD7998_CYC_DIS]	= 0,
+	[AD7998_CYC_TCONF_32]	= 15625,
+	[AD7998_CYC_TCONF_64]	= 7812,
+	[AD7998_CYC_TCONF_128]	= 3906,
+	[AD7998_CYC_TCONF_256]	= 1953,
+	[AD7998_CYC_TCONF_512]	= 976,
+	[AD7998_CYC_TCONF_1024]	= 488,
+	[AD7998_CYC_TCONF_2048]	= 244,
+};
+
+/* sysfs sampling_frequency show: map the CYC field back to Hz. */
+static ssize_t ad799x_read_frequency(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	int ret = i2c_smbus_read_byte_data(st->client, AD7998_CYCLE_TMR_REG);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%u\n", ad7998_frequencies[ret & AD7998_CYC_MASK]);
+}
+
+/* sysfs sampling_frequency store: match the requested Hz value against
+ * ad7998_frequencies[] and program its index into the CYC field; values
+ * not present in the table are rejected with -EINVAL. */
+static ssize_t ad799x_write_frequency(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	long val;
+	int ret, i;
+
+	ret = kstrtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+	/* read-modify-write to preserve the non-CYC bits */
+	ret = i2c_smbus_read_byte_data(st->client, AD7998_CYCLE_TMR_REG);
+	if (ret < 0)
+		goto error_ret_mutex;
+	/* Wipe the bits clean */
+	ret &= ~AD7998_CYC_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(ad7998_frequencies); i++)
+		if (val == ad7998_frequencies[i])
+			break;
+	if (i == ARRAY_SIZE(ad7998_frequencies)) {
+		ret = -EINVAL;
+		goto error_ret_mutex;
+	}
+
+	ret = i2c_smbus_write_byte_data(st->client, AD7998_CYCLE_TMR_REG,
+		ret | i);
+	if (ret < 0)
+		goto error_ret_mutex;
+	ret = len;
+
+error_ret_mutex:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/* Report whether alert events are enabled for @chan: requires the global
+ * ALERT_EN bit plus this channel's bit in the config channel field. */
+static int ad799x_read_event_config(struct iio_dev *indio_dev,
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir)
+{
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	if (!(st->config & AD7998_ALERT_EN))
+		return 0;
+
+	if ((st->config >> AD799X_CHANNEL_SHIFT) & BIT(chan->scan_index))
+		return 1;
+
+	return 0;
+}
+
+/* Enable/disable alert events for @chan; ALERT_EN tracks whether any
+ * channel bit remains set.  Rejected with -EBUSY while buffering. */
+static int ad799x_write_event_config(struct iio_dev *indio_dev,
+				     const struct iio_chan_spec *chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir,
+				     int state)
+{
+	struct ad799x_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_enabled(indio_dev)) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	if (state)
+		st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT;
+	else
+		st->config &= ~(BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT);
+
+	/* master alert enable follows "any channel enabled" */
+	if (st->config >> AD799X_CHANNEL_SHIFT)
+		st->config |= AD7998_ALERT_EN;
+	else
+		st->config &= ~AD7998_ALERT_EN;
+
+	ret = ad799x_write_config(st, st->config);
+
+done:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/* Map (channel, direction, info) to the matching limit register address.
+ * NOTE(review): the return type is unsigned, so "return -EINVAL" wraps to
+ * a huge register number; in practice the event spec only ever passes
+ * IIO_EV_INFO_VALUE or IIO_EV_INFO_HYSTERESIS - confirm. */
+static unsigned int ad799x_threshold_reg(const struct iio_chan_spec *chan,
+					 enum iio_event_direction dir,
+					 enum iio_event_info info)
+{
+	switch (info) {
+	case IIO_EV_INFO_VALUE:
+		if (dir == IIO_EV_DIR_FALLING)
+			return AD7998_DATALOW_REG(chan->channel);
+		else
+			return AD7998_DATAHIGH_REG(chan->channel);
+	case IIO_EV_INFO_HYSTERESIS:
+		return AD7998_HYST_REG(chan->channel);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Write an event threshold/hysteresis value, left-shifted into the
+ * hardware's bit position; rejects values outside the ADC's range. */
+static int ad799x_write_event_value(struct iio_dev *indio_dev,
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir,
+				    enum iio_event_info info,
+				    int val, int val2)
+{
+	int ret;
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0))
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	ret = i2c_smbus_write_word_swapped(st->client,
+		ad799x_threshold_reg(chan, dir, info),
+		val << chan->scan_type.shift);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/* Read back an event threshold/hysteresis value and strip the shift. */
+static int ad799x_read_event_value(struct iio_dev *indio_dev,
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir,
+				    enum iio_event_info info,
+				    int *val, int *val2)
+{
+	int ret;
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	mutex_lock(&indio_dev->mlock);
+	ret = i2c_smbus_read_word_swapped(st->client,
+		ad799x_threshold_reg(chan, dir, info));
+	mutex_unlock(&indio_dev->mlock);
+	if (ret < 0)
+		return ret;
+	*val = (ret >> chan->scan_type.shift) &
+		GENMASK(chan->scan_type.realbits - 1 , 0);
+
+	return IIO_VAL_INT;
+}
+
+/* Threaded alert IRQ handler: read the alert status register, clear it,
+ * and push one IIO event per set bit.  Even/odd bit pairs of the status
+ * byte encode falling/rising thresholds for channel (bit >> 1). */
+static irqreturn_t ad799x_event_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct ad799x_state *st = iio_priv(private);
+	int i, ret;
+
+	ret = i2c_smbus_read_byte_data(st->client, AD7998_ALERT_STAT_REG);
+	if (ret <= 0)
+		goto done;	/* read error or no alert pending */
+
+	if (i2c_smbus_write_byte_data(st->client, AD7998_ALERT_STAT_REG,
+		AD7998_ALERT_STAT_CLEAR) < 0)
+		goto done;
+
+	for (i = 0; i < 8; i++) {
+		if (ret & BIT(i))
+			iio_push_event(indio_dev,
+				       i & 0x1 ?
+				       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+							    (i >> 1),
+							    IIO_EV_TYPE_THRESH,
+							    IIO_EV_DIR_RISING) :
+				       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+							    (i >> 1),
+							    IIO_EV_TYPE_THRESH,
+							    IIO_EV_DIR_FALLING),
+				       iio_get_time_ns());
+	}
+
+done:
+	return IRQ_HANDLED;
+}
+
+/* Sampling-frequency sysfs attributes (events directory); the available
+ * list must match ad7998_frequencies[]. */
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+			      ad799x_read_frequency,
+			      ad799x_write_frequency);
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("15625 7812 3906 1953 976 488 244 0");
+
+static struct attribute *ad799x_event_attributes[] = {
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ad799x_event_attrs_group = {
+	.attrs = ad799x_event_attributes,
+	.name = "events",
+};
+
+/* Parts without config register: raw reads and scale only */
+static const struct iio_info ad7991_info = {
+	.read_raw = &ad799x_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static const struct iio_info ad7993_4_7_8_noirq_info = {
+	.read_raw = &ad799x_read_raw,
+	.driver_module = THIS_MODULE,
+	.update_scan_mode = ad799x_update_scan_mode,
+};
+
+/* Used when an alert IRQ is wired up: adds threshold event handling */
+static const struct iio_info ad7993_4_7_8_irq_info = {
+	.read_raw = &ad799x_read_raw,
+	.event_attrs = &ad799x_event_attrs_group,
+	.read_event_config = &ad799x_read_event_config,
+	.write_event_config = &ad799x_write_event_config,
+	.read_event_value = &ad799x_read_event_value,
+	.write_event_value = &ad799x_write_event_value,
+	.driver_module = THIS_MODULE,
+	.update_scan_mode = ad799x_update_scan_mode,
+};
+
+static const struct iio_event_spec ad799x_events[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_EITHER,
+		.mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+	},
+};
+
+#define _AD799X_CHANNEL(_index, _realbits, _ev_spec, _num_ev_spec) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.channel = (_index), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.scan_index = (_index), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = (_realbits), \
+		.storagebits = 16, \
+		.shift = 12 - (_realbits), \
+		.endianness = IIO_BE, \
+	}, \
+	.event_spec = _ev_spec, \
+	.num_event_specs = _num_ev_spec, \
+}
+
+#define AD799X_CHANNEL(_index, _realbits) \
+	_AD799X_CHANNEL(_index, _realbits, NULL, 0)
+
+#define AD799X_CHANNEL_WITH_EVENTS(_index, _realbits) \
+	_AD799X_CHANNEL(_index, _realbits, ad799x_events, \
+		ARRAY_SIZE(ad799x_events))
+
+/* Per-variant channel tables; irq_config is only provided for parts with
+ * an alert pin and is selected at probe time when client->irq is valid. */
+static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
+	[ad7991] = {
+		.num_channels = 5,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 12),
+				AD799X_CHANNEL(1, 12),
+				AD799X_CHANNEL(2, 12),
+				AD799X_CHANNEL(3, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.info = &ad7991_info,
+		},
+	},
+	[ad7995] = {
+		.num_channels = 5,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 10),
+				AD799X_CHANNEL(1, 10),
+				AD799X_CHANNEL(2, 10),
+				AD799X_CHANNEL(3, 10),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.info = &ad7991_info,
+		},
+	},
+	[ad7999] = {
+		.num_channels = 5,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 8),
+				AD799X_CHANNEL(1, 8),
+				AD799X_CHANNEL(2, 8),
+				AD799X_CHANNEL(3, 8),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.info = &ad7991_info,
+		},
+	},
+	[ad7992] = {
+		.num_channels = 3,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 12),
+				AD799X_CHANNEL(1, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(3),
+			},
+			.info = &ad7993_4_7_8_noirq_info,
+		},
+		.irq_config = {
+			.channel = {
+				AD799X_CHANNEL_WITH_EVENTS(0, 12),
+				AD799X_CHANNEL_WITH_EVENTS(1, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(3),
+			},
+			.default_config = AD7998_ALERT_EN | AD7998_BUSY_ALERT,
+			.info = &ad7993_4_7_8_irq_info,
+		},
+	},
+	[ad7993] = {
+		.num_channels = 5,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 10),
+				AD799X_CHANNEL(1, 10),
+				AD799X_CHANNEL(2, 10),
+				AD799X_CHANNEL(3, 10),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.info = &ad7993_4_7_8_noirq_info,
+		},
+		.irq_config = {
+			.channel = {
+				AD799X_CHANNEL_WITH_EVENTS(0, 10),
+				AD799X_CHANNEL_WITH_EVENTS(1, 10),
+				AD799X_CHANNEL_WITH_EVENTS(2, 10),
+				AD799X_CHANNEL_WITH_EVENTS(3, 10),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.default_config = AD7998_ALERT_EN | AD7998_BUSY_ALERT,
+			.info = &ad7993_4_7_8_irq_info,
+		},
+	},
+	[ad7994] = {
+		.num_channels = 5,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 12),
+				AD799X_CHANNEL(1, 12),
+				AD799X_CHANNEL(2, 12),
+				AD799X_CHANNEL(3, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.info = &ad7993_4_7_8_noirq_info,
+		},
+		.irq_config = {
+			.channel = {
+				AD799X_CHANNEL_WITH_EVENTS(0, 12),
+				AD799X_CHANNEL_WITH_EVENTS(1, 12),
+				AD799X_CHANNEL_WITH_EVENTS(2, 12),
+				AD799X_CHANNEL_WITH_EVENTS(3, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(4),
+			},
+			.default_config = AD7998_ALERT_EN | AD7998_BUSY_ALERT,
+			.info = &ad7993_4_7_8_irq_info,
+		},
+	},
+	[ad7997] = {
+		.num_channels = 9,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 10),
+				AD799X_CHANNEL(1, 10),
+				AD799X_CHANNEL(2, 10),
+				AD799X_CHANNEL(3, 10),
+				AD799X_CHANNEL(4, 10),
+				AD799X_CHANNEL(5, 10),
+				AD799X_CHANNEL(6, 10),
+				AD799X_CHANNEL(7, 10),
+				IIO_CHAN_SOFT_TIMESTAMP(8),
+			},
+			.info = &ad7993_4_7_8_noirq_info,
+		},
+		/* only the first four channels support alert events */
+		.irq_config = {
+			.channel = {
+				AD799X_CHANNEL_WITH_EVENTS(0, 10),
+				AD799X_CHANNEL_WITH_EVENTS(1, 10),
+				AD799X_CHANNEL_WITH_EVENTS(2, 10),
+				AD799X_CHANNEL_WITH_EVENTS(3, 10),
+				AD799X_CHANNEL(4, 10),
+				AD799X_CHANNEL(5, 10),
+				AD799X_CHANNEL(6, 10),
+				AD799X_CHANNEL(7, 10),
+				IIO_CHAN_SOFT_TIMESTAMP(8),
+			},
+			.default_config = AD7998_ALERT_EN | AD7998_BUSY_ALERT,
+			.info = &ad7993_4_7_8_irq_info,
+		},
+	},
+	[ad7998] = {
+		.num_channels = 9,
+		.noirq_config = {
+			.channel = {
+				AD799X_CHANNEL(0, 12),
+				AD799X_CHANNEL(1, 12),
+				AD799X_CHANNEL(2, 12),
+				AD799X_CHANNEL(3, 12),
+				AD799X_CHANNEL(4, 12),
+				AD799X_CHANNEL(5, 12),
+				AD799X_CHANNEL(6, 12),
+				AD799X_CHANNEL(7, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(8),
+			},
+			.info = &ad7993_4_7_8_noirq_info,
+		},
+		/* only the first four channels support alert events */
+		.irq_config = {
+			.channel = {
+				AD799X_CHANNEL_WITH_EVENTS(0, 12),
+				AD799X_CHANNEL_WITH_EVENTS(1, 12),
+				AD799X_CHANNEL_WITH_EVENTS(2, 12),
+				AD799X_CHANNEL_WITH_EVENTS(3, 12),
+				AD799X_CHANNEL(4, 12),
+				AD799X_CHANNEL(5, 12),
+				AD799X_CHANNEL(6, 12),
+				AD799X_CHANNEL(7, 12),
+				IIO_CHAN_SOFT_TIMESTAMP(8),
+			},
+			.default_config = AD7998_ALERT_EN | AD7998_BUSY_ALERT,
+			.info = &ad7993_4_7_8_irq_info,
+		},
+	},
+};
+
+/* Probe: allocate the IIO device, pick the irq/noirq configuration,
+ * enable supplies, program the default config, and register buffer,
+ * optional alert IRQ and the IIO device.
+ * Returns 0 on success or a negative errno with everything unwound. */
+static int ad799x_probe(struct i2c_client *client,
+				   const struct i2c_device_id *id)
+{
+	int ret;
+	struct ad799x_state *st;
+	struct iio_dev *indio_dev;
+	const struct ad799x_chip_info *chip_info =
+		&ad799x_chip_info_tbl[id->driver_data];
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, indio_dev);
+
+	st->id = id->driver_data;
+	/* event support requires both an IRQ line and an irq-capable part */
+	if (client->irq > 0 && chip_info->irq_config.info)
+		st->chip_config = &chip_info->irq_config;
+	else
+		st->chip_config = &chip_info->noirq_config;
+
+	/* TODO: Add pdata options for filtering and bit delay */
+
+	st->reg = devm_regulator_get(&client->dev, "vcc");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
+	ret = regulator_enable(st->reg);
+	if (ret)
+		return ret;
+	st->vref = devm_regulator_get(&client->dev, "vref");
+	if (IS_ERR(st->vref)) {
+		ret = PTR_ERR(st->vref);
+		goto error_disable_reg;
+	}
+	ret = regulator_enable(st->vref);
+	if (ret)
+		goto error_disable_reg;
+
+	st->client = client;
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
+	indio_dev->info = st->chip_config->info;
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = st->chip_config->channel;
+	indio_dev->num_channels = chip_info->num_channels;
+
+	/*
+	 * vref is already enabled here, so config failures must unwind it
+	 * too (error_disable_vref, not error_disable_reg) or the reference
+	 * regulator would be left on forever.
+	 */
+	ret = ad799x_write_config(st, st->chip_config->default_config);
+	if (ret < 0)
+		goto error_disable_vref;
+	ret = ad799x_read_config(st);
+	if (ret < 0)
+		goto error_disable_vref;
+	st->config = ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+		&ad799x_trigger_handler, NULL);
+	if (ret)
+		goto error_disable_vref;
+
+	if (client->irq > 0) {
+		ret = devm_request_threaded_irq(&client->dev,
+						client->irq,
+						NULL,
+						ad799x_event_handler,
+						IRQF_TRIGGER_FALLING |
+						IRQF_ONESHOT,
+						client->name,
+						indio_dev);
+		if (ret)
+			goto error_cleanup_ring;
+	}
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_cleanup_ring;
+
+	return 0;
+
+error_cleanup_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_vref:
+	regulator_disable(st->vref);
+error_disable_reg:
+	regulator_disable(st->reg);
+
+	return ret;
+}
+
+/* Undo probe in reverse order and free the scan buffer allocated by
+ * ad799x_update_scan_mode() (kfree(NULL) is a no-op if never allocated). */
+static int ad799x_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ad799x_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	iio_triggered_buffer_cleanup(indio_dev);
+	regulator_disable(st->vref);
+	regulator_disable(st->reg);
+	kfree(st->rx_buf);
+
+	return 0;
+}
+
+/* I2C match table; driver_data indexes ad799x_chip_info_tbl. */
+static const struct i2c_device_id ad799x_id[] = {
+	{ "ad7991", ad7991 },
+	{ "ad7995", ad7995 },
+	{ "ad7999", ad7999 },
+	{ "ad7992", ad7992 },
+	{ "ad7993", ad7993 },
+	{ "ad7994", ad7994 },
+	{ "ad7997", ad7997 },
+	{ "ad7998", ad7998 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad799x_id);
+
+static struct i2c_driver ad799x_driver = {
+	.driver = {
+		.name = "ad799x",
+	},
+	.probe = ad799x_probe,
+	.remove = ad799x_remove,
+	.id_table = ad799x_id,
+};
+module_i2c_driver(ad799x_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD799x ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
new file mode 100644
index 00000000000000..d10bd0c97233fa
--- /dev/null
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -0,0 +1,553 @@
+/*
+ * Support code for Analog Devices Sigma-Delta ADCs
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/adc/ad_sigma_delta.h>
+
+#include <asm/unaligned.h>
+
+
+#define AD_SD_COMM_CHAN_MASK 0x3
+
+#define AD_SD_REG_COMM 0x00
+#define AD_SD_REG_DATA 0x03
+
+/**
+ * ad_sd_set_comm() - Set communications register
+ *
+ * @sigma_delta: The sigma delta device
+ * @comm: New value for the communications register
+ *
+ * Only the low two bits are retained; they are OR-ed into the address
+ * byte of every subsequent register access.
+ */
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm)
+{
+ /* Some variants use the lower two bits of the communications register
+ * to select the channel */
+ sigma_delta->comm = comm & AD_SD_COMM_CHAN_MASK;
+}
+EXPORT_SYMBOL_GPL(ad_sd_set_comm);
+
+/**
+ * ad_sd_write_reg() - Write a register
+ *
+ * @sigma_delta: The sigma delta device
+ * @reg: Address of the register
+ * @size: Size of the register in bytes (0-3); size 0 sends only the
+ * address/command byte
+ * @val: Value to write to the register
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ unsigned int size, unsigned int val)
+{
+ uint8_t *data = sigma_delta->data;
+ struct spi_transfer t = {
+ .tx_buf = data,
+ .len = size + 1,
+ /* While the bus is locked, keep chip-select asserted between
+ * messages so a running conversion is not aborted. */
+ .cs_change = sigma_delta->bus_locked,
+ };
+ struct spi_message m;
+ int ret;
+
+ /* First byte: register address shifted per chip, plus channel bits */
+ data[0] = (reg << sigma_delta->info->addr_shift) | sigma_delta->comm;
+
+ /* Payload is transmitted big-endian (MSB first) */
+ switch (size) {
+ case 3:
+ data[1] = val >> 16;
+ data[2] = val >> 8;
+ data[3] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, &data[1]);
+ break;
+ case 1:
+ data[1] = val;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ /* Must use the _locked variant while we hold the bus lock */
+ if (sigma_delta->bus_locked)
+ ret = spi_sync_locked(sigma_delta->spi, &m);
+ else
+ ret = spi_sync(sigma_delta->spi, &m);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_write_reg);
+
+/* Read @size raw bytes from @reg into @val. For chips with an addressable
+ * register map the one-byte address/command transfer is sent first;
+ * chips without registers (has_registers == false) stream data directly,
+ * so only the RX transfer is queued. */
+static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
+ unsigned int reg, unsigned int size, uint8_t *val)
+{
+ uint8_t *data = sigma_delta->data;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = data,
+ .len = 1,
+ }, {
+ .rx_buf = val,
+ .len = size,
+ /* Keep CS asserted between locked-bus messages */
+ .cs_change = sigma_delta->bus_locked,
+ },
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+
+ if (sigma_delta->info->has_registers) {
+ data[0] = reg << sigma_delta->info->addr_shift;
+ data[0] |= sigma_delta->info->read_mask;
+ spi_message_add_tail(&t[0], &m);
+ }
+ spi_message_add_tail(&t[1], &m);
+
+ if (sigma_delta->bus_locked)
+ ret = spi_sync_locked(sigma_delta->spi, &m);
+ else
+ ret = spi_sync(sigma_delta->spi, &m);
+
+ return ret;
+}
+
+/**
+ * ad_sd_read_reg() - Read a register
+ *
+ * @sigma_delta: The sigma delta device
+ * @reg: Address of the register
+ * @size: Size of the register in bytes (1-4)
+ * @val: Read value, assembled big-endian from the raw bytes
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
+ unsigned int reg, unsigned int size, unsigned int *val)
+{
+ int ret;
+
+ /* Raw bytes land in the driver's scratch buffer, then are unpacked */
+ ret = ad_sd_read_reg_raw(sigma_delta, reg, size, sigma_delta->data);
+ if (ret < 0)
+ goto out;
+
+ switch (size) {
+ case 4:
+ *val = get_unaligned_be32(sigma_delta->data);
+ break;
+ case 3:
+ *val = (sigma_delta->data[0] << 16) |
+ (sigma_delta->data[1] << 8) |
+ sigma_delta->data[2];
+ break;
+ case 2:
+ *val = get_unaligned_be16(sigma_delta->data);
+ break;
+ case 1:
+ *val = sigma_delta->data[0];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+
+/* Run one calibration cycle (@mode) on @channel. The SPI bus is locked for
+ * the duration so the data-ready (MISO) line can be watched without other
+ * traffic interfering; completion is signalled by the DRDY irq handler. */
+static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ unsigned int mode, unsigned int channel)
+{
+ int ret;
+
+ ret = ad_sigma_delta_set_channel(sigma_delta, channel);
+ if (ret)
+ return ret;
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
+ reinit_completion(&sigma_delta->completion);
+
+ ret = ad_sigma_delta_set_mode(sigma_delta, mode);
+ if (ret < 0)
+ goto out;
+
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->spi->irq);
+ /* wait_for_completion_timeout() returns 0 only on timeout */
+ ret = wait_for_completion_timeout(&sigma_delta->completion, 2*HZ);
+ if (ret == 0) {
+ /* Timed out: the irq handler never ran, so disable here */
+ sigma_delta->irq_dis = true;
+ disable_irq_nosync(sigma_delta->spi->irq);
+ ret = -EIO;
+ } else {
+ ret = 0;
+ }
+out:
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
+ /* Always drop back to idle, even on failure */
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+
+ return ret;
+}
+
+/**
+ * ad_sd_calibrate_all() - Performs channel calibration
+ * @sigma_delta: The sigma delta device
+ * @cb: Array of channels and calibration type to perform
+ * @n: Number of items in cb
+ *
+ * Stops at the first failing entry and returns its error.
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
+ const struct ad_sd_calib_data *cb, unsigned int n)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < n; i++) {
+ ret = ad_sd_calibrate(sigma_delta, cb[i].mode, cb[i].channel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad_sd_calibrate_all);
+
+/**
+ * ad_sigma_delta_single_conversion() - Performs a single data conversion
+ * @indio_dev: The IIO device
+ * @chan: The conversion is done for this channel
+ * @val: Pointer to the location where to store the read value
+ *
+ * Mutually exclusive with buffered capture (-EBUSY while the buffer is
+ * enabled). The raw sample is shifted/masked to the channel's realbits
+ * before being stored in @val.
+ *
+ * Returns: IIO_VAL_INT on success, a negative error value otherwise.
+ */
+int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned int sample, raw_sample;
+ int ret = 0;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ /* mlock serializes raw reads against each other and buffer setup */
+ mutex_lock(&indio_dev->mlock);
+ ad_sigma_delta_set_channel(sigma_delta, chan->address);
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
+ reinit_completion(&sigma_delta->completion);
+
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
+
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->spi->irq);
+ /* 0 = timeout, <0 = interrupted, >0 = completed by the DRDY irq */
+ ret = wait_for_completion_interruptible_timeout(
+ &sigma_delta->completion, HZ);
+
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
+
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ goto out;
+
+ /* Register width follows the channel's realbits + shift, rounded up */
+ ret = ad_sd_read_reg(sigma_delta, AD_SD_REG_DATA,
+ DIV_ROUND_UP(chan->scan_type.realbits + chan->scan_type.shift, 8),
+ &raw_sample);
+
+out:
+ /* On timeout/interrupt the irq handler may not have disabled itself */
+ if (!sigma_delta->irq_dis) {
+ disable_irq_nosync(sigma_delta->spi->irq);
+ sigma_delta->irq_dis = true;
+ }
+
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret)
+ return ret;
+
+ sample = raw_sample >> chan->scan_type.shift;
+ sample &= (1 << chan->scan_type.realbits) - 1;
+ *val = sample;
+
+ /* Chip-specific hook, e.g. status/error checks on the raw word */
+ ret = ad_sigma_delta_postprocess_sample(sigma_delta, raw_sample);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+EXPORT_SYMBOL_GPL(ad_sigma_delta_single_conversion);
+
+/* Buffer postenable: select the single active scan channel (scan mask is
+ * validated one-hot), lock the SPI bus for the capture session, start
+ * continuous conversion mode and arm the data-ready irq. */
+static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned int channel;
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /* One-hot mask, so the first set bit is the only active channel */
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ ret = ad_sigma_delta_set_channel(sigma_delta,
+ indio_dev->channels[channel].address);
+ if (ret)
+ goto err_predisable;
+
+ spi_bus_lock(sigma_delta->spi->master);
+ sigma_delta->bus_locked = true;
+ ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
+ if (ret)
+ goto err_unlock;
+
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->spi->irq);
+
+ return 0;
+
+err_unlock:
+ spi_bus_unlock(sigma_delta->spi->master);
+err_predisable:
+
+ return ret;
+}
+
+/* Buffer postdisable: wait (bounded) for a possibly in-flight conversion to
+ * signal data-ready, quiesce the irq, return the chip to idle and release
+ * the SPI bus lock taken in postenable. */
+static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+ reinit_completion(&sigma_delta->completion);
+ /* Best effort: timeout result deliberately ignored */
+ wait_for_completion_timeout(&sigma_delta->completion, HZ);
+
+ if (!sigma_delta->irq_dis) {
+ disable_irq_nosync(sigma_delta->spi->irq);
+ sigma_delta->irq_dis = true;
+ }
+
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+
+ sigma_delta->bus_locked = false;
+ return spi_bus_unlock(sigma_delta->spi->master);
+}
+
+/* Threaded trigger handler: read one sample from the data register and push
+ * it (plus timestamp) into the IIO buffer, then re-arm the DRDY irq. */
+static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned int reg_size;
+ uint8_t data[16];
+ int ret;
+
+ memset(data, 0x00, 16);
+
+ /* Sample width in bytes, derived from channel 0's scan type */
+ reg_size = indio_dev->channels[0].scan_type.realbits +
+ indio_dev->channels[0].scan_type.shift;
+ reg_size = DIV_ROUND_UP(reg_size, 8);
+
+ /* NOTE(review): ret is assigned but never checked, and sizes other
+ * than 1-4 leave it uninitialized; a read error still pushes stale
+ * data to the buffer — consider handling ret. */
+ switch (reg_size) {
+ case 4:
+ case 2:
+ case 1:
+ ret = ad_sd_read_reg_raw(sigma_delta, AD_SD_REG_DATA,
+ reg_size, &data[0]);
+ break;
+ case 3:
+ /* We store 24 bit samples in a 32 bit word. Keep the upper
+ * byte set to zero. */
+ ret = ad_sd_read_reg_raw(sigma_delta, AD_SD_REG_DATA,
+ reg_size, &data[1]);
+ break;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+ /* Re-enable DRDY irq (it was disabled in ad_sd_data_rdy_trig_poll) */
+ sigma_delta->irq_dis = false;
+ enable_irq(sigma_delta->spi->irq);
+
+ return IRQ_HANDLED;
+}
+
+/* Buffer ops: only one channel may be scanned at a time (one-hot mask) */
+static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = {
+ .postenable = &ad_sd_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad_sd_buffer_postdisable,
+ .validate_scan_mask = &iio_validate_scan_mask_onehot,
+};
+
+/* Hard irq handler for the data-ready line: wake any single-conversion /
+ * calibration waiter, mask the irq (the line is level-active, so leaving it
+ * enabled would storm), and kick the trigger for buffered capture. */
+static irqreturn_t ad_sd_data_rdy_trig_poll(int irq, void *private)
+{
+ struct ad_sigma_delta *sigma_delta = private;
+
+ complete(&sigma_delta->completion);
+ disable_irq_nosync(irq);
+ sigma_delta->irq_dis = true;
+ iio_trigger_poll(sigma_delta->trig);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ad_sd_validate_trigger() - validate_trigger callback for ad_sigma_delta devices
+ * @indio_dev: The IIO device
+ * @trig: The new trigger
+ *
+ * Only the device's own data-ready trigger may drive its buffer.
+ *
+ * Returns: 0 if the 'trig' matches the trigger registered by the ad_sigma_delta
+ * device, -EINVAL otherwise.
+ */
+int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+ if (sigma_delta->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
+
+static const struct iio_trigger_ops ad_sd_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+/* Allocate and register the per-device data-ready trigger and claim the SPI
+ * irq line. The irq starts disabled; it is only enabled around an active
+ * conversion. Teardown on error unwinds in reverse order. */
+static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ sigma_delta->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+ indio_dev->id);
+ if (sigma_delta->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ sigma_delta->trig->ops = &ad_sd_trigger_ops;
+ init_completion(&sigma_delta->completion);
+
+ /* Data-ready is signalled by MISO going low while CS is asserted */
+ ret = request_irq(sigma_delta->spi->irq,
+ ad_sd_data_rdy_trig_poll,
+ IRQF_TRIGGER_LOW,
+ indio_dev->name,
+ sigma_delta);
+ if (ret)
+ goto error_free_trig;
+
+ /* Keep the irq masked until a conversion is actually started */
+ if (!sigma_delta->irq_dis) {
+ sigma_delta->irq_dis = true;
+ disable_irq_nosync(sigma_delta->spi->irq);
+ }
+ sigma_delta->trig->dev.parent = &sigma_delta->spi->dev;
+ iio_trigger_set_drvdata(sigma_delta->trig, sigma_delta);
+
+ ret = iio_trigger_register(sigma_delta->trig);
+ if (ret)
+ goto error_free_irq;
+
+ /* select default trigger */
+ indio_dev->trig = iio_trigger_get(sigma_delta->trig);
+
+ return 0;
+
+error_free_irq:
+ free_irq(sigma_delta->spi->irq, sigma_delta);
+error_free_trig:
+ iio_trigger_free(sigma_delta->trig);
+error_ret:
+ return ret;
+}
+
+/* Reverse of ad_sd_probe_trigger(): unregister, release irq, free trigger */
+static void ad_sd_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+
+ iio_trigger_unregister(sigma_delta->trig);
+ free_irq(sigma_delta->spi->irq, sigma_delta);
+ iio_trigger_free(sigma_delta->trig);
+}
+
+/**
+ * ad_sd_setup_buffer_and_trigger() - Set up triggered buffer and DRDY trigger
+ * @indio_dev: The IIO device
+ *
+ * Installs the sigma-delta triggered-buffer machinery and registers the
+ * device's data-ready trigger. Call ad_sd_cleanup_buffer_and_trigger() to
+ * undo. Returns 0 on success, an error code otherwise.
+ */
+int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &ad_sd_trigger_handler, &ad_sd_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = ad_sd_probe_trigger(indio_dev);
+ if (ret) {
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad_sd_setup_buffer_and_trigger);
+
+/**
+ * ad_sd_cleanup_buffer_and_trigger() - Undo ad_sd_setup_buffer_and_trigger()
+ * @indio_dev: The IIO device
+ */
+void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev)
+{
+ ad_sd_remove_trigger(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+EXPORT_SYMBOL_GPL(ad_sd_cleanup_buffer_and_trigger);
+
+/**
+ * ad_sd_init() - Initializes a ad_sigma_delta struct
+ * @sigma_delta: The ad_sigma_delta device
+ * @indio_dev: The IIO device which the Sigma Delta device is used for
+ * @spi: The SPI device for the ad_sigma_delta device
+ * @info: Device specific callbacks and options
+ *
+ * This function needs to be called before any other operations are performed on
+ * the ad_sigma_delta struct. Stores @sigma_delta as the IIO device's drvdata
+ * so the library callbacks can retrieve it. Always returns 0.
+ */
+int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct ad_sigma_delta_info *info)
+{
+ sigma_delta->spi = spi;
+ sigma_delta->info = info;
+ iio_device_set_drvdata(indio_dev, sigma_delta);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad_sd_init);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices Sigma-Delta ADCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
new file mode 100644
index 00000000000000..05c0269203738c
--- /dev/null
+++ b/drivers/iio/adc/at91_adc.c
@@ -0,0 +1,1439 @@
+/*
+ * Driver for the ADC present in the Atmel AT91 evaluation boards.
+ *
+ * Copyright 2011 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <linux/platform_data/at91_adc.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/* Registers */
+#define AT91_ADC_CR 0x00 /* Control Register */
+#define AT91_ADC_SWRST (1 << 0) /* Software Reset */
+#define AT91_ADC_START (1 << 1) /* Start Conversion */
+
+#define AT91_ADC_MR 0x04 /* Mode Register */
+#define AT91_ADC_TSAMOD (3 << 0) /* ADC mode */
+#define AT91_ADC_TSAMOD_ADC_ONLY_MODE (0 << 0) /* ADC Mode */
+#define AT91_ADC_TSAMOD_TS_ONLY_MODE (1 << 0) /* Touch Screen Only Mode */
+#define AT91_ADC_TRGEN (1 << 0) /* Trigger Enable */
+#define AT91_ADC_TRGSEL (7 << 1) /* Trigger Selection */
+#define AT91_ADC_TRGSEL_TC0 (0 << 1)
+#define AT91_ADC_TRGSEL_TC1 (1 << 1)
+#define AT91_ADC_TRGSEL_TC2 (2 << 1)
+#define AT91_ADC_TRGSEL_EXTERNAL (6 << 1)
+#define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */
+#define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */
+#define AT91_ADC_PENDET (1 << 6) /* Pen contact detection enable */
+#define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescalar Rate Selection */
+#define AT91_ADC_PRESCAL_9G45 (0xff << 8)
+#define AT91_ADC_PRESCAL_(x) ((x) << 8)
+#define AT91_ADC_STARTUP_9260 (0x1f << 16) /* Startup Up Time */
+#define AT91_ADC_STARTUP_9G45 (0x7f << 16)
+#define AT91_ADC_STARTUP_9X5 (0xf << 16)
+#define AT91_ADC_STARTUP_(x) ((x) << 16)
+#define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */
+#define AT91_ADC_SHTIM_(x) ((x) << 24)
+#define AT91_ADC_PENDBC (0x0f << 28) /* Pen Debounce time */
+#define AT91_ADC_PENDBC_(x) ((x) << 28)
+
+#define AT91_ADC_TSR 0x0C
+#define AT91_ADC_TSR_SHTIM (0xf << 24) /* Sample & Hold Time */
+#define AT91_ADC_TSR_SHTIM_(x) ((x) << 24)
+
+#define AT91_ADC_CHER 0x10 /* Channel Enable Register */
+#define AT91_ADC_CHDR 0x14 /* Channel Disable Register */
+#define AT91_ADC_CHSR 0x18 /* Channel Status Register */
+#define AT91_ADC_CH(n) (1 << (n)) /* Channel Number */
+
+#define AT91_ADC_SR 0x1C /* Status Register */
+#define AT91_ADC_EOC(n) (1 << (n)) /* End of Conversion on Channel N */
+#define AT91_ADC_OVRE(n) (1 << ((n) + 8))/* Overrun Error on Channel N */
+#define AT91_ADC_DRDY (1 << 16) /* Data Ready */
+#define AT91_ADC_GOVRE (1 << 17) /* General Overrun Error */
+#define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */
+#define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */
+
+#define AT91_ADC_SR_9X5 0x30 /* Status Register for 9x5 */
+#define AT91_ADC_SR_DRDY_9X5 (1 << 24) /* Data Ready */
+
+#define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */
+#define AT91_ADC_LDATA (0x3ff)
+
+#define AT91_ADC_IER 0x24 /* Interrupt Enable Register */
+#define AT91_ADC_IDR 0x28 /* Interrupt Disable Register */
+#define AT91_ADC_IMR 0x2C /* Interrupt Mask Register */
+#define AT91RL_ADC_IER_PEN (1 << 20)
+#define AT91RL_ADC_IER_NOPEN (1 << 21)
+#define AT91_ADC_IER_PEN (1 << 29)
+#define AT91_ADC_IER_NOPEN (1 << 30)
+#define AT91_ADC_IER_XRDY (1 << 20)
+#define AT91_ADC_IER_YRDY (1 << 21)
+#define AT91_ADC_IER_PRDY (1 << 22)
+#define AT91_ADC_ISR_PENS (1 << 31)
+
+#define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */
+#define AT91_ADC_DATA (0x3ff)
+
+#define AT91_ADC_CDR0_9X5 (0x50) /* Channel Data Register 0 for 9X5 */
+
+#define AT91_ADC_ACR 0x94 /* Analog Control Register */
+#define AT91_ADC_ACR_PENDETSENS (0x3 << 0) /* pull-up resistor */
+
+#define AT91_ADC_TSMR 0xB0
+#define AT91_ADC_TSMR_TSMODE (3 << 0) /* Touch Screen Mode */
+#define AT91_ADC_TSMR_TSMODE_NONE (0 << 0)
+#define AT91_ADC_TSMR_TSMODE_4WIRE_NO_PRESS (1 << 0)
+#define AT91_ADC_TSMR_TSMODE_4WIRE_PRESS (2 << 0)
+#define AT91_ADC_TSMR_TSMODE_5WIRE (3 << 0)
+#define AT91_ADC_TSMR_TSAV (3 << 4) /* Averages samples */
+#define AT91_ADC_TSMR_TSAV_(x) ((x) << 4)
+#define AT91_ADC_TSMR_SCTIM (0x0f << 16) /* Switch closure time */
+#define AT91_ADC_TSMR_PENDBC (0x0f << 28) /* Pen Debounce time */
+#define AT91_ADC_TSMR_PENDBC_(x) ((x) << 28)
+#define AT91_ADC_TSMR_NOTSDMA (1 << 22) /* No Touchscreen DMA */
+#define AT91_ADC_TSMR_PENDET_DIS (0 << 24) /* Pen contact detection disable */
+#define AT91_ADC_TSMR_PENDET_ENA (1 << 24) /* Pen contact detection enable */
+
+#define AT91_ADC_TSXPOSR 0xB4
+#define AT91_ADC_TSYPOSR 0xB8
+#define AT91_ADC_TSPRESSR 0xBC
+
+#define AT91_ADC_TRGR_9260 AT91_ADC_MR
+#define AT91_ADC_TRGR_9G45 0x08
+#define AT91_ADC_TRGR_9X5 0xC0
+
+/* Trigger Register bit field */
+#define AT91_ADC_TRGR_TRGPER (0xffff << 16)
+#define AT91_ADC_TRGR_TRGPER_(x) ((x) << 16)
+#define AT91_ADC_TRGR_TRGMOD (0x7 << 0)
+#define AT91_ADC_TRGR_NONE (0 << 0)
+#define AT91_ADC_TRGR_MOD_PERIOD_TRIG (5 << 0)
+
+#define AT91_ADC_CHAN(st, ch) \
+ (st->registers->channel_base + (ch * 4))
+#define at91_adc_readl(st, reg) \
+ (readl_relaxed(st->reg_base + reg))
+#define at91_adc_writel(st, reg, val) \
+ (writel_relaxed(val, st->reg_base + reg))
+
+#define DRIVER_NAME "at91_adc"
+#define MAX_POS_BITS 12
+
+#define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */
+#define TOUCH_PEN_DETECT_DEBOUNCE_US 200
+
+#define MAX_RLPOS_BITS 10
+#define TOUCH_SAMPLE_PERIOD_US_RL 10000 /* 10ms, the SoC can't keep up with 2ms */
+#define TOUCH_SHTIM 0xa
+
+/**
+ * struct at91_adc_reg_desc - Various informations relative to registers
+ * @channel_base: Base offset for the channel data registers
+ * @drdy_mask: Mask of the DRDY field in the relevant registers
+ (Interruptions registers mostly)
+ * @status_register: Offset of the Interrupt Status Register
+ * @trigger_register: Offset of the Trigger setup register
+ * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register
+ * @mr_startup_mask: Mask of the STARTUP field in the adc MR register
+ */
+struct at91_adc_reg_desc {
+ u8 channel_base;
+ u32 drdy_mask;
+ u8 status_register;
+ u8 trigger_register;
+ u32 mr_prescal_mask;
+ u32 mr_startup_mask;
+};
+
+struct at91_adc_caps {
+ bool has_ts; /* Support touch screen */
+ bool has_tsmr; /* only at91sam9x5, sama5d3 have TSMR reg */
+ /*
+ * Numbers of sampling data will be averaged. Can be 0~3.
+ * Hardware can average (2 ^ ts_filter_average) sample data.
+ */
+ u8 ts_filter_average;
+ /* Pen Detection input pull-up resistor, can be 0~3 */
+ u8 ts_pen_detect_sensitivity;
+
+ /* startup time calculate function */
+ u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+
+ u8 num_channels;
+ struct at91_adc_reg_desc registers;
+};
+
+struct at91_adc_state {
+ struct clk *adc_clk;
+ u16 *buffer;
+ unsigned long channels_mask;
+ struct clk *clk;
+ bool done;
+ int irq;
+ u16 last_value;
+ int chnb;
+ struct mutex lock;
+ u8 num_channels;
+ void __iomem *reg_base;
+ struct at91_adc_reg_desc *registers;
+ u8 startup_time;
+ u8 sample_hold_time;
+ bool sleep_mode;
+ struct iio_trigger **trig;
+ struct at91_adc_trigger *trigger_list;
+ u32 trigger_number;
+ bool use_external;
+ u32 vref_mv;
+ u32 res; /* resolution used for convertions */
+ bool low_res; /* the resolution corresponds to the lowest one */
+ wait_queue_head_t wq_data_avail;
+ struct at91_adc_caps *caps;
+
+ /*
+ * Following ADC channels are shared by touchscreen:
+ *
+ * CH0 -- Touch screen XP/UL
+ * CH1 -- Touch screen XM/UR
+ * CH2 -- Touch screen YP/LL
+ * CH3 -- Touch screen YM/Sense
+ * CH4 -- Touch screen LR(5-wire only)
+ *
+ * The bitfields below represents the reserved channel in the
+ * touchscreen mode.
+ */
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 0)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 0)
+ enum atmel_adc_ts_type touchscreen_type;
+ struct input_dev *ts_input;
+
+ u16 ts_sample_period_val;
+ u32 ts_pressure_threshold;
+ u16 ts_pendbc;
+
+ bool ts_bufferedmeasure;
+ u32 ts_prev_absx;
+ u32 ts_prev_absy;
+};
+
+/* Threaded trigger handler: gather one reading per active scan channel into
+ * st->buffer, push with timestamp, ack DRDY and re-enable the irq that was
+ * masked in handle_adc_eoc_trigger(). */
+static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *idev = pf->indio_dev;
+ struct at91_adc_state *st = iio_priv(idev);
+ int i, j = 0;
+
+ /* Pack active channels contiguously into the scan buffer */
+ for (i = 0; i < idev->masklength; i++) {
+ if (!test_bit(i, idev->active_scan_mask))
+ continue;
+ st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
+ j++;
+ }
+
+ iio_push_to_buffers_with_timestamp(idev, st->buffer, pf->timestamp);
+
+ iio_trigger_notify_done(idev->trig);
+
+ /* Needed to ACK the DRDY interruption */
+ at91_adc_readl(st, AT91_ADC_LCDR);
+
+ enable_irq(st->irq);
+
+ return IRQ_HANDLED;
+}
+
+/* Handler for classic adc channel eoc trigger. Buffered mode: mask the irq
+ * (re-enabled by the trigger handler) and kick the trigger. Raw-read mode:
+ * latch the converted value and wake the sleeping reader. */
+static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+
+ if (iio_buffer_enabled(idev)) {
+ disable_irq_nosync(irq);
+ iio_trigger_poll(idev->trig);
+ } else {
+ st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
+ st->done = true;
+ wake_up_interruptible(&st->wq_data_avail);
+ }
+}
+
+/* Read one touchscreen sample set (X/Y position + pressure registers),
+ * scale positions to MAX_POS_BITS resolution and report through the input
+ * device when the computed pressure is below the contact threshold.
+ * NOTE(review): returns a bare -1 on scale errors rather than an errno
+ * code; callers currently ignore the value. */
+static int at91_ts_sample(struct at91_adc_state *st)
+{
+ unsigned int xscale, yscale, reg, z1, z2;
+ unsigned int x, y, pres, xpos, ypos;
+ unsigned int rxp = 1;
+ unsigned int factor = 1000;
+ struct iio_dev *idev = iio_priv_to_dev(st);
+
+ unsigned int xyz_mask_bits = st->res;
+ unsigned int xyz_mask = (1 << xyz_mask_bits) - 1;
+
+ /* calculate position */
+ /* x position = (x / xscale) * max, max = 2^MAX_POS_BITS - 1 */
+ reg = at91_adc_readl(st, AT91_ADC_TSXPOSR);
+ xpos = reg & xyz_mask;
+ x = (xpos << MAX_POS_BITS) - xpos;
+ xscale = (reg >> 16) & xyz_mask;
+ if (xscale == 0) {
+ dev_err(&idev->dev, "Error: xscale == 0!\n");
+ return -1;
+ }
+ x /= xscale;
+
+ /* y position = (y / yscale) * max, max = 2^MAX_POS_BITS - 1 */
+ reg = at91_adc_readl(st, AT91_ADC_TSYPOSR);
+ ypos = reg & xyz_mask;
+ y = (ypos << MAX_POS_BITS) - ypos;
+ yscale = (reg >> 16) & xyz_mask;
+ if (yscale == 0) {
+ dev_err(&idev->dev, "Error: yscale == 0!\n");
+ return -1;
+ }
+ y /= yscale;
+
+ /* calculate the pressure */
+ reg = at91_adc_readl(st, AT91_ADC_TSPRESSR);
+ z1 = reg & xyz_mask;
+ z2 = (reg >> 16) & xyz_mask;
+
+ /* z1 == 0 means no valid resistance reading — treat as no contact */
+ if (z1 != 0)
+ pres = rxp * (x * factor / 1024) * (z2 * factor / z1 - factor)
+ / factor;
+ else
+ pres = st->ts_pressure_threshold; /* no pen contacted */
+
+ dev_dbg(&idev->dev, "xpos = %d, xscale = %d, ypos = %d, yscale = %d, z1 = %d, z2 = %d, press = %d\n",
+ xpos, xscale, ypos, yscale, z1, z2, pres);
+
+ /* Lower computed "pressure" value means firmer contact */
+ if (pres < st->ts_pressure_threshold) {
+ dev_dbg(&idev->dev, "x = %d, y = %d, pressure = %d\n",
+ x, y, pres / factor);
+ input_report_abs(st->ts_input, ABS_X, x);
+ input_report_abs(st->ts_input, ABS_Y, y);
+ input_report_abs(st->ts_input, ABS_PRESSURE, pres);
+ input_report_key(st->ts_input, BTN_TOUCH, 1);
+ input_sync(st->ts_input);
+ } else {
+ dev_dbg(&idev->dev, "pressure too low: not reporting\n");
+ }
+
+ return 0;
+}
+
+/* Interrupt handler for at91rl-class SoCs (no TSMR). Dispatches between
+ * classic channel EOC events, pen-down/pen-up transitions, and touchscreen
+ * conversion completion. X/Y are computed from channel ratios because this
+ * hardware has no dedicated position registers. */
+static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct at91_adc_state *st = iio_priv(idev);
+ u32 status = at91_adc_readl(st, st->registers->status_register);
+ unsigned int reg;
+
+ /* Only react to sources we actually have enabled */
+ status &= at91_adc_readl(st, AT91_ADC_IMR);
+ if (status & GENMASK(st->num_channels - 1, 0))
+ handle_adc_eoc_trigger(irq, idev);
+
+ if (status & AT91RL_ADC_IER_PEN) {
+ /* Disabling pen debounce is required to get a NOPEN irq */
+ reg = at91_adc_readl(st, AT91_ADC_MR);
+ reg &= ~AT91_ADC_PENDBC;
+ at91_adc_writel(st, AT91_ADC_MR, reg);
+
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN);
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_NOPEN
+ | AT91_ADC_EOC(3));
+ /* Set up period trigger for sampling */
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_MOD_PERIOD_TRIG |
+ AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val));
+ } else if (status & AT91RL_ADC_IER_NOPEN) {
+ /* Pen lifted: restore debounce, stop periodic sampling and
+ * re-arm the pen-down interrupt */
+ reg = at91_adc_readl(st, AT91_ADC_MR);
+ reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC;
+ at91_adc_writel(st, AT91_ADC_MR, reg);
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_NONE);
+
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_NOPEN
+ | AT91_ADC_EOC(3));
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN);
+ st->ts_bufferedmeasure = false;
+ input_report_key(st->ts_input, BTN_TOUCH, 0);
+ input_sync(st->ts_input);
+ } else if (status & AT91_ADC_EOC(3)) {
+ /* Conversion finished */
+ if (st->ts_bufferedmeasure) {
+ /*
+ * Last measurement is always discarded, since it can
+ * be erroneous.
+ * Always report previous measurement
+ */
+ input_report_abs(st->ts_input, ABS_X, st->ts_prev_absx);
+ input_report_abs(st->ts_input, ABS_Y, st->ts_prev_absy);
+ input_report_key(st->ts_input, BTN_TOUCH, 1);
+ input_sync(st->ts_input);
+ } else
+ st->ts_bufferedmeasure = true;
+
+ /* Now make new measurement */
+ st->ts_prev_absx = at91_adc_readl(st, AT91_ADC_CHAN(st, 3))
+ << MAX_RLPOS_BITS;
+ st->ts_prev_absx /= at91_adc_readl(st, AT91_ADC_CHAN(st, 2));
+
+ st->ts_prev_absy = at91_adc_readl(st, AT91_ADC_CHAN(st, 1))
+ << MAX_RLPOS_BITS;
+ st->ts_prev_absy /= at91_adc_readl(st, AT91_ADC_CHAN(st, 0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for 9x5/sama5-class SoCs with dedicated touchscreen
+ * position/pressure registers. Pen-down enables periodic sampling of the
+ * XRDY/YRDY/PRDY trio; a sample is only reported once all three are ready. */
+static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private)
+{
+ struct iio_dev *idev = private;
+ struct at91_adc_state *st = iio_priv(idev);
+ u32 status = at91_adc_readl(st, st->registers->status_register);
+ const uint32_t ts_data_irq_mask =
+ AT91_ADC_IER_XRDY |
+ AT91_ADC_IER_YRDY |
+ AT91_ADC_IER_PRDY;
+
+ if (status & GENMASK(st->num_channels - 1, 0))
+ handle_adc_eoc_trigger(irq, idev);
+
+ if (status & AT91_ADC_IER_PEN) {
+ /* Pen down: swap pen irq for data-ready irqs */
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_NOPEN |
+ ts_data_irq_mask);
+ /* Set up period trigger for sampling */
+ at91_adc_writel(st, st->registers->trigger_register,
+ AT91_ADC_TRGR_MOD_PERIOD_TRIG |
+ AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val));
+ } else if (status & AT91_ADC_IER_NOPEN) {
+ /* Pen lifted: stop sampling and re-arm pen detection */
+ at91_adc_writel(st, st->registers->trigger_register, 0);
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_NOPEN |
+ ts_data_irq_mask);
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+
+ input_report_key(st->ts_input, BTN_TOUCH, 0);
+ input_sync(st->ts_input);
+ } else if ((status & ts_data_irq_mask) == ts_data_irq_mask) {
+ /* Now all touchscreen data is ready */
+
+ if (status & AT91_ADC_ISR_PENS) {
+ /* validate data by pen contact */
+ at91_ts_sample(st);
+ } else {
+ /* triggered by event that is no pen contact, just read
+ * them to clean the interrupt and discard all.
+ */
+ at91_adc_readl(st, AT91_ADC_TSXPOSR);
+ at91_adc_readl(st, AT91_ADC_TSYPOSR);
+ at91_adc_readl(st, AT91_ADC_TSPRESSR);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Build the iio_chan_spec array: one voltage channel per bit left in
+ * channels_mask after reserving touchscreen channels, plus a trailing
+ * timestamp channel. Returns the channel count (>= 1) or -ENOMEM. */
+static int at91_adc_channel_init(struct iio_dev *idev)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+ struct iio_chan_spec *chan_array, *timestamp;
+ int bit, idx = 0;
+ unsigned long rsvd_mask = 0;
+
+ /* If touchscreen is enable, then reserve the adc channels */
+ if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
+ rsvd_mask = CHAN_MASK_TOUCHSCREEN_4WIRE;
+ else if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_5WIRE)
+ rsvd_mask = CHAN_MASK_TOUCHSCREEN_5WIRE;
+
+ /* set up the channel mask to reserve touchscreen channels */
+ st->channels_mask &= ~rsvd_mask;
+
+ /* +1 accounts for the timestamp channel */
+ idev->num_channels = bitmap_weight(&st->channels_mask,
+ st->num_channels) + 1;
+
+ /* NOTE(review): allocates num_channels + 1 entries although
+ * num_channels already includes the timestamp — one spare entry */
+ chan_array = devm_kzalloc(&idev->dev,
+ ((idev->num_channels + 1) *
+ sizeof(struct iio_chan_spec)),
+ GFP_KERNEL);
+
+ if (!chan_array)
+ return -ENOMEM;
+
+ for_each_set_bit(bit, &st->channels_mask, st->num_channels) {
+ struct iio_chan_spec *chan = chan_array + idx;
+
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = bit;
+ chan->scan_index = idx;
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = st->res;
+ chan->scan_type.storagebits = 16;
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ idx++;
+ }
+ timestamp = chan_array + idx;
+
+ timestamp->type = IIO_TIMESTAMP;
+ timestamp->channel = -1;
+ timestamp->scan_index = idx;
+ timestamp->scan_type.sign = 's';
+ timestamp->scan_type.realbits = 64;
+ timestamp->scan_type.storagebits = 64;
+
+ idev->channels = chan_array;
+ return idev->num_channels;
+}
+
+/* Map a registered trigger name ("<dev>-dev<id>-<trigger>") back to the
+ * trigger's hardware register value. Returns the value, or -EINVAL when no
+ * entry matches (or the entry's value is 0), or -ENOMEM. */
+static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+ struct at91_adc_trigger *triggers,
+ const char *trigger_name)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+ int i;
+
+ for (i = 0; i < st->trigger_number; i++) {
+ /* Rebuild the name format used at trigger allocation time */
+ char *name = kasprintf(GFP_KERNEL,
+ "%s-dev%d-%s",
+ idev->name,
+ idev->id,
+ triggers[i].name);
+ if (!name)
+ return -ENOMEM;
+
+ if (strcmp(trigger_name, name) == 0) {
+ kfree(name);
+ if (triggers[i].value == 0)
+ return -EINVAL;
+ return triggers[i].value;
+ }
+
+ kfree(name);
+ }
+
+ return -EINVAL;
+}
+
+/* set_trigger_state callback: on enable, allocate the scan buffer, program
+ * the hardware trigger bits, enable each scanned channel and unmask DRDY;
+ * on disable, undo all of that and free the buffer. */
+static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *idev = iio_trigger_get_drvdata(trig);
+ struct at91_adc_state *st = iio_priv(idev);
+ struct iio_buffer *buffer = idev->buffer;
+ struct at91_adc_reg_desc *reg = st->registers;
+ u32 status = at91_adc_readl(st, reg->trigger_register);
+ int value;
+ u8 bit;
+
+ value = at91_adc_get_trigger_value_by_name(idev,
+ st->trigger_list,
+ idev->trig->name);
+ if (value < 0)
+ return value;
+
+ if (state) {
+ /* Scan buffer lives for the duration of buffered capture */
+ st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
+ if (st->buffer == NULL)
+ return -ENOMEM;
+
+ at91_adc_writel(st, reg->trigger_register,
+ status | value);
+
+ for_each_set_bit(bit, buffer->scan_mask,
+ st->num_channels) {
+ struct iio_chan_spec const *chan = idev->channels + bit;
+ at91_adc_writel(st, AT91_ADC_CHER,
+ AT91_ADC_CH(chan->channel));
+ }
+
+ at91_adc_writel(st, AT91_ADC_IER, reg->drdy_mask);
+
+ } else {
+ /* Mask DRDY first so no irq fires while tearing down */
+ at91_adc_writel(st, AT91_ADC_IDR, reg->drdy_mask);
+
+ at91_adc_writel(st, reg->trigger_register,
+ status & ~value);
+
+ for_each_set_bit(bit, buffer->scan_mask,
+ st->num_channels) {
+ struct iio_chan_spec const *chan = idev->channels + bit;
+ at91_adc_writel(st, AT91_ADC_CHDR,
+ AT91_ADC_CH(chan->channel));
+ }
+ kfree(st->buffer);
+ }
+
+ return 0;
+}
+
/* Glue between the IIO trigger core and this driver's arm/disarm code. */
static const struct iio_trigger_ops at91_adc_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &at91_adc_configure_trigger,
};
+
+static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
+ struct at91_adc_trigger *trigger)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = iio_trigger_alloc("%s-dev%d-%s", idev->name,
+ idev->id, trigger->name);
+ if (trig == NULL)
+ return NULL;
+
+ trig->dev.parent = idev->dev.parent;
+ iio_trigger_set_drvdata(trig, idev);
+ trig->ops = &at91_adc_trigger_ops;
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ return NULL;
+
+ return trig;
+}
+
+static int at91_adc_trigger_init(struct iio_dev *idev)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+ int i, ret;
+
+ st->trig = devm_kzalloc(&idev->dev,
+ st->trigger_number * sizeof(*st->trig),
+ GFP_KERNEL);
+
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ for (i = 0; i < st->trigger_number; i++) {
+ if (st->trigger_list[i].is_external && !(st->use_external))
+ continue;
+
+ st->trig[i] = at91_adc_allocate_trigger(idev,
+ st->trigger_list + i);
+ if (st->trig[i] == NULL) {
+ dev_err(&idev->dev,
+ "Could not allocate trigger %d\n", i);
+ ret = -ENOMEM;
+ goto error_trigger;
+ }
+ }
+
+ return 0;
+
+error_trigger:
+ for (i--; i >= 0; i--) {
+ iio_trigger_unregister(st->trig[i]);
+ iio_trigger_free(st->trig[i]);
+ }
+error_ret:
+ return ret;
+}
+
+static void at91_adc_trigger_remove(struct iio_dev *idev)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+ int i;
+
+ for (i = 0; i < st->trigger_number; i++) {
+ iio_trigger_unregister(st->trig[i]);
+ iio_trigger_free(st->trig[i]);
+ }
+}
+
/* Set up the triggered-buffer core with the standard timestamp pollfunc. */
static int at91_adc_buffer_init(struct iio_dev *idev)
{
	return iio_triggered_buffer_setup(idev, &iio_pollfunc_store_time,
					  &at91_adc_trigger_handler, NULL);
}

/* Counterpart of at91_adc_buffer_init(). */
static void at91_adc_buffer_remove(struct iio_dev *idev)
{
	iio_triggered_buffer_cleanup(idev);
}
+
+static int at91_adc_read_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(idev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+
+ st->chnb = chan->channel;
+ at91_adc_writel(st, AT91_ADC_CHER,
+ AT91_ADC_CH(chan->channel));
+ at91_adc_writel(st, AT91_ADC_IER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START);
+
+ ret = wait_event_interruptible_timeout(st->wq_data_avail,
+ st->done,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ *val = st->last_value;
+
+ at91_adc_writel(st, AT91_ADC_CHDR,
+ AT91_ADC_CH(chan->channel));
+ at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
+
+ st->last_value = 0;
+ st->done = false;
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int at91_adc_of_get_resolution(struct at91_adc_state *st,
+ struct platform_device *pdev)
+{
+ struct iio_dev *idev = iio_priv_to_dev(st);
+ struct device_node *np = pdev->dev.of_node;
+ int count, i, ret = 0;
+ char *res_name, *s;
+ u32 *resolutions;
+
+ count = of_property_count_strings(np, "atmel,adc-res-names");
+ if (count < 2) {
+ dev_err(&idev->dev, "You must specified at least two resolution names for "
+ "adc-res-names property in the DT\n");
+ return count;
+ }
+
+ resolutions = kmalloc(count * sizeof(*resolutions), GFP_KERNEL);
+ if (!resolutions)
+ return -ENOMEM;
+
+ if (of_property_read_u32_array(np, "atmel,adc-res", resolutions, count)) {
+ dev_err(&idev->dev, "Missing adc-res property in the DT.\n");
+ ret = -ENODEV;
+ goto ret;
+ }
+
+ if (of_property_read_string(np, "atmel,adc-use-res", (const char **)&res_name))
+ res_name = "highres";
+
+ for (i = 0; i < count; i++) {
+ if (of_property_read_string_index(np, "atmel,adc-res-names", i, (const char **)&s))
+ continue;
+
+ if (strcmp(res_name, s))
+ continue;
+
+ st->res = resolutions[i];
+ if (!strcmp(res_name, "lowres"))
+ st->low_res = true;
+ else
+ st->low_res = false;
+
+ dev_info(&idev->dev, "Resolution used: %u bits\n", st->res);
+ goto ret;
+ }
+
+ dev_err(&idev->dev, "There is no resolution for %s\n", res_name);
+
+ret:
+ kfree(resolutions);
+ return ret;
+}
+
/*
 * Number of ticks needed to cover the startup time of the ADC
 * as defined in the electrical characteristics of the board,
 * divided by 8. The formula thus is :
 * Startup Time = (ticks + 1) * 8 / ADC Clock
 * (startup_time is in microseconds, adc_clk_khz in kHz.)
 */
static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
{
	/* round_up(..., 8) / 8 gives the smallest tick count covering it */
	return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
}
+
+static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+{
+ /*
+ * For sama5d3x and at91sam9x5, the formula changes to:
+ * Startup Time = <lookup_table_value> / ADC Clock
+ */
+ const int startup_lookup[] = {
+ 0 , 8 , 16 , 24 ,
+ 64 , 80 , 96 , 112,
+ 512, 576, 640, 704,
+ 768, 832, 896, 960
+ };
+ int i, size = ARRAY_SIZE(startup_lookup);
+ unsigned int ticks;
+
+ ticks = startup_time * adc_clk_khz / 1000;
+ for (i = 0; i < size; i++)
+ if (ticks < startup_lookup[i])
+ break;
+
+ ticks = i;
+ if (ticks == size)
+ /* Reach the end of lookup table */
+ ticks = size - 1;
+
+ return ticks;
+}
+
+static const struct of_device_id at91_adc_dt_ids[];
+
+static int at91_adc_probe_dt_ts(struct device_node *node,
+ struct at91_adc_state *st, struct device *dev)
+{
+ int ret;
+ u32 prop;
+
+ ret = of_property_read_u32(node, "atmel,adc-ts-wires", &prop);
+ if (ret) {
+ dev_info(dev, "ADC Touch screen is disabled.\n");
+ return 0;
+ }
+
+ switch (prop) {
+ case 4:
+ case 5:
+ st->touchscreen_type = prop;
+ break;
+ default:
+ dev_err(dev, "Unsupported number of touchscreen wires (%d). Should be 4 or 5.\n", prop);
+ return -EINVAL;
+ }
+
+ if (!st->caps->has_tsmr)
+ return 0;
+ prop = 0;
+ of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop);
+ st->ts_pressure_threshold = prop;
+ if (st->ts_pressure_threshold) {
+ return 0;
+ } else {
+ dev_err(dev, "Invalid pressure threshold for the touchscreen\n");
+ return -EINVAL;
+ }
+}
+
+static int at91_adc_probe_dt(struct at91_adc_state *st,
+ struct platform_device *pdev)
+{
+ struct iio_dev *idev = iio_priv_to_dev(st);
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *trig_node;
+ int i = 0, ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ st->caps = (struct at91_adc_caps *)
+ of_match_device(at91_adc_dt_ids, &pdev->dev)->data;
+
+ st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers");
+
+ if (of_property_read_u32(node, "atmel,adc-channels-used", &prop)) {
+ dev_err(&idev->dev, "Missing adc-channels-used property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ st->channels_mask = prop;
+
+ st->sleep_mode = of_property_read_bool(node, "atmel,adc-sleep-mode");
+
+ if (of_property_read_u32(node, "atmel,adc-startup-time", &prop)) {
+ dev_err(&idev->dev, "Missing adc-startup-time property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ st->startup_time = prop;
+
+ prop = 0;
+ of_property_read_u32(node, "atmel,adc-sample-hold-time", &prop);
+ st->sample_hold_time = prop;
+
+ if (of_property_read_u32(node, "atmel,adc-vref", &prop)) {
+ dev_err(&idev->dev, "Missing adc-vref property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ st->vref_mv = prop;
+
+ ret = at91_adc_of_get_resolution(st, pdev);
+ if (ret)
+ goto error_ret;
+
+ st->registers = &st->caps->registers;
+ st->num_channels = st->caps->num_channels;
+ st->trigger_number = of_get_child_count(node);
+ st->trigger_list = devm_kzalloc(&idev->dev, st->trigger_number *
+ sizeof(struct at91_adc_trigger),
+ GFP_KERNEL);
+ if (!st->trigger_list) {
+ dev_err(&idev->dev, "Could not allocate trigger list memory.\n");
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ for_each_child_of_node(node, trig_node) {
+ struct at91_adc_trigger *trig = st->trigger_list + i;
+ const char *name;
+
+ if (of_property_read_string(trig_node, "trigger-name", &name)) {
+ dev_err(&idev->dev, "Missing trigger-name property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ trig->name = name;
+
+ if (of_property_read_u32(trig_node, "trigger-value", &prop)) {
+ dev_err(&idev->dev, "Missing trigger-value property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ trig->value = prop;
+ trig->is_external = of_property_read_bool(trig_node, "trigger-external");
+ i++;
+ }
+
+ /* Check if touchscreen is supported. */
+ if (st->caps->has_ts)
+ return at91_adc_probe_dt_ts(node, st, &idev->dev);
+ else
+ dev_info(&idev->dev, "not support touchscreen in the adc compatible string.\n");
+
+ return 0;
+
+error_ret:
+ return ret;
+}
+
+static int at91_adc_probe_pdata(struct at91_adc_state *st,
+ struct platform_device *pdev)
+{
+ struct at91_adc_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ st->caps = (struct at91_adc_caps *)
+ platform_get_device_id(pdev)->driver_data;
+
+ st->use_external = pdata->use_external_triggers;
+ st->vref_mv = pdata->vref;
+ st->channels_mask = pdata->channels_used;
+ st->num_channels = st->caps->num_channels;
+ st->startup_time = pdata->startup_time;
+ st->trigger_number = pdata->trigger_number;
+ st->trigger_list = pdata->trigger_list;
+ st->registers = &st->caps->registers;
+ st->touchscreen_type = pdata->touchscreen_type;
+
+ return 0;
+}
+
/* Direct-mode IIO callbacks; buffered capture goes through the trigger ops. */
static const struct iio_info at91_adc_info = {
	.driver_module = THIS_MODULE,
	.read_raw = &at91_adc_read_raw,
};
+
+/* Touchscreen related functions */
+static int atmel_ts_open(struct input_dev *dev)
+{
+ struct at91_adc_state *st = input_get_drvdata(dev);
+
+ if (st->caps->has_tsmr)
+ at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN);
+ else
+ at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN);
+ return 0;
+}
+
+static void atmel_ts_close(struct input_dev *dev)
+{
+ struct at91_adc_state *st = input_get_drvdata(dev);
+
+ if (st->caps->has_tsmr)
+ at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN);
+ else
+ at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN);
+}
+
/*
 * Program the touchscreen-related registers: pen-detect debounce, sample
 * period and (on TSMR-capable parts) the touchscreen mode register.
 * Always returns 0.
 */
static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz)
{
	u32 reg = 0;
	int i = 0;

	/* a Pen Detect Debounce Time is necessary for the ADC Touch to avoid
	 * pen detect noise.
	 * The formula is : Pen Detect Debounce Time = (2 ^ pendbc) / ADCClock
	 */
	st->ts_pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz /
				 1000, 1);

	/* convert the tick count to the nearest power-of-two exponent, since
	 * the register field stores pendbc as an exponent, not a tick count */
	while (st->ts_pendbc >> ++i)
		; /* Empty! Find the shift offset */
	if (abs(st->ts_pendbc - (1 << i)) < abs(st->ts_pendbc - (1 << (i - 1))))
		st->ts_pendbc = i;
	else
		st->ts_pendbc = i - 1;

	/* older IP revisions (no TSMR): everything lives in MR/TSR */
	if (!st->caps->has_tsmr) {
		reg = at91_adc_readl(st, AT91_ADC_MR);
		reg |= AT91_ADC_TSAMOD_TS_ONLY_MODE | AT91_ADC_PENDET;

		reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC;
		at91_adc_writel(st, AT91_ADC_MR, reg);

		reg = AT91_ADC_TSR_SHTIM_(TOUCH_SHTIM) & AT91_ADC_TSR_SHTIM;
		at91_adc_writel(st, AT91_ADC_TSR, reg);

		st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US_RL *
						     adc_clk_khz / 1000) - 1, 1);

		return 0;
	}

	if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE)
		reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS;
	else
		reg = AT91_ADC_TSMR_TSMODE_5WIRE;

	reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average)
	       & AT91_ADC_TSMR_TSAV;
	reg |= AT91_ADC_TSMR_PENDBC_(st->ts_pendbc) & AT91_ADC_TSMR_PENDBC;
	reg |= AT91_ADC_TSMR_NOTSDMA;
	reg |= AT91_ADC_TSMR_PENDET_ENA;
	reg |= 0x03 << 8; /* TSFREQ, needs to be bigger than TSAV */

	at91_adc_writel(st, AT91_ADC_TSMR, reg);

	/* Change adc internal resistor value for better pen detection,
	 * default value is 100 kOhm.
	 * 0 = 200 kOhm, 1 = 150 kOhm, 2 = 100 kOhm, 3 = 50 kOhm
	 * option only available on ES2 and higher
	 */
	at91_adc_writel(st, AT91_ADC_ACR, st->caps->ts_pen_detect_sensitivity
					  & AT91_ADC_ACR_PENDETSENS);

	/* Sample Period Time = (TRGPER + 1) / ADCClock */
	st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US *
					     adc_clk_khz / 1000) - 1, 1);

	return 0;
}
+
+static int at91_ts_register(struct at91_adc_state *st,
+ struct platform_device *pdev)
+{
+ struct input_dev *input;
+ struct iio_dev *idev = iio_priv_to_dev(st);
+ int ret;
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&idev->dev, "Failed to allocate TS device!\n");
+ return -ENOMEM;
+ }
+
+ input->name = DRIVER_NAME;
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &pdev->dev;
+ input->open = atmel_ts_open;
+ input->close = atmel_ts_close;
+
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ if (st->caps->has_tsmr) {
+ input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0);
+ } else {
+ if (st->touchscreen_type != ATMEL_ADC_TOUCHSCREEN_4WIRE) {
+ dev_err(&pdev->dev,
+ "This touchscreen controller only support 4 wires\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ input_set_abs_params(input, ABS_X, 0, (1 << MAX_RLPOS_BITS) - 1,
+ 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, (1 << MAX_RLPOS_BITS) - 1,
+ 0, 0);
+ }
+
+ st->ts_input = input;
+ input_set_drvdata(input, st);
+
+ ret = input_register_device(input);
+ if (ret)
+ goto err;
+
+ return ret;
+
+err:
+ input_free_device(st->ts_input);
+ return ret;
+}
+
/* Unregister (and implicitly free) the touchscreen input device. */
static void at91_ts_unregister(struct at91_adc_state *st)
{
	input_unregister_device(st->ts_input);
}
+
/*
 * Probe: parse configuration (DT or platform data), map registers, request
 * the IRQ and clocks, program the mode register (prescaler, startup time,
 * sample-and-hold), then register either the triggered buffer or the
 * touchscreen front end, and finally the IIO device itself.
 *
 * Resources acquired here are released in reverse order by the error labels
 * at the bottom and by at91_adc_remove().
 */
static int at91_adc_probe(struct platform_device *pdev)
{
	unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
	int ret;
	struct iio_dev *idev;
	struct at91_adc_state *st;
	struct resource *res;
	u32 reg;

	idev = devm_iio_device_alloc(&pdev->dev, sizeof(struct at91_adc_state));
	if (!idev)
		return -ENOMEM;

	st = iio_priv(idev);

	if (pdev->dev.of_node)
		ret = at91_adc_probe_dt(st, pdev);
	else
		ret = at91_adc_probe_pdata(st, pdev);

	if (ret) {
		dev_err(&pdev->dev, "No platform data available.\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, idev);

	idev->dev.parent = &pdev->dev;
	idev->name = dev_name(&pdev->dev);
	idev->modes = INDIO_DIRECT_MODE;
	idev->info = &at91_adc_info;

	st->irq = platform_get_irq(pdev, 0);
	if (st->irq < 0) {
		dev_err(&pdev->dev, "No IRQ ID is designated\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	st->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(st->reg_base)) {
		return PTR_ERR(st->reg_base);
	}

	/*
	 * Disable all IRQs before setting up the handler
	 */
	at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST);
	at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF);

	/* the two IP revisions report status/touch data differently */
	if (st->caps->has_tsmr)
		ret = request_irq(st->irq, at91_adc_9x5_interrupt, 0,
				  pdev->dev.driver->name, idev);
	else
		ret = request_irq(st->irq, at91_adc_rl_interrupt, 0,
				  pdev->dev.driver->name, idev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate IRQ.\n");
		return ret;
	}

	st->clk = devm_clk_get(&pdev->dev, "adc_clk");
	if (IS_ERR(st->clk)) {
		dev_err(&pdev->dev, "Failed to get the clock.\n");
		ret = PTR_ERR(st->clk);
		goto error_free_irq;
	}

	ret = clk_prepare_enable(st->clk);
	if (ret) {
		dev_err(&pdev->dev,
			"Could not prepare or enable the clock.\n");
		goto error_free_irq;
	}

	st->adc_clk = devm_clk_get(&pdev->dev, "adc_op_clk");
	if (IS_ERR(st->adc_clk)) {
		dev_err(&pdev->dev, "Failed to get the ADC clock.\n");
		ret = PTR_ERR(st->adc_clk);
		goto error_disable_clk;
	}

	ret = clk_prepare_enable(st->adc_clk);
	if (ret) {
		dev_err(&pdev->dev,
			"Could not prepare or enable the ADC clock.\n");
		goto error_disable_clk;
	}

	/*
	 * Prescaler rate computation using the formula from the Atmel's
	 * datasheet : ADC Clock = MCK / ((Prescaler + 1) * 2), ADC Clock being
	 * specified by the electrical characteristics of the board.
	 */
	mstrclk = clk_get_rate(st->clk);
	adc_clk = clk_get_rate(st->adc_clk);
	adc_clk_khz = adc_clk / 1000;

	dev_dbg(&pdev->dev, "Master clock is set as: %d Hz, adc_clk should set as: %d Hz\n",
		mstrclk, adc_clk);

	prsc = (mstrclk / (2 * adc_clk)) - 1;

	if (!st->startup_time) {
		dev_err(&pdev->dev, "No startup time available.\n");
		ret = -EINVAL;
		goto error_disable_adc_clk;
	}
	/* per-variant startup-tick formula (9260 vs 9x5 family) */
	ticks = (*st->caps->calc_startup_ticks)(st->startup_time, adc_clk_khz);

	/*
	 * a minimal Sample and Hold Time is necessary for the ADC to guarantee
	 * the best converted final value between two channels selection
	 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
	 */
	if (st->sample_hold_time > 0)
		shtim = round_up((st->sample_hold_time * adc_clk_khz / 1000)
				 - 1, 1);
	else
		shtim = 0;

	reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
	reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
	if (st->low_res)
		reg |= AT91_ADC_LOWRES;
	if (st->sleep_mode)
		reg |= AT91_ADC_SLEEP;
	reg |= AT91_ADC_SHTIM_(shtim) & AT91_ADC_SHTIM;
	at91_adc_writel(st, AT91_ADC_MR, reg);

	/* Setup the ADC channels available on the board */
	ret = at91_adc_channel_init(idev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Couldn't initialize the channels.\n");
		goto error_disable_adc_clk;
	}

	init_waitqueue_head(&st->wq_data_avail);
	mutex_init(&st->lock);

	/*
	 * Since touch screen will set trigger register as period trigger. So
	 * when touch screen is enabled, then we have to disable hardware
	 * trigger for classic adc.
	 */
	if (!st->touchscreen_type) {
		ret = at91_adc_buffer_init(idev);
		if (ret < 0) {
			dev_err(&pdev->dev, "Couldn't initialize the buffer.\n");
			goto error_disable_adc_clk;
		}

		ret = at91_adc_trigger_init(idev);
		if (ret < 0) {
			dev_err(&pdev->dev, "Couldn't setup the triggers.\n");
			at91_adc_buffer_remove(idev);
			goto error_disable_adc_clk;
		}
	} else {
		ret = at91_ts_register(st, pdev);
		if (ret)
			goto error_disable_adc_clk;

		at91_ts_hw_init(st, adc_clk_khz);
	}

	ret = iio_device_register(idev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Couldn't register the device.\n");
		goto error_iio_device_register;
	}

	return 0;

error_iio_device_register:
	if (!st->touchscreen_type) {
		at91_adc_trigger_remove(idev);
		at91_adc_buffer_remove(idev);
	} else {
		at91_ts_unregister(st);
	}
error_disable_adc_clk:
	clk_disable_unprepare(st->adc_clk);
error_disable_clk:
	clk_disable_unprepare(st->clk);
error_free_irq:
	free_irq(st->irq, idev);
	return ret;
}
+
/*
 * Remove: tear down in the reverse order of at91_adc_probe() — unregister
 * the IIO device first so no new accesses arrive, then the buffer/triggers
 * or touchscreen, then clocks and the IRQ.
 */
static int at91_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *idev = platform_get_drvdata(pdev);
	struct at91_adc_state *st = iio_priv(idev);

	iio_device_unregister(idev);
	if (!st->touchscreen_type) {
		at91_adc_trigger_remove(idev);
		at91_adc_buffer_remove(idev);
	} else {
		at91_ts_unregister(st);
	}
	clk_disable_unprepare(st->adc_clk);
	clk_disable_unprepare(st->clk);
	free_irq(st->irq, idev);

	return 0;
}
+
/* Per-SoC capabilities: channel count, startup-tick formula, register map. */
static struct at91_adc_caps at91sam9260_caps = {
	.calc_startup_ticks = calc_startup_ticks_9260,
	.num_channels = 4,
	.registers = {
		.channel_base = AT91_ADC_CHR(0),
		.drdy_mask = AT91_ADC_DRDY,
		.status_register = AT91_ADC_SR,
		.trigger_register = AT91_ADC_TRGR_9260,
		.mr_prescal_mask = AT91_ADC_PRESCAL_9260,
		.mr_startup_mask = AT91_ADC_STARTUP_9260,
	},
};

/* 9rl adds touchscreen support (pre-TSMR style) on top of the 9260 layout. */
static struct at91_adc_caps at91sam9rl_caps = {
	.has_ts = true,
	.calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */
	.num_channels = 6,
	.registers = {
		.channel_base = AT91_ADC_CHR(0),
		.drdy_mask = AT91_ADC_DRDY,
		.status_register = AT91_ADC_SR,
		.trigger_register = AT91_ADC_TRGR_9G45,
		.mr_prescal_mask = AT91_ADC_PRESCAL_9260,
		.mr_startup_mask = AT91_ADC_STARTUP_9G45,
	},
};

static struct at91_adc_caps at91sam9g45_caps = {
	.has_ts = true,
	.calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */
	.num_channels = 8,
	.registers = {
		.channel_base = AT91_ADC_CHR(0),
		.drdy_mask = AT91_ADC_DRDY,
		.status_register = AT91_ADC_SR,
		.trigger_register = AT91_ADC_TRGR_9G45,
		.mr_prescal_mask = AT91_ADC_PRESCAL_9G45,
		.mr_startup_mask = AT91_ADC_STARTUP_9G45,
	},
};

/* 9x5/sama5d3 family: dedicated TSMR register and a different register map. */
static struct at91_adc_caps at91sam9x5_caps = {
	.has_ts = true,
	.has_tsmr = true,
	.ts_filter_average = 3,
	.ts_pen_detect_sensitivity = 2,
	.calc_startup_ticks = calc_startup_ticks_9x5,
	.num_channels = 12,
	.registers = {
		.channel_base = AT91_ADC_CDR0_9X5,
		.drdy_mask = AT91_ADC_SR_DRDY_9X5,
		.status_register = AT91_ADC_SR_9X5,
		.trigger_register = AT91_ADC_TRGR_9X5,
		/* prescal mask is same as 9G45 */
		.mr_prescal_mask = AT91_ADC_PRESCAL_9G45,
		.mr_startup_mask = AT91_ADC_STARTUP_9X5,
	},
};
+
/* Device-tree match table; .data points at the per-SoC capabilities. */
static const struct of_device_id at91_adc_dt_ids[] = {
	{ .compatible = "atmel,at91sam9260-adc", .data = &at91sam9260_caps },
	{ .compatible = "atmel,at91sam9rl-adc", .data = &at91sam9rl_caps },
	{ .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps },
	{ .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps },
	{},
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);

/* Legacy (board-file) match table, mirroring the DT table above. */
static const struct platform_device_id at91_adc_ids[] = {
	{
		.name = "at91sam9260-adc",
		.driver_data = (unsigned long)&at91sam9260_caps,
	}, {
		.name = "at91sam9rl-adc",
		.driver_data = (unsigned long)&at91sam9rl_caps,
	}, {
		.name = "at91sam9g45-adc",
		.driver_data = (unsigned long)&at91sam9g45_caps,
	}, {
		.name = "at91sam9x5-adc",
		.driver_data = (unsigned long)&at91sam9x5_caps,
	}, {
		/* terminator */
	}
};
MODULE_DEVICE_TABLE(platform, at91_adc_ids);

static struct platform_driver at91_adc_driver = {
	.probe = at91_adc_probe,
	.remove = at91_adc_remove,
	.id_table = at91_adc_ids,
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_match_ptr(at91_adc_dt_ids),
	},
};

module_platform_driver(at91_adc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel AT91 ADC Driver");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
new file mode 100644
index 00000000000000..08bcfb061ca561
--- /dev/null
+++ b/drivers/iio/adc/axp288_adc.c
@@ -0,0 +1,261 @@
+/*
+ * axp288_adc.c - X-Powers AXP288 PMIC ADC Driver
+ *
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/platform_device.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#define AXP288_ADC_EN_MASK 0xF1
+#define AXP288_ADC_TS_PIN_GPADC 0xF2
+#define AXP288_ADC_TS_PIN_ON 0xF3
+
/* Channel indices; order matches axp288_adc_channels[] below. */
enum axp288_adc_id {
	AXP288_ADC_TS,
	AXP288_ADC_PMIC,
	AXP288_ADC_GP,
	AXP288_ADC_BATT_CHRG_I,
	AXP288_ADC_BATT_DISCHRG_I,
	AXP288_ADC_BATT_V,
	AXP288_ADC_NR_CHAN,
};

/* Per-device state: interrupt line and the shared PMIC regmap. */
struct axp288_adc_info {
	int irq;
	struct regmap *regmap;
};
+
+static const struct iio_chan_spec const axp288_adc_channels[] = {
+ {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = 0,
+ .address = AXP288_TS_ADC_H,
+ .datasheet_name = "TS_PIN",
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = 1,
+ .address = AXP288_PMIC_ADC_H,
+ .datasheet_name = "PMIC_TEMP",
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = 2,
+ .address = AXP288_GP_ADC_H,
+ .datasheet_name = "GPADC",
+ }, {
+ .indexed = 1,
+ .type = IIO_CURRENT,
+ .channel = 3,
+ .address = AXP20X_BATT_CHRG_I_H,
+ .datasheet_name = "BATT_CHG_I",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .indexed = 1,
+ .type = IIO_CURRENT,
+ .channel = 4,
+ .address = AXP20X_BATT_DISCHRG_I_H,
+ .datasheet_name = "BATT_DISCHRG_I",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .indexed = 1,
+ .type = IIO_VOLTAGE,
+ .channel = 5,
+ .address = AXP20X_BATT_V_H,
+ .datasheet_name = "BATT_V",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
/* Shorthand for one iio_map entry binding a channel to a consumer driver. */
#define AXP288_ADC_MAP(_adc_channel_label, _consumer_dev_name,	\
		       _consumer_channel)			\
	{							\
		.adc_channel_label = _adc_channel_label,	\
		.consumer_dev_name = _consumer_dev_name,	\
		.consumer_channel = _consumer_channel,		\
	}

/* for consumer drivers */
static struct iio_map axp288_adc_default_maps[] = {
	AXP288_ADC_MAP("TS_PIN", "axp288-batt", "axp288-batt-temp"),
	AXP288_ADC_MAP("PMIC_TEMP", "axp288-pmic", "axp288-pmic-temp"),
	AXP288_ADC_MAP("GPADC", "axp288-gpadc", "axp288-system-temp"),
	AXP288_ADC_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
	AXP288_ADC_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
	AXP288_ADC_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
	{},
};
+
+static int axp288_adc_read_channel(int *val, unsigned long address,
+ struct regmap *regmap)
+{
+ u8 buf[2];
+
+ if (regmap_bulk_read(regmap, address, buf, 2))
+ return -EIO;
+ *val = (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
+
+ return IIO_VAL_INT;
+}
+
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+ unsigned long address)
+{
+ /* channels other than GPADC do not need to switch TS pin */
+ if (address != AXP288_GP_ADC_H)
+ return 0;
+
+ return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+}
+
+static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct axp288_adc_info *info = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ chan->address)) {
+ dev_err(&indio_dev->dev, "GPADC mode\n");
+ ret = -EINVAL;
+ break;
+ }
+ ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+ /* ADC should be always enabled for internal FG to function */
+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+ return -EIO;
+
+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
/* Direct-mode-only IIO callbacks; this driver has no buffered capture. */
static const struct iio_info axp288_adc_iio_info = {
	.read_raw = &axp288_adc_read_raw,
	.driver_module = THIS_MODULE,
};
+
/*
 * Probe: borrow the parent MFD's regmap, force the ADC on (the internal
 * fuel gauge depends on it staying enabled), register the consumer channel
 * maps and finally the IIO device.
 */
static int axp288_adc_probe(struct platform_device *pdev)
{
	int ret;
	struct axp288_adc_info *info;
	struct iio_dev *indio_dev;
	struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
	if (!indio_dev)
		return -ENOMEM;

	info = iio_priv(indio_dev);
	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return info->irq;
	}
	platform_set_drvdata(pdev, indio_dev);
	info->regmap = axp20x->regmap;
	/*
	 * Set ADC to enabled state at all time, including system suspend.
	 * otherwise internal fuel gauge functionality may be affected.
	 */
	ret = axp288_adc_set_state(axp20x->regmap);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable ADC device\n");
		return ret;
	}

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->name = pdev->name;
	indio_dev->channels = axp288_adc_channels;
	indio_dev->num_channels = ARRAY_SIZE(axp288_adc_channels);
	indio_dev->info = &axp288_adc_iio_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	ret = iio_map_array_register(indio_dev, axp288_adc_default_maps);
	if (ret < 0)
		return ret;

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register iio device\n");
		goto err_array_unregister;
	}
	return 0;

err_array_unregister:
	iio_map_array_unregister(indio_dev);

	return ret;
}
+
/*
 * Remove: unregister the IIO device before the consumer maps, the reverse
 * of probe.  The ADC itself is deliberately left enabled (see probe).
 */
static int axp288_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);
	iio_map_array_unregister(indio_dev);

	return 0;
}
+
/* MFD cell name this sub-driver binds against. */
static struct platform_device_id axp288_adc_id_table[] = {
	{ .name = "axp288_adc" },
	{},
};

static struct platform_driver axp288_adc_driver = {
	.probe = axp288_adc_probe,
	.remove = axp288_adc_remove,
	.id_table = axp288_adc_id_table,
	.driver = {
		.name = "axp288_adc",
	},
};

MODULE_DEVICE_TABLE(platform, axp288_adc_id_table);

module_platform_driver(axp288_adc_driver);

MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("X-Powers AXP288 ADC Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
new file mode 100644
index 00000000000000..1958e4ae5e568d
--- /dev/null
+++ b/drivers/iio/adc/exynos_adc.c
@@ -0,0 +1,779 @@
+/*
+ * exynos_adc.c - Support for ADC in EXYNOS SoCs
+ *
+ * 8 ~ 10 channel, 10/12-bit ADC
+ *
+ * Copyright (C) 2013 Naveen Krishna Chatradhi <ch.naveen@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* S3C/EXYNOS4412/5250 ADC_V1 registers definitions */
+#define ADC_V1_CON(x) ((x) + 0x00)
+#define ADC_V1_DLY(x) ((x) + 0x08)
+#define ADC_V1_DATX(x) ((x) + 0x0C)
+#define ADC_V1_INTCLR(x) ((x) + 0x18)
+#define ADC_V1_MUX(x) ((x) + 0x1c)
+
+/* S3C2410 ADC registers definitions */
+#define ADC_S3C2410_MUX(x) ((x) + 0x18)
+
+/* Future ADC_V2 registers definitions */
+#define ADC_V2_CON1(x) ((x) + 0x00)
+#define ADC_V2_CON2(x) ((x) + 0x04)
+#define ADC_V2_STAT(x) ((x) + 0x08)
+#define ADC_V2_INT_EN(x) ((x) + 0x10)
+#define ADC_V2_INT_ST(x) ((x) + 0x14)
+#define ADC_V2_VER(x) ((x) + 0x20)
+
+/* Bit definitions for ADC_V1 */
+#define ADC_V1_CON_RES (1u << 16)
+#define ADC_V1_CON_PRSCEN (1u << 14)
+#define ADC_V1_CON_PRSCLV(x) (((x) & 0xFF) << 6)
+#define ADC_V1_CON_STANDBY (1u << 2)
+
+/* Bit definitions for S3C2410 ADC */
+#define ADC_S3C2410_CON_SELMUX(x) (((x) & 7) << 3)
+#define ADC_S3C2410_DATX_MASK 0x3FF
+#define ADC_S3C2416_CON_RES_SEL (1u << 3)
+
+/* Bit definitions for ADC_V2 */
+#define ADC_V2_CON1_SOFT_RESET (1u << 2)
+
+#define ADC_V2_CON2_OSEL (1u << 10)
+#define ADC_V2_CON2_ESEL (1u << 9)
+#define ADC_V2_CON2_HIGHF (1u << 8)
+#define ADC_V2_CON2_C_TIME(x) (((x) & 7) << 4)
+#define ADC_V2_CON2_ACH_SEL(x) (((x) & 0xF) << 0)
+#define ADC_V2_CON2_ACH_MASK 0xF
+
+#define MAX_ADC_V2_CHANNELS 10
+#define MAX_ADC_V1_CHANNELS 8
+#define MAX_EXYNOS3250_ADC_CHANNELS 2
+
+/* Bit definitions common for ADC_V1 and ADC_V2 */
+#define ADC_CON_EN_START (1u << 0)
+#define ADC_CON_EN_START_MASK (0x3 << 0)
+#define ADC_DATX_MASK 0xFFF
+
+#define EXYNOS_ADC_TIMEOUT (msecs_to_jiffies(100))
+
+#define EXYNOS_ADCV1_PHY_OFFSET 0x0718
+#define EXYNOS_ADCV2_PHY_OFFSET 0x0720
+
+/* Per-instance state for one Exynos/S3C ADC controller. */
+struct exynos_adc {
+	struct exynos_adc_data	*data;		/* variant ops/limits (from of_match data) */
+	struct device		*dev;
+	void __iomem		*regs;		/* mapped ADC register block */
+	struct regmap		*pmu_map;	/* PMU syscon gating the ADC PHY */
+	struct clk		*clk;		/* "adc" bus clock */
+	struct clk		*sclk;		/* optional "sclk" conversion clock */
+	unsigned int		irq;
+	struct regulator	*vdd;
+
+	struct completion	completion;	/* signalled by the EOC interrupt */
+
+	u32			value;		/* last raw conversion result */
+	unsigned int		version;
+};
+
+/* Per-variant description selected by compatible string. */
+struct exynos_adc_data {
+	int num_channels;	/* usable input channels on this SoC */
+	bool needs_sclk;	/* variant has a separate "sclk" conversion clock */
+	bool needs_adc_phy;	/* ADC PHY must be ungated through the PMU */
+	int phy_offset;		/* PMU register offset of the PHY enable bit */
+	u32 mask;		/* data mask: 10- or 12-bit resolution */
+
+	void (*init_hw)(struct exynos_adc *info);
+	void (*exit_hw)(struct exynos_adc *info);
+	void (*clear_irq)(struct exynos_adc *info);
+	void (*start_conv)(struct exynos_adc *info, unsigned long addr);
+};
+
+/* Undo exynos_adc_prepare_clk(): drop sclk first (if used), then the bus clock. */
+static void exynos_adc_unprepare_clk(struct exynos_adc *info)
+{
+	if (info->data->needs_sclk)
+		clk_unprepare(info->sclk);
+	clk_unprepare(info->clk);
+}
+
+/*
+ * Prepare the "adc" bus clock and, on variants that need it, the "sclk"
+ * conversion clock.  All-or-nothing: a failing sclk prepare unwinds the
+ * bus clock again.  Returns 0 or a negative errno.
+ */
+static int exynos_adc_prepare_clk(struct exynos_adc *info)
+{
+	int ret;
+
+	ret = clk_prepare(info->clk);
+	if (ret) {
+		dev_err(info->dev, "failed preparing adc clock: %d\n", ret);
+		return ret;
+	}
+
+	if (info->data->needs_sclk) {
+		ret = clk_prepare(info->sclk);
+		if (ret) {
+			clk_unprepare(info->clk);
+			dev_err(info->dev,
+				"failed preparing sclk_adc clock: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Gate the clocks (counterpart of exynos_adc_enable_clk()); sclk goes first. */
+static void exynos_adc_disable_clk(struct exynos_adc *info)
+{
+	if (info->data->needs_sclk)
+		clk_disable(info->sclk);
+	clk_disable(info->clk);
+}
+
+/*
+ * Ungate the bus clock and, when present, the conversion clock.
+ * Mirrors exynos_adc_prepare_clk(): failure leaves both clocks disabled.
+ */
+static int exynos_adc_enable_clk(struct exynos_adc *info)
+{
+	int ret;
+
+	ret = clk_enable(info->clk);
+	if (ret) {
+		dev_err(info->dev, "failed enabling adc clock: %d\n", ret);
+		return ret;
+	}
+
+	if (info->data->needs_sclk) {
+		ret = clk_enable(info->sclk);
+		if (ret) {
+			clk_disable(info->clk);
+			dev_err(info->dev,
+				"failed enabling sclk_adc clock: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Bring up a v1 controller: ungate the PHY (if PMU-gated), then program
+ * the prescaler and select 12-bit resolution. */
+static void exynos_adc_v1_init_hw(struct exynos_adc *info)
+{
+	u32 con1;
+
+	if (info->data->needs_adc_phy)
+		regmap_write(info->pmu_map, info->data->phy_offset, 1);
+
+	/* set default prescaler values and Enable prescaler */
+	con1 = ADC_V1_CON_PRSCLV(49) | ADC_V1_CON_PRSCEN;
+
+	/* Enable 12-bit ADC resolution */
+	con1 |= ADC_V1_CON_RES;
+	writel(con1, ADC_V1_CON(info->regs));
+}
+
+/* Power down a v1 controller: gate the PHY and put the ADC into standby. */
+static void exynos_adc_v1_exit_hw(struct exynos_adc *info)
+{
+	u32 con;
+
+	if (info->data->needs_adc_phy)
+		regmap_write(info->pmu_map, info->data->phy_offset, 0);
+
+	con = readl(ADC_V1_CON(info->regs));
+	con |= ADC_V1_CON_STANDBY;
+	writel(con, ADC_V1_CON(info->regs));
+}
+
+/* Ack the end-of-conversion interrupt (any write clears it). */
+static void exynos_adc_v1_clear_irq(struct exynos_adc *info)
+{
+	writel(1, ADC_V1_INTCLR(info->regs));
+}
+
+/* Select the input via the MUX register and set the start bit in CON. */
+static void exynos_adc_v1_start_conv(struct exynos_adc *info,
+				     unsigned long addr)
+{
+	u32 con1;
+
+	writel(addr, ADC_V1_MUX(info->regs));
+
+	con1 = readl(ADC_V1_CON(info->regs));
+	writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
+}
+
+/* Exynos4412/5250 "ADCv1": 8 channels, 12 bit, PHY gated via PMU. */
+static const struct exynos_adc_data exynos_adc_v1_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_DATX_MASK,	/* 12 bit ADC resolution */
+	.needs_adc_phy	= true,
+	.phy_offset	= EXYNOS_ADCV1_PHY_OFFSET,
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.clear_irq	= exynos_adc_v1_clear_irq,
+	.start_conv	= exynos_adc_v1_start_conv,
+};
+
+/* S3C2416: force 12-bit mode, pick the channel via its own MUX register,
+ * then kick the conversion. */
+static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
+					  unsigned long addr)
+{
+	u32 con1;
+
+	/* Enable 12 bit ADC resolution */
+	con1 = readl(ADC_V1_CON(info->regs));
+	con1 |= ADC_S3C2416_CON_RES_SEL;
+	writel(con1, ADC_V1_CON(info->regs));
+
+	/* Select channel for S3C2416 */
+	writel(addr, ADC_S3C2410_MUX(info->regs));
+
+	con1 = readl(ADC_V1_CON(info->regs));
+	writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
+}
+
+/* S3C2416: v1 register layout, no dedicated irq-clear, no PMU PHY gate. */
+static struct exynos_adc_data const exynos_adc_s3c2416_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_DATX_MASK, /* 12 bit ADC resolution */
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.start_conv	= exynos_adc_s3c2416_start_conv,
+};
+
+/* S3C2443: channel selected via its own MUX register; 10-bit data. */
+static void exynos_adc_s3c2443_start_conv(struct exynos_adc *info,
+					  unsigned long addr)
+{
+	u32 con1;
+
+	/* Select channel for S3C2443 */
+	writel(addr, ADC_S3C2410_MUX(info->regs));
+
+	con1 = readl(ADC_V1_CON(info->regs));
+	writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
+}
+
+static struct exynos_adc_data const exynos_adc_s3c2443_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.start_conv	= exynos_adc_s3c2443_start_conv,
+};
+
+/* S3C24xx/64xx: channel select lives inside CON (SELMUX field); replace
+ * the old selection and set the start bit in one write. */
+static void exynos_adc_s3c64xx_start_conv(struct exynos_adc *info,
+					  unsigned long addr)
+{
+	u32 con1;
+
+	con1 = readl(ADC_V1_CON(info->regs));
+	con1 &= ~ADC_S3C2410_CON_SELMUX(0x7);
+	con1 |= ADC_S3C2410_CON_SELMUX(addr);
+	writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
+}
+
+/* S3C2410/2440: 10-bit data, no irq-clear register. */
+static struct exynos_adc_data const exynos_adc_s3c24xx_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.start_conv	= exynos_adc_s3c64xx_start_conv,
+};
+
+/* S3C6410: same CON-based mux but 12-bit data and an irq-clear register. */
+static struct exynos_adc_data const exynos_adc_s3c64xx_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_DATX_MASK, /* 12 bit ADC resolution */
+
+	.init_hw	= exynos_adc_v1_init_hw,
+	.exit_hw	= exynos_adc_v1_exit_hw,
+	.clear_irq	= exynos_adc_v1_clear_irq,
+	.start_conv	= exynos_adc_s3c64xx_start_conv,
+};
+
+/* Bring up a v2 controller: ungate PHY, soft-reset, program CON2 sampling
+ * options and unmask the EOC interrupt. */
+static void exynos_adc_v2_init_hw(struct exynos_adc *info)
+{
+	u32 con1, con2;
+
+	if (info->data->needs_adc_phy)
+		regmap_write(info->pmu_map, info->data->phy_offset, 1);
+
+	con1 = ADC_V2_CON1_SOFT_RESET;
+	writel(con1, ADC_V2_CON1(info->regs));
+
+	con2 = ADC_V2_CON2_OSEL | ADC_V2_CON2_ESEL |
+		ADC_V2_CON2_HIGHF | ADC_V2_CON2_C_TIME(0);
+	writel(con2, ADC_V2_CON2(info->regs));
+
+	/* Enable interrupts */
+	writel(1, ADC_V2_INT_EN(info->regs));
+}
+
+/* Power down a v2 controller: gate the PHY and clear the start bit. */
+static void exynos_adc_v2_exit_hw(struct exynos_adc *info)
+{
+	u32 con;
+
+	if (info->data->needs_adc_phy)
+		regmap_write(info->pmu_map, info->data->phy_offset, 0);
+
+	con = readl(ADC_V2_CON1(info->regs));
+	con &= ~ADC_CON_EN_START;
+	writel(con, ADC_V2_CON1(info->regs));
+}
+
+/* Ack the v2 EOC interrupt via the status register. */
+static void exynos_adc_v2_clear_irq(struct exynos_adc *info)
+{
+	writel(1, ADC_V2_INT_ST(info->regs));
+}
+
+/* v2: channel select is a field in CON2; update it, then set start in CON1. */
+static void exynos_adc_v2_start_conv(struct exynos_adc *info,
+				     unsigned long addr)
+{
+	u32 con1, con2;
+
+	con2 = readl(ADC_V2_CON2(info->regs));
+	con2 &= ~ADC_V2_CON2_ACH_MASK;
+	con2 |= ADC_V2_CON2_ACH_SEL(addr);
+	writel(con2, ADC_V2_CON2(info->regs));
+
+	con1 = readl(ADC_V2_CON1(info->regs));
+	writel(con1 | ADC_CON_EN_START, ADC_V2_CON1(info->regs));
+}
+
+/* Generic ADCv2: 10 channels, 12 bit, PMU-gated PHY. */
+static const struct exynos_adc_data exynos_adc_v2_data = {
+	.num_channels	= MAX_ADC_V2_CHANNELS,
+	.mask		= ADC_DATX_MASK, /* 12 bit ADC resolution */
+	.needs_adc_phy	= true,
+	.phy_offset	= EXYNOS_ADCV2_PHY_OFFSET,
+
+	.init_hw	= exynos_adc_v2_init_hw,
+	.exit_hw	= exynos_adc_v2_exit_hw,
+	.clear_irq	= exynos_adc_v2_clear_irq,
+	.start_conv	= exynos_adc_v2_start_conv,
+};
+
+/* Exynos3250: v2 programming model, only 2 channels, extra "sclk" clock. */
+static const struct exynos_adc_data exynos3250_adc_data = {
+	.num_channels	= MAX_EXYNOS3250_ADC_CHANNELS,
+	.mask		= ADC_DATX_MASK, /* 12 bit ADC resolution */
+	.needs_sclk	= true,
+	.needs_adc_phy	= true,
+	.phy_offset	= EXYNOS_ADCV1_PHY_OFFSET,
+
+	.init_hw	= exynos_adc_v2_init_hw,
+	.exit_hw	= exynos_adc_v2_exit_hw,
+	.clear_irq	= exynos_adc_v2_clear_irq,
+	.start_conv	= exynos_adc_v2_start_conv,
+};
+
+/* Exynos7 init: like v2 but CON2 is read-modify-write (only the
+ * conversion-time field is touched) instead of fully rewritten. */
+static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
+{
+	u32 con1, con2;
+
+	if (info->data->needs_adc_phy)
+		regmap_write(info->pmu_map, info->data->phy_offset, 1);
+
+	con1 = ADC_V2_CON1_SOFT_RESET;
+	writel(con1, ADC_V2_CON1(info->regs));
+
+	con2 = readl(ADC_V2_CON2(info->regs));
+	con2 &= ~ADC_V2_CON2_C_TIME(7);
+	con2 |= ADC_V2_CON2_C_TIME(0);
+	writel(con2, ADC_V2_CON2(info->regs));
+
+	/* Enable interrupts */
+	writel(1, ADC_V2_INT_EN(info->regs));
+}
+
+/* Exynos7: v2-style ops, 8 channels, no PMU PHY gate. */
+static const struct exynos_adc_data exynos7_adc_data = {
+	.num_channels	= MAX_ADC_V1_CHANNELS,
+	.mask		= ADC_DATX_MASK, /* 12 bit ADC resolution */
+
+	.init_hw	= exynos_adc_exynos7_init_hw,
+	.exit_hw	= exynos_adc_v2_exit_hw,
+	.clear_irq	= exynos_adc_v2_clear_irq,
+	.start_conv	= exynos_adc_v2_start_conv,
+};
+
+/* Compatible-string to variant-data mapping; newest entries last. */
+static const struct of_device_id exynos_adc_match[] = {
+	{
+		.compatible = "samsung,s3c2410-adc",
+		.data = &exynos_adc_s3c24xx_data,
+	}, {
+		.compatible = "samsung,s3c2416-adc",
+		.data = &exynos_adc_s3c2416_data,
+	}, {
+		.compatible = "samsung,s3c2440-adc",
+		.data = &exynos_adc_s3c24xx_data,
+	}, {
+		.compatible = "samsung,s3c2443-adc",
+		.data = &exynos_adc_s3c2443_data,
+	}, {
+		.compatible = "samsung,s3c6410-adc",
+		.data = &exynos_adc_s3c64xx_data,
+	}, {
+		.compatible = "samsung,exynos-adc-v1",
+		.data = &exynos_adc_v1_data,
+	}, {
+		.compatible = "samsung,exynos-adc-v2",
+		.data = &exynos_adc_v2_data,
+	}, {
+		.compatible = "samsung,exynos3250-adc",
+		.data = &exynos3250_adc_data,
+	}, {
+		.compatible = "samsung,exynos7-adc",
+		.data = &exynos7_adc_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_adc_match);
+
+/*
+ * Look up the variant ops/limits bound to the matching compatible string.
+ * Returns NULL when the node matched no table entry; the caller treats
+ * NULL as a probe failure.  (Previously match->data was dereferenced
+ * without checking of_match_node()'s result, crashing on a non-matching
+ * node.)
+ */
+static struct exynos_adc_data *exynos_adc_get_data(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+
+	match = of_match_node(exynos_adc_match, pdev->dev.of_node);
+	if (!match)
+		return NULL;
+
+	return (struct exynos_adc_data *)match->data;
+}
+
+/*
+ * Single-shot read for IIO_CHAN_INFO_RAW (the only supported mask):
+ * start a conversion on the channel's mux address and sleep on the
+ * completion signalled by the EOC interrupt.  On timeout the controller
+ * is re-initialised and -ETIMEDOUT is returned.  Serialised via the
+ * iio_dev mlock.
+ */
+static int exynos_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val,
+				int *val2,
+				long mask)
+{
+	struct exynos_adc *info = iio_priv(indio_dev);
+	unsigned long timeout;
+	int ret;
+
+	if (mask != IIO_CHAN_INFO_RAW)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	reinit_completion(&info->completion);
+
+	/* Select the channel to be used and Trigger conversion */
+	if (info->data->start_conv)
+		info->data->start_conv(info, chan->address);
+
+	timeout = wait_for_completion_timeout
+			(&info->completion, EXYNOS_ADC_TIMEOUT);
+	if (timeout == 0) {
+		dev_warn(&indio_dev->dev, "Conversion timed out! Resetting\n");
+		/* Re-init to recover a wedged controller before giving up. */
+		if (info->data->init_hw)
+			info->data->init_hw(info);
+		ret = -ETIMEDOUT;
+	} else {
+		/* info->value was stored by the ISR before complete(). */
+		*val = info->value;
+		*val2 = 0;
+		ret = IIO_VAL_INT;
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/* EOC interrupt: latch the masked result, ack the irq, wake the reader. */
+static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
+{
+	struct exynos_adc *info = (struct exynos_adc *)dev_id;
+	u32 mask = info->data->mask;
+
+	/* Read value */
+	info->value = readl(ADC_V1_DATX(info->regs)) & mask;
+
+	/* clear irq */
+	if (info->data->clear_irq)
+		info->data->clear_irq(info);
+
+	complete(&info->completion);
+
+	return IRQ_HANDLED;
+}
+
+/* debugfs register access: read-only (writes are rejected with -EINVAL). */
+static int exynos_adc_reg_access(struct iio_dev *indio_dev,
+			      unsigned reg, unsigned writeval,
+			      unsigned *readval)
+{
+	struct exynos_adc *info = iio_priv(indio_dev);
+
+	if (readval == NULL)
+		return -EINVAL;
+
+	*readval = readl(info->regs + reg);
+
+	return 0;
+}
+
+static const struct iio_info exynos_adc_iio_info = {
+	.read_raw = &exynos_read_raw,
+	.debugfs_reg_access = &exynos_adc_reg_access,
+	.driver_module = THIS_MODULE,
+};
+
+/* One raw voltage channel; .address doubles as the hardware mux index. */
+#define ADC_CHANNEL(_index, _id) {			\
+	.type = IIO_VOLTAGE,				\
+	.indexed = 1,					\
+	.channel = _index,				\
+	.address = _index,				\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.datasheet_name = _id,				\
+}
+
+/* Maximum channel set; num_channels from the variant data trims this. */
+static const struct iio_chan_spec exynos_adc_iio_channels[] = {
+	ADC_CHANNEL(0, "adc0"),
+	ADC_CHANNEL(1, "adc1"),
+	ADC_CHANNEL(2, "adc2"),
+	ADC_CHANNEL(3, "adc3"),
+	ADC_CHANNEL(4, "adc4"),
+	ADC_CHANNEL(5, "adc5"),
+	ADC_CHANNEL(6, "adc6"),
+	ADC_CHANNEL(7, "adc7"),
+	ADC_CHANNEL(8, "adc8"),
+	ADC_CHANNEL(9, "adc9"),
+};
+
+/* device_for_each_child() callback: unregister one child platform device
+ * (populated from DT sub-nodes, e.g. the touchscreen). */
+static int exynos_adc_remove_devices(struct device *dev, void *c)
+{
+	platform_device_unregister(to_platform_device(dev));
+	return 0;
+}
+
+/*
+ * Probe: map registers, fetch the PMU syscon when the variant gates the
+ * ADC PHY, grab clocks and the vdd regulator, then power up, register
+ * with the IIO core and populate DT child nodes.  The error labels
+ * unwind strictly in reverse order of acquisition.
+ */
+static int exynos_adc_probe(struct platform_device *pdev)
+{
+	struct exynos_adc *info = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	struct iio_dev *indio_dev = NULL;
+	struct resource	*mem;
+	int ret = -ENODEV;
+	int irq;
+
+	/* DT-only driver. */
+	if (!np)
+		return ret;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct exynos_adc));
+	if (!indio_dev) {
+		dev_err(&pdev->dev, "failed allocating iio device\n");
+		return -ENOMEM;
+	}
+
+	info = iio_priv(indio_dev);
+
+	/* Variant ops/limits selected by compatible string. */
+	info->data = exynos_adc_get_data(pdev);
+	if (!info->data) {
+		dev_err(&pdev->dev, "failed getting exynos_adc_data\n");
+		return -EINVAL;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	info->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(info->regs))
+		return PTR_ERR(info->regs);
+
+
+	/* The PHY enable bit lives in the PMU; get its syscon regmap. */
+	if (info->data->needs_adc_phy) {
+		info->pmu_map = syscon_regmap_lookup_by_phandle(
+					pdev->dev.of_node,
+					"samsung,syscon-phandle");
+		if (IS_ERR(info->pmu_map)) {
+			dev_err(&pdev->dev, "syscon regmap lookup failed.\n");
+			return PTR_ERR(info->pmu_map);
+		}
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq;
+	}
+
+	info->irq = irq;
+	info->dev = &pdev->dev;
+
+	init_completion(&info->completion);
+
+	info->clk = devm_clk_get(&pdev->dev, "adc");
+	if (IS_ERR(info->clk)) {
+		dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
+							PTR_ERR(info->clk));
+		return PTR_ERR(info->clk);
+	}
+
+	if (info->data->needs_sclk) {
+		info->sclk = devm_clk_get(&pdev->dev, "sclk");
+		if (IS_ERR(info->sclk)) {
+			dev_err(&pdev->dev,
+				"failed getting sclk clock, err = %ld\n",
+				PTR_ERR(info->sclk));
+			return PTR_ERR(info->sclk);
+		}
+	}
+
+	info->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(info->vdd)) {
+		dev_err(&pdev->dev, "failed getting regulator, err = %ld\n",
+							PTR_ERR(info->vdd));
+		return PTR_ERR(info->vdd);
+	}
+
+	/* From here on, failures must unwind through the labels below. */
+	ret = regulator_enable(info->vdd);
+	if (ret)
+		return ret;
+
+	ret = exynos_adc_prepare_clk(info);
+	if (ret)
+		goto err_disable_reg;
+
+	ret = exynos_adc_enable_clk(info);
+	if (ret)
+		goto err_unprepare_clk;
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->name = dev_name(&pdev->dev);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->dev.of_node = pdev->dev.of_node;
+	indio_dev->info = &exynos_adc_iio_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = exynos_adc_iio_channels;
+	indio_dev->num_channels = info->data->num_channels;
+
+	/* info is the dev_id handed back to exynos_adc_isr(). */
+	ret = request_irq(info->irq, exynos_adc_isr,
+					0, dev_name(&pdev->dev), info);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed requesting irq, irq = %d\n",
+							info->irq);
+		goto err_disable_clk;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto err_irq;
+
+	if (info->data->init_hw)
+		info->data->init_hw(info);
+
+	/* Instantiate children (e.g. samsung,exynos-adc-ts touchscreen). */
+	ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed adding child nodes\n");
+		goto err_of_populate;
+	}
+
+	return 0;
+
+err_of_populate:
+	device_for_each_child(&indio_dev->dev, NULL,
+				exynos_adc_remove_devices);
+	iio_device_unregister(indio_dev);
+err_irq:
+	free_irq(info->irq, info);
+err_disable_clk:
+	if (info->data->exit_hw)
+		info->data->exit_hw(info);
+	exynos_adc_disable_clk(info);
+err_unprepare_clk:
+	exynos_adc_unprepare_clk(info);
+err_disable_reg:
+	regulator_disable(info->vdd);
+	return ret;
+}
+
+/* Mirror of probe: children, IIO core, irq, hardware, clocks, regulator. */
+static int exynos_adc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct exynos_adc *info = iio_priv(indio_dev);
+
+	device_for_each_child(&indio_dev->dev, NULL,
+				exynos_adc_remove_devices);
+	iio_device_unregister(indio_dev);
+	free_irq(info->irq, info);
+	if (info->data->exit_hw)
+		info->data->exit_hw(info);
+	exynos_adc_disable_clk(info);
+	exynos_adc_unprepare_clk(info);
+	regulator_disable(info->vdd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: power the block down and gate clocks (clocks stay prepared). */
+static int exynos_adc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct exynos_adc *info = iio_priv(indio_dev);
+
+	if (info->data->exit_hw)
+		info->data->exit_hw(info);
+	exynos_adc_disable_clk(info);
+	regulator_disable(info->vdd);
+
+	return 0;
+}
+
+/* Resume: re-enable supply and clocks, then reprogram the controller. */
+static int exynos_adc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct exynos_adc *info = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(info->vdd);
+	if (ret)
+		return ret;
+
+	ret = exynos_adc_enable_clk(info);
+	if (ret)
+		return ret;
+
+	if (info->data->init_hw)
+		info->data->init_hw(info);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(exynos_adc_pm_ops,
+ exynos_adc_suspend,
+ exynos_adc_resume);
+
+/* DT-matched platform driver with system sleep support. */
+static struct platform_driver exynos_adc_driver = {
+	.probe		= exynos_adc_probe,
+	.remove		= exynos_adc_remove,
+	.driver		= {
+		.name	= "exynos-adc",
+		.of_match_table = exynos_adc_match,
+		.pm	= &exynos_adc_pm_ops,
+	},
+};
+
+module_platform_driver(exynos_adc_driver);
+
+MODULE_AUTHOR("Naveen Krishna Chatradhi <ch.naveen@samsung.com>");
+MODULE_DESCRIPTION("Samsung EXYNOS5 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
new file mode 100644
index 00000000000000..152cfc8e1c7b69
--- /dev/null
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -0,0 +1,254 @@
+/*
+ * TI LP8788 MFD - ADC driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register address */
+#define LP8788_ADC_CONF 0x60
+#define LP8788_ADC_RAW 0x61
+#define LP8788_ADC_DONE 0x63
+
+#define ADC_CONV_START 1
+
+/* Driver state: MFD parent handle, registered consumer maps, access lock. */
+struct lp8788_adc {
+	struct lp8788 *lp;	/* parent MFD (register access) */
+	struct iio_map *map;	/* consumer maps registered at probe */
+	struct mutex lock;	/* serialises conversions */
+};
+
+/* Per-channel scale in micro-units; presumably uV/LSB (mdeg for IC_TEMP)
+ * per the LP8788 datasheet -- NOTE(review): confirm units there. */
+static const int lp8788_scale[LPADC_MAX] = {
+	[LPADC_VBATT_5P5] = 1343101,
+	[LPADC_VIN_CHG]   = 3052503,
+	[LPADC_IBATT]     = 610500,
+	[LPADC_IC_TEMP]   = 61050,
+	[LPADC_VBATT_6P0] = 1465201,
+	[LPADC_VBATT_5P0] = 1221001,
+	[LPADC_ADC1]      = 610500,
+	[LPADC_ADC2]      = 610500,
+	[LPADC_VDD]       = 1025641,
+	[LPADC_VCOIN]     = 757020,
+	[LPADC_ADC3]      = 610500,
+	[LPADC_ADC4]      = 610500,
+};
+
+/*
+ * Start a single conversion for @id, poll the DONE flag (up to five
+ * 100-200us naps), then assemble the 12-bit result from the two raw
+ * registers into *val.  Returns 0 on success or a negative I/O error.
+ */
+static int lp8788_get_adc_result(struct lp8788_adc *adc, enum lp8788_adc_id id,
+				int *val)
+{
+	u8 rawdata[2];
+	u8 done = 0;
+	int attempt;
+	int ret;
+
+	/* Select the channel and kick the conversion in a single write. */
+	ret = lp8788_write_byte(adc->lp, LP8788_ADC_CONF,
+				(id << 1) | ADC_CONV_START);
+	if (ret)
+		return ret;
+
+	/* Poll until the hardware reports end of conversion. */
+	for (attempt = 0; attempt < 5; attempt++) {
+		usleep_range(100, 200);
+
+		ret = lp8788_read_byte(adc->lp, LP8788_ADC_DONE, &done);
+		if (ret)
+			return ret;
+
+		/* conversion done */
+		if (done)
+			break;
+	}
+
+	ret = lp8788_read_multi_bytes(adc->lp, LP8788_ADC_RAW, rawdata,
+				      ARRAY_SIZE(rawdata));
+	if (ret)
+		return ret;
+
+	/* 12-bit result: 8 MSBs in rawdata[0], top nibble of rawdata[1]. */
+	*val = ((rawdata[0] << 4) & 0x00000ff0) |
+	       ((rawdata[1] >> 4) & 0x0000000f);
+
+	return 0;
+}
+
+/* IIO read_raw: RAW triggers one conversion; SCALE returns the table
+ * entry split into integer + micro parts.  Serialised by adc->lock. */
+static int lp8788_adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan,
+			int *val, int *val2, long mask)
+{
+	struct lp8788_adc *adc = iio_priv(indio_dev);
+	enum lp8788_adc_id id = chan->channel;
+	int ret;
+
+	mutex_lock(&adc->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = lp8788_get_adc_result(adc, id, val) ? -EIO : IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		*val = lp8788_scale[id] / 1000000;
+		*val2 = lp8788_scale[id] % 1000000;
+		ret = IIO_VAL_INT_PLUS_MICRO;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&adc->lock);
+
+	return ret;
+}
+
+static const struct iio_info lp8788_adc_info = {
+	.read_raw = &lp8788_adc_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/* Channel spec indexed by enum lp8788_adc_id; datasheet name = enum name. */
+#define LP8788_CHAN(_id, _type) {				\
+		.type = _type,					\
+		.indexed = 1,					\
+		.channel = LPADC_##_id,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |	\
+			BIT(IIO_CHAN_INFO_SCALE),		\
+		.datasheet_name = #_id,				\
+}
+
+static const struct iio_chan_spec lp8788_adc_channels[] = {
+	[LPADC_VBATT_5P5] = LP8788_CHAN(VBATT_5P5, IIO_VOLTAGE),
+	[LPADC_VIN_CHG]   = LP8788_CHAN(VIN_CHG, IIO_VOLTAGE),
+	[LPADC_IBATT]     = LP8788_CHAN(IBATT, IIO_CURRENT),
+	[LPADC_IC_TEMP]   = LP8788_CHAN(IC_TEMP, IIO_TEMP),
+	[LPADC_VBATT_6P0] = LP8788_CHAN(VBATT_6P0, IIO_VOLTAGE),
+	[LPADC_VBATT_5P0] = LP8788_CHAN(VBATT_5P0, IIO_VOLTAGE),
+	[LPADC_ADC1]      = LP8788_CHAN(ADC1, IIO_VOLTAGE),
+	[LPADC_ADC2]      = LP8788_CHAN(ADC2, IIO_VOLTAGE),
+	[LPADC_VDD]       = LP8788_CHAN(VDD, IIO_VOLTAGE),
+	[LPADC_VCOIN]     = LP8788_CHAN(VCOIN, IIO_VOLTAGE),
+	[LPADC_ADC3]      = LP8788_CHAN(ADC3, IIO_VOLTAGE),
+	[LPADC_ADC4]      = LP8788_CHAN(ADC4, IIO_VOLTAGE),
+};
+
+/* default maps used by iio consumer (lp8788-charger driver) */
+static struct iio_map lp8788_default_iio_maps[] = {
+	{
+		.consumer_dev_name = "lp8788-charger",
+		.consumer_channel = "lp8788_vbatt_5p0",
+		.adc_channel_label = "VBATT_5P0",
+	},
+	{
+		.consumer_dev_name = "lp8788-charger",
+		.consumer_channel = "lp8788_adc1",
+		.adc_channel_label = "ADC1",
+	},
+	{ }
+};
+
+/*
+ * Register the consumer channel maps: prefer the board-supplied table
+ * from platform data, otherwise fall back to the built-in maps used by
+ * the lp8788-charger driver.  The chosen table is remembered in @adc.
+ */
+static int lp8788_iio_map_register(struct iio_dev *indio_dev,
+				struct lp8788_platform_data *pdata,
+				struct lp8788_adc *adc)
+{
+	struct iio_map *map;
+	int ret;
+
+	if (pdata && pdata->adc_pdata)
+		map = pdata->adc_pdata;
+	else
+		map = lp8788_default_iio_maps;
+
+	ret = iio_map_array_register(indio_dev, map);
+	if (ret) {
+		dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
+		return ret;
+	}
+
+	adc->map = map;
+	return 0;
+}
+
+/*
+ * Probe: the lp8788 MFD core created this platform device; fetch the
+ * parent handle from its drvdata, register consumer maps, then the IIO
+ * device.  Maps are unregistered on IIO registration failure.
+ */
+static int lp8788_adc_probe(struct platform_device *pdev)
+{
+	struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+	struct iio_dev *indio_dev;
+	struct lp8788_adc *adc;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	adc = iio_priv(indio_dev);
+	adc->lp = lp;
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->dev.of_node = pdev->dev.of_node;
+	ret = lp8788_iio_map_register(indio_dev, lp->pdata, adc);
+	if (ret)
+		return ret;
+
+	mutex_init(&adc->lock);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = pdev->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &lp8788_adc_info;
+	indio_dev->channels = lp8788_adc_channels;
+	indio_dev->num_channels = ARRAY_SIZE(lp8788_adc_channels);
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "iio dev register err: %d\n", ret);
+		goto err_iio_device;
+	}
+
+	return 0;
+
+err_iio_device:
+	iio_map_array_unregister(indio_dev);
+	return ret;
+}
+
+/* Tear down in reverse order of probe: IIO core first, then consumer maps. */
+static int lp8788_adc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+	iio_device_unregister(indio_dev);
+	iio_map_array_unregister(indio_dev);
+
+	return 0;
+}
+
+/* Platform glue; device name supplied by the lp8788 MFD cell. */
+static struct platform_driver lp8788_adc_driver = {
+	.probe = lp8788_adc_probe,
+	.remove = lp8788_adc_remove,
+	.driver = {
+		.name = LP8788_DEV_ADC,
+	},
+};
+module_platform_driver(lp8788_adc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 ADC Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-adc");
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
new file mode 100644
index 00000000000000..87ee1c7d0b5424
--- /dev/null
+++ b/drivers/iio/adc/max1027.c
@@ -0,0 +1,521 @@
+ /*
+ * iio/adc/max1027.c
+ * Copyright (C) 2014 Philippe Reynes
+ *
+ * based on linux/drivers/iio/ad7923.c
+ * Copyright 2011 Analog Devices Inc (from AD7923 Driver)
+ * Copyright 2012 CS Systemes d'Information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * max1027.c
+ *
+ * Partial support for max1027 and similar chips.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define MAX1027_CONV_REG BIT(7)
+#define MAX1027_SETUP_REG BIT(6)
+#define MAX1027_AVG_REG BIT(5)
+#define MAX1027_RST_REG BIT(4)
+
+/* conversion register */
+#define MAX1027_TEMP BIT(0)
+#define MAX1027_SCAN_0_N (0x00 << 1)
+#define MAX1027_SCAN_N_M (0x01 << 1)
+#define MAX1027_SCAN_N (0x02 << 1)
+#define MAX1027_NOSCAN (0x03 << 1)
+#define MAX1027_CHAN(n) ((n) << 3)
+
+/* setup register */
+#define MAX1027_UNIPOLAR 0x02
+#define MAX1027_BIPOLAR 0x03
+#define MAX1027_REF_MODE0 (0x00 << 2)
+#define MAX1027_REF_MODE1 (0x01 << 2)
+#define MAX1027_REF_MODE2 (0x02 << 2)
+#define MAX1027_REF_MODE3 (0x03 << 2)
+#define MAX1027_CKS_MODE0 (0x00 << 4)
+#define MAX1027_CKS_MODE1 (0x01 << 4)
+#define MAX1027_CKS_MODE2 (0x02 << 4)
+#define MAX1027_CKS_MODE3 (0x03 << 4)
+
+/* averaging register */
+#define MAX1027_NSCAN_4 0x00
+#define MAX1027_NSCAN_8 0x01
+#define MAX1027_NSCAN_12 0x02
+#define MAX1027_NSCAN_16 0x03
+#define MAX1027_NAVG_4 (0x00 << 2)
+#define MAX1027_NAVG_8 (0x01 << 2)
+#define MAX1027_NAVG_16 (0x02 << 2)
+#define MAX1027_NAVG_32 (0x03 << 2)
+#define MAX1027_AVG_EN BIT(4)
+
+/* Supported parts; value indexes max1027_chip_info_tbl[]. */
+enum max1027_id {
+	max1027,
+	max1029,
+	max1031,
+};
+
+/* SPI id table: driver_data carries the enum max1027_id index. */
+static const struct spi_device_id max1027_id[] = {
+	{"max1027", max1027},
+	{"max1029", max1029},
+	{"max1031", max1031},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, max1027_id);
+
+#ifdef CONFIG_OF
+/* DT match only; variant selection still comes from the SPI id table. */
+static const struct of_device_id max1027_adc_dt_ids[] = {
+	{ .compatible = "maxim,max1027" },
+	{ .compatible = "maxim,max1029" },
+	{ .compatible = "maxim,max1031" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
+#endif
+
+/* Voltage channel: 10-bit result left-aligned in a 16-bit BE word
+ * (shift 2); scan slot index+1 because slot 0 is temperature. */
+#define MAX1027_V_CHAN(index)						\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = index,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.scan_index = index + 1,				\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = 10,					\
+			.storagebits = 16,				\
+			.shift = 2,					\
+			.endianness = IIO_BE,				\
+		},							\
+	}
+
+/* Internal temperature sensor: 12-bit BE word in scan slot 0. */
+#define MAX1027_T_CHAN							\
+	{								\
+		.type = IIO_TEMP,					\
+		.channel = 0,						\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.scan_index = 0,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = 12,					\
+			.storagebits = 16,				\
+			.endianness = IIO_BE,				\
+		},							\
+	}
+
+/* MAX1027: temperature + 8 voltage inputs. */
+static const struct iio_chan_spec max1027_channels[] = {
+	MAX1027_T_CHAN,
+	MAX1027_V_CHAN(0),
+	MAX1027_V_CHAN(1),
+	MAX1027_V_CHAN(2),
+	MAX1027_V_CHAN(3),
+	MAX1027_V_CHAN(4),
+	MAX1027_V_CHAN(5),
+	MAX1027_V_CHAN(6),
+	MAX1027_V_CHAN(7)
+};
+
+/* MAX1029: temperature + 12 voltage inputs. */
+static const struct iio_chan_spec max1029_channels[] = {
+	MAX1027_T_CHAN,
+	MAX1027_V_CHAN(0),
+	MAX1027_V_CHAN(1),
+	MAX1027_V_CHAN(2),
+	MAX1027_V_CHAN(3),
+	MAX1027_V_CHAN(4),
+	MAX1027_V_CHAN(5),
+	MAX1027_V_CHAN(6),
+	MAX1027_V_CHAN(7),
+	MAX1027_V_CHAN(8),
+	MAX1027_V_CHAN(9),
+	MAX1027_V_CHAN(10),
+	MAX1027_V_CHAN(11)
+};
+
+/* MAX1031: temperature + 16 voltage inputs. */
+static const struct iio_chan_spec max1031_channels[] = {
+	MAX1027_T_CHAN,
+	MAX1027_V_CHAN(0),
+	MAX1027_V_CHAN(1),
+	MAX1027_V_CHAN(2),
+	MAX1027_V_CHAN(3),
+	MAX1027_V_CHAN(4),
+	MAX1027_V_CHAN(5),
+	MAX1027_V_CHAN(6),
+	MAX1027_V_CHAN(7),
+	MAX1027_V_CHAN(8),
+	MAX1027_V_CHAN(9),
+	MAX1027_V_CHAN(10),
+	MAX1027_V_CHAN(11),
+	MAX1027_V_CHAN(12),
+	MAX1027_V_CHAN(13),
+	MAX1027_V_CHAN(14),
+	MAX1027_V_CHAN(15)
+};
+
+/* All-or-nothing scan masks: either every channel or none. */
+static const unsigned long max1027_available_scan_masks[] = {
+	0x000001ff,
+	0x00000000,
+};
+
+static const unsigned long max1029_available_scan_masks[] = {
+	0x00001fff,
+	0x00000000,
+};
+
+static const unsigned long max1031_available_scan_masks[] = {
+	0x0001ffff,
+	0x00000000,
+};
+
+/* Per-variant channel layout, indexed by enum max1027_id. */
+struct max1027_chip_info {
+	const struct iio_chan_spec *channels;
+	unsigned int num_channels;
+	const unsigned long *available_scan_masks;
+};
+
+static const struct max1027_chip_info max1027_chip_info_tbl[] = {
+	[max1027] = {
+		.channels = max1027_channels,
+		.num_channels = ARRAY_SIZE(max1027_channels),
+		.available_scan_masks = max1027_available_scan_masks,
+	},
+	[max1029] = {
+		.channels = max1029_channels,
+		.num_channels = ARRAY_SIZE(max1029_channels),
+		.available_scan_masks = max1029_available_scan_masks,
+	},
+	[max1031] = {
+		.channels = max1031_channels,
+		.num_channels = ARRAY_SIZE(max1031_channels),
+		.available_scan_masks = max1031_available_scan_masks,
+	},
+};
+
+/* Runtime state; reg is the single-byte SPI command, kept in its own
+ * cacheline for DMA-safe SPI transfers. */
+struct max1027_state {
+	const struct max1027_chip_info	*info;
+	struct spi_device		*spi;
+	struct iio_trigger		*trig;
+	__be16				*buffer;	/* rx buffer for results */
+	struct mutex			lock;		/* serialises conversions */
+
+	u8				reg ____cacheline_aligned;
+};
+
+/*
+ * Perform one single-channel (or temperature) conversion and return the
+ * raw big-endian result word in *val.  Refused with -EBUSY while the
+ * triggered buffer owns the device.  Returns IIO_VAL_INT or a negative
+ * errno.
+ */
+static int max1027_read_single_value(struct iio_dev *indio_dev,
+				     struct iio_chan_spec const *chan,
+				     int *val)
+{
+	int ret;
+	struct max1027_state *st = iio_priv(indio_dev);
+
+	if (iio_buffer_enabled(indio_dev)) {
+		dev_warn(&indio_dev->dev, "trigger mode already enabled");
+		return -EBUSY;
+	}
+
+	/* Start acquisition on conversion register write */
+	st->reg = MAX1027_SETUP_REG | MAX1027_REF_MODE2 | MAX1027_CKS_MODE2;
+	ret = spi_write(st->spi, &st->reg, 1);
+	if (ret < 0) {
+		dev_err(&indio_dev->dev,
+			"Failed to configure setup register\n");
+		return ret;
+	}
+
+	/* Configure conversion register with the requested chan */
+	st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
+		  MAX1027_NOSCAN | !!(chan->type == IIO_TEMP);
+	ret = spi_write(st->spi, &st->reg, 1);
+	if (ret < 0) {
+		dev_err(&indio_dev->dev,
+			"Failed to configure conversion register\n");
+		return ret;
+	}
+
+	/*
+	 * For an unknown reason, when we use the mode "10" (write
+	 * conversion register), the interrupt doesn't occur every time.
+	 * So we just wait about 1 ms.  This runs in sleepable (process)
+	 * context, so sleep rather than busy-wait: mdelay() is reserved
+	 * for atomic context (Documentation/timers/timers-howto).
+	 */
+	usleep_range(1000, 2000);
+
+	/* Read result; a temperature request returns two 16-bit words. */
+	ret = spi_read(st->spi, st->buffer, (chan->type == IIO_TEMP) ? 4 : 2);
+	if (ret < 0)
+		return ret;
+
+	*val = be16_to_cpu(st->buffer[0]);
+
+	return IIO_VAL_INT;
+}
+
+static int max1027_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = 0;
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = max1027_read_single_value(indio_dev, chan, val);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 8;
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ case IIO_VOLTAGE:
+ *val = 2500;
+ *val2 = 10;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct max1027_state *st = iio_priv(indio_dev);
+ u8 *val = (u8 *)st->buffer;
+
+ if (readval != NULL)
+ return -EINVAL;
+
+ *val = (u8)writeval;
+ return spi_write(st->spi, val, 1);
+}
+
+static int max1027_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ if (st->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
 * max1027_set_trigger_state() - arm/disarm hardware-triggered scans
 * @trig:	the driver's own trigger
 * @state:	true to start cnvst-driven scanning, false to stop it
 *
 * Enabled: conversions are started by the cnvst pin and scan all
 * channels plus temperature.  Disabled: the chip is put back into the
 * mode where writing the conversion register starts a conversion,
 * as used by max1027_read_single_value().
 */
static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
{
	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
	struct max1027_state *st = iio_priv(indio_dev);
	int ret;

	if (state) {
		/* Start acquisition on cnvst */
		st->reg = MAX1027_SETUP_REG | MAX1027_CKS_MODE0 |
			  MAX1027_REF_MODE2;
		ret = spi_write(st->spi, &st->reg, 1);
		if (ret < 0)
			return ret;

		/* Scan from 0 to max */
		st->reg = MAX1027_CONV_REG | MAX1027_CHAN(0) |
			  MAX1027_SCAN_N_M | MAX1027_TEMP;
		ret = spi_write(st->spi, &st->reg, 1);
		if (ret < 0)
			return ret;
	} else {
		/* Start acquisition on conversion register write */
		st->reg = MAX1027_SETUP_REG | MAX1027_CKS_MODE2 |
			  MAX1027_REF_MODE2;
		ret = spi_write(st->spi, &st->reg, 1);
		if (ret < 0)
			return ret;
	}

	return 0;
}
+
+static int max1027_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+ if (indio != indio_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+static irqreturn_t max1027_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = (struct iio_poll_func *)private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct max1027_state *st = iio_priv(indio_dev);
+
+ pr_debug("%s(irq=%d, private=0x%p)\n", __func__, irq, private);
+
+ /* fill buffer with all channel */
+ spi_read(st->spi, st->buffer, indio_dev->masklength * 2);
+
+ iio_push_to_buffers(indio_dev, st->buffer);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
/* Trigger callbacks: restrict the trigger to this device and arm/disarm */
static const struct iio_trigger_ops max1027_trigger_ops = {
	.owner = THIS_MODULE,
	.validate_device = &max1027_validate_device,
	.set_trigger_state = &max1027_set_trigger_state,
};

/* IIO core callbacks for direct reads and debugfs register writes */
static const struct iio_info max1027_info = {
	.driver_module = THIS_MODULE,
	.read_raw = &max1027_read_raw,
	.validate_trigger = &max1027_validate_trigger,
	.debugfs_reg_access = &max1027_debugfs_reg_access,
};
+
+static int max1027_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct max1027_state *st;
+
+ pr_debug("%s: probe(spi = 0x%p)\n", __func__, spi);
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ pr_err("Can't allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+ st->info = &max1027_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ mutex_init(&st->lock);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &max1027_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->available_scan_masks = st->info->available_scan_masks;
+
+ st->buffer = devm_kmalloc(&indio_dev->dev,
+ indio_dev->num_channels * 2,
+ GFP_KERNEL);
+ if (st->buffer == NULL) {
+ dev_err(&indio_dev->dev, "Can't allocate bufffer\n");
+ return -ENOMEM;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &max1027_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to setup buffer\n");
+ return ret;
+ }
+
+ st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
+ indio_dev->name);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ dev_err(&indio_dev->dev, "Failed to allocate iio trigger\n");
+ goto fail_trigger_alloc;
+ }
+
+ st->trig->ops = &max1027_trigger_ops;
+ st->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ iio_trigger_register(st->trig);
+
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING,
+ spi->dev.driver->name, st->trig);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ goto fail_dev_register;
+ }
+
+ /* Disable averaging */
+ st->reg = MAX1027_AVG_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to configure averaging register\n");
+ goto fail_dev_register;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to register iio device\n");
+ goto fail_dev_register;
+ }
+
+ return 0;
+
+fail_dev_register:
+fail_trigger_alloc:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int max1027_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ pr_debug("%s: remove(spi = 0x%p)\n", __func__, spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
/* SPI driver glue; max1027_id is defined earlier in this file */
static struct spi_driver max1027_driver = {
	.driver = {
		.name = "max1027",
		.owner = THIS_MODULE,
	},
	.probe = max1027_probe,
	.remove = max1027_remove,
	.id_table = max1027_id,
};
module_spi_driver(max1027_driver);

MODULE_AUTHOR("Philippe Reynes <tremyfr@yahoo.fr>");
MODULE_DESCRIPTION("MAX1027/MAX1029/MAX1031 ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
new file mode 100644
index 00000000000000..3a2fb454c0801d
--- /dev/null
+++ b/drivers/iio/adc/max1363.c
@@ -0,0 +1,1701 @@
+ /*
+ * iio/adc/max1363.c
+ * Copyright (C) 2008-2010 Jonathan Cameron
+ *
+ * based on linux/drivers/i2c/chips/max123x
+ * Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ * based on linux/drivers/acorn/char/pcf8583.c
+ * Copyright (C) 2000 Russell King
+ *
+ * Driver for max1363 and similar chips.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
/* Bit 7 set marks a setup byte (as opposed to a config byte) */
#define MAX1363_SETUP_BYTE(a) ((a) | 0x80)

/* There is a fair bit more defined here than currently
 * used, but the intention is to support everything these
 * chips do in the long run */

/* see data sheets */
/* max1363 and max1236, max1237, max1238, max1239 */
#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD	0x00
#define MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF	0x20
#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT	0x40
#define MAX1363_SETUP_AIN3_IS_REF_REF_IS_INT	0x60
#define MAX1363_SETUP_POWER_UP_INT_REF		0x10
#define MAX1363_SETUP_POWER_DOWN_INT_REF	0x00

/* think about including max11600 etc - more settings */
#define MAX1363_SETUP_EXT_CLOCK			0x08
#define MAX1363_SETUP_INT_CLOCK			0x00
#define MAX1363_SETUP_UNIPOLAR			0x00
#define MAX1363_SETUP_BIPOLAR			0x04
#define MAX1363_SETUP_RESET			0x00
#define MAX1363_SETUP_NORESET			0x02
/* max1363 only - though don't care on others.
 * For now monitor modes are not implemented as the relevant
 * line is not connected on my test board.
 * The definitions are here as I intend to add this soon.
 */
#define MAX1363_SETUP_MONITOR_SETUP		0x01

/* Specific to the max1363 */
#define MAX1363_MON_RESET_CHAN(a)		(1 << ((a) + 4))
#define MAX1363_MON_INT_ENABLE			0x01

/* defined for readability reasons */
/* All chips */
#define MAX1363_CONFIG_BYTE(a) ((a))

#define MAX1363_CONFIG_SE			0x01
#define MAX1363_CONFIG_DE			0x00
#define MAX1363_CONFIG_SCAN_TO_CS		0x00
#define MAX1363_CONFIG_SCAN_SINGLE_8		0x20
#define MAX1363_CONFIG_SCAN_MONITOR_MODE	0x40
#define MAX1363_CONFIG_SCAN_SINGLE_1		0x60
/* max123{6-9} only */
#define MAX1236_SCAN_MID_TO_CHANNEL		0x40

/* max1363 only - merely part of channel selects or don't care for others */
#define MAX1363_CONFIG_EN_MON_MODE_READ		0x18

#define MAX1363_CHANNEL_SEL(a)			((a) << 1)

/* max1363 strictly 0x06 - but doesn't matter */
#define MAX1363_CHANNEL_SEL_MASK		0x1E
#define MAX1363_SCAN_MASK			0x60
#define MAX1363_SE_DE_MASK			0x01

/* Upper bound on scan-mode bitmap width across all supported chips */
#define MAX1363_MAX_CHANNELS 25
+/**
+ * struct max1363_mode - scan mode information
+ * @conf: The corresponding value of the configuration register
+ * @modemask: Bit mask corresponding to channels enabled in this mode
+ */
+struct max1363_mode {
+ int8_t conf;
+ DECLARE_BITMAP(modemask, MAX1363_MAX_CHANNELS);
+};
+
+/* This must be maintained along side the max1363_mode_table in max1363_core */
+enum max1363_modes {
+ /* Single read of a single channel */
+ _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+ /* Differential single read */
+ d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+ d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+ /* Scan to channel and mid to channel where overlapping */
+ s0to1, s0to2, s2to3, s0to3, s0to4, s0to5, s0to6,
+ s6to7, s0to7, s6to8, s0to8, s6to9,
+ s0to9, s6to10, s0to10, s6to11, s0to11,
+ /* Differential scan to channel and mid to channel where overlapping */
+ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
+ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
+ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
+ d7m6to11m10, d1m0to11m10,
+};
+
+/**
+ * struct max1363_chip_info - chip specifc information
+ * @info: iio core function callbacks structure
+ * @channels: channel specification
+ * @num_channels: number of channels
+ * @mode_list: array of available scan modes
+ * @default_mode: the scan mode in which the chip starts up
+ * @int_vref_mv: the internal reference voltage
+ * @num_modes: number of modes
+ * @bits: accuracy of the adc in bits
+ */
+struct max1363_chip_info {
+ const struct iio_info *info;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ const enum max1363_modes *mode_list;
+ enum max1363_modes default_mode;
+ u16 int_vref_mv;
+ u8 num_modes;
+ u8 bits;
+};
+
+/**
+ * struct max1363_state - driver instance specific data
+ * @client: i2c_client
+ * @setupbyte: cache of current device setup byte
+ * @configbyte: cache of current device config byte
+ * @chip_info: chip model specific constants, available modes, etc.
+ * @current_mode: the scan mode of this chip
+ * @requestedmask: a valid requested set of channels
+ * @reg: supply regulator
+ * @monitor_on: whether monitor mode is enabled
+ * @monitor_speed: parameter corresponding to device monitor speed setting
+ * @mask_high: bitmask for enabled high thresholds
+ * @mask_low: bitmask for enabled low thresholds
+ * @thresh_high: high threshold values
+ * @thresh_low: low threshold values
+ * @vref: Reference voltage regulator
+ * @vref_uv: Actual (external or internal) reference voltage
+ * @send: function used to send data to the chip
+ * @recv: function used to receive data from the chip
+ */
+struct max1363_state {
+ struct i2c_client *client;
+ u8 setupbyte;
+ u8 configbyte;
+ const struct max1363_chip_info *chip_info;
+ const struct max1363_mode *current_mode;
+ u32 requestedmask;
+ struct regulator *reg;
+
+ /* Using monitor modes and buffer at the same time is
+ currently not supported */
+ bool monitor_on;
+ unsigned int monitor_speed:3;
+ u8 mask_high;
+ u8 mask_low;
+ /* 4x unipolar first then the fours bipolar ones */
+ s16 thresh_high[8];
+ s16 thresh_low[8];
+ struct regulator *vref;
+ u32 vref_uv;
+ int (*send)(const struct i2c_client *client,
+ const char *buf, int count);
+ int (*recv)(const struct i2c_client *client,
+ char *buf, int count);
+};
+
/* Single-ended read of one channel */
#define MAX1363_MODE_SINGLE(_num, _mask) {				\
		.conf = MAX1363_CHANNEL_SEL(_num)			\
			| MAX1363_CONFIG_SCAN_SINGLE_1			\
			| MAX1363_CONFIG_SE,				\
			.modemask[0] = _mask,				\
			}

/* Single-ended scan from channel 0 up to _num */
#define MAX1363_MODE_SCAN_TO_CHANNEL(_num, _mask) {			\
		.conf = MAX1363_CHANNEL_SEL(_num)			\
			| MAX1363_CONFIG_SCAN_TO_CS			\
			| MAX1363_CONFIG_SE,				\
			.modemask[0] = _mask,				\
			}

/* note not available for max1363 hence naming */
#define MAX1236_MODE_SCAN_MID_TO_CHANNEL(_mid, _num, _mask) {		\
		.conf = MAX1363_CHANNEL_SEL(_num)			\
			| MAX1236_SCAN_MID_TO_CHANNEL			\
			| MAX1363_CONFIG_SE,				\
			.modemask[0] = _mask				\
}

/* Differential read of one channel pair */
#define MAX1363_MODE_DIFF_SINGLE(_nump, _numm, _mask) {			\
		.conf = MAX1363_CHANNEL_SEL(_nump)			\
			| MAX1363_CONFIG_SCAN_SINGLE_1			\
			| MAX1363_CONFIG_DE,				\
			.modemask[0] = _mask				\
			}

/* Can't think how to automate naming so specify for now */
#define MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(_num, _numvals, _mask) {	\
		.conf = MAX1363_CHANNEL_SEL(_num)			\
			| MAX1363_CONFIG_SCAN_TO_CS			\
			| MAX1363_CONFIG_DE,				\
			.modemask[0] = _mask				\
			}

/*
 * note NOT available for max1363 hence naming.
 * NOTE(review): this differential macro sets MAX1363_CONFIG_SE rather
 * than MAX1363_CONFIG_DE (0x00) -- verify against the MAX1236 datasheet.
 */
#define MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(_num, _numvals, _mask) {	\
		.conf = MAX1363_CHANNEL_SEL(_num)			\
			| MAX1236_SCAN_MID_TO_CHANNEL			\
			| MAX1363_CONFIG_SE,				\
			.modemask[0] = _mask				\
}
+
/*
 * All supported scan modes, indexed by enum max1363_modes.  The order
 * here MUST match the enum exactly; chan->address values and the
 * mode_list arrays below index straight into this table.
 */
static const struct max1363_mode max1363_mode_table[] = {
	/* All of the single channel options first */
	MAX1363_MODE_SINGLE(0, 1 << 0),
	MAX1363_MODE_SINGLE(1, 1 << 1),
	MAX1363_MODE_SINGLE(2, 1 << 2),
	MAX1363_MODE_SINGLE(3, 1 << 3),
	MAX1363_MODE_SINGLE(4, 1 << 4),
	MAX1363_MODE_SINGLE(5, 1 << 5),
	MAX1363_MODE_SINGLE(6, 1 << 6),
	MAX1363_MODE_SINGLE(7, 1 << 7),
	MAX1363_MODE_SINGLE(8, 1 << 8),
	MAX1363_MODE_SINGLE(9, 1 << 9),
	MAX1363_MODE_SINGLE(10, 1 << 10),
	MAX1363_MODE_SINGLE(11, 1 << 11),

	MAX1363_MODE_DIFF_SINGLE(0, 1, 1 << 12),
	MAX1363_MODE_DIFF_SINGLE(2, 3, 1 << 13),
	MAX1363_MODE_DIFF_SINGLE(4, 5, 1 << 14),
	MAX1363_MODE_DIFF_SINGLE(6, 7, 1 << 15),
	MAX1363_MODE_DIFF_SINGLE(8, 9, 1 << 16),
	MAX1363_MODE_DIFF_SINGLE(10, 11, 1 << 17),
	MAX1363_MODE_DIFF_SINGLE(1, 0, 1 << 18),
	MAX1363_MODE_DIFF_SINGLE(3, 2, 1 << 19),
	MAX1363_MODE_DIFF_SINGLE(5, 4, 1 << 20),
	MAX1363_MODE_DIFF_SINGLE(7, 6, 1 << 21),
	MAX1363_MODE_DIFF_SINGLE(9, 8, 1 << 22),
	MAX1363_MODE_DIFF_SINGLE(11, 10, 1 << 23),

	/* The multichannel scans next */
	MAX1363_MODE_SCAN_TO_CHANNEL(1, 0x003),
	MAX1363_MODE_SCAN_TO_CHANNEL(2, 0x007),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(2, 3, 0x00C),
	MAX1363_MODE_SCAN_TO_CHANNEL(3, 0x00F),
	MAX1363_MODE_SCAN_TO_CHANNEL(4, 0x01F),
	MAX1363_MODE_SCAN_TO_CHANNEL(5, 0x03F),
	MAX1363_MODE_SCAN_TO_CHANNEL(6, 0x07F),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 7, 0x0C0),
	MAX1363_MODE_SCAN_TO_CHANNEL(7, 0x0FF),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 8, 0x1C0),
	MAX1363_MODE_SCAN_TO_CHANNEL(8, 0x1FF),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 9, 0x3C0),
	MAX1363_MODE_SCAN_TO_CHANNEL(9, 0x3FF),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 10, 0x7C0),
	MAX1363_MODE_SCAN_TO_CHANNEL(10, 0x7FF),
	MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 11, 0xFC0),
	MAX1363_MODE_SCAN_TO_CHANNEL(11, 0xFFF),

	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(2, 2, 0x003000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(4, 3, 0x007000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(6, 4, 0x00F000),
	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(8, 2, 0x018000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(8, 5, 0x01F000),
	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(10, 3, 0x038000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(10, 6, 0x3F000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(3, 2, 0x0C0000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(5, 3, 0x1C0000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(7, 4, 0x3C0000),
	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(9, 2, 0x600000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(9, 5, 0x7C0000),
	MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(11, 3, 0xE00000),
	MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(11, 6, 0xFC0000),
};
+
+static const struct max1363_mode
+*max1363_match_mode(const unsigned long *mask,
+ const struct max1363_chip_info *ci)
+{
+ int i;
+ if (mask)
+ for (i = 0; i < ci->num_modes; i++)
+ if (bitmap_subset(mask,
+ max1363_mode_table[ci->mode_list[i]].
+ modemask,
+ MAX1363_MAX_CHANNELS))
+ return &max1363_mode_table[ci->mode_list[i]];
+ return NULL;
+}
+
+static int max1363_smbus_send(const struct i2c_client *client, const char *buf,
+ int count)
+{
+ int i, err;
+
+ for (i = err = 0; err == 0 && i < count; ++i)
+ err = i2c_smbus_write_byte(client, buf[i]);
+
+ return err ? err : count;
+}
+
+static int max1363_smbus_recv(const struct i2c_client *client, char *buf,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; ++i) {
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0)
+ return ret;
+ buf[i] = ret;
+ }
+
+ return count;
+}
+
+static int max1363_write_basic_config(struct max1363_state *st)
+{
+ u8 tx_buf[2] = { st->setupbyte, st->configbyte };
+
+ return st->send(st->client, tx_buf, 2);
+}
+
+static int max1363_set_scan_mode(struct max1363_state *st)
+{
+ st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
+ | MAX1363_SCAN_MASK
+ | MAX1363_SE_DE_MASK);
+ st->configbyte |= st->current_mode->conf;
+
+ return max1363_write_basic_config(st);
+}
+
/*
 * max1363_read_single_chan() - direct-mode read of one channel
 * @indio_dev:	IIO device
 * @chan:	channel to read; chan->address indexes max1363_mode_table
 * @val:	raw conversion result
 * @m:		info mask (unused here)
 *
 * Returns 0 on success (with *val set) or a negative errno.  Refuses
 * to run while monitor mode or buffered capture owns the device.
 */
static int max1363_read_single_chan(struct iio_dev *indio_dev,
				    struct iio_chan_spec const *chan,
				    int *val,
				    long m)
{
	int ret = 0;
	s32 data;
	u8 rxbuf[2];
	struct max1363_state *st = iio_priv(indio_dev);
	struct i2c_client *client = st->client;

	mutex_lock(&indio_dev->mlock);
	/*
	 * If monitor mode is enabled, the method for reading a single
	 * channel will have to be rather different and has not yet
	 * been implemented.
	 *
	 * Also, cannot read directly if buffered capture enabled.
	 */
	if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}

	/* Check to see if current scan mode is correct */
	if (st->current_mode != &max1363_mode_table[chan->address]) {
		/* Update scan mode if needed */
		st->current_mode = &max1363_mode_table[chan->address];
		ret = max1363_set_scan_mode(st);
		if (ret < 0)
			goto error_ret;
	}
	if (st->chip_info->bits != 8) {
		/* Get reading: >8-bit parts return two bytes, MSB first */
		data = st->recv(client, rxbuf, 2);
		if (data < 0) {
			ret = data;
			goto error_ret;
		}
		/* Mask off status bits above the ADC's resolution */
		data = (rxbuf[1] | rxbuf[0] << 8) &
			((1 << st->chip_info->bits) - 1);
	} else {
		/* Get reading: 8-bit parts return a single byte */
		data = st->recv(client, rxbuf, 1);
		if (data < 0) {
			ret = data;
			goto error_ret;
		}
		data = rxbuf[0];
	}
	*val = data;
error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;

}
+
+static int max1363_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = max1363_read_single_chan(indio_dev, chan, val, m);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_uv / 1000;
+ *val2 = st->chip_info->bits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* Applies to max1363 */
static const enum max1363_modes max1363_mode_list[] = {
	_s0, _s1, _s2, _s3,
	s0to1, s0to2, s0to3,
	d0m1, d2m3, d1m0, d3m2,
	d0m1to2m3, d1m0to3m2,
};

/* Per-channel rising/falling threshold events (monitor mode) */
static const struct iio_event_spec max1363_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			BIT(IIO_EV_INFO_ENABLE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			BIT(IIO_EV_INFO_ENABLE),
	},
};
+
/*
 * Single-ended (unipolar) channel.  'addr' is the max1363_mode_table
 * index used for direct reads; storage widens to 16 bits above 8-bit
 * resolution.
 */
#define MAX1363_CHAN_U(num, addr, si, bits, ev_spec, num_ev_spec)	\
	{								\
		.type = IIO_VOLTAGE,					\
		.indexed = 1,						\
		.channel = num,						\
		.address = addr,					\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
		.datasheet_name = "AIN"#num,				\
		.scan_type = {						\
			.sign = 'u',					\
			.realbits = bits,				\
			.storagebits = (bits > 8) ? 16 : 8,		\
			.endianness = IIO_BE,				\
		},							\
		.scan_index = si,					\
		.event_spec = ev_spec,					\
		.num_event_specs = num_ev_spec,				\
	}

/* bipolar channel */
#define MAX1363_CHAN_B(num, num2, addr, si, bits, ev_spec, num_ev_spec)	\
	{								\
		.type = IIO_VOLTAGE,					\
		.differential = 1,					\
		.indexed = 1,						\
		.channel = num,						\
		.channel2 = num2,					\
		.address = addr,					\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
		.datasheet_name = "AIN"#num"-AIN"#num2,			\
		.scan_type = {						\
			.sign = 's',					\
			.realbits = bits,				\
			.storagebits = (bits > 8) ? 16 : 8,		\
			.endianness = IIO_BE,				\
		},							\
		.scan_index = si,					\
		.event_spec = ev_spec,					\
		.num_event_specs = num_ev_spec,				\
	}

/* 4 single-ended + 4 differential channels plus a soft timestamp */
#define MAX1363_4X_CHANS(bits, ev_spec, num_ev_spec) {			\
	MAX1363_CHAN_U(0, _s0, 0, bits, ev_spec, num_ev_spec),		\
	MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec),		\
	MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec),		\
	MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec),		\
	MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec),	\
	MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec),	\
	MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec),	\
	MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec),	\
	IIO_CHAN_SOFT_TIMESTAMP(8)					\
	}
+
/* 4-channel variants at 8/10/12 bits, with/without threshold events */
static const struct iio_chan_spec max1036_channels[] =
	MAX1363_4X_CHANS(8, NULL, 0);
static const struct iio_chan_spec max1136_channels[] =
	MAX1363_4X_CHANS(10, NULL, 0);
static const struct iio_chan_spec max1236_channels[] =
	MAX1363_4X_CHANS(12, NULL, 0);
static const struct iio_chan_spec max1361_channels[] =
	MAX1363_4X_CHANS(10, max1363_events, ARRAY_SIZE(max1363_events));
static const struct iio_chan_spec max1363_channels[] =
	MAX1363_4X_CHANS(12, max1363_events, ARRAY_SIZE(max1363_events));

/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
	_s0, _s1, _s2, _s3,
	s0to1, s0to2, s0to3,
	d0m1, d2m3, d1m0, d3m2,
	d0m1to2m3, d1m0to3m2,
	s2to3,
};

/* Applies to max1238, max1239 */
static const enum max1363_modes max1238_mode_list[] = {
	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
	s0to7, s0to8, s0to9, s0to10, s0to11,
	d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
	d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
	d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
	d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
	s6to7, s6to8, s6to9, s6to10, s6to11,
	d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
};

/* 12 single-ended + 12 differential channels plus a soft timestamp */
#define MAX1363_12X_CHANS(bits) {				\
	MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0),		\
	MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0),		\
	MAX1363_CHAN_U(2, _s2, 2, bits, NULL, 0),		\
	MAX1363_CHAN_U(3, _s3, 3, bits, NULL, 0),		\
	MAX1363_CHAN_U(4, _s4, 4, bits, NULL, 0),		\
	MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0),		\
	MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0),		\
	MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0),		\
	MAX1363_CHAN_U(8, _s8, 8, bits, NULL, 0),		\
	MAX1363_CHAN_U(9, _s9, 9, bits, NULL, 0),		\
	MAX1363_CHAN_U(10, _s10, 10, bits, NULL, 0),		\
	MAX1363_CHAN_U(11, _s11, 11, bits, NULL, 0),		\
	MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0),		\
	MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0),		\
	MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0),		\
	MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0),		\
	MAX1363_CHAN_B(8, 9, d8m9, 16, bits, NULL, 0),		\
	MAX1363_CHAN_B(10, 11, d10m11, 17, bits, NULL, 0),	\
	MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0),		\
	MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0),		\
	MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0),		\
	MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0),		\
	MAX1363_CHAN_B(9, 8, d9m8, 22, bits, NULL, 0),		\
	MAX1363_CHAN_B(11, 10, d11m10, 23, bits, NULL, 0),	\
	IIO_CHAN_SOFT_TIMESTAMP(24)				\
	}
static const struct iio_chan_spec max1038_channels[] = MAX1363_12X_CHANS(8);
static const struct iio_chan_spec max1138_channels[] = MAX1363_12X_CHANS(10);
static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12);
+
/* Applies to max11606-max11611 4-channel parts */
static const enum max1363_modes max11607_mode_list[] = {
	_s0, _s1, _s2, _s3,
	s0to1, s0to2, s0to3,
	s2to3,
	d0m1, d2m3, d1m0, d3m2,
	d0m1to2m3, d1m0to3m2,
};

/* Applies to the 8-channel max11608-style parts */
static const enum max1363_modes max11608_mode_list[] = {
	_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
	s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
	s6to7,
	d0m1, d2m3, d4m5, d6m7,
	d1m0, d3m2, d5m4, d7m6,
	d0m1to2m3, d0m1to4m5, d0m1to6m7,
	d1m0to3m2, d1m0to5m4, d1m0to7m6,
};

/* 8 single-ended + 8 differential channels plus a soft timestamp */
#define MAX1363_8X_CHANS(bits) {			\
	MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0),	\
	MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0),	\
	MAX1363_CHAN_U(2, _s2, 2, bits, NULL, 0),	\
	MAX1363_CHAN_U(3, _s3, 3, bits, NULL, 0),	\
	MAX1363_CHAN_U(4, _s4, 4, bits, NULL, 0),	\
	MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0),	\
	MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0),	\
	MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0),	\
	MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0),	\
	MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0),	\
	MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0),	\
	MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0),	\
	MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0),	\
	MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0),	\
	MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0),	\
	MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0),	\
	IIO_CHAN_SOFT_TIMESTAMP(16)			\
}
static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
static const struct iio_chan_spec max11608_channels[] = MAX1363_8X_CHANS(10);
static const struct iio_chan_spec max11614_channels[] = MAX1363_8X_CHANS(12);

/* Applies to the 2-channel max11644-max11647 parts */
static const enum max1363_modes max11644_mode_list[] = {
	_s0, _s1, s0to1, d0m1, d1m0,
};

/* 2 single-ended + 2 differential channels plus a soft timestamp */
#define MAX1363_2X_CHANS(bits) {			\
	MAX1363_CHAN_U(0, _s0, 0, bits, NULL, 0),	\
	MAX1363_CHAN_U(1, _s1, 1, bits, NULL, 0),	\
	MAX1363_CHAN_B(0, 1, d0m1, 2, bits, NULL, 0),	\
	MAX1363_CHAN_B(1, 0, d1m0, 3, bits, NULL, 0),	\
	IIO_CHAN_SOFT_TIMESTAMP(4)			\
	}

static const struct iio_chan_spec max11646_channels[] = MAX1363_2X_CHANS(10);
static const struct iio_chan_spec max11644_channels[] = MAX1363_2X_CHANS(12);
+
/* Supported part numbers; used as indices into the chip-info table */
enum { max1361,
       max1362,
       max1363,
       max1364,
       max1036,
       max1037,
       max1038,
       max1039,
       max1136,
       max1137,
       max1138,
       max1139,
       max1236,
       max1237,
       max1238,
       max1239,
       max11600,
       max11601,
       max11602,
       max11603,
       max11604,
       max11605,
       max11606,
       max11607,
       max11608,
       max11609,
       max11610,
       max11611,
       max11612,
       max11613,
       max11614,
       max11615,
       max11616,
       max11617,
       max11644,
       max11645,
       max11646,
       max11647
};

/*
 * Monitor-mode scan rates indexed by the 3-bit monitor_speed field.
 * Presumably in Hz (exposed via sampling_frequency) -- the non-monotonic
 * ordering mirrors the device's register encoding; verify vs datasheet.
 */
static const int max1363_monitor_speeds[] = { 133000, 665000, 33300, 16600,
					      8300, 4200, 2000, 1000 };
+
+static ssize_t max1363_monitor_show_freq(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct max1363_state *st = iio_priv(dev_to_iio_dev(dev));
+ return sprintf(buf, "%d\n", max1363_monitor_speeds[st->monitor_speed]);
+}
+
+static ssize_t max1363_monitor_store_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max1363_state *st = iio_priv(indio_dev);
+ int i, ret;
+ unsigned long val;
+ bool found = false;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(max1363_monitor_speeds); i++)
+ if (val == max1363_monitor_speeds[i]) {
+ found = true;
+ break;
+ }
+ if (!found)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ st->monitor_speed = i;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
/* sampling_frequency attribute backed by the monitor-speed handlers */
static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
			      max1363_monitor_show_freq,
			      max1363_monitor_store_freq);

/* must match max1363_monitor_speeds[] above */
static IIO_CONST_ATTR(sampling_frequency_available,
		      "133000 665000 33300 16600 8300 4200 2000 1000");
+
+static int max1363_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ if (dir == IIO_EV_DIR_FALLING)
+ *val = st->thresh_low[chan->channel];
+ else
+ *val = st->thresh_high[chan->channel];
+ return IIO_VAL_INT;
+}
+
+static int max1363_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ /* make it handle signed correctly as well */
+ switch (st->chip_info->bits) {
+ case 10:
+ if (val > 0x3FF)
+ return -EINVAL;
+ break;
+ case 12:
+ if (val > 0xFFF)
+ return -EINVAL;
+ break;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ st->thresh_low[chan->channel] = val;
+ break;
+ case IIO_EV_DIR_RISING:
+ st->thresh_high[chan->channel] = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
 * Event codes indexed by the bit position in the monitor status byte:
 * bits 0-3 are falling thresholds for channels 0-3, bits 4-7 rising.
 */
static const u64 max1363_event_codes[] = {
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 1,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
	IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 3,
			     IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
};
+
+static irqreturn_t max1363_event_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct max1363_state *st = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns();
+ unsigned long mask, loc;
+ u8 rx;
+ u8 tx[2] = { st->setupbyte,
+ MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0 };
+
+ st->recv(st->client, &rx, 1);
+ mask = rx;
+ for_each_set_bit(loc, &mask, 8)
+ iio_push_event(indio_dev, max1363_event_codes[loc], timestamp);
+ st->send(st->client, tx, 2);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * max1363_read_event_config() - report whether a threshold event is enabled
+ *
+ * Returns non-zero when the bit for this channel is set in mask_low
+ * (falling direction) or mask_high (any other direction).  mlock guards
+ * the masks against concurrent updates from write_event_config.
+ */
+static int max1363_read_event_config(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, enum iio_event_type type,
+	enum iio_event_direction dir)
+{
+	struct max1363_state *st = iio_priv(indio_dev);
+	int val;
+	int number = chan->channel;
+
+	mutex_lock(&indio_dev->mlock);
+	if (dir == IIO_EV_DIR_FALLING)
+		val = (1 << number) & st->mask_low;
+	else
+		val = (1 << number) & st->mask_high;
+	mutex_unlock(&indio_dev->mlock);
+
+	return val;
+}
+
+/*
+ * max1363_monitor_mode_update() - enter or leave hardware monitor mode
+ * @st:      driver state
+ * @enabled: non-zero to program thresholds and enable monitoring,
+ *           zero to fall back to the plain configuration
+ *
+ * When enabling, picks one of three scan modes (s0to3, d0m1to2m3 or
+ * d1m0to3m2) from the union of the low/high event masks, then sends the
+ * config/setup/speed bytes followed by a 3-byte threshold record per
+ * monitored channel.  Disabled channels get extreme thresholds so they
+ * can never fire.  A second short transfer then turns the interrupt on.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
+{
+	u8 *tx_buf;
+	int ret, i = 3, j;
+	unsigned long numelements;
+	int len;
+	const long *modemask;
+
+	if (!enabled) {
+		/* transition to buffered capture is not currently supported */
+		st->setupbyte &= ~MAX1363_SETUP_MONITOR_SETUP;
+		st->configbyte &= ~MAX1363_SCAN_MASK;
+		st->monitor_on = false;
+		return max1363_write_basic_config(st);
+	}
+
+	/* Ensure we are in the relevant mode */
+	st->setupbyte |= MAX1363_SETUP_MONITOR_SETUP;
+	st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
+			    | MAX1363_SCAN_MASK
+			    | MAX1363_SE_DE_MASK);
+	st->configbyte |= MAX1363_CONFIG_SCAN_MONITOR_MODE;
+	/* Channels 0-3 single ended, 4-5 one differential pairing,
+	 * otherwise the opposite pairing - mutually exclusive by
+	 * __max1363_check_event_mask(). */
+	if ((st->mask_low | st->mask_high) & 0x0F) {
+		st->configbyte |= max1363_mode_table[s0to3].conf;
+		modemask = max1363_mode_table[s0to3].modemask;
+	} else if ((st->mask_low | st->mask_high) & 0x30) {
+		st->configbyte |= max1363_mode_table[d0m1to2m3].conf;
+		modemask = max1363_mode_table[d0m1to2m3].modemask;
+	} else {
+		st->configbyte |= max1363_mode_table[d1m0to3m2].conf;
+		modemask = max1363_mode_table[d1m0to3m2].modemask;
+	}
+	numelements = bitmap_weight(modemask, MAX1363_MAX_CHANNELS);
+	/* 3 header bytes + a 3-byte (low12/high12) record per channel */
+	len = 3 * numelements + 3;
+	tx_buf = kmalloc(len, GFP_KERNEL);
+	if (!tx_buf) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	tx_buf[0] = st->configbyte;
+	tx_buf[1] = st->setupbyte;
+	tx_buf[2] = (st->monitor_speed << 1);
+
+	/*
+	 * So we need to do yet another bit of nefarious scan mode
+	 * setup to match what we need.
+	 */
+	for (j = 0; j < 8; j++)
+		if (test_bit(j, modemask)) {
+			/* Establish the mode is in the scan */
+			if (st->mask_low & (1 << j)) {
+				tx_buf[i] = (st->thresh_low[j] >> 4) & 0xFF;
+				tx_buf[i + 1] = (st->thresh_low[j] << 4) & 0xF0;
+			} else if (j < 4) {
+				tx_buf[i] = 0;
+				tx_buf[i + 1] = 0;
+			} else {
+				tx_buf[i] = 0x80;
+				tx_buf[i + 1] = 0;
+			}
+			if (st->mask_high & (1 << j)) {
+				tx_buf[i + 1] |=
+					(st->thresh_high[j] >> 8) & 0x0F;
+				tx_buf[i + 2] = st->thresh_high[j] & 0xFF;
+			} else if (j < 4) {
+				tx_buf[i + 1] |= 0x0F;
+				tx_buf[i + 2] = 0xFF;
+			} else {
+				tx_buf[i + 1] |= 0x07;
+				tx_buf[i + 2] = 0xFF;
+			}
+			i += 3;
+		}
+
+
+	ret = st->send(st->client, tx_buf, len);
+	if (ret < 0)
+		goto error_ret;
+	if (ret != len) {
+		ret = -EIO;
+		goto error_ret;
+	}
+
+	/*
+	 * Now that we hopefully have sensible thresholds in place it is
+	 * time to turn the interrupts on.
+	 * It is unclear from the data sheet if this should be necessary
+	 * (i.e. whether monitor mode setup is atomic) but it appears to
+	 * be in practice.
+	 */
+	tx_buf[0] = st->setupbyte;
+	tx_buf[1] = MAX1363_MON_INT_ENABLE | (st->monitor_speed << 1) | 0xF0;
+	ret = st->send(st->client, tx_buf, 2);
+	if (ret < 0)
+		goto error_ret;
+	if (ret != 2) {
+		ret = -EIO;
+		goto error_ret;
+	}
+	ret = 0;
+	st->monitor_on = true;
+error_ret:
+
+	kfree(tx_buf);
+
+	return ret;
+}
+
+/*
+ * To keep this manageable we always use one of 3 scan modes.
+ * Scan 0...3, 0-1,2-3 and 1-0,3-2
+ */
+
+/*
+ * __max1363_check_event_mask() - reject incompatible event combinations
+ * @thismask:  channel number being enabled
+ * @checkmask: union of currently enabled low/high masks
+ *
+ * Channels 0-3 (single ended), 4-5 (one differential pairing) and 6-7
+ * (the other pairing) each need a different scan mode, so enabling an
+ * event in one group while another group is active returns -EBUSY.
+ */
+static inline int __max1363_check_event_mask(int thismask, int checkmask)
+{
+	int ret = 0;
+	/* Is it unipolar */
+	if (thismask < 4) {
+		if (checkmask & ~0x0F) {
+			ret = -EBUSY;
+			goto error_ret;
+		}
+	} else if (thismask < 6) {
+		if (checkmask & ~0x30) {
+			ret = -EBUSY;
+			goto error_ret;
+		}
+	} else if (checkmask & ~0xC0)
+		ret = -EBUSY;
+error_ret:
+	return ret;
+}
+
+/*
+ * max1363_write_event_config() - enable/disable a threshold event
+ *
+ * Updates mask_low (falling) or mask_high (other directions) under
+ * mlock, after checking the new channel is compatible with the scan
+ * mode implied by the channels already enabled.  Then reprograms the
+ * hardware monitor mode to match the new masks.
+ *
+ * NOTE(review): the return value of max1363_monitor_mode_update() is
+ * discarded, so a failed hardware update is reported as success.
+ */
+static int max1363_write_event_config(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, enum iio_event_type type,
+	enum iio_event_direction dir, int state)
+{
+	int ret = 0;
+	struct max1363_state *st = iio_priv(indio_dev);
+	u16 unifiedmask;
+	int number = chan->channel;
+
+	mutex_lock(&indio_dev->mlock);
+	unifiedmask = st->mask_low | st->mask_high;
+	if (dir == IIO_EV_DIR_FALLING) {
+
+		if (state == 0)
+			st->mask_low &= ~(1 << number);
+		else {
+			ret = __max1363_check_event_mask((1 << number),
+							 unifiedmask);
+			if (ret)
+				goto error_ret;
+			st->mask_low |= (1 << number);
+		}
+	} else {
+		if (state == 0)
+			st->mask_high &= ~(1 << number);
+		else {
+			ret = __max1363_check_event_mask((1 << number),
+							 unifiedmask);
+			if (ret)
+				goto error_ret;
+			st->mask_high |= (1 << number);
+		}
+	}
+
+	max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * As with scan_elements, only certain sets of these can
+ * be combined.
+ */
+static struct attribute *max1363_event_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group max1363_event_attribute_group = {
+ .attrs = max1363_event_attributes,
+ .name = "events",
+};
+
+/*
+ * max1363_update_scan_mode() - pick a chip mode matching the scan mask
+ *
+ * Selects the first chip mode whose modemask covers the requested
+ * channels and writes it to the device.  Returns -EINVAL when no mode
+ * can satisfy the mask.
+ */
+static int max1363_update_scan_mode(struct iio_dev *indio_dev,
+				    const unsigned long *scan_mask)
+{
+	struct max1363_state *st = iio_priv(indio_dev);
+
+	/*
+	 * Need to figure out the current mode based upon the requested
+	 * scan mask in iio_dev
+	 */
+	st->current_mode = max1363_match_mode(scan_mask, st->chip_info);
+	if (!st->current_mode)
+		return -EINVAL;
+	max1363_set_scan_mode(st);
+	return 0;
+}
+
+static const struct iio_info max1238_info = {
+ .read_raw = &max1363_read_raw,
+ .driver_module = THIS_MODULE,
+ .update_scan_mode = &max1363_update_scan_mode,
+};
+
+static const struct iio_info max1363_info = {
+ .read_event_value = &max1363_read_thresh,
+ .write_event_value = &max1363_write_thresh,
+ .read_event_config = &max1363_read_event_config,
+ .write_event_config = &max1363_write_event_config,
+ .read_raw = &max1363_read_raw,
+ .update_scan_mode = &max1363_update_scan_mode,
+ .driver_module = THIS_MODULE,
+ .event_attrs = &max1363_event_attribute_group,
+};
+
+/* max1363 and max1368 tested - rest from data sheet */
+static const struct max1363_chip_info max1363_chip_info_tbl[] = {
+ [max1361] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max1363_mode_list,
+ .num_modes = ARRAY_SIZE(max1363_mode_list),
+ .default_mode = s0to3,
+ .channels = max1361_channels,
+ .num_channels = ARRAY_SIZE(max1361_channels),
+ .info = &max1363_info,
+ },
+ [max1362] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max1363_mode_list,
+ .num_modes = ARRAY_SIZE(max1363_mode_list),
+ .default_mode = s0to3,
+ .channels = max1361_channels,
+ .num_channels = ARRAY_SIZE(max1361_channels),
+ .info = &max1363_info,
+ },
+ [max1363] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max1363_mode_list,
+ .num_modes = ARRAY_SIZE(max1363_mode_list),
+ .default_mode = s0to3,
+ .channels = max1363_channels,
+ .num_channels = ARRAY_SIZE(max1363_channels),
+ .info = &max1363_info,
+ },
+ [max1364] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max1363_mode_list,
+ .num_modes = ARRAY_SIZE(max1363_mode_list),
+ .default_mode = s0to3,
+ .channels = max1363_channels,
+ .num_channels = ARRAY_SIZE(max1363_channels),
+ .info = &max1363_info,
+ },
+ [max1036] = {
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1036_channels,
+ .num_channels = ARRAY_SIZE(max1036_channels),
+ },
+ [max1037] = {
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1036_channels,
+ .num_channels = ARRAY_SIZE(max1036_channels),
+ },
+ [max1038] = {
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1038_channels,
+ .num_channels = ARRAY_SIZE(max1038_channels),
+ },
+ [max1039] = {
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1038_channels,
+ .num_channels = ARRAY_SIZE(max1038_channels),
+ },
+ [max1136] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1136_channels,
+ .num_channels = ARRAY_SIZE(max1136_channels),
+ },
+ [max1137] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1136_channels,
+ .num_channels = ARRAY_SIZE(max1136_channels),
+ },
+ [max1138] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1138_channels,
+ .num_channels = ARRAY_SIZE(max1138_channels),
+ },
+ [max1139] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1138_channels,
+ .num_channels = ARRAY_SIZE(max1138_channels),
+ },
+ [max1236] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1236_channels,
+ .num_channels = ARRAY_SIZE(max1236_channels),
+ },
+ [max1237] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1236_channels,
+ .num_channels = ARRAY_SIZE(max1236_channels),
+ },
+ [max1238] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1238_channels,
+ .num_channels = ARRAY_SIZE(max1238_channels),
+ },
+ [max1239] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1238_channels,
+ .num_channels = ARRAY_SIZE(max1238_channels),
+ },
+ [max11600] = {
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1036_channels,
+ .num_channels = ARRAY_SIZE(max1036_channels),
+ },
+ [max11601] = {
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1036_channels,
+ .num_channels = ARRAY_SIZE(max1036_channels),
+ },
+ [max11602] = {
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11602_channels,
+ .num_channels = ARRAY_SIZE(max11602_channels),
+ },
+ [max11603] = {
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11602_channels,
+ .num_channels = ARRAY_SIZE(max11602_channels),
+ },
+ [max11604] = {
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1038_channels,
+ .num_channels = ARRAY_SIZE(max1038_channels),
+ },
+ [max11605] = {
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1038_channels,
+ .num_channels = ARRAY_SIZE(max1038_channels),
+ },
+ [max11606] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1136_channels,
+ .num_channels = ARRAY_SIZE(max1136_channels),
+ },
+ [max11607] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1136_channels,
+ .num_channels = ARRAY_SIZE(max1136_channels),
+ },
+ [max11608] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11608_channels,
+ .num_channels = ARRAY_SIZE(max11608_channels),
+ },
+ [max11609] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11608_channels,
+ .num_channels = ARRAY_SIZE(max11608_channels),
+ },
+ [max11610] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1138_channels,
+ .num_channels = ARRAY_SIZE(max1138_channels),
+ },
+ [max11611] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1138_channels,
+ .num_channels = ARRAY_SIZE(max1138_channels),
+ },
+ [max11612] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1363_channels,
+ .num_channels = ARRAY_SIZE(max1363_channels),
+ },
+ [max11613] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .info = &max1238_info,
+ .channels = max1363_channels,
+ .num_channels = ARRAY_SIZE(max1363_channels),
+ },
+ [max11614] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11614_channels,
+ .num_channels = ARRAY_SIZE(max11614_channels),
+ },
+ [max11615] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .info = &max1238_info,
+ .channels = max11614_channels,
+ .num_channels = ARRAY_SIZE(max11614_channels),
+ },
+ [max11616] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1238_channels,
+ .num_channels = ARRAY_SIZE(max1238_channels),
+ },
+ [max11617] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .info = &max1238_info,
+ .channels = max1238_channels,
+ .num_channels = ARRAY_SIZE(max1238_channels),
+ },
+ [max11644] = {
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max11644_mode_list,
+ .num_modes = ARRAY_SIZE(max11644_mode_list),
+ .default_mode = s0to1,
+ .info = &max1238_info,
+ .channels = max11644_channels,
+ .num_channels = ARRAY_SIZE(max11644_channels),
+ },
+ [max11645] = {
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max11644_mode_list,
+ .num_modes = ARRAY_SIZE(max11644_mode_list),
+ .default_mode = s0to1,
+ .info = &max1238_info,
+ .channels = max11644_channels,
+ .num_channels = ARRAY_SIZE(max11644_channels),
+ },
+ [max11646] = {
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max11644_mode_list,
+ .num_modes = ARRAY_SIZE(max11644_mode_list),
+ .default_mode = s0to1,
+ .info = &max1238_info,
+ .channels = max11646_channels,
+ .num_channels = ARRAY_SIZE(max11646_channels),
+ },
+ [max11647] = {
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max11644_mode_list,
+ .num_modes = ARRAY_SIZE(max11644_mode_list),
+ .default_mode = s0to1,
+ .info = &max1238_info,
+ .channels = max11646_channels,
+ .num_channels = ARRAY_SIZE(max11646_channels),
+ },
+};
+
+/*
+ * max1363_initial_setup() - program the power-on setup/config bytes
+ *
+ * Uses the internal clock, unipolar mode and no reset.  AIN3 becomes an
+ * external reference input when a vref regulator was found, otherwise
+ * the internal reference is powered up and AIN3 stays an analog input.
+ * The chip's default scan mode is selected and written via
+ * max1363_set_scan_mode().
+ */
+static int max1363_initial_setup(struct max1363_state *st)
+{
+	st->setupbyte = MAX1363_SETUP_INT_CLOCK
+		| MAX1363_SETUP_UNIPOLAR
+		| MAX1363_SETUP_NORESET;
+
+	if (st->vref)
+		st->setupbyte |= MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF;
+	else
+		st->setupbyte |= MAX1363_SETUP_POWER_UP_INT_REF
+		  | MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT;
+
+	/* Set scan mode writes the config anyway so wait until then */
+	st->setupbyte = MAX1363_SETUP_BYTE(st->setupbyte);
+	st->current_mode = &max1363_mode_table[st->chip_info->default_mode];
+	st->configbyte = MAX1363_CONFIG_BYTE(st->configbyte);
+
+	return max1363_set_scan_mode(st);
+}
+
+/*
+ * max1363_alloc_scan_masks() - build the available_scan_masks table
+ *
+ * Allocates (num_modes + 1) bitmaps in one devm allocation - the extra,
+ * zero-filled entry terminates the list - and copies each supported
+ * mode's channel bitmap into it so the IIO core can match scan masks.
+ */
+static int max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+{
+	struct max1363_state *st = iio_priv(indio_dev);
+	unsigned long *masks;
+	int i;
+
+	masks = devm_kzalloc(&indio_dev->dev,
+			BITS_TO_LONGS(MAX1363_MAX_CHANNELS) * sizeof(long) *
+			(st->chip_info->num_modes + 1), GFP_KERNEL);
+	if (!masks)
+		return -ENOMEM;
+
+	for (i = 0; i < st->chip_info->num_modes; i++)
+		bitmap_copy(masks + BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*i,
+			    max1363_mode_table[st->chip_info->mode_list[i]]
+			    .modemask, MAX1363_MAX_CHANNELS);
+
+	indio_dev->available_scan_masks = masks;
+
+	return 0;
+}
+
+/*
+ * max1363_trigger_handler() - pollfunc bottom half for buffered capture
+ *
+ * Reads one sample per enabled channel (two bytes each except on 8-bit
+ * parts), pads the buffer so the appended timestamp lands on an 8-byte
+ * boundary, and pushes the result to the IIO buffers.
+ */
+static irqreturn_t max1363_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct max1363_state *st = iio_priv(indio_dev);
+	__u8 *rxbuf;
+	int b_sent;
+	size_t d_size;
+	unsigned long numvals = bitmap_weight(st->current_mode->modemask,
+					      MAX1363_MAX_CHANNELS);
+
+	/* Ensure the timestamp is 8 byte aligned */
+	if (st->chip_info->bits != 8)
+		d_size = numvals*2;
+	else
+		d_size = numvals;
+	if (indio_dev->scan_timestamp) {
+		d_size += sizeof(s64);
+		if (d_size % sizeof(s64))
+			d_size += sizeof(s64) - (d_size % sizeof(s64));
+	}
+	/* Monitor mode prevents reading. Whilst not currently implemented
+	 * might as well have this test in here in the meantime as it does
+	 * no harm.
+	 */
+	if (numvals == 0)
+		goto done;
+
+	rxbuf = kmalloc(d_size, GFP_KERNEL);
+	if (rxbuf == NULL)
+		goto done;
+	if (st->chip_info->bits != 8)
+		b_sent = st->recv(st->client, rxbuf, numvals * 2);
+	else
+		b_sent = st->recv(st->client, rxbuf, numvals);
+	if (b_sent < 0)
+		goto done_free;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns());
+
+done_free:
+	kfree(rxbuf);
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * max1363_probe() - I2C probe: set up regulators, transfer ops and IIO dev
+ *
+ * Picks raw I2C transfers when the adapter supports them, falling back
+ * to SMBus byte transfers for 8-bit parts only.  The optional "vref"
+ * regulator overrides the chip's internal reference voltage.  An event
+ * interrupt handler is installed only when the client has an IRQ.
+ * Error paths unwind in reverse order: buffer, regulators, channel map.
+ */
+static int max1363_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int ret;
+	struct max1363_state *st;
+	struct iio_dev *indio_dev;
+	struct regulator *vref;
+
+	indio_dev = devm_iio_device_alloc(&client->dev,
+					  sizeof(struct max1363_state));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	indio_dev->dev.of_node = client->dev.of_node;
+	ret = iio_map_array_register(indio_dev, client->dev.platform_data);
+	if (ret < 0)
+		return ret;
+
+	st = iio_priv(indio_dev);
+
+	st->reg = devm_regulator_get(&client->dev, "vcc");
+	if (IS_ERR(st->reg)) {
+		ret = PTR_ERR(st->reg);
+		goto error_unregister_map;
+	}
+
+	ret = regulator_enable(st->reg);
+	if (ret)
+		goto error_unregister_map;
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, indio_dev);
+
+	st->chip_info = &max1363_chip_info_tbl[id->driver_data];
+	st->client = client;
+
+	/* Default to the internal reference; an external vref overrides. */
+	st->vref_uv = st->chip_info->int_vref_mv * 1000;
+	vref = devm_regulator_get_optional(&client->dev, "vref");
+	if (!IS_ERR(vref)) {
+		int vref_uv;
+
+		ret = regulator_enable(vref);
+		if (ret)
+			goto error_disable_reg;
+		st->vref = vref;
+		vref_uv = regulator_get_voltage(vref);
+		if (vref_uv <= 0) {
+			ret = -EINVAL;
+			goto error_disable_reg;
+		}
+		st->vref_uv = vref_uv;
+	}
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		st->send = i2c_master_send;
+		st->recv = i2c_master_recv;
+	} else if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)
+			&& st->chip_info->bits == 8) {
+		st->send = max1363_smbus_send;
+		st->recv = max1363_smbus_recv;
+	} else {
+		ret = -EOPNOTSUPP;
+		goto error_disable_reg;
+	}
+
+	ret = max1363_alloc_scan_masks(indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	/* Establish that the iio_dev is a child of the i2c device */
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
+	indio_dev->channels = st->chip_info->channels;
+	indio_dev->num_channels = st->chip_info->num_channels;
+	indio_dev->info = st->chip_info->info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	ret = max1363_initial_setup(st);
+	if (ret < 0)
+		goto error_disable_reg;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+		&max1363_trigger_handler, NULL);
+	if (ret)
+		goto error_disable_reg;
+
+	if (client->irq) {
+		ret = devm_request_threaded_irq(&client->dev, st->client->irq,
+					   NULL,
+					   &max1363_event_handler,
+					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					   "max1363_event",
+					   indio_dev);
+
+		if (ret)
+			goto error_uninit_buffer;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto error_uninit_buffer;
+
+	return 0;
+
+error_uninit_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+	if (st->vref)
+		regulator_disable(st->vref);
+	regulator_disable(st->reg);
+error_unregister_map:
+	iio_map_array_unregister(indio_dev);
+	return ret;
+}
+
+/*
+ * max1363_remove() - undo max1363_probe() in reverse order
+ *
+ * Unregisters the IIO device, tears down the triggered buffer, disables
+ * the optional vref then the vcc regulator, and removes the channel map.
+ */
+static int max1363_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct max1363_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (st->vref)
+		regulator_disable(st->vref);
+	regulator_disable(st->reg);
+	iio_map_array_unregister(indio_dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id max1363_id[] = {
+ { "max1361", max1361 },
+ { "max1362", max1362 },
+ { "max1363", max1363 },
+ { "max1364", max1364 },
+ { "max1036", max1036 },
+ { "max1037", max1037 },
+ { "max1038", max1038 },
+ { "max1039", max1039 },
+ { "max1136", max1136 },
+ { "max1137", max1137 },
+ { "max1138", max1138 },
+ { "max1139", max1139 },
+ { "max1236", max1236 },
+ { "max1237", max1237 },
+ { "max1238", max1238 },
+ { "max1239", max1239 },
+ { "max11600", max11600 },
+ { "max11601", max11601 },
+ { "max11602", max11602 },
+ { "max11603", max11603 },
+ { "max11604", max11604 },
+ { "max11605", max11605 },
+ { "max11606", max11606 },
+ { "max11607", max11607 },
+ { "max11608", max11608 },
+ { "max11609", max11609 },
+ { "max11610", max11610 },
+ { "max11611", max11611 },
+ { "max11612", max11612 },
+ { "max11613", max11613 },
+ { "max11614", max11614 },
+ { "max11615", max11615 },
+ { "max11616", max11616 },
+ { "max11617", max11617 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max1363_id);
+
+static struct i2c_driver max1363_driver = {
+ .driver = {
+ .name = "max1363",
+ },
+ .probe = max1363_probe,
+ .remove = max1363_remove,
+ .id_table = max1363_id,
+};
+module_i2c_driver(max1363_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Maxim 1363 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
new file mode 100644
index 00000000000000..efbfd12a4bfdd9
--- /dev/null
+++ b/drivers/iio/adc/mcp320x.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2013 Oskar Andero <oskar.andero@gmail.com>
+ * Copyright (C) 2014 Rose Technology
+ * Allan Bendorff Jensen <abj@rosetechnology.dk>
+ * Soren Andersen <san@rosetechnology.dk>
+ *
+ * Driver for following ADC chips from Microchip Technology's:
+ * 10 Bit converter
+ * MCP3001
+ * MCP3002
+ * MCP3004
+ * MCP3008
+ * ------------
+ * 12 bit converter
+ * MCP3201
+ * MCP3202
+ * MCP3204
+ * MCP3208
+ * ------------
+ *
+ * Datasheet can be found here:
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21294E.pdf mcp3002
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21295d.pdf mcp3004/08
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21290D.pdf mcp3201
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+enum {
+ mcp3001,
+ mcp3002,
+ mcp3004,
+ mcp3008,
+ mcp3201,
+ mcp3202,
+ mcp3204,
+ mcp3208,
+};
+
+struct mcp320x_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int resolution;
+};
+
+struct mcp320x {
+ struct spi_device *spi;
+ struct spi_message msg;
+ struct spi_transfer transfer[2];
+
+ u8 tx_buf;
+ u8 rx_buf[2];
+
+ struct regulator *reg;
+ struct mutex lock;
+ const struct mcp320x_chip_info *chip_info;
+};
+
+/*
+ * mcp320x_channel_to_tx_data() - build the request byte for a conversion
+ * @device_index: one of the mcp30xx/mcp32xx enum values
+ * @channel:      channel number to sample
+ * @differential: true for a differential measurement
+ *
+ * Single-channel parts (mcp3001/mcp3201) take no request byte at all.
+ * The others encode start bit, single-ended/differential flag and the
+ * channel number at positions that depend on the channel count.
+ * Returns the byte value, or -EINVAL for an unknown device.
+ */
+static int mcp320x_channel_to_tx_data(int device_index,
+			const unsigned int channel, bool differential)
+{
+	int start_bit = 1;
+
+	switch (device_index) {
+	case mcp3001:
+	case mcp3201:
+		return 0;
+	case mcp3002:
+	case mcp3202:
+		return ((start_bit << 4) | (!differential << 3) |
+							(channel << 2));
+	case mcp3004:
+	case mcp3204:
+	case mcp3008:
+	case mcp3208:
+		return ((start_bit << 6) | (!differential << 5) |
+							(channel << 2));
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mcp320x_adc_conversion() - run one conversion and extract the sample
+ *
+ * Single-channel parts (mcp3001/mcp3201) need no request byte, so a
+ * plain 2-byte read suffices; all others use the pre-built write+read
+ * SPI message.  The valid bits sit at a device-specific position in
+ * the two received bytes, so each variant shifts them out differently.
+ * Returns the raw sample or a negative errno.
+ */
+static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
+				  bool differential, int device_index)
+{
+	int ret;
+
+	adc->rx_buf[0] = 0;
+	adc->rx_buf[1] = 0;
+	adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
+						channel, differential);
+
+	if (device_index != mcp3001 && device_index != mcp3201) {
+		ret = spi_sync(adc->spi, &adc->msg);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = spi_read(adc->spi, &adc->rx_buf, sizeof(adc->rx_buf));
+		if (ret < 0)
+			return ret;
+	}
+
+	switch (device_index) {
+	case mcp3001:
+		return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+	case mcp3002:
+	case mcp3004:
+	case mcp3008:
+		return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+	case mcp3201:
+		return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+	case mcp3202:
+	case mcp3204:
+	case mcp3208:
+		return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mcp320x_read_raw() - IIO read_raw callback
+ *
+ * IIO_CHAN_INFO_RAW triggers a conversion and returns the sample as a
+ * plain integer.  IIO_CHAN_INFO_SCALE reports the vref regulator
+ * voltage in mV over 2^resolution (IIO_VAL_FRACTIONAL_LOG2).  Any other
+ * mask falls through and returns the initial -EINVAL.  adc->lock
+ * serializes access to the shared tx/rx buffers.
+ */
+static int mcp320x_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *channel, int *val,
+			    int *val2, long mask)
+{
+	struct mcp320x *adc = iio_priv(indio_dev);
+	int ret = -EINVAL;
+	int device_index = 0;
+
+	mutex_lock(&adc->lock);
+
+	device_index = spi_get_device_id(adc->spi)->driver_data;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = mcp320x_adc_conversion(adc, channel->address,
+			channel->differential, device_index);
+
+		if (ret < 0)
+			goto out;
+
+		*val = ret;
+		ret = IIO_VAL_INT;
+		break;
+
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_get_voltage(adc->reg);
+		if (ret < 0)
+			goto out;
+
+		/* convert regulator output voltage to mV */
+		*val = ret / 1000;
+		*val2 = adc->chip_info->resolution;
+		ret = IIO_VAL_FRACTIONAL_LOG2;
+		break;
+	}
+
+out:
+	mutex_unlock(&adc->lock);
+
+	return ret;
+}
+
+#define MCP320X_VOLTAGE_CHANNEL(num) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (num), \
+ .address = (num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+#define MCP320X_VOLTAGE_CHANNEL_DIFF(num) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (num * 2), \
+ .channel2 = (num * 2 + 1), \
+ .address = (num * 2), \
+ .differential = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+static const struct iio_chan_spec mcp3201_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
+static const struct iio_chan_spec mcp3202_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL(0),
+ MCP320X_VOLTAGE_CHANNEL(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+};
+
+static const struct iio_chan_spec mcp3204_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL(0),
+ MCP320X_VOLTAGE_CHANNEL(1),
+ MCP320X_VOLTAGE_CHANNEL(2),
+ MCP320X_VOLTAGE_CHANNEL(3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1),
+};
+
+static const struct iio_chan_spec mcp3208_channels[] = {
+ MCP320X_VOLTAGE_CHANNEL(0),
+ MCP320X_VOLTAGE_CHANNEL(1),
+ MCP320X_VOLTAGE_CHANNEL(2),
+ MCP320X_VOLTAGE_CHANNEL(3),
+ MCP320X_VOLTAGE_CHANNEL(4),
+ MCP320X_VOLTAGE_CHANNEL(5),
+ MCP320X_VOLTAGE_CHANNEL(6),
+ MCP320X_VOLTAGE_CHANNEL(7),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(2),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(3),
+};
+
+static const struct iio_info mcp320x_info = {
+ .read_raw = mcp320x_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
+ [mcp3001] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 10
+ },
+ [mcp3002] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 10
+ },
+ [mcp3004] = {
+ .channels = mcp3204_channels,
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 10
+ },
+ [mcp3008] = {
+ .channels = mcp3208_channels,
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 10
+ },
+ [mcp3201] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 12
+ },
+ [mcp3202] = {
+ .channels = mcp3202_channels,
+ .num_channels = ARRAY_SIZE(mcp3202_channels),
+ .resolution = 12
+ },
+ [mcp3204] = {
+ .channels = mcp3204_channels,
+ .num_channels = ARRAY_SIZE(mcp3204_channels),
+ .resolution = 12
+ },
+ [mcp3208] = {
+ .channels = mcp3208_channels,
+ .num_channels = ARRAY_SIZE(mcp3208_channels),
+ .resolution = 12
+ },
+};
+
+/*
+ * mcp320x_probe() - SPI probe: allocate the IIO device and register it
+ *
+ * Pre-builds a two-transfer SPI message (request byte out, two sample
+ * bytes in) so conversions need no per-call message setup, grabs and
+ * enables the "vref" regulator, and registers the IIO device.
+ *
+ * Fix: store the iio_dev as SPI driver data.  mcp320x_remove() fetches
+ * it with spi_get_drvdata(); without spi_set_drvdata() here, unbinding
+ * the driver dereferenced a NULL pointer.
+ */
+static int mcp320x_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct mcp320x *adc;
+	const struct mcp320x_chip_info *chip_info;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	adc = iio_priv(indio_dev);
+	adc->spi = spi;
+
+	/* Needed by mcp320x_remove(); see function comment above. */
+	spi_set_drvdata(spi, indio_dev);
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &mcp320x_info;
+
+	chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
+	indio_dev->channels = chip_info->channels;
+	indio_dev->num_channels = chip_info->num_channels;
+
+	adc->transfer[0].tx_buf = &adc->tx_buf;
+	adc->transfer[0].len = sizeof(adc->tx_buf);
+	adc->transfer[1].rx_buf = adc->rx_buf;
+	adc->transfer[1].len = sizeof(adc->rx_buf);
+
+	spi_message_init_with_transfers(&adc->msg, adc->transfer,
+					ARRAY_SIZE(adc->transfer));
+
+	adc->reg = devm_regulator_get(&spi->dev, "vref");
+	if (IS_ERR(adc->reg))
+		return PTR_ERR(adc->reg);
+
+	ret = regulator_enable(adc->reg);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&adc->lock);
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto reg_disable;
+
+	return 0;
+
+reg_disable:
+	regulator_disable(adc->reg);
+
+	return ret;
+}
+
+/*
+ * mcp320x_remove() - undo mcp320x_probe()
+ *
+ * NOTE(review): relies on spi_get_drvdata() returning the iio_dev, but
+ * the probe path shown here never calls spi_set_drvdata() - this will
+ * oops on unbind unless drvdata is set elsewhere; confirm.
+ */
+static int mcp320x_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct mcp320x *adc = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	regulator_disable(adc->reg);
+
+	return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mcp320x_dt_ids[] = {
+ {
+ .compatible = "mcp3001",
+ .data = &mcp320x_chip_infos[mcp3001],
+ }, {
+ .compatible = "mcp3002",
+ .data = &mcp320x_chip_infos[mcp3002],
+ }, {
+ .compatible = "mcp3004",
+ .data = &mcp320x_chip_infos[mcp3004],
+ }, {
+ .compatible = "mcp3008",
+ .data = &mcp320x_chip_infos[mcp3008],
+ }, {
+ .compatible = "mcp3201",
+ .data = &mcp320x_chip_infos[mcp3201],
+ }, {
+ .compatible = "mcp3202",
+ .data = &mcp320x_chip_infos[mcp3202],
+ }, {
+ .compatible = "mcp3204",
+ .data = &mcp320x_chip_infos[mcp3204],
+ }, {
+ .compatible = "mcp3208",
+ .data = &mcp320x_chip_infos[mcp3208],
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
+#endif
+
+static const struct spi_device_id mcp320x_id[] = {
+ { "mcp3001", mcp3001 },
+ { "mcp3002", mcp3002 },
+ { "mcp3004", mcp3004 },
+ { "mcp3008", mcp3008 },
+ { "mcp3201", mcp3201 },
+ { "mcp3202", mcp3202 },
+ { "mcp3204", mcp3204 },
+ { "mcp3208", mcp3208 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp320x_id);
+
+static struct spi_driver mcp320x_driver = {
+ .driver = {
+ .name = "mcp320x",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcp320x_probe,
+ .remove = mcp320x_remove,
+ .id_table = mcp320x_id,
+};
+module_spi_driver(mcp320x_driver);
+
+MODULE_AUTHOR("Oskar Andero <oskar.andero@gmail.com>");
+MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
new file mode 100644
index 00000000000000..51672256072bc8
--- /dev/null
+++ b/drivers/iio/adc/mcp3422.c
@@ -0,0 +1,426 @@
+/*
+ * mcp3422.c - driver for the Microchip mcp3422/3/4/6/7/8 chip family
+ *
+ * Copyright (C) 2013, Angelo Compagnucci
+ * Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/22226a.pdf
+ *
+ * This driver exports the value of analog input voltage to sysfs, the
+ * voltage unit is nV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Masks -- layout of the single MCP3422 configuration byte */
+#define MCP3422_CHANNEL_MASK 0x60
+#define MCP3422_PGA_MASK 0x03
+#define MCP3422_SRATE_MASK 0x0C
+#define MCP3422_SRATE_240 0x0
+#define MCP3422_SRATE_60 0x1
+#define MCP3422_SRATE_15 0x2
+#define MCP3422_SRATE_3 0x3
+#define MCP3422_PGA_1 0
+#define MCP3422_PGA_2 1
+#define MCP3422_PGA_4 2
+#define MCP3422_PGA_8 3
+#define MCP3422_CONT_SAMPLING 0x10
+
+/* extract fields from a config byte */
+#define MCP3422_CHANNEL(config) (((config) & MCP3422_CHANNEL_MASK) >> 5)
+#define MCP3422_PGA(config) ((config) & MCP3422_PGA_MASK)
+#define MCP3422_SAMPLE_RATE(config) (((config) & MCP3422_SRATE_MASK) >> 2)
+
+/* build config-byte fields from raw values */
+#define MCP3422_CHANNEL_VALUE(value) (((value) << 5) & MCP3422_CHANNEL_MASK)
+#define MCP3422_PGA_VALUE(value) ((value) & MCP3422_PGA_MASK)
+#define MCP3422_SAMPLE_RATE_VALUE(value) ((value << 2) & MCP3422_SRATE_MASK)
+
+/* one single-ended voltage channel with raw/scale and shared samp_freq */
+#define MCP3422_CHAN(_index) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ }
+
+/* LSB is in nV to eliminate floating point */
+static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
+
+/*
+ * Scales precomputed as rates_to_lsb[sample_rate] / (1 << pga),
+ * i.e. the per-rate LSB divided by the PGA gain (x1/x2/x4/x8).
+ */
+
+static const int mcp3422_scales[4][4] = {
+ { 1000000, 250000, 62500, 15625 },
+ { 500000 , 125000, 31250, 7812 },
+ { 250000 , 62500 , 15625, 3906 },
+ { 125000 , 31250 , 7812 , 1953 } };
+
+/* Constant msleep times (one conversion period) for data acquisitions */
+static const int mcp3422_read_times[4] = {
+ [MCP3422_SRATE_240] = 1000 / 240,
+ [MCP3422_SRATE_60] = 1000 / 60,
+ [MCP3422_SRATE_15] = 1000 / 15,
+ [MCP3422_SRATE_3] = 1000 / 3 };
+
+/* sample rates to integer conversion table */
+static const int mcp3422_sample_rates[4] = {
+ [MCP3422_SRATE_240] = 240,
+ [MCP3422_SRATE_60] = 60,
+ [MCP3422_SRATE_15] = 15,
+ [MCP3422_SRATE_3] = 3 };
+
+/* sample rate to sign-extension bit position (resolution minus one) */
+static const int mcp3422_sign_extend[4] = {
+ [MCP3422_SRATE_240] = 11,
+ [MCP3422_SRATE_60] = 13,
+ [MCP3422_SRATE_15] = 15,
+ [MCP3422_SRATE_3] = 17 };
+
+/* Client data (each client gets its own) */
+struct mcp3422 {
+ struct i2c_client *i2c; /* backing I2C client */
+ u8 id; /* variant id (2/3/4/6/7/8) from i2c_device_id */
+ u8 config; /* cached copy of last config byte written */
+ u8 pga[4]; /* per-channel PGA setting */
+ struct mutex lock; /* serialises config writes */
+};
+
+/*
+ * Write a new configuration byte to the chip and, on success, cache
+ * it in adc->config under adc->lock.  i2c_master_send() returns the
+ * number of bytes transferred, so a positive return means the write
+ * went through.  Returns 0 on success or a negative errno.
+ */
+static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
+{
+ int ret;
+
+ mutex_lock(&adc->lock);
+
+ ret = i2c_master_send(adc->i2c, &newconfig, 1);
+ if (ret > 0) {
+ adc->config = newconfig;
+ ret = 0;
+ }
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
+}
+
+/*
+ * Read one conversion into *value and the trailing config byte into
+ * *config.  In 18-bit mode (3.75 SPS) the sample occupies three data
+ * bytes, otherwise two.  The raw word is sign-extended according to
+ * the current rate's resolution (see mcp3422_sign_extend).
+ * NOTE(review): buf is decoded even when i2c_master_recv() fails;
+ * callers then see the negative return but *value is garbage --
+ * confirm every caller checks the return before using *value.
+ */
+static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
+{
+ int ret = 0;
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+ u8 buf[4] = {0, 0, 0, 0};
+ u32 temp;
+
+ if (sample_rate == MCP3422_SRATE_3) {
+ ret = i2c_master_recv(adc->i2c, buf, 4);
+ temp = buf[0] << 16 | buf[1] << 8 | buf[2];
+ *config = buf[3];
+ } else {
+ ret = i2c_master_recv(adc->i2c, buf, 3);
+ temp = buf[0] << 8 | buf[1];
+ *config = buf[2];
+ }
+
+ *value = sign_extend32(temp, mcp3422_sign_extend[sample_rate]);
+
+ return ret;
+}
+
+/*
+ * Select the requested channel if it is not the currently-configured
+ * one (restoring that channel's stored PGA setting), wait one full
+ * conversion period so the next sample belongs to the new channel,
+ * then read the result into *value.
+ */
+static int mcp3422_read_channel(struct mcp3422 *adc,
+ struct iio_chan_spec const *channel, int *value)
+{
+ int ret;
+ u8 config;
+ u8 req_channel = channel->channel;
+
+ if (req_channel != MCP3422_CHANNEL(adc->config)) {
+ config = adc->config;
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_PGA_MASK;
+ config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
+ ret = mcp3422_update_config(adc, config);
+ if (ret < 0)
+ return ret;
+ /* adc->config was just refreshed by update_config */
+ msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
+ }
+
+ return mcp3422_read(adc, value, &config);
+}
+
+/*
+ * IIO read_raw: RAW returns the signed sample, SCALE returns
+ * 0.<val2> in nano-volts per LSB (from mcp3422_scales), SAMP_FREQ
+ * returns the current rate in SPS.
+ */
+static int mcp3422_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *val1,
+ int *val2, long mask)
+{
+ struct mcp3422 *adc = iio_priv(iio);
+ int err;
+
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+ u8 pga = MCP3422_PGA(adc->config);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = mcp3422_read_channel(adc, channel, val1);
+ if (err < 0)
+ return -EINVAL;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+
+ *val1 = 0;
+ *val2 = mcp3422_scales[sample_rate][pga];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val1 = mcp3422_sample_rates[MCP3422_SAMPLE_RATE(adc->config)];
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: SCALE picks the PGA whose nano-volt scale matches
+ * val2 at the current sample rate; SAMP_FREQ maps an SPS value to a
+ * rate code (3 SPS only exists on id <= 4 parts, per the switch
+ * below).  Either path rewrites the full config byte via
+ * mcp3422_update_config().
+ */
+static int mcp3422_write_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int val1,
+ int val2, long mask)
+{
+ struct mcp3422 *adc = iio_priv(iio);
+ u8 temp;
+ u8 config = adc->config;
+ u8 req_channel = channel->channel;
+ u8 sample_rate = MCP3422_SAMPLE_RATE(config);
+ u8 i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ /* integer part of the scale must be zero (all scales < 1 V) */
+ if (val1 != 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mcp3422_scales[0]); i++) {
+ if (val2 == mcp3422_scales[sample_rate][i]) {
+ adc->pga[req_channel] = i;
+
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_PGA_MASK;
+ config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
+
+ return mcp3422_update_config(adc, config);
+ }
+ }
+ return -EINVAL;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (val1) {
+ case 240:
+ temp = MCP3422_SRATE_240;
+ break;
+ case 60:
+ temp = MCP3422_SRATE_60;
+ break;
+ case 15:
+ temp = MCP3422_SRATE_15;
+ break;
+ case 3:
+ /* 18-bit mode is absent on mcp3426/7/8 */
+ if (adc->id > 4)
+ return -EINVAL;
+ temp = MCP3422_SRATE_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config &= ~MCP3422_CHANNEL_MASK;
+ config |= MCP3422_CHANNEL_VALUE(req_channel);
+ config &= ~MCP3422_SRATE_MASK;
+ config |= MCP3422_SAMPLE_RATE_VALUE(temp);
+
+ return mcp3422_update_config(adc, config);
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * sysfs parse format: scale is VAL.VAL2 with VAL2 in nano units
+ * (matching mcp3422_scales); sampling frequency parses micro units.
+ * NOTE(review): read_raw reports SAMP_FREQ as plain IIO_VAL_INT --
+ * confirm the INT_PLUS_MICRO parse format here is intentional.
+ */
+static int mcp3422_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * sysfs: list the selectable sample rates; the 3 SPS (18-bit) rate
+ * is omitted for id > 4 variants (mcp3426/7/8).
+ */
+static ssize_t mcp3422_show_samp_freqs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcp3422 *adc = iio_priv(dev_to_iio_dev(dev));
+
+ if (adc->id > 4)
+ return sprintf(buf, "240 60 15\n");
+
+ return sprintf(buf, "240 60 15 3\n");
+}
+
+/*
+ * sysfs: list the four PGA scales available at the current sample
+ * rate, formatted as fractional volts with nine decimal places.
+ */
+static ssize_t mcp3422_show_scales(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcp3422 *adc = iio_priv(dev_to_iio_dev(dev));
+ u8 sample_rate = MCP3422_SAMPLE_RATE(adc->config);
+
+ return sprintf(buf, "0.%09u 0.%09u 0.%09u 0.%09u\n",
+ mcp3422_scales[sample_rate][0],
+ mcp3422_scales[sample_rate][1],
+ mcp3422_scales[sample_rate][2],
+ mcp3422_scales[sample_rate][3]);
+}
+
+static IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO,
+ mcp3422_show_samp_freqs, NULL, 0);
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
+ mcp3422_show_scales, NULL, 0);
+
+static struct attribute *mcp3422_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mcp3422_attribute_group = {
+ .attrs = mcp3422_attributes,
+};
+
+/* two-channel variants: mcp3422/3/6/7 */
+static const struct iio_chan_spec mcp3422_channels[] = {
+ MCP3422_CHAN(0),
+ MCP3422_CHAN(1),
+};
+
+/* four-channel variants: mcp3424/8 */
+static const struct iio_chan_spec mcp3424_channels[] = {
+ MCP3422_CHAN(0),
+ MCP3422_CHAN(1),
+ MCP3422_CHAN(2),
+ MCP3422_CHAN(3),
+};
+
+static const struct iio_info mcp3422_info = {
+ .read_raw = mcp3422_read_raw,
+ .write_raw = mcp3422_write_raw,
+ .write_raw_get_fmt = mcp3422_write_raw_get_fmt,
+ .attrs = &mcp3422_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * I2C probe: allocate the managed IIO device, pick the channel array
+ * from the variant id (2/3/6/7 -> two channels, 4/8 -> four), push a
+ * default configuration and register.  All allocations are devm, so
+ * no remove callback is needed.
+ */
+static int mcp3422_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct mcp3422 *adc;
+ int err;
+ u8 config;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->i2c = client;
+ adc->id = (u8)(id->driver_data);
+
+ mutex_init(&adc->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mcp3422_info;
+
+ switch (adc->id) {
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ indio_dev->channels = mcp3422_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mcp3422_channels);
+ break;
+ case 4:
+ case 8:
+ indio_dev->channels = mcp3424_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mcp3424_channels);
+ break;
+ }
+
+ /* meaningful default configuration */
+ config = (MCP3422_CONT_SAMPLING
+ | MCP3422_CHANNEL_VALUE(1)
+ | MCP3422_PGA_VALUE(MCP3422_PGA_1)
+ | MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
+ /* NOTE(review): return value ignored; a failed write leaves
+ * adc->config stale -- confirm this is acceptable at probe. */
+ mcp3422_update_config(adc, config);
+
+ err = devm_iio_device_register(&client->dev, indio_dev);
+ if (err < 0)
+ return err;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ return 0;
+}
+
+/* I2C id table; driver_data encodes the variant number (mcp342<n>). */
+static const struct i2c_device_id mcp3422_id[] = {
+ { "mcp3422", 2 },
+ { "mcp3423", 3 },
+ { "mcp3424", 4 },
+ { "mcp3426", 6 },
+ { "mcp3427", 7 },
+ { "mcp3428", 8 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp3422_id);
+
+#ifdef CONFIG_OF
+/* OF match; variant id still comes from the i2c id table match. */
+static const struct of_device_id mcp3422_of_match[] = {
+ { .compatible = "mcp3422" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp3422_of_match);
+#endif
+
+static struct i2c_driver mcp3422_driver = {
+ .driver = {
+ .name = "mcp3422",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mcp3422_of_match),
+ },
+ .probe = mcp3422_probe,
+ .id_table = mcp3422_id,
+};
+module_i2c_driver(mcp3422_driver);
+
+MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
+MODULE_DESCRIPTION("Microchip mcp3422/3/4/6/7/8 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
new file mode 100644
index 00000000000000..d095efe1ba149c
--- /dev/null
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -0,0 +1,173 @@
+/*
+ * MEN 16z188 Analog to Digital Converter
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mcb.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#define Z188_ADC_MAX_CHAN 8
+#define Z188_ADC_GAIN 0x0700000
+#define Z188_MODE_VOLTAGE BIT(27)
+#define Z188_CFG_AUTO 0x1
+#define Z188_CTRL_REG 0x40
+
+/* sample is shifted right twice then masked; bit 0 flags overrun */
+#define ADC_DATA(x) (((x) >> 2) & 0x7ffffc)
+#define ADC_OVR(x) ((x) & 0x1)
+
+/* per-device state */
+struct z188_adc {
+ struct resource *mem; /* MCB memory region claimed in probe */
+ void __iomem *base; /* ioremapped register base */
+};
+
+#define Z188_ADC_CHANNEL(idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec z188_adc_iio_channels[] = {
+ Z188_ADC_CHANNEL(0),
+ Z188_ADC_CHANNEL(1),
+ Z188_ADC_CHANNEL(2),
+ Z188_ADC_CHANNEL(3),
+ Z188_ADC_CHANNEL(4),
+ Z188_ADC_CHANNEL(5),
+ Z188_ADC_CHANNEL(6),
+ Z188_ADC_CHANNEL(7),
+};
+
+/*
+ * IIO read_raw: read the channel's data register (4-byte stride),
+ * fail with -EIO if the overrun flag is set, else extract the sample
+ * with ADC_DATA().
+ * NOTE(review): tmp is a u16 from readw() while ADC_DATA() masks 22
+ * bits -- confirm the intended register access width.
+ */
+static int z188_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ struct z188_adc *adc = iio_priv(iio_dev);
+ int ret;
+ u16 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ tmp = readw(adc->base + chan->channel * 4);
+
+ if (ADC_OVR(tmp)) {
+ dev_info(&iio_dev->dev,
+ "Oversampling error on ADC channel %d\n",
+ chan->channel);
+ return -EIO;
+ }
+ *val = ADC_DATA(tmp);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct iio_info z188_adc_info = {
+ .read_raw = &z188_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Enable automatic sampling in the control register, then put every
+ * channel into voltage mode with its gain field cleared.
+ * NOTE(review): channel config words are accessed at byte offsets
+ * 0..7 with 32-bit readl/writel -- confirm against the 16z188
+ * register map.
+ */
+static void men_z188_config_channels(void __iomem *addr)
+{
+ int i;
+ u32 cfg;
+ u32 ctl;
+
+ ctl = readl(addr + Z188_CTRL_REG);
+ ctl |= Z188_CFG_AUTO;
+ writel(ctl, addr + Z188_CTRL_REG);
+
+ for (i = 0; i < Z188_ADC_MAX_CHAN; i++) {
+ cfg = readl(addr + i);
+ cfg &= ~Z188_ADC_GAIN;
+ cfg |= Z188_MODE_VOLTAGE;
+ writel(cfg, addr + i);
+ }
+}
+
+/*
+ * MCB probe: allocate the managed IIO device, claim and map the MCB
+ * memory window, configure the channels and register with IIO.
+ * NOTE(review): if iio_device_register() fails the mapping and MCB
+ * region are not released -- confirm whether the MCB core cleans up.
+ */
+static int men_z188_probe(struct mcb_device *dev,
+ const struct mcb_device_id *id)
+{
+ struct z188_adc *adc;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+
+ indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ indio_dev->name = "z188-adc";
+ indio_dev->dev.parent = &dev->dev;
+ indio_dev->info = &z188_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = z188_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
+
+ mem = mcb_request_mem(dev, "z188-adc");
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ adc->base = ioremap(mem->start, resource_size(mem));
+ if (adc->base == NULL)
+ goto err;
+
+ men_z188_config_channels(adc->base);
+
+ adc->mem = mem;
+ mcb_set_drvdata(dev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+err:
+ /* ioremap failed: release the MCB region and report no device */
+ mcb_release_mem(mem);
+ return -ENXIO;
+}
+
+/* MCB remove: unregister, then undo the ioremap and region claim. */
+static void men_z188_remove(struct mcb_device *dev)
+{
+ struct iio_dev *indio_dev = mcb_get_drvdata(dev);
+ struct z188_adc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iounmap(adc->base);
+ mcb_release_mem(adc->mem);
+}
+
+/* MCB id: 0xbc == 188 decimal, the 16z188 IP core number. */
+static const struct mcb_device_id men_z188_ids[] = {
+ { .device = 0xbc },
+ { }
+};
+MODULE_DEVICE_TABLE(mcb, men_z188_ids);
+
+static struct mcb_driver men_z188_driver = {
+ .driver = {
+ .name = "z188-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z188_probe,
+ .remove = men_z188_remove,
+ .id_table = men_z188_ids,
+};
+module_mcb_driver(men_z188_driver);
+
+MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
+MODULE_ALIAS("mcb:16z188");
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
new file mode 100644
index 00000000000000..e525aa6475c42b
--- /dev/null
+++ b/drivers/iio/adc/nau7802.c
@@ -0,0 +1,582 @@
+/*
+ * Driver for the Nuvoton NAU7802 ADC
+ *
+ * Copyright 2013 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/log2.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Power-up control register and its bit fields */
+#define NAU7802_REG_PUCTRL 0x00
+#define NAU7802_PUCTRL_RR(x) (x << 0)
+#define NAU7802_PUCTRL_RR_BIT NAU7802_PUCTRL_RR(1)
+#define NAU7802_PUCTRL_PUD(x) (x << 1)
+#define NAU7802_PUCTRL_PUD_BIT NAU7802_PUCTRL_PUD(1)
+#define NAU7802_PUCTRL_PUA(x) (x << 2)
+#define NAU7802_PUCTRL_PUA_BIT NAU7802_PUCTRL_PUA(1)
+#define NAU7802_PUCTRL_PUR(x) (x << 3)
+#define NAU7802_PUCTRL_PUR_BIT NAU7802_PUCTRL_PUR(1)
+#define NAU7802_PUCTRL_CS(x) (x << 4)
+#define NAU7802_PUCTRL_CS_BIT NAU7802_PUCTRL_CS(1)
+#define NAU7802_PUCTRL_CR(x) (x << 5)
+#define NAU7802_PUCTRL_CR_BIT NAU7802_PUCTRL_CR(1)
+#define NAU7802_PUCTRL_AVDDS(x) (x << 7)
+#define NAU7802_PUCTRL_AVDDS_BIT NAU7802_PUCTRL_AVDDS(1)
+/* Control register 1: LDO voltage and gain fields */
+#define NAU7802_REG_CTRL1 0x01
+#define NAU7802_CTRL1_VLDO(x) (x << 3)
+#define NAU7802_CTRL1_GAINS(x) (x)
+#define NAU7802_CTRL1_GAINS_BITS 0x07
+/* Control register 2: channel select and conversion rate fields */
+#define NAU7802_REG_CTRL2 0x02
+#define NAU7802_CTRL2_CHS(x) (x << 7)
+#define NAU7802_CTRL2_CRS(x) (x << 4)
+#define NAU7802_SAMP_FREQ_320 0x07
+#define NAU7802_CTRL2_CHS_BIT NAU7802_CTRL2_CHS(1)
+/* 24-bit conversion result, MSB first */
+#define NAU7802_REG_ADC_B2 0x12
+#define NAU7802_REG_ADC_B1 0x13
+#define NAU7802_REG_ADC_B0 0x14
+#define NAU7802_REG_ADC_CTRL 0x15
+
+#define NAU7802_MIN_CONVERSIONS 6
+
+struct nau7802_state {
+ struct i2c_client *client; /* backing I2C client */
+ s32 last_value; /* most recent sign-extended sample */
+ struct mutex lock; /* serialises channel/rate changes */
+ struct mutex data_lock; /* protects the 3-byte sample readout */
+ u32 vref_mv; /* reference voltage from DT "nuvoton,vldo" */
+ u32 conversion_count; /* conversions seen since last reconfig */
+ u32 min_conversions;
+ u8 sample_rate; /* current CRS rate code */
+ u32 scale_avail[8]; /* nano-scale per gain, filled at probe */
+ struct completion value_ok; /* signalled once enough conversions */
+};
+
+#define NAU7802_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan), \
+ .scan_index = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) \
+}
+
+static const struct iio_chan_spec nau7802_chan_array[] = {
+ NAU7802_CHANNEL(0),
+ NAU7802_CHANNEL(1),
+};
+
+/* indexed by CRS code; NOTE(review): the repeated 10s look like
+ * placeholders for reserved codes -- confirm against the datasheet */
+static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80,
+ 10, 10, 10, 320};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320");
+
+static struct attribute *nau7802_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group nau7802_attribute_group = {
+ .attrs = nau7802_attributes,
+};
+
+/*
+ * Read-modify-write the CTRL1 gain bits under st->lock, resetting
+ * conversion_count so readers wait for fresh samples at the new gain.
+ * Returns the i2c result (negative errno on failure).
+ */
+static int nau7802_set_gain(struct nau7802_state *st, int gain)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ st->conversion_count = 0;
+
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL1);
+ if (ret < 0)
+ goto nau7802_sysfs_set_gain_out;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL1,
+ (ret & (~NAU7802_CTRL1_GAINS_BITS)) |
+ gain);
+
+nau7802_sysfs_set_gain_out:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+/*
+ * Assemble the 24-bit conversion result from registers B2..B0 (MSB
+ * first) under data_lock and sign-extend it into st->last_value.
+ * Returns the last i2c result: negative errno on failure, otherwise
+ * the final byte read.
+ */
+static int nau7802_read_conversion(struct nau7802_state *st)
+{
+ int data;
+
+ mutex_lock(&st->data_lock);
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B2);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value = data << 16;
+
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B1);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value |= data << 8;
+
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B0);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value |= data;
+
+ st->last_value = sign_extend32(st->last_value, 23);
+
+nau7802_read_conversion_out:
+ mutex_unlock(&st->data_lock);
+
+ return data;
+}
+
+/*
+ * Conversions are synchronised on the rising edge of
+ * NAU7802_PUCTRL_CS_BIT: re-assert the bit via read-modify-write of
+ * PUCTRL to restart the conversion cycle.
+ */
+static int nau7802_sync(struct nau7802_state *st)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ ret | NAU7802_PUCTRL_CS_BIT);
+
+ return ret;
+}
+
+/*
+ * Threaded IRQ handler for the end-of-conversion line: returns
+ * IRQ_NONE when the cycle-ready bit is not set (spurious interrupt),
+ * otherwise fetches the sample and, once enough conversions have
+ * accumulated, wakes the waiter via value_ok.
+ */
+static irqreturn_t nau7802_eoc_trigger(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int status;
+
+ status = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (status < 0)
+ return IRQ_HANDLED;
+
+ if (!(status & NAU7802_PUCTRL_CR_BIT))
+ return IRQ_NONE;
+
+ if (nau7802_read_conversion(st) < 0)
+ return IRQ_HANDLED;
+
+ /*
+ * Because there is actually only one ADC for both channels, we have to
+ * wait for enough conversions to happen before getting a significant
+ * value when changing channels and the values are far apart.
+ */
+ if (st->conversion_count < NAU7802_MIN_CONVERSIONS)
+ st->conversion_count++;
+ if (st->conversion_count >= NAU7802_MIN_CONVERSIONS)
+ complete_all(&st->value_ok);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt-driven read path: arm the completion, enable the EOC IRQ,
+ * restart conversions, flush any stale sample, then wait (up to 1 s,
+ * interruptible) for the handler to signal a settled value.  The IRQ
+ * is disabled again on every exit path.
+ */
+static int nau7802_read_irq(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ reinit_completion(&st->value_ok);
+ enable_irq(st->client->irq);
+
+ nau7802_sync(st);
+
+ /* read registers to ensure we flush everything */
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ goto read_chan_info_failure;
+
+ /* Wait for a conversion to finish */
+ ret = wait_for_completion_interruptible_timeout(&st->value_ok,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret < 0)
+ goto read_chan_info_failure;
+
+ disable_irq(st->client->irq);
+
+ *val = st->last_value;
+
+ return IIO_VAL_INT;
+
+read_chan_info_failure:
+ disable_irq(st->client->irq);
+
+ return ret;
+}
+
+/*
+ * Polling read path (no usable IRQ): restart conversions, then busy-
+ * wait on the cycle-ready bit -- sleeping 20 ms per poll, or 4 ms at
+ * the 320 SPS rate -- until NAU7802_MIN_CONVERSIONS samples have been
+ * consumed and last_value is trustworthy.
+ */
+static int nau7802_read_poll(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ nau7802_sync(st);
+
+ /* read registers to ensure we flush everything */
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Because there is actually only one ADC for both channels, we have to
+ * wait for enough conversions to happen before getting a significant
+ * value when changing channels and the values are far apart.
+ */
+ do {
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+
+ while (!(ret & NAU7802_PUCTRL_CR_BIT)) {
+ if (st->sample_rate != NAU7802_SAMP_FREQ_320)
+ msleep(20);
+ else
+ mdelay(4);
+ ret = i2c_smbus_read_byte_data(st->client,
+ NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ return ret;
+ if (st->conversion_count < NAU7802_MIN_CONVERSIONS)
+ st->conversion_count++;
+ } while (st->conversion_count < NAU7802_MIN_CONVERSIONS);
+
+ *val = st->last_value;
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * IIO read_raw: RAW switches CHS to the requested channel if needed
+ * (resetting conversion_count) and then reads via the IRQ or polling
+ * path; SCALE reports vref_mv / 2^(23 + gain) as FRACTIONAL_LOG2;
+ * SAMP_FREQ reports the current rate in SPS.
+ */
+static int nau7802_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ /*
+ * Select the channel to use
+ * - Channel 1 is value 0 in the CHS register
+ * - Channel 2 is value 1 in the CHS register
+ */
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ if (((ret & NAU7802_CTRL2_CHS_BIT) && !chan->channel) ||
+ (!(ret & NAU7802_CTRL2_CHS_BIT) &&
+ chan->channel)) {
+ st->conversion_count = 0;
+ ret = i2c_smbus_write_byte_data(st->client,
+ NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CHS(chan->channel) |
+ NAU7802_CTRL2_CRS(st->sample_rate));
+
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ }
+
+ if (st->client->irq)
+ ret = nau7802_read_irq(indio_dev, chan, val);
+ else
+ ret = nau7802_read_poll(indio_dev, chan, val);
+
+ mutex_unlock(&st->lock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We have 24 bits of signed data, that means 23 bits of data
+ * plus the sign bit
+ */
+ *val = st->vref_mv;
+ *val2 = 23 + (ret & NAU7802_CTRL1_GAINS_BITS);
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = nau7802_sample_freq_avail[st->sample_rate];
+ *val2 = 0;
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: SCALE matches val2 against the precomputed
+ * scale_avail table and programs that gain index; SAMP_FREQ matches
+ * val against the rate table (first match wins) and programs the CRS
+ * code, resetting conversion_count.
+ */
+static int nau7802_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ if (val2 == st->scale_avail[i])
+ return nau7802_set_gain(st, i);
+
+ break;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(nau7802_sample_freq_avail); i++)
+ if (val == nau7802_sample_freq_avail[i]) {
+ mutex_lock(&st->lock);
+ st->sample_rate = i;
+ st->conversion_count = 0;
+ ret = i2c_smbus_write_byte_data(st->client,
+ NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CRS(st->sample_rate));
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/* All writable attributes parse as VAL.VAL2 with VAL2 in nano units,
+ * matching the nano-volt entries in scale_avail. */
+static int nau7802_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info nau7802_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &nau7802_read_raw,
+ .write_raw = &nau7802_write_raw,
+ .write_raw_get_fmt = nau7802_write_raw_get_fmt,
+ .attrs = &nau7802_attribute_group,
+};
+
+/*
+ * I2C probe: reset and power up the chip, configure the LDO/AVDD
+ * source from the DT "nuvoton,vldo" property, precompute the gain
+ * scale table, then register with IIO using either the EOC interrupt
+ * (kept disabled until a read needs it) or polling as fallback.
+ */
+static int nau7802_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct nau7802_state *st;
+ struct device_node *np = client->dev.of_node;
+ int i, ret;
+ u8 data;
+ u32 tmp = 0;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev, "No device tree node available.\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &nau7802_info;
+
+ st->client = client;
+
+ /* Reset the device */
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ NAU7802_PUCTRL_RR_BIT);
+ if (ret < 0)
+ return ret;
+
+ /* Enter normal operation mode */
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ NAU7802_PUCTRL_PUD_BIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * After about 200 usecs, the device should be ready and then
+ * the Power Up bit will be set to 1. If not, wait for it.
+ */
+ udelay(210);
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ /* NOTE(review): when the power-up-ready bit is clear this
+ * returns the (positive) register value, so probe "succeeds"
+ * from the caller's perspective -- confirm intended. */
+ if (!(ret & NAU7802_PUCTRL_PUR_BIT))
+ return ret;
+
+ of_property_read_u32(np, "nuvoton,vldo", &tmp);
+ st->vref_mv = tmp;
+
+ data = NAU7802_PUCTRL_PUD_BIT | NAU7802_PUCTRL_PUA_BIT |
+ NAU7802_PUCTRL_CS_BIT;
+ /* use the internal LDO (AVDDS) when a vldo >= 2400 mV is given */
+ if (tmp >= 2400)
+ data |= NAU7802_PUCTRL_AVDDS_BIT;
+
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL, data);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_ADC_CTRL, 0x30);
+ if (ret < 0)
+ return ret;
+
+ if (tmp >= 2400) {
+ /* VLDO field encodes (4500 - vldo_mv) / 300 */
+ data = NAU7802_CTRL1_VLDO((4500 - tmp) / 300);
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL1,
+ data);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Populate available ADC input ranges */
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ st->scale_avail[i] = (((u64)st->vref_mv) * 1000000000ULL)
+ >> (23 + i);
+
+ init_completion(&st->value_ok);
+
+ /*
+ * The ADC fires continuously and we can't do anything about
+ * it. So we need to have the IRQ disabled by default, and we
+ * will enable them back when we will need them..
+ */
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ nau7802_eoc_trigger,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ client->dev.driver->name,
+ indio_dev);
+ if (ret) {
+ /*
+ * What may happen here is that our IRQ controller is
+ * not able to get level interrupt but this is required
+ * by this ADC as when going over 40 sample per second,
+ * the interrupt line may stay high between conversions.
+ * So, we continue no matter what but we switch to
+ * polling mode.
+ */
+ dev_info(&client->dev,
+ "Failed to allocate IRQ, using polling mode\n");
+ client->irq = 0;
+ } else
+ disable_irq(client->irq);
+ }
+
+ if (!client->irq) {
+ /*
+ * We are polling, use the fastest sample rate by
+ * default
+ */
+ st->sample_rate = NAU7802_SAMP_FREQ_320;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CRS(st->sample_rate));
+ if (ret)
+ goto error_free_irq;
+ }
+
+ /* Setup the ADC channels available on the board */
+ indio_dev->num_channels = ARRAY_SIZE(nau7802_chan_array);
+ indio_dev->channels = nau7802_chan_array;
+
+ mutex_init(&st->lock);
+ mutex_init(&st->data_lock);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't register the device.\n");
+ goto error_device_register;
+ }
+
+ return 0;
+
+error_device_register:
+ mutex_destroy(&st->lock);
+ mutex_destroy(&st->data_lock);
+error_free_irq:
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ return ret;
+}
+
+/*
+ * I2C remove: unregister the IIO device, destroy the mutexes and
+ * release the EOC interrupt if one was requested in probe.
+ */
+static int nau7802_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct nau7802_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_destroy(&st->lock);
+ mutex_destroy(&st->data_lock);
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id nau7802_i2c_id[] = {
+ { "nau7802", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id);
+
+static const struct of_device_id nau7802_dt_ids[] = {
+ { .compatible = "nuvoton,nau7802" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
+
+static struct i2c_driver nau7802_driver = {
+ .probe = nau7802_probe,
+ .remove = nau7802_remove,
+ .id_table = nau7802_i2c_id,
+ .driver = {
+ .name = "nau7802",
+ .of_match_table = nau7802_dt_ids,
+ },
+};
+
+module_i2c_driver(nau7802_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Nuvoton NAU7802 ADC Driver");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
new file mode 100644
index 00000000000000..b9666f2f5e514f
--- /dev/null
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* IADC register and bit definition */
+#define IADC_REVISION2 0x1
+#define IADC_REVISION2_SUPPORTED_IADC 1
+
+#define IADC_PERPH_TYPE 0x4
+#define IADC_PERPH_TYPE_ADC 8
+
+#define IADC_PERPH_SUBTYPE 0x5
+#define IADC_PERPH_SUBTYPE_IADC 3
+
+#define IADC_STATUS1 0x8
+#define IADC_STATUS1_OP_MODE 4
+#define IADC_STATUS1_REQ_STS BIT(1)
+#define IADC_STATUS1_EOC BIT(0)
+#define IADC_STATUS1_REQ_STS_EOC_MASK 0x3
+
+#define IADC_MODE_CTL 0x40
+#define IADC_OP_MODE_SHIFT 3
+#define IADC_OP_MODE_NORMAL 0
+#define IADC_TRIM_EN BIT(0)
+
+#define IADC_EN_CTL1 0x46
+#define IADC_EN_CTL1_SET BIT(7)
+
+#define IADC_CH_SEL_CTL 0x48
+
+#define IADC_DIG_PARAM 0x50
+#define IADC_DIG_DEC_RATIO_SEL_SHIFT 2
+
+#define IADC_HW_SETTLE_DELAY 0x51
+
+#define IADC_CONV_REQ 0x52
+#define IADC_CONV_REQ_SET BIT(7)
+
+#define IADC_FAST_AVG_CTL 0x5a
+#define IADC_FAST_AVG_EN 0x5b
+#define IADC_FAST_AVG_EN_SET BIT(7)
+
+#define IADC_PERH_RESET_CTL3 0xda
+#define IADC_FOLLOW_WARM_RB BIT(2)
+
+#define IADC_DATA 0x60 /* 16 bits */
+
+#define IADC_SEC_ACCESS 0xd0
+#define IADC_SEC_ACCESS_DATA 0xa5
+
+#define IADC_NOMINAL_RSENSE 0xf4
+#define IADC_NOMINAL_RSENSE_SIGN_MASK BIT(7)
+
+#define IADC_REF_GAIN_MICRO_VOLTS 17857
+
+#define IADC_INT_RSENSE_DEVIATION 15625 /* nano Ohms per bit */
+
+#define IADC_INT_RSENSE_IDEAL_VALUE 10000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_VALUE 7800 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_GF 9000 /* micro Ohms */
+#define IADC_INT_RSENSE_DEFAULT_SMIC 9700 /* micro Ohms */
+
+#define IADC_CONV_TIME_MIN_US 2000
+#define IADC_CONV_TIME_MAX_US 2100
+
+#define IADC_DEF_PRESCALING 0 /* 1:1 */
+#define IADC_DEF_DECIMATION 0 /* 512 */
+#define IADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define IADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+
+/* IADC channel list */
+#define IADC_INT_RSENSE 0
+#define IADC_EXT_RSENSE 1
+#define IADC_GAIN_17P857MV 3
+#define IADC_EXT_OFFSET_CSP_CSN 5
+#define IADC_INT_OFFSET_CSP2_CSN2 6
+
+/**
+ * struct iadc_chip - IADC Current ADC device structure.
+ * @regmap: regmap for register read/write.
+ * @dev: This device pointer.
+ * @base: base offset for the ADC peripheral.
+ * @rsense: Values of the internal and external sense resistor in micro Ohms.
+ * @poll_eoc: Poll for end of conversion instead of waiting for IRQ.
+ * @offset: Raw offset values for the internal and external channels.
+ * @gain: Raw gain of the channels.
+ * @lock: ADC lock for access to the peripheral.
+ * @complete: ADC notification after end of conversion interrupt is received.
+ */
+struct iadc_chip {
+	struct regmap	*regmap;
+	struct device	*dev;
+	u16		base;
+	bool		poll_eoc;
+	u32		rsense[2];
+	u16		offset[2];
+	u16		gain;
+	struct mutex	lock;
+	struct completion complete;
+};
+
+/* Read one 8-bit IADC register at @offset (relative to iadc->base) into
+ * @data. Returns 0 or a negative regmap error. */
+static int iadc_read(struct iadc_chip *iadc, u16 offset, u8 *data)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(iadc->regmap, iadc->base + offset, &val);
+	if (ret < 0)
+		return ret;
+
+	*data = val;
+	return 0;
+}
+
+/* Write one 8-bit IADC register at @offset (relative to iadc->base). */
+static int iadc_write(struct iadc_chip *iadc, u16 offset, u8 data)
+{
+	return regmap_write(iadc->regmap, iadc->base + offset, data);
+}
+
+/*
+ * Make the peripheral follow a warm reset by setting IADC_FOLLOW_WARM_RB.
+ * The target register is protected: each access to IADC_PERH_RESET_CTL3
+ * must be preceded by writing the unlock byte to IADC_SEC_ACCESS, hence
+ * the unlock/read/unlock/write sequence below.
+ */
+static int iadc_reset(struct iadc_chip *iadc)
+{
+	u8 data;
+	int ret;
+
+	ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+	if (ret < 0)
+		return ret;
+
+	ret = iadc_read(iadc, IADC_PERH_RESET_CTL3, &data);
+	if (ret < 0)
+		return ret;
+
+	/* the read consumed the unlock; unlock again before the write */
+	ret = iadc_write(iadc, IADC_SEC_ACCESS, IADC_SEC_ACCESS_DATA);
+	if (ret < 0)
+		return ret;
+
+	data |= IADC_FOLLOW_WARM_RB;
+
+	return iadc_write(iadc, IADC_PERH_RESET_CTL3, data);
+}
+
+/* Enable (@state == true) or disable the ADC via the EN_CTL1 register. */
+static int iadc_set_state(struct iadc_chip *iadc, bool state)
+{
+	return iadc_write(iadc, IADC_EN_CTL1, state ? IADC_EN_CTL1_SET : 0);
+}
+
+/*
+ * Dump the main control/status registers to the kernel log for debugging.
+ * Called on conversion timeout; silently bails out if any read fails since
+ * this is diagnostic-only output.
+ */
+static void iadc_status_show(struct iadc_chip *iadc)
+{
+	u8 mode, sta1, chan, dig, en, req;
+	int ret;
+
+	ret = iadc_read(iadc, IADC_MODE_CTL, &mode);
+	if (ret < 0)
+		return;
+
+	ret = iadc_read(iadc, IADC_DIG_PARAM, &dig);
+	if (ret < 0)
+		return;
+
+	ret = iadc_read(iadc, IADC_CH_SEL_CTL, &chan);
+	if (ret < 0)
+		return;
+
+	ret = iadc_read(iadc, IADC_CONV_REQ, &req);
+	if (ret < 0)
+		return;
+
+	ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+	if (ret < 0)
+		return;
+
+	ret = iadc_read(iadc, IADC_EN_CTL1, &en);
+	if (ret < 0)
+		return;
+
+	dev_err(iadc->dev,
+		"mode:%02x en:%02x chan:%02x dig:%02x req:%02x sta1:%02x\n",
+		mode, en, chan, dig, req, sta1);
+}
+
+/*
+ * Program mode, channel, decimation, settle time and averaging, then enable
+ * the ADC and request a conversion on @channel. The register-write order
+ * follows the hardware programming sequence and must not be rearranged.
+ * Returns 0 on success or a negative error code.
+ */
+static int iadc_configure(struct iadc_chip *iadc, int channel)
+{
+	u8 decim, mode;
+	int ret;
+
+	/* Mode selection */
+	mode = (IADC_OP_MODE_NORMAL << IADC_OP_MODE_SHIFT) | IADC_TRIM_EN;
+	ret = iadc_write(iadc, IADC_MODE_CTL, mode);
+	if (ret < 0)
+		return ret;
+
+	/* Channel selection */
+	ret = iadc_write(iadc, IADC_CH_SEL_CTL, channel);
+	if (ret < 0)
+		return ret;
+
+	/* Digital parameter setup */
+	decim = IADC_DEF_DECIMATION << IADC_DIG_DEC_RATIO_SEL_SHIFT;
+	ret = iadc_write(iadc, IADC_DIG_PARAM, decim);
+	if (ret < 0)
+		return ret;
+
+	/* HW settle time delay */
+	ret = iadc_write(iadc, IADC_HW_SETTLE_DELAY, IADC_DEF_HW_SETTLE_TIME);
+	if (ret < 0)
+		return ret;
+
+	ret = iadc_write(iadc, IADC_FAST_AVG_CTL, IADC_DEF_AVG_SAMPLES);
+	if (ret < 0)
+		return ret;
+
+	/* fast averaging is only enabled when more than one sample is taken */
+	if (IADC_DEF_AVG_SAMPLES)
+		ret = iadc_write(iadc, IADC_FAST_AVG_EN, IADC_FAST_AVG_EN_SET);
+	else
+		ret = iadc_write(iadc, IADC_FAST_AVG_EN, 0);
+
+	if (ret < 0)
+		return ret;
+
+	/* arm the completion before triggering, so the IRQ can't be missed */
+	if (!iadc->poll_eoc)
+		reinit_completion(&iadc->complete);
+
+	ret = iadc_set_state(iadc, true);
+	if (ret < 0)
+		return ret;
+
+	/* Request conversion */
+	return iadc_write(iadc, IADC_CONV_REQ, IADC_CONV_REQ_SET);
+}
+
+/*
+ * Poll STATUS1 until end-of-conversion, for at most @interval_us
+ * microseconds (checked in IADC_CONV_TIME_MIN_US steps). Returns 0 on EOC,
+ * -ETIMEDOUT (after dumping register state) if the conversion never ends.
+ */
+static int iadc_poll_wait_eoc(struct iadc_chip *iadc, unsigned int interval_us)
+{
+	unsigned int count, retry;
+	int ret;
+	u8 sta1;
+
+	retry = interval_us / IADC_CONV_TIME_MIN_US;
+
+	for (count = 0; count < retry; count++) {
+		ret = iadc_read(iadc, IADC_STATUS1, &sta1);
+		if (ret < 0)
+			return ret;
+
+		/* done only when EOC is set and no request is pending */
+		sta1 &= IADC_STATUS1_REQ_STS_EOC_MASK;
+		if (sta1 == IADC_STATUS1_EOC)
+			return 0;
+
+		usleep_range(IADC_CONV_TIME_MIN_US, IADC_CONV_TIME_MAX_US);
+	}
+
+	iadc_status_show(iadc);
+
+	return -ETIMEDOUT;
+}
+
+/* Read the 16-bit conversion result (two bytes at IADC_DATA) into @data. */
+static int iadc_read_result(struct iadc_chip *iadc, u16 *data)
+{
+	return regmap_bulk_read(iadc->regmap, iadc->base + IADC_DATA, data, 2);
+}
+
+/*
+ * Run one conversion on @chan and store the raw 16-bit result in @data.
+ * Waits for end of conversion either by polling or via the EOC interrupt,
+ * and always disables the ADC again before returning. Returns 0 on success
+ * or a negative error code.
+ */
+static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
+{
+	unsigned int wait;
+	int ret;
+
+	ret = iadc_configure(iadc, chan);
+	if (ret < 0)
+		goto exit;
+
+	/* worst-case conversion time in microseconds, with 2x margin */
+	wait = BIT(IADC_DEF_AVG_SAMPLES) * IADC_CONV_TIME_MIN_US * 2;
+
+	if (iadc->poll_eoc) {
+		ret = iadc_poll_wait_eoc(iadc, wait);
+	} else {
+		/* wait_for_completion_timeout() expects jiffies, not us */
+		ret = wait_for_completion_timeout(&iadc->complete,
+						  usecs_to_jiffies(wait));
+		if (!ret)
+			ret = -ETIMEDOUT;
+		else
+			/* double check conversion status */
+			ret = iadc_poll_wait_eoc(iadc, IADC_CONV_TIME_MIN_US);
+	}
+
+	if (!ret)
+		ret = iadc_read_result(iadc, data);
+exit:
+	/* power the ADC back down whether or not the conversion succeeded */
+	iadc_set_state(iadc, false);
+	if (ret < 0)
+		dev_err(iadc->dev, "conversion failed\n");
+
+	return ret;
+}
+
+/*
+ * IIO read_raw callback. For IIO_CHAN_INFO_RAW it performs a conversion on
+ * the requested rsense channel and converts the raw ADC code to a current
+ * in micro amps using the calibrated offset, gain and sense resistance.
+ * For IIO_CHAN_INFO_SCALE it reports 0.001 (uA -> mA).
+ */
+static int iadc_read_raw(struct iio_dev *indio_dev,
+			 struct iio_chan_spec const *chan,
+			 int *val, int *val2, long mask)
+{
+	struct iadc_chip *iadc = iio_priv(indio_dev);
+	s32 isense_ua, vsense_uv;
+	u16 adc_raw, vsense_raw;
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&iadc->lock);
+		ret = iadc_do_conversion(iadc, chan->channel, &adc_raw);
+		mutex_unlock(&iadc->lock);
+		if (ret < 0)
+			return ret;
+
+		/* offset-corrected code, scaled by the reference gain */
+		vsense_raw = adc_raw - iadc->offset[chan->channel];
+
+		vsense_uv = vsense_raw * IADC_REF_GAIN_MICRO_VOLTS;
+		vsense_uv /= (s32)iadc->gain - iadc->offset[chan->channel];
+
+		/* I = V / R, rsense is in micro Ohms */
+		isense_ua = vsense_uv / iadc->rsense[chan->channel];
+
+		dev_dbg(iadc->dev, "off %d gain %d adc %d %duV I %duA\n",
+			iadc->offset[chan->channel], iadc->gain,
+			adc_raw, vsense_uv, isense_ua);
+
+		*val = isense_ua;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = 1000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info iadc_info = {
+ .read_raw = iadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/* EOC interrupt handler: wake the waiter in iadc_do_conversion(). */
+static irqreturn_t iadc_isr(int irq, void *dev_id)
+{
+	struct iadc_chip *iadc = dev_id;
+
+	complete(&iadc->complete);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Calibrate the converter: measure the reference gain channel and the
+ * internal/external offset channels. A measured offset equal to the gain
+ * would make the scaling denominator in iadc_read_raw() zero, so that is
+ * rejected as a calibration failure.
+ */
+static int iadc_update_offset(struct iadc_chip *iadc)
+{
+	int ret;
+
+	ret = iadc_do_conversion(iadc, IADC_GAIN_17P857MV, &iadc->gain);
+	if (ret < 0)
+		return ret;
+
+	ret = iadc_do_conversion(iadc, IADC_INT_OFFSET_CSP2_CSN2,
+				 &iadc->offset[IADC_INT_RSENSE]);
+	if (ret < 0)
+		return ret;
+
+	if (iadc->gain == iadc->offset[IADC_INT_RSENSE]) {
+		dev_err(iadc->dev, "error: internal offset == gain %d\n",
+			iadc->gain);
+		return -EINVAL;
+	}
+
+	ret = iadc_do_conversion(iadc, IADC_EXT_OFFSET_CSP_CSN,
+				 &iadc->offset[IADC_EXT_RSENSE]);
+	if (ret < 0)
+		return ret;
+
+	if (iadc->gain == iadc->offset[IADC_EXT_RSENSE]) {
+		dev_err(iadc->dev, "error: external offset == gain %d\n",
+			iadc->gain);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify the peripheral type, subtype and revision registers identify a
+ * supported IADC block. Returns 0 if supported, -EINVAL otherwise.
+ */
+static int iadc_version_check(struct iadc_chip *iadc)
+{
+	u8 val;
+	int ret;
+
+	ret = iadc_read(iadc, IADC_PERPH_TYPE, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val < IADC_PERPH_TYPE_ADC) {
+		dev_err(iadc->dev, "%d is not ADC\n", val);
+		return -EINVAL;
+	}
+
+	ret = iadc_read(iadc, IADC_PERPH_SUBTYPE, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val < IADC_PERPH_SUBTYPE_IADC) {
+		dev_err(iadc->dev, "%d is not IADC\n", val);
+		return -EINVAL;
+	}
+
+	ret = iadc_read(iadc, IADC_REVISION2, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val < IADC_REVISION2_SUPPORTED_IADC) {
+		dev_err(iadc->dev, "revision %d not supported\n", val);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Determine the external sense resistance from the device tree (falling
+ * back to the ideal 10 mOhm value) and compute the trimmed internal sense
+ * resistance from the factory deviation fuse. Both results are stored in
+ * iadc->rsense[] in micro Ohms.
+ */
+static int iadc_rsense_read(struct iadc_chip *iadc, struct device_node *node)
+{
+	int ret, sign, int_sense;
+	u8 deviation;
+
+	ret = of_property_read_u32(node, "qcom,external-resistor-micro-ohms",
+				   &iadc->rsense[IADC_EXT_RSENSE]);
+	if (ret < 0)
+		iadc->rsense[IADC_EXT_RSENSE] = IADC_INT_RSENSE_IDEAL_VALUE;
+
+	/* a zero resistance would divide by zero in iadc_read_raw() */
+	if (!iadc->rsense[IADC_EXT_RSENSE]) {
+		dev_err(iadc->dev, "external resistor can't be zero Ohms");
+		return -EINVAL;
+	}
+
+	ret = iadc_read(iadc, IADC_NOMINAL_RSENSE, &deviation);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Deviation value stored is an offset from 10 milli Ohms, bit 7 is
+	 * the sign, the remaining bits have an LSB of 15625 nano Ohms.
+	 */
+	sign = (deviation & IADC_NOMINAL_RSENSE_SIGN_MASK) ? -1 : 1;
+
+	deviation &= ~IADC_NOMINAL_RSENSE_SIGN_MASK;
+
+	/* Scale it to nano Ohms */
+	int_sense = IADC_INT_RSENSE_IDEAL_VALUE * 1000;
+	int_sense += sign * deviation * IADC_INT_RSENSE_DEVIATION;
+	int_sense /= 1000; /* micro Ohms */
+
+	iadc->rsense[IADC_INT_RSENSE] = int_sense;
+	return 0;
+}
+
+static const struct iio_chan_spec iadc_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "INTERNAL_RSENSE",
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+ {
+ .type = IIO_CURRENT,
+ .datasheet_name = "EXTERNAL_RSENSE",
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ },
+};
+
+/*
+ * Probe: map the SPMI register base from DT, verify the hardware revision,
+ * read the sense resistances, reset the block, set up either IRQ-driven or
+ * polled end-of-conversion handling, calibrate, and register with IIO.
+ * All resources are devm-managed, so no explicit remove is needed.
+ */
+static int iadc_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct iio_dev *indio_dev;
+	struct iadc_chip *iadc;
+	int ret, irq_eoc;
+	u32 res;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*iadc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	iadc = iio_priv(indio_dev);
+	iadc->dev = dev;
+
+	/* registers are accessed through the parent SPMI PMIC's regmap */
+	iadc->regmap = dev_get_regmap(dev->parent, NULL);
+	if (!iadc->regmap)
+		return -ENODEV;
+
+	init_completion(&iadc->complete);
+	mutex_init(&iadc->lock);
+
+	ret = of_property_read_u32(node, "reg", &res);
+	if (ret < 0)
+		return -ENODEV;
+
+	iadc->base = res;
+
+	ret = iadc_version_check(iadc);
+	if (ret < 0)
+		return -ENODEV;
+
+	ret = iadc_rsense_read(iadc, node);
+	if (ret < 0)
+		return -ENODEV;
+
+	dev_dbg(iadc->dev, "sense resistors %d and %d micro Ohm\n",
+		iadc->rsense[IADC_INT_RSENSE],
+		iadc->rsense[IADC_EXT_RSENSE]);
+
+	irq_eoc = platform_get_irq(pdev, 0);
+	if (irq_eoc == -EPROBE_DEFER)
+		return irq_eoc;
+
+	/* no EOC interrupt available: fall back to polling STATUS1 */
+	if (irq_eoc < 0)
+		iadc->poll_eoc = true;
+
+	ret = iadc_reset(iadc);
+	if (ret < 0) {
+		dev_err(dev, "reset failed\n");
+		return ret;
+	}
+
+	if (!iadc->poll_eoc) {
+		ret = devm_request_irq(dev, irq_eoc, iadc_isr, 0,
+				       "spmi-iadc", iadc);
+		if (!ret)
+			enable_irq_wake(irq_eoc);
+		else
+			return ret;
+	} else {
+		device_init_wakeup(iadc->dev, 1);
+	}
+
+	ret = iadc_update_offset(iadc);
+	if (ret < 0) {
+		dev_err(dev, "failed offset calibration\n");
+		return ret;
+	}
+
+	indio_dev->dev.parent = dev;
+	indio_dev->dev.of_node = node;
+	indio_dev->name = pdev->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &iadc_info;
+	indio_dev->channels = iadc_channels;
+	indio_dev->num_channels = ARRAY_SIZE(iadc_channels);
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id iadc_match_table[] = {
+ { .compatible = "qcom,spmi-iadc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, iadc_match_table);
+
+static struct platform_driver iadc_driver = {
+ .driver = {
+ .name = "qcom-spmi-iadc",
+ .of_match_table = iadc_match_table,
+ },
+ .probe = iadc_probe,
+};
+
+module_platform_driver(iadc_driver);
+
+MODULE_ALIAS("platform:qcom-spmi-iadc");
+MODULE_DESCRIPTION("Qualcomm SPMI PMIC current ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
new file mode 100644
index 00000000000000..8d4e019ea4ca5d
--- /dev/null
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -0,0 +1,351 @@
+/*
+ * Rockchip Successive Approximation Register (SAR) A/D Converter
+ * Copyright (C) 2014 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+
+#define SARADC_DATA 0x00
+
+#define SARADC_STAS 0x04
+#define SARADC_STAS_BUSY BIT(0)
+
+#define SARADC_CTRL 0x08
+#define SARADC_CTRL_IRQ_STATUS BIT(6)
+#define SARADC_CTRL_IRQ_ENABLE BIT(5)
+#define SARADC_CTRL_POWER_CTRL BIT(3)
+#define SARADC_CTRL_CHN_MASK 0x7
+
+#define SARADC_DLY_PU_SOC 0x0c
+#define SARADC_DLY_PU_SOC_MASK 0x3f
+
+#define SARADC_TIMEOUT msecs_to_jiffies(100)
+
+struct rockchip_saradc_data {
+ int num_bits;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ unsigned long clk_rate;
+};
+
+struct rockchip_saradc {
+ void __iomem *regs;
+ struct clk *pclk;
+ struct clk *clk;
+ struct completion completion;
+ struct regulator *vref;
+ const struct rockchip_saradc_data *data;
+ u16 last_val;
+};
+
+/*
+ * IIO read_raw callback. RAW triggers a single conversion on the requested
+ * channel and waits (with timeout) for the ISR to latch the result into
+ * info->last_val. SCALE is derived from the vref regulator voltage and the
+ * converter resolution.
+ */
+static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
+				    struct iio_chan_spec const *chan,
+				    int *val, int *val2, long mask)
+{
+	struct rockchip_saradc *info = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+
+		reinit_completion(&info->completion);
+
+		/* 8 clock periods as delay between power up and start cmd */
+		writel_relaxed(8, info->regs + SARADC_DLY_PU_SOC);
+
+		/* Select the channel to be used and trigger conversion */
+		writel(SARADC_CTRL_POWER_CTRL
+				| (chan->channel & SARADC_CTRL_CHN_MASK)
+				| SARADC_CTRL_IRQ_ENABLE,
+		       info->regs + SARADC_CTRL);
+
+		if (!wait_for_completion_timeout(&info->completion,
+						 SARADC_TIMEOUT)) {
+			/* power down the ADC before giving up */
+			writel_relaxed(0, info->regs + SARADC_CTRL);
+			mutex_unlock(&indio_dev->mlock);
+			return -ETIMEDOUT;
+		}
+
+		*val = info->last_val;
+		mutex_unlock(&indio_dev->mlock);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_get_voltage(info->vref);
+		if (ret < 0) {
+			dev_err(&indio_dev->dev, "failed to get voltage\n");
+			return ret;
+		}
+
+		/* scale = vref(mV) / 2^num_bits */
+		*val = ret / 1000;
+		*val2 = info->data->num_bits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Conversion-done interrupt: latch the masked result, clear the IRQ and
+ * power the converter down, then wake the waiter in read_raw.
+ */
+static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
+{
+	struct rockchip_saradc *info = (struct rockchip_saradc *)dev_id;
+
+	/* Read value */
+	info->last_val = readl_relaxed(info->regs + SARADC_DATA);
+	info->last_val &= GENMASK(info->data->num_bits - 1, 0);
+
+	/* Clear irq & power down adc */
+	writel_relaxed(0, info->regs + SARADC_CTRL);
+
+	complete(&info->completion);
+
+	return IRQ_HANDLED;
+}
+
+static const struct iio_info rockchip_saradc_iio_info = {
+ .read_raw = rockchip_saradc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define ADC_CHANNEL(_index, _id) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _id, \
+}
+
+static const struct iio_chan_spec rockchip_saradc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+ ADC_CHANNEL(2, "adc2"),
+};
+
+static const struct rockchip_saradc_data saradc_data = {
+ .num_bits = 10,
+ .channels = rockchip_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_saradc_iio_channels),
+ .clk_rate = 1000000,
+};
+
+static const struct iio_chan_spec rockchip_rk3066_tsadc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+};
+
+static const struct rockchip_saradc_data rk3066_tsadc_data = {
+ .num_bits = 12,
+ .channels = rockchip_rk3066_tsadc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3066_tsadc_iio_channels),
+ .clk_rate = 50000,
+};
+
+static const struct of_device_id rockchip_saradc_match[] = {
+ {
+ .compatible = "rockchip,saradc",
+ .data = &saradc_data,
+ }, {
+ .compatible = "rockchip,rk3066-tsadc",
+ .data = &rk3066_tsadc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
+
+/*
+ * Probe: match the SoC variant data, map registers, request the IRQ, get
+ * clocks and the vref regulator, enable them in order (regulator, pclk,
+ * converter clock), and register the IIO device. Enables acquired before a
+ * failure are unwound via the goto ladder at the bottom.
+ */
+static int rockchip_saradc_probe(struct platform_device *pdev)
+{
+	struct rockchip_saradc *info = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	struct iio_dev *indio_dev = NULL;
+	struct resource	*mem;
+	const struct of_device_id *match;
+	int ret;
+	int irq;
+
+	if (!np)
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
+	if (!indio_dev) {
+		dev_err(&pdev->dev, "failed allocating iio device\n");
+		return -ENOMEM;
+	}
+	info = iio_priv(indio_dev);
+
+	/* per-variant resolution, channel list and clock rate */
+	match = of_match_device(rockchip_saradc_match, &pdev->dev);
+	info->data = match->data;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	info->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(info->regs))
+		return PTR_ERR(info->regs);
+
+	init_completion(&info->completion);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, rockchip_saradc_isr,
+			       0, dev_name(&pdev->dev), info);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed requesting irq %d\n", irq);
+		return ret;
+	}
+
+	info->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(info->pclk)) {
+		dev_err(&pdev->dev, "failed to get pclk\n");
+		return PTR_ERR(info->pclk);
+	}
+
+	info->clk = devm_clk_get(&pdev->dev, "saradc");
+	if (IS_ERR(info->clk)) {
+		dev_err(&pdev->dev, "failed to get adc clock\n");
+		return PTR_ERR(info->clk);
+	}
+
+	info->vref = devm_regulator_get(&pdev->dev, "vref");
+	if (IS_ERR(info->vref)) {
+		dev_err(&pdev->dev, "failed to get regulator, %ld\n",
+			PTR_ERR(info->vref));
+		return PTR_ERR(info->vref);
+	}
+
+	/*
+	 * Use a default value for the converter clock.
+	 * This may become user-configurable in the future.
+	 */
+	ret = clk_set_rate(info->clk, info->data->clk_rate);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to set adc clk rate, %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_enable(info->vref);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable vref regulator\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(info->pclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable pclk\n");
+		goto err_reg_voltage;
+	}
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable converter clock\n");
+		goto err_pclk;
+	}
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->name = dev_name(&pdev->dev);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->dev.of_node = pdev->dev.of_node;
+	indio_dev->info = &rockchip_saradc_iio_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	indio_dev->channels = info->data->channels;
+	indio_dev->num_channels = info->data->num_channels;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto err_clk;
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(info->clk);
+err_pclk:
+	clk_disable_unprepare(info->pclk);
+err_reg_voltage:
+	regulator_disable(info->vref);
+	return ret;
+}
+
+/* Unregister from IIO and release clocks/regulator in reverse probe order. */
+static int rockchip_saradc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct rockchip_saradc *info = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	clk_disable_unprepare(info->clk);
+	clk_disable_unprepare(info->pclk);
+	regulator_disable(info->vref);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: gate both clocks and drop the vref regulator. */
+static int rockchip_saradc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct rockchip_saradc *info = iio_priv(indio_dev);
+
+	clk_disable_unprepare(info->clk);
+	clk_disable_unprepare(info->pclk);
+	regulator_disable(info->vref);
+
+	return 0;
+}
+
+/*
+ * System resume: re-enable resources in the same order as probe
+ * (regulator, pclk, converter clock). On failure, undo whatever was
+ * already enabled so suspend/resume can't leak an enable count.
+ */
+static int rockchip_saradc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct rockchip_saradc *info = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(info->vref);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(info->pclk);
+	if (ret)
+		goto err_regulator;
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret)
+		goto err_pclk;
+
+	return 0;
+
+err_pclk:
+	clk_disable_unprepare(info->pclk);
+err_regulator:
+	regulator_disable(info->vref);
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rockchip_saradc_pm_ops,
+ rockchip_saradc_suspend, rockchip_saradc_resume);
+
+static struct platform_driver rockchip_saradc_driver = {
+ .probe = rockchip_saradc_probe,
+ .remove = rockchip_saradc_remove,
+ .driver = {
+ .name = "rockchip-saradc",
+ .of_match_table = rockchip_saradc_match,
+ .pm = &rockchip_saradc_pm_ops,
+ },
+};
+
+module_platform_driver(rockchip_saradc_driver);
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
new file mode 100644
index 00000000000000..b3a82b4d1a7587
--- /dev/null
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+struct adc081c {
+ struct i2c_client *i2c;
+ struct regulator *ref;
+};
+
+#define REG_CONV_RES 0x00
+
+/*
+ * IIO read_raw callback. RAW reads the conversion-result register over
+ * SMBus (big-endian word, 8 valid bits left-justified at bit 11). SCALE is
+ * vref(mV) / 2^8.
+ */
+static int adc081c_read_raw(struct iio_dev *iio,
+			    struct iio_chan_spec const *channel, int *value,
+			    int *shift, long mask)
+{
+	struct adc081c *adc = iio_priv(iio);
+	int err;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		err = i2c_smbus_read_word_swapped(adc->i2c, REG_CONV_RES);
+		if (err < 0)
+			return err;
+
+		/* drop the 4 trailing alignment bits, keep the 8-bit sample */
+		*value = (err >> 4) & 0xff;
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		err = regulator_get_voltage(adc->ref);
+		if (err < 0)
+			return err;
+
+		*value = err / 1000;
+		*shift = 8;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_chan_spec adc081c_channel = {
+ .type = IIO_VOLTAGE,
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+};
+
+static const struct iio_info adc081c_info = {
+ .read_raw = adc081c_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: verify SMBus word access is available, get and enable the vref
+ * regulator, then register the single-channel IIO device. The regulator is
+ * disabled again if registration fails.
+ */
+static int adc081c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct iio_dev *iio;
+	struct adc081c *adc;
+	int err;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+		return -ENODEV;
+
+	iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
+	if (!iio)
+		return -ENOMEM;
+
+	adc = iio_priv(iio);
+	adc->i2c = client;
+
+	adc->ref = devm_regulator_get(&client->dev, "vref");
+	if (IS_ERR(adc->ref))
+		return PTR_ERR(adc->ref);
+
+	err = regulator_enable(adc->ref);
+	if (err < 0)
+		return err;
+
+	iio->dev.parent = &client->dev;
+	iio->name = dev_name(&client->dev);
+	iio->modes = INDIO_DIRECT_MODE;
+	iio->info = &adc081c_info;
+
+	iio->channels = &adc081c_channel;
+	iio->num_channels = 1;
+
+	err = iio_device_register(iio);
+	if (err < 0)
+		goto regulator_disable;
+
+	i2c_set_clientdata(client, iio);
+
+	return 0;
+
+regulator_disable:
+	regulator_disable(adc->ref);
+
+	return err;
+}
+
+/* Unregister the IIO device and disable the vref regulator. */
+static int adc081c_remove(struct i2c_client *client)
+{
+	struct iio_dev *iio = i2c_get_clientdata(client);
+	struct adc081c *adc = iio_priv(iio);
+
+	iio_device_unregister(iio);
+	regulator_disable(adc->ref);
+
+	return 0;
+}
+
+static const struct i2c_device_id adc081c_id[] = {
+ { "adc081c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adc081c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id adc081c_of_match[] = {
+ { .compatible = "ti,adc081c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adc081c_of_match);
+#endif
+
+static struct i2c_driver adc081c_driver = {
+ .driver = {
+ .name = "adc081c",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(adc081c_of_match),
+ },
+ .probe = adc081c_probe,
+ .remove = adc081c_remove,
+ .id_table = adc081c_id,
+};
+module_i2c_driver(adc081c_driver);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("Texas Instruments ADC081C021/027 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
new file mode 100644
index 00000000000000..655cb564ec54af
--- /dev/null
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2014 Angelo Compagnucci <angelo.compagnucci@gmail.com>
+ *
+ * Driver for Texas Instruments' ADC128S052 ADC chip.
+ * Datasheet can be found here:
+ * http://www.ti.com/lit/ds/symlink/adc128s052.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+struct adc128 {
+ struct spi_device *spi;
+
+ struct regulator *reg;
+ struct mutex lock;
+
+ u8 buffer[2] ____cacheline_aligned;
+};
+
+/*
+ * Perform one conversion on @channel: write the channel-select control word,
+ * then read back the 12-bit result on the next SPI frame. The mutex
+ * serializes access to the shared DMA-safe transfer buffer. Returns the raw
+ * 12-bit sample or a negative SPI error.
+ */
+static int adc128_adc_conversion(struct adc128 *adc, u8 channel)
+{
+	int ret;
+
+	mutex_lock(&adc->lock);
+
+	/* channel address lives in bits 5:3 of the first control byte */
+	adc->buffer[0] = channel << 3;
+	adc->buffer[1] = 0;
+
+	ret = spi_write(adc->spi, &adc->buffer, 2);
+	if (ret < 0) {
+		mutex_unlock(&adc->lock);
+		return ret;
+	}
+
+	ret = spi_read(adc->spi, &adc->buffer, 2);
+
+	mutex_unlock(&adc->lock);
+
+	if (ret < 0)
+		return ret;
+
+	return ((adc->buffer[0] << 8 | adc->buffer[1]) & 0xFFF);
+}
+
+/*
+ * IIO read_raw callback. RAW performs one SPI conversion on the requested
+ * channel; SCALE is vref(mV) / 2^12.
+ */
+static int adc128_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *channel, int *val,
+			   int *val2, long mask)
+{
+	struct adc128 *adc = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+
+		ret = adc128_adc_conversion(adc, channel->channel);
+		if (ret < 0)
+			return ret;
+
+		*val = ret;
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+
+		ret = regulator_get_voltage(adc->reg);
+		if (ret < 0)
+			return ret;
+
+		*val = ret / 1000;
+		*val2 = 12;
+		return IIO_VAL_FRACTIONAL_LOG2;
+
+	default:
+		return -EINVAL;
+	}
+
+}
+
+#define ADC128_VOLTAGE_CHANNEL(num) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+static const struct iio_chan_spec adc128_channels[] = {
+ ADC128_VOLTAGE_CHANNEL(0),
+ ADC128_VOLTAGE_CHANNEL(1),
+ ADC128_VOLTAGE_CHANNEL(2),
+ ADC128_VOLTAGE_CHANNEL(3),
+ ADC128_VOLTAGE_CHANNEL(4),
+ ADC128_VOLTAGE_CHANNEL(5),
+ ADC128_VOLTAGE_CHANNEL(6),
+ ADC128_VOLTAGE_CHANNEL(7),
+};
+
+static const struct iio_info adc128_info = {
+ .read_raw = adc128_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device, get and enable the vref regulator, and
+ * register with IIO. If registration fails the regulator enable is undone
+ * so the enable count stays balanced (everything else is devm-managed).
+ */
+static int adc128_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct adc128 *adc;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	adc = iio_priv(indio_dev);
+	adc->spi = spi;
+
+	spi_set_drvdata(spi, indio_dev);
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &adc128_info;
+
+	indio_dev->channels = adc128_channels;
+	indio_dev->num_channels = ARRAY_SIZE(adc128_channels);
+
+	adc->reg = devm_regulator_get(&spi->dev, "vref");
+	if (IS_ERR(adc->reg))
+		return PTR_ERR(adc->reg);
+
+	ret = regulator_enable(adc->reg);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&adc->lock);
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		/* don't leak the vref enable taken above */
+		regulator_disable(adc->reg);
+
+	return ret;
+}
+
+/* Unregister the IIO device and disable the vref regulator. */
+static int adc128_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adc128 *adc = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	regulator_disable(adc->reg);
+
+	return 0;
+}
+
+static const struct spi_device_id adc128_id[] = {
+ { "adc128s052", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adc128_id);
+
+static struct spi_driver adc128_driver = {
+ .driver = {
+ .name = "adc128s052",
+ .owner = THIS_MODULE,
+ },
+ .probe = adc128_probe,
+ .remove = adc128_remove,
+ .id_table = adc128_id,
+};
+module_spi_driver(adc128_driver);
+
+MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
+MODULE_DESCRIPTION("Texas Instruments ADC128S052");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
new file mode 100644
index 00000000000000..b730864731e8b8
--- /dev/null
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -0,0 +1,558 @@
+/*
+ * TI ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+/* Per-device state; the ADC shares the TSC/ADC MFD block with the TSC. */
+struct tiadc_device {
+ struct ti_tscadc_dev *mfd_tscadc; /* parent MFD: registers, irq, step cache */
+ int channels; /* number of AIN lines taken from DT */
+ u8 channel_line[8]; /* AIN line of each logical channel */
+ u8 channel_step[8]; /* HW step slot assigned per channel */
+ int buffer_en_ch_steps; /* step-enable mask while buffering */
+ u16 data[8]; /* scratch buffer for one scan */
+};
+
+/* Read a 32-bit register of the shared TSC/ADC block. */
+static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
+{
+ return readl(adc->mfd_tscadc->tscadc_base + reg);
+}
+
+/* Write a 32-bit register of the shared TSC/ADC block. */
+static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, adc->mfd_tscadc->tscadc_base + reg);
+}
+
+/*
+ * Mask covering all step-enable bits owned by this ADC instance.
+ * Steps are allocated from TOTAL_STEPS downwards; the extra +1 shifts
+ * past bit 0 of the step-enable register (see "+1 for the charger"
+ * in get_adc_chan_step_mask()).
+ */
+static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
+{
+ u32 step_en;
+
+ step_en = ((1 << adc_dev->channels) - 1);
+ step_en <<= TOTAL_STEPS - adc_dev->channels + 1;
+ return step_en;
+}
+
+/*
+ * Map an IIO channel spec to its single step-enable bit, or 0 (with a
+ * WARN) if the channel was never assigned a step in tiadc_step_config().
+ */
+static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
+ struct iio_chan_spec const *chan)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
+ if (chan->channel == adc_dev->channel_line[i]) {
+ u32 step;
+
+ step = adc_dev->channel_step[i];
+ /* +1 for the charger */
+ return 1 << (step + 1);
+ }
+ }
+ WARN_ON(1);
+ return 0;
+}
+
+/* Raw step bit for a logical channel (no charger offset applied). */
+static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
+{
+ return 1 << adc_dev->channel_step[chan];
+}
+
+/*
+ * Program one HW step per enabled channel: 16-sample averaging, results
+ * routed to FIFO1; buffered capture additionally uses SW-continuous
+ * mode so steps keep re-triggering. Records the step slot assigned to
+ * each channel in channel_step[].
+ */
+static void tiadc_step_config(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int stepconfig;
+ int i, steps;
+
+ /*
+ * There are 16 configurable steps and 8 analog input
+ * lines available which are shared between Touchscreen and ADC.
+ *
+ * Steps backwards i.e. from 16 towards 0 are used by ADC
+ * depending on number of input lines needed.
+ * Channel would represent which analog input
+ * needs to be given to ADC to digitalize data.
+ */
+
+ steps = TOTAL_STEPS - adc_dev->channels;
+ if (iio_buffer_enabled(indio_dev))
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1
+ | STEPCONFIG_MODE_SWCNT;
+ else
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+
+ for (i = 0; i < adc_dev->channels; i++) {
+ int chan;
+
+ chan = adc_dev->channel_line[i];
+ tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
+ stepconfig | STEPCONFIG_INP(chan));
+ tiadc_writel(adc_dev, REG_STEPDELAY(steps),
+ STEPCONFIG_OPENDLY);
+ adc_dev->channel_step[i] = steps;
+ steps++;
+ }
+}
+
+/*
+ * Hard IRQ half (line shared with the touchscreen). Recovers from
+ * FIFO1 overruns inline; on a threshold event it masks the interrupt
+ * and defers FIFO draining to the threaded handler.
+ */
+static irqreturn_t tiadc_irq_h(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int status, config;
+ status = tiadc_readl(adc_dev, REG_IRQSTATUS);
+
+ /*
+ * ADC and touchscreen share the IRQ line.
+ * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only
+ */
+ if (status & IRQENB_FIFO1OVRRUN) {
+ /* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
+ config = tiadc_readl(adc_dev, REG_CTRL);
+ config &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, config);
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
+ | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+ tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
+ return IRQ_HANDLED;
+ } else if (status & IRQENB_FIFO1THRES) {
+ /* Disable irq and wake worker thread */
+ tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * Threaded IRQ half: drain FIFO1 one full scan at a time, push each
+ * scan to the IIO buffer, then ack and re-enable the threshold IRQ
+ * that the hard handler masked.
+ */
+static irqreturn_t tiadc_worker_h(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i, k, fifo1count, read;
+ u16 *data = adc_dev->data;
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (k = 0; k < fifo1count; k = k + i) {
+ /* one scan = scan_bytes/2 16-bit samples */
+ for (i = 0; i < (indio_dev->scan_bytes)/2; i++) {
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+ data[i] = read & FIFOREAD_DATA_MASK;
+ }
+ iio_push_to_buffers(indio_dev, (u8 *) data);
+ }
+
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
+ tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
+
+ return IRQ_HANDLED;
+}
+
+/* Buffer preenable: mask FIFO1 IRQs and drain stale samples. */
+static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i, fifo1count, read;
+
+ tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
+ IRQENB_FIFO1OVRRUN |
+ IRQENB_FIFO1UNDRFLW));
+
+ /* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+
+ return 0;
+}
+
+/*
+ * Buffer postenable: reprogram steps for continuous mode, build the
+ * step-enable mask from the scan mask (<<1 skips the charger bit),
+ * hand it to the MFD cache and unmask FIFO1 threshold/overrun IRQs.
+ */
+static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+ unsigned int enb = 0;
+ u8 bit;
+
+ tiadc_step_config(indio_dev);
+ for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+ enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+ adc_dev->buffer_en_ch_steps = enb;
+
+ am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
+
+ tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES
+ | IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
+ tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES
+ | IRQENB_FIFO1OVRRUN);
+
+ return 0;
+}
+
+/* Buffer predisable: mask IRQs, release our steps, drain leftovers. */
+static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int fifo1count, i, read;
+
+ tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
+ IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
+ am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
+ adc_dev->buffer_en_ch_steps = 0;
+
+ /* Flush FIFO of leftover data in the time it takes to disable adc */
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+
+ return 0;
+}
+
+/* Buffer postdisable: restore one-shot step configuration. */
+static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ tiadc_step_config(indio_dev);
+
+ return 0;
+}
+
+/* IIO buffer state-transition callbacks. */
+static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
+ .preenable = &tiadc_buffer_preenable,
+ .postenable = &tiadc_buffer_postenable,
+ .predisable = &tiadc_buffer_predisable,
+ .postdisable = &tiadc_buffer_postdisable,
+};
+
+/*
+ * Attach a kfifo buffer, request the (shared) threaded IRQ and register
+ * the buffer with the IIO core. On failure, unwinds in reverse order.
+ */
+static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*pollfunc_bh)(int irq, void *p),
+ irqreturn_t (*pollfunc_th)(int irq, void *p),
+ int irq,
+ unsigned long flags,
+ const struct iio_buffer_setup_ops *setup_ops)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ ret = request_threaded_irq(irq, pollfunc_th, pollfunc_bh,
+ flags, indio_dev->name, indio_dev);
+ if (ret)
+ goto error_kfifo_free;
+
+ indio_dev->setup_ops = setup_ops;
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
+ if (ret)
+ goto error_free_irq;
+
+ return 0;
+
+error_free_irq:
+ free_irq(irq, indio_dev);
+error_kfifo_free:
+ iio_kfifo_free(indio_dev->buffer);
+ return ret;
+}
+
+/* Undo tiadc_iio_buffered_hardware_setup(): irq, kfifo, buffer reg. */
+static void tiadc_iio_buffered_hardware_remove(struct iio_dev *indio_dev)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+
+ free_irq(adc_dev->mfd_tscadc->irq, indio_dev);
+ iio_kfifo_free(indio_dev->buffer);
+ iio_buffer_unregister(indio_dev);
+}
+
+
+/* Datasheet names for the eight analog input lines. */
+static const char * const chan_name_ain[] = {
+ "AIN0",
+ "AIN1",
+ "AIN2",
+ "AIN3",
+ "AIN4",
+ "AIN5",
+ "AIN6",
+ "AIN7",
+};
+
+/*
+ * Allocate and fill the iio_chan_spec array: one indexed, unsigned
+ * 12-bit-in-16 voltage channel per wired AIN line. The array is freed
+ * by tiadc_channels_remove(). Returns 0 or -ENOMEM.
+ */
+static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct iio_chan_spec *chan_array;
+ struct iio_chan_spec *chan;
+ int i;
+
+ indio_dev->num_channels = channels;
+ chan_array = kcalloc(channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+ if (chan_array == NULL)
+ return -ENOMEM;
+
+ chan = chan_array;
+ for (i = 0; i < channels; i++, chan++) {
+
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = adc_dev->channel_line[i];
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->datasheet_name = chan_name_ain[chan->channel];
+ chan->scan_index = i;
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+ }
+
+ indio_dev->channels = chan_array;
+
+ return 0;
+}
+
+/* Free the channel array allocated in tiadc_channel_init(). */
+static void tiadc_channels_remove(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->channels);
+}
+
+/*
+ * tiadc_read_raw() - IIO read_raw: blocking one-shot conversion of a
+ * single channel. Triggers the channel's step once, polls FIFO1 with a
+ * timeout, and returns the 12-bit sample in *val.
+ * Returns IIO_VAL_INT, or -EBUSY (buffered capture active / sample not
+ * found), -EINVAL (no step for channel), -EAGAIN (timeout).
+ */
+static int tiadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i, map_val;
+ unsigned int fifo1count, read, stepid;
+ bool found = false;
+ u32 step_en;
+ unsigned long timeout;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ step_en = get_adc_chan_step_mask(adc_dev, chan);
+ if (!step_en)
+ return -EINVAL;
+
+ /* Drain any stale samples before triggering a new conversion */
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ while (fifo1count--)
+ tiadc_readl(adc_dev, REG_FIFO1);
+
+ am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
+
+ timeout = jiffies + usecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ /* Wait for Fifo threshold interrupt */
+ while (1) {
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ if (fifo1count)
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
+ return -EAGAIN;
+ }
+ }
+ map_val = adc_dev->channel_step[chan->scan_index];
+
+ /*
+ * Check the complete FIFO: only one entry was programmed, but if an
+ * earlier conversion timed out (-EAGAIN above) and its value appeared
+ * in the FIFO later, there may be two entries. Read every item and
+ * keep only the latest sample for the requested channel.
+ */
+ for (i = 0; i < fifo1count; i++) {
+ read = tiadc_readl(adc_dev, REG_FIFO1);
+ /* step id of the sample sits above the data bits (>> 16) */
+ stepid = read & FIFOREAD_CHNLID_MASK;
+ stepid = stepid >> 0x10;
+
+ if (stepid == map_val) {
+ read = read & FIFOREAD_DATA_MASK;
+ found = true;
+ *val = (u16) read;
+ }
+ }
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
+
+ if (found == false)
+ return -EBUSY;
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info tiadc_info = {
+ .read_raw = &tiadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * tiadc_probe() - platform probe (DT only). Reads the wired AIN lines
+ * from "ti,adc-channels", programs the steps, sets up the buffered
+ * capture path and registers with the IIO core.
+ * Returns 0 on success or a negative errno.
+ */
+static int tiadc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct tiadc_device *adc_dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct property *prop;
+ const __be32 *cur;
+ int err;
+ u32 val;
+ int channels = 0;
+
+ if (!node) {
+ dev_err(&pdev->dev, "Could not find valid DT data.\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct tiadc_device));
+ if (indio_dev == NULL) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+ adc_dev = iio_priv(indio_dev);
+
+ adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
+
+ of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
+ /* Guard against malformed DT overflowing channel_line[] */
+ if (channels >= ARRAY_SIZE(adc_dev->channel_line)) {
+ dev_warn(&pdev->dev, "too many channels, ignoring extras\n");
+ break;
+ }
+ adc_dev->channel_line[channels] = val;
+ channels++;
+ }
+ adc_dev->channels = channels;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tiadc_info;
+
+ tiadc_step_config(indio_dev);
+ tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+
+ err = tiadc_channel_init(indio_dev, adc_dev->channels);
+ if (err < 0)
+ return err;
+
+ err = tiadc_iio_buffered_hardware_setup(indio_dev,
+ &tiadc_worker_h,
+ &tiadc_irq_h,
+ adc_dev->mfd_tscadc->irq,
+ IRQF_SHARED,
+ &tiadc_buffer_setup_ops);
+
+ if (err)
+ goto err_free_channels;
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_buffer_unregister;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_buffer_unregister:
+ tiadc_iio_buffered_hardware_remove(indio_dev);
+err_free_channels:
+ tiadc_channels_remove(indio_dev);
+ return err;
+}
+
+/* Platform remove: unwind probe and release our steps in the MFD. */
+static int tiadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ u32 step_en;
+
+ iio_device_unregister(indio_dev);
+ tiadc_iio_buffered_hardware_remove(indio_dev);
+ tiadc_channels_remove(indio_dev);
+
+ step_en = get_adc_step_mask(adc_dev);
+ am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Suspend: power the ADC down unless the MFD is a wakeup source. */
+static int tiadc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct ti_tscadc_dev *tscadc_dev;
+ unsigned int idle;
+
+ tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
+ if (!device_may_wakeup(tscadc_dev->dev)) {
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
+ }
+
+ return 0;
+}
+
+/* Resume: power up, reprogram steps and restore the buffered mask. */
+static int tiadc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int restore;
+
+ /* Make sure ADC is powered up */
+ restore = tiadc_readl(adc_dev, REG_CTRL);
+ restore &= ~(CNTRLREG_POWERDOWN);
+ tiadc_writel(adc_dev, REG_CTRL, restore);
+
+ tiadc_step_config(indio_dev);
+ am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
+ adc_dev->buffer_en_ch_steps);
+ return 0;
+}
+
+static const struct dev_pm_ops tiadc_pm_ops = {
+ .suspend = tiadc_suspend,
+ .resume = tiadc_resume,
+};
+#define TIADC_PM_OPS (&tiadc_pm_ops)
+#else
+#define TIADC_PM_OPS NULL
+#endif
+
+static const struct of_device_id ti_adc_dt_ids[] = {
+ { .compatible = "ti,am3359-adc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
+
+static struct platform_driver tiadc_driver = {
+ .driver = {
+ .name = "TI-am335x-adc",
+ .pm = TIADC_PM_OPS,
+ .of_match_table = ti_adc_dt_ids,
+ },
+ .probe = tiadc_probe,
+ .remove = tiadc_remove,
+};
+module_platform_driver(tiadc_driver);
+
+MODULE_DESCRIPTION("TI ADC controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
new file mode 100644
index 00000000000000..94c5f05b4bc1b5
--- /dev/null
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -0,0 +1,895 @@
+/*
+ *
+ * TWL4030 MADC module driver-This driver monitors the real time
+ * conversion of analog signals like battery temperature,
+ * battery type, battery level etc.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * Amit Kucheria <amit.kucheria@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+
+/**
+ * struct twl4030_madc_data - a container for madc info
+ * @dev: Pointer to device structure for madc
+ * @lock: Mutex protecting this data structure
+ * @requests: Array of request struct corresponding to SW1, SW2 and RT
+ * @use_second_irq: IRQ selection (main or co-processor)
+ * @imr: Interrupt mask register of MADC
+ * @isr: Interrupt status register of MADC
+ */
+struct twl4030_madc_data {
+ struct device *dev;
+ struct mutex lock; /* mutex protecting this data structure */
+ struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS];
+ bool use_second_irq;
+ u8 imr;
+ u8 isr;
+};
+
+/*
+ * IIO read_raw callback: runs a blocking software conversion for the
+ * requested channel. The info mask selects raw vs processed values and
+ * whether HW averaging is requested. Returns IIO_VAL_INT or a negative
+ * errno from twl4030_madc_conversion().
+ */
+static int twl4030_madc_read(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct twl4030_madc_data *madc = iio_priv(iio_dev);
+ struct twl4030_madc_request req;
+ int ret;
+
+ req.method = madc->use_second_irq ? TWL4030_MADC_SW2 : TWL4030_MADC_SW1;
+
+ req.channels = BIT(chan->channel);
+ req.active = false;
+ req.func_cb = NULL;
+ req.type = TWL4030_MADC_WAIT;
+ req.raw = !(mask == IIO_CHAN_INFO_PROCESSED);
+ req.do_avg = (mask == IIO_CHAN_INFO_AVERAGE_RAW);
+
+ ret = twl4030_madc_conversion(&req);
+ if (ret < 0)
+ return ret;
+
+ *val = req.rbuf[chan->channel];
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info twl4030_madc_iio_info = {
+ .read_raw = &twl4030_madc_read,
+ .driver_module = THIS_MODULE,
+};
+
+#define TWL4030_ADC_CHANNEL(_channel, _type, _name) { \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED), \
+ .datasheet_name = _name, \
+ .indexed = 1, \
+}
+
+static const struct iio_chan_spec twl4030_madc_iio_channels[] = {
+ TWL4030_ADC_CHANNEL(0, IIO_VOLTAGE, "ADCIN0"),
+ TWL4030_ADC_CHANNEL(1, IIO_TEMP, "ADCIN1"),
+ TWL4030_ADC_CHANNEL(2, IIO_VOLTAGE, "ADCIN2"),
+ TWL4030_ADC_CHANNEL(3, IIO_VOLTAGE, "ADCIN3"),
+ TWL4030_ADC_CHANNEL(4, IIO_VOLTAGE, "ADCIN4"),
+ TWL4030_ADC_CHANNEL(5, IIO_VOLTAGE, "ADCIN5"),
+ TWL4030_ADC_CHANNEL(6, IIO_VOLTAGE, "ADCIN6"),
+ TWL4030_ADC_CHANNEL(7, IIO_VOLTAGE, "ADCIN7"),
+ TWL4030_ADC_CHANNEL(8, IIO_VOLTAGE, "ADCIN8"),
+ TWL4030_ADC_CHANNEL(9, IIO_VOLTAGE, "ADCIN9"),
+ TWL4030_ADC_CHANNEL(10, IIO_CURRENT, "ADCIN10"),
+ TWL4030_ADC_CHANNEL(11, IIO_VOLTAGE, "ADCIN11"),
+ TWL4030_ADC_CHANNEL(12, IIO_VOLTAGE, "ADCIN12"),
+ TWL4030_ADC_CHANNEL(13, IIO_VOLTAGE, "ADCIN13"),
+ TWL4030_ADC_CHANNEL(14, IIO_VOLTAGE, "ADCIN14"),
+ TWL4030_ADC_CHANNEL(15, IIO_VOLTAGE, "ADCIN15"),
+};
+
+/* Singleton device pointer used by the exported conversion API. */
+static struct twl4030_madc_data *twl4030_madc;
+
+/* Per-channel input prescaler expressed as numerator/denominator. */
+struct twl4030_prescale_divider_ratios {
+ s16 numerator;
+ s16 denominator;
+};
+
+static const struct twl4030_prescale_divider_ratios
+twl4030_divider_ratios[16] = {
+ {1, 1}, /* CHANNEL 0 No Prescaler */
+ {1, 1}, /* CHANNEL 1 No Prescaler */
+ {6, 10}, /* CHANNEL 2 */
+ {6, 10}, /* CHANNEL 3 */
+ {6, 10}, /* CHANNEL 4 */
+ {6, 10}, /* CHANNEL 5 */
+ {6, 10}, /* CHANNEL 6 */
+ {6, 10}, /* CHANNEL 7 */
+ {3, 14}, /* CHANNEL 8 */
+ {1, 3}, /* CHANNEL 9 */
+ {1, 1}, /* CHANNEL 10 No Prescaler */
+ {15, 100}, /* CHANNEL 11 */
+ {1, 4}, /* CHANNEL 12 */
+ {1, 1}, /* CHANNEL 13 Reserved channels */
+ {1, 1}, /* CHANNEL 14 Reserved channels */
+ {5, 11}, /* CHANNEL 15 */
+};
+
+
+/*
+ * Thermistor lookup table covering -3 to 55 degrees Celsius; indexed
+ * by temperature offset, values compared against the measured
+ * resistance in twl4030battery_temperature().
+ */
+static int twl4030_therm_tbl[] = {
+ 30800, 29500, 28300, 27100,
+ 26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700,
+ 17900, 17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100,
+ 12600, 12100, 11600, 11200, 10800, 10400, 10000, 9630, 9280,
+ 8950, 8620, 8310, 8020, 7730, 7460, 7200, 6950, 6710,
+ 6470, 6250, 6040, 5830, 5640, 5450, 5260, 5090, 4920,
+ 4760, 4600, 4450, 4310, 4170, 4040, 3910, 3790, 3670,
+ 3550
+};
+
+/*
+ * Structure containing the registers
+ * of different conversion methods supported by MADC.
+ * Hardware or RT real time conversion request initiated by external host
+ * processor for RT Signal conversions.
+ * External host processors can also request for non RT conversions
+ * SW1 and SW2 software conversions also called asynchronous or GPC request.
+ */
+static
+const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = {
+ [TWL4030_MADC_RT] = {
+ .sel = TWL4030_MADC_RTSELECT_LSB,
+ .avg = TWL4030_MADC_RTAVERAGE_LSB,
+ .rbase = TWL4030_MADC_RTCH0_LSB,
+ },
+ [TWL4030_MADC_SW1] = {
+ .sel = TWL4030_MADC_SW1SELECT_LSB,
+ .avg = TWL4030_MADC_SW1AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW1,
+ },
+ [TWL4030_MADC_SW2] = {
+ .sel = TWL4030_MADC_SW2SELECT_LSB,
+ .avg = TWL4030_MADC_SW2AVERAGE_LSB,
+ .rbase = TWL4030_MADC_GPCH0_LSB,
+ .ctrl = TWL4030_MADC_CTRL_SW2,
+ },
+};
+
+/**
+ * twl4030_madc_channel_raw_read() - Function to read a particular channel value
+ * @madc: pointer to struct twl4030_madc_data
+ * @reg: lsb of ADC Channel
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg)
+{
+ u16 val;
+ int ret;
+ /*
+ * For each ADC channel, we have MSB and LSB register pair. MSB address
+ * is always LSB address+1. reg parameter is the address of LSB register
+ */
+ ret = twl_i2c_read_u16(TWL4030_MODULE_MADC, &val, reg);
+ if (ret) {
+ dev_err(madc->dev, "unable to read register 0x%X\n", reg);
+ return ret;
+ }
+
+ return (int)(val >> 6);
+}
+
+/*
+ * Return battery temperature in degrees Celsius
+ * Or < 0 on failure.
+ */
+static int twl4030battery_temperature(int raw_volt)
+{
+ u8 val;
+ int temp, curr, volt, res, ret;
+
+ volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
+ /* Getting and calculating the supply current in micro amperes */
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
+ REG_BCICTL2);
+ if (ret < 0)
+ return ret;
+
+ curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10;
+ /* Getting and calculating the thermistor resistance in ohms */
+ res = volt * 1000 / curr;
+ /* calculating temperature */
+ for (temp = 58; temp >= 0; temp--) {
+ int actual = twl4030_therm_tbl[temp];
+ if ((actual - res) >= 0)
+ break;
+ }
+
+ return temp + 1;
+}
+
+/*
+ * Convert a raw channel-10 reading to battery charge current, picking
+ * the divisor according to the BCI current-gain (CGAIN) setting.
+ * Returns the current value or a negative errno on i2c failure.
+ */
+static int twl4030battery_current(int raw_volt)
+{
+ int ret;
+ u8 val;
+
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
+ TWL4030_BCI_BCICTL1);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BCI_CGAIN) /* slope of 0.44 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R1;
+ else /* slope of 0.88 mV/mA */
+ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2;
+}
+
+/*
+ * twl4030_madc_read_channels() - read and post-process selected channels
+ * @madc: pointer to twl4030_madc_data struct
+ * @reg_base: base address of the first channel's result register
+ * @channels: 16-bit bitmap; a set bit selects that channel for reading
+ * @buf: output array, indexed by channel number; on a read failure the
+ * negative error is stored and returned
+ * @raw: when true, store raw conversion results without scaling
+ *
+ * Channel 10 is converted to current, channel 1 to temperature, all
+ * others to millivolts via the per-channel prescaler table.
+ * Returns the number of successfully read channels.
+ */
+static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
+ u8 reg_base, unsigned
+ long channels, int *buf,
+ bool raw)
+{
+ int count = 0;
+ int i;
+ u8 reg;
+
+ for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) {
+ reg = reg_base + (2 * i);
+ buf[i] = twl4030_madc_channel_raw_read(madc, reg);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "Unable to read register 0x%X\n",
+ reg);
+ return buf[i];
+ }
+ if (raw) {
+ count++;
+ continue;
+ }
+ switch (i) {
+ case 10:
+ buf[i] = twl4030battery_current(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading current\n");
+ return buf[i];
+ } else {
+ count++;
+ buf[i] = buf[i] - 750;
+ }
+ break;
+ case 1:
+ buf[i] = twl4030battery_temperature(buf[i]);
+ if (buf[i] < 0) {
+ dev_err(madc->dev, "err reading temperature\n");
+ return buf[i];
+ } else {
+ buf[i] -= 3;
+ count++;
+ }
+ break;
+ default:
+ count++;
+ /* Analog Input (V) = conv_result * step_size / R
+ * conv_result = decimal value of 10-bit conversion
+ * result
+ * step size = 1.5 / (2 ^ 10 -1)
+ * R = Prescaler ratio for input channels.
+ * Result given in mV hence multiplied by 1000.
+ */
+ buf[i] = (buf[i] * 3 * 1000 *
+ twl4030_divider_ratios[i].denominator)
+ / (2 * 1023 *
+ twl4030_divider_ratios[i].numerator);
+ }
+ }
+
+ return count;
+}
+
+/*
+ * Enables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be enabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * If the i2c read fails it returns an error else returns 0.
+ */
+static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+
+ val &= ~(1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Disables irq.
+ * @madc - pointer to twl4030_madc_data struct
+ * @id - irq number to be disabled
+ * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
+ * corresponding to RT, SW1, SW2 conversion requests.
+ * Returns error if i2c read/write fails.
+ */
+static int twl4030_madc_disable_irq(struct twl4030_madc_data *madc, u8 id)
+{
+ u8 val;
+ int ret;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ return ret;
+ }
+ val |= (1 << id);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to write imr register 0x%X\n", madc->imr);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Threaded IRQ handler: figures out which conversion method(s) raised
+ * the interrupt (ISR masked by IMR), masks them, reads their results
+ * and delivers them to the registered callback, then frees the request.
+ * On an i2c failure it falls back to servicing whatever request is
+ * marked active.
+ *
+ * NOTE(review): the success and err_i2c result-delivery loops are
+ * near-identical and could be factored into a helper.
+ */
+static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
+{
+ struct twl4030_madc_data *madc = _madc;
+ const struct twl4030_madc_conversion_method *method;
+ u8 isr_val, imr_val;
+ int i, len, ret;
+ struct twl4030_madc_request *r;
+
+ mutex_lock(&madc->lock);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &isr_val, madc->isr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read isr register 0x%X\n",
+ madc->isr);
+ goto err_i2c;
+ }
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &imr_val, madc->imr);
+ if (ret) {
+ dev_err(madc->dev, "unable to read imr register 0x%X\n",
+ madc->imr);
+ goto err_i2c;
+ }
+ /* only consider interrupts that are not masked */
+ isr_val &= ~imr_val;
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ if (!(isr_val & (1 << i)))
+ continue;
+ ret = twl4030_madc_disable_irq(madc, i);
+ if (ret < 0)
+ dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
+ madc->requests[i].result_pending = 1;
+ }
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ /* No pending results for this method, move to next one */
+ if (!r->result_pending)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf, r->raw);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+
+err_i2c:
+ /*
+ * In case of error check whichever request is active
+ * and service the same.
+ */
+ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
+ r = &madc->requests[i];
+ if (r->active == 0)
+ continue;
+ method = &twl4030_conversion_methods[r->method];
+ /* Read results */
+ len = twl4030_madc_read_channels(madc, method->rbase,
+ r->channels, r->rbuf, r->raw);
+ /* Return results to caller */
+ if (r->func_cb != NULL) {
+ r->func_cb(len, r->channels, r->rbuf);
+ r->func_cb = NULL;
+ }
+ /* Free request */
+ r->result_pending = 0;
+ r->active = 0;
+ }
+ mutex_unlock(&madc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Store the request in the per-method slot and unmask its completion
+ * interrupt so the threaded handler can deliver the result later.
+ * Returns 0 or a negative errno from the i2c access.
+ */
+static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
+ struct twl4030_madc_request *req)
+{
+ struct twl4030_madc_request *p;
+ int ret;
+
+ p = &madc->requests[req->method];
+ memcpy(p, req, sizeof(*req));
+ ret = twl4030_madc_enable_irq(madc, req->method);
+ if (ret < 0) {
+ dev_err(madc->dev, "enable irq failed!!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function which enables the madc conversion
+ * by writing to the control register.
+ * @madc - pointer to twl4030_madc_data struct
+ * @conv_method - can be TWL4030_MADC_RT, TWL4030_MADC_SW2, TWL4030_MADC_SW1
+ * corresponding to RT SW1 or SW2 conversion methods.
+ * Returns 0 if succeeds else a negative error value
+ */
+static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc,
+ int conv_method)
+{
+ const struct twl4030_madc_conversion_method *method;
+ int ret = 0;
+
+ if (conv_method != TWL4030_MADC_SW1 && conv_method != TWL4030_MADC_SW2)
+ return -ENOTSUPP;
+
+ method = &twl4030_conversion_methods[conv_method];
+ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, TWL4030_MADC_SW_START,
+ method->ctrl);
+ if (ret) {
+ dev_err(madc->dev, "unable to write ctrl register 0x%X\n",
+ method->ctrl);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Function that waits for conversion to be ready
+ * @madc - pointer to twl4030_madc_data struct
+ * @timeout_ms - timeout value in milliseconds
+ * @status_reg - ctrl register
+ * returns 0 if succeeds else a negative error value
+ */
+static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
+ unsigned int timeout_ms,
+ u8 status_reg)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ u8 reg;
+
+ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &reg, status_reg);
+ if (ret) {
+ dev_err(madc->dev,
+ "unable to read status register 0x%X\n",
+ status_reg);
+ return ret;
+ }
+ if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW))
+ return 0;
+ usleep_range(500, 2000);
+ } while (!time_after(jiffies, timeout));
+ dev_err(madc->dev, "conversion timeout!\n");
+
+ return -EAGAIN;
+}
+
+/*
+ * twl4030_madc_conversion() - run a conversion request (exported API)
+ * @req: request; req->rbuf is filled with the selected channels'
+ * values, indexed by channel. A failed channel read stores a
+ * negative error in the corresponding element.
+ *
+ * For TWL4030_MADC_IRQ_ONESHOT with a callback, the conversion is
+ * started and results are delivered later via the IRQ handler (returns
+ * 0). Otherwise the call blocks until EOC and returns the number of
+ * successfully read channels. Negative errno on failure.
+ */
+int twl4030_madc_conversion(struct twl4030_madc_request *req)
+{
+ const struct twl4030_madc_conversion_method *method;
+ int ret;
+
+ if (!req || !twl4030_madc)
+ return -EINVAL;
+
+ mutex_lock(&twl4030_madc->lock);
+ if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* Do we have a conversion request ongoing */
+ if (twl4030_madc->requests[req->method].active) {
+ ret = -EBUSY;
+ goto out;
+ }
+ method = &twl4030_conversion_methods[req->method];
+ /* Select channels to be converted */
+ ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels, method->sel);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write sel register 0x%X\n", method->sel);
+ goto out;
+ }
+ /* Select averaging for all channels if do_avg is set */
+ if (req->do_avg) {
+ ret = twl_i2c_write_u16(TWL4030_MODULE_MADC, req->channels,
+ method->avg);
+ if (ret) {
+ dev_err(twl4030_madc->dev,
+ "unable to write avg register 0x%X\n",
+ method->avg);
+ goto out;
+ }
+ }
+ if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
+ ret = twl4030_madc_set_irq(twl4030_madc, req);
+ if (ret < 0)
+ goto out;
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ ret = 0;
+ goto out;
+ }
+ /* With RT method we should not be here anymore */
+ if (req->method == TWL4030_MADC_RT) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
+ if (ret < 0)
+ goto out;
+ twl4030_madc->requests[req->method].active = 1;
+ /* Wait until conversion is ready (ctrl register returns EOC) */
+ ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
+ if (ret) {
+ twl4030_madc->requests[req->method].active = 0;
+ goto out;
+ }
+ ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
+ req->channels, req->rbuf, req->raw);
+ twl4030_madc->requests[req->method].active = 0;
+
+out:
+ mutex_unlock(&twl4030_madc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
+
+/*
+ * twl4030_get_madc_conversion() - one-shot synchronous read of a channel
+ * @channel_no: MADC channel index to convert
+ *
+ * Returns the (positive) converted value, 0 when the channel reported a
+ * non-positive result, or a negative errno on conversion failure.
+ */
+int twl4030_get_madc_conversion(int channel_no)
+{
+	/*
+	 * Fix: the original code initialized fields one by one and left
+	 * req.type and req.do_avg uninitialized, although
+	 * twl4030_madc_conversion() reads both (indeterminate do_avg could
+	 * randomly enable averaging). The designated initializer zeroes
+	 * every field not listed.
+	 */
+	struct twl4030_madc_request req = {
+		.channels = 1 << channel_no,
+		.method = TWL4030_MADC_SW2,
+		.active = 0,
+		.raw = 0,
+		.func_cb = NULL,
+	};
+	int temp = 0;
+	int ret;
+
+	ret = twl4030_madc_conversion(&req);
+	if (ret < 0)
+		return ret;
+	/* Negative rbuf entries flag per-channel read failures */
+	if (req.rbuf[channel_no] > 0)
+		temp = req.rbuf[channel_no];
+
+	return temp;
+}
+EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
+
+/**
+ * twl4030_madc_set_current_generator() - setup bias current
+ *
+ * @madc: pointer to twl4030_madc_data struct
+ * @chan: non-zero selects TWL4030_BCI_ITHEN (battery type reading),
+ *        zero selects TWL4030_BCI_TYPEN (battery temperature sensing)
+ * @on: enable or disable chan.
+ *
+ * Read-modify-write of BCICTL1 to enable or disable the bias current
+ * generator for the selected measurement.
+ */
+static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
+					      int chan, int on)
+{
+	int bit;
+	int err;
+	u8 ctl;
+
+	err = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
+			      &ctl, TWL4030_BCI_BCICTL1);
+	if (err) {
+		dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
+			TWL4030_BCI_BCICTL1);
+		return err;
+	}
+
+	bit = chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
+	ctl = on ? (ctl | bit) : (ctl & ~bit);
+
+	err = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+			       ctl, TWL4030_BCI_BCICTL1);
+	if (err) {
+		dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
+			TWL4030_BCI_BCICTL1);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Function that sets MADC software power on bit to enable MADC
+ * @madc - pointer to twl4030_madc_data struct
+ * @on - Enable or disable MADC software power on bit.
+ * returns error if i2c read/write fails else 0
+ *
+ * NOTE(review): the read goes through TWL_MODULE_MAIN_CHARGE while the
+ * write goes to TWL4030_MODULE_MADC for the same CTRL1 offset; this may
+ * be intentional per the chip's register mapping -- confirm against the
+ * TWL4030 TRM before "fixing" it.
+ */
+static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
+{
+	u8 regval;
+	int ret;
+
+	ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
+			      &regval, TWL4030_MADC_CTRL1);
+	if (ret) {
+		dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
+			TWL4030_MADC_CTRL1);
+		return ret;
+	}
+	/* MADCON is the software power-on bit */
+	if (on)
+		regval |= TWL4030_MADC_MADCON;
+	else
+		regval &= ~TWL4030_MADC_MADCON;
+	ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, regval, TWL4030_MADC_CTRL1);
+	if (ret) {
+		dev_err(madc->dev, "unable to write madc ctrl1 reg 0x%X\n",
+			TWL4030_MADC_CTRL1);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize MADC and request for threaded irq
+ *
+ * Powers the ADC on, enables the bias current generator, makes sure the
+ * MADC HF clock is running, then registers the threaded IRQ handler and
+ * the IIO device. The power/bias enables are unwound on failure.
+ */
+static int twl4030_madc_probe(struct platform_device *pdev)
+{
+	struct twl4030_madc_data *madc;
+	struct twl4030_madc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node *np = pdev->dev.of_node;
+	int irq, ret;
+	u8 regval;
+	struct iio_dev *iio_dev = NULL;
+
+	if (!pdata && !np) {
+		dev_err(&pdev->dev, "neither platform data nor Device Tree node available\n");
+		return -EINVAL;
+	}
+
+	iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*madc));
+	if (!iio_dev) {
+		dev_err(&pdev->dev, "failed allocating iio device\n");
+		return -ENOMEM;
+	}
+
+	madc = iio_priv(iio_dev);
+	madc->dev = &pdev->dev;
+
+	iio_dev->name = dev_name(&pdev->dev);
+	iio_dev->dev.parent = &pdev->dev;
+	iio_dev->dev.of_node = pdev->dev.of_node;
+	iio_dev->info = &twl4030_madc_iio_info;
+	iio_dev->modes = INDIO_DIRECT_MODE;
+	iio_dev->channels = twl4030_madc_iio_channels;
+	iio_dev->num_channels = ARRAY_SIZE(twl4030_madc_iio_channels);
+
+	/*
+	 * Phoenix provides 2 interrupt lines. The first one is connected to
+	 * the OMAP. The other one can be connected to the other processor such
+	 * as modem. Hence two separate ISR and IMR registers.
+	 */
+	if (pdata)
+		madc->use_second_irq = (pdata->irq_line != 1);
+	else
+		madc->use_second_irq = of_property_read_bool(np,
+				       "ti,system-uses-second-madc-irq");
+
+	madc->imr = madc->use_second_irq ? TWL4030_MADC_IMR2 :
+					   TWL4030_MADC_IMR1;
+	madc->isr = madc->use_second_irq ? TWL4030_MADC_ISR2 :
+					   TWL4030_MADC_ISR1;
+
+	ret = twl4030_madc_set_power(madc, 1);
+	if (ret < 0)
+		return ret;
+	ret = twl4030_madc_set_current_generator(madc, 0, 1);
+	if (ret < 0)
+		goto err_current_generator;
+
+	/* Enable main-battery voltage measurement (MESBAT) */
+	ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
+			      &regval, TWL4030_BCI_BCICTL1);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
+			TWL4030_BCI_BCICTL1);
+		goto err_i2c;
+	}
+	regval |= TWL4030_BCI_MESBAT;
+	ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+			       regval, TWL4030_BCI_BCICTL1);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
+			TWL4030_BCI_BCICTL1);
+		goto err_i2c;
+	}
+
+	/* Check that MADC clock is on */
+	ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
+			TWL4030_REG_GPBR1);
+		goto err_i2c;
+	}
+
+	/* If MADC clk is not on, turn it on */
+	if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
+		dev_info(&pdev->dev, "clk disabled, enabling\n");
+		regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
+		ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
+				       TWL4030_REG_GPBR1);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
+				TWL4030_REG_GPBR1);
+			goto err_i2c;
+		}
+	}
+
+	platform_set_drvdata(pdev, iio_dev);
+	mutex_init(&madc->lock);
+
+	/*
+	 * Fix: platform_get_irq() returns a negative errno on failure;
+	 * the original passed it unchecked to devm_request_threaded_irq().
+	 */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto err_i2c;
+	}
+	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+					twl4030_madc_threaded_irq_handler,
+					IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+	if (ret) {
+		dev_err(&pdev->dev, "could not request irq\n");
+		goto err_i2c;
+	}
+	twl4030_madc = madc;
+
+	ret = iio_device_register(iio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "could not register iio device\n");
+		goto err_i2c;
+	}
+
+	return 0;
+
+err_i2c:
+	twl4030_madc_set_current_generator(madc, 0, 0);
+err_current_generator:
+	twl4030_madc_set_power(madc, 0);
+	return ret;
+}
+
+/*
+ * Tear down in reverse probe order: unregister the IIO device first,
+ * then drop the bias current and power the MADC off.
+ */
+static int twl4030_madc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct twl4030_madc_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	twl4030_madc_set_current_generator(data, 0, 0);
+	twl4030_madc_set_power(data, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+/* Device Tree binding: "ti,twl4030-madc" */
+static const struct of_device_id twl_madc_of_match[] = {
+	{ .compatible = "ti,twl4030-madc", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, twl_madc_of_match);
+#endif
+
+/* Platform driver glue; of_match_ptr() compiles to NULL without CONFIG_OF */
+static struct platform_driver twl4030_madc_driver = {
+	.probe = twl4030_madc_probe,
+	.remove = twl4030_madc_remove,
+	.driver = {
+		   .name = "twl4030_madc",
+		   .of_match_table = of_match_ptr(twl_madc_of_match),
+		   },
+};
+
+module_platform_driver(twl4030_madc_driver);
+
+MODULE_DESCRIPTION("TWL4030 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("J Keerthy");
+MODULE_ALIAS("platform:twl4030_madc");
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
new file mode 100644
index 00000000000000..89d8aa1d281850
--- /dev/null
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -0,0 +1,1009 @@
+/*
+ * TWL6030 GPADC module driver
+ *
+ * Copyright (C) 2009-2013 Texas Instruments Inc.
+ * Nishant Kamat <nskamat@ti.com>
+ * Balaji T K <balajitk@ti.com>
+ * Graeme Gregory <gg@slimlogic.co.uk>
+ * Girish S Ghongdemath <girishsg@ti.com>
+ * Ambresh K <ambresh@ti.com>
+ * Oleksandr Kozaruk <oleksandr.kozaruk@ti.com
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/i2c/twl.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define DRIVER_NAME "twl6030_gpadc"
+
+/*
+ * twl6030 per TRM has 17 channels, and twl6032 has 19 channels
+ * 2 test network channels are not used,
+ * 2 die temperature channels are not used either, as it is not
+ * defined how to convert ADC value to temperature
+ */
+#define TWL6030_GPADC_USED_CHANNELS 13
+#define TWL6030_GPADC_MAX_CHANNELS 15
+#define TWL6032_GPADC_USED_CHANNELS 15
+#define TWL6032_GPADC_MAX_CHANNELS 19
+#define TWL6030_GPADC_NUM_TRIM_REGS 16
+
+#define TWL6030_GPADC_CTRL_P1 0x05
+
+#define TWL6032_GPADC_GPSELECT_ISB 0x07
+#define TWL6032_GPADC_CTRL_P1 0x08
+
+#define TWL6032_GPADC_GPCH0_LSB 0x0d
+#define TWL6032_GPADC_GPCH0_MSB 0x0e
+
+#define TWL6030_GPADC_CTRL_P1_SP1 BIT(3)
+
+#define TWL6030_GPADC_GPCH0_LSB (0x29)
+
+#define TWL6030_GPADC_RT_SW1_EOC_MASK BIT(5)
+
+#define TWL6030_GPADC_TRIM1 0xCD
+
+#define TWL6030_REG_TOGGLE1 0x90
+#define TWL6030_GPADCS BIT(1)
+#define TWL6030_GPADCR BIT(0)
+
+/**
+ * struct twl6030_chnl_calib - channel calibration
+ * @gain:		slope coefficient for ideal curve (fixed point, x1000)
+ * @gain_error:		gain error (fixed point, x1000)
+ * @offset_error:	offset of the real curve (fixed point, x1000)
+ */
+struct twl6030_chnl_calib {
+	s32 gain;
+	s32 gain_error;
+	s32 offset_error;
+};
+
+/**
+ * struct twl6030_ideal_code - GPADC calibration parameters
+ * GPADC is calibrated in two points: close to the beginning and
+ * to the end of the measurable input range
+ *
+ * @channel:	channel number
+ * @code1:	ideal code for the input at the beginning
+ * @code2:	ideal code for at the end of the range
+ * @volt1:	voltage input at the beginning(low voltage)
+ * @volt2:	voltage input at the end(high voltage)
+ */
+struct twl6030_ideal_code {
+	int channel;
+	u16 code1;
+	u16 code2;
+	u16 volt1;
+	u16 volt2;
+};
+
+struct twl6030_gpadc_data;
+
+/**
+ * struct twl6030_gpadc_platform_data - platform specific data
+ * @nchannels:		number of GPADC channels
+ * @iio_channels:	iio channels
+ * @ideal:		pointer to calibration parameters
+ * @start_conversion:	pointer to ADC start conversion function
+ * @channel_to_reg:	pointer to ADC function to convert channel to
+ *			register address for reading conversion result
+ * @calibrate:		pointer to calibration function
+ */
+struct twl6030_gpadc_platform_data {
+	const int nchannels;
+	const struct iio_chan_spec *iio_channels;
+	const struct twl6030_ideal_code *ideal;
+	int (*start_conversion)(int channel);
+	u8 (*channel_to_reg)(int channel);
+	int (*calibrate)(struct twl6030_gpadc_data *gpadc);
+};
+
+/**
+ * struct twl6030_gpadc_data - GPADC data
+ * @dev:		device pointer
+ * @lock:		mutual exclusion lock for the structure
+ * @irq_complete:	completion to signal end of conversion
+ * @twl6030_cal_tbl:	pointer to calibration data for each
+ *			channel with gain error and offset
+ * @pdata:		pointer to device specific data
+ */
+struct twl6030_gpadc_data {
+	struct device	*dev;
+	struct mutex	lock;
+	struct completion	irq_complete;
+	struct twl6030_chnl_calib	*twl6030_cal_tbl;
+	const struct twl6030_gpadc_platform_data *pdata;
+};
+
+/*
+ * channels 11, 12, 13, 15 and 16 have no calibration data
+ * calibration offset is same for channels 1, 3, 4, 5
+ *
+ * The data is taken from GPADC_TRIM registers description.
+ * GPADC_TRIM registers keep difference between the code measured
+ * at volt1 and volt2 input voltages and corresponding code1 and code2
+ *
+ * Note: entries are indexed by *table position*, not channel number --
+ * uncalibrated channels are skipped, hence .channel != array index for
+ * the last entry.
+ */
+static const struct twl6030_ideal_code
+	twl6030_ideal[TWL6030_GPADC_USED_CHANNELS] = {
+	[0] = { /* ch 0, external, battery type, resistor value */
+		.channel = 0,
+		.code1 = 116,
+		.code2 = 745,
+		.volt1 = 141,
+		.volt2 = 910,
+	},
+	[1] = { /* ch 1, external, battery temperature, NTC resistor value */
+		.channel = 1,
+		.code1 = 82,
+		.code2 = 900,
+		.volt1 = 100,
+		.volt2 = 1100,
+	},
+	[2] = { /* ch 2, external, audio accessory/general purpose */
+		.channel = 2,
+		.code1 = 55,
+		.code2 = 818,
+		.volt1 = 101,
+		.volt2 = 1499,
+	},
+	[3] = { /* ch 3, external, general purpose */
+		.channel = 3,
+		.code1 = 82,
+		.code2 = 900,
+		.volt1 = 100,
+		.volt2 = 1100,
+	},
+	[4] = { /* ch 4, external, temperature measurement/general purpose */
+		.channel = 4,
+		.code1 = 82,
+		.code2 = 900,
+		.volt1 = 100,
+		.volt2 = 1100,
+	},
+	[5] = { /* ch 5, external, general purpose */
+		.channel = 5,
+		.code1 = 82,
+		.code2 = 900,
+		.volt1 = 100,
+		.volt2 = 1100,
+	},
+	[6] = { /* ch 6, external, general purpose */
+		.channel = 6,
+		.code1 = 82,
+		.code2 = 900,
+		.volt1 = 100,
+		.volt2 = 1100,
+	},
+	[7] = { /* ch 7, internal, main battery */
+		.channel = 7,
+		.code1 = 614,
+		.code2 = 941,
+		.volt1 = 3001,
+		.volt2 = 4599,
+	},
+	[8] = { /* ch 8, internal, backup battery */
+		.channel = 8,
+		.code1 = 82,
+		.code2 = 688,
+		.volt1 = 501,
+		.volt2 = 4203,
+	},
+	[9] = { /* ch 9, internal, external charger input */
+		.channel = 9,
+		.code1 = 182,
+		.code2 = 818,
+		.volt1 = 2001,
+		.volt2 = 8996,
+	},
+	[10] = { /* ch 10, internal, VBUS */
+		.channel = 10,
+		.code1 = 149,
+		.code2 = 818,
+		.volt1 = 1001,
+		.volt2 = 5497,
+	},
+	[11] = { /* ch 11, internal, VBUS charging current */
+		.channel = 11,
+	},
+		/* ch 12, internal, Die temperature */
+		/* ch 13, internal, Die temperature */
+	[12] = { /* ch 14, internal, USB ID line */
+		.channel = 14,
+		.code1 = 48,
+		.code2 = 714,
+		.volt1 = 323,
+		.volt2 = 4800,
+	},
+};
+
+/*
+ * twl6032 ideal codes/voltages; same layout rules as twl6030_ideal
+ * above (indexed by table position, uncalibrated channels skipped,
+ * all-zero code/volt entries mean "no factory calibration").
+ */
+static const struct twl6030_ideal_code
+	twl6032_ideal[TWL6032_GPADC_USED_CHANNELS] = {
+	[0] = { /* ch 0, external, battery type, resistor value */
+		.channel = 0,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[1] = { /* ch 1, external, battery temperature, NTC resistor value */
+		.channel = 1,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[2] = { /* ch 2, external, audio accessory/general purpose */
+		.channel = 2,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 660,
+		.volt2 = 1500,
+	},
+	[3] = { /* ch 3, external, temperature with external diode/general
+		   purpose */
+		.channel = 3,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[4] = { /* ch 4, external, temperature measurement/general purpose */
+		.channel = 4,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[5] = { /* ch 5, external, general purpose */
+		.channel = 5,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[6] = { /* ch 6, external, general purpose */
+		.channel = 6,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 440,
+		.volt2 = 1000,
+	},
+	[7] = { /* ch7, internal, system supply */
+		.channel = 7,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 2200,
+		.volt2 = 5000,
+	},
+	[8] = { /* ch8, internal, backup battery */
+		.channel = 8,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 2200,
+		.volt2 = 5000,
+	},
+	[9] = { /* ch 9, internal, external charger input */
+		.channel = 9,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 3960,
+		.volt2 = 9000,
+	},
+	[10] = { /* ch10, internal, VBUS */
+		.channel = 10,
+		.code1 = 150,
+		.code2 = 751,
+		.volt1 = 1000,
+		.volt2 = 5000,
+	},
+	[11] = { /* ch 11, internal, VBUS DC-DC output current */
+		.channel = 11,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 660,
+		.volt2 = 1500,
+	},
+		/* ch 12, internal, Die temperature */
+		/* ch 13, internal, Die temperature */
+	[12] = { /* ch 14, internal, USB ID line */
+		.channel = 14,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 2420,
+		.volt2 = 5500,
+	},
+		/* ch 15, internal, test network */
+		/* ch 16, internal, test network */
+	[13] = { /* ch 17, internal, battery charging current */
+		.channel = 17,
+	},
+	[14] = { /* ch 18, internal, battery voltage */
+		.channel = 18,
+		.code1 = 1441,
+		.code2 = 3276,
+		.volt1 = 2200,
+		.volt2 = 5000,
+	},
+};
+
+/* Write one byte to a register of the TWL6030 GPADC i2c module. */
+static inline int twl6030_gpadc_write(u8 reg, u8 val)
+{
+	return twl_i2c_write_u8(TWL6030_MODULE_GPADC, val, reg);
+}
+
+/*
+ * Read a 16-bit conversion result: two consecutive bytes starting at
+ * @reg into *val (callers pass a __le16 buffer cast to u8 *).
+ */
+static inline int twl6030_gpadc_read(u8 reg, u8 *val)
+{
+
+	return twl_i2c_read(TWL6030_MODULE_GPADC, val, reg, 2);
+}
+
+/*
+ * Unmask the GPADC end-of-conversion interrupt on both the line mask
+ * and the status mask of interrupt bank B.
+ */
+static int twl6030_gpadc_enable_irq(u8 bits)
+{
+	int err = twl6030_interrupt_unmask(bits, REG_INT_MSK_LINE_B);
+
+	if (err < 0)
+		return err;
+
+	return twl6030_interrupt_unmask(bits, REG_INT_MSK_STS_B);
+}
+
+/* Mask the GPADC interrupt again (line and status masks, bank B). */
+static void twl6030_gpadc_disable_irq(u8 bits)
+{
+	twl6030_interrupt_mask(bits, REG_INT_MSK_LINE_B);
+	twl6030_interrupt_mask(bits, REG_INT_MSK_STS_B);
+}
+
+/*
+ * End-of-conversion interrupt: wake the waiter in
+ * twl6030_gpadc_read_raw() via the completion.
+ */
+static irqreturn_t twl6030_gpadc_irq_handler(int irq, void *indio_dev)
+{
+	struct twl6030_gpadc_data *gpadc = iio_priv(indio_dev);
+
+	complete(&gpadc->irq_complete);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Trigger a software conversion on the twl6030. @channel is
+ * deliberately unused -- the SP1 trigger apparently starts conversion
+ * of the whole channel set at once (confirm against the TWL6030 TRM).
+ */
+static int twl6030_start_conversion(int channel)
+{
+	return twl6030_gpadc_write(TWL6030_GPADC_CTRL_P1,
+				   TWL6030_GPADC_CTRL_P1_SP1);
+}
+
+/*
+ * twl6032 variant: select @channel in GPSELECT_ISB first, then kick
+ * off a single software-triggered conversion via CTRL_P1.
+ */
+static int twl6032_start_conversion(int channel)
+{
+	int err = twl6030_gpadc_write(TWL6032_GPADC_GPSELECT_ISB, channel);
+
+	if (err)
+		return err;
+
+	return twl6030_gpadc_write(TWL6032_GPADC_CTRL_P1,
+				   TWL6030_GPADC_CTRL_P1_SP1);
+}
+
+/* twl6030: each channel has its own 16-bit result register pair. */
+static u8 twl6030_channel_to_reg(int channel)
+{
+	return TWL6030_GPADC_GPCH0_LSB + 2 * channel;
+}
+
+static u8 twl6032_channel_to_reg(int channel)
+{
+	/*
+	 * for any prior chosen channel, when the conversion is ready
+	 * the result is available in GPCH0_LSB, GPCH0_MSB.
+	 */
+
+	return TWL6032_GPADC_GPCH0_LSB;
+}
+
+/*
+ * Map a hardware channel number to its index in the ideal-code table.
+ *
+ * NOTE(review): if @channel is not in the table this returns @size,
+ * which callers use as an array index (one past the end). All current
+ * callers pass channels from the iio_chan_spec tables, which presumably
+ * always match -- verify before adding channels.
+ */
+static int twl6030_gpadc_lookup(const struct twl6030_ideal_code *ideal,
+		int channel, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		if (ideal[i].channel == channel)
+			break;
+
+	return i;
+}
+
+/*
+ * Non-zero when @channel has factory calibration data. Uncalibrated
+ * entries of the ideal-code table are all-zero, so code2 doubles as a
+ * "calibrated" flag.
+ */
+static int twl6030_channel_calibrated(const struct twl6030_gpadc_platform_data
+		*pdata, int channel)
+{
+	int idx = twl6030_gpadc_lookup(pdata->ideal, channel,
+				       pdata->nchannels);
+
+	/* not calibrated channels have 0 in all structure members */
+	return pdata->ideal[idx].code2;
+}
+
+/*
+ * Apply the two-point calibration to @raw_code:
+ *   corrected = (raw * 1000 - offset_error) / gain_error
+ * where gain_error and offset_error are fixed point (x1000) values
+ * computed by the calibrate() hook.
+ */
+static int twl6030_gpadc_make_correction(struct twl6030_gpadc_data *gpadc,
+		int channel, int raw_code)
+{
+	const struct twl6030_chnl_calib *cal;
+	int idx;
+
+	idx = twl6030_gpadc_lookup(gpadc->pdata->ideal, channel,
+				   gpadc->pdata->nchannels);
+	cal = &gpadc->twl6030_cal_tbl[idx];
+
+	return ((raw_code * 1000) - cal->offset_error) / cal->gain_error;
+}
+
+/*
+ * Read the (little-endian, 16-bit) conversion result for @channel and
+ * store it in *res, calibration-corrected when trim data exists for
+ * the channel. Returns 0 on success or a negative errno.
+ */
+static int twl6030_gpadc_get_raw(struct twl6030_gpadc_data *gpadc,
+		int channel, int *res)
+{
+	u8 reg = gpadc->pdata->channel_to_reg(channel);
+	__le16 val;
+	int raw_code;
+	int ret;
+
+	ret = twl6030_gpadc_read(reg, (u8 *)&val);
+	if (ret) {
+		dev_dbg(gpadc->dev, "unable to read register 0x%X\n", reg);
+		return ret;
+	}
+
+	raw_code = le16_to_cpu(val);
+	dev_dbg(gpadc->dev, "GPADC raw code: %d", raw_code);
+
+	if (twl6030_channel_calibrated(gpadc->pdata, channel))
+		*res = twl6030_gpadc_make_correction(gpadc, channel, raw_code);
+	else
+		*res = raw_code;
+
+	return ret;
+}
+
+/*
+ * Read a channel and convert the calibrated code to millivolts using
+ * the per-channel ideal-curve gain (fixed point, x1000).
+ */
+static int twl6030_gpadc_get_processed(struct twl6030_gpadc_data *gpadc,
+		int channel, int *val)
+{
+	int code;
+	int idx;
+	int mv;
+	int err;
+
+	err = twl6030_gpadc_get_raw(gpadc, channel, &code);
+	if (err)
+		return err;
+
+	idx = twl6030_gpadc_lookup(gpadc->pdata->ideal, channel,
+				   gpadc->pdata->nchannels);
+
+	/* gain is x1000 fixed point; shift back into mV range */
+	mv = (code * gpadc->twl6030_cal_tbl[idx].gain) / 1000;
+
+	dev_dbg(gpadc->dev, "GPADC corrected code: %d", code);
+	dev_dbg(gpadc->dev, "GPADC value: %d", mv);
+
+	*val = mv;
+
+	return err;
+}
+
+/*
+ * IIO read_raw hook: start a conversion, sleep on the completion that
+ * the EOC interrupt signals, then fetch either the raw or the
+ * processed (mV) value depending on @mask.
+ */
+static int twl6030_gpadc_read_raw(struct iio_dev *indio_dev,
+			     const struct iio_chan_spec *chan,
+			     int *val, int *val2, long mask)
+{
+	struct twl6030_gpadc_data *gpadc = iio_priv(indio_dev);
+	int ret;
+	long timeout;
+
+	mutex_lock(&gpadc->lock);
+
+	ret = gpadc->pdata->start_conversion(chan->channel);
+	if (ret) {
+		dev_err(gpadc->dev, "failed to start conversion\n");
+		goto err;
+	}
+	/* wait for conversion to complete */
+	timeout = wait_for_completion_interruptible_timeout(
+				&gpadc->irq_complete, msecs_to_jiffies(5000));
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		goto err;
+	} else if (timeout < 0) {
+		ret = -EINTR;
+		goto err;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = twl6030_gpadc_get_raw(gpadc, chan->channel, val);
+		ret = ret ? -EIO : IIO_VAL_INT;
+		break;
+
+	case IIO_CHAN_INFO_PROCESSED:
+		ret = twl6030_gpadc_get_processed(gpadc, chan->channel, val);
+		ret = ret ? -EIO : IIO_VAL_INT;
+		break;
+
+	default:
+		/*
+		 * Fix: the original fell through with ret still 0, so an
+		 * unsupported mask returned "success" with *val untouched.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+err:
+	mutex_unlock(&gpadc->lock);
+
+	return ret;
+}
+
+/*
+ * The GPADC channels are calibrated using a two point calibration method.
+ * The channels measured with two known values: volt1 and volt2, and
+ * ideal corresponding output codes are known: code1, code2.
+ * The difference(d1, d2) between ideal and measured codes stored in trim
+ * registers.
+ * The goal is to find offset and gain of the real curve for each calibrated
+ * channel.
+ * gain: k = 1 + ((d2 - d1) / (x2 - x1))
+ * offset: b = d1 + (k - 1) * x1
+ *
+ * All three stored values (gain, gain_error k, offset_error b) are
+ * fixed point, scaled by 1000, to keep integer arithmetic.
+ */
+static void twl6030_calibrate_channel(struct twl6030_gpadc_data *gpadc,
+		int channel, int d1, int d2)
+{
+	int b, k, gain, x1, x2, i;
+	const struct twl6030_ideal_code *ideal = gpadc->pdata->ideal;
+
+	i = twl6030_gpadc_lookup(ideal, channel, gpadc->pdata->nchannels);
+
+	/* Gain */
+	gain = ((ideal[i].volt2 - ideal[i].volt1) * 1000) /
+		(ideal[i].code2 - ideal[i].code1);
+
+	x1 = ideal[i].code1;
+	x2 = ideal[i].code2;
+
+	/* k - real curve gain */
+	k = 1000 + (((d2 - d1) * 1000) / (x2 - x1));
+
+	/* b - offset of the real curve gain */
+	b = (d1 * 1000) - (k - 1000) * x1;
+
+	gpadc->twl6030_cal_tbl[i].gain = gain;
+	gpadc->twl6030_cal_tbl[i].gain_error = k;
+	gpadc->twl6030_cal_tbl[i].offset_error = b;
+
+	dev_dbg(gpadc->dev, "GPADC d1   for Chn: %d = %d\n", channel, d1);
+	dev_dbg(gpadc->dev, "GPADC d2   for Chn: %d = %d\n", channel, d2);
+	dev_dbg(gpadc->dev, "GPADC x1   for Chn: %d = %d\n", channel, x1);
+	dev_dbg(gpadc->dev, "GPADC x2   for Chn: %d = %d\n", channel, x2);
+	dev_dbg(gpadc->dev, "GPADC Gain for Chn: %d = %d\n", channel, gain);
+	dev_dbg(gpadc->dev, "GPADC k    for Chn: %d = %d\n", channel, k);
+	dev_dbg(gpadc->dev, "GPADC b    for Chn: %d = %d\n", channel, b);
+}
+
+/* Decode one twl6030 trim byte into a signed offset. */
+static inline int twl6030_gpadc_get_trim_offset(s8 d)
+{
+	/*
+	 * XXX NOTE!
+	 * bit 0 - sign, bit 7 - reserved, 6..1 - trim value
+	 * though, the documentation states that trim value
+	 * is absolute value, the correct conversion results are
+	 * obtained if the value is interpreted as 2's complement.
+	 */
+	/* move the sign bit (bit 0) above the magnitude (bits 6..1) ... */
+	__u32 temp = ((d & 0x7f) >> 1) | ((d & 1) << 6);
+
+	/* ... so it can be sign-extended as a 7-bit two's complement value */
+	return sign_extend32(temp, 6);
+}
+
+/*
+ * Read the 16 factory trim registers and derive the per-channel
+ * calibration table for the twl6030. Returns 0 or a negative errno
+ * from the i2c read.
+ */
+static int twl6030_calibration(struct twl6030_gpadc_data *gpadc)
+{
+	int ret;
+	int chn;
+	u8 trim_regs[TWL6030_GPADC_NUM_TRIM_REGS];
+	s8 d1, d2;
+
+	/*
+	 * for calibration two measurements have been performed at
+	 * factory, for some channels, during the production test and
+	 * have been stored in registers. This two stored values are
+	 * used to correct the measurements. The values represent
+	 * offsets for the given input from the output on ideal curve.
+	 */
+	ret = twl_i2c_read(TWL6030_MODULE_ID2, trim_regs,
+			TWL6030_GPADC_TRIM1, TWL6030_GPADC_NUM_TRIM_REGS);
+	if (ret < 0) {
+		dev_err(gpadc->dev, "calibration failed\n");
+		return ret;
+	}
+
+	/* Each channel maps to a fixed (d1, d2) trim register pair */
+	for (chn = 0; chn < TWL6030_GPADC_MAX_CHANNELS; chn++) {
+
+		switch (chn) {
+		case 0:
+			d1 = trim_regs[0];
+			d2 = trim_regs[1];
+			break;
+		case 1:
+		case 3:
+		case 4:
+		case 5:
+		case 6:
+			/* these channels share one trim pair */
+			d1 = trim_regs[4];
+			d2 = trim_regs[5];
+			break;
+		case 2:
+			d1 = trim_regs[12];
+			d2 = trim_regs[13];
+			break;
+		case 7:
+			d1 = trim_regs[6];
+			d2 = trim_regs[7];
+			break;
+		case 8:
+			d1 = trim_regs[2];
+			d2 = trim_regs[3];
+			break;
+		case 9:
+			d1 = trim_regs[8];
+			d2 = trim_regs[9];
+			break;
+		case 10:
+			d1 = trim_regs[10];
+			d2 = trim_regs[11];
+			break;
+		case 14:
+			d1 = trim_regs[14];
+			d2 = trim_regs[15];
+			break;
+		default:
+			/* channel without calibration data */
+			continue;
+		}
+
+		d1 = twl6030_gpadc_get_trim_offset(d1);
+		d2 = twl6030_gpadc_get_trim_offset(d2);
+
+		twl6030_calibrate_channel(gpadc, chn, d1, d2);
+	}
+
+	return 0;
+}
+
+/*
+ * Assemble a twl6032 trim value split across two registers:
+ * high bits from trim_regs[reg0] (masked, shifted up by @shift0),
+ * low bits from trim_regs[reg1] (masked, shifted down past the sign
+ * bit). Bit 0 of trim_regs[reg1] is a sign-magnitude sign flag.
+ */
+static int twl6032_get_trim_value(u8 *trim_regs, unsigned int reg0,
+		unsigned int reg1, unsigned int mask0, unsigned int mask1,
+		unsigned int shift0)
+{
+	int val;
+
+	val = (trim_regs[reg0] & mask0) << shift0;
+	val |= (trim_regs[reg1] & mask1) >> 1;
+	if (trim_regs[reg1] & 0x01)
+		val = -val;
+
+	return val;
+}
+
+/*
+ * twl6032 counterpart of twl6030_calibration(): trim values are
+ * sign-magnitude fields scattered across register pairs, and some
+ * channels (8, 9, 7/18) add a delta on top of a shared base trim.
+ */
+static int twl6032_calibration(struct twl6030_gpadc_data *gpadc)
+{
+	int chn, d1 = 0, d2 = 0, temp;
+	u8 trim_regs[TWL6030_GPADC_NUM_TRIM_REGS];
+	int ret;
+
+	ret = twl_i2c_read(TWL6030_MODULE_ID2, trim_regs,
+			TWL6030_GPADC_TRIM1, TWL6030_GPADC_NUM_TRIM_REGS);
+	if (ret < 0) {
+		dev_err(gpadc->dev, "calibration failed\n");
+		return ret;
+	}
+
+	/*
+	 * Loop to calculate the value needed for returning voltages from
+	 * GPADC not values.
+	 *
+	 * gain is calculated to 3 decimal places fixed point.
+	 */
+	for (chn = 0; chn < TWL6032_GPADC_MAX_CHANNELS; chn++) {
+
+		switch (chn) {
+		case 0:
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 6:
+		case 11:
+		case 14:
+			/* shared base trim for the general-purpose inputs */
+			d1 = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+						    0x06, 2);
+			d2 = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+						    0x06, 2);
+			break;
+		case 8:
+			/* base trim plus backup-battery specific delta */
+			temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+						      0x06, 2);
+			d1 = temp + twl6032_get_trim_value(trim_regs, 7, 6,
+							   0x18, 0x1E, 1);
+
+			temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3F,
+						      0x06, 2);
+			d2 = temp + twl6032_get_trim_value(trim_regs, 9, 7,
+							   0x1F, 0x06, 2);
+			break;
+		case 9:
+			/* base trim plus charger-input specific delta */
+			temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+						      0x06, 2);
+			d1 = temp + twl6032_get_trim_value(trim_regs, 13, 11,
+							   0x18, 0x1E, 1);
+
+			temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+						      0x06, 2);
+			d2 = temp + twl6032_get_trim_value(trim_regs, 15, 13,
+							   0x1F, 0x06, 1);
+			break;
+		case 10:
+			d1 = twl6032_get_trim_value(trim_regs, 10, 8, 0x0f,
+						    0x0E, 3);
+			d2 = twl6032_get_trim_value(trim_regs, 14, 12, 0x0f,
+						    0x0E, 3);
+			break;
+		case 7:
+		case 18:
+			/* sign-magnitude delta held in a single register */
+			temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+						      0x06, 2);
+
+			d1 = (trim_regs[4] & 0x7E) >> 1;
+			if (trim_regs[4] & 0x01)
+				d1 = -d1;
+			d1 += temp;
+
+			temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+						      0x06, 2);
+
+			d2 = (trim_regs[5] & 0xFE) >> 1;
+			if (trim_regs[5] & 0x01)
+				d2 = -d2;
+
+			d2 += temp;
+			break;
+		default:
+			/* No data for other channels */
+			continue;
+		}
+
+		twl6030_calibrate_channel(gpadc, chn, d1, d2);
+	}
+
+	return 0;
+}
+
+/* Shorthand for one indexed GPADC channel with a single info mask bit */
+#define TWL6030_GPADC_CHAN(chn, _type, chan_info) {	\
+	.type = _type,					\
+	.channel = chn,					\
+	.info_mask_separate = BIT(chan_info),		\
+	.indexed = 1,					\
+}
+
+/*
+ * twl6030 channel set; uncalibrated channels (1, 4, 11) only expose
+ * the RAW value since no mV conversion data exists for them.
+ */
+static const struct iio_chan_spec twl6030_gpadc_iio_channels[] = {
+	TWL6030_GPADC_CHAN(0, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(1, IIO_TEMP, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(2, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(3, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(4, IIO_TEMP, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(5, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(6, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(7, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(8, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(9, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(10, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(11, IIO_VOLTAGE, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(14, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+};
+
+/* twl6032 channel set; channel 17 has no calibration, hence RAW only */
+static const struct iio_chan_spec twl6032_gpadc_iio_channels[] = {
+	TWL6030_GPADC_CHAN(0, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(1, IIO_TEMP, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(2, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(3, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(4, IIO_TEMP, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(5, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(6, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(7, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(8, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(9, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(10, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(11, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(14, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+	TWL6030_GPADC_CHAN(17, IIO_VOLTAGE, IIO_CHAN_INFO_RAW),
+	TWL6030_GPADC_CHAN(18, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+};
+
+static const struct iio_info twl6030_gpadc_iio_info = {
+	.read_raw = &twl6030_gpadc_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/* Chip-variant hooks and tables, selected via the OF match data below */
+static const struct twl6030_gpadc_platform_data twl6030_pdata = {
+	.iio_channels = twl6030_gpadc_iio_channels,
+	.nchannels = TWL6030_GPADC_USED_CHANNELS,
+	.ideal = twl6030_ideal,
+	.start_conversion = twl6030_start_conversion,
+	.channel_to_reg = twl6030_channel_to_reg,
+	.calibrate = twl6030_calibration,
+};
+
+static const struct twl6030_gpadc_platform_data twl6032_pdata = {
+	.iio_channels = twl6032_gpadc_iio_channels,
+	.nchannels = TWL6032_GPADC_USED_CHANNELS,
+	.ideal = twl6032_ideal,
+	.start_conversion = twl6032_start_conversion,
+	.channel_to_reg = twl6032_channel_to_reg,
+	.calibrate = twl6032_calibration,
+};
+
+static const struct of_device_id of_twl6030_match_tbl[] = {
+	{
+		.compatible = "ti,twl6030-gpadc",
+		.data = &twl6030_pdata,
+	},
+	{
+		.compatible = "ti,twl6032-gpadc",
+		.data = &twl6032_pdata,
+	},
+	{ /* end */ }
+};
+
+/*
+ * Probe: pick the chip variant from the OF match data, read the trim
+ * registers, hook up the EOC interrupt, switch the GPADC on and
+ * register the IIO device.
+ */
+static int twl6030_gpadc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct twl6030_gpadc_data *gpadc;
+	const struct twl6030_gpadc_platform_data *pdata;
+	const struct of_device_id *match;
+	struct iio_dev *indio_dev;
+	int irq;
+	int ret;
+
+	match = of_match_device(of_twl6030_match_tbl, dev);
+	if (!match)
+		return -EINVAL;
+
+	pdata = match->data;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	gpadc = iio_priv(indio_dev);
+
+	gpadc->twl6030_cal_tbl = devm_kzalloc(dev,
+					sizeof(*gpadc->twl6030_cal_tbl) *
+					pdata->nchannels, GFP_KERNEL);
+	if (!gpadc->twl6030_cal_tbl)
+		return -ENOMEM;
+
+	gpadc->dev = dev;
+	gpadc->pdata = pdata;
+
+	platform_set_drvdata(pdev, indio_dev);
+	mutex_init(&gpadc->lock);
+	init_completion(&gpadc->irq_complete);
+
+	ret = pdata->calibrate(gpadc);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to read calibration registers\n");
+		return ret;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, NULL,
+				twl6030_gpadc_irq_handler,
+				IRQF_ONESHOT, "twl6030_gpadc", indio_dev);
+	/* Fix: the original ignored this return value */
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return ret;
+	}
+
+	ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable GPADC interrupt\n");
+		return ret;
+	}
+
+	/* Power the GPADC block on */
+	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCS,
+					TWL6030_REG_TOGGLE1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable GPADC module\n");
+		return ret;
+	}
+
+	indio_dev->name = DRIVER_NAME;
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &twl6030_gpadc_iio_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = pdata->iio_channels;
+	indio_dev->num_channels = pdata->nchannels;
+
+	return iio_device_register(indio_dev);
+}
+
+/* Mask the GPADC interrupt and unregister the IIO device. */
+static int twl6030_gpadc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *iiodev = platform_get_drvdata(pdev);
+
+	twl6030_gpadc_disable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
+	iio_device_unregister(iiodev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Power the GPADC down across suspend. An i2c failure is only logged
+ * -- suspend proceeds regardless.
+ *
+ * Fix: dropped the stray ';' after both function bodies (a file-scope
+ * empty declaration, not valid in strictly conforming C).
+ */
+static int twl6030_gpadc_suspend(struct device *pdev)
+{
+	int ret;
+
+	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCR,
+				TWL6030_REG_TOGGLE1);
+	if (ret)
+		dev_err(pdev, "error resetting GPADC (%d)!\n", ret);
+
+	return 0;
+}
+
+/* Power the GPADC back up on resume; failure is logged, not fatal. */
+static int twl6030_gpadc_resume(struct device *pdev)
+{
+	int ret;
+
+	ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCS,
+				TWL6030_REG_TOGGLE1);
+	if (ret)
+		dev_err(pdev, "error setting GPADC (%d)!\n", ret);
+
+	return 0;
+}
+#endif
+
+/* Suspend/resume hooks compile out when CONFIG_PM_SLEEP is unset */
+static SIMPLE_DEV_PM_OPS(twl6030_gpadc_pm_ops, twl6030_gpadc_suspend,
+					twl6030_gpadc_resume);
+
+static struct platform_driver twl6030_gpadc_driver = {
+	.probe		= twl6030_gpadc_probe,
+	.remove		= twl6030_gpadc_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.pm	= &twl6030_gpadc_pm_ops,
+		.of_match_table = of_twl6030_match_tbl,
+	},
+};
+
+module_platform_driver(twl6030_gpadc_driver);
+
+MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
+MODULE_DESCRIPTION("twl6030 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
new file mode 100644
index 00000000000000..8ec353c01d98e0
--- /dev/null
+++ b/drivers/iio/adc/vf610_adc.c
@@ -0,0 +1,735 @@
+/*
+ * Freescale Vybrid vf610 ADC driver
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/driver.h>
+
+/* This will be the driver name the kernel reports */
+#define DRIVER_NAME "vf610-adc"
+
+/* Vybrid/IMX ADC registers */
+#define VF610_REG_ADC_HC0 0x00
+#define VF610_REG_ADC_HC1 0x04
+#define VF610_REG_ADC_HS 0x08
+#define VF610_REG_ADC_R0 0x0c
+#define VF610_REG_ADC_R1 0x10
+#define VF610_REG_ADC_CFG 0x14
+#define VF610_REG_ADC_GC 0x18
+#define VF610_REG_ADC_GS 0x1c
+#define VF610_REG_ADC_CV 0x20
+#define VF610_REG_ADC_OFS 0x24
+#define VF610_REG_ADC_CAL 0x28
+#define VF610_REG_ADC_PCTL 0x30
+
+/* Configuration register field define */
+#define VF610_ADC_MODE_BIT8 0x00
+#define VF610_ADC_MODE_BIT10 0x04
+#define VF610_ADC_MODE_BIT12 0x08
+#define VF610_ADC_MODE_MASK 0x0c
+#define VF610_ADC_BUSCLK2_SEL 0x01
+#define VF610_ADC_ALTCLK_SEL 0x02
+#define VF610_ADC_ADACK_SEL 0x03
+#define VF610_ADC_ADCCLK_MASK 0x03
+#define VF610_ADC_CLK_DIV2 0x20
+#define VF610_ADC_CLK_DIV4 0x40
+#define VF610_ADC_CLK_DIV8 0x60
+#define VF610_ADC_CLK_MASK 0x60
+#define VF610_ADC_ADLSMP_LONG 0x10
+#define VF610_ADC_ADSTS_MASK 0x300
+#define VF610_ADC_ADLPC_EN 0x80
+#define VF610_ADC_ADHSC_EN 0x400
+#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VBG 0x1000
+#define VF610_ADC_ADTRG_HARD 0x2000
+#define VF610_ADC_AVGS_8 0x4000
+#define VF610_ADC_AVGS_16 0x8000
+#define VF610_ADC_AVGS_32 0xC000
+#define VF610_ADC_AVGS_MASK 0xC000
+#define VF610_ADC_OVWREN 0x10000
+
+/* General control register field define */
+#define VF610_ADC_ADACKEN 0x1
+#define VF610_ADC_DMAEN 0x2
+#define VF610_ADC_ACREN 0x4
+#define VF610_ADC_ACFGT 0x8
+#define VF610_ADC_ACFE 0x10
+#define VF610_ADC_AVGEN 0x20
+#define VF610_ADC_ADCON 0x40
+#define VF610_ADC_CAL 0x80
+
+/* Other field define */
+#define VF610_ADC_ADCHC(x) ((x) & 0x1F)
+#define VF610_ADC_AIEN (0x1 << 7)
+#define VF610_ADC_CONV_DISABLE 0x1F
+#define VF610_ADC_HS_COCO0 0x1
+#define VF610_ADC_CALF 0x2
+#define VF610_ADC_TIMEOUT msecs_to_jiffies(100)
+
+enum clk_sel {
+ VF610_ADCIOC_BUSCLK_SET,
+ VF610_ADCIOC_ALTCLK_SET,
+ VF610_ADCIOC_ADACK_SET,
+};
+
+enum vol_ref {
+ VF610_ADCIOC_VR_VREF_SET,
+ VF610_ADCIOC_VR_VALT_SET,
+ VF610_ADCIOC_VR_VBG_SET,
+};
+
+enum average_sel {
+ VF610_ADC_SAMPLE_1,
+ VF610_ADC_SAMPLE_4,
+ VF610_ADC_SAMPLE_8,
+ VF610_ADC_SAMPLE_16,
+ VF610_ADC_SAMPLE_32,
+};
+
+struct vf610_adc_feature {
+ enum clk_sel clk_sel;
+ enum vol_ref vol_ref;
+
+ int clk_div;
+ int sample_rate;
+ int res_mode;
+
+ bool lpm;
+ bool calibration;
+ bool ovwren;
+};
+
+struct vf610_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+
+ u32 vref_uv;
+ u32 value;
+ struct regulator *vref;
+ struct vf610_adc_feature adc_feature;
+
+ struct completion completion;
+};
+
+#define VF610_ADC_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+#define VF610_ADC_TEMPERATURE_CHAN(_idx, _chan_type) { \
+ .type = (_chan_type), \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+}
+
+static const struct iio_chan_spec vf610_adc_iio_channels[] = {
+ VF610_ADC_CHAN(0, IIO_VOLTAGE),
+ VF610_ADC_CHAN(1, IIO_VOLTAGE),
+ VF610_ADC_CHAN(2, IIO_VOLTAGE),
+ VF610_ADC_CHAN(3, IIO_VOLTAGE),
+ VF610_ADC_CHAN(4, IIO_VOLTAGE),
+ VF610_ADC_CHAN(5, IIO_VOLTAGE),
+ VF610_ADC_CHAN(6, IIO_VOLTAGE),
+ VF610_ADC_CHAN(7, IIO_VOLTAGE),
+ VF610_ADC_CHAN(8, IIO_VOLTAGE),
+ VF610_ADC_CHAN(9, IIO_VOLTAGE),
+ VF610_ADC_CHAN(10, IIO_VOLTAGE),
+ VF610_ADC_CHAN(11, IIO_VOLTAGE),
+ VF610_ADC_CHAN(12, IIO_VOLTAGE),
+ VF610_ADC_CHAN(13, IIO_VOLTAGE),
+ VF610_ADC_CHAN(14, IIO_VOLTAGE),
+ VF610_ADC_CHAN(15, IIO_VOLTAGE),
+ VF610_ADC_TEMPERATURE_CHAN(26, IIO_TEMP),
+ /* sentinel */
+};
+
+/*
+ * ADC sample frequency, unit is ADCK cycles.
+ * ADC clk source is ipg clock, which is the same as bus clock.
+ *
+ * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+ * SFCAdder: fixed to 6 ADCK cycles
+ * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+ * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+ * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+ *
+ * By default, enable 12 bit resolution mode, clock source
+ * set to ipg clock, So get below frequency group:
+ */
+static const u32 vf610_sample_freq_avail[5] =
+{1941176, 559332, 286957, 145374, 73171};
+
+static inline void vf610_adc_cfg_init(struct vf610_adc *info)
+{
+ /* set default Configuration for ADC controller */
+ info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
+ info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+ info->adc_feature.calibration = true;
+ info->adc_feature.ovwren = true;
+
+ info->adc_feature.clk_div = 1;
+ info->adc_feature.res_mode = 12;
+ info->adc_feature.sample_rate = 1;
+ info->adc_feature.lpm = true;
+}
+
+static void vf610_adc_cfg_post_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &info->adc_feature;
+ int cfg_data = 0;
+ int gc_data = 0;
+
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_ALTCLK_SET:
+ cfg_data |= VF610_ADC_ALTCLK_SEL;
+ break;
+ case VF610_ADCIOC_ADACK_SET:
+ cfg_data |= VF610_ADC_ADACK_SEL;
+ break;
+ default:
+ break;
+ }
+
+ /* low power set for calibration */
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* enable high speed for calibration */
+ cfg_data |= VF610_ADC_ADHSC_EN;
+
+ /* voltage reference */
+ switch (adc_feature->vol_ref) {
+ case VF610_ADCIOC_VR_VREF_SET:
+ break;
+ case VF610_ADCIOC_VR_VALT_SET:
+ cfg_data |= VF610_ADC_REFSEL_VALT;
+ break;
+ case VF610_ADCIOC_VR_VBG_SET:
+ cfg_data |= VF610_ADC_REFSEL_VBG;
+ break;
+ default:
+ dev_err(info->dev, "error voltage reference\n");
+ }
+
+ /* data overwrite enable */
+ if (adc_feature->ovwren)
+ cfg_data |= VF610_ADC_OVWREN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_calibration(struct vf610_adc *info)
+{
+ int adc_gc, hc_cfg;
+ int timeout;
+
+ if (!info->adc_feature.calibration)
+ return;
+
+ /* enable calibration interrupt */
+ hc_cfg = VF610_ADC_AIEN | VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GC);
+ writel(adc_gc | VF610_ADC_CAL, info->regs + VF610_REG_ADC_GC);
+
+ timeout = wait_for_completion_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (timeout == 0)
+ dev_err(info->dev, "Timeout for adc calibration\n");
+
+ adc_gc = readl(info->regs + VF610_REG_ADC_GS);
+ if (adc_gc & VF610_ADC_CALF)
+ dev_err(info->dev, "ADC calibration failed\n");
+
+ info->adc_feature.calibration = false;
+}
+
+static void vf610_adc_cfg_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+
+ /* low power configuration */
+ cfg_data &= ~VF610_ADC_ADLPC_EN;
+ if (adc_feature->lpm)
+ cfg_data |= VF610_ADC_ADLPC_EN;
+
+ /* disable high speed */
+ cfg_data &= ~VF610_ADC_ADHSC_EN;
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+}
+
+static void vf610_adc_sample_set(struct vf610_adc *info)
+{
+ struct vf610_adc_feature *adc_feature = &(info->adc_feature);
+ int cfg_data, gc_data;
+
+ cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+ gc_data = readl(info->regs + VF610_REG_ADC_GC);
+
+ /* resolution mode */
+ cfg_data &= ~VF610_ADC_MODE_MASK;
+ switch (adc_feature->res_mode) {
+ case 8:
+ cfg_data |= VF610_ADC_MODE_BIT8;
+ break;
+ case 10:
+ cfg_data |= VF610_ADC_MODE_BIT10;
+ break;
+ case 12:
+ cfg_data |= VF610_ADC_MODE_BIT12;
+ break;
+ default:
+ dev_err(info->dev, "error resolution mode\n");
+ break;
+ }
+
+ /* clock select and clock divider */
+ cfg_data &= ~(VF610_ADC_CLK_MASK | VF610_ADC_ADCCLK_MASK);
+ switch (adc_feature->clk_div) {
+ case 1:
+ break;
+ case 2:
+ cfg_data |= VF610_ADC_CLK_DIV2;
+ break;
+ case 4:
+ cfg_data |= VF610_ADC_CLK_DIV4;
+ break;
+ case 8:
+ cfg_data |= VF610_ADC_CLK_DIV8;
+ break;
+ case 16:
+ switch (adc_feature->clk_sel) {
+ case VF610_ADCIOC_BUSCLK_SET:
+ cfg_data |= VF610_ADC_BUSCLK2_SEL | VF610_ADC_CLK_DIV8;
+ break;
+ default:
+ dev_err(info->dev, "error clk divider\n");
+ break;
+ }
+ break;
+ }
+
+ /* Use the short sample mode */
+ cfg_data &= ~(VF610_ADC_ADLSMP_LONG | VF610_ADC_ADSTS_MASK);
+
+ /* update hardware average selection */
+ cfg_data &= ~VF610_ADC_AVGS_MASK;
+ gc_data &= ~VF610_ADC_AVGEN;
+ switch (adc_feature->sample_rate) {
+ case VF610_ADC_SAMPLE_1:
+ break;
+ case VF610_ADC_SAMPLE_4:
+ gc_data |= VF610_ADC_AVGEN;
+ break;
+ case VF610_ADC_SAMPLE_8:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_8;
+ break;
+ case VF610_ADC_SAMPLE_16:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_16;
+ break;
+ case VF610_ADC_SAMPLE_32:
+ gc_data |= VF610_ADC_AVGEN;
+ cfg_data |= VF610_ADC_AVGS_32;
+ break;
+ default:
+ dev_err(info->dev,
+ "error hardware sample average select\n");
+ }
+
+ writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+ writel(gc_data, info->regs + VF610_REG_ADC_GC);
+}
+
+static void vf610_adc_hw_init(struct vf610_adc *info)
+{
+ /* CFG: Feature set */
+ vf610_adc_cfg_post_set(info);
+ vf610_adc_sample_set(info);
+
+ /* adc calibration */
+ vf610_adc_calibration(info);
+
+ /* CFG: power and speed set */
+ vf610_adc_cfg_set(info);
+}
+
+static int vf610_adc_read_data(struct vf610_adc *info)
+{
+ int result;
+
+ result = readl(info->regs + VF610_REG_ADC_R0);
+
+ switch (info->adc_feature.res_mode) {
+ case 8:
+ result &= 0xFF;
+ break;
+ case 10:
+ result &= 0x3FF;
+ break;
+ case 12:
+ result &= 0xFFF;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
+{
+ struct vf610_adc *info = (struct vf610_adc *)dev_id;
+ int coco;
+
+ coco = readl(info->regs + VF610_REG_ADC_HS);
+ if (coco & VF610_ADC_HS_COCO0) {
+ info->value = vf610_adc_read_data(info);
+ complete(&info->completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+
+static struct attribute *vf610_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group vf610_attribute_group = {
+ .attrs = vf610_attributes,
+};
+
+static int vf610_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ unsigned int hc_cfg;
+ long ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&indio_dev->mlock);
+ reinit_completion(&info->completion);
+
+ hc_cfg = VF610_ADC_ADCHC(chan->channel);
+ hc_cfg |= VF610_ADC_AIEN;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+ ret = wait_for_completion_interruptible_timeout
+ (&info->completion, VF610_ADC_TIMEOUT);
+ if (ret == 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return -ETIMEDOUT;
+ }
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->value;
+ break;
+ case IIO_TEMP:
+			/*
+			 * Calculate in degree Celsius times 1000,
+			 * using a sensor slope of 1.84 mV/°C and a 25°C
+			 * reading of 696 mV, i.e. 864 raw counts at
+			 * 12-bit resolution with a 3.3 V reference.
+			 */
+ *val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+ break;
+ default:
+ mutex_unlock(&indio_dev->mlock);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = info->vref_uv / 1000;
+ *val2 = info->adc_feature.res_mode;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+ *val2 = 0;
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0;
+ i < ARRAY_SIZE(vf610_sample_freq_avail);
+ i++)
+ if (val == vf610_sample_freq_avail[i]) {
+ info->adc_feature.sample_rate = i;
+ vf610_adc_sample_set(info);
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int vf610_adc_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+	if ((readval == NULL) ||
+		((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+ return -EINVAL;
+
+ *readval = readl(info->regs + reg);
+
+ return 0;
+}
+
+static const struct iio_info vf610_adc_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &vf610_read_raw,
+ .write_raw = &vf610_write_raw,
+ .debugfs_reg_access = &vf610_adc_reg_access,
+ .attrs = &vf610_attribute_group,
+};
+
+static const struct of_device_id vf610_adc_match[] = {
+ { .compatible = "fsl,vf610-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_adc_match);
+
+static int vf610_adc_probe(struct platform_device *pdev)
+{
+ struct vf610_adc *info;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ info = iio_priv(indio_dev);
+ info->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(info->dev, irq,
+ vf610_adc_isr, 0,
+ dev_name(&pdev->dev), info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
+ return ret;
+ }
+
+ info->clk = devm_clk_get(&pdev->dev, "adc");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
+ PTR_ERR(info->clk));
+ return PTR_ERR(info->clk);
+ }
+
+ info->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(info->vref))
+ return PTR_ERR(info->vref);
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ info->vref_uv = regulator_get_voltage(info->vref);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ init_completion(&info->completion);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &vf610_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vf610_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not prepare or enable the clock.\n");
+ goto error_adc_clk_enable;
+ }
+
+ vf610_adc_cfg_init(info);
+ vf610_adc_hw_init(info);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device.\n");
+ goto error_iio_device_register;
+ }
+
+ return 0;
+
+
+error_iio_device_register:
+ clk_disable_unprepare(info->clk);
+error_adc_clk_enable:
+ regulator_disable(info->vref);
+
+ return ret;
+}
+
+static int vf610_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(info->vref);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_adc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int hc_cfg;
+
+ /* ADC controller enters to stop mode */
+ hc_cfg = readl(info->regs + VF610_REG_ADC_HC0);
+ hc_cfg |= VF610_ADC_CONV_DISABLE;
+ writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
+
+ clk_disable_unprepare(info->clk);
+ regulator_disable(info->vref);
+
+ return 0;
+}
+
+static int vf610_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_adc *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(info->vref);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ goto disable_reg;
+
+ vf610_adc_hw_init(info);
+
+ return 0;
+
+disable_reg:
+ regulator_disable(info->vref);
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_adc_pm_ops, vf610_adc_suspend, vf610_adc_resume);
+
+static struct platform_driver vf610_adc_driver = {
+ .probe = vf610_adc_probe,
+ .remove = vf610_adc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = vf610_adc_match,
+ .pm = &vf610_adc_pm_ops,
+ },
+};
+
+module_platform_driver(vf610_adc_driver);
+
+MODULE_AUTHOR("Fugang Duan <B38611@freescale.com>");
+MODULE_DESCRIPTION("Freescale VF610 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
new file mode 100644
index 00000000000000..3be2e35721cc58
--- /dev/null
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -0,0 +1,157 @@
+/*
+ * Nano River Technologies viperboard IIO ADC driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_ADC_CMD_GET 0x00
+
+struct vprbrd_adc_msg {
+ u8 cmd;
+ u8 chan;
+ u8 val;
+} __packed;
+
+struct vprbrd_adc {
+ struct vprbrd *vb;
+};
+
+#define VPRBRD_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
+ VPRBRD_ADC_CHANNEL(0),
+ VPRBRD_ADC_CHANNEL(1),
+ VPRBRD_ADC_CHANNEL(2),
+ VPRBRD_ADC_CHANNEL(3),
+};
+
+static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ int ret, error = 0;
+ struct vprbrd_adc *adc = iio_priv(iio_dev);
+ struct vprbrd *vb = adc->vb;
+ struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&vb->lock);
+
+ admsg->cmd = VPRBRD_ADC_CMD_GET;
+ admsg->chan = chan->channel;
+ admsg->val = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb send error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ *val = admsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb recv error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ if (error)
+ goto error;
+
+ return IIO_VAL_INT;
+ default:
+ error = -EINVAL;
+ break;
+ }
+error:
+ return error;
+}
+
+static const struct iio_info vprbrd_adc_iio_info = {
+ .read_raw = &vprbrd_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int vprbrd_adc_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_adc *adc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* registering iio */
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(indio_dev);
+ adc->vb = vb;
+ indio_dev->name = "viperboard adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &vprbrd_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vprbrd_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register iio (adc)");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver vprbrd_adc_driver = {
+ .driver = {
+ .name = "viperboard-adc",
+ },
+ .probe = vprbrd_adc_probe,
+};
+
+module_platform_driver(vprbrd_adc_driver);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-adc");
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
new file mode 100644
index 00000000000000..a221f7329b7914
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -0,0 +1,1336 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013-2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Documentation for the parts can be found at:
+ * - XADC hardmacro: Xilinx UG480
+ * - ZYNQ XADC interface: Xilinx UG585
+ * - AXI XADC interface: Xilinx PG019
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "xilinx-xadc.h"
+
+static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
+
+/* ZYNQ register definitions */
+#define XADC_ZYNQ_REG_CFG 0x00
+#define XADC_ZYNQ_REG_INTSTS 0x04
+#define XADC_ZYNQ_REG_INTMSK 0x08
+#define XADC_ZYNQ_REG_STATUS 0x0c
+#define XADC_ZYNQ_REG_CFIFO 0x10
+#define XADC_ZYNQ_REG_DFIFO 0x14
+#define XADC_ZYNQ_REG_CTL 0x18
+
+#define XADC_ZYNQ_CFG_ENABLE BIT(31)
+#define XADC_ZYNQ_CFG_CFIFOTH_MASK (0xf << 20)
+#define XADC_ZYNQ_CFG_CFIFOTH_OFFSET 20
+#define XADC_ZYNQ_CFG_DFIFOTH_MASK (0xf << 16)
+#define XADC_ZYNQ_CFG_DFIFOTH_OFFSET 16
+#define XADC_ZYNQ_CFG_WEDGE BIT(13)
+#define XADC_ZYNQ_CFG_REDGE BIT(12)
+#define XADC_ZYNQ_CFG_TCKRATE_MASK (0x3 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV2 (0x0 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV4 (0x1 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV8 (0x2 << 8)
+#define XADC_ZYNQ_CFG_TCKRATE_DIV16 (0x3 << 8)
+#define XADC_ZYNQ_CFG_IGAP_MASK 0x1f
+#define XADC_ZYNQ_CFG_IGAP(x) (x)
+
+#define XADC_ZYNQ_INT_CFIFO_LTH BIT(9)
+#define XADC_ZYNQ_INT_DFIFO_GTH BIT(8)
+#define XADC_ZYNQ_INT_ALARM_MASK 0xff
+#define XADC_ZYNQ_INT_ALARM_OFFSET 0
+
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_MASK (0xf << 16)
+#define XADC_ZYNQ_STATUS_CFIFO_LVL_OFFSET 16
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_MASK (0xf << 12)
+#define XADC_ZYNQ_STATUS_DFIFO_LVL_OFFSET 12
+#define XADC_ZYNQ_STATUS_CFIFOF BIT(11)
+#define XADC_ZYNQ_STATUS_CFIFOE BIT(10)
+#define XADC_ZYNQ_STATUS_DFIFOF BIT(9)
+#define XADC_ZYNQ_STATUS_DFIFOE BIT(8)
+#define XADC_ZYNQ_STATUS_OT BIT(7)
+#define XADC_ZYNQ_STATUS_ALM(x) BIT(x)
+
+#define XADC_ZYNQ_CTL_RESET BIT(4)
+
+#define XADC_ZYNQ_CMD_NOP 0x00
+#define XADC_ZYNQ_CMD_READ 0x01
+#define XADC_ZYNQ_CMD_WRITE 0x02
+
+#define XADC_ZYNQ_CMD(cmd, addr, data) (((cmd) << 26) | ((addr) << 16) | (data))
+
+/* AXI register definitions */
+#define XADC_AXI_REG_RESET 0x00
+#define XADC_AXI_REG_STATUS 0x04
+#define XADC_AXI_REG_ALARM_STATUS 0x08
+#define XADC_AXI_REG_CONVST 0x0c
+#define XADC_AXI_REG_XADC_RESET 0x10
+#define XADC_AXI_REG_GIER 0x5c
+#define XADC_AXI_REG_IPISR 0x60
+#define XADC_AXI_REG_IPIER 0x68
+#define XADC_AXI_ADC_REG_OFFSET 0x200
+
+#define XADC_AXI_RESET_MAGIC 0xa
+#define XADC_AXI_GIER_ENABLE BIT(31)
+
+#define XADC_AXI_INT_EOS BIT(4)
+#define XADC_AXI_INT_ALARM_MASK 0x3c0f
+
+#define XADC_FLAGS_BUFFERED BIT(0)
+
+static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t val)
+{
+ writel(val, xadc->base + reg);
+}
+
+static void xadc_read_reg(struct xadc *xadc, unsigned int reg,
+ uint32_t *val)
+{
+ *val = readl(xadc->base + reg);
+}
+
+/*
+ * The ZYNQ interface uses two asynchronous FIFOs for communication with the
+ * XADC. Reads and writes to the XADC register are performed by submitting a
+ * request to the command FIFO (CFIFO), once the request has been completed the
+ * result can be read from the data FIFO (DFIFO). The method currently used in
+ * this driver is to submit the request for a read/write operation, then go to
+ * sleep and wait for an interrupt that signals that a response is available in
+ * the data FIFO.
+ */
+
+static void xadc_zynq_write_fifo(struct xadc *xadc, uint32_t *cmd,
+ unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFIFO, cmd[i]);
+}
+
+static void xadc_zynq_drain_fifo(struct xadc *xadc)
+{
+ uint32_t status, tmp;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+
+ while (!(status & XADC_ZYNQ_STATUS_DFIFOE)) {
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &status);
+ }
+}
+
+static void xadc_zynq_update_intmsk(struct xadc *xadc, unsigned int mask,
+ unsigned int val)
+{
+ xadc->zynq_intmask &= ~mask;
+ xadc->zynq_intmask |= val;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK,
+ xadc->zynq_intmask | xadc->zynq_masked_alarm);
+}
+
+static int xadc_zynq_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ uint32_t cmd[1];
+ uint32_t tmp;
+ int ret;
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+
+ reinit_completion(&xadc->completion);
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_WRITE, reg, val);
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 0 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ else
+ ret = 0;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &tmp);
+
+ return ret;
+}
+
+static int xadc_zynq_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t cmd[2];
+ uint32_t resp, tmp;
+ int ret;
+
+ cmd[0] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_READ, reg, 0);
+ cmd[1] = XADC_ZYNQ_CMD(XADC_ZYNQ_CMD_NOP, 0, 0);
+
+ spin_lock_irq(&xadc->lock);
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ xadc_zynq_drain_fifo(xadc);
+ reinit_completion(&xadc->completion);
+
+ xadc_zynq_write_fifo(xadc, cmd, ARRAY_SIZE(cmd));
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &tmp);
+ tmp &= ~XADC_ZYNQ_CFG_DFIFOTH_MASK;
+ tmp |= 1 << XADC_ZYNQ_CFG_DFIFOTH_OFFSET;
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, tmp);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH, 0);
+ spin_unlock_irq(&xadc->lock);
+ ret = wait_for_completion_interruptible_timeout(&xadc->completion, HZ);
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ return ret;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_DFIFO, &resp);
+
+ *val = resp & 0xffff;
+
+ return 0;
+}
+
+static unsigned int xadc_zynq_transform_alarm(unsigned int alarm)
+{
+ return ((alarm & 0x80) >> 4) |
+ ((alarm & 0x78) << 1) |
+ (alarm & 0x07);
+}
+
+/*
+ * The ZYNQ threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, this means as
+ * soon as a threshold condition is present we would enter the interrupt handler
+ * again and again. To work around this we mask all active thresholds interrupts
+ * in the interrupt handler and start a timer. In this timer we poll the
+ * interrupt status and only if the interrupt is inactive we unmask it again.
+ */
+static void xadc_zynq_unmask_worker(struct work_struct *work)
+{
+ struct xadc *xadc = container_of(work, struct xadc, zynq_unmask_work.work);
+ unsigned int misc_sts, unmask;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_STATUS, &misc_sts);
+
+ misc_sts &= XADC_ZYNQ_INT_ALARM_MASK;
+
+ spin_lock_irq(&xadc->lock);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (xadc->zynq_masked_alarm ^ misc_sts) & xadc->zynq_masked_alarm;
+ xadc->zynq_masked_alarm &= misc_sts;
+
+ /* Also clear those which are masked out anyway */
+ xadc->zynq_masked_alarm &= ~xadc->zynq_intmask;
+
+ /* Clear the interrupts before we unmask them */
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, unmask);
+
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+
+ spin_unlock_irq(&xadc->lock);
+
+ /* if still pending some alarm re-trigger the timer */
+ if (xadc->zynq_masked_alarm) {
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+ }
+}
+
+static irqreturn_t xadc_zynq_threaded_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ spin_lock_irq(&xadc->lock);
+ alarm = xadc->zynq_alarm;
+ xadc->zynq_alarm = 0;
+ spin_unlock_irq(&xadc->lock);
+
+ xadc_handle_events(indio_dev, xadc_zynq_transform_alarm(alarm));
+
+ /* unmask the required interrupts in timer. */
+ schedule_delayed_work(&xadc->zynq_unmask_work,
+ msecs_to_jiffies(XADC_ZYNQ_UNMASK_TIMEOUT));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xadc_zynq_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ irqreturn_t ret = IRQ_HANDLED;
+ uint32_t status;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+
+ status &= ~(xadc->zynq_intmask | xadc->zynq_masked_alarm);
+
+ if (!status)
+ return IRQ_NONE;
+
+ spin_lock(&xadc->lock);
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status);
+
+ if (status & XADC_ZYNQ_INT_DFIFO_GTH) {
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_DFIFO_GTH,
+ XADC_ZYNQ_INT_DFIFO_GTH);
+ complete(&xadc->completion);
+ }
+
+ status &= XADC_ZYNQ_INT_ALARM_MASK;
+ if (status) {
+ xadc->zynq_alarm |= status;
+ xadc->zynq_masked_alarm |= status;
+ /*
+ * mask the current event interrupt,
+ * unmask it when the interrupt is no more active.
+ */
+ xadc_zynq_update_intmsk(xadc, 0, 0);
+ ret = IRQ_WAKE_THREAD;
+ }
+ spin_unlock(&xadc->lock);
+
+ return ret;
+}
+
+#define XADC_ZYNQ_TCK_RATE_MAX 50000000
+#define XADC_ZYNQ_IGAP_DEFAULT 20
+
+static int xadc_zynq_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long pcap_rate;
+ unsigned int tck_div;
+ unsigned int div;
+ unsigned int igap;
+ unsigned int tck_rate;
+
+ /* TODO: Figure out how to make igap and tck_rate configurable */
+ igap = XADC_ZYNQ_IGAP_DEFAULT;
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+
+ xadc->zynq_intmask = ~0;
+
+ pcap_rate = clk_get_rate(xadc->clk);
+
+ if (tck_rate > XADC_ZYNQ_TCK_RATE_MAX)
+ tck_rate = XADC_ZYNQ_TCK_RATE_MAX;
+ if (tck_rate > pcap_rate / 2) {
+ div = 2;
+ } else {
+ div = pcap_rate / tck_rate;
+ if (pcap_rate / div > XADC_ZYNQ_TCK_RATE_MAX)
+ div++;
+ }
+
+ if (div <= 3)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV2;
+ else if (div <= 7)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV4;
+ else if (div <= 15)
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV8;
+ else
+ tck_div = XADC_ZYNQ_CFG_TCKRATE_DIV16;
+
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, XADC_ZYNQ_CTL_RESET);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CTL, 0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, ~0);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTMSK, xadc->zynq_intmask);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_CFG, XADC_ZYNQ_CFG_ENABLE |
+ XADC_ZYNQ_CFG_REDGE | XADC_ZYNQ_CFG_WEDGE |
+ tck_div | XADC_ZYNQ_CFG_IGAP(igap));
+
+ return 0;
+}
+
+static unsigned long xadc_zynq_get_dclk_rate(struct xadc *xadc)
+{
+ unsigned int div;
+ uint32_t val;
+
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_CFG, &val);
+
+ switch (val & XADC_ZYNQ_CFG_TCKRATE_MASK) {
+ case XADC_ZYNQ_CFG_TCKRATE_DIV4:
+ div = 4;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV8:
+ div = 8;
+ break;
+ case XADC_ZYNQ_CFG_TCKRATE_DIV16:
+ div = 16;
+ break;
+ default:
+ div = 2;
+ break;
+ }
+
+ return clk_get_rate(xadc->clk) / div;
+}
+
+static void xadc_zynq_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ unsigned long flags;
+ uint32_t status;
+
+ /* Move OT to bit 7 */
+ alarm = ((alarm & 0x08) << 4) | ((alarm & 0xf0) >> 1) | (alarm & 0x07);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+
+ /* Clear previous interrupts if any. */
+ xadc_read_reg(xadc, XADC_ZYNQ_REG_INTSTS, &status);
+ xadc_write_reg(xadc, XADC_ZYNQ_REG_INTSTS, status & alarm);
+
+ xadc_zynq_update_intmsk(xadc, XADC_ZYNQ_INT_ALARM_MASK,
+ ~alarm & XADC_ZYNQ_INT_ALARM_MASK);
+
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
+
+static const struct xadc_ops xadc_zynq_ops = {
+ .read = xadc_zynq_read_adc_reg,
+ .write = xadc_zynq_write_adc_reg,
+ .setup = xadc_zynq_setup,
+ .get_dclk_rate = xadc_zynq_get_dclk_rate,
+ .interrupt_handler = xadc_zynq_interrupt_handler,
+ .threaded_interrupt_handler = xadc_zynq_threaded_interrupt_handler,
+ .update_alarm = xadc_zynq_update_alarm,
+};
+
+static int xadc_axi_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
+ xadc_read_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_ADC_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
+static int xadc_axi_setup(struct platform_device *pdev,
+ struct iio_dev *indio_dev, int irq)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ xadc_write_reg(xadc, XADC_AXI_REG_RESET, XADC_AXI_RESET_MAGIC);
+ xadc_write_reg(xadc, XADC_AXI_REG_GIER, XADC_AXI_GIER_ENABLE);
+
+ return 0;
+}
+
+static irqreturn_t xadc_axi_interrupt_handler(int irq, void *devid)
+{
+ struct iio_dev *indio_dev = devid;
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint32_t status, mask;
+ unsigned int events;
+
+ xadc_read_reg(xadc, XADC_AXI_REG_IPISR, &status);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &mask);
+ status &= mask;
+
+ if (!status)
+ return IRQ_NONE;
+
+ if ((status & XADC_AXI_INT_EOS) && xadc->trigger)
+ iio_trigger_poll(xadc->trigger);
+
+ if (status & XADC_AXI_INT_ALARM_MASK) {
+ /*
+ * The order of the bits in the AXI-XADC status register does
+ * not match the order of the bits in the XADC alarm enable
+ * register. xadc_handle_events() expects the events to be in
+ * the same order as the XADC alarm enable register.
+ */
+ events = (status & 0x000e) >> 1;
+ events |= (status & 0x0001) << 3;
+ events |= (status & 0x3c00) >> 6;
+ xadc_handle_events(indio_dev, events);
+ }
+
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, status);
+
+ return IRQ_HANDLED;
+}
+
+static void xadc_axi_update_alarm(struct xadc *xadc, unsigned int alarm)
+{
+ uint32_t val;
+ unsigned long flags;
+
+ /*
+ * The order of the bits in the AXI-XADC status register does not match
+ * the order of the bits in the XADC alarm enable register. We get
+ * passed the alarm mask in the same order as in the XADC alarm enable
+ * register.
+ */
+ alarm = ((alarm & 0x07) << 1) | ((alarm & 0x08) >> 3) |
+ ((alarm & 0xf0) << 6);
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ val &= ~XADC_AXI_INT_ALARM_MASK;
+ val |= alarm;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+}
+
+static unsigned long xadc_axi_get_dclk(struct xadc *xadc)
+{
+ return clk_get_rate(xadc->clk);
+}
+
+static const struct xadc_ops xadc_axi_ops = {
+ .read = xadc_axi_read_adc_reg,
+ .write = xadc_axi_write_adc_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
+static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ uint16_t tmp;
+ int ret;
+
+ ret = _xadc_read_adc_reg(xadc, reg, &tmp);
+ if (ret)
+ return ret;
+
+ return _xadc_write_adc_reg(xadc, reg, (tmp & ~mask) | val);
+}
+
+static int xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t mask, uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_update_adc_reg(xadc, reg, mask, val);
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static unsigned long xadc_get_dclk_rate(struct xadc *xadc)
+{
+ return xadc->ops->get_dclk_rate(xadc);
+}
+
+static int xadc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *mask)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int n;
+
+ n = bitmap_weight(mask, indio_dev->masklength);
+
+ kfree(xadc->data);
+ xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
+ if (!xadc->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static unsigned int xadc_scan_index_to_channel(unsigned int scan_index)
+{
+ switch (scan_index) {
+ case 5:
+ return XADC_REG_VCCPINT;
+ case 6:
+ return XADC_REG_VCCPAUX;
+ case 7:
+ return XADC_REG_VCCO_DDR;
+ case 8:
+ return XADC_REG_TEMP;
+ case 9:
+ return XADC_REG_VCCINT;
+ case 10:
+ return XADC_REG_VCCAUX;
+ case 11:
+ return XADC_REG_VPVN;
+ case 12:
+ return XADC_REG_VREFP;
+ case 13:
+ return XADC_REG_VREFN;
+ case 14:
+ return XADC_REG_VCCBRAM;
+ default:
+ return XADC_REG_VAUX(scan_index - 16);
+ }
+}
+
+static irqreturn_t xadc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int chan;
+ int i, j;
+
+ if (!xadc->data)
+ goto out;
+
+ j = 0;
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ chan = xadc_scan_index_to_channel(i);
+ xadc_read_adc_reg(xadc, chan, &xadc->data[j]);
+ j++;
+ }
+
+ iio_push_to_buffers(indio_dev, xadc->data);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
+{
+ struct xadc *xadc = iio_trigger_get_drvdata(trigger);
+ unsigned long flags;
+ unsigned int convst;
+ unsigned int val;
+ int ret = 0;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state) {
+ /* Only one of the two triggers can be active at a time. */
+ if (xadc->trigger != NULL) {
+ ret = -EBUSY;
+ goto err_out;
+ } else {
+ xadc->trigger = trigger;
+ if (trigger == xadc->convst_trigger)
+ convst = XADC_CONF0_EC;
+ else
+ convst = 0;
+ }
+ ret = _xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF0_EC, /* NOTE(review): EC is a Config Reg 0 bit per UG480 — writing it to CONF1 looks wrong; verify */
+ convst);
+ if (ret)
+ goto err_out;
+ } else {
+ xadc->trigger = NULL;
+ }
+
+ spin_lock_irqsave(&xadc->lock, flags);
+ xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
+ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+ if (state)
+ val |= XADC_AXI_INT_EOS;
+ else
+ val &= ~XADC_AXI_INT_EOS;
+ xadc_write_reg(xadc, XADC_AXI_REG_IPIER, val);
+ spin_unlock_irqrestore(&xadc->lock, flags);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops xadc_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &xadc_trigger_set_state,
+};
+
+static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
+ const char *name)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
+ indio_dev->id, name);
+ if (trig == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ trig->dev.parent = indio_dev->dev.parent;
+ trig->ops = &xadc_trigger_ops;
+ iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_free_trig;
+
+ return trig;
+
+error_free_trig:
+ iio_trigger_free(trig);
+ return ERR_PTR(ret);
+}
+
+static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
+{
+ uint16_t val;
+
+ switch (seq_mode) {
+ case XADC_CONF1_SEQ_SIMULTANEOUS:
+ case XADC_CONF1_SEQ_INDEPENDENT:
+ val = XADC_CONF2_PD_ADC_B;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_PD_MASK,
+ val);
+}
+
+static int xadc_get_seq_mode(struct xadc *xadc, unsigned long scan_mode)
+{
+ unsigned int aux_scan_mode = scan_mode >> 16;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_DUAL)
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+
+ if ((aux_scan_mode & 0xff00) == 0 ||
+ (aux_scan_mode & 0x00ff) == 0)
+ return XADC_CONF1_SEQ_CONTINUOUS;
+
+ return XADC_CONF1_SEQ_SIMULTANEOUS;
+}
+
+static int xadc_postdisable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int ret;
+ int i;
+
+ scan_mask = 1; /* Run calibration as part of the sequence */
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ /* Enable all channels and calibration */
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ return ret;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ return ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ return xadc_power_adc_b(xadc, XADC_CONF1_SEQ_CONTINUOUS);
+}
+
+static int xadc_preenable(struct iio_dev *indio_dev)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long scan_mask;
+ int seq_mode;
+ int ret;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ XADC_CONF1_SEQ_DEFAULT);
+ if (ret)
+ goto err;
+
+ scan_mask = *indio_dev->active_scan_mask;
+ seq_mode = xadc_get_seq_mode(xadc, scan_mask);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(0), scan_mask & 0xffff);
+ if (ret)
+ goto err;
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
+ if (ret)
+ goto err;
+
+ ret = xadc_power_adc_b(xadc, seq_mode);
+ if (ret)
+ goto err;
+
+ ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_SEQ_MASK,
+ seq_mode);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ xadc_postdisable(indio_dev);
+ return ret;
+}
+
+static struct iio_buffer_setup_ops xadc_buffer_ops = {
+ .preenable = &xadc_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &xadc_postdisable,
+};
+
+static int xadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned int div;
+ uint16_t val16;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ ret = xadc_read_adc_reg(xadc, chan->address, &val16);
+ if (ret < 0)
+ return ret;
+
+ val16 >>= 4;
+ if (chan->scan_type.sign == 'u')
+ *val = val16;
+ else
+ *val = sign_extend32(val16, 11);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /* V = (val * 3.0) / 4096 */
+ switch (chan->address) {
+ case XADC_REG_VCCINT:
+ case XADC_REG_VCCAUX:
+ case XADC_REG_VCCBRAM:
+ case XADC_REG_VCCPINT:
+ case XADC_REG_VCCPAUX:
+ case XADC_REG_VCCO_DDR:
+ *val = 3000;
+ break;
+ default:
+ *val = 1000;
+ break;
+ }
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* Temp in C = (val * 503.975) / 4096 - 273.15 */
+ *val = 503975;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = -((273150 << 12) / 503975);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+ if (ret)
+ return ret;
+
+ div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+ if (div < 2)
+ div = 2;
+
+ *val = xadc_get_dclk_rate(xadc) / div / 26;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int xadc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ unsigned long clk_rate = xadc_get_dclk_rate(xadc);
+ unsigned int div;
+
+ if (info != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ /* Max. 150 kSPS */
+ if (val > 150000)
+ val = 150000;
+
+ val *= 26;
+
+ /* Min 1MHz */
+ if (val < 1000000)
+ val = 1000000;
+
+ /*
+ * We want to round down, but only if we do not exceed the 150 kSPS
+ * limit.
+ */
+ div = clk_rate / val;
+ if (clk_rate / div / 26 > 150000)
+ div++;
+ if (div < 2)
+ div = 2;
+ else if (div > 0xff)
+ div = 0xff;
+
+ return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK,
+ div << XADC_CONF2_DIV_OFFSET);
+}
+
+static const struct iio_event_spec xadc_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+/* Separate values for upper and lower thresholds, but only a shared enable */
+static const struct iio_event_spec xadc_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define XADC_CHAN_TEMP(_chan, _scan_index, _addr) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = xadc_temp_events, \
+ .num_event_specs = ARRAY_SIZE(xadc_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define XADC_CHAN_VOLTAGE(_chan, _scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? xadc_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+static const struct iio_chan_spec xadc_channels[] = {
+ XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
+ XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
+ XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true), /* was VCCINT: scan index 10 maps to VCCAUX (see xadc_scan_index_to_channel) */
+ XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
+ XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
+ XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
+ XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
+ XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
+ XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+};
+
+static const struct iio_info xadc_info = {
+ .read_raw = &xadc_read_raw,
+ .write_raw = &xadc_write_raw,
+ .read_event_config = &xadc_read_event_config,
+ .write_event_config = &xadc_write_event_config,
+ .read_event_value = &xadc_read_event_value,
+ .write_event_value = &xadc_write_event_value,
+ .update_scan_mode = &xadc_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct of_device_id xadc_of_match_table[] = {
+ { .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
+ { .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, xadc_of_match_table);
+
+static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
+ unsigned int *conf)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+ struct iio_chan_spec *channels, *chan;
+ struct device_node *chan_node, *child;
+ unsigned int num_channels;
+ const char *external_mux;
+ u32 ext_mux_chan;
+ int reg;
+ int ret;
+
+ *conf = 0;
+
+ ret = of_property_read_string(np, "xlnx,external-mux", &external_mux);
+ if (ret < 0 || strcasecmp(external_mux, "none") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_NONE;
+ else if (strcasecmp(external_mux, "single") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_SINGLE;
+ else if (strcasecmp(external_mux, "dual") == 0)
+ xadc->external_mux_mode = XADC_EXTERNAL_MUX_DUAL;
+ else
+ return -EINVAL;
+
+ if (xadc->external_mux_mode != XADC_EXTERNAL_MUX_NONE) {
+ ret = of_property_read_u32(np, "xlnx,external-mux-channel",
+ &ext_mux_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xadc->external_mux_mode == XADC_EXTERNAL_MUX_SINGLE) {
+ if (ext_mux_chan == 0)
+ ext_mux_chan = XADC_REG_VPVN;
+ else if (ext_mux_chan <= 16)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ } else {
+ if (ext_mux_chan > 0 && ext_mux_chan <= 8)
+ ext_mux_chan = XADC_REG_VAUX(ext_mux_chan - 1);
+ else
+ return -EINVAL;
+ }
+
+ *conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
+ }
+
+ channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ num_channels = 9;
+ chan = &channels[9];
+
+ chan_node = of_get_child_by_name(np, "xlnx,channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ if (num_channels >= ARRAY_SIZE(xadc_channels)) {
+ of_node_put(child);
+ break;
+ }
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > 16)
+ continue;
+
+ if (of_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
+
+ if (reg == 0) {
+ chan->scan_index = 11;
+ chan->address = XADC_REG_VPVN;
+ } else {
+ chan->scan_index = 15 + reg;
+ chan->address = XADC_REG_VAUX(reg - 1);
+ }
+ num_channels++;
+ chan++;
+ }
+ }
+ of_node_put(chan_node);
+
+ indio_dev->num_channels = num_channels;
+ indio_dev->channels = krealloc(channels, sizeof(*channels) *
+ num_channels, GFP_KERNEL);
+ /* If we can't resize the channels array, just use the original */
+ if (!indio_dev->channels)
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int xadc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct iio_dev *indio_dev;
+ unsigned int bipolar_mask;
+ struct resource *mem;
+ unsigned int conf0;
+ struct xadc *xadc;
+ int ret;
+ int irq;
+ int i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ xadc = iio_priv(indio_dev);
+ xadc->ops = id->data;
+ init_completion(&xadc->completion);
+ mutex_init(&xadc->mutex);
+ spin_lock_init(&xadc->lock);
+ INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xadc->base))
+ return PTR_ERR(xadc->base);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "xadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &xadc_info;
+
+ ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
+ if (ret)
+ goto err_device_free;
+
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ ret = iio_triggered_buffer_setup(indio_dev,
+ &iio_pollfunc_store_time, &xadc_trigger_handler,
+ &xadc_buffer_ops);
+ if (ret)
+ goto err_device_free;
+
+ xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
+ if (IS_ERR(xadc->convst_trigger)) {
+ ret = PTR_ERR(xadc->convst_trigger);
+ goto err_triggered_buffer_cleanup;
+ }
+ xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
+ "samplerate");
+ if (IS_ERR(xadc->samplerate_trigger)) {
+ ret = PTR_ERR(xadc->samplerate_trigger);
+ goto err_free_convst_trigger;
+ }
+ }
+
+ xadc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xadc->clk)) {
+ ret = PTR_ERR(xadc->clk);
+ goto err_free_samplerate_trigger;
+ }
+ clk_prepare_enable(xadc->clk);
+
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
+ if (ret)
+ goto err_free_samplerate_trigger;
+
+ ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+ xadc->ops->threaded_interrupt_handler,
+ 0, dev_name(&pdev->dev), indio_dev);
+ if (ret)
+ goto err_clk_disable_unprepare;
+
+ for (i = 0; i < 16; i++)
+ xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ &xadc->threshold[i]);
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
+ if (ret)
+ goto err_free_irq;
+
+ bipolar_mask = 0;
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ if (indio_dev->channels[i].scan_type.sign == 's')
+ bipolar_mask |= BIT(indio_dev->channels[i].scan_index);
+ }
+
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
+ if (ret)
+ goto err_free_irq;
+ ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
+ bipolar_mask >> 16);
+ if (ret)
+ goto err_free_irq;
+
+ /* Disable all alarms */
+ xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
+ XADC_CONF1_ALARM_MASK);
+
+ /* Set thresholds to min/max */
+ for (i = 0; i < 16; i++) {
+ /*
+ * Set max voltage threshold and both temperature thresholds to
+ * 0xffff, min voltage threshold to 0.
+ */
+ if (i % 8 < 4 || i == 7)
+ xadc->threshold[i] = 0xffff;
+ else
+ xadc->threshold[i] = 0;
+ xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
+ xadc->threshold[i]);
+ }
+
+ /* Go to non-buffered mode */
+ xadc_postdisable(indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, indio_dev);
+err_free_samplerate_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->samplerate_trigger);
+err_free_convst_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->convst_trigger);
+err_triggered_buffer_cleanup:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_triggered_buffer_cleanup(indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
+err_device_free:
+ kfree(indio_dev->channels);
+
+ return ret;
+}
+
+static int xadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int irq = platform_get_irq(pdev, 0);
+
+ iio_device_unregister(indio_dev);
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+ iio_trigger_free(xadc->samplerate_trigger);
+ iio_trigger_free(xadc->convst_trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
+ }
+ free_irq(irq, indio_dev);
+ clk_disable_unprepare(xadc->clk);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work); /* _sync: worker re-arms itself, plain cancel can race with free */
+ kfree(xadc->data);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+static struct platform_driver xadc_driver = {
+ .probe = xadc_probe,
+ .remove = xadc_remove,
+ .driver = {
+ .name = "xadc",
+ .of_match_table = xadc_of_match_table,
+ },
+};
+module_platform_driver(xadc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Xilinx XADC IIO driver");
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
new file mode 100644
index 00000000000000..edcf3aabd70d90
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -0,0 +1,248 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+
+#include "xilinx-xadc.h"
+
+static const struct iio_chan_spec *xadc_event_to_channel(
+ struct iio_dev *indio_dev, unsigned int event)
+{
+ switch (event) {
+ case XADC_THRESHOLD_OT_MAX:
+ case XADC_THRESHOLD_TEMP_MAX:
+ return &indio_dev->channels[0];
+ case XADC_THRESHOLD_VCCINT_MAX:
+ case XADC_THRESHOLD_VCCAUX_MAX:
+ return &indio_dev->channels[event];
+ default:
+ return &indio_dev->channels[event-1];
+ }
+}
+
+static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
+{
+ const struct iio_chan_spec *chan;
+
+ /* Temperature threshold error, we don't handle this yet */
+ if (event == 0)
+ return;
+
+ chan = xadc_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * The temperature channel only supports over-temperature
+ * events.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ } else {
+ /*
+ * For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+ }
+}
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int i;
+
+ for_each_set_bit(i, &events, 8)
+ xadc_handle_event(indio_dev, i);
+}
+
+static unsigned xadc_get_threshold_offset(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir)
+{
+ unsigned int offset;
+
+ if (chan->type == IIO_TEMP) {
+ offset = XADC_THRESHOLD_OT_MAX;
+ } else {
+ if (chan->channel < 2)
+ offset = chan->channel + 1;
+ else
+ offset = chan->channel + 6;
+ }
+
+ if (dir == IIO_EV_DIR_FALLING)
+ offset += 4;
+
+ return offset;
+}
+
+static unsigned int xadc_get_alarm_mask(const struct iio_chan_spec *chan)
+{
+ if (chan->type == IIO_TEMP) {
+ return XADC_ALARM_OT_MASK;
+ } else {
+ switch (chan->channel) {
+ case 0:
+ return XADC_ALARM_VCCINT_MASK;
+ case 1:
+ return XADC_ALARM_VCCAUX_MASK;
+ case 2:
+ return XADC_ALARM_VCCBRAM_MASK;
+ case 3:
+ return XADC_ALARM_VCCPINT_MASK;
+ case 4:
+ return XADC_ALARM_VCCPAUX_MASK;
+ case 5:
+ return XADC_ALARM_VCCODDR_MASK;
+ default:
+ /* We will never get here */
+ return 0;
+ }
+ }
+}
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ return (bool)(xadc->alarm_mask & xadc_get_alarm_mask(chan));
+}
+
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ unsigned int alarm = xadc_get_alarm_mask(chan);
+ struct xadc *xadc = iio_priv(indio_dev);
+ uint16_t cfg, old_cfg;
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+
+ if (state)
+ xadc->alarm_mask |= alarm;
+ else
+ xadc->alarm_mask &= ~alarm;
+
+ xadc->ops->update_alarm(xadc, xadc->alarm_mask);
+
+ ret = _xadc_read_adc_reg(xadc, XADC_REG_CONF1, &cfg);
+ if (ret)
+ goto err_out;
+
+ old_cfg = cfg;
+ cfg |= XADC_CONF1_ALARM_MASK;
+ cfg &= ~((xadc->alarm_mask & 0xf0) << 4); /* bram, pint, paux, ddr */
+ cfg &= ~((xadc->alarm_mask & 0x08) >> 3); /* ot */
+ cfg &= ~((xadc->alarm_mask & 0x07) << 1); /* temp, vccint, vccaux */
+ if (old_cfg != cfg)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_CONF1, cfg);
+
+err_out:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
+
+/* Register value is msb aligned, the lower 4 bits are ignored */
+#define XADC_THRESHOLD_VALUE_SHIFT 4
+
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = xadc->threshold[offset];
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ *val = xadc->temp_hysteresis;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val >>= XADC_THRESHOLD_VALUE_SHIFT;
+
+ return IIO_VAL_INT;
+}
+
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ unsigned int offset = xadc_get_threshold_offset(chan, dir);
+ struct xadc *xadc = iio_priv(indio_dev);
+ int ret = 0;
+
+ val <<= XADC_THRESHOLD_VALUE_SHIFT;
+
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&xadc->mutex);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ xadc->threshold[offset] = val;
+ break;
+ case IIO_EV_INFO_HYSTERESIS:
+ xadc->temp_hysteresis = val;
+ break;
+ default:
+ mutex_unlock(&xadc->mutex);
+ return -EINVAL;
+ }
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * According to the datasheet we need to set the lower 4 bits to
+ * 0x3, otherwise 125 degree celsius will be used as the
+ * threshold.
+ */
+ val |= 0x3;
+
+ /*
+ * Since we store the hysteresis as relative (to the threshold)
+ * value, but the hardware expects an absolute value we need to
+ * recalculate this value whenever the hysteresis or the
+ * threshold changes.
+ */
+ if (xadc->threshold[offset] < xadc->temp_hysteresis)
+ xadc->threshold[offset + 4] = 0;
+ else
+ xadc->threshold[offset + 4] = xadc->threshold[offset] -
+ xadc->temp_hysteresis;
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset + 4),
+ xadc->threshold[offset + 4]);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (info == IIO_EV_INFO_VALUE)
+ ret = _xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(offset), val);
+
+out_unlock:
+ mutex_unlock(&xadc->mutex);
+
+ return ret;
+}
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
new file mode 100644
index 00000000000000..c7487e8d7f809f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -0,0 +1,209 @@
+/*
+ * Xilinx XADC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clauen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_XILINX_XADC__
+#define __IIO_XILINX_XADC__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct iio_dev;
+struct clk;
+struct xadc_ops;
+struct platform_device;
+
+void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events);
+
+int xadc_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir);
+int xadc_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state);
+int xadc_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2);
+int xadc_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2);
+
+enum xadc_external_mux_mode {
+ XADC_EXTERNAL_MUX_NONE,
+ XADC_EXTERNAL_MUX_SINGLE,
+ XADC_EXTERNAL_MUX_DUAL,
+};
+
+/*
+ * struct xadc - per-instance driver state for the Xilinx XADC.
+ */
+struct xadc {
+ void __iomem *base;
+ struct clk *clk;
+
+ const struct xadc_ops *ops;
+
+ /* Cached alarm thresholds, indexed by the XADC_THRESHOLD_* values. */
+ uint16_t threshold[16];
+ /* Temperature hysteresis, stored RELATIVE to the threshold. */
+ uint16_t temp_hysteresis;
+ unsigned int alarm_mask;
+
+ uint16_t *data;
+
+ struct iio_trigger *trigger;
+ struct iio_trigger *convst_trigger;
+ struct iio_trigger *samplerate_trigger;
+
+ enum xadc_external_mux_mode external_mux_mode;
+
+ /* Zynq-specific interrupt mask bookkeeping. */
+ unsigned int zynq_alarm;
+ unsigned int zynq_masked_alarm;
+ unsigned int zynq_intmask;
+ struct delayed_work zynq_unmask_work;
+
+ /* Serializes ADC register access and cached-threshold updates. */
+ struct mutex mutex;
+ spinlock_t lock;
+
+ struct completion completion;
+};
+
+struct xadc_ops {
+ int (*read)(struct xadc *, unsigned int, uint16_t *);
+ int (*write)(struct xadc *, unsigned int, uint16_t);
+ int (*setup)(struct platform_device *pdev, struct iio_dev *indio_dev,
+ int irq);
+ void (*update_alarm)(struct xadc *, unsigned int);
+ unsigned long (*get_dclk_rate)(struct xadc *);
+ irqreturn_t (*interrupt_handler)(int, void *);
+ irqreturn_t (*threaded_interrupt_handler)(int, void *);
+
+ unsigned int flags;
+};
+
+/* Read an ADC register via the bus-specific ops; caller holds xadc->mutex. */
+static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->read(xadc, reg, val);
+}
+
+/* Write an ADC register via the bus-specific ops; caller holds xadc->mutex. */
+static inline int _xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ lockdep_assert_held(&xadc->mutex);
+ return xadc->ops->write(xadc, reg, val);
+}
+
+/* Locked wrapper around _xadc_read_adc_reg() for callers without the mutex. */
+static inline int xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_read_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
+
+/* Locked wrapper around _xadc_write_adc_reg() for callers without the mutex. */
+static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ int ret;
+
+ mutex_lock(&xadc->mutex);
+ ret = _xadc_write_adc_reg(xadc, reg, val);
+ mutex_unlock(&xadc->mutex);
+ return ret;
+}
+
+/* XADC hardmacro register definitions */
+#define XADC_REG_TEMP 0x00
+#define XADC_REG_VCCINT 0x01
+#define XADC_REG_VCCAUX 0x02
+#define XADC_REG_VPVN 0x03
+#define XADC_REG_VREFP 0x04
+#define XADC_REG_VREFN 0x05
+#define XADC_REG_VCCBRAM 0x06
+
+#define XADC_REG_VCCPINT 0x0d
+#define XADC_REG_VCCPAUX 0x0e
+#define XADC_REG_VCCO_DDR 0x0f
+#define XADC_REG_VAUX(x) (0x10 + (x))
+
+#define XADC_REG_MAX_TEMP 0x20
+#define XADC_REG_MAX_VCCINT 0x21
+#define XADC_REG_MAX_VCCAUX 0x22
+#define XADC_REG_MAX_VCCBRAM 0x23
+#define XADC_REG_MIN_TEMP 0x24
+#define XADC_REG_MIN_VCCINT 0x25
+#define XADC_REG_MIN_VCCAUX 0x26
+#define XADC_REG_MIN_VCCBRAM 0x27
+#define XADC_REG_MAX_VCCPINT 0x28
+#define XADC_REG_MAX_VCCPAUX 0x29
+#define XADC_REG_MAX_VCCO_DDR 0x2a
+#define XADC_REG_MIN_VCCPINT 0x2b
+#define XADC_REG_MIN_VCCPAUX 0x2c
+#define XADC_REG_MIN_VCCO_DDR 0x2d
+
+#define XADC_REG_CONF0 0x40
+#define XADC_REG_CONF1 0x41
+#define XADC_REG_CONF2 0x42
+#define XADC_REG_SEQ(x) (0x48 + (x))
+#define XADC_REG_INPUT_MODE(x) (0x4c + (x))
+#define XADC_REG_THRESHOLD(x) (0x50 + (x))
+
+#define XADC_REG_FLAG 0x3f
+
+#define XADC_CONF0_EC BIT(9)
+#define XADC_CONF0_ACQ BIT(8)
+#define XADC_CONF0_MUX BIT(11)
+#define XADC_CONF0_CHAN(x) (x)
+
+#define XADC_CONF1_SEQ_MASK (0xf << 12)
+#define XADC_CONF1_SEQ_DEFAULT (0 << 12)
+#define XADC_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define XADC_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define XADC_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+#define XADC_CONF1_SEQ_SIMULTANEOUS (4 << 12)
+#define XADC_CONF1_SEQ_INDEPENDENT (8 << 12)
+#define XADC_CONF1_ALARM_MASK 0x0f0f
+
+#define XADC_CONF2_DIV_MASK 0xff00
+#define XADC_CONF2_DIV_OFFSET 8
+
+#define XADC_CONF2_PD_MASK (0x3 << 4)
+#define XADC_CONF2_PD_NONE (0x0 << 4)
+#define XADC_CONF2_PD_ADC_B (0x2 << 4)
+#define XADC_CONF2_PD_BOTH (0x3 << 4)
+
+#define XADC_ALARM_TEMP_MASK BIT(0)
+#define XADC_ALARM_VCCINT_MASK BIT(1)
+#define XADC_ALARM_VCCAUX_MASK BIT(2)
+#define XADC_ALARM_OT_MASK BIT(3)
+#define XADC_ALARM_VCCBRAM_MASK BIT(4)
+#define XADC_ALARM_VCCPINT_MASK BIT(5)
+#define XADC_ALARM_VCCPAUX_MASK BIT(6)
+#define XADC_ALARM_VCCODDR_MASK BIT(7)
+
+#define XADC_THRESHOLD_TEMP_MAX 0x0
+#define XADC_THRESHOLD_VCCINT_MAX 0x1
+#define XADC_THRESHOLD_VCCAUX_MAX 0x2
+#define XADC_THRESHOLD_OT_MAX 0x3
+#define XADC_THRESHOLD_TEMP_MIN 0x4
+#define XADC_THRESHOLD_VCCINT_MIN 0x5
+#define XADC_THRESHOLD_VCCAUX_MIN 0x6
+#define XADC_THRESHOLD_OT_MIN 0x7
+#define XADC_THRESHOLD_VCCBRAM_MAX 0x8
+#define XADC_THRESHOLD_VCCPINT_MAX 0x9
+#define XADC_THRESHOLD_VCCPAUX_MAX 0xa
+#define XADC_THRESHOLD_VCCODDR_MAX 0xb
+#define XADC_THRESHOLD_VCCBRAM_MIN 0xc
+#define XADC_THRESHOLD_VCCPINT_MIN 0xd
+#define XADC_THRESHOLD_VCCPAUX_MIN 0xe
+#define XADC_THRESHOLD_VCCODDR_MIN 0xf
+
+#endif
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
new file mode 100644
index 00000000000000..e9c5f2cd925712
--- /dev/null
+++ b/drivers/iio/amplifiers/Kconfig
@@ -0,0 +1,19 @@
+#
+# Gain Amplifiers, etc.
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Amplifiers"
+
+config AD8366
+ tristate "Analog Devices AD8366 VGA"
+ depends on SPI
+ select BITREVERSE
+ help
+ Say yes here to build support for Analog Devices AD8366
+ SPI Dual-Digital Variable Gain Amplifier (VGA).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad8366.
+
+endmenu
diff --git a/drivers/iio/amplifiers/Makefile b/drivers/iio/amplifiers/Makefile
new file mode 100644
index 00000000000000..8da4b787898a9a
--- /dev/null
+++ b/drivers/iio/amplifiers/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile iio/amplifiers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD8366) += ad8366.o
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
new file mode 100644
index 00000000000000..ba6f6a91dfffc6
--- /dev/null
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -0,0 +1,213 @@
+/*
+ * AD8366 SPI Dual-Digital Variable Gain Amplifier (VGA)
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/bitrev.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+struct ad8366_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned char ch[2];
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ unsigned char data[2] ____cacheline_aligned;
+};
+
+/*
+ * ad8366_write() - program both 6-bit channel gain codes over SPI.
+ * The codes are bit-reversed per the device's serial format and packed
+ * into st->data (cacheline-aligned for DMA safety).
+ * Returns 0 on success or the negative spi_write() error.
+ */
+static int ad8366_write(struct iio_dev *indio_dev,
+ unsigned char ch_a, unsigned char ch_b)
+{
+ struct ad8366_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* Only the low 6 bits of each code are significant. */
+ ch_a = bitrev8(ch_a & 0x3F);
+ ch_b = bitrev8(ch_b & 0x3F);
+
+ st->data[0] = ch_b >> 4;
+ st->data[1] = (ch_b << 4) | (ch_a >> 2);
+
+ ret = spi_write(st->spi, st->data, ARRAY_SIZE(st->data));
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "write failed (%d)", ret);
+
+ return ret;
+}
+
+/*
+ * ad8366_read_raw() - report the cached hardware gain for a channel.
+ * Gain in dB is derived from the 6-bit code as 4.5 + 0.253 * code,
+ * returned in IIO_VAL_INT_PLUS_MICRO_DB form.
+ */
+static int ad8366_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad8366_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned code;
+
+ mutex_lock(&indio_dev->mlock);
+ switch (m) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ code = st->ch[chan->channel];
+
+ /* Values in dB */
+ code = code * 253 + 4500;
+ *val = code / 1000;
+ *val2 = (code % 1000) * 1000;
+
+ ret = IIO_VAL_INT_PLUS_MICRO_DB;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/*
+ * ad8366_write_raw() - set the hardware gain for a channel.
+ * val/val2 are the integer and micro parts of the requested gain in dB;
+ * valid range is 4.5 dB to 20.5 dB (6-bit code, 0.253 dB steps).
+ */
+static int ad8366_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad8366_state *st = iio_priv(indio_dev);
+ unsigned code;
+ int ret;
+
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
+ /*
+ * Values in milli-dB. Do NOT truncate val to u8 here: with the
+ * former (u8) cast, an out-of-range request such as 260 dB wrapped
+ * to 4 and could slip past the bounds check below.
+ */
+ code = ((u32)val * 1000) + ((u32)val2 / 1000);
+
+ if (code > 20500 || code < 4500)
+ return -EINVAL;
+
+ code = (code - 4500) / 253;
+
+ mutex_lock(&indio_dev->mlock);
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ st->ch[chan->channel] = code;
+ ret = ad8366_write(indio_dev, st->ch[0], st->ch[1]);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static const struct iio_info ad8366_info = {
+ .read_raw = &ad8366_read_raw,
+ .write_raw = &ad8366_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD8366_CHAN(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN),\
+}
+
+static const struct iio_chan_spec ad8366_channels[] = {
+ AD8366_CHAN(0),
+ AD8366_CHAN(1),
+};
+
+/*
+ * ad8366_probe() - allocate the IIO device, enable the optional "vcc"
+ * supply, register the device, and program an initial gain code of 0.
+ */
+static int ad8366_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad8366_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ /* The regulator is optional: an ERR_PTR is tolerated and remembered. */
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad8366_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad8366_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad8366_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ /* Initial gain; return value deliberately unchecked here — NOTE(review): confirm. */
+ ad8366_write(indio_dev, 0 , 0);
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/*
+ * ad8366_remove() - unregister the IIO device and disable the "vcc"
+ * supply if it was successfully acquired in probe.
+ */
+static int ad8366_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad8366_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
+
+ iio_device_unregister(indio_dev);
+
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad8366_id[] = {
+ {"ad8366", 0},
+ {}
+};
+
+static struct spi_driver ad8366_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad8366_probe,
+ .remove = ad8366_remove,
+ .id_table = ad8366_id,
+};
+
+module_spi_driver(ad8366_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD8366 VGA");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
new file mode 100644
index 00000000000000..eb46e728aa2eff
--- /dev/null
+++ b/drivers/iio/buffer_cb.c
@@ -0,0 +1,124 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+
+struct iio_cb_buffer {
+ struct iio_buffer buffer;
+ int (*cb)(const void *data, void *private);
+ void *private;
+ struct iio_channel *channels;
+};
+
+static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct iio_cb_buffer, buffer);
+}
+
+static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
+{
+ struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
+ return cb_buff->cb(data, cb_buff->private);
+}
+
+static void iio_buffer_cb_release(struct iio_buffer *buffer)
+{
+ struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);
+ kfree(cb_buff->buffer.scan_mask);
+ kfree(cb_buff);
+}
+
+static const struct iio_buffer_access_funcs iio_cb_access = {
+ .store_to = &iio_buffer_cb_store_to,
+ .release = &iio_buffer_cb_release,
+};
+
+/*
+ * iio_channel_get_all_cb() - allocate a callback buffer covering all IIO
+ * channels mapped to @dev, invoking @cb with @private for each sample set.
+ *
+ * All channels must belong to the same underlying iio_dev; otherwise the
+ * function fails with -EINVAL. On error an ERR_PTR is returned and all
+ * partially acquired resources are released via the goto chain below.
+ */
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
+ int (*cb)(const void *data,
+ void *private),
+ void *private)
+{
+ int ret;
+ struct iio_cb_buffer *cb_buff;
+ struct iio_dev *indio_dev;
+ struct iio_channel *chan;
+
+ cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
+ if (cb_buff == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ iio_buffer_init(&cb_buff->buffer);
+
+ cb_buff->private = private;
+ cb_buff->cb = cb;
+ cb_buff->buffer.access = &iio_cb_access;
+ INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
+
+ cb_buff->channels = iio_channel_get_all(dev);
+ if (IS_ERR(cb_buff->channels)) {
+ ret = PTR_ERR(cb_buff->channels);
+ goto error_free_cb_buff;
+ }
+
+ /* The first channel's device defines the scan mask length. */
+ indio_dev = cb_buff->channels[0].indio_dev;
+ cb_buff->buffer.scan_mask
+ = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
+ GFP_KERNEL);
+ if (cb_buff->buffer.scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+ /* The channel array is terminated by an entry with a NULL indio_dev. */
+ chan = &cb_buff->channels[0];
+ while (chan->indio_dev) {
+ if (chan->indio_dev != indio_dev) {
+ ret = -EINVAL;
+ goto error_free_scan_mask;
+ }
+ set_bit(chan->channel->scan_index,
+ cb_buff->buffer.scan_mask);
+ chan++;
+ }
+
+ return cb_buff;
+
+error_free_scan_mask:
+ kfree(cb_buff->buffer.scan_mask);
+error_release_channels:
+ iio_channel_release_all(cb_buff->channels);
+error_free_cb_buff:
+ kfree(cb_buff);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+
+int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ return iio_update_buffers(cb_buff->channels[0].indio_dev,
+ &cb_buff->buffer,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
+
+void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ iio_update_buffers(cb_buff->channels[0].indio_dev,
+ NULL,
+ &cb_buff->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
+
+void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ iio_channel_release_all(cb_buff->channels);
+ iio_buffer_put(&cb_buff->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
+
+struct iio_channel
+*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
+{
+ return cb_buffer->channels;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
new file mode 100644
index 00000000000000..0b6e97d18fa00a
--- /dev/null
+++ b/drivers/iio/common/Kconfig
@@ -0,0 +1,6 @@
+#
+# IIO common modules
+#
+
+source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/st_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
new file mode 100644
index 00000000000000..3112df0060e91b
--- /dev/null
+++ b/drivers/iio/common/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the IIO common modules.
+# Common modules contains modules, which can be shared among multiple
+# IIO modules. For example if the trigger processing is common for
+# multiple IIO modules then this can be moved to a common module
+# instead of duplicating in each module.
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-y += hid-sensors/
+obj-y += st_sensors/
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
new file mode 100644
index 00000000000000..39188b72cd3b28
--- /dev/null
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -0,0 +1,28 @@
+#
+# Hid Sensor common modules
+#
+menu "Hid Sensor IIO Common"
+
+config HID_SENSOR_IIO_COMMON
+ tristate "Common modules for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB
+ select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
+ help
+ Say yes here to build support for HID sensor to use
+ HID sensor common processing for attributes and IIO triggers.
+ There are many attributes which can be shared among multiple
+ HID sensor drivers, this module contains processing for those
+ attributes.
+
+config HID_SENSOR_IIO_TRIGGER
+ tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+ select IIO_TRIGGER
+ help
+ Say yes here to build trigger support for HID sensors.
+ Triggers will be send if all requested attributes were read.
+
+ If this driver is compiled as a module, it will be named
+ hid-sensor-trigger.
+
+endmenu
diff --git a/drivers/iio/common/hid-sensors/Makefile b/drivers/iio/common/hid-sensors/Makefile
new file mode 100644
index 00000000000000..22e7c5a82325e1
--- /dev/null
+++ b/drivers/iio/common/hid-sensors/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Hid sensor common modules.
+#
+
+obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
new file mode 100644
index 00000000000000..25b01e156d8264
--- /dev/null
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -0,0 +1,397 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+static struct {
+ u32 usage_id;
+ int unit; /* 0 for default others from HID sensor spec */
+ int scale_val0; /* scale, whole number */
+ int scale_val1; /* scale, fraction in micros */
+} unit_conversion[] = {
+ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+ HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+
+ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
+ {HID_USAGE_SENSOR_GYRO_3D,
+ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+
+ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
+
+ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
+
+ {HID_USAGE_SENSOR_ALS, 0, 1, 0},
+ {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
+
+ {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+};
+
+/*
+ * Integer 10^power. Assumes power is small enough that the result fits
+ * in an int (callers in this file pass exponents <= 9) — TODO confirm.
+ */
+static int pow_10(unsigned power)
+{
+ int i;
+ int ret = 1;
+ for (i = 0; i < power; ++i)
+ ret = ret * 10;
+
+ return ret;
+}
+
+/*
+ * Divide dividend by divisor into an integer part (*whole) and a
+ * fractional part expressed in micro units (*micro_frac).
+ * A zero divisor yields 0/0 rather than trapping.
+ */
+static void simple_div(int dividend, int divisor, int *whole,
+ int *micro_frac)
+{
+ int rem;
+ int exp = 0;
+
+ *micro_frac = 0;
+ if (divisor == 0) {
+ *whole = 0;
+ return;
+ }
+ *whole = dividend/divisor;
+ rem = dividend % divisor;
+ if (rem) {
+ /* Scale the remainder up until one more digit of quotient exists. */
+ while (rem <= divisor) {
+ rem *= 10;
+ exp++;
+ }
+ *micro_frac = (rem / divisor) * pow_10(6-exp);
+ }
+}
+
+static void split_micro_fraction(unsigned int no, int exp, int *val1, int *val2)
+{
+ *val1 = no/pow_10(exp);
+ *val2 = no%pow_10(exp) * pow_10(6-exp);
+}
+
+/*
+VTF format uses exponent and variable size format.
+For example if the size is 2 bytes
+0x0067 with VTF16E14 format -> +1.03
+To convert just change to 0x67 to decimal and use two decimal as E14 stands
+for 10^-2.
+Negative numbers are 2's complement
+*/
+/*
+ * Decode a VTF (variable size + exponent) value of @size bytes into the
+ * IIO int/micro pair. Negative numbers are two's complement, detected
+ * via the top bit of the field.
+ */
+static void convert_from_vtf_format(u32 value, int size, int exp,
+ int *val1, int *val2)
+{
+ int sign = 1;
+
+ if (value & BIT(size*8 - 1)) {
+ /* Two's-complement negative: recover the magnitude. */
+ value = ((1LL << (size * 8)) - value);
+ sign = -1;
+ }
+ exp = hid_sensor_convert_exponent(exp);
+ if (exp >= 0) {
+ *val1 = sign * value * pow_10(exp);
+ *val2 = 0;
+ } else {
+ split_micro_fraction(value, -exp, val1, val2);
+ /* Apply the sign to whichever part is non-zero. */
+ if (*val1)
+ *val1 = sign * (*val1);
+ else
+ *val2 = sign * (*val2);
+ }
+}
+
+static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
+{
+ u32 value;
+ int sign = 1;
+
+ if (val1 < 0 || val2 < 0)
+ sign = -1;
+ exp = hid_sensor_convert_exponent(exp);
+ if (exp < 0) {
+ value = abs(val1) * pow_10(-exp);
+ value += abs(val2) / pow_10(6+exp);
+ } else
+ value = abs(val1) / pow_10(exp);
+ if (sign < 0)
+ value = ((1LL << (size * 8)) - value);
+
+ return value;
+}
+
+s32 hid_sensor_read_poll_value(struct hid_sensor_common *st)
+{
+ s32 value = 0;
+ int ret;
+
+ ret = sensor_hub_get_feature(st->hsdev,
+ st->poll.report_id,
+ st->poll.index, &value);
+
+ if (ret < 0 || value < 0) {
+ return -EINVAL;
+ } else {
+ if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
+ value = value * 1000;
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(hid_sensor_read_poll_value);
+
+/*
+ * Read the sensor's report interval feature and convert it to a sampling
+ * frequency in Hz (int + micro parts). Only millisecond and second
+ * interval units are supported; anything else returns -EINVAL.
+ */
+int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
+ int *val1, int *val2)
+{
+ s32 value;
+ int ret;
+
+ ret = sensor_hub_get_feature(st->hsdev,
+ st->poll.report_id,
+ st->poll.index, &value);
+ if (ret < 0 || value < 0) {
+ *val1 = *val2 = 0;
+ return -EINVAL;
+ } else {
+ /* frequency = 1 / interval, with the interval unit scaled out */
+ if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
+ simple_div(1000, value, val1, val2);
+ else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
+ simple_div(1, value, val1, val2);
+ else {
+ *val1 = *val2 = 0;
+ return -EINVAL;
+ }
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);
+
+/*
+ * Set the sampling frequency by programming the sensor's report-interval
+ * feature. val1/val2 are the IIO integer/micro parts of the requested
+ * frequency in Hz; a frequency of 0 writes interval 0 (device default).
+ */
+int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
+ int val1, int val2)
+{
+ s32 value;
+ int ret;
+
+ /*
+ * Bug fix: the original assigned ret = -EINVAL here without
+ * returning, and ret was then unconditionally overwritten by
+ * sensor_hub_set_feature(), so invalid input was never rejected.
+ */
+ if (val1 < 0 || val2 < 0)
+ return -EINVAL;
+
+ value = val1 * pow_10(6) + val2;
+ if (value) {
+ /* interval = 1 / frequency, scaled to the feature's unit */
+ if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
+ value = pow_10(9)/value;
+ else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
+ value = pow_10(6)/value;
+ else
+ value = 0;
+ }
+ ret = sensor_hub_set_feature(st->hsdev,
+ st->poll.report_id,
+ st->poll.index, value);
+ if (ret < 0 || value < 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
+
+int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
+ int *val1, int *val2)
+{
+ s32 value;
+ int ret;
+
+ ret = sensor_hub_get_feature(st->hsdev,
+ st->sensitivity.report_id,
+ st->sensitivity.index, &value);
+ if (ret < 0 || value < 0) {
+ *val1 = *val2 = 0;
+ return -EINVAL;
+ } else {
+ convert_from_vtf_format(value, st->sensitivity.size,
+ st->sensitivity.unit_expo,
+ val1, val2);
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);
+
+int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
+ int val1, int val2)
+{
+ s32 value;
+ int ret;
+
+ value = convert_to_vtf_format(st->sensitivity.size,
+ st->sensitivity.unit_expo,
+ val1, val2);
+ ret = sensor_hub_set_feature(st->hsdev,
+ st->sensitivity.report_id,
+ st->sensitivity.index, value);
+ if (ret < 0 || value < 0)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
+
+/*
+ * This fuction applies the unit exponent to the scale.
+ * For example:
+ * 9.806650 ->exp:2-> val0[980]val1[665000]
+ * 9.000806 ->exp:2-> val0[900]val1[80600]
+ * 0.174535 ->exp:2-> val0[17]val1[453500]
+ * 1.001745 ->exp:0-> val0[1]val1[1745]
+ * 1.001745 ->exp:2-> val0[100]val1[174500]
+ * 1.001745 ->exp:4-> val0[10017]val1[450000]
+ * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ */
+/*
+ * Apply a decimal unit exponent to a scale given as (whole, micro) parts,
+ * producing new (val0, val1) parts. Positive exponents shift digits from
+ * the micro part into the whole part; negative exponents do the reverse.
+ * |exp| > 6 saturates the affected part(s) to 0. See the worked examples
+ * in the comment block above.
+ */
+static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+ int scale1, int exp)
+{
+ int i;
+ int x;
+ int res;
+ int rem;
+
+ if (exp > 0) {
+ *val0 = scale0 * pow_10(exp);
+ res = 0;
+ if (exp > 6) {
+ *val1 = 0;
+ return;
+ }
+ /* Move the top `exp` digits of the micro part into val0. */
+ for (i = 0; i < exp; ++i) {
+ x = scale1 / pow_10(5 - i);
+ res += (pow_10(exp - 1 - i) * x);
+ scale1 = scale1 % pow_10(5 - i);
+ }
+ *val0 += res;
+ *val1 = scale1 * pow_10(exp);
+ } else if (exp < 0) {
+ exp = abs(exp);
+ if (exp > 6) {
+ *val0 = *val1 = 0;
+ return;
+ }
+ *val0 = scale0 / pow_10(exp);
+ rem = scale0 % pow_10(exp);
+ res = 0;
+ /* Shift the surviving digits of the micro part down by exp. */
+ for (i = 0; i < (6 - exp); ++i) {
+ x = scale1 / pow_10(5 - i);
+ res += (pow_10(5 - exp - i) * x);
+ scale1 = scale1 % pow_10(5 - i);
+ }
+ *val1 = rem * pow_10(6 - exp) + res;
+ } else {
+ *val0 = scale0;
+ *val1 = scale1;
+ }
+}
+
+/*
+ * Look up the (usage_id, unit) pair in the unit_conversion table and
+ * return the IIO scale as int + micro parts, adjusted by the attribute's
+ * unit exponent. Falls back to a scale of 1.0 if no entry matches.
+ */
+int hid_sensor_format_scale(u32 usage_id,
+ struct hid_sensor_hub_attribute_info *attr_info,
+ int *val0, int *val1)
+{
+ int i;
+ int exp;
+
+ *val0 = 1;
+ *val1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(unit_conversion); ++i) {
+ if (unit_conversion[i].usage_id == usage_id &&
+ unit_conversion[i].unit == attr_info->units) {
+ exp = hid_sensor_convert_exponent(
+ attr_info->unit_expo);
+ adjust_exponent_micro(val0, val1,
+ unit_conversion[i].scale_val0,
+ unit_conversion[i].scale_val1, exp);
+ break;
+ }
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+EXPORT_SYMBOL(hid_sensor_format_scale);
+
+static
+int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_common *st)
+{
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_PROP_REPORT_INTERVAL,
+ &st->poll);
+ /* Default unit of measure is milliseconds */
+ if (st->poll.units == 0)
+ st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
+ return 0;
+
+}
+
+/*
+ * Locate the feature-report attributes shared by all HID sensors (report
+ * interval, report state, power state, sensitivity) and cache their
+ * indices/report ids in @st. Always returns 0.
+ */
+int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_common *st)
+{
+
+
+ hid_sensor_get_reporting_interval(hsdev, usage_id, st);
+
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_PROP_REPORT_STATE,
+ &st->report_state);
+
+ /* "PROY" is the constant's actual spelling in hid-sensor-hub.h — NOTE(review): confirm. */
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_PROY_POWER_STATE,
+ &st->power_state);
+
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
+ &st->sensitivity);
+
+ hid_dbg(hsdev->hdev, "common attributes: %x:%x, %x:%x, %x:%x %x:%x\n",
+ st->poll.index, st->poll.report_id,
+ st->report_state.index, st->report_state.report_id,
+ st->power_state.index, st->power_state.report_id,
+ st->sensitivity.index, st->sensitivity.report_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_DESCRIPTION("HID Sensor common attribute processing");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
new file mode 100644
index 00000000000000..8866800a0bc78b
--- /dev/null
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -0,0 +1,139 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/sysfs.h>
+#include "hid-sensor-trigger.h"
+
+/*
+ * Power the sensor up or down. Uses data_ready as a reference count so
+ * only the first enable opens the hub device and only the last disable
+ * closes it. The power/report state enum indices are translated through
+ * the report's logical minimum before being written. Always returns 0
+ * unless the hub device fails to open.
+ */
+int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+{
+ int state_val;
+ int report_val;
+
+ if (state) {
+ if (sensor_hub_device_open(st->hsdev))
+ return -EIO;
+
+ atomic_inc(&st->data_ready);
+
+ state_val = hid_sensor_get_usage_index(st->hsdev,
+ st->power_state.report_id,
+ st->power_state.index,
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM);
+ report_val = hid_sensor_get_usage_index(st->hsdev,
+ st->report_state.report_id,
+ st->report_state.index,
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+ } else {
+ /* Still referenced by another user: keep the sensor powered. */
+ if (!atomic_dec_and_test(&st->data_ready))
+ return 0;
+ sensor_hub_device_close(st->hsdev);
+ state_val = hid_sensor_get_usage_index(st->hsdev,
+ st->power_state.report_id,
+ st->power_state.index,
+ HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM);
+ report_val = hid_sensor_get_usage_index(st->hsdev,
+ st->report_state.report_id,
+ st->report_state.index,
+ HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM);
+ }
+
+ /* A negative index means the usage was not found; skip the write. */
+ if (state_val >= 0) {
+ state_val += st->power_state.logical_minimum;
+ sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ (s32)state_val);
+ }
+
+ if (report_val >= 0) {
+ report_val += st->report_state.logical_minimum;
+ sensor_hub_set_feature(st->hsdev, st->report_state.report_id,
+ st->report_state.index,
+ (s32)report_val);
+ }
+
+ /* Read back the power state; result intentionally unused — NOTE(review): confirm intent. */
+ sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ &state_val);
+ return 0;
+}
+EXPORT_SYMBOL(hid_sensor_power_state);
+
+static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state);
+}
+
+void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
+{
+ iio_trigger_unregister(attrb->trigger);
+ iio_trigger_free(attrb->trigger);
+}
+EXPORT_SYMBOL(hid_sensor_remove_trigger);
+
+static const struct iio_trigger_ops hid_sensor_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &hid_sensor_data_rdy_trigger_set_state,
+};
+
+/*
+ * Allocate and register a data-ready trigger named "<name>-dev<id>" for
+ * the sensor, store it in @attrb, and make it the device's default
+ * trigger. On failure the trigger is freed and a negative error returned.
+ */
+int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ struct hid_sensor_common *attrb)
+{
+ int ret;
+ struct iio_trigger *trig;
+
+ trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
+ if (trig == NULL) {
+ dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ trig->dev.parent = indio_dev->dev.parent;
+ iio_trigger_set_drvdata(trig, attrb);
+ trig->ops = &hid_sensor_trigger_ops;
+ ret = iio_trigger_register(trig);
+
+ if (ret) {
+ dev_err(&indio_dev->dev, "Trigger Register Failed\n");
+ goto error_free_trig;
+ }
+ attrb->trigger = trig;
+ /* Take a reference for indio_dev->trig; released by the IIO core. */
+ indio_dev->trig = iio_trigger_get(trig);
+
+ return ret;
+
+error_free_trig:
+ iio_trigger_free(trig);
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(hid_sensor_setup_trigger);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_DESCRIPTION("HID Sensor trigger processing");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
new file mode 100644
index 00000000000000..0f8e78c249d358
--- /dev/null
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -0,0 +1,27 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _HID_SENSOR_TRIGGER_H
+#define _HID_SENSOR_TRIGGER_H
+
+int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ struct hid_sensor_common *attrb);
+void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
+int hid_sensor_power_state(struct hid_sensor_common *st, bool state);
+
+#endif
diff --git a/drivers/iio/common/st_sensors/Kconfig b/drivers/iio/common/st_sensors/Kconfig
new file mode 100644
index 00000000000000..865f1ca33eb945
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Kconfig
@@ -0,0 +1,14 @@
+#
+# STMicroelectronics sensors common library
+#
+
+config IIO_ST_SENSORS_I2C
+ tristate
+
+config IIO_ST_SENSORS_SPI
+ tristate
+
+config IIO_ST_SENSORS_CORE
+ tristate
+ select IIO_ST_SENSORS_I2C if I2C
+ select IIO_ST_SENSORS_SPI if SPI_MASTER
diff --git a/drivers/iio/common/st_sensors/Makefile b/drivers/iio/common/st_sensors/Makefile
new file mode 100644
index 00000000000000..9f3e24f3024bb3
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the STMicroelectronics sensor common modules.
+#
+
+obj-$(CONFIG_IIO_ST_SENSORS_I2C) += st_sensors_i2c.o
+obj-$(CONFIG_IIO_ST_SENSORS_SPI) += st_sensors_spi.o
+obj-$(CONFIG_IIO_ST_SENSORS_CORE) += st_sensors.o
+st_sensors-y := st_sensors_core.o
+st_sensors-$(CONFIG_IIO_BUFFER) += st_sensors_buffer.o
+st_sensors-$(CONFIG_IIO_TRIGGER) += st_sensors_trigger.o
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
new file mode 100644
index 00000000000000..e18bc67822563e
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -0,0 +1,128 @@
+/*
+ * STMicroelectronics sensors buffer library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+{
+ u8 *addr;
+ int i, n = 0, len;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ unsigned int num_data_channels = sdata->num_data_channels;
+ unsigned int byte_for_channel =
+ indio_dev->channels[0].scan_type.storagebits >> 3;
+
+ addr = kmalloc(num_data_channels, GFP_KERNEL);
+ if (!addr) {
+ len = -ENOMEM;
+ goto st_sensors_get_buffer_element_error;
+ }
+
+ for (i = 0; i < num_data_channels; i++) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ addr[n] = indio_dev->channels[i].address;
+ n++;
+ }
+ }
+ switch (n) {
+ case 1:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], byte_for_channel, buf, sdata->multiread_bit);
+ break;
+ case 2:
+ if ((addr[1] - addr[0]) == byte_for_channel) {
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0], byte_for_channel * n,
+ buf, sdata->multiread_bit);
+ } else {
+ u8 *rx_array;
+ rx_array = kmalloc(byte_for_channel * num_data_channels,
+ GFP_KERNEL);
+ if (!rx_array) {
+ len = -ENOMEM;
+ goto st_sensors_free_memory;
+ }
+
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0],
+ byte_for_channel * num_data_channels,
+ rx_array, sdata->multiread_bit);
+ if (len < 0) {
+ kfree(rx_array);
+ goto st_sensors_free_memory;
+ }
+
+ for (i = 0; i < n * byte_for_channel; i++) {
+ if (i < n)
+ buf[i] = rx_array[i];
+ else
+ buf[i] = rx_array[n + i];
+ }
+ kfree(rx_array);
+ len = byte_for_channel * n;
+ }
+ break;
+ case 3:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], byte_for_channel * num_data_channels,
+ buf, sdata->multiread_bit);
+ break;
+ default:
+ len = -EINVAL;
+ goto st_sensors_free_memory;
+ }
+ if (len != byte_for_channel * n) {
+ len = -EIO;
+ goto st_sensors_free_memory;
+ }
+
+st_sensors_free_memory:
+ kfree(addr);
+st_sensors_get_buffer_element_error:
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_get_buffer_element);
+
+irqreturn_t st_sensors_trigger_handler(int irq, void *p)
+{
+ int len;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
+ if (len < 0)
+ goto st_sensors_get_buffer_element_error;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
+ pf->timestamp);
+
+st_sensors_get_buffer_element_error:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(st_sensors_trigger_handler);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
new file mode 100644
index 00000000000000..edd13d2b4121f7
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -0,0 +1,558 @@
+/*
+ * STMicroelectronics sensors core library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <asm/unaligned.h>
+#include <linux/iio/common/st_sensors.h>
+
+
+#define ST_SENSORS_WAI_ADDRESS 0x0f
+
+static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
+{
+ return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
+}
+
+static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data)
+{
+ int err;
+ u8 new_data;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data);
+ if (err < 0)
+ goto st_sensors_write_data_with_mask_error;
+
+ new_data = ((new_data & (~mask)) | ((data << __ffs(mask)) & mask));
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data);
+
+st_sensors_write_data_with_mask_error:
+ return err;
+}
+
+static int st_sensors_match_odr(struct st_sensor_settings *sensor_settings,
+ unsigned int odr, struct st_sensor_odr_avl *odr_out)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sensor_settings->odr.odr_avl[i].hz == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor_settings->odr.odr_avl[i].hz == odr) {
+ odr_out->hz = sensor_settings->odr.odr_avl[i].hz;
+ odr_out->value = sensor_settings->odr.odr_avl[i].value;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
+{
+ int err;
+ struct st_sensor_odr_avl odr_out = {0, 0};
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
+ if (err < 0)
+ goto st_sensors_match_odr_error;
+
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
+ if (sdata->enabled == true) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
+ odr_out.value);
+ } else {
+ err = 0;
+ }
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->odr.addr,
+ sdata->sensor_settings->odr.mask,
+ odr_out.value);
+ }
+ if (err >= 0)
+ sdata->odr = odr_out.hz;
+
+st_sensors_match_odr_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_odr);
+
+static int st_sensors_match_fs(struct st_sensor_settings *sensor_settings,
+ unsigned int fs, int *index_fs_avl)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sensor_settings->fs.fs_avl[i].num == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor_settings->fs.fs_avl[i].num == fs) {
+ *index_fs_avl = i;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
+{
+ int err, i = 0;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_fs(sdata->sensor_settings, fs, &i);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->fs.addr,
+ sdata->sensor_settings->fs.mask,
+ sdata->sensor_settings->fs.fs_avl[i].value);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &sdata->sensor_settings->fs.fs_avl[i];
+ return err;
+
+st_accel_set_fullscale_error:
+ dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
+ return err;
+}
+
+int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+ u8 tmp_value;
+ int err = -EINVAL;
+ bool found = false;
+ struct st_sensor_odr_avl odr_out = {0, 0};
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ if (enable) {
+ tmp_value = sdata->sensor_settings->pw.value_on;
+ if ((sdata->sensor_settings->odr.addr ==
+ sdata->sensor_settings->pw.addr) &&
+ (sdata->sensor_settings->odr.mask ==
+ sdata->sensor_settings->pw.mask)) {
+ err = st_sensors_match_odr(sdata->sensor_settings,
+ sdata->odr, &odr_out);
+ if (err < 0)
+ goto set_enable_error;
+ tmp_value = odr_out.value;
+ found = true;
+ }
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask, tmp_value);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = true;
+
+ if (found)
+ sdata->odr = odr_out.hz;
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->pw.addr,
+ sdata->sensor_settings->pw.mask,
+ sdata->sensor_settings->pw.value_off);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = false;
+ }
+
+set_enable_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_enable);
+
+int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->enable_axis.addr,
+ sdata->sensor_settings->enable_axis.mask,
+ axis_enable);
+}
+EXPORT_SYMBOL(st_sensors_set_axis_enable);
+
+void st_sensors_power_enable(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *pdata = iio_priv(indio_dev);
+ int err;
+
+ /* Regulators not mandatory, but if requested we should enable them. */
+ pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
+ if (!IS_ERR(pdata->vdd)) {
+ err = regulator_enable(pdata->vdd);
+ if (err != 0)
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd supply\n");
+ }
+
+ pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
+ if (!IS_ERR(pdata->vdd_io)) {
+ err = regulator_enable(pdata->vdd_io);
+ if (err != 0)
+ dev_warn(&indio_dev->dev,
+ "Failed to enable specified Vdd_IO supply\n");
+ }
+}
+EXPORT_SYMBOL(st_sensors_power_enable);
+
+void st_sensors_power_disable(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *pdata = iio_priv(indio_dev);
+
+ if (!IS_ERR(pdata->vdd))
+ regulator_disable(pdata->vdd);
+
+ if (!IS_ERR(pdata->vdd_io))
+ regulator_disable(pdata->vdd_io);
+}
+EXPORT_SYMBOL(st_sensors_power_disable);
+
+static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ switch (pdata->drdy_int_pin) {
+ case 1:
+ if (sdata->sensor_settings->drdy_irq.mask_int1 == 0) {
+ dev_err(&indio_dev->dev,
+ "DRDY on INT1 not available.\n");
+ return -EINVAL;
+ }
+ sdata->drdy_int_pin = 1;
+ break;
+ case 2:
+ if (sdata->sensor_settings->drdy_irq.mask_int2 == 0) {
+ dev_err(&indio_dev->dev,
+ "DRDY on INT2 not available.\n");
+ return -EINVAL;
+ }
+ sdata->drdy_int_pin = 2;
+ break;
+ default:
+ dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
+ struct st_sensors_platform_data *defdata)
+{
+ struct st_sensors_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+ u32 val;
+
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!of_property_read_u32(np, "st,drdy-int-pin", &val) && (val <= 2))
+ pdata->drdy_int_pin = (u8) val;
+ else
+ pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 1;
+
+ return pdata;
+}
+#else
+static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
+ struct st_sensors_platform_data *defdata)
+{
+ return NULL;
+}
+#endif
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *of_pdata;
+ int err = 0;
+
+ mutex_init(&sdata->tb.buf_lock);
+
+ /* If OF/DT pdata exists, it will take precedence of anything else */
+ of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
+ if (of_pdata)
+ pdata = of_pdata;
+
+ if (pdata) {
+ err = st_sensors_set_drdy_int_pin(indio_dev, pdata);
+ if (err < 0)
+ return err;
+ }
+
+ err = st_sensors_set_enable(indio_dev, false);
+ if (err < 0)
+ return err;
+
+ if (sdata->current_fullscale) {
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->current_fullscale->num);
+ if (err < 0)
+ return err;
+ } else
+ dev_info(&indio_dev->dev, "Full-scale not possible\n");
+
+ err = st_sensors_set_odr(indio_dev, sdata->odr);
+ if (err < 0)
+ return err;
+
+ /* set BDU */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->bdu.addr,
+ sdata->sensor_settings->bdu.mask, true);
+ if (err < 0)
+ return err;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_init_sensor);
+
+int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
+{
+ int err;
+ u8 drdy_mask;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ if (!sdata->sensor_settings->drdy_irq.addr)
+ return 0;
+
+ /* Enable/Disable the interrupt generator 1. */
+ if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.ig1.en_addr,
+ sdata->sensor_settings->drdy_irq.ig1.en_mask,
+ (int)enable);
+ if (err < 0)
+ goto st_accel_set_dataready_irq_error;
+ }
+
+ if (sdata->drdy_int_pin == 1)
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int1;
+ else
+ drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+
+ /* Enable/Disable the interrupt generator for data ready. */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.addr,
+ drdy_mask, (int)enable);
+
+st_accel_set_dataready_irq_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_dataready_irq);
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
+{
+ int err = -EINVAL, i;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if ((sdata->sensor_settings->fs.fs_avl[i].gain == scale) &&
+ (sdata->sensor_settings->fs.fs_avl[i].gain != 0)) {
+ err = 0;
+ break;
+ }
+ }
+ if (err < 0)
+ goto st_sensors_match_scale_error;
+
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->sensor_settings->fs.fs_avl[i].num);
+
+st_sensors_match_scale_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain);
+
+static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *data)
+{
+ int err;
+ u8 *outdata;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
+
+ outdata = kmalloc(byte_for_channel, GFP_KERNEL);
+ if (!outdata)
+ return -ENOMEM;
+
+ err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ ch->address, byte_for_channel,
+ outdata, sdata->multiread_bit);
+ if (err < 0)
+ goto st_sensors_free_memory;
+
+ if (byte_for_channel == 2)
+ *data = (s16)get_unaligned_le16(outdata);
+ else if (byte_for_channel == 3)
+ *data = (s32)st_sensors_get_unaligned_le24(outdata);
+
+st_sensors_free_memory:
+ kfree(outdata);
+
+ return err;
+}
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ err = -EBUSY;
+ goto out;
+ } else {
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto out;
+
+ msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
+ err = st_sensors_read_axis_data(indio_dev, ch, val);
+ if (err < 0)
+ goto out;
+
+ *val = *val >> ch->scan_type.shift;
+
+ err = st_sensors_set_enable(indio_dev, false);
+ }
+out:
+ mutex_unlock(&indio_dev->mlock);
+
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_read_info_raw);
+
+int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list,
+ const struct st_sensor_settings *sensor_settings)
+{
+ u8 wai;
+ int i, n, err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ ST_SENSORS_DEFAULT_WAI_ADDRESS, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
+ goto read_wai_error;
+ }
+
+ for (i = 0; i < num_sensors_list; i++) {
+ if (sensor_settings[i].wai == wai)
+ break;
+ }
+ if (i == num_sensors_list)
+ goto device_not_supported;
+
+ for (n = 0; n < ARRAY_SIZE(sensor_settings[i].sensors_supported); n++) {
+ if (strcmp(indio_dev->name,
+ &sensor_settings[i].sensors_supported[n][0]) == 0)
+ break;
+ }
+ if (n == ARRAY_SIZE(sensor_settings[i].sensors_supported)) {
+ dev_err(&indio_dev->dev, "device name and WhoAmI mismatch.\n");
+ goto sensor_name_mismatch;
+ }
+
+ sdata->sensor_settings =
+ (struct st_sensor_settings *)&sensor_settings[i];
+
+ return i;
+
+device_not_supported:
+ dev_err(&indio_dev->dev, "device not supported: WhoAmI (0x%x).\n", wai);
+sensor_name_mismatch:
+ err = -ENODEV;
+read_wai_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_check_device_support);
+
+ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sdata->sensor_settings->odr.odr_avl[i].hz == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ sdata->sensor_settings->odr.odr_avl[i].hz);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
+
+ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ sdata->sensor_settings->fs.fs_avl[i].gain);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_scale_avail);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
new file mode 100644
index 00000000000000..98cfee296d4622
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -0,0 +1,112 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/of_device.h>
+
+#include <linux/iio/common/st_sensors_i2c.h>
+
+
+#define ST_SENSORS_I2C_MULTIREAD 0x80
+
+static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_i2c_client(sdata->dev)->irq;
+}
+
+static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ int err;
+
+ err = i2c_smbus_read_byte_data(to_i2c_client(dev), reg_addr);
+ if (err < 0)
+ goto st_accel_i2c_read_byte_error;
+
+ *res_byte = err & 0xff;
+
+st_accel_i2c_read_byte_error:
+ return err < 0 ? err : 0;
+}
+
+static int st_sensors_i2c_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ if (multiread_bit)
+ reg_addr |= ST_SENSORS_I2C_MULTIREAD;
+
+ return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
+ reg_addr, len, data);
+}
+
+static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), reg_addr, data);
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_i2c = {
+ .read_byte = st_sensors_i2c_read_byte,
+ .write_byte = st_sensors_i2c_write_byte,
+ .read_multiple_byte = st_sensors_i2c_read_multiple_byte,
+};
+
+void st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct st_sensor_data *sdata)
+{
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->name;
+
+ sdata->dev = &client->dev;
+ sdata->tf = &st_sensors_tf_i2c;
+ sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_i2c_configure);
+
+#ifdef CONFIG_OF
+/**
+ * st_sensors_of_i2c_probe() - device tree probe for ST I2C sensors
+ * @client: the I2C client device for the sensor
+ * @match: the OF match table for the device, containing compatible strings
+ * but also a .data field with the corresponding internal kernel name
+ * used by this sensor.
+ *
+ * In effect this function matches a compatible string to an internal kernel
+ * name for a certain sensor device, so that the rest of the autodetection can
+ * rely on that name from this point on. I2C client devices will be renamed
+ * to match the internal kernel convention.
+ */
+void st_sensors_of_i2c_probe(struct i2c_client *client,
+ const struct of_device_id *match)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(match, &client->dev);
+ if (!of_id)
+ return;
+
+ /* The name from the OF match takes precedence if present */
+ strncpy(client->name, of_id->data, sizeof(client->name));
+ client->name[sizeof(client->name) - 1] = '\0';
+}
+EXPORT_SYMBOL(st_sensors_of_i2c_probe);
+#endif
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
new file mode 100644
index 00000000000000..78a6a1ab3ece00
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -0,0 +1,122 @@
+/*
+ * STMicroelectronics sensors spi library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors_spi.h>
+
+
+#define ST_SENSORS_SPI_MULTIREAD 0xc0
+#define ST_SENSORS_SPI_READ 0x80
+
+static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_spi_device(sdata->dev)->irq;
+}
+
+static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ int err;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = tb->rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ mutex_lock(&tb->buf_lock);
+ if ((multiread_bit) && (len > 1))
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD;
+ else
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ;
+
+ err = spi_sync_transfer(to_spi_device(dev), xfers, ARRAY_SIZE(xfers));
+ if (err)
+ goto acc_spi_read_error;
+
+ memcpy(data, tb->rx_buf, len*sizeof(u8));
+ mutex_unlock(&tb->buf_lock);
+ return len;
+
+acc_spi_read_error:
+ mutex_unlock(&tb->buf_lock);
+ return err;
+}
+
+static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false);
+}
+
+static int st_sensors_spi_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit);
+}
+
+static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ int err;
+
+ struct spi_transfer xfers = {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 2,
+ };
+
+ mutex_lock(&tb->buf_lock);
+ tb->tx_buf[0] = reg_addr;
+ tb->tx_buf[1] = data;
+
+ err = spi_sync_transfer(to_spi_device(dev), &xfers, 1);
+ mutex_unlock(&tb->buf_lock);
+
+ return err;
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_spi = {
+ .read_byte = st_sensors_spi_read_byte,
+ .write_byte = st_sensors_spi_write_byte,
+ .read_multiple_byte = st_sensors_spi_read_multiple_byte,
+};
+
+void st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi, struct st_sensor_data *sdata)
+{
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi->modalias;
+
+ sdata->dev = &spi->dev;
+ sdata->tf = &st_sensors_tf_spi;
+ sdata->get_irq_data_ready = st_sensors_spi_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_spi_configure);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
new file mode 100644
index 00000000000000..8d8ca6f1e16a5d
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -0,0 +1,77 @@
+/*
+ * STMicroelectronics sensors trigger library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
+ if (sdata->trig == NULL) {
+ err = -ENOMEM;
+ dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
+ goto iio_trigger_alloc_error;
+ }
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_RISING,
+ sdata->trig->name,
+ sdata->trig);
+ if (err)
+ goto request_irq_error;
+
+ iio_trigger_set_drvdata(sdata->trig, indio_dev);
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
+ err = iio_trigger_register(sdata->trig);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ goto iio_trigger_register_error;
+ }
+ indio_dev->trig = iio_trigger_get(sdata->trig);
+
+ return 0;
+
+iio_trigger_register_error:
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+request_irq_error:
+ iio_trigger_free(sdata->trig);
+iio_trigger_alloc_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_allocate_trigger);
+
+void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ iio_trigger_unregister(sdata->trig);
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+ iio_trigger_free(sdata->trig);
+}
+EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
new file mode 100644
index 00000000000000..2236ea22f98a50
--- /dev/null
+++ b/drivers/iio/dac/Kconfig
@@ -0,0 +1,184 @@
+#
+# DAC drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital to analog converters"
+
+config AD5064
+ tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
+ depends on (SPI_MASTER && I2C!=m) || I2C
+ help
+ Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
+ AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
+ AD5669R Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5064.
+
+config AD5360
+ tristate "Analog Devices AD5360/61/62/63/70/71/73 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5360, AD5361,
+ AD5362, AD5363, AD5370, AD5371, AD5373 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5360.
+
+config AD5380
+ tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
+ depends on (SPI_MASTER && I2C!=m) || I2C
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5380, AD5381,
+ AD5382, AD5383, AD5384, AD5390, AD5391, AD5392 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5380.
+
+config AD5421
+ tristate "Analog Devices AD5421 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5421 loop-powered
+ digital-to-analog converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5421.
+
+config AD5446
+ tristate "Analog Devices AD5446 and similar single channel DACs driver"
+ depends on (SPI_MASTER && I2C!=m) || I2C
+ help
+ Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
+ AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
+ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
+ AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5446.
+
+config AD5449
+ tristate "Analog Devices AD5449 and similar DACs driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5415, AD5426, AD5429,
+ AD5432, AD5439, AD5443, AD5449 Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5449.
+
+config AD5504
+ tristate "Analog Devices AD5504/AD5501 DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5504, AD5501,
+ High Voltage Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5504.
+
+config AD5624R_SPI
+ tristate "Analog Devices AD5624/44/64R DAC spi driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5624R, AD5644R and
+ AD5664R converters (DAC). This driver uses the common SPI interface.
+
+config AD5686
+ tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5686R, AD5685R,
+ AD5684R, AD5791 Voltage Output Digital to
+ Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5686.
+
+config AD5755
+ tristate "Analog Devices AD5755/AD5755-1/AD5757/AD5735/AD5737 DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5755, AD5755-1,
+ AD5757, AD5735, AD5737 quad channel Digital to
+ Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5755.
+
+config AD5764
+ tristate "Analog Devices AD5764/64R/44/44R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5764, AD5764R, AD5744,
+ AD5744R Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5764.
+
+config AD5791
+ tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5760, AD5780,
+ AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
+ Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5791.
+
+config AD7303
+ tristate "Analog Devices AD7303 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7303 Digital to Analog
+ Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad7303.
+
+config MAX517
+ tristate "Maxim MAX517/518/519 DAC driver"
+ depends on I2C
+ help
+ If you say yes here you get support for the Maxim chips MAX517,
+ MAX518 and MAX519 (I2C 8-Bit DACs with rail-to-rail outputs).
+
+ This driver can also be built as a module. If so, the module
+ will be called max517.
+
+config MAX5821
+ tristate "Maxim MAX5821 DAC driver"
+ depends on I2C
+ depends on OF
+ help
+ Say yes here to build support for Maxim MAX5821
+ 10 bits DAC.
+
+config MCP4725
+ tristate "MCP4725 DAC driver"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for the Microchip
+ MCP 4725 12-bit digital-to-analog converter (DAC) with I2C
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mcp4725.
+
+config MCP4922
+ tristate "MCP4902, MCP4912, MCP4922 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build the driver for the Microchip MCP4902
+ MCP4912, and MCP4922 DAC devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mcp4922.
+
+endmenu
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
new file mode 100644
index 00000000000000..52be7e1acf16d1
--- /dev/null
+++ b/drivers/iio/dac/Makefile
@@ -0,0 +1,22 @@
#
# Makefile for industrial I/O DAC drivers
#

# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5380) += ad5380.o
obj-$(CONFIG_AD5421) += ad5421.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5449) += ad5449.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
new file mode 100644
index 00000000000000..21a226ed333917
--- /dev/null
+++ b/drivers/iio/dac/ad5064.c
@@ -0,0 +1,684 @@
+/*
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R,
+ * AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AD5064_MAX_DAC_CHANNELS 8
+#define AD5064_MAX_VREFS 4
+
+#define AD5064_ADDR(x) ((x) << 20)
+#define AD5064_CMD(x) ((x) << 24)
+
+#define AD5064_ADDR_ALL_DAC 0xF
+
+#define AD5064_CMD_WRITE_INPUT_N 0x0
+#define AD5064_CMD_UPDATE_DAC_N 0x1
+#define AD5064_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
+#define AD5064_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5064_CMD_POWERDOWN_DAC 0x4
+#define AD5064_CMD_CLEAR 0x5
+#define AD5064_CMD_LDAC_MASK 0x6
+#define AD5064_CMD_RESET 0x7
+#define AD5064_CMD_CONFIG 0x8
+
+#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
+#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
+
+#define AD5064_LDAC_PWRDN_NONE 0x0
+#define AD5064_LDAC_PWRDN_1K 0x1
+#define AD5064_LDAC_PWRDN_100K 0x2
+#define AD5064_LDAC_PWRDN_3STATE 0x3
+
+/**
+ * struct ad5064_chip_info - chip specific information
+ * @shared_vref: whether the vref supply is shared between channels
+ * @internal_vref: internal reference voltage. 0 if the chip has no internal
+ * vref.
+ * @channel: channel specification
+ * @num_channels: number of channels
+ */
+
+struct ad5064_chip_info {
+ bool shared_vref;
+ unsigned long internal_vref;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct ad5064_state;
+
+typedef int (*ad5064_write_func)(struct ad5064_state *st, unsigned int cmd,
+ unsigned int addr, unsigned int val);
+
+/**
+ * struct ad5064_state - driver instance specific data
+ * @dev: the device for this driver instance
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @pwr_down: whether channel is powered down
+ * @pwr_down_mode: channel's current power down mode
+ * @dac_cache: current DAC raw value (chip does not support readback)
+ * @use_internal_vref: set to true if the internal reference voltage should be
+ * used.
+ * @write: register write callback
+ * @data: i2c/spi transfer buffers
+ */
+
+struct ad5064_state {
+ struct device *dev;
+ const struct ad5064_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[AD5064_MAX_VREFS];
+ bool pwr_down[AD5064_MAX_DAC_CHANNELS];
+ u8 pwr_down_mode[AD5064_MAX_DAC_CHANNELS];
+ unsigned int dac_cache[AD5064_MAX_DAC_CHANNELS];
+ bool use_internal_vref;
+
+ ad5064_write_func write;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ u8 i2c[3];
+ __be32 spi;
+ } data ____cacheline_aligned;
+};
+
+enum ad5064_type {
+ ID_AD5024,
+ ID_AD5025,
+ ID_AD5044,
+ ID_AD5045,
+ ID_AD5064,
+ ID_AD5064_1,
+ ID_AD5065,
+ ID_AD5628_1,
+ ID_AD5628_2,
+ ID_AD5648_1,
+ ID_AD5648_2,
+ ID_AD5666_1,
+ ID_AD5666_2,
+ ID_AD5668_1,
+ ID_AD5668_2,
+};
+
+static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
+ unsigned int addr, unsigned int val, unsigned int shift)
+{
+ val <<= shift;
+
+ return st->write(st, cmd, addr, val);
+}
+
+static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
+ const struct iio_chan_spec *chan)
+{
+ unsigned int val;
+ int ret;
+
+ val = (0x1 << chan->address);
+
+ if (st->pwr_down[chan->channel])
+ val |= st->pwr_down_mode[chan->channel] << 8;
+
+ ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
+
+ return ret;
+}
+
+static const char * const ad5064_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "three_state",
+};
+
+static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+
+ return st->pwr_down_mode[chan->channel] - 1;
+}
+
+static int ad5064_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->pwr_down_mode[chan->channel] = mode + 1;
+
+ ret = ad5064_sync_powerdown_mode(st, chan);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static const struct iio_enum ad5064_powerdown_mode_enum = {
+ .items = ad5064_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5064_powerdown_modes),
+ .get = ad5064_get_powerdown_mode,
+ .set = ad5064_set_powerdown_mode,
+};
+
+static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down[chan->channel]);
+}
+
+static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct ad5064_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->pwr_down[chan->channel] = pwr_down;
+
+ ret = ad5064_sync_powerdown_mode(st, chan);
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : len;
+}
+
+static int ad5064_get_vref(struct ad5064_state *st,
+ struct iio_chan_spec const *chan)
+{
+ unsigned int i;
+
+ if (st->use_internal_vref)
+ return st->chip_info->internal_vref;
+
+ i = st->chip_info->shared_vref ? 0 : chan->channel;
+ return regulator_get_voltage(st->vref_reg[i].consumer);
+}
+
/*
 * IIO read callback. IIO_CHAN_INFO_RAW returns the cached DAC code (the
 * chip does not support readback); IIO_CHAN_INFO_SCALE reports the
 * vref-derived scale as a binary fraction (mV / 2^realbits).
 */
static int ad5064_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad5064_state *st = iio_priv(indio_dev);
	int scale_uv;

	switch (m) {
	case IIO_CHAN_INFO_RAW:
		/* No hardware readback: report the last written value. */
		*val = st->dac_cache[chan->channel];
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		scale_uv = ad5064_get_vref(st, chan);
		if (scale_uv < 0)
			return scale_uv;

		*val = scale_uv / 1000; /* microvolts -> millivolts */
		*val2 = chan->scan_type.realbits;
		return IIO_VAL_FRACTIONAL_LOG2;
	default:
		break;
	}
	return -EINVAL;
}
+
/*
 * IIO write callback. IIO_CHAN_INFO_RAW programs the channel via the
 * write-input-and-update command and refreshes the cache on success.
 */
static int ad5064_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int val, int val2, long mask)
{
	struct ad5064_state *st = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* Reject codes outside the converter's resolution. */
		if (val >= (1 << chan->scan_type.realbits) || val < 0)
			return -EINVAL;

		mutex_lock(&indio_dev->mlock);
		ret = ad5064_write(st, AD5064_CMD_WRITE_INPUT_N_UPDATE_N,
			chan->address, val, chan->scan_type.shift);
		if (ret == 0)
			st->dac_cache[chan->channel] = val;
		mutex_unlock(&indio_dev->mlock);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
+
+static const struct iio_info ad5064_info = {
+ .read_raw = ad5064_read_raw,
+ .write_raw = ad5064_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5064_read_dac_powerdown,
+ .write = ad5064_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum),
+ { },
+};
+
+#define AD5064_CHANNEL(chan, addr, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = addr, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 20 - bits, \
+ }, \
+ .ext_info = ad5064_ext_info, \
+}
+
+#define DECLARE_AD5064_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5064_CHANNEL(0, 0, bits), \
+ AD5064_CHANNEL(1, 1, bits), \
+ AD5064_CHANNEL(2, 2, bits), \
+ AD5064_CHANNEL(3, 3, bits), \
+ AD5064_CHANNEL(4, 4, bits), \
+ AD5064_CHANNEL(5, 5, bits), \
+ AD5064_CHANNEL(6, 6, bits), \
+ AD5064_CHANNEL(7, 7, bits), \
+}
+
+#define DECLARE_AD5065_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5064_CHANNEL(0, 0, bits), \
+ AD5064_CHANNEL(1, 3, bits), \
+}
+
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
+
+static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
+ [ID_AD5024] = {
+ .shared_vref = false,
+ .channels = ad5024_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5025] = {
+ .shared_vref = false,
+ .channels = ad5025_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5044] = {
+ .shared_vref = false,
+ .channels = ad5044_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5045] = {
+ .shared_vref = false,
+ .channels = ad5045_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5064] = {
+ .shared_vref = false,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5064_1] = {
+ .shared_vref = true,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5065] = {
+ .shared_vref = false,
+ .channels = ad5065_channels,
+ .num_channels = 2,
+ },
+ [ID_AD5628_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5628_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5024_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5648_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5044_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5666_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5666_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 4,
+ },
+ [ID_AD5668_1] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+ [ID_AD5668_2] = {
+ .shared_vref = true,
+ .internal_vref = 5000000,
+ .channels = ad5064_channels,
+ .num_channels = 8,
+ },
+};
+
+static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
+{
+ return st->chip_info->shared_vref ? 1 : st->chip_info->num_channels;
+}
+
+static const char * const ad5064_vref_names[] = {
+ "vrefA",
+ "vrefB",
+ "vrefC",
+ "vrefD",
+};
+
+static const char * const ad5064_vref_name(struct ad5064_state *st,
+ unsigned int vref)
+{
+ return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
+}
+
/*
 * Bus-agnostic probe: allocates the IIO device, resolves the reference
 * supplies (falling back to the internal vref when the chip has one and
 * the external supplies are absent), seeds the per-channel caches with
 * the powerup defaults and registers the IIO device.
 */
static int ad5064_probe(struct device *dev, enum ad5064_type type,
	const char *name, ad5064_write_func write)
{
	struct iio_dev *indio_dev;
	struct ad5064_state *st;
	unsigned int midscale;
	unsigned int i;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	dev_set_drvdata(dev, indio_dev);

	st->chip_info = &ad5064_chip_info_tbl[type];
	st->dev = dev;
	st->write = write;

	/* One supply per channel, or a single "vref" when it is shared. */
	for (i = 0; i < ad5064_num_vref(st); ++i)
		st->vref_reg[i].supply = ad5064_vref_name(st, i);

	ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st),
		st->vref_reg);
	if (ret) {
		/*
		 * No external references: use the internal one when the
		 * chip provides it. NOTE(review): this path also swallows
		 * -EPROBE_DEFER from the regulator core — confirm intended.
		 */
		if (!st->chip_info->internal_vref)
			return ret;
		st->use_internal_vref = true;
		ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
			AD5064_CONFIG_INT_VREF_ENABLE, 0);
		if (ret) {
			dev_err(dev, "Failed to enable internal vref: %d\n",
				ret);
			return ret;
		}
	} else {
		ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
		if (ret)
			return ret;
	}

	indio_dev->dev.parent = dev;
	indio_dev->name = name;
	indio_dev->info = &ad5064_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = st->chip_info->channels;
	indio_dev->num_channels = st->chip_info->num_channels;

	/* Cache defaults: outputs power up at mid-scale, 1k pulldown mode. */
	midscale = (1 << indio_dev->channels[0].scan_type.realbits) / 2;

	for (i = 0; i < st->chip_info->num_channels; ++i) {
		st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
		st->dac_cache[i] = midscale;
	}

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	return 0;

error_disable_reg:
	/* Supplies were only enabled on the external-vref path. */
	if (!st->use_internal_vref)
		regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);

	return ret;
}
+
+static int ad5064_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5064_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (!st->use_internal_vref)
+ regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
+ unsigned int addr, unsigned int val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+
+ st->data.spi = cpu_to_be32(AD5064_CMD(cmd) | AD5064_ADDR(addr) | val);
+ return spi_write(spi, &st->data.spi, sizeof(st->data.spi));
+}
+
/* SPI probe: dispatch to the common probe with the SPI write callback. */
static int ad5064_spi_probe(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);

	return ad5064_probe(&spi->dev, id->driver_data, id->name,
		ad5064_spi_write);
}
+
/* SPI remove: thin wrapper around the common remove. */
static int ad5064_spi_remove(struct spi_device *spi)
{
	return ad5064_remove(&spi->dev);
}
+
+static const struct spi_device_id ad5064_spi_ids[] = {
+ {"ad5024", ID_AD5024},
+ {"ad5025", ID_AD5025},
+ {"ad5044", ID_AD5044},
+ {"ad5045", ID_AD5045},
+ {"ad5064", ID_AD5064},
+ {"ad5064-1", ID_AD5064_1},
+ {"ad5065", ID_AD5065},
+ {"ad5628-1", ID_AD5628_1},
+ {"ad5628-2", ID_AD5628_2},
+ {"ad5648-1", ID_AD5648_1},
+ {"ad5648-2", ID_AD5648_2},
+ {"ad5666-1", ID_AD5666_1},
+ {"ad5666-2", ID_AD5666_2},
+ {"ad5668-1", ID_AD5668_1},
+ {"ad5668-2", ID_AD5668_2},
+ {"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5064_spi_ids);
+
+static struct spi_driver ad5064_spi_driver = {
+ .driver = {
+ .name = "ad5064",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5064_spi_probe,
+ .remove = ad5064_spi_remove,
+ .id_table = ad5064_spi_ids,
+};
+
/* Register the SPI driver; stubbed out when SPI_MASTER is disabled. */
static int __init ad5064_spi_register_driver(void)
{
	return spi_register_driver(&ad5064_spi_driver);
}
+
/*
 * Unregister the SPI driver. Deliberately not __exit: it is also called
 * from the init error path in ad5064_init().
 */
static void ad5064_spi_unregister_driver(void)
{
	spi_unregister_driver(&ad5064_spi_driver);
}
+
+#else
+
+static inline int ad5064_spi_register_driver(void) { return 0; }
+static inline void ad5064_spi_unregister_driver(void) { }
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
+ unsigned int addr, unsigned int val)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+
+ st->data.i2c[0] = (cmd << 4) | addr;
+ put_unaligned_be16(val, &st->data.i2c[1]);
+ return i2c_master_send(i2c, st->data.i2c, 3);
+}
+
/* I2C probe: dispatch to the common probe with the I2C write callback. */
static int ad5064_i2c_probe(struct i2c_client *i2c,
	const struct i2c_device_id *id)
{
	return ad5064_probe(&i2c->dev, id->driver_data, id->name,
		ad5064_i2c_write);
}
+
/* I2C remove: thin wrapper around the common remove. */
static int ad5064_i2c_remove(struct i2c_client *i2c)
{
	return ad5064_remove(&i2c->dev);
}
+
+static const struct i2c_device_id ad5064_i2c_ids[] = {
+ {"ad5629-1", ID_AD5628_1},
+ {"ad5629-2", ID_AD5628_2},
+ {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
+ {"ad5669-1", ID_AD5668_1},
+ {"ad5669-2", ID_AD5668_2},
+ {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
+
+static struct i2c_driver ad5064_i2c_driver = {
+ .driver = {
+ .name = "ad5064",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5064_i2c_probe,
+ .remove = ad5064_i2c_remove,
+ .id_table = ad5064_i2c_ids,
+};
+
/* Register the I2C driver; stubbed out when I2C is disabled. */
static int __init ad5064_i2c_register_driver(void)
{
	return i2c_add_driver(&ad5064_i2c_driver);
}
+
/* Unregister the I2C driver (module-exit path only, hence __exit). */
static void __exit ad5064_i2c_unregister_driver(void)
{
	i2c_del_driver(&ad5064_i2c_driver);
}
+
+#else
+
+static inline int ad5064_i2c_register_driver(void) { return 0; }
+static inline void ad5064_i2c_unregister_driver(void) { }
+
+#endif
+
+static int __init ad5064_init(void)
+{
+ int ret;
+
+ ret = ad5064_spi_register_driver();
+ if (ret)
+ return ret;
+
+ ret = ad5064_i2c_register_driver();
+ if (ret) {
+ ad5064_spi_unregister_driver();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ad5064_init);
+
/* Module exit: unregister in the reverse order of registration. */
static void __exit ad5064_exit(void)
{
	ad5064_i2c_unregister_driver();
	ad5064_spi_unregister_driver();
}
+module_exit(ad5064_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5024 and similar multi-channel DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
new file mode 100644
index 00000000000000..64634d7f578e2a
--- /dev/null
+++ b/drivers/iio/dac/ad5360.c
@@ -0,0 +1,562 @@
+/*
+ * Analog devices AD5360, AD5361, AD5362, AD5363, AD5370, AD5371, AD5373
+ * multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AD5360_CMD(x) ((x) << 22)
+#define AD5360_ADDR(x) ((x) << 16)
+
+#define AD5360_READBACK_TYPE(x) ((x) << 13)
+#define AD5360_READBACK_ADDR(x) ((x) << 7)
+
+#define AD5360_CHAN_ADDR(chan) ((chan) + 0x8)
+
+#define AD5360_CMD_WRITE_DATA 0x3
+#define AD5360_CMD_WRITE_OFFSET 0x2
+#define AD5360_CMD_WRITE_GAIN 0x1
+#define AD5360_CMD_SPECIAL_FUNCTION 0x0
+
+/* Special function register addresses */
+#define AD5360_REG_SF_NOP 0x0
+#define AD5360_REG_SF_CTRL 0x1
+#define AD5360_REG_SF_OFS(x) (0x2 + (x))
+#define AD5360_REG_SF_READBACK 0x5
+
+#define AD5360_SF_CTRL_PWR_DOWN BIT(0)
+
+#define AD5360_READBACK_X1A 0x0
+#define AD5360_READBACK_X1B 0x1
+#define AD5360_READBACK_OFFSET 0x2
+#define AD5360_READBACK_GAIN 0x3
+#define AD5360_READBACK_SF 0x4
+
+
+/**
+ * struct ad5360_chip_info - chip specific information
+ * @channel_template: channel specification template
+ * @num_channels: number of channels
+ * @channels_per_group: number of channels per group
+ * @num_vrefs: number of vref supplies for the chip
+*/
+
+struct ad5360_chip_info {
+ struct iio_chan_spec channel_template;
+ unsigned int num_channels;
+ unsigned int channels_per_group;
+ unsigned int num_vrefs;
+};
+
+/**
+ * struct ad5360_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @ctrl: control register cache
+ * @data: spi transfer buffers
+ */
+
+struct ad5360_state {
+ struct spi_device *spi;
+ const struct ad5360_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[3];
+ unsigned int ctrl;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5360_type {
+ ID_AD5360,
+ ID_AD5361,
+ ID_AD5362,
+ ID_AD5363,
+ ID_AD5370,
+ ID_AD5371,
+ ID_AD5372,
+ ID_AD5373,
+};
+
+#define AD5360_CHANNEL(bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 16 - (bits), \
+ }, \
+}
+
+static const struct ad5360_chip_info ad5360_chip_info_tbl[] = {
+ [ID_AD5360] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 16,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5361] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 16,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5362] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 8,
+ .channels_per_group = 4,
+ .num_vrefs = 2,
+ },
+ [ID_AD5363] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 8,
+ .channels_per_group = 4,
+ .num_vrefs = 2,
+ },
+ [ID_AD5370] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 40,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5371] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 40,
+ .channels_per_group = 8,
+ .num_vrefs = 3,
+ },
+ [ID_AD5372] = {
+ .channel_template = AD5360_CHANNEL(16),
+ .num_channels = 32,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+ [ID_AD5373] = {
+ .channel_template = AD5360_CHANNEL(14),
+ .num_channels = 32,
+ .channels_per_group = 8,
+ .num_vrefs = 2,
+ },
+};
+
+static unsigned int ad5360_get_channel_vref_index(struct ad5360_state *st,
+ unsigned int channel)
+{
+ unsigned int i;
+
+ /* The first groups have their own vref, while the remaining groups
+ * share the last vref */
+ i = channel / st->chip_info->channels_per_group;
+ if (i >= st->chip_info->num_vrefs)
+ i = st->chip_info->num_vrefs - 1;
+
+ return i;
+}
+
+static int ad5360_get_channel_vref(struct ad5360_state *st,
+ unsigned int channel)
+{
+ unsigned int i = ad5360_get_channel_vref_index(st, channel);
+
+ return regulator_get_voltage(st->vref_reg[i].consumer);
+}
+
+
/*
 * Send a 24-bit command word: the value is shifted into position and
 * merged with the command and address fields. Caller must hold mlock
 * (the transfer buffer is shared).
 */
static int ad5360_write_unlocked(struct iio_dev *indio_dev,
	unsigned int cmd, unsigned int addr, unsigned int val,
	unsigned int shift)
{
	struct ad5360_state *st = iio_priv(indio_dev);

	val <<= shift;
	val |= AD5360_CMD(cmd) | AD5360_ADDR(addr);
	st->data[0].d32 = cpu_to_be32(val);

	/* Skip d8[0]: only the low 3 bytes of the word go on the wire. */
	return spi_write(st->spi, &st->data[0].d8[1], 3);
}
+
+static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
+ unsigned int addr, unsigned int val, unsigned int shift)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
/*
 * Read back a register via the special-function readback command: the
 * first transfer writes the readback request, the second clocks out the
 * result. Returns the 16-bit register value or a negative errno.
 */
static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
	unsigned int addr)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer t[] = {
		{
			.tx_buf = &st->data[0].d8[1],
			.len = 3,
			.cs_change = 1, /* toggle CS between request and read */
		}, {
			.rx_buf = &st->data[1].d8[1],
			.len = 3,
		},
	};

	mutex_lock(&indio_dev->mlock);

	st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
		AD5360_ADDR(AD5360_REG_SF_READBACK) |
		AD5360_READBACK_TYPE(type) |
		AD5360_READBACK_ADDR(addr));

	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
	if (ret >= 0)
		ret = be32_to_cpu(st->data[1].d32) & 0xffff;

	mutex_unlock(&indio_dev->mlock);

	return ret;
}
+
+static ssize_t ad5360_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad5360_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", (bool)(st->ctrl & AD5360_SF_CTRL_PWR_DOWN));
+}
+
+static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ unsigned int clr)
+{
+ struct ad5360_state *st = iio_priv(indio_dev);
+ unsigned int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->ctrl |= set;
+ st->ctrl &= ~clr;
+
+ ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
+ AD5360_REG_SF_CTRL, st->ctrl, 0);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t ad5360_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ if (pwr_down)
+ ret = ad5360_update_ctrl(indio_dev, AD5360_SF_CTRL_PWR_DOWN, 0);
+ else
+ ret = ad5360_update_ctrl(indio_dev, 0, AD5360_SF_CTRL_PWR_DOWN);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown,
+ S_IRUGO | S_IWUSR,
+ ad5360_read_dac_powerdown,
+ ad5360_write_dac_powerdown, 0);
+
+static struct attribute *ad5360_attributes[] = {
+ &iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5360_attribute_group = {
+ .attrs = ad5360_attributes,
+};
+
/*
 * IIO write callback: raw DAC code, per-channel offset (CALIBBIAS) and
 * gain (CALIBSCALE) registers, and the per-vref shared offset register
 * (OFFSET).
 */
static int ad5360_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan,
	int val,
	int val2,
	long mask)
{
	struct ad5360_state *st = iio_priv(indio_dev);
	int max_val = (1 << chan->scan_type.realbits);
	unsigned int ofs_index;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
			chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_CALIBBIAS:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
			chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_CALIBSCALE:
		if (val >= max_val || val < 0)
			return -EINVAL;

		return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
			chan->address, val, chan->scan_type.shift);

	case IIO_CHAN_INFO_OFFSET:
		/* Offsets are negative-or-zero from userspace's view. */
		if (val <= -max_val || val > 0)
			return -EINVAL;

		val = -val;

		/* offset is supposed to have the same scale as raw, but it
		 * is always 14bits wide, so on a chip where the raw value has
		 * more bits, we need to shift offset. */
		val >>= (chan->scan_type.realbits - 14);

		/* There is one DAC offset register per vref. Changing one
		 * channels offset will also change the offset for all other
		 * channels which share the same vref supply. */
		ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
		return ad5360_write(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
			AD5360_REG_SF_OFS(ofs_index), val, 0);
	default:
		break;
	}

	return -EINVAL;
}
+
+/*
+ * Read back raw codes, calibration words, scale and the (negated)
+ * shared analog offset for a channel.
+ */
+static int ad5360_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan,
+	int *val,
+	int *val2,
+	long m)
+{
+	struct ad5360_state *st = iio_priv(indio_dev);
+	unsigned int ofs_index;
+	int scale_uv;
+	int ret;
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ad5360_read(indio_dev, AD5360_READBACK_X1A,
+			chan->address);
+		if (ret < 0)
+			return ret;
+		*val = ret >> chan->scan_type.shift;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		scale_uv = ad5360_get_channel_vref(st, chan->channel);
+		if (scale_uv < 0)
+			return scale_uv;
+
+		/* vout = 4 * vref * dac_code */
+		*val = scale_uv * 4 / 1000;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
+			chan->address);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
+			chan->address);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_OFFSET:
+		ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
+		ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
+				  AD5360_REG_SF_OFS(ofs_index));
+		if (ret < 0)
+			return ret;
+
+		/* Undo the 14-bit register scaling done on the write path. */
+		ret <<= (chan->scan_type.realbits - 14);
+		*val = -ret;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info ad5360_info = {
+	.read_raw = ad5360_read_raw,
+	.write_raw = ad5360_write_raw,
+	.attrs = &ad5360_attribute_group,
+	.driver_module = THIS_MODULE,
+};
+
+/* Supply names for the up-to-three vref groups; indexed by vref group. */
+static const char * const ad5360_vref_name[] = {
+	 "vref0", "vref1", "vref2"
+};
+
+/*
+ * Build the per-chip iio_chan_spec array from the variant's template.
+ * The array is driver-owned and freed in remove()/probe error paths.
+ */
+static int ad5360_alloc_channels(struct iio_dev *indio_dev)
+{
+	struct ad5360_state *st = iio_priv(indio_dev);
+	struct iio_chan_spec *chans;
+	unsigned int ch;
+
+	chans = kcalloc(st->chip_info->num_channels, sizeof(*chans),
+			GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	for (ch = 0; ch < st->chip_info->num_channels; ch++) {
+		chans[ch] = st->chip_info->channel_template;
+		chans[ch].channel = ch;
+		chans[ch].address = AD5360_CHAN_ADDR(ch);
+	}
+
+	indio_dev->channels = chans;
+
+	return 0;
+}
+
+/*
+ * Probe: allocate the IIO device, build the channel array, request and
+ * enable one regulator per vref group, then register with the IIO core.
+ * Note: the iio device and regulators are devm-managed, but the channel
+ * array is manually kfree'd on error and in remove().
+ */
+static int ad5360_probe(struct spi_device *spi)
+{
+	enum ad5360_type type = spi_get_device_id(spi)->driver_data;
+	struct iio_dev *indio_dev;
+	struct ad5360_state *st;
+	unsigned int i;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		dev_err(&spi->dev, "Failed to allocate iio device\n");
+		return  -ENOMEM;
+	}
+
+	st = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+
+	st->chip_info = &ad5360_chip_info_tbl[type];
+	st->spi = spi;
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->info = &ad5360_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->num_channels = st->chip_info->num_channels;
+
+	ret = ad5360_alloc_channels(indio_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
+		return ret;
+	}
+
+	/* Request one supply per vref group of this chip variant. */
+	for (i = 0; i < st->chip_info->num_vrefs; ++i)
+		st->vref_reg[i].supply = ad5360_vref_name[i];
+
+	ret = devm_regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs,
+		st->vref_reg);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to request vref regulators: %d\n", ret);
+		goto error_free_channels;
+	}
+
+	ret = regulator_bulk_enable(st->chip_info->num_vrefs, st->vref_reg);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable vref regulators: %d\n", ret);
+		goto error_free_channels;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+		goto error_disable_reg;
+	}
+
+	return 0;
+
+error_disable_reg:
+	regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
+error_free_channels:
+	kfree(indio_dev->channels);
+
+	return ret;
+}
+
+/* Tear down in reverse probe order; devm frees the iio device itself. */
+static int ad5360_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ad5360_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	kfree(indio_dev->channels);
+
+	regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
+
+	return 0;
+}
+
+/* Supported chip variants; driver_data selects the chip_info entry. */
+static const struct spi_device_id ad5360_ids[] = {
+	{ "ad5360", ID_AD5360 },
+	{ "ad5361", ID_AD5361 },
+	{ "ad5362", ID_AD5362 },
+	{ "ad5363", ID_AD5363 },
+	{ "ad5370", ID_AD5370 },
+	{ "ad5371", ID_AD5371 },
+	{ "ad5372", ID_AD5372 },
+	{ "ad5373", ID_AD5373 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad5360_ids);
+
+static struct spi_driver ad5360_driver = {
+	.driver = {
+		   .name = "ad5360",
+		   .owner = THIS_MODULE,
+	},
+	.probe = ad5360_probe,
+	.remove = ad5360_remove,
+	.id_table = ad5360_ids,
+};
+module_spi_driver(ad5360_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
new file mode 100644
index 00000000000000..9de4c4d3828030
--- /dev/null
+++ b/drivers/iio/dac/ad5380.c
@@ -0,0 +1,654 @@
+/*
+ * Analog devices AD5380, AD5381, AD5382, AD5383, AD5390, AD5391, AD5392
+ * multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Register addresses: low two bits select data/offset/gain per channel. */
+#define AD5380_REG_DATA(x)	(((x) << 2) | 3)
+#define AD5380_REG_OFFSET(x)	(((x) << 2) | 2)
+#define AD5380_REG_GAIN(x)	(((x) << 2) | 1)
+/* Special-function registers (writes trigger chip-wide actions). */
+#define AD5380_REG_SF_PWR_DOWN	(8 << 2)
+#define AD5380_REG_SF_PWR_UP	(9 << 2)
+#define AD5380_REG_SF_CTRL	(12 << 2)
+
+#define AD5380_CTRL_PWR_DOWN_MODE_OFFSET	13
+#define AD5380_CTRL_INT_VREF_2V5		BIT(12)
+#define AD5380_CTRL_INT_VREF_EN			BIT(10)
+
+/**
+ * struct ad5380_chip_info - chip specific information
+ * @channel_template:	channel specification template
+ * @num_channels:	number of channels
+ * @int_vref:		internal vref in mV (cf. table values 1250/2500 and
+ *			the mV conversion of the external vref in probe)
+ */
+
+struct ad5380_chip_info {
+	struct iio_chan_spec	channel_template;
+	unsigned int		num_channels;
+	unsigned int		int_vref;
+};
+
+/**
+ * struct ad5380_state - driver instance specific data
+ * @regmap:		regmap instance used by the device
+ * @chip_info:		chip model specific constants, available modes etc
+ * @vref_reg:		vref supply regulator
+ * @vref:		actual reference voltage used, in mV (regulator uV
+ *			value divided by 1000, or chip_info->int_vref)
+ * @pwr_down:		whether the chip is currently in power down mode
+ */
+
+struct ad5380_state {
+	struct regmap			*regmap;
+	const struct ad5380_chip_info	*chip_info;
+	struct regulator		*vref_reg;
+	int				vref;
+	bool				pwr_down;
+};
+
+/* Variant ids; -3/-5 suffixes select the 1.25V/2.5V internal vref. */
+enum ad5380_type {
+	ID_AD5380_3,
+	ID_AD5380_5,
+	ID_AD5381_3,
+	ID_AD5381_5,
+	ID_AD5382_3,
+	ID_AD5382_5,
+	ID_AD5383_3,
+	ID_AD5383_5,
+	ID_AD5390_3,
+	ID_AD5390_5,
+	ID_AD5391_3,
+	ID_AD5391_5,
+	ID_AD5392_3,
+	ID_AD5392_5,
+};
+
+/* ext_info read callback: report the cached chip-wide power-down state. */
+static ssize_t ad5380_read_dac_powerdown(struct iio_dev *indio_dev,
+	uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+
+	return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+/*
+ * ext_info write callback: power the whole chip down or up via the
+ * special-function registers (the written value is a don't-care).
+ * NOTE(review): st->pwr_down is updated even when the regmap write
+ * failed — confirm this is intentional best-effort caching.
+ */
+static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
+	 uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+	 size_t len)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+	bool pwr_down;
+	int ret;
+
+	ret = strtobool(buf, &pwr_down);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	if (pwr_down)
+		ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
+	else
+		ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_UP, 0);
+
+	st->pwr_down = pwr_down;
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+/* Order matches the power-down-mode bit in AD5380_REG_SF_CTRL (0/1). */
+static const char * const ad5380_powerdown_modes[] = {
+	"100kohm_to_gnd",
+	"three_state",
+};
+
+/* Read the current power-down mode bit from the SF control register. */
+static int ad5380_get_powerdown_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+	unsigned int mode;
+	int ret;
+
+	ret = regmap_read(st->regmap, AD5380_REG_SF_CTRL, &mode);
+	if (ret)
+		return ret;
+
+	mode = (mode >> AD5380_CTRL_PWR_DOWN_MODE_OFFSET) & 1;
+
+	return mode;
+}
+
+/* Select the chip-wide power-down mode (shared by all channels). */
+static int ad5380_set_powerdown_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int mode)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+
+	return regmap_update_bits(st->regmap, AD5380_REG_SF_CTRL,
+		1 << AD5380_CTRL_PWR_DOWN_MODE_OFFSET,
+		mode << AD5380_CTRL_PWR_DOWN_MODE_OFFSET);
+}
+
+/* Exposed via the "powerdown_mode" ext_info attribute below. */
+static const struct iio_enum ad5380_powerdown_mode_enum = {
+	.items = ad5380_powerdown_modes,
+	.num_items = ARRAY_SIZE(ad5380_powerdown_modes),
+	.get = ad5380_get_powerdown_mode,
+	.set = ad5380_set_powerdown_mode,
+};
+
+/*
+ * Map an IIO_CHAN_INFO_* item to the matching per-channel register.
+ * Only RAW, CALIBBIAS and CALIBSCALE are passed in by the read/write_raw
+ * callers; anything else returns register 0, which must not be used.
+ * Use the named constant for RAW rather than relying on its numeric
+ * value being 0.
+ */
+static unsigned int ad5380_info_to_reg(struct iio_chan_spec const *chan,
+	long info)
+{
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		return AD5380_REG_DATA(chan->address);
+	case IIO_CHAN_INFO_CALIBBIAS:
+		return AD5380_REG_OFFSET(chan->address);
+	case IIO_CHAN_INFO_CALIBSCALE:
+		return AD5380_REG_GAIN(chan->address);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Write raw DAC codes and calibration registers. CALIBBIAS is stored
+ * biased by mid-scale so userspace can pass signed values.
+ */
+static int ad5380_write_raw(struct iio_dev *indio_dev,
+	 struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+	const unsigned int max_val = (1 << chan->scan_type.realbits);
+	struct ad5380_state *st = iio_priv(indio_dev);
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (val >= max_val || val < 0)
+			return -EINVAL;
+
+		return regmap_write(st->regmap,
+			ad5380_info_to_reg(chan, info),
+			val << chan->scan_type.shift);
+	case IIO_CHAN_INFO_CALIBBIAS:
+		/* Re-bias: register stores offset + mid-scale. */
+		val += (1 << chan->scan_type.realbits) / 2;
+		if (val >= max_val || val < 0)
+			return -EINVAL;
+
+		return regmap_write(st->regmap,
+			AD5380_REG_OFFSET(chan->address),
+			val << chan->scan_type.shift);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Read raw DAC codes, calibration registers and the channel scale.
+ * CALIBBIAS is stored biased by mid-scale and reported signed.
+ */
+static int ad5380_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+	int ret;
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+	case IIO_CHAN_INFO_CALIBSCALE:
+		ret = regmap_read(st->regmap, ad5380_info_to_reg(chan, info),
+					val);
+		if (ret)
+			return ret;
+		*val >>= chan->scan_type.shift;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		ret = regmap_read(st->regmap, AD5380_REG_OFFSET(chan->address),
+					val);
+		if (ret)
+			return ret;
+		*val >>= chan->scan_type.shift;
+		/* Remove the mid-scale bias from the stored value.  This
+		 * must dereference val: the original "val -= ..." only
+		 * moved the pointer and left the result unbiased. */
+		*val -= (1 << chan->scan_type.realbits) / 2;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 2 * st->vref;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info ad5380_info = {
+	.read_raw = ad5380_read_raw,
+	.write_raw = ad5380_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Extra per-channel attributes: powerdown control plus the shared
+ * powerdown_mode enum.  The table is never modified at runtime and
+ * iio_chan_spec::ext_info is a const pointer, so declare it const.
+ */
+static const struct iio_chan_spec_ext_info ad5380_ext_info[] = {
+	{
+		.name = "powerdown",
+		.read = ad5380_read_dac_powerdown,
+		.write = ad5380_write_dac_powerdown,
+		.shared = IIO_SEPARATE,
+	},
+	IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+		 &ad5380_powerdown_mode_enum),
+	IIO_ENUM_AVAILABLE("powerdown_mode", &ad5380_powerdown_mode_enum),
+	{ },
+};
+
+/*
+ * Channel template.  Registers are 14 bits wide (see the regmap config's
+ * val_bits), data left-justified, hence shift = 14 - realbits.
+ */
+#define AD5380_CHANNEL(_bits) {					\
+	.type = IIO_VOLTAGE,					\
+	.indexed = 1,						\
+	.output = 1,						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+		BIT(IIO_CHAN_INFO_CALIBSCALE) |			\
+		BIT(IIO_CHAN_INFO_CALIBBIAS),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+	.scan_type = {						\
+		.sign = 'u',					\
+		.realbits = (_bits),				\
+		.storagebits =  16,				\
+		.shift = 14 - (_bits),				\
+	},							\
+	.ext_info = ad5380_ext_info,				\
+}
+
+/*
+ * Per-variant data: resolution (12/14 bit), channel count and internal
+ * reference in mV (-3 parts: 1.25V, -5 parts: 2.5V).
+ */
+static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
+	[ID_AD5380_3] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 40,
+		.int_vref = 1250,
+	},
+	[ID_AD5380_5] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 40,
+		.int_vref = 2500,
+	},
+	[ID_AD5381_3] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 16,
+		.int_vref = 1250,
+	},
+	[ID_AD5381_5] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 16,
+		.int_vref = 2500,
+	},
+	[ID_AD5382_3] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 32,
+		.int_vref = 1250,
+	},
+	[ID_AD5382_5] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 32,
+		.int_vref = 2500,
+	},
+	[ID_AD5383_3] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 32,
+		.int_vref = 1250,
+	},
+	[ID_AD5383_5] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 32,
+		.int_vref = 2500,
+	},
+	[ID_AD5390_3] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 16,
+		.int_vref = 1250,
+	},
+	[ID_AD5390_5] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 16,
+		.int_vref = 2500,
+	},
+	[ID_AD5391_3] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 16,
+		.int_vref = 1250,
+	},
+	[ID_AD5391_5] = {
+		.channel_template = AD5380_CHANNEL(12),
+		.num_channels = 16,
+		.int_vref = 2500,
+	},
+	[ID_AD5392_3] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 8,
+		.int_vref = 1250,
+	},
+	[ID_AD5392_5] = {
+		.channel_template = AD5380_CHANNEL(14),
+		.num_channels = 8,
+		.int_vref = 2500,
+	},
+};
+
+/*
+ * Build the per-chip iio_chan_spec array from the variant's template.
+ * The array is driver-owned and freed in remove()/probe error paths.
+ */
+static int ad5380_alloc_channels(struct iio_dev *indio_dev)
+{
+	struct ad5380_state *st = iio_priv(indio_dev);
+	struct iio_chan_spec *chans;
+	unsigned int ch;
+
+	chans = kcalloc(st->chip_info->num_channels, sizeof(*chans),
+			GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	for (ch = 0; ch < st->chip_info->num_channels; ch++) {
+		chans[ch] = st->chip_info->channel_template;
+		chans[ch].channel = ch;
+		chans[ch].address = ch;
+	}
+
+	indio_dev->channels = chans;
+
+	return 0;
+}
+
+/*
+ * Bus-agnostic probe, shared by the SPI and I2C front ends.  Picks the
+ * external "vref" supply when available, otherwise falls back to the
+ * chip's internal reference and enables it via the SF control register.
+ * Note: the label error_free_reg actually frees the channel array.
+ */
+static int ad5380_probe(struct device *dev, struct regmap *regmap,
+			enum ad5380_type type, const char *name)
+{
+	struct iio_dev *indio_dev;
+	struct ad5380_state *st;
+	unsigned int ctrl = 0;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		dev_err(dev, "Failed to allocate iio device\n");
+		return -ENOMEM;
+	}
+
+	st = iio_priv(indio_dev);
+	dev_set_drvdata(dev, indio_dev);
+
+	st->chip_info = &ad5380_chip_info_tbl[type];
+	st->regmap = regmap;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->name = name;
+	indio_dev->info = &ad5380_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->num_channels = st->chip_info->num_channels;
+
+	ret = ad5380_alloc_channels(indio_dev);
+	if (ret) {
+		dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
+		return ret;
+	}
+
+	if (st->chip_info->int_vref == 2500)
+		ctrl |= AD5380_CTRL_INT_VREF_2V5;
+
+	st->vref_reg = devm_regulator_get(dev, "vref");
+	if (!IS_ERR(st->vref_reg)) {
+		ret = regulator_enable(st->vref_reg);
+		if (ret) {
+			dev_err(dev, "Failed to enable vref regulators: %d\n",
+				ret);
+			goto error_free_reg;
+		}
+
+		ret = regulator_get_voltage(st->vref_reg);
+		if (ret < 0)
+			goto error_disable_reg;
+
+		/* regulator reports uV; st->vref is kept in mV */
+		st->vref = ret / 1000;
+	} else {
+		st->vref = st->chip_info->int_vref;
+		ctrl |= AD5380_CTRL_INT_VREF_EN;
+	}
+
+	ret = regmap_write(st->regmap, AD5380_REG_SF_CTRL, ctrl);
+	if (ret) {
+		dev_err(dev, "Failed to write to device: %d\n", ret);
+		goto error_disable_reg;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(dev, "Failed to register iio device: %d\n", ret);
+		goto error_disable_reg;
+	}
+
+	return 0;
+
+error_disable_reg:
+	if (!IS_ERR(st->vref_reg))
+		regulator_disable(st->vref_reg);
+error_free_reg:
+	kfree(indio_dev->channels);
+
+	return ret;
+}
+
+/* Tear down in reverse probe order; devm frees the iio device itself. */
+static int ad5380_remove(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5380_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	kfree(indio_dev->channels);
+
+	/* Only disable a vref regulator that was actually obtained. */
+	if (!IS_ERR(st->vref_reg))
+		regulator_disable(st->vref_reg);
+
+	return 0;
+}
+
+/* No register is readable or volatile: all reads are served from cache. */
+static bool ad5380_reg_false(struct device *dev, unsigned int reg)
+{
+	return false;
+}
+
+static const struct regmap_config ad5380_regmap_config = {
+	.reg_bits = 10,
+	.val_bits = 14,
+
+	.max_register = AD5380_REG_DATA(40),
+	.cache_type = REGCACHE_RBTREE,
+
+	.volatile_reg = ad5380_reg_false,
+	.readable_reg = ad5380_reg_false,
+};
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+/* SPI front end: create a SPI regmap and hand off to the common probe. */
+static int ad5380_spi_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &ad5380_regmap_config);
+
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
+}
+
+static int ad5380_spi_remove(struct spi_device *spi)
+{
+	return ad5380_remove(&spi->dev);
+}
+
+/* ad5384 shares the ad5380 register map and channel count. */
+static const struct spi_device_id ad5380_spi_ids[] = {
+	{ "ad5380-3", ID_AD5380_3 },
+	{ "ad5380-5", ID_AD5380_5 },
+	{ "ad5381-3", ID_AD5381_3 },
+	{ "ad5381-5", ID_AD5381_5 },
+	{ "ad5382-3", ID_AD5382_3 },
+	{ "ad5382-5", ID_AD5382_5 },
+	{ "ad5383-3", ID_AD5383_3 },
+	{ "ad5383-5", ID_AD5383_5 },
+	{ "ad5384-3", ID_AD5380_3 },
+	{ "ad5384-5", ID_AD5380_5 },
+	{ "ad5390-3", ID_AD5390_3 },
+	{ "ad5390-5", ID_AD5390_5 },
+	{ "ad5391-3", ID_AD5391_3 },
+	{ "ad5391-5", ID_AD5391_5 },
+	{ "ad5392-3", ID_AD5392_3 },
+	{ "ad5392-5", ID_AD5392_5 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, ad5380_spi_ids);
+
+static struct spi_driver ad5380_spi_driver = {
+	.driver = {
+		.name = "ad5380",
+		.owner = THIS_MODULE,
+	},
+	.probe = ad5380_spi_probe,
+	.remove = ad5380_spi_remove,
+	.id_table = ad5380_spi_ids,
+};
+
+static inline int ad5380_spi_register_driver(void)
+{
+	return spi_register_driver(&ad5380_spi_driver);
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+	spi_unregister_driver(&ad5380_spi_driver);
+}
+
+#else
+
+/* Stubs keep the module init path bus-agnostic when SPI is disabled. */
+static inline int ad5380_spi_register_driver(void)
+{
+	return 0;
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+/* I2C front end: create an I2C regmap and hand off to the common probe. */
+static int ad5380_i2c_probe(struct i2c_client *i2c,
+			    const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(i2c, &ad5380_regmap_config);
+
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
+}
+
+static int ad5380_i2c_remove(struct i2c_client *i2c)
+{
+	return ad5380_remove(&i2c->dev);
+}
+
+/* ad5384 shares the ad5380 register map and channel count. */
+static const struct i2c_device_id ad5380_i2c_ids[] = {
+	{ "ad5380-3", ID_AD5380_3 },
+	{ "ad5380-5", ID_AD5380_5 },
+	{ "ad5381-3", ID_AD5381_3 },
+	{ "ad5381-5", ID_AD5381_5 },
+	{ "ad5382-3", ID_AD5382_3 },
+	{ "ad5382-5", ID_AD5382_5 },
+	{ "ad5383-3", ID_AD5383_3 },
+	{ "ad5383-5", ID_AD5383_5 },
+	{ "ad5384-3", ID_AD5380_3 },
+	{ "ad5384-5", ID_AD5380_5 },
+	{ "ad5390-3", ID_AD5390_3 },
+	{ "ad5390-5", ID_AD5390_5 },
+	{ "ad5391-3", ID_AD5391_3 },
+	{ "ad5391-5", ID_AD5391_5 },
+	{ "ad5392-3", ID_AD5392_3 },
+	{ "ad5392-5", ID_AD5392_5 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ad5380_i2c_ids);
+
+static struct i2c_driver ad5380_i2c_driver = {
+	.driver = {
+		   .name = "ad5380",
+		   .owner = THIS_MODULE,
+	},
+	.probe = ad5380_i2c_probe,
+	.remove = ad5380_i2c_remove,
+	.id_table = ad5380_i2c_ids,
+};
+
+static inline int ad5380_i2c_register_driver(void)
+{
+	return i2c_add_driver(&ad5380_i2c_driver);
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+	i2c_del_driver(&ad5380_i2c_driver);
+}
+
+#else
+
+/* Stubs keep the module init path bus-agnostic when I2C is disabled. */
+static inline int ad5380_i2c_register_driver(void)
+{
+	return 0;
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+/*
+ * Module init: registers both the SPI and the I2C front ends, unwinding
+ * the SPI registration if the I2C one fails.  Renamed from
+ * ad5380_spi_init since it is not SPI specific.
+ */
+static int __init ad5380_init(void)
+{
+	int ret;
+
+	ret = ad5380_spi_register_driver();
+	if (ret)
+		return ret;
+
+	ret = ad5380_i2c_register_driver();
+	if (ret) {
+		ad5380_spi_unregister_driver();
+		return ret;
+	}
+
+	return 0;
+}
+module_init(ad5380_init);
+
+/* Module exit: unregister in reverse registration order. */
+static void __exit ad5380_exit(void)
+{
+	ad5380_i2c_unregister_driver();
+	ad5380_spi_unregister_driver();
+}
+module_exit(ad5380_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5380/81/82/83/84/90/91/92 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
new file mode 100644
index 00000000000000..787ef1d859c630
--- /dev/null
+++ b/drivers/iio/dac/ad5421.c
@@ -0,0 +1,536 @@
+/*
+ * AD5421 Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/dac/ad5421.h>
+
+
+/* Command/register numbers, sent in the top byte of each SPI frame. */
+#define AD5421_REG_DAC_DATA		0x1
+#define AD5421_REG_CTRL			0x2
+#define AD5421_REG_OFFSET		0x3
+#define AD5421_REG_GAIN			0x4
+/* load dac and fault shared the same register number. Writing to it will cause
+ * a dac load command, reading from it will return the fault status register */
+#define AD5421_REG_LOAD_DAC		0x5
+#define AD5421_REG_FAULT		0x5
+#define AD5421_REG_FORCE_ALARM_CURRENT	0x6
+#define AD5421_REG_RESET		0x7
+#define AD5421_REG_START_CONVERSION	0x8
+#define AD5421_REG_NOOP			0x9
+
+/* Control register bits. */
+#define AD5421_CTRL_WATCHDOG_DISABLE	BIT(12)
+#define AD5421_CTRL_AUTO_FAULT_READBACK	BIT(11)
+#define AD5421_CTRL_MIN_CURRENT		BIT(9)
+#define AD5421_CTRL_ADC_SOURCE_TEMP	BIT(8)
+#define AD5421_CTRL_ADC_ENABLE		BIT(7)
+#define AD5421_CTRL_PWR_DOWN_INT_VREF	BIT(6)
+
+/* Fault status register bits. */
+#define AD5421_FAULT_SPI		BIT(15)
+#define AD5421_FAULT_PEC		BIT(14)
+#define AD5421_FAULT_OVER_CURRENT	BIT(13)
+#define AD5421_FAULT_UNDER_CURRENT	BIT(12)
+#define AD5421_FAULT_TEMP_OVER_140	BIT(11)
+#define AD5421_FAULT_TEMP_OVER_100	BIT(10)
+#define AD5421_FAULT_UNDER_VOLTAGE_6V	BIT(9)
+#define AD5421_FAULT_UNDER_VOLTAGE_12V	BIT(8)
+
+/* These bits will cause the fault pin to go high */
+#define AD5421_FAULT_TRIGGER_IRQ \
+	(AD5421_FAULT_SPI | AD5421_FAULT_PEC | AD5421_FAULT_OVER_CURRENT | \
+	AD5421_FAULT_UNDER_CURRENT | AD5421_FAULT_TEMP_OVER_140)
+
+/**
+ * struct ad5421_state - driver instance specific data
+ * @spi:		spi_device
+ * @ctrl:		control register cache
+ * @current_range:	current range which the device is configured for
+ * @fault_mask:		software masking of events
+ * @data:		spi transfer buffers
+ */
+struct ad5421_state {
+	struct spi_device		*spi;
+	unsigned int			ctrl;
+	enum ad5421_current_range	current_range;
+	unsigned int			fault_mask;
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	union {
+		__be32 d32;
+		u8 d8[4];
+	} data[2] ____cacheline_aligned;
+};
+
+/* Over-/under-current thresholds, reported by the fault handler. */
+static const struct iio_event_spec ad5421_current_event[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+/*
+ * Over-temperature (140 degree) event.  The fault handler pushes this
+ * event with IIO_EV_TYPE_MAG, so the spec must advertise MAG as well;
+ * declaring THRESH here would never match the generated event code.
+ */
+static const struct iio_event_spec ad5421_temp_event[] = {
+	{
+		.type = IIO_EV_TYPE_MAG,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+/* One current output channel plus a die-temperature event channel. */
+static const struct iio_chan_spec ad5421_channels[] = {
+	{
+		.type = IIO_CURRENT,
+		.indexed = 1,
+		.output = 1,
+		.channel = 0,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_CALIBSCALE) |
+			BIT(IIO_CHAN_INFO_CALIBBIAS),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_OFFSET),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 16,
+			.storagebits = 16,
+		},
+		.event_spec = ad5421_current_event,
+		.num_event_specs = ARRAY_SIZE(ad5421_current_event),
+	},
+	{
+		.type = IIO_TEMP,
+		.channel = -1,
+		.event_spec = ad5421_temp_event,
+		.num_event_specs = ARRAY_SIZE(ad5421_temp_event),
+	},
+};
+
+/*
+ * Send a 24-bit write frame (reg in the top byte, 16-bit value below)
+ * from the DMA-safe buffer.  Caller must hold indio_dev->mlock.
+ */
+static int ad5421_write_unlocked(struct iio_dev *indio_dev,
+	unsigned int reg, unsigned int val)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+
+	st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+	return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+/* Locked wrapper around ad5421_write_unlocked(). */
+static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
+	unsigned int val)
+{
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	ret = ad5421_write_unlocked(indio_dev, reg, val);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * Read a register: send a frame with the read flag (bit 23) set, then
+ * clock out the reply in a second transfer (cs_change between them).
+ * Returns the 16-bit register value or a negative errno.
+ */
+static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+	int ret;
+	struct spi_transfer t[] = {
+		{
+			.tx_buf = &st->data[0].d8[1],
+			.len = 3,
+			.cs_change = 1,
+		}, {
+			.rx_buf = &st->data[1].d8[1],
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&indio_dev->mlock);
+
+	st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+	if (ret >= 0)
+		ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * Read-modify-write the cached control register under the device lock.
+ * Returns 0 on success or a negative errno from the SPI transfer; the
+ * result variable must be a signed int, not unsigned, to carry errnos.
+ */
+static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+	unsigned int clr)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	st->ctrl &= ~clr;
+	st->ctrl |= set;
+
+	ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * Threaded fault IRQ handler: re-sync the control register, then poll
+ * the fault register, pushing an IIO event for each newly raised and
+ * unmasked fault bit until all IRQ-triggering faults clear.
+ */
+static irqreturn_t ad5421_fault_handler(int irq, void *data)
+{
+	struct iio_dev *indio_dev = data;
+	struct ad5421_state *st = iio_priv(indio_dev);
+	unsigned int fault;
+	unsigned int old_fault = 0;
+	unsigned int events;
+
+	fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+	if (!fault)
+		return IRQ_NONE;
+
+	/* If we had a fault, this might mean that the DAC has lost its state
+	 * and has been reset. Make sure that the control register actually
+	 * contains what we expect it to contain. Otherwise the watchdog might
+	 * be enabled and we get watchdog timeout faults, which will render the
+	 * DAC unusable. */
+	ad5421_update_ctrl(indio_dev, 0, 0);
+
+
+	/* The fault pin stays high as long as a fault condition is present and
+	 * it is not possible to mask fault conditions. For certain fault
+	 * conditions for example like over-temperature it takes some time
+	 * until the fault condition disappears. If we would exit the interrupt
+	 * handler immediately after handling the event it would be entered
+	 * again instantly. Thus we fall back to polling in case we detect that
+	 * a interrupt condition is still present.
+	 */
+	do {
+		/* 0xffff is a invalid value for the register and will only be
+		 * read if there has been a communication error */
+		if (fault == 0xffff)
+			fault = 0;
+
+		/* we are only interested in new events */
+		events = (old_fault ^ fault) & fault;
+		events &= st->fault_mask;
+
+		if (events & AD5421_FAULT_OVER_CURRENT) {
+			iio_push_event(indio_dev,
+				IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+					0,
+					IIO_EV_TYPE_THRESH,
+					IIO_EV_DIR_RISING),
+				iio_get_time_ns());
+		}
+
+		if (events & AD5421_FAULT_UNDER_CURRENT) {
+			iio_push_event(indio_dev,
+				IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+					0,
+					IIO_EV_TYPE_THRESH,
+					IIO_EV_DIR_FALLING),
+				iio_get_time_ns());
+		}
+
+		/* Temperature events are reported as MAG; the declared
+		 * temp event spec must use the same type for userspace
+		 * to be able to match the event code. */
+		if (events & AD5421_FAULT_TEMP_OVER_140) {
+			iio_push_event(indio_dev,
+				IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+					0,
+					IIO_EV_TYPE_MAG,
+					IIO_EV_DIR_RISING),
+				iio_get_time_ns());
+		}
+
+		old_fault = fault;
+		fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+
+		/* still active? go to sleep for some time */
+		if (fault & AD5421_FAULT_TRIGGER_IRQ)
+			msleep(1000);
+
+	} while (fault & AD5421_FAULT_TRIGGER_IRQ);
+
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Look up the output span, in uA, for the hardware-strapped current
+ * range.  Unknown settings fall back to a 0..1 dummy range so that
+ * callers never divide by zero.
+ */
+static void ad5421_get_current_min_max(struct ad5421_state *st,
+	unsigned int *min, unsigned int *max)
+{
+	/* The current range is configured using external pins, which are
+	 * usually hard-wired and not run-time switchable. */
+	switch (st->current_range) {
+	case AD5421_CURRENT_RANGE_4mA_20mA:
+		*min = 4000;
+		*max = 20000;
+		return;
+	case AD5421_CURRENT_RANGE_3mA8_21mA:
+		*min = 3800;
+		*max = 21000;
+		return;
+	case AD5421_CURRENT_RANGE_3mA2_24mA:
+		*min = 3200;
+		*max = 24000;
+		return;
+	default:
+		*min = 0;
+		*max = 1;
+		return;
+	}
+}
+
+/* Offset in DAC code units that maps code 0 onto the range minimum. */
+static inline unsigned int ad5421_get_offset(struct ad5421_state *st)
+{
+	unsigned int min, max;
+
+	ad5421_get_current_min_max(st, &min, &max);
+
+	return (min << 16) / (max - min);
+}
+
+/*
+ * Read the DAC code, scale/offset derived from the strapped current
+ * range, and the calibration registers (offset reported mid-scale
+ * biased).  Only the current channel supports these items.
+ */
+static int ad5421_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long m)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+	unsigned int min, max;
+	int ret;
+
+	if (chan->type != IIO_CURRENT)
+		return -EINVAL;
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		/* span (uA) per 2^16 codes, divided by 1000 to yield mA */
+		ad5421_get_current_min_max(st, &min, &max);
+		*val = max - min;
+		*val2 = (1 << 16) * 1000;
+		return IIO_VAL_FRACTIONAL;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = ad5421_get_offset(st);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		ret = ad5421_read(indio_dev, AD5421_REG_OFFSET);
+		if (ret < 0)
+			return ret;
+		/* register stores the offset biased by mid-scale */
+		*val = ret - 32768;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		ret = ad5421_read(indio_dev, AD5421_REG_GAIN);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Write the 16-bit DAC code and the calibration registers.  CALIBBIAS
+ * is stored biased by mid-scale (32768) so callers pass signed values.
+ */
+static int ad5421_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	const unsigned int max_val = 1 << 16;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (val >= max_val || val < 0)
+			return -EINVAL;
+
+		return ad5421_write(indio_dev, AD5421_REG_DAC_DATA, val);
+	case IIO_CHAN_INFO_CALIBBIAS:
+		val += 32768;
+		if (val >= max_val || val < 0)
+			return -EINVAL;
+
+		return ad5421_write(indio_dev, AD5421_REG_OFFSET, val);
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (val >= max_val || val < 0)
+			return -EINVAL;
+
+		return ad5421_write(indio_dev, AD5421_REG_GAIN, val);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Enable or disable software reporting of a fault bit as an IIO event.
+ * The hardware fault conditions themselves cannot be masked; only the
+ * driver's event forwarding (st->fault_mask) is toggled here.
+ */
+static int ad5421_write_event_config(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, enum iio_event_type type,
+	enum iio_event_direction dir, int state)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+	unsigned int mask;
+
+	switch (chan->type) {
+	case IIO_CURRENT:
+		mask = (dir == IIO_EV_DIR_RISING) ?
+			AD5421_FAULT_OVER_CURRENT :
+			AD5421_FAULT_UNDER_CURRENT;
+		break;
+	case IIO_TEMP:
+		mask = AD5421_FAULT_TEMP_OVER_140;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mutex_lock(&indio_dev->mlock);
+	if (state)
+		st->fault_mask |= mask;
+	else
+		st->fault_mask &= ~mask;
+	mutex_unlock(&indio_dev->mlock);
+
+	return 0;
+}
+
+/* Report whether forwarding of the matching fault bit is enabled. */
+static int ad5421_read_event_config(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, enum iio_event_type type,
+	enum iio_event_direction dir)
+{
+	struct ad5421_state *st = iio_priv(indio_dev);
+	unsigned int mask;
+
+	switch (chan->type) {
+	case IIO_CURRENT:
+		mask = (dir == IIO_EV_DIR_RISING) ?
+			AD5421_FAULT_OVER_CURRENT :
+			AD5421_FAULT_UNDER_CURRENT;
+		break;
+	case IIO_TEMP:
+		mask = AD5421_FAULT_TEMP_OVER_140;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return !!(st->fault_mask & mask);
+}
+
+/*
+ * Event thresholds are fixed by the hardware: the current events fire
+ * at the programmed DAC code, the temperature event at 140 degrees
+ * (reported here as 140000, matching AD5421_FAULT_TEMP_OVER_140).
+ */
+static int ad5421_read_event_value(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, enum iio_event_type type,
+	enum iio_event_direction dir, enum iio_event_info info, int *val,
+	int *val2)
+{
+	int ret;
+
+	switch (chan->type) {
+	case IIO_CURRENT:
+		ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		break;
+	case IIO_TEMP:
+		*val = 140000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return IIO_VAL_INT;
+}
+
+static const struct iio_info ad5421_info = {
+	.read_raw =		ad5421_read_raw,
+	.write_raw =		ad5421_write_raw,
+	.read_event_config =	ad5421_read_event_config,
+	.write_event_config =	ad5421_write_event_config,
+	.read_event_value =	ad5421_read_event_value,
+	.driver_module =	THIS_MODULE,
+};
+
+/*
+ * Probe: set up the cached control word from platform data (current
+ * range, internal/external vref), push it to the chip, and install the
+ * optional threaded fault IRQ handler.  Everything is devm-managed.
+ * NOTE(review): the initial ad5421_update_ctrl() return value is not
+ * checked — confirm a failed first write should not abort probe.
+ */
+static int ad5421_probe(struct spi_device *spi)
+{
+	struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
+	struct iio_dev *indio_dev;
+	struct ad5421_state *st;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		dev_err(&spi->dev, "Failed to allocate iio device\n");
+		return  -ENOMEM;
+	}
+
+	st = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+
+	st->spi = spi;
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = "ad5421";
+	indio_dev->info = &ad5421_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ad5421_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+
+	st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
+			AD5421_CTRL_AUTO_FAULT_READBACK;
+
+	if (pdata) {
+		st->current_range = pdata->current_range;
+		if (pdata->external_vref)
+			st->ctrl |= AD5421_CTRL_PWR_DOWN_INT_VREF;
+	} else {
+		st->current_range = AD5421_CURRENT_RANGE_4mA_20mA;
+	}
+
+	/* write initial ctrl register value */
+	ad5421_update_ctrl(indio_dev, 0, 0);
+
+	if (spi->irq) {
+		ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+					   NULL,
+					   ad5421_fault_handler,
+					   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					   "ad5421 fault",
+					   indio_dev);
+		if (ret)
+			return ret;
+	}
+
+	return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+/* No remove callback needed: all probe resources are devm-managed. */
+static struct spi_driver ad5421_driver = {
+	.driver = {
+		   .name = "ad5421",
+		   .owner = THIS_MODULE,
+	},
+	.probe = ad5421_probe,
+};
+module_spi_driver(ad5421_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5421");
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
new file mode 100644
index 00000000000000..46bb62a5c1d4d6
--- /dev/null
+++ b/drivers/iio/dac/ad5446.c
@@ -0,0 +1,623 @@
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MODE_PWRDWN_1k 0x1
+#define MODE_PWRDWN_100k 0x2
+#define MODE_PWRDWN_TRISTATE 0x3
+
+/**
+ * struct ad5446_state - driver instance specific data
+ * @dev: this driver's device (the underlying SPI or I2C device)
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator
+ * @vref_mv: actual reference voltage used
+ */
+
+struct ad5446_state {
+ struct device *dev;
+ const struct ad5446_chip_info *chip_info;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned cached_val;
+ unsigned pwr_down_mode;
+ unsigned pwr_down;
+};
+
+/**
+ * struct ad5446_chip_info - chip specific information
+ * @channel: channel spec for the DAC
+ * @int_vref_mv: AD5620/40/60: the internal reference voltage
+ * @write: chip specific helper function to write to the register
+ */
+
+struct ad5446_chip_info {
+ struct iio_chan_spec channel;
+ u16 int_vref_mv;
+ int (*write)(struct ad5446_state *st, unsigned val);
+};
+
+static const char * const ad5446_powerdown_modes[] = {
+ "1kohm_to_gnd", "100kohm_to_gnd", "three_state"
+};
+
+static int ad5446_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+
+ st->pwr_down_mode = mode + 1;
+
+ return 0;
+}
+
+static int ad5446_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+
+ return st->pwr_down_mode - 1;
+}
+
+static const struct iio_enum ad5446_powerdown_mode_enum = {
+ .items = ad5446_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5446_powerdown_modes),
+ .get = ad5446_get_powerdown_mode,
+ .set = ad5446_set_powerdown_mode,
+};
+
+static ssize_t ad5446_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ unsigned int shift;
+ unsigned int val;
+ bool powerdown;
+ int ret;
+
+ ret = strtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->pwr_down = powerdown;
+
+ if (st->pwr_down) {
+ shift = chan->scan_type.realbits + chan->scan_type.shift;
+ val = st->pwr_down_mode << shift;
+ } else {
+ val = st->cached_val;
+ }
+
+ ret = st->chip_info->write(st, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
+ {
+ .name = "powerdown",
+ .read = ad5446_read_dac_powerdown,
+ .write = ad5446_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5446_powerdown_mode_enum),
+ { },
+};
+
+#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = (storage), \
+ .shift = (_shift), \
+ }, \
+ .ext_info = (ext), \
+}
+
+#define AD5446_CHANNEL(bits, storage, shift) \
+ _AD5446_CHANNEL(bits, storage, shift, NULL)
+
+#define AD5446_CHANNEL_POWERDOWN(bits, storage, shift) \
+ _AD5446_CHANNEL(bits, storage, shift, ad5446_ext_info_powerdown)
+
+static int ad5446_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ *val = st->cached_val;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+static int ad5446_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ val <<= chan->scan_type.shift;
+ mutex_lock(&indio_dev->mlock);
+ st->cached_val = val;
+ if (!st->pwr_down)
+ ret = st->chip_info->write(st, val);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5446_info = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad5446_probe(struct device *dev, const char *name,
+ const struct ad5446_chip_info *chip_info)
+{
+ struct ad5446_state *st;
+ struct iio_dev *indio_dev;
+ struct regulator *reg;
+ int ret, voltage_uv = 0;
+
+ reg = devm_regulator_get(dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
+ }
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+ st = iio_priv(indio_dev);
+ st->chip_info = chip_info;
+
+ dev_set_drvdata(dev, indio_dev);
+ st->reg = reg;
+ st->dev = dev;
+
+ /* Establish that the iio_dev is a child of the device */
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &ad5446_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &st->chip_info->channel;
+ indio_dev->num_channels = 1;
+
+ st->pwr_down_mode = MODE_PWRDWN_1k;
+
+ if (st->chip_info->int_vref_mv)
+ st->vref_mv = st->chip_info->int_vref_mv;
+ else if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ dev_warn(dev, "reference voltage unspecified\n");
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
+ return ret;
+}
+
+static int ad5446_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5446_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int ad5446_write(struct ad5446_state *st, unsigned val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+ __be16 data = cpu_to_be16(val);
+
+ return spi_write(spi, &data, sizeof(data));
+}
+
+static int ad5660_write(struct ad5446_state *st, unsigned val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+ uint8_t data[3];
+
+ data[0] = (val >> 16) & 0xFF;
+ data[1] = (val >> 8) & 0xFF;
+ data[2] = val & 0xFF;
+
+ return spi_write(spi, data, sizeof(data));
+}
+
+/**
+ * ad5446_supported_spi_device_ids:
+ * The AD5620/40/60 parts are available in different fixed internal reference
+ * voltage options. The actual part numbers may look differently
+ * (and a bit cryptic), however this style is used to make clear which
+ * parts are supported here.
+ */
+enum ad5446_supported_spi_device_ids {
+ ID_AD5300,
+ ID_AD5310,
+ ID_AD5320,
+ ID_AD5444,
+ ID_AD5446,
+ ID_AD5450,
+ ID_AD5451,
+ ID_AD5541A,
+ ID_AD5512A,
+ ID_AD5553,
+ ID_AD5601,
+ ID_AD5611,
+ ID_AD5621,
+ ID_AD5641,
+ ID_AD5620_2500,
+ ID_AD5620_1250,
+ ID_AD5640_2500,
+ ID_AD5640_1250,
+ ID_AD5660_2500,
+ ID_AD5660_1250,
+ ID_AD5662,
+};
+
+static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
+ [ID_AD5300] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
+ .write = ad5446_write,
+ },
+ [ID_AD5310] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
+ .write = ad5446_write,
+ },
+ [ID_AD5320] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
+ .write = ad5446_write,
+ },
+ [ID_AD5444] = {
+ .channel = AD5446_CHANNEL(12, 16, 2),
+ .write = ad5446_write,
+ },
+ [ID_AD5446] = {
+ .channel = AD5446_CHANNEL(14, 16, 0),
+ .write = ad5446_write,
+ },
+ [ID_AD5450] = {
+ .channel = AD5446_CHANNEL(8, 16, 6),
+ .write = ad5446_write,
+ },
+ [ID_AD5451] = {
+ .channel = AD5446_CHANNEL(10, 16, 4),
+ .write = ad5446_write,
+ },
+ [ID_AD5541A] = {
+ .channel = AD5446_CHANNEL(16, 16, 0),
+ .write = ad5446_write,
+ },
+ [ID_AD5512A] = {
+ .channel = AD5446_CHANNEL(12, 16, 4),
+ .write = ad5446_write,
+ },
+ [ID_AD5553] = {
+ .channel = AD5446_CHANNEL(14, 16, 0),
+ .write = ad5446_write,
+ },
+ [ID_AD5601] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
+ .write = ad5446_write,
+ },
+ [ID_AD5611] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 4),
+ .write = ad5446_write,
+ },
+ [ID_AD5621] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .write = ad5446_write,
+ },
+ [ID_AD5641] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .write = ad5446_write,
+ },
+ [ID_AD5620_2500] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .int_vref_mv = 2500,
+ .write = ad5446_write,
+ },
+ [ID_AD5620_1250] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .int_vref_mv = 1250,
+ .write = ad5446_write,
+ },
+ [ID_AD5640_2500] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .int_vref_mv = 2500,
+ .write = ad5446_write,
+ },
+ [ID_AD5640_1250] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .int_vref_mv = 1250,
+ .write = ad5446_write,
+ },
+ [ID_AD5660_2500] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .int_vref_mv = 2500,
+ .write = ad5660_write,
+ },
+ [ID_AD5660_1250] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .int_vref_mv = 1250,
+ .write = ad5660_write,
+ },
+ [ID_AD5662] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .write = ad5660_write,
+ },
+};
+
+static const struct spi_device_id ad5446_spi_ids[] = {
+ {"ad5300", ID_AD5300},
+ {"ad5310", ID_AD5310},
+ {"ad5320", ID_AD5320},
+ {"ad5444", ID_AD5444},
+ {"ad5446", ID_AD5446},
+ {"ad5450", ID_AD5450},
+ {"ad5451", ID_AD5451},
+ {"ad5452", ID_AD5444}, /* ad5452 is compatible to the ad5444 */
+ {"ad5453", ID_AD5446}, /* ad5453 is compatible to the ad5446 */
+ {"ad5512a", ID_AD5512A},
+ {"ad5541a", ID_AD5541A},
+ {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
+ {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
+ {"ad5553", ID_AD5553},
+ {"ad5601", ID_AD5601},
+ {"ad5611", ID_AD5611},
+ {"ad5621", ID_AD5621},
+ {"ad5641", ID_AD5641},
+ {"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
+ {"ad5620-1250", ID_AD5620_1250}, /* part numbers may look differently */
+ {"ad5640-2500", ID_AD5640_2500},
+ {"ad5640-1250", ID_AD5640_1250},
+ {"ad5660-2500", ID_AD5660_2500},
+ {"ad5660-1250", ID_AD5660_1250},
+ {"ad5662", ID_AD5662},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
+
+static int ad5446_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return ad5446_probe(&spi->dev, id->name,
+ &ad5446_spi_chip_info[id->driver_data]);
+}
+
+static int ad5446_spi_remove(struct spi_device *spi)
+{
+ return ad5446_remove(&spi->dev);
+}
+
+static struct spi_driver ad5446_spi_driver = {
+ .driver = {
+ .name = "ad5446",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5446_spi_probe,
+ .remove = ad5446_spi_remove,
+ .id_table = ad5446_spi_ids,
+};
+
+static int __init ad5446_spi_register_driver(void)
+{
+ return spi_register_driver(&ad5446_spi_driver);
+}
+
+static void ad5446_spi_unregister_driver(void)
+{
+ spi_unregister_driver(&ad5446_spi_driver);
+}
+
+#else
+
+static inline int ad5446_spi_register_driver(void) { return 0; }
+static inline void ad5446_spi_unregister_driver(void) { }
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+static int ad5622_write(struct ad5446_state *st, unsigned val)
+{
+ struct i2c_client *client = to_i2c_client(st->dev);
+ __be16 data = cpu_to_be16(val);
+
+ return i2c_master_send(client, (char *)&data, sizeof(data));
+}
+
+/**
+ * ad5446_supported_i2c_device_ids:
+ * The AD5301/11/21 parts are compatible with the AD5602/12/22 parts and
+ * share their chip info entries, so only the three distinct register
+ * layouts (8, 10 and 12 bit) are enumerated here; see ad5446_i2c_ids
+ * for the full list of supported part names.
+ */
+enum ad5446_supported_i2c_device_ids {
+ ID_AD5602,
+ ID_AD5612,
+ ID_AD5622,
+};
+
+static const struct ad5446_chip_info ad5446_i2c_chip_info[] = {
+ [ID_AD5602] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
+ .write = ad5622_write,
+ },
+ [ID_AD5612] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
+ .write = ad5622_write,
+ },
+ [ID_AD5622] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
+ .write = ad5622_write,
+ },
+};
+
+static int ad5446_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ return ad5446_probe(&i2c->dev, id->name,
+ &ad5446_i2c_chip_info[id->driver_data]);
+}
+
+static int ad5446_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5446_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5446_i2c_ids[] = {
+ {"ad5301", ID_AD5602},
+ {"ad5311", ID_AD5612},
+ {"ad5321", ID_AD5622},
+ {"ad5602", ID_AD5602},
+ {"ad5612", ID_AD5612},
+ {"ad5622", ID_AD5622},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad5446_i2c_ids);
+
+static struct i2c_driver ad5446_i2c_driver = {
+ .driver = {
+ .name = "ad5446",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5446_i2c_probe,
+ .remove = ad5446_i2c_remove,
+ .id_table = ad5446_i2c_ids,
+};
+
+static int __init ad5446_i2c_register_driver(void)
+{
+ return i2c_add_driver(&ad5446_i2c_driver);
+}
+
+static void __exit ad5446_i2c_unregister_driver(void)
+{
+ i2c_del_driver(&ad5446_i2c_driver);
+}
+
+#else
+
+static inline int ad5446_i2c_register_driver(void) { return 0; }
+static inline void ad5446_i2c_unregister_driver(void) { }
+
+#endif
+
+static int __init ad5446_init(void)
+{
+ int ret;
+
+ ret = ad5446_spi_register_driver();
+ if (ret)
+ return ret;
+
+ ret = ad5446_i2c_register_driver();
+ if (ret) {
+ ad5446_spi_unregister_driver();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ad5446_init);
+
+static void __exit ad5446_exit(void)
+{
+ ad5446_i2c_unregister_driver();
+ ad5446_spi_unregister_driver();
+}
+module_exit(ad5446_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
new file mode 100644
index 00000000000000..64d7256cbb6d0a
--- /dev/null
+++ b/drivers/iio/dac/ad5449.c
@@ -0,0 +1,369 @@
+/*
+ * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
+ * Converter driver.
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/platform_data/ad5449.h>
+
+#define AD5449_MAX_CHANNELS 2
+#define AD5449_MAX_VREFS 2
+
+#define AD5449_CMD_NOOP 0x0
+#define AD5449_CMD_LOAD_AND_UPDATE(x) (0x1 + (x) * 3)
+#define AD5449_CMD_READ(x) (0x2 + (x) * 3)
+#define AD5449_CMD_LOAD(x) (0x3 + (x) * 3)
+#define AD5449_CMD_CTRL 13
+
+#define AD5449_CTRL_SDO_OFFSET 10
+#define AD5449_CTRL_DAISY_CHAIN BIT(9)
+#define AD5449_CTRL_HCLR_TO_MIDSCALE BIT(8)
+#define AD5449_CTRL_SAMPLE_RISING BIT(7)
+
+/**
+ * struct ad5449_chip_info - chip specific information
+ * @channels: Channel specification
+ * @num_channels: Number of channels
+ * @has_ctrl: Chip has a control register
+ */
+struct ad5449_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ bool has_ctrl;
+};
+
+/**
+ * struct ad5449 - driver instance specific data
+ * @spi: the SPI device for this driver instance
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @has_sdo: whether the SDO line is connected
+ * @dac_cache: Cache for the DAC values
+ * @data: spi transfer buffers
+ */
+struct ad5449 {
+ struct spi_device *spi;
+ const struct ad5449_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS];
+
+ bool has_sdo;
+ uint16_t dac_cache[AD5449_MAX_CHANNELS];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data[2] ____cacheline_aligned;
+};
+
+enum ad5449_type {
+ ID_AD5426,
+ ID_AD5429,
+ ID_AD5432,
+ ID_AD5439,
+ ID_AD5443,
+ ID_AD5449,
+};
+
+static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr,
+ unsigned int val)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0] = cpu_to_be16((addr << 12) | val);
+ ret = spi_write(st->spi, st->data, 2);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
+ unsigned int *val)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0],
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1],
+ .rx_buf = &st->data[1],
+ .len = 2,
+ },
+ };
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0] = cpu_to_be16(addr << 12);
+ st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ goto out_unlock;
+
+ *val = be16_to_cpu(st->data[1]);
+
+out_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int ad5449_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ struct regulator_bulk_data *reg;
+ int scale_uv;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (st->has_sdo) {
+ ret = ad5449_read(indio_dev,
+ AD5449_CMD_READ(chan->address), val);
+ if (ret)
+ return ret;
+ *val &= 0xfff;
+ } else {
+ *val = st->dac_cache[chan->address];
+ }
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ reg = &st->vref_reg[chan->channel];
+ scale_uv = regulator_get_voltage(reg->consumer);
+ if (scale_uv < 0)
+ return scale_uv;
+
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5449_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val >= (1 << chan->scan_type.realbits))
+ return -EINVAL;
+
+ ret = ad5449_write(indio_dev,
+ AD5449_CMD_LOAD_AND_UPDATE(chan->address),
+ val << chan->scan_type.shift);
+ if (ret == 0)
+ st->dac_cache[chan->address] = val;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5449_info = {
+ .read_raw = ad5449_read_raw,
+ .write_raw = ad5449_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD5449_CHANNEL(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (chan), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 12 - (bits), \
+ }, \
+}
+
+#define DECLARE_AD5449_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5449_CHANNEL(0, bits), \
+ AD5449_CHANNEL(1, bits), \
+}
+
+static DECLARE_AD5449_CHANNELS(ad5429_channels, 8);
+static DECLARE_AD5449_CHANNELS(ad5439_channels, 10);
+static DECLARE_AD5449_CHANNELS(ad5449_channels, 12);
+
+static const struct ad5449_chip_info ad5449_chip_info[] = {
+ [ID_AD5426] = {
+ .channels = ad5429_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5429] = {
+ .channels = ad5429_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+ [ID_AD5432] = {
+ .channels = ad5439_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5439] = {
+ .channels = ad5439_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+ [ID_AD5443] = {
+ .channels = ad5449_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5449] = {
+ .channels = ad5449_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+};
+
+static const char *ad5449_vref_name(struct ad5449 *st, int n)
+{
+ if (st->chip_info->num_channels == 1)
+ return "VREF";
+
+ if (n == 0)
+ return "VREFA";
+ else
+ return "VREFB";
+}
+
+static int ad5449_spi_probe(struct spi_device *spi)
+{
+ struct ad5449_platform_data *pdata = spi->dev.platform_data;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad5449 *st;
+ unsigned int i;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->chip_info = &ad5449_chip_info[id->driver_data];
+ st->spi = spi;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i)
+ st->vref_reg[i].supply = ad5449_vref_name(st, i);
+
+ ret = devm_regulator_bulk_get(&spi->dev, st->chip_info->num_channels,
+ st->vref_reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(st->chip_info->num_channels, st->vref_reg);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ad5449_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ if (st->chip_info->has_ctrl) {
+ unsigned int ctrl = 0x00;
+ if (pdata) {
+ if (pdata->hardware_clear_to_midscale)
+ ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE;
+ ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET;
+ st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED;
+ } else {
+ st->has_sdo = true;
+ }
+ ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl);
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
+
+ return ret;
+}
+
+static int ad5449_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5449 *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5449_spi_ids[] = {
+ { "ad5415", ID_AD5449 },
+ { "ad5426", ID_AD5426 },
+ { "ad5429", ID_AD5429 },
+ { "ad5432", ID_AD5432 },
+ { "ad5439", ID_AD5439 },
+ { "ad5443", ID_AD5443 },
+ { "ad5449", ID_AD5449 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5449_spi_ids);
+
+static struct spi_driver ad5449_spi_driver = {
+ .driver = {
+ .name = "ad5449",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5449_spi_probe,
+ .remove = ad5449_spi_remove,
+ .id_table = ad5449_spi_ids,
+};
+module_spi_driver(ad5449_spi_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5449 and similar DACs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
new file mode 100644
index 00000000000000..581ec141de3df7
--- /dev/null
+++ b/drivers/iio/dac/ad5504.c
@@ -0,0 +1,377 @@
+/*
+ * AD5504, AD5501 High Voltage Digital to Analog Converter
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/dac/ad5504.h>
+
+#define AD5504_RES_MASK GENMASK(11, 0)
+#define AD5504_CMD_READ BIT(15)
+#define AD5504_CMD_WRITE 0
+#define AD5504_ADDR(addr) ((addr) << 12)
+
+/* Registers */
+#define AD5504_ADDR_NOOP 0
+#define AD5504_ADDR_DAC(x) ((x) + 1)
+#define AD5504_ADDR_ALL_DAC 5
+#define AD5504_ADDR_CTRL 7
+
+/* Control Register */
+#define AD5504_DAC_PWR(ch) ((ch) << 2)
+#define AD5504_DAC_PWRDWN_MODE(mode) ((mode) << 6)
+#define AD5504_DAC_PWRDN_20K 0
+#define AD5504_DAC_PWRDN_3STATE 1
+
+/**
+ * struct ad5504_state - driver instance specific data
+ * @spi: spi_device
+ * @reg: supply regulator
+ * @vref_mv: actual reference voltage used
+ * @pwr_down_mask: power down mask
+ * @pwr_down_mode: current power down mode
+ * @data: transfer buffer
+ */
+struct ad5504_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned pwr_down_mask;
+ unsigned pwr_down_mode;
+
+ __be16 data[2] ____cacheline_aligned;
+};
+
+/**
+ * ad5504_supported_device_ids:
+ */
+
+enum ad5504_supported_device_ids {
+ ID_AD5504,
+ ID_AD5501,
+};
+
+static int ad5504_spi_write(struct ad5504_state *st, u8 addr, u16 val)
+{
+ st->data[0] = cpu_to_be16(AD5504_CMD_WRITE | AD5504_ADDR(addr) |
+ (val & AD5504_RES_MASK));
+
+ return spi_write(st->spi, &st->data[0], 2);
+}
+
+static int ad5504_spi_read(struct ad5504_state *st, u8 addr)
+{
+ int ret;
+ struct spi_transfer t = {
+ .tx_buf = &st->data[0],
+ .rx_buf = &st->data[1],
+ .len = 2,
+ };
+
+ st->data[0] = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->data[1]) & AD5504_RES_MASK;
+}
+
+static int ad5504_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5504_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ad5504_spi_read(st, chan->address);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+static int ad5504_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5504_spi_write(st, chan->address, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const char * const ad5504_powerdown_modes[] = {
+ "20kohm_to_gnd",
+ "three_state",
+};
+
+static int ad5504_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ return st->pwr_down_mode;
+}
+
+static int ad5504_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ st->pwr_down_mode = mode;
+
+ return 0;
+}
+
+static const struct iio_enum ad5504_powerdown_mode_enum = {
+ .items = ad5504_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5504_powerdown_modes),
+ .get = ad5504_get_powerdown_mode,
+ .set = ad5504_set_powerdown_mode,
+};
+
+static ssize_t ad5504_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ !(st->pwr_down_mask & (1 << chan->channel)));
+}
+
+static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ bool pwr_down;
+ int ret;
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ if (pwr_down)
+ st->pwr_down_mask |= (1 << chan->channel);
+ else
+ st->pwr_down_mask &= ~(1 << chan->channel);
+
+ ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
+ AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
+ AD5504_DAC_PWR(st->pwr_down_mask));
+
+ /* writes to the CTRL register must be followed by a NOOP */
+ ad5504_spi_write(st, AD5504_ADDR_NOOP, 0);
+
+ return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(temp0_thresh_rising_value, "110000");
+static IIO_CONST_ATTR(temp0_thresh_rising_en, "1");
+
+static struct attribute *ad5504_ev_attributes[] = {
+ &iio_const_attr_temp0_thresh_rising_value.dev_attr.attr,
+ &iio_const_attr_temp0_thresh_rising_en.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ad5504_ev_attribute_group = {
+ .attrs = ad5504_ev_attributes,
+ .name = "events",
+};
+
+static irqreturn_t ad5504_event_handler(int irq, void *private)
+{
+ iio_push_event(private,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info ad5504_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
+ .event_attrs = &ad5504_ev_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5504_read_dac_powerdown,
+ .write = ad5504_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5504_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5504_powerdown_mode_enum),
+ { },
+};
+
+#define AD5504_CHANNEL(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = AD5504_ADDR_DAC(_chan), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .ext_info = ad5504_ext_info, \
+}
+
+static const struct iio_chan_spec ad5504_channels[] = {
+ AD5504_CHANNEL(0),
+ AD5504_CHANNEL(1),
+ AD5504_CHANNEL(2),
+ AD5504_CHANNEL(3),
+};
+
+static int ad5504_probe(struct spi_device *spi)
+{
+ struct ad5504_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
+ struct ad5504_state *st;
+ struct regulator *reg;
+ int ret, voltage_uv = 0;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ reg = devm_regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else if (pdata)
+ st->vref_mv = pdata->vref_mv;
+ else
+ dev_warn(&spi->dev, "reference voltage unspecified\n");
+
+ st->reg = reg;
+ st->spi = spi;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(st->spi)->name;
+ indio_dev->info = &ad5504_info;
+ if (spi_get_device_id(st->spi)->driver_data == ID_AD5501)
+ indio_dev->num_channels = 1;
+ else
+ indio_dev->num_channels = 4;
+ indio_dev->channels = ad5504_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (spi->irq) {
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+ NULL,
+ &ad5504_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ spi_get_device_id(st->spi)->name,
+ indio_dev);
+ if (ret)
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
+
+ return ret;
+}
+
+static int ad5504_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5504_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5504_id[] = {
+ {"ad5504", ID_AD5504},
+ {"ad5501", ID_AD5501},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5504_id);
+
+static struct spi_driver ad5504_driver = {
+ .driver = {
+ .name = "ad5504",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5504_probe,
+ .remove = ad5504_remove,
+ .id_table = ad5504_id,
+};
+module_spi_driver(ad5504_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5504/AD5501 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5624r.h b/drivers/iio/dac/ad5624r.h
new file mode 100644
index 00000000000000..5dca3028cdfd77
--- /dev/null
+++ b/drivers/iio/dac/ad5624r.h
@@ -0,0 +1,79 @@
+/*
+ * AD5624R SPI DAC driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef SPI_AD5624R_H_
+#define SPI_AD5624R_H_
+
+#define AD5624R_DAC_CHANNELS 4
+
+#define AD5624R_ADDR_DAC0 0x0
+#define AD5624R_ADDR_DAC1 0x1
+#define AD5624R_ADDR_DAC2 0x2
+#define AD5624R_ADDR_DAC3 0x3
+#define AD5624R_ADDR_ALL_DAC 0x7
+
+#define AD5624R_CMD_WRITE_INPUT_N 0x0
+#define AD5624R_CMD_UPDATE_DAC_N 0x1
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5624R_CMD_POWERDOWN_DAC 0x4
+#define AD5624R_CMD_RESET 0x5
+#define AD5624R_CMD_LDAC_SETUP 0x6
+#define AD5624R_CMD_INTERNAL_REFER_SETUP 0x7
+
+#define AD5624R_LDAC_PWRDN_NONE 0x0
+#define AD5624R_LDAC_PWRDN_1K 0x1
+#define AD5624R_LDAC_PWRDN_100K 0x2
+#define AD5624R_LDAC_PWRDN_3STATE 0x3
+
+/**
+ * struct ad5624r_chip_info - chip specific information
+ * @channels: channel spec for the DAC
+ * @int_vref_mv: AD5624R/44R/64R: the internal reference voltage, in mV
+ */
+
+struct ad5624r_chip_info {
+ const struct iio_chan_spec *channels;
+ u16 int_vref_mv;
+};
+
+/**
+ * struct ad5624r_state - driver instance specific data
+ * @us: spi_device the driver is attached to
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator (may hold an ERR_PTR if none was found)
+ * @vref_mv: actual reference voltage used
+ * @pwr_down_mask: per-channel power down mask (bit n = channel n)
+ * @pwr_down_mode: current power down mode (shared by all channels)
+ */
+
+struct ad5624r_state {
+ struct spi_device *us;
+ const struct ad5624r_chip_info *chip_info;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned pwr_down_mask;
+ unsigned pwr_down_mode;
+};
+
+/**
+ * enum ad5624r_supported_device_ids - indexes into ad5624r_chip_info_tbl
+ * The AD5624/44/64 parts are available in different
+ * fixed internal reference voltage options (1.25 V "R3", 2.5 V "R5").
+ */
+
+enum ad5624r_supported_device_ids {
+ ID_AD5624R3,
+ ID_AD5644R3,
+ ID_AD5664R3,
+ ID_AD5624R5,
+ ID_AD5644R5,
+ ID_AD5664R5,
+};
+
+#endif /* SPI_AD5624R_H_ */
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
new file mode 100644
index 00000000000000..61bb9d4239eafd
--- /dev/null
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -0,0 +1,319 @@
+/*
+ * AD5624R, AD5644R, AD5664R Digital to analog convertors spi driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "ad5624r.h"
+
+/*
+ * Write one 24-bit control word to the device.
+ * @len is the number of valid data bits (12, 14 or 16); the value is
+ * left-justified within the 16-bit data field as val << (16 - len),
+ * so callers must pass the channel resolution, not the storage shift.
+ */
+static int ad5624r_spi_write(struct spi_device *spi,
+ u8 cmd, u8 addr, u16 val, u8 len)
+{
+ u32 data;
+ u8 msg[3];
+
+ /*
+ * The input shift register is 24 bits wide. The first two bits are
+ * don't care bits. The next three are the command bits, C2 to C0,
+ * followed by the 3-bit DAC address, A2 to A0, and then the
+ * 16-, 14-, 12-bit data-word. The data-word comprises the 16-,
+ * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
+ * for the AD5664R, AD5644R, and AD5624R, respectively.
+ */
+ data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+ msg[0] = data >> 16;
+ msg[1] = data >> 8;
+ msg[2] = data;
+
+ return spi_write(spi, msg, 3);
+}
+
+/* Report channel scale only: 1 LSB = vref_mv / 2^realbits (mV). */
+static int ad5624r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+/* Write a raw DAC code; input and DAC registers are updated together. */
+static int ad5624r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /* Valid codes are 0 .. 2^realbits - 1. */
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ /*
+ * ad5624r_spi_write() takes the number of valid data bits
+ * and left-justifies the code as val << (16 - len). Pass the
+ * channel resolution (realbits), not scan_type.shift: passing
+ * the shift (16 - realbits) misplaces the code in the 24-bit
+ * word and, for the 16-bit AD5664R, drops it entirely.
+ */
+ return ad5624r_spi_write(st->us,
+ AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val,
+ chan->scan_type.realbits);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Sysfs names for the three power-down output impedance modes. */
+static const char * const ad5624r_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "three_state"
+};
+
+/* Return the cached mode; the device is not read back. */
+static int ad5624r_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ return st->pwr_down_mode;
+}
+
+/* Cache the mode; it is sent on the next powerdown write. */
+static int ad5624r_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ st->pwr_down_mode = mode;
+
+ return 0;
+}
+
+static const struct iio_enum ad5624r_powerdown_mode_enum = {
+ .items = ad5624r_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5624r_powerdown_modes),
+ .get = ad5624r_get_powerdown_mode,
+ .set = ad5624r_set_powerdown_mode,
+};
+
+/* Show whether this channel's powerdown bit is set in the cached mask. */
+static ssize_t ad5624r_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ !!(st->pwr_down_mask & (1 << chan->channel)));
+}
+
+/*
+ * Update the channel's bit in the powerdown mask and push the whole
+ * register: (mode << 4) | mask, right-justified (len = 16 gives shift 0).
+ */
+static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ bool pwr_down;
+ int ret;
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ if (pwr_down)
+ st->pwr_down_mask |= (1 << chan->channel);
+ else
+ st->pwr_down_mask &= ~(1 << chan->channel);
+
+ ret = ad5624r_spi_write(st->us, AD5624R_CMD_POWERDOWN_DAC, 0,
+ (st->pwr_down_mode << 4) |
+ st->pwr_down_mask, 16);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_info ad5624r_info = {
+ .write_raw = ad5624r_write_raw,
+ .read_raw = ad5624r_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/* Per-channel "powerdown" plus a shared "powerdown_mode" selector. */
+static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5624r_read_dac_powerdown,
+ .write = ad5624r_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5624r_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5624r_powerdown_mode_enum),
+ { },
+};
+
+/*
+ * Voltage output channel template: unsigned code stored left-justified
+ * in 16 bits (shift = 16 - resolution).
+ */
+#define AD5624R_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (_chan), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+ .ext_info = ad5624r_ext_info, \
+}
+
+/* All supported parts expose four identical DAC channels. */
+#define DECLARE_AD5624R_CHANNELS(_name, _bits) \
+ const struct iio_chan_spec _name##_channels[] = { \
+ AD5624R_CHANNEL(0, _bits), \
+ AD5624R_CHANNEL(1, _bits), \
+ AD5624R_CHANNEL(2, _bits), \
+ AD5624R_CHANNEL(3, _bits), \
+}
+
+static DECLARE_AD5624R_CHANNELS(ad5624r, 12);
+static DECLARE_AD5624R_CHANNELS(ad5644r, 14);
+static DECLARE_AD5624R_CHANNELS(ad5664r, 16);
+
+/* Indexed by enum ad5624r_supported_device_ids. */
+static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
+ [ID_AD5624R3] = {
+ .channels = ad5624r_channels,
+ .int_vref_mv = 1250,
+ },
+ [ID_AD5624R5] = {
+ .channels = ad5624r_channels,
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5644R3] = {
+ .channels = ad5644r_channels,
+ .int_vref_mv = 1250,
+ },
+ [ID_AD5644R5] = {
+ .channels = ad5644r_channels,
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5664R3] = {
+ .channels = ad5664r_channels,
+ .int_vref_mv = 1250,
+ },
+ [ID_AD5664R5] = {
+ .channels = ad5664r_channels,
+ .int_vref_mv = 2500,
+ },
+};
+
+/*
+ * Probe: allocate the IIO device, enable the optional "vcc" supply,
+ * pick the reference (external supply voltage if present, otherwise the
+ * chip's fixed internal reference) and register the device.
+ */
+static int ad5624r_probe(struct spi_device *spi)
+{
+ struct ad5624r_state *st;
+ struct iio_dev *indio_dev;
+ int ret, voltage_uv = 0;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ /*
+ * NOTE(review): any devm_regulator_get() error, including
+ * -EPROBE_DEFER, is treated as "no regulator" - confirm this is
+ * acceptable for boards where vcc comes up late.
+ */
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+ st->chip_info =
+ &ad5624r_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ st->vref_mv = st->chip_info->int_vref_mv;
+
+ st->us = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5624r_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = AD5624R_DAC_CHANNELS;
+
+ /*
+ * Program the internal-reference setup register (len = 16 places the
+ * value at DB0). NOTE(review): the enable bit is !!voltage_uv, i.e.
+ * set when an external supply was measured - confirm polarity
+ * against the datasheet.
+ */
+ ret = ad5624r_spi_write(spi, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
+ !!voltage_uv, 16);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return ret;
+}
+
+/* Unregister the device, then drop the supply regulator. */
+static int ad5624r_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5624r_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5624r_id[] = {
+ {"ad5624r3", ID_AD5624R3},
+ {"ad5644r3", ID_AD5644R3},
+ {"ad5664r3", ID_AD5664R3},
+ {"ad5624r5", ID_AD5624R5},
+ {"ad5644r5", ID_AD5644R5},
+ {"ad5664r5", ID_AD5664R5},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5624r_id);
+
+static struct spi_driver ad5624r_driver = {
+ .driver = {
+ .name = "ad5624r",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5624r_probe,
+ .remove = ad5624r_remove,
+ .id_table = ad5624r_id,
+};
+module_spi_driver(ad5624r_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5624/44/64R DAC spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
new file mode 100644
index 00000000000000..f57562aa396f44
--- /dev/null
+++ b/drivers/iio/dac/ad5686.c
@@ -0,0 +1,408 @@
+/*
+ * AD5686R, AD5685R, AD5684R Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AD5686_DAC_CHANNELS 4
+
+#define AD5686_ADDR(x) ((x) << 16)
+#define AD5686_CMD(x) ((x) << 20)
+
+#define AD5686_ADDR_DAC(chan) (0x1 << (chan))
+#define AD5686_ADDR_ALL_DAC 0xF
+
+#define AD5686_CMD_NOOP 0x0
+#define AD5686_CMD_WRITE_INPUT_N 0x1
+#define AD5686_CMD_UPDATE_DAC_N 0x2
+#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5686_CMD_POWERDOWN_DAC 0x4
+#define AD5686_CMD_LDAC_MASK 0x5
+#define AD5686_CMD_RESET 0x6
+#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7
+#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8
+#define AD5686_CMD_READBACK_ENABLE 0x9
+
+#define AD5686_LDAC_PWRDN_NONE 0x0
+#define AD5686_LDAC_PWRDN_1K 0x1
+#define AD5686_LDAC_PWRDN_100K 0x2
+#define AD5686_LDAC_PWRDN_3STATE 0x3
+
+/**
+ * struct ad5686_chip_info - chip specific information
+ * @int_vref_mv: AD5684/85/86: the internal reference voltage, in mV
+ * @channel: channel specification
+*/
+
+struct ad5686_chip_info {
+ u16 int_vref_mv;
+ struct iio_chan_spec channel[AD5686_DAC_CHANNELS];
+};
+
+/**
+ * struct ad5686_state - driver instance specific data
+ * @spi: spi_device the driver is attached to
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator (may hold an ERR_PTR if none was found)
+ * @vref_mv: actual reference voltage used
+ * @pwr_down_mask: power down mask (two bits per channel)
+ * @pwr_down_mode: current power down mode (two bits per channel)
+ * @data: spi transfer buffers
+ */
+
+struct ad5686_state {
+ struct spi_device *spi;
+ const struct ad5686_chip_info *chip_info;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned pwr_down_mask;
+ unsigned pwr_down_mode;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+/**
+ * enum ad5686_supported_device_ids - indexes into ad5686_chip_info_tbl
+ */
+
+enum ad5686_supported_device_ids {
+ ID_AD5684,
+ ID_AD5685,
+ ID_AD5686,
+};
+/*
+ * Send a 24-bit command word: [23:20] cmd, [19:16] addr, [15:0] data.
+ * @shift left-justifies the code within the data field (0/2/4 for
+ * 16/14/12-bit parts). Only the low 3 bytes of the be32 buffer go out.
+ */
+static int ad5686_spi_write(struct ad5686_state *st,
+ u8 cmd, u8 addr, u16 val, u8 shift)
+{
+ val <<= shift;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
+ AD5686_ADDR(addr) |
+ val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+/*
+ * Read back a register: first frame selects readback of @addr, second
+ * (NOOP) frame clocks the contents out.
+ */
+static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
+{
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .len = 3,
+ },
+ };
+ int ret;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
+ AD5686_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * NOTE(review): the raw 24-bit reply is returned without masking to
+ * the 16-bit data field - confirm the upper echo bits are zero, or
+ * mask with 0xffff at the caller.
+ */
+ return be32_to_cpu(st->data[2].d32);
+}
+
+/* Sysfs names for the three power-down output impedance modes. */
+static const char * const ad5686_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "three_state"
+};
+
+/*
+ * Two bits per channel; stored value is mode + 1 (0 means "normal"),
+ * hence the -1 when mapping back to the enum index.
+ */
+static int ad5686_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+
+ return ((st->pwr_down_mode >> (chan->channel * 2)) & 0x3) - 1;
+}
+
+/* Cache the per-channel mode; it is sent on the next powerdown write. */
+static int ad5686_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+
+ st->pwr_down_mode &= ~(0x3 << (chan->channel * 2));
+ st->pwr_down_mode |= ((mode + 1) << (chan->channel * 2));
+
+ return 0;
+}
+
+static const struct iio_enum ad5686_powerdown_mode_enum = {
+ .items = ad5686_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5686_powerdown_modes),
+ .get = ad5686_get_powerdown_mode,
+ .set = ad5686_set_powerdown_mode,
+};
+
+/* Show whether this channel's two mask bits are set. */
+static ssize_t ad5686_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", !!(st->pwr_down_mask &
+ (0x3 << (chan->channel * 2))));
+}
+
+/*
+ * Toggle the channel's mask bits and push mask & mode to the device;
+ * the AND keeps the mode bits only for channels being powered down.
+ */
+static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ bool readin;
+ int ret;
+ struct ad5686_state *st = iio_priv(indio_dev);
+
+ ret = strtobool(buf, &readin);
+ if (ret)
+ return ret;
+
+ if (readin)
+ st->pwr_down_mask |= (0x3 << (chan->channel * 2));
+ else
+ st->pwr_down_mask &= ~(0x3 << (chan->channel * 2));
+
+ ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0,
+ st->pwr_down_mask & st->pwr_down_mode, 0);
+
+ return ret ? ret : len;
+}
+
+/*
+ * Raw reads go to the hardware (serialized on mlock); scale is
+ * vref_mv / 2^realbits.
+ */
+static int ad5686_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5686_spi_read(st, chan->address);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+/* Write a raw DAC code; input and DAC registers are updated together. */
+static int ad5686_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Valid codes are 0 .. 2^realbits - 1. The previous ">"
+ * comparison let the out-of-range code 2^realbits through;
+ * ">=" matches the check used by the sibling ad5624r driver.
+ */
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5686_spi_write(st,
+ AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address,
+ val,
+ chan->scan_type.shift);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5686_info = {
+ .read_raw = ad5686_read_raw,
+ .write_raw = ad5686_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/* Per-channel "powerdown" switch and per-channel "powerdown_mode". */
+static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5686_read_dac_powerdown,
+ .write = ad5686_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5686_powerdown_mode_enum),
+ { },
+};
+
+/*
+ * Voltage output channel template. Renamed from the misspelled
+ * AD5868_CHANNEL - these parts are the AD5684/85/86.
+ */
+#define AD5686_CHANNEL(chan, bits, _shift) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+ .address = AD5686_ADDR_DAC(chan), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = (_shift), \
+ }, \
+ .ext_info = ad5686_ext_info, \
+}
+
+/* Indexed by enum ad5686_supported_device_ids. */
+static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
+ [ID_AD5684] = {
+ .channel[0] = AD5686_CHANNEL(0, 12, 4),
+ .channel[1] = AD5686_CHANNEL(1, 12, 4),
+ .channel[2] = AD5686_CHANNEL(2, 12, 4),
+ .channel[3] = AD5686_CHANNEL(3, 12, 4),
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5685] = {
+ .channel[0] = AD5686_CHANNEL(0, 14, 2),
+ .channel[1] = AD5686_CHANNEL(1, 14, 2),
+ .channel[2] = AD5686_CHANNEL(2, 14, 2),
+ .channel[3] = AD5686_CHANNEL(3, 14, 2),
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5686] = {
+ .channel[0] = AD5686_CHANNEL(0, 16, 0),
+ .channel[1] = AD5686_CHANNEL(1, 16, 0),
+ .channel[2] = AD5686_CHANNEL(2, 16, 0),
+ .channel[3] = AD5686_CHANNEL(3, 16, 0),
+ .int_vref_mv = 2500,
+ },
+};
+
+
+/*
+ * Probe: allocate the IIO device, enable the optional "vcc" supply,
+ * pick the reference (external supply voltage if present, otherwise the
+ * fixed 2.5 V internal reference) and register the device.
+ */
+static int ad5686_probe(struct spi_device *spi)
+{
+ struct ad5686_state *st;
+ struct iio_dev *indio_dev;
+ int ret, voltage_uv = 0;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ /*
+ * NOTE(review): any devm_regulator_get() error, including
+ * -EPROBE_DEFER, is treated as "no regulator" - confirm this is
+ * acceptable for boards where vcc comes up late.
+ */
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
+ }
+
+ st->chip_info =
+ &ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ st->vref_mv = st->chip_info->int_vref_mv;
+
+ st->spi = spi;
+
+ /* Set all the power down mode for all channels to 1K pulldown */
+ st->pwr_down_mode = 0x55;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5686_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = AD5686_DAC_CHANNELS;
+
+ /* Enable the internal reference only when no external supply. */
+ ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
+ !!voltage_uv, 0);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+ return ret;
+}
+
+/* Unregister the device, then drop the supply regulator. */
+static int ad5686_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5686_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5686_id[] = {
+ {"ad5684", ID_AD5684},
+ {"ad5685", ID_AD5685},
+ {"ad5686", ID_AD5686},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5686_id);
+
+static struct spi_driver ad5686_driver = {
+ .driver = {
+ .name = "ad5686",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5686_probe,
+ .remove = ad5686_remove,
+ .id_table = ad5686_id,
+};
+module_spi_driver(ad5686_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
new file mode 100644
index 00000000000000..a7c851f62d7c20
--- /dev/null
+++ b/drivers/iio/dac/ad5755.c
@@ -0,0 +1,622 @@
+/*
+ * AD5755, AD5755-1, AD5757, AD5735, AD5737 Digital to analog converters driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/platform_data/ad5755.h>
+
+#define AD5755_NUM_CHANNELS 4
+
+#define AD5755_ADDR(x) ((x) << 16)
+
+#define AD5755_WRITE_REG_DATA(chan) (chan)
+#define AD5755_WRITE_REG_GAIN(chan) (0x08 | (chan))
+#define AD5755_WRITE_REG_OFFSET(chan) (0x10 | (chan))
+#define AD5755_WRITE_REG_CTRL(chan) (0x1c | (chan))
+
+#define AD5755_READ_REG_DATA(chan) (chan)
+#define AD5755_READ_REG_CTRL(chan) (0x4 | (chan))
+#define AD5755_READ_REG_GAIN(chan) (0x8 | (chan))
+#define AD5755_READ_REG_OFFSET(chan) (0xc | (chan))
+#define AD5755_READ_REG_CLEAR(chan) (0x10 | (chan))
+#define AD5755_READ_REG_SLEW(chan) (0x14 | (chan))
+#define AD5755_READ_REG_STATUS 0x18
+#define AD5755_READ_REG_MAIN 0x19
+#define AD5755_READ_REG_DC_DC 0x1a
+
+#define AD5755_CTRL_REG_SLEW 0x0
+#define AD5755_CTRL_REG_MAIN 0x1
+#define AD5755_CTRL_REG_DAC 0x2
+#define AD5755_CTRL_REG_DC_DC 0x3
+#define AD5755_CTRL_REG_SW 0x4
+
+#define AD5755_READ_FLAG 0x800000
+
+#define AD5755_NOOP 0x1CE000
+
+#define AD5755_DAC_INT_EN BIT(8)
+#define AD5755_DAC_CLR_EN BIT(7)
+#define AD5755_DAC_OUT_EN BIT(6)
+#define AD5755_DAC_INT_CURRENT_SENSE_RESISTOR BIT(5)
+#define AD5755_DAC_DC_DC_EN BIT(4)
+#define AD5755_DAC_VOLTAGE_OVERRANGE_EN BIT(3)
+
+#define AD5755_DC_DC_MAXV 0
+#define AD5755_DC_DC_FREQ_SHIFT 2
+#define AD5755_DC_DC_PHASE_SHIFT 4
+#define AD5755_EXT_DC_DC_COMP_RES BIT(6)
+
+#define AD5755_SLEW_STEP_SIZE_SHIFT 0
+#define AD5755_SLEW_RATE_SHIFT 3
+#define AD5755_SLEW_ENABLE BIT(12)
+
+/**
+ * struct ad5755_chip_info - chip specific information
+ * @channel_template: channel specification
+ * @calib_shift: shift for the calibration data registers
+ * @has_voltage_out: whether the chip has voltage outputs
+ */
+struct ad5755_chip_info {
+ const struct iio_chan_spec channel_template;
+ unsigned int calib_shift;
+ bool has_voltage_out;
+};
+
+/**
+ * struct ad5755_state - driver instance specific data
+ * @spi: spi device the driver is attached to
+ * @chip_info: chip model specific constants, available modes etc
+ * @pwr_down: bitmask which contains whether a channel is powered down or not
+ * @ctrl: software shadow of the channel ctrl registers
+ * @channels: iio channel spec for the device
+ * @data: spi transfer buffers
+ */
+struct ad5755_state {
+ struct spi_device *spi;
+ const struct ad5755_chip_info *chip_info;
+ unsigned int pwr_down;
+ unsigned int ctrl[AD5755_NUM_CHANNELS];
+ struct iio_chan_spec channels[AD5755_NUM_CHANNELS];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+/* Supported chip variants; index into ad5755_chip_info_tbl. */
+enum ad5755_type {
+ ID_AD5755,
+ ID_AD5757,
+ ID_AD5735,
+ ID_AD5737,
+};
+
+/*
+ * Send a 24-bit write frame: [23:16] register, [15:0] value.
+ * Caller must hold indio_dev->mlock (or run before registration).
+ */
+static int ad5755_write_unlocked(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int val)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+/* Write one of the per-channel control registers (reg in bits 15:13). */
+static int ad5755_write_ctrl_unlocked(struct iio_dev *indio_dev,
+ unsigned int channel, unsigned int reg, unsigned int val)
+{
+ return ad5755_write_unlocked(indio_dev,
+ AD5755_WRITE_REG_CTRL(channel), (reg << 13) | val);
+}
+
+/* Locked wrapper around ad5755_write_unlocked(). */
+static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5755_write_unlocked(indio_dev, reg, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/* Locked wrapper around ad5755_write_ctrl_unlocked(). */
+static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
+ unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5755_write_ctrl_unlocked(indio_dev, channel, reg, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/*
+ * Read a register: the first frame requests @addr, the second (NOP)
+ * frame clocks the 16-bit contents back out.
+ */
+static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
+ st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret >= 0)
+ ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/*
+ * Set/clear bits in the shadowed DAC control register and write it back.
+ * Uses the unlocked write path: caller must hold mlock or run at probe.
+ */
+static int ad5755_update_dac_ctrl(struct iio_dev *indio_dev,
+ unsigned int channel, unsigned int set, unsigned int clr)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->ctrl[channel] |= set;
+ st->ctrl[channel] &= ~clr;
+
+ ret = ad5755_write_ctrl_unlocked(indio_dev, channel,
+ AD5755_CTRL_REG_DAC, st->ctrl[channel]);
+
+ return ret;
+}
+
+/*
+ * Power a channel up or down. Power-up sequences the internal blocks
+ * (INT + DC/DC first, output stage 200 us later per datasheet).
+ */
+static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
+ unsigned int channel, bool pwr_down)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+ unsigned int mask = BIT(channel);
+
+ mutex_lock(&indio_dev->mlock);
+
+ if ((bool)(st->pwr_down & mask) == pwr_down)
+ goto out_unlock;
+
+ if (!pwr_down) {
+ st->pwr_down &= ~mask;
+ ad5755_update_dac_ctrl(indio_dev, channel,
+ AD5755_DAC_INT_EN | AD5755_DAC_DC_DC_EN, 0);
+ udelay(200);
+ ad5755_update_dac_ctrl(indio_dev, channel,
+ AD5755_DAC_OUT_EN, 0);
+ } else {
+ st->pwr_down |= mask;
+ ad5755_update_dac_ctrl(indio_dev, channel,
+ 0, AD5755_DAC_INT_EN | AD5755_DAC_OUT_EN |
+ AD5755_DAC_DC_DC_EN);
+ }
+
+out_unlock:
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+/* Output span per mode: { min, max } in mV (voltage) or mA (current). */
+static const int ad5755_min_max_table[][2] = {
+ [AD5755_MODE_VOLTAGE_0V_5V] = { 0, 5000 },
+ [AD5755_MODE_VOLTAGE_0V_10V] = { 0, 10000 },
+ [AD5755_MODE_VOLTAGE_PLUSMINUS_5V] = { -5000, 5000 },
+ [AD5755_MODE_VOLTAGE_PLUSMINUS_10V] = { -10000, 10000 },
+ [AD5755_MODE_CURRENT_4mA_20mA] = { 4, 20 },
+ [AD5755_MODE_CURRENT_0mA_20mA] = { 0, 20 },
+ [AD5755_MODE_CURRENT_0mA_24mA] = { 0, 24 },
+};
+
+/* Look up the channel's span from the mode in the low 3 ctrl bits. */
+static void ad5755_get_min_max(struct ad5755_state *st,
+ struct iio_chan_spec const *chan, int *min, int *max)
+{
+ enum ad5755_mode mode = st->ctrl[chan->channel] & 7;
+ *min = ad5755_min_max_table[mode][0];
+ *max = ad5755_min_max_table[mode][1];
+}
+
+/* IIO offset such that (raw + offset) * scale gives the real output. */
+static inline int ad5755_get_offset(struct ad5755_state *st,
+ struct iio_chan_spec const *chan)
+{
+ int min, max;
+
+ ad5755_get_min_max(st, chan, &min, &max);
+ return (min * (1 << chan->scan_type.realbits)) / (max - min);
+}
+
+/*
+ * Map an IIO info attribute to the matching register, bit shift and
+ * value offset for @chan, for the read (@write false) or write path.
+ */
+static int ad5755_chan_reg_info(struct ad5755_state *st,
+ struct iio_chan_spec const *chan, long info, bool write,
+ unsigned int *reg, unsigned int *shift, unsigned int *offset)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (write)
+ *reg = AD5755_WRITE_REG_DATA(chan->address);
+ else
+ *reg = AD5755_READ_REG_DATA(chan->address);
+ *shift = chan->scan_type.shift;
+ *offset = 0;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (write)
+ *reg = AD5755_WRITE_REG_OFFSET(chan->address);
+ else
+ *reg = AD5755_READ_REG_OFFSET(chan->address);
+ *shift = st->chip_info->calib_shift;
+ /* Offset register is biased: 0x8000 means zero offset. */
+ *offset = 32768;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (write)
+ *reg = AD5755_WRITE_REG_GAIN(chan->address);
+ else
+ *reg = AD5755_READ_REG_GAIN(chan->address);
+ *shift = st->chip_info->calib_shift;
+ *offset = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Scale and offset come from the configured output span; everything
+ * else is read from the matching hardware register.
+ */
+static int ad5755_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+ unsigned int reg, shift, offset;
+ int min, max;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ ad5755_get_min_max(st, chan, &min, &max);
+ *val = max - min;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5755_get_offset(st, chan);
+ return IIO_VAL_INT;
+ default:
+ ret = ad5755_chan_reg_info(st, chan, info, false,
+ &reg, &shift, &offset);
+ if (ret)
+ return ret;
+
+ ret = ad5755_read(indio_dev, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = (ret - offset) >> shift;
+
+ return IIO_VAL_INT;
+ }
+
+ /* NOTE(review): unreachable - every switch branch returns above. */
+ return -EINVAL;
+}
+
+/*
+ * Shift and bias the value for the target register, then bounds-check
+ * the final 16-bit register value before writing.
+ */
+static int ad5755_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+ unsigned int shift, reg, offset;
+ int ret;
+
+ ret = ad5755_chan_reg_info(st, chan, info, true,
+ &reg, &shift, &offset);
+ if (ret)
+ return ret;
+
+ val <<= shift;
+ val += offset;
+
+ if (val < 0 || val > 0xffff)
+ return -EINVAL;
+
+ return ad5755_write(indio_dev, reg, val);
+}
+
+/* Show whether the channel is currently powered down. */
+static ssize_t ad5755_read_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5755_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ (bool)(st->pwr_down & (1 << chan->channel)));
+}
+
+/* Parse a boolean and power the channel up or down accordingly. */
+static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
+ struct iio_chan_spec const *chan, const char *buf, size_t len)
+{
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ ret = ad5755_set_channel_pwr_down(indio_dev, chan->channel, pwr_down);
+ return ret ? ret : len;
+}
+
+static const struct iio_info ad5755_info = {
+ .read_raw = ad5755_read_raw,
+ .write_raw = ad5755_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/* Per-channel "powerdown" attribute. */
+static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5755_read_powerdown,
+ .write = ad5755_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ { },
+};
+
+/*
+ * Channel template; .type (voltage/current) is filled in at runtime by
+ * ad5755_init_channels() based on the configured mode.
+ */
+#define AD5755_CHANNEL(_bits) { \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+ .ext_info = ad5755_ext_info, \
+}
+
+/* Indexed by enum ad5755_type. */
+static const struct ad5755_chip_info ad5755_chip_info_tbl[] = {
+ [ID_AD5735] = {
+ .channel_template = AD5755_CHANNEL(14),
+ .has_voltage_out = true,
+ .calib_shift = 4,
+ },
+ [ID_AD5737] = {
+ .channel_template = AD5755_CHANNEL(14),
+ .has_voltage_out = false,
+ .calib_shift = 4,
+ },
+ [ID_AD5755] = {
+ .channel_template = AD5755_CHANNEL(16),
+ .has_voltage_out = true,
+ .calib_shift = 0,
+ },
+ [ID_AD5757] = {
+ .channel_template = AD5755_CHANNEL(16),
+ .has_voltage_out = false,
+ .calib_shift = 0,
+ },
+};
+
+static bool ad5755_is_valid_mode(struct ad5755_state *st, enum ad5755_mode mode)
+{
+ switch (mode) {
+ case AD5755_MODE_VOLTAGE_0V_5V:
+ case AD5755_MODE_VOLTAGE_0V_10V:
+ case AD5755_MODE_VOLTAGE_PLUSMINUS_5V:
+ case AD5755_MODE_VOLTAGE_PLUSMINUS_10V:
+ return st->chip_info->has_voltage_out;
+ case AD5755_MODE_CURRENT_4mA_20mA:
+ case AD5755_MODE_CURRENT_0mA_20mA:
+ case AD5755_MODE_CURRENT_0mA_24mA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * ad5755_setup_pdata() - apply platform data configuration to the chip
+ * @indio_dev: the IIO device
+ * @pdata: DC-DC converter, slew-rate and per-channel mode settings
+ *
+ * Validates the DC-DC converter settings, writes the DC-DC control
+ * register, then programs each channel's slew control and DAC control
+ * registers. Returns 0 on success or a negative error code.
+ */
+static int ad5755_setup_pdata(struct iio_dev *indio_dev,
+	const struct ad5755_platform_data *pdata)
+{
+	struct ad5755_state *st = iio_priv(indio_dev);
+	unsigned int val;
+	unsigned int i;
+	int ret;
+
+	/* Reject out-of-range DC-DC converter settings up front. */
+	if (pdata->dc_dc_phase > AD5755_DC_DC_PHASE_90_DEGREE ||
+		pdata->dc_dc_freq > AD5755_DC_DC_FREQ_650kHZ ||
+		pdata->dc_dc_maxv > AD5755_DC_DC_MAXV_29V5)
+		return -EINVAL;
+
+	/* NOTE(review): AD5755_DC_DC_MAXV is used as a shift count here
+	 * while the freq/phase fields use *_SHIFT macros - confirm the
+	 * macro's value really is the intended bit position. */
+	val = pdata->dc_dc_maxv << AD5755_DC_DC_MAXV;
+	val |= pdata->dc_dc_freq << AD5755_DC_DC_FREQ_SHIFT;
+	val |= pdata->dc_dc_phase << AD5755_DC_DC_PHASE_SHIFT;
+	if (pdata->ext_dc_dc_compenstation_resistor)
+		val |= AD5755_EXT_DC_DC_COMP_RES;
+
+	ret = ad5755_write_ctrl(indio_dev, 0, AD5755_CTRL_REG_DC_DC, val);
+	if (ret < 0)
+		return ret;
+
+	/* Program the per-channel slew rate control registers. */
+	for (i = 0; i < ARRAY_SIZE(pdata->dac); ++i) {
+		val = pdata->dac[i].slew.step_size <<
+			AD5755_SLEW_STEP_SIZE_SHIFT;
+		val |= pdata->dac[i].slew.rate <<
+			AD5755_SLEW_RATE_SHIFT;
+		if (pdata->dac[i].slew.enable)
+			val |= AD5755_SLEW_ENABLE;
+
+		ret = ad5755_write_ctrl(indio_dev, i,
+			AD5755_CTRL_REG_SLEW, val);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Program each channel's output mode (current/voltage range). */
+	for (i = 0; i < ARRAY_SIZE(pdata->dac); ++i) {
+		if (!ad5755_is_valid_mode(st, pdata->dac[i].mode))
+			return -EINVAL;
+
+		val = 0;
+		if (!pdata->dac[i].ext_current_sense_resistor)
+			val |= AD5755_DAC_INT_CURRENT_SENSE_RESISTOR;
+		if (pdata->dac[i].enable_voltage_overrange)
+			val |= AD5755_DAC_VOLTAGE_OVERRANGE_EN;
+		val |= pdata->dac[i].mode;
+
+		ret = ad5755_update_dac_ctrl(indio_dev, i, val, 0);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* True if @mode is one of the four voltage output ranges. */
+static bool ad5755_is_voltage_mode(enum ad5755_mode mode)
+{
+	return mode == AD5755_MODE_VOLTAGE_0V_5V ||
+	       mode == AD5755_MODE_VOLTAGE_0V_10V ||
+	       mode == AD5755_MODE_VOLTAGE_PLUSMINUS_5V ||
+	       mode == AD5755_MODE_VOLTAGE_PLUSMINUS_10V;
+}
+
+/*
+ * ad5755_init_channels() - build the channel spec array
+ *
+ * Copies the chip's channel template for each channel and sets the
+ * channel type to voltage or current according to the mode configured
+ * in @pdata (current when no pdata is supplied).
+ */
+static int ad5755_init_channels(struct iio_dev *indio_dev,
+	const struct ad5755_platform_data *pdata)
+{
+	struct ad5755_state *st = iio_priv(indio_dev);
+	struct iio_chan_spec *channels = st->channels;
+	unsigned int i;
+
+	for (i = 0; i < AD5755_NUM_CHANNELS; ++i) {
+		channels[i] = st->chip_info->channel_template;
+		channels[i].channel = i;
+		channels[i].address = i;
+		if (pdata && ad5755_is_voltage_mode(pdata->dac[i].mode))
+			channels[i].type = IIO_VOLTAGE;
+		else
+			channels[i].type = IIO_CURRENT;
+	}
+
+	indio_dev->channels = channels;
+
+	return 0;
+}
+
+#define AD5755_DEFAULT_DAC_PDATA { \
+ .mode = AD5755_MODE_CURRENT_4mA_20mA, \
+ .ext_current_sense_resistor = true, \
+ .enable_voltage_overrange = false, \
+ .slew = { \
+ .enable = false, \
+ .rate = AD5755_SLEW_RATE_64k, \
+ .step_size = AD5755_SLEW_STEP_SIZE_1, \
+ }, \
+ }
+
+static const struct ad5755_platform_data ad5755_default_pdata = {
+ .ext_dc_dc_compenstation_resistor = false,
+ .dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE,
+ .dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ,
+ .dc_dc_maxv = AD5755_DC_DC_MAXV_23V,
+ .dac = {
+ [0] = AD5755_DEFAULT_DAC_PDATA,
+ [1] = AD5755_DEFAULT_DAC_PDATA,
+ [2] = AD5755_DEFAULT_DAC_PDATA,
+ [3] = AD5755_DEFAULT_DAC_PDATA,
+ },
+};
+
+static int ad5755_probe(struct spi_device *spi)
+{
+ enum ad5755_type type = spi_get_device_id(spi)->driver_data;
+ const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct iio_dev *indio_dev;
+ struct ad5755_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->chip_info = &ad5755_chip_info_tbl[type];
+ st->spi = spi;
+ st->pwr_down = 0xf;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5755_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = AD5755_NUM_CHANNELS;
+
+ if (!pdata)
+ pdata = &ad5755_default_pdata;
+
+ ret = ad5755_init_channels(indio_dev, pdata);
+ if (ret)
+ return ret;
+
+ ret = ad5755_setup_pdata(indio_dev, pdata);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad5755_id[] = {
+ { "ad5755", ID_AD5755 },
+ { "ad5755-1", ID_AD5755 },
+ { "ad5757", ID_AD5757 },
+ { "ad5735", ID_AD5735 },
+ { "ad5737", ID_AD5737 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5755_id);
+
+static struct spi_driver ad5755_driver = {
+ .driver = {
+ .name = "ad5755",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5755_probe,
+ .id_table = ad5755_id,
+};
+module_spi_driver(ad5755_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5755/55-1/57/35/37 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
new file mode 100644
index 00000000000000..d0d38165339d5b
--- /dev/null
+++ b/drivers/iio/dac/ad5764.c
@@ -0,0 +1,370 @@
+/*
+ * Analog devices AD5764, AD5764R, AD5744, AD5744R quad-channel
+ * Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AD5764_REG_SF_NOP 0x0
+#define AD5764_REG_SF_CONFIG 0x1
+#define AD5764_REG_SF_CLEAR 0x4
+#define AD5764_REG_SF_LOAD 0x5
+#define AD5764_REG_DATA(x) ((2 << 3) | (x))
+#define AD5764_REG_COARSE_GAIN(x) ((3 << 3) | (x))
+#define AD5764_REG_FINE_GAIN(x) ((4 << 3) | (x))
+#define AD5764_REG_OFFSET(x) ((5 << 3) | (x))
+
+#define AD5764_NUM_CHANNELS 4
+
+/**
+ * struct ad5764_chip_info - chip specific information
+ * @int_vref: Value of the internal reference voltage in uV - 0 if external
+ * reference voltage is used
+ * @channel channel specification
+*/
+
+struct ad5764_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec *channels;
+};
+
+/**
+ * struct ad5764_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip info
+ * @vref_reg: vref supply regulators
+ * @data: spi transfer buffers
+ */
+
+struct ad5764_state {
+ struct spi_device *spi;
+ const struct ad5764_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5764_type {
+ ID_AD5744,
+ ID_AD5744R,
+ ID_AD5764,
+ ID_AD5764R,
+};
+
+#define AD5764_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+}
+
+#define DECLARE_AD5764_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD5764_CHANNEL(0, (_bits)), \
+ AD5764_CHANNEL(1, (_bits)), \
+ AD5764_CHANNEL(2, (_bits)), \
+ AD5764_CHANNEL(3, (_bits)), \
+};
+
+static DECLARE_AD5764_CHANNELS(ad5764, 16);
+static DECLARE_AD5764_CHANNELS(ad5744, 14);
+
+static const struct ad5764_chip_info ad5764_chip_infos[] = {
+ [ID_AD5744] = {
+ .int_vref = 0,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5744R] = {
+ .int_vref = 5000000,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5764] = {
+ .int_vref = 0,
+ .channels = ad5764_channels,
+ },
+ [ID_AD5764R] = {
+ .int_vref = 5000000,
+ .channels = ad5764_channels,
+ },
+};
+
+/*
+ * ad5764_write() - write a 16 bit value to a device register
+ *
+ * The 24 bit SPI frame is built in a DMA-safe buffer; indio_dev->mlock
+ * serializes access to that buffer.
+ */
+static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
+	unsigned int val)
+{
+	struct ad5764_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&indio_dev->mlock);
+	st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+	/* Only the low three bytes of the 32 bit word go on the wire. */
+	ret = spi_write(st->spi, &st->data[0].d8[1], 3);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * ad5764_read() - read back a 16 bit device register
+ *
+ * Sends a read-request frame, then a second frame that clocks the result
+ * out. The DMA-safe transfer buffers are protected by indio_dev->mlock.
+ */
+static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
+	unsigned int *val)
+{
+	struct ad5764_state *st = iio_priv(indio_dev);
+	int ret;
+	struct spi_transfer t[] = {
+		{
+			.tx_buf = &st->data[0].d8[1],
+			.len = 3,
+			.cs_change = 1,
+		}, {
+			.rx_buf = &st->data[1].d8[1],
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&indio_dev->mlock);
+
+	/* Bit 23 marks the frame as a read request. */
+	st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+	if (ret >= 0)
+		*val = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * ad5764_chan_info_to_reg() - map an info attribute to a register address
+ *
+ * Called from ad5764_write_raw() with the info mask value; use the named
+ * IIO_CHAN_INFO_RAW constant instead of the magic literal 0, which only
+ * worked because the enum value happens to be zero.
+ */
+static int ad5764_chan_info_to_reg(struct iio_chan_spec const *chan, long info)
+{
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		return AD5764_REG_DATA(chan->address);
+	case IIO_CHAN_INFO_CALIBBIAS:
+		return AD5764_REG_OFFSET(chan->address);
+	case IIO_CHAN_INFO_CALIBSCALE:
+		return AD5764_REG_FINE_GAIN(chan->address);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int ad5764_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const int max_val = (1 << chan->scan_type.realbits);
+ unsigned int reg;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+ val <<= chan->scan_type.shift;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val >= 128 || val < -128)
+ return -EINVAL;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= 32 || val < -32)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = ad5764_chan_info_to_reg(chan, info);
+ return ad5764_write(indio_dev, reg, (u16)val);
+}
+
+/*
+ * Reference voltage in uV for @channel: the internal reference on parts
+ * that have one, otherwise the external regulator shared by each channel
+ * pair (vrefAB for channels 0/1, vrefCD for channels 2/3).
+ */
+static int ad5764_get_channel_vref(struct ad5764_state *st,
+	unsigned int channel)
+{
+	if (!st->chip_info->int_vref)
+		return regulator_get_voltage(st->vref_reg[channel / 2].consumer);
+
+	return st->chip_info->int_vref;
+}
+
+/*
+ * ad5764_read_raw() - IIO read callback
+ *
+ * RAW, CALIBBIAS and CALIBSCALE are read back from the device; SCALE and
+ * OFFSET are derived from the (internal or external) reference voltage.
+ */
+static int ad5764_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+	struct ad5764_state *st = iio_priv(indio_dev);
+	unsigned int reg;
+	int vref;
+	int ret;
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		reg = AD5764_REG_DATA(chan->address);
+		ret = ad5764_read(indio_dev, reg, val);
+		if (ret < 0)
+			return ret;
+		*val >>= chan->scan_type.shift;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		reg = AD5764_REG_OFFSET(chan->address);
+		ret = ad5764_read(indio_dev, reg, val);
+		if (ret < 0)
+			return ret;
+		/* Offset register holds an 8 bit two's complement value. */
+		*val = sign_extend32(*val, 7);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		reg = AD5764_REG_FINE_GAIN(chan->address);
+		ret = ad5764_read(indio_dev, reg, val);
+		if (ret < 0)
+			return ret;
+		/* Fine gain register holds a 6 bit two's complement value. */
+		*val = sign_extend32(*val, 5);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		/* vout = 4 * vref * ((dac_code / 65536) - 0.5) */
+		vref = ad5764_get_channel_vref(st, chan->channel);
+		if (vref < 0)
+			return vref;
+
+		*val = vref * 4 / 1000;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = -(1 << chan->scan_type.realbits) / 2;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info ad5764_info = {
+ .read_raw = ad5764_read_raw,
+ .write_raw = ad5764_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad5764_probe(struct spi_device *spi)
+{
+ enum ad5764_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5764_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+ st->chip_info = &ad5764_chip_infos[type];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5764_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = AD5764_NUM_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+
+ if (st->chip_info->int_vref == 0) {
+ st->vref_reg[0].supply = "vrefAB";
+ st->vref_reg[1].supply = "vrefCD";
+
+ ret = devm_regulator_bulk_get(&st->spi->dev,
+ ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(st->vref_reg),
+ st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ return ret;
+}
+
+static int ad5764_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5764_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5764_ids[] = {
+ { "ad5744", ID_AD5744 },
+ { "ad5744r", ID_AD5744R },
+ { "ad5764", ID_AD5764 },
+ { "ad5764r", ID_AD5764R },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5764_ids);
+
+static struct spi_driver ad5764_driver = {
+ .driver = {
+ .name = "ad5764",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5764_probe,
+ .remove = ad5764_remove,
+ .id_table = ad5764_ids,
+};
+module_spi_driver(ad5764_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
new file mode 100644
index 00000000000000..5ba785f1858988
--- /dev/null
+++ b/drivers/iio/dac/ad5791.c
@@ -0,0 +1,474 @@
+/*
+ * AD5760, AD5780, AD5781, AD5790, AD5791 Voltage Output Digital to Analog
+ * Converter
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/dac/ad5791.h>
+
+#define AD5791_DAC_MASK GENMASK(19, 0)
+
+#define AD5791_CMD_READ BIT(23)
+#define AD5791_CMD_WRITE 0
+#define AD5791_ADDR(addr) ((addr) << 20)
+
+/* Registers */
+#define AD5791_ADDR_NOOP 0
+#define AD5791_ADDR_DAC0 1
+#define AD5791_ADDR_CTRL 2
+#define AD5791_ADDR_CLRCODE 3
+#define AD5791_ADDR_SW_CTRL 4
+
+/* Control Register */
+#define AD5791_CTRL_RBUF BIT(1)
+#define AD5791_CTRL_OPGND BIT(2)
+#define AD5791_CTRL_DACTRI BIT(3)
+#define AD5791_CTRL_BIN2SC BIT(4)
+#define AD5791_CTRL_SDODIS BIT(5)
+#define AD5761_CTRL_LINCOMP(x) ((x) << 6)
+
+#define AD5791_LINCOMP_0_10 0
+#define AD5791_LINCOMP_10_12 1
+#define AD5791_LINCOMP_12_16 2
+#define AD5791_LINCOMP_16_19 3
+#define AD5791_LINCOMP_19_20 12
+
+#define AD5780_LINCOMP_0_10 0
+#define AD5780_LINCOMP_10_20 12
+
+/* Software Control Register */
+#define AD5791_SWCTRL_LDAC BIT(0)
+#define AD5791_SWCTRL_CLR BIT(1)
+#define AD5791_SWCTRL_RESET BIT(2)
+
+#define AD5791_DAC_PWRDN_6K 0
+#define AD5791_DAC_PWRDN_3STATE 1
+
+/**
+ * struct ad5791_chip_info - chip specific information
+ * @get_lin_comp: function pointer to the device specific function
+ */
+
+struct ad5791_chip_info {
+ int (*get_lin_comp) (unsigned int span);
+};
+
+/**
+ * struct ad5791_state - driver instance specific data
+ * @spi: spi_device
+ * @reg_vdd: positive supply regulator
+ * @reg_vss: negative supply regulator
+ * @chip_info: chip model specific constants
+ * @vref_mv: actual reference voltage used
+ * @vref_neg_mv: voltage of the negative supply
+ * @pwr_down_mode current power down mode
+ */
+
+struct ad5791_state {
+ struct spi_device *spi;
+ struct regulator *reg_vdd;
+ struct regulator *reg_vss;
+ const struct ad5791_chip_info *chip_info;
+ unsigned short vref_mv;
+ unsigned int vref_neg_mv;
+ unsigned ctrl;
+ unsigned pwr_down_mode;
+ bool pwr_down;
+
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+/**
+ * ad5791_supported_device_ids:
+ */
+
+enum ad5791_supported_device_ids {
+ ID_AD5760,
+ ID_AD5780,
+ ID_AD5781,
+ ID_AD5791,
+};
+
+static int ad5791_spi_write(struct ad5791_state *st, u8 addr, u32 val)
+{
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_WRITE |
+ AD5791_ADDR(addr) |
+ (val & AD5791_DAC_MASK));
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5791_spi_read(struct ad5791_state *st, u8 addr, u32 *val)
+{
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ st->data[0].d32 = cpu_to_be32(AD5791_CMD_READ |
+ AD5791_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+
+ *val = be32_to_cpu(st->data[2].d32);
+
+ return ret;
+}
+
+static const char * const ad5791_powerdown_modes[] = {
+ "6kohm_to_gnd",
+ "three_state",
+};
+
+static int ad5791_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ return st->pwr_down_mode;
+}
+
+static int ad5791_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ st->pwr_down_mode = mode;
+
+ return 0;
+}
+
+static const struct iio_enum ad5791_powerdown_mode_enum = {
+ .items = ad5791_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad5791_powerdown_modes),
+ .get = ad5791_get_powerdown_mode,
+ .set = ad5791_set_powerdown_mode,
+};
+
+static ssize_t ad5791_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+/*
+ * ad5791_write_dac_powerdown() - "powerdown" sysfs attribute write
+ *
+ * Powering down sets either OPGND (6 kOhm to ground) or DACTRI
+ * (three-state) in the control register, depending on the currently
+ * selected powerdown mode; powering up clears both bits.
+ */
+static ssize_t ad5791_write_dac_powerdown(struct iio_dev *indio_dev,
+	 uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+	 size_t len)
+{
+	bool pwr_down;
+	int ret;
+	struct ad5791_state *st = iio_priv(indio_dev);
+
+	ret = strtobool(buf, &pwr_down);
+	if (ret)
+		return ret;
+
+	if (!pwr_down) {
+		st->ctrl &= ~(AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
+	} else {
+		if (st->pwr_down_mode == AD5791_DAC_PWRDN_6K)
+			st->ctrl |= AD5791_CTRL_OPGND;
+		else if (st->pwr_down_mode == AD5791_DAC_PWRDN_3STATE)
+			st->ctrl |= AD5791_CTRL_DACTRI;
+	}
+	st->pwr_down = pwr_down;
+
+	ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl);
+
+	return ret ? ret : len;
+}
+
+/* Pick the linearity compensation setting for the given span (in mV). */
+static int ad5791_get_lin_comp(unsigned int span)
+{
+	if (span > 19000)
+		return AD5791_LINCOMP_19_20;
+	if (span > 16000)
+		return AD5791_LINCOMP_16_19;
+	if (span > 12000)
+		return AD5791_LINCOMP_12_16;
+	if (span > 10000)
+		return AD5791_LINCOMP_10_12;
+	return AD5791_LINCOMP_0_10;
+}
+
+/* AD5760/AD5780 only distinguish spans up to 10 V from larger ones. */
+static int ad5780_get_lin_comp(unsigned int span)
+{
+	return (span <= 10000) ? AD5780_LINCOMP_0_10 : AD5780_LINCOMP_10_20;
+}
+static const struct ad5791_chip_info ad5791_chip_info_tbl[] = {
+ [ID_AD5760] = {
+ .get_lin_comp = ad5780_get_lin_comp,
+ },
+ [ID_AD5780] = {
+ .get_lin_comp = ad5780_get_lin_comp,
+ },
+ [ID_AD5781] = {
+ .get_lin_comp = ad5791_get_lin_comp,
+ },
+ [ID_AD5791] = {
+ .get_lin_comp = ad5791_get_lin_comp,
+ },
+};
+
+/*
+ * ad5791_read_raw() - IIO read callback
+ *
+ * RAW reads the DAC register over SPI, SCALE reports
+ * vref / (2^realbits - 1), and OFFSET is the (negative) code
+ * corresponding to the negative supply voltage.
+ *
+ * Also drops the stray semicolon that followed the closing brace (an
+ * empty file-scope declaration, not valid ISO C).
+ */
+static int ad5791_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	struct ad5791_state *st = iio_priv(indio_dev);
+	u64 val64;
+	int ret;
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = ad5791_spi_read(st, chan->address, val);
+		if (ret)
+			return ret;
+		*val &= AD5791_DAC_MASK;
+		*val >>= chan->scan_type.shift;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = st->vref_mv;
+		*val2 = (1 << chan->scan_type.realbits) - 1;
+		return IIO_VAL_FRACTIONAL;
+	case IIO_CHAN_INFO_OFFSET:
+		val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
+		do_div(val64, st->vref_mv);
+		*val = -val64;
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
+ {
+ .name = "powerdown",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = ad5791_read_dac_powerdown,
+ .write = ad5791_write_dac_powerdown,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad5791_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ad5791_powerdown_mode_enum),
+ { },
+};
+
+#define AD5791_CHAN(bits, _shift) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .address = AD5791_ADDR_DAC0, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 24, \
+ .shift = (_shift), \
+ }, \
+ .ext_info = ad5791_ext_info, \
+}
+
+static const struct iio_chan_spec ad5791_channels[] = {
+ [ID_AD5760] = AD5791_CHAN(16, 4),
+ [ID_AD5780] = AD5791_CHAN(18, 2),
+ [ID_AD5781] = AD5791_CHAN(18, 2),
+ [ID_AD5791] = AD5791_CHAN(20, 0)
+};
+
+static int ad5791_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ val &= GENMASK(chan->scan_type.realbits - 1, 0);
+ val <<= chan->scan_type.shift;
+
+ return ad5791_spi_write(st, chan->address, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ad5791_info = {
+ .read_raw = &ad5791_read_raw,
+ .write_raw = &ad5791_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad5791_probe(struct spi_device *spi)
+{
+ struct ad5791_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
+ struct ad5791_state *st;
+ int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ st->reg_vdd = devm_regulator_get(&spi->dev, "vdd");
+ if (!IS_ERR(st->reg_vdd)) {
+ ret = regulator_enable(st->reg_vdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg_vdd);
+ if (ret < 0)
+ goto error_disable_reg_pos;
+
+ pos_voltage_uv = ret;
+ }
+
+ st->reg_vss = devm_regulator_get(&spi->dev, "vss");
+ if (!IS_ERR(st->reg_vss)) {
+ ret = regulator_enable(st->reg_vss);
+ if (ret)
+ goto error_disable_reg_pos;
+
+ ret = regulator_get_voltage(st->reg_vss);
+ if (ret < 0)
+ goto error_disable_reg_neg;
+
+ neg_voltage_uv = ret;
+ }
+
+ st->pwr_down = true;
+ st->spi = spi;
+
+ if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd)) {
+ st->vref_mv = (pos_voltage_uv + neg_voltage_uv) / 1000;
+ st->vref_neg_mv = neg_voltage_uv / 1000;
+ } else if (pdata) {
+ st->vref_mv = pdata->vref_pos_mv + pdata->vref_neg_mv;
+ st->vref_neg_mv = pdata->vref_neg_mv;
+ } else {
+ dev_warn(&spi->dev, "reference voltage unspecified\n");
+ }
+
+ ret = ad5791_spi_write(st, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
+ if (ret)
+ goto error_disable_reg_neg;
+
+ st->chip_info = &ad5791_chip_info_tbl[spi_get_device_id(spi)
+ ->driver_data];
+
+
+ st->ctrl = AD5761_CTRL_LINCOMP(st->chip_info->get_lin_comp(st->vref_mv))
+ | ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) |
+ AD5791_CTRL_BIN2SC;
+
+ ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl |
+ AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
+ if (ret)
+ goto error_disable_reg_neg;
+
+ spi_set_drvdata(spi, indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad5791_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels
+ = &ad5791_channels[spi_get_device_id(spi)->driver_data];
+ indio_dev->num_channels = 1;
+ indio_dev->name = spi_get_device_id(st->spi)->name;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg_neg;
+
+ return 0;
+
+error_disable_reg_neg:
+ if (!IS_ERR(st->reg_vss))
+ regulator_disable(st->reg_vss);
+error_disable_reg_pos:
+ if (!IS_ERR(st->reg_vdd))
+ regulator_disable(st->reg_vdd);
+ return ret;
+}
+
+static int ad5791_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg_vdd))
+ regulator_disable(st->reg_vdd);
+
+ if (!IS_ERR(st->reg_vss))
+ regulator_disable(st->reg_vss);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5791_id[] = {
+ {"ad5760", ID_AD5760},
+ {"ad5780", ID_AD5780},
+ {"ad5781", ID_AD5781},
+ {"ad5790", ID_AD5791},
+ {"ad5791", ID_AD5791},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5791_id);
+
+static struct spi_driver ad5791_driver = {
+ .driver = {
+ .name = "ad5791",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5791_probe,
+ .remove = ad5791_remove,
+ .id_table = ad5791_id,
+};
+module_spi_driver(ad5791_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
new file mode 100644
index 00000000000000..fa281003296846
--- /dev/null
+++ b/drivers/iio/dac/ad7303.c
@@ -0,0 +1,303 @@
+/*
+ * AD7303 Digital to analog converters driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/platform_data/ad7303.h>
+
+#define AD7303_CFG_EXTERNAL_VREF BIT(15)
+#define AD7303_CFG_POWER_DOWN(ch) BIT(11 + (ch))
+#define AD7303_CFG_ADDR_OFFSET 10
+
+#define AD7303_CMD_UPDATE_DAC (0x3 << 8)
+
+/**
+ * struct ad7303_state - driver instance specific data
+ * @spi: the device for this driver instance
+ * @config: cached config register value
+ * @dac_cache: current DAC raw value (chip does not support readback)
+ * @data: spi transfer buffer
+ */
+
+struct ad7303_state {
+ struct spi_device *spi;
+ uint16_t config;
+ uint8_t dac_cache[2];
+
+ struct regulator *vdd_reg;
+ struct regulator *vref_reg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data ____cacheline_aligned;
+};
+
+static int ad7303_write(struct ad7303_state *st, unsigned int chan,
+ uint8_t val)
+{
+ st->data = cpu_to_be16(AD7303_CMD_UPDATE_DAC |
+ (chan << AD7303_CFG_ADDR_OFFSET) |
+ st->config | val);
+
+ return spi_write(st->spi, &st->data, sizeof(st->data));
+}
+
+static ssize_t ad7303_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct ad7303_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", (bool)(st->config &
+ AD7303_CFG_POWER_DOWN(chan->channel)));
+}
+
+static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct ad7303_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (pwr_down)
+ st->config |= AD7303_CFG_POWER_DOWN(chan->channel);
+ else
+ st->config &= ~AD7303_CFG_POWER_DOWN(chan->channel);
+
+ /* There is no noop cmd which allows us to only update the powerdown
+ * mode, so just write one of the DAC channels again */
+ ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
+
+ mutex_unlock(&indio_dev->mlock);
+ return len;
+}
+
+/* Reference voltage in uV: the external REF supply when configured,
+ * otherwise Vdd / 2. */
+static int ad7303_get_vref(struct ad7303_state *st,
+	struct iio_chan_spec const *chan)
+{
+	int vdd_uv;
+
+	if (st->config & AD7303_CFG_EXTERNAL_VREF)
+		return regulator_get_voltage(st->vref_reg);
+
+	vdd_uv = regulator_get_voltage(st->vdd_reg);
+	if (vdd_uv < 0)
+		return vdd_uv;
+
+	return vdd_uv / 2;
+}
+
+static int ad7303_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad7303_state *st = iio_priv(indio_dev);
+ int vref_uv;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ *val = st->dac_cache[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref_uv = ad7303_get_vref(st, chan);
+ if (vref_uv < 0)
+ return vref_uv;
+
+ *val = 2 * vref_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int ad7303_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct ad7303_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad7303_write(st, chan->address, val);
+ if (ret == 0)
+ st->dac_cache[chan->channel] = val;
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad7303_info = {
+ .read_raw = ad7303_read_raw,
+ .write_raw = ad7303_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad7303_read_dac_powerdown,
+ .write = ad7303_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ { },
+};
+
+/*
+ * AD7303_CHANNEL() - channel spec for one of the two 8 bit DAC outputs.
+ *
+ * Fix: realbits/storagebits/shift were character constants ('8' == 56,
+ * '0' == 48), which broke the SCALE computation (read_raw uses realbits
+ * as a power of two) and made the bounds check in write_raw compute
+ * 1 << 56, undefined behaviour on a 32 bit int. The DAC is 8 bit wide
+ * (the value cache is uint8_t), so the correct values are 8, 8 and 0.
+ */
+#define AD7303_CHANNEL(chan) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.output = 1, \
+	.channel = (chan), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.address = (chan), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = 8, \
+		.storagebits = 8, \
+		.shift = 0, \
+	}, \
+	.ext_info = ad7303_ext_info, \
+}
+
+static const struct iio_chan_spec ad7303_channels[] = {
+	AD7303_CHANNEL(0),
+	AD7303_CHANNEL(1),
+};
+
+static int ad7303_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad7303_state *st;
+ bool ext_ref;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ st->vdd_reg = devm_regulator_get(&spi->dev, "Vdd");
+ if (IS_ERR(st->vdd_reg))
+ return PTR_ERR(st->vdd_reg);
+
+ ret = regulator_enable(st->vdd_reg);
+ if (ret)
+ return ret;
+
+ if (spi->dev.of_node) {
+ ext_ref = of_property_read_bool(spi->dev.of_node,
+ "REF-supply");
+ } else {
+ struct ad7303_platform_data *pdata = spi->dev.platform_data;
+ if (pdata && pdata->use_external_ref)
+ ext_ref = true;
+ else
+ ext_ref = false;
+ }
+
+ if (ext_ref) {
+ st->vref_reg = devm_regulator_get(&spi->dev, "REF");
+ if (IS_ERR(st->vref_reg)) {
+ ret = PTR_ERR(st->vref_reg);
+ goto err_disable_vdd_reg;
+ }
+
+ ret = regulator_enable(st->vref_reg);
+ if (ret)
+ goto err_disable_vdd_reg;
+
+ st->config |= AD7303_CFG_EXTERNAL_VREF;
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ad7303_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7303_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7303_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_disable_vref_reg;
+
+ return 0;
+
+err_disable_vref_reg:
+ if (st->vref_reg)
+ regulator_disable(st->vref_reg);
+err_disable_vdd_reg:
+ regulator_disable(st->vdd_reg);
+ return ret;
+}
+
+static int ad7303_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7303_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->vref_reg)
+ regulator_disable(st->vref_reg);
+ regulator_disable(st->vdd_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7303_spi_ids[] = {
+ { "ad7303", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7303_spi_ids);
+
+static struct spi_driver ad7303_driver = {
+ .driver = {
+ .name = "ad7303",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7303_probe,
+ .remove = ad7303_remove,
+ .id_table = ad7303_spi_ids,
+};
+module_spi_driver(ad7303_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7303 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
new file mode 100644
index 00000000000000..9a82a7255ebb8d
--- /dev/null
+++ b/drivers/iio/dac/max517.c
@@ -0,0 +1,222 @@
+/*
+ * max517.c - Support for Maxim MAX517, MAX518 and MAX519
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/dac/max517.h>
+
+#define MAX517_DRV_NAME "max517"
+
+/* Commands */
+#define COMMAND_CHANNEL0 0x00
+#define COMMAND_CHANNEL1 0x01 /* for MAX518 and MAX519 */
+#define COMMAND_PD 0x08 /* Power Down */
+
+enum max517_device_ids {
+ ID_MAX517,
+ ID_MAX518,
+ ID_MAX519,
+};
+
+struct max517_data {
+ struct i2c_client *client;
+ unsigned short vref_mv[2];
+};
+
+/*
+ * channel: bit 0: channel 1
+ * bit 1: channel 2
+ * (this way, it's possible to set both channels at once)
+ */
+static int max517_set_value(struct iio_dev *indio_dev,
+ long val, int channel)
+{
+ struct max517_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ u8 outbuf[2];
+ int res;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ outbuf[0] = channel;
+ outbuf[1] = val;
+
+ res = i2c_master_send(client, outbuf, 2);
+ if (res < 0)
+ return res;
+ else if (res != 2)
+ return -EIO;
+ else
+ return 0;
+}
+
+/*
+ * Report the per-channel scale as vref_mv / 2^8 (the DAC is 8 bit);
+ * raw readback is not supported by the hardware.
+ */
+static int max517_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	struct max517_data *data = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_SCALE:
+		/* Corresponds to Vref / 2^(bits) */
+		*val = data->vref_mv[chan->channel];
+		*val2 = 8;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+/* Only IIO_CHAN_INFO_RAW writes are supported; they map 1:1 onto
+ * max517_set_value() for the addressed channel. */
+static int max517_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = max517_set_value(indio_dev, val, chan->channel);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend: send the power-down command byte (no data byte needed). */
+static int max517_suspend(struct device *dev)
+{
+	u8 outbuf = COMMAND_PD;
+
+	return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
+}
+
+/* Resume: clear the power-down bit; the DAC output value itself is
+ * not restored here. */
+static int max517_resume(struct device *dev)
+{
+	u8 outbuf = 0;
+
+	return i2c_master_send(to_i2c_client(dev), &outbuf, 1);
+}
+
+static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
+#define MAX517_PM_OPS (&max517_pm_ops)
+#else
+#define MAX517_PM_OPS NULL
+#endif
+
+static const struct iio_info max517_info = {
+ .read_raw = max517_read_raw,
+ .write_raw = max517_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define MAX517_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec max517_channels[] = {
+ MAX517_CHANNEL(0),
+ MAX517_CHANNEL(1)
+};
+
+/*
+ * Probe: allocate the IIO device, pick the channel count by part
+ * (MAX517 has one output, MAX518/MAX519 have two) and take the
+ * reference voltage from platform data when available.
+ */
+static int max517_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct max517_data *data;
+	struct iio_dev *indio_dev;
+	struct max517_platform_data *platform_data = client->dev.platform_data;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	/* establish that the iio_dev is a child of the i2c device */
+	indio_dev->dev.parent = &client->dev;
+
+	/* reduced channel set for MAX517 */
+	if (id->driver_data == ID_MAX517)
+		indio_dev->num_channels = 1;
+	else
+		indio_dev->num_channels = 2;
+	indio_dev->channels = max517_channels;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &max517_info;
+
+	/*
+	 * Reference voltage on MAX518 and default is 5V, else take vref_mv
+	 * from platform_data
+	 */
+	if (id->driver_data == ID_MAX518 || !platform_data) {
+		data->vref_mv[0] = data->vref_mv[1] = 5000; /* mV */
+	} else {
+		data->vref_mv[0] = platform_data->vref_mv[0];
+		data->vref_mv[1] = platform_data->vref_mv[1];
+	}
+
+	return iio_device_register(indio_dev);
+}
+
+/* Remove: unregister from IIO; all allocations are devm-managed. */
+static int max517_remove(struct i2c_client *client)
+{
+	iio_device_unregister(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id max517_id[] = {
+ { "max517", ID_MAX517 },
+ { "max518", ID_MAX518 },
+ { "max519", ID_MAX519 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max517_id);
+
+static struct i2c_driver max517_driver = {
+ .driver = {
+ .name = MAX517_DRV_NAME,
+ .pm = MAX517_PM_OPS,
+ },
+ .probe = max517_probe,
+ .remove = max517_remove,
+ .id_table = max517_id,
+};
+module_i2c_driver(max517_driver);
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
new file mode 100644
index 00000000000000..6e914495b34645
--- /dev/null
+++ b/drivers/iio/dac/max5821.c
@@ -0,0 +1,405 @@
+ /*
+ * iio/dac/max5821.c
+ * Copyright (C) 2014 Philippe Reynes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+#define MAX5821_MAX_DAC_CHANNELS 2
+
+/* command bytes */
+#define MAX5821_LOAD_DAC_A_IN_REG_B 0x00
+#define MAX5821_LOAD_DAC_B_IN_REG_A 0x10
+#define MAX5821_EXTENDED_COMMAND_MODE 0xf0
+#define MAX5821_READ_DAC_A_COMMAND 0xf1
+#define MAX5821_READ_DAC_B_COMMAND 0xf2
+
+#define MAX5821_EXTENDED_POWER_UP 0x00
+#define MAX5821_EXTENDED_POWER_DOWN_MODE0 0x01
+#define MAX5821_EXTENDED_POWER_DOWN_MODE1 0x02
+#define MAX5821_EXTENDED_POWER_DOWN_MODE2 0x03
+#define MAX5821_EXTENDED_DAC_A 0x04
+#define MAX5821_EXTENDED_DAC_B 0x08
+
+enum max5821_device_ids {
+ ID_MAX5821,
+};
+
+struct max5821_data {
+ struct i2c_client *client;
+ struct regulator *vref_reg;
+ unsigned short vref_mv;
+ bool powerdown[MAX5821_MAX_DAC_CHANNELS];
+ u8 powerdown_mode[MAX5821_MAX_DAC_CHANNELS];
+ struct mutex lock;
+};
+
+static const char * const max5821_powerdown_modes[] = {
+ "three_state",
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+};
+
+enum {
+ MAX5821_THREE_STATE,
+ MAX5821_1KOHM_TO_GND,
+ MAX5821_100KOHM_TO_GND
+};
+
+/* Return the cached per-channel powerdown mode (index into
+ * max5821_powerdown_modes[]). */
+static int max5821_get_powerdown_mode(struct iio_dev *indio_dev,
+				      const struct iio_chan_spec *chan)
+{
+	struct max5821_data *st = iio_priv(indio_dev);
+
+	return st->powerdown_mode[chan->channel];
+}
+
+/* Cache the new mode only; the hardware is updated lazily by
+ * max5821_sync_powerdown_mode() on the next powerdown write. */
+static int max5821_set_powerdown_mode(struct iio_dev *indio_dev,
+				      const struct iio_chan_spec *chan,
+				      unsigned int mode)
+{
+	struct max5821_data *st = iio_priv(indio_dev);
+
+	st->powerdown_mode[chan->channel] = mode;
+
+	return 0;
+}
+
+static const struct iio_enum max5821_powerdown_mode_enum = {
+ .items = max5821_powerdown_modes,
+ .num_items = ARRAY_SIZE(max5821_powerdown_modes),
+ .get = max5821_get_powerdown_mode,
+ .set = max5821_set_powerdown_mode,
+};
+
+/* sysfs read of the per-channel "powerdown" attribute (cached state). */
+static ssize_t max5821_read_dac_powerdown(struct iio_dev *indio_dev,
+					  uintptr_t private,
+					  const struct iio_chan_spec *chan,
+					  char *buf)
+{
+	struct max5821_data *st = iio_priv(indio_dev);
+
+	return sprintf(buf, "%d\n", st->powerdown[chan->channel]);
+}
+
+/*
+ * Push the cached powerdown state of one channel to the chip using an
+ * extended command.  The low bits encode either power-up or the
+ * powerdown mode (cached index + 1 matches the hardware encoding of
+ * MAX5821_EXTENDED_POWER_DOWN_MODE0..2).
+ */
+static int max5821_sync_powerdown_mode(struct max5821_data *data,
+				       const struct iio_chan_spec *chan)
+{
+	u8 outbuf[2];
+
+	outbuf[0] = MAX5821_EXTENDED_COMMAND_MODE;
+
+	if (chan->channel == 0)
+		outbuf[1] = MAX5821_EXTENDED_DAC_A;
+	else
+		outbuf[1] = MAX5821_EXTENDED_DAC_B;
+
+	if (data->powerdown[chan->channel])
+		outbuf[1] |= data->powerdown_mode[chan->channel] + 1;
+	else
+		outbuf[1] |= MAX5821_EXTENDED_POWER_UP;
+
+	return i2c_master_send(data->client, outbuf, 2);
+}
+
+/* sysfs write of "powerdown": parse a boolean, cache it, then sync the
+ * channel's state to the hardware. */
+static ssize_t max5821_write_dac_powerdown(struct iio_dev *indio_dev,
+					   uintptr_t private,
+					   const struct iio_chan_spec *chan,
+					   const char *buf, size_t len)
+{
+	struct max5821_data *data = iio_priv(indio_dev);
+	bool powerdown;
+	int ret;
+
+	ret = strtobool(buf, &powerdown);
+	if (ret)
+		return ret;
+
+	data->powerdown[chan->channel] = powerdown;
+
+	ret = max5821_sync_powerdown_mode(data, chan);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+
+static const struct iio_chan_spec_ext_info max5821_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = max5821_read_dac_powerdown,
+ .write = max5821_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &max5821_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &max5821_powerdown_mode_enum),
+ { },
+};
+
+#define MAX5821_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = max5821_ext_info, \
+}
+
+static const struct iio_chan_spec max5821_channels[] = {
+ MAX5821_CHANNEL(0),
+ MAX5821_CHANNEL(1)
+};
+
+static const u8 max5821_read_dac_command[] = {
+ MAX5821_READ_DAC_A_COMMAND,
+ MAX5821_READ_DAC_B_COMMAND
+};
+
+static const u8 max5821_load_dac_command[] = {
+ MAX5821_LOAD_DAC_A_IN_REG_B,
+ MAX5821_LOAD_DAC_B_IN_REG_A
+};
+
+/*
+ * Read back the current 10-bit code of one DAC channel.
+ *
+ * The lock serializes the command-write / data-read pair so another
+ * caller cannot interleave a transfer between them.  The chip returns
+ * two bytes; the 10-bit value is split as 4 MSBs in inbuf[0] and
+ * 6 bits in the top of inbuf[1].
+ */
+static int max5821_get_value(struct iio_dev *indio_dev,
+			     int *val, int channel)
+{
+	struct max5821_data *data = iio_priv(indio_dev);
+	struct i2c_client *client = data->client;
+	u8 outbuf[1];
+	u8 inbuf[2];
+	int ret;
+
+	if ((channel != 0) && (channel != 1))
+		return -EINVAL;
+
+	outbuf[0] = max5821_read_dac_command[channel];
+
+	mutex_lock(&data->lock);
+
+	ret = i2c_master_send(client, outbuf, 1);
+	if (ret < 0) {
+		mutex_unlock(&data->lock);
+		return ret;
+	} else if (ret != 1) {
+		mutex_unlock(&data->lock);
+		return -EIO;
+	}
+
+	ret = i2c_master_recv(client, inbuf, 2);
+	if (ret < 0) {
+		mutex_unlock(&data->lock);
+		return ret;
+	} else if (ret != 2) {
+		mutex_unlock(&data->lock);
+		return -EIO;
+	}
+
+	mutex_unlock(&data->lock);
+
+	/* reassemble 10-bit value: 4 MSBs + 6 LSBs */
+	*val = ((inbuf[0] & 0x0f) << 6) | (inbuf[1] >> 2);
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Write a 10-bit code to one DAC channel.  The load command carries
+ * the 4 MSBs in its low nibble; the 6 LSBs are left-aligned in the
+ * second byte.  Returns 0, a negative errno, or -EIO on short write.
+ */
+static int max5821_set_value(struct iio_dev *indio_dev,
+			     int val, int channel)
+{
+	struct max5821_data *data = iio_priv(indio_dev);
+	struct i2c_client *client = data->client;
+	u8 outbuf[2];
+	int ret;
+
+	/* 10-bit DAC: 0..1023 only */
+	if ((val < 0) || (val > 1023))
+		return -EINVAL;
+
+	if ((channel != 0) && (channel != 1))
+		return -EINVAL;
+
+	outbuf[0] = max5821_load_dac_command[channel];
+	outbuf[0] |= val >> 6;
+	outbuf[1] = (val & 0x3f) << 2;
+
+	ret = i2c_master_send(client, outbuf, 2);
+	if (ret < 0)
+		return ret;
+	else if (ret != 2)
+		return -EIO;
+	else
+		return 0;
+}
+
+/* RAW reads come straight from the chip; SCALE is vref_mv / 2^10. */
+static int max5821_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct max5821_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		return max5821_get_value(indio_dev, val, chan->channel);
+	case IIO_CHAN_INFO_SCALE:
+		*val = data->vref_mv;
+		*val2 = 10;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Only integer RAW writes are meaningful; reject any fractional part. */
+static int max5821_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	if (val2 != 0)
+		return -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		return max5821_set_value(indio_dev, val, chan->channel);
+	default:
+		return -EINVAL;
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend: put both DACs into powerdown mode 2 with one extended
+ * command.  NOTE(review): this bypasses the cached powerdown state in
+ * max5821_data, so resume below unconditionally powers both up. */
+static int max5821_suspend(struct device *dev)
+{
+	u8 outbuf[2] = { MAX5821_EXTENDED_COMMAND_MODE,
+			 MAX5821_EXTENDED_DAC_A |
+			 MAX5821_EXTENDED_DAC_B |
+			 MAX5821_EXTENDED_POWER_DOWN_MODE2 };
+
+	return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+}
+
+/* Resume: power both DACs back up. */
+static int max5821_resume(struct device *dev)
+{
+	u8 outbuf[2] = { MAX5821_EXTENDED_COMMAND_MODE,
+			 MAX5821_EXTENDED_DAC_A |
+			 MAX5821_EXTENDED_DAC_B |
+			 MAX5821_EXTENDED_POWER_UP };
+
+	return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+}
+
+static SIMPLE_DEV_PM_OPS(max5821_pm_ops, max5821_suspend, max5821_resume);
+#define MAX5821_PM_OPS (&max5821_pm_ops)
+#else
+#define MAX5821_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct iio_info max5821_info = {
+ .read_raw = max5821_read_raw,
+ .write_raw = max5821_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device, mirror the chip's power-on state
+ * (both channels powered down, 100 kOhm to ground), enable the "vref"
+ * regulator and derive the scale from its voltage, then register.
+ *
+ * Returns 0 on success or a negative errno.  The regulator is
+ * disabled again on every failure path after it was enabled.
+ */
+static int max5821_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct max5821_data *data;
+	struct iio_dev *indio_dev;
+	u32 tmp;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	/* max5821 start in powerdown mode 100Kohm to ground */
+	for (tmp = 0; tmp < MAX5821_MAX_DAC_CHANNELS; tmp++) {
+		data->powerdown[tmp] = true;
+		data->powerdown_mode[tmp] = MAX5821_100KOHM_TO_GND;
+	}
+
+	/* devm-managed, so nothing to release on error */
+	data->vref_reg = devm_regulator_get(&client->dev, "vref");
+	if (IS_ERR(data->vref_reg)) {
+		ret = PTR_ERR(data->vref_reg);
+		dev_err(&client->dev,
+			"Failed to get vref regulator: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_enable(data->vref_reg);
+	if (ret) {
+		dev_err(&client->dev,
+			"Failed to enable vref regulator: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_get_voltage(data->vref_reg);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Failed to get voltage on regulator: %d\n", ret);
+		goto error_disable_reg;
+	}
+
+	data->vref_mv = ret / 1000;
+
+	indio_dev->name = id->name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->num_channels = ARRAY_SIZE(max5821_channels);
+	indio_dev->channels = max5821_channels;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &max5821_info;
+
+	/*
+	 * Was "return iio_device_register(indio_dev);", which left the
+	 * vref regulator enabled when registration failed.
+	 */
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	return 0;
+
+error_disable_reg:
+	regulator_disable(data->vref_reg);
+
+	return ret;
+}
+
+/* Remove: unregister from IIO and disable the vref regulator enabled
+ * in probe. */
+static int max5821_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct max5821_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	regulator_disable(data->vref_reg);
+
+	return 0;
+}
+
+static const struct i2c_device_id max5821_id[] = {
+ { "max5821", ID_MAX5821 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max5821_id);
+
+static const struct of_device_id max5821_of_match[] = {
+	{ .compatible = "maxim,max5821" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max5821_of_match);
+
+static struct i2c_driver max5821_driver = {
+	.driver = {
+		.name	= "max5821",
+		.pm	= MAX5821_PM_OPS,
+		.owner	= THIS_MODULE,
+		/*
+		 * Was missing: without of_match_table the OF table above
+		 * was declared but never used, so device-tree probing via
+		 * "maxim,max5821" did not work.
+		 */
+		.of_match_table = max5821_of_match,
+	},
+	.probe		= max5821_probe,
+	.remove		= max5821_remove,
+	.id_table	= max5821_id,
+};
+module_i2c_driver(max5821_driver);
+
+MODULE_AUTHOR("Philippe Reynes <tremyfr@yahoo.fr>");
+MODULE_DESCRIPTION("MAX5821 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
new file mode 100644
index 00000000000000..43d14588448d65
--- /dev/null
+++ b/drivers/iio/dac/mcp4725.c
@@ -0,0 +1,349 @@
+/*
+ * mcp4725.c - Support for Microchip MCP4725
+ *
+ * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * Based on max517 by Roland Stigge <stigge@antcom.de>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * driver for the Microchip I2C 12-bit digital-to-analog converter (DAC)
+ * (7-bit I2C slave address 0x60, the three LSBs can be configured in
+ * hardware)
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/iio/dac/mcp4725.h>
+
+#define MCP4725_DRV_NAME "mcp4725"
+
+struct mcp4725_data {
+ struct i2c_client *client;
+ u16 vref_mv;
+ u16 dac_value;
+ bool powerdown;
+ unsigned powerdown_mode;
+};
+
+/*
+ * Suspend: send the cached powerdown mode (hardware encoding is the
+ * cached index + 1, shifted into bits 6:4 of the first byte).  Also
+ * called directly from the sysfs "powerdown" write handler.
+ */
+static int mcp4725_suspend(struct device *dev)
+{
+	struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev)));
+	u8 outbuf[2];
+
+	outbuf[0] = (data->powerdown_mode + 1) << 4;
+	outbuf[1] = 0;
+	data->powerdown = true;
+
+	return i2c_master_send(data->client, outbuf, 2);
+}
+
+/* Resume: power up and rewrite the cached 12-bit DAC value. */
+static int mcp4725_resume(struct device *dev)
+{
+	struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev)));
+	u8 outbuf[2];
+
+	/* restore previous DAC value */
+	outbuf[0] = (data->dac_value >> 8) & 0xf;
+	outbuf[1] = data->dac_value & 0xff;
+	data->powerdown = false;
+
+	return i2c_master_send(data->client, outbuf, 2);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume);
+#define MCP4725_PM_OPS (&mcp4725_pm_ops)
+#else
+#define MCP4725_PM_OPS NULL
+#endif
+
+/*
+ * sysfs "store_eeprom": persist the current DAC value into the chip's
+ * EEPROM so it is restored on power-up.  Writing anything falsy is a
+ * no-op.  After issuing the write-EEPROM command the chip's ready bit
+ * (bit 7 of the first status byte) is polled; the datasheet-quoted
+ * write time is up to 50 ms, so 20 x 20 ms gives ample margin.
+ */
+static ssize_t mcp4725_store_eeprom(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct mcp4725_data *data = iio_priv(indio_dev);
+	int tries = 20;
+	u8 inoutbuf[3];
+	bool state;
+	int ret;
+
+	ret = strtobool(buf, &state);
+	if (ret < 0)
+		return ret;
+
+	if (!state)
+		return 0;
+
+	inoutbuf[0] = 0x60; /* write EEPROM */
+	inoutbuf[1] = data->dac_value >> 4;
+	inoutbuf[2] = (data->dac_value & 0xf) << 4;
+
+	ret = i2c_master_send(data->client, inoutbuf, 3);
+	if (ret < 0)
+		return ret;
+	else if (ret != 3)
+		return -EIO;
+
+	/* wait for write complete, takes up to 50ms */
+	while (tries--) {
+		msleep(20);
+		ret = i2c_master_recv(data->client, inoutbuf, 3);
+		if (ret < 0)
+			return ret;
+		else if (ret != 3)
+			return -EIO;
+
+		if (inoutbuf[0] & 0x80)
+			break;
+	}
+
+	if (tries < 0) {
+		dev_err(&data->client->dev,
+			"mcp4725_store_eeprom() failed, incomplete\n");
+		return -EIO;
+	}
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(store_eeprom, S_IWUSR, NULL, mcp4725_store_eeprom, 0);
+
+static struct attribute *mcp4725_attributes[] = {
+ &iio_dev_attr_store_eeprom.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mcp4725_attribute_group = {
+ .attrs = mcp4725_attributes,
+};
+
+static const char * const mcp4725_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "500kohm_to_gnd"
+};
+
+/* Return the cached powerdown mode (index into
+ * mcp4725_powerdown_modes[]). */
+static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+
+	return data->powerdown_mode;
+}
+
+/* Cache the new mode; applied to hardware on the next powerdown. */
+static int mcp4725_set_powerdown_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned mode)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+
+	data->powerdown_mode = mode;
+
+	return 0;
+}
+
+/* sysfs read of the cached powerdown state. */
+static ssize_t mcp4725_read_powerdown(struct iio_dev *indio_dev,
+	uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+
+	return sprintf(buf, "%d\n", data->powerdown);
+}
+
+/* sysfs write of "powerdown": reuses the suspend/resume helpers to
+ * enter or leave powerdown at runtime. */
+static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
+	 uintptr_t private, const struct iio_chan_spec *chan,
+	 const char *buf, size_t len)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+	bool state;
+	int ret;
+
+	ret = strtobool(buf, &state);
+	if (ret)
+		return ret;
+
+	if (state)
+		ret = mcp4725_suspend(&data->client->dev);
+	else
+		ret = mcp4725_resume(&data->client->dev);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+
+static const struct iio_enum mcp4725_powerdown_mode_enum = {
+ .items = mcp4725_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+};
+
+static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = mcp4725_read_powerdown,
+ .write = mcp4725_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
+ { },
+};
+
+static const struct iio_chan_spec mcp4725_channel = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = mcp4725_ext_info,
+};
+
+/*
+ * Write a 12-bit code using the two-byte "fast mode" command (4 MSBs
+ * in the first byte, 8 LSBs in the second).  Returns 0, a negative
+ * errno, or -EIO on a short transfer.
+ */
+static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+	u8 outbuf[2];
+	int ret;
+
+	/* 12-bit DAC: 0..4095 only */
+	if (val >= (1 << 12) || val < 0)
+		return -EINVAL;
+
+	outbuf[0] = (val >> 8) & 0xf;
+	outbuf[1] = val & 0xff;
+
+	ret = i2c_master_send(data->client, outbuf, 2);
+	if (ret < 0)
+		return ret;
+	else if (ret != 2)
+		return -EIO;
+	else
+		return 0;
+}
+
+/* RAW reads return the cached value (hardware readback only happens
+ * once, at probe); SCALE is vref_mv / 2^12. */
+static int mcp4725_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		*val = data->dac_value;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = data->vref_mv;
+		*val2 = 12;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Write a new DAC code.
+ *
+ * The cached dac_value is only updated when the hardware write
+ * succeeded; previously an out-of-range or failed write still
+ * overwrote the cache, so read_raw()/store_eeprom() would report a
+ * value that was never written to the chip.
+ */
+static int mcp4725_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan,
+	int val, int val2, long mask)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = mcp4725_set_value(indio_dev, val);
+		if (ret == 0)
+			data->dac_value = val;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static const struct iio_info mcp4725_info = {
+ .read_raw = mcp4725_read_raw,
+ .write_raw = mcp4725_write_raw,
+ .attrs = &mcp4725_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: requires platform data carrying a non-zero vref_mv (the chip
+ * has no internal reference).  Reads back the chip's current state
+ * (power bits and 12-bit output code) so the driver's cache starts in
+ * sync with the hardware.
+ */
+static int mcp4725_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct mcp4725_data *data;
+	struct iio_dev *indio_dev;
+	struct mcp4725_platform_data *platform_data = client->dev.platform_data;
+	u8 inbuf[3];
+	u8 pd;
+	int err;
+
+	if (!platform_data || !platform_data->vref_mv) {
+		dev_err(&client->dev, "invalid platform data");
+		return -EINVAL;
+	}
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &mcp4725_info;
+	indio_dev->channels = &mcp4725_channel;
+	indio_dev->num_channels = 1;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	data->vref_mv = platform_data->vref_mv;
+
+	/* read current DAC value */
+	err = i2c_master_recv(client, inbuf, 3);
+	if (err < 0) {
+		dev_err(&client->dev, "failed to read DAC value");
+		return err;
+	}
+	/* power-down bits: 0 = powered up, 1..3 = powerdown mode + 1 */
+	pd = (inbuf[0] >> 1) & 0x3;
+	data->powerdown = pd > 0 ? true : false;
+	data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */
+	data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
+
+	return iio_device_register(indio_dev);
+}
+
+/* Remove: unregister from IIO; all allocations are devm-managed. */
+static int mcp4725_remove(struct i2c_client *client)
+{
+	iio_device_unregister(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id mcp4725_id[] = {
+ { "mcp4725", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp4725_id);
+
+static struct i2c_driver mcp4725_driver = {
+ .driver = {
+ .name = MCP4725_DRV_NAME,
+ .pm = MCP4725_PM_OPS,
+ },
+ .probe = mcp4725_probe,
+ .remove = mcp4725_remove,
+ .id_table = mcp4725_id,
+};
+module_i2c_driver(mcp4725_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("MCP4725 12-bit DAC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
new file mode 100644
index 00000000000000..92cf4ca6981d0a
--- /dev/null
+++ b/drivers/iio/dac/mcp4922.c
@@ -0,0 +1,216 @@
+/*
+ * mcp4922.c
+ *
+ * Driver for Microchip Digital to Analog Converters.
+ * Supports MCP4902, MCP4912, and MCP4922.
+ *
+ * Copyright (c) 2014 EMAC Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/bitops.h>
+
+#define MCP4922_NUM_CHANNELS 2
+
+enum mcp4922_supported_device_ids {
+ ID_MCP4902,
+ ID_MCP4912,
+ ID_MCP4922,
+};
+
+struct mcp4922_state {
+ struct spi_device *spi;
+ unsigned int value[MCP4922_NUM_CHANNELS];
+ unsigned int vref_mv;
+ struct regulator *vref_reg;
+ u8 mosi[2] ____cacheline_aligned;
+};
+
+#define MCP4922_CHAN(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 12 - (bits), \
+ }, \
+}
+
+/*
+ * Send one 16-bit command: bit 15 selects the channel, 0x30 sets the
+ * buffered/gain/active control bits, and the low 12 bits carry the
+ * (already shifted) DAC code.  mosi[] is the DMA-safe buffer in
+ * mcp4922_state.
+ */
+static int mcp4922_spi_write(struct mcp4922_state *state, u8 addr, u32 val)
+{
+	state->mosi[1] = val & 0xff;
+	state->mosi[0] = (addr == 0) ? 0x00 : 0x80;
+	state->mosi[0] |= 0x30 | ((val >> 8) & 0x0f);
+
+	return spi_write(state->spi, state->mosi, 2);
+}
+
+/* RAW reads return the cached (shifted) value; SCALE is
+ * vref_mv / 2^realbits for the addressed variant (8/10/12 bit). */
+static int mcp4922_read_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan,
+		int *val,
+		int *val2,
+		long mask)
+{
+	struct mcp4922_state *state = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		*val = state->value[chan->channel];
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = state->vref_mv;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Write a new DAC code.  The value is left-shifted so that 8/10-bit
+ * parts use the top of the 12-bit register, cached, and sent via SPI.
+ */
+static int mcp4922_write_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan,
+		int val,
+		int val2,
+		long mask)
+{
+	struct mcp4922_state *state = iio_priv(indio_dev);
+
+	if (val2 != 0)
+		return -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/*
+		 * Was only an upper-bound check: a negative val passed
+		 * through, was shifted and written to the DAC.
+		 */
+		if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0))
+			return -EINVAL;
+		val <<= chan->scan_type.shift;
+		state->value[chan->channel] = val;
+		return mcp4922_spi_write(state, chan->channel, val);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_chan_spec mcp4922_channels[3][MCP4922_NUM_CHANNELS] = {
+ [ID_MCP4902] = { MCP4922_CHAN(0, 8), MCP4922_CHAN(1, 8) },
+ [ID_MCP4912] = { MCP4922_CHAN(0, 10), MCP4922_CHAN(1, 10) },
+ [ID_MCP4922] = { MCP4922_CHAN(0, 12), MCP4922_CHAN(1, 12) },
+};
+
+static const struct iio_info mcp4922_info = {
+ .read_raw = &mcp4922_read_raw,
+ .write_raw = &mcp4922_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: enable the mandatory "vref" regulator, derive the scale from
+ * its voltage, select the channel table for the detected variant
+ * (MCP4902/4912/4922) and register with IIO.  The regulator is
+ * disabled on every failure path after it was enabled.
+ */
+static int mcp4922_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct mcp4922_state *state;
+	const struct spi_device_id *id;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	state = iio_priv(indio_dev);
+	state->spi = spi;
+	state->vref_reg = devm_regulator_get(&spi->dev, "vref");
+	if (IS_ERR(state->vref_reg)) {
+		dev_err(&spi->dev, "Vref regulator not specified\n");
+		return PTR_ERR(state->vref_reg);
+	}
+
+	ret = regulator_enable(state->vref_reg);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable vref regulator: %d\n",
+				ret);
+		return ret;
+	}
+
+	ret = regulator_get_voltage(state->vref_reg);
+	if (ret < 0) {
+		dev_err(&spi->dev, "Failed to read vref regulator: %d\n",
+				ret);
+		goto error_disable_reg;
+	}
+	state->vref_mv = ret / 1000;
+
+	spi_set_drvdata(spi, indio_dev);
+	id = spi_get_device_id(spi);
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &mcp4922_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = mcp4922_channels[id->driver_data];
+	indio_dev->num_channels = MCP4922_NUM_CHANNELS;
+	indio_dev->name = id->name;
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to register iio device: %d\n",
+				ret);
+		goto error_disable_reg;
+	}
+
+	return 0;
+
+error_disable_reg:
+	regulator_disable(state->vref_reg);
+
+	return ret;
+}
+
+/* Remove: unregister from IIO and disable the vref regulator enabled
+ * in probe. */
+static int mcp4922_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct mcp4922_state *state;
+
+	iio_device_unregister(indio_dev);
+	state = iio_priv(indio_dev);
+	regulator_disable(state->vref_reg);
+
+	return 0;
+}
+
+static const struct spi_device_id mcp4922_id[] = {
+ {"mcp4902", ID_MCP4902},
+ {"mcp4912", ID_MCP4912},
+ {"mcp4922", ID_MCP4922},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mcp4922_id);
+
+static struct spi_driver mcp4922_driver = {
+ .driver = {
+ .name = "mcp4922",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcp4922_probe,
+ .remove = mcp4922_remove,
+ .id_table = mcp4922_id,
+};
+module_spi_driver(mcp4922_driver);
+
+MODULE_AUTHOR("Michael Welling <mwelling@ieee.org>");
+MODULE_DESCRIPTION("Microchip MCP4902, MCP4912, MCP4922 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
new file mode 100644
index 00000000000000..dc5e0b72882faa
--- /dev/null
+++ b/drivers/iio/frequency/Kconfig
@@ -0,0 +1,42 @@
+#
+# Frequency
+# Direct Digital Synthesis drivers (DDS)
+# Clock Distribution device drivers
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Frequency Synthesizers DDS/PLL"
+
+menu "Clock Generator/Distribution"
+
+config AD9523
+ tristate "Analog Devices AD9523 Low Jitter Clock Generator"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD9523 Low Jitter
+ Clock Generator. The driver provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad9523.
+
+endmenu
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+
+menu "Phase-Locked Loop (PLL) frequency synthesizers"
+
+config ADF4350
+ tristate "Analog Devices ADF4350/ADF4351 Wideband Synthesizers"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADF4350/ADF4351
+ Wideband Synthesizers. The driver provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adf4350.
+
+endmenu
+endmenu
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
new file mode 100644
index 00000000000000..2bca03f3e2e3d3
--- /dev/null
+++ b/drivers/iio/frequency/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile iio/frequency
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD9523) += ad9523.o
+obj-$(CONFIG_ADF4350) += adf4350.o
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
new file mode 100644
index 00000000000000..7c5245d9f99ccc
--- /dev/null
+++ b/drivers/iio/frequency/ad9523.c
@@ -0,0 +1,1040 @@
+/*
+ * AD9523 SPI Low Jitter Clock Generator
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/frequency/ad9523.h>
+
+#define AD9523_READ (1 << 15)
+#define AD9523_WRITE (0 << 15)
+#define AD9523_CNT(x) (((x) - 1) << 13)
+#define AD9523_ADDR(x) ((x) & 0xFFF)
+
+#define AD9523_R1B (1 << 16)
+#define AD9523_R2B (2 << 16)
+#define AD9523_R3B (3 << 16)
+#define AD9523_TRANSF_LEN(x) ((x) >> 16)
+
+#define AD9523_SERIAL_PORT_CONFIG (AD9523_R1B | 0x0)
+#define AD9523_VERSION_REGISTER (AD9523_R1B | 0x2)
+#define AD9523_PART_REGISTER (AD9523_R1B | 0x3)
+#define AD9523_READBACK_CTRL (AD9523_R1B | 0x4)
+
+#define AD9523_EEPROM_CUSTOMER_VERSION_ID (AD9523_R2B | 0x6)
+
+#define AD9523_PLL1_REF_A_DIVIDER (AD9523_R2B | 0x11)
+#define AD9523_PLL1_REF_B_DIVIDER (AD9523_R2B | 0x13)
+#define AD9523_PLL1_REF_TEST_DIVIDER (AD9523_R1B | 0x14)
+#define AD9523_PLL1_FEEDBACK_DIVIDER (AD9523_R2B | 0x17)
+#define AD9523_PLL1_CHARGE_PUMP_CTRL (AD9523_R2B | 0x19)
+#define AD9523_PLL1_INPUT_RECEIVERS_CTRL (AD9523_R1B | 0x1A)
+#define AD9523_PLL1_REF_CTRL (AD9523_R1B | 0x1B)
+#define AD9523_PLL1_MISC_CTRL (AD9523_R1B | 0x1C)
+#define AD9523_PLL1_LOOP_FILTER_CTRL (AD9523_R1B | 0x1D)
+
+#define AD9523_PLL2_CHARGE_PUMP (AD9523_R1B | 0xF0)
+#define AD9523_PLL2_FEEDBACK_DIVIDER_AB (AD9523_R1B | 0xF1)
+#define AD9523_PLL2_CTRL (AD9523_R1B | 0xF2)
+#define AD9523_PLL2_VCO_CTRL (AD9523_R1B | 0xF3)
+#define AD9523_PLL2_VCO_DIVIDER (AD9523_R1B | 0xF4)
+#define AD9523_PLL2_LOOP_FILTER_CTRL (AD9523_R2B | 0xF6)
+#define AD9523_PLL2_R2_DIVIDER (AD9523_R1B | 0xF7)
+
+#define AD9523_CHANNEL_CLOCK_DIST(ch) (AD9523_R3B | (0x192 + 3 * ch))
+
+#define AD9523_PLL1_OUTPUT_CTRL (AD9523_R1B | 0x1BA)
+#define AD9523_PLL1_OUTPUT_CHANNEL_CTRL (AD9523_R1B | 0x1BB)
+
+#define AD9523_READBACK_0 (AD9523_R1B | 0x22C)
+#define AD9523_READBACK_1 (AD9523_R1B | 0x22D)
+
+#define AD9523_STATUS_SIGNALS (AD9523_R3B | 0x232)
+#define AD9523_POWER_DOWN_CTRL (AD9523_R1B | 0x233)
+#define AD9523_IO_UPDATE (AD9523_R1B | 0x234)
+
+#define AD9523_EEPROM_DATA_XFER_STATUS (AD9523_R1B | 0xB00)
+#define AD9523_EEPROM_ERROR_READBACK (AD9523_R1B | 0xB01)
+#define AD9523_EEPROM_CTRL1 (AD9523_R1B | 0xB02)
+#define AD9523_EEPROM_CTRL2 (AD9523_R1B | 0xB03)
+
+/* AD9523_SERIAL_PORT_CONFIG */
+
+#define AD9523_SER_CONF_SDO_ACTIVE (1 << 7)
+#define AD9523_SER_CONF_SOFT_RESET (1 << 5)
+
+/* AD9523_READBACK_CTRL */
+#define AD9523_READBACK_CTRL_READ_BUFFERED (1 << 0)
+
+/* AD9523_PLL1_CHARGE_PUMP_CTRL */
+#define AD9523_PLL1_CHARGE_PUMP_CURRENT_nA(x) (((x) / 500) & 0x7F)
+#define AD9523_PLL1_CHARGE_PUMP_TRISTATE (1 << 7)
+#define AD9523_PLL1_CHARGE_PUMP_MODE_NORMAL (3 << 8)
+#define AD9523_PLL1_CHARGE_PUMP_MODE_PUMP_DOWN (2 << 8)
+#define AD9523_PLL1_CHARGE_PUMP_MODE_PUMP_UP (1 << 8)
+#define AD9523_PLL1_CHARGE_PUMP_MODE_TRISTATE (0 << 8)
+#define AD9523_PLL1_BACKLASH_PW_MIN (0 << 10)
+#define AD9523_PLL1_BACKLASH_PW_LOW (1 << 10)
+#define AD9523_PLL1_BACKLASH_PW_HIGH (2 << 10)
+#define AD9523_PLL1_BACKLASH_PW_MAX (3 << 10)
+
+/* AD9523_PLL1_INPUT_RECEIVERS_CTRL */
+#define AD9523_PLL1_REF_TEST_RCV_EN (1 << 7)
+#define AD9523_PLL1_REFB_DIFF_RCV_EN (1 << 6)
+#define AD9523_PLL1_REFA_DIFF_RCV_EN (1 << 5)
+#define AD9523_PLL1_REFB_RCV_EN (1 << 4)
+#define AD9523_PLL1_REFA_RCV_EN (1 << 3)
+#define AD9523_PLL1_REFA_REFB_PWR_CTRL_EN (1 << 2)
+#define AD9523_PLL1_OSC_IN_CMOS_NEG_INP_EN (1 << 1)
+#define AD9523_PLL1_OSC_IN_DIFF_EN (1 << 0)
+
+/* AD9523_PLL1_REF_CTRL */
+#define AD9523_PLL1_BYPASS_REF_TEST_DIV_EN (1 << 7)
+#define AD9523_PLL1_BYPASS_FEEDBACK_DIV_EN (1 << 6)
+#define AD9523_PLL1_ZERO_DELAY_MODE_INT (1 << 5)
+#define AD9523_PLL1_ZERO_DELAY_MODE_EXT (0 << 5)
+#define AD9523_PLL1_OSC_IN_PLL_FEEDBACK_EN (1 << 4)
+#define AD9523_PLL1_ZD_IN_CMOS_NEG_INP_EN (1 << 3)
+#define AD9523_PLL1_ZD_IN_DIFF_EN (1 << 2)
+#define AD9523_PLL1_REFB_CMOS_NEG_INP_EN (1 << 1)
+#define AD9523_PLL1_REFA_CMOS_NEG_INP_EN (1 << 0)
+
+/* AD9523_PLL1_MISC_CTRL */
+#define AD9523_PLL1_REFB_INDEP_DIV_CTRL_EN (1 << 7)
+#define AD9523_PLL1_OSC_CTRL_FAIL_VCC_BY2_EN (1 << 6)
+#define AD9523_PLL1_REF_MODE(x) ((x) << 2)
+#define AD9523_PLL1_BYPASS_REFB_DIV (1 << 1)
+#define AD9523_PLL1_BYPASS_REFA_DIV (1 << 0)
+
+/* AD9523_PLL1_LOOP_FILTER_CTRL */
+#define AD9523_PLL1_LOOP_FILTER_RZERO(x) ((x) & 0xF)
+
+/* AD9523_PLL2_CHARGE_PUMP */
+#define AD9523_PLL2_CHARGE_PUMP_CURRENT_nA(x) ((x) / 3500)
+
+/* AD9523_PLL2_FEEDBACK_DIVIDER_AB */
+#define AD9523_PLL2_FB_NDIV_A_CNT(x) (((x) & 0x3) << 6)
+#define AD9523_PLL2_FB_NDIV_B_CNT(x) (((x) & 0x3F) << 0)
+#define AD9523_PLL2_FB_NDIV(a, b) (4 * (b) + (a))
+
/* AD9523_PLL2_CTRL */
#define AD9523_PLL2_CHARGE_PUMP_MODE_NORMAL	(3 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_PUMP_DOWN	(2 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_PUMP_UP	(1 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_TRISTATE	(0 << 0)
#define AD9523_PLL2_BACKLASH_PW_MIN		(0 << 2)
#define AD9523_PLL2_BACKLASH_PW_LOW		(1 << 2)
#define AD9523_PLL2_BACKLASH_PW_HIGH		(2 << 2)
/*
 * The backlash pulse-width field occupies bits [3:2] (see MIN/LOW/HIGH
 * above); the previous definition (3 << 1) was a typo that spilled into
 * the charge-pump mode field in bits [1:0].
 */
#define AD9523_PLL2_BACKLASH_PW_MAX		(3 << 2)
#define AD9523_PLL2_BACKLASH_CTRL_EN		(1 << 4)
#define AD9523_PLL2_FREQ_DOUBLER_EN		(1 << 5)
#define AD9523_PLL2_LOCK_DETECT_PWR_DOWN_EN	(1 << 7)
+
+/* AD9523_PLL2_VCO_CTRL */
+#define AD9523_PLL2_VCO_CALIBRATE (1 << 1)
+#define AD9523_PLL2_FORCE_VCO_MIDSCALE (1 << 2)
+#define AD9523_PLL2_FORCE_REFERENCE_VALID (1 << 3)
+#define AD9523_PLL2_FORCE_RELEASE_SYNC (1 << 4)
+
+/* AD9523_PLL2_VCO_DIVIDER */
+#define AD9523_PLL2_VCO_DIV_M1(x) ((((x) - 3) & 0x3) << 0)
+#define AD9523_PLL2_VCO_DIV_M2(x) ((((x) - 3) & 0x3) << 4)
+#define AD9523_PLL2_VCO_DIV_M1_PWR_DOWN_EN (1 << 2)
+#define AD9523_PLL2_VCO_DIV_M2_PWR_DOWN_EN (1 << 6)
+
+/* AD9523_PLL2_LOOP_FILTER_CTRL */
+#define AD9523_PLL2_LOOP_FILTER_CPOLE1(x) (((x) & 0x7) << 0)
+#define AD9523_PLL2_LOOP_FILTER_RZERO(x) (((x) & 0x7) << 3)
+#define AD9523_PLL2_LOOP_FILTER_RPOLE2(x) (((x) & 0x7) << 6)
+#define AD9523_PLL2_LOOP_FILTER_RZERO_BYPASS_EN (1 << 8)
+
+/* AD9523_PLL2_R2_DIVIDER */
+#define AD9523_PLL2_R2_DIVIDER_VAL(x) (((x) & 0x1F) << 0)
+
/* AD9523_CHANNEL_CLOCK_DIST */
#define AD9523_CLK_DIST_DIV_PHASE(x)	(((x) & 0x3F) << 18)
/*
 * Field decode helpers for a channel-distribution readback value.
 * The *_REV macros previously expanded the hard-coded identifier 'ret'
 * instead of the macro argument, silently depending on every caller
 * naming its local variable 'ret'; they now use the argument.
 */
#define AD9523_CLK_DIST_DIV_PHASE_REV(x)	(((x) >> 18) & 0x3F)
#define AD9523_CLK_DIST_DIV(x)	((((x) - 1) & 0x3FF) << 8)
#define AD9523_CLK_DIST_DIV_REV(x)	((((x) >> 8) & 0x3FF) + 1)
#define AD9523_CLK_DIST_INV_DIV_OUTPUT_EN	(1 << 7)
#define AD9523_CLK_DIST_IGNORE_SYNC_EN	(1 << 6)
#define AD9523_CLK_DIST_PWR_DOWN_EN	(1 << 5)
#define AD9523_CLK_DIST_LOW_PWR_MODE_EN	(1 << 4)
#define AD9523_CLK_DIST_DRIVER_MODE(x)	(((x) & 0xF) << 0)
+
+/* AD9523_PLL1_OUTPUT_CTRL */
+#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH6_M2 (1 << 7)
+#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH5_M2 (1 << 6)
+#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH4_M2 (1 << 5)
+#define AD9523_PLL1_OUTP_CTRL_CMOS_DRV_WEAK (1 << 4)
+#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_1 (0 << 0)
+#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_2 (1 << 0)
+#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_4 (2 << 0)
+#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_8 (4 << 0)
+#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_16 (8 << 0)
+
+/* AD9523_PLL1_OUTPUT_CHANNEL_CTRL */
+#define AD9523_PLL1_OUTP_CH_CTRL_OUTPUT_PWR_DOWN_EN (1 << 7)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH9_M2 (1 << 6)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH8_M2 (1 << 5)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH7_M2 (1 << 4)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH3 (1 << 3)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH2 (1 << 2)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH1 (1 << 1)
+#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH0 (1 << 0)
+
+/* AD9523_READBACK_0 */
+#define AD9523_READBACK_0_STAT_PLL2_REF_CLK (1 << 7)
+#define AD9523_READBACK_0_STAT_PLL2_FB_CLK (1 << 6)
+#define AD9523_READBACK_0_STAT_VCXO (1 << 5)
+#define AD9523_READBACK_0_STAT_REF_TEST (1 << 4)
+#define AD9523_READBACK_0_STAT_REFB (1 << 3)
+#define AD9523_READBACK_0_STAT_REFA (1 << 2)
+#define AD9523_READBACK_0_STAT_PLL2_LD (1 << 1)
+#define AD9523_READBACK_0_STAT_PLL1_LD (1 << 0)
+
+/* AD9523_READBACK_1 */
+#define AD9523_READBACK_1_HOLDOVER_ACTIVE (1 << 3)
+#define AD9523_READBACK_1_AUTOMODE_SEL_REFB (1 << 2)
+#define AD9523_READBACK_1_VCO_CALIB_IN_PROGRESS (1 << 0)
+
+/* AD9523_STATUS_SIGNALS */
+#define AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL (1 << 16)
+#define AD9523_STATUS_MONITOR_01_PLL12_LOCKED (0x302)
+/* AD9523_POWER_DOWN_CTRL */
+#define AD9523_POWER_DOWN_CTRL_PLL1_PWR_DOWN (1 << 2)
+#define AD9523_POWER_DOWN_CTRL_PLL2_PWR_DOWN (1 << 1)
+#define AD9523_POWER_DOWN_CTRL_DIST_PWR_DOWN (1 << 0)
+
+/* AD9523_IO_UPDATE */
+#define AD9523_IO_UPDATE_EN (1 << 0)
+
+/* AD9523_EEPROM_DATA_XFER_STATUS */
+#define AD9523_EEPROM_DATA_XFER_IN_PROGRESS (1 << 0)
+
+/* AD9523_EEPROM_ERROR_READBACK */
+#define AD9523_EEPROM_ERROR_READBACK_FAIL (1 << 0)
+
+/* AD9523_EEPROM_CTRL1 */
+#define AD9523_EEPROM_CTRL1_SOFT_EEPROM (1 << 1)
+#define AD9523_EEPROM_CTRL1_EEPROM_WRITE_PROT_DIS (1 << 0)
+
+/* AD9523_EEPROM_CTRL2 */
+#define AD9523_EEPROM_CTRL2_REG2EEPROM (1 << 0)
+
#define AD9523_NUM_CHAN					14
#define AD9523_NUM_CHAN_ALT_CLK_SRC			10

/* Helpers to avoid excess line breaks */
#define AD_IFE(_pde, _a, _b) ((pdata->_pde) ? _a : _b)
#define AD_IF(_pde, _a) AD_IFE(_pde, _a, 0)

/*
 * Attribute addresses: the first eight map 1:1 onto the READBACK_0
 * status bits (see ad9523_show()); AD9523_SYNC and AD9523_EEPROM select
 * the two write-only actions handled in ad9523_store().
 */
enum {
	AD9523_STAT_PLL1_LD,
	AD9523_STAT_PLL2_LD,
	AD9523_STAT_REFA,
	AD9523_STAT_REFB,
	AD9523_STAT_REF_TEST,
	AD9523_STAT_VCXO,
	AD9523_STAT_PLL2_FB_CLK,
	AD9523_STAT_PLL2_REF_CLK,
	AD9523_SYNC,
	AD9523_EEPROM,
};

/*
 * Clock sources an output channel may be driven from; also used as the
 * index into ad9523_state.vco_out_freq[].
 */
enum {
	AD9523_VCO1,
	AD9523_VCO2,
	AD9523_VCXO,
	AD9523_NUM_CLK_SRC,
};

struct ad9523_state {
	struct spi_device *spi;		/* SPI bus handle */
	struct regulator *reg;		/* optional "vcc" supply */
	struct ad9523_platform_data *pdata;
	struct iio_chan_spec ad9523_channels[AD9523_NUM_CHAN];

	unsigned long vcxo_freq;	/* external VCXO frequency [Hz] */
	unsigned long vco_freq;		/* PLL2 VCO frequency [Hz] */
	/* per-source output frequency, indexed by AD9523_VCO1/VCO2/VCXO */
	unsigned long vco_out_freq[AD9523_NUM_CLK_SRC];
	/* per-channel selected source (index into vco_out_freq[]) */
	unsigned char vco_out_map[AD9523_NUM_CHAN_ALT_CLK_SRC];

	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 */
	union {
		__be32 d32;
		u8 d8[4];
	} data[2] ____cacheline_aligned;
};
+
+static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
+{
+ struct ad9523_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /* We encode the register size 1..3 bytes into the register address.
+ * On transfer we get the size from the register datum, and make sure
+ * the result is properly aligned.
+ */
+
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[2],
+ .len = 2,
+ }, {
+ .rx_buf = &st->data[1].d8[4 - AD9523_TRANSF_LEN(addr)],
+ .len = AD9523_TRANSF_LEN(addr),
+ },
+ };
+
+ st->data[0].d32 = cpu_to_be32(AD9523_READ |
+ AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
+ AD9523_ADDR(addr));
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "read failed (%d)", ret);
+ else
+ ret = be32_to_cpu(st->data[1].d32) & (0xFFFFFF >>
+ (8 * (3 - AD9523_TRANSF_LEN(addr))));
+
+ return ret;
+};
+
/*
 * ad9523_write() - write @val to register @addr over SPI.
 * Transfer length is encoded in @addr exactly as for ad9523_read();
 * the value is sent big-endian, right-aligned in st->data[1].
 *
 * Returns 0 on success or a negative errno.
 */
static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
{
	struct ad9523_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer t[] = {
		{
			.tx_buf = &st->data[0].d8[2],
			.len = 2,
		}, {
			.tx_buf = &st->data[1].d8[4 - AD9523_TRANSF_LEN(addr)],
			.len = AD9523_TRANSF_LEN(addr),
		},
	};

	/* Instruction word: write opcode | byte count | 12-bit address. */
	st->data[0].d32 = cpu_to_be32(AD9523_WRITE |
				      AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
				      AD9523_ADDR(addr));
	st->data[1].d32 = cpu_to_be32(val);

	ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));

	if (ret < 0)
		dev_err(&indio_dev->dev, "write failed (%d)", ret);

	return ret;
}
+
/*
 * Latch the buffered register writes into the active configuration
 * (the AD9523 applies most register changes only on IO_UPDATE).
 */
static int ad9523_io_update(struct iio_dev *indio_dev)
{
	return ad9523_write(indio_dev, AD9523_IO_UPDATE, AD9523_IO_UPDATE_EN);
}
+
/*
 * ad9523_vco_out_map() - select the clock source for output channel @ch
 * and record it in st->vco_out_map[] so the raw divider values can later
 * be translated into Hz.
 *
 * @out: non-zero selects the channel group's alternative source.
 * Channels 0..3 switch between VCO divider and VCXO, channels 4..9
 * between the M1 and M2 VCO dividers; channels >= 10 have no selectable
 * source and are silently accepted.
 *
 * Returns 0 on success or a negative errno from the register access.
 */
static int ad9523_vco_out_map(struct iio_dev *indio_dev,
			      unsigned ch, unsigned out)
{
	struct ad9523_state *st = iio_priv(indio_dev);
	int ret;
	unsigned mask;

	switch (ch) {
	case 0 ... 3:
		/* read-modify-write of the per-channel VCXO select bit */
		ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CHANNEL_CTRL);
		if (ret < 0)
			break;
		mask = AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH0 << ch;
		if (out) {
			ret |= mask;
			/* remap to the AD9523_VCXO index of vco_out_freq[] */
			out = 2;
		} else {
			ret &= ~mask;
		}
		ret = ad9523_write(indio_dev,
				   AD9523_PLL1_OUTPUT_CHANNEL_CTRL, ret);
		break;
	case 4 ... 6:
		ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CTRL);
		if (ret < 0)
			break;
		mask = AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH4_M2 << (ch - 4);
		if (out)
			ret |= mask;
		else
			ret &= ~mask;
		ret = ad9523_write(indio_dev, AD9523_PLL1_OUTPUT_CTRL, ret);
		break;
	case 7 ... 9:
		ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CHANNEL_CTRL);
		if (ret < 0)
			break;
		mask = AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH7_M2 << (ch - 7);
		if (out)
			ret |= mask;
		else
			ret &= ~mask;
		ret = ad9523_write(indio_dev,
				   AD9523_PLL1_OUTPUT_CHANNEL_CTRL, ret);
		break;
	default:
		return 0;
	}

	/* cache the active source (0 = VCO1, 1 = VCO2, 2 = VCXO) */
	st->vco_out_map[ch] = out;

	return ret;
}
+
/*
 * ad9523_set_clock_provider() - pick the clock source that best realises
 * @freq on channel @ch, then program the selection via ad9523_vco_out_map().
 *
 * For channels 0..3 the VCXO is used when it matches @freq exactly; for
 * channels 4..9 the VCO divider (M1 vs M2) with the smaller rounding error
 * after integer division is chosen.
 */
static int ad9523_set_clock_provider(struct iio_dev *indio_dev,
				     unsigned ch, unsigned long freq)
{
	struct ad9523_state *st = iio_priv(indio_dev);
	long tmp1, tmp2;
	bool use_alt_clk_src;

	switch (ch) {
	case 0 ... 3:
		use_alt_clk_src = (freq == st->vco_out_freq[AD9523_VCXO]);
		break;
	case 4 ... 9:
		/* achievable frequency with each source = floor(src/freq)*freq */
		tmp1 = st->vco_out_freq[AD9523_VCO1] / freq;
		tmp2 = st->vco_out_freq[AD9523_VCO2] / freq;
		tmp1 *= freq;
		tmp2 *= freq;
		use_alt_clk_src = (abs(tmp1 - freq) > abs(tmp2 - freq));
		break;
	default:
		/* Ch 10..13: no selectable source, return success */
		return 0;
	}

	return ad9523_vco_out_map(indio_dev, ch, use_alt_clk_src);
}
+
+static int ad9523_store_eeprom(struct iio_dev *indio_dev)
+{
+ int ret, tmp;
+
+ ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL1,
+ AD9523_EEPROM_CTRL1_EEPROM_WRITE_PROT_DIS);
+ if (ret < 0)
+ return ret;
+ ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL2,
+ AD9523_EEPROM_CTRL2_REG2EEPROM);
+ if (ret < 0)
+ return ret;
+
+ tmp = 4;
+ do {
+ msleep(16);
+ ret = ad9523_read(indio_dev,
+ AD9523_EEPROM_DATA_XFER_STATUS);
+ if (ret < 0)
+ return ret;
+ } while ((ret & AD9523_EEPROM_DATA_XFER_IN_PROGRESS) && tmp--);
+
+ ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ad9523_read(indio_dev, AD9523_EEPROM_ERROR_READBACK);
+ if (ret < 0)
+ return ret;
+
+ if (ret & AD9523_EEPROM_ERROR_READBACK_FAIL) {
+ dev_err(&indio_dev->dev, "Verify EEPROM failed");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int ad9523_sync(struct iio_dev *indio_dev)
+{
+ int ret, tmp;
+
+ ret = ad9523_read(indio_dev, AD9523_STATUS_SIGNALS);
+ if (ret < 0)
+ return ret;
+
+ tmp = ret;
+ tmp |= AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL;
+
+ ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS, tmp);
+ if (ret < 0)
+ return ret;
+
+ ad9523_io_update(indio_dev);
+ tmp &= ~AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL;
+
+ ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS, tmp);
+ if (ret < 0)
+ return ret;
+
+ return ad9523_io_update(indio_dev);
+}
+
/*
 * sysfs store handler shared by the "sync_dividers" and "store_eeprom"
 * attributes; the attribute address selects the action.  Any value that
 * parses as boolean false is accepted but performs nothing.
 */
static ssize_t ad9523_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	bool state;
	int ret;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	if (!state)
		return 0;

	/* serialize against other register accesses on this device */
	mutex_lock(&indio_dev->mlock);
	switch ((u32)this_attr->address) {
	case AD9523_SYNC:
		ret = ad9523_sync(indio_dev);
		break;
	case AD9523_EEPROM:
		ret = ad9523_store_eeprom(indio_dev);
		break;
	default:
		ret = -ENODEV;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
+
/*
 * sysfs show handler for the status attributes: reads READBACK_0 and
 * prints the single status bit selected by the attribute address
 * (AD9523_STAT_* above) as "0" or "1".
 */
static ssize_t ad9523_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;

	mutex_lock(&indio_dev->mlock);
	ret = ad9523_read(indio_dev, AD9523_READBACK_0);
	if (ret >= 0) {
		/* on success, ret becomes the sprintf() byte count */
		ret = sprintf(buf, "%d\n", !!(ret & (1 <<
			      (u32)this_attr->address)));
	}
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
+
/*
 * Device-level sysfs attributes: read-only PLL/reference status bits
 * (served by ad9523_show() via READBACK_0) plus the two write-only
 * actions served by ad9523_store().
 */
static IIO_DEVICE_ATTR(pll1_locked, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_PLL1_LD);

static IIO_DEVICE_ATTR(pll2_locked, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_PLL2_LD);

static IIO_DEVICE_ATTR(pll1_reference_clk_a_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_REFA);

static IIO_DEVICE_ATTR(pll1_reference_clk_b_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_REFB);

static IIO_DEVICE_ATTR(pll1_reference_clk_test_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_REF_TEST);

static IIO_DEVICE_ATTR(vcxo_clk_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_VCXO);

static IIO_DEVICE_ATTR(pll2_feedback_clk_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_PLL2_FB_CLK);

static IIO_DEVICE_ATTR(pll2_reference_clk_present, S_IRUGO,
		       ad9523_show,
		       NULL,
		       AD9523_STAT_PLL2_REF_CLK);

static IIO_DEVICE_ATTR(sync_dividers, S_IWUSR,
		       NULL,
		       ad9523_store,
		       AD9523_SYNC);

static IIO_DEVICE_ATTR(store_eeprom, S_IWUSR,
		       NULL,
		       ad9523_store,
		       AD9523_EEPROM);

static struct attribute *ad9523_attributes[] = {
	&iio_dev_attr_sync_dividers.dev_attr.attr,
	&iio_dev_attr_store_eeprom.dev_attr.attr,
	&iio_dev_attr_pll2_feedback_clk_present.dev_attr.attr,
	&iio_dev_attr_pll2_reference_clk_present.dev_attr.attr,
	&iio_dev_attr_pll1_reference_clk_a_present.dev_attr.attr,
	&iio_dev_attr_pll1_reference_clk_b_present.dev_attr.attr,
	&iio_dev_attr_pll1_reference_clk_test_present.dev_attr.attr,
	&iio_dev_attr_vcxo_clk_present.dev_attr.attr,
	&iio_dev_attr_pll1_locked.dev_attr.attr,
	&iio_dev_attr_pll2_locked.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad9523_attribute_group = {
	.attrs = ad9523_attributes,
};
+
+static int ad9523_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad9523_state *st = iio_priv(indio_dev);
+ unsigned code;
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ *val = !(ret & AD9523_CLK_DIST_PWR_DOWN_EN);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_FREQUENCY:
+ *val = st->vco_out_freq[st->vco_out_map[chan->channel]] /
+ AD9523_CLK_DIST_DIV_REV(ret);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PHASE:
+ code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
+ AD9523_CLK_DIST_DIV_REV(ret);
+ *val = code / 1000000;
+ *val2 = (code % 1000000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+};
+
/*
 * ad9523_write_raw() - IIO write hook for the output channels.
 * RAW: non-zero powers the channel up, zero powers it down.
 * FREQUENCY: selects the best clock source, then programs the channel
 * divider (clamped to 1..1024).
 * PHASE: converts the requested radians (val + val2 micro-units) into
 * phase steps of pi/divider and clamps to the 6-bit field.
 * All changes are latched with IO_UPDATE before returning.
 */
static int ad9523_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad9523_state *st = iio_priv(indio_dev);
	unsigned reg;
	int ret, tmp, code;

	/* read-modify-write of the channel distribution register */
	mutex_lock(&indio_dev->mlock);
	ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
	if (ret < 0)
		goto out;

	reg = ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (val)
			reg &= ~AD9523_CLK_DIST_PWR_DOWN_EN;
		else
			reg |= AD9523_CLK_DIST_PWR_DOWN_EN;
		break;
	case IIO_CHAN_INFO_FREQUENCY:
		if (val <= 0) {
			ret = -EINVAL;
			goto out;
		}
		ret = ad9523_set_clock_provider(indio_dev, chan->channel, val);
		if (ret < 0)
			goto out;
		tmp = st->vco_out_freq[st->vco_out_map[chan->channel]] / val;
		tmp = clamp(tmp, 1, 1024);
		/* clear the 10-bit divider field before re-encoding it */
		reg &= ~(0x3FF << 8);
		reg |= AD9523_CLK_DIST_DIV(tmp);
		break;
	case IIO_CHAN_INFO_PHASE:
		code = val * 1000000 + val2 % 1000000;
		tmp = (code * AD9523_CLK_DIST_DIV_REV(ret)) / 3141592;
		tmp = clamp(tmp, 0, 63);
		reg &= ~AD9523_CLK_DIST_DIV_PHASE(~0);
		reg |= AD9523_CLK_DIST_DIV_PHASE(tmp);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ad9523_write(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel),
			   reg);
	if (ret < 0)
		goto out;

	ad9523_io_update(indio_dev);
out:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
+
/*
 * debugfs register access hook: raw single-byte reads and writes
 * (the address is forced to a 1-byte transfer via AD9523_R1B).
 * Writes are latched immediately with IO_UPDATE.
 */
static int ad9523_reg_access(struct iio_dev *indio_dev,
			     unsigned reg, unsigned writeval,
			     unsigned *readval)
{
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (readval == NULL) {
		ret = ad9523_write(indio_dev, reg | AD9523_R1B, writeval);
		ad9523_io_update(indio_dev);
	} else {
		ret = ad9523_read(indio_dev, reg | AD9523_R1B);
		if (ret < 0)
			goto out_unlock;
		*readval = ret;
		ret = 0;
	}

out_unlock:
	mutex_unlock(&indio_dev->mlock);

	return ret;
}
+
/* IIO callback table for the AD9523 device. */
static const struct iio_info ad9523_info = {
	.read_raw = &ad9523_read_raw,
	.write_raw = &ad9523_write_raw,
	.debugfs_reg_access = &ad9523_reg_access,
	.attrs = &ad9523_attribute_group,
	.driver_module = THIS_MODULE,
};
+
/*
 * ad9523_setup() - full device bring-up from platform data:
 * soft reset, PLL1/PLL2 programming, per-channel distribution setup and
 * IIO channel-spec population, then power-up and a final IO_UPDATE.
 * The register write order follows the device's programming sequence
 * and must not be rearranged.  Returns 0 or a negative errno.
 */
static int ad9523_setup(struct iio_dev *indio_dev)
{
	struct ad9523_state *st = iio_priv(indio_dev);
	struct ad9523_platform_data *pdata = st->pdata;
	struct ad9523_channel_spec *chan;
	unsigned long active_mask = 0;
	int ret, i;

	/* soft reset; keep SDO active unless wired for 3-wire SPI */
	ret = ad9523_write(indio_dev, AD9523_SERIAL_PORT_CONFIG,
			   AD9523_SER_CONF_SOFT_RESET |
			   (st->spi->mode & SPI_3WIRE ? 0 :
			    AD9523_SER_CONF_SDO_ACTIVE));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_READBACK_CTRL,
			   AD9523_READBACK_CTRL_READ_BUFFERED);
	if (ret < 0)
		return ret;

	ret = ad9523_io_update(indio_dev);
	if (ret < 0)
		return ret;

	/*
	 * PLL1 Setup
	 */
	ret = ad9523_write(indio_dev, AD9523_PLL1_REF_A_DIVIDER,
			   pdata->refa_r_div);
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_REF_B_DIVIDER,
			   pdata->refb_r_div);
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_FEEDBACK_DIVIDER,
			   pdata->pll1_feedback_div);
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_CHARGE_PUMP_CTRL,
			   AD9523_PLL1_CHARGE_PUMP_CURRENT_nA(pdata->
					pll1_charge_pump_current_nA) |
			   AD9523_PLL1_CHARGE_PUMP_MODE_NORMAL |
			   AD9523_PLL1_BACKLASH_PW_MIN);
	if (ret < 0)
		return ret;

	/*
	 * NOTE(review): the single-ended receiver enables (REFA_RCV_EN /
	 * REFB_RCV_EN) are gated on the *differential* pdata flags here;
	 * confirm against the intended pin configuration.
	 */
	ret = ad9523_write(indio_dev, AD9523_PLL1_INPUT_RECEIVERS_CTRL,
			   AD_IF(refa_diff_rcv_en, AD9523_PLL1_REFA_RCV_EN) |
			   AD_IF(refb_diff_rcv_en, AD9523_PLL1_REFB_RCV_EN) |
			   AD_IF(osc_in_diff_en, AD9523_PLL1_OSC_IN_DIFF_EN) |
			   AD_IF(osc_in_cmos_neg_inp_en,
				 AD9523_PLL1_OSC_IN_CMOS_NEG_INP_EN) |
			   AD_IF(refa_diff_rcv_en, AD9523_PLL1_REFA_DIFF_RCV_EN) |
			   AD_IF(refb_diff_rcv_en, AD9523_PLL1_REFB_DIFF_RCV_EN));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_REF_CTRL,
			   AD_IF(zd_in_diff_en, AD9523_PLL1_ZD_IN_DIFF_EN) |
			   AD_IF(zd_in_cmos_neg_inp_en,
				 AD9523_PLL1_ZD_IN_CMOS_NEG_INP_EN) |
			   AD_IF(zero_delay_mode_internal_en,
				 AD9523_PLL1_ZERO_DELAY_MODE_INT) |
			   AD_IF(osc_in_feedback_en, AD9523_PLL1_OSC_IN_PLL_FEEDBACK_EN) |
			   AD_IF(refa_cmos_neg_inp_en, AD9523_PLL1_REFA_CMOS_NEG_INP_EN) |
			   AD_IF(refb_cmos_neg_inp_en, AD9523_PLL1_REFB_CMOS_NEG_INP_EN));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_MISC_CTRL,
			   AD9523_PLL1_REFB_INDEP_DIV_CTRL_EN |
			   AD9523_PLL1_REF_MODE(pdata->ref_mode));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL1_LOOP_FILTER_CTRL,
			   AD9523_PLL1_LOOP_FILTER_RZERO(pdata->pll1_loop_filter_rzero));
	if (ret < 0)
		return ret;
	/*
	 * PLL2 Setup
	 */

	ret = ad9523_write(indio_dev, AD9523_PLL2_CHARGE_PUMP,
			   AD9523_PLL2_CHARGE_PUMP_CURRENT_nA(pdata->
					pll2_charge_pump_current_nA));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL2_FEEDBACK_DIVIDER_AB,
			   AD9523_PLL2_FB_NDIV_A_CNT(pdata->pll2_ndiv_a_cnt) |
			   AD9523_PLL2_FB_NDIV_B_CNT(pdata->pll2_ndiv_b_cnt));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL2_CTRL,
			   AD9523_PLL2_CHARGE_PUMP_MODE_NORMAL |
			   AD9523_PLL2_BACKLASH_CTRL_EN |
			   AD_IF(pll2_freq_doubler_en, AD9523_PLL2_FREQ_DOUBLER_EN));
	if (ret < 0)
		return ret;

	/* VCO = VCXO * (doubler) / R2 * N, with N = 4*B + A */
	st->vco_freq = (pdata->vcxo_freq * (pdata->pll2_freq_doubler_en ? 2 : 1)
			/ pdata->pll2_r2_div) * AD9523_PLL2_FB_NDIV(pdata->
			pll2_ndiv_a_cnt, pdata->pll2_ndiv_b_cnt);

	ret = ad9523_write(indio_dev, AD9523_PLL2_VCO_CTRL,
			   AD9523_PLL2_VCO_CALIBRATE);
	if (ret < 0)
		return ret;

	/* power down whichever of the M1/M2 dividers is unused (value 0) */
	ret = ad9523_write(indio_dev, AD9523_PLL2_VCO_DIVIDER,
			   AD9523_PLL2_VCO_DIV_M1(pdata->pll2_vco_diff_m1) |
			   AD9523_PLL2_VCO_DIV_M2(pdata->pll2_vco_diff_m2) |
			   AD_IFE(pll2_vco_diff_m1, 0,
				  AD9523_PLL2_VCO_DIV_M1_PWR_DOWN_EN) |
			   AD_IFE(pll2_vco_diff_m2, 0,
				  AD9523_PLL2_VCO_DIV_M2_PWR_DOWN_EN));
	if (ret < 0)
		return ret;

	if (pdata->pll2_vco_diff_m1)
		st->vco_out_freq[AD9523_VCO1] =
			st->vco_freq / pdata->pll2_vco_diff_m1;

	if (pdata->pll2_vco_diff_m2)
		st->vco_out_freq[AD9523_VCO2] =
			st->vco_freq / pdata->pll2_vco_diff_m2;

	st->vco_out_freq[AD9523_VCXO] = pdata->vcxo_freq;

	ret = ad9523_write(indio_dev, AD9523_PLL2_R2_DIVIDER,
			   AD9523_PLL2_R2_DIVIDER_VAL(pdata->pll2_r2_div));
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_PLL2_LOOP_FILTER_CTRL,
			   AD9523_PLL2_LOOP_FILTER_CPOLE1(pdata->cpole1) |
			   AD9523_PLL2_LOOP_FILTER_RZERO(pdata->rzero) |
			   AD9523_PLL2_LOOP_FILTER_RPOLE2(pdata->rpole2) |
			   AD_IF(rzero_bypass_en,
				 AD9523_PLL2_LOOP_FILTER_RZERO_BYPASS_EN));
	if (ret < 0)
		return ret;

	/* program each platform-described channel and build its iio_chan_spec */
	for (i = 0; i < pdata->num_channels; i++) {
		chan = &pdata->channels[i];
		if (chan->channel_num < AD9523_NUM_CHAN) {
			__set_bit(chan->channel_num, &active_mask);
			ret = ad9523_write(indio_dev,
				AD9523_CHANNEL_CLOCK_DIST(chan->channel_num),
				AD9523_CLK_DIST_DRIVER_MODE(chan->driver_mode) |
				AD9523_CLK_DIST_DIV(chan->channel_divider) |
				AD9523_CLK_DIST_DIV_PHASE(chan->divider_phase) |
				(chan->sync_ignore_en ?
					AD9523_CLK_DIST_IGNORE_SYNC_EN : 0) |
				(chan->divider_output_invert_en ?
					AD9523_CLK_DIST_INV_DIV_OUTPUT_EN : 0) |
				(chan->low_power_mode_en ?
					AD9523_CLK_DIST_LOW_PWR_MODE_EN : 0) |
				(chan->output_dis ?
					AD9523_CLK_DIST_PWR_DOWN_EN : 0));
			if (ret < 0)
				return ret;

			ret = ad9523_vco_out_map(indio_dev, chan->channel_num,
					   chan->use_alt_clock_src);
			if (ret < 0)
				return ret;

			st->ad9523_channels[i].type = IIO_ALTVOLTAGE;
			st->ad9523_channels[i].output = 1;
			st->ad9523_channels[i].indexed = 1;
			st->ad9523_channels[i].channel = chan->channel_num;
			st->ad9523_channels[i].extend_name =
				chan->extended_name;
			st->ad9523_channels[i].info_mask_separate =
				BIT(IIO_CHAN_INFO_RAW) |
				BIT(IIO_CHAN_INFO_PHASE) |
				BIT(IIO_CHAN_INFO_FREQUENCY);
		}
	}

	/* tristate and power down every channel the platform did not claim */
	for_each_clear_bit(i, &active_mask, AD9523_NUM_CHAN)
		ad9523_write(indio_dev,
			     AD9523_CHANNEL_CLOCK_DIST(i),
			     AD9523_CLK_DIST_DRIVER_MODE(TRISTATE) |
			     AD9523_CLK_DIST_PWR_DOWN_EN);

	ret = ad9523_write(indio_dev, AD9523_POWER_DOWN_CTRL, 0);
	if (ret < 0)
		return ret;

	ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS,
			   AD9523_STATUS_MONITOR_01_PLL12_LOCKED);
	if (ret < 0)
		return ret;

	ret = ad9523_io_update(indio_dev);
	if (ret < 0)
		return ret;

	return 0;
}
+
/*
 * ad9523_probe() - SPI probe: allocate the IIO device, enable the
 * optional "vcc" regulator, run the full register bring-up and register
 * with the IIO core.  On any failure after the regulator was enabled it
 * is disabled again before returning.
 */
static int ad9523_probe(struct spi_device *spi)
{
	struct ad9523_platform_data *pdata = spi->dev.platform_data;
	struct iio_dev *indio_dev;
	struct ad9523_state *st;
	int ret;

	/* this driver is configured exclusively through platform data */
	if (!pdata) {
		dev_err(&spi->dev, "no platform data?\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	/* supply is optional: a failed get is tolerated, stored as ERR_PTR */
	st->reg = devm_regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			return ret;
	}

	spi_set_drvdata(spi, indio_dev);
	st->spi = spi;
	st->pdata = pdata;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = (pdata->name[0] != 0) ? pdata->name :
			  spi_get_device_id(spi)->name;
	indio_dev->info = &ad9523_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = st->ad9523_channels;
	indio_dev->num_channels = pdata->num_channels;

	ret = ad9523_setup(indio_dev);
	if (ret < 0)
		goto error_disable_reg;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	dev_info(&spi->dev, "probed %s\n", indio_dev->name);

	return 0;

error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);

	return ret;
}
+
/*
 * SPI remove: unregister from the IIO core first, then drop the optional
 * supply; device memory itself is devm-managed.
 */
static int ad9523_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad9523_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);

	return 0;
}
+
/* Supported SPI device ids; only the -1 variant is listed here. */
static const struct spi_device_id ad9523_id[] = {
	{"ad9523-1", 9523},
	{}
};
MODULE_DEVICE_TABLE(spi, ad9523_id);

static struct spi_driver ad9523_driver = {
	.driver = {
		.name	= "ad9523",
		.owner	= THIS_MODULE,
	},
	.probe		= ad9523_probe,
	.remove		= ad9523_remove,
	.id_table	= ad9523_id,
};
module_spi_driver(ad9523_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9523 CLOCKDIST/PLL");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
new file mode 100644
index 00000000000000..63a25d9e120486
--- /dev/null
+++ b/drivers/iio/frequency/adf4350.c
@@ -0,0 +1,642 @@
+/*
+ * ADF4350/ADF4351 SPI Wideband Synthesizer driver
+ *
+ * Copyright 2012-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/gcd.h>
+#include <linux/gpio.h>
+#include <asm/div64.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/frequency/adf4350.h>
+
+enum {
+ ADF4350_FREQ,
+ ADF4350_FREQ_REFIN,
+ ADF4350_FREQ_RESOLUTION,
+ ADF4350_PWRDOWN,
+};
+
+struct adf4350_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ struct adf4350_platform_data *pdata;
+ struct clk *clk;
+ unsigned long clkin;
+ unsigned long chspc; /* Channel Spacing */
+ unsigned long fpfd; /* Phase Frequency Detector */
+ unsigned long min_out_freq;
+ unsigned r0_fract;
+ unsigned r0_int;
+ unsigned r1_mod;
+ unsigned r4_rf_div_sel;
+ unsigned long regs[6];
+ unsigned long regs_hw[6];
+ unsigned long long freq_req;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be32 val ____cacheline_aligned;
+};
+
+static struct adf4350_platform_data default_pdata = {
+ .channel_spacing = 10000,
+ .r2_user_settings = ADF4350_REG2_PD_POLARITY_POS |
+ ADF4350_REG2_CHARGE_PUMP_CURR_uA(2500),
+ .r3_user_settings = ADF4350_REG3_12BIT_CLKDIV_MODE(0),
+ .r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
+ ADF4350_REG4_MUTE_TILL_LOCK_EN,
+ .gpio_lock_detect = -1,
+};
+
+/*
+ * Write every dirty shadow register out to the part, walking REG5 down
+ * to REG0.  REG0 is additionally rewritten whenever REG1 or REG4
+ * changed: with ADF4350_REG2_DOUBLE_BUFF_EN set (see adf4350_set_freq)
+ * those registers are double buffered and presumably only latch on a
+ * REG0 write — TODO confirm against the datasheet.
+ *
+ * Returns 0 on success or a negative SPI error; st->regs_hw tracks
+ * what has actually reached the hardware.
+ */
+static int adf4350_sync_config(struct adf4350_state *st)
+{
+	int ret, i, doublebuf = 0;
+
+	for (i = ADF4350_REG5; i >= ADF4350_REG0; i--) {
+		if ((st->regs_hw[i] != st->regs[i]) ||
+			((i == ADF4350_REG0) && doublebuf)) {
+
+			switch (i) {
+			case ADF4350_REG1:
+			case ADF4350_REG4:
+				doublebuf = 1;
+				break;
+			}
+
+			/* Control bits [2:0] select the destination register. */
+			st->val = cpu_to_be32(st->regs[i] | i);
+			ret = spi_write(st->spi, &st->val, 4);
+			if (ret < 0)
+				return ret;
+			/* Cache what the hardware now holds. */
+			st->regs_hw[i] = st->regs[i];
+			dev_dbg(&st->spi->dev, "[%d] 0x%X\n",
+				i, (u32)st->regs[i] | i);
+		}
+	}
+	return 0;
+}
+
+/*
+ * debugfs register access hook.
+ *
+ * Writes update the shadow register (with the control bits [2:0]
+ * masked off, since adf4350_sync_config() ORs the register index back
+ * in) and are pushed to the part.  Reads return the cached hardware
+ * value from regs_hw — nothing is read back over SPI here.
+ */
+static int adf4350_reg_access(struct iio_dev *indio_dev,
+			      unsigned reg, unsigned writeval,
+			      unsigned *readval)
+{
+	struct adf4350_state *st = iio_priv(indio_dev);
+	int ret;
+
+	if (reg > ADF4350_REG5)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	if (readval == NULL) {
+		st->regs[reg] = writeval & ~(BIT(0) | BIT(1) | BIT(2));
+		ret = adf4350_sync_config(st);
+	} else {
+		*readval = st->regs_hw[reg];
+		ret = 0;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/*
+ * Increase the reference divider R until the phase frequency detector
+ * rate drops to ADF4350_MAX_FREQ_PFD or below, accounting for the
+ * optional reference doubler and reference divide-by-2 stages.
+ *
+ * Returns the new R counter (always > the r_cnt passed in) and leaves
+ * the resulting PFD frequency in st->fpfd as a side effect.
+ */
+static int adf4350_tune_r_cnt(struct adf4350_state *st, unsigned short r_cnt)
+{
+	struct adf4350_platform_data *pdata = st->pdata;
+
+	do {
+		r_cnt++;
+		st->fpfd = (st->clkin * (pdata->ref_doubler_en ? 2 : 1)) /
+			   (r_cnt * (pdata->ref_div2_en ? 2 : 1));
+	} while (st->fpfd > ADF4350_MAX_FREQ_PFD);
+
+	return r_cnt;
+}
+
+/*
+ * Compute and program all PLL settings for the requested output
+ * frequency @freq (Hz).
+ *
+ * Outline:
+ *  - choose the prescaler (and its associated minimum INT value mdiv)
+ *    from the frequency,
+ *  - double @freq up into the VCO range, recording the RF divider
+ *    selection that restores the output frequency,
+ *  - search for an R / MOD / INT / FRACT combination meeting the
+ *    channel spacing and the part's counter limits,
+ *  - reduce FRACT/MOD by their GCD and write out REG0..REG5.
+ *
+ * Returns 0 on success, -EINVAL if @freq is out of range, or a
+ * negative SPI error from adf4350_sync_config().
+ */
+static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
+{
+	struct adf4350_platform_data *pdata = st->pdata;
+	u64 tmp;
+	u32 div_gcd, prescaler, chspc;
+	u16 mdiv, r_cnt = 0;
+	u8 band_sel_div;
+
+	if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
+		return -EINVAL;
+
+	/* High VCO rates need the 8/9 prescaler; it raises the min INT. */
+	if (freq > ADF4350_MAX_FREQ_45_PRESC) {
+		prescaler = ADF4350_REG1_PRESCALER;
+		mdiv = 75;
+	} else {
+		prescaler = 0;
+		mdiv = 23;
+	}
+
+	st->r4_rf_div_sel = 0;
+
+	/* Shift into the VCO range; the RF divider undoes this at the pin. */
+	while (freq < ADF4350_MIN_VCO_FREQ) {
+		freq <<= 1;
+		st->r4_rf_div_sel++;
+	}
+
+	/*
+	 * Allow a predefined reference division factor
+	 * if not set, compute our own
+	 */
+	if (pdata->ref_div_factor)
+		r_cnt = pdata->ref_div_factor - 1;
+
+	chspc = st->chspc;
+
+	do {
+		do {
+			do {
+				r_cnt = adf4350_tune_r_cnt(st, r_cnt);
+				st->r1_mod = st->fpfd / chspc;
+				if (r_cnt > ADF4350_MAX_R_CNT) {
+					/* try higher spacing values */
+					chspc++;
+					r_cnt = 0;
+				}
+			} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
+		} while (r_cnt == 0);
+
+		tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
+		do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
+		st->r0_fract = do_div(tmp, st->r1_mod);
+		st->r0_int = tmp;
+	} while (mdiv > st->r0_int);
+
+	band_sel_div = DIV_ROUND_UP(st->fpfd, ADF4350_MAX_BANDSEL_CLK);
+
+	/* Reduce FRACT/MOD to lowest terms; integer-N uses FRACT=0, MOD=1. */
+	if (st->r0_fract && st->r1_mod) {
+		div_gcd = gcd(st->r1_mod, st->r0_fract);
+		st->r1_mod /= div_gcd;
+		st->r0_fract /= div_gcd;
+	} else {
+		st->r0_fract = 0;
+		st->r1_mod = 1;
+	}
+
+	dev_dbg(&st->spi->dev, "VCO: %llu Hz, PFD %lu Hz\n"
+		"REF_DIV %d, R0_INT %d, R0_FRACT %d\n"
+		"R1_MOD %d, RF_DIV %d\nPRESCALER %s, BAND_SEL_DIV %d\n",
+		freq, st->fpfd, r_cnt, st->r0_int, st->r0_fract, st->r1_mod,
+		1 << st->r4_rf_div_sel, prescaler ? "8/9" : "4/5",
+		band_sel_div);
+
+	st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) |
+				 ADF4350_REG0_FRACT(st->r0_fract);
+
+	st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) |
+				 ADF4350_REG1_MOD(st->r1_mod) |
+				 prescaler;
+
+	/* User-supplied r2/r3/r4 settings are masked to their valid bits. */
+	st->regs[ADF4350_REG2] =
+		ADF4350_REG2_10BIT_R_CNT(r_cnt) |
+		ADF4350_REG2_DOUBLE_BUFF_EN |
+		(pdata->ref_doubler_en ? ADF4350_REG2_RMULT2_EN : 0) |
+		(pdata->ref_div2_en ? ADF4350_REG2_RDIV2_EN : 0) |
+		(pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
+		ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
+		ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
+		ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
+
+	st->regs[ADF4350_REG3] = pdata->r3_user_settings &
+				 (ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
+				 ADF4350_REG3_12BIT_CLKDIV_MODE(0x3) |
+				 ADF4350_REG3_12BIT_CSR_EN |
+				 ADF4351_REG3_CHARGE_CANCELLATION_EN |
+				 ADF4351_REG3_ANTI_BACKLASH_3ns_EN |
+				 ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH);
+
+	st->regs[ADF4350_REG4] =
+		ADF4350_REG4_FEEDBACK_FUND |
+		ADF4350_REG4_RF_DIV_SEL(st->r4_rf_div_sel) |
+		ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(band_sel_div) |
+		ADF4350_REG4_RF_OUT_EN |
+		(pdata->r4_user_settings &
+		(ADF4350_REG4_OUTPUT_PWR(0x3) |
+		ADF4350_REG4_AUX_OUTPUT_PWR(0x3) |
+		ADF4350_REG4_AUX_OUTPUT_EN |
+		ADF4350_REG4_AUX_OUTPUT_FUND |
+		ADF4350_REG4_MUTE_TILL_LOCK_EN));
+
+	st->regs[ADF4350_REG5] = ADF4350_REG5_LD_PIN_MODE_DIGITAL;
+	/* Remember the request so REFIN changes can re-derive the setup. */
+	st->freq_req = freq;
+
+	return adf4350_sync_config(st);
+}
+
+/*
+ * sysfs extended-attribute writer for the output channel.
+ *
+ * @private selects the attribute being written (output frequency,
+ * reference frequency, channel spacing, or powerdown).  All paths are
+ * serialized via indio_dev->mlock.  Returns the number of bytes
+ * consumed on success or a negative error code.
+ */
+static ssize_t adf4350_write(struct iio_dev *indio_dev,
+				    uintptr_t private,
+				    const struct iio_chan_spec *chan,
+				    const char *buf, size_t len)
+{
+	struct adf4350_state *st = iio_priv(indio_dev);
+	unsigned long long readin;
+	unsigned long tmp;
+	int ret;
+
+	ret = kstrtoull(buf, 10, &readin);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+	switch ((u32)private) {
+	case ADF4350_FREQ:
+		ret = adf4350_set_freq(st, readin);
+		break;
+	case ADF4350_FREQ_REFIN:
+		if (readin > ADF4350_MAX_FREQ_REFIN) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (st->clk) {
+			/* Only accept rates the clock source can hit exactly. */
+			tmp = clk_round_rate(st->clk, readin);
+			if (tmp != readin) {
+				ret = -EINVAL;
+				break;
+			}
+			ret = clk_set_rate(st->clk, tmp);
+			if (ret < 0)
+				break;
+		}
+		st->clkin = readin;
+		/* Re-derive the PLL settings against the new reference. */
+		ret = adf4350_set_freq(st, st->freq_req);
+		break;
+	case ADF4350_FREQ_RESOLUTION:
+		if (readin == 0)
+			ret = -EINVAL;
+		else
+			st->chspc = readin;
+		break;
+	case ADF4350_PWRDOWN:
+		if (readin)
+			st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+		else
+			st->regs[ADF4350_REG2] &= ~ADF4350_REG2_POWER_DOWN_EN;
+
+		/*
+		 * Propagate SPI failures to the caller instead of
+		 * silently reporting the write as successful.
+		 */
+		ret = adf4350_sync_config(st);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+/*
+ * sysfs extended-attribute reader, the counterpart of adf4350_write().
+ *
+ * ADF4350_FREQ recomputes the actually synthesized output frequency
+ * from the programmed INT/FRACT/MOD and RF divider values rather than
+ * echoing the requested one.  When a lock-detect GPIO is wired up and
+ * reads low, -EBUSY is returned to flag an unlocked PLL.
+ */
+static ssize_t adf4350_read(struct iio_dev *indio_dev,
+			    uintptr_t private,
+			    const struct iio_chan_spec *chan,
+			    char *buf)
+{
+	struct adf4350_state *st = iio_priv(indio_dev);
+	unsigned long long val;
+	int ret = 0;
+
+	mutex_lock(&indio_dev->mlock);
+	switch ((u32)private) {
+	case ADF4350_FREQ:
+		/* f_out = (INT + FRACT/MOD) * f_PFD / RF_DIV */
+		val = (u64)((st->r0_int * st->r1_mod) + st->r0_fract) *
+			(u64)st->fpfd;
+		do_div(val, st->r1_mod * (1 << st->r4_rf_div_sel));
+		/* PLL unlocked? return error */
+		if (gpio_is_valid(st->pdata->gpio_lock_detect))
+			if (!gpio_get_value(st->pdata->gpio_lock_detect)) {
+				dev_dbg(&st->spi->dev, "PLL un-locked\n");
+				ret = -EBUSY;
+			}
+		break;
+	case ADF4350_FREQ_REFIN:
+		/* Refresh the cached reference rate from the clock source. */
+		if (st->clk)
+			st->clkin = clk_get_rate(st->clk);
+
+		val = st->clkin;
+		break;
+	case ADF4350_FREQ_RESOLUTION:
+		val = st->chspc;
+		break;
+	case ADF4350_PWRDOWN:
+		val = !!(st->regs[ADF4350_REG2] & ADF4350_REG2_POWER_DOWN_EN);
+		break;
+	default:
+		ret = -EINVAL;
+		val = 0;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret < 0 ? ret : sprintf(buf, "%llu\n", val);
+}
+
+#define _ADF4350_EXT_INFO(_name, _ident) { \
+ .name = _name, \
+ .read = adf4350_read, \
+ .write = adf4350_write, \
+ .private = _ident, \
+ .shared = IIO_SEPARATE, \
+}
+
+static const struct iio_chan_spec_ext_info adf4350_ext_info[] = {
+ /* Ideally we use IIO_CHAN_INFO_FREQUENCY, but there are
+ * values > 2^32 in order to support the entire frequency range
+ * in Hz. Using scale is a bit ugly.
+ */
+ _ADF4350_EXT_INFO("frequency", ADF4350_FREQ),
+ _ADF4350_EXT_INFO("frequency_resolution", ADF4350_FREQ_RESOLUTION),
+ _ADF4350_EXT_INFO("refin_frequency", ADF4350_FREQ_REFIN),
+ _ADF4350_EXT_INFO("powerdown", ADF4350_PWRDOWN),
+ { },
+};
+
+static const struct iio_chan_spec adf4350_chan = {
+ .type = IIO_ALTVOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .ext_info = adf4350_ext_info,
+};
+
+static const struct iio_info adf4350_info = {
+ .debugfs_reg_access = &adf4350_reg_access,
+ .driver_module = THIS_MODULE,
+};
+
+#ifdef CONFIG_OF
+/*
+ * Build an adf4350_platform_data from device tree properties.
+ *
+ * Every property is optional: the "tmp = <default>;" pattern before
+ * each of_property_read_u32() keeps the default when the property is
+ * absent (of_property_read_u32() leaves tmp untouched on failure).
+ * The r2/r3/r4 user-settings words are assembled bit by bit from the
+ * boolean and integer properties.  Returns NULL on allocation failure.
+ */
+static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct adf4350_platform_data *pdata;
+	unsigned int tmp;
+	int ret;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "could not allocate memory for platform data\n");
+		return NULL;
+	}
+
+	strncpy(&pdata->name[0], np->name, SPI_NAME_SIZE - 1);
+
+	tmp = 10000;
+	of_property_read_u32(np, "adi,channel-spacing", &tmp);
+	pdata->channel_spacing = tmp;
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,power-up-frequency", &tmp);
+	pdata->power_up_frequency = tmp;
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,reference-div-factor", &tmp);
+	pdata->ref_div_factor = tmp;
+
+	/* First GPIO in the node is the (optional) lock-detect pin. */
+	ret = of_get_gpio(np, 0);
+	if (ret < 0)
+		pdata->gpio_lock_detect = -1;
+	else
+		pdata->gpio_lock_detect = ret;
+
+	pdata->ref_doubler_en = of_property_read_bool(np,
+			"adi,reference-doubler-enable");
+	pdata->ref_div2_en = of_property_read_bool(np,
+			"adi,reference-div2-enable");
+
+	/* r2_user_settings */
+	pdata->r2_user_settings = of_property_read_bool(np,
+			"adi,phase-detector-polarity-positive-enable") ?
+			ADF4350_REG2_PD_POLARITY_POS : 0;
+	pdata->r2_user_settings |= of_property_read_bool(np,
+			"adi,lock-detect-precision-6ns-enable") ?
+			ADF4350_REG2_LDP_6ns : 0;
+	pdata->r2_user_settings |= of_property_read_bool(np,
+			"adi,lock-detect-function-integer-n-enable") ?
+			ADF4350_REG2_LDF_INT_N : 0;
+
+	tmp = 2500;
+	of_property_read_u32(np, "adi,charge-pump-current", &tmp);
+	pdata->r2_user_settings |= ADF4350_REG2_CHARGE_PUMP_CURR_uA(tmp);
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,muxout-select", &tmp);
+	pdata->r2_user_settings |= ADF4350_REG2_MUXOUT(tmp);
+
+	pdata->r2_user_settings |= of_property_read_bool(np,
+			"adi,low-spur-mode-enable") ?
+			ADF4350_REG2_NOISE_MODE(0x3) : 0;
+
+	/* r3_user_settings */
+
+	pdata->r3_user_settings = of_property_read_bool(np,
+			"adi,cycle-slip-reduction-enable") ?
+			ADF4350_REG3_12BIT_CSR_EN : 0;
+	pdata->r3_user_settings |= of_property_read_bool(np,
+			"adi,charge-cancellation-enable") ?
+			ADF4351_REG3_CHARGE_CANCELLATION_EN : 0;
+
+	pdata->r3_user_settings |= of_property_read_bool(np,
+			"adi,anti-backlash-3ns-enable") ?
+			ADF4351_REG3_ANTI_BACKLASH_3ns_EN : 0;
+	pdata->r3_user_settings |= of_property_read_bool(np,
+			"adi,band-select-clock-mode-high-enable") ?
+			ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH : 0;
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,12bit-clk-divider", &tmp);
+	pdata->r3_user_settings |= ADF4350_REG3_12BIT_CLKDIV(tmp);
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,clk-divider-mode", &tmp);
+	pdata->r3_user_settings |= ADF4350_REG3_12BIT_CLKDIV_MODE(tmp);
+
+	/* r4_user_settings */
+
+	pdata->r4_user_settings = of_property_read_bool(np,
+			"adi,aux-output-enable") ?
+			ADF4350_REG4_AUX_OUTPUT_EN : 0;
+	pdata->r4_user_settings |= of_property_read_bool(np,
+			"adi,aux-output-fundamental-enable") ?
+			ADF4350_REG4_AUX_OUTPUT_FUND : 0;
+	pdata->r4_user_settings |= of_property_read_bool(np,
+			"adi,mute-till-lock-enable") ?
+			ADF4350_REG4_MUTE_TILL_LOCK_EN : 0;
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,output-power", &tmp);
+	pdata->r4_user_settings |= ADF4350_REG4_OUTPUT_PWR(tmp);
+
+	tmp = 0;
+	of_property_read_u32(np, "adi,aux-output-power", &tmp);
+	pdata->r4_user_settings |= ADF4350_REG4_AUX_OUTPUT_PWR(tmp);
+
+	return pdata;
+}
+#else
+/* Without CONFIG_OF there is no device tree to parse. */
+static
+struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+/*
+ * SPI probe.
+ *
+ * Platform data is taken from the device tree node if present,
+ * otherwise from spi->dev.platform_data, otherwise from the built-in
+ * defaults.  An external "clkin" clock is only acquired when pdata
+ * does not supply a fixed clkin rate.  Device-managed (devm_*)
+ * resources need no explicit unwinding; only the clock and regulator
+ * enables are rolled back on the error paths.
+ */
+static int adf4350_probe(struct spi_device *spi)
+{
+	struct adf4350_platform_data *pdata;
+	struct iio_dev *indio_dev;
+	struct adf4350_state *st;
+	struct clk *clk = NULL;
+	int ret;
+
+	if (spi->dev.of_node) {
+		pdata = adf4350_parse_dt(&spi->dev);
+		if (pdata == NULL)
+			return -EINVAL;
+	} else {
+		pdata = spi->dev.platform_data;
+	}
+
+	if (!pdata) {
+		dev_warn(&spi->dev, "no platform data? using default\n");
+		pdata = &default_pdata;
+	}
+
+	if (!pdata->clkin) {
+		clk = devm_clk_get(&spi->dev, "clkin");
+		if (IS_ERR(clk))
+			return -EPROBE_DEFER;
+
+		ret = clk_prepare_enable(clk);
+		if (ret < 0)
+			return ret;
+	}
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_clk;
+	}
+
+	st = iio_priv(indio_dev);
+
+	/*
+	 * The "vcc" supply is treated as optional.
+	 * NOTE(review): any devm_regulator_get() error (including
+	 * -EPROBE_DEFER) is silently treated as "no supply" — confirm
+	 * this is intended.
+	 */
+	st->reg = devm_regulator_get(&spi->dev, "vcc");
+	if (!IS_ERR(st->reg)) {
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_disable_clk;
+	}
+
+	spi_set_drvdata(spi, indio_dev);
+	st->spi = spi;
+	st->pdata = pdata;
+
+	indio_dev->dev.parent = &spi->dev;
+	/* Prefer the DT/pdata-supplied name, fall back to the SPI id. */
+	indio_dev->name = (pdata->name[0] != 0) ? pdata->name :
+		spi_get_device_id(spi)->name;
+
+	indio_dev->info = &adf4350_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = &adf4350_chan;
+	indio_dev->num_channels = 1;
+
+	st->chspc = pdata->channel_spacing;
+	if (clk) {
+		st->clk = clk;
+		st->clkin = clk_get_rate(clk);
+	} else {
+		st->clkin = pdata->clkin;
+	}
+
+	/* The ADF4351 (driver_data == 4351) reaches lower output rates. */
+	st->min_out_freq = spi_get_device_id(spi)->driver_data == 4351 ?
+		ADF4351_MIN_OUT_FREQ : ADF4350_MIN_OUT_FREQ;
+
+	/* Mark every shadow register dirty so the first sync writes all. */
+	memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
+
+	if (gpio_is_valid(pdata->gpio_lock_detect)) {
+		ret = devm_gpio_request(&spi->dev, pdata->gpio_lock_detect,
+					indio_dev->name);
+		if (ret) {
+			dev_err(&spi->dev, "fail to request lock detect GPIO-%d",
+				pdata->gpio_lock_detect);
+			goto error_disable_reg;
+		}
+		gpio_direction_input(pdata->gpio_lock_detect);
+	}
+
+	if (pdata->power_up_frequency) {
+		ret = adf4350_set_freq(st, pdata->power_up_frequency);
+		if (ret)
+			goto error_disable_reg;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	return 0;
+
+error_disable_reg:
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+error_disable_clk:
+	if (clk)
+		clk_disable_unprepare(clk);
+
+	return ret;
+}
+
+/*
+ * SPI remove: put the part into power-down, unregister the IIO device
+ * and release the clock and regulator in reverse probe order.
+ */
+static int adf4350_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adf4350_state *st = iio_priv(indio_dev);
+	struct regulator *reg = st->reg;
+
+	/* Power the synthesizer down before tearing the driver down. */
+	st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+	adf4350_sync_config(st);
+
+	iio_device_unregister(indio_dev);
+
+	if (st->clk)
+		clk_disable_unprepare(st->clk);
+
+	/* Only disable the supply when probe actually obtained one. */
+	if (!IS_ERR(reg)) {
+		regulator_disable(reg);
+	}
+
+	return 0;
+}
+
+/*
+ * SPI id table; driver_data carries the part number, used in probe to
+ * select the per-part minimum output frequency.
+ */
+static const struct spi_device_id adf4350_id[] = {
+	{"adf4350", 4350},
+	{"adf4351", 4351},
+	{}
+};
+/*
+ * Export the id table for module autoloading — without this, modalias
+ * matching cannot load the driver (ad9523 and adis16080 in this same
+ * series already do so).
+ */
+MODULE_DEVICE_TABLE(spi, adf4350_id);
+
+static struct spi_driver adf4350_driver = {
+ .driver = {
+ .name = "adf4350",
+ .owner = THIS_MODULE,
+ },
+ .probe = adf4350_probe,
+ .remove = adf4350_remove,
+ .id_table = adf4350_id,
+};
+module_spi_driver(adf4350_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADF4350/ADF4351 PLL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
new file mode 100644
index 00000000000000..b3d0e94f72eb74
--- /dev/null
+++ b/drivers/iio/gyro/Kconfig
@@ -0,0 +1,112 @@
+#
+# IIO Digital Gyroscope Sensor drivers configuration
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital gyroscope sensors"
+
+config ADIS16080
+ tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADIS16080, ADIS16100 Yaw
+ Rate Gyroscope with SPI.
+
+config ADIS16130
+ tristate "Analog Devices ADIS16130 High Precision Angular Rate Sensor driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADIS16130 High Precision
+ Angular Rate Sensor driver.
+
+config ADIS16136
+ tristate "Analog devices ADIS16136 and similar gyroscopes driver"
+ depends on SPI_MASTER
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for the Analog Devices ADIS16133, ADIS16135,
+ ADIS16136 gyroscope devices.
+
+config ADIS16260
+ tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16260 ADIS16265
+ ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called adis16260.
+
+config ADXRS450
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscope.
+
+ This driver can also be built as a module. If so, the module
+ will be called adxrs450.
+
+config BMG160
+ tristate "BOSCH BMG160 Gyro Sensor"
+ depends on I2C
+ select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
+ driver. This driver also supports BMI055 gyroscope.
+
+ This driver can also be built as a module. If so, the module
+ will be called bmg160.
+
+config HID_SENSOR_GYRO_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Gyroscope 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ Gyroscope 3D.
+
+config IIO_ST_GYRO_3AXIS
+ tristate "STMicroelectronics gyroscopes 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_GYRO_I2C_3AXIS if (I2C)
+ select IIO_ST_GYRO_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics gyroscopes:
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_gyro (core functions for the driver [it is mandatory]);
+ - st_gyro_i2c (necessary for the I2C devices [optional*]);
+ - st_gyro_spi (necessary for the SPI devices [optional*]);
+
+ (*) one of these is necessary to do something.
+
+config IIO_ST_GYRO_I2C_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_GYRO_SPI_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
+config ITG3200
+ tristate "InvenSense ITG3200 Digital 3-Axis Gyroscope I2C driver"
+ depends on I2C
+ select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ help
+ Say yes here to add support for the InvenSense ITG3200 digital
+ 3-axis gyroscope sensor.
+
+endmenu
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
new file mode 100644
index 00000000000000..36a38776f739c9
--- /dev/null
+++ b/drivers/iio/gyro/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for industrial I/O gyroscope sensor drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADIS16080) += adis16080.o
+obj-$(CONFIG_ADIS16130) += adis16130.o
+obj-$(CONFIG_ADIS16136) += adis16136.o
+obj-$(CONFIG_ADIS16260) += adis16260.o
+obj-$(CONFIG_ADXRS450) += adxrs450.o
+obj-$(CONFIG_BMG160) += bmg160.o
+
+obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+
+itg3200-y := itg3200_core.o
+itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
+obj-$(CONFIG_ITG3200) += itg3200.o
+
+obj-$(CONFIG_IIO_ST_GYRO_3AXIS) += st_gyro.o
+st_gyro-y := st_gyro_core.o
+st_gyro-$(CONFIG_IIO_BUFFER) += st_gyro_buffer.o
+
+obj-$(CONFIG_IIO_ST_GYRO_I2C_3AXIS) += st_gyro_i2c.o
+obj-$(CONFIG_IIO_ST_GYRO_SPI_3AXIS) += st_gyro_spi.o
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
new file mode 100644
index 00000000000000..add509837269ba
--- /dev/null
+++ b/drivers/iio/gyro/adis16080.c
@@ -0,0 +1,241 @@
+/*
+ * ADIS16080/100 Yaw Rate Gyroscope with SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define ADIS16080_DIN_GYRO (0 << 10) /* Gyroscope output */
+#define ADIS16080_DIN_TEMP (1 << 10) /* Temperature output */
+#define ADIS16080_DIN_AIN1 (2 << 10)
+#define ADIS16080_DIN_AIN2 (3 << 10)
+
+/*
+ * 1: Write contents on DIN to control register.
+ * 0: No changes to control register.
+ */
+
+#define ADIS16080_DIN_WRITE (1 << 15)
+
+struct adis16080_chip_info {
+ int scale_val;
+ int scale_val2;
+};
+
+/**
+ * struct adis16080_state - device instance specific data
+ * @us: actual spi_device to write data
+ * @info: chip specific parameters
+ * @buf: transmit or receive buffer
+ **/
+struct adis16080_state {
+ struct spi_device *us;
+ const struct adis16080_chip_info *info;
+
+ __be16 buf ____cacheline_aligned;
+};
+
+/*
+ * Read one sample from the channel selected by @addr.
+ *
+ * Two 16-bit transfers are issued: the first writes the control word
+ * (ADIS16080_DIN_WRITE | addr), the second clocks out the resulting
+ * sample (cs_change toggles chip select between them).  The 12-bit
+ * result is sign-extended into *val.  Returns 0 or a negative SPI
+ * error.
+ */
+static int adis16080_read_sample(struct iio_dev *indio_dev,
+		u16 addr, int *val)
+{
+	struct adis16080_state *st = iio_priv(indio_dev);
+	int ret;
+	struct spi_transfer t[] = {
+		{
+			.tx_buf = &st->buf,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.rx_buf = &st->buf,
+			.len = 2,
+		},
+	};
+
+	st->buf = cpu_to_be16(addr | ADIS16080_DIN_WRITE);
+
+	ret = spi_sync_transfer(st->us, t, ARRAY_SIZE(t));
+	if (ret == 0)
+		*val = sign_extend32(be16_to_cpu(st->buf), 11);
+
+	return ret;
+}
+
+/*
+ * IIO read_raw callback.
+ *
+ * RAW reads go to the device (serialized via mlock); SCALE and OFFSET
+ * are constants derived from the per-chip calibration values in
+ * adis16080_chip_info and the fixed 5 V / 12-bit ADC front end.
+ */
+static int adis16080_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val,
+			     int *val2,
+			     long mask)
+{
+	struct adis16080_state *st = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		ret = adis16080_read_sample(indio_dev, chan->address, val);
+		mutex_unlock(&indio_dev->mlock);
+		return ret ? ret : IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = st->info->scale_val;
+			*val2 = st->info->scale_val2;
+			return IIO_VAL_FRACTIONAL;
+		case IIO_VOLTAGE:
+			/* VREF = 5V, 12 bits */
+			*val = 5000;
+			*val2 = 12;
+			return IIO_VAL_FRACTIONAL_LOG2;
+		case IIO_TEMP:
+			/* 85 C = 585, 25 C = 0 */
+			*val = 85000 - 25000;
+			*val2 = 585;
+			return IIO_VAL_FRACTIONAL;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		switch (chan->type) {
+		case IIO_VOLTAGE:
+			/* 2.5 V = 0 */
+			*val = 2048;
+			return IIO_VAL_INT;
+		case IIO_TEMP:
+			/* 85 C = 585, 25 C = 0 */
+			*val = DIV_ROUND_CLOSEST(25 * 585, 85 - 25);
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_chan_spec adis16080_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = ADIS16080_DIN_GYRO,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = ADIS16080_DIN_AIN1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = ADIS16080_DIN_AIN2,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = ADIS16080_DIN_TEMP,
+ }
+};
+
+static const struct iio_info adis16080_info = {
+ .read_raw = &adis16080_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+enum {
+ ID_ADIS16080,
+ ID_ADIS16100,
+};
+
+static const struct adis16080_chip_info adis16080_chip_info[] = {
+ [ID_ADIS16080] = {
+ /* 80 degree = 819, 819 rad = 46925 degree */
+ .scale_val = 80,
+ .scale_val2 = 46925,
+ },
+ [ID_ADIS16100] = {
+ /* 300 degree = 1230, 1230 rad = 70474 degree */
+ .scale_val = 300,
+ .scale_val2 = 70474,
+ },
+};
+
+/*
+ * SPI probe: allocate the device-managed IIO device, pick the chip
+ * variant from the SPI id's driver_data and register.  Everything is
+ * devm-managed except the registration, undone in adis16080_remove().
+ */
+static int adis16080_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct adis16080_state *st;
+	struct iio_dev *indio_dev;
+
+	/* setup the industrialio driver allocated elements */
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+	st = iio_priv(indio_dev);
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, indio_dev);
+
+	/* Allocate the comms buffers */
+	st->us = spi;
+	st->info = &adis16080_chip_info[id->driver_data];
+
+	indio_dev->name = spi->dev.driver->name;
+	indio_dev->channels = adis16080_channels;
+	indio_dev->num_channels = ARRAY_SIZE(adis16080_channels);
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &adis16080_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	return iio_device_register(indio_dev);
+}
+
+/* SPI remove: only the IIO registration needs explicit teardown. */
+static int adis16080_remove(struct spi_device *spi)
+{
+	iio_device_unregister(spi_get_drvdata(spi));
+	return 0;
+}
+
+static const struct spi_device_id adis16080_ids[] = {
+ { "adis16080", ID_ADIS16080 },
+ { "adis16100", ID_ADIS16100 },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, adis16080_ids);
+
+static struct spi_driver adis16080_driver = {
+ .driver = {
+ .name = "adis16080",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16080_probe,
+ .remove = adis16080_remove,
+ .id_table = adis16080_ids,
+};
+module_spi_driver(adis16080_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
new file mode 100644
index 00000000000000..8d08c7ed1ea64d
--- /dev/null
+++ b/drivers/iio/gyro/adis16130.c
@@ -0,0 +1,179 @@
+/*
+ * ADIS16130 Digital Output, High Precision Angular Rate Sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+#define ADIS16130_CON 0x0
+#define ADIS16130_CON_RD (1 << 6)
+#define ADIS16130_IOP 0x1
+
+/* 1 = data-ready signal low when unread data on all channels; */
+#define ADIS16130_IOP_ALL_RDY (1 << 3)
+#define ADIS16130_IOP_SYNC (1 << 0) /* 1 = synchronization enabled */
+#define ADIS16130_RATEDATA 0x8 /* Gyroscope output, rate of rotation */
+#define ADIS16130_TEMPDATA 0xA /* Temperature output */
+#define ADIS16130_RATECS 0x28 /* Gyroscope channel setup */
+#define ADIS16130_RATECS_EN (1 << 3) /* 1 = channel enable; */
+#define ADIS16130_TEMPCS 0x2A /* Temperature channel setup */
+#define ADIS16130_TEMPCS_EN (1 << 3)
+#define ADIS16130_RATECONV 0x30
+#define ADIS16130_TEMPCONV 0x32
+#define ADIS16130_MODE 0x38
+#define ADIS16130_MODE_24BIT (1 << 1) /* 1 = 24-bit resolution; */
+
+/**
+ * struct adis16130_state - device instance specific data
+ * @us: actual spi_device to write data
+ * @buf_lock: mutex to protect tx and rx
+ * @buf: unified tx/rx buffer
+ **/
+struct adis16130_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 buf[4] ____cacheline_aligned;
+};
+
+/*
+ * Read one 24-bit result register at @reg_addr.
+ *
+ * A single 4-byte full-duplex transfer sends the read command
+ * (ADIS16130_CON_RD | reg_addr) in byte 0 and receives the value in
+ * bytes 1..3, MSB first.  buf_lock serializes use of the shared
+ * tx/rx buffer.  Returns 0 or a negative SPI error.
+ */
+static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
+{
+	int ret;
+	struct adis16130_state *st = iio_priv(indio_dev);
+	struct spi_transfer xfer = {
+		.tx_buf = st->buf,
+		.rx_buf = st->buf,
+		.len = 4,
+	};
+
+	mutex_lock(&st->buf_lock);
+
+	st->buf[0] = ADIS16130_CON_RD | reg_addr;
+	st->buf[1] = st->buf[2] = st->buf[3] = 0;
+
+	ret = spi_sync_transfer(st->us, &xfer, 1);
+	if (ret == 0)
+		*val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/*
+ * IIO read_raw callback.
+ *
+ * RAW reads fetch the 24-bit register value from the device; SCALE
+ * and OFFSET report the fixed linear mapping of the 24-bit code to
+ * rad/s (angular velocity) or milli-degrees C (temperature), using
+ * the calibration points listed in the inline comments.
+ */
+static int adis16130_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2,
+			      long mask)
+{
+	int ret;
+	u32 temp;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/* Take the iio_dev status lock */
+		mutex_lock(&indio_dev->mlock);
+		ret = adis16130_spi_read(indio_dev, chan->address, &temp);
+		mutex_unlock(&indio_dev->mlock);
+		if (ret)
+			return ret;
+		*val = temp;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			/* 0 degree = 838860, 250 degree = 14260608 */
+			*val = 250;
+			*val2 = 336440817; /* RAD_TO_DEGREE(14260608 - 8388608) */
+			return IIO_VAL_FRACTIONAL;
+		case IIO_TEMP:
+			/* 0C = 8036283, 105C = 9516048 */
+			*val = 105000;
+			*val2 = 9516048 - 8036283;
+			return IIO_VAL_FRACTIONAL;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = -8388608;
+			return IIO_VAL_INT;
+		case IIO_TEMP:
+			*val = -8036283;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_chan_spec adis16130_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = ADIS16130_RATEDATA,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = ADIS16130_TEMPDATA,
+ }
+};
+
+static const struct iio_info adis16130_info = {
+ .read_raw = &adis16130_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * SPI probe: fully device-managed setup — both the IIO device and its
+ * registration use devm_*, so the driver needs no remove callback.
+ */
+static int adis16130_probe(struct spi_device *spi)
+{
+	struct adis16130_state *st;
+	struct iio_dev *indio_dev;
+
+	/* setup the industrialio driver allocated elements */
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+	st = iio_priv(indio_dev);
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, indio_dev);
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	indio_dev->name = spi->dev.driver->name;
+	indio_dev->channels = adis16130_channels;
+	indio_dev->num_channels = ARRAY_SIZE(adis16130_channels);
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &adis16130_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static struct spi_driver adis16130_driver = {
+ .driver = {
+ .name = "adis16130",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16130_probe,
+};
+module_spi_driver(adis16130_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16130");
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
new file mode 100644
index 00000000000000..591bd555e1f397
--- /dev/null
+++ b/drivers/iio/gyro/adis16136.c
@@ -0,0 +1,577 @@
+/*
+ * ADIS16133/ADIS16135/ADIS16136 gyroscope driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/debugfs.h>
+
+#define ADIS16136_REG_FLASH_CNT 0x00
+#define ADIS16136_REG_TEMP_OUT 0x02
+#define ADIS16136_REG_GYRO_OUT2 0x04
+#define ADIS16136_REG_GYRO_OUT 0x06
+#define ADIS16136_REG_GYRO_OFF2 0x08
+#define ADIS16136_REG_GYRO_OFF 0x0A
+#define ADIS16136_REG_ALM_MAG1 0x10
+#define ADIS16136_REG_ALM_MAG2 0x12
+#define ADIS16136_REG_ALM_SAMPL1 0x14
+#define ADIS16136_REG_ALM_SAMPL2 0x16
+#define ADIS16136_REG_ALM_CTRL 0x18
+#define ADIS16136_REG_GPIO_CTRL 0x1A
+#define ADIS16136_REG_MSC_CTRL 0x1C
+#define ADIS16136_REG_SMPL_PRD 0x1E
+#define ADIS16136_REG_AVG_CNT 0x20
+#define ADIS16136_REG_DEC_RATE 0x22
+#define ADIS16136_REG_SLP_CTRL 0x24
+#define ADIS16136_REG_DIAG_STAT 0x26
+#define ADIS16136_REG_GLOB_CMD 0x28
+#define ADIS16136_REG_LOT1 0x32
+#define ADIS16136_REG_LOT2 0x34
+#define ADIS16136_REG_LOT3 0x36
+#define ADIS16136_REG_PROD_ID 0x38
+#define ADIS16136_REG_SERIAL_NUM 0x3A
+
+#define ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL 2
+#define ADIS16136_DIAG_STAT_SPI_FAIL 3
+#define ADIS16136_DIAG_STAT_SELF_TEST_FAIL 5
+#define ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL 6
+
+#define ADIS16136_MSC_CTRL_MEMORY_TEST BIT(11)
+#define ADIS16136_MSC_CTRL_SELF_TEST BIT(10)
+
+struct adis16136_chip_info {
+ unsigned int precision;
+ unsigned int fullscale;
+};
+
+struct adis16136 {
+ const struct adis16136_chip_info *chip_info;
+
+ struct adis adis;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * adis16136_show_serial() - debugfs read for "serial_number".
+ * Reads the three lot-ID registers plus the serial register and formats
+ * them as "LLLLLLLLLLLL-SSSS" (hex). Returns bytes copied to userspace
+ * or a negative errno from the SPI register reads.
+ */
+static ssize_t adis16136_show_serial(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16136 *adis16136 = file->private_data;
+ uint16_t lot1, lot2, lot3, serial;
+ char buf[20]; /* "xxxxxxxxxxxx-xxxx\n\0" = 19 bytes, fits */
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SERIAL_NUM,
+ &serial);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT1, &lot1);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT2, &lot2);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT3, &lot3);
+ if (ret < 0)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.4x%.4x%.4x-%.4x\n", lot1, lot2,
+ lot3, serial);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16136_serial_fops = {
+ .open = simple_open,
+ .read = adis16136_show_serial,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * adis16136_show_product_id() - debugfs getter for "product_id".
+ * Reads PROD_ID (decimal device number, e.g. 16136) over SPI into *val.
+ * Returns 0 on success or a negative errno.
+ */
+static int adis16136_show_product_id(void *arg, u64 *val)
+{
+ struct adis16136 *adis16136 = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
+ &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16136_product_id_fops,
+ adis16136_show_product_id, NULL, "%llu\n");
+
+/*
+ * adis16136_show_flash_count() - debugfs getter for "flash_count".
+ * Reads the flash-write endurance counter into *val.
+ * Returns 0 on success or a negative errno.
+ */
+static int adis16136_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16136 *adis16136 = arg;
+ uint16_t flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_FLASH_CNT,
+ &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+/* NOTE(review): "%lld" for an unsigned counter (product_id above uses
+ * "%llu"); value always fits, but consider unifying the formats. */
+DEFINE_SIMPLE_ATTRIBUTE(adis16136_flash_count_fops,
+ adis16136_show_flash_count, NULL, "%lld\n");
+
+/*
+ * adis16136_debugfs_init() - populate the IIO device's debugfs directory
+ * with read-only (0400) serial number, product id and flash count files.
+ * debugfs creation failures are intentionally ignored; always returns 0.
+ */
+static int adis16136_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_serial_fops);
+ debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+/* No-op stub when CONFIG_DEBUG_FS is disabled. */
+static int adis16136_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+/*
+ * adis16136_set_freq() - program the sample period for a requested rate.
+ * The internal clock is 32768 Hz; SMPL_PRD appears to hold (divider - 1)
+ * in the normal range — TODO confirm against the datasheet. The divider
+ * is clamped to [0xf, 0xffff]; note the clamped values are written
+ * as-is, without the -1 adjustment. The caller must ensure freq != 0.
+ */
+static int adis16136_set_freq(struct adis16136 *adis16136, unsigned int freq)
+{
+ unsigned int t;
+
+ t = 32768 / freq;
+ if (t < 0xf)
+ t = 0xf;
+ else if (t > 0xffff)
+ t = 0xffff;
+ else
+ t--;
+
+ return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
+}
+
+/*
+ * adis16136_get_freq() - read SMPL_PRD and convert it back to a sample
+ * rate in Hz (inverse of adis16136_set_freq). Returns 0 on success or a
+ * negative errno from the register read.
+ */
+static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
+{
+ uint16_t t;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
+ *freq = 32768 / (t + 1);
+
+ return 0;
+}
+
+/*
+ * adis16136_write_frequency() - sysfs store for sampling_frequency.
+ * Parses a decimal Hz value; zero is rejected up front since
+ * adis16136_set_freq() divides by it. Returns the consumed length or a
+ * negative errno.
+ */
+static ssize_t adis16136_write_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val == 0)
+ return -EINVAL;
+
+ ret = adis16136_set_freq(adis16136, val);
+
+ return ret ? ret : len;
+}
+
+/*
+ * adis16136_read_frequency() - sysfs show for sampling_frequency.
+ * Prints the current sample rate in Hz, or returns a negative errno if
+ * the SMPL_PRD read fails.
+ */
+static ssize_t adis16136_read_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16136_read_frequency,
+ adis16136_write_frequency);
+
+static const unsigned adis16136_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 6,
+ [2] = 12,
+ [3] = 25,
+ [4] = 50,
+ [5] = 100,
+ [6] = 200,
+ [7] = 200, /* Not a valid setting */
+};
+
+/*
+ * adis16136_set_filter() - set the low-pass 3dB corner frequency.
+ * Scans adis16136_3db_divisors[] from the largest divisor down and
+ * writes the first index whose resulting corner (sample rate / divisor)
+ * is >= the requested value into AVG_CNT; falls through to index 0 if
+ * none qualifies. Returns 0 or a negative errno.
+ */
+static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ int i, ret;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ return ret;
+
+ for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
+ if (freq / adis16136_3db_divisors[i] >= val)
+ break;
+ }
+
+ return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+}
+
+/*
+ * adis16136_get_filter() - read the current 3dB corner frequency.
+ * AVG_CNT and SMPL_PRD are read under mlock so the pair is consistent;
+ * only the low 3 bits of AVG_CNT index the divisor table. Returns
+ * IIO_VAL_INT on success (ret == 0) or the negative errno.
+ */
+static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ uint16_t val16;
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ goto err_unlock;
+
+ *val = freq / adis16136_3db_divisors[val16 & 0x07];
+
+err_unlock:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : IIO_VAL_INT;
+}
+
+/*
+ * adis16136_read_raw() - IIO read_raw callback.
+ * RAW: one-shot conversion via the common adis helper.
+ * SCALE: gyro scale is expressed as a fraction precision/(fullscale<<16)
+ * (rad/s per LSB of the 32-bit output); temperature is a fixed
+ * 0.010697 degrees Celsius per LSB.
+ * CALIBBIAS: 32-bit read starting at GYRO_OFF2 (covers the OFF2/OFF
+ * register pair), returned sign-extended.
+ * Returns an IIO_VAL_* code or a negative errno.
+ */
+static int adis16136_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ uint32_t val32;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = adis16136->chip_info->precision;
+ *val2 = (adis16136->chip_info->fullscale << 16);
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = 10;
+ *val2 = 697000; /* 0.010697 degree Celsius */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&adis16136->adis,
+ ADIS16136_REG_GYRO_OFF2, &val32);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(val32, 31); /* bit 31 is the sign bit */
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16136_get_filter(indio_dev, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * adis16136_write_raw() - IIO write_raw callback.
+ * Supports writing the gyro calibration bias (32-bit, GYRO_OFF2/OFF
+ * pair) and the low-pass filter corner; everything else is -EINVAL.
+ */
+static int adis16136_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&adis16136->adis,
+ ADIS16136_REG_GYRO_OFF2, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16136_set_filter(indio_dev, val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+enum {
+ ADIS16136_SCAN_GYRO,
+ ADIS16136_SCAN_TEMP,
+};
+
+static const struct iio_chan_spec adis16136_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+
+ .address = ADIS16136_REG_GYRO_OUT2,
+ .scan_index = ADIS16136_SCAN_GYRO,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = ADIS16136_REG_TEMP_OUT,
+ .scan_index = ADIS16136_SCAN_TEMP,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static struct attribute *adis16136_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16136_attribute_group = {
+ .attrs = adis16136_attributes,
+};
+
+static const struct iio_info adis16136_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &adis16136_attribute_group,
+ .read_raw = &adis16136_read_raw,
+ .write_raw = &adis16136_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+/*
+ * adis16136_stop_device() - put the part to sleep via SLP_CTRL.
+ * 0xff presumably selects an indefinite sleep — TODO confirm against the
+ * datasheet. A failure is logged but still returned to the caller.
+ */
+static int adis16136_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ int ret;
+
+ ret = adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SLP_CTRL, 0xff);
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "Could not power down device: %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * adis16136_initial_setup() - bring the device up and sanity-check it.
+ * Runs the common adis startup (reset/self-test), then compares the
+ * PROD_ID register against the numeric part of the device name
+ * ("adis16133"/"adis16135"/"adis16136") and warns on mismatch.
+ * Returns 0 on success or a negative errno.
+ */
+static int adis16136_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int device_id;
+ uint16_t prod_id;
+ int ret;
+
+ ret = adis_initial_startup(&adis16136->adis);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
+ &prod_id);
+ if (ret)
+ return ret;
+
+ /*
+ * If the name does not parse, skip the comparison rather than read
+ * the uninitialized device_id (undefined behaviour in the original).
+ */
+ if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1)
+ return 0;
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+ device_id, prod_id);
+
+ return 0;
+}
+
+static const char * const adis16136_status_error_msgs[] = {
+ [ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL] = "Flash update failed",
+ [ADIS16136_DIAG_STAT_SPI_FAIL] = "SPI failure",
+ [ADIS16136_DIAG_STAT_SELF_TEST_FAIL] = "Self test error",
+ [ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL] = "Flash checksum error",
+};
+
+static const struct adis_data adis16136_data = {
+ .diag_stat_reg = ADIS16136_REG_DIAG_STAT,
+ .glob_cmd_reg = ADIS16136_REG_GLOB_CMD,
+ .msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
+
+ .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
+ .startup_delay = 80,
+
+ .read_delay = 10,
+ .write_delay = 10,
+
+ .status_error_msgs = adis16136_status_error_msgs,
+ .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_SPI_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL),
+};
+
+enum adis16136_id {
+ ID_ADIS16133,
+ ID_ADIS16135,
+ ID_ADIS16136,
+};
+
+static const struct adis16136_chip_info adis16136_chip_info[] = {
+ [ID_ADIS16133] = {
+ .precision = IIO_DEGREE_TO_RAD(1200),
+ .fullscale = 24000,
+ },
+ [ID_ADIS16135] = {
+ .precision = IIO_DEGREE_TO_RAD(300),
+ .fullscale = 24000,
+ },
+ [ID_ADIS16136] = {
+ .precision = IIO_DEGREE_TO_RAD(450),
+ .fullscale = 24623,
+ },
+};
+
+/*
+ * adis16136_probe() - SPI probe.
+ * Allocates the IIO device (devm), selects per-chip scale data from the
+ * SPI device-id table, initializes the shared adis core, sets up the
+ * ring buffer/trigger, runs the initial self-test and registers the
+ * device. Buffer/trigger teardown and device power-down are unwound
+ * manually on failure since they are not devm-managed.
+ */
+static int adis16136_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct adis16136 *adis16136;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16136));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ adis16136 = iio_priv(indio_dev);
+
+ adis16136->chip_info = &adis16136_chip_info[id->driver_data];
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = adis16136_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16136_channels);
+ indio_dev->info = &adis16136_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
+ if (ret)
+ return ret;
+
+ ret = adis_setup_buffer_and_trigger(&adis16136->adis, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ ret = adis16136_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_stop_device;
+
+ adis16136_debugfs_init(indio_dev); /* best effort; failure ignored */
+
+ return 0;
+
+error_stop_device:
+ adis16136_stop_device(indio_dev);
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
+ return ret;
+}
+
+/*
+ * adis16136_remove() - SPI remove: unregister, power the part down and
+ * tear down the buffer/trigger (reverse of the probe order).
+ */
+static int adis16136_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16136_stop_device(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16136_ids[] = {
+ { "adis16133", ID_ADIS16133 },
+ { "adis16135", ID_ADIS16135 },
+ { "adis16136", ID_ADIS16136 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16136_ids);
+
+static struct spi_driver adis16136_driver = {
+ .driver = {
+ .name = "adis16136",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16136_ids,
+ .probe = adis16136_probe,
+ .remove = adis16136_remove,
+};
+module_spi_driver(adis16136_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16133/ADIS16135/ADIS16136 gyroscope driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
new file mode 100644
index 00000000000000..75fe0edd3d0f98
--- /dev/null
+++ b/drivers/iio/gyro/adis16260.c
@@ -0,0 +1,389 @@
+/*
+ * ADIS16260/ADIS16265 Programmable Digital Gyroscope Sensor Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16260_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16260_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16260_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16260_GYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16260_AUX_ADC 0x0A /* analog input channel measurement */
+#define ADIS16260_TEMP_OUT 0x0C /* internal temperature measurement */
+#define ADIS16260_ANGL_OUT 0x0E /* angle displacement */
+#define ADIS16260_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
+#define ADIS16260_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
+#define ADIS16260_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
+#define ADIS16260_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
+#define ADIS16260_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
+#define ADIS16260_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
+#define ADIS16260_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16260_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16260_GPIO_CTRL 0x32 /* Control, digital I/O line */
+#define ADIS16260_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
+#define ADIS16260_SMPL_PRD 0x36 /* Control, internal sample rate */
+#define ADIS16260_SENS_AVG 0x38 /* Control, dynamic range, filtering */
+#define ADIS16260_SLP_CNT 0x3A /* Control, sleep mode initiation */
+#define ADIS16260_DIAG_STAT 0x3C /* Diagnostic, error flags */
+#define ADIS16260_GLOB_CMD 0x3E /* Control, global commands */
+#define ADIS16260_LOT_ID1 0x52 /* Lot Identification Code 1 */
+#define ADIS16260_LOT_ID2 0x54 /* Lot Identification Code 2 */
+#define ADIS16260_PROD_ID 0x56 /* Product identifier;
+ * convert to decimal = 16,265/16,260 */
+#define ADIS16260_SERIAL_NUM 0x58 /* Serial number */
+
+#define ADIS16260_ERROR_ACTIVE (1<<14)
+#define ADIS16260_NEW_DATA (1<<15)
+
+/* MSC_CTRL */
+#define ADIS16260_MSC_CTRL_MEM_TEST (1<<11)
+/* Internal self-test enable */
+#define ADIS16260_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16260_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16260_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16260_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16260_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+/* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
+#define ADIS16260_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F
+
+/* SLP_CNT */
+#define ADIS16260_SLP_CNT_POWER_OFF 0x80
+
+/* DIAG_STAT */
+#define ADIS16260_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16260_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16260_DIAG_STAT_FLASH_CHK_BIT 6
+#define ADIS16260_DIAG_STAT_SELF_TEST_BIT 5
+#define ADIS16260_DIAG_STAT_OVERFLOW_BIT 4
+#define ADIS16260_DIAG_STAT_SPI_FAIL_BIT 3
+#define ADIS16260_DIAG_STAT_FLASH_UPT_BIT 2
+#define ADIS16260_DIAG_STAT_POWER_HIGH_BIT 1
+#define ADIS16260_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+#define ADIS16260_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16260_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16260_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16260_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16260_GLOB_CMD_AUTO_NULL (1<<0)
+
+#define ADIS16260_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16260_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16260_SPI_FAST (u32)(2000 * 1000)
+
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+#define ADIS16260_SCAN_GYRO 0
+#define ADIS16260_SCAN_SUPPLY 1
+#define ADIS16260_SCAN_AUX_ADC 2
+#define ADIS16260_SCAN_TEMP 3
+#define ADIS16260_SCAN_ANGL 4
+
+/* Power down the device */
+static int adis16260_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis *adis = iio_priv(indio_dev);
+ int ret;
+ u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+
+ ret = adis_write_reg_16(adis, ADIS16260_SLP_CNT, val);
+ if (ret)
+ dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static const struct iio_chan_spec adis16260_channels[] = {
+ ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 14),
+ ADIS_INCLI_CHAN(X, ADIS16260_ANGL_OUT, ADIS16260_SCAN_ANGL, 0,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 14),
+ ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
+
+static const u8 adis16260_addresses[][2] = {
+ [ADIS16260_SCAN_GYRO] = { ADIS16260_GYRO_OFF, ADIS16260_GYRO_SCALE },
+};
+
+/*
+ * adis16260_read_raw() - IIO read_raw callback.
+ * RAW goes through the common adis one-shot conversion (with the
+ * error-active flag masked). SCALE depends on channel type and, for the
+ * gyro, on the part: driver_data != 0 marks the adis16251 variant with
+ * the finer 0.01832 deg/s LSB. CALIBBIAS/CALIBSCALE are looked up via
+ * adis16260_addresses[] keyed by scan index. SAMP_FREQ decodes SMPL_PRD:
+ * the time-base bit selects between two base rates (different for the
+ * adis16251), divided by (divider field + 1).
+ * Returns an IIO_VAL_* code or a negative errno.
+ */
+static int adis16260_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *adis = iio_priv(indio_dev);
+ int ret;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16260_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ if (spi_get_device_id(adis->spi)->driver_data) {
+ /* 0.01832 degree / sec */
+ *val2 = IIO_DEGREE_TO_RAD(18320);
+ } else {
+ /* 0.07326 degree / sec */
+ *val2 = IIO_DEGREE_TO_RAD(73260);
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = IIO_DEGREE_TO_RAD(36630);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ /* channel 0 is the supply measurement */
+ *val = 1;
+ *val2 = 831500; /* 1.8315 mV */
+ } else {
+ /* auxiliary ADC input */
+ *val = 0;
+ *val2 = 610500; /* 610.5 uV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 145;
+ *val2 = 300000; /* 0.1453 C */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 250000 / 1453; /* 25 C = 0x00 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ addr = adis16260_addresses[chan->scan_index][0];
+ ret = adis_read_reg_16(adis, addr, &val16);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(val16, 11); /* 12-bit signed bias */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ addr = adis16260_addresses[chan->scan_index][1];
+ ret = adis_read_reg_16(adis, addr, &val16);
+ if (ret)
+ return ret;
+
+ *val = val16;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis_read_reg_16(adis, ADIS16260_SMPL_PRD, &val16);
+ if (ret)
+ return ret;
+
+ if (spi_get_device_id(adis->spi)->driver_data)
+ /* If an adis16251 */
+ *val = (val16 & ADIS16260_SMPL_PRD_TIME_BASE) ?
+ 8 : 256;
+ else
+ *val = (val16 & ADIS16260_SMPL_PRD_TIME_BASE) ?
+ 66 : 2048;
+ *val /= (val16 & ADIS16260_SMPL_PRD_DIV_MASK) + 1;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+/*
+ * adis16260_write_raw() - IIO write_raw callback.
+ * CALIBBIAS: 12-bit signed bias; CALIBSCALE: 12-bit unsigned scale;
+ * both are range-checked and written via the per-channel address table.
+ * SAMP_FREQ: converts the requested rate into the SMPL_PRD divider
+ * (256 Hz base for the adis16251 variant, 2048 Hz otherwise) under
+ * mlock, and drops the SPI clock for slow sample rates (large divider
+ * values — presumably a datasheet requirement, TODO confirm).
+ */
+static int adis16260_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *adis = iio_priv(indio_dev);
+ int ret;
+ u8 addr;
+ u8 t;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < -2048 || val >= 2048)
+ return -EINVAL;
+
+ addr = adis16260_addresses[chan->scan_index][0];
+ return adis_write_reg_16(adis, addr, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val < 0 || val >= 4096)
+ return -EINVAL;
+
+ addr = adis16260_addresses[chan->scan_index][1];
+ return adis_write_reg_16(adis, addr, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /*
+ * val comes straight from userspace; reject non-positive
+ * rates up front to avoid the division by zero below.
+ */
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ if (spi_get_device_id(adis->spi)->driver_data)
+ t = 256 / val;
+ else
+ t = 2048 / val;
+
+ if (t > ADIS16260_SMPL_PRD_DIV_MASK)
+ t = ADIS16260_SMPL_PRD_DIV_MASK;
+ else if (t > 0)
+ t--;
+
+ if (t >= 0x0A)
+ adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
+ else
+ adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
+ ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
+
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info adis16260_info = {
+ .read_raw = &adis16260_read_raw,
+ .write_raw = &adis16260_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const adis1620_status_error_msgs[] = {
+ [ADIS16260_DIAG_STAT_FLASH_CHK_BIT] = "Flash checksum error",
+ [ADIS16260_DIAG_STAT_SELF_TEST_BIT] = "Self test error",
+ [ADIS16260_DIAG_STAT_OVERFLOW_BIT] = "Sensor overrange",
+ [ADIS16260_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16260_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16260_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 5.25",
+ [ADIS16260_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 4.75",
+};
+
+static const struct adis_data adis16260_data = {
+ .write_delay = 30,
+ .read_delay = 30,
+ .msc_ctrl_reg = ADIS16260_MSC_CTRL,
+ .glob_cmd_reg = ADIS16260_GLOB_CMD,
+ .diag_stat_reg = ADIS16260_DIAG_STAT,
+
+ .self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
+ .startup_delay = ADIS16260_STARTUP_DELAY,
+
+ .status_error_msgs = adis1620_status_error_msgs,
+ .status_error_mask = BIT(ADIS16260_DIAG_STAT_FLASH_CHK_BIT) |
+ BIT(ADIS16260_DIAG_STAT_SELF_TEST_BIT) |
+ BIT(ADIS16260_DIAG_STAT_OVERFLOW_BIT) |
+ BIT(ADIS16260_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16260_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16260_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16260_DIAG_STAT_POWER_LOW_BIT),
+};
+
+/*
+ * adis16260_probe() - SPI probe.
+ * Allocates the IIO device (devm), initializes the shared adis core,
+ * sets up the buffer/trigger, runs the common startup sequence and
+ * registers the device. The buffer/trigger pair is the only resource
+ * that needs manual unwinding on failure.
+ */
+static int adis16260_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis *adis;
+ int ret;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis));
+ if (!indio_dev)
+ return -ENOMEM;
+ adis = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16260_info;
+ indio_dev->channels = adis16260_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16260_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(adis, indio_dev, spi, &adis16260_data);
+ if (ret)
+ return ret;
+
+ ret = adis_setup_buffer_and_trigger(adis, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = adis_initial_startup(adis);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(adis, indio_dev);
+ return ret;
+}
+
+/*
+ * adis16260_remove() - SPI remove: unregister, power the part down and
+ * tear down the buffer/trigger (reverse of the probe order).
+ */
+static int adis16260_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *adis = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16260_stop_device(indio_dev);
+ adis_cleanup_buffer_and_trigger(adis, indio_dev);
+
+ return 0;
+}
+
+/*
+ * These parts do not need to be differentiated until someone adds
+ * support for the on chip filtering.
+ */
+static const struct spi_device_id adis16260_id[] = {
+ {"adis16260", 0},
+ {"adis16265", 0},
+ {"adis16250", 0},
+ {"adis16255", 0},
+ {"adis16251", 1},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adis16260_id);
+
+static struct spi_driver adis16260_driver = {
+ .driver = {
+ .name = "adis16260",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16260_probe,
+ .remove = adis16260_remove,
+ .id_table = adis16260_id,
+};
+module_spi_driver(adis16260_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
new file mode 100644
index 00000000000000..eb0e08ec9e20d4
--- /dev/null
+++ b/drivers/iio/gyro/adxrs450.c
@@ -0,0 +1,468 @@
+/*
+ * ADXRS450/ADXRS453 Digital Output Gyroscope Driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define ADXRS450_STARTUP_DELAY 50 /* ms */
+
+/* The MSB for the spi commands */
+#define ADXRS450_SENSOR_DATA (0x20 << 24)
+#define ADXRS450_WRITE_DATA (0x40 << 24)
+#define ADXRS450_READ_DATA (0x80 << 24)
+
+#define ADXRS450_RATE1 0x00 /* Rate Registers */
+#define ADXRS450_TEMP1 0x02 /* Temperature Registers */
+#define ADXRS450_LOCST1 0x04 /* Low CST Memory Registers */
+#define ADXRS450_HICST1 0x06 /* High CST Memory Registers */
+#define ADXRS450_QUAD1 0x08 /* Quad Memory Registers */
+#define ADXRS450_FAULT1 0x0A /* Fault Registers */
+#define ADXRS450_PID1 0x0C /* Part ID Register 1 */
+#define ADXRS450_SNH 0x0E /* Serial Number Registers, 4 bytes */
+#define ADXRS450_SNL 0x10
+#define ADXRS450_DNC1 0x12 /* Dynamic Null Correction Registers */
+/* Check bits */
+#define ADXRS450_P 0x01
+#define ADXRS450_CHK 0x02
+#define ADXRS450_CST 0x04
+#define ADXRS450_PWR 0x08
+#define ADXRS450_POR 0x10
+#define ADXRS450_NVM 0x20
+#define ADXRS450_Q 0x40
+#define ADXRS450_PLL 0x80
+#define ADXRS450_UV 0x100
+#define ADXRS450_OV 0x200
+#define ADXRS450_AMP 0x400
+#define ADXRS450_FAIL 0x800
+
+#define ADXRS450_WRERR_MASK (0x7 << 29)
+
+#define ADXRS450_MAX_RX 4
+#define ADXRS450_MAX_TX 4
+
+#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
+/**
+ * struct adxrs450_state - device instance specific data
+ * @us:		actual spi_device
+ * @buf_lock:	mutex to protect tx and rx
+ * @tx:		transmit buffer
+ * @rx:		receive buffer
+ *
+ * @tx is ____cacheline_aligned so the tx/rx pair starts on its own
+ * cacheline; presumably this keeps SPI DMA from sharing a line with the
+ * rest of the state - TODO confirm against the SPI core's DMA rules.
+ **/
+struct adxrs450_state {
+	struct spi_device	*us;
+	struct mutex		buf_lock;
+	__be32			tx ____cacheline_aligned;
+	__be32			rx;
+
+};
+
+/**
+ * adxrs450_spi_read_reg_16() - read 2 bytes from a register pair
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers, which should be
+ *	an even address, the second register's address is reg_address + 1.
+ * @val: somewhere to pass back the value read
+ *
+ * The 32-bit command word carries the register address in bits [25:17];
+ * the reply carries the 16-bit register value in bits [20:5].
+ * Returns 0 on success or a negative errno from the SPI layer.
+ **/
+static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
+				    u8 reg_address,
+				    u16 *val)
+{
+	struct adxrs450_state *st = iio_priv(indio_dev);
+	u32 tx;
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = &st->tx,
+			.bits_per_word = 8,
+			.len = sizeof(st->tx),
+			.cs_change = 1, /* command and reply are separate frames */
+		}, {
+			.rx_buf = &st->rx,
+			.bits_per_word = 8,
+			.len = sizeof(st->rx),
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	tx = ADXRS450_READ_DATA | (reg_address << 17);
+
+	/* Set the parity bit so the command word has an odd number of 1s */
+	if (!(hweight32(tx) & 1))
+		tx |= ADXRS450_P;
+
+	st->tx = cpu_to_be32(tx);
+	ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
+	if (ret) {
+		dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
+			reg_address);
+		goto error_ret;
+	}
+
+	/* Register data sits in bits [20:5] of the big-endian reply */
+	*val = (be32_to_cpu(st->rx) >> 5) & 0xFFFF;
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/**
+ * adxrs450_spi_write_reg_16() - write 2 bytes data to a register pair
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers, which should be
+ *	an even address, the second register's address is reg_address + 1.
+ * @val: value to be written.
+ *
+ * The command word places the register address in bits [25:17] and the
+ * data in bits [16:1]; bit 0 is the parity bit. Returns 0 or a negative
+ * errno from the SPI layer.
+ **/
+static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
+				     u8 reg_address,
+				     u16 val)
+{
+	struct adxrs450_state *st = iio_priv(indio_dev);
+	u32 tx;
+	int ret;
+
+	mutex_lock(&st->buf_lock);
+	tx = ADXRS450_WRITE_DATA | (reg_address << 17) | (val << 1);
+
+	/* Set the parity bit so the command word has an odd number of 1s */
+	if (!(hweight32(tx) & 1))
+		tx |= ADXRS450_P;
+
+	st->tx = cpu_to_be32(tx);
+	ret = spi_write(st->us, &st->tx, sizeof(st->tx));
+	if (ret)
+		dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n",
+			reg_address);
+	usleep_range(100, 1000); /* enforce sequential transfer delay 0.1ms */
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/**
+ * adxrs450_spi_sensor_data() - read 2 bytes sensor data
+ * @indio_dev: device associated with child of actual iio_dev
+ * @val: somewhere to pass back the value read
+ *
+ * Issues the dedicated sensor-data command; the 16-bit rate sample sits
+ * in bits [25:10] of the reply. Returns 0 or a negative errno.
+ **/
+static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
+{
+	struct adxrs450_state *st = iio_priv(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = &st->tx,
+			.bits_per_word = 8,
+			.len = sizeof(st->tx),
+			.cs_change = 1, /* command and reply are separate frames */
+		}, {
+			.rx_buf = &st->rx,
+			.bits_per_word = 8,
+			.len = sizeof(st->rx),
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx = cpu_to_be32(ADXRS450_SENSOR_DATA);
+
+	ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
+	if (ret) {
+		dev_err(&st->us->dev, "Problem while reading sensor data\n");
+		goto error_ret;
+	}
+
+	*val = (be32_to_cpu(st->rx) >> 10) & 0xFFFF;
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/**
+ * adxrs450_spi_initial() - use for initializing procedure.
+ * @st: device instance specific data
+ * @val: somewhere to pass back the value read
+ * @chk: Whether to perform fault check
+ *
+ * Full-duplex single transfer: the command goes out while the previous
+ * response clocks in. When @chk is set the CHK bit (plus parity) is
+ * added, which presumably requests the self-check/fault report - TODO
+ * confirm against the ADXRS450 datasheet.
+ **/
+static int adxrs450_spi_initial(struct adxrs450_state *st,
+		u32 *val, char chk)
+{
+	int ret;
+	u32 tx;
+	struct spi_transfer xfers = {
+		.tx_buf = &st->tx,
+		.rx_buf = &st->rx,
+		.bits_per_word = 8,
+		.len = sizeof(st->tx),
+	};
+
+	mutex_lock(&st->buf_lock);
+	tx = ADXRS450_SENSOR_DATA;
+	if (chk)
+		tx |= (ADXRS450_CHK | ADXRS450_P);
+	st->tx = cpu_to_be32(tx);
+	ret = spi_sync_transfer(st->us, &xfers, 1);
+	if (ret) {
+		dev_err(&st->us->dev, "Problem while reading initializing data\n");
+		goto error_ret;
+	}
+
+	*val = be32_to_cpu(st->rx);
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/* Recommended Startup Sequence by spec */
+static int adxrs450_initial_setup(struct iio_dev *indio_dev)
+{
+	u32 t;
+	u16 data;
+	int ret;
+	struct adxrs450_state *st = iio_priv(indio_dev);
+
+	msleep(ADXRS450_STARTUP_DELAY*2);
+	/* First response after power-on is expected to be exactly 0x01 */
+	ret = adxrs450_spi_initial(st, &t, 1);
+	if (ret)
+		return ret;
+	if (t != 0x01)
+		dev_warn(&st->us->dev, "The initial power on response is not correct! Restart without reset?\n");
+
+	msleep(ADXRS450_STARTUP_DELAY);
+	ret = adxrs450_spi_initial(st, &t, 0);
+	if (ret)
+		return ret;
+
+	msleep(ADXRS450_STARTUP_DELAY);
+	ret = adxrs450_spi_initial(st, &t, 0);
+	if (ret)
+		return ret;
+	/*
+	 * Expect all low-byte status flags (bits 7:1, parity bit ignored)
+	 * set and the ST field (bits 27:26) == 2 - per the startup
+	 * sequence in the datasheet; TODO confirm exact flag meanings.
+	 */
+	if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) {
+		dev_err(&st->us->dev, "The second response is not correct!\n");
+		return -EIO;
+
+	}
+	ret = adxrs450_spi_initial(st, &t, 0);
+	if (ret)
+		return ret;
+	if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) {
+		dev_err(&st->us->dev, "The third response is not correct!\n");
+		return -EIO;
+
+	}
+	/* Any set fault bit (low 12 bits of FAULT1) means the part is bad */
+	ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_FAULT1, &data);
+	if (ret)
+		return ret;
+	if (data & 0x0fff) {
+		dev_err(&st->us->dev, "The device is not in normal status!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * adxrs450_write_raw() - IIO write hook; only supports CALIBBIAS, which
+ * programs the 10-bit signed dynamic null correction (DNC1) register.
+ */
+static int adxrs450_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val,
+			      int val2,
+			      long mask)
+{
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		/* DNC is a 10-bit signed field: valid range [-0x400, 0x400) */
+		if (val < -0x400 || val >= 0x400)
+			return -EINVAL;
+		/*
+		 * Mask to the 10-bit field so the sign-extension bits of a
+		 * negative value do not leak into the upper register bits
+		 * (the read path sign-extends bit 9, see adxrs450_read_raw).
+		 */
+		ret = adxrs450_spi_write_reg_16(indio_dev,
+						ADXRS450_DNC1, val & 0x3FF);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * adxrs450_read_raw() - IIO read hook for rate, temperature, quadrature
+ * and calibration-bias channels.
+ */
+static int adxrs450_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val,
+			     int *val2,
+			     long mask)
+{
+	int ret;
+	s16 t;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			ret = adxrs450_spi_sensor_data(indio_dev, &t);
+			if (ret)
+				break;
+			*val = t;
+			ret = IIO_VAL_INT;
+			break;
+		case IIO_TEMP:
+			ret = adxrs450_spi_read_reg_16(indio_dev,
+						       ADXRS450_TEMP1, &t);
+			if (ret)
+				break;
+			/*
+			 * Temperature lives in the upper 10 bits; the +225
+			 * offset is presumably per datasheet - TODO confirm.
+			 */
+			*val = (t >> 6) + 225;
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			/* 218166 nrad/s per LSB, i.e. about 0.0125 deg/s */
+			*val = 0;
+			*val2 = 218166;
+			return IIO_VAL_INT_PLUS_NANO;
+		case IIO_TEMP:
+			*val = 200;
+			*val2 = 0;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW:
+		ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
+		if (ret)
+			break;
+		*val = t;
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
+		if (ret)
+			break;
+		/* DNC is a 10-bit signed value: sign-extend from bit 9 */
+		*val = sign_extend32(t, 9);
+		ret = IIO_VAL_INT;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Per-variant channel tables, indexed by spi_device_id driver_data.
+ * The ADXRS450 variant additionally exposes CALIBBIAS (the DNC register)
+ * on its rate channel; both variants share the same temperature channel.
+ */
+static const struct iio_chan_spec adxrs450_channels[2][2] = {
+	[ID_ADXRS450] = {
+		{
+			.type = IIO_ANGL_VEL,
+			.modified = 1,
+			.channel2 = IIO_MOD_Z,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_CALIBBIAS) |
+			BIT(IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		}, {
+			.type = IIO_TEMP,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		}
+	},
+	[ID_ADXRS453] = {
+		{
+			.type = IIO_ANGL_VEL,
+			.modified = 1,
+			.channel2 = IIO_MOD_Z,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW),
+		}, {
+			.type = IIO_TEMP,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		}
+	},
+};
+
+static const struct iio_info adxrs450_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &adxrs450_read_raw,
+	.write_raw = &adxrs450_write_raw,
+};
+
+/*
+ * adxrs450_probe() - allocate IIO state, run the datasheet startup
+ * sequence and register the device.
+ */
+static int adxrs450_probe(struct spi_device *spi)
+{
+	int ret;
+	struct adxrs450_state *st;
+	struct iio_dev *indio_dev;
+
+	/* Setup the industrialio driver allocated elements */
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+	st = iio_priv(indio_dev);
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* This is only used for removal purposes */
+	spi_set_drvdata(spi, indio_dev);
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->info = &adxrs450_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels =
+		adxrs450_channels[spi_get_device_id(spi)->driver_data];
+	/*
+	 * Count the channels of the selected variant, not the number of
+	 * variants; both dimensions happen to be 2, but be explicit.
+	 */
+	indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels[0]);
+	indio_dev->name = spi->dev.driver->name;
+
+	/*
+	 * Get the device into a sane initial state *before* registering,
+	 * so userspace never sees a half-initialized gyro.
+	 */
+	ret = adxrs450_initial_setup(indio_dev);
+	if (ret)
+		return ret;
+
+	return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+/* driver_data selects the row of adxrs450_channels for the variant */
+static const struct spi_device_id adxrs450_id[] = {
+	{"adxrs450", ID_ADXRS450},
+	{"adxrs453", ID_ADXRS453},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, adxrs450_id);
+
+static struct spi_driver adxrs450_driver = {
+	.driver = {
+		.name = "adxrs450",
+		.owner = THIS_MODULE,
+	},
+	.probe = adxrs450_probe,
+	.id_table = adxrs450_id,
+};
+module_spi_driver(adxrs450_driver);
+
+MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>");
+MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
new file mode 100644
index 00000000000000..60451b32824212
--- /dev/null
+++ b/drivers/iio/gyro/bmg160.c
@@ -0,0 +1,1273 @@
+/*
+ * BMG160 Gyro Sensor driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMG160_DRV_NAME "bmg160"
+#define BMG160_IRQ_NAME "bmg160_event"
+#define BMG160_GPIO_NAME "gpio_int"
+
+#define BMG160_REG_CHIP_ID 0x00
+#define BMG160_CHIP_ID_VAL 0x0F
+
+#define BMG160_REG_PMU_LPW 0x11
+#define BMG160_MODE_NORMAL 0x00
+#define BMG160_MODE_DEEP_SUSPEND 0x20
+#define BMG160_MODE_SUSPEND 0x80
+
+#define BMG160_REG_RANGE 0x0F
+
+#define BMG160_RANGE_2000DPS 0
+#define BMG160_RANGE_1000DPS 1
+#define BMG160_RANGE_500DPS 2
+#define BMG160_RANGE_250DPS 3
+#define BMG160_RANGE_125DPS 4
+
+#define BMG160_REG_PMU_BW 0x10
+#define BMG160_NO_FILTER 0
+#define BMG160_DEF_BW 100
+
+#define BMG160_REG_INT_MAP_0 0x17
+#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
+
+#define BMG160_REG_INT_MAP_1 0x18
+#define BMG160_INT_MAP_1_BIT_NEW_DATA BIT(0)
+
+#define BMG160_REG_INT_RST_LATCH 0x21
+#define BMG160_INT_MODE_LATCH_RESET 0x80
+#define BMG160_INT_MODE_LATCH_INT 0x0F
+#define BMG160_INT_MODE_NON_LATCH_INT 0x00
+
+#define BMG160_REG_INT_EN_0 0x15
+#define BMG160_DATA_ENABLE_INT BIT(7)
+
+#define BMG160_REG_INT_EN_1 0x16
+#define BMG160_INT1_BIT_OD BIT(1)
+
+#define BMG160_REG_XOUT_L 0x02
+#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2))
+
+#define BMG160_REG_SLOPE_THRES 0x1B
+#define BMG160_SLOPE_THRES_MASK 0x0F
+
+#define BMG160_REG_MOTION_INTR 0x1C
+#define BMG160_INT_MOTION_X BIT(0)
+#define BMG160_INT_MOTION_Y BIT(1)
+#define BMG160_INT_MOTION_Z BIT(2)
+#define BMG160_ANY_DUR_MASK 0x30
+#define BMG160_ANY_DUR_SHIFT 4
+
+#define BMG160_REG_INT_STATUS_2 0x0B
+#define BMG160_ANY_MOTION_MASK 0x07
+#define BMG160_ANY_MOTION_BIT_X BIT(0)
+#define BMG160_ANY_MOTION_BIT_Y BIT(1)
+#define BMG160_ANY_MOTION_BIT_Z BIT(2)
+
+#define BMG160_REG_TEMP 0x08
+#define BMG160_TEMP_CENTER_VAL 23
+
+#define BMG160_MAX_STARTUP_TIME_MS 80
+
+#define BMG160_AUTO_SUSPEND_DELAY_MS 2000
+
+/*
+ * struct bmg160_data - per-device driver state, protected by @mutex
+ */
+struct bmg160_data {
+	struct i2c_client *client;
+	struct iio_trigger *dready_trig;	/* new-data (data ready) trigger */
+	struct iio_trigger *motion_trig;	/* any-motion (ROC event) trigger */
+	struct mutex mutex;		/* serializes register access and state */
+	s16 buffer[8];		/* scan data pushed with timestamp appended */
+	u8 bw_bits;		/* cached PMU_BW register bits */
+	u32 dps_range;		/* cached BMG160_RANGE_* selection */
+	int ev_enable_state;	/* non-zero while the any-motion event is on */
+	int slope_thres;	/* cached slope-threshold register value */
+	bool dready_trigger_on;
+	bool motion_trigger_on;
+	int64_t timestamp;	/* presumably stamped in the IRQ path - not visible here */
+};
+
+/* Scan-index order of the angular velocity axes */
+enum bmg160_axis {
+	AXIS_X,
+	AXIS_Y,
+	AXIS_Z,
+};
+
+/* Supported output data rates (Hz) and their PMU_BW register bits */
+static const struct {
+	int val;
+	int bw_bits;
+} bmg160_samp_freq_table[] = { {100, 0x07},
+			       {200, 0x06},
+			       {400, 0x03},
+			       {1000, 0x02},
+			       {2000, 0x01} };
+
+/* Fractional scale per full-scale range (micro units - TODO confirm) */
+static const struct {
+	int scale;
+	int dps_range;
+} bmg160_scale_table[] = { { 1065, BMG160_RANGE_2000DPS},
+			   { 532, BMG160_RANGE_1000DPS},
+			   { 266, BMG160_RANGE_500DPS},
+			   { 133, BMG160_RANGE_250DPS},
+			   { 66, BMG160_RANGE_125DPS} };
+
+/*
+ * Write @mode into the PMU_LPW power-mode register.
+ * Returns 0 on success or a negative errno from the I2C layer.
+ */
+static int bmg160_set_mode(struct bmg160_data *data, u8 mode)
+{
+	int ret = i2c_smbus_write_byte_data(data->client,
+					    BMG160_REG_PMU_LPW, mode);
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_pmu_lpw\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Map a sampling frequency in Hz to its PMU_BW register bits.
+ * Returns the bits on success or -EINVAL for an unsupported rate.
+ */
+static int bmg160_convert_freq_to_bit(int val)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(bmg160_samp_freq_table); idx++)
+		if (bmg160_samp_freq_table[idx].val == val)
+			return bmg160_samp_freq_table[idx].bw_bits;
+
+	return -EINVAL;
+}
+
+/*
+ * Program the bandwidth/ODR register from a frequency in Hz and cache
+ * the chosen register bits in @data->bw_bits on success.
+ */
+static int bmg160_set_bw(struct bmg160_data *data, int val)
+{
+	int bw_bits = bmg160_convert_freq_to_bit(val);
+	int ret;
+
+	if (bw_bits < 0)
+		return bw_bits;
+
+	ret = i2c_smbus_write_byte_data(data->client, BMG160_REG_PMU_BW,
+					bw_bits);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_pmu_bw\n");
+		return ret;
+	}
+
+	data->bw_bits = bw_bits;
+
+	return 0;
+}
+
+/*
+ * bmg160_chip_init() - verify the chip ID, bring the part into normal
+ * mode and program sane defaults (bandwidth, range, interrupt mode).
+ */
+static int bmg160_chip_init(struct bmg160_data *data)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_CHIP_ID);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_chip_id\n");
+		return ret;
+	}
+
+	dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
+	if (ret != BMG160_CHIP_ID_VAL) {
+		dev_err(&data->client->dev, "invalid chip %x\n", ret);
+		return -ENODEV;
+	}
+
+	ret = bmg160_set_mode(data, BMG160_MODE_NORMAL);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * NOTE(review): this sleeps 500-1000 us, but the original comment
+	 * claimed "up to 500 ms" - confirm the required power-up delay
+	 * against the BMG160 datasheet.
+	 */
+	usleep_range(500, 1000);
+
+	/* Set Bandwidth */
+	ret = bmg160_set_bw(data, BMG160_DEF_BW);
+	if (ret < 0)
+		return ret;
+
+	/* Set Default Range */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_RANGE,
+					BMG160_RANGE_500DPS);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_range\n");
+		return ret;
+	}
+	data->dps_range = BMG160_RANGE_500DPS;
+
+	/* Cache the power-on slope threshold for the any-motion event */
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_SLOPE_THRES);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_slope_thres\n");
+		return ret;
+	}
+	data->slope_thres = ret;
+
+	/* Set default interrupt mode: push-pull (clear the open-drain bit) */
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
+		return ret;
+	}
+	ret &= ~BMG160_INT1_BIT_OD;
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_EN_1, ret);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
+		return ret;
+	}
+
+	/* Latched interrupt mode, and clear any already-latched interrupt */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Error writing reg_motion_intr\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * bmg160_set_power_state() - take (@on) or release (!@on) a runtime-PM
+ * reference. Release uses autosuspend so back-to-back accesses do not
+ * bounce the device. No-op when CONFIG_PM is disabled (device stays on).
+ */
+static int bmg160_set_power_state(struct bmg160_data *data, bool on)
+{
+#ifdef CONFIG_PM
+	int ret;
+
+	if (on)
+		ret = pm_runtime_get_sync(&data->client->dev);
+	else {
+		pm_runtime_mark_last_busy(&data->client->dev);
+		ret = pm_runtime_put_autosuspend(&data->client->dev);
+	}
+
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Failed: bmg160_set_power_state for %d\n", on);
+		/* get_sync bumps the refcount even on failure: drop it */
+		if (on)
+			pm_runtime_put_noidle(&data->client->dev);
+
+		return ret;
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * bmg160_setup_any_motion_interrupt() - route the any-motion interrupt
+ * to the INT pin and enable/disable it. When enabling, the cached slope
+ * threshold is written first and all three axes are armed.
+ */
+static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
+					     bool status)
+{
+	int ret;
+
+	/* Enable/Disable INT_MAP0 mapping */
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_MAP_0);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_map0\n");
+		return ret;
+	}
+	if (status)
+		ret |= BMG160_INT_MAP_0_BIT_ANY;
+	else
+		ret &= ~BMG160_INT_MAP_0_BIT_ANY;
+
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_MAP_0,
+					ret);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_map0\n");
+		return ret;
+	}
+
+	/* Enable/Disable slope interrupts */
+	if (status) {
+		/* Update slope thres */
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_SLOPE_THRES,
+						data->slope_thres);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error writing reg_slope_thres\n");
+			return ret;
+		}
+
+		/* Arm motion detection on all three axes */
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_MOTION_INTR,
+						BMG160_INT_MOTION_X |
+						BMG160_INT_MOTION_Y |
+						BMG160_INT_MOTION_Z);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error writing reg_motion_intr\n");
+			return ret;
+		}
+
+		/*
+		 * New data interrupt is always non-latched,
+		 * which will have higher priority, so no need
+		 * to set latched mode, we will be flooded anyway with INTR
+		 */
+		if (!data->dready_trigger_on) {
+			ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+			if (ret < 0) {
+				dev_err(&data->client->dev,
+					"Error writing reg_rst_latch\n");
+				return ret;
+			}
+		}
+
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_INT_EN_0,
+						BMG160_DATA_ENABLE_INT);
+
+	} else
+		/* Master interrupt disable */
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_INT_EN_0,
+						0);
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_en0\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * bmg160_setup_new_data_interrupt() - route the data-ready interrupt to
+ * the INT pin and enable/disable it. Data-ready uses non-latched mode;
+ * on disable the latched mode used by the any-motion path is restored.
+ */
+static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
+					   bool status)
+{
+	int ret;
+
+	/* Enable/Disable INT_MAP1 mapping */
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_MAP_1);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_map1\n");
+		return ret;
+	}
+
+	if (status)
+		ret |= BMG160_INT_MAP_1_BIT_NEW_DATA;
+	else
+		ret &= ~BMG160_INT_MAP_1_BIT_NEW_DATA;
+
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_MAP_1,
+					ret);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_map1\n");
+		return ret;
+	}
+
+	if (status) {
+		/* Non-latched: a new sample re-asserts the line by itself */
+		ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_NON_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error writing reg_rst_latch\n");
+			return ret;
+		}
+
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_INT_EN_0,
+						BMG160_DATA_ENABLE_INT);
+
+	} else {
+		/* Restore interrupt mode */
+		ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error writing reg_rst_latch\n");
+			return ret;
+		}
+
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_INT_EN_0,
+						0);
+	}
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_int_en0\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Translate the cached PMU_BW bits back into a frequency in Hz.
+ * Returns IIO_VAL_INT with *val set, or -EINVAL for unknown bits.
+ */
+static int bmg160_get_bw(struct bmg160_data *data, int *val)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(bmg160_samp_freq_table); idx++) {
+		if (bmg160_samp_freq_table[idx].bw_bits != data->bw_bits)
+			continue;
+		*val = bmg160_samp_freq_table[idx].val;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Program the full-scale range whose scale fraction matches @val and
+ * cache the selection in @data->dps_range. -EINVAL for unknown scales.
+ */
+static int bmg160_set_scale(struct bmg160_data *data, int val)
+{
+	size_t idx;
+	int ret;
+
+	for (idx = 0; idx < ARRAY_SIZE(bmg160_scale_table); idx++) {
+		if (bmg160_scale_table[idx].scale != val)
+			continue;
+
+		ret = i2c_smbus_write_byte_data(data->client,
+						BMG160_REG_RANGE,
+						bmg160_scale_table[idx].dps_range);
+		if (ret < 0) {
+			dev_err(&data->client->dev,
+				"Error writing reg_range\n");
+			return ret;
+		}
+		data->dps_range = bmg160_scale_table[idx].dps_range;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * bmg160_get_temp() - read the 8-bit signed temperature register,
+ * bracketing the access with a runtime-PM get/put.
+ */
+static int bmg160_get_temp(struct bmg160_data *data, int *val)
+{
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bmg160_set_power_state(data, true);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_TEMP);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_temp\n");
+		bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	/* Register is an 8-bit two's-complement value */
+	*val = sign_extend32(ret, 7);
+	ret = bmg160_set_power_state(data, false);
+	mutex_unlock(&data->mutex);
+	if (ret < 0)
+		return ret;
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * bmg160_get_axis() - read one 16-bit angular-rate sample for @axis,
+ * bracketing the access with a runtime-PM get/put.
+ */
+static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+{
+	int ret;
+
+	mutex_lock(&data->mutex);
+	ret = bmg160_set_power_state(data, true);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	ret = i2c_smbus_read_word_data(data->client, BMG160_AXIS_TO_REG(axis));
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading axis %d\n", axis);
+		bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	/* Samples are 16-bit two's-complement values */
+	*val = sign_extend32(ret, 15);
+	ret = bmg160_set_power_state(data, false);
+	mutex_unlock(&data->mutex);
+	if (ret < 0)
+		return ret;
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * bmg160_read_raw() - IIO read hook for raw samples, temperature
+ * offset, scale and sampling frequency.
+ */
+static int bmg160_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->type) {
+		case IIO_TEMP:
+			return bmg160_get_temp(data, val);
+		case IIO_ANGL_VEL:
+			/* Direct reads are rejected while buffering runs */
+			if (iio_buffer_enabled(indio_dev))
+				return -EBUSY;
+			else
+				return bmg160_get_axis(data, chan->scan_index,
+						       val);
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		if (chan->type == IIO_TEMP) {
+			*val = BMG160_TEMP_CENTER_VAL;
+			return IIO_VAL_INT;
+		} else
+			return -EINVAL;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		switch (chan->type) {
+		case IIO_TEMP:
+			/* 0.5 degree per LSB */
+			*val2 = 500000;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_ANGL_VEL:
+		{
+			int i;
+
+			/* Report the fraction matching the cached range */
+			for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
+				if (bmg160_scale_table[i].dps_range ==
+							data->dps_range) {
+					*val2 = bmg160_scale_table[i].scale;
+					return IIO_VAL_INT_PLUS_MICRO;
+				}
+			}
+			return -EINVAL;
+		}
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val2 = 0;
+		mutex_lock(&data->mutex);
+		ret = bmg160_get_bw(data, val);
+		mutex_unlock(&data->mutex);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * bmg160_write_raw() - IIO write hook for sampling frequency and scale.
+ * Register writes need the part awake, so each path takes a runtime-PM
+ * reference around the actual write.
+ */
+static int bmg160_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int val, int val2, long mask)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(&data->mutex);
+		/*
+		 * Section 4.2 of spec
+		 * In suspend mode, the only supported operations are reading
+		 * registers as well as writing to the (0x14) softreset
+		 * register. Since we will be in suspend mode by default, change
+		 * mode to power on for other writes.
+		 */
+		ret = bmg160_set_power_state(data, true);
+		if (ret < 0) {
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_bw(data, val);
+		if (ret < 0) {
+			bmg160_set_power_state(data, false);
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	case IIO_CHAN_INFO_SCALE:
+		/* Valid scales are pure fractions: integer part must be 0 */
+		if (val)
+			return -EINVAL;
+
+		mutex_lock(&data->mutex);
+		/* Refer to comments above for the suspend mode ops */
+		ret = bmg160_set_power_state(data, true);
+		if (ret < 0) {
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_scale(data, val2);
+		if (ret < 0) {
+			bmg160_set_power_state(data, false);
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+
+	/* Unreachable: every case above returns */
+	return -EINVAL;
+}
+
+/*
+ * Report the any-motion slope threshold (low 4 bits of the cached
+ * register value). Only IIO_EV_INFO_VALUE is supported.
+ */
+static int bmg160_read_event(struct iio_dev *indio_dev,
+			     const struct iio_chan_spec *chan,
+			     enum iio_event_type type,
+			     enum iio_event_direction dir,
+			     enum iio_event_info info,
+			     int *val, int *val2)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	*val2 = 0;
+	if (info != IIO_EV_INFO_VALUE)
+		return -EINVAL;
+
+	*val = data->slope_thres & BMG160_SLOPE_THRES_MASK;
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Update the cached any-motion slope threshold. Refused while the event
+ * is enabled; the value is written to hardware on the next enable.
+ */
+static int bmg160_write_event(struct iio_dev *indio_dev,
+			      const struct iio_chan_spec *chan,
+			      enum iio_event_type type,
+			      enum iio_event_direction dir,
+			      enum iio_event_info info,
+			      int val, int val2)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	if (info != IIO_EV_INFO_VALUE)
+		return -EINVAL;
+
+	if (data->ev_enable_state)
+		return -EBUSY;
+
+	data->slope_thres = (data->slope_thres & ~BMG160_SLOPE_THRES_MASK) |
+			    (val & BMG160_SLOPE_THRES_MASK);
+
+	return 0;
+}
+
+/* Report whether the any-motion (ROC) event is currently enabled. */
+static int bmg160_read_event_config(struct iio_dev *indio_dev,
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	return data->ev_enable_state;
+}
+
+/*
+ * bmg160_write_event_config() - enable/disable the any-motion event.
+ * If the motion trigger is active, disabling only clears the bookkeeping
+ * flag because the trigger still needs the hardware interrupt.
+ */
+static int bmg160_write_event_config(struct iio_dev *indio_dev,
+				     const struct iio_chan_spec *chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir,
+				     int state)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	if (state && data->ev_enable_state)
+		return 0;
+
+	mutex_lock(&data->mutex);
+
+	if (!state && data->motion_trigger_on) {
+		data->ev_enable_state = 0;
+		mutex_unlock(&data->mutex);
+		return 0;
+	}
+	/*
+	 * We will expect the enable and disable to do operation in
+	 * in reverse order. This will happen here anyway as our
+	 * resume operation uses sync mode runtime pm calls, the
+	 * suspend operation will be delayed by autosuspend delay
+	 * So the disable operation will still happen in reverse of
+	 * enable operation. When runtime pm is disabled the mode
+	 * is always on so sequence doesn't matter
+	 */
+	ret = bmg160_set_power_state(data, state);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	ret = bmg160_setup_any_motion_interrupt(data, state);
+	if (ret < 0) {
+		/* Roll back the runtime-PM reference taken above */
+		bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+
+	data->ev_enable_state = state;
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+/* Accept only this driver's own data-ready and any-motion triggers. */
+static int bmg160_validate_trigger(struct iio_dev *indio_dev,
+				   struct iio_trigger *trig)
+{
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	if (trig == data->dready_trig || trig == data->motion_trig)
+		return 0;
+
+	return -EINVAL;
+}
+
+/* Matches bmg160_samp_freq_table */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 400 1000 2000");
+
+/* Matches bmg160_scale_table */
+static IIO_CONST_ATTR(in_anglvel_scale_available,
+		      "0.001065 0.000532 0.000266 0.000133 0.000066");
+
+static struct attribute *bmg160_attributes[] = {
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group bmg160_attrs_group = {
+	.attrs = bmg160_attributes,
+};
+
+/* Any-motion event: rate-of-change, either direction */
+static const struct iio_event_spec bmg160_event = {
+		.type = IIO_EV_TYPE_ROC,
+		.dir = IIO_EV_DIR_EITHER,
+		.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+				       BIT(IIO_EV_INFO_ENABLE)
+};
+
+#define BMG160_CHANNEL(_axis) {					\
+	.type = IIO_ANGL_VEL,						\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##_axis,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
+		BIT(IIO_CHAN_INFO_SAMP_FREQ),				\
+	.scan_index = AXIS_##_axis,					\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 16,					\
+		.storagebits = 16,					\
+	},								\
+	.event_spec = &bmg160_event,					\
+	.num_event_specs = 1						\
+}
+
+static const struct iio_chan_spec bmg160_channels[] = {
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OFFSET),
+		.scan_index = -1,	/* temperature is not buffered */
+	},
+	BMG160_CHANNEL(X),
+	BMG160_CHANNEL(Y),
+	BMG160_CHANNEL(Z),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_info bmg160_info = {
+	.attrs			= &bmg160_attrs_group,
+	.read_raw		= bmg160_read_raw,
+	.write_raw		= bmg160_write_raw,
+	.read_event_value	= bmg160_read_event,
+	.write_event_value	= bmg160_write_event,
+	.write_event_config	= bmg160_write_event_config,
+	.read_event_config	= bmg160_read_event_config,
+	.validate_trigger	= bmg160_validate_trigger,
+	.driver_module		= THIS_MODULE,
+};
+
+/*
+ * bmg160_trigger_handler() - pollfunc bottom half: read every enabled
+ * axis and push the scan with a timestamp. On a read error the partial
+ * scan is dropped (nothing is pushed) but the trigger is still notified.
+ */
+static irqreturn_t bmg160_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int bit, ret, i = 0;
+
+	mutex_lock(&data->mutex);
+	for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+			 indio_dev->masklength) {
+		ret = i2c_smbus_read_word_data(data->client,
+					       BMG160_AXIS_TO_REG(bit));
+		if (ret < 0) {
+			mutex_unlock(&data->mutex);
+			goto err;
+		}
+		data->buffer[i++] = ret;
+	}
+	mutex_unlock(&data->mutex);
+
+	/* data->timestamp is presumably stamped in the IRQ path - not visible here */
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+					   data->timestamp);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * bmg160_trig_try_reen() - re-arm the trigger after a consumer is done.
+ * The latched any-motion interrupt must be explicitly cleared; the
+ * non-latched data-ready interrupt needs no acknowledgement.
+ */
+static int bmg160_trig_try_reen(struct iio_trigger *trig)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	/* new data interrupts don't need ack */
+	if (data->dready_trigger_on)
+		return 0;
+
+	/* Set latched mode interrupt and clear any latched interrupt */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_rst_latch\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * bmg160_data_rdy_trigger_set_state() - enable/disable either of the
+ * driver's triggers. Disabling the motion trigger while the any-motion
+ * event is still on only clears bookkeeping, since the event path still
+ * needs the hardware interrupt.
+ */
+static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					     bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->mutex);
+
+	if (!state && data->ev_enable_state && data->motion_trigger_on) {
+		data->motion_trigger_on = false;
+		mutex_unlock(&data->mutex);
+		return 0;
+	}
+
+	/*
+	 * Refer to comment in bmg160_write_event_config for
+	 * enable/disable operation order
+	 */
+	ret = bmg160_set_power_state(data, state);
+	if (ret < 0) {
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+	if (data->motion_trig == trig)
+		ret = bmg160_setup_any_motion_interrupt(data, state);
+	else
+		ret = bmg160_setup_new_data_interrupt(data, state);
+	if (ret < 0) {
+		/* Roll back the runtime-PM reference taken above */
+		bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
+	}
+	if (data->motion_trig == trig)
+		data->motion_trigger_on = state;
+	else
+		data->dready_trigger_on = state;
+
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+static const struct iio_trigger_ops bmg160_trigger_ops = {
+ .set_trigger_state = bmg160_data_rdy_trigger_set_state,
+ .try_reenable = bmg160_trig_try_reen,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Threaded IRQ handler for the event (any-motion) path: reads the
+ * interrupt status, pushes a rate-of-change event per flagged axis, then
+ * clears the interrupt latch unless the non-latched data-ready trigger
+ * is active.
+ */
+static irqreturn_t bmg160_event_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+	int dir;
+
+	ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_STATUS_2);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error reading reg_int_status2\n");
+		goto ack_intr_status;
+	}
+
+	/*
+	 * Bit 3 of INT_STATUS_2 selects the event direction — presumably
+	 * the any-motion sign bit; TODO confirm against the BMG160
+	 * datasheet and replace 0x08 with a named constant.
+	 */
+	if (ret & 0x08)
+		dir = IIO_EV_DIR_RISING;
+	else
+		dir = IIO_EV_DIR_FALLING;
+
+	if (ret & BMG160_ANY_MOTION_BIT_X)
+		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+							0,
+							IIO_MOD_X,
+							IIO_EV_TYPE_ROC,
+							dir),
+							data->timestamp);
+	if (ret & BMG160_ANY_MOTION_BIT_Y)
+		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+							0,
+							IIO_MOD_Y,
+							IIO_EV_TYPE_ROC,
+							dir),
+							data->timestamp);
+	if (ret & BMG160_ANY_MOTION_BIT_Z)
+		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
+							0,
+							IIO_MOD_Z,
+							IIO_EV_TYPE_ROC,
+							dir),
+							data->timestamp);
+
+ack_intr_status:
+	if (!data->dready_trigger_on) {
+		ret = i2c_smbus_write_byte_data(data->client,
+					BMG160_REG_INT_RST_LATCH,
+					BMG160_INT_MODE_LATCH_INT |
+					BMG160_INT_MODE_LATCH_RESET);
+		if (ret < 0)
+			dev_err(&data->client->dev,
+				"Error writing reg_rst_latch\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Hard-IRQ handler: latch a timestamp (used by both buffer capture and
+ * events), poll whichever trigger is active, and wake the threaded
+ * handler (bmg160_event_handler) only when events are enabled.
+ */
+static irqreturn_t bmg160_data_rdy_trig_poll(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	data->timestamp = iio_get_time_ns();
+
+	if (data->dready_trigger_on)
+		iio_trigger_poll(data->dready_trig);
+	else if (data->motion_trigger_on)
+		iio_trigger_poll(data->motion_trig);
+
+	if (data->ev_enable_state)
+		return IRQ_WAKE_THREAD;
+	else
+		return IRQ_HANDLED;
+
+}
+
+/*
+ * Look up the interrupt GPIO (e.g. described via ACPI), configure it as
+ * an input and return the corresponding IRQ number.  Returns a negative
+ * errno on failure; gpiod_to_irq() may itself return a negative value,
+ * which is passed straight through to the caller.
+ */
+static int bmg160_gpio_probe(struct i2c_client *client,
+			     struct bmg160_data *data)
+
+{
+	struct device *dev;
+	struct gpio_desc *gpio;
+	int ret;
+
+	if (!client)
+		return -EINVAL;
+
+	dev = &client->dev;
+
+	/* data ready gpio interrupt pin */
+	gpio = devm_gpiod_get_index(dev, BMG160_GPIO_NAME, 0);
+	if (IS_ERR(gpio)) {
+		dev_err(dev, "acpi gpio get index failed\n");
+		return PTR_ERR(gpio);
+	}
+
+	ret = gpiod_direction_input(gpio);
+	if (ret)
+		return ret;
+
+	ret = gpiod_to_irq(gpio);
+
+	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+	return ret;
+}
+
+/*
+ * Return the device name when this client was enumerated via ACPI,
+ * or NULL when no ACPI id matches.
+ */
+static const char *bmg160_match_acpi_device(struct device *dev)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return NULL;
+
+	return dev_name(dev);
+}
+
+/*
+ * Probe: init the chip, register the IIO device, and — when an IRQ is
+ * available — set up the data-ready and any-motion triggers plus the
+ * triggered buffer, then enable autosuspend runtime PM.  Allocation of
+ * the iio_dev, the IRQ and both triggers is devm-managed; only
+ * registrations are unwound in the error labels.
+ */
+static int bmg160_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct bmg160_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+	const char *name = NULL;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	ret = bmg160_chip_init(data);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&data->mutex);
+
+	/* ACPI enumeration overrides the i2c id-table name */
+	if (id)
+		name = id->name;
+
+	if (ACPI_HANDLE(&client->dev))
+		name = bmg160_match_acpi_device(&client->dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = bmg160_channels;
+	indio_dev->num_channels = ARRAY_SIZE(bmg160_channels);
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &bmg160_info;
+
+	/* Fall back to a GPIO-described interrupt if the I2C core gave none */
+	if (client->irq <= 0)
+		client->irq = bmg160_gpio_probe(client, data);
+
+	if (client->irq > 0) {
+		ret = devm_request_threaded_irq(&client->dev,
+						client->irq,
+						bmg160_data_rdy_trig_poll,
+						bmg160_event_handler,
+						IRQF_TRIGGER_RISING,
+						BMG160_IRQ_NAME,
+						indio_dev);
+		if (ret)
+			return ret;
+
+		data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+							   "%s-dev%d",
+							   indio_dev->name,
+							   indio_dev->id);
+		if (!data->dready_trig)
+			return -ENOMEM;
+
+		data->motion_trig = devm_iio_trigger_alloc(&client->dev,
+							  "%s-any-motion-dev%d",
+							  indio_dev->name,
+							  indio_dev->id);
+		if (!data->motion_trig)
+			return -ENOMEM;
+
+		data->dready_trig->dev.parent = &client->dev;
+		data->dready_trig->ops = &bmg160_trigger_ops;
+		iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+		ret = iio_trigger_register(data->dready_trig);
+		if (ret)
+			return ret;
+
+		data->motion_trig->dev.parent = &client->dev;
+		data->motion_trig->ops = &bmg160_trigger_ops;
+		iio_trigger_set_drvdata(data->motion_trig, indio_dev);
+		ret = iio_trigger_register(data->motion_trig);
+		if (ret) {
+			/* NULL marks it unregistered for the error path */
+			data->motion_trig = NULL;
+			goto err_trigger_unregister;
+		}
+
+		ret = iio_triggered_buffer_setup(indio_dev,
+						 NULL,
+						 bmg160_trigger_handler,
+						 NULL);
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"iio triggered buffer setup failed\n");
+			goto err_trigger_unregister;
+		}
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "unable to register iio device\n");
+		goto err_buffer_cleanup;
+	}
+
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret)
+		goto err_iio_unregister;
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev,
+					 BMG160_AUTO_SUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	return 0;
+
+err_iio_unregister:
+	iio_device_unregister(indio_dev);
+err_buffer_cleanup:
+	/* dready_trig doubles as "buffer/triggers were set up" flag */
+	if (data->dready_trig)
+		iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+	if (data->motion_trig)
+		iio_trigger_unregister(data->motion_trig);
+
+	return ret;
+}
+
+/*
+ * Remove: tear down runtime PM, unregister the IIO device, clean up the
+ * buffer/triggers (only present when an IRQ was used), and put the chip
+ * into deep suspend.
+ */
+static int bmg160_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	iio_device_unregister(indio_dev);
+
+	if (data->dready_trig) {
+		iio_triggered_buffer_cleanup(indio_dev);
+		iio_trigger_unregister(data->dready_trig);
+		iio_trigger_unregister(data->motion_trig);
+	}
+
+	mutex_lock(&data->mutex);
+	bmg160_set_mode(data, BMG160_MODE_DEEP_SUSPEND);
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: park the chip in (light) suspend mode. */
+static int bmg160_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	mutex_lock(&data->mutex);
+	bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+/*
+ * System resume: power back to normal mode only if a trigger or event
+ * was active when we suspended; otherwise stay suspended and let
+ * runtime PM wake the chip on demand.
+ */
+static int bmg160_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bmg160_data *data = iio_priv(indio_dev);
+
+	mutex_lock(&data->mutex);
+	if (data->dready_trigger_on || data->motion_trigger_on ||
+	    data->ev_enable_state)
+		bmg160_set_mode(data, BMG160_MODE_NORMAL);
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+/* Runtime suspend: -EAGAIN tells the PM core to retry later. */
+static int bmg160_runtime_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "set mode failed\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/* Runtime resume: back to normal mode, then wait for chip start-up. */
+static int bmg160_runtime_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct bmg160_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = bmg160_set_mode(data, BMG160_MODE_NORMAL);
+	if (ret < 0)
+		return ret;
+
+	msleep_interruptible(BMG160_MAX_STARTUP_TIME_MS);
+
+	return 0;
+}
+#endif
+
+/* Combined sleep + runtime PM ops (each set compiles out when disabled) */
+static const struct dev_pm_ops bmg160_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(bmg160_suspend, bmg160_resume)
+	SET_RUNTIME_PM_OPS(bmg160_runtime_suspend,
+			   bmg160_runtime_resume, NULL)
+};
+
+/* ACPI ids: standalone BMG160, and the gyro part of a BMI055 combo */
+static const struct acpi_device_id bmg160_acpi_match[] = {
+	{"BMG0160", 0},
+	{"BMI055B", 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
+
+static const struct i2c_device_id bmg160_id[] = {
+	{"bmg160", 0},
+	{"bmi055_gyro", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, bmg160_id);
+
+static struct i2c_driver bmg160_driver = {
+	.driver = {
+		.name	= BMG160_DRV_NAME,
+		.acpi_match_table = ACPI_PTR(bmg160_acpi_match),
+		.pm	= &bmg160_pm_ops,
+	},
+	.probe		= bmg160_probe,
+	.remove		= bmg160_remove,
+	.id_table	= bmg160_id,
+};
+module_i2c_driver(bmg160_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BMG160 Gyro driver");
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
new file mode 100644
index 00000000000000..a3ea1e8785d748
--- /dev/null
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -0,0 +1,427 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+/* Scan indices of the three angular-velocity axes */
+enum gyro_3d_channel {
+	CHANNEL_SCAN_INDEX_X,
+	CHANNEL_SCAN_INDEX_Y,
+	CHANNEL_SCAN_INDEX_Z,
+	GYRO_3D_CHANNEL_MAX,
+};
+
+/* Per-device state, stored in iio_priv() */
+struct gyro_3d_state {
+	struct hid_sensor_hub_callbacks callbacks;	/* sensor-hub hooks */
+	struct hid_sensor_common common_attributes;	/* shared HID attrs */
+	struct hid_sensor_hub_attribute_info gyro[GYRO_3D_CHANNEL_MAX];
+	u32 gyro_val[GYRO_3D_CHANNEL_MAX];	/* latest captured samples */
+	int scale_pre_decml;	/* scale: integer part */
+	int scale_post_decml;	/* scale: fractional part */
+	int scale_precision;	/* IIO_VAL_* code for the scale */
+	int value_offset;
+};
+
+/* HID usage id per axis, indexed by enum gyro_3d_channel */
+static const u32 gyro_3d_addresses[GYRO_3D_CHANNEL_MAX] = {
+	HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS,
+	HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS,
+	HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS
+};
+
+/*
+ * Channel definitions.  scan_type is filled in at probe time by
+ * gyro_3d_adjust_channel_bit_mask() from the HID report descriptor.
+ */
+static const struct iio_chan_spec gyro_3d_channels[] = {
+	{
+		.type = IIO_ANGL_VEL,
+		.modified = 1,
+		.channel2 = IIO_MOD_X,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+		BIT(IIO_CHAN_INFO_SCALE) |
+		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+		BIT(IIO_CHAN_INFO_HYSTERESIS),
+		.scan_index = CHANNEL_SCAN_INDEX_X,
+	}, {
+		.type = IIO_ANGL_VEL,
+		.modified = 1,
+		.channel2 = IIO_MOD_Y,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+		BIT(IIO_CHAN_INFO_SCALE) |
+		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+		BIT(IIO_CHAN_INFO_HYSTERESIS),
+		.scan_index = CHANNEL_SCAN_INDEX_Y,
+	}, {
+		.type = IIO_ANGL_VEL,
+		.modified = 1,
+		.channel2 = IIO_MOD_Z,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+		BIT(IIO_CHAN_INFO_SCALE) |
+		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+		BIT(IIO_CHAN_INFO_HYSTERESIS),
+		.scan_index = CHANNEL_SCAN_INDEX_Z,
+	}
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void gyro_3d_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+						int channel, int size)
+{
+	channels[channel].scan_type.sign = 's';
+	/* Real storage bits will change based on the report desc. */
+	channels[channel].scan_type.realbits = size * 8;
+	/* Maximum size of a sample to capture is u32 */
+	channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/*
+ * Channel read_raw handler.  Note "case 0" relies on IIO_CHAN_INFO_RAW
+ * being 0: a raw read powers the sensor up, sleeps two polling intervals
+ * so a fresh sample is available, fetches the value through the sensor
+ * hub, then powers back down.
+ */
+static int gyro_3d_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2,
+			      long mask)
+{
+	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+	int report_id = -1;
+	u32 address;
+	int ret_type;
+	s32 poll_value;
+
+	*val = 0;
+	*val2 = 0;
+	switch (mask) {
+	case 0:
+		poll_value = hid_sensor_read_poll_value(
+					&gyro_state->common_attributes);
+		if (poll_value < 0)
+			return -EINVAL;
+
+		hid_sensor_power_state(&gyro_state->common_attributes, true);
+		msleep_interruptible(poll_value * 2);
+		report_id = gyro_state->gyro[chan->scan_index].report_id;
+		address = gyro_3d_addresses[chan->scan_index];
+		if (report_id >= 0)
+			*val = sensor_hub_input_attr_get_raw_value(
+					gyro_state->common_attributes.hsdev,
+					HID_USAGE_SENSOR_GYRO_3D, address,
+					report_id);
+		else {
+			*val = 0;
+			hid_sensor_power_state(&gyro_state->common_attributes,
+						false);
+			return -EINVAL;
+		}
+		hid_sensor_power_state(&gyro_state->common_attributes, false);
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		*val = gyro_state->scale_pre_decml;
+		*val2 = gyro_state->scale_post_decml;
+		ret_type = gyro_state->scale_precision;
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = gyro_state->value_offset;
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret_type = hid_sensor_read_samp_freq_value(
+			&gyro_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret_type = hid_sensor_read_raw_hyst_value(
+			&gyro_state->common_attributes, val, val2);
+		break;
+	default:
+		ret_type = -EINVAL;
+		break;
+	}
+
+	return ret_type;
+}
+
+/*
+ * Channel write_raw handler: only the sampling frequency and hysteresis
+ * are writable; both are forwarded to the shared HID-sensor helpers.
+ */
+static int gyro_3d_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val,
+			       int val2,
+			       long mask)
+{
+	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+	int ret = 0;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = hid_sensor_write_samp_freq_value(
+				&gyro_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret = hid_sensor_write_raw_hyst_value(
+				&gyro_state->common_attributes, val, val2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct iio_info gyro_3d_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &gyro_3d_read_raw,
+	.write_raw = &gyro_3d_write_raw,
+};
+
+/*
+ * Push the captured sample set to the IIO buffer.  @len is unused:
+ * iio_push_to_buffers() takes the scan size from the buffer setup.
+ */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+					int len)
+{
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+	iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int gyro_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "gyro_3d_proc_event\n");
+	/* Only push when buffered capture is active */
+	if (atomic_read(&gyro_state->common_attributes.data_ready))
+		hid_sensor_push_data(indio_dev,
+				     gyro_state->gyro_val,
+				     sizeof(gyro_state->gyro_val));
+
+	return 0;
+}
+
+/*
+ * Capture one axis sample into local storage; the full set is pushed
+ * later by gyro_3d_proc_event().
+ */
+static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				size_t raw_len, char *raw_data,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+	int offset;
+	int ret = -EINVAL;
+
+	switch (usage_id) {
+	case HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS:
+	case HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS:
+	case HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS:
+		/* Usage ids for X/Y/Z are assumed consecutive */
+		offset = usage_id - HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS;
+		gyro_state->gyro_val[CHANNEL_SCAN_INDEX_X + offset] =
+						*(u32 *)raw_data;
+		ret = 0;
+	break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Parse the HID report for this usage id: fetch per-axis attribute info,
+ * size the scan types accordingly, compute the scale, and fall back to
+ * the common sensitivity field when no per-axis modifier exists.
+ * Returns the last sensor_hub_input_get_attribute_info() status.
+ */
+static int gyro_3d_parse_report(struct platform_device *pdev,
+				struct hid_sensor_hub_device *hsdev,
+				struct iio_chan_spec *channels,
+				unsigned usage_id,
+				struct gyro_3d_state *st)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i <= CHANNEL_SCAN_INDEX_Z; ++i) {
+		ret = sensor_hub_input_get_attribute_info(hsdev,
+				HID_INPUT_REPORT,
+				usage_id,
+				HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS + i,
+				&st->gyro[CHANNEL_SCAN_INDEX_X + i]);
+		if (ret < 0)
+			break;
+		gyro_3d_adjust_channel_bit_mask(channels,
+				CHANNEL_SCAN_INDEX_X + i,
+				st->gyro[CHANNEL_SCAN_INDEX_X + i].size);
+	}
+	dev_dbg(&pdev->dev, "gyro_3d %x:%x, %x:%x, %x:%x\n",
+			st->gyro[0].index,
+			st->gyro[0].report_id,
+			st->gyro[1].index, st->gyro[1].report_id,
+			st->gyro[2].index, st->gyro[2].report_id);
+
+	st->scale_precision = hid_sensor_format_scale(
+			HID_USAGE_SENSOR_GYRO_3D,
+			&st->gyro[CHANNEL_SCAN_INDEX_X],
+			&st->scale_pre_decml, &st->scale_post_decml);
+
+	/* Set Sensitivity field ids, when there is no individual modifier */
+	if (st->common_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_DATA_ANGL_VELOCITY,
+			&st->common_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->common_attributes.sensitivity.index,
+			st->common_attributes.sensitivity.report_id);
+	}
+	return ret;
+}
+
+/*
+ * Probe: parse common HID attributes, duplicate the channel template so
+ * scan types can be adjusted per report descriptor, set up the triggered
+ * buffer and HID trigger, register the IIO device, and finally hook the
+ * sensor-hub callbacks.  The kmemdup'd channel array is owned by the
+ * driver and freed via indio_dev->channels on error and in remove().
+ */
+static int hid_gyro_3d_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	static const char *name = "gyro_3d";
+	struct iio_dev *indio_dev;
+	struct gyro_3d_state *gyro_state;
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_chan_spec *channels;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*gyro_state));
+	if (!indio_dev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, indio_dev);
+
+	gyro_state = iio_priv(indio_dev);
+	gyro_state->common_attributes.hsdev = hsdev;
+	gyro_state->common_attributes.pdev = pdev;
+
+	ret = hid_sensor_parse_common_attributes(hsdev,
+						HID_USAGE_SENSOR_GYRO_3D,
+						&gyro_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup common attributes\n");
+		return ret;
+	}
+
+	channels = kmemdup(gyro_3d_channels, sizeof(gyro_3d_channels),
+			   GFP_KERNEL);
+	if (!channels) {
+		dev_err(&pdev->dev, "failed to duplicate channels\n");
+		return -ENOMEM;
+	}
+
+	ret = gyro_3d_parse_report(pdev, hsdev, channels,
+					HID_USAGE_SENSOR_GYRO_3D, gyro_state);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup attributes\n");
+		goto error_free_dev_mem;
+	}
+
+	indio_dev->channels = channels;
+	indio_dev->num_channels = ARRAY_SIZE(gyro_3d_channels);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->info = &gyro_3d_info;
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		NULL, NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+		goto error_free_dev_mem;
+	}
+	atomic_set(&gyro_state->common_attributes.data_ready, 0);
+	ret = hid_sensor_setup_trigger(indio_dev, name,
+					&gyro_state->common_attributes);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "trigger setup failed\n");
+		goto error_unreg_buffer_funcs;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "device register failed\n");
+		goto error_remove_trigger;
+	}
+
+	gyro_state->callbacks.send_event = gyro_3d_proc_event;
+	gyro_state->callbacks.capture_sample = gyro_3d_capture_sample;
+	gyro_state->callbacks.pdev = pdev;
+	ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D,
+					&gyro_state->callbacks);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "callback reg failed\n");
+		goto error_iio_unreg;
+	}
+
+	return ret;
+
+error_iio_unreg:
+	iio_device_unregister(indio_dev);
+error_remove_trigger:
+	hid_sensor_remove_trigger(&gyro_state->common_attributes);
+error_unreg_buffer_funcs:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+	kfree(indio_dev->channels);
+	return ret;
+}
+
+/*
+ * Remove: unwind probe in reverse order, including the kmemdup'd
+ * channel array.
+ */
+static int hid_gyro_3d_remove(struct platform_device *pdev)
+{
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
+
+	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
+	iio_device_unregister(indio_dev);
+	hid_sensor_remove_trigger(&gyro_state->common_attributes);
+	iio_triggered_buffer_cleanup(indio_dev);
+	kfree(indio_dev->channels);
+
+	return 0;
+}
+
+static struct platform_device_id hid_gyro_3d_ids[] = {
+	{
+		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+		.name = "HID-SENSOR-200076",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_gyro_3d_ids);
+
+static struct platform_driver hid_gyro_3d_platform_driver = {
+	.id_table = hid_gyro_3d_ids,
+	.driver = {
+		.name	= KBUILD_MODNAME,
+	},
+	.probe		= hid_gyro_3d_probe,
+	.remove		= hid_gyro_3d_remove,
+};
+module_platform_driver(hid_gyro_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Gyroscope 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
new file mode 100644
index 00000000000000..eef50e91f17cfe
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -0,0 +1,153 @@
+/*
+ * itg3200_buffer.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/gyro/itg3200.h>
+
+
+/*
+ * itg3200_read_all_channels() - burst-read temperature and X/Y/Z data
+ * @i2c: client for the ITG-3200
+ * @buf: caller buffer for ITG3200_SCAN_ELEMENTS big-endian 16-bit words
+ *
+ * Writes the start register (bit 7 set, matching the register-access
+ * convention used elsewhere in this driver), then reads all scan
+ * elements in one transfer.  Returns the i2c_transfer() result
+ * (2 on success, negative errno on failure).
+ */
+static int itg3200_read_all_channels(struct i2c_client *i2c, __be16 *buf)
+{
+	u8 tx = 0x80 | ITG3200_REG_TEMP_OUT_H;
+	struct i2c_msg msg[2] = {
+		{
+			.addr = i2c->addr,
+			.flags = i2c->flags,
+			.len = 1,
+			.buf = &tx,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = i2c->flags | I2C_M_RD,
+			.len = ITG3200_SCAN_ELEMENTS * sizeof(s16),
+			/*
+			 * Fix: read into the caller's buffer.  The old
+			 * "(char *)&buf" pointed at the local pointer
+			 * variable itself, so the I2C data clobbered the
+			 * pointer and never reached the caller.
+			 */
+			.buf = (char *)buf,
+		},
+	};
+
+	return i2c_transfer(i2c->adapter, msg, 2);
+}
+
+/*
+ * Triggered-buffer capture: burst-read all channels and push them with
+ * the poll-function timestamp.  The buffer reserves room for the s64
+ * timestamp and is 8-byte aligned so the timestamp insertion done by
+ * iio_push_to_buffers_with_timestamp() is naturally aligned.
+ */
+static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct itg3200 *st = iio_priv(indio_dev);
+	__be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)] __aligned(8);
+
+	int ret = itg3200_read_all_channels(st->i2c, buf);
+	if (ret < 0)
+		goto error_ret;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
+
+error_ret:
+	/*
+	 * Fix: notify the trigger as done even when the read fails;
+	 * previously a single I2C error skipped the notification and
+	 * left the trigger disabled forever.
+	 */
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/* Set up the triggered buffer; timestamps are latched at poll time. */
+int itg3200_buffer_configure(struct iio_dev *indio_dev)
+{
+	return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		itg3200_trigger_handler, NULL);
+}
+
+/* Counterpart of itg3200_buffer_configure() */
+void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
+{
+	iio_triggered_buffer_cleanup(indio_dev);
+}
+
+
+/*
+ * Enable/disable the data-ready interrupt via a read-modify-write of the
+ * IRQ_CONFIG register.
+ */
+static int itg3200_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					      bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	int ret;
+	u8 msc;
+
+	ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, &msc);
+	if (ret)
+		goto error_ret;
+
+	if (state)
+		msc |= ITG3200_IRQ_DATA_RDY_ENABLE;
+	else
+		msc &= ~ITG3200_IRQ_DATA_RDY_ENABLE;
+
+	ret = itg3200_write_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, msc);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	return ret;
+
+}
+
+static const struct iio_trigger_ops itg3200_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = &itg3200_data_rdy_trigger_set_state,
+};
+
+/*
+ * Allocate and register the data-ready trigger, request its IRQ, and
+ * make it the device's default trigger.  Resources here are not
+ * devm-managed; itg3200_remove_trigger() undoes them in reverse order.
+ */
+int itg3200_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct itg3200 *st = iio_priv(indio_dev);
+
+	st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+				     indio_dev->id);
+	if (!st->trig)
+		return -ENOMEM;
+
+	ret = request_irq(st->i2c->irq,
+			  &iio_trigger_generic_data_rdy_poll,
+			  IRQF_TRIGGER_RISING,
+			  "itg3200_data_rdy",
+			  st->trig);
+	if (ret)
+		goto error_free_trig;
+
+
+	st->trig->dev.parent = &st->i2c->dev;
+	st->trig->ops = &itg3200_trigger_ops;
+	iio_trigger_set_drvdata(st->trig, indio_dev);
+	ret = iio_trigger_register(st->trig);
+	if (ret)
+		goto error_free_irq;
+
+	/* select default trigger */
+	indio_dev->trig = iio_trigger_get(st->trig);
+
+	return 0;
+
+error_free_irq:
+	free_irq(st->i2c->irq, st->trig);
+error_free_trig:
+	iio_trigger_free(st->trig);
+	return ret;
+}
+
+/* Counterpart of itg3200_probe_trigger() */
+void itg3200_remove_trigger(struct iio_dev *indio_dev)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+
+	iio_trigger_unregister(st->trig);
+	free_irq(st->i2c->irq, st->trig);
+	iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
new file mode 100644
index 00000000000000..6a8020d4814038
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -0,0 +1,374 @@
+/*
+ * itg3200_core.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO:
+ * - Support digital low pass filter
+ * - Support power management
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/gyro/itg3200.h>
+
+
+/*
+ * Write one register.  Bit 7 is set on the register address for writes,
+ * matching the convention used throughout this driver — presumably an
+ * auto-increment/burst flag; TODO confirm against the ITG-3200
+ * datasheet.
+ */
+int itg3200_write_reg_8(struct iio_dev *indio_dev,
+		u8 reg_address, u8 val)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+
+	return i2c_smbus_write_byte_data(st->i2c, 0x80 | reg_address, val);
+}
+
+/* Read one register; returns 0 and stores the byte in *val, or errno. */
+int itg3200_read_reg_8(struct iio_dev *indio_dev,
+		u8 reg_address, u8 *val)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(st->i2c, reg_address);
+	if (ret < 0)
+		return ret;
+	*val = ret;
+	return 0;
+}
+
+/*
+ * Read a big-endian signed 16-bit register pair starting at
+ * @lower_reg_address and store the host-order value in *val.
+ * msg[0].buf points at the local variable, so the "|= 0x80" below is
+ * applied before i2c_transfer() runs and the modified address is what
+ * gets written.  Returns 0 on success; otherwise the i2c_transfer()
+ * result is passed through (negative errno, or a non-negative partial
+ * message count).
+ */
+static int itg3200_read_reg_s16(struct iio_dev *indio_dev, u8 lower_reg_address,
+		int *val)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+	struct i2c_client *client = st->i2c;
+	int ret;
+	s16 out;
+
+	struct i2c_msg msg[2] = {
+		{
+			.addr = client->addr,
+			.flags = client->flags,
+			.len = 1,
+			.buf = (char *)&lower_reg_address,
+		},
+		{
+			.addr = client->addr,
+			.flags = client->flags | I2C_M_RD,
+			.len = 2,
+			.buf = (char *)&out,
+		},
+	};
+
+	lower_reg_address |= 0x80;
+	ret = i2c_transfer(client->adapter, msg, 2);
+	be16_to_cpus(&out);
+	*val = out;
+
+	return (ret == 2) ? 0 : ret;
+}
+
+/*
+ * IIO read callback.
+ * - RAW: big-endian 16-bit register pair at chan->address.
+ * - SCALE: 1/280 degC per LSB for temperature, 14.375 LSB/(deg/s)
+ *   converted to rad/s for the gyro axes.
+ * - OFFSET: temperature channel only (the gyro channels do not expose
+ *   an offset in their info masks).
+ * - SAMP_FREQ: internal rate (1 or 8 kHz depending on the DLPF config)
+ *   divided by (sample-rate divider + 1).
+ */
+static int itg3200_read_raw(struct iio_dev *indio_dev,
+		const struct iio_chan_spec *chan,
+		int *val, int *val2, long info)
+{
+	int ret = 0;
+	u8 reg;
+	u8 regval;
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		reg = (u8)chan->address;
+		ret = itg3200_read_reg_s16(indio_dev, reg, val);
+		/*
+		 * Fix: propagate I2C failures instead of returning the
+		 * stale/garbage *val as a valid sample.
+		 */
+		if (ret)
+			return ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		if (chan->type == IIO_TEMP)
+			*val2 = 1000000000/280;
+		else
+			*val2 = 1214142; /* (1 / 14,375) * (PI / 180) */
+		return IIO_VAL_INT_PLUS_NANO;
+	case IIO_CHAN_INFO_OFFSET:
+		/* Only the temperature channel has an offset */
+		*val = 23000;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &regval);
+		if (ret)
+			return ret;
+
+		*val = (regval & ITG3200_DLPF_CFG_MASK) ? 1000 : 8000;
+
+		ret = itg3200_read_reg_8(indio_dev,
+					 ITG3200_REG_SAMPLE_RATE_DIV,
+					 &regval);
+		if (ret)
+			return ret;
+
+		*val /= regval + 1;
+		return IIO_VAL_INT;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * IIO write callback: only the sampling frequency is writable.  The
+ * requested frequency is converted to a sample-rate divider relative to
+ * the internal 1/8 kHz rate; the DLPF register is read under mlock so
+ * the divider matches the current internal rate.
+ */
+static int itg3200_write_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan,
+		int val,
+		int val2,
+		long mask)
+{
+	int ret;
+	u8 t;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (val == 0 || val2 != 0)
+			return -EINVAL;
+
+		mutex_lock(&indio_dev->mlock);
+
+		ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &t);
+		if (ret) {
+			mutex_unlock(&indio_dev->mlock);
+			return ret;
+		}
+		t = ((t & ITG3200_DLPF_CFG_MASK) ? 1000u : 8000u) / val - 1;
+
+		ret = itg3200_write_reg_8(indio_dev,
+					  ITG3200_REG_SAMPLE_RATE_DIV,
+					  t);
+
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Reset device and internal registers to the power-up-default settings
+ * Use the gyro clock as reference, as suggested by the datasheet
+ */
+static int itg3200_reset(struct iio_dev *indio_dev)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+	int ret;
+
+	dev_dbg(&st->i2c->dev, "reset device");
+
+	ret = itg3200_write_reg_8(indio_dev,
+			ITG3200_REG_POWER_MANAGEMENT,
+			ITG3200_RESET);
+	if (ret) {
+		dev_err(&st->i2c->dev, "error resetting device");
+		goto error_ret;
+	}
+
+	/*
+	 * Wait for PLL (1ms according to datasheet); 1.5ms busy-wait
+	 * gives margin.  NOTE(review): usleep_range() would be kinder
+	 * to the CPU here than udelay().
+	 */
+	udelay(1500);
+
+	ret = itg3200_write_reg_8(indio_dev,
+			ITG3200_REG_IRQ_CONFIG,
+			ITG3200_IRQ_ACTIVE_HIGH |
+			ITG3200_IRQ_PUSH_PULL |
+			ITG3200_IRQ_LATCH_50US_PULSE |
+			ITG3200_IRQ_LATCH_CLEAR_ANY);
+
+	if (ret)
+		dev_err(&st->i2c->dev, "error init device");
+
+error_ret:
+	return ret;
+}
+
+/* itg3200_enable_full_scale() - Disables the digital low pass filter */
+static int itg3200_enable_full_scale(struct iio_dev *indio_dev)
+{
+	u8 val;
+	int ret;
+
+	ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &val);
+	if (ret)
+		goto err_ret;
+
+	val |= ITG3200_DLPF_FS_SEL_2000;
+	return itg3200_write_reg_8(indio_dev, ITG3200_REG_DLPF, val);
+
+err_ret:
+	return ret;
+}
+
+/*
+ * Verify the chip identity (WHO_AM_I bits [6:1] must read 0x34), then
+ * reset and select the +/-2000 deg/s full-scale range.
+ */
+static int itg3200_initial_setup(struct iio_dev *indio_dev)
+{
+	struct itg3200 *st = iio_priv(indio_dev);
+	int ret;
+	u8 val;
+
+	ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_ADDRESS, &val);
+	if (ret)
+		goto err_ret;
+
+	if (((val >> 1) & 0x3f) != 0x34) {
+		dev_err(&st->i2c->dev, "invalid reg value 0x%02x", val);
+		ret = -ENXIO;
+		goto err_ret;
+	}
+
+	ret = itg3200_reset(indio_dev);
+	if (ret)
+		goto err_ret;
+
+	ret = itg3200_enable_full_scale(indio_dev);
+err_ret:
+	return ret;
+}
+
+#define ITG3200_ST \
+ { .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE }
+
+#define ITG3200_GYRO_CHAN(_mod) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ITG3200_REG_GYRO_ ## _mod ## OUT_H, \
+ .scan_index = ITG3200_SCAN_GYRO_ ## _mod, \
+ .scan_type = ITG3200_ST, \
+}
+
+static const struct iio_chan_spec itg3200_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = ITG3200_REG_TEMP_OUT_H,
+ .scan_index = ITG3200_SCAN_TEMP,
+ .scan_type = ITG3200_ST,
+ },
+ ITG3200_GYRO_CHAN(X),
+ ITG3200_GYRO_CHAN(Y),
+ ITG3200_GYRO_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(ITG3200_SCAN_ELEMENTS),
+};
+
+static const struct iio_info itg3200_info = {
+ .read_raw = &itg3200_read_raw,
+ .write_raw = &itg3200_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 };
+
+/*
+ * Probe: allocate the IIO device, configure the triggered buffer, set up
+ * the data-ready trigger when an IRQ is wired, initialize the hardware,
+ * and register with IIO.
+ */
+static int itg3200_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret;
+	struct itg3200 *st;
+	struct iio_dev *indio_dev;
+
+	dev_dbg(&client->dev, "probe I2C dev with IRQ %i", client->irq);
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	i2c_set_clientdata(client, indio_dev);
+	st->i2c = client;
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = client->dev.driver->name;
+	indio_dev->channels = itg3200_channels;
+	indio_dev->num_channels = ARRAY_SIZE(itg3200_channels);
+	indio_dev->available_scan_masks = itg3200_available_scan_masks;
+	indio_dev->info = &itg3200_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = itg3200_buffer_configure(indio_dev);
+	if (ret)
+		return ret;
+
+	if (client->irq) {
+		ret = itg3200_probe_trigger(indio_dev);
+		if (ret)
+			goto error_unconfigure_buffer;
+	}
+
+	ret = itg3200_initial_setup(indio_dev);
+	if (ret)
+		goto error_remove_trigger;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_remove_trigger;
+
+	return 0;
+
+error_remove_trigger:
+	if (client->irq)
+		itg3200_remove_trigger(indio_dev);
+error_unconfigure_buffer:
+	itg3200_buffer_unconfigure(indio_dev);
+	return ret;
+}
+
+/* Remove: unwind probe in reverse order. */
+static int itg3200_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+
+	if (client->irq)
+		itg3200_remove_trigger(indio_dev);
+
+	itg3200_buffer_unconfigure(indio_dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id itg3200_id[] = {
+	{ "itg3200", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, itg3200_id);
+
+static struct i2c_driver itg3200_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "itg3200",
+	},
+	.id_table	= itg3200_id,
+	.probe		= itg3200_probe,
+	.remove		= itg3200_remove,
+};
+
+module_i2c_driver(itg3200_driver);
+
+MODULE_AUTHOR("Christian Strobel <christian.strobel@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("ITG3200 Gyroscope I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
new file mode 100644
index 00000000000000..5353d6328c5444
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro.h
@@ -0,0 +1,52 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_GYRO_H
+#define ST_GYRO_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define L3G4200D_GYRO_DEV_NAME "l3g4200d"
+#define LSM330D_GYRO_DEV_NAME "lsm330d_gyro"
+#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
+#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
+#define L3GD20_GYRO_DEV_NAME "l3gd20"
+#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
+#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+
+/*
+ * Default platform data shared by all supported gyros: the data-ready
+ * (DRDY) signal on these parts is only available on the INT2 pin.
+ */
+static const struct st_sensors_platform_data gyro_pdata = {
+ .drdy_int_pin = 2,
+};
+
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+void st_gyro_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_gyro_allocate_ring(struct iio_dev *indio_dev);
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev);
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_GYRO_TRIGGER_SET_STATE (&st_gyro_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_GYRO_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_GYRO_H */
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
new file mode 100644
index 00000000000000..d67b17b6a7aab1
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -0,0 +1,105 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+/* Route trigger enable/disable requests to the data-ready IRQ control. */
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state)
+{
+	return st_sensors_set_dataready_irq(iio_trigger_get_drvdata(trig),
+					    state);
+}
+
+/* Buffer pre-enable: power the sensor on before sampling starts. */
+static int st_gyro_buffer_preenable(struct iio_dev *indio_dev)
+{
+	return st_sensors_set_enable(indio_dev, true);
+}
+
+/*
+ * Buffer post-enable: allocate the bounce buffer used by the trigger
+ * handler, restrict the enabled axes to the active scan mask and start
+ * the triggered buffer. Returns 0 or a negative errno; the bounce
+ * buffer is freed again on any failure.
+ */
+static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
+{
+	int err;
+	struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+	gdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+	if (gdata->buffer_data == NULL)
+		return -ENOMEM;
+
+	err = st_sensors_set_axis_enable(indio_dev,
+					(u8)indio_dev->active_scan_mask[0]);
+	if (err < 0)
+		goto st_gyro_buffer_postenable_error;
+
+	err = iio_triggered_buffer_postenable(indio_dev);
+	if (err < 0)
+		goto st_gyro_buffer_postenable_error;
+
+	return 0;
+
+st_gyro_buffer_postenable_error:
+	kfree(gdata->buffer_data);
+	return err;
+}
+
+/*
+ * Buffer pre-disable: stop the triggered buffer, re-enable all axes for
+ * later direct-mode (sysfs) reads and power the sensor down.
+ * NOTE(review): the bounce buffer is freed even when an earlier step
+ * fails, and a failure skips the remaining teardown steps -- looks
+ * intentional (the buffer must not leak) but worth confirming.
+ */
+static int st_gyro_buffer_predisable(struct iio_dev *indio_dev)
+{
+	int err;
+	struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+	err = iio_triggered_buffer_predisable(indio_dev);
+	if (err < 0)
+		goto st_gyro_buffer_predisable_error;
+
+	err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+	if (err < 0)
+		goto st_gyro_buffer_predisable_error;
+
+	err = st_sensors_set_enable(indio_dev, false);
+
+st_gyro_buffer_predisable_error:
+	kfree(gdata->buffer_data);
+	return err;
+}
+
+/* Buffer enable/disable callbacks invoked by the IIO core. */
+static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
+	.preenable = &st_gyro_buffer_preenable,
+	.postenable = &st_gyro_buffer_postenable,
+	.predisable = &st_gyro_buffer_predisable,
+};
+
+/* Attach a triggered buffer using the common st_sensors trigger handler. */
+int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+	return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		&st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
+}
+
+/* Reverse of st_gyro_allocate_ring(). */
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+	iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
new file mode 100644
index 00000000000000..f07a2336f7dc70
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -0,0 +1,389 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+#define ST_GYRO_NUMBER_DATA_CHANNELS 3
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_GYRO_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_GYRO_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_GYRO_FS_AVL_250DPS 250
+#define ST_GYRO_FS_AVL_500DPS 500
+#define ST_GYRO_FS_AVL_2000DPS 2000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_GYRO_1_WAI_EXP 0xd3
+#define ST_GYRO_1_ODR_ADDR 0x20
+#define ST_GYRO_1_ODR_MASK 0xc0
+#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
+#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
+#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
+#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
+#define ST_GYRO_1_PW_ADDR 0x20
+#define ST_GYRO_1_PW_MASK 0x08
+#define ST_GYRO_1_FS_ADDR 0x23
+#define ST_GYRO_1_FS_MASK 0x30
+#define ST_GYRO_1_FS_AVL_250_VAL 0x00
+#define ST_GYRO_1_FS_AVL_500_VAL 0x01
+#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_1_BDU_ADDR 0x23
+#define ST_GYRO_1_BDU_MASK 0x80
+#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
+#define ST_GYRO_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_GYRO_2_WAI_EXP 0xd4
+#define ST_GYRO_2_ODR_ADDR 0x20
+#define ST_GYRO_2_ODR_MASK 0xc0
+#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
+#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
+#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
+#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
+#define ST_GYRO_2_PW_ADDR 0x20
+#define ST_GYRO_2_PW_MASK 0x08
+#define ST_GYRO_2_FS_ADDR 0x23
+#define ST_GYRO_2_FS_MASK 0x30
+#define ST_GYRO_2_FS_AVL_250_VAL 0x00
+#define ST_GYRO_2_FS_AVL_500_VAL 0x01
+#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_2_BDU_ADDR 0x23
+#define ST_GYRO_2_BDU_MASK 0x80
+#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
+#define ST_GYRO_2_MULTIREAD_BIT true
+
+static const struct iio_chan_spec st_gyro_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+ ST_GYRO_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+ ST_GYRO_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+ ST_GYRO_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensor_settings st_gyro_sensors_settings[] = {
+ {
+ .wai = ST_GYRO_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3G4200D_GYRO_DEV_NAME,
+ [1] = LSM330DL_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_1_ODR_ADDR,
+ .mask = ST_GYRO_1_ODR_MASK,
+ .odr_avl = {
+ { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
+ { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_1_PW_ADDR,
+ .mask = ST_GYRO_1_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_1_FS_ADDR,
+ .mask = ST_GYRO_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_1_FS_AVL_250_VAL,
+ .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_1_FS_AVL_500_VAL,
+ .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_1_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_1_BDU_ADDR,
+ .mask = ST_GYRO_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
+ .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_GYRO_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3GD20_GYRO_DEV_NAME,
+ [1] = LSM330D_GYRO_DEV_NAME,
+ [2] = LSM330DLC_GYRO_DEV_NAME,
+ [3] = L3G4IS_GYRO_DEV_NAME,
+ [4] = LSM330_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_2_ODR_ADDR,
+ .mask = ST_GYRO_2_ODR_MASK,
+ .odr_avl = {
+ { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
+ { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
+ { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
+ { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_2_PW_ADDR,
+ .mask = ST_GYRO_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_2_FS_ADDR,
+ .mask = ST_GYRO_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_2_FS_AVL_250_VAL,
+ .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_2_FS_AVL_500_VAL,
+ .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_2_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_2_BDU_ADDR,
+ .mask = ST_GYRO_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
+ .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+/*
+ * Read a raw channel sample, the current full-scale gain or the output
+ * data rate on behalf of the IIO core. Returns an IIO_VAL_* code on
+ * success or a negative errno.
+ */
+static int st_gyro_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *ch, int *val,
+			int *val2, long mask)
+{
+	int err;
+	struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		err = st_sensors_read_info_raw(indio_dev, ch, val);
+		if (err < 0)
+			return err;
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		/* Gain is stored as the micro part: 0.<gain> units/LSB. */
+		*val = 0;
+		*val2 = gdata->current_fullscale->gain;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = gdata->odr;
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Apply a new full scale (selected by micro-gain) or sampling rate.
+ * NOTE(review): the SAMP_FREQ path takes indio_dev->mlock while the
+ * SCALE path does not -- confirm st_sensors_set_fullscale_by_gain
+ * performs its own locking.
+ */
+static int st_gyro_write_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	int err;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		/* Only whole-Hz rates are supported by the hardware. */
+		if (val2)
+			return -EINVAL;
+		mutex_lock(&indio_dev->mlock);
+		err = st_sensors_set_odr(indio_dev, val);
+		mutex_unlock(&indio_dev->mlock);
+		return err;
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_anglvel_scale_available);
+
+static struct attribute *st_gyro_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_gyro_attribute_group = {
+ .attrs = st_gyro_attributes,
+};
+
+static const struct iio_info gyro_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_gyro_attribute_group,
+ .read_raw = &st_gyro_read_raw,
+ .write_raw = &st_gyro_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_gyro_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+};
+#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
+#else
+#define ST_GYRO_TRIGGER_OPS NULL
+#endif
+
+/*
+ * Common probe path shared by the I2C and SPI front-ends: identify the
+ * part from its WAI register, apply the default full scale and ODR, set
+ * up the triggered buffer and -- when a data-ready IRQ exists -- the
+ * trigger, then register with the IIO core.
+ * NOTE(review): error returns after st_sensors_power_enable() do not
+ * disable the regulators again -- confirm whether that is handled
+ * elsewhere.
+ */
+int st_gyro_common_probe(struct iio_dev *indio_dev)
+{
+	struct st_sensor_data *gdata = iio_priv(indio_dev);
+	int irq = gdata->get_irq_data_ready(indio_dev);
+	int err;
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &gyro_info;
+
+	st_sensors_power_enable(indio_dev);
+
+	err = st_sensors_check_device_support(indio_dev,
+					ARRAY_SIZE(st_gyro_sensors_settings),
+					st_gyro_sensors_settings);
+	if (err < 0)
+		return err;
+
+	gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
+	gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
+	indio_dev->channels = gdata->sensor_settings->ch;
+	indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+	/* Default to the smallest full scale and the lowest ODR. */
+	gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+					&gdata->sensor_settings->fs.fs_avl[0];
+	gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
+
+	err = st_sensors_init_sensor(indio_dev,
+				(struct st_sensors_platform_data *)&gyro_pdata);
+	if (err < 0)
+		return err;
+
+	err = st_gyro_allocate_ring(indio_dev);
+	if (err < 0)
+		return err;
+
+	if (irq > 0) {
+		err = st_sensors_allocate_trigger(indio_dev,
+						  ST_GYRO_TRIGGER_OPS);
+		if (err < 0)
+			goto st_gyro_probe_trigger_error;
+	}
+
+	err = iio_device_register(indio_dev);
+	if (err)
+		goto st_gyro_device_register_error;
+
+	dev_info(&indio_dev->dev, "registered gyroscope %s\n",
+		 indio_dev->name);
+
+	return 0;
+
+st_gyro_device_register_error:
+	if (irq > 0)
+		st_sensors_deallocate_trigger(indio_dev);
+st_gyro_probe_trigger_error:
+	st_gyro_deallocate_ring(indio_dev);
+
+	return err;
+}
+EXPORT_SYMBOL(st_gyro_common_probe);
+
+/*
+ * Common teardown shared by the I2C and SPI front-ends; reverse of
+ * st_gyro_common_probe().
+ * NOTE(review): power is disabled before the device is unregistered,
+ * i.e. not in strict reverse probe order -- confirm this is intended.
+ */
+void st_gyro_common_remove(struct iio_dev *indio_dev)
+{
+	struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+	st_sensors_power_disable(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	if (gdata->get_irq_data_ready(indio_dev) > 0)
+		st_sensors_deallocate_trigger(indio_dev);
+
+	st_gyro_deallocate_ring(indio_dev);
+}
+EXPORT_SYMBOL(st_gyro_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
new file mode 100644
index 00000000000000..64480b16c68953
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -0,0 +1,114 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_gyro.h"
+
+#ifdef CONFIG_OF
+static const struct of_device_id st_gyro_of_match[] = {
+ {
+ .compatible = "st,l3g4200d-gyro",
+ .data = L3G4200D_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330d-gyro",
+ .data = LSM330D_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dl-gyro",
+ .data = LSM330DL_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dlc-gyro",
+ .data = LSM330DLC_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,l3gd20-gyro",
+ .data = L3GD20_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,l3g4is-gyro",
+ .data = L3G4IS_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330-gyro",
+ .data = LSM330_GYRO_DEV_NAME,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_gyro_of_match);
+#else
+#define st_gyro_of_match NULL
+#endif
+
+/*
+ * I2C probe: allocate the IIO device, resolve the part name from DT
+ * (when probed via OF), wire up the I2C transfer functions and hand
+ * off to the common probe path.
+ */
+static int st_gyro_i2c_probe(struct i2c_client *client,
+						const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct st_sensor_data *gdata;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*gdata));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	gdata = iio_priv(indio_dev);
+	st_sensors_of_i2c_probe(client, st_gyro_of_match);
+
+	st_sensors_i2c_configure(indio_dev, client, gdata);
+
+	return st_gyro_common_probe(indio_dev);
+}
+
+/* I2C unbind: delegate to the common teardown path. */
+static int st_gyro_i2c_remove(struct i2c_client *client)
+{
+	st_gyro_common_remove(i2c_get_clientdata(client));
+
+	return 0;
+}
+
+static const struct i2c_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
+
+static struct i2c_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-i2c",
+ .of_match_table = of_match_ptr(st_gyro_of_match),
+ },
+ .probe = st_gyro_i2c_probe,
+ .remove = st_gyro_i2c_remove,
+ .id_table = st_gyro_id_table,
+};
+module_i2c_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
new file mode 100644
index 00000000000000..e59bead6bc3cb2
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -0,0 +1,74 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_gyro.h"
+
+/*
+ * SPI probe: allocate the IIO device, wire up the SPI transfer
+ * functions and hand off to the common probe path.
+ */
+static int st_gyro_spi_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct st_sensor_data *gdata;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*gdata));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	gdata = iio_priv(indio_dev);
+
+	st_sensors_spi_configure(indio_dev, spi, gdata);
+
+	return st_gyro_common_probe(indio_dev);
+}
+
+/* SPI unbind: delegate to the common teardown path. */
+static int st_gyro_spi_remove(struct spi_device *spi)
+{
+	st_gyro_common_remove(spi_get_drvdata(spi));
+
+	return 0;
+}
+
+static const struct spi_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
+
+static struct spi_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-spi",
+ },
+ .probe = st_gyro_spi_probe,
+ .remove = st_gyro_spi_remove,
+ .id_table = st_gyro_id_table,
+};
+module_spi_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
new file mode 100644
index 00000000000000..4813b793b9f7f9
--- /dev/null
+++ b/drivers/iio/humidity/Kconfig
@@ -0,0 +1,35 @@
+#
+# humidity sensor drivers
+#
+menu "Humidity sensors"
+
+config DHT11
+ tristate "DHT11 (and compatible sensors) driver"
+ depends on GPIOLIB
+ help
+	  This driver supports reading data via a single
+	  interrupt-generating GPIO line. Currently tested are DHT11 and
+	  DHT22. Other sensors should work as well, as long as they use
+	  the same protocol.
+
+config SI7005
+ tristate "SI7005 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the Silabs Si7005 relative
+ humidity and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7005.
+
+config SI7020
+ tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
+ depends on I2C
+ help
+ Say yes here to build support for the Silicon Labs Si7013/20/21
+ Relative Humidity and Temperature Sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7020.
+
+endmenu
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
new file mode 100644
index 00000000000000..86e2d26e9f4dd8
--- /dev/null
+++ b/drivers/iio/humidity/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO humidity sensor drivers
+#
+
+obj-$(CONFIG_DHT11) += dht11.o
+obj-$(CONFIG_SI7005) += si7005.o
+obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
new file mode 100644
index 00000000000000..623c145d8a9724
--- /dev/null
+++ b/drivers/iio/humidity/dht11.c
@@ -0,0 +1,293 @@
+/*
+ * DHT11/DHT22 bit banging GPIO driver
+ *
+ * Copyright (c) Harald Geyer <harald@ccbib.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/iio/iio.h>
+
+#define DRIVER_NAME "dht11"
+
+#define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */
+
+#define DHT11_EDGES_PREAMBLE 4
+#define DHT11_BITS_PER_READ 40
+#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
+
+/* Data transmission timing (nano seconds) */
+#define DHT11_START_TRANSMISSION 18 /* ms */
+#define DHT11_SENSOR_RESPONSE 80000
+#define DHT11_START_BIT 50000
+#define DHT11_DATA_BIT_LOW 27000
+#define DHT11_DATA_BIT_HIGH 70000
+
+struct dht11 {
+ struct device *dev;
+
+ int gpio;
+ int irq;
+
+ struct completion completion;
+
+ s64 timestamp;
+ int temperature;
+ int humidity;
+
+ /* num_edges: -1 means "no transmission in progress" */
+ int num_edges;
+ struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
+};
+
+/*
+ * Convert eight per-bit pulse durations into one byte, MSB first.
+ * A duration of at least @threshold decodes as a 1 bit.
+ */
+static unsigned char dht11_decode_byte(int *timing, int threshold)
+{
+	unsigned char byte = 0;
+	int bit;
+
+	for (bit = 0; bit < 8; ++bit)
+		byte = (byte << 1) | (timing[bit] >= threshold ? 1 : 0);
+
+	return byte;
+}
+
+/*
+ * Decode one completed transmission from the recorded edge timestamps,
+ * starting @offset entries into the edge log. On success the converted
+ * temperature/humidity (milli-units) and a fresh timestamp are stored
+ * in @dht11; returns 0, or -EIO on a malformed/ambiguous signal.
+ */
+static int dht11_decode(struct dht11 *dht11, int offset)
+{
+	int i, t, timing[DHT11_BITS_PER_READ], threshold,
+		timeres = DHT11_SENSOR_RESPONSE;
+	unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
+
+	/*
+	 * Calculate timestamp resolution: the smallest positive delta
+	 * between consecutive edges. Start at i = 1 -- the previous code
+	 * started at 0 and read edges[-1], an out-of-bounds access.
+	 */
+	for (i = 1; i < dht11->num_edges; ++i) {
+		t = dht11->edges[i].ts - dht11->edges[i-1].ts;
+		if (t > 0 && t < timeres)
+			timeres = t;
+	}
+	if (2*timeres > DHT11_DATA_BIT_HIGH) {
+		pr_err("dht11: timeresolution %d too bad for decoding\n",
+			timeres);
+		return -EIO;
+	}
+	threshold = DHT11_DATA_BIT_HIGH / timeres;
+	if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold)
+		pr_err("dht11: WARNING: decoding ambiguous\n");
+
+	/* scale down with timeres and check validity */
+	for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
+		t = dht11->edges[offset + 2*i + 2].ts -
+			dht11->edges[offset + 2*i + 1].ts;
+		if (!dht11->edges[offset + 2*i + 1].value)
+			return -EIO; /* lost synchronisation */
+		timing[i] = t / timeres;
+	}
+
+	hum_int = dht11_decode_byte(timing, threshold);
+	hum_dec = dht11_decode_byte(&timing[8], threshold);
+	temp_int = dht11_decode_byte(&timing[16], threshold);
+	temp_dec = dht11_decode_byte(&timing[24], threshold);
+	checksum = dht11_decode_byte(&timing[32], threshold);
+
+	if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
+		return -EIO;
+
+	dht11->timestamp = iio_get_time_ns();
+	if (hum_int < 20) { /* DHT22 */
+		dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
+					((temp_int & 0x80) ? -100 : 100);
+		dht11->humidity = ((hum_int << 8) + hum_dec) * 100;
+	} else if (temp_dec == 0 && hum_dec == 0) {	/* DHT11 */
+		dht11->temperature = temp_int * 1000;
+		dht11->humidity = hum_int * 1000;
+	} else {
+		dev_err(dht11->dev,
+			"Don't know how to decode data: %d %d %d %d\n",
+			hum_int, hum_dec, temp_int, temp_dec);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Sysfs read path: when the cached measurement is older than
+ * DHT11_DATA_VALID_TIME, start a new transmission (drive the line low
+ * for DHT11_START_TRANSMISSION ms, then listen for edges via the IRQ
+ * handler) and decode it before reporting temperature or humidity.
+ */
+static int dht11_read_raw(struct iio_dev *iio_dev,
+			const struct iio_chan_spec *chan,
+			int *val, int *val2, long m)
+{
+	struct dht11 *dht11 = iio_priv(iio_dev);
+	int ret;
+
+	if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
+		reinit_completion(&dht11->completion);
+
+		/* num_edges >= 0 arms the IRQ handler's edge logging. */
+		dht11->num_edges = 0;
+		ret = gpio_direction_output(dht11->gpio, 0);
+		if (ret)
+			goto err;
+		msleep(DHT11_START_TRANSMISSION);
+		ret = gpio_direction_input(dht11->gpio);
+		if (ret)
+			goto err;
+
+		ret = wait_for_completion_killable_timeout(&dht11->completion,
+								 HZ);
+		if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
+			dev_err(&iio_dev->dev,
+					"Only %d signal edges detected\n",
+					dht11->num_edges);
+			ret = -ETIMEDOUT;
+		}
+		if (ret < 0)
+			goto err;
+
+		/*
+		 * Presumably some sensors emit one preamble edge less
+		 * than expected, hence the offset adjustment -- TODO
+		 * confirm against the DHT11/DHT22 datasheets.
+		 */
+		ret = dht11_decode(dht11,
+				dht11->num_edges == DHT11_EDGES_PER_READ ?
+					DHT11_EDGES_PREAMBLE :
+					DHT11_EDGES_PREAMBLE - 2);
+		if (ret)
+			goto err;
+	}
+
+	ret = IIO_VAL_INT;
+	if (chan->type == IIO_TEMP)
+		*val = dht11->temperature;
+	else if (chan->type == IIO_HUMIDITYRELATIVE)
+		*val = dht11->humidity;
+	else
+		ret = -EINVAL;
+err:
+	/* Disarm edge logging until the next read. */
+	dht11->num_edges = -1;
+	return ret;
+}
+
+static const struct iio_info dht11_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = dht11_read_raw,
+};
+
+/*
+ * IRQ handler called on GPIO edges: while a transmission is in
+ * progress (num_edges >= 0), log a timestamped edge and complete the
+ * waiting reader once the expected number of edges has been captured.
+ */
+static irqreturn_t dht11_handle_irq(int irq, void *data)
+{
+	struct iio_dev *iio = data;
+	struct dht11 *dht11 = iio_priv(iio);
+
+	/* TODO: Consider making the handler safe for IRQ sharing */
+	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
+		dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+		dht11->edges[dht11->num_edges++].value =
+						gpio_get_value(dht11->gpio);
+
+		if (dht11->num_edges >= DHT11_EDGES_PER_READ)
+			complete(&dht11->completion);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec dht11_chan_spec[] = {
+ { .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
+ { .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }
+};
+
+static const struct of_device_id dht11_dt_ids[] = {
+ { .compatible = "dht11", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dht11_dt_ids);
+
+/*
+ * Platform probe: claim the DT-described GPIO, request an IRQ on both
+ * edges for it, and register the IIO device. All resources are devm
+ * managed, so no remove callback is needed.
+ */
+static int dht11_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct dht11 *dht11;
+	struct iio_dev *iio;
+	int ret;
+
+	iio = devm_iio_device_alloc(dev, sizeof(*dht11));
+	if (!iio) {
+		dev_err(dev, "Failed to allocate IIO device\n");
+		return -ENOMEM;
+	}
+
+	dht11 = iio_priv(iio);
+	dht11->dev = dev;
+
+	/* Dual assignment: keep the GPIO number and its error code. */
+	dht11->gpio = ret = of_get_gpio(node, 0);
+	if (ret < 0)
+		return ret;
+	ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name);
+	if (ret)
+		return ret;
+
+	dht11->irq = gpio_to_irq(dht11->gpio);
+	if (dht11->irq < 0) {
+		dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
+		return -EINVAL;
+	}
+	ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
+			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			       pdev->name, iio);
+	if (ret)
+		return ret;
+
+	/* Make the cache stale so the first read triggers a measurement. */
+	dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
+	dht11->num_edges = -1;
+
+	platform_set_drvdata(pdev, iio);
+
+	init_completion(&dht11->completion);
+	iio->name = pdev->name;
+	iio->dev.parent = &pdev->dev;
+	iio->info = &dht11_iio_info;
+	iio->modes = INDIO_DIRECT_MODE;
+	iio->channels = dht11_chan_spec;
+	iio->num_channels = ARRAY_SIZE(dht11_chan_spec);
+
+	return devm_iio_device_register(dev, iio);
+}
+
+static struct platform_driver dht11_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dht11_dt_ids,
+ },
+ .probe = dht11_probe,
+};
+
+module_platform_driver(dht11_driver);
+
+MODULE_AUTHOR("Harald Geyer <harald@ccbib.org>");
+MODULE_DESCRIPTION("DHT11 humidity/temperature sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
new file mode 100644
index 00000000000000..bdd586e6d95523
--- /dev/null
+++ b/drivers/iio/humidity/si7005.c
@@ -0,0 +1,189 @@
+/*
+ * si7005.c - Support for Silabs Si7005 humidity and temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x40)
+ *
+ * TODO: heater, fast mode, processed mode (temp. / linearity compensation)
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define SI7005_STATUS 0x00
+#define SI7005_DATA 0x01 /* 16-bit, MSB */
+#define SI7005_CONFIG 0x03
+#define SI7005_ID 0x11
+
+#define SI7005_STATUS_NRDY BIT(0)
+#define SI7005_CONFIG_TEMP BIT(4)
+#define SI7005_CONFIG_START BIT(0)
+
+#define SI7005_ID_7005 0x50
+#define SI7005_ID_7015 0xf0
+
+struct si7005_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 config;
+};
+
+/*
+ * Start a conversion (humidity, or temperature when @temp is true),
+ * poll the status register until the not-ready bit clears (up to
+ * 50 * 20 ms) and return the 16-bit MSB-first data register value,
+ * or a negative errno on bus error / timeout.
+ */
+static int si7005_read_measurement(struct si7005_data *data, bool temp)
+{
+	int tries = 50;
+	int ret;
+
+	mutex_lock(&data->lock);
+
+	ret = i2c_smbus_write_byte_data(data->client, SI7005_CONFIG,
+					data->config | SI7005_CONFIG_START |
+					(temp ? SI7005_CONFIG_TEMP : 0));
+	if (ret < 0)
+		goto done;
+
+	while (tries-- > 0) {
+		msleep(20);
+		ret = i2c_smbus_read_byte_data(data->client, SI7005_STATUS);
+		if (ret < 0)
+			goto done;
+		if (!(ret & SI7005_STATUS_NRDY))
+			break;
+	}
+	/* tries only goes negative when the loop exhausted all polls. */
+	if (tries < 0) {
+		ret = -EIO;
+		goto done;
+	}
+
+	ret = i2c_smbus_read_word_swapped(data->client, SI7005_DATA);
+
+done:
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+/*
+ * Report raw samples plus the scale/offset pairs converting them to
+ * standard IIO units.
+ * NOTE(review): the constants appear to implement the Si7005 datasheet
+ * linearisation (T[degC] = raw/32 - 50, RH[%] = raw/16 - 24) -- confirm
+ * against the datasheet before relying on them.
+ */
+static int si7005_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan, int *val,
+			   int *val2, long mask)
+{
+	struct si7005_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = si7005_read_measurement(data, chan->type == IIO_TEMP);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		if (chan->type == IIO_TEMP) {
+			*val = 7;
+			*val2 = 812500;
+		} else {
+			*val = 3;
+			*val2 = 906250;
+		}
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_OFFSET:
+		if (chan->type == IIO_TEMP)
+			*val = -50 * 32 * 4;
+		else
+			*val = -24 * 16 * 16;
+		return IIO_VAL_INT;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_chan_spec si7005_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7005_info = {
+ .read_raw = si7005_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * I2C probe: verify SMBus word support, check the chip ID (both
+ * Si7005 and Si7015 answer here), cache the config register so later
+ * conversions preserve user-set bits, and register the IIO device.
+ */
+static int si7005_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct si7005_data *data;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = dev_name(&client->dev);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &si7005_info;
+
+	indio_dev->channels = si7005_channels;
+	indio_dev->num_channels = ARRAY_SIZE(si7005_channels);
+
+	ret = i2c_smbus_read_byte_data(client, SI7005_ID);
+	if (ret < 0)
+		return ret;
+	if (ret != SI7005_ID_7005 && ret != SI7005_ID_7015)
+		return -ENODEV;
+
+	ret = i2c_smbus_read_byte_data(client, SI7005_CONFIG);
+	if (ret < 0)
+		return ret;
+	data->config = ret;
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7005_id[] = {
+ { "si7005", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7005_id);
+
+static struct i2c_driver si7005_driver = {
+ .driver = {
+ .name = "si7005",
+ .owner = THIS_MODULE,
+ },
+ .probe = si7005_probe,
+ .id_table = si7005_id,
+};
+module_i2c_driver(si7005_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Silabs Si7005 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
new file mode 100644
index 00000000000000..b54164677b898d
--- /dev/null
+++ b/drivers/iio/humidity/si7020.c
@@ -0,0 +1,161 @@
+/*
+ * si7020.c - Silicon Labs Si7013/20/21 Relative Humidity and Temp Sensors
+ * Copyright (c) 2013,2014 Uplogix, Inc.
+ * David Barksdale <dbarksdale@uplogix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors
+ * are i2c devices which have an identical programming interface for
+ * measuring relative humidity and temperature. The Si7013 has an additional
+ * temperature input which this driver does not support.
+ *
+ * Data Sheets:
+ * Si7013: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7013.pdf
+ * Si7020: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7020.pdf
+ * Si7021: http://www.silabs.com/Support%20Documents/TechnicalDocs/Si7021.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Measure Relative Humidity, Hold Master Mode */
+#define SI7020CMD_RH_HOLD 0xE5
+/* Measure Temperature, Hold Master Mode */
+#define SI7020CMD_TEMP_HOLD 0xE3
+/* Software Reset */
+#define SI7020CMD_RESET 0xFE
+
+static int si7020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct i2c_client *client = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(client,
+ chan->type == IIO_TEMP ?
+ SI7020CMD_TEMP_HOLD :
+ SI7020CMD_RH_HOLD);
+ if (ret < 0)
+ return ret;
+ *val = ret >> 2;
+ if (chan->type == IIO_HUMIDITYRELATIVE)
+ *val &= GENMASK(11, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP)
+ *val = 175720; /* = 175.72 * 1000 */
+ else
+ *val = 125 * 1000;
+ *val2 = 65536 >> 2;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * Since iio_convert_raw_to_processed_unlocked assumes offset
+ * is an integer we have to round these values and lose
+ * accuracy.
+ * Relative humidity will be 0.0032959% too high and
+ * temperature will be 0.00277344 degrees too high.
+ * This is no big deal because it's within the accuracy of the
+ * sensor.
+ */
+ if (chan->type == IIO_TEMP)
+ *val = -4368; /* = -46.85 * (65536 >> 2) / 175.72 */
+ else
+ *val = -786; /* = -6 * (65536 >> 2) / 125 */
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec si7020_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }
+};
+
+static const struct iio_info si7020_info = {
+ .read_raw = si7020_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int si7020_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct i2c_client **data;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_WRITE_BYTE |
+				     I2C_FUNC_SMBUS_READ_WORD_DATA))
+		return -ENODEV;
+
+	/* Reset device, loads default settings. */
+	ret = i2c_smbus_write_byte(client, SI7020CMD_RESET);
+	if (ret < 0)
+		return ret;
+	/* Wait the maximum power-up time after software reset. */
+	msleep(15);
+
+	/* Private area only holds a pointer to the client, not the client. */
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	*data = client;
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = dev_name(&client->dev);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &si7020_info;
+	indio_dev->channels = si7020_channels;
+	indio_dev->num_channels = ARRAY_SIZE(si7020_channels);
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7020_id[] = {
+ { "si7020", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7020_id);
+
+static struct i2c_driver si7020_driver = {
+ .driver.name = "si7020",
+ .probe = si7020_probe,
+ .id_table = si7020_id,
+};
+
+module_i2c_driver(si7020_driver);
+MODULE_DESCRIPTION("Silicon Labs Si7013/20/21 Relative Humidity and Temperature Sensors");
+MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
new file mode 100644
index 00000000000000..5f0ea77fe7176e
--- /dev/null
+++ b/drivers/iio/iio_core.h
@@ -0,0 +1,73 @@
+/* The industrial I/O core function defs.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * These definitions are meant for use only within the IIO core, not individual
+ * drivers.
+ */
+
+#ifndef _IIO_CORE_H_
+#define _IIO_CORE_H_
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+struct iio_chan_spec;
+struct iio_dev;
+
+extern struct device_type iio_device_type;
+
+int __iio_add_chan_devattr(const char *postfix,
+ struct iio_chan_spec const *chan,
+ ssize_t (*func)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*writefunc)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len),
+ u64 mask,
+ enum iio_shared_by shared_by,
+ struct device *dev,
+ struct list_head *attr_list);
+void iio_free_chan_devattr_list(struct list_head *attr_list);
+
+ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
+
+/* Event interface flags */
+#define IIO_BUSY_BIT_POS 1
+
+#ifdef CONFIG_IIO_BUFFER
+struct poll_table_struct;
+
+unsigned int iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait);
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps);
+
+
+#define iio_buffer_poll_addr (&iio_buffer_poll)
+#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+
+void iio_disable_all_buffers(struct iio_dev *indio_dev);
+void iio_buffer_wakeup_poll(struct iio_dev *indio_dev);
+
+#else
+
+#define iio_buffer_poll_addr NULL
+#define iio_buffer_read_first_n_outer_addr NULL
+
+static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
+static inline void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) {}
+
+#endif
+
+int iio_device_register_eventset(struct iio_dev *indio_dev);
+void iio_device_unregister_eventset(struct iio_dev *indio_dev);
+void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
+int iio_event_getfd(struct iio_dev *indio_dev);
+
+#endif
diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h
new file mode 100644
index 00000000000000..06dbc44ffc994e
--- /dev/null
+++ b/drivers/iio/iio_core_trigger.h
@@ -0,0 +1,42 @@
+
+/* The industrial I/O core, trigger consumer handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifdef CONFIG_IIO_TRIGGER
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);
+
+#else
+
+/**
+ * iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
+ * @indio_dev: iio_dev associated with the device that will consume the trigger
+ **/
+static inline int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+/**
+ * iio_device_unregister_trigger_consumer() - reverse the registration process
+ * @indio_dev: iio_dev associated with the device that consumed the trigger
+ **/
+static inline void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_TRIGGER */
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
new file mode 100644
index 00000000000000..2b0e45133e9d12
--- /dev/null
+++ b/drivers/iio/imu/Kconfig
@@ -0,0 +1,42 @@
+#
+# IIO imu drivers configuration
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Inertial measurement units"
+
+config ADIS16400
+ tristate "Analog Devices ADIS16400 and similar IMU SPI driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16300, adis16344,
+ adis16350, adis16354, adis16355, adis16360, adis16362, adis16364,
+ adis16365, adis16400 and adis16405 triaxial inertial sensors
+ (adis16400 series also have magnetometers).
+
+config ADIS16480
+ tristate "Analog Devices ADIS16480 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
+ ADIS16485, ADIS16488 inertial sensors.
+
+source "drivers/iio/imu/inv_mpu6050/Kconfig"
+
+endmenu
+
+config IIO_ADIS_LIB
+ tristate
+ help
+ A set of IO helper functions for the Analog Devices ADIS* device family.
+
+config IIO_ADIS_LIB_BUFFER
+ bool
+ select IIO_TRIGGERED_BUFFER
+ help
+ A set of buffer helper functions for the Analog Devices ADIS* device
+ family.
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
new file mode 100644
index 00000000000000..114d2c17cbe204
--- /dev/null
+++ b/drivers/iio/imu/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for Inertial Measurement Units
+#
+
+# When adding new entries keep the list in alphabetical order
+adis16400-y := adis16400_core.o
+adis16400-$(CONFIG_IIO_BUFFER) += adis16400_buffer.o
+obj-$(CONFIG_ADIS16400) += adis16400.o
+obj-$(CONFIG_ADIS16480) += adis16480.o
+
+adis_lib-y += adis.o
+adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
+adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
+obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
+
+obj-y += inv_mpu6050/
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
new file mode 100644
index 00000000000000..911255d41c1a43
--- /dev/null
+++ b/drivers/iio/imu/adis.c
@@ -0,0 +1,440 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS_MSC_CTRL_DATA_RDY_EN BIT(2)
+#define ADIS_MSC_CTRL_DATA_RDY_POL_HIGH BIT(1)
+#define ADIS_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
+#define ADIS_GLOB_CMD_SW_RESET BIT(7)
+
+int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int value, unsigned int size)
+{
+ unsigned int page = reg / ADIS_PAGE_SIZE;
+ int ret, i;
+ struct spi_message msg;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = adis->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 4,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 6,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 8,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->write_delay,
+ },
+ };
+
+ mutex_lock(&adis->txrx_lock);
+
+ spi_message_init(&msg);
+
+ if (adis->current_page != page) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = page;
+ spi_message_add_tail(&xfers[0], &msg);
+ }
+
+ switch (size) {
+ case 4:
+ adis->tx[8] = ADIS_WRITE_REG(reg + 3);
+ adis->tx[9] = (value >> 24) & 0xff;
+ adis->tx[6] = ADIS_WRITE_REG(reg + 2);
+ adis->tx[7] = (value >> 16) & 0xff;
+ case 2:
+ adis->tx[4] = ADIS_WRITE_REG(reg + 1);
+ adis->tx[5] = (value >> 8) & 0xff;
+ case 1:
+ adis->tx[2] = ADIS_WRITE_REG(reg);
+ adis->tx[3] = value & 0xff;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ xfers[size].cs_change = 0;
+
+ for (i = 1; i <= size; i++)
+ spi_message_add_tail(&xfers[i], &msg);
+
+ ret = spi_sync(adis->spi, &msg);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n",
+ reg, ret);
+ } else {
+ adis->current_page = page;
+ }
+
+out_unlock:
+ mutex_unlock(&adis->txrx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_write_reg);
+
+/**
+ * adis_read_reg() - read 2 or 4 bytes from a register, depending on size
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size)
+{
+ unsigned int page = reg / ADIS_PAGE_SIZE;
+ struct spi_message msg;
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = adis->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->read_delay,
+ }, {
+ .tx_buf = adis->tx + 4,
+ .rx_buf = adis->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->read_delay,
+ }, {
+ .rx_buf = adis->rx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->read_delay,
+ },
+ };
+
+ mutex_lock(&adis->txrx_lock);
+ spi_message_init(&msg);
+
+ if (adis->current_page != page) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = page;
+ spi_message_add_tail(&xfers[0], &msg);
+ }
+
+ switch (size) {
+ case 4:
+ adis->tx[2] = ADIS_READ_REG(reg + 2);
+ adis->tx[3] = 0;
+ spi_message_add_tail(&xfers[1], &msg);
+ case 2:
+ adis->tx[4] = ADIS_READ_REG(reg);
+ adis->tx[5] = 0;
+ spi_message_add_tail(&xfers[2], &msg);
+ spi_message_add_tail(&xfers[3], &msg);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = spi_sync(adis->spi, &msg);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n",
+ reg, ret);
+ goto out_unlock;
+ } else {
+ adis->current_page = page;
+ }
+
+ switch (size) {
+ case 4:
+ *val = get_unaligned_be32(adis->rx);
+ break;
+ case 2:
+ *val = get_unaligned_be16(adis->rx + 2);
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&adis->txrx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_read_reg);
+
+#ifdef CONFIG_DEBUG_FS
+
+int adis_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval, unsigned int *readval)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+
+ if (readval) {
+ uint16_t val16;
+ int ret;
+
+ ret = adis_read_reg_16(adis, reg, &val16);
+ *readval = val16;
+
+ return ret;
+ } else {
+ return adis_write_reg_16(adis, reg, writeval);
+ }
+}
+EXPORT_SYMBOL(adis_debugfs_reg_access);
+
+#endif
+
+/**
+ * adis_enable_irq() - Enable or disable data ready IRQ
+ * @adis: The adis device
+ * @enable: Whether to enable the IRQ
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+int adis_enable_irq(struct adis *adis, bool enable)
+{
+ int ret = 0;
+ uint16_t msc;
+
+ if (adis->data->enable_irq)
+ return adis->data->enable_irq(adis, enable);
+
+ ret = adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(adis_enable_irq);
+
+/**
+ * adis_check_status() - Check the device for error conditions
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int adis_check_status(struct adis *adis)
+{
+ uint16_t status;
+ int ret;
+ int i;
+
+ ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ if (ret < 0)
+ return ret;
+
+ status &= adis->data->status_error_mask;
+
+ if (status == 0)
+ return 0;
+
+ for (i = 0; i < 16; ++i) {
+ if (status & BIT(i)) {
+ dev_err(&adis->spi->dev, "%s.\n",
+ adis->data->status_error_msgs[i]);
+ }
+ }
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(adis_check_status);
+
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int adis_reset(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_write_reg_8(adis, adis->data->glob_cmd_reg,
+ ADIS_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_reset);
+
+static int adis_self_test(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
+ adis->data->self_test_mask);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(adis->data->startup_delay);
+
+ return adis_check_status(adis);
+}
+
+/**
+ * adis_initial_startup() - Performs device self-test
+ * @adis: The adis device
+ *
+ * Returns 0 if the device is operational, a negative error code otherwise.
+ *
+ * This function should be called early on in the device initialization sequence
+ * to ensure that the device is in a sane and known state and that it is usable.
+ */
+int adis_initial_startup(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_self_test(adis);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
+ adis_reset(adis);
+ msleep(adis->data->startup_delay);
+ ret = adis_self_test(adis);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adis_initial_startup);
+
+/**
+ * adis_single_conversion() - Performs a single sample conversion
+ * @indio_dev: The IIO device
+ * @chan: The IIO channel
+ * @error_mask: Mask for the error bit
+ * @val: Result of the conversion
+ *
+ * Returns IIO_VAL_INT on success, a negative error code otherwise.
+ *
+ * The function performs a single conversion on a given channel and
+ * post-processes the value according to the channel spec. If an error_mask is
+ * given the function will check if the mask is set in the returned raw value.
+ * If it is set the function will perform a self-check. If the device does not
+ * report an error bit in the channel's raw value, set error_mask to 0.
+ */
+int adis_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int error_mask, int *val)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ unsigned int uval;
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis_read_reg(adis, chan->address, &uval,
+ chan->scan_type.storagebits / 8);
+ if (ret)
+ goto err_unlock;
+
+ if (uval & error_mask) {
+ ret = adis_check_status(adis);
+ if (ret)
+ goto err_unlock;
+ }
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(uval, chan->scan_type.realbits - 1);
+ else
+ *val = uval & ((1 << chan->scan_type.realbits) - 1);
+
+ ret = IIO_VAL_INT;
+err_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_single_conversion);
+
+/**
+ * adis_init() - Initialize adis device structure
+ * @adis: The adis device
+ * @indio_dev: The iio device
+ * @spi: The spi device
+ * @data: Chip specific data
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function must be called, before any other adis helper function may be
+ * called.
+ */
+int adis_init(struct adis *adis, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct adis_data *data)
+{
+ mutex_init(&adis->txrx_lock);
+ adis->spi = spi;
+ adis->data = data;
+ iio_device_set_drvdata(indio_dev, adis);
+
+ if (data->has_paging) {
+ /* Need to set the page before first read/write */
+ adis->current_page = -1;
+ } else {
+ /* Page will always be 0 */
+ adis->current_page = 0;
+ }
+
+ return adis_enable_irq(adis, false);
+}
+EXPORT_SYMBOL_GPL(adis_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Common library code for ADIS16XXX devices");
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
new file mode 100644
index 00000000000000..0916bf6b6c311c
--- /dev/null
+++ b/drivers/iio/imu/adis16400.h
@@ -0,0 +1,213 @@
+/*
+ * adis16400.h support Analog Devices ADIS16400
+ * 3d 18g accelerometers,
+ * 3d gyroscopes,
+ * 3d 2.5gauss magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
+ *
+ * Loosely based upon lis3l02dq.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_ADIS16400_H_
+#define SPI_ADIS16400_H_
+
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16400_STARTUP_DELAY 290 /* ms */
+#define ADIS16400_MTEST_DELAY 90 /* ms */
+
+#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16400_YGYRO_OUT 0x06 /* Y-axis gyroscope output */
+#define ADIS16400_ZGYRO_OUT 0x08 /* Z-axis gyroscope output */
+#define ADIS16400_XACCL_OUT 0x0A /* X-axis accelerometer output */
+#define ADIS16400_YACCL_OUT 0x0C /* Y-axis accelerometer output */
+#define ADIS16400_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
+#define ADIS16400_XMAGN_OUT 0x10 /* X-axis magnetometer measurement */
+#define ADIS16400_YMAGN_OUT 0x12 /* Y-axis magnetometer measurement */
+#define ADIS16400_ZMAGN_OUT 0x14 /* Z-axis magnetometer measurement */
+#define ADIS16400_TEMP_OUT 0x16 /* Temperature output */
+#define ADIS16400_AUX_ADC 0x18 /* Auxiliary ADC measurement */
+
+#define ADIS16350_XTEMP_OUT 0x10 /* X-axis gyroscope temperature measurement */
+#define ADIS16350_YTEMP_OUT 0x12 /* Y-axis gyroscope temperature measurement */
+#define ADIS16350_ZTEMP_OUT 0x14 /* Z-axis gyroscope temperature measurement */
+
+#define ADIS16300_PITCH_OUT 0x12 /* X axis inclinometer output measurement */
+#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
+#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+
+#define ADIS16448_BARO_OUT 0x16 /* Barometric pressure output */
+#define ADIS16448_TEMP_OUT 0x18 /* Temperature output */
+
+/* Calibration parameters */
+#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
+#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
+#define ADIS16400_ZGYRO_OFF 0x1E /* Z-axis gyroscope bias offset factor */
+#define ADIS16400_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
+#define ADIS16400_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
+#define ADIS16400_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
+#define ADIS16400_XMAGN_HIF 0x26 /* X-axis magnetometer, hard-iron factor */
+#define ADIS16400_YMAGN_HIF 0x28 /* Y-axis magnetometer, hard-iron factor */
+#define ADIS16400_ZMAGN_HIF 0x2A /* Z-axis magnetometer, hard-iron factor */
+#define ADIS16400_XMAGN_SIF 0x2C /* X-axis magnetometer, soft-iron factor */
+#define ADIS16400_YMAGN_SIF 0x2E /* Y-axis magnetometer, soft-iron factor */
+#define ADIS16400_ZMAGN_SIF 0x30 /* Z-axis magnetometer, soft-iron factor */
+
+#define ADIS16400_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
+#define ADIS16400_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16400_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16400_SENS_AVG 0x38 /* Dynamic range and digital filter control */
+#define ADIS16400_SLP_CNT 0x3A /* Sleep mode control */
+#define ADIS16400_DIAG_STAT 0x3C /* System status */
+
+/* Alarm functions */
+#define ADIS16400_GLOB_CMD 0x3E /* System command */
+#define ADIS16400_ALM_MAG1 0x40 /* Alarm 1 amplitude threshold */
+#define ADIS16400_ALM_MAG2 0x42 /* Alarm 2 amplitude threshold */
+#define ADIS16400_ALM_SMPL1 0x44 /* Alarm 1 sample size */
+#define ADIS16400_ALM_SMPL2 0x46 /* Alarm 2 sample size */
+#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
+#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
+
+#define ADIS16334_LOT_ID1 0x52 /* Lot identification code 1 */
+#define ADIS16334_LOT_ID2 0x54 /* Lot identification code 2 */
+#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
+#define ADIS16334_SERIAL_NUMBER 0x58 /* Serial number, lot specific */
+
+#define ADIS16400_ERROR_ACTIVE (1<<14)
+#define ADIS16400_NEW_DATA (1<<14)
+
+/* MSC_CTRL */
+#define ADIS16400_MSC_CTRL_MEM_TEST (1<<11)
+#define ADIS16400_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16400_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16400_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16400_MSC_CTRL_GYRO_BIAS (1<<7)
+#define ADIS16400_MSC_CTRL_ACCL_ALIGN (1<<6)
+#define ADIS16400_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16400_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16400_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
+
+/* DIAG_STAT */
+#define ADIS16400_DIAG_STAT_ZACCL_FAIL 15
+#define ADIS16400_DIAG_STAT_YACCL_FAIL 14
+#define ADIS16400_DIAG_STAT_XACCL_FAIL 13
+#define ADIS16400_DIAG_STAT_XGYRO_FAIL 12
+#define ADIS16400_DIAG_STAT_YGYRO_FAIL 11
+#define ADIS16400_DIAG_STAT_ZGYRO_FAIL 10
+#define ADIS16400_DIAG_STAT_ALARM2 9
+#define ADIS16400_DIAG_STAT_ALARM1 8
+#define ADIS16400_DIAG_STAT_FLASH_CHK 6
+#define ADIS16400_DIAG_STAT_SELF_TEST 5
+#define ADIS16400_DIAG_STAT_OVERFLOW 4
+#define ADIS16400_DIAG_STAT_SPI_FAIL 3
+#define ADIS16400_DIAG_STAT_FLASH_UPT 2
+#define ADIS16400_DIAG_STAT_POWER_HIGH 1
+#define ADIS16400_DIAG_STAT_POWER_LOW 0
+
+/* GLOB_CMD */
+#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16400_GLOB_CMD_P_AUTO_NULL (1<<4)
+#define ADIS16400_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16400_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16400_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16400_GLOB_CMD_AUTO_NULL (1<<0)
+
+/* SLP_CNT */
+#define ADIS16400_SLP_CNT_POWER_OFF (1<<8)
+
+#define ADIS16334_RATE_DIV_SHIFT 8
+#define ADIS16334_RATE_INT_CLK BIT(0)
+
+#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
+
+#define ADIS16400_HAS_PROD_ID BIT(0)
+#define ADIS16400_NO_BURST BIT(1)
+#define ADIS16400_HAS_SLOW_MODE BIT(2)
+#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
+
+struct adis16400_state;
+
+struct adis16400_chip_info {
+ const struct iio_chan_spec *channels;
+ const int num_channels;
+ const long flags;
+ unsigned int gyro_scale_micro;
+ unsigned int accel_scale_micro;
+ int temp_scale_nano;
+ int temp_offset;
+ int (*set_freq)(struct adis16400_state *st, unsigned int freq);
+ int (*get_freq)(struct adis16400_state *st);
+};
+
+/**
+ * struct adis16400_state - device instance specific data
+ * @variant: chip variant info
+ * @filt_int: integer part of requested filter frequency
+ * @adis: adis device
+ **/
+struct adis16400_state {
+ struct adis16400_chip_info *variant;
+ int filt_int;
+
+ struct adis adis;
+};
+
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum {
+ ADIS16400_SCAN_SUPPLY,
+ ADIS16400_SCAN_GYRO_X,
+ ADIS16400_SCAN_GYRO_Y,
+ ADIS16400_SCAN_GYRO_Z,
+ ADIS16400_SCAN_ACC_X,
+ ADIS16400_SCAN_ACC_Y,
+ ADIS16400_SCAN_ACC_Z,
+ ADIS16400_SCAN_MAGN_X,
+ ADIS16400_SCAN_MAGN_Y,
+ ADIS16400_SCAN_MAGN_Z,
+ ADIS16400_SCAN_BARO,
+ ADIS16350_SCAN_TEMP_X,
+ ADIS16350_SCAN_TEMP_Y,
+ ADIS16350_SCAN_TEMP_Z,
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+ ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
+};
+
+#ifdef CONFIG_IIO_BUFFER
+
+ssize_t adis16400_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+irqreturn_t adis16400_trigger_handler(int irq, void *p);
+
+#else /* CONFIG_IIO_BUFFER */
+
+#define adis16400_update_scan_mode NULL
+#define adis16400_trigger_handler NULL
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
new file mode 100644
index 00000000000000..6e727ffe52621f
--- /dev/null
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -0,0 +1,90 @@
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "adis16400.h"
+
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ uint16_t *tx;
+
+ if (st->variant->flags & ADIS16400_NO_BURST)
+ return adis_update_scan_mode(indio_dev, scan_mask);
+
+ kfree(adis->xfer);
+ kfree(adis->buffer);
+
+ adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
+ if (!adis->xfer)
+ return -ENOMEM;
+
+ adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
+ GFP_KERNEL);
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ tx = adis->buffer + indio_dev->scan_bytes;
+
+ tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
+ tx[1] = 0;
+
+ adis->xfer[0].tx_buf = tx;
+ adis->xfer[0].bits_per_word = 8;
+ adis->xfer[0].len = 2;
+ adis->xfer[1].tx_buf = tx;
+ adis->xfer[1].bits_per_word = 8;
+ adis->xfer[1].len = indio_dev->scan_bytes;
+
+ spi_message_init(&adis->msg);
+ spi_message_add_tail(&adis->xfer[0], &adis->msg);
+ spi_message_add_tail(&adis->xfer[1], &adis->msg);
+
+ return 0;
+}
+
+irqreturn_t adis16400_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct adis16400_state *st = iio_priv(indio_dev);
+	struct adis *adis = &st->adis;
+	u32 old_speed_hz = st->adis.spi->max_speed_hz;
+	int ret;
+
+	if (!adis->buffer) /* must return a valid irqreturn_t, not -ENOMEM */
+		return IRQ_NONE;
+
+	if (!(st->variant->flags & ADIS16400_NO_BURST) &&
+		st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
+		st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
+		spi_setup(st->adis.spi);
+	}
+
+	ret = spi_sync(adis->spi, &adis->msg);
+	if (ret)
+		dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+
+	if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+		st->adis.spi->max_speed_hz = old_speed_hz;
+		spi_setup(st->adis.spi);
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+		pf->timestamp);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
new file mode 100644
index 00000000000000..b70873de04ea50
--- /dev/null
+++ b/drivers/iio/imu/adis16400_core.c
@@ -0,0 +1,939 @@
+/*
+ * adis16400.c support Analog Devices ADIS16400/5
+ * 3d 2g Linear Accelerometers,
+ * 3d Gyroscopes,
+ * 3d Magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include "adis16400.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs read callback: format the two lot-ID registers and the serial
+ * number register as "LLLL-LLLL-SSSS\n" (three 4-digit hex fields).
+ */
+static ssize_t adis16400_show_serial_number(struct file *file,
+		char __user *userbuf, size_t count, loff_t *ppos)
+{
+	struct adis16400_state *st = file->private_data;
+	u16 lot1, lot2, serial_number;
+	/* 3 * 4 hex digits + 2 dashes + '\n' + NUL = 16 bytes exactly */
+	char buf[16];
+	size_t len;
+	int ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
+	if (ret < 0)
+		return ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
+	if (ret < 0)
+		return ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
+			&serial_number);
+	if (ret < 0)
+		return ret;
+
+	len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
+			serial_number);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/* Read-only debugfs file exposing the device serial number. */
+static const struct file_operations adis16400_serial_number_fops = {
+	.open = simple_open,
+	.read = adis16400_show_serial_number,
+	.llseek = default_llseek,
+	.owner = THIS_MODULE,
+};
+
+/* debugfs attribute getter: report the PRODUCT_ID register as a u64. */
+static int adis16400_show_product_id(void *arg, u64 *val)
+{
+	struct adis16400_state *st = arg;
+	uint16_t prod_id;
+	int ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
+	if (ret < 0)
+		return ret;
+
+	*val = prod_id;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+	adis16400_show_product_id, NULL, "%lld\n");
+
+/* debugfs attribute getter: report the FLASH_CNT register (flash write count). */
+static int adis16400_show_flash_count(void *arg, u64 *val)
+{
+	struct adis16400_state *st = arg;
+	uint16_t flash_count;
+	int ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
+	if (ret < 0)
+		return ret;
+
+	*val = flash_count;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+	adis16400_show_flash_count, NULL, "%lld\n");
+
+/*
+ * Register per-variant debugfs files under the IIO device's debugfs dir.
+ * serial_number/product_id are gated on the variant's capability flags;
+ * flash_count is present on all supported parts.
+ */
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+
+	if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
+		debugfs_create_file("serial_number", 0400,
+				indio_dev->debugfs_dentry, st,
+				&adis16400_serial_number_fops);
+	if (st->variant->flags & ADIS16400_HAS_PROD_ID)
+		debugfs_create_file("product_id", 0400,
+				indio_dev->debugfs_dentry, st,
+				&adis16400_product_id_fops);
+	debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+			st, &adis16400_flash_count_fops);
+
+	return 0;
+}
+
+#else
+
+/* CONFIG_DEBUG_FS disabled: nothing to register. */
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+#endif
+
+/* Indices into adis16400_chips[]; also used as spi_device_id driver_data. */
+enum adis16400_chip_variant {
+	ADIS16300,
+	ADIS16334,
+	ADIS16350,
+	ADIS16360,
+	ADIS16362,
+	ADIS16364,
+	ADIS16400,
+	ADIS16448,
+};
+
+/*
+ * Read back the sample rate (in the same milli-Hz units used by
+ * adis16334_set_freq): 819200 divided by 2^(rate divider field).
+ * Returns the rate on success or a negative error code.
+ */
+static int adis16334_get_freq(struct adis16400_state *st)
+{
+	uint16_t smpl_prd;
+	int ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &smpl_prd);
+	if (ret < 0)
+		return ret;
+
+	return 819200 >> (smpl_prd >> ADIS16334_RATE_DIV_SHIFT);
+}
+
+/*
+ * Program the sample rate: the hardware divides 819200 by a power of two,
+ * so pick the largest divider exponent that still meets @freq, capped at
+ * 0x31, and select the internal clock.
+ */
+static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+	unsigned int div = 0;
+
+	if (freq < 819200)
+		div = ilog2(819200 / freq);
+	if (div > 0x31)
+		div = 0x31;
+
+	return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD,
+			(div << ADIS16334_RATE_DIV_SHIFT) |
+			ADIS16334_RATE_INT_CLK);
+}
+
+/*
+ * Read back the sample rate: base clock (52851 when the slow time base
+ * bit is set, 1638404 otherwise — same units as adis16400_set_freq)
+ * divided by (divider field + 1). Returns a negative error code on
+ * register read failure.
+ */
+static int adis16400_get_freq(struct adis16400_state *st)
+{
+	uint16_t smpl_prd;
+	int base, ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &smpl_prd);
+	if (ret < 0)
+		return ret;
+
+	base = (smpl_prd & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
+
+	return base / ((smpl_prd & ADIS16400_SMPL_PRD_DIV_MASK) + 1);
+}
+
+/*
+ * Program the sample rate for parts using the SMPL_PRD divider scheme.
+ * Tries the fast time base (1638404) first; if the required divider does
+ * not fit in 7 bits, falls back to the slow time base (52851).
+ * Side effect: also caps spi->max_speed_hz, since slow sample rates
+ * require the slow SPI clock (see ADIS16400_HAS_SLOW_MODE handling).
+ */
+static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+	unsigned int t;
+	uint8_t val = 0;
+
+	t = 1638404 / freq;
+	if (t >= 128) {
+		val |= ADIS16400_SMPL_PRD_TIME_BASE;
+		t = 52851 / freq;
+		if (t >= 128)
+			t = 127;
+	} else if (t != 0) {
+		t--;
+	}
+
+	val |= t;
+
+	/* large dividers (or the slow time base) force the slow SPI clock */
+	if (t >= 0x0A || (val & ADIS16400_SMPL_PRD_TIME_BASE))
+		st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+	else
+		st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+
+	return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+}
+
+/*
+ * Sample-rate divisors giving the -3dB filter frequency for each value of
+ * the SENS_AVG filter field (index == register value).
+ */
+static const unsigned adis16400_3db_divisors[] = {
+	[0] = 2, /* Special case */
+	[1] = 6,
+	[2] = 12,
+	[3] = 25,
+	[4] = 50,
+	[5] = 100,
+	[6] = 200,
+	[7] = 200, /* Not a valid setting */
+};
+
+/*
+ * Pick the filter setting whose -3dB frequency (sample rate / divisor)
+ * is closest to, but not below, @val, then write it into the low bits of
+ * SENS_AVG while preserving the rest of the register.
+ */
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+	uint16_t sens_avg;
+	int i, ret;
+
+	/* Scan from the largest divisor down; stop at the first that fits. */
+	for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 1; i--) {
+		if (sps / adis16400_3db_divisors[i] >= val)
+			break;
+	}
+
+	ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &sens_avg);
+	if (ret < 0)
+		return ret;
+
+	return adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+			(sens_avg & ~0x07) | i);
+}
+
+/*
+ * Power down the device by writing the power-off bit into the sleep
+ * counter register. Returns 0 on success or a negative error code.
+ */
+static int adis16400_stop_device(struct iio_dev *indio_dev)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = adis_write_reg_16(&st->adis, ADIS16400_SLP_CNT,
+			ADIS16400_SLP_CNT_POWER_OFF);
+	if (ret)
+		/* kernel log messages must be newline terminated */
+		dev_err(&indio_dev->dev,
+			"problem with turning device off: SLP_CNT\n");
+
+	return ret;
+}
+
+/*
+ * Bring the device into a sane initial state: configure the SPI link,
+ * run the common adis startup sequence, sanity-check the product ID
+ * against the driver name, and switch to the fast SPI clock when the
+ * configured sample rate allows it.
+ */
+static int adis16400_initial_setup(struct iio_dev *indio_dev)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+	uint16_t prod_id, smp_prd;
+	unsigned int device_id;
+	int ret;
+
+	/* use low spi speed for init if the device has a slow mode */
+	if (st->variant->flags & ADIS16400_HAS_SLOW_MODE)
+		st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+	else
+		st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+	st->adis.spi->mode = SPI_MODE_3;
+	spi_setup(st->adis.spi);
+
+	ret = adis_initial_startup(&st->adis);
+	if (ret)
+		return ret;
+
+	if (st->variant->flags & ADIS16400_HAS_PROD_ID) {
+		ret = adis_read_reg_16(&st->adis,
+						ADIS16400_PRODUCT_ID, &prod_id);
+		if (ret)
+			goto err_ret;
+
+		/*
+		 * The device name is "adis<NNNN>"; extract the number so it
+		 * can be compared against PRODUCT_ID. Check the sscanf
+		 * result so device_id is never used uninitialized.
+		 */
+		if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
+			ret = -EINVAL;
+			goto err_ret;
+		}
+
+		if (prod_id != device_id)
+			dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+					device_id, prod_id);
+
+		dev_info(&indio_dev->dev, "%s: prod_id 0x%04x at CS%d (irq %d)\n",
+			indio_dev->name, prod_id,
+			st->adis.spi->chip_select, st->adis.spi->irq);
+	}
+	/* use high spi speed if possible */
+	if (st->variant->flags & ADIS16400_HAS_SLOW_MODE) {
+		ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &smp_prd);
+		if (ret)
+			goto err_ret;
+
+		if ((smp_prd & ADIS16400_SMPL_PRD_DIV_MASK) < 0x0A) {
+			st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+			spi_setup(st->adis.spi);
+		}
+	}
+
+err_ret:
+	return ret;
+}
+
+/* Calibration-bias (offset) register address for each scan index. */
+static const uint8_t adis16400_addresses[] = {
+	[ADIS16400_SCAN_GYRO_X] = ADIS16400_XGYRO_OFF,
+	[ADIS16400_SCAN_GYRO_Y] = ADIS16400_YGYRO_OFF,
+	[ADIS16400_SCAN_GYRO_Z] = ADIS16400_ZGYRO_OFF,
+	[ADIS16400_SCAN_ACC_X] = ADIS16400_XACCL_OFF,
+	[ADIS16400_SCAN_ACC_Y] = ADIS16400_YACCL_OFF,
+	[ADIS16400_SCAN_ACC_Z] = ADIS16400_ZACCL_OFF,
+};
+
+/*
+ * IIO write_raw callback. Supports writing the per-channel calibration
+ * bias, the low-pass filter 3dB frequency (val in Hz, val2 in uHz), and
+ * the sampling frequency (same milli-Hz convention as get_freq/set_freq).
+ * All register traffic is serialized under indio_dev->mlock.
+ */
+static int adis16400_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+	int ret, sps;
+
+	switch (info) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		mutex_lock(&indio_dev->mlock);
+		ret = adis_write_reg_16(&st->adis,
+				adis16400_addresses[chan->scan_index], val);
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		/*
+		 * Need to cache values so we can update if the frequency
+		 * changes.
+		 */
+		mutex_lock(&indio_dev->mlock);
+		st->filt_int = val;
+		/* Work out update to current value */
+		sps = st->variant->get_freq(st);
+		if (sps < 0) {
+			mutex_unlock(&indio_dev->mlock);
+			return sps;
+		}
+
+		ret = adis16400_set_filter(indio_dev, sps,
+			val * 1000 + val2 / 1000);
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		/* convert INT_PLUS_MICRO input to the driver's mHz units */
+		sps = val * 1000 + val2 / 1000;
+
+		if (sps <= 0)
+			return -EINVAL;
+
+		mutex_lock(&indio_dev->mlock);
+		ret = st->variant->set_freq(st, sps);
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * IIO read_raw callback: raw samples, per-type scales, calibration bias,
+ * temperature offset, filter 3dB frequency and sample rate. Scale values
+ * come from the per-variant chip_info table where they differ per part.
+ */
+static int adis16400_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+	struct adis16400_state *st = iio_priv(indio_dev);
+	int16_t val16;
+	int ret;
+
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		return adis_single_conversion(indio_dev, chan, 0, val);
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = 0;
+			*val2 = st->variant->gyro_scale_micro;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_VOLTAGE:
+			*val = 0;
+			/* channel 0 is the supply channel, others are aux ADC */
+			if (chan->channel == 0) {
+				*val = 2;
+				*val2 = 418000; /* 2.418 mV */
+			} else {
+				*val = 0;
+				*val2 = 805800; /* 805.8 uV */
+			}
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_ACCEL:
+			*val = 0;
+			*val2 = st->variant->accel_scale_micro;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_MAGN:
+			*val = 0;
+			*val2 = 500; /* 0.5 mgauss */
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			*val = st->variant->temp_scale_nano / 1000000;
+			*val2 = (st->variant->temp_scale_nano % 1000000);
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_CALIBBIAS:
+		mutex_lock(&indio_dev->mlock);
+		ret = adis_read_reg_16(&st->adis,
+				adis16400_addresses[chan->scan_index], &val16);
+		mutex_unlock(&indio_dev->mlock);
+		if (ret)
+			return ret;
+		/*
+		 * Sign-extend the 12-bit offset field.
+		 * NOTE(review): relies on arithmetic right shift of a signed
+		 * int16_t — consider sign_extend32() for clarity/portability.
+		 */
+		val16 = ((val16 & 0xFFF) << 4) >> 4;
+		*val = val16;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_OFFSET:
+		/* currently only temperature */
+		*val = st->variant->temp_offset;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		mutex_lock(&indio_dev->mlock);
+		/* Need both the number of taps and the sampling frequency */
+		ret = adis_read_reg_16(&st->adis,
+						ADIS16400_SENS_AVG,
+						&val16);
+		if (ret < 0) {
+			mutex_unlock(&indio_dev->mlock);
+			return ret;
+		}
+		ret = st->variant->get_freq(st);
+		if (ret >= 0) {
+			/* 3dB frequency = sample rate / divisor (in mHz) */
+			ret /= adis16400_3db_divisors[val16 & 0x07];
+			*val = ret / 1000;
+			*val2 = (ret % 1000) * 1000;
+		}
+		mutex_unlock(&indio_dev->mlock);
+		if (ret < 0)
+			return ret;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = st->variant->get_freq(st);
+		if (ret < 0)
+			return ret;
+		/* get_freq() reports mHz; split into Hz and uHz */
+		*val = ret / 1000;
+		*val2 = (ret % 1000) * 1000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * iio_chan_spec initializer macros. Each expands to one channel entry;
+ * (addr, bits) select the data register and the sample's significant bits
+ * (storage is always 16-bit big-endian).
+ */
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+	.type = IIO_VOLTAGE, \
+	.indexed = 1, \
+	.channel = 0, \
+	.extend_name = name, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = (si), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Supply voltage channel ("supply" extended name). */
+#define ADIS16400_SUPPLY_CHAN(addr, bits) \
+	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+
+/* Auxiliary ADC input channel. */
+#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
+	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+
+/* Gyroscope channel for axis modifier `mod` (X/Y/Z). */
+#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
+	.type = IIO_ANGL_VEL, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_ ## mod, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_CALIBBIAS), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = addr, \
+	.scan_index = ADIS16400_SCAN_GYRO_ ## mod, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Accelerometer channel for axis modifier `mod`. */
+#define ADIS16400_ACCEL_CHAN(mod, addr, bits) { \
+	.type = IIO_ACCEL, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_ ## mod, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_CALIBBIAS), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = ADIS16400_SCAN_ACC_ ## mod, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Magnetometer channel for axis modifier `mod`. */
+#define ADIS16400_MAGN_CHAN(mod, addr, bits) { \
+	.type = IIO_MAGN, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_ ## mod, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = ADIS16400_SCAN_MAGN_ ## mod, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Extended-name strings for the per-axis temperature channels below. */
+#define ADIS16400_MOD_TEMP_NAME_X "x"
+#define ADIS16400_MOD_TEMP_NAME_Y "y"
+#define ADIS16400_MOD_TEMP_NAME_Z "z"
+
+/* Per-axis temperature channel (adis16350 family has one per gyro axis). */
+#define ADIS16400_MOD_TEMP_CHAN(mod, addr, bits) { \
+	.type = IIO_TEMP, \
+	.indexed = 1, \
+	.channel = 0, \
+	.extend_name = ADIS16400_MOD_TEMP_NAME_ ## mod, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_OFFSET) | \
+		BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_shared_by_type = \
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = ADIS16350_SCAN_TEMP_ ## mod, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Single temperature channel (parts with only one temperature sensor). */
+#define ADIS16400_TEMP_CHAN(addr, bits) { \
+	.type = IIO_TEMP, \
+	.indexed = 1, \
+	.channel = 0, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+		BIT(IIO_CHAN_INFO_OFFSET) | \
+		BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = ADIS16350_SCAN_TEMP_X, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* Inclinometer (pitch/roll) channel, adis16300 only. */
+#define ADIS16400_INCLI_CHAN(mod, addr, bits) { \
+	.type = IIO_INCLI, \
+	.modified = 1, \
+	.channel2 = IIO_MOD_ ## mod, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.address = (addr), \
+	.scan_index = ADIS16300_SCAN_INCLI_ ## mod, \
+	.scan_type = { \
+		.sign = 's', \
+		.realbits = (bits), \
+		.storagebits = 16, \
+		.shift = 0, \
+		.endianness = IIO_BE, \
+	}, \
+}
+
+/* adis16400/16405: full set — supply, 3x gyro, 3x accel, 3x magn, temp, aux ADC. */
+static const struct iio_chan_spec adis16400_channels[] = {
+	ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 14),
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+	ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+	ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+	ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+	ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
+	ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
+/* adis16448: 16-bit samples, adds a barometer channel, no supply/aux ADC. */
+static const struct iio_chan_spec adis16448_channels[] = {
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
+	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
+	ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 16),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 16),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 16),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 16),
+	ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 16),
+	ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 16),
+	ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 16),
+	{
+		.type = IIO_PRESSURE,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+		.address = ADIS16448_BARO_OUT,
+		.scan_index = ADIS16400_SCAN_BARO,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
+	},
+	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
+/* adis16350 family: per-axis temperature channels, no single temp. */
+static const struct iio_chan_spec adis16350_channels[] = {
+	ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+	ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+	ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+	ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+	ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+	ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
+	ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
+	ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
+/* adis16300: single gyro axis plus pitch/roll inclinometer outputs. */
+static const struct iio_chan_spec adis16300_channels[] = {
+	ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+	ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+	ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+	ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
+	ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
+/* adis16334: gyro + accel + single temperature only. */
+static const struct iio_chan_spec adis16334_channels[] = {
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+	ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+	ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
+/*
+ * Per-variant data: channel table, capability flags, scale factors and
+ * temperature offset, plus the sample-rate accessors. Indexed by
+ * enum adis16400_chip_variant (the spi_device_id driver_data).
+ */
+static struct adis16400_chip_info adis16400_chips[] = {
+	[ADIS16300] = {
+		.channels = adis16300_channels,
+		.num_channels = ARRAY_SIZE(adis16300_channels),
+		.flags = ADIS16400_HAS_SLOW_MODE,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = 5884,
+		.temp_scale_nano = 140000000, /* 0.14 C */
+		.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16334] = {
+		.channels = adis16334_channels,
+		.num_channels = ARRAY_SIZE(adis16334_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_NO_BURST |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+		.temp_scale_nano = 67850000, /* 0.06785 C */
+		.temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
+		.set_freq = adis16334_set_freq,
+		.get_freq = adis16334_get_freq,
+	},
+	[ADIS16350] = {
+		.channels = adis16350_channels,
+		.num_channels = ARRAY_SIZE(adis16350_channels),
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
+		.temp_scale_nano = 145300000, /* 0.1453 C */
+		.temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
+		.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16360] = {
+		.channels = adis16350_channels,
+		.num_channels = ARRAY_SIZE(adis16350_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+		.temp_scale_nano = 136000000, /* 0.136 C */
+		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16362] = {
+		.channels = adis16350_channels,
+		.num_channels = ARRAY_SIZE(adis16350_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
+		.temp_scale_nano = 136000000, /* 0.136 C */
+		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16364] = {
+		.channels = adis16350_channels,
+		.num_channels = ARRAY_SIZE(adis16350_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+		.temp_scale_nano = 136000000, /* 0.136 C */
+		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16400] = {
+		.channels = adis16400_channels,
+		.num_channels = ARRAY_SIZE(adis16400_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+		.temp_scale_nano = 140000000, /* 0.14 C */
+		.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
+	[ADIS16448] = {
+		.channels = adis16448_channels,
+		.num_channels = ARRAY_SIZE(adis16448_channels),
+		.flags = ADIS16400_HAS_PROD_ID |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		/*
+		 * NOTE(review): verify the gyro sensitivity against the
+		 * ADIS16448 datasheet — later kernels use 0.04 deg/s here.
+		 */
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
+		.temp_scale_nano = 73860000, /* 0.07386 C */
+		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+		.set_freq = adis16334_set_freq,
+		.get_freq = adis16334_get_freq,
+	}
+};
+
+/* IIO core callbacks for all supported variants. */
+static const struct iio_info adis16400_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &adis16400_read_raw,
+	.write_raw = &adis16400_write_raw,
+	.update_scan_mode = adis16400_update_scan_mode,
+	.debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+/*
+ * Burst mode always transfers all channels, so the only available scan
+ * mask is "everything" (zero-terminated list).
+ */
+static const unsigned long adis16400_burst_scan_mask[] = {
+	~0UL,
+	0,
+};
+
+/* Human-readable message for each DIAG_STAT error bit. */
+static const char * const adis16400_status_error_msgs[] = {
+	[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+	[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+	[ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+	[ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+	[ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+	[ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+	[ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active",
+	[ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active",
+	[ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error",
+	[ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error",
+	[ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange",
+	[ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure",
+	[ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed",
+	[ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V",
+	[ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
+};
+
+/* Register layout and timing parameters handed to the common adis library. */
+static const struct adis_data adis16400_data = {
+	.msc_ctrl_reg = ADIS16400_MSC_CTRL,
+	.glob_cmd_reg = ADIS16400_GLOB_CMD,
+	.diag_stat_reg = ADIS16400_DIAG_STAT,
+
+	.read_delay = 50,
+	.write_delay = 50,
+
+	.self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
+	.startup_delay = ADIS16400_STARTUP_DELAY,
+
+	.status_error_msgs = adis16400_status_error_msgs,
+	.status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_ALARM2) |
+		BIT(ADIS16400_DIAG_STAT_ALARM1) |
+		BIT(ADIS16400_DIAG_STAT_FLASH_CHK) |
+		BIT(ADIS16400_DIAG_STAT_SELF_TEST) |
+		BIT(ADIS16400_DIAG_STAT_OVERFLOW) |
+		BIT(ADIS16400_DIAG_STAT_SPI_FAIL) |
+		BIT(ADIS16400_DIAG_STAT_FLASH_UPT) |
+		BIT(ADIS16400_DIAG_STAT_POWER_HIGH) |
+		BIT(ADIS16400_DIAG_STAT_POWER_LOW),
+};
+
+/*
+ * SPI probe: allocate the IIO device, look up the chip variant from the
+ * spi_device_id, initialize the common adis layer, set up the trigger
+ * buffer, bring the hardware up and register with the IIO core.
+ */
+static int adis16400_probe(struct spi_device *spi)
+{
+	struct adis16400_state *st;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, indio_dev);
+
+	/* setup the industrialio driver allocated elements */
+	st->variant = &adis16400_chips[spi_get_device_id(spi)->driver_data];
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->channels = st->variant->channels;
+	indio_dev->num_channels = st->variant->num_channels;
+	indio_dev->info = &adis16400_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* burst-capable parts can only scan all channels at once */
+	if (!(st->variant->flags & ADIS16400_NO_BURST))
+		indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+
+	ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+	if (ret)
+		return ret;
+
+	ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+			adis16400_trigger_handler);
+	if (ret)
+		return ret;
+
+	/* Get the device into a sane initial state */
+	ret = adis16400_initial_setup(indio_dev);
+	if (ret)
+		goto error_cleanup_buffer;
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_cleanup_buffer;
+
+	adis16400_debugfs_init(indio_dev);
+	return 0;
+
+error_cleanup_buffer:
+	adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+	return ret;
+}
+
+/*
+ * SPI remove: unregister from IIO, power the part down, then tear down
+ * the buffer and trigger set up in probe. The iio_dev itself is
+ * devm-managed.
+ */
+static int adis16400_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adis16400_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	adis16400_stop_device(indio_dev);
+
+	adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+	return 0;
+}
+
+/*
+ * Supported parts; driver_data is the adis16400_chip_variant index.
+ * Several parts share a variant entry (e.g. adis16354/16355 -> ADIS16350).
+ */
+static const struct spi_device_id adis16400_id[] = {
+	{"adis16300", ADIS16300},
+	{"adis16334", ADIS16334},
+	{"adis16350", ADIS16350},
+	{"adis16354", ADIS16350},
+	{"adis16355", ADIS16350},
+	{"adis16360", ADIS16360},
+	{"adis16362", ADIS16362},
+	{"adis16364", ADIS16364},
+	{"adis16365", ADIS16360},
+	{"adis16400", ADIS16400},
+	{"adis16405", ADIS16400},
+	{"adis16448", ADIS16448},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
+
+static struct spi_driver adis16400_driver = {
+	.driver = {
+		.name = "adis16400",
+		.owner = THIS_MODULE,
+	},
+	.id_table = adis16400_id,
+	.probe = adis16400_probe,
+	.remove = adis16400_remove,
+};
+module_spi_driver(adis16400_driver);
+
+MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
new file mode 100644
index 00000000000000..989605dd6f7810
--- /dev/null
+++ b/drivers/iio/imu/adis16480.c
@@ -0,0 +1,882 @@
+/*
+ * ADIS16480 and similar IMUs driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/debugfs.h>
+
+#define ADIS16480_PAGE_SIZE 0x80
+
+#define ADIS16480_REG(page, reg) ((page) * ADIS16480_PAGE_SIZE + (reg))
+
+#define ADIS16480_REG_PAGE_ID 0x00 /* Same address on each page */
+#define ADIS16480_REG_SEQ_CNT ADIS16480_REG(0x00, 0x06)
+#define ADIS16480_REG_SYS_E_FLA ADIS16480_REG(0x00, 0x08)
+#define ADIS16480_REG_DIAG_STS ADIS16480_REG(0x00, 0x0A)
+#define ADIS16480_REG_ALM_STS ADIS16480_REG(0x00, 0x0C)
+#define ADIS16480_REG_TEMP_OUT ADIS16480_REG(0x00, 0x0E)
+#define ADIS16480_REG_X_GYRO_OUT ADIS16480_REG(0x00, 0x10)
+#define ADIS16480_REG_Y_GYRO_OUT ADIS16480_REG(0x00, 0x14)
+#define ADIS16480_REG_Z_GYRO_OUT ADIS16480_REG(0x00, 0x18)
+#define ADIS16480_REG_X_ACCEL_OUT ADIS16480_REG(0x00, 0x1C)
+#define ADIS16480_REG_Y_ACCEL_OUT ADIS16480_REG(0x00, 0x20)
+#define ADIS16480_REG_Z_ACCEL_OUT ADIS16480_REG(0x00, 0x24)
+#define ADIS16480_REG_X_MAGN_OUT ADIS16480_REG(0x00, 0x28)
+#define ADIS16480_REG_Y_MAGN_OUT ADIS16480_REG(0x00, 0x2A)
+#define ADIS16480_REG_Z_MAGN_OUT ADIS16480_REG(0x00, 0x2C)
+#define ADIS16480_REG_BAROM_OUT ADIS16480_REG(0x00, 0x2E)
+#define ADIS16480_REG_X_DELTAANG_OUT ADIS16480_REG(0x00, 0x40)
+#define ADIS16480_REG_Y_DELTAANG_OUT ADIS16480_REG(0x00, 0x44)
+#define ADIS16480_REG_Z_DELTAANG_OUT ADIS16480_REG(0x00, 0x48)
+#define ADIS16480_REG_X_DELTAVEL_OUT ADIS16480_REG(0x00, 0x4C)
+#define ADIS16480_REG_Y_DELTAVEL_OUT ADIS16480_REG(0x00, 0x50)
+#define ADIS16480_REG_Z_DELTAVEL_OUT ADIS16480_REG(0x00, 0x54)
+#define ADIS16480_REG_PROD_ID ADIS16480_REG(0x00, 0x7E)
+
+#define ADIS16480_REG_X_GYRO_SCALE ADIS16480_REG(0x02, 0x04)
+#define ADIS16480_REG_Y_GYRO_SCALE ADIS16480_REG(0x02, 0x06)
+#define ADIS16480_REG_Z_GYRO_SCALE ADIS16480_REG(0x02, 0x08)
+#define ADIS16480_REG_X_ACCEL_SCALE ADIS16480_REG(0x02, 0x0A)
+#define ADIS16480_REG_Y_ACCEL_SCALE ADIS16480_REG(0x02, 0x0C)
+#define ADIS16480_REG_Z_ACCEL_SCALE ADIS16480_REG(0x02, 0x0E)
+#define ADIS16480_REG_X_GYRO_BIAS ADIS16480_REG(0x02, 0x10)
+#define ADIS16480_REG_Y_GYRO_BIAS ADIS16480_REG(0x02, 0x14)
+#define ADIS16480_REG_Z_GYRO_BIAS ADIS16480_REG(0x02, 0x18)
+#define ADIS16480_REG_X_ACCEL_BIAS ADIS16480_REG(0x02, 0x1C)
+#define ADIS16480_REG_Y_ACCEL_BIAS ADIS16480_REG(0x02, 0x20)
+#define ADIS16480_REG_Z_ACCEL_BIAS ADIS16480_REG(0x02, 0x24)
+#define ADIS16480_REG_X_HARD_IRON ADIS16480_REG(0x02, 0x28)
+#define ADIS16480_REG_Y_HARD_IRON ADIS16480_REG(0x02, 0x2A)
+#define ADIS16480_REG_Z_HARD_IRON ADIS16480_REG(0x02, 0x2C)
+#define ADIS16480_REG_BAROM_BIAS ADIS16480_REG(0x02, 0x40)
+#define ADIS16480_REG_FLASH_CNT ADIS16480_REG(0x02, 0x7C)
+
+#define ADIS16480_REG_GLOB_CMD ADIS16480_REG(0x03, 0x02)
+#define ADIS16480_REG_FNCTIO_CTRL ADIS16480_REG(0x03, 0x06)
+#define ADIS16480_REG_GPIO_CTRL ADIS16480_REG(0x03, 0x08)
+#define ADIS16480_REG_CONFIG ADIS16480_REG(0x03, 0x0A)
+#define ADIS16480_REG_DEC_RATE ADIS16480_REG(0x03, 0x0C)
+#define ADIS16480_REG_SLP_CNT ADIS16480_REG(0x03, 0x10)
+#define ADIS16480_REG_FILTER_BNK0 ADIS16480_REG(0x03, 0x16)
+#define ADIS16480_REG_FILTER_BNK1 ADIS16480_REG(0x03, 0x18)
+#define ADIS16480_REG_ALM_CNFG0 ADIS16480_REG(0x03, 0x20)
+#define ADIS16480_REG_ALM_CNFG1 ADIS16480_REG(0x03, 0x22)
+#define ADIS16480_REG_ALM_CNFG2 ADIS16480_REG(0x03, 0x24)
+#define ADIS16480_REG_XG_ALM_MAGN ADIS16480_REG(0x03, 0x28)
+#define ADIS16480_REG_YG_ALM_MAGN ADIS16480_REG(0x03, 0x2A)
+#define ADIS16480_REG_ZG_ALM_MAGN ADIS16480_REG(0x03, 0x2C)
+#define ADIS16480_REG_XA_ALM_MAGN ADIS16480_REG(0x03, 0x2E)
+#define ADIS16480_REG_YA_ALM_MAGN ADIS16480_REG(0x03, 0x30)
+#define ADIS16480_REG_ZA_ALM_MAGN ADIS16480_REG(0x03, 0x32)
+#define ADIS16480_REG_XM_ALM_MAGN ADIS16480_REG(0x03, 0x34)
+#define ADIS16480_REG_YM_ALM_MAGN ADIS16480_REG(0x03, 0x36)
+#define ADIS16480_REG_ZM_ALM_MAGN ADIS16480_REG(0x03, 0x38)
+#define ADIS16480_REG_BR_ALM_MAGN ADIS16480_REG(0x03, 0x3A)
+#define ADIS16480_REG_FIRM_REV ADIS16480_REG(0x03, 0x78)
+#define ADIS16480_REG_FIRM_DM ADIS16480_REG(0x03, 0x7A)
+#define ADIS16480_REG_FIRM_Y ADIS16480_REG(0x03, 0x7C)
+
+#define ADIS16480_REG_SERIAL_NUM ADIS16480_REG(0x04, 0x20)
+
+/*
+ * Each filter coefficient bank spans two register pages: coefficients 0-59
+ * live on the first page (offset 8), 60+ continue on the following page.
+ *
+ * Note: the macro originally took only a 'page' parameter while its body
+ * referenced a free identifier 'x', so any use of the _A.._D wrappers
+ * (which pass two arguments) would fail to compile. 'x' is now a proper
+ * macro parameter.
+ */
+#define ADIS16480_FIR_COEF(page, x) ((x) < 60 ? ADIS16480_REG(page, (x) + 8) : \
+		ADIS16480_REG((page) + 1, (x) - 60 + 8))
+#define ADIS16480_FIR_COEF_A(x)		ADIS16480_FIR_COEF(0x05, (x))
+#define ADIS16480_FIR_COEF_B(x)		ADIS16480_FIR_COEF(0x07, (x))
+#define ADIS16480_FIR_COEF_C(x)		ADIS16480_FIR_COEF(0x09, (x))
+#define ADIS16480_FIR_COEF_D(x)		ADIS16480_FIR_COEF(0x0B, (x))
+
+/* Per-variant constant data: the channel list this variant exports via IIO */
+struct adis16480_chip_info {
+	unsigned int num_channels;
+	const struct iio_chan_spec *channels;
+};
+
+/* Driver state: variant description plus the shared adis library handle */
+struct adis16480 {
+	const struct adis16480_chip_info *chip_info;
+
+	struct adis adis;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs read: format the FIRM_REV register as "major.minor" */
+static ssize_t adis16480_show_firmware_revision(struct file *file,
+		char __user *userbuf, size_t count, loff_t *ppos)
+{
+	struct adis16480 *adis16480 = file->private_data;
+	char buf[7];
+	size_t len;
+	u16 rev;
+	int ret;
+
+	ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_REV, &rev);
+	if (ret < 0)
+		return ret;
+
+	/* High byte is the major revision, low byte the minor revision */
+	len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16480_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16480_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * debugfs read: format the firmware date registers as "MM-DD-YYYY".
+ * FIRM_DM packs month (high byte) and day (low byte); FIRM_Y is the year.
+ *
+ * Uses scnprintf (bytes actually written) rather than snprintf (would-be
+ * length), matching the firmware-revision handler: snprintf's return value
+ * must never be passed as a data length to simple_read_from_buffer, since
+ * on truncation it can exceed the buffer and leak stack bytes.
+ */
+static ssize_t adis16480_show_firmware_date(struct file *file,
+		char __user *userbuf, size_t count, loff_t *ppos)
+{
+	struct adis16480 *adis16480 = file->private_data;
+	u16 md, year;
+	char buf[12];
+	size_t len;
+	int ret;
+
+	ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_Y, &year);
+	if (ret < 0)
+		return ret;
+
+	ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
+	if (ret < 0)
+		return ret;
+
+	len = scnprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n",
+			md >> 8, md & 0xff, year);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16480_firmware_date_fops = {
+	.open = simple_open,
+	.read = adis16480_show_firmware_date,
+	.llseek = default_llseek,
+	.owner = THIS_MODULE,
+};
+
+static const struct file_operations adis16480_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16480_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+/* debugfs read: lot-specific serial number from the SERIAL_NUM register */
+static int adis16480_show_serial_number(void *arg, u64 *val)
+{
+	struct adis16480 *adis16480 = arg;
+	u16 serial;
+	int ret;
+
+	ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_SERIAL_NUM,
+		&serial);
+	if (ret < 0)
+		return ret;
+
+	*val = serial;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_serial_number_fops,
+	adis16480_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16480_show_product_id(void *arg, u64 *val)
+{
+ struct adis16480 *adis16480 = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_PROD_ID,
+ &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_product_id_fops,
+ adis16480_show_product_id, NULL, "%llu\n");
+
+static int adis16480_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16480 *adis16480 = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&adis16480->adis, ADIS16480_REG_FLASH_CNT,
+ &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_flash_count_fops,
+ adis16480_show_flash_count, NULL, "%lld\n");
+
+/*
+ * Expose identification/diagnostic registers (read-only) under the IIO
+ * device's debugfs directory. debugfs creation failures are deliberately
+ * ignored: these entries are diagnostic aids, not required for operation.
+ */
+static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+{
+	struct adis16480 *adis16480 = iio_priv(indio_dev);
+
+	debugfs_create_file("firmware_revision", 0400,
+		indio_dev->debugfs_dentry, adis16480,
+		&adis16480_firmware_revision_fops);
+	debugfs_create_file("firmware_date", 0400, indio_dev->debugfs_dentry,
+		adis16480, &adis16480_firmware_date_fops);
+	debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
+		adis16480, &adis16480_serial_number_fops);
+	debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+		adis16480, &adis16480_product_id_fops);
+	debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+		adis16480, &adis16480_flash_count_fops);
+
+	return 0;
+}
+
+#else
+
+static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+/*
+ * Set the sampling frequency by programming the decimation register.
+ * The internal base rate is 2.46 kHz; DEC_RATE divides it by (t + 1).
+ * @val/@val2: requested frequency in Hz (integer + micro-Hz parts).
+ *
+ * 't' is unsigned, so the original 'if (t <= 0)' check only rejected 0:
+ * a negative @val wrapped around to a huge value and was accepted.
+ * Negative inputs are now rejected explicitly before the conversion.
+ */
+static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	unsigned int t;
+
+	if (val < 0 || val2 < 0)
+		return -EINVAL;
+
+	t = val * 1000 + val2 / 1000;	/* requested rate in mHz */
+	if (t == 0)
+		return -EINVAL;
+
+	t = 2460000 / t;
+	if (t > 2048)
+		t = 2048;
+
+	if (t != 0)
+		t--;
+
+	return adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
+}
+
+/*
+ * Report the current sampling frequency derived from the decimation
+ * register: base rate 2.46 kHz divided by (DEC_RATE + 1), returned as
+ * integer Hz plus micro-Hz remainder.
+ */
+static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	uint16_t t;
+	int ret;
+	unsigned freq;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
+	if (ret < 0)
+		return ret;
+
+	freq = 2460000 / (t + 1);	/* rate in mHz */
+	*val = freq / 1000;
+	*val2 = (freq % 1000) * 1000;
+
+	return IIO_VAL_INT_PLUS_MICRO;
+}
+
+enum {
+ ADIS16480_SCAN_GYRO_X,
+ ADIS16480_SCAN_GYRO_Y,
+ ADIS16480_SCAN_GYRO_Z,
+ ADIS16480_SCAN_ACCEL_X,
+ ADIS16480_SCAN_ACCEL_Y,
+ ADIS16480_SCAN_ACCEL_Z,
+ ADIS16480_SCAN_MAGN_X,
+ ADIS16480_SCAN_MAGN_Y,
+ ADIS16480_SCAN_MAGN_Z,
+ ADIS16480_SCAN_BARO,
+ ADIS16480_SCAN_TEMP,
+};
+
+static const unsigned int adis16480_calibbias_regs[] = {
+ [ADIS16480_SCAN_GYRO_X] = ADIS16480_REG_X_GYRO_BIAS,
+ [ADIS16480_SCAN_GYRO_Y] = ADIS16480_REG_Y_GYRO_BIAS,
+ [ADIS16480_SCAN_GYRO_Z] = ADIS16480_REG_Z_GYRO_BIAS,
+ [ADIS16480_SCAN_ACCEL_X] = ADIS16480_REG_X_ACCEL_BIAS,
+ [ADIS16480_SCAN_ACCEL_Y] = ADIS16480_REG_Y_ACCEL_BIAS,
+ [ADIS16480_SCAN_ACCEL_Z] = ADIS16480_REG_Z_ACCEL_BIAS,
+ [ADIS16480_SCAN_MAGN_X] = ADIS16480_REG_X_HARD_IRON,
+ [ADIS16480_SCAN_MAGN_Y] = ADIS16480_REG_Y_HARD_IRON,
+ [ADIS16480_SCAN_MAGN_Z] = ADIS16480_REG_Z_HARD_IRON,
+ [ADIS16480_SCAN_BARO] = ADIS16480_REG_BAROM_BIAS,
+};
+
+static const unsigned int adis16480_calibscale_regs[] = {
+ [ADIS16480_SCAN_GYRO_X] = ADIS16480_REG_X_GYRO_SCALE,
+ [ADIS16480_SCAN_GYRO_Y] = ADIS16480_REG_Y_GYRO_SCALE,
+ [ADIS16480_SCAN_GYRO_Z] = ADIS16480_REG_Z_GYRO_SCALE,
+ [ADIS16480_SCAN_ACCEL_X] = ADIS16480_REG_X_ACCEL_SCALE,
+ [ADIS16480_SCAN_ACCEL_Y] = ADIS16480_REG_Y_ACCEL_SCALE,
+ [ADIS16480_SCAN_ACCEL_Z] = ADIS16480_REG_Z_ACCEL_SCALE,
+};
+
+/*
+ * Write a calibration bias for the channel. Magnetometer (hard-iron) and
+ * barometer bias registers are 16 bits wide, so the value is range-checked;
+ * gyro and accelerometer bias registers are 32 bits wide.
+ */
+static int adis16480_set_calibbias(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int bias)
+{
+	unsigned int reg = adis16480_calibbias_regs[chan->scan_index];
+	struct adis16480 *st = iio_priv(indio_dev);
+
+	switch (chan->type) {
+	case IIO_MAGN:
+	case IIO_PRESSURE:
+		if (bias < -0x8000 || bias >= 0x8000)
+			return -EINVAL;
+		return adis_write_reg_16(&st->adis, reg, bias);
+	case IIO_ANGL_VEL:
+	case IIO_ACCEL:
+		return adis_write_reg_32(&st->adis, reg, bias);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Read back the calibration bias for the channel. Register width matches
+ * the write path: 16 bits for magnetometer/barometer, 32 bits for
+ * gyro/accelerometer; values are sign-extended to int.
+ *
+ * Assign *bias only after a successful read: the original code
+ * sign-extended an uninitialized stack variable when the register read
+ * failed.
+ */
+static int adis16480_get_calibbias(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int *bias)
+{
+	unsigned int reg = adis16480_calibbias_regs[chan->scan_index];
+	struct adis16480 *st = iio_priv(indio_dev);
+	uint16_t val16;
+	uint32_t val32;
+	int ret;
+
+	switch (chan->type) {
+	case IIO_MAGN:
+	case IIO_PRESSURE:
+		ret = adis_read_reg_16(&st->adis, reg, &val16);
+		if (ret == 0)
+			*bias = sign_extend32(val16, 15);
+		break;
+	case IIO_ANGL_VEL:
+	case IIO_ACCEL:
+		ret = adis_read_reg_32(&st->adis, reg, &val32);
+		if (ret == 0)
+			*bias = sign_extend32(val32, 31);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret < 0)
+		return ret;
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Write a calibration scale correction. Only gyro/accel channels have scale
+ * registers (see adis16480_calibscale_regs); all are 16-bit signed.
+ */
+static int adis16480_set_calibscale(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int scale)
+{
+	unsigned int reg = adis16480_calibscale_regs[chan->scan_index];
+	struct adis16480 *st = iio_priv(indio_dev);
+
+	if (scale < -0x8000 || scale >= 0x8000)
+		return -EINVAL;
+
+	return adis_write_reg_16(&st->adis, reg, scale);
+}
+
+/* Read back the 16-bit signed calibration scale for a gyro/accel channel */
+static int adis16480_get_calibscale(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int *scale)
+{
+	unsigned int reg = adis16480_calibscale_regs[chan->scan_index];
+	struct adis16480 *st = iio_priv(indio_dev);
+	uint16_t val16;
+	int ret;
+
+	ret = adis_read_reg_16(&st->adis, reg, &val16);
+	if (ret < 0)
+		return ret;
+
+	*scale = sign_extend32(val16, 15);
+	return IIO_VAL_INT;
+}
+
+static const unsigned int adis16480_def_filter_freqs[] = {
+ 310,
+ 55,
+ 275,
+ 63,
+};
+
+static const unsigned int ad16480_filter_data[][2] = {
+ [ADIS16480_SCAN_GYRO_X] = { ADIS16480_REG_FILTER_BNK0, 0 },
+ [ADIS16480_SCAN_GYRO_Y] = { ADIS16480_REG_FILTER_BNK0, 3 },
+ [ADIS16480_SCAN_GYRO_Z] = { ADIS16480_REG_FILTER_BNK0, 6 },
+ [ADIS16480_SCAN_ACCEL_X] = { ADIS16480_REG_FILTER_BNK0, 9 },
+ [ADIS16480_SCAN_ACCEL_Y] = { ADIS16480_REG_FILTER_BNK0, 12 },
+ [ADIS16480_SCAN_ACCEL_Z] = { ADIS16480_REG_FILTER_BNK1, 0 },
+ [ADIS16480_SCAN_MAGN_X] = { ADIS16480_REG_FILTER_BNK1, 3 },
+ [ADIS16480_SCAN_MAGN_Y] = { ADIS16480_REG_FILTER_BNK1, 6 },
+ [ADIS16480_SCAN_MAGN_Z] = { ADIS16480_REG_FILTER_BNK1, 9 },
+};
+
+/*
+ * Report the low-pass filter 3dB frequency for a channel. Each channel has
+ * a 3-bit field in one of the two filter bank registers (see
+ * ad16480_filter_data): bits [offset+1:offset] select one of the four fixed
+ * filter banks, bit offset+2 enables the filter. Disabled => 0 Hz.
+ */
+static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int *freq)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	unsigned int enable_mask, offset, reg;
+	uint16_t val;
+	int ret;
+
+	reg = ad16480_filter_data[chan->scan_index][0];
+	offset = ad16480_filter_data[chan->scan_index][1];
+	enable_mask = BIT(offset + 2);
+
+	ret = adis_read_reg_16(&st->adis, reg, &val);
+	if (ret < 0)
+		return ret;
+
+	if (!(val & enable_mask))
+		*freq = 0;
+	else
+		*freq = adis16480_def_filter_freqs[(val >> offset) & 0x3];
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Set the low-pass filter 3dB frequency for a channel. A frequency of 0
+ * disables the filter; otherwise pick, among the four fixed banks, the
+ * smallest default frequency that is >= the requested one (if none
+ * qualifies, bank 0 / 310 Hz is used since best_freq stays 0).
+ */
+static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int freq)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	unsigned int enable_mask, offset, reg;
+	unsigned int diff, best_diff;
+	unsigned int i, best_freq;
+	uint16_t val;
+	int ret;
+
+	reg = ad16480_filter_data[chan->scan_index][0];
+	offset = ad16480_filter_data[chan->scan_index][1];
+	enable_mask = BIT(offset + 2);
+
+	/* Read-modify-write: other channels share the same bank register */
+	ret = adis_read_reg_16(&st->adis, reg, &val);
+	if (ret < 0)
+		return ret;
+
+	if (freq == 0) {
+		val &= ~enable_mask;
+	} else {
+		best_freq = 0;
+		best_diff = 310;	/* >= max entry of the freq table */
+		for (i = 0; i < ARRAY_SIZE(adis16480_def_filter_freqs); i++) {
+			if (adis16480_def_filter_freqs[i] >= freq) {
+				diff = adis16480_def_filter_freqs[i] - freq;
+				if (diff < best_diff) {
+					best_diff = diff;
+					best_freq = i;
+				}
+			}
+		}
+
+		val &= ~(0x3 << offset);
+		val |= best_freq << offset;
+		val |= enable_mask;
+	}
+
+	return adis_write_reg_16(&st->adis, reg, val);
+}
+
+/*
+ * IIO read_raw: raw samples go through the adis library's single
+ * conversion; scale/offset values are fixed per channel type (datasheet
+ * sensitivities); calibration, filter and sample-rate queries delegate to
+ * the helpers above.
+ */
+static int adis16480_read_raw(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+	switch (info) {
+	case IIO_CHAN_INFO_RAW:
+		return adis_single_conversion(indio_dev, chan, 0, val);
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = 0;
+			*val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_ACCEL:
+			*val = 0;
+			*val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_MAGN:
+			*val = 0;
+			*val2 = 100; /* 0.0001 gauss */
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			*val = 5;
+			*val2 = 650000; /* 5.65 milli degree Celsius */
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_PRESSURE:
+			*val = 0;
+			*val2 = 4000; /* 40ubar = 0.004 kPa */
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		/* Only the temperature channel has a offset */
+		*val = 4425; /* 25 degree Celsius = 0x0000 */
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		return adis16480_get_calibbias(indio_dev, chan, val);
+	case IIO_CHAN_INFO_CALIBSCALE:
+		return adis16480_get_calibscale(indio_dev, chan, val);
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		return adis16480_get_filter_freq(indio_dev, chan, val);
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		return adis16480_get_freq(indio_dev, val, val2);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* IIO write_raw: dispatch writable attributes to the matching helper */
+static int adis16480_write_raw(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+	switch (info) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		return adis16480_set_calibbias(indio_dev, chan, val);
+	case IIO_CHAN_INFO_CALIBSCALE:
+		return adis16480_set_calibscale(indio_dev, chan, val);
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		return adis16480_set_filter_freq(indio_dev, chan, val);
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		return adis16480_set_freq(indio_dev, val, val2);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+#define ADIS16480_MOD_CHANNEL(_type, _mod, _address, _si, _info_sep, _bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ _info_sep, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_bits), \
+ .storagebits = (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16480_GYRO_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _GYRO_OUT, ADIS16480_SCAN_GYRO_ ## _mod, \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ 32)
+
+#define ADIS16480_ACCEL_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _ACCEL_OUT, ADIS16480_SCAN_ACCEL_ ## _mod, \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ 32)
+
+#define ADIS16480_MAGN_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_MAGN, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _MAGN_OUT, ADIS16480_SCAN_MAGN_ ## _mod, \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ 16)
+
+#define ADIS16480_PRESSURE_CHANNEL() \
+ { \
+ .type = IIO_PRESSURE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ADIS16480_REG_BAROM_OUT, \
+ .scan_index = ADIS16480_SCAN_BARO, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16480_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ADIS16480_REG_TEMP_OUT, \
+ .scan_index = ADIS16480_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16480_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_MAGN_CHANNEL(X),
+ ADIS16480_MAGN_CHANNEL(Y),
+ ADIS16480_MAGN_CHANNEL(Z),
+ ADIS16480_PRESSURE_CHANNEL(),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16485_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+enum adis16480_variant {
+ ADIS16375,
+ ADIS16480,
+ ADIS16485,
+ ADIS16488,
+};
+
+static const struct adis16480_chip_info adis16480_chip_info[] = {
+ [ADIS16375] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ },
+ [ADIS16480] = {
+ .channels = adis16480_channels,
+ .num_channels = ARRAY_SIZE(adis16480_channels),
+ },
+ [ADIS16485] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ },
+ [ADIS16488] = {
+ .channels = adis16480_channels,
+ .num_channels = ARRAY_SIZE(adis16480_channels),
+ },
+};
+
+static const struct iio_info adis16480_info = {
+ .read_raw = &adis16480_read_raw,
+ .write_raw = &adis16480_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Put the device into its low-power state by writing the SLP_CNT register.
+ * NOTE(review): BIT(9) is presumably the indefinite-sleep bit — confirm
+ * against the ADIS16480 datasheet.
+ */
+static int adis16480_stop_device(struct iio_dev *indio_dev)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, BIT(9));
+	if (ret)
+		dev_err(&indio_dev->dev,
+			"Could not power down device: %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * adis library callback: toggle the data-ready interrupt via FNCTIO_CTRL.
+ * NOTE(review): BIT(3) presumably enables data-ready output — confirm
+ * against the datasheet's FNCTIO_CTRL bit map.
+ */
+static int adis16480_enable_irq(struct adis *adis, bool enable)
+{
+	return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL,
+		enable ? BIT(3) : 0);
+}
+
+/*
+ * Bring the device into a known state: reset, run the self-test
+ * (GLOB_CMD bit 1), check the diagnostic status and compare the product ID
+ * register against the numeric part of the bound device name.
+ *
+ * The sscanf() return value is now checked: previously a name without a
+ * parsable "adis<N>" suffix left device_id uninitialized before use.
+ */
+static int adis16480_initial_setup(struct iio_dev *indio_dev)
+{
+	struct adis16480 *st = iio_priv(indio_dev);
+	uint16_t prod_id;
+	unsigned int device_id;
+	int ret;
+
+	adis_reset(&st->adis);
+	msleep(70);
+
+	/* BIT(1) in GLOB_CMD triggers the self test */
+	ret = adis_write_reg_16(&st->adis, ADIS16480_REG_GLOB_CMD, BIT(1));
+	if (ret)
+		return ret;
+	msleep(30);
+
+	ret = adis_check_status(&st->adis);
+	if (ret)
+		return ret;
+
+	ret = adis_read_reg_16(&st->adis, ADIS16480_REG_PROD_ID, &prod_id);
+	if (ret)
+		return ret;
+
+	if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1)
+		return -EINVAL;
+
+	/* Mismatch is suspicious but not fatal: warn and continue */
+	if (prod_id != device_id)
+		dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+				device_id, prod_id);
+
+	return 0;
+}
+
+#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0
+#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1
+#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2
+#define ADIS16480_DIAG_STAT_XACCL_FAIL 3
+#define ADIS16480_DIAG_STAT_YACCL_FAIL 4
+#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5
+#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8
+#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9
+#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10
+#define ADIS16480_DIAG_STAT_BARO_FAIL 11
+
+static const char * const adis16480_status_error_msgs[] = {
+ [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure",
+};
+
+static const struct adis_data adis16480_data = {
+ .diag_stat_reg = ADIS16480_REG_DIAG_STS,
+ .glob_cmd_reg = ADIS16480_REG_GLOB_CMD,
+ .has_paging = true,
+
+ .read_delay = 5,
+ .write_delay = 5,
+
+ .status_error_msgs = adis16480_status_error_msgs,
+ .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_BARO_FAIL),
+
+ .enable_irq = adis16480_enable_irq,
+};
+
+/*
+ * SPI probe: allocate the IIO device (devm-managed), select the variant's
+ * channel table from the SPI device id, initialize the shared adis library
+ * state, set up buffer + trigger, run the initial device setup, and
+ * register with the IIO core. Error paths unwind in reverse order.
+ */
+static int adis16480_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct iio_dev *indio_dev;
+	struct adis16480 *st;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, indio_dev);
+
+	st = iio_priv(indio_dev);
+
+	st->chip_info = &adis16480_chip_info[id->driver_data];
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->channels = st->chip_info->channels;
+	indio_dev->num_channels = st->chip_info->num_channels;
+	indio_dev->info = &adis16480_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
+	if (ret)
+		return ret;
+
+	/* NULL handler: use the adis library's default trigger handler */
+	ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+	if (ret)
+		return ret;
+
+	ret = adis16480_initial_setup(indio_dev);
+	if (ret)
+		goto error_cleanup_buffer;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_stop_device;
+
+	adis16480_debugfs_init(indio_dev);
+
+	return 0;
+
+error_stop_device:
+	adis16480_stop_device(indio_dev);
+error_cleanup_buffer:
+	adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+	return ret;
+}
+
+/* SPI remove: unregister the IIO device, power down, free buffer/trigger */
+static int adis16480_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adis16480 *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	/* Stop the device only after userspace can no longer reach it */
+	adis16480_stop_device(indio_dev);
+
+	adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+	return 0;
+}
+
+static const struct spi_device_id adis16480_ids[] = {
+ { "adis16375", ADIS16375 },
+ { "adis16480", ADIS16480 },
+ { "adis16485", ADIS16485 },
+ { "adis16488", ADIS16488 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16480_ids);
+
+static struct spi_driver adis16480_driver = {
+ .driver = {
+ .name = "adis16480",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16480_ids,
+ .probe = adis16480_probe,
+ .remove = adis16480_remove,
+};
+module_spi_driver(adis16480_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16480 IMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
new file mode 100644
index 00000000000000..cb32b593f1c55c
--- /dev/null
+++ b/drivers/iio/imu/adis_buffer.c
@@ -0,0 +1,171 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/imu/adis.h>
+
+/*
+ * Rebuild the SPI message used by the default trigger handler whenever the
+ * active scan mask changes.
+ *
+ * The message is a chain of 16-bit transfers with cs_change between them:
+ * transfer j sends the read command for scan element j while receiving the
+ * response to the command sent in transfer j-1, so one extra trailing
+ * transfer (scan_count + 1 total) clocks out the last response. The tx and
+ * rx areas share one allocation of 2 * scan_bytes.
+ *
+ * NOTE(review): if the second allocation fails, adis->xfer is left
+ * allocated (freed on the next call) while adis->buffer is NULL; the
+ * trigger handler guards on adis->buffer, so this is not a use-after-free,
+ * but it is worth tidying.
+ */
+int adis_update_scan_mode(struct iio_dev *indio_dev,
+	const unsigned long *scan_mask)
+{
+	struct adis *adis = iio_device_get_drvdata(indio_dev);
+	const struct iio_chan_spec *chan;
+	unsigned int scan_count;
+	unsigned int i, j;
+	__be16 *tx, *rx;
+
+	/* Drop the message built for the previous scan mask, if any */
+	kfree(adis->xfer);
+	kfree(adis->buffer);
+
+	scan_count = indio_dev->scan_bytes / 2;
+
+	adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL);
+	if (!adis->xfer)
+		return -ENOMEM;
+
+	adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+	if (!adis->buffer)
+		return -ENOMEM;
+
+	rx = adis->buffer;
+	tx = rx + indio_dev->scan_bytes;
+
+	spi_message_init(&adis->msg);
+
+	for (j = 0; j <= scan_count; j++) {
+		adis->xfer[j].bits_per_word = 8;
+		if (j != scan_count)
+			adis->xfer[j].cs_change = 1;
+		adis->xfer[j].len = 2;
+		adis->xfer[j].delay_usecs = adis->data->read_delay;
+		if (j < scan_count)
+			adis->xfer[j].tx_buf = &tx[j];
+		if (j >= 1)
+			adis->xfer[j].rx_buf = &rx[j - 1];
+		spi_message_add_tail(&adis->xfer[j], &adis->msg);
+	}
+
+	chan = indio_dev->channels;
+	for (i = 0; i < indio_dev->num_channels; i++, chan++) {
+		if (!test_bit(chan->scan_index, scan_mask))
+			continue;
+		/* 32-bit elements need two 16-bit register reads */
+		if (chan->scan_type.storagebits == 32)
+			*tx++ = cpu_to_be16((chan->address + 2) << 8);
+		*tx++ = cpu_to_be16(chan->address << 8);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adis_update_scan_mode);
+
+/*
+ * Default trigger handler: run the pre-built SPI message and push the
+ * received samples (plus timestamp) into the IIO buffer.
+ *
+ * For paged devices the PAGE_ID register is forced back to page 0 under
+ * txrx_lock so the burst read addresses the output registers.
+ *
+ * An irqreturn_t handler must not return a negative errno: the original
+ * 'return -ENOMEM' on a missing buffer is now IRQ_HANDLED (there is
+ * nothing further this handler can do about it).
+ */
+static irqreturn_t adis_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct adis *adis = iio_device_get_drvdata(indio_dev);
+	int ret;
+
+	if (!adis->buffer)
+		return IRQ_HANDLED;
+
+	if (adis->data->has_paging) {
+		mutex_lock(&adis->txrx_lock);
+		if (adis->current_page != 0) {
+			adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+			adis->tx[1] = 0;
+			spi_write(adis->spi, adis->tx, 2);
+		}
+	}
+
+	ret = spi_sync(adis->spi, &adis->msg);
+	if (ret)
+		dev_err(&adis->spi->dev, "Failed to read data: %d", ret);
+
+	if (adis->data->has_paging) {
+		adis->current_page = 0;
+		mutex_unlock(&adis->txrx_lock);
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+		pf->timestamp);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
+ * @adis:	The adis device.
+ * @indio_dev:	The IIO device.
+ * @trigger_handler: Optional trigger handler, may be NULL.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function sets up the buffer and trigger for a adis devices. If
+ * 'trigger_handler' is NULL the default trigger handler will be used. The
+ * default trigger handler will simply read the registers assigned to the
+ * currently active channels.
+ *
+ * adis_cleanup_buffer_and_trigger() should be called to free the resources
+ * allocated by this function.
+ */
+int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+	irqreturn_t (*trigger_handler)(int, void *))
+{
+	int ret;
+
+	if (!trigger_handler)
+		trigger_handler = adis_trigger_handler;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		trigger_handler, NULL);
+	if (ret)
+		return ret;
+
+	/* Without an IRQ line there is no data-ready trigger to register */
+	if (adis->spi->irq) {
+		ret = adis_probe_trigger(adis, indio_dev);
+		if (ret)
+			goto error_buffer_cleanup;
+	}
+	return 0;
+
+error_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
+EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
+
+/**
+ * adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
+ * @adis: The adis device.
+ * @indio_dev: The IIO device.
+ *
+ * Frees resources allocated by adis_setup_buffer_and_trigger()
+ */
+void adis_cleanup_buffer_and_trigger(struct adis *adis,
+	struct iio_dev *indio_dev)
+{
+	/* Mirror the setup path: a trigger only exists when an IRQ was set */
+	if (adis->spi->irq)
+		adis_remove_trigger(adis);
+	kfree(adis->buffer);
+	kfree(adis->xfer);
+	iio_triggered_buffer_cleanup(indio_dev);
+}
+EXPORT_SYMBOL_GPL(adis_cleanup_buffer_and_trigger);
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
new file mode 100644
index 00000000000000..e0017c22bb9c6c
--- /dev/null
+++ b/drivers/iio/imu/adis_trigger.c
@@ -0,0 +1,89 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/export.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/imu/adis.h>
+
+/* Trigger enable/disable callback: forwards the trigger state to the
+ * device's data-ready IRQ enable bit. */
+static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig,
+	bool state)
+{
+	struct adis *adis = iio_trigger_get_drvdata(trig);
+
+	return adis_enable_irq(adis, state);
+}
+
+static const struct iio_trigger_ops adis_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = &adis_data_rdy_trigger_set_state,
+};
+
+/**
+ * adis_probe_trigger() - Sets up trigger for a adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ *
+ * Returns 0 on success or a negative error code
+ *
+ * adis_remove_trigger() should be used to free the trigger.
+ */
+int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+{
+	int ret;
+
+	adis->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+					indio_dev->id);
+	if (adis->trig == NULL)
+		return -ENOMEM;
+
+	ret = request_irq(adis->spi->irq,
+			  &iio_trigger_generic_data_rdy_poll,
+			  IRQF_TRIGGER_RISING,
+			  indio_dev->name,
+			  adis->trig);
+	if (ret)
+		goto error_free_trig;
+
+	adis->trig->dev.parent = &adis->spi->dev;
+	adis->trig->ops = &adis_trigger_ops;
+	iio_trigger_set_drvdata(adis->trig, adis);
+	ret = iio_trigger_register(adis->trig);
+	if (ret)
+		goto error_free_irq;
+
+	/* Only publish the trigger once registration succeeded; previously
+	 * it was assigned before the check, leaving indio_dev->trig pointing
+	 * at a freed trigger when iio_trigger_register() failed. */
+	indio_dev->trig = adis->trig;
+
+	return 0;
+
+error_free_irq:
+	free_irq(adis->spi->irq, adis->trig);
+error_free_trig:
+	iio_trigger_free(adis->trig);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adis_probe_trigger);
+
+/**
+ * adis_remove_trigger() - Remove trigger for a adis devices
+ * @adis: The adis device
+ *
+ * Removes the trigger previously registered with adis_probe_trigger().
+ */
+void adis_remove_trigger(struct adis *adis)
+{
+	/* Unregister first so no further data-ready polls can reach the
+	 * trigger, then release the IRQ and finally the trigger itself. */
+	iio_trigger_unregister(adis->trig);
+	free_irq(adis->spi->irq, adis->trig);
+	iio_trigger_free(adis->trig);
+}
+EXPORT_SYMBOL_GPL(adis_remove_trigger);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
new file mode 100644
index 00000000000000..2d0608ba88d7d6
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -0,0 +1,16 @@
+#
+# inv-mpu6050 drivers for Invensense MPU devices and combos
+#
+
+config INV_MPU6050_IIO
+ tristate "Invensense MPU6050 devices"
+ depends on I2C && SYSFS
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ This driver supports the Invensense MPU6050 devices.
+ This driver can also support MPU6500 in MPU6050 compatibility mode
+ and also in MPU6500 mode with some limitations.
+ It is a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
new file mode 100644
index 00000000000000..3a677c778afbaf
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense MPU6050 device.
+#
+
+obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
new file mode 100644
index 00000000000000..b75519deac1a8f
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -0,0 +1,790 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * this is the gyro scale translated from dynamic range plus/minus
+ * {250, 500, 1000, 2000} to rad/s
+ */
+static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
+
+/*
+ * this is the accel scale translated from dynamic range plus/minus
+ * {2, 4, 8, 16} to m/s^2
+ */
+static const int accel_scale[] = {598, 1196, 2392, 4785};
+
+static const struct inv_mpu6050_reg_map reg_set_6050 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+};
+
+static const struct inv_mpu6050_chip_config chip_config_6050 = {
+ .fsr = INV_MPU6050_FSR_2000DPS,
+ .lpf = INV_MPU6050_FILTER_20HZ,
+ .fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+ .gyro_fifo_enable = false,
+ .accl_fifo_enable = false,
+ .accl_fs = INV_MPU6050_FS_02G,
+};
+
+static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+ {
+ .num_reg = 117,
+ .name = "MPU6050",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+};
+
+/* Write the single byte @d to register @reg via an SMBus I2C block
+ * transfer. Returns 0 on success or a negative errno. */
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
+{
+	return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
+}
+
+/**
+ * inv_mpu6050_switch_engine() - Enable/disable the gyro or accel engine.
+ * @st: Driver state.
+ * @en: true to enable the engine, false to put it in standby.
+ * @mask: PWR_MGMT_2 standby bits to change (gyro or accel).
+ *
+ * The clock source is tied to the gyro engine: it may only run off the
+ * gyro PLL while the gyro is on, and must be switched back to the internal
+ * oscillator before the gyro is turned off. Returns 0 on success or an
+ * error code.
+ *
+ * NOTE(review): i2c_smbus_read_i2c_block_data() returns the number of
+ * bytes read; on a short read (0 bytes) `return result` propagates 0,
+ * which callers treat as success — confirm whether this should be mapped
+ * to a negative errno instead.
+ */
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
+{
+	u8 d, mgmt_1;
+	int result;
+
+	/* switch clock needs to be careful. Only when gyro is on, can
+	   clock source be switched to gyro. Otherwise, it must be set to
+	   internal clock */
+	if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+		result = i2c_smbus_read_i2c_block_data(st->client,
+				       st->reg->pwr_mgmt_1, 1, &mgmt_1);
+		if (result != 1)
+			return result;
+
+		mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
+	}
+
+	if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
+		/* turning off gyro requires switch to internal clock first.
+		   Then turn off gyro engine */
+		mgmt_1 |= INV_CLK_INTERNAL;
+		result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+		if (result)
+			return result;
+	}
+
+	result = i2c_smbus_read_i2c_block_data(st->client,
+				       st->reg->pwr_mgmt_2, 1, &d);
+	if (result != 1)
+		return result;
+	/* Clear the standby bits to enable, set them to disable. */
+	if (en)
+		d &= ~mask;
+	else
+		d |= mask;
+	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+	if (result)
+		return result;
+
+	if (en) {
+		/* Wait for output stabilize */
+		msleep(INV_MPU6050_TEMP_UP_TIME);
+		if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+			/* switch internal clock to PLL */
+			mgmt_1 |= INV_CLK_PLL;
+			result = inv_mpu6050_write_reg(st,
+					st->reg->pwr_mgmt_1, mgmt_1);
+			if (result)
+				return result;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_set_power_itg() - Wake the chip or put it to sleep.
+ * @st: Driver state.
+ * @power_on: true to clear the sleep bit, false to set it.
+ *
+ * Writes PWR_MGMT_1 and, when powering up, waits for the registers to
+ * become accessible. Returns 0 on success or an error code.
+ */
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
+{
+	u8 val = power_on ? 0 : INV_MPU6050_BIT_SLEEP;
+	int result;
+
+	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, val);
+	if (result)
+		return result;
+
+	/* Give the part time to come out of sleep before register access. */
+	if (power_on)
+		msleep(INV_MPU6050_REG_UP_TIME);
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
+ *
+ * Initial configuration:
+ * FSR: ±2000DPS
+ * DLPF: 20Hz
+ * FIFO rate: 50Hz
+ * Clock source: Gyro PLL
+ *
+ * The chip is powered up for the register writes and put back to sleep
+ * afterwards; the cached chip_config mirrors the defaults written here.
+ */
+static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
+{
+	int result;
+	u8 d;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		return result;
+	d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+	result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+	if (result)
+		return result;
+
+	d = INV_MPU6050_FILTER_20HZ;
+	result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+	if (result)
+		return result;
+
+	/* sample rate = 1kHz / (divider + 1) */
+	d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
+	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	if (result)
+		return result;
+
+	d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+	result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+	if (result)
+		return result;
+
+	/* Keep the software cache in sync with the defaults just written. */
+	memcpy(&st->chip_config, hw_info[st->chip_type].config,
+		sizeof(struct inv_mpu6050_chip_config));
+	result = inv_mpu6050_set_power_itg(st, false);
+
+	return result;
+}
+
+/* Read one big-endian 16-bit sample for @axis starting at register @reg
+ * and store it sign-extended in *val. Returns IIO_VAL_INT on success or
+ * -EINVAL on a short I2C read. */
+static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
+				int axis, int *val)
+{
+	int ind, result;
+	__be16 d;
+
+	/* Each axis occupies two consecutive registers after @reg. */
+	ind = (axis - IIO_MOD_X) * 2;
+	result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2,
+						(u8 *)&d);
+	if (result != 2)
+		return -EINVAL;
+	*val = (short)be16_to_cpup(&d);
+
+	return IIO_VAL_INT;
+}
+
+/**
+ * inv_mpu6050_read_raw() - IIO read_raw callback.
+ * @indio_dev: The IIO device.
+ * @chan: Channel being queried.
+ * @val: Integer part of the result.
+ * @val2: Fractional part of the result.
+ * @mask: Which attribute is requested (raw/scale/offset).
+ *
+ * Raw reads power up the required engine on demand when the buffer is not
+ * already keeping it running, and power it back down afterwards. Returns
+ * an IIO_VAL_* code on success or a negative errno.
+ */
+static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val,
+			      int *val2,
+			      long mask) {
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+	{
+		int ret, result;
+
+		ret = IIO_VAL_INT;
+		result = 0;
+		mutex_lock(&indio_dev->mlock);
+		if (!st->chip_config.enable) {
+			result = inv_mpu6050_set_power_itg(st, true);
+			if (result)
+				goto error_read_raw;
+		}
+		/* when enable is on, power is already on */
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			/* power the gyro engine up unless the buffer path
+			 * already keeps it running */
+			if (!st->chip_config.gyro_fifo_enable ||
+				!st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, true,
+						INV_MPU6050_BIT_PWR_GYRO_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+						chan->channel2, val);
+			if (!st->chip_config.gyro_fifo_enable ||
+				!st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, false,
+						INV_MPU6050_BIT_PWR_GYRO_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			break;
+		case IIO_ACCEL:
+			if (!st->chip_config.accl_fifo_enable ||
+				!st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, true,
+						INV_MPU6050_BIT_PWR_ACCL_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
+						chan->channel2, val);
+			if (!st->chip_config.accl_fifo_enable ||
+				!st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, false,
+						INV_MPU6050_BIT_PWR_ACCL_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			break;
+		case IIO_TEMP:
+			/* wait for stablization */
+			msleep(INV_MPU6050_SENSOR_UP_TIME);
+			/* Propagate the read result; it was previously
+			 * discarded, reporting stale *val as success when
+			 * the I2C transfer failed. */
+			ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
+						IIO_MOD_X, val);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+error_read_raw:
+		if (!st->chip_config.enable)
+			result |= inv_mpu6050_set_power_itg(st, false);
+		mutex_unlock(&indio_dev->mlock);
+		if (result)
+			return result;
+
+		return ret;
+	}
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = 0;
+			*val2 = gyro_scale_6050[st->chip_config.fsr];
+
+			return IIO_VAL_INT_PLUS_NANO;
+		case IIO_ACCEL:
+			*val = 0;
+			*val2 = accel_scale[st->chip_config.accl_fs];
+
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			*val = 0;
+			*val2 = INV_MPU6050_TEMP_SCALE;
+
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		switch (chan->type) {
+		case IIO_TEMP:
+			*val = INV_MPU6050_TEMP_OFFSET;
+
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Program the gyro full-scale range and update the cached value.
+ * Returns 0 on success, -EINVAL for an out-of-range index, or the write
+ * error. */
+static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+{
+	u8 val;
+	int err;
+
+	if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
+		return -EINVAL;
+
+	/* Nothing to do when the cached range already matches. */
+	if (st->chip_config.fsr == fsr)
+		return 0;
+
+	val = fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT;
+	err = inv_mpu6050_write_reg(st, st->reg->gyro_config, val);
+	if (err)
+		return err;
+
+	st->chip_config.fsr = fsr;
+	return 0;
+}
+
+/* Program the accelerometer full-scale range and update the cached value.
+ * Returns 0 on success, -EINVAL for an out-of-range index, or the write
+ * error. */
+static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+{
+	u8 val;
+	int err;
+
+	if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
+		return -EINVAL;
+
+	/* Skip the register write when nothing changes. */
+	if (st->chip_config.accl_fs == fs)
+		return 0;
+
+	val = fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT;
+	err = inv_mpu6050_write_reg(st, st->reg->accl_config, val);
+	if (err)
+		return err;
+
+	st->chip_config.accl_fs = fs;
+	return 0;
+}
+
+/**
+ * inv_mpu6050_write_raw() - IIO write_raw callback; updates scale settings.
+ *
+ * Only accepted while the chip is disabled (not buffering); the chip is
+ * powered up for the write and put back to sleep afterwards.
+ *
+ * NOTE(review): `result |=` can merge distinct error codes into a
+ * meaningless value; confirm whether the power-down error should take
+ * precedence instead.
+ */
+static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val,
+			       int val2,
+			       long mask) {
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int result;
+
+	mutex_lock(&indio_dev->mlock);
+	/* we should only update scale when the chip is disabled, i.e.,
+		not running */
+	if (st->chip_config.enable) {
+		result = -EBUSY;
+		goto error_write_raw;
+	}
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		goto error_write_raw;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			result = inv_mpu6050_write_fsr(st, val);
+			break;
+		case IIO_ACCEL:
+			result = inv_mpu6050_write_accel_fs(st, val);
+			break;
+		default:
+			result = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		result = -EINVAL;
+		break;
+	}
+
+error_write_raw:
+	result |= inv_mpu6050_set_power_itg(st, false);
+	mutex_unlock(&indio_dev->mlock);
+
+	return result;
+}
+
+/**
+ * inv_mpu6050_set_lpf() - set low pass filter based on fifo rate.
+ *
+ * Based on the Nyquist principle, the sampling rate must
+ * exceed twice the bandwidth of the signal, or there
+ * would be aliasing. This function searches for the
+ * correct low pass parameters based on the fifo rate, i.e.,
+ * the sampling frequency.
+ */
+static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
+{
+	const int hz[] = {188, 98, 42, 20, 10, 5};
+	const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+			INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+			INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+	int i, h, result;
+	u8 data;
+
+	/* Pick the widest filter whose cutoff is at most half the rate. */
+	h = (rate >> 1);
+	i = 0;
+	while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
+		i++;
+	data = d[i];
+	result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+	if (result)
+		return result;
+	st->chip_config.lpf = data;
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_fifo_rate_store() - Set fifo rate.
+ *
+ * Sysfs store for the sampling frequency. Rejected while the chip is
+ * enabled (buffering); otherwise powers the chip up, programs the sample
+ * rate divider and a matching low pass filter, then powers back down.
+ */
+static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	s32 fifo_rate;
+	u8 d;
+	int result;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	if (kstrtoint(buf, 10, &fifo_rate))
+		return -EINVAL;
+	if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
+		fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+		return -EINVAL;
+	/* Avoid the register writes when the rate is unchanged. */
+	if (fifo_rate == st->chip_config.fifo_rate)
+		return count;
+
+	mutex_lock(&indio_dev->mlock);
+	if (st->chip_config.enable) {
+		result = -EBUSY;
+		goto fifo_rate_fail;
+	}
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		goto fifo_rate_fail;
+
+	/* sample rate = 1kHz / (divider + 1) */
+	d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
+	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	if (result)
+		goto fifo_rate_fail;
+	st->chip_config.fifo_rate = fifo_rate;
+
+	result = inv_mpu6050_set_lpf(st, fifo_rate);
+	if (result)
+		goto fifo_rate_fail;
+
+fifo_rate_fail:
+	result |= inv_mpu6050_set_power_itg(st, false);
+	mutex_unlock(&indio_dev->mlock);
+	if (result)
+		return result;
+
+	return count;
+}
+
+/**
+ * inv_fifo_rate_show() - Get the current sampling rate.
+ *
+ * Sysfs show for the cached fifo rate in Hz.
+ */
+static ssize_t inv_fifo_rate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+
+	return sprintf(buf, "%d\n", st->chip_config.fifo_rate);
+}
+
+/**
+ * inv_attr_show() - calling this function will show current
+ *                    parameters.
+ *
+ * Sysfs show routine dispatched on the iio_dev_attr address; currently
+ * only the mounting-matrix attributes are handled.
+ */
+static ssize_t inv_attr_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s8 *m;
+
+	switch (this_attr->address) {
+	/* In MPU6050, the two matrix are the same because gyro and accel
+	   are integrated in one chip */
+	case ATTR_GYRO_MATRIX:
+	case ATTR_ACCL_MATRIX:
+		m = st->plat_data.orientation;
+
+		return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+			m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * inv_mpu6050_validate_trigger() - validate_trigger callback for invensense
+ *                                  MPU6050 device.
+ * @indio_dev: The IIO device
+ * @trig: The new trigger
+ *
+ * Returns: 0 if the 'trig' matches the trigger registered by the MPU6050
+ * device, -EINVAL otherwise.
+ */
+static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
+					struct iio_trigger *trig)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	/* Only the trigger this driver registered itself is acceptable. */
+	return (trig == st->trig) ? 0 : -EINVAL;
+}
+
+#define INV_MPU6050_CHAN(_type, _channel2, _index) \
+ { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = _channel2, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0 , \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec inv_mpu_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+ /*
+ * Note that temperature should only be via polled reading only,
+ * not the final scan elements output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_OFFSET)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+};
+
+/* constant IIO attribute */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
+ inv_mpu6050_fifo_rate_store);
+static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_ACCL_MATRIX);
+
+static struct attribute *inv_attributes[] = {
+ &iio_dev_attr_in_gyro_matrix.dev_attr.attr,
+ &iio_dev_attr_in_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group inv_attribute_group = {
+ .attrs = inv_attributes
+};
+
+static const struct iio_info mpu_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &inv_mpu6050_read_raw,
+ .write_raw = &inv_mpu6050_write_raw,
+ .attrs = &inv_attribute_group,
+ .validate_trigger = inv_mpu6050_validate_trigger,
+};
+
+/**
+ * inv_check_and_setup_chip() - check and setup chip.
+ *
+ * Resets the part, toggles its power state into a known configuration,
+ * and puts both engines in standby. On return the chip is powered (the
+ * last toggle leaves it awake) with all sensors off.
+ */
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
+		const struct i2c_device_id *id)
+{
+	int result;
+
+	st->chip_type = INV_MPU6050;
+	st->hw  = &hw_info[st->chip_type];
+	st->reg = hw_info[st->chip_type].reg;
+
+	/* reset to make sure previous state are not there */
+	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+					INV_MPU6050_BIT_H_RESET);
+	if (result)
+		return result;
+	msleep(INV_MPU6050_POWER_UP_TIME);
+	/* toggle power state. After reset, the sleep bit could be on
+		or off depending on the OTP settings. Toggling power would
+		make it in a definite state as well as making the hardware
+		state align with the software state */
+	result = inv_mpu6050_set_power_itg(st, false);
+	if (result)
+		return result;
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		return result;
+
+	result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+	if (result)
+		return result;
+	result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+	if (result)
+		return result;
+
+	return 0;
+}
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * Setup order: chip reset/configuration, triggered buffer, trigger,
+ * timestamp FIFO, and finally device registration; the error path unwinds
+ * in reverse.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct inv_mpu6050_state *st;
+	struct iio_dev *indio_dev;
+	struct inv_mpu6050_platform_data *pdata;
+	int result;
+
+	/* All register access goes through SMBus I2C block transfers. */
+	if (!i2c_check_functionality(client->adapter,
+		I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -ENOSYS;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	st->client = client;
+	pdata = dev_get_platdata(&client->dev);
+	/* Platform data (mounting matrix) is optional. */
+	if (pdata)
+		st->plat_data = *pdata;
+	/* power is turned on inside check chip type*/
+	result = inv_check_and_setup_chip(st, id);
+	if (result)
+		return result;
+
+	result = inv_mpu6050_init_config(indio_dev);
+	if (result) {
+		dev_err(&client->dev,
+			"Could not initialize device.\n");
+		return result;
+	}
+
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
+	indio_dev->channels = inv_mpu_channels;
+	indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+
+	indio_dev->info = &mpu_info;
+	indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+
+	result = iio_triggered_buffer_setup(indio_dev,
+					    inv_mpu6050_irq_handler,
+					    inv_mpu6050_read_fifo,
+					    NULL);
+	if (result) {
+		dev_err(&st->client->dev, "configure buffer fail %d\n",
+				result);
+		return result;
+	}
+	result = inv_mpu6050_probe_trigger(indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+		goto out_unreg_ring;
+	}
+
+	INIT_KFIFO(st->timestamps);
+	spin_lock_init(&st->time_stamp_lock);
+	result = iio_device_register(indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "IIO register fail %d\n", result);
+		goto out_remove_trigger;
+	}
+
+	return 0;
+
+out_remove_trigger:
+	inv_mpu6050_remove_trigger(st);
+out_unreg_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return result;
+}
+
+/* Remove callback: tears down probe's setup in reverse order (device,
+ * trigger, triggered buffer). The iio_dev itself is devm-managed. */
+static int inv_mpu_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	inv_mpu6050_remove_trigger(st);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+
+/* System resume: wake the chip (clear the sleep bit). */
+static int inv_mpu_resume(struct device *dev)
+{
+	return inv_mpu6050_set_power_itg(
+		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+}
+
+/* System suspend: put the chip to sleep to save power. */
+static int inv_mpu_suspend(struct device *dev)
+{
+	return inv_mpu6050_set_power_itg(
+		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+}
+static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+
+#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
+#else
+#define INV_MPU6050_PMOPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+ {"mpu6050", INV_MPU6050},
+ {"mpu6500", INV_MPU6500},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "inv-mpu6050",
+ .pm = INV_MPU6050_PMOPS,
+ },
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
new file mode 100644
index 00000000000000..e7799315d4dc1c
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -0,0 +1,247 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/platform_data/invensense_mpu6050.h>
+
+/**
+ * struct inv_mpu6050_reg_map - Notable registers.
+ * @sample_rate_div: Divider applied to gyro output rate.
+ * @lpf: Configures internal low pass filter.
+ * @user_ctrl: Enables/resets the FIFO.
+ * @fifo_en: Determines which data will appear in FIFO.
+ * @gyro_config: gyro config register.
+ * @accl_config: accel config register
+ * @fifo_count_h: Upper byte of FIFO count.
+ * @fifo_r_w: FIFO register.
+ * @raw_gyro: Address of first gyro register.
+ * @raw_accl: Address of first accel register.
+ * @temperature: temperature register
+ * @int_enable: Interrupt enable register.
+ * @pwr_mgmt_1: Controls chip's power state and clock source.
+ * @pwr_mgmt_2: Controls power state of individual sensors.
+ */
+struct inv_mpu6050_reg_map {
+ u8 sample_rate_div;
+ u8 lpf;
+ u8 user_ctrl;
+ u8 fifo_en;
+ u8 gyro_config;
+ u8 accl_config;
+ u8 fifo_count_h;
+ u8 fifo_r_w;
+ u8 raw_gyro;
+ u8 raw_accl;
+ u8 temperature;
+ u8 int_enable;
+ u8 pwr_mgmt_1;
+ u8 pwr_mgmt_2;
+};
+
+/*device enum */
+enum inv_devices {
+ INV_MPU6050,
+ INV_MPU6500,
+ INV_NUM_PARTS
+};
+
+/**
+ * struct inv_mpu6050_chip_config - Cached chip configuration data.
+ * @fsr: Full scale range.
+ * @lpf: Digital low pass filter frequency.
+ * @accl_fs: accel full scale range.
+ * @enable: master enable state.
+ * @accl_fifo_enable: enable accel data output
+ * @gyro_fifo_enable: enable gyro data output
+ * @fifo_rate: FIFO update rate.
+ */
+struct inv_mpu6050_chip_config {
+ unsigned int fsr:2;
+ unsigned int lpf:3;
+ unsigned int accl_fs:2;
+ unsigned int enable:1;
+ unsigned int accl_fifo_enable:1;
+ unsigned int gyro_fifo_enable:1;
+ u16 fifo_rate;
+};
+
+/**
+ * struct inv_mpu6050_hw - Other important hardware information.
+ * @num_reg: Number of registers on device.
+ * @name: name of the chip.
+ * @reg: register map of the chip.
+ * @config: configuration of the chip.
+ */
+struct inv_mpu6050_hw {
+	u8 num_reg;
+	/* NOTE(review): hw_info[] assigns string literals here; const char *
+	 * would be the more accurate type — confirm before changing. */
+	u8 *name;
+	const struct inv_mpu6050_reg_map *reg;
+	const struct inv_mpu6050_chip_config *config;
+};
+
+/*
+ * struct inv_mpu6050_state - Driver state variables.
+ * @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
+ * @trig: IIO trigger.
+ * @chip_config: Cached attribute information.
+ * @reg: Map of important registers.
+ * @hw: Other hardware-specific information.
+ * @chip_type: chip type.
+ * @time_stamp_lock: spin lock to time stamp.
+ * @client: i2c client handle.
+ * @plat_data: platform data.
+ * @timestamps: kfifo queue to store time stamp.
+ */
+struct inv_mpu6050_state {
+#define TIMESTAMP_FIFO_SIZE 16
+ struct iio_trigger *trig;
+ struct inv_mpu6050_chip_config chip_config;
+ const struct inv_mpu6050_reg_map *reg;
+ const struct inv_mpu6050_hw *hw;
+ enum inv_devices chip_type;
+ spinlock_t time_stamp_lock;
+ struct i2c_client *client;
+ struct inv_mpu6050_platform_data plat_data;
+ DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+};
+
+/*register and associated bit definition*/
+#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
+#define INV_MPU6050_REG_CONFIG 0x1A
+#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+
+#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
+
+#define INV_MPU6050_REG_INT_ENABLE 0x38
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+
+#define INV_MPU6050_REG_RAW_ACCEL 0x3B
+#define INV_MPU6050_REG_TEMPERATURE 0x41
+#define INV_MPU6050_REG_RAW_GYRO 0x43
+
+#define INV_MPU6050_REG_USER_CTRL 0x6A
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
+
+#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CLK_MASK 0x7
+
+#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+
+#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
+#define INV_MPU6050_REG_FIFO_R_W 0x74
+
+#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
+#define INV_MPU6050_FIFO_COUNT_BYTE 2
+#define INV_MPU6050_FIFO_THRESHOLD 500
+#define INV_MPU6050_POWER_UP_TIME 100
+#define INV_MPU6050_TEMP_UP_TIME 100
+#define INV_MPU6050_SENSOR_UP_TIME 30
+#define INV_MPU6050_REG_UP_TIME 5
+
+#define INV_MPU6050_TEMP_OFFSET 12421
+#define INV_MPU6050_TEMP_SCALE 2941
+#define INV_MPU6050_MAX_GYRO_FS_PARAM 3
+#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
+#define INV_MPU6050_THREE_AXIS 3
+#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
+#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
+
+/* 6 + 6 round up and plus 8 */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+
+/* init parameters */
+#define INV_MPU6050_INIT_FIFO_RATE 50
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
+
+/* scan element definition */
+enum inv_mpu6050_scan {
+ INV_MPU6050_SCAN_ACCL_X,
+ INV_MPU6050_SCAN_ACCL_Y,
+ INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_GYRO_X,
+ INV_MPU6050_SCAN_GYRO_Y,
+ INV_MPU6050_SCAN_GYRO_Z,
+ INV_MPU6050_SCAN_TIMESTAMP,
+};
+
+enum inv_mpu6050_filter_e {
+ INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
+ INV_MPU6050_FILTER_188HZ,
+ INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ,
+ INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ,
+ INV_MPU6050_FILTER_5HZ,
+ INV_MPU6050_FILTER_2100HZ_NOLPF,
+ NUM_MPU6050_FILTER
+};
+
+/* IIO attribute address */
+enum INV_MPU6050_IIO_ATTR_ADDR {
+ ATTR_GYRO_MATRIX,
+ ATTR_ACCL_MATRIX,
+};
+
+enum inv_mpu6050_accl_fs_e {
+ INV_MPU6050_FS_02G = 0,
+ INV_MPU6050_FS_04G,
+ INV_MPU6050_FS_08G,
+ INV_MPU6050_FS_16G,
+ NUM_ACCL_FSR
+};
+
+enum inv_mpu6050_fsr_e {
+ INV_MPU6050_FSR_250DPS = 0,
+ INV_MPU6050_FSR_500DPS,
+ INV_MPU6050_FSR_1000DPS,
+ INV_MPU6050_FSR_2000DPS,
+ NUM_MPU6050_FSR
+};
+
+enum inv_mpu6050_clock_sel_e {
+ INV_CLK_INTERNAL = 0,
+ INV_CLK_PLL,
+ NUM_CLK
+};
+
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st);
+int inv_reset_fifo(struct iio_dev *indio_dev);
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
new file mode 100644
index 00000000000000..0cd306a72a6e34
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -0,0 +1,192 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * inv_reset_fifo() - reset the hardware FIFO and re-arm data output.
+ *
+ * Register sequence is order-critical: mask the interrupt, stop sensor
+ * output and FIFO reads, pulse the FIFO reset bit, then re-enable the
+ * interrupt, FIFO reads, and whichever sensor outputs chip_config selects.
+ * Returns 0 on success or the first failing register-write result.
+ */
+int inv_reset_fifo(struct iio_dev *indio_dev)
+{
+	int result;
+	u8 d;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	/* disable interrupt */
+	result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+	if (result) {
+		dev_err(&st->client->dev, "int_enable failed %d\n", result);
+		return result;
+	}
+	/* disable the sensor output to FIFO */
+	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+	if (result)
+		goto reset_fifo_fail;
+	/* disable fifo reading */
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+	if (result)
+		goto reset_fifo_fail;
+
+	/* reset FIFO*/
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+					INV_MPU6050_BIT_FIFO_RST);
+	if (result)
+		goto reset_fifo_fail;
+	/* enable interrupt */
+	if (st->chip_config.accl_fifo_enable ||
+	    st->chip_config.gyro_fifo_enable) {
+		result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+					INV_MPU6050_BIT_DATA_RDY_EN);
+		if (result)
+			return result;
+	}
+	/* enable FIFO reading and I2C master interface*/
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+					INV_MPU6050_BIT_FIFO_EN);
+	if (result)
+		goto reset_fifo_fail;
+	/* enable sensor output to FIFO */
+	d = 0;
+	if (st->chip_config.gyro_fifo_enable)
+		d |= INV_MPU6050_BITS_GYRO_OUT;
+	if (st->chip_config.accl_fifo_enable)
+		d |= INV_MPU6050_BIT_ACCEL_OUT;
+	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+	if (result)
+		goto reset_fifo_fail;
+
+	return 0;
+
+reset_fifo_fail:
+	dev_err(&st->client->dev, "reset fifo failed %d\n", result);
+	/* best effort: leave the data-ready interrupt armed on failure */
+	result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+					INV_MPU6050_BIT_DATA_RDY_EN);
+
+	return result;
+}
+
+/* Discard all queued timestamps, serialized against the IRQ producer. */
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+	unsigned long irq_flags;
+
+	/*
+	 * The hard IRQ handler pushes into this kfifo; take the timestamp
+	 * spinlock with interrupts disabled so it cannot race the reset.
+	 */
+	spin_lock_irqsave(&st->time_stamp_lock, irq_flags);
+	kfifo_reset(&st->timestamps);
+	spin_unlock_irqrestore(&st->time_stamp_lock, irq_flags);
+}
+
+/**
+ * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+ *
+ * Hard-IRQ half: records the arrival time of the sample into the
+ * timestamps kfifo, then defers the FIFO drain to the threaded half
+ * (inv_mpu6050_read_fifo) by returning IRQ_WAKE_THREAD.
+ */
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	s64 timestamp;
+
+	timestamp = iio_get_time_ns();
+	/* one timestamp queued per interrupt; consumed one per datum */
+	kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
+				&st->time_stamp_lock);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
+ *
+ * Threaded half of the data-ready interrupt: drains complete datums from
+ * the chip FIFO, pairs each with a queued timestamp and pushes the result
+ * to the IIO buffers.  On any inconsistency (odd byte count, overflow,
+ * timestamp mismatch, short read) both the software timestamp FIFO and
+ * the hardware FIFO are flushed.
+ */
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	size_t bytes_per_datum;
+	int result;
+	u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+	u16 fifo_count;
+	s64 timestamp;
+
+	mutex_lock(&indio_dev->mlock);
+	if (!(st->chip_config.accl_fifo_enable |
+		st->chip_config.gyro_fifo_enable))
+		goto end_session;
+	/* datum size depends on which sensors feed the FIFO */
+	bytes_per_datum = 0;
+	if (st->chip_config.accl_fifo_enable)
+		bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+	if (st->chip_config.gyro_fifo_enable)
+		bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+	/*
+	 * read fifo_count register to know how many bytes inside FIFO
+	 * right now
+	 */
+	result = i2c_smbus_read_i2c_block_data(st->client,
+				       st->reg->fifo_count_h,
+				       INV_MPU6050_FIFO_COUNT_BYTE, data);
+	if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+		goto end_session;
+	fifo_count = be16_to_cpup((__be16 *)(&data[0]));
+	if (fifo_count < bytes_per_datum)
+		goto end_session;
+	/* fifo count can't be odd number, if it is odd, reset fifo*/
+	if (fifo_count & 1)
+		goto flush_fifo;
+	if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
+		goto flush_fifo;
+	/* Timestamp mismatch. */
+	if (kfifo_len(&st->timestamps) >
+		fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
+		goto flush_fifo;
+	while (fifo_count >= bytes_per_datum) {
+		result = i2c_smbus_read_i2c_block_data(st->client,
+						       st->reg->fifo_r_w,
+						       bytes_per_datum, data);
+		if (result != bytes_per_datum)
+			goto flush_fifo;
+
+		result = kfifo_out(&st->timestamps, &timestamp, 1);
+		/* when there is no timestamp, put timestamp as 0 */
+		if (0 == result)
+			timestamp = 0;
+
+		result = iio_push_to_buffers_with_timestamp(indio_dev, data,
+							    timestamp);
+		if (result)
+			goto flush_fifo;
+		fifo_count -= bytes_per_datum;
+	}
+
+end_session:
+	mutex_unlock(&indio_dev->mlock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+
+flush_fifo:
+	/*
+	 * Flush SW and HW FIFOs.  Clear the timestamp kfifo BEFORE
+	 * resetting the hardware FIFO: inv_reset_fifo() re-enables the
+	 * data-ready interrupt, so clearing afterwards could discard the
+	 * timestamp of a sample that arrives in between, leaving the
+	 * next datum permanently one timestamp short.
+	 */
+	inv_clear_kfifo(st);
+	inv_reset_fifo(indio_dev);
+	mutex_unlock(&indio_dev->mlock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
new file mode 100644
index 00000000000000..926fccea8de023
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -0,0 +1,155 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include "inv_mpu_iio.h"
+
+/* Latch into chip_config which sensors have at least one axis enabled. */
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	const unsigned long *scan_mask = indio_dev->active_scan_mask;
+	bool gyro_on, accl_on;
+
+	gyro_on = test_bit(INV_MPU6050_SCAN_GYRO_X, scan_mask) ||
+		  test_bit(INV_MPU6050_SCAN_GYRO_Y, scan_mask) ||
+		  test_bit(INV_MPU6050_SCAN_GYRO_Z, scan_mask);
+
+	accl_on = test_bit(INV_MPU6050_SCAN_ACCL_X, scan_mask) ||
+		  test_bit(INV_MPU6050_SCAN_ACCL_Y, scan_mask) ||
+		  test_bit(INV_MPU6050_SCAN_ACCL_Z, scan_mask);
+
+	st->chip_config.gyro_fifo_enable = gyro_on;
+	st->chip_config.accl_fifo_enable = accl_on;
+}
+
+/**
+ * inv_mpu6050_set_enable() - enable chip functions.
+ * @indio_dev:	Device driver instance.
+ * @enable:	enable/disable
+ *
+ * Enable path: power the chip, query the scan mask, power the selected
+ * sensor engines, then reset/arm the FIFO.  Disable path is the reverse
+ * order: stop FIFO output, mask the interrupt, stop FIFO reads, power
+ * down both engines, then the chip.  The ordering is intentional; do
+ * not rearrange the register writes.
+ */
+static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int result;
+
+	if (enable) {
+		result = inv_mpu6050_set_power_itg(st, true);
+		if (result)
+			return result;
+		inv_scan_query(indio_dev);
+		if (st->chip_config.gyro_fifo_enable) {
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+			if (result)
+				return result;
+		}
+		if (st->chip_config.accl_fifo_enable) {
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+			if (result)
+				return result;
+		}
+		result = inv_reset_fifo(indio_dev);
+		if (result)
+			return result;
+	} else {
+		result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+		if (result)
+			return result;
+		result = inv_mpu6050_set_power_itg(st, false);
+		if (result)
+			return result;
+	}
+	st->chip_config.enable = enable;
+
+	return 0;
+}
+
+/**
+ * inv_mpu_data_rdy_trigger_set_state() - set data ready interrupt state
+ * @trig:	Trigger instance
+ * @state:	Desired trigger state
+ *
+ * Thin adapter from the iio_trigger ops signature onto the driver's
+ * enable/disable entry point.
+ */
+static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					      bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+
+	return inv_mpu6050_set_enable(indio_dev, state);
+}
+
+/* trigger ops: only state switching is needed; validation hooks unused */
+static const struct iio_trigger_ops inv_mpu_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
+};
+
+/*
+ * inv_mpu6050_probe_trigger() - allocate and register the data-ready trigger.
+ *
+ * Allocates an iio_trigger named "<dev>-dev<id>", wires the chip IRQ to
+ * the generic data-ready poll handler, registers the trigger and makes
+ * it the device's default.  Unwinds in reverse order on failure.
+ */
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	st->trig = iio_trigger_alloc("%s-dev%d",
+					indio_dev->name,
+					indio_dev->id);
+	if (st->trig == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	/* IRQ must be requested before registration so the trigger is usable */
+	ret = request_irq(st->client->irq, &iio_trigger_generic_data_rdy_poll,
+				IRQF_TRIGGER_RISING,
+				"inv_mpu",
+				st->trig);
+	if (ret)
+		goto error_free_trig;
+	st->trig->dev.parent = &st->client->dev;
+	st->trig->ops = &inv_mpu_trigger_ops;
+	iio_trigger_set_drvdata(st->trig, indio_dev);
+	ret = iio_trigger_register(st->trig);
+	if (ret)
+		goto error_free_irq;
+	/* hold a reference: indio_dev->trig is released separately */
+	indio_dev->trig = iio_trigger_get(st->trig);
+
+	return 0;
+
+error_free_irq:
+	free_irq(st->client->irq, st->trig);
+error_free_trig:
+	iio_trigger_free(st->trig);
+error_ret:
+	return ret;
+}
+
+/*
+ * inv_mpu6050_remove_trigger() - tear down what probe_trigger set up.
+ * Order matters: unregister before releasing the IRQ, free last.
+ */
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
+{
+	iio_trigger_unregister(st->trig);
+	free_irq(st->client->irq, st->trig);
+	iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
new file mode 100644
index 00000000000000..702b02d04d5b25
--- /dev/null
+++ b/drivers/iio/industrialio-buffer.c
@@ -0,0 +1,1131 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of buffer allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include <linux/iio/iio.h>
+#include "iio_core.h"
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+/* sysfs textual names for scan-element endianness, indexed by enum iio_endian */
+static const char * const iio_endian_prefix[] = {
+	[IIO_BE] = "be",
+	[IIO_LE] = "le",
+};
+
+/* A buffer is "active" iff it is linked into a device's buffer_list. */
+static bool iio_buffer_is_active(struct iio_buffer *buf)
+{
+	return !list_empty(&buf->buffer_list);
+}
+
+/* Ask the backend implementation whether any data is ready to read. */
+static bool iio_buffer_data_available(struct iio_buffer *buf)
+{
+	const struct iio_buffer_access_funcs *access = buf->access;
+
+	return access->data_available(buf);
+}
+
+/**
+ * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ *
+ * This function relies on all buffer implementations having an
+ * iio_buffer as their first element.
+ *
+ * Blocks (unless O_NONBLOCK) until data is available or the device is
+ * unregistered; loops on zero-length reads so a blocking reader never
+ * returns 0 while the device is alive.
+ **/
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+				      size_t n, loff_t *f_ps)
+{
+	struct iio_dev *indio_dev = filp->private_data;
+	struct iio_buffer *rb = indio_dev->buffer;
+	int ret;
+
+	if (!indio_dev->info)
+		return -ENODEV;
+
+	if (!rb || !rb->access->read_first_n)
+		return -EINVAL;
+
+	do {
+		if (!iio_buffer_data_available(rb)) {
+			if (filp->f_flags & O_NONBLOCK)
+				return -EAGAIN;
+
+			/* also woken by unregistration (info == NULL) */
+			ret = wait_event_interruptible(rb->pollq,
+					iio_buffer_data_available(rb) ||
+					indio_dev->info == NULL);
+			if (ret)
+				return ret;
+			if (indio_dev->info == NULL)
+				return -ENODEV;
+		}
+
+		ret = rb->access->read_first_n(rb, n, buf);
+		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+			ret = -EAGAIN;
+	} while (ret == 0);
+
+	return ret;
+}
+
+/**
+ * iio_buffer_poll() - poll the buffer to find out if it has data
+ * @filp:	file structure pointer for device access
+ * @wait:	poll table supplied by the poll core
+ *
+ * Return: (POLLIN | POLLRDNORM) when data is available, POLLERR when
+ * the device has been unregistered, 0 otherwise.
+ */
+unsigned int iio_buffer_poll(struct file *filp,
+			     struct poll_table_struct *wait)
+{
+	struct iio_dev *indio_dev = filp->private_data;
+	struct iio_buffer *rb = indio_dev->buffer;
+
+	/*
+	 * A poll handler must return an event mask, never a negative
+	 * errno: report an unregistered device as POLLERR instead of
+	 * the previous -ENODEV (which decayed into a bogus mask).
+	 */
+	if (!indio_dev->info)
+		return POLLERR;
+
+	poll_wait(filp, &rb->pollq, wait);
+	if (iio_buffer_data_available(rb))
+		return POLLIN | POLLRDNORM;
+	/* need a way of knowing if there may be enough data... */
+	return 0;
+}
+
+/**
+ * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll(). Should usually
+ * be called when the device is unregistered.
+ */
+void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
+{
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	/* nothing to do for devices registered without a buffer */
+	if (buffer)
+		wake_up(&buffer->pollq);
+}
+
+/* One-time initialization of an iio_buffer's core bookkeeping fields. */
+void iio_buffer_init(struct iio_buffer *buffer)
+{
+	/* refcount starts at one, owned by the registering driver */
+	kref_init(&buffer->ref);
+	init_waitqueue_head(&buffer->pollq);
+	INIT_LIST_HEAD(&buffer->buffer_list);
+	INIT_LIST_HEAD(&buffer->demux_list);
+}
+EXPORT_SYMBOL(iio_buffer_init);
+
+/* sysfs "index" attribute: the channel's position in the scan. */
+static ssize_t iio_show_scan_index(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct iio_chan_spec *chan = to_iio_dev_attr(attr)->c;
+
+	return sprintf(buf, "%u\n", chan->scan_index);
+}
+
+/*
+ * sysfs "type" attribute: "<endian>:<sign><realbits>/<storagebits>
+ * [X<repeat>]>><shift>" describing the channel's scan element layout.
+ */
+static ssize_t iio_show_fixed_type(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct iio_chan_spec *chan = to_iio_dev_attr(attr)->c;
+	u8 type = chan->scan_type.endianness;
+
+	if (type == IIO_CPU) {
+		/* resolve "CPU endianness" to the build's actual byte order */
+#ifdef __LITTLE_ENDIAN
+		type = IIO_LE;
+#else
+		type = IIO_BE;
+#endif
+	}
+	if (chan->scan_type.repeat > 1)
+		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
+			       iio_endian_prefix[type],
+			       chan->scan_type.sign,
+			       chan->scan_type.realbits,
+			       chan->scan_type.storagebits,
+			       chan->scan_type.repeat,
+			       chan->scan_type.shift);
+
+	return sprintf(buf, "%s:%c%d/%d>>%u\n",
+		       iio_endian_prefix[type],
+		       chan->scan_type.sign,
+		       chan->scan_type.realbits,
+		       chan->scan_type.storagebits,
+		       chan->scan_type.shift);
+}
+
+/* sysfs "en" attribute (read): is this channel in the buffer's scan mask? */
+static ssize_t iio_scan_el_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	int enabled;
+
+	/* normalize the test_bit() result to exactly 0 or 1 */
+	enabled = !!test_bit(to_iio_dev_attr(attr)->address,
+			     indio_dev->buffer->scan_mask);
+
+	return sprintf(buf, "%d\n", enabled);
+}
+
+/*
+ * Drop one bit from the buffer's scan mask.  Cannot fail; the int
+ * return mirrors iio_scan_mask_set() for symmetry at the call site.
+ */
+static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
+{
+	clear_bit(bit, buffer->scan_mask);
+
+	return 0;
+}
+
+/*
+ * sysfs "en" attribute (write): add/remove a channel from the scan mask.
+ * Rejected with -EBUSY while the buffer is active; only flips the bit
+ * when the requested state differs from the current one.
+ */
+static ssize_t iio_scan_el_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	int ret;
+	bool state;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = strtobool(buf, &state);
+	if (ret < 0)
+		return ret;
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		ret = -EBUSY;
+		goto error_ret;
+	}
+	/* ret is the current bit state (0/1) after this query */
+	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
+	if (ret < 0)
+		goto error_ret;
+	if (!state && ret) {
+		ret = iio_scan_mask_clear(buffer, this_attr->address);
+		if (ret)
+			goto error_ret;
+	} else if (state && !ret) {
+		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
+		if (ret)
+			goto error_ret;
+	}
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret < 0 ? ret : len;
+
+}
+
+/* sysfs timestamp "en" attribute (read). */
+static ssize_t iio_scan_el_ts_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct iio_buffer *buffer = dev_to_iio_dev(dev)->buffer;
+
+	return sprintf(buf, "%d\n", buffer->scan_timestamp);
+}
+
+/* sysfs timestamp "en" attribute (write): toggle timestamp capture. */
+static ssize_t iio_scan_el_ts_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool state;
+	int ret;
+
+	ret = strtobool(buf, &state);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		/* timestamp selection may not change while capturing */
+		ret = -EBUSY;
+		goto unlock;
+	}
+	indio_dev->buffer->scan_timestamp = state;
+unlock:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+/*
+ * Create the per-channel scan_elements attributes ("index", "type" and
+ * "en") for one channel.  The timestamp channel gets its own en
+ * show/store pair since it toggles a flag rather than a mask bit.
+ * Returns the number of attributes created, or a negative errno.
+ */
+static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+					const struct iio_chan_spec *chan)
+{
+	int ret, attrcount = 0;
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	ret = __iio_add_chan_devattr("index",
+				     chan,
+				     &iio_show_scan_index,
+				     NULL,
+				     0,
+				     IIO_SEPARATE,
+				     &indio_dev->dev,
+				     &buffer->scan_el_dev_attr_list);
+	if (ret)
+		return ret;
+	attrcount++;
+	ret = __iio_add_chan_devattr("type",
+				     chan,
+				     &iio_show_fixed_type,
+				     NULL,
+				     0,
+				     0,
+				     &indio_dev->dev,
+				     &buffer->scan_el_dev_attr_list);
+	if (ret)
+		return ret;
+	attrcount++;
+	if (chan->type != IIO_TIMESTAMP)
+		ret = __iio_add_chan_devattr("en",
+					     chan,
+					     &iio_scan_el_show,
+					     &iio_scan_el_store,
+					     chan->scan_index,
+					     0,
+					     &indio_dev->dev,
+					     &buffer->scan_el_dev_attr_list);
+	else
+		ret = __iio_add_chan_devattr("en",
+					     chan,
+					     &iio_scan_el_ts_show,
+					     &iio_scan_el_ts_store,
+					     chan->scan_index,
+					     0,
+					     &indio_dev->dev,
+					     &buffer->scan_el_dev_attr_list);
+	if (ret)
+		return ret;
+	attrcount++;
+	ret = attrcount;
+	return ret;
+}
+
+static const char * const iio_scan_elements_group_name = "scan_elements";
+
+/*
+ * iio_buffer_register() - set up the buffer's sysfs interface.
+ *
+ * Builds the scan_elements attribute group from the driver-supplied
+ * static attributes plus per-channel dynamic ones, sizes masklength
+ * from the largest scan_index seen, and allocates the scan mask.
+ * Partially built state is torn down on failure.
+ */
+int iio_buffer_register(struct iio_dev *indio_dev,
+			const struct iio_chan_spec *channels,
+			int num_channels)
+{
+	struct iio_dev_attr *p;
+	struct attribute **attr;
+	struct iio_buffer *buffer = indio_dev->buffer;
+	int ret, i, attrn, attrcount, attrcount_orig = 0;
+
+	if (buffer->attrs)
+		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
+
+	/* count the driver's static scan_elements attributes, if any */
+	if (buffer->scan_el_attrs != NULL) {
+		attr = buffer->scan_el_attrs->attrs;
+		while (*attr++ != NULL)
+			attrcount_orig++;
+	}
+	attrcount = attrcount_orig;
+	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+	if (channels) {
+		/* new magic */
+		for (i = 0; i < num_channels; i++) {
+			/* negative scan_index means "not part of the scan" */
+			if (channels[i].scan_index < 0)
+				continue;
+
+			/* Establish necessary mask length */
+			if (channels[i].scan_index >
+			    (int)indio_dev->masklength - 1)
+				indio_dev->masklength
+					= channels[i].scan_index + 1;
+
+			ret = iio_buffer_add_channel_sysfs(indio_dev,
+							   &channels[i]);
+			if (ret < 0)
+				goto error_cleanup_dynamic;
+			attrcount += ret;
+			if (channels[i].type == IIO_TIMESTAMP)
+				indio_dev->scan_index_timestamp =
+					channels[i].scan_index;
+		}
+		if (indio_dev->masklength && buffer->scan_mask == NULL) {
+			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+						    sizeof(*buffer->scan_mask),
+						    GFP_KERNEL);
+			if (buffer->scan_mask == NULL) {
+				ret = -ENOMEM;
+				goto error_cleanup_dynamic;
+			}
+		}
+	}
+
+	buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+	/* +1 for the NULL terminator the attribute-group API requires */
+	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+					      sizeof(buffer->scan_el_group.attrs[0]),
+					      GFP_KERNEL);
+	if (buffer->scan_el_group.attrs == NULL) {
+		ret = -ENOMEM;
+		goto error_free_scan_mask;
+	}
+	if (buffer->scan_el_attrs)
+		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
+		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+	attrn = attrcount_orig;
+
+	/* append the dynamically created per-channel attributes */
+	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
+
+	return 0;
+
+error_free_scan_mask:
+	kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+
+	return ret;
+}
+EXPORT_SYMBOL(iio_buffer_register);
+
+/* Release everything iio_buffer_register() allocated. */
+void iio_buffer_unregister(struct iio_dev *indio_dev)
+{
+	struct iio_buffer *buffer = indio_dev->buffer;
+
+	kfree(buffer->scan_mask);
+	kfree(buffer->scan_el_group.attrs);
+	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
+}
+EXPORT_SYMBOL(iio_buffer_unregister);
+
+/* sysfs "length" attribute (read): current buffer length in datums. */
+ssize_t iio_buffer_read_length(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct iio_buffer *buffer = dev_to_iio_dev(dev)->buffer;
+
+	/* backends without a get_length() callback report nothing */
+	if (!buffer->access->get_length)
+		return 0;
+
+	return sprintf(buf, "%d\n", buffer->access->get_length(buffer));
+}
+EXPORT_SYMBOL(iio_buffer_read_length);
+
+/* sysfs "length" attribute (write): resize the buffer when idle. */
+ssize_t iio_buffer_write_length(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_buffer *buffer = indio_dev->buffer;
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	/* nothing to do when the requested length is already set */
+	if (buffer->access->get_length &&
+	    val == buffer->access->get_length(buffer))
+		return len;
+
+	mutex_lock(&indio_dev->mlock);
+	if (iio_buffer_is_active(indio_dev->buffer)) {
+		/* resizing is not allowed while the buffer is capturing */
+		ret = -EBUSY;
+	} else {
+		ret = 0;
+		if (buffer->access->set_length)
+			buffer->access->set_length(buffer, val);
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_write_length);
+
+/* sysfs "enable" attribute (read): 1 while the buffer is capturing. */
+ssize_t iio_buffer_show_enable(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool active = iio_buffer_is_active(indio_dev->buffer);
+
+	return sprintf(buf, "%d\n", active);
+}
+EXPORT_SYMBOL(iio_buffer_show_enable);
+
+/* Note NULL used as error indicator as it doesn't make sense. */
+/*
+ * Find the first entry of available_scan_masks that is a superset of
+ * @mask.  NOTE(review): the walk stops when the first long of an entry
+ * is zero, so the table appears to require an all-zero terminator and
+ * no valid mask whose low word is empty -- confirm against drivers
+ * supplying available_scan_masks.
+ */
+static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
+						unsigned int masklength,
+						const unsigned long *mask)
+{
+	if (bitmap_empty(mask, masklength))
+		return NULL;
+	while (*av_masks) {
+		if (bitmap_subset(mask, av_masks, masklength))
+			return av_masks;
+		av_masks += BITS_TO_LONGS(masklength);
+	}
+	return NULL;
+}
+
+/* Storage footprint in bytes of one scan element of the given channel. */
+static unsigned int iio_storage_bytes_for(const struct iio_chan_spec *ch)
+{
+	unsigned int length = ch->scan_type.storagebits / 8;
+
+	if (ch->scan_type.repeat > 1)
+		length *= ch->scan_type.repeat;
+
+	return length;
+}
+
+/*
+ * Compute the size in bytes of one demuxed scan, with each element
+ * naturally aligned to its own storage size, optionally followed by
+ * the (aligned) timestamp.  The per-channel size computation that was
+ * duplicated for the timestamp branch is factored into
+ * iio_storage_bytes_for().
+ */
+static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
+				  const unsigned long *mask, bool timestamp)
+{
+	const struct iio_chan_spec *ch;
+	unsigned bytes = 0;
+	int length, i;
+
+	/* How much space will the demuxed element take? */
+	for_each_set_bit(i, mask,
+			 indio_dev->masklength) {
+		ch = iio_find_channel_from_si(indio_dev, i);
+		length = iio_storage_bytes_for(ch);
+		bytes = ALIGN(bytes, length);
+		bytes += length;
+	}
+	if (timestamp) {
+		ch = iio_find_channel_from_si(indio_dev,
+					      indio_dev->scan_index_timestamp);
+		length = iio_storage_bytes_for(ch);
+		bytes = ALIGN(bytes, length);
+		bytes += length;
+	}
+	return bytes;
+}
+
+/* Put a buffer on the device's active list, holding a reference for it. */
+static void iio_buffer_activate(struct iio_dev *indio_dev,
+				struct iio_buffer *buffer)
+{
+	iio_buffer_get(buffer);
+	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
+}
+
+/* Unlink first, then drop the reference taken at activation. */
+static void iio_buffer_deactivate(struct iio_buffer *buffer)
+{
+	list_del_init(&buffer->buffer_list);
+	iio_buffer_put(buffer);
+}
+
+/*
+ * Force-disable every active buffer, e.g. on device unregistration.
+ * Teardown order mirrors the normal disable path: predisable hook,
+ * deactivate all buffers, drop to direct mode, postdisable hook.
+ * Hook errors are deliberately ignored -- there is no way back.
+ */
+void iio_disable_all_buffers(struct iio_dev *indio_dev)
+{
+	struct iio_buffer *buffer, *_buffer;
+
+	if (list_empty(&indio_dev->buffer_list))
+		return;
+
+	if (indio_dev->setup_ops->predisable)
+		indio_dev->setup_ops->predisable(indio_dev);
+
+	list_for_each_entry_safe(buffer, _buffer,
+				 &indio_dev->buffer_list, buffer_list)
+		iio_buffer_deactivate(buffer);
+
+	indio_dev->currentmode = INDIO_DIRECT_MODE;
+	if (indio_dev->setup_ops->postdisable)
+		indio_dev->setup_ops->postdisable(indio_dev);
+
+	/* active_scan_mask was kmalloc'd only in the no-available-masks case */
+	if (indio_dev->available_scan_masks == NULL)
+		kfree(indio_dev->active_scan_mask);
+}
+
+/* Recompute this buffer's datum size and push it to the backend, if it cares. */
+static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
+					      struct iio_buffer *buffer)
+{
+	unsigned int datum_bytes;
+
+	/* optional callback: not every backend tracks datum size */
+	if (!buffer->access->set_bytes_per_datum)
+		return;
+
+	datum_bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
+					     buffer->scan_timestamp);
+	buffer->access->set_bytes_per_datum(buffer, datum_bytes);
+}
+
+/*
+ * Core buffer reconfiguration: tear everything down, apply the
+ * insert/remove, rebuild the compound scan mask and wind back up.
+ * Caller must hold mlock.  On rollback-able failure returns -EINVAL
+ * via 'success' with the old mask restored; otherwise unwinds through
+ * the error labels.  Statement order is load-bearing throughout.
+ */
+static int __iio_update_buffers(struct iio_dev *indio_dev,
+				struct iio_buffer *insert_buffer,
+				struct iio_buffer *remove_buffer)
+{
+	int ret;
+	int success = 0;
+	struct iio_buffer *buffer;
+	unsigned long *compound_mask;
+	const unsigned long *old_mask;
+
+	/* Wind down existing buffers - iff there are any */
+	if (!list_empty(&indio_dev->buffer_list)) {
+		if (indio_dev->setup_ops->predisable) {
+			ret = indio_dev->setup_ops->predisable(indio_dev);
+			if (ret)
+				return ret;
+		}
+		indio_dev->currentmode = INDIO_DIRECT_MODE;
+		if (indio_dev->setup_ops->postdisable) {
+			ret = indio_dev->setup_ops->postdisable(indio_dev);
+			if (ret)
+				return ret;
+		}
+	}
+	/* Keep a copy of current setup to allow roll back */
+	old_mask = indio_dev->active_scan_mask;
+	if (!indio_dev->available_scan_masks)
+		indio_dev->active_scan_mask = NULL;
+
+	if (remove_buffer)
+		iio_buffer_deactivate(remove_buffer);
+	if (insert_buffer)
+		iio_buffer_activate(indio_dev, insert_buffer);
+
+	/* If no buffers in list, we are done */
+	if (list_empty(&indio_dev->buffer_list)) {
+		indio_dev->currentmode = INDIO_DIRECT_MODE;
+		if (indio_dev->available_scan_masks == NULL)
+			kfree(old_mask);
+		return 0;
+	}
+
+	/* What scan mask do we actually have? */
+	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+				sizeof(long), GFP_KERNEL);
+	if (compound_mask == NULL) {
+		if (indio_dev->available_scan_masks == NULL)
+			kfree(old_mask);
+		return -ENOMEM;
+	}
+	indio_dev->scan_timestamp = 0;
+
+	/* union of all active buffers' masks and timestamp requests */
+	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
+			  indio_dev->masklength);
+		indio_dev->scan_timestamp |= buffer->scan_timestamp;
+	}
+	if (indio_dev->available_scan_masks) {
+		indio_dev->active_scan_mask =
+		    iio_scan_mask_match(indio_dev->available_scan_masks,
+					indio_dev->masklength,
+					compound_mask);
+		if (indio_dev->active_scan_mask == NULL) {
+			/*
+			 * Roll back.
+			 * Note can only occur when adding a buffer.
+			 */
+			iio_buffer_deactivate(insert_buffer);
+			if (old_mask) {
+				/* partial success: old config restored */
+				indio_dev->active_scan_mask = old_mask;
+				success = -EINVAL;
+			}
+			else {
+				kfree(compound_mask);
+				ret = -EINVAL;
+				return ret;
+			}
+		}
+	} else {
+		/* compound_mask ownership moves to the device here */
+		indio_dev->active_scan_mask = compound_mask;
+	}
+
+	iio_update_demux(indio_dev);
+
+	/* Wind up again */
+	if (indio_dev->setup_ops->preenable) {
+		ret = indio_dev->setup_ops->preenable(indio_dev);
+		if (ret) {
+			printk(KERN_ERR
+			       "Buffer not started: buffer preenable failed (%d)\n", ret);
+			goto error_remove_inserted;
+		}
+	}
+	indio_dev->scan_bytes =
+		iio_compute_scan_bytes(indio_dev,
+				       indio_dev->active_scan_mask,
+				       indio_dev->scan_timestamp);
+	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
+		if (buffer->access->request_update) {
+			ret = buffer->access->request_update(buffer);
+			if (ret) {
+				printk(KERN_INFO
+				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
+				goto error_run_postdisable;
+			}
+		}
+	}
+	if (indio_dev->info->update_scan_mode) {
+		ret = indio_dev->info
+			->update_scan_mode(indio_dev,
+					   indio_dev->active_scan_mask);
+		if (ret < 0) {
+			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
+			goto error_run_postdisable;
+		}
+	}
+	/* Definitely possible for devices to support both of these. */
+	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+		if (!indio_dev->trig) {
+			printk(KERN_INFO "Buffer not started: no trigger\n");
+			ret = -EINVAL;
+			/* Can only occur on first buffer */
+			goto error_run_postdisable;
+		}
+		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
+		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+	} else { /* Should never be reached */
+		ret = -EINVAL;
+		goto error_run_postdisable;
+	}
+
+	if (indio_dev->setup_ops->postenable) {
+		ret = indio_dev->setup_ops->postenable(indio_dev);
+		if (ret) {
+			printk(KERN_INFO
+			       "Buffer not started: postenable failed (%d)\n", ret);
+			indio_dev->currentmode = INDIO_DIRECT_MODE;
+			if (indio_dev->setup_ops->postdisable)
+				indio_dev->setup_ops->postdisable(indio_dev);
+			goto error_disable_all_buffers;
+		}
+	}
+
+	/* free whichever mask did not end up owned by the device */
+	if (indio_dev->available_scan_masks)
+		kfree(compound_mask);
+	else
+		kfree(old_mask);
+
+	return success;
+
+error_disable_all_buffers:
+	indio_dev->currentmode = INDIO_DIRECT_MODE;
+error_run_postdisable:
+	if (indio_dev->setup_ops->postdisable)
+		indio_dev->setup_ops->postdisable(indio_dev);
+error_remove_inserted:
+	if (insert_buffer)
+		iio_buffer_deactivate(insert_buffer);
+	indio_dev->active_scan_mask = old_mask;
+	kfree(compound_mask);
+	return ret;
+}
+
+/*
+ * Public wrapper around __iio_update_buffers(): normalizes no-op
+ * requests and takes the locks in the required order
+ * (info_exist_lock guards against concurrent unregistration).
+ */
+int iio_update_buffers(struct iio_dev *indio_dev,
+		       struct iio_buffer *insert_buffer,
+		       struct iio_buffer *remove_buffer)
+{
+	int ret = 0;
+
+	if (insert_buffer == remove_buffer)
+		return 0;
+
+	mutex_lock(&indio_dev->info_exist_lock);
+	mutex_lock(&indio_dev->mlock);
+
+	/* inserting an active or removing an inactive buffer is a no-op */
+	if (insert_buffer && iio_buffer_is_active(insert_buffer))
+		insert_buffer = NULL;
+	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
+		remove_buffer = NULL;
+
+	if (!insert_buffer && !remove_buffer)
+		goto out_unlock;
+
+	if (indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
+
+out_unlock:
+	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_buffers);
+
+/*
+ * sysfs "enable" attribute (write): start or stop capture on the
+ * device's own buffer by inserting it into / removing it from the
+ * active buffer list.
+ */
+ssize_t iio_buffer_store_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t len)
+{
+	int ret;
+	bool requested_state;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool inlist;
+
+	ret = strtobool(buf, &requested_state);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	/* Find out if it is in the list */
+	inlist = iio_buffer_is_active(indio_dev->buffer);
+	/* Already in desired state */
+	if (inlist == requested_state)
+		goto done;
+
+	if (requested_state)
+		ret = __iio_update_buffers(indio_dev,
+					   indio_dev->buffer, NULL);
+	else
+		ret = __iio_update_buffers(indio_dev,
+					   NULL, indio_dev->buffer);
+
+	/*
+	 * Removed the dead "if (ret < 0) goto done;" that used to sit
+	 * here: it jumped to the immediately following label and had
+	 * no effect; the error is reported via the return below.
+	 */
+done:
+	mutex_unlock(&indio_dev->mlock);
+	return (ret < 0) ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+/**
+ * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
+ * @indio_dev: the iio device
+ * @mask: scan mask to be checked
+ *
+ * Return true if exactly one bit is set in the scan mask, false otherwise. It
+ * can be used for devices where only one channel can be active for sampling at
+ * a time.
+ */
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+				   const unsigned long *mask)
+{
+	/* "one-hot" == a population count of exactly one */
+	return bitmap_weight(mask, indio_dev->masklength) == 1;
+}
+EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
+
+/* Delegate mask validation to the driver hook; no hook accepts anything. */
+static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
+				   const unsigned long *mask)
+{
+	const struct iio_buffer_setup_ops *ops = indio_dev->setup_ops;
+
+	if (!ops->validate_scan_mask)
+		return true;
+
+	return ops->validate_scan_mask(indio_dev, mask);
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev: the iio device
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffers request is plausible.
+ */
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+		      struct iio_buffer *buffer, int bit)
+{
+	const unsigned long *mask;
+	unsigned long *trialmask;
+
+	/*
+	 * Check masklength BEFORE allocating: previously a zero
+	 * masklength produced a zero-sized kmalloc, and the check used
+	 * WARN_ON() on a string literal, which is always true and never
+	 * prints the message.
+	 */
+	if (!indio_dev->masklength) {
+		WARN(1, "Trying to set scanmask prior to registering buffer\n");
+		return -EINVAL;
+	}
+
+	trialmask = kmalloc(sizeof(*trialmask)*
+			    BITS_TO_LONGS(indio_dev->masklength),
+			    GFP_KERNEL);
+	if (trialmask == NULL)
+		return -ENOMEM;
+
+	/* trial the new bit on a copy so failure leaves the mask untouched */
+	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+	set_bit(bit, trialmask);
+
+	if (!iio_validate_scan_mask(indio_dev, trialmask))
+		goto err_invalid_mask;
+
+	if (indio_dev->available_scan_masks) {
+		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+					   indio_dev->masklength,
+					   trialmask);
+		if (!mask)
+			goto err_invalid_mask;
+	}
+	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+
+	kfree(trialmask);
+
+	return 0;
+
+err_invalid_mask:
+	kfree(trialmask);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+
+/*
+ * iio_scan_mask_query() - query whether @bit is set in the buffer's mask.
+ * Returns 1 if set, 0 if clear or no mask allocated, -EINVAL if @bit is
+ * out of range. Valid scan-element indices are 0..masklength-1, so the
+ * bound check must be >= (the old "bit > masklength" let bit == masklength
+ * through and test_bit() then read past the last valid bit).
+ */
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+			struct iio_buffer *buffer, int bit)
+{
+	if (bit >= indio_dev->masklength)
+		return -EINVAL;
+
+	if (!buffer->scan_mask)
+		return 0;
+
+	/* Ensure return value is 0 or 1. */
+	return !!test_bit(bit, buffer->scan_mask);
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_query);
+
+/**
+ * struct iio_demux_table - table describing demux memcpy ops
+ * @from: index to copy from
+ * @to: index to copy to
+ * @length: how many bytes to copy
+ * @l: list head used for management
+ */
+struct iio_demux_table {
+ unsigned from;
+ unsigned to;
+ unsigned length;
+ struct list_head l;
+};
+
+/*
+ * Apply the buffer's demux table to one raw scan. Returns the input
+ * unchanged when no demux is configured, otherwise returns the bounce
+ * buffer holding only the channels this buffer wants.
+ */
+static const void *iio_demux(struct iio_buffer *buffer,
+			     const void *datain)
+{
+	struct iio_demux_table *entry;
+
+	if (list_empty(&buffer->demux_list))
+		return datain;
+
+	list_for_each_entry(entry, &buffer->demux_list, l)
+		memcpy(buffer->demux_bounce + entry->to,
+		       datain + entry->from, entry->length);
+
+	return buffer->demux_bounce;
+}
+
+/* Demux one scan and hand it to the buffer's store_to() callback. */
+static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
+{
+ const void *dataout = iio_demux(buffer, data);
+
+ return buffer->access->store_to(buffer, dataout);
+}
+
+/*
+ * Like iio_push_to_buffer() but pushes @n scans via store_to_buffer().
+ * NOTE(review): the demux bounce buffer is sized for a single scan; this
+ * presumably requires demux to be a pass-through for n > 1 - confirm with
+ * callers.
+ */
+static int iio_push_to_buffer_n(struct iio_buffer *buffer, const void *data, int n)
+{
+ const void *dataout = iio_demux(buffer, data);
+
+ return buffer->access->store_to_buffer(buffer, dataout, n);
+}
+
+/* Release every entry of the buffer's demux table. */
+static void iio_buffer_demux_free(struct iio_buffer *buffer)
+{
+	struct iio_demux_table *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &buffer->demux_list, l) {
+		list_del(&entry->l);
+		kfree(entry);
+	}
+}
+
+
+/* Fan one scan out to every buffer attached to the device. */
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
+{
+	struct iio_buffer *buf;
+	int ret;
+
+	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+		ret = iio_push_to_buffer(buf, data);
+		/* Stop on the first buffer that rejects the scan. */
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers);
+
+/* Fan @n scans out to every buffer attached to the device. */
+int iio_push_to_buffers_n(struct iio_dev *indio_dev, const void *data, int n)
+{
+	struct iio_buffer *buf;
+	int ret;
+
+	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+		ret = iio_push_to_buffer_n(buf, data, n);
+		/* Stop on the first buffer that rejects the scans. */
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers_n);
+
+/*
+ * Append a demux copy-op for @length bytes from @in_loc to @out_loc, or
+ * extend the most recent op (*p) when the new region is contiguous with
+ * it - fewer, larger memcpy calls per pushed scan.
+ */
+static int iio_buffer_add_demux(struct iio_buffer *buffer,
+ struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
+ unsigned int length)
+{
+
+ if (*p && (*p)->from + (*p)->length == in_loc &&
+ (*p)->to + (*p)->length == out_loc) {
+ /* Contiguous with the previous op: just grow it. */
+ (*p)->length += length;
+ } else {
+ *p = kmalloc(sizeof(**p), GFP_KERNEL);
+ if (*p == NULL)
+ return -ENOMEM;
+ (*p)->from = in_loc;
+ (*p)->to = out_loc;
+ (*p)->length = length;
+ list_add_tail(&(*p)->l, &buffer->demux_list);
+ }
+
+ return 0;
+}
+
+/*
+ * Rebuild the demux table mapping the device's active scan layout onto
+ * this buffer's requested layout. Returns 0 on success (including the
+ * pass-through case where the masks are identical), negative errno on
+ * failure.
+ */
+static int iio_buffer_update_demux(struct iio_dev *indio_dev,
+				   struct iio_buffer *buffer)
+{
+	const struct iio_chan_spec *ch;
+	int ret, in_ind = -1, out_ind, length;
+	unsigned in_loc = 0, out_loc = 0;
+	struct iio_demux_table *p = NULL;
+
+	/* Clear out any old demux */
+	iio_buffer_demux_free(buffer);
+	kfree(buffer->demux_bounce);
+	buffer->demux_bounce = NULL;
+
+	/* First work out which scan mode we will actually have */
+	if (bitmap_equal(indio_dev->active_scan_mask,
+			 buffer->scan_mask,
+			 indio_dev->masklength))
+		return 0;
+
+	/* Now we have the two masks, work from least sig and build up sizes */
+	for_each_set_bit(out_ind,
+			 buffer->scan_mask,
+			 indio_dev->masklength) {
+		in_ind = find_next_bit(indio_dev->active_scan_mask,
+				       indio_dev->masklength,
+				       in_ind + 1);
+		/*
+		 * Skip over active channels this buffer does not want,
+		 * accumulating their storage into in_loc. The skipped
+		 * channel's size must be computed *before* advancing
+		 * in_ind; the previous code advanced first and therefore
+		 * sized the wrong channel whenever any were skipped.
+		 */
+		while (in_ind != out_ind) {
+			ch = iio_find_channel_from_si(indio_dev, in_ind);
+			if (ch->scan_type.repeat > 1)
+				length = ch->scan_type.storagebits / 8 *
+					ch->scan_type.repeat;
+			else
+				length = ch->scan_type.storagebits / 8;
+			/* Make sure we are aligned */
+			in_loc = roundup(in_loc, length) + length;
+			in_ind = find_next_bit(indio_dev->active_scan_mask,
+					       indio_dev->masklength,
+					       in_ind + 1);
+		}
+		ch = iio_find_channel_from_si(indio_dev, in_ind);
+		if (ch->scan_type.repeat > 1)
+			length = ch->scan_type.storagebits / 8 *
+				ch->scan_type.repeat;
+		else
+			length = ch->scan_type.storagebits / 8;
+		/* Storage is naturally aligned in both layouts. */
+		out_loc = roundup(out_loc, length);
+		in_loc = roundup(in_loc, length);
+		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
+		if (ret)
+			goto error_clear_mux_table;
+		out_loc += length;
+		in_loc += length;
+	}
+	/* Relies on scan_timestamp being last */
+	if (buffer->scan_timestamp) {
+		ch = iio_find_channel_from_si(indio_dev,
+					      indio_dev->scan_index_timestamp);
+		if (ch->scan_type.repeat > 1)
+			length = ch->scan_type.storagebits / 8 *
+				ch->scan_type.repeat;
+		else
+			length = ch->scan_type.storagebits / 8;
+		out_loc = roundup(out_loc, length);
+		in_loc = roundup(in_loc, length);
+		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
+		if (ret)
+			goto error_clear_mux_table;
+		out_loc += length;
+		in_loc += length;
+	}
+	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
+	if (buffer->demux_bounce == NULL) {
+		ret = -ENOMEM;
+		goto error_clear_mux_table;
+	}
+	return 0;
+
+error_clear_mux_table:
+	iio_buffer_demux_free(buffer);
+
+	return ret;
+}
+
+/* Rebuild the demux table of every buffer attached to the device. */
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+	struct iio_buffer *buffer;
+	int ret = 0;
+
+	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+		ret = iio_buffer_update_demux(indio_dev, buffer);
+		if (ret < 0)
+			goto error_clear_mux_table;
+	}
+	return 0;
+
+error_clear_mux_table:
+	/* On failure leave no buffer with a half-built table. */
+	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+		iio_buffer_demux_free(buffer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_demux);
+
+/**
+ * iio_buffer_release() - Free a buffer's resources
+ * @ref: Pointer to the kref embedded in the iio_buffer struct
+ *
+ * This function is called when the last reference to the buffer has been
+ * dropped. It will typically free all resources allocated by the buffer. Do not
+ * call this function manually, always use iio_buffer_put() when done using a
+ * buffer.
+ */
+static void iio_buffer_release(struct kref *ref)
+{
+ struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
+
+ /* Delegate freeing to the buffer implementation's release hook. */
+ buffer->access->release(buffer);
+}
+
+/**
+ * iio_buffer_get() - Grab a reference to the buffer
+ * @buffer: The buffer to grab a reference for, may be NULL
+ *
+ * Returns the pointer that was passed in, so the call can be chained.
+ */
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
+{
+	if (buffer != NULL)
+		kref_get(&buffer->ref);
+	return buffer;
+}
+EXPORT_SYMBOL_GPL(iio_buffer_get);
+
+/**
+ * iio_buffer_put() - Release the reference to the buffer
+ * @buffer: The buffer to release the reference for, may be NULL
+ *
+ * The buffer is freed via iio_buffer_release() once the last reference
+ * is dropped.
+ */
+void iio_buffer_put(struct iio_buffer *buffer)
+{
+	if (buffer != NULL)
+		kref_put(&buffer->ref, iio_buffer_release);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_put);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
new file mode 100644
index 00000000000000..12a651b4829dfe
--- /dev/null
+++ b/drivers/iio/industrialio-core.c
@@ -0,0 +1,1284 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#define pr_fmt(fmt) "iio-core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
+#include <linux/iio/iio.h>
+#include "iio_core.h"
+#include "iio_core_trigger.h"
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+
+/* IDA to assign each registered device a unique id */
+static DEFINE_IDA(iio_ida);
+
+/* Character device region allocated in iio_init(). */
+static dev_t iio_devt;
+
+/* Maximum number of IIO character devices (minors in the chrdev region). */
+#define IIO_DEV_MAX 256
+struct bus_type iio_bus_type = {
+ .name = "iio",
+};
+EXPORT_SYMBOL(iio_bus_type);
+
+/* Root "iio" debugfs directory; NULL when debugfs is unavailable. */
+static struct dentry *iio_debugfs_dentry;
+
+/* sysfs attribute-name prefix, indexed by chan->output (0 = in, 1 = out). */
+static const char * const iio_direction[] = {
+ [0] = "in",
+ [1] = "out",
+};
+
+/* sysfs channel-type names, indexed by enum iio_chan_type. */
+static const char * const iio_chan_type_name_spec[] = {
+ [IIO_VOLTAGE] = "voltage",
+ [IIO_CURRENT] = "current",
+ [IIO_POWER] = "power",
+ [IIO_ACCEL] = "accel",
+ [IIO_ANGL_VEL] = "anglvel",
+ [IIO_MAGN] = "magn",
+ [IIO_LIGHT] = "illuminance",
+ [IIO_INTENSITY] = "intensity",
+ [IIO_PROXIMITY] = "proximity",
+ [IIO_TEMP] = "temp",
+ [IIO_INCLI] = "incli",
+ [IIO_ROT] = "rot",
+ [IIO_ANGL] = "angl",
+ [IIO_TIMESTAMP] = "timestamp",
+ [IIO_CAPACITANCE] = "capacitance",
+ [IIO_ALTVOLTAGE] = "altvoltage",
+ [IIO_CCT] = "cct",
+ [IIO_PRESSURE] = "pressure",
+ [IIO_HUMIDITYRELATIVE] = "humidityrelative",
+};
+
+/* sysfs channel-modifier name fragments, indexed by enum iio_modifier. */
+static const char * const iio_modifier_names[] = {
+ [IIO_MOD_X] = "x",
+ [IIO_MOD_Y] = "y",
+ [IIO_MOD_Z] = "z",
+ [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
+ [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
+ [IIO_MOD_LIGHT_BOTH] = "both",
+ [IIO_MOD_LIGHT_IR] = "ir",
+ [IIO_MOD_LIGHT_CLEAR] = "clear",
+ [IIO_MOD_LIGHT_RED] = "red",
+ [IIO_MOD_LIGHT_GREEN] = "green",
+ [IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_QUATERNION] = "quaternion",
+ [IIO_MOD_TEMP_AMBIENT] = "ambient",
+ [IIO_MOD_TEMP_OBJECT] = "object",
+ [IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
+ [IIO_MOD_NORTH_TRUE] = "from_north_true",
+ [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
+ [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
+};
+
+/* relies on pairs of these shared then separate */
+static const char * const iio_chan_info_postfix[] = {
+ [IIO_CHAN_INFO_RAW] = "raw",
+ [IIO_CHAN_INFO_PROCESSED] = "input",
+ [IIO_CHAN_INFO_SCALE] = "scale",
+ [IIO_CHAN_INFO_OFFSET] = "offset",
+ [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
+ [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
+ [IIO_CHAN_INFO_PEAK] = "peak_raw",
+ [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
+ [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
+ [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
+ [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
+ = "filter_low_pass_3db_frequency",
+ [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
+ [IIO_CHAN_INFO_FREQUENCY] = "frequency",
+ [IIO_CHAN_INFO_PHASE] = "phase",
+ [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
+ [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
+ [IIO_CHAN_INFO_INT_TIME] = "integration_time",
+};
+
+/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ *
+ * Returns the matching channel spec, or NULL when no channel uses @si.
+ */
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
+{
+	const struct iio_chan_spec *chan = indio_dev->channels;
+	int i;
+
+	for (i = 0; i < indio_dev->num_channels; i++, chan++)
+		if (chan->scan_index == si)
+			return chan;
+
+	return NULL;
+}
+
+/* This turns up an awful lot: show a constant string attribute. */
+ssize_t iio_read_const_attr(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	const char *string = to_iio_const_attr(attr)->string;
+
+	return sprintf(buf, "%s\n", string);
+}
+EXPORT_SYMBOL(iio_read_const_attr);
+
+/* Module init: register the iio bus, a chrdev region and the debugfs root. */
+static int __init iio_init(void)
+{
+ int ret;
+
+ /* Register sysfs bus */
+ ret = bus_register(&iio_bus_type);
+ if (ret < 0) {
+ pr_err("could not register bus type\n");
+ goto error_nothing;
+ }
+
+ ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
+ if (ret < 0) {
+ pr_err("failed to allocate char dev region\n");
+ goto error_unregister_bus_type;
+ }
+
+ /* debugfs being absent is not fatal; users must check the dentry. */
+ iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
+
+ return 0;
+
+error_unregister_bus_type:
+ bus_unregister(&iio_bus_type);
+error_nothing:
+ return ret;
+}
+
+/* Module exit: release everything acquired by iio_init(). */
+static void __exit iio_exit(void)
+{
+ /* iio_devt is only non-zero if the chrdev region was allocated. */
+ if (iio_devt)
+ unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
+ bus_unregister(&iio_bus_type);
+ debugfs_remove(iio_debugfs_dentry);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * debugfs read: report the register value at the address previously cached
+ * by iio_debugfs_write_reg(), formatted as hex.
+ */
+static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct iio_dev *indio_dev = file->private_data;
+ char buf[20];
+ unsigned val = 0;
+ ssize_t len;
+ int ret;
+
+ ret = indio_dev->info->debugfs_reg_access(indio_dev,
+ indio_dev->cached_reg_addr,
+ 0, &val);
+ if (ret)
+ dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+
+ /* NOTE(review): on read failure the (zero) val is still reported. */
+ len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * debugfs write: accepts "reg" (caches the address for a later read) or
+ * "reg val" (caches the address and writes val through the driver's
+ * debugfs_reg_access hook).
+ */
+static ssize_t iio_debugfs_write_reg(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct iio_dev *indio_dev = file->private_data;
+ unsigned reg, val;
+ char buf[80];
+ int ret;
+
+ /* Bound the copy and leave room for the terminating NUL. */
+ count = min_t(size_t, count, (sizeof(buf)-1));
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ ret = sscanf(buf, "%i %i", &reg, &val);
+
+ /* ret is the number of fields parsed: 1 = address only, 2 = write. */
+ switch (ret) {
+ case 1:
+ indio_dev->cached_reg_addr = reg;
+ break;
+ case 2:
+ indio_dev->cached_reg_addr = reg;
+ ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
+ val, NULL);
+ if (ret) {
+ dev_err(indio_dev->dev.parent, "%s: write failed\n",
+ __func__);
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+/* File operations for the per-device direct_reg_access debugfs file. */
+static const struct file_operations iio_debugfs_reg_fops = {
+ .open = simple_open,
+ .read = iio_debugfs_read_reg,
+ .write = iio_debugfs_write_reg,
+};
+
+/* Tear down the device's debugfs directory and everything inside it. */
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
+ debugfs_remove_recursive(indio_dev->debugfs_dentry);
+}
+
+/*
+ * Create the per-device debugfs directory and its direct_reg_access file.
+ * A no-op (returning 0) when the driver has no debugfs_reg_access hook or
+ * when debugfs itself is unavailable.
+ */
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ struct dentry *d;
+
+ if (indio_dev->info->debugfs_reg_access == NULL)
+ return 0;
+
+ if (!iio_debugfs_dentry)
+ return 0;
+
+ indio_dev->debugfs_dentry =
+ debugfs_create_dir(dev_name(&indio_dev->dev),
+ iio_debugfs_dentry);
+ if (indio_dev->debugfs_dentry == NULL) {
+ dev_warn(indio_dev->dev.parent,
+ "Failed to create debugfs directory\n");
+ /* NOTE(review): -EFAULT is an odd errno here; -ENOMEM would fit. */
+ return -EFAULT;
+ }
+
+ d = debugfs_create_file("direct_reg_access", 0644,
+ indio_dev->debugfs_dentry,
+ indio_dev, &iio_debugfs_reg_fops);
+ if (!d) {
+ iio_device_unregister_debugfs(indio_dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#else
+/* Stubs used when CONFIG_DEBUG_FS is disabled. */
+static int iio_device_register_debugfs(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* sysfs show for a channel's extended-info attribute. */
+static ssize_t iio_read_channel_ext_info(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	/* The attr address indexes into the channel's ext_info array. */
+	const struct iio_chan_spec_ext_info *ext_info =
+		&this_attr->c->ext_info[this_attr->address];
+
+	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
+}
+
+/* sysfs store for a channel's extended-info attribute. */
+static ssize_t iio_write_channel_ext_info(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	/* The attr address indexes into the channel's ext_info array. */
+	const struct iio_chan_spec_ext_info *ext_info =
+		&this_attr->c->ext_info[this_attr->address];
+
+	return ext_info->write(indio_dev, ext_info->private,
+			       this_attr->c, buf, len);
+}
+
+/* List every item of an iio_enum, space-separated, newline-terminated. */
+ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
+	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
+{
+	const struct iio_enum *e = (const struct iio_enum *)priv;
+	size_t len = 0;
+	unsigned int i;
+
+	if (!e->num_items)
+		return 0;
+
+	for (i = 0; i < e->num_items; ++i)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+				 e->items[i]);
+
+	/* Overwrite the trailing space with a newline. */
+	buf[len - 1] = '\n';
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(iio_enum_available_read);
+
+/* Show the currently selected item of an iio_enum. */
+ssize_t iio_enum_read(struct iio_dev *indio_dev,
+	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
+{
+	const struct iio_enum *e = (const struct iio_enum *)priv;
+	int idx;
+
+	if (!e->get)
+		return -EINVAL;
+
+	idx = e->get(indio_dev, chan);
+	/* Negative values are error codes from the driver callback. */
+	if (idx < 0)
+		return idx;
+	if (idx >= e->num_items)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", e->items[idx]);
+}
+EXPORT_SYMBOL_GPL(iio_enum_read);
+
+/* Select an iio_enum item by name written from sysfs. */
+ssize_t iio_enum_write(struct iio_dev *indio_dev,
+	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
+	size_t len)
+{
+	const struct iio_enum *e = (const struct iio_enum *)priv;
+	unsigned int idx;
+	int err;
+
+	if (!e->set)
+		return -EINVAL;
+
+	/* Find the item whose name matches the written string. */
+	for (idx = 0; idx < e->num_items; idx++)
+		if (sysfs_streq(buf, e->items[idx]))
+			break;
+
+	if (idx == e->num_items)
+		return -EINVAL;
+
+	err = e->set(indio_dev, chan, idx);
+	return err ? err : len;
+}
+EXPORT_SYMBOL_GPL(iio_enum_write);
+
+/**
+ * iio_format_value() - Formats a IIO value into its string representation
+ * @buf: The buffer to which the formated value gets written
+ * @type: One of the IIO_VAL_... constants. This decides how the val and val2
+ * parameters are formatted.
+ * @size: Number of entries in @vals (used by IIO_VAL_INT_MULTIPLE).
+ * @vals: pointer to the values, exact meaning depends on the type parameter.
+ */
+ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+{
+	unsigned long long tmp;
+	bool scale_db = false;
+
+	switch (type) {
+	case IIO_VAL_INT:
+		return sprintf(buf, "%d\n", vals[0]);
+	case IIO_VAL_INT_PLUS_MICRO_DB:
+		scale_db = true;
+		/* fall through - same format as INT_PLUS_MICRO plus " dB" */
+	case IIO_VAL_INT_PLUS_MICRO:
+		/*
+		 * abs() returns int, so the conversion must be %d: the old
+		 * %ld did not match the argument type (undefined behaviour
+		 * where long is wider than int, e.g. 64-bit).
+		 */
+		if (vals[1] < 0)
+			return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
+				       -vals[1],
+				       scale_db ? " dB" : "");
+		else
+			return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
+				       scale_db ? " dB" : "");
+	case IIO_VAL_INT_PLUS_NANO:
+		if (vals[1] < 0)
+			return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
+				       -vals[1]);
+		else
+			return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+	case IIO_VAL_FRACTIONAL:
+		/* vals[0] / vals[1], rendered to nine decimal places. */
+		tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
+		vals[1] = do_div(tmp, 1000000000LL);
+		vals[0] = tmp;
+		return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+	case IIO_VAL_FRACTIONAL_LOG2:
+		/* vals[0] / 2^vals[1], rendered to nine decimal places. */
+		tmp = (s64)vals[0] * 1000000000LL >> vals[1];
+		vals[1] = do_div(tmp, 1000000000LL);
+		vals[0] = tmp;
+		return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+	case IIO_VAL_INT_MULTIPLE:
+	{
+		int i;
+		int len = 0;
+
+		/* Space-separated integers followed by a newline. */
+		for (i = 0; i < size; ++i)
+			len += snprintf(&buf[len], PAGE_SIZE - len, "%d ",
+					vals[i]);
+		len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
+		return len;
+	}
+	default:
+		return 0;
+	}
+}
+
+/*
+ * sysfs show for a channel info attribute: read the value(s) through the
+ * driver's read_raw_multi (preferred, may return several values) or
+ * read_raw hook and format them per the IIO_VAL_* type the hook returns.
+ */
+static ssize_t iio_read_channel_info(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int vals[INDIO_MAX_RAW_ELEMENTS];
+ int ret;
+ /* read_raw yields at most two values (val, val2). */
+ int val_len = 2;
+
+ if (indio_dev->info->read_raw_multi)
+ ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
+ INDIO_MAX_RAW_ELEMENTS,
+ vals, &val_len,
+ this_attr->address);
+ else
+ ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
+ &vals[0], &vals[1], this_attr->address);
+
+ if (ret < 0)
+ return ret;
+
+ /* On success ret carries the IIO_VAL_* format type. */
+ return iio_format_value(buf, ret, val_len, vals);
+}
+
+/**
+ * iio_str_to_fixpoint() - Parse a fixed-point number from a string
+ * @str: The string to parse
+ * @fract_mult: Multiplier for the first decimal place, should be a power of 10
+ * @integer: The integer part of the number
+ * @fract: The fractional part of the number
+ *
+ * Returns 0 on success, or a negative error code if the string could not be
+ * parsed.
+ */
+int iio_str_to_fixpoint(const char *str, int fract_mult,
+ int *integer, int *fract)
+{
+ int i = 0, f = 0;
+ bool integer_part = true, negative = false;
+
+ /* Optional leading sign. */
+ if (str[0] == '-') {
+ negative = true;
+ str++;
+ } else if (str[0] == '+') {
+ str++;
+ }
+
+ while (*str) {
+ if ('0' <= *str && *str <= '9') {
+ if (integer_part) {
+ i = i * 10 + *str - '0';
+ } else {
+ /* Each fractional digit weighs 10x less. */
+ f += fract_mult * (*str - '0');
+ fract_mult /= 10;
+ }
+ } else if (*str == '\n') {
+ /* A newline is only valid as the final character. */
+ if (*(str + 1) == '\0')
+ break;
+ else
+ return -EINVAL;
+ } else if (*str == '.' && integer_part) {
+ /* Single decimal point switches to the fraction. */
+ integer_part = false;
+ } else {
+ return -EINVAL;
+ }
+ str++;
+ }
+
+ /* For "-0.5" the sign must be carried by the fractional part. */
+ if (negative) {
+ if (i)
+ i = -i;
+ else
+ f = -f;
+ }
+
+ *integer = i;
+ *fract = f;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
+
+/*
+ * sysfs store for a channel info attribute: parse a fixed-point decimal
+ * and pass integer/fractional parts to the driver's write_raw hook. The
+ * fractional precision (micro vs nano) is chosen by write_raw_get_fmt.
+ */
+static ssize_t iio_write_channel_info(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ /* Default: first decimal place weighs 1e5, i.e. micro precision. */
+ int ret, fract_mult = 100000;
+ int integer, fract;
+
+ /* Assumes decimal - precision based on number of digits */
+ if (!indio_dev->info->write_raw)
+ return -EINVAL;
+
+ if (indio_dev->info->write_raw_get_fmt)
+ switch (indio_dev->info->write_raw_get_fmt(indio_dev,
+ this_attr->c, this_attr->address)) {
+ case IIO_VAL_INT_PLUS_MICRO:
+ fract_mult = 100000;
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ fract_mult = 100000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+ if (ret)
+ return ret;
+
+ ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
+ integer, fract, this_attr->address);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/*
+ * Initialize @dev_attr for one channel attribute: build the sysfs name
+ * from direction, channel type, index/modifier, extend_name and @postfix
+ * according to @shared_by, then wire up the show/store callbacks.
+ * Returns 0 on success, -ENOMEM / -EINVAL on failure. The name string is
+ * heap-allocated and later freed by __iio_device_attr_deinit().
+ */
+static
+int __iio_device_attr_init(struct device_attribute *dev_attr,
+ const char *postfix,
+ struct iio_chan_spec const *chan,
+ ssize_t (*readfunc)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*writefunc)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len),
+ enum iio_shared_by shared_by)
+{
+ int ret = 0;
+ char *name = NULL;
+ char *full_postfix;
+ sysfs_attr_init(&dev_attr->attr);
+
+ /* Build up postfix of <extend_name>_<modifier>_postfix */
+ if (chan->modified && (shared_by == IIO_SEPARATE)) {
+ if (chan->extend_name)
+ full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_modifier_names[chan
+ ->channel2],
+ chan->extend_name,
+ postfix);
+ else
+ full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_modifier_names[chan
+ ->channel2],
+ postfix);
+ } else {
+ /* Shared attributes never carry the per-channel extend_name. */
+ if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
+ full_postfix = kstrdup(postfix, GFP_KERNEL);
+ else
+ full_postfix = kasprintf(GFP_KERNEL,
+ "%s_%s",
+ chan->extend_name,
+ postfix);
+ }
+ if (full_postfix == NULL)
+ return -ENOMEM;
+
+ if (chan->differential) { /* Differential can not have modifier */
+ switch (shared_by) {
+ case IIO_SHARED_BY_ALL:
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ break;
+ case IIO_SHARED_BY_DIR:
+ name = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_direction[chan->output],
+ full_postfix);
+ break;
+ case IIO_SHARED_BY_TYPE:
+ name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ break;
+ case IIO_SEPARATE:
+ if (!chan->indexed) {
+ /*
+ * NOTE(review): WARN_ON() expects a condition;
+ * WARN(1, "...") would actually print this text.
+ */
+ WARN_ON("Differential channels must be indexed\n");
+ ret = -EINVAL;
+ goto error_free_full_postfix;
+ }
+ name = kasprintf(GFP_KERNEL,
+ "%s_%s%d-%s%d_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ chan->channel,
+ iio_chan_type_name_spec[chan->type],
+ chan->channel2,
+ full_postfix);
+ break;
+ }
+ } else { /* Single ended */
+ switch (shared_by) {
+ case IIO_SHARED_BY_ALL:
+ name = kasprintf(GFP_KERNEL, "%s", full_postfix);
+ break;
+ case IIO_SHARED_BY_DIR:
+ name = kasprintf(GFP_KERNEL, "%s_%s",
+ iio_direction[chan->output],
+ full_postfix);
+ break;
+ case IIO_SHARED_BY_TYPE:
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ break;
+
+ case IIO_SEPARATE:
+ if (chan->indexed)
+ name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ chan->channel,
+ full_postfix);
+ else
+ name = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_direction[chan->output],
+ iio_chan_type_name_spec[chan->type],
+ full_postfix);
+ break;
+ }
+ }
+ if (name == NULL) {
+ ret = -ENOMEM;
+ goto error_free_full_postfix;
+ }
+ dev_attr->attr.name = name;
+
+ if (readfunc) {
+ dev_attr->attr.mode |= S_IRUGO;
+ dev_attr->show = readfunc;
+ }
+
+ if (writefunc) {
+ dev_attr->attr.mode |= S_IWUSR;
+ dev_attr->store = writefunc;
+ }
+
+ /* Success (ret == 0) falls through: full_postfix is always freed. */
+error_free_full_postfix:
+ kfree(full_postfix);
+
+ return ret;
+}
+
+/* Free the name string allocated by __iio_device_attr_init(). */
+static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
+{
+ kfree(dev_attr->attr.name);
+}
+
+/*
+ * Allocate, initialize and register one channel device attribute on
+ * @attr_list. Returns 0 on success, -EBUSY when an attribute with the
+ * same name is already on the list (expected for shared attributes),
+ * or another negative errno.
+ */
+int __iio_add_chan_devattr(const char *postfix,
+ struct iio_chan_spec const *chan,
+ ssize_t (*readfunc)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*writefunc)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len),
+ u64 mask,
+ enum iio_shared_by shared_by,
+ struct device *dev,
+ struct list_head *attr_list)
+{
+ int ret;
+ struct iio_dev_attr *iio_attr, *t;
+
+ iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
+ if (iio_attr == NULL)
+ return -ENOMEM;
+ ret = __iio_device_attr_init(&iio_attr->dev_attr,
+ postfix, chan,
+ readfunc, writefunc, shared_by);
+ if (ret)
+ goto error_iio_dev_attr_free;
+ iio_attr->c = chan;
+ iio_attr->address = mask;
+ /* Duplicate names: a real error only for per-channel attributes. */
+ list_for_each_entry(t, attr_list, l)
+ if (strcmp(t->dev_attr.attr.name,
+ iio_attr->dev_attr.attr.name) == 0) {
+ if (shared_by == IIO_SEPARATE)
+ dev_err(dev, "tried to double register : %s\n",
+ t->dev_attr.attr.name);
+ ret = -EBUSY;
+ goto error_device_attr_deinit;
+ }
+ list_add(&iio_attr->l, attr_list);
+
+ return 0;
+
+error_device_attr_deinit:
+ __iio_device_attr_deinit(&iio_attr->dev_attr);
+error_iio_dev_attr_free:
+ kfree(iio_attr);
+ return ret;
+}
+
+/*
+ * Create one sysfs attribute per bit set in @infomask for @chan at the
+ * given sharing level. Returns the number of attributes added, or a
+ * negative errno.
+ */
+static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
+					 struct iio_chan_spec const *chan,
+					 enum iio_shared_by shared_by,
+					 const long *infomask)
+{
+	int i, ret, attrcount = 0;
+
+	/*
+	 * Size the bitmap from the pointed-to long, not from the pointer:
+	 * the old sizeof(infomask)*8 measured the pointer and only worked
+	 * because sizeof(long *) == sizeof(long) on supported arches.
+	 */
+	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
+		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
+			return -EINVAL;
+		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
+					     chan,
+					     &iio_read_channel_info,
+					     &iio_write_channel_info,
+					     i,
+					     shared_by,
+					     &indio_dev->dev,
+					     &indio_dev->channel_attr_list);
+		/* Shared attributes may already exist - not an error. */
+		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+			continue;
+		else if (ret < 0)
+			return ret;
+		attrcount++;
+	}
+
+	return attrcount;
+}
+
+/*
+ * Create all sysfs attributes for one channel: the four info masks
+ * (separate, shared-by-type, shared-by-dir, shared-by-all) plus any
+ * extended-info entries. Returns the number of attributes added, or a
+ * negative errno.
+ */
+static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret, attrcount = 0;
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ /* Negative channel numbers mark channels without sysfs attrs. */
+ if (chan->channel < 0)
+ return 0;
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SEPARATE,
+ &chan->info_mask_separate);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_TYPE,
+ &chan->info_mask_shared_by_type);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_DIR,
+ &chan->info_mask_shared_by_dir);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_info_mask_type(indio_dev, chan,
+ IIO_SHARED_BY_ALL,
+ &chan->info_mask_shared_by_all);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ if (chan->ext_info) {
+ unsigned int i = 0;
+ for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
+ ret = __iio_add_chan_devattr(ext_info->name,
+ chan,
+ ext_info->read ?
+ &iio_read_channel_ext_info : NULL,
+ ext_info->write ?
+ &iio_write_channel_ext_info : NULL,
+ i,
+ ext_info->shared,
+ &indio_dev->dev,
+ &indio_dev->channel_attr_list);
+ i++;
+ /* Shared ext_info attrs may already be registered. */
+ if (ret == -EBUSY && ext_info->shared)
+ continue;
+
+ if (ret)
+ return ret;
+
+ attrcount++;
+ }
+ }
+
+ return attrcount;
+}
+
+/**
+ * iio_free_chan_devattr_list() - Free a list of IIO device attributes
+ * @attr_list: List of IIO device attributes
+ *
+ * This function frees the memory allocated for each of the IIO device
+ * attributes in the list. Note: if you want to reuse the list after calling
+ * this function you have to reinitialize it using INIT_LIST_HEAD().
+ */
+void iio_free_chan_devattr_list(struct list_head *attr_list)
+{
+	struct iio_dev_attr *attr, *next;
+
+	list_for_each_entry_safe(attr, next, attr_list, l) {
+		/* The name was allocated by __iio_device_attr_init(). */
+		kfree(attr->dev_attr.attr.name);
+		kfree(attr);
+	}
+}
+
+/* sysfs show for the "name" attribute: the device's registered name. */
+static ssize_t iio_show_dev_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+
+/*
+ * Build the device's combined attribute group: the driver's static
+ * attributes, one generated attribute per channel info bit, and the
+ * "name" attribute, then register the group on the device.
+ */
+static int iio_device_register_sysfs(struct iio_dev *indio_dev)
+{
+ int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+
+ /* First count elements in any existing group */
+ if (indio_dev->info->attrs) {
+ attr = indio_dev->info->attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ /*
+ * New channel registration method - relies on the fact a group does
+ * not need to be initialized if its name is NULL.
+ */
+ if (indio_dev->channels)
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = iio_device_add_channel_sysfs(indio_dev,
+ &indio_dev
+ ->channels[i]);
+ if (ret < 0)
+ goto error_clear_attrs;
+ /* On success ret is the number of attrs added. */
+ attrcount += ret;
+ }
+
+ if (indio_dev->name)
+ attrcount++;
+
+ /* +1 for the NULL terminator sysfs requires. */
+ indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->chan_attr_group.attrs[0]),
+ GFP_KERNEL);
+ if (indio_dev->chan_attr_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_attrs;
+ }
+ /* Copy across original attributes */
+ if (indio_dev->info->attrs)
+ memcpy(indio_dev->chan_attr_group.attrs,
+ indio_dev->info->attrs->attrs,
+ sizeof(indio_dev->chan_attr_group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p, &indio_dev->channel_attr_list, l)
+ indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
+ if (indio_dev->name)
+ indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->chan_attr_group;
+
+ return 0;
+
+error_clear_attrs:
+ iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
+
+ return ret;
+}
+
+/* Free the generated channel attributes and the combined attr array. */
+static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
+{
+
+ iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
+ kfree(indio_dev->chan_attr_group.attrs);
+}
+
+/*
+ * Device-model release callback: runs when the last reference to the
+ * struct device is dropped. Tears down triggers/events/sysfs, drops the
+ * buffer reference, returns the id to the IDA and frees the iio_dev.
+ */
+static void iio_dev_release(struct device *device)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(device);
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+ iio_device_unregister_trigger_consumer(indio_dev);
+ iio_device_unregister_eventset(indio_dev);
+ iio_device_unregister_sysfs(indio_dev);
+
+ iio_buffer_put(indio_dev->buffer);
+
+ ida_simple_remove(&iio_ida, indio_dev->id);
+ kfree(indio_dev);
+}
+
+/* Device type shared by all IIO devices; provides the release hook. */
+struct device_type iio_device_type = {
+ .name = "iio_device",
+ .release = iio_dev_release,
+};
+
+/**
+ * iio_device_alloc() - allocate an iio_dev from a driver
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Returns a ref-counted iio_dev with an assigned id, or NULL on failure.
+ * Free with iio_device_free() (which drops the device reference).
+ **/
+struct iio_dev *iio_device_alloc(int sizeof_priv)
+{
+ struct iio_dev *dev;
+ size_t alloc_size;
+
+ /* Private data lives after the iio_dev, aligned to IIO_ALIGN. */
+ alloc_size = sizeof(struct iio_dev);
+ if (sizeof_priv) {
+ alloc_size = ALIGN(alloc_size, IIO_ALIGN);
+ alloc_size += sizeof_priv;
+ }
+ /* ensure 32-byte alignment of whole construct ? */
+ alloc_size += IIO_ALIGN - 1;
+
+ dev = kzalloc(alloc_size, GFP_KERNEL);
+
+ if (dev) {
+ dev->dev.groups = dev->groups;
+ dev->dev.type = &iio_device_type;
+ dev->dev.bus = &iio_bus_type;
+ device_initialize(&dev->dev);
+ dev_set_drvdata(&dev->dev, (void *)dev);
+ mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
+
+ dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (dev->id < 0) {
+ /* cannot use a dev_err as the name isn't available */
+ pr_err("failed to get device id\n");
+ kfree(dev);
+ return NULL;
+ }
+ dev_set_name(&dev->dev, "iio:device%d", dev->id);
+ INIT_LIST_HEAD(&dev->buffer_list);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(iio_device_alloc);
+
+/**
+ * iio_device_free() - free an iio_dev from a driver
+ * @dev: the iio_dev associated with the device
+ *
+ * Drops the device reference; actual freeing happens in iio_dev_release()
+ * once the last reference is gone. NULL is a no-op.
+ **/
+void iio_device_free(struct iio_dev *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
+EXPORT_SYMBOL(iio_device_free);
+
+static void devm_iio_device_release(struct device *dev, void *res)
+{
+ iio_device_free(*(struct iio_dev **)res);
+}
+
+static int devm_iio_device_match(struct device *dev, void *res, void *data)
+{
+ struct iio_dev **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_iio_device_alloc - Resource-managed iio_device_alloc()
+ * @dev: Device to allocate iio_dev for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed iio_device_alloc. iio_dev allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_dev allocated with this function needs to be freed separately,
+ * devm_iio_device_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_dev on success, NULL on failure.
+ */
+struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
+{
+ struct iio_dev **ptr, *iio_dev;
+
+ /*
+ * The devres entry only stores a pointer to the iio_dev; the
+ * release callback frees it via iio_device_free() on driver detach.
+ */
+ ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ iio_dev = iio_device_alloc(sizeof_priv);
+ if (iio_dev) {
+ *ptr = iio_dev;
+ devres_add(dev, ptr);
+ } else {
+ /* allocation failed: discard the unused devres entry */
+ devres_free(ptr);
+ }
+
+ return iio_dev;
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
+
+/**
+ * devm_iio_device_free - Resource-managed iio_device_free()
+ * @dev: Device this iio_dev belongs to
+ * @iio_dev: the iio_dev associated with the device
+ *
+ * Free iio_dev allocated with devm_iio_device_alloc().
+ */
+void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_device_release,
+ devm_iio_device_match, iio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_free);
+
+/**
+ * iio_chrdev_open() - chrdev file open for buffer access and ioctls
+ *
+ * Only a single opener is allowed at a time: IIO_BUSY_BIT_POS in
+ * indio_dev->flags enforces exclusive access.  Takes a device
+ * reference that is dropped again in iio_chrdev_release().
+ **/
+static int iio_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
+ return -EBUSY;
+
+ iio_device_get(indio_dev);
+
+ filp->private_data = indio_dev;
+
+ return 0;
+}
+
+/**
+ * iio_chrdev_release() - chrdev file close buffer access and ioctls
+ *
+ * Clears the busy bit so another opener may come in, then drops the
+ * reference taken in iio_chrdev_open().
+ **/
+static int iio_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+ clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+ iio_device_put(indio_dev);
+
+ return 0;
+}
+
+/* Somewhat of a cross file organization violation - ioctls here are actually
+ * event related */
+static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ int __user *ip = (int __user *)arg;
+ int fd;
+
+ /* info is NULLed by iio_device_unregister(); device is going away */
+ if (!indio_dev->info)
+ return -ENODEV;
+
+ /* Hand out an anonymous fd for reading events (see iio_event_getfd) */
+ if (cmd == IIO_GET_EVENT_FD_IOCTL) {
+ fd = iio_event_getfd(indio_dev);
+ if (copy_to_user(ip, &fd, sizeof(fd)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct file_operations iio_buffer_fileops = {
+ .read = iio_buffer_read_first_n_outer_addr,
+ .release = iio_chrdev_release,
+ .open = iio_chrdev_open,
+ .poll = iio_buffer_poll_addr,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = iio_ioctl,
+ .compat_ioctl = iio_ioctl,
+};
+
+static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
+int iio_device_register(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ /* If the calling driver did not initialize of_node, do it here */
+ if (!indio_dev->dev.of_node && indio_dev->dev.parent)
+ indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
+
+ /* configure elements for the chrdev */
+ indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
+
+ /*
+ * Registration proceeds in stages (debugfs, sysfs, eventset,
+ * cdev, device); each failure unwinds the previous stages via
+ * the goto chain at the bottom.
+ */
+ ret = iio_device_register_debugfs(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to register debugfs interfaces\n");
+ return ret;
+ }
+ ret = iio_device_register_sysfs(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to register sysfs interfaces\n");
+ goto error_unreg_debugfs;
+ }
+ ret = iio_device_register_eventset(indio_dev);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to register event set\n");
+ goto error_free_sysfs;
+ }
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+ iio_device_register_trigger_consumer(indio_dev);
+
+ /* Buffered devices without setup_ops get a no-op implementation */
+ if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
+ indio_dev->setup_ops == NULL)
+ indio_dev->setup_ops = &noop_ring_setup_ops;
+
+ cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
+ indio_dev->chrdev.owner = indio_dev->info->driver_module;
+ indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
+ ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
+ if (ret < 0)
+ goto error_unreg_eventset;
+
+ /* Makes the device visible; must come after the cdev is live */
+ ret = device_add(&indio_dev->dev);
+ if (ret < 0)
+ goto error_cdev_del;
+
+ return 0;
+error_cdev_del:
+ cdev_del(&indio_dev->chrdev);
+error_unreg_eventset:
+ iio_device_unregister_eventset(indio_dev);
+error_free_sysfs:
+ iio_device_unregister_sysfs(indio_dev);
+error_unreg_debugfs:
+ iio_device_unregister_debugfs(indio_dev);
+ return ret;
+}
+EXPORT_SYMBOL(iio_device_register);
+
+/**
+ * iio_device_unregister() - unregister a device from the IIO subsystem
+ * @indio_dev: Device structure representing the device.
+ **/
+void iio_device_unregister(struct iio_dev *indio_dev)
+{
+ /*
+ * info_exist_lock serializes teardown against users that check
+ * indio_dev->info (e.g. the chrdev read/ioctl paths).
+ */
+ mutex_lock(&indio_dev->info_exist_lock);
+
+ device_del(&indio_dev->dev);
+
+ if (indio_dev->chrdev.dev)
+ cdev_del(&indio_dev->chrdev);
+ iio_device_unregister_debugfs(indio_dev);
+
+ iio_disable_all_buffers(indio_dev);
+
+ /* Marks the device dead for anyone still holding an fd */
+ indio_dev->info = NULL;
+
+ /* Kick blocked readers/pollers so they see info == NULL and bail */
+ iio_device_wakeup_eventset(indio_dev);
+ iio_buffer_wakeup_poll(indio_dev);
+
+ mutex_unlock(&indio_dev->info_exist_lock);
+}
+EXPORT_SYMBOL(iio_device_unregister);
+
+static void devm_iio_device_unreg(struct device *dev, void *res)
+{
+ iio_device_unregister(*(struct iio_dev **)res);
+}
+
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * If an iio_dev registered with this function needs to be unregistered
+ * separately, devm_iio_device_unregister() must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
+{
+ struct iio_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_iio_device_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = indio_dev;
+ ret = iio_device_register(indio_dev);
+ if (!ret)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_register);
+
+/**
+ * devm_iio_device_unregister - Resource-managed iio_device_unregister()
+ * @dev: Device this iio_dev belongs to
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Unregister iio_dev registered with devm_iio_device_register().
+ */
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_device_unreg,
+ devm_iio_device_match, indio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
+
+subsys_initcall(iio_init);
+module_exit(iio_exit);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Industrial I/O core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
new file mode 100644
index 00000000000000..7cf58f4ea8c52d
--- /dev/null
+++ b/drivers/iio/industrialio-event.c
@@ -0,0 +1,520 @@
+/* Industrial I/O event handling
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Based on elements of hwmon and input subsystems.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/iio/iio.h>
+#include "iio_core.h"
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/**
+ * struct iio_event_interface - chrdev interface for an event line
+ * @wait: wait queue to allow blocking reads of events
+ * @det_events: list of detected events
+ * @dev_attr_list: list of event interface sysfs attribute
+ * @flags: file operations related flags including busy flag.
+ * @group: event interface sysfs attribute group
+ */
+struct iio_event_interface {
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(det_events, struct iio_event_data, 16);
+
+ struct list_head dev_attr_list;
+ unsigned long flags;
+ struct attribute_group group;
+ struct mutex read_lock;
+};
+
+/**
+ * iio_push_event() - try to add event to the list for userspace reading
+ * @indio_dev: IIO device structure
+ * @ev_code: What event
+ * @timestamp: When the event occurred
+ *
+ * Note: The caller must make sure that this function is not running
+ * concurrently for the same indio_dev more than once.
+ **/
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ struct iio_event_data ev;
+ int copied;
+
+ /* Does anyone care? */
+ if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+
+ ev.id = ev_code;
+ ev.timestamp = timestamp;
+
+ /* kfifo_put() returns 0 if the fifo is full: event dropped */
+ copied = kfifo_put(&ev_int->det_events, ev);
+ if (copied != 0)
+ wake_up_poll(&ev_int->wait, POLLIN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_push_event);
+
+/**
+ * iio_event_poll() - poll the event queue to find out if it has data
+ */
+static unsigned int iio_event_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ unsigned int events = 0;
+
+ if (!indio_dev->info)
+ return -ENODEV;
+
+ poll_wait(filep, &ev_int->wait, wait);
+
+ if (!kfifo_is_empty(&ev_int->det_events))
+ events = POLLIN | POLLRDNORM;
+
+ return events;
+}
+
+/*
+ * iio_event_chrdev_read() - blocking read of queued events
+ *
+ * Loops until at least one struct iio_event_data has been copied to
+ * userspace (or O_NONBLOCK/-ENODEV cuts it short).  read_lock guards
+ * kfifo_to_user() against concurrent readers of the same fifo.
+ */
+static ssize_t iio_event_chrdev_read(struct file *filep,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ unsigned int copied;
+ int ret;
+
+ if (!indio_dev->info)
+ return -ENODEV;
+
+ /* Refuse reads too small to hold a single event record */
+ if (count < sizeof(struct iio_event_data))
+ return -EINVAL;
+
+ do {
+ if (kfifo_is_empty(&ev_int->det_events)) {
+ if (filep->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ /* Also woken on unregister, signalled by info == NULL */
+ ret = wait_event_interruptible(ev_int->wait,
+ !kfifo_is_empty(&ev_int->det_events) ||
+ indio_dev->info == NULL);
+ if (ret)
+ return ret;
+ if (indio_dev->info == NULL)
+ return -ENODEV;
+ }
+
+ if (mutex_lock_interruptible(&ev_int->read_lock))
+ return -ERESTARTSYS;
+ ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+ mutex_unlock(&ev_int->read_lock);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If we couldn't read anything from the fifo (a different
+ * thread might have been faster) we either return -EAGAIN if
+ * the file descriptor is non-blocking, otherwise we go back to
+ * sleep and wait for more data to arrive.
+ */
+ if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ } while (copied == 0);
+
+ return copied;
+}
+
+static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
+{
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+
+ iio_device_put(indio_dev);
+
+ return 0;
+}
+
+static const struct file_operations iio_event_chrdev_fileops = {
+ .read = iio_event_chrdev_read,
+ .poll = iio_event_poll,
+ .release = iio_event_chrdev_release,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+/*
+ * iio_event_getfd() - hand out an anonymous fd for the event queue
+ *
+ * One consumer at a time: the busy bit taken here is cleared again in
+ * iio_event_chrdev_release() (or immediately on failure).  The device
+ * reference pins the iio_dev for the fd's lifetime.
+ */
+int iio_event_getfd(struct iio_dev *indio_dev)
+{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
+ return -ENODEV;
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
+ return -EBUSY;
+
+ iio_device_get(indio_dev);
+
+ fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
+ indio_dev, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ iio_device_put(indio_dev);
+ } else {
+ /* discard events queued while nobody was listening */
+ kfifo_reset_out(&ev_int->det_events);
+ }
+
+ return fd;
+}
+
+static const char * const iio_ev_type_text[] = {
+ [IIO_EV_TYPE_THRESH] = "thresh",
+ [IIO_EV_TYPE_MAG] = "mag",
+ [IIO_EV_TYPE_ROC] = "roc",
+ [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
+ [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
+};
+
+static const char * const iio_ev_dir_text[] = {
+ [IIO_EV_DIR_EITHER] = "either",
+ [IIO_EV_DIR_RISING] = "rising",
+ [IIO_EV_DIR_FALLING] = "falling"
+};
+
+static const char * const iio_ev_info_text[] = {
+ [IIO_EV_INFO_ENABLE] = "en",
+ [IIO_EV_INFO_VALUE] = "value",
+ [IIO_EV_INFO_HYSTERESIS] = "hysteresis",
+ [IIO_EV_INFO_PERIOD] = "period",
+};
+
+static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].dir;
+}
+
+static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
+{
+ return attr->c->event_spec[attr->address & 0xffff].type;
+}
+
+static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
+{
+ return (attr->address >> 16) & 0xffff;
+}
+
+static ssize_t iio_ev_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ bool val;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
+
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t iio_ev_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val;
+
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
+ if (val < 0)
+ return val;
+ else
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t iio_ev_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, val2, val_arr[2];
+ int ret;
+
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ &val, &val2);
+ if (ret < 0)
+ return ret;
+ val_arr[0] = val;
+ val_arr[1] = val2;
+ return iio_format_value(buf, ret, 2, val_arr);
+}
+
+static ssize_t iio_ev_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val, val2;
+ int ret;
+
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
+ if (ret)
+ return ret;
+ ret = indio_dev->info->write_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+ val, val2);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+/*
+ * iio_device_add_event() - create sysfs attributes for one event spec
+ *
+ * Builds one "<type>_<dir>_<info>" attribute per bit set in @mask and
+ * adds it to the event interface's attribute list.  The attribute
+ * address encodes the info in the upper 16 bits and the spec index in
+ * the lower 16 (decoded by the iio_ev_attr_* helpers).
+ *
+ * Returns the number of attributes added, or a negative errno.
+ */
+static int iio_device_add_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int spec_index,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_shared_by shared_by, const unsigned long *mask)
+{
+ ssize_t (*show)(struct device *, struct device_attribute *, char *);
+ ssize_t (*store)(struct device *, struct device_attribute *,
+ const char *, size_t);
+ unsigned int attrcount = 0;
+ unsigned int i;
+ char *postfix;
+ int ret;
+
+ for_each_set_bit(i, mask, sizeof(*mask)*8) {
+ if (i >= ARRAY_SIZE(iio_ev_info_text))
+ return -EINVAL;
+ postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
+ iio_ev_type_text[type], iio_ev_dir_text[dir],
+ iio_ev_info_text[i]);
+ if (postfix == NULL)
+ return -ENOMEM;
+
+ /* "en" toggles the event; everything else is a value */
+ if (i == IIO_EV_INFO_ENABLE) {
+ show = iio_ev_state_show;
+ store = iio_ev_state_store;
+ } else {
+ show = iio_ev_value_show;
+ store = iio_ev_value_store;
+ }
+
+ ret = __iio_add_chan_devattr(postfix, chan, show, store,
+ (i << 16) | spec_index, shared_by, &indio_dev->dev,
+ &indio_dev->event_interface->dev_attr_list);
+ kfree(postfix);
+
+ /* shared attributes may already exist; that's not an error */
+ if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+ continue;
+
+ if (ret)
+ return ret;
+
+ attrcount++;
+ }
+
+ return attrcount;
+}
+
+static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ int ret = 0, i, attrcount = 0;
+ enum iio_event_direction dir;
+ enum iio_event_type type;
+
+ for (i = 0; i < chan->num_event_specs; i++) {
+ type = chan->event_spec[i].type;
+ dir = chan->event_spec[i].dir;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SEPARATE, &chan->event_spec[i].mask_separate);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_TYPE,
+ &chan->event_spec[i].mask_shared_by_type);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_DIR,
+ &chan->event_spec[i].mask_shared_by_dir);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+
+ ret = iio_device_add_event(indio_dev, chan, i, type, dir,
+ IIO_SHARED_BY_ALL,
+ &chan->event_spec[i].mask_shared_by_all);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+ }
+ ret = attrcount;
+ return ret;
+}
+
+static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
+{
+ int j, ret, attrcount = 0;
+
+ /* Dynically created from the channels array */
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ ret = iio_device_add_event_sysfs(indio_dev,
+ &indio_dev->channels[j]);
+ if (ret < 0)
+ return ret;
+ attrcount += ret;
+ }
+ return attrcount;
+}
+
+static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
+{
+ int j;
+
+ for (j = 0; j < indio_dev->num_channels; j++) {
+ if (indio_dev->channels[j].num_event_specs != 0)
+ return true;
+ }
+ return false;
+}
+
+static void iio_setup_ev_int(struct iio_event_interface *ev_int)
+{
+ INIT_KFIFO(ev_int->det_events);
+ init_waitqueue_head(&ev_int->wait);
+ mutex_init(&ev_int->read_lock);
+}
+
+static const char *iio_event_group_name = "events";
+/*
+ * iio_device_register_eventset() - build the "events" sysfs group
+ *
+ * Allocates the event interface, collects the driver's static event
+ * attributes plus the per-channel ones generated from event_spec, and
+ * installs the resulting attribute group on the device.  A device with
+ * neither static nor dynamic events gets no event interface (returns 0).
+ */
+int iio_device_register_eventset(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p;
+ int ret = 0, attrcount_orig = 0, attrcount, attrn;
+ struct attribute **attr;
+
+ if (!(indio_dev->info->event_attrs ||
+ iio_check_for_dynamic_events(indio_dev)))
+ return 0;
+
+ indio_dev->event_interface =
+ kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (indio_dev->event_interface == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);
+
+ iio_setup_ev_int(indio_dev->event_interface);
+ /* count the driver-supplied (static) event attributes */
+ if (indio_dev->info->event_attrs != NULL) {
+ attr = indio_dev->info->event_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ if (indio_dev->channels) {
+ ret = __iio_add_event_config_attrs(indio_dev);
+ if (ret < 0)
+ goto error_free_setup_event_lines;
+ attrcount += ret;
+ }
+
+ /* attrcount + 1: the attribute array is NULL-terminated */
+ indio_dev->event_interface->group.name = iio_event_group_name;
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
+ if (indio_dev->event_interface->group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_setup_event_lines;
+ }
+ if (indio_dev->info->event_attrs)
+ memcpy(indio_dev->event_interface->group.attrs,
+ indio_dev->info->event_attrs->attrs,
+ sizeof(indio_dev->event_interface->group.attrs[0])
+ *attrcount_orig);
+ attrn = attrcount_orig;
+ /* Add all elements from the list. */
+ list_for_each_entry(p,
+ &indio_dev->event_interface->dev_attr_list,
+ l)
+ indio_dev->event_interface->group.attrs[attrn++] =
+ &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &indio_dev->event_interface->group;
+
+ return 0;
+
+error_free_setup_event_lines:
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
+ kfree(indio_dev->event_interface);
+ return ret;
+}
+
+/**
+ * iio_device_wakeup_eventset - Wakes up the event waitqueue
+ * @indio_dev: The IIO device
+ *
+ * Wakes up the event waitqueue used for poll() and blocking read().
+ * Should usually be called when the device is unregistered.
+ */
+void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ wake_up(&indio_dev->event_interface->wait);
+}
+
+void iio_device_unregister_eventset(struct iio_dev *indio_dev)
+{
+ if (indio_dev->event_interface == NULL)
+ return;
+ iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
+ kfree(indio_dev->event_interface->group.attrs);
+ kfree(indio_dev->event_interface);
+}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
new file mode 100644
index 00000000000000..d31098e0c43f48
--- /dev/null
+++ b/drivers/iio/industrialio-trigger.c
@@ -0,0 +1,583 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include "iio_core.h"
+#include "iio_core_trigger.h"
+#include <linux/iio/trigger_consumer.h>
+
+/* RFC - Question of approach
+ * Make the common case (single sensor single trigger)
+ * simple by starting trigger capture from when first sensors
+ * is added.
+ *
+ * Complex simultaneous start requires use of 'hold' functionality
+ * of the trigger. (not implemented)
+ *
+ * Any other suggestions?
+ */
+
+static DEFINE_IDA(iio_trigger_ida);
+
+/* Single list of all available triggers */
+static LIST_HEAD(iio_trigger_list);
+static DEFINE_MUTEX(iio_trigger_list_lock);
+
+/**
+ * iio_trigger_read_name() - retrieve useful identifying name
+ **/
+static ssize_t iio_trigger_read_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_trigger *trig = to_iio_trigger(dev);
+ return sprintf(buf, "%s\n", trig->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *iio_trig_dev_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(iio_trig_dev);
+
+int iio_trigger_register(struct iio_trigger *trig_info)
+{
+ int ret;
+
+ trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
+ if (trig_info->id < 0)
+ return trig_info->id;
+
+ /* Set the name used for the sysfs directory etc */
+ dev_set_name(&trig_info->dev, "trigger%ld",
+ (unsigned long) trig_info->id);
+
+ ret = device_add(&trig_info->dev);
+ if (ret)
+ goto error_unregister_id;
+
+ /* Add to list of available triggers held by the IIO core */
+ mutex_lock(&iio_trigger_list_lock);
+ list_add_tail(&trig_info->list, &iio_trigger_list);
+ mutex_unlock(&iio_trigger_list_lock);
+
+ return 0;
+
+error_unregister_id:
+ ida_simple_remove(&iio_trigger_ida, trig_info->id);
+ return ret;
+}
+EXPORT_SYMBOL(iio_trigger_register);
+
+void iio_trigger_unregister(struct iio_trigger *trig_info)
+{
+ mutex_lock(&iio_trigger_list_lock);
+ list_del(&trig_info->list);
+ mutex_unlock(&iio_trigger_list_lock);
+
+ ida_simple_remove(&iio_trigger_ida, trig_info->id);
+ /* Possible issue in here */
+ device_del(&trig_info->dev);
+}
+EXPORT_SYMBOL(iio_trigger_unregister);
+
+static struct iio_trigger *iio_trigger_find_by_name(const char *name,
+ size_t len)
+{
+ struct iio_trigger *trig = NULL, *iter;
+
+ mutex_lock(&iio_trigger_list_lock);
+ list_for_each_entry(iter, &iio_trigger_list, list)
+ if (sysfs_streq(iter->name, name)) {
+ trig = iter;
+ break;
+ }
+ mutex_unlock(&iio_trigger_list_lock);
+
+ return trig;
+}
+
+/*
+ * iio_trigger_poll() - fan a trigger event out to all consumers
+ *
+ * Called from (hard) interrupt context.  use_count is primed with the
+ * full consumer count and decremented in iio_trigger_notify_done() as
+ * each consumer finishes; a non-zero use_count means the previous pass
+ * is still in flight and this event is skipped.  Disabled sub-irqs are
+ * treated as already done.
+ */
+void iio_trigger_poll(struct iio_trigger *trig)
+{
+ int i;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
+ generic_handle_irq(trig->subirq_base + i);
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
+}
+EXPORT_SYMBOL(iio_trigger_poll);
+
+/* Convenience IRQ handler: the trigger pointer is the dev_id cookie */
+irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
+{
+ iio_trigger_poll(private);
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
+
+/*
+ * Same fan-out as iio_trigger_poll() but for threaded (sleepable)
+ * context: consumers are invoked via handle_nested_irq().
+ */
+void iio_trigger_poll_chained(struct iio_trigger *trig)
+{
+ int i;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
+ handle_nested_irq(trig->subirq_base + i);
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
+}
+EXPORT_SYMBOL(iio_trigger_poll_chained);
+
+/*
+ * Called by each consumer when done; the last one may re-arm the
+ * trigger via try_reenable(), and re-polls if an edge was missed.
+ */
+void iio_trigger_notify_done(struct iio_trigger *trig)
+{
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
+ if (trig->ops->try_reenable(trig))
+ /* Missed an interrupt so launch new poll now */
+ iio_trigger_poll(trig);
+}
+EXPORT_SYMBOL(iio_trigger_notify_done);
+
+/* Trigger Consumer related functions */
+static int iio_trigger_get_irq(struct iio_trigger *trig)
+{
+ int ret;
+ mutex_lock(&trig->pool_lock);
+ ret = bitmap_find_free_region(trig->pool,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+ ilog2(1));
+ mutex_unlock(&trig->pool_lock);
+ if (ret >= 0)
+ ret += trig->subirq_base;
+
+ return ret;
+}
+
+static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
+{
+ mutex_lock(&trig->pool_lock);
+ clear_bit(irq - trig->subirq_base, trig->pool);
+ mutex_unlock(&trig->pool_lock);
+}
+
+/* Complexity in here. With certain triggers (datardy) an acknowledgement
+ * may be needed if the pollfuncs do not include the data read for the
+ * triggering device.
+ * This is not currently handled. Alternative of not enabling trigger unless
+ * the relevant function is in there may be the best option.
+ */
+/* Worth protecting against double additions? */
+static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
+{
+ int ret = 0;
+ /* first consumer on this trigger? then we must start it below */
+ bool notinuse
+ = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ /* Prevent the module from being removed whilst attached to a trigger */
+ __module_get(pf->indio_dev->info->driver_module);
+ pf->irq = iio_trigger_get_irq(trig);
+ ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+ pf->type, pf->name,
+ pf);
+ if (ret < 0) {
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
+ }
+
+ /* Enable the trigger hardware for its first consumer */
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ ret = trig->ops->set_trigger_state(trig, true);
+ if (ret < 0)
+ module_put(pf->indio_dev->info->driver_module);
+ }
+
+ return ret;
+}
+
+static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+ struct iio_poll_func *pf)
+{
+ int ret = 0;
+ /* last consumer leaving? then stop the trigger hardware first */
+ bool no_other_users
+ = (bitmap_weight(trig->pool,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER)
+ == 1);
+ if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+ ret = trig->ops->set_trigger_state(trig, false);
+ if (ret)
+ return ret;
+ }
+ iio_trigger_put_irq(trig, pf->irq);
+ free_irq(pf->irq, pf);
+ /* balances the __module_get() taken at attach time */
+ module_put(pf->indio_dev->info->driver_module);
+
+ return ret;
+}
+
+irqreturn_t iio_pollfunc_store_time(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ pf->timestamp = iio_get_time_ns();
+ return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL(iio_pollfunc_store_time);
+
+struct iio_poll_func
+*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ int type,
+ struct iio_dev *indio_dev,
+ const char *fmt,
+ ...)
+{
+ va_list vargs;
+ struct iio_poll_func *pf;
+
+ pf = kmalloc(sizeof *pf, GFP_KERNEL);
+ if (pf == NULL)
+ return NULL;
+ va_start(vargs, fmt);
+ pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ va_end(vargs);
+ if (pf->name == NULL) {
+ kfree(pf);
+ return NULL;
+ }
+ pf->h = h;
+ pf->thread = thread;
+ pf->type = type;
+ pf->indio_dev = indio_dev;
+
+ return pf;
+}
+EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
+
+void iio_dealloc_pollfunc(struct iio_poll_func *pf)
+{
+ kfree(pf->name);
+ kfree(pf);
+}
+EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
+
+/**
+ * iio_trigger_read_current() - trigger consumer sysfs query current trigger
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used by the device to be queried.
+ **/
+static ssize_t iio_trigger_read_current(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
+ if (indio_dev->trig)
+ return sprintf(buf, "%s\n", indio_dev->trig->name);
+ return 0;
+}
+
+/**
+ * iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ *
+ * For trigger consumers the current_trigger interface allows the trigger
+ * used for this device to be specified at run time based on the trigger's
+ * name.
+ **/
+static ssize_t iio_trigger_write_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_trigger *oldtrig = indio_dev->trig;
+ struct iio_trigger *trig;
+ int ret;
+
+ /* Refuse to switch triggers while triggered capture is running */
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ /* An unknown name yields trig == NULL, i.e. "no trigger" */
+ trig = iio_trigger_find_by_name(buf, len);
+ if (oldtrig == trig)
+ return len;
+
+ /* Let both the device and the trigger veto the pairing */
+ if (trig && indio_dev->info->validate_trigger) {
+ ret = indio_dev->info->validate_trigger(indio_dev, trig);
+ if (ret)
+ return ret;
+ }
+
+ if (trig && trig->ops && trig->ops->validate_device) {
+ ret = trig->ops->validate_device(trig, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ indio_dev->trig = trig;
+
+ /* swap the references: drop the old trigger, pin the new one */
+ if (oldtrig)
+ iio_trigger_put(oldtrig);
+ if (indio_dev->trig)
+ iio_trigger_get(indio_dev->trig);
+
+ return len;
+}
+
+static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
+ iio_trigger_read_current,
+ iio_trigger_write_current);
+
+static struct attribute *iio_trigger_consumer_attrs[] = {
+ &dev_attr_current_trigger.attr,
+ NULL,
+};
+
+static const struct attribute_group iio_trigger_consumer_attr_group = {
+ .name = "trigger",
+ .attrs = iio_trigger_consumer_attrs,
+};
+
+static void iio_trig_release(struct device *device)
+{
+ struct iio_trigger *trig = to_iio_trigger(device);
+ int i;
+
+ if (trig->subirq_base) {
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ irq_modify_status(trig->subirq_base + i,
+ IRQ_NOAUTOEN,
+ IRQ_NOREQUEST | IRQ_NOPROBE);
+ irq_set_chip(trig->subirq_base + i,
+ NULL);
+ irq_set_handler(trig->subirq_base + i,
+ NULL);
+ }
+
+ irq_free_descs(trig->subirq_base,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+ }
+ kfree(trig->name);
+ kfree(trig);
+}
+
+static struct device_type iio_trig_type = {
+ .release = iio_trig_release,
+ .groups = iio_trig_dev_groups,
+};
+
+/*
+ * irq_chip mask callback for the trigger's per-consumer sub-irqs: recover
+ * the owning trigger from the embedded chip and flag the consumer disabled.
+ */
+static void iio_trig_subirqmask(struct irq_data *d)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct iio_trigger *trig
+ = container_of(chip,
+ struct iio_trigger, subirq_chip);
+ trig->subirqs[d->irq - trig->subirq_base].enabled = false;
+}
+
+/* Unmask counterpart: flag the consumer's sub-irq enabled again. */
+static void iio_trig_subirqunmask(struct irq_data *d)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct iio_trigger *trig
+ = container_of(chip,
+ struct iio_trigger, subirq_chip);
+ trig->subirqs[d->irq - trig->subirq_base].enabled = true;
+}
+
+/*
+ * Allocate and initialise a trigger whose name is built from fmt/vargs.
+ * Also reserves CONFIG_IIO_CONSUMERS_PER_TRIGGER irq descriptors used as
+ * per-consumer sub-irqs. Returns NULL on any allocation failure; all
+ * partially-acquired resources are released on the error paths.
+ */
+static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
+{
+ struct iio_trigger *trig;
+ trig = kzalloc(sizeof *trig, GFP_KERNEL);
+ if (trig) {
+ int i;
+ trig->dev.type = &iio_trig_type;
+ trig->dev.bus = &iio_bus_type;
+ device_initialize(&trig->dev);
+
+ mutex_init(&trig->pool_lock);
+ trig->subirq_base
+ = irq_alloc_descs(-1, 0,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+ 0);
+ if (trig->subirq_base < 0) {
+ kfree(trig);
+ return NULL;
+ }
+
+ /* Freed by iio_trig_release() when the device goes away. */
+ trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ if (trig->name == NULL) {
+ irq_free_descs(trig->subirq_base,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+ kfree(trig);
+ return NULL;
+ }
+ trig->subirq_chip.name = trig->name;
+ trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
+ trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ irq_set_chip(trig->subirq_base + i,
+ &trig->subirq_chip);
+ irq_set_handler(trig->subirq_base + i,
+ &handle_simple_irq);
+ /* Consumers may not request/probe these; enabled on demand. */
+ irq_modify_status(trig->subirq_base + i,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN,
+ IRQ_NOPROBE);
+ }
+ get_device(&trig->dev);
+ }
+
+ return trig;
+}
+
+/* Public varargs wrapper around viio_trigger_alloc(). */
+struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
+{
+ struct iio_trigger *trig;
+ va_list vargs;
+
+ va_start(vargs, fmt);
+ trig = viio_trigger_alloc(fmt, vargs);
+ va_end(vargs);
+
+ return trig;
+}
+EXPORT_SYMBOL(iio_trigger_alloc);
+
+/*
+ * Drop the reference taken at allocation time; the actual freeing happens
+ * in iio_trig_release() once the last reference is gone. NULL is a no-op.
+ */
+void iio_trigger_free(struct iio_trigger *trig)
+{
+ if (trig)
+ put_device(&trig->dev);
+}
+EXPORT_SYMBOL(iio_trigger_free);
+
+/* devres destructor: free the managed trigger on owner-device detach. */
+static void devm_iio_trigger_release(struct device *dev, void *res)
+{
+ iio_trigger_free(*(struct iio_trigger **)res);
+}
+
+/*
+ * devres match callback used by devm_iio_trigger_free() to locate the
+ * resource that wraps a given trigger pointer.
+ */
+static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
+{
+ struct iio_trigger **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+/**
+ * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
+ * @dev: Device to allocate iio_trigger for
+ * @fmt: trigger name format. If it includes format
+ * specifiers, the additional arguments following
+ * format are formatted and inserted in the resulting
+ * string replacing their respective specifiers.
+ *
+ * Managed iio_trigger_alloc. iio_trigger allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_trigger allocated with this function needs to be freed separately,
+ * devm_iio_trigger_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_trigger on success, NULL on failure.
+ */
+struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
+ const char *fmt, ...)
+{
+ struct iio_trigger **ptr, *trig;
+ va_list vargs;
+
+ ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ va_start(vargs, fmt);
+ trig = viio_trigger_alloc(fmt, vargs);
+ va_end(vargs);
+ if (trig) {
+ /* Hand ownership to devres so detach frees the trigger. */
+ *ptr = trig;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return trig;
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
+
+/**
+ * devm_iio_trigger_free - Resource-managed iio_trigger_free()
+ * @dev: Device this iio_dev belongs to
+ * @iio_trig: the iio_trigger associated with the device
+ *
+ * Free iio_trigger allocated with devm_iio_trigger_alloc().
+ */
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
+{
+ int rc;
+
+ /* WARN if the trigger was not managed by this device's devres. */
+ rc = devres_release(dev, devm_iio_trigger_release,
+ devm_iio_trigger_match, iio_trig);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
+
+/* Add the "trigger/" attribute group to a device that consumes triggers. */
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+{
+ indio_dev->groups[indio_dev->groupcounter++] =
+ &iio_trigger_consumer_attr_group;
+}
+
+void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+{
+ /* Clean up an associated but not attached trigger reference */
+ if (indio_dev->trig)
+ iio_trigger_put(indio_dev->trig);
+}
+
+/* Default buffer postenable: attach the device pollfunc to its trigger. */
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
+{
+ return iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_postenable);
+
+/* Default buffer predisable: detach the pollfunc again. */
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
+{
+ return iio_trigger_detach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_predisable);
diff --git a/drivers/iio/industrialio-triggered-buffer.c b/drivers/iio/industrialio-triggered-buffer.c
new file mode 100644
index 00000000000000..d6f54930b34af4
--- /dev/null
+++ b/drivers/iio/industrialio-triggered-buffer.c
@@ -0,0 +1,112 @@
+ /*
+ * Copyright (c) 2012 Analog Devices, Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+/* Default setup ops used when the caller passes setup_ops == NULL. */
+static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+};
+
+/**
+ * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
+ * @indio_dev: IIO device structure
+ * @pollfunc_bh: Function which will be used as pollfunc bottom half
+ * @pollfunc_th: Function which will be used as pollfunc top half
+ * @setup_ops: Buffer setup functions to use for this device.
+ * If NULL the default setup functions for triggered
+ * buffers will be used.
+ *
+ * This function combines some common tasks which will normally be performed
+ * when setting up a triggered buffer. It will allocate the buffer and the
+ * pollfunc, as well as register the buffer with the IIO core.
+ *
+ * Before calling this function the indio_dev structure should already be
+ * completely initialized, but not yet registered. In practice this means that
+ * this function should be called right before iio_device_register().
+ *
+ * To free the resources allocated by this function call
+ * iio_triggered_buffer_cleanup().
+ *
+ * Returns 0 on success, or a negative errno; on failure everything that was
+ * allocated here is released again (goto-based unwind below).
+ */
+int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*pollfunc_bh)(int irq, void *p),
+ irqreturn_t (*pollfunc_th)(int irq, void *p),
+ const struct iio_buffer_setup_ops *setup_ops)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_kfifo_allocate(indio_dev);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
+ pollfunc_th,
+ IRQF_ONESHOT,
+ indio_dev,
+ "%s_consumer%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_kfifo_free;
+ }
+
+ /* Ring buffer functions - here trigger setup related */
+ if (setup_ops)
+ indio_dev->setup_ops = setup_ops;
+ else
+ indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;
+
+ /* Flag that polled ring buffering is possible */
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+
+ ret = iio_buffer_register(indio_dev,
+ indio_dev->channels,
+ indio_dev->num_channels);
+ if (ret)
+ goto error_dealloc_pollfunc;
+
+ return 0;
+
+error_dealloc_pollfunc:
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+error_kfifo_free:
+ iio_kfifo_free(indio_dev->buffer);
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(iio_triggered_buffer_setup);
+
+/**
+ * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup()
+ * @indio_dev: IIO device structure
+ *
+ * Unwinds in reverse order of iio_triggered_buffer_setup().
+ */
+void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
+{
+ iio_buffer_unregister(indio_dev);
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+ iio_kfifo_free(indio_dev->buffer);
+}
+EXPORT_SYMBOL(iio_triggered_buffer_cleanup);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
new file mode 100644
index 00000000000000..90c8cb727cc700
--- /dev/null
+++ b/drivers/iio/inkern.c
@@ -0,0 +1,636 @@
+/* The industrial I/O core in kernel channel mapping
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include "iio_core.h"
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/consumer.h>
+
+/* Internal wrapper linking a board-supplied iio_map to its provider device. */
+struct iio_map_internal {
+ struct iio_dev *indio_dev;
+ struct iio_map *map;
+ struct list_head l;
+};
+
+/* Global registry of consumer mappings, guarded by iio_map_list_lock. */
+static LIST_HEAD(iio_map_list);
+static DEFINE_MUTEX(iio_map_list_lock);
+
+/*
+ * Register a NULL-terminated array of consumer maps for @indio_dev.
+ * Returns 0 on success or -ENOMEM.
+ * NOTE(review): on ENOMEM the entries added so far stay registered until
+ * iio_map_array_unregister() runs — presumably callers unregister on
+ * failure; confirm against callers.
+ */
+int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
+{
+ int i = 0, ret = 0;
+ struct iio_map_internal *mapi;
+
+ if (maps == NULL)
+ return 0;
+
+ mutex_lock(&iio_map_list_lock);
+ while (maps[i].consumer_dev_name != NULL) {
+ mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
+ if (mapi == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ mapi->map = &maps[i];
+ mapi->indio_dev = indio_dev;
+ list_add(&mapi->l, &iio_map_list);
+ i++;
+ }
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_register);
+
+
+/*
+ * Remove all map entries associated with the given iio device.
+ * Returns 0 if at least one entry was removed, -ENODEV otherwise.
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev)
+{
+ int ret = -ENODEV;
+ struct iio_map_internal *mapi;
+ struct list_head *pos, *tmp;
+
+ mutex_lock(&iio_map_list_lock);
+ /* _safe variant: entries are deleted while walking the list. */
+ list_for_each_safe(pos, tmp, &iio_map_list) {
+ mapi = list_entry(pos, struct iio_map_internal, l);
+ if (indio_dev == mapi->indio_dev) {
+ list_del(&mapi->l);
+ kfree(mapi);
+ ret = 0;
+ }
+ }
+ mutex_unlock(&iio_map_list_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_map_array_unregister);
+
+/*
+ * Look up a channel spec on @indio_dev by its datasheet name.
+ * Returns the matching spec, or NULL when no channel matches.
+ */
+static const struct iio_chan_spec
+*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
+{
+ int idx;
+
+ for (idx = 0; idx < indio_dev->num_channels; idx++) {
+ const struct iio_chan_spec *spec = &indio_dev->channels[idx];
+
+ if (spec->datasheet_name == NULL)
+ continue;
+ if (strcmp(name, spec->datasheet_name) == 0)
+ return spec;
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_OF
+
+/* bus_find_device() predicate: match an IIO device by its OF node. */
+static int iio_dev_node_match(struct device *dev, void *data)
+{
+ return dev->of_node == data && dev->type == &iio_device_type;
+}
+
+/**
+ * __of_iio_simple_xlate - translate iiospec to the IIO channel index
+ * @indio_dev: pointer to the iio_dev structure
+ * @iiospec: IIO specifier as found in the device tree
+ *
+ * This is a simple translation function, suitable for the most 1:1 mapped
+ * channels in IIO chips. This function performs only one sanity check:
+ * whether IIO index is less than num_channels (that is specified in the
+ * iio_dev).
+ */
+static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ /* No cells in the phandle means channel 0. */
+ if (!iiospec->args_count)
+ return 0;
+
+ if (iiospec->args[0] >= indio_dev->num_channels)
+ return -EINVAL;
+
+ return iiospec->args[0];
+}
+
+/*
+ * Resolve entry @index of the node's "io-channels" phandle list into
+ * @channel. Takes a reference on the provider device; returns 0 on
+ * success, -EPROBE_DEFER when the provider is not registered yet, or a
+ * negative errno from parsing/translation.
+ */
+static int __of_iio_channel_get(struct iio_channel *channel,
+ struct device_node *np, int index)
+{
+ struct device *idev;
+ struct iio_dev *indio_dev;
+ int err;
+ struct of_phandle_args iiospec;
+
+ err = of_parse_phandle_with_args(np, "io-channels",
+ "#io-channel-cells",
+ index, &iiospec);
+ if (err)
+ return err;
+
+ /* bus_find_device() takes a reference on the matched device. */
+ idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
+ iio_dev_node_match);
+ of_node_put(iiospec.np);
+ if (idev == NULL)
+ return -EPROBE_DEFER;
+
+ indio_dev = dev_to_iio_dev(idev);
+ channel->indio_dev = indio_dev;
+ /* Driver-specific translation wins over the default 1:1 mapping. */
+ if (indio_dev->info->of_xlate)
+ index = indio_dev->info->of_xlate(indio_dev, &iiospec);
+ else
+ index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ if (index < 0)
+ goto err_put;
+ channel->channel = &indio_dev->channels[index];
+
+ return 0;
+
+err_put:
+ iio_device_put(indio_dev);
+ return index;
+}
+
+/*
+ * Allocate an iio_channel and resolve it from the node's "io-channels"
+ * entry @index. Returns the channel or an ERR_PTR on failure.
+ */
+static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
+{
+ struct iio_channel *channel;
+ int err;
+
+ /* A negative index here is a failed "io-channel-names" lookup. */
+ if (index < 0)
+ return ERR_PTR(-EINVAL);
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (channel == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ err = __of_iio_channel_get(channel, np, index);
+ if (err)
+ goto err_free_channel;
+
+ return channel;
+
+err_free_channel:
+ kfree(channel);
+ return ERR_PTR(err);
+}
+
+/*
+ * Find a (possibly named) IIO channel for @np, walking up through parents
+ * that carry "io-channel-ranges". Returns the channel, NULL if nothing
+ * matched, or an ERR_PTR(-EPROBE_DEFER) passed through from the provider.
+ */
+static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ const char *name)
+{
+ struct iio_channel *chan = NULL;
+
+ /* Walk up the tree of devices looking for a matching iio channel */
+ while (np) {
+ int index = 0;
+
+ /*
+ * For named iio channels, first look up the name in the
+ * "io-channel-names" property. If it cannot be found, the
+ * index will be an error code, and of_iio_channel_get()
+ * will fail.
+ */
+ if (name)
+ index = of_property_match_string(np, "io-channel-names",
+ name);
+ chan = of_iio_channel_get(np, index);
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ break;
+ else if (name && index >= 0) {
+ /* Name matched but the phandle could not be resolved. */
+ pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
+ np->full_name, name ? name : "", index);
+ return NULL;
+ }
+
+ /*
+ * No matching IIO channel found on this node.
+ * If the parent node has a "io-channel-ranges" property,
+ * then we can try one of its channels.
+ */
+ np = np->parent;
+ if (np && !of_get_property(np, "io-channel-ranges", NULL))
+ return NULL;
+ }
+
+ return chan;
+}
+
+/*
+ * Resolve every "io-channels" entry of @dev into a NULL-terminated array
+ * of iio_channel. Returns the array, NULL when the node has no
+ * "io-channels" (caller then falls back to the map table), or an ERR_PTR.
+ */
+static struct iio_channel *of_iio_channel_get_all(struct device *dev)
+{
+ struct iio_channel *chans;
+ int i, mapind, nummaps = 0;
+ int ret;
+
+ /* First pass: count the phandle entries. */
+ do {
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "io-channels",
+ "#io-channel-cells",
+ nummaps, NULL);
+ if (ret < 0)
+ break;
+ } while (++nummaps);
+
+ if (nummaps == 0) /* no error, return NULL to search map table */
+ return NULL;
+
+ /* NULL terminated array to save passing size */
+ chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
+ if (chans == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Search for OF matches */
+ for (mapind = 0; mapind < nummaps; mapind++) {
+ ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
+ mapind);
+ if (ret)
+ goto error_free_chans;
+ }
+ return chans;
+
+error_free_chans:
+ /* Drop the provider references taken for the entries resolved so far. */
+ for (i = 0; i < mapind; i++)
+ iio_device_put(chans[i].indio_dev);
+ kfree(chans);
+ return ERR_PTR(ret);
+}
+
+#else /* CONFIG_OF */
+
+/* Without OF support the devicetree lookups always fall through to the
+ * board-file map table (callers treat NULL as "not found, keep looking").
+ */
+static inline struct iio_channel *
+of_iio_channel_get_by_name(struct device_node *np, const char *name)
+{
+ return NULL;
+}
+
+static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_OF */
+
+/*
+ * Board-file map lookup: find the first map entry matching consumer device
+ * name and/or consumer channel name, take a reference on its provider and
+ * build an iio_channel for it. Returns ERR_PTR on failure.
+ */
+static struct iio_channel *iio_channel_get_sys(const char *name,
+ const char *channel_name)
+{
+ struct iio_map_internal *c_i = NULL, *c = NULL;
+ struct iio_channel *channel;
+ int err;
+
+ if (name == NULL && channel_name == NULL)
+ return ERR_PTR(-ENODEV);
+
+ /* first find matching entry the channel map */
+ mutex_lock(&iio_map_list_lock);
+ list_for_each_entry(c_i, &iio_map_list, l) {
+ if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+ (channel_name &&
+ strcmp(channel_name, c_i->map->consumer_channel) != 0))
+ continue;
+ c = c_i;
+ /* Pin the provider while we still hold the list lock. */
+ iio_device_get(c->indio_dev);
+ break;
+ }
+ mutex_unlock(&iio_map_list_lock);
+ if (c == NULL)
+ return ERR_PTR(-ENODEV);
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (channel == NULL) {
+ err = -ENOMEM;
+ goto error_no_mem;
+ }
+
+ channel->indio_dev = c->indio_dev;
+
+ if (c->map->adc_channel_label) {
+ channel->channel =
+ iio_chan_spec_from_name(channel->indio_dev,
+ c->map->adc_channel_label);
+
+ if (channel->channel == NULL) {
+ err = -EINVAL;
+ goto error_no_chan;
+ }
+ }
+
+ return channel;
+
+error_no_chan:
+ kfree(channel);
+error_no_mem:
+ iio_device_put(c->indio_dev);
+ return ERR_PTR(err);
+}
+
+/*
+ * Consumer entry point: try the devicetree first, then fall back to the
+ * board-file map table keyed by the consumer's device name.
+ */
+struct iio_channel *iio_channel_get(struct device *dev,
+ const char *channel_name)
+{
+ const char *name = dev ? dev_name(dev) : NULL;
+ struct iio_channel *channel;
+
+ if (dev) {
+ channel = of_iio_channel_get_by_name(dev->of_node,
+ channel_name);
+ if (channel != NULL)
+ return channel;
+ }
+
+ return iio_channel_get_sys(name, channel_name);
+}
+EXPORT_SYMBOL_GPL(iio_channel_get);
+
+/* Release a channel obtained via iio_channel_get(). */
+void iio_channel_release(struct iio_channel *channel)
+{
+ iio_device_put(channel->indio_dev);
+ kfree(channel);
+}
+EXPORT_SYMBOL_GPL(iio_channel_release);
+
+/*
+ * Get all channels mapped to @dev, from the devicetree when present,
+ * otherwise from the board-file map table. Returns a NULL-terminated
+ * array (release with iio_channel_release_all()) or an ERR_PTR.
+ */
+struct iio_channel *iio_channel_get_all(struct device *dev)
+{
+ const char *name;
+ struct iio_channel *chans;
+ struct iio_map_internal *c = NULL;
+ int nummaps = 0;
+ int mapind = 0;
+ int i, ret;
+
+ if (dev == NULL)
+ return ERR_PTR(-EINVAL);
+
+ chans = of_iio_channel_get_all(dev);
+ if (chans)
+ return chans;
+
+ name = dev_name(dev);
+
+ mutex_lock(&iio_map_list_lock);
+ /* first count the matching maps */
+ list_for_each_entry(c, &iio_map_list, l)
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ else
+ nummaps++;
+
+ if (nummaps == 0) {
+ ret = -ENODEV;
+ goto error_ret;
+ }
+
+ /* NULL terminated array to save passing size */
+ chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
+ if (chans == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ /* for each map fill in the chans element */
+ list_for_each_entry(c, &iio_map_list, l) {
+ if (name && strcmp(name, c->map->consumer_dev_name) != 0)
+ continue;
+ chans[mapind].indio_dev = c->indio_dev;
+ chans[mapind].data = c->map->consumer_data;
+ chans[mapind].channel =
+ iio_chan_spec_from_name(chans[mapind].indio_dev,
+ c->map->adc_channel_label);
+ if (chans[mapind].channel == NULL) {
+ ret = -EINVAL;
+ goto error_free_chans;
+ }
+ iio_device_get(chans[mapind].indio_dev);
+ mapind++;
+ }
+ if (mapind == 0) {
+ ret = -ENODEV;
+ goto error_free_chans;
+ }
+ mutex_unlock(&iio_map_list_lock);
+
+ return chans;
+
+error_free_chans:
+ /*
+ * Only the first mapind entries took a reference via iio_device_get();
+ * chans[mapind] may already have indio_dev assigned without a get, so
+ * bounding this loop by nummaps would drop references never taken.
+ */
+ for (i = 0; i < mapind; i++)
+ iio_device_put(chans[i].indio_dev);
+ kfree(chans);
+error_ret:
+ mutex_unlock(&iio_map_list_lock);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_all);
+
+/*
+ * Release a NULL-terminated channel array obtained from
+ * iio_channel_get_all(): drop each provider reference, then free the array.
+ */
+void iio_channel_release_all(struct iio_channel *channels)
+{
+ struct iio_channel *c;
+
+ for (c = channels; c->indio_dev; c++)
+ iio_device_put(c->indio_dev);
+ kfree(channels);
+}
+EXPORT_SYMBOL_GPL(iio_channel_release_all);
+
+/*
+ * Read one @info item from a channel, preferring the driver's multi-value
+ * read_raw_multi callback when present. @val2 may be NULL when the caller
+ * only needs the integer part. Caller holds info_exist_lock.
+ */
+static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum info)
+{
+ int unused;
+ int vals[INDIO_MAX_RAW_ELEMENTS];
+ int ret;
+ int val_len = 2;
+
+ if (val2 == NULL)
+ val2 = &unused;
+
+ if(!iio_channel_has_info(chan->channel, info))
+ return -EINVAL;
+
+ if (chan->indio_dev->info->read_raw_multi) {
+ ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
+ chan->channel, INDIO_MAX_RAW_ELEMENTS,
+ vals, &val_len, info);
+ /* Only the first two elements are surfaced to the caller. */
+ *val = vals[0];
+ *val2 = vals[1];
+ } else
+ ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+ chan->channel, val, val2, info);
+
+ return ret;
+}
+
+/*
+ * Read the raw value of a channel. info_exist_lock guards against the
+ * provider driver going away mid-read (info becomes NULL on unregister).
+ */
+int iio_read_channel_raw(struct iio_channel *chan, int *val)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_raw);
+
+/* Same as iio_read_channel_raw() but for the hardware-averaged value. */
+int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
+
+/*
+ * Convert a raw reading to a processed value: apply the channel's offset
+ * (if any), then its scale, interpreting the scale according to the type
+ * code returned by the driver. @scale is an extra caller-supplied
+ * multiplier (e.g. 1000 for milli-units). Caller holds info_exist_lock.
+ */
+static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ int raw, int *processed, unsigned int scale)
+{
+ int scale_type, scale_val, scale_val2, offset;
+ s64 raw64 = raw;
+ int ret;
+
+ /* Offset is optional; a read error simply means "no offset". */
+ ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
+ if (ret >= 0)
+ raw64 += offset;
+
+ scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
+ IIO_CHAN_INFO_SCALE);
+ if (scale_type < 0)
+ return scale_type;
+
+ switch (scale_type) {
+ case IIO_VAL_INT:
+ *processed = raw64 * scale_val;
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ /* A negative fractional part means the whole scale is negative. */
+ if (scale_val2 < 0)
+ *processed = -raw64 * scale_val;
+ else
+ *processed = raw64 * scale_val;
+ *processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 1000000LL);
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ if (scale_val2 < 0)
+ *processed = -raw64 * scale_val;
+ else
+ *processed = raw64 * scale_val;
+ *processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 1000000000LL);
+ break;
+ case IIO_VAL_FRACTIONAL:
+ *processed = div_s64(raw64 * (s64)scale_val * scale,
+ scale_val2);
+ break;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ /* Scale is scale_val / 2^scale_val2. */
+ *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Locked wrapper around iio_convert_raw_to_processed_unlocked(). */
+int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
+ int *processed, unsigned int scale)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
+ scale);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
+
+/*
+ * Read a processed value: use the channel's PROCESSED info when the
+ * driver provides it, otherwise read RAW and convert in software.
+ */
+int iio_read_channel_processed(struct iio_channel *chan, int *val)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
+ ret = iio_channel_read(chan, val, NULL,
+ IIO_CHAN_INFO_PROCESSED);
+ } else {
+ ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
+ if (ret < 0)
+ goto err_unlock;
+ /* In-place conversion: *val holds raw in, processed out. */
+ ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
+ }
+
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_processed);
+
+/* Read the channel's scale as an (integer, fractional) pair. */
+int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
+{
+ int ret;
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+
+/* Report the channel's type (voltage, temp, ...) to the consumer. */
+int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
+{
+ int ret = 0;
+ /* Need to verify underlying driver has not gone away */
+
+ mutex_lock(&chan->indio_dev->info_exist_lock);
+ if (chan->indio_dev->info == NULL) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ *type = chan->channel->type;
+err_unlock:
+ mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_get_channel_type);
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
new file mode 100644
index 00000000000000..bf0e37cc65ac57
--- /dev/null
+++ b/drivers/iio/kfifo_buf.c
@@ -0,0 +1,207 @@
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+/*
+ * kfifo-backed IIO buffer. user_lock serializes userspace access against
+ * fifo reallocation; update_needed flags a pending length/bpd change.
+ */
+struct iio_kfifo {
+ struct iio_buffer buffer;
+ struct kfifo kf;
+ struct mutex user_lock;
+ int update_needed;
+};
+
+#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
+
+/*
+ * (Re)allocate the backing kfifo with @length records of @bytes_per_datum
+ * each. Rejects zero-sized configurations.
+ */
+static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
+ int bytes_per_datum, int length)
+{
+ if ((length == 0) || (bytes_per_datum == 0))
+ return -EINVAL;
+
+ return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
+ bytes_per_datum, GFP_KERNEL);
+}
+
+/*
+ * Apply a pending size change: reallocate the fifo when parameters changed
+ * since the last update, otherwise just drain stale data.
+ * NOTE(review): update_needed is cleared even when reallocation fails, so
+ * a later retry would not reallocate — confirm whether callers treat a
+ * non-zero return as fatal.
+ */
+static int iio_request_update_kfifo(struct iio_buffer *r)
+{
+ int ret = 0;
+ struct iio_kfifo *buf = iio_to_kfifo(r);
+
+ mutex_lock(&buf->user_lock);
+ if (buf->update_needed) {
+ kfifo_free(&buf->kf);
+ ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
+ buf->buffer.length);
+ buf->update_needed = false;
+ } else {
+ kfifo_reset_out(&buf->kf);
+ }
+ mutex_unlock(&buf->user_lock);
+
+ return ret;
+}
+
+static int iio_get_length_kfifo(struct iio_buffer *r)
+{
+ return r->length;
+}
+
+/* Standard "enable" and "length" sysfs attributes for the buffer dir. */
+static IIO_BUFFER_ENABLE_ATTR;
+static IIO_BUFFER_LENGTH_ATTR;
+
+static struct attribute *iio_kfifo_attributes[] = {
+ &dev_attr_length.attr,
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group iio_kfifo_attribute_group = {
+ .attrs = iio_kfifo_attributes,
+ .name = "buffer",
+};
+
+static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
+{
+ return r->bytes_per_datum;
+}
+
+/* Mark the fifo for reallocation on the next request_update. */
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ kf->update_needed = true;
+ return 0;
+}
+
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+{
+ if (r->bytes_per_datum != bpd) {
+ r->bytes_per_datum = bpd;
+ iio_mark_update_needed_kfifo(r);
+ }
+ return 0;
+}
+
+static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+{
+ /* Avoid an invalid state */
+ if (length < 2)
+ length = 2;
+ if (r->length != length) {
+ r->length = length;
+ iio_mark_update_needed_kfifo(r);
+ }
+ return 0;
+}
+
+/*
+ * Push one record into the fifo and wake poll()ers. Returns -EBUSY when
+ * the fifo is full (the sample is dropped).
+ */
+static int iio_store_to_kfifo(struct iio_buffer *r,
+ const void *data)
+{
+ int ret;
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ ret = kfifo_in(&kf->kf, data, 1);
+ if (ret != 1)
+ return -EBUSY;
+
+ wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
+
+ return 0;
+}
+
+/*
+ * Push up to @n records; returns the number actually stored (may be short
+ * of @n when the fifo fills), or -EBUSY when nothing fit.
+ */
+static int iio_store_to_kfifo_buffer(struct iio_buffer *r,
+ const void *data, int n)
+{
+ int ret;
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ ret = kfifo_in(&kf->kf, data, n);
+ if (ret <= 0)
+ return -EBUSY;
+
+ wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
+
+ return ret;
+}
+
+/*
+ * Copy at most @n bytes to userspace. Rejects reads shorter than one
+ * record; returns bytes copied or a negative errno.
+ */
+static int iio_read_first_n_kfifo(struct iio_buffer *r,
+ size_t n, char __user *buf)
+{
+ int ret, copied;
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+
+ if (mutex_lock_interruptible(&kf->user_lock))
+ return -ERESTARTSYS;
+
+ if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf))
+ ret = -EINVAL;
+ else
+ ret = kfifo_to_user(&kf->kf, buf, n, &copied);
+ mutex_unlock(&kf->user_lock);
+ if (ret < 0)
+ return ret;
+
+ return copied;
+}
+
+/* poll() support: true when at least one record is queued. */
+static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ bool empty;
+
+ mutex_lock(&kf->user_lock);
+ empty = kfifo_is_empty(&kf->kf);
+ mutex_unlock(&kf->user_lock);
+
+ return !empty;
+}
+
+/* Buffer refcount release: free fifo storage and the wrapper itself. */
+static void iio_kfifo_buffer_release(struct iio_buffer *buffer)
+{
+ struct iio_kfifo *kf = iio_to_kfifo(buffer);
+
+ mutex_destroy(&kf->user_lock);
+ kfifo_free(&kf->kf);
+ kfree(kf);
+}
+
+/* vtable wired into iio_buffer by iio_kfifo_allocate(). */
+static const struct iio_buffer_access_funcs kfifo_access_funcs = {
+ .store_to = &iio_store_to_kfifo,
+ .store_to_buffer = &iio_store_to_kfifo_buffer,
+ .read_first_n = &iio_read_first_n_kfifo,
+ .data_available = iio_kfifo_buf_data_available,
+ .request_update = &iio_request_update_kfifo,
+ .get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
+ .set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
+ .get_length = &iio_get_length_kfifo,
+ .set_length = &iio_set_length_kfifo,
+ .release = &iio_kfifo_buffer_release,
+};
+
+/*
+ * Allocate a kfifo-backed iio_buffer. The fifo storage itself is created
+ * lazily by request_update (update_needed starts true). Returns NULL on
+ * allocation failure.
+ */
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
+{
+ struct iio_kfifo *kf;
+
+ kf = kzalloc(sizeof *kf, GFP_KERNEL);
+ if (!kf)
+ return NULL;
+ kf->update_needed = true;
+ iio_buffer_init(&kf->buffer);
+ kf->buffer.attrs = &iio_kfifo_attribute_group;
+ kf->buffer.access = &kfifo_access_funcs;
+ /* Minimum sane length; matches the clamp in iio_set_length_kfifo(). */
+ kf->buffer.length = 2;
+ mutex_init(&kf->user_lock);
+ return &kf->buffer;
+}
+EXPORT_SYMBOL(iio_kfifo_allocate);
+
+/* Drop a reference; the release callback frees everything on last put. */
+void iio_kfifo_free(struct iio_buffer *r)
+{
+ iio_buffer_put(r);
+}
+EXPORT_SYMBOL(iio_kfifo_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
new file mode 100644
index 00000000000000..5bea821adcaeae
--- /dev/null
+++ b/drivers/iio/light/Kconfig
@@ -0,0 +1,195 @@
+#
+# Light sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Light sensors"
+
+config ADJD_S311
+ tristate "ADJD-S311-CR999 digital color sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ depends on I2C
+ help
+ If you say yes here you get support for the Avago ADJD-S311-CR999
+ digital color light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called adjd_s311.
+
+config AL3320A
+ tristate "AL3320A ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Dyna Image AL3320A
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called al3320a.
+
+config APDS9300
+ tristate "APDS9300 ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Avago APDS9300
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds9300.
+
+config CM32181
+ depends on I2C
+ tristate "CM32181 driver"
+ help
+ Say Y here if you use cm32181.
+ This option enables ambient light sensor using
+ Capella cm32181 device driver.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm32181.
+
+config CM36651
+ depends on I2C
+ tristate "CM36651 driver"
+ help
+ Say Y here if you use cm36651.
+ This option enables proximity & RGB sensor using
+ Capella cm36651 device driver.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cm36651.
+
+config GP2AP020A00F
+ tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IRQ_WORK
+ help
+ Say Y here if you have a Sharp GP2AP020A00F proximity/ALS combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap020a00f.
+
+config ISL29125
+ tristate "Intersil ISL29125 digital color light sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for the Intersil ISL29125
+ RGB light sensor for I2C.
+
+ To compile this driver as a module, choose M here: the module will be
+ called isl29125.
+
+config HID_SENSOR_ALS
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID ALS"
+ help
+ Say yes here to build support for the HID SENSOR
+ Ambient light sensor.
+
+config HID_SENSOR_PROX
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PROX"
+ help
+ Say yes here to build support for the HID SENSOR
+ Proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-sensor-prox.
+
+config SENSORS_LM3533
+ tristate "LM3533 ambient light sensor"
+ depends on MFD_LM3533
+ help
+ If you say yes here you get support for the ambient light sensor
+ interface on National Semiconductor / TI LM3533 Lighting Power
+ chips.
+
+ The sensor interface can be used to control the LEDs and backlights
+ of the chip through defining five light zones and three sets of
+ corresponding output-current values.
+
+ The driver provides raw and mean adc readings along with the current
+ light zone through sysfs. A threshold event can be generated on zone
+ changes. The ALS-control output values can be set per zone for the
+ three current output channels.
+
+config LTR501
+ tristate "LTR-501ALS-01 light sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Lite-On LTR-501ALS-01
+ ambient light and proximity sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ltr501.
+
+config TCS3414
+ tristate "TAOS TCS3414 digital color sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the TAOS TCS3414
+ family of digital color sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called tcs3414.
+
+config TCS3472
+ tristate "TAOS TCS3472 color light-to-digital converter"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the TAOS TCS3472
+ family of color light-to-digital converters with IR filter.
+
+ This driver can also be built as a module. If so, the module
+ will be called tcs3472.
+
+config SENSORS_TSL2563
+ tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Taos TSL2560,
+ TSL2561, TSL2562 and TSL2563 ambient light sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsl2563.
+
+config TSL4531
+ tristate "TAOS TSL4531 ambient light sensors"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the TAOS TSL4531 family
+ of ambient light sensors with direct lux output.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsl4531.
+
+config VCNL4000
+ tristate "VCNL4000 combined ALS and proximity sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VCNL4000
+ combined ambient light and proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl4000.
+
+endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
new file mode 100644
index 00000000000000..47877a36cc1286
--- /dev/null
+++ b/drivers/iio/light/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for IIO Light sensors
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADJD_S311) += adjd_s311.o
+obj-$(CONFIG_AL3320A) += al3320a.o
+obj-$(CONFIG_APDS9300) += apds9300.o
+obj-$(CONFIG_CM32181) += cm32181.o
+obj-$(CONFIG_CM36651) += cm36651.o
+obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
+obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
+obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_ISL29125) += isl29125.o
+obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_TCS3414) += tcs3414.o
+obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_TSL4531) += tsl4531.o
+obj-$(CONFIG_VCNL4000) += vcnl4000.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
new file mode 100644
index 00000000000000..09ad5f1ce539a4
--- /dev/null
+++ b/drivers/iio/light/adjd_s311.c
@@ -0,0 +1,321 @@
+/*
+ * adjd_s311.c - Support for ADJD-S311-CR999 digital color sensor
+ *
+ * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * driver for ADJD-S311-CR999 digital color sensor (10-bit channels for
+ * red, green, blue, clear); 7-bit I2C slave address 0x74
+ *
+ * limitations: no calibration, no offset mode, no sleep mode
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define ADJD_S311_DRV_NAME "adjd_s311"
+
+#define ADJD_S311_CTRL 0x00
+#define ADJD_S311_CONFIG 0x01
+#define ADJD_S311_CAP_RED 0x06
+#define ADJD_S311_CAP_GREEN 0x07
+#define ADJD_S311_CAP_BLUE 0x08
+#define ADJD_S311_CAP_CLEAR 0x09
+#define ADJD_S311_INT_RED 0x0a
+#define ADJD_S311_INT_GREEN 0x0c
+#define ADJD_S311_INT_BLUE 0x0e
+#define ADJD_S311_INT_CLEAR 0x10
+#define ADJD_S311_DATA_RED 0x40
+#define ADJD_S311_DATA_GREEN 0x42
+#define ADJD_S311_DATA_BLUE 0x44
+#define ADJD_S311_DATA_CLEAR 0x46
+#define ADJD_S311_OFFSET_RED 0x48
+#define ADJD_S311_OFFSET_GREEN 0x49
+#define ADJD_S311_OFFSET_BLUE 0x4a
+#define ADJD_S311_OFFSET_CLEAR 0x4b
+
+#define ADJD_S311_CTRL_GOFS 0x02
+#define ADJD_S311_CTRL_GSSR 0x01
+#define ADJD_S311_CAP_MASK 0x0f
+#define ADJD_S311_INT_MASK 0x0fff
+#define ADJD_S311_DATA_MASK 0x03ff
+
+/* Per-device state: the bound I2C client and a scratch buffer for scan data */
+struct adjd_s311_data {
+ struct i2c_client *client;
+ u16 *buffer; /* allocated in update_scan_mode, freed in remove */
+};
+
+/* Channel indices; the register macros below derive addresses from these */
+enum adjd_s311_channel_idx {
+ IDX_RED, IDX_GREEN, IDX_BLUE, IDX_CLEAR
+};
+
+/* DATA and INT registers are 16-bit, so consecutive channels are 2 apart */
+#define ADJD_S311_DATA_REG(chan) (ADJD_S311_DATA_RED + (chan) * 2)
+#define ADJD_S311_INT_REG(chan) (ADJD_S311_INT_RED + (chan) * 2)
+#define ADJD_S311_CAP_REG(chan) (ADJD_S311_CAP_RED + (chan))
+
+/*
+ * Start a measurement by setting the GSSR bit in CTRL, then poll until
+ * the chip clears it (data ready).  Up to 10 polls, 20 ms apart.
+ * Returns 0 on success, a negative errno on I2C failure, or -EIO if the
+ * conversion never completed within the polling budget.
+ */
+static int adjd_s311_req_data(struct iio_dev *indio_dev)
+{
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+ int tries = 10;
+
+ int ret = i2c_smbus_write_byte_data(data->client, ADJD_S311_CTRL,
+ ADJD_S311_CTRL_GSSR);
+ if (ret < 0)
+ return ret;
+
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client, ADJD_S311_CTRL);
+ if (ret < 0)
+ return ret;
+ /* GSSR self-clears once the conversion is done */
+ if (!(ret & ADJD_S311_CTRL_GSSR))
+ break;
+ msleep(20);
+ }
+
+ /* tries is -1 only when the loop exhausted without the bit clearing */
+ if (tries < 0) {
+ dev_err(&data->client->dev,
+ "adjd_s311_req_data() failed, data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Trigger a measurement and read one 16-bit data register, masking the
+ * result down to the 10 valid data bits.  Returns 0 and stores the value
+ * in *val, or a negative errno.
+ */
+static int adjd_s311_read_data(struct iio_dev *indio_dev, u8 reg, int *val)
+{
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+
+ int ret = adjd_s311_req_data(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & ADJD_S311_DATA_MASK;
+
+ return 0;
+}
+
+/*
+ * Triggered-buffer bottom half: request a conversion, read each enabled
+ * scan channel into data->buffer, and push the sample set (plus the
+ * timestamp captured at entry) to the IIO buffers.  On any I2C error the
+ * partial sample is dropped; the trigger is always notified done.
+ */
+static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+ s64 time_ns = iio_get_time_ns();
+ int i, j = 0;
+
+ int ret = adjd_s311_req_data(indio_dev);
+ if (ret < 0)
+ goto done;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = i2c_smbus_read_word_data(data->client,
+ ADJD_S311_DATA_REG(i));
+ if (ret < 0)
+ goto done;
+
+ data->buffer[j++] = ret & ADJD_S311_DATA_MASK;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, time_ns);
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * One intensity channel per color.  Each channel exposes raw value,
+ * hardware gain (CAP register) and integration time (INT register);
+ * samples are unsigned 10-bit values stored in 16 bits.
+ */
+#define ADJD_S311_CHANNEL(_color, _scan_idx) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .address = (IDX_##_color), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .channel2 = (IIO_MOD_LIGHT_##_color), \
+ .scan_index = (_scan_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec adjd_s311_channels[] = {
+ ADJD_S311_CHANNEL(RED, 0),
+ ADJD_S311_CHANNEL(GREEN, 1),
+ ADJD_S311_CHANNEL(BLUE, 2),
+ ADJD_S311_CHANNEL(CLEAR, 3),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * read_raw callback.  RAW triggers a fresh conversion; HARDWAREGAIN and
+ * INT_TIME read back the per-channel CAP and INT registers directly.
+ */
+static int adjd_s311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = adjd_s311_read_data(indio_dev,
+ ADJD_S311_DATA_REG(chan->address), val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = i2c_smbus_read_byte_data(data->client,
+ ADJD_S311_CAP_REG(chan->address));
+ if (ret < 0)
+ return ret;
+ *val = ret & ADJD_S311_CAP_MASK;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = i2c_smbus_read_word_data(data->client,
+ ADJD_S311_INT_REG(chan->address));
+ if (ret < 0)
+ return ret;
+ *val = 0;
+ /*
+ * not documented, based on measurement:
+ * 4095 LSBs correspond to roughly 4 ms
+ */
+ *val2 = ret & ADJD_S311_INT_MASK;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+/*
+ * write_raw callback.  Range-checks against the register masks before
+ * writing gain (CAP, byte) or integration time (INT, word).  INT_TIME
+ * writes use the micro part only (val must be 0), matching read_raw.
+ */
+static int adjd_s311_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ if (val < 0 || val > ADJD_S311_CAP_MASK)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(data->client,
+ ADJD_S311_CAP_REG(chan->address), val);
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0 || val2 < 0 || val2 > ADJD_S311_INT_MASK)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_data(data->client,
+ ADJD_S311_INT_REG(chan->address), val2);
+ }
+ return -EINVAL;
+}
+
+/*
+ * Reallocate the scan buffer to match the newly selected scan mask.
+ * scan_bytes is recomputed by the core before this callback runs.
+ */
+static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+
+ kfree(data->buffer);
+ data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (data->buffer == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct iio_info adjd_s311_info = {
+ .read_raw = adjd_s311_read_raw,
+ .write_raw = adjd_s311_write_raw,
+ .update_scan_mode = adjd_s311_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: allocate the IIO device (devm-managed), wire up channels and
+ * the triggered buffer, then register.  The triggered buffer is the only
+ * resource needing explicit cleanup on the error path; the device
+ * allocation is released automatically by devm.
+ */
+static int adjd_s311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adjd_s311_data *data;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adjd_s311_info;
+ indio_dev->name = ADJD_S311_DRV_NAME;
+ indio_dev->channels = adjd_s311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adjd_s311_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ err = iio_triggered_buffer_setup(indio_dev, NULL,
+ adjd_s311_trigger_handler, NULL);
+ if (err < 0)
+ return err;
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto exit_unreg_buffer;
+
+ dev_info(&client->dev, "ADJD-S311 color sensor registered\n");
+
+ return 0;
+
+exit_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return err;
+}
+
+/*
+ * Remove: unregister, tear down the triggered buffer, and free the scan
+ * buffer allocated in update_scan_mode (may be NULL; kfree handles that).
+ */
+static int adjd_s311_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct adjd_s311_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(data->buffer);
+
+ return 0;
+}
+
+/* I2C device ID table and driver registration boilerplate */
+static const struct i2c_device_id adjd_s311_id[] = {
+ { "adjd_s311", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adjd_s311_id);
+
+static struct i2c_driver adjd_s311_driver = {
+ .driver = {
+ .name = ADJD_S311_DRV_NAME,
+ },
+ .probe = adjd_s311_probe,
+ .remove = adjd_s311_remove,
+ .id_table = adjd_s311_id,
+};
+module_i2c_driver(adjd_s311_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("ADJD-S311 color sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
new file mode 100644
index 00000000000000..6aac6513fd4121
--- /dev/null
+++ b/drivers/iio/light/al3320a.c
@@ -0,0 +1,232 @@
+/*
+ * AL3320A - Dyna Image Ambient Light Sensor
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for AL3320A (7-bit I2C slave address 0x1C).
+ *
+ * TODO: interrupt support, thresholds
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define AL3320A_DRV_NAME "al3320a"
+
+#define AL3320A_REG_CONFIG 0x00
+#define AL3320A_REG_STATUS 0x01
+#define AL3320A_REG_INT 0x02
+#define AL3320A_REG_WAIT 0x06
+#define AL3320A_REG_CONFIG_RANGE 0x07
+#define AL3320A_REG_PERSIST 0x08
+#define AL3320A_REG_MEAN_TIME 0x09
+#define AL3320A_REG_ADUMMY 0x0A
+#define AL3320A_REG_DATA_LOW 0x22
+
+#define AL3320A_REG_LOW_THRESH_LOW 0x30
+#define AL3320A_REG_LOW_THRESH_HIGH 0x31
+#define AL3320A_REG_HIGH_THRESH_LOW 0x32
+#define AL3320A_REG_HIGH_THRESH_HIGH 0x33
+
+#define AL3320A_CONFIG_DISABLE 0x00
+#define AL3320A_CONFIG_ENABLE 0x01
+
+#define AL3320A_GAIN_SHIFT 1
+#define AL3320A_GAIN_MASK (BIT(2) | BIT(1))
+
+/* chip params default values */
+#define AL3320A_DEFAULT_MEAN_TIME 4
+#define AL3320A_DEFAULT_WAIT_TIME 0 /* no waiting */
+
+#define AL3320A_SCALE_AVAILABLE "0.512 0.128 0.032 0.01"
+
+/* Gain/range settings written to AL3320A_REG_CONFIG_RANGE */
+enum al3320a_range {
+ AL3320A_RANGE_1, /* 33.28 Klx */
+ AL3320A_RANGE_2, /* 8.32 Klx */
+ AL3320A_RANGE_3, /* 2.08 Klx */
+ AL3320A_RANGE_4 /* 0.65 Klx */
+};
+
+/* {int, micro} lux-per-count scale for each range, same order as the enum */
+static const int al3320a_scales[][2] = {
+ {0, 512000}, {0, 128000}, {0, 32000}, {0, 10000}
+};
+
+/* Per-device state: only the bound I2C client is needed */
+struct al3320a_data {
+ struct i2c_client *client;
+};
+
+/* Single illuminance channel exposing raw count and scale */
+static const struct iio_chan_spec al3320a_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static IIO_CONST_ATTR(in_illuminance_scale_available, AL3320A_SCALE_AVAILABLE);
+
+static struct attribute *al3320a_attributes[] = {
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group al3320a_attribute_group = {
+ .attrs = al3320a_attributes,
+};
+
+/*
+ * Chip init: power on, then program default range (RANGE_3), mean time
+ * and wait time.  Returns 0 or the first failing i2c_smbus errno.
+ */
+static int al3320a_init(struct al3320a_data *data)
+{
+ int ret;
+
+ /* power on */
+ ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG,
+ AL3320A_CONFIG_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG_RANGE,
+ AL3320A_RANGE_3 << AL3320A_GAIN_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_MEAN_TIME,
+ AL3320A_DEFAULT_MEAN_TIME);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_WAIT,
+ AL3320A_DEFAULT_WAIT_TIME);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * read_raw callback.  RAW reads the 16-bit ALS value; SCALE maps the
+ * current range bits to the matching entry of al3320a_scales[].
+ */
+static int al3320a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct al3320a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * ALS ADC value is stored in two adjacent registers:
+ * - low byte of output is stored at AL3320A_REG_DATA_LOW
+ * - high byte of output is stored at AL3320A_REG_DATA_LOW + 1
+ */
+ ret = i2c_smbus_read_word_data(data->client,
+ AL3320A_REG_DATA_LOW);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = i2c_smbus_read_byte_data(data->client,
+ AL3320A_REG_CONFIG_RANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & AL3320A_GAIN_MASK) >> AL3320A_GAIN_SHIFT;
+ *val = al3320a_scales[ret][0];
+ *val2 = al3320a_scales[ret][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+/*
+ * write_raw callback.  Only SCALE is writable: the requested {val,val2}
+ * must exactly match one of al3320a_scales[]; its index becomes the new
+ * range setting.  Anything else is -EINVAL.
+ */
+static int al3320a_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct al3320a_data *data = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(al3320a_scales); i++) {
+ if (val == al3320a_scales[i][0] &&
+ val2 == al3320a_scales[i][1])
+ return i2c_smbus_write_byte_data(data->client,
+ AL3320A_REG_CONFIG_RANGE,
+ i << AL3320A_GAIN_SHIFT);
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info al3320a_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = al3320a_read_raw,
+ .write_raw = al3320a_write_raw,
+ .attrs = &al3320a_attribute_group,
+};
+
+/*
+ * Probe: fully devm-managed — device allocation and registration need no
+ * explicit teardown.  Fails if the chip's initial register writes fail.
+ */
+static int al3320a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct al3320a_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &al3320a_info;
+ indio_dev->name = AL3320A_DRV_NAME;
+ indio_dev->channels = al3320a_channels;
+ indio_dev->num_channels = ARRAY_SIZE(al3320a_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = al3320a_init(data);
+ if (ret < 0) {
+ dev_err(&client->dev, "al3320a chip init failed\n");
+ return ret;
+ }
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+/* Remove: power the sensor down; everything else is devm-managed */
+static int al3320a_remove(struct i2c_client *client)
+{
+ return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG,
+ AL3320A_CONFIG_DISABLE);
+}
+
+static const struct i2c_device_id al3320a_id[] = {
+ {"al3320a", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, al3320a_id);
+
+static struct i2c_driver al3320a_driver = {
+ .driver = {
+ .name = AL3320A_DRV_NAME,
+ },
+ .probe = al3320a_probe,
+ .remove = al3320a_remove,
+ .id_table = al3320a_id,
+};
+
+module_i2c_driver(al3320a_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>")
+MODULE_DESCRIPTION("AL3320A Ambient Light Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
new file mode 100644
index 00000000000000..9ddde0ca9c3415
--- /dev/null
+++ b/drivers/iio/light/apds9300.c
@@ -0,0 +1,531 @@
+/*
+ * apds9300.c - IIO driver for Avago APDS9300 ambient light sensor
+ *
+ * Copyright 2013 Oleksandr Kravchenko <o.v.kravchenko@globallogic.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#define APDS9300_DRV_NAME "apds9300"
+#define APDS9300_IRQ_NAME "apds9300_event"
+
+/* Command register bits */
+#define APDS9300_CMD BIT(7) /* Select command register. Must write as 1 */
+#define APDS9300_WORD BIT(5) /* I2C write/read: if 1 word, if 0 byte */
+#define APDS9300_CLEAR BIT(6) /* Interrupt clear. Clears pending interrupt */
+
+/* Register set */
+#define APDS9300_CONTROL 0x00 /* Control of basic functions */
+#define APDS9300_THRESHLOWLOW 0x02 /* Low byte of low interrupt threshold */
+#define APDS9300_THRESHHIGHLOW 0x04 /* Low byte of high interrupt threshold */
+#define APDS9300_INTERRUPT 0x06 /* Interrupt control */
+#define APDS9300_DATA0LOW 0x0c /* Low byte of ADC channel 0 */
+#define APDS9300_DATA1LOW 0x0e /* Low byte of ADC channel 1 */
+
+/* Power on/off value for APDS9300_CONTROL register */
+#define APDS9300_POWER_ON 0x03
+#define APDS9300_POWER_OFF 0x00
+
+/* Interrupts */
+#define APDS9300_INTR_ENABLE 0x10
+/* Interrupt Persist Function: Any value outside of threshold range */
+#define APDS9300_THRESH_INTR 0x01
+
+#define APDS9300_THRESH_MAX 0xffff /* Max threshold value */
+
+/*
+ * Per-device state.  power_state, thresh_low/hi and intr_en mirror the
+ * last values written to the chip; mutex serializes register access and
+ * keeps the cached copies consistent.
+ */
+struct apds9300_data {
+ struct i2c_client *client;
+ struct mutex mutex;
+ int power_state;
+ int thresh_low;
+ int thresh_hi;
+ int intr_en;
+};
+
+/* Lux calculation */
+
+/* Calculated values 1000 * (CH1/CH0)^1.4 for CH1/CH0 from 0 to 0.52 */
+static const u16 apds9300_lux_ratio[] = {
+ 0, 2, 4, 7, 11, 15, 19, 24, 29, 34, 40, 45, 51, 57, 64, 70, 77, 84, 91,
+ 98, 105, 112, 120, 128, 136, 144, 152, 160, 168, 177, 185, 194, 203,
+ 212, 221, 230, 239, 249, 258, 268, 277, 287, 297, 307, 317, 327, 337,
+ 347, 358, 368, 379, 390, 400,
+};
+
+/*
+ * Convert the two ADC channel readings to lux using a piecewise formula
+ * selected by the CH1/CH0 ratio (presumably the datasheet lux equation
+ * scaled for integer math — coefficients are x100000, restored by the
+ * final division; TODO confirm against the APDS9300 datasheet).
+ */
+static unsigned long apds9300_calculate_lux(u16 ch0, u16 ch1)
+{
+ unsigned long lux, tmp;
+
+ /* avoid division by zero */
+ if (ch0 == 0)
+ return 0;
+
+ /* tmp = 100 * CH1/CH0, rounded up; indexes the ^1.4 lookup table */
+ tmp = DIV_ROUND_UP(ch1 * 100, ch0);
+ if (tmp <= 52) {
+ lux = 3150 * ch0 - (unsigned long)DIV_ROUND_UP_ULL(ch0
+ * apds9300_lux_ratio[tmp] * 5930ull, 1000);
+ } else if (tmp <= 65) {
+ lux = 2290 * ch0 - 2910 * ch1;
+ } else if (tmp <= 80) {
+ lux = 1570 * ch0 - 1800 * ch1;
+ } else if (tmp <= 130) {
+ lux = 338 * ch0 - 260 * ch1;
+ } else {
+ lux = 0;
+ }
+
+ return lux / 100000;
+}
+
+/*
+ * Read one 16-bit ADC channel (0 or 1).  Requires the chip to be powered
+ * (-EBUSY otherwise).  Returns the raw value or a negative errno.
+ */
+static int apds9300_get_adc_val(struct apds9300_data *data, int adc_number)
+{
+ int ret;
+ u8 flags = APDS9300_CMD | APDS9300_WORD;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ /* Select ADC0 or ADC1 data register */
+ flags |= adc_number ? APDS9300_DATA1LOW : APDS9300_DATA0LOW;
+
+ ret = i2c_smbus_read_word_data(data->client, flags);
+ if (ret < 0)
+ dev_err(&data->client->dev,
+ "failed to read ADC%d value\n", adc_number);
+
+ return ret;
+}
+
+/*
+ * Write the low interrupt threshold and cache it in data->thresh_low.
+ * Requires power on; value is limited to the 16-bit register range.
+ */
+static int apds9300_set_thresh_low(struct apds9300_data *data, int value)
+{
+ int ret;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ if (value > APDS9300_THRESH_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(data->client, APDS9300_THRESHLOWLOW
+ | APDS9300_CMD | APDS9300_WORD, value);
+ if (ret) {
+ dev_err(&data->client->dev, "failed to set thresh_low\n");
+ return ret;
+ }
+ data->thresh_low = value;
+
+ return 0;
+}
+
+/*
+ * Write the high interrupt threshold and cache it in data->thresh_hi.
+ * Same constraints as apds9300_set_thresh_low().
+ */
+static int apds9300_set_thresh_hi(struct apds9300_data *data, int value)
+{
+ int ret;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ if (value > APDS9300_THRESH_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(data->client, APDS9300_THRESHHIGHLOW
+ | APDS9300_CMD | APDS9300_WORD, value);
+ if (ret) {
+ dev_err(&data->client->dev, "failed to set thresh_hi\n");
+ return ret;
+ }
+ data->thresh_hi = value;
+
+ return 0;
+}
+
+/*
+ * Enable or disable the threshold interrupt and cache the new state in
+ * data->intr_en.  Requires the chip to be powered.
+ */
+static int apds9300_set_intr_state(struct apds9300_data *data, int state)
+{
+ int ret;
+ u8 cmd;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ cmd = state ? APDS9300_INTR_ENABLE | APDS9300_THRESH_INTR : 0x00;
+ ret = i2c_smbus_write_byte_data(data->client,
+ APDS9300_INTERRUPT | APDS9300_CMD, cmd);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set interrupt state %d\n", state);
+ return ret;
+ }
+ data->intr_en = state;
+
+ return 0;
+}
+
+/*
+ * Power the chip on or off via the CONTROL register and cache the state
+ * in data->power_state (consulted by the -EBUSY guards above).
+ */
+static int apds9300_set_power_state(struct apds9300_data *data, int state)
+{
+ int ret;
+ u8 cmd;
+
+ cmd = state ? APDS9300_POWER_ON : APDS9300_POWER_OFF;
+ ret = i2c_smbus_write_byte_data(data->client,
+ APDS9300_CONTROL | APDS9300_CMD, cmd);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set power state %d\n", state);
+ return ret;
+ }
+ data->power_state = state;
+
+ return 0;
+}
+
+/* Acknowledge a pending interrupt by writing the CLEAR command bit */
+static void apds9300_clear_intr(struct apds9300_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte(data->client, APDS9300_CLEAR | APDS9300_CMD);
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to clear interrupt\n");
+}
+
+/*
+ * Probe-time chip init: power-cycle the device, verify its presence by
+ * reading back the power-on value from CONTROL, and make sure the
+ * interrupt starts out disabled.  Returns 0 or a negative errno
+ * (-ENODEV when the read-back does not match, which also covers I2C
+ * read errors since those return negative values != APDS9300_POWER_ON).
+ */
+static int apds9300_chip_init(struct apds9300_data *data)
+{
+ int ret;
+
+ /* Need to set power off to ensure that the chip is off */
+ ret = apds9300_set_power_state(data, 0);
+ if (ret < 0)
+ goto err;
+ /*
+ * Probe the chip. To do so we try to power up the device and then to
+ * read back the 0x03 code
+ */
+ ret = apds9300_set_power_state(data, 1);
+ if (ret < 0)
+ goto err;
+ ret = i2c_smbus_read_byte_data(data->client,
+ APDS9300_CONTROL | APDS9300_CMD);
+ if (ret != APDS9300_POWER_ON) {
+ ret = -ENODEV;
+ goto err;
+ }
+ /*
+ * Disable the interrupt to ensure it is not left enabled,
+ * e.g. after a device soft reset
+ */
+ ret = apds9300_set_intr_state(data, 0);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to init the chip\n");
+ return ret;
+}
+
+/*
+ * read_raw callback, serialized by data->mutex.  IIO_LIGHT yields a
+ * processed lux value computed from both ADC channels; IIO_INTENSITY
+ * yields the raw reading of the channel selected by chan->channel.
+ */
+static int apds9300_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ int ch0, ch1, ret = -EINVAL;
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ch0 = apds9300_get_adc_val(data, 0);
+ if (ch0 < 0) {
+ ret = ch0;
+ break;
+ }
+ ch1 = apds9300_get_adc_val(data, 1);
+ if (ch1 < 0) {
+ ret = ch1;
+ break;
+ }
+ *val = apds9300_calculate_lux(ch0, ch1);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_INTENSITY:
+ ret = apds9300_get_adc_val(data, chan->channel);
+ if (ret < 0)
+ break;
+ *val = ret;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+/*
+ * Event-value read: return the cached rising (high) or falling (low)
+ * threshold; no register access is needed.
+ */
+static int apds9300_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = data->thresh_hi;
+ break;
+ case IIO_EV_DIR_FALLING:
+ *val = data->thresh_low;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT;
+}
+
+/*
+ * Event-value write: program the high threshold for rising events, the
+ * low threshold otherwise, under data->mutex.
+ */
+static int apds9300_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ if (dir == IIO_EV_DIR_RISING)
+ ret = apds9300_set_thresh_hi(data, val);
+ else
+ ret = apds9300_set_thresh_low(data, val);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+/* Event-enable read: report the cached interrupt-enable state */
+static int apds9300_read_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ return data->intr_en;
+}
+
+/* Event-enable write: toggle the chip interrupt under data->mutex */
+static int apds9300_write_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_intr_state(data, state);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+/* Used when no IRQ is wired up: raw/processed reads only, no events */
+static const struct iio_info apds9300_info_no_irq = {
+ .driver_module = THIS_MODULE,
+ .read_raw = apds9300_read_raw,
+};
+
+/* Full callback set, including threshold-event handling (IRQ present) */
+static const struct iio_info apds9300_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = apds9300_read_raw,
+ .read_event_value = apds9300_read_thresh,
+ .write_event_value = apds9300_write_thresh,
+ .read_event_config = apds9300_read_interrupt_config,
+ .write_event_config = apds9300_write_interrupt_config,
+};
+
+/* Rising/falling threshold events, each with value and enable controls */
+static const struct iio_event_spec apds9300_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/*
+ * Channels: processed lux, raw intensity ch0 (visible+IR, carries the
+ * threshold events) and raw intensity ch1 (IR only).
+ */
+static const struct iio_chan_spec apds9300_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel = 0,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .channel = 0,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = apds9300_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9300_event_spec),
+ }, {
+ .type = IIO_INTENSITY,
+ .channel = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+/*
+ * Threaded IRQ handler: push a threshold event (direction unknown at
+ * this point, hence EITHER) and acknowledge the chip interrupt.
+ */
+static irqreturn_t apds9300_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *dev_info = private;
+ struct apds9300_data *data = iio_priv(dev_info);
+
+ iio_push_event(dev_info,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+
+ apds9300_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe: allocate the devm-managed IIO device, init the chip (leaves it
+ * powered on), pick the event-capable info struct only when an IRQ is
+ * available, request the IRQ (devm) and register.  All error paths power
+ * the chip back off.
+ */
+static int apds9300_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct apds9300_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ ret = apds9300_chip_init(data);
+ if (ret < 0)
+ goto err;
+
+ mutex_init(&data->mutex);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = apds9300_channels;
+ indio_dev->num_channels = ARRAY_SIZE(apds9300_channels);
+ indio_dev->name = APDS9300_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq)
+ indio_dev->info = &apds9300_info;
+ else
+ indio_dev->info = &apds9300_info_no_irq;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, apds9300_interrupt_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ APDS9300_IRQ_NAME, indio_dev);
+ if (ret) {
+ /* ret is a negative errno; negate for readability */
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ goto err;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ /* Ensure that power off in case of error */
+ apds9300_set_power_state(data, 0);
+ return ret;
+}
+
+/*
+ * Remove: unregister first so no new accesses race the shutdown, then
+ * disable the interrupt and power the chip off.
+ */
+static int apds9300_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Ensure that power off and interrupts are disabled */
+ apds9300_set_intr_state(data, 0);
+ apds9300_set_power_state(data, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: power the chip off (state cached for the guards) */
+static int apds9300_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_power_state(data, 0);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+/* System resume: power the chip back on */
+static int apds9300_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_power_state(data, 1);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(apds9300_pm_ops, apds9300_suspend, apds9300_resume);
+#define APDS9300_PM_OPS (&apds9300_pm_ops)
+#else
+#define APDS9300_PM_OPS NULL
+#endif
+
+/* I2C device ID table and driver registration boilerplate */
+static struct i2c_device_id apds9300_id[] = {
+ { APDS9300_DRV_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9300_id);
+
+static struct i2c_driver apds9300_driver = {
+ .driver = {
+ .name = APDS9300_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = APDS9300_PM_OPS,
+ },
+ .probe = apds9300_probe,
+ .remove = apds9300_remove,
+ .id_table = apds9300_id,
+};
+
+module_i2c_driver(apds9300_driver);
+
+MODULE_AUTHOR("Kravchenko Oleksandr <o.v.kravchenko@globallogic.com>");
+MODULE_AUTHOR("GlobalLogic inc.");
+MODULE_DESCRIPTION("APDS9300 ambient light photo sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
new file mode 100644
index 00000000000000..ad36b294e4d506
--- /dev/null
+++ b/drivers/iio/light/cm32181.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2013 Capella Microsystems Inc.
+ * Author: Kevin Tsai <ktsai@capellamicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/init.h>
+
+/* Registers Address */
+#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_ALS 0x04
+#define CM32181_REG_ADDR_STATUS 0x06
+#define CM32181_REG_ADDR_ID 0x07
+
+/* Number of Configurable Registers */
+#define CM32181_CONF_REG_NUM 0x01
+
+/* CMD register */
+#define CM32181_CMD_ALS_ENABLE 0x00
+#define CM32181_CMD_ALS_DISABLE 0x01
+#define CM32181_CMD_ALS_INT_EN 0x02
+
+#define CM32181_CMD_ALS_IT_SHIFT 6
+#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
+#define CM32181_CMD_ALS_IT_DEFAULT (0x00 << CM32181_CMD_ALS_IT_SHIFT)
+
+#define CM32181_CMD_ALS_SM_SHIFT 11
+#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
+#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
+
+#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
+#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 1000
+#define CM32181_CALIBSCALE_RESOLUTION 1000
+#define MLUX_PER_LUX 1000
+
+/* Addresses of the registers mirrored in cm32181_chip.conf_regs. */
+static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
+ CM32181_REG_ADDR_CMD,
+};
+
+/*
+ * Parallel arrays: als_it_bits[i] is the ALS_IT register code whose
+ * integration time is als_it_value[i] microseconds.
+ */
+static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
+static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
+ 800000};
+
+/* Per-device state; lock guards conf_regs read-modify-write cycles. */
+struct cm32181_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ u16 conf_regs[CM32181_CONF_REG_NUM];
+ int calibscale;
+};
+
+/**
+ * cm32181_reg_init() - Initialize CM32181 registers
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Initialize CM32181 ambient light sensor register to default values.
+ *
+ * Return: 0 for success; otherwise for error code.
+ */
+static int cm32181_reg_init(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int i;
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ID);
+ if (ret < 0)
+ return ret;
+
+ /* check device ID (low byte of the ID register must be 0x81) */
+ if ((ret & 0xFF) != 0x81)
+ return -ENODEV;
+
+ /* Default Values */
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+
+ /* Initialize registers */
+ for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
+ ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
+ cm32181->conf_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * cm32181_read_als_it() - Get sensor integration time
+ * @cm32181: pointer of struct cm32181
+ * @val2: pointer of int to load the als_it value (microseconds).
+ *
+ * Report the current integration time in microseconds, decoded from
+ * the cached CMD register via the als_it_bits/als_it_value tables.
+ *
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
+ */
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
+{
+ u16 als_it;
+ int i;
+
+ als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
+ als_it &= CM32181_CMD_ALS_IT_MASK;
+ als_it >>= CM32181_CMD_ALS_IT_SHIFT;
+ for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
+ if (als_it == als_it_bits[i]) {
+ *val2 = als_it_value[i];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_write_als_it() - Write sensor integration time
+ * @cm32181: pointer of struct cm32181.
+ * @val: integration time in microseconds.
+ *
+ * Convert integration time (us) to the sensor register code; values
+ * larger than the maximum supported are clamped to the longest time.
+ *
+ * Return: i2c_smbus_write_word_data command return value.
+ */
+static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
+{
+ struct i2c_client *client = cm32181->client;
+ u16 als_it;
+ int ret, i, n;
+
+ /* Pick the first table entry >= val (tables are sorted ascending). */
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0; i < n; i++)
+ if (val <= als_it_value[i])
+ break;
+ if (i >= n)
+ i = n - 1;
+
+ als_it = als_it_bits[i];
+ als_it <<= CM32181_CMD_ALS_IT_SHIFT;
+
+ /* Read-modify-write the cached CMD register under the lock. */
+ mutex_lock(&cm32181->lock);
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] &=
+ ~CM32181_CMD_ALS_IT_MASK;
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] |=
+ als_it;
+ ret = i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
+ mutex_unlock(&cm32181->lock);
+
+ return ret;
+}
+
+/**
+ * cm32181_get_lux() - report current lux value
+ * @cm32181: pointer of struct cm32181.
+ *
+ * Convert sensor raw data to lux. It depends on integration
+ * time and calibscale variable.
+ *
+ * Return: Positive value is lux, otherwise is error code.
+ */
+static int cm32181_get_lux(struct cm32181_chip *cm32181)
+{
+ struct i2c_client *client = cm32181->client;
+ int ret;
+ int als_it;
+ unsigned long lux;
+
+ ret = cm32181_read_als_it(cm32181, &als_it);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* Scale mlux/bit for the currently configured integration time. */
+ lux = CM32181_MLUX_PER_BIT;
+ lux *= CM32181_MLUX_PER_BIT_BASE_IT;
+ lux /= als_it;
+
+ ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
+ if (ret < 0)
+ return ret;
+
+ /* raw * mlux/bit * calibration, then mlux -> lux. */
+ lux *= ret;
+ lux *= cm32181->calibscale;
+ lux /= CM32181_CALIBSCALE_RESOLUTION;
+ lux /= MLUX_PER_LUX;
+
+ /* Clamp to the 16-bit range the chip can represent. */
+ if (lux > 0xFFFF)
+ lux = 0xFFFF;
+
+ return lux;
+}
+
+/*
+ * IIO read_raw: PROCESSED -> lux, CALIBSCALE -> cached scale,
+ * INT_TIME -> seconds (as INT_PLUS_MICRO via cm32181_read_als_it).
+ */
+static int cm32181_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = cm32181_get_lux(cm32181);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = cm32181->calibscale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ ret = cm32181_read_als_it(cm32181, val2);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: set calibration scale or integration time.
+ *
+ * Per the IIO write_raw contract the callback returns 0 on success
+ * (a positive return would propagate out of iio_write_channel_info
+ * as a short sysfs write count), hence "return 0" for CALIBSCALE
+ * rather than echoing the written value back.
+ */
+static int cm32181_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm32181_chip *cm32181 = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ cm32181->calibscale = val;
+ return 0;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = cm32181_write_als_it(cm32181, val2);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * cm32181_get_it_available() - Get available ALS IT value
+ * @dev: pointer of struct device.
+ * @attr: pointer of struct device_attribute.
+ * @buf: pointer of return string buffer.
+ *
+ * Display the available integration time values in seconds
+ * ("0.NNNNNN" per entry), space separated.
+ *
+ * Return: string length.
+ */
+static ssize_t cm32181_get_it_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, n, len;
+
+ n = ARRAY_SIZE(als_it_value);
+ for (i = 0, len = 0; i < n; i++)
+ len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
+ return len + sprintf(buf + len, "\n");
+}
+
+/* Single light channel exposing processed lux, calibscale and IT. */
+static const struct iio_chan_spec cm32181_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static IIO_DEVICE_ATTR(in_illuminance_integration_time_available,
+ S_IRUGO, cm32181_get_it_available, NULL, 0);
+
+static struct attribute *cm32181_attributes[] = {
+ &iio_dev_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm32181_attribute_group = {
+ .attrs = cm32181_attributes
+};
+
+static const struct iio_info cm32181_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm32181_read_raw,
+ .write_raw = &cm32181_write_raw,
+ .attrs = &cm32181_attribute_group,
+};
+
+/*
+ * i2c probe: allocate the IIO device, verify/initialize the chip
+ * registers, then register. Fully devm-managed, so no remove
+ * callback is needed.
+ */
+static int cm32181_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm32181_chip *cm32181;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
+ if (!indio_dev) {
+ dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cm32181 = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ cm32181->client = client;
+
+ mutex_init(&cm32181->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm32181_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
+ indio_dev->info = &cm32181_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Also performs the device-ID check (-ENODEV on mismatch). */
+ ret = cm32181_reg_init(cm32181);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: register init failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: regist device failed\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id cm32181_id[] = {
+ { "cm32181", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cm32181_id);
+
+/*
+ * NOTE(review): there is no MODULE_DEVICE_TABLE(of, cm32181_of_match),
+ * so OF-based module autoloading will not work; also the table is not
+ * guarded by CONFIG_OF although of_match_ptr() is used below, which
+ * leaves it unused when !CONFIG_OF — confirm whether this is intended.
+ */
+static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm32181" },
+ { }
+};
+
+static struct i2c_driver cm32181_driver = {
+ .driver = {
+ .name = "cm32181",
+ .of_match_table = of_match_ptr(cm32181_of_match),
+ .owner = THIS_MODULE,
+ },
+ .id_table = cm32181_id,
+ .probe = cm32181_probe,
+};
+
+module_i2c_driver(cm32181_driver);
+
+MODULE_AUTHOR("Kevin Tsai <ktsai@capellamicro.com>");
+MODULE_DESCRIPTION("CM32181 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
new file mode 100644
index 00000000000000..39fc67e8213847
--- /dev/null
+++ b/drivers/iio/light/cm36651.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/* Slave address 0x19 for PS of 7 bit addressing protocol for I2C */
+#define CM36651_I2C_ADDR_PS 0x19
+/* Alert Response Address */
+#define CM36651_ARA 0x0C
+
+/* Ambient light sensor */
+#define CM36651_CS_CONF1 0x00
+#define CM36651_CS_CONF2 0x01
+#define CM36651_ALS_WH_M 0x02
+#define CM36651_ALS_WH_L 0x03
+#define CM36651_ALS_WL_M 0x04
+#define CM36651_ALS_WL_L 0x05
+#define CM36651_CS_CONF3 0x06
+#define CM36651_CS_CONF_REG_NUM 0x02
+
+/* Proximity sensor */
+#define CM36651_PS_CONF1 0x00
+#define CM36651_PS_THD 0x01
+#define CM36651_PS_CANC 0x02
+#define CM36651_PS_CONF2 0x03
+#define CM36651_PS_REG_NUM 0x04
+
+/* CS_CONF1 command code */
+#define CM36651_ALS_ENABLE 0x00
+#define CM36651_ALS_DISABLE 0x01
+#define CM36651_ALS_INT_EN 0x02
+#define CM36651_ALS_THRES 0x04
+
+/* CS_CONF2 command code */
+#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
+
+/* CS_CONF3 channel integration time */
+#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */
+
+/* PS_CONF1 command code */
+#define CM36651_PS_ENABLE 0x00
+#define CM36651_PS_DISABLE 0x01
+#define CM36651_PS_INT_EN 0x02
+#define CM36651_PS_PERS2 0x04
+#define CM36651_PS_PERS3 0x08
+#define CM36651_PS_PERS4 0x0C
+
+/* PS_CONF1 command code: integration time */
+#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
+#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
+#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
+#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */
+
+/* PS_CONF1 command code: duty ratio */
+#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
+#define CM36651_PS_DR2 0x40 /* Duty ratio 1/160 */
+#define CM36651_PS_DR3 0x80 /* Duty ratio 1/320 */
+#define CM36651_PS_DR4 0xC0 /* Duty ratio 1/640 */
+
+/* PS_THD command code */
+#define CM36651_PS_INITIAL_THD 0x05
+
+/* PS_CANC command code */
+#define CM36651_PS_CANC_DEFAULT 0x00
+
+/* PS_CONF2 command code */
+#define CM36651_PS_HYS1 0x00
+#define CM36651_PS_HYS2 0x01
+#define CM36651_PS_SMART_PERS_EN 0x02
+#define CM36651_PS_DIR_INT 0x04
+#define CM36651_PS_MS 0x10
+
+#define CM36651_CS_COLOR_NUM 4
+
+#define CM36651_CLOSE_PROXIMITY 0x32
+#define CM36651_FAR_PROXIMITY 0x33
+
+#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
+#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"
+
+/* Bits kept in cm36651_data.flags describing what is enabled. */
+enum cm36651_operation_mode {
+ CM36651_LIGHT_EN,
+ CM36651_PROXIMITY_EN,
+ CM36651_PROXIMITY_EV_EN,
+};
+
+/* Index of each colour channel; doubles as the channel .address. */
+enum cm36651_light_channel_idx {
+ CM36651_LIGHT_CHANNEL_IDX_RED,
+ CM36651_LIGHT_CHANNEL_IDX_GREEN,
+ CM36651_LIGHT_CHANNEL_IDX_BLUE,
+ CM36651_LIGHT_CHANNEL_IDX_CLEAR,
+};
+
+enum cm36651_command {
+ CM36651_CMD_READ_RAW_LIGHT,
+ CM36651_CMD_READ_RAW_PROXIMITY,
+ CM36651_CMD_PROX_EV_EN,
+ CM36651_CMD_PROX_EV_DIS,
+};
+
+/* Register addresses matching the cs_ctrl_regs / ps_ctrl_regs caches. */
+static const u8 cm36651_cs_reg[CM36651_CS_CONF_REG_NUM] = {
+ CM36651_CS_CONF1,
+ CM36651_CS_CONF2,
+};
+
+static const u8 cm36651_ps_reg[CM36651_PS_REG_NUM] = {
+ CM36651_PS_CONF1,
+ CM36651_PS_THD,
+ CM36651_PS_CANC,
+ CM36651_PS_CONF2,
+};
+
+/*
+ * Per-device state. The chip answers on three i2c addresses: the
+ * main ALS client, a PS client (0x19) and the SMBus Alert Response
+ * Address client used to ack interrupts.
+ */
+struct cm36651_data {
+ const struct cm36651_platform_data *pdata;
+ struct i2c_client *client;
+ struct i2c_client *ps_client;
+ struct i2c_client *ara_client;
+ struct mutex lock;
+ struct regulator *vled_reg;
+ unsigned long flags;
+ int cs_int_time[CM36651_CS_COLOR_NUM];
+ int ps_int_time;
+ u8 cs_ctrl_regs[CM36651_CS_CONF_REG_NUM];
+ u8 ps_ctrl_regs[CM36651_PS_REG_NUM];
+ u16 color[CM36651_CS_COLOR_NUM];
+};
+
+/*
+ * Program ALS and PS default register values (cached in the ctrl_regs
+ * arrays), then leave both units in shutdown until a read enables them.
+ * Returns 0 on success or a negative i2c error.
+ */
+static int cm36651_setup_reg(struct cm36651_data *cm36651)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int i, ret;
+
+ /* CS initialization */
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF1] = CM36651_ALS_ENABLE |
+ CM36651_ALS_THRES;
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF2] = CM36651_CS_CONF2_DEFAULT_BIT;
+
+ for (i = 0; i < CM36651_CS_CONF_REG_NUM; i++) {
+ ret = i2c_smbus_write_byte_data(client, cm36651_cs_reg[i],
+ cm36651->cs_ctrl_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PS initialization */
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF1] = CM36651_PS_ENABLE |
+ CM36651_PS_IT2;
+ cm36651->ps_ctrl_regs[CM36651_PS_THD] = CM36651_PS_INITIAL_THD;
+ cm36651->ps_ctrl_regs[CM36651_PS_CANC] = CM36651_PS_CANC_DEFAULT;
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF2] = CM36651_PS_HYS2 |
+ CM36651_PS_DIR_INT | CM36651_PS_SMART_PERS_EN;
+
+ for (i = 0; i < CM36651_PS_REG_NUM; i++) {
+ ret = i2c_smbus_write_byte_data(ps_client, cm36651_ps_reg[i],
+ cm36651->ps_ctrl_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Set shutdown mode */
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ CM36651_ALS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Fetch one measurement into *val and put the unit back to sleep:
+ * light channels read a 16-bit word at chan->address from the main
+ * client; proximity reads a byte from the PS client. PS is left
+ * enabled while proximity events are active. Returns IIO_VAL_INT or
+ * a negative error.
+ */
+static int cm36651_read_output(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct i2c_client *client = cm36651->client;
+ int ret = -EINVAL;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = i2c_smbus_read_word_data(client, chan->address);
+ if (*val < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ CM36651_ALS_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_PROXIMITY:
+ *val = i2c_smbus_read_byte(cm36651->ps_client);
+ if (*val < 0)
+ return ret;
+
+ /* Keep PS running if the proximity event is enabled. */
+ if (!test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Threaded IRQ handler for the active-low PS INT line: ack via the
+ * ARA client, map the returned code to a rising/falling threshold
+ * direction, and push an IIO proximity event.
+ */
+static irqreturn_t cm36651_irq_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ev_dir, ret;
+ u64 ev_code;
+
+ /*
+ * The PS INT pin is an active low signal that PS INT move logic low
+ * when the object is detect. Once the MCU host received the PS INT
+ * "LOW" signal, the Host needs to read the data at Alert Response
+ * Address(ARA) to clear the PS INT signal. After clearing the PS
+ * INT pin, the PS INT signal toggles from low to high.
+ */
+ ret = i2c_smbus_read_byte(cm36651->ara_client);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: Data read failed: %d\n", __func__, ret);
+ return IRQ_HANDLED;
+ }
+ switch (ret) {
+ case CM36651_CLOSE_PROXIMITY:
+ ev_dir = IIO_EV_DIR_RISING;
+ break;
+ case CM36651_FAR_PROXIMITY:
+ ev_dir = IIO_EV_DIR_FALLING;
+ break;
+ default:
+ dev_err(&client->dev,
+ "%s: Data read wrong: %d\n", __func__, ret);
+ return IRQ_HANDLED;
+ }
+
+ ev_code = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
+ CM36651_CMD_READ_RAW_PROXIMITY,
+ IIO_EV_TYPE_THRESH, ev_dir);
+
+ iio_push_event(indio_dev, ev_code, iio_get_time_ns());
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Switch the chip between the cm36651_command modes. Callers treat
+ * any non-negative return as success; note the RAW_PROXIMITY case
+ * deliberately returns the positive CM36651_PROXIMITY_EV_EN value
+ * when the event path already has PS enabled.
+ */
+static int cm36651_set_operation_mode(struct cm36651_data *cm36651, int cmd)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int ret = -EINVAL;
+
+ switch (cmd) {
+ case CM36651_CMD_READ_RAW_LIGHT:
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF1,
+ cm36651->cs_ctrl_regs[CM36651_CS_CONF1]);
+ break;
+ case CM36651_CMD_READ_RAW_PROXIMITY:
+ if (test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags))
+ return CM36651_PROXIMITY_EV_EN;
+
+ ret = i2c_smbus_write_byte_data(ps_client, CM36651_PS_CONF1,
+ cm36651->ps_ctrl_regs[CM36651_PS_CONF1]);
+ break;
+ case CM36651_CMD_PROX_EV_EN:
+ if (test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ dev_err(&client->dev,
+ "Already proximity event enable state\n");
+ return ret;
+ }
+ set_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+
+ ret = i2c_smbus_write_byte_data(ps_client,
+ cm36651_ps_reg[CM36651_PS_CONF1],
+ CM36651_PS_INT_EN | CM36651_PS_PERS2 | CM36651_PS_IT2);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Proximity enable event failed\n");
+ return ret;
+ }
+ break;
+ case CM36651_CMD_PROX_EV_DIS:
+ if (!test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags)) {
+ dev_err(&client->dev,
+ "Already proximity event disable state\n");
+ return ret;
+ }
+ clear_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+ ret = i2c_smbus_write_byte_data(ps_client,
+ CM36651_PS_CONF1, CM36651_PS_DISABLE);
+ break;
+ }
+
+ if (ret < 0)
+ dev_err(&client->dev, "Write register failed\n");
+
+ return ret;
+}
+
+/*
+ * One-shot read of a light or proximity channel: enable the unit,
+ * wait for a conversion, then read and shut down via
+ * cm36651_read_output().
+ */
+static int cm36651_read_channel(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct i2c_client *client = cm36651->client;
+ int cmd, ret;
+
+ if (chan->type == IIO_LIGHT)
+ cmd = CM36651_CMD_READ_RAW_LIGHT;
+ else if (chan->type == IIO_PROXIMITY)
+ cmd = CM36651_CMD_READ_RAW_PROXIMITY;
+ else
+ return -EINVAL;
+
+ ret = cm36651_set_operation_mode(cm36651, cmd);
+ if (ret < 0) {
+ dev_err(&client->dev, "CM36651 set operation mode failed\n");
+ return ret;
+ }
+ /* Delay for work after enable operation */
+ msleep(50);
+ ret = cm36651_read_output(cm36651, chan, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "CM36651 read output failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Decode the cached integration-time code into *val2 (microseconds):
+ * per-colour cs_int_time[chan->address] for light, ps_int_time for
+ * proximity. Returns IIO_VAL_INT_PLUS_MICRO or -EINVAL.
+ */
+static int cm36651_read_int_time(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int *val2)
+{
+ switch (chan->type) {
+ case IIO_LIGHT:
+ if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
+ *val2 = 80000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
+ *val2 = 160000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
+ *val2 = 320000;
+ else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
+ *val2 = 640000;
+ else
+ return -EINVAL;
+ break;
+ case IIO_PROXIMITY:
+ if (cm36651->ps_int_time == CM36651_PS_IT1)
+ *val2 = 320;
+ else if (cm36651->ps_int_time == CM36651_PS_IT2)
+ *val2 = 420;
+ else if (cm36651->ps_int_time == CM36651_PS_IT3)
+ *val2 = 520;
+ else if (cm36651->ps_int_time == CM36651_PS_IT4)
+ *val2 = 640;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+/*
+ * Program a new integration time (microseconds in @val) for a light
+ * colour channel (CS_CONF3) or the proximity unit (PS_CONF1) and
+ * cache the register code. Returns 0/-errno from the i2c write.
+ */
+static int cm36651_write_int_time(struct cm36651_data *cm36651,
+ struct iio_chan_spec const *chan, int val)
+{
+ struct i2c_client *client = cm36651->client;
+ struct i2c_client *ps_client = cm36651->ps_client;
+ int int_time, ret;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ if (val == 80000)
+ int_time = CM36651_CS_IT1;
+ else if (val == 160000)
+ int_time = CM36651_CS_IT2;
+ else if (val == 320000)
+ int_time = CM36651_CS_IT3;
+ else if (val == 640000)
+ int_time = CM36651_CS_IT4;
+ else
+ return -EINVAL;
+
+ /*
+ * NOTE(review): ">>" binds looser than "*", so this writes
+ * int_time >> (2 * chan->address); the IT code sits in bits
+ * 7:6, so any non-zero address shifts bits out — confirm the
+ * intended CS_CONF3 encoding for the non-red channels.
+ */
+ ret = i2c_smbus_write_byte_data(client, CM36651_CS_CONF3,
+ int_time >> 2 * (chan->address));
+ if (ret < 0) {
+ dev_err(&client->dev, "CS integration time write failed\n");
+ return ret;
+ }
+ cm36651->cs_int_time[chan->address] = int_time;
+ break;
+ case IIO_PROXIMITY:
+ if (val == 320)
+ int_time = CM36651_PS_IT1;
+ else if (val == 420)
+ int_time = CM36651_PS_IT2;
+ else if (val == 520)
+ int_time = CM36651_PS_IT3;
+ else if (val == 640)
+ int_time = CM36651_PS_IT4;
+ else
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(ps_client,
+ CM36651_PS_CONF1, int_time);
+ if (ret < 0) {
+ dev_err(&client->dev, "PS integration time write failed\n");
+ return ret;
+ }
+ cm36651->ps_int_time = int_time;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* IIO read_raw: RAW -> one-shot measurement, INT_TIME -> cached IT. */
+static int cm36651_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&cm36651->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = cm36651_read_channel(cm36651, chan, val);
+ break;
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ ret = cm36651_read_int_time(cm36651, chan, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&cm36651->lock);
+
+ return ret;
+}
+
+/* IIO write_raw: only INT_TIME is writable; val2 carries microseconds. */
+static int cm36651_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ret = -EINVAL;
+
+ if (mask == IIO_CHAN_INFO_INT_TIME) {
+ ret = cm36651_write_int_time(cm36651, chan, val2);
+ if (ret < 0)
+ dev_err(&client->dev, "Integration time write failed\n");
+ }
+
+ return ret;
+}
+
+/*
+ * Report the proximity threshold from the cached PS_THD register.
+ * @dir is ignored: the chip has a single threshold shared by the
+ * rising and falling events.
+ */
+static int cm36651_read_prox_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+
+ *val = cm36651->ps_ctrl_regs[CM36651_PS_THD];
+
+ return 0;
+}
+
+/*
+ * Set the shared proximity threshold (valid range 3..255) and write
+ * it through to the PS_THD register on the PS client.
+ */
+static int cm36651_write_prox_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ struct i2c_client *client = cm36651->client;
+ int ret;
+
+ if (val < 3 || val > 255)
+ return -EINVAL;
+
+ cm36651->ps_ctrl_regs[CM36651_PS_THD] = val;
+ ret = i2c_smbus_write_byte_data(cm36651->ps_client, CM36651_PS_THD,
+ cm36651->ps_ctrl_regs[CM36651_PS_THD]);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "PS threshold write failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Enable/disable the proximity threshold event. Both directions map
+ * to the same hardware interrupt, so one switch controls both.
+ */
+static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int cmd, ret = -EINVAL;
+
+ mutex_lock(&cm36651->lock);
+
+ cmd = state ? CM36651_CMD_PROX_EV_EN : CM36651_CMD_PROX_EV_DIS;
+ ret = cm36651_set_operation_mode(cm36651, cmd);
+
+ mutex_unlock(&cm36651->lock);
+
+ return ret;
+}
+
+/* Report whether the proximity event is enabled (1) or not (0). */
+static int cm36651_read_prox_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+ int event_en;
+
+ mutex_lock(&cm36651->lock);
+
+ event_en = test_bit(CM36651_PROXIMITY_EV_EN, &cm36651->flags);
+
+ mutex_unlock(&cm36651->lock);
+
+ return event_en;
+}
+
+/* One colour channel; _idx is stored in .address for register lookup. */
+#define CM36651_LIGHT_CHANNEL(_color, _idx) { \
+ .type = IIO_LIGHT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .address = _idx, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+} \
+
+static const struct iio_event_spec cm36651_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
+/* Proximity channel with threshold events plus four colour channels. */
+static const struct iio_chan_spec cm36651_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .event_spec = cm36651_event_spec,
+ .num_event_specs = ARRAY_SIZE(cm36651_event_spec),
+ },
+ CM36651_LIGHT_CHANNEL(RED, CM36651_LIGHT_CHANNEL_IDX_RED),
+ CM36651_LIGHT_CHANNEL(GREEN, CM36651_LIGHT_CHANNEL_IDX_GREEN),
+ CM36651_LIGHT_CHANNEL(BLUE, CM36651_LIGHT_CHANNEL_IDX_BLUE),
+ CM36651_LIGHT_CHANNEL(CLEAR, CM36651_LIGHT_CHANNEL_IDX_CLEAR),
+};
+
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ CM36651_CS_INT_TIME_AVAIL);
+static IIO_CONST_ATTR(in_proximity_integration_time_available,
+ CM36651_PS_INT_TIME_AVAIL);
+
+static struct attribute *cm36651_attributes[] = {
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_proximity_integration_time_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm36651_attribute_group = {
+ .attrs = cm36651_attributes
+};
+
+static const struct iio_info cm36651_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &cm36651_read_raw,
+ .write_raw = &cm36651_write_raw,
+ .read_event_value = &cm36651_read_prox_thresh,
+ .write_event_value = &cm36651_write_prox_thresh,
+ .read_event_config = &cm36651_read_prox_event_config,
+ .write_event_config = &cm36651_write_prox_event_config,
+ .attrs = &cm36651_attribute_group,
+};
+
+/*
+ * i2c probe: enable the vled regulator, create dummy clients for the
+ * PS address (0x19) and the SMBus Alert Response Address, initialize
+ * the chip, request the PS interrupt and register the IIO device.
+ * Errors unwind in reverse acquisition order via the goto labels.
+ */
+static int cm36651_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm36651_data *cm36651;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm36651));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ cm36651 = iio_priv(indio_dev);
+
+ cm36651->vled_reg = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(cm36651->vled_reg)) {
+ dev_err(&client->dev, "get regulator vled failed\n");
+ return PTR_ERR(cm36651->vled_reg);
+ }
+
+ ret = regulator_enable(cm36651->vled_reg);
+ if (ret) {
+ dev_err(&client->dev, "enable regulator vled failed\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ cm36651->client = client;
+ /* Secondary addresses the chip responds on (see cm36651_data). */
+ cm36651->ps_client = i2c_new_dummy(client->adapter,
+ CM36651_I2C_ADDR_PS);
+ if (!cm36651->ps_client) {
+ dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+ ret = -ENODEV;
+ goto error_disable_reg;
+ }
+
+ cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
+ if (!cm36651->ara_client) {
+ dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+ ret = -ENODEV;
+ goto error_i2c_unregister_ps;
+ }
+
+ mutex_init(&cm36651->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = cm36651_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cm36651_channels);
+ indio_dev->info = &cm36651_info;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = cm36651_setup_reg(cm36651);
+ if (ret) {
+ dev_err(&client->dev, "%s: register setup failed\n", __func__);
+ goto error_i2c_unregister_ara;
+ }
+
+ ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cm36651", indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "%s: request irq failed\n", __func__);
+ goto error_i2c_unregister_ara;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "%s: regist device failed\n", __func__);
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ free_irq(client->irq, indio_dev);
+error_i2c_unregister_ara:
+ i2c_unregister_device(cm36651->ara_client);
+error_i2c_unregister_ps:
+ i2c_unregister_device(cm36651->ps_client);
+error_disable_reg:
+ regulator_disable(cm36651->vled_reg);
+ return ret;
+}
+
+/*
+ * i2c remove: undo probe in roughly reverse order.
+ * NOTE(review): the regulator is disabled before free_irq(), so the
+ * threaded handler could still fire against an unpowered sensor —
+ * consider freeing the IRQ first; confirm against the probe ordering.
+ */
+static int cm36651_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm36651_data *cm36651 = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(cm36651->vled_reg);
+ free_irq(client->irq, indio_dev);
+ i2c_unregister_device(cm36651->ps_client);
+ i2c_unregister_device(cm36651->ara_client);
+
+ return 0;
+}
+
+static const struct i2c_device_id cm36651_id[] = {
+ { "cm36651", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, cm36651_id);
+
+/*
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, cm36651_of_match), so
+ * OF-based module autoloading will not work — confirm intended.
+ */
+static const struct of_device_id cm36651_of_match[] = {
+ { .compatible = "capella,cm36651" },
+ { }
+};
+
+static struct i2c_driver cm36651_driver = {
+ .driver = {
+ .name = "cm36651",
+ .of_match_table = cm36651_of_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = cm36651_probe,
+ .remove = cm36651_remove,
+ .id_table = cm36651_id,
+};
+
+module_i2c_driver(cm36651_driver);
+
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_DESCRIPTION("CM36651 proximity/ambient light sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
new file mode 100644
index 00000000000000..221ed16de1f7ba
--- /dev/null
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -0,0 +1,1654 @@
+/*
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * IIO features supported by the driver:
+ *
+ * Read-only raw channels:
+ * - illuminance_clear [lux]
+ * - illuminance_ir
+ * - proximity
+ *
+ * Triggered buffer:
+ * - illuminance_clear
+ * - illuminance_ir
+ * - proximity
+ *
+ * Events:
+ * - illuminance_clear (rising and falling)
+ * - proximity (rising and falling)
+ * - both falling and rising thresholds for the proximity events
+ * must be set to the values greater than 0.
+ *
+ * The driver supports triggered buffers for all the three
+ * channels as well as high and low threshold events for the
+ * illuminance_clear and proxmimity channels. Triggers
+ * can be enabled simultaneously with both illuminance_clear
+ * events. Proximity events cannot be enabled simultaneously
+ * with any triggers or illuminance events. Enabling/disabling
+ * one of the proximity events automatically enables/disables
+ * the other one.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define GP2A_I2C_NAME "gp2ap020a00f"
+
+/* Registers */
+#define GP2AP020A00F_OP_REG 0x00 /* Basic operations */
+#define GP2AP020A00F_ALS_REG 0x01 /* ALS related settings */
+#define GP2AP020A00F_PS_REG 0x02 /* PS related settings */
+#define GP2AP020A00F_LED_REG 0x03 /* LED reg */
+#define GP2AP020A00F_TL_L_REG 0x04 /* ALS: Threshold low LSB */
+#define GP2AP020A00F_TL_H_REG 0x05 /* ALS: Threshold low MSB */
+#define GP2AP020A00F_TH_L_REG 0x06 /* ALS: Threshold high LSB */
+#define GP2AP020A00F_TH_H_REG 0x07 /* ALS: Threshold high MSB */
+#define GP2AP020A00F_PL_L_REG 0x08 /* PS: Threshold low LSB */
+#define GP2AP020A00F_PL_H_REG 0x09 /* PS: Threshold low MSB */
+#define GP2AP020A00F_PH_L_REG 0x0a /* PS: Threshold high LSB */
+#define GP2AP020A00F_PH_H_REG 0x0b /* PS: Threshold high MSB */
+#define GP2AP020A00F_D0_L_REG 0x0c /* ALS result: Clear/Illuminance LSB */
+#define GP2AP020A00F_D0_H_REG 0x0d /* ALS result: Clear/Illuminance MSB */
+#define GP2AP020A00F_D1_L_REG 0x0e /* ALS result: IR LSB */
+#define GP2AP020A00F_D1_H_REG 0x0f /* ALS result: IR LSB */
+#define GP2AP020A00F_D2_L_REG 0x10 /* PS result LSB */
+#define GP2AP020A00F_D2_H_REG 0x11 /* PS result MSB */
+#define GP2AP020A00F_NUM_REGS 0x12 /* Number of registers */
+
+/* OP_REG bits */
+#define GP2AP020A00F_OP3_MASK 0x80 /* Software shutdown */
+#define GP2AP020A00F_OP3_SHUTDOWN 0x00
+#define GP2AP020A00F_OP3_OPERATION 0x80
+#define GP2AP020A00F_OP2_MASK 0x40 /* Auto shutdown/Continuous mode */
+#define GP2AP020A00F_OP2_AUTO_SHUTDOWN 0x00
+#define GP2AP020A00F_OP2_CONT_OPERATION 0x40
+#define GP2AP020A00F_OP_MASK 0x30 /* Operating mode selection */
+#define GP2AP020A00F_OP_ALS_AND_PS 0x00
+#define GP2AP020A00F_OP_ALS 0x10
+#define GP2AP020A00F_OP_PS 0x20
+#define GP2AP020A00F_OP_DEBUG 0x30
+#define GP2AP020A00F_PROX_MASK 0x08 /* PS: detection/non-detection */
+#define GP2AP020A00F_PROX_NON_DETECT 0x00
+#define GP2AP020A00F_PROX_DETECT 0x08
+#define GP2AP020A00F_FLAG_P 0x04 /* PS: interrupt result */
+#define GP2AP020A00F_FLAG_A 0x02 /* ALS: interrupt result */
+#define GP2AP020A00F_TYPE_MASK 0x01 /* Output data type selection */
+#define GP2AP020A00F_TYPE_MANUAL_CALC 0x00
+#define GP2AP020A00F_TYPE_AUTO_CALC 0x01
+
+/* ALS_REG bits */
+#define GP2AP020A00F_PRST_MASK 0xc0 /* Number of measurement cycles */
+#define GP2AP020A00F_PRST_ONCE 0x00
+#define GP2AP020A00F_PRST_4_CYCLES 0x40
+#define GP2AP020A00F_PRST_8_CYCLES 0x80
+#define GP2AP020A00F_PRST_16_CYCLES 0xc0
+#define GP2AP020A00F_RES_A_MASK 0x38 /* ALS: Resolution */
+#define GP2AP020A00F_RES_A_800ms 0x00
+#define GP2AP020A00F_RES_A_400ms 0x08
+#define GP2AP020A00F_RES_A_200ms 0x10
+#define GP2AP020A00F_RES_A_100ms 0x18
+#define GP2AP020A00F_RES_A_25ms 0x20
+#define GP2AP020A00F_RES_A_6_25ms 0x28
+#define GP2AP020A00F_RES_A_1_56ms 0x30
+#define GP2AP020A00F_RES_A_0_39ms 0x38
+#define GP2AP020A00F_RANGE_A_MASK 0x07 /* ALS: Max measurable range */
+#define GP2AP020A00F_RANGE_A_x1 0x00
+#define GP2AP020A00F_RANGE_A_x2 0x01
+#define GP2AP020A00F_RANGE_A_x4 0x02
+#define GP2AP020A00F_RANGE_A_x8 0x03
+#define GP2AP020A00F_RANGE_A_x16 0x04
+#define GP2AP020A00F_RANGE_A_x32 0x05
+#define GP2AP020A00F_RANGE_A_x64 0x06
+#define GP2AP020A00F_RANGE_A_x128 0x07
+
+/* PS_REG bits */
+#define GP2AP020A00F_ALC_MASK 0x80 /* Auto light cancel */
+#define GP2AP020A00F_ALC_ON 0x80
+#define GP2AP020A00F_ALC_OFF 0x00
+#define GP2AP020A00F_INTTYPE_MASK 0x40 /* Interrupt type setting */
+#define GP2AP020A00F_INTTYPE_LEVEL 0x00
+#define GP2AP020A00F_INTTYPE_PULSE 0x40
+#define GP2AP020A00F_RES_P_MASK 0x38 /* PS: Resolution */
+#define GP2AP020A00F_RES_P_800ms_x2 0x00
+#define GP2AP020A00F_RES_P_400ms_x2 0x08
+#define GP2AP020A00F_RES_P_200ms_x2 0x10
+#define GP2AP020A00F_RES_P_100ms_x2 0x18
+#define GP2AP020A00F_RES_P_25ms_x2 0x20
+#define GP2AP020A00F_RES_P_6_25ms_x2 0x28
+#define GP2AP020A00F_RES_P_1_56ms_x2 0x30
+#define GP2AP020A00F_RES_P_0_39ms_x2 0x38
+#define GP2AP020A00F_RANGE_P_MASK 0x07 /* PS: Max measurable range */
+#define GP2AP020A00F_RANGE_P_x1 0x00
+#define GP2AP020A00F_RANGE_P_x2 0x01
+#define GP2AP020A00F_RANGE_P_x4 0x02
+#define GP2AP020A00F_RANGE_P_x8 0x03
+#define GP2AP020A00F_RANGE_P_x16 0x04
+#define GP2AP020A00F_RANGE_P_x32 0x05
+#define GP2AP020A00F_RANGE_P_x64 0x06
+#define GP2AP020A00F_RANGE_P_x128 0x07
+
+/* LED reg bits */
+#define GP2AP020A00F_INTVAL_MASK 0xc0 /* Intermittent operating */
+#define GP2AP020A00F_INTVAL_0 0x00
+#define GP2AP020A00F_INTVAL_4 0x40
+#define GP2AP020A00F_INTVAL_8 0x80
+#define GP2AP020A00F_INTVAL_16 0xc0
+#define GP2AP020A00F_IS_MASK 0x30 /* ILED drive peak current */
+#define GP2AP020A00F_IS_13_8mA 0x00
+#define GP2AP020A00F_IS_27_5mA 0x10
+#define GP2AP020A00F_IS_55mA 0x20
+#define GP2AP020A00F_IS_110mA 0x30
+#define GP2AP020A00F_PIN_MASK 0x0c /* INT terminal setting */
+#define GP2AP020A00F_PIN_ALS_OR_PS 0x00
+#define GP2AP020A00F_PIN_ALS 0x04
+#define GP2AP020A00F_PIN_PS 0x08
+#define GP2AP020A00F_PIN_PS_DETECT 0x0c
+#define GP2AP020A00F_FREQ_MASK 0x02 /* LED modulation frequency */
+#define GP2AP020A00F_FREQ_327_5kHz 0x00
+#define GP2AP020A00F_FREQ_81_8kHz 0x02
+#define GP2AP020A00F_RST 0x01 /* Software reset */
+
+/* IIO scan element indices; also select the D0/D1/D2 data register pair. */
+#define GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR 0
+#define GP2AP020A00F_SCAN_MODE_LIGHT_IR 1
+#define GP2AP020A00F_SCAN_MODE_PROXIMITY 2
+#define GP2AP020A00F_CHAN_TIMESTAMP 3
+
+/* Data-register / threshold-register address arithmetic helpers. */
+#define GP2AP020A00F_DATA_READY_TIMEOUT msecs_to_jiffies(1000)
+#define GP2AP020A00F_DATA_REG(chan) (GP2AP020A00F_D0_L_REG + \
+ (chan) * 2)
+#define GP2AP020A00F_THRESH_REG(th_val_id) (GP2AP020A00F_TL_L_REG + \
+ (th_val_id) * 2)
+#define GP2AP020A00F_THRESH_VAL_ID(reg_addr) ((reg_addr - 4) / 2)
+
+/* add_sub argument for gp2ap020a00f_alter_opmode(). */
+#define GP2AP020A00F_SUBTRACT_MODE 0
+#define GP2AP020A00F_ADD_MODE 1
+
+#define GP2AP020A00F_MAX_CHANNELS 3
+
+/* Device operating modes; index into opmode_regs_settings[]. */
+enum gp2ap020a00f_opmode {
+ GP2AP020A00F_OPMODE_READ_RAW_CLEAR,
+ GP2AP020A00F_OPMODE_READ_RAW_IR,
+ GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_OPMODE_ALS_AND_PS,
+ GP2AP020A00F_OPMODE_PROX_DETECT,
+ GP2AP020A00F_OPMODE_SHUTDOWN,
+ GP2AP020A00F_NUM_OPMODES,
+};
+
+/* Commands dispatched by gp2ap020a00f_exec_cmd(). */
+enum gp2ap020a00f_cmd {
+ GP2AP020A00F_CMD_READ_RAW_CLEAR,
+ GP2AP020A00F_CMD_READ_RAW_IR,
+ GP2AP020A00F_CMD_READ_RAW_PROXIMITY,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_EN,
+ GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS,
+ GP2AP020A00F_CMD_TRIGGER_IR_EN,
+ GP2AP020A00F_CMD_TRIGGER_IR_DIS,
+ GP2AP020A00F_CMD_TRIGGER_PROX_EN,
+ GP2AP020A00F_CMD_TRIGGER_PROX_DIS,
+ GP2AP020A00F_CMD_ALS_HIGH_EV_EN,
+ GP2AP020A00F_CMD_ALS_HIGH_EV_DIS,
+ GP2AP020A00F_CMD_ALS_LOW_EV_EN,
+ GP2AP020A00F_CMD_ALS_LOW_EV_DIS,
+ GP2AP020A00F_CMD_PROX_HIGH_EV_EN,
+ GP2AP020A00F_CMD_PROX_HIGH_EV_DIS,
+ GP2AP020A00F_CMD_PROX_LOW_EV_EN,
+ GP2AP020A00F_CMD_PROX_LOW_EV_DIS,
+};
+
+/* Bit numbers for the data->flags bitmap (test_bit/set_bit/clear_bit). */
+enum gp2ap020a00f_flags {
+ GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER,
+ GP2AP020A00F_FLAG_ALS_IR_TRIGGER,
+ GP2AP020A00F_FLAG_PROX_TRIGGER,
+ GP2AP020A00F_FLAG_PROX_RISING_EV,
+ GP2AP020A00F_FLAG_PROX_FALLING_EV,
+ GP2AP020A00F_FLAG_ALS_RISING_EV,
+ GP2AP020A00F_FLAG_ALS_FALLING_EV,
+ GP2AP020A00F_FLAG_LUX_MODE_HI,
+ GP2AP020A00F_FLAG_DATA_READY,
+};
+
+/* Index into data->thresh_val[]; order matches TL/TH/PL/PH registers. */
+enum gp2ap020a00f_thresh_val_id {
+ GP2AP020A00F_THRESH_TL,
+ GP2AP020A00F_THRESH_TH,
+ GP2AP020A00F_THRESH_PL,
+ GP2AP020A00F_THRESH_PH,
+};
+
+/* Per-device driver state, stored as iio_priv() of the iio_dev. */
+struct gp2ap020a00f_data {
+ const struct gp2ap020a00f_platform_data *pdata;
+ struct i2c_client *client;
+ struct mutex lock; /* guards flags/thresh_val updates */
+ char *buffer; /* scan buffer for the triggered-buffer path */
+ struct regulator *vled_reg; /* LED supply */
+ unsigned long flags; /* bitmap of enum gp2ap020a00f_flags */
+ enum gp2ap020a00f_opmode cur_opmode;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ unsigned int thresh_val[4]; /* indexed by gp2ap020a00f_thresh_val_id */
+ u8 debug_reg_addr;
+ struct irq_work work; /* defers iio_trigger_poll() out of irq context */
+ wait_queue_head_t data_ready_queue; /* raw-read completion wait */
+};
+
+/*
+ * Power-on register defaults: device held in shutdown, ALS at 25ms/x8
+ * (low lux mode), PS with auto light cancel, all thresholds cleared.
+ */
+static const u8 gp2ap020a00f_reg_init_tab[] = {
+ [GP2AP020A00F_OP_REG] = GP2AP020A00F_OP3_SHUTDOWN,
+ [GP2AP020A00F_ALS_REG] = GP2AP020A00F_RES_A_25ms |
+ GP2AP020A00F_RANGE_A_x8,
+ [GP2AP020A00F_PS_REG] = GP2AP020A00F_ALC_ON |
+ GP2AP020A00F_RES_P_1_56ms_x2 |
+ GP2AP020A00F_RANGE_P_x4,
+ [GP2AP020A00F_LED_REG] = GP2AP020A00F_INTVAL_0 |
+ GP2AP020A00F_IS_110mA |
+ GP2AP020A00F_FREQ_327_5kHz,
+ [GP2AP020A00F_TL_L_REG] = 0,
+ [GP2AP020A00F_TL_H_REG] = 0,
+ [GP2AP020A00F_TH_L_REG] = 0,
+ [GP2AP020A00F_TH_H_REG] = 0,
+ [GP2AP020A00F_PL_L_REG] = 0,
+ [GP2AP020A00F_PL_H_REG] = 0,
+ [GP2AP020A00F_PH_L_REG] = 0,
+ [GP2AP020A00F_PH_H_REG] = 0,
+};
+
+/*
+ * Regmap volatile-register predicate: the OP (interrupt flags) and
+ * D0-D2 measurement result registers change under hardware control and
+ * must bypass the register cache.
+ */
+static bool gp2ap020a00f_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case GP2AP020A00F_OP_REG:
+ case GP2AP020A00F_D0_L_REG:
+ case GP2AP020A00F_D0_H_REG:
+ case GP2AP020A00F_D1_L_REG:
+ case GP2AP020A00F_D1_H_REG:
+ case GP2AP020A00F_D2_L_REG:
+ case GP2AP020A00F_D2_H_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* 8-bit register/value map over I2C with an rbtree cache. */
+static const struct regmap_config gp2ap020a00f_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = GP2AP020A00F_D2_H_REG,
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = gp2ap020a00f_is_volatile_reg,
+};
+
+/*
+ * Per-opmode values for the four mutable configuration registers.
+ * Only the masked sub-fields (OP/TYPE, PRST, INTTYPE, PIN) are applied
+ * by gp2ap020a00f_set_operation_mode(); the rest keep their init-table
+ * values.  SHUTDOWN programs only the OP register.
+ */
+static const struct gp2ap020a00f_mutable_config_regs {
+ u8 op_reg;
+ u8 als_reg;
+ u8 ps_reg;
+ u8 led_reg;
+} opmode_regs_settings[GP2AP020A00F_NUM_OPMODES] = {
+ [GP2AP020A00F_OPMODE_READ_RAW_CLEAR] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_READ_RAW_IR] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_PS
+ },
+ [GP2AP020A00F_OPMODE_PROX_DETECT] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_PULSE,
+ GP2AP020A00F_PIN_PS_DETECT
+ },
+ [GP2AP020A00F_OPMODE_ALS] = {
+ GP2AP020A00F_OP_ALS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_ONCE,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS
+ },
+ [GP2AP020A00F_OPMODE_PS] = {
+ GP2AP020A00F_OP_PS | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_MANUAL_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_PS
+ },
+ [GP2AP020A00F_OPMODE_ALS_AND_PS] = {
+ GP2AP020A00F_OP_ALS_AND_PS
+ | GP2AP020A00F_OP2_CONT_OPERATION
+ | GP2AP020A00F_OP3_OPERATION
+ | GP2AP020A00F_TYPE_AUTO_CALC,
+ GP2AP020A00F_PRST_4_CYCLES,
+ GP2AP020A00F_INTTYPE_LEVEL,
+ GP2AP020A00F_PIN_ALS_OR_PS
+ },
+ [GP2AP020A00F_OPMODE_SHUTDOWN] = { GP2AP020A00F_OP3_SHUTDOWN, },
+};
+
+/*
+ * gp2ap020a00f_set_operation_mode() - program the OP/ALS/PS/LED
+ * registers for the requested opmode and record it in cur_opmode.
+ *
+ * If the new opmode changes the OP_MASK measurement mode, the device
+ * is put through a software shutdown first, as the configuration
+ * registers are then rewritten before re-enabling via OP_REG.
+ *
+ * Returns 0 on success or a negative regmap error code.
+ */
+static int gp2ap020a00f_set_operation_mode(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_opmode op)
+{
+ unsigned int op_reg_val;
+ int err;
+
+ if (op != GP2AP020A00F_OPMODE_SHUTDOWN) {
+ err = regmap_read(data->regmap, GP2AP020A00F_OP_REG,
+ &op_reg_val);
+ if (err < 0)
+ return err;
+ /*
+ * Shutdown the device if the operation being executed entails
+ * mode transition.
+ */
+ if ((opmode_regs_settings[op].op_reg & GP2AP020A00F_OP_MASK) !=
+ (op_reg_val & GP2AP020A00F_OP_MASK)) {
+ /* set shutdown mode */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG, GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_SHUTDOWN);
+ if (err < 0)
+ return err;
+ }
+
+ /* Measurement-cycle count (PRST field only). */
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_ALS_REG,
+ GP2AP020A00F_PRST_MASK, opmode_regs_settings[op]
+ .als_reg);
+ if (err < 0)
+ return err;
+
+ /* Level vs pulse interrupt type. */
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_PS_REG,
+ GP2AP020A00F_INTTYPE_MASK, opmode_regs_settings[op]
+ .ps_reg);
+ if (err < 0)
+ return err;
+
+ /* INT terminal routing (ALS, PS or detection output). */
+ err = regmap_update_bits(data->regmap, GP2AP020A00F_LED_REG,
+ GP2AP020A00F_PIN_MASK, opmode_regs_settings[op]
+ .led_reg);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set OP_REG and apply operation mode (power on / off) */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP_MASK | GP2AP020A00F_OP2_MASK |
+ GP2AP020A00F_OP3_MASK | GP2AP020A00F_TYPE_MASK,
+ opmode_regs_settings[op].op_reg);
+ if (err < 0)
+ return err;
+
+ data->cur_opmode = op;
+
+ return 0;
+}
+
+/* True if any ALS consumer (triggers or threshold events) is active. */
+static bool gp2ap020a00f_als_enabled(struct gp2ap020a00f_data *data)
+{
+ return test_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+}
+
+/* True if either proximity ROC event (rising or falling) is active. */
+static bool gp2ap020a00f_prox_detect_enabled(struct gp2ap020a00f_data *data)
+{
+ return test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags) ||
+ test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+}
+
+/*
+ * gp2ap020a00f_write_event_threshold() - write one cached threshold to
+ * its 16-bit little-endian register pair, or zero it when disabling.
+ *
+ * ALS thresholds are divided by 16 in high lux mode (output is scaled
+ * up by 16 in gp2ap020a00f_output_to_lux()) and clamped to 16000 in
+ * low lux mode.  Returns 0 or a negative regmap error.
+ */
+static int gp2ap020a00f_write_event_threshold(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_thresh_val_id th_val_id,
+ bool enable)
+{
+ __le16 thresh_buf = 0;
+ unsigned int thresh_reg_val;
+
+ if (!enable)
+ thresh_reg_val = 0;
+ else if (test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags) &&
+ th_val_id != GP2AP020A00F_THRESH_PL &&
+ th_val_id != GP2AP020A00F_THRESH_PH)
+ /*
+ * For the high lux mode ALS threshold has to be scaled down
+ * to allow for proper comparison with the output value.
+ */
+ thresh_reg_val = data->thresh_val[th_val_id] / 16;
+ else
+ thresh_reg_val = data->thresh_val[th_val_id] > 16000 ?
+ 16000 :
+ data->thresh_val[th_val_id];
+
+ thresh_buf = cpu_to_le16(thresh_reg_val);
+
+ return regmap_bulk_write(data->regmap,
+ GP2AP020A00F_THRESH_REG(th_val_id),
+ (u8 *)&thresh_buf, 2);
+}
+
+/*
+ * gp2ap020a00f_alter_opmode() - add or remove the ALS or PS component
+ * from the current opmode (SHUTDOWN <-> ALS/PS <-> ALS_AND_PS).
+ *
+ * @diff_mode: must be OPMODE_ALS or OPMODE_PS, else -EINVAL.
+ * @add_sub: GP2AP020A00F_ADD_MODE or GP2AP020A00F_SUBTRACT_MODE.
+ */
+static int gp2ap020a00f_alter_opmode(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_opmode diff_mode, int add_sub)
+{
+ enum gp2ap020a00f_opmode new_mode;
+
+ if (diff_mode != GP2AP020A00F_OPMODE_ALS &&
+ diff_mode != GP2AP020A00F_OPMODE_PS)
+ return -EINVAL;
+
+ if (add_sub == GP2AP020A00F_ADD_MODE) {
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_SHUTDOWN)
+ new_mode = diff_mode;
+ else
+ new_mode = GP2AP020A00F_OPMODE_ALS_AND_PS;
+ } else {
+ /* Removing one half of ALS_AND_PS leaves the other half. */
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_ALS_AND_PS)
+ new_mode = (diff_mode == GP2AP020A00F_OPMODE_ALS) ?
+ GP2AP020A00F_OPMODE_PS :
+ GP2AP020A00F_OPMODE_ALS;
+ else
+ new_mode = GP2AP020A00F_OPMODE_SHUTDOWN;
+ }
+
+ return gp2ap020a00f_set_operation_mode(data, new_mode);
+}
+
+/*
+ * gp2ap020a00f_exec_cmd() - central state machine for raw reads,
+ * buffer triggers and threshold events.
+ *
+ * Each command adjusts the opmode, the corresponding flag bit and
+ * (for events) the threshold registers.  Returns 0 on success, -EBUSY
+ * when the command conflicts with the current opmode (e.g. raw reads
+ * require SHUTDOWN; proximity events exclude ALS use), or a negative
+ * regmap error.  No default case: the enum is fully covered.
+ */
+static int gp2ap020a00f_exec_cmd(struct gp2ap020a00f_data *data,
+ enum gp2ap020a00f_cmd cmd)
+{
+ int err = 0;
+
+ switch (cmd) {
+ case GP2AP020A00F_CMD_READ_RAW_CLEAR:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_CLEAR);
+ break;
+ case GP2AP020A00F_CMD_READ_RAW_IR:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_IR);
+ break;
+ case GP2AP020A00F_CMD_READ_RAW_PROXIMITY:
+ if (data->cur_opmode != GP2AP020A00F_OPMODE_SHUTDOWN)
+ return -EBUSY;
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_CLEAR_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ /* First ALS consumer powers the ALS block on. */
+ if (!gp2ap020a00f_als_enabled(data))
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS:
+ clear_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &data->flags);
+ /* Keep ALS on while any other ALS consumer remains. */
+ if (gp2ap020a00f_als_enabled(data))
+ break;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_IR_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data))
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_IR_DIS:
+ clear_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &data->flags);
+ if (gp2ap020a00f_als_enabled(data))
+ break;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_PROX_EN:
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_ADD_MODE);
+ set_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &data->flags);
+ break;
+ case GP2AP020A00F_CMD_TRIGGER_PROX_DIS:
+ clear_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &data->flags);
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_PS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ break;
+ case GP2AP020A00F_CMD_ALS_HIGH_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags))
+ return 0;
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, true);
+ break;
+ case GP2AP020A00F_CMD_ALS_HIGH_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags);
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ if (err < 0)
+ return err;
+ }
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, false);
+ break;
+ case GP2AP020A00F_CMD_ALS_LOW_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags))
+ return 0;
+ if (data->cur_opmode == GP2AP020A00F_OPMODE_PROX_DETECT)
+ return -EBUSY;
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_ADD_MODE);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, true);
+ break;
+ case GP2AP020A00F_CMD_ALS_LOW_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags);
+ if (!gp2ap020a00f_als_enabled(data)) {
+ err = gp2ap020a00f_alter_opmode(data,
+ GP2AP020A00F_OPMODE_ALS,
+ GP2AP020A00F_SUBTRACT_MODE);
+ if (err < 0)
+ return err;
+ }
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, false);
+ break;
+ case GP2AP020A00F_CMD_PROX_HIGH_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags))
+ return 0;
+ /* Proximity events are exclusive with ALS and PS modes. */
+ if (gp2ap020a00f_als_enabled(data) ||
+ data->cur_opmode == GP2AP020A00F_OPMODE_PS)
+ return -EBUSY;
+ if (!gp2ap020a00f_prox_detect_enabled(data)) {
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_PROX_DETECT);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PH, true);
+ break;
+ case GP2AP020A00F_CMD_PROX_HIGH_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_PROX_RISING_EV, &data->flags);
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ return err;
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PH, false);
+ break;
+ case GP2AP020A00F_CMD_PROX_LOW_EV_EN:
+ if (test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags))
+ return 0;
+ if (gp2ap020a00f_als_enabled(data) ||
+ data->cur_opmode == GP2AP020A00F_OPMODE_PS)
+ return -EBUSY;
+ if (!gp2ap020a00f_prox_detect_enabled(data)) {
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_PROX_DETECT);
+ if (err < 0)
+ return err;
+ }
+ set_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PL, true);
+ break;
+ case GP2AP020A00F_CMD_PROX_LOW_EV_DIS:
+ if (!test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags))
+ return 0;
+ clear_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV, &data->flags);
+ err = gp2ap020a00f_set_operation_mode(data,
+ GP2AP020A00F_OPMODE_SHUTDOWN);
+ if (err < 0)
+ return err;
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_PL, false);
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Block until the IRQ handler sets FLAG_DATA_READY, or 1 second
+ * elapses.  Returns 0 when data arrived, -ETIME on timeout.
+ */
+static int wait_conversion_complete_irq(struct gp2ap020a00f_data *data)
+{
+ int ret;
+
+ ret = wait_event_timeout(data->data_ready_queue,
+ test_bit(GP2AP020A00F_FLAG_DATA_READY,
+ &data->flags),
+ GP2AP020A00F_DATA_READY_TIMEOUT);
+ clear_bit(GP2AP020A00F_FLAG_DATA_READY, &data->flags);
+
+ return ret > 0 ? 0 : -ETIME;
+}
+
+/*
+ * gp2ap020a00f_read_output() - wait for a conversion and read one
+ * 16-bit little-endian result register pair into *val.
+ *
+ * A data-ready timeout is deliberately only logged (dev_dbg) and the
+ * register read still proceeds, so *val may hold stale data in that
+ * case.  Returns the regmap_bulk_read() result.
+ */
+static int gp2ap020a00f_read_output(struct gp2ap020a00f_data *data,
+ unsigned int output_reg, int *val)
+{
+ u8 reg_buf[2];
+ int err;
+
+ err = wait_conversion_complete_irq(data);
+ if (err < 0)
+ dev_dbg(&data->client->dev, "data ready timeout\n");
+
+ err = regmap_bulk_read(data->regmap, output_reg, reg_buf, 2);
+ if (err < 0)
+ return err;
+
+ *val = le16_to_cpup((__le16 *)reg_buf);
+
+ return err;
+}
+
+/*
+ * gp2ap020a00f_adjust_lux_mode() - switch between low (x8 range) and
+ * high (x128 range) lux modes based on the latest ALS output, with
+ * hysteresis (enter high mode above 16000, leave below 1000).
+ *
+ * The transition clears both ALS threshold registers, shuts the
+ * device down, changes the range, powers it back up and rewrites the
+ * thresholds scaled for the new mode.
+ *
+ * Returns true when a transition was performed (caller should skip
+ * event evaluation for this sample), false otherwise — including on
+ * register errors, which are only logged.
+ */
+static bool gp2ap020a00f_adjust_lux_mode(struct gp2ap020a00f_data *data,
+ int output_val)
+{
+ u8 new_range = 0xff; /* 0xff == no mode change needed */
+ int err;
+
+ if (!test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags)) {
+ if (output_val > 16000) {
+ set_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags);
+ new_range = GP2AP020A00F_RANGE_A_x128;
+ }
+ } else {
+ if (output_val < 1000) {
+ clear_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags);
+ new_range = GP2AP020A00F_RANGE_A_x8;
+ }
+ }
+
+ if (new_range != 0xff) {
+ /* Clear als threshold registers to avoid spurious
+ * events caused by lux mode transition.
+ */
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, false);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Clearing als threshold register failed.\n");
+ return false;
+ }
+
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, false);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Clearing als threshold register failed.\n");
+ return false;
+ }
+
+ /* Change lux mode */
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_SHUTDOWN);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Shutting down the device failed.\n");
+ return false;
+ }
+
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_ALS_REG,
+ GP2AP020A00F_RANGE_A_MASK,
+ new_range);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting device lux mode failed.\n");
+ return false;
+ }
+
+ err = regmap_update_bits(data->regmap,
+ GP2AP020A00F_OP_REG,
+ GP2AP020A00F_OP3_MASK,
+ GP2AP020A00F_OP3_OPERATION);
+
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Powering up the device failed.\n");
+ return false;
+ }
+
+ /* Adjust als threshold register values to the new lux mode */
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &data->flags)) {
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TH, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting als threshold value failed.\n");
+ return false;
+ }
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &data->flags)) {
+ err = gp2ap020a00f_write_event_threshold(data,
+ GP2AP020A00F_THRESH_TL, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "Adjusting als threshold value failed.\n");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Scale a raw ALS value to lux: x16 in high lux mode (x128 range). */
+static void gp2ap020a00f_output_to_lux(struct gp2ap020a00f_data *data,
+ int *output_val)
+{
+ if (test_bit(GP2AP020A00F_FLAG_LUX_MODE_HI, &data->flags))
+ *output_val *= 16;
+}
+
+/* irq_work callback: polls the IIO trigger outside hard-irq context. */
+static void gp2ap020a00f_iio_trigger_work(struct irq_work *work)
+{
+ struct gp2ap020a00f_data *data =
+ container_of(work, struct gp2ap020a00f_data, work);
+
+ iio_trigger_poll(data->trig);
+}
+
+/*
+ * Threaded IRQ handler used while PROX_DETECT mode is active: reads
+ * the PROX flag from OP_REG and pushes a rising or falling ROC event.
+ * Register read errors are silently dropped (IRQ_HANDLED either way).
+ */
+static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ unsigned int op_reg_val;
+ int ret;
+
+ /* Read interrupt flags */
+ ret = regmap_read(priv->regmap, GP2AP020A00F_OP_REG, &op_reg_val);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (gp2ap020a00f_prox_detect_enabled(priv)) {
+ if (op_reg_val & GP2AP020A00F_PROX_DETECT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ GP2AP020A00F_SCAN_MODE_PROXIMITY,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ } else {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ GP2AP020A00F_SCAN_MODE_PROXIMITY,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Threaded IRQ handler for every mode except PROX_DETECT: acknowledges
+ * the interrupt flags, handles lux-mode transitions, pushes ALS
+ * threshold events, wakes blocked raw reads, and kicks the trigger
+ * irq_work when any buffer trigger is enabled.
+ */
+static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ u8 op_reg_flags, d0_reg_buf[2];
+ unsigned int output_val, op_reg_val;
+ int thresh_val_id, ret;
+
+ /* Read interrupt flags */
+ ret = regmap_read(priv->regmap, GP2AP020A00F_OP_REG,
+ &op_reg_val);
+ if (ret < 0)
+ goto done;
+
+ op_reg_flags = op_reg_val & (GP2AP020A00F_FLAG_A | GP2AP020A00F_FLAG_P
+ | GP2AP020A00F_PROX_DETECT);
+
+ op_reg_val &= (~GP2AP020A00F_FLAG_A & ~GP2AP020A00F_FLAG_P
+ & ~GP2AP020A00F_PROX_DETECT);
+
+ /* Clear interrupt flags (if not in INTTYPE_PULSE mode) */
+ if (priv->cur_opmode != GP2AP020A00F_OPMODE_PROX_DETECT) {
+ ret = regmap_write(priv->regmap, GP2AP020A00F_OP_REG,
+ op_reg_val);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (op_reg_flags & GP2AP020A00F_FLAG_A) {
+ /* Check D0 register to assess if the lux mode
+ * transition is required.
+ */
+ ret = regmap_bulk_read(priv->regmap, GP2AP020A00F_D0_L_REG,
+ d0_reg_buf, 2);
+ if (ret < 0)
+ goto done;
+
+ output_val = le16_to_cpup((__le16 *)d0_reg_buf);
+
+ /* A lux-mode switch consumes this sample: skip events. */
+ if (gp2ap020a00f_adjust_lux_mode(priv, output_val))
+ goto done;
+
+ gp2ap020a00f_output_to_lux(priv, &output_val);
+
+ /*
+ * We need to check output value to distinguish
+ * between high and low ambient light threshold event.
+ */
+ if (test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV, &priv->flags)) {
+ thresh_val_id =
+ GP2AP020A00F_THRESH_VAL_ID(GP2AP020A00F_TH_L_REG);
+ if (output_val > priv->thresh_val[thresh_val_id])
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(
+ IIO_LIGHT,
+ GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) {
+ thresh_val_id =
+ GP2AP020A00F_THRESH_VAL_ID(GP2AP020A00F_TL_L_REG);
+ if (output_val < priv->thresh_val[thresh_val_id])
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(
+ IIO_LIGHT,
+ GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+ }
+
+ /* Raw-read modes: signal gp2ap020a00f_read_output() and stop. */
+ if (priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_CLEAR ||
+ priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_IR ||
+ priv->cur_opmode == GP2AP020A00F_OPMODE_READ_RAW_PROXIMITY) {
+ set_bit(GP2AP020A00F_FLAG_DATA_READY, &priv->flags);
+ wake_up(&priv->data_ready_queue);
+ goto done;
+ }
+
+ if (test_bit(GP2AP020A00F_FLAG_ALS_CLEAR_TRIGGER, &priv->flags) ||
+ test_bit(GP2AP020A00F_FLAG_ALS_IR_TRIGGER, &priv->flags) ||
+ test_bit(GP2AP020A00F_FLAG_PROX_TRIGGER, &priv->flags))
+ /* This fires off the trigger. */
+ irq_work_queue(&priv->work);
+
+done:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Pollfunc bottom half: read each active scan channel's data register
+ * pair into priv->buffer and push the sample set with a timestamp.
+ * Light channels are converted to lux and stored as 32-bit LE values
+ * (4 bytes); the proximity channel stays 16-bit (2 bytes).
+ */
+static irqreturn_t gp2ap020a00f_trigger_handler(int irq, void *data)
+{
+ struct iio_poll_func *pf = data;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct gp2ap020a00f_data *priv = iio_priv(indio_dev);
+ size_t d_size = 0; /* running byte offset into priv->buffer */
+ __le32 light_lux;
+ int i, out_val, ret;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_bulk_read(priv->regmap,
+ GP2AP020A00F_DATA_REG(i),
+ &priv->buffer[d_size], 2);
+ if (ret < 0)
+ goto done;
+
+ if (i == GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR ||
+ i == GP2AP020A00F_SCAN_MODE_LIGHT_IR) {
+ out_val = le16_to_cpup((__le16 *)&priv->buffer[d_size]);
+ gp2ap020a00f_output_to_lux(priv, &out_val);
+ light_lux = cpu_to_le32(out_val);
+ memcpy(&priv->buffer[d_size], (u8 *)&light_lux, 4);
+ d_size += 4;
+ } else {
+ d_size += 2;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer,
+ pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Map a channel type and event direction to the low-byte threshold
+ * register address (PH/PL for proximity, TH/TL for light).
+ *
+ * NOTE(review): the return type is u8 but the default path returns
+ * -EINVAL, which truncates to 0xea; callers only reject it via the
+ * subsequent range checks on the returned address. Consider returning
+ * int instead.
+ */
+static u8 gp2ap020a00f_get_thresh_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction event_dir)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (event_dir == IIO_EV_DIR_RISING)
+ return GP2AP020A00F_PH_L_REG;
+ else
+ return GP2AP020A00F_PL_L_REG;
+ case IIO_LIGHT:
+ if (event_dir == IIO_EV_DIR_RISING)
+ return GP2AP020A00F_TH_L_REG;
+ else
+ return GP2AP020A00F_TL_L_REG;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * IIO write_event_value callback: cache the new threshold in
+ * thresh_val[] and, when the corresponding event is enabled, push it
+ * to the device.  Proximity thresholds must be non-zero (-EINVAL).
+ * An invalid channel/direction is caught via the thresh_val_id range
+ * check (gp2ap020a00f_get_thresh_reg() error is a truncated u8).
+ */
+static int gp2ap020a00f_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+ bool event_en = false;
+ u8 thresh_val_id;
+ u8 thresh_reg_l;
+ int err = 0;
+
+ mutex_lock(&data->lock);
+
+ thresh_reg_l = gp2ap020a00f_get_thresh_reg(chan, dir);
+ thresh_val_id = GP2AP020A00F_THRESH_VAL_ID(thresh_reg_l);
+
+ if (thresh_val_id > GP2AP020A00F_THRESH_PH) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+
+ switch (thresh_reg_l) {
+ case GP2AP020A00F_TH_L_REG:
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_RISING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_TL_L_REG:
+ event_en = test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_PH_L_REG:
+ if (val == 0) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_RISING_EV,
+ &data->flags);
+ break;
+ case GP2AP020A00F_PL_L_REG:
+ if (val == 0) {
+ err = -EINVAL;
+ goto error_unlock;
+ }
+ event_en = test_bit(GP2AP020A00F_FLAG_PROX_FALLING_EV,
+ &data->flags);
+ break;
+ }
+
+ data->thresh_val[thresh_val_id] = val;
+ err = gp2ap020a00f_write_event_threshold(data, thresh_val_id,
+ event_en);
+error_unlock:
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+/*
+ * Report the cached threshold for the given channel/direction.
+ *
+ * err is initialized to IIO_VAL_INT so the success path tells the IIO core
+ * that *val holds a plain integer; failures replace it with -EINVAL.
+ */
+static int gp2ap020a00f_read_event_val(struct iio_dev *indio_dev,
+				       const struct iio_chan_spec *chan,
+				       enum iio_event_type type,
+				       enum iio_event_direction dir,
+				       enum iio_event_info info,
+				       int *val, int *val2)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	u8 thresh_reg_l;
+	int err = IIO_VAL_INT;
+
+	mutex_lock(&data->lock);
+
+	thresh_reg_l = gp2ap020a00f_get_thresh_reg(chan, dir);
+
+	/* PH_L is the highest valid threshold register in this mapping. */
+	if (thresh_reg_l > GP2AP020A00F_PH_L_REG) {
+		err = -EINVAL;
+		goto error_unlock;
+	}
+
+	*val = data->thresh_val[GP2AP020A00F_THRESH_VAL_ID(thresh_reg_l)];
+
+error_unlock:
+	mutex_unlock(&data->lock);
+
+	return err;
+}
+
+/*
+ * Enable or disable both proximity (high and low) threshold events and swap
+ * the IRQ handler accordingly: proximity sensing uses a dedicated handler
+ * triggered on both edges, while the default mode uses the generic threshold
+ * event handler on the falling edge only.
+ *
+ * NOTE(review): the existing IRQ is freed unconditionally before the new one
+ * is requested; if request_threaded_irq() fails the device is left with no
+ * handler installed.  Callers hold data->lock.
+ */
+static int gp2ap020a00f_write_prox_event_config(struct iio_dev *indio_dev,
+						int state)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	enum gp2ap020a00f_cmd cmd_high_ev, cmd_low_ev;
+	int err;
+
+	cmd_high_ev = state ? GP2AP020A00F_CMD_PROX_HIGH_EV_EN :
+			      GP2AP020A00F_CMD_PROX_HIGH_EV_DIS;
+	cmd_low_ev = state ? GP2AP020A00F_CMD_PROX_LOW_EV_EN :
+			     GP2AP020A00F_CMD_PROX_LOW_EV_DIS;
+
+	/*
+	 * In order to enable proximity detection feature in the device
+	 * both high and low threshold registers have to be written
+	 * with different values, greater than zero.
+	 */
+	if (state) {
+		if (data->thresh_val[GP2AP020A00F_THRESH_PL] == 0)
+			return -EINVAL;
+
+		if (data->thresh_val[GP2AP020A00F_THRESH_PH] == 0)
+			return -EINVAL;
+	}
+
+	err = gp2ap020a00f_exec_cmd(data, cmd_high_ev);
+	if (err < 0)
+		return err;
+
+	err = gp2ap020a00f_exec_cmd(data, cmd_low_ev);
+	if (err < 0)
+		return err;
+
+	/* Release the current handler before installing the mode-specific one. */
+	free_irq(data->client->irq, indio_dev);
+
+	if (state)
+		err = request_threaded_irq(data->client->irq, NULL,
+					&gp2ap020a00f_prox_sensing_handler,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"gp2ap020a00f_prox_sensing",
+					indio_dev);
+	else {
+		err = request_threaded_irq(data->client->irq, NULL,
+					&gp2ap020a00f_thresh_event_handler,
+					IRQF_TRIGGER_FALLING |
+					IRQF_ONESHOT,
+					"gp2ap020a00f_thresh_event",
+					indio_dev);
+	}
+
+	return err;
+}
+
+/*
+ * IIO write_event_config callback: enable/disable a threshold event.
+ *
+ * Proximity events are delegated to gp2ap020a00f_write_prox_event_config()
+ * (both directions are toggled together there); ambient-light events map
+ * directly to the corresponding high/low enable commands.
+ */
+static int gp2ap020a00f_write_event_config(struct iio_dev *indio_dev,
+					   const struct iio_chan_spec *chan,
+					   enum iio_event_type type,
+					   enum iio_event_direction dir,
+					   int state)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	enum gp2ap020a00f_cmd cmd;
+	int err;
+
+	mutex_lock(&data->lock);
+
+	switch (chan->type) {
+	case IIO_PROXIMITY:
+		err = gp2ap020a00f_write_prox_event_config(indio_dev, state);
+		break;
+	case IIO_LIGHT:
+		if (dir == IIO_EV_DIR_RISING) {
+			cmd = state ? GP2AP020A00F_CMD_ALS_HIGH_EV_EN :
+				      GP2AP020A00F_CMD_ALS_HIGH_EV_DIS;
+			err = gp2ap020a00f_exec_cmd(data, cmd);
+		} else {
+			cmd = state ? GP2AP020A00F_CMD_ALS_LOW_EV_EN :
+				      GP2AP020A00F_CMD_ALS_LOW_EV_DIS;
+			err = gp2ap020a00f_exec_cmd(data, cmd);
+		}
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	mutex_unlock(&data->lock);
+
+	return err;
+}
+
+/*
+ * IIO read_event_config callback: report whether the threshold event for
+ * the given channel/direction is currently enabled.  The enable state is
+ * tracked in data->flags; returns 1/0, or -EINVAL for unknown channels.
+ */
+static int gp2ap020a00f_read_event_config(struct iio_dev *indio_dev,
+					   const struct iio_chan_spec *chan,
+					   enum iio_event_type type,
+					   enum iio_event_direction dir)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	bool rising = (dir == IIO_EV_DIR_RISING);
+	int flag_bit;
+	int ret;
+
+	mutex_lock(&data->lock);
+
+	switch (chan->type) {
+	case IIO_PROXIMITY:
+		flag_bit = rising ? GP2AP020A00F_FLAG_PROX_RISING_EV :
+				    GP2AP020A00F_FLAG_PROX_FALLING_EV;
+		ret = test_bit(flag_bit, &data->flags);
+		break;
+	case IIO_LIGHT:
+		flag_bit = rising ? GP2AP020A00F_FLAG_ALS_RISING_EV :
+				    GP2AP020A00F_FLAG_ALS_FALLING_EV;
+		ret = test_bit(flag_bit, &data->flags);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+/*
+ * Perform a single one-shot measurement for the given channel: issue the
+ * matching READ_RAW command, fetch the output register, then shut the device
+ * back down.  Clear/IR results are converted to lux in place.
+ *
+ * NOTE(review): err from gp2ap020a00f_read_output() is overwritten by the
+ * subsequent shutdown call, so a successful read followed by a failed
+ * shutdown returns the shutdown error (and a failed read followed by a
+ * successful shutdown returns 0 with *val unset) — worth confirming whether
+ * this is intentional best-effort behavior.
+ */
+static int gp2ap020a00f_read_channel(struct gp2ap020a00f_data *data,
+				struct iio_chan_spec const *chan, int *val)
+{
+	enum gp2ap020a00f_cmd cmd;
+	int err;
+
+	switch (chan->scan_index) {
+	case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+		cmd = GP2AP020A00F_CMD_READ_RAW_CLEAR;
+		break;
+	case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+		cmd = GP2AP020A00F_CMD_READ_RAW_IR;
+		break;
+	case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+		cmd = GP2AP020A00F_CMD_READ_RAW_PROXIMITY;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	err = gp2ap020a00f_exec_cmd(data, cmd);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"gp2ap020a00f_exec_cmd failed\n");
+		goto error_ret;
+	}
+
+	err = gp2ap020a00f_read_output(data, chan->address, val);
+	if (err < 0)
+		dev_err(&data->client->dev,
+			"gp2ap020a00f_read_output failed\n");
+
+	err = gp2ap020a00f_set_operation_mode(data,
+					GP2AP020A00F_OPMODE_SHUTDOWN);
+	if (err < 0)
+		dev_err(&data->client->dev,
+			"Failed to shut down the device.\n");
+
+	if (cmd == GP2AP020A00F_CMD_READ_RAW_CLEAR ||
+	    cmd == GP2AP020A00F_CMD_READ_RAW_IR)
+		gp2ap020a00f_output_to_lux(data, val);
+
+error_ret:
+	return err;
+}
+
+/*
+ * IIO read_raw callback.  Only IIO_CHAN_INFO_RAW is supported; one-shot
+ * reads are rejected with -EBUSY while the triggered buffer is active
+ * because both paths drive the same hardware state machine.
+ */
+static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2,
+			   long mask)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	int err = -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev)) {
+			err = -EBUSY;
+			goto error_unlock;
+		}
+
+		err = gp2ap020a00f_read_channel(data, chan, val);
+		break;
+	}
+
+error_unlock:
+	mutex_unlock(&data->lock);
+
+	/* Success is reported to the core as a plain integer value. */
+	return err < 0 ? err : IIO_VAL_INT;
+}
+
+/* Ambient-light channel events: rising/falling threshold crossings. */
+static const struct iio_event_spec gp2ap020a00f_event_spec_light[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+/* Proximity channel events: rate-of-change in both directions. */
+static const struct iio_event_spec gp2ap020a00f_event_spec_prox[] = {
+	{
+		.type = IIO_EV_TYPE_ROC,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_ROC,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+				 BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+/*
+ * Channel map: two modified light channels (clear and IR, 24 valid bits in
+ * 32-bit storage) backed by the D0/D1 registers, one 16-bit proximity
+ * channel backed by D2, plus the soft timestamp.
+ */
+static const struct iio_chan_spec gp2ap020a00f_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.channel2 = IIO_MOD_LIGHT_CLEAR,
+		.modified = 1,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 24,
+			.shift = 0,
+			.storagebits = 32,
+			.endianness = IIO_LE,
+		},
+		.scan_index = GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR,
+		.address = GP2AP020A00F_D0_L_REG,
+		.event_spec = gp2ap020a00f_event_spec_light,
+		.num_event_specs = ARRAY_SIZE(gp2ap020a00f_event_spec_light),
+	},
+	{
+		.type = IIO_LIGHT,
+		.channel2 = IIO_MOD_LIGHT_IR,
+		.modified = 1,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 24,
+			.shift = 0,
+			.storagebits = 32,
+			.endianness = IIO_LE,
+		},
+		.scan_index = GP2AP020A00F_SCAN_MODE_LIGHT_IR,
+		.address = GP2AP020A00F_D1_L_REG,
+	},
+	{
+		.type = IIO_PROXIMITY,
+		.modified = 0,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 16,
+			.shift = 0,
+			.storagebits = 16,
+			.endianness = IIO_LE,
+		},
+		.scan_index = GP2AP020A00F_SCAN_MODE_PROXIMITY,
+		.address = GP2AP020A00F_D2_L_REG,
+		.event_spec = gp2ap020a00f_event_spec_prox,
+		.num_event_specs = ARRAY_SIZE(gp2ap020a00f_event_spec_prox),
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(GP2AP020A00F_CHAN_TIMESTAMP),
+};
+
+/* IIO core callbacks for sysfs raw reads and event configuration. */
+static const struct iio_info gp2ap020a00f_info = {
+	.read_raw = &gp2ap020a00f_read_raw,
+	.read_event_value = &gp2ap020a00f_read_event_val,
+	.read_event_config = &gp2ap020a00f_read_event_config,
+	.write_event_value = &gp2ap020a00f_write_event_val,
+	.write_event_config = &gp2ap020a00f_write_event_config,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Buffer postenable: turn on the per-channel hardware triggers selected by
+ * the scan mask, allocate the bounce buffer used by the trigger handler,
+ * then let the IIO core attach the poll function.
+ *
+ * NOTE(review): if the allocation or core postenable fails, triggers that
+ * were already enabled in the loop are not rolled back here.
+ */
+static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	int i, err = 0;
+
+	mutex_lock(&data->lock);
+
+	/*
+	 * Enable triggers according to the scan_mask. Enabling either
+	 * LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS
+	 * module in the device, which generates samples in both D0 (clear)
+	 * and D1 (ir) registers. As the two registers are bound to the
+	 * two separate IIO channels they are treated in the driver logic
+	 * as if they were controlled independently.
+	 */
+	for_each_set_bit(i, indio_dev->active_scan_mask,
+		indio_dev->masklength) {
+		switch (i) {
+		case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_CLEAR_EN);
+			break;
+		case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_IR_EN);
+			break;
+		case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_PROX_EN);
+			break;
+		}
+	}
+
+	if (err < 0)
+		goto error_unlock;
+
+	/* Scratch buffer sized for the enabled scan elements + timestamp. */
+	data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+	if (!data->buffer) {
+		err = -ENOMEM;
+		goto error_unlock;
+	}
+
+	err = iio_triggered_buffer_postenable(indio_dev);
+
+error_unlock:
+	mutex_unlock(&data->lock);
+
+	return err;
+}
+
+/*
+ * Buffer predisable: detach the poll function via the IIO core, then turn
+ * off the per-channel hardware triggers and free the bounce buffer.
+ *
+ * NOTE(review): err only reflects the last command executed in the loop, so
+ * an earlier failure can be masked by a later success; and the buffer is
+ * only freed when the final err is 0, which leaks it on failure paths.
+ */
+static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	int i, err;
+
+	mutex_lock(&data->lock);
+
+	err = iio_triggered_buffer_predisable(indio_dev);
+	if (err < 0)
+		goto error_unlock;
+
+	for_each_set_bit(i, indio_dev->active_scan_mask,
+		indio_dev->masklength) {
+		switch (i) {
+		case GP2AP020A00F_SCAN_MODE_LIGHT_CLEAR:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_CLEAR_DIS);
+			break;
+		case GP2AP020A00F_SCAN_MODE_LIGHT_IR:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_IR_DIS);
+			break;
+		case GP2AP020A00F_SCAN_MODE_PROXIMITY:
+			err = gp2ap020a00f_exec_cmd(data,
+					GP2AP020A00F_CMD_TRIGGER_PROX_DIS);
+			break;
+		}
+	}
+
+	if (err == 0)
+		kfree(data->buffer);
+
+error_unlock:
+	mutex_unlock(&data->lock);
+
+	return err;
+}
+
+/* Buffer state-transition hooks used by iio_triggered_buffer_setup(). */
+static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
+	.postenable = &gp2ap020a00f_buffer_postenable,
+	.predisable = &gp2ap020a00f_buffer_predisable,
+};
+
+/* The driver's own trigger has no extra ops beyond ownership. */
+static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+/*
+ * I2C probe: acquire the "vled" regulator, set up regmap, write the initial
+ * register table, register the triggered buffer, device-owned trigger, IRQ
+ * handler and finally the IIO device.
+ *
+ * indio_dev and the trigger are devm-allocated; the regulator, IRQ, buffer
+ * and trigger registration are unwound manually on the error paths below.
+ */
+static int gp2ap020a00f_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct gp2ap020a00f_data *data;
+	struct iio_dev *indio_dev;
+	struct regmap *regmap;
+	int err;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+
+	data->vled_reg = devm_regulator_get(&client->dev, "vled");
+	if (IS_ERR(data->vled_reg))
+		return PTR_ERR(data->vled_reg);
+
+	err = regulator_enable(data->vled_reg);
+	if (err)
+		return err;
+
+	regmap = devm_regmap_init_i2c(client, &gp2ap020a00f_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Regmap initialization failed.\n");
+		err = PTR_ERR(regmap);
+		goto error_regulator_disable;
+	}
+
+	/* Initialize device registers */
+	err = regmap_bulk_write(regmap, GP2AP020A00F_OP_REG,
+				gp2ap020a00f_reg_init_tab,
+				ARRAY_SIZE(gp2ap020a00f_reg_init_tab));
+
+	if (err < 0) {
+		dev_err(&client->dev, "Device initialization failed.\n");
+		goto error_regulator_disable;
+	}
+
+	i2c_set_clientdata(client, indio_dev);
+
+	data->client = client;
+	data->cur_opmode = GP2AP020A00F_OPMODE_SHUTDOWN;
+	data->regmap = regmap;
+	init_waitqueue_head(&data->data_ready_queue);
+
+	mutex_init(&data->lock);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = gp2ap020a00f_channels;
+	indio_dev->num_channels = ARRAY_SIZE(gp2ap020a00f_channels);
+	indio_dev->info = &gp2ap020a00f_info;
+	indio_dev->name = id->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Allocate buffer */
+	err = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		&gp2ap020a00f_trigger_handler, &gp2ap020a00f_buffer_setup_ops);
+	if (err < 0)
+		goto error_regulator_disable;
+
+	/* Allocate trigger */
+	data->trig = devm_iio_trigger_alloc(&client->dev, "%s-trigger",
+							indio_dev->name);
+	if (data->trig == NULL) {
+		err = -ENOMEM;
+		dev_err(&indio_dev->dev, "Failed to allocate iio trigger.\n");
+		goto error_uninit_buffer;
+	}
+
+	/* This needs to be requested here for read_raw calls to work. */
+	err = request_threaded_irq(client->irq, NULL,
+				   &gp2ap020a00f_thresh_event_handler,
+				   IRQF_TRIGGER_FALLING |
+				   IRQF_ONESHOT,
+				   "gp2ap020a00f_als_event",
+				   indio_dev);
+	if (err < 0) {
+		dev_err(&client->dev, "Irq request failed.\n");
+		goto error_uninit_buffer;
+	}
+
+	data->trig->ops = &gp2ap020a00f_trigger_ops;
+	data->trig->dev.parent = &data->client->dev;
+
+	init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work);
+
+	err = iio_trigger_register(data->trig);
+	if (err < 0) {
+		dev_err(&client->dev, "Failed to register iio trigger.\n");
+		goto error_free_irq;
+	}
+
+	err = iio_device_register(indio_dev);
+	if (err < 0)
+		goto error_trigger_unregister;
+
+	return 0;
+
+error_trigger_unregister:
+	iio_trigger_unregister(data->trig);
+error_free_irq:
+	free_irq(client->irq, indio_dev);
+error_uninit_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_regulator_disable:
+	regulator_disable(data->vled_reg);
+
+	return err;
+}
+
+/*
+ * I2C remove: shut the sensor down, then tear down in reverse probe order
+ * (device, trigger, IRQ, buffer, regulator).
+ *
+ * NOTE(review): the shutdown command is issued while the IIO device is
+ * still registered and userspace can still trigger reads — confirm this
+ * ordering is safe against concurrent access.
+ */
+static int gp2ap020a00f_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct gp2ap020a00f_data *data = iio_priv(indio_dev);
+	int err;
+
+	err = gp2ap020a00f_set_operation_mode(data,
+					GP2AP020A00F_OPMODE_SHUTDOWN);
+	if (err < 0)
+		dev_err(&indio_dev->dev, "Failed to power off the device.\n");
+
+	iio_device_unregister(indio_dev);
+	iio_trigger_unregister(data->trig);
+	free_irq(client->irq, indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	regulator_disable(data->vled_reg);
+
+	return 0;
+}
+
+/* I2C id table; GP2A_I2C_NAME matches both platform and DT instantiation. */
+static const struct i2c_device_id gp2ap020a00f_id[] = {
+	{ GP2A_I2C_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, gp2ap020a00f_id);
+
+#ifdef CONFIG_OF
+/* Device-tree match table (only compiled in when CONFIG_OF is set). */
+static const struct of_device_id gp2ap020a00f_of_match[] = {
+	{ .compatible = "sharp,gp2ap020a00f" },
+	{ }
+};
+#endif
+
+static struct i2c_driver gp2ap020a00f_driver = {
+	.driver = {
+		.name	= GP2A_I2C_NAME,
+		.of_match_table = of_match_ptr(gp2ap020a00f_of_match),
+		.owner	= THIS_MODULE,
+	},
+	.probe		= gp2ap020a00f_probe,
+	.remove		= gp2ap020a00f_remove,
+	.id_table	= gp2ap020a00f_id,
+};
+
+module_i2c_driver(gp2ap020a00f_driver);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("Sharp GP2AP020A00F Proximity/ALS sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
new file mode 100644
index 00000000000000..a5283d75c0961d
--- /dev/null
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -0,0 +1,392 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_ILLUM 0	/* scan index of the only channel */
+
+/* Per-device state for the HID ambient-light sensor. */
+struct als_state {
+	struct hid_sensor_hub_callbacks callbacks;	/* sensor-hub event hooks */
+	struct hid_sensor_common common_attributes;	/* shared HID sensor attrs */
+	struct hid_sensor_hub_attribute_info als_illum;	/* illuminance report info */
+	u32 illum;			/* last captured illuminance sample */
+	int scale_pre_decml;		/* scale, integer part */
+	int scale_post_decml;		/* scale, fractional part */
+	int scale_precision;		/* IIO_VAL_* code returned for scale */
+	int value_offset;		/* offset reported for CHAN_INFO_OFFSET */
+};
+
+/*
+ * Channel definitions: a single combined-light intensity channel; the
+ * scan_type width is patched at probe time from the report descriptor
+ * (see als_adjust_channel_bit_mask()).
+ */
+static const struct iio_chan_spec als_channels[] = {
+	{
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.channel2 = IIO_MOD_LIGHT_BOTH,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+		BIT(IIO_CHAN_INFO_SCALE) |
+		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+		BIT(IIO_CHAN_INFO_HYSTERESIS),
+		.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
+	}
+};
+
+/* Patch the channel's scan_type to match the width in the report descriptor. */
+static void als_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+					int channel, int size)
+{
+	struct iio_chan_spec *chan = &channels[channel];
+
+	chan->scan_type.sign = 's';
+	/* The descriptor reports the sample width in bytes. */
+	chan->scan_type.realbits = size * 8;
+	/* Samples are captured into at most a u32. */
+	chan->scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/*
+ * Channel read_raw handler.
+ *
+ * Raw reads power the sensor up, wait roughly two polling intervals for a
+ * fresh sample, fetch the value synchronously from the sensor hub and power
+ * the sensor back down.  Scale/offset/sampling-frequency/hysteresis queries
+ * are served from cached attributes.
+ *
+ * Consistency fix: use IIO_CHAN_INFO_RAW (enumerator value 0) instead of a
+ * bare "case 0:" so the mask is matched the same way as in the sibling
+ * hid-sensor-prox driver.  Behavior is unchanged.
+ */
+static int als_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2,
+			      long mask)
+{
+	struct als_state *als_state = iio_priv(indio_dev);
+	int report_id = -1;
+	u32 address;
+	int ret_type;
+	s32 poll_value;
+
+	*val = 0;
+	*val2 = 0;
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->scan_index) {
+		case  CHANNEL_SCAN_INDEX_ILLUM:
+			report_id = als_state->als_illum.report_id;
+			address =
+			HID_USAGE_SENSOR_LIGHT_ILLUM;
+			break;
+		default:
+			report_id = -1;
+			break;
+		}
+		if (report_id >= 0) {
+			poll_value = hid_sensor_read_poll_value(
+					&als_state->common_attributes);
+			if (poll_value < 0)
+				return -EINVAL;
+
+			hid_sensor_power_state(&als_state->common_attributes,
+						true);
+			/* Wait ~two poll periods for a fresh sample. */
+			msleep_interruptible(poll_value * 2);
+
+			*val = sensor_hub_input_attr_get_raw_value(
+					als_state->common_attributes.hsdev,
+					HID_USAGE_SENSOR_ALS, address,
+					report_id);
+			hid_sensor_power_state(&als_state->common_attributes,
+						false);
+		} else {
+			*val = 0;
+			return -EINVAL;
+		}
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		*val = als_state->scale_pre_decml;
+		*val2 = als_state->scale_post_decml;
+		ret_type = als_state->scale_precision;
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = als_state->value_offset;
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret_type = hid_sensor_read_samp_freq_value(
+				&als_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret_type = hid_sensor_read_raw_hyst_value(
+				&als_state->common_attributes, val, val2);
+		break;
+	default:
+		ret_type = -EINVAL;
+		break;
+	}
+
+	return ret_type;
+}
+
+/*
+ * Channel write_raw handler: only the sampling frequency and hysteresis
+ * attributes are writable; everything else is rejected with -EINVAL.
+ */
+static int als_write_raw(struct iio_dev *indio_dev,
+		       struct iio_chan_spec const *chan,
+		       int val,
+		       int val2,
+		       long mask)
+{
+	struct als_state *als_state = iio_priv(indio_dev);
+
+	if (mask == IIO_CHAN_INFO_SAMP_FREQ)
+		return hid_sensor_write_samp_freq_value(
+				&als_state->common_attributes, val, val2);
+
+	if (mask == IIO_CHAN_INFO_HYSTERESIS)
+		return hid_sensor_write_raw_hyst_value(
+				&als_state->common_attributes, val, val2);
+
+	return -EINVAL;
+}
+
+/* IIO core callbacks for the ALS device. */
+static const struct iio_info als_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &als_read_raw,
+	.write_raw = &als_write_raw,
+};
+
+/* Function to push data to buffer */
+/* 'len' is unused here; iio_push_to_buffers() takes a full scan. */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+					int len)
+{
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+	iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+/*
+ * Pushes the last captured illuminance sample to the IIO buffer, but only
+ * when the buffer path is armed (data_ready set by the trigger code).
+ */
+static int als_proc_event(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct als_state *als_state = iio_priv(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "als_proc_event\n");
+	if (atomic_read(&als_state->common_attributes.data_ready))
+		hid_sensor_push_data(indio_dev,
+				     &als_state->illum,
+				     sizeof(als_state->illum));
+
+	return 0;
+}
+
+/* Capture samples in local storage */
+/*
+ * Sensor-hub capture callback: stash the raw illuminance value so
+ * als_proc_event() can push it once the full report has arrived.
+ * Returns -EINVAL for usages this driver does not handle.
+ */
+static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				size_t raw_len, char *raw_data,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct als_state *als_state = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	switch (usage_id) {
+	case HID_USAGE_SENSOR_LIGHT_ILLUM:
+		/* assumes the report field is at least 32 bits — raw_len unchecked */
+		als_state->illum = *(u32 *)raw_data;
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Parse report which is specific to an usage id*/
+/*
+ * Locate the illuminance field in the input report, size the IIO channel
+ * accordingly, derive the exposed scale from the HID unit/exponent, and
+ * fall back to the light-class sensitivity field when no per-field
+ * modifier was found by the common-attribute parser.
+ */
+static int als_parse_report(struct platform_device *pdev,
+				struct hid_sensor_hub_device *hsdev,
+				struct iio_chan_spec *channels,
+				unsigned usage_id,
+				struct als_state *st)
+{
+	int ret;
+
+	ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+			usage_id,
+			HID_USAGE_SENSOR_LIGHT_ILLUM,
+			&st->als_illum);
+	if (ret < 0)
+		return ret;
+	als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_ILLUM,
+				    st->als_illum.size);
+
+	dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index,
+		st->als_illum.report_id);
+
+	st->scale_precision = hid_sensor_format_scale(
+				HID_USAGE_SENSOR_ALS,
+				&st->als_illum,
+				&st->scale_pre_decml, &st->scale_post_decml);
+
+	/* Set Sensitivity field ids, when there is no individual modifier */
+	if (st->common_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_DATA_LIGHT,
+			&st->common_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->common_attributes.sensitivity.index,
+			st->common_attributes.sensitivity.report_id);
+	}
+	return ret;
+}
+
+/* Function to initialize the processing for usage id */
+/*
+ * Platform probe: parse the HID report into a duplicated channel array
+ * (kmemdup so scan_type can be patched per device), set up the triggered
+ * buffer and HID sensor trigger, register the IIO device, and finally hook
+ * the sensor-hub callbacks.  Error paths unwind in reverse order; the
+ * duplicated channels are freed manually (indio_dev itself is devm-managed).
+ */
+static int hid_als_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	static const char *name = "als";
+	struct iio_dev *indio_dev;
+	struct als_state *als_state;
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_chan_spec *channels;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct als_state));
+	if (!indio_dev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, indio_dev);
+
+	als_state = iio_priv(indio_dev);
+	als_state->common_attributes.hsdev = hsdev;
+	als_state->common_attributes.pdev = pdev;
+
+	ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_ALS,
+					&als_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup common attributes\n");
+		return ret;
+	}
+
+	/* Duplicate the template so per-device scan_type tweaks are isolated. */
+	channels = kmemdup(als_channels, sizeof(als_channels), GFP_KERNEL);
+	if (!channels) {
+		dev_err(&pdev->dev, "failed to duplicate channels\n");
+		return -ENOMEM;
+	}
+
+	ret = als_parse_report(pdev, hsdev, channels,
+			       HID_USAGE_SENSOR_ALS, als_state);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup attributes\n");
+		goto error_free_dev_mem;
+	}
+
+	indio_dev->channels = channels;
+	indio_dev->num_channels =
+				ARRAY_SIZE(als_channels);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->info = &als_info;
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		NULL, NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+		goto error_free_dev_mem;
+	}
+	atomic_set(&als_state->common_attributes.data_ready, 0);
+	ret = hid_sensor_setup_trigger(indio_dev, name,
+				&als_state->common_attributes);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "trigger setup failed\n");
+		goto error_unreg_buffer_funcs;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "device register failed\n");
+		goto error_remove_trigger;
+	}
+
+	als_state->callbacks.send_event = als_proc_event;
+	als_state->callbacks.capture_sample = als_capture_sample;
+	als_state->callbacks.pdev = pdev;
+	ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_ALS,
+					&als_state->callbacks);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "callback reg failed\n");
+		goto error_iio_unreg;
+	}
+
+	return ret;
+
+error_iio_unreg:
+	iio_device_unregister(indio_dev);
+error_remove_trigger:
+	hid_sensor_remove_trigger(&als_state->common_attributes);
+error_unreg_buffer_funcs:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+	kfree(indio_dev->channels);
+	return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+/* Platform remove: unwind hid_als_probe() in reverse order. */
+static int hid_als_remove(struct platform_device *pdev)
+{
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct als_state *als_state = iio_priv(indio_dev);
+
+	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
+	iio_device_unregister(indio_dev);
+	hid_sensor_remove_trigger(&als_state->common_attributes);
+	iio_triggered_buffer_cleanup(indio_dev);
+	/* Channels were kmemdup'ed in probe, so free them here. */
+	kfree(indio_dev->channels);
+
+	return 0;
+}
+
+/* Matched against devices created by the HID sensor hub core. */
+static struct platform_device_id hid_als_ids[] = {
+	{
+		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+		.name = "HID-SENSOR-200041",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_als_ids);
+
+static struct platform_driver hid_als_platform_driver = {
+	.id_table = hid_als_ids,
+	.driver = {
+		.name	= KBUILD_MODNAME,
+	},
+	.probe		= hid_als_probe,
+	.remove		= hid_als_remove,
+};
+module_platform_driver(hid_als_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor ALS");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
new file mode 100644
index 00000000000000..f5a514698fd861
--- /dev/null
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -0,0 +1,384 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESENCE 0	/* scan index of the only channel */
+
+/* Per-device state for the HID proximity (human presence) sensor. */
+struct prox_state {
+	struct hid_sensor_hub_callbacks callbacks;	/* sensor-hub event hooks */
+	struct hid_sensor_common common_attributes;	/* shared HID sensor attrs */
+	struct hid_sensor_hub_attribute_info prox_attr;	/* presence report info */
+	u32 human_presence;		/* last captured presence sample */
+};
+
+/*
+ * Channel definitions: a single proximity channel whose scan_type width is
+ * patched at probe time from the report descriptor.
+ *
+ * NOTE(review): .modified = 1 paired with .channel2 = IIO_NO_MOD is unusual
+ * (a modified channel normally names a real modifier) — confirm against the
+ * IIO core's sysfs naming behavior.
+ */
+static const struct iio_chan_spec prox_channels[] = {
+	{
+		.type = IIO_PROXIMITY,
+		.modified = 1,
+		.channel2 = IIO_NO_MOD,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+		BIT(IIO_CHAN_INFO_SCALE) |
+		BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+		BIT(IIO_CHAN_INFO_HYSTERESIS),
+		.scan_index = CHANNEL_SCAN_INDEX_PRESENCE,
+	}
+};
+
+/* Patch the channel's scan_type to match the width in the report descriptor. */
+static void prox_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+					int channel, int size)
+{
+	struct iio_chan_spec *chan = &channels[channel];
+
+	chan->scan_type.sign = 's';
+	/* The descriptor reports the sample width in bytes. */
+	chan->scan_type.realbits = size * 8;
+	/* Samples are captured into at most a u32. */
+	chan->scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+/*
+ * Raw reads power the sensor up, wait roughly two polling intervals,
+ * synchronously fetch the presence value from the sensor hub, then power
+ * the sensor back down.
+ *
+ * NOTE(review): SCALE returns the raw HID unit code and OFFSET returns the
+ * converted unit exponent — these look swapped relative to the usual IIO
+ * scale/offset semantics; confirm against the HID sensor spec and the
+ * common-attribute helpers.
+ */
+static int prox_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2,
+			      long mask)
+{
+	struct prox_state *prox_state = iio_priv(indio_dev);
+	int report_id = -1;
+	u32 address;
+	int ret_type;
+	s32 poll_value;
+
+	*val = 0;
+	*val2 = 0;
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->scan_index) {
+		case  CHANNEL_SCAN_INDEX_PRESENCE:
+			report_id = prox_state->prox_attr.report_id;
+			address =
+			HID_USAGE_SENSOR_HUMAN_PRESENCE;
+			break;
+		default:
+			report_id = -1;
+			break;
+		}
+		if (report_id >= 0) {
+			poll_value = hid_sensor_read_poll_value(
+					&prox_state->common_attributes);
+			if (poll_value < 0)
+				return -EINVAL;
+
+			hid_sensor_power_state(&prox_state->common_attributes,
+						true);
+
+			/* Wait ~two poll periods for a fresh sample. */
+			msleep_interruptible(poll_value * 2);
+
+			*val = sensor_hub_input_attr_get_raw_value(
+				prox_state->common_attributes.hsdev,
+				HID_USAGE_SENSOR_PROX, address,
+				report_id);
+			hid_sensor_power_state(&prox_state->common_attributes,
+						false);
+		} else {
+			*val = 0;
+			return -EINVAL;
+		}
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		*val = prox_state->prox_attr.units;
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = hid_sensor_convert_exponent(
+				prox_state->prox_attr.unit_expo);
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret_type = hid_sensor_read_samp_freq_value(
+				&prox_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret_type = hid_sensor_read_raw_hyst_value(
+				&prox_state->common_attributes, val, val2);
+		break;
+	default:
+		ret_type = -EINVAL;
+		break;
+	}
+
+	return ret_type;
+}
+
+/*
+ * Channel write_raw handler: only the sampling frequency and hysteresis
+ * attributes are writable; everything else is rejected with -EINVAL.
+ */
+static int prox_write_raw(struct iio_dev *indio_dev,
+		       struct iio_chan_spec const *chan,
+		       int val,
+		       int val2,
+		       long mask)
+{
+	struct prox_state *prox_state = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		return hid_sensor_write_samp_freq_value(
+				&prox_state->common_attributes, val, val2);
+	case IIO_CHAN_INFO_HYSTERESIS:
+		return hid_sensor_write_raw_hyst_value(
+				&prox_state->common_attributes, val, val2);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* IIO core callbacks for the proximity device. */
+static const struct iio_info prox_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &prox_read_raw,
+	.write_raw = &prox_write_raw,
+};
+
+/* Function to push data to buffer */
+/* 'len' is unused here; iio_push_to_buffers() takes a full scan. */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+					int len)
+{
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+	iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+/*
+ * Pushes the last captured presence sample to the IIO buffer, but only
+ * when the buffer path is armed (data_ready set by the trigger code).
+ */
+static int prox_proc_event(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct prox_state *prox_state = iio_priv(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "prox_proc_event\n");
+	if (atomic_read(&prox_state->common_attributes.data_ready))
+		hid_sensor_push_data(indio_dev,
+				     &prox_state->human_presence,
+				     sizeof(prox_state->human_presence));
+
+	return 0;
+}
+
+/* Capture samples in local storage */
+/*
+ * Sensor-hub capture callback: stash the raw presence value so
+ * prox_proc_event() can push it once the full report has arrived.
+ * Returns -EINVAL for usages this driver does not handle.
+ */
+static int prox_capture_sample(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				size_t raw_len, char *raw_data,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct prox_state *prox_state = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	switch (usage_id) {
+	case HID_USAGE_SENSOR_HUMAN_PRESENCE:
+		/* assumes the report field is at least 32 bits — raw_len unchecked */
+		prox_state->human_presence = *(u32 *)raw_data;
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Parse report which is specific to an usage id*/
+/*
+ * Locate the human-presence field in the input report, size the IIO
+ * channel accordingly, and fall back to the presence-class sensitivity
+ * field when no per-field modifier was found by the common-attribute
+ * parser.
+ */
+static int prox_parse_report(struct platform_device *pdev,
+				struct hid_sensor_hub_device *hsdev,
+				struct iio_chan_spec *channels,
+				unsigned usage_id,
+				struct prox_state *st)
+{
+	int ret;
+
+	ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+			usage_id,
+			HID_USAGE_SENSOR_HUMAN_PRESENCE,
+			&st->prox_attr);
+	if (ret < 0)
+		return ret;
+	prox_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESENCE,
+					st->prox_attr.size);
+
+	dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index,
+		st->prox_attr.report_id);
+
+	/* Set Sensitivity field ids, when there is no individual modifier */
+	if (st->common_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_DATA_PRESENCE,
+			&st->common_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->common_attributes.sensitivity.index,
+			st->common_attributes.sensitivity.report_id);
+	}
+	return ret;
+}
+
+/*
+ * Function to initialize the processing for usage id: allocate the IIO
+ * device, parse the HID report, set up the triggered buffer/trigger and
+ * register the sensor-hub event callbacks.
+ */
+static int hid_prox_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	static const char *name = "prox";
+	struct iio_dev *indio_dev;
+	struct prox_state *prox_state;
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_chan_spec *channels;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev,
+				sizeof(struct prox_state));
+	if (!indio_dev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, indio_dev);
+
+	prox_state = iio_priv(indio_dev);
+	prox_state->common_attributes.hsdev = hsdev;
+	prox_state->common_attributes.pdev = pdev;
+
+	ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_PROX,
+					&prox_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup common attributes\n");
+		return ret;
+	}
+
+	/* Duplicate so prox_parse_report() can adjust the channel masks. */
+	channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL);
+	if (!channels) {
+		dev_err(&pdev->dev, "failed to duplicate channels\n");
+		return -ENOMEM;
+	}
+
+	ret = prox_parse_report(pdev, hsdev, channels,
+				HID_USAGE_SENSOR_PROX, prox_state);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup attributes\n");
+		goto error_free_dev_mem;
+	}
+
+	indio_dev->channels = channels;
+	indio_dev->num_channels =
+		ARRAY_SIZE(prox_channels);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->info = &prox_info;
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		NULL, NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+		goto error_free_dev_mem;
+	}
+	atomic_set(&prox_state->common_attributes.data_ready, 0);
+	ret = hid_sensor_setup_trigger(indio_dev, name,
+				&prox_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "trigger setup failed\n");
+		goto error_unreg_buffer_funcs;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "device register failed\n");
+		goto error_remove_trigger;
+	}
+
+	prox_state->callbacks.send_event = prox_proc_event;
+	prox_state->callbacks.capture_sample = prox_capture_sample;
+	prox_state->callbacks.pdev = pdev;
+	ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PROX,
+					&prox_state->callbacks);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "callback reg failed\n");
+		goto error_iio_unreg;
+	}
+
+	return ret;
+
+error_iio_unreg:
+	iio_device_unregister(indio_dev);
+error_remove_trigger:
+	hid_sensor_remove_trigger(&prox_state->common_attributes);
+error_unreg_buffer_funcs:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+	/*
+	 * Free via the local pointer: when prox_parse_report() fails,
+	 * indio_dev->channels has not been assigned yet (it is still NULL
+	 * from the zeroed devm allocation), so kfree(indio_dev->channels)
+	 * would leak the kmemdup()'ed array. On the later error paths
+	 * channels == indio_dev->channels, so this frees the same memory.
+	 */
+	kfree(channels);
+	return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_prox_remove(struct platform_device *pdev)
+{
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct prox_state *prox_state = iio_priv(indio_dev);
+
+	/* Tear down in the reverse order of hid_prox_probe(). */
+	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
+	iio_device_unregister(indio_dev);
+	hid_sensor_remove_trigger(&prox_state->common_attributes);
+	iio_triggered_buffer_cleanup(indio_dev);
+	kfree(indio_dev->channels);
+
+	return 0;
+}
+
+/* Matched against the platform devices created by the HID sensor hub. */
+static struct platform_device_id hid_prox_ids[] = {
+	{
+		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+		.name = "HID-SENSOR-200011",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_prox_ids);
+
+static struct platform_driver hid_prox_platform_driver = {
+	.id_table = hid_prox_ids,
+	.driver = {
+		.name	= KBUILD_MODNAME,
+	},
+	.probe		= hid_prox_probe,
+	.remove		= hid_prox_remove,
+};
+module_platform_driver(hid_prox_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Proximity");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
new file mode 100644
index 00000000000000..c82f4a6f84645c
--- /dev/null
+++ b/drivers/iio/light/isl29125.c
@@ -0,0 +1,347 @@
+/*
+ * isl29125.c - Support for Intersil ISL29125 RGB light sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * RGB light sensor with 16-bit channels for red, green, blue;
+ * 7-bit I2C slave address 0x44
+ *
+ * TODO: interrupt support, IR compensation, thresholds, 12bit
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define ISL29125_DRV_NAME "isl29125"
+
+#define ISL29125_DEVICE_ID 0x00
+#define ISL29125_CONF1 0x01
+#define ISL29125_CONF2 0x02
+#define ISL29125_CONF3 0x03
+#define ISL29125_STATUS 0x08
+#define ISL29125_GREEN_DATA 0x09
+#define ISL29125_RED_DATA 0x0b
+#define ISL29125_BLUE_DATA 0x0d
+
+#define ISL29125_ID 0x7d
+
+#define ISL29125_MODE_MASK GENMASK(2, 0)
+#define ISL29125_MODE_PD 0x0
+#define ISL29125_MODE_G 0x1
+#define ISL29125_MODE_R 0x2
+#define ISL29125_MODE_B 0x3
+#define ISL29125_MODE_RGB 0x5
+
+#define ISL29125_MODE_RANGE BIT(3)
+
+#define ISL29125_STATUS_CONV BIT(1)
+
+struct isl29125_data {
+	struct i2c_client *client;
+	/* serializes one-shot conversions in isl29125_read_data() */
+	struct mutex lock;
+	/* cached CONF1 register value (mode and range bits) */
+	u8 conf1;
+	u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
+};
+
+/* One unsigned 16-bit intensity channel per colour. */
+#define ISL29125_CHANNEL(_color, _si) { \
+	.type = IIO_INTENSITY, \
+	.modified = 1, \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+	.channel2 = IIO_MOD_LIGHT_##_color, \
+	.scan_index = _si, \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = 16, \
+		.storagebits = 16, \
+		.endianness = IIO_CPU, \
+	}, \
+}
+
+static const struct iio_chan_spec isl29125_channels[] = {
+	ISL29125_CHANNEL(GREEN, 0),
+	ISL29125_CHANNEL(RED, 1),
+	ISL29125_CHANNEL(BLUE, 2),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/* Per-scan-index single-colour mode and matching data register. */
+static const struct {
+	u8 mode, data;
+} isl29125_regs[] = {
+	{ISL29125_MODE_G, ISL29125_GREEN_DATA},
+	{ISL29125_MODE_R, ISL29125_RED_DATA},
+	{ISL29125_MODE_B, ISL29125_BLUE_DATA},
+};
+
+/*
+ * Run one single-colour conversion for scan index @si and return the
+ * 16-bit result or a negative errno.  The chip is restored to the
+ * previous mode (data->conf1) on both success and failure.
+ */
+static int isl29125_read_data(struct isl29125_data *data, int si)
+{
+	int tries = 5;
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		data->conf1 | isl29125_regs[si].mode);
+	if (ret < 0)
+		return ret;
+
+	/* wait for the conversion (assumed ~100 ms — confirm vs. datasheet) */
+	msleep(101);
+
+	/* then poll the CONV flag a few more times before giving up */
+	while (tries--) {
+		ret = i2c_smbus_read_byte_data(data->client, ISL29125_STATUS);
+		if (ret < 0)
+			goto fail;
+		if (ret & ISL29125_STATUS_CONV)
+			break;
+		msleep(20);
+	}
+
+	if (tries < 0) {
+		dev_err(&data->client->dev, "data not ready\n");
+		ret = -EIO;
+		goto fail;
+	}
+
+	ret = i2c_smbus_read_word_data(data->client, isl29125_regs[si].data);
+
+fail:
+	/* restore previous mode even on error; original ret is returned */
+	i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, data->conf1);
+	return ret;
+}
+
+/* Sysfs read path: raw one-shot conversion or the current scale. */
+static int isl29125_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	struct isl29125_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/* one-shot reads would disturb a running RGB scan */
+		if (iio_buffer_enabled(indio_dev))
+			return -EBUSY;
+		mutex_lock(&data->lock);
+		ret = isl29125_read_data(data, chan->scan_index);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+		*val = ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		if (data->conf1 & ISL29125_MODE_RANGE)
+			*val2 = 152590; /* 10k lux full range */
+		else
+			*val2 = 5722; /* 375 lux full range */
+		return IIO_VAL_INT_PLUS_MICRO;
+	}
+	return -EINVAL;
+}
+
+/* Sysfs write path: only the two discrete scale values are accepted. */
+static int isl29125_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val, int val2, long mask)
+{
+	struct isl29125_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		if (val != 0)
+			return -EINVAL;
+		if (val2 == 152590)
+			data->conf1 |= ISL29125_MODE_RANGE;
+		else if (val2 == 5722)
+			data->conf1 &= ~ISL29125_MODE_RANGE;
+		else
+			return -EINVAL;
+		return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+			data->conf1);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Triggered-buffer bottom half: read each active colour channel into
+ * data->buffer and push it with a timestamp.  The chip is already in
+ * continuous RGB mode (set in the buffer preenable callback).
+ */
+static irqreturn_t isl29125_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct isl29125_data *data = iio_priv(indio_dev);
+	int i, j = 0;
+
+	for_each_set_bit(i, indio_dev->active_scan_mask,
+		indio_dev->masklength) {
+		int ret = i2c_smbus_read_word_data(data->client,
+			isl29125_regs[i].data);
+		if (ret < 0)
+			goto done;
+
+		data->buffer[j++] = ret;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+		iio_get_time_ns());
+
+done:
+	/* must always be called, even when a read failed */
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static const struct iio_info isl29125_info = {
+	.read_raw = isl29125_read_raw,
+	.write_raw = isl29125_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/* Switch the chip to continuous RGB mode before buffered capture starts. */
+static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct isl29125_data *data = iio_priv(indio_dev);
+
+	data->conf1 |= ISL29125_MODE_RGB;
+	return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		data->conf1);
+}
+
+/* Detach from the trigger first, then power the chip back down. */
+static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct isl29125_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = iio_triggered_buffer_predisable(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	data->conf1 &= ~ISL29125_MODE_MASK;
+	data->conf1 |= ISL29125_MODE_PD;
+	return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		data->conf1);
+}
+
+static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = {
+	.preenable = isl29125_buffer_preenable,
+	.postenable = &iio_triggered_buffer_postenable,
+	.predisable = isl29125_buffer_predisable,
+};
+
+/*
+ * Probe: verify the device ID, configure power-down + 10k lux range,
+ * clear the status register, and register the triggered buffer and
+ * the IIO device.
+ */
+static int isl29125_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct isl29125_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &isl29125_info;
+	indio_dev->name = ISL29125_DRV_NAME;
+	indio_dev->channels = isl29125_channels;
+	indio_dev->num_channels = ARRAY_SIZE(isl29125_channels);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* sanity-check that we are actually talking to an ISL29125 */
+	ret = i2c_smbus_read_byte_data(data->client, ISL29125_DEVICE_ID);
+	if (ret < 0)
+		return ret;
+	if (ret != ISL29125_ID)
+		return -ENODEV;
+
+	/* start powered down, widest (10k lux) range selected */
+	data->conf1 = ISL29125_MODE_PD | ISL29125_MODE_RANGE;
+	ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		data->conf1);
+	if (ret < 0)
+		return ret;
+
+	ret = i2c_smbus_write_byte_data(data->client, ISL29125_STATUS, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+		isl29125_trigger_handler, &isl29125_buffer_setup_ops);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto buffer_cleanup;
+
+	return 0;
+
+buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return ret;
+}
+
+/* Clear the mode bits and select power-down without touching the range. */
+static int isl29125_powerdown(struct isl29125_data *data)
+{
+	return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		(data->conf1 & ~ISL29125_MODE_MASK) | ISL29125_MODE_PD);
+}
+
+static int isl29125_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	isl29125_powerdown(iio_priv(indio_dev));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend powers the chip down; resume restores the cached CONF1. */
+static int isl29125_suspend(struct device *dev)
+{
+	struct isl29125_data *data = iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev)));
+	return isl29125_powerdown(data);
+}
+
+static int isl29125_resume(struct device *dev)
+{
+	struct isl29125_data *data = iio_priv(i2c_get_clientdata(
+		to_i2c_client(dev)));
+	return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+		data->conf1);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(isl29125_pm_ops, isl29125_suspend, isl29125_resume);
+
+static const struct i2c_device_id isl29125_id[] = {
+	{ "isl29125", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, isl29125_id);
+
+/* I2C driver glue; PM ops are no-ops unless CONFIG_PM_SLEEP is set. */
+static struct i2c_driver isl29125_driver = {
+	.driver = {
+		.name	= ISL29125_DRV_NAME,
+		.pm	= &isl29125_pm_ops,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= isl29125_probe,
+	.remove		= isl29125_remove,
+	.id_table	= isl29125_id,
+};
+module_i2c_driver(isl29125_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("ISL29125 RGB light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
new file mode 100644
index 00000000000000..ae3c71bdd6c6c6
--- /dev/null
+++ b/drivers/iio/light/lm3533-als.c
@@ -0,0 +1,927 @@
+/*
+ * lm3533-als.c -- LM3533 Ambient Light Sensor driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_ALS_RESISTOR_MIN	1
+#define LM3533_ALS_RESISTOR_MAX	127
+#define LM3533_ALS_CHANNEL_CURRENT_MAX	2
+#define LM3533_ALS_THRESH_MAX	3
+#define LM3533_ALS_ZONE_MAX	4
+
+#define LM3533_REG_ALS_RESISTOR_SELECT	0x30
+#define LM3533_REG_ALS_CONF	0x31
+#define LM3533_REG_ALS_ZONE_INFO	0x34
+#define LM3533_REG_ALS_READ_ADC_RAW	0x37
+#define LM3533_REG_ALS_READ_ADC_AVERAGE	0x38
+#define LM3533_REG_ALS_BOUNDARY_BASE	0x50
+#define LM3533_REG_ALS_TARGET_BASE	0x60
+
+#define LM3533_ALS_ENABLE_MASK	0x01
+#define LM3533_ALS_INPUT_MODE_MASK	0x02
+#define LM3533_ALS_INT_ENABLE_MASK	0x01
+
+#define LM3533_ALS_ZONE_SHIFT	2
+#define LM3533_ALS_ZONE_MASK	0x1c
+
+#define LM3533_ALS_FLAG_INT_ENABLED	1
+
+
+/* Per-device state, stored as iio_priv() of the IIO device. */
+struct lm3533_als {
+	struct lm3533 *lm3533;
+	struct platform_device *pdev;
+
+	/* bit LM3533_ALS_FLAG_INT_ENABLED tracks interrupt mode */
+	unsigned long flags;
+	int irq;
+
+	/* last zone reported by the ISR (valid in interrupt mode only) */
+	atomic_t zone;
+	/* keeps paired raising/falling threshold updates consistent */
+	struct mutex thresh_mutex;
+};
+
+
+/* Read the raw or averaged 8-bit ALS ADC value. */
+static int lm3533_als_get_adc(struct iio_dev *indio_dev, bool average,
+				int *adc)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 reg;
+	u8 val;
+	int ret;
+
+	if (average)
+		reg = LM3533_REG_ALS_READ_ADC_AVERAGE;
+	else
+		reg = LM3533_REG_ALS_READ_ADC_RAW;
+
+	ret = lm3533_read(als->lm3533, reg, &val);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to read adc\n");
+		return ret;
+	}
+
+	*adc = val;
+
+	return 0;
+}
+
+/* Read the current zone directly from the chip (clamped to ZONE_MAX). */
+static int _lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 val;
+	int ret;
+
+	ret = lm3533_read(als->lm3533, LM3533_REG_ALS_ZONE_INFO, &val);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to read zone\n");
+		return ret;
+	}
+
+	val = (val & LM3533_ALS_ZONE_MASK) >> LM3533_ALS_ZONE_SHIFT;
+	*zone = min_t(u8, val, LM3533_ALS_ZONE_MAX);
+
+	return 0;
+}
+
+/*
+ * Return the current zone.  In interrupt mode the cached value from the
+ * ISR is used so the interrupt is not cleared by a register read here.
+ */
+static int lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	int ret;
+
+	if (test_bit(LM3533_ALS_FLAG_INT_ENABLED, &als->flags)) {
+		*zone = atomic_read(&als->zone);
+	} else {
+		ret = _lm3533_als_get_zone(indio_dev, zone);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * channel	output channel 0..2
+ * zone		zone 0..4
+ */
+static inline u8 lm3533_als_get_target_reg(unsigned channel, unsigned zone)
+{
+	/* targets are laid out as 5 consecutive zone registers per channel */
+	return LM3533_REG_ALS_TARGET_BASE + 5 * channel + zone;
+}
+
+/* Read the mapper target current for (channel, zone). */
+static int lm3533_als_get_target(struct iio_dev *indio_dev, unsigned channel,
+				 unsigned zone, u8 *val)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 reg;
+	int ret;
+
+	if (channel > LM3533_ALS_CHANNEL_CURRENT_MAX)
+		return -EINVAL;
+
+	if (zone > LM3533_ALS_ZONE_MAX)
+		return -EINVAL;
+
+	reg = lm3533_als_get_target_reg(channel, zone);
+	ret = lm3533_read(als->lm3533, reg, val);
+	if (ret)
+		dev_err(&indio_dev->dev, "failed to get target current\n");
+
+	return ret;
+}
+
+/* Write the mapper target current for (channel, zone). */
+static int lm3533_als_set_target(struct iio_dev *indio_dev, unsigned channel,
+				 unsigned zone, u8 val)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 reg;
+	int ret;
+
+	if (channel > LM3533_ALS_CHANNEL_CURRENT_MAX)
+		return -EINVAL;
+
+	if (zone > LM3533_ALS_ZONE_MAX)
+		return -EINVAL;
+
+	reg = lm3533_als_get_target_reg(channel, zone);
+	ret = lm3533_write(als->lm3533, reg, val);
+	if (ret)
+		dev_err(&indio_dev->dev, "failed to set target current\n");
+
+	return ret;
+}
+
+/* Output current of @channel for the zone the sensor is currently in. */
+static int lm3533_als_get_current(struct iio_dev *indio_dev, unsigned channel,
+				  int *val)
+{
+	u8 zone;
+	u8 target;
+	int ret;
+
+	ret = lm3533_als_get_zone(indio_dev, &zone);
+	if (ret)
+		return ret;
+
+	ret = lm3533_als_get_target(indio_dev, channel, zone, &target);
+	if (ret)
+		return ret;
+
+	*val = target;
+
+	return 0;
+}
+
+/* Raw reads: light ADC or per-channel output current; averaged ADC too. */
+static int lm3533_als_read_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int *val, int *val2, long mask)
+{
+	int ret;
+
+	switch (mask) {
+	case 0:
+		switch (chan->type) {
+		case IIO_LIGHT:
+			ret = lm3533_als_get_adc(indio_dev, false, val);
+			break;
+		case IIO_CURRENT:
+			ret = lm3533_als_get_current(indio_dev, chan->channel,
+							val);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IIO_CHAN_INFO_AVERAGE_RAW:
+		ret = lm3533_als_get_adc(indio_dev, true, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	return IIO_VAL_INT;
+}
+
+/* Read-only output-current channels (targets resolved via current zone). */
+#define CHANNEL_CURRENT(_channel)					\
+	{								\
+		.type		= IIO_CURRENT,				\
+		.channel	= _channel,				\
+		.indexed	= true,					\
+		.output		= true,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+	}
+
+static const struct iio_chan_spec lm3533_als_channels[] = {
+	{
+		.type		= IIO_LIGHT,
+		.channel	= 0,
+		.indexed	= true,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_AVERAGE_RAW) |
+				   BIT(IIO_CHAN_INFO_RAW),
+	},
+	CHANNEL_CURRENT(0),
+	CHANNEL_CURRENT(1),
+	CHANNEL_CURRENT(2),
+};
+
+/*
+ * Threaded IRQ handler: reading the zone register both clears the
+ * interrupt and yields the new zone, which is cached for readers and
+ * reported as a threshold event.
+ */
+static irqreturn_t lm3533_als_isr(int irq, void *dev_id)
+{
+
+	struct iio_dev *indio_dev = dev_id;
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 zone;
+	int ret;
+
+	/* Clear interrupt by reading the ALS zone register. */
+	ret = _lm3533_als_get_zone(indio_dev, &zone);
+	if (ret)
+		goto out;
+
+	atomic_set(&als->zone, zone);
+
+	iio_push_event(indio_dev,
+		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+					    0,
+					    IIO_EV_TYPE_THRESH,
+					    IIO_EV_DIR_EITHER),
+		       iio_get_time_ns());
+out:
+	return IRQ_HANDLED;
+}
+
+/* Enable or disable the zone-change interrupt on the chip. */
+static int lm3533_als_set_int_mode(struct iio_dev *indio_dev, int enable)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 mask = LM3533_ALS_INT_ENABLE_MASK;
+	u8 val;
+	int ret;
+
+	if (enable)
+		val = mask;
+	else
+		val = 0;
+
+	ret = lm3533_update(als->lm3533, LM3533_REG_ALS_ZONE_INFO, val, mask);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to set int mode %d\n",
+			enable);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Report whether the zone-change interrupt is currently enabled. */
+static int lm3533_als_get_int_mode(struct iio_dev *indio_dev, int *enable)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 mask = LM3533_ALS_INT_ENABLE_MASK;
+	u8 val;
+	int ret;
+
+	ret = lm3533_read(als->lm3533, LM3533_REG_ALS_ZONE_INFO, &val);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to get int mode\n");
+		return ret;
+	}
+
+	*enable = !!(val & mask);
+
+	return 0;
+}
+
+/* Boundary registers come in raising/falling pairs, raising first. */
+static inline u8 lm3533_als_get_threshold_reg(unsigned nr, bool raising)
+{
+	u8 offset = !raising;
+
+	return LM3533_REG_ALS_BOUNDARY_BASE + 2 * nr + offset;
+}
+
+/* Read one zone boundary (threshold @nr, raising or falling edge). */
+static int lm3533_als_get_threshold(struct iio_dev *indio_dev, unsigned nr,
+				    bool raising, u8 *val)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 reg;
+	int ret;
+
+	if (nr > LM3533_ALS_THRESH_MAX)
+		return -EINVAL;
+
+	reg = lm3533_als_get_threshold_reg(nr, raising);
+	ret = lm3533_read(als->lm3533, reg, val);
+	if (ret)
+		dev_err(&indio_dev->dev, "failed to get threshold\n");
+
+	return ret;
+}
+
+/*
+ * Write one zone boundary, holding thresh_mutex so the paired edge
+ * cannot change while we validate the ordering constraint below.
+ */
+static int lm3533_als_set_threshold(struct iio_dev *indio_dev, unsigned nr,
+				    bool raising, u8 val)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 val2;
+	u8 reg, reg2;
+	int ret;
+
+	if (nr > LM3533_ALS_THRESH_MAX)
+		return -EINVAL;
+
+	reg = lm3533_als_get_threshold_reg(nr, raising);
+	reg2 = lm3533_als_get_threshold_reg(nr, !raising);
+
+	mutex_lock(&als->thresh_mutex);
+	ret = lm3533_read(als->lm3533, reg2, &val2);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to get threshold\n");
+		goto out;
+	}
+	/*
+	 * This device does not allow negative hysteresis (in fact, it uses
+	 * whichever value is smaller as the lower bound) so we need to make
+	 * sure that thresh_falling <= thresh_raising.
+	 */
+	if ((raising && (val < val2)) || (!raising && (val > val2))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = lm3533_write(als->lm3533, reg, val);
+	if (ret) {
+		dev_err(&indio_dev->dev, "failed to set threshold\n");
+		goto out;
+	}
+out:
+	mutex_unlock(&als->thresh_mutex);
+
+	return ret;
+}
+
+/* hysteresis = raising - falling, read consistently under thresh_mutex */
+static int lm3533_als_get_hysteresis(struct iio_dev *indio_dev, unsigned nr,
+				     u8 *val)
+{
+	struct lm3533_als *als = iio_priv(indio_dev);
+	u8 falling;
+	u8 raising;
+	int ret;
+
+	if (nr > LM3533_ALS_THRESH_MAX)
+		return -EINVAL;
+
+	mutex_lock(&als->thresh_mutex);
+	ret = lm3533_als_get_threshold(indio_dev, nr, false, &falling);
+	if (ret)
+		goto out;
+	ret = lm3533_als_get_threshold(indio_dev, nr, true, &raising);
+	if (ret)
+		goto out;
+
+	*val = raising - falling;
+out:
+	mutex_unlock(&als->thresh_mutex);
+
+	return ret;
+}
+}
+
+/* sysfs: report whether the either-edge threshold event is enabled. */
+static ssize_t show_thresh_either_en(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct lm3533_als *als = iio_priv(indio_dev);
+	int enable;
+	int ret;
+
+	if (als->irq) {
+		ret = lm3533_als_get_int_mode(indio_dev, &enable);
+		if (ret)
+			return ret;
+	} else {
+		enable = 0;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", enable);
+}
+
+/*
+ * sysfs: enable/disable the threshold event.  Before enabling, the
+ * current zone is cached and the INT_ENABLED flag set so readers use
+ * the cache (reading the register would clear pending interrupts).
+ */
+static ssize_t store_thresh_either_en(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct lm3533_als *als = iio_priv(indio_dev);
+	unsigned long enable;
+	bool int_enabled;
+	u8 zone;
+	int ret;
+
+	if (!als->irq)
+		return -EBUSY;
+
+	if (kstrtoul(buf, 0, &enable))
+		return -EINVAL;
+
+	int_enabled = test_bit(LM3533_ALS_FLAG_INT_ENABLED, &als->flags);
+
+	if (enable && !int_enabled) {
+		ret = lm3533_als_get_zone(indio_dev, &zone);
+		if (ret)
+			return ret;
+
+		atomic_set(&als->zone, zone);
+
+		set_bit(LM3533_ALS_FLAG_INT_ENABLED, &als->flags);
+	}
+
+	ret = lm3533_als_set_int_mode(indio_dev, enable);
+	if (ret) {
+		/* roll the flag back if we had just set it above */
+		if (!int_enabled)
+			clear_bit(LM3533_ALS_FLAG_INT_ENABLED, &als->flags);
+
+		return ret;
+	}
+
+	if (!enable)
+		clear_bit(LM3533_ALS_FLAG_INT_ENABLED, &als->flags);
+
+	return len;
+}
+
+/* sysfs: current ALS zone (0..4). */
+static ssize_t show_zone(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	u8 zone;
+	int ret;
+
+	ret = lm3533_als_get_zone(indio_dev, &zone);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", zone);
+}
+
+/* Which register family a generic lm3533_als_attribute accesses. */
+enum lm3533_als_attribute_type {
+	LM3533_ATTR_TYPE_HYSTERESIS,
+	LM3533_ATTR_TYPE_TARGET,
+	LM3533_ATTR_TYPE_THRESH_FALLING,
+	LM3533_ATTR_TYPE_THRESH_RAISING,
+};
+
+/* device_attribute plus the (type, val1, val2) routing information. */
+struct lm3533_als_attribute {
+	struct device_attribute dev_attr;
+	enum lm3533_als_attribute_type type;
+	u8 val1;	/* threshold nr or output channel, per type */
+	u8 val2;	/* zone (targets only) */
+};
+
+static inline struct lm3533_als_attribute *
+to_lm3533_als_attr(struct device_attribute *attr)
+{
+	return container_of(attr, struct lm3533_als_attribute, dev_attr);
+}
+
+/* Generic show handler shared by target/threshold/hysteresis attrs. */
+static ssize_t show_als_attr(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct lm3533_als_attribute *als_attr = to_lm3533_als_attr(attr);
+	u8 val;
+	int ret;
+
+	switch (als_attr->type) {
+	case LM3533_ATTR_TYPE_HYSTERESIS:
+		ret = lm3533_als_get_hysteresis(indio_dev, als_attr->val1,
+								&val);
+		break;
+	case LM3533_ATTR_TYPE_TARGET:
+		ret = lm3533_als_get_target(indio_dev, als_attr->val1,
+							als_attr->val2, &val);
+		break;
+	case LM3533_ATTR_TYPE_THRESH_FALLING:
+		ret = lm3533_als_get_threshold(indio_dev, als_attr->val1,
+								false, &val);
+		break;
+	case LM3533_ATTR_TYPE_THRESH_RAISING:
+		ret = lm3533_als_get_threshold(indio_dev, als_attr->val1,
+								true, &val);
+		break;
+	default:
+		ret = -ENXIO;
+	}
+
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+/* Generic store handler; hysteresis is read-only and not handled here. */
+static ssize_t store_als_attr(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct lm3533_als_attribute *als_attr = to_lm3533_als_attr(attr);
+	u8 val;
+	int ret;
+
+	if (kstrtou8(buf, 0, &val))
+		return -EINVAL;
+
+	switch (als_attr->type) {
+	case LM3533_ATTR_TYPE_TARGET:
+		ret = lm3533_als_set_target(indio_dev, als_attr->val1,
+							als_attr->val2, val);
+		break;
+	case LM3533_ATTR_TYPE_THRESH_FALLING:
+		ret = lm3533_als_set_threshold(indio_dev, als_attr->val1,
+								false, val);
+		break;
+	case LM3533_ATTR_TYPE_THRESH_RAISING:
+		ret = lm3533_als_set_threshold(indio_dev, als_attr->val1,
+								true, val);
+		break;
+	default:
+		ret = -ENXIO;
+	}
+
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+#define ALS_ATTR(_name, _mode, _show, _store, _type, _val1, _val2) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .type = _type, \
+ .val1 = _val1, \
+ .val2 = _val2 }
+
+#define LM3533_ALS_ATTR(_name, _mode, _show, _store, _type, _val1, _val2) \
+ struct lm3533_als_attribute lm3533_als_attr_##_name = \
+ ALS_ATTR(_name, _mode, _show, _store, _type, _val1, _val2)
+
+#define ALS_TARGET_ATTR_RW(_channel, _zone) \
+ LM3533_ALS_ATTR(out_current##_channel##_current##_zone##_raw, \
+ S_IRUGO | S_IWUSR, \
+ show_als_attr, store_als_attr, \
+ LM3533_ATTR_TYPE_TARGET, _channel, _zone)
+/*
+ * ALS output current values (ALS mapper targets)
+ *
+ * out_current[0-2]_current[0-4]_raw 0-255
+ */
+static ALS_TARGET_ATTR_RW(0, 0);
+static ALS_TARGET_ATTR_RW(0, 1);
+static ALS_TARGET_ATTR_RW(0, 2);
+static ALS_TARGET_ATTR_RW(0, 3);
+static ALS_TARGET_ATTR_RW(0, 4);
+
+static ALS_TARGET_ATTR_RW(1, 0);
+static ALS_TARGET_ATTR_RW(1, 1);
+static ALS_TARGET_ATTR_RW(1, 2);
+static ALS_TARGET_ATTR_RW(1, 3);
+static ALS_TARGET_ATTR_RW(1, 4);
+
+static ALS_TARGET_ATTR_RW(2, 0);
+static ALS_TARGET_ATTR_RW(2, 1);
+static ALS_TARGET_ATTR_RW(2, 2);
+static ALS_TARGET_ATTR_RW(2, 3);
+static ALS_TARGET_ATTR_RW(2, 4);
+
+#define ALS_THRESH_FALLING_ATTR_RW(_nr) \
+ LM3533_ALS_ATTR(in_illuminance0_thresh##_nr##_falling_value, \
+ S_IRUGO | S_IWUSR, \
+ show_als_attr, store_als_attr, \
+ LM3533_ATTR_TYPE_THRESH_FALLING, _nr, 0)
+
+#define ALS_THRESH_RAISING_ATTR_RW(_nr) \
+ LM3533_ALS_ATTR(in_illuminance0_thresh##_nr##_raising_value, \
+ S_IRUGO | S_IWUSR, \
+ show_als_attr, store_als_attr, \
+ LM3533_ATTR_TYPE_THRESH_RAISING, _nr, 0)
+/*
+ * ALS Zone thresholds (boundaries)
+ *
+ * in_illuminance0_thresh[0-3]_falling_value 0-255
+ * in_illuminance0_thresh[0-3]_raising_value 0-255
+ */
+static ALS_THRESH_FALLING_ATTR_RW(0);
+static ALS_THRESH_FALLING_ATTR_RW(1);
+static ALS_THRESH_FALLING_ATTR_RW(2);
+static ALS_THRESH_FALLING_ATTR_RW(3);
+
+static ALS_THRESH_RAISING_ATTR_RW(0);
+static ALS_THRESH_RAISING_ATTR_RW(1);
+static ALS_THRESH_RAISING_ATTR_RW(2);
+static ALS_THRESH_RAISING_ATTR_RW(3);
+
+#define ALS_HYSTERESIS_ATTR_RO(_nr) \
+ LM3533_ALS_ATTR(in_illuminance0_thresh##_nr##_hysteresis, \
+ S_IRUGO, show_als_attr, NULL, \
+ LM3533_ATTR_TYPE_HYSTERESIS, _nr, 0)
+/*
+ * ALS Zone threshold hysteresis
+ *
+ * threshY_hysteresis = threshY_raising - threshY_falling
+ *
+ * in_illuminance0_thresh[0-3]_hysteresis		0-255
+ */
+static ALS_HYSTERESIS_ATTR_RO(0);
+static ALS_HYSTERESIS_ATTR_RO(1);
+static ALS_HYSTERESIS_ATTR_RO(2);
+static ALS_HYSTERESIS_ATTR_RO(3);
+
+#define ILLUMINANCE_ATTR_RO(_name) \
+ DEVICE_ATTR(in_illuminance0_##_name, S_IRUGO, show_##_name, NULL)
+#define ILLUMINANCE_ATTR_RW(_name) \
+ DEVICE_ATTR(in_illuminance0_##_name, S_IRUGO | S_IWUSR , \
+ show_##_name, store_##_name)
+/*
+ * ALS Zone threshold-event enable
+ *
+ * in_illuminance0_thresh_either_en 0,1
+ */
+static ILLUMINANCE_ATTR_RW(thresh_either_en);
+
+/*
+ * ALS Current Zone
+ *
+ * in_illuminance0_zone 0-4
+ */
+static ILLUMINANCE_ATTR_RO(zone);
+
+static struct attribute *lm3533_als_event_attributes[] = {
+ &dev_attr_in_illuminance0_thresh_either_en.attr,
+ &lm3533_als_attr_in_illuminance0_thresh0_falling_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh0_hysteresis.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh0_raising_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh1_falling_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh1_hysteresis.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh1_raising_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh2_falling_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh2_hysteresis.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh2_raising_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh3_falling_value.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh3_hysteresis.dev_attr.attr,
+ &lm3533_als_attr_in_illuminance0_thresh3_raising_value.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group lm3533_als_event_attribute_group = {
+ .attrs = lm3533_als_event_attributes
+};
+
+static struct attribute *lm3533_als_attributes[] = {
+ &dev_attr_in_illuminance0_zone.attr,
+ &lm3533_als_attr_out_current0_current0_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current0_current1_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current0_current2_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current0_current3_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current0_current4_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current1_current0_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current1_current1_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current1_current2_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current1_current3_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current1_current4_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current2_current0_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current2_current1_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current2_current2_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current2_current3_raw.dev_attr.attr,
+ &lm3533_als_attr_out_current2_current4_raw.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group lm3533_als_attribute_group = {
+ .attrs = lm3533_als_attributes
+};
+
+/*
+ * Select the ALS input mode: PWM input when @pwm_mode is true, analog
+ * input otherwise. Returns 0 on success or a negative errno from the
+ * register update.
+ */
+static int lm3533_als_set_input_mode(struct lm3533_als *als, bool pwm_mode)
+{
+ const u8 mask = LM3533_ALS_INPUT_MODE_MASK;
+ u8 val = pwm_mode ? mask : 0;
+ int err;
+
+ err = lm3533_update(als->lm3533, LM3533_REG_ALS_CONF, val, mask);
+ if (err)
+ dev_err(&als->pdev->dev, "failed to set input mode %d\n",
+ pwm_mode);
+
+ return err;
+}
+
+/*
+ * Program the ALS input-resistor select register. @val must lie within
+ * [LM3533_ALS_RESISTOR_MIN, LM3533_ALS_RESISTOR_MAX]; -EINVAL otherwise.
+ */
+static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
+{
+ int err;
+
+ if (val < LM3533_ALS_RESISTOR_MIN || val > LM3533_ALS_RESISTOR_MAX)
+ return -EINVAL;
+
+ err = lm3533_write(als->lm3533, LM3533_REG_ALS_RESISTOR_SELECT, val);
+ if (err)
+ dev_err(&als->pdev->dev, "failed to set resistor\n");
+
+ return err;
+}
+
+/*
+ * Apply platform configuration: input mode first, then the input resistor
+ * (analog mode only). Returns 0 or a negative errno.
+ */
+static int lm3533_als_setup(struct lm3533_als *als,
+ struct lm3533_als_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_als_set_input_mode(als, pdata->pwm_mode);
+ if (ret)
+ return ret;
+
+ /* ALS input is always high impedance in PWM-mode. */
+ if (!pdata->pwm_mode) {
+ ret = lm3533_als_set_resistor(als, pdata->r_select);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Mask zone interrupts in hardware, then claim the (level-low, threaded)
+ * ALS interrupt line. The irq is requested non-devm, so callers must
+ * free_irq() on later failure and on remove. @dev is passed as the isr
+ * cookie (the iio_dev here).
+ */
+static int lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
+{
+ u8 mask = LM3533_ALS_INT_ENABLE_MASK;
+ int ret;
+
+ /* Make sure interrupts are disabled. */
+ ret = lm3533_update(als->lm3533, LM3533_REG_ALS_ZONE_INFO, 0, mask);
+ if (ret) {
+ dev_err(&als->pdev->dev, "failed to disable interrupts\n");
+ return ret;
+ }
+
+ ret = request_threaded_irq(als->irq, NULL, lm3533_als_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(&als->pdev->dev), dev);
+ if (ret) {
+ dev_err(&als->pdev->dev, "failed to request irq %d\n",
+ als->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Set the ALS enable bit in the configuration register. */
+static int lm3533_als_enable(struct lm3533_als *als)
+{
+ int err = lm3533_update(als->lm3533, LM3533_REG_ALS_CONF,
+ LM3533_ALS_ENABLE_MASK, LM3533_ALS_ENABLE_MASK);
+
+ if (err)
+ dev_err(&als->pdev->dev, "failed to enable ALS\n");
+
+ return err;
+}
+
+/* Clear the ALS enable bit in the configuration register. */
+static int lm3533_als_disable(struct lm3533_als *als)
+{
+ u8 mask = LM3533_ALS_ENABLE_MASK;
+ int ret;
+
+ ret = lm3533_update(als->lm3533, LM3533_REG_ALS_CONF, 0, mask);
+ if (ret)
+ dev_err(&als->pdev->dev, "failed to disable ALS\n");
+
+ return ret;
+}
+
+/* IIO callbacks and sysfs attribute groups for the ALS device */
+static const struct iio_info lm3533_als_info = {
+ .attrs = &lm3533_als_attribute_group,
+ .event_attrs = &lm3533_als_event_attribute_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = &lm3533_als_read_raw,
+};
+
+/*
+ * Bind the ALS cell of the LM3533 MFD: allocate the IIO device, optionally
+ * request the threshold interrupt, apply platform configuration, enable
+ * the sensor and register with IIO. The irq is requested non-devm and is
+ * therefore released explicitly on the error paths.
+ */
+static int lm3533_als_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_als_platform_data *pdata;
+ struct lm3533_als *als;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* the core MFD driver keeps its state in the parent's drvdata */
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*als));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &lm3533_als_info;
+ indio_dev->channels = lm3533_als_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lm3533_als_channels);
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = pdev->dev.parent;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ als = iio_priv(indio_dev);
+ als->lm3533 = lm3533;
+ als->pdev = pdev;
+ als->irq = lm3533->irq; /* shared with the MFD core; may be 0 */
+ atomic_set(&als->zone, 0);
+ mutex_init(&als->thresh_mutex);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ /* interrupt support is optional */
+ if (als->irq) {
+ ret = lm3533_als_setup_irq(als, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = lm3533_als_setup(als, pdata);
+ if (ret)
+ goto err_free_irq;
+
+ ret = lm3533_als_enable(als);
+ if (ret)
+ goto err_free_irq;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ALS\n");
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ lm3533_als_disable(als);
+err_free_irq:
+ if (als->irq)
+ free_irq(als->irq, indio_dev);
+
+ return ret;
+}
+
+/*
+ * Tear down in reverse probe order: mask interrupts, unregister from IIO,
+ * disable the sensor and release the irq (if one was requested).
+ */
+static int lm3533_als_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct lm3533_als *als = iio_priv(indio_dev);
+
+ lm3533_als_set_int_mode(indio_dev, false);
+ iio_device_unregister(indio_dev);
+ lm3533_als_disable(als);
+ if (als->irq)
+ free_irq(als->irq, indio_dev);
+
+ return 0;
+}
+
+/* Platform driver matching the "lm3533-als" MFD cell */
+static struct platform_driver lm3533_als_driver = {
+ .driver = {
+ .name = "lm3533-als",
+ },
+ .probe = lm3533_als_probe,
+ .remove = lm3533_als_remove,
+};
+module_platform_driver(lm3533_als_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-als");
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
new file mode 100644
index 00000000000000..62b7072af4de14
--- /dev/null
+++ b/drivers/iio/light/ltr501.c
@@ -0,0 +1,445 @@
+/*
+ * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor
+ *
+ * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x23
+ *
+ * TODO: interrupt, threshold, measurement rate, IR LED characteristics
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LTR501_DRV_NAME "ltr501"
+
+#define LTR501_ALS_CONTR 0x80 /* ALS operation mode, SW reset */
+#define LTR501_PS_CONTR 0x81 /* PS operation mode */
+#define LTR501_PART_ID 0x86
+#define LTR501_MANUFAC_ID 0x87
+#define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */
+#define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */
+#define LTR501_ALS_PS_STATUS 0x8c
+#define LTR501_PS_DATA 0x8d /* 16-bit, little endian */
+
+#define LTR501_ALS_CONTR_SW_RESET BIT(2)
+#define LTR501_CONTR_PS_GAIN_MASK (BIT(3) | BIT(2))
+#define LTR501_CONTR_PS_GAIN_SHIFT 2
+#define LTR501_CONTR_ALS_GAIN_MASK BIT(3)
+#define LTR501_CONTR_ACTIVE BIT(1)
+
+#define LTR501_STATUS_ALS_RDY BIT(2)
+#define LTR501_STATUS_PS_RDY BIT(0)
+
+#define LTR501_PS_DATA_MASK 0x7ff
+
+/* Per-device state; ALS and PS read paths are serialized independently. */
+struct ltr501_data {
+ struct i2c_client *client;
+ struct mutex lock_als, lock_ps; /* guard ALS / PS conversions */
+ u8 als_contr, ps_contr; /* cached control register values */
+};
+
+/*
+ * Poll the status register until every bit in @drdy_mask is set.
+ * Gives up after 100 attempts at 25 ms intervals. Returns 0 when the
+ * data is ready, -EIO on timeout, or a negative errno from the bus.
+ */
+static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+{
+ int attempt;
+
+ for (attempt = 0; attempt < 100; attempt++) {
+ int status = i2c_smbus_read_byte_data(data->client,
+ LTR501_ALS_PS_STATUS);
+ if (status < 0)
+ return status;
+ if ((status & drdy_mask) == drdy_mask)
+ return 0;
+ msleep(25);
+ }
+
+ dev_err(&data->client->dev, "ltr501_drdy() failed, data not ready\n");
+ return -EIO;
+}
+
+/*
+ * Wait for ALS data and block-read both channels (DATA1 = IR first,
+ * then DATA0 = visible+IR) into @buf, little-endian as delivered.
+ */
+static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_ALS_RDY);
+ if (ret < 0)
+ return ret;
+ /* always read both ALS channels in given order */
+ return i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, 2 * sizeof(__le16), (u8 *) buf);
+}
+
+/* Wait for PS data and return the raw 16-bit proximity word (or errno). */
+static int ltr501_read_ps(struct ltr501_data *data)
+{
+ int ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY);
+ if (ret < 0)
+ return ret;
+ return i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+}
+
+/*
+ * Intensity channel template: 16-bit unsigned samples, raw value always
+ * readable per channel, with optional shared-by-type info (_shared).
+ */
+#define LTR501_INTENSITY_CHANNEL(_idx, _addr, _mod, _shared) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .address = (_addr), \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = (_shared), \
+ .scan_index = (_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+/* Scan order: visible+IR intensity, IR intensity, 11-bit proximity, ts */
+static const struct iio_chan_spec ltr501_channels[] = {
+ LTR501_INTENSITY_CHANNEL(0, LTR501_ALS_DATA0, IIO_MOD_LIGHT_BOTH, 0),
+ LTR501_INTENSITY_CHANNEL(1, LTR501_ALS_DATA1, IIO_MOD_LIGHT_IR,
+ BIT(IIO_CHAN_INFO_SCALE)),
+ {
+ .type = IIO_PROXIMITY,
+ .address = LTR501_PS_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 11,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/* PS scale per gain setting, as {integer, micro} pairs: 1, 1/4, 1/8, 1/16 */
+static const int ltr501_ps_gain[4][2] = {
+ {1, 0}, {0, 250000}, {0, 125000}, {0, 62500}
+};
+
+/*
+ * IIO read_raw: raw samples for intensity/proximity (rejected while the
+ * buffer is active) and scale derived from the cached control registers.
+ */
+static int ltr501_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ __le16 buf[2];
+ int ret, i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ mutex_lock(&data->lock_als);
+ ret = ltr501_read_als(data, buf);
+ mutex_unlock(&data->lock_als);
+ if (ret < 0)
+ return ret;
+ /* buf[0] holds DATA1 (IR), buf[1] holds DATA0 */
+ *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
+ buf[0] : buf[1]);
+ return IIO_VAL_INT;
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock_ps);
+ ret = ltr501_read_ps(data);
+ mutex_unlock(&data->lock_ps);
+ if (ret < 0)
+ return ret;
+ /* proximity data is only 11 bits wide */
+ *val = ret & LTR501_PS_DATA_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ /* gain bit set: 0.005 lux/count, else 1 lux/count */
+ if (data->als_contr & LTR501_CONTR_ALS_GAIN_MASK) {
+ *val = 0;
+ *val2 = 5000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ } else {
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+ case IIO_PROXIMITY:
+ i = (data->ps_contr & LTR501_CONTR_PS_GAIN_MASK) >>
+ LTR501_CONTR_PS_GAIN_SHIFT;
+ *val = ltr501_ps_gain[i][0];
+ *val2 = ltr501_ps_gain[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Map a (val, val2) scale pair to its index in ltr501_ps_gain[], or -1. */
+static int ltr501_get_ps_gain_index(int val, int val2)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ltr501_ps_gain); idx++) {
+ if (ltr501_ps_gain[idx][0] == val &&
+ ltr501_ps_gain[idx][1] == val2)
+ return idx;
+ }
+
+ return -1;
+}
+
+/*
+ * IIO write_raw: only scale is writable. Updates the cached control
+ * value, then writes it back to the corresponding control register.
+ */
+static int ltr501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+ int i;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ /* only the two advertised scales are accepted */
+ if (val == 0 && val2 == 5000)
+ data->als_contr |= LTR501_CONTR_ALS_GAIN_MASK;
+ else if (val == 1 && val2 == 0)
+ data->als_contr &= ~LTR501_CONTR_ALS_GAIN_MASK;
+ else
+ return -EINVAL;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_ALS_CONTR, data->als_contr);
+ case IIO_PROXIMITY:
+ i = ltr501_get_ps_gain_index(val, val2);
+ if (i < 0)
+ return -EINVAL;
+ data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
+ data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
+ return i2c_smbus_write_byte_data(data->client,
+ LTR501_PS_CONTR, data->ps_contr);
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+/* advertise the discrete scales accepted by ltr501_write_raw() */
+static IIO_CONST_ATTR(in_proximity_scale_available, "1 0.25 0.125 0.0625");
+static IIO_CONST_ATTR(in_intensity_scale_available, "1 0.005");
+
+static struct attribute *ltr501_attributes[] = {
+ &iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+ &iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ltr501_attribute_group = {
+ .attrs = ltr501_attributes,
+};
+
+static const struct iio_info ltr501_info = {
+ .read_raw = ltr501_read_raw,
+ .write_raw = ltr501_write_raw,
+ .attrs = &ltr501_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltr501_write_contr(struct i2c_client *client, u8 als_val, u8 ps_val)
+{
+ int ret = i2c_smbus_write_byte_data(client, LTR501_ALS_CONTR, als_val);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, LTR501_PS_CONTR, ps_val);
+}
+
+/*
+ * Triggered-buffer handler: wait for the data covered by the active scan
+ * mask, read it, and push it with a timestamp.
+ *
+ * Fix: the ALS block-read error path used "return ret", which (a) returned
+ * a negative errno from an irqreturn_t handler and (b) skipped
+ * iio_trigger_notify_done(), leaving the trigger wedged. Route it through
+ * the "done" label like every other error path.
+ */
+static irqreturn_t ltr501_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ltr501_data *data = iio_priv(indio_dev);
+ u16 buf[8];
+ __le16 als_buf[2];
+ u8 mask = 0;
+ int j = 0;
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* figure out which data needs to be ready */
+ if (test_bit(0, indio_dev->active_scan_mask) ||
+ test_bit(1, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_ALS_RDY;
+ if (test_bit(2, indio_dev->active_scan_mask))
+ mask |= LTR501_STATUS_PS_RDY;
+
+ ret = ltr501_drdy(data, mask);
+ if (ret < 0)
+ goto done;
+
+ if (mask & LTR501_STATUS_ALS_RDY) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ LTR501_ALS_DATA1, sizeof(als_buf), (u8 *) als_buf);
+ if (ret < 0)
+ goto done; /* was "return ret" -- see comment above */
+ /* als_buf[0] is DATA1 (IR), als_buf[1] is DATA0 (both) */
+ if (test_bit(0, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[1]);
+ if (test_bit(1, indio_dev->active_scan_mask))
+ buf[j++] = le16_to_cpu(als_buf[0]);
+ }
+
+ if (mask & LTR501_STATUS_PS_RDY) {
+ ret = i2c_smbus_read_word_data(data->client, LTR501_PS_DATA);
+ if (ret < 0)
+ goto done;
+ buf[j++] = ret & LTR501_PS_DATA_MASK;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Read back both control registers, cache them with the ACTIVE bit set,
+ * and write the result to bring the part out of standby.
+ */
+static int ltr501_init(struct ltr501_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_ALS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->als_contr = ret | LTR501_CONTR_ACTIVE;
+
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PS_CONTR);
+ if (ret < 0)
+ return ret;
+ data->ps_contr = ret | LTR501_CONTR_ACTIVE;
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+
+/*
+ * I2C probe: allocate the IIO device, verify the part ID, activate the
+ * chip, attach the triggered buffer and register with IIO.
+ */
+static int ltr501_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ltr501_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock_als);
+ mutex_init(&data->lock_ps);
+
+ /* upper nibble of the part-id register identifies the LTR-501 */
+ ret = i2c_smbus_read_byte_data(data->client, LTR501_PART_ID);
+ if (ret < 0)
+ return ret;
+ if ((ret >> 4) != 0x8)
+ return -ENODEV;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ltr501_info;
+ indio_dev->channels = ltr501_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltr501_channels);
+ indio_dev->name = LTR501_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = ltr501_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ltr501_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_buffer;
+
+ return 0;
+
+error_unreg_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+/* Clear the ACTIVE bit in both cached control values and write them out. */
+static int ltr501_powerdown(struct ltr501_data *data)
+{
+ return ltr501_write_contr(data->client,
+ data->als_contr & ~LTR501_CONTR_ACTIVE,
+ data->ps_contr & ~LTR501_CONTR_ACTIVE);
+}
+
+/* Unregister, tear down the buffer, then put the chip in standby. */
+static int ltr501_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ ltr501_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: put the part in standby. */
+static int ltr501_suspend(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return ltr501_powerdown(data);
+}
+
+/* System resume: restore the cached (ACTIVE) control values. */
+static int ltr501_resume(struct device *dev)
+{
+ struct ltr501_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return ltr501_write_contr(data->client, data->als_contr,
+ data->ps_contr);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ltr501_pm_ops, ltr501_suspend, ltr501_resume);
+
+static const struct i2c_device_id ltr501_id[] = {
+ { "ltr501", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltr501_id);
+
+static struct i2c_driver ltr501_driver = {
+ .driver = {
+ .name = LTR501_DRV_NAME,
+ .pm = &ltr501_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = ltr501_probe,
+ .remove = ltr501_remove,
+ .id_table = ltr501_id,
+};
+
+module_i2c_driver(ltr501_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Lite-On LTR501 ambient light and proximity sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
new file mode 100644
index 00000000000000..a9e449b0be0cc4
--- /dev/null
+++ b/drivers/iio/light/tcs3414.c
@@ -0,0 +1,405 @@
+/*
+ * tcs3414.c - Support for TAOS TCS3414 digital color sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Digital color sensor with 16-bit channels for red, green, blue, clear);
+ * 7-bit I2C slave address 0x39 (TCS3414) or 0x29, 0x49, 0x59 (TCS3413,
+ * TCS3415, TCS3416, resp.)
+ *
+ * TODO: sync, interrupt support, thresholds, prescaler
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define TCS3414_DRV_NAME "tcs3414"
+
+#define TCS3414_COMMAND BIT(7)
+#define TCS3414_COMMAND_WORD (TCS3414_COMMAND | BIT(5))
+
+#define TCS3414_CONTROL (TCS3414_COMMAND | 0x00)
+#define TCS3414_TIMING (TCS3414_COMMAND | 0x01)
+#define TCS3414_ID (TCS3414_COMMAND | 0x04)
+#define TCS3414_GAIN (TCS3414_COMMAND | 0x07)
+#define TCS3414_DATA_GREEN (TCS3414_COMMAND_WORD | 0x10)
+#define TCS3414_DATA_RED (TCS3414_COMMAND_WORD | 0x12)
+#define TCS3414_DATA_BLUE (TCS3414_COMMAND_WORD | 0x14)
+#define TCS3414_DATA_CLEAR (TCS3414_COMMAND_WORD | 0x16)
+
+#define TCS3414_CONTROL_ADC_VALID BIT(4)
+#define TCS3414_CONTROL_ADC_EN BIT(1)
+#define TCS3414_CONTROL_POWER BIT(0)
+
+#define TCS3414_INTEG_MASK GENMASK(1, 0)
+#define TCS3414_INTEG_12MS 0x0
+#define TCS3414_INTEG_100MS 0x1
+#define TCS3414_INTEG_400MS 0x2
+
+#define TCS3414_GAIN_MASK GENMASK(5, 4)
+#define TCS3414_GAIN_SHIFT 4
+
+/* Per-device state; cached shadow copies of the chip's config registers. */
+struct tcs3414_data {
+ struct i2c_client *client;
+ struct mutex lock; /* serialize one-shot conversions */
+ u8 control; /* CONTROL register shadow */
+ u8 gain; /* GAIN register shadow */
+ u8 timing; /* TIMING register shadow */
+ u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */
+};
+
+/*
+ * Per-color intensity channel: 16-bit unsigned, raw readable per channel,
+ * scale and integration time shared across all intensity channels.
+ */
+#define TCS3414_CHANNEL(_color, _si, _addr) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+ .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+/* scale factors: 1/gain, indexed by the 2-bit GAIN field: 1x/4x/16x/64x */
+static const int tcs3414_scales[][2] = {
+ {1, 0}, {0, 250000}, {0, 62500}, {0, 15625}
+};
+
+/* integration time in ms, indexed by the 2-bit INTEG field */
+static const int tcs3414_times[] = { 12, 100, 400 };
+
+/* scan order matches the chip's data register layout (green first) */
+static const struct iio_chan_spec tcs3414_channels[] = {
+ TCS3414_CHANNEL(GREEN, 0, TCS3414_DATA_GREEN),
+ TCS3414_CHANNEL(RED, 1, TCS3414_DATA_RED),
+ TCS3414_CHANNEL(BLUE, 2, TCS3414_DATA_BLUE),
+ TCS3414_CHANNEL(CLEAR, 3, TCS3414_DATA_CLEAR),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * Kick off one conversion (set ADC_EN), poll for ADC_VALID (up to
+ * 25 * 20 ms), then clear ADC_EN again. The ADC is disabled before the
+ * timeout is reported so the chip is left in its idle state either way.
+ * Returns 0, -EIO on timeout, or a negative bus errno.
+ */
+static int tcs3414_req_data(struct tcs3414_data *data)
+{
+ int tries = 25;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control | TCS3414_CONTROL_ADC_EN);
+ if (ret < 0)
+ return ret;
+
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client, TCS3414_CONTROL);
+ if (ret < 0)
+ return ret;
+ if (ret & TCS3414_CONTROL_ADC_VALID)
+ break;
+ msleep(20);
+ }
+
+ /* restore the shadowed control value (ADC_EN cleared) */
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control);
+ if (ret < 0)
+ return ret;
+
+ /* tries is -1 only when the loop ran out without seeing ADC_VALID */
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * IIO read_raw: one-shot conversion for raw values (rejected while the
+ * buffer runs); scale and integration time come from the cached shadows.
+ */
+static int tcs3414_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tcs3414_data *data = iio_priv(indio_dev);
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+ mutex_lock(&data->lock);
+ ret = tcs3414_req_data(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ i = (data->gain & TCS3414_GAIN_MASK) >> TCS3414_GAIN_SHIFT;
+ *val = tcs3414_scales[i][0];
+ *val2 = tcs3414_scales[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_INT_TIME:
+ /* tcs3414_times[] is in ms; report seconds + microseconds */
+ *val = 0;
+ *val2 = tcs3414_times[data->timing & TCS3414_INTEG_MASK] * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: set gain (scale) or integration time, updating the
+ * register shadow and writing it to the chip.
+ *
+ * Fix: the INT_TIME branch first requires val == 0 and then compared
+ * "val == tcs3414_times[i] * 1000" -- which could never match, so the
+ * integration time was impossible to set. The microsecond part lives in
+ * val2; compare that instead.
+ */
+static int tcs3414_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tcs3414_data *data = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(tcs3414_scales); i++) {
+ if (val == tcs3414_scales[i][0] &&
+ val2 == tcs3414_scales[i][1]) {
+ data->gain &= ~TCS3414_GAIN_MASK;
+ data->gain |= i << TCS3414_GAIN_SHIFT;
+ return i2c_smbus_write_byte_data(
+ data->client, TCS3414_GAIN,
+ data->gain);
+ }
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
+ /* tcs3414_times[] is in ms, val2 is in us */
+ if (val2 == tcs3414_times[i] * 1000) {
+ data->timing &= ~TCS3414_INTEG_MASK;
+ data->timing |= i;
+ return i2c_smbus_write_byte_data(
+ data->client, TCS3414_TIMING,
+ data->timing);
+ }
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Triggered-buffer handler: read each enabled color channel (data
+ * registers are word-sized and consecutive from GREEN) and push with a
+ * timestamp. ADC_EN is held by the buffer preenable hook.
+ */
+static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tcs3414_data *data = iio_priv(indio_dev);
+ int i, j = 0;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ int ret = i2c_smbus_read_word_data(data->client,
+ TCS3414_DATA_GREEN + 2*i);
+ if (ret < 0)
+ goto done;
+
+ data->buffer[j++] = ret;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/* values accepted by tcs3414_write_raw() for scale / integration time */
+static IIO_CONST_ATTR(scale_available, "1 0.25 0.0625 0.015625");
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.012 0.1 0.4");
+
+static struct attribute *tcs3414_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tcs3414_attribute_group = {
+ .attrs = tcs3414_attributes,
+};
+
+static const struct iio_info tcs3414_info = {
+ .read_raw = tcs3414_read_raw,
+ .write_raw = tcs3414_write_raw,
+ .attrs = &tcs3414_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+/* Keep the ADC running for the whole time the buffer is enabled. */
+static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct tcs3414_data *data = iio_priv(indio_dev);
+
+ data->control |= TCS3414_CONTROL_ADC_EN;
+ return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control);
+}
+
+/* Detach the trigger first, then stop the ADC. */
+static int tcs3414_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct tcs3414_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->control &= ~TCS3414_CONTROL_ADC_EN;
+ return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control);
+}
+
+/* buffer hooks: custom pre-enable/pre-disable wrap the standard postenable */
+static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = {
+ .preenable = tcs3414_buffer_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = tcs3414_buffer_predisable,
+};
+
+/*
+ * I2C probe: allocate the IIO device, identify the part, power it up,
+ * program default timing, cache the gain, attach the triggered buffer
+ * and register with IIO.
+ */
+static int tcs3414_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tcs3414_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &tcs3414_info;
+ indio_dev->name = TCS3414_DRV_NAME;
+ indio_dev->channels = tcs3414_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tcs3414_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* upper nibble of the ID register distinguishes the family members */
+ ret = i2c_smbus_read_byte_data(data->client, TCS3414_ID);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & 0xf0) {
+ case 0x00:
+ dev_info(&client->dev, "TCS3404 found\n");
+ break;
+ case 0x10:
+ dev_info(&client->dev, "TCS3413/14/15/16 found\n");
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /* power up, ADC still disabled until a conversion is requested */
+ data->control = TCS3414_CONTROL_POWER;
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control);
+ if (ret < 0)
+ return ret;
+
+ data->timing = TCS3414_INTEG_12MS; /* free running */
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_TIMING,
+ data->timing);
+ if (ret < 0)
+ return ret;
+
+ /* cache whatever gain the chip currently reports */
+ ret = i2c_smbus_read_byte_data(data->client, TCS3414_GAIN);
+ if (ret < 0)
+ return ret;
+ data->gain = ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ tcs3414_trigger_handler, &tcs3414_buffer_setup_ops);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+/* Clear POWER and ADC_EN; the shadow itself is left untouched for resume. */
+static int tcs3414_powerdown(struct tcs3414_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control & ~(TCS3414_CONTROL_POWER |
+ TCS3414_CONTROL_ADC_EN));
+}
+
+/* Unregister, tear down the buffer, then power the chip down. */
+static int tcs3414_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ tcs3414_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: power the chip down. */
+static int tcs3414_suspend(struct device *dev)
+{
+ struct tcs3414_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return tcs3414_powerdown(data);
+}
+
+/* System resume: restore the cached control value (POWER bit set). */
+static int tcs3414_resume(struct device *dev)
+{
+ struct tcs3414_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ data->control);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tcs3414_pm_ops, tcs3414_suspend, tcs3414_resume);
+
+static const struct i2c_device_id tcs3414_id[] = {
+ { "tcs3414", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tcs3414_id);
+
+static struct i2c_driver tcs3414_driver = {
+ .driver = {
+ .name = TCS3414_DRV_NAME,
+ .pm = &tcs3414_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = tcs3414_probe,
+ .remove = tcs3414_remove,
+ .id_table = tcs3414_id,
+};
+module_i2c_driver(tcs3414_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TCS3414 digital color sensors driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
new file mode 100644
index 00000000000000..752569985d1dff
--- /dev/null
+++ b/drivers/iio/light/tcs3472.c
@@ -0,0 +1,379 @@
+/*
+ * tcs3472.c - Support for TAOS TCS3472 color light-to-digital converter
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Color light sensor with 16-bit channels for red, green, blue, clear);
+ * 7-bit I2C slave address 0x39 (TCS34721, TCS34723) or 0x29 (TCS34725,
+ * TCS34727)
+ *
+ * TODO: interrupt support, thresholds, wait time
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define TCS3472_DRV_NAME "tcs3472"
+
+#define TCS3472_COMMAND BIT(7)
+#define TCS3472_AUTO_INCR BIT(5)
+
+#define TCS3472_ENABLE (TCS3472_COMMAND | 0x00)
+#define TCS3472_ATIME (TCS3472_COMMAND | 0x01)
+#define TCS3472_WTIME (TCS3472_COMMAND | 0x03)
+#define TCS3472_AILT (TCS3472_COMMAND | 0x04)
+#define TCS3472_AIHT (TCS3472_COMMAND | 0x06)
+#define TCS3472_PERS (TCS3472_COMMAND | 0x0c)
+#define TCS3472_CONFIG (TCS3472_COMMAND | 0x0d)
+#define TCS3472_CONTROL (TCS3472_COMMAND | 0x0f)
+#define TCS3472_ID (TCS3472_COMMAND | 0x12)
+#define TCS3472_STATUS (TCS3472_COMMAND | 0x13)
+#define TCS3472_CDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x14)
+#define TCS3472_RDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x16)
+#define TCS3472_GDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x18)
+#define TCS3472_BDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x1a)
+
+#define TCS3472_STATUS_AVALID BIT(0)
+#define TCS3472_ENABLE_AEN BIT(1)
+#define TCS3472_ENABLE_PON BIT(0)
+#define TCS3472_CONTROL_AGAIN_MASK (BIT(0) | BIT(1))
+
+/* Per-device state; shadow copies of ENABLE, CONTROL and ATIME registers. */
+struct tcs3472_data {
+ struct i2c_client *client;
+ struct mutex lock; /* serialize one-shot conversions */
+ u8 enable; /* ENABLE register shadow */
+ u8 control; /* CONTROL register shadow (AGAIN field) */
+ u8 atime; /* ATIME register shadow */
+ u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
+};
+
+/*
+ * Per-color intensity channel: 16-bit unsigned, raw readable per channel,
+ * calibscale (AGAIN) and integration time shared across all channels.
+ */
+#define TCS3472_CHANNEL(_color, _si, _addr) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .channel2 = IIO_MOD_LIGHT_##_color, \
+ .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+/* analog gain values, indexed by the 2-bit AGAIN field */
+static const int tcs3472_agains[] = { 1, 4, 16, 60 };
+
+/* scan order matches the chip's data register layout (clear first) */
+static const struct iio_chan_spec tcs3472_channels[] = {
+ TCS3472_CHANNEL(CLEAR, 0, TCS3472_CDATA),
+ TCS3472_CHANNEL(RED, 1, TCS3472_RDATA),
+ TCS3472_CHANNEL(GREEN, 2, TCS3472_GDATA),
+ TCS3472_CHANNEL(BLUE, 3, TCS3472_BDATA),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * Poll the status register until AVALID is set, trying up to 50 times at
+ * 20 ms intervals. Returns 0 when data is ready, -EIO on timeout, or a
+ * negative errno from the bus.
+ */
+static int tcs3472_req_data(struct tcs3472_data *data)
+{
+ int attempt;
+ int status;
+
+ for (attempt = 0; attempt < 50; attempt++) {
+ status = i2c_smbus_read_byte_data(data->client, TCS3472_STATUS);
+ if (status < 0)
+ return status;
+ if (status & TCS3472_STATUS_AVALID)
+ return 0;
+ msleep(20);
+ }
+
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+}
+
+/*
+ * IIO read_raw: raw samples via a polled conversion (rejected while the
+ * buffer is active); gain and integration time from the register shadows.
+ */
+static int tcs3472_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ mutex_lock(&data->lock);
+ ret = tcs3472_req_data(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = tcs3472_agains[data->control &
+ TCS3472_CONTROL_AGAIN_MASK];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ /* ATIME counts down from 256 in 2.4 ms steps */
+ *val = 0;
+ *val2 = (256 - data->atime) * 2400;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: set analog gain (calibscale) or integration time,
+ * updating the register shadow and writing it to the chip.
+ *
+ * Fix: ATIME is an 8-bit register, but it was written with
+ * i2c_smbus_write_word_data(), which sends two bytes and clobbers the
+ * register following ATIME. Use a byte write.
+ */
+static int tcs3472_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val2 != 0)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(tcs3472_agains); i++) {
+ if (val == tcs3472_agains[i]) {
+ data->control &= ~TCS3472_CONTROL_AGAIN_MASK;
+ data->control |= i;
+ return i2c_smbus_write_byte_data(
+ data->client, TCS3472_CONTROL,
+ data->control);
+ }
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ /* integration time is (256 - ATIME) * 2.4 ms */
+ for (i = 0; i < 256; i++) {
+ if (val2 == (256 - i) * 2400) {
+ data->atime = i;
+ return i2c_smbus_write_byte_data(
+ data->client, TCS3472_ATIME,
+ data->atime);
+ }
+ }
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Triggered-buffer handler: wait for valid data, read each enabled color
+ * channel (word registers consecutive from CDATA) and push with a
+ * timestamp.
+ */
+static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int i, j = 0;
+
+ int ret = tcs3472_req_data(data);
+ if (ret < 0)
+ goto done;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = i2c_smbus_read_word_data(data->client,
+ TCS3472_CDATA + 2*i);
+ if (ret < 0)
+ goto done;
+
+ data->buffer[j++] = ret;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t tcs3472_show_int_time_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 1; i <= 256; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06d ",
+ 2400 * i);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_CONST_ATTR(calibscale_available, "1 4 16 60");
+static IIO_DEV_ATTR_INT_TIME_AVAIL(tcs3472_show_int_time_available);
+
+static struct attribute *tcs3472_attributes[] = {
+ &iio_const_attr_calibscale_available.dev_attr.attr,
+ &iio_dev_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tcs3472_attribute_group = {
+ .attrs = tcs3472_attributes,
+};
+
+static const struct iio_info tcs3472_info = {
+ .read_raw = tcs3472_read_raw,
+ .write_raw = tcs3472_write_raw,
+ .attrs = &tcs3472_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int tcs3472_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tcs3472_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &tcs3472_info;
+ indio_dev->name = TCS3472_DRV_NAME;
+ indio_dev->channels = tcs3472_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tcs3472_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ID);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0x44)
+ dev_info(&client->dev, "TCS34721/34725 found\n");
+ else if (ret == 0x4d)
+ dev_info(&client->dev, "TCS34723/34727 found\n");
+ else
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_CONTROL);
+ if (ret < 0)
+ return ret;
+ data->control = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ATIME);
+ if (ret < 0)
+ return ret;
+ data->atime = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ /* enable device */
+ data->enable = ret | TCS3472_ENABLE_PON | TCS3472_ENABLE_AEN;
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ tcs3472_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int tcs3472_powerdown(struct tcs3472_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable & ~(TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+}
+
+static int tcs3472_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ tcs3472_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tcs3472_suspend(struct device *dev)
+{
+ struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return tcs3472_powerdown(data);
+}
+
+static int tcs3472_resume(struct device *dev)
+{
+ struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+ return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable | (TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tcs3472_pm_ops, tcs3472_suspend, tcs3472_resume);
+
+static const struct i2c_device_id tcs3472_id[] = {
+ { "tcs3472", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tcs3472_id);
+
+static struct i2c_driver tcs3472_driver = {
+ .driver = {
+ .name = TCS3472_DRV_NAME,
+ .pm = &tcs3472_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = tcs3472_probe,
+ .remove = tcs3472_remove,
+ .id_table = tcs3472_id,
+};
+module_i2c_driver(tcs3472_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TCS3472 color light sensors driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
new file mode 100644
index 00000000000000..94daa9fc12478b
--- /dev/null
+++ b/drivers/iio/light/tsl2563.c
@@ -0,0 +1,901 @@
+/*
+ * drivers/iio/light/tsl2563.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Written by Timo O. Karjalainen <timo.o.karjalainen@nokia.com>
+ * Contact: Amit Kucheria <amit.kucheria@verdurent.com>
+ *
+ * Converted to IIO driver
+ * Amit Kucheria <amit.kucheria@verdurent.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/platform_data/tsl2563.h>
+
+/* Use this many bits for fraction part. */
+#define ADC_FRAC_BITS 14
+
+/* Given number of 1/10000's in ADC_FRAC_BITS precision. */
+#define FRAC10K(f) (((f) * (1L << (ADC_FRAC_BITS))) / (10000))
+
+/* Bits used for fraction in calibration coefficients.*/
+#define CALIB_FRAC_BITS 10
+/* 0.5 in CALIB_FRAC_BITS precision */
+#define CALIB_FRAC_HALF (1 << (CALIB_FRAC_BITS - 1))
+/* Make a fraction from a number n that was multiplied with b. */
+#define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b))
+/* Decimal 10^(digits in sysfs presentation) */
+#define CALIB_BASE_SYSFS 1000
+
+#define TSL2563_CMD 0x80
+#define TSL2563_CLEARINT 0x40
+
+#define TSL2563_REG_CTRL 0x00
+#define TSL2563_REG_TIMING 0x01
+#define TSL2563_REG_LOWLOW 0x02 /* data0 low threshold, 2 bytes */
+#define TSL2563_REG_LOWHIGH 0x03
+#define TSL2563_REG_HIGHLOW 0x04 /* data0 high threshold, 2 bytes */
+#define TSL2563_REG_HIGHHIGH 0x05
+#define TSL2563_REG_INT 0x06
+#define TSL2563_REG_ID 0x0a
+#define TSL2563_REG_DATA0LOW 0x0c /* broadband sensor value, 2 bytes */
+#define TSL2563_REG_DATA0HIGH 0x0d
+#define TSL2563_REG_DATA1LOW 0x0e /* infrared sensor value, 2 bytes */
+#define TSL2563_REG_DATA1HIGH 0x0f
+
+#define TSL2563_CMD_POWER_ON 0x03
+#define TSL2563_CMD_POWER_OFF 0x00
+#define TSL2563_CTRL_POWER_MASK 0x03
+
+#define TSL2563_TIMING_13MS 0x00
+#define TSL2563_TIMING_100MS 0x01
+#define TSL2563_TIMING_400MS 0x02
+#define TSL2563_TIMING_MASK 0x03
+#define TSL2563_TIMING_GAIN16 0x10
+#define TSL2563_TIMING_GAIN1 0x00
+
+#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_LEVEL 0x10
+#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
+
+struct tsl2563_gainlevel_coeff {
+ u8 gaintime;
+ u16 min;
+ u16 max;
+};
+
+static const struct tsl2563_gainlevel_coeff tsl2563_gainlevel_table[] = {
+ {
+ .gaintime = TSL2563_TIMING_400MS | TSL2563_TIMING_GAIN16,
+ .min = 0,
+ .max = 65534,
+ }, {
+ .gaintime = TSL2563_TIMING_400MS | TSL2563_TIMING_GAIN1,
+ .min = 2048,
+ .max = 65534,
+ }, {
+ .gaintime = TSL2563_TIMING_100MS | TSL2563_TIMING_GAIN1,
+ .min = 4095,
+ .max = 37177,
+ }, {
+ .gaintime = TSL2563_TIMING_13MS | TSL2563_TIMING_GAIN1,
+ .min = 3000,
+ .max = 65535,
+ },
+};
+
+struct tsl2563_chip {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct delayed_work poweroff_work;
+
+ /* Remember state for suspend and resume functions */
+ bool suspended;
+
+ struct tsl2563_gainlevel_coeff const *gainlevel;
+
+ u16 low_thres;
+ u16 high_thres;
+ u8 intr;
+ bool int_enabled;
+
+ /* Calibration coefficients */
+ u32 calib0;
+ u32 calib1;
+ int cover_comp_gain;
+
+ /* Cache current values, to be returned while suspended */
+ u32 data0;
+ u32 data1;
+};
+
+static int tsl2563_set_power(struct tsl2563_chip *chip, int on)
+{
+ struct i2c_client *client = chip->client;
+ u8 cmd;
+
+ cmd = on ? TSL2563_CMD_POWER_ON : TSL2563_CMD_POWER_OFF;
+ return i2c_smbus_write_byte_data(client,
+ TSL2563_CMD | TSL2563_REG_CTRL, cmd);
+}
+
+/*
+ * Return value is 0 for off, 1 for on, or a negative error
+ * code if reading failed.
+ */
+static int tsl2563_get_power(struct tsl2563_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_CTRL);
+ if (ret < 0)
+ return ret;
+
+ return (ret & TSL2563_CTRL_POWER_MASK) == TSL2563_CMD_POWER_ON;
+}
+
+static int tsl2563_configure(struct tsl2563_chip *chip)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_TIMING,
+ chip->gainlevel->gaintime);
+ if (ret)
+ goto error_ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGHLOW,
+ chip->high_thres & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGHHIGH,
+ (chip->high_thres >> 8) & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOWLOW,
+ chip->low_thres & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOWHIGH,
+ (chip->low_thres >> 8) & 0xFF);
+/*
+ * Interrupt register is automatically written anyway if it is relevant
+ * so is not here.
+ */
+error_ret:
+ return ret;
+}
+
+static void tsl2563_poweroff_work(struct work_struct *work)
+{
+ struct tsl2563_chip *chip =
+ container_of(work, struct tsl2563_chip, poweroff_work.work);
+ tsl2563_set_power(chip, 0);
+}
+
+static int tsl2563_detect(struct tsl2563_chip *chip)
+{
+ int ret;
+
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ return ret;
+
+ ret = tsl2563_get_power(chip);
+ if (ret < 0)
+ return ret;
+
+ return ret ? 0 : -ENODEV;
+}
+
+static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_ID);
+ if (ret < 0)
+ return ret;
+
+ *id = ret;
+
+ return 0;
+}
+
+/*
+ * "Normalized" ADC value is one obtained with 400ms of integration time and
+ * 16x gain. This function returns the number of bits of shift needed to
+ * convert between normalized values and HW values obtained using given
+ * timing and gain settings.
+ */
+static int adc_shiftbits(u8 timing)
+{
+ int shift = 0;
+
+ switch (timing & TSL2563_TIMING_MASK) {
+ case TSL2563_TIMING_13MS:
+ shift += 5;
+ break;
+ case TSL2563_TIMING_100MS:
+ shift += 2;
+ break;
+ case TSL2563_TIMING_400MS:
+ /* no-op */
+ break;
+ }
+
+ if (!(timing & TSL2563_TIMING_GAIN16))
+ shift += 4;
+
+ return shift;
+}
+
+/* Convert a HW ADC value to normalized scale. */
+static u32 normalize_adc(u16 adc, u8 timing)
+{
+ return adc << adc_shiftbits(timing);
+}
+
+static void tsl2563_wait_adc(struct tsl2563_chip *chip)
+{
+ unsigned int delay;
+
+ switch (chip->gainlevel->gaintime & TSL2563_TIMING_MASK) {
+ case TSL2563_TIMING_13MS:
+ delay = 14;
+ break;
+ case TSL2563_TIMING_100MS:
+ delay = 101;
+ break;
+ default:
+ delay = 402;
+ }
+ /*
+ * TODO: Make sure that we wait at least required delay but why we
+ * have to extend it one tick more?
+ */
+ schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
+}
+
+static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
+{
+ struct i2c_client *client = chip->client;
+
+ if (adc > chip->gainlevel->max || adc < chip->gainlevel->min) {
+
+ (adc > chip->gainlevel->max) ?
+ chip->gainlevel++ : chip->gainlevel--;
+
+ i2c_smbus_write_byte_data(client,
+ TSL2563_CMD | TSL2563_REG_TIMING,
+ chip->gainlevel->gaintime);
+
+ tsl2563_wait_adc(chip);
+ tsl2563_wait_adc(chip);
+
+ return 1;
+ } else
+ return 0;
+}
+
+static int tsl2563_get_adc(struct tsl2563_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ u16 adc0, adc1;
+ int retry = 1;
+ int ret = 0;
+
+ if (chip->suspended)
+ goto out;
+
+ if (!chip->int_enabled) {
+ cancel_delayed_work(&chip->poweroff_work);
+
+ if (!tsl2563_get_power(chip)) {
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ goto out;
+ ret = tsl2563_configure(chip);
+ if (ret)
+ goto out;
+ tsl2563_wait_adc(chip);
+ }
+ }
+
+ while (retry) {
+ ret = i2c_smbus_read_word_data(client,
+ TSL2563_CMD | TSL2563_REG_DATA0LOW);
+ if (ret < 0)
+ goto out;
+ adc0 = ret;
+
+ ret = i2c_smbus_read_word_data(client,
+ TSL2563_CMD | TSL2563_REG_DATA1LOW);
+ if (ret < 0)
+ goto out;
+ adc1 = ret;
+
+ retry = tsl2563_adjust_gainlevel(chip, adc0);
+ }
+
+ chip->data0 = normalize_adc(adc0, chip->gainlevel->gaintime);
+ chip->data1 = normalize_adc(adc1, chip->gainlevel->gaintime);
+
+ if (!chip->int_enabled)
+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static inline int calib_to_sysfs(u32 calib)
+{
+ return (int) (((calib * CALIB_BASE_SYSFS) +
+ CALIB_FRAC_HALF) >> CALIB_FRAC_BITS);
+}
+
+static inline u32 calib_from_sysfs(int value)
+{
+ return (((u32) value) << CALIB_FRAC_BITS) / CALIB_BASE_SYSFS;
+}
+
+/*
+ * Conversions between lux and ADC values.
+ *
+ * The basic formula is lux = c0 * adc0 - c1 * adc1, where c0 and c1 are
+ * appropriate constants. Different constants are needed for different
+ * kinds of light, determined by the ratio adc1/adc0 (basically the ratio
+ * of the intensities in infrared and visible wavelengths). lux_table below
+ * lists the upper threshold of the adc1/adc0 ratio and the corresponding
+ * constants.
+ */
+
+struct tsl2563_lux_coeff {
+ unsigned long ch_ratio;
+ unsigned long ch0_coeff;
+ unsigned long ch1_coeff;
+};
+
+static const struct tsl2563_lux_coeff lux_table[] = {
+ {
+ .ch_ratio = FRAC10K(1300),
+ .ch0_coeff = FRAC10K(315),
+ .ch1_coeff = FRAC10K(262),
+ }, {
+ .ch_ratio = FRAC10K(2600),
+ .ch0_coeff = FRAC10K(337),
+ .ch1_coeff = FRAC10K(430),
+ }, {
+ .ch_ratio = FRAC10K(3900),
+ .ch0_coeff = FRAC10K(363),
+ .ch1_coeff = FRAC10K(529),
+ }, {
+ .ch_ratio = FRAC10K(5200),
+ .ch0_coeff = FRAC10K(392),
+ .ch1_coeff = FRAC10K(605),
+ }, {
+ .ch_ratio = FRAC10K(6500),
+ .ch0_coeff = FRAC10K(229),
+ .ch1_coeff = FRAC10K(291),
+ }, {
+ .ch_ratio = FRAC10K(8000),
+ .ch0_coeff = FRAC10K(157),
+ .ch1_coeff = FRAC10K(180),
+ }, {
+ .ch_ratio = FRAC10K(13000),
+ .ch0_coeff = FRAC10K(34),
+ .ch1_coeff = FRAC10K(26),
+ }, {
+ .ch_ratio = ULONG_MAX,
+ .ch0_coeff = 0,
+ .ch1_coeff = 0,
+ },
+};
+
+/* Convert normalized, scaled ADC values to lux. */
+static unsigned int adc_to_lux(u32 adc0, u32 adc1)
+{
+ const struct tsl2563_lux_coeff *lp = lux_table;
+ unsigned long ratio, lux, ch0 = adc0, ch1 = adc1;
+
+ ratio = ch0 ? ((ch1 << ADC_FRAC_BITS) / ch0) : ULONG_MAX;
+
+ while (lp->ch_ratio < ratio)
+ lp++;
+
+ lux = ch0 * lp->ch0_coeff - ch1 * lp->ch1_coeff;
+
+ return (unsigned int) (lux >> ADC_FRAC_BITS);
+}
+
+/* Apply calibration coefficient to ADC count. */
+static u32 calib_adc(u32 adc, u32 calib)
+{
+ unsigned long scaled = adc;
+
+ scaled *= calib;
+ scaled >>= CALIB_FRAC_BITS;
+
+ return (u32) scaled;
+}
+
+static int tsl2563_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ chip->calib0 = calib_from_sysfs(val);
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
+ chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tsl2563_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret = -EINVAL;
+ u32 calib0, calib1;
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+
+ mutex_lock(&chip->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = tsl2563_get_adc(chip);
+ if (ret)
+ goto error_ret;
+ calib0 = calib_adc(chip->data0, chip->calib0) *
+ chip->cover_comp_gain;
+ calib1 = calib_adc(chip->data1, chip->calib1) *
+ chip->cover_comp_gain;
+ *val = adc_to_lux(calib0, calib1);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_INTENSITY:
+ ret = tsl2563_get_adc(chip);
+ if (ret)
+ goto error_ret;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = chip->data0;
+ else
+ *val = chip->data1;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+ *val = calib_to_sysfs(chip->calib0);
+ else
+ *val = calib_to_sysfs(chip->calib1);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+error_ret:
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static const struct iio_event_spec tsl2563_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec tsl2563_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .channel = 0,
+ }, {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ .event_spec = tsl2563_events,
+ .num_event_specs = ARRAY_SIZE(tsl2563_events),
+ }, {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE),
+ }
+};
+
+static int tsl2563_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = chip->high_thres;
+ break;
+ case IIO_EV_DIR_FALLING:
+ *val = chip->low_thres;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT;
+}
+
+static int tsl2563_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+ int ret;
+ u8 address;
+
+ if (dir == IIO_EV_DIR_RISING)
+ address = TSL2563_REG_HIGHLOW;
+ else
+ address = TSL2563_REG_LOWLOW;
+ mutex_lock(&chip->lock);
+ ret = i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | address,
+ val & 0xFF);
+ if (ret)
+ goto error_ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | (address + 1),
+ (val >> 8) & 0xFF);
+ if (dir == IIO_EV_DIR_RISING)
+ chip->high_thres = val;
+ else
+ chip->low_thres = val;
+
+error_ret:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static irqreturn_t tsl2563_event_handler(int irq, void *private)
+{
+ struct iio_dev *dev_info = private;
+ struct tsl2563_chip *chip = iio_priv(dev_info);
+
+ iio_push_event(dev_info,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+
+ /* clear the interrupt and push the event */
+ i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
+ return IRQ_HANDLED;
+}
+
+static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ if (state && !(chip->intr & 0x30)) {
+ chip->intr &= ~0x30;
+ chip->intr |= 0x10;
+ /* ensure the chip is actually on */
+ cancel_delayed_work(&chip->poweroff_work);
+ if (!tsl2563_get_power(chip)) {
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ goto out;
+ ret = tsl2563_configure(chip);
+ if (ret)
+ goto out;
+ }
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
+ chip->int_enabled = true;
+ }
+
+ if (!state && (chip->intr & 0x30)) {
+ chip->intr &= ~0x30;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
+ chip->int_enabled = false;
+ /* now the interrupt is not enabled, we can go to sleep */
+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+ }
+out:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct tsl2563_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT);
+ mutex_unlock(&chip->lock);
+ if (ret < 0)
+ return ret;
+
+ return !!(ret & 0x30);
+}
+
+static const struct iio_info tsl2563_info_no_irq = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &tsl2563_read_raw,
+ .write_raw = &tsl2563_write_raw,
+};
+
+static const struct iio_info tsl2563_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &tsl2563_read_raw,
+ .write_raw = &tsl2563_write_raw,
+ .read_event_value = &tsl2563_read_thresh,
+ .write_event_value = &tsl2563_write_thresh,
+ .read_event_config = &tsl2563_read_interrupt_config,
+ .write_event_config = &tsl2563_write_interrupt_config,
+};
+
+static int tsl2563_probe(struct i2c_client *client,
+ const struct i2c_device_id *device_id)
+{
+ struct iio_dev *indio_dev;
+ struct tsl2563_chip *chip;
+ struct tsl2563_platform_data *pdata = client->dev.platform_data;
+ struct device_node *np = client->dev.of_node;
+ int err = 0;
+ u8 id = 0;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ err = tsl2563_detect(chip);
+ if (err) {
+ dev_err(&client->dev, "detect error %d\n", -err);
+ return err;
+ }
+
+ err = tsl2563_read_id(chip, &id);
+ if (err) {
+ dev_err(&client->dev, "read id error %d\n", -err);
+ return err;
+ }
+
+ mutex_init(&chip->lock);
+
+ /* Default values used until userspace says otherwise */
+ chip->low_thres = 0x0;
+ chip->high_thres = 0xffff;
+ chip->gainlevel = tsl2563_gainlevel_table;
+ chip->intr = TSL2563_INT_PERSIST(4);
+ chip->calib0 = calib_from_sysfs(CALIB_BASE_SYSFS);
+ chip->calib1 = calib_from_sysfs(CALIB_BASE_SYSFS);
+
+ if (pdata)
+ chip->cover_comp_gain = pdata->cover_comp_gain;
+ else if (np)
+ of_property_read_u32(np, "amstaos,cover-comp-gain",
+ &chip->cover_comp_gain);
+ else
+ chip->cover_comp_gain = 1;
+
+ dev_info(&client->dev, "model %d, rev. %d\n", id >> 4, id & 0x0f);
+ indio_dev->name = client->name;
+ indio_dev->channels = tsl2563_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq)
+ indio_dev->info = &tsl2563_info;
+ else
+ indio_dev->info = &tsl2563_info_no_irq;
+
+ if (client->irq) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ &tsl2563_event_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tsl2563_event",
+ indio_dev);
+ if (err) {
+ dev_err(&client->dev, "irq request error %d\n", -err);
+ return err;
+ }
+ }
+
+ err = tsl2563_configure(chip);
+ if (err) {
+ dev_err(&client->dev, "configure error %d\n", -err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
+
+ /* The interrupt cannot yet be enabled so this is fine without lock */
+ schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
+
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "iio registration error %d\n", -err);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ cancel_delayed_work(&chip->poweroff_work);
+ flush_scheduled_work();
+ return err;
+}
+
+static int tsl2563_remove(struct i2c_client *client)
+{
+ struct tsl2563_chip *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+
+ iio_device_unregister(indio_dev);
+ if (!chip->int_enabled)
+ cancel_delayed_work(&chip->poweroff_work);
+ /* Ensure that interrupts are disabled - then flush any bottom halves */
+ chip->intr &= ~0x30;
+ i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
+ flush_scheduled_work();
+ tsl2563_set_power(chip, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tsl2563_suspend(struct device *dev)
+{
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = tsl2563_set_power(chip, 0);
+ if (ret)
+ goto out;
+
+ chip->suspended = true;
+
+out:
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static int tsl2563_resume(struct device *dev)
+{
+ struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = tsl2563_set_power(chip, 1);
+ if (ret)
+ goto out;
+
+ ret = tsl2563_configure(chip);
+ if (ret)
+ goto out;
+
+ chip->suspended = false;
+
+out:
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2563_pm_ops, tsl2563_suspend, tsl2563_resume);
+#define TSL2563_PM_OPS (&tsl2563_pm_ops)
+#else
+#define TSL2563_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id tsl2563_id[] = {
+ { "tsl2560", 0 },
+ { "tsl2561", 1 },
+ { "tsl2562", 2 },
+ { "tsl2563", 3 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tsl2563_id);
+
+static struct i2c_driver tsl2563_i2c_driver = {
+ .driver = {
+ .name = "tsl2563",
+ .pm = TSL2563_PM_OPS,
+ },
+ .probe = tsl2563_probe,
+ .remove = tsl2563_remove,
+ .id_table = tsl2563_id,
+};
+module_i2c_driver(tsl2563_i2c_driver);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("tsl2563 light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
new file mode 100644
index 00000000000000..0763b863257387
--- /dev/null
+++ b/drivers/iio/light/tsl4531.c
@@ -0,0 +1,261 @@
+/*
+ * tsl4531.c - Support for TAOS TSL4531 ambient light sensor
+ *
+ * Copyright 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for the TSL4531x family
+ * TSL45311/TSL45313: 7-bit I2C slave address 0x39
+ * TSL45315/TSL45317: 7-bit I2C slave address 0x29
+ *
+ * TODO: single cycle measurement
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define TSL4531_DRV_NAME "tsl4531"
+
+#define TCS3472_COMMAND BIT(7)
+
+#define TSL4531_CONTROL (TCS3472_COMMAND | 0x00)
+#define TSL4531_CONFIG (TCS3472_COMMAND | 0x01)
+#define TSL4531_DATA (TCS3472_COMMAND | 0x04)
+#define TSL4531_ID (TCS3472_COMMAND | 0x0a)
+
+/* operating modes in control register */
+#define TSL4531_MODE_POWERDOWN 0x00
+#define TSL4531_MODE_SINGLE_ADC 0x02
+#define TSL4531_MODE_NORMAL 0x03
+
+/* integration time control in config register */
+#define TSL4531_TCNTRL_400MS 0x00
+#define TSL4531_TCNTRL_200MS 0x01
+#define TSL4531_TCNTRL_100MS 0x02
+
+/* part number in id register */
+#define TSL45311_ID 0x8
+#define TSL45313_ID 0x9
+#define TSL45315_ID 0xa
+#define TSL45317_ID 0xb
+#define TSL4531_ID_SHIFT 4
+
+struct tsl4531_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ int int_time;
+};
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.1 0.2 0.4");
+
+static struct attribute *tsl4531_attributes[] = {
+ &iio_const_attr_integration_time_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tsl4531_attribute_group = {
+ .attrs = tsl4531_attributes,
+};
+
+static const struct iio_chan_spec tsl4531_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME)
+ }
+};
+
+static int tsl4531_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tsl4531_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_smbus_read_word_data(data->client,
+ TSL4531_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* 0.. 1x, 1 .. 2x, 2 .. 4x */
+ *val = 1 << data->int_time;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ if (data->int_time == 0)
+ *val2 = 400000;
+ else if (data->int_time == 1)
+ *val2 = 200000;
+ else if (data->int_time == 2)
+ *val2 = 100000;
+ else
+ return -EINVAL;
+ *val = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tsl4531_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tsl4531_data *data = iio_priv(indio_dev);
+ int int_time, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ if (val2 == 400000)
+ int_time = 0;
+ else if (val2 == 200000)
+ int_time = 1;
+ else if (val2 == 100000)
+ int_time = 2;
+ else
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client,
+ TSL4531_CONFIG, int_time);
+ if (ret >= 0)
+ data->int_time = int_time;
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info tsl4531_info = {
+ .read_raw = tsl4531_read_raw,
+ .write_raw = tsl4531_write_raw,
+ .attrs = &tsl4531_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int tsl4531_check_id(struct i2c_client *client)
+{
+ int ret = i2c_smbus_read_byte_data(client, TSL4531_ID);
+ if (ret < 0)
+ return ret;
+
+ switch (ret >> TSL4531_ID_SHIFT) {
+ case TSL45311_ID:
+ case TSL45313_ID:
+ case TSL45315_ID:
+ case TSL45317_ID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int tsl4531_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tsl4531_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ if (!tsl4531_check_id(client)) {
+ dev_err(&client->dev, "no TSL4531 sensor\n");
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, TSL4531_CONTROL,
+ TSL4531_MODE_NORMAL);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, TSL4531_CONFIG,
+ TSL4531_TCNTRL_400MS);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &tsl4531_info;
+ indio_dev->channels = tsl4531_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tsl4531_channels);
+ indio_dev->name = TSL4531_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return iio_device_register(indio_dev);
+}
+
+static int tsl4531_powerdown(struct i2c_client *client)
+{
+ return i2c_smbus_write_byte_data(client, TSL4531_CONTROL,
+ TSL4531_MODE_POWERDOWN);
+}
+
+static int tsl4531_remove(struct i2c_client *client)
+{
+ iio_device_unregister(i2c_get_clientdata(client));
+ tsl4531_powerdown(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tsl4531_suspend(struct device *dev)
+{
+ return tsl4531_powerdown(to_i2c_client(dev));
+}
+
+static int tsl4531_resume(struct device *dev)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL,
+ TSL4531_MODE_NORMAL);
+}
+
+static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume);
+#define TSL4531_PM_OPS (&tsl4531_pm_ops)
+#else
+#define TSL4531_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id tsl4531_id[] = {
+ { "tsl4531", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tsl4531_id);
+
+static struct i2c_driver tsl4531_driver = {
+ .driver = {
+ .name = TSL4531_DRV_NAME,
+ .pm = TSL4531_PM_OPS,
+ .owner = THIS_MODULE,
+ },
+ .probe = tsl4531_probe,
+ .remove = tsl4531_remove,
+ .id_table = tsl4531_id,
+};
+
+module_i2c_driver(tsl4531_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TAOS TSL4531 ambient light sensors driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
new file mode 100644
index 00000000000000..d948c4778ba6cc
--- /dev/null
+++ b/drivers/iio/light/vcnl4000.c
@@ -0,0 +1,198 @@
+/*
+ * vcnl4000.c - Support for Vishay VCNL4000 combined ambient light and
+ * proximity sensor
+ *
+ * Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for VCNL4000 (7-bit I2C slave address 0x13)
+ *
+ * TODO:
+ * allow to adjust IR current
+ * proximity threshold and event handling
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VCNL4000_DRV_NAME "vcnl4000"
+
+#define VCNL4000_COMMAND 0x80 /* Command register */
+#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL4000_LED_CURRENT 0x83 /* IR LED current for proximity mode */
+#define VCNL4000_AL_PARAM 0x84 /* Ambient light parameter register */
+#define VCNL4000_AL_RESULT_HI 0x85 /* Ambient light result register, MSB */
+#define VCNL4000_AL_RESULT_LO 0x86 /* Ambient light result register, LSB */
+#define VCNL4000_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
+#define VCNL4000_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
+#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
+#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+
+/* Bit masks for COMMAND register */
+#define VCNL4000_AL_RDY 0x40 /* ALS data ready? */
+#define VCNL4000_PS_RDY 0x20 /* proximity data ready? */
+#define VCNL4000_AL_OD 0x10 /* start on-demand ALS measurement */
+#define VCNL4000_PS_OD 0x08 /* start on-demand proximity measurement */
+
+struct vcnl4000_data {
+ struct i2c_client *client;
+};
+
+static const struct i2c_device_id vcnl4000_id[] = {
+ { "vcnl4000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, vcnl4000_id);
+
+static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
+ u8 rdy_mask, u8 data_reg, int *val)
+{
+ int tries = 20;
+ __be16 buf;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
+ req_mask);
+ if (ret < 0)
+ return ret;
+
+ /* wait for data to become ready */
+ while (tries--) {
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_COMMAND);
+ if (ret < 0)
+ return ret;
+ if (ret & rdy_mask)
+ break;
+ msleep(20); /* measurement takes up to 100 ms */
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev,
+ "vcnl4000_measure() failed, data not ready\n");
+ return -EIO;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ data_reg, sizeof(buf), (u8 *) &buf);
+ if (ret < 0)
+ return ret;
+
+ *val = be16_to_cpu(buf);
+
+ return 0;
+}
+
+static const struct iio_chan_spec vcnl4000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+static int vcnl4000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = -EINVAL;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = vcnl4000_measure(data,
+ VCNL4000_AL_OD, VCNL4000_AL_RDY,
+ VCNL4000_AL_RESULT_HI, val);
+ if (ret < 0)
+ return ret;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_PROXIMITY:
+ ret = vcnl4000_measure(data,
+ VCNL4000_PS_OD, VCNL4000_PS_RDY,
+ VCNL4000_PS_RESULT_HI, val);
+ if (ret < 0)
+ return ret;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_LIGHT) {
+ *val = 0;
+ *val2 = 250000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_info vcnl4000_info = {
+ .read_raw = vcnl4000_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int vcnl4000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct vcnl4000_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&client->dev, "VCNL4000 Ambient light/proximity sensor, Prod %02x, Rev: %02x\n",
+ ret >> 4, ret & 0xf);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &vcnl4000_info;
+ indio_dev->channels = vcnl4000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vcnl4000_channels);
+ indio_dev->name = VCNL4000_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static struct i2c_driver vcnl4000_driver = {
+ .driver = {
+ .name = VCNL4000_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = vcnl4000_probe,
+ .id_table = vcnl4000_id,
+};
+
+module_i2c_driver(vcnl4000_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Vishay VCNL4000 proximity/ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
new file mode 100644
index 00000000000000..b2dba9e506ab12
--- /dev/null
+++ b/drivers/iio/magnetometer/Kconfig
@@ -0,0 +1,82 @@
+#
+# Magnetometer sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Magnetometer sensors"
+
+config AK8975
+ tristate "Asahi Kasei AK8975 3-Axis Magnetometer"
+ depends on I2C
+ depends on GPIOLIB
+ help
+ Say yes here to build support for Asahi Kasei AK8975 3-Axis
+ Magnetometer. This driver can also support AK8963, if i2c
+ device name is identified as ak8963.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ak8975.
+
+config AK09911
+ tristate "Asahi Kasei AK09911 3-axis Compass"
+ depends on I2C
+ help
+ Say yes here to build support for Asahi Kasei AK09911 3-Axis
+ Magnetometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ak09911.
+
+config MAG3110
+ tristate "Freescale MAG3110 3-Axis Magnetometer"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MAG3110 3-Axis
+ magnetometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mag3110.
+
+config HID_SENSOR_MAGNETOMETER_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+	tristate "HID Magnetometer 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ Magnetometer 3D.
+
+config IIO_ST_MAGN_3AXIS
+ tristate "STMicroelectronics magnetometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_MAGN_I2C_3AXIS if (I2C)
+ select IIO_ST_MAGN_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics magnetometers:
+ LSM303DLHC, LSM303DLM, LIS3MDL.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_magn (core functions for the driver [it is mandatory]);
+ - st_magn_i2c (necessary for the I2C devices [optional*]);
+ - st_magn_spi (necessary for the SPI devices [optional*]);
+
+ (*) one of these is necessary to do something.
+
+config IIO_ST_MAGN_I2C_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_MAGN_SPI_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
+endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
new file mode 100644
index 00000000000000..b91315e0b82669
--- /dev/null
+++ b/drivers/iio/magnetometer/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for industrial I/O Magnetometer sensor drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AK09911) += ak09911.o
+obj-$(CONFIG_AK8975) += ak8975.o
+obj-$(CONFIG_MAG3110) += mag3110.o
+obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
+
+obj-$(CONFIG_IIO_ST_MAGN_3AXIS) += st_magn.o
+st_magn-y := st_magn_core.o
+st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
+
+obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
+obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
diff --git a/drivers/iio/magnetometer/ak09911.c b/drivers/iio/magnetometer/ak09911.c
new file mode 100644
index 00000000000000..b2bc942ff6b811
--- /dev/null
+++ b/drivers/iio/magnetometer/ak09911.c
@@ -0,0 +1,326 @@
+/*
+ * AK09911 3-axis compass driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/iio/iio.h>
+
+#define AK09911_REG_WIA1 0x00
+#define AK09911_REG_WIA2 0x01
+#define AK09911_WIA1_VALUE 0x48
+#define AK09911_WIA2_VALUE 0x05
+
+#define AK09911_REG_ST1 0x10
+#define AK09911_REG_HXL 0x11
+#define AK09911_REG_HXH 0x12
+#define AK09911_REG_HYL 0x13
+#define AK09911_REG_HYH 0x14
+#define AK09911_REG_HZL 0x15
+#define AK09911_REG_HZH 0x16
+
+#define AK09911_REG_ASAX 0x60
+#define AK09911_REG_ASAY 0x61
+#define AK09911_REG_ASAZ 0x62
+
+#define AK09911_REG_CNTL1 0x30
+#define AK09911_REG_CNTL2 0x31
+#define AK09911_REG_CNTL3 0x32
+
+#define AK09911_MODE_SNG_MEASURE 0x01
+#define AK09911_MODE_SELF_TEST 0x10
+#define AK09911_MODE_FUSE_ACCESS 0x1F
+#define AK09911_MODE_POWERDOWN 0x00
+#define AK09911_RESET_DATA 0x01
+
+#define AK09911_REG_CNTL1 0x30
+#define AK09911_REG_CNTL2 0x31
+#define AK09911_REG_CNTL3 0x32
+
+#define AK09911_RAW_TO_GAUSS(asa) ((((asa) + 128) * 6000) / 256)
+
+#define AK09911_MAX_CONVERSION_TIMEOUT_MS 500
+#define AK09911_CONVERSION_DONE_POLL_TIME_MS 10
+
+struct ak09911_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 asa[3];
+ long raw_to_gauss[3];
+};
+
+static const int ak09911_index_to_reg[] = {
+ AK09911_REG_HXL, AK09911_REG_HYL, AK09911_REG_HZL,
+};
+
+static int ak09911_set_mode(struct i2c_client *client, u8 mode)
+{
+ int ret;
+
+ switch (mode) {
+ case AK09911_MODE_SNG_MEASURE:
+ case AK09911_MODE_SELF_TEST:
+ case AK09911_MODE_FUSE_ACCESS:
+ case AK09911_MODE_POWERDOWN:
+ ret = i2c_smbus_write_byte_data(client,
+ AK09911_REG_CNTL2, mode);
+ if (ret < 0) {
+ dev_err(&client->dev, "set_mode error\n");
+ return ret;
+ }
+		/* After mode change wait at least 100us */
+ usleep_range(100, 500);
+ break;
+ default:
+ dev_err(&client->dev,
+ "%s: Unknown mode(%d).", __func__, mode);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Get Sensitivity Adjustment value */
+static int ak09911_get_asa(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak09911_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = ak09911_set_mode(client, AK09911_MODE_FUSE_ACCESS);
+ if (ret < 0)
+ return ret;
+
+ /* Get asa data and store in the device data. */
+ ret = i2c_smbus_read_i2c_block_data(client, AK09911_REG_ASAX,
+ 3, data->asa);
+ if (ret < 0) {
+ dev_err(&client->dev, "Not able to read asa data\n");
+ return ret;
+ }
+
+ ret = ak09911_set_mode(client, AK09911_MODE_POWERDOWN);
+ if (ret < 0)
+ return ret;
+
+ data->raw_to_gauss[0] = AK09911_RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = AK09911_RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = AK09911_RAW_TO_GAUSS(data->asa[2]);
+
+ return 0;
+}
+
+static int ak09911_verify_chip_id(struct i2c_client *client)
+{
+ u8 wia_val[2];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, AK09911_REG_WIA1,
+ 2, wia_val);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading WIA\n");
+ return ret;
+ }
+
+ dev_dbg(&client->dev, "WIA %02x %02x\n", wia_val[0], wia_val[1]);
+
+ if (wia_val[0] != AK09911_WIA1_VALUE ||
+ wia_val[1] != AK09911_WIA2_VALUE) {
+ dev_err(&client->dev, "Device ak09911 not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int wait_conversion_complete_polled(struct ak09911_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 read_status;
+ u32 timeout_ms = AK09911_MAX_CONVERSION_TIMEOUT_MS;
+ int ret;
+
+ /* Wait for the conversion to complete. */
+ while (timeout_ms) {
+ msleep_interruptible(AK09911_CONVERSION_DONE_POLL_TIME_MS);
+ ret = i2c_smbus_read_byte_data(client, AK09911_REG_ST1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST1\n");
+ return ret;
+ }
+ read_status = ret & 0x01;
+ if (read_status)
+ break;
+ timeout_ms -= AK09911_CONVERSION_DONE_POLL_TIME_MS;
+ }
+ if (!timeout_ms) {
+ dev_err(&client->dev, "Conversion timeout happened\n");
+ return -EIO;
+ }
+
+ return read_status;
+}
+
+static int ak09911_read_axis(struct iio_dev *indio_dev, int index, int *val)
+{
+ struct ak09911_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = ak09911_set_mode(client, AK09911_MODE_SNG_MEASURE);
+ if (ret < 0)
+ goto fn_exit;
+
+ ret = wait_conversion_complete_polled(data);
+ if (ret < 0)
+ goto fn_exit;
+
+ /* Read data */
+ ret = i2c_smbus_read_word_data(client, ak09911_index_to_reg[index]);
+ if (ret < 0) {
+ dev_err(&client->dev, "Read axis data fails\n");
+ goto fn_exit;
+ }
+
+ mutex_unlock(&data->lock);
+
+ /* Clamp to valid range. */
+ *val = sign_extend32(clamp_t(s16, ret, -8192, 8191), 13);
+
+ return IIO_VAL_INT;
+
+fn_exit:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ak09911_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ak09911_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return ak09911_read_axis(indio_dev, chan->address, val);
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+#define AK09911_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ }
+
+static const struct iio_chan_spec ak09911_channels[] = {
+ AK09911_CHANNEL(X, 0), AK09911_CHANNEL(Y, 1), AK09911_CHANNEL(Z, 2),
+};
+
+static const struct iio_info ak09911_info = {
+ .read_raw = &ak09911_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct acpi_device_id ak_acpi_match[] = {
+ {"AK009911", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
+
+static int ak09911_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ak09911_data *data;
+ const char *name;
+ int ret;
+
+ ret = ak09911_verify_chip_id(client);
+ if (ret) {
+ dev_err(&client->dev, "AK00911 not detected\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->client = client;
+ mutex_init(&data->lock);
+
+ ret = ak09911_get_asa(client);
+ if (ret)
+ return ret;
+
+ if (id)
+ name = id->name;
+ else if (ACPI_HANDLE(&client->dev))
+ name = dev_name(&client->dev);
+ else
+ return -ENODEV;
+
+ dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ak09911_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak09911_channels);
+ indio_dev->info = &ak09911_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = name;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id ak09911_id[] = {
+ {"ak09911", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ak09911_id);
+
+static struct i2c_driver ak09911_driver = {
+ .driver = {
+ .name = "ak09911",
+ .acpi_match_table = ACPI_PTR(ak_acpi_match),
+ },
+ .probe = ak09911_probe,
+ .id_table = ak09911_id,
+};
+module_i2c_driver(ak09911_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("AK09911 Compass driver");
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
new file mode 100644
index 00000000000000..bf5ef077e791df
--- /dev/null
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -0,0 +1,601 @@
+/*
+ * A sensor driver for the magnetometer AK8975.
+ *
+ * Magnetic compass sensor driver for monitoring magnetic flux information.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+/*
+ * Register definitions, as well as various shifts and masks to get at the
+ * individual fields of the registers.
+ */
+#define AK8975_REG_WIA 0x00
+#define AK8975_DEVICE_ID 0x48
+
+#define AK8975_REG_INFO 0x01
+
+#define AK8975_REG_ST1 0x02
+#define AK8975_REG_ST1_DRDY_SHIFT 0
+#define AK8975_REG_ST1_DRDY_MASK (1 << AK8975_REG_ST1_DRDY_SHIFT)
+
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+#define AK8975_REG_ST2 0x09
+#define AK8975_REG_ST2_DERR_SHIFT 2
+#define AK8975_REG_ST2_DERR_MASK (1 << AK8975_REG_ST2_DERR_SHIFT)
+
+#define AK8975_REG_ST2_HOFL_SHIFT 3
+#define AK8975_REG_ST2_HOFL_MASK (1 << AK8975_REG_ST2_HOFL_SHIFT)
+
+#define AK8975_REG_CNTL 0x0A
+#define AK8975_REG_CNTL_MODE_SHIFT 0
+#define AK8975_REG_CNTL_MODE_MASK (0xF << AK8975_REG_CNTL_MODE_SHIFT)
+#define AK8975_REG_CNTL_MODE_POWER_DOWN 0
+#define AK8975_REG_CNTL_MODE_ONCE 1
+#define AK8975_REG_CNTL_MODE_SELF_TEST 8
+#define AK8975_REG_CNTL_MODE_FUSE_ROM 0xF
+
+#define AK8975_REG_RSVC 0x0B
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+#define AK8975_REG_I2CDIS 0x0F
+#define AK8975_REG_ASAX 0x10
+#define AK8975_REG_ASAY 0x11
+#define AK8975_REG_ASAZ 0x12
+
+#define AK8975_MAX_REGS AK8975_REG_ASAZ
+
+/*
+ * Miscellaneous values.
+ */
+#define AK8975_MAX_CONVERSION_TIMEOUT 500
+#define AK8975_CONVERSION_DONE_POLL_TIME 10
+#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS_8975(asa) ((((asa) + 128) * 3000) / 256)
+#define RAW_TO_GAUSS_8963(asa) ((((asa) + 128) * 6000) / 256)
+
+/* Compatible Asahi Kasei Compass parts */
+enum asahi_compass_chipset {
+ AK8975,
+ AK8963,
+};
+
+/*
+ * Per-instance context data for the device.
+ */
+struct ak8975_data {
+ struct i2c_client *client;
+ struct attribute_group attrs;
+ struct mutex lock;
+ u8 asa[3];
+ long raw_to_gauss[3];
+ u8 reg_cache[AK8975_MAX_REGS];
+ int eoc_gpio;
+ int eoc_irq;
+ wait_queue_head_t data_ready_queue;
+ unsigned long flags;
+ enum asahi_compass_chipset chipset;
+};
+
+static const int ak8975_index_to_reg[] = {
+ AK8975_REG_HXL, AK8975_REG_HYL, AK8975_REG_HZL,
+};
+
+/*
+ * Helper function to write to the I2C device's registers.
+ */
+static int ak8975_write_data(struct i2c_client *client,
+ u8 reg, u8 val, u8 mask, u8 shift)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ u8 regval;
+ int ret;
+
+ regval = (data->reg_cache[reg] & ~mask) | (val << shift);
+ ret = i2c_smbus_write_byte_data(client, reg, regval);
+ if (ret < 0) {
+ dev_err(&client->dev, "Write to device fails status %x\n", ret);
+ return ret;
+ }
+ data->reg_cache[reg] = regval;
+
+ return 0;
+}
+
+/*
+ * Handle data ready irq
+ */
+static irqreturn_t ak8975_irq_handler(int irq, void *data)
+{
+ struct ak8975_data *ak8975 = data;
+
+ set_bit(0, &ak8975->flags);
+ wake_up(&ak8975->data_ready_queue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Install data ready interrupt handler
+ */
+static int ak8975_setup_irq(struct ak8975_data *data)
+{
+ struct i2c_client *client = data->client;
+ int rc;
+ int irq;
+
+ if (client->irq)
+ irq = client->irq;
+ else
+ irq = gpio_to_irq(data->eoc_gpio);
+
+ rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(&client->dev), data);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "irq %d request failed, (gpio %d): %d\n",
+ irq, data->eoc_gpio, rc);
+ return rc;
+ }
+
+ init_waitqueue_head(&data->data_ready_queue);
+ clear_bit(0, &data->flags);
+ data->eoc_irq = irq;
+
+ return rc;
+}
+
+
+/*
+ * Perform some start-of-day setup, including reading the asa calibration
+ * values and caching them.
+ */
+static int ak8975_setup(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ u8 device_id;
+ int ret;
+
+ /* Confirm that the device we're talking to is really an AK8975. */
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_WIA);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error reading WIA\n");
+ return ret;
+ }
+ device_id = ret;
+ if (device_id != AK8975_DEVICE_ID) {
+ dev_err(&client->dev, "Device ak8975 not found\n");
+ return -ENODEV;
+ }
+
+ /* Write the fused rom access mode. */
+ ret = ak8975_write_data(client,
+ AK8975_REG_CNTL,
+ AK8975_REG_CNTL_MODE_FUSE_ROM,
+ AK8975_REG_CNTL_MODE_MASK,
+ AK8975_REG_CNTL_MODE_SHIFT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting fuse access mode\n");
+ return ret;
+ }
+
+ /* Get asa data and store in the device data. */
+ ret = i2c_smbus_read_i2c_block_data(client, AK8975_REG_ASAX,
+ 3, data->asa);
+ if (ret < 0) {
+ dev_err(&client->dev, "Not able to read asa data\n");
+ return ret;
+ }
+
+ /* After reading fuse ROM data set power-down mode */
+ ret = ak8975_write_data(client,
+ AK8975_REG_CNTL,
+ AK8975_REG_CNTL_MODE_POWER_DOWN,
+ AK8975_REG_CNTL_MODE_MASK,
+ AK8975_REG_CNTL_MODE_SHIFT);
+
+ if (data->eoc_gpio > 0 || client->irq) {
+ ret = ak8975_setup_irq(data);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Error setting data ready interrupt\n");
+ return ret;
+ }
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
+
+/*
+ * Precalculate scale factor (in Gauss units) for each axis and
+ * store in the device data.
+ *
+ * This scale factor is axis-dependent, and is derived from 3 calibration
+ * factors ASA(x), ASA(y), and ASA(z).
+ *
+ * These ASA values are read from the sensor device at start of day, and
+ * cached in the device context struct.
+ *
+ * Adjusting the flux value with the sensitivity adjustment value should be
+ * done via the following formula:
+ *
+ * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
+ *
+ * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
+ * is the resultant adjusted value.
+ *
+ * We reduce the formula to:
+ *
+ * Hadj = H * (ASA + 128) / 256
+ *
+ * H is in the range of -4096 to 4095. The magnetometer has a range of
+ * +-1229uT. To go from the raw value to uT is:
+ *
+ * HuT = H * 1229/4096, or roughly, 3/10.
+ *
+ * Since 1uT = 0.01 gauss, our final scale factor becomes:
+ *
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
+ *
+ * Since ASA doesn't change, we cache the resultant scale factor into the
+ * device context in ak8975_setup().
+ */
+ if (data->chipset == AK8963) {
+ /*
+ * H range is +-8190 and magnetometer range is +-4912.
+ * So HuT using the above explanation for 8975,
+ * 4912/8190 = ~ 6/10.
+ * So the Hadj should use 6/10 instead of 3/10.
+ */
+ data->raw_to_gauss[0] = RAW_TO_GAUSS_8963(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS_8963(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS_8963(data->asa[2]);
+ } else {
+ data->raw_to_gauss[0] = RAW_TO_GAUSS_8975(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS_8975(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS_8975(data->asa[2]);
+ }
+
+ return 0;
+}
+
+static int wait_conversion_complete_gpio(struct ak8975_data *data)
+{
+ struct i2c_client *client = data->client;
+ u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT;
+ int ret;
+
+ /* Wait for the conversion to complete. */
+ while (timeout_ms) {
+ msleep(AK8975_CONVERSION_DONE_POLL_TIME);
+ if (gpio_get_value(data->eoc_gpio))
+ break;
+ timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
+ }
+ if (!timeout_ms) {
+ dev_err(&client->dev, "Conversion timeout happened\n");
+ return -EINVAL;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ if (ret < 0)
+ dev_err(&client->dev, "Error in reading ST1\n");
+
+ return ret;
+}
+
+static int wait_conversion_complete_polled(struct ak8975_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 read_status;
+ u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT;
+ int ret;
+
+ /* Wait for the conversion to complete. */
+ while (timeout_ms) {
+ msleep(AK8975_CONVERSION_DONE_POLL_TIME);
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST1\n");
+ return ret;
+ }
+ read_status = ret;
+ if (read_status)
+ break;
+ timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
+ }
+ if (!timeout_ms) {
+ dev_err(&client->dev, "Conversion timeout happened\n");
+ return -EINVAL;
+ }
+
+ return read_status;
+}
+
+/* Returns 0 if the end of conversion interrupt occurred or -ETIME otherwise */
+static int wait_conversion_complete_interrupt(struct ak8975_data *data)
+{
+ int ret;
+
+ ret = wait_event_timeout(data->data_ready_queue,
+ test_bit(0, &data->flags),
+ AK8975_DATA_READY_TIMEOUT);
+ clear_bit(0, &data->flags);
+
+ return ret > 0 ? 0 : -ETIME;
+}
+
+/*
+ * Emits the raw flux value for the x, y, or z axis.
+ */
+static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ /* Set up the device for taking a sample. */
+ ret = ak8975_write_data(client,
+ AK8975_REG_CNTL,
+ AK8975_REG_CNTL_MODE_ONCE,
+ AK8975_REG_CNTL_MODE_MASK,
+ AK8975_REG_CNTL_MODE_SHIFT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting operating mode\n");
+ goto exit;
+ }
+
+ /* Wait for the conversion to complete. */
+ if (data->eoc_irq)
+ ret = wait_conversion_complete_interrupt(data);
+ else if (gpio_is_valid(data->eoc_gpio))
+ ret = wait_conversion_complete_gpio(data);
+ else
+ ret = wait_conversion_complete_polled(data);
+ if (ret < 0)
+ goto exit;
+
+ /* This will be executed only for non-interrupt based waiting case */
+ if (ret & AK8975_REG_ST1_DRDY_MASK) {
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST2);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST2\n");
+ goto exit;
+ }
+ if (ret & (AK8975_REG_ST2_DERR_MASK |
+ AK8975_REG_ST2_HOFL_MASK)) {
+ dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /* Read the flux value from the appropriate register
+ (the register is specified in the iio device attributes). */
+ ret = i2c_smbus_read_word_data(client, ak8975_index_to_reg[index]);
+ if (ret < 0) {
+ dev_err(&client->dev, "Read axis data fails\n");
+ goto exit;
+ }
+
+ mutex_unlock(&data->lock);
+
+ /* Clamp to valid range. */
+ *val = clamp_t(s16, ret, -4096, 4095);
+ return IIO_VAL_INT;
+
+exit:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int ak8975_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return ak8975_read_axis(indio_dev, chan->address, val);
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+#define AK8975_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ }
+
+static const struct iio_chan_spec ak8975_channels[] = {
+ AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2),
+};
+
+static const struct iio_info ak8975_info = {
+ .read_raw = &ak8975_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct acpi_device_id ak_acpi_match[] = {
+ {"AK8975", AK8975},
+ {"AK8963", AK8963},
+ {"INVN6500", AK8963},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
+
+static const char *ak8975_match_acpi_device(struct device *dev,
+ enum asahi_compass_chipset *chipset)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+ *chipset = (int)id->driver_data;
+
+ return dev_name(dev);
+}
+
+static int ak8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ak8975_data *data;
+ struct iio_dev *indio_dev;
+ int eoc_gpio;
+ int err;
+ const char *name = NULL;
+
+ /* Grab and set up the supplied GPIO. */
+ if (client->dev.platform_data)
+ eoc_gpio = *(int *)(client->dev.platform_data);
+ else if (client->dev.of_node)
+ eoc_gpio = of_get_gpio(client->dev.of_node, 0);
+ else
+ eoc_gpio = -1;
+
+ if (eoc_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We may not have a GPIO based IRQ to scan, that is fine, we will
+ poll if so */
+ if (gpio_is_valid(eoc_gpio)) {
+ err = devm_gpio_request_one(&client->dev, eoc_gpio,
+ GPIOF_IN, "ak_8975");
+ if (err < 0) {
+ dev_err(&client->dev,
+ "failed to request GPIO %d, error %d\n",
+ eoc_gpio, err);
+ return err;
+ }
+ }
+
+ /* Register with IIO */
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->client = client;
+ data->eoc_gpio = eoc_gpio;
+ data->eoc_irq = 0;
+
+ /* id will be NULL when enumerated via ACPI */
+ if (id) {
+ data->chipset =
+ (enum asahi_compass_chipset)(id->driver_data);
+ name = id->name;
+ } else if (ACPI_HANDLE(&client->dev))
+ name = ak8975_match_acpi_device(&client->dev, &data->chipset);
+ else
+ return -ENOSYS;
+
+ dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
+
+ /* Perform some basic start-of-day setup of the device. */
+ err = ak8975_setup(client);
+ if (err < 0) {
+ dev_err(&client->dev, "AK8975 initialization fails\n");
+ return err;
+ }
+
+ data->client = client;
+ mutex_init(&data->lock);
+ data->eoc_gpio = eoc_gpio;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ak8975_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
+ indio_dev->info = &ak8975_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = name;
+ err = devm_iio_device_register(&client->dev, indio_dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const struct i2c_device_id ak8975_id[] = {
+ {"ak8975", AK8975},
+ {"ak8963", AK8963},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ak8975_id);
+
+static const struct of_device_id ak8975_of_match[] = {
+ { .compatible = "asahi-kasei,ak8975", },
+ { .compatible = "ak8975", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ak8975_of_match);
+
+static struct i2c_driver ak8975_driver = {
+ .driver = {
+ .name = "ak8975",
+ .of_match_table = ak8975_of_match,
+ .acpi_match_table = ACPI_PTR(ak_acpi_match),
+ },
+ .probe = ak8975_probe,
+ .id_table = ak8975_id,
+};
+module_i2c_driver(ak8975_driver);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("AK8975 magnetometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
new file mode 100644
index 00000000000000..6294575d2777a1
--- /dev/null
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -0,0 +1,541 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+enum magn_3d_channel {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP,
+ CHANNEL_SCAN_INDEX_NORTH_TRUE_TILT_COMP,
+ CHANNEL_SCAN_INDEX_NORTH_MAGN,
+ CHANNEL_SCAN_INDEX_NORTH_TRUE,
+ MAGN_3D_CHANNEL_MAX,
+};
+
+struct magn_3d_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
+
+ /* dynamically sized array to hold sensor values */
+ u32 *iio_vals;
+ /* array of pointers to sensor value */
+ u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
+
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
+static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS,
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS,
+ HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS,
+ HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+ HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH,
+ HID_USAGE_SENSOR_ORIENT_MAGN_NORTH,
+ HID_USAGE_SENSOR_ORIENT_TRUE_NORTH,
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec magn_3d_channels[] = {
+ {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_MAGN,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_ROT,
+ .modified = 1,
+ .channel2 = IIO_MOD_NORTH_MAGN_TILT_COMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_ROT,
+ .modified = 1,
+ .channel2 = IIO_MOD_NORTH_TRUE_TILT_COMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_ROT,
+ .modified = 1,
+ .channel2 = IIO_MOD_NORTH_MAGN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }, {
+ .type = IIO_ROT,
+ .modified = 1,
+ .channel2 = IIO_MOD_NORTH_TRUE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ }
+};
+
+/*
+ * Adjust a channel's scan_type from the report descriptor: @size is the
+ * attribute size in bytes reported by the sensor hub, so the real sample
+ * width is size * 8 bits, while storage is always a full u32 per sample.
+ */
+static void magn_3d_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/*
+ * Channel read_raw handler.
+ *
+ * mask 0 is the raw-value request: the device is powered up, we wait two
+ * poll intervals for a fresh sample, fetch it from the sensor hub, then
+ * power the device back down.  chan->address indexes into both the parsed
+ * attribute array and magn_3d_addresses[].
+ */
+static int magn_3d_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret_type;
+ s32 poll_value;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case 0:
+ poll_value = hid_sensor_read_poll_value(
+ &magn_state->common_attributes);
+ if (poll_value < 0)
+ return -EINVAL;
+
+ hid_sensor_power_state(&magn_state->common_attributes, true);
+ /* Allow the sensor time to produce a fresh sample */
+ msleep_interruptible(poll_value * 2);
+
+ report_id =
+ magn_state->magn[chan->address].report_id;
+ address = magn_3d_addresses[chan->address];
+ if (report_id >= 0)
+ *val = sensor_hub_input_attr_get_raw_value(
+ magn_state->common_attributes.hsdev,
+ HID_USAGE_SENSOR_COMPASS_3D, address,
+ report_id);
+ else {
+ /* No report for this channel: power down before bailing */
+ *val = 0;
+ hid_sensor_power_state(&magn_state->common_attributes,
+ false);
+ return -EINVAL;
+ }
+ hid_sensor_power_state(&magn_state->common_attributes, false);
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ /* Scale split into integer/fractional parts at parse time */
+ *val = magn_state->scale_pre_decml;
+ *val2 = magn_state->scale_post_decml;
+ ret_type = magn_state->scale_precision;
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = magn_state->value_offset;
+ ret_type = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret_type = hid_sensor_read_samp_freq_value(
+ &magn_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret_type = hid_sensor_read_raw_hyst_value(
+ &magn_state->common_attributes, val, val2);
+ break;
+ default:
+ ret_type = -EINVAL;
+ break;
+ }
+
+ return ret_type;
+}
+
+/*
+ * Channel write_raw handler: only sampling frequency and hysteresis are
+ * writable; both are forwarded to the common HID sensor attribute helpers.
+ */
+static int magn_3d_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = hid_sensor_write_samp_freq_value(
+ &magn_state->common_attributes, val, val2);
+ break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+ ret = hid_sensor_write_raw_hyst_value(
+ &magn_state->common_attributes, val, val2);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info magn_3d_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &magn_3d_read_raw,
+ .write_raw = &magn_3d_write_raw,
+};
+
+/*
+ * Push one complete scan (the iio_vals array) into the IIO buffer.
+ * Called from the event callback once all samples have been captured.
+ */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, data);
+}
+
+/*
+ * Sensor-hub "end of report" callback: all per-usage samples for this
+ * report have already been stored into iio_vals by magn_3d_capture_sample(),
+ * so push the whole scan to the buffer — but only if a consumer enabled
+ * data collection (data_ready set by the trigger path).
+ */
+static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
+ if (atomic_read(&magn_state->common_attributes.data_ready))
+ hid_sensor_push_data(indio_dev, magn_state->iio_vals);
+
+ return 0;
+}
+
+/*
+ * Per-usage sample callback: map the HID usage id onto its scan index and
+ * store the raw 32-bit value into the slot recorded in magn_val_addr[].
+ * The offset math relies on the flux X/Y/Z and the four heading usage ids
+ * being contiguous, matching the enum magn_3d_channel ordering.
+ * NOTE(review): raw_len is not checked; assumes the hub always delivers at
+ * least 4 bytes per attribute — confirm against the sensor-hub core.
+ */
+static int magn_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned usage_id,
+ size_t raw_len, char *raw_data,
+ void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
+ int offset;
+ int ret = 0;
+ u32 *iio_val = NULL;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS:
+ case HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS:
+ case HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS:
+ offset = (usage_id - HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS)
+ + CHANNEL_SCAN_INDEX_X;
+ break;
+ case HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH:
+ case HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH:
+ case HID_USAGE_SENSOR_ORIENT_MAGN_NORTH:
+ case HID_USAGE_SENSOR_ORIENT_TRUE_NORTH:
+ offset = (usage_id - HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH)
+ + CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* NULL slot means this usage was not present in the report descriptor */
+ iio_val = magn_state->magn_val_addr[offset];
+
+ if (iio_val != NULL)
+ *iio_val = *((u32 *)raw_data);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/*
+ * Parse the report for one usage id and build the IIO channel array.
+ *
+ * Probes all MAGN_3D_CHANNEL_MAX candidate usages, allocates exactly one
+ * iio_chan_spec and one u32 sample slot per usage that the device actually
+ * reports, wires magn_val_addr[] so the capture callback can store values
+ * by usage index, and finally derives the shared scale and sensitivity
+ * attributes.  Returns 0 and fills *channels / *chan_count on success,
+ * -EINVAL when no usable attribute or channel is found, -ENOMEM on
+ * allocation failure.  All allocations are devm-managed.
+ */
+static int magn_3d_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec **channels,
+ int *chan_count,
+ unsigned usage_id,
+ struct magn_3d_state *st)
+{
+ int i;
+ int attr_count = 0;
+ struct iio_chan_spec *_channels;
+
+ /* Scan for each usage attribute supported */
+ for (i = 0; i < MAGN_3D_CHANNEL_MAX; i++) {
+ int status;
+ u32 address = magn_3d_addresses[i];
+
+ /* Check if usage attribute exists in the sensor hub device */
+ status = sensor_hub_input_get_attribute_info(hsdev,
+ HID_INPUT_REPORT,
+ usage_id,
+ address,
+ &(st->magn[i]));
+ if (!status)
+ attr_count++;
+ }
+
+ if (attr_count <= 0) {
+ dev_err(&pdev->dev,
+ "failed to find any supported usage attributes in report\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "magn_3d Found %d usage attributes\n",
+ attr_count);
+ dev_dbg(&pdev->dev, "magn_3d X: %x:%x Y: %x:%x Z: %x:%x\n",
+ st->magn[0].index,
+ st->magn[0].report_id,
+ st->magn[1].index, st->magn[1].report_id,
+ st->magn[2].index, st->magn[2].report_id);
+
+ /* Setup IIO channel array */
+ _channels = devm_kcalloc(&pdev->dev, attr_count,
+ sizeof(struct iio_chan_spec),
+ GFP_KERNEL);
+ if (!_channels) {
+ dev_err(&pdev->dev,
+ "failed to allocate space for iio channels\n");
+ return -ENOMEM;
+ }
+
+ st->iio_vals = devm_kcalloc(&pdev->dev, attr_count,
+ sizeof(u32),
+ GFP_KERNEL);
+ if (!st->iio_vals) {
+ dev_err(&pdev->dev,
+ "failed to allocate space for iio values array\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Build one channel per present usage; scan_index is the packed slot
+ * in iio_vals, address keeps the original usage index.
+ * NOTE(review): presence is inferred from st->magn[i].index >= 0 —
+ * assumes the hub helper leaves index negative for absent usages;
+ * confirm against sensor_hub_input_get_attribute_info().
+ */
+ for (i = 0, *chan_count = 0;
+ i < MAGN_3D_CHANNEL_MAX && *chan_count < attr_count;
+ i++){
+ if (st->magn[i].index >= 0) {
+ /* Setup IIO channel struct */
+ (_channels[*chan_count]) = magn_3d_channels[i];
+ (_channels[*chan_count]).scan_index = *chan_count;
+ (_channels[*chan_count]).address = i;
+
+ /* Set magn_val_addr to iio value address */
+ st->magn_val_addr[i] = &(st->iio_vals[*chan_count]);
+ magn_3d_adjust_channel_bit_mask(_channels,
+ *chan_count,
+ st->magn[i].size);
+ (*chan_count)++;
+ }
+ }
+
+ if (*chan_count <= 0) {
+ dev_err(&pdev->dev,
+ "failed to find any magnetic channels setup\n");
+ return -EINVAL;
+ }
+
+ *channels = _channels;
+
+ dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
+ *chan_count);
+
+ /* Shared scale is taken from the X-axis flux attribute */
+ st->scale_precision = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_COMPASS_3D,
+ &st->magn[CHANNEL_SCAN_INDEX_X],
+ &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ORIENTATION,
+ &st->common_attributes.sensitivity);
+ dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+ st->common_attributes.sensitivity.index,
+ st->common_attributes.sensitivity.report_id);
+ }
+
+ return 0;
+}
+
+/*
+ * Probe: bind to the HID sensor hub compass-3D usage.
+ *
+ * Order matters for teardown: triggered buffer, then trigger, then device
+ * registration, then the sensor-hub callback — unwound in reverse by the
+ * error labels below.  indio_dev and the parsed channels are devm-managed
+ * and need no explicit unwinding.
+ */
+static int hid_magn_3d_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static char *name = "magn_3d";
+ struct iio_dev *indio_dev;
+ struct magn_3d_state *magn_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+ int chan_count = 0;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct magn_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ magn_state = iio_priv(indio_dev);
+ magn_state->common_attributes.hsdev = hsdev;
+ magn_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_COMPASS_3D,
+ &magn_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ /* Builds the channel array sized to what the device reports */
+ ret = magn_3d_parse_report(pdev, hsdev,
+ &channels, &chan_count,
+ HID_USAGE_SENSOR_COMPASS_3D, magn_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse report\n");
+ return ret;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = chan_count;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &magn_3d_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ return ret;
+ }
+ atomic_set(&magn_state->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &magn_state->common_attributes);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ /* Callbacks registered last: samples may arrive as soon as this succeeds */
+ magn_state->callbacks.send_event = magn_3d_proc_event;
+ magn_state->callbacks.capture_sample = magn_3d_capture_sample;
+ magn_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D,
+ &magn_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&magn_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+/*
+ * Remove: tear down in exact reverse order of probe — hub callback first
+ * so no more samples arrive, then IIO device, trigger, and buffer.
+ */
+static int hid_magn_3d_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct magn_3d_state *magn_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&magn_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static struct platform_device_id hid_magn_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200083",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_magn_3d_ids);
+
+static struct platform_driver hid_magn_3d_platform_driver = {
+ .id_table = hid_magn_3d_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = hid_magn_3d_probe,
+ .remove = hid_magn_3d_remove,
+};
+module_platform_driver(hid_magn_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Magnetometer 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
new file mode 100644
index 00000000000000..e3106b43ef488f
--- /dev/null
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -0,0 +1,438 @@
+/*
+ * mag3110.c - Support for Freescale MAG3110 magnetometer sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x0e)
+ *
+ * TODO: irq, user offset, oversampling, continuous mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#define MAG3110_STATUS 0x00
+#define MAG3110_OUT_X 0x01 /* MSB first */
+#define MAG3110_OUT_Y 0x03
+#define MAG3110_OUT_Z 0x05
+#define MAG3110_WHO_AM_I 0x07
+#define MAG3110_OFF_X 0x09 /* MSB first */
+#define MAG3110_OFF_Y 0x0b
+#define MAG3110_OFF_Z 0x0d
+#define MAG3110_DIE_TEMP 0x0f
+#define MAG3110_CTRL_REG1 0x10
+#define MAG3110_CTRL_REG2 0x11
+
+#define MAG3110_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
+
+#define MAG3110_CTRL_DR_MASK (BIT(7) | BIT(6) | BIT(5))
+#define MAG3110_CTRL_DR_SHIFT 5
+#define MAG3110_CTRL_DR_DEFAULT 0
+
+#define MAG3110_CTRL_TM BIT(1) /* trigger single measurement */
+#define MAG3110_CTRL_AC BIT(0) /* continuous measurements */
+
+#define MAG3110_CTRL_AUTO_MRST_EN BIT(7) /* magnetic auto-reset */
+#define MAG3110_CTRL_RAW BIT(5) /* measurements not user-offset corrected */
+
+#define MAG3110_DEVICE_ID 0xc4
+
+/* Each client has this additional data */
+struct mag3110_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
+};
+
+/*
+ * Trigger a single measurement (CTRL_REG1 TM bit) and poll STATUS until
+ * all three axes report data-ready, sleeping 20 ms between polls for up
+ * to 150 tries (~3 s).  Returns 0 on success, -EIO on timeout, or a
+ * negative I2C error.  Caller must hold data->lock.
+ */
+static int mag3110_request(struct mag3110_data *data)
+{
+ int ret, tries = 150;
+
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 | MAG3110_CTRL_TM);
+ if (ret < 0)
+ return ret;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MAG3110_STATUS);
+ if (ret < 0)
+ return ret;
+ /* wait for data ready */
+ if ((ret & MAG3110_STATUS_DRDY) == MAG3110_STATUS_DRDY)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Read one X/Y/Z sample set into @buf (big-endian, as the device stores
+ * MSB first from register OUT_X).  Serializes the trigger + block read
+ * under data->lock.  Returns the i2c block-read result (< 0 on error).
+ */
+static int mag3110_read(struct mag3110_data *data, __be16 buf[3])
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mag3110_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MAG3110_OUT_X, 3 * sizeof(__be16), (u8 *) buf);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+/*
+ * Format @n {integer, micro} pairs as "i.uuuuuu " into @buf, in reverse
+ * table order, replacing the final trailing space with a newline.
+ * Returns the number of bytes written.
+ */
+static ssize_t mag3110_show_int_plus_micros(char *buf,
+ const int (*vals)[2], int n)
+{
+ size_t len = 0;
+
+ while (n-- > 0)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d.%06d ", vals[n][0], vals[n][1]);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+/*
+ * Reverse lookup: return the index of the {val, val2} pair in @vals,
+ * or -EINVAL when no entry matches exactly.
+ */
+static int mag3110_get_int_plus_micros_index(const int (*vals)[2], int n,
+ int val, int val2)
+{
+ while (n-- > 0)
+ if (val == vals[n][0] && val2 == vals[n][1])
+ return n;
+
+ return -EINVAL;
+}
+
+static const int mag3110_samp_freq[8][2] = {
+ {80, 0}, {40, 0}, {20, 0}, {10, 0}, {5, 0}, {2, 500000},
+ {1, 250000}, {0, 625000}
+};
+
+/* sysfs: list the 8 supported sampling frequencies (Hz) */
+static ssize_t mag3110_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return mag3110_show_int_plus_micros(buf, mag3110_samp_freq, 8);
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mag3110_show_samp_freq_avail);
+
+/* Map a requested frequency to its DR register index, or -EINVAL */
+static int mag3110_get_samp_freq_index(struct mag3110_data *data,
+ int val, int val2)
+{
+ return mag3110_get_int_plus_micros_index(mag3110_samp_freq, 8, val,
+ val2);
+}
+
+/*
+ * IIO read_raw: raw magnetic/temperature samples, scale, sampling
+ * frequency, and per-axis calibration bias.  Raw reads are rejected
+ * while the triggered buffer owns the device.
+ */
+static int mag3110_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mag3110_data *data = iio_priv(indio_dev);
+ __be16 buffer[3];
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_MAGN: /* in 0.1 uT / LSB */
+ ret = mag3110_read(data, buffer);
+ if (ret < 0)
+ return ret;
+ /* 16-bit big-endian, sign-extend to int */
+ *val = sign_extend32(
+ be16_to_cpu(buffer[chan->scan_index]), 15);
+ return IIO_VAL_INT;
+ case IIO_TEMP: /* in 1 C / LSB */
+ mutex_lock(&data->lock);
+ ret = mag3110_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_byte_data(data->client,
+ MAG3110_DIE_TEMP);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 7);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MAGN:
+ /* 0.1 uT/LSB expressed as 0.001 Gauss */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /* 1 C/LSB -> 1000 milli-degrees C */
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /* DR bits of cached ctrl_reg1 index the frequency table */
+ i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT;
+ *val = mag3110_samp_freq[i][0];
+ *val2 = mag3110_samp_freq[i][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /* offset registers hold a 15-bit value left-shifted by one */
+ ret = i2c_smbus_read_word_swapped(data->client,
+ MAG3110_OFF_X + 2 * chan->scan_index);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret >> 1, 14);
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+/*
+ * IIO write_raw: sampling frequency (updates cached ctrl_reg1 and the
+ * device) and per-axis calibration bias.  Rejected while buffering.
+ */
+static int mag3110_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct mag3110_data *data = iio_priv(indio_dev);
+ int rate;
+
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ rate = mag3110_get_samp_freq_index(data, val, val2);
+ if (rate < 0)
+ return -EINVAL;
+
+ data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK;
+ data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
+ return i2c_smbus_write_byte_data(data->client,
+ MAG3110_CTRL_REG1, data->ctrl_reg1);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < -10000 || val > 10000)
+ return -EINVAL;
+ /* offset register is the 15-bit value left-shifted by one */
+ return i2c_smbus_write_word_swapped(data->client,
+ MAG3110_OFF_X + 2 * chan->scan_index, val << 1);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Triggered-buffer handler: read X/Y/Z (+ optional die temperature when
+ * scan bit 3 is enabled) and push one scan with a timestamp.
+ *
+ * iio_push_to_buffers_with_timestamp() stores an s64 timestamp right
+ * after the sample data, so the scan storage must be 8-byte aligned and
+ * large enough; a bare u8[16] on the stack guarantees neither, and its
+ * uninitialized padding bytes would leak kernel stack to userspace.
+ * Use an explicit, zero-initialized scan struct instead.
+ */
+static irqreturn_t mag3110_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct mag3110_data *data = iio_priv(indio_dev);
+	/* 3 16-bit channels + 1 byte temp + padding + 8-byte-aligned ts */
+	struct {
+		__be16 channels[3];
+		u8 temperature;
+		s64 timestamp __aligned(8);
+	} scan = { };
+	int ret;
+
+	ret = mag3110_read(data, scan.channels);
+	if (ret < 0)
+		goto done;
+
+	if (test_bit(3, indio_dev->active_scan_mask)) {
+		ret = i2c_smbus_read_byte_data(data->client,
+			MAG3110_DIE_TEMP);
+		if (ret < 0)
+			goto done;
+		scan.temperature = ret;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
+		iio_get_time_ns());
+
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+	return IRQ_HANDLED;
+}
+
+#define MAG3110_CHANNEL(axis, idx) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec mag3110_channels[] = {
+ MAG3110_CHANNEL(X, 0),
+ MAG3110_CHANNEL(Y, 1),
+ MAG3110_CHANNEL(Z, 2),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 8,
+ .storagebits = 8,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static struct attribute *mag3110_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group mag3110_group = {
+ .attrs = mag3110_attributes,
+};
+
+static const struct iio_info mag3110_info = {
+ .attrs = &mag3110_group,
+ .read_raw = &mag3110_read_raw,
+ .write_raw = &mag3110_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0};
+
+/*
+ * Probe: verify WHO_AM_I, allocate a devm-managed IIO device, program the
+ * default data rate and enable magnetic auto-reset, then set up the
+ * triggered buffer and register.  Only the triggered buffer needs manual
+ * unwinding on registration failure.
+ */
+static int mag3110_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mag3110_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* Identify the chip before allocating anything */
+ ret = i2c_smbus_read_byte_data(client, MAG3110_WHO_AM_I);
+ if (ret < 0)
+ return ret;
+ if (ret != MAG3110_DEVICE_ID)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mag3110_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mag3110_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mag3110_channels);
+ indio_dev->available_scan_masks = mag3110_scan_masks;
+
+ /* Default data rate; ctrl_reg1 is cached for later DR/standby updates */
+ data->ctrl_reg1 = MAG3110_CTRL_DR_DEFAULT << MAG3110_CTRL_DR_SHIFT;
+ ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG2,
+ MAG3110_CTRL_AUTO_MRST_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ mag3110_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+/* Put the chip in standby by clearing the active (AC) bit */
+static int mag3110_standby(struct mag3110_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1 & ~MAG3110_CTRL_AC);
+}
+
+/* Remove: unregister, tear down the buffer, then park the chip in standby */
+static int mag3110_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ mag3110_standby(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* PM: enter standby on suspend */
+static int mag3110_suspend(struct device *dev)
+{
+ return mag3110_standby(iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev))));
+}
+
+/* PM: restore the cached CTRL_REG1 (data rate / active state) on resume */
+static int mag3110_resume(struct device *dev)
+{
+ struct mag3110_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+static SIMPLE_DEV_PM_OPS(mag3110_pm_ops, mag3110_suspend, mag3110_resume);
+#define MAG3110_PM_OPS (&mag3110_pm_ops)
+#else
+#define MAG3110_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mag3110_id[] = {
+ { "mag3110", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mag3110_id);
+
+static struct i2c_driver mag3110_driver = {
+ .driver = {
+ .name = "mag3110",
+ .pm = MAG3110_PM_OPS,
+ },
+ .probe = mag3110_probe,
+ .remove = mag3110_remove,
+ .id_table = mag3110_id,
+};
+module_i2c_driver(mag3110_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MAG3110 magnetometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
new file mode 100644
index 00000000000000..7e81d00ef0c3d4
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -0,0 +1,45 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_MAGN_H
+#define ST_MAGN_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+/* Supported device name strings, matched against i2c/spi device ids */
+#define LSM303DLHC_MAGN_DEV_NAME "lsm303dlhc_magn"
+#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
+#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
+
+/* Core probe/remove shared by the bus-specific (i2c/spi) front ends */
+int st_magn_common_probe(struct iio_dev *indio_dev);
+void st_magn_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_magn_allocate_ring(struct iio_dev *indio_dev);
+void st_magn_deallocate_ring(struct iio_dev *indio_dev);
+#else /* CONFIG_IIO_BUFFER */
+/*
+ * NOTE(review): st_magn_probe_trigger/st_magn_remove_trigger have stubs
+ * here but no corresponding prototypes in the CONFIG_IIO_BUFFER branch
+ * above — verify the trigger helpers are declared (or unused) when
+ * buffering is enabled.
+ */
+static inline int st_magn_probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return 0;
+}
+static inline void st_magn_remove_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return;
+}
+static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_MAGN_H */
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
new file mode 100644
index 00000000000000..bf427dc0d226c4
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -0,0 +1,89 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+/* Buffer preenable: power up the sensor before capture starts */
+static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return st_sensors_set_enable(indio_dev, true);
+}
+
+/*
+ * Buffer postenable: allocate the per-scan bounce buffer (sized by the
+ * enabled scan mask) and arm the triggered buffer; the buffer is freed
+ * again if arming fails, and otherwise in predisable.
+ */
+static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ mdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (mdata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_postenable_error;
+
+ return err;
+
+st_magn_buffer_postenable_error:
+ kfree(mdata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+/*
+ * Buffer predisable: stop the triggered buffer, power down the sensor,
+ * and free the bounce buffer allocated in postenable.  The buffer is
+ * freed on both paths so no leak occurs when predisable itself fails.
+ */
+static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_magn_buffer_predisable_error:
+ kfree(mdata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
+ .preenable = &st_magn_buffer_preenable,
+ .postenable = &st_magn_buffer_postenable,
+ .predisable = &st_magn_buffer_predisable,
+};
+
+/* Set up the triggered buffer using the common ST sensors handler */
+int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
+}
+
+/* Tear down the triggered buffer set up by st_magn_allocate_ring() */
+void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
new file mode 100644
index 00000000000000..8ade473f99fee3
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -0,0 +1,439 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+#define ST_MAGN_NUMBER_DATA_CHANNELS 3
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
+#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
+#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
+
+/* FULLSCALE */
+#define ST_MAGN_FS_AVL_1300MG 1300
+#define ST_MAGN_FS_AVL_1900MG 1900
+#define ST_MAGN_FS_AVL_2500MG 2500
+#define ST_MAGN_FS_AVL_4000MG 4000
+#define ST_MAGN_FS_AVL_4700MG 4700
+#define ST_MAGN_FS_AVL_5600MG 5600
+#define ST_MAGN_FS_AVL_8000MG 8000
+#define ST_MAGN_FS_AVL_8100MG 8100
+#define ST_MAGN_FS_AVL_12000MG 12000
+#define ST_MAGN_FS_AVL_16000MG 16000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_MAGN_1_WAI_EXP 0x3c
+#define ST_MAGN_1_ODR_ADDR 0x00
+#define ST_MAGN_1_ODR_MASK 0x1c
+#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
+#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
+#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
+#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
+#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
+#define ST_MAGN_1_PW_ADDR 0x02
+#define ST_MAGN_1_PW_MASK 0x03
+#define ST_MAGN_1_PW_ON 0x00
+#define ST_MAGN_1_PW_OFF 0x03
+#define ST_MAGN_1_FS_ADDR 0x01
+#define ST_MAGN_1_FS_MASK 0xe0
+#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
+#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
+#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
+#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
+#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
+#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
+#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878
+#define ST_MAGN_1_MULTIREAD_BIT false
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_MAGN_2_WAI_EXP 0x3d
+#define ST_MAGN_2_ODR_ADDR 0x20
+#define ST_MAGN_2_ODR_MASK 0x1c
+#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
+#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
+#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
+#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
+#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
+#define ST_MAGN_2_PW_ADDR 0x22
+#define ST_MAGN_2_PW_MASK 0x03
+#define ST_MAGN_2_PW_ON 0x00
+#define ST_MAGN_2_PW_OFF 0x03
+#define ST_MAGN_2_FS_ADDR 0x21
+#define ST_MAGN_2_FS_MASK 0x60
+#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
+#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
+#define ST_MAGN_2_FS_AVL_12000_VAL 0x02
+#define ST_MAGN_2_FS_AVL_16000_VAL 0x03
+#define ST_MAGN_2_FS_AVL_4000_GAIN 146
+#define ST_MAGN_2_FS_AVL_8000_GAIN 292
+#define ST_MAGN_2_FS_AVL_12000_GAIN 438
+#define ST_MAGN_2_FS_AVL_16000_GAIN 584
+#define ST_MAGN_2_MULTIREAD_BIT false
+#define ST_MAGN_2_OUT_X_L_ADDR 0x28
+#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
+#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
+
+/*
+ * Channel map for sensor family 1 (LSM303DLHC/DLM): 16-bit big-endian
+ * samples, high-byte output register addresses, plus a soft timestamp.
+ */
+static const struct iio_chan_spec st_magn_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
+ ST_MAGN_DEFAULT_OUT_X_H_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
+ ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
+ ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+/*
+ * Channel map for sensor family 2 (LIS3MDL): 16-bit little-endian
+ * samples, low-byte output register addresses, plus a soft timestamp.
+ */
+static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+ ST_MAGN_2_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+ ST_MAGN_2_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+ ST_MAGN_2_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+/*
+ * Per-device register description table consumed by the st_sensors core:
+ * WAI id, supported part names, channel map, ODR/power/full-scale
+ * register fields and the gain for each selectable range.  Values come
+ * from the respective ST datasheets; do not reorder the fs_avl entries,
+ * index 0 is the power-on default full scale used by the probe.
+ */
+static const struct st_sensor_settings st_magn_sensors_settings[] = {
+ /* Sensor family 1: LSM303DLHC / LSM303DLM (separate XY and Z gains). */
+ {
+ .wai = ST_MAGN_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM303DLHC_MAGN_DEV_NAME,
+ [1] = LSM303DLM_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_1_ODR_ADDR,
+ .mask = ST_MAGN_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
+ { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
+ { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
+ { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
+ { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
+ { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_1_PW_ADDR,
+ .mask = ST_MAGN_1_PW_MASK,
+ .value_on = ST_MAGN_1_PW_ON,
+ .value_off = ST_MAGN_1_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_1_FS_ADDR,
+ .mask = ST_MAGN_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_1300MG,
+ .value = ST_MAGN_1_FS_AVL_1300_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_1900MG,
+ .value = ST_MAGN_1_FS_AVL_1900_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_2500MG,
+ .value = ST_MAGN_1_FS_AVL_2500_VAL,
+ .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ },
+ [3] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_1_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ },
+ [4] = {
+ .num = ST_MAGN_FS_AVL_4700MG,
+ .value = ST_MAGN_1_FS_AVL_4700_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ },
+ [5] = {
+ .num = ST_MAGN_FS_AVL_5600MG,
+ .value = ST_MAGN_1_FS_AVL_5600_VAL,
+ .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ },
+ [6] = {
+ .num = ST_MAGN_FS_AVL_8100MG,
+ .value = ST_MAGN_1_FS_AVL_8100_VAL,
+ .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ /* Sensor family 2: LIS3MDL (single gain for all three axes). */
+ {
+ .wai = ST_MAGN_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3MDL_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_2_ODR_ADDR,
+ .mask = ST_MAGN_2_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
+ { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
+ { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
+ { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
+ { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
+ { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_2_PW_ADDR,
+ .mask = ST_MAGN_2_PW_MASK,
+ .value_on = ST_MAGN_2_PW_ON,
+ .value_off = ST_MAGN_2_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_2_FS_ADDR,
+ .mask = ST_MAGN_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_2_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_8000MG,
+ .value = ST_MAGN_2_FS_AVL_8000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_12000MG,
+ .value = ST_MAGN_2_FS_AVL_12000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+ },
+ [3] = {
+ .num = ST_MAGN_FS_AVL_16000MG,
+ .value = ST_MAGN_2_FS_AVL_16000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+/*
+ * read_raw handler: RAW returns one sample through the st_sensors core,
+ * SCALE reports the gain of the current full scale (the Z axis uses the
+ * dedicated gain2 when the part defines one), SAMP_FREQ reports the
+ * cached output data rate.  The original routed errors through a label
+ * that only returned; return directly instead.
+ */
+static int st_magn_read_raw(struct iio_dev *indio_dev,
+   struct iio_chan_spec const *ch, int *val,
+   int *val2, long mask)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+  err = st_sensors_read_info_raw(indio_dev, ch, val);
+  if (err < 0)
+   return err;
+  return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+  *val = 0;
+  /* Some parts have a separate Z-axis gain (gain2 != 0). */
+  if ((ch->scan_index == ST_SENSORS_SCAN_Z) &&
+      (mdata->current_fullscale->gain2 != 0))
+   *val2 = mdata->current_fullscale->gain2;
+  else
+   *val2 = mdata->current_fullscale->gain;
+  return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+  *val = mdata->odr;
+  return IIO_VAL_INT;
+ default:
+  return -EINVAL;
+ }
+}
+
+/*
+ * write_raw handler: SCALE selects the full-scale range matching the
+ * requested gain, SAMP_FREQ programs a new output data rate (integer
+ * frequencies only; a non-zero val2 is rejected).
+ *
+ * NOTE(review): only the SAMP_FREQ path takes indio_dev->mlock while the
+ * SCALE path does not - presumably the fullscale helper serializes on its
+ * own; confirm against the st_sensors core.
+ */
+static int st_magn_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+  err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+  break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+  if (val2)
+   return -EINVAL;
+  mutex_lock(&indio_dev->mlock);
+  err = st_sensors_set_odr(indio_dev, val);
+  mutex_unlock(&indio_dev->mlock);
+  return err;
+ default:
+  err = -EINVAL;
+ }
+
+ return err;
+}
+
+/* sysfs attributes listing the available sampling rates and scales. */
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_magn_scale_available);
+
+static struct attribute *st_magn_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_magn_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_magn_attribute_group = {
+ .attrs = st_magn_attributes,
+};
+
+/* IIO core hooks for the magnetometer device. */
+static const struct iio_info magn_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_magn_attribute_group,
+ .read_raw = &st_magn_read_raw,
+ .write_raw = &st_magn_write_raw,
+};
+
+/*
+ * st_magn_common_probe() - bus-independent part of the probe.
+ *
+ * Powers the device, matches the WAI against the supported-sensor table,
+ * initializes channels, the default full scale and ODR, sets up the
+ * triggered buffer and - when a data-ready IRQ is available - the
+ * trigger, then registers the IIO device.
+ *
+ * Fix: the original returned from every error path without undoing
+ * st_sensors_power_enable(), leaking the regulator enable count.  All
+ * failures now unwind through st_magn_power_off.
+ */
+int st_magn_common_probe(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+ int irq = mdata->get_irq_data_ready(indio_dev);
+ int err;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &magn_info;
+
+ st_sensors_power_enable(indio_dev);
+
+ err = st_sensors_check_device_support(indio_dev,
+     ARRAY_SIZE(st_magn_sensors_settings),
+     st_magn_sensors_settings);
+ if (err < 0)
+  goto st_magn_power_off;
+
+ mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
+ mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
+ indio_dev->channels = mdata->sensor_settings->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ /* Entry 0 of fs_avl is the power-on default full scale. */
+ mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+     &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev, NULL);
+ if (err < 0)
+  goto st_magn_power_off;
+
+ err = st_magn_allocate_ring(indio_dev);
+ if (err < 0)
+  goto st_magn_power_off;
+
+ if (irq > 0) {
+  err = st_sensors_allocate_trigger(indio_dev, NULL);
+  if (err < 0)
+   goto st_magn_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+  goto st_magn_device_register_error;
+
+ dev_info(&indio_dev->dev, "registered magnetometer %s\n",
+   indio_dev->name);
+
+ return 0;
+
+st_magn_device_register_error:
+ if (irq > 0)
+  st_sensors_deallocate_trigger(indio_dev);
+st_magn_probe_trigger_error:
+ st_magn_deallocate_ring(indio_dev);
+st_magn_power_off:
+ st_sensors_power_disable(indio_dev);
+
+ return err;
+}
+EXPORT_SYMBOL(st_magn_common_probe);
+
+/*
+ * st_magn_common_remove() - bus-independent teardown, mirror of
+ * st_magn_common_probe(): unregister the device, free the trigger when
+ * one was allocated, release the buffer.
+ *
+ * NOTE(review): the regulator is disabled before the device is
+ * unregistered, so a concurrent read_raw could briefly touch an
+ * unpowered device - confirm whether disabling after unregister is
+ * required here.
+ */
+void st_magn_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ st_sensors_power_disable(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
+  st_sensors_deallocate_trigger(indio_dev);
+
+ st_magn_deallocate_ring(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
new file mode 100644
index 00000000000000..92e5c15452a3e4
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -0,0 +1,95 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_magn.h"
+
+#ifdef CONFIG_OF
+/* Device-tree compatibles; .data carries the matching device name. */
+static const struct of_device_id st_magn_of_match[] = {
+ {
+ .compatible = "st,lsm303dlhc-magn",
+ .data = LSM303DLHC_MAGN_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303dlm-magn",
+ .data = LSM303DLM_MAGN_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3mdl-magn",
+ .data = LIS3MDL_MAGN_DEV_NAME,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_magn_of_match);
+#else
+#define st_magn_of_match NULL
+#endif
+
+/*
+ * I2C probe: allocate the IIO device (devm-managed), resolve the DT
+ * compatible into a device name, bind the common i2c transfer hooks and
+ * hand over to the bus-independent probe.
+ */
+static int st_magn_i2c_probe(struct i2c_client *client,
+      const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mdata));
+ if (!indio_dev)
+  return -ENOMEM;
+
+ mdata = iio_priv(indio_dev);
+ st_sensors_of_i2c_probe(client, st_magn_of_match);
+
+ st_sensors_i2c_configure(indio_dev, client, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+  return err;
+
+ return 0;
+}
+
+/* I2C remove: delegate teardown to the bus-independent helper. */
+static int st_magn_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+/* Legacy (non-DT) I2C id table. */
+static const struct i2c_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
+
+static struct i2c_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-i2c",
+ .of_match_table = of_match_ptr(st_magn_of_match),
+ },
+ .probe = st_magn_i2c_probe,
+ .remove = st_magn_i2c_remove,
+ .id_table = st_magn_id_table,
+};
+module_i2c_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
new file mode 100644
index 00000000000000..7adacf160146eb
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -0,0 +1,71 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_magn.h"
+
+/*
+ * SPI probe: allocate the IIO device (devm-managed), bind the common
+ * SPI transfer hooks and hand over to the bus-independent probe.
+ */
+static int st_magn_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*mdata));
+ if (!indio_dev)
+  return -ENOMEM;
+
+ mdata = iio_priv(indio_dev);
+
+ st_sensors_spi_configure(indio_dev, spi, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+  return err;
+
+ return 0;
+}
+
+/* SPI remove: delegate teardown to the bus-independent helper. */
+static int st_magn_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+/* SPI id table (same device names as the I2C variant). */
+static const struct spi_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_magn_id_table);
+
+static struct spi_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-spi",
+ },
+ .probe = st_magn_spi_probe,
+ .remove = st_magn_spi_remove,
+ .id_table = st_magn_id_table,
+};
+module_spi_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
new file mode 100644
index 00000000000000..e3aa1e58d920f4
--- /dev/null
+++ b/drivers/iio/orientation/Kconfig
@@ -0,0 +1,31 @@
+#
+# Inclinometer sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Inclinometer sensors"
+
+config HID_SENSOR_INCLINOMETER_3D
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Inclinometer 3D"
+ help
+ Say yes here to build support for the HID SENSOR
+ Inclinometer 3D.
+
+config HID_SENSOR_DEVICE_ROTATION
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Device Rotation"
+ help
+ Say yes here to build support for the HID SENSOR
+ device rotation. The output of a device rotation sensor
+ is presented using quaternion format.
+
+endmenu
diff --git a/drivers/iio/orientation/Makefile b/drivers/iio/orientation/Makefile
new file mode 100644
index 00000000000000..4734dabbde13ee
--- /dev/null
+++ b/drivers/iio/orientation/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for industrial I/O Inclinometer sensor drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_HID_SENSOR_INCLINOMETER_3D) += hid-sensor-incl-3d.o
+obj-$(CONFIG_HID_SENSOR_DEVICE_ROTATION) += hid-sensor-rotation.o
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
new file mode 100644
index 00000000000000..1ff181bbbcef8b
--- /dev/null
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -0,0 +1,448 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+/* Scan indices for the three tilt axes; MAX doubles as the array size. */
+enum incl_3d_channel {
+ CHANNEL_SCAN_INDEX_X,
+ CHANNEL_SCAN_INDEX_Y,
+ CHANNEL_SCAN_INDEX_Z,
+ INCLI_3D_CHANNEL_MAX,
+};
+
+/* Per-device state kept in iio_priv(). */
+struct incl_3d_state {
+ struct hid_sensor_hub_callbacks callbacks; /* sensor-hub event hooks */
+ struct hid_sensor_common common_attributes; /* shared HID attrs */
+ struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
+ u32 incl_val[INCLI_3D_CHANNEL_MAX]; /* last captured sample per axis */
+ int scale_pre_decml; /* scale integer part */
+ int scale_post_decml; /* scale fractional part */
+ int scale_precision; /* IIO_VAL_* code returned for SCALE */
+ int value_offset;
+};
+
+/* HID usage id per scan index; order must match enum incl_3d_channel. */
+static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
+ HID_USAGE_SENSOR_ORIENT_TILT_X,
+ HID_USAGE_SENSOR_ORIENT_TILT_Y,
+ HID_USAGE_SENSOR_ORIENT_TILT_Z
+};
+
+/* Channel definitions */
+/* Channel definitions: one IIO_INCLI channel per tilt axis; the scan
+ * type is filled in later from the report descriptor. */
+static const struct iio_chan_spec incl_3d_channels[] = {
+ {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_X,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Y,
+ }, {
+ .type = IIO_INCLI,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_Z,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+/* Fill the scan type for one channel from the report-descriptor field
+ * size: signed, realbits per the report, stored in a 32-bit slot. */
+static void incl_3d_adjust_channel_bit_mask(struct iio_chan_spec *chan,
+     int size)
+{
+ chan->scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ chan->scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is u32 */
+ chan->scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/*
+ * Channel read_raw handler.  RAW powers the sensor up, waits for a fresh
+ * sample, pulls the value synchronously from the sensor hub and powers
+ * back down; the other masks report cached attributes.  The original
+ * mixed an unbraced if with a braced else - restructured as a guard
+ * clause per kernel CodingStyle, behavior unchanged.
+ */
+static int incl_3d_read_raw(struct iio_dev *indio_dev,
+    struct iio_chan_spec const *chan,
+    int *val, int *val2,
+    long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int report_id = -1;
+ u32 address;
+ int ret_type;
+ s32 poll_value;
+
+ *val = 0;
+ *val2 = 0;
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+  poll_value = hid_sensor_read_poll_value(
+    &incl_state->common_attributes);
+  if (poll_value < 0)
+   return -EINVAL;
+
+  hid_sensor_power_state(&incl_state->common_attributes, true);
+  /* Allow two polling intervals for a fresh sample. */
+  msleep_interruptible(poll_value * 2);
+
+  report_id = incl_state->incl[chan->scan_index].report_id;
+  address = incl_3d_addresses[chan->scan_index];
+  if (report_id < 0) {
+   /* No input report for this axis: power down and bail. */
+   hid_sensor_power_state(&incl_state->common_attributes,
+     false);
+   return -EINVAL;
+  }
+  *val = sensor_hub_input_attr_get_raw_value(
+    incl_state->common_attributes.hsdev,
+    HID_USAGE_SENSOR_INCLINOMETER_3D, address,
+    report_id);
+  hid_sensor_power_state(&incl_state->common_attributes, false);
+  ret_type = IIO_VAL_INT;
+  break;
+ case IIO_CHAN_INFO_SCALE:
+  *val = incl_state->scale_pre_decml;
+  *val2 = incl_state->scale_post_decml;
+  ret_type = incl_state->scale_precision;
+  break;
+ case IIO_CHAN_INFO_OFFSET:
+  *val = incl_state->value_offset;
+  ret_type = IIO_VAL_INT;
+  break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+  ret_type = hid_sensor_read_samp_freq_value(
+    &incl_state->common_attributes, val, val2);
+  break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+  ret_type = hid_sensor_read_raw_hyst_value(
+    &incl_state->common_attributes, val, val2);
+  break;
+ default:
+  ret_type = -EINVAL;
+  break;
+ }
+
+ return ret_type;
+}
+
+/* Channel write_raw handler: forwards sampling-frequency and hysteresis
+ * writes to the common HID sensor helpers; other masks are rejected. */
+static int incl_3d_write_raw(struct iio_dev *indio_dev,
+    struct iio_chan_spec const *chan,
+    int val,
+    int val2,
+    long mask)
+{
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+  ret = hid_sensor_write_samp_freq_value(
+    &incl_state->common_attributes, val, val2);
+  break;
+ case IIO_CHAN_INFO_HYSTERESIS:
+  ret = hid_sensor_write_raw_hyst_value(
+    &incl_state->common_attributes, val, val2);
+  break;
+ default:
+  ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* IIO core hooks for the inclinometer device. */
+static const struct iio_info incl_3d_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &incl_3d_read_raw,
+ .write_raw = &incl_3d_write_raw,
+};
+
+/* Push one captured sample set to the IIO buffer.
+ * NOTE(review): the len parameter is currently unused; the push relies
+ * on the device's scan size instead. */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+{
+ dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+ iio_push_to_buffers(indio_dev, (u8 *)data);
+}
+
+/* Callback handler to send event after all samples are received and captured.
+ * Only pushes when buffered capture is active (data_ready set by the
+ * trigger). */
+static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
+    unsigned usage_id,
+    void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ dev_dbg(&indio_dev->dev, "incl_3d_proc_event\n");
+ if (atomic_read(&incl_state->common_attributes.data_ready))
+  hid_sensor_push_data(indio_dev,
+    (u8 *)incl_state->incl_val,
+    sizeof(incl_state->incl_val));
+
+ return 0;
+}
+
+/* Capture samples in local storage: stash each per-axis value into
+ * incl_val[] until proc_event pushes the complete set.
+ * NOTE(review): raw_data is read as a u32 regardless of the report field
+ * size - presumably the hub pads fields to 32 bits; confirm. */
+static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
+    unsigned usage_id,
+    size_t raw_len, char *raw_data,
+    void *priv)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(priv);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+ int ret = 0;
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ORIENT_TILT_X:
+  incl_state->incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
+  break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Y:
+  incl_state->incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
+  break;
+ case HID_USAGE_SENSOR_ORIENT_TILT_Z:
+  incl_state->incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
+  break;
+ default:
+  ret = -EINVAL;
+  break;
+ }
+
+ return ret;
+}
+
+/*
+ * Parse the report for this usage id: look up the input-report attribute
+ * for each tilt axis and size its channel, derive the scale from the
+ * X-axis descriptor, and fall back to the common sensitivity field when
+ * no per-axis modifier exists.  The three identical per-axis lookups of
+ * the original are folded into one loop over incl_3d_addresses[], which
+ * preserves the lookup order and the early-return-on-error semantics.
+ */
+static int incl_3d_parse_report(struct platform_device *pdev,
+    struct hid_sensor_hub_device *hsdev,
+    struct iio_chan_spec *channels,
+    unsigned usage_id,
+    struct incl_3d_state *st)
+{
+ int ret = 0;
+ int i;
+
+ for (i = CHANNEL_SCAN_INDEX_X; i <= CHANNEL_SCAN_INDEX_Z; i++) {
+  ret = sensor_hub_input_get_attribute_info(hsdev,
+      HID_INPUT_REPORT,
+      usage_id,
+      incl_3d_addresses[i],
+      &st->incl[i]);
+  if (ret)
+   return ret;
+  incl_3d_adjust_channel_bit_mask(&channels[i],
+      st->incl[i].size);
+ }
+
+ dev_dbg(&pdev->dev, "incl_3d %x:%x, %x:%x, %x:%x\n",
+   st->incl[0].index,
+   st->incl[0].report_id,
+   st->incl[1].index, st->incl[1].report_id,
+   st->incl[2].index, st->incl[2].report_id);
+
+ st->scale_precision = hid_sensor_format_scale(
+    HID_USAGE_SENSOR_INCLINOMETER_3D,
+    &st->incl[CHANNEL_SCAN_INDEX_X],
+    &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0) {
+  sensor_hub_input_get_attribute_info(hsdev,
+   HID_FEATURE_REPORT, usage_id,
+   HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+   HID_USAGE_SENSOR_DATA_ORIENTATION,
+   &st->common_attributes.sensitivity);
+  dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+   st->common_attributes.sensitivity.index,
+   st->common_attributes.sensitivity.report_id);
+ }
+ return ret;
+}
+
+/* Function to initialize the processing for usage id.
+ *
+ * Order matters: common attributes are parsed first, the channel table
+ * is duplicated (it is patched per-device by incl_3d_parse_report), then
+ * the triggered buffer, trigger, IIO registration and finally the
+ * sensor-hub callback are set up; the error labels unwind in exact
+ * reverse order.  The duplicated channel array is freed by hand on
+ * every failure path and in remove(). */
+static int hid_incl_3d_probe(struct platform_device *pdev)
+{
+ int ret;
+ static char *name = "incli_3d";
+ struct iio_dev *indio_dev;
+ struct incl_3d_state *incl_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct incl_3d_state));
+ if (indio_dev == NULL)
+  return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ incl_state = iio_priv(indio_dev);
+ incl_state->common_attributes.hsdev = hsdev;
+ incl_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+    HID_USAGE_SENSOR_INCLINOMETER_3D,
+    &incl_state->common_attributes);
+ if (ret) {
+  dev_err(&pdev->dev, "failed to setup common attributes\n");
+  return ret;
+ }
+
+ channels = kmemdup(incl_3d_channels, sizeof(incl_3d_channels),
+      GFP_KERNEL);
+ if (!channels) {
+  dev_err(&pdev->dev, "failed to duplicate channels\n");
+  return -ENOMEM;
+ }
+
+ ret = incl_3d_parse_report(pdev, hsdev, channels,
+    HID_USAGE_SENSOR_INCLINOMETER_3D, incl_state);
+ if (ret) {
+  dev_err(&pdev->dev, "failed to setup attributes\n");
+  goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(incl_3d_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &incl_3d_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+  NULL, NULL);
+ if (ret) {
+  dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+  goto error_free_dev_mem;
+ }
+ atomic_set(&incl_state->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+     &incl_state->common_attributes);
+ if (ret) {
+  dev_err(&pdev->dev, "trigger setup failed\n");
+  goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+  dev_err(&pdev->dev, "device register failed\n");
+  goto error_remove_trigger;
+ }
+
+ incl_state->callbacks.send_event = incl_3d_proc_event;
+ incl_state->callbacks.capture_sample = incl_3d_capture_sample;
+ incl_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev,
+     HID_USAGE_SENSOR_INCLINOMETER_3D,
+     &incl_state->callbacks);
+ if (ret) {
+  dev_err(&pdev->dev, "callback reg failed\n");
+  goto error_iio_unreg;
+ }
+
+ return 0;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id: teardown in the
+ * exact reverse of probe - callback, device, trigger, buffer, channel
+ * copy. */
+static int hid_incl_3d_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct incl_3d_state *incl_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&incl_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+/* Matched against the sensor-hub child device named after the usage id. */
+static struct platform_device_id hid_incl_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200086",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids);
+
+static struct platform_driver hid_incl_3d_platform_driver = {
+ .id_table = hid_incl_3d_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = hid_incl_3d_probe,
+ .remove = hid_incl_3d_remove,
+};
+module_platform_driver(hid_incl_3d_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Inclinometer 3D");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
new file mode 100644
index 00000000000000..4afb6c79ccbce8
--- /dev/null
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -0,0 +1,345 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+/* Per-device driver state for the HID device-rotation (quaternion) sensor. */
+struct dev_rot_state {
+	struct hid_sensor_hub_callbacks callbacks;	/* sensor-hub event hooks */
+	struct hid_sensor_common common_attributes;	/* shared HID sensor attrs */
+	struct hid_sensor_hub_attribute_info quaternion; /* quaternion report info */
+	u32 sampled_vals[4];	/* latest four quaternion components */
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec dev_rot_channels[] = {
+ {
+ .type = IIO_ROT,
+ .modified = 1,
+ .channel2 = IIO_MOD_QUATERNION,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS)
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void dev_rot_adjust_channel_bit_mask(struct iio_chan_spec *chan,
+					int size)
+{
+	/* @size is the per-component size in bytes from the report desc. */
+	chan->scan_type.sign = 's';
+	/* Real storage bits will change based on the report desc. */
+	chan->scan_type.realbits = size * 8;
+	/* Maximum size of a sample to capture is u32 */
+	chan->scan_type.storagebits = sizeof(u32) * 8;
+	/* One value per quaternion component -> four repeats per scan. */
+	chan->scan_type.repeat = 4;
+}
+
+/* Channel read_raw handler */
+/*
+ * Multi-value read handler (wired as .read_raw_multi): a RAW read returns
+ * all four cached quaternion components at once; SAMP_FREQ and HYSTERESIS
+ * are delegated to the common HID sensor helpers.
+ */
+static int dev_rot_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int size, int *vals, int *val_len,
+				long mask)
+{
+	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+	int ret_type;
+	int i;
+
+	vals[0] = 0;
+	vals[1] = 0;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/* Caller must provide room for all four components. */
+		if (size >= 4) {
+			for (i = 0; i < 4; ++i)
+				vals[i] = rot_state->sampled_vals[i];
+			ret_type = IIO_VAL_INT_MULTIPLE;
+			*val_len =  4;
+		} else
+			ret_type = -EINVAL;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret_type = hid_sensor_read_samp_freq_value(
+			&rot_state->common_attributes, &vals[0], &vals[1]);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret_type = hid_sensor_read_raw_hyst_value(
+			&rot_state->common_attributes, &vals[0], &vals[1]);
+		break;
+	default:
+		ret_type = -EINVAL;
+		break;
+	}
+
+	return ret_type;
+}
+
+/* Channel write_raw handler */
+/*
+ * Write handler: only sampling frequency and hysteresis are writable,
+ * both forwarded to the common HID sensor attribute helpers.
+ */
+static int dev_rot_write_raw(struct iio_dev *indio_dev,
+		       struct iio_chan_spec const *chan,
+		       int val,
+		       int val2,
+		       long mask)
+{
+	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = hid_sensor_write_samp_freq_value(
+				&rot_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret = hid_sensor_write_raw_hyst_value(
+				&rot_state->common_attributes, val, val2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct iio_info dev_rot_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw_multi = &dev_rot_read_raw,
+ .write_raw = &dev_rot_write_raw,
+};
+
+/* Function to push data to buffer */
+/*
+ * Push one complete sample into the IIO buffer.
+ * @len is unused: iio_push_to_buffers() derives the sample size from the
+ * device's scan configuration; the parameter is kept for call-site symmetry.
+ */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
+{
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data >>\n");
+	/* data is already u8 *, no cast needed. */
+	iio_push_to_buffers(indio_dev, data);
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data <<\n");
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "dev_rot_proc_event\n");
+	/* Only push when the trigger armed the data_ready flag. */
+	if (atomic_read(&rot_state->common_attributes.data_ready))
+		hid_sensor_push_data(indio_dev,
+				(u8 *)rot_state->sampled_vals,
+				sizeof(rot_state->sampled_vals));
+
+	return 0;
+}
+
+/* Capture samples in local storage */
+/*
+ * Capture a quaternion sample into local storage. Called from the sensor
+ * hub for each field of the input report; only the quaternion usage is
+ * handled here.
+ */
+static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				size_t raw_len, char *raw_data,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+
+	if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
+		/*
+		 * Clamp to the reported length: copying a fixed
+		 * sizeof(sampled_vals) would read past raw_data if the
+		 * device delivers fewer bytes than expected.
+		 */
+		memcpy(rot_state->sampled_vals, raw_data,
+		       min(raw_len, sizeof(rot_state->sampled_vals)));
+		dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
+			sizeof(rot_state->sampled_vals));
+	}
+
+	return 0;
+}
+
+/* Parse report which is specific to an usage id*/
+static int dev_rot_parse_report(struct platform_device *pdev,
+				struct hid_sensor_hub_device *hsdev,
+				struct iio_chan_spec *channels,
+				unsigned usage_id,
+				struct dev_rot_state *st)
+{
+	int ret;
+
+	/* Locate the quaternion field inside the input report. */
+	ret = sensor_hub_input_get_attribute_info(hsdev,
+				HID_INPUT_REPORT,
+				usage_id,
+				HID_USAGE_SENSOR_ORIENT_QUATERNION,
+				&st->quaternion);
+	if (ret)
+		return ret;
+
+	/* Total report size covers 4 components; divide for per-component. */
+	dev_rot_adjust_channel_bit_mask(&channels[0],
+					st->quaternion.size / 4);
+
+	dev_dbg(&pdev->dev, "dev_rot %x:%x\n", st->quaternion.index,
+		st->quaternion.report_id);
+
+	dev_dbg(&pdev->dev, "dev_rot: attrib size %d\n",
+				st->quaternion.size);
+
+	/* Set Sensitivity field ids, when there is no individual modifier */
+	if (st->common_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_DATA_ORIENTATION,
+			&st->common_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->common_attributes.sensitivity.index,
+			st->common_attributes.sensitivity.report_id);
+	}
+
+	return 0;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_dev_rot_probe(struct platform_device *pdev)
+{
+	int ret;
+	static char *name = "dev_rotation";
+	struct iio_dev *indio_dev;
+	struct dev_rot_state *rot_state;
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_chan_spec *channels;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev,
+					  sizeof(struct dev_rot_state));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	rot_state = iio_priv(indio_dev);
+	rot_state->common_attributes.hsdev = hsdev;
+	rot_state->common_attributes.pdev = pdev;
+
+	ret = hid_sensor_parse_common_attributes(hsdev,
+				HID_USAGE_SENSOR_DEVICE_ORIENTATION,
+				&rot_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup common attributes\n");
+		return ret;
+	}
+
+	/* devm-managed duplicate: no explicit kfree needed on any path. */
+	channels = devm_kmemdup(&pdev->dev, dev_rot_channels,
+				sizeof(dev_rot_channels), GFP_KERNEL);
+	if (!channels) {
+		dev_err(&pdev->dev, "failed to duplicate channels\n");
+		return -ENOMEM;
+	}
+
+	ret = dev_rot_parse_report(pdev, hsdev, channels,
+			HID_USAGE_SENSOR_DEVICE_ORIENTATION, rot_state);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup attributes\n");
+		return ret;
+	}
+
+	indio_dev->channels = channels;
+	indio_dev->num_channels = ARRAY_SIZE(dev_rot_channels);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->info = &dev_rot_info;
+	indio_dev->name = name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+		NULL, NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+		return ret;
+	}
+	atomic_set(&rot_state->common_attributes.data_ready, 0);
+	ret = hid_sensor_setup_trigger(indio_dev, name,
+					&rot_state->common_attributes);
+	if (ret) {
+		dev_err(&pdev->dev, "trigger setup failed\n");
+		goto error_unreg_buffer_funcs;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "device register failed\n");
+		goto error_remove_trigger;
+	}
+
+	/* Hook up sensor-hub callbacks last, once the device is live. */
+	rot_state->callbacks.send_event = dev_rot_proc_event;
+	rot_state->callbacks.capture_sample = dev_rot_capture_sample;
+	rot_state->callbacks.pdev = pdev;
+	ret = sensor_hub_register_callback(hsdev,
+					HID_USAGE_SENSOR_DEVICE_ORIENTATION,
+					&rot_state->callbacks);
+	if (ret) {
+		dev_err(&pdev->dev, "callback reg failed\n");
+		goto error_iio_unreg;
+	}
+
+	return 0;
+
+	/* Unwind in strict reverse order of the setup steps above. */
+error_iio_unreg:
+	iio_device_unregister(indio_dev);
+error_remove_trigger:
+	hid_sensor_remove_trigger(&rot_state->common_attributes);
+error_unreg_buffer_funcs:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_dev_rot_remove(struct platform_device *pdev)
+{
+	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct dev_rot_state *rot_state = iio_priv(indio_dev);
+
+	/* Reverse of probe; channels are devm-managed, nothing to kfree. */
+	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_DEVICE_ORIENTATION);
+	iio_device_unregister(indio_dev);
+	hid_sensor_remove_trigger(&rot_state->common_attributes);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
+}
+
+static struct platform_device_id hid_dev_rot_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-20008a",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_dev_rot_ids);
+
+static struct platform_driver hid_dev_rot_platform_driver = {
+ .id_table = hid_dev_rot_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = hid_dev_rot_probe,
+ .remove = hid_dev_rot_remove,
+};
+module_platform_driver(hid_dev_rot_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Device Rotation");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
new file mode 100644
index 00000000000000..a3be53792072e4
--- /dev/null
+++ b/drivers/iio/pressure/Kconfig
@@ -0,0 +1,94 @@
+#
+# Pressure drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Pressure sensors"
+
+config BMP280
+ tristate "Bosch Sensortec BMP280 pressure sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Bosch Sensortec BMP280
+ pressure and temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmp280.
+
+config HID_SENSOR_PRESS
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID PRESS"
+ help
+ Say yes here to build support for the HID SENSOR
+ Pressure driver
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-press.
+
+config MPL115
+ tristate "Freescale MPL115A2 pressure sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Freescale MPL115A2
+ pressure sensor connected via I2C.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl115.
+
+config MPL3115
+ tristate "Freescale MPL3115A2 pressure sensor driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Freescale MPL3115A2
+ pressure sensor / altimeter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl3115.
+
+config IIO_ST_PRESS
+ tristate "STMicroelectronics pressure sensor Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_PRESS_I2C if (I2C)
+ select IIO_ST_PRESS_SPI if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics pressure
+ sensors: LPS001WP, LPS25H, LPS331AP.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_pressure (core functions for the driver [it is mandatory]);
+ - st_pressure_i2c (necessary for the I2C devices [optional*]);
+ - st_pressure_spi (necessary for the SPI devices [optional*]);
+
+ (*) one of these is necessary to do something.
+
+config IIO_ST_PRESS_I2C
+ tristate
+ depends on IIO_ST_PRESS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_PRESS_SPI
+ tristate
+ depends on IIO_ST_PRESS
+ depends on IIO_ST_SENSORS_SPI
+
+config T5403
+ tristate "EPCOS T5403 digital barometric pressure sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for the EPCOS T5403 pressure sensor
+ connected via I2C.
+
+ To compile this driver as a module, choose M here: the module
+ will be called t5403.
+
+endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
new file mode 100644
index 00000000000000..88011f2ae00e4c
--- /dev/null
+++ b/drivers/iio/pressure/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for industrial I/O pressure drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_BMP280) += bmp280.o
+obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
+obj-$(CONFIG_MPL115) += mpl115.o
+obj-$(CONFIG_MPL3115) += mpl3115.o
+obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
+st_pressure-y := st_pressure_core.o
+st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
+obj-$(CONFIG_T5403) += t5403.o
+
+obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
+obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
new file mode 100644
index 00000000000000..75038dacfff184
--- /dev/null
+++ b/drivers/iio/pressure/bmp280.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Driver for Bosch Sensortec BMP280 digital pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "bmp280: " fmt
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define BMP280_REG_TEMP_XLSB 0xFC
+#define BMP280_REG_TEMP_LSB 0xFB
+#define BMP280_REG_TEMP_MSB 0xFA
+#define BMP280_REG_PRESS_XLSB 0xF9
+#define BMP280_REG_PRESS_LSB 0xF8
+#define BMP280_REG_PRESS_MSB 0xF7
+
+#define BMP280_REG_CONFIG 0xF5
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_STATUS 0xF3
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP280_REG_COMP_TEMP_START 0x88
+#define BMP280_COMP_TEMP_REG_COUNT 6
+
+#define BMP280_REG_COMP_PRESS_START 0x8E
+#define BMP280_COMP_PRESS_REG_COUNT 18
+
+#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF 0
+#define BMP280_FILTER_2X BIT(2)
+#define BMP280_FILTER_4X BIT(3)
+#define BMP280_FILTER_8X (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X BIT(4)
+
+#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP 0
+#define BMP280_OSRS_TEMP_1X BIT(5)
+#define BMP280_OSRS_TEMP_2X BIT(6)
+#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_8X BIT(7)
+#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5))
+
+#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP 0
+#define BMP280_OSRS_PRESS_1X BIT(2)
+#define BMP280_OSRS_PRESS_2X BIT(3)
+#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_8X BIT(4)
+#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2))
+
+#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP 0
+#define BMP280_MODE_FORCED BIT(0)
+#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+
+#define BMP280_CHIP_ID 0x58
+#define BMP280_SOFT_RESET_VAL 0xB6
+
+struct bmp280_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * calculation.
+ */
+ s32 t_fine;
+};
+
+/* Compensation parameters. */
+struct bmp280_comp_temp {
+ u16 dig_t1;
+ s16 dig_t2, dig_t3;
+};
+
+struct bmp280_comp_press {
+ u16 dig_p1;
+ s16 dig_p2, dig_p3, dig_p4, dig_p5, dig_p6, dig_p7, dig_p8, dig_p9;
+};
+
+static const struct iio_chan_spec bmp280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+/* regmap hook: only config, measurement-control and reset are writable. */
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case BMP280_REG_CONFIG:
+	case BMP280_REG_CTRL_MEAS:
+	case BMP280_REG_RESET:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* regmap hook: data and status registers must bypass the register cache. */
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case BMP280_REG_TEMP_XLSB:
+	case BMP280_REG_TEMP_LSB:
+	case BMP280_REG_TEMP_MSB:
+	case BMP280_REG_PRESS_XLSB:
+	case BMP280_REG_PRESS_LSB:
+	case BMP280_REG_PRESS_MSB:
+	case BMP280_REG_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP280_REG_TEMP_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+ .volatile_reg = bmp280_is_volatile_reg,
+};
+
+/*
+ * Read the three little-endian 16-bit temperature calibration words
+ * (dig_T1..dig_T3) from the chip's NVM-backed registers.
+ * NOTE(review): calibration is re-read on every measurement; it could be
+ * cached at probe time since it never changes — confirm before optimizing.
+ */
+static int bmp280_read_compensation_temp(struct bmp280_data *data,
+					 struct bmp280_comp_temp *comp)
+{
+	int ret;
+	__le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+			       buf, BMP280_COMP_TEMP_REG_COUNT);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"failed to read temperature calibration parameters\n");
+		return ret;
+	}
+
+	/* dig_T1 is unsigned, dig_T2/dig_T3 are signed (per datasheet). */
+	comp->dig_t1 = (u16) le16_to_cpu(buf[0]);
+	comp->dig_t2 = (s16) le16_to_cpu(buf[1]);
+	comp->dig_t3 = (s16) le16_to_cpu(buf[2]);
+
+	return 0;
+}
+
+/*
+ * Read the nine little-endian 16-bit pressure calibration words
+ * (dig_P1..dig_P9) from the chip's NVM-backed registers.
+ */
+static int bmp280_read_compensation_press(struct bmp280_data *data,
+					  struct bmp280_comp_press *comp)
+{
+	int ret;
+	__le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+			       buf, BMP280_COMP_PRESS_REG_COUNT);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"failed to read pressure calibration parameters\n");
+		return ret;
+	}
+
+	/* dig_P1 is unsigned, dig_P2..dig_P9 are signed (per datasheet). */
+	comp->dig_p1 = (u16) le16_to_cpu(buf[0]);
+	comp->dig_p2 = (s16) le16_to_cpu(buf[1]);
+	comp->dig_p3 = (s16) le16_to_cpu(buf[2]);
+	comp->dig_p4 = (s16) le16_to_cpu(buf[3]);
+	comp->dig_p5 = (s16) le16_to_cpu(buf[4]);
+	comp->dig_p6 = (s16) le16_to_cpu(buf[5]);
+	comp->dig_p7 = (s16) le16_to_cpu(buf[6]);
+	comp->dig_p8 = (s16) le16_to_cpu(buf[7]);
+	comp->dig_p9 = (s16) le16_to_cpu(buf[8]);
+
+	return 0;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.01 DegC. Output value of
+ * "5123" equals 51.23 DegC. t_fine carries fine temperature as global
+ * value.
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static s32 bmp280_compensate_temp(struct bmp280_data *data,
+				  struct bmp280_comp_temp *comp,
+				  s32 adc_temp)
+{
+	s32 var1, var2, t;
+
+	var1 = (((adc_temp >> 3) - ((s32) comp->dig_t1 << 1)) *
+		((s32) comp->dig_t2)) >> 11;
+	var2 = (((((adc_temp >> 4) - ((s32) comp->dig_t1)) *
+		  ((adc_temp >> 4) - ((s32) comp->dig_t1))) >> 12) *
+		((s32) comp->dig_t3)) >> 14;
+
+	/* Side effect: t_fine is stored for the pressure compensation. */
+	data->t_fine = var1 + var2;
+	t = (data->t_fine * 5 + 128) >> 8;
+
+	return t;
+}
+
+/*
+ * Returns pressure in Pa as unsigned 32 bit integer in Q24.8 format (24
+ * integer bits and 8 fractional bits). Output value of "24674867"
+ * represents 24674867/256 = 96386.2 Pa = 963.862 hPa
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_press(struct bmp280_data *data,
+				   struct bmp280_comp_press *comp,
+				   s32 adc_press)
+{
+	s64 var1, var2, p;
+
+	/* Depends on data->t_fine set by a prior bmp280_compensate_temp(). */
+	var1 = ((s64) data->t_fine) - 128000;
+	var2 = var1 * var1 * (s64) comp->dig_p6;
+	var2 = var2 + ((var1 * (s64) comp->dig_p5) << 17);
+	var2 = var2 + (((s64) comp->dig_p4) << 35);
+	var1 = ((var1 * var1 * (s64) comp->dig_p3) >> 8) +
+		((var1 * (s64) comp->dig_p2) << 12);
+	var1 = (((((s64) 1) << 47) + var1)) * ((s64) comp->dig_p1) >> 33;
+
+	/* Avoid dividing by zero (datasheet-mandated guard). */
+	if (var1 == 0)
+		return 0;
+
+	p = ((((s64) 1048576 - adc_press) << 31) - var2) * 3125;
+	p = div64_s64(p, var1);
+	var1 = (((s64) comp->dig_p9) * (p >> 13) * (p >> 13)) >> 25;
+	var2 = (((s64) comp->dig_p8) * p) >> 19;
+	p = ((p + var1 + var2) >> 8) + (((s64) comp->dig_p7) << 4);
+
+	return (u32) p;
+}
+
+/*
+ * Read and compensate one temperature sample.
+ * Returns IIO_VAL_INT with *val in milli-degC (0.01 degC units * 10),
+ * or 0 when called with val == NULL just to refresh t_fine.
+ */
+static int bmp280_read_temp(struct bmp280_data *data,
+			    int *val)
+{
+	int ret;
+	__be32 tmp = 0;
+	s32 adc_temp, comp_temp;
+	struct bmp280_comp_temp comp;
+
+	ret = bmp280_read_compensation_temp(data, &comp);
+	if (ret < 0)
+		return ret;
+
+	/* MSB/LSB/XLSB are consecutive; read all 3 bytes in one burst. */
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+			       (u8 *) &tmp, 3);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "failed to read temperature\n");
+		return ret;
+	}
+
+	/* 20-bit ADC value: 3 bytes big-endian, low 4 bits unused. */
+	adc_temp = be32_to_cpu(tmp) >> 12;
+	comp_temp = bmp280_compensate_temp(data, &comp, adc_temp);
+
+	/*
+	 * val might be NULL if we're called by the read_press routine,
+	 * who only cares about the carry over t_fine value.
+	 */
+	if (val) {
+		*val = comp_temp * 10;
+		return IIO_VAL_INT;
+	}
+
+	return 0;
+}
+
+/*
+ * Read and compensate one pressure sample.
+ * Returns IIO_VAL_FRACTIONAL with val/val2 = Q24.8 Pa / 256000, i.e. the
+ * 256 fractional bits plus the Pa->kPa scaling expected by IIO.
+ */
+static int bmp280_read_press(struct bmp280_data *data,
+			     int *val, int *val2)
+{
+	int ret;
+	__be32 tmp = 0;
+	s32 adc_press;
+	u32 comp_press;
+	struct bmp280_comp_press comp;
+
+	ret = bmp280_read_compensation_press(data, &comp);
+	if (ret < 0)
+		return ret;
+
+	/* Read and compensate temperature so we get a reading of t_fine. */
+	ret = bmp280_read_temp(data, NULL);
+	if (ret < 0)
+		return ret;
+
+	/* MSB/LSB/XLSB are consecutive; read all 3 bytes in one burst. */
+	ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+			       (u8 *) &tmp, 3);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "failed to read pressure\n");
+		return ret;
+	}
+
+	/* 20-bit ADC value: 3 bytes big-endian, low 4 bits unused. */
+	adc_press = be32_to_cpu(tmp) >> 12;
+	comp_press = bmp280_compensate_press(data, &comp, adc_press);
+
+	*val = comp_press;
+	*val2 = 256000;
+
+	return IIO_VAL_FRACTIONAL;
+}
+
+/*
+ * IIO read_raw entry point: dispatch PROCESSED reads to the pressure or
+ * temperature path under the device mutex (the two share t_fine state).
+ */
+static int bmp280_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	int ret;
+	struct bmp280_data *data = iio_priv(indio_dev);
+
+	mutex_lock(&data->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->type) {
+		case IIO_PRESSURE:
+			ret = bmp280_read_press(data, val, val2);
+			break;
+		case IIO_TEMP:
+			ret = bmp280_read_temp(data, val);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+static const struct iio_info bmp280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &bmp280_read_raw,
+};
+
+/*
+ * Program the measurement configuration: 2x temperature / 16x pressure
+ * oversampling in normal (continuous) mode, IIR filter coefficient 4.
+ */
+static int bmp280_chip_init(struct bmp280_data *data)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+				 BMP280_OSRS_TEMP_MASK |
+				 BMP280_OSRS_PRESS_MASK |
+				 BMP280_MODE_MASK,
+				 BMP280_OSRS_TEMP_2X |
+				 BMP280_OSRS_PRESS_16X |
+				 BMP280_MODE_NORMAL);
+	if (ret < 0) {
+		/* This write targets CTRL_MEAS, not CONFIG; say so. */
+		dev_err(&data->client->dev,
+			"failed to write ctrl_meas register\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(data->regmap, BMP280_REG_CONFIG,
+				 BMP280_FILTER_MASK,
+				 BMP280_FILTER_4X);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"failed to write config register\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int bmp280_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int ret;
+	struct iio_dev *indio_dev;
+	struct bmp280_data *data;
+	unsigned int chip_id;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, indio_dev);
+	data = iio_priv(indio_dev);
+	mutex_init(&data->lock);
+	data->client = client;
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = id->name;
+	indio_dev->channels = bmp280_channels;
+	indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
+	indio_dev->info = &bmp280_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(&client->dev, "failed to allocate register map\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	/* Sanity-check the hardwired chip ID before touching config regs. */
+	ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+	if (ret < 0)
+		return ret;
+	if (chip_id != BMP280_CHIP_ID) {
+		dev_err(&client->dev, "bad chip id.  expected %x got %x\n",
+			BMP280_CHIP_ID, chip_id);
+		return -EINVAL;
+	}
+
+	ret = bmp280_chip_init(data);
+	if (ret < 0)
+		return ret;
+
+	/* devm-managed registration: no explicit remove callback needed. */
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_match[] = {
+ {"BMP0280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+
+static const struct i2c_device_id bmp280_id[] = {
+ {"bmp280", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_id);
+
+static struct i2c_driver bmp280_driver = {
+ .driver = {
+ .name = "bmp280",
+ .acpi_match_table = ACPI_PTR(bmp280_acpi_match),
+ },
+ .probe = bmp280_probe,
+ .id_table = bmp280_id,
+};
+module_i2c_driver(bmp280_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
new file mode 100644
index 00000000000000..764928682df2fb
--- /dev/null
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -0,0 +1,393 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ */
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+#define CHANNEL_SCAN_INDEX_PRESSURE 0
+
+struct press_state {
+ struct hid_sensor_hub_callbacks callbacks;
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info press_attr;
+ u32 press_data;
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec press_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .modified = 1,
+ .channel2 = IIO_NO_MOD,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ .scan_index = CHANNEL_SCAN_INDEX_PRESSURE,
+ }
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void press_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+					int channel, int size)
+{
+	/* @size: attribute size in bytes taken from the report descriptor. */
+	channels[channel].scan_type.sign = 's';
+	/* Real storage bits will change based on the report desc. */
+	channels[channel].scan_type.realbits = size * 8;
+	/* Maximum size of a sample to capture is u32 */
+	channels[channel].scan_type.storagebits = sizeof(u32) * 8;
+}
+
+/* Channel read_raw handler */
+/*
+ * Sysfs read handler. A RAW read powers the sensor up, waits roughly two
+ * polling intervals for a fresh report, then fetches the value
+ * synchronously from the hub; other masks use the common helpers.
+ */
+static int press_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2,
+			      long mask)
+{
+	struct press_state *press_state = iio_priv(indio_dev);
+	int report_id = -1;
+	u32 address;
+	int ret_type;
+	s32 poll_value;
+
+	*val = 0;
+	*val2 = 0;
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->scan_index) {
+		case  CHANNEL_SCAN_INDEX_PRESSURE:
+			report_id = press_state->press_attr.report_id;
+			address =
+			HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+			break;
+		default:
+			report_id = -1;
+			break;
+		}
+		if (report_id >= 0) {
+			poll_value = hid_sensor_read_poll_value(
+					&press_state->common_attributes);
+			if (poll_value < 0)
+				return -EINVAL;
+			hid_sensor_power_state(&press_state->common_attributes,
+						true);
+
+			/* Allow one full sample period (x2 margin). */
+			msleep_interruptible(poll_value * 2);
+
+			*val = sensor_hub_input_attr_get_raw_value(
+				press_state->common_attributes.hsdev,
+				HID_USAGE_SENSOR_PRESSURE, address,
+				report_id);
+			hid_sensor_power_state(&press_state->common_attributes,
+						false);
+		} else {
+			*val = 0;
+			return -EINVAL;
+		}
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		/* Scale/offset were precomputed in press_parse_report(). */
+		*val = press_state->scale_pre_decml;
+		*val2 = press_state->scale_post_decml;
+		ret_type = press_state->scale_precision;
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = press_state->value_offset;
+		ret_type = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret_type = hid_sensor_read_samp_freq_value(
+				&press_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret_type = hid_sensor_read_raw_hyst_value(
+				&press_state->common_attributes, val, val2);
+		break;
+	default:
+		ret_type = -EINVAL;
+		break;
+	}
+
+	return ret_type;
+}
+
+/* Channel write_raw handler */
+/*
+ * Write handler: only sampling frequency and hysteresis are writable,
+ * both forwarded to the common HID sensor attribute helpers.
+ */
+static int press_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val,
+			       int val2,
+			       long mask)
+{
+	struct press_state *press_state = iio_priv(indio_dev);
+	int ret = 0;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = hid_sensor_write_samp_freq_value(
+				&press_state->common_attributes, val, val2);
+		break;
+	case IIO_CHAN_INFO_HYSTERESIS:
+		ret = hid_sensor_write_raw_hyst_value(
+				&press_state->common_attributes, val, val2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct iio_info press_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &press_read_raw,
+ .write_raw = &press_write_raw,
+};
+
+/* Function to push data to buffer */
+static void hid_sensor_push_data(struct iio_dev *indio_dev, const void *data,
+					int len)
+{
+	/* @len unused: iio_push_to_buffers() knows the scan sample size. */
+	dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
+	iio_push_to_buffers(indio_dev, data);
+}
+
+/* Callback handler to send event after all samples are received and captured */
+static int press_proc_event(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct press_state *press_state = iio_priv(indio_dev);
+
+	dev_dbg(&indio_dev->dev, "press_proc_event\n");
+	/* Only push when the trigger armed the data_ready flag. */
+	if (atomic_read(&press_state->common_attributes.data_ready))
+		hid_sensor_push_data(indio_dev,
+				&press_state->press_data,
+				sizeof(press_state->press_data));
+
+	return 0;
+}
+
+/* Capture samples in local storage */
+static int press_capture_sample(struct hid_sensor_hub_device *hsdev,
+				unsigned usage_id,
+				size_t raw_len, char *raw_data,
+				void *priv)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(priv);
+	struct press_state *press_state = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	switch (usage_id) {
+	case HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE:
+		/*
+		 * NOTE(review): raw_len is not checked and raw_data is
+		 * dereferenced as u32 without an alignment guarantee —
+		 * confirm the hub always delivers >= 4 aligned bytes here.
+		 */
+		press_state->press_data = *(u32 *)raw_data;
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Parse report which is specific to an usage id*/
+static int press_parse_report(struct platform_device *pdev,
+				struct hid_sensor_hub_device *hsdev,
+				struct iio_chan_spec *channels,
+				unsigned usage_id,
+				struct press_state *st)
+{
+	int ret;
+
+	/* Locate the pressure field inside the input report. */
+	ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+			usage_id,
+			HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE,
+			&st->press_attr);
+	if (ret < 0)
+		return ret;
+	press_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_PRESSURE,
+					st->press_attr.size);
+
+	dev_dbg(&pdev->dev, "press %x:%x\n", st->press_attr.index,
+			st->press_attr.report_id);
+
+	/* Precompute scale exposed via IIO_CHAN_INFO_SCALE. */
+	st->scale_precision = hid_sensor_format_scale(
+				HID_USAGE_SENSOR_PRESSURE,
+				&st->press_attr,
+				&st->scale_pre_decml, &st->scale_post_decml);
+
+	/* Set Sensitivity field ids, when there is no individual modifier */
+	if (st->common_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE,
+			&st->common_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->common_attributes.sensitivity.index,
+			st->common_attributes.sensitivity.report_id);
+	}
+	return ret;
+}
+
+/* Function to initialize the processing for usage id */
+static int hid_press_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ static const char *name = "press";
+ struct iio_dev *indio_dev;
+ struct press_state *press_state;
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_chan_spec *channels;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct press_state));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ press_state = iio_priv(indio_dev);
+ press_state->common_attributes.hsdev = hsdev;
+ press_state->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_PRESSURE,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup common attributes\n");
+ return ret;
+ }
+
+ channels = kmemdup(press_channels, sizeof(press_channels), GFP_KERNEL);
+ if (!channels) {
+ dev_err(&pdev->dev, "failed to duplicate channels\n");
+ return -ENOMEM;
+ }
+
+ ret = press_parse_report(pdev, hsdev, channels,
+ HID_USAGE_SENSOR_PRESSURE, press_state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to setup attributes\n");
+ goto error_free_dev_mem;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels =
+ ARRAY_SIZE(press_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &press_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
+ goto error_free_dev_mem;
+ }
+ atomic_set(&press_state->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &press_state->common_attributes);
+ if (ret) {
+ dev_err(&pdev->dev, "trigger setup failed\n");
+ goto error_unreg_buffer_funcs;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "device register failed\n");
+ goto error_remove_trigger;
+ }
+
+ press_state->callbacks.send_event = press_proc_event;
+ press_state->callbacks.capture_sample = press_capture_sample;
+ press_state->callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PRESSURE,
+ &press_state->callbacks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "callback reg failed\n");
+ goto error_iio_unreg;
+ }
+
+ return ret;
+
+error_iio_unreg:
+ iio_device_unregister(indio_dev);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+error_unreg_buffer_funcs:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_free_dev_mem:
+ kfree(indio_dev->channels);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_press_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct press_state *press_state = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
+ iio_device_unregister(indio_dev);
+ hid_sensor_remove_trigger(&press_state->common_attributes);
+ iio_triggered_buffer_cleanup(indio_dev);
+ kfree(indio_dev->channels);
+
+ return 0;
+}
+
+/* Device-id table: matched against hub-created platform devices. */
+static struct platform_device_id hid_press_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200031",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_press_ids);
+
+static struct platform_driver hid_press_platform_driver = {
+ .id_table = hid_press_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = hid_press_probe,
+ .remove = hid_press_remove,
+};
+module_platform_driver(hid_press_platform_driver);
+
+MODULE_DESCRIPTION("HID Sensor Pressure");
+MODULE_AUTHOR("Archana Patni <archana.patni@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
new file mode 100644
index 00000000000000..f5ecd6e19f5de7
--- /dev/null
+++ b/drivers/iio/pressure/mpl115.c
@@ -0,0 +1,211 @@
+/*
+ * mpl115.c - Support for Freescale MPL115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * TODO: shutdown pin
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/delay.h>
+
+#define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */
+#define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */
+#define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */
+#define MPL115_B1 0x06 /* 2 bit integer, 13 bit fraction */
+#define MPL115_B2 0x08 /* 1 bit integer, 14 bit fraction */
+#define MPL115_C12 0x0a /* 0 bit integer, 13 bit fraction */
+#define MPL115_CONVERT 0x12 /* convert temperature and pressure */
+
+/*
+ * Per-device state. a0/b1/b2/c12 are the signed fixed-point compensation
+ * coefficients read once from the chip at probe time (see register map
+ * above for their integer/fraction splits). @lock serializes the
+ * convert-then-read sequence against concurrent read_raw calls.
+ */
+struct mpl115_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ s16 a0;
+ s16 b1, b2;
+ s16 c12;
+};
+
+/*
+ * Start a combined pressure+temperature conversion and wait for it to
+ * finish (datasheet conversion time ~3 ms). Caller must hold data->lock.
+ * Returns 0 on success or a negative I2C error.
+ */
+static int mpl115_request(struct mpl115_data *data)
+{
+ int ret = i2c_smbus_write_byte_data(data->client, MPL115_CONVERT, 0);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 4000);
+
+ return 0;
+}
+
+/*
+ * Run a conversion, read the 10-bit pressure and temperature ADC values,
+ * and apply the coefficient compensation from Freescale AN3785 to produce
+ * pressure in kPa: integer part in *val, microkPa remainder in *val2.
+ * On success returns the (non-negative) result of the last SMBus read;
+ * callers only test for < 0.
+ */
+static int mpl115_comp_pressure(struct mpl115_data *data, int *val, int *val2)
+{
+ int ret;
+ u16 padc, tadc;
+ int a1, y1, pcomp;
+ unsigned kpa;
+
+ mutex_lock(&data->lock);
+ ret = mpl115_request(data);
+ if (ret < 0)
+ goto done;
+
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_PADC);
+ if (ret < 0)
+ goto done;
+ padc = ret >> 6;
+
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+ if (ret < 0)
+ goto done;
+ tadc = ret >> 6;
+
+ /* see Freescale AN3785 */
+ a1 = data->b1 + ((data->c12 * tadc) >> 11);
+ y1 = (data->a0 << 10) + a1 * padc;
+
+ /* compensated pressure with 4 fractional bits */
+ pcomp = (y1 + ((data->b2 * (int) tadc) >> 1)) >> 9;
+
+ /* map full ADC range onto the 50..115 kPa span of the sensor */
+ kpa = pcomp * (115 - 50) / 1023 + (50 << 4);
+ *val = kpa >> 4;
+ *val2 = (kpa & 15) * (1000000 >> 4);
+done:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * Trigger a conversion and return the raw 16-bit TADC word (MSB first,
+ * still left-justified — the caller shifts out the low 6 bits), or a
+ * negative error.
+ */
+static int mpl115_read_temp(struct mpl115_data *data)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mpl115_request(data);
+ if (ret < 0)
+ goto done;
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+done:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * IIO read_raw: PROCESSED serves compensated pressure in kPa; RAW serves
+ * the 10-bit temperature ADC value, with fixed OFFSET/SCALE constants
+ * below converting it to milli-degrees C in userspace.
+ */
+static int mpl115_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mpl115_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = mpl115_comp_pressure(data, val, val2);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_RAW:
+ /* temperature -5.35 C / LSB, 472 LSB is 25 C */
+ ret = mpl115_read_temp(data);
+ if (ret < 0)
+ return ret;
+ *val = ret >> 6;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 605;
+ *val2 = 750000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ *val = -186;
+ *val2 = 915888;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Channel table. The temperature entry previously left the OFFSET|SCALE
+ * mask as a bare positional initializer after the designated
+ * .info_mask_separate — it silently initialized whichever member follows
+ * in struct iio_chan_spec. Restore the intended designator so the mask
+ * lands in .info_mask_shared_by_type explicitly.
+ */
+static const struct iio_chan_spec mpl115_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type =
+ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+/* Sysfs/ABI hooks: this driver only implements read_raw. */
+static const struct iio_info mpl115_info = {
+ .read_raw = &mpl115_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: verify SMBus word support, allocate the device (devm), read the
+ * four compensation coefficients once, and register. All resources are
+ * device-managed, so there is no remove callback.
+ */
+static int mpl115_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mpl115_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mpl115_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mpl115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpl115_channels);
+
+ /* cache factory compensation coefficients (sign-extended into s16) */
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_A0);
+ if (ret < 0)
+ return ret;
+ data->a0 = ret;
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_B1);
+ if (ret < 0)
+ return ret;
+ data->b1 = ret;
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_B2);
+ if (ret < 0)
+ return ret;
+ data->b2 = ret;
+ ret = i2c_smbus_read_word_swapped(data->client, MPL115_C12);
+ if (ret < 0)
+ return ret;
+ data->c12 = ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+/* I2C id table and driver glue; no remove needed (all devm-managed). */
+static const struct i2c_device_id mpl115_id[] = {
+ { "mpl115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl115_id);
+
+static struct i2c_driver mpl115_driver = {
+ .driver = {
+ .name = "mpl115",
+ },
+ .probe = mpl115_probe,
+ .id_table = mpl115_id,
+};
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
new file mode 100644
index 00000000000000..01b2e0b1887881
--- /dev/null
+++ b/drivers/iio/pressure/mpl3115.c
@@ -0,0 +1,329 @@
+/*
+ * mpl3115.c - Support for Freescale MPL3115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * TODO: FIFO buffer, altimeter mode, oversampling, continuous mode,
+ * interrupts, user offset correction, raw mode
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#define MPL3115_STATUS 0x00
+#define MPL3115_OUT_PRESS 0x01 /* MSB first, 20 bit */
+#define MPL3115_OUT_TEMP 0x04 /* MSB first, 12 bit */
+#define MPL3115_WHO_AM_I 0x0c
+#define MPL3115_CTRL_REG1 0x26
+
+#define MPL3115_DEVICE_ID 0xc4
+
+#define MPL3115_STATUS_PRESS_RDY BIT(2)
+#define MPL3115_STATUS_TEMP_RDY BIT(1)
+
+#define MPL3115_CTRL_RESET BIT(2) /* software reset */
+#define MPL3115_CTRL_OST BIT(1) /* initiate measurement */
+#define MPL3115_CTRL_ACTIVE BIT(0) /* continuous measurement */
+#define MPL3115_CTRL_OS_258MS (BIT(5) | BIT(4)) /* 64x oversampling */
+
+/*
+ * Per-device state. ctrl_reg1 caches the CTRL_REG1 contents so one-shot
+ * (OST) and standby bits can be OR'd/cleared without a read-modify-write.
+ * @lock serializes the trigger-and-poll sequence below.
+ */
+struct mpl3115_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 ctrl_reg1;
+};
+
+/*
+ * Start a one-shot measurement and poll CTRL_REG1 until the hardware
+ * clears OST (up to 15 * 20 ms). Caller must hold data->lock.
+ * Returns 0 when data is ready, -EIO on timeout, or an I2C error.
+ */
+static int mpl3115_request(struct mpl3115_data *data)
+{
+ int ret, tries = 15;
+
+ /* trigger measurement */
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 | MPL3115_CTRL_OST);
+ if (ret < 0)
+ return ret;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_CTRL_REG1);
+ if (ret < 0)
+ return ret;
+ /* wait for data ready, i.e. OST cleared */
+ if (!(ret & MPL3115_CTRL_OST))
+ break;
+ msleep(20);
+ }
+
+ /* tries only goes negative when the loop ran out without a break */
+ if (tries < 0) {
+ dev_err(&data->client->dev, "data not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * IIO read_raw. RAW reads trigger a one-shot conversion, block-read the
+ * big-endian result into the top bytes of a __be32 scratch word, and
+ * shift/sign-extend down to the 20-bit pressure or 12-bit temperature
+ * value. Rejected with -EBUSY while buffered capture owns the device.
+ */
+static int mpl3115_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ __be32 tmp = 0;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_PRESSURE: /* in 0.25 pascal / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ /* 3 MSB-first bytes; low byte of tmp stays zero */
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = be32_to_cpu(tmp) >> 12;
+ return IIO_VAL_INT;
+ case IIO_TEMP: /* in 0.0625 celsius / LSB */
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 250; /* want kilopascal */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = 62500;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+}
+
+/*
+ * Triggered-buffer bottom half: run one conversion, then pack the
+ * enabled channels into a fixed 16-byte frame laid out to match the
+ * scan types (pressure in bytes 0-3, temperature in the next 2) and
+ * push it with a timestamp. I2C errors simply skip the push; the
+ * trigger is always notified done.
+ */
+static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
+ int ret, pos = 0;
+
+ mutex_lock(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+
+ memset(buffer, 0, sizeof(buffer));
+ if (test_bit(0, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS, 3, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ pos += 4;
+ }
+
+ if (test_bit(1, indio_dev->active_scan_mask)) {
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP, 2, &buffer[pos]);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Channel table. In both entries the SCALE bit had been separated from
+ * the RAW bit by a comma instead of the intended '|' continuation,
+ * turning it into a stray positional initializer for the next struct
+ * member. Rejoin the masks so RAW and SCALE both land in
+ * .info_mask_separate as the read_raw handler expects.
+ */
+static const struct iio_chan_spec mpl3115_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 20,
+ .storagebits = 32,
+ .shift = 12,
+ .endianness = IIO_BE,
+ }
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 4,
+ .endianness = IIO_BE,
+ }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+/* Sysfs/ABI hooks: this driver only implements read_raw. */
+static const struct iio_info mpl3115_info = {
+ .read_raw = &mpl3115_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Probe: verify WHO_AM_I, allocate the device (devm), soft-reset the
+ * chip, program 64x oversampling, then set up the triggered buffer and
+ * register. The buffer must be torn down manually on register failure
+ * (see mpl3115_remove for the mirror-image teardown).
+ */
+static int mpl3115_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mpl3115_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPL3115_WHO_AM_I);
+ if (ret < 0)
+ return ret;
+ if (ret != MPL3115_DEVICE_ID)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &mpl3115_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mpl3115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mpl3115_channels);
+
+ /* software reset, I2C transfer is aborted (fails) */
+ i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ MPL3115_CTRL_RESET);
+ msleep(50);
+
+ data->ctrl_reg1 = MPL3115_CTRL_OS_258MS;
+ ret = i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ mpl3115_trigger_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+/* Clear ACTIVE in CTRL_REG1 to put the chip into low-power standby. */
+static int mpl3115_standby(struct mpl3115_data *data)
+{
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
+}
+
+/* Teardown in reverse of probe, then park the hardware in standby. */
+static int mpl3115_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ mpl3115_standby(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend = standby; resume rewrites the cached CTRL_REG1 settings. */
+static int mpl3115_suspend(struct device *dev)
+{
+ return mpl3115_standby(iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev))));
+}
+
+static int mpl3115_resume(struct device *dev)
+{
+ struct mpl3115_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+}
+
+static SIMPLE_DEV_PM_OPS(mpl3115_pm_ops, mpl3115_suspend, mpl3115_resume);
+#define MPL3115_PM_OPS (&mpl3115_pm_ops)
+#else
+#define MPL3115_PM_OPS NULL
+#endif
+
+/* I2C id table and driver glue. */
+static const struct i2c_device_id mpl3115_id[] = {
+ { "mpl3115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl3115_id);
+
+static struct i2c_driver mpl3115_driver = {
+ .driver = {
+ .name = "mpl3115",
+ .pm = MPL3115_PM_OPS,
+ },
+ .probe = mpl3115_probe,
+ .remove = mpl3115_remove,
+ .id_table = mpl3115_id,
+};
+module_i2c_driver(mpl3115_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL3115 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
new file mode 100644
index 00000000000000..f5f41490060b95
--- /dev/null
+++ b/drivers/iio/pressure/st_pressure.h
@@ -0,0 +1,49 @@
+/*
+ * STMicroelectronics pressures driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_PRESS_H
+#define ST_PRESS_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LPS001WP_PRESS_DEV_NAME "lps001wp"
+#define LPS25H_PRESS_DEV_NAME "lps25h"
+#define LPS331AP_PRESS_DEV_NAME "lps331ap"
+
+/**
+ * struct st_sensors_platform_data - default press platform data
+ * @drdy_int_pin: default press DRDY is available on INT1 pin.
+ */
+/*
+ * NOTE(review): defined static const in a header, so every includer gets
+ * its own copy — confirm only the i2c/spi front-end files include this.
+ */
+static const struct st_sensors_platform_data default_press_pdata = {
+ .drdy_int_pin = 1,
+};
+
+/* Core probe/remove shared by the i2c and spi front-ends. */
+int st_press_common_probe(struct iio_dev *indio_dev);
+void st_press_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_press_allocate_ring(struct iio_dev *indio_dev);
+void st_press_deallocate_ring(struct iio_dev *indio_dev);
+int st_press_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_PRESS_TRIGGER_SET_STATE (&st_press_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+/* No-op stubs so the core builds without buffer support. */
+static inline int st_press_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void st_press_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_PRESS_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
new file mode 100644
index 00000000000000..2ff53f222352ca
--- /dev/null
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -0,0 +1,96 @@
+/*
+ * STMicroelectronics pressures driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_pressure.h"
+
+/* Trigger state hook: forward enable/disable to the DRDY interrupt. */
+int st_press_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+/* Power the sensor up before the buffer starts capturing. */
+static int st_press_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return st_sensors_set_enable(indio_dev, true);
+}
+
+/*
+ * Allocate the scan-sized bounce buffer used by the common trigger
+ * handler, then chain to the generic postenable; the buffer is freed
+ * again if that fails (and otherwise in predisable).
+ */
+static int st_press_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
+
+ press_data->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (press_data->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_press_buffer_postenable_error;
+
+ return err;
+
+st_press_buffer_postenable_error:
+ kfree(press_data->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+/*
+ * Undo postenable: run the generic predisable, power the sensor down,
+ * and free the bounce buffer in every path.
+ */
+static int st_press_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_press_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_press_buffer_predisable_error:
+ kfree(press_data->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
+ .preenable = &st_press_buffer_preenable,
+ .postenable = &st_press_buffer_postenable,
+ .predisable = &st_press_buffer_predisable,
+};
+
+/* Hook the shared st_sensors trigger handler into a triggered buffer. */
+int st_press_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
+}
+
+void st_press_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics pressures buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
new file mode 100644
index 00000000000000..97baf40d424bd5
--- /dev/null
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -0,0 +1,497 @@
+/*
+ * STMicroelectronics pressures driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_pressure.h"
+
+#define ST_PRESS_LSB_PER_MBAR 4096UL
+#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
+ ST_PRESS_LSB_PER_MBAR)
+#define ST_PRESS_LSB_PER_CELSIUS 480UL
+#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
+ ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_NUMBER_DATA_CHANNELS 1
+
+/* FULLSCALE */
+#define ST_PRESS_FS_AVL_1260MB 1260
+
+#define ST_PRESS_1_OUT_XL_ADDR 0x28
+#define ST_TEMP_1_OUT_L_ADDR 0x2b
+
+/* CUSTOM VALUES FOR LPS331AP SENSOR */
+#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
+#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
+#define ST_PRESS_LPS331AP_ODR_MASK 0x70
+#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL 0x05
+#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL 0x06
+#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL 0x07
+#define ST_PRESS_LPS331AP_PW_ADDR 0x20
+#define ST_PRESS_LPS331AP_PW_MASK 0x80
+#define ST_PRESS_LPS331AP_FS_ADDR 0x23
+#define ST_PRESS_LPS331AP_FS_MASK 0x30
+#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
+#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
+#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
+#define ST_PRESS_LPS331AP_BDU_MASK 0x04
+#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
+#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
+#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
+#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
+
+/* CUSTOM VALUES FOR LPS001WP SENSOR */
+#define ST_PRESS_LPS001WP_WAI_EXP 0xba
+#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
+#define ST_PRESS_LPS001WP_ODR_MASK 0x30
+#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL 0x02
+#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
+#define ST_PRESS_LPS001WP_PW_ADDR 0x20
+#define ST_PRESS_LPS001WP_PW_MASK 0x40
+#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
+#define ST_PRESS_LPS001WP_BDU_MASK 0x04
+#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
+#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
+#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
+
+/* CUSTOM VALUES FOR LPS25H SENSOR */
+#define ST_PRESS_LPS25H_WAI_EXP 0xbd
+#define ST_PRESS_LPS25H_ODR_ADDR 0x20
+#define ST_PRESS_LPS25H_ODR_MASK 0x70
+#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL 0x02
+#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL 0x03
+#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
+#define ST_PRESS_LPS25H_PW_ADDR 0x20
+#define ST_PRESS_LPS25H_PW_MASK 0x80
+#define ST_PRESS_LPS25H_FS_ADDR 0x00
+#define ST_PRESS_LPS25H_FS_MASK 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
+#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
+#define ST_PRESS_LPS25H_BDU_ADDR 0x20
+#define ST_PRESS_LPS25H_BDU_MASK 0x04
+#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
+#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
+#define ST_PRESS_LPS25H_MULTIREAD_BIT true
+#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
+#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
+#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
+
+/*
+ * Channel layout shared by LPS331AP and LPS25H: 24-bit pressure plus a
+ * non-buffered (scan_index = -1) 16-bit temperature channel.
+ * NOTE(review): storagebits = 24 is not a power of two — verify the
+ * st_sensors buffer code copes with a 3-byte storage unit.
+ */
+static const struct iio_chan_spec st_press_1_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_PRESS_1_OUT_XL_ADDR,
+ .scan_index = ST_SENSORS_SCAN_X,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 24,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .modified = 0,
+ },
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_TEMP_1_OUT_L_ADDR,
+ .scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .modified = 0,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
+};
+
+/*
+ * LPS001WP layout: 16-bit pressure (no SCALE — the device has no
+ * full-scale table, see the .fs.addr = 0 entry below) and a
+ * non-buffered 16-bit temperature channel.
+ */
+static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_PRESS_LPS001WP_OUT_L_ADDR,
+ .scan_index = ST_SENSORS_SCAN_X,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .modified = 0,
+ },
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .address = ST_TEMP_LPS001WP_OUT_L_ADDR,
+ .scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .modified = 0,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
+};
+
+/*
+ * Per-chip register map table, matched against the WAI (who-am-i) value
+ * by st_sensors_check_device_support() in the common probe.
+ */
+static const struct st_sensor_settings st_press_sensors_settings[] = {
+ /* LPS331AP: 24-bit pressure, DRDY on INT1/INT2 */
+ {
+ .wai = ST_PRESS_LPS331AP_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS331AP_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS331AP_ODR_ADDR,
+ .mask = ST_PRESS_LPS331AP_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
+ { 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS331AP_PW_ADDR,
+ .mask = ST_PRESS_LPS331AP_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = ST_PRESS_LPS331AP_FS_ADDR,
+ .mask = ST_PRESS_LPS331AP_FS_MASK,
+ .fs_avl = {
+ /* gain = pressure, gain2 = temperature */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
+ .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
+ .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS331AP_BDU_ADDR,
+ .mask = ST_PRESS_LPS331AP_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ /* LPS001WP: 16-bit pressure, no full-scale table, no DRDY irq */
+ {
+ .wai = ST_PRESS_LPS001WP_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS001WP_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS001WP_ODR_ADDR,
+ .mask = ST_PRESS_LPS001WP_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS001WP_PW_ADDR,
+ .mask = ST_PRESS_LPS001WP_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = 0,
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS001WP_BDU_ADDR,
+ .mask = ST_PRESS_LPS001WP_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = 0,
+ },
+ .multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ /* LPS25H: same channel layout as LPS331AP, different registers */
+ {
+ .wai = ST_PRESS_LPS25H_WAI_EXP,
+ .sensors_supported = {
+ [0] = LPS25H_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_1_channels,
+ .num_ch = ARRAY_SIZE(st_press_1_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS25H_ODR_ADDR,
+ .mask = ST_PRESS_LPS25H_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
+ { 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
+ { 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
+ { 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS25H_PW_ADDR,
+ .mask = ST_PRESS_LPS25H_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = ST_PRESS_LPS25H_FS_ADDR,
+ .mask = ST_PRESS_LPS25H_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
+ .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
+ .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS25H_BDU_ADDR,
+ .mask = ST_PRESS_LPS25H_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+ },
+ .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+/*
+ * IIO write_raw: only the sampling frequency is writable; the value must
+ * be an integer (val2 == 0). Serialized under the device mlock.
+ */
+static int st_press_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch,
+ int val,
+ int val2,
+ long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+ mutex_lock(&indio_dev->mlock);
+ err = st_sensors_set_odr(indio_dev, val);
+ mutex_unlock(&indio_dev->mlock);
+ return err;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * IIO read_raw: RAW goes through the common st_sensors readout; SCALE
+ * returns the nano-scale gain (gain for pressure, gain2 for temperature)
+ * from the current full-scale entry; the temperature OFFSET is the
+ * fixed 42.5 degC zero point expressed as the fraction 425/10.
+ */
+static int st_press_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *press_data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+
+ switch (ch->type) {
+ case IIO_PRESSURE:
+ *val2 = press_data->current_fullscale->gain;
+ break;
+ case IIO_TEMP:
+ *val2 = press_data->current_fullscale->gain2;
+ break;
+ default:
+ err = -EINVAL;
+ goto read_error;
+ }
+
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (ch->type) {
+ case IIO_TEMP:
+ *val = 425;
+ *val2 = 10;
+ break;
+ default:
+ err = -EINVAL;
+ goto read_error;
+ }
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = press_data->odr;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+/* sysfs attribute exposing the list of supported sampling frequencies */
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+
+static struct attribute *st_press_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group st_press_attribute_group = {
+	.attrs = st_press_attributes,
+};
+
+/* IIO callbacks shared by all supported ST pressure parts */
+static const struct iio_info press_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &st_press_attribute_group,
+	.read_raw = &st_press_read_raw,
+	.write_raw = &st_press_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_press_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+};
+#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
+#else
+/* no trigger support compiled in: core skips trigger allocation */
+#define ST_PRESS_TRIGGER_OPS NULL
+#endif
+
+/*
+ * st_press_common_probe() - bus-independent probe shared by the I2C and
+ * SPI front ends.
+ *
+ * Identifies the part via the ST sensors core, wires up channels, full
+ * scale and default ODR, allocates the ring buffer and (if an IRQ is
+ * available) the data-ready trigger, then registers the IIO device.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): st_sensors_power_enable() is not undone on the error
+ * paths below — looks like a regulator reference leak; confirm against
+ * the st_sensors core semantics.
+ */
+int st_press_common_probe(struct iio_dev *indio_dev)
+{
+	struct st_sensor_data *press_data = iio_priv(indio_dev);
+	int irq = press_data->get_irq_data_ready(indio_dev);
+	int err;
+
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &press_info;
+
+	st_sensors_power_enable(indio_dev);
+
+	err = st_sensors_check_device_support(indio_dev,
+					ARRAY_SIZE(st_press_sensors_settings),
+					st_press_sensors_settings);
+	if (err < 0)
+		return err;
+
+	press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+	press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
+	indio_dev->channels = press_data->sensor_settings->ch;
+	indio_dev->num_channels = press_data->sensor_settings->num_ch;
+
+	/* fs.addr == 0 means the part has no full-scale register */
+	if (press_data->sensor_settings->fs.addr != 0)
+		press_data->current_fullscale =
+			(struct st_sensor_fullscale_avl *)
+				&press_data->sensor_settings->fs.fs_avl[0];
+
+	/* default to the slowest advertised output data rate */
+	press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
+
+	/* Some devices don't support a data ready pin. */
+	if (!press_data->dev->platform_data &&
+				press_data->sensor_settings->drdy_irq.addr)
+		press_data->dev->platform_data =
+			(struct st_sensors_platform_data *)&default_press_pdata;
+
+	err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
+	if (err < 0)
+		return err;
+
+	err = st_press_allocate_ring(indio_dev);
+	if (err < 0)
+		return err;
+
+	if (irq > 0) {
+		err = st_sensors_allocate_trigger(indio_dev,
+							  ST_PRESS_TRIGGER_OPS);
+		if (err < 0)
+			goto st_press_probe_trigger_error;
+	}
+
+	err = iio_device_register(indio_dev);
+	if (err)
+		goto st_press_device_register_error;
+
+	dev_info(&indio_dev->dev, "registered pressure sensor %s\n",
+		 indio_dev->name);
+
+	return err;
+
+st_press_device_register_error:
+	if (irq > 0)
+		st_sensors_deallocate_trigger(indio_dev);
+st_press_probe_trigger_error:
+	st_press_deallocate_ring(indio_dev);
+
+	return err;
+}
+EXPORT_SYMBOL(st_press_common_probe);
+
+/*
+ * st_press_common_remove() - bus-independent teardown, mirror of
+ * st_press_common_probe().
+ *
+ * NOTE(review): power is disabled *before* the IIO device is
+ * unregistered, so userspace could still trigger reads on an unpowered
+ * sensor in that window — verify the intended ordering.
+ */
+void st_press_common_remove(struct iio_dev *indio_dev)
+{
+	struct st_sensor_data *press_data = iio_priv(indio_dev);
+
+	st_sensors_power_disable(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	if (press_data->get_irq_data_ready(indio_dev) > 0)
+		st_sensors_deallocate_trigger(indio_dev);
+
+	st_press_deallocate_ring(indio_dev);
+}
+EXPORT_SYMBOL(st_press_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics pressures driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
new file mode 100644
index 00000000000000..137788bba4a322
--- /dev/null
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -0,0 +1,94 @@
+/*
+ * STMicroelectronics pressures driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_pressure.h"
+
+#ifdef CONFIG_OF
+/* DT compatibles; .data carries the device name string consumed by
+ * st_sensors_of_i2c_probe() to pick the matching settings entry. */
+static const struct of_device_id st_press_of_match[] = {
+	{
+		.compatible = "st,lps001wp-press",
+		.data = LPS001WP_PRESS_DEV_NAME,
+	},
+	{
+		.compatible = "st,lps25h-press",
+		.data = LPS25H_PRESS_DEV_NAME,
+	},
+	{
+		.compatible = "st,lps331ap-press",
+		.data = LPS331AP_PRESS_DEV_NAME,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_press_of_match);
+#else
+#define st_press_of_match NULL
+#endif
+
+/*
+ * st_press_i2c_probe() - I2C front end: allocate the IIO device,
+ * resolve the DT match, configure the common I2C transfer layer and
+ * delegate to st_press_common_probe().
+ */
+static int st_press_i2c_probe(struct i2c_client *client,
+						const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct st_sensor_data *press_data;
+	int err;
+
+	/* devm allocation: freed automatically on probe failure/unbind */
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	press_data = iio_priv(indio_dev);
+	st_sensors_of_i2c_probe(client, st_press_of_match);
+
+	st_sensors_i2c_configure(indio_dev, client, press_data);
+
+	err = st_press_common_probe(indio_dev);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+/* I2C unbind: clientdata holds the iio_dev set up by the common core */
+static int st_press_i2c_remove(struct i2c_client *client)
+{
+	st_press_common_remove(i2c_get_clientdata(client));
+
+	return 0;
+}
+
+/* legacy (non-DT) I2C id strings, one per supported part */
+static const struct i2c_device_id st_press_id_table[] = {
+	{ LPS001WP_PRESS_DEV_NAME },
+	{ LPS25H_PRESS_DEV_NAME },
+	{ LPS331AP_PRESS_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, st_press_id_table);
+
+static struct i2c_driver st_press_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "st-press-i2c",
+		.of_match_table = of_match_ptr(st_press_of_match),
+	},
+	.probe = st_press_i2c_probe,
+	.remove = st_press_i2c_remove,
+	.id_table = st_press_id_table,
+};
+module_i2c_driver(st_press_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics pressures i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
new file mode 100644
index 00000000000000..1ffa6d4d349c14
--- /dev/null
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -0,0 +1,70 @@
+/*
+ * STMicroelectronics pressures driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_pressure.h"
+
+/*
+ * st_press_spi_probe() - SPI front end: allocate the IIO device,
+ * configure the common SPI transfer layer and delegate to
+ * st_press_common_probe().
+ */
+static int st_press_spi_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct st_sensor_data *press_data;
+	int err;
+
+	/* devm allocation: freed automatically on probe failure/unbind */
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*press_data));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	press_data = iio_priv(indio_dev);
+
+	st_sensors_spi_configure(indio_dev, spi, press_data);
+
+	err = st_press_common_probe(indio_dev);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+/* SPI unbind: drvdata holds the iio_dev set up by the common core */
+static int st_press_spi_remove(struct spi_device *spi)
+{
+	st_press_common_remove(spi_get_drvdata(spi));
+
+	return 0;
+}
+
+/* SPI id strings, one per supported part */
+static const struct spi_device_id st_press_id_table[] = {
+	{ LPS001WP_PRESS_DEV_NAME },
+	{ LPS25H_PRESS_DEV_NAME },
+	{ LPS331AP_PRESS_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(spi, st_press_id_table);
+
+static struct spi_driver st_press_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "st-press-spi",
+	},
+	.probe = st_press_spi_probe,
+	.remove = st_press_spi_remove,
+	.id_table = st_press_id_table,
+};
+module_spi_driver(st_press_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics pressures spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
new file mode 100644
index 00000000000000..e11cd3938d673a
--- /dev/null
+++ b/drivers/iio/pressure/t5403.c
@@ -0,0 +1,275 @@
+/*
+ * t5403.c - Support for EPCOS T5403 pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x77)
+ *
+ * TODO: end-of-conversion irq
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+
+#define T5403_DATA 0xf5 /* data, LSB first, 16 bit */
+#define T5403_CALIB_DATA 0x8e /* 10 calibration coeff., LSB first, 16 bit */
+#define T5403_SLAVE_ADDR 0x88 /* I2C slave address, 0x77 */
+#define T5403_COMMAND 0xf1
+
+/* command bits */
+#define T5403_MODE_SHIFT 3 /* conversion time: 2, 8, 16, 66 ms */
+#define T5403_PT BIT(1) /* 0 .. pressure, 1 .. temperature measurement */
+#define T5403_SCO BIT(0) /* start conversion */
+
+#define T5403_MODE_LOW 0
+#define T5403_MODE_STANDARD 1
+#define T5403_MODE_HIGH 2
+#define T5403_MODE_ULTRA_HIGH 3
+
+#define T5403_I2C_MASK (~BIT(7))
+#define T5403_I2C_ADDR 0x77
+
+static const int t5403_pressure_conv_ms[] = {2, 8, 16, 66};
+
+struct t5403_data {
+	struct i2c_client *client;
+	struct mutex lock;	/* serializes measurement sequences */
+	int mode;		/* index into t5403_pressure_conv_ms[] */
+	__le16 c[10];		/* calibration coefficients, as read (LE) */
+};
+
+/* Calibration coefficient i (1-based, per datasheet numbering):
+ * T5403_C_U16() as unsigned 16-bit, T5403_C() sign-extended. */
+#define T5403_C_U16(i) le16_to_cpu(data->c[(i) - 1])
+#define T5403_C(i) sign_extend32(T5403_C_U16(i), 15)
+
+/*
+ * t5403_read() - trigger a single conversion and read the 16-bit result.
+ * @pressure: true for a pressure measurement at the current mode,
+ *            false for a temperature measurement.
+ *
+ * Sleeps for the wakeup time plus the mode-dependent conversion time.
+ * Returns the raw reading (>= 0) or a negative errno.
+ */
+static int t5403_read(struct t5403_data *data, bool pressure)
+{
+	int wait_time = 3; /* wakeup time in ms */
+
+	int ret = i2c_smbus_write_byte_data(data->client, T5403_COMMAND,
+		(pressure ? (data->mode << T5403_MODE_SHIFT) : T5403_PT) |
+		T5403_SCO);
+	if (ret < 0)
+		return ret;
+
+	/* temperature conversion always completes in 2 ms */
+	wait_time += pressure ? t5403_pressure_conv_ms[data->mode] : 2;
+
+	msleep(wait_time);
+
+	return i2c_smbus_read_word_data(data->client, T5403_DATA);
+}
+
+/*
+ * t5403_comp_pressure() - read raw temperature + pressure and apply the
+ * vendor's fixed-point compensation polynomial.
+ *
+ * Result is split as *val integer part and *val2 micro part (consumed
+ * via IIO_VAL_INT_PLUS_MICRO).  Returns >= 0 on success, -errno on I2C
+ * failure.  The shift/divide ordering below follows the EPCOS
+ * application note exactly — do not re-associate the arithmetic.
+ */
+static int t5403_comp_pressure(struct t5403_data *data, int *val, int *val2)
+{
+	int ret;
+	s16 t_r;
+	u16 p_r;
+	s32 S, O, X;
+
+	/* hold the lock across both conversions so they use a coherent
+	 * temperature/pressure pair */
+	mutex_lock(&data->lock);
+
+	ret = t5403_read(data, false);
+	if (ret < 0)
+		goto done;
+	t_r = ret;
+
+	ret = t5403_read(data, true);
+	if (ret < 0)
+		goto done;
+	p_r = ret;
+
+	/* see EPCOS application note */
+	S = T5403_C_U16(3) + (s32) T5403_C_U16(4) * t_r / 0x20000 +
+		T5403_C(5) * t_r / 0x8000 * t_r / 0x80000 +
+		T5403_C(9) * t_r / 0x8000 * t_r / 0x8000 * t_r / 0x10000;
+
+	O = T5403_C(6) * 0x4000 + T5403_C(7) * t_r / 8 +
+		T5403_C(8) * t_r / 0x8000 * t_r / 16 +
+		T5403_C(9) * t_r / 0x8000 * t_r / 0x10000 * t_r;
+
+	X = (S * p_r + O) / 0x4000;
+
+	/* second-order correction term */
+	X += ((X - 75000) * (X - 75000) / 0x10000 - 9537) *
+	    T5403_C(10) / 0x10000;
+
+	*val = X / 1000;
+	*val2 = (X % 1000) * 1000;
+
+done:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+/*
+ * t5403_comp_temp() - read a raw temperature sample and apply the
+ * vendor's fixed-point compensation.  *val receives the compensated
+ * temperature (reported as IIO_VAL_INT by the caller).
+ */
+static int t5403_comp_temp(struct t5403_data *data, int *val)
+{
+	int ret;
+	s16 t_r;
+
+	mutex_lock(&data->lock);
+	ret = t5403_read(data, false);
+	if (ret < 0)
+		goto done;
+	t_r = ret;
+
+	/* see EPCOS application note */
+	*val = ((s32) T5403_C_U16(1) * t_r / 0x100 +
+		(s32) T5403_C_U16(2) * 0x40) * 1000 / 0x10000;
+
+done:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+/*
+ * t5403_read_raw() - IIO read hook.  Only compensated (PROCESSED)
+ * values are exposed; INT_TIME reports the pressure conversion time of
+ * the current mode in seconds (integer part always 0).
+ */
+static int t5403_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan,
+	int *val, int *val2, long mask)
+{
+	struct t5403_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->type) {
+		case IIO_PRESSURE:
+			ret = t5403_comp_pressure(data, val, val2);
+			if (ret < 0)
+				return ret;
+			return IIO_VAL_INT_PLUS_MICRO;
+		case IIO_TEMP:
+			ret = t5403_comp_temp(data, val);
+			if (ret < 0)
+				return ret;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_INT_TIME:
+		/* ms table entry converted to microseconds */
+		*val = 0;
+		*val2 = t5403_pressure_conv_ms[data->mode] * 1000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * t5403_write_raw() - IIO write hook.  Only INT_TIME is writable: the
+ * requested integration time (in seconds via val/val2) must exactly
+ * match one of the four supported conversion times, which selects the
+ * corresponding measurement mode.
+ */
+static int t5403_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan,
+	int val, int val2, long mask)
+{
+	struct t5403_data *data = iio_priv(indio_dev);
+	int i;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_INT_TIME:
+		/* all valid times are < 1 s, so the integer part must be 0 */
+		if (val != 0)
+			return -EINVAL;
+		for (i = 0; i < ARRAY_SIZE(t5403_pressure_conv_ms); i++)
+			if (val2 == t5403_pressure_conv_ms[i] * 1000) {
+				mutex_lock(&data->lock);
+				data->mode = i;
+				mutex_unlock(&data->lock);
+				return 0;
+			}
+		return -EINVAL;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* one pressure and one temperature channel, both processed-only */
+static const struct iio_chan_spec t5403_channels[] = {
+	{
+		.type = IIO_PRESSURE,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+			BIT(IIO_CHAN_INFO_INT_TIME),
+	},
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+	},
+};
+
+/* seconds, matching t5403_pressure_conv_ms = {2, 8, 16, 66} */
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.002 0.008 0.016 0.066");
+
+static struct attribute *t5403_attributes[] = {
+	&iio_const_attr_integration_time_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group t5403_attribute_group = {
+	.attrs = t5403_attributes,
+};
+
+static const struct iio_info t5403_info = {
+	.read_raw = &t5403_read_raw,
+	.write_raw = &t5403_write_raw,
+	.attrs = &t5403_attribute_group,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * t5403_probe() - verify the part is present (its slave-address
+ * register must read back 0x77), read the calibration block, and
+ * register the IIO device.  Fully devm-managed: no remove hook needed.
+ */
+static int t5403_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct t5403_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+	    I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -ENODEV;
+
+	/* identity check: register 0x88 holds the 7-bit slave address */
+	ret = i2c_smbus_read_byte_data(client, T5403_SLAVE_ADDR);
+	if (ret < 0)
+		return ret;
+	if ((ret & T5403_I2C_MASK) != T5403_I2C_ADDR)
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->info = &t5403_info;
+	indio_dev->name = id->name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = t5403_channels;
+	indio_dev->num_channels = ARRAY_SIZE(t5403_channels);
+
+	data->mode = T5403_MODE_STANDARD;
+
+	/* cache the 10 calibration coefficients once at probe time */
+	ret = i2c_smbus_read_i2c_block_data(data->client, T5403_CALIB_DATA,
+	    sizeof(data->c), (u8 *) data->c);
+	if (ret < 0)
+		return ret;
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id t5403_id[] = {
+	{ "t5403", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, t5403_id);
+
+/* no .remove: everything in probe is devm-managed */
+static struct i2c_driver t5403_driver = {
+	.driver = {
+		.name	= "t5403",
+	},
+	.probe = t5403_probe,
+	.id_table = t5403_id,
+};
+module_i2c_driver(t5403_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("EPCOS T5403 pressure/temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
new file mode 100644
index 00000000000000..0c8cdf58f6a19b
--- /dev/null
+++ b/drivers/iio/proximity/Kconfig
@@ -0,0 +1,19 @@
+#
+# Proximity sensors
+#
+
+menu "Lightning sensors"
+
+config AS3935
+ tristate "AS3935 Franklin lightning sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ depends on SPI
+ help
+ Say Y here to build SPI interface support for the Austrian
+ Microsystems AS3935 lightning detection sensor.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called as3935.
+
+endmenu
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
new file mode 100644
index 00000000000000..743adee1c8bff9
--- /dev/null
+++ b/drivers/iio/proximity/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for IIO proximity sensors
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AS3935) += as3935.o
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 00000000000000..36208fe0baae66
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,239 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+
+/* Per-instance state tracking which keys of the reset chord are held. */
+struct keyreset_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];	/* keys we watch */
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];	/* "must be up" keys */
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];	/* current down state */
+	spinlock_t lock;	/* protects all counters below */
+	int key_down_target;	/* number of keys_down needed to trigger */
+	int key_down;		/* watched "down" keys currently pressed */
+	int key_up;		/* watched "up" keys currently pressed */
+	int restart_disabled;	/* chord inhibited until fully released */
+	int (*reset_fn)(void);	/* optional board-specific reset hook */
+};
+
+/* Restart progress: 0 idle, 1 requested, 2 syncing, 3 restarting.
+ * NOTE(review): file-scope but not static — presumably left visible for
+ * crash-dump inspection; consider making it static. */
+int restart_requested;
+/* Run sync + restart from workqueue context, not the input event path. */
+static void deferred_restart(struct work_struct *dummy)
+{
+	restart_requested = 2;
+	sys_sync();
+	restart_requested = 3;
+	kernel_restart(NULL);
+}
+static DECLARE_WORK(restart_work, deferred_restart);
+
+/*
+ * keyreset_event() - input core callback; tracks the state of every
+ * watched key and fires the reset once all keys_down keys are pressed
+ * while no keys_up key is (or has been, since the last full release).
+ * Runs with interrupts possibly disabled, hence the deferred restart.
+ */
+static void keyreset_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keyreset_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	/* ignore key repeats: state unchanged */
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value) {
+			/* a "must be up" key went down: inhibit reset */
+			state->restart_disabled = 1;
+			state->key_up++;
+		} else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	/* re-arm only once every watched key has been released */
+	if (state->key_down == 0 && state->key_up == 0)
+		state->restart_disabled = 0;
+
+	pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value,
+		 state->key_down, state->key_up, state->restart_disabled);
+
+	if (value && !state->restart_disabled &&
+	    state->key_down == state->key_down_target) {
+		state->restart_disabled = 1;
+		/* a second chord while a restart is pending means the first
+		 * one hung somewhere: give up loudly */
+		if (restart_requested)
+			panic("keyboard reset failed, %d", restart_requested);
+		if (state->reset_fn) {
+			restart_requested = state->reset_fn();
+		} else {
+			pr_info("keyboard reset\n");
+			schedule_work(&restart_work);
+			restart_requested = 1;
+		}
+	}
+done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+/*
+ * keyreset_connect() - input core callback invoked for each matching
+ * input device.  Attach only to devices that can emit at least one of
+ * the watched keys; otherwise report -ENODEV so the core skips them.
+ */
+static int keyreset_connect(struct input_handler *handler,
+					  struct input_dev *dev,
+					  const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keyreset_state *state =
+		container_of(handler, struct keyreset_state, input_handler);
+
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "keyreset";
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("using input dev %s for key reset\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+/* Mirror of keyreset_connect(): close, unregister and free the handle. */
+static void keyreset_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/* match any input device that reports key events */
+static const struct input_device_id keyreset_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(input, keyreset_ids);
+
+/*
+ * keyreset_probe() - build the key bitmaps from platform data and
+ * register the input handler that watches for the reset chord.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keyreset_state *state;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+
+	/* keys_down is mandatory (it defines the reset chord); previously
+	 * only pdata itself was checked and a missing list was walked
+	 * unconditionally, dereferencing NULL. */
+	if (!pdata || !pdata->keys_down)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	/* keys that must all be held to trigger the reset */
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	/* optional keys that veto the reset while pressed */
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	if (pdata->reset_fn)
+		state->reset_fn = pdata->reset_fn;
+
+	state->input_handler.event = keyreset_event;
+	state->input_handler.connect = keyreset_connect;
+	state->input_handler.disconnect = keyreset_disconnect;
+	state->input_handler.name = KEYRESET_NAME;
+	state->input_handler.id_table = keyreset_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+/* Unbind: detach from the input core and free the per-device state.
+ * NOTE(review): not static — presumably to match the non-static driver
+ * struct below; consider making both static. */
+int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+	input_unregister_handler(&state->input_handler);
+	kfree(state);
+	return 0;
+}
+
+
+struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	/* platform_driver_unregister() returns void; "return f();" from a
+	 * void function is an ISO C constraint violation (GCC extension),
+	 * so call it plainly. */
+	platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
diff --git a/drivers/input/misc/akm8975.c b/drivers/input/misc/akm8975.c
new file mode 100644
index 00000000000000..a20bc1090ebc70
--- /dev/null
+++ b/drivers/input/misc/akm8975.c
@@ -0,0 +1,829 @@
+/* drivers/input/misc/akm8975.c - AK8972 compass driver
+ *
+ * Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
+ * Copyright (C) 2012 Sony Mobile Communications AB.
+ * Authors: Takashi Shiina <takashi.shiina (at) sonymobile.com>
+ * Tadashi Kubo <tadashi.kubo (at) sonymobile.com>
+ * Aleksej Makarov <Aleksej.Makarov (at) sonymobile.com>
+ * Joachim Holst <joachim.holst (at) sonymobile.com>
+ * Chikaharu Gonnokami <Chikaharu.X.Gonnokami (at) sonymobile.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#include <linux/input/akm8975.h>
+
+#define AKM8975_GPIO_DBG 0 // Enable gpio debugging (pulse during it handling)
+#define AKM8975_GPIO_NUM 134 // Gpio number for gpio debugging
+
+#define AKM8975_BASE_NUM 10
+#define AKM8975_WAIT_TIME_MS 10 // 100 Hz
+#define MODE_CHANGE_DELAY 100
+#if SENSOR_DATA_SIZE < 8
+#error SENSOR_DATA_SIZE is too small.
+#endif
+#define AK8975_DATA_SIZE (AK8975_REG_ST2 - AK8975_REG_WIA + 1)
+#define AK8975_FUSE_SIZE (AK8975_FUSE_ASAZ - AK8975_FUSE_ASAX + 1)
+#define AK8975_REGS_SIZE (AK8975_DATA_SIZE + AK8975_FUSE_SIZE)
+/* Per-device state for the AK8975 compass. */
+struct akm8975_data {
+	struct i2c_client *i2c;
+	struct input_dev *input;
+	struct device *class_dev;
+	struct class *compass;
+	struct delayed_work work;	/* periodic measurement trigger */
+/* Prevent to start measure while measuring */
+	atomic_t is_busy;
+/* If it is positive value, sequential measurement is running */
+	atomic_t interval;
+/* Register address to be read */
+	atomic_t reg_addr;
+/* A buffer to save FUSE ROM value */
+	unsigned char fuse[FUSEROM_SIZE];
+	struct akm8975_platform_data *pdata;
+	struct notifier_block pnotifier;
+};
+/*
+ * aki2c_rxdata() - combined write/read transfer.  rxdata[0] holds the
+ * register address on entry and is overwritten (along with the rest of
+ * the buffer) by @length bytes read back.  Returns 0 or -EIO.
+ */
+static int aki2c_rxdata(struct i2c_client *i2c, unsigned char *rxdata,
+			int length)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = 1,
+			.buf = rxdata,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+			.buf = rxdata,
+		},
+	};
+	if (i2c_transfer(i2c->adapter, msgs, ARRAY_SIZE(msgs))
+			!= ARRAY_SIZE(msgs)) {
+		dev_err(&i2c->dev, "%s: transfer failed.\n", __func__);
+		return -EIO;
+	}
+	dev_dbg(&i2c->dev, "RxData: len=%02x, addr=%02x data=%02x",
+		length, rxdata[0], rxdata[1]);
+	return 0;
+}
+/*
+ * aki2c_txdata() - single write transfer of @length bytes; txdata[0]
+ * is the register address, the rest is payload.  Returns 0 or -EIO.
+ */
+static int aki2c_txdata(struct i2c_client *i2c, unsigned char *txdata,
+			int length)
+{
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		},
+	};
+	if (i2c_transfer(i2c->adapter, msg, ARRAY_SIZE(msg))
+			!= ARRAY_SIZE(msg)) {
+		dev_err(&i2c->dev, "%s: transfer failed.", __func__);
+		return -EIO;
+	}
+	dev_dbg(&i2c->dev, "TxData: len=%02x, addr=%02x data=%02x",
+		length, txdata[0], txdata[1]);
+	return 0;
+}
+/*
+ * akecs_setmode_sngmeasure() - start a single measurement.  Claims the
+ * busy flag first (released by the IRQ handler or power-down); returns
+ * -EBUSY if a measurement is already in flight, else the write result.
+ */
+static int akecs_setmode_sngmeasure(struct akm8975_data *akm)
+{
+	unsigned char buffer[2];
+	int err;
+	if (atomic_cmpxchg(&akm->is_busy, 0, 1) != 0) {
+		dev_err(&akm->i2c->dev, "%s: device is busy\n", __func__);
+		return -EBUSY;
+	}
+	buffer[0] = AK8975_REG_CNTL;
+	buffer[1] = AK8975_MODE_SNG_MEASURE;
+	err = aki2c_txdata(akm->i2c, buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not set to measurement mode.\n", __func__);
+		/* release the busy claim taken above */
+		atomic_set(&akm->is_busy, 0);
+	}
+	return err;
+}
+/*
+ * akecs_setmode_selftest() - write the ASTC pattern then switch the
+ * part into self-test mode.  Claims the busy flag like a normal
+ * measurement; releases it on either write failing.
+ */
+static int akecs_setmode_selftest(struct akm8975_data *akm,
+				  unsigned char astc)
+{
+	unsigned char buffer[2];
+	int err;
+	if (atomic_cmpxchg(&akm->is_busy, 0, 1) != 0) {
+		dev_err(&akm->i2c->dev, "%s: device is busy\n", __func__);
+		return -EBUSY;
+	}
+	buffer[0] = AK8975_REG_ASTC;
+	buffer[1] = astc;
+	err = aki2c_txdata(akm->i2c, buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not activate self test.\n", __func__);
+		atomic_set(&akm->is_busy, 0);
+		return err;
+	}
+	buffer[0] = AK8975_REG_CNTL;
+	buffer[1] = AK8975_MODE_SELF_TEST;
+	err = aki2c_txdata(akm->i2c, buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not set to self test mode.\n", __func__);
+		atomic_set(&akm->is_busy, 0);
+	}
+	return err;
+}
+/* If power-down mode is set while DRDY is HIGH (i.e. before the work
+ * queue function has read out the measurement data), the DRDY bit is
+ * reset to 0 and the work queue function will fail. */
+static int akecs_setmode_powerdown(struct akm8975_data *akm)
+{
+	unsigned char buffer[2];
+	int err;
+	buffer[0] = AK8975_REG_CNTL;
+	buffer[1] = AK8975_MODE_POWERDOWN;
+	err = aki2c_txdata(akm->i2c, buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not set to powerdown.\n", __func__);
+		return err;
+	}
+	/* wait out the mode-change settling time before clearing busy */
+	udelay(MODE_CHANGE_DELAY);
+	atomic_set(&akm->is_busy, 0);
+	return err;
+}
+/*
+ * akecs_checkdevice() - read the WIA (who-am-I) register and verify it
+ * matches AK8975_DEVICE_ID.  Returns 0, -ENXIO on mismatch, or the
+ * I2C error.
+ */
+static int akecs_checkdevice(struct akm8975_data *akm)
+{
+	unsigned char buffer;
+	int err;
+	buffer = AK8975_REG_WIA;
+	err = aki2c_rxdata(akm->i2c, &buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not read WIA.\n", __func__);
+		return err;
+	}
+	if (buffer != AK8975_DEVICE_ID) {
+		dev_err(&akm->i2c->dev,
+			"%s: The device is not AK8975.\n", __func__);
+		return -ENXIO;
+	}
+	return err;
+}
+/*
+ * akecs_read_fuse() - switch to FUSE-ROM access mode, cache the per-axis
+ * sensitivity adjustment values into akm->fuse[], then power back down.
+ */
+static int akecs_read_fuse(struct akm8975_data *akm)
+{
+	unsigned char buffer[2];
+	int err;
+	memset(akm->fuse, 0, sizeof(akm->fuse));
+	buffer[0] = AK8975_REG_CNTL;
+	buffer[1] = AK8975_MODE_FUSE_ACCESS;
+	err = aki2c_txdata(akm->i2c, buffer, sizeof(buffer));
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not set to fuse access mode.", __func__);
+		return err;
+	}
+	/* fuse[0] carries the start register for the combined transfer */
+	akm->fuse[0] = AK8975_FUSE_ASAX;
+	err = aki2c_rxdata(akm->i2c, akm->fuse, FUSEROM_SIZE);
+	if (err) {
+		dev_err(&akm->i2c->dev,
+			"%s: Can not read the FUSE-ROM.", __func__);
+		return err;
+	}
+	dev_dbg(&akm->i2c->dev, "%s: FUSE = 0x%02x, 0x%02x, 0x%02x",
+		__func__, akm->fuse[0], akm->fuse[1], akm->fuse[2]);
+	return akecs_setmode_powerdown(akm);
+}
+/*
+ * akm8975_irq() - DRDY interrupt handler.  Reads the full sample block,
+ * applies the FUSE-ROM sensitivity adjustment per axis and reports the
+ * result through the input device (ABS_RX/RY/RZ + ST2 on ABS_RUDDER).
+ * Always returns IRQ_HANDLED, even when the read or DRDY check fails.
+ */
+static irqreturn_t akm8975_irq(int irq, void *handle)
+{
+	struct akm8975_data *akm = handle;
+	unsigned char buffer[SENSOR_DATA_SIZE];
+	int err;
+	s16 val_hx, val_hy, val_hz, val_st2;
+	u32 sensitivity;
+	s32 val_tmp;
+
+#if AKM8975_GPIO_DBG
+	gpio_set_value(AKM8975_GPIO_NUM, 1);
+#endif
+
+	dev_vdbg(&akm->i2c->dev, "%s called\n", __func__);
+	buffer[0] = AK8975_REG_ST1;
+	err = aki2c_rxdata(akm->i2c, buffer, SENSOR_DATA_SIZE);
+	if (err)
+		goto work_func_end;
+/* Check ST bit */
+	if ((buffer[0] & AK8975_ST1_DATA_READY) != AK8975_ST1_DATA_READY) {
+		dev_err(&akm->i2c->dev, "%s: DRDY is not set", __func__);
+		goto work_func_end;
+	}
+	/* measurement complete: allow the next one to start */
+	atomic_set(&akm->is_busy, 0);
+/* report axis data: HXL & HXH / HYL & HYH / HZL & HZH */
+	/* X and Y are negated to remap the sensor axes to the board frame;
+	 * NOTE(review): board-specific — confirm against the mounting. */
+	val_hx = -(s16)(((u16)buffer[2] << 8) | (u16)buffer[1]);
+	val_hy = -(s16)(((u16)buffer[4] << 8) | (u16)buffer[3]);
+	val_hz = (s16)(((u16)buffer[6] << 8) | (u16)buffer[5]);
+	val_st2 = (s16)buffer[7];
+
+/* adjust sensitivity */
+	sensitivity = akm->fuse[0];
+	val_tmp = (((sensitivity >> 1) * (s32)val_hx) >> 7) + (val_hx >> 1);
+	val_hx = (s16)val_tmp;
+	sensitivity = akm->fuse[1];
+	val_tmp = (((sensitivity >> 1) * (s32)val_hy) >> 7) + (val_hy >> 1);
+	val_hy = (s16)val_tmp;
+	sensitivity = akm->fuse[2];
+	val_tmp = (((sensitivity >> 1) * (s32)val_hz) >> 7) + (val_hz >> 1);
+	val_hz = (s16)val_tmp;
+
+	dev_dbg(&akm->i2c->dev, "hval = %d,%d,%d: ST2 = 0x%02X",
+		val_hx, val_hy, val_hz, val_st2);
+	input_report_abs(akm->input, ABS_RX, val_hx);
+	input_report_abs(akm->input, ABS_RY, val_hy);
+	input_report_abs(akm->input, ABS_RZ, val_hz);
+	input_report_abs(akm->input, ABS_RUDDER, val_st2);
+	input_sync(akm->input);
+work_func_end:
+
+#if AKM8975_GPIO_DBG
+	gpio_set_value(AKM8975_GPIO_NUM, 0);
+#endif
+
+	return IRQ_HANDLED;
+}
+/*
+ * akm8975_continuous_measure() - delayed-work body implementing the
+ * periodic measurement loop: re-arm the work, then kick off a single
+ * measurement.  A negative interval stops the loop.
+ */
+static void akm8975_continuous_measure(struct work_struct *work)
+{
+	struct akm8975_data *akm =
+		container_of(work, struct akm8975_data, work.work);
+	int interval;
+	dev_vdbg(&akm->i2c->dev, "%s called\n", __func__);
+	interval = atomic_read(&akm->interval);
+	if (0 <= interval) {
+		schedule_delayed_work(&akm->work,
+				msecs_to_jiffies(interval));
+		akecs_setmode_sngmeasure(akm);
+	}
+}
+/* Enable supplies via the board hook, then run its setup; undo the
+ * supply enable if setup fails. */
+static int akm8975_device_power_on(struct akm8975_data *akm)
+{
+	int ret;
+	akm->pdata->hw_config(1);
+	ret = akm->pdata->setup();
+	if (ret)
+		akm->pdata->hw_config(0);
+	return ret;
+}
+/* Mirror of akm8975_device_power_on(): shutdown, then cut supplies. */
+static void akm8975_device_power_off(struct akm8975_data *akm)
+{
+	akm->pdata->shutdown();
+	akm->pdata->hw_config(0);
+}
+/*
+ * akm8975_suspend() - stop the measurement loop and power the part down
+ * (only if userspace has the input device open), then drop power mode.
+ */
+static int akm8975_suspend(struct device *dev)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	int ret = 0;
+	dev_vdbg(&akm->i2c->dev, "%s called\n", __func__);
+	if (akm->input->users) {
+		cancel_delayed_work_sync(&akm->work);
+		ret = akecs_setmode_powerdown(akm);
+		akm8975_device_power_off(akm);
+	}
+	akm->pdata->power_mode(0);
+	return ret;
+}
+/*
+ * akm8975_resume() - restore power mode and, if a periodic measurement
+ * was active and the input device is open, power the part back on and
+ * restart the measurement loop.
+ */
+static int akm8975_resume(struct device *dev)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	int interval;
+	int ret = 0;
+	dev_vdbg(&akm->i2c->dev, "%s called\n", __func__);
+	akm->pdata->power_mode(1);
+	interval = atomic_read(&akm->interval);
+	if ((0 <= interval) && (akm->input->users)) {
+		ret = akm8975_device_power_on(akm);
+		/* The failure message was previously printed on success
+		 * (inside "if (!ret)"); log it on failure instead and only
+		 * restart the loop when power-on actually succeeded. */
+		if (ret)
+			dev_vdbg(&akm->i2c->dev, "%s power on failed.\n",
+					__func__);
+		else
+			schedule_delayed_work(&akm->work,
+					msecs_to_jiffies(interval));
+	}
+	return ret;
+}
+/***** internal utility functions for SysFS *****/
+/*
+ * create_device_attributes() - create sysfs files for a NULL-terminated
+ * attribute array; on failure, remove the ones created so far.
+ * NOTE(review): the unwind loop starts at the index that just failed,
+ * so it also "removes" the file that was never created — harmless if
+ * device_remove_file tolerates that, but worth confirming.
+ */
+static int create_device_attributes(struct device *dev,
+				    struct device_attribute *attrs)
+{
+	int i;
+	int err = 0;
+	for (i = 0 ; NULL != attrs[i].attr.name ; ++i) {
+		err = device_create_file(dev, &attrs[i]);
+		if (err)
+			break;
+	}
+	if (err) {
+		for (; i >= 0 ; --i)
+			device_remove_file(dev, &attrs[i]);
+	}
+	return err;
+}
+/* Remove every attribute of the NULL-terminated @attrs array. */
+static void remove_device_attributes(struct device *dev,
+				     struct device_attribute *attrs)
+{
+	int i;
+	for (i = 0 ; NULL != attrs[i].attr.name ; ++i)
+		device_remove_file(dev, &attrs[i]);
+}
+/* Create every binary attribute of the NULL-terminated @attrs array
+ * under @kobj.  On failure, remove only the attributes that were
+ * actually created and return the error.
+ */
+static int create_device_binary_attributes(struct kobject *kobj,
+					   struct bin_attribute *attrs)
+{
+	int i;
+	int err = 0;
+	for (i = 0 ; NULL != attrs[i].attr.name ; ++i) {
+		err = sysfs_create_bin_file(kobj, &attrs[i]);
+		if (err)
+			break;
+	}
+	if (err) {
+		/* attrs[i] itself was never created: unwind from i-1
+		 * (the original loop also removed the failed entry). */
+		for (--i; i >= 0 ; --i)
+			sysfs_remove_bin_file(kobj, &attrs[i]);
+	}
+	return err;
+}
+/* Remove every binary attribute of the NULL-terminated @attrs array. */
+static void remove_device_binary_attributes(struct kobject *kobj,
+					    struct bin_attribute *attrs)
+{
+	int i;
+	for (i = 0 ; NULL != attrs[i].attr.name ; ++i)
+		sysfs_remove_bin_file(kobj, &attrs[i]);
+}
+/*****************************************************************************
+ *
+ * SysFS attribute functions
+ *
+ * directory : /sys/class/compass/akm8975/
+ * files :
+ * - interval [rw] [t] : store measurement interval
+ * - selftest [rw] [t] : device's self test mode
+ * - single [w] [t] : single-shot trigger
+ * - registers [r] [b] : get all registers value
+ * - regacc [rw] [b] : provide direct register access
+ *
+ * [b] = binary format
+ * [t] = text format
+ *
+ */
+/*********** interval (TEXT) ***********/
+/* sysfs 'interval' read: current polling interval in ms (negative
+ * means polling is disabled — see probe, which initializes it to -1). */
+static ssize_t attr_interval_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	dev_vdbg(dev, "%s called\n", __func__);
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&akm->interval));
+}
+/* sysfs 'interval' write: set the polling interval in ms.  Values in
+ * [0, AKM8975_WAIT_TIME_MS) are clamped up to the minimum measurement
+ * time; a negative value stops polling.  The work is (re)scheduled
+ * only while the input device is open. */
+static ssize_t attr_interval_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	long interval = 0;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (0 == count)
+		return 0;
+	if (strict_strtol(buf, AKM8975_BASE_NUM, &interval))
+		return -EINVAL;
+	if ((0 <= interval) && (interval < AKM8975_WAIT_TIME_MS))
+		interval = AKM8975_WAIT_TIME_MS;
+	atomic_set(&akm->interval, interval);
+	if ((0 <= interval) && (akm->input->users))
+		schedule_delayed_work(
+			&akm->work,
+			msecs_to_jiffies(interval));
+	else
+		cancel_delayed_work_sync(&akm->work);
+	return count;
+}
+/*********** selftest (TEXT) ***********/
+/* sysfs 'selftest' read: report the busy flag (non-zero while a
+ * measurement/self-test is in flight). */
+static ssize_t attr_selftest_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	dev_vdbg(dev, "%s called\n", __func__);
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&akm->is_busy));
+}
+/* sysfs 'selftest' write: take an ASTC register value (one unsigned
+ * byte) and enter self-test mode.  Rejected with -EBUSY while a
+ * measurement is in flight or continuous polling is enabled. */
+static ssize_t attr_selftest_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	long astc_value = 0;
+	int err;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (0 == count)
+		return 0;
+	if (strict_strtol(buf, AKM8975_BASE_NUM, &astc_value))
+		return -EINVAL;
+	if ((astc_value < UCHAR_MIN) || (UCHAR_MAX < astc_value))
+		return -EINVAL;
+	if ((0 != atomic_read(&akm->is_busy)) ||
+	    (0 <= atomic_read(&akm->interval)))
+		return -EBUSY;
+	err = akecs_setmode_selftest(akm, (unsigned char)astc_value);
+	if (err)
+		return err;
+	return count;
+}
+/*********** single (TEXT) ***********/
+/* sysfs 'single' write: any non-zero value triggers one single-shot
+ * measurement (zero is accepted and ignored).  Rejected with -EBUSY
+ * while busy or while continuous polling is enabled. */
+static ssize_t attr_single_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	long single_value;
+	int err;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (0 == count)
+		return 0;
+	if (strict_strtol(buf, AKM8975_BASE_NUM, &single_value))
+		return -EINVAL;
+	if (0 == single_value)
+		return count;
+	if ((0 != atomic_read(&akm->is_busy)) ||
+	    (0 <= atomic_read(&akm->interval)))
+		return -EBUSY;
+	err = akecs_setmode_sngmeasure(akm);
+	if (err)
+		return err;
+	return count;
+}
+/*********** registers (BINARY) ***********/
+/* sysfs binary 'registers' read: dump the live register block starting
+ * at WIA from the chip, followed by the cached fuse-ROM bytes, copying
+ * at most @size bytes.
+ * NOTE(review): @pos is ignored, so a partial read restarts from the
+ * first register — confirm callers always read the attribute in one go. */
+static ssize_t bin_attr_registers_read(struct file *file,
+				       struct kobject *kobj,
+				       struct bin_attribute *attr,
+				       char *buf, loff_t pos, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	unsigned char reg[AK8975_DATA_SIZE];
+	int err;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (0 == size)
+		return 0;
+	reg[0] = AK8975_REG_WIA;	/* start address for the burst read */
+	err = aki2c_rxdata(akm->i2c, reg, AK8975_DATA_SIZE);
+	if (err)
+		return err;
+	if (AK8975_DATA_SIZE > size) {
+		/* Caller's buffer holds only part of the live registers. */
+		memcpy(&buf[0], reg, size);
+	} else {
+		memcpy(&buf[0], reg, AK8975_DATA_SIZE);
+		if (AK8975_REGS_SIZE > size) {
+			/* Partial fuse-ROM tail. */
+			memcpy(&buf[AK8975_DATA_SIZE], akm->fuse,
+			       size - AK8975_DATA_SIZE);
+		} else {
+			memcpy(&buf[AK8975_DATA_SIZE], akm->fuse,
+			       AK8975_FUSE_SIZE);
+		}
+	}
+	return (size < AK8975_REGS_SIZE) ? size : AK8975_REGS_SIZE;
+}
+/*********** regacc (BINARY) ***********/
+/* sysfs binary 'regacc' read: return one byte from the register whose
+ * address was stored by a prior 'regacc' write.  Fuse-ROM addresses
+ * are served from the cached copy instead of touching the bus. */
+static ssize_t bin_attr_regacc_read(struct file *file, struct kobject *kobj,
+				    struct bin_attribute *attr, char *buf,
+				    loff_t pos, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	unsigned char reg;
+	int err;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (size == 0)
+		return 0;
+	reg = atomic_read(&akm->reg_addr);
+	if ((AK8975_FUSE_ASAX <= reg) && (reg <= AK8975_FUSE_ASAZ)) {
+		buf[0] = akm->fuse[reg - AK8975_FUSE_ASAX];
+	} else {
+		/* reg doubles as the address on write and value on read. */
+		err = aki2c_rxdata(akm->i2c, &reg, sizeof(reg));
+		if (err)
+			return err;
+		buf[0] = reg;
+	}
+	return 1;
+}
+/* sysfs binary 'regacc' write: store the target register address used
+ * by subsequent 'regacc' reads; must lie in [WIA, FUSE_ASAZ]. */
+static ssize_t bin_attr_regacc_write(struct file *file, struct kobject *kobj,
+				     struct bin_attribute *attr, char *buf,
+				     loff_t pos, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct akm8975_data *akm = dev_get_drvdata(dev);
+	unsigned char addr;
+	dev_vdbg(dev, "%s called\n", __func__);
+	if (size == 0)
+		return 0;
+	addr = (unsigned char)buf[0];
+	if ((addr < AK8975_REG_WIA) || (AK8975_FUSE_ASAZ < addr))
+		return -ERANGE;
+	atomic_set(&akm->reg_addr, addr);
+	return size;
+}
+/* Local stand-ins building struct bin_attribute initializers and the
+ * array terminator. */
+#define __BIN_ATTR(name_, mode_, size_, private_, read_, write_) \
+	{ \
+		.attr = { .name = __stringify(name_), .mode = mode_ }, \
+		.size = size_, \
+		.private = private_, \
+		.read = read_, \
+		.write = write_, \
+	}
+#define __BIN_ATTR_NULL \
+	{ \
+		.attr = { .name = NULL }, \
+	}
+/* Text attributes: interval/selftest rw for root, single write-only. */
+static struct device_attribute akm8975_attributes[] = {
+	__ATTR(interval, 0600, attr_interval_show, attr_interval_store),
+	__ATTR(selftest, 0600, attr_selftest_show, attr_selftest_store),
+	__ATTR(single, 0200, NULL, attr_single_store),
+	__ATTR_NULL,
+};
+/* Binary attributes: 13-byte register dump, 1-byte register access. */
+static struct bin_attribute akm8975_bin_attributes[] = {
+	__BIN_ATTR(registers, 0400, 13, NULL, bin_attr_registers_read, NULL),
+	__BIN_ATTR(regacc, 0600, 1, NULL,
+		   bin_attr_regacc_read, bin_attr_regacc_write),
+	__BIN_ATTR_NULL
+};
+/* sysfs naming and the fixed dev_t used for the class device. */
+static char const * const compass_class_name = "compass";
+static char const * const akm8975_device_name = "akm8975";
+static char const * const device_link_name = "i2c";
+static const dev_t akm8975_device_dev_t = MKDEV(MISC_MAJOR, 240);
+/* Build /sys/class/compass/akm8975/: class, class device, an 'i2c'
+ * link back to the bus device, the text attributes and the binary
+ * attributes.  Fully unwinds (labels in reverse order) on any error. */
+static int create_sysfs_interfaces(struct akm8975_data *akm)
+{
+	int err;
+	if (NULL == akm)
+		return -EINVAL;
+	akm->compass = class_create(THIS_MODULE, compass_class_name);
+	if (IS_ERR(akm->compass)) {
+		err = PTR_ERR(akm->compass);
+		goto exit_class_create_failed;
+	}
+	akm->class_dev = device_create(akm->compass, NULL,
+				       akm8975_device_dev_t, akm,
+				       akm8975_device_name);
+	if (IS_ERR(akm->class_dev)) {
+		err = PTR_ERR(akm->class_dev);
+		goto exit_class_device_create_failed;
+	}
+	err = sysfs_create_link(&akm->class_dev->kobj, &akm->i2c->dev.kobj,
+				device_link_name);
+	if (err)
+		goto exit_sysfs_create_link_failed;
+	err = create_device_attributes(akm->class_dev, akm8975_attributes);
+	if (err)
+		goto exit_device_attributes_create_failed;
+	err = create_device_binary_attributes(&akm->class_dev->kobj,
+					      akm8975_bin_attributes);
+	if (err)
+		goto exit_device_binary_attributes_create_failed;
+	return err;
+exit_device_binary_attributes_create_failed:
+	remove_device_attributes(akm->class_dev, akm8975_attributes);
+exit_device_attributes_create_failed:
+	sysfs_remove_link(&akm->class_dev->kobj, device_link_name);
+exit_sysfs_create_link_failed:
+	device_destroy(akm->compass, akm8975_device_dev_t);
+exit_class_device_create_failed:
+	akm->class_dev = NULL;
+	class_destroy(akm->compass);
+exit_class_create_failed:
+	akm->compass = NULL;
+	return err;
+}
+/* Tear down everything create_sysfs_interfaces() built; safe to call
+ * with a partially-initialized (or NULL) akm. */
+static void remove_sysfs_interfaces(struct akm8975_data *akm)
+{
+	if (NULL == akm)
+		return;
+	if (NULL != akm->class_dev) {
+		remove_device_binary_attributes(&akm->class_dev->kobj,
+						akm8975_bin_attributes);
+		remove_device_attributes(akm->class_dev, akm8975_attributes);
+		sysfs_remove_link(&akm->class_dev->kobj, device_link_name);
+		akm->class_dev = NULL;
+	}
+	if (NULL != akm->compass) {
+		device_destroy(akm->compass, akm8975_device_dev_t);
+		class_destroy(akm->compass);
+		akm->compass = NULL;
+	}
+}
+/* input open(): power the chip up and, if a polling interval is
+ * configured, start the measurement work. */
+static int akm8975_open(struct input_dev *dev)
+{
+	struct akm8975_data *akm = input_get_drvdata(dev);
+	int ret;
+	int interval;
+	ret = akm8975_device_power_on(akm);
+	if (ret) {
+		dev_err(&akm->i2c->dev, "%s: power on failed.\n", __func__);
+		goto exit;
+	}
+	interval = atomic_read(&akm->interval);
+	if (0 <= interval)
+		schedule_delayed_work(
+			&akm->work,
+			msecs_to_jiffies(interval));
+exit:
+	return ret;
+}
+/* input close(): stop the measurement work and power the chip down. */
+static void akm8975_close(struct input_dev *dev)
+{
+	struct akm8975_data *akm = input_get_drvdata(dev);
+	cancel_delayed_work_sync(&akm->work);
+	akm8975_device_power_off(akm);
+}
+/* Reboot-notifier callback: stop the measurement work before the
+ * system goes down so no I2C traffic races the shutdown. */
+static int akm8975_cancel_work
+(struct notifier_block *this, unsigned long code, void *_cmd)
+{
+	struct akm8975_data *akm =
+		container_of(this, struct akm8975_data, pnotifier);
+	cancel_delayed_work_sync(&akm->work);
+	return NOTIFY_DONE;
+}
+/* Template copied into akm->pnotifier at probe time. */
+static struct notifier_block akm8975_poweroff_notifier = {
+	.notifier_call = akm8975_cancel_work,
+	.priority = 1,
+};
+
+/* I2C probe: validate the mandatory platform hooks, power the chip,
+ * verify the device ID and cache the fuse ROM, then register the
+ * input device, the DRDY threaded IRQ, the sysfs class interface and
+ * a reboot notifier.  Error labels unwind in reverse order.
+ */
+int akm8975_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct akm8975_data *akm;
+	int err = 0;
+	struct akm8975_platform_data *pdata = client->dev.platform_data;
+	dev_vdbg(&client->dev, "%s called\n", __func__);
+	if (!pdata || !pdata->power_mode || !pdata->hw_config ||
+	    !pdata->setup || !pdata->shutdown) {
+		printk(KERN_ERR "%s: platform data is NULL\n", __func__);
+		/* Missing platform data is an invalid argument, not an
+		 * out-of-memory condition (was -ENOMEM). */
+		err = -EINVAL;
+		goto err_pdata;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev,
+			"%s: check_functionality failed.", __func__);
+		err = -ENODEV;
+		goto err_i2c_check;
+	}
+	akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+	if (!akm) {
+		dev_err(&client->dev,
+			"%s: memory allocation failed.", __func__);
+		err = -ENOMEM;
+		goto err_kzalloc;
+	}
+	akm->i2c = client;
+	akm->pdata = pdata;
+	atomic_set(&akm->interval, -1);	/* polling disabled by default */
+	pdata->power_mode(1);
+	err = akm8975_device_power_on(akm);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: device power on failed.\n", __func__);
+		goto err_power_on;
+	}
+	err = akecs_checkdevice(akm);
+	if (err < 0) {
+		akm8975_device_power_off(akm);
+		goto err_check_device;
+	}
+	err = akecs_read_fuse(akm);
+	akm8975_device_power_off(akm);
+	if (err < 0)
+		goto err_read_fuse;
+	akm->input = input_allocate_device();
+	if (!akm->input) {
+		err = -ENOMEM;
+		dev_err(&client->dev,
+			"%s: Failed to allocate input device.", __func__);
+		goto err_input_allocate_device;
+	}
+	input_set_drvdata(akm->input, akm);
+	set_bit(EV_ABS, akm->input->evbit);
+	akm->input->open = akm8975_open;
+	akm->input->close = akm8975_close;
+	akm->input->name = akm8975_device_name;
+/* HX: 14-bit */
+	input_set_abs_params(akm->input, ABS_RX, -8188, 8188, 0, 0);
+/* HY: 14-bit */
+	input_set_abs_params(akm->input, ABS_RY, -8188, 8188, 0, 0);
+/* HZ: 14-bit */
+	input_set_abs_params(akm->input, ABS_RZ, -8188, 8188, 0, 0);
+/* ST2 */
+	input_set_abs_params(akm->input, ABS_RUDDER, 0, 12, 0, 0);
+	err = input_register_device(akm->input);
+	if (err) {
+		/* Registration failed: the device was never registered,
+		 * so freeing it here (and skipping unregister below) is
+		 * the correct cleanup. */
+		input_free_device(akm->input);
+		dev_err(&client->dev,
+			"%s: Unable to register input device.", __func__);
+		goto err_input_register_device;
+	}
+	INIT_DELAYED_WORK(&akm->work, akm8975_continuous_measure);
+	err = request_threaded_irq(client->irq, NULL, akm8975_irq,
+				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				   dev_name(&client->dev), akm);
+	if (err) {
+		/* Fix: do NOT input_free_device() here — the device is
+		 * registered and err_request_th_irq unregisters it, which
+		 * drops the final reference.  Freeing it here as well was
+		 * a double free / use-after-free. */
+		dev_err(&client->dev, "%s: request irq failed.", __func__);
+		goto err_request_th_irq;
+	}
+	err = create_sysfs_interfaces(akm);
+	if (err) {
+		/* Fix: same here — input_unregister_device() on the
+		 * unwind path releases the input device. */
+		dev_err(&client->dev,
+			"%s: create sysfs failed.", __func__);
+		goto err_create_sysfs_interfaces;
+	}
+	akm->pnotifier = akm8975_poweroff_notifier;
+	err = register_reboot_notifier(&akm->pnotifier);
+	if (err)
+		goto err_register_reboot_notifier;
+	i2c_set_clientdata(client, akm);
+	dev_info(&client->dev, "successfully probed.");
+	return 0;
+err_register_reboot_notifier:
+	remove_sysfs_interfaces(akm);
+err_create_sysfs_interfaces:
+	free_irq(client->irq, akm);
+err_request_th_irq:
+	input_unregister_device(akm->input);
+err_input_register_device:
+err_input_allocate_device:
+err_read_fuse:
+err_check_device:
+err_power_on:
+	/* NOTE(review): pdata->power_mode(1) is not undone on these error
+	 * paths, so board power stays enabled after a failed probe —
+	 * confirm whether power_mode(0) should be called here. */
+	kfree(akm);
+err_kzalloc:
+err_i2c_check:
+err_pdata:
+	return err;
+}
+/* I2C remove: undo probe in reverse order — sysfs, input device, IRQ,
+ * device power, board power — then free the instance. */
+static int akm8975_remove(struct i2c_client *client)
+{
+	struct akm8975_data *akm = i2c_get_clientdata(client);
+	dev_vdbg(&client->dev, "%s called\n", __func__);
+	remove_sysfs_interfaces(akm);
+	input_unregister_device(akm->input);
+	free_irq(client->irq, akm);
+	akm8975_device_power_off(akm);
+	akm->pdata->power_mode(0);
+	kfree(akm);
+	dev_dbg(&client->dev, "successfully removed.");
+	return 0;
+}
+/* I2C device id table (single entry, matched by name). */
+static const struct i2c_device_id akm8975_id[] = {
+	{AKM8975_I2C_NAME, 0 },
+	{ }
+};
+/* Legacy (non-runtime) system suspend/resume hooks. */
+static const struct dev_pm_ops akm8975_pm_ops = {
+	.suspend	= akm8975_suspend,
+	.resume		= akm8975_resume,
+};
+static struct i2c_driver akm8975_driver = {
+	.probe		= akm8975_probe,
+	.remove		= akm8975_remove,
+	.id_table	= akm8975_id,
+	.driver = {
+		.name = AKM8975_I2C_NAME,
+		.owner = THIS_MODULE,
+		.pm = &akm8975_pm_ops,
+	},
+};
+/* Module entry point: register the I2C driver. */
+static int __init akm8975_init(void)
+{
+	return i2c_add_driver(&akm8975_driver);
+}
+/* Module exit point: unregister the I2C driver. */
+static void __exit akm8975_exit(void)
+{
+	i2c_del_driver(&akm8975_driver);
+}
+module_init(akm8975_init);
+module_exit(akm8975_exit);
+
+MODULE_AUTHOR("SEMC");
+MODULE_DESCRIPTION("AKM8975 compass driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 00000000000000..0acf4a576f53a2
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+/* Per-axis runtime state: the owning input-device set, the board
+ * description, and the last decoded position. */
+struct gpio_axis_state {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_axis_info *info;
+	uint32_t pos;
+};
+
+/* Decode a 4-bit Gray-coded GPIO sample to a linear position 0..15. */
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+	[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+	[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+	[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+	[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+	[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+	[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+	[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+	[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_4bit_gray_map_table[in];
+}
+
+/* Decode a 5-wire single-track code sample to a position 0..29
+ * (invalid codes map to 0 via the sparse initializer). */
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+	[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */
+	[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */
+	[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */
+	[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */
+	[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */
+	[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */
+	[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */
+	[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */
+	[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */
+	[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+	struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+/* Sample the axis GPIOs (LSB = gpio[0]), decode through the board map
+ * and, when @report is set and the position moved, emit an input
+ * event.  For EV_REL axes the shortest signed distance around the
+ * wheel is reported; an exactly half-turn change is direction-
+ * ambiguous and dropped. */
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+	struct gpio_event_axis_info *ai = as->info;
+	int i;
+	int change;
+	uint16_t state = 0;
+	uint16_t pos;
+	uint16_t old_pos = as->pos;
+	for (i = ai->count - 1; i >= 0; i--)
+		state = (state << 1) | gpio_get_value(ai->gpio[i]);
+	pos = ai->map(ai, state);
+	if (ai->flags & GPIOEAF_PRINT_RAW)
+		pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+			ai->type, ai->code, state, old_pos, pos);
+	if (report && pos != old_pos) {
+		if (ai->type == EV_REL) {
+			/* Wrap the delta into [0, decoded_size) and fold
+			 * it to the nearest signed direction. */
+			change = (ai->decoded_size + pos - old_pos) %
+				  ai->decoded_size;
+			if (change > ai->decoded_size / 2)
+				change -= ai->decoded_size;
+			if (change == ai->decoded_size / 2) {
+				if (ai->flags & GPIOEAF_PRINT_EVENT)
+					pr_info("axis %d-%d unknown direction, "
+						"pos %d -> %d\n", ai->type,
+						ai->code, old_pos, pos);
+				change = 0; /* no closest direction */
+			}
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d change %d\n",
+					ai->type, ai->code, change);
+			input_report_rel(as->input_devs->dev[ai->dev],
+					 ai->code, change);
+		} else {
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d now %d\n",
+					ai->type, ai->code, pos);
+			input_event(as->input_devs->dev[ai->dev],
+				    ai->type, ai->code, pos);
+		}
+		input_sync(as->input_devs->dev[ai->dev]);
+	}
+	as->pos = pos;
+}
+
+/* Edge IRQ on any axis GPIO: re-sample and report the new position. */
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_axis_state *as = dev_id;
+	gpio_event_update_axis(as, 1);
+	return IRQ_HANDLED;
+}
+
+/* gpio_event function hook for one rotary axis.
+ * SUSPEND/RESUME: mask/unmask the per-GPIO IRQs.
+ * INIT: allocate state, declare the input capability, claim each GPIO
+ * as input and attach a both-edge IRQ handler, then publish the
+ * initial position without reporting.
+ * UNINIT (fall-through below): release IRQs and GPIOs in reverse.
+ * The error labels are intentionally placed INSIDE the cleanup loop so
+ * a failed INIT jumps into the loop at exactly the point matching the
+ * resources already acquired for gpio[i]. */
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			 struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	int irq;
+	struct gpio_event_axis_info *ai;
+	struct gpio_axis_state *as;
+
+	ai = container_of(info, struct gpio_event_axis_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		for (i = 0; i < ai->count; i++)
+			disable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		for (i = 0; i < ai->count; i++)
+			enable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+		if (as == NULL) {
+			ret = -ENOMEM;
+			goto err_alloc_axis_state_failed;
+		}
+		as->input_devs = input_devs;
+		as->info = ai;
+		if (ai->dev >= input_devs->count) {
+			pr_err("gpio_event_axis: bad device index %d >= %d "
+			       "for %d:%d\n", ai->dev, input_devs->count,
+			       ai->type, ai->code);
+			ret = -EINVAL;
+			goto err_bad_device_index;
+		}
+
+		input_set_capability(input_devs->dev[ai->dev],
+				     ai->type, ai->code);
+		if (ai->type == EV_ABS) {
+			input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+					     0, ai->decoded_size - 1, 0, 0);
+		}
+		for (i = 0; i < ai->count; i++) {
+			ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+			if (ret < 0)
+				goto err_request_gpio_failed;
+			ret = gpio_direction_input(ai->gpio[i]);
+			if (ret < 0)
+				goto err_gpio_direction_input_failed;
+			ret = irq = gpio_to_irq(ai->gpio[i]);
+			if (ret < 0)
+				goto err_get_irq_num_failed;
+			ret = request_irq(irq, gpio_axis_irq_handler,
+					  IRQF_TRIGGER_RISING |
+					  IRQF_TRIGGER_FALLING,
+					  "gpio_event_axis", as);
+			if (ret < 0)
+				goto err_request_irq_failed;
+		}
+		/* Seed as->pos with the current state; no event emitted. */
+		gpio_event_update_axis(as, 0);
+		return 0;
+	}
+
+	/* UNINIT, or unwind from a failed INIT (via the labels). */
+	ret = 0;
+	as = *data;
+	for (i = ai->count - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+		gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+		;
+	}
+err_bad_device_index:
+	kfree(as);
+	*data = NULL;
+err_alloc_axis_state_failed:
+	return ret;
+}
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 00000000000000..c7f396ab3eabae
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,239 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Driver instance: the allocated input-device set, the platform
+ * description, and one opaque state slot per gpio_event_info entry. */
+struct gpio_event {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_platform_data *info;
+	void *state[0];	/* one slot per info[] element */
+};
+
+/* input_dev->event hook: locate which of our devices fired, then fan
+ * the event out to every gpio_event_info that provides an .event
+ * handler.  The last non-zero handler status wins as the result. */
+static int gpio_input_event(
+	struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+	int i;
+	int devnr;
+	int ret = 0;
+	int tmp_ret;
+	struct gpio_event_info **ii;
+	struct gpio_event *ip = input_get_drvdata(dev);
+
+	for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+		if (ip->input_devs->dev[devnr] == dev)
+			break;
+	if (devnr == ip->input_devs->count) {
+		pr_err("gpio_input_event: unknown device %p\n", dev);
+		return -EIO;
+	}
+
+	for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+		if ((*ii)->event) {
+			tmp_ret = (*ii)->event(ip->input_devs, *ii,
+					       &ip->state[i],
+					       devnr, type, code, value);
+			if (tmp_ret)
+				ret = tmp_ret;
+		}
+	}
+	return ret;
+}
+
+/* Dispatch @func to every gpio_event_info.  INIT/RESUME iterate
+ * forward and, on failure, fall into the reverse loop below (via the
+ * labels) to unwind the entries already initialized.  Other funcs
+ * iterate backwards over all entries; `func & ~1` maps a failed
+ * forward func to its matching teardown counterpart — presumably
+ * INIT/UNINIT and RESUME/SUSPEND are odd/even pairs (TODO confirm
+ * against gpio_event.h). */
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+	int i;
+	int ret;
+	struct gpio_event_info **ii;
+
+	if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+		ii = ip->info->info;
+		for (i = 0; i < ip->info->info_count; i++, ii++) {
+			if ((*ii)->func == NULL) {
+				ret = -ENODEV;
+				pr_err("gpio_event_probe: Incomplete pdata, "
+				       "no function\n");
+				goto err_no_func;
+			}
+			if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+				continue;
+			ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+					  func);
+			if (ret) {
+				pr_err("gpio_event_probe: function failed\n");
+				goto err_func_failed;
+			}
+		}
+		return 0;
+	}
+
+	/* Teardown path (also the unwind target for the labels above). */
+	ret = 0;
+	i = ip->info->info_count;
+	ii = ip->info->info + i;
+	while (i > 0) {
+		i--;
+		ii--;
+		if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+			continue;
+		(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+		;
+	}
+	return ret;
+}
+
+/* Suspend all sub-functions, then drop platform power (if provided). */
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+}
+
+/* Restore platform power (if provided), then resume sub-functions. */
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+	if (ip->info->power)
+		ip->info->power(ip->info, 1);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+/* Platform probe: allocate one combined block holding the instance,
+ * the per-info state slots and the input-device array (sized by the
+ * NULL-terminated names[] list when no single name is given), allocate
+ * and register the input devices, and run every sub-function's INIT.
+ * The error path frees registered and unregistered devices separately
+ * via the `registered` counter, with labels jumping into the loops. */
+static int gpio_event_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpio_event *ip;
+	struct gpio_event_platform_data *event_info;
+	int dev_count = 1;
+	int i;
+	int registered = 0;
+
+	event_info = pdev->dev.platform_data;
+	if (event_info == NULL) {
+		pr_err("gpio_event_probe: No pdata\n");
+		return -ENODEV;
+	}
+	if ((!event_info->name && !event_info->names[0]) ||
+	    !event_info->info || !event_info->info_count) {
+		pr_err("gpio_event_probe: Incomplete pdata\n");
+		return -ENODEV;
+	}
+	if (!event_info->name)
+		while (event_info->names[dev_count])
+			dev_count++;
+	/* Single allocation: gpio_event + state[] + input_devs header +
+	 * dev[] array; input_devs lives right after the state slots. */
+	ip = kzalloc(sizeof(*ip) +
+		     sizeof(ip->state[0]) * event_info->info_count +
+		     sizeof(*ip->input_devs) +
+		     sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+	if (ip == NULL) {
+		err = -ENOMEM;
+		pr_err("gpio_event_probe: Failed to allocate private data\n");
+		goto err_kp_alloc_failed;
+	}
+	ip->input_devs = (void*)&ip->state[event_info->info_count];
+	platform_set_drvdata(pdev, ip);
+
+	for (i = 0; i < dev_count; i++) {
+		struct input_dev *input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			err = -ENOMEM;
+			pr_err("gpio_event_probe: "
+			       "Failed to allocate input device\n");
+			goto err_input_dev_alloc_failed;
+		}
+		input_set_drvdata(input_dev, ip);
+		input_dev->name = event_info->name ?
+				  event_info->name : event_info->names[i];
+		input_dev->event = gpio_input_event;
+		ip->input_devs->dev[i] = input_dev;
+	}
+	ip->input_devs->count = dev_count;
+	ip->info = event_info;
+	if (event_info->power)
+		ip->info->power(ip->info, 1);
+
+	err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+	if (err)
+		goto err_call_all_func_failed;
+
+	for (i = 0; i < dev_count; i++) {
+		err = input_register_device(ip->input_devs->dev[i]);
+		if (err) {
+			pr_err("gpio_event_probe: Unable to register %s "
+			       "input device\n", ip->input_devs->dev[i]->name);
+			goto err_input_register_device_failed;
+		}
+		registered++;
+	}
+
+	return 0;
+
+err_input_register_device_failed:
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+	if (event_info->power)
+		ip->info->power(ip->info, 0);
+	/* Registered devices are unregistered (which also frees them);
+	 * the rest — including the one that failed, or those never
+	 * allocated when jumping in mid-loop — are freed explicitly. */
+	for (i = 0; i < registered; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	for (i = dev_count - 1; i >= registered; i--) {
+		input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+		;
+	}
+	kfree(ip);
+err_kp_alloc_failed:
+	return err;
+}
+
+/* Platform remove: UNINIT all sub-functions, drop platform power,
+ * unregister every input device and free the combined allocation. */
+static int gpio_event_remove(struct platform_device *pdev)
+{
+	struct gpio_event *ip = platform_get_drvdata(pdev);
+	int i;
+
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < ip->input_devs->count; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	kfree(ip);
+	return 0;
+}
+
+/* Platform driver matched by name against board gpio_event devices. */
+static struct platform_driver gpio_event_driver = {
+	.probe		= gpio_event_probe,
+	.remove		= gpio_event_remove,
+	.driver		= {
+		.name	= GPIO_EVENT_DEV_NAME,
+	},
+};
+
+/* Module entry point: register the platform driver.
+ * Fix: annotated __init, not __devinit — this function is referenced
+ * from module_init(), not a device-probe path, and __devinit sections
+ * are discarded when hotplug support is compiled out, which makes the
+ * original annotation a section mismatch. */
+static int __init gpio_event_init(void)
+{
+	return platform_driver_register(&gpio_event_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit gpio_event_exit(void)
+{
+	platform_driver_unregister(&gpio_event_driver);
+}
+
+module_init(gpio_event_init);
+module_exit(gpio_event_exit);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 00000000000000..6a0c3151096856
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,376 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+/* Per-key debounce state flags, kept in gpio_key_state.debounce. */
+enum {
+	DEBOUNCE_UNSTABLE = BIT(0),	/* Got irq, while debouncing */
+	DEBOUNCE_PRESSED = BIT(1),	/* Last sample saw the key pressed */
+	DEBOUNCE_NOTPRESSED = BIT(2),	/* Last sample saw the key released */
+	DEBOUNCE_WAIT_IRQ = BIT(3),	/* Stable irq state */
+	DEBOUNCE_POLL = BIT(4),	/* Stable polling state */
+
+	/* Both candidate states set: debounce in progress, outcome open */
+	DEBOUNCE_UNKNOWN =
+		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+/* Per-key debounce tracking; one entry per keymap entry. */
+struct gpio_key_state {
+	struct gpio_input_state *ds;	/* back-pointer for the irq handler */
+	uint8_t debounce;		/* DEBOUNCE_* flags */
+};
+
+/* Driver state for one gpio_event_input_info instance. */
+struct gpio_input_state {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_input_info *info;
+	struct hrtimer timer;		/* debounce/poll timer */
+	int use_irq;			/* non-zero when irqs were acquired */
+	int debounce_count;		/* keys currently being debounced */
+	spinlock_t irq_lock;
+	struct wake_lock wake_lock;	/* held while debouncing in irq mode */
+	struct gpio_key_state key_state[0];	/* keymap_size entries follow */
+};
+
+/*
+ * Debounce/scan timer.  For every key that is not parked waiting for an
+ * irq it samples the gpio, advances the per-key debounce state and, once
+ * a key becomes stable, reports the new value to the input core.  The
+ * timer re-arms itself with debounce_time while ds->debounce_count keys
+ * are still settling, with poll_time forever in polling mode, and in irq
+ * mode drops the wake lock once everything is stable.
+ */
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+	int i;
+	int pressed;
+	struct gpio_input_state *ds =
+		container_of(timer, struct gpio_input_state, timer);
+	unsigned gpio_flags = ds->info->flags;
+	unsigned npolarity;
+	int nkeys = ds->info->keymap_size;
+	const struct gpio_event_direct_entry *key_entry;
+	struct gpio_key_state *key_state;
+	unsigned long irqflags;
+	uint8_t debounce;
+	bool sync_needed;
+
+#if 0
+	key_entry = kp->keys_info->keymap;
+	key_state = kp->key_state;
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+			gpio_read_detect_status(key_entry->gpio));
+#endif
+	key_entry = ds->info->keymap;
+	key_state = ds->key_state;
+	sync_needed = false;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		debounce = key_state->debounce;
+		/* Stable in irq mode: the irq handler owns this key. */
+		if (debounce & DEBOUNCE_WAIT_IRQ)
+			continue;
+		/* An irq fired mid-debounce: restart and re-enable the irq. */
+		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+			enable_irq(gpio_to_irq(key_entry->gpio));
+			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) continue debounce\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+		}
+		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+		/* Stable in polling mode: only a level change starts debounce. */
+		if (debounce & DEBOUNCE_POLL) {
+			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+				ds->debounce_count++;
+				key_state->debounce = DEBOUNCE_UNKNOWN;
+				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+					pr_info("gpio_keys_scan_keys: key %x-"
+						"%x, %d (%d) start debounce\n",
+						ds->info->type, key_entry->code,
+						i, key_entry->gpio);
+			}
+			continue;
+		}
+		/* Debouncing: latch the sampled level until it repeats. */
+		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 1\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_PRESSED;
+			continue;
+		}
+		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 0\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_NOTPRESSED;
+			continue;
+		}
+		/* key is stable */
+		ds->debounce_count--;
+		if (ds->use_irq)
+			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+		else
+			key_state->debounce |= DEBOUNCE_POLL;
+		if (gpio_flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+				"changed to %d\n", ds->info->type,
+				key_entry->code, i, key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		sync_needed = true;
+	}
+	if (sync_needed) {
+		for (i = 0; i < ds->input_devs->count; i++)
+			input_sync(ds->input_devs->dev[i]);
+	}
+
+#if 0
+	key_entry = kp->keys_info->keymap;
+	key_state = kp->key_state;
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+			gpio_read_detect_status(key_entry->gpio));
+	}
+#endif
+
+	/* Keep ticking while keys debounce; in poll mode tick forever. */
+	if (ds->debounce_count)
+		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+	else if (!ds->use_irq)
+		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+	else
+		wake_unlock(&ds->wake_lock);
+
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Per-key interrupt handler.  With a non-zero debounce_time the key is
+ * handed to the debounce timer: if the key was in a stable irq state the
+ * debounce cycle starts (taking the wake lock for the first key);
+ * otherwise the irq is masked and the key marked unstable until the
+ * timer re-enables it.  With debouncing disabled the event is reported
+ * to the input core immediately.
+ */
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_key_state *ks = dev_id;
+	struct gpio_input_state *ds = ks->ds;
+	int keymap_index = ks - ds->key_state;
+	const struct gpio_event_direct_entry *key_entry;
+	unsigned long irqflags;
+	int pressed;
+
+	/* Spurious irq while still setting up: polling mode ignores it. */
+	if (!ds->use_irq)
+		return IRQ_HANDLED;
+
+	key_entry = &ds->info->keymap[keymap_index];
+
+	if (ds->info->debounce_time.tv64) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+			ks->debounce = DEBOUNCE_UNKNOWN;
+			/* First key to start debouncing arms the timer. */
+			if (ds->debounce_count++ == 0) {
+				wake_lock(&ds->wake_lock);
+				hrtimer_start(
+					&ds->timer, ds->info->debounce_time,
+					HRTIMER_MODE_REL);
+			}
+			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_event_input_irq_handler: "
+					"key %x-%x, %d (%d) start debounce\n",
+					ds->info->type, key_entry->code,
+					keymap_index, key_entry->gpio);
+		} else {
+			/* Irq during debounce: mask until the key settles. */
+			disable_irq_nosync(irq);
+			ks->debounce = DEBOUNCE_UNSTABLE;
+		}
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+	} else {
+		pressed = gpio_get_value(key_entry->gpio) ^
+			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+				"(%d) changed to %d\n",
+				ds->info->type, key_entry->code, keymap_index,
+				key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		input_sync(ds->input_devs->dev[key_entry->dev]);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Request an edge-triggered (both edges) interrupt for every key gpio.
+ * When the board marks this info no_suspend, each irq is also armed as a
+ * wakeup source.  Returns 0 on success or a negative errno.
+ *
+ * The unwind loop after "return 0" is never reached by fall-through: it
+ * is entered only through the labels inside it, with i at the entry that
+ * failed, and walks backwards releasing everything acquired so far.
+ */
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+	for (i = 0; i < ds->info->keymap_size; i++) {
+		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_event_input_irq_handler,
+				  req_flags, "gpio_keys", &ds->key_state[i]);
+		if (err) {
+			pr_err("gpio_event_input_request_irqs: request_irq "
+			       "failed for input %d, irq %d\n",
+			       ds->info->keymap[i].gpio, irq);
+			goto err_request_irq_failed;
+		}
+		if (ds->info->info.no_suspend) {
+			err = enable_irq_wake(irq);
+			if (err) {
+				pr_err("gpio_event_input_request_irqs: "
+				       "enable_irq_wake failed for input %d, "
+				       "irq %d\n",
+				       ds->info->keymap[i].gpio, irq);
+				goto err_enable_irq_wake_failed;
+			}
+		}
+	}
+	return 0;
+
+	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+		irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (ds->info->info.no_suspend)
+			disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+		free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+/*
+ * gpio_event handler for directly connected keys.
+ *
+ * GPIO_EVENT_FUNC_INIT allocates the per-key state, declares the key
+ * capabilities, claims and configures the gpios, then starts the
+ * debounce timer in interrupt mode when all irqs could be requested
+ * (polling mode otherwise).  SUSPEND masks the irqs and cancels the
+ * timer; RESUME re-enables them and kicks the timer so the current
+ * levels are re-read.  The final section (UNINIT) releases everything in
+ * reverse order and doubles as the INIT error unwind via the labels
+ * inside its loops.  Returns 0 on success or a negative errno.
+ */
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	unsigned long irqflags;
+	struct gpio_event_input_info *di;
+	struct gpio_input_state *ds = *data;
+
+	di = container_of(info, struct gpio_event_input_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				disable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_cancel(&ds->timer);
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				enable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (ktime_to_ns(di->poll_time) <= 0)
+			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+					di->keymap_size, GFP_KERNEL);
+		if (ds == NULL) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+			       "Failed to allocate private data\n");
+			goto err_ds_alloc_failed;
+		}
+		/* Every key starts out unknown and must debounce once. */
+		ds->debounce_count = di->keymap_size;
+		ds->input_devs = input_devs;
+		ds->info = di;
+		wake_lock_init(&ds->wake_lock, WAKE_LOCK_SUSPEND, "gpio_input");
+		spin_lock_init(&ds->irq_lock);
+
+		for (i = 0; i < di->keymap_size; i++) {
+			int dev = di->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_input_func: bad device "
+				       "index %d >= %d for key code %d\n",
+				       dev, input_devs->count,
+				       di->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], di->type,
+					     di->keymap[i].code);
+			ds->key_state[i].ds = ds;
+			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+		}
+
+		for (i = 0; i < di->keymap_size; i++) {
+			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+			if (ret) {
+				pr_err("gpio_event_input_func: gpio_request "
+				       "failed for %d\n", di->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_input(di->keymap[i].gpio);
+			if (ret) {
+				pr_err("gpio_event_input_func: "
+				       "gpio_direction_input failed for %d\n",
+				       di->keymap[i].gpio);
+				goto err_gpio_configure_failed;
+			}
+		}
+
+		ret = gpio_event_input_request_irqs(ds);
+
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		/* Fall back to polling if any irq could not be requested. */
+		ds->use_irq = ret == 0;
+
+		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+			"mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			ret == 0 ? "interrupt" : "polling");
+
+		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ds->timer.function = gpio_event_input_timer_func;
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	/* GPIO_EVENT_FUNC_UNINIT, also the INIT error unwind target. */
+	ret = 0;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	hrtimer_cancel(&ds->timer);
+	if (ds->use_irq) {
+		for (i = di->keymap_size - 1; i >= 0; i--) {
+			int irq = gpio_to_irq(di->keymap[i].gpio);
+			if (ds->info->info.no_suspend)
+				disable_irq_wake(irq);
+			free_irq(irq, &ds->key_state[i]);
+		}
+	}
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+		gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	wake_lock_destroy(&ds->wake_lock);
+	kfree(ds);
+err_ds_alloc_failed:
+	return ret;
+}
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 00000000000000..eaa9e89d473acb
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+/* Scan state for one keypad matrix instance. */
+struct gpio_kp {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_matrix_info *keypad_info;
+	struct hrtimer timer;		/* settle/debounce/poll timer */
+	struct wake_lock wake_lock;	/* held while scanning in irq mode */
+	int current_output;		/* scan phase: output row being driven */
+	unsigned int use_irq:1;		/* irqs acquired for all inputs */
+	unsigned int key_state_changed:1;
+	unsigned int last_key_state_changed:1;
+	unsigned int some_keys_pressed:2;	/* saturates at 3 */
+	unsigned int disabled_irq:1;	/* irq fired during registration */
+	unsigned long keys_pressed[0];	/* one bit per matrix position */
+};
+
+/*
+ * Drop the pressed bit for matrix position (out, in) unless its keycode
+ * is already reported as down on the mapped input device - a key that
+ * was reported before ghosting was detected stays pressed until a real
+ * release is observed.
+ */
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int key_index = out * mi->ninputs + in;
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+		__clear_bit(key_index, kp->keys_pressed);
+	} else {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"not cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+	}
+}
+
+/*
+ * Walk column @in downwards starting at row @out and clear every pressed
+ * bit that cannot be trusted (see clear_phantom_key).  Returns 1 when at
+ * least one pressed bit was found in the column, 0 otherwise.
+ */
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int found = 0;
+	int idx;
+
+	for (idx = out * mi->ninputs + in; out < mi->noutputs;
+	     out++, idx += mi->ninputs) {
+		if (test_bit(idx, kp->keys_pressed)) {
+			found = 1;
+			clear_phantom_key(kp, out, in);
+		}
+	}
+	return found;
+}
+
+/*
+ * Ghost-key suppression.  Fewer than three keys down cannot produce a
+ * phantom, so bail out early.  Otherwise scan each output row: when a
+ * row holds two or more pressed input columns, the pressed keys sharing
+ * those columns are candidates for ghosting and are cleared via
+ * clear_phantom_key / restore_keys_for_input (which keep keys whose
+ * keycode is already reported as down).
+ */
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+	int out, in, inp;
+	int key_index;
+
+	if (kp->some_keys_pressed < 3)
+		return;
+
+	for (out = 0; out < kp->keypad_info->noutputs; out++) {
+		inp = -1;	/* first pressed column in this row, -2 = many */
+		key_index = out * kp->keypad_info->ninputs;
+		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+			if (test_bit(key_index, kp->keys_pressed)) {
+				if (inp == -1) {
+					inp = in;
+					continue;
+				}
+				if (inp >= 0) {
+					if (!restore_keys_for_input(kp, out + 1,
+									inp))
+						break;
+					clear_phantom_key(kp, out, inp);
+					inp = -2;
+				}
+				restore_keys_for_input(kp, out, in);
+			}
+		}
+	}
+}
+
+/*
+ * Report matrix position (out, in) to the input core if its pressed bit
+ * differs from the state the input device already carries.  KEY_RESERVED
+ * entries are never reported (optionally logged as unmapped).
+ */
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int pressed = test_bit(key_index, kp->keys_pressed);
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (keycode == KEY_RESERVED) {
+			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+				pr_info("gpiomatrix: unmapped key, %d-%d "
+					"(%d-%d) changed to %d\n",
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+		} else {
+			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+					"changed to %d\n", keycode,
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+		}
+	}
+}
+
+/* Flush pending input events on every device the keypad reports through. */
+static void report_sync(struct gpio_kp *kp)
+{
+	int dev;
+
+	for (dev = 0; dev < kp->input_devs->count; dev++)
+		input_sync(kp->input_devs->dev[dev]);
+}
+
+/*
+ * Matrix scan state machine, driven by the settle/debounce/poll timer.
+ * kp->current_output tracks the phase: while it is < noutputs the timer
+ * samples the input columns for the currently driven row, releases that
+ * row, drives the next one and re-arms with settle_time.  After the last
+ * row a full pass is complete: the key bitmap is optionally debounced,
+ * phantom keys removed and changes reported.  Then either the next scan
+ * is scheduled, or - in irq mode with no keys down - all rows are driven
+ * active, the input irqs re-enabled and the wake lock dropped.
+ */
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+	int out, in;
+	int key_index;
+	int gpio;
+	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+	out = kp->current_output;
+	if (out == mi->noutputs) {
+		/* Start of a new pass: rotate the change flags. */
+		out = 0;
+		kp->last_key_state_changed = kp->key_state_changed;
+		kp->key_state_changed = 0;
+		kp->some_keys_pressed = 0;
+	} else {
+		/* Sample the columns for the row driven since last expiry. */
+		key_index = out * mi->ninputs;
+		for (in = 0; in < mi->ninputs; in++, key_index++) {
+			gpio = mi->input_gpios[in];
+			if (gpio_get_value(gpio) ^ !polarity) {
+				if (kp->some_keys_pressed < 3)
+					kp->some_keys_pressed++;
+				kp->key_state_changed |= !__test_and_set_bit(
+						key_index, kp->keys_pressed);
+			} else
+				kp->key_state_changed |= __test_and_clear_bit(
+						key_index, kp->keys_pressed);
+		}
+		/* Release the row before driving the next one. */
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, !polarity);
+		else
+			gpio_direction_input(gpio);
+		out++;
+	}
+	kp->current_output = out;
+	if (out < mi->noutputs) {
+		/* Drive the next row and wait for the lines to settle. */
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, polarity);
+		else
+			gpio_direction_output(gpio, polarity);
+		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+		/* Rescan until two consecutive passes agree. */
+		if (kp->key_state_changed) {
+			hrtimer_start(&kp->timer, mi->debounce_delay,
+				      HRTIMER_MODE_REL);
+			return HRTIMER_NORESTART;
+		}
+		kp->key_state_changed = kp->last_key_state_changed;
+	}
+	if (kp->key_state_changed) {
+		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+			remove_phantom_keys(kp);
+		key_index = 0;
+		for (out = 0; out < mi->noutputs; out++)
+			for (in = 0; in < mi->ninputs; in++, key_index++)
+				report_key(kp, key_index, out, in);
+		report_sync(kp);
+	}
+	if (!kp->use_irq || kp->some_keys_pressed) {
+		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+
+	/* No keys are pressed, reenable interrupt */
+	for (out = 0; out < mi->noutputs; out++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[out], polarity);
+		else
+			gpio_direction_output(mi->output_gpios[out], polarity);
+	}
+	for (in = 0; in < mi->ninputs; in++)
+		enable_irq(gpio_to_irq(mi->input_gpios[in]));
+	wake_unlock(&kp->wake_lock);
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Input-line interrupt: some key was pressed while the keypad was idle.
+ * Mask all input irqs, stop driving the output rows, take the wake lock
+ * and kick the scan timer.  Before use_irq is set (registration still in
+ * progress) the irq is just masked and remembered in kp->disabled_irq so
+ * gpio_keypad_request_irqs can balance the disable.
+ */
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+	int i;
+	struct gpio_kp *kp = dev_id;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+
+	if (!kp->use_irq) {
+		/* ignore interrupt while registering the handler */
+		kp->disabled_irq = 1;
+		disable_irq_nosync(irq_in);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < mi->ninputs; i++)
+		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+	for (i = 0; i < mi->noutputs; i++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[i],
+				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+		else
+			gpio_direction_input(mi->output_gpios[i]);
+	}
+	wake_lock(&kp->wake_lock);
+	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Request a wakeup-capable irq for every keypad input line.  The trigger
+ * type follows the GPIOKPF_ACTIVE_HIGH / GPIOKPF_LEVEL_TRIGGERED_IRQ
+ * flags.  Each irq is requested, armed for wakeup, then left disabled;
+ * the scan timer re-enables the irqs once all keys are released.  If an
+ * irq fired during registration (kp->disabled_irq, set by the handler),
+ * the handler's extra disable is balanced here.
+ *
+ * Returns 0 on success or a negative errno.  The unwind loop after
+ * "return 0" is entered only through the labels inside it, with i at the
+ * failing entry; it walks the *input* lines backwards and is therefore
+ * bounded by ninputs - the previous "mi->noutputs - 1" bound indexed
+ * input_gpios[] with the size of the wrong array.
+ */
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long request_flags;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+	switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+	default:
+		request_flags = IRQF_TRIGGER_FALLING;
+		break;
+	case GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_RISING;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+		request_flags = IRQF_TRIGGER_LOW;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_HIGH;
+		break;
+	}
+
+	for (i = 0; i < mi->ninputs; i++) {
+		err = irq = gpio_to_irq(mi->input_gpios[i]);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+				  "gpio_kp", kp);
+		if (err) {
+			pr_err("gpiomatrix: request_irq failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+			goto err_request_irq_failed;
+		}
+		err = enable_irq_wake(irq);
+		if (err) {
+			pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+		}
+		disable_irq(irq);
+		if (kp->disabled_irq) {
+			kp->disabled_irq = 0;
+			enable_irq(irq);
+		}
+	}
+	return 0;
+
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+/*
+ * gpio_event handler for a keypad matrix.
+ *
+ * GPIO_EVENT_FUNC_INIT allocates the scan state, validates the keymap,
+ * claims and configures the output and input gpios, and starts the scan
+ * timer (interrupt driven when all input irqs could be requested,
+ * polling otherwise).  The final section (UNINIT) releases everything in
+ * reverse order and doubles as the INIT error unwind via the labels
+ * inside its loops.  SUSPEND/RESUME are currently no-ops.
+ *
+ * Fix vs. the previous version: the teardown loops over the *input*
+ * lines used "mi->noutputs - 1" as the start index, reading past
+ * input_gpios[] (or leaking irqs/gpios) whenever the matrix is not
+ * square; they are now bounded by ninputs.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+	struct gpio_event_info *info, void **data, int func)
+{
+	int i;
+	int err;
+	int key_count;
+	struct gpio_kp *kp;
+	struct gpio_event_matrix_info *mi;
+
+	mi = container_of(info, struct gpio_event_matrix_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+		/* TODO: disable scanning */
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (mi->keymap == NULL ||
+		   mi->input_gpios == NULL ||
+		   mi->output_gpios == NULL) {
+			err = -ENODEV;
+			pr_err("gpiomatrix: Incomplete pdata\n");
+			goto err_invalid_platform_data;
+		}
+		key_count = mi->ninputs * mi->noutputs;
+
+		/* One pressed-state bit per matrix position. */
+		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+				     BITS_TO_LONGS(key_count), GFP_KERNEL);
+		if (kp == NULL) {
+			err = -ENOMEM;
+			pr_err("gpiomatrix: Failed to allocate private data\n");
+			goto err_kp_alloc_failed;
+		}
+		kp->input_devs = input_devs;
+		kp->keypad_info = mi;
+		for (i = 0; i < key_count; i++) {
+			unsigned short keyentry = mi->keymap[i];
+			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+			if (dev >= input_devs->count) {
+				pr_err("gpiomatrix: bad device index %d >= "
+					"%d for key code %d\n",
+					dev, input_devs->count, keycode);
+				err = -EINVAL;
+				goto err_bad_keymap;
+			}
+			if (keycode && keycode <= KEY_MAX)
+				input_set_capability(input_devs->dev[dev],
+							EV_KEY, keycode);
+		}
+
+		for (i = 0; i < mi->noutputs; i++) {
+			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_request_output_gpio_failed;
+			}
+			if (gpio_cansleep(mi->output_gpios[i])) {
+				pr_err("gpiomatrix: unsupported output gpio %d,"
+					" can sleep\n", mi->output_gpios[i]);
+				err = -EINVAL;
+				goto err_output_gpio_configure_failed;
+			}
+			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+				err = gpio_direction_output(mi->output_gpios[i],
+					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
+			else
+				err = gpio_direction_input(mi->output_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_configure failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_output_gpio_configure_failed;
+			}
+		}
+		for (i = 0; i < mi->ninputs; i++) {
+			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"input %d\n", mi->input_gpios[i]);
+				goto err_request_input_gpio_failed;
+			}
+			err = gpio_direction_input(mi->input_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_direction_input failed"
+					" for input %d\n", mi->input_gpios[i]);
+				goto err_gpio_direction_input_failed;
+			}
+		}
+		/* Arm the timer to begin a full scan immediately. */
+		kp->current_output = mi->noutputs;
+		kp->key_state_changed = 1;
+
+		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		kp->timer.function = gpio_keypad_timer_func;
+		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+		err = gpio_keypad_request_irqs(kp);
+		kp->use_irq = err == 0;
+
+		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+			"%s%s in %s mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			kp->use_irq ? "interrupt" : "polling");
+
+		if (kp->use_irq)
+			wake_lock(&kp->wake_lock);
+		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+		return 0;
+	}
+
+	/* GPIO_EVENT_FUNC_UNINIT, also the INIT error unwind target. */
+	err = 0;
+	kp = *data;
+
+	/* Irqs were requested per input line: walk ninputs, not noutputs. */
+	if (kp->use_irq)
+		for (i = mi->ninputs - 1; i >= 0; i--)
+			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+	hrtimer_cancel(&kp->timer);
+	wake_lock_destroy(&kp->wake_lock);
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+		gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+		;
+	}
+	for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+		gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+		;
+	}
+err_bad_keymap:
+	kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+	return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 00000000000000..2aac2fad0a17bf
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+/*
+ * Event callback for gpio outputs: mirror an input event of the
+ * configured type onto every gpio mapped to the same (dev, code) pair.
+ * Always returns 0.
+ */
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int idx;
+	struct gpio_event_output_info *oi =
+		container_of(info, struct gpio_event_output_info, info);
+
+	/* Only events of the configured type are routed to gpios. */
+	if (type != oi->type)
+		return 0;
+
+	/* Translate the logical value to a wire level for active-low. */
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+
+	for (idx = 0; idx < oi->keymap_size; idx++) {
+		if (oi->keymap[idx].dev == dev && oi->keymap[idx].code == code)
+			gpio_set_value(oi->keymap[idx].gpio, value);
+	}
+	return 0;
+}
+
+/*
+ * gpio_event handler for output gpios.  INIT declares the capabilities,
+ * claims the gpios and drives them to their inactive level; the final
+ * section (UNINIT) frees the gpios and doubles as the INIT error unwind
+ * via the labels inside its loop.  SUSPEND/RESUME are no-ops.  Returns 0
+ * on success or a negative errno.
+ */
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		/* Inactive level: low for active-high outputs, high otherwise. */
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device "
+				       "index %d >= %d for key code %d\n",
+				       dev, input_devs->count,
+				       oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request "
+				       "failed for %d\n", oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: "
+				       "gpio_direction_output failed for %d\n",
+				       oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	/* GPIO_EVENT_FUNC_UNINIT, also the INIT error unwind target. */
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	return ret;
+}
+
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 00000000000000..3ffab6da411b54
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME "keychord"
+#define BUFFER_SIZE 16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+ ((char *)kc + sizeof(struct input_keychord) + \
+ kc->count * sizeof(kc->keycodes[0])))
+
+/* Per-open driver state: the configured chords plus a ring of matched ids. */
+struct keychord_device {
+	struct input_handler	input_handler;
+	int			registered;	/* handler registered with input core */
+
+	/* list of keychords to monitor */
+	struct input_keychord	*keychords;
+	int			keychord_count;
+
+	/* bitmask of keys contained in our keychords */
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	/* current state of the keys */
+	unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+	/* number of keys that are currently pressed */
+	int key_down;
+
+	/* second input_device_id is needed for null termination */
+	struct input_device_id  device_ids[2];
+
+	spinlock_t		lock;		/* protects all state above + ring */
+	wait_queue_head_t	waitq;		/* readers wait for head != tail */
+	unsigned char		head;		/* ring write index (producer) */
+	unsigned char		tail;		/* ring read index (consumer) */
+	__u16			buff[BUFFER_SIZE];	/* matched keychord ids */
+};
+
+/*
+ * Return 1 when @keychord exactly matches the currently pressed keys:
+ * the number of keys down equals the chord size and every keycode of
+ * the chord is present in kdev->keystate.  Caller holds kdev->lock.
+ */
+static int check_keychord(struct keychord_device *kdev,
+		struct input_keychord *keychord)
+{
+	int idx;
+
+	/* An exact match needs exactly this many keys down... */
+	if (kdev->key_down != keychord->count)
+		return 0;
+
+	/* ...and each of the chord's keycodes currently pressed. */
+	for (idx = 0; idx < keychord->count; idx++)
+		if (!test_bit(keychord->keycodes[idx], kdev->keystate))
+			return 0;
+
+	return 1;
+}
+
+/*
+ * Input-core event callback.  Tracks the global pressed-key state and,
+ * on a key-down belonging to one of the monitored keys, scans the
+ * configured chords; the first complete match queues its id in the ring
+ * buffer and wakes any reader.
+ *
+ * NOTE(review): head is advanced without comparison against tail, so
+ * after BUFFER_SIZE unread matches the ring wraps and unread ids are
+ * overwritten - confirm this is acceptable for the userspace protocol.
+ */
+static void keychord_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	struct keychord_device *kdev = handle->private;
+	struct input_keychord *keychord;
+	unsigned long flags;
+	int i, got_chord = 0;
+
+	if (type != EV_KEY || code >= KEY_MAX)
+		return;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* do nothing if key state did not change */
+	if (!test_bit(code, kdev->keystate) == !value)
+		goto done;
+	__change_bit(code, kdev->keystate);
+	if (value)
+		kdev->key_down++;
+	else
+		kdev->key_down--;
+
+	/* don't notify on key up */
+	if (!value)
+		goto done;
+	/* ignore this event if it is not one of the keys we are monitoring */
+	if (!test_bit(code, kdev->keybit))
+		goto done;
+
+	keychord = kdev->keychords;
+	if (!keychord)
+		goto done;
+
+	/* check to see if the keyboard state matches any keychords */
+	for (i = 0; i < kdev->keychord_count; i++) {
+		if (check_keychord(kdev, keychord)) {
+			kdev->buff[kdev->head] = keychord->id;
+			kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+			got_chord = 1;
+			break;
+		}
+		/* skip to next keychord */
+		keychord = NEXT_KEYCHORD(keychord);
+	}
+
+done:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	/* Wake readers outside the lock. */
+	if (got_chord)
+		wake_up_interruptible(&kdev->waitq);
+}
+
+/*
+ * Input-handler connect callback: attach to @dev if it can emit at least
+ * one of the keycodes we monitor; otherwise decline with -ENODEV.  On
+ * success a handle is allocated, registered and opened so keychord_event
+ * starts receiving this device's events.
+ */
+static int keychord_connect(struct input_handler *handler,
+			    struct input_dev *dev,
+			    const struct input_device_id *id)
+{
+	int i, ret;
+	struct input_handle *handle;
+	struct keychord_device *kdev =
+		container_of(handler, struct keychord_device, input_handler);
+
+	/*
+	 * ignore this input device if it does not contain any keycodes
+	 * that we are monitoring
+	 */
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCHORD_NAME;
+	handle->private = kdev;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+/* Undo keychord_connect: close the device and free the handle. */
+static void keychord_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ *
+ * Blocks (unless O_NONBLOCK) until a matched keychord id is queued, then
+ * copies exactly sizeof(__u16) bytes to userspace.  @count must be at
+ * least that large; the rest of the buffer is unused.
+ *
+ * NOTE(review): the empty check and the wait run without kdev->lock and
+ * the pop below does not re-verify head != tail.  With a single reader
+ * this is fine; two concurrent readers could both be woken for one
+ * queued id and one would pop from an empty ring - confirm the device
+ * is only read by one thread at a time.
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	__u16 id;
+	int retval;
+	unsigned long flags;
+
+	if (count < sizeof(id))
+		return -EINVAL;
+	count = sizeof(id);
+
+	if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	retval = wait_event_interruptible(kdev->waitq,
+					  kdev->head != kdev->tail);
+	if (retval)
+		return retval;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* pop a keychord ID off the queue */
+	id = kdev->buff[kdev->tail];
+	kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (copy_to_user(buffer, &id, count))
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ *
+ * Userspace writes a packed array of struct input_keychord records.  The
+ * previous configuration (and the input handler registration, which must
+ * not be held while the tables change) is dropped first; every record is
+ * then validated before the handler is re-registered.  Returns @count on
+ * success, -EINVAL for malformed input, -EFAULT/-ENOMEM on copy/alloc
+ * failure, or the input-core registration error.
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+			      size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	struct input_keychord *keychords = 0;
+	struct input_keychord *keychord, *next, *end;
+	int ret, i, key;
+	unsigned long flags;
+
+	if (count < sizeof(struct input_keychord))
+		return -EINVAL;
+	keychords = kzalloc(count, GFP_KERNEL);
+	if (!keychords)
+		return -ENOMEM;
+
+	/* read list of keychords from userspace */
+	if (copy_from_user(keychords, buffer, count)) {
+		kfree(keychords);
+		return -EFAULT;
+	}
+
+	/* unregister handler before changing configuration */
+	if (kdev->registered) {
+		input_unregister_handler(&kdev->input_handler);
+		kdev->registered = 0;
+	}
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* clear any existing configuration */
+	kfree(kdev->keychords);
+	kdev->keychords = 0;
+	kdev->keychord_count = 0;
+	kdev->key_down = 0;
+	memset(kdev->keybit, 0, sizeof(kdev->keybit));
+	memset(kdev->keystate, 0, sizeof(kdev->keystate));
+	kdev->head = kdev->tail = 0;
+
+	keychord = keychords;
+	end = (struct input_keychord *)((char *)keychord + count);
+
+	while (keychord < end) {
+		/* NEXT_KEYCHORD advances by keychord->count entries; the
+		 * result is bounds-checked against @end before use */
+		next = NEXT_KEYCHORD(keychord);
+		if (keychord->count <= 0 || next > end) {
+			pr_err("keychord: invalid keycode count %d\n",
+				keychord->count);
+			goto err_unlock_return;
+		}
+		if (keychord->version != KEYCHORD_VERSION) {
+			pr_err("keychord: unsupported version %d\n",
+				keychord->version);
+			goto err_unlock_return;
+		}
+
+		/* keep track of the keys we are monitoring in keybit */
+		for (i = 0; i < keychord->count; i++) {
+			key = keychord->keycodes[i];
+			if (key < 0 || key >= KEY_CNT) {
+				pr_err("keychord: keycode %d out of range\n",
+					key);
+				goto err_unlock_return;
+			}
+			__set_bit(key, kdev->keybit);
+		}
+
+		kdev->keychord_count++;
+		keychord = next;
+	}
+
+	kdev->keychords = keychords;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	ret = input_register_handler(&kdev->input_handler);
+	if (ret) {
+		kfree(keychords);
+		kdev->keychords = 0;
+		return ret;
+	}
+	kdev->registered = 1;
+
+	return count;
+
+err_unlock_return:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	kfree(keychords);
+	return -EINVAL;
+}
+
+/* poll support: readable whenever the id queue is non-empty */
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+	struct keychord_device *kdev = file->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(file, &kdev->waitq, wait);
+	if (kdev->head != kdev->tail)
+		mask = POLLIN | POLLRDNORM;
+	return mask;
+}
+
+/*
+ * Allocate the per-open keychord device state.  The input handler is only
+ * filled in here; it is registered later, from keychord_write(), once a
+ * configuration exists.
+ */
+static int keychord_open(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev;
+
+	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+	if (!kdev)
+		return -ENOMEM;
+
+	init_waitqueue_head(&kdev->waitq);
+	spin_lock_init(&kdev->lock);
+
+	kdev->input_handler.name = KEYCHORD_NAME;
+	kdev->input_handler.event = keychord_event;
+	kdev->input_handler.connect = keychord_connect;
+	kdev->input_handler.disconnect = keychord_disconnect;
+	kdev->input_handler.id_table = kdev->device_ids;
+
+	/* match every device that can emit key events */
+	__set_bit(EV_KEY, kdev->device_ids[0].evbit);
+	kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+
+	file->private_data = kdev;
+	return 0;
+}
+
+/* Drop the handler registration (if any) and free the per-open state. */
+static int keychord_release(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	if (kdev->registered)
+		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev);
+	return 0;
+}
+
+/* /dev/keychord file operations: state lives in file->private_data */
+static const struct file_operations keychord_fops = {
+	.owner		= THIS_MODULE,
+	.open		= keychord_open,
+	.release	= keychord_release,
+	.read		= keychord_read,
+	.write		= keychord_write,
+	.poll		= keychord_poll,
+};
+
+/* registered as a dynamic misc character device */
+static struct miscdevice keychord_misc = {
+	.fops		= &keychord_fops,
+	.name		= KEYCHORD_NAME,
+	.minor		= MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+	return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+	misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/input/misc/mpu6050.c b/drivers/input/misc/mpu6050.c
new file mode 100644
index 00000000000000..4a0ea949be2536
--- /dev/null
+++ b/drivers/input/misc/mpu6050.c
@@ -0,0 +1,402 @@
+/*
+ * Implements driver for INVENSENSE MPU6050 Chip
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Kishore Kadiyala <kishore.kadiyala@ti.com>
+ * Contributor: Leed Aguilar <leed.aguilar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input/mpu6050.h>
+#include <linux/platform_device.h>
+
+#include "mpu6050x.h"
+
+#define MPU6050_DEBUG
+
+#ifdef MPU6050_DEBUG
+/* name/address pairs for the debug sysfs register dump (see
+ * mpu6050_registers_show/store below) */
+struct mpu6050_reg {
+	u8 *name;	/* register name, also used as the store() lookup key */
+	uint8_t reg;	/* register address on the chip */
+} mpu6050_regs[] = {
+	{ "MPU6050_REG_SMPLRT_DIV", MPU6050_REG_SMPLRT_DIV },
+	{ "MPU6050_REG_CONFIG", MPU6050_REG_CONFIG },
+	{ "MPU6050_REG_GYRO_CONFIG", MPU6050_REG_GYRO_CONFIG },
+	{ "MPU6050_REG_ACCEL_CONFIG", MPU6050_REG_ACCEL_CONFIG },
+	{ "MPU6050_REG_ACCEL_FF_THR", MPU6050_REG_ACCEL_FF_THR },
+	{ "MPU6050_REG_ACCEL_FF_DUR", MPU6050_REG_ACCEL_FF_DUR },
+	{ "MPU6050_REG_ACCEL_MOT_THR", MPU6050_REG_ACCEL_MOT_THR },
+	{ "MPU6050_REG_ACCEL_MOT_DUR", MPU6050_REG_ACCEL_MOT_DUR },
+	{ "MPU6050_REG_ACCEL_ZRMOT_THR", MPU6050_REG_ACCEL_ZRMOT_THR },
+	{ "MPU6050_REG_ACCEL_ZRMOT_DUR", MPU6050_REG_ACCEL_ZRMOT_DUR },
+	{ "MPU6050_REG_FIFO_EN", MPU6050_REG_FIFO_EN },
+	{ "MPU6050_REG_I2C_MST_CTRL", MPU6050_REG_I2C_MST_CTRL },
+	{ "MPU6050_REG_I2C_SLV0_ADDR", MPU6050_REG_I2C_SLV0_ADDR },
+	{ "MPU6050_REG_I2C_SLV0_REG", MPU6050_REG_I2C_SLV0_REG },
+	{ "MPU6050_REG_I2C_SLV0_CTRL", MPU6050_REG_I2C_SLV0_CTRL },
+	{ "MPU6050_REG_I2C_SLV1_ADDR", MPU6050_REG_I2C_SLV1_ADDR },
+	{ "MPU6050_REG_I2C_SLV1_REG", MPU6050_REG_I2C_SLV1_REG },
+	{ "MPU6050_REG_I2C_SLV1_CTRL", MPU6050_REG_I2C_SLV1_CTRL },
+	{ "MPU6050_REG_I2C_SLV2_ADDR", MPU6050_REG_I2C_SLV2_ADDR },
+	{ "MPU6050_REG_I2C_SLV2_REG", MPU6050_REG_I2C_SLV2_REG },
+	{ "MPU6050_REG_I2C_SLV2_CTRL", MPU6050_REG_I2C_SLV2_CTRL },
+	{ "MPU6050_REG_I2C_SLV3_ADDR", MPU6050_REG_I2C_SLV3_ADDR },
+	{ "MPU6050_REG_I2C_SLV3_REG", MPU6050_REG_I2C_SLV3_REG },
+	{ "MPU6050_REG_I2C_SLV3_CTRL", MPU6050_REG_I2C_SLV3_CTRL },
+	{ "MPU6050_REG_I2C_SLV4_ADDR", MPU6050_REG_I2C_SLV4_ADDR },
+	{ "MPU6050_REG_I2C_SLV4_REG", MPU6050_REG_I2C_SLV4_REG },
+	{ "MPU6050_REG_I2C_SLV4_DO", MPU6050_REG_I2C_SLV4_DO },
+	{ "MPU6050_REG_I2C_SLV4_CTRL", MPU6050_REG_I2C_SLV4_CTRL },
+	{ "MPU6050_REG_I2C_SLV4_DI", MPU6050_REG_I2C_SLV4_DI },
+	{ "MPU6050_REG_I2C_MST_STATUS", MPU6050_REG_I2C_MST_STATUS },
+	{ "MPU6050_REG_INT_PIN_CFG", MPU6050_REG_INT_PIN_CFG },
+	{ "MPU6050_REG_INT_ENABLE", MPU6050_REG_INT_ENABLE },
+	{ "MPU6050_REG_INT_STATUS", MPU6050_REG_INT_STATUS },
+	{ "MPU6050_REG_ACCEL_XOUT_H", MPU6050_REG_ACCEL_XOUT_H },
+	{ "MPU6050_REG_ACCEL_XOUT_L", MPU6050_REG_ACCEL_XOUT_L },
+	{ "MPU6050_REG_ACCEL_YOUT_H", MPU6050_REG_ACCEL_YOUT_H },
+	{ "MPU6050_REG_ACCEL_YOUT_L", MPU6050_REG_ACCEL_YOUT_L },
+	{ "MPU6050_REG_ACCEL_ZOUT_H", MPU6050_REG_ACCEL_ZOUT_H },
+	{ "MPU6050_REG_ACCEL_ZOUT_L", MPU6050_REG_ACCEL_ZOUT_L },
+	{ "MPU6050_REG_TEMP_OUT_H", MPU6050_REG_TEMP_OUT_H },
+	{ "MPU6050_REG_TEMP_OUT_L", MPU6050_REG_TEMP_OUT_L },
+	{ "MPU6050_REG_GYRO_XOUT_H", MPU6050_REG_GYRO_XOUT_H },
+	{ "MPU6050_REG_GYRO_XOUT_L", MPU6050_REG_GYRO_XOUT_L },
+	{ "MPU6050_REG_GYRO_YOUT_H", MPU6050_REG_GYRO_YOUT_H },
+	{ "MPU6050_REG_GYRO_YOUT_L", MPU6050_REG_GYRO_YOUT_L },
+	{ "MPU6050_REG_GYRO_ZOUT_H", MPU6050_REG_GYRO_ZOUT_H },
+	{ "MPU6050_REG_GYRO_ZOUT_L", MPU6050_REG_GYRO_ZOUT_L },
+	{ "MPU6050_REG_I2C_SLV0_DO", MPU6050_REG_I2C_SLV0_DO },
+	{ "MPU6050_REG_I2C_SLV1_DO", MPU6050_REG_I2C_SLV1_DO },
+	{ "MPU6050_REG_I2C_SLV2_DO", MPU6050_REG_I2C_SLV2_DO },
+	{ "MPU6050_REG_I2C_SLV3_DO", MPU6050_REG_I2C_SLV3_DO },
+	{ "MPU6050_REG_I2C_MST_DELAY_CTRL", MPU6050_REG_I2C_MST_DELAY_CTRL },
+	{ "MPU6050_REG_SIGNAL_PATH_RESET", MPU6050_REG_SIGNAL_PATH_RESET },
+	{ "MPU6050_REG_ACCEL_INTEL_CTRL", MPU6050_REG_ACCEL_INTEL_CTRL },
+	{ "MPU6050_REG_USER_CTRL", MPU6050_REG_USER_CTRL },
+	{ "MPU6050_REG_PWR_MGMT_1", MPU6050_REG_PWR_MGMT_1 },
+	{ "MPU6050_REG_PWR_MGMT_2", MPU6050_REG_PWR_MGMT_2 },
+	{ "MPU6050_REG_FIFO_COUNTH", MPU6050_REG_FIFO_COUNTH },
+	{ "MPU6050_REG_FIFO_COUNTL", MPU6050_REG_FIFO_COUNTL },
+	{ "MPU6050_REG_FIFO_R_W", MPU6050_REG_FIFO_R_W },
+	{ "MPU6050_REG_WHOAMI", MPU6050_REG_WHOAMI },
+};
+
+/*
+ * mpu6050_registers_show - sysfs "registers" read: dump every register
+ * listed in mpu6050_regs.
+ *
+ * The read result is now checked; on an I2C error the failure is reported
+ * inline instead of printing an uninitialized stack byte as the value.
+ */
+static ssize_t mpu6050_registers_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mpu6050_data *data = platform_get_drvdata(pdev);
+	unsigned i, n, reg_count;
+	uint8_t value;
+	int error;
+
+	reg_count = sizeof(mpu6050_regs) / sizeof(mpu6050_regs[0]);
+	for (i = 0, n = 0; i < reg_count; i++) {
+		value = 0;
+		error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+			mpu6050_regs[i].reg, 1, &value, mpu6050_regs[i].name);
+		if (error) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "%-20s = <read error %d>\n",
+				       mpu6050_regs[i].name, error);
+			continue;
+		}
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "%-20s = 0x%02X\n",
+			       mpu6050_regs[i].name, value);
+	}
+
+	return n;
+}
+
+/*
+ * mpu6050_registers_store - sysfs "registers" write: "<REG_NAME> <hex>"
+ * writes @hex to the named register.
+ *
+ * Error returns are now proper errnos (-EINVAL) instead of -1 (-EPERM),
+ * the %s conversion is width-limited to the destination buffer, and the
+ * I2C write result is propagated to the caller.
+ */
+static ssize_t mpu6050_registers_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mpu6050_data *data = platform_get_drvdata(pdev);
+	unsigned i, reg_count, value;
+	int error;
+	char name[30];
+
+	if (count >= sizeof(name)) {
+		pr_err("%s:input too long\n", __func__);
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%29s %x", name, &value) != 2) {
+		pr_err("%s:unable to parse input\n", __func__);
+		return -EINVAL;
+	}
+
+	reg_count = sizeof(mpu6050_regs) / sizeof(mpu6050_regs[0]);
+	for (i = 0; i < reg_count; i++) {
+		if (!strcmp(name, mpu6050_regs[i].name)) {
+			error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+					      mpu6050_regs[i].reg, value,
+					      mpu6050_regs[i].name);
+			return error ? error : count;
+		}
+	}
+
+	pr_err("%s:no such register %s\n", __func__, name);
+	return -EINVAL;
+}
+
+/* debug attribute group: exposes "registers" (rw for owner, ro for all) */
+static DEVICE_ATTR(registers, S_IWUSR | S_IRUGO,
+		mpu6050_registers_show, mpu6050_registers_store);
+
+static struct attribute *mpu6050_attrs[] = {
+	&dev_attr_registers.attr,
+	NULL
+};
+
+static const struct attribute_group mpu6050_attr_group = {
+	.attrs = mpu6050_attrs,
+};
+
+#endif
+
+/*
+ * mpu6050_enable_sleep - set or clear the SLEEP bit in PWR_MGMT_1.
+ * @action: non-zero to put the device to sleep, zero to wake it.
+ *
+ * The register is read back to verify the result.  The original check
+ * flagged an error whenever SLEEP was set, i.e. exactly when *enabling*
+ * sleep had succeeded; the verification now compares against the
+ * requested state.  Always returns 0 (read/write failures are reported
+ * by the MPU6050_READ/WRITE helpers themselves).
+ */
+static int mpu6050_enable_sleep(struct mpu6050_data *data, int action)
+{
+	unsigned char val = 0;
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, 1, &val, "Enable Sleep");
+	if (action)
+		val |= MPU6050_DEVICE_SLEEP_MODE;
+	else
+		val &= ~MPU6050_DEVICE_SLEEP_MODE;
+
+	MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, val, "Enable Sleep");
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, 1, &val, "Enable Sleep");
+
+	if (!(val & MPU6050_DEVICE_SLEEP_MODE) != !action)
+		dev_err(data->dev, "%s sleep mode failed\n",
+			action ? "enable" : "disable");
+	return 0;
+}
+
+/*
+ * mpu6050_reset - select the gyro-X PLL as clock source and wake the chip.
+ *
+ * NOTE(review): despite the name, no DEVICE_RESET bit is written here —
+ * only CLKSEL is switched from the internal oscillator to PLL-X and sleep
+ * mode is cleared.  Confirm whether a full device reset was intended.
+ * Returns 0 on success or the I2C error code.
+ */
+static int mpu6050_reset(struct mpu6050_data *data)
+{
+	unsigned char val = 0;
+	int error;
+
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, 1, &val, "Reset");
+	if (error) {
+		dev_err(data->dev, "fail to read PWR_MGMT_1 reg\n");
+		return error;
+	}
+
+	/* read-modify-write: replace the clock-select field with PLL-X */
+	val &= ~MPU6050_CLOCKSEL_SRC_INT;
+	val |= MPU6050_CLOCKSEL_SRC_PLL_X;
+	error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, val, "PLL_X");
+	if (error) {
+		dev_err(data->dev, "fail to set reset bit\n");
+		return error;
+	}
+
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_1, 1, &val, "Reset");
+	if (error) {
+		dev_err(data->dev, "fail to read PWR_MGMT_1 reg\n");
+		return error;
+	}
+
+	/* clear the SLEEP bit so the sensors start running */
+	mpu6050_enable_sleep(data, 0);
+
+	return 0;
+}
+
+/*
+ * mpu6050_set_dplf_mode - program the digital low-pass filter (DLPF;
+ * "dplf" is a historical typo kept in the name) to the 42 Hz gyro /
+ * 44 Hz accel bandwidth setting.  Returns 0 or the I2C error code.
+ */
+static int mpu6050_set_dplf_mode(struct mpu6050_data *data)
+{
+	int error;
+	unsigned char val = 0;
+	dev_info(data->dev, "%s\n", __func__);
+
+	/* dplf Configuration */
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_CONFIG,
+			1, &val, "MPU6050 register configuration");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to read REG_CONFIG register\n");
+		return error;
+	}
+
+	/* read-modify-write only the DLPF_CFG field */
+	val &= ~MPU6050_REG_CONFIG_DLPF;
+	val |= MPU6050_BANDWIDTH_GYRO_42HZ_ACC_44HZ;
+	error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_CONFIG,
+			val, "MPU6050 register configuration");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to set dplf register\n");
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * mpu6050_set_bypass_mode - route the auxiliary I2C bus straight to the
+ * host: enable pass-through, set I2C_BYPASS_EN in INT_PIN_CFG and clear
+ * I2C_MST_EN in USER_CTRL.  Returns 0 or the first I2C error.
+ */
+static int mpu6050_set_bypass_mode(struct mpu6050_data *data)
+{
+	int error;
+	unsigned char val = 0;
+
+	dev_info(data->dev, "%s\n", __func__);
+	/*
+	 * NOTE(review): I2C_MST_STATUS is documented as a read-only status
+	 * register; writing PASS_THROUGH back to it looks suspicious —
+	 * verify against the register map whether this write is needed.
+	 */
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_I2C_MST_STATUS,
+			1, &val, "Pass Through");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to read MST_STATUS register\n");
+		return error;
+	}
+	val |= MPU6050_PASS_THROUGH;
+	error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_I2C_MST_STATUS,
+			val, "Pass Through");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to set pass through\n");
+		return error;
+	}
+
+	/* Bypass Enable Configuration */
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_INT_PIN_CFG,
+			1, &val, "Bypass enabled");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to read INT_PIN_CFG register\n");
+		return error;
+	}
+
+	val |= MPU6050_I2C_BYPASS_EN;
+	error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_INT_PIN_CFG,
+			val, "Bypass enabled");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to set i2c bypass mode\n");
+		return error;
+	}
+	/* bypass also requires the on-chip I2C master to be disabled */
+	error = MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_USER_CTRL,
+			1, &val, "I2C MST mode");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to read USER_CTRL register\n");
+		return error;
+	}
+	val &= ~MPU6050_I2C_MST_EN;
+	error = MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+			MPU6050_REG_USER_CTRL,
+			val, "I2C MST mode");
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to set i2c master enable bit\n");
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * mpu6050_init - common probe-time initialisation, called by the bus glue
+ * (mpu6050_i2c_probe) after @data->client is set.
+ * @data: driver state; client, irq and platform data are taken from it
+ * @bops: bus accessors (read/write/bustype) supplied by the glue layer
+ *
+ * Resets the chip, selects either I2C pass-through or DLPF mode from the
+ * platform flags, initialises the accel/gyro input device and (in debug
+ * builds) creates the sysfs register-dump group.  Returns 0 on success or
+ * a negative errno.  (Only brace style was changed vs. the original.)
+ */
+int mpu6050_init(struct mpu6050_data *data, const struct mpu6050_bus_ops *bops)
+{
+	struct mpu6050_platform_data *pdata = data->client->dev.platform_data;
+	int error = 0;
+
+	/* Verify platform data */
+	if (!pdata) {
+		dev_err(&data->client->dev, "platform data not found\n");
+		return -EINVAL;
+	}
+
+	/* if no IRQ return error */
+	if (data->client->irq == 0) {
+		dev_err(&data->client->dev, "Irq not set\n");
+		return -EINVAL;
+	}
+
+	/* Initialize device data */
+	data->dev = &data->client->dev;
+	data->bus_ops = bops;
+	data->pdata = pdata;
+	data->irq = data->client->irq;
+	mutex_init(&data->mutex);
+
+	/* Reset MPU6050 device */
+	error = mpu6050_reset(data);
+	if (error) {
+		dev_err(data->dev,
+			"MPU6050: fail to reset device\n");
+		return error;
+	}
+
+	/* Verify if pass-through mode is required */
+	if (pdata->flags & MPU6050_PASS_THROUGH_EN) {
+		error = mpu6050_set_bypass_mode(data);
+		if (error) {
+			dev_err(data->dev,
+				"MPU6050: fail to set bypass mode\n");
+			return error;
+		}
+	} else {
+		error = mpu6050_set_dplf_mode(data);
+		if (error) {
+			dev_err(data->dev,
+				"MPU6050: fail to set fsync mode\n");
+			return error;
+		}
+	}
+
+	/* Initializing built-in sensors on MPU6050 */
+	if (IS_ERR(mpu6050_accel_gyro_init(data))) {
+		dev_err(data->dev,
+			"MPU6050: mpu6050_accel_gyro_init failed\n");
+		/* in pass-through mode the chip is still usable as a mux */
+		if (!(pdata->flags & MPU6050_PASS_THROUGH_EN))
+			return -ENODEV;
+	}
+
+#ifdef MPU6050_DEBUG
+	error = sysfs_create_group(&data->client->dev.kobj, &mpu6050_attr_group);
+#endif
+
+	return error;
+}
+EXPORT_SYMBOL(mpu6050_init);
+
+/*
+ * mpu6050_exit - counterpart of mpu6050_init(): remove the debug sysfs
+ * group, put the chip to sleep and tear down the accel/gyro input device.
+ * @data itself is freed by the caller (the bus glue).
+ */
+void mpu6050_exit(struct mpu6050_data *data)
+{
+#ifdef MPU6050_DEBUG
+	sysfs_remove_group(&data->client->dev.kobj, &mpu6050_attr_group);
+#endif
+
+	mpu6050_enable_sleep(data, 1);
+
+	mpu6050_accel_gyro_exit(data);
+}
+EXPORT_SYMBOL(mpu6050_exit);
+
+/* fixed malformed author string: the closing '>' was missing */
+MODULE_AUTHOR("Kishore Kadiyala <kishore.kadiyala@ti.com>");
+MODULE_DESCRIPTION("MPU6050 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu6050_accel_gyro.c b/drivers/input/misc/mpu6050_accel_gyro.c
new file mode 100644
index 00000000000000..1d33ea920a61ec
--- /dev/null
+++ b/drivers/input/misc/mpu6050_accel_gyro.c
@@ -0,0 +1,384 @@
+/*
+ * INVENSENSE MPU6050 Accelerometer driver
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Kishore Kadiyala <kishore.kadiyala@ti.com>
+ * Contributor: Leed Aguilar <leed.aguilar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/device.h>
+#include <linux/input/mpu6050.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include "mpu6050x.h"
+
+#define MPU6050_ACCELERO_DATA_DBG 0
+#define MPU6050_GYRO_DATA_DBG 0
+#define MPU6050_GPIO_DBG 0 // Enable gpio debugging (pulse during it handling)
+#define MPU6050_GPIO_NUM 134 // Gpio number for gpio debugging
+
+/* MPU6050 full scale range types conversion table.
+ * Indexed by the FS_SEL/AFS_SEL field (0..3); the raw sample is shifted
+ * left by the table entry to bring every range onto the scale of the
+ * most precise (smallest-range) setting. */
+static int mpu6050_accel_shift_table[4] = {
+	0,
+	1,
+	2,
+	3,
+};
+static int mpu6050_gyro_shift_table[4] = {
+	0,
+	1,
+	2,
+	3,
+};
+/* Range of accelero sensor */
+#define MPU6050_ACCEL_RANGE_MAX (16384 * 16) // +16g @ 16384 LSB / g
+#define MPU6050_GYRO_RANGE_MAX (131 * 2000) // 2000 °/s @ 131 LSB / °/s
+
+
+/*
+ * mpu6050_acc_convert - scale a raw accel sample onto the common LSB scale.
+ * @fsr is the raw ACCEL_CONFIG register value: AFS_SEL occupies bits 4:3,
+ * but bits 7:5 are the self-test bits, so the shifted value is masked to
+ * two bits — otherwise a set self-test bit indexes past the 4-entry
+ * shift table (out-of-bounds read).
+ */
+static inline int mpu6050_acc_convert(int16_t val, int fsr)
+{
+	int newval = (int)val;
+
+	newval <<= mpu6050_accel_shift_table[(fsr >> 3) & 0x3];
+	return newval;
+}
+
+/*
+ * mpu6050_gyro_convert - scale a raw gyro sample onto the common LSB scale.
+ * FS_SEL is bits 4:3 of GYRO_CONFIG; mask the shifted register value to
+ * two bits so self-test bits (7:5) cannot index past the 4-entry table.
+ */
+static inline int mpu6050_gyro_convert(int16_t val, int fsr)
+{
+	int newval = (int)val;
+
+	newval <<= mpu6050_gyro_shift_table[(fsr >> 3) & 0x3];
+	return newval;
+}
+
+/**
+ * mpu6050_accel_set_standby - put the accel axes in standby or wake them
+ * @data: driver data
+ * @enable: non-zero puts XA/YA/ZA into standby, zero takes them out
+ *
+ * Read-modify-write of the standby bits in PWR_MGMT_2; I2C errors are
+ * reported by the MPU6050_READ/WRITE helpers and otherwise ignored here.
+ */
+static void mpu6050_accel_set_standby(struct mpu6050_data *data,
+					uint8_t enable)
+{
+	uint8_t val = 0;
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR, MPU6050_REG_PWR_MGMT_2,
+		1, &val, "Accel STBY");
+	if (enable)
+		val |= (MPU6050_STBY_XA |  MPU6050_STBY_YA |  MPU6050_STBY_ZA);
+	else
+		val &= ~(MPU6050_STBY_XA | MPU6050_STBY_YA | MPU6050_STBY_ZA);
+
+	MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_2, val, "Accel STBY");
+}
+
+/**
+ * mpu6050_gyro_set_standby - put the gyro axes in standby or wake them
+ * @data: driver data
+ * @enable: non-zero puts XG/YG/ZG into standby, zero takes them out
+ *
+ * Read-modify-write of the standby bits in PWR_MGMT_2.
+ */
+static void mpu6050_gyro_set_standby(struct mpu6050_data *data,
+					uint8_t enable)
+{
+	unsigned char val = 0;
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR, MPU6050_REG_PWR_MGMT_2,
+		1, &val, "Gyro STBY");
+	if (enable)
+		val |= (MPU6050_STBY_XG |  MPU6050_STBY_YG |  MPU6050_STBY_ZG);
+	else
+		val &= ~(MPU6050_STBY_XG | MPU6050_STBY_YG | MPU6050_STBY_ZG);
+
+	MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_PWR_MGMT_2, val, "Gyro STBY");
+}
+
+/**
+ * mpu6050_gyro_reset - reset the gyroscope signal path and wake the axes
+ * @data: driver data
+ *
+ * Always returns 0.
+ */
+static int mpu6050_gyro_reset(struct mpu6050_data *data)
+{
+	MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_SIGNAL_PATH_RESET, MPU6050_GYRO_SP_RESET,
+		"Gyro SP-reset");
+	mpu6050_gyro_set_standby(data, 0);
+	return 0;
+}
+
+/**
+ * mpu6050_accel_reset - reset the accelerometer signal path and wake
+ * the axes
+ * @data: driver data
+ */
+static void mpu6050_accel_reset(struct mpu6050_data *data)
+{
+	MPU6050_WRITE(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_SIGNAL_PATH_RESET, MPU6050_ACCEL_SP_RESET,
+		"Accel SP-reset");
+	mpu6050_accel_set_standby(data, 0);
+}
+
+/*
+ * Read the three accel axes (big-endian 16-bit pairs starting at
+ * ACCEL_XOUT_H), normalize them with the current full-scale setting and
+ * report them as ABS_X/Y/Z.  The caller is responsible for input_sync().
+ */
+static void mpu6050_accel_read_xyz(struct mpu6050_data *data)
+{
+	uint8_t fsr;
+	int16_t datax, datay, dataz;
+	int convx, convy, convz;
+	int16_t buffer[3];
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_ACCEL_XOUT_H, 6, (uint8_t *)buffer, "Accel-x-y-z");
+
+	/* chip registers are big-endian high/low byte pairs */
+	datax = be16_to_cpu(buffer[0]);
+	datay = be16_to_cpu(buffer[1]);
+	dataz = be16_to_cpu(buffer[2]);
+
+	/* fetch ACCEL_CONFIG so conversion follows the live FSR setting */
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_ACCEL_CONFIG, 1, &fsr, "full scale range");
+
+	convx = mpu6050_acc_convert(datax, fsr);
+	convy = mpu6050_acc_convert(datay, fsr);
+	convz = mpu6050_acc_convert(dataz, fsr);
+
+#if MPU6050_ACCELERO_DATA_DBG
+	pr_info("Accelero: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+		convx, convy, convz);
+#endif
+
+	input_report_abs(data->input_dev, ABS_X, convx);
+	input_report_abs(data->input_dev, ABS_Y, convy);
+	input_report_abs(data->input_dev, ABS_Z, convz);
+}
+
+/*
+ * Read the three gyro axes (big-endian pairs from GYRO_XOUT_H), normalize
+ * them with the current full-scale setting and report them as
+ * ABS_RX/RY/RZ.  The caller is responsible for input_sync().
+ */
+void mpu6050_gyro_read_xyz(struct mpu6050_data *data)
+{
+	int16_t datax, datay, dataz;
+	int convx, convy, convz;
+	int16_t buffer[3];
+	uint8_t fsr;
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR, MPU6050_REG_GYRO_XOUT_H,
+		6, (uint8_t *)buffer, "gyro-x-y-z");
+
+	datax = be16_to_cpu(buffer[0]);
+	datay = be16_to_cpu(buffer[1]);
+	dataz = be16_to_cpu(buffer[2]);
+
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_GYRO_CONFIG, 1, &fsr, "Gyro full scale range");
+
+	convx = mpu6050_gyro_convert(datax, fsr);
+	convy = mpu6050_gyro_convert(datay, fsr);
+	convz = mpu6050_gyro_convert(dataz, fsr);
+
+#if MPU6050_GYRO_DATA_DBG
+	pr_info("Gyro: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
+		convx, convy, convz);
+#endif
+	input_report_abs(data->input_dev, ABS_RX, convx);
+	input_report_abs(data->input_dev, ABS_RY, convy);
+	input_report_abs(data->input_dev, ABS_RZ, convz);
+}
+
+/*
+ * One sample cycle: acknowledge the interrupt (INT_STATUS is
+ * clear-on-read), read both sensors and emit a single EV_SYN frame.
+ * Optionally pulses a debug GPIO around the work for scope timing.
+ */
+static void mpu6050_read_values(struct mpu6050_data *data)
+{
+	uint8_t val;
+
+#if MPU6050_GPIO_DBG
+	gpio_set_value(MPU6050_GPIO_NUM, 1);
+#endif
+
+	/* Clear interrupts for requested mode */
+	MPU6050_READ(data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_INT_STATUS, 1, &val, "Clear Isr");
+
+	mpu6050_accel_read_xyz(data);
+	mpu6050_gyro_read_xyz(data);
+	input_sync(data->input_dev);
+
+#if MPU6050_GPIO_DBG
+	gpio_set_value(MPU6050_GPIO_NUM, 0);
+#endif
+
+}
+
+/**
+ * mpu6050_thread_irq - mpu6050 interrupt handler (threaded half)
+ * @irq: device interrupt number
+ * @dev_id: pointer to driver device data
+ *
+ * Runs in process context, so the blocking I2C reads below are safe.
+ */
+static irqreturn_t mpu6050_thread_irq(int irq, void *dev_id)
+{
+	struct mpu6050_data *data = dev_id;
+
+	mpu6050_read_values(data);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * mpu6050_accel_gyro_init - configure full-scale ranges and sample rate,
+ * register the input device and the threaded IRQ handler.
+ * Returns @mpu_data on success or an ERR_PTR() on failure.
+ *
+ * Fixes vs. the original: the gyro-FSR and sample-rate writes now latch
+ * their result into @error before it is tested (it previously still held
+ * the stale 0 from the accel write); the error paths free the IRQ that
+ * was actually requested (not gpio_to_irq() of it) and no longer call
+ * input_free_device() after input_unregister_device(), which already
+ * drops the device reference; the sample-rate comment matches the 100 Hz
+ * value actually programmed.
+ */
+struct mpu6050_data *mpu6050_accel_gyro_init(
+			struct mpu6050_data *mpu_data)
+{
+	struct mpu6050_platform_data *pdata = mpu_data->pdata;
+	struct input_dev *input_dev;
+	int error;
+	uint8_t val;
+
+	/* Verify for IRQ line */
+	if (!mpu_data->irq) {
+		pr_err("%s: Accelerometer Irq not assigned\n", __func__);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	/* Input device allocation */
+	input_dev = input_allocate_device();
+	if (!input_dev) {
+		error = -ENOMEM;
+		goto err_out;
+	}
+
+	mpu_data->input_dev = input_dev;
+
+	/* Accel full-scale range: 0 -> +/-2g (16384 LSB/g) */
+	pdata->accel_fsr = 0;
+	error = MPU6050_WRITE(mpu_data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_ACCEL_CONFIG,
+		(pdata->accel_fsr << 3), "Init accel fsr");
+	if (error) {
+		dev_err(mpu_data->dev, "fail to configure accel FSR\n");
+		goto err_free_input;
+	}
+
+	/* Gyro full-scale range: 3 -> 2000 deg/s (131 LSB per deg/s) */
+	pdata->gyro_fsr = 3;
+	error = MPU6050_WRITE(mpu_data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_GYRO_CONFIG, (pdata->gyro_fsr << 3),
+		"Init gyro fsr");
+	if (error) {
+		dev_err(mpu_data->dev, "fail to configure gyro FSR\n");
+		goto err_free_input;
+	}
+
+	/*
+	 * Sample rate = 1000 / (1 + SMPLRT_DIV):
+	 *   0x04 -> 200 Hz, 0x07 -> 125 Hz, 0x09 -> 100 Hz, 0x13 -> 50 Hz.
+	 * 0x09 selects 100 Hz.
+	 */
+	error = MPU6050_WRITE(mpu_data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_SMPLRT_DIV, 0x09, "Init sample rate");
+	if (error) {
+		dev_err(mpu_data->dev, "fail to configure sample rate\n");
+		goto err_free_input;
+	}
+
+	/* Initialize the Input Subsystem configuration */
+	input_dev->name = MPU6050_NAME;
+	input_dev->id.bustype = mpu_data->bus_ops->bustype;
+
+	__set_bit(EV_ABS, input_dev->evbit);
+
+	input_set_abs_params(input_dev, ABS_X,
+		-MPU6050_ACCEL_RANGE_MAX,
+		MPU6050_ACCEL_RANGE_MAX, pdata->x_axis, 0);
+	input_set_abs_params(input_dev, ABS_Y,
+		-MPU6050_ACCEL_RANGE_MAX,
+		MPU6050_ACCEL_RANGE_MAX, pdata->y_axis, 0);
+	input_set_abs_params(input_dev, ABS_Z,
+		-MPU6050_ACCEL_RANGE_MAX,
+		MPU6050_ACCEL_RANGE_MAX, pdata->z_axis, 0);
+
+	input_set_abs_params(input_dev, ABS_RX,
+		-MPU6050_GYRO_RANGE_MAX,
+		MPU6050_GYRO_RANGE_MAX, pdata->x_axis, 0);
+	input_set_abs_params(input_dev, ABS_RY,
+		-MPU6050_GYRO_RANGE_MAX,
+		MPU6050_GYRO_RANGE_MAX, pdata->y_axis, 0);
+	input_set_abs_params(input_dev, ABS_RZ,
+		-MPU6050_GYRO_RANGE_MAX,
+		MPU6050_GYRO_RANGE_MAX, pdata->z_axis, 0);
+
+	input_set_drvdata(input_dev, mpu_data);
+
+	/* Reset Accelerometer signal path */
+	mpu6050_accel_reset(mpu_data);
+
+	/* Reset Gyroscope signal path */
+	mpu6050_gyro_reset(mpu_data);
+
+	error = request_threaded_irq(mpu_data->irq, NULL,
+			mpu6050_thread_irq,
+			IRQF_TRIGGER_RISING | IRQF_SHARED,
+			MPU6050_NAME, mpu_data);
+	if (error) {
+		dev_err(mpu_data->dev, "request_threaded_irq failed\n");
+		goto err_free_input;
+	}
+
+	/* Register with the Input Subsystem */
+	error = input_register_device(mpu_data->input_dev);
+	if (error) {
+		dev_err(mpu_data->dev, "Unable to register input device\n");
+		goto err_free_irq;
+	}
+
+	/* Enable interrupt for requested mode */
+	error = MPU6050_WRITE(mpu_data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_INT_ENABLE, MPU6050_FF_INT, "Enable Intr");
+	if (error) {
+		dev_err(mpu_data->dev, "failed to enable interrupt\n");
+		goto err_unreg_device;
+	}
+
+	/* Clear interrupts for requested mode */
+	error = MPU6050_READ(mpu_data, MPU6050_CHIP_I2C_ADDR,
+		MPU6050_REG_INT_STATUS, 1, &val, "Clear Isr");
+	if (error) {
+		dev_err(mpu_data->dev, "fail to clear ISR\n");
+		goto err_unreg_device;
+	}
+
+	return mpu_data;
+
+err_unreg_device:
+	/* unregister drops the input-core reference: no free below */
+	input_unregister_device(mpu_data->input_dev);
+	free_irq(mpu_data->irq, mpu_data);
+	return ERR_PTR(error);
+err_free_irq:
+	free_irq(mpu_data->irq, mpu_data);
+err_free_input:
+	input_free_device(mpu_data->input_dev);
+err_out:
+	return ERR_PTR(error);
+}
+EXPORT_SYMBOL(mpu6050_accel_gyro_init);
+
+/*
+ * mpu6050_accel_gyro_exit - quiesce both sensors and release the IRQ and
+ * the input device.
+ *
+ * @data is owned by the bus glue (allocated in mpu6050_i2c_probe, freed
+ * in mpu6050_i2c_remove): the kfree(data) the original did here caused a
+ * double free.  Likewise input_free_device() after
+ * input_unregister_device() double-dropped the device reference, and the
+ * IRQ is now freed *before* the input device it reports into.
+ */
+void mpu6050_accel_gyro_exit(struct mpu6050_data *data)
+{
+	mpu6050_accel_set_standby(data, 1);
+	mpu6050_gyro_set_standby(data, 1);
+	free_irq(data->irq, data);
+	input_unregister_device(data->input_dev);
+}
+EXPORT_SYMBOL(mpu6050_accel_gyro_exit);
+
+/* fixed malformed author string: the closing '>' was missing */
+MODULE_AUTHOR("Kishore Kadiyala <kishore.kadiyala@ti.com>");
+MODULE_DESCRIPTION("MPU6050 I2c Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu6050_i2c.c b/drivers/input/misc/mpu6050_i2c.c
new file mode 100644
index 00000000000000..ad352708b4e6fd
--- /dev/null
+++ b/drivers/input/misc/mpu6050_i2c.c
@@ -0,0 +1,214 @@
+/*
+ * Implements I2C interface driver for INVENSENSE MPU6050
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Kishore Kadiyala <kishore.kadiyala@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/input/mpu6050.h>
+
+#include "mpu6050x.h"
+
+/**
+ * mpu6050_i2c_write - Writes to register in MPU6050
+ * @dev: Driver model device node for the slave
+ * @addr: Address used on the I2C bus connected to the parent adapter
+ * @reg: i2c register address value
+ * @val: value to be written to @reg
+ * @msg: debug message string
+ *
+ * Retries up to MPU6050_MAX_RW_RETRIES times with a sleep between
+ * attempts.  Returns '0' on success, -EIO on persistent failure.
+ */
+static int mpu6050_i2c_write(struct device *dev, u8 addr, u8 reg,
+		u8 val, u8 *msg)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_adapter *i2c_adapt = client->adapter;
+	struct i2c_msg msgs[1];
+	u8 m[2] = {reg, val};
+	int err;
+	int tries = 0;
+
+	mdelay(1);
+	msgs[0].addr = addr;
+	msgs[0].flags = 0;
+	msgs[0].buf = m;
+	msgs[0].len = 2;
+
+	do {
+		err = i2c_transfer(i2c_adapt, msgs, 1);
+		if (err != 1) {
+			/* dev_warn instead of bare printk: proper log level
+			 * and device prefix */
+			dev_warn(&client->dev, "%s: retrying (%s)\n",
+				 __func__, msg);
+			msleep_interruptible(MPU6050_I2C_RETRY_DELAY);
+		}
+	} while ((err != 1) && (++tries < MPU6050_MAX_RW_RETRIES));
+
+	if (err != 1) {
+		dev_err(&client->dev,
+			"%s failed (%s, %d)\n", __func__, msg, err);
+		err = -EIO;
+	} else {
+		err = 0;
+	}
+
+	return err;
+}
+
+/**
+ * mpu6050_i2c_read - Read from register in MPU6050
+ * @dev: Driver model device node for the slave
+ * @addr: Address used on the I2C bus connected to the parent adapter
+ * @reg: i2c register address value
+ * @len: number of bytes to transfer
+ * @val: buffer receiving @len bytes
+ * @msg: debug message string
+ *
+ * Write-then-read transfer with the same retry policy as
+ * mpu6050_i2c_write().  Returns '0' on success, -EIO on failure.
+ */
+static int mpu6050_i2c_read(struct device *dev, u8 addr, u8 reg, u8 len,
+		u8 *val, u8 *msg)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_adapter *i2c_adapt = client->adapter;
+	struct i2c_msg msgs[2];
+	int err;
+	int tries = 0;
+
+	mdelay(1);
+	msgs[0].addr = addr;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg;
+	msgs[0].len = 1;
+
+	msgs[1].addr = addr;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = val;
+	msgs[1].len = len;
+
+	do {
+		err = i2c_transfer(i2c_adapt, msgs, 2);
+		if (err != 2) {
+			/* dev_warn instead of bare printk: proper log level
+			 * and device prefix */
+			dev_warn(&client->dev, "%s: retrying (%s)\n",
+				 __func__, msg);
+			msleep_interruptible(MPU6050_I2C_RETRY_DELAY);
+		}
+	} while ((err != 2) && (++tries < MPU6050_MAX_RW_RETRIES));
+
+	if (err != 2) {
+		dev_err(&client->dev,
+			"%s failed (%s, %d)\n", __func__, msg, err);
+		err = -EIO;
+	} else {
+		err = 0;
+	}
+
+	return err;
+}
+
+/* bus accessors handed to the core via mpu6050_init() */
+static const struct mpu6050_bus_ops mpu6050_i2c_bops = {
+	.bustype	= BUS_I2C,
+	.read		= mpu6050_i2c_read,
+	.write		= mpu6050_i2c_write,
+};
+
+/**
+ * mpu6050_i2c_probe - device detection callback
+ * @client: i2c client of found device
+ * @id: id match information
+ *
+ * The I2C layer calls us when it believes a sensor is present at this
+ * address. Probe to see if this is correct and to validate the device.
+ * On failure the clientdata pointer is cleared so no dangling reference
+ * to the freed state remains on the client.
+ */
+static int __devinit mpu6050_i2c_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct mpu6050_data *data;
+	int ret;
+
+	dev_info(&client->dev, "%s\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("%s: need I2C_FUNC_I2C\n", __func__);
+		return -ENODEV;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->client = client;
+	i2c_set_clientdata(client, data);
+
+	ret = mpu6050_init(data, &mpu6050_i2c_bops);
+	if (ret) {
+		dev_err(&client->dev, "%s failed\n", __func__);
+		goto err;
+	}
+	return 0;
+err:
+	i2c_set_clientdata(client, NULL);
+	kfree(data);
+	return ret;
+}
+
+/**
+ * mpu6050_i2c_remove - remove a sensor
+ * @client: i2c client of sensor being removed
+ *
+ * Tears down the core driver state and frees @data, which this glue
+ * layer allocated in probe and therefore owns.
+ */
+static int __devexit mpu6050_i2c_remove(struct i2c_client *client)
+{
+	struct mpu6050_data *data = i2c_get_clientdata(client);
+
+	mpu6050_exit(data);
+	i2c_set_clientdata(client, NULL);
+	kfree(data);
+
+	return 0;
+}
+
+/* device ids this driver binds to (matched against board i2c tables) */
+static const struct i2c_device_id mpu6050_i2c_id[] = {
+	{ MPU6050_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mpu6050_i2c_id);
+
+static struct i2c_driver mpu6050_i2c_driver = {
+	.driver = {
+		.name	= MPU6050_NAME,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= mpu6050_i2c_probe,
+	.remove		= __devexit_p(mpu6050_i2c_remove),
+	.id_table	= mpu6050_i2c_id,
+};
+
+static int __init mpu6050_i2c_init(void)
+{
+	return i2c_add_driver(&mpu6050_i2c_driver);
+}
+module_init(mpu6050_i2c_init);
+
+static void __exit mpu6050_i2c_exit(void)
+{
+	i2c_del_driver(&mpu6050_i2c_driver);
+}
+/* added the missing ';' after module_exit() */
+module_exit(mpu6050_i2c_exit);
+
+/* fixed malformed author string: the closing '>' was missing */
+MODULE_AUTHOR("Kishore Kadiyala <kishore.kadiyala@ti.com>");
+MODULE_DESCRIPTION("MPU6050 I2c Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu6050x.h b/drivers/input/misc/mpu6050x.h
new file mode 100644
index 00000000000000..304a7f22abc107
--- /dev/null
+++ b/drivers/input/misc/mpu6050x.h
@@ -0,0 +1,192 @@
+/*
+ * INVENSENSE MPU6050 driver
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Kishore Kadiyala <kishore.kadiyala@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_MPU6050X_H
+#define _LINUX_MPU6050X_H
+
+#include <linux/workqueue.h>
+
+/* MPU6050 REGISTERS */
+#define MPU6050_REG_AUX_VDDIO 0x01
+#define MPU6050_REG_SMPLRT_DIV 0x19
+#define MPU6050_REG_CONFIG 0x1a
+#define MPU6050_REG_GYRO_CONFIG 0x1b
+#define MPU6050_REG_ACCEL_CONFIG 0x1c
+#define MPU6050_REG_ACCEL_FF_THR 0x1d
+#define MPU6050_REG_ACCEL_FF_DUR 0x1e
+#define MPU6050_REG_ACCEL_MOT_THR 0x1f
+#define MPU6050_REG_ACCEL_MOT_DUR 0x20
+#define MPU6050_REG_ACCEL_ZRMOT_THR 0x21
+#define MPU6050_REG_ACCEL_ZRMOT_DUR 0x22
+#define MPU6050_REG_FIFO_EN 0x23
+#define MPU6050_REG_I2C_MST_CTRL 0x24
+#define MPU6050_REG_I2C_SLV0_ADDR 0x25
+#define MPU6050_REG_I2C_SLV0_REG 0x26
+#define MPU6050_REG_I2C_SLV0_CTRL 0x27
+#define MPU6050_REG_I2C_SLV1_ADDR 0x28
+#define MPU6050_REG_I2C_SLV1_REG 0x29
+#define MPU6050_REG_I2C_SLV1_CTRL 0x2a
+#define MPU6050_REG_I2C_SLV2_ADDR 0x2B
+#define MPU6050_REG_I2C_SLV2_REG 0x2c
+#define MPU6050_REG_I2C_SLV2_CTRL 0x2d
+#define MPU6050_REG_I2C_SLV3_ADDR 0x2E
+#define MPU6050_REG_I2C_SLV3_REG 0x2f
+#define MPU6050_REG_I2C_SLV3_CTRL 0x30
+#define MPU6050_REG_I2C_SLV4_ADDR 0x31
+#define MPU6050_REG_I2C_SLV4_REG 0x32
+#define MPU6050_REG_I2C_SLV4_DO 0x33
+#define MPU6050_REG_I2C_SLV4_CTRL 0x34
+#define MPU6050_REG_I2C_SLV4_DI 0x35
+#define MPU6050_REG_I2C_MST_STATUS 0x36
+#define MPU6050_REG_INT_PIN_CFG 0x37
+#define MPU6050_REG_INT_ENABLE 0x38
+#define MPU6050_REG_INT_STATUS 0x3A
+#define MPU6050_REG_ACCEL_XOUT_H 0x3B
+#define MPU6050_REG_ACCEL_XOUT_L 0x3c
+#define MPU6050_REG_ACCEL_YOUT_H 0x3d
+#define MPU6050_REG_ACCEL_YOUT_L 0x3e
+#define MPU6050_REG_ACCEL_ZOUT_H 0x3f
+#define MPU6050_REG_ACCEL_ZOUT_L 0x40
+#define MPU6050_REG_TEMP_OUT_H 0x41
+#define MPU6050_REG_TEMP_OUT_L 0x42
+#define MPU6050_REG_GYRO_XOUT_H 0x43
+#define MPU6050_REG_GYRO_XOUT_L 0x44
+#define MPU6050_REG_GYRO_YOUT_H 0x45
+#define MPU6050_REG_GYRO_YOUT_L 0x46
+#define MPU6050_REG_GYRO_ZOUT_H 0x47
+#define MPU6050_REG_GYRO_ZOUT_L 0x48
+#define MPU6050_REG_EXT_SLV_SENS_DATA_00 0x49
+#define MPU6050_REG_EXT_SLV_SENS_DATA_01 0x4a
+#define MPU6050_REG_EXT_SLV_SENS_DATA_02 0x4b
+#define MPU6050_REG_EXT_SLV_SENS_DATA_03 0x4c
+#define MPU6050_REG_EXT_SLV_SENS_DATA_04 0x4d
+#define MPU6050_REG_EXT_SLV_SENS_DATA_05 0x4e
+#define MPU6050_REG_EXT_SLV_SENS_DATA_06 0x4F
+#define MPU6050_REG_EXT_SLV_SENS_DATA_07 0x50
+#define MPU6050_REG_EXT_SLV_SENS_DATA_08 0x51
+#define MPU6050_REG_EXT_SLV_SENS_DATA_09 0x52
+#define MPU6050_REG_EXT_SLV_SENS_DATA_10 0x53
+#define MPU6050_REG_EXT_SLV_SENS_DATA_11 0x54
+#define MPU6050_REG_EXT_SLV_SENS_DATA_12 0x55
+#define MPU6050_REG_EXT_SLV_SENS_DATA_13 0x56
+#define MPU6050_REG_EXT_SLV_SENS_DATA_14 0x57
+#define MPU6050_REG_EXT_SLV_SENS_DATA_15 0x58
+#define MPU6050_REG_EXT_SLV_SENS_DATA_16 0x59
+#define MPU6050_REG_EXT_SLV_SENS_DATA_17 0x5a
+#define MPU6050_REG_EXT_SLV_SENS_DATA_18 0x5b
+#define MPU6050_REG_EXT_SLV_SENS_DATA_19 0x5c
+#define MPU6050_REG_EXT_SLV_SENS_DATA_20 0x5d
+#define MPU6050_REG_EXT_SLV_SENS_DATA_21 0x5e
+#define MPU6050_REG_EXT_SLV_SENS_DATA_22 0x5f
+#define MPU6050_REG_EXT_SLV_SENS_DATA_23 0x60
+//#define MPU6050_REG_ACCEL_INTEL_STATUS 0x61
+//#define MPU6050_REG_62_RSVD 0x62
+#define MPU6050_REG_I2C_SLV0_DO 0x63
+#define MPU6050_REG_I2C_SLV1_DO 0x64
+#define MPU6050_REG_I2C_SLV2_DO 0x65
+#define MPU6050_REG_I2C_SLV3_DO 0x66
+#define MPU6050_REG_I2C_MST_DELAY_CTRL 0x67
+#define MPU6050_REG_SIGNAL_PATH_RESET 0x68
+#define MPU6050_REG_ACCEL_INTEL_CTRL 0x69
+#define MPU6050_REG_USER_CTRL 0x6a
+#define MPU6050_REG_PWR_MGMT_1 0x6b
+#define MPU6050_REG_PWR_MGMT_2 0x6c
+#define MPU6050_REG_FIFO_COUNTH 0x72
+#define MPU6050_REG_FIFO_COUNTL 0x73
+#define MPU6050_REG_FIFO_R_W 0x74
+#define MPU6050_REG_WHOAMI 0x75
+
+
+/* MPU6050 REGISTER MASKS */
+#define MPU6050_CHIP_I2C_ADDR 0x68
+#define MPU6050_DEVICE_RESET BIT(7)
+#define MPU6050_DEVICE_SLEEP_MODE BIT(6)
+#define MPU6050_FF_MODE BIT(7)
+#define MPU6050_MD_MODE BIT(6)
+#define MPU6050_ZM_MODE BIT(5)
+#define MPU6050_ACCEL_SP_RESET BIT(1)
+#define MPU6050_GYRO_SP_RESET BIT(2)
+#define MPU6050_STBY_XA BIT(5)
+#define MPU6050_STBY_YA BIT(4)
+#define MPU6050_STBY_ZA BIT(3)
+#define MPU6050_STBY_XG BIT(2)
+#define MPU6050_STBY_YG BIT(1)
+#define MPU6050_STBY_ZG BIT(0)
+#define MPU6050_FF_INT BIT(0)
+#define MPU6050_MOT_INT BIT(6)
+#define MPU6050_ZMOT_INT BIT(5)
+#define MPU6050_DATARDY_INT BIT(0)
+#define MPU6050_PASS_THROUGH BIT(7)
+#define MPU6050_I2C_BYPASS_EN BIT(1)
+#define MPU6050_I2C_FSYNC_LEVEL BIT(3)
+#define MPU6050_I2C_FSYNC_EN BIT(4)
+
+#define MPU6050_PIN_CFG_LATCH_EN BIT(5)
+
+#define MPU6050_I2C_MST_EN BIT(5)
+#define MPU6050_CLOCKSEL_SRC_INT (BIT(0) | BIT(1) | BIT(2))
+
+#define MPU6050_REG_CONFIG_DLPF (BIT(0) | BIT(1) | BIT(2))
+
+#define MPU6050_CLOCKSEL_SRC_INTERNAL 0x00
+#define MPU6050_CLOCKSEL_SRC_PLL_X 0x01
+#define MPU6050_CLOCKSEL_SRC_PLL_Y 0x02
+#define MPU6050_CLOCKSEL_SRC_PLL_Z 0x03
+#define MPU6050_CLOCKSEL_SRC_PLLEXT32 0x04
+#define MPU6050_CLOCKSEL_SRC_PLLEXT19 0x05
+#define MPU6050_CLOCKSEL_SRC_STOP 0x07
+
+#define MPU6050_BANDWIDTH_GYRO_42HZ_ACC_44HZ 0x03
+
+#define MPU6050_READ(data, addr, reg, len, val, msg) \
+	((data)->bus_ops->read((data)->dev, addr, reg, len, val, msg))
+#define MPU6050_WRITE(data, addr, reg, val, msg) \
+ ((data)->bus_ops->write(data->dev, addr, reg, val, msg))
+
+#define MPU6050_MAX_RW_RETRIES 5
+#define MPU6050_I2C_RETRY_DELAY 10
+
+struct mpu6050_bus_ops {
+ uint16_t bustype;
+ int (*read)(struct device *, u8, u8, u8, u8 *, u8 *);
+ int (*write)(struct device *, u8, u8, u8, u8 *);
+};
+
+struct mpu6050_data {
+ struct i2c_client *client;
+ struct device *dev;
+ struct input_dev *input_dev;
+ const struct mpu6050_bus_ops *bus_ops;
+ struct mpu6050_platform_data *pdata;
+ int irq;
+ struct mutex mutex;
+ bool suspended;
+ struct work_struct work;
+};
+
+
+int mpu6050_init(struct mpu6050_data *data,
+ const struct mpu6050_bus_ops *bops);
+void mpu6050_exit(struct mpu6050_data *);
+
+struct mpu6050_data *mpu6050_accel_gyro_init(
+ struct mpu6050_data *mpu_data);
+void mpu6050_accel_gyro_exit(struct mpu6050_data *data);
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 00000000000000..5729602cbb63a4
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/keyboard/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ finger2_pressed = finger > 1 && finger != 7;
+ input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+ }
+
+ if (!finger)
+ z = 0;
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ } else if (ts->reported_finger_count > 1) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+ }
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(BTN_2, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __init synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c
new file mode 100644
index 00000000000000..f16404212152c8
--- /dev/null
+++ b/drivers/leds/ledtrig-sleep.c
@@ -0,0 +1,80 @@
+/* drivers/leds/ledtrig-sleep.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/leds.h>
+#include <linux/suspend.h>
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored);
+
+DEFINE_LED_TRIGGER(ledtrig_sleep)
+static struct notifier_block ledtrig_sleep_pm_notifier = {
+ .notifier_call = ledtrig_sleep_pm_callback,
+ .priority = 0,
+};
+
+static void ledtrig_sleep_early_suspend(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+}
+
+static void ledtrig_sleep_early_resume(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+}
+
+static struct early_suspend ledtrig_sleep_early_suspend_handler = {
+ .suspend = ledtrig_sleep_early_suspend,
+ .resume = ledtrig_sleep_early_resume,
+};
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+ return NOTIFY_OK;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __init ledtrig_sleep_init(void)
+{
+ led_trigger_register_simple("sleep", &ledtrig_sleep);
+ register_pm_notifier(&ledtrig_sleep_pm_notifier);
+ register_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ return 0;
+}
+
+static void __exit ledtrig_sleep_exit(void)
+{
+ unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
+ led_trigger_unregister_simple(ledtrig_sleep);
+}
+
+module_init(ledtrig_sleep_init);
+module_exit(ledtrig_sleep_exit);
+
diff --git a/drivers/media/dvb/dvb-usb/dib0700_irq.h b/drivers/media/dvb/dvb-usb/dib0700_irq.h
new file mode 100644
index 00000000000000..411b6aa1bc34e8
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/dib0700_irq.h
@@ -0,0 +1,26 @@
+#if defined(CONFIG_DVB_USB_DIB0700) || defined(CONFIG_DVB_USB_DIB0700_MODULE)
+
+extern int
+dib_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags1,
+ const char *name, void *dev);
+extern void dib_free_irq(unsigned int irq, void *dev_id);
+extern void dib_enable_irq(unsigned int irq);
+extern void dib_disable_irq(unsigned int irq);
+
+#else
+static inline int dib_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags1, const char *name, void *dev)
+{
+	printk(KERN_ERR "Can't request dib irq, dib0700 is module\n");
+ return -EINVAL;
+}
+static inline void dib_free_irq(unsigned int irq, void *dev_id)
+{
+}
+static inline void dib_enable_irq(unsigned int irq)
+{
+}
+static inline void dib_disable_irq(unsigned int irq)
+{
+}
+
+#endif
diff --git a/drivers/media/video/adv7391.c b/drivers/media/video/adv7391.c
new file mode 100644
index 00000000000000..12d33704701faf
--- /dev/null
+++ b/drivers/media/video/adv7391.c
@@ -0,0 +1,407 @@
+/*
+ * adv7391.c
+ *
+ * Driver for Analog device's 7391
+ *
+ * Author : Nicolas Laclau <nicolas.laclau@parrot.com>
+ *
+ * Date : 22 Jan. 2015
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+
+#include <media/adv7391.h>
+
+/* Driver private state: only the bound I2C client is tracked. */
+struct adv7391 {
+	struct i2c_client *client;
+};
+
+/* Register maps */
+/* One register-address/value pair for table-driven initialization. */
+struct reg_cfg {
+	u8 reg;
+	u8 val;
+};
+
+/* ------------------------------------------------------------------------- */
+/* Read one byte-wide register over SMBus.
+ * Returns the register value (0..255) on success, -EIO on bus error. */
+static s32 adv7391_i2c_read_byte(struct i2c_client *client,
+				 u8 command)
+{
+	union i2c_smbus_data data;
+	s32 status;
+
+	status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+				I2C_SMBUS_READ, command,
+				I2C_SMBUS_BYTE_DATA, &data);
+	return status ? -EIO : data.byte;
+}
+
+/* Write one byte-wide register over SMBus, retrying up to three times.
+ * Returns 0 on success or the last negative error code. */
+static s32 adv7391_i2c_write_byte(struct i2c_client *client,
+				  u8 command,
+				  u8 value)
+{
+	union i2c_smbus_data data;
+	int err;
+	int i;
+
+	data.byte = value;
+	/* retry: transient NAKs are common right after reset */
+	for (i = 0; i < 3; i++) {
+		err = i2c_smbus_xfer(client->adapter, client->addr,
+				client->flags,
+				I2C_SMBUS_WRITE, command,
+				I2C_SMBUS_BYTE_DATA, &data);
+		if (!err)
+			break;
+	}
+	if (err < 0)
+		dev_err(&client->dev, "error writing addr:%02x, reg:%02x,"
+				" val:%02x\n",
+				client->addr, command, value);
+	return err;
+}
+}
+
+/*
+ * Write an array of register/value pairs; stops at the first failure.
+ * Returns 0 on success or the negative error code of the failing write.
+ * (Whitespace fixed to kernel coding style: "for (i = 0; ...", "if (...".)
+ */
+static s32 adv7391_write_reg_array(struct i2c_client *client,
+				   struct reg_cfg *array, int nb)
+{
+	s32 ret;
+	int i;
+
+	for (i = 0; i < nb; i++) {
+		ret = adv7391_i2c_write_byte(client,
+					     array[i].reg,
+					     array[i].val);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * Program the software reset plus the board-specific defaults taken from
+ * platform data.  Returns 0 on success, -EINVAL when no platform data was
+ * supplied, or a negative I2C error code.
+ */
+static s32 adv7391_setup_default(struct adv7391 *adv)
+{
+	struct adv7391_platform_data *pdata = adv->client->dev.platform_data;
+	struct reg_cfg cfg[7];
+
+	/* The original dereferenced pdata unconditionally and would oops
+	 * on a board that registered the device without platform data. */
+	if (!pdata)
+		return -EINVAL;
+
+	cfg[0].reg = ADV7391_REG_SW_RESET;    cfg[0].val = ADV7391_FLG_SW_RESET;
+	cfg[1].reg = ADV7391_REG_PWR_MODE;    cfg[1].val = pdata->dac_pwr;
+	cfg[2].reg = ADV7391_REG_DAC_OUT_LVL; cfg[2].val = pdata->dac_gain;
+	cfg[3].reg = ADV7391_REG_SD_MODE1;    cfg[3].val = pdata->sd_mode1;
+	cfg[4].reg = ADV7391_REG_SD_MODE2;    cfg[4].val = pdata->sd_mode2;
+	cfg[5].reg = ADV7391_REG_SD_MODE3;    cfg[5].val = pdata->sd_mode3;
+	cfg[6].reg = ADV7391_REG_SD_SCL_LSB;  cfg[6].val = pdata->scale_lsb;
+
+	return adv7391_write_reg_array(adv->client, cfg, ARRAY_SIZE(cfg));
+}
+
+/* ------------------------------------------------------------------------- */
+/* attributes operations */
+/*
+ * Parse a user-supplied sysfs string as an 8-bit value and write it to @reg.
+ * Returns @count on success or a negative error code.
+ */
+static s32 adv7391_write_config(struct i2c_client *client, u8 reg,
+				const char *buf, size_t count)
+{
+	s32 res;
+	ulong val;
+
+	/* force null terminated string
+	 * NOTE(review): assumes the sysfs page is zero-filled past count
+	 * so buf[count] is readable — standard for sysfs stores. */
+	if (buf[count] != '\0')
+		return -EINVAL;
+
+	res = kstrtoul(buf, 0, &val);
+	if (res < 0)
+		return res;
+
+	/* val is unsigned, so only the upper bound can be exceeded (the
+	 * original "val < 0x0000" half of the test was dead code). */
+	if (val > 0x00FF)
+		return -EINVAL;
+
+	res = adv7391_i2c_write_byte(client, reg, (u8)(val & 0x00FF));
+	if (res < 0)
+		return res;
+
+	return count;
+}
+
+/*
+ * sysfs "enable" read: report DAC power state as a bitmask
+ * (bit 0 = DAC1, bit 1 = DAC2, bit 2 = DAC3).
+ */
+static ssize_t adv7391_g_enable(struct device *device,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res = 0, ret = 0;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_PWR_MODE);
+	if (res < 0)
+		return res;
+
+	if (res & ADV7391_FLG_DAC1_PWR)
+		ret |= 1;
+	if (res & ADV7391_FLG_DAC2_PWR)
+		ret |= 2;
+	/* copy-paste bug fixed: the original tested DAC2 twice, so the
+	 * DAC3 state was never reported */
+	if (res & ADV7391_FLG_DAC3_PWR)
+		ret |= 4;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", ret);
+}
+
+/*
+ * sysfs "enable" write: bits 0..2 of the parsed value switch DAC1..DAC3
+ * power; all other PWR_MODE bits are preserved (read-modify-write).
+ */
+static ssize_t adv7391_s_enable(struct device *device,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t count)
+{
+	s32 res;
+	ulong val;
+	struct adv7391 *adv = dev_get_drvdata(device);
+
+	/* force null terminated string */
+	if (buf[count] != '\0')
+		return -EINVAL;
+
+	res = kstrtoul(buf, 0, &val);
+	if (res < 0)
+		return res;
+
+	/* val is unsigned, so only the upper bound can be exceeded (the
+	 * original "val < 0x0000" half of the test was dead code). */
+	if (val > 0x00FF)
+		return -EINVAL;
+
+	/* get current register value */
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_PWR_MODE);
+	if (res < 0)
+		return res;
+
+	/* clear DAC1, DAC2 and DAC3 flags */
+	res &= ~(ADV7391_FLG_DAC1_PWR |
+		 ADV7391_FLG_DAC2_PWR |
+		 ADV7391_FLG_DAC3_PWR);
+
+	/* Set new value requested for DACs */
+	if (val & 0x01)
+		res |= ADV7391_FLG_DAC1_PWR;
+	if (val & 0x02)
+		res |= ADV7391_FLG_DAC2_PWR;
+	if (val & 0x04)
+		res |= ADV7391_FLG_DAC3_PWR;
+
+	res = adv7391_i2c_write_byte(adv->client, ADV7391_REG_PWR_MODE,
+				     (u8)(res & 0x00FF));
+	if (res < 0)
+		return res;
+
+	return count;
+}
+
+/* sysfs "control" read: report the SD Mode 6 register as hex. */
+static ssize_t adv7391_g_ctrl(struct device *device,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_SD_MODE6);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", res);
+}
+
+/* sysfs "control" write: parse an 8-bit value into SD Mode 6. */
+static ssize_t adv7391_s_ctrl(struct device *device,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	return adv7391_write_config(adv->client, ADV7391_REG_SD_MODE6,
+			buf, count);
+}
+
+/* sysfs "brightness" read: report the SD brightness register as hex. */
+static ssize_t adv7391_g_brightness(struct device *device,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_SD_BRIGHT);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", res);
+}
+
+/* sysfs "brightness" write: parse an 8-bit value into SD brightness. */
+static ssize_t adv7391_s_brightness(struct device *device,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	return adv7391_write_config(adv->client, ADV7391_REG_SD_BRIGHT,
+			buf, count);
+}
+
+/* sysfs "luma" read: report the SD Y-scale register as hex. */
+static ssize_t adv7391_g_luma(struct device *device,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_SD_Y_SCL);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", res);
+}
+
+/* sysfs "luma" write: parse an 8-bit value into the SD Y-scale register. */
+static ssize_t adv7391_s_luma(struct device *device,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	return adv7391_write_config(adv->client, ADV7391_REG_SD_Y_SCL,
+			buf, count);
+}
+
+/* sysfs "chroma_b" read: report the SD Cb-scale register as hex. */
+static ssize_t adv7391_g_cb_scl(struct device *device,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_SD_CB_SCL);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", res);
+}
+
+/* sysfs "chroma_r" read: report the SD Cr-scale register as hex. */
+static ssize_t adv7391_g_cr_scl(struct device *device,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	int res;
+
+	res = adv7391_i2c_read_byte(adv->client, ADV7391_REG_SD_CR_SCL);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", res);
+}
+
+/* sysfs "chroma_b" write: parse an 8-bit value into the SD Cb-scale. */
+static ssize_t adv7391_s_cb_scl(struct device *device,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t count)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	return adv7391_write_config(adv->client, ADV7391_REG_SD_CB_SCL,
+			buf, count);
+}
+
+/* sysfs "chroma_r" write: parse an 8-bit value into the SD Cr-scale. */
+static ssize_t adv7391_s_cr_scl(struct device *device,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t count)
+{
+	struct adv7391 *adv = dev_get_drvdata(device);
+	return adv7391_write_config(adv->client, ADV7391_REG_SD_CR_SCL,
+			buf, count);
+}
+
+/* sysfs attributes created on the I2C device in probe (0644 each). */
+static struct device_attribute adv7391_sysfs_attrs[] = {
+	__ATTR(enable, S_IRUGO|S_IWUSR, adv7391_g_enable, adv7391_s_enable),
+	__ATTR(control, S_IRUGO|S_IWUSR, adv7391_g_ctrl, adv7391_s_ctrl),
+	__ATTR(brightness, S_IRUGO|S_IWUSR, adv7391_g_brightness, adv7391_s_brightness),
+	__ATTR(luma, S_IRUGO|S_IWUSR, adv7391_g_luma, adv7391_s_luma),
+	__ATTR(chroma_b, S_IRUGO|S_IWUSR, adv7391_g_cb_scl, adv7391_s_cb_scl),
+	__ATTR(chroma_r, S_IRUGO|S_IWUSR, adv7391_g_cr_scl, adv7391_s_cr_scl),
+};
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Probe: verify SMBus byte-data support, allocate driver state, create the
+ * sysfs attributes and program the default register set.  All resources are
+ * released on every failure path.
+ */
+static int __devinit adv7391_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct adv7391 *adv = NULL;
+	int ret = 0;
+	int i;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		ret = -EIO;
+		goto ei2c;
+	}
+
+	/* allocate context memory */
+	adv = kzalloc(sizeof(*adv), GFP_KERNEL);
+	if (!adv) {
+		dev_err(&client->dev, "Failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto enomem;
+	}
+
+	adv->client = client;
+
+	for (i = 0; i < ARRAY_SIZE(adv7391_sysfs_attrs); i++) {
+		ret = device_create_file(&client->dev,
+				&adv7391_sysfs_attrs[i]);
+		if (ret) {
+			/* roll back the attributes created so far */
+			for (--i; i >= 0; i--)
+				device_remove_file(&client->dev,
+						&adv7391_sysfs_attrs[i]);
+			goto efail;
+		}
+	}
+
+	ret = adv7391_setup_default(adv);
+	if (ret < 0) {
+		/* The original returned the error but leaked "adv" and left
+		 * the sysfs attributes registered. */
+		for (i = 0; i < ARRAY_SIZE(adv7391_sysfs_attrs); i++)
+			device_remove_file(&client->dev,
+					&adv7391_sysfs_attrs[i]);
+		goto efail;
+	}
+
+	dev_info(&client->dev, "detected i2c-device: 0x%02x\n", client->addr);
+
+	i2c_set_clientdata(client, adv);
+
+	return 0;
+efail:
+	kfree(adv);
+enomem:
+ei2c:
+	return ret;
+}
+
+/* Tear down: drop all sysfs attributes and free the driver state. */
+static int __devexit adv7391_remove(struct i2c_client *client)
+{
+	struct adv7391 *adv = i2c_get_clientdata(client);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adv7391_sysfs_attrs); i++)
+		device_remove_file(&client->dev, &adv7391_sysfs_attrs[i]);
+
+	kfree(adv);
+	return 0;
+}
+
+static struct i2c_device_id adv7391_id[] = {
+ { ADV7391_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, adv7391_id);
+
+static struct i2c_driver adv7391_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = ADV7391_NAME,
+ },
+ .id_table = adv7391_id,
+ .probe = adv7391_probe,
+ .remove = adv7391_remove,
+};
+
+module_i2c_driver(adv7391_driver);
+
+MODULE_AUTHOR("Nicolas Laclau <nicolas.laclau@parrot.com>");
+MODULE_DESCRIPTION("Analog device's 7391 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/adv7604.c b/drivers/media/video/adv7604.c
new file mode 100644
index 00000000000000..79212dd087dd41
--- /dev/null
+++ b/drivers/media/video/adv7604.c
@@ -0,0 +1,3281 @@
+/*
+ * adv7604 - Analog Devices ADV7604 video decoder driver
+ *
+ * Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Analog devices, ADV7604, Register Settings Recommendations,
+ * Revision 2.5, June 2010
+ * REF_02 - Analog devices, Register map documentation, Documentation of
+ * the register maps, Software manual, Rev. F, June 2010
+ * REF_03 - Analog devices, ADV7604, Hardware Manual, Rev. F, August 2010
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/adv7604.h>
+#include <linux/interrupt.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+MODULE_DESCRIPTION("Analog Devices ADV7604 video decoder driver");
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
+MODULE_LICENSE("GPL");
+
+/* ADV7604 system clock frequency */
+#define ADV7604_fsc (28636360)
+
+#define DIGITAL_INPUT (state->mode == ADV7604_MODE_HDMI)
+
+enum adv7604_type {
+ ADV7604,
+ ADV7611,
+};
+
+struct adv7604_reg_seq {
+ unsigned int reg;
+ u8 val;
+};
+
+struct adv7604_chip_info {
+ unsigned int edid_ctrl_reg;
+ unsigned int edid_status_reg;
+ unsigned int lcf_reg;
+
+ unsigned int cable_det_mask;
+ unsigned int tdms_lock_mask;
+ unsigned int fmt_change_digital_mask;
+
+ bool has_afe;
+
+ void (*set_termination)(struct v4l2_subdev *sd, bool enable);
+ void (*setup_irqs)(struct v4l2_subdev *sd);
+ unsigned int (*read_hdmi_pixelclock)(struct v4l2_subdev *sd);
+
+ /* 0 = AFE, 1 = HDMI */
+ const struct adv7604_reg_seq *recommended_settings[2];
+ unsigned int num_recommended_settings[2];
+
+ unsigned long page_mask;
+};
+
+/*
+ **********************************************************************
+ *
+ * Arrays with configuration parameters for the ADV7604
+ *
+ **********************************************************************
+ */
+struct adv7604_state {
+ const struct adv7604_chip_info *info;
+ struct adv7604_platform_data pdata;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler hdl;
+ enum adv7604_mode mode;
+ struct v4l2_dv_timings timings;
+ u8 edid[256];
+ unsigned edid_blocks;
+ struct v4l2_fract aspect_ratio;
+ u32 rgb_quantization_range;
+ struct workqueue_struct *work_queues;
+ struct delayed_work delayed_work_enable_hotplug;
+ bool connector_hdmi;
+ bool restart_stdi_once;
+
+ /* i2c clients */
+ struct i2c_client *i2c_avlink;
+ struct i2c_client *i2c_cec;
+ struct i2c_client *i2c_infoframe;
+ struct i2c_client *i2c_esdp;
+ struct i2c_client *i2c_dpp;
+ struct i2c_client *i2c_afe;
+ struct i2c_client *i2c_repeater;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_hdmi;
+ struct i2c_client *i2c_test;
+ struct i2c_client *i2c_cp;
+ struct i2c_client *i2c_vdp;
+
+ /* controls */
+ struct v4l2_ctrl *detect_tx_5v_ctrl;
+ struct v4l2_ctrl *analog_sampling_phase_ctrl;
+ struct v4l2_ctrl *free_run_color_manual_ctrl;
+ struct v4l2_ctrl *free_run_color_ctrl;
+ struct v4l2_ctrl *rgb_quantization_range_ctrl;
+
+ /* sysfs */
+ struct sysfs_dirent *cable;
+ struct sysfs_dirent *signal;
+ struct sysfs_dirent *fmt;
+ struct sysfs_dirent *audio;
+
+ /* v4l */
+ enum v4l2_mbus_type mbus_type;
+ enum v4l2_mbus_pixelcode code;
+
+ unsigned int fmt_change;
+ int irq;
+
+ struct v4l2_mbus_framefmt format;
+};
+
+
+struct adv7604_mbus_pixelcode {
+ /* Bus rotation and reordering */
+ enum adv7604_op_ch_sel op_ch_sel;
+ /* Select output format */
+ enum adv7604_op_format_sel op_format_sel;
+ unsigned int invert_cbcr;
+
+ enum v4l2_mbus_pixelcode code;
+};
+
+
+/* Mapping from media-bus pixel code to ADV7604 output channel order,
+ * output format selection and CbCr inversion.
+ * For details see http://linuxtv.org/downloads/v4l-dvb-apis/subdev.html
+ * NOTE(review): table is file-global and mutable; it looks like it could
+ * be "static const" — confirm there are no external users first. */
+struct adv7604_mbus_pixelcode pxcode [] = {
+	{ ADV7604_OP_CH_SEL_RGB, ADV7604_OP_FORMAT_SEL_SDR_ITU656_8, 0, V4L2_MBUS_FMT_UYVY8_2X8 },
+	{ ADV7604_OP_CH_SEL_RGB, ADV7604_OP_FORMAT_SEL_SDR_ITU656_8, 1, V4L2_MBUS_FMT_VYUY8_2X8 },
+	{ ADV7604_OP_CH_SEL_RGB, ADV7604_OP_FORMAT_SEL_SDR_ITU656_16, 0, V4L2_MBUS_FMT_YUYV8_1X16 },
+	{ ADV7604_OP_CH_SEL_RGB, ADV7604_OP_FORMAT_SEL_SDR_ITU656_16, 1, V4L2_MBUS_FMT_YVYU8_1X16 },
+	{ ADV7604_OP_CH_SEL_RBG, ADV7604_OP_FORMAT_SEL_SDR_ITU656_16, 0, V4L2_MBUS_FMT_UYVY8_1X16 },
+	{ ADV7604_OP_CH_SEL_RBG, ADV7604_OP_FORMAT_SEL_SDR_ITU656_16, 1, V4L2_MBUS_FMT_VYUY8_1X16 },
+};
+
+
+static inline bool no_signal(struct v4l2_subdev *sd);
+
+static bool adv7604_has_afe(struct adv7604_state *state)
+{
+ return state->info->has_afe;
+}
+
+/* Supported CEA and DMT timings */
+static const struct v4l2_dv_timings adv7604_timings[] = {
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+
+ /* sorted by DMT ID */
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P60,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1920X1080P60,
+ { },
+};
+
+struct adv7604_video_standards {
+ struct v4l2_dv_timings timings;
+ u8 vid_std;
+ u8 v_freq;
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_comp[] = {
+ { V4L2_DV_BT_CEA_720X480I59_94, 0x00, 0x00 },
+ { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 },
+ { V4L2_DV_BT_CEA_720X576I50, 0x01, 0x01 },
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x19, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x19, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080I50, 0x14, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080I60, 0x14, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ /* TODO add 1920x1080P60_RB (CVT timing) */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { V4L2_DV_BT_DMT_1360X768P60, 0x12, 0x00 },
+ { V4L2_DV_BT_DMT_1366X768P60, 0x13, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P60, 0x14, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P75, 0x15, 0x00 },
+ { V4L2_DV_BT_DMT_1600X1200P60, 0x16, 0x00 }, /* TODO not tested */
+ /* TODO add 1600X1200P60_RB (not a DMT timing) */
+ { V4L2_DV_BT_DMT_1680X1050P60, 0x18, 0x00 },
+ { V4L2_DV_BT_DMT_1920X1200P60_RB, 0x19, 0x00 }, /* TODO not tested */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_hdmi_comp[] = {
+ { V4L2_DV_BT_CEA_720X480I59_94, 0x00, 0x00 },
+ { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 },
+ { V4L2_DV_BT_CEA_720X576I50, 0x01, 0x01 },
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x13, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x13, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080I50, 0x14, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080I60, 0x14, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7604_video_standards adv7604_prim_mode_hdmi_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { },
+};
+
+/* ----------------------------------------------------------------------- */
+
+static inline struct adv7604_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7604_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7604_state, hdl)->sd;
+}
+
+static inline unsigned hblanking(const struct v4l2_bt_timings *t)
+{
+ return t->hfrontporch + t->hsync + t->hbackporch;
+}
+
+static inline unsigned htotal(const struct v4l2_bt_timings *t)
+{
+ return t->width + t->hfrontporch + t->hsync + t->hbackporch;
+}
+
+static inline unsigned vblanking(const struct v4l2_bt_timings *t)
+{
+ return t->vfrontporch + t->vsync + t->vbackporch;
+}
+
+static inline unsigned vtotal(const struct v4l2_bt_timings *t)
+{
+ return t->height + t->vfrontporch + t->vsync + t->vbackporch;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* SMBus byte read with optional error reporting.
+ * Returns the register value on success, -EIO on failure; @check selects
+ * whether a failure is logged. */
+static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
+		u8 command, bool check)
+{
+	union i2c_smbus_data data;
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+	if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			I2C_SMBUS_READ, command,
+			I2C_SMBUS_BYTE_DATA, &data)){
+		if(sd)
+			v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+					client->addr, command, data.byte);
+		return data.byte;
+	}
+	if (check)
+		v4l_err(client, "error reading 0x%02x, 0x%02x\n",
+				client->addr, command);
+	return -EIO;
+}
+
+static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
+{
+ return adv_smbus_read_byte_data_check(client, command, true);
+}
+
+static s32 adv_smbus_write_byte_data(struct i2c_client *client,
+ u8 command, u8 value)
+{
+ union i2c_smbus_data data;
+ int err;
+ int i;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ data.byte = value;
+ for (i = 0; i < 3; i++) {
+ err = i2c_smbus_xfer(client->adapter, client->addr,
+ client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (!err)
+ break;
+ }
+ if (err < 0)
+ v4l_err(client, "error writing 0x%02x, 0x%02x, 0x%02x\n",
+ client->addr, command, value);
+ else if(sd)
+ v4l2_dbg(2, debug, sd, "writing 0x%02x, 0x%02x, 0x%02x\n",
+ client->addr, command, value);
+
+ return err;
+}
+
+/* Write up to I2C_SMBUS_BLOCK_MAX bytes starting at @command.
+ * NOTE: a longer @length is silently truncated to the SMBus block limit;
+ * callers (EDID upload) chunk their data accordingly. */
+static s32 adv_smbus_write_i2c_block_data(struct i2c_client *client,
+		u8 command, unsigned length, const u8 *values)
+{
+	union i2c_smbus_data data;
+
+	if (length > I2C_SMBUS_BLOCK_MAX)
+		length = I2C_SMBUS_BLOCK_MAX;
+	data.block[0] = length;
+	memcpy(data.block + 1, values, length);
+	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			I2C_SMBUS_WRITE, command,
+			I2C_SMBUS_I2C_BLOCK_DATA, &data);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static inline int io_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_read_byte_data(client, reg);
+}
+
+static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_write_byte_data(client, reg, val);
+}
+
+static inline int io_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return io_write(sd, reg, (io_read(sd, reg) & mask) | val);
+}
+
+static inline int avlink_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_avlink, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_avlink->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_avlink->addr, reg);
+ return ret;
+}
+
+/* Write a register on the AVLink map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_avlink, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_avlink->addr, reg, val);
+
+	return ret;
+}
+
+static inline int cec_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_cec, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_cec->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_cec->addr, reg);
+ return ret;
+}
+
+/* Write a register on the CEC map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_cec, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_cec->addr, reg, val);
+	return ret;
+}
+
+static inline int cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return cec_write(sd, reg, (cec_read(sd, reg) & mask) | val);
+}
+
+static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_infoframe, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_infoframe->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_infoframe->addr, reg);
+ return ret;
+}
+
+/* Write a register on the InfoFrame map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_infoframe, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_infoframe->addr, reg, val);
+	return ret;
+}
+
+static inline int esdp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_esdp, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_esdp->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_esdp->addr, reg);
+ return ret;
+}
+
+/* Write a register on the ESDP map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int esdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_esdp, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_esdp->addr, reg, val);
+	return ret;
+}
+
+static inline int dpp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_dpp, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_dpp->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_dpp->addr, reg);
+ return ret;
+}
+
+/* Write a register on the DPP map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int dpp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_dpp, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_dpp->addr, reg, val);
+	return ret;
+}
+
+static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_afe, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_afe->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_afe->addr, reg);
+ return ret;
+}
+
+/* Write a register on the AFE map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int afe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_afe, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_afe->addr, reg, val);
+	return ret;
+}
+
+static inline int rep_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_repeater, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_repeater->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_repeater->addr, reg);
+ return ret;
+}
+
+/* Write a register on the HDCP repeater map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int rep_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_repeater, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_repeater->addr, reg, val);
+	return ret;
+}
+
+static inline int rep_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return rep_write(sd, reg, (rep_read(sd, reg) & mask) | val);
+}
+
+static inline int edid_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_edid, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_edid->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_edid->addr, reg);
+ return ret;
+}
+
+/* Write a register on the EDID map; debug-logs the result.
+ * (Format fixed: "%sritting" printed the misspelled "writting".) */
+static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+	struct adv7604_state *state = to_state(sd);
+	int ret = adv_smbus_write_byte_data(state->i2c_edid, reg, val);
+
+	v4l2_dbg(2, debug, sd, "%sriting 0x%02x, 0x%02x, 0x%02x\n",
+			(ret >= 0) ? "w" : "error w",
+			state->i2c_edid->addr, reg, val);
+	return ret;
+}
+
+static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
+{
+ struct adv7604_state *state = to_state(sd);
+ struct i2c_client *client = state->i2c_edid;
+ u8 msgbuf0[1] = { 0 };
+ u8 msgbuf1[256];
+ struct i2c_msg msg[2] = { { client->addr, 0, 1, msgbuf0 },
+ { client->addr, 0 | I2C_M_RD, len, msgbuf1 }
+ };
+
+ if (i2c_transfer(client->adapter, msg, 2) < 0)
+ return -EIO;
+ memcpy(val, msgbuf1, len);
+ return 0;
+}
+
+static void adv7604_delayed_work_enable_hotplug(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7604_state *state = container_of(dwork, struct adv7604_state,
+ delayed_work_enable_hotplug);
+ struct v4l2_subdev *sd = &state->sd;
+
+ v4l2_dbg(2, debug, sd, "%s: enable hotplug\n", __func__);
+
+ v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)1);
+}
+
+/* Upload an EDID image into the chip's internal EDID RAM.
+ * Sequence (order matters): drop hotplug, disable DDC access to the RAM,
+ * write the data in SMBus-sized chunks, re-enable DDC access (the chip
+ * recomputes checksums), poll for completion, then re-assert hotplug
+ * after a 100 ms delay. */
+static inline int edid_write_block(struct v4l2_subdev *sd,
+					unsigned len, const u8 *val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+	int err = 0;
+	int i;
+
+	v4l2_dbg(2, debug, sd, "%s: write EDID block (%d byte)\n", __func__, len);
+
+	v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
+
+	/* Disables I2C access to internal EDID ram from DDC port */
+	rep_write_and_or(sd, info->edid_ctrl_reg, 0xf0, 0x0);
+
+	for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
+		err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
+				I2C_SMBUS_BLOCK_MAX, val + i);
+	if (err)
+		return err;
+
+	/* adv7604 calculates the checksums and enables I2C access to internal
+	   EDID ram from DDC port. */
+	rep_write_and_or(sd, info->edid_ctrl_reg, 0xf0, 0x1);
+
+	/* NOTE(review): up to 1 s of mdelay() busy-waiting; looks like it
+	 * could be msleep() if callers never run in atomic context — confirm. */
+	for (i = 0; i < 1000; i++) {
+		if (rep_read(sd, info->edid_status_reg) & 1)
+			break;
+		mdelay(1);
+	}
+	if (i == 1000) {
+		v4l_err(client, "error enabling edid\n");
+		return -EIO;
+	}
+
+	/* enable hotplug after 100 ms */
+	queue_delayed_work(state->work_queues,
+			&state->delayed_work_enable_hotplug, HZ / 10);
+	return 0;
+}
+
+static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_hdmi, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_hdmi->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_hdmi->addr, reg);
+ return ret;
+}
+
/* Read a 16-bit big-endian value from HDMI-map registers @reg and @reg+1,
 * then apply @mask.  NOTE(review): negative error returns from hdmi_read()
 * are folded into the result rather than reported -- presumably callers
 * only use this while the link is known good; confirm. */
static u16 hdmi_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
{
	return ((hdmi_read(sd, reg) << 8) | hdmi_read(sd, reg + 1)) & mask;
}
+
+static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
+
+ v4l2_dbg(2, debug, sd, "%sritting 0x%02x, 0x%02x, 0x%02x\n",
+ (ret>=0)? "w" : "error w",
+ state->i2c_hdmi->addr, reg, val);
+ return ret;
+}
+
+static inline int test_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_test, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_test->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_test->addr, reg);
+ return ret;
+
+}
+
+static inline int test_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_write_byte_data(state->i2c_test, reg, val);
+
+ v4l2_dbg(2, debug, sd, "%sritting 0x%02x, 0x%02x, 0x%02x\n",
+ (ret>=0)? "w" : "error w",
+ state->i2c_test->addr, reg, val);
+ return ret;
+}
+
+static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_cp, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_cp->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_cp->addr, reg);
+ return ret;
+}
+
/* Read a 16-bit big-endian value from CP-map registers @reg and @reg+1,
 * then apply @mask.  NOTE(review): negative error returns from cp_read()
 * are folded into the result rather than reported -- confirm callers
 * tolerate this. */
static u16 cp_read16(struct v4l2_subdev *sd, u8 reg, u16 mask)
{
	return ((cp_read(sd, reg) << 8) | cp_read(sd, reg + 1)) & mask;
}
+
+static inline int cp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_write_byte_data(state->i2c_cp, reg, val);
+
+ v4l2_dbg(2, debug, sd, "%sritting 0x%02x, 0x%02x, 0x%02x\n",
+ (ret>=0)? "w" : "error w",
+ state->i2c_cp->addr, reg, val);
+ return ret;
+}
+
/* Read-modify-write a CP-map register: keep the bits selected by @mask,
 * OR in @val, and write the result back. */
static inline int cp_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
{
	return cp_write(sd, reg, (cp_read(sd, reg) & mask) | val);
}
+
+static inline int vdp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_read_byte_data(state->i2c_vdp, reg);
+
+ if(ret >= 0)
+ v4l2_dbg(2, debug, sd, "reading 0x%02x, 0x%02x, 0x%02x\n",
+ state->i2c_vdp->addr, reg, ret);
+ else
+ v4l2_dbg(2, debug, sd, "error reading 0x%02x, 0x%02x\n",
+ state->i2c_vdp->addr, reg);
+ return ret;
+}
+
+static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7604_state *state = to_state(sd);
+ int ret = adv_smbus_write_byte_data(state->i2c_vdp, reg, val);
+
+ v4l2_dbg(2, debug, sd, "%sritting 0x%02x, 0x%02x, 0x%02x\n",
+ (ret>=0)? "w" : "error w",
+ state->i2c_vdp->addr, reg, val);
+ return ret;
+}
+
/* Indices of the chip's I2C register maps ("pages").  Each page is a
 * separate slave address; the index selects the matching read/write
 * helper in adv7604_read_reg()/adv7604_write_reg(), and BIT(page) is
 * tested against the per-variant page_mask. */
enum {
	ADV7604_PAGE_IO,
	ADV7604_PAGE_AVLINK,
	ADV7604_PAGE_CEC,
	ADV7604_PAGE_INFOFRAME,
	ADV7604_PAGE_ESDP,
	ADV7604_PAGE_DPP,
	ADV7604_PAGE_AFE,
	ADV7604_PAGE_REP,
	ADV7604_PAGE_EDID,
	ADV7604_PAGE_HDMI,
	ADV7604_PAGE_TEST,
	ADV7604_PAGE_CP,
	ADV7604_PAGE_VDP,
};

/* Pack a page index (high byte) and register offset (low byte) into the
 * single address used by the debug register accessors. */
#define ADV7604_REG(page, offset) (((page) << 8) | (offset))
+
/*
 * Debug accessor: read one register addressed as ADV7604_REG(page, offset).
 * Returns the register value (>= 0), or -EINVAL if the page is not
 * implemented on this chip variant.
 */
static int __maybe_unused adv7604_read_reg(struct v4l2_subdev *sd,
		unsigned int reg)
{
	struct adv7604_state *state = to_state(sd);
	unsigned int page = reg >> 8;	/* high byte selects the map */

	/* Reject pages this chip variant does not expose. */
	if (!(BIT(page) & state->info->page_mask))
		return -EINVAL;

	reg &= 0xff;	/* low byte is the offset within the map */

	/* Dispatch to the per-map read helper. */
	switch (page) {
	case ADV7604_PAGE_IO:
		return io_read(sd, reg);
	case ADV7604_PAGE_AVLINK:
		return avlink_read(sd, reg);
	case ADV7604_PAGE_CEC:
		return cec_read(sd, reg);
	case ADV7604_PAGE_INFOFRAME:
		return infoframe_read(sd, reg);
	case ADV7604_PAGE_ESDP:
		return esdp_read(sd, reg);
	case ADV7604_PAGE_DPP:
		return dpp_read(sd, reg);
	case ADV7604_PAGE_AFE:
		return afe_read(sd, reg);
	case ADV7604_PAGE_REP:
		return rep_read(sd, reg);
	case ADV7604_PAGE_EDID:
		return edid_read(sd, reg);
	case ADV7604_PAGE_HDMI:
		return hdmi_read(sd, reg);
	case ADV7604_PAGE_TEST:
		return test_read(sd, reg);
	case ADV7604_PAGE_CP:
		return cp_read(sd, reg);
	case ADV7604_PAGE_VDP:
		return vdp_read(sd, reg);
	}

	/* Page bit set in page_mask but not handled above. */
	return -EINVAL;
}
+
/*
 * Debug accessor: write one register addressed as ADV7604_REG(page, offset).
 * Returns the helper's result, or -EINVAL if the page is not implemented
 * on this chip variant.
 */
static int adv7604_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
{
	struct adv7604_state *state = to_state(sd);
	unsigned int page = reg >> 8;	/* high byte selects the map */

	/* Reject pages this chip variant does not expose. */
	if (!(BIT(page) & state->info->page_mask))
		return -EINVAL;

	reg &= 0xff;	/* low byte is the offset within the map */

	/* Dispatch to the per-map write helper. */
	switch (page) {
	case ADV7604_PAGE_IO:
		return io_write(sd, reg, val);
	case ADV7604_PAGE_AVLINK:
		return avlink_write(sd, reg, val);
	case ADV7604_PAGE_CEC:
		return cec_write(sd, reg, val);
	case ADV7604_PAGE_INFOFRAME:
		return infoframe_write(sd, reg, val);
	case ADV7604_PAGE_ESDP:
		return esdp_write(sd, reg, val);
	case ADV7604_PAGE_DPP:
		return dpp_write(sd, reg, val);
	case ADV7604_PAGE_AFE:
		return afe_write(sd, reg, val);
	case ADV7604_PAGE_REP:
		return rep_write(sd, reg, val);
	case ADV7604_PAGE_EDID:
		return edid_write(sd, reg, val);
	case ADV7604_PAGE_HDMI:
		return hdmi_write(sd, reg, val);
	case ADV7604_PAGE_TEST:
		return test_write(sd, reg, val);
	case ADV7604_PAGE_CP:
		return cp_write(sd, reg, val);
	case ADV7604_PAGE_VDP:
		return vdp_write(sd, reg, val);
	}

	/* Page bit set in page_mask but not handled above. */
	return -EINVAL;
}
+
+static void adv7604_write_reg_seq(struct v4l2_subdev *sd,
+ const struct adv7604_reg_seq *reg_seq, unsigned int reg_seq_size)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg_seq_size; i++)
+ adv7604_write_reg(sd, reg_seq[i].reg, reg_seq[i].val);
+}
+
+
+
+static enum v4l2_mbus_pixelcode adv7604_get_mbus_pixelcode( enum adv7604_op_ch_sel op_ch_sel,
+ enum adv7604_op_format_sel op_format_sel, unsigned int invert_cbcr)
+{
+ int loop;
+ for( loop = 0; loop < ARRAY_SIZE(pxcode); loop++){
+
+ if( (pxcode[loop].op_ch_sel == op_ch_sel) &&
+ (pxcode[loop].op_format_sel == op_format_sel) &&
+ (pxcode[loop].invert_cbcr == invert_cbcr) ){
+ break;
+ }
+ }
+
+ if( loop == ARRAY_SIZE(pxcode) ){
+ loop = 0;
+ pr_err("%s: op_ch_sel/op_format_sel : bad configuration\n", __func__);
+ }
+ return pxcode[loop].code;
+}
+
+/* ----------------------------------------------------------------------- */
+static ssize_t cable_detect_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(device);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ return snprintf(buf, PAGE_SIZE,
+ "%d\n", io_read(sd, 0x6f) /* & 0x1 */);
+}
+
+static ssize_t signal_detect_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(device);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ return snprintf(buf, PAGE_SIZE,
+ "%d\n", (no_signal(sd)));
+}
+
+static ssize_t fmt_change_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(device);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7604_state *state = to_state(sd);
+ ssize_t val;
+
+ val = snprintf(buf, PAGE_SIZE,
+ "%s\n", (state->fmt_change)? "1": "0");
+
+ if(state->fmt_change)
+ state->fmt_change = 0;
+
+ return val;
+}
+
+static ssize_t audio_change_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(device);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ int audio_ch_md_raw, audio_channel_mode = 0, audio_status = 0;
+ ssize_t size = 0;
+ unsigned char data[6] = {0};
+ char * freq = NULL;
+ const char *clock_accuracy[4] = {
+ "Level II, Âħ1000 ppm",
+ "Level I, Âħ50 ppm",
+ "Level III, variable pitch shifted",
+ "Reserved",
+ };
+
+ data[5] = io_read(sd, 0x65);
+
+ if( data[5] & 0x40 )//audio is mute
+ return snprintf(buf, PAGE_SIZE, "Mute\n");
+
+ audio_ch_md_raw = data[5] & 0x10;
+ audio_channel_mode = hdmi_read(sd, 0x7) & 0x40;
+ audio_status = hdmi_read(sd, 0x18);
+
+ if( data[5] & 0x80 ){
+ data[0] = hdmi_read(sd, 0x36);
+ data[1] = hdmi_read(sd, 0x37);
+ data[2] = hdmi_read(sd, 0x38);
+ data[3] = hdmi_read(sd, 0x39);
+ data[4] = hdmi_read(sd, 0x3a);
+ }
+
+ if( audio_status & 0x2){
+ size += snprintf(buf + size, PAGE_SIZE - size, "\t%s-channel audio\n",
+ audio_ch_md_raw? "8": "2");
+ }
+
+ size += snprintf(buf + size, PAGE_SIZE - size, "\t%s\n",
+ audio_channel_mode? "Multichannel uncompressed audio" : "Stereo audio");
+
+ if( audio_status & 0x1)
+ size += snprintf(buf + size, PAGE_SIZE - size, "\tL_PCM or IEC 61937\n");
+ else
+ return snprintf(buf + size, PAGE_SIZE - size, "\tNo L_PCM or IEC 61937\n");
+
+ if( audio_status & 0x2)
+ size += snprintf(buf + size, PAGE_SIZE - size, "\tDSD packet\n");
+
+ if( audio_status & 0x4)
+ size += snprintf(buf + size, PAGE_SIZE - size, "\tDST packet\n");
+
+ if( audio_status & 0x4)
+ size += snprintf(buf + size, PAGE_SIZE - size, "\tHBR packet\n");
+
+ if( data[5] & 0x80 ){
+ switch( ( data[3] & 0xf) ){
+ case 0 : freq = "44.1 kHz";break;
+ case 2 : freq = "48 kHz"; break;
+ case 3 : freq = "32 kHz"; break;
+ case 8 : freq = "88.2 kHz"; break;
+ case 9 : freq = "768 kHz"; break;
+ case 10 : freq = "96 kHz"; break;
+ case 12 : freq = "176 kHz"; break;
+ case 14 : freq = "192 kHz"; break;
+ };
+
+ if(debug){
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "\t%s application\n"
+ "\t%sopyright is asserted\n"
+ "\t%sPCM Audio samples\n"
+ "\tCategory code : %d\n"
+ "\tSource number : %d\n"
+ "\tChannel number : %d\n",
+ (data[0] & 0x1)? "Consumer":"Professional",
+ (data[0] & 0x4)? "No c":"C",
+ (data[0] & 0x2)? "Non-":"",
+ data[1],
+ data[2] & 0xf,
+ (data[2] & 0xf0) >> 4);
+
+ if( !(data[0] & 0x2) ){
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "\tTwo audio channels %s\n"
+ "\tSampling Frequency : %s\n"
+ "\tClock Accuracy : %s\n",
+ (data[0] & 0x8)? "with 50/15 pre-emphasis":"without pre-emphasis",
+ freq? freq : "Unknown",
+ clock_accuracy[(( data[3] & 0x30)>>4)]);
+ }
+
+ }else{
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "\t%sPCM Audio samples\n",
+ (data[0] & 0x2)? "Non-":"");
+
+ if( !(data[0] & 0x2) )
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "\tSampling Frequency : %s\n",
+ freq? freq : "Unknown");
+ }
+
+ }
+
+ return size;
+}
+
+
/* Read-only sysfs attributes exported by the driver: cable presence,
 * signal presence, pending format change, and audio status. */
static struct device_attribute adv7611_sysfs_attrs[] = {
	__ATTR(cable, S_IRUGO, cable_detect_show, NULL),
	__ATTR(signal, S_IRUGO, signal_detect_show, NULL),
	__ATTR(fmt, S_IRUGO, fmt_change_show, NULL),
	__ATTR(audio, S_IRUGO, audio_change_show, NULL),
};
+
+
+
+/* ----------------------------------------------------------------------- */
+
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Print the register-address layout; called when a debug register
 * access hits an unsupported page. */
static void adv7604_inv_register(struct v4l2_subdev *sd)
{
	static const char * const maps[] = {
		"0x000-0x0ff: IO Map\n",
		"0x100-0x1ff: AVLink Map\n",
		"0x200-0x2ff: CEC Map\n",
		"0x300-0x3ff: InfoFrame Map\n",
		"0x400-0x4ff: ESDP Map\n",
		"0x500-0x5ff: DPP Map\n",
		"0x600-0x6ff: AFE Map\n",
		"0x700-0x7ff: Repeater Map\n",
		"0x800-0x8ff: EDID Map\n",
		"0x900-0x9ff: HDMI Map\n",
		"0xa00-0xaff: Test Map\n",
		"0xb00-0xbff: CP Map\n",
		"0xc00-0xcff: VDP Map\n",
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(maps); i++)
		v4l2_info(sd, "%s", maps[i]);
}
+
+static int adv7604_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = adv7604_read_reg(sd, reg->reg);
+ if (ret < 0) {
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7604_inv_register(sd);
+ return ret;
+ }
+
+ reg->size = 1;
+ reg->val = ret;
+
+ return 0;
+}
+
+static int adv7604_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!v4l2_chip_match_i2c_client(client, &reg->match))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = adv7604_write_reg(sd, reg->reg, reg->val);
+ if (ret < 0) {
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7604_inv_register(sd);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int adv7604_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+ const struct adv7604_chip_info *info = state->info;
+ s32 val = io_read(sd, 0x6f);
+
+ if(val < 0)
+ return val;
+
+ /* port A only */
+ return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
+ (bool)(io_read(sd, 0x6f) & info->cable_det_mask));
+}
+
/*
 * Scan @predef_vid_timings (terminated by a zero-width entry) for an
 * entry matching @timings; on a match, program the chip's video standard
 * (IO 0x00) and v_freq/prim_mode (IO 0x01) and return 0.  Returns -1
 * when no entry matches.
 */
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
		u8 prim_mode,
		const struct adv7604_video_standards *predef_vid_timings,
		const struct v4l2_dv_timings *timings)
{
	/* NOTE(review): 'state' looks unused but is presumably referenced
	 * by the DIGITAL_INPUT macro below -- confirm the macro body. */
	struct adv7604_state *state = to_state(sd);
	int i;

	for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
		/* HDMI measurements are more precise, so match tighter. */
		if (!v4l_match_dv_timings(timings, &predef_vid_timings[i].timings,
					DIGITAL_INPUT ? 250000 : 1000000))
			continue;
		io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
		io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) +
				prim_mode); /* v_freq and prim mode */
		return 0;
	}

	return -1;
}
+
+static int configure_predefined_video_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7604_state *state = to_state(sd);
+ int err;
+
+ v4l2_dbg(1, debug, sd, "%s", __func__);
+
+ if (adv7604_has_afe(state)) {
+ /* reset to default values */
+ io_write(sd, 0x16, 0x43);
+ io_write(sd, 0x17, 0x5a);
+ }
+ /* disable embedded syncs for auto graphics mode */
+ if (adv7604_has_afe(state)){
+ cp_write_and_or(sd, 0x81, 0xef, 0x00);
+ cp_write(sd, 0x8f, 0x00);
+ cp_write(sd, 0x90, 0x00);
+ cp_write(sd, 0xa2, 0x00);
+ cp_write(sd, 0xa3, 0x00);
+ cp_write(sd, 0xa4, 0x00);
+ cp_write(sd, 0xa5, 0x00);
+ cp_write(sd, 0xa6, 0x00);
+ cp_write(sd, 0xa7, 0x00);
+ cp_write(sd, 0xab, 0x00);
+ cp_write(sd, 0xac, 0x00);
+ }
+
+ switch (state->mode) {
+ case ADV7604_MODE_COMP:
+ case ADV7604_MODE_GR:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x01, adv7604_prim_mode_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x02, adv7604_prim_mode_gr, timings);
+ break;
+ case ADV7604_MODE_HDMI:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x05, adv7604_prim_mode_hdmi_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x06, adv7604_prim_mode_hdmi_gr, timings);
+
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ err = -1;
+ break;
+ }
+
+
+ return err;
+}
+
/*
 * Program the chip for timings that have no predefined standard.
 * For analog inputs this sets up auto-graphics mode with manually
 * computed PLL dividers and CP start/end positions; for HDMI it just
 * selects default prim_mode/vid_std.
 */
static void configure_custom_video_timings(struct v4l2_subdev *sd,
		const struct v4l2_bt_timings *bt) /* OK */
{
	struct adv7604_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u32 width = htotal(bt);			/* total pixels per line */
	u32 height = vtotal(bt);		/* total lines per frame */
	/* CP active-video window edges, derived from the sync/porch widths */
	u16 cp_start_sav = bt->hsync + bt->hbackporch - 4;
	u16 cp_start_eav = width - bt->hfrontporch;
	u16 cp_start_vbi = height - bt->vfrontporch;
	u16 cp_end_vbi = bt->vsync + bt->vbackporch;
	/* frame line length scaled to the chip's reference clock; guarded
	 * against a zero pixel clock */
	u16 ch1_fr_ll = (((u32)bt->pixelclock / 100) > 0) ?
		((width * (ADV7604_fsc / 100)) / ((u32)bt->pixelclock / 100)) : 0;
	/* manual PLL divider: enable bits plus the 13-bit total width */
	const u8 pll[2] = {
		0xc0 | ((width >> 8) & 0x1f),
		width & 0xff
	};

	v4l2_dbg(1, debug, sd, "%s\n", __func__);

	switch (state->mode) {
	case ADV7604_MODE_COMP:
	case ADV7604_MODE_GR:
		/* auto graphics */
		io_write(sd, 0x00, 0x07); /* video std */
		io_write(sd, 0x01, 0x02); /* prim mode */
		/* enable embedded syncs for auto graphics mode */
		cp_write_and_or(sd, 0x81, 0xef, 0x10);

		/* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
		/* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
		/* IO-map reg. 0x16 and 0x17 should be written in sequence */
		if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
			v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
			break;
		}

		/* active video - horizontal timing */
		cp_write(sd, 0xa2, (cp_start_sav >> 4) & 0xff);
		cp_write(sd, 0xa3, ((cp_start_sav & 0x0f) << 4) |
					((cp_start_eav >> 8) & 0x0f));
		cp_write(sd, 0xa4, cp_start_eav & 0xff);

		/* active video - vertical timing */
		cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
		cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
					((cp_end_vbi >> 8) & 0xf));
		cp_write(sd, 0xa7, cp_end_vbi & 0xff);
		break;
	case ADV7604_MODE_HDMI:
		/* set default prim_mode/vid_std for HDMI
		   accoring to [REF_03, c. 4.2] */
		io_write(sd, 0x00, 0x02); /* video std */
		io_write(sd, 0x01, 0x06); /* prim mode */
		break;
	default:
		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
				__func__, state->mode);
		break;
	}

	if (adv7604_has_afe(state)) {
		/* CP free-run line length and frame height */
		cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
		cp_write(sd, 0x90, ch1_fr_ll & 0xff);
		cp_write(sd, 0xab, (height >> 4) & 0xff);
		cp_write(sd, 0xac, (height & 0x0f) << 4);
	}
}
+
+static void set_rgb_quantization_range(struct v4l2_subdev *sd) /* OK */
+{
+ struct adv7604_state *state = to_state(sd);
+
+ if( (state->pdata.inp_color_space != ADV7604_INP_COLOR_SPACE_LIM_RGB )
+ || (state->pdata.inp_color_space != ADV7604_INP_COLOR_SPACE_FULL_RGB) )
+ return; //let default value for input color space
+
+ switch (state->rgb_quantization_range) {
+ case V4L2_DV_RGB_RANGE_AUTO:
+ /* automatic */
+ if (DIGITAL_INPUT && !(hdmi_read(sd, 0x05) & 0x80)) {
+ /* receiving DVI-D signal */
+
+ /* ADV7604 selects RGB limited range regardless of
+ input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
+ } else {
+ /* receiving HDMI or analog signal, set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ }
+ break;
+ case V4L2_DV_RGB_RANGE_LIMITED:
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ break;
+ case V4L2_DV_RGB_RANGE_FULL:
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+}
+
/*
 * V4L2 control handler: map each supported control to its hardware
 * register.  Returns 0 when the control is handled, -EINVAL otherwise.
 */
static int adv7604_s_ctrl(struct v4l2_ctrl *ctrl) /* OK */
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	struct adv7604_state *state = to_state(sd);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		cp_write(sd, 0x3c, ctrl->val);
		return 0;
	case V4L2_CID_CONTRAST:
		cp_write(sd, 0x3a, ctrl->val);
		return 0;
	case V4L2_CID_SATURATION:
		cp_write(sd, 0x3b, ctrl->val);
		return 0;
	case V4L2_CID_HUE:
		cp_write(sd, 0x3d, ctrl->val);
		return 0;
	case V4L2_CID_DV_RX_RGB_RANGE:
		/* only reprogram the range when the input is RGB */
		state->rgb_quantization_range = ctrl->val;
		if( state->pdata.inp_color_space <= ADV7604_INP_COLOR_SPACE_FULL_RGB )
			set_rgb_quantization_range(sd);
		return 0;
	case V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE:
		/* analog front end only */
		if (!adv7604_has_afe(state))
			return -EINVAL;
		/* Set the analog sampling phase. This is needed to find the
		   best sampling phase for analog video: an application or
		   driver has to try a number of phases and analyze the picture
		   quality before settling on the best performing phase. */
		afe_write(sd, 0xc8, ctrl->val);
		return 0;
	case V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL:
		/* Use the default blue color for free running mode,
		   or supply your own. */
		cp_write_and_or(sd, 0xbf, ~0x04, (ctrl->val << 2));
		return 0;
	case V4L2_CID_ADV_RX_FREE_RUN_COLOR:
		/* 24-bit RGB split across three CP registers */
		cp_write(sd, 0xc0, (ctrl->val & 0xff0000) >> 16);
		cp_write(sd, 0xc1, (ctrl->val & 0x00ff00) >> 8);
		cp_write(sd, 0xc2, (u8)(ctrl->val & 0x0000ff));
		return 0;
	}
	return -EINVAL;
}
+
+static int adv7604_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7604, 0);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static inline bool no_power(struct v4l2_subdev *sd) /* OK */
+{
+ /* Entire chip or CP powered off */
+ return io_read(sd, 0x0c) & 0x24;
+}
+
+static inline bool no_signal_tmds(struct v4l2_subdev *sd) /* OK */
+{
+ /* TODO port B, C and D */
+ return !(io_read(sd, 0x6a) & 0x10);
+}
+
+static inline bool no_lock_tmds(struct v4l2_subdev *sd) /* OK */
+{
+ struct adv7604_state *state = to_state(sd);
+ const struct adv7604_chip_info *info = state->info;
+
+ return (io_read(sd, 0x6a) & info->tdms_lock_mask) != info->tdms_lock_mask;
+}
+
+static inline bool no_lock_sspd(struct v4l2_subdev *sd) /* OK */
+{
+ struct adv7604_state *state = to_state(sd);
+
+ /*
+ * Chips without a AFE don't expose registers for the SSPD, so just assume
+ * that we have a lock.
+ */
+ if (!adv7604_has_afe(state))
+ return false;
+
+ /* TODO channel 2 */
+ return ((cp_read(sd, 0xb5) & 0xd0) != 0xd0);
+}
+
+static inline bool no_lock_stdi(struct v4l2_subdev *sd) /* OK */
+{
+ /* TODO channel 2 */
+ return !(cp_read(sd, 0xb1) & 0x80);
+}
+
+static inline bool no_signal(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+ bool ret;
+
+ ret = no_power(sd);
+
+ ret |= no_lock_stdi(sd);
+ ret |= no_lock_sspd(sd);
+
+ if (DIGITAL_INPUT) {
+ ret |= no_lock_tmds(sd);
+ ret |= no_signal_tmds(sd);
+ }
+
+ return ret;
+}
+
+static inline bool no_lock_cp(struct v4l2_subdev *sd)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ if (!adv7604_has_afe(state))
+ return false;
+
+ /* CP has detected a non standard number of lines on the incoming
+ video compared to what it is configured to receive by s_dv_timings */
+ return io_read(sd, 0x12) & 0x01;
+}
+
+static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ *status = 0;
+ *status |= no_power(sd) ? V4L2_IN_ST_NO_POWER : 0;
+ *status |= no_signal(sd) ? V4L2_IN_ST_NO_SIGNAL : 0;
+ if (no_lock_cp(sd))
+ *status |= DIGITAL_INPUT ? V4L2_IN_ST_NO_SYNC : V4L2_IN_ST_NO_H_LOCK;
+
+ v4l2_dbg(1, debug, sd, "%s: status = 0x%x\n", __func__, *status);
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
/*
 * Log a v4l2_dv_timings summary prefixed with @txt; with @detailed also
 * log porch/sync widths, polarities, pixel clock, flags and standards.
 * Only BT.656/1120 timings are printed.
 */
static void adv7604_print_timings(struct v4l2_subdev *sd,
		struct v4l2_dv_timings *timings, const char *txt, bool detailed)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 htot, vtot;

	if (timings->type != V4L2_DV_BT_656_1120)
		return;

	htot = htotal(bt);
	vtot = vtotal(bt);

	/* refresh rate = pixelclock / (total pixels per frame),
	 * guarded against a zero total */
	v4l2_info(sd, "%s %dx%d%s%d (%dx%d)",
			txt, bt->width, bt->height, bt->interlaced ? "i" : "p",
			(htot * vtot) > 0 ? ((u32)bt->pixelclock /
				(htot * vtot)) : 0,
			htot, vtot);

	if (detailed) {
		v4l2_info(sd, "	horizontal: fp = %d, %ssync = %d, bp = %d\n",
				bt->hfrontporch,
				(bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
				bt->hsync, bt->hbackporch);
		v4l2_info(sd, "	vertical: fp = %d, %ssync = %d, bp = %d\n",
				bt->vfrontporch,
				(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
				bt->vsync, bt->vbackporch);
		v4l2_info(sd, "	pixelclock: %lld, flags: 0x%x, standards: 0x%x\n",
				bt->pixelclock, bt->flags, bt->standards);
	}
}
+
/* Raw measurement read back from the STDI/SSPD blocks. */
struct stdi_readback {
	u16 bl, lcf, lcvs;	/* block length, line count, vsync lines */
	u8 hs_pol, vs_pol;	/* '+', '-' or 'x' (unknown) */
	bool interlaced;
};
+
/*
 * Convert an STDI measurement into DV timings: first try to match an
 * entry of adv7604_timings[] (line count, vsync width and a +/-1 MHz
 * pixel clock window), then fall back to CVT and GTF detection.
 * Returns 0 on success, -1 when nothing matches.
 */
static int stdi2dv_timings(struct v4l2_subdev *sd,
		struct stdi_readback *stdi,
		struct v4l2_dv_timings *timings)
{
	struct adv7604_state *state = to_state(sd);
	/* horizontal frequency derived from the measured block length */
	u32 hfreq = (ADV7604_fsc * 8) / stdi->bl;
	u32 pix_clk;
	int i;

	for (i = 0; adv7604_timings[i].bt.height; i++) {
		if (vtotal(&adv7604_timings[i].bt) != stdi->lcf + 1)
			continue;
		if (adv7604_timings[i].bt.vsync != stdi->lcvs)
			continue;

		pix_clk = hfreq * htotal(&adv7604_timings[i].bt);

		/* accept a +/-1 MHz pixel clock deviation */
		if ((pix_clk < adv7604_timings[i].bt.pixelclock + 1000000) &&
		    (pix_clk > adv7604_timings[i].bt.pixelclock - 1000000)) {
			*timings = adv7604_timings[i];
			return 0;
		}
	}

	/* no table entry matched: try CVT, then GTF formulas */
	if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
			timings))
		return 0;
	if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
			(stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
			(stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
			state->aspect_ratio, timings))
		return 0;

	v4l2_dbg(2, debug, sd,
		"%s: No format candidate found for lcvs = %d, lcf=%d, bl = %d, %chsync, %cvsync\n",
		__func__, stdi->lcvs, stdi->lcf, stdi->bl,
		stdi->hs_pol, stdi->vs_pol);
	return -1;
}
+
+
/*
 * Read the STDI (and, on AFE variants, SSPD) measurement registers into
 * @stdi.  Lock state is checked both before and after the readout so a
 * signal drop mid-read is caught.  Returns 0 on success, -1 when not
 * locked or the measurement is implausible.
 */
static int read_stdi(struct v4l2_subdev *sd, struct stdi_readback *stdi)
{
	struct adv7604_state *state = to_state(sd);
	const struct adv7604_chip_info *info = state->info;

	if (no_lock_stdi(sd) || no_lock_sspd(sd)) {
		v4l2_dbg(2, debug, sd, "%s: STDI and/or SSPD not locked\n", __func__);
		return -1;
	}

	/* read STDI */
	stdi->bl = cp_read16(sd, 0xb1, 0x3fff);
	stdi->lcf = cp_read16(sd, info->lcf_reg, 0x7ff);
	stdi->lcvs = cp_read(sd, 0xb3) >> 3;
	stdi->interlaced = io_read(sd, 0x12) & 0x10;

	if (adv7604_has_afe(state)) {
		/* read SSPD */
		if ((cp_read(sd, 0xb5) & 0x03) == 0x01) {
			/* polarity valid: decode each axis, 'x' if unknown */
			stdi->hs_pol = ((cp_read(sd, 0xb5) & 0x10) ?
				((cp_read(sd, 0xb5) & 0x08) ? '+' : '-') : 'x');
			stdi->vs_pol = ((cp_read(sd, 0xb5) & 0x40) ?
				((cp_read(sd, 0xb5) & 0x20) ? '+' : '-') : 'x');
		} else {
			stdi->hs_pol = 'x';
			stdi->vs_pol = 'x';
		}
	} else {
		/* no SSPD: take sync polarities from the HDMI map */
		stdi->hs_pol = (hdmi_read(sd, 0x05) & 0x20) ? '+' : '-';
		stdi->vs_pol = (hdmi_read(sd, 0x05) & 0x10) ? '+' : '-';
	}

	if (no_lock_stdi(sd) || no_lock_sspd(sd)) {
		v4l2_dbg(2, debug, sd,
			"%s: signal lost during readout of STDI/SSPD\n", __func__);
		return -1;
	}

	/* sanity-check the measurement (0x3fff is the saturated value) */
	if (stdi->lcf < 239 || stdi->bl < 8 || stdi->bl == 0x3fff) {
		v4l2_dbg(2, debug, sd, "%s: invalid signal\n", __func__);
		memset(stdi, 0, sizeof(struct stdi_readback));
		return -1;
	}

	v4l2_dbg(2, debug, sd,
		"%s: lcf (frame height - 1) = %d, bl = %d, lcvs (vsync) = %d, %chsync, %cvsync, %s\n",
		__func__, stdi->lcf, stdi->bl, stdi->lcvs,
		stdi->hs_pol, stdi->vs_pol,
		stdi->interlaced ? "interlaced" : "progressive");

	return 0;
}
+
/*
 * Enumerate the entries of adv7604_timings[].  The "- 1" excludes the
 * table's final entry -- presumably a zero terminator (other loops in
 * this file stop on bt.width/bt.height == 0); confirm against the table
 * definition.
 */
static int adv7604_enum_dv_timings(struct v4l2_subdev *sd,
			struct v4l2_enum_dv_timings *timings)
{
	if (timings->index >= ARRAY_SIZE(adv7604_timings) - 1)
		return -EINVAL;
	memset(timings->reserved, 0, sizeof(timings->reserved));
	timings->timings = adv7604_timings[timings->index];
	return 0;
}
+
+static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct adv7604_state *state = to_state(sd);
+
+ cap->type = V4L2_DV_BT_656_1120;
+ cap->bt.max_width = 1920;
+ cap->bt.max_height = 1200;
+ cap->bt.min_pixelclock = 27000000;
+ if (DIGITAL_INPUT)
+ cap->bt.max_pixelclock = 225000000;
+ else
+ cap->bt.max_pixelclock = 170000000;
+ cap->bt.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT;
+ cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM;
+ return 0;
+}
+
+/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
+ if the format is listed in adv7604_timings[] */
+static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7604_state *state = to_state(sd);
+ int i;
+
+ for (i = 0; adv7604_timings[i].bt.width; i++) {
+ if (v4l_match_dv_timings(timings, &adv7604_timings[i],
+ DIGITAL_INPUT ? 250000 : 1000000)) {
+ *timings = adv7604_timings[i];
+ break;
+ }
+ }
+}
+
/* ADV7604: pixel clock in Hz from HDMI-map 0x06 (whole MHz) plus
 * 0.25 MHz steps in bits [5:4] of 0x3b; 0 on read error. */
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
{
	int mhz = hdmi_read(sd, 0x06);
	int frac = hdmi_read(sd, 0x3b);

	if (mhz < 0 || frac < 0)
		return 0;
	return mhz * 1000000 + ((frac & 0x30) >> 4) * 250000;
}
+
/* ADV7611: pixel clock in Hz from a 9-bit MHz value split across
 * HDMI-map 0x51/0x52 plus a 7-bit fractional part (1/128 MHz units);
 * 0 on read error. */
static unsigned int adv7611_read_hdmi_pixelclock(struct v4l2_subdev *sd)
{
	int hi = hdmi_read(sd, 0x51);
	int lo = hdmi_read(sd, 0x52);

	if (hi < 0 || lo < 0)
		return 0;
	return ((hi << 1) | (lo >> 7)) * 1000000 + (lo & 0x7f) * 1000000 / 128;
}
+
+static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings) /* OK */
+{
+ struct adv7604_state *state = to_state(sd);
+ const struct adv7604_chip_info *info = state->info;
+ struct v4l2_bt_timings *bt = &timings->bt;
+ struct stdi_readback stdi;
+
+ if (!timings){
+ v4l2_dbg(1, debug, sd, "%s: invalide timings\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+ if (no_signal(sd)) {
+ v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
+ return -ENOLINK;
+ }
+
+ /* read STDI */
+ if (read_stdi(sd, &stdi)) {
+ v4l2_dbg(1, debug, sd, "%s: STDI/SSPD not locked\n", __func__);
+ return -ENOLINK;
+ }
+ bt->interlaced = stdi.interlaced ?
+ V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+
+ if (DIGITAL_INPUT) {
+ timings->type = V4L2_DV_BT_656_1120;
+
+ bt->width = hdmi_read16(sd, 0x07, 0xfff); /* mask */
+ bt->height = hdmi_read16(sd, 0x09, 0xfff);
+ bt->pixelclock = info->read_hdmi_pixelclock(sd);
+ bt->hfrontporch = hdmi_read16(sd, 0x20, 0x3ff); /* mask */
+ bt->hsync = hdmi_read16(sd, 0x22, 0x3ff);
+ bt->hbackporch = hdmi_read16(sd, 0x24, 0x3ff);
+ bt->vfrontporch = hdmi_read16(sd, 0x2a, 0x1fff) / 2;
+ bt->vsync = hdmi_read16(sd, 0x2e, 0x1fff) / 2;
+ bt->vbackporch = hdmi_read16(sd, 0x32, 0x1fff) / 2;
+ bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
+ ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height += hdmi_read16(sd, 0x0b, 0xfff);
+ bt->il_vfrontporch = hdmi_read16(sd, 0x2c, 0x1fff) / 2;
+ bt->il_vsync = hdmi_read16(sd, 0x30, 0x1fff) / 2;
+ bt->vbackporch = hdmi_read16(sd, 0x34, 0x1fff) / 2;
+ }
+ adv7604_fill_optional_dv_timings_fields(sd, timings);
+ } else {
+ /* find format
+ * Since LCVS values are inaccurate [REF_03, p. 275-276],
+ * stdi2dv_timings() is called with lcvs +-1 if the first attempt fails.
+ */
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs += 1;
+ v4l2_dbg(1, debug, sd, "%s: lcvs + 1 = %d\n", __func__, stdi.lcvs);
+ if (!stdi2dv_timings(sd, &stdi, timings))
+ goto found;
+ stdi.lcvs -= 2;
+ v4l2_dbg(1, debug, sd, "%s: lcvs - 1 = %d\n", __func__, stdi.lcvs);
+ if (stdi2dv_timings(sd, &stdi, timings)) {
+ /*
+ * The STDI block may measure wrong values, especially
+ * for lcvs and lcf. If the driver can not find any
+ * valid timing, the STDI block is restarted to measure
+ * the video timings again. The function will return an
+ * error, but the restart of STDI will generate a new
+ * STDI interrupt and the format detection process will
+ * restart.
+ */
+ if (state->restart_stdi_once) {
+ v4l2_dbg(1, debug, sd, "%s: restart STDI\n", __func__);
+ /* TODO restart STDI for Sync Channel 2 */
+ /* enter one-shot mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x00);
+ /* trigger STDI restart */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x04);
+ /* reset to continuous mode */
+ cp_write_and_or(sd, 0x86, 0xf9, 0x02);
+ state->restart_stdi_once = false;
+ return -ENOLINK;
+ }
+ v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
+ return -ERANGE;
+ }
+ state->restart_stdi_once = true;
+ }
+found:
+
+ if (no_signal(sd)) {
+ v4l2_dbg(1, debug, sd, "%s: signal lost during readout\n", __func__);
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+ return -ENOLINK;
+ }
+
+ if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
+ (DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+ v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
+ __func__, (u32)bt->pixelclock);
+ return -ERANGE;
+ }
+
+ if (debug > 1)
+ adv7604_print_timings(sd, timings,
+ "adv7604_query_dv_timings:", true);
+
+ return 0;
+}
+
+/*
+ * Apply the given DV timings to the receiver.
+ *
+ * Rejects a NULL pointer and pixel clocks above the part's limits
+ * (170 MHz analog, 225 MHz digital), stores the timings in the driver
+ * state, then programs the CP block: predefined prim_mode/vid_std
+ * timings when the format is known, custom CP registers otherwise.
+ * Returns 0 on success, -EINVAL/-ERANGE on bad input.
+ */
+static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
+		struct v4l2_dv_timings *timings) /* OK */
+{
+	struct adv7604_state *state = to_state(sd);
+	struct v4l2_bt_timings *bt;
+	int err;
+
+	if (!timings)
+		return -EINVAL;
+
+	bt = &timings->bt;
+
+	/* same pixel-clock limits as adv7604_query_dv_timings() */
+	if ((!DIGITAL_INPUT && bt->pixelclock > 170000000) ||
+			(DIGITAL_INPUT && bt->pixelclock > 225000000)) {
+		v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
+				__func__, (u32)bt->pixelclock);
+		return -ERANGE;
+	}
+
+	adv7604_fill_optional_dv_timings_fields(sd, timings);
+
+	state->timings = *timings;
+
+	/* CP reg 0x91 bit 6: interlaced mode */
+	cp_write_and_or(sd, 0x91, 0x40, bt->interlaced ? 0x40 : 0x00);
+
+	/* Use prim_mode and vid_std when available */
+	err = configure_predefined_video_timings(sd, timings);
+	if (err) {
+		/* custom settings when the video format
+		   does not have prim_mode/vid_std */
+		configure_custom_video_timings(sd, bt);
+	}
+
+	/* only touch quantization when platform asked for an RGB space */
+	if( state->pdata.inp_color_space <= ADV7604_INP_COLOR_SPACE_FULL_RGB )
+		set_rgb_quantization_range(sd);
+
+
+	if (debug > 1)
+		adv7604_print_timings(sd, timings,
+				"adv7604_s_dv_timings:", true);
+	return 0;
+}
+
+/* Return the timings last stored in the driver state (no hardware access). */
+static int adv7604_g_dv_timings(struct v4l2_subdev *sd,
+		struct v4l2_dv_timings *timings)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	*timings = state->timings;
+	return 0;
+}
+
+/* ADV7611 variant: toggle HDMI input termination via HDMI map reg 0x83. */
+static void adv7611_set_termination(struct v4l2_subdev *sd, bool enable)
+{
+	hdmi_write(sd, 0x83, enable ? 0xfe : 0xff);
+}
+
+/* ADV7604 variant: toggle HDMI input termination via HDMI map reg 0x01. */
+static void adv7604_set_termination(struct v4l2_subdev *sd, bool enable)
+{
+	hdmi_write(sd, 0x01, enable ? 0x00 : 0x78);
+}
+
+/*
+ * Un-tristate the output pins for the currently selected input mode.
+ * For HDMI this also unmutes audio and enables input termination.
+ */
+static void enable_input(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	switch (state->mode) {
+	case ADV7604_MODE_COMP:
+	case ADV7604_MODE_GR:
+		/* enable */
+		io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
+		break;
+	case ADV7604_MODE_HDMI:
+		/* enable */
+		hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
+		state->info->set_termination(sd, true);
+		/* AFE-capable parts (7604) keep a different pin set tristated */
+		io_write(sd, 0x15, adv7604_has_afe(state)? 0xa0: 0x80); /* Disable Tristate of Pins */
+		break;
+	default:
+		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+				__func__, state->mode);
+		break;
+	}
+}
+
+/* Tristate all video outputs, mute audio and drop input termination. */
+static void disable_input(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	/* disable */
+	io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
+	hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
+	state->info->set_termination(sd, false);
+}
+
+/*
+ * Program the chip for the input selected in state->mode: load the ADI
+ * recommended register sequence for that path (index 0 = analog/AFE,
+ * index 1 = HDMI) and power the AFE/ADC up or down accordingly.
+ */
+static void select_input(struct v4l2_subdev *sd) /* OK */
+{
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+
+	switch (state->mode) {
+	case ADV7604_MODE_COMP:
+	case ADV7604_MODE_GR:
+		adv7604_write_reg_seq(sd, info->recommended_settings[0],
+				info->num_recommended_settings[0]);
+
+		afe_write(sd, 0x00, 0x08); /* power up ADC */
+		afe_write(sd, 0x01, 0x06); /* power up Analog Front End */
+		afe_write(sd, 0xc8, 0x00); /* phase control */
+		break;
+
+	case ADV7604_MODE_HDMI:
+		adv7604_write_reg_seq(sd, info->recommended_settings[1],
+				info->num_recommended_settings[1]);
+
+		if (adv7604_has_afe(state)) {
+			/* analog path not needed while receiving HDMI */
+			afe_write(sd, 0x00, 0xff); /* power down ADC */
+			afe_write(sd, 0x01, 0xfe); /* power down Analog Front End */
+			afe_write(sd, 0xc8, 0x40); /* phase control */
+
+		}
+
+		cp_write(sd, 0x3e, 0x00); /* CP core pre-gain control */
+		cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
+		cp_write(sd, 0x40, 0x80); /* CP core pre-gain control. Graphics mode */
+
+		break;
+	default:
+		v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+				__func__, state->mode);
+		break;
+	}
+}
+
+/*
+ * v4l2 s_routing: switch to the requested input.
+ *
+ * @input is an ADV7604_MODE_* value; AFE-less parts (ADV7611) only
+ * accept ADV7604_MODE_HDMI. @output and @config are unused.
+ * Sequence is disable -> reprogram -> enable to avoid glitches on the
+ * output pins while registers are being rewritten.
+ */
+static int adv7604_s_routing(struct v4l2_subdev *sd,
+		u32 input, u32 output, u32 config)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	v4l2_dbg(2, debug, sd, "%s: input %d", __func__, input);
+
+	if (!adv7604_has_afe(state) && input != ADV7604_MODE_HDMI)
+		return -EINVAL;
+
+	state->mode = input;
+
+	disable_input(sd);
+
+	select_input(sd);
+
+	enable_input(sd);
+
+	return 0;
+}
+
+/*
+ * v4l2 s_stream: configure the LLC (pixel clock) DLL on stream start/stop.
+ *
+ * No-op on AFE-capable parts (ADV7604). For BT.656 output formats that
+ * need a doubled clock (8-bit ITU656, DDR 4:2:2 8-bit, 12-bit mode 2)
+ * the LLC_DLL_DOUBLE bit (IO 0x19[6]) is set as well. On disable the
+ * DLL and its mux are switched back off.
+ */
+static int adv7604_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct adv7604_state *state = to_state(sd);
+	struct adv7604_platform_data *pdata = &state->pdata;
+	u8 llc_dll_double = 0;
+
+	if (adv7604_has_afe(state))
+		return 0;
+
+	if( (state->mbus_type == V4L2_MBUS_BT656)
+	    &&
+	    ((pdata->op_format_sel == ADV7604_OP_FORMAT_SEL_SDR_ITU656_8)
+	     ||(pdata->op_format_sel == ADV7604_OP_FORMAT_SEL_DDR_422_8)
+	     ||(pdata->op_format_sel == ADV7604_OP_FORMAT_SEL_SDR_ITU656_12_MODE2)) ){
+
+		llc_dll_double = 1<<6;
+	}
+
+	// set or unset LLC_DLL_EN, LLC_DLL_DOUBLE, LLC_DLL_PHASE
+	if( enable ){
+		// TODO must be modified according to the output fromat selected
+		// and perhaps output parallell ou BT656
+		//see OP_FORMAT_SEL and LLC_DLL_PHASE for more details
+		io_write(sd, 0x19, llc_dll_double | 0x86);
+
+		io_write_and_or(sd, 0x33, 0xbf, 1<<6);//LLC_DLL_MUX
+	}else{
+		io_write(sd, 0x19, 0x0);
+		io_write_and_or(sd, 0x33, 0xbf, 0x0);
+	}
+	return 0;
+}
+
+/*
+ * v4l2 pad op: enumerate media bus codes. Exactly one code is exposed,
+ * the one held in state->code, so any index other than 0 is -EINVAL.
+ */
+static int adv7604_enum_mbus_code(struct v4l2_subdev *sd,
+		struct v4l2_subdev_fh *fh,
+		struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	if (code->index)
+		return -EINVAL;
+
+	//For BT656
+	code->code = state->code;
+
+	v4l2_dbg(1, debug, sd, "%s: code = 0x%04x\n",
+			__func__, code->code);
+
+	return 0;
+}
+
+/*
+ * Find a predefined DV timing whose width/height match the request and
+ * program it as the free-run standard. Component-video modes are
+ * searched first, then graphics modes. Returns 0 and updates
+ * state->timings on success, non-zero when nothing matches.
+ *
+ * Fix: each search loop now breaks at the first match. Previously the
+ * loops always ran to completion, so the "loop == ARRAY_SIZE(...)"
+ * guard was always true and the graphics-mode search could overwrite a
+ * component-mode match that had already been applied.
+ */
+static int adv7604_find_matching_dv_timings
+	( struct v4l2_subdev *sd,unsigned int width, unsigned int height )
+{
+	int loop;
+	int err = -1;
+	struct v4l2_dv_timings timings;
+	struct adv7604_state *state = to_state(sd);
+
+	//Looks for available format format that matches
+	//Set default standard to enable free run if there is no signal
+	//According to what has been set as default width and height
+	for (loop = 0; loop < ARRAY_SIZE(adv7604_prim_mode_comp); loop++) {
+
+		if( (adv7604_prim_mode_comp[loop].timings.bt.width
+					== width)
+		    && ( adv7604_prim_mode_comp[loop].timings.bt.height
+					== height) ){
+
+			v4l2_dbg(1, debug, sd,
+				"%s : found a matching value (comp, %d)\n",
+				__func__, loop );
+
+			timings = adv7604_prim_mode_comp[loop].timings;
+			/* small margin so the clock comparison succeeds */
+			timings.bt.pixelclock += 125000;
+
+			err = find_and_set_predefined_video_timings(sd,
+				0x05, adv7604_prim_mode_hdmi_comp, &timings);
+			break;
+		}
+	}
+
+	/* only fall back to graphics modes when no comp mode matched */
+	if(loop == ARRAY_SIZE(adv7604_prim_mode_comp)){
+
+		for (loop = 0; loop < ARRAY_SIZE(adv7604_prim_mode_gr); loop++) {
+
+			if( (adv7604_prim_mode_gr[loop].timings.bt.width
+						== width)
+			    && ( adv7604_prim_mode_gr[loop].timings.bt.height
+						== height) ){
+
+				v4l2_dbg(1, debug, sd,
+					"%s : found a matching value (gr, %d)\n",
+					__func__, loop );
+
+				timings = adv7604_prim_mode_gr[loop].timings;
+				timings.bt.pixelclock += 125000;
+
+				err = find_and_set_predefined_video_timings(sd,
+					0x06, adv7604_prim_mode_hdmi_gr, &timings);
+				break;
+			}
+		}
+	}
+
+	if( !err ) //Update default timings
+		state->timings = timings;
+
+	return err;
+}
+
+
+/*
+ * v4l2 s_mbus_fmt: try to select a predefined standard matching the
+ * requested size, then report back the size/code actually in effect.
+ *
+ * NOTE(review): the condition uses && between the width and height
+ * mismatches, so a retune only happens when BOTH differ — a request
+ * that changes only one dimension is silently ignored. Looks like ||
+ * may have been intended; confirm before changing.
+ */
+static int adv7604_s_mbus_fmt(struct v4l2_subdev *sd,
+			      struct v4l2_mbus_framefmt *fmt)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	if (fmt == NULL)
+		return -EINVAL;
+
+	if( !adv7604_has_afe(state)
+	    && (fmt->width != state->timings.bt.width)
+	    && (fmt->height != state->timings.bt.height) ){
+
+		if( adv7604_find_matching_dv_timings( sd,
+				fmt->width, fmt->height ) ){
+
+			v4l2_dbg(2, debug, sd, "Unable to set default video standard\n");
+		}
+	}
+
+	/* always report the currently configured timings back to the caller */
+	fmt->width = state->timings.bt.width;
+	fmt->height = state->timings.bt.height;
+	fmt->code = state->code;
+	if (state->timings.bt.interlaced != V4L2_DV_INTERLACED)
+		fmt->field = V4L2_FIELD_NONE;
+	else
+		fmt->field = V4L2_FIELD_INTERLACED;
+	if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+		fmt->colorspace = (state->timings.bt.height <= 576) ?
+			V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
+	}
+
+	v4l2_dbg(1, debug, sd, "%s: (width = %d, height = %d, field = %d, code=0x%04x)\n",
+			__func__, fmt->width, fmt->height, fmt->field, fmt->code);
+
+	return 0;
+}
+
+/*
+ * Return the framefmt to operate on: the per-filehandle TRY format or
+ * the driver's ACTIVE format. Only pad 0 exists; NULL for any other pad.
+ */
+static struct v4l2_mbus_framefmt *
+__adv7604_get_fmt(struct adv7604_state *state,
+		  struct v4l2_subdev_fh *fh,
+		  unsigned int pad,
+		  enum v4l2_subdev_format_whence which)
+{
+	if(pad)
+		return NULL;
+
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &state->format;
+}
+
+/*
+ * v4l2 pad set_fmt: validate the pad, apply the format through
+ * adv7604_s_mbus_fmt() (which also rewrites fmt->format to what was
+ * actually configured), then store the result in TRY or ACTIVE state.
+ */
+static int adv7604_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct adv7604_state *state = to_state(sd);
+	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	format = __adv7604_get_fmt(state, fh, fmt->pad, fmt->which);
+	if(!format)
+		return -EINVAL;
+
+	ret = adv7604_s_mbus_fmt(sd, &fmt->format);
+	if(ret < 0)
+		return ret;
+
+	*format = fmt->format;
+	return 0;
+}
+
+/*
+ * v4l2 g_mbus_fmt: report the current format.
+ *
+ * Before reporting, the incoming signal is re-queried and, if the
+ * detected timings differ from the stored ones, they are applied
+ * (self-described in-source as an "ugly hack" to force detection).
+ */
+static int adv7604_g_mbus_fmt(struct v4l2_subdev *sd,
+		struct v4l2_mbus_framefmt *fmt)
+{
+	struct adv7604_state *state = to_state(sd);
+	struct v4l2_dv_timings timings;
+
+	if (fmt == NULL)
+		return -EINVAL;
+
+	//Ugly hack : Force detection and set DV timings
+	memset(&timings, 0, sizeof(struct v4l2_dv_timings));
+	if(!adv7604_query_dv_timings(sd, &timings))
+		if( memcmp(&state->timings, &timings,
+				sizeof(struct v4l2_dv_timings)) ){
+			adv7604_s_dv_timings(sd, &timings);
+		}
+
+	fmt->width = state->timings.bt.width;
+	fmt->height = state->timings.bt.height;
+	fmt->code = state->code;
+	if (state->timings.bt.interlaced != V4L2_DV_INTERLACED)
+		fmt->field = V4L2_FIELD_NONE;
+	else
+		fmt->field = V4L2_FIELD_INTERLACED;
+	if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+		fmt->colorspace = (state->timings.bt.height <= 576) ?
+			V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
+	}
+
+	v4l2_dbg(1, debug, sd, "%s: (width = %d, height = %d, field = %d, code=0x%04x)\n",
+			__func__, fmt->width, fmt->height, fmt->field, fmt->code);
+
+	return 0;
+}
+
+/*
+ * v4l2 pad get_fmt: validate the pad, refresh the format via
+ * adv7604_g_mbus_fmt() and store it in the TRY or ACTIVE slot.
+ */
+static int adv7604_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct adv7604_state *state = to_state(sd);
+	struct v4l2_mbus_framefmt *format;
+	int ret = 0;
+
+	format = __adv7604_get_fmt(state, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	ret = adv7604_g_mbus_fmt(sd, &fmt->format);
+	if(ret < 0)
+		return ret;
+
+	*format = fmt->format;
+	return 0;
+}
+
+/*
+ * v4l2 g_mbus_config: report fixed bus parameters (self-described in
+ * the source as "fake for the moment" — flags are hard-coded, only the
+ * bus type comes from the driver state).
+ */
+static int adv7604_g_mbus_config(struct v4l2_subdev *sd,
+				 struct v4l2_mbus_config *cfg)
+{
+	//fake for the moment
+	//struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct adv7604_state *state = to_state(sd);
+
+	cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
+		     V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+		     V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+		     V4L2_MBUS_DATA_ACTIVE_HIGH |
+		     V4L2_MBUS_MASTER;
+
+	cfg->type = state->mbus_type;
+
+	v4l2_dbg(2, debug, sd, "%s: v4l2_mbus_config type %s\n flags = 0x%x ( %s%s%s%s%s%s%s%s )\n",
+		 __func__,
+		 (cfg->type==V4L2_MBUS_PARALLEL) ? "V4L2_MBUS_PARALLEL":"V4L2_MBUS_BT656",
+		 cfg->flags,
+		 (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)? " V4L2_MBUS_HSYNC_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)? "| V4L2_MBUS_HSYNC_ACTIVE_LOW ": "",
+		 (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)? "| V4L2_MBUS_VSYNC_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)? "| V4L2_MBUS_VSYNC_ACTIVE_LOW ": "",
+		 (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING)? "| V4L2_MBUS_PCLK_SAMPLE_RISING ": "",
+		 (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)? "| V4L2_MBUS_PCLK_SAMPLE_FALLING ": "",
+		 (cfg->flags & V4L2_MBUS_DATA_ACTIVE_HIGH)? "| V4L2_MBUS_DATA_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_DATA_ACTIVE_LOW)? "| V4L2_MBUS_DATA_ACTIVE_LOW ": "");
+
+	return 0;
+}
+
+/*
+ * v4l2 s_mbus_config: accept parallel or BT.656 bus types only.
+ * Currently a stub ("fake for the moment"): the request is validated
+ * and logged but no registers are touched — see the TODO below.
+ */
+static int adv7604_s_mbus_config(struct v4l2_subdev *sd,
+				 const struct v4l2_mbus_config *cfg)
+{
+	//fake for the moment
+	if( (cfg->type != V4L2_MBUS_PARALLEL) &&
+	    (cfg->type != V4L2_MBUS_BT656) )
+		return -EINVAL;
+
+	//TODO change the values of register to be compliant with the configuration
+	v4l2_dbg(2, debug, sd, "%s: v4l2_mbus_config type %s\n flags = 0x%x ( %s%s%s%s%s%s%s%s )\n",
+		 __func__,
+		 (cfg->type==V4L2_MBUS_PARALLEL) ? "V4L2_MBUS_PARALLEL":"V4L2_MBUS_BT656",
+		 cfg->flags,
+		 (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)? " V4L2_MBUS_HSYNC_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)? "| V4L2_MBUS_HSYNC_ACTIVE_LOW ": "",
+		 (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)? "| V4L2_MBUS_VSYNC_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)? "| V4L2_MBUS_VSYNC_ACTIVE_LOW ": "",
+		 (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING)? "| V4L2_MBUS_PCLK_SAMPLE_RISING ": "",
+		 (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)? "| V4L2_MBUS_PCLK_SAMPLE_FALLING ": "",
+		 (cfg->flags & V4L2_MBUS_DATA_ACTIVE_HIGH)? "| V4L2_MBUS_DATA_ACTIVE_HIGH ": "",
+		 (cfg->flags & V4L2_MBUS_DATA_ACTIVE_LOW)? "| V4L2_MBUS_DATA_ACTIVE_LOW ": "");
+
+	//io_write(sd, 0x06, 0x2f); /* Field output on VS/FIELD pin, Positive polarity HS, VS, Field, LLC */
+	// INV_LLC_POL, IO Map, Address 0x06, [0] A control to select the polarity of the LLC.
+	// INV_HS_POL, IO, Address 0x06[1] A control to select the polarity of the HS signal.
+	// INV_VS_POL, IO, Address 0x06[2] A control to select the polarity of the VS/FIELD/ALSB signal.
+	// INV_F_POL, IO, Address 0x06[3] A control to select the polarity of the DE signal.
+	return 0;
+}
+
+/*
+ * Interrupt service routine: acknowledge and dispatch format-change
+ * and cable (+5V) detect interrupts. Sets *handled (if non-NULL) when
+ * any known interrupt source was serviced.
+ */
+static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+	u8 fmt_change, fmt_change_digital, tx_5v;
+
+	/* format change */
+	fmt_change = io_read(sd, 0x43) & 0x98;
+	if (fmt_change)
+		io_write(sd, 0x44, fmt_change); /* write-1-to-clear */
+	fmt_change_digital = DIGITAL_INPUT ? (io_read(sd, 0x6b) & info->fmt_change_digital_mask) : 0;
+	if (fmt_change_digital)
+		io_write(sd, 0x6c, fmt_change_digital);
+	if (fmt_change || fmt_change_digital) {
+		v4l2_dbg(1, debug, sd,
+			"%s: ADV7604_FMT_CHANGE, fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
+			__func__, fmt_change, fmt_change_digital);
+		/* let the bridge driver react to the new format */
+		v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
+		if (handled)
+			*handled = true;
+	}
+	/* tx 5v detect */
+	tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+	if (tx_5v) {
+		v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
+		io_write(sd, 0x71, tx_5v); /* clear the interrupt */
+		adv7604_s_detect_tx_5v_ctrl(sd);
+		if (handled)
+			*handled = true;
+	}
+	return 0;
+}
+
+/*
+ * v4l2 pad get_edid: copy the cached EDID (128-byte blocks) to the
+ * caller, clamping the requested block range to what is stored.
+ */
+static int adv7604_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+	struct adv7604_state *state = to_state(sd);
+
+	if (edid->pad != 0)
+		return -EINVAL;
+	if (edid->blocks == 0)
+		return -EINVAL;
+	if (edid->start_block >= state->edid_blocks)
+		return -EINVAL;
+	if (edid->start_block + edid->blocks > state->edid_blocks)
+		edid->blocks = state->edid_blocks - edid->start_block;
+	if (!edid->edid)
+		return -EINVAL;
+	memcpy(edid->edid + edid->start_block * 128,
+	       state->edid + edid->start_block * 128,
+	       edid->blocks * 128);
+	return 0;
+}
+
+/*
+ * v4l2 pad set_edid: store a new EDID (max 2 blocks, must start at
+ * block 0) and program it into the chip's internal EDID RAM.
+ *
+ * blocks == 0 means "remove EDID": the hotplug pin is pulled down, DDC
+ * access to the EDID RAM is disabled and the aspect ratio falls back
+ * to 16:9.
+ *
+ * Fix: corrected the misspelled debug message ("succed").
+ */
+static int adv7604_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+	int err;
+
+	if (edid->pad != 0)
+		return -EINVAL;
+
+	if (edid->start_block != 0)
+		return -EINVAL;
+
+	if (edid->blocks == 0) {
+		/* Pull down the hotplug pin */
+		v4l2_subdev_notify(sd, ADV7604_HOTPLUG, (void *)0);
+		/* Disables I2C access to internal EDID ram from DDC port */
+		rep_write_and_or(sd, info->edid_ctrl_reg, 0xf0, 0x0);
+		state->edid_blocks = 0;
+		/* Fall back to a 16:9 aspect ratio */
+		state->aspect_ratio.numerator = 16;
+		state->aspect_ratio.denominator = 9;
+		return 0;
+	}
+
+	if (edid->blocks > 2)
+		return -E2BIG;
+
+	if (!edid->edid)
+		return -EINVAL;
+
+	memcpy(state->edid, edid->edid, 128 * edid->blocks);
+	state->edid_blocks = edid->blocks;
+	/* bytes 0x15/0x16 of block 0 hold the screen size */
+	state->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15],
+			edid->edid[0x16]);
+	err = edid_write_block(sd, 128 * edid->blocks, state->edid);
+	if (err < 0)
+		v4l2_err(sd, "error %d writing edid\n", err);
+	else
+		v4l2_dbg(2, debug, sd, "Writing edid succeeded\n");
+
+	return err;
+}
+
+/*********** avi info frame CEA-861-E **************/
+
+/*
+ * Dump the received AVI infoframe to the kernel log.
+ *
+ * Bails out early when the source is DVI-D (no infoframes) or no AVI
+ * infoframe has been received; checksum errors are reported and the
+ * raw status cleared. Only version 2 infoframes are dumped.
+ */
+static void print_avi_infoframe(struct v4l2_subdev *sd)
+{
+	int i;
+	u8 buf[14];
+	u8 avi_len;
+	u8 avi_ver;
+
+	if (!(hdmi_read(sd, 0x05) & 0x80)) {
+		v4l2_info(sd, "receive DVI-D signal (AVI infoframe not supported)\n");
+		return;
+	}
+	if (!(io_read(sd, 0x60) & 0x01)) {
+		v4l2_info(sd, "AVI infoframe not received\n");
+		return;
+	}
+
+	if (io_read(sd, 0x83) & 0x01) {
+		v4l2_info(sd, "AVI infoframe checksum error has occurred earlier\n");
+		io_write(sd, 0x85, 0x01); /* clear AVI_INF_CKS_ERR_RAW */
+		/* re-read: a second error within this window is reported too */
+		if (io_read(sd, 0x83) & 0x01) {
+			v4l2_info(sd, "AVI infoframe checksum error still present\n");
+			io_write(sd, 0x85, 0x01); /* clear AVI_INF_CKS_ERR_RAW */
+		}
+	}
+
+	avi_len = infoframe_read(sd, 0xe2);
+	avi_ver = infoframe_read(sd, 0xe1);
+	v4l2_info(sd, "AVI infoframe version %d (%d byte)\n",
+		  avi_ver, avi_len);
+
+	if (avi_ver != 0x02)
+		return;
+
+	for (i = 0; i < 14; i++)
+		buf[i] = infoframe_read(sd, i);
+
+	v4l2_info(sd,
+		"\t%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+		buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
+		buf[8], buf[9], buf[10], buf[11], buf[12], buf[13]);
+}
+
+/*
+ * v4l2 log_status: dump a human-readable snapshot of the chip — power,
+ * connector, EDID/CEC state, signal/lock status, measured and
+ * configured timings, color-space configuration, and (for digital
+ * inputs) HDCP/AVI infoframe details. Debugging aid only.
+ */
+static int adv7604_log_status(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+	struct v4l2_dv_timings timings;
+	struct stdi_readback stdi;
+	u8 reg_io_0x02 = io_read(sd, 0x02);
+
+	/* lookup tables indexed by the raw register fields printed below */
+	static const char * const csc_coeff_sel_rb[16] = {
+		"bypassed", "YPbPr601 -> RGB", "reserved", "YPbPr709 -> RGB",
+		"reserved", "RGB -> YPbPr601", "reserved", "RGB -> YPbPr709",
+		"reserved", "YPbPr709 -> YPbPr601", "YPbPr601 -> YPbPr709",
+		"reserved", "reserved", "reserved", "reserved", "manual"
+	};
+	static const char * const input_color_space_txt[16] = {
+		"RGB limited range (16-235)", "RGB full range (0-255)",
+		"YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
+		"XvYCC Bt.601", "XvYCC Bt.709",
+		"YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
+		"invalid", "invalid", "invalid", "invalid", "invalid",
+		"invalid", "invalid", "automatic"
+	};
+	static const char * const rgb_quantization_range_txt[] = {
+		"Automatic",
+		"RGB limited range (16-235)",
+		"RGB full range (0-255)",
+	};
+
+	v4l2_info(sd, "-----Chip status-----\n");
+	v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
+	v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
+			"HDMI" : (DIGITAL_INPUT ? "DVI-D" : "DVI-A"));
+	v4l2_info(sd, "EDID: %s\n", ((rep_read(sd, info->edid_ctrl_reg) & 0x01) &&
+			(rep_read(sd, info->edid_status_reg) & 0x01)) ? "enabled" : "disabled ");
+	v4l2_info(sd, "CEC: %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
+			"enabled" : "disabled");
+
+	v4l2_info(sd, "-----Signal status-----\n");
+	v4l2_info(sd, "Cable detected (+5V power): %s\n",
+			(io_read(sd, 0x6f) & info->cable_det_mask) ? "true" : "false");
+	v4l2_info(sd, "TMDS signal detected: %s\n",
+			no_signal_tmds(sd) ? "false" : "true");
+	v4l2_info(sd, "TMDS signal locked: %s\n",
+			no_lock_tmds(sd) ? "false" : "true");
+	v4l2_info(sd, "SSPD locked: %s\n", no_lock_sspd(sd) ? "false" : "true");
+	v4l2_info(sd, "STDI locked: %s\n", no_lock_stdi(sd) ? "false" : "true");
+	v4l2_info(sd, "CP locked: %s\n", no_lock_cp(sd) ? "false" : "true");
+	v4l2_info(sd, "CP free run: %s\n",
+			(!!(cp_read(sd, 0xff) & 0x10) ? "on" : "off"));
+	v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x, v_freq = 0x%x\n",
+			io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f,
+			(io_read(sd, 0x01) & 0x70) >> 4);
+
+	v4l2_info(sd, "-----Video Timings-----\n");
+	if (read_stdi(sd, &stdi))
+		v4l2_info(sd, "STDI: not locked\n");
+	else
+		v4l2_info(sd, "STDI: lcf (frame height - 1) = %d, bl = %d, lcvs (vsync) = %d, %s, %chsync, %cvsync\n",
+				stdi.lcf, stdi.bl, stdi.lcvs,
+				stdi.interlaced ? "interlaced" : "progressive",
+				stdi.hs_pol, stdi.vs_pol);
+	if (adv7604_query_dv_timings(sd, &timings))
+		v4l2_info(sd, "No video detected\n");
+	else
+		adv7604_print_timings(sd, &timings, "Detected format:", true);
+	adv7604_print_timings(sd, &state->timings, "Configured format:", true);
+
+	v4l2_info(sd, "-----Color space-----\n");
+	v4l2_info(sd, "RGB quantization range ctrl: %s\n",
+			rgb_quantization_range_txt[state->rgb_quantization_range]);
+	v4l2_info(sd, "Input color space: %s\n",
+			input_color_space_txt[reg_io_0x02 >> 4]);
+	v4l2_info(sd, "Output color space: %s %s, saturator %s\n",
+			(reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
+			(reg_io_0x02 & 0x04) ? "(16-235)" : "(0-255)",
+			((reg_io_0x02 & 0x04) ^ (reg_io_0x02 & 0x01)) ?
+				"enabled" : "disabled");
+	v4l2_info(sd, "Color space conversion: %s\n",
+			csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
+
+	/* Digital video */
+	if (DIGITAL_INPUT) {
+		v4l2_info(sd, "-----HDMI status-----\n");
+		v4l2_info(sd, "HDCP encrypted content: %s\n",
+				hdmi_read(sd, 0x05) & 0x40 ? "true" : "false");
+
+		print_avi_infoframe(sd);
+	}
+
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Control-framework hooks for this driver. */
+static const struct v4l2_ctrl_ops adv7604_ctrl_ops = {
+	.s_ctrl = adv7604_s_ctrl,
+};
+
+/* Core subdev ops: mostly generic ctrl wrappers plus chip-specific
+ * status/ident/IRQ handlers. */
+static const struct v4l2_subdev_core_ops adv7604_core_ops = {
+	.log_status = adv7604_log_status,
+	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+	.g_ctrl = v4l2_subdev_g_ctrl,
+	.s_ctrl = v4l2_subdev_s_ctrl,
+	.queryctrl = v4l2_subdev_queryctrl,
+	.querymenu = v4l2_subdev_querymenu,
+	.g_chip_ident = adv7604_g_chip_ident,
+	.interrupt_service_routine = adv7604_isr,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register = adv7604_g_register,
+	.s_register = adv7604_s_register,
+#endif
+};
+
+/* Video subdev ops: streaming, routing and DV-timings handling. */
+static const struct v4l2_subdev_video_ops adv7604_video_ops = {
+	.s_stream = adv7604_s_stream,
+	.s_routing = adv7604_s_routing,
+	.g_input_status = adv7604_g_input_status,
+	.s_dv_timings = adv7604_s_dv_timings,
+	.g_dv_timings = adv7604_g_dv_timings,
+	.query_dv_timings = adv7604_query_dv_timings,
+	.enum_dv_timings = adv7604_enum_dv_timings,
+	.dv_timings_cap = adv7604_dv_timings_cap,
+	.g_mbus_config = adv7604_g_mbus_config,
+	.s_mbus_config = adv7604_s_mbus_config,
+};
+
+/* Pad-level subdev ops: EDID and per-pad format handling. */
+static const struct v4l2_subdev_pad_ops adv7604_pad_ops = {
+	.get_edid = adv7604_get_edid,
+	.set_edid = adv7604_set_edid,
+	.get_fmt = adv7604_get_fmt,
+	.set_fmt = adv7604_set_fmt,
+	.enum_mbus_code = adv7604_enum_mbus_code,
+};
+
+/* Top-level subdev ops table registered with the v4l2 core. */
+static const struct v4l2_subdev_ops adv7604_ops = {
+	.core = &adv7604_core_ops,
+	.video = &adv7604_video_ops,
+	.pad = &adv7604_pad_ops,
+};
+
+/* -------------------------- custom ctrls ---------------------------------- */
+
+/* Custom control: analog input sampling phase, 0..0x1f. */
+static const struct v4l2_ctrl_config adv7604_ctrl_analog_sampling_phase = {
+	.ops = &adv7604_ctrl_ops,
+	.id = V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE,
+	.name = "Analog Sampling Phase",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0,
+	.max = 0x1f,
+	.step = 1,
+	.def = 0,
+};
+
+/* Custom control: select manual free-run color (boolean switch). */
+static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color_manual = {
+	.ops = &adv7604_ctrl_ops,
+	.id = V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL,
+	.name = "Free Running Color, Manual",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = false,
+	.max = true,
+	.step = 1,
+	.def = false,
+};
+
+/* Custom control: 24-bit RGB value used when free-running. */
+static const struct v4l2_ctrl_config adv7604_ctrl_free_run_color = {
+	.ops = &adv7604_ctrl_ops,
+	.id = V4L2_CID_ADV_RX_FREE_RUN_COLOR,
+	.name = "Free Running Color",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min = 0x0,
+	.max = 0xffffff,
+	.step = 0x1,
+	.def = 0x0,
+};
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * One-time core bring-up: power sequencing, output-format programming
+ * from platform data, free-run/STDI thresholds, AFE muxing on parts
+ * that have one, and interrupt unmasking. Finishes by pushing all
+ * control defaults to the hardware. Register write order follows the
+ * ADI recommended init sequence — do not reorder.
+ */
+static int adv7604_core_init(struct v4l2_subdev *sd)
+{
+	struct adv7604_state *state = to_state(sd);
+	const struct adv7604_chip_info *info = state->info;
+	struct adv7604_platform_data *pdata = &state->pdata;
+
+	hdmi_write(sd, 0x48,
+		(pdata->disable_pwrdnb ? 0x80 : 0) |
+		(pdata->disable_cable_det_rst ? 0x40 : 0));
+
+	disable_input(sd);
+
+	/* power */
+	io_write(sd, 0x0c, 0x42);   /* Power up part and power down VDP */
+	io_write(sd, 0x0b, 0x44);   /* Power down ESDP block */
+	cp_write(sd, 0xcf, 0x01);   /* Power down macrovision */
+
+	/* video format */
+	io_write_and_or(sd, 0x02, 0xf0,
+			pdata->alt_gamma << 3 |
+			pdata->op_656_range << 2 |
+			pdata->rgb_out << 1 |
+			pdata->alt_data_sat << 0);
+	io_write(sd, 0x03, pdata->op_format_sel);
+	io_write_and_or(sd, 0x04, 0x1f, pdata->op_ch_sel << 5);
+	io_write_and_or(sd, 0x05, 0xf0, pdata->blank_data << 3 |
+			pdata->insert_av_codes << 2 |
+			pdata->replicate_av_codes << 1 |
+			pdata->invert_cbcr << 0);
+
+	/* TODO from platform data */
+	if (adv7604_has_afe(state)){
+		cp_write(sd, 0x69, 0x30);   /* Enable CP CSC */
+		io_write(sd, 0x06, 0xa6);   /* positive VS and HS */
+	}else
+		io_write(sd, 0x06, 0x2f); /* Field output on VS/FIELD pin, Positive polarity HS, VS, Field, LLC */
+
+	io_write(sd, 0x14, 0x7f);   /* Drive strength adjusted to max */
+	cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01); /* HDMI free run */
+	cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
+	cp_write(sd, 0xf9, 0x23); /*  STDI ch. 1 - LCVS change threshold -
+				      ADI recommended setting [REF_01, c. 2.3.3] */
+
+	if (adv7604_has_afe(state)){
+		cp_write(sd, 0x45, 0x23); /*  STDI ch. 2 - LCVS change threshold -
+					      ADI recommended setting [REF_01, c. 2.3.3] */
+	}
+
+	cp_write(sd, 0xc9, 0x2d); /* use prim_mode and vid_std as free run resolution
+				     for digital formats */
+
+	/* TODO from platform data */
+	afe_write(sd, 0xb5, 0x01);  /* Setting MCLK to 256Fs */
+
+	if (adv7604_has_afe(state)) {
+		afe_write(sd, 0x02, pdata->ain_sel); /* Select analog input muxing mode */
+		io_write_and_or(sd, 0x30, ~(1 << 4), pdata->output_bus_lsb_to_msb << 4);
+	}
+
+	/* interrupts */
+	//io_write(sd, 0x40, 0xc0 | pdata->int1_config); /* Configure INT1, remains untill flags is cleared */
+	io_write(sd, 0x40, 0x80 | pdata->int1_config); /* Configure INT1, 64 Xtal periods */
+	io_write(sd, 0x73, info->cable_det_mask); /* Enable CABLE_DET_A_ST (+5v) interrupt */
+
+	if (!adv7604_has_afe(state)){ /* Audio */
+		io_write(sd, 0x69, 0x80); /* Enable CS_DATA_VALID_ST interrupt */
+		io_write(sd, 0x87, 0x8); /* Enable NEW_SAMP_RT_ST interrupt */
+	}
+
+	io_write(sd, 0x46, 0x98); /* Enable SSPD, STDI and CP unlocked interrupts */
+	io_write(sd, 0x6e, info->fmt_change_digital_mask); /* Enable V_LOCKED and DE_REGEN_LCK interrupts */
+	info->setup_irqs(sd);   /* chip-variant specific INT1/INT2 setup */
+
+	/* push current control values into the hardware */
+	return v4l2_ctrl_handler_setup(sd->ctrl_handler);
+}
+
+/* ADV7604 variant of the chip_info setup_irqs hook. */
+static void adv7604_setup_irqs(struct v4l2_subdev *sd)
+{
+	io_write(sd, 0x41, 0xd7); /* STDI irq for any change, disable INT2 */
+}
+
+/* ADV7611 variant of the chip_info setup_irqs hook. */
+static void adv7611_setup_irqs(struct v4l2_subdev *sd)
+{
+	io_write(sd, 0x41, 0xd0); /* STDI irq for any change, disable INT2 */
+}
+
+/*
+ * Tear down every dummy i2c client created for the chip's register
+ * maps. Each pointer is guarded so partially-initialized states (probe
+ * failure paths) are handled safely.
+ */
+static void adv7604_unregister_clients(struct adv7604_state *state)
+{
+	if (state->i2c_avlink)
+		i2c_unregister_device(state->i2c_avlink);
+	if (state->i2c_cec)
+		i2c_unregister_device(state->i2c_cec);
+	if (state->i2c_infoframe)
+		i2c_unregister_device(state->i2c_infoframe);
+	if (state->i2c_esdp)
+		i2c_unregister_device(state->i2c_esdp);
+	if (state->i2c_dpp)
+		i2c_unregister_device(state->i2c_dpp);
+	if (state->i2c_afe)
+		i2c_unregister_device(state->i2c_afe);
+	if (state->i2c_repeater)
+		i2c_unregister_device(state->i2c_repeater);
+	if (state->i2c_edid)
+		i2c_unregister_device(state->i2c_edid);
+	if (state->i2c_hdmi)
+		i2c_unregister_device(state->i2c_hdmi);
+	if (state->i2c_test)
+		i2c_unregister_device(state->i2c_test);
+	if (state->i2c_cp)
+		i2c_unregister_device(state->i2c_cp);
+	if (state->i2c_vdp)
+		i2c_unregister_device(state->i2c_vdp);
+}
+
+/*
+ * Create a dummy i2c client for one of the chip's secondary register
+ * maps. When @addr is non-zero it is programmed into @io_reg first
+ * (the chip stores the slave address shifted left by one); NULL is
+ * returned if that write fails. The client address is then read back
+ * from the chip so default map addresses also work (addr == 0).
+ */
+static struct i2c_client *adv7604_dummy_client(struct v4l2_subdev *sd,
+					u8 addr, u8 io_reg)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+	if (addr)
+		if( io_write(sd, io_reg, addr << 1) < 0)
+			return NULL; //no device detected
+	return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+}
+
+/*
+ * ADI recommended register sequence for the analog (AFE) input path;
+ * also resets the HDMI-specific tweaks back to their analog values.
+ *
+ * Fix: removed a duplicated HDMI 0x0d entry (the same register was
+ * written twice with the same value — harmless but redundant).
+ */
+static const struct adv7604_reg_seq adv7604_recommended_settings_afe[] = {
+	/* reset ADI recommended settings for HDMI: */
+	/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x0d), 0x04 }, /* HDMI filter optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x3d), 0x00 }, /* DDC bus active pull-up control */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x3e), 0x74 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0x74 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x63 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x93), 0x88 }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x94), 0x2e }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x96), 0x00 }, /* enable automatic EQ changing */
+
+	/* set ADI recommended settings for digitizer */
+	/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 17. */
+	{ ADV7604_REG(ADV7604_PAGE_AFE, 0x12), 0x7b }, /* ADC noise shaping filter controls */
+	{ ADV7604_REG(ADV7604_PAGE_AFE, 0x0c), 0x1f }, /* CP core gain controls */
+	{ ADV7604_REG(ADV7604_PAGE_CP, 0x3e), 0x04 }, /* CP core pre-gain control */
+	{ ADV7604_REG(ADV7604_PAGE_CP, 0xc3), 0x39 }, /* CP coast control. Graphics mode */
+	{ ADV7604_REG(ADV7604_PAGE_CP, 0x40), 0x5c }, /* CP core pre-gain control. Graphics mode */
+};
+
+/*
+ * ADI recommended register sequence for the ADV7604 HDMI input path;
+ * also resets the analog/digitizer tweaks back to their HDMI values.
+ */
+static const struct adv7604_reg_seq adv7604_recommended_settings_hdmi[] = {
+	/* set ADI recommended settings for HDMI: */
+	/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 4. */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x0d), 0x84 }, /* HDMI filter optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x3d), 0x10 }, /* DDC bus active pull-up control */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x3e), 0x39 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x4e), 0x3b }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0xb6 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x03 }, /* TMDS PLL optimization */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x18 }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x34 }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x93), 0x8b }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x94), 0x2d }, /* equaliser */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x96), 0x01 }, /* enable automatic EQ changing */
+
+	/* reset ADI recommended settings for digitizer */
+	/* "ADV7604 Register Settings Recommendations (rev. 2.5, June 2010)" p. 17. */
+	{ ADV7604_REG(ADV7604_PAGE_AFE, 0x12), 0xfb }, /* ADC noise shaping filter controls */
+	{ ADV7604_REG(ADV7604_PAGE_AFE, 0x0c), 0x0d }, /* CP core gain controls */
+};
+
+/* ADI recommended register sequence for the ADV7611 HDMI input path. */
+static const struct adv7604_reg_seq adv7611_recommended_settings_hdmi[] = {
+	{ ADV7604_REG(ADV7604_PAGE_CP, 0x6c), 0x00 },
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x03), 0x98 },
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x4c), 0x44 },
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8d), 0x04 },
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x8e), 0x1e },
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x6c), 0x26 }, /* HPA_AUTO_INT_EDID, HPA_DELAY_SEL */
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x9b), 0x03 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc1), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc2), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc3), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc4), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc5), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc6), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc7), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc8), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xc9), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xca), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xcb), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0xcc), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x6f), 0x0c },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x85), 0x1f },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x87), 0x70 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x57), 0xda },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x58), 0x01 },// ADI recommended setting
+	{ ADV7604_REG(ADV7604_PAGE_HDMI, 0x75), 0x10 },// DDC drive strength
+};
+
+/*
+ * Per-variant capability/quirk table, indexed by the i2c_device_id
+ * driver_data (ADV7604 or ADV7611). The interrupt handler and core
+ * init code dispatch through these fields rather than testing the
+ * chip type directly.
+ */
+static const struct adv7604_chip_info adv7604_chip_info[] = {
+	[ADV7604] = {
+		.has_afe = true,
+		.tdms_lock_mask = 0xe0,
+		.fmt_change_digital_mask = 0xc0,
+		/* NOTE(review): .cable_det_mask is left 0 here, so the
+		 * tx_5v path in adv7604_irq() is a no-op on ADV7604 —
+		 * confirm this is intentional. */
+		.set_termination = adv7604_set_termination,
+		.setup_irqs = adv7604_setup_irqs,
+		.read_hdmi_pixelclock = adv7604_read_hdmi_pixelclock,
+		/* [0] = analog front end settings, [1] = HDMI settings */
+		.recommended_settings = {
+			[0] = adv7604_recommended_settings_afe,
+			[1] = adv7604_recommended_settings_hdmi,
+		},
+		.num_recommended_settings = {
+			[0] = ARRAY_SIZE(adv7604_recommended_settings_afe),
+			[1] = ARRAY_SIZE(adv7604_recommended_settings_hdmi),
+		},
+		/* Register pages that exist on this variant. */
+		.page_mask = BIT(ADV7604_PAGE_IO) | BIT(ADV7604_PAGE_AVLINK) |
+			BIT(ADV7604_PAGE_CEC) | BIT(ADV7604_PAGE_INFOFRAME) |
+			BIT(ADV7604_PAGE_ESDP) | BIT(ADV7604_PAGE_DPP) |
+			BIT(ADV7604_PAGE_AFE) | BIT(ADV7604_PAGE_REP) |
+			BIT(ADV7604_PAGE_EDID) | BIT(ADV7604_PAGE_HDMI) |
+			BIT(ADV7604_PAGE_TEST) | BIT(ADV7604_PAGE_CP) |
+			BIT(ADV7604_PAGE_VDP),
+	},
+	[ADV7611] = {
+		.has_afe = false,
+		.edid_ctrl_reg = 0x74,
+		.edid_status_reg = 0x76,
+		.lcf_reg = 0xa3,
+		.tdms_lock_mask = 0x43,
+		.cable_det_mask = 0x01,
+		.fmt_change_digital_mask = 0x03,
+		.set_termination = adv7611_set_termination,
+		.read_hdmi_pixelclock = adv7611_read_hdmi_pixelclock,
+		.setup_irqs = adv7611_setup_irqs,
+		/* No AFE on ADV7611: only the HDMI settings slot is used. */
+		.recommended_settings = {
+			[1] = adv7611_recommended_settings_hdmi,
+		},
+		.num_recommended_settings = {
+			[1] = ARRAY_SIZE(adv7611_recommended_settings_hdmi),
+		},
+		.page_mask = BIT(ADV7604_PAGE_IO) | BIT(ADV7604_PAGE_CEC) |
+			BIT(ADV7604_PAGE_INFOFRAME) | BIT(ADV7604_PAGE_AFE) |
+			BIT(ADV7604_PAGE_REP) | BIT(ADV7604_PAGE_EDID) |
+			BIT(ADV7604_PAGE_HDMI) | BIT(ADV7604_PAGE_CP),
+	},
+};
+
+
+/*
+ * adv7604_irq() - threaded IRQ handler.
+ *
+ * Reads and acknowledges the interrupt status registers (format change,
+ * +5V cable detect, audio channel-status/sample-rate changes) and pokes
+ * the matching sysfs dirents so userspace pollers wake up.
+ *
+ * Fix vs. original: IO register 0x6b was read twice back to back (the
+ * first read into 'regval' was discarded), wasting an I2C transaction
+ * per interrupt; the cached value is now used.
+ */
+static irqreturn_t adv7604_irq(int irq, void *dev_id)
+{
+	struct adv7604_state *state = dev_id;
+	struct v4l2_subdev *sd = &state->sd;
+	const struct adv7604_chip_info *info = state->info;
+	u8 fmt_change = 0, fmt_change_digital = 0, tx_5v = 0, regval = 0;
+	u8 cs_data_valid_st = 0, new_samp_rt_st = 0;
+
+	/* format change (analog path): read status, write-1-to-clear */
+	fmt_change = io_read(sd, 0x43) & 0x98;
+	if (fmt_change)
+		io_write(sd, 0x44, fmt_change);
+
+	/*
+	 * Digital (TMDS) status bits, IO 0x6b:
+	 *   TMDS_CLK_A_ST [4], V_LOCKED_ST [1], TMDSPLL_LCK_A_ST [6]
+	 * (raw counterparts live in IO 0x6a, see "HDMI Flags" table).
+	 */
+	regval = io_read(sd, 0x6b);
+	fmt_change_digital = DIGITAL_INPUT ?
+		(regval & info->fmt_change_digital_mask) : 0;
+
+	if (fmt_change_digital)
+		io_write(sd, 0x6c, fmt_change_digital);
+	if (fmt_change || fmt_change_digital) {
+		state->fmt_change = 1;
+		sysfs_notify_dirent(state->fmt);
+	}
+
+	/* tx 5v detect */
+	tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+	if (tx_5v) {
+		io_write(sd, 0x71, tx_5v);
+		/* TODO: refresh detect_tx_5v ctrl, perhaps via delayed work */
+		sysfs_notify_dirent(state->cable);
+	}
+
+	/* Audio: channel status data valid */
+	cs_data_valid_st = io_read(sd, 0x66) & 0x80;
+	if (cs_data_valid_st) {
+		io_write(sd, 0x67, cs_data_valid_st);/* not necessary to clear */
+		sysfs_notify_dirent(state->audio);
+	}
+
+	/* Audio: new sample rate detected */
+	new_samp_rt_st = io_read(sd, 0x84) & 0x8;
+	if (new_samp_rt_st) {
+		io_write(sd, 0x85, new_samp_rt_st);
+		sysfs_notify_dirent(state->audio);
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * adv7604_probe() - I2C probe for the ADV7604/ADV7611 HDMI receiver.
+ *
+ * Allocates driver state, registers the v4l2 subdevice and its control
+ * handler, creates the dummy I2C clients for the chip's secondary
+ * register pages, initializes the chip, creates the sysfs notification
+ * files and (optionally) requests a GPIO-based interrupt.
+ *
+ * Returns 0 on success or a negative errno; all partially acquired
+ * resources are released on the error paths at the bottom.
+ */
+static int adv7604_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct adv7604_state *state;
+	struct adv7604_platform_data *pdata = NULL;
+	struct v4l2_ctrl_handler *hdl;
+	struct v4l2_subdev *sd;
+	int err, loop;
+	/* NOTE(review): gpio_name is filled below but never used afterwards */
+	char gpio_name[50] = { 0 };
+	pdata = client->dev.platform_data;
+
+	/* Check if the adapter supports the needed features */
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -EIO;
+	v4l_dbg(1, debug, client, "detecting %s client on address 0x%x\n",
+			id->name,
+			client->addr << 1);
+
+	state = kzalloc(sizeof(struct adv7604_state), GFP_KERNEL);
+	if (!state) {
+		v4l_err(client, "Could not allocate adv7604_state memory!\n");
+		return -ENOMEM;
+	}
+
+	/* driver_data selects the variant entry in adv7604_chip_info[] */
+	state->info = &adv7604_chip_info[id->driver_data];
+
+	/* The ADV7611 has no analog front end: HDMI is the only mode. */
+	if( id->driver_data == ADV7611 )
+		state->mode = ADV7604_MODE_HDMI;
+
+	/* platform data */
+	if (!pdata) {
+		v4l_err(client, "No platform data!\n");
+		err = -ENODEV;
+		goto err_state;
+	}
+	memcpy(&state->pdata, pdata, sizeof(state->pdata));
+
+	if (pdata->power_on) {
+		err = pdata->power_on();
+		if (err) {
+			v4l_err(client, "power on failed\n");
+			goto err_state;
+		}
+	}
+
+	state->timings.bt.width = pdata->default_width;
+	state->timings.bt.height = pdata->default_height;
+	if (!adv7604_has_afe(state))
+		state->timings.bt.standards = V4L2_DV_BT_STD_CEA861;
+
+	sd = &state->sd;
+	v4l2_i2c_subdev_init(sd, client, &adv7604_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	state->connector_hdmi = pdata->connector_hdmi;
+
+#if 0
+	/* i2c access to adv7604? */
+	if (adv_smbus_read_byte_data_check(client, 0xfb, false) != 0x68) {
+		v4l2_info(sd, "not an adv7604 on address 0x%x\n",
+				client->addr << 1);
+		err = -ENODEV;
+		goto err_pwr;
+	}
+#endif
+
+	/* control handlers */
+	hdl = &state->hdl;
+	v4l2_ctrl_handler_init(hdl, adv7604_has_afe(state) ? 9 : 8);
+
+	v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+			V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+	v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+			V4L2_CID_CONTRAST, 0, 255, 1, 128);
+	v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+			V4L2_CID_SATURATION, 0, 255, 1, 128);
+	v4l2_ctrl_new_std(hdl, &adv7604_ctrl_ops,
+			V4L2_CID_HUE, 0, 128, 1, 0);
+
+	/* private controls */
+	state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+			V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
+	state->detect_tx_5v_ctrl->is_private = true;
+
+	/* RGB range control only makes sense for RGB input color spaces. */
+	if( state->pdata.inp_color_space <= ADV7604_INP_COLOR_SPACE_FULL_RGB ){
+		state->rgb_quantization_range_ctrl =
+			v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
+				V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+				0, V4L2_DV_RGB_RANGE_AUTO);
+		state->rgb_quantization_range_ctrl->is_private = true;
+	}
+
+	/* custom controls */
+	if (adv7604_has_afe(state)) {
+		state->analog_sampling_phase_ctrl =
+			v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_analog_sampling_phase, NULL);
+		state->analog_sampling_phase_ctrl->is_private = true;
+	}
+	state->free_run_color_manual_ctrl =
+		v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color_manual, NULL);
+	state->free_run_color_manual_ctrl->is_private = true;
+	state->free_run_color_ctrl =
+		v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color, NULL);
+	state->free_run_color_ctrl->is_private = true;
+
+	sd->ctrl_handler = hdl;
+	if (hdl->error) {
+		err = hdl->error;
+		goto err_hdl;
+	}
+	if (adv7604_s_detect_tx_5v_ctrl(sd)) {
+		err = -ENODEV;
+		goto err_hdl;
+	}
+
+	/* Dummy clients for the chip's secondary register pages (the
+	 * values are the default page slave addresses). */
+	state->i2c_cec = adv7604_dummy_client(sd, pdata->i2c_cec, 0xf4);
+	state->i2c_infoframe = adv7604_dummy_client(sd, pdata->i2c_infoframe, 0xf5);
+	state->i2c_afe = adv7604_dummy_client(sd, pdata->i2c_afe, 0xf8);
+	state->i2c_repeater = adv7604_dummy_client(sd, pdata->i2c_repeater, 0xf9);
+	state->i2c_edid = adv7604_dummy_client(sd, pdata->i2c_edid, 0xfa);
+	state->i2c_hdmi = adv7604_dummy_client(sd, pdata->i2c_hdmi, 0xfb);
+	state->i2c_cp = adv7604_dummy_client(sd, pdata->i2c_cp, 0xfd);
+	if (!state->i2c_cec || !state->i2c_infoframe || !state->i2c_afe ||
+	    !state->i2c_repeater || !state->i2c_edid || !state->i2c_hdmi ||
+	    !state->i2c_cp) {
+		err = -ENOMEM;
+		v4l2_err(sd, "failed to create all i2c clients\n");
+		goto err_i2c;
+	}
+	/* Pages that exist only on the ADV7604 variant. */
+	if (adv7604_has_afe(state)) {
+		state->i2c_avlink = adv7604_dummy_client(sd, pdata->i2c_avlink, 0xf3);
+		state->i2c_esdp = adv7604_dummy_client(sd, pdata->i2c_esdp, 0xf6);
+		state->i2c_dpp = adv7604_dummy_client(sd, pdata->i2c_dpp, 0xf7);
+		state->i2c_test = adv7604_dummy_client(sd, pdata->i2c_test, 0xfc);
+		state->i2c_vdp = adv7604_dummy_client(sd, pdata->i2c_vdp, 0xfe);
+		if (!state->i2c_avlink || !state->i2c_esdp || !state->i2c_dpp ||
+		    !state->i2c_test || !state->i2c_vdp) {
+			err = -ENOMEM;
+			v4l2_err(sd, "failed to create all i2c clients\n");
+			goto err_i2c;
+		}
+	}
+	state->restart_stdi_once = true;
+
+	/* work queues */
+	state->work_queues = create_singlethread_workqueue(client->name);
+	if (!state->work_queues) {
+		v4l2_err(sd, "Could not create work queue\n");
+		err = -ENOMEM;
+		goto err_i2c;
+	}
+
+	INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
+			adv7604_delayed_work_enable_hotplug);
+
+	state->pad.flags = MEDIA_PAD_FL_SOURCE;
+	err = media_entity_init(&sd->entity, 1, &state->pad, 0);
+	if (err)
+		goto err_work_queues;
+
+	/* Embedded sync codes imply BT.656; force the 656 output range. */
+	if(pdata->insert_av_codes ){
+		state->mbus_type = V4L2_MBUS_BT656;
+
+		if(!pdata->op_656_range)//Force range
+			pdata->op_656_range = 1;
+	}else
+		state->mbus_type = V4L2_MBUS_PARALLEL;
+
+	state->code = adv7604_get_mbus_pixelcode( pdata->op_ch_sel,
+						  pdata->op_format_sel,
+						  pdata->invert_cbcr);
+
+	err = adv7604_core_init(sd);
+	if (err)
+		goto err_entity;
+	v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+			client->addr << 1, client->adapter->name);
+
+	if(sd && !adv7604_has_afe(state)) // Force routing
+		adv7604_s_routing(sd, ADV7604_MODE_HDMI, 0, 0);
+
+	/* sysfs files + dirents used by the IRQ handler to notify pollers */
+	for (loop = 0; loop < ARRAY_SIZE(adv7611_sysfs_attrs); loop++) {
+		err = device_create_file(&client->dev, &adv7611_sysfs_attrs[loop]);
+		if (err) {
+			goto sysfs_err;
+		}
+	}
+
+	state->cable = sysfs_get_dirent(client->dev.kobj.sd, NULL, "cable");
+	if(!state->cable){
+		err = -ENODEV;
+		goto sysfs_err;
+	}
+	state->signal = sysfs_get_dirent(client->dev.kobj.sd, NULL, "signal");
+	if(!state->signal){
+		err = -ENODEV;
+		goto sysfs_err;
+	}
+	state->fmt = sysfs_get_dirent(client->dev.kobj.sd, NULL, "fmt");
+	if(!state->fmt){
+		err = -ENODEV;
+		goto sysfs_err;
+	}
+
+	state->audio = sysfs_get_dirent(client->dev.kobj.sd, NULL, "audio");
+	if(!state->audio){
+		err = -ENODEV;
+		goto sysfs_err;
+	}
+
+	/* Optional interrupt line; cam_it == -1 means "not wired". */
+	if( pdata->cam_it != -1 ){
+		v4l2_err(sd, "request IRQ gpio %d\n", pdata->cam_it);
+		if (!gpio_is_valid(pdata->cam_it)){
+			err = -EINVAL;
+			v4l2_err(sd, "Invalide GPIO %d\n",
+				 pdata->cam_it);
+			goto sysfs_err;
+		}
+		snprintf(gpio_name, ARRAY_SIZE(gpio_name), "%s cam_it", client->name);
+		state->irq = gpio_to_irq(pdata->cam_it);
+
+
+		err = request_threaded_irq(state->irq, NULL, adv7604_irq,
+					   IRQF_TRIGGER_FALLING,
+					   client->name, state);
+
+		if (err < 0) {
+			v4l2_err(sd, "request IRQ %d failed\n",
+				 state->irq);
+			goto erqstirq;
+		}
+	}
+
+	state->fmt_change = 0;
+
+	return 0;
+
+	/* Unwind in reverse acquisition order. */
+erqstirq:
+sysfs_err:
+	if(state->cable)
+		sysfs_put(state->cable);
+
+	if(state->signal)
+		sysfs_put(state->signal);
+
+	if(state->fmt)
+		sysfs_put(state->fmt);
+
+	if(state->audio)
+		sysfs_put(state->audio);
+
+	for (loop = 0; loop < ARRAY_SIZE(adv7611_sysfs_attrs); loop++) {
+		device_remove_file(&client->dev,
+				   &adv7611_sysfs_attrs[loop]);
+	}
+
+err_entity:
+	media_entity_cleanup(&sd->entity);
+err_work_queues:
+	cancel_delayed_work(&state->delayed_work_enable_hotplug);
+	destroy_workqueue(state->work_queues);
+err_i2c:
+	adv7604_unregister_clients(state);
+err_hdl:
+	v4l2_ctrl_handler_free(hdl);
+err_state:
+	kfree(state);
+	return err;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * adv7604_remove() - tear down everything acquired in adv7604_probe(),
+ * in reverse order: IRQ, sysfs dirents and files, work queue, subdev,
+ * media entity, dummy I2C clients, controls, power, state.
+ */
+static int adv7604_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct adv7604_state *state = to_state(sd);
+	int i;
+
+	/* Only requested when an interrupt GPIO was configured. */
+	if (state->pdata.cam_it != -1)
+		free_irq(state->irq, state);
+
+	/* Drop the sysfs dirent references taken at probe time. */
+	if (state->cable)
+		sysfs_put(state->cable);
+	if (state->signal)
+		sysfs_put(state->signal);
+	if (state->fmt)
+		sysfs_put(state->fmt);
+	if (state->audio)
+		sysfs_put(state->audio);
+
+	for (i = 0; i < ARRAY_SIZE(adv7611_sysfs_attrs); i++)
+		device_remove_file(&client->dev, &adv7611_sysfs_attrs[i]);
+
+	cancel_delayed_work(&state->delayed_work_enable_hotplug);
+	destroy_workqueue(state->work_queues);
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	adv7604_unregister_clients(state);
+	v4l2_ctrl_handler_free(sd->ctrl_handler);
+	if (state->pdata.power_off)
+		state->pdata.power_off();
+	kfree(state);
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Supported chip names; driver_data indexes adv7604_chip_info[]. */
+static struct i2c_device_id adv7604_id[] = {
+	{ "adv7604", ADV7604 },
+	{ "adv7611", ADV7611 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, adv7604_id);
+
+/* I2C driver binding; module init/exit are generated below. */
+static struct i2c_driver adv7604_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "adv7604",
+	},
+	.probe = adv7604_probe,
+	.remove = adv7604_remove,
+	.id_table = adv7604_id,
+};
+
+module_i2c_driver(adv7604_driver);
diff --git a/drivers/media/video/ar0330.c b/drivers/media/video/ar0330.c
new file mode 100644
index 00000000000000..0711b3ab839b02
--- /dev/null
+++ b/drivers/media/video/ar0330.c
@@ -0,0 +1,1797 @@
+/*
+ * Driver for AR0330 CMOS Image Sensor from Aptina
+ *
+ * Copyright (C) 2014, Julien Beraud <julien.beraud@parrot.com>
+ * Copyright (C) 2011, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (C) 2011, Javier Martin <javier.martin@vista-silicon.com>
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Based on the MT9V032 driver and Bastian Hecht's code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/log2.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/gcd.h>
+
+#include <media/ar0330.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+/* clk limits */
+#define AR0330_MIN_EXT_CLK_HZ 6000000
+#define AR0330_MAX_EXT_CLK_HZ 27000000
+#define AR0330_MIN_VCO_CLK_HZ 384000000
+#define AR0330_MAX_VCO_CLK_HZ 768000000
+#define AR0330_MIN_PLL_MULT 32
+#define AR0330_MAX_PLL_MULT 255
+#define AR0330_MAX_PRE_PLL_CLK_DIV 64
+
+/* pix array limits */
+
+#define AR0330_PIXEL_ARRAY_WIDTH 2316
+#define AR0330_PIXEL_ARRAY_HEIGHT 1555
+
+#define AR0330_WINDOW_WIDTH_MIN 32
+#define AR0330_WINDOW_WIDTH_DEF 2304
+#define AR0330_WINDOW_WIDTH_MAX 2304
+#define AR0330_WINDOW_HEIGHT_MIN 32
+#define AR0330_WINDOW_HEIGHT_DEF 1544
+#define AR0330_WINDOW_HEIGHT_MAX 1544
+
+/* AR0330 Registers */
+#define AR0330_CHIP_VERSION 0x3000
+#define AR0330_CHIP_VERSION_VALUE 0x2604
+#define AR0330_Y_ADDR_START 0x3002
+#define AR0330_Y_ADDR_START_MIN 6
+#define AR0330_X_ADDR_START 0x3004
+#define AR0330_X_ADDR_START_MIN 6
+#define AR0330_Y_ADDR_END 0x3006
+#define AR0330_Y_ADDR_END_MAX 1549
+#define AR0330_X_ADDR_END 0x3008
+#define AR0330_X_ADDR_END_MAX 2309
+#define AR0330_FRAME_LENGTH_LINES 0x300a
+#define AR0330_LINE_LENGTH_PCK 0x300c
+#define AR0330_CHIP_REVISION 0x300e
+#define AR0330_CHIP_REVISION_1x 0x10
+#define AR0330_CHIP_REVISION_2x 0x20
+#define AR0330_LOCK_CONTROL 0x3010
+#define AR0330_LOCK_CONTROL_UNLOCK 0xbeef
+#define AR0330_COARSE_INTEGRATION_TIME 0x3012
+#define AR0330_RESET 0x301a
+#define AR0330_RESET_SMIA_DIS (1 << 12)
+#define AR0330_RESET_FORCE_PLL_ON (1 << 11)
+#define AR0330_RESET_RESTART_BAD (1 << 10)
+#define AR0330_RESET_MASK_BAD (1 << 9)
+#define AR0330_RESET_GPI_EN (1 << 8)
+#define AR0330_RESET_PARALLEL_EN (1 << 7)
+#define AR0330_RESET_DRIVE_PINS (1 << 6)
+#define AR0330_RESET_LOCK_REG (1 << 3)
+#define AR0330_RESET_STREAM (1 << 2)
+#define AR0330_RESET_RESTART (1 << 1)
+#define AR0330_RESET_RESET (1 << 0)
+/* AR03303_MODE_SELECT is an alias for AR0330_RESET_STREAM */
+#define AR0330_MODE_SELECT 0x301c
+#define AR0330_MODE_SELECT_STREAM (1 << 0)
+#define AR0330_VT_PIX_CLK_DIV 0x302a
+#define AR0330_VT_SYS_CLK_DIV 0x302c
+#define AR0330_PRE_PLL_CLK_DIV 0x302e
+#define AR0330_PLL_MULTIPLIER 0x3030
+#define AR0330_OP_PIX_CLK_DIV 0x3036
+#define AR0330_OP_SYS_CLK_DIV 0x3038
+#define AR0330_FRAME_COUNT 0x303a
+#define AR0330_READ_MODE 0x3040
+#define AR0330_READ_MODE_VERT_FLIP (1 << 15)
+#define AR0330_READ_MODE_HORIZ_MIRROR (1 << 14)
+#define AR0330_READ_MODE_COL_BIN (1 << 13)
+#define AR0330_READ_MODE_ROW_BIN (1 << 12)
+#define AR0330_READ_MODE_COL_SF_BIN (1 << 9)
+#define AR0330_READ_MODE_COL_SUM (1 << 5)
+#define AR0330_EXTRA_DELAY 0x3042
+#define AR0330_GREEN1_GAIN 0x3056
+#define AR0330_BLUE_GAIN 0x3058
+#define AR0330_RED_GAIN 0x305a
+#define AR0330_GREEN2_GAIN 0x305c
+#define AR0330_GLOBAL_GAIN 0x305e
+#define AR0330_GLOBAL_GAIN_MIN 0
+#define AR0330_GLOBAL_GAIN_DEF 1000
+#define AR0330_GLOBAL_GAIN_MAX 15992
+#define AR0330_ANALOG_GAIN 0x3060
+#define AR0330_SMIA_TEST 0x3064
+#define AR0330_EMBEDDED_DATA (1 << 8)
+#define AR0330_DATAPATH_SELECT 0x306e
+#define AR0330_DATAPATH_SLEW_DOUT_MASK (7 << 13)
+#define AR0330_DATAPATH_SLEW_DOUT_SHIFT 13
+#define AR0330_DATAPATH_SLEW_PCLK_MASK (7 << 10)
+#define AR0330_DATAPATH_SLEW_PCLK_SHIFT 10
+#define AR0330_DATAPATH_HIGH_VCM (1 << 9)
+#define AR0330_TEST_PATTERN_MODE 0x3070
+#define AR0330_TEST_DATA_RED 0x3072
+#define AR0330_TEST_DATA_GREENR 0x3074
+#define AR0330_TEST_DATA_BLUE 0x3076
+#define AR0330_TEST_DATA_GREENB 0x3078
+#define AR0330_SEQ_DATA_PORT 0x3086
+#define AR0330_SEQ_CTRL_PORT 0x3088
+#define AR0330_X_ODD_INC 0x30a2
+#define AR0330_Y_ODD_INC 0x30a6
+#define AR0330_X_ODD_INC 0x30a2
+#define AR0330_DIGITAL_CTRL 0x30ba
+#define AR0330_DITHER_ENABLE (1 << 5)
+/* Note from Developer Guide Version E :
+ * The original AR0330 Rev2.0 samples did not have R0x300E
+ * programmed correctly
+ * Therefore reading register 0x30F0 is the only sure method
+ * to get chip revision : 0x1200 indicates Rev1 while 0x1208
+ * indicates Rev2.X
+ */
+#define AR0330_RESERVED_CHIPREV 0x30F0
+#define AR0330_DATA_FORMAT_BITS 0x31ac
+#define AR0330_SERIAL_FORMAT 0x31ae
+#define AR0330_FRAME_PREAMBLE 0x31b0
+#define AR0330_LINE_PREAMBLE 0x31b2
+#define AR0330_MIPI_TIMING_0 0x31b4
+#define AR0330_MIPI_TIMING_1 0x31b6
+#define AR0330_MIPI_TIMING_2 0x31b8
+#define AR0330_MIPI_TIMING_3 0x31ba
+#define AR0330_MIPI_TIMING_4 0x31bc
+#define AR0330_MIPI_CONFIG_STATUS 0x31be
+#define AR0330_COMPRESSION 0x31d0
+#define AR0330_COMPRESSION_ENABLE (1 << 0)
+#define AR0330_POLY_SC 0x3780
+#define AR0330_POLY_SC_ENABLE (1 << 15)
+
+/*
+ * Cached PLL configuration computed by ar0330_pll_init() and written
+ * to the sensor by ar0330_pll_configure().
+ */
+struct ar0330_pll {
+	u16 pre_pll_clk_div;
+	u16 pll_multiplier;
+	u16 vt_sys_clk_div;
+	u16 vt_pix_clk_div;
+	u16 op_sys_clk_div;
+	u16 op_pix_clk_div;
+	u32 clk_pix;	/* resulting pixel clock in Hz (op_clk / 2) */
+};
+
+/* Per-device driver state, embedded around the v4l2 subdevice. */
+struct ar0330 {
+	struct ar0330_platform_data *pdata;
+	struct ar0330_pll pll;
+	/* chip revision index; selects an entry of ar0330_patches[] */
+	unsigned int version;
+
+	struct v4l2_subdev subdev;
+	struct media_pad pad;
+
+	/* Sensor window */
+	struct v4l2_rect crop;
+	struct v4l2_rect video_timing;
+	u32 x_binning;
+	u32 y_binning;
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_fract frame_interval;
+
+	bool streaming;
+
+	struct v4l2_ctrl_handler ctrls;
+	struct v4l2_ctrl *flip[2];
+	struct v4l2_ctrl *pixel_rate;
+	struct v4l2_ctrl *exposure;
+	struct v4l2_ctrl *gains[4];
+
+	/* lock to protect power_count */
+	struct mutex power_lock;
+	int power_count;
+
+	/* Registers cache */
+	u16 read_mode;	/* shadow of AR0330_READ_MODE */
+};
+
+/* Map a v4l2_subdev back to its enclosing ar0330 state. */
+static struct ar0330 *to_ar0330(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct ar0330, subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+/*
+ * Read an 8- or 16-bit register (@size in bytes, big-endian on the
+ * wire). Returns the value, or a negative errno on transfer failure.
+ */
+static int __ar0330_read(struct i2c_client *client, u16 reg, size_t size)
+{
+	u8 buf[2] = { reg >> 8, reg & 0xff };
+	struct i2c_msg msg[2] = {
+		{ client->addr, 0, 2, buf },
+		{ client->addr, I2C_M_RD, size, buf },
+	};
+	int ret;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s(0x%04x) failed (%d)\n", __func__,
+			reg, ret);
+		return ret;
+	}
+
+	if (size == 2)
+		return (buf[0] << 8) | buf[1];
+
+	return buf[0];
+}
+
+/*
+ * Write an 8- or 16-bit register (@size in bytes, value sent MSB first
+ * for 16-bit accesses). Returns 0 on success or a negative errno.
+ *
+ * Fix vs. original: the per-write trace used v4l2_info(), which logged
+ * unconditionally for every single register access and flooded the
+ * kernel log; demoted to dev_dbg() so it only shows with debugging on.
+ */
+static int __ar0330_write(struct i2c_client *client, u16 reg, u16 value,
+			  size_t size)
+{
+	u8 data[4];
+	struct i2c_msg msg = { client->addr, 0, 2 + size, data };
+	int ret;
+
+	dev_dbg(&client->dev, "writing 0x%04x to 0x%04x\n", value, reg);
+
+	data[0] = reg >> 8;
+	data[1] = reg & 0xff;
+	if (size == 2) {
+		data[2] = value >> 8;
+		data[3] = value & 0xff;
+	} else {
+		data[2] = value & 0xff;
+	}
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "%s(0x%04x) failed (%d)\n", __func__,
+			reg, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Width-specific convenience wrappers around __ar0330_read/write(). */
+static inline int ar0330_read8(struct i2c_client *client, u16 reg)
+{
+	return __ar0330_read(client, reg, 1);
+}
+
+static inline int ar0330_write8(struct i2c_client *client, u16 reg, u8 value)
+{
+	return __ar0330_write(client, reg, value, 1);
+}
+
+static inline int ar0330_read16(struct i2c_client *client, u16 reg)
+{
+	return __ar0330_read(client, reg, 2);
+}
+
+static inline int ar0330_write16(struct i2c_client *client, u16 reg, u16 value)
+{
+	return __ar0330_write(client, reg, value, 2);
+}
+
+/* Read-modify-write: replace only the bits selected by @mask in @reg. */
+static inline int ar0330_set16(struct i2c_client *client, u16 reg, u16 value,
+			       u16 mask)
+{
+	int old = ar0330_read16(client, reg);
+
+	if (old < 0)
+		return old;
+
+	return ar0330_write16(client, reg, (old & ~mask) | (value & mask));
+}
+
+/*
+ * Update AR0330_READ_MODE from the shadow copy: clear the bits in
+ * @clear, set the bits in @set, and refresh the cache on success.
+ */
+static int ar0330_set_read_mode(struct ar0330 *ar0330, u16 clear, u16 set)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
+	u16 mode = (ar0330->read_mode & ~clear) | set;
+	int err;
+
+	err = ar0330_write16(client, AR0330_READ_MODE, mode);
+	if (err < 0)
+		return err;
+
+	ar0330->read_mode = mode;
+	return 0;
+}
+
+/*
+ * Program the PLL dividers computed by ar0330_pll_init() into the
+ * sensor, then let the PLL settle. Returns 0 or a negative errno.
+ *
+ * Fix vs. original: a failure of the OP_SYS_CLK_DIV write was silently
+ * overwritten when the bus is serial (ret was reassigned by the
+ * DATA_FORMAT_BITS write before being checked); every write is now
+ * checked immediately.
+ */
+static int ar0330_pll_configure(struct ar0330 *ar0330)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
+	int ret;
+
+	ret = ar0330_write16(client, AR0330_VT_PIX_CLK_DIV,
+			     ar0330->pll.vt_pix_clk_div);
+	if (ret < 0)
+		return ret;
+
+	ret = ar0330_write16(client, AR0330_VT_SYS_CLK_DIV,
+			     ar0330->pll.vt_sys_clk_div);
+	if (ret < 0)
+		return ret;
+
+	ret = ar0330_write16(client, AR0330_PRE_PLL_CLK_DIV,
+			     ar0330->pll.pre_pll_clk_div);
+	if (ret < 0)
+		return ret;
+
+	ret = ar0330_write16(client, AR0330_PLL_MULTIPLIER,
+			     ar0330->pll.pll_multiplier);
+	if (ret < 0)
+		return ret;
+
+	ret = ar0330_write16(client, AR0330_OP_PIX_CLK_DIV,
+			     ar0330->pll.op_pix_clk_div);
+	if (ret < 0)
+		return ret;
+
+	ret = ar0330_write16(client, AR0330_OP_SYS_CLK_DIV,
+			     ar0330->pll.op_sys_clk_div);
+	if (ret < 0)
+		return ret;
+
+	/* Serial buses use 12-bit data; parallel keeps the default. */
+	if (ar0330->pdata->hw_bus != AR0330_HW_BUS_PARALLEL) {
+		ret = ar0330_write16(client, AR0330_DATA_FORMAT_BITS, 0x0c0c);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Give the PLL time to lock. */
+	usleep_range(1000, 2000);
+
+	return 0;
+}
+
+/*
+ * Compute the PLL divider/multiplier settings from the platform data
+ * clock constraints and store them in ar0330->pll.
+ *
+ * Searches downward from max_vco_rate for the highest VCO frequency
+ * reachable from ext_clk within the pre-divider/multiplier limits,
+ * then derives the per-bus pixel clock dividers.
+ *
+ * Fix vs. original: the post-loop failure test used `<=`, so a valid
+ * divider combination found exactly at AR0330_MIN_VCO_CLK_HZ was
+ * wrongly reported as "no vco_clk found"; the test is now strict.
+ */
+static int ar0330_pll_init(struct ar0330 *ar0330)
+{
+	u32 ext_clk = ar0330->pdata->clk_rate;
+	u32 max_vco = ar0330->pdata->max_vco_rate;
+	u32 op_clk = ar0330->pdata->max_op_clk_rate;
+	u32 vco_clk, i;
+	u16 pre_pll_div, pll_mult, vt_sys_clk_div, vt_pix_clk_div;
+	u16 op_pix_clk_div;
+
+	if ((ext_clk < AR0330_MIN_EXT_CLK_HZ) ||
+	    (ext_clk > AR0330_MAX_EXT_CLK_HZ)) {
+		v4l2_err(&ar0330->subdev, "ext_clk = %u, out of range\n", ext_clk);
+		return -EINVAL;
+	}
+
+	if ((max_vco < AR0330_MIN_VCO_CLK_HZ) ||
+	    (max_vco > AR0330_MAX_VCO_CLK_HZ)) {
+		v4l2_err(&ar0330->subdev, "max_vco = %u, out of range\n", max_vco);
+		return -EINVAL;
+	}
+
+	if (op_clk == 0) {
+		v4l2_err(&ar0330->subdev, "the op_clk must be specified\n");
+		return -EINVAL;
+	}
+
+	/* heuristics to find a divider combination that provides the highest
+	 * vco_clk inferior to max_vco_rate with the specified ext_clk and
+	 * the limits of the pll div and mult */
+	for (vco_clk = max_vco; vco_clk >= AR0330_MIN_VCO_CLK_HZ; vco_clk--) {
+		/* vco_clk/ext_clk reduced to lowest terms via the gcd */
+		i = gcd(vco_clk, ext_clk);
+		pre_pll_div = ext_clk / i;
+		pll_mult = vco_clk / i;
+		if ((pll_mult <= AR0330_MAX_PLL_MULT) &&
+		    (pll_mult >= AR0330_MIN_PLL_MULT) &&
+		    (pre_pll_div <= AR0330_MAX_PRE_PLL_CLK_DIV)) {
+			v4l2_info(&ar0330->subdev, "found new vco_clk : %u\n",
+				  vco_clk);
+			break;
+		}
+	}
+
+	/* Strict compare: the loop may legitimately succeed at the minimum. */
+	if (vco_clk < AR0330_MIN_VCO_CLK_HZ) {
+		v4l2_err(&ar0330->subdev, "no vco_clk found\n");
+		return -EINVAL;
+	}
+	else
+		v4l2_info(&ar0330->subdev,
+			  "pll_mult = %u\n""pre_pll_div=%u\n",
+			  pll_mult, pre_pll_div);
+
+	/* in Parallel mode, there is no need to use vt_sys_clk_div */
+	if (ar0330->pdata->hw_bus == AR0330_HW_BUS_PARALLEL) {
+		vt_pix_clk_div = DIV_ROUND_UP(vco_clk, op_clk);
+		vt_sys_clk_div = 1;
+		op_clk = vco_clk / vt_pix_clk_div;
+		op_pix_clk_div = vt_pix_clk_div;
+	}
+	else if (ar0330->pdata->hw_bus == AR0330_HW_BUS_MIPI) {
+		/* assume 12 bit 2 lanes for now */
+		op_pix_clk_div = 12;
+		vt_pix_clk_div = 6;
+		vt_sys_clk_div = 2;
+	}
+	else if (ar0330->pdata->hw_bus == AR0330_HW_BUS_HISPI) {
+		v4l2_err(&ar0330->subdev, "HiSPi not supported yet\n");
+		return -EINVAL;
+	}
+	else {
+		v4l2_err(&ar0330->subdev, "Bad hw bus\n");
+		return -EINVAL;
+	}
+
+	ar0330->pll.pre_pll_clk_div = pre_pll_div;
+	ar0330->pll.pll_multiplier = pll_mult;
+	ar0330->pll.vt_sys_clk_div = vt_sys_clk_div;
+	ar0330->pll.vt_pix_clk_div = vt_pix_clk_div;
+	ar0330->pll.op_sys_clk_div = 1; /* constant */
+	ar0330->pll.op_pix_clk_div = op_pix_clk_div;
+	ar0330->pll.clk_pix = op_clk / 2;
+
+	v4l2_info(&ar0330->subdev, "ext_clk_freq_hz %u\n", ext_clk);
+	v4l2_info(&ar0330->subdev, "pre_pll_clk_div %u\n", pre_pll_div);
+	v4l2_info(&ar0330->subdev, "pll_multiplier %u\n", pll_mult);
+	v4l2_info(&ar0330->subdev, "vt_pix_clk_div %u\n", vt_pix_clk_div);
+	v4l2_info(&ar0330->subdev, "vt_sys_clk_div %u\n", vt_sys_clk_div);
+	v4l2_info(&ar0330->subdev, "op_clk %u\n", op_clk);
+	v4l2_info(&ar0330->subdev, "pix_clk %u\n", op_clk / 2);
+	v4l2_info(&ar0330->subdev, "op_pix_clk_div %u\n", op_pix_clk_div);
+	v4l2_info(&ar0330->subdev, "op_sys_clk_div %u\n", 1);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power handling
+ */
+
+/* One raw register write; @wide selects a 16-bit (vs 8-bit) access. */
+struct ar0330_register {
+	u16 addr;
+	u16 value;
+	bool wide;
+};
+
+/* Per-revision patch: register writes plus an optional sequencer load. */
+struct ar0330_patch {
+	const struct ar0330_register *reg_data;
+	unsigned int reg_size;
+
+	u16 seq_addr;		/* 0 means no sequencer data to load */
+	const u16 *seq_data;
+	unsigned int seq_size;
+};
+
+/*
+ * Per-revision sequencer programs and register fix-up tables, consumed
+ * via ar0330_patches[] in ar0330_otpm_patch(). These are opaque vendor
+ * blobs; do not edit individual values.
+ */
+static const u16 ar0330_sequencer_v1[] = {
+	0x4540, 0x6134, 0x4a31, 0x4342, 0x4560, 0x2714, 0x3dff, 0x3dff,
+	0x3dea, 0x2704, 0x3d10, 0x2705, 0x3d10, 0x2715, 0x3527, 0x053d,
+	0x1045, 0x4027, 0x4027, 0x143d, 0xff3d, 0xff3d, 0xea62, 0x2728,
+	0x3627, 0x083d, 0x6444, 0x2c2c, 0x2c2c, 0x4b01, 0x432d, 0x4643,
+	0x1647, 0x435f, 0x4f50, 0x2604, 0x2684, 0x2027, 0xfc53, 0x0d5c,
+	0x0d60, 0x5754, 0x1709, 0x5556, 0x4917, 0x145c, 0x0945, 0x0045,
+	0x8026, 0xa627, 0xf917, 0x0227, 0xfa5c, 0x0b5f, 0x5307, 0x5302,
+	0x4d28, 0x6c4c, 0x0928, 0x2c28, 0x294e, 0x1718, 0x26a2, 0x5c03,
+	0x1744, 0x2809, 0x27f2, 0x1714, 0x2808, 0x164d, 0x1a26, 0x8317,
+	0x0145, 0xa017, 0x0727, 0xf317, 0x2945, 0x8017, 0x0827, 0xf217,
+	0x285d, 0x27fa, 0x170e, 0x2681, 0x5300, 0x17e6, 0x5302, 0x1710,
+	0x2683, 0x2682, 0x4827, 0xf24d, 0x4e28, 0x094c, 0x0b17, 0x6d28,
+	0x0817, 0x014d, 0x1a17, 0x0126, 0x035c, 0x0045, 0x4027, 0x9017,
+	0x2a4a, 0x0a43, 0x160b, 0x4327, 0x9445, 0x6017, 0x0727, 0x9517,
+	0x2545, 0x4017, 0x0827, 0x905d, 0x2808, 0x530d, 0x2645, 0x5c01,
+	0x2798, 0x4b12, 0x4452, 0x5117, 0x0260, 0x184a, 0x0343, 0x1604,
+	0x4316, 0x5843, 0x1659, 0x4316, 0x5a43, 0x165b, 0x4327, 0x9c45,
+	0x6017, 0x0727, 0x9d17, 0x2545, 0x4017, 0x1027, 0x9817, 0x2022,
+	0x4b12, 0x442c, 0x2c2c, 0x2c00,
+};
+
+/* Register fix-ups for rev1 silicon ({addr, value, wide}). */
+static const struct ar0330_register ar0330_register_v1[] = {
+	{ 0x30ba, 0x002c, 1 },
+	{ 0x30fe, 0x0080, 1 },
+	{ 0x31e0, 0x0003, 1 },
+	{ 0x3ece, 0xff, 0 },
+	{ 0x3ed0, 0xe4f6, 1 },
+	{ 0x3ed2, 0x0146, 1 },
+	{ 0x3ed4, 0x8f6c, 1 },
+	{ 0x3ed6, 0x66cc, 1 },
+	{ 0x3ed8, 0x8c42, 1 },
+	{ 0x3eda, 0x88bc, 1 },
+	{ 0x3edc, 0xaa63, 1 },
+	{ 0x3ede, 0x22c0, 1 },
+	{ 0x3ee0, 0x1500, 1 },
+	{ 0x3ee6, 0x0080, 1 },
+	{ 0x3ee8, 0x2027, 1 },
+	{ 0x3eea, 0x001d, 1 },
+	{ 0x3f06, 0x046a, 1 },
+	{ 0x305e, 0x00a0, 1 },
+};
+
+static const u16 ar0330_sequencer_v2[] = {
+	0x4a03, 0x4316, 0x0443, 0x1645, 0x4045, 0x6017, 0x2045, 0x404b,
+	0x1244, 0x6134, 0x4a31, 0x4342, 0x4560, 0x2714, 0x3dff, 0x3dff,
+	0x3dea, 0x2704, 0x3d10, 0x2705, 0x3d10, 0x2715, 0x3527, 0x053d,
+	0x1045, 0x4027, 0x0427, 0x143d, 0xff3d, 0xff3d, 0xea62, 0x2728,
+	0x3627, 0x083d, 0x6444, 0x2c2c, 0x2c2c, 0x4b01, 0x432d, 0x4643,
+	0x1647, 0x435f, 0x4f50, 0x2604, 0x2684, 0x2027, 0xfc53, 0x0d5c,
+	0x0d57, 0x5417, 0x0955, 0x5649, 0x5307, 0x5302, 0x4d28, 0x6c4c,
+	0x0928, 0x2c28, 0x294e, 0x5c09, 0x6045, 0x0045, 0x8026, 0xa627,
+	0xf817, 0x0227, 0xfa5c, 0x0b17, 0x1826, 0xa25c, 0x0317, 0x4427,
+	0xf25f, 0x2809, 0x1714, 0x2808, 0x1701, 0x4d1a, 0x2683, 0x1701,
+	0x27fa, 0x45a0, 0x1707, 0x27fb, 0x1729, 0x4580, 0x1708, 0x27fa,
+	0x1728, 0x5d17, 0x0e26, 0x8153, 0x0117, 0xe653, 0x0217, 0x1026,
+	0x8326, 0x8248, 0x4d4e, 0x2809, 0x4c0b, 0x6017, 0x2027, 0xf217,
+	0x535f, 0x2808, 0x164d, 0x1a17, 0x0127, 0xfa26, 0x035c, 0x0145,
+	0x4027, 0x9817, 0x2a4a, 0x0a43, 0x160b, 0x4327, 0x9c45, 0x6017,
+	0x0727, 0x9d17, 0x2545, 0x4017, 0x0827, 0x985d, 0x2645, 0x4b17,
+	0x0a28, 0x0853, 0x0d52, 0x5112, 0x4460, 0x184a, 0x0343, 0x1604,
+	0x4316, 0x5843, 0x1659, 0x4316, 0x5a43, 0x165b, 0x4327, 0x9c45,
+	0x6017, 0x0727, 0x9d17, 0x2545, 0x4017, 0x1027, 0x9817, 0x2022,
+	0x4b12, 0x442c, 0x2c2c, 0x2c00,
+};
+
+static const struct ar0330_register ar0330_register_v2[] = {
+	{ 0x30ba, 0x002c, 1 },
+	{ 0x30fe, 0x0080, 1 },
+	{ 0x31e0, 0x0003, 1 },
+	{ 0x3ece, 0xff, 0 },
+	{ 0x3ed0, 0xe4f6, 1 },
+	{ 0x3ed2, 0x0146, 1 },
+	{ 0x3ed4, 0x8f6c, 1 },
+	{ 0x3ed6, 0x66cc, 1 },
+	{ 0x3ed8, 0x8c42, 1 },
+	{ 0x3eda, 0x88bc, 1 },
+	{ 0x3edc, 0xaa63, 1 },
+	{ 0x3ede, 0xaa04, 1 },
+	{ 0x3ee0, 0x15f0, 1 },
+	{ 0x3ee6, 0x008c, 1 },
+	{ 0x3ee8, 0x2024, 1 },
+	{ 0x3eea, 0xff1f, 1 },
+	{ 0x3f06, 0x046a, 1 },
+	{ 0x305e, 0x00a0, 1 }
+};
+
+/* v3 only patches a single sequencer word at offset 0x800c. */
+static const u16 ar0330_sequencer_v3[] = {
+	0x2045,
+};
+
+static const struct ar0330_register ar0330_register_v3[] = {
+	{ 0x31e0, 0x0003, 1 },
+	{ 0x3ed2, 0x0146, 1 },
+	{ 0x3ed4, 0x8f6c, 1 },
+	{ 0x3ed6, 0x66cc, 1 },
+	{ 0x3ed8, 0x8c42, 1 },
+	{ 0x3eda, 0x88bc, 1 },
+	{ 0x3edc, 0xaa63, 1 },
+	{ 0x305e, 0x00a0, 1 }
+};
+
+static const struct ar0330_register ar0330_register_v4[] = {
+	{ 0x31e0, 0x0003, 1 },
+	{ 0x3ed2, 0x0146, 1 },
+	{ 0x3ed6, 0x66cc, 1 },
+	{ 0x3ed8, 0x8c42, 1 },
+	{ 0x3eda, 0x88bc, 1 },
+	{ 0x3edc, 0xaa63, 1 },
+	{ 0x305e, 0x00a0, 1 }
+};
+
+static const struct ar0330_register ar0330_register_v5[] = {
+	{ 0x3ed2, 0x0146, 1 },
+	{ 0x3eda, 0x88bc, 1 },
+	{ 0x3edc, 0xaa63, 1 },
+	{ 0x305e, 0x00a0, 1 }
+};
+
+/*
+ * Patch table indexed by (ar0330->version - 1); see ar0330_otpm_patch().
+ * The last entry is intentionally empty (nothing to patch).
+ *
+ * Fix vs. original: this is read-only, file-local data, so declare it
+ * static const instead of a mutable global.
+ */
+static const struct ar0330_patch ar0330_patches[] = {
+	[0] = {
+		.reg_data = ar0330_register_v1,
+		.reg_size = ARRAY_SIZE(ar0330_register_v1),
+		.seq_addr = 0x8000,
+		.seq_data = ar0330_sequencer_v1,
+		.seq_size = ARRAY_SIZE(ar0330_sequencer_v1),
+	},
+	[1] = {
+		.reg_data = ar0330_register_v2,
+		.reg_size = ARRAY_SIZE(ar0330_register_v2),
+		.seq_addr = 0x8000,
+		.seq_data = ar0330_sequencer_v2,
+		.seq_size = ARRAY_SIZE(ar0330_sequencer_v2),
+	},
+	[2] = {
+		.reg_data = ar0330_register_v3,
+		.reg_size = ARRAY_SIZE(ar0330_register_v3),
+		.seq_addr = 0x800c,
+		.seq_data = ar0330_sequencer_v3,
+		.seq_size = ARRAY_SIZE(ar0330_sequencer_v3),
+	},
+	[3] = {
+		.reg_data = ar0330_register_v4,
+		.reg_size = ARRAY_SIZE(ar0330_register_v4),
+		.seq_addr = 0,
+		.seq_data = NULL,
+		.seq_size = 0,
+	},
+	[4] = {
+		.reg_data = ar0330_register_v5,
+		.reg_size = ARRAY_SIZE(ar0330_register_v5),
+		.seq_addr = 0,
+		.seq_data = NULL,
+		.seq_size = 0,
+	},
+	[5] = {
+		.reg_size = 0,
+		.seq_size = 0,
+	},
+};
+
+static int ar0330_otpm_patch(struct ar0330 *ar0330)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
+ const struct ar0330_patch *patch;
+ unsigned int i;
+ int ret;
+
+ v4l2_info(&ar0330->subdev, "ar0330_otpm_patch\n");
+
+ if (ar0330->version == 0)
+ return 0;
+
+ patch = &ar0330_patches[ar0330->version - 1];
+
+ for (i = 0; i < patch->reg_size; ++i) {
+ if (!patch->reg_data[i].wide)
+ ret = ar0330_write8(client, patch->reg_data[i].addr,
+ patch->reg_data[i].value);
+ else
+ ret = ar0330_write16(client, patch->reg_data[i].addr,
+ patch->reg_data[i].value);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ if(patch->seq_addr != 0) {
+ ret = ar0330_write16(client, AR0330_SEQ_CTRL_PORT,
+ patch->seq_addr);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < patch->seq_size; ++i) {
+ ret = ar0330_write16(client, AR0330_SEQ_DATA_PORT,
+ patch->seq_data[i]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* Power-up sequence: clock on, hardware reset pulse, software reset, then
 * bus-interface selection.  Returns 0 on success, negative error code on
 * I2C failure or unsupported bus. */
static int ar0330_power_on(struct ar0330 *ar0330)
{
	struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
	int ret = 0;

	v4l2_info(&ar0330->subdev, "ar0330_power_on\n");

	/* Enable clock */
	if (ar0330->pdata->clock)
		clk_enable(ar0330->pdata->clock);

	/* Assert reset for 1ms, then give the sensor 10ms to come up. */
	if (ar0330->pdata->reset) {
		gpio_set_value(ar0330->pdata->reset, 0);
		usleep_range(1000, 2000);
		gpio_set_value(ar0330->pdata->reset, 1);
		usleep_range(10000, 11000);
	}
	/* Optional board-specific power callback. */
	if (ar0330->pdata->set_power)
		ar0330->pdata->set_power(AR0330_POWER_ON);

	/* Software reset, then wait for the sensor to reboot. */
	msleep(50);
	ret = ar0330_set16(client, AR0330_RESET, AR0330_RESET_RESET,
			   AR0330_RESET_RESET);
	if (ret < 0)
		return ret;
	msleep_interruptible(100);

	if (ar0330->pdata->hw_bus == AR0330_HW_BUS_MIPI) {
		/* Serial interface: 2-lane MIPI = 0x0202, 4-lane = 0x0204. */
		ret = ar0330_write16(client, AR0330_SERIAL_FORMAT, 0x0202);
		if (ret < 0)
			return ret;
		/* Clear the streaming bit while reconfiguring. */
		ret = ar0330_set16(client, AR0330_RESET, ~AR0330_RESET_STREAM,
				   AR0330_RESET_STREAM);
		if (ret < 0)
			return ret;

		/* NOTE(review): the value argument ORs bit complements
		 * (~AR0330_RESET_PARALLEL_EN | ...); an OR of complements has
		 * almost every bit set, so PARALLEL_EN/GPI_EN/SMIA_DIS likely
		 * end up SET within the mask rather than cleared — confirm
		 * ar0330_set16() semantics; '&' may have been intended. */
		ret = ar0330_set16(client, AR0330_RESET,
				   AR0330_RESET_DRIVE_PINS |
				   ~AR0330_RESET_PARALLEL_EN |
				   ~AR0330_RESET_GPI_EN |
				   ~AR0330_RESET_SMIA_DIS,
				   AR0330_RESET_DRIVE_PINS |
				   AR0330_RESET_PARALLEL_EN |
				   AR0330_RESET_GPI_EN |
				   AR0330_RESET_SMIA_DIS);
		if (ret < 0)
			return ret;
	} else if (ar0330->pdata->hw_bus == AR0330_HW_BUS_PARALLEL) {
		/* Parallel interface. */
		ret = ar0330_write16(client, AR0330_SERIAL_FORMAT, 0x0301);
		if (ret < 0)
			return ret;
		ret = ar0330_set16(client, AR0330_RESET, ~AR0330_RESET_STREAM,
				   AR0330_RESET_STREAM);
		if (ret < 0)
			return ret;

		/* NOTE(review): same OR-of-complement pattern as in the MIPI
		 * branch (~AR0330_RESET_GPI_EN) — verify intent. */
		ret = ar0330_set16(client, AR0330_RESET,
				   AR0330_RESET_DRIVE_PINS |
				   AR0330_RESET_PARALLEL_EN |
				   ~AR0330_RESET_GPI_EN |
				   AR0330_RESET_SMIA_DIS,
				   AR0330_RESET_DRIVE_PINS |
				   AR0330_RESET_PARALLEL_EN |
				   AR0330_RESET_GPI_EN |
				   AR0330_RESET_SMIA_DIS);
		if (ret < 0)
			return ret;
	} else if (ar0330->pdata->hw_bus == AR0330_HW_BUS_HISPI) {
		v4l2_err(&ar0330->subdev, "HiSPi mode not supported yet\n");
		return -EINVAL;
	} else {
		v4l2_err(&ar0330->subdev, "Unknown hw bus\n");
		return -EINVAL;
	}

	/* Enable the embedded data rows in the output frames. */
	ret = ar0330_set16(client, AR0330_SMIA_TEST, AR0330_EMBEDDED_DATA,
			   AR0330_EMBEDDED_DATA);
	if (ret < 0)
		return ret;

	return 0;
}
+
+static void ar0330_power_off(struct ar0330 *ar0330)
+{
+ /* Disable clock */
+ if (ar0330->pdata->clock)
+ clk_disable(ar0330->pdata->clock);
+}
+
+static int __ar0330_set_power(struct ar0330 *ar0330, bool on)
+{
+ int ret;
+
+ v4l2_info(&ar0330->subdev, "__ar0330_set_power\n");
+
+ if (!on) {
+ ar0330_power_off(ar0330);
+ return 0;
+ }
+
+ ret = ar0330_power_on(ar0330);
+ if (ret < 0)
+ return ret;
+
+ ret = ar0330_otpm_patch(ar0330);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_ctrl_handler_setup(&ar0330->ctrls);
+}
+
+static int ar0330_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+ int ret = 0;
+
+ mutex_lock(&ar0330->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (ar0330->power_count == !on) {
+ ret = __ar0330_set_power(ar0330, !!on);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Update the power count. */
+ ar0330->power_count += on ? 1 : -1;
+ WARN_ON(ar0330->power_count < 0);
+
+out:
+ mutex_unlock(&ar0330->power_lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+static int ar0330_configure_interface_mipi(struct ar0330 *ar0330)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
+ int ret = 0;
+
+ /* Serial interface. */
+ /* 2 Lanes MIPI : 0x0202, 4 Lanes : 0x204 */
+ ret = ar0330_write16(client, AR0330_FRAME_PREAMBLE, 36);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_LINE_PREAMBLE, 12);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_TIMING_0, 0x2643);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_TIMING_1, 0x114e);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_TIMING_2, 0x2048);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_TIMING_3, 0x0186);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_TIMING_4, 0x8005);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_MIPI_CONFIG_STATUS, 0x2003);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Compute VT timing and binning */
+static int ar0330_calc_vt(struct v4l2_subdev *sd)
+{
+ struct ar0330 *ar0330 = to_ar0330(sd);
+ struct v4l2_rect *vt = &ar0330->video_timing;
+ struct v4l2_rect *crop = &ar0330->crop;
+ struct v4l2_mbus_framefmt *fmt = &ar0330->format;
+ u32 adc_readout;
+
+ /* Row and column binning. */
+ ar0330->x_binning = DIV_ROUND_CLOSEST(crop->width,
+ fmt->width) * 2 - 1;
+ ar0330->y_binning = DIV_ROUND_CLOSEST(crop->height,
+ fmt->height) * 2 - 1;
+
+ /* The line length can be limited by 3 factors :
+ * -Adc readout limitation :
+ * The datasheet version D indicates 1204 for
+ * ADC high-speed = 0 and 1116 else.
+ * ADC high-speed seems to correspond to register
+ * digital_ctrl which enables dithering after digital gain
+ * Sequencer B has a default value of 1242 but nothing
+ * is said about what it is when enabling ADC high speed
+ * - Digital readout limitation
+ * 1/3*(x_addr_end-x_addr_start)/((x_odd_inc +1)*0.5)
+ * - Output interface limitation
+ * 1/2*(x_add_end-x_addr_start)/((x_odd_inc+1)*0.5) + 96
+ *
+ * it is obvious that Digital readout limitation is always inferior to
+ * the output interface
+ */
+ if (ar0330->version < 2)
+ adc_readout = 1204;
+ else
+ adc_readout = 1242;
+
+ /* for config examples to fit in, width should replace
+ * x_addr_end-x_addr_start which is an odd number */
+ vt->width = max(adc_readout,
+ crop->width / (ar0330->x_binning + 1) + 96);
+ vt->height = crop->height /
+ ((ar0330->y_binning + 1) / 2) + 12;
+
+ return 0;
+}
+
+static int ar0330_set_params(struct ar0330 *ar0330)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
+ const struct v4l2_rect *crop = &ar0330->crop;
+ struct v4l2_rect *vt = &ar0330->video_timing;
+ int ret;
+
+ if (ar0330->pdata->hw_bus == AR0330_HW_BUS_MIPI) {
+ ret = ar0330_configure_interface_mipi(ar0330);
+ if (ret < 0)
+ return ret;
+ } else if (ar0330->pdata->hw_bus == AR0330_HW_BUS_HISPI) {
+ v4l2_err(&ar0330->subdev, "HiSPi not supported yet\n");
+ return -EINVAL;
+ }
+
+ /* In parallel mode, there is no need to configure timings */
+
+ /* Windows position and size.
+ *
+ * TODO: Make sure the start coordinates and window size match the
+ * skipping and mirroring requirements.
+ */
+
+ ret = ar0330_write16(client, AR0330_X_ADDR_START,
+ crop->left);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_Y_ADDR_START,
+ crop->top);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_X_ADDR_END,
+ crop->left +
+ crop->width - 1);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_Y_ADDR_END,
+ crop->top +
+ crop->height - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = ar0330_write16(client, AR0330_X_ODD_INC,
+ ar0330->x_binning);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_Y_ODD_INC,
+ ar0330->y_binning);
+ if (ret < 0)
+ return ret;
+
+ ret = ar0330_write16(client, AR0330_LINE_LENGTH_PCK,
+ vt->width);
+ if (ret < 0)
+ return ret;
+ ret = ar0330_write16(client, AR0330_FRAME_LENGTH_LINES,
+ vt->height);
+ if (ret < 0)
+ return ret;
+
+ ar0330_set16(client, AR0330_DIGITAL_CTRL, ~AR0330_DITHER_ENABLE,
+ AR0330_DITHER_ENABLE);
+
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
/* V4L2 s_stream: start/stop streaming.  Start powers the sensor up,
 * configures PLL and capture parameters, then sets MODE_SELECT; stop
 * clears MODE_SELECT and drops the power reference. */
static int ar0330_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct ar0330 *ar0330 = to_ar0330(subdev);
	int ret;

	/* NOTE(review): this debug read happens before the power-up below,
	 * i.e. possibly while the sensor is unpowered — confirm harmless. */
	v4l2_info(&ar0330->subdev, "%s: frame count is %d\n", __func__,
		  ar0330_read16(client, AR0330_FRAME_COUNT));
	if (!enable) {

		ar0330->streaming = 0;
		ar0330_write8(client, AR0330_MODE_SELECT, 0);
		return ar0330_set_power(subdev, 0);
	}

	/* Refresh binning and video timing from the current crop/format. */
	ar0330_calc_vt(subdev);

	ret = ar0330_set_power(subdev, 1);
	if (ret) {
		v4l2_err(&ar0330->subdev, "Power on failed\n");
		return ret;
	}

	ret = ar0330_pll_configure(ar0330);
	if (ret < 0)
		goto power_off;

	ret = ar0330_set_params(ar0330);
	if (ret < 0)
		goto power_off;

	/* Stream on */
	ar0330->streaming = 1;
	ar0330_write8(client, AR0330_MODE_SELECT,
		      AR0330_MODE_SELECT_STREAM);

	/* Re-apply controls now that streaming is set (s_ctrl is a no-op
	 * while ar0330->streaming is 0). */
	ret = v4l2_ctrl_handler_setup(&ar0330->ctrls);
	if (ret) {
		v4l2_err(&ar0330->subdev, "v4l2_ctrl_handler_setup failure\n");
		goto power_off;
	}

	return 0;

power_off:
	ar0330_set_power(subdev, 0);

	return ret;
}
+
+static int ar0330_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ar0330 *ar0330 = to_ar0330(sd);
+
+ memset(fi, 0, sizeof(*fi));
+ fi->interval = ar0330->frame_interval;
+
+ return 0;
+}
+
/* V4L2 s_frame_interval: cache the requested interval and, if streaming,
 * retune the vertical blanking to approach the requested frame rate. */
static int ar0330_s_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_frame_interval *fi)
{
	struct ar0330 *ar0330 = to_ar0330(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct v4l2_rect *vt = &ar0330->video_timing;
	struct v4l2_rect *c = &ar0330->crop;
	struct v4l2_fract *cur_fi = &ar0330->frame_interval;
	u32 min_vt_height;

	/* NOTE(review): fi->interval is not validated; a zero denominator
	 * would make the division below divide by zero — confirm callers. */
	*cur_fi = fi->interval;

	if (!ar0330->streaming)
		return 0;

	/* We are already streaming, so we try to adjust the vertical blanking
	 * in order to match the frame rate.
	 */
	vt->height = div_u64((u64) ar0330->pll.clk_pix * cur_fi->numerator,
			     vt->width * cur_fi->denominator);

	/* In case min_vt_frame_blanking is not met, we adjust the frame rate */
	min_vt_height = c->height / ((ar0330->y_binning + 1) / 2) + 12;

	if (vt->height < min_vt_height) {
		vt->height = min_vt_height;
		/* Refresh FPS: report back the actually achievable rate. */
		cur_fi->denominator = ar0330->pll.clk_pix;
		cur_fi->numerator = vt->width * vt->height;
	}

	ar0330_write16(client, AR0330_FRAME_LENGTH_LINES,
		       vt->height);

	return 0;
}
+
+static int ar0330_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+
+ if (code->pad || code->index)
+ return -EINVAL;
+
+ code->code = ar0330->format.code;
+ return 0;
+}
+
+static int ar0330_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+
+ if (fse->index >= 3 || fse->code != ar0330->format.code)
+ return -EINVAL;
+
+ fse->min_width = AR0330_WINDOW_WIDTH_DEF / (fse->index + 1);
+ fse->max_width = fse->min_width;
+ fse->min_height = AR0330_WINDOW_HEIGHT_DEF / (fse->index + 1);
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__ar0330_get_pad_format(struct ar0330 *ar0330, struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ar0330->format;
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *
+__ar0330_get_pad_crop(struct ar0330 *ar0330, struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ar0330->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int ar0330_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+
+ fmt->format = *__ar0330_get_pad_format(ar0330, fh, fmt->pad,
+ fmt->which);
+ return 0;
+}
+
+static int ar0330_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *format)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ unsigned int width;
+ unsigned int height;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ __crop = __ar0330_get_pad_crop(ar0330, fh, format->pad,
+ format->which);
+
+ /* Clamp the width and height to avoid dividing by zero. */
+ width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+ max(__crop->width / 3, AR0330_WINDOW_WIDTH_MIN),
+ __crop->width);
+ height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
+ max(__crop->height / 3, AR0330_WINDOW_HEIGHT_MIN),
+ __crop->height);
+
+ hratio = DIV_ROUND_CLOSEST(__crop->width, width);
+ vratio = DIV_ROUND_CLOSEST(__crop->height, height);
+
+ __format = __ar0330_get_pad_format(ar0330, fh, format->pad,
+ format->which);
+ __format->width = __crop->width / hratio;
+ __format->height = __crop->height / vratio;
+
+ format->format = *__format;
+
+ return 0;
+}
+
+static int ar0330_get_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_crop *crop)
+{
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+
+ crop->rect = *__ar0330_get_pad_crop(ar0330, fh, crop->pad,
+ crop->which);
+ return 0;
+}
+
/* V4L2 set_crop: clamp and store the crop rectangle; if streaming, also
 * reprogram the sensor window and binning registers on the fly. */
static int ar0330_set_crop(struct v4l2_subdev *subdev,
			   struct v4l2_subdev_fh *fh,
			   struct v4l2_subdev_crop *crop)
{
	struct ar0330 *ar0330 = to_ar0330(subdev);
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *__format;
	struct v4l2_rect *__crop;
	struct v4l2_rect rect;
	int ret;

	/* Clamp the crop rectangle boundaries and align them to a multiple of 2
	 * pixels to ensure a GRBG Bayer pattern.
	 */
	rect.left = clamp(ALIGN(crop->rect.left, 2),
			  AR0330_X_ADDR_START_MIN,
			  AR0330_X_ADDR_END_MAX);
	rect.top = clamp(ALIGN(crop->rect.top, 2),
			 AR0330_Y_ADDR_START_MIN,
			 AR0330_Y_ADDR_END_MAX);
	rect.width = clamp(ALIGN(crop->rect.width, 2),
			   AR0330_WINDOW_WIDTH_MIN,
			   AR0330_WINDOW_WIDTH_MAX);
	rect.height = clamp(ALIGN(crop->rect.height, 2),
			    AR0330_WINDOW_HEIGHT_MIN,
			    AR0330_WINDOW_HEIGHT_MAX);

	/* Keep the window inside the pixel array. */
	rect.width = min(rect.width, AR0330_PIXEL_ARRAY_WIDTH - rect.left);
	rect.height = min(rect.height, AR0330_PIXEL_ARRAY_HEIGHT - rect.top);

	__crop = __ar0330_get_pad_crop(ar0330, fh, crop->pad, crop->which);

	__format = __ar0330_get_pad_format(ar0330, fh, crop->pad,
					   crop->which);

	if (rect.width != __crop->width || rect.height != __crop->height) {
		/* Reset the output image size if the crop rectangle size has
		 * been modified.
		 */
		__format->width = rect.width;
		__format->height = rect.height;
	}

	*__crop = rect;
	crop->rect = rect;

	if (!ar0330->streaming)
		return 0;

	/* Recompute binning for the new window (odd_inc encoding), then push
	 * the window and binning to the sensor.
	 * NOTE(review): these six writes are not grouped/atomic with respect
	 * to the running stream — a frame may mix old and new settings. */
	ar0330->x_binning = DIV_ROUND_CLOSEST(__crop->width,
					      __format->width) * 2 - 1;
	ar0330->y_binning = DIV_ROUND_CLOSEST(__crop->height,
					      __format->height) * 2 - 1;

	ret = ar0330_write16(client, AR0330_X_ADDR_START,
			     __crop->left);
	if (ret < 0)
		return ret;
	ret = ar0330_write16(client, AR0330_Y_ADDR_START,
			     __crop->top);
	if (ret < 0)
		return ret;
	ret = ar0330_write16(client, AR0330_X_ADDR_END,
			     __crop->left +
			     __crop->width - 1);
	if (ret < 0)
		return ret;

	ret = ar0330_write16(client, AR0330_Y_ADDR_END,
			     __crop->top +
			     __crop->height - 1);
	if (ret < 0)
		return ret;
	ret = ar0330_write16(client, AR0330_X_ODD_INC,
			     ar0330->x_binning);
	if (ret < 0)
		return ret;

	return ar0330_write16(client, AR0330_Y_ODD_INC,
			      ar0330->y_binning);
}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+#define V4L2_CID_GAIN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+#define V4L2_CID_GAIN_GREEN1 (V4L2_CTRL_CLASS_CAMERA | 0x1002)
+#define V4L2_CID_GAIN_GREEN2 (V4L2_CTRL_CLASS_CAMERA | 0x1003)
+#define V4L2_CID_GAIN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1004)
+#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
+
+static int ar0330_analog_gain_value(struct i2c_client *client, u16 reg, s32 gain_val)
+{
+ if (100 <= gain_val && gain_val < 103)
+ return ar0330_write16(client, 0x3060, 0x0000);
+ else if (103 <= gain_val && gain_val < 107)
+ return ar0330_write16(client, 0x3060, 0x0001);
+ else if (107 <= gain_val && gain_val < 110)
+ return ar0330_write16(client, 0x3060, 0x0002);
+ else if (110 <= gain_val && gain_val < 114)
+ return ar0330_write16(client, 0x3060, 0x0003);
+ else if (114 <= gain_val && gain_val < 119)
+ return ar0330_write16(client, 0x3060, 0x0004);
+ else if (119 <= gain_val && gain_val < 123)
+ return ar0330_write16(client, 0x3060, 0x0005);
+ else if (123 <= gain_val && gain_val < 128)
+ return ar0330_write16(client, 0x3060, 0x0006);
+ else if (128 <= gain_val && gain_val < 133)
+ return ar0330_write16(client, 0x3060, 0x0007);
+ else if (133 <= gain_val && gain_val < 139)
+ return ar0330_write16(client, 0x3060, 0x0008);
+ else if (139 <= gain_val && gain_val < 145)
+ return ar0330_write16(client, 0x3060, 0x0009);
+ else if (145 <= gain_val && gain_val < 152)
+ return ar0330_write16(client, 0x3060, 0x000a);
+ else if (152 <= gain_val && gain_val < 160)
+ return ar0330_write16(client, 0x3060, 0x000b);
+ else if (160 <= gain_val && gain_val < 168)
+ return ar0330_write16(client, 0x3060, 0x000c);
+ else if (168 <= gain_val && gain_val < 178)
+ return ar0330_write16(client, 0x3060, 0x000d);
+ else if (178 <= gain_val && gain_val < 188)
+ return ar0330_write16(client, 0x3060, 0x000e);
+ else if (188 <= gain_val && gain_val < 200)
+ return ar0330_write16(client, 0x3060, 0x000f);
+ else if (200 <= gain_val && gain_val < 213)
+ return ar0330_write16(client, 0x3060, 0x0010);
+ else if (213 <= gain_val && gain_val < 229)
+ return ar0330_write16(client, 0x3060, 0x0012);
+ else if (229 <= gain_val && gain_val < 246)
+ return ar0330_write16(client, 0x3060, 0x0014);
+ else if (246 <= gain_val && gain_val < 267)
+ return ar0330_write16(client, 0x3060, 0x0016);
+ else if (267 <= gain_val && gain_val < 291)
+ return ar0330_write16(client, 0x3060, 0x0018);
+ else if (291 <= gain_val && gain_val < 320)
+ return ar0330_write16(client, 0x3060, 0x001a);
+ else if (320 <= gain_val && gain_val < 356)
+ return ar0330_write16(client, 0x3060, 0x001c);
+ else if (356 <= gain_val && gain_val < 400)
+ return ar0330_write16(client, 0x3060, 0x001e);
+ else if (400 <= gain_val && gain_val < 457)
+ return ar0330_write16(client, 0x3060, 0x0020);
+ else if (457 <= gain_val && gain_val < 533)
+ return ar0330_write16(client, 0x3060, 0x0024);
+ else if (533 <= gain_val && gain_val < 640)
+ return ar0330_write16(client, 0x3060, 0x0028);
+ else if (640 <= gain_val && gain_val < 800)
+ return ar0330_write16(client, 0x3060, 0x002c);
+ else if (800 >= gain_val )
+ return ar0330_write16(client, 0x3060, 0x0030);
+
+ return -EINVAL;
+
+}
+
+static int ar0330_digital_gain_value(struct i2c_client *client, u16 reg, s32 gain)
+{
+ /* From datasheet AR0330 page 36
+ * The format of each digital gain register
+ * is “xxxx.yyyyyyy” */
+ int xxxx, yyyyyyy;
+
+ xxxx = gain / 1000;
+ yyyyyyy = gain % 1000;
+ yyyyyyy = (yyyyyyy * 128 + 500) / 1000;
+
+ return ar0330_write16(client, reg, (xxxx << 7) | yyyyyyy);
+}
+
/* V4L2 control handler.  While not streaming the new values are only
 * cached (the framework stores them); they are pushed to the sensor by the
 * v4l2_ctrl_handler_setup() call in ar0330_s_stream(). */
static int ar0330_s_ctrl(struct v4l2_ctrl *ctrl)
{
	static const u16 gains[4] = {
		AR0330_RED_GAIN, AR0330_GREEN1_GAIN,
		AR0330_GREEN2_GAIN, AR0330_BLUE_GAIN
	};
	/* Register values matching ar0330_test_pattern_menu[] order. */
	static const u16 test_pattern[] = { 0, 1, 2, 3, 256, };
	struct ar0330 *ar0330 =
		container_of(ctrl->handler, struct ar0330, ctrls);
	struct i2c_client *client = v4l2_get_subdevdata(&ar0330->subdev);
	struct v4l2_rect *vt = &ar0330->video_timing;
	/* NOTE(review): v4l2_ctrl .val is s32; aliasing it through a u32 *
	 * works here but is type-unsafe — confirm no negative values. */
	u32 *exposure_us = &ar0330->exposure->val;
	u32 line_duration_ns, exposure_max_us;
	u16 coarse;
	int ret, i;

	/* If not streaming, just keep interval structures up-to-date */
	if (!ar0330->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_EXPOSURE_ABSOLUTE:
		/* Exposure is expressed in us; it is bounded by one frame
		 * duration, then converted to coarse integration lines. */
		exposure_max_us = div_u64((u64) vt->width * vt->height *
					  1000000,
					  ar0330->pll.clk_pix);

		if (*exposure_us > exposure_max_us) {
			v4l2_warn(&ar0330->subdev,
				  "requested exposure (%d) is higher than exposure max (%d)\n",
				  *exposure_us, exposure_max_us);

			*exposure_us = exposure_max_us;
		}

		line_duration_ns = div_u64((u64) vt->width * 1000000000,
					   ar0330->pll.clk_pix);

		coarse = (*exposure_us * 1000) / line_duration_ns;

		return ar0330_write16(client, AR0330_COARSE_INTEGRATION_TIME,
				      coarse);

	case V4L2_CID_GAIN_RED:
	case V4L2_CID_GAIN_GREEN1:
	case V4L2_CID_GAIN_GREEN2:
	case V4L2_CID_GAIN_BLUE:

		/* Update only the per-channel gains that actually changed. */
		for (i = 0; i < 4; ++i) {
			struct v4l2_ctrl *gain = ar0330->gains[i];

			if (gain->val == gain->cur.val)
				continue;

			ret = ar0330_digital_gain_value(client,
							gains[i],
							gain->val);
			if (ret < 0)
				return -EIO;
		}

		return 0;

	case V4L2_CID_GAIN:
		return ar0330_digital_gain_value(client,
						 AR0330_GLOBAL_GAIN,
						 ctrl->val);

	case V4L2_CID_ANALOGUE_GAIN:
		return ar0330_analog_gain_value(client,
						AR0330_ANALOG_GAIN,
						ctrl->val);

	case V4L2_CID_HFLIP:
	case V4L2_CID_VFLIP: {
		u16 clr = 0;
		u16 set = 0;

		/* The read-mode register is write-protected: unlock, update
		 * both flip bits (the controls are clustered), re-lock. */
		ret = ar0330_write16(client, AR0330_LOCK_CONTROL,
				     AR0330_LOCK_CONTROL_UNLOCK);
		if (ret < 0)
			return ret;

		if (ar0330->flip[0]->val)
			set |= AR0330_READ_MODE_HORIZ_MIRROR;
		else
			clr |= AR0330_READ_MODE_HORIZ_MIRROR;
		if (ar0330->flip[1]->val)
			set |= AR0330_READ_MODE_VERT_FLIP;
		else
			clr |= AR0330_READ_MODE_VERT_FLIP;

		ret = ar0330_set_read_mode(ar0330, clr, set);

		ar0330_write16(client, AR0330_LOCK_CONTROL, 0);
		return ret;
	}

	case V4L2_CID_TEST_PATTERN:
		/* "Solid Color" needs explicit per-channel test values. */
		if (ctrl->val == 1) {
			ret = ar0330_write16(client, AR0330_TEST_DATA_RED,
					     0x0eb0);
			if (ret < 0)
				return ret;
			ret = ar0330_write16(client, AR0330_TEST_DATA_GREENR,
					     0x0690);
			if (ret < 0)
				return ret;
			ret = ar0330_write16(client, AR0330_TEST_DATA_BLUE,
					     0x00b0);
			if (ret < 0)
				return ret;
			ret = ar0330_write16(client, AR0330_TEST_DATA_GREENB,
					     0x0690);
			if (ret < 0)
				return ret;
		}

		return ar0330_write16(client, AR0330_TEST_PATTERN_MODE,
				      test_pattern[ctrl->val]);
	}

	return 0;
}
+
/* Control operations: all controls funnel through ar0330_s_ctrl(). */
static struct v4l2_ctrl_ops ar0330_ctrl_ops = {
	.s_ctrl = ar0330_s_ctrl,
};
+
/* Custom per-Bayer-channel digital gain controls (value is gain x1000).
 * Array order must match the gains[] register table in ar0330_s_ctrl()
 * and the ar0330->gains[] indices filled in at probe time. */
static const struct v4l2_ctrl_config ar0330_gains[] = {
	{
		.ops = &ar0330_ctrl_ops,
		.id = V4L2_CID_GAIN_RED,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Gain, Red x1000",
		.min = AR0330_GLOBAL_GAIN_MIN,
		.max = AR0330_GLOBAL_GAIN_MAX,
		.step = 1,
		.def = AR0330_GLOBAL_GAIN_DEF,
		.flags = 0,
	}, {
		.ops = &ar0330_ctrl_ops,
		.id = V4L2_CID_GAIN_GREEN1,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Gain, Green (R) x1000",
		.min = AR0330_GLOBAL_GAIN_MIN,
		.max = AR0330_GLOBAL_GAIN_MAX,
		.step = 1,
		.def = AR0330_GLOBAL_GAIN_DEF,
		.flags = 0,
	}, {
		.ops = &ar0330_ctrl_ops,
		.id = V4L2_CID_GAIN_GREEN2,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Gain, Green (B) x1000",
		.min = AR0330_GLOBAL_GAIN_MIN,
		.max = AR0330_GLOBAL_GAIN_MAX,
		.step = 1,
		.def = AR0330_GLOBAL_GAIN_DEF,
		.flags = 0,
	}, {
		.ops = &ar0330_ctrl_ops,
		.id = V4L2_CID_GAIN_BLUE,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Gain, Blue x1000",
		.min = AR0330_GLOBAL_GAIN_MIN,
		.max = AR0330_GLOBAL_GAIN_MAX,
		.step = 1,
		.def = AR0330_GLOBAL_GAIN_DEF,
		.flags = 0,
	},
};
+
/* Menu entries for the test-pattern control; the index maps to the
 * register codes { 0, 1, 2, 3, 256 } in ar0330_s_ctrl(). */
static const char * const ar0330_test_pattern_menu[] = {
	"Disabled",
	"Solid Color",
	"Full Color Bar",
	"Fade-to-gray Color Bar",
	"Walking 1s",
};
+
/* Remaining custom controls (currently only the test-pattern menu). */
static const struct v4l2_ctrl_config ar0330_ctrls[] = {
	{
		.ops = &ar0330_ctrl_ops,
		.id = V4L2_CID_TEST_PATTERN,
		.type = V4L2_CTRL_TYPE_MENU,
		.name = "Test Pattern",
		.min = 0,
		.max = ARRAY_SIZE(ar0330_test_pattern_menu) - 1,
		.step = 0,
		.def = 0,
		.flags = 0,
		.menu_skip_mask = 0,
		.qmenu = ar0330_test_pattern_menu,
	}
};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev internal operations
+ */
+
+static int ar0330_registered(struct v4l2_subdev *subdev)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+ s32 reserved_chiprev;
+ s32 data;
+ u32 revision[2];
+ int ret;
+
+ ret = ar0330_power_on(ar0330);
+ if (ret < 0) {
+ dev_err(&client->dev, "AR0330 power up failed\n");
+ return ret;
+ }
+
+ /* Read out the chip version register */
+ data = ar0330_read16(client, AR0330_CHIP_VERSION);
+ if (data != AR0330_CHIP_VERSION_VALUE) {
+ dev_err(&client->dev, "AR0330 not detected, wrong version "
+ "0x%04x\n", data);
+ return -ENODEV;
+ }
+
+ reserved_chiprev = ar0330_read8(client, AR0330_RESERVED_CHIPREV);
+ if (reserved_chiprev < 0) {
+ dev_err(&client->dev, "%s: unable to read chip revision (%d)\n",
+ __func__, reserved_chiprev);
+ return -ENODEV;
+ }
+
+ data = ar0330_read16(client, AR0330_TEST_DATA_RED);
+ if (data < 0) {
+ dev_err(&client->dev, "%s: unable to read chip version (%d)\n",
+ __func__, data);
+ return -ENODEV;
+ }
+
+ if (reserved_chiprev == 0x1200) {
+ revision[0] = 1;
+ revision[1] = 0;
+ ar0330->version = 1;
+ }
+ else if (data == 0x0000) {
+ revision[0] = 2;
+ revision[1] = 0;
+ ar0330->version = 2;
+ }
+ else if (data == 0x0006) {
+ revision[0] = 2;
+ revision[1] = 1;
+ ar0330->version = 3;
+ }
+ else if (data == 0x0007) {
+ revision[0] = 2;
+ revision[1] = 1;
+ ar0330->version = 4;
+ }
+ else if (data == 0x0008) {
+ revision[0] = 2;
+ revision[1] = 1;
+ ar0330->version = 5;
+ }
+ else {
+ revision[0] = 0;
+ revision[1] = 0;
+ ar0330->version = 0;
+ }
+
+ ar0330_power_off(ar0330);
+
+ dev_info(&client->dev, "AR0330 rev. %02x ver. %u detected at address "
+ "0x%02x\n", revision[0], revision[1], client->addr);
+
+ return ret;
+}
+
/* Video operations: streaming control and frame-interval handling. */
static struct v4l2_subdev_video_ops ar0330_subdev_video_ops = {
	.s_stream	= ar0330_s_stream,
	.g_frame_interval = ar0330_g_frame_interval,
	.s_frame_interval = ar0330_s_frame_interval,
};
+
/* Pad operations: format/crop negotiation and enumeration. */
static struct v4l2_subdev_pad_ops ar0330_subdev_pad_ops = {
	.enum_mbus_code = ar0330_enum_mbus_code,
	.enum_frame_size = ar0330_enum_frame_size,
	.get_fmt = ar0330_get_format,
	.set_fmt = ar0330_set_format,
	.get_crop = ar0330_get_crop,
	.set_crop = ar0330_set_crop,
};
+
/* Top-level subdev operations table. */
static struct v4l2_subdev_ops ar0330_subdev_ops = {
	.video	= &ar0330_subdev_video_ops,
	.pad	= &ar0330_subdev_pad_ops,
};
+
/* Internal ops: chip identification runs when the subdev is registered. */
static const struct v4l2_subdev_internal_ops ar0330_subdev_internal_ops = {
	.registered = ar0330_registered,
};
+
+/* -----------------------------------------------------------------------------
+ * Driver initialization and probing
+ */
+
/* I2C probe: allocate driver state, register controls, initialize the
 * subdev/media entity, set format/crop defaults and configure the PLL.
 * On failure all partially acquired resources are released. */
static int ar0330_probe(struct i2c_client *client,
			const struct i2c_device_id *did)
{
	struct ar0330_platform_data *pdata = client->dev.platform_data;
	struct ar0330 *ar0330;
	unsigned int i;
	int ret;

	if (pdata == NULL) {
		dev_err(&client->dev, "No platform data\n");
		return -EINVAL;
	}

	ar0330 = kzalloc(sizeof(*ar0330), GFP_KERNEL);
	if (ar0330 == NULL)
		return -ENOMEM;

	ar0330->pdata = pdata;
	ar0330->read_mode = 0;

	/* Reserve room for the custom gains, custom ctrls and 5 standard
	 * controls (exposure, global gain, hflip, vflip, analog gain). */
	v4l2_ctrl_handler_init(&ar0330->ctrls,
			       ARRAY_SIZE(ar0330_gains) +
			       ARRAY_SIZE(ar0330_ctrls) +
			       5);

	/* Exposure in us */
	ar0330->exposure = v4l2_ctrl_new_std(&ar0330->ctrls,
					     &ar0330_ctrl_ops,
					     V4L2_CID_EXPOSURE_ABSOLUTE,
					     0, 65535, 1, 30000);

	v4l2_ctrl_new_std(&ar0330->ctrls, &ar0330_ctrl_ops,
			  V4L2_CID_GAIN, AR0330_GLOBAL_GAIN_MIN,
			  AR0330_GLOBAL_GAIN_MAX, 1, AR0330_GLOBAL_GAIN_DEF);
	ar0330->flip[0] = v4l2_ctrl_new_std(&ar0330->ctrls, &ar0330_ctrl_ops,
					    V4L2_CID_HFLIP, 0, 1, 1, 0);
	ar0330->flip[1] = v4l2_ctrl_new_std(&ar0330->ctrls, &ar0330_ctrl_ops,
					    V4L2_CID_VFLIP, 0, 1, 1, 0);

	for (i = 0; i < ARRAY_SIZE(ar0330_gains); ++i)
		ar0330->gains[i] = v4l2_ctrl_new_custom(&ar0330->ctrls,
							&ar0330_gains[i], NULL);

	/* Analog gain, expressed as a x1000 factor (1.0x .. 8.0x). */
	v4l2_ctrl_new_std(&ar0330->ctrls,
			  &ar0330_ctrl_ops,
			  V4L2_CID_ANALOGUE_GAIN,
			  100, 800, 1, 100);

	for (i = 0; i < ARRAY_SIZE(ar0330_ctrls); ++i)
		v4l2_ctrl_new_custom(&ar0330->ctrls, &ar0330_ctrls[i], NULL);

	/* HFLIP/VFLIP are handled together in s_ctrl. */
	v4l2_ctrl_cluster(ARRAY_SIZE(ar0330->flip), ar0330->flip);

	/* NOTE(review): a control-handler error is only logged, not
	 * propagated — consider failing the probe here.  Also confirm that
	 * v4l2_err() accepts an i2c_client in this kernel; other call sites
	 * in this driver pass a subdev. */
	if (ar0330->ctrls.error)
		v4l2_err(client, "%s: control initialization error %d\n",
			 __func__, ar0330->ctrls.error);

	ar0330->subdev.ctrl_handler = &ar0330->ctrls;

	mutex_init(&ar0330->power_lock);
	v4l2_i2c_subdev_init(&ar0330->subdev, client, &ar0330_subdev_ops);
	ar0330->subdev.internal_ops = &ar0330_subdev_internal_ops;

	ar0330->pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_init(&ar0330->subdev.entity, 1, &ar0330->pad, 0);
	if (ret < 0)
		goto done;

	ar0330->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	/* Default crop: the default window, centered in the array. */
	ar0330->crop.width = AR0330_WINDOW_WIDTH_DEF;
	ar0330->crop.height = AR0330_WINDOW_HEIGHT_DEF;
	ar0330->crop.left = (AR0330_WINDOW_WIDTH_MAX - AR0330_WINDOW_WIDTH_DEF)
			  / 2;
	ar0330->crop.top = (AR0330_WINDOW_HEIGHT_MAX - AR0330_WINDOW_HEIGHT_DEF)
			 / 2;

	ar0330->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
	ar0330->format.width = AR0330_WINDOW_WIDTH_DEF;
	ar0330->format.height = AR0330_WINDOW_HEIGHT_DEF;
	ar0330->format.field = V4L2_FIELD_NONE;

	/* 30 FPS */
	ar0330->frame_interval.numerator = 1;
	ar0330->frame_interval.denominator = 30;

	ar0330_calc_vt(&ar0330->subdev);

	ret = ar0330_pll_init(ar0330);

done:
	if (ret < 0) {
		v4l2_ctrl_handler_free(&ar0330->ctrls);
		media_entity_cleanup(&ar0330->subdev.entity);
		kfree(ar0330);
	}

	return ret;
}
+
+static int ar0330_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ar0330 *ar0330 = to_ar0330(subdev);
+
+ v4l2_ctrl_handler_free(&ar0330->ctrls);
+ v4l2_device_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ kfree(ar0330);
+
+ return 0;
+}
+
/* Supported I2C device names (used for module autoloading). */
static const struct i2c_device_id ar0330_id[] = {
	{ "ar0330", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ar0330_id);
+
/* I2C driver binding. */
static struct i2c_driver ar0330_i2c_driver = {
	.driver = {
		.name = "ar0330",
	},
	.probe          = ar0330_probe,
	.remove         = ar0330_remove,
	.id_table       = ar0330_id,
};
+
/* Module entry point: register the I2C driver. */
static int __init ar0330_mod_init(void)
{
	return i2c_add_driver(&ar0330_i2c_driver);
}
+
/* Module exit point: unregister the I2C driver. */
static void __exit ar0330_mod_exit(void)
{
	i2c_del_driver(&ar0330_i2c_driver);
}
+
+module_init(ar0330_mod_init);
+module_exit(ar0330_mod_exit);
+
+MODULE_DESCRIPTION("Aptina AR0330 Camera driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_AUTHOR("Julien Beraud <julien.beraud@parrot.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ar1820.c b/drivers/media/video/ar1820.c
new file mode 100644
index 00000000000000..66f2728c08acb5
--- /dev/null
+++ b/drivers/media/video/ar1820.c
@@ -0,0 +1,409 @@
+/*
+ * ar1820 - Aptina CMOS Digital Image Sensor
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Tue Aug 26 15:55:49 CEST 2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+
+#include <media/media-entity.h>
+#include <media/ar1820.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "aptina-pll.h"
+
+MODULE_AUTHOR("Eng-Hong SRON <eng-hong.sron@parrot.com>");
+MODULE_DESCRIPTION("Aptina AR1820 driver");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME "ar1820"
+
+#define AR1820_CHIP_VERSION 0x0000
+#define AR1820_CHIP_VERSION_VALUE 0x2E01
+
+struct ar1820 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct ar1820_platform_data *pdata;
+};
+
+static inline struct ar1820 *to_ar1820(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ar1820, sd);
+}
+
+/* Read a 16-bit big-endian register at address @reg into @val.
+ * Returns 0 on success or a negative errno from the I2C transfer. */
+static int ar1820_read16(struct v4l2_subdev *sd, u16 reg, u16 *val)
+{
+	int ret;
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	u16 addr = swab16(reg);	/* register address is sent MSB first */
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr = client->addr,
+			.flags = 0,
+			.len = 2,
+			.buf = (u8 *)&addr,
+		},
+		[1] = {
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = 2,
+			.buf = (u8 *)val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		/* Log the logical register address: the original printed the
+		 * byte-swapped value, making the message misleading. */
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	*val = swab16(*val);
+
+	return 0;
+}
+
+/* Write an 8-bit value to a 16-bit register (register address MSB first).
+ * Returns 0 on success or a negative errno from the I2C transfer. */
+static int ar1820_write8(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
+	int ret;
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct i2c_msg msg;
+
+	struct {
+		u16 reg;
+		u8 val;
+	} __packed buf;
+
+	buf.reg = swab16(reg);	/* big-endian register address on the wire */
+	buf.val = val;
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 3;
+	msg.buf = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		/* Log the logical register address: the original overwrote
+		 * 'reg' with its byte-swapped form before printing it. */
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Write a 16-bit value to a 16-bit register, both transmitted MSB first.
+ * Returns 0 on success or a negative errno from the I2C transfer. */
+static int ar1820_write16(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	u16 data[2];
+	struct i2c_msg msg;
+	int ret;
+
+	data[0] = swab16(reg);
+	data[1] = swab16(val);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 4;
+	msg.buf = (u8 *)data;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Pad op get_fmt: return the TRY format from the file handle, or the
+ * active format stored in the driver state. */
+static int ar1820_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct ar1820 *ar1820 = to_ar1820(sd);
+	struct v4l2_mbus_framefmt *mf;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		/* Use the requested pad (consistent with ar1820_set_fmt)
+		 * instead of hard-coding pad 0. */
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		fmt->format = *mf;
+		return 0;
+	}
+
+	fmt->format = ar1820->format;
+
+	return 0;
+}
+
+/* Pad op set_fmt: store the requested format, either in the file handle's
+ * try format (TRY) or in the driver state (ACTIVE).
+ * NOTE(review): the requested format is accepted unmodified; no clamping
+ * against sensor capabilities is performed. */
+static int ar1820_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct ar1820 *ar1820 = to_ar1820(sd);
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		/* Store the requested format into the pad's try format. The
+		 * previous code read the try format back instead, so TRY
+		 * requests were silently discarded. */
+		struct v4l2_mbus_framefmt *try_mf =
+			v4l2_subdev_get_try_format(fh, fmt->pad);
+		*try_mf = *mf;
+		return 0;
+	}
+
+	ar1820->format = *mf;
+
+	return 0;
+}
+
+/* Pad op enum_mbus_code: a single media bus code is supported, the one
+ * currently configured in the driver state. */
+static int ar1820_enum_mbus_code(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct ar1820 *sensor = to_ar1820(sd);
+
+	if (code->index)
+		return -EINVAL;
+
+	code->code = sensor->format.code;
+
+	return 0;
+}
+
+/* Video op s_stream: start (enable != 0) or stop (enable == 0) streaming.
+ *
+ * On stop only the board power callback is invoked.  On start the sensor
+ * is powered up and programmed with a fixed register sequence copied from
+ * the "dragon-prog" tool (see the TODO below).
+ *
+ * NOTE(review): the return values of the individual register writes are
+ * not checked, so an I2C failure in the middle of the sequence goes
+ * unnoticed and streaming is reported as started anyway. */
+static int ar1820_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ar1820 *ar1820 = to_ar1820(sd);
+	struct ar1820_platform_data *pdata = ar1820->pdata;
+	int ret = 0;
+
+	if (enable == 0) {
+		if (pdata->set_power)
+			pdata->set_power(0);
+
+		return 0;
+	}
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(1);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	/* TODO: Need to clean this up.
+	 * Sequence taken for dragon-prog... */
+	/* Software reset, then wait for the sensor to come back up. */
+	ar1820_write8(sd, 0x0103, 0x01);
+	msleep(1);
+
+	/* Interface, data format, clock divider and output window setup.
+	 * Register meanings follow the sensor's register map -- presumably
+	 * 0x034c/0x034e set the 1280x720 output size and 0x0344..0x034a the
+	 * crop window; confirm against the AR1820 datasheet. */
+	ar1820_write16(sd, 0x301a, 0x0118);
+	ar1820_write8(sd, 0x0100, 0x00);
+	ar1820_write16(sd, 0x31ae, 0x0202);
+	ar1820_write16(sd, 0x0112, 0x0a0a);
+	ar1820_write16(sd, 0x0300, 0x0005);
+	ar1820_write16(sd, 0x0302, 0x0001);
+	ar1820_write16(sd, 0x0304, 0x0006);
+	ar1820_write16(sd, 0x0306, 0x007d);
+	ar1820_write16(sd, 0x0308, 0x000a);
+	ar1820_write16(sd, 0x030a, 0x0002);
+	ar1820_write16(sd, 0x3064, 0x0945);
+	ar1820_write16(sd, 0x3016, 0x0111);
+	ar1820_write16(sd, 0x3064, 0x0045);
+	ar1820_write16(sd, 0x034c, 0x0500);
+	ar1820_write16(sd, 0x034e, 0x02d0);
+	ar1820_write16(sd, 0x0344, 0x0000);
+	ar1820_write16(sd, 0x0348, 0x04ff);
+	ar1820_write16(sd, 0x0346, 0x0000);
+	ar1820_write16(sd, 0x034a, 0x02cf);
+	msleep(1);
+
+	/* Frame timing and analog settings. */
+	ar1820_write16(sd, 0x0342, 0x3d20);
+	ar1820_write16(sd, 0x0340, 0x0362);
+	ar1820_write16(sd, 0x0202, 0x017f);
+	ar1820_write16(sd, 0x3014, 0x1aa0);
+	ar1820_write16(sd, 0x3056, 0x1440);
+	ar1820_write16(sd, 0x3058, 0x1440);
+	ar1820_write16(sd, 0x305a, 0x1440);
+	ar1820_write16(sd, 0x305c, 0x1440);
+	ar1820_write16(sd, 0x3018, 0x0000);
+	ar1820_write16(sd, 0x301a, 0x0118);
+	ar1820_write16(sd, 0x3edc, 0x68cf);
+	ar1820_write16(sd, 0x3ee2, 0xe363);
+	msleep(1);
+
+	/* Re-issue the timing registers under grouped-parameter hold
+	 * (0x0104), then release the hold and start streaming (0x0100). */
+	ar1820_write8(sd, 0x0104, 0x01);
+	ar1820_write16(sd, 0x0342, 0x3d20);
+	ar1820_write16(sd, 0x0340, 0x0362);
+	ar1820_write16(sd, 0x0202, 0x017f);
+	ar1820_write16(sd, 0x3014, 0x1aa0);
+	ar1820_write8(sd, 0x0104, 0x00);
+	ar1820_write16(sd, 0x0342, 0x3d20);
+	ar1820_write16(sd, 0x0340, 0x0362);
+	ar1820_write8(sd, 0x0100, 0x01);
+	msleep(1);
+
+	return 0;
+}
+
+/* Video op g_dv_timings: only the pixel clock is reported; all other bt
+ * fields are left untouched for the caller.
+ * NOTE(review): 40 MHz is hard-coded -- presumably it matches the fixed
+ * register sequence in ar1820_s_stream(); confirm if more modes are added. */
+static int ar1820_g_dv_timings(struct v4l2_subdev *sd,
+			       struct v4l2_dv_timings *timings)
+{
+	struct v4l2_bt_timings *bt = &timings->bt;
+
+	bt->pixelclock = 40000000;
+	return 0;
+}
+
+/* Video ops: stream on/off and fixed DV timings reporting. */
+static const struct v4l2_subdev_video_ops ar1820_video_ops = {
+	.s_stream = ar1820_s_stream,
+	.g_dv_timings = ar1820_g_dv_timings,
+};
+
+/* Pad ops: format get/set and media bus code enumeration. */
+static const struct v4l2_subdev_pad_ops ar1820_pad_ops = {
+	.get_fmt = ar1820_get_fmt,
+	.set_fmt = ar1820_set_fmt,
+	.enum_mbus_code = ar1820_enum_mbus_code,
+};
+
+/* Top-level subdev ops table; no core ops are needed. */
+static const struct v4l2_subdev_ops ar1820_ops = {
+	.core = NULL,
+	.video = &ar1820_video_ops,
+	.pad = &ar1820_pad_ops,
+};
+
+/* Power the sensor up, read the chip version register and verify it
+ * matches the AR1820. The sensor is powered back down before returning.
+ * Returns 0 on success, a negative errno on I2C failure or -ENODEV on
+ * ID mismatch. */
+static int ar1820_detect_chip(struct v4l2_subdev *sd)
+{
+	struct ar1820 *ar1820 = to_ar1820(sd);
+	struct ar1820_platform_data *pdata = ar1820->pdata;
+	int ret = 0;
+	u16 id = 0;
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(1);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	ret = ar1820_read16(sd, AR1820_CHIP_VERSION, &id);
+
+	/* Always power down, even when the read failed. */
+	if (pdata->set_power)
+		pdata->set_power(0);
+
+	/* Propagate I2C errors: the original ignored the read status and
+	 * would report a bogus "Chip ID = 0x0000" instead. */
+	if (ret < 0)
+		return ret;
+
+	if (id != AR1820_CHIP_VERSION_VALUE) {
+		v4l2_err(sd, "Error Chip ID = 0x%04x instead of 0x%04x\n",
+			 id, AR1820_CHIP_VERSION_VALUE);
+		return -ENODEV;
+	}
+
+	v4l2_info(sd, "Found " DRIVER_NAME " chip\n");
+
+	return 0;
+}
+
+/* Bind the driver: allocate state, register the media entity, set the
+ * default 720p format and verify the chip is present. */
+static int ar1820_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct ar1820 *ar1820;
+	struct ar1820_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	int ret = 0;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "platform data not specified\n");
+		return -EINVAL;
+	}
+
+	/* All register accesses go through i2c_transfer(), so require plain
+	 * I2C functionality rather than SMBus byte access. */
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "i2c not available\n");
+		return -ENODEV;
+	}
+
+	ar1820 = kzalloc(sizeof(*ar1820), GFP_KERNEL);
+	if (!ar1820) {
+		dev_err(&client->dev, "alloc failed for data structure\n");
+		return -ENOMEM;
+	}
+
+	ar1820->pdata = pdata;
+
+	sd = &ar1820->sd;
+	v4l2_i2c_subdev_init(sd, client, &ar1820_ops);
+
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	ar1820->pad.flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+	ret = media_entity_init(&sd->entity, 1, &ar1820->pad, 0);
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init media entity\n");
+		/* Do not call media_entity_cleanup() on a failed init. */
+		goto error_free;
+	}
+
+	/* Set default configuration: 720p60 */
+	ar1820->format.width = 1280;
+	ar1820->format.height = 720;
+	ar1820->format.code = V4L2_MBUS_FMT_SBGGR10_1X10;
+
+	/* Check if the chip is present */
+	ret = ar1820_detect_chip(sd);
+	if (ret < 0)
+		goto error_detect;
+
+	return 0;
+
+error_detect:
+	media_entity_cleanup(&sd->entity);
+error_free:
+	v4l2_device_unregister_subdev(sd);
+	kfree(ar1820);
+
+	return ret;
+}
+
+/* Unbind the driver: tear down the media entity and release the state. */
+static int ar1820_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ar1820 *sensor = to_ar1820(sd);
+
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(sensor);
+
+	return 0;
+}
+
+/* I2C device id table: matches the "ar1820" device name. */
+static const struct i2c_device_id ar1820_id[] = {
+	{DRIVER_NAME, 0},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, ar1820_id);
+
+/* I2C driver glue; registered below via module_i2c_driver(). */
+static struct i2c_driver ar1820_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+	},
+	.probe = ar1820_probe,
+	.remove = ar1820_remove,
+	.id_table = ar1820_id,
+};
+
+module_i2c_driver(ar1820_driver);
diff --git a/drivers/media/video/as3636.c b/drivers/media/video/as3636.c
new file mode 100644
index 00000000000000..6c9b60a0fb6173
--- /dev/null
+++ b/drivers/media/video/as3636.c
@@ -0,0 +1,792 @@
+/*
+ * drivers/media/video/as3636a.c - AS3636 flash controller driver
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * Contact: Maxime Jourdan <maxime.jourdan@parrot.com>
+ *
+ * Inspired from AS3645A driver made by Laurent Pinchart
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <media/as3636.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define REG_IC_INFO 0x00
+#define REG_IC_VERSION 0x01
+#define REG_MODULE_INFO 0x02
+#define REG_USER_NVM_1 0x03
+
+#define REG_CONTROL 0x04
+#define CONTROL_AFSTROBE (1 << 1)
+#define CONTROL_MST (1 << 2)
+#define CONTROL_RESET (1 << 6)
+#define CONTROL_STANDBY (1 << 7)
+
+#define REG_INTERRUPT_MASK 0x05
+
+#define REG_INTERRUPT_STATUS 0x06
+#define INTERRUPT_STATUS_ILF 1
+#define INTERRUPT_STATUS_CTF (1 << 1)
+#define INTERRUPT_STATUS_BCF (1 << 2)
+#define INTERRUPT_STATUS_TOF (1 << 3)
+#define INTERRUPT_STATUS_OTPF (1 << 4)
+#define INTERRUPT_STATUS_AFF (1 << 5)
+#define INTERRUPT_STATUS_READYF (1 << 6)
+#define INTERRUPT_STATUS_SSTF (1 << 7)
+
+#define REG_XENON_CONTROL 0x07
+#define XENON_CHARGE 1
+#define XENON_READY (1 << 1)
+#define XENON_STROBE (1 << 2)
+#define XENON_CAR (1 << 3)
+
+#define REG_XENON_CAR_INTERVAL 0x08
+#define REG_XENON_CHARGE_TIME 0x09
+#define REG_XENON_SST_PULSE_LENGTH 0x0A
+#define REG_LIFE_TIME_MSB 0x0B
+#define REG_LIFE_TIME_LSB 0x0C
+#define REG_XENON_CONFIG_A 0x0D
+#define REG_XENON_CONFIG_B 0x0E
+#define REG_XENON_CONFIG_C 0x0F
+#define REG_LED_CURRENT 0x10
+#define REG_LED_CONTROL 0x11
+#define REG_LED_CONFIG 0x12
+#define REG_PASSWORD 0x13
+#define REG_USER_NVM_2 0x14
+#define REG_USER_NVM_3 0x15
+#define REG_USER_NVM_4 0x16
+#define REG_USER_NVM_5 0x17
+
+#define EEPROM_PASSWORD 0x8A
+
+#define CHARGE_MAX_VOLT_MIN 285 // 28.5 V
+#define CHARGE_MAX_VOLT_MAX 348
+#define DCDC_PEAK_MIN 250
+#define DCDC_PEAK_MAX 400
+
+/* Per-register flag: 1 if the register is EEPROM-backed and thus needs the
+ * password unlock sequence before a write (see as3636_write()). Indexed by
+ * register address, REG_IC_INFO (0x00) .. REG_USER_NVM_5 (0x17).
+ * Made static const: this table is private to this file and must not
+ * pollute the kernel's global namespace. */
+static const u8 REG_IS_EEPROM[] = {0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1};
+
+/*
+ * struct as3636
+ *
+ * @subdev: V4L2 subdev
+ * @pdata: Flash platform data
+ * @power_lock: Protects power_count
+ * @power_count: Power reference count
+ * @led_mode: V4L2 flash LED mode
+ * @timeout: Flash timeout in microseconds
+ * @strobe_source: Flash strobe source (software or external)
+ */
+struct as3636 {
+ struct v4l2_subdev subdev;
+ struct as3636_platform_data *pdata;
+
+ struct mutex power_lock;
+ int power_count;
+
+ /* Controls */
+ struct v4l2_ctrl_handler ctrls;
+
+ enum v4l2_flash_led_mode led_mode;
+ unsigned int timeout;
+ enum v4l2_flash_strobe_source strobe_source;
+};
+
+#define to_as3636(sd) container_of(sd, struct as3636, subdev)
+
+/* Read one byte from register @addr.
+ * Returns the byte read on success, or a negative errno. */
+static int as3636_read(struct as3636 *flash, u8 addr)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+	int ret = i2c_smbus_read_byte_data(client, addr);
+
+	dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret,
+		ret < 0 ? "fail" : "ok");
+
+	return ret;
+}
+
+/* Write @val to register @addr.
+ * EEPROM-backed registers (see REG_IS_EEPROM) are unlocked by writing
+ * EEPROM_PASSWORD to REG_PASSWORD first; REG_PASSWORD also reads non-zero
+ * while a previous EEPROM write is still in progress, so we wait for it
+ * to clear before issuing the write.
+ * Return negative errno else zero on success. */
+static int as3636_write(struct as3636 *flash, u8 addr, u8 val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+	int retries = 50;
+	int rval;
+
+	/* Wait if the EEPROM is busy with a previous request, but bound the
+	 * wait instead of spinning forever on a stuck device. */
+	while ((rval = as3636_read(flash, REG_PASSWORD)) > 0) {
+		if (--retries == 0)
+			return -ETIMEDOUT;
+		usleep_range(2000, 3000);
+	}
+
+	if (rval < 0)
+		return rval;
+
+	/* To unlock EEPROM registers, we must first write a password value
+	 * in the corresponding register. Guard the table lookup so an
+	 * out-of-range address cannot read past the array. */
+	if (addr < ARRAY_SIZE(REG_IS_EEPROM) && REG_IS_EEPROM[addr]) {
+		rval = as3636_write(flash, REG_PASSWORD, EEPROM_PASSWORD);
+		if (rval < 0)
+			return rval;
+	}
+
+	rval = i2c_smbus_write_byte_data(client, addr, val);
+
+	dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
+		rval < 0 ? "fail" : "ok");
+
+	return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+/* Read the interrupt status register -- the read clears the pending bits
+ * in hardware -- and log every fault found.
+ * Returns the raw status byte, or a negative errno on read failure.
+ * NOTE(review): READYF is deliberately not logged (it is not a fault). */
+static int as3636_read_interrupt(struct as3636 *flash)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+	int rval;
+
+	/* NOTE: reading register clears interrupt status */
+	rval = as3636_read(flash, REG_INTERRUPT_STATUS);
+	if (rval < 0)
+		return rval;
+
+	if (rval & INTERRUPT_STATUS_ILF)
+		dev_info(&client->dev, "Indicator LED fault: "
+			"Short circuit or open loop\n");
+
+	if (rval & INTERRUPT_STATUS_CTF)
+		dev_info(&client->dev, "Xenon Charger Transformer fault\n");
+
+	if (rval & INTERRUPT_STATUS_BCF)
+		dev_info(&client->dev, "DCDC Boost converter "
+			"(VOUTBOOST) fault\n");
+
+	if (rval & INTERRUPT_STATUS_TOF)
+		dev_info(&client->dev, "Xenon Charger Timeout fault\n");
+
+	if (rval & INTERRUPT_STATUS_OTPF)
+		dev_info(&client->dev, "Over Temperature protection fault\n");
+
+	if (rval & INTERRUPT_STATUS_AFF)
+		dev_info(&client->dev, "Autofocus LED (VLED) fault\n");
+
+	if (rval & INTERRUPT_STATUS_SSTF)
+		dev_info(&client->dev, "Xenon Strobe Self test fault\n");
+
+	return rval;
+}
+
+/* Fire a strobe pulse, but only if the xenon charger reports ready;
+ * returns -EINVAL when not ready, or a negative errno on I2C failure. */
+static int as3636_flash_test(struct as3636 *flash)
+{
+	int xenon_ctrl;
+
+	xenon_ctrl = as3636_read(flash, REG_XENON_CONTROL);
+	if (xenon_ctrl < 0)
+		return xenon_ctrl;
+
+	if (!(xenon_ctrl & XENON_READY))
+		return -EINVAL;
+
+	return as3636_write(flash, REG_XENON_CONTROL, xenon_ctrl | XENON_STROBE);
+}
+
+/**
+ * AS3636 self-test: briefly cycles the AF LED to enter active mode, then
+ * triggers the module self-test (MST) and polls until the device clears
+ * the bit. Faults raised during the test are logged through
+ * as3636_read_interrupt(). Returns 0 on success or a negative errno.
+ */
+static int as3636_self_test(struct as3636 *flash)
+{
+	int ret;
+
+	/* Clear any pending interrupt that would keep the device from
+	 * entering active mode. */
+	as3636_read_interrupt(flash);
+
+	ret = as3636_write(flash, REG_LED_CONTROL, 0x08); /* AF LED on: enter active mode. */
+	if (ret < 0)
+		return ret;
+
+	ret = as3636_write(flash, REG_LED_CONTROL, 0x00); /* AF LED off. */
+	if (ret < 0)
+		return ret;
+
+	ret = as3636_write(flash, REG_CONTROL, CONTROL_MST);
+	if (ret < 0)
+		return ret;
+
+	/* Poll until the device clears the self-test bit. Check for read
+	 * errors inside the loop: the original tested a possibly-negative
+	 * errno against CONTROL_MST, which could loop on some error codes. */
+	for (;;) {
+		ret = as3636_read(flash, REG_CONTROL);
+		if (ret < 0)
+			return ret;
+		if (!(ret & CONTROL_MST))
+			break;
+		msleep(20);
+	}
+
+	ret = as3636_read_interrupt(flash);
+	if (ret < 0)
+		return ret;
+
+	/*if (ret != INTERRUPT_STATUS_READYF)
+		return -EINVAL;*/
+
+	return 0;
+}
+
+/* Return 1 if the xenon capacitor is charged and ready to strobe, 0 if
+ * not, or a negative errno on read failure. */
+static int as3636_is_flash_ready(struct as3636 *flash)
+{
+	int xenon_ctrl = as3636_read(flash, REG_XENON_CONTROL);
+
+	if (xenon_ctrl < 0)
+		return xenon_ctrl;
+
+	return !!(xenon_ctrl & XENON_READY);
+}
+
+/* g_volatile_ctrl handler: refresh the volatile flash controls from the
+ * hardware registers. */
+static int as3636_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct as3636 *flash =
+		container_of(ctrl->handler, struct as3636, ctrls);
+	int value;
+
+	switch (ctrl->id) {
+
+	case V4L2_CID_FLASH_STROBE_STATUS:
+		value = as3636_read(flash, REG_XENON_CONTROL);
+		if (value < 0)
+			return value;
+
+		/* Update both cur.val and val, consistently with the other
+		 * volatile controls below (the original only set cur.val). */
+		ctrl->cur.val = (value & XENON_STROBE) ? 1 : 0;
+		ctrl->val = ctrl->cur.val;
+		break;
+	case V4L2_CID_FLASH_READY:
+		value = as3636_is_flash_ready(flash);
+		if (value < 0)
+			return value;
+
+		ctrl->cur.val = value;
+		ctrl->val = value;
+		break;
+	case V4L2_CID_FLASH_CHARGE:
+		value = as3636_read(flash, REG_XENON_CONTROL);
+		if (value < 0)
+			return value;
+
+		ctrl->cur.val = (value & XENON_CHARGE) ? 1 : 0;
+		ctrl->val = ctrl->cur.val;
+		break;
+	}
+
+	return 0;
+}
+
+/* Kick off xenon capacitor charging unless the charger already reports
+ * ready. Returns 0 on success or a negative errno. */
+static int as3636_start_charging(struct as3636 *flash)
+{
+	int xenon_ctrl = as3636_read(flash, REG_XENON_CONTROL);
+
+	if (xenon_ctrl < 0)
+		return xenon_ctrl;
+
+	/* We're already fully charged */
+	if (xenon_ctrl & XENON_READY)
+		return 0;
+
+	/* Clear interrupts that prevent entering active mode */
+	as3636_read_interrupt(flash);
+
+	return as3636_write(flash, REG_XENON_CONTROL, xenon_ctrl | XENON_CHARGE);
+}
+
+/* Enter (enable != 0) or leave (enable == 0) standby mode.
+ * Returns 0 on success or a negative errno. */
+static int as3636_stdby(struct as3636 *flash, int enable)
+{
+	int val = as3636_read(flash, REG_CONTROL);
+
+	if (val < 0)
+		return val;
+
+	/* Honor the requested state: the previous code ignored @enable and
+	 * unconditionally set the STANDBY bit. */
+	if (enable)
+		val |= CONTROL_STANDBY;
+	else
+		val &= ~CONTROL_STANDBY;
+
+	return as3636_write(flash, REG_CONTROL, val);
+}
+
+/* Switch the indicator LED on or off (torch-style use of the IND output). */
+static int as3636_enable_led(struct as3636 *flash, int on)
+{
+	if (on) {
+		// Enable IND LED
+		as3636_write(flash, REG_LED_CONFIG, 0x01);
+		return as3636_write(flash, REG_LED_CONTROL, 0x01);
+	}
+
+	// Disable IND LED
+	return as3636_write(flash, REG_LED_CONTROL, 0x00);
+}
+
+/* s_ctrl handler for all flash controls. */
+static int as3636_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct as3636 *flash =
+		container_of(ctrl->handler, struct as3636, ctrls);
+	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+	int value = ctrl->val;
+
+	switch (ctrl->id) {
+	case V4L2_CID_FLASH_STROBE:
+		return as3636_flash_test(flash);
+	case V4L2_CID_FLASH_CHARGE:
+		/* Button-like control: reset the visible value, then start
+		 * charging. */
+		ctrl->val = 0;
+		return as3636_start_charging(flash);
+	case V4L2_CID_PRIVATE_AS3636_TEST:
+		return as3636_self_test(flash);
+	case V4L2_CID_PRIVATE_AS3636_STDBY:
+		/* Capture the requested state before resetting the control:
+		 * the previous code zeroed ctrl->val first, so the user's
+		 * value never reached as3636_stdby(). */
+		ctrl->val = 0;
+		return as3636_stdby(flash, value);
+	case V4L2_CID_FLASH_LED_MODE:
+		if (value == V4L2_FLASH_LED_MODE_FLASH) {
+			dev_warn(&client->dev, "Flash LED Mode not supported."
+					"Please use NONE or TORCH\n");
+			return -EINVAL;
+		}
+		return as3636_enable_led(flash, value);
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops as3636_ctrl_ops = {
+ .g_volatile_ctrl = as3636_get_ctrl,
+ .s_ctrl = as3636_set_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+/* Put device into a known state: full chip reset, then configure the
+ * xenon charger and DCDC converter from platform data. The pdata values
+ * used here were clamped to valid ranges in as3636_probe().
+ * Returns 0 on success or a negative errno from the first failing write. */
+static int as3636_setup(struct as3636 *flash)
+{
+	int ret;
+	struct as3636_platform_data *pdata = flash->pdata;
+	u8 switch_sel, charge_voltage;
+
+	ret = as3636_write(flash, REG_CONTROL, CONTROL_RESET);
+	if (ret < 0)
+		return ret;
+
+	// Recommended 4ms wait after reset
+	usleep_range(4000, 5000);
+
+	ret = as3636_write(flash, REG_LED_CONTROL, 0x08); // Enable AF LED to go into active mode.
+	if (ret < 0)
+		return ret;
+
+	/* Encode switch selection (mA) as steps of 75 mA above 375 mA. */
+	switch_sel = (flash->pdata->switch_selection-375)/75;
+
+	ret = as3636_write(flash, REG_XENON_CONFIG_B, switch_sel | 0x80);
+	if (ret < 0)
+		return ret;
+
+	/* Charge voltage encoded as offset from CHARGE_MAX_VOLT_MIN
+	 * (28.5 V) -- presumably in 0.1 V steps, see the CHARGE_MAX_VOLT_*
+	 * definitions; confirm against the datasheet. */
+	charge_voltage = (pdata->charge_max_voltage-CHARGE_MAX_VOLT_MIN);
+
+	ret = as3636_write(flash, REG_XENON_CONFIG_A, charge_voltage);
+	if (ret < 0)
+		return ret;
+
+	ret = as3636_read(flash, REG_XENON_CONTROL);
+
+	if (ret < 0)
+		return ret;
+
+	/* Optionally enable automatic charge refresh (CAR). */
+	ret = as3636_write(flash, REG_XENON_CONTROL,
+			   ret | (pdata->auto_charge ? XENON_CAR : 0));
+
+	if (ret < 0)
+		return ret;
+
+	/* DCDC coil peak current: 2-bit field, 50 mA steps above 250 mA. */
+	ret = as3636_write(flash, REG_XENON_CONFIG_C,
+			   ((pdata->dcdc_peak-DCDC_PEAK_MIN)/50) << 6);
+
+	if (ret < 0)
+		return ret;
+
+	ret = as3636_write(flash, REG_XENON_SST_PULSE_LENGTH, 0xFF); // In [us].
+	if(ret < 0)
+		return ret;
+
+	ret = as3636_write(flash, REG_LED_CONTROL, 0x00); // Disable AF LED.
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* s_power core op: reference-counted power handling under power_lock.
+ * NOTE(review): as3636_setup() (full reset + reconfiguration) runs on
+ * every power-up call, even when power_count is already > 0 -- confirm
+ * this is intended for nested users. */
+static int as3636_set_power(struct v4l2_subdev *sd, int on)
+{
+	struct as3636 *flash = to_as3636(sd);
+	int ret = 0;
+
+	mutex_lock(&flash->power_lock);
+
+	if (on) {
+		ret = as3636_setup(flash);
+		if (ret < 0) {
+			goto done;
+		}
+	}
+
+	/* Maintain the reference count; a negative count indicates
+	 * unbalanced on/off calls from a user. */
+	flash->power_count += on ? 1 : -1;
+	WARN_ON(flash->power_count < 0);
+
+done:
+	mutex_unlock(&flash->power_lock);
+	return ret;
+}
+
+/* Internal op: called once the subdev is registered with a v4l2_device.
+ * Powers the chip up and verifies its identity from the IC info and
+ * version registers. */
+static int as3636_registered(struct v4l2_subdev *sd)
+{
+	struct as3636 *flash = to_as3636(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	int rval, man, model, version;
+
+	/* Power up the flash driver and read manufacturer ID, model ID, RFU
+	 * and version.
+	 */
+	rval = as3636_set_power(&flash->subdev, 1);
+	if (rval < 0)
+		return rval;
+
+	rval = as3636_read(flash, REG_IC_INFO);
+	if (rval < 0)
+		goto power_off;
+
+	/* High nibble: manufacturer id (read but not checked here);
+	 * low nibble: model id. */
+	man = rval >> 4;
+	model = rval & 0x0F;
+
+	/* Verify the chip model and version. */
+	if (model != 0x02) {
+		dev_err(&client->dev, "AS3636 not detected (model %d)\n", model);
+		rval = -ENODEV;
+		goto power_off;
+	}
+
+	rval = as3636_read(flash, REG_IC_VERSION);
+	if (rval < 0)
+		goto power_off;
+
+	version = rval & 0x0F;
+
+	dev_info(&client->dev, "AS3636 Version: %d\n", version);
+
+power_off:
+	/* NOTE(review): on success the device is left powered up (the
+	 * reference taken above is not dropped) -- confirm intended. */
+	if(rval < 0) {
+		as3636_set_power(&flash->subdev, 0);
+		return rval;
+	}
+
+	return 0;
+}
+
+/* Subdev node open: take a power reference (resets and reconfigures the
+ * chip via as3636_setup()). */
+static int as3636_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return as3636_set_power(sd, 1);
+}
+
+/* Subdev node close: drop the power reference taken in as3636_open(). */
+static int as3636_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	return as3636_set_power(sd, 0);
+}
+
+
+static const struct v4l2_subdev_internal_ops as3636_internal_ops = {
+ .registered = as3636_registered,
+ .open = as3636_open,
+ .close = as3636_close,
+
+};
+
+static const struct v4l2_subdev_core_ops as3636_core_ops = {
+ .s_power = as3636_set_power,
+};
+
+static const struct v4l2_subdev_ops as3636_ops = {
+ .core = &as3636_core_ops,
+};
+
+
+/* -----------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+/* PM suspend: drop board-level power through the platform callback, but
+ * only when a user currently holds a power reference. */
+static int as3636_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct as3636 *flash = to_as3636(subdev);
+	int rval = 0;
+
+	/* Nothing to do if the device is not in use. */
+	if (flash->power_count == 0)
+		return 0;
+
+	if (flash->pdata->set_power) {
+		rval = flash->pdata->set_power(&flash->subdev, 0);
+	}
+
+	dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");
+
+	return rval;
+}
+
+/* PM resume: restore board-level power and reprogram the chip (a resume
+ * implies the registers were lost), but only when the device was in use
+ * at suspend time. */
+static int as3636_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct as3636 *flash = to_as3636(subdev);
+	int rval;
+
+	if (flash->power_count == 0)
+		return 0;
+
+	if (flash->pdata->set_power) {
+		rval = flash->pdata->set_power(&flash->subdev, 1);
+		if (rval < 0)
+			return rval;
+	}
+
+	/* Reapply the full register configuration after power loss. */
+	rval = as3636_setup(flash);
+
+	dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");
+
+	return rval;
+}
+
+#else
+
+#define as3636_suspend NULL
+#define as3636_resume NULL
+
+#endif /* CONFIG_PM */
+
+/**
+ * AS3636 threaded interrupt handler: reads (and thereby clears) the
+ * interrupt status register and restarts charging on a charge timeout.
+ */
+static irqreturn_t as3636_isr(int irq, void *priv)
+{
+	struct as3636 *as3636 = priv;
+	int val;
+
+	val = as3636_read_interrupt(as3636);
+	if (val < 0)
+		return IRQ_HANDLED;
+
+	/* Use an explicit log level: the original bare printk() (a debug
+	 * leftover) logged at the default level on every interrupt. */
+	printk(KERN_DEBUG "%s:%d INT : %02X!\n", __func__, __LINE__, val);
+
+	/* Charge timeout: capacity is probably too big to charge in less
+	 * than 5s. Re-enable charging. */
+	if (val & INTERRUPT_STATUS_TOF)
+		as3636_start_charging(as3636);
+
+	return IRQ_HANDLED;
+}
+
+static const struct v4l2_ctrl_config ctrl_private_test = {
+ .id = V4L2_CID_PRIVATE_AS3636_TEST,
+ .ops = &as3636_ctrl_ops,
+ .name = "AS3636 - SELFTEST",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config ctrl_private_fstrobe = {
+ .id = V4L2_CID_PRIVATE_AS3636_STDBY,
+ .ops = &as3636_ctrl_ops,
+ .name = "AS3636 - Standby mode",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = false,
+};
+
+/*
+ * as3636_init_controls - register the V4L2 controls of the flash
+ * @flash: the flash device
+ *
+ * The charge, strobe-status and ready controls are volatile: their value
+ * is read back from hardware through as3636_get_ctrl().
+ * Returns 0 on success or the control handler's error code.
+ */
+static int as3636_init_controls(struct as3636 *flash)
+{
+	struct v4l2_ctrl *ctrl;
+
+	v4l2_ctrl_handler_init(&flash->ctrls, 10);
+
+	v4l2_ctrl_new_std(&flash->ctrls, &as3636_ctrl_ops,
+			  V4L2_CID_FLASH_STROBE, 0, 1, 1, 0);
+
+	ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3636_ctrl_ops,
+				 V4L2_CID_FLASH_CHARGE, 0, 1, 1, 0);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3636_ctrl_ops,
+				 V4L2_CID_FLASH_STROBE_STATUS, 0, 1, 1, 0);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3636_ctrl_ops,
+				 V4L2_CID_FLASH_READY, 0, 1, 1, 0);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	v4l2_ctrl_new_std_menu(&flash->ctrls, &as3636_ctrl_ops,
+			       V4L2_CID_FLASH_LED_MODE, 2, ~7,
+			       V4L2_FLASH_LED_MODE_NONE);
+
+	v4l2_ctrl_new_custom(&flash->ctrls, &ctrl_private_test, NULL);
+	v4l2_ctrl_new_custom(&flash->ctrls, &ctrl_private_fstrobe, NULL);
+
+	flash->subdev.ctrl_handler = &flash->ctrls;
+
+	return flash->ctrls.error;
+}
+
+/* Bind the driver: validate and clamp platform data, register the subdev,
+ * its controls and media entity, and request the (optional) interrupt. */
+static int as3636_probe(struct i2c_client *client,
+			const struct i2c_device_id *devid)
+{
+	struct as3636 *flash;
+	struct as3636_platform_data *pdata = client->dev.platform_data;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "Missing platform data\n");
+		return -ENODEV;
+	}
+
+	flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+	if (flash == NULL) {
+		dev_err(&client->dev, "Alloc failed\n");
+		return -ENOMEM;
+	}
+
+	flash->pdata = pdata;
+
+	/* Clamp platform data to the ranges supported by the chip. */
+	if (pdata->switch_selection < 375)
+		pdata->switch_selection = 375;
+
+	if (pdata->switch_selection > 900) {
+		dev_warn(&client->dev, "Switch selection over 900mA, overriding to 900mA\n");
+		pdata->switch_selection = 900;
+	}
+
+	if (pdata->charge_max_voltage < CHARGE_MAX_VOLT_MIN)
+		pdata->charge_max_voltage = CHARGE_MAX_VOLT_MIN;
+
+	if (pdata->charge_max_voltage > CHARGE_MAX_VOLT_MAX) {
+		dev_warn(&client->dev, "Charge voltage over 34.8V, overriding to 34.8V\n");
+		/* Fix copy-paste bug: clamp charge_max_voltage here, the
+		 * original overwrote switch_selection instead. */
+		pdata->charge_max_voltage = CHARGE_MAX_VOLT_MAX;
+	}
+
+	if (pdata->auto_charge > 1)
+		pdata->auto_charge = 0;
+
+	if (pdata->dcdc_peak < DCDC_PEAK_MIN)
+		pdata->dcdc_peak = DCDC_PEAK_MIN;
+
+	if (pdata->dcdc_peak > DCDC_PEAK_MAX) {
+		dev_warn(&client->dev,
+			"DCDC Boost Coil Peak current over 400mA, overriding to 400mA");
+		pdata->dcdc_peak = DCDC_PEAK_MAX;
+	}
+
+	v4l2_i2c_subdev_init(&flash->subdev, client, &as3636_ops);
+	flash->subdev.internal_ops = &as3636_internal_ops;
+	flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	ret = as3636_init_controls(flash);
+	if (ret < 0) {
+		dev_err(&client->dev, "Couldn't init controls (%d)\n", ret);
+		goto done;
+	}
+
+	ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(&client->dev, "Couldn't init media entity (%d)\n", ret);
+		goto done;
+	}
+
+	flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+	mutex_init(&flash->power_lock);
+
+	flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+
+	if (client->irq > 0) {
+		ret = request_threaded_irq(client->irq, NULL,
+					   as3636_isr,
+					   IRQF_TRIGGER_FALLING,
+					   "as3636", flash);
+		if (ret < 0) {
+			v4l2_err(client, "failed to register irq\n");
+			/* The interrupt is optional: mark it unavailable and
+			 * continue. The original left ret < 0 here, which
+			 * made the cleanup below free everything and abort
+			 * the whole probe. */
+			client->irq = -1;
+			ret = 0;
+		}
+	}
+
+done:
+	if (ret < 0) {
+		v4l2_ctrl_handler_free(&flash->ctrls);
+		kfree(flash);
+	}
+
+	return ret;
+}
+
+/* Unbind the driver: release the interrupt and every resource acquired
+ * in as3636_probe(). */
+static int __devexit as3636_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct as3636 *flash = to_as3636(subdev);
+
+	/* Free the IRQ requested in as3636_probe(), if any (the original
+	 * leaked it, leaving a handler pointing at freed memory). Probe
+	 * sets client->irq to -1 when the request failed. */
+	if (client->irq > 0)
+		free_irq(client->irq, flash);
+
+	v4l2_device_unregister_subdev(subdev);
+	v4l2_ctrl_handler_free(&flash->ctrls);
+	media_entity_cleanup(&flash->subdev.entity);
+	mutex_destroy(&flash->power_lock);
+	kfree(flash);
+
+	return 0;
+}
+
+static const struct i2c_device_id as3636_id_table[] = {
+ { AS3636_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, as3636_id_table);
+
+static const struct dev_pm_ops as3636_pm_ops = {
+ .suspend = as3636_suspend,
+ .resume = as3636_resume,
+};
+
+static struct i2c_driver as3636_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = AS3636_NAME,
+ .pm = &as3636_pm_ops,
+ },
+ .probe = as3636_probe,
+ .remove = __devexit_p(as3636_remove),
+ .id_table = as3636_id_table,
+};
+
+module_i2c_driver(as3636_i2c_driver);
+
+MODULE_AUTHOR("Maxime Jourdan <maxime.jourdan@parrot.com>");
+MODULE_DESCRIPTION("Xenon Flash driver for AS3636");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/galileo1.c b/drivers/media/video/galileo1.c
new file mode 100644
index 00000000000000..8afce3a4124b67
--- /dev/null
+++ b/drivers/media/video/galileo1.c
@@ -0,0 +1,2074 @@
+/*
+ * galileo1 - Nokia sensor
+ *
+ * It is a 41 MPix sensor present in the Nokia Lumia 808
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Wed Jul 2 09:16:13 CEST 2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <media/galileo1.h>
+
+#include "galileo1_reg.h"
+
+MODULE_AUTHOR("Eng-Hong SRON <eng-hong.sron@parrot.com>");
+MODULE_DESCRIPTION("Nokia Galileo1 driver");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME "galileo1"
+
+#define SENSOR_WIDTH 7728
+#define SENSOR_HEIGHT 5368
+
+#define MIN_VTCLK 20000000UL
+#define MAX_VTCLK 256000000UL
+
+/* Here we take a MIPICLK slightly higher than the specification on purpose,
+ * because we are using the TC358746A bridge and it has its own limitation
+ * (PPICLK need to be between 66 and 125 MHz).
+ * In case of other bridge, we could go back to the specified value (80.0 MHz).
+ */
+#define MIN_MIPICLK 82500000UL
+#define MAX_MIPICLK 1000000000UL
+
+#define MIN_REFCLK 6000000UL
+#define MAX_REFCLK 27000000UL
+
+#define MIN_PLL_IN_CLK 3000000UL
+#define MAX_PLL_IN_CLK 27000000UL
+
+#define MIN_PLL_OP_CLK 1000000000UL
+#define MAX_PLL_OP_CLK 2080000000UL
+
+#define MIN_VT_SYS_CLK 83330000UL
+#define MAX_VT_SYS_CLK 2080000000UL
+
+/* Per-device state for the Galileo1 sensor */
+struct galileo1 {
+	struct v4l2_subdev             sd;
+	struct media_pad               pad;
+	struct galileo1_platform_data *pdata;
+
+	struct v4l2_mbus_framefmt      format;
+	struct v4l2_fract              frame_interval;
+
+	/* Internal states */
+	bool                           streaming;
+	/* set to 0 whenever fmt/crop/interval change; see update_timings() */
+	bool                           timings_uptodate;
+
+	/* Dimensions */
+	struct v4l2_rect               crop;
+	/* total VT frame (active + blanking), in binned pixels/lines */
+	struct v4l2_rect               video_timing;
+	u32                            x_binning;
+	u32                            y_binning;
+	u32                            bits_per_pixel;
+
+	/* PLLs */
+	struct {
+		u32 pre_pll_clk_div;
+		u32 pll_multiplier;
+		u32 vt_sys_clk_div;
+		u32 vt_pix_clk_div;
+		u32 op_sys_clk_div;
+	} pll1;
+
+	struct {
+		u32 pre_pll_clk_div;
+		u32 pll_multiplier;
+	} pll0;
+
+	/* Non-Volatile Memory */
+	u8                            *nvm;
+	union nvm_memaddr              nvm_addr;
+
+
+	/* Clocks */
+	unsigned long                  vtclk;
+	unsigned long                  mipiclk;
+	unsigned long                  line_duration_ns;
+
+	u16                            trdy_ctrl;
+
+	/* i2c clients */
+	struct i2c_client             *i2c_sensor;
+	struct i2c_client             *i2c_pmic;
+
+	/* Controls */
+	struct v4l2_ctrl_handler       ctrl_handler;
+	struct v4l2_ctrl              *hflip;
+	struct v4l2_ctrl              *vflip;
+	struct v4l2_ctrl              *exposure;
+	struct v4l2_ctrl              *focus;
+	struct v4l2_ctrl              *gain;
+	struct v4l2_ctrl              *nd;	/* neutral density filter */
+	struct v4l2_ctrl              *ms;	/* mechanical shutter state */
+	struct v4l2_ctrl              *gs;	/* global shutter mode */
+	struct v4l2_ctrl              *strobe_source;
+	struct v4l2_ctrl              *strobe_width;
+};
+
+/* Values of the "ms" (mechanical shutter) control */
+enum mech_shutter_state {
+	MS_STATE_SSTROBE,	/* shutter driven by the sensor strobe */
+	MS_STATE_OPEN,		/* force shutter open */
+	MS_STATE_CLOSE		/* force shutter closed */
+};
+
+/* Map an embedded v4l2_subdev back to its galileo1 container */
+static inline struct galileo1 *to_galileo1(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct galileo1, sd);
+}
+
+/* Map a control (via its handler) back to its galileo1 container */
+static inline struct galileo1 *ctrl_to_galileo1(struct v4l2_ctrl *ctrl)
+{
+	return container_of(ctrl->handler, struct galileo1, ctrl_handler);
+}
+
+/* Read an 8-bit value from a 16-bit register address (address sent MSB
+ * first on the wire). Returns 0 on success, negative errno otherwise.
+ */
+static int galileo1_read8(struct i2c_client *client, u16 reg, u8 *val)
+{
+	int ret;
+	/* Wire-format (byte-swapped) copy so 'reg' stays usable for logs */
+	u16 reg_be = swab16(reg);
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr  = client->addr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = (u8 *)&reg_be,
+		},
+		[1] = {
+			.addr  = client->addr,
+			.flags = I2C_M_RD,
+			.len   = 1,
+			.buf   = val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		/* Log the logical register address, not the swapped one */
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Read a 16-bit big-endian value from a 16-bit register address.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int galileo1_read16(struct i2c_client *client, u16 reg, u16 *val)
+{
+	int ret;
+	/* Wire-format (byte-swapped) copy so 'reg' stays usable for logs */
+	u16 reg_be = swab16(reg);
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr  = client->addr,
+			.flags = 0,
+			.len   = 2,
+			.buf   = (u8 *)&reg_be,
+		},
+		[1] = {
+			.addr  = client->addr,
+			.flags = I2C_M_RD,
+			.len   = 2,
+			.buf   = (u8 *)val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		/* Log the logical register address, not the swapped one */
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	/* Device data is big-endian; convert to host order */
+	*val = swab16(*val);
+
+	return 0;
+}
+
+/* Write an 8-bit value to a 16-bit register address (address MSB first).
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int galileo1_write8(struct i2c_client *client, u16 reg, u8 val)
+{
+	int ret;
+	struct i2c_msg msg;
+	struct {
+		u16 reg;
+		u8  val;
+	} __packed buf;
+
+	/* Swap into the wire buffer only; keep 'reg' intact for logging
+	 * (matches galileo1_write16, which already logs the real address).
+	 */
+	buf.reg = swab16(reg);
+	buf.val = val;
+
+	msg.addr  = client->addr;
+	msg.flags = 0;
+	msg.len   = 3;
+	msg.buf   = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Write a 16-bit big-endian value to a 16-bit register address.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int galileo1_write16(struct i2c_client *client, u16 reg, u16 val)
+{
+	int ret;
+	struct i2c_msg msg;
+	u16 buf[2];
+
+	/* Both address and data are sent MSB first on the wire */
+	buf[0] = swab16(reg);
+	buf[1] = swab16(val);
+
+	msg.addr  = client->addr;
+	msg.flags = 0;
+	msg.len   = 4;
+	msg.buf   = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Pad op: report the active (or TRY) media-bus format */
+static int galileo1_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_format *fmt)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct v4l2_mbus_framefmt *mf;
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, 0);
+		fmt->format = *mf;
+		return 0;
+	}
+
+	fmt->format = galileo1->format;
+
+	return 0;
+}
+
+/* Pad op: store the requested media-bus format (no validation here) and
+ * mark the derived timings as stale.
+ */
+static int galileo1_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			    struct v4l2_subdev_format *fmt)
+{
+	struct v4l2_mbus_framefmt *mf = &fmt->format;
+	struct galileo1 *galileo1 = to_galileo1(sd);
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+		fmt->format = *mf;
+	} else {
+		galileo1->format = fmt->format;
+	}
+
+	/* Clocks/PLLs must be recomputed before the next stream-on */
+	galileo1->timings_uptodate = 0;
+
+	return 0;
+}
+
+/* Pad op: enumerate supported media-bus codes (single 10-bit Bayer code) */
+static int galileo1_enum_mbus_code(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_fh *fh,
+				   struct v4l2_subdev_mbus_code_enum *code)
+{
+	/* For now we support only one code */
+	if (code->index)
+		return -EINVAL;
+
+	code->code = V4L2_MBUS_FMT_SGBRG10_1X10;
+
+	return 0;
+}
+
+/* Pad op: report crop bounds (full pixel array) or the active crop */
+static int galileo1_get_selection(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		sel->r.left   = 0;
+		sel->r.top    = 0;
+		sel->r.width  = SENSOR_WIDTH;
+		sel->r.height = SENSOR_HEIGHT;
+		break;
+	case V4L2_SEL_TGT_CROP:
+		sel->r = galileo1->crop;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Compute VT timing and binning */
+static int galileo1_calc_vt(struct v4l2_subdev *sd)
+{
+ struct galileo1 *galileo1 = to_galileo1(sd);
+ struct v4l2_rect *vt = &galileo1->video_timing;
+ struct v4l2_rect *c = &galileo1->crop;
+ struct v4l2_mbus_framefmt *fmt = &galileo1->format;
+
+ /* We bin as much as possible before scaling */
+ galileo1->x_binning = c->width / fmt->width;
+ galileo1->x_binning = min(galileo1->x_binning, 2U);
+
+ galileo1->y_binning = c->height / fmt->height;
+ galileo1->y_binning = min(galileo1->y_binning, 8U);
+
+ /* Video Timing is working on binned pixels
+ * min_vt_line_blanking_pck is 512
+ * min_vt_frame_blanking_line is 38
+ */
+ vt->width = (c->width / galileo1->x_binning) + 512;
+ vt->height = (c->height / galileo1->y_binning) + 42;
+
+ /* It seems there is a minimum VT width which differs from what the
+ * datasheet says (8240). It is an empiric value, I don't know if it is
+ * correct... */
+ vt->width = max(vt->width, 1920);
+
+ return 0;
+}
+
+/* Pad op: set the crop rectangle. If streaming, reprogram the crop and
+ * binning registers on the fly under GROUPED_PARAMETER_HOLD.
+ */
+static int galileo1_set_selection(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_selection *sel)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct v4l2_rect          *c   = &galileo1->crop;
+	struct v4l2_mbus_framefmt *fmt = &galileo1->format;
+	struct i2c_client         *i2c = galileo1->i2c_sensor;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		galileo1->crop = sel->r;
+		break;
+	default:
+		v4l2_err(sd, "selection target (%d) not supported yet\n",
+			     sel->target);
+		return -EINVAL;
+	}
+
+	galileo1->timings_uptodate = 0;
+
+	/* Registers are only touched while streaming */
+	if (!galileo1->streaming)
+		return 0;
+
+	/* We bin as much as possible before scaling */
+	galileo1->x_binning = c->width / fmt->width;
+	galileo1->x_binning = min(galileo1->x_binning, 2U);
+
+	galileo1->y_binning = c->height / fmt->height;
+	galileo1->y_binning = min(galileo1->y_binning, 8U);
+
+	/* Hold so all parameters latch on the same frame boundary */
+	galileo1_write16(i2c, GROUPED_PARAMETER_HOLD, 0x1);
+
+	galileo1_write16(i2c, X_ADDR_START, c->left);
+	galileo1_write16(i2c, Y_ADDR_START, c->top);
+	galileo1_write16(i2c, X_ADDR_END,   c->left + c->width  - 1);
+	galileo1_write16(i2c, Y_ADDR_END,   c->top  + c->height - 1);
+
+	galileo1_write16(i2c, DIGITAL_CROP_IMAGE_WIDTH,
+			      c->width / galileo1->x_binning);
+	galileo1_write16(i2c, DIGITAL_CROP_IMAGE_HEIGHT,
+			      c->height / galileo1->y_binning);
+
+	galileo1_write8(i2c, BINNING_TYPE, galileo1->x_binning << 4 |
+					   galileo1->y_binning);
+
+	galileo1_write16(i2c, GROUPED_PARAMETER_HOLD, 0x0);
+
+	return 0;
+}
+
+/* Pad-level subdev operations */
+static const struct v4l2_subdev_pad_ops galileo1_pad_ops = {
+	.get_fmt        = galileo1_get_fmt,
+	.set_fmt        = galileo1_set_fmt,
+	.enum_mbus_code = galileo1_enum_mbus_code,
+	.get_selection  = galileo1_get_selection,
+	.set_selection  = galileo1_set_selection,
+};
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/* VIDIOC_DBG_G_REGISTER: read one 8-bit sensor register.
+ * Register addresses are 16 bits wide in this driver (see the u16 'reg'
+ * parameter of galileo1_read8), so accept the full 16-bit range; the
+ * previous ~0xff mask blocked most of the register map. The value read
+ * is a single byte, so report size 1 (not 2).
+ */
+static int galileo1_get_register(struct v4l2_subdev *sd,
+				 struct v4l2_dbg_register *reg)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	int ret;
+	u8 val;
+
+	reg->size = 1;
+
+	if (reg->reg & ~0xffff)
+		return -EINVAL;
+
+	ret = galileo1_read8(galileo1->i2c_sensor, reg->reg, &val);
+	if (ret)
+		return ret;
+
+	reg->val = (__u64)val;
+
+	return 0;
+}
+
+/* VIDIOC_DBG_S_REGISTER: write one 8-bit sensor register.
+ * Accept the full 16-bit address range (registers addresses are u16
+ * throughout this driver); values remain 8-bit.
+ */
+static int galileo1_set_register(struct v4l2_subdev *sd,
+				 struct v4l2_dbg_register *reg)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+
+	if (reg->reg & ~0xffff || reg->val & ~0xff)
+		return -EINVAL;
+
+	return galileo1_write8(galileo1->i2c_sensor, reg->reg, reg->val);
+}
+#endif
+
+/* Core subdev operations (debug register access only under ADV_DEBUG) */
+static const struct v4l2_subdev_core_ops galileo1_core_ops = {
+	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register  = galileo1_get_register,
+	.s_register  = galileo1_set_register,
+#endif
+};
+
+/* Compute minimum clocks in order to reach the FPS */
+static int galileo1_calc_clocks(struct v4l2_subdev *sd)
+{
+ struct galileo1 *galileo1 = to_galileo1(sd);
+ struct galileo1_platform_data *pdata = galileo1->pdata;
+ struct v4l2_rect *vt = &galileo1->video_timing;
+ struct v4l2_fract *fi = &galileo1->frame_interval;
+ struct v4l2_rect *c = &galileo1->crop;
+ struct v4l2_mbus_framefmt *fmt = &galileo1->format;
+ u64 mipiclk_numerator;
+ u64 mipiclk_denominator;
+
+ galileo1->vtclk = div_u64((u64) vt->width * vt->height * fi->denominator,
+ fi->numerator);
+
+ /* In case vtclk is too high, we need to adjust the frame interval */
+ if (galileo1->vtclk > MAX_VTCLK) {
+ galileo1->vtclk = MAX_VTCLK;
+
+ fi->denominator = galileo1->vtclk;
+ fi->numerator = vt->width * vt->height;
+
+ /* In case vtclk is too low, we just increase the vertical blanking */
+ } else if (galileo1->vtclk < MIN_VTCLK) {
+ galileo1->vtclk = MIN_VTCLK;
+
+ vt->height = div_u64((u64) galileo1->vtclk * fi->numerator,
+ vt->width * fi->denominator);
+
+ }
+
+ /* Finally, mipiclk will have to transfer all the scaled pixels, but the
+ * vertical scaling need some line buffers, introducing some
+ * 'burstiness'. We can considered the transfered frame as only scaled
+ * horizontally.
+ */
+ switch (fmt->code) {
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ galileo1->bits_per_pixel = 10;
+ break;
+ default:
+ v4l2_err(sd, "code not supported yet\n");
+ galileo1->vtclk = 0;
+ galileo1->mipiclk = 0;
+ return -EINVAL;
+ }
+
+ mipiclk_numerator = (u64) galileo1->vtclk *
+ galileo1->bits_per_pixel *
+ fmt->width *
+ galileo1->x_binning;
+
+ mipiclk_denominator = c->width * pdata->lanes * 2;
+
+ galileo1->mipiclk = div_u64(mipiclk_numerator, mipiclk_denominator);
+
+ /* In case mipiclk is too low, we just strech vtclk and vertical
+ * blanking.
+ */
+ if (galileo1->mipiclk < MIN_MIPICLK) {
+ vt->height = div_u64((u64) vt->height * MIN_MIPICLK,
+ galileo1->mipiclk);
+
+ galileo1->vtclk = div_u64((u64) galileo1->vtclk * MIN_MIPICLK,
+ galileo1->mipiclk);
+
+ galileo1->mipiclk = MIN_MIPICLK;
+
+ }
+
+ return 0;
+}
+
+#define IS_BETWEEN(_f, _min, _max) ((_f >= _min) && (_f <= _max))
+
+/* Try to reach vtclk and mipiclk from the same PLL. We give the 'priority' to
+ * vtclk, since it is the processing clock whereas mipiclk is 'just' the output
+ * clock.
+ * We are also trying to keep the targeted FPS (if specified so)
+ */
+/* Exhaustively search the PLL1 divider/multiplier space for the setting
+ * whose vtclk and mipiclk come closest to the targets computed by
+ * galileo1_calc_clocks(). With keep_fps == 1, only candidates at or above
+ * both targets are considered (so the requested frame rate is preserved);
+ * with keep_fps == 0 the absolute error is minimized instead.
+ * On success, refreshes vtclk/mipiclk, the frame interval, the line
+ * duration and galileo1->pll1. Returns -EINVAL if no candidate fits.
+ */
+static int galileo1_pll_brute_force(struct v4l2_subdev *sd, int keep_fps)
+{
+	struct galileo1               *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+	struct v4l2_rect              *vt = &galileo1->video_timing;
+	struct v4l2_fract             *fi = &galileo1->frame_interval;
+
+	/* Legal divider values from the sensor spec */
+	const u16 pre_pll_div[] = {1, 2, 4};
+	const u16 vt_sys_div[]  = {1, 2, 4, 6, 8, 10, 12};
+	const u16 vt_pix_div[]  = {4, 5, 6, 7, 8, 9, 10, 12};
+	const u16 op_sys_div[]  = {2, 4, 12, 16, 20, 24};
+	long      best_error = -1;
+	int       ret = -EINVAL;
+
+	/* PLL parameters */
+	u32 p,   best_p   = 0;
+	u32 m,   best_m   = 0;
+	u32 vts, best_vts = 0;
+	u32 vtp, best_vtp = 0;
+	u32 op,  best_op  = 0;
+
+	/* Brute force PLL */
+	for (p = 0 ; p < ARRAY_SIZE(pre_pll_div) ; p++) {
+		unsigned long pll_in_clk = pdata->refclk / pre_pll_div[p];
+
+		if (!IS_BETWEEN(pll_in_clk, MIN_PLL_IN_CLK, MAX_PLL_IN_CLK))
+			continue;
+
+		for (m = 36 ; m <= 832 ; m++) {
+			unsigned long pll_op_clk = pll_in_clk * m;
+
+			if (!IS_BETWEEN(pll_op_clk, MIN_PLL_OP_CLK,
+						    MAX_PLL_OP_CLK))
+				continue;
+
+			for (vts = 0 ; vts < ARRAY_SIZE(vt_sys_div) ; vts++) {
+				unsigned long vt_sys_clk;
+
+				vt_sys_clk = pll_op_clk / vt_sys_div[vts];
+				if (!IS_BETWEEN(vt_sys_clk, MIN_VT_SYS_CLK, MAX_VT_SYS_CLK))
+					continue;
+
+				for (vtp = 0 ; vtp < ARRAY_SIZE(vt_pix_div) ; vtp++) {
+					unsigned long vtclk;
+
+					vtclk = vt_sys_clk / vt_pix_div[vtp];
+
+					if (!IS_BETWEEN(vtclk, MIN_VTCLK, MAX_VTCLK))
+						continue;
+
+					for (op = 0 ; op < ARRAY_SIZE(op_sys_div) ; op++) {
+						unsigned long mipiclk;
+						long          error;
+						long          vt_error;
+						long          mipi_error;
+
+						mipiclk = pll_op_clk / op_sys_div[op] / 2;
+
+						vt_error   = vtclk   - galileo1->vtclk;
+						mipi_error = mipiclk - galileo1->mipiclk;
+
+						/* Don't go lower than the
+						 * targeted frequencies,
+						 * otherwise we won't be able to
+						 * reach the FPS.
+						 */
+						if (keep_fps == 1) {
+							if (vt_error < 0)
+								continue;
+
+							if (mipi_error < 0)
+								continue;
+						} else {
+							if (vt_error < 0)
+								vt_error = -vt_error;
+
+							if (mipi_error < 0)
+								mipi_error = -mipi_error;
+						}
+
+						/* Try to minimize both error */
+						error = mipi_error + vt_error;
+
+						if (error <= best_error || best_error < 0) {
+							ret = 0;
+							best_error = error;
+							best_p     = pre_pll_div[p];
+							best_m     = m;
+							best_vts   = vt_sys_div[vts];
+							best_vtp   = vt_pix_div[vtp];
+							best_op    = op_sys_div[op];
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if (ret != 0)
+		return ret;
+
+	/* Refresh clock frequencies */
+	galileo1->vtclk   = (pdata->refclk * best_m) /
+			    (best_p * best_vts * best_vtp);
+	galileo1->mipiclk = (pdata->refclk * best_m) /
+			    (best_p * best_op * 2);
+
+	/* Refresh FPS */
+	fi->denominator = galileo1->vtclk;
+	fi->numerator   = vt->width * vt->height;
+
+	/* Refresh line_duration */
+	galileo1->line_duration_ns = div_u64((u64) vt->width * 1000000000,
+					     galileo1->vtclk);
+
+	galileo1->pll1.pre_pll_clk_div = best_p;
+	galileo1->pll1.pll_multiplier  = best_m;
+	galileo1->pll1.vt_sys_clk_div  = best_vts;
+	galileo1->pll1.vt_pix_clk_div  = best_vtp;
+	galileo1->pll1.op_sys_clk_div  = best_op;
+
+	return 0;
+}
+
+/* Validate the target clocks, solve PLL1 via brute force (preserving the
+ * FPS when possible, derating it otherwise), then pick PLL0 settings
+ * aiming at a 1 GHz internal clock per the Toshiba spreadsheet.
+ */
+static int galileo1_calc_plls(struct v4l2_subdev *sd)
+{
+	struct galileo1               *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+
+	/* PLL0 parameters */
+	const u16 pre_pll_div[] = {1, 2, 4};
+	long      best_error = -1;
+	u32       p, best_p  = 0;
+	u32       m, best_m  = 0;
+
+
+	/* Perform some sanity checks */
+	if (!IS_BETWEEN(galileo1->mipiclk, MIN_MIPICLK, MAX_MIPICLK)) {
+		v4l2_err(sd, "mipiclk (%lu) is out of range [%lu - %lu]\n",
+			     galileo1->mipiclk, MIN_MIPICLK, MAX_MIPICLK);
+		return -EINVAL;
+	}
+
+	if (!IS_BETWEEN(galileo1->vtclk, MIN_VTCLK, MAX_VTCLK)) {
+		v4l2_err(sd, "vtclk (%lu) is out of range [%lu - %lu]\n",
+			     galileo1->vtclk, MIN_VTCLK, MAX_VTCLK);
+		return -EINVAL;
+	}
+
+	/* Report the value actually tested (was mistakenly mipiclk) */
+	if (!IS_BETWEEN(pdata->refclk, MIN_REFCLK, MAX_REFCLK)) {
+		v4l2_err(sd, "refclk (%lu) is out of range [%lu - %lu]\n",
+			     pdata->refclk, MIN_REFCLK, MAX_REFCLK);
+		return -EINVAL;
+	}
+
+	/* Try to reach the PLL frequencies while preserving the FPS, but in
+	 * case it is not possible, we have to derate it.
+	 */
+	if (galileo1_pll_brute_force(sd, 1) < 0) {
+		if (galileo1_pll_brute_force(sd, 0) < 0) {
+			v4l2_err(sd, "Unable to find PLL config for:\n");
+			v4l2_err(sd, "  vtclk   %lu", galileo1->vtclk);
+			v4l2_err(sd, "  mipiclk %lu", galileo1->mipiclk);
+			return -EINVAL;
+		}
+	}
+
+	/* TOSHIBA register setting
+	 * I don't know what frequency is needed for the following BoostCK,
+	 * ADC Clock, ck_st and hreg_clk...
+	 * So I follow the given spreadsheet...
+	 * I also assume the PLL0 constraints are the same as the PLL1.
+	 */
+	for (p = 0 ; p < ARRAY_SIZE(pre_pll_div) ; p++) {
+		unsigned long pll_in_clk = pdata->refclk / pre_pll_div[p];
+
+		if (!IS_BETWEEN(pll_in_clk, MIN_PLL_IN_CLK, MAX_PLL_IN_CLK))
+			continue;
+		for (m = 36 ; m <= 832 ; m++) {
+			unsigned long pll_op_clk = pll_in_clk * m;
+
+			/* Trying to reach 1GHz, again, it seems to work that
+			 * way, but I don't know why...
+			 */
+			long error = 1000000000UL - pll_op_clk;
+
+			if (error < 0)
+				error = -error;
+
+			if (error < best_error || best_error < 0) {
+				best_error = error;
+				/* NOTE(review): stored as divider - 1 here,
+				 * unlike pll1 — presumably the PRE_PLL_CNTL_ST
+				 * register encoding; confirm with datasheet.
+				 */
+				best_p = pre_pll_div[p] - 1;
+				best_m = m;
+			}
+		}
+	}
+
+	galileo1->pll0.pre_pll_clk_div = best_p;
+	galileo1->pll0.pll_multiplier  = best_m;
+
+	return 0;
+}
+
+#undef IS_BETWEEN
+
+/* Recompute binning, video timing, clocks and PLL settings after a
+ * format/crop/interval change; sets timings_uptodate on success.
+ */
+static int galileo1_update_timings(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	int ret;
+
+	/* From the crop and the output size, calculate the binning and the
+	 * Video Timing.
+	 */
+	ret = galileo1_calc_vt(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to calculate Video Timing\n");
+		return ret;
+	}
+
+	/* Calculate the minimum theoretical clock frequency in order to
+	 * achieve the frame interval.
+	 */
+	ret = galileo1_calc_clocks(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to calculate Clocks\n");
+		return ret;
+	}
+
+	/* Update clocks */
+	ret = galileo1_calc_plls(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to calculate plls\n");
+		return ret;
+	}
+
+	galileo1->timings_uptodate = 1;
+
+	return 0;
+}
+
+/* Program the PLL1/PLL0 settings computed by galileo1_calc_plls() into
+ * the sensor, plus fixed clock-routing values from the vendor spreadsheet.
+ */
+static int galileo1_apply_plls(struct v4l2_subdev *sd)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c_sensor = galileo1->i2c_sensor;
+
+	galileo1_write16(i2c_sensor, PRE_PLL_CLK_DIV,
+			 galileo1->pll1.pre_pll_clk_div);
+	galileo1_write16(i2c_sensor, PLL_MULTIPLIER,
+			 galileo1->pll1.pll_multiplier);
+	galileo1_write16(i2c_sensor, VT_SYS_CLK_DIV,
+			 galileo1->pll1.vt_sys_clk_div);
+	galileo1_write16(i2c_sensor, VT_PIX_CLK_DIV,
+			 galileo1->pll1.vt_pix_clk_div);
+	galileo1_write16(i2c_sensor, OP_SYS_CLK_DIV,
+			 galileo1->pll1.op_sys_clk_div);
+	/* Fixed output pixel clock divider */
+	galileo1_write16(i2c_sensor, OP_PIX_CLK_DIV,
+			 0x0008);
+
+	galileo1_write8(i2c_sensor,  PRE_PLL_CNTL_ST,
+			galileo1->pll0.pre_pll_clk_div);
+	galileo1_write16(i2c_sensor, PLL_MULTI_ST,
+			 galileo1->pll0.pll_multiplier);
+
+	/* Fixed values taken from the vendor spreadsheet */
+	galileo1_write8(i2c_sensor, AD_CNTL,            0x02);
+	galileo1_write8(i2c_sensor, ST_CNTL,            0x07);
+	galileo1_write8(i2c_sensor, HREG_CNTL,          0x05);
+	galileo1_write8(i2c_sensor, PLL_HRG_CNTL,       0x01);
+	galileo1_write8(i2c_sensor, HREG_PLLSEL_SINGLE, 0x10);
+	galileo1_write8(i2c_sensor, OPCK_PLLSEL,        0x00);
+
+	return 0;
+}
+
+/*
+ * MODEPowerup_and_Initialize
+ * Following values are taken directly from Nokia INIT.txt file.
+ * I have no idea what it does...
+ */
+static int galileo1_init(struct v4l2_subdev *sd)
+{
+	struct galileo1               *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+	struct i2c_client             *i2c_sensor = galileo1->i2c_sensor;
+	struct i2c_client             *i2c_pmic = galileo1->i2c_pmic;
+	u16                            whole, fract;
+	union global_reset_mode_config1 glbrst_cfg1 = {{
+		.vf_to_glbrst                = 0, /* complete frame */
+		.glbrst_to_vf                = 0,
+		.readout_start               = 0, /* Readout start by tRDOUT */
+		.long_exposure_mode          = 0,
+		.continous_global_reset_mode = 0,
+		.flash_strobe                = 0,
+		.sstrobe_muxing              = 1,
+		.sastrobe_muxing             = 0,
+	}};
+
+	/*
+	 * AD5814 settings
+	 */
+	galileo1_write8(i2c_pmic, CONTROL,       0x00);
+	/* DRIVE_CFG Slope 001(275ns) H Bridge,DIV2 */
+	galileo1_write8(i2c_pmic, DRIVE_CFG,     0x39);
+	galileo1_write8(i2c_pmic, BUCK_CFG,      0x03);
+	/* TIMING 20ms */
+	galileo1_write8(i2c_pmic, TIMING,        0x58);
+	/* CONFIG 2.5V */
+	galileo1_write8(i2c_pmic, CONFIG,        0x10);
+	/* PERIOD 15.6us */
+	galileo1_write8(i2c_pmic, PERIOD,        0x9C);
+	/* TAH 0.8us */
+	galileo1_write8(i2c_pmic, TAH,           0x08);
+	/* TAL 14.8us */
+	galileo1_write8(i2c_pmic, TAL,           0x94);
+	/* TBH 12.8us */
+	galileo1_write8(i2c_pmic, TBH,           0x78);
+	/* TBL 1.7us */
+	galileo1_write8(i2c_pmic, TBL,           0x11);
+	galileo1_write8(i2c_pmic, NR_CFG,        0x04);
+	galileo1_write8(i2c_pmic, NR_PERIOD,     0x02);
+	galileo1_write8(i2c_pmic, NR_TAH,        0x01);
+	galileo1_write8(i2c_pmic, NR_TAL,        0x01);
+	galileo1_write8(i2c_pmic, NR_TBH,        0x01);
+	galileo1_write8(i2c_pmic, NR_TBL,        0x01);
+	galileo1_write16(i2c_pmic, NR_PRE_PULSE, 0x0000);
+	galileo1_write16(i2c_pmic, NR_POST_PULSE, 0x0000);
+	galileo1_write8(i2c_pmic, NR_RISEFALL,   0x74);
+	/* NR_RF_PULSE NR 4 PLS */
+	galileo1_write8(i2c_pmic, NR_RF_PULSE,   0x04);
+	/* CURRENT 10.5mA */
+	galileo1_write8(i2c_pmic, CURRENT,       0x13);
+	/* IPOS_TEMPCOMP -0.10%/deg c */
+	galileo1_write8(i2c_pmic, IPOS_TEMPCOMP, 0x02);
+	/*ADC_CONFIG 16Ave 200uA */
+	galileo1_write8(i2c_pmic, ADC_CONFIG,    0x18);
+	galileo1_write8(i2c_pmic, ADC_ACT,       0x00);
+	galileo1_write8(i2c_pmic, ADC_RES,       0x00);
+	galileo1_write8(i2c_pmic, STATUS,        0x00);
+
+	/* Sensor MSRs */
+	galileo1_write8(i2c_sensor, POSBSTSEL,        0x1C);
+	galileo1_write8(i2c_sensor, READVDSEL,        0x06);
+	galileo1_write8(i2c_sensor, RSTVDSEL,         0x08);
+	galileo1_write8(i2c_sensor, BSVBPSEL,         0x18);
+	galileo1_write8(i2c_sensor, HREG_TEST,        0x04);
+	galileo1_write8(i2c_sensor, DRESET,           0xC8);
+	galileo1_write16(i2c_sensor, FRACEXP_TIME1,   0x08FF);
+	galileo1_write16(i2c_sensor, PORTGRESET_U,    0x026C);
+	galileo1_write8(i2c_sensor, PORTGRESET_W,     0x30);
+	galileo1_write8(i2c_sensor, ROREAD,           0xCE);
+	galileo1_write8(i2c_sensor, DRCUT,            0x01);
+	galileo1_write8(i2c_sensor, GDMOSCNT,         0x2F);
+	galileo1_write8(i2c_sensor, CDS_STOPBST,      0x01);
+	galileo1_write8(i2c_sensor, BSTCKLFIX_ADC,    0x89);
+	galileo1_write8(i2c_sensor, BSC_AGRNG2,       0x30);
+	galileo1_write8(i2c_sensor, BSC_AGRNG1,       0x18);
+	galileo1_write8(i2c_sensor, BSC_AGRNG0,       0x10);
+	galileo1_write8(i2c_sensor, KBIASCNT_RNG32,   0x98);
+	galileo1_write8(i2c_sensor, KBIASCNT_RNG10,   0x76);
+	galileo1_write8(i2c_sensor, GDMOSHEN,         0x01);
+	galileo1_write8(i2c_sensor, BSDIGITAL_MODE,   0x08);
+	galileo1_write8(i2c_sensor, PS_VZS_NML_COEF,  0xC7);
+	galileo1_write8(i2c_sensor, PS_VZS_NML_INTC,  0x7E);
+	galileo1_write8(i2c_sensor, ZSV_IN_LINES,     0x43);
+	galileo1_write8(i2c_sensor, FBC_IN_RANGE,     0x10);
+	galileo1_write8(i2c_sensor, OB_CLPTHRSH_NEAR, 0x28);
+	galileo1_write8(i2c_sensor, OB_CLPTHRSH_FAR,  0x28);
+	galileo1_write8(i2c_sensor, WKUP_WAIT_ON,     0xE9);
+	galileo1_write8(i2c_sensor, HALF_VTAP_MODE,   0x12);
+	galileo1_write8(i2c_sensor, CCP2BLKD,         0xB0);
+
+	/* Sensor static register: external clock frequency in MHz, encoded
+	 * as an 8.8 fixed-point value split over two byte registers.
+	 */
+	whole = pdata->refclk / 1000000;
+	fract = ((pdata->refclk - (whole * 1000000)) * 0x100) / 1000000;
+
+	galileo1_write8(i2c_sensor, EXTCLK_FRQ_MHZ,     whole);
+	galileo1_write8(i2c_sensor, EXTCLK_FRQ_MHZ + 1, fract);
+
+	galileo1_write8(i2c_sensor, GLOBAL_RESET_MODE_CONFIG1,
+			glbrst_cfg1._register);
+	galileo1_write8(i2c_sensor, DPHY_CTRL, 0x01);
+
+	/* Link MBPS seems to influence the bridge, I don't know why, so I let
+	 * this value to zero.
+	 */
+	galileo1_write8(i2c_sensor, REQUESTED_LINK_BIT_RATE_MBPS_31_24, 0x0);
+	galileo1_write8(i2c_sensor, REQUESTED_LINK_BIT_RATE_MBPS_23_16, 0x0);
+	galileo1_write8(i2c_sensor, REQUESTED_LINK_BIT_RATE_MBPS_15_8,  0x0);
+	galileo1_write8(i2c_sensor, REQUESTED_LINK_BIT_RATE_MBPS_7_0,   0x0);
+
+	return 0;
+}
+
+/* Read-modify-write IMAGE_ORIENTATION to apply the hflip control */
+static int galileo1_apply_hflip(struct v4l2_subdev *sd)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+	union image_orientation reg;
+
+	galileo1_read8(i2c, IMAGE_ORIENTATION, &reg._register);
+	reg.h_mirror = galileo1->hflip->val;
+	galileo1_write8(i2c, IMAGE_ORIENTATION, reg._register);
+
+	return 0;
+}
+
+/* Read-modify-write IMAGE_ORIENTATION to apply the vflip control */
+static int galileo1_apply_vflip(struct v4l2_subdev *sd)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+	union image_orientation reg;
+
+	galileo1_read8(i2c, IMAGE_ORIENTATION, &reg._register);
+	reg.v_mirror = galileo1->vflip->val;
+	galileo1_write8(i2c, IMAGE_ORIENTATION, reg._register);
+
+	return 0;
+}
+
+/* Move the neutral-density filter to the position set by the nd control */
+static int galileo1_apply_nd(struct v4l2_subdev *sd)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_pmic;
+
+	galileo1_write8(i2c, ACT_STATE_1,    galileo1->nd->val);
+	galileo1_write8(i2c, OPERATION_MODE, 0x1);
+
+	/* Pulse the ND drive, held active for 20 ms */
+	galileo1_write8(i2c, MECH_SHUTTER_CONTROL, 0x01);
+	msleep(20);
+	galileo1_write8(i2c, MECH_SHUTTER_CONTROL, 0x00);
+
+	return 0;
+}
+
+/* Pulse MECH_SHUTTER_CONTROL to move the shutter in @direction */
+static int galileo1_drive_shutter(struct v4l2_subdev *sd, u8 direction)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct i2c_client *pmic = galileo1->i2c_pmic;
+
+	union operation_mode opmode = {{
+		.sdirm_cfg = direction,
+	}};
+
+	/* Reset shutter usage, then select the drive direction */
+	galileo1_write8(pmic, MECH_SHUTTER_CONTROL, 0x08);
+	galileo1_write8(pmic, OPERATION_MODE, opmode._register);
+
+	/* Pulse the shutter drive, held active for 20 ms */
+	galileo1_write8(pmic, MECH_SHUTTER_CONTROL, 0x01);
+	msleep(20);
+	galileo1_write8(pmic, MECH_SHUTTER_CONTROL, 0x00);
+
+	return 0;
+}
+
+/* Drive the mechanical shutter closed (direction 0) */
+static int galileo1_shutter_close(struct v4l2_subdev *sd)
+{
+	return galileo1_drive_shutter(sd, 0);
+}
+
+/* Drive the mechanical shutter open (direction 1) */
+static int galileo1_shutter_open(struct v4l2_subdev *sd)
+{
+	return galileo1_drive_shutter(sd, 1);
+}
+
+/* Apply the "ms" control: force the shutter open/closed, or (default
+ * SSTROBE state) route shutter control to the sensor strobe when global
+ * shutter mode is enabled.
+ */
+static int galileo1_set_shutter(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	union global_reset_mode_config1 glbrst_cfg1;
+	struct i2c_client *i2c_sensor = galileo1->i2c_sensor;
+
+	switch (galileo1->ms->val) {
+	case MS_STATE_OPEN:
+		return galileo1_shutter_open(sd);
+	case MS_STATE_CLOSE:
+		return galileo1_shutter_close(sd);
+	default:
+		break;
+	}
+
+	galileo1_read8(i2c_sensor, GLOBAL_RESET_MODE_CONFIG1,
+		       &glbrst_cfg1._register);
+	glbrst_cfg1.sastrobe_muxing = 0;
+
+	if (galileo1->gs->val) {
+		struct i2c_client *i2c_pmic = galileo1->i2c_pmic;
+
+		union operation_mode opmode = {
+			/* Shutter strobe & ND Strobe */
+			/* Shutter */
+			.smode     = 1, /* Strobe mode */
+			.sedge     = 1, /* Edge mode
+					 *  rising  edge = close
+					 *  falling edge = open
+					 */
+			.sdirm_cfg = 0,
+			.nmode     = 1, /* Strobe mode */
+			.nedge     = 0,
+			.ndirm_cfg = 0,
+		};
+
+		/* Reset shutter usage */
+		galileo1_write8(i2c_pmic, MECH_SHUTTER_CONTROL, 0x08);
+		galileo1_write8(i2c_pmic, OPERATION_MODE, opmode._register);
+		galileo1_write8(i2c_pmic, MECH_SHUTTER_CONTROL, 0x1);
+
+		/* Let the sensor strobe drive the shutter */
+		glbrst_cfg1.sastrobe_muxing = 1;
+	}
+
+	galileo1_write8(i2c_sensor, GLOBAL_RESET_MODE_CONFIG1,
+			glbrst_cfg1._register);
+
+	return 0;
+}
+
+/* Configure (or tear down) global-reset / mechanical-shutter exposure.
+ * When the gs control is off, disables continuous global reset and
+ * reopens the shutter; otherwise programs the readout/strobe timers from
+ * the exposure time and the shutter delay stored in NVM.
+ */
+static int galileo1_apply_ms(struct v4l2_subdev *sd)
+{
+	struct galileo1    *galileo1 = to_galileo1(sd);
+	struct i2c_client  *i2c_sensor = galileo1->i2c_sensor;
+	struct i2c_client  *i2c_pmic = galileo1->i2c_pmic;
+	u32                *exposure_us = &galileo1->exposure->val;
+	u32                 ms_state = galileo1->ms->val;
+	struct v4l2_rect   *c  = &galileo1->crop;
+	struct v4l2_rect   *vt = &galileo1->video_timing;
+	u8                 *nvm = galileo1->nvm;
+	union nvm_memaddr  *nvm_addr = &galileo1->nvm_addr;
+	u16                 sdelay, sdelay_ctrl;
+	u16                 trdout_ctrl;
+	u16                 str_delay_ctrl;
+	u16                 tgrst_interval_ctrl;
+	u32                 half_line_duration;
+
+	/* NOTE(review): initializer uses single braces here while the same
+	 * union is initialized with double braces elsewhere in this file —
+	 * presumably the struct member is anonymous; verify it compiles.
+	 */
+	union global_reset_mode_config1 glbrst_cfg1 = {
+		.vf_to_glbrst                = 0,
+		.glbrst_to_vf                = 0,
+		.readout_start               = 0,
+		.long_exposure_mode          = 0,
+		.continous_global_reset_mode = 1,
+		.flash_strobe                = 0,
+		.sstrobe_muxing              = 1,
+		.sastrobe_muxing             = 1,
+	};
+
+	if (ms_state != MS_STATE_SSTROBE)
+		glbrst_cfg1.sastrobe_muxing = 0;
+
+	/* Deactivate GS mode if it was previously enabled */
+	if (!galileo1->gs->val) {
+		glbrst_cfg1.sastrobe_muxing = 0;
+		glbrst_cfg1.continous_global_reset_mode = 0;
+
+		galileo1_write8(i2c_sensor, GLOBAL_RESET_MODE_CONFIG1,
+				glbrst_cfg1._register);
+
+		galileo1_write8(galileo1->i2c_sensor,
+				GLOBAL_RESET_CTRL1, 0x0);
+
+		if (ms_state != MS_STATE_CLOSE)
+			return galileo1_shutter_open(sd);
+
+		return 0;
+	}
+
+	galileo1->trdy_ctrl = 0x0034;
+
+	/* This is used to round further timing computations instead of flooring */
+	half_line_duration = galileo1->line_duration_ns / 2;
+
+	/* Shutter should close after exposure time, but we need to take into
+	 * account the shutter speed stored in the NVM (big-endian u16, us) */
+	sdelay = swab16(*((u16 *)(nvm + nvm_addr->ms)));
+	sdelay_ctrl = ((u32)sdelay * 1000 + half_line_duration) / galileo1->line_duration_ns;
+
+	/* Don't begin reading the pixels until we've waited for the exposure
+	 * time */
+	trdout_ctrl = ((u32)(*exposure_us) * 1000 + half_line_duration) / galileo1->line_duration_ns;
+
+	if (sdelay_ctrl > galileo1->trdy_ctrl + trdout_ctrl)
+		galileo1->trdy_ctrl = sdelay_ctrl - trdout_ctrl;
+
+	/* Leave the shutter open for some more time so that it closes when we
+	 * start reading the pixels */
+	str_delay_ctrl = galileo1->trdy_ctrl + trdout_ctrl - sdelay_ctrl;
+
+	/* Configure timer */
+	/* Set Global reset ready to its minimum */
+	galileo1_write16(i2c_sensor, TRDY_CTRL, galileo1->trdy_ctrl);
+
+	galileo1_write16(i2c_sensor, TSHUTTER_STROBE_DELAY_CTRL,
+			 str_delay_ctrl);
+
+	/* Start readout as soon as possible */
+	galileo1_write16(i2c_sensor, TRDOUT_CTRL, trdout_ctrl);
+
+	/* Close the shutter during the readout, thus it should last at least
+	 * the number of active line.
+	 */
+	galileo1_write16(i2c_sensor, TSHUTTER_STROBE_WIDTH_CTRL,
+			 c->height / galileo1->y_binning
+			 + sdelay_ctrl);
+
+	tgrst_interval_ctrl = vt->height + trdout_ctrl + galileo1->trdy_ctrl
+			      + sdelay_ctrl + 512;
+	galileo1_write16(i2c_sensor, TGRST_INTERVAL_CTRL, tgrst_interval_ctrl);
+
+	galileo1_write8(i2c_sensor, GLOBAL_RESET_MODE_CONFIG1,
+			glbrst_cfg1._register);
+
+	/* Mechanical shutter control */
+	galileo1_write8(i2c_sensor, GLOBAL_RESET_CTRL1, 0x1);
+	galileo1_write8(i2c_pmic, MECH_SHUTTER_CONTROL, 0x1);
+
+	return 0;
+}
+
+/* Apply the exposure control: clamp to one frame interval, then convert
+ * the exposure time to an integral number of lines.
+ */
+static int galileo1_apply_exposure(struct v4l2_subdev *sd)
+{
+	struct galileo1   *galileo1 = to_galileo1(sd);
+	struct v4l2_fract *fi = &galileo1->frame_interval;
+	u32               *exposure_us = &galileo1->exposure->val;
+	u16                coarse;
+
+	/* Exposure is expressed in us */
+	u32 exposure_max_us = div_u64((u64) fi->numerator * 1000000,
+				      fi->denominator);
+
+	if (*exposure_us > exposure_max_us) {
+		v4l2_warn(sd, "requested exposure (%d) is higher than exposure max (%d)\n",
+			      *exposure_us, exposure_max_us);
+
+		*exposure_us = exposure_max_us;
+	}
+
+	/* 64-bit math: the us-to-ns product overflows 32 bits for exposures
+	 * above ~4.29 s (matches the div_u64 usage elsewhere in this file).
+	 */
+	coarse = div_u64((u64) *exposure_us * 1000,
+			 galileo1->line_duration_ns);
+
+	galileo1_write16(galileo1->i2c_sensor, COARSE_INTEGRATION_TIME, coarse);
+
+	return 0;
+}
+
+/* Apply the analog gain control (raw sensor gain code) */
+static int galileo1_apply_gain(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	u32              gain = galileo1->gain->val;
+
+	galileo1_write16(galileo1->i2c_sensor, ANALOG_GAIN_CODE_GLOBAL, gain);
+
+	return 0;
+}
+
+/* Apply the flash strobe source control: disable the hardware strobe for
+ * software source, otherwise program the strobe width for global-shutter
+ * (photo) or rolling-shutter (video) mode.
+ */
+static int galileo1_apply_flash_strobe(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	enum v4l2_flash_strobe_source strobe_source;
+	union global_reset_mode_config1 glbrst_cfg1;
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+
+	strobe_source = galileo1->strobe_source->val;
+
+	galileo1_read8(i2c, GLOBAL_RESET_MODE_CONFIG1, &glbrst_cfg1._register);
+
+	if (strobe_source == V4L2_FLASH_STROBE_SOURCE_SOFTWARE) {
+		glbrst_cfg1.flash_strobe = 0;
+		galileo1_write8(i2c,
+				GLOBAL_RESET_MODE_CONFIG1,
+				glbrst_cfg1._register);
+		galileo1_write8(i2c, FLASH_TRIGGER_RS, 0x0);
+		return 0;
+	}
+
+
+	/* Set the width to 100us, it is an arbitrary value, but the signal
+	 * seems to take at least ~30us to go from 0 to 1
+	 * (width registers are in units of 108 ns, hence the /108 below)
+	 */
+	if (galileo1->gs->val) {
+		/* "Global" shutter mode (photo) */
+		glbrst_cfg1.flash_strobe = 1;
+
+		galileo1_write8(i2c,
+				GLOBAL_RESET_MODE_CONFIG1,
+				glbrst_cfg1._register);
+
+		galileo1_write16(i2c, TFLASH_STROBE_WIDTH_HIGH_CTRL, (galileo1->strobe_width->val
+					* 1000) / 108);
+
+	} else {
+		/* Rolling shutter mode (video) */
+		galileo1_write16(i2c, TFLASH_STROBE_WIDTH_HIGH_RS_CTRL, (galileo1->strobe_width->val
+					* 1000) / 108);
+		galileo1_write8(i2c, FLASH_MODE_RS,    0x1);
+		galileo1_write8(i2c, FLASH_TRIGGER_RS, 0x1);
+	}
+
+
+	return 0;
+}
+
+/* Trigger a lens position measurement on the PMIC and read it back.
+ *
+ * Issues a measure-position command, polls STATUS until the PMIC reports
+ * it is no longer busy, then reads the POSITION register into *pos.
+ *
+ * NOTE(review): the busy poll has no timeout or delay; an unresponsive
+ * PMIC would spin this loop forever on the I2C bus. Always returns 0.
+ */
+static int galileo1_get_lens_position(struct v4l2_subdev *sd, u16 *pos)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_pmic;
+
+	/* Start assumed busy so the poll below runs at least once */
+	union status status = {
+		.busy = 1,
+	};
+
+	union focus_change_control fcc = {
+		.measpos = 1,
+	};
+
+	galileo1_write8(i2c, FOCUS_CHANGE_CONTROL, fcc._register);
+
+	while (status.busy)
+		galileo1_read8(i2c, STATUS, &status._register);
+
+	galileo1_read16(i2c, POSITION, pos);
+
+	return 0;
+}
+
+/* Move the lens towards the focus control target position.
+ *
+ * The actuator is driven in closed loop: measure the current position,
+ * issue a proportional move (factor 30, direction from the sign of the
+ * error), re-measure, and retry up to 3 times until within +/-2 units of
+ * the target. The position actually reached is written back into the
+ * control. Always returns 0, even if the target was not reached.
+ */
+static int galileo1_apply_focus(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_pmic;
+	u8 trial = 0;
+	u16 cur_pos;
+	u16 tgt_pos = galileo1->focus->val;
+
+	galileo1_get_lens_position(sd, &cur_pos);
+
+	/* We try to reach the tgt_pos with +/- 2 tolerance in 3 trials max */
+	while ((cur_pos < tgt_pos - 2 || cur_pos > tgt_pos + 2) && trial < 3) {
+		union focus_change_control fcc = { ._register = 0 };
+		union status status;
+		s16 step;
+
+		fcc.enable = 1;
+		fcc.dir = (tgt_pos < cur_pos);
+		fcc.measpos = 1;
+
+		/* FOCUS_CHANGE takes a magnitude; direction is in fcc.dir */
+		step = 30 * (tgt_pos - cur_pos);
+		if (step < 0)
+			step = -step;
+
+		galileo1_write16(i2c, FOCUS_CHANGE, step);
+		galileo1_write8(i2c, FOCUS_CHANGE_CONTROL, fcc._register);
+
+		/* NOTE(review): unbounded busy poll, see
+		 * galileo1_get_lens_position().
+		 */
+		status.busy = 1;
+		while (status.busy)
+			galileo1_read8(i2c, STATUS, &status._register);
+
+		galileo1_read16(i2c, POSITION, &cur_pos);
+		trial++;
+
+		/* Avoid locking up the focus mechanics */
+		udelay(750);
+	}
+
+	galileo1->focus->val = cur_pos;
+	galileo1->focus->cur.val = cur_pos;
+
+	return 0;
+}
+
+/* Copy a control's new value into its "current" value under the control
+ * lock, so subsequent reads report what was actually applied to the
+ * hardware (e.g. a clamped exposure or the reached focus position).
+ * NOTE(review): this manually pokes v4l2-ctrls internals; confirm there
+ * is no dedicated framework helper for this.
+ */
+static inline void galileo1_synchronize_ctrl(struct v4l2_ctrl *ctrl)
+{
+	v4l2_ctrl_lock(ctrl);
+	ctrl->cur.val = ctrl->val;
+	v4l2_ctrl_unlock(ctrl);
+}
+
+/* Apply the full static sensor configuration: CSI-2 link, output and
+ * crop geometry, scaler, binning and vendor-specific tweaks, then push
+ * the current control values and synchronize them.
+ *
+ * Must be called with the sensor powered on and PLLs already applied.
+ * Returns 0 on success or a negative error from galileo1_init().
+ */
+static int galileo1_configure(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+	struct v4l2_rect *vt = &galileo1->video_timing;
+	struct v4l2_rect *c = &galileo1->crop;
+	struct v4l2_mbus_framefmt *fmt = &galileo1->format;
+	int ret;
+
+	ret = galileo1_init(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "init failed\n");
+		return ret;
+	}
+
+	/* CSI2 mode */
+	galileo1_write8(i2c, CSI_SIGNALING_MODE, 0x2);
+
+	/* Pixel format.
+	 * Fix: CSI_DATA_FORMAT is a 16-bit register pair (high byte:
+	 * embedded data depth, low byte: pixel depth). It was written with
+	 * an 8-bit access, which truncated the composed 16-bit value.
+	 */
+	galileo1_write16(i2c, CSI_DATA_FORMAT, galileo1->bits_per_pixel << 8 |
+					       galileo1->bits_per_pixel);
+	galileo1_write8(i2c, CSI_LANE_MODE, pdata->lanes - 1);
+
+	/* Image Size */
+	galileo1_write16(i2c, X_OUTPUT_SIZE, fmt->width);
+	galileo1_write16(i2c, Y_OUTPUT_SIZE, fmt->height);
+
+	/* Image Scaling */
+	/* Full scaling, Bayer sampling */
+	galileo1_write16(i2c, SCALING_MODE, 0x0002);
+	galileo1_write16(i2c, SPATIAL_SAMPLING, 0x0000);
+
+	/* Scaler */
+	galileo1_write16(i2c, OUTPUT_IMAGE_WIDTH, fmt->width);
+	galileo1_write16(i2c, OUTPUT_IMAGE_HEIGHT, fmt->height);
+	galileo1_write16(i2c, SCALER_BLANKING_PCK, 0x26EC);
+
+	/* Frame Timing */
+	galileo1_write16(i2c, VT_LINE_LENGTH_PCK, vt->width);
+	galileo1_write16(i2c, VT_FRAME_LENGTH_LINES, vt->height);
+
+	/* Image area */
+	galileo1_write16(i2c, X_ADDR_START, c->left);
+	galileo1_write16(i2c, Y_ADDR_START, c->top);
+	galileo1_write16(i2c, X_ADDR_END, c->left + c->width - 1);
+	galileo1_write16(i2c, Y_ADDR_END, c->top + c->height - 1);
+
+	/* Digital Crop: We do not crop before the scaler */
+	galileo1_write16(i2c, DIGITAL_CROP_X_OFFSET, 0x0000);
+	galileo1_write16(i2c, DIGITAL_CROP_Y_OFFSET, 0x0000);
+	galileo1_write16(i2c, DIGITAL_CROP_IMAGE_WIDTH,
+			      c->width / galileo1->x_binning);
+	galileo1_write16(i2c, DIGITAL_CROP_IMAGE_HEIGHT,
+			      c->height / galileo1->y_binning);
+
+	/* Binning */
+	galileo1_write8(i2c, BINNING_MODE, 0x1);
+	galileo1_write8(i2c, BINNING_TYPE, galileo1->x_binning << 4 |
+					   galileo1->y_binning);
+
+	/* TOSHIBA register setting
+	 * Scaler */
+	galileo1_write8(i2c, GREEN_AVERAGED_BAYER, 0x00);
+	galileo1_write8(i2c, HORIZONTAL_DIGITAL_BINNING, 0x00);
+	galileo1_write8(i2c, VERTICAL_DIGITAL_BINNING, 0x00);
+
+	/* Row Noise improve setting */
+	galileo1_write8(i2c, BLC_SEL, 0x01);
+	galileo1_write16(i2c, CSI2_DELAY, 0x0000);
+
+	/* DPC */
+	galileo1_write8(i2c, SINGLE_DEFECT_CORRECT_ENABLE, 0x00);
+	galileo1_write8(i2c, COMBINED_COUPLET_SINGLE_DEFECT_CORRECT_ENABLE,
+			0x01);
+
+	/* Controls */
+	galileo1_apply_exposure(sd);
+	galileo1_apply_gain(sd);
+	galileo1_apply_focus(sd);
+
+	/* Synchronize control values: exposure may have been clamped and
+	 * focus reports the position actually reached.
+	 */
+	galileo1_synchronize_ctrl(galileo1->exposure);
+	galileo1_synchronize_ctrl(galileo1->focus);
+
+	return 0;
+}
+
+/* Start or stop streaming.
+ *
+ * Stop path: release the mechanical shutter (if in global-shutter mode),
+ * put the sensor in software standby and power it down.
+ * Start path: (re)compute the video timings if stale, power up, program
+ * PLLs and the static configuration, apply the volatile controls and
+ * finally set MODE_SELECT to streaming. On any failure after power-up
+ * the sensor is powered back down.
+ */
+static int galileo1_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+	int ret;
+
+	if (enable == 0) {
+		/* Nothing to do if we are already off */
+		if (galileo1->streaming == 0)
+			return 0;
+
+		galileo1->streaming = 0;
+
+		if (galileo1->gs->val) {
+			galileo1_write8(galileo1->i2c_sensor,
+					GLOBAL_RESET_CTRL1, 0x0);
+			galileo1_write8(galileo1->i2c_pmic,
+					MECH_SHUTTER_CONTROL, 0);
+		}
+
+		galileo1_write8(galileo1->i2c_sensor, MODE_SELECT, 0x00);
+
+		if (pdata->set_power)
+			pdata->set_power(GALILEO1_POWER_OFF);
+
+		return 0;
+	}
+
+	if (!galileo1->timings_uptodate) {
+		ret = galileo1_update_timings(sd);
+		if (ret < 0) {
+			v4l2_err(sd, "Unable to calculate Video Timing\n");
+			return ret;
+		}
+	}
+
+	/* Now that all needed pre-calculations are done, we can power on the
+	 * device and configure it.
+	 */
+	if (pdata->set_power) {
+		ret = pdata->set_power(GALILEO1_POWER_ON);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	ret = galileo1_apply_plls(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to apply plls\n");
+		goto power_off;
+	}
+
+	ret = galileo1_configure(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to configure\n");
+		goto power_off;
+	}
+
+	/* Stream on. The streaming flag is set first because the apply_*
+	 * helpers below are no-ops via s_ctrl when streaming == 0.
+	 */
+	galileo1->streaming = 1;
+	galileo1_set_shutter(sd);
+	galileo1_apply_hflip(sd);
+	galileo1_apply_vflip(sd);
+	galileo1_apply_nd(sd);
+	galileo1_apply_ms(sd);
+	galileo1_apply_flash_strobe(sd);
+	galileo1_write8(galileo1->i2c_sensor, MODE_SELECT, 0x01);
+
+	return 0;
+
+power_off:
+	if (pdata->set_power)
+		pdata->set_power(GALILEO1_POWER_OFF);
+
+	return ret;
+}
+
+/* Report the currently configured frame interval. */
+static int galileo1_g_frame_interval(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_frame_interval *fi)
+{
+	struct galileo1 *sensor = to_galileo1(sd);
+
+	memset(fi, 0, sizeof(*fi));
+	fi->interval = sensor->frame_interval;
+
+	return 0;
+}
+
+/* Set the frame interval (frame rate).
+ *
+ * When not streaming, the new interval is only recorded and timings are
+ * marked stale; they are recomputed on stream-on. When streaming, only
+ * the vertical blanking is adjusted (the pixel clock cannot change), and
+ * if the minimum blanking cannot be met the interval is rounded to the
+ * closest achievable rate and written back into *fi's source structure.
+ */
+static int galileo1_s_frame_interval(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_frame_interval *fi)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+	struct v4l2_rect *vt = &galileo1->video_timing;
+	struct v4l2_rect *c = &galileo1->crop;
+	struct v4l2_fract *cur_fi = &galileo1->frame_interval;
+	u32 min_vt_height;
+
+	*cur_fi = fi->interval;
+
+	galileo1->timings_uptodate = 0;
+
+	if (!galileo1->streaming)
+		return 0;
+
+	/* We are already streaming, so we try to adjust the vertical blanking
+	 * in order to match the frame rate.
+	 */
+	vt->height = div_u64((u64) galileo1->vtclk * cur_fi->numerator,
+			     vt->width * cur_fi->denominator);
+
+	/* In case min_vt_frame_blanking is not met, we adjust the frame rate
+	 * (42 lines is presumably the sensor's minimum vertical blanking --
+	 * TODO confirm against the datasheet).
+	 */
+	min_vt_height = c->height / galileo1->y_binning + 42;
+
+	if (vt->height < min_vt_height) {
+		vt->height = min_vt_height;
+		/* Refresh FPS */
+		cur_fi->denominator = galileo1->vtclk;
+		cur_fi->numerator = vt->width * vt->height;
+	}
+
+	galileo1_write16(i2c, VT_FRAME_LENGTH_LINES, vt->height);
+
+	return 0;
+}
+
+/* Report BT-style timings for the capture bridge: active width/height
+ * and the effective pixel clock derived from the MIPI link rate
+ * (mipiclk is presumably the per-lane DDR clock, hence the factor 2 --
+ * TODO confirm). Blanking fields are left at zero since the receiver
+ * runs in HACTIVE/VACTIVE mode.
+ */
+static int galileo1_g_dv_timings(struct v4l2_subdev *sd,
+				 struct v4l2_dv_timings *timings)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = galileo1->pdata;
+	struct v4l2_mbus_framefmt *fmt = &galileo1->format;
+	struct v4l2_bt_timings *bt = &timings->bt;
+	int ret;
+
+	memset(timings, 0, sizeof(*timings));
+
+	/* We update the timing only when we are not streaming. Anyway, while
+	 * streaming, it is forbidden to change the pixelcloclk.
+	 */
+	if (!galileo1->timings_uptodate && !galileo1->streaming) {
+		ret = galileo1_update_timings(sd);
+		if (ret < 0) {
+			v4l2_err(sd, "Unable to calculate Video Timing\n");
+			return ret;
+		}
+	}
+
+	bt->width = fmt->width;
+	bt->height = fmt->height;
+	bt->pixelclock = (galileo1->mipiclk * pdata->lanes * 2) /
+			  galileo1->bits_per_pixel;
+
+	/* Consider HSYNC and VSYNC as HACTIVE and VACTIVE*/
+	bt->polarities = 0;
+
+	/* Because we are in HACTIVE/VACTIVE mode, the blanking size does not
+	 * matter for the capture device.
+	 */
+
+	return 0;
+}
+
+/* V4L2 subdev video operations: stream control and timing queries. */
+static const struct v4l2_subdev_video_ops galileo1_video_ops = {
+	.s_stream         = galileo1_s_stream,
+	.g_frame_interval = galileo1_g_frame_interval,
+	.s_frame_interval = galileo1_s_frame_interval,
+	.g_dv_timings     = galileo1_g_dv_timings,
+};
+
+/* Top-level subdev ops (core/pad ops are defined earlier in this file) */
+static const struct v4l2_subdev_ops galileo1_ops = {
+	.core  = &galileo1_core_ops,
+	.video = &galileo1_video_ops,
+	.pad   = &galileo1_pad_ops,
+};
+
+/* Refresh volatile control values from the hardware.
+ *
+ * Only V4L2_CID_FOCUS_ABSOLUTE is truly volatile (the lens can drift).
+ * When not streaming the hardware is off, so the default value is
+ * reported instead.
+ *
+ * NOTE(review): casting &ctrl->val (s32 *) to u16 * only fills the low
+ * half of the value on little-endian; this would read garbage on a
+ * big-endian kernel and relies on ctrl->val's upper bytes being zero.
+ */
+static int galileo1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct galileo1 *galileo1 = ctrl_to_galileo1(ctrl);
+
+	/* If not streaming, just default value */
+	if (!galileo1->streaming) {
+		ctrl->val = ctrl->default_value;
+		return 0;
+	}
+
+	switch (ctrl->id) {
+	case V4L2_CID_FOCUS_ABSOLUTE:
+		galileo1_get_lens_position(&galileo1->sd, (u16 *)&ctrl->val);
+		break;
+	}
+
+	return 0;
+}
+
+/* Custom ctrls */
+#define V4L2_CID_GALILEO1_ND (V4L2_CID_CAMERA_CLASS_BASE + 0x100)
+#define V4L2_CID_GALILEO1_GS (V4L2_CID_CAMERA_CLASS_BASE + 0x101)
+#define V4L2_CID_GALILEO1_STROBE_WIDTH (V4L2_CID_CAMERA_CLASS_BASE + 0x102)
+#define V4L2_CID_GALILEO1_MS (V4L2_CID_CAMERA_CLASS_BASE + 0x103)
+
+/* Apply a control change to the hardware.
+ *
+ * While not streaming the sensor is powered down, so the new value is
+ * only recorded in the control framework and pushed later by
+ * galileo1_s_stream()/galileo1_configure(). Unknown ids are accepted
+ * silently (return 0).
+ */
+static int galileo1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct galileo1 *galileo1 = ctrl_to_galileo1(ctrl);
+
+	/* If not streaming, just keep interval structures up-to-date */
+	if (!galileo1->streaming)
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+		return galileo1_apply_hflip(&galileo1->sd);
+	case V4L2_CID_VFLIP:
+		return galileo1_apply_vflip(&galileo1->sd);
+	case V4L2_CID_EXPOSURE_ABSOLUTE:
+		return galileo1_apply_exposure(&galileo1->sd);
+	case V4L2_CID_FOCUS_ABSOLUTE:
+		return galileo1_apply_focus(&galileo1->sd);
+	case V4L2_CID_GALILEO1_ND:
+		return galileo1_apply_nd(&galileo1->sd);
+	case V4L2_CID_FLASH_STROBE_SOURCE:
+	case V4L2_CID_GALILEO1_STROBE_WIDTH:
+		return galileo1_apply_flash_strobe(&galileo1->sd);
+	case V4L2_CID_ANALOGUE_GAIN:
+		return galileo1_apply_gain(&galileo1->sd);
+	case V4L2_CID_GALILEO1_GS:
+		return galileo1_apply_ms(&galileo1->sd);
+	case V4L2_CID_GALILEO1_MS:
+		return galileo1_set_shutter(&galileo1->sd);
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops galileo1_ctrl_ops = {
+	.g_volatile_ctrl = galileo1_g_volatile_ctrl,
+	.s_ctrl          = galileo1_s_ctrl,
+};
+
+/* Boolean control: engage the neutral density filter */
+static const struct v4l2_ctrl_config galileo1_ctrl_nd = {
+	.ops  = &galileo1_ctrl_ops,
+	.id   = V4L2_CID_GALILEO1_ND,
+	.name = "Neutral Density Filter",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min  = false,
+	.max  = true,
+	.step = 1,
+	.def  = false,
+};
+
+/* Boolean control: photo (global shutter) vs video (rolling shutter) */
+static const struct v4l2_ctrl_config galileo1_ctrl_gs = {
+	.ops  = &galileo1_ctrl_ops,
+	.id   = V4L2_CID_GALILEO1_GS,
+	.name = "Global Shutter",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min  = false,
+	.max  = true,
+	.step = 1,
+	.def  = false,
+};
+
+/* Integer control: mechanical shutter state (0..2; values decoded by
+ * galileo1_set_shutter(), defined earlier in this file).
+ */
+static const struct v4l2_ctrl_config galileo1_ctrl_ms = {
+	.ops  = &galileo1_ctrl_ops,
+	.id   = V4L2_CID_GALILEO1_MS,
+	.name = "Mechanical Shutter",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min  = 0,
+	.max  = 2,
+	.step = 1,
+	.def  = 0,
+};
+
+/* Integer control: flash strobe pulse width in microseconds */
+static const struct v4l2_ctrl_config galileo1_ctrl_sw = {
+	.ops  = &galileo1_ctrl_ops,
+	.id   = V4L2_CID_GALILEO1_STROBE_WIDTH,
+	.name = "Flash strobe width, in us",
+	.type = V4L2_CTRL_TYPE_INTEGER,
+	.min  = 1,
+	.max  = 50000,
+	.step = 1,
+	.def  = 100,
+};
+
+/* Register all V4L2 controls for the sensor.
+ *
+ * Focus limits are read from the sensor NVM, so galileo1_read_nvm() must
+ * have succeeded before this is called (probe guarantees that order).
+ * Returns 0 on success; on failure the handler is freed and a negative
+ * error is returned.
+ */
+static int galileo1_initialize_controls(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct v4l2_ctrl_handler *hdl = &galileo1->ctrl_handler;
+	u8 *nvm = galileo1->nvm;
+	union nvm_memaddr *nvm_addr = &galileo1->nvm_addr;
+	union nvm_af nvm_af;
+	int ret;
+
+	ret = v4l2_ctrl_handler_init(hdl, 16);
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init ctrl handler\n");
+		goto einit;
+	}
+
+	/* Flips */
+	galileo1->hflip = v4l2_ctrl_new_std(hdl,
+					    &galileo1_ctrl_ops,
+					    V4L2_CID_HFLIP,
+					    0, 1, 1, 0);
+
+	galileo1->vflip = v4l2_ctrl_new_std(hdl,
+					    &galileo1_ctrl_ops,
+					    V4L2_CID_VFLIP,
+					    0, 1, 1, 0);
+
+	/* Exposure in us */
+	galileo1->exposure = v4l2_ctrl_new_std(hdl,
+					       &galileo1_ctrl_ops,
+					       V4L2_CID_EXPOSURE_ABSOLUTE,
+					       0, 1000000, 1, 20000);
+
+	/* Focus: raw big-endian AF calibration block from the NVM */
+	nvm_af._registers =
+		swab64(*((u64 *)(nvm + nvm_addr->af + NVM_AF_FAR_END)));
+
+	/* Format the Auto Focus registers: NVM stores deltas, convert them
+	 * to absolute positions (far_end <= infinity <= macro <= near_end).
+	 */
+	nvm_af.infinity += nvm_af.far_end;
+	nvm_af.macro    += nvm_af.infinity;
+	nvm_af.near_end += nvm_af.macro;
+
+	galileo1->focus = v4l2_ctrl_new_std(hdl, &galileo1_ctrl_ops,
+					    V4L2_CID_FOCUS_ABSOLUTE,
+					    nvm_af.far_end,
+					    nvm_af.near_end,
+					    1,
+					    nvm_af.infinity);
+
+	/* Since the lens can move even if no command has been sent, flag the
+	 * control as volatile.
+	 */
+	galileo1->focus->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	/* Neutral Density Filter */
+	galileo1->nd = v4l2_ctrl_new_custom(hdl, &galileo1_ctrl_nd, NULL);
+
+	/* Global Shutter */
+	galileo1->gs = v4l2_ctrl_new_custom(hdl, &galileo1_ctrl_gs, NULL);
+
+	/* Mechanical shutter control */
+	galileo1->ms = v4l2_ctrl_new_custom(hdl, &galileo1_ctrl_ms, NULL);
+
+	/* Flash strobe width */
+	galileo1->strobe_width = v4l2_ctrl_new_custom(hdl, &galileo1_ctrl_sw, NULL);
+
+	/* Flash Strobe: mask ~0x3 leaves only SOFTWARE and EXTERNAL */
+	galileo1->strobe_source =
+		v4l2_ctrl_new_std_menu(hdl,
+				       &galileo1_ctrl_ops,
+				       V4L2_CID_FLASH_STROBE_SOURCE,
+				       V4L2_FLASH_STROBE_SOURCE_EXTERNAL,
+				       ~0x3,
+				       V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+	/* Analog Gain
+	 * AGx1.0  = 0x003F
+	 * AGx2.0  = 0x007E
+	 * AGx8.0  = 0x01F8
+	 * AGx12.0 = 0x02F4, this is the max gain code
+	 */
+	galileo1->gain = v4l2_ctrl_new_std(hdl,
+					   &galileo1_ctrl_ops,
+					   V4L2_CID_ANALOGUE_GAIN,
+					   0, 0x2F4, 1, 5 * 0x3F);
+
+	if (hdl->error) {
+		v4l2_err(sd, "failed to add new ctrls\n");
+		ret = hdl->error;
+		goto ectrl;
+	}
+
+	sd->ctrl_handler = hdl;
+
+	return 0;
+
+ectrl:
+	v4l2_ctrl_handler_free(hdl);
+einit:
+	return ret;
+}
+
+/* Release all controls registered by galileo1_initialize_controls(). */
+static void galileo1_free_controls(struct v4l2_subdev *sd)
+{
+	v4l2_ctrl_handler_free(sd->ctrl_handler);
+}
+
+/* Verify that both the sensor and its PMIC answer with the expected
+ * model IDs. Returns 0 on success, -ENODEV otherwise.
+ */
+static int galileo1_detect_chip(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	u16 chip_id = 0;	/* zero-init: defined value if the read fails */
+
+	galileo1_read16(galileo1->i2c_sensor, SENSOR_MODEL_ID, &chip_id);
+
+	if (chip_id != GALILEO1_CHIPID) {
+		/* Fix: "Chipd" typo in the error message */
+		v4l2_err(sd, "Error Chip ID = 0x%04x instead of 0x%04x\n",
+			     chip_id, GALILEO1_CHIPID);
+		return -ENODEV;
+	}
+
+	galileo1_read16(galileo1->i2c_pmic, IC_INFO, &chip_id);
+
+	if (chip_id != PMIC_CHIPID) {
+		v4l2_err(sd, "Error Chip ID = 0x%04x instead of 0x%04x\n",
+			     chip_id, PMIC_CHIPID);
+		return -ENODEV;
+	}
+
+	v4l2_info(sd, "Found " DRIVER_NAME " chip\n");
+
+	return 0;
+}
+
+/* Dump the sensor's Non-Volatile Memory into galileo1->nvm.
+ *
+ * Pages are read through the SMIA data-transfer interface, one byte at a
+ * time. The read interface is always disabled again on exit, even on
+ * error. Returns 0 on success, -EINVAL on a transfer-interface fault, or
+ * -ENODEV when the NVM version byte is unexpected.
+ *
+ * NOTE(review): the read_if_ready poll has no timeout and would spin
+ * forever on a stuck interface.
+ */
+static int galileo1_read_nvm(struct v4l2_subdev *sd)
+{
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct i2c_client *i2c = galileo1->i2c_sensor;
+	u8 *nvm = galileo1->nvm;
+	u8 page = 0;
+	int ret = 0;
+
+	/* Enable Read */
+	galileo1_write8(i2c, DATA_TRANSFER_IF_1_CTRL, 0x1);
+
+	for (page = 0 ; page < NVM_PAGE_NB ; page++) {
+		unsigned int i;
+
+		union data_transfer_if_1_status status = {
+			.read_if_ready = 0,
+		};
+
+		/* Select page */
+		galileo1_write8(i2c, DATA_TRANSFER_IF_1_PAGE_SELECT, page);
+
+		/* Check Status */
+		while (!status.read_if_ready) {
+			galileo1_read8(i2c, DATA_TRANSFER_IF_1_STATUS,
+					    &status._register);
+
+			if (status.improper_if_usage || status.data_corrupted) {
+				v4l2_err(sd, "NVM Data transfer IF is bad\n");
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+
+		/* Read the entire page (64 bytes)
+		 * If it is taking too long, it can be optimized into reading
+		 * the entire page in one i2c xfer.
+		 */
+		for (i = 0 ; i < NVM_PAGE_SZ ; i++)
+			galileo1_read8(i2c, DATA_TRANSFER_IF_1_DATA + i,
+				       nvm + NVM_PAGE_SZ * page + i);
+	}
+
+out:
+	galileo1_write8(i2c, DATA_TRANSFER_IF_1_CTRL, 0x0);
+
+	/* Check Version: first NVM byte must match the expected layout */
+	if (*nvm != NVM_VERSION) {
+		v4l2_err(sd, "NVM Version (0x%02x) is not correct\n", *nvm);
+		v4l2_err(sd, "Expecting 0x%02x\n", NVM_VERSION);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+/* sysfs "nvm" attribute: expose the raw NVM dump (NVM_SIZE bytes, which
+ * fits in the sysfs PAGE_SIZE buffer) read at probe time.
+ */
+static ssize_t galileo1_nvm_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(to_i2c_client(dev));
+	struct galileo1 *galileo1 = to_galileo1(sd);
+
+	memcpy(buf, galileo1->nvm, NVM_SIZE);
+
+	return NVM_SIZE;
+}
+
+/* Fix: the attribute is only referenced in this file -- make it static */
+static DEVICE_ATTR(nvm, S_IRUGO, galileo1_nvm_show, NULL);
+
+/* Probe: allocate driver state, register the subdev and the PMIC dummy
+ * I2C client, set the default 720p30 configuration, verify both chips,
+ * read the sensor NVM and finally register the V4L2 controls.
+ *
+ * Fix: on power-on failure the function used to 'return ret' directly,
+ * leaking the PMIC dummy client, the media entity and the galileo1
+ * allocation; it now unwinds through the error labels.
+ */
+static int galileo1_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct galileo1 *galileo1;
+	struct galileo1_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	int ret = 0;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "platform data not specified\n");
+		return -EINVAL;
+	}
+
+	if (pdata->refclk == 0) {
+		dev_err(&client->dev, "refclk frequency is not specified\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "i2c not available\n");
+		return -ENODEV;
+	}
+
+	galileo1 = kzalloc(sizeof(*galileo1), GFP_KERNEL);
+	if (!galileo1) {
+		dev_err(&client->dev, "alloc failed for data structure\n");
+		return -ENOMEM;
+	}
+
+	galileo1->pdata = pdata;
+
+	sd = &galileo1->sd;
+	v4l2_i2c_subdev_init(sd, client, &galileo1_ops);
+
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	galileo1->pad.flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+	ret = media_entity_init(&sd->entity, 1, &galileo1->pad, 0);
+
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	galileo1->i2c_sensor = client;
+	galileo1->i2c_pmic = i2c_new_dummy(client->adapter,
+					   GALILEO1_PMIC_I2C_ADDR);
+	if (galileo1->i2c_pmic == NULL) {
+		v4l2_err(sd, "failed to register pmic i2c client\n");
+		ret = -ENODEV;
+		goto epmic;
+	}
+
+	/* Set default configuration:
+	 *   Max sensor crop into 720p30
+	 */
+	galileo1->format.width  = 1280;
+	galileo1->format.height = 720;
+	galileo1->format.code   = V4L2_MBUS_FMT_SGBRG10_1X10;
+
+	/* Center the crop */
+	galileo1->crop.width  = 7680;
+	galileo1->crop.height = 4320;
+	galileo1->crop.left   = 24;
+	galileo1->crop.top    = 524;
+
+	/* 30 FPS */
+	galileo1->frame_interval.numerator   = 1;
+	galileo1->frame_interval.denominator = 30;
+
+	/* Make sure all clocks info are up-to-date */
+	ret = galileo1_update_timings(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Unable to calculate Video Timing\n");
+		goto eupdate;
+	}
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(GALILEO1_POWER_ON);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			/* Fix: unwind instead of leaking (was 'return ret') */
+			goto eupdate;
+		}
+	}
+
+	/* Check if the chip is present */
+	ret = galileo1_detect_chip(sd);
+	if (ret < 0)
+		goto edetect;
+
+	/* Make sure the shutter is closed */
+	galileo1_shutter_close(sd);
+
+	/* Non-Volatile Memory */
+	ret = device_create_file(&client->dev, &dev_attr_nvm);
+	if (ret < 0) {
+		v4l2_err(sd, "Sysfs nvm entry creation failed\n");
+		goto esysfs;
+	}
+
+	galileo1->nvm = kzalloc(NVM_SIZE, GFP_KERNEL);
+	if (!galileo1->nvm) {
+		v4l2_err(sd, "alloc failed for NVM structure\n");
+		ret = -ENOMEM;
+		goto enomem;
+	}
+
+	ret = galileo1_read_nvm(sd);
+	if (ret < 0) {
+		v4l2_err(sd, "Failed to read NVM\n");
+		goto envm;
+	}
+
+	/* Extract NVM Memory map (stored big-endian) */
+	galileo1->nvm_addr._registers =
+		swab64(*(u64 *)(galileo1->nvm + NVM_MEMORY_ADDRESS));
+
+	/* Probe is done with the hardware: power down until stream-on */
+	if (pdata->set_power)
+		pdata->set_power(GALILEO1_POWER_OFF);
+
+	/* Initialize Control */
+	ret = galileo1_initialize_controls(sd);
+	if (ret < 0)
+		goto einitctrl;
+
+	return 0;
+
+einitctrl:
+envm:
+	kfree(galileo1->nvm);
+enomem:
+	device_remove_file(&client->dev, &dev_attr_nvm);
+esysfs:
+edetect:
+	if (pdata->set_power)
+		pdata->set_power(GALILEO1_POWER_OFF);
+eupdate:
+	i2c_unregister_device(galileo1->i2c_pmic);
+epmic:
+emedia:
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(galileo1);
+
+	return ret;
+}
+
+/* Remove: power down the sensor and release everything acquired in
+ * probe (PMIC dummy client, sysfs attribute, controls, media entity,
+ * subdev registration, NVM buffer and driver state).
+ */
+static int galileo1_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct galileo1 *galileo1 = to_galileo1(sd);
+	struct galileo1_platform_data *pdata = client->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (pdata->set_power)
+		pdata->set_power(GALILEO1_POWER_OFF);
+
+	if (galileo1->i2c_pmic)
+		i2c_unregister_device(galileo1->i2c_pmic);
+
+	device_remove_file(&client->dev, &dev_attr_nvm);
+	galileo1_free_controls(sd);
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(galileo1->nvm);
+	kfree(galileo1);
+
+	return 0;
+}
+
+/* I2C device id table: matched against board i2c_board_info names */
+static const struct i2c_device_id galileo1_id[] = {
+	{DRIVER_NAME, 0},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, galileo1_id);
+
+/* Standard I2C driver glue; module init/exit generated by the macro */
+static struct i2c_driver galileo1_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = DRIVER_NAME,
+	},
+	.probe    = galileo1_probe,
+	.remove   = galileo1_remove,
+	.id_table = galileo1_id,
+};
+
+module_i2c_driver(galileo1_driver);
diff --git a/drivers/media/video/galileo1_reg.h b/drivers/media/video/galileo1_reg.h
new file mode 100644
index 00000000000000..06ef489e5ae648
--- /dev/null
+++ b/drivers/media/video/galileo1_reg.h
@@ -0,0 +1,402 @@
+/*
+ * Galileo I register map
+ *
+ * @author Eng-Hong Sron <eng-hong.sron@parrot.com>
+ * @date Sun May 4 13:01:40 CEST 2014
+ *
+ */
+#ifndef _GALILEO1_REG_H_
+#define _GALILEO1_REG_H_
+
+/* I2C parameters */
+#define PMIC_CHIPID 0x2102
+#define GALILEO1_CHIPID 0x2170
+
+/*
+ * PMIC: ADV5814
+ */
+#define IC_INFO 0x0000
+#define IC_VERSION 0x0001
+#define FOCUS_CHANGE 0x0002
+#define FOCUS_CHANGE_CONTROL 0x0005
+#define FOCUS_CHANGE_NUMBER_PHASE_1 0x0006
+#define FOCUS_CHANGE_NUMBER_PHASE_2 0x0008
+#define STROBE_COUNT_PHASE_1 0x000A
+#define STROBE_COUNT_PHASE_2 0x000B
+#define POSITION 0x000C
+#define MECH_SHUTTER_CONTROL 0x000E
+#define OPERATION_MODE 0x000F
+#define ACT_STATE_1 0x0010
+#define ACT_STATE_2 0x0011
+#define CONTROL 0x0012
+#define DRIVE_CFG 0x0013
+#define BUCK_CFG 0x0014
+#define TIMING 0x0015
+#define CONFIG 0x0016
+#define PERIOD 0x0017
+#define TAH 0x0018
+#define TAL 0x0019
+#define TBH 0x001A
+#define TBL 0x001B
+#define NR_CFG 0x001C
+#define NR_PERIOD 0x001D
+#define NR_TAH 0x001E
+#define NR_TAL 0x001F
+#define NR_TBH 0x0020
+#define NR_TBL 0x0021
+#define NR_PRE_PULSE 0x0022
+#define NR_POST_PULSE 0x0024
+#define NR_RISEFALL 0x0026
+#define NR_RF_PULSE 0x0027
+#define CURRENT 0x0028
+#define IPOS_TEMPCOMP 0x0029
+#define ADC_CONFIG 0x002A
+#define ADC_ACT 0x002B
+#define ADC_RES 0x002C
+#define STATUS 0x002D
+
+union focus_change_control {
+ struct {
+ uint8_t enable : 1;
+ uint8_t async : 1;
+ uint8_t frames : 1;
+ uint8_t strobes : 1;
+ uint8_t phase : 1;
+ uint8_t dir : 1;
+ uint8_t measpos : 1;
+ uint8_t movemeas : 1;
+ };
+ uint8_t _register;
+};
+
+union mech_shutter_control {
+ struct {
+ uint8_t enable : 1;
+ uint8_t demux : 1;
+ uint8_t control_level : 1;
+ uint8_t swreset : 1;
+ };
+ uint8_t _register;
+};
+
+union operation_mode {
+ struct {
+ uint8_t smode : 1;
+ uint8_t sedge : 2;
+ uint8_t sdirm_cfg : 1;
+ uint8_t nmode : 1;
+ uint8_t nedge : 2;
+ uint8_t ndirm_cfg : 1;
+ };
+ uint8_t _register;
+};
+
+union drive_cfg {
+ struct {
+ uint8_t mclk_div : 3;
+ uint8_t polarity : 1;
+ uint8_t h_bridge : 1;
+ uint8_t slope : 3;
+ };
+ uint8_t _register;
+};
+
+union buck_cfg {
+ struct {
+ uint8_t b_en : 1;
+ uint8_t b_mode : 1;
+ };
+ uint8_t _register;
+};
+
+union timing {
+ struct {
+ uint8_t rsvd : 4;
+ uint8_t shontime : 4;
+ };
+ uint8_t _register;
+};
+
+union config {
+ struct {
+ unsigned /* unused */ : 2;
+ uint8_t vldo : 3;
+ };
+ uint8_t _register;
+};
+
+union nr_risefall {
+ struct {
+ uint8_t ns_330 : 1;
+ uint8_t ns_360 : 1;
+ uint8_t ns_385 : 1;
+ uint8_t ns_410 : 1;
+ uint8_t ns_490 : 1;
+ uint8_t ns_560 : 1;
+ uint8_t ns_680 : 1;
+ uint8_t ns_950 : 1;
+ };
+ uint8_t _register;
+};
+
+union nr_rf_pulse {
+ struct {
+ uint8_t nr_rf_pulse : 4;
+ };
+ uint8_t _register;
+};
+
+union status {
+ struct {
+ uint8_t over_temp : 1;
+ uint8_t low_vbatt : 1;
+ uint8_t busy : 1;
+ };
+ uint8_t _register;
+};
+
+/*
+ * Sensor
+ */
+#define MODEL_ID 0x0000
+#define SENSOR_MODEL_ID 0x0016
+
+/* General Set-up */
+#define MODE_SELECT 0x0100
+#define IMAGE_ORIENTATION 0x0101
+#define SOFTWARE_RESET 0x0103
+#define GROUPED_PARAMETER_HOLD 0x0104
+#define MASK_CORRUPTED_FRAME 0x0105
+#define FAST_STANDBY_CTRL 0x0106
+#define CCI_ADDRESS_CONTROL 0x0107
+#define CSI_CHANNEL_IDENTIFIER 0x0110
+#define CSI_SIGNALING_MODE 0x0111
+#define CSI_DATA_FORMAT 0x0112
+#define CSI_LANE_MODE 0x0114
+#define CSI_10_TO_8_DT 0x0115
+#define CSI_10_TO_6_DT 0x0117
+#define CSI_12_T0_8_DT 0x0118
+#define CSI_12_T0_6_DT 0x011A
+
+union image_orientation {
+ struct {
+ uint8_t h_mirror : 1;
+ uint8_t v_mirror : 1;
+ };
+ uint8_t _register;
+};
+
+/* Operating Voltage */
+#define EXTCLK_FRQ_MHZ 0x0136
+
+/* Integration Time */
+#define FINE_INTEGRATION_TIME 0x0200
+#define COARSE_INTEGRATION_TIME 0x0202
+
+/* Analog gain */
+#define ANALOG_GAIN_CODE_GLOBAL 0x0204
+
+/* Clock Set-up */
+#define VT_PIX_CLK_DIV 0x0300
+#define VT_SYS_CLK_DIV 0x0302
+#define PRE_PLL_CLK_DIV 0x0304
+#define PLL_MULTIPLIER 0x0306
+#define OP_PIX_CLK_DIV 0x0308
+#define OP_SYS_CLK_DIV 0x030A
+
+/* Frame Timing Register */
+#define VT_FRAME_LENGTH_LINES 0x0340
+#define VT_LINE_LENGTH_PCK 0x0342
+
+/* Image Size */
+#define X_ADDR_START 0x0344
+#define Y_ADDR_START 0x0346
+#define X_ADDR_END 0x0348
+#define Y_ADDR_END 0x034A
+#define X_OUTPUT_SIZE 0x034C
+#define Y_OUTPUT_SIZE 0x034E
+
+/* Imsage Scaling */
+#define SCALING_MODE 0x0400
+#define SPATIAL_SAMPLING 0x0402
+#define DIGITAL_CROP_X_OFFSET 0x0408
+#define DIGITAL_CROP_Y_OFFSET 0x040A
+#define DIGITAL_CROP_IMAGE_WIDTH 0x040C
+#define DIGITAL_CROP_IMAGE_HEIGHT 0x040E
+
+/* CSI-2 Configuration */
+#define DPHY_CTRL 0x0808
+#define REQUESTED_LINK_BIT_RATE_MBPS_31_24 0x0820
+#define REQUESTED_LINK_BIT_RATE_MBPS_23_16 0x0821
+#define REQUESTED_LINK_BIT_RATE_MBPS_15_8 0x0822
+#define REQUESTED_LINK_BIT_RATE_MBPS_7_0 0x0823
+
+/* Binning Configuration */
+#define BINNING_MODE 0x0900
+#define BINNING_TYPE 0x0901
+#define BINNING_WEIGHTING 0x0902
+
+/* Data Upload/Download Configuration */
+#define DATA_TRANSFER_IF_1_CTRL 0x0A00
+#define DATA_TRANSFER_IF_1_STATUS 0x0A01
+#define DATA_TRANSFER_IF_1_PAGE_SELECT 0x0A02
+#define DATA_TRANSFER_IF_1_DATA 0x0A04
+
+/* Non-Volatile Memory */
+#define NVM_PAGE_NB 32
+#define NVM_PAGE_SZ 64
+
+#define NVM_SIZE (NVM_PAGE_NB * NVM_PAGE_SZ)
+
+#define NVM_VERSION 0x60
+
+#define NVM_MEMORY_ADDRESS 0x0002
+#define NVM_AF_FAR_END 0x0005
+
+union data_transfer_if_1_status {
+ struct {
+ uint8_t read_if_ready : 1;
+ uint8_t write_if_ready : 1;
+ uint8_t data_corrupted : 1;
+ uint8_t improper_if_usage : 1;
+ };
+ uint8_t _register;
+};
+
+/* NVM memory-map pointers, decoded from a raw 64-bit swab64() load.
+ *
+ * The register set is only 7 bytes long but is declared on 8 bytes on
+ * purpose to satisfy alignment constraints.
+ *
+ * NOTE(review): bitfield layout/order is implementation-defined in C;
+ * this mapping relies on the compiler's LSB-first packing on the target.
+ */
+union nvm_memaddr {
+	struct {
+		uint64_t dummy    : 10;
+		uint64_t af       : 12;
+		uint64_t defect   : 12;
+		uint64_t ls       : 7;
+		uint64_t nd       : 5;
+		uint64_t ms       : 5;
+		uint64_t checksum : 13;
+	};
+	uint64_t _registers;
+};
+
+/* Auto-focus calibration block: stored as deltas in the NVM, converted
+ * to absolute positions by galileo1_initialize_controls().
+ */
+union nvm_af {
+	struct {
+		uint16_t near_end;
+		uint16_t macro;
+		uint16_t infinity;
+		uint16_t far_end;
+	};
+	uint64_t _registers;
+};
+
+/* Ideal Raw */
+#define MAPPED_COUPLET_CORRECT_ENABLE 0x0B05
+#define SINGLE_DEFECT_CORRECT_ENABLE 0x0B06
+#define SINGLE_DEFECT_CORRECT_WEIGHT 0x0B07
+#define COMBINED_COUPLET_SINGLE_DEFECT_CORRECT_ENABLE 0x0B0A
+#define COMBINED_COUPLET_SINGLE_DEFECT_CORRECT_WEIGHT 0x0B0B
+#define MAPPED_LINE_DEFECT_CORRECT_ENABLE 0x0B0E
+
+/* Timer Configuration */
+#define GLOBAL_RESET_CTRL1 0x0C00
+#define GLOBAL_RESET_CTRL2 0x0C01
+#define GLOBAL_RESET_MODE_CONFIG1 0x0C02
+#define GLOBAL_RESET_MODE_CONFIG2 0x0C03
+#define TRDY_CTRL 0x0C04
+#define TRDOUT_CTRL 0x0C06
+#define TSHUTTER_STROBE_DELAY_CTRL 0x0C08
+#define TSHUTTER_STROBE_WIDTH_CTRL 0x0C0A
+#define TFLASH_STROBE_DELAY_CTRL 0x0C0C
+#define TFLASH_STROBE_WIDTH_HIGH_CTRL 0x0C0E
+#define TGRST_INTERVAL_CTRL 0x0C10
+#define TFLASH_STROBE_ADJUSTMENT 0x0C12
+#define TFLASH_STROBE_START_POINT 0x0C14
+#define TFLASH_STROBE_DELAY_RS_CTRL 0x0C16
+#define TFLASH_STROBE_WIDTH_HIGH_RS_CTRL 0x0C18
+#define FLASH_MODE_RS 0x0C1A
+#define FLASH_TRIGGER_RS 0x0C1B
+#define FLASH_STATUS 0x0C1C
+
+union global_reset_mode_ctrl1 {
+ struct {
+ uint8_t start_global_reset : 1;
+ uint8_t start_long_exposure : 1;
+ };
+ uint8_t _register;
+};
+
+union global_reset_mode_config1 {
+ struct {
+ uint8_t vf_to_glbrst : 1;
+ uint8_t glbrst_to_vf : 1;
+ uint8_t readout_start : 1;
+ uint8_t long_exposure_mode : 1;
+ uint8_t continous_global_reset_mode : 1;
+ uint8_t flash_strobe : 1;
+ uint8_t sstrobe_muxing : 1;
+ uint8_t sastrobe_muxing : 1;
+ };
+ uint8_t _register;
+};
+
+/* PLL for ADC parameters */
+#define AD_CNTL 0x3381
+#define ST_CNTL 0x3383
+#define PRE_PLL_CNTL_ST 0x3385
+#define PLL_MULTI_ST 0x3386
+#define PLL_HRG_CNTL 0x3388
+#define HREG_CNTL 0x3389
+#define HREG_PLLSEL_SINGLE 0x338A
+#define BST_CNTL 0x338C
+#define OPCK_PLLSEL 0x35C4
+
+/* New Scaler Parameters */
+#define OUTPUT_IMAGE_WIDTH 0x3400
+#define OUTPUT_IMAGE_HEIGHT 0x3402
+#define GREEN_AVERAGED_BAYER 0x3404
+#define FILTER_COEFFICIENT_CONTROL_X 0x3405
+#define FILTER_COEFFICIENT_CONTROL_Y 0x3406
+#define HORIZONTAL_DIGITAL_BINNING 0x3407
+#define VERTICAL_DIGITAL_BINNING 0x3408
+#define SCALER_BLANKING_PCK 0x3409
+#define X_SCALE_RATIO 0x3410
+#define Y_SCALE_RATIO 0x3412
+
+/* Row Noise Improve Setting */
+#define BLC_SEL 0x3206
+#define CSI2_DELAY 0x3584
+
+/* No documentation... */
+#define POSBSTSEL 0x3000
+#define READVDSEL 0x3002
+#define RSTVDSEL 0x3004
+#define BSVBPSEL 0x3006
+#define HREG_TEST 0x300C
+#define DRESET 0x3015
+#define FRACEXP_TIME1 0x3021
+#define PORTGRESET_U 0x302A
+#define PORTGRESET_W 0x302C
+#define ROREAD 0x302F
+#define DRCUT 0x3039
+#define GDMOSCNT 0x3042
+#define CDS_STOPBST 0x3064
+#define BSTCKLFIX_ADC 0x3067
+#define BSC_AGRNG2 0x30E0
+#define BSC_AGRNG1 0x30E1
+#define BSC_AGRNG0 0x30E2
+#define KBIASCNT_RNG32 0x30E3
+#define KBIASCNT_RNG10 0x30E4
+#define GDMOSHEN 0x30E6
+#define BSDIGITAL_MODE 0x30EA
+#define PS_VZS_NML_COEF 0x310D
+#define PS_VZS_NML_INTC 0x310E
+#define ZSV_IN_LINES 0x3110
+#define FBC_IN_RANGE 0x3114
+#define OB_CLPTHRSH_NEAR 0x3202
+#define OB_CLPTHRSH_FAR 0x3203
+#define WKUP_WAIT_ON 0x3300
+#define HALF_VTAP_MODE 0x340B
+#define HALF_DRATE_LIMIT 0x340C
+#define CCP2BLKD 0x3580
+
+#endif /* _GALILEO1_REG_H_ */
diff --git a/drivers/media/video/galileo2.c b/drivers/media/video/galileo2.c
new file mode 100644
index 00000000000000..860c9d23b4a282
--- /dev/null
+++ b/drivers/media/video/galileo2.c
@@ -0,0 +1,188 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define DRIVER_NAME "galileo2"
+
+/* Per-device driver state for the Galileo2 sensor. */
+struct galileo2 {
+ struct v4l2_subdev sd; /* embedded V4L2 subdevice (must be first for sd_to_galileo2) */
+ struct media_pad pad; /* single source pad feeding the capture pipeline */
+};
+
+/* Map an embedded v4l2_subdev pointer back to its galileo2 container. */
+static inline struct galileo2 *sd_to_galileo2(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct galileo2, sd);
+}
+
+/*
+ * Pad .get_fmt (also wired as .set_fmt): unconditionally reports a fixed
+ * 1280x720 SBGGR10 format.  The caller's requested pad/which/format is
+ * ignored and overwritten, i.e. format negotiation is a stub.
+ *
+ * NOTE(review): this returns SBGGR10 while galileo2_enum_mbus_code()
+ * advertises SGBRG10 as the only supported code -- one of the two Bayer
+ * orders is likely wrong; confirm against the sensor output.
+ */
+static int galileo2_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = 1280;
+ fmt->format.height = 720;
+ fmt->format.code = V4L2_MBUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+/*
+ * Pad .enum_mbus_code: exactly one media-bus code is exposed, so any
+ * index other than 0 is rejected with -EINVAL.
+ *
+ * NOTE(review): advertises SGBRG10 while galileo2_get_fmt() reports
+ * SBGGR10 -- the two should agree; confirm the sensor's Bayer order.
+ */
+static int galileo2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* For now we support only one code */
+ if (code->index)
+ return -EINVAL;
+
+ code->code = V4L2_MBUS_FMT_SGBRG10_1X10;
+
+ return 0;
+}
+
+/*
+ * Pad operations.  .set_fmt deliberately reuses galileo2_get_fmt since
+ * the format is fixed; selection ops are not implemented yet.
+ */
+static const struct v4l2_subdev_pad_ops galileo2_pad_ops = {
+ .get_fmt = galileo2_get_fmt,
+ .set_fmt = galileo2_get_fmt,
+ .enum_mbus_code = galileo2_enum_mbus_code,
+ /*.get_selection = galileo2_get_selection,
+ .set_selection = galileo2_set_selection,*/
+};
+
+/* Video .s_stream: stub -- streaming start/stop is not driven from here yet. */
+static int galileo2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/*
+ * Video .g_dv_timings: report fixed 1280x720 BT timings with a 100 MHz
+ * pixel clock.  Blanking fields are intentionally left zeroed (see the
+ * comment below); callers only consume width/height/pixelclock here.
+ */
+static int galileo2_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ bt->width = 1280;
+ bt->height = 720;
+ bt->pixelclock = 100 * 1000 * 1000;
+
+ /* Consider HSYNC and VSYNC as HACTIVE and VACTIVE*/
+ bt->polarities = 0;
+
+ /* Because we are in HACTIVE/VACTIVE mode, the blanking size does not
+ * matter for the capture device.
+ */
+
+ return 0;
+}
+
+/* Video operations; frame-interval ops are not implemented yet. */
+static const struct v4l2_subdev_video_ops galileo2_video_ops = {
+ .s_stream = galileo2_s_stream,
+ /*.g_frame_interval = galileo2_g_frame_interval,
+ .s_frame_interval = galileo2_s_frame_interval,*/
+ .g_dv_timings = galileo2_g_dv_timings,
+};
+
+/* Top-level subdev ops table; no core ops are provided yet. */
+static const struct v4l2_subdev_ops galileo2_ops = {
+/* .core = &galileo2_core_ops,*/
+ .video = &galileo2_video_ops,
+ .pad = &galileo2_pad_ops,
+};
+
+/*
+ * I2C probe: allocate driver state, initialize the V4L2 subdevice around
+ * the i2c client and register its single source media pad.
+ *
+ * Returns 0 on success, -ENODEV when the adapter lacks SMBus byte-data
+ * support, -ENOMEM on allocation failure, or the media_entity_init()
+ * error code.
+ */
+static int galileo2_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct galileo2 *galileo2;
+ /*struct galileo2_platform_data *pdata = client->dev.platform_data;*/
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ /*if (pdata == NULL) {
+ dev_err(&client->dev, "platform data not specified\n");
+ return -EINVAL;
+ }*/
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "i2c not available\n");
+ return -ENODEV;
+ }
+
+ galileo2 = kzalloc(sizeof(*galileo2), GFP_KERNEL);
+ if (!galileo2) {
+ dev_err(&client->dev, "alloc failed for data structure\n");
+ return -ENOMEM;
+ }
+
+ /*galileo2->pdata = pdata;*/
+
+ sd = &galileo2->sd;
+ v4l2_i2c_subdev_init(sd, client, &galileo2_ops);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ galileo2->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &galileo2->pad, 0);
+
+ if (ret < 0) {
+ v4l2_err(sd, "failed to init media entity\n");
+ goto emedia;
+ }
+
+
+ return 0;
+
+emedia:
+ /* NOTE(review): the subdev has not been registered with a v4l2_device
+ * at this point, so v4l2_device_unregister_subdev() below should be a
+ * no-op -- confirm it tolerates an unregistered subdev on this kernel.
+ */
+ media_entity_cleanup(&sd->entity);
+ v4l2_device_unregister_subdev(sd);
+ kfree(galileo2);
+
+ return ret;
+}
+
+/*
+ * I2C remove: tear down in reverse order of probe -- media entity,
+ * subdev registration, then the driver state allocation.  The large
+ * commented-out section is power/PMIC/control cleanup that has no
+ * matching setup in probe yet.
+ */
+static int galileo2_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct galileo2 *galileo2 = sd_to_galileo2(sd);
+ /*struct galileo2_platform_data *pdata = client->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->set_power)
+ pdata->set_power(GALILEO2_POWER_OFF);
+
+ if (galileo2->i2c_pmic)
+ i2c_unregister_device(galileo2->i2c_pmic);
+
+ device_remove_file(&client->dev, &dev_attr_nvm);
+
+
+ galileo2_free_controls(sd);
+ */
+ media_entity_cleanup(&sd->entity);
+ v4l2_device_unregister_subdev(sd);
+ kfree(galileo2);
+
+ return 0;
+}
+
+/* I2C device id table: matched by name against board i2c_board_info. */
+static const struct i2c_device_id galileo2_id[] = {
+ {DRIVER_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, galileo2_id);
+
+/* I2C driver registration; module init/exit generated by module_i2c_driver. */
+static struct i2c_driver galileo2_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = galileo2_probe,
+ .remove = galileo2_remove,
+ .id_table = galileo2_id,
+};
+
+module_i2c_driver(galileo2_driver);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Nokia Galileo2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/mt9f002.c b/drivers/media/video/mt9f002.c
new file mode 100644
index 00000000000000..5cee23d6557658
--- /dev/null
+++ b/drivers/media/video/mt9f002.c
@@ -0,0 +1,2901 @@
+/**
+ ************************************************
+ * @file mt9f002.c
+ * @brief Driver for Aptina mt9f002
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2014-09-19
+ ************************************************
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-mediabus.h>
+
+#include <media/mt9f002.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+
+#include "mt9f002_regs.h"
+
+#define MT9F002_NEVENTS 8
+/* /!\ This value is calculated with a 400kHz clock for I2C */
+#define MT9F002_CROP_WRITE_US 3000
+#define MT9F002_DEFAULT_USER_US 4000
+
+/* Map an embedded v4l2_subdev pointer back to its mt9f002 container. */
+static inline struct mt9f002 *to_mt9f002(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mt9f002, subdev);
+}
+
+/*
+ * Write an 8-bit value to a 16-bit-addressed sensor register.
+ *
+ * The register address goes out big-endian (MSB first) followed by the
+ * data byte, in a single 3-byte I2C write.
+ *
+ * Returns 0 on success or the negative i2c_transfer() error code.
+ */
+static int mt9f002_write8(struct mt9f002 *mt9f002, u16 reg, u8 val)
+{
+ struct i2c_client *client = mt9f002->i2c_client;
+ struct i2c_msg msg;
+ struct {
+ u16 reg;
+ u8 val;
+ } __packed buf;
+ int ret;
+
+ /* Swap into the wire buffer only; 'reg' stays in CPU order so the
+ * error message below logs the real register address (previously it
+ * printed the byte-swapped value). */
+ buf.reg = swab16(reg);
+ buf.val = val;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = (u8 *)&buf;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Read a 16-bit sensor register.
+ *
+ * Two chained I2C messages: a 2-byte big-endian address write, then a
+ * 2-byte read.  The sensor returns data MSB first, so the result is
+ * swapped back into CPU order before being stored in *val.
+ *
+ * Returns 0 on success or the negative i2c_transfer() error code
+ * (*val is untouched on failure).
+ */
+static int mt9f002_read16(struct mt9f002 *mt9f002, u16 reg, u16 *val)
+{
+ struct i2c_client *client = mt9f002->i2c_client;
+ /* Wire-order copy of the address; 'reg' itself stays in CPU order so
+ * the error message below logs the real register address. */
+ u16 reg_be = swab16(reg);
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = (u8 *)&reg_be,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 2,
+ .buf = (u8 *)val,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+ return ret;
+ }
+
+ *val = swab16(*val);
+
+ return 0;
+}
+
+/*
+ * Read an 8-bit sensor register.
+ *
+ * Two chained I2C messages: a 2-byte big-endian address write, then a
+ * single-byte read into *val (no swap needed for one byte).
+ *
+ * Returns 0 on success or the negative i2c_transfer() error code.
+ */
+static int mt9f002_read8(struct mt9f002 *mt9f002, u16 reg, u8 *val)
+{
+ struct i2c_client *client = mt9f002->i2c_client;
+ /* Wire-order copy of the address; 'reg' itself stays in CPU order so
+ * the error message below logs the real register address. */
+ u16 reg_be = swab16(reg);
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = (u8 *)&reg_be,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = val,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Write a 16-bit value to a 16-bit-addressed sensor register.
+ *
+ * Address and data are both transmitted big-endian (MSB first) in one
+ * 4-byte I2C write transfer.
+ *
+ * Returns 0 on success or the negative i2c_transfer() error code.
+ */
+static int mt9f002_write16(struct mt9f002 *mt9f002, u16 reg, u16 val)
+{
+ struct i2c_client *client = mt9f002->i2c_client;
+ struct {
+ u16 reg;
+ u16 val;
+ } __packed wire = {
+ .reg = swab16(reg),
+ .val = swab16(val),
+ };
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 4,
+ .buf = (u8 *)&wire,
+ };
+ int err;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0) {
+ dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the minimum line-blanking, line-length and line-FIFO-blanking
+ * timings (in pixel clocks) according to the current readout mode:
+ * X+Y binning, X-only binning, scaled readout, or plain readout.
+ * Always returns 0 at present.
+ */
+static int mt9f002_blanking_init(struct mt9f002 *mt9f002)
+{
+ int res = 0;
+ struct mt9f002_context *ctx = &mt9f002->ctx;
+ struct mt9f002_blanking *blanking = &ctx->blanking;
+
+ if (blanking->x_odd_inc > 1) {
+ if (blanking->y_odd_inc > 1)
+ {
+ /* binning in both directions */
+ blanking->min_line_blanking_pck =
+ BINNING_XY_MIN_LINE_BLANKING_PCK;
+ blanking->min_line_length_pck =
+ BINNING_XY_MIN_LINE_LENGTH_PCK;
+ blanking->min_line_fifo_pck =
+ BINNING_XY_MIN_LINE_FIFO_BLANKING_PCK;
+ } else {
+ /* horizontal binning only */
+ blanking->min_line_blanking_pck =
+ BINNING_X_MIN_LINE_BLANKING_PCK;
+ blanking->min_line_length_pck =
+ BINNING_X_MIN_LINE_LENGTH_PCK;
+ blanking->min_line_fifo_pck =
+ BINNING_X_MIN_LINE_FIFO_BLANKING_PCK;
+ }
+ } else {
+
+ /* NOTE(review): presumably scaler == 16 means "no scaling"
+ * (16/16 ratio), so != 16 selects the scaler timings --
+ * confirm against the scaler setup code. */
+ if (ctx->scaler != 16) {
+ blanking->min_line_blanking_pck
+ = SCALER_MIN_LINE_BLANKING_PCK;
+ blanking->min_line_length_pck
+ = SCALER_MIN_LINE_LENGTH_PCK;
+ blanking->min_line_fifo_pck
+ = SCALER_MIN_LINE_FIFO_BLANKING_PCK;
+ } else {
+ blanking->min_line_blanking_pck
+ = MIN_LINE_BLANKING_PCK;
+ blanking->min_line_length_pck
+ = MIN_LINE_LENGTH_PCK;
+ blanking->min_line_fifo_pck
+ = MIN_LINE_FIFO_BLANKING_PCK;
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Validate the PLL parameters in the context against the MT9F002
+ * datasheet limits.  Every violated constraint logs its own error and
+ * checking continues, so a single pass reports all problems.
+ *
+ * Fixes vs. the previous version: the v4l2_err() format strings used
+ * backslash line-continuation *inside* the literal, which embedded the
+ * source indentation into the logged text; the odd/even multiplier
+ * messages also wrongly said "pre_pll_clk_div" while printing
+ * pll_multiplier.  The checks themselves are unchanged.
+ *
+ * Returns 0 when all constraints hold, -1 otherwise.
+ */
+static int mt9f002_pll_check(struct mt9f002 *mt9f002)
+{
+ uint64_t pll_ip_clk_freq, vco_freq, clk_pixel, clk_op;
+ struct mt9f002_platform_data *pdata = mt9f002->pdata;
+ struct mt9f002_context *ctx = &mt9f002->ctx;
+ struct mt9f002_pll *pll = &ctx->pll;
+ int res = 0;
+
+ /* check that ext_clk_freq_mhz (the input clock) is in range */
+ if (pdata->ext_clk_freq_mhz < EXT_CLK_MIN
+ || pdata->ext_clk_freq_mhz > EXT_CLK_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, input clock (%llu MHz) not in the allowed range\n",
+ pdata->ext_clk_freq_mhz);
+ res = -1;
+ }
+
+ /* check that the PLL input divider is in range */
+ if (pll->pre_pll_clk_div < PRE_PLL_CLK_DIV_MIN
+ || pll->pre_pll_clk_div > PRE_PLL_CLK_DIV_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, pre_pll_clk_div (%u) not in the allowed range\n",
+ pll->pre_pll_clk_div);
+ res = -1;
+ }
+
+ /* check that pll_ip_clk_freq (EQ 1) is in range */
+ pll_ip_clk_freq = div_u64((u64)pdata->ext_clk_freq_mhz,
+ pll->pre_pll_clk_div);
+ if (pll_ip_clk_freq < PLL_INPUT_CLK_MIN
+ || pll_ip_clk_freq > PLL_INPUT_CLK_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, pll_ip_clk_freq (%llu) not in the allowed range\n",
+ pll_ip_clk_freq);
+ res = -1;
+ }
+
+ /* check the multiplier: odd and even values have different ranges */
+ if (pll->pll_multiplier % 2) {
+ if (pll->pll_multiplier < PLL_MUL_ODD_MIN ||
+ pll->pll_multiplier > PLL_MUL_ODD_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, odd pll_multiplier (%d) not in the allowed range\n",
+ pll->pll_multiplier);
+ res = -1;
+ }
+ } else {
+ if (pll->pll_multiplier < PLL_MUL_EVEN_MIN ||
+ pll->pll_multiplier > PLL_MUL_EVEN_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, even pll_multiplier (%d) not in the allowed range\n",
+ pll->pll_multiplier);
+ res = -1;
+ }
+ }
+
+ /* check that the VCO frequency (EQ 2) is in range */
+ vco_freq = (u64)pll_ip_clk_freq * pll->pll_multiplier;
+ if (vco_freq < VCO_CLK_MIN || vco_freq > VCO_CLK_MAX) {
+ v4l2_err(&mt9f002->subdev,
+ "error, vco_freq (%llu) not in the allowed range\n",
+ vco_freq);
+ res = -1;
+ }
+
+ /* clk_op (EQ 5) must not run faster than clk_pixel (EQ 4) */
+ clk_pixel = (u64)pdata->ext_clk_freq_mhz * pll->pll_multiplier *
+ (1 + pll->shift_vt_pix_clk_div);
+ clk_pixel = div_u64((u64)clk_pixel,
+ pll->pre_pll_clk_div * pll->vt_sys_clk_div
+ * pll->vt_pix_clk_div * 2 * pll->row_speed_2_0);
+
+ clk_op = (u64)pdata->ext_clk_freq_mhz * pll->pll_multiplier;
+ clk_op = div_u64((u64)clk_op, pll->pre_pll_clk_div * pll->op_sys_clk_div
+ * pll->op_pix_clk_div * pll->row_speed_10_8);
+
+ if (clk_op > clk_pixel) {
+ v4l2_err(&mt9f002->subdev,
+ "error, clk_op (%llu) runs faster than clk_pixel (%llu)\n",
+ clk_op, clk_pixel);
+ res = -1;
+ }
+
+ /*
+ * When the serial interface is used the clk_op divider cannot be
+ * used; row_speed[10:8] must equal 1.
+ */
+ if (pdata->interface == MT9F002_MIPI ||
+ pdata->interface == MT9F002_HiSPi) {
+ if (pll->row_speed_10_8 != 1) {
+ v4l2_err(&mt9f002->subdev,
+ "error, wrong row_speed_10_8 value (%u) instead of 1\n",
+ pll->row_speed_10_8);
+ res = -1;
+ }
+ }
+
+ /*
+ * op_pix_clk_div must match the serial bit-depth (R0x0112-3): 8 for
+ * 8 bits per pixel, 10 for 10 bits, 12 for 12 bits.  This is not
+ * required when using the parallel interface.
+ */
+ if (pdata->interface == MT9F002_MIPI
+ || pdata->interface == MT9F002_HiSPi) {
+ if (pll->op_pix_clk_div != pdata->pixel_depth) {
+ v4l2_err(&mt9f002->subdev,
+ "error, wrong op_pix_clk_div expected 0x%x get 0x%x\n",
+ pdata->pixel_depth, pll->op_pix_clk_div);
+ res = -1;
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Exhaustively search the PLL divider/multiplier space for the setting
+ * whose output clock is closest to pdata->output_clk_freq_mhz, subject
+ * to the datasheet constraints (PLL input range, VCO range, odd/even
+ * multiplier ranges, clk_op <= clk_pixel, clk_pixel <= PIXEL_CLK_MAX).
+ * The best setting is written into ctx->pll, and the derived vt_pix_clk
+ * / op_pix_clk values (in MHz) are cached in ctx->clock; the V4L2
+ * pixel-rate control is updated as a side effect.
+ *
+ * Returns 0 on success, -1 when no valid combination exists.
+ */
+static int mt9f002_pll_conf(struct mt9f002 *mt9f002)
+{
+ uint32_t pre_pll_clk_div, pll_multiplier;
+ uint32_t best_pre_pll_clk_div = 0, best_pll_multiplier = 0;
+ uint32_t best_vt_sys_clk_div = 0, best_vt_pix_clk_div = 0;
+ uint32_t best_op_sys_clk_div = 0;
+ uint32_t best_op_pix_clk_div = 0;
+ uint32_t best_row_speed_10_8 = 0;
+ uint32_t vt_sys_clk_div_index, vt_pix_clk_div_index,
+ op_sys_clk_div_index;
+ uint32_t op_pix_clk_div_index, row_speed_10_8_index;
+ int64_t minError = -1;
+ struct mt9f002_platform_data *pdata = mt9f002->pdata;
+ struct mt9f002_context *ctx = &mt9f002->ctx;
+ struct mt9f002_pll *pll = &ctx->pll;
+ struct mt9f002_clock *clock = &ctx->clock;
+
+ /* candidate divider values allowed by the datasheet */
+ uint32_t vt_sys_clk_div_tab[] = {1, 2, 4, 6, 8};
+ uint32_t vt_pix_clk_div_tab[] = {2, 3, 4, 5, 6, 7, 8};
+ uint32_t op_sys_clk_div_tab[] = {1, 2, 4, 6, 8};
+ uint32_t op_pix_clk_div_tab[] = {8, 10, 12};
+ uint32_t row_speed_10_8_tab[] = {1, 2, 4};
+
+ uint64_t virtual_pll_ip_clk_freq, virtualVCOfreq, virtual_clk_pixel,
+ virtual_clk;
+ int64_t error;
+ uint64_t tmp64;
+
+ // fill constant value
+ pll->shift_vt_pix_clk_div = 1;
+ // no documentation, set to 1 in the excel sheet
+ pll->row_speed_2_0 = 1;
+ // don't understand what it is, fix it to 1
+
+ if (pdata->interface == MT9F002_MIPI ||
+ pdata->interface == MT9F002_HiSPi) {
+ // MIPI/HiSPi has some forbiden values
+ row_speed_10_8_tab[0] = row_speed_10_8_tab[1]
+ = row_speed_10_8_tab[2] = 1;
+ op_pix_clk_div_tab[0] = op_pix_clk_div_tab[1]
+ = op_pix_clk_div_tab[2]
+ = pdata->pixel_depth;
+ }
+
+ // explore all possibilities
+ for (pre_pll_clk_div = PRE_PLL_CLK_DIV_MIN;
+ pre_pll_clk_div <= PRE_PLL_CLK_DIV_MAX;
+ pre_pll_clk_div++) {
+
+ // if pll input clock is out of range, cancel this option
+ virtual_pll_ip_clk_freq = div_u64((u64)pdata->ext_clk_freq_mhz,
+ pre_pll_clk_div);
+
+ if (virtual_pll_ip_clk_freq > PLL_INPUT_CLK_MAX
+ || virtual_pll_ip_clk_freq < PLL_INPUT_CLK_MIN) {
+ continue;
+ }
+
+ for (pll_multiplier = PLL_MUL_MIN;
+ pll_multiplier <= PLL_MUL_MAX; pll_multiplier++) {
+ if (pll_multiplier % 2 == 0) {
+ // even mul, check range
+ if (pll_multiplier < PLL_MUL_EVEN_MIN
+ || pll_multiplier > PLL_MUL_EVEN_MAX) {
+ // out of range
+ continue;
+ }
+ } else {
+ // odd mul, check range
+ if (pll_multiplier < PLL_MUL_ODD_MIN
+ || pll_multiplier > PLL_MUL_ODD_MAX) {
+ // out of range
+ continue;
+ }
+ }
+
+ // check that we remain in the VCO range
+ virtualVCOfreq = (u64)virtual_pll_ip_clk_freq
+ * pll_multiplier;
+
+ if (virtualVCOfreq < VCO_CLK_MIN
+ || virtualVCOfreq > VCO_CLK_MAX) {
+ // VCO out of range, cancel this option
+ continue;
+ }
+
+ for (vt_sys_clk_div_index = 0; vt_sys_clk_div_index < ARRAY_SIZE(vt_sys_clk_div_tab); vt_sys_clk_div_index++) {
+ for (vt_pix_clk_div_index = 0; vt_pix_clk_div_index < ARRAY_SIZE(vt_pix_clk_div_tab); vt_pix_clk_div_index++) {
+ for (op_sys_clk_div_index = 0; op_sys_clk_div_index < ARRAY_SIZE(op_sys_clk_div_tab); op_sys_clk_div_index++) {
+ for (op_pix_clk_div_index = 0; op_pix_clk_div_index < ARRAY_SIZE(op_pix_clk_div_tab); op_pix_clk_div_index++) {
+ for (row_speed_10_8_index = 0; row_speed_10_8_index < ARRAY_SIZE(row_speed_10_8_tab); row_speed_10_8_index++) {
+
+ // check that clk_op <= clk_pixel (from EQ 5 and EQ 4)
+ if ((vt_sys_clk_div_tab[vt_sys_clk_div_index]
+ * vt_pix_clk_div_tab[vt_pix_clk_div_index] * pll->row_speed_2_0) > (op_sys_clk_div_tab[op_sys_clk_div_index] * op_pix_clk_div_tab[op_pix_clk_div_index] * row_speed_10_8_tab[row_speed_10_8_index])) {
+ // with this settings clk_op would be greater than clk_pixel, forget it !
+ continue;
+ }
+
+ // check that we remain in clk_pixel range
+ // try to run clk_pixel as close as possible to 120MHz (max value)
+ virtual_clk_pixel = div_u64((u64)virtualVCOfreq,
+ vt_sys_clk_div_tab[vt_sys_clk_div_index] * vt_pix_clk_div_tab[vt_pix_clk_div_index]
+ * pll->row_speed_2_0); // (EQ 4)
+
+ if (virtual_clk_pixel > PIXEL_CLK_MAX) {
+ continue;
+ }
+
+ if (pdata->interface == MT9F002_Parallel) {
+ // compute pixel clock (named clk_op in the datasheet)
+ virtual_clk = div_u64((u64)virtualVCOfreq,
+ op_sys_clk_div_tab[op_sys_clk_div_index] * op_pix_clk_div_tab[op_pix_clk_div_index]
+ * row_speed_10_8_tab[row_speed_10_8_index]);
+ } else {
+ // compute serial clock (named op_sys_clk in the datasheet)
+ virtual_clk = div_u64((u64)virtualVCOfreq,
+ op_sys_clk_div_tab[op_sys_clk_div_index]);
+ }
+
+ // if this solution is best we've seen so far, save it
+ error = abs64((u64)virtual_clk - pdata->output_clk_freq_mhz);
+ if (error < minError || minError < 0) {
+ minError = error;
+ best_vt_sys_clk_div = vt_sys_clk_div_index;
+ best_vt_pix_clk_div = vt_pix_clk_div_index;
+ best_op_sys_clk_div = op_sys_clk_div_index;
+ best_op_pix_clk_div = op_pix_clk_div_index;
+ best_row_speed_10_8 = row_speed_10_8_index;
+ best_pre_pll_clk_div = pre_pll_clk_div;
+ best_pll_multiplier = pll_multiplier;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // check that an option has been found
+ if (minError < 0) {
+ v4l2_err(&mt9f002->subdev, "pll configuration failure\n");
+ return -1;
+ }
+
+ // save values
+ pll->op_sys_clk_div = op_sys_clk_div_tab[best_op_sys_clk_div];
+ pll->vt_sys_clk_div = vt_sys_clk_div_tab[best_vt_sys_clk_div];
+ pll->vt_pix_clk_div = vt_pix_clk_div_tab[best_vt_pix_clk_div];
+ pll->op_pix_clk_div = op_pix_clk_div_tab[best_op_pix_clk_div];
+ pll->row_speed_10_8 = row_speed_10_8_tab[best_row_speed_10_8];
+ pll->pll_multiplier = best_pll_multiplier;
+ pll->pre_pll_clk_div = best_pre_pll_clk_div;
+
+ // compute&save some relevant clock values
+ // compute vt_pix_clk (EQ 3)
+ tmp64 = (u64)pdata->ext_clk_freq_mhz * pll->pll_multiplier *
+ (1 + pll->shift_vt_pix_clk_div);
+ tmp64 = div_u64((u64)tmp64,
+ pll->pre_pll_clk_div * pll->vt_sys_clk_div *
+ pll->vt_pix_clk_div);
+ clock->vt_pix_clk = div_u64((u64)tmp64, 1000000);
+
+ /* Update pixel rate control */
+ *mt9f002->pixel_rate->p_cur.p_s64 = div_u64((u64)tmp64, 1000L);
+
+ // compute op_pix_clk
+ tmp64 = (u64)pdata->ext_clk_freq_mhz * pll->pll_multiplier;
+ tmp64 = div_u64((u64)tmp64,
+ pll->pre_pll_clk_div * pll->op_sys_clk_div *
+ pll->op_pix_clk_div);
+ clock->op_pix_clk = div_u64((u64)tmp64, 1000000);
+
+ return 0;
+}
+
+/*
+ * Push the PLL configuration from ctx->pll into the sensor registers.
+ * SMIA_TEST bit 6 carries shift_vt_pix_clk_div; ROW_SPEED bits [2:0]
+ * and [10:8] carry row_speed_2_0 / row_speed_10_8 via read-modify-write.
+ * I2C errors are OR-accumulated; returns 0 only if every access worked.
+ */
+static int mt9f002_pll_write(struct mt9f002 *mt9f002)
+{
+ int res = 0;
+ uint16_t smia;
+ uint16_t row_speed;
+ struct mt9f002_context *ctx = &mt9f002->ctx;
+ struct mt9f002_pll *pll = &ctx->pll;
+
+ res |= mt9f002_write16(mt9f002, MT9F002_VT_PIX_CLK_DIV,
+ pll->vt_pix_clk_div);
+ res |= mt9f002_write16(mt9f002, MT9F002_VT_SYS_CLK_DIV,
+ pll->vt_sys_clk_div);
+ res |= mt9f002_write16(mt9f002, MT9F002_PRE_PLL_CLK_DIV,
+ pll->pre_pll_clk_div);
+ res |= mt9f002_write16(mt9f002, MT9F002_PLL_MULTIPLIER,
+ pll->pll_multiplier);
+ res |= mt9f002_write16(mt9f002, MT9F002_OP_PIX_CLK_DIV,
+ pll->op_pix_clk_div);
+ res |= mt9f002_write16(mt9f002, MT9F002_OP_SYS_CLK_DIV,
+ pll->op_sys_clk_div);
+
+ res |= mt9f002_read16(mt9f002, MT9F002_SMIA_TEST, &smia);
+ smia = (smia & 0xFFBF) | ((pll->shift_vt_pix_clk_div & 0x01) << 6);
+ res |= mt9f002_write16(mt9f002, MT9F002_SMIA_TEST , smia);
+
+ res |= mt9f002_read16(mt9f002, MT9F002_ROW_SPEED, &row_speed);
+ row_speed = (row_speed & 0xFFF8) | (pll->row_speed_2_0 & 0x07);
+ row_speed = (row_speed & 0xF8FF) | ((pll->row_speed_10_8 & 0x07) << 8);
+ // from ASIC team : change opclk_delay
+ row_speed = (row_speed & (~0x70)) | (0x2 << 4);
+ res |= mt9f002_write16(mt9f002, MT9F002_ROW_SPEED , row_speed);
+
+ return res;
+}
+
+/*
+ * First-stage init sequence for the parallel interface: an opaque,
+ * vendor-supplied register dump (repeated RESET_REGISTER writes, a long
+ * CTX_WR_DATA_REG context stream, then analog/datapath setup).  The
+ * values come from Aptina and are written verbatim; I2C errors are
+ * OR-accumulated and 0 is returned only if every write succeeded.
+ */
+static int mt9f002_parallel_stage1_conf(struct mt9f002 *mt9f002)
+{
+ int res = 0;
+
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x0010);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x0010);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x0010);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x0010);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_14_15, 0xE525);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_CONTROL_REG, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xF873);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x08AA);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3219);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3219);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3219);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3200);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3200);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3200);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3200);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x3200);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x1769);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA6F3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xAFF3);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xF164);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xFA64);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xF164);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x276E);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x28CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x18CF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2363);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2363);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2352);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2363);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2363);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2363);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2352);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x2352);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA394);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA394);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x8F8F);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA3D4);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA394);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xA394);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x8F8F);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x8FCF);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC23);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC63);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC63);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC23);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC23);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC63);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC63);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0xDC23);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x0F73);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x85C4);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_WR_DATA_REG, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL4, 0x8000);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_14_15 , 0xE525);
+ res |= mt9f002_write16(mt9f002, MT9F002_DATA_PEDESTAL_ , 0x00A8);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x0090);
+ res |= mt9f002_write16(mt9f002, MT9F002_SERIAL_FORMAT , 0x0301);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x1090);
+ res |= mt9f002_write16(mt9f002, MT9F002_SMIA_TEST , 0x0845);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x1080);
+ res |= mt9f002_write16(mt9f002, MT9F002_DATAPATH_SELECT, 0xD880);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x9080);
+ res |= mt9f002_write16(mt9f002, MT9F002_DATAPATH_SELECT, 0xD880);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x10C8);
+ res |= mt9f002_write16(mt9f002, MT9F002_DATAPATH_SELECT, 0xD880);
+
+ return res;
+}
+
+/*
+ * Second-stage init for the parallel interface: vendor analog/column
+ * correction setup, then readout mode -- when both x and y odd
+ * increments exceed 1 the sensor is put in binned mode with Bayer
+ * resampling, otherwise plain readout with unit increments.  I2C errors
+ * are OR-accumulated; returns 0 only if every access succeeded.
+ */
+static int mt9f002_parallel_stage2_conf(struct mt9f002 *mt9f002)
+{
+ int res = 0;
+ struct mt9f002_context *ctx = &mt9f002->ctx;
+ struct mt9f002_blanking *blanking = &ctx->blanking;
+
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL4, 0x8000);
+ res |= mt9f002_write16(mt9f002, MT9F002_READ_MODE, 0x0041);
+ res |= mt9f002_write16(mt9f002, MT9F002_READ_MODE , 0x04C3);
+ res |= mt9f002_write16(mt9f002, MT9F002_READ_MODE , 0x04C3);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL5, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL5, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL5, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL5, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_28_29 , 0x0047);
+ res |= mt9f002_write16(mt9f002, MT9F002_COLUMN_CORRECTION, 0xB080);
+ res |= mt9f002_write16(mt9f002, MT9F002_COLUMN_CORRECTION, 0xB100);
+ res |= mt9f002_write16(mt9f002, MT9F002_DARK_CONTROL3 , 0x0020);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_24_25 , 0x6349);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL7, 0x800A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x90C8);
+ res |= mt9f002_write16(mt9f002, MT9F002_CTX_CONTROL_REG, 0x8005);
+ res |= mt9f002_write16(mt9f002, MT9F002_ANALOG_CONTROL7, 0x800A);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_28_29 , 0x0047);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_30_31 , 0x15F0);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_30_31 , 0x15F0);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_30_31 , 0x15F0);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_28_29 , 0x0047);
+ res |= mt9f002_write16(mt9f002, MT9F002_DAC_LD_28_29 , 0x0047);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x10C8);
+ res |= mt9f002_write16(mt9f002, MT9F002_COARSE_INTEGRATION_TIME,0x08C3);
+ res |= mt9f002_write16(mt9f002, MT9F002_DIGITAL_TEST , 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_DATAPATH_SELECT, 0xd880);
+
+ if (blanking->x_odd_inc > 1 &&
+ blanking->y_odd_inc > 1) {
+ /* NOTE(review): 0x00441 has five hex digits -- compare with
+ * the 0x0041 written in the non-binned branch; confirm the
+ * intended READ_MODE value against the datasheet. */
+ res |= mt9f002_write16(mt9f002 , MT9F002_READ_MODE,
+ 0x00441);
+ res |= mt9f002_write16(mt9f002 , MT9F002_X_ODD_INC,
+ blanking->x_odd_inc);
+ res |= mt9f002_write16(mt9f002 , MT9F002_Y_ODD_INC,
+ blanking->y_odd_inc);
+
+ // bayer resampling
+ res |= mt9f002_write16(mt9f002,
+ MT9F002_SCALING_MODE,
+ 2);
+ res |= mt9f002_write16(mt9f002,
+ MT9F002_DATAPATH_SELECT,
+ 0xd8b0);
+
+ } else {
+ res |= mt9f002_write16(mt9f002,
+ MT9F002_READ_MODE,
+ 0x0041);
+ res |= mt9f002_write16(mt9f002,
+ MT9F002_X_ODD_INC,
+ 0x0001);
+ res |= mt9f002_write16(mt9f002,
+ MT9F002_Y_ODD_INC,
+ 0x0001);
+ }
+
+ res |= mt9f002_write8(mt9f002,
+ MT9F002_MASK_CORRUPTED_FRAME,
+ 0x01);
+
+ return res;
+}
+
+static int mt9f002_mipi_stage1_conf(struct mt9f002 *mt9f002)
+{
+ uint32_t serialFormat, dataFormat;
+ struct mt9f002_platform_data *pdata = mt9f002->pdata;
+ int res = 0;
+
+ res |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER , 0x0118);
+ res |= mt9f002_write8(mt9f002, MT9F002_MODE_SELECT , 0x00);
+ if (pdata->interface == MT9F002_HiSPi) {
+ serialFormat = (3 << 8) | pdata->number_of_lanes;
+ } else {
+ serialFormat = (2 << 8) | pdata->number_of_lanes;
+ }
+
+ res |= mt9f002_write16(mt9f002, MT9F002_SERIAL_FORMAT, serialFormat);
+ dataFormat = (pdata->pixel_depth << 8) | pdata->pixel_depth;
+ res |= mt9f002_write16(mt9f002, MT9F002_CCP_DATA_FORMAT , dataFormat);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D00, 0x0435);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D02, 0x435D);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D04, 0x6698);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D06, 0xFFFF);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D08, 0x7783);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D0A, 0x101B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D0C, 0x732C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D0E, 0x4230);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D10, 0x5881);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D12, 0x5C3A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D14, 0x0140);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D16, 0x2300);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D18, 0x815F);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D1A, 0x6789);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D1C, 0x5920);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D1E, 0x0C20);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D20, 0x21C0);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D22, 0x4684);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D24, 0x4892);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D26, 0x1A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D28, 0xBA4C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D2A, 0x8D48);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D2C, 0x4641);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D2E, 0x408C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D30, 0x4784);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D32, 0x4A87);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D34, 0x561A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D36, 0x00A5);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D38, 0x1A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D3A, 0x5693);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D3C, 0x4D8D);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D3E, 0x4A47);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D40, 0x4041);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D42, 0x8200);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D44, 0x24B7);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D46, 0x0024);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D48, 0x8D4F);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D4A, 0x831A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D4C, 0x00B4);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D4E, 0x4684);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D50, 0x49CE);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D52, 0x4946);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D54, 0x4140);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D56, 0x9247);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D58, 0x844B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D5A, 0xCE4B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D5C, 0x4741);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D5E, 0x502F);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D60, 0xBD3A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D62, 0x5181);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D64, 0x5E73);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D66, 0x7C0A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D68, 0x7770);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D6A, 0x8085);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D6C, 0x6A82);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D6E, 0x6742);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D70, 0x8244);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D72, 0x831A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D74, 0x0099);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D76, 0x44DF);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D78, 0x1A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D7A, 0x8542);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D7C, 0x8567);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D7E, 0x826A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D80, 0x857C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D82, 0x6B80);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D84, 0x7000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D86, 0xB831);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D88, 0x40BE);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D8A, 0x6700);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D8C, 0x0CBD);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D8E, 0x4482);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D90, 0x7898);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D92, 0x7480);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D94, 0x5680);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D96, 0x9755);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D98, 0x8057);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D9A, 0x8056);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D9C, 0x9256);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3D9E, 0x8057);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DA0, 0x8055);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DA2, 0x817C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DA4, 0x969B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DA6, 0x56A6);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DA8, 0x44BE);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DAA, 0x000C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DAC, 0x867A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DAE, 0x9474);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DB0, 0x8A79);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DB2, 0x9367);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DB4, 0xBF6A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DB6, 0x816C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DB8, 0x8570);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DBA, 0x836C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DBC, 0x826A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DBE, 0x8245);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DC0, 0xFFFF);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DC2, 0xFFD6);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DC4, 0x4582);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DC6, 0x6A82);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DC8, 0x6C83);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DCA, 0x7000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DCC, 0x8024);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DCE, 0xB181);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DD0, 0x6859);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DD2, 0x732B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DD4, 0x4030);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DD6, 0x4982);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DD8, 0x101B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DDA, 0x4083);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DDC, 0x6785);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DDE, 0x3A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DE0, 0x8820);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DE2, 0x0C59);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DE4, 0x8546);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DE6, 0x8348);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DE8, 0xD04C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DEA, 0x8B48);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DEC, 0x4641);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DEE, 0x4083);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DF0, 0x1A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DF2, 0x8347);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DF4, 0x824A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DF6, 0x9A56);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DF8, 0x1A00);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DFA, 0x951A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DFC, 0x0056);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3DFE, 0x914D);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E00, 0x8B4A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E02, 0x4700);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E04, 0x0300);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E06, 0x2492);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E08, 0x0024);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E0A, 0x8A1A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E0C, 0x004F);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E0E, 0xB446);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E10, 0x8349);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E12, 0xB249);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E14, 0x4641);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E16, 0x408B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E18, 0x4783);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E1A, 0x4BDB);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E1C, 0x4B47);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E1E, 0x4180);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E20, 0x502B);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E22, 0x4C3A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E24, 0x4180);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E26, 0x737C);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E28, 0xD124);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E2A, 0x9068);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E2C, 0x8A20);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E2E, 0x2170);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E30, 0x8081);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E32, 0x6A67);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E34, 0x4257);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E36, 0x5544);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E38, 0x8644);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E3A, 0x9755);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E3C, 0x5742);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E3E, 0x676A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E40, 0x807D);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E42, 0x3180);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E44, 0x7000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E46, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E48, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E4A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E4C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E4E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E50, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E52, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E54, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E56, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E58, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E5A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E5C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E5E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E60, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E62, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E64, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E66, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E68, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E6A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E6C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E6E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E70, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E72, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E74, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E76, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E78, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E7A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E7C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E7E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E80, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E82, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E84, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E86, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E88, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E8A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E8C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E8E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E90, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E92, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E94, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E96, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E98, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E9A, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E9C, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3E9E, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EA0, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EA2, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EA4, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EA6, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EA8, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EAA, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EAC, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EAE, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EB0, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EB2, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EB4, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EB6, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EB8, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EBA, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EBC, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EBE, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EC0, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EC2, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EC4, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EC6, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EC8, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3ECA, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3176, 0x4000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_317C, 0xA00A);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EE6, 0x0000);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3ED8, 0xE0E0);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EE8, 0x0001);
+ res |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3064, 0x0005);
+
+ return res;
+}
+
+/*
+ * Stage 2 of the serial-interface bring-up: a single write to the
+ * manufacturer-reserved register R0x3064, issued after the stage 1
+ * register blast.  Returns the register write status.
+ */
+static int mt9f002_mipi_stage2_conf(struct mt9f002 *mt9f002)
+{
+	return mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3064, 0x0045);
+}
+
+/*
+ * Stage 3 of the serial-interface bring-up: clear the extra delay,
+ * program the reset register and two manufacturer-reserved registers.
+ * Returns 0 on success, a non-zero value if any write failed.
+ */
+static int mt9f002_mipi_stage3_conf(struct mt9f002 *mt9f002)
+{
+	int err = 0;
+
+	err |= mt9f002_write16(mt9f002, MT9F002_EXTRA_DELAY, 0x0000);
+	err |= mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x0118);
+	err |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EDC, 0x68CF);
+	err |= mt9f002_write16(mt9f002, MT9F002_RESERVED_MFR_3EE2, 0xE363);
+
+	return err;
+}
+
+/*
+ * Assert the board-provided power line (if any) and give the sensor
+ * time to settle before it is accessed.
+ */
+static void mt9f002_power_on(struct mt9f002 *mt9f002)
+{
+	if (!mt9f002->set_power)
+		return;
+
+	mt9f002->set_power(1);
+	msleep(500);
+}
+
+/* Deassert the board-provided power line when a callback is registered. */
+static void mt9f002_power_off(struct mt9f002 *mt9f002)
+{
+	if (mt9f002->set_power)
+		mt9f002->set_power(0);
+}
+
+/*
+ * Apply the requested power state.  Power-down is unconditional;
+ * power-up also soft-resets the sensor (assert then release
+ * MT9F002_SOFTWARE_RESET) so it starts from a known state.
+ *
+ * Returns 0 on success or the negative register-write error code.
+ */
+static int __mt9f002_set_power(struct mt9f002 *mt9f002, bool on)
+{
+	int ret;
+
+	if (!on) {
+		mt9f002_power_off(mt9f002);
+		return 0;
+	}
+
+	mt9f002_power_on(mt9f002);
+
+	/* Reset the chip and stop data read out */
+	ret = mt9f002_write8(mt9f002, MT9F002_SOFTWARE_RESET, 1);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Failed to reset the camera\n");
+		return ret;
+	}
+
+	/* Bail out before sleeping if the reset write failed; only hold
+	 * the reset for the settle delay when it was actually asserted. */
+	msleep(500);
+
+	return mt9f002_write8(mt9f002, MT9F002_SOFTWARE_RESET, 0);
+}
+
+/*
+ * V4L2 subdev s_power handler.  Keeps a use count under power_lock and
+ * only toggles the hardware on 0 <-> 1 transitions of that count.
+ */
+static int mt9f002_set_power(struct v4l2_subdev *subdev, int on)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+	int ret = 0;
+
+	mutex_lock(&mt9f002->power_lock);
+
+	/* Hardware state changes only when the count crosses zero. */
+	if (mt9f002->power_count == (on ? 0 : 1)) {
+		ret = __mt9f002_set_power(mt9f002, on != 0);
+		if (ret < 0)
+			goto unlock;
+	}
+
+	/* Update the power count. */
+	mt9f002->power_count += on ? 1 : -1;
+	WARN_ON(mt9f002->power_count < 0);
+
+unlock:
+	mutex_unlock(&mt9f002->power_lock);
+
+	return ret;
+}
+
+/*
+ * Program the sensor PLL and verify the resulting configuration.
+ * Returns 0 on success, -ENODEV if either step fails.
+ */
+static int mt9f002_pll_setup(struct mt9f002 *mt9f002)
+{
+	if (mt9f002_pll_conf(mt9f002) < 0) {
+		v4l2_err(&mt9f002->subdev, "failed to configure sensor pll\n");
+		return -ENODEV;
+	}
+
+	if (mt9f002_pll_check(mt9f002) < 0) {
+		v4l2_err(&mt9f002->subdev, "pll config checking failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the sensor readout window corners from the current crop
+ * rectangle (start/end addresses on both axes).  Stops at the first
+ * failing write and returns its error code.
+ */
+static int mt9f002_pos_write(struct mt9f002 *mt9f002)
+{
+	struct v4l2_rect *win = &mt9f002->crop;
+	int err;
+
+	err = mt9f002_write16(mt9f002, MT9F002_X_ADDR_START, win->left);
+	if (err < 0)
+		return err;
+
+	err = mt9f002_write16(mt9f002, MT9F002_Y_ADDR_START, win->top);
+	if (err < 0)
+		return err;
+
+	err = mt9f002_write16(mt9f002, MT9F002_X_ADDR_END,
+			      win->left + win->width - 1);
+	if (err < 0)
+		return err;
+
+	return mt9f002_write16(mt9f002, MT9F002_Y_ADDR_END,
+			       win->top + win->height - 1);
+}
+
+/*
+ * Refresh ctx->maxExposure_us: the longest exposure, in microseconds,
+ * that fits within the current line/frame lengths once the fine and
+ * coarse integration-time margins are taken out.
+ */
+static void mt9f002_exposure_max(struct mt9f002 *mt9f002)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_exposure *exposure = &ctx->exposure;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	struct mt9f002_clock *clock = &ctx->clock;
+	uint32_t fine_max;
+	uint32_t coarse_max;
+	u64 total;
+
+	fine_max = blanking->line_length
+		   - exposure->fine_integration_time_max_margin;
+	coarse_max = blanking->frame_length
+		     - exposure->coarse_integration_time_max_margin;
+
+	/* Total integration time in pixel clocks, scaled to microseconds. */
+	total = ((u64)coarse_max * blanking->line_length + fine_max) * 1000;
+	ctx->maxExposure_us = div_u64(total, clock->vt_pix_clk);
+}
+
+/*
+ * Program the output resolution and, when the residual scaler ratio is
+ * not 16/16 (1:1), enable the on-chip scaler.
+ *
+ * Returns 0 on success or the first negative write error.  The original
+ * code OR-ed later write results into a possibly-negative ret, producing
+ * a meaningless combined error code; each write is now checked in turn.
+ */
+static int mt9f002_res_write(struct mt9f002 *mt9f002)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct v4l2_mbus_framefmt *format = &mt9f002->format;
+	int ret;
+
+	// Set output resolution
+	ret = mt9f002_write16(mt9f002, MT9F002_X_OUTPUT_SIZE, format->width);
+	if (ret < 0)
+		return ret;
+
+	ret = mt9f002_write16(mt9f002, MT9F002_Y_OUTPUT_SIZE, format->height);
+	if (ret < 0)
+		return ret;
+
+	/* scaler: 16 means no residual scaling */
+	if (ctx->scaler != 16) {
+		// enable scaling mode
+		ret = mt9f002_write16(mt9f002, MT9F002_SCALING_MODE, 2);
+		if (ret < 0)
+			return ret;
+		ret = mt9f002_write16(mt9f002, MT9F002_SCALE_M, ctx->scaler);
+	}
+
+	return ret;
+}
+
+/*
+ * Compute line_length / frame_length (blanking) for the current crop,
+ * output format and requested frame period, then refresh the derived
+ * frame timings and the H/VBLANK control values.
+ *
+ * Returns 0 on success, -1 when no line/frame length combination can
+ * honour the requested frame period.
+ *
+ * Fix: minError is unsigned 64-bit, so the original "minError < 0"
+ * guard could never fire; the "no solution found" case is now detected
+ * by comparing against the ULLONG_MAX sentinel it is initialized to.
+ */
+static int mt9f002_blanking_conf(struct mt9f002 *mt9f002)
+{
+	int i, res = 0;
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	struct mt9f002_timing *timings = &ctx->timings;
+	struct mt9f002_clock *clock = &ctx->clock;
+	struct v4l2_rect *crop = &mt9f002->crop;
+	struct v4l2_mbus_framefmt *format = &mt9f002->format;
+	struct mt9f002_platform_data *pdata = mt9f002->pdata;
+	uint32_t clkRatio_num, clkRatio_den;
+	uint32_t minHBlkStep, fpgaCorrection;
+	uint64_t tmp64;
+
+	uint32_t subsamplingX_factor = (uint32_t)(1 + blanking->x_odd_inc)
+				       / (uint32_t)2;
+	uint32_t subsamplingY_factor = (uint32_t)(1 + blanking->y_odd_inc)
+				       / (uint32_t)2;
+	uint32_t minimum_line_length;
+
+	// compute minimum line length
+	minimum_line_length = blanking->min_line_length_pck;
+	minimum_line_length = max(minimum_line_length, (crop->width - 1 +
+							blanking->x_odd_inc)
+						       / subsamplingX_factor
+					      + blanking->min_line_blanking_pck);
+
+	// serial interfaces split the output over several lanes
+	if (pdata->interface == MT9F002_MIPI ||
+	    pdata->interface == MT9F002_HiSPi) {
+		minimum_line_length = max(minimum_line_length,
+					  ((uint32_t)((uint32_t)format->width
+						      * clock->vt_pix_clk
+						      / clock->op_pix_clk)
+					   / pdata->number_of_lanes)
+					  + blanking->min_line_fifo_pck);
+	} else {
+		minimum_line_length = max(minimum_line_length,
+					  ((uint32_t)((uint32_t)format->width
+					      * clock->vt_pix_clk / clock->op_pix_clk))
+					  + blanking->min_line_fifo_pck);
+	}
+
+	// first we compute clkRatio fraction
+	clkRatio_num = ctx->pll.op_sys_clk_div * ctx->pll.op_pix_clk_div
+		       * ctx->pll.row_speed_10_8
+		       * (1 + ctx->pll.shift_vt_pix_clk_div);
+	clkRatio_den = ctx->pll.vt_sys_clk_div * ctx->pll.vt_pix_clk_div;
+
+	// reduce fraction (lazy way)
+	i = clkRatio_den;
+	while (i > 1) {
+		if (((clkRatio_den % i) == 0) && ((clkRatio_num % i) == 0)) {
+			clkRatio_den = clkRatio_den / i;
+			clkRatio_num = clkRatio_num / i;
+		}
+		i--;
+	}
+
+	// then we adjust length_line to meet all the requirements:
+	// fpga length_line must be divisible by 2 (fpga constraint)
+	// and sensor length_line by clkRatio_den
+	// (so that length_line is an integer in vt based and op based)
+	minHBlkStep = clkRatio_num;
+	if (clkRatio_den % 2 != 0) {
+		minHBlkStep *= 2;
+	}
+
+	fpgaCorrection = (minimum_line_length % minHBlkStep);
+	if (fpgaCorrection) {
+		minimum_line_length += minHBlkStep - fpgaCorrection;
+	}
+
+	// save minimum line length
+	blanking->minimum_line_length = minimum_line_length;
+
+	// compute minimum frame length
+	blanking->minimum_frame_length_lines = crop->height
+		/ subsamplingY_factor + blanking->min_frame_blanking_lines;
+	// (EQ 10)
+	// compute max framerate
+	tmp64 = (u64)blanking->minimum_line_length *
+		blanking->minimum_frame_length_lines * NSEC_PER_SEC;
+	ctx->max_frameperiod_ns = div_u64((u64)tmp64, clock->vt_pix_clk * 1000);
+
+	if (ctx->frameperiod_ns < ctx->max_frameperiod_ns) {
+		// requested rate is above the maximum: run with minimal
+		// blanking on both axes
+		blanking->frame_length = blanking->minimum_frame_length_lines;
+		blanking->line_length = blanking->minimum_line_length;
+	} else {
+#ifdef CONFIG_VIDEO_MT9F002_ACCURATE_FRAME_RATE
+		// look for the best line_length/frame_length_lines couple to
+		// reach desired framerate
+		uint32_t best_line_length = 0, best_frame_length_lines = 0;
+		uint32_t line_length, frame_length_lines;
+		uint64_t max_length_line, num, den;
+		uint64_t minError = ULLONG_MAX;
+		uint64_t error;
+
+		/* Calculate constants */
+		num = (u64) ctx->frameperiod_ns * clock->vt_pix_clk;
+		max_length_line = div64_u64((u64) num,
+					    (u64) blanking->minimum_frame_length_lines *
+					    USEC_PER_SEC);
+
+		/* Find best value for each line length */
+		for (line_length = blanking->minimum_line_length;
+		     line_length <= max_length_line;
+		     line_length += minHBlkStep) {
+			/* Find best frame length (rounded division) */
+			den = (u64) line_length * USEC_PER_SEC;
+			frame_length_lines = div64_u64(num + (den / 2), den);
+
+			/* Calculate error */
+			error = abs64(((u64) frame_length_lines * line_length *
+				       USEC_PER_SEC) - num);
+
+			/* A new best has been found */
+			if (error < minError) {
+				minError = error;
+				best_line_length = line_length;
+				best_frame_length_lines = frame_length_lines;
+			}
+		}
+
+		// check that an option has been found; minError is unsigned,
+		// so test the ULLONG_MAX sentinel (the loop never ran) rather
+		// than the always-false "< 0" comparison
+		if (minError == ULLONG_MAX) {
+			v4l2_err(&mt9f002->subdev, "error, failed to configure fps\n");
+			return -1;
+		}
+
+		// save config
+		blanking->line_length = best_line_length;
+		blanking->frame_length = best_frame_length_lines;
+#else
+		/* Calculate frame length with minimal HBLANK */
+		blanking->line_length = blanking->minimum_line_length;
+		blanking->frame_length = div64_u64((u64) clock->vt_pix_clk *
+						   ctx->frameperiod_ns,
+						   (u64) blanking->line_length *
+						   USEC_PER_SEC);
+#endif
+	}
+
+	/* Calculate blanking length in frames */
+	ctx->prev_timings = *timings;
+	tmp64 = (u64) blanking->line_length * 1000LL *
+		(u64) (crop->height / subsamplingY_factor);
+	timings->frameLength_us = (u32) div_u64(tmp64, clock->vt_pix_clk);
+	tmp64 = (u64) blanking->line_length * 1000LL *
+		MT9F002_MIN_FRAME_BLANKING_LINES_DEF;
+	timings->blkStartHV_us = (u32) div_u64(tmp64, clock->vt_pix_clk);
+	tmp64 = (u64) blanking->line_length * 1000LL *
+		(u64) (blanking->frame_length -
+		       (crop->height / subsamplingY_factor) -
+		       MT9F002_MIN_FRAME_BLANKING_LINES_DEF);
+	timings->blkExtraHV_us = (u32) div_u64(tmp64, clock->vt_pix_clk);
+
+	/* Update H/VBLANK controls */
+	mt9f002->hblank->cur.val = blanking->line_length -
+				   (crop->width / subsamplingX_factor);
+	mt9f002->vblank->cur.val = blanking->frame_length -
+				   (crop->height / subsamplingY_factor);
+
+	/* Blanking changed, so the exposure ceiling must be recomputed */
+	mt9f002_exposure_max(mt9f002);
+
+	return res;
+}
+
+/*
+ * Derive the fine/coarse integration-time limits from the current
+ * binning factors (x/y_odd_inc) and scaler configuration.  Always
+ * returns 0.
+ */
+static int mt9f002_exposure_init(struct mt9f002 *mt9f002)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_exposure *exposure = &ctx->exposure;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+
+	if (blanking->x_odd_inc > 1 && blanking->y_odd_inc > 1) {
+		/* Binning on both axes */
+		exposure->fine_integration_time_min =
+			BINNING_XY_FINE_INTEGRATION_TIME_MIN;
+		exposure->fine_integration_time_max_margin =
+			BINNING_XY_FINE_INTEGRATION_TIME_MAX_MARGIN;
+	} else if (blanking->x_odd_inc > 1) {
+		/* Horizontal binning only */
+		exposure->fine_integration_time_min =
+			BINNING_X_FINE_INTEGRATION_TIME_MIN;
+		exposure->fine_integration_time_max_margin =
+			BINNING_X_FINE_INTEGRATION_TIME_MAX_MARGIN;
+	} else if (ctx->scaler != 16) {
+		/* No binning, scaler engaged */
+		exposure->fine_integration_time_max_margin =
+			SCALER_FINE_INTEGRATION_TIME_MAX_MARGIN;
+		exposure->fine_integration_time_min =
+			SCALER_FINE_INTEGRATION_TIME_MIN;
+	} else {
+		/* Plain full-resolution readout */
+		exposure->fine_integration_time_min =
+			FINE_INTEGRATION_TIME_MIN;
+		exposure->fine_integration_time_max_margin =
+			FINE_INTEGRATION_TIME_MAX_MARGIN;
+	}
+
+	exposure->coarse_integration_time_min =
+		COARSE_INTEGRATION_TIME_MIN_VAL;
+	exposure->coarse_integration_time_max_margin =
+		COARSE_INTEGRATION_TIME_MAX_MARGIN_VAL;
+
+	return 0;
+}
+
+/*
+ * Write the computed line and frame lengths to the sensor.
+ *
+ * Returns 0 on success or the first negative write error.  The original
+ * code OR-ed the two write results together, which mangles error codes
+ * when the first write fails; the first failure is now returned as-is.
+ */
+static int mt9f002_blanking_write(struct mt9f002 *mt9f002)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	int res;
+
+	// write blanking
+	res = mt9f002_write16(mt9f002,
+			      MT9F002_LINE_LENGTH_PCK,
+			      blanking->line_length);
+	if (res < 0)
+		return res;
+
+	return mt9f002_write16(mt9f002,
+			       MT9F002_FRAME_LENGTH_LINES,
+			       blanking->frame_length);
+}
+
+/*
+ * Convert the requested exposure (target_exposure_us control) into
+ * coarse/fine integration times, round the fine part into its legal
+ * range, clamp both against the sensor limits, and write the achieved
+ * exposure back into the control.
+ *
+ * Fix: the final "exposure saturation" warning tested "res == -1", but
+ * res was never assigned -1 anywhere, making the warning dead code.  An
+ * explicit "saturated" flag, set whenever a limit clamp fires, restores
+ * the intended behaviour.  The return value semantics are unchanged.
+ */
+static int mt9f002_exposure_conf(struct mt9f002 *mt9f002)
+{
+	int res = 0;
+	int saturated = 0;
+	u64 tmp64;
+	uint32_t integration;
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_exposure *exposure = &ctx->exposure;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	struct mt9f002_clock *clock = &ctx->clock;
+	u32 *exposure_us = &mt9f002->target_exposure_us->val;
+	u32 real_exposure_us;
+
+	// compute fine and coarse integration time
+	tmp64 = (u64)*exposure_us * clock->vt_pix_clk;
+	integration = div_u64((u64)tmp64, 1000);
+	exposure->coarse_integration_time = integration / blanking->line_length;
+	exposure->fine_integration_time = integration % blanking->line_length;
+
+	// fine integration must be in a specific range, round exposure if it's not the case
+	if ((exposure->fine_integration_time_min > exposure->fine_integration_time) ||
+	    (exposure->fine_integration_time >
+	     (blanking->line_length -
+	      exposure->fine_integration_time_max_margin))) {
+		// test upper and lower value, take the closest
+		int32_t upper_coarse_integration_time =
+			exposure->coarse_integration_time + 1;
+		int32_t upper_fine_integration_time =
+			exposure->fine_integration_time_min;
+
+		int32_t lower_coarse_integration_time =
+			exposure->coarse_integration_time - 1;
+		int32_t lower_fine_integration_time =
+			blanking->line_length -
+			exposure->fine_integration_time_max_margin;
+
+		// test saturation
+		if (lower_coarse_integration_time < 0) {
+			// lower case is negative, reject it
+			exposure->coarse_integration_time =
+				upper_coarse_integration_time;
+			exposure->fine_integration_time =
+				upper_fine_integration_time;
+		}
+		else if (upper_coarse_integration_time >
+			 (blanking->frame_length -
+			  exposure->coarse_integration_time_max_margin)) {
+			// upper case is too high, reject it
+			exposure->coarse_integration_time = lower_coarse_integration_time;
+			exposure->fine_integration_time = lower_fine_integration_time;
+		} else {
+			// both case are correct values
+
+			// which one is better, upper or lower case ?
+			int32_t upper_integration = blanking->line_length *
+				upper_coarse_integration_time +
+				upper_fine_integration_time;
+			int32_t lower_integration = blanking->line_length *
+				lower_coarse_integration_time +
+				lower_fine_integration_time;
+
+			// compute error
+			int32_t upper_error = upper_integration - integration;
+			int32_t lower_error = integration - lower_integration;
+
+			// choose the best
+			if (upper_error < lower_error) {
+				// take upper case
+				exposure->coarse_integration_time =
+					upper_coarse_integration_time;
+				exposure->fine_integration_time =
+					upper_fine_integration_time;
+			} else {
+				// take lower case
+				exposure->coarse_integration_time =
+					lower_coarse_integration_time;
+				exposure->fine_integration_time =
+					lower_fine_integration_time;
+			}
+		}
+	}
+
+	// check limit
+	if (exposure->fine_integration_time_min > exposure->fine_integration_time) {
+		v4l2_warn(&mt9f002->subdev, "warning, fine_integration_time too low (bottom limit %d)\n",
+			  exposure->fine_integration_time_min);
+		exposure->fine_integration_time = exposure->fine_integration_time_min;
+		saturated = 1;
+	}
+
+	if (exposure->fine_integration_time >
+	    (blanking->line_length -
+	     exposure->fine_integration_time_max_margin)) {
+		v4l2_warn(&mt9f002->subdev, "warning, fine_integration_time too high (upper limit %d)\n",
+			  (blanking->line_length -
+			   exposure->fine_integration_time_max_margin));
+		exposure->fine_integration_time = (blanking->line_length -
+			exposure->fine_integration_time_max_margin);
+		saturated = 1;
+	}
+
+	if (exposure->coarse_integration_time_min > exposure->coarse_integration_time) {
+		v4l2_warn(&mt9f002->subdev, "warning, coarse_integration_time too low (bottom limit %d)\n",
+			  exposure->coarse_integration_time_min);
+		exposure->coarse_integration_time = exposure->coarse_integration_time_min;
+		saturated = 1;
+	}
+
+	if (exposure->coarse_integration_time > (blanking->frame_length -
+		exposure->coarse_integration_time_max_margin)) {
+		v4l2_warn(&mt9f002->subdev, "warning, coarse_integration_time too high (upper limit %d) blanking->frame_length(%d) - exposure->coarse_integration_time_max_margin(%d)\n",
+			  (blanking->frame_length -
+			   exposure->coarse_integration_time_max_margin),
+			  blanking->frame_length,
+			  exposure->coarse_integration_time_max_margin);
+		exposure->coarse_integration_time = (blanking->frame_length -
+			exposure->coarse_integration_time_max_margin);
+		saturated = 1;
+	}
+
+	// compute final time exposure
+	tmp64 = ((u64)exposure->coarse_integration_time *
+		 blanking->line_length +
+		 exposure->fine_integration_time) * 1000;
+	real_exposure_us = div_u64((u64)tmp64, clock->vt_pix_clk);
+
+	// warn the user when the achieved exposure differs because a
+	// limit was hit (the old "res == -1" test could never be true)
+	if (saturated) {
+		v4l2_warn(&mt9f002->subdev, "warning, exposure saturation, ask for %d us, final time exposure %d us\n",
+			  *exposure_us, real_exposure_us);
+	}
+
+	*exposure_us = real_exposure_us;
+	mt9f002->target_exposure_us->cur.val = *exposure_us;
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+	/* Update timings */
+	ctx->prev_timings.exposure_us = ctx->timings.exposure_us;
+	ctx->timings.exposure_us = real_exposure_us;
+#endif
+
+	return res;
+}
+
+/*
+ * Write the computed coarse and fine integration times to the sensor.
+ *
+ * Returns 0 on success or the first negative write error.  The original
+ * code OR-ed the two write results together, which mangles error codes
+ * when the first write fails; the first failure is now returned as-is.
+ */
+static int mt9f002_exposure_write(struct mt9f002 *mt9f002)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_exposure *exposure = &ctx->exposure;
+	int res;
+
+	// write values
+	res = mt9f002_write16(mt9f002,
+			      MT9F002_COARSE_INTEGRATION_TIME,
+			      exposure->coarse_integration_time);
+	if (res < 0)
+		return res;
+
+	return mt9f002_write16(mt9f002,
+			       MT9F002_FINE_INTEGRATION_TIME,
+			       exposure->fine_integration_time);
+}
+
+/*
+ * Compute the video-timing geometry for the requested format/crop pair:
+ * clamp and align the crop window, choose X/Y binning-skipping factors,
+ * and derive the residual scaler ratio not covered by binning.
+ *
+ * @format: requested output frame size
+ * @crop:   requested crop window; updated in place with the clamped,
+ *          aligned rectangle actually usable by the sensor
+ * @update: when non-zero, store the chosen x/y_odd_inc and scaler ratio
+ *          into the driver context
+ *
+ * Returns 1 when the scaling configuration (odd_inc or scaler) differs
+ * from the current context, 0 otherwise.
+ */
+static int mt9f002_calc_vt(struct mt9f002 *mt9f002,
+			   struct v4l2_mbus_framefmt *format,
+			   struct v4l2_rect *crop, int update)
+{
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	struct v4l2_rect rect;
+	unsigned int x_odd_inc;
+	unsigned int y_odd_inc;
+	unsigned int hratio;
+	unsigned int vratio;
+	unsigned int width;
+	unsigned int height;
+	unsigned int ratio;
+	unsigned int xMultiple;
+	unsigned int div;
+
+	/* odd_inc value for a downscale factor of 1, 2 or 4 (index 1/2/4;
+	 * indices 0 and 3 are never produced by the clamped divider) */
+	static const u8 xy_odd_inc_tab[] = {1, 1, 3, 3, 7};
+
+	/* Clamp the crop rectangle boundaries and align them to a multiple of 2
+	 * pixels to ensure a GRBG Bayer pattern.
+	 */
+	rect.left = clamp(ALIGN(crop->left, 2),
+			  MT9F002_X_ADDR_MIN,
+			  MT9F002_X_ADDR_MAX);
+	rect.top = clamp(ALIGN(crop->top, 2),
+			 MT9F002_Y_ADDR_MIN,
+			 MT9F002_Y_ADDR_MAX);
+	rect.width = clamp(ALIGN(crop->width, 2), 1,
+			   MT9F002_PIXEL_ARRAY_WIDTH);
+	rect.height = clamp(ALIGN(crop->height, 2), 1,
+			    MT9F002_PIXEL_ARRAY_HEIGHT);
+
+	/* Keep the window inside the pixel array */
+	rect.width = min(rect.width, MT9F002_PIXEL_ARRAY_WIDTH - rect.left);
+	rect.height = min(rect.height, MT9F002_PIXEL_ARRAY_HEIGHT - rect.top);
+
+	/* Clamp the width and height to avoid dividing by zero. */
+	width = clamp_t(unsigned int, ALIGN(format->width, 2),
+			max(rect.width / 8, (__s32) MT9F002_WINDOW_WIDTH_MIN),
+			rect.width);
+	height = clamp_t(unsigned int, ALIGN(format->height, 2),
+			 max((rect.height / 8), (__s32) MT9F002_WINDOW_HEIGHT_MIN),
+			 rect.height);
+
+	/* Calculate binning / skipping for X */
+	div = rect.width / width;
+	div = clamp_t(unsigned short, div,
+		      1U, 4U);
+	x_odd_inc = xy_odd_inc_tab[div];
+
+	/* Calculate binning / skipping for Y */
+	div = rect.height / height;
+	div = clamp_t(unsigned short, div,
+		      1U, 4U);
+	y_odd_inc = xy_odd_inc_tab[div];
+
+	/* Align left offset to 8 */
+	xMultiple = 8 * ((x_odd_inc + 1) / 2);
+	if (rect.left % xMultiple)
+		rect.left -= (rect.left % xMultiple);
+
+	/* Calculate remaining scaling not handled by binning / skipping,
+	 * as a ratio in SCALER_N units, rounded up */
+	hratio = ((rect.width / ((x_odd_inc + 1) / 2) * SCALER_N) + width - 1) /
+		 width;
+	vratio = ((rect.height / ((y_odd_inc + 1) / 2) * SCALER_N) + height -
+		  1) / height;
+	ratio = min(hratio, vratio);
+
+	/* Check ratio */
+	if (ratio > SCALER_M_MAX)
+	{
+		/* Fix ratio to maximum and adjust the crop window */
+		ratio = SCALER_M_MAX;
+		rect.width = format->width * ratio * ((x_odd_inc + 1) / 2) /
+			     SCALER_N;
+		rect.height = format->height * ratio * ((y_odd_inc + 1) / 2) /
+			      SCALER_N;
+	}
+
+	/* Update crop */
+	*crop = rect;
+
+	/* Check if scaling configuration has changed */
+	if (blanking->x_odd_inc != x_odd_inc ||
+	    blanking->y_odd_inc != y_odd_inc || ctx->scaler != ratio) {
+		/* Update values */
+		if (update) {
+			blanking->x_odd_inc = x_odd_inc;
+			blanking->y_odd_inc = y_odd_inc;
+			ctx->scaler = ratio;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Recompute the full timing chain (window/binning, blanking, exposure) for
+ * the active format and crop.  Returns 0 on success, a negative error code
+ * otherwise (an error message is logged for the failing step). */
+static int mt9f002_update_timings(struct mt9f002 *mt9f002)
+{
+	int err;
+
+	err = mt9f002_calc_vt(mt9f002, &mt9f002->format, &mt9f002->crop, 1);
+	if (err < 0)
+		v4l2_err(&mt9f002->subdev, "Unable to compute vt timing and binning\n");
+	else if ((err = mt9f002_blanking_init(mt9f002)) < 0)
+		v4l2_err(&mt9f002->subdev, "Blanking initialisation failed\n");
+	else if ((err = mt9f002_exposure_init(mt9f002)) < 0)
+		v4l2_err(&mt9f002->subdev, "Exposition initialization failed\n");
+	else if ((err = mt9f002_blanking_conf(mt9f002)) < 0)
+		v4l2_err(&mt9f002->subdev, "Blanking configuration failed\n");
+	else
+		return 0;
+
+	return err;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+/* Return the TRY (per file-handle) or ACTIVE (driver state) format for @pad,
+ * or NULL for an unknown @which value. */
+static struct v4l2_mbus_framefmt *
+__mt9f002_get_pad_format(struct mt9f002 *mt9f002, struct v4l2_subdev_fh *fh,
+			 unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &mt9f002->format;
+	return NULL;
+}
+
+/* Return the TRY (per file-handle) or ACTIVE (driver state) crop rectangle
+ * for @pad, or NULL for an unknown @which value. */
+static struct v4l2_rect *
+__mt9f002_get_pad_crop(struct mt9f002 *mt9f002, struct v4l2_subdev_fh *fh,
+		       unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_crop(fh, pad);
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &mt9f002->crop;
+	return NULL;
+}
+
+/* Pad op: report the current TRY or ACTIVE media bus format. */
+static int mt9f002_get_format(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *format)
+{
+	struct mt9f002 *sensor = to_mt9f002(subdev);
+	const struct v4l2_mbus_framefmt *fmt;
+
+	fmt = __mt9f002_get_pad_format(sensor, fh, format->pad, format->which);
+	format->format = *fmt;
+
+	return 0;
+}
+
+/* Pad op: set the TRY or ACTIVE media bus format.
+ *
+ * The requested size is aligned to 2 pixels and clamped to the sensor
+ * limits; remaining scaling is handled through cropping.  For ACTIVE
+ * formats on an idle sensor, timings are recomputed immediately.
+ */
+static int mt9f002_set_format(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_fh *fh,
+			      struct v4l2_subdev_format *format)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+	struct v4l2_mbus_framefmt *__format;
+
+	/* Get format */
+	__format = __mt9f002_get_pad_format(mt9f002, fh, format->pad,
+					    format->which);
+	if (__format == NULL)
+		return -EINVAL;
+
+	/* Align and fix the format: scaling is done with cropping.
+	 * Fix: these two assignments were previously chained with comma
+	 * operators instead of being terminated by semicolons. */
+	__format->width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
+				  MT9F002_WINDOW_WIDTH_MIN,
+				  MT9F002_PIXEL_ARRAY_WIDTH);
+	__format->height = clamp_t(unsigned int,
+				   ALIGN(format->format.height, 2),
+				   MT9F002_WINDOW_HEIGHT_MIN,
+				   MT9F002_PIXEL_ARRAY_HEIGHT);
+
+	/* Report back the format actually applied */
+	format->format = *__format;
+
+	/* Update crop/timings only for ACTIVE formats when not streaming */
+	if (format->which != V4L2_SUBDEV_FORMAT_TRY && !mt9f002->isStreaming)
+		mt9f002_update_timings(mt9f002);
+
+	return 0;
+}
+
+/* Pad op: report the crop bounds (full pixel array) or the current crop
+ * rectangle.  Other selection targets are rejected. */
+static int mt9f002_get_selection(struct v4l2_subdev *subdev,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct mt9f002 *sensor = to_mt9f002(subdev);
+
+	if (sel->target == V4L2_SEL_TGT_CROP_BOUNDS) {
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = MT9F002_PIXEL_ARRAY_WIDTH;
+		sel->r.height = MT9F002_PIXEL_ARRAY_HEIGHT;
+		return 0;
+	}
+
+	if (sel->target == V4L2_SEL_TGT_CROP) {
+		sel->r = *__mt9f002_get_pad_crop(sensor, fh, sel->pad,
+						 sel->which);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Pad op: set the crop rectangle.
+ *
+ * For TRY selections only the file-handle state is touched.  For ACTIVE
+ * selections on an idle sensor the blanking/exposure chain is recomputed
+ * directly.  While streaming, the whole new window configuration is sent
+ * as one grouped-parameter-hold I2C burst; when the window change makes
+ * the sensor mask the next frame, the FRAME_SYNC timer is delayed (or the
+ * capture restarted) accordingly.
+ */
+static int mt9f002_set_selection(struct v4l2_subdev *subdev,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+	struct i2c_client *client = mt9f002->i2c_client;
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_blanking *blanking = &ctx->blanking;
+	struct mt9f002_exposure *exposure = &ctx->exposure;
+	struct mt9f002_timing *timings = &ctx->timings;
+	struct v4l2_mbus_framefmt *__format;
+	struct v4l2_rect *__crop;
+	struct v4l2_rect old_crop;
+	struct mt9f002_timer *timer;
+	/* 16 slots cover the worst case below (15 messages) */
+	struct i2c_msg msg[16];
+	struct {
+		u16 reg;
+		u16 val;
+	} __packed msg_buffer[16];
+	int msg_len = 0;
+	u16 read_mode = 0x0041;	/* No binning / skipping */
+	ktime_t ktime;
+	int32_t delay;
+	int recalc_blk;
+	int ret = 0;	/* Fix: was read uninitialized by "ret |=" below */
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_CROP:
+		old_crop = mt9f002->crop;
+		break;
+	default:
+		v4l2_err(&mt9f002->subdev, "selection target (%d) not supported yet\n",
+			 sel->target);
+		return -EINVAL;
+	}
+
+	/* Get format and crop */
+	__format = __mt9f002_get_pad_format(mt9f002, fh, sel->pad, sel->which);
+	__crop = __mt9f002_get_pad_crop(mt9f002, fh, sel->pad, sel->which);
+	*__crop = sel->r;
+
+	/* Calculate capture window for crop */
+	recalc_blk = mt9f002_calc_vt(mt9f002, __format, __crop,
+				     sel->which == V4L2_SUBDEV_FORMAT_TRY ? 0 : 1);
+	sel->r = *__crop;
+
+	/* Do not update crop config if try */
+	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+		return 0;
+
+	/* Sensor is not streaming: apply through the normal register path */
+	if (!mt9f002->isStreaming) {
+		/* Recalculate blanking only when the geometry changed */
+		if (recalc_blk || old_crop.width != mt9f002->crop.width ||
+		    old_crop.height != mt9f002->crop.height) {
+			/* Reconfigure blanking */
+			mt9f002_blanking_init(mt9f002);
+			mt9f002_blanking_conf(mt9f002);
+
+			/* Reconfigure exposure */
+			mt9f002_exposure_init(mt9f002);
+			mt9f002_exposure_conf(mt9f002);
+		}
+
+		return 0;
+	}
+
+/* Queue one register write (reg is always 16-bit, value is l bits) into the
+ * msg/msg_buffer arrays; values are byte-swapped for the big-endian sensor. */
+#define ADD(r, v, l)	do { \
+				msg_buffer[msg_len].reg = swab16(r); \
+				msg_buffer[msg_len].val = swab16(v); \
+				msg[msg_len].addr = client->addr; \
+				msg[msg_len].flags = 0; \
+				msg[msg_len].len = 2 + (l / 8); \
+				msg[msg_len].buf = (u8 *)&msg_buffer[msg_len]; \
+				msg_len++; \
+			} while(0)
+#define ADD8(r, v)	ADD(r, v, 8)
+#define ADD16(r, v)	ADD(r, v, 16)
+
+	/* Prepare new configuration (held until the closing GROUPED_PARAMETER_HOLD) */
+	ADD8(MT9F002_GROUPED_PARAMETER_HOLD, 1);
+
+	/* Set position */
+	ADD16(MT9F002_X_ADDR_START, mt9f002->crop.left);
+	ADD16(MT9F002_Y_ADDR_START, mt9f002->crop.top);
+	ADD16(MT9F002_X_ADDR_END, mt9f002->crop.left + mt9f002->crop.width - 1);
+	ADD16(MT9F002_Y_ADDR_END, mt9f002->crop.top + mt9f002->crop.height - 1);
+
+	/* Sensor binning and scaler configuration not changed */
+	if (!recalc_blk)
+		goto end;
+
+	/* Reconfigure blanking */
+	ret |= mt9f002_blanking_init(mt9f002);
+	ret |= mt9f002_blanking_conf(mt9f002);
+	ADD16(MT9F002_LINE_LENGTH_PCK, blanking->line_length);
+	ADD16(MT9F002_FRAME_LENGTH_LINES, blanking->frame_length);
+
+	/* Reconfigure exposure */
+	ret |= mt9f002_exposure_init(mt9f002);
+	ret |= mt9f002_exposure_conf(mt9f002);
+	/* Fix: these results were previously accumulated but never checked */
+	if (ret) {
+		v4l2_err(&mt9f002->subdev, "Blanking/exposure reconfiguration failed\n");
+		return ret;
+	}
+	ADD16(MT9F002_FINE_INTEGRATION_TIME, exposure->fine_integration_time);
+	ADD16(MT9F002_COARSE_INTEGRATION_TIME,
+	      exposure->coarse_integration_time);
+
+	/* Set scaling */
+	ADD16(MT9F002_SCALING_MODE, 2);
+	ADD16(MT9F002_SCALE_M, ctx->scaler);
+
+	/* Binning activated for X/Y or only X */
+	if (blanking->y_odd_inc > 1)
+		read_mode = 0x0441;
+	else if (blanking->x_odd_inc > 1)
+		read_mode = 0x0841;
+
+	/* Set binning / skipping */
+	ADD16(MT9F002_READ_MODE, read_mode);
+	ADD16(MT9F002_X_ODD_INC, blanking->x_odd_inc);
+	ADD16(MT9F002_Y_ODD_INC, blanking->y_odd_inc);
+
+end:
+	/* Apply new configuration */
+	ADD8(MT9F002_GROUPED_PARAMETER_HOLD, 0);
+
+#undef ADD16
+#undef ADD8
+#undef ADD
+
+	/* Send I2C messages in one shot */
+	ret = i2c_transfer(client->adapter, msg, msg_len);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Position configuration failed\n");
+		return ret;
+	}
+	/* Fix: i2c_transfer() returns the number of messages sent on success;
+	 * do not leak that positive count to the caller */
+	ret = 0;
+
+	/* When the Y position or size of crop window is changed, the next
+	 * incoming frame is considered as bad by sensor. By default, this frame
+	 * is masked by inhibition of V/HSYNC signals during this frame.
+	 * This implies that next FRAME_SYNC event must be delayed from one
+	 * frame. */
+	if (old_crop.top != mt9f002->crop.top ||
+	    old_crop.width != mt9f002->crop.width ||
+	    old_crop.height != mt9f002->crop.height) {
+		/* Get timer */
+		timer = &mt9f002->fsync_timers[!mt9f002->next_timer];
+		if (!hrtimer_active(&timer->hrtimer)) {
+			/* A bad frame will occur and must be added in next
+			 * timer start (on next VSYNC BOTTOM) */
+			mt9f002->drop = 1;
+			return ret;
+		}
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+		/* Restart capture */
+		mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x12CE);
+
+		/* Add a delay of exposure time instead of a frame (in us) */
+		delay = timings->exposure_us + timings->frameLength_us +
+			timings->blkExtraHV_us - MT9F002_CROP_WRITE_US -
+			mt9f002->fsync_delay_us;
+#else
+		/* Add a delay of one frame to timer (in ns) */
+		delay = (timings->blkExtraHV_us + timings->blkStartHV_us +
+			 ctx->timings.frameLength_us) * 1000;
+#endif
+
+		/* Cancel timer */
+		hrtimer_cancel(&timer->hrtimer);
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+		/* Calculate new timer delay */
+		ktime = ktime_set(0, delay * 1000);
+#else
+		/* Calculate new timer delay.
+		 * NOTE(review): hrtimer_get_remaining() is called after
+		 * hrtimer_cancel(); presumably it still reports the old
+		 * expiry — confirm this is intentional. */
+		ktime = ktime_add(ktime_set(0, delay),
+				  hrtimer_get_remaining(&timer->hrtimer));
+#endif
+
+		/* Restart timer */
+		hrtimer_start(&timer->hrtimer, ktime, HRTIMER_MODE_REL);
+	}
+
+	return ret;
+}
+
+#define V4L2_CID_GAIN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1001)
+#define V4L2_CID_GAIN_GREEN_RED (V4L2_CTRL_CLASS_CAMERA | 0x1002)
+#define V4L2_CID_GAIN_GREEN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1003)
+#define V4L2_CID_GAIN_BLUE (V4L2_CTRL_CLASS_CAMERA | 0x1004)
+#define V4L2_CID_EXPOSURE_MAX (V4L2_CTRL_CLASS_CAMERA | 0x1005)
+
+/* Private controls
+ * MT9F002_CID_FRAME_SYNC_DELAY: this control allows setting the delay the
+ *                               user needs before the end of the safe window
+ *                               during which set_crop will apply immediately
+ *                               (in us).
+ */
+#define MT9F002_CID_FRAME_SYNC_DELAY (V4L2_CTRL_CLASS_CAMERA | 0x1006)
+
+/* Convert a user gain (expressed in tenths: 10 == x1.0) into the sensor's
+ * per-channel gain register layout (colamp / analog3 / digital / analog2
+ * stages, per datasheet table 19 p56) and write it to @reg.
+ *
+ * Out-of-range requests are clamped and reported with res = -1; the clamped
+ * value is still written to the sensor.
+ */
+static int mt9f002_gain_value(struct mt9f002 *mt9f002, u16 reg, s32 gain)
+{
+	int res = 0;
+
+	pixelGain_t pixelGain;
+	uint32_t analogGain;
+
+	/* Gains below x1.0 are not supported */
+	if (gain < 10) {
+		v4l2_err(&mt9f002->subdev, "error, gain (%d) too low (<1.0)\n", gain);
+		res = -1;
+	}
+
+	if (!res) {
+		/* Select the coarse gain stages per datasheet table 19 p56 */
+		if (gain < 15) {
+			/* warning, gain < 1.5 are not recommended */
+			v4l2_warn(&mt9f002->subdev, "warning, gain (%d) <1.5 is not recommended\n", gain);
+			pixelGain.colamp_gain = 0;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 1;
+		} else if (gain < 30) {
+			pixelGain.colamp_gain = 1;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 1;
+		} else if (gain < 60) {
+			pixelGain.colamp_gain = 2;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 1;
+		} else if (gain < 160) {
+			pixelGain.colamp_gain = 3;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 1;
+		} else if (gain < 320) {
+			pixelGain.colamp_gain = 3;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 2;
+		} else {
+			pixelGain.colamp_gain = 3;
+			pixelGain.analog_gain3 = 0;
+			pixelGain.digital_gain = 4;
+		}
+
+		/* Compute the remaining fine gain (analog_gain2, in 1/64
+		 * steps) after dividing out the coarse stages; the x1000000
+		 * scaling preserves precision through the integer divisions */
+		analogGain = (gain * 1000000)/(pixelGain.digital_gain)
+				/ (1<<pixelGain.colamp_gain)
+				/ (1<<pixelGain.analog_gain3)
+				* 64
+				/ 10
+				/ 1000000;
+
+		/* Clamp analog_gain2 to its valid 7-bit range [1, 127] */
+		if (analogGain == 0) {
+			v4l2_err(&mt9f002->subdev, "error, analogGain (%d) too low (<1) \n", analogGain);
+			analogGain = 1;
+			res = -1;
+		} else if (analogGain > 127) {
+			v4l2_err(&mt9f002->subdev, "error, analogGain (%d) too high (>127) \n", analogGain);
+			analogGain = 127;
+			res = -1;
+		}
+
+		if (analogGain < 48) {
+			v4l2_warn(&mt9f002->subdev, "warning, analogGain (%d) <48 is not recommended\n", analogGain);
+		}
+
+		pixelGain.analog_gain2 = analogGain;
+		/* NOTE: a successful write overwrites any earlier res = -1 */
+		res = mt9f002_write16(mt9f002, reg, pixelGain.val);
+	}
+
+	return res;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev control operations
+ */
+/* Apply a V4L2 control value to the hardware.
+ *
+ * When the sensor is idle, control values are only cached by the control
+ * framework and applied later by v4l2_ctrl_handler_setup() at stream-on.
+ */
+static int mt9f002_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	u8 data;
+	static const u16 gains[4] = {
+		MT9F002_RED_GAIN, MT9F002_GREEN1_GAIN,
+		MT9F002_GREEN2_GAIN, MT9F002_BLUE_GAIN
+	};
+
+	struct mt9f002 *mt9f002 =
+			container_of(ctrl->handler, struct mt9f002, ctrls);
+	unsigned int i;
+	int ret;
+
+	/* If not streaming, just keep internal structures up-to-date */
+	if (!mt9f002->isStreaming)
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_VFLIP:
+		/* Read-modify-write bit 1 of the orientation register */
+		ret = mt9f002_read8(mt9f002,
+				    MT9F002_IMAGE_ORIENTATION,
+				    &data);
+		if (ret < 0)
+			return -EIO;
+
+		if (ctrl->val)
+			ret = mt9f002_write8(mt9f002,
+					     MT9F002_IMAGE_ORIENTATION,
+					     data | (1 << 1));
+		else
+			ret = mt9f002_write8(mt9f002,
+					     MT9F002_IMAGE_ORIENTATION,
+					     data & ~(1 << 1));
+		/* Fix: the write result was previously ignored */
+		if (ret < 0)
+			return -EIO;
+
+		/* Read back to let the sensor latch the new orientation */
+		return mt9f002_read8(mt9f002,
+				     MT9F002_IMAGE_ORIENTATION,
+				     &data);
+
+	case V4L2_CID_HFLIP:
+		/* Read-modify-write bit 0 of the orientation register */
+		ret = mt9f002_read8(mt9f002,
+				    MT9F002_IMAGE_ORIENTATION,
+				    &data);
+		if (ret < 0)
+			return -EIO;
+
+		if (ctrl->val)
+			ret = mt9f002_write8(mt9f002,
+					     MT9F002_IMAGE_ORIENTATION,
+					     data | (1 << 0));
+		else
+			ret = mt9f002_write8(mt9f002,
+					     MT9F002_IMAGE_ORIENTATION,
+					     data & ~(1 << 0));
+		/* Fix: the write result was previously ignored */
+		if (ret < 0)
+			return -EIO;
+
+		return mt9f002_read8(mt9f002,
+				     MT9F002_IMAGE_ORIENTATION,
+				     &data);
+
+	case V4L2_CID_GAIN_RED:
+	case V4L2_CID_GAIN_GREEN_RED:
+	case V4L2_CID_GAIN_GREEN_BLUE:
+	case V4L2_CID_GAIN_BLUE:
+
+		/* Update all four gain channels so they stay consistent. */
+		for (i = 0; i < 4; ++i) {
+			struct v4l2_ctrl *gain = mt9f002->gains[i];
+
+			ret = mt9f002_gain_value(mt9f002, gains[i], gain->val);
+			if (ret < 0)
+				return -EIO;
+		}
+
+		return 0;
+
+	case V4L2_CID_EXPOSURE_ABSOLUTE:
+
+		ret = mt9f002_exposure_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "Exposure configuration failed\n");
+			return ret;
+		}
+
+		ret = mt9f002_exposure_write(mt9f002);
+		if (ret < 0)
+		{
+			v4l2_err(&mt9f002->subdev, "fatal error, exposure configuration failed\n");
+			return ret;
+		}
+
+		return 0;
+
+	case V4L2_CID_TEST_PATTERN:
+
+		if (!ctrl->val) {
+			return mt9f002_write16(mt9f002,
+					       MT9F002_TEST_PATTERN,
+					       0);
+		}
+
+		/* Program the solid-color pattern components */
+		ret = mt9f002_write16(mt9f002, MT9F002_TEST_PATTERN_GREENR,
+				      0x05a0);
+		if (ret < 0)
+			return ret;
+		ret = mt9f002_write16(mt9f002, MT9F002_TEST_PATTERN_RED,
+				      0x0a50);
+		if (ret < 0)
+			return ret;
+		ret = mt9f002_write16(mt9f002, MT9F002_TEST_PATTERN_BLUE,
+				      0x0aa0);
+		if (ret < 0)
+			return ret;
+		/* Fix: TEST_PATTERN_GREENB was previously written twice */
+		ret = mt9f002_write16(mt9f002, MT9F002_TEST_PATTERN_GREENB,
+				      0x0aa0);
+		if (ret < 0)
+			return ret;
+
+		return mt9f002_write16(mt9f002,
+				       MT9F002_TEST_PATTERN,
+				       ctrl->val);
+
+	case MT9F002_CID_FRAME_SYNC_DELAY:
+		mt9f002->fsync_delay_us = ctrl->val;
+		break;
+	}
+
+	return 0;
+}
+
+/* Report run-time (volatile) control values; while the sensor is idle the
+ * control's default value is returned instead. */
+static int mt9f002_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct mt9f002 *mt9f002 =
+			container_of(ctrl->handler, struct mt9f002, ctrls);
+
+	if (!mt9f002->isStreaming) {
+		ctrl->val = ctrl->default_value;
+	} else if (ctrl->id == V4L2_CID_EXPOSURE_MAX) {
+		/* Maximum exposure reachable with the current timings */
+		ctrl->val = mt9f002->ctx.maxExposure_us;
+	}
+
+	return 0;
+}
+
+/* Control operations: s_ctrl pushes values to the hardware while streaming,
+ * g_volatile_ctrl reports run-time values (e.g. maximum exposure). */
+static struct v4l2_ctrl_ops mt9f002_ctrl_ops = {
+	.g_volatile_ctrl = mt9f002_g_volatile_ctrl,
+	.s_ctrl = mt9f002_s_ctrl,
+};
+
+/* Per-channel gain controls; values are expressed in tenths (10 == x1.0),
+ * see mt9f002_gain_value(). */
+static const struct v4l2_ctrl_config mt9f002_gains[] = {
+	{
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_GAIN_RED,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "Gain, Red x10",
+		.min		= MT9F002_GLOBAL_GAIN_MIN,
+		.max		= MT9F002_GLOBAL_GAIN_MAX,
+		.step		= 1,
+		.def		= MT9F002_GLOBAL_GAIN_DEF,
+		.flags		= 0,
+	}, {
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_GAIN_GREEN_RED,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "Gain, Green (R) x10",
+		.min		= MT9F002_GLOBAL_GAIN_MIN,
+		.max		= MT9F002_GLOBAL_GAIN_MAX,
+		.step		= 1,
+		.def		= MT9F002_GLOBAL_GAIN_DEF,
+		.flags		= 0,
+	}, {
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_GAIN_GREEN_BLUE,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "Gain, Green (B) x10",
+		.min		= MT9F002_GLOBAL_GAIN_MIN,
+		.max		= MT9F002_GLOBAL_GAIN_MAX,
+		.step		= 1,
+		.def		= MT9F002_GLOBAL_GAIN_DEF,
+		.flags		= 0,
+	}, {
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_GAIN_BLUE,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "Gain, Blue x10",
+		.min		= MT9F002_GLOBAL_GAIN_MIN,
+		.max		= MT9F002_GLOBAL_GAIN_MAX,
+		.step		= 1,
+		.def		= MT9F002_GLOBAL_GAIN_DEF,
+		.flags		= 0,
+	},
+};
+
+/* Menu for V4L2_CID_TEST_PATTERN; the selected index is written verbatim to
+ * the MT9F002_TEST_PATTERN register (see mt9f002_s_ctrl()). */
+static const char * const mt9f002_test_pattern_menu[] = {
+	"Disabled",
+	"Solid color",
+	"100% color bar",
+	"Fade-to-gray color bar",
+};
+
+/* Additional custom controls: test pattern selection, the volatile maximum
+ * exposure (read-only at run time) and the FRAME_SYNC user delay. */
+static const struct v4l2_ctrl_config mt9f002_ctrls[] = {
+	{
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_TEST_PATTERN,
+		.type		= V4L2_CTRL_TYPE_MENU,
+		.name		= "Test Pattern",
+		.min		= 0,
+		.max		= ARRAY_SIZE(mt9f002_test_pattern_menu) - 1,
+		.step		= 0,
+		.def		= 0,
+		.flags		= 0,
+		.menu_skip_mask = 0,
+		.qmenu		= mt9f002_test_pattern_menu,
+	}, {
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= V4L2_CID_EXPOSURE_MAX,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "Exposure max",
+		.min		= 0,
+		.max		= 60000,
+		.step		= 1,
+		.def		= MT9F002_EXPOSURE_DEF,
+		.flags		= V4L2_CTRL_FLAG_VOLATILE,
+	}, {
+		.ops		= &mt9f002_ctrl_ops,
+		.id		= MT9F002_CID_FRAME_SYNC_DELAY,
+		.type		= V4L2_CTRL_TYPE_INTEGER,
+		.name		= "User delay for Frame sync",
+		.min		= 0,
+		.max		= USEC_PER_SEC,
+		.step		= 1,
+		.def		= MT9F002_DEFAULT_USER_US,
+		.flags		= 0,
+	},
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev internal operations
+ */
+/* Subdev internal op: detect the sensor when the subdevice is registered.
+ * Powers the sensor on, checks the chip version register, then powers it
+ * back off.  Returns 0 when the chip is detected, -ENODEV otherwise. */
+static int mt9f002_registered(struct v4l2_subdev *subdev)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+	struct i2c_client *client = mt9f002->i2c_client;
+	u16 data = 0;
+	int ret = 0;
+
+	dev_info(&client->dev, "Probing mt9f002 at address 0x%02x\n",
+			client->addr);
+
+	mt9f002_power_on(mt9f002);
+
+	/* Read and check the sensor version.
+	 * Fix: check the read result, and power the sensor back off on the
+	 * error path instead of leaving it on. */
+	ret = mt9f002_read16(mt9f002, MT9F002_CHIP_VERSION, &data);
+	if (ret < 0 || data != MT9F002_CHIP_ID) {
+		dev_err(&client->dev, "mt9f002 not detected, wrong version "
+				"0x%04x\n", data);
+		mt9f002_power_off(mt9f002);
+		return -ENODEV;
+	}
+
+	mt9f002_power_off(mt9f002);
+
+	dev_info(&client->dev, "mt9f002 detected at address 0x%02x\n",
+			client->addr);
+
+	return 0;
+}
+
+/* Pad op: enumerate media bus codes — the sensor exposes a single format. */
+static int mt9f002_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct mt9f002 *sensor = to_mt9f002(sd);
+
+	if (code->index != 0)
+		return -EINVAL;
+
+	code->code = sensor->format.code;
+
+	return 0;
+}
+
+/* Start or stop streaming.
+ *
+ * Stream-off simply clears MODE_SELECT and powers the sensor down.
+ * Stream-on runs the full bring-up sequence in hardware-mandated order:
+ * interface stage 1 (MIPI/HiSPi or parallel), PLL, interface stage 2,
+ * timing computation, exposure/resolution/position/blanking register
+ * writes, interface stage 3 (serial only), MODE_SELECT = 1 and finally
+ * v4l2_ctrl_handler_setup() to apply the cached control values.  Any
+ * failure rolls back through the power_off label.
+ */
+static int mt9f002_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	int ret;
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+	struct mt9f002_platform_data *pdata = mt9f002->pdata;
+
+	if (!enable) {
+		mt9f002->isStreaming = false;
+		mt9f002_write8(mt9f002, MT9F002_MODE_SELECT, 0);
+		return mt9f002_set_power(subdev, 0);
+	}
+
+	/* Restart the FRAME_SYNC sequence numbering */
+	mt9f002->frame = 0;
+
+	ret = mt9f002_set_power(subdev, 1);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "power failure\n");
+		return ret;
+	}
+
+	if (pdata->interface == MT9F002_MIPI ||
+	    pdata->interface == MT9F002_HiSPi ) {
+		ret = mt9f002_mipi_stage1_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "mipi conf stage 1 failed\n");
+			goto power_off;
+		}
+	} else {
+		ret = mt9f002_parallel_stage1_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "parallel conf stage 1 failed\n");
+			goto power_off;
+		}
+	}
+
+	ret = mt9f002_pll_write(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Could not write pll configuration\n");
+		goto power_off;
+	}
+
+	if (pdata->interface == MT9F002_MIPI ||
+	    pdata->interface == MT9F002_HiSPi ) {
+		ret = mt9f002_mipi_stage2_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "mipi conf stage 2 failed\n");
+			goto power_off;
+		}
+	} else {
+		ret = mt9f002_parallel_stage2_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "parallel conf stage 2 failed\n");
+			goto power_off;
+		}
+	}
+
+	/* Recompute window/blanking/exposure for the active format */
+	ret = mt9f002_update_timings(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev,
+			 "Unable to calculate Video Timing\n");
+		goto power_off;
+	}
+
+	ret = mt9f002_exposure_conf(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Exposure configuration failed\n");
+		goto power_off;
+	}
+
+	ret = mt9f002_res_write(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Resolution configuration failed\n");
+		goto power_off;
+	}
+
+	ret = mt9f002_pos_write(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Position configuration failed\n");
+		goto power_off;
+	}
+
+	ret = mt9f002_blanking_write(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Writing blanking configuration failed\n");
+		goto power_off;
+	}
+
+	ret = mt9f002_exposure_write(mt9f002);
+	if (ret < 0)
+	{
+		v4l2_err(&mt9f002->subdev, "fatal error, exposure configuration failed\n");
+		goto power_off;
+	}
+
+	if (pdata->interface == MT9F002_MIPI ||
+	    pdata->interface == MT9F002_HiSPi ) {
+		ret = mt9f002_mipi_stage3_conf(mt9f002);
+		if (ret < 0) {
+			v4l2_err(&mt9f002->subdev, "Mipi conf stage 3 failed\n");
+			goto power_off;
+		}
+	}
+
+	/* Start the sensor */
+	mt9f002_write8(mt9f002, MT9F002_MODE_SELECT, 1);
+
+	mt9f002->isStreaming = true;
+
+	/* Apply the control values cached while the sensor was idle */
+	ret = v4l2_ctrl_handler_setup(&mt9f002->ctrls);
+	if (ret) {
+		v4l2_err(&mt9f002->subdev, "v4l2_ctrl_handler_setup failure\n");
+		goto power_off;
+	}
+
+	return 0;
+
+power_off:
+	mt9f002_write8(mt9f002, MT9F002_MODE_SELECT, 0);
+	mt9f002->isStreaming = false;
+	mt9f002_set_power(&mt9f002->subdev, 0);
+
+	return ret;
+}
+
+/* Video op: report the current frame interval, reduced to lowest terms. */
+static int mt9f002_g_frame_interval(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_frame_interval *fi)
+{
+	struct mt9f002 *sensor = to_mt9f002(sd);
+	unsigned divisor;
+
+	memset(fi, 0, sizeof(*fi));
+	fi->interval.numerator = sensor->ctx.frameperiod_ns;
+	fi->interval.denominator = NSEC_PER_SEC;
+
+	/* Reduce the fraction before handing it back */
+	divisor = gcd(fi->interval.numerator, fi->interval.denominator);
+	fi->interval.numerator /= divisor;
+	fi->interval.denominator /= divisor;
+
+	return 0;
+}
+
+/* Video op: set the frame interval (defaults to 1/30 s on degenerate input)
+ * and recompute timings when the sensor is idle. */
+static int mt9f002_s_frame_interval(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_frame_interval *fi)
+{
+	struct mt9f002 *sensor = to_mt9f002(sd);
+	u64 period_ns;
+	int ret;
+
+	/* Protect against a division by zero and a null period */
+	if (!fi->interval.denominator)
+		fi->interval.denominator = 30;
+	if (!fi->interval.numerator)
+		fi->interval.numerator = 1;
+
+	period_ns = (u64)fi->interval.numerator * NSEC_PER_SEC;
+	sensor->ctx.frameperiod_ns = div_u64(period_ns,
+					     fi->interval.denominator);
+
+	/* While streaming, the new period is taken into account later */
+	if (sensor->isStreaming)
+		return 0;
+
+	ret = mt9f002_update_timings(sensor);
+	if (ret < 0) {
+		v4l2_err(&sensor->subdev,
+			 "Unable to calculate Video Timing\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Video operations: stream on/off and frame interval get/set. */
+static struct v4l2_subdev_video_ops mt9f002_subdev_video_ops = {
+	.s_stream = mt9f002_s_stream,
+	.g_frame_interval = mt9f002_g_frame_interval,
+	.s_frame_interval = mt9f002_s_frame_interval,
+};
+
+/* Pad operations: format and crop (selection) handling. */
+static struct v4l2_subdev_pad_ops mt9f002_subdev_pad_ops = {
+	.get_fmt = mt9f002_get_format,
+	.set_fmt = mt9f002_set_format,
+	.get_selection = mt9f002_get_selection,
+	.set_selection = mt9f002_set_selection,
+	.enum_mbus_code = mt9f002_enum_mbus_code,
+};
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/* Debug op: read a sensor register (size 1 or 2 bytes).
+ * Fix: a 16-bit read was previously stored through a u8 pointer, which
+ * truncated the value and could corrupt the adjacent stack bytes. */
+static int mt9f002_get_register(struct v4l2_subdev *sd,
+				struct v4l2_dbg_register *reg)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(sd);
+	int ret;
+	u16 val16 = 0;
+	u8 val8 = 0;
+
+	if (reg->size == 2)
+		ret = mt9f002_read16(mt9f002, reg->reg, &val16);
+	else if (reg->size == 1)
+		ret = mt9f002_read8(mt9f002, reg->reg, &val8);
+	else
+		return -EINVAL;
+
+	if (ret)
+		return ret;
+
+	reg->val = (reg->size == 2) ? (__u64)val16 : (__u64)val8;
+
+	return 0;
+}
+
+/* Debug op: write a sensor register (size 1 or 2 bytes). */
+static int mt9f002_set_register(struct v4l2_subdev *sd,
+				struct v4l2_dbg_register *reg)
+{
+	struct mt9f002 *mt9f002 = to_mt9f002(sd);
+
+	switch (reg->size) {
+	case 2:
+		return mt9f002_write16(mt9f002, reg->reg, reg->val);
+	case 1:
+		return mt9f002_write8(mt9f002, reg->reg, reg->val);
+	default:
+		return -EINVAL;
+	}
+}
+#endif
+
+/* Core op: only VSYNC and FRAME_SYNC event subscriptions are supported. */
+static int mt9f002_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				   struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_VSYNC:
+	case V4L2_EVENT_FRAME_SYNC:
+		return v4l2_event_subscribe(fh, sub, MT9F002_NEVENTS, NULL);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Core op: unsubscribe from events (no per-type filtering needed). */
+static int mt9f002_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+				     struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+/* One-shot hrtimer handler: queue a V4L2 FRAME_SYNC event carrying the frame
+ * number recorded when the timer was armed. */
+static enum hrtimer_restart mt9f002_fsync_callback(struct hrtimer *_timer)
+{
+	struct mt9f002_timer *timer = container_of(_timer, struct mt9f002_timer,
+						   hrtimer);
+	struct v4l2_event event = {
+		.type = V4L2_EVENT_FRAME_SYNC,
+		.u.frame_sync = {
+			.frame_sequence = timer->frame,
+		},
+	};
+
+	/* Queue V4L2 frame_sync event */
+	v4l2_event_queue(timer->vdev, &event);
+
+	return HRTIMER_NORESTART;
+}
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+/* kthread work: restart the capture after a window change produced a bad
+ * frame, then arm the FRAME_SYNC timer with a delay based on the exposure
+ * time rather than a full frame period. */
+static void mt9f002_restart_callback(struct kthread_work *work)
+{
+	struct mt9f002 *mt9f002 = container_of(work, struct mt9f002,
+					       fsync_work);
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_timing *timings = &ctx->timings;
+	struct video_device *vdev = mt9f002->subdev.devnode;
+	struct mt9f002_timer *timer;
+	int32_t delay = 0;
+	ktime_t ktime;
+
+	/* Restart capture */
+	mt9f002_write16(mt9f002, MT9F002_RESET_REGISTER, 0x12CE);
+
+	/* Calculate delay of exposure time (in us) */
+	delay = timings->exposure_us + timings->frameLength_us +
+		timings->blkExtraHV_us - MT9F002_CROP_WRITE_US -
+		mt9f002->fsync_delay_us;
+
+	/* Too late to arm a timer: skip this window */
+	if (delay < 0)
+		return;
+
+	/* Get next timer; bail out if it is already armed */
+	timer = &mt9f002->fsync_timers[mt9f002->next_timer];
+	if (hrtimer_active(&timer->hrtimer))
+		return;
+
+	/* Setup timer (delay converted from us to ns) */
+	ktime = ktime_set(0, delay * 1000);
+	timer->hrtimer.function = &mt9f002_fsync_callback;
+	timer->vdev = vdev;
+	timer->frame = mt9f002->frame++;
+	hrtimer_start(&timer->hrtimer, ktime, HRTIMER_MODE_REL);
+
+	/* Alternate between the two timers */
+	mt9f002->next_timer ^= 1;
+}
+#endif
+
+/* Interrupt service routine, called on the sensor VSYNC (BOTTOM) interrupt.
+ * Queues the VSYNC event immediately and arms an hrtimer that will emit the
+ * FRAME_SYNC event at the start of the next safe reconfiguration window. */
+static int mt9f002_irq_handler(struct v4l2_subdev *sd, u32 status,
+			       bool *handled)
+{
+	struct mt9f002 *mt9f002 = container_of(sd, struct mt9f002, subdev);
+	struct mt9f002_context *ctx = &mt9f002->ctx;
+	struct mt9f002_timing *timings = &ctx->timings;
+	struct video_device *vdev = sd->devnode;
+	static const struct v4l2_event event = {
+		.type = V4L2_EVENT_VSYNC,
+		.u.vsync.field = V4L2_FIELD_BOTTOM,
+	};
+	struct mt9f002_timer *timer;
+	int32_t delay;
+	ktime_t ktime;
+
+	/* Queue V4L2 Vsync event (BOTTOM) */
+	if (vdev)
+		v4l2_event_queue(vdev, &event);
+
+	/* Start a new timer for next safe window */
+	if (vdev) {
+		/* Calculate delay before beginning of next zone during which a
+		 * new sensor configuration will be delayed by one frame.
+		 * This time is around the minimal blanking time of the current
+		 * frame + time needed for I2C configuration + defined user
+		 * delay (all in us).
+		 */
+		delay = (ctx->prev_timings.blkExtraHV_us +
+			 timings->blkStartHV_us + timings->frameLength_us +
+			 timings->blkExtraHV_us - MT9F002_CROP_WRITE_US -
+			 mt9f002->fsync_delay_us);
+
+		/* Next frame is a bad frame: add a complete frame in order to
+		 * avoid two consecutive bad frames.
+		 * When restart is enabled, frame capture is restarted in order
+		 * to reduce time before next frame.
+		 */
+		if (mt9f002->drop) {
+			mt9f002->drop = 0;
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+			/* Queue restart task in kthread worker */
+			queue_kthread_work(&mt9f002->fsync_worker, &mt9f002->fsync_work);
+
+			/* Timer will be started in restart task */
+			goto end;
+#else
+			delay += (timings->blkExtraHV_us +
+				  timings->blkStartHV_us +
+				  timings->frameLength_us);
+#endif
+		}
+
+		/* Too late: skip window */
+		if (delay < 0)
+			goto end;
+
+		/* Get next timer; skip if it is already armed */
+		timer = &mt9f002->fsync_timers[mt9f002->next_timer];
+		if (hrtimer_active(&timer->hrtimer))
+			goto end;
+
+		/* Setup timer (delay converted from us to ns) */
+		ktime = ktime_set(0, delay * 1000);
+		timer->hrtimer.function = &mt9f002_fsync_callback;
+		timer->vdev = vdev;
+		timer->frame = mt9f002->frame++;
+		hrtimer_start(&timer->hrtimer, ktime, HRTIMER_MODE_REL);
+
+		/* Alternate between the two timers */
+		mt9f002->next_timer ^= 1;
+	}
+
+end:
+	*handled = true;
+	return 0;
+}
+
+/* Core operations: extended controls, debug register access, event
+ * (un)subscription and the VSYNC interrupt service routine. */
+static const struct v4l2_subdev_core_ops mt9f002_core_ops = {
+	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register = mt9f002_get_register,
+	.s_register = mt9f002_set_register,
+#endif
+	.subscribe_event = mt9f002_subscribe_event,
+	.unsubscribe_event = mt9f002_unsubscribe_event,
+	.interrupt_service_routine = mt9f002_irq_handler,
+};
+
+/* Top-level subdev operations table. */
+static struct v4l2_subdev_ops mt9f002_subdev_ops = {
+	.core = &mt9f002_core_ops,
+	.video = &mt9f002_subdev_video_ops,
+	.pad = &mt9f002_subdev_pad_ops,
+};
+
+/* Internal operations: chip detection at subdev registration time. */
+static const struct v4l2_subdev_internal_ops mt9f002_subdev_internal_ops = {
+	.registered = mt9f002_registered,
+};
+
+/* -----------------------------------------------------------------------------
+ * Driver initialization and probing
+ */
+/*
+ * mt9f002_probe() - detect and register an MT9F002 sensor subdevice.
+ *
+ * Allocates the driver state, registers the V4L2 controls, initializes
+ * the media entity and the frame-sync machinery, then computes the
+ * initial PLL and timing configuration.
+ *
+ * Returns 0 on success or a negative errno; all partially acquired
+ * resources are released on every failure path.
+ */
+static int mt9f002_probe(struct i2c_client *client,
+			 const struct i2c_device_id *did)
+{
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+	/* Real-time FIFO priority for the frame-restart worker thread */
+	struct sched_param param = {
+		.sched_priority = 99,
+	};
+#endif
+	struct mt9f002 *mt9f002;
+	struct mt9f002_context *ctx;
+	struct mt9f002_blanking *blanking;
+	struct mt9f002_platform_data *pdata = client->dev.platform_data;
+	unsigned int i;
+	int ret;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -EIO;
+
+	dev_info(&client->dev, "detecting mt9f002 client on address 0x%x\n",
+		 client->addr << 1);
+
+	mt9f002 = kzalloc(sizeof(*mt9f002), GFP_KERNEL);
+	if (!mt9f002)
+		return -ENOMEM;
+
+	ctx = &mt9f002->ctx;
+	blanking = &ctx->blanking;
+	mutex_init(&mt9f002->power_lock);
+	mt9f002->pdata = pdata;
+
+	/* 6 standard controls plus the custom gain and misc control arrays */
+	v4l2_ctrl_handler_init(&mt9f002->ctrls, ARRAY_SIZE(mt9f002_gains) +
+			       ARRAY_SIZE(mt9f002_ctrls) + 6);
+
+	mt9f002->target_exposure_us = v4l2_ctrl_new_std(&mt9f002->ctrls,
+			&mt9f002_ctrl_ops, V4L2_CID_EXPOSURE_ABSOLUTE,
+			0, 60000, 1, MT9F002_EXPOSURE_DEF);
+
+	v4l2_ctrl_new_std(&mt9f002->ctrls, &mt9f002_ctrl_ops,
+			  V4L2_CID_HFLIP, 0, 1, 1, 0);
+	v4l2_ctrl_new_std(&mt9f002->ctrls, &mt9f002_ctrl_ops,
+			  V4L2_CID_VFLIP, 0, 1, 1, 0);
+	mt9f002->hblank = v4l2_ctrl_new_std(&mt9f002->ctrls, &mt9f002_ctrl_ops,
+			V4L2_CID_HBLANK, 1, LINE_LENGTH_MAX, 1, 1);
+	mt9f002->vblank = v4l2_ctrl_new_std(&mt9f002->ctrls, &mt9f002_ctrl_ops,
+			V4L2_CID_VBLANK, MT9F002_MIN_FRAME_BLANKING_LINES_DEF,
+			FRAME_LENGTH_MAX - 2, 1,
+			MT9F002_MIN_FRAME_BLANKING_LINES_DEF);
+	mt9f002->pixel_rate = v4l2_ctrl_new_std(&mt9f002->ctrls,
+			&mt9f002_ctrl_ops, V4L2_CID_PIXEL_RATE,
+			1000000L, 220000000LL, 1, 220000000LL);
+
+	for (i = 0; i < ARRAY_SIZE(mt9f002_gains); ++i)
+		mt9f002->gains[i] = v4l2_ctrl_new_custom(&mt9f002->ctrls,
+							 &mt9f002_gains[i], NULL);
+
+	for (i = 0; i < ARRAY_SIZE(mt9f002_ctrls); ++i)
+		v4l2_ctrl_new_custom(&mt9f002->ctrls, &mt9f002_ctrls[i], NULL);
+
+	mt9f002->subdev.ctrl_handler = &mt9f002->ctrls;
+
+	if (mt9f002->ctrls.error)
+		v4l2_err(client, "%s: control initialization error %d\n",
+			 __func__, mt9f002->ctrls.error);
+
+	mt9f002->set_power = pdata->set_power;
+	mt9f002->i2c_client = client;
+
+	v4l2_i2c_subdev_init(&mt9f002->subdev, client, &mt9f002_subdev_ops);
+	mt9f002->subdev.internal_ops = &mt9f002_subdev_internal_ops;
+
+	mt9f002->pad.flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_init(&mt9f002->subdev.entity, 1, &mt9f002->pad, 0);
+	if (ret < 0) {
+		v4l_err(client, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	mt9f002->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+				 V4L2_SUBDEV_FL_HAS_EVENTS;
+	mt9f002->format.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+	mt9f002->format.field = V4L2_FIELD_NONE;
+
+	/* Max sensor crop into 1920p30 */
+	mt9f002->format.width = 1920;
+	mt9f002->format.height = 1080;
+
+	/* Default crop: a 1920x1080 window inside the pixel array */
+	mt9f002->crop.left = 1330;
+	mt9f002->crop.top = 1120;
+	mt9f002->crop.width = 1920;
+	mt9f002->crop.height = 1080;
+
+	blanking->min_frame_blanking_lines =
+		MT9F002_MIN_FRAME_BLANKING_LINES_DEF;
+
+	/* 30 FPS */
+	ctx->frameperiod_ns = MT9F002_FRAMEPERIOD_NS_DEF;
+
+	ret = mt9f002_pll_setup(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev, "Unable to setup pll\n");
+		goto eentity;
+	}
+
+	ret = mt9f002_update_timings(mt9f002);
+	if (ret < 0) {
+		v4l2_err(&mt9f002->subdev,
+			 "Unable to calculate Video Timing\n");
+		goto eentity;
+	}
+
+	/* Init internal timers used for FRAME_SYNC event generation */
+	hrtimer_init(&mt9f002->fsync_timers[0].hrtimer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	hrtimer_init(&mt9f002->fsync_timers[1].hrtimer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	mt9f002->fsync_delay_us = MT9F002_DEFAULT_USER_US;
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+	/* Create a new kthread worker for restart handling */
+	init_kthread_worker(&mt9f002->fsync_worker);
+	mt9f002->fsync_worker_task = kthread_run(kthread_worker_fn,
+						 &mt9f002->fsync_worker,
+						 mt9f002->subdev.name);
+	/* kthread_run() returns an ERR_PTR on failure, never NULL */
+	if (IS_ERR(mt9f002->fsync_worker_task)) {
+		v4l2_err(&mt9f002->subdev, "Unable to create worker kthread\n");
+		ret = PTR_ERR(mt9f002->fsync_worker_task);
+		goto eentity;
+	}
+
+	/* Use FIFO scheduler for realtime purpose */
+	sched_setscheduler(mt9f002->fsync_worker_task, SCHED_FIFO, &param);
+
+	/* Initialize restart work in case of high framerate */
+	init_kthread_work(&mt9f002->fsync_work, mt9f002_restart_callback);
+#endif
+
+	dev_info(&client->dev, "mt9f002 client on address 0x%x name(%s) registered\n",
+		 client->addr << 1, did->name);
+	return 0;
+
+eentity:
+	media_entity_cleanup(&mt9f002->subdev.entity);
+emedia:
+	v4l2_ctrl_handler_free(&mt9f002->ctrls);
+	mutex_destroy(&mt9f002->power_lock);
+	kfree(mt9f002);
+
+	return ret;
+}
+
+/*
+ * mt9f002_remove() - unregister the subdevice and release all resources.
+ *
+ * Teardown order matters: the restart worker is drained and stopped
+ * before any state it may touch is freed.
+ */
+static int mt9f002_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct mt9f002 *mt9f002 = to_mt9f002(subdev);
+
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+	/* Drain pending restart work, then stop the worker thread */
+	flush_kthread_worker(&mt9f002->fsync_worker);
+	kthread_stop(mt9f002->fsync_worker_task);
+#endif
+	v4l2_ctrl_handler_free(&mt9f002->ctrls);
+	v4l2_device_unregister_subdev(subdev);
+	media_entity_cleanup(&subdev->entity);
+	mutex_destroy(&mt9f002->power_lock);
+	kfree(mt9f002);
+	return 0;
+}
+
+/* I2C device IDs handled by this driver */
+static const struct i2c_device_id mt9f002_id[] = {
+	{ "mt9f002", 0 },
+	{ }	/* sentinel */
+};
+MODULE_DEVICE_TABLE(i2c, mt9f002_id);
+
+/* I2C driver glue; module init/exit is generated by module_i2c_driver() */
+static struct i2c_driver mt9f002_driver = {
+	.driver = {
+		.name = "mt9f002",
+	},
+	.probe = mt9f002_probe,
+	.remove = mt9f002_remove,
+	.id_table = mt9f002_id,
+};
+
+module_i2c_driver(mt9f002_driver);
+
+MODULE_DESCRIPTION("Aptina mt9f002 Camera driver");
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/media/video/mt9f002_regs.h b/drivers/media/video/mt9f002_regs.h
new file mode 100644
index 00000000000000..44daae683e61b1
--- /dev/null
+++ b/drivers/media/video/mt9f002_regs.h
@@ -0,0 +1,548 @@
+/**
+ ************************************************
+ * @file mt9f002_regs.h
+ * @brief Driver for Aptina mt9f002
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2014-09-19
+ ************************************************
+ */
+
+/* Guard macro must match the one defined below, otherwise the header is
+ * not protected against multiple inclusion. */
+#ifndef MT9F002_REGS_H
+#define MT9F002_REGS_H
+
+//#define HAL_MT9F002_BINNING 1
+/*
+ * PLL constraints (see figure - MT9F002_DS_D.pdf p38)
+ * frequency values below are expressed in mHz (millihertz), not kHz
+ */
+#define EXT_CLK_MIN 2000000000UL // camera input clock min value (symbol ext_clk_freq_mhz)
+#define EXT_CLK_MAX 64000000000UL // camera input clock max value (symbol ext_clk_freq_mhz)
+
+#define PLL_INPUT_CLK_MIN 2000000000UL // pll input clock min value (symbol pll_ip_clk_freq)
+#define PLL_INPUT_CLK_MAX 24000000000UL // pll input clock max value (symbol pll_ip_clk_freq)
+
+#define VCO_CLK_MIN 384000000000UL // vco clock min value (symbol frequency)
+#define VCO_CLK_MAX 768000000000UL // vco clock max value (symbol frequency)
+
+#define PIXEL_CLK_MAX 120000000000UL
+
+// pll registers range
+#define PRE_PLL_CLK_DIV_MIN 1
+#define PRE_PLL_CLK_DIV_MAX 64
+
+#define PLL_MUL_EVEN_MIN 32
+//#define PLL_MUL_EVEN_MAX 384 // TODO according to datasheet max value is 384 but in fact it seems to be 254
+#define PLL_MUL_EVEN_MAX 254
+#define PLL_MUL_ODD_MIN 17
+#define PLL_MUL_ODD_MAX 191
+#define PLL_MUL_MIN PLL_MUL_ODD_MIN
+#define PLL_MUL_MAX PLL_MUL_EVEN_MAX
+
+#define LINE_LENGTH_MAX 0xFFFF
+#define FRAME_LENGTH_MAX 0xFFFF
+
+/*
+* Scaler 1:1 (m = 16) to 1:8 (m = 128)
+*/
+#define SCALER_N 16
+#define SCALER_M_MIN 16
+#define SCALER_M_MAX 128
+
+#define MT9F002_PIXEL_ARRAY_HEIGHT 3351
+#define MT9F002_PIXEL_ARRAY_WIDTH 4647
+
+#define MT9F002_CHIP_VERSION 0x0000
+#define MT9F002_CHIP_ID 0x2E01
+#define MT9F002_RESET_REGISTER 0x301A
+#define MT9F002_GLOBAL_GAIN 0x305E
+#define MT9F002_GLOBAL_GAIN_MIN 15
+#define MT9F002_GLOBAL_GAIN_MAX 635
+#define MT9F002_GLOBAL_GAIN_DEF 20
+#define MT9F002_DAC_LD_14_15 0x3EDA
+#define MT9F002_DAC_LD_28_29 0x3EE8
+#define MT9F002_DAC_LD_24_25 0x3EE4
+#define MT9F002_DAC_LD_30_31 0x3EEA
+#define MT9F002_COLUMN_CORRECTION 0x30D4
+#define MT9F002_CTX_CONTROL_REG 0x30E8
+#define MT9F002_CTX_WR_DATA_REG 0x30EA
+#define MT9F002_ANALOG_CONTROL4 0x3176
+#define MT9F002_DATA_PEDESTAL_ 0x301E
+#define MT9F002_SERIAL_FORMAT 0x31AE
+#define MT9F002_DATAPATH_SELECT 0x306E
+#define MT9F002_SCALING_MODE 0x0400
+#define MT9F002_SMIA_TEST 0x3064
+#define MT9F002_VT_PIX_CLK_DIV 0x0300
+#define MT9F002_VT_SYS_CLK_DIV 0x0302
+#define MT9F002_PRE_PLL_CLK_DIV 0x0304
+#define MT9F002_PLL_MULTIPLIER 0x0306
+#define MT9F002_OP_PIX_CLK_DIV 0x0308
+#define MT9F002_OP_SYS_CLK_DIV 0x030A
+#define MT9F002_CCP_DATA_FORMAT 0x0112
+#define MT9F002_READ_MODE 0x3040
+#define MT9F002_MIN_FRAME_BLANKING_LINES 0x114A
+#define MT9F002_MIN_FRAME_BLANKING_LINES_DEF 0x92
+#define MT9F002_COARSE_INTEGRATION_TIME_MIN 0x1004
+#define MT9F002_COARSE_INTEGRATION_TIME_MAX_MARGIN 0x1006
+#define MT9F002_ROW_SPEED 0x3016
+#define MT9F002_ANALOG_CONTROL5 0x3178
+#define MT9F002_DARK_CONTROL3 0x30EE
+#define MT9F002_SCALE_M 0x0404
+#define MT9F002_ANALOG_CONTROL7 0x317C
+#define MT9F002_COARSE_INTEGRATION_TIME 0x0202
+#define MT9F002_DIGITAL_TEST 0x30B0
+#define MT9F002_X_ADDR_START 0x0344
+#define MT9F002_X_ADDR_END 0x0348
+#define MT9F002_X_ADDR_MIN 24
+#define MT9F002_X_ADDR_MAX 4608
+#define MT9F002_X_ODD_INC 0x0382
+#define MT9F002_X_OUTPUT_SIZE 0x034C
+#define MT9F002_Y_ADDR_START 0x0346
+#define MT9F002_Y_ADDR_END 0x034A
+#define MT9F002_Y_ADDR_MIN 0
+#define MT9F002_Y_ADDR_MAX 3288
+#define MT9F002_WINDOW_HEIGHT_MIN 1
+#define MT9F002_WINDOW_HEIGHT_MAX 3288
+#define MT9F002_WINDOW_WIDTH_MIN 1
+#define MT9F002_WINDOW_WIDTH_MAX 4608
+#define MT9F002_Y_ODD_INC 0x0386
+#define MT9F002_Y_OUTPUT_SIZE 0x034E
+#define MT9F002_LINE_LENGTH_PCK 0x0342
+#define MT9F002_MIN_LINE_BLANKING_PCK 0x1148
+#define MT9F002_MIN_LINE_LENGTH_PCK 0x1144
+#define MT9F002_FRAME_LENGTH_LINES 0x0340
+#define MT9F002_IMAGE_ORIENTATION 0x0101
+#define MT9F002_FINE_INTEGRATION_TIME 0x3014
+#define MT9F002_FINE_INTEGRATION_TIME_MIN 0x1008
+#define MT9F002_FINE_INTEGRATION_TIME_MAX_MARGIN 0x100A
+#define MT9F002_FINE_CORRECTION 0x3010
+#define MT9F002_GREEN1_GAIN 0x3056
+#define MT9F002_BLUE_GAIN 0x3058
+#define MT9F002_RED_GAIN 0x305A
+#define MT9F002_GREEN2_GAIN 0x305C
+#define MT9F002_TEST_PATTERN 0x3070
+#define MT9F002_TEST_PATTERN_RED 0x3072
+#define MT9F002_TEST_PATTERN_GREENR 0x3074
+#define MT9F002_TEST_PATTERN_BLUE 0x3076
+#define MT9F002_TEST_PATTERN_GREENB 0x3078
+#define MT9F002_HISPI_CONTROL_STATUS 0x31C6
+#define MT9F002_EXTRA_DELAY 0x3018
+#define MT9F002_RESERVED_MFR_3D00 0x3D00
+#define MT9F002_RESERVED_MFR_3D02 0x3D02
+#define MT9F002_RESERVED_MFR_3D04 0x3D04
+#define MT9F002_RESERVED_MFR_3D06 0x3D06
+#define MT9F002_RESERVED_MFR_3D08 0x3D08
+#define MT9F002_RESERVED_MFR_3D0A 0x3D0A
+#define MT9F002_RESERVED_MFR_3D0C 0x3D0C
+#define MT9F002_RESERVED_MFR_3D0E 0x3D0E
+#define MT9F002_RESERVED_MFR_3D10 0x3D10
+#define MT9F002_RESERVED_MFR_3D12 0x3D12
+#define MT9F002_RESERVED_MFR_3D14 0x3D14
+#define MT9F002_RESERVED_MFR_3D16 0x3D16
+#define MT9F002_RESERVED_MFR_3D18 0x3D18
+#define MT9F002_RESERVED_MFR_3D1A 0x3D1A
+#define MT9F002_RESERVED_MFR_3D1C 0x3D1C
+#define MT9F002_RESERVED_MFR_3D1E 0x3D1E
+#define MT9F002_RESERVED_MFR_3D20 0x3D20
+#define MT9F002_RESERVED_MFR_3D22 0x3D22
+#define MT9F002_RESERVED_MFR_3D24 0x3D24
+#define MT9F002_RESERVED_MFR_3D26 0x3D26
+#define MT9F002_RESERVED_MFR_3D28 0x3D28
+#define MT9F002_RESERVED_MFR_3D2A 0x3D2A
+#define MT9F002_RESERVED_MFR_3D2C 0x3D2C
+#define MT9F002_RESERVED_MFR_3D2E 0x3D2E
+#define MT9F002_RESERVED_MFR_3D30 0x3D30
+#define MT9F002_RESERVED_MFR_3D32 0x3D32
+#define MT9F002_RESERVED_MFR_3D34 0x3D34
+#define MT9F002_RESERVED_MFR_3D36 0x3D36
+#define MT9F002_RESERVED_MFR_3D38 0x3D38
+#define MT9F002_RESERVED_MFR_3D3A 0x3D3A
+#define MT9F002_RESERVED_MFR_3D3C 0x3D3C
+#define MT9F002_RESERVED_MFR_3D3E 0x3D3E
+#define MT9F002_RESERVED_MFR_3D40 0x3D40
+#define MT9F002_RESERVED_MFR_3D42 0x3D42
+#define MT9F002_RESERVED_MFR_3D44 0x3D44
+#define MT9F002_RESERVED_MFR_3D46 0x3D46
+#define MT9F002_RESERVED_MFR_3D48 0x3D48
+#define MT9F002_RESERVED_MFR_3D4A 0x3D4A
+#define MT9F002_RESERVED_MFR_3D4C 0x3D4C
+#define MT9F002_RESERVED_MFR_3D4E 0x3D4E
+#define MT9F002_RESERVED_MFR_3D50 0x3D50
+#define MT9F002_RESERVED_MFR_3D52 0x3D52
+#define MT9F002_RESERVED_MFR_3D54 0x3D54
+#define MT9F002_RESERVED_MFR_3D56 0x3D56
+#define MT9F002_RESERVED_MFR_3D58 0x3D58
+#define MT9F002_RESERVED_MFR_3D5A 0x3D5A
+#define MT9F002_RESERVED_MFR_3D5C 0x3D5C
+#define MT9F002_RESERVED_MFR_3D5E 0x3D5E
+#define MT9F002_RESERVED_MFR_3D60 0x3D60
+#define MT9F002_RESERVED_MFR_3D62 0x3D62
+#define MT9F002_RESERVED_MFR_3D64 0x3D64
+#define MT9F002_RESERVED_MFR_3D66 0x3D66
+#define MT9F002_RESERVED_MFR_3D68 0x3D68
+#define MT9F002_RESERVED_MFR_3D6A 0x3D6A
+#define MT9F002_RESERVED_MFR_3D6C 0x3D6C
+#define MT9F002_RESERVED_MFR_3D6E 0x3D6E
+#define MT9F002_RESERVED_MFR_3D70 0x3D70
+#define MT9F002_RESERVED_MFR_3D72 0x3D72
+#define MT9F002_RESERVED_MFR_3D74 0x3D74
+#define MT9F002_RESERVED_MFR_3D76 0x3D76
+#define MT9F002_RESERVED_MFR_3D78 0x3D78
+#define MT9F002_RESERVED_MFR_3D7A 0x3D7A
+#define MT9F002_RESERVED_MFR_3D7C 0x3D7C
+#define MT9F002_RESERVED_MFR_3D7E 0x3D7E
+#define MT9F002_RESERVED_MFR_3D80 0x3D80
+#define MT9F002_RESERVED_MFR_3D82 0x3D82
+#define MT9F002_RESERVED_MFR_3D84 0x3D84
+#define MT9F002_RESERVED_MFR_3D86 0x3D86
+#define MT9F002_RESERVED_MFR_3D88 0x3D88
+#define MT9F002_RESERVED_MFR_3D8A 0x3D8A
+#define MT9F002_RESERVED_MFR_3D8C 0x3D8C
+#define MT9F002_RESERVED_MFR_3D8E 0x3D8E
+#define MT9F002_RESERVED_MFR_3D90 0x3D90
+#define MT9F002_RESERVED_MFR_3D92 0x3D92
+#define MT9F002_RESERVED_MFR_3D94 0x3D94
+#define MT9F002_RESERVED_MFR_3D96 0x3D96
+#define MT9F002_RESERVED_MFR_3D98 0x3D98
+#define MT9F002_RESERVED_MFR_3D9A 0x3D9A
+#define MT9F002_RESERVED_MFR_3D9C 0x3D9C
+#define MT9F002_RESERVED_MFR_3D9E 0x3D9E
+#define MT9F002_RESERVED_MFR_3DA0 0x3DA0
+#define MT9F002_RESERVED_MFR_3DA2 0x3DA2
+#define MT9F002_RESERVED_MFR_3DA4 0x3DA4
+#define MT9F002_RESERVED_MFR_3DA6 0x3DA6
+#define MT9F002_RESERVED_MFR_3DA8 0x3DA8
+#define MT9F002_RESERVED_MFR_3DAA 0x3DAA
+#define MT9F002_RESERVED_MFR_3DAC 0x3DAC
+#define MT9F002_RESERVED_MFR_3DAE 0x3DAE
+#define MT9F002_RESERVED_MFR_3DB0 0x3DB0
+#define MT9F002_RESERVED_MFR_3DB2 0x3DB2
+#define MT9F002_RESERVED_MFR_3DB4 0x3DB4
+#define MT9F002_RESERVED_MFR_3DB6 0x3DB6
+#define MT9F002_RESERVED_MFR_3DB8 0x3DB8
+#define MT9F002_RESERVED_MFR_3DBA 0x3DBA
+#define MT9F002_RESERVED_MFR_3DBC 0x3DBC
+#define MT9F002_RESERVED_MFR_3DBE 0x3DBE
+#define MT9F002_RESERVED_MFR_3DC0 0x3DC0
+#define MT9F002_RESERVED_MFR_3DC2 0x3DC2
+#define MT9F002_RESERVED_MFR_3DC4 0x3DC4
+#define MT9F002_RESERVED_MFR_3DC6 0x3DC6
+#define MT9F002_RESERVED_MFR_3DC8 0x3DC8
+#define MT9F002_RESERVED_MFR_3DCA 0x3DCA
+#define MT9F002_RESERVED_MFR_3DCC 0x3DCC
+#define MT9F002_RESERVED_MFR_3DCE 0x3DCE
+#define MT9F002_RESERVED_MFR_3DD0 0x3DD0
+#define MT9F002_RESERVED_MFR_3DD2 0x3DD2
+#define MT9F002_RESERVED_MFR_3DD4 0x3DD4
+#define MT9F002_RESERVED_MFR_3DD6 0x3DD6
+#define MT9F002_RESERVED_MFR_3DD8 0x3DD8
+#define MT9F002_RESERVED_MFR_3DDA 0x3DDA
+#define MT9F002_RESERVED_MFR_3DDC 0x3DDC
+#define MT9F002_RESERVED_MFR_3DDE 0x3DDE
+#define MT9F002_RESERVED_MFR_3DE0 0x3DE0
+#define MT9F002_RESERVED_MFR_3DE2 0x3DE2
+#define MT9F002_RESERVED_MFR_3DE4 0x3DE4
+#define MT9F002_RESERVED_MFR_3DE6 0x3DE6
+#define MT9F002_RESERVED_MFR_3DE8 0x3DE8
+#define MT9F002_RESERVED_MFR_3DEA 0x3DEA
+#define MT9F002_RESERVED_MFR_3DEC 0x3DEC
+#define MT9F002_RESERVED_MFR_3DEE 0x3DEE
+#define MT9F002_RESERVED_MFR_3DF0 0x3DF0
+#define MT9F002_RESERVED_MFR_3DF2 0x3DF2
+#define MT9F002_RESERVED_MFR_3DF4 0x3DF4
+#define MT9F002_RESERVED_MFR_3DF6 0x3DF6
+#define MT9F002_RESERVED_MFR_3DF8 0x3DF8
+#define MT9F002_RESERVED_MFR_3DFA 0x3DFA
+#define MT9F002_RESERVED_MFR_3DFC 0x3DFC
+#define MT9F002_RESERVED_MFR_3DFE 0x3DFE
+#define MT9F002_RESERVED_MFR_3E00 0x3E00
+#define MT9F002_RESERVED_MFR_3E02 0x3E02
+#define MT9F002_RESERVED_MFR_3E04 0x3E04
+#define MT9F002_RESERVED_MFR_3E06 0x3E06
+#define MT9F002_RESERVED_MFR_3E08 0x3E08
+#define MT9F002_RESERVED_MFR_3E0A 0x3E0A
+#define MT9F002_RESERVED_MFR_3E0C 0x3E0C
+#define MT9F002_RESERVED_MFR_3E0E 0x3E0E
+#define MT9F002_RESERVED_MFR_3E10 0x3E10
+#define MT9F002_RESERVED_MFR_3E12 0x3E12
+#define MT9F002_RESERVED_MFR_3E14 0x3E14
+#define MT9F002_RESERVED_MFR_3E16 0x3E16
+#define MT9F002_RESERVED_MFR_3E18 0x3E18
+#define MT9F002_RESERVED_MFR_3E1A 0x3E1A
+#define MT9F002_RESERVED_MFR_3E1C 0x3E1C
+#define MT9F002_RESERVED_MFR_3E1E 0x3E1E
+#define MT9F002_RESERVED_MFR_3E20 0x3E20
+#define MT9F002_RESERVED_MFR_3E22 0x3E22
+#define MT9F002_RESERVED_MFR_3E24 0x3E24
+#define MT9F002_RESERVED_MFR_3E26 0x3E26
+#define MT9F002_RESERVED_MFR_3E28 0x3E28
+#define MT9F002_RESERVED_MFR_3E2A 0x3E2A
+#define MT9F002_RESERVED_MFR_3E2C 0x3E2C
+#define MT9F002_RESERVED_MFR_3E2E 0x3E2E
+#define MT9F002_RESERVED_MFR_3E30 0x3E30
+#define MT9F002_RESERVED_MFR_3E32 0x3E32
+#define MT9F002_RESERVED_MFR_3E34 0x3E34
+#define MT9F002_RESERVED_MFR_3E36 0x3E36
+#define MT9F002_RESERVED_MFR_3E38 0x3E38
+#define MT9F002_RESERVED_MFR_3E3A 0x3E3A
+#define MT9F002_RESERVED_MFR_3E3C 0x3E3C
+#define MT9F002_RESERVED_MFR_3E3E 0x3E3E
+#define MT9F002_RESERVED_MFR_3E40 0x3E40
+#define MT9F002_RESERVED_MFR_3E42 0x3E42
+#define MT9F002_RESERVED_MFR_3E44 0x3E44
+#define MT9F002_RESERVED_MFR_3E46 0x3E46
+#define MT9F002_RESERVED_MFR_3E48 0x3E48
+#define MT9F002_RESERVED_MFR_3E4A 0x3E4A
+#define MT9F002_RESERVED_MFR_3E4C 0x3E4C
+#define MT9F002_RESERVED_MFR_3E4E 0x3E4E
+#define MT9F002_RESERVED_MFR_3E50 0x3E50
+#define MT9F002_RESERVED_MFR_3E52 0x3E52
+#define MT9F002_RESERVED_MFR_3E54 0x3E54
+#define MT9F002_RESERVED_MFR_3E56 0x3E56
+#define MT9F002_RESERVED_MFR_3E58 0x3E58
+#define MT9F002_RESERVED_MFR_3E5A 0x3E5A
+#define MT9F002_RESERVED_MFR_3E5C 0x3E5C
+#define MT9F002_RESERVED_MFR_3E5E 0x3E5E
+#define MT9F002_RESERVED_MFR_3E60 0x3E60
+#define MT9F002_RESERVED_MFR_3E62 0x3E62
+#define MT9F002_RESERVED_MFR_3E64 0x3E64
+#define MT9F002_RESERVED_MFR_3E66 0x3E66
+#define MT9F002_RESERVED_MFR_3E68 0x3E68
+#define MT9F002_RESERVED_MFR_3E6A 0x3E6A
+#define MT9F002_RESERVED_MFR_3E6C 0x3E6C
+#define MT9F002_RESERVED_MFR_3E6E 0x3E6E
+#define MT9F002_RESERVED_MFR_3E70 0x3E70
+#define MT9F002_RESERVED_MFR_3E72 0x3E72
+#define MT9F002_RESERVED_MFR_3E74 0x3E74
+#define MT9F002_RESERVED_MFR_3E76 0x3E76
+#define MT9F002_RESERVED_MFR_3E78 0x3E78
+#define MT9F002_RESERVED_MFR_3E7A 0x3E7A
+#define MT9F002_RESERVED_MFR_3E7C 0x3E7C
+#define MT9F002_RESERVED_MFR_3E7E 0x3E7E
+#define MT9F002_RESERVED_MFR_3E80 0x3E80
+#define MT9F002_RESERVED_MFR_3E82 0x3E82
+#define MT9F002_RESERVED_MFR_3E84 0x3E84
+#define MT9F002_RESERVED_MFR_3E86 0x3E86
+#define MT9F002_RESERVED_MFR_3E88 0x3E88
+#define MT9F002_RESERVED_MFR_3E8A 0x3E8A
+#define MT9F002_RESERVED_MFR_3E8C 0x3E8C
+#define MT9F002_RESERVED_MFR_3E8E 0x3E8E
+#define MT9F002_RESERVED_MFR_3E90 0x3E90
+#define MT9F002_RESERVED_MFR_3E92 0x3E92
+#define MT9F002_RESERVED_MFR_3E94 0x3E94
+#define MT9F002_RESERVED_MFR_3E96 0x3E96
+#define MT9F002_RESERVED_MFR_3E98 0x3E98
+#define MT9F002_RESERVED_MFR_3E9A 0x3E9A
+#define MT9F002_RESERVED_MFR_3E9C 0x3E9C
+#define MT9F002_RESERVED_MFR_3E9E 0x3E9E
+#define MT9F002_RESERVED_MFR_3EA0 0x3EA0
+#define MT9F002_RESERVED_MFR_3EA2 0x3EA2
+#define MT9F002_RESERVED_MFR_3EA4 0x3EA4
+#define MT9F002_RESERVED_MFR_3EA6 0x3EA6
+#define MT9F002_RESERVED_MFR_3EA8 0x3EA8
+#define MT9F002_RESERVED_MFR_3EAA 0x3EAA
+#define MT9F002_RESERVED_MFR_3EAC 0x3EAC
+#define MT9F002_RESERVED_MFR_3EAE 0x3EAE
+#define MT9F002_RESERVED_MFR_3EB0 0x3EB0
+#define MT9F002_RESERVED_MFR_3EB2 0x3EB2
+#define MT9F002_RESERVED_MFR_3EB4 0x3EB4
+#define MT9F002_RESERVED_MFR_3EB6 0x3EB6
+#define MT9F002_RESERVED_MFR_3EB8 0x3EB8
+#define MT9F002_RESERVED_MFR_3EBA 0x3EBA
+#define MT9F002_RESERVED_MFR_3EBC 0x3EBC
+#define MT9F002_RESERVED_MFR_3EBE 0x3EBE
+#define MT9F002_RESERVED_MFR_3EC0 0x3EC0
+#define MT9F002_RESERVED_MFR_3EC2 0x3EC2
+#define MT9F002_RESERVED_MFR_3EC4 0x3EC4
+#define MT9F002_RESERVED_MFR_3EC6 0x3EC6
+#define MT9F002_RESERVED_MFR_3EC8 0x3EC8
+#define MT9F002_RESERVED_MFR_3ECA 0x3ECA
+#define MT9F002_RESERVED_MFR_3176 0x3176
+#define MT9F002_RESERVED_MFR_317C 0x317C
+#define MT9F002_RESERVED_MFR_3EE6 0x3EE6
+#define MT9F002_RESERVED_MFR_3ED8 0x3ED8
+#define MT9F002_RESERVED_MFR_3EDC 0x3EDC
+#define MT9F002_RESERVED_MFR_3EE2 0x3EE2
+#define MT9F002_RESERVED_MFR_3EE8 0x3EE8
+#define MT9F002_RESERVED_MFR_3064 0x3064
+
+#define MT9F002_MODE_SELECT 0x0100
+#define MT9F002_SOFTWARE_RESET 0x103
+#define MT9F002_GROUPED_PARAMETER_HOLD 0x0104
+#define MT9F002_MASK_CORRUPTED_FRAME 0x0105
+
+
+/*
+ * Sensor Timing Constants
+ * values have been found experimentally
+ */
+/* binning mode */
+#define BINNING_XY_MIN_LINE_LENGTH_PCK 4650
+#define BINNING_XY_MIN_LINE_BLANKING_PCK 2950
+#define BINNING_XY_MIN_LINE_FIFO_BLANKING_PCK 120
+#define BINNING_XY_FINE_INTEGRATION_TIME_MIN 2000
+#define BINNING_XY_FINE_INTEGRATION_TIME_MAX_MARGIN 2200
+
+#define BINNING_X_MIN_LINE_LENGTH_PCK 3495
+#define BINNING_X_MIN_LINE_BLANKING_PCK 0
+#define BINNING_X_MIN_LINE_FIFO_BLANKING_PCK 60
+#define BINNING_X_FINE_INTEGRATION_TIME_MIN 1500
+#define BINNING_X_FINE_INTEGRATION_TIME_MAX_MARGIN 1900
+
+#define NO_BINNING_NO_SKIPPING 0x1
+#define BINNING_ONLY 0x3
+#define BINNING_AND_SKIPPING 0x7
+
+/* normal mode */
+#define MIN_LINE_LENGTH_PCK 2400
+#define MIN_LINE_BLANKING_PCK 1700
+#define MIN_LINE_FIFO_BLANKING_PCK 60
+#define FINE_INTEGRATION_TIME_MIN 1316
+#define FINE_INTEGRATION_TIME_MAX_MARGIN 1032
+/* scaler mode */
+#define SCALER_MIN_LINE_LENGTH_PCK 2400
+#define SCALER_MIN_LINE_BLANKING_PCK 1750
+#define SCALER_MIN_LINE_FIFO_BLANKING_PCK 60
+#define SCALER_FINE_INTEGRATION_TIME_MIN 1316
+#define SCALER_FINE_INTEGRATION_TIME_MAX_MARGIN 1032
+
+#define COARSE_INTEGRATION_TIME_MIN_VAL 0
+#define COARSE_INTEGRATION_TIME_MAX_MARGIN_VAL 1
+
+#define MT9F002_FRAMEPERIOD_NS_DEF 33333333 //~30Fps
+#define MT9F002_EXPOSURE_DEF 30000
+#define MT9F002_SCALER_DEF 16
+
+struct mt9f002_clock {
+ uint32_t vt_pix_clk;
+ uint32_t op_pix_clk;
+};
+
+struct mt9f002_color_gain {
+ uint32_t greenR;
+ uint32_t blue;
+ uint32_t red;
+ uint32_t greenB;
+};
+
+struct mt9f002_pll {
+ uint32_t pre_pll_clk_div;
+ uint32_t pll_multiplier;
+ uint32_t vt_sys_clk_div;
+ uint32_t vt_pix_clk_div;
+ uint32_t row_speed_2_0;
+ uint32_t op_sys_clk_div;
+ uint32_t row_speed_10_8;
+ uint32_t op_pix_clk_div;
+ uint32_t shift_vt_pix_clk_div;
+};
+
+/* HV blanking length in µs
+ * A frame is equal to (blkStartHV_us + frameLength_us + blkExtraHV_us)
+ */
+struct mt9f002_timing {
+ uint32_t blkStartHV_us;
+ uint32_t frameLength_us;
+ uint32_t blkExtraHV_us;
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+ uint32_t exposure_us;
+#endif
+};
+
+struct mt9f002_blanking {
+ uint16_t min_line_blanking_pck;
+ uint16_t x_odd_inc;
+ uint16_t y_odd_inc;
+ uint16_t min_frame_blanking_lines;
+ uint16_t min_line_length_pck;
+ uint16_t min_line_fifo_pck;
+
+ uint16_t minimum_line_length;
+ uint16_t minimum_frame_length_lines;
+
+ uint16_t line_length;
+ uint16_t frame_length;
+};
+
+struct mt9f002_exposure {
+ uint16_t fine_integration_time_min;
+ uint16_t fine_integration_time_max_margin;
+ uint16_t coarse_integration_time_min;
+ uint16_t coarse_integration_time_max_margin;
+
+ uint16_t coarse_integration_time;
+ uint16_t fine_integration_time;
+};
+
+struct mt9f002_context {
+ struct mt9f002_color_gain colorGain; /* color gain */
+ uint32_t frameperiod_ns; /* frame period in ns */
+ uint32_t max_frameperiod_ns; /* maximum reachable framerate */
+
+ uint32_t scaler; /* ratio between sensor resolution and ouput resolution */
+
+ uint32_t maxExposure_us; /* maximum exposure time in us */
+
+ struct mt9f002_pll pll;
+ struct mt9f002_clock clock;
+ struct mt9f002_blanking blanking;
+ struct mt9f002_exposure exposure;
+ struct mt9f002_timing prev_timings;
+ struct mt9f002_timing timings;
+};
+
+struct mt9f002_timer {
+ struct hrtimer hrtimer;
+ struct video_device *vdev;
+ uint32_t frame;
+};
+
+/* Driver state for one MT9F002 sensor instance. */
+struct mt9f002 {
+	struct v4l2_subdev subdev;
+	struct i2c_client *i2c_client;
+	struct media_pad pad;		/* single source pad */
+	struct mt9f002_context ctx;	/* computed PLL/blanking/exposure state */
+
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_rect crop;
+
+	struct v4l2_ctrl_handler ctrls;
+	struct v4l2_ctrl *target_exposure_us;
+	struct v4l2_ctrl *pixel_rate;
+	struct v4l2_ctrl *hblank;
+	struct v4l2_ctrl *vblank;
+	struct v4l2_ctrl *gains[4];	/* per-channel custom gain controls */
+
+	/* presumably serializes power_count / set_power() — confirm at call sites */
+	struct mutex power_lock;
+	int power_count;
+
+	struct mt9f002_platform_data *pdata;
+	int (*set_power)(int on);	/* board-specific power callback */
+	int isStreaming;
+
+	/* Timers for FRAME_SYNC event */
+	uint32_t fsync_delay_us;
+	struct mt9f002_timer fsync_timers[2];
+#ifdef CONFIG_VIDEO_MT9F002_RESTART_BAD_FRAME
+	/* Worker thread restarting corrupted frames (see probe/remove) */
+	struct task_struct *fsync_worker_task;
+	struct kthread_worker fsync_worker;
+	struct kthread_work fsync_work;
+#endif
+	int next_timer;
+	int drop;
+	int frame;
+};
+
+/* Packed gain register fields.
+ * NOTE(review): bitfield ordering is compiler/ABI dependent — confirm the
+ * field-to-bit mapping against the gain register layout in the datasheet
+ * before relying on .val. */
+typedef union {
+	struct {
+		unsigned	analog_gain2	:7;
+		unsigned	analog_gain3	:3;
+		unsigned	colamp_gain	:2;
+		unsigned	digital_gain	:4;
+	};
+	uint32_t val;
+} pixelGain_t;
+
+#endif
diff --git a/drivers/media/video/mt9m021.c b/drivers/media/video/mt9m021.c
new file mode 100644
index 00000000000000..80171ced5760ea
--- /dev/null
+++ b/drivers/media/video/mt9m021.c
@@ -0,0 +1,1120 @@
+/*
+ * mt9m021 - Aptina CMOS Digital Image Sensor
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Tue Aug 26 15:55:49 CEST 2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+
+#include <media/media-entity.h>
+#include <media/mt9m021.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "aptina-pll.h"
+#include "mt9m021_reg.h"
+
+MODULE_AUTHOR("Eng-Hong SRON <eng-hong.sron@parrot.com>");
+MODULE_DESCRIPTION("Aptina MT9M021 driver");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME "mt9m021"
+
+#define MT9M021_CHIP_VERSION 0x3000
+#define MT9M021_CHIP_VERSION_VALUE 0x2401
+#define MT9M021_Y_ADDR_START 0x3002
+#define MT9M021_X_ADDR_START 0x3004
+#define MT9M021_Y_ADDR_END 0x3006
+#define MT9M021_X_ADDR_END 0x3008
+#define MT9M021_FRAME_LENGTH_LINES 0x300A
+#define MT9M021_LINE_LENGTH_PCK 0x300C
+#define MT9M021_MIN_LINE_LENGTH 1650
+#define MT9M021_COARSE_INT_TIME 0x3012
+#define MT9M021_FINE_INTEGRATION_TIME 0x3014
+#define MT9M021_RESET_REGISTER 0x301A
+#define MT9M021_RESET 0x00D9
+#define MT9M021_STREAM_OFF 0x00D8
+#define MT9M021_STREAM_ON 0x00DC
+#define MT9M021_TRIGGER_MODE 0x09D8
+#define MT9M021_DATA_PEDESTAL 0x301E
+#define MT9M021_ROW_SPEED 0x3028
+#define MT9M021_VT_PIX_CLK_DIV 0x302A
+#define MT9M021_VT_SYS_CLK_DIV 0x302C
+#define MT9M021_PRE_PLL_CLK_DIV 0x302E
+#define MT9M021_PLL_MULTIPLIER 0x3030
+#define MT9M021_DIGITAL_BINNING 0x3032
+#define MT9M021_HOR_AND_VER_BIN 0x0022
+#define MT9M021_HOR_BIN 0x0011
+#define MT9M021_DISABLE_BINNING 0x0000
+#define MT9M021_READ_MODE 0x3040
+#define MT9M021_DARK_CONTORL 0x3044
+#define MT9M021_GLOBAL_GAIN 0x305E
+#define MT9M021_EMBEDDED_DATA_CTRL 0x3064
+#define MT9M021_TEST_RAW_MODE 0x307A
+#define MT9M021_SEQ_DATA_PORT 0x3086
+#define MT9M021_SEQ_CTRL_PORT 0x3088
+#define MT9M021_X_ODD_INC 0x30A2
+#define MT9M021_Y_ODD_INC 0x30A6
+#define MT9M021_DIGITAL_TEST 0x30B0
+#define MT9M021_COLUMN_CORRECTION 0x30D4
+#define MT9M021_RESERVED_MFR_30EA 0x30EA
+#define MT9M021_AE_CTRL_REGISTER 0x3100
+#define MT9M021_AE_ENABLE 0x0013
+#define MT9M021_AE_MAX_EXPOSURE 0x311C
+#define MT9M021_AE_MIN_EXPOSURE 0x311E
+#define MT9M021_AE_TARGET_LUMA 0x3102
+#define MT9M021_AE_ROI_X_START 0x3140
+#define MT9M021_AE_ROI_Y_START 0x3142
+#define MT9M021_AE_ROI_X_SIZE 0x3144
+#define MT9M021_AE_ROI_Y_SIZE 0x3146
+#define MT9M021_AE_AG_EXPOSURE_HI 0x3166
+#define MT9M021_AE_AG_EXPOSURE_LO 0x3168
+#define MT9M021_RESERVED_MFR_3180 0x3180
+#define MT9M021_ANALOG_REG 0x3ED6
+#define MT9M021_TEMPSENS_CTRL 0x30B4
+#define MT9M021_TEMPSENS_EN 0x0011
+
+/* Embedded data adds 4 lines for register dump and statistics so we
+ * have to remove 4 more lines from height. */
+#define EMBEDDED_DATA_HEIGHT 4
+
+/* Driver state for one MT9M021 sensor instance. */
+struct mt9m021 {
+	struct v4l2_subdev sd;
+	struct media_pad pad;		/* single pad */
+	struct v4l2_rect crop;
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_fract frame_interval;
+	struct mt9m021_platform_data *pdata;
+	struct aptina_pll pll;		/* computed PLL configuration */
+
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct v4l2_ctrl *trigger;
+	struct v4l2_ctrl *exposure_mode;
+	struct v4l2_ctrl *exposure_abs;
+	struct v4l2_ctrl *exposure_ae_max;
+	struct v4l2_ctrl *exposure_ae_min;
+	struct v4l2_ctrl *ae_tgt_luma;
+	struct v4l2_ctrl *ae_ag_exposure_hi;
+	struct v4l2_ctrl *ae_ag_exposure_lo;
+	struct v4l2_ctrl *auto_aag;
+	struct v4l2_ctrl *auto_adg;
+	struct v4l2_ctrl *gain;
+	struct v4l2_ctrl *ana_gain;
+	/* Auto-Exposure Region Of Interest */
+	struct v4l2_ctrl *ae_roi_x_start;
+	struct v4l2_ctrl *ae_roi_y_start;
+	struct v4l2_ctrl *ae_roi_x_size;
+	struct v4l2_ctrl *ae_roi_y_size;
+
+	u8 streaming;		/* non-zero while the sensor streams */
+};
+
+/* Map an embedded v4l2_subdev back to its containing mt9m021 state. */
+static inline struct mt9m021 *to_mt9m021(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct mt9m021, sd);
+}
+
+/* Map a control (via its handler) back to the owning mt9m021 state. */
+static inline struct mt9m021 *ctrl_to_mt9m021(struct v4l2_ctrl *ctrl)
+{
+	return container_of(ctrl->handler, struct mt9m021, ctrl_handler);
+}
+
+/*
+ * mt9m021_read() - read one 16-bit register over I2C.
+ *
+ * The register address is sent MSB first; the value read back is
+ * byte-swapped into host order. NOTE(review): swab16() assumes a
+ * little-endian host — cpu_to_be16()/be16_to_cpu() would be portable.
+ *
+ * Returns 0 on success or the negative i2c_transfer() error.
+ */
+static int mt9m021_read(struct v4l2_subdev *sd, u16 reg, u16 *val)
+{
+	int ret;
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	/* keep @reg untouched so the error message prints the real address */
+	u16 reg_be = swab16(reg);
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr = client->addr,
+			.flags = 0,
+			.len = 2,
+			.buf = (u8 *)&reg_be,
+		},
+		[1] = {
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = 2,
+			.buf = (u8 *)val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	*val = swab16(*val);
+
+	return 0;
+}
+
+/*
+ * mt9m021_write() - write one 16-bit register over I2C.
+ *
+ * Address and value are transmitted MSB first in a single 4-byte message.
+ * Returns 0 on success or the negative i2c_transfer() error.
+ */
+static int mt9m021_write(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	u16 data[2];
+	struct i2c_msg msg;
+	int ret;
+
+	data[0] = swab16(reg);
+	data[1] = swab16(val);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = sizeof(data);
+	msg.buf = (u8 *)data;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/* VIDIOC_DBG_G_REGISTER handler: read one sensor register for debugging. */
+static int mt9m021_get_register(struct v4l2_subdev *sd,
+				struct v4l2_dbg_register *reg)
+{
+	int ret;
+	u16 val;
+
+	/* All MT9M021 registers are 16 bits wide */
+	reg->size = 2;
+
+	if (reg->reg & ~0xffff)
+		return -EINVAL;
+
+	ret = mt9m021_read(sd, reg->reg, &val);
+	if (ret)
+		return ret;
+
+	reg->val = (__u64)val;
+
+	return 0;
+}
+
+/* VIDIOC_DBG_S_REGISTER handler: write one sensor register for debugging. */
+static int mt9m021_set_register(struct v4l2_subdev *sd,
+				struct v4l2_dbg_register *reg)
+{
+	/* Both address and value must fit in 16 bits */
+	if (reg->reg & ~0xffff || reg->val & ~0xffff)
+		return -EINVAL;
+
+	return mt9m021_write(sd, reg->reg, reg->val);
+}
+#endif
+
+/* Core ops: raw register access is exposed only for advanced debugging */
+static const struct v4l2_subdev_core_ops mt9m021_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.g_register = mt9m021_get_register,
+	.s_register = mt9m021_set_register,
+#endif
+};
+
+/* Return the TRY format from the file handle, or the active format. */
+static int mt9m021_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+		fmt->format = *v4l2_subdev_get_try_format(fh, 0);
+	else
+		fmt->format = mt9m021->format;
+
+	return 0;
+}
+
+/* Store the requested active format.
+ * NOTE(review): the TRY branch reads the stored try format back into
+ * @fmt instead of updating it — confirm this is the intended behavior. */
+static int mt9m021_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			   struct v4l2_subdev_format *fmt)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+		fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+		return 0;
+	}
+
+	mt9m021->format = fmt->format;
+
+	return 0;
+}
+
+/* Enumerate the single media bus code supported by the sensor. */
+static int mt9m021_enum_mbus_code(struct v4l2_subdev *sd,
+				  struct v4l2_subdev_fh *fh,
+				  struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	/* Only one format is available */
+	if (code->index > 0)
+		return -EINVAL;
+
+	code->code = mt9m021->format.code;
+	return 0;
+}
+
+/* Report the crop bounds (full active array) or the current crop. */
+static int mt9m021_get_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	if (sel->target == V4L2_SEL_TGT_CROP_BOUNDS) {
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = 1280;
+		sel->r.height = 960;
+		return 0;
+	}
+
+	if (sel->target == V4L2_SEL_TGT_CROP) {
+		sel->r = mt9m021->crop;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Update the crop rectangle; only V4L2_SEL_TGT_CROP is supported. */
+static int mt9m021_set_selection(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_selection *sel)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	if (sel->target != V4L2_SEL_TGT_CROP) {
+		v4l2_err(sd, "selection target (%d) not supported yet\n",
+			 sel->target);
+		return -EINVAL;
+	}
+
+	mt9m021->crop = sel->r;
+
+	return 0;
+}
+
+/*
+ * A-1000GS Rev2 Optimized Sequencer 04-07-11
+ *
+ * Opaque sequencer microcode words supplied by Aptina, streamed to the
+ * sensor by mt9m021_sequencer_settings().  Do not edit by hand.
+ */
+static u16 mt9m021_seq_data[] = {
+	0x3227, 0x0101, 0x0F25, 0x0808, 0x0227, 0x0101, 0x0837, 0x2700,
+	0x0138, 0x2701, 0x013A, 0x2700, 0x0125, 0x0020, 0x3C25, 0x0040,
+	0x3427, 0x003F, 0x2500, 0x2037, 0x2540, 0x4036, 0x2500, 0x4031,
+	0x2540, 0x403D, 0x6425, 0x2020, 0x3D64, 0x2510, 0x1037, 0x2520,
+	0x2010, 0x2510, 0x100F, 0x2708, 0x0802, 0x2540, 0x402D, 0x2608,
+	0x280D, 0x1709, 0x2600, 0x2805, 0x26A7, 0x2807, 0x2580, 0x8029,
+	0x1705, 0x2500, 0x4027, 0x2222, 0x1616, 0x2726, 0x2617, 0x3626,
+	0xA617, 0x0326, 0xA417, 0x1F28, 0x0526, 0x2028, 0x0425, 0x2020,
+	0x2700, 0x2625, 0x0000, 0x171E, 0x2500, 0x0425, 0x0020, 0x2117,
+	0x121B, 0x1703, 0x2726, 0x2617, 0x2828, 0x0517, 0x1A26, 0x6017,
+	0xAE25, 0x0080, 0x2700, 0x2626, 0x1828, 0x002E, 0x2A28, 0x081E,
+	0x4127, 0x1010, 0x0214, 0x6060, 0x0A14, 0x6060, 0x0B14, 0x6060,
+	0x0C14, 0x6060, 0x0D14, 0x6060, 0x0217, 0x3C14, 0x0060, 0x0A14,
+	0x0060, 0x0B14, 0x0060, 0x0C14, 0x0060, 0x0D14, 0x0060, 0x0811,
+	0x2500, 0x1027, 0x0010, 0x2F6F, 0x0F3E, 0x2500, 0x0827, 0x0008,
+	0x3066, 0x3225, 0x0008, 0x2700, 0x0830, 0x6631, 0x3D64, 0x2508,
+	0x083D, 0xFF3D, 0x2A27, 0x083F, 0x2C00
+};
+
+/* Vendor analog tuning values, written to consecutive 16-bit registers
+ * starting at MT9M021_ANALOG_REG by mt9m021_rev2_settings(). */
+static u16 mt9m021_analog_setting[] = {
+	0x00FD, 0x0FFF, 0x0003, 0xF87A, 0xE075, 0x077C, 0xA4EB, 0xD208
+};
+
+/*
+ * Load the Aptina-provided sequencer microcode into the sensor: write
+ * 0x8000 to the sequencer control port, then stream every word of
+ * mt9m021_seq_data to the data port.
+ * NOTE(review): the 0x8000 access mode value comes from vendor init
+ * code -- confirm against the datasheet before changing it.
+ */
+static void mt9m021_sequencer_settings(struct v4l2_subdev *sd)
+{
+	int i;
+
+	mt9m021_write(sd, MT9M021_SEQ_CTRL_PORT, 0x8000);
+
+	for (i = 0 ; i < ARRAY_SIZE(mt9m021_seq_data) ; i++)
+		mt9m021_write(sd, MT9M021_SEQ_DATA_PORT, mt9m021_seq_data[i]);
+
+}
+
+/*
+ * Column Correction ReTriggering
+ *
+ * Re-arm the column correction circuitry by cycling streaming with the
+ * correction disabled, then re-enabling the correction while streaming
+ * is stopped.  Each 200 ms sleep lets the previous step settle.
+ * The sensor is left with streaming OFF and column correction ON.
+ */
+static void mt9m021_col_correction(struct v4l2_subdev *sd)
+{
+	/* Disable Streaming */
+	mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_STREAM_OFF);
+
+	/* Disable column correction */
+	mt9m021_write(sd, MT9M021_COLUMN_CORRECTION, 0x0000);
+
+	msleep(200);
+
+	/* Enable Streaming */
+	mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_STREAM_ON);
+
+	msleep(200);
+
+	/* Disable Streaming */
+	mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_STREAM_OFF);
+
+	/* Enable column correction */
+	mt9m021_write(sd, MT9M021_COLUMN_CORRECTION, 0x0001);
+
+	msleep(200);
+}
+
+/*
+ * A-1000GS REV2 Optimized Settings
+ *
+ * Magic register values supplied by Aptina for this silicon revision;
+ * the reserved manufacturer registers are not publicly documented.
+ */
+static void mt9m021_rev2_settings(struct v4l2_subdev *sd)
+{
+	int i;
+
+	mt9m021_write(sd, MT9M021_TEST_RAW_MODE, 0x0000);
+	mt9m021_write(sd, MT9M021_RESERVED_MFR_30EA, 0x0C00);
+	mt9m021_write(sd, MT9M021_DARK_CONTORL, 0x0404);
+	mt9m021_write(sd, MT9M021_DATA_PEDESTAL, 0x012C);
+	mt9m021_write(sd, MT9M021_RESERVED_MFR_3180, 0x8000);
+	mt9m021_write(sd, MT9M021_COLUMN_CORRECTION, 0xE007);
+	mt9m021_write(sd, MT9M021_FINE_INTEGRATION_TIME, 0x0000);
+
+	/* Analog Settings: consecutive 16-bit registers starting at
+	 * MT9M021_ANALOG_REG, hence the 2-byte stride. */
+	for (i = 0 ; i < ARRAY_SIZE(mt9m021_analog_setting) ; i++)
+		mt9m021_write(sd, MT9M021_ANALOG_REG + 2 * i,
+			      mt9m021_analog_setting[i]);
+}
+
+/*
+ * Compute and program the sensor PLL (pre-divider N, multiplier M and
+ * pixel clock divider P1) from the ext/pix clock pair stored in
+ * mt9m021->pll by the probe.
+ *
+ * Returns 0 on success or the error from aptina_pll_calculate().
+ */
+static int mt9m021_pll_setup(struct v4l2_subdev *sd)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	int ret;
+
+	/* For now, as we are only in parallel mode, we can consider p1 as fixed
+	 * to 1. So we compute only p2
+	 */
+	static const struct aptina_pll_limits limits = {
+		.ext_clock_min = 6000000,
+		.ext_clock_max = 50000000,
+		.int_clock_min = 3000000,
+		.int_clock_max = 50000000,
+		.out_clock_min = 384000000,
+		.out_clock_max = 768000000,
+		.pix_clock_max = 74250000,
+
+		.n_min = 1,
+		.n_max = 63,
+		.m_min = 32,
+		.m_max = 255,
+		.p1_min = 4,
+		.p1_max = 16,
+	};
+
+	ret = aptina_pll_calculate(&client->dev, &limits, &mt9m021->pll);
+	if (ret)
+		return ret;
+
+	mt9m021_write(sd, MT9M021_VT_SYS_CLK_DIV, 1);
+	mt9m021_write(sd, MT9M021_VT_PIX_CLK_DIV, mt9m021->pll.p1);
+	mt9m021_write(sd, MT9M021_PRE_PLL_CLK_DIV, mt9m021->pll.n);
+	mt9m021_write(sd, MT9M021_PLL_MULTIPLIER, mt9m021->pll.m);
+
+	/* NOTE(review): presumably the PLL lock time -- confirm against
+	 * the datasheet. */
+	msleep(100);
+
+	return 0;
+}
+
+/*
+ * Program the readout window and binning mode from the current crop
+ * and output format.  The crop height includes EMBEDDED_DATA_HEIGHT
+ * lines of metadata, which are excluded from the vertical ratio and
+ * from the Y end address.
+ */
+static int mt9m021_set_size(struct v4l2_subdev *sd)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	struct v4l2_rect *c = &mt9m021->crop;
+	struct v4l2_mbus_framefmt *fmt = &mt9m021->format;
+
+	/* Down-scaling ratios between the cropped window and the output */
+	int hratio = DIV_ROUND_CLOSEST(c->width, fmt->width);
+	int vratio = DIV_ROUND_CLOSEST(c->height - EMBEDDED_DATA_HEIGHT,
+				       fmt->height);
+	u16 binning;
+
+	/* MT9M021 supports only 3 modes:
+	 *   - No binning
+	 *   - Horizontal only binning
+	 *   - Horizontal and Vertical binning
+	 */
+	if (hratio == 2) {
+		if (vratio == 2)
+			binning = MT9M021_HOR_AND_VER_BIN;
+		else
+			binning = MT9M021_HOR_BIN;
+	} else {
+		binning = MT9M021_DISABLE_BINNING;
+	}
+
+	mt9m021_write(sd, MT9M021_DIGITAL_BINNING, binning);
+	/* Skipping: TODO in function of fmt / crop */
+	mt9m021_write(sd, MT9M021_X_ODD_INC, 0x0001);
+	mt9m021_write(sd, MT9M021_Y_ODD_INC, 0x0001);
+
+	/* Window addresses are inclusive, hence the "- 1" on both ends */
+	mt9m021_write(sd, MT9M021_Y_ADDR_START, mt9m021->crop.top);
+	mt9m021_write(sd, MT9M021_X_ADDR_START, mt9m021->crop.left);
+	mt9m021_write(sd, MT9M021_Y_ADDR_END, mt9m021->crop.top
+		      + mt9m021->crop.height - 1 - EMBEDDED_DATA_HEIGHT);
+	mt9m021_write(sd, MT9M021_X_ADDR_END, mt9m021->crop.left +
+		      mt9m021->crop.width - 1);
+
+	return 0;
+}
+
+/*
+ * Push the current exposure/gain/auto-exposure control values into
+ * the sensor registers.
+ *
+ * Exposure controls are expressed in micro-seconds; they are converted
+ * to sensor line counts using the line duration derived from the PLL
+ * pixel clock and the minimum line length.
+ *
+ * Returns the status of the final AE control register write, or
+ * -EINVAL when the pixel clock is not yet configured (which would
+ * otherwise lead to a division by zero below).
+ */
+static int mt9m021_apply_exposure(struct v4l2_subdev *sd)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	u32 exposure_mode = mt9m021->exposure_mode->val;
+	u32 exposure_abs = mt9m021->exposure_abs->val;
+	u32 exposure_ae_max = mt9m021->exposure_ae_max->val;
+	u32 exposure_ae_min = mt9m021->exposure_ae_min->val;
+	u32 ae_tgt_luma = mt9m021->ae_tgt_luma->val;
+	u32 ae_ag_exposure_hi = mt9m021->ae_ag_exposure_hi->val;
+	u32 ae_ag_exposure_lo = mt9m021->ae_ag_exposure_lo->val;
+	u32 ae_roi_x_start = mt9m021->ae_roi_x_start->val;
+	u32 ae_roi_y_start = mt9m021->ae_roi_y_start->val;
+	u32 ae_roi_x_size = mt9m021->ae_roi_x_size->val;
+	u32 ae_roi_y_size = mt9m021->ae_roi_y_size->val;
+	u32 auto_aag = mt9m021->auto_aag->val;
+	u32 auto_adg = mt9m021->auto_adg->val;
+	u32 gain = mt9m021->gain->val;
+	u32 ana_gain = mt9m021->ana_gain->val;
+	u32 line_duration_ns;
+	u16 int_time;
+
+	union ae_ctrl_reg ae_ctrl_reg = { ._register = 0 };
+	union dig_test_reg dig_test_reg = {
+		.mono_chrome = 1,
+		.col_gain = ana_gain,
+	};
+
+	/* Every conversion below divides by the line duration, itself
+	 * derived from the pixel clock: bail out early rather than
+	 * divide by zero when the clock is not configured. */
+	if (mt9m021->pll.pix_clock == 0)
+		return -EINVAL;
+
+	/* AAG and ADG both need Auto Exposure to function */
+	if (exposure_mode == V4L2_EXPOSURE_AUTO || auto_aag || auto_adg)
+		ae_ctrl_reg.ae_enable = 1;
+
+	if (auto_aag)
+		ae_ctrl_reg.auto_ag_en = 1;
+
+	if (auto_adg)
+		ae_ctrl_reg.auto_dg_en = 1;
+
+	/* Integration time per line */
+	line_duration_ns = div_u64((u64)1000000000 * MT9M021_MIN_LINE_LENGTH,
+				   mt9m021->pll.pix_clock);
+	if (line_duration_ns == 0)
+		return -EINVAL;
+
+	int_time = (exposure_abs * 1000) / line_duration_ns;
+
+	mt9m021_write(sd, MT9M021_COARSE_INT_TIME, int_time);
+
+	/* Max AE time */
+	int_time = (exposure_ae_max * 1000) / line_duration_ns;
+	mt9m021_write(sd, MT9M021_AE_MAX_EXPOSURE, int_time);
+
+	/* Min AE time */
+	int_time = (exposure_ae_min * 1000) / line_duration_ns;
+	mt9m021_write(sd, MT9M021_AE_MIN_EXPOSURE, int_time);
+
+	/* AE Luma Target */
+	mt9m021_write(sd, MT9M021_AE_TARGET_LUMA, ae_tgt_luma);
+	mt9m021_write(sd, MT9M021_AE_AG_EXPOSURE_HI, ae_ag_exposure_hi);
+	mt9m021_write(sd, MT9M021_AE_AG_EXPOSURE_LO, ae_ag_exposure_lo);
+
+	/* Analogue gain */
+	mt9m021_write(sd, MT9M021_DIGITAL_TEST, dig_test_reg._register);
+
+	mt9m021_write(sd, MT9M021_GLOBAL_GAIN, gain);
+
+	/* AE ROI ('sd' is the same subdev as &mt9m021->sd; use it
+	 * consistently with the rest of the function) */
+	mt9m021_write(sd, MT9M021_AE_ROI_X_START, ae_roi_x_start);
+	mt9m021_write(sd, MT9M021_AE_ROI_Y_START, ae_roi_y_start);
+	mt9m021_write(sd, MT9M021_AE_ROI_X_SIZE, ae_roi_x_size);
+	mt9m021_write(sd, MT9M021_AE_ROI_Y_SIZE, ae_roi_y_size);
+
+	return mt9m021_write(sd, MT9M021_AE_CTRL_REGISTER,
+			     ae_ctrl_reg._register);
+}
+
+/*
+ * Start or stop streaming.
+ *
+ * Stream-on performs the full sensor bring-up: optional power-on via
+ * the platform hook, optional soft reset, the vendor init sequences,
+ * PLL setup, window/binning, blanking, exposure, then streaming (or
+ * external trigger mode, depending on the trigger control) is entered.
+ * Stream-off stops streaming and powers the sensor down.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mt9m021_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	struct mt9m021_platform_data *pdata = mt9m021->pdata;
+	struct v4l2_fract *fi = &mt9m021->frame_interval;
+	int ret = 0;
+	u16 frame_length;
+
+	if (enable == 0) {
+		/* Stream off */
+		mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_STREAM_OFF);
+
+		if (pdata->set_power)
+			pdata->set_power(0);
+
+		mt9m021->streaming = 0;
+		return 0;
+	}
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(1);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	/*
+	 * Resetting the sensor will put the all sensor registers back to their
+	 * default values. In some cases (mainly the i2c slave address), it is
+	 * better to avoid this behavior.
+	 */
+	if (!pdata->no_soft_reset)
+		mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_RESET);
+
+	/*
+	 * The following functions have been taken directly from Aptina
+	 * software, I have no idea what it does...
+	 */
+	mt9m021_sequencer_settings(sd);
+	mt9m021_col_correction(sd);
+	mt9m021_rev2_settings(sd);
+
+	ret = mt9m021_pll_setup(sd);
+	if (ret < 0)
+		goto error_pll_setup;
+
+	mt9m021_set_size(sd);
+
+	/* Flip */
+	mt9m021_write(sd, MT9M021_READ_MODE, 0x0000);
+
+	/* Blanking:
+	 * HBLANK is minimized
+	 * VBLANK set according to the FPS
+	 */
+	mt9m021_write(sd, MT9M021_LINE_LENGTH_PCK, MT9M021_MIN_LINE_LENGTH);
+
+	/* frame period / line period, i.e. lines per frame */
+	frame_length = div_u64((u64) mt9m021->pll.pix_clock * fi->numerator,
+			       MT9M021_MIN_LINE_LENGTH * fi->denominator);
+
+	mt9m021_write(sd, MT9M021_FRAME_LENGTH_LINES, frame_length);
+
+	/* Enable embedded data */
+	mt9m021_write(sd, MT9M021_EMBEDDED_DATA_CTRL, 0x1982);
+
+	/* Set exposure mode & integration time */
+	mt9m021_apply_exposure(sd);
+
+	/* Temperature sensor enable */
+	mt9m021_write(sd, MT9M021_TEMPSENS_CTRL, MT9M021_TEMPSENS_EN);
+
+	/* Stream on */
+	if (mt9m021->trigger->val)
+		mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_TRIGGER_MODE);
+	else
+		mt9m021_write(sd, MT9M021_RESET_REGISTER, MT9M021_STREAM_ON);
+
+	mt9m021->streaming = 1;
+
+	return 0;
+
+error_pll_setup:
+	if (pdata->set_power)
+		pdata->set_power(0);
+
+	return ret;
+}
+
+/* Return the currently configured frame interval. */
+static int mt9m021_g_frame_interval(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_frame_interval *fi)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	memset(fi, 0, sizeof *fi);
+	fi->interval = mt9m021->frame_interval;
+	return 0;
+}
+
+/*
+ * Set the wanted frame interval.
+ *
+ * The interval is validated here because it is later used as a divisor
+ * when computing the frame length in mt9m021_s_stream(): a zero
+ * denominator would cause a division by zero and a zero numerator a
+ * null frame length.
+ */
+static int mt9m021_s_frame_interval(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_frame_interval *fi)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+		return -EINVAL;
+
+	mt9m021->frame_interval = fi->interval;
+
+	return 0;
+}
+
+/*
+ * Report the active digital video timings: output width/height from
+ * the current format and the PLL pixel clock.  All other BT fields are
+ * left at zero.
+ */
+static int mt9m021_g_dv_timings(struct v4l2_subdev *sd,
+				struct v4l2_dv_timings *timings)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	struct v4l2_bt_timings *bt;
+
+	memset(timings, 0, sizeof *timings);
+
+	bt = &timings->bt;
+	bt->width = mt9m021->format.width;
+	bt->height = mt9m021->format.height;
+	bt->pixelclock = mt9m021->pll.pix_clock;
+
+	return 0;
+}
+
+/* Video ops: streaming control, frame interval and DV timings. */
+static const struct v4l2_subdev_video_ops mt9m021_video_ops = {
+	.s_stream = mt9m021_s_stream,
+	.g_frame_interval = mt9m021_g_frame_interval,
+	.s_frame_interval = mt9m021_s_frame_interval,
+	.g_dv_timings = mt9m021_g_dv_timings,
+};
+
+/* Pad ops: format negotiation and crop selection. */
+static const struct v4l2_subdev_pad_ops mt9m021_pad_ops = {
+	.get_fmt = mt9m021_get_fmt,
+	.set_fmt = mt9m021_set_fmt,
+	.enum_mbus_code = mt9m021_enum_mbus_code,
+	.get_selection = mt9m021_get_selection,
+	.set_selection = mt9m021_set_selection,
+};
+
+static const struct v4l2_subdev_ops mt9m021_ops = {
+	.core = &mt9m021_core_ops,
+	.video = &mt9m021_video_ops,
+	.pad = &mt9m021_pad_ops,
+};
+
+/* Driver-private control ids, allocated in the camera-class custom
+ * range starting at V4L2_CID_CAMERA_CLASS_BASE + 0x100. */
+#define V4L2_CID_MT9M021_TRIGGER (V4L2_CID_CAMERA_CLASS_BASE + 0x100)
+#define V4L2_CID_MT9M021_AUTO_ANAGAIN (V4L2_CID_CAMERA_CLASS_BASE + 0x101)
+#define V4L2_CID_MT9M021_AUTO_DIGGAIN (V4L2_CID_CAMERA_CLASS_BASE + 0x102)
+#define V4L2_CID_MT9M021_EXPOSURE_AUTO_MAX (V4L2_CID_CAMERA_CLASS_BASE + 0x103)
+#define V4L2_CID_MT9M021_EXPOSURE_AUTO_MIN (V4L2_CID_CAMERA_CLASS_BASE + 0x104)
+#define V4L2_CID_MT9M021_EXPOSURE_TGT_LUMA (V4L2_CID_CAMERA_CLASS_BASE + 0x105)
+
+/* Region of Interest for Auto-Exposure */
+#define V4L2_CID_MT9M021_AE_ROI_X_START (V4L2_CID_CAMERA_CLASS_BASE + 0x106)
+#define V4L2_CID_MT9M021_AE_ROI_Y_START (V4L2_CID_CAMERA_CLASS_BASE + 0x107)
+#define V4L2_CID_MT9M021_AE_ROI_X_SIZE (V4L2_CID_CAMERA_CLASS_BASE + 0x108)
+#define V4L2_CID_MT9M021_AE_ROI_Y_SIZE (V4L2_CID_CAMERA_CLASS_BASE + 0x109)
+
+#define V4L2_CID_MT9M021_AE_AG_EXPOSURE_HI (V4L2_CID_CAMERA_CLASS_BASE + 0x10a)
+#define V4L2_CID_MT9M021_AE_AG_EXPOSURE_LO (V4L2_CID_CAMERA_CLASS_BASE + 0x10b)
+
+
+/*
+ * Control set handler.  All exposure/gain related controls funnel into
+ * mt9m021_apply_exposure(), which rewrites the whole exposure/AE
+ * register set from the current control values.
+ */
+static int mt9m021_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct mt9m021 *mt9m021 = ctrl_to_mt9m021(ctrl);
+
+	/* If not streaming, just keep interval structures up-to-date */
+	if (!mt9m021->streaming)
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MT9M021_AUTO_ANAGAIN:
+	case V4L2_CID_MT9M021_AUTO_DIGGAIN:
+	case V4L2_CID_EXPOSURE_AUTO:
+	case V4L2_CID_EXPOSURE_ABSOLUTE:
+	case V4L2_CID_MT9M021_EXPOSURE_AUTO_MAX:
+	case V4L2_CID_MT9M021_EXPOSURE_AUTO_MIN:
+	case V4L2_CID_MT9M021_AE_AG_EXPOSURE_HI:
+	case V4L2_CID_MT9M021_AE_AG_EXPOSURE_LO:
+	case V4L2_CID_MT9M021_EXPOSURE_TGT_LUMA:
+	case V4L2_CID_GAIN:
+	case V4L2_CID_ANALOGUE_GAIN:
+	case V4L2_CID_MT9M021_AE_ROI_X_START:
+	case V4L2_CID_MT9M021_AE_ROI_Y_START:
+	case V4L2_CID_MT9M021_AE_ROI_X_SIZE:
+	case V4L2_CID_MT9M021_AE_ROI_Y_SIZE:
+		return mt9m021_apply_exposure(&mt9m021->sd);
+	}
+
+	/* Remaining controls (e.g. the trigger mode) are only applied
+	 * at stream-on. */
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops mt9m021_ctrl_ops = {
+	.s_ctrl = mt9m021_s_ctrl,
+};
+
+/* Boolean control: when set, stream-on arms the external trigger mode
+ * instead of free-running streaming (see mt9m021_s_stream()). */
+static const struct v4l2_ctrl_config mt9m021_ctrl_trigger = {
+	.ops = &mt9m021_ctrl_ops,
+	.id = V4L2_CID_MT9M021_TRIGGER,
+	.name = "Trigger mode",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min = false,
+	.max = true,
+	.step = 1,
+	.def = false,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_aag = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AUTO_ANAGAIN,
+ .name = "Auto analogue gain",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = true,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_adg = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AUTO_DIGGAIN,
+ .name = "Auto digital gain",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = true,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_max = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_EXPOSURE_AUTO_MAX,
+ .name = "Max allowed Auto Exposure (us)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1000000,
+ .step = 1,
+ .def = 30000,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_min = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_EXPOSURE_AUTO_MIN,
+ .name = "Min allowed Auto Exposure (us)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1000000,
+ .step = 1,
+ .def = 1,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_tgt_luma = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_EXPOSURE_TGT_LUMA,
+ .name = "Auto Exposure target luma",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 8192,
+ .step = 1,
+ .def = 0x500,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_ag_exposure_hi = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_AG_EXPOSURE_HI,
+ .name = "Upper exposure limit for increasing the Analog gain (us)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1000000,
+ .step = 1,
+ .def = 20000,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_ag_exposure_lo = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_AG_EXPOSURE_LO,
+ .name = "Lower exposure limit for increasing the Analog gain (us)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1000000,
+ .step = 1,
+ .def = 10000,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_roi_x_start = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_ROI_X_START,
+ .name = "Auto Exposure ROI X start",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1280 - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_roi_y_start = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_ROI_Y_START,
+ .name = "Auto Exposure ROI Y start",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 960 - 1,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_roi_x_size = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_ROI_X_SIZE,
+ .name = "Auto Exposure ROI width",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1280,
+ .step = 1,
+ .def = 1280,
+};
+
+static const struct v4l2_ctrl_config mt9m021_ctrl_ae_roi_y_size = {
+ .ops = &mt9m021_ctrl_ops,
+ .id = V4L2_CID_MT9M021_AE_ROI_Y_SIZE,
+ .name = "Auto Exposure ROI height",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 960,
+ .step = 1,
+ .def = 960,
+};
+
+/*
+ * Register all V4L2 controls: the standard exposure/gain controls plus
+ * the driver-private auto-exposure controls defined above.
+ *
+ * The v4l2_ctrl_new_* helpers record any failure in hdl->error, so a
+ * single error check after all registrations is sufficient.
+ *
+ * Returns 0 on success or a negative errno; on failure the handler is
+ * freed before returning.
+ */
+static int mt9m021_initialize_controls(struct v4l2_subdev *sd)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	struct v4l2_ctrl_handler *hdl = &mt9m021->ctrl_handler;
+	int ret;
+
+	ret = v4l2_ctrl_handler_init(hdl, 16);
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init ctrl handler\n");
+		goto einit;
+	}
+
+	mt9m021->trigger = v4l2_ctrl_new_custom(hdl, &mt9m021_ctrl_trigger, NULL);
+	mt9m021->auto_aag = v4l2_ctrl_new_custom(hdl, &mt9m021_ctrl_aag, NULL);
+	mt9m021->auto_adg = v4l2_ctrl_new_custom(hdl, &mt9m021_ctrl_adg, NULL);
+	mt9m021->exposure_mode = v4l2_ctrl_new_std_menu(hdl, &mt9m021_ctrl_ops,
+			V4L2_CID_EXPOSURE_AUTO,
+			3, ~3, V4L2_EXPOSURE_AUTO);
+
+	mt9m021->exposure_abs = v4l2_ctrl_new_std(hdl,
+			&mt9m021_ctrl_ops,
+			V4L2_CID_EXPOSURE_ABSOLUTE,
+			0, 1000000, 1, 20000);
+
+	mt9m021->gain = v4l2_ctrl_new_std(hdl,
+			&mt9m021_ctrl_ops,
+			V4L2_CID_GAIN,
+			0, 255, 1, 32);
+
+	mt9m021->ana_gain = v4l2_ctrl_new_std(hdl,
+			&mt9m021_ctrl_ops,
+			V4L2_CID_ANALOGUE_GAIN,
+			0, 3, 1, 0);
+
+	mt9m021->exposure_ae_max = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_max,
+			NULL);
+
+	mt9m021->exposure_ae_min = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_min,
+			NULL);
+
+	mt9m021->ae_ag_exposure_hi = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_ag_exposure_hi,
+			NULL);
+
+	mt9m021->ae_ag_exposure_lo = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_ag_exposure_lo,
+			NULL);
+
+	mt9m021->ae_tgt_luma = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_tgt_luma,
+			NULL);
+
+	/* Auto-Exposure Region Of Interest */
+	mt9m021->ae_roi_x_start = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_roi_x_start,
+			NULL);
+
+	mt9m021->ae_roi_y_start = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_roi_y_start,
+			NULL);
+
+	mt9m021->ae_roi_x_size = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_roi_x_size,
+			NULL);
+
+	mt9m021->ae_roi_y_size = v4l2_ctrl_new_custom(hdl,
+			&mt9m021_ctrl_ae_roi_y_size,
+			NULL);
+
+	if (hdl->error) {
+		v4l2_err(sd, "failed to add new controls\n");
+		ret = hdl->error;
+		goto ectrl;
+	}
+
+	sd->ctrl_handler = hdl;
+
+	return 0;
+
+ectrl:
+	v4l2_ctrl_handler_free(hdl);
+einit:
+	return ret;
+}
+
+/*
+ * Power the sensor up briefly and verify the chip version register.
+ *
+ * Returns 0 when MT9M021_CHIP_VERSION_VALUE is read back, -ENODEV
+ * otherwise.  Note the return of mt9m021_read() is not checked: on a
+ * failed read 'id' keeps its 0 initializer and the mismatch path below
+ * reports the error anyway.
+ */
+static int mt9m021_detect_chip(struct v4l2_subdev *sd)
+{
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+	struct mt9m021_platform_data *pdata = mt9m021->pdata;
+	int ret = 0;
+	u16 id = 0;
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(1);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	mt9m021_read(sd, MT9M021_CHIP_VERSION, &id);
+
+	if (pdata->set_power)
+		pdata->set_power(0);
+
+	if (id != MT9M021_CHIP_VERSION_VALUE) {
+		v4l2_err(sd, "Error Chip ID = 0x%04x instead of 0x%04x\n",
+			 id, MT9M021_CHIP_VERSION_VALUE);
+		return -ENODEV;
+	}
+
+	v4l2_info(sd, "Found " DRIVER_NAME " chip\n");
+
+	return 0;
+}
+
+/*
+ * i2c probe: allocate driver state, register the subdev and media pad,
+ * set the default configuration (Y10, 1280x960 at 30 fps, full-frame
+ * crop), detect the chip and register the controls.
+ *
+ * Returns 0 on success or a negative errno.  All error paths share the
+ * same cleanup tail; NOTE(review): media_entity_cleanup() and
+ * v4l2_device_unregister_subdev() are also run for paths where the
+ * corresponding init did not complete -- presumably both tolerate
+ * that, confirm against the kernel version in use.
+ */
+static int mt9m021_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct mt9m021 *mt9m021;
+	struct mt9m021_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	int ret = 0;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "platform data not specified\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "i2c not available\n");
+		return -ENODEV;
+	}
+
+	mt9m021 = kzalloc(sizeof(*mt9m021), GFP_KERNEL);
+	if (!mt9m021) {
+		dev_err(&client->dev, "alloc failed for data structure\n");
+		return -ENOMEM;
+	}
+
+	mt9m021->pdata = pdata;
+
+	sd = &mt9m021->sd;
+	v4l2_i2c_subdev_init(sd, client, &mt9m021_ops);
+
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	mt9m021->pad.flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+	ret = media_entity_init(&sd->entity, 1, &mt9m021->pad, 0);
+
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init media entity\n");
+		goto error_media_entity_init;
+	}
+
+	/* Set default configuration: Y10 960p30 at center */
+	mt9m021->crop.width = 1280;
+	mt9m021->crop.height = 960;
+	mt9m021->crop.left = 0;
+	mt9m021->crop.top = 0;
+	mt9m021->format.width = 1280;
+	mt9m021->format.height = 960;
+	mt9m021->format.code = V4L2_MBUS_FMT_Y10_1X10;
+	mt9m021->frame_interval.numerator = 1;
+	mt9m021->frame_interval.denominator = 30;
+
+	/* Clocks */
+	mt9m021->pll.ext_clock = pdata->ext_clock;
+	mt9m021->pll.pix_clock = pdata->pix_clock;
+
+	/* Check if the chip is present */
+	ret = mt9m021_detect_chip(sd);
+	if (ret < 0)
+		goto error_detect;
+
+	ret = mt9m021_initialize_controls(sd);
+	if (ret < 0)
+		goto error_ctrl;
+
+	return 0;
+
+error_ctrl:
+error_detect:
+error_media_entity_init:
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(mt9m021);
+
+	return ret;
+}
+
+/* i2c removal: release controls, media entity and driver state. */
+static int mt9m021_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct mt9m021 *mt9m021 = to_mt9m021(sd);
+
+	v4l2_ctrl_handler_free(&mt9m021->ctrl_handler);
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(mt9m021);
+
+	return 0;
+}
+
+static const struct i2c_device_id mt9m021_id[] = {
+ {DRIVER_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, mt9m021_id);
+
+static struct i2c_driver mt9m021_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = mt9m021_probe,
+ .remove = mt9m021_remove,
+ .id_table = mt9m021_id,
+};
+
+module_i2c_driver(mt9m021_driver);
diff --git a/drivers/media/video/mt9m021_reg.h b/drivers/media/video/mt9m021_reg.h
new file mode 100644
index 00000000000000..268e10e2f50ed9
--- /dev/null
+++ b/drivers/media/video/mt9m021_reg.h
@@ -0,0 +1,35 @@
+/*
+ * mt9m021_reg - Aptina CMOS Digital Image Sensor registers
+ *
+ * Author : Maxime JOURDAN <maxime.jourdan@parrot.com>
+ *
+ * Date : 12/03/2015
+ *
+ */
+#ifndef __MT9M021_REG_H__
+#define __MT9M021_REG_H__
+
+/* Auto-exposure control register layout.
+ *
+ * NOTE(review): bit-field ordering within a storage unit is
+ * implementation-defined; overlaying the bit-fields on '_register'
+ * assumes the compiler packs them from the LSB upward -- confirm for
+ * every toolchain this driver is built with.
+ */
+union ae_ctrl_reg {
+	struct {
+		u8 ae_enable : 1;	/* master auto-exposure enable */
+		u8 auto_ag_en : 1;	/* automatic analog gain */
+		u8 reserved : 2;
+		u8 auto_dg_en : 1;	/* automatic digital gain */
+		u8 min_ana_gain : 2;
+	};
+
+	u16 _register;
+};
+
+/* Digital test register layout (column gain and monochrome flag). */
+union dig_test_reg {
+	struct {
+		u8 reserved : 4;
+		u8 col_gain : 2;
+		u8 reserved_2 : 1;
+		u8 mono_chrome : 1;
+	};
+
+	u16 _register;
+};
+
+#endif /* __MT9M021_REG_H__ */
diff --git a/drivers/media/video/mt9v117.c b/drivers/media/video/mt9v117.c
new file mode 100644
index 00000000000000..35e807ad4d9516
--- /dev/null
+++ b/drivers/media/video/mt9v117.c
@@ -0,0 +1,896 @@
+/* mt9v117.c
+ *
+ * Driver for aptina mt9v117
+ *
+ * Author : Julien BERAUD <julien.beraud@parrot.com>
+ *
+ * Date : 23/04/2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/gcd.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/mt9v117.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include "mt9v117_patches.h"
+
+MODULE_AUTHOR("Julien BERAUD <julien.beraud@parrot.com>");
+MODULE_DESCRIPTION("kernel driver for mt9v117");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+#define MT9V117_frameperiod_ns_DEF 36353853 //10000·275074 ~= 27Fps
+#define MIN_HORIZONTAL_BLANKING 210
+#define MIN_VERTICAL_BLANKING 31
+
+/* mt9v117 driver state, embedding the v4l2 subdev. */
+struct mt9v117 {
+	struct v4l2_subdev sd;
+	struct i2c_client *i2c_client;
+	struct media_pad pad;
+	struct v4l2_mbus_framefmt format;
+
+	struct v4l2_ctrl_handler ctrls;
+	struct v4l2_ctrl *gains[4];
+
+	int input;
+	struct mt9v117_platform_data *pdata;
+	uint32_t frameperiod_ns; /* user desired frame rate */
+	uint32_t real_frameperiod_ns; /* real framerate */
+	uint16_t line_length;	/* computed by mt9v117_update_timings() */
+	uint16_t frame_length;	/* computed by mt9v117_update_timings() */
+
+	int isStreaming;
+	bool timings_uptodate;	/* set once update_timings() succeeded */
+	int (*set_power)(int on);
+};
+
+/* Retrieve the driver state embedding a given subdev. */
+static inline struct mt9v117 *to_mt9v117(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct mt9v117, sd);
+}
+
+/* write a register */
+/*
+ * Register access helpers.
+ *
+ * Register addresses and payloads are sent most-significant byte
+ * first, hence the swab16()/swab32() conversions.  NOTE(review): the
+ * unconditional swab assumes a little-endian host -- confirm if this
+ * driver is ever built for a big-endian platform.
+ *
+ * Fix vs. original: only the on-wire copies are byte-swapped, so the
+ * error paths now report the register address in its natural order
+ * (the 8-bit write and the 16-bit read used to print the swapped
+ * address).
+ */
+
+/* Write an 8-bit value to a 16-bit register address. */
+static int mt9v117_write8(struct mt9v117 *mt9v117, u16 reg, u8 val)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	struct i2c_msg msg;
+	struct {
+		u16 reg;
+		u8 val;
+	} __packed buf;
+	int ret;
+
+	/* Swap only the on-wire copy: 'reg' stays usable for logging */
+	buf.reg = swab16(reg);
+	buf.val = val;
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 3;
+	msg.buf = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Read a 16-bit register. */
+static int mt9v117_read16(struct mt9v117 *mt9v117, u16 reg, u16 *val)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	int ret;
+	u16 reg_be = swab16(reg);	/* on-wire (big-endian) address */
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = 2,
+			.buf = (u8 *)&reg_be,
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = 2,
+			.buf = (u8 *)val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	*val = swab16(*val);
+
+	return 0;
+}
+
+/* Write a 16-bit register. */
+static int mt9v117_write16(struct mt9v117 *mt9v117, u16 reg, u16 val)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	struct i2c_msg msg;
+	struct {
+		u16 reg;
+		u16 val;
+	} __packed buf;
+	int ret;
+
+	buf.reg = swab16(reg);
+	buf.val = swab16(val);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 4;
+	msg.buf = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Write a 32-bit register. */
+static int mt9v117_write32(struct mt9v117 *mt9v117, u16 reg, u32 val)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	struct i2c_msg msg;
+	struct {
+		u16 reg;
+		u32 val;
+	} __packed buf;
+	int ret;
+
+	buf.reg = swab16(reg);
+	buf.val = swab32(val);
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 6;
+	msg.buf = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to write register 0x%04x!\n",
+			reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Firmware "variable" access helpers.
+ *
+ * Variables are reached through a synthesized address: bit 15 set, the
+ * variable id shifted into the upper bits and the byte offset in the
+ * low bits.  NOTE(review): the guard rejects var > 0x200, yet
+ * (var << 10) only leaves 5 usable id bits in a u16 address -- confirm
+ * the intended limit against the datasheet before tightening it.
+ *
+ * Fix vs. original: the error messages claimed the *offset* was too
+ * high while the check is on the variable id.
+ */
+static int mt9v117_write_var8(struct mt9v117 *mt9v117, u16 var,
+			      u16 offset, u16 value)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	u16 addr = 0x8000 | (var << 10) | offset;
+
+	if (var > 0x200) {
+		dev_err(&client->dev, "write var8: var id too high\n");
+		return -EINVAL;
+	}
+
+	return mt9v117_write8(mt9v117, addr, value);
+}
+
+static int mt9v117_read_var16(struct mt9v117 *mt9v117, u16 var,
+			      u16 offset, u16 *value)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	u16 addr = 0x8000 | (var << 10) | offset;
+
+	if (var > 0x200) {
+		dev_err(&client->dev, "read var16: var id too high\n");
+		return -EINVAL;
+	}
+
+	return mt9v117_read16(mt9v117, addr, value);
+}
+
+static int mt9v117_write_var16(struct mt9v117 *mt9v117, u16 var,
+			       u16 offset, u16 value)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	u16 addr = 0x8000 | (var << 10) | offset;
+
+	if (var > 0x200) {
+		dev_err(&client->dev, "write var16: var id too high\n");
+		return -EINVAL;
+	}
+
+	return mt9v117_write16(mt9v117, addr, value);
+}
+
+static int mt9v117_write_var32(struct mt9v117 *mt9v117, u16 var,
+			       u16 offset, u32 value)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	u16 addr = 0x8000 | (var << 10) | offset;
+
+	if (var > 0x200) {
+		dev_err(&client->dev, "write var32: var id too high\n");
+		return -EINVAL;
+	}
+
+	return mt9v117_write32(mt9v117, addr, value);
+}
+
+/* Push one raw firmware patch line to the sensor in a single
+ * i2c transfer. */
+static int mt9v117_write_patch(struct mt9v117 *mt9v117,
+			       u8 *patch, u16 patch_len)
+{
+	struct i2c_client *client = mt9v117->i2c_client;
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.flags = 0,
+		.len = patch_len,
+		.buf = patch,
+	};
+	int ret;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to write patch\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* setup functions */
+/*
+ * Ask the firmware to apply the shadowed configuration: set
+ * SYSMGR_NEXT_STATE to 0x28, then issue the "set state" command via
+ * COMMAND_REGISTER (0x0040).  The command bit (0x0002) must self-clear
+ * and the OK bit (0x8000) must be set for the change to be considered
+ * applied.
+ *
+ * Returns 0 on success, -EPIPE when the firmware reports failure.
+ */
+static int mt9v117_config_change(struct mt9v117 *mt9v117)
+{
+	u16 value;
+
+	mt9v117_write_var8(mt9v117, 23, 0x0000, 0x28); /* SYSMGR_NEXT_STATE */
+	mt9v117_write16(mt9v117, 0x0040, 0x8002); /* COMMAND_REGISTER */
+	msleep(500);
+	mt9v117_read16(mt9v117, 0x0040, &value);
+
+	if ((value & 0x0002) != 0)
+		v4l2_err(&mt9v117->sd,"command not complete...\n");
+
+	msleep(500);
+	mt9v117_read16(mt9v117, 0x0040, &value);
+
+	if ((value & 0x8000) == 0) {
+		v4l2_err(&mt9v117->sd,"config change failed...%x\n",value);
+		return -EPIPE;
+	}
+
+	return 0;
+}
+
+/* Pulse the soft-reset bit in register 0x001A, then wait for the
+ * sensor to come back up. */
+static int mt9v117_soft_reset(struct mt9v117 *mt9v117)
+{
+	int ret;
+
+	ret = mt9v117_write16(mt9v117, 0x001A, 0x0001);
+	if (ret < 0)
+		return -EPIPE;
+
+	ret = mt9v117_write16(mt9v117, 0x001A, 0x0000);
+	if (ret < 0)
+		return -EPIPE;
+
+	msleep(50);
+
+	return 0;
+}
+
+/*
+ * Apply the vendor errata workarounds and load firmware patch 0403
+ * (the raw patch lines come from mt9v117_patches.h).
+ *
+ * Returns 0 on success, -ENODEV when the patch loader does not report
+ * success in COMMAND_REGISTER.
+ */
+static int mt9v117_patch1(struct mt9v117 *mt9v117)
+{
+	u16 value;
+
+	/* correct image aberration due to low illumination conditions */
+	/* Errata item 2 */
+	mt9v117_write16(mt9v117, 0x301A, 0x10D0);
+	mt9v117_write16(mt9v117, 0x31C0, 0x1404);
+	mt9v117_write16(mt9v117, 0x3ED8, 0x879C);
+	mt9v117_write16(mt9v117, 0x3042, 0x20E1);
+	mt9v117_write16(mt9v117, 0x30D4, 0x8020);
+	mt9v117_write16(mt9v117, 0x30C0, 0x0026);
+	mt9v117_write16(mt9v117, 0x301A, 0x10D4);
+
+	/* Errata item 6 */
+	mt9v117_write_var16(mt9v117, 10, 0x0002, 0x00D3);
+	mt9v117_write_var16(mt9v117, 18, 0x0078, 0x00A0);
+	mt9v117_write_var16(mt9v117, 18, 0x0076, 0x0140);
+
+	/* Errata item 8 */
+	mt9v117_write_var16(mt9v117, 15, 0x0004, 0x00FC);
+	mt9v117_write_var16(mt9v117, 15, 0x0038, 0x007F);
+	mt9v117_write_var16(mt9v117, 15, 0x003A, 0x007F);
+	mt9v117_write_var16(mt9v117, 15, 0x003C, 0x007F);
+	mt9v117_write_var16(mt9v117, 15, 0x0004, 0x00F4);
+
+	/* Load Patch 0403 */
+	mt9v117_write16(mt9v117, 0x0982, 0x0001); /* ACCESS_CTL_STAT */
+	mt9v117_write16(mt9v117, 0x098A, 0x7000); /* PHYSICAL_ADDRESS_ACCESS */
+
+	mt9v117_write_patch(mt9v117,patch_line1, sizeof(patch_line1));
+	mt9v117_write_patch(mt9v117,patch_line2, sizeof(patch_line2));
+	mt9v117_write_patch(mt9v117,patch_line3, sizeof(patch_line3));
+	mt9v117_write_patch(mt9v117,patch_line4, sizeof(patch_line4));
+	mt9v117_write_patch(mt9v117,patch_line5, sizeof(patch_line5));
+	mt9v117_write_patch(mt9v117,patch_line6, sizeof(patch_line6));
+	mt9v117_write_patch(mt9v117,patch_line7, sizeof(patch_line7));
+	mt9v117_write_patch(mt9v117,patch_line8, sizeof(patch_line8));
+	mt9v117_write_patch(mt9v117,patch_line9, sizeof(patch_line9));
+	mt9v117_write_patch(mt9v117,patch_line10, sizeof(patch_line10));
+	mt9v117_write_patch(mt9v117,patch_line11, sizeof(patch_line11));
+	mt9v117_write_patch(mt9v117,patch_line12, sizeof(patch_line12));
+	mt9v117_write_patch(mt9v117,patch_line13, sizeof(patch_line13));
+	mt9v117_write16(mt9v117, 0x098E, 0x0000); /* LOGICAL_ADDRESS_ACCESS */
+
+	/* apply patch 0403 */
+	mt9v117_write_var16(mt9v117, 0x18, 0x0000, 0x05d8); /* PATCHLDR_LOADER_ADDRESS */
+	mt9v117_write_var16(mt9v117, 0x18, 0x0002, 0x0403); /* PATCHLDR_PATCH_ID */
+	mt9v117_write_var32(mt9v117, 0x18, 0x0004, 0x00430104); /* PATCHLDR_FIRMWARE_ID (NOTE(review): value uncertain in original -- confirm) */
+
+
+	mt9v117_write16(mt9v117, 0x0040, 0x8001); /* apply patch */
+	msleep(500);
+	mt9v117_read16(mt9v117, 0x0040, &value);
+	if ((value & 0x8001) != 0x8000) {
+		v4l2_err(&mt9v117->sd,"mt9v117 patch apply failed...%x\n",value);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+
+/*
+ * One-off image pipeline defaults: AWB pixel threshold count, average
+ * AE rule, and more aggressive pad slew rates.  Always returns 0.
+ */
+static int mt9v117_set_basic_settings(struct mt9v117 *mt9v117)
+{
+	u32 ths = 50000;
+	u16 value;
+
+	/* awb_pixel_threshold_count */
+	mt9v117_write_var32(mt9v117,11,0x0040,ths);
+
+	/* set ae_rule_algo to average */
+	mt9v117_write_var16(mt9v117,9,0x4,0x0);
+
+	/* pad slew more aggressive (read-modify-write of reg 0x0030) */
+	mt9v117_read16(mt9v117, 0x0030, &value);
+	value |= 0x0001; /* data slew rate = 1 (default value 0) */
+	value |= 0x0600; /* clock slew rate = 6 (default value 4) */
+	mt9v117_write16(mt9v117, 0x0030, value);
+
+	return 0;
+}
+
+/*
+ * Exhaustively search the (line length, frame length) pair whose
+ * resulting frame period is closest to the requested one, then cache
+ * the winning pair and the real frame period it yields.
+ *
+ * Returns 0 on success, -1 if no candidate was evaluated.
+ */
+static int mt9v117_update_timings(struct mt9v117 *mt9v117)
+{
+	struct v4l2_mbus_framefmt *fmt = &mt9v117->format;
+	uint32_t line_len, frame_len;
+	uint32_t best_line_len = 1;
+	uint32_t best_frame_len = 1;
+	int64_t err, best_err = -1;
+	uint64_t tmp64;
+	uint32_t period_ns;
+	uint32_t best_period_ns = 0;
+	uint32_t pixel_clock = mt9v117->pdata->ext_clk_freq_hz / 2;
+
+	for (line_len = 640 + MIN_HORIZONTAL_BLANKING; line_len < 2048; line_len++) {
+		for (frame_len = fmt->height + MIN_VERTICAL_BLANKING; frame_len < 600; frame_len++) {
+			/* frame period of this candidate pair */
+			tmp64 = (u64)line_len * frame_len * NSEC_PER_SEC;
+			period_ns = div_u64((u64)tmp64, pixel_clock);
+			/* distance to the requested period */
+			err = abs64((u64)period_ns - mt9v117->frameperiod_ns);
+			if ((err < best_err) || (best_err < 0)) {
+				best_err = err;
+				best_line_len = line_len;
+				best_frame_len = frame_len;
+				best_period_ns = period_ns;
+			}
+			/* period grows with frame_len: no point going on */
+			if (period_ns > mt9v117->frameperiod_ns)
+				break;
+		}
+	}
+
+	/* check a solution has been found */
+	if (best_err < 0) {
+		v4l2_err(&mt9v117->sd, "error setting framerate\n");
+		return -1;
+	}
+
+	mt9v117->real_frameperiod_ns = best_period_ns;
+	mt9v117->line_length = best_line_len;
+	mt9v117->frame_length = best_frame_len;
+
+	v4l2_info(&mt9v117->sd, "pixelClock %u\n", mt9v117->pdata->ext_clk_freq_hz);
+	v4l2_info(&mt9v117->sd, "setting framerate: frame period %u->%u, line length %u, frame length %u\n", mt9v117->frameperiod_ns, best_period_ns, best_line_len, best_frame_len);
+
+	mt9v117->timings_uptodate = 1;
+
+	return 0;
+}
+
+/*
+ * Push the computed line/frame lengths to the sensor and derive the
+ * flicker-detection (FD) periods and zones for 50 Hz and 60 Hz mains.
+ * Assumes mt9v117_update_timings() ran first (uses line_length,
+ * frame_length and real_frameperiod_ns). Always returns 0.
+ */
+static int mt9v117_blanking_write(struct mt9v117 *mt9v117)
+{
+ uint32_t pixelClock = mt9v117->pdata->ext_clk_freq_hz / 2;
+ uint64_t tmp64;
+ uint32_t flicker_periods;
+
+ mt9v117_write_var16(mt9v117, 18, 0x10, mt9v117->line_length);
+ mt9v117_write_var16(mt9v117, 18, 0xE, mt9v117->frame_length);
+
+ // configure FD zone
+ mt9v117_write_var16(mt9v117, 18, 0x16,
+ pixelClock / mt9v117->line_length / 120); //FD Period 60Hz
+ mt9v117_write_var16(mt9v117, 18, 0x18,
+ pixelClock / mt9v117->line_length / 100); //FD Period 50Hz
+
+ // flicker periods (120 Hz ripple) per frame
+ tmp64 = ((u64)mt9v117->real_frameperiod_ns * 120);
+ flicker_periods = div_u64(tmp64, NSEC_PER_SEC);
+ mt9v117_write_var16(mt9v117, 18, 0x1A,
+ flicker_periods); //Max FD Zone 60Hz
+ mt9v117_write_var16(mt9v117, 18, 0x1E,
+ flicker_periods); //Target FD Zone 60Hz
+
+ // flicker periods (100 Hz ripple) per frame
+ tmp64 = ((u64)mt9v117->real_frameperiod_ns * 100);
+ flicker_periods = div_u64(tmp64, NSEC_PER_SEC);
+ mt9v117_write_var16(mt9v117, 18, 0x1C,
+ flicker_periods); //Max FD Zone 50Hz
+ mt9v117_write_var16(mt9v117, 18, 0x20,
+ flicker_periods); //Target FD Zone 50Hz
+
+ return 0;
+}
+
+/*
+ * Program the 640x480 (VGA) sensor window plus AE/AWB windows.
+ * Vendor table values written to variable page 18.
+ */
+static int mt9v117_set_VGA(struct mt9v117 *mt9v117)
+{
+ // VGA, NO Y-skipping, framerate up to 35 FPS (header previously said QVGA)
+ mt9v117_write_var16(mt9v117, 18, 0x0, 0x000C); //Row Start = 12
+ mt9v117_write_var16(mt9v117, 18, 0x2, 0x0010); //Column Start = 16
+ mt9v117_write_var16(mt9v117, 18, 0x4, 0x01F3); //Row End = 499
+ mt9v117_write_var16(mt9v117, 18, 0x6, 0x0297); //Column End = 663
+ mt9v117_write_var16(mt9v117, 18, 0x8, 0x0111); //Row Speed = 273
+ mt9v117_write_var16(mt9v117, 18, 0xA, 0x00A4); //Sensor_fine_IT_min = 164
+ mt9v117_write_var16(mt9v117, 18, 0xC, 0x02FA); //Sensor_fine_IT_max = 762
+ mt9v117_write_var16(mt9v117, 18, 0x12, 0x0031);//Sensor_fine_correction = 49
+ mt9v117_write_var16(mt9v117, 18, 0x14, 0x01E3);//Cpipe Last Row = 483
+ mt9v117_write_var16(mt9v117, 18, 0x28, 0x0000);//Read Mode = 0 - normal readout (old comment said "3 - Mirror + Flip" but the value is 0)
+ mt9v117_write_var16(mt9v117, 18, 0x4C, 0x0280);//Crop Width = 640
+ mt9v117_write_var16(mt9v117, 18, 0x4E, 0x01E0);//Crop Height = 480
+ mt9v117_write_var8(mt9v117, 18, 0x50, 0x03); //Crop Mode = 3
+ mt9v117_write_var16(mt9v117, 18, 0x54, 0x0280);//Output Width = 640
+ mt9v117_write_var16(mt9v117, 18, 0x56, 0x01E0);//Output Height = 480
+ mt9v117_write_var16(mt9v117, 18, 0xEC, 0x0000);//AWB Window Xstart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xEE, 0x0000);//AWB Window Ystart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xF0, 0x027F);//AWB Window Xend = 639
+ mt9v117_write_var16(mt9v117, 18, 0xF2, 0x01DF);//AWB Window Yend = 479
+ mt9v117_write_var16(mt9v117, 18, 0xF4, 0x0002);//AE Window Xstart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF6, 0x0002);//AE Window Ystart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF8, 0x007F);//AE Window Xend = 127
+ mt9v117_write_var16(mt9v117, 18, 0xFA, 0x005F);//AE Window Yend = 95
+
+ return 0;
+}
+
+/*
+ * Program the 320x240 (QVGA) sensor window: full-width capture with
+ * Y-skipping, then AE/AWB windows scaled to the output size.
+ */
+static int mt9v117_set_QVGA(struct mt9v117 *mt9v117)
+{
+ // QVGA, Y-skipping, framerate up to 60 FPS
+ mt9v117_write_var16(mt9v117, 18, 0x0, 0x0008); //Row Start = 8
+ mt9v117_write_var16(mt9v117, 18, 0x2, 0x0010); //Column Start = 16
+ mt9v117_write_var16(mt9v117, 18, 0x4, 0x01F5); //Row End = 501
+ mt9v117_write_var16(mt9v117, 18, 0x6, 0x0297); //Column End = 663
+ mt9v117_write_var16(mt9v117, 18, 0x8, 0x0111); //Row Speed = 273
+ mt9v117_write_var16(mt9v117, 18, 0xA, 0x00A4); //Sensor_fine_IT_min = 164
+ mt9v117_write_var16(mt9v117, 18, 0xC, 0x02FA); //Sensor_fine_IT_max = 762
+ mt9v117_write_var16(mt9v117, 18, 0x12, 0x0031); //Sensor_fine_correction = 49
+ mt9v117_write_var16(mt9v117, 18, 0x14, 0x00F3); //Cpipe Last Row = 243
+ mt9v117_write_var16(mt9v117, 18, 0x28, 0x0007); //Read Mode = 7 - skipping + Mirror + Flip (old comment said 6; the value written is 7)
+ mt9v117_write_var16(mt9v117, 18, 0x4C, 0x0280); //Crop Width = 640
+ mt9v117_write_var16(mt9v117, 18, 0x4E, 0x00F0); //Crop Height = 240
+ mt9v117_write_var8(mt9v117, 18, 0x50, 0x03); //Crop Mode = 3
+ mt9v117_write_var16(mt9v117, 18, 0x54, 0x0140); //Output Width = 320
+ mt9v117_write_var16(mt9v117, 18, 0x56, 0x00F0); //Output Height = 240
+ mt9v117_write_var16(mt9v117, 18, 0xEC, 0x0000); //AWB Window Xstart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xEE, 0x0000); //AWB Window Ystart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xF0, 0x013F); //AWB Window Xend = 319
+ mt9v117_write_var16(mt9v117, 18, 0xF2, 0x00EF); //AWB Window Yend = 239
+ mt9v117_write_var16(mt9v117, 18, 0xF4, 0x0002); //AE Window Xstart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF6, 0x0002); //AE Window Ystart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF8, 0x003F); //AE Window Xend = 63
+ mt9v117_write_var16(mt9v117, 18, 0xFA, 0x002F); //AE Window Yend = 47
+
+ return 0;
+}
+
+/*
+ * Program the 160x120 sensor window (Y-skipped capture, downscaled).
+ * NOTE(review): the output is 160x120 (QQVGA); "QCIF" would be 176x144 —
+ * the naming follows the driver's resolution table, confirm intent.
+ */
+static int mt9v117_set_QCIF(struct mt9v117 *mt9v117)
+{
+ // 160x120, Y-skipping, framerate up to 60 FPS
+ mt9v117_write_var16(mt9v117, 18, 0x0, 0x0008);//Row Start = 8
+ mt9v117_write_var16(mt9v117, 18, 0x2, 0x0010);//Column Start = 16
+ mt9v117_write_var16(mt9v117, 18, 0x4, 0x01F5);//Row End = 501
+ mt9v117_write_var16(mt9v117, 18, 0x6, 0x0297);//Column End = 663
+ mt9v117_write_var16(mt9v117, 18, 0x8, 0x0111);//Row Speed = 273
+ mt9v117_write_var16(mt9v117, 18, 0xA, 0x00A4);//Sensor_fine_IT_min = 164
+ mt9v117_write_var16(mt9v117, 18, 0xC, 0x02FA);//Sensor_fine_IT_max = 762
+ mt9v117_write_var16(mt9v117, 18, 0x12, 0x0031); //Sensor_fine_correction = 49
+ mt9v117_write_var16(mt9v117, 18, 0x14, 0x00F3); //Cpipe Last Row = 243
+ mt9v117_write_var16(mt9v117, 18, 0x28, 0x0007); //Read Mode = 7 Skipping + Mirror + Flip
+ mt9v117_write_var16(mt9v117, 18, 0x4C, 0x0280); //Crop Width = 640
+ mt9v117_write_var16(mt9v117, 18, 0x4E, 0x00F0); //Crop Height = 240
+ mt9v117_write_var8(mt9v117, 18, 0x50, 0x03); //Crop Mode = 3
+ mt9v117_write_var16(mt9v117, 18, 0x54, 0x00A0); //Output Width = 160
+ mt9v117_write_var16(mt9v117, 18, 0x56, 0x0078); //Output Height = 120
+ mt9v117_write_var16(mt9v117, 18, 0xEC, 0x0000); //AWB Window Xstart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xEE, 0x0000); //AWB Window Ystart = 0
+ mt9v117_write_var16(mt9v117, 18, 0xF0, 0x00AF); //AWB Window Xend = 175
+ mt9v117_write_var16(mt9v117, 18, 0xF2, 0x008F); //AWB Window Yend = 143
+ mt9v117_write_var16(mt9v117, 18, 0xF4, 0x0002); //AE Window Xstart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF6, 0x0002); //AE Window Ystart = 2
+ mt9v117_write_var16(mt9v117, 18, 0xF8, 0x0022); //AE Window Xend = 34
+ mt9v117_write_var16(mt9v117, 18, 0xFA, 0x001B); //AE Window Yend = 27
+
+ return 0;
+}
+
+/*
+ * Enable or disable ITU-R BT.656 output framing by toggling bit 3 of the
+ * output-format register (page 18, offset 0x58). Always returns 0.
+ */
+static int mt9v117_set_itu_bt656(struct mt9v117 *mt9v117, int itu_bt656)
+{
+ u16 reg;
+
+ /* read-modify-write: only bit 3 changes */
+ mt9v117_read_var16(mt9v117, 18, 0x0058, &reg);
+ if (itu_bt656)
+ reg |= (1 << 3);
+ else
+ reg &= ~(1 << 3);
+ mt9v117_write_var16(mt9v117, 18, 0x0058, reg);
+
+ return 0;
+}
+
+/* v4l2 ops */
+/* Return the TRY or ACTIVE format for pad 0, NULL for any other pad. */
+static struct v4l2_mbus_framefmt *
+__mt9v117_get_fmt(struct mt9v117 *mt9v117,
+ struct v4l2_subdev_fh *fh,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (pad != 0)
+ return NULL;
+
+ return (which == V4L2_SUBDEV_FORMAT_TRY) ?
+ v4l2_subdev_get_try_format(fh, pad) : &mt9v117->format;
+}
+
+/* g_fmt pad op: report the current (or TRY) format for the pad. */
+static int mt9v117_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *cur;
+
+ cur = __mt9v117_get_fmt(to_mt9v117(sd), fh, fmt->pad, fmt->which);
+ if (!cur)
+ return -EINVAL;
+
+ fmt->format = *cur;
+ return 0;
+}
+
+/*
+ * s_fmt pad op: accept only the three supported resolutions
+ * (640x480, 320x240, 160x120) and invalidate the cached timings.
+ */
+static int mt9v117_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mt9v117 *mt9v117 = to_mt9v117(sd);
+ struct v4l2_mbus_framefmt *format;
+ u32 w = fmt->format.width;
+ u32 h = fmt->format.height;
+
+ format = __mt9v117_get_fmt(mt9v117, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ /* only the supported discrete resolutions are accepted */
+ if (!((w == 640 && h == 480) ||
+ (w == 320 && h == 240) ||
+ (w == 160 && h == 120)))
+ return -EINVAL;
+
+ format->width = w;
+ format->height = h;
+ fmt->format = *format;
+
+ mt9v117->timings_uptodate = 0;
+
+ return 0;
+}
+
+/* Enumerate media bus codes: a single entry, the configured code. */
+static int mt9v117_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = to_mt9v117(sd)->format.code;
+ return 0;
+}
+
+/*
+ * Full (re)configuration sequence: soft reset, firmware patch, basic
+ * tuning, timing search, resolution table, blanking/flicker zones,
+ * BT.656 output, then a config-change command to latch everything.
+ *
+ * Fix: the return values of the reset and patch steps were assigned but
+ * ignored/overwritten; the patch loader visibly returns -ENODEV on
+ * failure, so propagate both.
+ */
+static int mt9v117_apply_config(struct mt9v117 *mt9v117)
+{
+ struct v4l2_mbus_framefmt *format = &mt9v117->format;
+ int ret;
+
+ ret = mt9v117_soft_reset(mt9v117);
+ if (ret < 0)
+ return ret;
+
+ ret = mt9v117_patch1(mt9v117);
+ if (ret < 0)
+ return ret;
+
+ mt9v117_set_basic_settings(mt9v117);
+
+ ret = mt9v117_update_timings(mt9v117);
+ if (ret < 0) {
+ v4l2_err(&mt9v117->sd, "Unable to calculate Video Timing\n");
+ return ret;
+ }
+
+ switch (format->height) {
+ case 480:
+ ret = mt9v117_set_VGA(mt9v117);
+ break;
+ case 240:
+ ret = mt9v117_set_QVGA(mt9v117);
+ break;
+ case 120:
+ ret = mt9v117_set_QCIF(mt9v117);
+ break;
+ default:
+ v4l2_err(&mt9v117->sd, "resolution not supported\n");
+ return -EINVAL;
+ }
+
+ if (ret < 0) {
+ v4l2_err(&mt9v117->sd, "error setting resolution\n");
+ return ret;
+ }
+
+ mt9v117_blanking_write(mt9v117);
+
+ mt9v117_set_itu_bt656(mt9v117, 1);
+
+ // apply settings
+ ret = mt9v117_config_change(mt9v117);
+
+ return ret;
+}
+
+/*
+ * Start/stop streaming: power the sensor, apply the full configuration,
+ * and track the streaming state.
+ *
+ * Fix: set_power is optional (the enable path and probe both check it
+ * for NULL) but the disable and error paths called it unconditionally,
+ * which would oops on boards that provide no set_power callback.
+ */
+static int mt9v117_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mt9v117 *mt9v117 = to_mt9v117(sd);
+ int ret = 0;
+
+ if (enable) {
+ if (mt9v117->set_power)
+ mt9v117->set_power(MT9V117_POWER_ON);
+ msleep(500); /* time has to be confirmed */
+ ret = mt9v117_apply_config(mt9v117);
+ if (ret < 0) {
+ v4l2_err(&mt9v117->sd, "failed to apply config\n");
+ goto power_off;
+ }
+
+ mt9v117->isStreaming = true;
+
+ } else {
+ mt9v117->isStreaming = false;
+ if (mt9v117->set_power)
+ mt9v117->set_power(MT9V117_POWER_OFF);
+ }
+
+ return 0;
+
+power_off:
+ mt9v117->isStreaming = false;
+ if (mt9v117->set_power)
+ mt9v117->set_power(MT9V117_POWER_OFF);
+ return ret;
+}
+
+/* No core operations are needed for this sensor; table kept for the ops struct. */
+static const struct v4l2_subdev_core_ops mt9v117_core_ops = {
+};
+
+/*
+ * Report the current frame interval as a reduced fraction of a second
+ * (frameperiod_ns / NSEC_PER_SEC).
+ */
+static int mt9v117_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct mt9v117 *mt9v117 = to_mt9v117(sd);
+ unsigned div;
+
+ memset(fi, 0, sizeof(*fi));
+ fi->interval.numerator = mt9v117->frameperiod_ns;
+ fi->interval.denominator = NSEC_PER_SEC;
+
+ /* reduce the fraction before handing it back */
+ div = gcd(fi->interval.numerator, fi->interval.denominator);
+ fi->interval.numerator /= div;
+ fi->interval.denominator /= div;
+
+ return 0;
+}
+
+/*
+ * Set the requested frame interval (converted to a period in ns) and
+ * invalidate the cached timings. When not streaming, recompute the
+ * timings immediately so an unreachable framerate is reported now.
+ */
+static int mt9v117_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct mt9v117 *mt9v117 = to_mt9v117(sd);
+ uint64_t tmp64;
+ int ret;
+
+ /* Protect against division by 0. */
+ if (fi->interval.denominator == 0)
+ fi->interval.denominator = 30;
+
+ if (fi->interval.numerator == 0)
+ fi->interval.numerator = 1;
+
+ /* frame period in nanoseconds = numerator/denominator seconds */
+ tmp64 = (u64)fi->interval.numerator * NSEC_PER_SEC;
+ mt9v117->frameperiod_ns = div_u64((u64)tmp64, fi->interval.denominator);
+
+ mt9v117->timings_uptodate = 0;
+ if (!mt9v117->isStreaming) {
+ ret = mt9v117_update_timings(mt9v117);
+ if (ret < 0) {
+ v4l2_err(&mt9v117->sd,
+ "Unable to calculate Video Timing\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Video ops: streaming control and frame-interval get/set. */
+static const struct v4l2_subdev_video_ops mt9v117_video_ops = {
+ .s_stream = mt9v117_s_stream,
+ .g_frame_interval = mt9v117_g_frame_interval,
+ .s_frame_interval = mt9v117_s_frame_interval,
+};
+
+/* Pad ops: format negotiation on the single source pad. */
+static const struct v4l2_subdev_pad_ops mt9v117_pad_ops = {
+ .get_fmt = mt9v117_get_fmt,
+ .set_fmt = mt9v117_set_fmt,
+ .enum_mbus_code = mt9v117_enum_mbus_code,
+};
+
+/* Top-level subdev ops table registered with the V4L2 core. */
+static const struct v4l2_subdev_ops mt9v117_ops = {
+ .core = &mt9v117_core_ops,
+ .video = &mt9v117_video_ops,
+ .pad = &mt9v117_pad_ops,
+};
+
+/*
+ * I2C probe: allocate driver state, initialise the media entity and the
+ * default VGA/UYVY format, power-cycle the sensor to verify its model
+ * ID (0x2282), then pre-compute the video timings.
+ */
+static int mt9v117_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mt9v117 *mt9v117;
+ struct mt9v117_platform_data *pdata = NULL;
+ struct v4l2_subdev *sd;
+ int ret = -ENODEV;
+ struct v4l2_mbus_framefmt *format;
+ u16 model_id;
+ pdata = client->dev.platform_data;
+
+ if(!pdata)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ ret = -EIO;
+ goto ei2c;
+ }
+
+ dev_info(&client->dev, "detecting mt9v117 client on address 0x%x\n",
+ client->addr << 1);
+
+ mt9v117 = kzalloc(sizeof(*mt9v117), GFP_KERNEL);
+ if(!mt9v117) {
+ v4l_err(client, "alloc failed for data structure\n");
+ ret = -ENOMEM;
+ goto enomem;
+ }
+
+ mt9v117->set_power = pdata->set_power;
+ mt9v117->pdata = client->dev.platform_data;
+ mt9v117->frameperiod_ns = MT9V117_frameperiod_ns_DEF;
+ mt9v117->i2c_client = client;
+
+ /* default format: 640x480 UYVY 4:2:2 */
+ format = &mt9v117->format;
+ format->width = 640;
+ format->height = 480;
+ format->field = V4L2_FIELD_NONE;
+ format->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ sd = &mt9v117->sd;
+ v4l2_i2c_subdev_init(sd, client, &mt9v117_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ mt9v117->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, 1, &mt9v117->pad, 0);
+ if(ret < 0) {
+ v4l_err(client, "failed to init media entity\n");
+ goto emedia;
+ }
+
+ /*see if chip is present */
+ if(pdata->set_power) {
+ pdata->set_power(MT9V117_POWER_ON);
+ msleep(500);
+ }
+
+ /* NOTE(review): read result unchecked; model_id may be stale if the
+ * I2C read fails, turning a bus error into a misleading ID mismatch */
+ mt9v117_read16(mt9v117, 0x3000, &model_id);
+
+ if(model_id != 0x2282) {
+ v4l2_err(sd, "Error Model ID = 0x%04X\n", model_id);
+ ret = -ENODEV;
+ goto echipident;
+ }
+
+ if(pdata->set_power)
+ pdata->set_power(MT9V117_POWER_OFF);
+
+ ret = mt9v117_update_timings(mt9v117);
+ if (ret < 0) {
+ v4l2_err(&mt9v117->sd, "Unable to calculate Video Timing\n");
+ /* NOTE(review): echipident powers the sensor off a second time
+ * on this path (it was already switched off just above) */
+ goto echipident;
+ }
+
+ return ret;
+echipident:
+ if(pdata->set_power)
+ pdata->set_power(MT9V117_POWER_OFF);
+ media_entity_cleanup(&sd->entity);
+emedia:
+ kfree(mt9v117);
+enomem:
+ei2c:
+ return ret;
+}
+
+/* I2C remove: tear down in reverse order of probe. */
+static int mt9v117_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ kfree(to_mt9v117(sd));
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* I2C device-id table used for driver matching and module autoloading. */
+static const struct i2c_device_id mt9v117_id[] = {
+ { "mt9v117", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, mt9v117_id);
+
+static struct i2c_driver mt9v117_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mt9v117",
+ },
+ .probe = mt9v117_probe,
+ .remove = mt9v117_remove,
+ .id_table = mt9v117_id,
+};
+
+/* Registers the driver and generates module init/exit boilerplate. */
+module_i2c_driver(mt9v117_driver);
diff --git a/drivers/media/video/mt9v117_patches.h b/drivers/media/video/mt9v117_patches.h
new file mode 100644
index 00000000000000..fae34885a8a04a
--- /dev/null
+++ b/drivers/media/video/mt9v117_patches.h
@@ -0,0 +1,85 @@
+/*
+ * Patches data for mt9v117
+ *
+ *
+ *
+*/
+#ifndef __MT9V117_PATCHES_H__
+#define __MT9V117_PATCHES_H__
+
+/*
+ * Firmware patch blobs, written to the sensor with mt9v117_write_patch().
+ * Fix: the arrays were defined without 'static' in a header, which would
+ * cause multiple-definition link errors if this header were ever included
+ * by more than one translation unit.
+ * TODO(review): make these 'const' once mt9v117_write_patch() is confirmed
+ * to take a const pointer.
+ */
+ static u8 patch_line1[] = { 0xf0, 0x00, 0x72, 0xcf, 0xff, 0x00, 0x3e, 0xd0, 0x92,
+ 0x00, 0x71, 0xcf, 0xff, 0xff, 0xf2, 0x18, 0xb1, 0x10, 0x92, 0x05, 0xb1,
+ 0x11, 0x92, 0x04, 0xb1, 0x12, 0x70, 0xcf, 0xff, 0x00, 0x30, 0xc0, 0x90,
+ 0x00, 0x7f, 0xe0, 0xb1, 0x13, 0x70, 0xcf, 0xff, 0xff, 0xe7, 0x1c, 0x88,
+ 0x36, 0x09, 0x0f, 0x00, 0xb3};
+
+ static u8 patch_line2[] = { 0xf0, 0x30, 0x69, 0x13, 0xe1, 0x80, 0xd8, 0x08, 0x20,
+ 0xca, 0x03, 0x22, 0x71, 0xcf, 0xff, 0xff, 0xe5, 0x68, 0x91, 0x35, 0x22,
+ 0x0a, 0x1f, 0x80, 0xff, 0xff, 0xf2, 0x18, 0x29, 0x05, 0x00, 0x3e, 0x12,
+ 0x22, 0x11, 0x01, 0x21, 0x04, 0x0f, 0x81, 0x00, 0x00, 0xff, 0xf0, 0x21,
+ 0x8c, 0xf0, 0x10, 0x1a, 0x22};
+
+ static u8 patch_line3[] = { 0xf0, 0x60, 0x10, 0x44, 0x12, 0x20, 0x11, 0x02, 0xf7,
+ 0x87, 0x22, 0x4f, 0x03, 0x83, 0x1a, 0x20, 0x10, 0xc4, 0xf0, 0x09, 0xba,
+ 0xae, 0x7b, 0x50, 0x1a, 0x20, 0x10, 0x84, 0x21, 0x45, 0x01, 0xc1, 0x1a,
+ 0x22, 0x10, 0x44, 0x70, 0xcf, 0xff, 0x00, 0x3e, 0xd0, 0xb0, 0x60, 0xb0,
+ 0x25, 0x7e, 0xe0, 0x78, 0xe0};
+
+ static u8 patch_line4[] = { 0xf0, 0x90, 0x71, 0xcf, 0xff, 0xff, 0xf2, 0x18, 0x91,
+ 0x12, 0x72, 0xcf, 0xff, 0xff, 0xe7, 0x1c, 0x8a, 0x57, 0x20, 0x04, 0x0f,
+ 0x80, 0x00, 0x00, 0xff, 0xf0, 0xe2, 0x80, 0x20, 0xc5, 0x01, 0x61, 0x20,
+ 0xc5, 0x03, 0x22, 0xb1, 0x12, 0x71, 0xcf, 0xff, 0x00, 0x3e, 0xd0, 0xb1,
+ 0x04, 0x7e, 0xe0, 0x78, 0xe0};
+
+ static u8 patch_line5[] = { 0xf0, 0xc0, 0x70, 0xcf, 0xff, 0xff, 0xe7, 0x1c, 0x88,
+ 0x57, 0x71, 0xcf, 0xff, 0xff, 0xf2, 0x18, 0x91, 0x13, 0xea, 0x84, 0xb8,
+ 0xa9, 0x78, 0x10, 0xf0, 0x03, 0xb8, 0x89, 0xb8, 0x8c, 0xb1, 0x13, 0x71,
+ 0xcf, 0xff, 0x00, 0x30, 0xc0, 0xb1, 0x00, 0x7e, 0xe0, 0xc0, 0xf1, 0x09,
+ 0x1e, 0x03, 0xc0, 0xc1, 0xa1};
+
+ static u8 patch_line6[] = { 0xf0, 0xf0, 0x75, 0x08, 0x76, 0x28, 0x77, 0x48, 0xc2,
+ 0x40, 0xd8, 0x20, 0x71, 0xcf, 0x00, 0x03, 0x20, 0x67, 0xda, 0x02, 0x08,
+ 0xae, 0x03, 0xa0, 0x73, 0xc9, 0x0e, 0x25, 0x13, 0xc0, 0x0b, 0x5e, 0x01,
+ 0x60, 0xd8, 0x06, 0xff, 0xbc, 0x0c, 0xce, 0x01, 0x00, 0xd8, 0x00, 0xb8,
+ 0x9e, 0x0e, 0x5a, 0x03, 0x20};
+
+ static u8 patch_line7[] = { 0xf1, 0x20, 0xd9, 0x01, 0xd8, 0x00, 0xb8, 0x9e, 0x0e,
+ 0xb6, 0x03, 0x20, 0xd9, 0x01, 0x8d, 0x14, 0x08, 0x17, 0x01, 0x91, 0x8d,
+ 0x16, 0xe8, 0x07, 0x0b, 0x36, 0x01, 0x60, 0xd8, 0x07, 0x0b, 0x52, 0x01,
+ 0x60, 0xd8, 0x11, 0x8d, 0x14, 0xe0, 0x87, 0xd8, 0x00, 0x20, 0xca, 0x02,
+ 0x62, 0x00, 0xc9, 0x03, 0xe0};
+
+ static u8 patch_line8[] = { 0xf1, 0x50, 0xc0, 0xa1, 0x78, 0xe0, 0xc0, 0xf1, 0x08,
+ 0xb2, 0x03, 0xc0, 0x76, 0xcf, 0xff, 0xff, 0xe5, 0x40, 0x75, 0xcf, 0xff,
+ 0xff, 0xe5, 0x68, 0x95, 0x17, 0x96, 0x40, 0x77, 0xcf, 0xff, 0xff, 0xe5,
+ 0x42, 0x95, 0x38, 0x0a, 0x0d, 0x00, 0x01, 0x97, 0x40, 0x0a, 0x11, 0x00,
+ 0x40, 0x0b, 0x0a, 0x01, 0x00};
+
+ static u8 patch_line9[] = { 0xf1, 0x80, 0x95, 0x17, 0xb6, 0x00, 0x95, 0x18, 0xb7,
+ 0x00, 0x76, 0xcf, 0xff, 0xff, 0xe5, 0x44, 0x96, 0x20, 0x95, 0x15, 0x08,
+ 0x13, 0x00, 0x40, 0x0e, 0x1e, 0x01, 0x20, 0xd9, 0x00, 0x95, 0x15, 0xb6,
+ 0x00, 0xff, 0xa1, 0x75, 0xcf, 0xff, 0xff, 0xe7, 0x1c, 0x77, 0xcf, 0xff,
+ 0xff, 0xe5, 0x46, 0x97, 0x40};
+
+ static u8 patch_line10[] = { 0xf1, 0xb0, 0x8d, 0x16, 0x76, 0xcf, 0xff, 0xff, 0xe5,
+ 0x48, 0x8d, 0x37, 0x08, 0x0d, 0x00, 0x81, 0x96, 0x40, 0x09, 0x15, 0x00,
+ 0x80, 0x0f, 0xd6, 0x01, 0x00, 0x8d, 0x16, 0xb7, 0x00, 0x8d, 0x17, 0xb6,
+ 0x00, 0xff, 0xb0, 0xff, 0xbc, 0x00, 0x41, 0x03, 0xc0, 0xc0, 0xf1, 0x0d,
+ 0x9e, 0x01, 0x00, 0xe8, 0x04};
+
+ static u8 patch_line11[] = { 0xf1, 0xe0, 0xff, 0x88, 0xf0, 0x0a, 0x0d, 0x6a, 0x01,
+ 0x00, 0x0d, 0x8e, 0x01, 0x00, 0xe8, 0x7e, 0xff, 0x85, 0x0d, 0x72, 0x01,
+ 0x00, 0xff, 0x8c, 0xff, 0xa7, 0xff, 0xb2, 0xd8, 0x00, 0x73, 0xcf, 0xff,
+ 0xff, 0xf2, 0x40, 0x23, 0x15, 0x00, 0x01, 0x81, 0x41, 0xe0, 0x02, 0x81,
+ 0x20, 0x08, 0xf7, 0x81, 0x34};
+
+ static u8 patch_line12[] = { 0xf2, 0x10, 0xa1, 0x40, 0xd8, 0x00, 0xc0, 0xd1, 0x7e,
+ 0xe0, 0x53, 0x51, 0x30, 0x34, 0x20, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
+ 0x20, 0x25, 0x64, 0x20, 0x25, 0x64, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ static u8 patch_line13[] = { 0xf2, 0x40, 0xff, 0xff, 0xe8, 0x28, 0xff, 0xff, 0xf0,
+ 0xe8, 0xff, 0xff, 0xe8, 0x08, 0xff, 0xff, 0xf1, 0x54};
+
+#endif
diff --git a/drivers/media/video/ov16825/Kconfig b/drivers/media/video/ov16825/Kconfig
new file mode 100644
index 00000000000000..bf33eb090a37ff
--- /dev/null
+++ b/drivers/media/video/ov16825/Kconfig
@@ -0,0 +1,6 @@
+config VIDEO_OV16825
+ tristate "OmniVision OV16825 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ Video4Linux2 sensor-level driver for the OmniVision
+ OV16825 camera.
diff --git a/drivers/media/video/ov16825/Makefile b/drivers/media/video/ov16825/Makefile
new file mode 100755
index 00000000000000..b9637ffade984f
--- /dev/null
+++ b/drivers/media/video/ov16825/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_OV16825) += ov16825.o ov16825_ctrl.o ov16825_clk.o ov16825_common.o
diff --git a/drivers/media/video/ov16825/ov16825.c b/drivers/media/video/ov16825/ov16825.c
new file mode 100644
index 00000000000000..b48fc34ab9ef7b
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825.c
@@ -0,0 +1,360 @@
+/*
+ * A V4L2 driver for OV16825 cameras.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/clk.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-mediabus.h>
+#include <linux/io.h>
+
+#include "ov16825_common.h"
+#include "ov16825_confs.h"
+#include "ov16825_ctrl.h"
+#include "ov16825_clk.h"
+
+MODULE_AUTHOR("Victor Lambret");
+MODULE_DESCRIPTION("A low-level driver for OV16825 sensors");
+MODULE_LICENSE("GPL");
+
+/*
+ * Reset the sensor, load the register list selected in info->config,
+ * switch to 12-bit MIPI settings when the SBGGR12 format is active,
+ * then re-apply the cached user controls.
+ */
+static void ov16825_configure(struct v4l2_subdev *sd)
+{
+ struct ov16825_info *info = to_state(sd);
+
+ sensor_write(sd, SC_CTRL0103, SC_CTRL0103_SOFTWARE_RESET);
+
+ /* This 20 ms delay comes from the official scripts */
+ msleep(20);
+
+ sensor_write_array(sd, info->config->regs, info->config->regs_len);
+
+ if (info->format.code == V4L2_MBUS_FMT_SBGGR12_1X12) {
+ /* Change PLL config to increase MIPI clock frequency */
+ sensor_write(sd, 0x0302, 0x8b);
+ /* Set 12 bit mode */
+ sensor_write(sd, 0x3031, 0x0c);
+ /* Change mipi clock period */
+ sensor_write(sd, 0x4837, 0x35);
+ }
+
+ ov16825_apply_ctrl(info);
+}
+
+/*
+ * Start/stop streaming: toggle board power (if provided) and program the
+ * sensor on enable. Always returns 0.
+ * NOTE(review): the set_power() return value is ignored here, and the
+ * streaming flag is set before ov16825_configure() completes — confirm
+ * whether the flag ordering matters to ov16825_apply_ctrl().
+ */
+static int ov16825_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov16825_info *info = to_state(sd);
+ struct ov16825_platform_data *pdata = info->pdata;
+
+ if (enable) {
+ if (pdata->set_power)
+ pdata->set_power(1);
+ info->streaming = 1;
+ ov16825_configure(sd);
+ } else {
+ if (pdata->set_power)
+ pdata->set_power(0);
+ info->streaming = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Report BT timings matching the active format.
+ * NOTE(review): the pixel clock is a hard-coded 100 MHz placeholder (the
+ * real per-config value is still commented out below) — fix before relying
+ * on these timings downstream.
+ */
+static int ov16825_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct ov16825_info *info = to_state(sd);
+ struct v4l2_mbus_framefmt *fmt = &info->format;
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ memset(timings, 0, sizeof(*timings));
+
+ bt->width = fmt->width;
+ bt->height = fmt->height;
+ //bt->pixelclock = mt9m021->pll.pix_clock;
+ bt->pixelclock = 100000000;
+
+ return 0;
+}
+
+/* g_fmt pad op: TRY requests read the fh state, others the active format. */
+static int sensor_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov16825_info *info = to_state(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ else
+ fmt->format = info->format;
+
+ return 0;
+}
+
+/*
+ * s_fmt pad op: snap the requested size to a supported sensor
+ * configuration. Exact matches win; otherwise the smallest configuration
+ * that is at least as large in both dimensions is chosen (closest by
+ * pixel count); if nothing is large enough, configs[0] is the fallback.
+ * Only the two Bayer codes (10/12-bit SBGGR) are accepted.
+ */
+static int sensor_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov16825_info *info = to_state(sd);
+ struct v4l2_mbus_framefmt *f;
+ struct ov16825_config *config = NULL;
+ u32 requested_pixels;
+ u32 cur_delta = UINT_MAX;
+ unsigned i;
+
+ f = &fmt->format;
+ requested_pixels = f->width * f->height;
+
+ if (f->code != V4L2_MBUS_FMT_SBGGR12_1X12 &&
+ f->code != V4L2_MBUS_FMT_SBGGR10_1X10) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov16825_configs); i++) {
+ struct ov16825_config *c = &ov16825_configs[i];
+ u32 delta;
+
+ /* exact match: stop searching */
+ if (c->width == f->width && c->height == f->height) {
+ config = c;
+ break;
+ }
+
+
+ // Don't consider smaller resolutions (we can always crop/scale higher
+ // resolutions later)
+ if (c->width < f->width || c->height < f->height) {
+ continue;
+ }
+
+ // Attempt to find the closest higher resolution by number of
+ // pixels (delta is non-negative here thanks to the guard above)
+ delta = (c->width * c->height) - requested_pixels;
+
+ if (delta < cur_delta) {
+ // We found a closer resolution
+ config = c;
+ cur_delta = delta;
+ }
+ }
+
+ if (config == NULL) {
+ // No config found, use the first one as default
+ config = &ov16825_configs[0];
+ }
+
+ /* report back the size actually selected */
+ f->width = config->width;
+ f->height = config->height;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = *f;
+ return 0;
+ }
+
+ /* ACTIVE: commit the format and the matching register config */
+ info->format = *f;
+ info->config = config;
+
+ return 0;
+}
+
+/* Enumerate media bus codes: a single entry, the currently active code. */
+static int sensor_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = to_state(sd)->format.code;
+
+ return 0;
+}
+
+/* Video ops: streaming control and DV timings reporting. */
+static const struct v4l2_subdev_video_ops sensor_video_ops = {
+ .s_stream = ov16825_s_stream,
+ .g_dv_timings = ov16825_g_dv_timings,
+};
+
+/* Pad ops: format negotiation on the single source pad. */
+static const struct v4l2_subdev_pad_ops sensor_pad_ops = {
+ .get_fmt = sensor_get_fmt,
+ .set_fmt = sensor_set_fmt,
+ .enum_mbus_code = sensor_enum_mbus_code,
+};
+
+/* Top-level subdev ops table registered with the V4L2 core. */
+static const struct v4l2_subdev_ops ov16825_ops = {
+ .video = &sensor_video_ops,
+ .pad = &sensor_pad_ops,
+};
+
+/*
+ * Power the sensor up (when a set_power hook exists), read the three
+ * chip-ID registers and check they spell 0x016820, then power back down.
+ * Returns 0 when the expected chip is found, a negative errno otherwise.
+ */
+static int ov16825_detect_chip(struct v4l2_subdev *sd)
+{
+ struct ov16825_info *info = to_state(sd);
+ struct ov16825_platform_data *pdata = info->pdata;
+ u8 id1, id2, id3;
+ int ret;
+
+ if (pdata->set_power) {
+ ret = pdata->set_power(1);
+ if (ret) {
+ v4l2_err(sd, "Couldn't set camera power on\n");
+ return ret;
+ }
+ }
+
+ ret = sensor_read(sd, SC_CMMN_CHIP_ID1, &id1);
+ if (ret)
+ goto power_off;
+ ret = sensor_read(sd, SC_CMMN_CHIP_ID2, &id2);
+ if (ret)
+ goto power_off;
+ ret = sensor_read(sd, SC_CMMN_CHIP_ID3, &id3);
+ if (ret)
+ goto power_off;
+
+ if (id1 != 0x01 || id2 != 0x68 || id3 != 0x20) {
+ v4l2_err(sd,
+ "Invalid camera ID: expected 016820, got %02x%02x%02x\n",
+ id1, id2, id3);
+ ret = -EINVAL;
+ goto power_off;
+ }
+
+ v4l2_info(sd, "Found OV16825 chip\n");
+ ret = 0;
+
+power_off:
+ if (pdata->set_power)
+ pdata->set_power(0);
+
+ return ret;
+}
+
+/*
+ * I2C probe: allocate driver state, register the subdev and media pad,
+ * pick the default configuration/format, create the V4L2 controls, then
+ * power-cycle the sensor to verify the chip ID.
+ */
+static int ov16825_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct ov16825_info *info;
+ struct ov16825_platform_data *pdata = client->dev.platform_data;
+ int ret;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "platform data not specified\n");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "i2c adapdter does not support SMBUS\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(struct ov16825_info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&client->dev, "alloc failed for data structure\n");
+ return -ENOMEM;
+ }
+
+ info->pdata = pdata;
+ sd = &info->sd;
+ v4l2_i2c_subdev_init(sd, client, &ov16825_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+ ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
+ if (ret < 0) {
+ v4l2_err(sd, "failed to init media entity\n");
+ goto error_media_entity_init;
+ }
+
+ /* Default configuration */
+ info->config = &ov16825_configs[0];
+ info->format.width = info->config->width;
+ info->format.height = info->config->height;
+ info->format.code = V4L2_MBUS_FMT_SBGGR10_1X10;
+
+ ret = ov16825_ctrl_create(info);
+ if (ret)
+ goto error_ctrls;
+
+ ret = ov16825_detect_chip(sd);
+ if (ret) {
+ goto chip_not_found;
+ }
+
+ v4l2_info(sd, "OV16825 sensor succesfully probed");
+
+ return 0;
+
+chip_not_found:
+ ov16825_ctrl_free(info);
+error_ctrls:
+/* NOTE(review): this label also runs media_entity_cleanup() when
+ * media_entity_init() itself failed — confirm that is safe here */
+error_media_entity_init:
+ media_entity_cleanup(&sd->entity);
+ v4l2_device_unregister_subdev(sd);
+ kfree(info);
+
+ return ret;
+}
+
+
+/* I2C remove: undo probe in reverse order. */
+static int ov16825_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov16825_info *info = to_state(sd);
+
+ ov16825_ctrl_free(info);
+ media_entity_cleanup(&sd->entity);
+ v4l2_device_unregister_subdev(sd);
+ kfree(info);
+
+ return 0;
+}
+
+/* I2C device-id table used for driver matching and module autoloading. */
+static const struct i2c_device_id ov16825_id[] = {
+ { DRIVER_NAME, 0 },
+ { }
+};
+
+/* Fix: referenced nonexistent 'sensor_id'; the table above is 'ov16825_id' */
+MODULE_DEVICE_TABLE(i2c, ov16825_id);
+
+static struct i2c_driver sensor_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = ov16825_probe,
+ .remove = ov16825_remove,
+ .id_table = ov16825_id,
+};
+
+/* Module init/exit boilerplate.
+ * NOTE(review): could be replaced by module_i2c_driver(sensor_driver),
+ * as the mt9v117 driver in this series already does. */
+static __init int init_sensor(void)
+{
+ return i2c_add_driver(&sensor_driver);
+}
+
+static __exit void exit_sensor(void)
+{
+ i2c_del_driver(&sensor_driver);
+}
+
+module_init(init_sensor);
+module_exit(exit_sensor);
diff --git a/drivers/media/video/ov16825/ov16825_clk.c b/drivers/media/video/ov16825/ov16825_clk.c
new file mode 100644
index 00000000000000..d7173fe587f03a
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_clk.c
@@ -0,0 +1,511 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "ov16825_common.h"
+#include "ov16825_clk.h"
+
/* The limits on the VCO clock come from the OmniVision timing spreadsheet.
 * All macro bodies and arguments are fully parenthesized so they expand
 * safely inside larger expressions (e.g. IS_BETWEEN(x & 3, ...)). */
#define MHZ (1000 * 1000)

#define PLL1_VCO_MIN (500 * MHZ)
#define PLL1_VCO_MAX (1000 * MHZ)
/* XXX Value in spreadsheet file from Will */
/* #define PLL1_VCO_MAX (1300 * MHZ) */
#define PLL2_VCO_MIN (100 * MHZ)
#define PLL2_VCO_MAX (400 * MHZ)
#define PLLADC_VCO_MIN (200 * MHZ)
#define PLLADC_VCO_MAX (240 * MHZ)

#define IS_BETWEEN(_f, _min, _max) (((_f) >= (_min)) && ((_f) <= (_max)))
+
+/* PLL Block helpers */
+
/* Decode an OP_SYS/VT_SYS divider field (3 bits): values 0-3 mean no
 * division, values 4-7 divide by 2, 4, 6 and 8 (i.e. 2 * (field - 3)). */
static inline u32 div_sys(u32 val, unsigned char _reg)
{
	unsigned field = _reg & 0x7;

	return (field < 4) ? val : val / ((field - 3) * 2);
}
+
/* Decode an OP_PIX/VT_PIX divider field (2 bits): divide by 4, 5, 6 or 7
 * according to the low two register bits. */
static inline u32 div_pix(u32 val, unsigned char reg)
{
	unsigned divider = (reg & 0x3) + 4;

	return val / divider;
}
+
/* Optional divide-by-two, enabled when the bit(s) selected by @mask are
 * set in @reg. */
static inline u32 div2(u32 val, unsigned char reg, int mask)
{
	return (reg & mask) ? val / 2 : val;
}
+
/* Decode a SELD5 output divider field (2 bits): 0/1 -> no division,
 * 2 -> /2, 3 -> /2.5 (computed as val * 2 / 5). */
static u32 seld5(u32 val, unsigned char _reg)
{
	switch (_reg & 0x3) {
	case 2:
		return val / 2;
	case 3:
		return (val * 2) / 5;
	default:
		return val;
	}
}
+
/* PLL1/PLL2 reference pre-divider (2 bits): divide by field + 1. */
static u32 pre_div(u32 val, unsigned char reg)
{
	unsigned divider = (reg & 0x3) + 1;

	return val / divider;
}
+
/* ADC PLL reference pre-divider (2 bits): 0 -> no division, 1 -> /1.5
 * (computed as val * 2 / 3), 2 -> /2, 3 -> /3. */
static u32 pre_div_adc(u32 val, unsigned char _reg)
{
	switch (_reg & 0x3) {
	case 1:
		return (val * 2) / 3;
	case 2:
		return val / 2;
	case 3:
		return val / 3;
	default:
		return val;
	}
}
+
+/* Clocks getters */
+
+/* PLL1 */
+static inline u32 ov16825_PLL1_vco_clk(u32 refclk,
+ struct ov16825_pll *c)
+{
+ return pre_div(refclk, c->prediv) * c->multi;
+}
+
+static inline u32 ov16825_phy_pclk(u32 vco_clk,
+ struct ov16825_pll *c)
+{
+ return div_sys(vco_clk, c->op_sys);
+}
+
+static inline u32 ov16825_pll_pclk(u32 phy_pclk,
+ struct ov16825_pll *c)
+{
+ u32 res = div_pix(phy_pclk, c->op_pix);
+ return div2(res, c->op_2lane, BIT(0));
+}
+
+static inline u32 ov16825_pll_sclk(u32 vco_clk,
+ struct ov16825_pll *c)
+{
+ u32 res = div_sys(vco_clk, c->vt_sys);
+ res = div_pix(res, c->vt_pix);
+ return div2(res, c->vt_pix, BIT(2));
+}
+
+static inline u32 ov16825_pclk(u32 phy_pclk,
+ struct ov16825_reg_conf *c)
+{
+ u32 res = ov16825_pll_pclk(phy_pclk, &c->pll);
+ if (c->r3020 & BIT(3))
+ res = res / 2;
+ return res;
+}
+
+/* Common */
+
+static u32 ov16825_common_pll_clk(u32 prediv_clk,
+ struct ov16825_pll2 *c,
+ int dac_mode)
+{
+ u32 res = div2(prediv_clk, c->rdiv, BIT(0));
+ if (dac_mode)
+ res = res * (c->multi & 0x1F);
+ else
+ res = res * (c->multi & 0x3F);
+
+ res = res / (c->divr + 1);
+ return seld5(res, c->seld5);
+}
+
+/* PLL2 */
+
+static inline u32 ov16825_pll2_sclk(u32 refclk,
+ struct ov16825_pll2 *c)
+{
+ return ov16825_common_pll_clk(pre_div(refclk, c->prediv),
+ c, 0);
+}
+
+/* PLL ADC */
+
+static inline u32 ov16825_dacclk(u32 refclk,
+ struct ov16825_pll2 *c)
+{
+ return ov16825_common_pll_clk(pre_div_adc(refclk, c->prediv),
+ c, 1);
+}
+
+/* Debug */
+
+/* Debug helper: dump the shared PLL2/ADC-PLL divider fields. */
+void ov16825_print_pll_common(struct ov16825_pll2 *c)
+{
+	printk(" PREDIV = 0x%x\n", c->prediv);
+	printk(" RDIV = 0x%x\n", c->rdiv);
+	printk(" MULTI = 0x%x\n", c->multi);
+	printk(" DIVR = 0x%x\n", c->divr);
+	printk(" SELD5 = 0x%x\n", c->seld5);
+}
+
+/* Debug helper: dump a complete clock-tree register configuration
+ * (PLL1, PLL2, ADC PLL and the misc clock-control registers). */
+void ov16825_print_configuration(struct ov16825_reg_conf *c)
+{
+	printk("PLL:\n");
+	printk(" PREDIV = 0x%x\n", c->pll.prediv);
+	printk(" MULTI = 0x%x\n", c->pll.multi);
+	printk(" VT_SYS = 0x%x\n", c->pll.vt_sys);
+	printk(" VT_PIX = 0x%x\n", c->pll.vt_pix);
+	printk(" OP_SYS = 0x%x\n", c->pll.op_sys);
+	printk(" OP_PIX = 0x%x\n", c->pll.op_pix);
+	printk(" OP_2LANE = 0x%x\n", c->pll.op_2lane);
+	printk("PLL2:\n");
+	ov16825_print_pll_common(&c->pll2);
+	printk("PLL ADC:\n");
+	ov16825_print_pll_common(&c->adc);
+	printk("SC_CMMN_CLOCK_SEL = 0x%x\n", c->r3020);
+	printk("0x3030 = 0x%x\n", c->r3030);
+	printk("SC_CMMN_CORE_CTRL0 = 0x%x\n", c->r3032);
+	printk("SC_CMMN_CORE_CTRL1 = 0x%x\n", c->r3033);
+	printk("0x3106 = 0x%x\n", c->r3106);
+}
+
+int ov16825_current_configuration(struct v4l2_subdev *sd,
+ struct ov16825_reg_conf *c)
+{
+ int ret;
+
+#define READ_REGISTER(_reg, _buf) \
+ ret = sensor_read(sd, _reg, _buf); \
+ if (ret < 0) \
+ return ret;
+
+ READ_REGISTER(PLL1_CTRL0, &c->pll.prediv);
+ READ_REGISTER(PLL1_CTRL2, &c->pll.multi);
+ READ_REGISTER(PLL1_CTRL3, &c->pll.vt_sys);
+ READ_REGISTER(PLL1_CTRL4, &c->pll.vt_pix);
+ READ_REGISTER(PLL1_CTRL5, &c->pll.op_sys);
+ READ_REGISTER(PLL1_CTRL6, &c->pll.op_pix);
+ READ_REGISTER(PLL1_CTRL7, &c->pll.op_2lane);
+ READ_REGISTER(PLL2_CTRL0, &c->pll2.prediv);
+ READ_REGISTER(PLL2_CTRL1, &c->pll2.multi);
+ READ_REGISTER(PLL2_CTRL2, &c->pll2.rdiv);
+ READ_REGISTER(PLL2_CTRL3, &c->pll2.divr);
+ READ_REGISTER(PLL2_CTRL4, &c->pll2.seld5);
+ READ_REGISTER(PLL3_CTRL0, &c->adc.prediv);
+ READ_REGISTER(PLL3_CTRL1, &c->adc.multi);
+ READ_REGISTER(PLL3_CTRL2, &c->adc.rdiv);
+ READ_REGISTER(PLL3_CTRL3, &c->adc.divr);
+ READ_REGISTER(PLL3_CTRL4, &c->adc.seld5);
+ READ_REGISTER(SC_CMMN_CLOCK_SEL, &c->r3020);
+ READ_REGISTER(0x3030, &c->r3030);
+ READ_REGISTER(SC_CMMN_CORE_CTRL0, &c->r3032);
+ READ_REGISTER(SC_CMMN_CORE_CTRL1, &c->r3033);
+ READ_REGISTER(0x3106, &c->r3106);
+
+ return 0;
+}
+
+/* Debug helper: derive and print every clock in the tree (VCO, PHY
+ * pixel clock, PLL pixel clock, final PCLK, SCLK, PLL2 SCLK and the
+ * DAC clock) from @refclk and configuration @c.  Always returns 0. */
+int ov16825_print_clocks(u32 refclk,
+			 struct ov16825_reg_conf *c)
+{
+	u32 pll_vco_clk;
+	u32 phy_pclk;
+	u32 pll_pclk;
+	u32 pll_sclk;
+	u32 pclk;
+	u32 pll2_sclk;
+	u32 dacclk;
+
+	pll_vco_clk = ov16825_PLL1_vco_clk(refclk, &c->pll);
+
+	phy_pclk = ov16825_phy_pclk(pll_vco_clk, &c->pll);
+	pll_pclk = ov16825_pll_pclk(phy_pclk, &c->pll);
+	pclk = ov16825_pclk(phy_pclk, c);
+	pll_sclk = ov16825_pll_sclk(pll_vco_clk, &c->pll);
+
+	pll2_sclk = ov16825_pll2_sclk(refclk, &c->pll2);
+
+	dacclk = ov16825_dacclk(refclk, &c->adc);
+
+	printk("refclk = %u Hz\n", refclk);
+	printk("pll_vco_clk = %u Hz\n", pll_vco_clk);
+	printk("phy_pclk = %u Hz\n", phy_pclk);
+	printk("pll_pclk = %u Hz\n", pll_pclk);
+	printk("pclk = %u Hz\n", pclk);
+	printk("pll_sclk = %u Hz\n", pll_sclk);
+	printk("pll2_sclk = %u Hz\n", pll2_sclk);
+	printk("dacclk = %u Hz\n", dacclk);
+
+	return 0;
+}
+
+static void select_pll_phy_clk(u32 refclk,
+ u32 target_clk,
+ u32 *best_clk,
+ struct ov16825_pll *best_conf,
+ struct ov16825_pll *chalenger)
+{
+ u32 tmp_clk;
+ u32 target_vco_clk;
+ u32 multi_tmp;
+
+ tmp_clk = pre_div(refclk, chalenger->prediv);
+
+ if ((chalenger->op_sys & 0x7) < 4)
+ target_vco_clk = target_clk;
+ else
+ target_vco_clk = target_clk * (2 * (chalenger->op_sys - 3));
+
+ multi_tmp = DIV_ROUND_UP(target_vco_clk, tmp_clk);
+ if (multi_tmp > 0xFF)
+ multi_tmp = 0xFF;
+
+ tmp_clk = ov16825_PLL1_vco_clk(refclk, chalenger);
+ tmp_clk = ov16825_phy_pclk(tmp_clk, chalenger);
+
+ if (tmp_clk >= target_clk && (!*best_clk || tmp_clk < *best_clk)) {
+ *best_clk = tmp_clk;
+ *best_conf = *chalenger;
+ }
+}
+
+/* common */
+/* Evaluate one PLL2/ADC-PLL divider candidate against the current best.
+ *
+ * Derives the multiplier needed for @chalenger to reach @targetclk,
+ * clamps it to the field width (5 bits in DAC mode, 6 bits otherwise)
+ * and stores it into @chalenger->multi (in place).  The candidate wins
+ * when its output clock meets the target and is lower than *best_clk
+ * (0 meaning "none found yet"). */
+static void select_common_clk(u32 refclk,
+			      u32 targetclk,
+			      u32 *best_clk,
+			      struct ov16825_pll2 *best_conf,
+			      struct ov16825_pll2 *chalenger,
+			      int dac_mode)
+{
+	u32 tmp_clk;
+	u32 target_vco_clk;
+	u32 clk;
+	u32 multi_tmp;
+
+	/* The ADC PLL and PLL2 use different pre-divider encodings */
+	if (dac_mode)
+		tmp_clk = pre_div_adc(refclk, chalenger->prediv);
+	else
+		tmp_clk = pre_div(refclk, chalenger->prediv);
+
+	/* NOTE(review): this /2 keys off bit 0 of ->multi, while
+	 * ov16825_common_pll_clk keys the same /2 off bit 0 of ->rdiv —
+	 * looks like ->rdiv was intended here; confirm. */
+	tmp_clk = div2(tmp_clk, chalenger->multi, BIT(0));
+
+	/* Undo the SELD5 output divider to get the needed VCO clock */
+	if (chalenger->seld5 == 0x2)
+		target_vco_clk = targetclk * 2;
+	else if (chalenger->seld5 == 0x3)
+		target_vco_clk = (targetclk * 5) / 2;
+	else
+		target_vco_clk = targetclk;
+
+	target_vco_clk = target_vco_clk * (chalenger->divr + 1);
+
+	multi_tmp = DIV_ROUND_UP(target_vco_clk, tmp_clk);
+	if (dac_mode && multi_tmp > 0x1F)
+		multi_tmp = 0x1F;
+	else if (!dac_mode && multi_tmp > 0x3F)
+		multi_tmp = 0x3F;
+	chalenger->multi = multi_tmp;
+
+	if (dac_mode)
+		clk = ov16825_dacclk(refclk, chalenger);
+	else
+		clk = ov16825_pll2_sclk(refclk, chalenger);
+
+	/* Keep the lowest clock that still meets the target */
+	if (clk >= targetclk && (!*best_clk || clk < *best_clk)) {
+		*best_clk = clk;
+		*best_conf = *chalenger;
+	}
+}
+
+/* Exhaustive search over PREDIV/RDIV/DIVR/SELD5 for the lowest output
+ * clock >= @target_clk.  *c is used as scratch space during the search
+ * and receives the winning configuration on success.  Returns 0, or
+ * -EINVAL when no combination reaches the target (in which case *c is
+ * left holding the last candidate tried). */
+static int ov16825_set_common_clk(u32 refclk,
+				  u32 target_clk,
+				  struct ov16825_pll2 *c,
+				  int dac_mode)
+{
+	struct ov16825_pll2 best_conf;
+	u32 best_clk = 0;
+
+	for (c->prediv = 0; c->prediv < 4; c->prediv++)
+		for (c->rdiv = 0; c->rdiv < 2; c->rdiv++)
+			for (c->divr = 0; c->divr < 16; c->divr++)
+				for (c->seld5 = 0; c->seld5 < 4; c->seld5++)
+					select_common_clk(refclk,
+							  target_clk,
+							  &best_clk,
+							  &best_conf,
+							  c,
+							  dac_mode);
+	if (best_clk == 0)
+		return -EINVAL;
+
+	printk("Selecting clock %u Hz for %u Hz target clock\n",
+	       best_clk, target_clk);
+
+	*c = best_conf;
+
+	return 0;
+}
+
+/* Exhaustive search over PLL1 PREDIV and OP_SYS for the lowest PHY
+ * pixel clock >= @target_clk starting from @refclk.  c->pll is used as
+ * scratch during the search and receives the winning configuration.
+ * Returns 0, or -EINVAL when no combination reaches the target. */
+int ov16825_set_phy_pclk(u32 refclk,
+			 u32 target_clk,
+			 struct ov16825_reg_conf *c)
+{
+	struct ov16825_pll best_conf;
+	u32 best_clk = 0;
+
+	for (c->pll.prediv = 0; c->pll.prediv < 4; c->pll.prediv++)
+		for (c->pll.op_sys = 0; c->pll.op_sys < 8; c->pll.op_sys++)
+			select_pll_phy_clk(refclk,
+					   target_clk,
+					   &best_clk,
+					   &best_conf,
+					   &c->pll);
+
+	if (best_clk == 0)
+		return -EINVAL;
+
+	printk("Selecting PHYCLK clock %u Hz for %u Hz target clock\n",
+	       best_clk, target_clk);
+
+	c->pll = best_conf;
+
+	return 0;
+}
+
+/* This function use div_op_pix, div_2lane and r3020 register to compute a
+ * divider allowing to pass from phy_pclk to pclk.
+ * - DIV_OP_PIX can only be 4, 5, 6 or 8
+ * - DIV_2LANE and R3020 can be used to divide by 1, 2 or 4
+ *
+ * This means that there are only 10 possible dividers :
+ *
+ ------------------------------------------------------------
+ * | DIVIDER | DIV_OP_PIX | DIV_2_LANE | DIV_R3020 |
+ |------------+---------------+---------------+---------------|
+ | 4 | 4 | 1 | 1 |
+ | 5 | 5 | 1 | 1 |
+ | 6 | 6 | 1 | 1 |
+ | 8 | 8 | 1 | 1 |
+ | 10 | 5 | 2 | 1 |
+ | 12 | 6 | 2 | 1 |
+ | 16 | 8 | 2 | 1 |
+ | 20 | 5 | 2 | 2 |
+ | 24 | 6 | 2 | 2 |
+ | 32 | 8 | 2 | 2 |
+ ------------------------------------------------------------
+*/
+/* Split the requested PHY->PCLK divider @div across OP_2LANE, the R3020
+ * bit-3 divider and OP_PIX, rounding the divider down so the resulting
+ * PCLK is at least the one asked for (see the table above).
+ * NOTE: the trailing HACK overrides the whole computation with a fixed
+ * /8 chain (OP_PIX /4 combined with the R3020 bit-3 /2). */
+static void ov16825_pclk_closest_conf(u32 div,
+				      struct ov16825_reg_conf *c)
+{
+
+	/* We first choose if we need to divide by 2 or 4 */
+	if (div < 10) {
+		c->pll.op_2lane = OP_2LANE_NODIV;
+		c->r3020 = c->r3020 & ~BIT(3);
+	} else if (div < 20){
+		c->pll.op_2lane = OP_2LANE_DIV2;
+		c->r3020 = c->r3020 & ~BIT(3);
+		div = div / 2;
+	} else {
+		c->pll.op_2lane = OP_2LANE_DIV2;
+		c->r3020 = c->r3020 | BIT(3);
+		div = div / 4;
+	}
+
+	/* Then choose the closer value for the rest of the divider. We round
+	 * the divider down to get at least the pclk asked. */
+	if (div < 5)
+		c->pll.op_pix = OP_PIX_DIV4;
+	else if (div < 6)
+		c->pll.op_pix = OP_PIX_DIV5;
+	else if (div < 8)
+		c->pll.op_pix = OP_PIX_DIV6;
+	else
+		c->pll.op_pix = OP_PIX_DIV8;
+
+	/* XXX HACK : The test on board show a problem when we're using a 8
+	 * divider in op_pix. As we dont understand this problem yet at the
+	 * moment we return only a working 8 divider.*/
+	c->pll.op_pix = OP_PIX_DIV4;
+	c->pll.op_2lane = OP_2LANE_NODIV;
+	c->r3020 = c->r3020 | BIT(3);
+}
+
+int ov16825_set_pclk(u32 phy_pclk,
+ u32 target_clk,
+ struct ov16825_reg_conf *c)
+{
+ u32 pclk;
+
+ ov16825_pclk_closest_conf(phy_pclk / target_clk, c);
+
+ pclk = ov16825_pclk(phy_pclk, c);
+
+ printk("Selecting PCLK clock %u Hz for %u Hz target clock\n",
+ pclk, target_clk);
+
+ if (pclk < target_clk)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ov16825_set_daccclk(u32 refclk,
+ u32 target_clk,
+ struct ov16825_reg_conf *c)
+{
+ return ov16825_set_common_clk(refclk,
+ target_clk,
+ &c->adc,
+ 1);
+}
+
+int ov16825_set_pll2_sclk(u32 refclk,
+ u32 target_clk,
+ struct ov16825_reg_conf *c)
+{
+ return ov16825_set_common_clk(refclk,
+ target_clk,
+ &c->pll2,
+ 0);
+}
+
+int ov16825_apply_configuration(struct v4l2_subdev *sd,
+ struct ov16825_reg_conf *c)
+{
+ int ret;
+
+#define WRITE_REGISTER(_reg, _buf) \
+ ret = sensor_write(sd, _reg, _buf); \
+ if (ret < 0) \
+ return ret;
+
+ c->r3033 = (c->r3033 & ~SEL_DACCLK_PLL_DACCLK) | SEL_DACCLK_PLL_DACCLK;
+ c->r3032 = (c->r3032 & ~SEL_SCLK_PLL2) | SEL_SCLK_PLL2;
+
+ WRITE_REGISTER(PLL1_CTRL0, c->pll.prediv);
+ WRITE_REGISTER(PLL1_CTRL2, c->pll.multi);
+ WRITE_REGISTER(PLL1_CTRL3, c->pll.vt_sys);
+ WRITE_REGISTER(PLL1_CTRL4, c->pll.vt_pix);
+ WRITE_REGISTER(PLL1_CTRL5, c->pll.op_sys);
+ WRITE_REGISTER(PLL1_CTRL6, c->pll.op_pix);
+ WRITE_REGISTER(PLL1_CTRL7, c->pll.op_2lane);
+ WRITE_REGISTER(PLL2_CTRL0, c->pll2.prediv);
+ WRITE_REGISTER(PLL2_CTRL1, c->pll2.multi);
+ WRITE_REGISTER(PLL2_CTRL2, c->pll2.rdiv);
+ WRITE_REGISTER(PLL2_CTRL3, c->pll2.divr);
+ WRITE_REGISTER(PLL2_CTRL4, c->pll2.seld5);
+ WRITE_REGISTER(PLL3_CTRL0, c->adc.prediv);
+ WRITE_REGISTER(PLL3_CTRL1, c->adc.multi);
+ WRITE_REGISTER(PLL3_CTRL2, c->adc.rdiv);
+ WRITE_REGISTER(PLL3_CTRL3, c->adc.divr);
+ WRITE_REGISTER(PLL3_CTRL4, c->adc.seld5);
+ WRITE_REGISTER(SC_CMMN_CLOCK_SEL, c->r3020);
+ //WRITE_REGISTER(0x3030, c->r3030);
+ WRITE_REGISTER(SC_CMMN_CORE_CTRL0, c->r3032);
+ WRITE_REGISTER(SC_CMMN_CORE_CTRL1, c->r3033);
+ //WRITE_REGISTER(0x3106, c->r3106);
+
+ return 0;
+}
diff --git a/drivers/media/video/ov16825/ov16825_clk.h b/drivers/media/video/ov16825/ov16825_clk.h
new file mode 100644
index 00000000000000..305c27c0e8eebc
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_clk.h
@@ -0,0 +1,64 @@
+#ifndef __OV16825_CLK__H__
+#define __OV16825_CLK__H__
+
+/* Raw PLL1 register fields (see ov16825_current_configuration for the
+ * PLL1_CTRL0..7 mapping). */
+struct ov16825_pll {
+	unsigned char prediv;	/* PLL1_CTRL0: reference pre-divider */
+	unsigned char multi;	/* PLL1_CTRL2: feedback multiplier */
+
+	unsigned char op_sys;	/* PLL1_CTRL5: output system divider */
+	unsigned char op_pix;	/* PLL1_CTRL6: output pixel divider */
+	unsigned char op_2lane;	/* PLL1_CTRL7: 2-lane /2 control */
+
+	unsigned char vt_sys;	/* PLL1_CTRL3: video timing system divider */
+	unsigned char vt_pix;	/* PLL1_CTRL4: video timing pixel divider */
+};
+
+/* Raw divider fields shared by PLL2 (PLL2_CTRL0..4) and the ADC PLL
+ * (PLL3_CTRL0..4). */
+struct ov16825_pll2 {
+	unsigned char prediv;	/* reference pre-divider */
+	unsigned char rdiv;	/* bit 0: extra /2 */
+	unsigned char multi;	/* multiplier (5 bits for ADC, 6 for PLL2) */
+	unsigned char divr;	/* divider: divides by divr + 1 */
+	unsigned char seld5;	/* output divider: 1, 2 or 2.5 */
+};
+
+/* Complete clock-tree register image: the three PLLs plus the misc
+ * clock-control registers read/written by the 0x30xx accessors. */
+struct ov16825_reg_conf {
+	struct ov16825_pll pll;		/* PLL1 (pixel/system clocks) */
+	struct ov16825_pll2 pll2;	/* PLL2 (SCLK) */
+	struct ov16825_pll2 adc;	/* ADC PLL (DAC clock) */
+
+	/* External */
+	unsigned char r3020;	/* SC_CMMN_CLOCK_SEL (bit 3: PCLK /2) */
+	unsigned char r3030;	/* register 0x3030 */
+	unsigned char r3032;	/* SC_CMMN_CORE_CTRL0 (SCLK source select) */
+	unsigned char r3033;	/* SC_CMMN_CORE_CTRL1 (DACCLK source select) */
+	unsigned char r3106;	/* register 0x3106 */
+};
+
+/* Debug dump of a full configuration */
+void ov16825_print_configuration(struct ov16825_reg_conf *c);
+
+/* Read the current configuration back from the sensor (0 or -errno) */
+int ov16825_current_configuration(struct v4l2_subdev *sd,
+				  struct ov16825_reg_conf *c);
+
+/* Compute and print every derived clock; always returns 0 */
+int ov16825_print_clocks(u32 refclk,
+			 struct ov16825_reg_conf *c);
+
+/* Search PLL1 dividers for the lowest PHY pixel clock >= target */
+int ov16825_set_phy_pclk(u32 refclk,
+			 u32 target_clk,
+			 struct ov16825_reg_conf *c);
+
+/* Program the PHY->PCLK divider chain for at least target_clk */
+int ov16825_set_pclk(u32 phy_pclk,
+		     u32 target_clk,
+		     struct ov16825_reg_conf *c);
+
+/* Search the ADC PLL dividers for the lowest DAC clock >= target */
+int ov16825_set_daccclk(u32 refclk,
+			u32 dacclk,
+			struct ov16825_reg_conf *c);
+
+/* Search the PLL2 dividers for the lowest SCLK >= target */
+int ov16825_set_pll2_sclk(u32 refclk,
+			  u32 target_clk,
+			  struct ov16825_reg_conf *c);
+
+/* Write configuration c to the sensor (0 or -errno) */
+int ov16825_apply_configuration(struct v4l2_subdev *sd,
+				struct ov16825_reg_conf *c);
+
+#endif //__OV16825_CLK__H__
diff --git a/drivers/media/video/ov16825/ov16825_common.c b/drivers/media/video/ov16825/ov16825_common.c
new file mode 100644
index 00000000000000..eb43814474aa5a
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_common.c
@@ -0,0 +1,107 @@
+#include <linux/i2c.h>
+#include "ov16825_common.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+
+inline struct ov16825_info *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov16825_info, sd);
+}
+
+int sensor_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 data[3];
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(data),
+ .buf = data,
+ };
+
+ data[0] = reg >> 8;
+ data[1] = reg & 0xff;
+ data[2] = val;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ if (ret < 0) {
+ v4l2_err(sd, "I2C write failed [%02x->%04x: %d]\n",
+ val, reg, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int sensor_write16(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+ int ret;
+ ret = sensor_write(sd, reg, (val >> 8) & 0xFF);
+ if (!ret)
+ ret = sensor_write(sd, reg +1, val & 0xFF);
+ return ret;
+}
+
+int sensor_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 regbuf[2];
+ u8 valbuf[1];
+
+ struct i2c_msg msg[] = {
+ [0] = {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(regbuf),
+ .buf = regbuf,
+ },
+ [1] = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(valbuf),
+ .buf = valbuf,
+ },
+ };
+
+ regbuf[0] = reg >> 8;
+ regbuf[1] = reg & 0xff;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < 0) {
+ v4l2_err(sd, "I2C read failed [%04x: %d]\n", reg, ret);
+ return ret;
+ }
+
+ *val = valbuf[0];
+
+ return 0;
+}
+
+/*
+ * Write a list of register settings;
+ */
+int sensor_write_array(struct v4l2_subdev *sd,
+ struct regval_list *regs,
+ int array_size)
+{
+ int i=0;
+
+ if(!regs)
+ return -EINVAL;
+
+ while(i<array_size) {
+ int ret;
+ ret = sensor_write(sd, regs->addr, regs->data);
+ if (ret) {
+ dev_err(sd->v4l2_dev->dev, "[@0x%x] <- 0x%x error %d\n",
+ regs->addr, regs->data, ret);
+ return ret;
+ }
+ i++;
+ regs++;
+ }
+ return 0;
+}
diff --git a/drivers/media/video/ov16825/ov16825_common.h b/drivers/media/video/ov16825/ov16825_common.h
new file mode 100644
index 00000000000000..4598024af7be7b
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_common.h
@@ -0,0 +1,75 @@
+/*
+ * OV16825 sensor driver — common definitions shared by the driver files.
+ * Derived from the sunxi sensor header file by raymonxiu.
+ */
+#ifndef __OV16825_CAMERA__H__
+#define __OV16825_CAMERA__H__
+
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/videodev2.h>
+#include <media/ov16825.h>
+
+#include "ov16825_registers.h"
+
+#define DRIVER_NAME "ov16825"
+
+/* Per-device driver state; to_state() recovers it from the embedded
+ * subdev via container_of. */
+struct ov16825_info {
+	struct v4l2_subdev sd;			/* embedded V4L2 subdev */
+	struct ov16825_platform_data *pdata;	/* board-supplied data */
+	struct media_pad pad;			/* single source pad */
+	struct v4l2_mbus_framefmt format;	/* current mbus format */
+	struct ov16825_config *config;		/* active mode (regs + size) */
+	int streaming;				/* streaming state flag */
+
+	/* Ctrl specific */
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct v4l2_ctrl *gain;
+	/* Exposure control in us */
+	struct v4l2_ctrl *exposure;
+	struct v4l2_ctrl *vflip;
+	struct v4l2_ctrl *hflip;
+	struct v4l2_ctrl *isp_en;
+	struct v4l2_ctrl *average_en;
+	struct v4l2_ctrl *white_dpc_en;
+	struct v4l2_ctrl *black_dpc_en;
+	struct v4l2_ctrl *wb_gain_en;
+	struct v4l2_ctrl *otp_cc_en;
+	struct v4l2_ctrl *dbc_en;
+	struct v4l2_ctrl *scale_en;
+	struct v4l2_ctrl *blc_en;
+	struct v4l2_ctrl *average_before;
+	struct v4l2_ctrl *digital_gain_en;
+	struct v4l2_ctrl *test_pattern;
+	struct v4l2_ctrl *red_gain;
+	struct v4l2_ctrl *green_gain;
+	struct v4l2_ctrl *blue_gain;
+	struct v4l2_ctrl *focus;
+};
+
+/* One register setting: 16-bit address, 8-bit value (cf. sensor_write). */
+struct regval_list {
+	unsigned short addr;
+	unsigned char data;
+};
+
+/* One fixed sensor mode: output geometry plus the register sequence
+ * that programs it. */
+struct ov16825_config {
+	u32 width;			/* output width in pixels */
+	u32 height;			/* output height in pixels */
+	struct regval_list *regs;	/* register sequence for this mode */
+	u32 regs_len;			/* number of entries in regs */
+	u32 line_duration_us;		/* line period in microseconds */
+};
+
+/* container_of accessor: embedded subdev -> driver state */
+struct ov16825_info *to_state(struct v4l2_subdev *sd);
+
+/* 8-bit register write; returns 0 or a negative I2C error */
+int sensor_write(struct v4l2_subdev *sd, u16 reg, u8 val);
+
+/* 16-bit write as two 8-bit writes: MSB at reg, LSB at reg + 1 */
+int sensor_write16(struct v4l2_subdev *sd, u16 reg, u16 val);
+
+/* 8-bit register read; returns 0 or a negative I2C error */
+int sensor_read(struct v4l2_subdev *sd, u16 reg, u8 *val);
+
+/* Write an array of register settings, stopping at the first error */
+int sensor_write_array(struct v4l2_subdev *sd,
+		       struct regval_list *regs,
+		       int array_size);
+
+#endif //__OV16825_CAMERA__H__
diff --git a/drivers/media/video/ov16825/ov16825_confs.h b/drivers/media/video/ov16825/ov16825_confs.h
new file mode 100644
index 00000000000000..7d16e4423ba21b
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_confs.h
@@ -0,0 +1,1283 @@
+#ifndef __OV16825_CONFS__H__
+#define __OV16825_CONFS__H__
+
+#include "ov16825_common.h"
+
+static struct regval_list ov16825_10bit_fullres_config[] = {
+ /* PLL control */
+ {0x0300, 0x02},
+ {0x0302, 0x74},
+ {0x0305, 0x05},
+ {0x0306, 0x00},
+ {0x030b, 0x03},
+ {0x030c, 0x13},
+ {0x030e, 0x00},
+ {0x030f, 0x02},
+ {0x0313, 0x02},
+ {0x0314, 0x12},
+ {0x031f, 0x00},
+ {0x3018, 0x72},
+ {0x3022, 0x01},
+ {0x3607, 0x2b},
+ {0x360b, 0x82},
+
+ /* System control */
+ {0x3031, 0x0a},
+ {0x3032, 0x80},
+
+ /* Analog control */
+ {0x3601, 0xf8},
+ {0x3602, 0x00},
+ {0x3603, 0x00},
+ {0x3604, 0x00},
+ {0x3605, 0x50},
+ {0x3606, 0x00},
+ {0x3608, 0x16},
+ {0x3609, 0x00},
+ {0x360a, 0x00},
+ {0x360c, 0x1a},
+ {0x360d, 0x00},
+ {0x360e, 0x99},
+ {0x360f, 0x75},
+ {0x3610, 0x69},
+ {0x3611, 0x59},
+ {0x3612, 0x40},
+ {0x3613, 0x89},
+ {0x3614, 0x77},
+ {0x3615, 0x64},
+ {0x3616, 0x30},
+ {0x3617, 0x00},
+ {0x3618, 0x20},
+ {0x3619, 0x00},
+ {0x361a, 0x10},
+ {0x361c, 0x10},
+ {0x361d, 0x00},
+ {0x361e, 0x00},
+ {0x3631, 0x60},
+ {0x3638, 0x00},
+ {0x3640, 0x15},
+ {0x3641, 0x54},
+ {0x3642, 0x63},
+ {0x3643, 0x32},
+ {0x3644, 0x03},
+ {0x3645, 0x04},
+ {0x3646, 0x85},
+ {0x364a, 0x07},
+
+ /* Sensor timing control */
+ {0x3700, 0x60},
+ {0x3701, 0x10},
+ {0x3702, 0x22},
+ {0x3703, 0x40},
+ {0x3704, 0x10},
+ {0x3705, 0x01},
+ {0x3706, 0x34},
+ {0x3707, 0x08},
+ {0x3708, 0x40},
+ {0x3709, 0x78},
+ {0x370a, 0x03},
+ {0x370a, 0x03},
+ {0x370b, 0xe2},
+ {0x370c, 0x06},
+ {0x370e, 0x40},
+ {0x370f, 0x0a},
+ {0x3710, 0x30},
+ {0x3711, 0x40},
+ {0x3714, 0x31},
+ {0x3718, 0x75},
+ {0x3719, 0x25},
+ {0x371a, 0x55},
+ {0x371b, 0x05},
+ {0x371c, 0x55},
+ {0x371d, 0x05},
+ {0x371e, 0x11},
+ {0x371f, 0x2d},
+ {0x3720, 0x15},
+ {0x3721, 0x30},
+ {0x3722, 0x15},
+ {0x3723, 0x30},
+ {0x3724, 0x08},
+ {0x3725, 0x08},
+ {0x3726, 0x04},
+ {0x3727, 0x04},
+ {0x3728, 0x04},
+ {0x3729, 0x04},
+ {0x372a, 0x29},
+ {0x372b, 0xc9},
+ {0x372c, 0xa9},
+ {0x372d, 0xb9},
+ {0x372e, 0x95},
+ {0x372f, 0x55},
+ {0x3730, 0x55},
+ {0x3731, 0x55},
+ {0x3732, 0x05},
+ {0x3733, 0x80},
+ {0x3734, 0x90},
+ {0x3739, 0x05},
+ {0x373a, 0x40},
+ {0x373b, 0x18},
+ {0x373c, 0x38},
+ {0x373e, 0x15},
+ {0x373f, 0x80},
+ {0x3760, 0x00},
+ {0x3761, 0x30},
+ {0x3762, 0x00},
+ {0x3763, 0xc0},
+ {0x3764, 0x03},
+ {0x3765, 0x00},
+
+ /* System timing control */
+ {0x3800, 0x00},
+ {0x3801, 0x20},
+ {0x3802, 0x00},
+ {0x3803, 0x0e},
+ {0x3804, 0x12},
+ {0x3805, 0x3f},
+ {0x3806, 0x0d},
+ {0x3807, 0x93},
+ {0x3808, 0x12},
+ {0x3809, 0x00},
+ {0x380a, 0x0d},
+ {0x380b, 0x80},
+ {0x380c, 0x0b},
+ {0x380d, 0x76},
+ {0x380e, 0x0d},
+ {0x380f, 0xa2},
+ {0x3811, 0x0f},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3820, 0x00},
+ {0x3821, 0x06},
+ {0x3823, 0x08},
+ {0x3827, 0x02},
+ {0x3828, 0x00},
+ {0x3829, 0x00},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3832, 0x00},
+ {0x3833, 0x00},
+ {0x3834, 0x00},
+
+ /* OTP control */
+ {0x3d85, 0x17},
+ {0x3d8c, 0x70},
+ {0x3d8d, 0xa0},
+
+ /* ??? */
+ {0x3f00, 0x02},
+
+ /* BLC control */
+ {0x4000, 0xe1},
+ {0x4001, 0x83},
+
+ /* ??? */
+ {0x400e, 0x00},
+ {0x4011, 0x00},
+ {0x4012, 0x00},
+
+ /* Frame control */
+ {0x4200, 0x08},
+
+ /* Clipping */
+ {0x4302, 0x7f},
+ {0x4303, 0xff},
+ {0x4304, 0x00},
+ {0x4305, 0x00},
+
+ /* ??? */
+ {0x4501, 0x30},
+
+ /* VFIFO control */
+ {0x4603, 0x60},
+
+ /* LVDS control */
+ {0x4b00, 0x22},
+
+ /* ISP frame control */
+ {0x4903, 0x00},
+
+ /* DSP general control */
+ {0x5000, 0x7f},
+ {0x5001, 0x01},
+ {0x5004, 0x00},
+
+ /* ??? */
+ {0x5013, 0x20},
+ {0x5051, 0x00},
+
+ /* OTP cluster correction control */
+ {0x5500, 0x01},
+ {0x5501, 0x00},
+ {0x5502, 0x07},
+ {0x5503, 0xff},
+ {0x5505, 0x6c},
+ {0x5509, 0x02},
+
+ /* Defective pixel cancellation */
+ {0x5780, 0xfc},
+ {0x5781, 0xff},
+ {0x5787, 0x40},
+ {0x5788, 0x08},
+ {0x578a, 0x02},
+ {0x578b, 0x01},
+ {0x578c, 0x01},
+ {0x578e, 0x02},
+ {0x578f, 0x01},
+ {0x5790, 0x01},
+ {0x5792, 0x00},
+
+ /* Windowing and scaling control */
+ {0x5980, 0x00},
+ {0x5981, 0x21},
+ {0x5982, 0x00},
+ {0x5983, 0x00},
+ {0x5984, 0x00},
+ {0x5985, 0x00},
+ {0x5986, 0x00},
+ {0x5987, 0x00},
+ {0x5988, 0x00},
+
+ /* ??? */
+ {0x3201, 0x15},
+ {0x3202, 0x2a},
+ {0x3f00, 0x02},
+ {0x3f02, 0x00},
+ {0x3f04, 0x00},
+ {0x3f05, 0x00},
+ {0x3f08, 0x40},
+
+ {0x4002, 0x04},
+ {0x4003, 0x08},
+ {0x4306, 0x00},
+
+ /* PCLK Period */
+ {0x4837, 0x3f},
+
+ /* EXPO */
+ {0x3501, 0xA9},
+ {0x3502, 0xe0},
+
+ /* Stream On */
+ {0x0100, 0x01},
+};
+
+static struct regval_list ov16825_10bit_12mp_config[] = {
+ /* PLL control */
+ {0x0300, 0x02},
+ {0x0302, 0x74},
+ {0x0305, 0x05},
+ {0x0306, 0x00},
+ {0x030b, 0x03},
+ {0x030c, 0x13},
+ {0x030e, 0x00},
+ {0x030f, 0x02},
+ {0x0313, 0x02},
+ {0x0314, 0x12},
+ {0x031f, 0x00},
+ {0x3018, 0x72},
+ {0x3022, 0x01},
+ {0x3607, 0x2b},
+ {0x360b, 0x82},
+
+ /* System control */
+ {0x3031, 0x0a},
+ {0x3032, 0x80},
+
+ /* Analog control */
+ {0x3601, 0xf8},
+ {0x3602, 0x00},
+ {0x3603, 0x00},
+ {0x3604, 0x00},
+ {0x3605, 0x50},
+ {0x3606, 0x00},
+ {0x3608, 0x16},
+ {0x3609, 0x00},
+ {0x360a, 0x00},
+ {0x360c, 0x1a},
+ {0x360d, 0x00},
+ {0x360e, 0x99},
+ {0x360f, 0x75},
+ {0x3610, 0x69},
+ {0x3611, 0x59},
+ {0x3612, 0x40},
+ {0x3613, 0x89},
+ {0x3614, 0x77},
+ {0x3615, 0x64},
+ {0x3616, 0x30},
+ {0x3617, 0x00},
+ {0x3618, 0x20},
+ {0x3619, 0x00},
+ {0x361a, 0x10},
+ {0x361c, 0x10},
+ {0x361d, 0x00},
+ {0x361e, 0x00},
+ {0x3631, 0x60},
+ {0x3638, 0x00},
+ {0x3640, 0x15},
+ {0x3641, 0x54},
+ {0x3642, 0x63},
+ {0x3643, 0x32},
+ {0x3644, 0x03},
+ {0x3645, 0x04},
+ {0x3646, 0x85},
+ {0x364a, 0x07},
+
+ /* Sensor timing control */
+ {0x3700, 0x60},
+ {0x3701, 0x10},
+ {0x3702, 0x22},
+ {0x3703, 0x40},
+ {0x3704, 0x10},
+ {0x3705, 0x01},
+ {0x3706, 0x34},
+ {0x3707, 0x08},
+ {0x3708, 0x40},
+ {0x3709, 0x78},
+ {0x370a, 0x03},
+ {0x370a, 0x03},
+ {0x370b, 0xe2},
+ {0x370c, 0x06},
+ {0x370e, 0x40},
+ {0x370f, 0x0a},
+ {0x3710, 0x30},
+ {0x3711, 0x40},
+ {0x3714, 0x31},
+ {0x3718, 0x75},
+ {0x3719, 0x25},
+ {0x371a, 0x55},
+ {0x371b, 0x05},
+ {0x371c, 0x55},
+ {0x371d, 0x05},
+ {0x371e, 0x11},
+ {0x371f, 0x2d},
+ {0x3720, 0x15},
+ {0x3721, 0x30},
+ {0x3722, 0x15},
+ {0x3723, 0x30},
+ {0x3724, 0x08},
+ {0x3725, 0x08},
+ {0x3726, 0x04},
+ {0x3727, 0x04},
+ {0x3728, 0x04},
+ {0x3729, 0x04},
+ {0x372a, 0x29},
+ {0x372b, 0xc9},
+ {0x372c, 0xa9},
+ {0x372d, 0xb9},
+ {0x372e, 0x95},
+ {0x372f, 0x55},
+ {0x3730, 0x55},
+ {0x3731, 0x55},
+ {0x3732, 0x05},
+ {0x3733, 0x80},
+ {0x3734, 0x90},
+ {0x3739, 0x05},
+ {0x373a, 0x40},
+ {0x373b, 0x18},
+ {0x373c, 0x38},
+ {0x373e, 0x15},
+ {0x373f, 0x80},
+ {0x3760, 0x00},
+ {0x3761, 0x30},
+ {0x3762, 0x00},
+ {0x3763, 0xc0},
+ {0x3764, 0x03},
+ {0x3765, 0x00},
+
+ /* System timing control */
+ {0x3800, 0x00},
+ {0x3801, 0x20},
+ {0x3802, 0x00},
+ {0x3803, 0x0e},
+ {0x3804, 0x12},
+ {0x3805, 0x3f},
+ {0x3806, 0x0d},
+ {0x3807, 0x93},
+ {0x3808, 0x0f},
+ {0x3809, 0xa0},
+ {0x380a, 0x0b},
+ {0x380b, 0xb8},
+ {0x380c, 0x09},
+ {0x380d, 0x00},
+ {0x380e, 0x0d},
+ {0x380f, 0xa2},
+ {0x3811, 0x0f},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3820, 0x00},
+ {0x3821, 0x06},
+ {0x3823, 0x08},
+ {0x3827, 0x02},
+ {0x3828, 0x00},
+ {0x3829, 0x00},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3832, 0x00},
+ {0x3833, 0x00},
+ {0x3834, 0x00},
+
+ /* OTP control */
+ {0x3d85, 0x17},
+ {0x3d8c, 0x70},
+ {0x3d8d, 0xa0},
+
+ /* ??? */
+ {0x3f00, 0x02},
+
+ /* BLC control */
+ {0x4000, 0xe1},
+ {0x4001, 0x83},
+
+ /* ??? */
+ {0x400e, 0x00},
+ {0x4011, 0x00},
+ {0x4012, 0x00},
+
+ /* Frame control */
+ {0x4200, 0x08},
+
+ /* Clipping */
+ {0x4302, 0x7f},
+ {0x4303, 0xff},
+ {0x4304, 0x00},
+ {0x4305, 0x00},
+
+ /* ??? */
+ {0x4501, 0x30},
+
+ /* VFIFO control */
+ {0x4603, 0x60},
+
+ /* LVDS control */
+ {0x4b00, 0x22},
+
+ /* ISP frame control */
+ {0x4903, 0x00},
+
+ /* DSP general control */
+ {0x5000, 0xff},
+ {0x5001, 0x01},
+ {0x5004, 0x00},
+
+ /* ??? */
+ {0x5013, 0x20},
+ {0x5051, 0x00},
+
+ /* OTP cluster correction control */
+ {0x5500, 0x01},
+ {0x5501, 0x00},
+ {0x5502, 0x07},
+ {0x5503, 0xff},
+ {0x5505, 0x6c},
+ {0x5509, 0x02},
+
+ /* Defective pixel cancellation */
+ {0x5780, 0xfc},
+ {0x5781, 0xff},
+ {0x5787, 0x40},
+ {0x5788, 0x08},
+ {0x578a, 0x02},
+ {0x578b, 0x01},
+ {0x578c, 0x01},
+ {0x578e, 0x02},
+ {0x578f, 0x01},
+ {0x5790, 0x01},
+ {0x5792, 0x00},
+
+ /* Windowing and scaling control */
+ {0x5980, 0x00},
+ {0x5981, 0x21},
+ {0x5982, 0x00},
+ {0x5983, 0x00},
+ {0x5984, 0x00},
+ {0x5985, 0x00},
+ {0x5986, 0x00},
+ {0x5987, 0x00},
+ {0x5988, 0x00},
+
+ /* ??? */
+ {0x3201, 0x15},
+ {0x3202, 0x2a},
+ {0x3f00, 0x02},
+ {0x3f02, 0x00},
+ {0x3f04, 0x00},
+ {0x3f05, 0x00},
+ {0x3f08, 0x40},
+
+ {0x4002, 0x04},
+ {0x4003, 0x08},
+ {0x4306, 0x00},
+
+ /* PCLK Period */
+ {0x4837, 0x3f},
+
+ /* EXPO */
+ {0x3501, 0xA9},
+ {0x3502, 0xe0},
+
+ /* Stream On */
+ {0x0100, 0x01},
+};
+
+
+static struct regval_list ov16825_10bit_bining_config[] = {
+ /* PLL control */
+ {0x0300, 0x02},
+ {0x0302, 0x74},
+ {0x0305, 0x05},
+ {0x0306, 0x00},
+ {0x030b, 0x03},
+ {0x030c, 0x13},
+ {0x030e, 0x01},
+ {0x030f, 0x02},
+ {0x0313, 0x02},
+ {0x0314, 0x12},
+ {0x031f, 0x00},
+ {0x3018, 0x72},
+ {0x3022, 0x01},
+ {0x3607, 0x2b},
+ {0x360b, 0x82},
+
+ /* System control */
+ {0x3031, 0x0a},
+ {0x3032, 0x80},
+
+ /* Analog control */
+ {0x3601, 0xf8},
+ {0x3602, 0x00},
+ {0x3603, 0x00},
+ {0x3604, 0x00},
+ {0x3605, 0x50},
+ {0x3606, 0x00},
+ {0x3608, 0x16},
+ {0x3609, 0x00},
+ {0x360a, 0x00},
+ {0x360c, 0x1a},
+ {0x360d, 0x00},
+ {0x360e, 0x99},
+ {0x360f, 0x75},
+ {0x3610, 0x69},
+ {0x3611, 0x59},
+ {0x3612, 0x40},
+ {0x3613, 0x89},
+ {0x3614, 0x77},
+ {0x3615, 0x64},
+ {0x3616, 0x30},
+ {0x3617, 0x00},
+ {0x3618, 0x20},
+ {0x3619, 0x00},
+ {0x361a, 0x10},
+ {0x361c, 0x10},
+ {0x361d, 0x00},
+ {0x361e, 0x00},
+ {0x3631, 0x60},
+ {0x3638, 0x00},
+ {0x3640, 0x15},
+ {0x3641, 0x54},
+ {0x3642, 0x63},
+ {0x3643, 0x32},
+ {0x3644, 0x03},
+ {0x3645, 0x04},
+ {0x3646, 0x85},
+ {0x364a, 0x07},
+
+ /* Sensor timing control */
+ {0x3700, 0x30},
+ {0x3701, 0x08},
+ {0x3702, 0x11},
+ {0x3703, 0x20},
+ {0x3704, 0x08},
+ {0x3705, 0x00},
+ {0x3706, 0x9a},
+ {0x3708, 0x20},
+ {0x3709, 0x3c},
+ {0x370a, 0x01},
+ {0x370b, 0x6f},
+ {0x370c, 0x03},
+ {0x370e, 0x20},
+ {0x370f, 0x05},
+ {0x3710, 0x20},
+ {0x3711, 0x20},
+ {0x3714, 0x31},
+ {0x3719, 0x13},
+ {0x371b, 0x03},
+ {0x371d, 0x03},
+ {0x371e, 0x09},
+ {0x371f, 0x17},
+ {0x3720, 0x0b},
+ {0x3721, 0x18},
+ {0x3722, 0x0b},
+ {0x3723, 0x18},
+ {0x3724, 0x04},
+ {0x3725, 0x04},
+ {0x3726, 0x02},
+ {0x3727, 0x02},
+ {0x3728, 0x02},
+ {0x3729, 0x02},
+ {0x372a, 0x25},
+ {0x372b, 0x65},
+ {0x372c, 0x55},
+ {0x372d, 0x65},
+ {0x372e, 0x53},
+ {0x372f, 0x33},
+ {0x3730, 0x33},
+ {0x3731, 0x33},
+ {0x3732, 0x03},
+ {0x3734, 0x90},
+ {0x3739, 0x03},
+ {0x373a, 0x20},
+ {0x373b, 0x0c},
+ {0x373c, 0x1c},
+ {0x373e, 0x0b},
+ {0x373f, 0x80},
+
+ /* System timing control */
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x12},
+ {0x3805, 0x3f},
+ {0x3806, 0x0d},
+ {0x3807, 0x97},
+ {0x3808, 0x09},
+ {0x3809, 0x00},
+ {0x380a, 0x06},
+ {0x380b, 0xc0},
+ {0x380c, 0x04},
+ {0x380d, 0x76},
+ {0x380e, 0x0a},
+ {0x380f, 0xa2},
+ {0x3811, 0x17},
+ {0x3813, 0x02},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3820, 0x01},
+ {0x3821, 0x07},
+ {0x3823, 0x08},
+ {0x3827, 0x02},
+ {0x3828, 0x00},
+ {0x3829, 0x02},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3832, 0x00},
+ {0x3833, 0x00},
+ {0x3834, 0x00},
+
+ /* OTP control */
+ {0x3d85, 0x17},
+ {0x3d8c, 0x70},
+ {0x3d8d, 0xa0},
+
+ /* ??? */
+ {0x3f00, 0x02},
+
+ /* BLC control */
+ {0x4000, 0xe1},
+ {0x4001, 0x83},
+
+ /* ??? */
+ {0x400e, 0x00},
+ {0x4011, 0x00},
+ {0x4012, 0x00},
+
+ /* Frame control */
+ {0x4200, 0x08},
+
+ /* Clipping */
+ {0x4302, 0x7f},
+ {0x4303, 0xff},
+ {0x4304, 0x00},
+ {0x4305, 0x00},
+
+ /* ??? */
+ {0x4501, 0x30},
+
+ /* VFIFO control */
+ {0x4603, 0x60},
+
+ /* LVDS control */
+ {0x4b00, 0x22},
+
+ /* ISP frame control */
+ {0x4903, 0x00},
+
+ /* DSP general control */
+ {0x5000, 0x7f},
+ {0x5001, 0x01},
+ {0x5004, 0x00},
+
+ /* ??? */
+ {0x5013, 0x20},
+ {0x5051, 0x00},
+
+ /* OTP cluster correction control */
+ {0x5500, 0x01},
+ {0x5501, 0x00},
+ {0x5502, 0x07},
+ {0x5503, 0xff},
+ {0x5505, 0x6c},
+ {0x5509, 0x02},
+
+ /* Defective pixel cancellation */
+ {0x5780, 0xfc},
+ {0x5781, 0xff},
+ {0x5787, 0x40},
+ {0x5788, 0x08},
+ {0x578a, 0x02},
+ {0x578b, 0x01},
+ {0x578c, 0x01},
+ {0x578e, 0x02},
+ {0x578f, 0x01},
+ {0x5790, 0x01},
+ {0x5792, 0x00},
+
+ /* Windowing and scaling control */
+ {0x5980, 0x00},
+ {0x5981, 0x21},
+ {0x5982, 0x00},
+ {0x5983, 0x00},
+ {0x5984, 0x00},
+ {0x5985, 0x00},
+ {0x5986, 0x00},
+ {0x5987, 0x00},
+ {0x5988, 0x00},
+
+ /* ??? */
+ {0x3201, 0x15},
+ {0x3202, 0x2a},
+ {0x3f00, 0x02},
+ {0x3f02, 0x00},
+ {0x3f04, 0x00},
+ {0x3f05, 0x00},
+ {0x3f08, 0x40},
+
+ {0x4002, 0x04},
+ {0x4003, 0x08},
+ {0x4306, 0x00},
+
+ /* PCLK Period */
+ {0x4837, 0x3f},
+
+ /* EXPO */
+ {0x3501, 0xA9},
+ {0x3502, 0xe0},
+
+ /* Stream On */
+ {0x0100, 0x01},
+};
+
+/* 1080p (1920x1080) 10-bit mode register sequence.  Applied as a whole;
+ * ends with the stream-on write (0x0100 = 0x01). */
+static struct regval_list ov16825_10bit_1080p_config[] = {
+ /* PLL control */
+ {0x0300, 0x02},
+ {0x0302, 0x74},
+ {0x0305, 0x05},
+ {0x0306, 0x00},
+ {0x030b, 0x03},
+ {0x030c, 0x13},
+ {0x030e, 0x01},
+ {0x030f, 0x02},
+ {0x0313, 0x02},
+ {0x0314, 0x12},
+ {0x031f, 0x00},
+ {0x3018, 0x72},
+ {0x3022, 0x01},
+ {0x3607, 0x2b},
+ {0x360b, 0x82},
+
+ /* System control */
+ {0x3031, 0x0a},
+ {0x3032, 0x80},
+
+ /* Analog control */
+ {0x3601, 0xf8},
+ {0x3602, 0x00},
+ {0x3603, 0x00},
+ {0x3604, 0x00},
+ {0x3605, 0x50},
+ {0x3606, 0x00},
+ {0x3608, 0x16},
+ {0x3609, 0x00},
+ {0x360a, 0x00},
+ {0x360c, 0x1a},
+ {0x360d, 0x00},
+ {0x360e, 0x99},
+ {0x360f, 0x75},
+ {0x3610, 0x69},
+ {0x3611, 0x59},
+ {0x3612, 0x40},
+ {0x3613, 0x89},
+ {0x3614, 0x77},
+ {0x3615, 0x64},
+ {0x3616, 0x30},
+ {0x3617, 0x00},
+ {0x3618, 0x20},
+ {0x3619, 0x00},
+ {0x361a, 0x10},
+ {0x361c, 0x10},
+ {0x361d, 0x00},
+ {0x361e, 0x00},
+ {0x3631, 0x60},
+ {0x3638, 0x00},
+ {0x3640, 0x15},
+ {0x3641, 0x54},
+ {0x3642, 0x63},
+ {0x3643, 0x32},
+ {0x3644, 0x03},
+ {0x3645, 0x04},
+ {0x3646, 0x85},
+ {0x364a, 0x07},
+
+ /* Sensor timing control */
+ {0x3700, 0x30},
+ {0x3701, 0x08},
+ {0x3702, 0x11},
+ {0x3703, 0x20},
+ {0x3704, 0x08},
+ {0x3705, 0x00},
+ {0x3706, 0x9a},
+ {0x3708, 0x20},
+ {0x3709, 0x3c},
+ {0x370a, 0x01},
+ {0x370b, 0x6f},
+ {0x370c, 0x03},
+ {0x370e, 0x20},
+ {0x370f, 0x05},
+ {0x3710, 0x20},
+ {0x3711, 0x20},
+ {0x3714, 0x31},
+ {0x3719, 0x13},
+ {0x371b, 0x03},
+ {0x371d, 0x03},
+ {0x371e, 0x09},
+ {0x371f, 0x17},
+ {0x3720, 0x0b},
+ {0x3721, 0x18},
+ {0x3722, 0x0b},
+ {0x3723, 0x18},
+ {0x3724, 0x04},
+ {0x3725, 0x04},
+ {0x3726, 0x02},
+ {0x3727, 0x02},
+ {0x3728, 0x02},
+ {0x3729, 0x02},
+ {0x372a, 0x25},
+ {0x372b, 0x65},
+ {0x372c, 0x55},
+ {0x372d, 0x65},
+ {0x372e, 0x53},
+ {0x372f, 0x33},
+ {0x3730, 0x33},
+ {0x3731, 0x33},
+ {0x3732, 0x03},
+ {0x3734, 0x90},
+ {0x3739, 0x03},
+ {0x373a, 0x20},
+ {0x373b, 0x0c},
+ {0x373c, 0x1c},
+ {0x373e, 0x0b},
+ {0x373f, 0x80},
+
+ /* System timing control (window origin/size, HTS/VTS, subsampling) */
+ {0x3800, 0x01},
+ {0x3801, 0x80},
+ {0x3802, 0x02},
+ {0x3803, 0x94},
+ {0x3804, 0x10},
+ {0x3805, 0xbf},
+ {0x3806, 0x0b},
+ {0x3807, 0x0f},
+ {0x3808, 0x07},
+ {0x3809, 0x80},
+ {0x380a, 0x04},
+ {0x380b, 0x38},
+ {0x380c, 0x03},
+ {0x380d, 0xf7},
+ {0x380e, 0x04},
+ {0x380f, 0x50},
+ {0x3811, 0x17},
+ {0x3813, 0x02},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3820, 0x01},
+ {0x3821, 0x07},
+ {0x3823, 0x08},
+ {0x3827, 0x02},
+ {0x3828, 0x00},
+ {0x3829, 0x02},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3832, 0x00},
+ {0x3833, 0x00},
+ {0x3834, 0x00},
+
+ /* OTP control */
+ {0x3d85, 0x17},
+ {0x3d8c, 0x70},
+ {0x3d8d, 0xa0},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x3f00, 0x02},
+
+ /* BLC control */
+ {0x4000, 0xe1},
+ {0x4001, 0x83},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x400e, 0x00},
+ {0x4011, 0x00},
+ {0x4012, 0x00},
+
+ /* Frame control */
+ {0x4200, 0x08},
+
+ /* Clipping */
+ {0x4302, 0x7f},
+ {0x4303, 0xff},
+ {0x4304, 0x00},
+ {0x4305, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x4501, 0x30},
+
+ /* VFIFO control */
+ {0x4603, 0x60},
+
+ /* LVDS control */
+ {0x4b00, 0x22},
+
+ /* ISP frame control */
+ {0x4903, 0x00},
+
+ /* DSP general control */
+ {0x5000, 0x7f},
+ {0x5001, 0x01},
+ {0x5004, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x5013, 0x20},
+ {0x5051, 0x00},
+
+ /* OTP cluster correction control */
+ {0x5500, 0x01},
+ {0x5501, 0x00},
+ {0x5502, 0x07},
+ {0x5503, 0xff},
+ {0x5505, 0x6c},
+ {0x5509, 0x02},
+
+ /* Defective pixel cancellation */
+ {0x5780, 0xfc},
+ {0x5781, 0xff},
+ {0x5787, 0x40},
+ {0x5788, 0x08},
+ {0x578a, 0x02},
+ {0x578b, 0x01},
+ {0x578c, 0x01},
+ {0x578e, 0x02},
+ {0x578f, 0x01},
+ {0x5790, 0x01},
+ {0x5792, 0x00},
+
+ /* Windowing and scaling control */
+ {0x5980, 0x00},
+ {0x5981, 0x21},
+ {0x5982, 0x00},
+ {0x5983, 0x00},
+ {0x5984, 0x00},
+ {0x5985, 0x00},
+ {0x5986, 0x00},
+ {0x5987, 0x00},
+ {0x5988, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x3201, 0x15},
+ {0x3202, 0x2a},
+ {0x3f00, 0x02},
+ {0x3f02, 0x00},
+ {0x3f04, 0x00},
+ {0x3f05, 0x00},
+ {0x3f08, 0x40},
+
+ {0x4002, 0x04},
+ {0x4003, 0x08},
+ {0x4306, 0x00},
+
+ /* PCLK Period */
+ {0x4837, 0x3f},
+
+ /* EXPO (initial exposure; runtime value set via V4L2 control) */
+ {0x3501, 0x40},
+ {0x3502, 0xe0},
+
+ /* Stream On */
+ {0x0100, 0x01},
+};
+
+/* 1200p (1600x1200) 10-bit mode register sequence.  Applied as a whole;
+ * ends with the stream-on write (0x0100 = 0x01). */
+static struct regval_list ov16825_10bit_1200p_config[] = {
+ /* PLL control */
+ {0x0300, 0x02},
+ {0x0302, 0x74},
+ {0x0305, 0x05},
+ {0x0306, 0x00},
+ {0x030b, 0x03},
+ {0x030c, 0x13},
+ {0x030e, 0x01},
+ {0x030f, 0x02},
+ {0x0313, 0x02},
+ {0x0314, 0x12},
+ {0x031f, 0x00},
+ {0x3018, 0x72},
+ {0x3022, 0x01},
+ {0x3607, 0x2b},
+ {0x360b, 0x82},
+
+ /* System control */
+ {0x3031, 0x0a},
+ {0x3032, 0x80},
+
+ /* Analog control */
+ {0x3601, 0xf8},
+ {0x3602, 0x00},
+ {0x3603, 0x00},
+ {0x3604, 0x00},
+ {0x3605, 0x50},
+ {0x3606, 0x00},
+ {0x3608, 0x16},
+ {0x3609, 0x00},
+ {0x360a, 0x00},
+ {0x360c, 0x1a},
+ {0x360d, 0x00},
+ {0x360e, 0x99},
+ {0x360f, 0x75},
+ {0x3610, 0x69},
+ {0x3611, 0x59},
+ {0x3612, 0x40},
+ {0x3613, 0x89},
+ {0x3614, 0x77},
+ {0x3615, 0x64},
+ {0x3616, 0x30},
+ {0x3617, 0x00},
+ {0x3618, 0x20},
+ {0x3619, 0x00},
+ {0x361a, 0x10},
+ {0x361c, 0x10},
+ {0x361d, 0x00},
+ {0x361e, 0x00},
+ {0x3631, 0x60},
+ {0x3638, 0x00},
+ {0x3640, 0x15},
+ {0x3641, 0x54},
+ {0x3642, 0x63},
+ {0x3643, 0x32},
+ {0x3644, 0x03},
+ {0x3645, 0x04},
+ {0x3646, 0x85},
+ {0x364a, 0x07},
+
+ /* Sensor timing control */
+ {0x3700, 0x30},
+ {0x3701, 0x08},
+ {0x3702, 0x11},
+ {0x3703, 0x20},
+ {0x3704, 0x08},
+ {0x3705, 0x00},
+ {0x3706, 0x9a},
+ {0x3708, 0x20},
+ {0x3709, 0x3c},
+ {0x370a, 0x01},
+ {0x370b, 0x6f},
+ {0x370c, 0x03},
+ {0x370e, 0x20},
+ {0x370f, 0x05},
+ {0x3710, 0x20},
+ {0x3711, 0x20},
+ {0x3714, 0x31},
+ {0x3719, 0x13},
+ {0x371b, 0x03},
+ {0x371d, 0x03},
+ {0x371e, 0x09},
+ {0x371f, 0x17},
+ {0x3720, 0x0b},
+ {0x3721, 0x18},
+ {0x3722, 0x0b},
+ {0x3723, 0x18},
+ {0x3724, 0x04},
+ {0x3725, 0x04},
+ {0x3726, 0x02},
+ {0x3727, 0x02},
+ {0x3728, 0x02},
+ {0x3729, 0x02},
+ {0x372a, 0x25},
+ {0x372b, 0x65},
+ {0x372c, 0x55},
+ {0x372d, 0x65},
+ {0x372e, 0x53},
+ {0x372f, 0x33},
+ {0x3730, 0x33},
+ {0x3731, 0x33},
+ {0x3732, 0x03},
+ {0x3734, 0x90},
+ {0x3739, 0x03},
+ {0x373a, 0x20},
+ {0x373b, 0x0c},
+ {0x373c, 0x1c},
+ {0x373e, 0x0b},
+ {0x373f, 0x80},
+
+ /* System timing control (window origin/size, HTS/VTS, subsampling) */
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x12},
+ {0x3805, 0x3f},
+ {0x3806, 0x0d},
+ {0x3807, 0x97},
+ {0x3808, 0x06},
+ {0x3809, 0x40},
+ {0x380a, 0x04},
+ {0x380b, 0xb0},
+ {0x380c, 0x03},
+ {0x380d, 0xf7},
+ {0x380e, 0x06},
+ {0x380f, 0xd0},
+ {0x3811, 0x17},
+ {0x3813, 0x02},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3820, 0x01},
+ {0x3821, 0x07},
+ {0x3823, 0x08},
+ {0x3827, 0x02},
+ {0x3828, 0x00},
+ {0x3829, 0x02},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3832, 0x00},
+ {0x3833, 0x00},
+ {0x3834, 0x00},
+
+ /* OTP control */
+ {0x3d85, 0x17},
+ {0x3d8c, 0x70},
+ {0x3d8d, 0xa0},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x3f00, 0x02},
+
+ /* BLC control */
+ {0x4000, 0xe1},
+ {0x4001, 0x83},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x400e, 0x00},
+ {0x4011, 0x00},
+ {0x4012, 0x00},
+
+ /* Frame control */
+ {0x4200, 0x08},
+
+ /* Clipping */
+ {0x4302, 0x7f},
+ {0x4303, 0xff},
+ {0x4304, 0x00},
+ {0x4305, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x4501, 0x30},
+
+ /* VFIFO control */
+ {0x4603, 0x60},
+
+ /* LVDS control */
+ {0x4b00, 0x22},
+
+ /* ISP frame control */
+ {0x4903, 0x00},
+
+ /* DSP general control.
+  * NOTE(review): 0x5000 is 0xff here but 0x7f in the other modes —
+  * confirm the extra bit is intentional for 1200p. */
+ {0x5000, 0xff},
+ {0x5001, 0x01},
+ {0x5004, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x5013, 0x20},
+ {0x5051, 0x00},
+
+ /* OTP cluster correction control */
+ {0x5500, 0x01},
+ {0x5501, 0x00},
+ {0x5502, 0x07},
+ {0x5503, 0xff},
+ {0x5505, 0x6c},
+ {0x5509, 0x02},
+
+ /* Defective pixel cancellation */
+ {0x5780, 0xfc},
+ {0x5781, 0xff},
+ {0x5787, 0x40},
+ {0x5788, 0x08},
+ {0x578a, 0x02},
+ {0x578b, 0x01},
+ {0x578c, 0x01},
+ {0x578e, 0x02},
+ {0x578f, 0x01},
+ {0x5790, 0x01},
+ {0x5792, 0x00},
+
+ /* Windowing and scaling control */
+ {0x5980, 0x00},
+ {0x5981, 0x21},
+ {0x5982, 0x00},
+ {0x5983, 0x00},
+ {0x5984, 0x00},
+ {0x5985, 0x00},
+ {0x5986, 0x00},
+ {0x5987, 0x00},
+ {0x5988, 0x00},
+
+ /* Undocumented (OmniVision reference settings) */
+ {0x3201, 0x15},
+ {0x3202, 0x2a},
+ {0x3f00, 0x02},
+ {0x3f02, 0x00},
+ {0x3f04, 0x00},
+ {0x3f05, 0x00},
+ {0x3f08, 0x40},
+
+ {0x4002, 0x04},
+ {0x4003, 0x08},
+ {0x4306, 0x00},
+
+ /* PCLK Period */
+ {0x4837, 0x3f},
+
+ /* EXPO (initial exposure; runtime value set via V4L2 control) */
+ {0x3501, 0x40},
+ {0x3502, 0xe0},
+
+ /* Stream On */
+ {0x0100, 0x01},
+};
+
+/* Table of supported sensor modes: output size, per-line duration in
+ * microseconds (used for exposure computation) and the register
+ * sequence to program the mode. */
+static struct ov16825_config ov16825_configs[] = {
+#define MAKE_CONFIG(w, h, l, c) { .width = (w), .height = (h), \
+ .line_duration_us = (l), \
+ .regs = (c), .regs_len = ARRAY_SIZE(c), }
+ // The first config in this table will be used as default
+ MAKE_CONFIG(4608, 3456, 48, ov16825_10bit_fullres_config),
+ MAKE_CONFIG(4000, 3000, 37, ov16825_10bit_12mp_config),
+ MAKE_CONFIG(2304, 1728, 33, ov16825_10bit_bining_config),
+ MAKE_CONFIG(1920, 1080, 33, ov16825_10bit_1080p_config),
+ MAKE_CONFIG(1600, 1200, 33, ov16825_10bit_1200p_config),
+};
+
+#endif /*__OV16825_CONFS__H__*/
diff --git a/drivers/media/video/ov16825/ov16825_ctrl.c b/drivers/media/video/ov16825/ov16825_ctrl.c
new file mode 100644
index 00000000000000..f6f5f42ee3507a
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_ctrl.c
@@ -0,0 +1,1118 @@
+/*
+ * linux/drivers/media/video/ov16825/ov16825_ctrl.c
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 20-Oct-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/i2c.h>
+#include <media/v4l2-ctrls.h>
+#include "ov16825_ctrl.h"
+
+#define OV16825_CUSTOM_CID(_n) (V4L2_CID_CAMERA_CLASS_BASE + 0x100 + (_n))
+
+#define V4L2_CID_OV16825_ISP_EN OV16825_CUSTOM_CID(0)
+#define V4L2_CID_OV16825_AVERAGE_EN OV16825_CUSTOM_CID(1)
+#define V4L2_CID_OV16825_WHITE_DPC_EN OV16825_CUSTOM_CID(2)
+#define V4L2_CID_OV16825_BLACK_DPC_EN OV16825_CUSTOM_CID(3)
+#define V4L2_CID_OV16825_WB_GAIN_EN OV16825_CUSTOM_CID(4)
+#define V4L2_CID_OV16825_OTP_CC_EN OV16825_CUSTOM_CID(5)
+#define V4L2_CID_OV16825_DBC_EN OV16825_CUSTOM_CID(6)
+#define V4L2_CID_OV16825_SCALE_EN OV16825_CUSTOM_CID(7)
+#define V4L2_CID_OV16825_BLC_EN OV16825_CUSTOM_CID(8)
+#define V4L2_CID_OV16825_AVERAGE_BEFORE OV16825_CUSTOM_CID(9)
+#define V4L2_CID_OV16825_DIGITAL_GAIN_EN OV16825_CUSTOM_CID(10)
+#define V4L2_CID_OV16825_RED_GAIN OV16825_CUSTOM_CID(11)
+#define V4L2_CID_OV16825_GREEN_GAIN OV16825_CUSTOM_CID(12)
+#define V4L2_CID_OV16825_BLUE_GAIN OV16825_CUSTOM_CID(13)
+
+
+
+/* TEST_PATTERN register values, indexed by the V4L2 test-pattern menu
+ * control value.  Order must stay aligned with the menu strings below. */
+static u8 const ov16825_test_pattern_registers[] = {
+ PATTERN_DISABLE,
+ PATTERN_BLACK,
+ PATTERN_COLOR_BAR,
+ PATTERN_COLOR_BAR_DARK_TOP_BOTTOM,
+ PATTERN_COLOR_BAR_DARK_LEFT_RIGHT,
+ PATTERN_COLOR_BAR_DARK_BOTTOM_TOP,
+ PATTERN_SQUARE_BW,
+ PATTERN_SQUARE_COLOR,
+ PATTERN_RANDOM,
+};
+
+/* Human-readable names for the V4L2 test-pattern menu control; entry
+ * order must stay aligned with ov16825_test_pattern_registers. */
+static const char * const ov16825_test_pattern_menu[] = {
+ "Disabled",
+ "Black image",
+ "Color bar standard",
+ "Color bar top-bottom dark",
+ "Color bar left-right dark",
+ "Color bar bottom-top dark",
+ "Square Black-White",
+ "Square Color",
+ "Random",
+};
+
+/* Analog gain calibration table mapping a fixed-point gain value to the
+ * GAIN0/GAIN1 register pair.  Entries are kept in ascending gain order:
+ * the lookup in ov16825_configure_gain relies on this to pick the first
+ * entry whose gain is >= the requested value. */
+static struct ov16825_gain {
+ u8 major;   /* value written to GAIN0 */
+ u8 minor;   /* value written to GAIN1 */
+ /* Gain multiplied by 256 */
+ u32 gain;
+} ov16825_gain_table[] = {
+#define GAIN(v) ((u32)(v * 256))
+ { 0x0, 0x80, GAIN(1.0000) },
+ { 0x0, 0x81, GAIN(1.0078) },
+ { 0x0, 0x82, GAIN(1.0156) },
+ { 0x0, 0x83, GAIN(1.0234) },
+ { 0x0, 0x84, GAIN(1.0313) },
+ { 0x0, 0x85, GAIN(1.0391) },
+ { 0x0, 0x86, GAIN(1.0469) },
+ { 0x0, 0x87, GAIN(1.0547) },
+ { 0x0, 0x88, GAIN(1.0625) },
+ { 0x0, 0x89, GAIN(1.0703) },
+ { 0x0, 0x8A, GAIN(1.0781) },
+ { 0x0, 0x8B, GAIN(1.0859) },
+ { 0x0, 0x8C, GAIN(1.0938) },
+ { 0x0, 0x8D, GAIN(1.1016) },
+ { 0x0, 0x8E, GAIN(1.1094) },
+ { 0x0, 0x8F, GAIN(1.1172) },
+ { 0x0, 0x90, GAIN(1.1250) },
+ { 0x0, 0x91, GAIN(1.1328) },
+ { 0x0, 0x92, GAIN(1.1406) },
+ { 0x0, 0x93, GAIN(1.1484) },
+ { 0x0, 0x94, GAIN(1.1563) },
+ { 0x0, 0x95, GAIN(1.1641) },
+ { 0x0, 0x96, GAIN(1.1719) },
+ { 0x0, 0x97, GAIN(1.1797) },
+ { 0x0, 0x98, GAIN(1.1875) },
+ { 0x0, 0x99, GAIN(1.1953) },
+ { 0x0, 0x9A, GAIN(1.2031) },
+ { 0x0, 0x9B, GAIN(1.2109) },
+ { 0x0, 0x9C, GAIN(1.2188) },
+ { 0x0, 0x9D, GAIN(1.2266) },
+ { 0x0, 0x9E, GAIN(1.2344) },
+ { 0x0, 0x9F, GAIN(1.2422) },
+ { 0x0, 0xA0, GAIN(1.2500) },
+ { 0x0, 0xA1, GAIN(1.2578) },
+ { 0x0, 0xA2, GAIN(1.2656) },
+ { 0x0, 0xA3, GAIN(1.2734) },
+ { 0x0, 0xA4, GAIN(1.2813) },
+ { 0x0, 0xA5, GAIN(1.2891) },
+ { 0x0, 0xA6, GAIN(1.2969) },
+ { 0x0, 0xA7, GAIN(1.3047) },
+ { 0x0, 0xA8, GAIN(1.3125) },
+ { 0x0, 0xA9, GAIN(1.3203) },
+ { 0x0, 0xAA, GAIN(1.3281) },
+ { 0x0, 0xAB, GAIN(1.3359) },
+ { 0x0, 0xAC, GAIN(1.3438) },
+ { 0x0, 0xAD, GAIN(1.3516) },
+ { 0x0, 0xAE, GAIN(1.3594) },
+ { 0x0, 0xAF, GAIN(1.3672) },
+ { 0x0, 0xB0, GAIN(1.3750) },
+ { 0x0, 0xB1, GAIN(1.3828) },
+ { 0x0, 0xB2, GAIN(1.3906) },
+ { 0x0, 0xB3, GAIN(1.3984) },
+ { 0x0, 0xB4, GAIN(1.4063) },
+ { 0x0, 0xB5, GAIN(1.4141) },
+ { 0x0, 0xB6, GAIN(1.4219) },
+ { 0x0, 0xB7, GAIN(1.4297) },
+ { 0x0, 0xB8, GAIN(1.4375) },
+ { 0x0, 0xB9, GAIN(1.4453) },
+ { 0x0, 0xBA, GAIN(1.4531) },
+ { 0x0, 0xBB, GAIN(1.4609) },
+ { 0x0, 0xBC, GAIN(1.4688) },
+ { 0x0, 0xBD, GAIN(1.4766) },
+ { 0x0, 0xBE, GAIN(1.4844) },
+ { 0x0, 0xBF, GAIN(1.4922) },
+ { 0x0, 0xC0, GAIN(1.5000) },
+ { 0x0, 0xC1, GAIN(1.5078) },
+ { 0x0, 0xC2, GAIN(1.5156) },
+ { 0x0, 0xC3, GAIN(1.5234) },
+ { 0x0, 0xC4, GAIN(1.5313) },
+ { 0x0, 0xC5, GAIN(1.5391) },
+ { 0x0, 0xC6, GAIN(1.5469) },
+ { 0x0, 0xC7, GAIN(1.5547) },
+ { 0x0, 0xC8, GAIN(1.5625) },
+ { 0x0, 0xC9, GAIN(1.5703) },
+ { 0x0, 0xCA, GAIN(1.5781) },
+ { 0x0, 0xCB, GAIN(1.5859) },
+ { 0x0, 0xCC, GAIN(1.5938) },
+ { 0x0, 0xCD, GAIN(1.6016) },
+ { 0x0, 0xCE, GAIN(1.6094) },
+ { 0x0, 0xCF, GAIN(1.6172) },
+ { 0x0, 0xD0, GAIN(1.6250) },
+ { 0x0, 0xD1, GAIN(1.6328) },
+ { 0x0, 0xD2, GAIN(1.6406) },
+ { 0x0, 0xD3, GAIN(1.6484) },
+ { 0x0, 0xD4, GAIN(1.6563) },
+ { 0x0, 0xD5, GAIN(1.6641) },
+ { 0x0, 0xD6, GAIN(1.6719) },
+ { 0x0, 0xD7, GAIN(1.6797) },
+ { 0x0, 0xD8, GAIN(1.6875) },
+ { 0x0, 0xD9, GAIN(1.6953) },
+ { 0x0, 0xDA, GAIN(1.7031) },
+ { 0x0, 0xDB, GAIN(1.7109) },
+ { 0x0, 0xDC, GAIN(1.7188) },
+ { 0x0, 0xDD, GAIN(1.7266) },
+ { 0x0, 0xDE, GAIN(1.7344) },
+ { 0x0, 0xDF, GAIN(1.7422) },
+ { 0x0, 0xE0, GAIN(1.7500) },
+ { 0x0, 0xE1, GAIN(1.7578) },
+ { 0x0, 0xE2, GAIN(1.7656) },
+ { 0x0, 0xE3, GAIN(1.7734) },
+ { 0x0, 0xE4, GAIN(1.7813) },
+ { 0x0, 0xE5, GAIN(1.7891) },
+ { 0x0, 0xE6, GAIN(1.7969) },
+ { 0x0, 0xE7, GAIN(1.8047) },
+ { 0x0, 0xE8, GAIN(1.8125) },
+ { 0x0, 0xE9, GAIN(1.8203) },
+ { 0x0, 0xEA, GAIN(1.8281) },
+ { 0x0, 0xEB, GAIN(1.8359) },
+ { 0x0, 0xEC, GAIN(1.8438) },
+ { 0x0, 0xED, GAIN(1.8516) },
+ { 0x0, 0xEE, GAIN(1.8594) },
+ { 0x0, 0xEF, GAIN(1.8672) },
+ { 0x0, 0xF0, GAIN(1.8750) },
+ { 0x0, 0xF1, GAIN(1.8828) },
+ { 0x0, 0xF2, GAIN(1.8906) },
+ { 0x0, 0xF3, GAIN(1.8984) },
+ { 0x0, 0xF4, GAIN(1.9063) },
+ { 0x0, 0xF5, GAIN(1.9141) },
+ { 0x0, 0xF6, GAIN(1.9219) },
+ { 0x0, 0xF7, GAIN(1.9297) },
+ { 0x0, 0xF8, GAIN(1.9375) },
+ { 0x0, 0xF9, GAIN(1.9453) },
+ { 0x0, 0xFA, GAIN(1.9531) },
+ { 0x0, 0xFB, GAIN(1.9609) },
+ { 0x0, 0xFC, GAIN(1.9688) },
+ { 0x0, 0xFD, GAIN(1.9766) },
+ { 0x0, 0xFE, GAIN(1.9844) },
+ { 0x0, 0xFF, GAIN(1.9922) },
+ { 0x4, 0x80, GAIN(2.0000) },
+ { 0x4, 0x81, GAIN(2.0156) },
+ { 0x4, 0x82, GAIN(2.0313) },
+ { 0x4, 0x83, GAIN(2.0469) },
+ { 0x4, 0x84, GAIN(2.0625) },
+ { 0x4, 0x85, GAIN(2.0781) },
+ { 0x4, 0x86, GAIN(2.0938) },
+ { 0x4, 0x87, GAIN(2.1094) },
+ { 0x4, 0x88, GAIN(2.1250) },
+ { 0x4, 0x89, GAIN(2.1406) },
+ { 0x4, 0x8A, GAIN(2.1563) },
+ { 0x4, 0x8B, GAIN(2.1719) },
+ { 0x4, 0x8C, GAIN(2.1875) },
+ { 0x4, 0x8D, GAIN(2.2031) },
+ { 0x4, 0x8E, GAIN(2.2188) },
+ { 0x4, 0x8F, GAIN(2.2344) },
+ { 0x4, 0x90, GAIN(2.2500) },
+ { 0x4, 0x91, GAIN(2.2656) },
+ { 0x4, 0x92, GAIN(2.2813) },
+ { 0x4, 0x93, GAIN(2.2969) },
+ { 0x4, 0x94, GAIN(2.3125) },
+ { 0x4, 0x95, GAIN(2.3281) },
+ { 0x4, 0x96, GAIN(2.3438) },
+ { 0x4, 0x97, GAIN(2.3594) },
+ { 0x4, 0x98, GAIN(2.3750) },
+ { 0x4, 0x99, GAIN(2.3906) },
+ { 0x4, 0x9A, GAIN(2.4063) },
+ { 0x4, 0x9B, GAIN(2.4219) },
+ { 0x4, 0x9C, GAIN(2.4375) },
+ { 0x4, 0x9D, GAIN(2.4531) },
+ { 0x4, 0x9E, GAIN(2.4688) },
+ { 0x4, 0x9F, GAIN(2.4844) },
+ { 0x4, 0xA0, GAIN(2.5000) },
+ { 0x4, 0xA1, GAIN(2.5156) },
+ { 0x4, 0xA2, GAIN(2.5313) },
+ { 0x4, 0xA3, GAIN(2.5469) },
+ { 0x4, 0xA4, GAIN(2.5625) },
+ { 0x4, 0xA5, GAIN(2.5781) },
+ { 0x4, 0xA6, GAIN(2.5938) },
+ { 0x4, 0xA7, GAIN(2.6094) },
+ { 0x4, 0xA8, GAIN(2.6250) },
+ { 0x4, 0xA9, GAIN(2.6406) },
+ { 0x4, 0xAA, GAIN(2.6563) },
+ { 0x4, 0xAB, GAIN(2.6719) },
+ { 0x4, 0xAC, GAIN(2.6875) },
+ { 0x4, 0xAD, GAIN(2.7031) },
+ { 0x4, 0xAE, GAIN(2.7188) },
+ { 0x4, 0xAF, GAIN(2.7344) },
+ { 0x4, 0xB0, GAIN(2.7500) },
+ { 0x4, 0xB1, GAIN(2.7656) },
+ { 0x4, 0xB2, GAIN(2.7813) },
+ { 0x4, 0xB3, GAIN(2.7969) },
+ { 0x4, 0xB4, GAIN(2.8125) },
+ { 0x4, 0xB5, GAIN(2.8281) },
+ { 0x4, 0xB6, GAIN(2.8438) },
+ { 0x4, 0xB7, GAIN(2.8594) },
+ { 0x4, 0xB8, GAIN(2.8750) },
+ { 0x4, 0xB9, GAIN(2.8906) },
+ { 0x4, 0xBA, GAIN(2.9063) },
+ { 0x4, 0xBB, GAIN(2.9219) },
+ { 0x4, 0xBC, GAIN(2.9375) },
+ { 0x4, 0xBD, GAIN(2.9531) },
+ { 0x4, 0xBE, GAIN(2.9688) },
+ { 0x4, 0xBF, GAIN(2.9844) },
+ { 0x4, 0xC0, GAIN(3.0000) },
+ { 0x4, 0xC1, GAIN(3.0156) },
+ { 0x4, 0xC2, GAIN(3.0313) },
+ { 0x4, 0xC3, GAIN(3.0469) },
+ { 0x4, 0xC4, GAIN(3.0625) },
+ { 0x4, 0xC5, GAIN(3.0781) },
+ { 0x4, 0xC6, GAIN(3.0938) },
+ { 0x4, 0xC7, GAIN(3.1094) },
+ { 0x4, 0xC8, GAIN(3.1250) },
+ { 0x4, 0xC9, GAIN(3.1406) },
+ { 0x4, 0xCA, GAIN(3.1563) },
+ { 0x4, 0xCB, GAIN(3.1719) },
+ { 0x4, 0xCC, GAIN(3.1875) },
+ { 0x4, 0xCD, GAIN(3.2031) },
+ { 0x4, 0xCE, GAIN(3.2188) },
+ { 0x4, 0xCF, GAIN(3.2344) },
+ { 0x4, 0xD0, GAIN(3.2500) },
+ { 0x4, 0xD1, GAIN(3.2656) },
+ { 0x4, 0xD2, GAIN(3.2813) },
+ { 0x4, 0xD3, GAIN(3.2969) },
+ { 0x4, 0xD4, GAIN(3.3125) },
+ { 0x4, 0xD5, GAIN(3.3281) },
+ { 0x4, 0xD6, GAIN(3.3438) },
+ { 0x4, 0xD7, GAIN(3.3594) },
+ { 0x4, 0xD8, GAIN(3.3750) },
+ { 0x4, 0xD9, GAIN(3.3906) },
+ { 0x4, 0xDA, GAIN(3.4063) },
+ { 0x4, 0xDB, GAIN(3.4219) },
+ { 0x4, 0xDC, GAIN(3.4375) },
+ { 0x4, 0xDD, GAIN(3.4531) },
+ { 0x4, 0xDE, GAIN(3.4688) },
+ { 0x4, 0xDF, GAIN(3.4844) },
+ { 0x4, 0xE0, GAIN(3.5000) },
+ { 0x4, 0xE1, GAIN(3.5156) },
+ { 0x4, 0xE2, GAIN(3.5313) },
+ { 0x4, 0xE3, GAIN(3.5469) },
+ { 0x4, 0xE4, GAIN(3.5625) },
+ { 0x4, 0xE5, GAIN(3.5781) },
+ { 0x4, 0xE6, GAIN(3.5938) },
+ { 0x4, 0xE7, GAIN(3.6094) },
+ { 0x4, 0xE8, GAIN(3.6250) },
+ { 0x4, 0xE9, GAIN(3.6406) },
+ { 0x4, 0xEA, GAIN(3.6563) },
+ { 0x4, 0xEB, GAIN(3.6719) },
+ { 0x4, 0xEC, GAIN(3.6875) },
+ { 0x4, 0xED, GAIN(3.7031) },
+ { 0x4, 0xEE, GAIN(3.7188) },
+ { 0x4, 0xEF, GAIN(3.7344) },
+ { 0x4, 0xF0, GAIN(3.7500) },
+ { 0x4, 0xF1, GAIN(3.7656) },
+ { 0x4, 0xF2, GAIN(3.7813) },
+ { 0x4, 0xF3, GAIN(3.7969) },
+ { 0x4, 0xF4, GAIN(3.8125) },
+ { 0x4, 0xF5, GAIN(3.8281) },
+ { 0x4, 0xF6, GAIN(3.8438) },
+ { 0x4, 0xF7, GAIN(3.8594) },
+ { 0x4, 0xF8, GAIN(3.8750) },
+ { 0x4, 0xF9, GAIN(3.8906) },
+ { 0x4, 0xFA, GAIN(3.9063) },
+ { 0x4, 0xFB, GAIN(3.9219) },
+ { 0x4, 0xFC, GAIN(3.9375) },
+ { 0x4, 0xFD, GAIN(3.9531) },
+ { 0x4, 0xFE, GAIN(3.9688) },
+ { 0x4, 0xFF, GAIN(3.9844) },
+ { 0x8, 0x80, GAIN(4.0000) },
+ { 0x8, 0x81, GAIN(4.0313) },
+ { 0x8, 0x82, GAIN(4.0625) },
+ { 0x8, 0x83, GAIN(4.0938) },
+ { 0x8, 0x84, GAIN(4.1250) },
+ { 0x8, 0x85, GAIN(4.1563) },
+ { 0x8, 0x86, GAIN(4.1875) },
+ { 0x8, 0x87, GAIN(4.2188) },
+ { 0x8, 0x88, GAIN(4.2500) },
+ { 0x8, 0x89, GAIN(4.2813) },
+ { 0x8, 0x8A, GAIN(4.3125) },
+ { 0x8, 0x8B, GAIN(4.3438) },
+ { 0x8, 0x8C, GAIN(4.3750) },
+ { 0x8, 0x8D, GAIN(4.4063) },
+ { 0x8, 0x8E, GAIN(4.4375) },
+ { 0x8, 0x8F, GAIN(4.4688) },
+ { 0x8, 0x90, GAIN(4.5000) },
+ { 0x8, 0x91, GAIN(4.5313) },
+ { 0x8, 0x92, GAIN(4.5625) },
+ { 0x8, 0x93, GAIN(4.5938) },
+ { 0x8, 0x94, GAIN(4.6250) },
+ { 0x8, 0x95, GAIN(4.6563) },
+ { 0x8, 0x96, GAIN(4.6875) },
+ { 0x8, 0x97, GAIN(4.7188) },
+ { 0x8, 0x98, GAIN(4.7500) },
+ { 0x8, 0x99, GAIN(4.7813) },
+ { 0x8, 0x9A, GAIN(4.8125) },
+ { 0x8, 0x9B, GAIN(4.8438) },
+ { 0x8, 0x9C, GAIN(4.8750) },
+ { 0x8, 0x9D, GAIN(4.9063) },
+ { 0x8, 0x9E, GAIN(4.9375) },
+ { 0x8, 0x9F, GAIN(4.9688) },
+ { 0x8, 0xA0, GAIN(5.0000) },
+ { 0x8, 0xA1, GAIN(5.0313) },
+ { 0x8, 0xA2, GAIN(5.0625) },
+ { 0x8, 0xA3, GAIN(5.0938) },
+ { 0x8, 0xA4, GAIN(5.1250) },
+ { 0x8, 0xA5, GAIN(5.1563) },
+ { 0x8, 0xA6, GAIN(5.1875) },
+ { 0x8, 0xA7, GAIN(5.2188) },
+ { 0x8, 0xA8, GAIN(5.2500) },
+ { 0x8, 0xA9, GAIN(5.2813) },
+ { 0x8, 0xAA, GAIN(5.3125) },
+ { 0x8, 0xAB, GAIN(5.3438) },
+ { 0x8, 0xAC, GAIN(5.3750) },
+ { 0x8, 0xAD, GAIN(5.4063) },
+ { 0x8, 0xAE, GAIN(5.4375) },
+ { 0x8, 0xAF, GAIN(5.4688) },
+ { 0x8, 0xB0, GAIN(5.5000) },
+ { 0x8, 0xB1, GAIN(5.5313) },
+ { 0x8, 0xB2, GAIN(5.5625) },
+ { 0x8, 0xB3, GAIN(5.5938) },
+ { 0x8, 0xB4, GAIN(5.6250) },
+ { 0x8, 0xB5, GAIN(5.6563) },
+ { 0x8, 0xB6, GAIN(5.6875) },
+ { 0x8, 0xB7, GAIN(5.7188) },
+ { 0x8, 0xB8, GAIN(5.7500) },
+ { 0x8, 0xB9, GAIN(5.7813) },
+ { 0x8, 0xBA, GAIN(5.8125) },
+ { 0x8, 0xBB, GAIN(5.8438) },
+ { 0x8, 0xBC, GAIN(5.8750) },
+ { 0x8, 0xBD, GAIN(5.9063) },
+ { 0x8, 0xBE, GAIN(5.9375) },
+ { 0x8, 0xBF, GAIN(5.9688) },
+ { 0x8, 0xC0, GAIN(6.0000) },
+ { 0x8, 0xC1, GAIN(6.0313) },
+ { 0x8, 0xC2, GAIN(6.0625) },
+ { 0x8, 0xC3, GAIN(6.0938) },
+ { 0x8, 0xC4, GAIN(6.1250) },
+ { 0x8, 0xC5, GAIN(6.1563) },
+ { 0x8, 0xC6, GAIN(6.1875) },
+ { 0x8, 0xC7, GAIN(6.2188) },
+ { 0x8, 0xC8, GAIN(6.2500) },
+ { 0x8, 0xC9, GAIN(6.2813) },
+ { 0x8, 0xCA, GAIN(6.3125) },
+ { 0x8, 0xCB, GAIN(6.3438) },
+ { 0x8, 0xCC, GAIN(6.3750) },
+ { 0x8, 0xCD, GAIN(6.4063) },
+ { 0x8, 0xCE, GAIN(6.4375) },
+ { 0x8, 0xCF, GAIN(6.4688) },
+ { 0x8, 0xD0, GAIN(6.5000) },
+ { 0x8, 0xD1, GAIN(6.5313) },
+ { 0x8, 0xD2, GAIN(6.5625) },
+ { 0x8, 0xD3, GAIN(6.5938) },
+ { 0x8, 0xD4, GAIN(6.6250) },
+ { 0x8, 0xD5, GAIN(6.6563) },
+ { 0x8, 0xD6, GAIN(6.6875) },
+ { 0x8, 0xD7, GAIN(6.7188) },
+ { 0x8, 0xD8, GAIN(6.7500) },
+ { 0x8, 0xD9, GAIN(6.7813) },
+ { 0x8, 0xDA, GAIN(6.8125) },
+ { 0x8, 0xDB, GAIN(6.8438) },
+ { 0x8, 0xDC, GAIN(6.8750) },
+ { 0x8, 0xDD, GAIN(6.9063) },
+ { 0x8, 0xDE, GAIN(6.9375) },
+ { 0x8, 0xDF, GAIN(6.9688) },
+ { 0x8, 0xE0, GAIN(7.0000) },
+ { 0x8, 0xE1, GAIN(7.0313) },
+ { 0x8, 0xE2, GAIN(7.0625) },
+ { 0x8, 0xE3, GAIN(7.0938) },
+ { 0x8, 0xE4, GAIN(7.1250) },
+ { 0x8, 0xE5, GAIN(7.1563) },
+ { 0x8, 0xE6, GAIN(7.1875) },
+ { 0x8, 0xE7, GAIN(7.2188) },
+ { 0x8, 0xE8, GAIN(7.2500) },
+ { 0x8, 0xE9, GAIN(7.2813) },
+ { 0x8, 0xEA, GAIN(7.3125) },
+ { 0x8, 0xEB, GAIN(7.3438) },
+ { 0x8, 0xEC, GAIN(7.3750) },
+ { 0x8, 0xED, GAIN(7.4063) },
+ { 0x8, 0xEE, GAIN(7.4375) },
+ { 0x8, 0xEF, GAIN(7.4688) },
+ { 0x8, 0xF0, GAIN(7.5000) },
+ { 0x8, 0xF1, GAIN(7.5313) },
+ { 0x8, 0xF2, GAIN(7.5625) },
+ { 0x8, 0xF3, GAIN(7.5938) },
+ { 0x8, 0xF4, GAIN(7.6250) },
+ { 0x8, 0xF5, GAIN(7.6563) },
+ { 0x8, 0xF6, GAIN(7.6875) },
+ { 0x8, 0xF7, GAIN(7.7188) },
+ { 0x8, 0xF8, GAIN(7.7500) },
+ { 0x8, 0xF9, GAIN(7.7813) },
+ { 0x8, 0xFA, GAIN(7.8125) },
+ { 0x8, 0xFB, GAIN(7.8438) },
+ { 0x8, 0xFC, GAIN(7.8750) },
+ { 0x8, 0xFD, GAIN(7.9063) },
+ { 0x8, 0xFE, GAIN(7.9375) },
+ { 0x8, 0xFF, GAIN(7.9688) },
+ { 0xC, 0x80, GAIN(8.0000) },
+ { 0xC, 0x81, GAIN(8.0625) },
+ { 0xC, 0x82, GAIN(8.1250) },
+ { 0xC, 0x83, GAIN(8.1875) },
+ { 0xC, 0x84, GAIN(8.2500) },
+ { 0xC, 0x85, GAIN(8.3125) },
+ { 0xC, 0x86, GAIN(8.3750) },
+ { 0xC, 0x87, GAIN(8.4375) },
+ { 0xC, 0x88, GAIN(8.5000) },
+ { 0xC, 0x89, GAIN(8.5625) },
+ { 0xC, 0x8A, GAIN(8.6250) },
+ { 0xC, 0x8B, GAIN(8.6875) },
+ { 0xC, 0x8C, GAIN(8.7500) },
+ { 0xC, 0x8D, GAIN(8.8125) },
+ { 0xC, 0x8E, GAIN(8.8750) },
+ { 0xC, 0x8F, GAIN(8.9375) },
+ { 0xC, 0x90, GAIN(9.0000) },
+ { 0xC, 0x91, GAIN(9.0625) },
+ { 0xC, 0x92, GAIN(9.1250) },
+ { 0xC, 0x93, GAIN(9.1875) },
+ { 0xC, 0x94, GAIN(9.2500) },
+ { 0xC, 0x95, GAIN(9.3125) },
+ { 0xC, 0x96, GAIN(9.3750) },
+ { 0xC, 0x97, GAIN(9.4375) },
+ { 0xC, 0x98, GAIN(9.5000) },
+ { 0xC, 0x99, GAIN(9.5625) },
+ { 0xC, 0x9A, GAIN(9.6250) },
+ { 0xC, 0x9B, GAIN(9.6875) },
+ { 0xC, 0x9C, GAIN(9.7500) },
+ { 0xC, 0x9D, GAIN(9.8125) },
+ { 0xC, 0x9E, GAIN(9.8750) },
+ { 0xC, 0x9F, GAIN(9.9375) },
+ { 0xC, 0xA0, GAIN(10.0000) },
+ { 0xC, 0xA1, GAIN(10.0625) },
+ { 0xC, 0xA2, GAIN(10.1250) },
+ { 0xC, 0xA3, GAIN(10.1875) },
+ { 0xC, 0xA4, GAIN(10.2500) },
+ { 0xC, 0xA5, GAIN(10.3125) },
+ { 0xC, 0xA6, GAIN(10.3750) },
+ { 0xC, 0xA7, GAIN(10.4375) },
+ { 0xC, 0xA8, GAIN(10.5000) },
+ { 0xC, 0xA9, GAIN(10.5625) },
+ { 0xC, 0xAA, GAIN(10.6250) },
+ { 0xC, 0xAB, GAIN(10.6875) },
+ { 0xC, 0xAC, GAIN(10.7500) },
+ { 0xC, 0xAD, GAIN(10.8125) },
+ { 0xC, 0xAE, GAIN(10.8750) },
+ { 0xC, 0xAF, GAIN(10.9375) },
+ { 0xC, 0xB0, GAIN(11.0000) },
+ { 0xC, 0xB1, GAIN(11.0625) },
+ { 0xC, 0xB2, GAIN(11.1250) },
+ { 0xC, 0xB3, GAIN(11.1875) },
+ { 0xC, 0xB4, GAIN(11.2500) },
+ { 0xC, 0xB5, GAIN(11.3125) },
+ { 0xC, 0xB6, GAIN(11.3750) },
+ { 0xC, 0xB7, GAIN(11.4375) },
+ { 0xC, 0xB8, GAIN(11.5000) },
+ { 0xC, 0xB9, GAIN(11.5625) },
+ { 0xC, 0xBA, GAIN(11.6250) },
+ { 0xC, 0xBB, GAIN(11.6875) },
+ { 0xC, 0xBC, GAIN(11.7500) },
+ { 0xC, 0xBD, GAIN(11.8125) },
+ { 0xC, 0xBE, GAIN(11.8750) },
+ { 0xC, 0xBF, GAIN(11.9375) },
+ { 0xC, 0xC0, GAIN(12.0000) },
+ { 0xC, 0xC1, GAIN(12.0625) },
+ { 0xC, 0xC2, GAIN(12.1250) },
+ { 0xC, 0xC3, GAIN(12.1875) },
+ { 0xC, 0xC4, GAIN(12.2500) },
+ { 0xC, 0xC5, GAIN(12.3125) },
+ { 0xC, 0xC6, GAIN(12.3750) },
+ { 0xC, 0xC7, GAIN(12.4375) },
+ { 0xC, 0xC8, GAIN(12.5000) },
+ { 0xC, 0xC9, GAIN(12.5625) },
+ { 0xC, 0xCA, GAIN(12.6250) },
+ { 0xC, 0xCB, GAIN(12.6875) },
+ { 0xC, 0xCC, GAIN(12.7500) },
+ { 0xC, 0xCD, GAIN(12.8125) },
+ { 0xC, 0xCE, GAIN(12.8750) },
+ { 0xC, 0xCF, GAIN(12.9375) },
+ { 0xC, 0xD0, GAIN(13.0000) },
+ { 0xC, 0xD1, GAIN(13.0625) },
+ { 0xC, 0xD2, GAIN(13.1250) },
+ { 0xC, 0xD3, GAIN(13.1875) },
+ { 0xC, 0xD4, GAIN(13.2500) },
+ { 0xC, 0xD5, GAIN(13.3125) },
+ { 0xC, 0xD6, GAIN(13.3750) },
+ { 0xC, 0xD7, GAIN(13.4375) },
+ { 0xC, 0xD8, GAIN(13.5000) },
+ { 0xC, 0xD9, GAIN(13.5625) },
+ { 0xC, 0xDA, GAIN(13.6250) },
+ { 0xC, 0xDB, GAIN(13.6875) },
+ { 0xC, 0xDC, GAIN(13.7500) },
+ { 0xC, 0xDD, GAIN(13.8125) },
+ { 0xC, 0xDE, GAIN(13.8750) },
+ { 0xC, 0xDF, GAIN(13.9375) },
+ { 0xC, 0xE0, GAIN(14.0000) },
+ { 0xC, 0xE1, GAIN(14.0625) },
+ { 0xC, 0xE2, GAIN(14.1250) },
+ { 0xC, 0xE3, GAIN(14.1875) },
+ { 0xC, 0xE4, GAIN(14.2500) },
+ { 0xC, 0xE5, GAIN(14.3125) },
+ { 0xC, 0xE6, GAIN(14.3750) },
+ { 0xC, 0xE7, GAIN(14.4375) },
+ { 0xC, 0xE8, GAIN(14.5000) },
+ { 0xC, 0xE9, GAIN(14.5625) },
+ { 0xC, 0xEA, GAIN(14.6250) },
+ { 0xC, 0xEB, GAIN(14.6875) },
+ { 0xC, 0xEC, GAIN(14.7500) },
+ { 0xC, 0xED, GAIN(14.8125) },
+ { 0xC, 0xEE, GAIN(14.8750) },
+ { 0xC, 0xEF, GAIN(14.9375) },
+ { 0xC, 0xF0, GAIN(15.0000) },
+ { 0xC, 0xF1, GAIN(15.0625) },
+ { 0xC, 0xF2, GAIN(15.1250) },
+ { 0xC, 0xF3, GAIN(15.1875) },
+ { 0xC, 0xF4, GAIN(15.2500) },
+ { 0xC, 0xF5, GAIN(15.3125) },
+ { 0xC, 0xF6, GAIN(15.3750) },
+ { 0xC, 0xF7, GAIN(15.4375) },
+ { 0xC, 0xF8, GAIN(15.5000) },
+ { 0xC, 0xF9, GAIN(15.5625) },
+ { 0xC, 0xFA, GAIN(15.6250) },
+ { 0xC, 0xFB, GAIN(15.6875) },
+ { 0xC, 0xFC, GAIN(15.7500) },
+ { 0xC, 0xFD, GAIN(15.8125) },
+ { 0xC, 0xFE, GAIN(15.8750) },
+ { 0xC, 0xFF, GAIN(15.9375) },
+#undef GAIN
+};
+
+/*
+ * Program the analog gain registers from a control value.
+ * @val is the requested gain multiplied by 256; the first table entry
+ * whose gain is >= @val is selected (the table is in ascending order),
+ * falling back to the last (maximum) entry.
+ * No-op while the sensor is not streaming.
+ */
+static void ov16825_configure_gain(struct ov16825_info *info, s32 val)
+{
+ const struct ov16825_gain *sel;
+ size_t idx;
+
+ if (!info->streaming)
+ return;
+
+ for (idx = 0; idx < ARRAY_SIZE(ov16825_gain_table) - 1; idx++)
+ if (ov16825_gain_table[idx].gain >= (u32)val)
+ break;
+
+ sel = &ov16825_gain_table[idx];
+ sensor_write(&info->sd, GAIN0, sel->major);
+ sensor_write(&info->sd, GAIN1, sel->minor);
+}
+
+/*
+ * Program the exposure registers from the V4L2 exposure control.
+ * The exposure value appears to be in microseconds (it is divided by
+ * the mode's line duration in us) — derived from field names, confirm.
+ * No-op while the sensor is not streaming.
+ */
+static void ov16825_configure_exposure(struct ov16825_info *info)
+{
+ s32 expo = info->exposure->val;
+ u32 line_duration = info->config->line_duration_us;
+ u32 reg_val;
+
+ if (!info->streaming) {
+ return;
+ }
+
+ /* Register value has 4 fractional bits but it seems that setting those
+ * causes the camera to stop working. For now I just force them to 0
+ * which means that we don't have sub-line granularity. */
+ reg_val = (expo / line_duration) << 4;
+
+ /* 20-bit exposure split over three big-endian registers */
+ sensor_write(&info->sd, EXPO0, (reg_val >> 16) & 0xff);
+ sensor_write(&info->sd, EXPO1, (reg_val >> 8) & 0xff);
+ sensor_write(&info->sd, EXPO2, (reg_val >> 0) & 0xff);
+}
+
+/*
+ * Re-apply the current gain setting after an ISP configuration change.
+ * Writes a different gain first so the second write is not dropped as
+ * a no-op by the sensor.
+ */
+void ov16825_force_gain_reconfig(struct ov16825_info *info)
+{
+ /* Force gain reconfiguration by changing the value and reverting back
+ * to the original */
+ ov16825_configure_gain(info, info->gain->val * 2);
+ ov16825_configure_gain(info, info->gain->val);
+}
+
+/*
+ * Read-modify-write a single bit of ISP_CTRL00, then force a gain
+ * reconfiguration so the new ISP setting takes effect.
+ * No-op while the sensor is not streaming.
+ */
+void ov16825_set_isp_ctrl0(struct ov16825_info *info, int bit, int val)
+{
+ u8 reg;
+
+ if (!info->streaming) {
+ return;
+ }
+
+ /* NOTE(review): sensor_read result is unchecked; reg may be
+  * uninitialized on an I2C error — confirm acceptable. */
+ sensor_read(&info->sd, ISP_CTRL00, &reg);
+ reg &= ~(1 << bit);
+ reg |= (!!val) << bit;
+ sensor_write(&info->sd, ISP_CTRL00, reg);
+
+ ov16825_force_gain_reconfig(info);
+}
+
+/*
+ * Read-modify-write a single bit of ISP_CTRL01, then force a gain
+ * reconfiguration so the new ISP setting takes effect.
+ * No-op while the sensor is not streaming.
+ */
+void ov16825_set_isp_ctrl1(struct ov16825_info * info, int bit, int val)
+{
+ u8 reg;
+
+ if (!info->streaming) {
+ return;
+ }
+
+ sensor_read(&info->sd, ISP_CTRL01, &reg);
+ reg &= ~(1 << bit);
+ reg |= (!!val) << bit;
+ sensor_write(&info->sd, ISP_CTRL01, reg);
+
+ ov16825_force_gain_reconfig(info);
+}
+
+/*
+ * Enable/disable digital gain via ISP_CTRL04 (written unconditionally,
+ * even when not streaming), then force a gain reconfiguration.
+ */
+void ov16825_set_digital_gain_en(struct ov16825_info * info, int en)
+{
+ sensor_write(&info->sd, ISP_CTRL04, !!en);
+
+ ov16825_force_gain_reconfig(info);
+}
+
+/*
+ * Write a 16-bit per-component white-balance gain to a big-endian
+ * register pair: high byte at @reg, low byte at @reg + 1.
+ */
+void ov16825_set_component_gain(struct ov16825_info *info, u16 reg, u16 val)
+{
+ sensor_write(&info->sd, reg, (u8)(val >> 8));
+ sensor_write(&info->sd, reg + 1, (u8)(val & 0xff));
+}
+
+/*
+ * Apply the currently selected test-pattern menu entry to the
+ * TEST_PATTERN register.  No-op while the sensor is not streaming.
+ */
+static void ov16825_set_test_pattern(struct ov16825_info *info)
+{
+ u8 pattern = ov16825_test_pattern_registers[info->test_pattern->val];
+
+ if (!info->streaming) {
+ return;
+ }
+
+ sensor_write(&info->sd, TEST_PATTERN, pattern);
+}
+
+/*
+ * Apply the vertical-flip control: read-modify-write bits 1-2
+ * (mask 0x6) of FORMAT0.  No-op while the sensor is not streaming.
+ */
+static void ov16825_configure_vflip(struct ov16825_info *info)
+{
+ const u8 mask = 0x6;
+ u8 fmt;
+
+ if (!info->streaming)
+ return;
+
+ sensor_read(&info->sd, FORMAT0, &fmt);
+
+ if (info->vflip->val)
+ fmt |= mask;
+ else
+ fmt &= ~mask;
+
+ sensor_write(&info->sd, FORMAT0, fmt);
+}
+
+/*
+ * Apply the horizontal-flip control: read-modify-write bits 1-2
+ * (mask 0x6) of FORMAT1.  No-op while the sensor is not streaming.
+ */
+static void ov16825_configure_hflip(struct ov16825_info *info)
+{
+ const u8 mask = 0x6;
+ u8 fmt;
+
+ if (!info->streaming)
+ return;
+
+ sensor_read(&info->sd, FORMAT1, &fmt);
+
+ if (info->hflip->val)
+ fmt |= mask;
+ else
+ fmt &= ~mask;
+
+ sensor_write(&info->sd, FORMAT1, fmt);
+}
+
+/* Fixed I2C address of the voice-coil motor (focus) driver IC */
+#define VCM_ADDR 0xc
+
+/*
+ * Send one 16-bit command word (big-endian) to the VCM over the same
+ * I2C adapter as the sensor.  Returns 0 on success or the negative
+ * error code from i2c_transfer (also logged).
+ */
+int vcm_write(struct ov16825_info *info, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&info->sd);
+ int ret;
+ u8 data[2];
+ struct i2c_msg msg = {
+ .addr = VCM_ADDR,
+ .flags = 0,
+ .len = sizeof(data),
+ .buf = data,
+ };
+
+ data[0] = val >> 8;
+ data[1] = val & 0xff;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ if (ret < 0) {
+ v4l2_err(&info->sd,
+ "VCM I2C write failed [%04x: %d]\n",
+ val, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Move the VCM to the position given by the focus control.
+ * The command words follow the VCM driver IC's protocol; their exact
+ * meaning is not documented here — presumably an unlock/configure/lock
+ * sequence followed by the position word (TODO confirm against the VCM
+ * datasheet).  No-op while the sensor is not streaming.
+ * Return codes of vcm_write are intentionally ignored (best-effort).
+ */
+static void ov16825_set_focus(struct ov16825_info *info)
+{
+ unsigned focus = info->focus->val;
+
+ if (!info->streaming) {
+ return;
+ }
+
+ // Protection off
+ vcm_write(info, 0xECA3);
+ // Setting
+ vcm_write(info, 0xF200|(0x0F<<3));
+ // Protection On
+ vcm_write(info, 0xDC51);
+ // Position
+ vcm_write(info, (focus << 4 |
+ (0x3 << 2 ) |
+ (0x0 << 0)));
+}
+
+/*
+ * V4L2 control handler: dispatch a control change to the matching
+ * sensor-configuration helper.  Most helpers are no-ops while the
+ * sensor is not streaming.
+ *
+ * Always returns 0: hardware write failures are handled (logged) by
+ * the low-level helpers rather than reported to the control framework.
+ */
+static int ov16825_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov16825_info *info = container_of(ctrl->handler,
+ struct ov16825_info,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ov16825_configure_gain(info, info->gain->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ov16825_configure_exposure(info);
+ break;
+ case V4L2_CID_VFLIP:
+ ov16825_configure_vflip(info);
+ break;
+ case V4L2_CID_HFLIP:
+ ov16825_configure_hflip(info);
+ break;
+ case V4L2_CID_OV16825_ISP_EN:
+ ov16825_set_isp_ctrl0(info, 0, info->isp_en->val);
+ break;
+ case V4L2_CID_OV16825_AVERAGE_EN:
+ ov16825_set_isp_ctrl0(info, 1, info->average_en->val);
+ break;
+ case V4L2_CID_OV16825_WHITE_DPC_EN:
+ ov16825_set_isp_ctrl0(info, 2, info->white_dpc_en->val);
+ break;
+ case V4L2_CID_OV16825_BLACK_DPC_EN:
+ ov16825_set_isp_ctrl0(info, 3, info->black_dpc_en->val);
+ break;
+ case V4L2_CID_OV16825_WB_GAIN_EN:
+ ov16825_set_isp_ctrl0(info, 4, info->wb_gain_en->val);
+ break;
+ case V4L2_CID_OV16825_OTP_CC_EN:
+ ov16825_set_isp_ctrl0(info, 5, info->otp_cc_en->val);
+ break;
+ case V4L2_CID_OV16825_DBC_EN:
+ ov16825_set_isp_ctrl0(info, 6, info->dbc_en->val);
+ break;
+ case V4L2_CID_OV16825_SCALE_EN:
+ ov16825_set_isp_ctrl0(info, 7, info->scale_en->val);
+ break;
+ case V4L2_CID_OV16825_BLC_EN:
+ ov16825_set_isp_ctrl1(info, 0, info->blc_en->val);
+ break;
+ case V4L2_CID_OV16825_AVERAGE_BEFORE:
+ ov16825_set_isp_ctrl1(info, 5, info->average_before->val);
+ break;
+ case V4L2_CID_OV16825_DIGITAL_GAIN_EN:
+ ov16825_set_digital_gain_en(info, info->digital_gain_en->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ov16825_set_test_pattern(info);
+ break;
+ case V4L2_CID_OV16825_RED_GAIN:
+ ov16825_set_component_gain(info,
+ RED_GAIN,
+ info->red_gain->val);
+ break;
+ case V4L2_CID_OV16825_GREEN_GAIN:
+ ov16825_set_component_gain(info,
+ GREEN_GAIN,
+ info->green_gain->val);
+ break;
+ case V4L2_CID_OV16825_BLUE_GAIN:
+ ov16825_set_component_gain(info,
+ BLUE_GAIN,
+ info->blue_gain->val);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ov16825_set_focus(info);
+ break;
+ default:
+ /* Unknown control id: nothing to apply */
+ break;
+ }
+
+ return 0;
+}
+
+/* All controls funnel through the single ov16825_s_ctrl() dispatcher. */
+static const struct v4l2_ctrl_ops ov16825_ctrl_ops = {
+ .s_ctrl = ov16825_s_ctrl,
+};
+
+/*
+ * Replay every cached control value to the hardware. Meant to be called
+ * once the sensor is streaming, since the setters are no-ops before that.
+ */
+void ov16825_apply_ctrl(struct ov16825_info *info)
+{
+	ov16825_s_ctrl(info->focus);
+	ov16825_s_ctrl(info->gain);
+	ov16825_s_ctrl(info->exposure);
+	ov16825_s_ctrl(info->vflip);
+	ov16825_s_ctrl(info->hflip);
+	ov16825_s_ctrl(info->isp_en);
+	ov16825_s_ctrl(info->average_en);
+	ov16825_s_ctrl(info->white_dpc_en);
+	ov16825_s_ctrl(info->black_dpc_en);
+	ov16825_s_ctrl(info->wb_gain_en);
+	ov16825_s_ctrl(info->otp_cc_en);
+	ov16825_s_ctrl(info->dbc_en);
+	ov16825_s_ctrl(info->scale_en);
+	ov16825_s_ctrl(info->blc_en);
+	ov16825_s_ctrl(info->average_before);
+	ov16825_s_ctrl(info->digital_gain_en);
+	ov16825_s_ctrl(info->red_gain);
+	ov16825_s_ctrl(info->green_gain);
+	ov16825_s_ctrl(info->blue_gain);
+	/*
+	 * Was missing: test_pattern is guarded by the same streaming check
+	 * as the other setters, so without this a selected test pattern was
+	 * silently dropped across a stream stop/start cycle.
+	 */
+	ov16825_s_ctrl(info->test_pattern);
+}
+
+/*
+ * Shorthand initializer for a custom on/off control routed through
+ * ov16825_ctrl_ops.
+ */
+#define OV16825_CUSTOM_BOOL_CTRL(_desc, _cid, _default) { \
+ .ops = &ov16825_ctrl_ops, \
+ .id = _cid, \
+ .name = _desc, \
+ .type = V4L2_CTRL_TYPE_BOOLEAN, \
+ .min = false, \
+ .max = true, \
+ .step = 1, \
+ .def = _default, \
+}
+
+static const struct v4l2_ctrl_config ov16825_isp_en =
+ OV16825_CUSTOM_BOOL_CTRL("ISP enable",
+ V4L2_CID_OV16825_ISP_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_average_en =
+ OV16825_CUSTOM_BOOL_CTRL("Average function enable",
+ V4L2_CID_OV16825_AVERAGE_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_white_dpc_en =
+ OV16825_CUSTOM_BOOL_CTRL("White DPC enable",
+ V4L2_CID_OV16825_WHITE_DPC_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_black_dpc_en =
+ OV16825_CUSTOM_BOOL_CTRL("Black DPC enable",
+ V4L2_CID_OV16825_BLACK_DPC_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_wb_gain_en =
+ OV16825_CUSTOM_BOOL_CTRL("White balance gain enable",
+ V4L2_CID_OV16825_WB_GAIN_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_otp_cc_en =
+ OV16825_CUSTOM_BOOL_CTRL("OTP cluster cancellation enable",
+ V4L2_CID_OV16825_OTP_CC_EN,
+ false);
+
+static const struct v4l2_ctrl_config ov16825_dbc_en =
+ OV16825_CUSTOM_BOOL_CTRL("Digital binning compensation enable",
+ V4L2_CID_OV16825_DBC_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_scale_en =
+ OV16825_CUSTOM_BOOL_CTRL("Digital scale function enable",
+ V4L2_CID_OV16825_SCALE_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_blc_en =
+ OV16825_CUSTOM_BOOL_CTRL("BLC function enable",
+ V4L2_CID_OV16825_BLC_EN,
+ true);
+
+static const struct v4l2_ctrl_config ov16825_average_before =
+ OV16825_CUSTOM_BOOL_CTRL("Average before ISP",
+ V4L2_CID_OV16825_AVERAGE_BEFORE,
+ false);
+
+static const struct v4l2_ctrl_config ov16825_digital_gain_en =
+ OV16825_CUSTOM_BOOL_CTRL("Digital gain enable",
+ V4L2_CID_OV16825_DIGITAL_GAIN_EN,
+ false);
+
+static const struct v4l2_ctrl_config ov16825_test_pattern = {
+ .ops = &ov16825_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Test Pattern",
+ .min = 0,
+ .max = ARRAY_SIZE(ov16825_test_pattern_menu) - 1,
+ .step = 0,
+ .def = 0,
+ .flags = 0,
+ .menu_skip_mask = 0,
+ .qmenu = ov16825_test_pattern_menu,
+};
+
+static const struct v4l2_ctrl_config ov16825_red_gain = {
+ .ops = &ov16825_ctrl_ops,
+ .id = V4L2_CID_OV16825_RED_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red gain",
+ .min = 0,
+ .max = 0xfff,
+ .step = 1,
+ .def = 0x400,
+};
+
+static const struct v4l2_ctrl_config ov16825_green_gain = {
+ .ops = &ov16825_ctrl_ops,
+ .id = V4L2_CID_OV16825_GREEN_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Green gain",
+ .min = 0,
+ .max = 0xfff,
+ .step = 1,
+ .def = 0x400,
+};
+
+static const struct v4l2_ctrl_config ov16825_blue_gain = {
+ .ops = &ov16825_ctrl_ops,
+ .id = V4L2_CID_OV16825_BLUE_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue gain",
+ .min = 0,
+ .max = 0xfff,
+ .step = 1,
+ .def = 0x400,
+};
+
+/*
+ * Register every V4L2 control exposed by the sensor and attach the handler
+ * to the subdev.
+ * Returns 0 on success or a negative errno; on failure the handler has
+ * been freed and must not be used.
+ */
+int ov16825_ctrl_create(struct ov16825_info *info)
+{
+	int rt;
+	u32 gain_min, gain_max;
+
+	rt = v4l2_ctrl_handler_init(&info->ctrl_handler, 32);
+	if (rt)
+		return rt;
+
+	/* Gain bounds come from the calibration table's first/last entries */
+	gain_min = ov16825_gain_table[0].gain;
+	gain_max = ov16825_gain_table[ARRAY_SIZE(ov16825_gain_table) - 1].gain;
+
+	info->gain = v4l2_ctrl_new_std(&info->ctrl_handler, &ov16825_ctrl_ops,
+				       V4L2_CID_GAIN,
+				       gain_min, gain_max, 1, gain_min);
+
+	info->exposure = v4l2_ctrl_new_std(&info->ctrl_handler,
+					   &ov16825_ctrl_ops,
+					   V4L2_CID_EXPOSURE,
+					   1, 100000, 1, 10000);
+
+	info->vflip = v4l2_ctrl_new_std(&info->ctrl_handler, &ov16825_ctrl_ops,
+					V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+	info->hflip = v4l2_ctrl_new_std(&info->ctrl_handler, &ov16825_ctrl_ops,
+					V4L2_CID_HFLIP, 0, 1, 1, 1);
+
+	info->red_gain = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					      &ov16825_red_gain, NULL);
+	info->green_gain = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						&ov16825_green_gain, NULL);
+	info->blue_gain = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					       &ov16825_blue_gain, NULL);
+	info->isp_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					    &ov16825_isp_en, NULL);
+	info->average_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						&ov16825_average_en, NULL);
+	info->white_dpc_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						  &ov16825_white_dpc_en, NULL);
+	info->black_dpc_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						  &ov16825_black_dpc_en, NULL);
+	info->wb_gain_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						&ov16825_wb_gain_en, NULL);
+	info->otp_cc_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						&ov16825_otp_cc_en, NULL);
+	info->dbc_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					    &ov16825_dbc_en, NULL);
+	info->scale_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					      &ov16825_scale_en, NULL);
+	info->blc_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+					    &ov16825_blc_en, NULL);
+	info->average_before = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						    &ov16825_average_before,
+						    NULL);
+	info->digital_gain_en = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						     &ov16825_digital_gain_en,
+						     NULL);
+	info->test_pattern = v4l2_ctrl_new_custom(&info->ctrl_handler,
+						  &ov16825_test_pattern, NULL);
+
+	info->focus = v4l2_ctrl_new_std(&info->ctrl_handler, &ov16825_ctrl_ops,
+					V4L2_CID_FOCUS_ABSOLUTE,
+					50, 1000, 1, 220);
+
+	/*
+	 * v4l2_ctrl_new_std()/v4l2_ctrl_new_custom() return NULL on failure
+	 * and latch the first error in handler->error. Previously this was
+	 * never checked, so a failed allocation left NULL control pointers
+	 * that ov16825_s_ctrl()/ov16825_apply_ctrl() would dereference.
+	 */
+	if (info->ctrl_handler.error) {
+		rt = info->ctrl_handler.error;
+		v4l2_ctrl_handler_free(&info->ctrl_handler);
+		return rt;
+	}
+
+	info->sd.ctrl_handler = &info->ctrl_handler;
+
+	return 0;
+}
+
+/* Release all controls registered by ov16825_ctrl_create(). */
+void ov16825_ctrl_free(struct ov16825_info *info)
+{
+ v4l2_ctrl_handler_free(&info->ctrl_handler);
+}
diff --git a/drivers/media/video/ov16825/ov16825_ctrl.h b/drivers/media/video/ov16825/ov16825_ctrl.h
new file mode 100644
index 00000000000000..96cfc984f5ad8a
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_ctrl.h
@@ -0,0 +1,35 @@
+/*
+ * drivers/media/video/ov16825/ov16825_ctrl.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 20-Oct-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _OV16825_CTRLS_H_
+#define _OV16825_CTRLS_H_
+
+#include "ov16825_common.h"
+
+#define V4L2_CID_OV16825_GET_PHYTIMDLY (V4L2_CID_CAMERA_CLASS_BASE + 0x100)
+
+extern int ov16825_ctrl_create(struct ov16825_info *info);
+extern void ov16825_ctrl_free(struct ov16825_info *info);
+extern void ov16825_apply_ctrl(struct ov16825_info *info);
+
+#endif
diff --git a/drivers/media/video/ov16825/ov16825_registers.h b/drivers/media/video/ov16825/ov16825_registers.h
new file mode 100644
index 00000000000000..696ba5ac389ad6
--- /dev/null
+++ b/drivers/media/video/ov16825/ov16825_registers.h
@@ -0,0 +1,401 @@
+/*
+ * drivers/media/video/ov16825/ov16825_registers.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 01-Dec-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __OV16825__H__
+#define __OV16825__H__
+
+/* PLL control */
+#define PLL1_CTRL0 0x0300
+#define PLL1_CTRL2 0x0302
+#define PLL1_CTRL3 0x0303
+#define PLL1_CTRL4 0x0304
+#define PLL1_CTRL5 0x0305
+
+#define OP_PIX_DIV4 0x0
+#define OP_PIX_DIV5 0x1
+#define OP_PIX_DIV6 0x2
+#define OP_PIX_DIV8 0x3
+#define PLL1_CTRL6 0x0306
+
+#define OP_2LANE_NODIV 0x0
+#define OP_2LANE_DIV2 0x1
+#define PLL1_CTRL7 0x0307
+
+#define PLL2_CTRL0 0x030B
+#define PLL2_CTRL1 0x030C
+#define PLL2_CTRL2 0x030D
+#define PLL2_CTRL3 0x030E
+#define PLL2_CTRL4 0x030F
+#define PLL3_CTRL0 0x0313
+#define PLL3_CTRL1 0x0314
+#define PLL3_CTRL2 0x0315
+#define PLL3_CTRL3 0x0316
+#define PLL3_CTRL4 0x0317
+#define MIPI_BITSEL_CTRL 0x031E
+#define LVDS_BIT16 0x031F
+
+/* system control */
+#define SC_CTRL0100_STREAM_OFF 0x0
+#define SC_CTRL0100_STREAM_ON 0x1
+#define SC_CTRL0100 0x0100
+#define SC_CTRL0103_SOFTWARE_RESET 0x1
+#define SC_CTRL0103 0x0103
+#define SC_CMMN_PAD_OEN0 0x3000
+#define SC_CMMN_PAD_OEN2 0x3002
+#define SCCB_ID 0x3004
+#define CMMN_SCCB_ID2 0x3006
+#define SC_CMMN_PAD_OUT0 0x3008
+#define SC_CMMN_CHIP_ID1 0x300A
+#define SC_CMMN_CHIP_ID2 0x300B
+#define SC_CMMN_CHIP_ID3 0x300C
+#define SC_CMMN_PAD_OUT2 0x300D
+#define SC_CMMN_PAD_SEL2 0x3010
+#define SC_CMMN_PAD_PK 0x3011
+#define SCCB_SECOND_ID 0x3012
+
+#define RESET_MIPI_PHY_WHEN_SLEEP 0x2
+#define MODE_1LANES 0x00
+#define MODE_2LANES 0x20
+#define MODE_4LANES 0x60
+#define MODE_8LANES 0x70
+
+#define SC_CMMN_MIPI_SC_CTRL 0x3018
+#define SC_CMMN_MIPI_SC_CTRL_MANUAL 0x3019
+#define SC_CMMN_CLKRST0 0x301A
+#define SC_CMMN_CLKRST1 0x301B
+#define SC_CMMN_CLKRST2 0x301C
+#define SC_CMMN_CLKRST3 0x301D
+#define SC_CMMN_CLKRST4 0x301E
+#define SC_CMMN_CLKRST5 0x301F
+#define SC_CMMN_CLOCK_SEL 0x3020
+#define SC_CMMN_MISC_CTRL 0x3021
+#define SC_CMMN_MIPI_SC_CTRL2 0x3022
+#define SC_CMMN_SUB_ID 0x302A
+#define SC_CMMN_BIT_SEL 0x3031
+
+#define SEL_SCLK_PLL_SCLK 0x00
+#define SEL_SCLK_PLL2 0x80
+#define SC_CMMN_CORE_CTRL0 0x3032
+
+#define SEL_DACCLK_PLL_DACCLK 0x4
+#define SEL_DACCLK_PLL_SCLK 0x0
+#define SC_CMMN_CORE_CTRL1 0x3033
+#define SIF_CTRL0 0x303C
+
+/* test pattern control */
+#define PATTERN_DISABLE 0x00
+#define PATTERN_BLACK 0x83
+#define PATTERN_COLOR_BAR 0x80
+#define PATTERN_COLOR_BAR_DARK_TOP_BOTTOM 0x84
+#define PATTERN_COLOR_BAR_DARK_LEFT_RIGHT 0x88
+#define PATTERN_COLOR_BAR_DARK_BOTTOM_TOP 0x8C
+#define PATTERN_SQUARE_BW 0x92
+#define PATTERN_SQUARE_COLOR 0x82
+#define PATTERN_RANDOM 0x81
+
+#define RED_GAIN 0x500C
+#define RED_GAIN_LSB 0x500D
+#define GREEN_GAIN 0x500E
+#define GREEN_GAIN_LSB 0x500F
+#define BLUE_GAIN 0x5010
+#define BLUE_GAIN_LSB 0x5011
+
+#define TEST_PATTERN_0 0x5040
+#define TEST_PATTERN_1 0x5041
+
+/* exposure/gain control */
+#define EXPO0 0x3500
+#define EXPO1 0x3501
+#define EXPO2 0x3502
+#define R_MANUAL 0x3503
+#define GAIN0 0x3508
+#define GAIN1 0x3509
+#define AVG_CTRL00 0x5680
+#define AVG_CTRL01 0x5681
+#define AVG_CTRL02 0x5682
+#define AVG_CTRL03 0x5683
+#define AVG_CTRL04 0x5684
+#define AVG_CTRL05 0x5685
+#define AVG_CTRL06 0x5686
+#define AVG_CTRL07 0x5687
+#define AVG_CTRL08 0x5688
+#define AVG_CTRL09 0x5689
+#define AVG_CTRL0A 0x568A
+#define AVG_CTRL0B 0x568B
+#define AVG_CTRL0C 0x568C
+#define AVG_CTRL0D 0x568D
+#define AVG_CTRL0E 0x568E
+#define AVG_CTRL0F 0x568F
+#define AVG_CTRL10 0x5690
+#define AVG_ROREG2 0x5693
+
+/* system timing control registers */
+#define H_CROP_START0 0x3800
+#define H_CROP_START1 0x3801
+#define V_CROP_START0 0x3802
+#define V_CROP_START1 0x3803
+#define H_CROP_END0 0x3804
+#define H_CROP_END1 0x3805
+#define V_CROP_END0 0x3806
+#define V_CROP_END1 0x3807
+#define H_OUTPUT_SIZE0 0x3808
+#define H_OUTPUT_SIZE1 0x3809
+#define V_OUTPUT_SIZE0 0x380A
+#define V_OUTPUT_SIZE1 0x380B
+#define TIMING_HTS0 0x380C
+#define TIMING_HTS1 0x380D
+#define TIMING_VTS0 0x380E
+#define TIMING_VTS1 0x380F
+#define H_WIN_OFF0 0x3810
+#define H_WIN_OFF1 0x3811
+#define V_WIN_OFF0 0x3812
+#define V_WIN_OFF1 0x3813
+#define H_INC_ODD 0x3814
+#define H_INC_EVEN 0x3815
+#define VTS_EXP_DIFF 0x381A
+#define FORMAT0 0x3820
+#define FORMAT1 0x3821
+#define TIMING_REG23 0x3823
+#define CS_RST_FSIN_HI 0x3824
+#define CS_RST_FSIN_LO 0x3825
+#define R_RST_FSIN_HI 0x3826
+#define R_RST_FSIN_LO 0x3827
+#define TIMING_REG28 0x3828
+#define FORMAT2 0x3829
+#define V_INC_ODD 0x382A
+#define V_INC_EVEN 0x382B
+#define FRACTIONAL_HTS_HI 0x3832
+#define FRACTIONAL_HTS_LO 0x3833
+#define EXT_DIV_FACTORS 0x3834
+#define GROUP_WRITE_OPTION 0x3835
+
+/* strobe control */
+#define STROBE_REG0 0x3B00
+#define STROBE_REG1 0x3B02
+#define STROBE_REG2 0x3B03
+
+/* FREX control */
+#define FREX_REG5 0x3B85
+#define FREX_REG6 0x3B86
+#define FREX_REG7 0x3B87
+#define FREX_REG9 0x3B89
+#define FREX_REGA 0x3B8A
+#define FREX_REGB 0x3B8B
+#define FREX_REGC 0x3B8C
+#define FREX_REGD 0x3B8D
+#define FREX_REGE 0x3B8E
+#define FREX_REGF 0x3B8F
+#define FREX_REG10 0x3B90
+#define FREX_REG11 0x3B91
+#define FREX_REG12 0x3B92
+#define FREX_REG13 0x3B93
+#define FREX_REG14 0x3B94
+#define FREX_REG15 0x3B95
+#define FREX_REG16 0x3B96
+#define FREX_REG1E 0x3B9E
+#define FREX_REG1F 0x3B9F
+
+/* OTP control */
+#define OTP_PROGRAM_CTRL 0x3D80
+#define OTP_LOA_CTRL 0x3D81
+#define OTP_MODE_CTRL 0x3D84
+#define OTP_REG85 0x3D85
+#define OTP_START_ADDRESS0 0x3D88
+#define OTP_START_ADDRESS1 0x3D89
+#define OTP_END_ADDRESS0 0x3D8A
+#define OTP_END_ADDRESS1 0x3D8B
+#define OTP_SETTING_STT_ADDRESS0 0x3D8C
+#define OTP_SETTING_STT_ADDRESS1 0x3D8D
+#define OTP_BIST_ERR_ADDRESS0 0x3D8E
+#define OTP_BIST_ERR_ADDRESS1 0x3D8F
+
+/* BLC control */
+#define BLC_CTRL00 0x4000
+#define BLC_CTRL04 0x4004
+#define BLC_CTRL05 0x4005
+
+/* frame control */
+#define FRAME_CONTROL00 0x4200
+#define FRAME_CONTROL01 0x4201
+#define FRAME_CONTROL02 0x4202
+#define FRAME_CONTROL03 0x4203
+
+/* clipping */
+#define CLIPPING_MAX0 0x4302
+#define CLIPPING_MAX1 0x4303
+#define CLIPPING_MIN0 0x4304
+#define CLIPPING_MIN1 0x4305
+#define DPCM_CTRL 0x4306
+#define MIPI_16BIT_DT 0x4307
+
+/* VFIFO control */
+#define VFIFO_REG2 0x4602
+#define VFIFO_REG3 0x4603
+
+/* MIPI control */
+#define MIPI_CTRL_00 0x4800
+#define MIPI_CTRL_01 0x4801
+#define MIPI_CTRL_02 0x4802
+#define MIPI_CTRL_04 0x4804
+#define MIPI_MAX_FRAME_COUNT_HI 0x4810
+#define MIPI_MAX_FRAME_COUNT_LO 0x4811
+#define MIPI_CTRL14 0x4814
+#define MIPI_CTRL15 0x4815
+#define HS_ZERO_MIN_HI 0x4818
+#define HS_ZERO_MIN_LO 0x4819
+#define HS_TRAIL_MIN_HI 0x481A
+#define HS_TRAIL_MIN_LO 0x481B
+#define CLK_ZERO_MIN_HI 0x481C
+#define CLK_ZERO_MIN_LO 0x481D
+#define CLK_PREPARE_MIN_HI 0x481E
+#define CLK_PREPARE_MIN_LO 0x481F
+#define CLK_POST_MIN_HI 0x4820
+#define CLK_POST_MIN_LO 0x4821
+#define CLK_TRAIL_MIN_HI 0x4822
+#define CLK_TRAIL_MIN_LO 0x4823
+#define LPX_P_MIN_HI 0x4824
+#define LPX_P_MIN_LO 0x4825
+#define HS_PREPARE_MIN_HI 0x4826
+#define HS_PREPARE_MIN_LO 0x4827
+#define HS_EXIT_MIN_HI 0x4828
+#define HS_EXIT_MIN_LO 0x4829
+#define UI_HS_ZERO_MIN 0x482A
+#define UI_HS_TRAIL_MIN 0x482B
+#define UI_CLK_ZERO_MIN 0x482C
+#define UI_CLK_PREPARE_MIN 0x482D
+#define UI_CLK_POST_MIN 0x482E
+#define UI_CLK_TRAIL_MIN 0x482F
+#define UI_LPX_P_MIN 0x4830
+#define UI_HS_PREPARE_MIN 0x4831
+#define UI_HS_EXIT_MIN 0x4832
+#define PCLK_PERIOD 0x4837
+#define MIPI_LP_GPIO_0 0x4838
+#define MIPI_LP_GPIO_1 0x483A
+#define MIPI_LP_GPIO 0x483B
+#define MIPI_CTRL_3C 0x483C
+#define MIPI_LP_GPIO_3 0x483D
+#define MIPI_CTRL4C 0x484C
+#define TEST_PATTERN 0x484D
+#define FE_DLY 0x484E
+#define CLK_LANE_TEST_PATTERN 0x484F
+#define MIPI8LANE_MODE 0x4850
+//#define MIPI_CTRL_00 0x4A00
+//#define MIPI_CTRL_01 0x4A01
+//#define MIPI_CTRL_02 0x4A02
+//#define MIPI_CTRL_04 0x4A04
+//#define MIPI_MAX_FRAME_COUNT_HI 0x4A10
+//#define MIPI_MAX_FRAME_COUNT_LO 0x4A11
+//#define MIPI_CTRL14 0x4A14
+//#define MIPI_CTRL15 0x4A15
+//#define HS_ZERO_MIN_HI 0x4A18
+//#define HS_ZERO_MIN_LO 0x4A19
+//#define HS_TRAIL_MIN_HI 0x4A1A
+//#define HS_TRAIL_MIN_LO 0x4A1B
+//#define CLK_ZERO_MIN_HI 0x4A1C
+//#define CLK_ZERO_MIN_LO 0x4A1D
+//#define CLK_PREPARE_MIN_HI 0x4A1E
+//#define CLK_PREPARE_MIN_LO 0x4A1F
+//#define CLK_POST_MIN_HI 0x4A20
+//#define CLK_POST_MIN_LO 0x4A21
+//#define CLK_TRAIL_MIN_HI 0x4A22
+//#define CLK_TRAIL_MIN_LO 0x4A23
+//#define LPX_P_MIN_HI 0x4A24
+//#define LPX_P_MIN_LO 0x4A25
+//#define HS_PREPARE_MIN_HI 0x4A26
+//#define HS_PREPARE_MIN_LO 0x4A27
+//#define HS_EXIT_MIN_HI 0x4A28
+//#define HS_EXIT_MIN_LO 0x4A29
+//#define UI_HS_ZERO_MIN 0x4A2A
+//#define UI_HS_TRAIL_MIN 0x4A2B
+//#define UI_CLK_ZERO_MIN 0x4A2C
+//#define UI_CLK_PREPARE_MIN 0x4A2D
+//#define UI_CLK_POST_MIN 0x4A2E
+//#define UI_CLK_TRAIL_MIN 0x4A2F
+//#define UI_LPX_P_MIN 0x4A30
+//#define UI_HS_PREPARE_MIN 0x4A31
+//#define UI_HS_EXIT_MIN 0x4A32
+//#define PCLK_PERIOD 0x4A37
+//#define MIPI_LP_GPIO_0 0x4A38
+//#define MIPI_LP_GPIO_1 0x4A3A
+//#define MIPI_LP_GPIO 0x4A3B
+//#define MIPI_CTRL_3C 0x4A3C
+//#define MIPI_LP_GPIO_3 0x4A3D
+//#define MIPI_CTRL4C 0x4A4C
+//#define TEST_PATTERN 0x4A4D
+//#define FE_DLY 0x4A4E
+#define CLK_LANE_TEST 0x4A4F
+
+/* ISP frame control */
+//#define FRAME_CONTROL00 0x4900
+//#define FRAME_CONTROL01 0x4901
+//#define FRAME_CONTROL02 0x4902
+//#define FRAME_CONTROL03 0x4903
+
+/* LVDS control */
+#define LVDS_REG0 0x4B00
+#define LVDS_REG2 0x4B02
+#define LVDS_REG3 0x4B03
+#define LVDS_REG4 0x4B04
+#define LVDS_REG5 0x4B05
+#define LVDS_REG6 0x4B06
+#define LVDS_REG7 0x4B07
+#define LVDS_REG8 0x4B08
+#define LVDS_REG9 0x4B09
+#define LVDS_REGA 0x4B0A
+
+/* DSP general control */
+#define ISP_CTRL00 0x5000
+#define ISP_CTRL01 0x5001
+#define ISP_CTRL04 0x5004
+#define ISP_CTRL0C 0x500C
+#define ISP_CTRL0D 0x500D
+#define ISP_CTRL0E 0x500E
+#define ISP_CTRL0F 0x500F
+#define ISP_CTRL10 0x5010
+#define ISP_CTRL11 0x5011
+
+/* OTP cluster correction control */
+#define OTP_CTRL00 0x5500
+#define OTP_CTRL01 0x5501
+#define OTP_CTRL02 0x5502
+#define OTP_CTRL03 0x5503
+#define OTP_CTRL05 0x5505
+#define OTP_CTRL06 0x5506
+#define OTP_CTRL07 0x5507
+#define OTP_CTRL08 0x5508
+#define OTP_CTRL09 0x5509
+
+/* windowing and scaling control */
+#define WINC_CTRL00 0x5980
+#define WINC_CTRL01 0x5981
+#define WINC_CTRL02 0x5982
+#define WINC_CTRL03 0x5983
+#define WINC_CTRL04 0x5984
+#define WINC_CTRL05 0x5985
+#define WINC_CTRL06 0x5986
+#define WINC_CTRL07 0x5987
+#define WINC_CTRL08 0x5988
+#define SCALE_CTRL00 0x5A00
+#define SCALE_CTRL01 0x5A01
+#define SCALE_CTRL02 0x5A02
+
+#endif //__OV16825__H__
diff --git a/drivers/media/video/ov7740.c b/drivers/media/video/ov7740.c
new file mode 100644
index 00000000000000..160e59fbc3db8a
--- /dev/null
+++ b/drivers/media/video/ov7740.c
@@ -0,0 +1,798 @@
+/* ov7740.c
+ *
+ * Driver for Omnivision 7740 camera
+ *
+ * Author : Maxime JOURDAN <maxime.jourdan@parrot.com>
+ *
+ * Date : 10/10/2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/ov7740.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+#define VGA_FPS 30
+
+#define QVGA_WIDTH 320
+#define QVGA_HEIGHT 240
+#define QVGA_FPS 60
+
+#define OV7740_ID_MSB 0x77
+#define OV7740_ID_LSB 0x42
+
+/* Registers */
+#define OV7740_GAIN 0x00
+#define OV7740_BGAIN 0x01
+#define OV7740_RGAIN 0x02
+#define OV7740_GGAIN 0x03
+#define OV7740_AEC_MSB 0x0F
+#define OV7740_AEC_LSB 0x10
+#define OV7740_DIG_GAIN 0x15
+
+MODULE_AUTHOR("Maxime JOURDAN <maxime.jourdan@parrot.com>");
+MODULE_DESCRIPTION("Omnivision OV7740 VGA camera sensor driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+#define SUB_ADDRESS 0x38
+#define YAVG_CONFIG 0xE9
+
+/* Per-device state for one OV7740 sensor instance. */
+struct ov7740 {
+ struct v4l2_subdev sd;
+ struct i2c_client *i2c_client;
+ struct media_pad pad;
+ /* Active mbus format (VGA or QVGA) */
+ struct v4l2_mbus_framefmt format;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ /* Exposure averaging window control: {x, y, width, height} as u16[4] */
+ struct v4l2_ctrl *expo_window;
+
+ /* Non-zero while the sensor is streaming; gates control application */
+ int is_streaming;
+ /* Board-supplied power callback — not used in the code visible here */
+ int (*set_power)(int on);
+};
+
+/* Map an embedded v4l2_subdev back to its containing ov7740 state. */
+static inline struct ov7740 *to_ov7740(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov7740, sd);
+}
+
+/* Register maps */
+/* One register address / value pair for bulk configuration tables. */
+struct reg_cfg {
+ unsigned char reg;
+ unsigned char val;
+};
+
+// Omnivision Application Note defaults
+static struct reg_cfg ov7740_cfg[] = {
+ { 0x11, 0x00 },
+ { 0x12, 0x20 },
+ { 0x17, 0x25 },
+ { 0x18, 0xA1 },
+ { 0x19, 0x03 },
+ { 0x1B, 0x89 },
+ { 0x22, 0x03 },
+ { 0x31, 0xA1 },
+ { 0x32, 0xF0 },
+ { 0x33, 0xC4 },
+ { 0x36, 0x3F },
+ { 0x04, 0x60 },
+ { 0x40, 0x7F },
+ { 0x41, 0x6A },
+ { 0x42, 0x29 },
+ { 0x44, 0xE5 },
+ { 0x45, 0x41 },
+ { 0x47, 0x42 },
+ { 0x48, 0x00 },
+ { 0x49, 0x61 },
+ { 0x64, 0x00 },
+ { 0x67, 0x88 },
+ { 0x68, 0x1A },
+ { 0x26, 0x72 },
+ { 0x53, 0x00 },
+ { 0x55, 0xC0 },
+ { 0x56, 0x55 },
+ { 0x57, 0xFF },
+ { 0x58, 0xFF },
+ { 0x59, 0xFF },
+ { 0x80, 0x00 },
+ { 0x81, 0x3F },
+ { 0x82, 0x32 },
+ { 0x83, 0x01 },
+ { 0x38, 0x11 },
+ { 0x84, 0x70 },
+ { 0x85, 0x00 },
+ { 0x86, 0x03 },
+ { 0x87, 0x01 },
+ { 0x88, 0x05 },
+ { 0x70, 0x00 },
+ { 0x71, 0x34 },
+ { 0x74, 0x28 },
+ { 0x75, 0x98 },
+ { 0x76, 0x00 },
+ { 0x77, 0x08 },
+ { 0x78, 0x01 },
+ { 0x79, 0xC2 },
+ { 0x14, 0x28 },
+ { 0x50, 0x97 },
+ { 0x51, 0x7E },
+ { 0x52, 0x00 },
+ { 0x20, 0x00 },
+ { 0x21, 0x23 },
+ { 0x56, 0xC3 },
+ { 0x57, 0xEB },
+ { 0x58, 0xEB },
+ { 0x59, 0xC3 },
+ { 0x27, 0x00 },
+ { 0x24, 0x88 },
+ { 0x89, 0x30 },
+ { 0x93, 0x38 },
+ { 0x95, 0x85 },
+ { 0x99, 0x33 },
+ { 0x24, 0x44 },
+ { 0x25, 0x33 },
+ { 0x14, 0x28 },
+ { 0x13, 0xBF },
+ { 0x52, 0x00 },
+ { 0x50, 0x97 },
+ { 0x51, 0x7E },
+ { 0x80, 0x7F },
+};
+
+/* Map a v4l2_ctrl back to the ov7740 owning its control handler. */
+static inline struct ov7740 *ctrl_to_ov7740(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct ov7740, ctrl_handler);
+}
+
+/* i2c transfer functions */
+/*
+ * Read one sensor register over SMBus.
+ * Returns the byte read (0-255) on success, -EIO on transfer failure.
+ */
+static s32 ov7740_i2c_read_byte(struct ov7740 *ov7740,
+				u8 command)
+{
+	struct i2c_client *client = ov7740->i2c_client;
+	union i2c_smbus_data data;
+	int status;
+
+	status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+				I2C_SMBUS_READ, command,
+				I2C_SMBUS_BYTE_DATA, &data);
+	if (status)
+		return -EIO;
+
+	return data.byte;
+}
+
+/*
+ * Write one sensor register over SMBus, retrying up to 3 times.
+ * Returns 0 on success or the last negative error, after logging it.
+ */
+static s32 ov7740_i2c_write_byte(struct ov7740 *ov7740,
+				 u8 command,
+				 u8 value)
+{
+	struct i2c_client *client = ov7740->i2c_client;
+	int err = 0;
+	int i;
+
+	/*
+	 * The original declared a union i2c_smbus_data and stored the value
+	 * in it, but never passed it anywhere — dead code, removed.
+	 */
+	for (i = 0; i < 3; i++) {
+		err = i2c_smbus_write_byte_data(client, command, value);
+		if (!err)
+			break;
+	}
+	if (err < 0)
+		v4l2_err(client, "error writing %02x, %02x, %02x\n",
+			 client->addr, command, value);
+	return err;
+}
+
+/*
+ * Load an array of register/value pairs into the sensor, stopping at the
+ * first failure. Returns 0 on success or the failing write's error code.
+ */
+static int ov7740_write_reg_array(struct ov7740 *ov7740,
+				  struct reg_cfg *array, int nb)
+{
+	struct v4l2_subdev *sd = &ov7740->sd;
+	int i;
+
+	for (i = 0; i < nb; i++) {
+		s32 ret = ov7740_i2c_write_byte(ov7740,
+						array[i].reg,
+						array[i].val);
+		if (ret < 0) {
+			/*
+			 * Fix: the arguments were swapped relative to the
+			 * format string (it logged the register number as
+			 * the value and vice versa).
+			 */
+			v4l2_err(sd, "failed to write 0x%X to reg 0x%X\n",
+				 array[i].val, array[i].reg);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* video operations */
+
+/*
+ * Return the format backing store for pad 0: the subdev-fh try format for
+ * TRY requests, the device's active format otherwise. NULL for any other
+ * pad (the sensor has a single source pad).
+ */
+static struct v4l2_mbus_framefmt *
+__ov7740_get_fmt(struct ov7740 *ov7740,
+		 struct v4l2_subdev_fh *fh,
+		 unsigned int pad,
+		 enum v4l2_subdev_format_whence which)
+{
+	if (pad)
+		return NULL;
+
+	return (which == V4L2_SUBDEV_FORMAT_TRY) ?
+		v4l2_subdev_get_try_format(fh, pad) :
+		&ov7740->format;
+}
+
+/* Report the current (try or active) pad format. */
+static int ov7740_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct ov7740 *ov7740 = to_ov7740(sd);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __ov7740_get_fmt(ov7740, fh, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	fmt->format = *cur;
+	return 0;
+}
+
+/*
+ * Set the pad format. The OV7740 only supports VGA (640x480) and
+ * QVGA (320x240) output sizes; anything else is rejected.
+ */
+static int ov7740_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct ov7740 *ov7740 = to_ov7740(sd);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __ov7740_get_fmt(ov7740, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	/* Use the named size constants instead of magic 640/480 literals */
+	if (!(fmt->format.width == VGA_WIDTH &&
+	      fmt->format.height == VGA_HEIGHT) &&
+	    !(fmt->format.width == QVGA_WIDTH &&
+	      fmt->format.height == QVGA_HEIGHT))
+		return -EINVAL;
+
+	*format = fmt->format;
+	return 0;
+}
+
+/* Enumerate media bus codes: only the current format's code is exposed. */
+static int ov7740_enum_mbus_code(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct ov7740 *ov7740 = to_ov7740(sd);
+
+	if (code->index != 0)
+		return -EINVAL;
+
+	code->code = ov7740->format.code;
+	return 0;
+}
+
+/*
+ * Push the cached hflip/vflip control values to the sensor.
+ * NOTE(review): the 0x0C bit assignments (0x40 mirror, 0x80 flip) and the
+ * companion write to register 0x16 are taken from the original code, not
+ * a datasheet — confirm before changing.
+ */
+static int ov7740_apply_flip(struct ov7740 *ov7740)
+{
+	u8 reg0c = 0x12;
+
+	if (ov7740->hflip->val) {
+		reg0c |= 0x40;
+		ov7740_i2c_write_byte(ov7740, 0x16, 0x01);
+	} else {
+		ov7740_i2c_write_byte(ov7740, 0x16, 0x00);
+	}
+
+	if (ov7740->vflip->val)
+		reg0c |= 0x80;
+
+	return ov7740_i2c_write_byte(ov7740, 0x0C, reg0c);
+}
+
+
+/*
+ * Select a sub-register behind the SUB_ADDRESS window: read-modify-write
+ * the low nibble while preserving the high nibble.
+ * Returns 0 on success or a negative error.
+ */
+static int set_sub_address(struct ov7740 *ov7740, u8 sub)
+{
+	int ret = ov7740_i2c_read_byte(ov7740, SUB_ADDRESS);
+	u8 val;
+
+	if (ret < 0)
+		return ret;
+
+	val = (ret & 0xf0) | (sub & 0x0f);
+
+	return ov7740_i2c_write_byte(ov7740, SUB_ADDRESS, val);
+}
+
+/*
+ * Write one YAVG sub-register: select its sub-address, then write the data
+ * byte through the shared YAVG_CONFIG window.
+ * Returns 0 on success or a negative error.
+ */
+static int ov7740_yavg_write(struct ov7740 *ov7740, u8 sub, u8 val)
+{
+	int ret = set_sub_address(ov7740, sub);
+
+	if (ret < 0)
+		return ret;
+
+	return ov7740_i2c_write_byte(ov7740, YAVG_CONFIG, val);
+}
+
+/*
+ * Program the exposure averaging window from the expo_window control
+ * ({x, y, width, height}). An all-zero window switches the sensor back to
+ * automatic mode. The repeated select-then-write sequences of the original
+ * are factored into ov7740_yavg_write().
+ */
+static int ov7740_apply_expo_window(struct ov7740 *ov7740)
+{
+	u16 *window = ov7740->expo_window->p_new.p_u16;
+	u16 x = window[0];
+	u16 y = window[1];
+	u16 w = window[2];
+	u16 h = window[3];
+	u8 reg;
+	int ret;
+
+	/* When the window is 0 we switch to automatic mode */
+	if ((x | y | w | h) == 0)
+		return ov7740_yavg_write(ov7740, 4, 0);
+
+	/* Build YAVG_CTRL register value */
+	reg = 1 << 5;			/* Manual mode */
+	reg |= ((x >> 8) & 1) << 4;	/* YAGV XOFFSET MSB */
+	reg |= ((y >> 8) & 3);		/* YAGV YOFFSET MSB */
+
+	ret = ov7740_yavg_write(ov7740, 4, reg);
+	if (ret < 0)
+		return ret;
+
+	/* YAVG XOFFSET LSB */
+	ret = ov7740_yavg_write(ov7740, 2, x & 0xff);
+	if (ret < 0)
+		return ret;
+
+	/* YAVG YOFFSET LSB */
+	ret = ov7740_yavg_write(ov7740, 3, y & 0xff);
+	if (ret < 0)
+		return ret;
+
+	/* YAVG HSIZE (stored divided by 8, as in the original) */
+	ret = ov7740_yavg_write(ov7740, 5, (w / 8) & 0xff);
+	if (ret < 0)
+		return ret;
+
+	/* YAVG VSIZE (stored divided by 4, as in the original) */
+	ret = ov7740_yavg_write(ov7740, 6, (h / 4) & 0xff);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Reset the sensor and load the full configuration plus cached controls.
+ * Returns 0 on success or a negative error.
+ */
+static int ov7740_enable(struct ov7740 *ov7740)
+{
+	int ret;
+
+	/* Software reset, then give the sensor time to come back up */
+	ret = ov7740_i2c_write_byte(ov7740, 0x12, 0x80);
+	if (ret < 0)
+		return ret;
+
+	msleep(10);
+
+	/*
+	 * These return values were ignored: a failed table load or control
+	 * application left the sensor half-configured while s_stream still
+	 * reported success.
+	 */
+	ret = ov7740_write_reg_array(ov7740, ov7740_cfg,
+				     ARRAY_SIZE(ov7740_cfg));
+	if (ret < 0)
+		return ret;
+
+	ret = ov7740_apply_flip(ov7740);
+	if (ret < 0)
+		return ret;
+
+	return ov7740_apply_expo_window(ov7740);
+}
+
+/*
+ * Start or stop streaming. The sensor is fully (re)configured on start;
+ * stopping only updates the bookkeeping flag.
+ */
+static int ov7740_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ov7740 *ov7740 = to_ov7740(sd);
+
+	if (enable) {
+		int ret = ov7740_enable(ov7740);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	ov7740->is_streaming = !!enable;
+
+	return 0;
+}
+
+
+#define V4L2_CID_OV7740_RGAIN (V4L2_CID_CAMERA_CLASS_BASE + 0x100)
+#define V4L2_CID_OV7740_GGAIN (V4L2_CID_CAMERA_CLASS_BASE + 0x101)
+#define V4L2_CID_OV7740_BGAIN (V4L2_CID_CAMERA_CLASS_BASE + 0x102)
+#define V4L2_CID_OV7740_EXPO_WINDOW (V4L2_CID_CAMERA_CLASS_BASE + 0x103)
+
+/*
+ * V4L2 control handler. Controls set while the sensor is off are only
+ * cached (the framework stores the value); they are pushed to hardware by
+ * ov7740_enable() on the next stream start.
+ */
+static int ov7740_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov7740 *ov7740 = ctrl_to_ov7740(ctrl);
+
+ /* If not streaming, just keep interval structures up-to-date */
+ if (!ov7740->is_streaming)
+  return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+  return ov7740_apply_flip(ov7740);
+ case V4L2_CID_OV7740_EXPO_WINDOW:
+  return ov7740_apply_expo_window(ov7740);
+ }
+
+ return 0;
+}
+
+#define BIT_X(b, i) ((b >> i) & 0x01)
+
+/*
+ * Read back volatile controls (gains and exposure) straight from the
+ * sensor registers. When not streaming, the control's default value is
+ * reported instead.
+ *
+ * NOTE(review): ov7740_i2c_read_byte() returns a negative value on I2C
+ * failure, but the results are stored into u8 locals here, so errors are
+ * silently truncated into the reported value — consider checking.
+ */
+static int ov7740_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov7740 *ov7740 = ctrl_to_ov7740(ctrl);
+ u8 aec_lsb, aec_msb;
+ u8 again, dgain;
+
+ /* If not streaming, just default value */
+ if (!ov7740->is_streaming) {
+  ctrl->val = ctrl->default_value;
+  return 0;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+  /* Total gain = product of the digital/analog doubling bits times
+   * the 16..31 fine-gain mantissa in the low nibble of GAIN. */
+  again = ov7740_i2c_read_byte(ov7740, OV7740_GAIN);
+  dgain = ov7740_i2c_read_byte(ov7740, OV7740_DIG_GAIN);
+  ctrl->val = (BIT_X(dgain, 1) + 1) *
+   (BIT_X(dgain, 0) + 1) *
+   (BIT_X(again, 7) + 1) *
+   (BIT_X(again, 6) + 1) *
+   (BIT_X(again, 5) + 1) *
+   (BIT_X(again, 4) + 1) *
+   ((again & 0x0F) + 16);
+  break;
+ case V4L2_CID_OV7740_RGAIN:
+  ctrl->val = ov7740_i2c_read_byte(ov7740, OV7740_RGAIN);
+  break;
+ case V4L2_CID_OV7740_GGAIN:
+  ctrl->val = ov7740_i2c_read_byte(ov7740, OV7740_GGAIN);
+  break;
+ case V4L2_CID_OV7740_BGAIN:
+  ctrl->val = ov7740_i2c_read_byte(ov7740, OV7740_BGAIN);
+  break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+  /* 16-bit exposure split across two registers, MSB first */
+  aec_msb = ov7740_i2c_read_byte(ov7740, OV7740_AEC_MSB);
+  aec_lsb = ov7740_i2c_read_byte(ov7740, OV7740_AEC_LSB);
+  ctrl->val = (aec_msb << 8) + aec_lsb;
+  break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov7740_video_ops = {
+ .s_stream = ov7740_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov7740_pad_ops = {
+ .get_fmt = ov7740_get_fmt,
+ .set_fmt = ov7740_set_fmt,
+ .enum_mbus_code = ov7740_enum_mbus_code,
+};
+
+static const struct v4l2_subdev_ops ov7740_ops = {
+ .video = &ov7740_video_ops,
+ .pad = &ov7740_pad_ops,
+};
+
+static const struct v4l2_ctrl_ops ov7740_ctrl_ops = {
+ .s_ctrl = ov7740_s_ctrl,
+ .g_volatile_ctrl = ov7740_g_volatile_ctrl,
+};
+
+static const struct v4l2_ctrl_config ov7740_ctrl_rgain = {
+ .ops = &ov7740_ctrl_ops,
+ .id = V4L2_CID_OV7740_RGAIN,
+ .name = "Red gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 255,
+ .step = 1,
+ .def = 100,
+};
+
+static const struct v4l2_ctrl_config ov7740_ctrl_ggain = {
+ .ops = &ov7740_ctrl_ops,
+ .id = V4L2_CID_OV7740_GGAIN,
+ .name = "Green gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 255,
+ .step = 1,
+ .def = 100,
+};
+
+static const struct v4l2_ctrl_config ov7740_ctrl_bgain = {
+ .ops = &ov7740_ctrl_ops,
+ .id = V4L2_CID_OV7740_BGAIN,
+ .name = "Blue gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 255,
+ .step = 1,
+ .def = 100,
+};
+
+static const struct v4l2_ctrl_config ov7740_ctrl_expo_window = {
+ .ops = &ov7740_ctrl_ops,
+ .id = V4L2_CID_OV7740_EXPO_WINDOW,
+ .name = "Exposure Average Window: { x, y, width, height }",
+ .type = V4L2_CTRL_TYPE_U16,
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ .dims = { 4 },
+};
+
+/* Register the driver's V4L2 controls on the subdev.
+ *
+ * Returns 0 on success or a negative errno; on failure the handler is
+ * freed and the subdev is left without a control handler.
+ *
+ * Fix vs. original: v4l2_ctrl_new_std()/v4l2_ctrl_new_custom() return
+ * NULL on failure (the errno is latched in hdl->error), so every
+ * ctrl->flags dereference must be guarded to avoid a NULL pointer
+ * dereference when control creation fails (e.g. -ENOMEM). */
+static int ov7740_init_ctrls(struct ov7740* ov7740)
+{
+	struct v4l2_ctrl_handler *hdl = &ov7740->ctrl_handler;
+	struct v4l2_ctrl *ctrl;
+	int ret;
+
+	ret = v4l2_ctrl_handler_init(hdl, 7);
+	if (ret < 0) {
+		v4l2_err(&ov7740->sd, "failed to init ctrl handler\n");
+		goto einit;
+	}
+
+	ov7740->hflip = v4l2_ctrl_new_std(hdl,
+					  &ov7740_ctrl_ops,
+					  V4L2_CID_HFLIP,
+					  0, 1, 1, 0);
+
+	ov7740->vflip = v4l2_ctrl_new_std(hdl,
+					  &ov7740_ctrl_ops,
+					  V4L2_CID_VFLIP,
+					  0, 1, 1, 0);
+
+	/* Exposure window manual configuration */
+	ov7740->expo_window = v4l2_ctrl_new_custom(hdl,
+						   &ov7740_ctrl_expo_window,
+						   NULL);
+
+	/* The following controls are read back from the sensor registers,
+	 * hence VOLATILE. */
+	ctrl = v4l2_ctrl_new_std(hdl, &ov7740_ctrl_ops,
+				 V4L2_CID_EXPOSURE_ABSOLUTE, 0, 0xFFFF, 1, 0);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_std(hdl, &ov7740_ctrl_ops,
+				 V4L2_CID_GAIN, 0, 1984, 1, 0);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_custom(hdl, &ov7740_ctrl_rgain, NULL);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_custom(hdl, &ov7740_ctrl_ggain, NULL);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	ctrl = v4l2_ctrl_new_custom(hdl, &ov7740_ctrl_bgain, NULL);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	/* Any failure above was recorded in hdl->error. */
+	if (hdl->error) {
+		v4l2_err(&ov7740->sd, "failed to add new ctrls\n");
+		ret = hdl->error;
+		goto ectrl;
+	}
+
+	ov7740->sd.ctrl_handler = hdl;
+
+	return 0;
+
+ectrl:
+	v4l2_ctrl_handler_free(hdl);
+einit:
+	return ret;
+}
+
+/* I2C probe: allocate the driver state, power the sensor up, verify the
+ * chip ID registers (0x0A/0x0B) and register the media entity.
+ *
+ * Fixes vs. original error path:
+ *  - v4l2_device_unregister_subdev(sd) was called AFTER kfree(ov7740),
+ *    but sd points inside ov7740 — use-after-free. Unregister first.
+ *  - the control handler leaked when media_entity_init() failed.
+ *  - the sensor was left powered on after a probe failure.
+ *  - an init_ctrls failure printed the misleading "Failed to identify"
+ *    message. */
+static int ov7740_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct ov7740 *ov7740;
+	struct ov7740_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	struct v4l2_mbus_framefmt *format;
+	int ret = -ENODEV;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -EIO;
+
+	ov7740 = kzalloc(sizeof(*ov7740), GFP_KERNEL);
+	if (!ov7740) {
+		v4l2_err(client, "alloc failed for data structure\n");
+		return -ENOMEM;
+	}
+
+	ov7740->set_power = pdata->set_power;
+	ov7740->i2c_client = client;
+
+	/* Default format: VGA YUYV, progressive. */
+	format = &ov7740->format;
+	format->width = VGA_WIDTH;
+	format->height = VGA_HEIGHT;
+	format->field = V4L2_FIELD_NONE;
+	format->code = V4L2_MBUS_FMT_YUYV8_2X8;
+	format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	sd = &ov7740->sd;
+	v4l2_i2c_subdev_init(sd, client, &ov7740_ops);
+
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	ov7740->pad.flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+	/* see if chip is present */
+	if (pdata->set_power) {
+		pdata->set_power(1);
+		msleep(20);
+	}
+
+	ret = ov7740_i2c_read_byte(ov7740, 0x0A);
+	if (ret < 0 || ret != OV7740_ID_MSB) {
+		v4l2_err(sd, "ID MSB is %02X instead of %02X\n", ret, OV7740_ID_MSB);
+		goto echipident;
+	}
+
+	ret = ov7740_i2c_read_byte(ov7740, 0x0B);
+	if (ret < 0 || ret != OV7740_ID_LSB) {
+		v4l2_err(sd, "ID LSB is %02X instead of %02X\n", ret, OV7740_ID_LSB);
+		goto echipident;
+	}
+
+	ret = ov7740_init_ctrls(ov7740);
+	if (ret)
+		goto error;
+
+	ret = media_entity_init(&sd->entity, 1, &ov7740->pad, 0);
+	if (ret < 0) {
+		v4l2_err(sd, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	v4l2_info(sd, "Found ov7740 sensor\n");
+
+	return 0;
+
+echipident:
+	v4l2_err(sd, "Failed to identify OV7740 camera\n");
+	goto error;
+emedia:
+	v4l2_ctrl_handler_free(&ov7740->ctrl_handler);
+error:
+	/* Do not leave the sensor powered after a failed probe. */
+	if (pdata->set_power)
+		pdata->set_power(0);
+	/* Unregister while 'sd' (embedded in ov7740) is still valid. */
+	v4l2_device_unregister_subdev(sd);
+	kfree(ov7740);
+	return ret;
+}
+
+/* I2C remove: power the sensor down and undo everything probe set up.
+ * NOTE(review): the irq is freed here but no request_irq() is visible in
+ * this file — presumably it is requested by board/platform code; confirm
+ * the ownership before touching this. */
+static int ov7740_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov7740 *ov7740 = to_ov7740(sd);
+	struct ov7740_platform_data *pdata = NULL;
+	struct v4l2_ctrl_handler *hdl = &ov7740->ctrl_handler;
+
+	pdata = client->dev.platform_data;
+
+	if(ov7740->set_power)
+		ov7740->set_power(0);
+
+	if(client->irq > 0)
+		free_irq(client->irq, ov7740);
+
+	v4l2_ctrl_handler_free(hdl);
+
+	/* Unregister before freeing: sd is embedded in ov7740. */
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	kfree(ov7740);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Legacy i2c suspend hook: stop streaming and cut power, but keep
+ * is_streaming set so ov7740_resume() knows to restart the stream. */
+int ov7740_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov7740 *ov7740 = to_ov7740(sd);
+
+	// Saving the state to restore at resume
+	int is_streaming = ov7740->is_streaming;
+
+	// Stop if it was streaming
+	if (is_streaming)
+		ov7740_s_stream(sd, 0);
+
+	/* s_stream(0) cleared the flag; restore it for resume. */
+	ov7740->is_streaming = is_streaming;
+	if(ov7740->set_power)
+		ov7740->set_power(0);
+
+	return 0;
+}
+
+/* Legacy i2c resume hook: power the sensor back up and, if it was
+ * streaming at suspend time, reprogram and restart it. */
+int ov7740_resume(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct ov7740 *ov7740 = to_ov7740(sd);
+
+	if(ov7740->set_power)
+		ov7740->set_power(1);
+
+	/* Flag was preserved across suspend by ov7740_suspend(). */
+	if (ov7740->is_streaming)
+		ov7740_s_stream(sd, 1);
+	return 0;
+}
+#endif
+
+/* ----------------------------------------------------------------------- */
+
+static const struct i2c_device_id ov7740_id[] = {
+ { "ov7740", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ov7740_id);
+
+static struct i2c_driver ov7740_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ov7740",
+ },
+ .probe = ov7740_probe,
+ .remove = ov7740_remove,
+#ifdef CONFIG_PM
+ .suspend = ov7740_suspend,
+ .resume = ov7740_resume,
+#endif
+ .id_table = ov7740_id,
+};
+
+module_i2c_driver(ov7740_driver);
diff --git a/drivers/media/video/tc358746a.c b/drivers/media/video/tc358746a.c
new file mode 100644
index 00000000000000..a842fa5fba0749
--- /dev/null
+++ b/drivers/media/video/tc358746a.c
@@ -0,0 +1,751 @@
+/*
+ * tc358746a - Toshiba device TC358746A CSI2.0 <-> Parallel bridge
+ *
+ * It can perform protocol conversion in both direction, MIPI to parallel or
+ * parallel to MIPI.
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Wed Jul 2 09:16:13 CEST 2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <media/tc358746a.h>
+
+#include "tc358746a_reg.h"
+
+MODULE_AUTHOR("Eng-Hong SRON <eng-hong.sron@parrot.com>");
+MODULE_DESCRIPTION("Toshiba TC358746A driver");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME "tc358746a"
+
+enum {
+ TC358746A_IN_PAD = 0,
+ TC358746A_OUT_PAD,
+ TC358746A_NUM_PADS,
+};
+
+struct tc358746a {
+ struct v4l2_subdev sd;
+ struct media_pad pad[TC358746A_NUM_PADS];
+ struct tc358746a_platform_data *pdata;
+ unsigned long pclk;
+};
+
+static inline struct tc358746a *to_tc358746a(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct tc358746a, sd);
+}
+
+/* Read a 16-bit, big-endian register from the bridge.
+ *
+ * @reg: 16-bit register address.
+ * @val: filled with the register value on success.
+ *
+ * Returns 0 on success or the negative errno from i2c_transfer().
+ * Implemented as one combined write(addr)+read(data) I2C transaction. */
+static int tc358746a_read(struct v4l2_subdev *sd, u16 reg, u16 *val)
+{
+	int ret;
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	u8 regbuf[2];
+	u8 valbuf[2];
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr  = client->addr,
+			.flags = 0,
+			.len   = sizeof(regbuf),
+			.buf   = regbuf,
+		},
+		[1] = {
+			.addr  = client->addr,
+			.flags = I2C_M_RD,
+			.len   = sizeof(valbuf),
+			.buf   = valbuf,
+		},
+	};
+
+	/* Register address is sent MSB first. */
+	regbuf[0] = reg >> 8;
+	regbuf[1] = reg & 0xff;
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	/* Data comes back MSB first as well. */
+	*val = (((u16)valbuf[0]) << 8)
+	     | (u16)valbuf[1];
+
+	return 0;
+}
+
+/* Write a 16-bit value to a bridge register.
+ *
+ * Both the register address and the value are transmitted MSB first in a
+ * single 4-byte I2C write. Returns 0 on success or the negative errno
+ * from i2c_transfer(). */
+static int tc358746a_write(struct v4l2_subdev *sd, u16 reg, u16 val)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
+	u8 payload[4] = {
+		reg >> 8, reg & 0xff,	/* register address, big-endian */
+		val >> 8, val & 0xff,	/* value, big-endian */
+	};
+	struct i2c_msg msg = {
+		.addr  = client->addr,
+		.flags = 0,
+		.len   = sizeof(payload),
+		.buf   = payload,
+	};
+	int ret;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Return the subdev wired to the bridge's sink (input) pad, i.e. the
+ * sensor feeding the CSI receiver, or NULL (with an error logged) when
+ * nothing usable is connected. */
+static struct v4l2_subdev *tc358746a_get_remote_sd(struct v4l2_subdev *sd)
+{
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	struct media_pad *r_pad;
+	struct v4l2_subdev *r_sd;
+
+	r_pad = media_entity_remote_source(&tc358746a->pad[TC358746A_IN_PAD]);
+
+	/* The remote entity must itself be a V4L2 subdev. */
+	if (r_pad == NULL ||
+	    media_entity_type(r_pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) {
+		v4l2_err(sd, "Nothing connected to input pad\n");
+		return NULL;
+	}
+
+	r_sd = media_entity_to_v4l2_subdev(r_pad->entity);
+
+	if (!r_sd) {
+		v4l2_err(sd, "No connected subdev found\n");
+		return NULL;
+	}
+
+	return r_sd;
+}
+
+/* Since the bridge cannot really change the stream, we simply pass the command
+ * to the remote subdev no matter what is the requested pad.
+ */
+#define TC358746_MK_PAD_OP(_op, _type) \
+ static int tc358746a_ ## _op(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_fh *fh, \
+ struct _type *s) \
+ { \
+ struct v4l2_subdev *remote = tc358746a_get_remote_sd(sd); \
+ if (!remote) \
+ return -ENODEV; \
+ \
+ return v4l2_subdev_call(remote, pad, _op, fh, s); \
+ }
+
+/* Pad .get_fmt: forward to the remote sensor, then optionally override
+ * the media-bus code with the one forced by platform data (used when the
+ * bridge re-encodes the stream differently from what the sensor
+ * reports). */
+static int tc358746a_get_fmt(struct v4l2_subdev *sd,
+			     struct v4l2_subdev_fh *fh,
+			     struct v4l2_subdev_format *s)
+{
+	struct v4l2_subdev *remote = tc358746a_get_remote_sd(sd);
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	enum v4l2_mbus_pixelcode pixcode;
+	int ret;
+
+	if (!remote)
+		return -ENODEV;
+
+	ret = v4l2_subdev_call(remote, pad, get_fmt, fh, s);
+	if (ret)
+		return ret;
+
+	/* Zero means "no override" in the platform data. */
+	pixcode = tc358746a->pdata->force_subdev_pixcode;
+
+	if (pixcode) {
+		s->format.code = pixcode;
+	}
+
+	return 0;
+}
+
+/* Pad .enum_mbus_code: same forwarding/override scheme as get_fmt —
+ * enumerate on the sensor, then substitute the forced pixel code if the
+ * platform data requests one. */
+static int tc358746a_enum_mbus_code(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_fh *fh,
+				    struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct v4l2_subdev *remote = tc358746a_get_remote_sd(sd);
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	enum v4l2_mbus_pixelcode pixcode;
+	int ret;
+
+	if (!remote)
+		return -ENODEV;
+
+	ret = v4l2_subdev_call(remote, pad, enum_mbus_code, fh, code);
+	if (ret)
+		return ret;
+
+	/* Zero means "no override" in the platform data. */
+	pixcode = tc358746a->pdata->force_subdev_pixcode;
+
+	if (pixcode) {
+		code->code = pixcode;
+	}
+
+	return 0;
+}
+
+TC358746_MK_PAD_OP(set_fmt, v4l2_subdev_format)
+
+static const struct v4l2_subdev_pad_ops tc358746a_pad_ops = {
+ .get_fmt = tc358746a_get_fmt,
+ .set_fmt = tc358746a_set_fmt,
+ .enum_mbus_code = tc358746a_enum_mbus_code,
+};
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int tc358746a_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ int ret;
+ u16 val;
+
+ reg->size = 2;
+
+ if (reg->reg & ~0xffff)
+ return -EINVAL;
+
+ ret = tc358746a_read(sd, reg->reg, &val);
+ if (ret)
+ return ret;
+
+ reg->val = (__u64)val;
+
+ return 0;
+}
+
+static int tc358746a_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ if (reg->reg & ~0xffff || reg->val & ~0xffff)
+ return -EINVAL;
+
+ return tc358746a_write(sd, reg->reg, reg->val);
+}
+#endif
+
+static const struct v4l2_subdev_core_ops tc358746a_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = tc358746a_get_register,
+ .s_register = tc358746a_set_register,
+#endif
+};
+
+/* Configure the bridge clock tree (CSI PLL, SCLK/PPICLK dividers, MCLK)
+ * for the current pixel clock (tc358746a->pclk), then update pclk with
+ * the frequency actually achieved.
+ *
+ * Returns 0 on success, -1 when refclk/pclk are out of range or no PPI
+ * clock divider fits.
+ *
+ * Fixes vs. original:
+ *  - |pll_clk - pll_clk_test| was computed with unsigned arithmetic and
+ *    an always-false "if (error < 0)" test, so any candidate above the
+ *    target produced a huge wrapped error; likewise "best_error < 0"
+ *    could never be true. The absolute difference is now computed
+ *    branch-wise on unsigned values.
+ *  - refclk * (fbd + 1) can reach ~20e9 (40 MHz * 512) and overflowed a
+ *    32-bit unsigned long; the PLL math is now done in 64 bits.
+ *  - the clkctl/pllctl0 unions were written to hardware with
+ *    uninitialized padding bits; they are now zero-initialized. */
+static int tc358746a_set_pclk(struct v4l2_subdev *sd)
+{
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	struct tc358746a_platform_data *pdata = tc358746a->pdata;
+
+	unsigned int div[] = {8, 4, 2};
+	unsigned long pll_clk, ppi_clk;
+	uint16_t frs, fbd, prd;
+	unsigned long best_error = ULONG_MAX;
+	int s, p;
+	union clkctl clkctl = { ._register = 0 };
+	union pllctl0 pllctl0 = { ._register = 0 };
+	union pllctl1 pllctl1 = {{
+		.pll_en   = 1,
+		.resetb   = 1,
+		.cken     = 1,
+		.bypcken  = 0,
+		.lfbren   = 0,
+		.pll_lbws = 0,
+		.pll_frs  = 0,
+	}};
+
+#define IS_BETWEEN(_f, _min, _max) ((_f >= _min) && (_f <= _max))
+
+	/* Check Ref frequency */
+	if (!IS_BETWEEN(pdata->refclk, TC358746A_MIN_REFCLK,
+			TC358746A_MAX_REFCLK)) {
+		v4l2_err(sd, "refclk (%lu) is out of range [%lu - %lu]\n",
+			 pdata->refclk,
+			 TC358746A_MIN_REFCLK,
+			 TC358746A_MAX_REFCLK);
+		return -1;
+	}
+
+	/* Check PCLK frequency */
+	if (!IS_BETWEEN(tc358746a->pclk, TC358746A_MIN_PCLK,
+			TC358746A_MAX_PCLK)) {
+		v4l2_err(sd, "pclk (%lu) is out of range [%lu - %lu]\n",
+			 tc358746a->pclk,
+			 TC358746A_MIN_PCLK,
+			 TC358746A_MAX_PCLK);
+		return -1;
+	}
+
+	/* PPIRxClk is used to in CSIRx for detecting CSI LP-to-HS transition,
+	 * so we need to maximize its frequency.
+	 */
+	for (p = 2 ; p >= 0 ; p--) {
+		for (s = 0 ; s < 3 ; s++) {
+			pll_clk = tc358746a->pclk * div[s];
+			ppi_clk = pll_clk / div[p];
+
+			if (IS_BETWEEN(ppi_clk, TC358746A_MIN_PPICLK,
+				       TC358746A_MAX_PPICLK)) {
+				goto ppi_ok;
+			}
+		}
+	}
+
+#undef IS_BETWEEN
+
+	v4l2_err(sd, "Cannot find PPI_CLK for PCLK at %lu\n", tc358746a->pclk);
+	v4l2_err(sd, "PPI_CLK must be between %lu and %lu\n",
+		 TC358746A_MIN_PPICLK, TC358746A_MAX_PPICLK);
+
+	return -1;
+
+ppi_ok:
+
+	clkctl.sclkdiv   = s;
+	clkctl.ppiclkdiv = p;
+
+	/* Program CSI PLL: exhaustive search of the divider triple
+	 * minimizing |pll_clk - candidate|, where
+	 * pll_clk = RefClk * [(FBD + 1) / (PRD + 1)] * [1 / (2^FRS)]
+	 */
+	for (frs = 0 ; frs < 4 ; frs++) {
+		for (prd = 0 ; prd < 16 ; prd++) {
+			for (fbd = 0 ; fbd < 512 ; fbd++) {
+				unsigned long pll_clk_test;
+				unsigned long error;
+
+				/* 64-bit: refclk * 512 overflows 32 bits. */
+				pll_clk_test = div_u64((u64)pdata->refclk *
						       (fbd + 1),
						       (prd + 1) << frs);
+
+				if (pll_clk_test > pll_clk)
+					error = pll_clk_test - pll_clk;
+				else
+					error = pll_clk - pll_clk_test;
+
+				/* Save the configuration */
+				if (error < best_error) {
+					best_error = error;
+					pllctl0.pll_fbd = fbd;
+					pllctl0.pll_prd = prd;
+					pllctl1.pll_frs = frs;
+				}
+			}
+		}
+	}
+
+	tc358746a_write(sd, PLLCTL0, pllctl0._register);
+	tc358746a_write(sd, PLLCTL1, pllctl1._register);
+	tc358746a_write(sd, CLKCTL,  clkctl._register);
+
+	/* Update pclk with the frequency actually produced. */
+	tc358746a->pclk = div_u64((u64)pdata->refclk *
+				  (pllctl0.pll_fbd + 1),
+				  (pllctl0.pll_prd + 1) *
+				  (1 << pllctl1.pll_frs) *
+				  div[clkctl.sclkdiv]);
+
+	/* MCLK configuration */
+	tc358746a_write(sd, MCLKCTL, 0x0201);
+
+	return 0;
+}
+
+/* Loop through all delay value in order to find the working window. And then
+ * set the delay value at its middle
+ */
+static int tc358746a_calibrate(struct v4l2_subdev *sd)
+{
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	int first_dly = 0;
+	int state = 0;	/* 0: searching window start, 1: inside window */
+	unsigned error_wait;
+	unsigned dly;
+
+	/* Designated initializer zeroes the remaining bits of the union. */
+	union phytimdly phytimdly = {
+		.dsettle     = 0,
+		.td_term_sel = 1,
+		.tc_term_sel = 1,
+	};
+
+
+	/* A non-zero platform-provided value skips calibration entirely. */
+	dly = tc358746a->pdata->phytimdly;
+	if (dly != 0) {
+		/* Use provided delay value */
+		phytimdly.dsettle = dly;
+		tc358746a_write(sd, PHYTIMDLY, phytimdly._register);
+		return 0;
+	}
+
+	error_wait = tc358746a->pdata->calibration_delay_ms;
+	if (error_wait == 0) {
+		/* Default to 30ms */
+		error_wait = 30;
+	}
+
+	/* Sweep the whole 7-bit dsettle range, watching the PHY/CSI error
+	 * bits to locate the window of error-free values. */
+	for (dly = 0 ; dly <= 0x7f ; dly++) {
+		union csistatus csistatus;
+		union physta physta;
+		int error;
+
+		phytimdly.dsettle = dly;
+
+		tc358746a_write(sd, PHYTIMDLY, phytimdly._register);
+
+		/* Clear previous error measure */
+		tc358746a_write(sd, CSISTATUS, 0xffff);
+		msleep(error_wait); /* In order to receive some packets */
+
+		/* Read Status */
+		tc358746a_read(sd, CSISTATUS, &csistatus._register);
+		tc358746a_read(sd, PHYSTA, &physta._register);
+
+		/* 0x0055 = SYNERR bits of all four lanes; 0x01FF = every
+		 * CSI receive error flag. */
+		error = ((physta._register & 0x0055) != 0) ||
+			((csistatus._register & 0x01FF) != 0);
+
+		/* We hit the first possible value of the PHYTIMDLY window */
+		if (!error && !state) {
+
+			/* In fast calibration mode, assume the window is ~10
+			 * dly large and set the value in the middle */
+#ifdef CONFIG_TC358746A_FAST_CALIBRATION
+			phytimdly.dsettle = dly + 4;
+			tc358746a_write(sd, PHYTIMDLY, phytimdly._register);
+			return 0;
+#endif
+
+			first_dly = dly;
+			state = 1;
+		}
+
+		/* We hit the last possible value of the PHYTIMDLY window */
+		if (error && state) {
+			/* Position the PHYTIMDLY in the middle of the window */
+			phytimdly.dsettle = (dly + first_dly) / 2;
+			v4l2_info(sd,
+				  "Measured PHYTIMDLY: %d\n",
+				  phytimdly.dsettle);
+			tc358746a_write(sd, PHYTIMDLY, phytimdly._register);
+			return 0;
+		}
+	}
+
+	/* NOTE(review): if the window extends to dly == 0x7f the loop ends
+	 * without centering and reports failure — confirm this is intended. */
+	v4l2_err(sd, "Could not find a correct PHYTIMDLY value\n");
+
+	return -1;
+}
+
+/* Video .s_stream: configure the bridge (reset, PLL, PHY delay, data
+ * format, parallel output), start the remote sensor, then calibrate the
+ * PHY settle delay. On disable, stop the sensor and power the bridge off.
+ *
+ * Fixes vs. original:
+ *  - the confctl and datafmt unions were declared uninitialized, so the
+ *    bits not explicitly assigned carried stack garbage into the CONFCTL
+ *    and DATAFMT hardware registers; both are now zero-initialized.
+ *  - the confctl field assignments were chained with ',' instead of ';'
+ *    (one giant comma expression swallowing the following write call). */
+static int tc358746a_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	struct tc358746a_platform_data *pdata = tc358746a->pdata;
+	struct v4l2_subdev *remote;
+	struct v4l2_subdev_format fmt;
+	struct v4l2_dv_timings timings;
+	int ret;
+	union confctl confctl = { ._register = 0 };
+	union datafmt datafmt = { ._register = 0 };
+
+	remote = tc358746a_get_remote_sd(sd);
+
+	if (!remote)
+		return -ENODEV;
+
+	/* For stream off, we just need to propagate the command */
+	if (enable == 0) {
+		ret = v4l2_subdev_call(remote, video, s_stream, enable);
+
+		if (ret < 0)
+			v4l2_err(sd, "cannot call s_stream on remote subdev\n");
+
+		if (pdata->set_power)
+			pdata->set_power(TC358746A_POWER_OFF);
+
+		return ret;
+	}
+
+	/* TC358746A needs to be configured before any CSI stream is enabled,
+	 * thus we have to put a default configuration, activate the remote
+	 * device and then auto-calibrate if needed.
+	 */
+	if (pdata->set_power)
+		pdata->set_power(TC358746A_POWER_ON);
+
+	/* Software reset */
+	tc358746a_write(sd, SYSCTL, 0x0001);
+	tc358746a_write(sd, SYSCTL, 0x0000);
+
+	/* Ask remote subdev the pixel clock */
+	ret = v4l2_subdev_call(remote, video, g_dv_timings, &timings);
+	if (ret < 0) {
+		v4l2_err(sd, "cannot call g_dv_timings on remote subdev\n");
+		goto power_off;
+	}
+	tc358746a->pclk = timings.bt.pixelclock;
+
+	ret = tc358746a_set_pclk(sd);
+	if (ret < 0)
+		goto power_off;
+
+	/* PHYTimDly - csettle & dsettle
+	 * Here we set it to its minimum, after activating the remote device, we
+	 * will start the auto-calibration.
+	 */
+	tc358746a_write(sd, PHYTIMDLY, 0x8000);
+
+	/* Here we explicitly specify the format, since in some cases, the
+	 * remote subdev generates some embedded data at the beginning with
+	 * non-consistent MIPI code.
+	 */
+	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	fmt.pad   = 0;
+
+	ret = v4l2_subdev_call(remote, pad, get_fmt, NULL, &fmt);
+	if (ret < 0) {
+		v4l2_err(sd, "cannot call get_fmt on remote subdev\n");
+		goto power_off;
+	}
+
+	datafmt.udt_en = 1;
+
+	switch (fmt.format.code) {
+	case V4L2_MBUS_FMT_SBGGR10_1X10:
+	case V4L2_MBUS_FMT_SGBRG10_1X10:
+	case V4L2_MBUS_FMT_SGRBG10_1X10:
+	case V4L2_MBUS_FMT_SRGGB10_1X10:
+		datafmt.pdformat = 1;	/* RAW10 */
+		break;
+	case V4L2_MBUS_FMT_SBGGR12_1X12:
+	case V4L2_MBUS_FMT_SGBRG12_1X12:
+	case V4L2_MBUS_FMT_SGRBG12_1X12:
+	case V4L2_MBUS_FMT_SRGGB12_1X12:
+		datafmt.pdformat = 2;	/* RAW12 */
+		break;
+	default:
+		v4l2_err(sd, "code not supported yet\n");
+		ret = -EINVAL;
+		goto power_off;
+	}
+
+	tc358746a_write(sd, DATAFMT, datafmt._register);
+
+	/* ConfCtl */
+	confctl.datalane  = pdata->lanes - 1;
+	confctl.auto_incr = 1;
+	confctl.pclkp     = 0;
+	confctl.hsyncp    = 0;
+	confctl.vsyncp    = 0;
+	confctl.ppen      = 1;
+	confctl.pdataf    = 0;
+	confctl.bt656en   = 0;
+	confctl.inte2n    = 0;
+	confctl.trien     = 1;
+
+	tc358746a_write(sd, CONFCTL, confctl._register);
+
+	/* Finally we can call the stream on for the remote subdev */
+	ret = v4l2_subdev_call(remote, video, s_stream, enable);
+	if (ret < 0) {
+		v4l2_err(sd, "cannot call s_stream on remote subdev\n");
+		goto power_off;
+	}
+	/* We finish with a calibration in order to center the delay */
+	ret = tc358746a_calibrate(sd);
+	if (ret < 0)
+		goto ecalibrate;
+
+	return 0;
+
+ecalibrate:
+	v4l2_subdev_call(remote, video, s_stream, 0);
+power_off:
+	if (pdata->set_power)
+		pdata->set_power(TC358746A_POWER_OFF);
+
+	return ret;
+}
+
+/* Video .g_dv_timings: the bridge has no timing generator of its own,
+ * so the query is forwarded to the connected sensor. */
+static int tc358746a_g_dv_timings(struct v4l2_subdev *sd,
+				  struct v4l2_dv_timings *timings)
+{
+	struct v4l2_subdev *remote;
+
+	remote = tc358746a_get_remote_sd(sd);
+	if (remote == NULL)
+		return -ENODEV;
+
+	return v4l2_subdev_call(remote, video, g_dv_timings, timings);
+}
+
+
+static const struct v4l2_subdev_video_ops tc358746a_video_ops = {
+ .s_stream = tc358746a_s_stream,
+ .g_dv_timings = tc358746a_g_dv_timings,
+};
+
+static const struct v4l2_subdev_ops tc358746a_ops = {
+ .core = &tc358746a_core_ops,
+ .video = &tc358746a_video_ops,
+ .pad = &tc358746a_pad_ops,
+};
+
+/* Power the bridge up just long enough to read CHIPID and confirm it is
+ * a TC358746A (rev0 or current). Returns 0 when found, a negative errno
+ * on I2C/power failure, -ENODEV on ID mismatch. The chip is always
+ * powered back off before returning. */
+static int tc358746a_detect_chip(struct v4l2_subdev *sd)
+{
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	struct tc358746a_platform_data *pdata = tc358746a->pdata;
+	int ret = 0;
+	u16 chip_id = 0;
+
+	if (pdata->set_power) {
+		ret = pdata->set_power(TC358746A_POWER_ON);
+		if (ret) {
+			v4l2_err(sd, "Power on failed\n");
+			return ret;
+		}
+	}
+
+	ret = tc358746a_read(sd, CHIPID, &chip_id);
+
+	/* Power off before inspecting the result so no path leaves the
+	 * chip on. */
+	if (pdata->set_power)
+		pdata->set_power(TC358746A_POWER_OFF);
+
+	if (ret) {
+		v4l2_err(sd, "Couldn't read on I2C bus");
+		return ret;
+	}
+
+	if (chip_id == TC358746A_CHIPID ||
+	    chip_id == TC358746A_CHIPID_REV0) {
+		v4l2_info(sd, "Found " DRIVER_NAME " chip\n");
+
+		return 0;
+	}
+
+	v4l2_err(sd, "Error Chip ID = 0x%04x instead of 0x%04x\n",
+		 chip_id, TC358746A_CHIPID);
+
+	return -ENODEV;
+}
+
+/* I2C probe: validate platform data, allocate the driver state, register
+ * the subdev and its two media pads (CSI sink, parallel source), then
+ * verify the chip is actually present.
+ *
+ * Fix vs. original: the shared error path called media_entity_cleanup()
+ * even when media_entity_init() itself had failed; cleanup now runs only
+ * after a successful init (chip-detection failure path). */
+static int tc358746a_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct tc358746a *tc358746a;
+	struct tc358746a_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	int ret = 0;
+
+	if (pdata == NULL) {
+		dev_err(&client->dev, "platform data not specified\n");
+		return -EINVAL;
+	}
+
+	/* refclk is mandatory: the PLL setup divides by it. */
+	if (pdata->refclk == 0) {
+		dev_err(&client->dev, "refclk frequency is not specified\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "i2c not available\n");
+		return -ENODEV;
+	}
+
+	tc358746a = kzalloc(sizeof(*tc358746a), GFP_KERNEL);
+
+	if (!tc358746a) {
+		dev_err(&client->dev, "alloc failed for data structure\n");
+		return -ENOMEM;
+	}
+
+	tc358746a->pdata = pdata;
+
+	sd = &tc358746a->sd;
+	v4l2_i2c_subdev_init(sd, client, &tc358746a_ops);
+
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	tc358746a->pad[TC358746A_IN_PAD].flags  = MEDIA_PAD_FL_SINK;
+	tc358746a->pad[TC358746A_OUT_PAD].flags = MEDIA_PAD_FL_SOURCE;
+	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+	ret = media_entity_init(&sd->entity,
+				TC358746A_NUM_PADS,
+				tc358746a->pad,
+				0);
+
+	if (ret < 0) {
+		v4l_err(client, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	/* Check if the chip is present */
+	ret = tc358746a_detect_chip(sd);
+	if (ret < 0)
+		goto edetect;
+
+	v4l2_info(sd, "TC358746A MIPI bridge successfully probed");
+
+	return 0;
+
+edetect:
+	/* Entity init succeeded on this path — undo it. */
+	media_entity_cleanup(&sd->entity);
+emedia:
+	/* Unregister while 'sd' (embedded in tc358746a) is still valid. */
+	v4l2_device_unregister_subdev(sd);
+	kfree(tc358746a);
+
+	return ret;
+}
+
+/* I2C remove: power the bridge off and release everything probe
+ * allocated (media entity, subdev registration, driver state). */
+static int tc358746a_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tc358746a *tc358746a = to_tc358746a(sd);
+	struct tc358746a_platform_data *pdata = client->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (pdata->set_power)
+		pdata->set_power(TC358746A_POWER_OFF);
+
+	/* Cleanup/unregister before kfree: sd is embedded in tc358746a. */
+	media_entity_cleanup(&sd->entity);
+	v4l2_device_unregister_subdev(sd);
+	kfree(tc358746a);
+
+	return 0;
+}
+
+static const struct i2c_device_id tc358746a_id[] = {
+ {DRIVER_NAME, 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, tc358746a_id);
+
+static struct i2c_driver tc358746a_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = tc358746a_probe,
+ .remove = tc358746a_remove,
+ .id_table = tc358746a_id,
+};
+
+module_i2c_driver(tc358746a_driver);
diff --git a/drivers/media/video/tc358746a_reg.h b/drivers/media/video/tc358746a_reg.h
new file mode 100644
index 00000000000000..02a61911206882
--- /dev/null
+++ b/drivers/media/video/tc358746a_reg.h
@@ -0,0 +1,494 @@
+/*
+ * tc358746a register map
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Wed Jul 2 09:16:13 CEST 2014
+ *
+ */
+#ifndef __TC358746A_REG_H__
+#define __TC358746A_REG_H__
+
+/* I2C parameters */
+#define TC358746A_CHIPID_REV0 0x4400
+#define TC358746A_CHIPID 0x4401
+
+/* Clocks constraints */
+#define TC358746A_MIN_REFCLK UL( 6000000)
+#define TC358746A_MAX_REFCLK UL( 40000000)
+#define TC358746A_MIN_PCLK UL( 0)
+#define TC358746A_MAX_PCLK UL(100000000)
+#define TC358746A_MIN_PPICLK UL( 66000000)
+#define TC358746A_MAX_PPICLK UL(125000000)
+
+/* Global */
+#define CHIPID 0x0000
+#define SYSCTL 0x0002
+#define CONFCTL 0x0004
+#define FIFOCTL 0x0006
+#define DATAFMT 0x0008
+#define MCLKCTL 0x000C
+#define GPIOEN 0x000E
+#define GPIODIR 0x0010
+#define GPIOIN 0x0012
+#define GPIOOUT 0x0014
+#define PLLCTL0 0x0016
+#define PLLCTL1 0x0018
+#define CLKCTL 0x0020
+#define WORDCNT 0x0022
+#define CSITX_DT 0x0050
+#define PHYCLKCTL 0x0056
+#define PHYDATA0CTL 0x0058
+#define PHYDATA1CTL 0x005A
+#define PHYDATA2CTL 0x005C
+#define PHYDATA3CTL 0x005E
+#define PHYTIMDLY 0x0060
+#define PHYSTA 0x0062
+#define CSISTATUS 0x0064
+#define CSIERREN 0x0066
+#define MDLSYNERR 0x0068
+#define CSIDID 0x006A
+#define CSIDIDERR 0x006C
+#define CSIPKTLEN 0x006E
+#define CSIRX_DPCTL 0x0070
+
+/* CSI2-RX Status Counters */
+#define FRMERRCNT 0x0080
+#define CRCERRCNT 0x0082
+#define CORERRCNT 0x0084
+#define HDRERRCNT 0x0086
+#define EIDERRCNT 0x0088
+#define CTLERRCNT 0x008A
+#define SOTERRCNT 0x008C
+#define SYNERRCNT 0x008E
+#define MDLERRCNT 0x0090
+#define FIFOSTATUS 0x00F8
+
+/* TX PHY */
+#define CLW_DPHYCONTTX 0x0100
+#define D0W_DPHYCONTTX 0x0104
+#define D1W_DPHYCONTTX 0x0108
+#define D2W_DPHYCONTTX 0x010C
+#define D3W_DPHYCONTTX 0x0110
+#define CLW_CNTRL 0x0140
+#define D0W_CNTRL 0x0144
+#define D1W_CNTRL 0x0148
+#define D2W_CNTRL 0x014C
+#define D3W_CNTRL 0x0150
+#define STARTCNTRL 0x0204
+#define STATUS 0x0208
+#define LINEINITCNT 0x0210
+#define LPTXTIMECNT 0x0214
+#define TCLK_HEADERCNT 0x0218
+#define TCLK_TRAILCNT 0x021C
+#define THS_HEADERCNT 0x0220
+#define TWAKEUP 0x0224
+#define TCLK_POSTCNT 0x0228
+#define THS_TRAILCNT 0x022C
+#define HSTXVREGCNT 0x0230
+#define HSTXVREGEN 0x0234
+
+/* TX CTRL */
+#define CSI_CONTROL 0x040C
+#define CSI_STATUS 0x0410
+#define CSI_INT 0x0414
+#define CSI_INT_ENA 0x0418
+#define CSI_ERR 0x044C
+#define CSI_ERR_INTENA 0x0450
+#define CSI_ERR_HALT 0x0454
+#define CSI_CONFW 0x0500
+#define CSI_RESET 0x0504
+#define CSI_INT_CLR 0x050C
+#define CSI_START 0x0518
+
+/* Debug Tx */
+#define DBG_LCNT 0x00e0
+#define DBG_WIDTH 0x00e2
+#define DBG_VBLANK 0x00e4
+#define DBG_DATA 0x00e8
+
+/*
+ * Global Registers
+ */
+union sysctl {
+ struct {
+ uint16_t sreset : 1;
+ uint16_t sleep : 1;
+ };
+ uint16_t _register;
+};
+
+union confctl {
+ struct {
+ uint16_t datalane : 2;
+ uint16_t auto_incr : 1;
+ uint16_t pclkp : 1;
+ uint16_t hsyncp : 1;
+ uint16_t vsyncp : 1;
+ uint16_t ppen : 1;
+ unsigned /* unused */ : 1;
+ uint16_t pdataf : 2;
+ unsigned /* unused */ : 2;
+ uint16_t bt656en : 1;
+ uint16_t inte2n : 1;
+ unsigned /* unused */ : 1;
+ uint16_t trien : 1;
+ };
+ uint16_t _register;
+};
+
+union fifoctl {
+ struct {
+ uint16_t fifolevel : 9;
+ };
+ uint16_t _register;
+};
+
+union datafmt {
+ struct {
+ uint16_t udt_en : 1;
+ unsigned /* unused */ : 3;
+ uint16_t pdformat : 4;
+ };
+ uint16_t _register;
+};
+
+union mclkctl {
+ struct {
+ uint16_t mclk_low : 8;
+ uint16_t mclk_high : 8;
+ };
+ uint16_t _register;
+};
+
+union gpioen {
+ struct {
+ unsigned /* unused */ : 4;
+ uint16_t gpioen : 12;
+ };
+ uint16_t _register;
+};
+
+union gpiodir {
+ struct {
+ uint16_t gpiodir : 16;
+ };
+ uint16_t _register;
+};
+
+union gpiopin {
+ struct {
+ uint16_t gpioin : 16;
+ };
+ uint16_t _register;
+};
+
+union gpioout {
+ struct {
+ uint16_t gpioout : 16;
+ };
+ uint16_t _register;
+};
+
+union pllctl0 {
+ struct {
+ uint16_t pll_fbd : 9;
+ unsigned /* unused */ : 3;
+ uint16_t pll_prd : 4;
+ };
+ uint16_t _register;
+};
+
+union pllctl1 {
+ struct {
+ uint16_t pll_en : 1;
+ uint16_t resetb : 1;
+ unsigned /* unused */ : 2;
+ uint16_t cken : 1;
+ uint16_t bypcken : 1;
+ uint16_t lfbren : 1;
+ unsigned /* unused */ : 1;
+ uint16_t pll_lbws : 2;
+ uint16_t pll_frs : 2;
+ };
+ uint16_t _register;
+};
+
+union clkctl {
+ struct {
+ uint16_t sclkdiv : 2;
+ uint16_t mclkrefdiv : 2;
+ uint16_t ppiclkdiv : 2;
+ };
+ uint16_t _register;
+};
+
+
+union wordcnt {
+ struct {
+ uint16_t wordcnt : 16;
+ };
+ uint16_t _register;
+};
+
+union csitx_dt {
+ struct {
+ uint16_t csitx_dt : 8;
+ };
+ uint16_t _register;
+};
+
+/*
+ * Rx control Registers
+ */
+union phyclkctl {
+ struct {
+ uint16_t clkdly : 4;
+ uint16_t hsrxrs : 2;
+ uint16_t cap : 2;
+ };
+ uint16_t _register;
+};
+
+union phydata0ctl {
+ struct {
+ uint16_t datadly : 4;
+ uint16_t hsrxrs : 2;
+ uint16_t cap : 2;
+ };
+ uint16_t _register;
+};
+
+union phydata1ctl {
+ struct {
+ uint16_t datadly : 4;
+ uint16_t hsrxrs : 2;
+ uint16_t cap : 2;
+ };
+ uint16_t _register;
+};
+
+union phydata2ctl {
+ struct {
+ uint16_t datadly : 4;
+ uint16_t hsrxrs : 2;
+ uint16_t cap : 2;
+ };
+ uint16_t _register;
+};
+
+union phydata3ctl {
+ struct {
+ uint16_t datadly : 4;
+ uint16_t hsrxrs : 2;
+ uint16_t cap : 2;
+ };
+ uint16_t _register;
+};
+
+union phytimdly {
+ struct {
+ uint16_t dsettle : 7;
+ uint16_t td_term_sel : 1;
+ unsigned /* unused */ : 7;
+ uint16_t tc_term_sel : 1;
+ };
+ uint16_t _register;
+};
+
+union physta {
+ struct {
+ uint16_t synerr0 : 1;
+ uint16_t soterr0 : 1;
+ uint16_t synerr1 : 1;
+ uint16_t soterr1 : 1;
+ uint16_t synerr2 : 1;
+ uint16_t soterr2 : 1;
+ uint16_t synerr3 : 1;
+ uint16_t soterr3 : 1;
+ };
+ uint16_t _register;
+};
+
+union csistatus {
+ struct {
+ uint16_t synerr : 1;
+ uint16_t soterr : 1;
+ uint16_t ctlerr : 1;
+ uint16_t eiderr : 1;
+ uint16_t hdrerr : 1;
+ uint16_t corerr : 1;
+ uint16_t crcerr : 1;
+ uint16_t frmerr : 1;
+ uint16_t mdlerr : 1;
+ };
+ uint16_t _register;
+};
+
+union csierren {
+ struct {
+ uint16_t synen : 1;
+ uint16_t soten : 1;
+ uint16_t ctlen : 1;
+ uint16_t eiden : 1;
+ uint16_t hdren : 1;
+ uint16_t coren : 1;
+ uint16_t crcen : 1;
+ uint16_t frmen : 1;
+ uint16_t mdlen : 1;
+ };
+ uint16_t _register;
+};
+
+union mdlsynerr {
+ struct {
+ uint16_t sync0 : 1;
+ uint16_t sync1 : 1;
+ uint16_t sync2 : 1;
+ uint16_t sync3 : 1;
+ };
+ uint16_t _register;
+};
+
+union csidid {
+ struct {
+ uint16_t datatype : 8;
+ };
+ uint16_t _register;
+};
+
+union csididerr {
+ struct {
+ uint16_t errtype : 8;
+ };
+ uint16_t _register;
+};
+
+union csipktlen {
+ struct {
+ uint16_t pktlen : 16;
+ };
+ uint16_t _register;
+};
+
+union csirx_dpctl {
+ struct {
+ uint16_t rxch0_cntrl : 2;
+ uint16_t rxch1_cntrl : 2;
+ uint16_t rxch2_cntrl : 2;
+ uint16_t rxch3_cntrl : 2;
+ uint16_t rxck_cntrl : 2;
+ };
+ uint16_t _register;
+};
+
+/*
+ * Rx Status Register
+ */
+union frmerrcnt {
+ struct {
+ uint16_t frmerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union crcerrcnt {
+ struct {
+ uint16_t crcerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union corerrcnt {
+ struct {
+ uint16_t corerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union hdrerrcnt {
+ struct {
+ uint16_t hdrerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union eiderrcnt {
+ struct {
+ uint16_t eiderrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+
+union ctlerrcnt {
+ struct {
+ uint16_t ctlerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union soterrcnt {
+ struct {
+ uint16_t soterrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union synerrcnt {
+ struct {
+ uint16_t synerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union mdlerrcnt {
+ struct {
+ uint16_t mdlerrcnt : 8;
+ };
+ uint16_t _register;
+};
+
+union fifostatus {
+ struct {
+ uint16_t vb_oflow : 1;
+ uint16_t vb_uflow : 1;
+ };
+ uint16_t _register;
+};
+
+/*
+ * TxDebug Register
+ */
+union dbg_lcnt {
+ struct {
+ uint16_t db_alcnt : 10;
+ unsigned /* unused */ : 4;
+ uint16_t db_cen : 1;
+ uint16_t db_wsram : 1;
+ };
+ uint16_t _register;
+};
+
+union dbg_width {
+ struct {
+ uint16_t db_width : 12;
+ };
+ uint16_t _register;
+};
+
+union dbg_vblank {
+ struct {
+ uint16_t db_vb : 7;
+ };
+ uint16_t _register;
+};
+
+union dbg_data {
+ struct {
+ uint16_t db_data : 16;
+ };
+ uint16_t _register;
+};
+
+#endif /* __TC358746A_REG_H__ */
diff --git a/drivers/media/video/tc358764.c b/drivers/media/video/tc358764.c
new file mode 100644
index 00000000000000..c0d547ea33842f
--- /dev/null
+++ b/drivers/media/video/tc358764.c
@@ -0,0 +1,782 @@
+/* Driver for the TC358764 Parallel to MIPI bridge
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 12-Dec-2014
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <media/tc358764.h>
+
+#define DEVICE_NAME "tc358764"
+
+/* Chip ID/revision register, used as a presence check at probe time */
+#define TC358764_REG_VERSION 0x0000
+/* Expected chip revision */
+#define TC358764_VERSION 0x4401
+
+/* These registers can be written using tc358764_write_dsi_config */
+#define TC358764_REG_DSI_CONTROL 0x040C
+#define TC358764_REG_DSI_INT_ENA 0x0418
+#define TC358764_REG_DSI_ACKERR_INTENA 0x0438
+#define TC358764_REG_DSI_ACKERR_HALT 0x043C
+#define TC358764_REG_DSI_RXERR_INTENA 0x0444
+#define TC358764_REG_DSI_RXERR_HALT 0x0448
+#define TC358764_REG_DSI_ERR_INTENA 0x0450
+#define TC358764_REG_DSI_ERR_HALT 0x0454
+
+/* Indirect set/clear access port for the registers listed above */
+#define TC358764_REG_DSI_CONFW 0x0500
+
+/* DSI command generator: type/word-count/payload registers + TX trigger */
+#define TC358764_REG_DSICMD_TX 0x0600
+#define TC358764_REG_DSICMD_TYPE 0x0602
+#define TC358764_REG_DSICMD_WC 0x0604
+#define TC358764_REG_DSICMD_WD0 0x0610
+#define TC358764_REG_DSICMD_WD1 0x0612
+#define TC358764_REG_DSICMD_WD2 0x0614
+#define TC358764_REG_DSICMD_WD3 0x0616
+
+/* Per-device driver state: currently just the bound I2C client */
+struct tc358764 {
+	struct i2c_client *client;
+};
+
+/*
+ * tc358764_read16() - read one 16-bit bridge register over I2C.
+ * @tc358764: device handle
+ * @reg:      register address, sent MSB first on the wire
+ * @val:      filled with the register contents on success
+ *
+ * Returns 0 on success or a negative errno.  i2c_transfer() returns the
+ * number of messages completed, so a non-negative result smaller than the
+ * message count is a partial transfer and is reported as -EIO instead of
+ * being silently treated as success.
+ */
+static int tc358764_read16(struct tc358764 *tc358764, u16 reg, u16 *val)
+{
+	struct i2c_client *client = tc358764->client;
+	u8 regb[2];
+	u8 valb[2];
+	int ret;
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(regb),
+			.buf = regb,
+		},
+		[1] = {
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = ARRAY_SIZE(valb),
+			.buf = valb,
+		},
+	};
+
+	/* Register address goes out big-endian */
+	regb[0] = (u8)(reg >> 8);
+	regb[1] = (u8)(reg);
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read failed with error %d", ret);
+		return ret;
+	}
+	if (ret != ARRAY_SIZE(msg)) {
+		dev_err(&client->dev,
+			"I2C read incomplete: %d/%zu messages", ret,
+			ARRAY_SIZE(msg));
+		return -EIO;
+	}
+
+	/* Register contents come back big-endian as well */
+	*val = (((u16)valb[0]) << 8) | ((u16)valb[1]);
+
+	return 0;
+}
+
+/*
+ * tc358764_write16() - write one 16-bit bridge register over I2C.
+ * @tc358764: device handle
+ * @reg:      register address, sent MSB first
+ * @val:      value to write, sent MSB first
+ *
+ * Returns 0 on success or a negative errno.  A partial transfer (see
+ * tc358764_read16()) is reported as -EIO.
+ */
+static int tc358764_write16(struct tc358764 *tc358764, u16 reg, u16 val)
+{
+	struct i2c_client *client = tc358764->client;
+	u8 data[4];
+	int ret;
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(data),
+			.buf = data,
+		},
+	};
+
+	/* 16-bit register address followed by the 16-bit value, MSB first */
+	data[0] = (u8)(reg >> 8);
+	data[1] = (u8)(reg);
+
+	data[2] = (u8)(val >> 8);
+	data[3] = (u8)(val);
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C write failed with error %d", ret);
+		return ret;
+	}
+	if (ret != ARRAY_SIZE(msg)) {
+		dev_err(&client->dev,
+			"I2C write incomplete: %d/%zu messages", ret,
+			ARRAY_SIZE(msg));
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * tc358764_write32() - write one 32-bit bridge register over I2C.
+ * @tc358764: device handle
+ * @reg:      register address, sent MSB first
+ * @val:      32-bit value to write
+ *
+ * Returns 0 on success or a negative errno.  A partial transfer is
+ * reported as -EIO.
+ */
+static int tc358764_write32(struct tc358764 *tc358764, u16 reg, u32 val)
+{
+	struct i2c_client *client = tc358764->client;
+	u8 data[6];
+	int ret;
+
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(data),
+			.buf = data,
+		},
+	};
+
+	data[0] = (u8)(reg >> 8);
+	data[1] = (u8)(reg);
+
+	/*
+	 * The chip uses a "PDP-endian" byte order for 32-bit writes: the
+	 * low 16-bit half goes first, each half MSB first.
+	 */
+	data[2] = (u8)(val >> 8);
+	data[3] = (u8)(val);
+	data[4] = (u8)(val >> 24);
+	data[5] = (u8)(val >> 16);
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C write failed with error %d", ret);
+		return ret;
+	}
+	if (ret != ARRAY_SIZE(msg)) {
+		dev_err(&client->dev,
+			"I2C write incomplete: %d/%zu messages", ret,
+			ARRAY_SIZE(msg));
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Certain registers have to be written indirectly using the DSI_CONFW
+ * register. It lets us set or clear individual bits (depending on whether set
+ * is true or false) data in reg */
+/* Certain registers have to be written indirectly using the DSI_CONFW
+ * register.  It lets us set or clear individual bits (depending on whether
+ * @set is true or false) of @data in @reg.
+ *
+ * Returns 0 on success, -EINVAL if @reg is not one of the indirectly
+ * writable registers, or a negative errno from the underlying I2C write.
+ */
+static int tc358764_write_dsi_config(struct tc358764 *tc358764,
+				     u16 reg,
+				     int set,
+				     u16 data)
+{
+	u32 id = 0;
+	u32 r;
+	int i;
+
+	/* The DSI_CONFW register can be used to write to a specific list of
+	 * registers selected with a 5bit identifier */
+	static const struct { u16 reg; u32 id; } id_list[] = {
+		{ TC358764_REG_DSI_CONTROL,       0x03 },
+		{ TC358764_REG_DSI_INT_ENA,       0x06 },
+		{ TC358764_REG_DSI_ACKERR_INTENA, 0x0e },
+		{ TC358764_REG_DSI_ACKERR_HALT,   0x0f },
+		{ TC358764_REG_DSI_RXERR_INTENA,  0x11 },
+		{ TC358764_REG_DSI_RXERR_HALT,    0x12 },
+		{ TC358764_REG_DSI_ERR_INTENA,    0x14 },
+		{ TC358764_REG_DSI_ERR_HALT,      0x15 },
+	};
+
+	for (i = 0; i < ARRAY_SIZE(id_list); i++) {
+		if (id_list[i].reg == reg) {
+			id = id_list[i].id;
+			break;
+		}
+	}
+
+	/* A driver-internal programming error, but not worth crashing the
+	 * whole kernel over (was BUG_ON): warn and fail the call instead. */
+	if (WARN_ON(id == 0))
+		return -EINVAL;
+
+	/* Build the DSI_CONFW command value: 5-bit target id, 3-bit opcode
+	 * (0x5 = set bits, 0x6 = clear bits), 16 bits of mask data. */
+	r = id << 24;
+	r |= (set ? 0x5 : 0x6) << 29;
+	r |= (u32)data;
+
+	return tc358764_write32(tc358764, TC358764_REG_DSI_CONFW, r);
+}
+
+/* Send a short command with no parameters */
+static int tc358764_dsi_write_no_param(struct tc358764 *tc358764, u8 cmd)
+{
+ int ret;
+
+ /* Short write without parameter */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TYPE, 0x1005);
+ if (ret)
+ goto err;
+
+ /* Word count is always 0 for short writes */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_WC, 0);
+ if (ret)
+ goto err;
+
+ /* Command byte to be sent */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_WD0, cmd);
+ if (ret)
+ goto err;
+
+ /* Start DSI transfer */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TX, 1);
+ if (ret)
+ goto err;
+
+ err:
+ return ret;
+}
+
+/* Send a short command with one parameters */
+static int tc358764_dsi_write_one_param(struct tc358764 *tc358764,
+ u8 cmd,
+ u8
+ param)
+{
+ int ret;
+
+ /* Short write with one parameter */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TYPE, 0x1015);
+ if (ret)
+ goto err;
+
+ /* Word count is always 0 for short writes */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_WC, 0);
+ if (ret)
+ goto err;
+
+ /* Command and parameter to be sent */
+ ret = tc358764_write16(tc358764,
+ TC358764_REG_DSICMD_WD0,
+ (((u16)param) << 8) | (u16)cmd);
+ if (ret)
+ goto err;
+
+ /* Start DSI transfer */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TX, 1);
+ if (ret)
+ goto err;
+
+ err:
+ return ret;
+}
+
+/* Send a long command with up to 7 parameters */
+static int tc358764_dsi_write_long(struct tc358764 *tc358764,
+ u8 cmd,
+ size_t nparams,
+ u8 *params)
+{
+ int ret;
+
+ BUG_ON(nparams > 7);
+
+ /* DCS long write */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TYPE, 0x4039);
+ if (ret)
+ goto err;
+
+ /* Set number of parameters + 1 for the command byte */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_WC, nparams + 1);
+ if (ret)
+ goto err;
+
+ /* Store the command + parameters to be sent. Unused parameters are
+ * padded with 0s */
+
+#define GET_PARAM(n) ((n) < nparams ? params[(n)] : 0)
+#define INTO_U16(l, h) ((u16)(l) | (((u16)(h)) << 8))
+
+ ret = tc358764_write16(tc358764,
+ TC358764_REG_DSICMD_WD0,
+ INTO_U16(cmd, GET_PARAM(0)));
+ if (ret)
+ goto err;
+
+ ret = tc358764_write16(tc358764,
+ TC358764_REG_DSICMD_WD1,
+ INTO_U16(GET_PARAM(1), GET_PARAM(2)));
+ if (ret)
+ goto err;
+
+
+ ret = tc358764_write16(tc358764,
+ TC358764_REG_DSICMD_WD2,
+ INTO_U16(GET_PARAM(3), GET_PARAM(4)));
+ if (ret)
+ goto err;
+
+ ret = tc358764_write16(tc358764,
+ TC358764_REG_DSICMD_WD3,
+ INTO_U16(GET_PARAM(5), GET_PARAM(6)));
+ if (ret)
+ goto err;
+
+#undef INTO_U16
+#undef GET_PARAM
+
+ /* Start DSI transfer */
+ ret = tc358764_write16(tc358764, TC358764_REG_DSICMD_TX, 1);
+ if (ret)
+ goto err;
+
+ err:
+ return ret;
+}
+
+/// Send DSI command with a variable amount of parameters
+static int tc358764_dsi_write(struct tc358764 *tc358764,
+ u8 cmd,
+ size_t nparams,
+ u8 *params)
+{
+ if (nparams == 0) {
+ return tc358764_dsi_write_no_param(tc358764, cmd);
+ } else if (nparams == 1) {
+ return tc358764_dsi_write_one_param(tc358764, cmd, params[0]);
+ } else if (nparams <= 7) {
+ return tc358764_dsi_write_long(tc358764, cmd, nparams, params);
+ } else {
+ /* Sending commands with more parameters is possible with this
+ * bridge but it's tricky. You need to use the video RAM to
+ * store the command (so you have to stop the video streaming)
+ * and we never managed to find the correct sequence to get it
+ * to work reliably. */
+ dev_err(&tc358764->client->dev,
+ "DSI command with more than 7 "
+ "parameters are not supported");
+ return -EINVAL;
+ }
+
+ /* Not sure if that's entirely necessary but it shouldn't hurt */
+ msleep(1);
+}
+
+/* XXX Hardcoded init for RNB6 */
+static int tc358764_rnb6_init(struct tc358764 *tc358764) {
+ int ret;
+
+#define TRY(exp) do { ret = (exp); if (ret < 0) { goto err; } } while (0)
+
+ // Disable Parallel Input and activate Automatic I2C address increment
+ TRY(tc358764_write16(tc358764, 0x04, 0x04));
+
+ // Software reset
+ TRY(tc358764_write16(tc358764, 0x2, 0x1));
+ msleep(10);
+ TRY(tc358764_write16(tc358764, 0x2, 0x0));
+
+ // PLL config
+ TRY(tc358764_write16(tc358764, 0x16, 0x507e));
+ TRY(tc358764_write16(tc358764, 0x18, 0x0203));
+ msleep(1);
+ TRY(tc358764_write16(tc358764, 0x18, 0x0213));
+
+ TRY(tc358764_write16(tc358764, 0x06, 0x0D0));
+ TRY(tc358764_write16(tc358764, 0x08, 0x37)); //?
+ TRY(tc358764_write16(tc358764, 0x50, 0x3e)); //?
+
+ TRY(tc358764_write32(tc358764, 0x140, 0));
+ TRY(tc358764_write32(tc358764, 0x144, 0));
+ TRY(tc358764_write32(tc358764, 0x148, 0));
+ TRY(tc358764_write32(tc358764, 0x14c, 0));
+ TRY(tc358764_write32(tc358764, 0x150, 0));
+
+ TRY(tc358764_write32(tc358764, 0x210, 0x2c88));
+ TRY(tc358764_write32(tc358764, 0x214, 0x5));
+ TRY(tc358764_write32(tc358764, 0x218, 0x1f06));
+ TRY(tc358764_write32(tc358764, 0x21c, 0x3));
+ TRY(tc358764_write32(tc358764, 0x220, 0x606));
+ TRY(tc358764_write32(tc358764, 0x224, 0x4a88));
+ TRY(tc358764_write32(tc358764, 0x228, 0xb));
+
+
+ TRY(tc358764_write32(tc358764, 0x22c, 0x4));
+
+ TRY(tc358764_write32(tc358764, 0x234, 0x1f));
+
+ TRY(tc358764_write32(tc358764, 0x238, 0x1));
+ TRY(tc358764_write32(tc358764, 0x23c, 0x00050005));
+ TRY(tc358764_write32(tc358764, 0x204, 0x1));
+
+ TRY(tc358764_write16(tc358764, 0x620, 0x1));
+ TRY(tc358764_write16(tc358764, 0x622, 0x11));
+ TRY(tc358764_write16(tc358764, 0x626, 0x500));
+ TRY(tc358764_write16(tc358764, 0x628, 0xd8));
+ TRY(tc358764_write16(tc358764, 0x62c, 0x870));
+
+ //DSI Start
+ TRY(tc358764_write32(tc358764, 0x518, 0x1));
+
+ //Set bit DSI CTRL
+ TRY(tc358764_write_dsi_config(tc358764,
+ TC358764_REG_DSI_CONTROL,
+ 1,
+ 0x0126));
+ //Clear bit DSI CTRL
+ TRY(tc358764_write_dsi_config(tc358764,
+ TC358764_REG_DSI_CONTROL,
+ 0,
+ 0xfe81));
+
+ // INIT LCD
+
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xff, 3, (u8[]){0x12, 0x84, 0x01}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xff, 2, (u8[]){0x12, 0x84}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xc0, 7, (u8[]){0x00, 0x64, 0x00, 0x10, 0x10, 0x00, 0x64}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x87}));
+ TRY(tc358764_dsi_write(tc358764, 0xc0, 2, (u8[]){0x10, 0x10}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xc0, 6, (u8[]){0x00, 0x5c, 0x00, 0x01, 0x00, 0x04}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb3}));
+ TRY(tc358764_dsi_write(tc358764, 0xc0, 2, (u8[]){0x00, 0x55}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x81}));
+ TRY(tc358764_dsi_write(tc358764, 0xc1, 1, (u8[]){0x55}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa0}));
+ TRY(tc358764_dsi_write(tc358764, 0xc4, 7, (u8[]){0x05, 0x10, 0x06, 0x02, 0x05, 0x15, 0x10}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa7}));
+ TRY(tc358764_dsi_write(tc358764, 0xc4, 7, (u8[]){0x05, 0x10, 0x07, 0x02, 0x05, 0x15, 0x10}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb0}));
+ TRY(tc358764_dsi_write(tc358764, 0xc4, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x91}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 2, (u8[]){0x46, 0x42}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xd8, 2, (u8[]){0xc7, 0xc7}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xd9, 1, (u8[]){0x68}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb3}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 1, (u8[]){0x84}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xbb}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 1, (u8[]){0x8a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x82}));
+ TRY(tc358764_dsi_write(tc358764, 0xC4, 1, (u8[]){0x0a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc6}));
+ TRY(tc358764_dsi_write(tc358764, 0xB0, 1, (u8[]){0x03}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc2}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 1, (u8[]){0x40}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc3}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 1, (u8[]){0x85}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xd0, 1, (u8[]){0x40}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xd1, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x87}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 4, (u8[]){0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x97}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x9e}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xae}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xbe}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x05}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xce}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 1, (u8[]){0x05}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xde}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xe0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x05, 0x05, 0x00, 0x05, 0x05}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xe7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xf0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 7, (u8[]){0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xf7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcb, 4, (u8[]){0xff, 0xff, 0xff, 0xff}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x0e, 0x10, 0x0a, 0x0c, 0x02, 0x04, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x87}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x2e, 0x2d, 0x00, 0x29}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x8e}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 1, (u8[]){0x2a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x97}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x0d, 0x0f, 0x09, 0x0b, 0x01, 0x03, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x9e}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x2e, 0x2d, 0x00, 0x29, 0x2a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x2d, 0x2e, 0x00, 0x29}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xbe}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 1, (u8[]){0x2a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x0c, 0x0a, 0x10, 0x0e, 0x04, 0x02, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xce}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x2d, 0x2e, 0x00, 0x29, 0x2a}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcc, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x8b, 0x03, 0x18, 0x8a, 0x03, 0x18, 0x89}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x87}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 5, (u8[]){0x03, 0x18, 0x88, 0x03, 0x18}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x97}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa0}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x07, 0x05, 0x00, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa7}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x06, 0x05, 0x01, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb0}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x05, 0x05, 0x02, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb7}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x04, 0x05, 0x03, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc0}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x03, 0x05, 0x04, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc7}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x02, 0x05, 0x05, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd0}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x01, 0x05, 0x06, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd7}));
+ TRY(tc358764_dsi_write(tc358764, 0xce, 7, (u8[]){0x38, 0x00, 0x05, 0x07, 0x00, 0x18, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x80}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x87}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x97}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xa7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc0}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 7, (u8[]){0x3d, 0x02, 0x15, 0x20, 0x00, 0x00, 0x01}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xc7}));
+ TRY(tc358764_dsi_write(tc358764, 0xcf, 4, (u8[]){0x81, 0x00, 0x03, 0x08}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb5}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 6, (u8[]){0x3f, 0x6f, 0xff, 0x00, 0x6f, 0xff}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 4, (u8[]){0x02, 0x11, 0x02, 0x15}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x90}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 1, (u8[]){0x50}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x94}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 1, (u8[]){0x66}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb2}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb4}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb6}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb8}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x94}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x00, 0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xd2}));
+ TRY(tc358764_dsi_write(tc358764, 0xf5, 2, (u8[]){0x06, 0x15}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0xb4}));
+ TRY(tc358764_dsi_write(tc358764, 0xc5, 1, (u8[]){0xcc}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x25}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x34}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x41}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x52}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x5f}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x60}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x88}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x76}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x8d}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x77}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x63}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x63}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x4e}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x4a}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x3a}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x2b}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x1b}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x10}));
+ TRY(tc358764_dsi_write(tc358764, 0xE1, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x25}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x34}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x41}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x52}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x5f}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x60}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x88}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x76}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x8d}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x77}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x63}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x74}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x4e}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x4a}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x3a}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x2b}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x1b}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x10}));
+ TRY(tc358764_dsi_write(tc358764, 0xE2, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0x00, 1, (u8[]){0x00}));
+ TRY(tc358764_dsi_write(tc358764, 0xff, 3, (u8[]){0xff, 0xff, 0xff}));
+
+ msleep(1);
+
+ TRY(tc358764_dsi_write(tc358764, 0x11, 0, NULL));
+
+ msleep(1);
+
+ TRY(tc358764_dsi_write(tc358764, 0x29, 0, NULL));
+
+ msleep(1);
+
+ TRY(tc358764_write_dsi_config(tc358764,
+ TC358764_REG_DSI_CONTROL,
+ 1,
+ 0x00a7));
+
+ TRY(tc358764_write_dsi_config(tc358764,
+ TC358764_REG_DSI_CONTROL,
+ 0,
+ 0x8000));
+
+ TRY(tc358764_write16(tc358764, 0x4, 0x44));
+
+#undef TRY
+
+ err:
+ return ret;
+}
+
+/*
+ * tc358764_probe() - bind the driver: power up (if a set_power hook is
+ * provided via platform_data), verify the chip version, then run the
+ * hard-coded RNB6 init sequence.
+ *
+ * Fixes: platform_data may be absent and was dereferenced unconditionally;
+ * the error path also called pdata->set_power(0) without checking that the
+ * hook exists — both were NULL-pointer oopses.
+ */
+static int tc358764_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct tc358764_pdata *pdata = client->dev.platform_data;
+	struct tc358764 *tc358764;
+	u16 version;
+	int ret;
+
+	tc358764 = kzalloc(sizeof(*tc358764), GFP_KERNEL);
+	if (tc358764 == NULL)
+		return -ENOMEM;
+
+	tc358764->client = client;
+
+	i2c_set_clientdata(client, tc358764);
+
+	/* platform_data — and its set_power hook — is optional */
+	if (pdata && pdata->set_power) {
+		ret = pdata->set_power(1);
+		if (ret)
+			goto set_power_failed;
+	}
+
+	/* Sanity-check that we are really talking to a TC358764 */
+	ret = tc358764_read16(tc358764, TC358764_REG_VERSION, &version);
+	if (ret)
+		goto powered_failed;
+
+	if (version != TC358764_VERSION) {
+		ret = -EINVAL;
+		dev_err(&client->dev,
+			"Unexpected device version: expected %04x got %04x\n",
+			TC358764_VERSION, version);
+		goto powered_failed;
+	}
+
+	ret = tc358764_rnb6_init(tc358764);
+	if (ret)
+		goto powered_failed;
+
+	dev_info(&client->dev, "bridge successfully initialized\n");
+
+	return 0;
+
+ powered_failed:
+	if (pdata && pdata->set_power)
+		pdata->set_power(0);
+ set_power_failed:
+	i2c_set_clientdata(client, NULL);
+	kfree(tc358764);
+	return ret;
+}
+
+/*
+ * tc358764_remove() - unbind the driver, powering the bridge down when a
+ * set_power hook was provided.
+ *
+ * Fix: pdata may be NULL (platform_data is optional) and was dereferenced
+ * unconditionally.
+ */
+static int tc358764_remove(struct i2c_client *client)
+{
+	struct tc358764 *tc358764 = i2c_get_clientdata(client);
+	struct tc358764_pdata *pdata = client->dev.platform_data;
+
+	if (tc358764 == NULL) {
+		return 0;
+	}
+
+	if (pdata && pdata->set_power) {
+		pdata->set_power(0);
+	}
+
+	kfree(tc358764);
+
+	i2c_set_clientdata(client, NULL);
+
+	return 0;
+}
+
+/* Devices handled by this driver; a single entry matched by name */
+static const struct i2c_device_id tc358764_id[] = {
+	{ DEVICE_NAME, 0 },
+	{ /* EOT */ }
+};
+
+MODULE_DEVICE_TABLE(i2c, tc358764_id);
+
+/* Standard I2C driver glue: probe/remove registered under DEVICE_NAME */
+static struct i2c_driver tc358764_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name  = DEVICE_NAME,
+	},
+	.probe    = tc358764_probe,
+	.remove   = tc358764_remove,
+	.id_table = tc358764_id,
+};
+
+module_i2c_driver(tc358764_driver);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("kernel driver for tc358764");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/tw8834.c b/drivers/media/video/tw8834.c
new file mode 100644
index 00000000000000..981cce6a581504
--- /dev/null
+++ b/drivers/media/video/tw8834.c
@@ -0,0 +1,824 @@
+/* tw8834.c
+ *
+ * Driver for techwell tw8834
+ * It handles 2 inputs, composite and bt656 input
+ * It handles one output, which is bt656
+ *
+ * Author : Julien BERAUD <julien.beraud@parrot.com>
+ *
+ * Date : 23/04/2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/tw8834.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+
+MODULE_AUTHOR("Julien BERAUD <julien.beraud@parrot.com>");
+MODULE_DESCRIPTION("kernel driver for tw8834");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+/* Media entity pad indices. */
+enum {
+	TW8834_IN_PAD = 0,
+	TW8834_OUT_PAD,
+	TW8834_NUM_PADS,
+};
+
+/* Selectable video inputs (see tw8834_s_routing). */
+enum {
+	TW8834_INPUT_CVBS = 0,
+	TW8834_INPUT_DTV,
+	TW8834_NUM_INPUTS,
+};
+
+/* DV timings reported for the two analog standards (see
+ * tw8834_query_dv_timings, which doubles the horizontal figures). */
+static struct v4l2_dv_timings tw8834_ntsc_timings = V4L2_DV_BT_CEA_720X480I59_94;
+
+
+static struct v4l2_dv_timings tw8834_pal_timings = V4L2_DV_BT_CEA_720X576I50;
+
+
+/* CVBS frame geometry: 720 active pixels, 576 (PAL) / 480 (NTSC) lines. */
+#define TW8834_CVBS_WIDTH 720
+#define TW8834_CVBS_PAL_HEIGHT 576
+#define TW8834_CVBS_NTSC_HEIGHT 480
+
+/* Register addresses: the high byte selects the register page
+ * (written to index 0xFF), the low byte is the in-page index. */
+#define TW8834_IRQ 0x002
+#define TW8834_IMASK 0x003
+#define TW8834_STATUS 0x004
+#define TW8834_STD 0x11C
+#define TW8834_STDR 0x11D
+#define TW8834_BT656_DEC_CTRL2 0x48
+#define TW8834_CSTATUS 0x101
+
+/* tw8834 data structure */
+struct tw8834 {
+	struct v4l2_subdev sd;
+	struct i2c_client *i2c_client;
+	struct media_pad pad[TW8834_NUM_PADS];
+	struct v4l2_mbus_framefmt format[TW8834_NUM_PADS]; /* active fmt per pad */
+	int input;            /* one of TW8834_INPUT_* */
+	int is_streaming;     /* non-zero while s_stream(1) is in effect */
+	int (*set_power)(int on); /* optional board power callback */
+};
+
+/* Map a v4l2_subdev back to its enclosing tw8834 context. */
+static inline struct tw8834 *to_tw8834(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct tw8834, sd);
+}
+
+/* Register maps */
+/* One register/value pair for table-driven configuration. */
+struct reg_cfg {
+	unsigned char reg;
+	unsigned char val;
+};
+
+/* Baseline configuration shared by both inputs. Entries with reg 0xFF
+ * switch the register page before the following writes. */
+static struct reg_cfg tw8834_common_cfg[] = {
+	{ 0xFF, 0x00 },
+	{ 0x08, 0xC6 },
+	{ 0x07, 0x08 },//Reserved bit in tw8834 doc but enable bt656 in tw8836 doc
+	{ 0x09, 0x05 },//VD input clock polarity control:negative + reserved
+	{ 0x41, 0xC0 },//Diff-dtv thresholds for DTV no use for cvbs
+	{ 0x51, 0x00 },//
+	{ 0x52, 0x02 },//
+	{ 0x53, 0x01 },//Diff->unnecessary diff, apply dtv config to both.
+	{ 0xDC, 0x00 },//pwm1 freq control
+	{ 0xDD, 0x80 },//pwm1 duty ctrl
+	{ 0xDE, 0x00 },//pw2 freq ctrl
+	{ 0xDF, 0x80 },//pw2 duty ctrl
+	{ 0xF9, 0x50 },//sspll freq
+	{ 0xFC, 0x30 },//sspll
+	{ 0xFF, 0x00 },//
+	{ 0xFF, 0x01 },//
+	{ 0x02, 0x48 },//Diff-> unapplyable to dtv cvbs input select
+	{ 0x05, 0x09 },//anti-aliasing enable
+	{ 0x08, 0x16 },//
+	{ 0x0A, 0x10 },//
+	{ 0x11, 0x64 },//
+	{ 0x17, 0x80 },//
+	{ 0x21, 0x22 },//
+	{ 0x2F, 0xE4 },//LCS enabled
+	{ 0x30, 0x00 },//undocumented
+	{ 0x3D, 0x40 },
+	{ 0xFF, 0x01 },
+	{ 0xFF, 0x02 },
+	{ 0x40, 0x11 },
+	{ 0x41, 0x0A },
+	{ 0x42, 0x05 },
+	{ 0x43, 0x01 },
+	{ 0x44, 0x64 },
+	{ 0x45, 0xF4 },
+	{ 0x47, 0x0A },
+	{ 0x4C, 0x00 },
+	{ 0x4D, 0x44 },
+	{ 0x80, 0x00 },
+	{ 0x8B, 0x44 },
+	{ 0xB0, 0x30 },
+	{ 0xFF, 0x02 },
+};
+
+/* Input-specific setup applied when streaming from the CVBS decoder
+ * (see tw8834_enable_cvbs). */
+static struct reg_cfg tw8834_cvbs_in_cfg[] = {
+	{ 0xff, 0x00 },
+	{ 0x06, 0x06 },//Diff
+	{ 0x67, 0x00 },//Diff source of bt656 output
+	{ 0x69, 0x02 },//Diff crop dtv
+	{ 0x6A, 0x20 },//Diff crop dtv
+	{ 0x6B, 0xF0 },//Diff crop dtv
+	{ 0x6C, 0x20 },//Diff crop dtv
+	{ 0x6D, 0xD0 },//Diff crop dtv
+	{ 0xFF, 0x01 },//
+	{ 0x0C, 0xDC },//black level is 7.5 IRE above the blank level
+	{ 0x10, 0xD0 },//Diff-brightness
+	{ 0x20, 0x30 },//Diff-clamping
+	{ 0x24, 0x60 },//Diff-clamping
+	{ 0x37, 0xBA },//Diff
+	{ 0x3A, 0x00 },//Diff
+	{ 0x3B, 0x0F },//Diff
+	{ 0xFF, 0x01 },//
+	{ 0xFF, 0x02 },//
+	{ 0x48, 0x36 },//Added (not present for dtv)
+	{ 0xFF, 0x02 },
+};
+
+/* Input-specific setup applied when streaming from the digital (DTV)
+ * BT.656 input (see tw8834_enable_dtv). */
+static struct reg_cfg tw8834_dtv_in_cfg[] = {
+	{ 0xFF, 0x00 },//
+	{ 0x06, 0x26 },//Diff
+	{ 0x67, 0x02 },//Diff source of bt656 output
+	{ 0x69, 0x12 },//Diff crop dtv
+	{ 0x6A, 0x4F },//Diff crop dtv
+	{ 0x6B, 0xE0 },//Diff crop dtv
+	{ 0x6C, 0xB4 },//Diff crop dtv
+	{ 0x6D, 0x80 },//Diff crop dtv
+	{ 0xFF, 0x01 },//
+	{ 0x10, 0x00 },//Diff-brightness
+	{ 0x20, 0x50 },//Diff-clamping
+	{ 0x24, 0x3C },//Diff-clamping
+	{ 0x37, 0xB9 },//Diff
+	{ 0x3A, 0x88 },//Diff
+	{ 0x3B, 0xCF },//Diff
+	{ 0xFF, 0x01 },//
+};
+
+/* i2c transfer functions */
+/* i2c transfer functions */
+
+/* Read one byte from the current register page over SMBus.
+ * Returns the byte value (>= 0) on success, -EIO on transfer error. */
+static s32 tw8834_i2c_read_byte(struct tw8834 *tw8834,
+				u8 command)
+{
+	struct i2c_client *client = tw8834->i2c_client;
+	union i2c_smbus_data data;
+	int err;
+
+	err = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			     I2C_SMBUS_READ, command,
+			     I2C_SMBUS_BYTE_DATA, &data);
+	return err ? -EIO : data.byte;
+}
+
+/* Write one byte to the current register page, retrying the SMBus
+ * transfer up to three times. Returns 0 on success, negative errno
+ * (logged) otherwise. */
+static s32 tw8834_i2c_write_byte(struct tw8834 *tw8834,
+				 u8 command,
+				 u8 value)
+{
+	struct i2c_client *client = tw8834->i2c_client;
+	union i2c_smbus_data data;
+	int attempt;
+	int err = 0;
+
+	data.byte = value;
+	for (attempt = 0; attempt < 3; attempt++) {
+		err = i2c_smbus_xfer(client->adapter, client->addr,
+				     client->flags,
+				     I2C_SMBUS_WRITE, command,
+				     I2C_SMBUS_BYTE_DATA, &data);
+		if (!err)
+			break;
+	}
+	if (err < 0)
+		v4l_err(client, "error writing %02x, %02x, %02x\n",
+			client->addr, command, value);
+	return err;
+}
+
+/* Apply a table of register writes, stopping at the first failure.
+ * Returns 0 when the whole table was written, the write error code
+ * otherwise. */
+static int tw8834_write_reg_array(struct tw8834 *tw8834,
+				  struct reg_cfg *array, int nb)
+{
+	struct v4l2_subdev *sd = &tw8834->sd;
+	int idx;
+
+	for (idx = 0; idx < nb; idx++) {
+		s32 err = tw8834_i2c_write_byte(tw8834,
+						array[idx].reg,
+						array[idx].val);
+		if (err < 0) {
+			v4l2_err(sd, "failed to write 0x%X to reg 0x%X\n",
+				 array[idx].val, array[idx].reg);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Read a paged register: the high byte of @reg selects the page
+ * (written to index 0xFF first), the low byte is the in-page index.
+ * Returns the byte value (>= 0) or a negative error. */
+static int tw8834_read_register(struct tw8834 *tw8834,
+				u16 reg)
+{
+	int err = tw8834_i2c_write_byte(tw8834, 0xFF, reg >> 8);
+
+	if (err < 0)
+		return err;
+
+	return tw8834_i2c_read_byte(tw8834, (u8)(reg & 0xFF));
+}
+
+/* Write a paged register: select the page encoded in the high byte of
+ * @reg, then write @value at the low-byte index. Returns 0 or a
+ * negative error. */
+static int tw8834_write_register(struct tw8834 *tw8834,
+				 u16 reg,
+				 u8 value)
+{
+	int err = tw8834_i2c_write_byte(tw8834, 0xFF, reg >> 8);
+
+	if (err < 0)
+		return err;
+
+	return tw8834_i2c_write_byte(tw8834, (u8)(reg & 0xFF), value);
+}
+
+/* Helpers */
+
+/*
+ * Return the subdev linked to our sink (IN) pad, or NULL when the link
+ * peer is absent or is not a v4l2 subdev. When @pad is non-NULL it
+ * receives the remote pad index. Always queries the IN pad regardless
+ * of caller context.
+ */
+static struct v4l2_subdev *
+tw8834_remote_subdev(struct tw8834 *tw8834, u32 *pad)
+{
+	struct media_pad *remote;
+
+	remote = media_entity_remote_source(&tw8834->pad[TW8834_IN_PAD]);
+
+	if (remote == NULL ||
+	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+		return NULL;
+
+	if (pad)
+		*pad = remote->index;
+
+	return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* video operations */
+
+/* video operations */
+
+/* v4l2 video op: select the active input (CVBS or DTV). The @out and
+ * @config arguments are unused. */
+static int tw8834_s_routing(struct v4l2_subdev *sd, u32 in, u32 out, u32 config)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+
+	if (tw8834 == NULL || in >= TW8834_NUM_INPUTS)
+		return -EINVAL;
+
+	tw8834->input = in;
+	return 0;
+}
+
+/*
+ * Return the format backing store for @pad: the per-fh "try" format
+ * for V4L2_SUBDEV_FORMAT_TRY, the device's active format otherwise.
+ * Returns NULL for an out-of-range pad index.
+ */
+static struct v4l2_mbus_framefmt *
+__tw8834_get_fmt(struct tw8834 *tw8834,
+		 struct v4l2_subdev_fh *fh,
+		 unsigned int pad,
+		 enum v4l2_subdev_format_whence which)
+{
+	/* Pads are 0..TW8834_NUM_PADS-1; the original '>' test let
+	 * pad == TW8834_NUM_PADS read past the end of format[]. */
+	if (pad >= TW8834_NUM_PADS)
+		return NULL;
+
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &tw8834->format[pad];
+}
+
+/* v4l2 pad op: copy out the requested pad format (try or active). */
+static int tw8834_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+	struct v4l2_mbus_framefmt *stored;
+
+	stored = __tw8834_get_fmt(tw8834, fh, fmt->pad, fmt->which);
+	if (!stored)
+		return -EINVAL;
+
+	fmt->format = *stored;
+	return 0;
+}
+
+/*
+ * Set the format on the input (sink) pad. Rejected for the CVBS input
+ * (the analog decoder fixes its own input format); for DTV only
+ * 640x480 is accepted and the result is propagated to the output pad
+ * with the line count the hardware actually delivers (494).
+ */
+static int tw8834_set_in_fmt(struct tw8834 *tw8834,struct v4l2_subdev_fh *fh,
+			     struct v4l2_mbus_framefmt *fmt)
+{
+	struct v4l2_mbus_framefmt *in_format;
+	struct v4l2_mbus_framefmt *out_format;
+
+	out_format = __tw8834_get_fmt(tw8834, fh, TW8834_OUT_PAD, V4L2_SUBDEV_FORMAT_ACTIVE);
+	if(out_format == NULL)
+		return -EINVAL;
+
+	if(tw8834->input == TW8834_INPUT_CVBS) {
+		v4l2_err(&tw8834->sd, "Cannot set input pad format for cvbs\n");
+		return -EINVAL;
+	}
+	else if(tw8834->input == TW8834_INPUT_DTV) {
+		in_format = __tw8834_get_fmt(tw8834, fh, TW8834_IN_PAD, V4L2_SUBDEV_FORMAT_ACTIVE);
+		if (in_format == NULL)
+			return -EINVAL;
+
+		if(fmt->width == 640 && fmt->height == 480) {
+			/*Up to now, I have only been able to make it work in bt656
+			because bt601 is not functionnal with the register config given
+			by Intersil and I only had time to make VGA mode work, which is
+			the mode we are willing to use the input sensor in (ref:mt9v117) */
+			fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+		}
+		else
+			return -EINVAL;
+
+		/* Mirror the accepted format on both pads. */
+		*in_format = *fmt;
+		*out_format = *fmt;
+		/*I have currently only been able to output 694 lines that have
+		to be cropped with top=1, height=494*/
+		out_format->height = 494;
+	}
+
+	return 0;
+}
+
+/*
+ * Set the format on the output (source) pad. Only legal for the CVBS
+ * input, where 720x576 (PAL) and 720x480 (NTSC) interlaced are the
+ * accepted geometries; for DTV the output follows the input pad.
+ */
+static int tw8834_set_out_fmt(struct tw8834 *tw8834,struct v4l2_subdev_fh *fh,
+			      struct v4l2_mbus_framefmt *fmt)
+{
+	struct v4l2_mbus_framefmt *out_format;
+
+	out_format = __tw8834_get_fmt(tw8834, fh, TW8834_OUT_PAD,
+				      V4L2_SUBDEV_FORMAT_ACTIVE);
+	if (!out_format)
+		return -EINVAL;
+
+	if (tw8834->input == TW8834_INPUT_DTV) {
+		v4l2_err(&tw8834->sd, "cannot set out fmt for dtv\n");
+		return -EINVAL;
+	}
+
+	if (tw8834->input == TW8834_INPUT_CVBS) {
+		/* PAL and NTSC share everything but the line count. */
+		if (fmt->width != TW8834_CVBS_WIDTH ||
+		    (fmt->height != TW8834_CVBS_PAL_HEIGHT &&
+		     fmt->height != TW8834_CVBS_NTSC_HEIGHT))
+			return -EINVAL;
+
+		fmt->field = V4L2_FIELD_INTERLACED;
+		fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+		/*propagate to out pad */
+		*out_format = *fmt;
+	}
+
+	return 0;
+}
+
+/* v4l2 pad op: dispatch set_fmt to the pad-specific handler. */
+static int tw8834_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+
+	if (fmt->pad == TW8834_IN_PAD)
+		return tw8834_set_in_fmt(tw8834, fh, &fmt->format);
+
+	return tw8834_set_out_fmt(tw8834, fh, &fmt->format);
+}
+
+/*
+ * v4l2 pad op: enumerate media bus codes. A single code per pad is
+ * supported, so only index 0 is valid.
+ */
+static int tw8834_enum_mbus_code(struct v4l2_subdev *sd,
+				 struct v4l2_subdev_fh *fh,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+
+	/* Bound-check the pad: the original indexed format[] with the
+	 * unchecked user-supplied pad number, reading out of bounds. */
+	if (code->index || code->pad >= TW8834_NUM_PADS)
+		return -EINVAL;
+
+	code->code = tw8834->format[code->pad].code;
+	return 0;
+}
+
+/*
+ * Program the CVBS input register table.
+ * Returns 0 on success or the negative error of the first failed i2c
+ * write (the original unconditionally returned 0, hiding failures
+ * from tw8834_s_stream).
+ */
+static int tw8834_enable_cvbs(struct tw8834 *tw8834)
+{
+	return tw8834_write_reg_array(tw8834,
+				      tw8834_cvbs_in_cfg,
+				      ARRAY_SIZE(tw8834_cvbs_in_cfg));
+}
+
+/*
+ * Start the remote DTV source subdev, then program the DTV input
+ * register table. Returns 0 on success, -ENODEV when no remote subdev
+ * is connected to the sink pad, or the remote s_stream error.
+ */
+static int tw8834_enable_dtv(struct v4l2_subdev *sd, struct tw8834 *tw8834)
+{
+	struct v4l2_subdev *dtv_sd;
+	int ret;
+
+	dtv_sd = tw8834_remote_subdev(tw8834, NULL);
+	/* Bug fix: the original tested 'sd' (our own subdev, never NULL
+	 * here) instead of the looked-up remote 'dtv_sd'. */
+	if (!dtv_sd) {
+		v4l2_err(sd, "no remote subdev found\n");
+		return -ENODEV;
+	}
+
+	ret = v4l2_subdev_call(dtv_sd, video, s_stream, 1);
+	if (ret < 0) {
+		v4l2_err(sd, "cannot start streaming on dtv subdev\n");
+		return ret;
+	}
+
+	tw8834_write_reg_array(tw8834,
+			       tw8834_dtv_in_cfg,
+			       ARRAY_SIZE(tw8834_dtv_in_cfg));
+	return ret;
+}
+
+/*
+ * Stop the remote DTV source subdev. Returns 0 on success, -ENODEV
+ * when no remote subdev is connected, or the remote s_stream error.
+ */
+static int tw8834_disable_dtv(struct v4l2_subdev *sd, struct tw8834 *tw8834)
+{
+	struct v4l2_subdev *dtv_sd;
+	int ret;
+
+	dtv_sd = tw8834_remote_subdev(tw8834, NULL);
+	/* Bug fix: the original tested 'sd' (never NULL here) instead of
+	 * the looked-up remote 'dtv_sd'. */
+	if (!dtv_sd) {
+		v4l2_err(sd, "no remote subdev found\n");
+		return -ENODEV;
+	}
+
+	ret = v4l2_subdev_call(dtv_sd, video, s_stream, 0);
+	if (ret < 0)
+		v4l2_err(sd, "cannot stop streaming on dtv subdev\n");
+	return ret;
+}
+
+/*
+ * v4l2 video op: start/stop streaming on the selected input, toggling
+ * the board power callback around the input (re)configuration.
+ */
+static int tw8834_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+	int ret = 0;
+
+	if (enable) {
+		if (tw8834->set_power)
+			tw8834->set_power(TW8834_POWER_INPUT_ENABLE);
+		ret = (tw8834->input == TW8834_INPUT_CVBS)
+			? tw8834_enable_cvbs(tw8834)
+			: tw8834_enable_dtv(sd, tw8834);
+	} else {
+		if (tw8834->set_power)
+			tw8834->set_power(TW8834_POWER_INPUT_DISABLE);
+		/* CVBS needs no explicit stop sequence. */
+		if (tw8834->input == TW8834_INPUT_DTV)
+			ret = tw8834_disable_dtv(sd, tw8834);
+	}
+
+	tw8834->is_streaming = !!enable;
+
+	return ret;
+}
+
+/*
+ * v4l2 video op: wait for the CVBS decoder to lock (polling the chip
+ * status up to ~1s), then report PAL or NTSC BT timings. Horizontal
+ * figures and the pixel clock are doubled because the clock is doubled
+ * for 8-bit YUYV transmission.
+ *
+ * Returns -ENOLINK on video loss or lock timeout.
+ */
+static int tw8834_query_dv_timings(struct v4l2_subdev *sd,
+				struct v4l2_dv_timings *timings)
+{
+	struct tw8834 *tw8834 = to_tw8834(sd);
+	unsigned char status;
+	int lock_timeout = 20;
+
+	while (--lock_timeout) {
+		status = tw8834_read_register(tw8834,
+					      TW8834_CSTATUS);
+		if (status & 0x80) {
+			v4l2_info(sd, "Video loss\n");
+			return -ENOLINK;
+		} else if ((status & 0x68) == 0x68) {
+			v4l2_info(sd, "signal locked\n");
+			break;
+		}
+		v4l2_info(sd, "wait for lock, status = 0x%x\n", status);
+		msleep(50);
+	}
+
+	if (!lock_timeout) {
+		v4l2_info(sd, "timeout waiting for lock\n");
+		return -ENOLINK;
+	}
+
+	/* Status bit 0 flags a 50 Hz (PAL) source. */
+	if (status & 0x1)
+		*timings = tw8834_pal_timings;
+	else
+		*timings = tw8834_ntsc_timings;
+
+	/* pclk doubled for 8 bits transmission of YUYV so horizontal
+	 * timings are doubled. (This block was duplicated verbatim in
+	 * both branches of the original.) */
+	timings->bt.pixelclock = 2 * timings->bt.pixelclock;
+	timings->bt.hfrontporch = 2 * timings->bt.hfrontporch;
+	timings->bt.hsync = 2 * timings->bt.hsync;
+	timings->bt.hbackporch = 2 * timings->bt.hbackporch;
+
+	return 0;
+}
+
+/*
+ * Event subscription: only source-change events are generated (queued
+ * from the IRQ handler); delegate to the v4l2 core helper.
+ * NOTE(review): non-static, so these symbols land in the global kernel
+ * namespace — consider making them static.
+ */
+int tw8834_subscribe_event(struct v4l2_subdev *subdev,
+			   struct v4l2_fh *fh,
+			   struct v4l2_event_subscription *sub)
+{
+	return v4l2_src_change_event_subscribe(fh, sub);
+}
+
+int tw8834_unsubscribe_event(struct v4l2_subdev *subdev,
+			     struct v4l2_fh *fh,
+			     struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+/* Core ops: only event (un)subscription is implemented. */
+static const struct v4l2_subdev_core_ops tw8834_core_ops = {
+	.subscribe_event = tw8834_subscribe_event,
+	.unsubscribe_event = tw8834_unsubscribe_event,
+};
+
+/* Video ops: stream control, timing detection and input routing. */
+static const struct v4l2_subdev_video_ops tw8834_video_ops = {
+	.s_stream = tw8834_s_stream,
+	.query_dv_timings = tw8834_query_dv_timings,
+	.s_routing = tw8834_s_routing,
+};
+
+/* Pad ops: per-pad format negotiation. */
+static const struct v4l2_subdev_pad_ops tw8834_pad_ops = {
+	.get_fmt = tw8834_get_fmt,
+	.set_fmt = tw8834_set_fmt,
+	.enum_mbus_code = tw8834_enum_mbus_code,
+};
+
+/* Top-level subdev ops table registered in probe. */
+static const struct v4l2_subdev_ops tw8834_ops = {
+	.core = &tw8834_core_ops,
+	.video = &tw8834_video_ops,
+	.pad = &tw8834_pad_ops,
+};
+
+/*
+ * Threaded IRQ handler: read and acknowledge the chip interrupt flags
+ * (i2c access is allowed here because the handler runs in thread
+ * context), then queue a V4L2 source-change event on the subdev node.
+ */
+static irqreturn_t tw8834_isr(int irq, void *priv)
+{
+	struct tw8834 *tw8834 = priv;
+	unsigned char status, tw8834_irq;
+	struct video_device *vdev = tw8834->sd.devnode;
+	static const struct v4l2_event event = {
+		.type = V4L2_EVENT_SOURCE_CHANGE,
+		.u.src_change.changes =
+			V4L2_EVENT_SRC_CH_RESOLUTION,
+	};
+
+	/* Writing the flags back acks the interrupt; status is read for
+	 * the log line only. */
+	tw8834_irq = tw8834_read_register(tw8834, TW8834_IRQ);
+	status = tw8834_read_register(tw8834, TW8834_CSTATUS);
+	tw8834_write_register(tw8834, TW8834_IRQ, tw8834_irq);
+
+	v4l2_info(&tw8834->sd,
+		"read status = 0x%X irq = 0x%X\n", status, tw8834_irq);
+
+	/* devnode may not exist yet if the bridge driver has not
+	 * registered our subdev node. */
+	if(vdev)
+		v4l2_event_queue(vdev, &event);
+	return IRQ_HANDLED;
+}
+
+/*
+ * I2C probe: allocate the context, set default pad formats, register
+ * the media pads, power the chip up, verify the chip id (0x34), hook
+ * the optional IRQ and load the default (CVBS) configuration.
+ */
+static int tw8834_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct tw8834 *tw8834;
+	struct tw8834_platform_data *pdata = client->dev.platform_data;
+	struct v4l2_subdev *sd;
+	struct v4l2_mbus_framefmt *format;
+	int ret = -ENODEV;
+	s32 chip_id;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		ret = -EIO;
+		goto ei2c;
+	}
+	v4l_dbg(1, debug, client, "detecting tw8834 client on address 0x%x\n",
+		client->addr << 1);
+
+	tw8834 = kzalloc(sizeof(*tw8834), GFP_KERNEL);
+	if (!tw8834) {
+		v4l_err(client, "alloc failed for data structure\n");
+		ret = -ENOMEM;
+		goto enomem;
+	}
+
+	tw8834->set_power = pdata->set_power;
+	tw8834->i2c_client = client;
+	tw8834->input = TW8834_INPUT_CVBS;
+
+	/* Default DTV (input pad) format: VGA progressive UYVY. */
+	format = &tw8834->format[TW8834_IN_PAD];
+	format->width = 640;
+	format->height = 480;
+	format->field = V4L2_FIELD_NONE;
+	format->code = V4L2_MBUS_FMT_UYVY8_2X8;
+	format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	/* Default output pad format: PAL interlaced UYVY. */
+	format = &tw8834->format[TW8834_OUT_PAD];
+	format->width = TW8834_CVBS_WIDTH;
+	format->height = TW8834_CVBS_PAL_HEIGHT;
+	format->field = V4L2_FIELD_INTERLACED;
+	format->code = V4L2_MBUS_FMT_UYVY8_2X8;
+	format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	sd = &tw8834->sd;
+	v4l2_i2c_subdev_init(sd, client, &tw8834_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+	tw8834->pad[TW8834_IN_PAD].flags = MEDIA_PAD_FL_SINK;
+	tw8834->pad[TW8834_OUT_PAD].flags = MEDIA_PAD_FL_SOURCE;
+	/* Was a magic '2': keep the pad count tied to the enum. */
+	ret = media_entity_init(&sd->entity, TW8834_NUM_PADS, tw8834->pad, 0);
+	if (ret < 0) {
+		v4l_err(client, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	/* See if the chip is present: power it up and select page 0. */
+	if (pdata->set_power) {
+		pdata->set_power(TW8834_POWER_ON);
+		msleep(500);
+	}
+
+	ret = tw8834_i2c_write_byte(tw8834, 0xFF, 0x00);
+	if (ret < 0) {
+		v4l_err(client, "impossible to write to tw8834\n");
+		goto echipident;
+	}
+
+	chip_id = tw8834_i2c_read_byte(tw8834, 0x00);
+	if (chip_id < 0) {
+		v4l_err(client, "failed to read chip id\n");
+		ret = chip_id;
+		goto echipident;
+	}
+
+	if (chip_id != 0x34) {
+		/* Was "0x%d": print the unexpected id in hex. */
+		v4l_err(client, "read 0x%x in chip ident reg\n", chip_id);
+		ret = -ENODEV;
+		goto echipident;
+	}
+
+	/* IRQ is optional: a registration failure only disables
+	 * source-change events, probe still succeeds. */
+	if (client->irq > 0) {
+		ret = request_threaded_irq(client->irq, NULL,
+					   tw8834_isr,
+					   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+					   "tw8834", tw8834);
+		if (ret < 0) {
+			/* Was v4l2_err(client, ...): an i2c_client is not
+			 * a v4l2 device, use the i2c-flavoured helper. */
+			v4l_err(client, "failed to register irq\n");
+			client->irq = -1;
+		}
+	}
+
+	/* Load the common configuration and default to the CVBS input,
+	 * then ack pending flags and unmask the events we use. */
+	tw8834_write_reg_array(tw8834,
+			       tw8834_common_cfg,
+			       ARRAY_SIZE(tw8834_common_cfg));
+	tw8834_enable_cvbs(tw8834);
+	tw8834_write_register(tw8834, TW8834_IRQ, 0xFF);
+	tw8834_write_register(tw8834, TW8834_STATUS, 0xFF);
+	tw8834_write_register(tw8834, TW8834_IMASK, 0xEC);
+	tw8834_write_register(tw8834, TW8834_BT656_DEC_CTRL2, 0x10);
+
+	return 0;
+echipident:
+	if (pdata->set_power)
+		pdata->set_power(TW8834_POWER_OFF);
+	media_entity_cleanup(&sd->entity);
+emedia:
+	kfree(tw8834);
+enomem:
+ei2c:
+	return ret;
+}
+
+/*
+ * I2C remove: power down, release the IRQ if one was registered,
+ * unregister the subdev and free the context.
+ */
+static int tw8834_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw8834 *tw8834 = to_tw8834(sd);
+	struct tw8834_platform_data *pdata = client->dev.platform_data;
+
+	/* Defensive: probe rejects a missing pdata, but do not rely on
+	 * it here (the original dereferenced pdata unconditionally). */
+	if (pdata && pdata->set_power)
+		pdata->set_power(TW8834_POWER_OFF);
+
+	if (client->irq > 0)
+		free_irq(client->irq, tw8834);
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	kfree(tw8834);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend: cut board power and remember whether we were streaming so
+ * resume can restart the pipeline.
+ * NOTE(review): power is removed before tw8834_s_stream(sd, 0) runs,
+ * so the stop sequence talks to an already-unpowered chip — confirm
+ * this ordering is intended.
+ */
+int tw8834_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw8834 *tw8834 = to_tw8834(sd);
+	int is_streaming;
+
+	if(tw8834->set_power)
+		tw8834->set_power(TW8834_POWER_OFF);
+	// Saving the state to restore at resume
+	is_streaming = tw8834->is_streaming;
+
+	// Stop if it was streaming
+	if (is_streaming)
+		tw8834_s_stream(sd, 0);
+
+	// s_stream(0) cleared the flag; put the saved value back
+	tw8834->is_streaming = is_streaming;
+
+	return 0;
+}
+
+/*
+ * Resume: restore power, replay the full register configuration (the
+ * chip loses its state when powered off) and restart streaming if it
+ * was active at suspend time.
+ */
+int tw8834_resume(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw8834 *tw8834 = to_tw8834(sd);
+
+	if(tw8834->set_power){
+		tw8834->set_power(TW8834_POWER_ON);
+		msleep(100);
+	}
+
+	// Same init sequence as probe: common config, CVBS input,
+	// interrupt ack + mask, BT.656 decoder control.
+	tw8834_write_reg_array(tw8834,
+			       tw8834_common_cfg,
+			       ARRAY_SIZE(tw8834_common_cfg));
+	tw8834_enable_cvbs(tw8834);
+	tw8834_write_register(tw8834, TW8834_IRQ, 0xFF);
+	tw8834_write_register(tw8834, TW8834_STATUS, 0xFF);
+	tw8834_write_register(tw8834, TW8834_IMASK, 0xEC);
+	tw8834_write_register(tw8834, TW8834_BT656_DEC_CTRL2, 0x10);
+
+	if (tw8834->is_streaming)
+		tw8834_s_stream(sd, 1);
+
+	return 0;
+}
+#endif
+
+/* ----------------------------------------------------------------------- */
+
+/* I2C id table used for device/driver matching. */
+static const struct i2c_device_id tw8834_id[] = {
+	{ "tw8834", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, tw8834_id);
+
+/* Driver glue; legacy suspend/resume hooks are compiled in only with
+ * CONFIG_PM. */
+static struct i2c_driver tw8834_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "tw8834",
+	},
+	.probe = tw8834_probe,
+	.remove = tw8834_remove,
+#ifdef CONFIG_PM
+	.suspend = tw8834_suspend,
+	.resume = tw8834_resume,
+#endif
+	.id_table = tw8834_id,
+};
+
+module_i2c_driver(tw8834_driver);
diff --git a/drivers/media/video/tw8836.c b/drivers/media/video/tw8836.c
new file mode 100644
index 00000000000000..f349dbecfa5e01
--- /dev/null
+++ b/drivers/media/video/tw8836.c
@@ -0,0 +1,882 @@
+/* tw8836.c
+ *
+ * Driver for Techwell tw8836
+ * It handles firmware updater and composite input
+ *
+ * Author : Nicolas Laclau <nicolas.laclau@parrot.com>
+ *
+ * Date : 20 June 2013
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/i2c-dev.h>
+
+#include <media/mx25l6435e.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+/* ----------------------------------------------------------------------- */
+/* Registers Definition : */
+/* Control registers */
+#define TW8836_MODE_CTRL_REG (0x4C2)
+/* I²C to XMEM registers */
+#define TW8836_I2C_XMEM_DMA_HIGH_ADDR (0x4DB)
+#define TW8836_I2C_XMEM_DMA_LOW_ADDR (0x4DC)
+#define TW8836_I2C_XMEM_DMA_DATA_REG (0x4DD)
+/* Registers addresses for MCU management */
+#define TW8836_EN_MCU (0x4EC)
+#define TW8836_ACCESS_REG (0x4ED)
+#define TW8836_CRC16_H_REG (0x4EE)
+#define TW8836_CRC16_L_REG (0x4EF)
+/* Registers addresses for I2C-DMA programming */
+#define TW8836_DMA_CTRL_REG (0X4F3)
+#define TW8836_DMA_FLASH_BUSY_CTRL_REG (0X4F4)
+#define TW8836_PAGE_REG (0X4FF)
+
+/* Others : */
+#define TW8836_FIRMWARE_ADDR (0x0)
+#define TW8836_FIRMWARE_NAME "tw8836.fw"
+
+#define TW8836_XRAM_WORKAREA (0x0400)
+#define TW8836_MAX_CACHE (256)
+
+#define TW8836_MCU_MAGIC_PATTERN_1 (0x55)
+#define TW8836_MCU_MAGIC_PATTERN_2 (0xAA)
+#define TW8836_MCU_HALT (0x00)
+#define TW8836_MCU_RERUN (0x01)
+
+/* SPI-EEPROM commands : */
+#define TW8836_DMA_CMD_CHIP_ERASE (MX25L6435E_CE_2)
+
+/* Macro definitions: */
+/* - Basic */
+#define TW_PAGE_REG (0xFF)
+#define TW_BIT_SHIFT_R(val, shift) (val>>shift)
+#define TW_BIT_SHIFT_L(val, shift) (val<<shift)
+#define TW_MASK(bits) (TW_BIT_SHIFT_L(1, bits)-1)
+/* Extract (val >> shift) & ((1 << mask) - 1): 'mask' is a bit count. */
+#define TW_SHIFT(val, shift, mask) (TW_BIT_SHIFT_R(val, shift) & TW_MASK(mask))
+
+/* - Techwell addresses */
+/* Registers are addressed as page (high byte) + in-page index (low byte). */
+#define TW_PAGE(reg) (TW_BIT_SHIFT_R(reg, 8) & TW_MASK(8))
+#define TW_INDEX(reg) (reg & TW_MASK(8))
+
+/* - Others */
+/* NOTE(review): hides a 'return' inside a GCC statement expression, so
+ * control flow leaves the *caller* — only usable in functions whose
+ * return type accepts ret_val. Consider an explicit if/return instead. */
+#define TW8836_CHECK_RETURN(ret_val) ({if (ret_val < 0) return ret_val; })
+
+/* ----------------------------------------------------------------------- */
+/* Media entity pads: a single BT.656 output. */
+enum {
+	TW8836_OUT_PAD = 0,
+	TW8836_NUM_PADS,
+};
+
+/* Per-device context. */
+struct tw8836 {
+	struct i2c_client *client;
+	const struct firmware *tw8836_fw; /* firmware image to flash */
+	struct v4l2_subdev subdev;
+	struct media_pad pad[TW8836_NUM_PADS];
+	struct v4l2_mbus_framefmt format[TW8836_NUM_PADS];
+};
+/* ----------------------------------------------------------------------- */
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+/*
+ * CRC table provided by Intersil.
+ * Used to compute a CRC16 over the firmware file and compare it with
+ * the CRC16 reported by the tw8836 chip.
+ */
+/* 256-entry CRC16 lookup table (entries match polynomial 0x1021). */
+static u16 crctab[256] = {
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+};
+
+/* ----------------------------------------------------------------------- */
+/* basic i2c methods */
+/* basic i2c methods */
+
+/* Read one byte register over SMBus. Returns the byte (>= 0) on
+ * success, -EIO on transfer error. Kept non-static to preserve the
+ * original linkage. */
+s32 tw_i2c_read_byte(struct i2c_client *client, u8 reg)
+{
+	union i2c_smbus_data data;
+	int err;
+
+	err = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			     I2C_SMBUS_READ, reg,
+			     I2C_SMBUS_BYTE_DATA, &data);
+	return err ? -EIO : data.byte;
+}
+
+/*
+ * Switch the tw8836 register page (index TW_PAGE_REG) to the page
+ * encoded in the high byte of @reg, skipping the write when the chip
+ * is already on that page. Returns 0 on success, -EIO on i2c failure.
+ * NOTE(review): non-static, so the symbol is globally visible.
+ */
+s32 tw_update_page(struct i2c_client *client, u16 reg)
+{
+	int res;
+	u8 buf[2] = { TW_PAGE_REG, TW_PAGE(reg) };
+	u8 value = 0;
+	struct i2c_msg xfer[2] = {
+		/* 1st message: Request Register page value */
+		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &buf[0], },
+		/* 2nd message: Read requested value */
+		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &value } };
+
+	/* Read register page set */
+	res = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+	if (res != ARRAY_SIZE(xfer)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return -EIO;
+	}
+
+	/* if already on the wanted page, then returns (nothing to do) */
+	if (value == buf[1])
+		return 0;
+
+	/* Set the new page */
+	if (i2c_master_send(client, buf, ARRAY_SIZE(buf)) != ARRAY_SIZE(buf)) {
+		v4l_err(client, "%s:%d: i2c send failed\n",
+			__func__, __LINE__);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/* Firmware update Methods */
+
+/*
+ * MCU control allow to start or stop tw8836's MCU
+ * note: ctrl_word is either TW8836_MCU_HALT or TW8836_MCU_RERUN
+ */
+static s32 tw8836_mcu_ctrl(struct i2c_client *client, u8 ctrl_word)
+{
+	s32 res;
+	/* All registers below live on page 4. */
+	char page[2] = {
+		TW_INDEX(TW8836_PAGE_REG),
+		4
+	};
+	/* Magic unlock sequence (0x55 then 0xAA) written to the access
+	 * register must precede the MCU enable write. */
+	char magic1[2] = {
+		TW_INDEX(TW8836_ACCESS_REG),
+		TW8836_MCU_MAGIC_PATTERN_1
+	};
+	char magic2[2] = {
+		TW_INDEX(TW8836_ACCESS_REG),
+		TW8836_MCU_MAGIC_PATTERN_2
+	};
+	char dma_ctlw[2] = {
+		TW_INDEX(TW8836_EN_MCU),
+		ctrl_word
+	};
+	/* One atomic i2c transaction: page select, unlock, MCU control. */
+	struct i2c_msg xfer[4] = {
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = page},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = magic1},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = magic2},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = dma_ctlw},
+	};
+
+	res = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+	if (res != ARRAY_SIZE(xfer)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+	return 0;
+}
+
+/*
+ * tw8836_index_mode() setup index handling mode on tw8836 side :
+ * 0x00 = auto increment
+ * 0x20 = fixed (don't increment)
+ */
+static s32 tw8836_index_mode(struct i2c_client *client, u8 val)
+{
+	s32 res;
+	/* The index-mode register (0x06) lives on page 0; switch there,
+	 * write the mode, then return to page 4 for the DMA registers. */
+	u8 cmds[6] = {
+		TW_INDEX(TW8836_PAGE_REG), 0x00, /* 0 - page 0 */
+		0x06, val,
+		TW_INDEX(TW8836_PAGE_REG), 0x04, /* 4 - page 4 */
+	};
+
+	struct i2c_msg xfer[3] = {
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &cmds[0]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &cmds[2]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &cmds[4]},
+	};
+
+	res = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+	/* if read fails, then */
+	if (res != ARRAY_SIZE(xfer)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+	return 0;
+}
+
+/*
+ * tw8836_dma_cfg() method allow to configure tw8836's DMA
+ */
+/*
+ * Program the tw8836 XRAM<->SPI-flash DMA: @xram is the on-chip work
+ * area, @flash the 24-bit flash address, @size the byte count, @crtl
+ * the DMA control word, @cmd the SPI-EEPROM opcode and @busy the
+ * busy-control word that kicks the transfer off.
+ * Returns 0 on success, the i2c_transfer result on failure.
+ */
+static s32 tw8836_dma_cfg(struct i2c_client *client,
+		u16 xram, u32 flash, u32 size, u8 crtl, u8 cmd, u8 busy)
+{
+	s32 res;
+	u8 dma_busy[2] = { TW_INDEX(TW8836_DMA_FLASH_BUSY_CTRL_REG), busy };
+	u8 buf_magic[2] = { 0x0A, 0x00 };
+	/* Burst write of registers 0xF3..0xFE (auto-increment index). */
+	u8 dma_cfg[13] = {
+		/*INDEX*/TW_INDEX(TW8836_DMA_CTRL_REG),
+		/*F3*/crtl,
+		/*F4*/0x00,
+		/*F5*/TW_SHIFT(size, 16, 8),
+		/*F6*/TW_SHIFT(xram, 8, 8),
+		/*F7*/TW_SHIFT(xram, 0, 8),
+		/*F8*/TW_SHIFT(size, 8, 8),
+		/*F9*/TW_SHIFT(size, 0, 8),
+		/*FA*/cmd,
+		/*FB*/TW_SHIFT(flash, 16, 8),
+		/*FC*/TW_SHIFT(flash, 8, 8),
+		/*FD*/TW_SHIFT(flash, 0, 8),
+		/*FE*/0x00, };
+	struct i2c_msg xfer[3] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(dma_cfg),
+			.buf = dma_cfg },
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(buf_magic),
+			.buf = buf_magic },
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = ARRAY_SIZE(dma_busy),
+			.buf = dma_busy },
+	};
+
+	/* Configure DMA and start it */
+	res = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+	if (res != ARRAY_SIZE(xfer)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+	return 0;
+}
+
+/*
+ * tw8836_write_enable() method allow to command tw8836 to send WEN
+ * to SPI-EEPROM (connected to tw8836)
+ */
+/*
+ * Issue the SPI-EEPROM write-enable (WREN) command through the chip's
+ * DMA engine, then poll the busy flag until the command completes.
+ * Returns 0 on success, a negative error from the DMA setup or a
+ * failed status read.
+ * NOTE(review): the busy poll below has no iteration limit or delay —
+ * a wedged flash part would spin this loop forever; confirm whether a
+ * bounded timeout is wanted.
+ */
+static s32 tw8836_write_enable(struct i2c_client *client)
+{
+	s32 res;
+
+	v4l_dbg(1, debug, client, "%s send WREN\n", __func__);
+	res = tw8836_dma_cfg(client, TW8836_XRAM_WORKAREA, 0, 0,
+			0xC1, MX25L6435E_WREN, 0x03);
+	TW8836_CHECK_RETURN(res);
+
+	/* Wait until flashing block is over :
+	 * read TW8836_DMA_FLASH_BUSY_CTRL_REG[0] until it's set to '0'
+	 */
+	do {
+		res = tw_i2c_read_byte(
+			client,
+			(u8)TW8836_DMA_FLASH_BUSY_CTRL_REG);
+		TW8836_CHECK_RETURN(res);
+
+		v4l_dbg(1, debug, client,
+			" %s, busy=0x%x (wait for reg[0] = 0)\n",
+			__func__, res);
+	} while ((res & 0x01) != 0);
+
+	return 0;
+}
+
+/*
+ * tw8836_xram_write() - write up to TW8836_MAX_CACHE bytes into the
+ * tw8836 XRAM (XDATA memory) at @addr through the I2C DMA window.
+ *
+ * @client: i2c client of the tw8836
+ * @pData:  source buffer
+ * @addr:   destination address inside XRAM
+ * @iLen:   number of bytes to write; anything beyond TW8836_MAX_CACHE
+ *          is silently dropped (callers chunk the data accordingly)
+ *
+ * Returns 0 on success, a negative errno or short i2c transfer count
+ * on failure.
+ */
+static s32 tw8836_xram_write(struct i2c_client *client,
+		u8 *pData, u16 addr, u16 iLen)
+{
+	s32 res;
+	u32 count = 1;
+	u8 bit1;
+	/* index byte + payload; index selects the XMEM DMA data register */
+	u8 buf_data[TW8836_MAX_CACHE + 1];
+	/* XRAM destination address, split into high/low byte registers */
+	u8 xram_high[2] = {
+		TW_INDEX(TW8836_I2C_XMEM_DMA_HIGH_ADDR),
+		TW_SHIFT(addr, 8, 8) };
+	u8 xram_low[2] = {
+		TW_INDEX(TW8836_I2C_XMEM_DMA_LOW_ADDR),
+		TW_SHIFT(addr, 0, 8) };
+	u8 ctrl_reg[2] = {
+		TW_INDEX(TW8836_MODE_CTRL_REG),
+		0x00 };
+	u8 xram_access[2] = {
+		TW_INDEX(TW8836_MODE_CTRL_REG),
+		0x01 };
+	u8 commands[6] = {
+		TW_INDEX(TW8836_PAGE_REG), 0x00, /* 0 - page 0 */
+		0x06, 0x20, /* index mode (0x00:auto-inc, 0x20:fix) */
+		TW_INDEX(TW8836_PAGE_REG), 0x04, /* 4 - page 4 */
+	};
+
+	/* payload write sequence: address regs, data, then mode restore.
+	 * xfer[2].len is patched below once the payload size is known. */
+	struct i2c_msg xfer[7] = {
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = xram_high},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = xram_low},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = buf_data},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = ctrl_reg},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[0]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[2]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[4]},
+	};
+
+	/* preamble: page/index-mode setup, then request XMEM access and
+	 * read the mode register back */
+	struct i2c_msg xfer_xram[6] = {
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[0]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[2]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &commands[4]},
+		{.addr = client->addr, .flags = 0, .len = 2, .buf = &xram_access[0]},
+		{.addr = client->addr, .flags = 0, .len = 1, .buf = &xram_access[0]},
+		{.addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &bit1 },
+	};
+
+	v4l_dbg(1, debug, client, " %s enter\n", __func__);
+	v4l_dbg(1, debug, client, " %s access to xram...\n", __func__);
+
+	res = i2c_transfer(client->adapter, xfer_xram, ARRAY_SIZE(xfer_xram));
+	if (res != ARRAY_SIZE(xfer_xram)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+
+	/* Wait until I2C can access to XMEM:
+	 * read TW8836_MODE_CTRL_REG[1] until it's set
+	 * NOTE(review): no timeout — this spins forever if the chip
+	 * never grants access; consider bounding the loop.
+	 */
+	res = 0;
+	while ((res & 0x02) == 0) {
+		v4l_dbg(1, debug, client,
+			" wait XRAM ( read=0x%x, wait until != 0)\n",
+			res);
+		res = tw_i2c_read_byte(client, (u8) TW8836_MODE_CTRL_REG);
+		TW8836_CHECK_RETURN(res);
+	}
+	v4l_dbg(1, debug, client, " %s ... done\n", __func__);
+
+	/* fix mode */
+	v4l_dbg(1, debug, client, " %s set fix mode...\n", __func__);
+	tw8836_index_mode(client, 0x20);
+	v4l_dbg(1, debug, client, " %s ... done\n", __func__);
+
+	/* Prepare buffer to send over i2c */
+	v4l_dbg(1, debug, client, " %s prepare buffer to send...\n", __func__);
+	buf_data[0] = TW_INDEX(TW8836_I2C_XMEM_DMA_DATA_REG);
+	count = 1;
+	/* copy at most TW8836_MAX_CACHE payload bytes after the index byte */
+	while ((count < (TW8836_MAX_CACHE + 1)) && (count < (iLen + 1))) {
+		buf_data[count] = pData[count - 1];
+		count++;
+	}
+	xfer[2].len = count;
+	v4l_dbg(1, debug, client, " %s ... done\n", __func__);
+
+	v4l_dbg(1, debug, client, " %s send %d bytes...\n", __func__, count);
+	/* switch the queued index-mode command back to auto-increment */
+	commands[3] = 0;
+
+	res = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+	if (res != ARRAY_SIZE(xfer)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+
+	v4l_dbg(1, debug, client, " %s ...done\n", __func__);
+
+	/* auto inc */
+	v4l_dbg(1, debug, client, " %s set to auto inc index\n", __func__);
+	tw8836_index_mode(client, 0x00);
+
+	v4l_dbg(1, debug, client, " %s leave\n", __func__);
+	return 0;
+}
+
+/*
+ * tw8836_program_by_dma() - flash one data block, previously staged in
+ * XRAM, into the SPI-EEPROM connected to the tw8836.
+ *
+ * @client: i2c client of the tw8836
+ * @src:    XRAM source address (the staging work area)
+ * @dest:   destination offset in the SPI flash
+ * @iLen:   number of bytes to program
+ *
+ * Issues a Page Program (PP) DMA transfer, then polls the busy flag.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static s32 tw8836_program_by_dma(struct i2c_client *client,
+		u16 src, u32 dest, u32 iLen)
+{
+	s32 res;
+
+	v4l_dbg(1, debug, client, " configure dma...\n %s send PP\n",
+		__func__);
+
+	/* NOTE(review): @src is unused — the DMA source is hard-wired to
+	 * TW8836_XRAM_WORKAREA; confirm this matches all callers. */
+	res = tw8836_dma_cfg(client, TW8836_XRAM_WORKAREA, dest, iLen,
+			0xC4, MX25L6435E_PP, 0x07);
+	TW8836_CHECK_RETURN(res);
+
+	v4l_dbg(1, debug, client, " dma started !\n");
+	/*
+	 * Wait until flashing block is over :
+	 * read TW8836_DMA_FLASH_BUSY_CTRL_REG[0] until it's set to '0'
+	 */
+	do {
+		res = tw_i2c_read_byte(client,
+				(u8) TW8836_DMA_FLASH_BUSY_CTRL_REG);
+		TW8836_CHECK_RETURN(res);
+		v4l_dbg(1, debug, client,
+			" wait flash done (value:0x%x, expected bit0 = 0)\n",
+			res);
+	} while ((res & 0x01) != 0);
+
+	return 0;
+}
+
+/*
+ * tw8836_erase() - erase the SPI-EEPROM connected to the tw8836.
+ *
+ * @client:    i2c client of the tw8836
+ * @erase_cmd: requested erase command (currently unused: a full chip
+ *             erase, MX25L6435E_CE_2, is always issued)
+ * @addr:      erase start address (currently unused for chip erase)
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static s32 tw8836_erase(struct i2c_client *client, u8 erase_cmd, u32 addr)
+{
+	s32 res;
+
+	v4l_dbg(1, debug, client, " %s enter\n", __func__);
+
+	/* auto inc */
+	res = tw8836_index_mode(client, 0x00);
+	TW8836_CHECK_RETURN(res);
+
+	/* WREN command: the flash rejects the erase unless write-enable
+	 * succeeded, so its status must be checked too. */
+	res = tw8836_write_enable(client);
+	TW8836_CHECK_RETURN(res);
+
+	/* Chip erase command */
+	v4l_dbg(1, debug, client, " %s send Chip erase\n", __func__);
+	res = tw8836_dma_cfg(client, TW8836_XRAM_WORKAREA, 0, 0,
+			0xC1, MX25L6435E_CE_2, 0x07);
+	TW8836_CHECK_RETURN(res);
+
+	/* Wait until flashing block is over :
+	 * read TW8836_DMA_FLASH_BUSY_CTRL_REG[0] until it's set to '0'
+	 */
+	do {
+		res = tw_i2c_read_byte(client,
+				(u8) TW8836_DMA_FLASH_BUSY_CTRL_REG);
+		TW8836_CHECK_RETURN(res);
+		v4l_dbg(1, debug, client,
+			" %s wait for complete erase "
+			"(value:0x%x, expected bit0: 0)\n",
+			__func__, res);
+	} while ((res & 0x01) != 0);
+
+	return 0;
+}
+
+/*
+ * tw8836_crc_check() - retrieve the CRC16 computed by the tw8836 over a
+ * range of the SPI-EEPROM.
+ *
+ * @client: i2c client of the tw8836
+ * @addr:   start offset in the SPI flash
+ * @size:   number of bytes covered by the CRC
+ * @crc16:  out parameter, receives the 16-bit CRC on success
+ *
+ * Starts a READ DMA over the range (which updates the chip's CRC
+ * registers), polls for completion, then reads the two CRC bytes.
+ * Returns 0 on success, a negative errno or short transfer count
+ * otherwise.
+ */
+static s32 tw8836_crc_check(struct i2c_client *client,
+		u32 addr, u32 size, u16 *crc16)
+{
+	s32 res;
+	u8 crc_msb, crc_lsb;
+	u8 reg_crc_h = TW_INDEX(TW8836_CRC16_H_REG);
+	u8 reg_crc_l = TW_INDEX(TW8836_CRC16_L_REG);
+
+	/* write index / read value pairs for the two CRC registers */
+	struct i2c_msg xfer_crc[4] = {
+		{.addr = client->addr, .flags = 0, .len = 1, .buf = &reg_crc_h},
+		{.addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &crc_msb},
+		{.addr = client->addr, .flags = 0, .len = 1, .buf = &reg_crc_l },
+		{.addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &crc_lsb },
+	};
+
+	v4l_dbg(1, debug, client, " %s enter\n", __func__);
+
+	/* auto inc */
+	tw8836_index_mode(client, 0x00);
+
+	/* configure dma and start it */
+	res = tw8836_dma_cfg(client, TW8836_XRAM_WORKAREA, addr, size,
+			0xE4, MX25L6435E_READ, 0x05);
+	TW8836_CHECK_RETURN(res);
+
+	/* Wait until read is finished :
+	 * Wait until TW8836_DMA_FLASH_BUSY_CTRL_REG[0] is set to '0' */
+	do {
+		res = tw_i2c_read_byte(client,
+				(u8) TW8836_DMA_FLASH_BUSY_CTRL_REG);
+		TW8836_CHECK_RETURN(res);
+		v4l_dbg(1, debug, client, " %s wait for busy done ! (0x%x)\n",
+			__func__, res);
+	} while ((res & 0x01) != 0);
+
+	/* Read CRC result*/
+	res = i2c_transfer(client->adapter, xfer_crc, ARRAY_SIZE(xfer_crc));
+	if (res != ARRAY_SIZE(xfer_crc)) {
+		v4l_err(client, "%s: i2c transfer failed (%d)\n",
+			__func__, res);
+		return res;
+	}
+
+	/* assemble MSB:LSB into the 16-bit result */
+	(*crc16) = ((crc_msb << 8) & 0xFF00);
+	(*crc16) |= ((crc_lsb) & 0x00FF);
+
+	v4l_info(client, " %s leave with 0x%x\n", __func__, *crc16);
+
+	/* return crc16_val */
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * ISP (In System Programming) through I2C of TW8836:
+ *
+ * TW8836 does not have an internal ROM code But it provides I2C DMA
+ * for SPI flash programming.
+ * Here is the sequence to use for programming external flash :
+ *
+ * MCU Halt                                                    MCU Rerun
+ *  |                                                              |
+ *  +->SPI Erase-+->XRAM Write(256B max)-->I²C SPI DMA-+->CRC16 check-+
+ *               |                                     |
+ *               +-<---<---<---<---<---<---<---<---<---+
+ *                Loop for each 256 Bytes block to flash.
+ *
+ * - 'MCU Halt' done through tw8836_mcu_ctrl(TW8836_MCU_HALT) call.
+ * - 'SPI Erase' done through tw8836_erase(TW8836_DMA_CMD_CHIP_ERASE) call.
+ * - 'XRAM Write' done through tw8836_xram_write() with data up to 256 bytes.
+ * - 'I²C SPI DMA' done through tw8836_program_by_dma() with data length
+ *   given in 'XRAM Write'.
+ * - 'CRC16 check' done through tw8836_crc_check() call.
+ * - 'MCU Rerun' done through tw8836_mcu_ctrl(TW8836_MCU_RERUN) call.
+ */
+static int tw8836_upgrade_firmware(struct tw8836 *data)
+{
+	int error;
+	/* size_t: firmware images can exceed 64 KiB and ->size is size_t;
+	 * signed int offsets would trigger sign-compare truncation */
+	size_t idx, remain, size;
+	u16 crc16_in_chip = 0;
+
+	v4l_info(data->client, "Upgrade firmware");
+
+	error = tw8836_mcu_ctrl(data->client, (u8) TW8836_MCU_HALT);
+	TW8836_CHECK_RETURN(error);
+
+	error = tw8836_erase(data->client, (u8) TW8836_DMA_CMD_CHIP_ERASE, 0);
+	TW8836_CHECK_RETURN(error);
+
+	/* %zu/%zx: ->size is size_t, %d/%x would be a format mismatch */
+	v4l_dbg(1, debug, data->client, " firmware length %zu (0x%zx)\n",
+		data->tw8836_fw->size, data->tw8836_fw->size);
+
+	for (idx = 0; idx < data->tw8836_fw->size; idx += TW8836_MAX_CACHE) {
+		remain = data->tw8836_fw->size - idx;
+		if (remain < TW8836_MAX_CACHE)
+			size = remain;
+		else
+			size = TW8836_MAX_CACHE;
+
+		/* Write 256 max bytes in XRAM (XDATA memory) */
+		error = tw8836_xram_write(data->client,
+				(u8 *) (data->tw8836_fw->data + idx),
+				TW8836_XRAM_WORKAREA, size);
+		TW8836_CHECK_RETURN(error);
+
+		error = tw8836_write_enable(data->client);
+		TW8836_CHECK_RETURN(error);
+
+		/* Proceed DMA transfer data block from XRAM to SPI-Flash */
+		error = tw8836_program_by_dma(data->client,
+				TW8836_XRAM_WORKAREA,
+				TW8836_FIRMWARE_ADDR + idx, size);
+		TW8836_CHECK_RETURN(error);
+
+		v4l_dbg(1, debug, data->client, " progression = %zu %%\n",
+			((idx * 100) / data->tw8836_fw->size));
+	}
+
+	/* Verify the whole image against the chip-computed CRC16 */
+	error = tw8836_crc_check(data->client,
+			TW8836_FIRMWARE_ADDR,
+			data->tw8836_fw->size,
+			&crc16_in_chip);
+	TW8836_CHECK_RETURN(error);
+
+	error = tw8836_mcu_ctrl(data->client, TW8836_MCU_RERUN);
+	TW8836_CHECK_RETURN(error);
+
+	return 0;
+}
+
+/*
+ * tw8836_initialize() - compare the CRC16 of the firmware stored in the
+ * SPI flash against the bundled firmware image, reflashing the chip if
+ * they differ.
+ *
+ * Returns 0 on success, a negative errno otherwise. The requested
+ * firmware is always released before returning.
+ */
+static s32 tw8836_initialize(struct tw8836 *data)
+{
+	s32 error = 0;
+	u16 iCRC16_inChip = 0, iCRC16_fw = 0;
+	/* size_t: a u16 counter would truncate firmware images >= 64 KiB
+	 * (the MX25L6435E flash holds 8 MiB), corrupting the CRC loop */
+	size_t count, index = 0;
+	struct device *dev = &data->client->dev;
+
+	/* Get firmware */
+	error = request_firmware(&data->tw8836_fw, TW8836_FIRMWARE_NAME, dev);
+	if (error < 0) {
+		v4l_err(data->client, "Failed to request firmware %s\n",
+			TW8836_FIRMWARE_NAME);
+		return error;
+	}
+
+	/* Check firmware CRC16 */
+	error = tw_update_page(data->client, TW8836_CRC16_H_REG);
+	if (error != 0)
+		goto rlse_firmware;
+
+	error = tw8836_crc_check(data->client, TW8836_FIRMWARE_ADDR,
+			data->tw8836_fw->size, &iCRC16_inChip);
+	if (error != 0)
+		goto rlse_firmware;
+
+	v4l_dbg(1, debug, data->client, " Compute firmware CRC16 ...\n");
+	/* Compute CRC on firmware (given by Intersil) */
+	count = data->tw8836_fw->size;
+	while (count--) {
+		iCRC16_fw =
+			(iCRC16_fw << 8) ^
+			crctab[(iCRC16_fw >> 8) ^
+			data->tw8836_fw->data[index++]];
+	}
+	v4l_dbg(1, debug, data->client, " ... Done !\n");
+
+	/* if different CRC, then upgrade is needed */
+	if (iCRC16_inChip != iCRC16_fw) {
+		v4l_err(data->client,
+			"CRC16 in chips : 0x%04x\tCRC16 firmware : 0x%04x\n",
+			iCRC16_inChip, iCRC16_fw);
+		error = tw8836_upgrade_firmware(data);
+	} else {
+		v4l_info(data->client, "firmware already up-to-date (0x%04x)",
+			iCRC16_inChip);
+	}
+
+rlse_firmware:
+	release_firmware(data->tw8836_fw);
+	return error;
+}
+
+/* ----------------------------------------------------------------------- */
+/* V4L2 dummy ops */
+
+/* v4l2 video op stub: nothing to do on stream start/stop. */
+static int tw8836_dummy_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	return 0;
+}
+
+/* v4l2 video op: report a fixed nominal 1/30 s frame interval. */
+static int tw8836_g_frame_interval(struct v4l2_subdev *sd,
+		struct v4l2_subdev_frame_interval *fi)
+{
+	if (fi == NULL)
+		return -EINVAL;
+
+	fi->interval.denominator = 30;
+	fi->interval.numerator = 1;
+	return 0;
+}
+
+/*
+ * __tw8836_get_fmt() - return the active or try format for @pad,
+ * or NULL if @pad is out of range.
+ */
+static struct v4l2_mbus_framefmt *
+__tw8836_get_fmt(struct tw8836 *tw8836, struct v4l2_subdev_fh *fh,
+		unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+	/* valid pads are 0 .. TW8836_NUM_PADS - 1; '>' would let
+	 * pad == TW8836_NUM_PADS index one past format[] */
+	if (pad >= TW8836_NUM_PADS)
+		return NULL;
+
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(fh, pad);
+	else
+		return &tw8836->format[pad];
+}
+
+/* v4l2 pad op: report the current (or try) pad format. */
+static int tw8836_get_fmt(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt)
+{
+	struct tw8836 *tw8836 = container_of(sd, struct tw8836, subdev);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __tw8836_get_fmt(tw8836, fh, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	fmt->format = *cur;
+	return 0;
+}
+
+/* v4l2 pad op: store the requested format unmodified (dummy set). */
+static int tw8836_set_fmt(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt)
+{
+	struct tw8836 *tw8836 = container_of(sd, struct tw8836, subdev);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __tw8836_get_fmt(tw8836, fh, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	*cur = fmt->format;
+	return 0;
+}
+
+/*
+ * v4l2 pad op: enumerate supported mbus codes — only the configured
+ * code of the requested pad is exposed.
+ */
+static int tw8836_enum_mbus_code(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct tw8836 *tw8836 = container_of(sd, struct tw8836, subdev);
+
+	/* bound-check the pad before indexing format[] */
+	if (code->index || code->pad >= TW8836_NUM_PADS)
+		return -EINVAL;
+
+	code->code = tw8836->format[code->pad].code;
+	return 0;
+}
+
+/* no core ops are implemented; kept so .core stays a valid pointer */
+static struct v4l2_subdev_core_ops tw8836_core_ops;
+
+static struct v4l2_subdev_video_ops tw8836_video_ops = {
+	.s_stream = tw8836_dummy_s_stream,
+	.g_frame_interval = tw8836_g_frame_interval,
+};
+
+static struct v4l2_subdev_pad_ops tw8836_pad_ops = {
+	.get_fmt = tw8836_get_fmt,
+	.set_fmt = tw8836_set_fmt,
+	.enum_mbus_code = tw8836_enum_mbus_code,
+};
+
+/* subdev op table registered in tw8836_probe() */
+static const struct v4l2_subdev_ops tw8836_ops = {
+	.core = &tw8836_core_ops,
+	.video = &tw8836_video_ops,
+	.pad = &tw8836_pad_ops,
+};
+
+/* ----------------------------------------------------------------------- */
+/*
+ * tw8836_probe() - i2c probe: verify/upgrade the SPI flash firmware,
+ * then register the v4l2 subdevice and its media entity.
+ *
+ * Optional platform data (a struct v4l2_mbus_framefmt) overrides the
+ * default 720x480 interlaced UYVY output format.
+ */
+static int __devinit tw8836_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct tw8836 *tw8836;
+	struct v4l2_subdev *sd;
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_mbus_framefmt *pdata = client->dev.platform_data;
+	int ret;
+
+	/* the driver relies on SMBus byte-data transfers */
+	if (!i2c_check_functionality(client->adapter,
+				I2C_FUNC_SMBUS_BYTE_DATA)) {
+		ret = -EIO;
+		goto ei2c;
+	}
+
+	/* allocate context memory */
+	tw8836 = kzalloc(sizeof(*tw8836), GFP_KERNEL);
+	if (!tw8836) {
+		dev_err(&client->dev, "Failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto enomem;
+	}
+
+	/* Keep client in context */
+	tw8836->client = client;
+
+	/* Initialize tw8836 driver (firmware CRC check / upgrade) */
+	ret = tw8836_initialize(tw8836);
+	if (ret != 0)
+		goto efwfail;
+
+	/* initialize V4L2 sub device */
+	format = &tw8836->format[TW8836_OUT_PAD];
+	if (pdata) {
+		format->width = pdata->width;
+		format->height = pdata->height;
+		format->field = pdata->field;
+		format->code = pdata->code;
+		format->colorspace = pdata->colorspace;
+	} else {
+		/* default: NTSC-sized interlaced UYVY */
+		format->width = 720;
+		format->height = 480;
+		format->field = V4L2_FIELD_INTERLACED;
+		format->code = V4L2_MBUS_FMT_UYVY8_2X8;
+		format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+	}
+	sd = &tw8836->subdev;
+	v4l2_i2c_subdev_init(sd, client, &tw8836_ops);
+
+	/* create node */
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	/* pad supports sourcing data */
+	tw8836->pad[TW8836_OUT_PAD].flags = MEDIA_PAD_FL_SOURCE;
+
+	ret = media_entity_init(&sd->entity, TW8836_NUM_PADS, tw8836->pad, 0);
+	if (ret < 0) {
+		v4l_err(client, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	v4l_info(client, "entity initialized, dummy\n");
+	return ret;
+
+emedia:
+efwfail:
+	kfree(tw8836);
+enomem:
+ei2c:
+	return ret;
+}
+
+/* i2c remove: unregister the subdev and release all resources. */
+static int __devexit tw8836_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw8836 *data = container_of(sd, struct tw8836, subdev);
+
+	v4l2_device_unregister_subdev(&data->subdev);
+	media_entity_cleanup(&data->subdev.entity);
+	kfree(data);
+
+	return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/* supported device: a single "tw8836" i2c id */
+static struct i2c_device_id tw8836_id[] = { { "tw8836", 0 }, { } };
+
+MODULE_DEVICE_TABLE(i2c, tw8836_id);
+
+static struct i2c_driver tw8836_driver = { .driver = { .owner = THIS_MODULE,
+		.name = "tw8836", }, .id_table = tw8836_id, .probe =
+		tw8836_probe, .remove = tw8836_remove, };
+
+module_i2c_driver(tw8836_driver);
+
+MODULE_AUTHOR("Nicolas Laclau <nicolas.laclau@parrot.com>");
+MODULE_DESCRIPTION("Techwell 8836 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/tw9990.c b/drivers/media/video/tw9990.c
new file mode 100644
index 00000000000000..da5685578e870d
--- /dev/null
+++ b/drivers/media/video/tw9990.c
@@ -0,0 +1,625 @@
+/* tw9990.c
+ *
+ * Driver for techwell tw9990
+ * It handles 2 inputs, composite and bt656 input
+ * It handles one output, which is bt656
+ *
+ * Author : Julien BERAUD <julien.beraud@parrot.com>
+ *
+ * Date : 23/04/2014
+ *
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <media/tw9990.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+
+MODULE_AUTHOR("Julien BERAUD <julien.beraud@parrot.com>");
+MODULE_DESCRIPTION("kernel driver for tw9990");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+#define TW9990_NUM_INPUTS 2
+
+#define TW9990_CVBS_WIDTH 720
+#define TW9990_CVBS_PAL_HEIGHT 576
+#define TW9990_CVBS_NTSC_HEIGHT 480
+
+/* bit helpers — arguments fully parenthesized so expressions such as
+ * TW_BIT_SHIFT_L(a ? 1 : 2, n) expand correctly */
+#define TW_BIT_SHIFT_L(val, shift) ((val) << (shift))
+#define TW_MASK(bits) (TW_BIT_SHIFT_L(1, (bits)) - 1)
+
+/* registers */
+#define TW9990_CHIP_STATUS 0x01
+#define TW9990_IN_FORM 0x02
+#define TW9990_OP_FORM 0x03
+#define TW9990_CMN_MODE_CLAMP 0x0D
+#define TW9990_AN_CTRL_2 0x1A
+#define TW9990_STD_SEL 0x1C
+#define TW9990_STD_RECOG 0x1D
+#define TW9990_SYNCT 0x25
+#define TW9990_MISC1_CTRL_1 0x2D
+#define TW9990_SHORT_DIAG 0xAF
+#define TW9990_INT2_ENABLE 0xB1
+#define TW9990_INT2_RST 0xB4
+#define TW9990_INT2_RAW_STATUS 0xB7
+#define TW9990_INT2_STATUS 0xBA
+
+/* indices into struct tw9990::config, one per cached register */
+enum {
+	TW9990_IN_FORM_ID = 0,
+	TW9990_OP_FORM_ID,
+	TW9990_CMN_MODE_CLAMP_ID,
+	TW9990_AN_CTRL_2_ID,
+	TW9990_SYNCT_ID,
+	TW9990_MISC1_CTRL_1_ID,
+	TW9990_MAX_IDS
+};
+
+/* Register maps */
+struct reg_cfg {
+	unsigned char reg;	/* register address */
+	unsigned char val;	/* value to program */
+};
+
+/* tw9990 data structure */
+struct tw9990 {
+	struct v4l2_subdev sd;
+	struct i2c_client *i2c_client;
+	struct media_pad pad;		/* single source pad */
+	struct v4l2_mbus_framefmt format;
+	struct v4l2_subdev_frame_interval frame_interval;
+	int is_streaming;		/* tracked in s_stream() */
+	int (*set_power)(int on);	/* optional board power callback */
+	int irq;
+	int differential_input;		/* TW9990_DIFFERENTIAL_* */
+	/* cached register values, applied on stream start */
+	struct reg_cfg config[TW9990_MAX_IDS];
+};
+
+static inline struct tw9990 *to_tw9990(struct v4l2_subdev *sd)
+{
+	return container_of(sd, struct tw9990, sd);
+}
+
+/* DV timings reported for the detected analog standard */
+static struct v4l2_dv_timings tw9990_ntsc_timings = V4L2_DV_BT_CEA_720X480I59_94;
+
+
+static struct v4l2_dv_timings tw9990_pal_timings = V4L2_DV_BT_CEA_720X576I50;
+
+/* i2c transfer functions */
+
+/* Read one byte from register @command over SMBus.
+ * Returns the byte value, or -EIO on transfer failure. */
+static s32 tw9990_i2c_read_byte(struct tw9990 *tw9990,
+				u8 command)
+{
+	struct i2c_client *client = tw9990->i2c_client;
+	union i2c_smbus_data data;
+	int err;
+
+	err = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+			I2C_SMBUS_READ, command,
+			I2C_SMBUS_BYTE_DATA, &data);
+	if (err)
+		return -EIO;
+
+	return data.byte;
+}
+
+/* Write @value to register @command, retrying up to 3 times.
+ * Returns 0 on success, a negative errno on failure (also logged). */
+static s32 tw9990_i2c_write_byte(struct tw9990 *tw9990,
+				 u8 command,
+				 u8 value)
+{
+	struct i2c_client *client = tw9990->i2c_client;
+	union i2c_smbus_data data;
+	int attempt;
+	int err = 0;
+
+	data.byte = value;
+	for (attempt = 0; attempt < 3; attempt++) {
+		err = i2c_smbus_xfer(client->adapter, client->addr,
+				client->flags,
+				I2C_SMBUS_WRITE, command,
+				I2C_SMBUS_BYTE_DATA, &data);
+		if (!err)
+			break;
+	}
+	if (err < 0)
+		v4l2_err(client, "error writing %02x, %02x, %02x\n",
+				client->addr, command, value);
+	return err;
+}
+
+/*
+ * tw9990_write_reg_array() - program @nb register/value pairs from
+ * @array, stopping at the first failure.
+ *
+ * Returns 0 on success, the failing write's error code otherwise.
+ */
+static int tw9990_write_reg_array(struct tw9990 *tw9990,
+				struct reg_cfg *array, int nb)
+{
+	s32 ret;
+	int i;
+	struct v4l2_subdev *sd = &tw9990->sd;
+
+	for (i = 0; i < nb; i++) {
+		ret = tw9990_i2c_write_byte(tw9990,
+					array[i].reg,
+					array[i].val);
+		if (ret < 0) {
+			/* value first, register second — matches the
+			 * format string (was swapped before) */
+			v4l2_err(sd, "failed to write 0x%X to reg 0x%X\n",
+				array[i].val, array[i].reg);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* video operations */
+
+/* Return the active or try format for pad 0; NULL for any other pad. */
+static struct v4l2_mbus_framefmt *
+__tw9990_get_fmt(struct tw9990 *tw9990,
+		struct v4l2_subdev_fh *fh,
+		unsigned int pad,
+		enum v4l2_subdev_format_whence which)
+{
+	if (pad != 0)
+		return NULL;
+
+	if (which != V4L2_SUBDEV_FORMAT_TRY)
+		return &tw9990->format;
+
+	return v4l2_subdev_get_try_format(fh, pad);
+}
+
+/* v4l2 pad op: report the current (or try) pad format. */
+static int tw9990_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			struct v4l2_subdev_format *fmt)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+	struct v4l2_mbus_framefmt *cur;
+
+	cur = __tw9990_get_fmt(tw9990, fh, fmt->pad, fmt->which);
+	if (!cur)
+		return -EINVAL;
+
+	fmt->format = *cur;
+	return 0;
+}
+
+/*
+ * v4l2 pad op: set the pad format.
+ * Only 720x480 (NTSC) and 720x576 (PAL) CVBS geometries are accepted;
+ * the colorspace is forced to SMPTE170M.
+ */
+static int tw9990_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+			struct v4l2_subdev_format *fmt)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+	int ret = 0;
+	struct v4l2_mbus_framefmt *format;
+
+	format = __tw9990_get_fmt(tw9990, fh, fmt->pad, fmt->which);
+	if (format == NULL)
+		return -EINVAL;
+
+	/* named CVBS geometry constants instead of magic numbers */
+	if (fmt->format.width != TW9990_CVBS_WIDTH ||
+	    (fmt->format.height != TW9990_CVBS_NTSC_HEIGHT &&
+	     fmt->format.height != TW9990_CVBS_PAL_HEIGHT))
+		return -EINVAL;
+
+	fmt->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	*format = fmt->format;
+	return ret;
+}
+
+/* v4l2 pad op: the only enumerable mbus code is the configured one. */
+static int tw9990_enum_mbus_code(struct v4l2_subdev *sd,
+				struct v4l2_subdev_fh *fh,
+				struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+
+	if (code->index != 0)
+		return -EINVAL;
+
+	code->code = tw9990->format.code;
+	return 0;
+}
+
+/*
+ * tw9990_setup() - build the cached register configuration from
+ * platform data. Nothing is written to the chip here; the cache is
+ * pushed by tw9990_enable() on stream start.
+ */
+static void tw9990_setup(struct tw9990 *tw9990,
+			struct tw9990_platform_data *pdata)
+{
+	/* Input format configuration: by default on MUX0 */
+	tw9990->config[TW9990_IN_FORM_ID].reg = TW9990_IN_FORM;
+	tw9990->config[TW9990_IN_FORM_ID].val = (0x40 |
+		((pdata->differential_input == TW9990_DIFFERENTIAL_ENABLED) ?
+			0x80 : 0));
+
+	/* Output format: configure ITU-656 data sequence format */
+	tw9990->config[TW9990_OP_FORM_ID].reg = TW9990_OP_FORM;
+	tw9990->config[TW9990_OP_FORM_ID].val = 0xA0;
+
+	/* Common mode clamp: based on platform data only */
+	tw9990->config[TW9990_CMN_MODE_CLAMP_ID].reg = TW9990_CMN_MODE_CLAMP;
+	tw9990->config[TW9990_CMN_MODE_CLAMP_ID].val =
+		pdata->common_mode_clamp;
+
+	/* Analog control II: configure anti-alias and power saving
+	 * for Y/C channels, based on platform data.
+	 */
+	tw9990->config[TW9990_AN_CTRL_2_ID].reg = TW9990_AN_CTRL_2;
+	tw9990->config[TW9990_AN_CTRL_2_ID].val =
+		(pdata->anti_aliasing == TW9990_ANTI_ALIAS_ENABLED) ?
+			0x0A : 0;
+	tw9990->config[TW9990_AN_CTRL_2_ID].val |=
+		(pdata->power_saving == TW9990_POWER_SAVE_ENABLED) ?
+			0x05 : 0;
+
+	/* Sync amplitude (SYNCT) */
+	tw9990->config[TW9990_SYNCT_ID].reg = TW9990_SYNCT;
+	if (pdata->synct_mode == TW9990_SYNCT_FORCED)
+		tw9990->config[TW9990_SYNCT_ID].val =
+			pdata->sync_pulse_amplitude & 0x7F;
+	else
+		tw9990->config[TW9990_SYNCT_ID].val = 0xB8; /* default value */
+
+	/* Miscellaneous control I: configure range limited for Luma and
+	 * Chroma, output data range are limited to 16~235 for Y and
+	 * 16~240 for Cb/Cr.
+	 */
+	tw9990->config[TW9990_MISC1_CTRL_1_ID].reg = TW9990_MISC1_CTRL_1;
+	tw9990->config[TW9990_MISC1_CTRL_1_ID].val = 0x1C;
+}
+
+/* Push the cached register configuration to the chip. */
+static int tw9990_enable(struct tw9990 *tw9990)
+{
+	int nb = ARRAY_SIZE(tw9990->config);
+
+	return tw9990_write_reg_array(tw9990, tw9990->config, nb);
+}
+
+/* v4l2 video op: program the chip on stream start; track the state. */
+static int tw9990_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+	int ret;
+
+	ret = enable ? tw9990_enable(tw9990) : 0;
+	tw9990->is_streaming = !!enable;
+
+	return ret;
+}
+
+/* v4l2 video op: report the nominal frame interval. */
+static int tw9990_g_frame_interval(struct v4l2_subdev *sd,
+				struct v4l2_subdev_frame_interval *fi)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+
+	if (fi == NULL)
+		return -EINVAL;
+
+	fi->interval = tw9990->frame_interval.interval;
+	return 0;
+}
+
+/*
+ * v4l2 video op: detect the incoming analog standard and report the
+ * matching BT.656 DV timings.
+ *
+ * The pixel clock and horizontal timings are doubled because YUYV is
+ * transmitted over 8 bits (two cycles per pixel).
+ *
+ * Returns 0 on success, -ENOLINK on video loss, -EPIPE on an invalid
+ * standard register value, or a negative errno on i2c failure.
+ */
+static int tw9990_query_dv_timings(struct v4l2_subdev *sd,
+				struct v4l2_dv_timings *timings)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+	s32 std_sel, status;
+	unsigned char std;
+
+	/* keep i2c results in s32: storing them in unsigned char made a
+	 * negative errno (bit7 set) look like "detection in progress"
+	 * and spun this loop forever */
+	std_sel = tw9990_i2c_read_byte(tw9990, TW9990_STD_SEL);
+	if (std_sel < 0)
+		return std_sel;
+	while ((std_sel & 0x80) != 0) {
+		v4l2_info(sd, "waiting for std detection\n");
+		msleep(500);
+		std_sel = tw9990_i2c_read_byte(tw9990, TW9990_STD_SEL);
+		if (std_sel < 0)
+			return std_sel;
+	}
+
+	status = tw9990_i2c_read_byte(tw9990, TW9990_CHIP_STATUS);
+	if (status < 0)
+		return status;
+	if (status & 0x80) {
+		v4l2_info(sd, "Video loss\n");
+		return -ENOLINK;
+	}
+
+	std = (std_sel & 0x70) >> 4;
+
+	switch (std) {
+	case 0:
+	case 3:
+	case 4:
+		*timings = tw9990_ntsc_timings;
+		break;
+	case 1:
+	case 2:
+	case 5:
+	case 6:
+		*timings = tw9990_pal_timings;
+		break;
+	case 7:
+		v4l2_info(sd, "no standard detected\n");
+		memset(timings, 0, sizeof(*timings));
+		return 0;
+	default:
+		v4l2_err(sd, "invalid value for register 0x%X:0x%X\n",
+			TW9990_STD_SEL, std_sel);
+		return -EPIPE;
+	}
+
+	/* pclk doubled for 8 bits transmission of YUYV
+	   so horizontal timings are doubled */
+	timings->bt.pixelclock = 2 * timings->bt.pixelclock;
+	timings->bt.hfrontporch = 2 * timings->bt.hfrontporch;
+	timings->bt.hsync = 2 * timings->bt.hsync;
+	timings->bt.hbackporch = 2 * timings->bt.hbackporch;
+	return 0;
+}
+
+/*
+ * v4l2 video op: select the active analog input mux. Only the cached
+ * IN_FORM configuration is updated; it is applied on the next stream
+ * start through tw9990_enable().
+ */
+int tw9990_s_routing(struct v4l2_subdev *sd, u32 in, u32 out, u32 config)
+{
+	struct tw9990 *tw9990 = to_tw9990(sd);
+
+	if (!tw9990 || in >= TW9990_NUM_INPUTS)
+		return -EINVAL;
+
+	/* Clear the YSEL field (bits 3:2) of IN_FORM */
+	tw9990->config[TW9990_IN_FORM_ID].val &= ~(TW_MASK(2) << 2);
+
+	/* Per the tw9990 data-sheet:
+	 * differential mode: YSEL takes 0 or 2 (MUX0+/- or MUX2+/-);
+	 * single-ended mode: YSEL takes 0 or 1 (MUX0 or MUX1).
+	 */
+	tw9990->config[TW9990_IN_FORM_ID].val |=
+		in << (tw9990->differential_input ? 3 : 2);
+
+	return 0;
+}
+
+/* v4l2 core op: only source-change events can be subscribed. */
+int tw9990_subscribe_event(struct v4l2_subdev *subdev,
+				struct v4l2_fh *fh,
+				struct v4l2_event_subscription *sub)
+{
+	return v4l2_src_change_event_subscribe(fh, sub);
+}
+
+/* v4l2 core op: generic event unsubscription. */
+int tw9990_unsubscribe_event(struct v4l2_subdev *subdev,
+				struct v4l2_fh *fh,
+				struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static const struct v4l2_subdev_core_ops tw9990_core_ops = {
+	.subscribe_event = tw9990_subscribe_event,
+	.unsubscribe_event = tw9990_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops tw9990_video_ops = {
+	.s_stream = tw9990_s_stream,
+	.g_frame_interval = tw9990_g_frame_interval,
+	.query_dv_timings = tw9990_query_dv_timings,
+	.s_routing = tw9990_s_routing,
+};
+
+static const struct v4l2_subdev_pad_ops tw9990_pad_ops = {
+	.get_fmt = tw9990_get_fmt,
+	.set_fmt = tw9990_set_fmt,
+	.enum_mbus_code = tw9990_enum_mbus_code,
+};
+
+/* subdev op table registered in tw9990_probe() */
+static const struct v4l2_subdev_ops tw9990_ops = {
+	.core = &tw9990_core_ops,
+	.video = &tw9990_video_ops,
+	.pad = &tw9990_pad_ops,
+};
+
+/*
+ * Threaded IRQ handler: a level-low interrupt signals a standard /
+ * status change. Queue a source-change event to user space, then
+ * re-arm standard recognition and acknowledge the interrupt bank.
+ */
+static irqreturn_t tw9990_isr(int irq, void *priv)
+{
+	struct tw9990 *tw9990 = priv;
+	unsigned char int2status;
+	struct video_device *vdev = tw9990->sd.devnode;
+	static const struct v4l2_event event = {
+		.type = V4L2_EVENT_SOURCE_CHANGE,
+		.u.src_change.changes =
+			V4L2_EVENT_SRC_CH_RESOLUTION,
+	};
+
+	/* NOTE(review): a negative (error) return from the read is
+	 * silently truncated into int2status here — confirm acceptable */
+	int2status = tw9990_i2c_read_byte(tw9990,
+					TW9990_INT2_STATUS);
+
+	v4l2_info(&tw9990->sd,
+		"read int2status = 0x%X\n", int2status);
+
+	/* devnode may not exist yet during probe */
+	if(vdev)
+		v4l2_event_queue(vdev, &event);
+	tw9990_i2c_write_byte(tw9990, TW9990_STD_RECOG, 0xFF);
+	tw9990_i2c_write_byte(tw9990, TW9990_INT2_RST, 0xFF);
+	return IRQ_HANDLED;
+}
+
+/*
+ * tw9990_probe() - i2c probe: allocate the context, register the v4l2
+ * subdevice/media entity, power the chip, verify its identity (chip id
+ * register must read 0), install the optional interrupt handler and
+ * apply the initial register configuration.
+ */
+static int tw9990_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct tw9990 *tw9990;
+	struct tw9990_platform_data *pdata = NULL;
+	struct v4l2_subdev *sd;
+	int ret = -ENODEV;
+	struct v4l2_mbus_framefmt *format;
+	struct reg_cfg reg;
+	pdata = client->dev.platform_data;
+
+	/* platform data is mandatory (mux/clamp/sync configuration) */
+	if(!pdata)
+		return -EINVAL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		ret = -EIO;
+		goto ei2c;
+	}
+	v4l2_dbg(1, debug, client, "detecting tw9990 client on address 0x%x\n",
+		client->addr << 1);
+
+	tw9990 = kzalloc(sizeof(*tw9990), GFP_KERNEL);
+	if(!tw9990) {
+		ret = -ENOMEM;
+		v4l2_err(client, "alloc failed for data structure\n");
+		goto enomem;
+	}
+
+	tw9990->set_power = pdata->set_power;
+	tw9990->differential_input = pdata->differential_input;
+	tw9990->i2c_client = client;
+	/* build the cached register configuration (applied below) */
+	tw9990_setup(tw9990, pdata);
+
+	/* default output: PAL-sized interlaced UYVY */
+	format = &tw9990->format;
+	format->width = TW9990_CVBS_WIDTH;
+	format->height = TW9990_CVBS_PAL_HEIGHT;
+	format->field = V4L2_FIELD_INTERLACED;
+	format->code = V4L2_MBUS_FMT_UYVY8_2X8;
+	format->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	tw9990->frame_interval.interval.numerator = 1;
+	tw9990->frame_interval.interval.denominator = 30;
+
+	sd = &tw9990->sd;
+	v4l2_i2c_subdev_init(sd, client, &tw9990_ops);
+	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+	tw9990->pad.flags = MEDIA_PAD_FL_SOURCE;
+	ret = media_entity_init(&sd->entity, 1, &tw9990->pad, 0);
+	if(ret < 0) {
+		v4l2_err(client, "failed to init media entity\n");
+		goto emedia;
+	}
+
+	/*see if chip is present */
+	if(pdata->set_power) {
+		pdata->set_power(TW9990_POWER_ON);
+		msleep(500);
+	}
+
+	/* chip id register: reads back 0 on a tw9990 */
+	reg.reg = 0x00;
+
+	ret = tw9990_i2c_read_byte(tw9990, reg.reg);
+	/* not chip */
+	if (ret < 0)
+		goto echipident;
+
+	reg.val = ret;
+
+	if(reg.val != 0) {
+		v4l2_err(client, "read 0x%d in chip ident reg\n", reg.val);
+		ret = -ENODEV;
+		goto echipident;
+	}
+
+	/* best effort: probe continues without interrupts on failure,
+	 * clearing client->irq so remove() skips free_irq() */
+	if(client->irq > 0) {
+		ret = request_threaded_irq(client->irq,NULL,
+					tw9990_isr,
+					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+					"tw9990", tw9990);
+		if(ret < 0) {
+			v4l2_err(client, "failed to register irq\n");
+			client->irq = -1;
+		}
+	}
+
+	/* ack pending interrupts, enable detection, apply the config.
+	 * NOTE(review): return values here are ignored (best effort) */
+	tw9990_i2c_write_byte(tw9990, TW9990_INT2_RST, 0xFF);
+	tw9990_i2c_write_byte(tw9990, TW9990_INT2_ENABLE, 0x30);
+	tw9990_i2c_write_byte(tw9990, TW9990_SHORT_DIAG, 0x0);
+	tw9990_i2c_write_byte(tw9990, TW9990_STD_RECOG, 0xFF);
+	tw9990_enable(tw9990);
+
+	return 0;
+echipident:
+	media_entity_cleanup(&sd->entity);
+emedia:
+	kfree(tw9990);
+enomem:
+ei2c:
+	return ret;
+}
+
+/* i2c remove: power down, release the irq and v4l2/media resources. */
+static int tw9990_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw9990 *tw9990 = to_tw9990(sd);
+
+	if (tw9990->set_power != NULL)
+		tw9990->set_power(TW9990_POWER_OFF);
+
+	if (client->irq > 0)
+		free_irq(client->irq, tw9990);
+
+	v4l2_device_unregister_subdev(sd);
+	media_entity_cleanup(&sd->entity);
+	kfree(tw9990);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* PM: stop streaming (remembering the state) and power down. */
+int tw9990_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw9990 *tw9990 = to_tw9990(sd);
+	int was_streaming = tw9990->is_streaming;
+
+	/* s_stream(0) clears is_streaming; restore it afterwards so
+	 * resume knows whether to restart */
+	if (was_streaming)
+		tw9990_s_stream(sd, 0);
+	tw9990->is_streaming = was_streaming;
+
+	if (tw9990->set_power != NULL)
+		tw9990->set_power(TW9990_POWER_OFF);
+
+	return 0;
+}
+
+/* PM: power up and restart streaming if it was active at suspend. */
+int tw9990_resume(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct tw9990 *tw9990 = to_tw9990(sd);
+
+	if (tw9990->set_power != NULL)
+		tw9990->set_power(TW9990_POWER_ON);
+
+	if (tw9990->is_streaming)
+		tw9990_s_stream(sd, 1);
+
+	return 0;
+}
+#endif
+
+/* ----------------------------------------------------------------------- */
+
+/* supported device: a single "tw9990" i2c id */
+static const struct i2c_device_id tw9990_id[] = {
+	{ "tw9990", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, tw9990_id);
+
+static struct i2c_driver tw9990_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "tw9990",
+	},
+	.probe = tw9990_probe,
+	.remove = tw9990_remove,
+#ifdef CONFIG_PM
+	.suspend = tw9990_suspend,
+	.resume = tw9990_resume,
+#endif
+	.id_table = tw9990_id,
+};
+
+module_i2c_driver(tw9990_driver);
diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c
new file mode 100644
index 00000000000000..830d2897afd6d4
--- /dev/null
+++ b/drivers/misc/akm8975.c
@@ -0,0 +1,732 @@
+/* drivers/misc/akm8975.c - akm8975 compass driver
+ *
+ * Copyright (C) 2007-2008 HTC Corporation.
+ * Author: Hou-Kun Chen <houkun.chen@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Revised by AKM 2009/04/02
+ * Revised by Motorola 2010/05/27
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+#include <linux/freezer.h>
+#include <linux/akm8975.h>
+#include <linux/earlysuspend.h>
+
/* Set to 1 to trace every function entry via FUNCDBG(). */
#define AK8975DRV_CALL_DBG 0
#if AK8975DRV_CALL_DBG
#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg);
#else
#define FUNCDBG(msg)
#endif

/* Set to 1 to dump every decoded sample in akm8975_ecs_report_value(). */
#define AK8975DRV_DATA_DBG 0
#define MAX_FAILURE_COUNT 10

/* Per-device driver state (single instance only — see akmd_data below). */
struct akm8975_data {
	struct i2c_client *this_client;		/* bound i2c device */
	struct akm8975_platform_data *pdata;	/* board power on/off hooks */
	struct input_dev *input_dev;		/* "compass" input device */
	struct work_struct work;		/* re-enables the irq */
	struct mutex flags_lock;		/* guards m/a/t/mv flags below */
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend early_suspend;
#endif
};

/*
* Because misc devices can not carry a pointer from driver register to
* open, we keep this global. This limits the driver to a single instance.
*/
struct akm8975_data *akmd_data;

/* Waiters on the GET_OPEN_STATUS / GET_CLOSE_STATUS ioctls. */
static DECLARE_WAIT_QUEUE_HEAD(open_wq);

/* Non-zero while the "aot" control device is open. */
static atomic_t open_flag;

/* Report-enable flags: magnetic, acceleration, temperature, raw vector.
 * All guarded by akm8975_data.flags_lock. */
static short m_flag;
static short a_flag;
static short t_flag;
static short mv_flag;

/* Poll delay (ms) set by userspace; read back via the GET_DELAY ioctls. */
static short akmd_delay;
+
+static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,
+ AK8975_REG_CNTL));
+}
+static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ strict_strtoul(buf, 10, &val);
+ if (val > 0xff)
+ return -EINVAL;
+ i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val);
+ return count;
+}
+static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store);
+
+static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = akm->this_client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) {
+ pr_err("akm8975_i2c_rxdata: transfer error\n");
+ return EIO;
+ } else
+ return 0;
+}
+
+static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = akm->this_client->addr,
+ .flags = 0,
+ .len = length,
+ .buf = buf,
+ },
+ };
+
+ FUNCDBG("called");
+
+ if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) {
+ pr_err("akm8975_i2c_txdata: transfer error\n");
+ return -EIO;
+ } else
+ return 0;
+}
+
/*
 * Push one decoded sample set (delivered by the daemon via the
 * ECS_IOCTL_SET_YPR ioctl) into the input device.
 *
 * rbuf layout: [0..2] yaw/pitch/roll, [3] temperature, [4] magnetic
 * status, [5] acceleration status, [6..8] acceleration, [9..11] raw
 * magnetic vector.
 *
 * NOTE(review): `data` is the i2c clientdata, which probe sets to the
 * same object as @akm, so data->input_dev == akm->input_dev — confirm
 * before treating them as distinct.
 */
static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf)
{
	struct akm8975_data *data = i2c_get_clientdata(akm->this_client);

	FUNCDBG("called");

#if AK8975DRV_DATA_DBG
	pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = %d\n",
		rbuf[0], rbuf[1], rbuf[2]);
	pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]);
	pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n",
		rbuf[6], rbuf[7], rbuf[8]);
	pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n",
		rbuf[9], rbuf[10], rbuf[11]);
#endif
	/* The m/a/t/mv flags select which channels userspace enabled. */
	mutex_lock(&akm->flags_lock);
	/* Report magnetic sensor information */
	if (m_flag) {
		input_report_abs(data->input_dev, ABS_RX, rbuf[0]);
		input_report_abs(data->input_dev, ABS_RY, rbuf[1]);
		input_report_abs(data->input_dev, ABS_RZ, rbuf[2]);
		input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]);
	}

	/* Report acceleration sensor information */
	if (a_flag) {
		input_report_abs(data->input_dev, ABS_X, rbuf[6]);
		input_report_abs(data->input_dev, ABS_Y, rbuf[7]);
		input_report_abs(data->input_dev, ABS_Z, rbuf[8]);
		input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]);
	}

	/* Report temperature information */
	if (t_flag)
		input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]);

	/* Raw magnetic vector. */
	if (mv_flag) {
		input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]);
		input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]);
		input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]);
	}
	mutex_unlock(&akm->flags_lock);

	input_sync(data->input_dev);
}
+
+static void akm8975_ecs_close_done(struct akm8975_data *akm)
+{
+ FUNCDBG("called");
+ mutex_lock(&akm->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&akm->flags_lock);
+}
+
/*
 * Open handler for the "aot" control device: marks the device open via
 * open_flag and wakes anyone blocked in ECS_IOCTL_GET_OPEN_STATUS.
 *
 * NOTE(review): the result of the cmpxchg guard is discarded — when the
 * device is already open, execution still continues and returns the
 * nonseekable_open() result, so single-open exclusivity is not actually
 * enforced. Confirm whether returning -EBUSY was intended.
 */
static int akm_aot_open(struct inode *inode, struct file *file)
{
	int ret = -1;

	FUNCDBG("called");
	if (atomic_cmpxchg(&open_flag, 0, 1) == 0) {
		wake_up(&open_wq);
		ret = 0;
	}

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	/* Single-instance driver: hand out the global state. */
	file->private_data = akmd_data;

	return ret;
}
+
+static int akm_aot_release(struct inode *inode, struct file *file)
+{
+ FUNCDBG("called");
+ atomic_set(&open_flag, 0);
+ wake_up(&open_wq);
+ return 0;
+}
+
+static int akm_aot_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+ short flag;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ case ECS_IOCTL_APP_SET_AFLAG:
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ if (flag < 0 || flag > 1)
+ return -EINVAL;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ if (copy_from_user(&flag, argp, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ mutex_lock(&akm->flags_lock);
+ switch (cmd) {
+ case ECS_IOCTL_APP_SET_MFLAG:
+ m_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MFLAG:
+ flag = m_flag;
+ break;
+ case ECS_IOCTL_APP_SET_AFLAG:
+ a_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_AFLAG:
+ flag = a_flag;
+ break;
+ case ECS_IOCTL_APP_SET_MVFLAG:
+ mv_flag = flag;
+ break;
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ flag = mv_flag;
+ break;
+ case ECS_IOCTL_APP_SET_DELAY:
+ akmd_delay = flag;
+ break;
+ case ECS_IOCTL_APP_GET_DELAY:
+ flag = akmd_delay;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ mutex_unlock(&akm->flags_lock);
+
+ switch (cmd) {
+ case ECS_IOCTL_APP_GET_MFLAG:
+ case ECS_IOCTL_APP_GET_AFLAG:
+ case ECS_IOCTL_APP_GET_MVFLAG:
+ case ECS_IOCTL_APP_GET_DELAY:
+ if (copy_to_user(argp, &flag, sizeof(flag)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int akmd_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+
+ FUNCDBG("called");
+ err = nonseekable_open(inode, file);
+ if (err)
+ return err;
+
+ file->private_data = akmd_data;
+ return 0;
+}
+
+static int akmd_release(struct inode *inode, struct file *file)
+{
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+ akm8975_ecs_close_done(akm);
+ return 0;
+}
+
+static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+
+ char rwbuf[16];
+ int ret = -1;
+ int status;
+ short value[12];
+ short delay;
+ struct akm8975_data *akm = file->private_data;
+
+ FUNCDBG("called");
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ case ECS_IOCTL_WRITE:
+ if (copy_from_user(&rwbuf, argp, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+
+ case ECS_IOCTL_SET_YPR:
+ if (copy_from_user(&value, argp, sizeof(value)))
+ return -EFAULT;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (rwbuf[0] < 1)
+ return -EINVAL;
+
+ ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case ECS_IOCTL_WRITE:
+ if (rwbuf[0] < 2)
+ return -EINVAL;
+
+ ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]);
+ if (ret < 0)
+ return ret;
+ break;
+ case ECS_IOCTL_SET_YPR:
+ akm8975_ecs_report_value(akm, value);
+ break;
+
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) != 0));
+ status = atomic_read(&open_flag);
+ break;
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ wait_event_interruptible(open_wq,
+ (atomic_read(&open_flag) == 0));
+ status = atomic_read(&open_flag);
+ break;
+
+ case ECS_IOCTL_GET_DELAY:
+ delay = akmd_delay;
+ break;
+
+ default:
+ FUNCDBG("Unknown cmd\n");
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case ECS_IOCTL_READ:
+ if (copy_to_user(argp, &rwbuf, sizeof(rwbuf)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_OPEN_STATUS:
+ case ECS_IOCTL_GET_CLOSE_STATUS:
+ if (copy_to_user(argp, &status, sizeof(status)))
+ return -EFAULT;
+ break;
+ case ECS_IOCTL_GET_DELAY:
+ if (copy_to_user(argp, &delay, sizeof(delay)))
+ return -EFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* needed to clear the int. pin */
+static void akm_work_func(struct work_struct *work)
+{
+ struct akm8975_data *akm =
+ container_of(work, struct akm8975_data, work);
+
+ FUNCDBG("called");
+ enable_irq(akm->this_client->irq);
+}
+
+static irqreturn_t akm8975_interrupt(int irq, void *dev_id)
+{
+ struct akm8975_data *akm = dev_id;
+ FUNCDBG("called");
+
+ disable_irq_nosync(akm->this_client->irq);
+ schedule_work(&akm->work);
+ return IRQ_HANDLED;
+}
+
+static int akm8975_power_off(struct akm8975_data *akm)
+{
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_off)
+ akm->pdata->power_off();
+
+ return 0;
+}
+
+static int akm8975_power_on(struct akm8975_data *akm)
+{
+ int err;
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ if (akm->pdata->power_on) {
+ err = akm->pdata->power_on();
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ /* TO DO: might need more work after power mgmt
+ is enabled */
+ return akm8975_power_off(akm);
+}
+
/* Legacy i2c resume: just power the chip back up. */
static int akm8975_resume(struct i2c_client *client)
{
	struct akm8975_data *akm = i2c_get_clientdata(client);

#if AK8975DRV_CALL_DBG
	pr_info("%s\n", __func__);
#endif
	/* TODO: might need more work once power management is enabled. */
	return akm8975_power_on(akm);
}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void akm8975_early_suspend(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_suspend(akm->this_client, PMSG_SUSPEND);
+}
+
+static void akm8975_early_resume(struct early_suspend *handler)
+{
+ struct akm8975_data *akm;
+ akm = container_of(handler, struct akm8975_data, early_suspend);
+
+#if AK8975DRV_CALL_DBG
+ pr_info("%s\n", __func__);
+#endif
+ akm8975_resume(akm->this_client);
+}
+#endif
+
+
+static int akm8975_init_client(struct i2c_client *client)
+{
+ struct akm8975_data *data;
+ int ret;
+
+ data = i2c_get_clientdata(client);
+
+ ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING,
+ "akm8975", data);
+
+ if (ret < 0) {
+ pr_err("akm8975_init_client: request irq failed\n");
+ goto err;
+ }
+
+ init_waitqueue_head(&open_wq);
+
+ mutex_lock(&data->flags_lock);
+ m_flag = 1;
+ a_flag = 1;
+ t_flag = 1;
+ mv_flag = 1;
+ mutex_unlock(&data->flags_lock);
+
+ return 0;
+err:
+ return ret;
+}
+
/* Data device: raw register access and sample injection by the daemon. */
static const struct file_operations akmd_fops = {
	.owner = THIS_MODULE,
	.open = akmd_open,
	.release = akmd_release,
	.ioctl = akmd_ioctl,
};

/* Control ("aot") device: report-flag and delay management by apps. */
static const struct file_operations akm_aot_fops = {
	.owner = THIS_MODULE,
	.open = akm_aot_open,
	.release = akm_aot_release,
	.ioctl = akm_aot_ioctl,
};

static struct miscdevice akm_aot_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "akm8975_aot",
	.fops = &akm_aot_fops,
};

static struct miscdevice akmd_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "akm8975_dev",
	.fops = &akmd_fops,
};
+
+int akm8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct akm8975_data *akm;
+ int err;
+ FUNCDBG("called");
+
+ if (client->dev.platform_data == NULL) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_platform_data_null;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "platform data is NULL. exiting.\n");
+ err = -ENODEV;
+ goto exit_check_functionality_failed;
+ }
+
+ akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL);
+ if (!akm) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ err = -ENOMEM;
+ goto exit_alloc_data_failed;
+ }
+
+ akm->pdata = client->dev.platform_data;
+
+ mutex_init(&akm->flags_lock);
+ INIT_WORK(&akm->work, akm_work_func);
+ i2c_set_clientdata(client, akm);
+
+ err = akm8975_power_on(akm);
+ if (err < 0)
+ goto exit_power_on_failed;
+
+ akm8975_init_client(client);
+ akm->this_client = client;
+ akmd_data = akm;
+
+ akm->input_dev = input_allocate_device();
+ if (!akm->input_dev) {
+ err = -ENOMEM;
+ dev_err(&akm->this_client->dev,
+ "input device allocate failed\n");
+ goto exit_input_dev_alloc_failed;
+ }
+
+ set_bit(EV_ABS, akm->input_dev->evbit);
+
+ /* yaw */
+ input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0);
+ /* pitch */
+ input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0);
+ /* roll */
+ input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0);
+ /* x-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0);
+ /* y-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0);
+ /* z-axis acceleration */
+ input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0);
+ /* temparature */
+ input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0);
+ /* status of magnetic sensor */
+ input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0);
+ /* status of acceleration sensor */
+ input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0);
+ /* x-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0);
+ /* y-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0);
+ /* z-axis of raw magnetic vector */
+ input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0);
+
+ akm->input_dev->name = "compass";
+
+ err = input_register_device(akm->input_dev);
+ if (err) {
+ pr_err("akm8975_probe: Unable to register input device: %s\n",
+ akm->input_dev->name);
+ goto exit_input_register_device_failed;
+ }
+
+ err = misc_register(&akmd_device);
+ if (err) {
+ pr_err("akm8975_probe: akmd_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = misc_register(&akm_aot_device);
+ if (err) {
+ pr_err("akm8975_probe: akm_aot_device register failed\n");
+ goto exit_misc_device_register_failed;
+ }
+
+ err = device_create_file(&client->dev, &dev_attr_akm_ms1);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ akm->early_suspend.suspend = akm8975_early_suspend;
+ akm->early_suspend.resume = akm8975_early_resume;
+ register_early_suspend(&akm->early_suspend);
+#endif
+ return 0;
+
+exit_misc_device_register_failed:
+exit_input_register_device_failed:
+ input_free_device(akm->input_dev);
+exit_input_dev_alloc_failed:
+ akm8975_power_off(akm);
+exit_power_on_failed:
+ kfree(akm);
+exit_alloc_data_failed:
+exit_check_functionality_failed:
+exit_platform_data_null:
+ return err;
+}
+
+static int __devexit akm8975_remove(struct i2c_client *client)
+{
+ struct akm8975_data *akm = i2c_get_clientdata(client);
+ FUNCDBG("called");
+ free_irq(client->irq, NULL);
+ input_unregister_device(akm->input_dev);
+ misc_deregister(&akmd_device);
+ misc_deregister(&akm_aot_device);
+ akm8975_power_off(akm);
+ kfree(akm);
+ return 0;
+}
+
/* I2C device ids this driver binds to. */
static const struct i2c_device_id akm8975_id[] = {
	{ "akm8975", 0 },
	{ }
};

MODULE_DEVICE_TABLE(i2c, akm8975_id);

static struct i2c_driver akm8975_driver = {
	.probe = akm8975_probe,
	.remove = akm8975_remove,
	/* With early suspend enabled, PM is driven from the early_suspend
	 * handlers registered in probe rather than by the i2c core. */
#ifndef CONFIG_HAS_EARLYSUSPEND
	.resume = akm8975_resume,
	.suspend = akm8975_suspend,
#endif
	.id_table = akm8975_id,
	.driver = {
		.name = "akm8975",
	},
};
+
+static int __init akm8975_init(void)
+{
+ pr_info("AK8975 compass driver: init\n");
+ FUNCDBG("AK8975 compass driver: init\n");
+ return i2c_add_driver(&akm8975_driver);
+}
+
/* Module teardown: unregister the i2c driver. */
static void __exit akm8975_exit(void)
{
	FUNCDBG("AK8975 compass driver: exit\n");
	i2c_del_driver(&akm8975_driver);
}

module_init(akm8975_init);
module_exit(akm8975_exit);

MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>");
MODULE_DESCRIPTION("AK8975 compass driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ti-st/gps_drv.c b/drivers/misc/ti-st/gps_drv.c
new file mode 100644
index 00000000000000..eb3f43a4d695ab
--- /dev/null
+++ b/drivers/misc/ti-st/gps_drv.c
@@ -0,0 +1,805 @@
+/*
+ * GPS Char Driver for Texas Instrument's Connectivity Chip.
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/ti_wilink_st.h>
+
+#undef VERBOSE
+#undef DEBUG
+
/* Debug macros*/
/* Three verbosity levels chosen at compile time:
 * DEBUG   -> info-level GPSDRV_DBG only
 * VERBOSE -> GPSDRV_DBG + GPSDRV_VER + GPSDRV_ERR
 * default -> GPSDRV_ERR only.
 * NOTE(review): the DEBUG-only branch defines no GPSDRV_ERR, so building
 * with DEBUG set but VERBOSE unset will fail to compile — confirm. */
#if defined(DEBUG) /* limited debug messages */
#define GPSDRV_DBG(fmt, arg...) \
	printk(KERN_INFO "[GPS] (gpsdrv):"fmt"\n" , ## arg)
#define GPSDRV_VER(fmt, arg...)
#elif defined(VERBOSE) /* very verbose */
#define GPSDRV_DBG(fmt, arg...) \
	printk(KERN_INFO "[GPS] (gpsdrv):"fmt"\n" , ## arg)
#define GPSDRV_VER(fmt, arg...) \
	printk(KERN_INFO "[GPS] (gpsdrv):"fmt"\n" , ## arg)
#define GPSDRV_ERR(fmt, arg...) \
	printk(KERN_ERR "[GPS] (gpsdrv):"fmt"\n" , ## arg)
#else /* Error msgs only */
#define GPSDRV_ERR(fmt, arg...) \
	printk(KERN_ERR "[GPS] (gpsdrv):"fmt"\n" , ## arg)
#define GPSDRV_VER(fmt, arg...)
#define GPSDRV_DBG(fmt, arg...)
#endif
+
/* Tx tasklet handler, defined below. */
static void gpsdrv_tsklet_write(unsigned long data);

/* List of error codes returned by the gps driver*/
enum {
	GPS_ERR_FAILURE = -1,	/* check struct */
	GPS_SUCCESS,
	GPS_ERR_CLASS = -15,
	GPS_ERR_CPY_TO_USR,
	GPS_ERR_CPY_FRM_USR,
	GPS_ERR_UNKNOWN,
};

/* Channel-9 details for GPS */
#define GPS_CH9_PKT_HDR_SIZE 4
#define GPS_CH9_PKT_NUMBER 0x9
#define GPS_CH9_OP_WRITE 0x1
#define GPS_CH9_OP_READ 0x2
#define GPS_CH9_OP_COMPLETED_EVT 0x3

/* Macros for Syncronising GPS registration and other R/W/ICTL operations */
#define GPS_ST_REGISTERED 0
#define GPS_ST_RUNNING 1

/* Read time out defined to 10 seconds */
#define GPSDRV_READ_TIMEOUT 10000
/* Reg time out defined to 6 seconds */
#define GPSDRV_REG_TIMEOUT 6000


/* Channel-9 header that follows the channel byte on the wire:
 * 1-byte opcode plus 2-byte payload length (hence the __packed). */
struct gpsdrv_event_hdr {
	uint8_t opcode;
	uint16_t plen;
} __packed;

/*
 * struct gpsdrv_data - gps internal driver data
 * @gpsdrv_reg_completed - completion to wait for registration
 * @streg_cbdata - registration feedback
 * @state - driver state
 * @tx_count - TX throttling/unthrottling
 * @st_write - write ptr from ST
 * @rx_list - Rx data SKB queue
 * @tx_list - Tx data SKB queue
 * @gpsdrv_data_q - dataq checked up on poll/receive
 * @lock - spin lock
 * @gpsdrv_tx_tsklet - gps write task
 */

struct gpsdrv_data {
	struct completion gpsdrv_reg_completed;
	char streg_cbdata;
	unsigned long state;
	unsigned char tx_count;
	long (*st_write) (struct sk_buff *skb);
	struct sk_buff_head rx_list;
	struct sk_buff_head tx_list;
	wait_queue_head_t gpsdrv_data_q;
	spinlock_t lock;
	struct tasklet_struct gpsdrv_tx_tsklet;
};

#define DEVICE_NAME "tigps"
+
+/***********Functions called from ST driver**********************************/
+
+/* gpsdrv_st_recv Function
+ * This is Called in from -- ST Core when a data is received
+ * This is a registered callback with ST core when the gps driver registers
+ * with ST.
+ *
+ * Parameters:
+ * @skb : SKB buffer pointer which contains the incoming Ch-9 GPS data.
+ * Returns:
+ * GPS_SUCCESS - On Success
+ * else suitable error code
+ */
long gpsdrv_st_recv(void *arg, struct sk_buff *skb)
{
	struct gpsdrv_event_hdr gpsdrv_hdr = { 0x00, 0x0000 };
	struct gpsdrv_data *hgps = (struct gpsdrv_data *)arg;

	/* SKB is NULL */
	if (NULL == skb) {
		GPSDRV_ERR("Input SKB is NULL");
		return GPS_ERR_FAILURE;
	}

	/* Sanity Check - To Check if the Rx Pkt is Channel -9 or not */
	if (0x09 != skb->cb[0]) {
		GPSDRV_ERR("Input SKB is not a Channel-9 packet");
		return GPS_ERR_FAILURE;
	}
	/* Copy Ch-9 info to local structure.
	 * Only 3 bytes are on the wire: opcode (1) + plen (2). */
	memcpy(&gpsdrv_hdr, skb->data, GPS_CH9_PKT_HDR_SIZE - 1);
	skb_pull(skb, GPS_CH9_PKT_HDR_SIZE - 1);

	/* check if skb->len and gpsdrv_hdr.plen are equal */
	if (skb->len != gpsdrv_hdr.plen) {
		GPSDRV_ERR("Received corrupted packet - Length Mismatch");
		return -EINVAL;
	}
#ifdef VERBOSE
	printk(KERN_INFO"data start >>\n");
	print_hex_dump(KERN_INFO, ">in>", DUMP_PREFIX_NONE,
			16, 1, skb->data, skb->len, 0);
	printk(KERN_INFO"\n<< end\n");
#endif
	/* Check the Opcode: only READ (payload) and COMPLETED_EVT
	 * (tx flow-control) are valid from the chip. */
	if ((gpsdrv_hdr.opcode != GPS_CH9_OP_READ) && (gpsdrv_hdr.opcode != \
				GPS_CH9_OP_COMPLETED_EVT)) {
		GPSDRV_ERR("Rec corrupt pkt opcode %x", gpsdrv_hdr.opcode);
		return -EINVAL;
	}

	/* Strip Channel 9 packet information from SKB only
	 * if the opcode is GPS_CH9_OP_READ and get AI2 packet
	 */
	if (GPS_CH9_OP_READ == gpsdrv_hdr.opcode) {
		/* Payload: queue for gpsdrv_read() and wake readers.
		 * Ownership of the skb passes to rx_list here. */
		skb_queue_tail(&hgps->rx_list, skb);
		wake_up_interruptible(&hgps->gpsdrv_data_q);
	} else {
		spin_lock(&hgps->lock);
		/* The no. of Completed Packets is always 1
		 * in case of Channel 9 as per spec.
		 * Forcing it to 1 for precaution.
		 */
		hgps->tx_count = 1;
		/* Check if Tx queue and Tx count not empty */
		if (!skb_queue_empty(&hgps->tx_list)) {
			/* Schedule the Tx-task let */
			spin_unlock(&hgps->lock);
			GPSDRV_VER(" Scheduling tasklet to write");
			tasklet_schedule(&hgps->gpsdrv_tx_tsklet);
		} else {
			spin_unlock(&hgps->lock);
		}

		/* NOTE(review): tx_count is read here outside the lock —
		 * debug-only, but the value may already be stale. */
		GPSDRV_VER(" Tx count = %x", hgps->tx_count);
		/* Free the received command complete SKB */
		kfree_skb(skb);
	}

	return GPS_SUCCESS;

}
+
+/* gpsdrv_st_cb Function
+ * This is Called in from -- ST Core when the state is pending during
+ * st_register. This is a registered callback with ST core when the gps
+ * driver registers with ST.
+ *
+ * Parameters:
+ * @data Status update of GPS registration
+ * Returns: NULL
+ */
+void gpsdrv_st_cb(void *arg, char data)
+{
+ struct gpsdrv_data *hgps = (struct gpsdrv_data *)arg;
+
+ GPSDRV_DBG(" Inside %s", __func__);
+ hgps->streg_cbdata = data; /* ST registration callback status */
+ complete_all(&hgps->gpsdrv_reg_completed);
+ return;
+}
+
/* ST registration descriptor for GPS channel 9: a 3-byte header whose
 * 2-byte length field starts at offset 1, with one byte reserved for
 * the channel id prepended on writes. */
static struct st_proto_s gpsdrv_proto = {
	.chnl_id = 0x09,
	.max_frame_size = 1024,
	.hdr_len = 3,
	.offset_len_in_hdr = 1,
	.len_size = 2,
	.reserve = 1,
	.recv = gpsdrv_st_recv,
	.reg_complete_cb = gpsdrv_st_cb,
};
+
+/** gpsdrv_tsklet_write Function
+ * This tasklet function will be scheduled when there is a data in Tx queue
+ * and GPS chip sent an command completion packet with non zero value.
+ *
+ * Parameters :
+ * @data : data passed to tasklet function
+ * Returns : NULL
+ */
void gpsdrv_tsklet_write(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct gpsdrv_data *hgps = (struct gpsdrv_data *)data;

	GPSDRV_DBG(" Inside %s", __func__);

	spin_lock(&hgps->lock);

	/* Perform sanity check of verifying the status
	to perform an st_write */
	if (((!hgps->st_write) || (0 == hgps->tx_count))
			|| ((skb_queue_empty(&hgps->tx_list)))) {
		spin_unlock(&hgps->lock);
		GPSDRV_ERR("Sanity check Failed exiting %s", __func__);
		return;
	}
	/* hgps->tx_list not empty skb already present
	 * dequeue the tx-data and perform a st_write
	 */
	/* Consume one flow-control credit under the lock; the dequeue and
	 * write below happen outside it (tasklets don't run concurrently
	 * with themselves, so the list cannot be drained by another
	 * instance of this handler). */
	hgps->tx_count--;
	spin_unlock(&hgps->lock);
	/* NOTE(review): tx_count read unlocked here — debug log only. */
	GPSDRV_VER(" Tx count in gpsdrv_tsklet_write = %x", hgps->tx_count);
	skb = skb_dequeue(&hgps->tx_list);
	/* Ownership of skb passes to ST on st_write(). */
	hgps->st_write(skb);

	return;
}
+
+/*********Functions Called from GPS host***************************************/
+
+/** gpsdrv_open Function
+ * This function will perform an register on ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @inod :
+ * Returns GPS_SUCCESS - on success
+ * else suitable error code
+ */
+int gpsdrv_open(struct inode *inod, struct file *file)
+{
+ int ret = 0;
+ unsigned long timeout = GPSDRV_REG_TIMEOUT;
+ struct gpsdrv_data *hgps;
+
+ GPSDRV_DBG(" Inside %s", __func__);
+
+ /* Allocate local resource memory */
+ hgps = kzalloc(sizeof(struct gpsdrv_data), GFP_KERNEL);
+ if (!(hgps)) {
+ GPSDRV_ERR("Can't allocate GPS data structure");
+ return -ENOMEM;
+ }
+
+ /* Initialize wait queue, skb queue head and
+ * registration complete strucuture
+ */
+ skb_queue_head_init(&hgps->rx_list);
+ skb_queue_head_init(&hgps->tx_list);
+ init_completion(&hgps->gpsdrv_reg_completed);
+ init_waitqueue_head(&hgps->gpsdrv_data_q);
+ spin_lock_init(&hgps->lock);
+
+ /* Check if GPS is already registered with ST */
+ if (test_and_set_bit(GPS_ST_REGISTERED, &hgps->state)) {
+ GPSDRV_ERR("GPS Registered/Registration in progress with ST"
+ " ,open called again?");
+ kfree(hgps);
+ return -EAGAIN;
+ }
+
+ /* Initialize gpsdrv_reg_completed so as to wait for completion
+ * on the same
+ * if st_register returns with a PENDING status
+ */
+ INIT_COMPLETION(hgps->gpsdrv_reg_completed);
+
+ gpsdrv_proto.priv_data = hgps;
+ /* Resgister GPS with ST */
+ ret = st_register(&gpsdrv_proto);
+ GPSDRV_VER(" st_register returned %d", ret);
+
+ /* If GPS Registration returned with error, then clear GPS_ST_REGISTERED
+ * for future open calls and return the appropriate error code
+ */
+ if (ret < 0 && ret != -EINPROGRESS) {
+ GPSDRV_ERR(" st_register failed");
+ clear_bit(GPS_ST_REGISTERED, &hgps->state);
+ if (ret == -EINPROGRESS)
+ return -EAGAIN;
+ return GPS_ERR_FAILURE;
+ }
+
+ /* if returned status is pending, wait for the completion */
+ if (ret == -EINPROGRESS) {
+ GPSDRV_VER(" GPS Register waiting for completion ");
+ timeout = wait_for_completion_timeout \
+ (&hgps->gpsdrv_reg_completed, msecs_to_jiffies(timeout));
+ /* Check for timed out condition */
+ if (0 == timeout) {
+ GPSDRV_ERR("GPS Device registration timed out");
+ clear_bit(GPS_ST_REGISTERED, &hgps->state);
+ return -ETIMEDOUT;
+ } else if (0 > hgps->streg_cbdata) {
+ GPSDRV_ERR("GPS Device Registration Failed-ST\n");
+ GPSDRV_ERR("RegCB called with ");
+ GPSDRV_ERR("Invalid value %d\n", hgps->streg_cbdata);
+ clear_bit(GPS_ST_REGISTERED, &hgps->state);
+ return -EAGAIN;
+ }
+ }
+ GPSDRV_DBG(" gps registration complete ");
+
+ /* Assign the write callback pointer */
+ hgps->st_write = gpsdrv_proto.write;
+ hgps->tx_count = 1;
+ file->private_data = hgps; /* set drv data */
+ tasklet_init(&hgps->gpsdrv_tx_tsklet, (void *)gpsdrv_tsklet_write,
+ (unsigned long)hgps);
+ set_bit(GPS_ST_RUNNING, &hgps->state);
+
+ return GPS_SUCCESS;
+}
+
+/** gpsdrv_release Function
+ * This function will un-registers from the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @inod :
+ * Returns GPS_SUCCESS - on success
+ * else suitable error code
+ */
int gpsdrv_release(struct inode *inod, struct file *file)
{
	struct gpsdrv_data *hgps = file->private_data;

	GPSDRV_DBG(" Inside %s", __func__);

	/* Disabling task-let 1st & then un-reg to avoid
	 * tasklet getting scheduled
	 */
	tasklet_disable(&hgps->gpsdrv_tx_tsklet);
	tasklet_kill(&hgps->gpsdrv_tx_tsklet);
	/* Cleat registered bit if already registered */
	if (test_and_clear_bit(GPS_ST_REGISTERED, &hgps->state)) {
		if (st_unregister(&gpsdrv_proto) < 0) {
			GPSDRV_ERR(" st_unregister failed");
			/* Re-Enable the task-let if un-register fails */
			tasklet_enable(&hgps->gpsdrv_tx_tsklet);
			/* NOTE(review): hgps is intentionally kept alive
			 * here since ST may still deliver callbacks; a
			 * later release retry will free it. */
			return GPS_ERR_FAILURE;
		}
	}

	/* Reset Tx count value and st_write function pointer */
	hgps->tx_count = 0;
	hgps->st_write = NULL;
	clear_bit(GPS_ST_RUNNING, &hgps->state);
	GPSDRV_VER(" st_unregister success");

	/* Drop any data still queued in either direction, then free. */
	skb_queue_purge(&hgps->rx_list);
	skb_queue_purge(&hgps->tx_list);
	kfree(hgps);
	file->private_data = NULL;

	return GPS_SUCCESS;
}
+
+/** gpsdrv_read Function
+ * This function will wait till the data received from the ST driver
+ * and then strips the GPS-Channel-9 header from the
+ * incoming AI2 packet and then send it to GPS host application.
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @data : Data which needs to be passed to APP
+ * @size : Length of the data passesd
+ * offset :
+ * Returns Size of AI2 packet received - on success
+ * else suitable error code
+ */
+/** gpsdrv_read Function
+ * Blocks (up to GPSDRV_READ_TIMEOUT ms) until an AI2 packet is queued by
+ * the ST receive path, then copies it to user space. Oversized packets
+ * are returned piecewise: the consumed part is pulled and the remainder
+ * is re-queued at the head for the next read().
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @data : user buffer to copy the packet into
+ * @size : size of the user buffer
+ * @offset : unused
+ * Returns number of bytes copied on success, else suitable error code
+ */
+ssize_t gpsdrv_read(struct file *file, char __user *data, size_t size,
+		loff_t *offset)
+{
+	int len = 0;
+	struct sk_buff *skb = NULL;
+	long tout;
+	struct gpsdrv_data *hgps;
+
+	GPSDRV_DBG(" Inside %s", __func__);
+
+	/* Validate input parameters */
+	if ((NULL == file) || (NULL == data) || (0 == size)) {
+		GPSDRV_ERR("Invalid input params passed to %s", __func__);
+		return -EINVAL;
+	}
+
+	hgps = file->private_data;
+	/* Check if GPS is registered to perform read operation */
+	if (!test_bit(GPS_ST_RUNNING, &hgps->state)) {
+		GPSDRV_ERR("GPS Device is not running");
+		return -EINVAL;
+	}
+
+	/* cannot come here if poll-ed before reading
+	 * if not poll-ed wait on the same wait_q
+	 */
+	tout = wait_event_interruptible_timeout(hgps->gpsdrv_data_q,
+			!skb_queue_empty(&hgps->rx_list),
+			msecs_to_jiffies(GPSDRV_READ_TIMEOUT));
+	/* Check for timed out condition */
+	if (0 == tout) {
+		GPSDRV_ERR("GPS Device Read timed out");
+		return -ETIMEDOUT;
+	}
+	/* A negative value means the wait was interrupted by a signal;
+	 * the old code stored it in an unsigned long and fell through.
+	 */
+	if (tout < 0)
+		return tout;
+
+	/* hgps->rx_list not empty skb already present */
+	skb = skb_dequeue(&hgps->rx_list);
+
+	if (!skb) {
+		GPSDRV_ERR("Dequed SKB is NULL?");
+		return GPS_ERR_UNKNOWN;
+	} else if (skb->len > size) {
+		GPSDRV_DBG("SKB length is Greater than requested size ");
+		GPSDRV_DBG("Returning the available length of SKB\n");
+
+		/* Check the copy BEFORE consuming data (was ignored) so a
+		 * fault does not silently drop the packet.
+		 */
+		if (copy_to_user(data, skb->data, size)) {
+			GPSDRV_ERR(" Unable to copy to user space");
+			skb_queue_head(&hgps->rx_list, skb);
+			return GPS_ERR_CPY_TO_USR;
+		}
+		skb_pull(skb, size);
+
+		/* Keep the unread remainder for the next read() */
+		if (skb->len != 0)
+			skb_queue_head(&hgps->rx_list, skb);
+
+		return size;
+	}
+
+#ifdef VERBOSE
+	print_hex_dump(KERN_INFO, ">in>", DUMP_PREFIX_NONE,
+			16, 1, skb->data, skb->len, 0);
+#endif
+
+	/* Forward the whole packet to the user (skb->len <= size here) */
+	if (copy_to_user(data, skb->data, skb->len)) {
+		GPSDRV_ERR(" Unable to copy to user space");
+		/* Queue the skb back to head */
+		skb_queue_head(&hgps->rx_list, skb);
+		return GPS_ERR_CPY_TO_USR;
+	}
+
+	len = skb->len;
+	kfree_skb(skb);
+	return len;
+}
+/* gpsdrv_write Function
+ * This function will pre-pend the GPS-Channel-9 header to the
+ * incoming AI2 packet sent from the GPS host application.
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @data : AI2 packet data from GPS application
+ * @size : Size of the AI2 packet data
+ * @offset : unused
+ * Returns Size of AI2 packet on success
+ * else suitable error code
+ */
+ssize_t gpsdrv_write(struct file *file, const char __user *data,
+		size_t size, loff_t *offset)
+{
+	unsigned char channel = GPS_CH9_PKT_NUMBER;	/* GPS Channel number */
+	/* Initialize gpsdrv_event_hdr with write opcode */
+	struct gpsdrv_event_hdr gpsdrv_hdr = { GPS_CH9_OP_WRITE, 0x0000 };
+	struct sk_buff *skb = NULL;
+	struct gpsdrv_data *hgps;
+
+	GPSDRV_DBG(" Inside %s", __func__);
+	/* Validate input parameters */
+	if ((NULL == file) || (NULL == data) || (0 == size)) {
+		GPSDRV_ERR("Invalid input params passed to %s", __func__);
+		return -EINVAL;
+	}
+
+	hgps = file->private_data;
+
+	/* Check if GPS is registered to perform write operation */
+	if (!test_bit(GPS_ST_RUNNING, &hgps->state)) {
+		GPSDRV_ERR("GPS Device is not running");
+		return -EINVAL;
+	}
+
+	if (!hgps->st_write) {
+		GPSDRV_ERR(" Can't write to ST, hgps->st_write null ?");
+		return -EINVAL;
+	}
+
+	skb = alloc_skb(size + GPS_CH9_PKT_HDR_SIZE, GFP_ATOMIC);
+	/* Validate Created SKB */
+	if (NULL == skb) {
+		GPSDRV_ERR("Error allocating SKB");
+		return -ENOMEM;
+	}
+
+	/* Update chnl-9 information with plen=AI2 pckt size which is "size".
+	 * NOTE(review): plen is narrower than size_t, so a very large write
+	 * would truncate here — confirm an upper bound is enforced upstream.
+	 */
+	gpsdrv_hdr.plen = size;
+
+	/* PrePend Channel-9 header to AI2 packet and write to SKB */
+	memcpy(skb_put(skb, 1), &channel, 1);
+	memcpy(skb_put(skb, GPS_CH9_PKT_HDR_SIZE - 1), &gpsdrv_hdr,
+			GPS_CH9_PKT_HDR_SIZE - 1);
+
+	/* Forward the data from the user space to ST core */
+	if (copy_from_user(skb_put(skb, size), data, size)) {
+		GPSDRV_ERR(" Unable to copy from user space");
+		kfree_skb(skb);
+		return GPS_ERR_CPY_FRM_USR;
+	}
+
+#ifdef VERBOSE
+	GPSDRV_VER("start data..");
+	print_hex_dump(KERN_INFO, "<out<", DUMP_PREFIX_NONE,
+			16, 1, skb->data, size, 0);
+	GPSDRV_VER("\n..end data");
+#endif
+
+	/* Check if data can be sent to GPS chip
+	 * If not, add it to queue and that can be sent later
+	 */
+	spin_lock(&hgps->lock);
+	if (0 != hgps->tx_count) {
+		/* If TX Q is empty send current SKB;
+		 * else, queue current SKB at end of tx_list queue and
+		 * send first SKB in tx_list queue.
+		 */
+		hgps->tx_count--;
+		/* Log while the lock is held so the count read is stable
+		 * (previously read after spin_unlock).
+		 */
+		GPSDRV_VER(" Tx count in gpsdrv_write = %x", hgps->tx_count);
+		spin_unlock(&hgps->lock);
+
+		if (skb_queue_empty(&hgps->tx_list)) {
+			hgps->st_write(skb);
+		} else {
+			skb_queue_tail(&hgps->tx_list, skb);
+			hgps->st_write(skb_dequeue(&hgps->tx_list));
+		}
+	} else {
+		/* No credits: add it to the TX queue for the tasklet */
+		GPSDRV_VER(" SKB added to Tx queue ");
+		GPSDRV_VER("Tx count = %x ", hgps->tx_count);
+		spin_unlock(&hgps->lock);
+		skb_queue_tail(&hgps->tx_list, skb);
+		/* This is added for precaution in the case that there is a
+		 * context switch during the execution of the above lines.
+		 * Redundant otherwise.
+		 */
+		if ((0 != hgps->tx_count) &&
+				(!skb_queue_empty(&hgps->tx_list))) {
+			/* Schedule the Tx-tasklet */
+			GPSDRV_VER("Scheduling tasklet to write\n");
+			tasklet_schedule(&hgps->gpsdrv_tx_tsklet);
+		}
+	}
+
+	return size;
+}
+
+/** gpsdrv_ioctl Function
+ * This will perform the functions as directed by the command and command
+ * argument.
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @cmd : IOCTL Command (TCFLSH, FIONREAD)
+ * @arg : Command argument for IOCTL command
+ * Returns GPS_SUCCESS on success
+ * else suitable error code
+ */
+static long gpsdrv_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	struct sk_buff *skb = NULL;
+	int retCode = GPS_SUCCESS;
+	unsigned int len = 0;
+	struct gpsdrv_data *hgps;
+
+	GPSDRV_DBG(" Inside %s", __func__);
+
+	/* Validate input parameters */
+	if ((NULL == file) || (0 == cmd)) {
+		GPSDRV_ERR("Invalid input parameters passed to %s", __func__);
+		return -EINVAL;
+	}
+
+	hgps = file->private_data;
+
+	/* Check if GPS is registered to perform IOCTL operation */
+	if (!test_bit(GPS_ST_RUNNING, &hgps->state)) {
+		GPSDRV_ERR("GPS Device is not running");
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case TCFLSH:
+		GPSDRV_VER(" IOCTL TCFLSH invoked with %ld argument", arg);
+		switch (arg) {
+		/* purge Rx/Tx SKB list queues depending on arg value */
+		case TCIFLUSH:
+			skb_queue_purge(&hgps->rx_list);
+			break;
+		case TCOFLUSH:
+			skb_queue_purge(&hgps->tx_list);
+			break;
+		case TCIOFLUSH:
+			skb_queue_purge(&hgps->rx_list);
+			skb_queue_purge(&hgps->tx_list);
+			break;
+		default:
+			GPSDRV_ERR("Invalid Command passed for tcflush");
+			retCode = 0;
+			break;
+		}
+		break;
+	case FIONREAD:
+		/* Peek at the head SKB (dequeue + re-queue) to report how
+		 * much data the next read() can return.
+		 */
+		skb = skb_dequeue(&hgps->rx_list);
+		if (skb != NULL) {
+			len = skb->len;
+			/* Re-Store the SKB for future Read operations */
+			skb_queue_head(&hgps->rx_list, skb);
+		}
+		/* 'arg' is a user-space pointer: use put_user(), never a
+		 * direct dereference (was *(unsigned int *)arg = ...).
+		 */
+		if (put_user(len, (unsigned int __user *)arg))
+			retCode = -EFAULT;
+		else
+			GPSDRV_DBG("returning %u\n", len);
+		break;
+	default:
+		GPSDRV_DBG("Un-Identified IOCTL %d", cmd);
+		retCode = 0;
+		break;
+	}
+
+	return retCode;
+}
+
+/** gpsdrv_poll Function
+ * This function will wait till some data is received to the gps driver from ST
+ *
+ * Parameters :
+ * @file : File pointer for GPS char driver
+ * @wait : POLL wait information
+ * Returns the poll event mask (POLLIN when rx data is queued,
+ * POLLERR|POLLHUP when the device is not running)
+ */
+static unsigned int gpsdrv_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct gpsdrv_data *hgps = file->private_data;
+
+	/* ->poll() must return an event mask, not a negative errno
+	 * (was: return -EINVAL), so report a stopped device as error/hangup.
+	 */
+	if (!test_bit(GPS_ST_RUNNING, &hgps->state)) {
+		GPSDRV_ERR("GPS Device is not running");
+		return POLLERR | POLLHUP;
+	}
+
+	/* Wait till data is signalled from gpsdrv_st_recv function
+	 * with AI2 packet
+	 */
+	poll_wait(file, &hgps->gpsdrv_data_q, wait);
+
+	if (!skb_queue_empty(&hgps->rx_list))
+		mask |= POLLIN;	/* TODO: check app for mask */
+
+	return mask;
+}
+
+/* GPS Char driver function pointers
+ * These functions are called from USER space by performing File Operations
+ * on the /dev/gps node exposed by this driver during init
+ */
+const struct file_operations gpsdrv_chrdev_ops = {
+	.owner = THIS_MODULE,
+	.open = gpsdrv_open,
+	.read = gpsdrv_read,
+	.write = gpsdrv_write,
+	.unlocked_ioctl = gpsdrv_ioctl,
+	.poll = gpsdrv_poll,
+	.release = gpsdrv_release,
+};
+
+/*********Functions called during insmod and delmod****************************/
+
+static int gpsdrv_major; /* GPS major number */
+static struct class *gpsdrv_class; /* GPS class during class_create */
+static struct device *gpsdrv_dev; /* GPS dev during device_create */
+/** gpsdrv_init Function
+ * This function Initializes the gps driver parameters and exposes
+ * /dev/gps node to user space
+ *
+ * Parameters : NULL
+ * Returns GPS_SUCCESS on success
+ * else suitable error code
+ */
+static int __init gpsdrv_init(void)
+{
+	GPSDRV_DBG(" Inside %s", __func__);
+
+	/* Expose the device DEVICE_NAME to user space
+	 * And obtain the major number for the device
+	 */
+	gpsdrv_major = register_chrdev(0, DEVICE_NAME, &gpsdrv_chrdev_ops);
+	if (0 > gpsdrv_major) {
+		GPSDRV_ERR("Error when registering to char dev");
+		return GPS_ERR_FAILURE;
+	}
+	GPSDRV_VER("allocated %d, %d", gpsdrv_major, 0);
+
+	/* udev: create the class and the /dev/gps node */
+	gpsdrv_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(gpsdrv_class)) {
+		GPSDRV_ERR(" Something went wrong in class_create");
+		unregister_chrdev(gpsdrv_major, DEVICE_NAME);
+		return GPS_ERR_CLASS;
+	}
+
+	gpsdrv_dev =
+		device_create(gpsdrv_class, NULL, MKDEV(gpsdrv_major, 0),
+				NULL, DEVICE_NAME);
+	if (IS_ERR(gpsdrv_dev)) {
+		/* was mis-reported as a class_create failure */
+		GPSDRV_ERR(" Error in device_create");
+		unregister_chrdev(gpsdrv_major, DEVICE_NAME);
+		class_destroy(gpsdrv_class);
+		return GPS_ERR_CLASS;
+	}
+
+	return GPS_SUCCESS;
+}
+
+/** gpsdrv_exit Function
+ * This function Destroys the gps driver parametes and /dev/gps node
+ *
+ * Parameters : NULL
+ * Returns NULL
+ */
+static void __exit gpsdrv_exit(void)
+{
+	GPSDRV_DBG(" Inside %s", __func__);
+	GPSDRV_VER(" Bye.. freeing up %d", gpsdrv_major);
+
+	/* Tear down in the reverse order of gpsdrv_init() */
+	device_destroy(gpsdrv_class, MKDEV(gpsdrv_major, 0));
+	class_destroy(gpsdrv_class);
+	unregister_chrdev(gpsdrv_major, DEVICE_NAME);
+}
+
+module_init(gpsdrv_init);
+module_exit(gpsdrv_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/misc/ti-st/tty_hci.c b/drivers/misc/ti-st/tty_hci.c
new file mode 100644
index 00000000000000..591b0be944ca84
--- /dev/null
+++ b/drivers/misc/ti-st/tty_hci.c
@@ -0,0 +1,543 @@
+/*
+ * TTY emulation for user-space Bluetooth stacks over HCI-H4
+ * Copyright (C) 2011-2012 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/** define one of the following for debugging
+#define DEBUG
+#define VERBOSE
+*/
+
+#define pr_fmt(fmt) "(hci_tty): " fmt
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+
+#include <linux/ti_wilink_st.h>
+
+/* Number of seconds to wait for registration completion
+ * when ST returns PENDING status.
+ */
+#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
+
+/**
+ * struct ti_st - driver operation structure
+ * @hdev: hci device pointer which binds to bt driver
+ * @reg_status: ST registration callback status
+ * @st_write: write function provided by the ST driver
+ *	to be used by the driver during send_frame.
+ * @wait_reg_completion - completion sync between ti_st_open
+ *	and st_reg_completion_cb.
+ * @data_q: wait queue woken by st_receive() when rx data arrives
+ * @rx_list: received skbs queued by st_receive(), drained by read()
+ */
+struct ti_st {
+	struct hci_dev *hdev;
+	char reg_status;
+	long (*st_write) (struct sk_buff *);
+	struct completion wait_reg_completion;
+	wait_queue_head_t data_q;
+	struct sk_buff_head rx_list;
+};
+
+#define DEVICE_NAME "hci_tty"
+
+/***********Functions called from ST driver**********************************/
+/* Called by Shared Transport layer when receive data is
+ * available */
+static long st_receive(void *priv_data, struct sk_buff *skb)
+{
+	struct ti_st *hst = (void *) priv_data;
+
+	pr_debug("@ %s", __func__);
+#ifdef VERBOSE
+	print_hex_dump(KERN_INFO, ">rx>", DUMP_PREFIX_NONE,
+			16, 1, skb->data, skb->len, 0);
+#endif
+	/* Queue the frame and wake any reader/poller blocked on data_q */
+	skb_queue_tail(&hst->rx_list, skb);
+	wake_up_interruptible(&hst->data_q);
+	return 0;
+}
+
+/* Called by ST layer to indicate protocol registration completion
+ * status.ti_st_open() function will wait for signal from this
+ * API when st_register() function returns ST_PENDING.
+ */
+static void st_reg_completion_cb(void *priv_data, char data)
+{
+	struct ti_st *lhst = (void *) priv_data;
+
+	pr_info("@ %s\n", __func__);
+	/* Save registration status for use in ti_st_open() */
+	lhst->reg_status = data;
+	/* complete the wait in ti_st_open() */
+	complete(&lhst->wait_reg_completion);
+}
+
+/* protocol structure registered with shared transport */
+#define MAX_BT_CHNL_IDS 3
+/* NOTE(review): this table is static and shared; priv_data is rewritten on
+ * every open(), which suggests only one concurrent open is safe — confirm.
+ */
+static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+	{
+		.chnl_id = 0x04, /* HCI Events */
+		.hdr_len = 2,
+		.offset_len_in_hdr = 1,
+		.len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+		.reserve = 8,
+	},
+	{
+		.chnl_id = 0x02, /* ACL */
+		.hdr_len = 4,
+		.offset_len_in_hdr = 2,
+		.len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
+		.reserve = 8,
+	},
+	{
+		.chnl_id = 0x03, /* SCO */
+		.hdr_len = 3,
+		.offset_len_in_hdr = 2,
+		.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
+		.reserve = 8,
+	},
+};
+/** hci_tty_open Function
+ * This function will perform an register on ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_open(struct inode *inod, struct file *file)
+{
+	int i = 0, j = 0, err = 0;
+	unsigned long timeleft;
+	struct ti_st *hst;
+
+	pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+	hst = kzalloc(sizeof(*hst), GFP_KERNEL);
+	if (!hst)	/* was unchecked: NULL deref on OOM */
+		return -ENOMEM;
+	file->private_data = hst;
+
+	/* Initialise rx_list and data_q BEFORE registering any channel:
+	 * st_receive() may be invoked as soon as st_register() succeeds
+	 * (they were initialised after the loop).
+	 */
+	skb_queue_head_init(&hst->rx_list);
+	init_waitqueue_head(&hst->data_q);
+
+	for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+		ti_st_proto[i].priv_data = hst;
+		ti_st_proto[i].max_frame_size = 1026;
+		ti_st_proto[i].recv = st_receive;
+		ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
+
+		/* Prepare wait-for-completion handler */
+		init_completion(&hst->wait_reg_completion);
+		/* Reset ST registration callback status flag,
+		 * this value will be updated in
+		 * st_reg_completion_cb()
+		 * function whenever it called from ST driver.
+		 */
+		hst->reg_status = -EINPROGRESS;
+
+		err = st_register(&ti_st_proto[i]);
+		if (!err)
+			goto done;
+
+		if (err != -EINPROGRESS) {
+			pr_err("st_register failed %d", err);
+			/* was: return err — leaked hst and channels [0,i) */
+			goto error;
+		}
+
+		/* ST is busy with either protocol
+		 * registration or firmware download.
+		 */
+		pr_debug("waiting for registration completion signal from ST");
+		timeleft = wait_for_completion_timeout
+			(&hst->wait_reg_completion,
+			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
+		if (!timeleft) {
+			pr_err("Timeout(%d sec),didn't get reg completion signal from ST",
+				BT_REGISTER_TIMEOUT / 1000);
+			err = -ETIMEDOUT;
+			goto error;
+		}
+
+		/* Is ST registration callback
+		 * called with ERROR status? */
+		if (hst->reg_status != 0) {
+			pr_err("ST registration completed with invalid status %d",
+				hst->reg_status);
+			err = -EAGAIN;
+			goto error;
+		}
+
+done:
+		hst->st_write = ti_st_proto[i].write;
+		if (!hst->st_write) {
+			pr_err("undefined ST write function");
+			i++;	/* channel i registered: include it in unwind */
+			err = -EIO;
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	/* Undo the registration of channels [0, i) and release the
+	 * per-open state (the old error paths leaked both).
+	 */
+	for (j = 0; j < i; j++)
+		if (st_unregister(&ti_st_proto[j]))
+			pr_err("st_unregister(%d) failed",
+				ti_st_proto[j].chnl_id);
+	hst->st_write = NULL;
+	skb_queue_purge(&hst->rx_list);
+	kfree(hst);
+	file->private_data = NULL;
+	return err;
+}
+
+/** hci_tty_release Function
+ * This function will un-registers from the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_release(struct inode *inod, struct file *file)
+{
+	int err, i;
+	struct ti_st *hst = file->private_data;
+
+	pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+	/* Unregister every channel, logging failures but continuing so all
+	 * three are attempted.
+	 * NOTE(review): only the status of the LAST st_unregister() call is
+	 * returned; earlier failures are logged but dropped — confirm this
+	 * is intentional.
+	 */
+	for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+		err = st_unregister(&ti_st_proto[i]);
+		if (err)
+			pr_err("st_unregister(%d) failed with error %d",
+				ti_st_proto[i].chnl_id, err);
+	}
+
+	/* Drop undelivered rx data and free the per-open state */
+	hst->st_write = NULL;
+	skb_queue_purge(&hst->rx_list);
+	kfree(hst);
+	return err;
+}
+
+/** hci_tty_read Function
+ * Blocks (up to 1 s) for a received HCI frame, then returns it to user
+ * space with the pkt_type byte (stashed in skb->cb[0] by the receive
+ * path) prepended.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : Data which needs to be passed to APP
+ * @size : Length of the data passed
+ * @offset :
+ * Returns Size of packet received - on success
+ * else suitable error code
+ */
+ssize_t hci_tty_read(struct file *file, char __user *data, size_t size,
+		loff_t *offset)
+{
+	int len = 0;
+	long tout;
+	struct sk_buff *skb = NULL, *rskb = NULL;
+	struct ti_st *hst;
+
+	/* %zu: 'size' is a size_t (was printed with %u) */
+	pr_debug("inside %s (%p, %p, %zu, %p)\n",
+		 __func__, file, data, size, offset);
+
+	/* Validate input parameters */
+	if ((NULL == file) || (NULL == data) || (0 == size)) {
+		pr_err("Invalid input params passed to %s", __func__);
+		return -EINVAL;
+	}
+
+	hst = file->private_data;
+
+	/* cannot come here if poll-ed before reading
+	 * if not poll-ed wait on the same wait_q
+	 */
+	tout = wait_event_interruptible_timeout(hst->data_q,
+			!skb_queue_empty(&hst->rx_list),
+			msecs_to_jiffies(1000));
+	/* Check for timed out condition */
+	if (0 == tout) {
+		pr_err("Device Read timed out\n");
+		return -ETIMEDOUT;
+	}
+	if (tout < 0)	/* interrupted by a signal */
+		return tout;
+
+	/* hst->rx_list not empty skb already present */
+	skb = skb_dequeue(&hst->rx_list);
+	if (!skb) {
+		pr_err("dequed skb is null?\n");
+		return -EIO;
+	}
+
+#ifdef VERBOSE
+	print_hex_dump(KERN_INFO, ">in>", DUMP_PREFIX_NONE,
+			16, 1, skb->data, skb->len, 0);
+#endif
+
+	if (skb->len >= size) {
+		/* User buffer too small for frame + pkt_type byte.
+		 * Re-queue the skb so the frame is not lost (it was
+		 * silently dropped/leaked before).
+		 */
+		pr_err("FIONREAD not done before read\n");
+		skb_queue_head(&hst->rx_list, skb);
+		return -ENOMEM;
+	}
+
+	/* Build the returning skb: pkt_type byte followed by the payload
+	 * (skb->len + 1 <= size holds here).
+	 */
+	rskb = alloc_skb(size, GFP_KERNEL);
+	if (!rskb) {
+		pr_err("alloc_skb error\n");
+		skb_queue_head(&hst->rx_list, skb);	/* keep the frame */
+		return -ENOMEM;
+	}
+
+	/* cb[0] has the pkt_type 0x04 or 0x02 or 0x03 */
+	memcpy(skb_put(rskb, 1), &skb->cb[0], 1);
+	memcpy(skb_put(rskb, skb->len), skb->data, skb->len);
+
+	if (copy_to_user(data, rskb->data, rskb->len)) {
+		pr_err("unable to copy to user space\n");
+		/* Queue the skb back to head */
+		skb_queue_head(&hst->rx_list, skb);
+		kfree_skb(rskb);
+		return -EIO;
+	}
+
+	len = rskb->len;	/* len of returning skb */
+	kfree_skb(skb);
+	kfree_skb(rskb);
+	pr_debug("total size read= %d\n", len);
+	return len;
+}
+
+/* hci_tty_write Function
+ * Copies an HCI frame from user space into an skb and hands it to the
+ * ST core via st_write.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : packet data from BT application
+ * @size : Size of the packet data
+ * @offset :
+ * Returns Size of packet on success
+ * else suitable error code
+ */
+ssize_t hci_tty_write(struct file *file, const char __user *data,
+		size_t size, loff_t *offset)
+{
+	struct ti_st *hst = file->private_data;
+	struct sk_buff *skb;
+
+	/* %zu: 'size' is a size_t (was printed with %u) */
+	pr_debug("inside %s (%p, %p, %zu, %p)\n",
+		 __func__, file, data, size, offset);
+
+	if (!hst->st_write) {
+		pr_err(" Can't write to ST, hhci_tty->st_write null ?");
+		return -EINVAL;
+	}
+
+	skb = alloc_skb(size, GFP_KERNEL);
+	/* Validate Created SKB */
+	if (NULL == skb) {
+		pr_err("Error allocating SKB");
+		return -ENOMEM;
+	}
+
+	/* Forward the data from the user space to ST core */
+	if (copy_from_user(skb_put(skb, size), data, size)) {
+		pr_err(" Unable to copy from user space");
+		kfree_skb(skb);
+		return -EIO;
+	}
+
+#ifdef VERBOSE
+	pr_debug("start data..");
+	print_hex_dump(KERN_INFO, "<out<", DUMP_PREFIX_NONE,
+			16, 1, skb->data, size, 0);
+	pr_debug("\n..end data");
+#endif
+
+	/* skb ownership passes to st_write — presumed to consume/free it;
+	 * confirm against the ST core contract.
+	 */
+	hst->st_write(skb);
+	return size;
+}
+
+/** hci_tty_ioctl Function
+ * This will perform the functions as directed by the command and command
+ * argument.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @cmd : IOCTL Command (FIONREAD supported)
+ * @arg : Command argument for IOCTL command
+ * Returns 0 on success
+ * else suitable error code
+ */
+static long hci_tty_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	struct sk_buff *skb = NULL;
+	int retCode = 0;
+	unsigned int len = 0;
+	struct ti_st *hst;
+
+	pr_debug("inside %s (%p, %u, %lx)", __func__, file, cmd, arg);
+
+	/* Validate input parameters */
+	if ((NULL == file) || (0 == cmd)) {
+		pr_err("invalid input parameters passed to %s", __func__);
+		return -EINVAL;
+	}
+
+	hst = file->private_data;
+
+	switch (cmd) {
+	case FIONREAD:
+		/* Peek at the head SKB (dequeue + re-queue) and report its
+		 * length + 1 for the PKT_TYPE byte not counted by TI-ST.
+		 */
+		skb = skb_dequeue(&hst->rx_list);
+		if (skb != NULL) {
+			len = skb->len + 1;
+			/* Re-Store the SKB for future Read operations */
+			skb_queue_head(&hst->rx_list, skb);
+		}
+		/* 'arg' is a user-space pointer: use put_user(), never a
+		 * direct dereference (was *(unsigned int *)arg = ...).
+		 */
+		if (put_user(len, (unsigned int __user *)arg))
+			retCode = -EFAULT;
+		else
+			pr_debug("returning %u\n", len);
+		break;
+	default:
+		pr_debug("Un-Identified IOCTL %d", cmd);
+		retCode = 0;
+		break;
+	}
+
+	return retCode;
+}
+
+/** hci_tty_poll Function
+ * This function will wait till some data is received to the hci_tty driver from ST
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @wait : POLL wait information
+ * Returns status of POLL on success
+ * else suitable error code
+ */
+static unsigned int hci_tty_poll(struct file *file, poll_table *wait)
+{
+	struct ti_st *hst = file->private_data;
+	unsigned long mask = 0;
+
+	pr_debug("@ %s\n", __func__);
+
+	/* wait to be completed by st_receive */
+	poll_wait(file, &hst->data_q, wait);
+	pr_debug("poll broke\n");
+
+	/* Readable as soon as st_receive() has queued at least one frame */
+	if (!skb_queue_empty(&hst->rx_list)) {
+		pr_debug("rx list que !empty\n");
+		mask |= POLLIN;	/* TODO: check app for mask */
+	}
+
+	return mask;
+}
+
+/* BT Char driver function pointers
+ * These functions are called from USER space by performing File Operations
+ * on the /dev/hci_tty node exposed by this driver during init
+ */
+const struct file_operations hci_tty_chrdev_ops = {
+	.owner = THIS_MODULE,
+	.open = hci_tty_open,
+	.read = hci_tty_read,
+	.write = hci_tty_write,
+	.unlocked_ioctl = hci_tty_ioctl,
+	.poll = hci_tty_poll,
+	.release = hci_tty_release,
+};
+
+/*********Functions called during insmod and delmod****************************/
+
+static int hci_tty_major; /* major number */
+static struct class *hci_tty_class; /* class during class_create */
+static struct device *hci_tty_dev; /* dev during device_create */
+/** hci_tty_init Function
+ * This function Initializes the hci_tty driver parameters and exposes
+ * /dev/hci_tty node to user space
+ *
+ * Parameters : NULL
+ * Returns 0 on success
+ * else suitable error code
+ */
+static int __init hci_tty_init(void)
+{
+	pr_info("inside %s\n", __func__);
+
+	/* Expose the device DEVICE_NAME to user space
+	 * And obtain the major number for the device
+	 */
+	hci_tty_major = register_chrdev(0, DEVICE_NAME, &hci_tty_chrdev_ops);
+	if (0 > hci_tty_major) {
+		pr_err("Error when registering to char dev");
+		return hci_tty_major;
+	}
+
+	/* udev: create the class and the /dev/hci_tty node */
+	hci_tty_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(hci_tty_class)) {
+		pr_err("Something went wrong in class_create");
+		unregister_chrdev(hci_tty_major, DEVICE_NAME);
+		/* propagate the real errno (was: return -1) */
+		return PTR_ERR(hci_tty_class);
+	}
+
+	hci_tty_dev =
+		device_create(hci_tty_class, NULL, MKDEV(hci_tty_major, 0),
+				NULL, DEVICE_NAME);
+	if (IS_ERR(hci_tty_dev)) {
+		pr_err("Error in device create");
+		unregister_chrdev(hci_tty_major, DEVICE_NAME);
+		class_destroy(hci_tty_class);
+		/* propagate the real errno (was: return -1) */
+		return PTR_ERR(hci_tty_dev);
+	}
+	pr_info("allocated %d, %d\n", hci_tty_major, 0);
+	return 0;
+}
+
+/** hci_tty_exit Function
+ * This function Destroys the hci_tty driver parametes and /dev/hci_tty node
+ *
+ * Parameters : NULL
+ * Returns NULL
+ */
+static void __exit hci_tty_exit(void)
+{
+	pr_info("inside %s\n", __func__);
+	pr_info("bye.. freeing up %d\n", hci_tty_major);
+
+	/* Tear down in the reverse order of hci_tty_init() */
+	device_destroy(hci_tty_class, MKDEV(hci_tty_major, 0));
+	class_destroy(hci_tty_class);
+	unregister_chrdev(hci_tty_major, DEVICE_NAME);
+}
+
+module_init(hci_tty_init);
+module_exit(hci_tty_exit);
+
+MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 00000000000000..509822c81e97b7
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,166 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+/* Look up the stat entry for @uid in uid_list, or NULL if absent.
+ * Caller must hold uid_lock (see find_or_create_uid_stat()).
+ */
+static struct uid_stat *find_uid_stat(uid_t uid) {
+	struct uid_stat *entry;
+
+	list_for_each_entry(entry, &uid_list, link) {
+		if (entry->uid == uid) {
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+/* /proc read handler for <uid>/tcp_snd.
+ * Counters are biased by INT_MIN at creation (see create_stat()); adding
+ * INT_MIN back here recovers the unsigned byte count.
+ */
+static int tcp_snd_read_proc(char *page, char **start, off_t off,
+			     int count, int *eof, void *data)
+{
+	int len;
+	unsigned int bytes;
+	char *p = page;
+	struct uid_stat *uid_entry = (struct uid_stat *) data;
+	if (!data)
+		return 0;
+
+	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN);
+	p += sprintf(p, "%u\n", bytes);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/* /proc read handler for <uid>/tcp_rcv.
+ * Mirrors tcp_snd_read_proc(): un-bias the INT_MIN-based counter before
+ * printing it as an unsigned byte count.
+ */
+static int tcp_rcv_read_proc(char *page, char **start, off_t off,
+			     int count, int *eof, void *data)
+{
+	int len;
+	unsigned int bytes;
+	char *p = page;
+	struct uid_stat *uid_entry = (struct uid_stat *) data;
+	if (!data)
+		return 0;
+
+	bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN);
+	p += sprintf(p, "%u\n", bytes);
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/* Create a new entry for tracking the specified uid.
+ * Caller must hold uid_lock (called under it in find_or_create_uid_stat(),
+ * hence the GFP_ATOMIC allocation). Returns NULL on allocation failure.
+ */
+static struct uid_stat *create_stat(uid_t uid) {
+	struct uid_stat *new_uid;
+	/* Create the uid stat struct and append it to the list. */
+	new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+	if (!new_uid)
+		return NULL;
+
+	new_uid->uid = uid;
+	/* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+	atomic_set(&new_uid->tcp_rcv, INT_MIN);
+	atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+	list_add_tail(&new_uid->link, &uid_list);
+	return new_uid;
+}
+
+/* Publish /proc/uid_stat/<uid>/{tcp_snd,tcp_rcv} for @new_uid.
+ * The uid_stat pointer is stored as each entry's data so the read
+ * handlers know which counters to report.
+ */
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+	char uid_s[32];
+	struct proc_dir_entry *entry;
+
+	snprintf(uid_s, sizeof(uid_s), "%d", new_uid->uid);
+	entry = proc_mkdir(uid_s, parent);
+	if (!entry) {
+		/* was unchecked: a NULL dir was passed on to the
+		 * create_proc_read_entry() calls below
+		 */
+		pr_err("uid_stat: failed to create proc dir for uid %d\n",
+		       new_uid->uid);
+		return;
+	}
+
+	/* Keep reference to uid_stat so we know what uid to read stats from. */
+	create_proc_read_entry("tcp_snd", S_IRUGO, entry, tcp_snd_read_proc,
+		(void *) new_uid);
+
+	create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc,
+		(void *) new_uid);
+}
+
+/* Return the stat entry for @uid, creating it on first use.
+ * Lookup and list insertion happen under uid_lock; the /proc file
+ * creation is done after dropping the lock — presumably because the proc
+ * calls may sleep (TODO confirm).
+ */
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+	struct uid_stat *entry;
+	unsigned long flags;
+	spin_lock_irqsave(&uid_lock, flags);
+	entry = find_uid_stat(uid);
+	if (entry) {
+		spin_unlock_irqrestore(&uid_lock, flags);
+		return entry;
+	}
+	entry = create_stat(uid);
+	spin_unlock_irqrestore(&uid_lock, flags);
+	if (entry)
+		create_stat_proc(entry);
+	return entry;
+}
+
+/* Account @size bytes of TCP transmit traffic to @uid.
+ * Returns 0 on success, -1 if the stat entry could not be allocated.
+ */
+int uid_stat_tcp_snd(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_snd);
+	return 0;
+}
+
+/* Account @size bytes of TCP receive traffic to @uid.
+ * Returns 0 on success, -1 if the stat entry could not be allocated.
+ */
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_rcv);
+	return 0;
+}
+
+/* Create the /proc/uid_stat root directory; per-uid subdirectories are
+ * added lazily by find_or_create_uid_stat().
+ */
+static int __init uid_stat_init(void)
+{
+	parent = proc_mkdir("uid_stat", NULL);
+	if (!parent) {
+		pr_err("uid_stat: failed to create proc entry\n");
+		return -ENOMEM;	/* was -1: return a real errno */
+	}
+	return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c
new file mode 100644
index 00000000000000..f5b95152948b2a
--- /dev/null
+++ b/drivers/misc/wl127x-rfkill.c
@@ -0,0 +1,121 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/wl127x-rfkill.h>
+
+/* rfkill toggle callback: @data carries the nshutdown GPIO number.
+ * UNBLOCKED drives the GPIO high, SOFT_BLOCKED drives it low.
+ */
+static int wl127x_rfkill_set_power(void *data, enum rfkill_state state)
+{
+	int nshutdown_gpio = (int) data;
+
+	switch (state) {
+	case RFKILL_STATE_UNBLOCKED:
+		gpio_set_value(nshutdown_gpio, 1);
+		break;
+	case RFKILL_STATE_SOFT_BLOCKED:
+		gpio_set_value(nshutdown_gpio, 0);
+		break;
+	default:
+		printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state);
+	}
+	return 0;
+}
+
+/* Probe: claim the BT shutdown GPIO, force the default (blocked) state
+ * and register an rfkill switch for it. All error paths release the GPIO.
+ */
+static int wl127x_rfkill_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+	enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED;	/* off */
+
+	rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio");
+	if (unlikely(rc))
+		return rc;
+
+	rc = gpio_direction_output(pdata->nshutdown_gpio, 0);
+	if (unlikely(rc))
+		goto fail_gpio;	/* was: returned without gpio_free() */
+
+	rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state);
+	/* Pass the real GPIO as data (was NULL, which made set_power
+	 * toggle GPIO 0 instead of the shutdown line).
+	 */
+	wl127x_rfkill_set_power((void *) pdata->nshutdown_gpio,
+				default_state);
+
+	pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+	if (unlikely(!pdata->rfkill)) {
+		rc = -ENOMEM;
+		goto fail_gpio;
+	}
+
+	pdata->rfkill->name = "wl127x";
+	pdata->rfkill->state = default_state;
+	/* userspace cannot take exclusive control */
+	pdata->rfkill->user_claim_unsupported = 1;
+	pdata->rfkill->user_claim = 0;
+	pdata->rfkill->data = (void *) pdata->nshutdown_gpio;
+	pdata->rfkill->toggle_radio = wl127x_rfkill_set_power;
+
+	rc = rfkill_register(pdata->rfkill);
+	if (unlikely(rc)) {
+		/* was: freed the rfkill but still returned 0 (success) */
+		rfkill_free(pdata->rfkill);
+		goto fail_gpio;
+	}
+
+	return 0;
+
+fail_gpio:
+	gpio_free(pdata->nshutdown_gpio);
+	return rc;
+}
+
+/* Remove: unregister/free the rfkill switch and release the GPIO,
+ * reversing wl127x_rfkill_probe().
+ */
+static int wl127x_rfkill_remove(struct platform_device *pdev)
+{
+	struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data;
+
+	rfkill_unregister(pdata->rfkill);
+	rfkill_free(pdata->rfkill);
+	gpio_free(pdata->nshutdown_gpio);
+
+	return 0;
+}
+
+/* Platform driver glue: binds to devices named "wl127x-rfkill" */
+static struct platform_driver wl127x_rfkill_platform_driver = {
+	.probe = wl127x_rfkill_probe,
+	.remove = wl127x_rfkill_remove,
+	.driver = {
+		   .name = "wl127x-rfkill",
+		   .owner = THIS_MODULE,
+		   },
+};
+
+static int __init wl127x_rfkill_init(void)
+{
+	return platform_driver_register(&wl127x_rfkill_platform_driver);
+}
+
+static void __exit wl127x_rfkill_exit(void)
+{
+	platform_driver_unregister(&wl127x_rfkill_platform_driver);
+}
+
+module_init(wl127x_rfkill_init);
+module_exit(wl127x_rfkill_exit);
+
+MODULE_ALIAS("platform:wl127x");
+MODULE_DESCRIPTION("wl127x-rfkill");
+MODULE_AUTHOR("Motorola");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 00000000000000..def7e75e1d5797
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,148 @@
+/*******************************************************************************
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This implements all the API for managing HW timestamp & PTP.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "stmmac_ptp.h"
+
+static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+ writel(data, ioaddr + PTP_TCR);
+}
+
+static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + PTP_TCR);
+ unsigned long data;
+
+	/* Convert the ptp_clock to nanoseconds
+ * formula = (1/ptp_clock) * 1000000000
+ * where, ptp_clock = 50MHz.
+ */
+ data = (1000000000ULL / 50000000);
+
+ /* 0.465ns accuracy */
+ if (value & PTP_TCR_TSCTRLSSR)
+ data = (data * 100) / 465;
+
+ writel(data, ioaddr + PTP_SSIR);
+}
+
+static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+ int limit;
+ u32 value;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(nsec, ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSINIT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time initialize to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
+{
+ u32 value;
+ int limit;
+
+ writel(addend, ioaddr + PTP_TAR);
+ /* issue command to update the addend value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSADDREG;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present addend update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub)
+{
+ u32 value;
+ int limit;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
+ ioaddr + PTP_STNSUR);
+	/* issue command to adjust/update the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSUPDT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time adjust/update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u64 stmmac_get_systime(void __iomem *ioaddr)
+{
+ u64 ns;
+
+ ns = readl(ioaddr + PTP_STNSR);
+ /* convert sec time value to nanosecond */
+ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+ return ns;
+}
+
+const struct stmmac_hwtimestamp stmmac_ptp = {
+ .config_hw_tstamping = stmmac_config_hw_tstamping,
+ .init_systime = stmmac_init_systime,
+ .config_sub_second_increment = stmmac_config_sub_second_increment,
+ .config_addend = stmmac_config_addend,
+ .adjust_systime = stmmac_adjust_systime,
+ .get_systime = stmmac_get_systime,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 00000000000000..7680581ebe12fe
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,211 @@
+/*******************************************************************************
+ PTP 1588 clock using the STMMAC.
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+*******************************************************************************/
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+/**
+ * stmmac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired period change in parts per billion
+ *
+ * Description: this function will adjust the frequency of hardware clock.
+ */
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 diff, addend;
+ int neg_adj = 0;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ addend = priv->default_addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+ addend = neg_adj ? (addend - diff) : (addend + diff);
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->config_addend(priv->ioaddr, addend);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 sec, nsec;
+ u32 quotient, reminder;
+ int neg_adj = 0;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &reminder);
+ sec = quotient;
+ nsec = reminder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u64 ns;
+ u32 reminder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = priv->hw->ptp->get_systime(priv->ioaddr);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
+ ts->tv_nsec = reminder;
+
+ return 0;
+}
+
+/**
+ * stmmac_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int stmmac_set_time(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+static int stmmac_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac_ptp_clock",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime = stmmac_get_time,
+ .settime = stmmac_set_time,
+ .enable = stmmac_enable,
+};
+
+/**
+ * stmmac_ptp_register
+ * @priv: driver private structure
+ * Description: this function will register the ptp clock driver
+ * to the kernel. It also does some housekeeping work.
+ */
+int stmmac_ptp_register(struct stmmac_priv *priv)
+{
+ spin_lock_init(&priv->ptp_lock);
+ priv->ptp_clock_ops = stmmac_ptp_clock_ops;
+
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ priv->device);
+ if (IS_ERR(priv->ptp_clock)) {
+ priv->ptp_clock = NULL;
+ pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
+ } else
+ pr_debug("Added PTP HW clock successfully on %s\n",
+ priv->dev->name);
+
+ return 0;
+}
+
+/**
+ * stmmac_ptp_unregister
+ * @priv: driver private structure
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel.
+ */
+void stmmac_ptp_unregister(struct stmmac_priv *priv)
+{
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->dev->name);
+ }
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 00000000000000..3dbc047622fa8b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ PTP Header file
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+******************************************************************************/
+
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
+
+#define STMMAC_SYSCLOCK 62500000
+
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x0700 /* Timestamp Control Reg */
+#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x0708 /* System Time - Seconds Reg */
+#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */
+#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */
+#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */
+#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
+#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
+#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
+#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
+#define PTP_TSR 0x0728 /* Timestamp Status */
+
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+
+/* PTP TCR defines */
+#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
+/* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSTRIG 0x00000010
+#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
+#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
+/* Timestamp Digital or Binary Rollover Control */
+#define PTP_TCR_TSCTRLSSR 0x00000200
+
+/* Enable PTP packet Processing for Version 2 Format */
+#define PTP_TCR_TSVER2ENA 0x00000400
+/* Enable Processing of PTP over Ethernet Frames */
+#define PTP_TCR_TSIPENA 0x00000800
+/* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define PTP_TCR_TSIPV6ENA 0x00001000
+/* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define PTP_TCR_TSIPV4ENA 0x00002000
+/* Enable Timestamp Snapshot for Event Messages */
+#define PTP_TCR_TSEVNTENA 0x00004000
+/* Enable Snapshot for Messages Relevant to Master */
+#define PTP_TCR_TSMSTRENA 0x00008000
+/* Select PTP packets for Taking Snapshots */
+#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+/* Enable MAC address for PTP Frame Filtering */
+#define PTP_TCR_TSENMACADDR 0x00040000
+
+#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/phy/broadcom_br.c b/drivers/net/phy/broadcom_br.c
new file mode 100644
index 00000000000000..5ef8e33098c37b
--- /dev/null
+++ b/drivers/net/phy/broadcom_br.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2013 Parrot SA (Matthieu CASTET)
+ *
+ * Inspired by code from generic phy
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+static int br_config_init(struct phy_device *phydev)
+{
+ u32 features;
+
+ features = SUPPORTED_MII;
+
+ /* When autoneg is enabled, rx clock is not generated
+ and therefore is not usable. So disable autoneg.
+ */
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ features |= SUPPORTED_100baseT_Full;
+ features |= SUPPORTED_10baseT_Full;
+
+ phydev->supported = features;
+ phydev->advertising = features;
+
+ /* set slave 100Mbits 1 pair */
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ /*
+ * PHY "optimization"
+	 * Broadcom's soup (ported from 89810A2_script_v2_2)
+ *
+ */
+ /* reset */
+ phy_write(phydev, MII_BMCR, 0x8000);
+
+ phy_write(phydev, 0x17, 0x0F93);
+ phy_write(phydev, 0x15, 0x107F);
+ phy_write(phydev, 0x17, 0x0F90);
+ phy_write(phydev, 0x15, 0x0001);
+ phy_write(phydev, MII_BMCR, 0x3000);
+ phy_write(phydev, MII_BMCR, 0x0200);
+
+ phy_write(phydev, 0x18, 0x0C00);
+
+ phy_write(phydev, 0x17, 0x0F90);
+ phy_write(phydev, 0x15, 0x0000);
+ phy_write(phydev, MII_BMCR, 0x0100);
+
+ phy_write(phydev, 0x17, 0x0001);
+ phy_write(phydev, 0x15, 0x0027);
+
+ phy_write(phydev, 0x17, 0x000E);
+ phy_write(phydev, 0x15, 0x9B52);
+
+ phy_write(phydev, 0x17, 0x000F);
+ phy_write(phydev, 0x15, 0xA04D);
+
+ phy_write(phydev, 0x17, 0x0F90);
+ phy_write(phydev, 0x15, 0x0001);
+
+ phy_write(phydev, 0x17, 0x0F92);
+ phy_write(phydev, 0x15, 0x9225);
+
+ phy_write(phydev, 0x17, 0x000A);
+ phy_write(phydev, 0x15, 0x0323);
+
+ phy_write(phydev, 0x17, 0x0FFD);
+ phy_write(phydev, 0x15, 0x1C3F);
+
+ phy_write(phydev, 0x17, 0x0FFE);
+ phy_write(phydev, 0x15, 0x1C3F);
+
+ phy_write(phydev, 0x17, 0x0F99);
+ phy_write(phydev, 0x15, 0x7180);
+
+ phy_write(phydev, 0x17, 0x0F9A);
+ phy_write(phydev, 0x15, 0x34C0);
+
+ phy_write(phydev, 0x17, 0x0F0E);
+ phy_write(phydev, 0x15, 0x0000);
+ phy_write(phydev, 0x17, 0x0F9F);
+ phy_write(phydev, 0x15, 0x0000);
+ phy_write(phydev, 0x18, 0xF1E7);
+
+
+ phy_write(phydev, MII_BMCR, 1<<9);
+ pr_info("forced slave 100Mbits 1 pair\n");
+
+ return 0;
+}
+
+static int br_setup_forced(struct phy_device *phydev)
+{
+ int err;
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ phydev->pause = phydev->asym_pause = 0;
+
+ if (SPEED_100 == phydev->speed)
+ ctl |= 0x8 << 6;
+ else
+ ctl &= ~(0x8 << 6);
+
+ /*if (DUPLEX_FULL == phydev->duplex)
+ ctl |= BMCR_FULLDPLX;*/
+ /* XXX how to set pair and master/slave ? */
+
+ err = phy_write(phydev, MII_BMCR, ctl);
+
+ return err;
+}
+
+static inline u32 ethtool_adv_to_brmii_adv_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_10baseT_Half)
+ result |= 1<<1;
+ if (ethadv & ADVERTISED_10baseT_Full)
+ result |= 1<<1;
+ if (ethadv & ADVERTISED_100baseT_Half)
+ result |= 1<<5;
+ if (ethadv & ADVERTISED_100baseT_Full)
+ result |= 1<<5;
+ if (ethadv & ADVERTISED_Pause)
+ result |= 1<<14;
+ if (ethadv & ADVERTISED_Asym_Pause)
+ result |= 1<<15;
+
+ return result;
+}
+
+static int br_config_advert(struct phy_device *phydev)
+{
+ u32 advertise;
+ int oldadv, adv;
+ int err, changed = 0;
+
+ /* Only allow advertising what
+ * this PHY supports */
+ phydev->advertising &= phydev->supported;
+ advertise = phydev->advertising;
+
+ /* Setup standard advertisement */
+ oldadv = adv = phy_read(phydev, MII_ADVERTISE);
+
+ if (adv < 0)
+ return adv;
+
+ adv &= ~(0xffff);
+ adv |= ethtool_adv_to_brmii_adv_t(advertise);
+
+ if (adv != oldadv) {
+ err = phy_write(phydev, MII_ADVERTISE, adv);
+
+ if (err < 0)
+ return err;
+ changed = 1;
+ }
+
+ return changed;
+}
+
+static int br_config_aneg(struct phy_device *phydev)
+{
+ int result;
+
+ if (AUTONEG_ENABLE != phydev->autoneg)
+ return br_setup_forced(phydev);
+
+ result = br_config_advert(phydev);
+
+ if (result < 0) /* error */
+ return result;
+
+ if (result == 0) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated? */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ result = 1; /* do restart aneg */
+ }
+
+ /* Only restart aneg if we are advertising something different
+ * than we were before. */
+ if (result > 0)
+ result = genphy_restart_aneg(phydev);
+
+ return result;
+}
+
+static int br_read_status(struct phy_device *phydev)
+{
+ int adv;
+ int err;
+ int lpa;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+
+ lpa = phy_read(phydev, 0x7);
+
+ if (lpa < 0)
+ return lpa;
+
+ adv = phy_read(phydev, MII_ADVERTISE);
+
+ if (adv < 0)
+ return adv;
+
+ lpa &= adv;
+
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL; /* ???*/
+ phydev->pause = phydev->asym_pause = 0;
+
+ if (lpa & (1<<5)) {
+ phydev->speed = SPEED_100;
+
+ }
+
+ if (phydev->duplex == DUPLEX_FULL){
+ phydev->pause = lpa & (1<<14) ? 1 : 0;
+ phydev->asym_pause = lpa & (1<<15) ? 1 : 0;
+ }
+ } else {
+ int bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ phydev->duplex = DUPLEX_FULL;
+
+ if ((bmcr & (0xf << 6)) == 0x8 << 6)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ phydev->pause = phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+
+static struct phy_driver br_drivers = {
+ .phy_id = 0x03625cc2,
+ /* XXX check mask */
+ .phy_id_mask = 0xfffffff0,
+ .name = "BR PHY",
+ .config_init = br_config_init,
+ .features = 0,
+ .config_aneg = br_config_aneg,
+ .read_status = br_read_status,
+ .update_link = genphy_update_link,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = {.owner= THIS_MODULE, },
+};
+
+static int __init broadcom_init(void)
+{
+ return phy_driver_register(&br_drivers);
+}
+
+static void __exit broadcom_exit(void)
+{
+ phy_driver_unregister(&br_drivers);
+}
+
+module_init(broadcom_init);
+module_exit(broadcom_exit);
+
+static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ { 0x03625cc2, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/fakephy.c b/drivers/net/phy/fakephy.c
new file mode 100644
index 00000000000000..e6116550a8ff67
--- /dev/null
+++ b/drivers/net/phy/fakephy.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2013 Parrot SA (Jimmy Perchet)
+ *
+ * Inspired by code from generic phy
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+static int fp_config_init(struct phy_device *phydev)
+{
+ u32 features;
+
+ printk(KERN_INFO "fakephy config init\n");
+ features = SUPPORTED_MII;
+
+ /* When autoneg is enabled, rx clock is not generated
+ and therefore is not usable. So disable autoneg.
+ */
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ features |= SUPPORTED_100baseT_Full;
+
+ phydev->supported = features;
+ phydev->advertising = features;
+
+ /* set 100Mbits*/
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+
+static int fp_setup_forced(struct phy_device *phydev)
+{
+ phydev->pause = phydev->asym_pause = 0;
+
+ return 0;
+}
+
+static int fp_config_advert(struct phy_device *phydev)
+{
+ return 0;
+}
+static int fp_config_aneg(struct phy_device *phydev)
+{
+ fp_setup_forced(phydev);
+ return 0;
+}
+
+static int fp_update_link(struct phy_device *phydev)
+{
+ phydev->link = 1;
+ return 0;
+}
+
+static int fp_read_status(struct phy_device *phydev)
+{
+ phydev->link = 1;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->pause = phydev->asym_pause = 0;
+ return 0;
+}
+
+static int fp_suspend(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int fp_resume(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static struct phy_driver fp_drivers = {
+ .phy_id = 0xFF003521,
+ .phy_id_mask = 0xfffffff0,
+ .name = "FAKE PHY",
+ .config_init = fp_config_init,
+ .features = 0,
+ .config_aneg = fp_config_aneg,
+ .read_status = fp_read_status,
+ .update_link = fp_update_link,
+ .suspend = fp_suspend,
+ .resume = fp_resume,
+ .driver = {.owner= THIS_MODULE, },
+};
+
+static int __init fakephy_init(void)
+{
+ return phy_driver_register(&fp_drivers);
+}
+
+static void __exit fakephy_exit(void)
+{
+ phy_driver_unregister(&fp_drivers);
+}
+
+module_init(fakephy_init);
+module_exit(fakephy_exit);
+
+static struct mdio_device_id __maybe_unused fakephy_tbl[] = {
+ { 0xFF003521, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, fakephy_tbl);
+MODULE_LICENSE("GPL");
+
+
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644
index 00000000000000..a5d3d634fd9a5d
--- /dev/null
+++ b/drivers/net/ppp/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must keep connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT 0x80
+#define L2TP_LENGTH_BIT 0x40
+#define L2TP_SEQUENCE_BIT 0x08
+#define L2TP_OFFSET_BIT 0x02
+#define L2TP_VERSION 0x02
+#define L2TP_VERSION_MASK 0x0F
+
+#define PPP_ADDR 0xFF
+#define PPP_CTRL 0x03
+
+union unaligned {
+ __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+ return (union unaligned *)ptr;
+}
+
+struct meta {
+ __u32 sequence;
+ __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+ return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+ struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+ struct meta *meta = skb_meta(skb);
+ __u32 now = jiffies;
+ __u8 bits;
+ __u8 *ptr;
+
+ /* Drop the packet if L2TP header is missing. */
+ if (skb->len < sizeof(struct udphdr) + 6)
+ goto drop;
+
+ /* Put it back if it is a control packet. */
+ if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+ return opt->backlog_rcv(sk_udp, skb);
+
+ /* Skip UDP header. */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ /* Check the version. */
+ if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+ goto drop;
+ bits = skb->data[0];
+ ptr = &skb->data[2];
+
+ /* Check the length if it is present. */
+ if (bits & L2TP_LENGTH_BIT) {
+ if ((ptr[0] << 8 | ptr[1]) != skb->len)
+ goto drop;
+ ptr += 2;
+ }
+
+ /* Skip all fields including optional ones. */
+ if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+ (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+ (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+ goto drop;
+
+ /* Skip the offset padding if it is present. */
+ if (bits & L2TP_OFFSET_BIT &&
+ !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+ goto drop;
+
+ /* Check the tunnel and the session. */
+ if (unaligned(ptr)->u32 != opt->local)
+ goto drop;
+
+ /* Check the sequence if it is present. */
+ if (bits & L2TP_SEQUENCE_BIT) {
+ meta->sequence = ptr[4] << 8 | ptr[5];
+ if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+ goto drop;
+ }
+
+ /* Skip PPP address and control if they are present. */
+ if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+ skb->data[1] == PPP_CTRL)
+ skb_pull(skb, 2);
+
+ /* Fix PPP protocol if it is compressed. */
+ if (skb->len >= 1 && skb->data[0] & 1)
+ skb_push(skb, 1)[0] = 0;
+
+ /* Drop the packet if PPP protocol is missing. */
+ if (skb->len < 2)
+ goto drop;
+
+ /* Perform reordering if sequencing is enabled. */
+ atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+ if (bits & L2TP_SEQUENCE_BIT) {
+ struct sk_buff *skb1;
+
+ /* Insert the packet into receive queue in order. */
+ skb_set_owner_r(skb, sk);
+ skb_queue_walk(&sk->sk_receive_queue, skb1) {
+ struct meta *meta1 = skb_meta(skb1);
+ __s16 order = meta->sequence - meta1->sequence;
+ if (order == 0)
+ goto drop;
+ if (order < 0) {
+ meta->timestamp = meta1->timestamp;
+ skb_insert(skb1, skb, &sk->sk_receive_queue);
+ skb = NULL;
+ break;
+ }
+ }
+ if (skb) {
+ meta->timestamp = now;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+
+ /* Remove packets from receive queue as long as
+ * 1. the receive buffer is full,
+ * 2. they are queued longer than one second, or
+ * 3. there are no missing packets before them. */
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+ meta = skb_meta(skb);
+ if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ now - meta->timestamp < HZ &&
+ meta->sequence != opt->recv_sequence)
+ break;
+ skb_unlink(skb, &sk->sk_receive_queue);
+ opt->recv_sequence = (__u16)(meta->sequence + 1);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ }
+ return NET_RX_SUCCESS;
+ }
+
+ /* Flush receive queue if sequencing is disabled. */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_orphan(skb);
+ ppp_input(&pppox_sk(sk)->chan, skb);
+ return NET_RX_SUCCESS;
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+ sock_hold(sk_udp);
+ sk_receive_skb(sk_udp, skb, 0);
+ return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+ mm_segment_t old_fs = get_fs();
+ struct sk_buff *skb;
+
+ set_fs(KERNEL_DS);
+ while ((skb = skb_dequeue(&delivery_queue))) {
+ struct sock *sk_udp = skb->sk;
+ struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)&iov,
+ .msg_iovlen = 1,
+ .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+ };
+ sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+ kfree_skb(skb);
+ }
+ set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk_udp = (struct sock *)chan->private;
+ struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+ /* Install PPP address and control. */
+ skb_push(skb, 2);
+ skb->data[0] = PPP_ADDR;
+ skb->data[1] = PPP_CTRL;
+
+ /* Install L2TP header. */
+ if (atomic_read(&opt->sequencing)) {
+ skb_push(skb, 10);
+ skb->data[0] = L2TP_SEQUENCE_BIT;
+ skb->data[6] = opt->xmit_sequence >> 8;
+ skb->data[7] = opt->xmit_sequence;
+ skb->data[8] = 0;
+ skb->data[9] = 0;
+ opt->xmit_sequence++;
+ } else {
+ skb_push(skb, 6);
+ skb->data[0] = 0;
+ }
+ skb->data[1] = L2TP_VERSION;
+ unaligned(&skb->data[2])->u32 = opt->remote;
+
+ /* Now send the packet via the delivery queue. */
+ skb_set_owner_w(skb, sk_udp);
+ skb_queue_tail(&delivery_queue, skb);
+ schedule_work(&delivery_work);
+ return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+ .start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+ struct socket *sock_udp = NULL;
+ struct sock *sk_udp;
+ int error;
+
+ if (addrlen != sizeof(struct sockaddr_pppolac) ||
+ !addr->local.tunnel || !addr->local.session ||
+ !addr->remote.tunnel || !addr->remote.session) {
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ error = -EALREADY;
+ if (sk->sk_state != PPPOX_NONE)
+ goto out;
+
+ sock_udp = sockfd_lookup(addr->udp_socket, &error);
+ if (!sock_udp)
+ goto out;
+ sk_udp = sock_udp->sk;
+ lock_sock(sk_udp);
+
+ /* Remove this check when IPv6 supports UDP encapsulation. */
+ error = -EAFNOSUPPORT;
+ if (sk_udp->sk_family != AF_INET)
+ goto out;
+ error = -EPROTONOSUPPORT;
+ if (sk_udp->sk_protocol != IPPROTO_UDP)
+ goto out;
+ error = -EDESTADDRREQ;
+ if (sk_udp->sk_state != TCP_ESTABLISHED)
+ goto out;
+ error = -EBUSY;
+ if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+ goto out;
+ if (!sk_udp->sk_bound_dev_if) {
+ struct dst_entry *dst = sk_dst_get(sk_udp);
+ error = -ENODEV;
+ if (!dst)
+ goto out;
+ sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+ dst_release(dst);
+ }
+
+ po->chan.hdrlen = 12;
+ po->chan.private = sk_udp;
+ po->chan.ops = &pppolac_channel_ops;
+ po->chan.mtu = PPP_MRU - 80;
+ po->proto.lac.local = unaligned(&addr->local)->u32;
+ po->proto.lac.remote = unaligned(&addr->remote)->u32;
+ atomic_set(&po->proto.lac.sequencing, 1);
+ po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto out;
+
+ sk->sk_state = PPPOX_CONNECTED;
+ udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+ sk_udp->sk_backlog_rcv = pppolac_recv_core;
+ sk_udp->sk_user_data = sk;
+out:
+ if (sock_udp) {
+ release_sock(sk_udp);
+ if (error)
+ sockfd_put(sock_udp);
+ }
+ release_sock(sk);
+ return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ if (sk->sk_state != PPPOX_NONE) {
+ struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+ lock_sock(sk_udp);
+ skb_queue_purge(&sk->sk_receive_queue);
+ pppox_unbind_sock(sk);
+ udp_sk(sk_udp)->encap_type = 0;
+ udp_sk(sk_udp)->encap_rcv = NULL;
+ sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+ sk_udp->sk_user_data = NULL;
+ release_sock(sk_udp);
+ sockfd_put(sk_udp->sk_socket);
+ }
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+ .name = "PPPOLAC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+ .family = PF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppolac_release,
+ .bind = sock_no_bind,
+ .connect = pppolac_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = sock_no_poll,
+ .ioctl = pppox_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+ if (!sk)
+ return -ENOMEM;
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppolac_proto_ops;
+ sk->sk_protocol = PX_PROTO_OLAC;
+ sk->sk_state = PPPOX_NONE;
+ return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+ .create = pppolac_create,
+ .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+ int error;
+
+ error = proto_register(&pppolac_proto, 0);
+ if (error)
+ return error;
+
+ error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+ if (error)
+ proto_unregister(&pppolac_proto);
+ else
+ skb_queue_head_init(&delivery_queue);
+ return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OLAC);
+ proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644
index 00000000000000..6016d29c0660c5
--- /dev/null
+++ b/drivers/net/ppp/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in the kernel space and connected to the same address
+ * of the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE	8
+
+/* Enhanced GRE (RFC 2637) header constants, stored in network order:
+ * PPTP_GRE_BITS is the required flag/version pattern, the mask covers
+ * every flag bit except the optional sequence and ack indicators. */
+#define PPTP_GRE_BITS	htons(0x2001)
+#define PPTP_GRE_BITS_MASK htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT htons(0x1000)
+#define PPTP_GRE_ACK_BIT htons(0x0080)
+#define PPTP_GRE_TYPE	htons(0x880B)
+
+/* Uncompressed PPP address and control field values. */
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+/* Wire layout of the mandatory part of a PPTP GRE header (all fields
+ * network order); the optional seq/ack words follow it on the wire. */
+struct header {
+	__u16 bits;
+	__u16 type;
+	__u16 length;
+	__u16 call;
+	__u32 sequence;
+} __attribute__((packed));
+
+/* Per-skb reordering state kept in skb->cb: the GRE sequence number
+ * and the jiffies timestamp at which the packet was queued. */
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+/* Access the struct meta stashed in the skb control buffer. */
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+/* Validate one GRE packet from the raw socket and hand its PPP payload
+ * to the channel. When the peer sends sequence numbers, packets are
+ * reordered through sk->sk_receive_queue inside a one-second window;
+ * otherwise the packet is delivered immediately. Runs as the raw
+ * socket's backlog receive handler, so sk_raw is locked by the caller.
+ * Returns NET_RX_SUCCESS or NET_RX_DROP. */
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	struct header *hdr;
+
+	/* Skip transport header */
+	skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+	/* Drop the packet if GRE header is missing. */
+	if (skb->len < GRE_HEADER_SIZE)
+		goto drop;
+	hdr = (struct header *)skb->data;
+
+	/* Check the header. */
+	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+		goto drop;
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, GRE_HEADER_SIZE +
+			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+		goto drop;
+
+	/* Check the length. */
+	if (skb->len != ntohs(hdr->length))
+		goto drop;
+
+	/* Check the sequence if it is present. Packets older than the
+	 * last delivered sequence are discarded (signed wraparound-safe
+	 * comparison). */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed (odd first byte means the
+	 * high protocol byte was elided; restore a zero byte). */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;	/* duplicate sequence */
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/* Data-ready callback installed on the raw socket: drain its receive
+ * queue through sk_receive_skb(), which runs pppopns_recv_core() with
+ * the socket locked. The sock_hold() balances the reference that
+ * sk_receive_skb() consumes. */
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+		sock_hold(sk_raw);
+		sk_receive_skb(sk_raw, skb, 0);
+	}
+}
+
+static struct sk_buff_head delivery_queue;
+
+/* Workqueue handler: send every queued skb out through its raw socket
+ * via the protocol's sendmsg. The KERNEL_DS segment switch lets the
+ * sendmsg path accept the kernel-space iovec. skb->sk was set by
+ * skb_set_owner_w() in pppopns_xmit(). */
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_raw = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_iov = (struct iovec *)&iov,
+			.msg_iovlen = 1,
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+		sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+/* PPP channel start_xmit: prepend the PPP address/control bytes and a
+ * 12-byte PPTP GRE header (with sequence, without ack), then queue the
+ * skb for asynchronous transmission by the delivery work. Always
+ * returns 1 (packet consumed). */
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_raw = (struct sock *)chan->private;
+	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+	struct header *hdr;
+	__u16 length;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+	length = skb->len;	/* GRE payload length, PPP bytes included */
+
+	/* Install PPTP GRE header. */
+	hdr = (struct header *)skb_push(skb, 12);
+	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+	hdr->type = PPTP_GRE_TYPE;
+	hdr->length = htons(length);
+	hdr->call = opt->remote;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_raw);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+/* PPP channel operations: transmit only; no ioctl support. */
+static struct ppp_channel_ops pppopns_channel_ops = {
+	.start_xmit = pppopns_xmit,
+};
+
+/* Connect a PPPoPNS socket: look up the userspace PPTP control (TCP)
+ * socket, create a kernel RAW/GRE socket connected to the same peer
+ * and bound to the same device, then register a PPP channel on top of
+ * it and install our callbacks on the raw socket. The original raw
+ * socket callbacks are saved in proto.pns for restoration at release.
+ * Returns 0 on success or a negative errno; on failure the raw socket
+ * (if created) is released. */
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+	struct sockaddr_storage ss;
+	struct socket *sock_tcp = NULL;
+	struct socket *sock_raw = NULL;
+	struct sock *sk_tcp;
+	struct sock *sk_raw;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppopns))
+		return -EINVAL;
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+	if (!sock_tcp)
+		goto out;
+	sk_tcp = sock_tcp->sk;
+	error = -EPROTONOSUPPORT;
+	if (sk_tcp->sk_protocol != IPPROTO_TCP)
+		goto out;
+	addrlen = sizeof(struct sockaddr_storage);
+	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+	if (error)
+		goto out;
+	/* Pin the raw socket to the device the control connection uses,
+	 * derived from its cached route when not explicitly bound. */
+	if (!sk_tcp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_tcp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+	if (error)
+		goto out;
+	sk_raw = sock_raw->sk;
+	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+	if (error)
+		goto out;
+
+	po->chan.hdrlen = 14;	/* 2 PPP bytes + 12-byte GRE header */
+	po->chan.private = sk_raw;
+	po->chan.ops = &pppopns_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.pns.local = addr->local;
+	po->proto.pns.remote = addr->remote;
+	po->proto.pns.data_ready = sk_raw->sk_data_ready;
+	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	lock_sock(sk_raw);
+	sk_raw->sk_data_ready = pppopns_recv;
+	sk_raw->sk_backlog_rcv = pppopns_recv_core;
+	sk_raw->sk_user_data = sk;
+	release_sock(sk_raw);
+out:
+	if (sock_tcp)
+		sockfd_put(sock_tcp);
+	if (error && sock_raw)
+		sock_release(sock_raw);
+	release_sock(sk);
+	return error;
+}
+
+/* Release a PPPoPNS socket: if connected, unbind the PPP channel,
+ * restore the raw socket's saved callbacks, and release the raw
+ * socket; then orphan and drop the pppox socket itself. */
+static int pppopns_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		/* Undo the callback hijack from pppopns_connect(). */
+		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+		sk_raw->sk_user_data = NULL;
+		release_sock(sk_raw);
+		sock_release(sk_raw->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+/* Protocol descriptor for PPPoPNS sockets; sk_alloc() sizes each
+ * socket as a struct pppox_sock. */
+static struct proto pppopns_proto = {
+	.name = "PPPOPNS",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+/* Socket operations for PPPoPNS: only release, connect and ioctl are
+ * meaningful; everything else is stubbed with the sock_no_* helpers. */
+static struct proto_ops pppopns_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppopns_release,
+	.bind = sock_no_bind,
+	.connect = pppopns_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+/* Allocate and initialize a PPPoPNS socket in the unconnected state.
+ * Returns 0 on success or -ENOMEM if sk_alloc() fails. */
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppopns_proto_ops;
+	sk->sk_protocol = PX_PROTO_OPNS;
+	sk->sk_state = PPPOX_NONE;	/* no tunnel attached yet */
+	return 0;
+}
+
+/******************************************************************************/
+
+/* Registration record tying PX_PROTO_OPNS to our socket constructor. */
+static struct pppox_proto pppopns_pppox_proto = {
+	.create = pppopns_create,
+	.owner = THIS_MODULE,
+};
+
+/* Module init: register the proto and the pppox protocol; the shared
+ * delivery queue is only initialized once registration has succeeded.
+ * On pppox registration failure the proto registration is rolled back. */
+static int __init pppopns_init(void)
+{
+	int error;
+
+	error = proto_register(&pppopns_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+	if (error)
+		proto_unregister(&pppopns_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+/* Module exit: unregister in the reverse order of pppopns_init(). */
+static void __exit pppopns_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OPNS);
+	proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig
new file mode 100644
index 00000000000000..231ae187f40447
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Kconfig
@@ -0,0 +1,47 @@
+config BCMDHD
+ tristate "Broadcom 4329/30 wireless cards support"
+ depends on MMC
+ ---help---
+ This module adds support for wireless adapters based on
+ Broadcom 4329/30 chipset.
+
+ This driver uses the kernel's wireless extensions subsystem.
+
+ If you choose to build a module, it'll be called dhd. Say M if
+ unsure.
+
+config BCMDHD_FW_PATH
+ depends on BCMDHD
+ string "Firmware path"
+ default "/system/etc/firmware/fw_bcmdhd.bin"
+ ---help---
+ Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+ depends on BCMDHD
+ string "NVRAM path"
+ default "/system/etc/wifi/bcmdhd.cal"
+ ---help---
+ Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	---help---
+	  Enables WEXT support
+
+config DHD_USE_STATIC_BUF
+ bool "Enable memory preallocation"
+ depends on BCMDHD
+ default n
+ ---help---
+ Use memory preallocated in platform
+
+config DHD_USE_SCHED_SCAN
+ bool "Use CFG80211 sched scan"
+ depends on BCMDHD && CFG80211
+ default n
+ ---help---
+ Use CFG80211 sched scan
diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile
new file mode 100644
index 00000000000000..7925661ce62ba5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/Makefile
@@ -0,0 +1,39 @@
+# bcmdhd
+# -DDHDTHREAD -DDHD_GPL -DDHD_SCHED -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
+# -DDHD_BCMEVENTS -DSHOW_EVENTS -DDONGLEOVERLAYS -DBCMDBG \
+# -DCUSTOMER_HW2 -DOOB_INTR_ONLY -DHW_OOB \
+# -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
+# -DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
+# -DKEEP_ALIVE -DCSCAN -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \
+# -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
+
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
+ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
+ -DDHD_BCMEVENTS -DSHOW_EVENTS -DPROP_TXSTATUS -DBCMDBG \
+ -DCUSTOMER_HW2 -DOOB_INTR_ONLY -DHW_OOB \
+ -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \
+ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
+ -DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \
+ -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
+ -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include
+
+DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \
+ dhd_linux_sched.o dhd_sdio.o bcmwifi_channels.o bcmevent.o hndpmu.o \
+ bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o linux_osl.o \
+ bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o
+
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
+ifneq ($(CONFIG_WIRELESS_EXT),)
+bcmdhd-objs += wl_iw.o
+DHDCFLAGS += -DSOFTAP -DUSE_IW
+endif
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o
+DHDCFLAGS += -DWL_CFG80211 -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+endif
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(CONFIG_BCMDHD),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c
new file mode 100644
index 00000000000000..f1db5a2b935128
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/aiutils.c
@@ -0,0 +1,836 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c 321247 2012-03-14 21:14:33Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+#define BCM47162_DMP() (0)
+#define BCM5357_DMP() (0)
+#define remap_coreid(sih, coreid) (coreid)
+#define remap_corerev(sih, corerev) (corerev)
+
+
+
+/* Read successive entries from the enumeration ROM, advancing
+ * *eromptr, until one matches (entry & mask) == match, until the END
+ * marker is reached, or unconditionally after one read when mask is 0.
+ * Invalid entries are skipped. Returns the last entry read. */
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;	/* counts of invalid / non-matching entries */
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
+/* Parse one address-space descriptor of slave port 'sp', type 'st'
+ * from the EROM, filling in the 64-bit base address and size. If the
+ * next entry is not the expected descriptor the EROM pointer is
+ * rewound and 0 is returned; otherwise the raw descriptor word. */
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+	uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* Not the descriptor we want: back up for the caller. */
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		/* Explicit size descriptor follows. */
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+/* Hook for chip-specific hardware fixups after the EROM scan; no
+ * fixups are needed for the chips supported by this build. */
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+
+/* Walk the AXI/AI enumeration ROM and record, for every usable core,
+ * its id, CIA/CIB words, base address/size and wrapper address into
+ * the si_info arrays, incrementing sii->numcores. Any malformed EROM
+ * content resets numcores to 0. */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	/* Map the EROM according to how the backplane is attached. */
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Point the BAR0 window at the EROM. */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;	/* component is a bridge */
+
+		br = FALSE;
+
+		/* Component identifier A: end marker terminates the scan. */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			ai_hwfixup(sii);
+			return;
+		}
+
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
+#else
+		BCM_REFERENCE(crev);
+#endif
+
+		/* Skip default components and those with no slave ports. */
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			/* No wrappers: only the OOB router and the 4706 GMAC
+			 * common core are of interest here. */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			if (cid != GMAC_COMMON_4706_CORE_ID)
+				continue;
+		}
+
+		idx = sii->numcores;
+
+		sii->cia[idx] = cia;
+		sii->cib[idx] = cib;
+		sii->coreid[idx] = remap_coreid(sih, cid);
+
+		/* Consume the master port descriptors. */
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First slave address descriptor: core register base. */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			/* Try a bridge address descriptor instead. */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else
+				if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+					SI_ERROR(("First Slave ASD for core 0x%04x malformed "
+					          "(0x%08x)\n", cid, asd));
+					goto error;
+				}
+		}
+		sii->coresba[idx] = addrl;
+		sii->coresba_size[idx] = sizel;
+
+		/* Additional descriptors on slave port 0; remember a second
+		 * 4KB register space if present. */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				sii->coresba2[idx] = addrl;
+				sii->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Remaining slave ports must each have at least one
+		 * address descriptor. */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+
+				if (asd == 0)
+					break;
+				j++;
+			} while (1);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Master wrappers: must be 4KB; the first one becomes the
+		 * core's wrapper base. */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				sii->wrapba[idx] = addrl;
+		}
+
+		/* Slave wrappers: same constraints; used as the wrapper base
+		 * only when the core has no master wrappers. */
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				sii->wrapba[idx] = addrl;
+		}
+
+		/* Bridges are parsed but not counted as cores. */
+		if (br)
+			continue;
+
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+
+/* Make core 'coreidx' the current core: map (and cache) its register
+ * and wrapper spaces as required by the bus type, update curmap,
+ * curwrap and curidx, and return the register pointer (NULL if the
+ * index is out of range or the bus is unsupported). */
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 addr, wrap;
+	void *regs;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return (NULL);
+
+	addr = sii->coresba[coreidx];
+	wrap = sii->wrapba[coreidx];
+
+	/* Switching cores is not safe while interrupts are enabled. */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* Map the core's and wrapper's register spaces once and
+		 * cache the mappings. */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		sii->curmap = regs = sii->regs[coreidx];
+		if (!sii->wrappers[coreidx]) {
+			sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->wrappers[coreidx]));
+		}
+		sii->curwrap = sii->wrappers[coreidx];
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* No MMU on these buses: use the backplane addresses directly. */
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+/* Look up address space 'asidx' (counting only slave ports >= 1) of
+ * the current core by re-parsing the EROM, returning its base and
+ * size through *addr/*size. On any failure *size is set to 0. */
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc = NULL;
+	uint32 erombase, *eromptr, *eromlim;
+	uint i, j, cidx;
+	uint32 cia, cib, nmp, nsp;
+	uint32 asd, addrl, addrh, sizel, sizeh;
+
+	/* Find chipcommon to read the EROM base from. */
+	for (i = 0; i < sii->numcores; i++) {
+		if (sii->coreid[i] == CC_CORE_ID) {
+			cc = (chipcregs_t *)sii->regs[i];
+			break;
+		}
+	}
+	if (cc == NULL)
+		goto error;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	cidx = sii->curidx;
+	cia = sii->cia[cidx];
+	cib = sii->cib[cidx];
+
+	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+	/* Locate the current core's CIA/CIB pair in the EROM. */
+	while (eromptr < eromlim) {
+		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+			break;
+		}
+	}
+
+	/* Skip the master port descriptors. */
+	for (i = 0; i < nmp; i++)
+		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+	/* Skip the first slave port's descriptors. */
+	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+	if (asd == 0) {
+		/* May be a bridge descriptor instead. */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+		              &sizel, &sizeh);
+	}
+
+	j = 1;
+	do {
+		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+		              &sizel, &sizeh);
+		j++;
+	} while (asd != 0);
+
+	/* Walk the remaining slave ports, counting down asidx. */
+	for (i = 1; i < nsp; i++) {
+		j = 0;
+		do {
+			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				&sizel, &sizeh);
+			if (asd == 0)
+				break;
+
+			if (!asidx--) {
+				*addr = addrl;
+				*size = sizel;
+				return;
+			}
+			j++;
+		} while (1);
+
+		if (j == 0) {
+			SI_ERROR((" SP %d has no address descriptors\n", i));
+			break;
+		}
+	}
+
+error:
+	*size = 0;
+	return;
+}
+
+
+/* Each AI core is presented with exactly two address spaces. */
+int
+ai_numaddrspaces(si_t *sih)
+{
+	(void)sih;	/* not needed: the count is fixed */
+	return 2;
+}
+
+
+/* Return the base address of address space 'asidx' (0 or 1) of the
+ * current core, or 0 with an error message for any other index. */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint cidx = sii->curidx;
+
+	switch (asidx) {
+	case 0:
+		return sii->coresba[cidx];
+	case 1:
+		return sii->coresba2[cidx];
+	default:
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+
+/* Return the size of address space 'asidx' (0 or 1) of the current
+ * core, or 0 with an error message for any other index. */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint cidx = sii->curidx;
+
+	switch (asidx) {
+	case 0:
+		return sii->coresba_size[cidx];
+	case 1:
+		return sii->coresba2_size[cidx];
+	default:
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the current core's OOB select flag from its wrapper; on the
+ * quirky 47162/5357 parts the wrapper cannot be read, so the core
+ * index is returned instead (both DMP macros are 0 in this build). */
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	ai = sii->curwrap;
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+/* Interrupt-select hook: intentionally a no-op on AI backplanes. */
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+/*
+ * Read-modify-write a 32-bit register in the current core's wrapper
+ * space. 'offset' is the byte offset into the wrapper, 'mask' selects
+ * the bits to clear and 'val' the bits to set; when both are 0 the
+ * register is only read. Returns the register value after the access.
+ */
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		/* Fix: write the merged value 'w', not just 'val', so bits
+		 * outside 'mask' are preserved as the masking code intends. */
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+/* Return the manufacturer id field of the current core's CIA word. */
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 cia = sii->cia[sii->curidx];
+
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+/* Return the (possibly remapped) revision of the current core, taken
+ * from its CIB word. */
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 cib = sii->cib[sii->curidx];
+
+	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+/* A core is up when its clock is enabled without force-gating and it
+ * is not held in reset, per the wrapper's ioctrl/resetctrl registers. */
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+
+/* Read-modify-write register 'regoff' of core 'coreidx' (clear 'mask',
+ * set 'val'; read-only when both are 0) and return the value read
+ * back. Uses a direct mapping ("fast" path) when the register is
+ * reachable without switching cores; otherwise switches to the core
+ * with interrupts off and restores the original core afterwards. */
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* Every core is directly addressable on the SI bus. */
+		fast = TRUE;
+
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* On PCI only chipcommon and the PCI bridge core have
+		 * fixed windows we can hit without re-windowing. */
+		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipcommon registers via its fixed 16KB window. */
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* PCI bridge core registers. */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* Remember the current core so it can be restored. */
+		origidx = si_coreidx(&sii->pub);
+
+		/* Switch to the target core. */
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* Mask and set. */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* Read back. */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* Restore the previous core and interrupt state. */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/* Put the current core into reset with 'bits' asserted in its ioctrl
+ * register. Waits for any pending backplane transaction to finish
+ * before asserting reset; returns immediately if already in reset. */
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	uint32 status;
+	aidmp_t *ai;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* Already in reset: nothing to do. */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	/* Wait (up to 300us) for outstanding backplane transactions. */
+	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+	if (status != 0) {
+		/* Still busy: wait considerably longer before forcing it. */
+		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+
+		/* NOTE(review): status may still be nonzero here; reset is
+		 * asserted regardless. */
+	}
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush the write */
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	dummy = R_REG(sii->osh, &ai->resetctrl);	/* flush the write */
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+
+/* Reset the current core: disable it with (bits | resetbits), bring
+ * its clock up under forced clock gating, deassert reset, then drop
+ * the force-gate bit, with read-backs/delays between each step. */
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* Ensure the core is in reset before sequencing it out. */
+	ai_core_disable(sih, (bits | resetbits));
+
+	/* Enable the clock with forced gating while still in reset. */
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush the write */
+	BCM_REFERENCE(dummy);
+
+	/* Release reset. */
+	W_REG(sii->osh, &ai->resetctrl, 0);
+	dummy = R_REG(sii->osh, &ai->resetctrl);	/* flush the write */
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* Drop the force-gate bit, leaving the clock enabled. */
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush the write */
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+/* Write-only variant of ai_core_cflags(): clear 'mask' and set 'val'
+ * in the current core's ioctrl register without reading back a return
+ * value. No-op (with an error message) on the 47162/5357 quirk parts
+ * (both DMP macros are 0 in this build). */
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+/* Read-modify-write the current core's ioctrl register (clear 'mask',
+ * set 'val'; read-only when both are 0) and return the value read
+ * back. Returns 0 with an error message on the 47162/5357 quirk parts
+ * (both DMP macros are 0 in this build). */
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+/* Read-modify-write the current core's iostatus register (clear
+ * 'mask', set 'val'; read-only when both are 0) and return the value
+ * read back. Only SISF_CORE_BITS may be modified. Returns 0 with an
+ * error message on the 47162/5357 quirk parts (both DMP macros are 0
+ * in this build). */
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	aidmp_t *ai;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 00000000000000..4cc398c76cf775
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,148 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c 326276 2012-04-06 23:16:42Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+/* Compile-time guard: fail the build when a new WLC_E_* event is added
+ * without a matching name entry below.
+ */
+#if WLC_E_LAST != 94
+#error "You need to add an entry to bcmevent_names[] for the new event"
+#endif
+
+/* Event-code -> printable-name table used for diagnostics/logging. */
+const bcmevent_name_t bcmevent_names[] = {
+	{ WLC_E_SET_SSID, "SET_SSID" },
+	{ WLC_E_JOIN, "JOIN" },
+	{ WLC_E_START, "START" },
+	{ WLC_E_AUTH, "AUTH" },
+	{ WLC_E_AUTH_IND, "AUTH_IND" },
+	{ WLC_E_DEAUTH, "DEAUTH" },
+	{ WLC_E_DEAUTH_IND, "DEAUTH_IND" },
+	{ WLC_E_ASSOC, "ASSOC" },
+	{ WLC_E_ASSOC_IND, "ASSOC_IND" },
+	{ WLC_E_REASSOC, "REASSOC" },
+	{ WLC_E_REASSOC_IND, "REASSOC_IND" },
+	{ WLC_E_DISASSOC, "DISASSOC" },
+	{ WLC_E_DISASSOC_IND, "DISASSOC_IND" },
+	{ WLC_E_QUIET_START, "START_QUIET" },
+	{ WLC_E_QUIET_END, "END_QUIET" },
+	{ WLC_E_BEACON_RX, "BEACON_RX" },
+	{ WLC_E_LINK, "LINK" },
+	{ WLC_E_MIC_ERROR, "MIC_ERROR" },
+	{ WLC_E_NDIS_LINK, "NDIS_LINK" },
+	{ WLC_E_ROAM, "ROAM" },
+	{ WLC_E_TXFAIL, "TXFAIL" },
+	{ WLC_E_PMKID_CACHE, "PMKID_CACHE" },
+	{ WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
+	{ WLC_E_PRUNE, "PRUNE" },
+	{ WLC_E_AUTOAUTH, "AUTOAUTH" },
+	{ WLC_E_EAPOL_MSG, "EAPOL_MSG" },
+	{ WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
+	{ WLC_E_ADDTS_IND, "ADDTS_IND" },
+	{ WLC_E_DELTS_IND, "DELTS_IND" },
+	{ WLC_E_BCNSENT_IND, "BCNSENT_IND" },
+	{ WLC_E_BCNRX_MSG, "BCNRX_MSG" },
+	{ WLC_E_BCNLOST_MSG, "BCNLOST_IND" },
+	{ WLC_E_ROAM_PREP, "ROAM_PREP" },
+	{ WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" },
+	{ WLC_E_PFN_NET_LOST, "PFNLOST_IND" },
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	{ WLC_E_IBSS_ASSOC, "IBSS_ASSOC" },
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	{ WLC_E_RADIO, "RADIO" },
+	{ WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+	{ WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" },
+	{ WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
+	{ WLC_E_PSK_SUP, "PSK_SUP" },
+	{ WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" },
+	{ WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
+	{ WLC_E_ICV_ERROR, "ICV_ERROR" },
+	{ WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
+	{ WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
+	{ WLC_E_TRACE, "TRACE" },
+#ifdef WLBTAMP
+	{ WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" },
+#endif
+	{ WLC_E_IF, "IF" },
+#ifdef WLP2P
+	{ WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" },
+#endif
+	{ WLC_E_RSSI, "RSSI" },
+	/* NOTE(review): shares the name string "SCAN_COMPLETE" with
+	 * WLC_E_SCAN_COMPLETE above — log output cannot tell them apart.
+	 */
+	{ WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" },
+	{ WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" },
+#ifdef WIFI_ACT_FRAME
+	{ WLC_E_ACTION_FRAME, "ACTION_FRAME" },
+	{ WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" },
+	{ WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+#endif
+#if defined(NDISVER) && (NDISVER >= 0x0620)
+	{ WLC_E_PRE_ASSOC_IND, "ASSOC_RECV" },
+	{ WLC_E_PRE_REASSOC_IND, "REASSOC_RECV" },
+	{ WLC_E_CHANNEL_ADOPTED, "CHANNEL_ADOPTED" },
+	{ WLC_E_AP_STARTED, "AP_STARTED" },
+	{ WLC_E_DFS_AP_STOP, "DFS_AP_STOP" },
+	{ WLC_E_DFS_AP_RESUME, "DFS_AP_RESUME" },
+	{ WLC_E_ASSOC_IND_NDIS, "ASSOC_IND_NDIS"},
+	{ WLC_E_REASSOC_IND_NDIS, "REASSOC_IND_NDIS"},
+	{ WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
+	{ WLC_E_AUTH_REQ, "WLC_E_AUTH_REQ" },
+#endif /* NDISVER && NDISVER >= 0x0620 */
+#ifdef BCMWAPI_WAI
+	{ WLC_E_WAI_STA_EVENT, "WAI_STA_EVENT" },
+	{ WLC_E_WAI_MSG, "WAI_MSG" },
+#endif /* BCMWAPI_WAI */
+	{ WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" },
+	{ WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" },
+#ifdef WLP2P
+	{ WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" },
+	{ WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" },
+#endif
+#ifdef PROP_TXSTATUS
+	{ WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" },
+#endif
+	{ WLC_E_WAKE_EVENT, "WAKE_EVENT" },
+	{ WLC_E_DCS_REQUEST, "DCS_REQUEST" },
+	{ WLC_E_RM_COMPLETE, "RM_COMPLETE" },
+#ifdef WLMEDIA_HTSF
+	{ WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" },
+#endif
+	{ WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" },
+	{ WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND"},
+	{ WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" },
+	{ WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" },
+	{ WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" },
+#ifdef SOFTAP
+	{ WLC_E_GTK_PLUMBED, "GTK_PLUMBED" },
+#endif
+	{ WLC_E_ASSOC_REQ_IE, "ASSOC_REQ_IE" },
+	{ WLC_E_ASSOC_RESP_IE, "ASSOC_RESP_IE" },
+	/* NOTE(review): duplicate of the WLC_E_ACTION_FRAME_RX_NDIS entry
+	 * in the NDISVER >= 0x0620 section above when that section is
+	 * compiled in — harmless for lookup but redundant; confirm intent.
+	 */
+	{ WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
+#ifdef WLTDLS
+	{ WLC_E_TDLS_PEER_EVENT, "TDLS_PEER_EVENT" },
+#endif /* WLTDLS */
+};
+
+/* Number of entries in bcmevent_names[]. */
+const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 00000000000000..6e1a6b0e2144db
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,726 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c 300445 2011-12-03 05:37:20Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h> /* common SDIO/controller interface */
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/**
+ * BCMSDH API context.  One instance per attached SDIO host controller;
+ * allocated in bcmsdh_attach() and released in bcmsdh_detach().
+ */
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+};
+/* local copy of bcm sd handler; used as a fallback by the accessors
+ * below whenever a caller passes a NULL sdh handle.
+ */
+bcmsdh_info_t * l_bcmsdh = NULL;
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+/* Enable/disable the hardware out-of-band interrupt line by delegating
+ * to the sdioh layer; return value of the helper is ignored here.
+ */
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param cfghdl Configuration Handle.
+ * @param regsva Virtual address of controller registers (out: always set
+ *               to SI_ENUM_BASE here, regardless of the input value).
+ * @param irq Interrupt number of SDIO controller.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context, or NULL on failure
+ *         (allocation or sdioh_attach error).
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+
+	/* save the handler locally; NOTE(review): this is published before
+	 * sdioh_attach() succeeds and is not cleared on the failure path
+	 * below (bcmsdh_detach() resets it) — single-instance assumption.
+	 */
+	l_bcmsdh = bcmsdh;
+
+	if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
+		bcmsdh_detach(osh, bcmsdh);
+		return NULL;
+	}
+
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+
+	*regsva = (uint32 *)SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+	return bcmsdh;
+}
+
+/* Tear down the BCMSDH context created by bcmsdh_attach(): detach the
+ * sdioh layer, free the context, and clear the module-local handle.
+ * Safe to call with sdh == NULL.  Always returns 0.
+ */
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		if (bcmsdh->sdioh) {
+			sdioh_detach(osh, bcmsdh->sdioh);
+			bcmsdh->sdioh = NULL;
+		}
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+	return 0;
+}
+
+/* Pass an iovar get/set operation straight through to the sdioh layer. */
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+/* Query whether the SDIO client interrupt is currently asserted.
+ *
+ * Returns the controller-reported interrupt state when the query
+ * succeeds and FALSE when it fails.  (The original code had the
+ * success test inverted: it returned FALSE on success and the
+ * uninitialized 'on' flag on failure.)
+ */
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on = FALSE;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	if (SDIOH_API_SUCCESS(status))
+		return on;
+	return FALSE;
+}
+
+/* Unmask the SDIO controller's client interrupt.
+ * Returns 0 on success, BCME_ERROR otherwise.
+ */
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC rc;
+
+	ASSERT(ctx);
+	rc = sdioh_interrupt_set(ctx->sdioh, TRUE);
+	if (SDIOH_API_SUCCESS(rc))
+		return 0;
+	return BCME_ERROR;
+}
+
+/* Mask the SDIO controller's client interrupt.
+ * Returns 0 on success, BCME_ERROR otherwise.
+ */
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC rc;
+
+	ASSERT(ctx);
+	rc = sdioh_interrupt_set(ctx->sdioh, FALSE);
+	if (SDIOH_API_SUCCESS(rc))
+		return 0;
+	return BCME_ERROR;
+}
+
+/* Register 'fn' (called with 'argh') as the client interrupt handler.
+ * Returns 0 on success, BCME_ERROR otherwise.
+ */
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC rc;
+
+	ASSERT(ctx);
+	rc = sdioh_interrupt_register(ctx->sdioh, fn, argh);
+	if (SDIOH_API_SUCCESS(rc))
+		return 0;
+	return BCME_ERROR;
+}
+
+/* Remove the previously registered client interrupt handler.
+ * Returns 0 on success, BCME_ERROR otherwise.
+ */
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC rc;
+
+	ASSERT(ctx);
+	rc = sdioh_interrupt_deregister(ctx->sdioh);
+	if (SDIOH_API_SUCCESS(rc))
+		return 0;
+	return BCME_ERROR;
+}
+
+#if defined(DHD_DEBUG)
+/* Debug-only: ask the sdioh layer whether a client interrupt is pending. */
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+/* Register a device-removal callback.  Not implemented; callers always
+ * receive BCME_UNSUPPORTED.
+ */
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read one byte from SDIO Configuration Space (CMD52-style access).
+ * @param sdh SDIO Host context; NULL falls back to the last attached
+ *            instance (l_bcmsdh).
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Optional error return: 0 or BCME_SDIO_ERROR.
+ * @return value read from SDIO configuration space (0 on failure).
+ *
+ * Note the do/while braces are interleaved with the preprocessor guard:
+ * with SDIOH_API_ACCESS_RETRY_LIMIT defined the read is retried with a
+ * 1 ms delay between attempts; without it the body runs exactly once.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait for 1 ms till bus get settled down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	             fnc_num, addr, data));
+
+	return data;
+}
+
+/* Write one byte to SDIO Configuration Space.  Mirrors bcmsdh_cfg_read():
+ * NULL sdh falls back to l_bcmsdh, and the write is retried (with a 1 ms
+ * delay) when SDIOH_API_ACCESS_RETRY_LIMIT is defined.  On completion
+ * *err (if non-NULL) is 0 or BCME_SDIO_ERROR.
+ */
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait for 1 ms till bus get settled down */
+			OSL_DELAY(1000);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	             fnc_num, addr, data));
+}
+
+/* Read a 32-bit word from SDIO Configuration Space.  NULL sdh falls back
+ * to l_bcmsdh.  *err (if non-NULL) is 0 or BCME_SDIO_ERROR; the returned
+ * data is 0 on failure.
+ */
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	             fnc_num, addr, data));
+
+	return data;
+}
+
+/* Write a 32-bit word to SDIO Configuration Space.  NULL sdh falls back
+ * to l_bcmsdh.  *err (if non-NULL) is 0 or BCME_SDIO_ERROR.
+ */
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+/* Read up to 'length' bytes of CIS data for SDIO function 'func' into
+ * 'cis'.  Bits above 0xf in 'func' request ASCII hex formatting of the
+ * result in place (16 bytes per line); the function number itself is
+ * masked to the low 3 bits.
+ *
+ * Returns 0 on success, BCME_NOMEM or BCME_ERROR on failure.
+ * NOTE(review): the ASCII formatting runs even when sdioh_cis_read()
+ * failed, and the sprintf expansion assumes the caller's 'cis' buffer
+ * holds the 3x-larger text form — confirm against callers.
+ */
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += sprintf((char *)ptr, "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
+/* Program the function-1 backplane address window (SBADDR low/mid/high
+ * config registers) so that 'address' falls inside it.  The window base
+ * is cached in bcmsdh->sbwad; the registers are only rewritten when the
+ * base changes or when 'force_set' is TRUE.  On any write error the
+ * cache is invalidated so the next call retries.
+ * Returns 0 on success or the bcmsdh_cfg_write() error code.
+ */
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+	int err = 0;
+	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bar0 != bcmsdh->sbwad || force_set) {
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+		if (!err)
+			bcmsdh->sbwad = bar0;
+		else
+			/* invalidate cached window var */
+			bcmsdh->sbwad = 0;
+
+	}
+
+	return err;
+}
+
+/* Read a 1/2/4-byte backplane register at 'addr' through function 1.
+ * Sets the backplane window first; records success/failure in
+ * bcmsdh->regfail.  Returns the value masked to 'size' bytes, or
+ * 0xFFFFFFFF on any error (window, transfer, or invalid size).
+ */
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+		return 0xFFFFFFFF;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	/* flag bit selects a 4-byte access within the window */
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+	                            SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				/* invalid size: fall out to the error path */
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+/* Write a 1/2/4-byte backplane register at 'addr' through function 1.
+ * Returns 0 on success.
+ * NOTE(review): the error returns mix conventions — the window-set
+ * failure returns the (positive) cfg-write error code while a transfer
+ * failure returns 0xFFFFFFFF; callers that only test "!= 0" are fine,
+ * but the value itself is not a consistent error code.
+ */
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
+
+/* Report whether the most recent bcmsdh_reg_read/reg_write failed. */
+bool
+bcmsdh_regfail(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return ctx->regfail;
+}
+
+/* Receive 'nbytes' from SDIO function 'fn' at backplane address 'addr'
+ * into 'buf' (or packet 'pkt').  Flags select fixed vs incrementing
+ * address and 2- vs 4-byte width; async (SDIO_REQ_ASYNC) is rejected.
+ * Returns 0 on success, a window-set error, BCME_UNSUPPORTED, or
+ * BCME_SDIO_ERROR.  'complete_fn'/'handle' are accepted but unused here.
+ */
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+/* Send 'nbytes' to SDIO function 'fn' at backplane address 'addr' from
+ * 'buf' (or packet 'pkt').  Mirror image of bcmsdh_recv_buf().
+ * NOTE(review): on transfer failure this returns BCME_ERROR whereas
+ * recv_buf returns BCME_SDIO_ERROR — confirm whether the asymmetry is
+ * intentional before relying on the specific code.
+ */
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+/* Raw 4-byte-wide PIO transfer on function 1 at 'addr' (rw != 0 means
+ * write).  The caller must have already positioned the backplane window:
+ * the ASSERT requires the window bits of 'addr' to be zero.
+ * Returns 0 on success, BCME_ERROR otherwise.
+ */
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+	                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+	                              addr, 4, nbytes, buf, NULL);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+/* Abort any in-progress transfer on SDIO function 'fn'. */
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(ctx->sdioh, fn);
+}
+
+/* Forward a start request (with its stage number) to the sdioh layer. */
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(ctx->sdioh, stage);
+}
+
+/* Forward a stop request to the sdioh layer. */
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(ctx->sdioh);
+}
+
+/* Block until the sdioh bus lock is free.  A NULL handle falls back to
+ * the last attached instance.
+ */
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	if (ctx == NULL)
+		ctx = l_bcmsdh;
+
+	return sdioh_waitlockfree(ctx->sdioh);
+}
+
+
+/* Report the vendor/device ID of the attached device.  The vendor is
+ * hard-coded to Broadcom and the device ID to 0 (packed as vendor in the
+ * high 16 bits); the value is also cached in bcmsdh->vendevid.
+ */
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+/* Number of I/O functions reported by the attached SDIO device.  A NULL
+ * handle falls back to the last attached instance.
+ */
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	if (ctx == NULL)
+		ctx = l_bcmsdh;
+
+	return sdioh_query_iofnum(ctx->sdioh);
+}
+
+/* Issue an SDIO reset through the host-controller layer. */
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	return sdioh_sdio_reset(sdh->sdioh);
+}
+
+/* Expose the underlying sdioh handle; sdh must be non-NULL. */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD.  Not supported on this
+ * bus implementation: always reports 0 (no status bits).
+ */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+/* Return the currently cached backplane window base address.  A NULL
+ * handle falls back to the last attached instance.
+ */
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	if (ctx == NULL)
+		ctx = l_bcmsdh;
+
+	return ctx->sbwad;
+}
+
+/* Chip identification hook; this implementation keeps no per-chip
+ * state, so the call is a no-op.
+ */
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+}
+
+
+/* Request the host controller to enter/leave its sleep state.  Only
+ * functional when SDIOH_SLEEP_ENABLED is compiled in; otherwise returns
+ * BCME_UNSUPPORTED.
+ */
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+/* Delegate GPIO subsystem initialization to the host-controller driver. */
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_gpio_init((sdioh_info_t *)ctx->sdioh);
+}
+
+/* Read the input level of controller GPIO 'gpio'. */
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_gpioin((sdioh_info_t *)ctx->sdioh, gpio);
+}
+
+/* Configure controller GPIO 'gpio' as an output. */
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_gpioouten((sdioh_info_t *)ctx->sdioh, gpio);
+}
+
+/* Drive controller GPIO 'gpio' high (enab TRUE) or low (enab FALSE). */
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *ctx = (bcmsdh_info_t *)sdh;
+
+	return sdioh_gpioout((sdioh_info_t *)ctx->sdioh, gpio, enab);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
new file mode 100644
index 00000000000000..79760eb2423668
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c
@@ -0,0 +1,742 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c 312788 2012-02-03 23:06:32Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+
+#if defined(OOB_INTR_ONLY)
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif /* defined(OOB_INTR_ONLY) */
+
+/**
+ * SDIO Host Controller info.  One node per probed controller, chained
+ * through 'next' with the list head in the module-local 'sdhcinfo'.
+ */
+typedef struct bcmsdh_hc bcmsdh_hc_t;
+
+struct bcmsdh_hc {
+	bcmsdh_hc_t *next;
+#ifdef BCMPLATFORM_BUS
+	struct device *dev;	/* platform device handle */
+#else
+	struct pci_dev *dev;	/* pci device handle */
+#endif /* BCMPLATFORM_BUS */
+	osl_t *osh;
+	void *regs;		/* SDIO Host Controller address */
+	bcmsdh_info_t *sdh;	/* SDIO Host Controller handle */
+	void *ch;		/* opaque handle returned by drvinfo.attach() */
+	unsigned int oob_irq;
+	unsigned long oob_flags; /* OOB Host specifiction as edge and etc */
+	bool oob_irq_registered;
+	bool oob_irq_enable_flag;
+#if defined(OOB_INTR_ONLY)
+	spinlock_t irq_lock;
+#endif
+};
+/* Head of the controller list built by bcmsdh_probe(). */
+static bcmsdh_hc_t *sdhcinfo = NULL;
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL};
+
+/* debugging macros; compiled out (messages discarded) in this build */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host
+ * Controller.  Returns TRUE for any ID listed below, FALSE otherwise.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+#if defined(BCMPLATFORM_BUS)
+#if defined(BCMLXSDMMC)
+/* forward declarations */
+int bcmsdh_probe(struct device *dev);
+int bcmsdh_remove(struct device *dev);
+
+EXPORT_SYMBOL(bcmsdh_probe);
+EXPORT_SYMBOL(bcmsdh_remove);
+
+#else
+/* forward declarations */
+static int __devinit bcmsdh_probe(struct device *dev);
+static int __devexit bcmsdh_remove(struct device *dev);
+#endif /* BCMLXSDMMC */
+
+/* Probe a platform SDIO host controller: resolve the (OOB) IRQ, attach
+ * the OSL and BCMSDH layers, link the new controller into 'sdhcinfo',
+ * and attach the target device via drvinfo.attach().
+ * Returns 0 on success, -ENXIO for missing platform resources, 1 when
+ * the OOB IRQ is undefined, and -ENODEV on any other failure.
+ */
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_probe(struct device *dev)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs = 0;
+	bcmsdh_info_t *sdh = NULL;
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	struct platform_device *pdev;
+	struct resource *r;
+#endif /* BCMLXSDMMC */
+	int irq = 0;
+	uint32 vendevid;
+	unsigned long irq_flags = 0;
+
+#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
+	pdev = to_platform_device(dev);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!r || irq == NO_IRQ)
+		return -ENXIO;
+#endif /* BCMLXSDMMC */
+
+#if defined(OOB_INTR_ONLY)
+#ifdef HW_OOB
+	irq_flags =
+		IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
+#else
+	irq_flags = IRQF_TRIGGER_FALLING;
+#endif /* HW_OOB */
+
+	/* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */
+	irq = dhd_customer_oob_irq_map(&irq_flags);
+	if (irq < 0) {
+		SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
+		return 1;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = (void *)dev;
+
+#ifdef BCMLXSDMMC
+	if (!(sdh = bcmsdh_attach(osh, (void *)0,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#else
+	if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
+	                          (void **)&regs, irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* BCMLXSDMMC */
+	sdhc->sdh = sdh;
+	sdhc->oob_irq = irq;
+	sdhc->oob_flags = irq_flags;
+	sdhc->oob_irq_registered = FALSE;	/* to make sure.. */
+	sdhc->oob_irq_enable_flag = FALSE;
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&sdhc->irq_lock);
+#endif
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(sdh);
+	/* try to attach to the target device.
+	 * NOTE(review): on attach failure the err path frees sdhc while
+	 * 'sdhcinfo' still points at it (it was linked in just above) —
+	 * stale list head; confirm whether this path can be reached in
+	 * practice.
+	 */
+	if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
+	                                (vendevid & 0xFFFF), 0, 0, 0, 0,
+	                                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+/* Remove the SDIO Host Controller bound to 'dev': unlink its state from
+ * the 'sdhcinfo' list, detach the target device and the BCMSDH layer,
+ * and release the OSL handle.  Always returns 0.
+ *
+ * Fixes two defects in the original: it detached the list *head*
+ * unconditionally (before checking which node matched 'dev'), and when
+ * the head matched it set 'sdhcinfo = NULL', dropping every other
+ * controller on the list instead of advancing to 'sdhc->next'.
+ */
+#ifndef BCMLXSDMMC
+static
+#endif /* BCMLXSDMMC */
+int bcmsdh_remove(struct device *dev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	/* find the SDIO Host Controller state for this dev and take it out
+	 * from the list
+	 */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == (void *)dev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = sdhc->next;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc) {
+		SDLX_MSG(("%s: failed\n", __FUNCTION__));
+		return 0;
+	}
+
+	/* detach the matching controller only */
+	drvinfo.detach(sdhc->ch);
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+
+#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
+	dev_set_drvdata(dev, NULL);
+#endif /* !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) */
+
+	return 0;
+}
+
+#else /* BCMPLATFORM_BUS */
+
+#if !defined(BCMLXSDMMC)
+/* forward declarations for PCI probe and remove functions. */
+static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
+
+/**
+ * pci id table: matches every PCI device (PCI_ANY_ID wildcards);
+ * bcmsdh_pci_probe() filters by sd_pci_slot and bcmsdh_chipmatch().
+ * Note: uses GNU-style (label:) designators, not C99 (.field =).
+ */
+static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
+	{ vendor: PCI_ANY_ID,
+	device: PCI_ANY_ID,
+	subvendor: PCI_ANY_ID,
+	subdevice: PCI_ANY_ID,
+	class: 0,
+	class_mask: 0,
+	driver_data: 0,
+	},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
+
+/**
+ * SDIO Host Controller pci driver info
+ */
+static struct pci_driver bcmsdh_pci_driver = {
+ node: {},
+ name: "bcmsdh",
+ id_table: bcmsdh_pci_devid,
+ probe: bcmsdh_pci_probe,
+ remove: bcmsdh_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: NULL,
+ resume: NULL,
+ };
+
+
+extern uint sd_pci_slot; /* Force detection to a particular PCI */
+ /* slot only . Allows for having multiple */
+ /* WL devices at once in a PC */
+ /* Only one instance of dhd will be */
+ /* usable at a time */
+ /* Upper word is bus number, */
+ /* lower word is slot number */
+ /* Default value of 0xffffffff turns this */
+ /* off */
+module_param(sd_pci_slot, uint, 0);
+
+
+/**
+ * Detect supported SDIO Host Controller and attach if found.
+ *
+ * Determine if the device described by pdev is a supported SDIO Host
+ * Controller. If so, attach to it and attach to the target device.
+ *
+ * Returns 0 on success, -ENODEV on any failure; all partially acquired
+ * resources (osh, sdhc, sdh) are released through the err: ladder.
+ */
+static int __devinit
+bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	osl_t *osh = NULL;
+	bcmsdh_hc_t *sdhc = NULL;
+	ulong regs;
+	bcmsdh_info_t *sdh = NULL;
+	int rc;
+
+	/* Optional bus/slot filter: 0xFFFFffff (default) means "any slot";
+	 * otherwise upper word selects the bus and lower word the slot.
+	 */
+	if (sd_pci_slot != 0xFFFFffff) {
+		if (pdev->bus->number != (sd_pci_slot>>16) ||
+			PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
+			SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
+				__FUNCTION__,
+				bcmsdh_chipmatch(pdev->vendor, pdev->device)
+				?"Found compatible SDIOHC"
+				:"Probing unknown device",
+				pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
+				pdev->device));
+			return -ENODEV;
+		}
+		SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
+			__FUNCTION__,
+			bcmsdh_chipmatch(pdev->vendor, pdev->device)
+			?"Using compatible SDIOHC"
+			:"WARNING, forced use of unkown device",
+			pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
+	}
+
+	/* TI FlashMedia combo parts: hand the SD slot over to the standard
+	 * SD Host Controller by disabling the FlashMedia SD/MMC function.
+	 */
+	if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
+	    (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
+		uint32 config_reg;
+
+		SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+			goto err;
+		}
+
+		config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
+
+		/*
+		 * Set MMC_SD_DIS bit in FlashMedia Controller.
+		 * Disbling the SD/MMC Controller in the FlashMedia Controller
+		 * allows the Standard SD Host Controller to take over control
+		 * of the SD Slot.
+		 */
+		config_reg |= 0x02;
+		OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
+		osl_detach(osh);	/* temporary osh only for the config write */
+	}
+	/* match this pci device with what we support */
+	/* we can't solely rely on this to believe it is our SDIO Host Controller! */
+	if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
+		return -ENODEV;
+	}
+
+	/* this is a pci device we might support */
+	SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
+		__FUNCTION__,
+		pdev->bus->number, PCI_SLOT(pdev->devfn),
+		PCI_FUNC(pdev->devfn), pdev->irq));
+
+	/* use bcmsdh_query_device() to get the vendor ID of the target device so
+	 * it will eventually appear in the Broadcom string on the console
+	 */
+
+	/* allocate SDIO Host Controller state info */
+	if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+		SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
+		SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
+			__FUNCTION__,
+			MALLOCED(osh)));
+		goto err;
+	}
+	bzero(sdhc, sizeof(bcmsdh_hc_t));
+	sdhc->osh = osh;
+
+	sdhc->dev = pdev;
+
+	/* map to address where host can access */
+	pci_set_master(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
+		goto err;
+	}
+	if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
+	                (void **)&regs, pdev->irq))) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	sdhc->sdh = sdh;
+
+	/* try to attach to the target device */
+	if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
+	                bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
+	                (void *)regs, NULL, sdh))) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* chain SDIO Host Controller info together */
+	sdhc->next = sdhcinfo;
+	sdhcinfo = sdhc;
+
+	return 0;
+
+	/* error handling */
+err:
+	if (sdhc) {
+		if (sdhc->sdh)
+			bcmsdh_detach(sdhc->osh, sdhc->sdh);
+		MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	}
+	if (osh)
+		osl_detach(osh);
+	return -ENODEV;
+}
+
+
+/**
+ * Detach from target devices and SDIO Host Controller
+ *
+ * Fix vs. original: when the matching entry is the list head, advance
+ * sdhcinfo to sdhc->next rather than setting it to NULL, which silently
+ * dropped every other registered controller from the list.
+ */
+static void __devexit
+bcmsdh_pci_remove(struct pci_dev *pdev)
+{
+	bcmsdh_hc_t *sdhc, *prev;
+	osl_t *osh;
+
+	/* find the SDIO Host Controller state for this pdev and take it out from the list */
+	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+		if (sdhc->dev == pdev) {
+			if (prev)
+				prev->next = sdhc->next;
+			else
+				sdhcinfo = sdhc->next;
+			break;
+		}
+		prev = sdhc;
+	}
+	if (!sdhc)
+		return;
+
+	/* detach the target device, then the host-controller glue */
+	drvinfo.detach(sdhc->ch);
+
+	bcmsdh_detach(sdhc->osh, sdhc->sdh);
+
+	/* release SDIO Host Controller info */
+	osh = sdhc->osh;
+	MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
+	osl_detach(osh);
+}
+#endif /* BCMLXSDMMC */
+#endif /* BCMPLATFORM_BUS */
+
+extern int sdio_function_init(void);
+
+/* Implemented by the sdmmc glue (bcmsdh_sdmmc_linux.c). */
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+/* Register @semaphore to be signalled when the SDIO function device
+ * appears; thin wrapper so callers stay bus-layer agnostic.
+ */
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+/* Undo bcmsdh_reg_sdio_notify(). */
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+/* Register the higher-level driver callbacks (@driver is copied by value
+ * into the file-scope drvinfo) and start bus enumeration.
+ * Returns 0 on success or a negative/driver error code.
+ *
+ * Exactly one of the #if arms below is active per build:
+ *  - BCMPLATFORM_BUS: init the native SDIO/MMC stack and return early;
+ *  - otherwise (non-BCMLXSDMMC): register as a PCI driver.
+ */
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+
+#if defined(BCMPLATFORM_BUS)
+	SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+	error = sdio_function_init();
+	return error;
+#endif /* defined(BCMPLATFORM_BUS) */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&bcmsdh_pci_driver)))
+		return 0;
+#else
+	if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
+		return 0;
+#endif
+
+	SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#endif /* BCMPLATFORM_BUS */
+
+	return error;
+}
+
+extern void sdio_function_cleanup(void);
+
+/* Tear down whatever bcmsdh_register() set up for this build flavour.
+ *
+ * NOTE(review): the bare `if` under the <2.6.0 guard has no braces, so
+ * after preprocessing it conditions whichever statement survives the
+ * following #if blocks (sdio_function_cleanup() or
+ * pci_unregister_driver()).  Fragile but kept byte-identical here --
+ * confirm intent before restructuring.
+ */
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (bcmsdh_pci_driver.node.next)
+#endif
+
+#if defined(BCMLXSDMMC)
+	sdio_function_cleanup();
+#endif /* BCMLXSDMMC */
+
+#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
+	pci_unregister_driver(&bcmsdh_pci_driver);
+#endif /* BCMPLATFORM_BUS */
+}
+
+#if defined(OOB_INTR_ONLY)
+/* Enable/disable the out-of-band WLAN interrupt line.
+ *
+ * curstate tracks the last applied state so enable_irq/disable_irq calls
+ * stay balanced (the kernel refcounts them); it starts at 1 because the
+ * IRQ is enabled by request_irq().  irq_lock serialises callers that may
+ * race from ISR vs. process context.
+ */
+void bcmsdh_oob_intr_set(bool enable)
+{
+	static bool curstate = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
+	if (curstate != enable) {
+		if (enable)
+			enable_irq(sdhcinfo->oob_irq);
+		else
+			disable_irq_nosync(sdhcinfo->oob_irq);
+		curstate = enable;
+	}
+	spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+}
+
+/* Out-of-band GPIO interrupt handler: mask further OOB interrupts and
+ * hand off to the SDIO bus ISR.  Always returns IRQ_HANDLED, including
+ * when the interrupt fires before dhd attach has stored drvdata.
+ */
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	dhd_pub_t *dhdp;
+
+	dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
+
+	/* mask until dhdsdio_isr re-enables via bcmsdh_oob_intr_set(1) */
+	bcmsdh_oob_intr_set(0);
+
+	if (dhdp == NULL) {
+		SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
+		return IRQ_HANDLED;
+	}
+
+	dhdsdio_isr((void *)dhdp->bus);
+
+	return IRQ_HANDLED;
+}
+
+/* Hook the out-of-band GPIO interrupt and stash @dhdp as drvdata for the
+ * handler.  Idempotent: the IRQ is only requested once per attach
+ * (oob_irq_registered guards re-entry).  Returns 0 or -ENODEV.
+ */
+int bcmsdh_register_oob_intr(void * dhdp)
+{
+	int error = 0;
+
+	SDLX_MSG(("%s Enter \n", __FUNCTION__));
+
+	/* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
+
+	dev_set_drvdata(sdhcinfo->dev, dhdp);
+
+	if (!sdhcinfo->oob_irq_registered) {
+		SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__,
+			(int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
+		/* Refer to customer Host IRQ docs about proper irqflags definition */
+		error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
+			"bcmsdh_sdmmc", NULL);
+		if (error)
+			return -ENODEV;
+
+		/* allow the OOB line to wake the system from suspend */
+		enable_irq_wake(sdhcinfo->oob_irq);
+		sdhcinfo->oob_irq_registered = TRUE;
+		sdhcinfo->oob_irq_enable_flag = TRUE;
+	}
+
+	return 0;
+}
+
+/* Enable (@flag true) or disable the OOB IRQ together with its wake
+ * capability.  oob_irq_enable_flag keeps the calls balanced; no-op when
+ * the IRQ was never registered or the state already matches.
+ */
+void bcmsdh_set_irq(int flag)
+{
+	if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) {
+		SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+		sdhcinfo->oob_irq_enable_flag = flag;
+		if (flag) {
+			enable_irq(sdhcinfo->oob_irq);
+			enable_irq_wake(sdhcinfo->oob_irq);
+		} else {
+			/* drop wake first so a pending wake can't re-arm */
+			disable_irq_wake(sdhcinfo->oob_irq);
+			disable_irq(sdhcinfo->oob_irq);
+		}
+	}
+}
+
+/* Release the out-of-band interrupt line if it is currently registered. */
+void bcmsdh_unregister_oob_intr(void)
+{
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+
+	/* nothing to do if the IRQ was never hooked up */
+	if (sdhcinfo->oob_irq_registered != TRUE)
+		return;
+
+	bcmsdh_set_irq(FALSE);
+	free_irq(sdhcinfo->oob_irq, NULL);
+	sdhcinfo->oob_irq_registered = FALSE;
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+#if defined(BCMLXSDMMC)
+/* Return the drvdata (dhd_pub) attached to the active host controller,
+ * or NULL when no controller has been registered yet.
+ */
+void *bcmsdh_get_drvdata(void)
+{
+	return sdhcinfo ? dev_get_drvdata(sdhcinfo->dev) : NULL;
+}
+#endif
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+/* NOTE(review): declared `uint` above but registered as `int` here --
+ * module_param type-checks on some kernels; confirm and align to uint.
+ */
+module_param(sd_f2_blocksize, int, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+#endif
+
+#ifdef BCMSDH_MODULE
+/* Export the bcmsdh API for use by a separately built dhd module. */
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
new file mode 100644
index 00000000000000..afe4019a160f36
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
@@ -0,0 +1,1432 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 321372 2012-03-15 01:10:32Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <sdioh.h> /* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+/* Per-card instance state, owned by bcmsdh_sdmmc_linux.c */
+extern PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Defaults for the module parameters exposed in bcmsdh_linux.c */
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = 512;	/* Default blocksize */
+
+uint sd_divisor = 2;	/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;	/* Default to SD Slot powered ON */
+uint sd_clock = 1;	/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+/* Wait queues used to stall requests while the MMC host is suspended */
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
+#define DMA_ALIGN_MASK	0x03
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+/* Read the common and per-function CIS pointers from the card, then
+ * enable Function 1.  F1 enable failure is logged but not propagated.
+ *
+ * Fix vs. original: a duplicated func_cis_ptr[0] assignment and its
+ * duplicate sd_info() log line after the loop were removed (the same
+ * statements already run before the loop).
+ */
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	/* Enable Function 1 */
+	sdio_claim_host(gInstance->func[1]);
+	err_ret = sdio_enable_func(gInstance->func[1]);
+	sdio_release_host(gInstance->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+	}
+
+	/* returns FALSE (0) unconditionally; callers ignore the value */
+	return FALSE;
+}
+
+/*
+ * Public entry points & extern's
+ */
+/* Allocate and initialise the host-side SDIO state.  @bar0 and @irq are
+ * unused on the sdmmc bus flavour (the MMC stack owns the controller);
+ * gInstance must already have been populated by the probe callback.
+ * Returns the new sdioh_info_t or NULL on failure.
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (gInstance == NULL) {
+		sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (sdioh_sdmmc_osinit(sd) != 0) {
+		sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = FALSE;
+
+	gInstance->sd = sd;
+
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[1]);
+
+	/* F1 uses a fixed 64-byte block size */
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(gInstance->func[1], 64);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+	}
+
+	/* Release host controller F1 */
+	sdio_release_host(gInstance->func[1]);
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+
+		/* F2 block size is tunable via the sd_f2_blocksize module param */
+		sd->client_block_size[2] = sd_f2_blocksize;
+		err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
+		if (err_ret) {
+			sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
+				sd_f2_blocksize));
+		}
+
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+}
+
+
+/* Disable both card functions, release OS resources and free @sd.
+ * Safe to call with sd == NULL.  Always returns SDIOH_API_RC_SUCCESS.
+ *
+ * Fix vs. original: gInstance->func[2] is now NULL-checked before being
+ * claimed/disabled, mirroring the existing func[1] guard (F2 may be
+ * absent, and the original dereferenced it unconditionally).
+ */
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (gInstance->func[2]) {
+			sdio_claim_host(gInstance->func[2]);
+			sdio_disable_func(gInstance->func[2]);
+			sdio_release_host(gInstance->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (gInstance->func[1]) {
+			sdio_claim_host(gInstance->func[1]);
+			sdio_disable_func(gInstance->func[1]);
+			sdio_release_host(gInstance->func[1]);
+		}
+
+		gInstance->func[1] = NULL;
+		gInstance->func[2] = NULL;
+
+		/* deregister irq */
+		sdioh_sdmmc_osfree(sd);
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+/* Read-modify-write CCCR INTEN: set the F1/F2 enables plus the master
+ * enable so card interrupts reach the host.  No-op if F0 is absent.
+ */
+extern SDIOH_API_RC
+sdioh_enable_func_intr(void)
+{
+	uint8 reg;
+	int err;
+
+	if (gInstance->func[0]) {
+		sdio_claim_host(gInstance->func[0]);
+
+		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+		if (err) {
+			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			sdio_release_host(gInstance->func[0]);
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* Enable F1 and F2 interrupts, set master enable */
+		reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);
+
+		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+		sdio_release_host(gInstance->func[0]);
+
+		if (err) {
+			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Read-modify-write CCCR INTEN: clear the F1/F2 enables, and drop the
+ * master enable too once no function interrupt remains set.
+ */
+extern SDIOH_API_RC
+sdioh_disable_func_intr(void)
+{
+	uint8 regval;
+	int err;
+
+	/* nothing to do without an F0 handle */
+	if (!gInstance->func[0])
+		return SDIOH_API_RC_SUCCESS;
+
+	sdio_claim_host(gInstance->func[0]);
+	regval = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(gInstance->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+
+	regval &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	/* Disable master interrupt with the last function interrupt */
+	if (!(regval & 0xFE))
+		regval = 0;
+	sdio_writeb(gInstance->func[0], regval, SDIOD_CCCR_INTEN, &err);
+
+	sdio_release_host(gInstance->func[0]);
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we recieve client interrupt */
+/* Stores @fn/@argh and claims the in-band SDIO IRQs for F1 and F2 (or,
+ * in HW_OOB builds, just enables the function interrupt bits).
+ */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (gInstance->func[2]) {
+		sdio_claim_host(gInstance->func[2]);
+		sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	if (gInstance->func[1]) {
+		sdio_claim_host(gInstance->func[1]);
+		sdio_claim_irq(gInstance->func[1], IRQHandler);
+		sdio_release_host(gInstance->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Undo sdioh_interrupt_register(): release the claimed SDIO IRQs and
+ * clear the stored client callback (or disable the HW_OOB enables).
+ */
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (gInstance->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(gInstance->func[1]);
+		sdio_release_irq(gInstance->func[1]);
+		sdio_release_host(gInstance->func[1]);
+	}
+
+	if (gInstance->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(gInstance->func[2]);
+		sdio_release_irq(gInstance->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(gInstance->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr();
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Report whether client interrupts are currently enabled via *onoff. */
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+/* Debug hook: pending-interrupt query is not supported on the sdmmc
+ * flavour, so always report "none pending".
+ */
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+/* Return the number of I/O functions configured at attach (fixed at 2). */
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+/* Action IDs consumed by sdioh_iovar_op(); must stay in sync with the
+ * name table below.
+ */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+/* Name -> id/type mapping used by bcm_iovar_lookup() in sdioh_iovar_op(). */
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",	IOV_RXCHAIN,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+/* Get/set one of the sdioh_iovars entries.
+ *
+ * @name selects the iovar; @set chooses set (input in arg) vs. get
+ * (result written to arg).  params/plen carry set-side qualifiers for
+ * buffer iovars (sd_hostreg/sd_devreg take an sdreg_t).  Returns 0 or a
+ * negative BCME_* code.
+ */
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	/* first 4 bytes of params double as the scalar convenience value */
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		/* int_val is the function number on get */
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		/* encoded as ((fn << 16) | size); 0 size means "use max" */
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		/* no local-interrupt counter on this bus flavour */
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		/* host register access is stubbed on sdmmc: only the access
+		 * width (8/16/32) is reported, no actual register read
+		 */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		          sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		/* stubbed: validates and traces, performs no write */
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		          sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+/* Program the Broadcom SEPINT CCCR register: route card interrupts to
+ * the dedicated out-of-band pin (@enable) or leave only the active-high
+ * polarity bit set (disable).
+ */
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Single-byte CMD52 config read; sdioh_request_byte does the locking. */
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	return sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+}
+
+/* Single-byte CMD52 config write; sdioh_request_byte does the locking. */
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	return sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+}
+
+/* Read a little-endian 24-bit CIS pointer starting at @regaddr and
+ * return its valid low 17 bits.  Read errors are logged and the byte is
+ * left at whatever regread produced.
+ *
+ * Fix vs. original: scratch is now zero-initialized -- the loop fills
+ * only 3 of its 4 bytes, so ltoh32(scratch) previously read one
+ * uninitialized byte (masked off on little-endian, garbage on
+ * big-endian and undefined behavior either way).
+ */
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch = 0;
+	uint32 regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+/* Copy @length bytes of function @func's CIS into @cisd, one CMD52 read
+ * per byte via F0.  Requires func_cis_ptr[] populated by
+ * sdioh_sdmmc_card_enablefuncs(); zeroes the buffer and fails otherwise.
+ *
+ * NOTE(review): the error test `< 0` assumes regread returns a negative
+ * code on failure, while get_cisaddr above compares against SUCCESS --
+ * confirm the regread return convention.  Also the per-call pointer log
+ * uses sd_err, which looks like an info-level message.
+ */
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* CMD52: read (@rw == 0) or write one byte of function @func at
+ * @regaddr.  F0 writes are restricted: only IOEN (handled as F2
+ * enable/disable), IOABORT (when MMC_SDIO_ABORT) and regaddr >= 0xF0
+ * are allowed.  Returns SDIOH_API_RC_SUCCESS/FAIL.
+ *
+ * Fix vs. original: err_ret is zero-initialized.  Several F0 write
+ * paths (IOEN with no F2 present, the disallowed regaddr < 0xF0 case)
+ * fell through to the final `if (err_ret)` with err_ret never written,
+ * reading an uninitialized value.
+ */
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if(rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (gInstance->func[2]) {
+					sdio_claim_host(gInstance->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(gInstance->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(gInstance->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				sdio_claim_host(gInstance->func[func]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another api
+				 * depending upon MMC driver change.
+				 * As of this time, this is temporaray one
+				 */
+				sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				sdio_claim_host(gInstance->func[func]);
+				sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(gInstance->func[func]);
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			sdio_claim_host(gInstance->func[func]);
+			sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
+			sdio_release_host(gInstance->func[func]);
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		sdio_claim_host(gInstance->func[func]);
+
+		if (func == 0) {
+			*byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+		} else {
+			*byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+		}
+
+		sdio_release_host(gInstance->func[func]);
+	}
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+			rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * CMD52-style word access: read or write a 2- or 4-byte register on
+ * function 'func'.  F0 is rejected because only CMD52 byte access is
+ * permitted there.  The value travels through *word.
+ * NOTE(review): err_ret is pre-set to SDIOH_API_RC_FAIL (an SDIOH status
+ * code) but is otherwise filled with MMC-layer error codes; the "invalid
+ * nbytes" paths return FAIL only if SDIOH_API_RC_FAIL is non-zero --
+ * verify the macro value.
+ */
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(gInstance->func[func]);
+
+	if(rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(gInstance->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(gInstance->func[func]);
+
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+			rw ? "Write" : "Read", err_ret));
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * Transfer a packet (possibly a chain) to/from the card at 'addr' on
+ * function 'func'.  When rxchain is enabled and the total length exceeds
+ * one block, whole blocks are moved with a hand-built CMD53 block-mode
+ * request over a scatter-gather list; any leftover tail (and everything,
+ * in the simple case) goes through per-packet PIO helpers.
+ * fix_inc == SDIOH_DATA_FIX selects FIFO (non-incrementing) addressing.
+ * Returns SDIOH_API_RC_SUCCESS or SDIOH_API_RC_FAIL.
+ */
+static SDIOH_API_RC
+sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	uint32 SGCount = 0;
+	int err_ret = 0;
+	void *pnext, *pprev;
+	uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
+	uint blk_num;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	/* Total length over the whole packet chain. */
+	ttl_len = xfred_len = 0;
+	/* at least 4 bytes alignment of skb buff is guaranteed */
+	for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
+		ttl_len += PKTLEN(sd->osh, pnext);
+
+	/* Split into a whole-block DMA part and a PIO tail. */
+	if (!sd->use_rxchain || ttl_len <= sd->client_block_size[func]) {
+		blk_num = 0;
+		dma_len = 0;
+	} else {
+		blk_num = ttl_len / sd->client_block_size[func];
+		dma_len = blk_num * sd->client_block_size[func];
+	}
+	lft_len = ttl_len - dma_len;
+
+	sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
+		__FUNCTION__, write ? "W" : "R",
+		ttl_len, func, addr, blk_num, lft_len));
+
+	if (0 != dma_len) {
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+		/* Set up DMA descriptors */
+		pprev = pkt;
+		for (pnext = pkt;
+		     pnext && dma_len;
+		     pnext = PKTNEXT(sd->osh, pnext)) {
+			pkt_len = PKTLEN(sd->osh, pnext);
+
+			if (dma_len > pkt_len)
+				dma_len -= pkt_len;
+			else {
+				/* Last (possibly partial) packet of the DMA part;
+				 * remember how much of it was consumed so the PIO
+				 * tail resumes mid-packet.
+				 */
+				pkt_len = xfred_len = dma_len;
+				dma_len = 0;
+				pkt = pnext;
+			}
+
+			sg_set_buf(&sd->sg_list[SGCount++],
+				(uint8*)PKTDATA(sd->osh, pnext),
+				pkt_len);
+
+			/* NOTE(review): checked after the entry is written and the
+			 * counter incremented, so a chain that exactly fills the SG
+			 * table is rejected; also returns without undoing anything.
+			 */
+			if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
+				sd_err(("%s: sg list entries exceed limit\n",
+					__FUNCTION__));
+				return (SDIOH_API_RC_FAIL);
+			}
+		}
+
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = SGCount;
+		mmc_dat.blksz = sd->client_block_size[func];
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+
+		/* Build the CMD53 (IO_RW_EXTENDED) argument by hand:
+		 * bit31 R/W, bits30-28 function, bit27 block mode,
+		 * bit26 incrementing address, bits25-9 address, bits8-0 count.
+		 */
+		mmc_cmd.opcode = 53;		/* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+
+		sdio_claim_host(gInstance->func[func]);
+		mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card);
+		mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req);
+		sdio_release_host(gInstance->func[func]);
+
+		err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			/* Fall back: disable rxchain and retry everything via PIO. */
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+			       __FUNCTION__,
+			       write ? "write" : "read",
+			       err_ret));
+			sd_err(("%s:Disabling rxchain and fire it with PIO\n",
+			       __FUNCTION__));
+			sd->use_rxchain = FALSE;
+			pkt = pprev;
+			lft_len = ttl_len;
+		} else if (!fifo) {
+			/* Advance past the bytes the DMA part already moved. */
+			addr = addr + ttl_len - lft_len - dma_len;
+		}
+	}
+
+	/* PIO mode */
+	if (0 != lft_len) {
+		/* Claim host controller */
+		sdio_claim_host(gInstance->func[func]);
+		for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+			uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
+				xfred_len;
+			pkt_len = PKTLEN(sd->osh, pnext);
+			if (0 != xfred_len) {
+				pkt_len -= xfred_len;
+				xfred_len = 0;
+			}
+			/* Round up to a 4-byte multiple. */
+			pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+#ifdef CONFIG_MMC_MSM7X00A
+			if ((pkt_len % 64) == 32) {
+				sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+				pkt_len += 32;
+			}
+#endif /* CONFIG_MMC_MSM7X00A */
+
+			/* NOTE(review): the first two branches are identical, so a
+			 * FIFO write also uses sdio_memcpy_toio(); sdio_writesb()
+			 * may have been intended for the write+fifo case -- confirm
+			 * against the original vendor driver.
+			 */
+			if ((write) && (!fifo))
+				err_ret = sdio_memcpy_toio(
+						gInstance->func[func],
+						addr, buf, pkt_len);
+			else if (write)
+				err_ret = sdio_memcpy_toio(
+						gInstance->func[func],
+						addr, buf, pkt_len);
+			else if (fifo)
+				err_ret = sdio_readsb(
+						gInstance->func[func],
+						buf, addr, pkt_len);
+			else
+				err_ret = sdio_memcpy_fromio(
+						gInstance->func[func],
+						buf, addr, pkt_len);
+
+			if (err_ret)
+				sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+				       __FUNCTION__,
+				       (write) ? "TX" : "RX",
+				       pnext, SGCount, addr, pkt_len, err_ret));
+			else
+				sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+					__FUNCTION__,
+					(write) ? "TX" : "RX",
+					pnext, SGCount, addr, pkt_len));
+
+			if (!fifo)
+				addr += pkt_len;
+			SGCount ++;
+		}
+		sdio_release_host(gInstance->func[func]);
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC Status;
+	void *mypkt = NULL;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Case 1: we don't have a packet. */
+	if (pkt == NULL) {
+		/* Allocate a scratch packet, copy the caller's buffer in (for a
+		 * write) or out (for a read), and free it again.
+		 */
+		sd_data(("%s: Creating new %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", buflen_u));
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, buflen_u));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
+		}
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	} else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
+		/* Case 2: We have a packet, but it is unaligned. */
+		/* NOTE(review): the (uint32) cast of a pointer truncates on
+		 * 64-bit targets; for an alignment test of the low bits it
+		 * still works, but (uintptr) would be the safe spelling.
+		 */
+
+		/* In this case, we cannot have a chain. */
+		ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+		/* Bounce through a freshly allocated, aligned packet. */
+		sd_data(("%s: Creating aligned %s Packet, len=%d\n",
+		         __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#else
+		if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+			sd_err(("%s: PKTGET failed: len %d\n",
+			           __FUNCTION__, PKTLEN(sd->osh, pkt)));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		/* For a write, copy the buffer data into the packet. */
+		if (write) {
+			bcopy(PKTDATA(sd->osh, pkt),
+			      PKTDATA(sd->osh, mypkt),
+			      PKTLEN(sd->osh, pkt));
+		}
+
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+
+		/* For a read, copy the packet data back to the buffer. */
+		if (!write) {
+			bcopy(PKTDATA(sd->osh, mypkt),
+			      PKTDATA(sd->osh, pkt),
+			      PKTLEN(sd->osh, mypkt));
+		}
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
+#else
+		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	} else { /* case 3: We have a packet and it is aligned. */
+		sd_data(("%s: Aligned %s Packet, direct DMA\n",
+		         __FUNCTION__, write ? "Tx" : "Rx"));
+		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+	}
+
+	return (Status);
+}
+
+/* this function performs "abort" for both of host & device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	/* Fix: use uint8 so &t_func matches sdioh_request_byte()'s uint8 *
+	 * parameter (the original 'char' relied on char being unsigned and
+	 * produced an incompatible-pointer / pointer-sign diagnostic).
+	 */
+	uint8 t_func = (uint8) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+/* Intentional no-op in this back-end: the MMC stack owns card reset.
+ * Always reports success.
+ */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+/* Clears CLIENT_INTR in the software interrupt mask only; no hardware
+ * register is touched here.
+ */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+/* Sets CLIENT_INTR in the software interrupt mask only; counterpart of
+ * sdioh_sdmmc_devintr_off().
+ */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+/* Reads a 1-, 2- or 4-byte register into *data.  Byte access (CMD52) is
+ * forced for F0 or regsize == 1; otherwise a word access is issued and
+ * masked down to regsize.
+ * NOTE(review): the status of the underlying sdioh_request_byte()/word()
+ * calls is ignored, so this always returns SUCCESS even on bus errors.
+ */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+/* F1 SDIO interrupt entry point.  The MMC core calls this with the host
+ * claimed; it is released for the duration of the client dispatch and
+ * re-claimed before returning, so the registered handler may issue its
+ * own claimed SDIO accesses.
+ */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
+	sdio_release_host(gInstance->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		/* Interrupt arrived while client interrupts are disabled. */
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(gInstance->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+/* Registered only so the MMC core has a handler for F2; all real
+ * interrupt work happens in IRQHandler() on F1.
+ */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+
+	sd = gInstance->sd;
+
+	ASSERT(sd != NULL);
+	BCM_REFERENCE(sd);
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+/* Writes a 1-, 2- or 4-byte register.  Byte access (CMD52) is forced for
+ * F0 or regsize == 1.  Sub-call status is ignored; always returns SUCCESS.
+ */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		/* Fix: this is a register write; the original passed SDIOH_READ,
+		 * which issued a CMD52 read and discarded 'data'.
+		 */
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		/* Fix: same read-vs-write direction bug as above. */
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+/*
+ * Two-stage start of the dongle interface.
+ * stage 0: re-enumerate the card after power-up (sdio_reset_comm),
+ *          restore F1/F2 block sizes and re-enable the functions.
+ * stage 1: hook up the SDIO interrupt handlers (or the OOB interrupt
+ *          when OOB_INTR_ONLY is configured).
+ * Interrupts must stay off until firmware download completes, otherwise
+ * polled SDIO accesses would interfere.
+ * Returns 0, or the sdio_reset_comm() error code.
+ */
+int
+sdioh_start(sdioh_info_t *si, int stage)
+{
+	int ret;
+	sdioh_info_t *sd = gInstance->sd;
+
+	/* Need to do this stages as we can't enable the interrupt till
+	   downloading of the firmware is complete, other wise polling
+	   sdio access will come in way
+	*/
+	if (gInstance->func[0]) {
+		if (stage == 0) {
+			/* Since the power to the chip is killed, we will have
+			   re enumerate the device again. Set the block size
+			   and enable the fucntion 1 for in preparation for
+			   downloading the code
+			*/
+			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+			   2.6.27. The implementation prior to that is buggy, and needs broadcom's
+			   patch for it
+			*/
+			if ((ret = sdio_reset_comm(gInstance->func[0]->card))) {
+				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+				return ret;
+			}
+			else {
+				sd->num_funcs = 2;
+				sd->sd_blockmode = TRUE;
+				sd->use_client_ints = TRUE;
+				sd->client_block_size[0] = 64;
+
+				/* Claim host controller */
+				sdio_claim_host(gInstance->func[1]);
+
+				sd->client_block_size[1] = 64;
+				if (sdio_set_block_size(gInstance->func[1], 64)) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+				}
+
+				/* Release host controller F1 */
+				sdio_release_host(gInstance->func[1]);
+
+				if (gInstance->func[2]) {
+					/* Claim host controller F2 */
+					sdio_claim_host(gInstance->func[2]);
+
+					sd->client_block_size[2] = sd_f2_blocksize;
+					if (sdio_set_block_size(gInstance->func[2],
+						sd_f2_blocksize)) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+							"blocksize to %d\n", sd_f2_blocksize));
+					}
+
+					/* Release host controller F2 */
+					sdio_release_host(gInstance->func[2]);
+				}
+
+				sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(gInstance->func[0]);
+			/* Fix: F2 may be absent (it is set NULL for NIC devices
+			 * in the probe path); don't pass NULL to sdio_claim_irq().
+			 */
+			if (gInstance->func[2])
+				sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+			sdio_claim_irq(gInstance->func[1], IRQHandler);
+			sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr();
+#endif
+			bcmsdh_oob_intr_set(TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+/*
+ * Counterpart of sdioh_start(): detach the SDIO interrupt handlers (or
+ * disable the OOB interrupt) before the device is powered off, so the
+ * MMC stack's polling does not touch a dead card.
+ * Always returns 0.
+ */
+int
+sdioh_stop(sdioh_info_t *si)
+{
+	/* MSM7201A Android sdio stack has bug with interrupt
+	   So internaly within SDIO stack they are polling
+	   which cause issue when device is turned off. So
+	   unregister interrupt with SDIO stack to stop the
+	   polling
+	*/
+	if (gInstance->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(gInstance->func[0]);
+		/* Fix: func[1]/func[2] can be NULL (F2 is never set for NIC
+		 * devices, F1 is cleared in bcmsdh_sdmmc_remove()); guard
+		 * against passing NULL to sdio_release_irq().
+		 */
+		if (gInstance->func[1])
+			sdio_release_irq(gInstance->func[1]);
+		if (gInstance->func[2])
+			sdio_release_irq(gInstance->func[2]);
+		sdio_release_host(gInstance->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr();
+#endif
+		bcmsdh_oob_intr_set(FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+/* Lock-free wait is not needed with the Linux MMC stack: stub that
+ * always reports "lock free" (1).
+ */
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+
+/* GPIO control is not supported by this back-end: stub, always fails. */
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+/* GPIO output is not supported by this back-end: stub, always fails. */
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+/* GPIO input is not supported by this back-end: stub, always FALSE. */
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+/* GPIO init is not supported by this back-end: stub, always fails. */
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
new file mode 100644
index 00000000000000..1bb1511e319a2c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,387 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 312783 2012-02-03 22:53:56Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <linux/sched.h> /* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239 43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+
+
+#include <bcmsdh_sdmmc.h>
+
+#include <dhd_dbg.h>
+
+#ifdef WL_CFG80211
+extern void wl_cfg80211_set_parent_dev(void *dev);
+#endif
+
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern int dhd_os_check_wakelock(void *dhdp);
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+PBCMSDH_SDMMC_INSTANCE gInstance;
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern int bcmsdh_probe(struct device *dev);
+extern int bcmsdh_remove(struct device *dev);
+extern volatile bool dhd_mmc_suspend;
+
+/*
+ * SDIO function probe.  F1 arrival synthesizes a fake F0 sdio_func (num 0)
+ * so the rest of the driver can address function 0 uniformly; device 0x4
+ * (4318 NIC) probes immediately off F1.  F2 arrival triggers the real
+ * bcmsdh_probe().  Each discovered function is cached in gInstance->func[].
+ */
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+	static struct sdio_func sdio_func_0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_trace(("sdio_device: 0x%04x\n", func->device));
+	sd_trace(("Function#: 0x%04x\n", func->num));
+
+	/* Fix: guard the global before dereferencing it -- a probe can race
+	 * sdio_function_cleanup(), which frees gInstance.
+	 */
+	if (gInstance == NULL) {
+		sd_err(("%s: gInstance is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	if (func->num == 1) {
+		sdio_func_0.num = 0;
+		sdio_func_0.card = func->card;
+		gInstance->func[0] = &sdio_func_0;
+		if(func->device == 0x4) { /* 4318 */
+			gInstance->func[2] = NULL;
+			sd_trace(("NIC found, calling bcmsdh_probe...\n"));
+			ret = bcmsdh_probe(&func->dev);
+		}
+	}
+
+	gInstance->func[func->num] = func;
+
+	if (func->num == 2) {
+#ifdef WL_CFG80211
+		wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+		sd_trace(("F2 found, calling bcmsdh_probe...\n"));
+		ret = bcmsdh_probe(&func->dev);
+	}
+
+	return ret;
+}
+
+/* SDIO function removal.  F2 removal tears down the bcmsdh layer; F1
+ * removal disables the function and drops its cached pointer.
+ */
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+	if (func->num == 2) {
+		sd_trace(("F2 found, calling bcmsdh_remove...\n"));
+		bcmsdh_remove(&func->dev);
+	} else if (func->num == 1) {
+		sdio_claim_host(func);
+		sdio_disable_func(func);
+		sdio_release_host(func);
+		gInstance->func[1] = NULL;
+	}
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+/* System suspend hook (F2 only; other functions return 0 immediately).
+ * Refuses suspend (-EBUSY) while a DHD wakelock is held, then raises the
+ * dhd_mmc_suspend flag so in-flight SDIO users back off.
+ */
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+
+	if (func->num != 2)
+		return 0;
+
+	sd_trace(("%s Enter\n", __FUNCTION__));
+
+	if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
+		return -EBUSY;
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(0);
+#endif /* defined(OOB_INTR_ONLY) */
+	dhd_mmc_suspend = TRUE;
+	/* Make the flag visible to other CPUs before suspend proceeds. */
+	smp_mb();
+
+	return 0;
+}
+
+/* System resume hook: clears the dhd_mmc_suspend flag and, in OOB mode,
+ * re-arms the out-of-band interrupt when the F2 interface is up.
+ */
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+#if defined(OOB_INTR_ONLY)
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+#endif
+	sd_trace(("%s Enter\n", __FUNCTION__));
+	dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+	if ((func->num == 2) && dhd_os_check_if_up(bcmsdh_get_drvdata()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+
+	smp_mb();
+	return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+ .suspend = bcmsdh_sdmmc_suspend,
+ .resume = bcmsdh_sdmmc_resume,
+};
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+/* Semaphore posted by dummy_probe() to signal "card present". */
+static struct semaphore *notify_semaphore = NULL;
+
+/* Probe for the throw-away notification driver: just wake whoever is
+ * waiting in sdio_func_reg_notify().
+ */
+static int dummy_probe(struct sdio_func *func,
+                       const struct sdio_device_id *id)
+{
+	if (notify_semaphore)
+		up(notify_semaphore);
+	return 0;
+}
+
+/* Nothing to tear down for the notification driver. */
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+ .probe = dummy_probe,
+ .remove = dummy_remove,
+ .name = "dummy_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+ };
+
+/* Register the dummy driver so the caller's semaphore is posted as soon
+ * as a matching SDIO card appears.  Returns sdio_register_driver() status.
+ */
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+/* Unregister the card-presence notification driver. */
+void sdio_func_unreg_notify(void)
+{
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+ .probe = bcmsdh_sdmmc_probe,
+ .remove = bcmsdh_sdmmc_remove,
+ .name = "bcmsdh_sdmmc",
+ .id_table = bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+ .drv = {
+ .pm = &bcmsdh_sdmmc_pm_ops,
+ },
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+ };
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+};
+
+
+/* Allocate and attach the per-instance OS state (spinlock wrapper).
+ * sd->sdos_info is deliberately assigned before the NULL check so a
+ * failed allocation leaves it NULL rather than stale.
+ * Returns BCME_OK or BCME_NOMEM.
+ */
+int
+sdioh_sdmmc_osinit(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+
+	sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+	sd->sdos_info = (void*)sdos;
+	if (sdos == NULL)
+		return BCME_NOMEM;
+
+	sdos->sd = sd;
+	spin_lock_init(&sdos->lock);
+	return BCME_OK;
+}
+
+/* Free the OS state allocated by sdioh_sdmmc_osinit().  Asserts both the
+ * instance and its sdos_info are present.
+ */
+void
+sdioh_sdmmc_osfree(sdioh_info_t *sd)
+{
+	struct sdos_info *sdos;
+	ASSERT(sd && sd->sdos_info);
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+/* Interrupt enable/disable */
+/* Atomically (under sdos->lock, IRQs off) flips the client-interrupt
+ * software mask via sdioh_sdmmc_devintr_on/off().  When not in OOB mode,
+ * enabling requires a handler to have been registered first.
+ * Returns SDIOH_API_RC_SUCCESS, or SDIOH_API_RC_FAIL if no handler.
+ */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	ulong flags;
+	struct sdos_info *sdos;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+	sdos = (struct sdos_info *)sd->sdos_info;
+	ASSERT(sdos);
+
+#if !defined(OOB_INTR_ONLY)
+	if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+		sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	/* Ensure atomicity for enable/disable calls */
+	spin_lock_irqsave(&sdos->lock, flags);
+
+	sd->client_intr_enabled = enable;
+	if (enable) {
+		sdioh_sdmmc_devintr_on(sd);
+	} else {
+		sdioh_sdmmc_devintr_off(sd);
+	}
+
+	spin_unlock_irqrestore(&sdos->lock, flags);
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+
+#ifdef BCMSDH_MODULE
+/* Module entry: defers all real work to sdio_function_init().
+ * NOTE(review): the return value of sdio_function_init() is discarded, so
+ * a failed driver registration still reports module-load success.
+ */
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	sdio_function_init();
+	return error;
+}
+
+/* Module exit: unregister the driver and free the global instance. */
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+*/
+/* Allocate the global driver instance and register the SDIO driver.
+ * Returns 0, -ENOMEM, or the sdio_register_driver() error.
+ */
+int sdio_function_init(void)
+{
+	int error = 0;
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+
+	gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
+	if (!gInstance)
+		return -ENOMEM;
+
+	error = sdio_register_driver(&bcmsdh_sdmmc_driver);
+	if (error) {
+		/* Fix: don't leak the instance when registration fails. */
+		kfree(gInstance);
+		gInstance = NULL;
+	}
+
+	return error;
+}
+
+/*
+ * module cleanup
+*/
+extern int bcmsdh_remove(struct device *dev);
+/* Unregister the SDIO driver and release the global instance. */
+void sdio_function_cleanup(void)
+{
+	sd_trace(("%s Enter\n", __FUNCTION__));
+
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+
+	/* kfree(NULL) is a no-op, so no guard is needed; clear the global
+	 * afterwards so late callers can detect the teardown instead of
+	 * dereferencing freed memory.
+	 */
+	kfree(gInstance);
+	gInstance = NULL;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c
new file mode 100644
index 00000000000000..0d92efc6a62a43
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmutils.c
@@ -0,0 +1,2093 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c 312855 2012-02-04 02:01:18Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+void *_bcmutils_dummy_fn = NULL;
+
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(buf, PKTDATA(osh, p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+ int len;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p)) {
+ len = PKTLEN(osh, p);
+ total += len;
+ }
+
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p))
+ cnt++;
+
+ return cnt;
+}
+
+
/* count segments of a chained packet, with a work-around adjustment:
 * for segments longer than 128 bytes an extra segment is counted when
 * the data straddles a 2KB boundary, or when the length measured from
 * the next 64B-aligned address leaves a remainder of 1-4 bytes mod 128
 * (presumably a DMA-engine segmentation quirk -- TODO confirm).
 */
uint BCMFASTPATH
pktsegcnt_war(osl_t *osh, void *p)
{
	uint cnt;
	uint8 *pktdata;
	uint len, remain, align64;

	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
		cnt++;
		len = PKTLEN(osh, p);
		if (len > 128) {
			pktdata = (uint8 *)PKTDATA(osh, p);	/* starting address of data */
			/* Check for page boundary straddle (2048B) */
			if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
				cnt++;

			align64 = (uint)((uintptr)pktdata & 0x3f);	/* aligned to 64B */
			align64 = (64 - align64) & 0x3f;
			len -= align64;	/* bytes from aligned 64B to end */
			/* if aligned to 128B, check for MOD 128 between 1 to 4B */
			remain = len % 128;
			if (remain > 0 && remain <= 4)
				cnt++;	/* add extra seg */
		}
	}

	return cnt;
}
+
/* Return a pointer to the byte at 'offset' into the packet chain, or
 * NULL when offset exceeds the total chain length.
 * NOTE(review): when offset == total length this returns a pointer one
 * past the last byte of the final segment rather than NULL -- callers
 * must not dereference it in that case; confirm intended.
 */
uint8 * BCMFASTPATH
pktoffset(osl_t *osh, void *p, uint offset)
{
	uint total = pkttotlen(osh, p);
	uint pkt_off = 0, len = 0;
	uint8 *pdata = (uint8 *) PKTDATA(osh, p);

	if (offset > total)
		return NULL;

	/* walk segments until the one containing 'offset' */
	for (; p; p = PKTNEXT(osh, p)) {
		pdata = (uint8 *) PKTDATA(osh, p);
		pkt_off = offset - len;	/* offset within this segment */
		len += PKTLEN(osh, p);
		if (len > offset)
			break;
	}
	return (uint8*) (pdata+pkt_off);
}
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if (prev_p == NULL)
+ return NULL;
+
+ if ((p = PKTLINK(prev_p)) == NULL)
+ return NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(prev_p, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (prec < pq->num_prec)
+ pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+ int prec;
+
+ /* Optimize flush, if pktq len = 0, just return.
+ * pktq len of 0 means pktq's prec q's are all empty.
+ */
+ if (pq->len == 0) {
+ return;
+ }
+
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir, fn, arg);
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
/* Priority peek from a specific set of precedences: return (without
 * removing) the head packet of the highest non-empty precedence whose
 * bit is set in 'prec_bmp', or NULL if none; the precedence found is
 * optionally reported via 'prec_out'.
 */
void * BCMFASTPATH
pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
{
	struct pktq_prec *q;
	void *p;
	int prec;

	if (pq->len == 0)
	{
		return NULL;
	}
	/* settle hi_prec onto the highest non-empty precedence */
	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
		pq->hi_prec--;

	/* scan downwards for a non-empty precedence enabled in the bitmap */
	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
		if (prec-- == 0)
			return NULL;

	q = &pq->q[prec];

	if ((p = q->head) == NULL)
		return NULL;

	if (prec_out)
		*prec_out = prec;

	return p;
}
/* Priority dequeue from a specific set of precedences: remove and
 * return the head packet of the highest non-empty precedence whose bit
 * is set in 'prec_bmp', or NULL if none; the precedence is optionally
 * reported via 'prec_out'.
 */
void * BCMFASTPATH
pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
{
	struct pktq_prec *q;
	void *p;
	int prec;

	if (pq->len == 0)
		return NULL;

	/* settle hi_prec onto the highest non-empty precedence */
	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
		pq->hi_prec--;

	/* scan downwards for a non-empty precedence enabled in the bitmap */
	while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
		if (prec-- == 0)
			return NULL;

	q = &pq->q[prec];

	if ((p = q->head) == NULL)
		return NULL;

	if ((q->head = PKTLINK(p)) == NULL)
		q->tail = NULL;

	q->len--;

	if (prec_out)
		*prec_out = prec;

	pq->len--;

	PKTSETLINK(p, NULL);

	return p;
}
+
+#endif /* BCMDRIVER */
+
/* character classification table, indexed by unsigned char value:
 * one byte of _BCM_* class flags (control/space/digit/upper/lower/
 * punct/hex) per character code, used by the bcm_is*() macros.
 */
const unsigned char bcm_ctype[] = {

	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
	_BCM_C,			/* 8-15 */
	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
	_BCM_U|_BCM_X, _BCM_U,			/* 64-71 */
	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
	_BCM_L|_BCM_X, _BCM_L,			/* 96-103 */
	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 104-111 */
	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,			/* 112-119 */
	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C,			/* 120-127 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,				/* 128-143 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,				/* 144-159 */
	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,			/* 160-175 */
	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,			/* 176-191 */
	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,			/* 192-207 */
	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,			/* 208-223 */
	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,			/* 224-239 */
	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L				/* 240-255 */
};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+ ulong result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus)
+ return (ulong)-1;
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong)(-(long)result);
+
+ if (endp)
+ *endp = DISCARD_QUAL(cp, char);
+
+ return (result);
+}
+
/* parse a decimal integer from 's'; thin wrapper over bcm_strtoul */
int
bcm_atoi(const char *s)
{
	return (int)bcm_strtoul(s, NULL, 10);
}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+ int len, nlen;
+ int i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return DISCARD_QUAL(haystack, char);
+
+ nlen = strlen(needle);
+ len = strlen(haystack) - nlen + 1;
+
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return DISCARD_QUAL(&haystack[i], char);
+ return (NULL);
+}
+
/* append 'src' (including its terminator) to the end of 'dest';
 * dest must have room for the result
 */
char *
bcmstrcat(char *dest, const char *src)
{
	char *tail = dest + strlen(dest);

	while ((*tail++ = *src++) != '\0')
		;

	return dest;
}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
+
+ return (dest);
+}
+
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(),
+* but allows strToken() to be used by different strings or callers at the same
+* time. Each call modifies '*string' by substituting a NULL character for the
+* first delimiter that is encountered, and updates 'string' to point to the char
+* after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
char *
bcmstrtok(char **string, const char *delimiters, char *tokdelim)
{
	unsigned char *str;
	unsigned long map[8];	/* 256-bit membership bitmap of delimiter chars */
	int count;
	char *nextoken;

	if (tokdelim != NULL) {
		/* Prime the token delimiter */
		*tokdelim = '\0';
	}

	/* Clear control map */
	for (count = 0; count < 8; count++) {
		map[count] = 0;
	}

	/* Set bits in delimiter table (includes the terminating NUL) */
	do {
		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
	}
	while (*delimiters++);

	str = (unsigned char*)*string;

	/* Find beginning of token (skip over leading delimiters). Note that
	 * there is no token iff this loop sets str to point to the terminal
	 * null (*str == '\0').
	 * NOTE(review): the extra (*str == ' ') test also skips leading
	 * spaces even when ' ' is not in 'delimiters' -- confirm intended.
	 */
	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
		str++;
	}

	nextoken = (char*)str;

	/* Find the end of the token. If it is not the end of the string,
	 * put a null there.
	 */
	for (; *str; str++) {
		if (map[*str >> 5] & (1 << (*str & 31))) {
			if (tokdelim != NULL) {
				*tokdelim = *str;
			}

			*str++ = '\0';
			break;
		}
	}

	*string = (char*)str;

	/* Determine if a token has been found. */
	if (nextoken == (char *) str) {
		return NULL;
	}
	else {
		return nextoken;
	}
}
+
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare to strings case insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
+* t1 > t2, when ignoring case sensitivity.
+*****************************************************************************
+*/
/* case-insensitive string compare: 0 if equal, -1 if s1 < s2, 1 if
 * s1 > s2 (ignoring case)
 */
int
bcmstricmp(const char *s1, const char *s2)
{
	char c1, c2;

	while (*s1 && *s2) {
		c1 = xToLower(*s1);
		c2 = xToLower(*s2);
		if (c1 != c2)
			return (c1 < c2) ? -1 : 1;
		s1++;
		s2++;
	}

	/* the longer string compares greater */
	if (*s1 && !*s2)
		return 1;
	if (!*s1 && *s2)
		return -1;
	return 0;
}
+
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare to strings case insensitively, upto a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
+* t1 > t2, when ignoring case sensitivity.
+*****************************************************************************
+*/
/* case-insensitive string compare over at most 'cnt' characters:
 * 0 if equal, -1 if s1 < s2, 1 if s1 > s2 (ignoring case)
 */
int
bcmstrnicmp(const char* s1, const char* s2, int cnt)
{
	char c1, c2;

	while (cnt && *s1 && *s2) {
		c1 = xToLower(*s1);
		c2 = xToLower(*s2);
		if (c1 != c2)
			return (c1 < c2) ? -1 : 1;
		s1++;
		s2++;
		cnt--;
	}

	if (cnt == 0)
		return 0;
	/* the longer string compares greater */
	if (*s1 && !*s2)
		return 1;
	if (!*s1 && *s2)
		return -1;
	return 0;
}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+ int i = 0;
+ char *ep;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ p = ep;
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
+
+
#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
/* registry routine buffer preparation utility functions:
 * parameter order is like strncpy, but returns count
 * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
 */
ulong
wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
{
	ulong ncopied = 1;	/* accounts for the terminating NUL */
	ushort idx;

	if (abuflen == 0)
		return 0;

	/* wbuflen is in bytes */
	wbuflen /= sizeof(ushort);

	for (idx = 0; idx < wbuflen; idx++) {
		if (--abuflen == 0)
			break;
		*abuf++ = (char)*wbuf++;
		ncopied++;
	}
	*abuf = '\0';

	return ncopied;
}
#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char hex[] =
+ {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ const uint8 *octet = ea->octet;
+ char *p = buf;
+ int i;
+
+ for (i = 0; i < 6; i++, octet++) {
+ *p++ = hex[(*octet >> 4) & 0xf];
+ *p++ = hex[*octet & 0xf];
+ *p++ = ':';
+ }
+
+ *(p-1) = '\0';
+
+ return (buf);
+}
+
/* format an IPv4 address as dotted-quad into 'buf' (at least 16 bytes);
 * returns buf
 */
char *
bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
{
	snprintf(buf, 16, "%d.%d.%d.%d",
	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
	return (buf);
}
+
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+
+
+
+
#if defined(DHD_DEBUG)
/* pretty hex print a pkt buffer chain, one segment at a time */
void
prpkt(const char *msg, osl_t *osh, void *p0)
{
	void *seg;

	if (msg != NULL && msg[0] != '\0')
		printf("%s:\n", msg);

	for (seg = p0; seg != NULL; seg = PKTNEXT(osh, seg))
		prhex(NULL, PKTDATA(osh, seg), PKTLEN(osh, seg));
}
#endif
+
/* Takes an Ethernet frame and sets out-of-bound PKTPRIO.
 * Also updates the inplace vlan tag if requested.
 * For debugging, it returns an indication of what it did: one or more
 * PKTPRIO_* flags OR'd with the priority value that was set.
 */
uint BCMFASTPATH
pktsetprio(void *pkt, bool update_vtag)
{
	struct ether_header *eh;
	struct ethervlan_header *evh;
	uint8 *pktdata;
	int priority = 0;
	int rc = 0;

	pktdata = (uint8 *)PKTDATA(NULL, pkt);
	/* frame start must be 16-bit aligned for the header casts below */
	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));

	eh = (struct ether_header *) pktdata;

	if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
		uint16 vlan_tag;
		int vlan_prio, dscp_prio = 0;

		evh = (struct ethervlan_header *)eh;

		vlan_tag = ntoh16(evh->vlan_tag);
		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;

		if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
			uint8 tos_tc = IP_TOS46(ip_body);
			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
		}

		/* DSCP priority gets precedence over 802.1P (vlan tag) */
		if (dscp_prio != 0) {
			priority = dscp_prio;
			rc |= PKTPRIO_VDSCP;
		} else {
			priority = vlan_prio;
			rc |= PKTPRIO_VLAN;
		}
		/*
		 * If the DSCP priority is not the same as the VLAN priority,
		 * then overwrite the priority field in the vlan tag, with the
		 * DSCP priority value. This is required for Linux APs because
		 * the VLAN driver on Linux, overwrites the skb->priority field
		 * with the priority value in the vlan tag
		 */
		if (update_vtag && (priority != vlan_prio)) {
			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
			evh->vlan_tag = hton16(vlan_tag);
			rc |= PKTPRIO_UPD;
		}
	} else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
		uint8 *ip_body = pktdata + sizeof(struct ether_header);
		uint8 tos_tc = IP_TOS46(ip_body);
		priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
		rc |= PKTPRIO_DSCP;
	}

	ASSERT(priority >= 0 && priority <= MAXPRIO);
	PKTSETPRIO(pkt, priority);
	return (rc | priority);
}
+
+
/* scratch buffer for formatting unknown error codes (not thread-safe) */
static char bcm_undeferrstr[32];
static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;

/* Convert the error codes into related error strings  */
const char *
bcmerrorstr(int bcmerror)
{
	/* check if someone added a bcmerror code but forgot to add errorstring */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));

	/* out-of-range codes get a formatted "Undefined error N" string */
	if (bcmerror > 0 || bcmerror < BCME_LAST) {
		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
		return bcm_undeferrstr;
	}

	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);

	return bcmerrorstrtable[-bcmerror];
}
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
/* Validate the ioctl buffer length 'len' against iovar 'vi''s declared
 * type ('set' distinguishes set vs get operations); returns 0 on
 * success or a BCME_* error code. 'arg' is currently unused here.
 */
int
bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
{
	int bcmerror = 0;

	/* length check on io buf */
	switch (vi->type) {
	case IOVT_BOOL:
	case IOVT_INT8:
	case IOVT_INT16:
	case IOVT_INT32:
	case IOVT_UINT8:
	case IOVT_UINT16:
	case IOVT_UINT32:
		/* all integers are int32 sized args at the ioctl interface */
		if (len < (int)sizeof(int)) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_BUFFER:
		/* buffer must meet minimum length requirement */
		if (len < vi->minlen) {
			bcmerror = BCME_BUFTOOSHORT;
		}
		break;

	case IOVT_VOID:
		if (!set) {
			/* Cannot return nil... */
			bcmerror = BCME_UNSUPPORTED;
		} else if (len) {
			/* Set is an action w/o parameters */
			bcmerror = BCME_BUFTOOLONG;
		}
		break;

	default:
		/* unknown type for length check in iovar info */
		ASSERT(0);
		bcmerror = BCME_UNSUPPORTED;
	}

	return bcmerror;
}
+
+#endif /* BCMDRIVER */
+
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
/* CRC-8 lookup table (one entry per byte value) for the polynomial
 * x^8 + x^7 + x^6 + x^4 + x^2 + 1; see the banner comment above.
 */
static const uint8 crc8_table[256] = {
	0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
	0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
	0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
	0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
	0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
	0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
	0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
	0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
	0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
	0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
	0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
	0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
	0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
	0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
	0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
	0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
	0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
	0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
	0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
	0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
	0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
	0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
	0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
	0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
	0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
	0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
	0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
	0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
	0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
	0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
	0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
	0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
+{
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
/* CRC-16 lookup table (one entry per byte value) for the polynomial
 * x^16 + x^12 + x^5 + 1; see the banner comment above.
 */
static const uint16 crc16_table[256] = {
	0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
	0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
	0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
	0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
	0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
	0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
	0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
	0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
	0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
	0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
	0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
	0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
	0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
	0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
	0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
	0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
	0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
	0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
	0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
	0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
	0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
	0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
	0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
	0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
	0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
	0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
	0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
	0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
	0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
	0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
	0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
	0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
};
+
+/* CRC-16 (CCITT polynomial, reflected, table-driven via crc16_table).
+ * Pass CRC16_INIT_VALUE for a fresh start, or chain the previous return
+ * value to accumulate a CRC across several buffers.
+ */
+uint16
+hndcrc16(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++); /* one table lookup per byte */
+ return crc;
+}
+
+static const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+ uint8 *pend; /* one past the last input byte */
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++); /* table lookup, one byte per step */
+
+ return crc;
+}
+
+#ifdef notdef
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
+
+void
+testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
+ }
+
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt: the value starts at elt->data, so the next
+ * header sits len bytes past it; shrink the caller's remaining count
+ */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (TLV_HDR_LEN + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen; /* bytes of buf not yet consumed */
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter; stop when less than a header remains */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen: a matching id only counts if its
+ * whole value also fits within the buffer
+ */
+ if ((elt->id == key) &&
+ (totlen >= (len + TLV_HDR_LEN)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen; /* bytes of buf not yet consumed */
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter; input is assumed sorted by ascending id */
+ while (totlen >= TLV_HDR_LEN) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen: match only if the value fits */
+ if ((id == key) &&
+ (totlen >= (len + TLV_HDR_LEN)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+ return NULL;
+}
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG)
+/* Render the set bits of 'flags' as space-separated names taken from the
+ * bit-descriptor array 'bd' (terminated by a zero 'bit' entry) into buf,
+ * at most len bytes including the NUL. Bits with no descriptor are
+ * printed once as a hex constant; a trailing '>' marks truncated output.
+ * Returns the number of characters written.
+ */
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+ int i;
+ char* p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ uint32 bit;
+ const char* name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+/* print bytes formatted as hex to a string. return the resulting string length.
+ * buf must hold at least 2*len + 1 chars -- TODO confirm callers size it so.
+ */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src); /* 2 hex digits + NUL */
+ src++;
+ }
+ return (int)(p - str);
+}
+#endif
+
+/* pretty hex print a contiguous buffer: 16 bytes per line, each line
+ * prefixed with the decimal offset of its first byte
+ */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line); /* space left in the current line */
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+
+/* Printable names indexed by crypto algorithm number; used by
+ * bcm_crypto_algo_name(). NOTE(review): entry order must line up with
+ * the CRYPTO_ALGO_* numbering used by callers -- confirm against the
+ * 802.11 protocol header.
+ */
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+ "AES_OCB_MSDU",
+ "AES_OCB_MPDU",
+ "NALG", /* fixed: missing comma fused "NALG" and "UNDEF" into one
+ * entry ("NALGUNDEF"), shifting every later index by one */
+ "UNDEF",
+ "UNDEF",
+ "UNDEF",
+#ifdef BCMWAPI_WPI
+ "WAPI",
+#endif /* BCMWAPI_WPI */
+ "UNDEF"
+};
+
+/* Map a crypto algorithm number to its printable name; "ERR" if the
+ * index is outside the crypto_algo_names table.
+ */
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+/* Format a chip id into buf: ids in the 0x4000..0xa000 range print as
+ * hex, anything else as decimal. Returns buf.
+ */
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console in BUFSIZE_TODUMP_ATONCE chunks,
+ * temporarily NUL-terminating each chunk in place (the byte is
+ * restored, so buf is unchanged on return)
+ */
+void
+printbig(char *buf)
+{
+ uint len, max_len;
+ char c; /* saved byte overwritten by the temporary NUL */
+
+ len = strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* Format each register described in fielddesc_array (terminated by a
+ * NULL nameandfmt) into buf, reading values via
+ * read_rtn(arg0, arg1, offset). Returns the number of characters
+ * written; buf always stays NUL-terminated.
+ */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
+/* Build an iovar buffer: the NUL-terminated name followed immediately
+ * by datalen bytes of data. Returns the total length used, or 0 if
+ * name + data do not fit in buflen.
+ */
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1; /* include the terminating NUL */
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+/* Convert quarter-dBm (offset-encoded, see QDBM_OFFSET) to milliwatts,
+ * clamping values above the table range to the max uint16.
+ */
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1; /* power-of-ten divisor accumulated below */
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+ /* scale the qdBm index up to the range of the table 0-40
+ * where an offset of 40 qdBm equals a factor of 10 mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+/* Convert milliwatts to quarter-dBm by scaling mw into the table range
+ * (each *10 in mW is -40 qdBm) and picking the nearest table entry
+ * using midpoints between adjacent entries.
+ */
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+ uint8 qdbm;
+ int offset; /* qdBm offset adjusted while scaling mw up */
+ uint mw_uint = mw;
+ uint boundary; /* midpoint between adjacent table entries */
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary) break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
+
+
+/* Count the set bits in a bitmap of 'length' bytes using Kernighan's
+ * clear-lowest-set-bit loop (one iteration per set bit).
+ */
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1); /* clears the lowest set bit */
+ }
+ }
+ return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure: record both the running
+ * cursor (buf/size) and the original values for later accounting.
+ */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow: appends the
+ * formatted text to b and advances its cursor. On overflow or error the
+ * remaining size is zeroed, so subsequent calls become no-ops.
+ * Returns vsnprintf's raw result.
+ */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+ /* Non Ansi C99 compliant returns -1,
+ * Ansi compliant return r >= b->size,
+ * bcmstdlib returns 0, handle all
+ */
+ /* r == 0 is also the case when strlen(fmt) is zero.
+ * typically the case when "" is passed as argument.
+ */
+ if ((r == -1) || (r >= (int)b->size)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
+
+/* Append msg (if non-empty) followed by len bytes of buf rendered as
+ * uppercase hex to strbuf b, optionally ending with a newline.
+ */
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+{
+ int i;
+
+ if (msg != NULL && msg[0] != '\0')
+ bcm_bprintf(b, "%s", msg);
+ for (i = 0; i < len; i ++)
+ bcm_bprintf(b, "%02X", buf[i]);
+ if (newline)
+ bcm_bprintf(b, "\n");
+}
+
+/* Add 'amount' to a little-endian multi-byte counter of num_bytes
+ * bytes, propagating the carry: num[i] < amount after the add means
+ * the byte wrapped, so carry 1 into the next byte.
+ */
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break; /* no wrap -> no carry, done */
+ amount = 1; /* carry into the next byte */
+ }
+}
+
+/* Compare two nbytes-long numbers stored with the most-significant
+ * byte at the highest index, scanning from that end. Returns <0, 0
+ * or >0 like memcmp.
+ */
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+/* printf a labelled hex dump of 'data', 16 bytes per line. */
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+ int i;
+ int per_line = 0; /* bytes emitted on the current line */
+
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+/* Render an SSID into buf as printable text, doubling backslashes and
+ * escaping non-printable bytes as \xNN. buf must hold at least
+ * SSID_FMT_BUF_LEN bytes. Returns the resulting string length.
+ */
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER */
+
+/*
+ * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * also accepts nvram files which are already in the format of <var1>=<value>\0\<var2>=<value2>\0
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs.
+*/
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+ char *dp; /* write cursor: compacted output within varbuf */
+ bool findNewline; /* true while skipping the rest of a '#' comment line */
+ int column; /* chars written on the current output line */
+ unsigned int buf_len, n;
+ unsigned int pad = 0;
+
+ dp = varbuf;
+
+ findNewline = FALSE;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE; /* comment: drop through end of line */
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue; /* drop empty lines */
+ *dp++ = 0; /* newline becomes the NUL separator */
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = (unsigned int)(dp - varbuf);
+ /* round the reported length up to a 4-byte multiple when it fits */
+ if (buf_len % 4) {
+ pad = 4 - buf_len % 4;
+ if (pad && (buf_len + pad <= len)) {
+ buf_len += pad;
+ }
+ }
+
+ /* zero-fill the tail, guaranteeing the double-NUL terminator */
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 00000000000000..0a570f66af2252
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,936 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif
+
+#ifdef _bcmwifi_c_
+
+#include <bcmwifi.h>
+#else
+#include <bcmwifi_channels.h>
+#endif
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>
+#endif
+
+#ifndef D11AC_IOTYPES
+
+
+
+
+
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band, *bw, *sb;
+ uint channel;
+
+ band = "";
+ bw = "";
+ sb = "";
+ channel = CHSPEC_CHANNEL(chspec);
+
+ if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+ if (CHSPEC_IS40(chspec)) {
+ if (CHSPEC_SB_UPPER(chspec)) {
+ sb = "u";
+ channel += CH_10MHZ_APART;
+ } else {
+ sb = "l";
+ channel -= CH_10MHZ_APART;
+ }
+ } else if (CHSPEC_IS10(chspec)) {
+ bw = "n";
+ }
+
+
+ snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+ return (buf);
+}
+
+
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ char *endp = NULL;
+ uint channel, band, bw, ctl_sb;
+ char c;
+
+ channel = strtoul(a, &endp, 10);
+
+
+ if (endp == a)
+ return 0;
+
+ if (channel > MAXCHANNEL)
+ return 0;
+
+ band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+ a = endp;
+
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+
+
+ if (c == 'a' || c == 'b') {
+ band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+ a++;
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+ }
+
+
+ if (c == 'n') {
+ bw = WL_CHANSPEC_BW_10;
+ } else if (c == 'l') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+
+ if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+ channel += CH_10MHZ_APART;
+ else
+ return 0;
+ } else if (c == 'u') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+
+ if (channel > CH_20MHZ_APART)
+ channel -= CH_10MHZ_APART;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+
+done:
+ return (channel | band | bw | ctl_sb);
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return TRUE;
+
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return TRUE;
+
+
+ if (CHSPEC_IS20(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return TRUE;
+ } else {
+ if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint8 ctl_chan;
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+
+ ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+
+ ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER);
+
+ ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ }
+
+ return ctl_chan;
+}
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = 0;
+ uint8 channel;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return chspec;
+ } else {
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+ channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+#else
+
+
+
+
+
+
+static const char *wf_chspec_bw_str[] =
+{
+ "5",
+ "10",
+ "20",
+ "40",
+ "80",
+ "160",
+ "80+80",
+ "na"
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+ (sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+ (sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+ (sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+ (sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+
+
+/* Return the bandwidth of chspec in MHz via wf_chspec_bw_mhz
+ * (0 for an out-of-range bw field).
+ */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+ uint bw;
+
+ bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+ return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+
+/* Number of 5MHz channel steps from the center of a bw-MHz band to the
+ * center of its lowest 20MHz sub-channel.
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+ /* edge is half the bandwidth minus half a 20MHz channel, in 5MHz units */
+ return (uint8)(((bw - 20) / 2) / 5);
+}
+
+
+/* 20MHz channel number of the lowest sub-channel of the bw-MHz band
+ * centered at center_ch.
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+ return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+
+/* Sideband index (0-based 20MHz slot) of control channel ctl_ch inside
+ * the bw-MHz band centered at center_ch; -1 if ctl_ch is not aligned
+ * on a 20MHz slot or lies outside the band.
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+ uint lowest = channel_low_edge(center_ch, bw);
+ uint sb;
+
+ if ((ctl_ch - lowest) % 4) {
+ /* not on a 20MHz (4-channel) boundary within the band */
+ return -1;
+ }
+
+ sb = ((ctl_ch - lowest) / 4);
+
+ /* a bw-MHz band holds bw/20 sidebands */
+ if (sb >= (bw / 20)) {
+ /* ctl_ch is above the band */
+ return -1;
+ }
+
+ return sb;
+}
+
+
+/* Inverse of channel_to_sb: control channel for sideband slot sb of the
+ * bw-MHz band centered at center_ch.
+ */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+ return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+
+/* Map an 80MHz center channel to its index in wf_5g_80m_chans, or -1
+ * if it is not a valid 80MHz center.
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+ uint i;
+ for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+ if (ch == wf_5g_80m_chans[i])
+ return i;
+ }
+
+ return -1;
+}
+
+
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band;
+ uint ctl_chan;
+
+ if (wf_chspec_malformed(chspec))
+ return NULL;
+
+ band = "";
+
+
+ if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+
+ ctl_chan = wf_chspec_ctlchan(chspec);
+
+
+ if (CHSPEC_IS20(chspec)) {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+ } else if (!CHSPEC_IS8080(chspec)) {
+ const char *bw;
+ const char *sb = "";
+
+ bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+
+ if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ }
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+
+ if (CHSPEC_IS40(chspec)) {
+ sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+ } else {
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+ }
+#endif
+
+ } else {
+
+ uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+ uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+
+ chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+ chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+
+ snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+ }
+
+ return (buf);
+}
+
+/* Parse an unsigned decimal number at *p. On success advance *p past
+ * the digits, store the value in *num and return 1; return 0 if no
+ * digits were consumed.
+ */
+static int
+read_uint(const char **p, unsigned int *num)
+{
+ unsigned long val;
+ char *endp = NULL;
+
+ val = strtoul(*p, &endp, 10);
+
+ /* endp unchanged means strtoul consumed nothing */
+ if (endp == *p)
+ return 0;
+
+
+ *p = endp;
+
+ *num = (unsigned int)val;
+
+ return 1;
+}
+
+
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+ chanspec_t chspec;
+ uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+ uint num, ctl_ch;
+ uint ch1, ch2;
+ char c, sb_ul = '\0';
+ int i;
+
+ bw = 20;
+ chspec_sb = 0;
+ chspec_ch = ch1 = ch2 = 0;
+
+
+ if (!read_uint(&a, &num))
+ return 0;
+
+
+ c = tolower(a[0]);
+ if (c == 'g') {
+ a ++;
+
+
+ if (num == 2)
+ chspec_band = WL_CHANSPEC_BAND_2G;
+ else if (num == 5)
+ chspec_band = WL_CHANSPEC_BAND_5G;
+ else
+ return 0;
+
+
+ if (!read_uint(&a, &ctl_ch))
+ return 0;
+
+ c = tolower(a[0]);
+ }
+ else {
+
+ ctl_ch = num;
+ chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ }
+
+ if (c == '\0') {
+
+ chspec_bw = WL_CHANSPEC_BW_20;
+ goto done_read;
+ }
+
+ a ++;
+
+
+ if (c == 'u' || c == 'l') {
+ sb_ul = c;
+ chspec_bw = WL_CHANSPEC_BW_40;
+ goto done_read;
+ }
+
+
+ if (c != '/')
+ return 0;
+
+
+ if (!read_uint(&a, &bw))
+ return 0;
+
+
+ if (bw == 20) {
+ chspec_bw = WL_CHANSPEC_BW_20;
+ } else if (bw == 40) {
+ chspec_bw = WL_CHANSPEC_BW_40;
+ } else if (bw == 80) {
+ chspec_bw = WL_CHANSPEC_BW_80;
+ } else if (bw == 160) {
+ chspec_bw = WL_CHANSPEC_BW_160;
+ } else {
+ return 0;
+ }
+
+
+
+ c = tolower(a[0]);
+
+
+ if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+ if (c == 'u' || c == 'l') {
+ a ++;
+ sb_ul = c;
+ goto done_read;
+ }
+ }
+
+
+ if (c == '+') {
+
+ static const char *plus80 = "80/";
+
+
+ chspec_bw = WL_CHANSPEC_BW_8080;
+
+ a ++;
+
+
+ for (i = 0; i < 3; i++) {
+ if (*a++ != *plus80++) {
+ return 0;
+ }
+ }
+
+
+ if (!read_uint(&a, &ch1))
+ return 0;
+
+
+ if (a[0] != '-')
+ return 0;
+ a ++;
+
+
+ if (!read_uint(&a, &ch2))
+ return 0;
+ }
+
+done_read:
+
+ while (a[0] == ' ') {
+ a ++;
+ }
+
+
+ if (a[0] != '\0')
+ return 0;
+
+
+
+
+ if (sb_ul != '\0') {
+ if (sb_ul == 'l') {
+ chspec_ch = UPPER_20_SB(ctl_ch);
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+ } else if (sb_ul == 'u') {
+ chspec_ch = LOWER_20_SB(ctl_ch);
+ chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+ }
+ }
+
+ else if (chspec_bw == WL_CHANSPEC_BW_20) {
+ chspec_ch = ctl_ch;
+ chspec_sb = 0;
+ }
+
+ else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+
+ const uint8 *center_ch = NULL;
+ int num_ch = 0;
+ int sb = -1;
+
+ if (chspec_bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < num_ch; i ++) {
+ sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+ if (sb >= 0) {
+ chspec_ch = center_ch[i];
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ break;
+ }
+ }
+
+
+ if (sb < 0) {
+ return 0;
+ }
+ }
+
+ else {
+ int ch1_id = 0, ch2_id = 0;
+ int sb;
+
+ ch1_id = channel_80mhz_to_id(ch1);
+ ch2_id = channel_80mhz_to_id(ch2);
+
+
+ if (ch1 >= ch2 || ch1_id < 0 || ch2_id < 0)
+ return 0;
+
+
+ chspec_ch = (((uint16)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+ ((uint16)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+
+
+
+ sb = channel_to_sb(ch1, ctl_ch, bw);
+ if (sb < 0) {
+
+ sb = channel_to_sb(ch2, ctl_ch, bw);
+ if (sb < 0) {
+
+ return 0;
+ }
+
+ sb += 4;
+ }
+
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+ }
+
+ chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+ if (wf_chspec_malformed(chspec))
+ return 0;
+
+ return chspec;
+}
+
+
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+ uint chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+
+ if (CHSPEC_IS2G(chanspec)) {
+
+ if (chspec_bw != WL_CHANSPEC_BW_20 &&
+ chspec_bw != WL_CHANSPEC_BW_40) {
+ return TRUE;
+ }
+ } else if (CHSPEC_IS5G(chanspec)) {
+ if (chspec_bw == WL_CHANSPEC_BW_8080) {
+ uint ch1_id, ch2_id;
+
+
+ ch1_id = CHSPEC_CHAN1(chanspec);
+ ch2_id = CHSPEC_CHAN2(chanspec);
+ if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+ return TRUE;
+
+
+ if (ch2_id <= ch1_id)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+ chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+ if (chspec_ch > MAXCHANNEL) {
+ return TRUE;
+ }
+ } else {
+
+ return TRUE;
+ }
+ } else {
+
+ return TRUE;
+ }
+
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+ if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+ uint chspec_bw = CHSPEC_BW(chanspec);
+ uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+ if (wf_chspec_malformed(chanspec))
+ return FALSE;
+
+ if (CHSPEC_IS2G(chanspec)) {
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (chspec_ch >= 1 && chspec_ch <= 14)
+ return TRUE;
+ } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+ if (chspec_ch >= 3 && chspec_ch <= 11)
+ return TRUE;
+ }
+ } else if (CHSPEC_IS5G(chanspec)) {
+ if (chspec_bw == WL_CHANSPEC_BW_8080) {
+ uint16 ch1, ch2;
+
+ ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+ ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+
+ if (ch2 > ch1 + CH_80MHZ_APART)
+ return TRUE;
+ } else {
+ const uint8 *center_ch;
+ uint num_ch, i;
+
+ if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+ center_ch = wf_5g_40m_chans;
+ num_ch = WF_NUM_5G_40M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ center_ch = wf_5g_80m_chans;
+ num_ch = WF_NUM_5G_80M_CHANS;
+ } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ center_ch = wf_5g_160m_chans;
+ num_ch = WF_NUM_5G_160M_CHANS;
+ } else {
+
+ return FALSE;
+ }
+
+
+ if (chspec_bw == WL_CHANSPEC_BW_20) {
+
+ for (i = 0; i < num_ch; i ++) {
+ if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+ chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+ break;
+ }
+
+ if (i == num_ch) {
+
+ if (chspec_ch == 34 || chspec_ch == 38 ||
+ chspec_ch == 42 || chspec_ch == 46)
+ i = 0;
+ }
+ } else {
+
+ for (i = 0; i < num_ch; i ++) {
+ if (chspec_ch == center_ch[i])
+ break;
+ }
+ }
+
+ if (i < num_ch) {
+
+ return TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+}
+
+
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint center_chan;
+ uint bw_mhz;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (CHSPEC_IS20(chspec)) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+ sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+ if (CHSPEC_IS8080(chspec)) {
+ bw_mhz = 80;
+
+ if (sb < 4) {
+ center_chan = CHSPEC_CHAN1(chspec);
+ }
+ else {
+ center_chan = CHSPEC_CHAN2(chspec);
+ sb -= 4;
+ }
+
+
+ center_chan = wf_5g_80m_chans[center_chan];
+ }
+ else {
+ bw_mhz = bw_chspec_to_mhz(chspec);
+ center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+ }
+
+ return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+ }
+}
+
+
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = chspec;
+ uint8 ctl_chan;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+
+ if (!CHSPEC_IS20(chspec)) {
+ ctl_chan = wf_chspec_ctlchan(chspec);
+ ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+#endif
+
+
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec40 = chspec;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ if (CHSPEC_IS80(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+ sb = CHSPEC_CTL_SB(chspec);
+
+ if (sb == WL_CHANSPEC_CTL_SB_UL) {
+
+ sb = WL_CHANSPEC_CTL_SB_L;
+ center_chan += CH_20MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UU) {
+
+ sb = WL_CHANSPEC_CTL_SB_U;
+ center_chan += CH_20MHZ_APART;
+ } else {
+
+
+ center_chan -= CH_20MHZ_APART;
+ }
+
+
+ chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+ sb | center_chan);
+ }
+
+ return chspec40;
+}
+
+
+/* Convert a frequency in MHz to an IEEE channel number given
+ * start_factor (band start frequency in half-MHz units); a
+ * start_factor of 0 infers the band from the frequency. Returns -1
+ * when freq does not land exactly on a channel of that band.
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base; /* band start frequency in MHz */
+ int offset;
+
+ /* infer the band when the caller did not specify one */
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ /* 2.4GHz channel 14 is a special case: 2484 MHz, off the 5MHz grid */
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+ /* reject frequencies outside the band's 1GHz window */
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+ /* must sit exactly on the 5MHz channel raster */
+ if (offset != (ch * 5))
+ return -1;
+
+ /* 2.4GHz band channels 1..13 only (14 handled above) */
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
+
+
+/* Convert an IEEE channel number to a frequency in MHz given
+ * start_factor (band start in half-MHz units). 2.4GHz channel 14 maps
+ * to 2484 MHz; returns -1 for channels that are invalid for the band.
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (ch > 200))
+ freq = -1;
+ else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+ freq = 2484; /* channel 14 is off the 5MHz raster */
+ else
+ freq = ch * 5 + start_factor / 2;
+
+ return freq;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
new file mode 100644
index 00000000000000..c7970474128f85
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -0,0 +1,345 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools & Apps.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _bcmwifi_channels_h_
+#define _bcmwifi_channels_h_
+
+
+
+typedef uint16 chanspec_t;
+
+
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_80MHZ_APART 16
+#define CH_40MHZ_APART 8
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1
+#define CH_MAX_2G_CHANNEL 14
+#define MAXCHANNEL 224
+#define CHSPEC_CTLOVLP(sp1, sp2, sep) ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep)
+
+
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0300
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_CHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_CHANSPEC_BW_MASK 0x0C00
+#define WL_CHANSPEC_BW_SHIFT 10
+#define WL_CHANSPEC_BW_10 0x0400
+#define WL_CHANSPEC_BW_20 0x0800
+#define WL_CHANSPEC_BW_40 0x0C00
+
+#define WL_CHANSPEC_BAND_MASK 0xf000
+#define WL_CHANSPEC_BAND_SHIFT 12
+#define WL_CHANSPEC_BAND_5G 0x1000
+#define WL_CHANSPEC_BAND_2G 0x2000
+#define INVCHANSPEC 255
+
+
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+
+
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+
+#endif
+
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
+#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
+#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
+#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
+ (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
+ (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+#define CHANSPEC_STR_LEN 8
+
+#else
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+#define WL_CHANSPEC_CHAN1_MASK 0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT 0
+#define WL_CHANSPEC_CHAN2_MASK 0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT 4
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LLL 0x0000
+#define WL_CHANSPEC_CTL_SB_LLU 0x0100
+#define WL_CHANSPEC_CTL_SB_LUL 0x0200
+#define WL_CHANSPEC_CTL_SB_LUU 0x0300
+#define WL_CHANSPEC_CTL_SB_ULL 0x0400
+#define WL_CHANSPEC_CTL_SB_ULU 0x0500
+#define WL_CHANSPEC_CTL_SB_UUL 0x0600
+#define WL_CHANSPEC_CTL_SB_UUU 0x0700
+#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
+
+#define WL_CHANSPEC_BW_MASK 0x3800
+#define WL_CHANSPEC_BW_SHIFT 11
+#define WL_CHANSPEC_BW_5 0x0000
+#define WL_CHANSPEC_BW_10 0x0800
+#define WL_CHANSPEC_BW_20 0x1000
+#define WL_CHANSPEC_BW_40 0x1800
+#define WL_CHANSPEC_BW_80 0x2000
+#define WL_CHANSPEC_BW_160 0x2800
+#define WL_CHANSPEC_BW_8080 0x3000
+
+#define WL_CHANSPEC_BAND_MASK 0xc000
+#define WL_CHANSPEC_BAND_SHIFT 14
+#define WL_CHANSPEC_BAND_2G 0x0000
+#define WL_CHANSPEC_BAND_3G 0x4000
+#define WL_CHANSPEC_BAND_4G 0x8000
+#define WL_CHANSPEC_BAND_5G 0xc000
+#define INVCHANSPEC 255
+
+
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \
+ ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK)
+#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK)
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) 0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) 0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) 0
+#endif
+
+#else
+
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#endif
+
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+
+#define CHANSPEC_STR_LEN 20
+
+
+
+#define WL_LCHANSPEC_CHAN_MASK 0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT 0
+
+#define WL_LCHANSPEC_CTL_SB_MASK 0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT 8
+#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_LCHANSPEC_BW_MASK 0x0C00
+#define WL_LCHANSPEC_BW_SHIFT 10
+#define WL_LCHANSPEC_BW_10 0x0400
+#define WL_LCHANSPEC_BW_20 0x0800
+#define WL_LCHANSPEC_BW_40 0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK 0xf000
+#define WL_LCHANSPEC_BAND_SHIFT 12
+#define WL_LCHANSPEC_BAND_5G 0x1000
+#define WL_LCHANSPEC_BAND_2G 0x2000
+
+#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#endif
+
+
+
+
+#define WF_CHAN_FACTOR_2_4_G 4814
+
+
+#define WF_CHAN_FACTOR_5_G 10000
+
+
+#define WF_CHAN_FACTOR_4_G 8000
+
+
+#define WLC_MAXRATE 108
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+#define WLC_2G_25MHZ_OFFSET 5
+
+
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+
+extern chanspec_t wf_chspec_aton(const char *a);
+
+
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
new file mode 100644
index 00000000000000..9896b2360a6a5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h
@@ -0,0 +1,306 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_rates.h 252708 2011-04-12 06:45:56Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define WL_RATESET_SZ_DSSS 4
+#define WL_RATESET_SZ_OFDM 8
+#define WL_RATESET_SZ_HT_MCS 8
+#define WL_RATESET_SZ_VHT_MCS 10
+
+#define WL_TX_CHAINS_MAX 3
+
+#define WL_RATE_DISABLED (-128)
+
+
+/* Transmit bandwidth / sub-band selections used to index the rate tables. */
+typedef enum wl_tx_bw {
+	WL_TX_BW_20,
+	WL_TX_BW_40,
+	WL_TX_BW_80,
+	WL_TX_BW_20IN40,	/* 20MHz sub-band of a 40MHz channel */
+	WL_TX_BW_20IN80,	/* 20MHz sub-band of an 80MHz channel */
+	WL_TX_BW_40IN80,	/* 40MHz sub-band of an 80MHz channel */
+	WL_TX_BW_ALL
+} wl_tx_bw_t;
+
+
+
+/* Transmit expansion mode applied across the tx chains. */
+typedef enum wl_tx_mode {
+	WL_TX_MODE_NONE,
+	WL_TX_MODE_STBC,	/* space-time block coding */
+	WL_TX_MODE_CDD,		/* cyclic delay diversity */
+	WL_TX_MODE_SDM		/* spatial division multiplexing */
+} wl_tx_mode_t;
+
+
+
+typedef enum wl_tx_chains {
+ WL_TX_CHAINS_1 = 1,
+ WL_TX_CHAINS_2,
+ WL_TX_CHAINS_3
+} wl_tx_chains_t;
+
+
+
+typedef enum wl_tx_nss {
+ WL_TX_NSS_1 = 1,
+ WL_TX_NSS_2,
+ WL_TX_NSS_3
+} wl_tx_nss_t;
+
+
+typedef enum clm_rates {
+
+
+
+ WL_RATE_1X1_DSSS_1 = 0,
+ WL_RATE_1X1_DSSS_2 = 1,
+ WL_RATE_1X1_DSSS_5_5 = 2,
+ WL_RATE_1X1_DSSS_11 = 3,
+
+ WL_RATE_1X1_OFDM_6 = 4,
+ WL_RATE_1X1_OFDM_9 = 5,
+ WL_RATE_1X1_OFDM_12 = 6,
+ WL_RATE_1X1_OFDM_18 = 7,
+ WL_RATE_1X1_OFDM_24 = 8,
+ WL_RATE_1X1_OFDM_36 = 9,
+ WL_RATE_1X1_OFDM_48 = 10,
+ WL_RATE_1X1_OFDM_54 = 11,
+
+ WL_RATE_1X1_MCS0 = 12,
+ WL_RATE_1X1_MCS1 = 13,
+ WL_RATE_1X1_MCS2 = 14,
+ WL_RATE_1X1_MCS3 = 15,
+ WL_RATE_1X1_MCS4 = 16,
+ WL_RATE_1X1_MCS5 = 17,
+ WL_RATE_1X1_MCS6 = 18,
+ WL_RATE_1X1_MCS7 = 19,
+
+ WL_RATE_1X1_VHT0SS1 = 12,
+ WL_RATE_1X1_VHT1SS1 = 13,
+ WL_RATE_1X1_VHT2SS1 = 14,
+ WL_RATE_1X1_VHT3SS1 = 15,
+ WL_RATE_1X1_VHT4SS1 = 16,
+ WL_RATE_1X1_VHT5SS1 = 17,
+ WL_RATE_1X1_VHT6SS1 = 18,
+ WL_RATE_1X1_VHT7SS1 = 19,
+ WL_RATE_1X1_VHT8SS1 = 20,
+ WL_RATE_1X1_VHT9SS1 = 21,
+
+
+
+
+
+ WL_RATE_1X2_DSSS_1 = 22,
+ WL_RATE_1X2_DSSS_2 = 23,
+ WL_RATE_1X2_DSSS_5_5 = 24,
+ WL_RATE_1X2_DSSS_11 = 25,
+
+ WL_RATE_1X2_CDD_OFDM_6 = 26,
+ WL_RATE_1X2_CDD_OFDM_9 = 27,
+ WL_RATE_1X2_CDD_OFDM_12 = 28,
+ WL_RATE_1X2_CDD_OFDM_18 = 29,
+ WL_RATE_1X2_CDD_OFDM_24 = 30,
+ WL_RATE_1X2_CDD_OFDM_36 = 31,
+ WL_RATE_1X2_CDD_OFDM_48 = 32,
+ WL_RATE_1X2_CDD_OFDM_54 = 33,
+
+ WL_RATE_1X2_CDD_MCS0 = 34,
+ WL_RATE_1X2_CDD_MCS1 = 35,
+ WL_RATE_1X2_CDD_MCS2 = 36,
+ WL_RATE_1X2_CDD_MCS3 = 37,
+ WL_RATE_1X2_CDD_MCS4 = 38,
+ WL_RATE_1X2_CDD_MCS5 = 39,
+ WL_RATE_1X2_CDD_MCS6 = 40,
+ WL_RATE_1X2_CDD_MCS7 = 41,
+
+ WL_RATE_1X2_VHT0SS1 = 34,
+ WL_RATE_1X2_VHT1SS1 = 35,
+ WL_RATE_1X2_VHT2SS1 = 36,
+ WL_RATE_1X2_VHT3SS1 = 37,
+ WL_RATE_1X2_VHT4SS1 = 38,
+ WL_RATE_1X2_VHT5SS1 = 39,
+ WL_RATE_1X2_VHT6SS1 = 40,
+ WL_RATE_1X2_VHT7SS1 = 41,
+ WL_RATE_1X2_VHT8SS1 = 42,
+ WL_RATE_1X2_VHT9SS1 = 43,
+
+
+ WL_RATE_2X2_STBC_MCS0 = 44,
+ WL_RATE_2X2_STBC_MCS1 = 45,
+ WL_RATE_2X2_STBC_MCS2 = 46,
+ WL_RATE_2X2_STBC_MCS3 = 47,
+ WL_RATE_2X2_STBC_MCS4 = 48,
+ WL_RATE_2X2_STBC_MCS5 = 49,
+ WL_RATE_2X2_STBC_MCS6 = 50,
+ WL_RATE_2X2_STBC_MCS7 = 51,
+
+ WL_RATE_2X2_STBC_VHT0SS1 = 44,
+ WL_RATE_2X2_STBC_VHT1SS1 = 45,
+ WL_RATE_2X2_STBC_VHT2SS1 = 46,
+ WL_RATE_2X2_STBC_VHT3SS1 = 47,
+ WL_RATE_2X2_STBC_VHT4SS1 = 48,
+ WL_RATE_2X2_STBC_VHT5SS1 = 49,
+ WL_RATE_2X2_STBC_VHT6SS1 = 50,
+ WL_RATE_2X2_STBC_VHT7SS1 = 51,
+ WL_RATE_2X2_STBC_VHT8SS1 = 52,
+ WL_RATE_2X2_STBC_VHT9SS1 = 53,
+
+ WL_RATE_2X2_SDM_MCS8 = 54,
+ WL_RATE_2X2_SDM_MCS9 = 55,
+ WL_RATE_2X2_SDM_MCS10 = 56,
+ WL_RATE_2X2_SDM_MCS11 = 57,
+ WL_RATE_2X2_SDM_MCS12 = 58,
+ WL_RATE_2X2_SDM_MCS13 = 59,
+ WL_RATE_2X2_SDM_MCS14 = 60,
+ WL_RATE_2X2_SDM_MCS15 = 61,
+
+ WL_RATE_2X2_VHT0SS2 = 54,
+ WL_RATE_2X2_VHT1SS2 = 55,
+ WL_RATE_2X2_VHT2SS2 = 56,
+ WL_RATE_2X2_VHT3SS2 = 57,
+ WL_RATE_2X2_VHT4SS2 = 58,
+ WL_RATE_2X2_VHT5SS2 = 59,
+ WL_RATE_2X2_VHT6SS2 = 60,
+ WL_RATE_2X2_VHT7SS2 = 61,
+ WL_RATE_2X2_VHT8SS2 = 62,
+ WL_RATE_2X2_VHT9SS2 = 63,
+
+
+
+
+
+ WL_RATE_1X3_DSSS_1 = 64,
+ WL_RATE_1X3_DSSS_2 = 65,
+ WL_RATE_1X3_DSSS_5_5 = 66,
+ WL_RATE_1X3_DSSS_11 = 67,
+
+ WL_RATE_1X3_CDD_OFDM_6 = 68,
+ WL_RATE_1X3_CDD_OFDM_9 = 69,
+ WL_RATE_1X3_CDD_OFDM_12 = 70,
+ WL_RATE_1X3_CDD_OFDM_18 = 71,
+ WL_RATE_1X3_CDD_OFDM_24 = 72,
+ WL_RATE_1X3_CDD_OFDM_36 = 73,
+ WL_RATE_1X3_CDD_OFDM_48 = 74,
+ WL_RATE_1X3_CDD_OFDM_54 = 75,
+
+ WL_RATE_1X3_CDD_MCS0 = 76,
+ WL_RATE_1X3_CDD_MCS1 = 77,
+ WL_RATE_1X3_CDD_MCS2 = 78,
+ WL_RATE_1X3_CDD_MCS3 = 79,
+ WL_RATE_1X3_CDD_MCS4 = 80,
+ WL_RATE_1X3_CDD_MCS5 = 81,
+ WL_RATE_1X3_CDD_MCS6 = 82,
+ WL_RATE_1X3_CDD_MCS7 = 83,
+
+ WL_RATE_1X3_VHT0SS1 = 76,
+ WL_RATE_1X3_VHT1SS1 = 77,
+ WL_RATE_1X3_VHT2SS1 = 78,
+ WL_RATE_1X3_VHT3SS1 = 79,
+ WL_RATE_1X3_VHT4SS1 = 80,
+ WL_RATE_1X3_VHT5SS1 = 81,
+ WL_RATE_1X3_VHT6SS1 = 82,
+ WL_RATE_1X3_VHT7SS1 = 83,
+ WL_RATE_1X3_VHT8SS1 = 84,
+ WL_RATE_1X3_VHT9SS1 = 85,
+
+
+ WL_RATE_2X3_STBC_MCS0 = 86,
+ WL_RATE_2X3_STBC_MCS1 = 87,
+ WL_RATE_2X3_STBC_MCS2 = 88,
+ WL_RATE_2X3_STBC_MCS3 = 89,
+ WL_RATE_2X3_STBC_MCS4 = 90,
+ WL_RATE_2X3_STBC_MCS5 = 91,
+ WL_RATE_2X3_STBC_MCS6 = 92,
+ WL_RATE_2X3_STBC_MCS7 = 93,
+
+ WL_RATE_2X3_STBC_VHT0SS1 = 86,
+ WL_RATE_2X3_STBC_VHT1SS1 = 87,
+ WL_RATE_2X3_STBC_VHT2SS1 = 88,
+ WL_RATE_2X3_STBC_VHT3SS1 = 89,
+ WL_RATE_2X3_STBC_VHT4SS1 = 90,
+ WL_RATE_2X3_STBC_VHT5SS1 = 91,
+ WL_RATE_2X3_STBC_VHT6SS1 = 92,
+ WL_RATE_2X3_STBC_VHT7SS1 = 93,
+ WL_RATE_2X3_STBC_VHT8SS1 = 94,
+ WL_RATE_2X3_STBC_VHT9SS1 = 95,
+
+ WL_RATE_2X3_SDM_MCS8 = 96,
+ WL_RATE_2X3_SDM_MCS9 = 97,
+ WL_RATE_2X3_SDM_MCS10 = 98,
+ WL_RATE_2X3_SDM_MCS11 = 99,
+ WL_RATE_2X3_SDM_MCS12 = 100,
+ WL_RATE_2X3_SDM_MCS13 = 101,
+ WL_RATE_2X3_SDM_MCS14 = 102,
+ WL_RATE_2X3_SDM_MCS15 = 103,
+
+ WL_RATE_2X3_VHT0SS2 = 96,
+ WL_RATE_2X3_VHT1SS2 = 97,
+ WL_RATE_2X3_VHT2SS2 = 98,
+ WL_RATE_2X3_VHT3SS2 = 99,
+ WL_RATE_2X3_VHT4SS2 = 100,
+ WL_RATE_2X3_VHT5SS2 = 101,
+ WL_RATE_2X3_VHT6SS2 = 102,
+ WL_RATE_2X3_VHT7SS2 = 103,
+ WL_RATE_2X3_VHT8SS2 = 104,
+ WL_RATE_2X3_VHT9SS2 = 105,
+
+
+ WL_RATE_3X3_SDM_MCS16 = 106,
+ WL_RATE_3X3_SDM_MCS17 = 107,
+ WL_RATE_3X3_SDM_MCS18 = 108,
+ WL_RATE_3X3_SDM_MCS19 = 109,
+ WL_RATE_3X3_SDM_MCS20 = 110,
+ WL_RATE_3X3_SDM_MCS21 = 111,
+ WL_RATE_3X3_SDM_MCS22 = 112,
+ WL_RATE_3X3_SDM_MCS23 = 113,
+
+ WL_RATE_3X3_VHT0SS3 = 106,
+ WL_RATE_3X3_VHT1SS3 = 107,
+ WL_RATE_3X3_VHT2SS3 = 108,
+ WL_RATE_3X3_VHT3SS3 = 109,
+ WL_RATE_3X3_VHT4SS3 = 110,
+ WL_RATE_3X3_VHT5SS3 = 111,
+ WL_RATE_3X3_VHT6SS3 = 112,
+ WL_RATE_3X3_VHT7SS3 = 113,
+ WL_RATE_3X3_VHT8SS3 = 114,
+ WL_RATE_3X3_VHT9SS3 = 115,
+
+
+ WL_NUMRATES = 116
+} clm_rates_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h
new file mode 100644
index 00000000000000..27281465497650
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd.h
@@ -0,0 +1,778 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h 327089 2012-04-12 02:05:07Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+
+#define ALL_INTERFACES 0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+
+
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+ DHD_BUS_DOWN, /* Not ready for frame transfers */
+ DHD_BUS_LOAD, /* Download access only (CPU reset) */
+ DHD_BUS_DATA /* Ready for frame transfers */
+};
+
+
+/* Firmware requested operation mode */
+#define STA_MASK 0x0001
+#define HOSTAPD_MASK 0x0002
+#define WFD_MASK 0x0004
+#define SOFTAP_FW_MASK 0x0008
+#define P2P_GO_ENABLED 0x0010
+#define P2P_GC_ENABLED 0x0020
+#define CONCURENT_MASK 0x00F0
+
+/* max sequential rxcntl timeouts to set HANG event */
+#define MAX_CNTL_TIMEOUT 2
+
+#define DHD_SCAN_ACTIVE_TIME 40 /* ms : Embedded default Active setting from DHD Driver */
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD Driver */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY (10) /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS (2000) /* ms: time out in waiting wifi to come up */
+#endif
+
+enum dhd_bus_wake_state {
+ WAKE_LOCK_OFF,
+ WAKE_LOCK_PRIV,
+ WAKE_LOCK_DPC,
+ WAKE_LOCK_IOCTL,
+ WAKE_LOCK_DOWNLOAD,
+ WAKE_LOCK_TMOUT,
+ WAKE_LOCK_WATCHDOG,
+ WAKE_LOCK_LINK_DOWN_TMOUT,
+ WAKE_LOCK_PNO_FIND_TMOUT,
+ WAKE_LOCK_SOFTAP_SET,
+ WAKE_LOCK_SOFTAP_STOP,
+ WAKE_LOCK_SOFTAP_START,
+ WAKE_LOCK_SOFTAP_THREAD,
+ WAKE_LOCK_MAX
+};
+
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF,
+ DHD_PREALLOC_DATABUF,
+ DHD_PREALLOC_OSL_BUF
+};
+
+typedef enum {
+ DHD_IF_NONE = 0,
+ DHD_IF_ADD,
+ DHD_IF_DEL,
+ DHD_IF_CHANGE,
+ DHD_IF_DELETING
+} dhd_if_state_t;
+
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+
+uint8* dhd_os_prealloc(void *osh, int section, uint size);
+void dhd_os_prefree(void *osh, void *addr, uint size);
+#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size)
+#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size)
+
+#else
+
+#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size)
+#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size)
+
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+
+/* host packet reordering logic */
+/* followed the structure to hold the reorder buffers (void **p) */
+typedef struct reorder_info {
+	void **p;		/* array of buffered out-of-order packets */
+	uint8 flow_id;		/* identifier of the reorder flow */
+	uint8 cur_idx;		/* current index -- NOTE(review): exact index
+				 * semantics live in the dhd rx reorder code;
+				 * confirm against dhd_linux.c before relying
+				 * on these field descriptions
+				 */
+	uint8 exp_idx;		/* next expected (in-order) index */
+	uint8 max_idx;		/* presumably the highest valid index; verify */
+	uint8 pend_pkts;	/* count of packets currently held in p[] */
+} reorder_info_t;
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+ osl_t *osh; /* OSL handle */
+ struct dhd_bus *bus; /* Bus module handle */
+ struct dhd_prot *prot; /* Protocol module handle */
+ struct dhd_info *info; /* Info module handle */
+
+ /* Internal dhd items */
+ bool up; /* Driver up/down (to OS) */
+ bool txoff; /* Transmit flow-controlled */
+ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
+ enum dhd_bus_state busstate;
+ uint hdrlen; /* Total DHD header length (proto + bus) */
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ uint rxsz; /* Rx buffer size bus module should use */
+ uint8 wme_dp; /* wme discard priority */
+
+ /* Dongle media info */
+ bool iswl; /* Dongle-resident driver is wl */
+ ulong drv_version; /* Version of dongle-resident driver */
+ struct ether_addr mac; /* MAC address obtained from dongle */
+ dngl_stats_t dstats; /* Stats for dongle-based data */
+
+ /* Additional stats for the bus level */
+ ulong tx_packets; /* Data packets sent to dongle */
+ ulong tx_multicast; /* Multicast data packets sent to dongle */
+ ulong tx_errors; /* Errors in sending data to dongle */
+ ulong tx_ctlpkts; /* Control packets sent to dongle */
+ ulong tx_ctlerrs; /* Errors sending control frames to dongle */
+ ulong rx_packets; /* Packets sent up the network interface */
+ ulong rx_multicast; /* Multicast packets sent up the network interface */
+ ulong rx_errors; /* Errors processing rx data packets */
+ ulong rx_ctlpkts; /* Control frames processed from dongle */
+ ulong rx_ctlerrs; /* Errors in processing rx control frames */
+ ulong rx_dropped; /* Packets dropped locally (no memory) */
+ ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
+ ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
+
+ ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
+ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
+ ulong fc_packets; /* Number of flow control pkts recvd */
+
+ /* Last error return */
+ int bcmerror;
+ uint tickcnt;
+
+ /* Last error from dongle */
+ int dongle_error;
+
+ uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+ /* Suspend disable flag and "in suspend" flag */
+ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+ int in_suspend; /* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+ int pno_enable; /* pno status : "1" is pno enable */
+ int pno_suspend; /* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+ int dtim_skip; /* dtim skip , default 0 means wake each dtim */
+
+	/* Pkt filter definition */
+ char * pktfilter[100];
+ int pktfilter_count;
+
+ wl_country_t dhd_cspec; /* Current Locale info */
+ char eventmask[WL_EVENTING_MASK_LEN];
+ int op_mode; /* STA, HostAPD, WFD, SoftAP */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+ struct wake_lock wakelock[WAKE_LOCK_MAX];
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif
+
+#ifdef WLBTAMP
+ uint16 maxdatablks;
+#endif /* WLBTAMP */
+#ifdef PROP_TXSTATUS
+ int wlfc_enabled;
+ void* wlfc_state;
+#endif
+ bool dongle_isolation;
+ int hang_was_sent;
+ int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
+ int txcnt_timeout; /* counter txcnt timeout to send HANG */
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+ struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+} dhd_pub_t;
+
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+ #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define _DHD_PM_RESUME_WAIT(a, b) do {\
+ int retry = 0; \
+ SMP_RD_BARRIER_DEPENDS(); \
+ while (dhd_mmc_suspend && retry++ != b) { \
+ SMP_RD_BARRIER_DEPENDS(); \
+ wait_event_interruptible_timeout(a, !dhd_mmc_suspend, HZ/100); \
+ } \
+ } while (0)
+ #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0)
+ #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9999; \
+ while ((exp) && (countdown >= 10000)) { \
+ wait_event_interruptible_timeout(a, FALSE, HZ/100); \
+ countdown -= 10000; \
+ } \
+ } while (0)
+
+ #else
+
+ #define DHD_PM_RESUME_WAIT_INIT(a)
+ #define DHD_PM_RESUME_WAIT(a)
+ #define DHD_PM_RESUME_WAIT_FOREVER(a)
+ #define DHD_PM_RESUME_RETURN_ERROR(a)
+ #define DHD_PM_RESUME_RETURN
+
+ #define DHD_SPINWAIT_SLEEP_INIT(a)
+ #define SPINWAIT_SLEEP(a, exp, us) do { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) { \
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+ } while (0)
+
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+#ifndef DHDTHREAD
+#undef SPINWAIT_SLEEP
+#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us)
+#endif /* DHDTHREAD */
+#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/* Wakelock Functions */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val);
+
+/* Initialize the mutex serializing SoftAP/STA setting changes.  Compiles
+ * to a no-op on kernels older than 2.6.25, where the lock does not exist.
+ */
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+/* Acquire the SoftAP/STA settings mutex (no-op before kernel 2.6.25). */
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+/* Release the SoftAP/STA settings mutex (no-op before kernel 2.6.25). */
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_timeout_enable(pub, val)
+#define DHD_PACKET_TIMEOUT_MS 1000
+#define DHD_EVENT_TIMEOUT_MS 1500
+
+/* interface operations (register, remove) should be atomic, use this lock to prevent race
+ * condition among wifi on/off and interface operation functions
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+typedef struct dhd_if_event {
+ uint8 ifidx;
+ uint8 action;
+ uint8 flags;
+ uint8 bssidx;
+ uint8 is_AP;
+} dhd_if_event_t;
+
+typedef enum dhd_attach_states
+{
+ DHD_ATTACH_STATE_INIT = 0x0,
+ DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+ DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+ DHD_ATTACH_STATE_ADD_IF = 0x4,
+ DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+ DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+ DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+ DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+ DHD_ATTACH_STATE_CFG80211 = 0x80,
+ DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+ DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID -1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID -2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* To allow osl_attach/detach calls from os-independent modules */
+osl_t *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(osl_t *osh);
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void * dhd_os_open_image(char * filename);
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_customer_gpio_wlan_ctrl(int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+
+#ifdef PNO_SUPPORT
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_dev_pno_reset(struct net_device *dev);
+extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local,
+ int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled);
+extern int dhd_dev_get_pno_status(struct net_device *dev);
+#endif /* PNO_SUPPORT */
+
+#define DHD_UNICAST_FILTER_NUM 0
+#define DHD_BROADCAST_FILTER_NUM 1
+#define DHD_MULTICAST4_FILTER_NUM 2
+#define DHD_MULTICAST6_FILTER_NUM 3
+extern int net_os_set_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+extern bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+#if defined(OOB_INTR_ONLY)
+extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
+#endif /* defined(OOB_INTR_ONLY) */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+typedef struct {
+ uint32 limit; /* Expiration time (usec) */
+ uint32 increment; /* Current expiration increment (usec) */
+ uint32 elapsed; /* Current elapsed time (usec) */
+ uint32 tick; /* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+ wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+ int ifindex);
+
+extern void dhd_common_init(osl_t *osh);
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+ char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
+extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf);
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+extern void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+extern void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+
+typedef enum cust_gpio_modes {
+ WLAN_RESET_ON,
+ WLAN_RESET_OFF,
+ WLAN_POWER_ON,
+ WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Roaming mode control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#define DHD_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR "null_pkt"
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN 2048
+extern char fw_path[MOD_PARAM_PATHLEN];
+extern char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xe
+#define DHD_BAD_IF -0xf
+
+#ifdef PROP_TXSTATUS
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+ /*
+ b[11 ] - 1 = this packet was sent in response to one time packet request,
+ do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+ b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+ b[9 ] - 1 = packet is host->firmware (transmit direction)
+ - 0 = packet received from firmware (firmware->host)
+ b[8 ] - 1 = packet was sent due to credit_request (pspoll),
+ packet does not count against FIFO credit.
+ - 0 = normal transaction, packet counts against FIFO credit
+ b[7 ] - 1 = AP, 0 = STA
+ b[6:4] - AC FIFO number
+ b[3:0] - interface index
+ */
+ uint16 if_flags;
+ /* destination MAC address for this packet so that not every
+ module needs to open the packet to find this
+ */
+ uint8 dstn_ether[ETHER_ADDR_LEN];
+ /*
+ This 32-bit goes from host to device for every packet.
+ */
+ uint32 htod_tag;
+ /* bus specific stuff */
+ union {
+ struct {
+ void* stuff;
+ uint32 thing1;
+ uint32 thing2;
+ } sd;
+ struct {
+ void* bus;
+ void* urb;
+ } usb;
+ } bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_IFMASK 0xf
+#define DHD_PKTTAG_IFTYPE_MASK 0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT 7
+#define DHD_PKTTAG_FIFO_MASK 0x7
+#define DHD_PKTTAG_FIFO_SHIFT 4
+
+#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
+
+#define DHD_PKTTAG_PKTDIR_MASK 0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT 9
+
+#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
+
+#define DHD_PKTTAG_INVALID_FIFOID 0x7
+
+#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+ (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK)
+#define DHD_PKTTAG_IF(tag) (((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK)
+
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+ (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+ (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+ (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+ (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+ (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+ (dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
+#endif
+
+#endif /* PROP_TXSTATUS */
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock) *lock = 0
+#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \
+ NdisStallExecution(1);
+#define IFUNLOCK(lock) InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+
+#ifdef PNO_SUPPORT
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pnoenable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid,
+ ushort scan_fr, int pno_repeat, int pno_freq_expo_max);
+extern int dhd_pno_get_status(dhd_pub_t *dhd);
+extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
+ ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
+extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
+extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
+#endif /* PNO_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES 8
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#endif /* _dhd_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c
new file mode 100644
index 00000000000000..15c605ea248fd1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.c
@@ -0,0 +1,338 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c 303834 2011-12-20 06:17:39Z $
+ */
+#ifndef WLBTAMP
+#error "WLBTAMP is not defined"
+#endif /* WLBTAMP */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+/* Fully parenthesized so uses such as buf[BTA_HCI_CMD_MAX_LEN + 16]
+ * cannot be broken by operator precedence after expansion.
+ */
+#define BTA_HCI_CMD_MAX_LEN (HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE)
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle.
+ * Returns BCME_BADLEN for truncated/inconsistent commands, otherwise the
+ * ioctl result from the dongle.
+ */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+	uint len = sizeof(buf);
+	wl_ioctl_t ioc;
+
+	/* Need at least a complete HCI command preamble */
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+		return BCME_BADLEN;
+
+	/* Parameter length advertised in the preamble must fit in the buffer */
+	if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+		return BCME_BADLEN;
+
+	len = bcm_mkiovar("HCI_cmd",
+		(char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	/* dhd_wl_ioctl() takes an interface index (see dhd.h prototype);
+	 * HCI commands go out on the primary interface (0).
+	 */
+	return dhd_wl_ioctl(pub, 0, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Discard every queued HCI ACL data packet belonging to logical link 'llh'.
+ * Rotates the bus tx queue in place under the txq lock: each packet is
+ * dequeued, inspected, and either freed (if it is BT-SIG L2CAP ACL data for
+ * 'llh') or re-queued unchanged. The first re-queued packet is remembered as
+ * a marker; the walk stops when it comes around again, so packet order is
+ * preserved for survivors.
+ */
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+	int prec;
+	struct pktq *q;
+	uint count = 0;	/* number of packets flushed (for the trace below) */
+
+	q = dhd_bus_txq(pub->bus);
+	if (q == NULL)
+		return;
+
+	DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+	dhd_os_sdlock_txq(pub);
+
+	/* Walk through the txq and toss all HCI ACL data packets */
+	PKTQ_PREC_ITER(q, prec) {
+		void *head_pkt = NULL;	/* marker: first packet re-queued at this prec */
+
+		while (pktq_ppeek(q, prec) != head_pkt) {
+			void *pkt = pktq_pdeq(q, prec);
+			int ifidx;
+
+			/* Strip bus + protocol headers so the Ethernet frame is visible */
+			PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+			dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
+
+			if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+				struct ether_header *eh =
+				        (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+				/* 802.3 length field (< ETHER_TYPE_MIN) => LLC/SNAP follows */
+				if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+					struct dot11_llc_snap_header *lsh =
+					        (struct dot11_llc_snap_header *)&eh[1];
+
+					if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+					         DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+					    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+						amp_hci_ACL_data_t *ACL_data =
+						        (amp_hci_ACL_data_t *)&lsh[1];
+						uint16 handle = ltoh16(ACL_data->handle);
+
+						/* Matching logical link: drop the packet */
+						if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+							PKTFREE(pub->osh, pkt, TRUE);
+							count ++;
+							continue;
+						}
+					}
+				}
+			}
+
+			/* Not ours: restore the headers stripped above and re-queue */
+			dhd_prot_hdrpush(pub, ifidx, pkt);
+			PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
+
+			if (head_pkt == NULL)
+				head_pkt = pkt;
+			pktq_penq(q, prec, pkt);
+		}
+	}
+
+	dhd_os_sdunlock_txq(pub);
+
+	DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ *      < 0: stop, fail
+ *      > 0: stop, success
+ * Currently only HCI_Enhanced_Flush is intercepted (to flush queued ACL
+ * data for the target logical link); every command is then forwarded.
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+	uint16 opcode = ltoh16_ua((uint8 *)&cmd->opcode);
+
+	if (opcode == HCI_Enhanced_Flush) {
+		eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+
+		dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+	}
+
+	/* Always let the command continue to the dongle */
+	return 0;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle.
+ * Validates the command preamble, lets _dhd_bta_docmd() intercept it
+ * locally, then wraps it in an 802.3 + BT-SIG LLC/SNAP frame and hands it
+ * to dhd_sendpkt() (which takes ownership of the packet).
+ */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+	int status;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+		DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+			len, cmd_len));
+		/* return BCME_BADLEN; */
+	}
+
+	/* Intercept and handle the HCI cmd locally BEFORE allocating the
+	 * packet: the original code allocated first, so both early returns
+	 * below leaked the freshly PKTGET'd packet.
+	 */
+	if ((status = _dhd_bta_docmd(pub, cmd)) > 0)
+		return 0;
+	else if (status < 0)
+		return status;
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+	/* copy in HCI cmd */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(cmd, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	ETHER_SET_LOCALADDR(eh->ether_dhost);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = 0;	/* type 0 marks an HCI command frame */
+
+	return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
+
+/* Send HCI ACL data to dongle via data channel.
+ * Wraps the ACL payload in an 802.3 + BT-SIG LLC/SNAP (L2CAP) frame and
+ * hands it to dhd_sendpkt(), which takes ownership of the packet.
+ */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+	amp_hci_ACL_data_t *acl = (amp_hci_ACL_data_t *)data_buf;
+	osl_t *osh = pub->osh;
+	struct ether_header *ethhdr;
+	struct dot11_llc_snap_header *snap;
+	uint framelen;
+	void *pkt;
+
+	if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+		return BCME_BADLEN;
+	}
+
+	framelen = (uint)ltoh16(acl->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE;
+	if (framelen > data_len) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+			framelen, data_len));
+		/* return BCME_BADLEN; */
+	}
+
+	pkt = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + framelen, TRUE);
+	if (pkt == NULL) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+	/* copy in HCI ACL data header and HCI ACL data */
+	PKTPULL(osh, pkt, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(acl, PKTDATA(osh, pkt), framelen);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, pkt, RFC1042_HDR_LEN);
+	ethhdr = (struct ether_header *)PKTDATA(osh, pkt);
+	bzero(ethhdr->ether_dhost, ETHER_ADDR_LEN);
+	bcopy(&pub->mac, ethhdr->ether_shost, ETHER_ADDR_LEN);
+	ethhdr->ether_type = hton16(framelen + DOT11_LLC_SNAP_HDR_LEN);
+	snap = (struct dot11_llc_snap_header *)&ethhdr[1];
+	bcopy(BT_SIG_SNAP_MPROT, snap, DOT11_LLC_SNAP_HDR_LEN - 2);
+	snap->type = HTON16(BTA_PROT_L2CAP);
+
+	return dhd_sendpkt(pub, 0, pkt);
+}
+
+/* txcomplete callback: synthesize an HCI Number_of_Completed_Data_Blocks
+ * event toward the host for the ACL packet just consumed by the bus.
+ * NOTE(review): 'success' is unused here — the completion event is raised
+ * unconditionally; confirm that is intended for failed transmissions.
+ */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	/* ACL header sits right behind the 802.3 + LLC/SNAP encapsulation */
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+	amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+	uint16 handle = ltoh16(ACL_data->handle);
+	uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+	wl_event_msg_t event;
+	uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+	amp_hci_event_t *evt;
+	num_completed_data_blocks_evt_parms_t *parms;
+
+	uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* update the event struct (multi-byte fields in network byte order) */
+	memset(&event, 0, sizeof(event));
+	event.version = hton16(BCM_EVENT_MSG_VERSION);
+	event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+	event.status = 0;
+	event.reason = 0;
+	event.auth_type = 0;
+	event.datalen = hton32(len);
+	event.flags = 0;
+
+	/* generate Number of Completed Blocks event */
+	evt = (amp_hci_event_t *)data;
+	evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+	evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* one handle entry: one packet / one block completed for 'llh';
+	 * HCI parameter fields are little-endian, hence htol16_ua_store
+	 */
+	parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+	htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+	parms->num_handles = 1;
+	htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+	parms->completed[0].pkts = 1;
+	parms->completed[0].blocks = 1;
+
+	dhd_sendup_event_common(dhdp, &event, data);
+}
+
+/* event callback: inspect HCI events coming up from the dongle.
+ * Handles two event codes; everything else is ignored here.
+ */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+	amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+	if (evt->ecode == HCI_Command_Complete) {
+		cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+
+		/* Cache the dongle's data-block count when the response to
+		 * Read_Data_Block_Size comes back.
+		 */
+		if (ltoh16_ua((uint8 *)&parms->opcode) == HCI_Read_Data_Block_Size) {
+			read_data_block_size_evt_parms_t *parms2 =
+				(read_data_block_size_evt_parms_t *)parms->parms;
+
+			dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+		}
+	} else if (evt->ecode == HCI_Flush_Occurred) {
+		flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+
+		/* Dongle flushed a logical link: drop its queued ACL data too */
+		dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+	}
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h
new file mode 100644
index 00000000000000..0337f15d285a02
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h
new file mode 100644
index 00000000000000..36267719214626
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_bus.h
@@ -0,0 +1,109 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h 313456 2012-02-07 22:03:40Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+extern void dhd_disable_intr(dhd_pub_t *dhd);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus, return TRUE requests reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+
+
+#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
+ (_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c
new file mode 100644
index 00000000000000..3b03cb62de3ad2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c
@@ -0,0 +1,2851 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c 325074 2012-03-31 21:24:57Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
+ * defined in dhd_sdio.c (amount of header tha might be added)
+ * plus any space that might be needed for alignment padding.
+ */
+#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+ * round off at the end of buffer
+ */
+
+#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
+
+#ifdef PROP_TXSTATUS
+/* Per-packet bookkeeping passed around while committing one packet to the
+ * bus under proprietary TX-status flow control (PROP_TXSTATUS).
+ */
+typedef struct dhd_wlfc_commit_info {
+	uint8					needs_hdr;		/* nonzero: packet still needs a wl/BDC header pushed */
+	uint8					ac_fifo_credit_spent;	/* nonzero: this send consumes an AC FIFO credit */
+	ewlfc_packet_state_t	pkt_type;		/* new / delayed / suppressed classification */
+	wlfc_mac_descriptor_t*	mac_entry;		/* destination descriptor the packet charges against */
+	void*					p;				/* the packet itself */
+} dhd_wlfc_commit_info_t;
+#endif /* PROP_TXSTATUS */
+
+/* CDC protocol control state, one per dhd_pub.
+ * NOTE: 'buf' must immediately follow 'msg' in memory: the query path
+ * reads the response payload via &msg[1] (see dhdcdc_query_ioctl) and the
+ * request payload is staged in 'buf' right after the cdc_ioctl_t header.
+ */
+typedef struct dhd_prot {
+	uint16 reqid;			/* free-running ioctl transaction id (CDCF_IOC_ID field) */
+	uint8 pending;			/* TRUE while an ioctl is in flight (single outstanding) */
+	uint32 lastcmd;			/* last ioctl cmd issued, kept for diagnostics */
+	uint8 bus_header[BUS_HEADER_LEN];	/* headroom for bus-layer header + alignment padding */
+	cdc_ioctl_t msg;		/* CDC ioctl header sent to / received from the dongle */
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];	/* ioctl payload area */
+} dhd_prot_t;
+
+
+/* Transmit the CDC ioctl request already staged in prot->msg (header) and
+ * prot->buf (payload) over the bus control channel.  A wake lock is held
+ * for the duration of the transmit.  Returns the dhd_bus_txctl() result
+ * (0 on success, negative on error).
+ */
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	/* total on-wire length: CDC header plus caller-specified payload length */
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *	returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+/* Wait for the CDC ioctl response whose transaction id matches 'id'.
+ * Responses with a stale (non-matching) id are silently consumed and the
+ * receive is retried; a negative dhd_bus_rxctl() result aborts the loop.
+ * On success the response lands in prot->msg (+ trailing payload) and the
+ * rxctl return value is passed back (presumably the received byte count —
+ * see its use as a length in dhdcdc_query_ioctl; TODO confirm against
+ * dhd_bus_rxctl's contract).
+ */
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	int cdc_len = len + sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+	return ret;
+}
+
+/* Issue a GET-style ioctl to the dongle and copy the response into 'buf'.
+ * "bcmerror"/"bcmerrorstr" queries are answered from the locally cached
+ * dhd->dongle_error without touching the dongle.  Stale response ids
+ * smaller than the expected one are retried up to RETRIES times.
+ * Returns >= 0 on success (response length semantics, see below) or a
+ * negative error; dongle-reported errors are returned via msg->status and
+ * also cached in dhd->dongle_error.
+ */
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	void *info;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			/* NOTE(review): strncpy may leave buf unterminated if the
+			 * error string is >= BCME_STRLEN; presumed bounded — confirm.
+			 */
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	/* Build the CDC request header: cmd, length, transaction id, interface
+	 * index and action bits, all little-endian on the wire.
+	 */
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	/* stage request payload (e.g. iovar name) right after the header */
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		if (!dhd->hang_was_sent)
+			DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	/* an older id means we picked up a leftover response; retry a few times */
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check info buffer: response payload follows the cdc_ioctl_t header */
+	info = (void*)&msg[1];
+
+	/* Copy info buffer; ret is treated as the received length here, so the
+	 * copy is clamped to min(ret, len).
+	 */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, info, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+/* Issue a SET-style ioctl to the dongle.  Refuses to talk to the chip when
+ * the bus is down or a HANG event was already sent up.  Mirrors
+ * dhdcdc_query_ioctl() but sets CDCF_IOC_SET and does no response-payload
+ * copy-back; dongle errors are returned via msg->status and cached.
+ */
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	/* build the CDC header; little-endian on the wire */
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	/* stage the SET payload after the header */
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	/* unlike the query path there is no stale-id retry here */
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+/* Public ioctl entry point for the CDC protocol layer.  Serializes access
+ * via prot->pending (single outstanding transaction, as promised by the
+ * dhd_bus.h contract), dispatches to the set/query helpers, normalizes the
+ * return value so that success is always 0, and intercepts the "wme_dp"
+ * iovar to mirror its value into dhd->wme_dp.
+ */
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+#if defined(NDIS630)
+	bool acquired = FALSE;
+#endif
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+#if defined(NDIS630)
+	/* NDIS builds serialize with a protocol mutex instead of the
+	 * 'pending' flag alone.
+	 */
+	if (dhd_os_proto_block(dhd))
+	{
+		acquired = TRUE;
+	}
+	else
+	{
+		/* attempt to acquire protocol mutex timed out. */
+		ret = -1;
+		return ret;
+	}
+#endif /* NDIS630 */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	/* reject overlapping transactions; prot->buf/msg are shared state */
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		/* iovar payload layout: "wme_dp\0" followed by an int value */
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+#if defined(NDIS630)
+	if (acquired)
+		dhd_os_proto_unblock(dhd);
+#endif
+	return ret;
+}
+
+/* Protocol-level iovar hook (see dhd_bus_iovar_op in dhd_bus.h).  CDC has
+ * no protocol-local iovars, so this is a stub that always reports
+ * BCME_UNSUPPORTED.
+ */
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+#ifdef PROP_TXSTATUS
+/* Dump the whole PROP_TXSTATUS (wlfc) state — hanger counters, per-AC
+ * send-queue/credit stats, and every occupied interface and MAC descriptor
+ * with its delay/suppress sub-queues — into 'strbuf' for debugfs/iovar
+ * consumption.  Purely read-only diagnostics.
+ */
+void
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhdp->wlfc_state;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	/* index by iftype; assumes role codes 0..WLC_E_IF_ROLE_P2P_CLIENT map
+	 * onto these five entries — TODO confirm against wlfc_proto.h
+	 */
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (wlfc == NULL) {
+		bcm_bprintf(strbuf, "wlfc not initialized yet\n");
+		return;
+	}
+	h = (wlfc_hanger_t*)wlfc->hanger;
+	if (h == NULL) {
+		/* not fatal: hanger stats are simply skipped below */
+		bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+	}
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+	if (h) {
+		bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+			"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+			h->pushed,
+			h->popped,
+			h->failed_to_push,
+			h->failed_to_pop,
+			h->failed_slotfind,
+			(h->pushed - h->popped));
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,sendq_full, rollback_fail) = (%d,%d,%d,%d), (%d,%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.sendq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "SENDQ (len,credit,sent) "
+		"(AC0[%d,%d,%d],AC1[%d,%d,%d],AC2[%d,%d,%d],AC3[%d,%d,%d],BC_MC[%d,%d,%d])\n",
+		wlfc->SENDQ.q[0].len, wlfc->FIFO_credit[0], wlfc->stats.sendq_pkts[0],
+		wlfc->SENDQ.q[1].len, wlfc->FIFO_credit[1], wlfc->stats.sendq_pkts[1],
+		wlfc->SENDQ.q[2].len, wlfc->FIFO_credit[2], wlfc->stats.sendq_pkts[2],
+		wlfc->SENDQ.q[3].len, wlfc->FIFO_credit[3], wlfc->stats.sendq_pkts[3],
+		wlfc->SENDQ.q[4].len, wlfc->FIFO_credit[4], wlfc->stats.sendq_pkts[4]);
+
+#ifdef PROP_TXSTATUS_DEBUG
+	bcm_bprintf(strbuf, "SENDQ dropped: AC[0-3]:(%d,%d,%d,%d), (bcmc,atim):(%d,%d)\n",
+		wlfc->stats.dropped_qfull[0], wlfc->stats.dropped_qfull[1],
+		wlfc->stats.dropped_qfull[2], wlfc->stats.dropped_qfull[3],
+		wlfc->stats.dropped_qfull[4], wlfc->stats.dropped_qfull[5]);
+#endif
+
+	/* per-interface descriptors */
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				/* NOTE(review): string appears to be missing a closing
+				 * '>' ("<Unknown"); cosmetic only.
+				 */
+				iftype_desc = "<Unknown";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ(len,state,credit)"
+				"= (%d,%s,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				interfaces[i].requested_credit);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ"
+				"(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+				"(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len);
+		}
+	}
+
+	/* per-MAC (node) descriptors */
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ(len,state,credit)"
+				"= (%d,%s,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+				mac_table[i].requested_credit);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ"
+				"(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
+				"(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len);
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			/* average scaled by 100 to print two decimal places */
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		/* NOTE(review): bc_mc_cr_ack is printed but never accumulated —
+		 * always 0 here; looks like dead/incomplete accounting.
+		 */
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+	return;
+}
+
+/* Create a place to store all packet pointers submitted to the firmware until
+ a status comes back, suppress or otherwise.
+
+ hang-er: noun, a contrivance on which things are hung, as a hook.
+*/
+/* Allocate and zero a wlfc hanger with 'max_items' slots, marking every
+ * slot FREE.  Currently only the fixed size WLFC_HANGER_MAXITEMS is
+ * supported (ASSERTed).  Returns NULL on allocation failure; caller frees
+ * with dhd_wlfc_hanger_delete().
+ */
+static void*
+dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+		return NULL;
+
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
+
+/* Free a hanger previously obtained from dhd_wlfc_hanger_create().
+ * Does not touch any packets still referenced by occupied slots — the
+ * caller is responsible for draining them first.  Returns BCME_BADARG for
+ * a NULL hanger.
+ */
+static int
+dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+/* Linear-scan for the first FREE hanger slot.  Returns its index, or the
+ * sentinel WLFC_HANGER_MAXITEMS (and bumps failed_slotfind) when the
+ * hanger is full or NULL.
+ */
+static uint16
+dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	int i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE)
+				return (uint16)i;
+		}
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+/* Park packet 'pkt' in hanger slot 'slot_id' (must currently be FREE).
+ * Returns BCME_OK on success, BCME_NOTFOUND if the slot is already in use
+ * (failed_to_push bumped), BCME_BADARG for NULL hanger / out-of-range slot.
+ */
+static int
+dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->items[slot_id].identifier = slot_id;
+			h->pushed++;
+		}
+		else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+/* Retrieve the packet parked in 'slot_id' into *pktout.  When
+ * 'remove_from_hanger' is nonzero the slot is also released (FREE);
+ * otherwise this is a peek.  The sentinel slot WLFC_HANGER_MAXITEMS means
+ * the packet was never hung and yields BCME_NOTFOUND.  *pktout is left
+ * untouched on failure.
+ */
+static int
+dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		}
+		else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+/* Prepend the wlfc wl-header (PKTTAG TLV carrying 'htodtag', optional
+ * pending-traffic-bitmap TLV when 'tim_signal', filler bytes to a 4-byte
+ * boundary) and then the BDC header onto packet 'p'.  The BDC dataOffset
+ * field records the wl-header length in 4-byte words so the dongle (and
+ * _dhd_wlfc_pullheader) can strip it again.
+ */
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+
+	struct bdc_header *h;
+
+	if (tim_signal) {
+		/* type byte + length byte + bitmap value */
+		tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len;
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	/* host-to-dongle tag travels little-endian */
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[0] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32));
+
+	if (tim_signal_len) {
+		/* TIM TLV sits just before the trailing fillers */
+		wlh[dataOffset - fillers - tim_signal_len ] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+	/* finally the BDC header in front of everything */
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;	/* wl-header length in 4-byte words */
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+/* Strip the BDC header and the wl-header (whose length is encoded in the
+ * BDC dataOffset field, in 4-byte words) from 'pktbuf' — the inverse of
+ * _dhd_wlfc_pushheader().  Returns BCME_ERROR if the packet is too short
+ * to even contain a BDC header.
+ */
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
+/* Map packet 'p' to the destination descriptor it should be queued under:
+ * - STA / p2p-client interfaces, and multicast destinations, use the
+ *   per-interface descriptor (if occupied);
+ * - otherwise search the per-MAC node table for a matching (ifidx, ea);
+ * - fall back to the catch-all 'other' descriptor.
+ * Never returns NULL.
+ */
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+
+	if (((ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_STA) ||
+		ETHER_ISMULTI(dstn) ||
+		(ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_P2P_CLIENT)) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			return &ctx->destination_entries.interfaces[ifid];
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN))
+					return &table[i];
+			}
+		}
+	}
+	return &ctx->destination_entries.other;
+}
+
+/* Undo a failed commit: put packet 'p' back at the HEAD of the queue it
+ * came from so packet order is preserved.  Suppressed packets keep their
+ * wl-header and return to the suppress sub-queue; delayed/new packets have
+ * the header stripped, their hanger slot freed, and the per-precedence
+ * sequence counter decremented.  A packet that did not spend FIFO credit
+ * restores the firmware-requested credit it consumed.
+ */
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	put the packet back to the head of queue
+
+	- a packet from send-q will need to go back to send-q and not delay-q
+	since that will change the order of packets.
+	- suppressed packet goes back to suppress sub-queue
+	- pull out the header, if new or delayed packet
+
+	Note: hslot is used only when header removal is done.
+	*/
+	wlfc_mac_descriptor_t* entry;
+	void* pktout;
+	int rc = BCME_OK;
+	int prec;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	if (entry != NULL) {
+		if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) {
+			/* wl-header is saved for suppressed packets */
+			if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+				WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				rc = BCME_ERROR;
+			}
+		}
+		else {
+			/* remove header first */
+			_dhd_wlfc_pullheader(ctx, p);
+
+			if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+				/* delay-q packets are going to delay-q */
+				if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) {
+					WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+					rc = BCME_ERROR;
+				}
+			}
+			else {
+				/* these are going to SENDQ */
+				if (WLFC_PKTQ_PENQ_HEAD(&ctx->SENDQ, prec, p) == NULL) {
+					WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+					rc = BCME_ERROR;
+				}
+			}
+			/* free the hanger slot */
+			dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
+
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+		}
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+			entry->requested_credit++;
+		}
+	}
+	else {
+		/* defensive: _dhd_wlfc_find_table_entry never returns NULL today */
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+	if (rc != BCME_OK)
+		ctx->stats.rollback_failed++;
+	else
+		ctx->stats.rollback++;
+
+	return rc;
+}
+
+/* Hysteresis-based host flow control on one interface's queue: below the
+ * low watermark, re-enable traffic (state OFF); at/above the high
+ * watermark, stop it (state ON).  Only flags (hostif_flow_state,
+ * toggle_host_if, host_ifidx) are updated here — the actual
+ * dhd_txflowcontrol() calls are commented out, so someone else presumably
+ * acts on toggle_host_if; TODO confirm where.
+ */
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+		/* dhd_txflowcontrol(ctx->dhdp, if_id, OFF); */
+		ctx->toggle_host_if = 0;
+	}
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+		/* dhd_txflowcontrol(ctx->dhdp, if_id, ON); */
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+	return;
+}
+
+/* Send a payload-less "signal only" packet whose sole purpose is to carry
+ * a TIM (pending-traffic bitmap) update for 'entry' to the firmware.  The
+ * dummy buffer is allocated, immediately PKTPULLed empty, given a wl/BDC
+ * header via _dhd_wlfc_pushheader(), and handed to the bus; it is freed
+ * here only if the bus rejects it.
+ */
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	/* headroom for bus/protocol headers; 12 extra bytes of slack */
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 12;
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		/* zero-length payload: all the room becomes header headroom */
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+		rc = dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p);
+		if (rc != BCME_OK) {
+			PKTFREE(ctx->osh, p, TRUE);
+		}
+	}
+	else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+	}
+	return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+/* For a CLOSEd destination, recompute the per-precedence bit of
+ * traffic_pending_bmp from whether its delay+suppress sub-queues are
+ * empty.  When the bitmap now differs from the last value reported to the
+ * firmware, immediately push the update via a signal-only packet and
+ * record it as reported.
+ */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		/* sub-queues 2*prec (delayed) and 2*prec+1 (suppressed) */
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		}
+		else {
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		}
+		else {
+			rc = FALSE;
+		}
+	}
+	return rc;
+}
+
+/* Queue a firmware-suppressed packet onto its destination's suppress
+ * sub-queue (index 2*prec + 1, served before the delayed sub-queue at
+ * 2*prec so delivery order is kept), then refresh the TIM bitmap and the
+ * host flow-control watermarks.  BCME_ERROR if the queue is full.
+ */
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
+	if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+/* Final per-packet processing before handing 'p' to the bus: build the
+ * host-to-dongle (htod) tag (hanger slot, free-running sequence counter,
+ * FIFO/AC, generation, from-host and credit flags), then either push a
+ * brand-new wl/BDC header and park the packet in a hanger slot
+ * (header_needed) or replace the header of a previously-suppressed packet
+ * while keeping its original slot and counter.  *slot returns the hanger
+ * slot used (WLFC_HANGER_MAXITEMS when none).
+ */
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint8 free_ctr;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+	/* piggyback any pending TIM update on this packet's header */
+	if (entry->send_tim_signal) {
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+	if (header_needed) {
+		hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		/* NOTE(review): htod is still 0 here; the tag is overwritten with
+		 * the fully-built value after _dhd_wlfc_pushheader() succeeds.
+		 */
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+	}
+	else {
+		/* suppressed packet: reuse its original slot and counter */
+		hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	}
+	WLFC_PKTID_HSLOT_SET(htod, hslot);
+	WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation);
+
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+	}
+	else {
+		WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+	}
+	if (header_needed) {
+		rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+			entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+		if (rc == BCME_OK) {
+			DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+			/*
+			a new header was created for this packet.
+			push to hanger slot and scrub q. Since bus
+			send succeeded, increment seq number as well.
+			*/
+			rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+			if (rc == BCME_OK) {
+				/* increment free running sequence count */
+				WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+#ifdef PROP_TXSTATUS_DEBUG
+				((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+					OSL_SYSUPTIME();
+#endif
+			}
+			else {
+				WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n",
+				           __FUNCTION__, rc));
+			}
+		}
+	}
+	else {
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+
+		hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* push new header */
+		_dhd_wlfc_pushheader(ctx, p, send_tim_update,
+			entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+	}
+	*slot = hslot;
+	return rc;
+}
+
+/* Decide whether traffic to 'entry' at precedence 'prec' must be withheld.
+ * Returns 1 (closed) when the entry is in CLOSE state with no outstanding
+ * firmware-requested credits/packets, or when its ac_bitmap excludes this
+ * precedence; otherwise 0.  The p2p-GO special case is documented inline.
+ */
+static int
+_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* - destination interface is of type p2p GO.
+		For a p2pGO interface, if the destination is OPEN but the interface is
+		CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is
+		destination-specific-credit left send packets. This is because the
+		firmware storing the destination-specific-requested packet in queue.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0))
+			return 1;
+	}
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec))))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Dequeue one packet from the per-destination delay/suppress queues for
+ * AC 'prec', scanning all destination entries (nodes and interfaces)
+ * round-robin from the saved token position.  On success returns the
+ * packet and sets:
+ *  - *ac_credit_spent: 0 when the packet is covered by a destination-
+ *    specific credit/packet request while the destination is CLOSEd
+ *    (does not count against the AC FIFO credit), 1 otherwise;
+ *  - *needs_hdr: 0 when the packet came from the suppress sub-queue
+ *    (it already carries a wl header), 1 otherwise;
+ *  - *entry_out: the destination entry the packet came from.
+ * Returns NULL when no eligible packet exists.
+ */
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx,
+ int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out)
+{
+ wlfc_mac_descriptor_t* entry;
+ wlfc_mac_descriptor_t* table;
+ uint8 token_pos;
+ int total_entries;
+ void* p = NULL;
+ int pout;
+ int i;
+
+ *entry_out = NULL;
+ token_pos = ctx->token_pos[prec];
+ /* most cases a packet will count against FIFO credit */
+ *ac_credit_spent = 1;
+ *needs_hdr = 1;
+
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&ctx->destination_entries;
+ total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+
+ for (i = 0; i < total_entries; i++) {
+ entry = &table[(token_pos + i) % total_entries];
+ if (entry->occupied) {
+ if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+ /* sub-queues 2*prec+1 (suppressed) and 2*prec (delayed) */
+ p = pktq_mdeq(&entry->psq,
+ /* higher precedence will be picked up first,
+ i.e. suppressed packets before delayed ones
+ */
+ (NBITVAL((prec << 1) + 1) | NBITVAL((prec << 1))),
+ &pout);
+ if (p != NULL) {
+ /* did the packet come from suppress sub-queue? */
+ if (pout == ((prec << 1) + 1)) {
+ /*
+ this packet was suppressed and was sent on the bus
+ previously; this already has a header
+ */
+ *needs_hdr = 0;
+ }
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ /*
+ if the packet was pulled out while destination is in
+ closed state but had a non-zero packets requested,
+ then this should not count against the FIFO credit.
+ That is due to the fact that the firmware will
+ most likely hold onto this packet until a suitable
+ time later to push it to the appropriate AC FIFO.
+ */
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+ }
+ /* move token to ensure fair round-robin */
+ ctx->token_pos[prec] =
+ (token_pos + i + 1) % total_entries;
+ *entry_out = entry;
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq,
+ DHD_PKTTAG_IF(PKTTAG(p)));
+ /*
+ A packet has been picked up, update traffic
+ availability bitmap, if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ return p;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Dequeue one fresh packet from SENDQ for AC 'prec'.  Packets whose
+ * destination is currently closed are diverted into that destination's
+ * delay queue (preserving delivery order) instead of being returned;
+ * bc/mc packets and packets with no table entry bypass the delay queues.
+ * *ac_credit_spent is set to 0 only when the returned packet rides a
+ * destination-specific request while the destination is CLOSEd, as in
+ * _dhd_wlfc_deque_delayedq().
+ */
+static void*
+_dhd_wlfc_deque_sendq(athost_wl_status_info_t* ctx, int prec, uint8* ac_credit_spent)
+{
+ wlfc_mac_descriptor_t* entry;
+ void* p;
+
+ /* most cases a packet will count against FIFO credit */
+ *ac_credit_spent = 1;
+
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p != NULL) {
+ if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))
+ /* bc/mc packets do not have a delay queue */
+ return p;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if (entry == NULL) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return p;
+ }
+
+ while ((p != NULL) && _dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
+ /*
+ - suppressed packets go to sub_queue[2*prec + 1] AND
+ - delayed packets go to sub_queue[2*prec + 0] to ensure
+ order of delivery.
+ */
+ if (WLFC_PKTQ_PENQ(&entry->psq, (prec << 1), p) == NULL) {
+ /* delay queue full: drop the packet */
+ WLFC_DBGMESG(("D"));
+ /* dhd_txcomplete(ctx->dhdp, p, FALSE); */
+ PKTFREE(ctx->osh, p, TRUE);
+ ctx->stats.delayq_full_error++;
+ }
+ /*
+ A packet has been pushed, update traffic availability bitmap,
+ if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+ p = pktq_pdeq(&ctx->SENDQ, prec);
+ if (p == NULL)
+ break;
+
+ entry = _dhd_wlfc_find_table_entry(ctx, p);
+
+ if ((entry == NULL) || (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))) {
+ return p;
+ }
+ }
+ if (p) {
+ /* consume a destination-specific request, if any */
+ if (entry->requested_packet == 0) {
+ if (entry->requested_credit > 0)
+ entry->requested_credit--;
+ }
+ else {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+ }
+ if (entry->state == WLFC_STATE_CLOSE)
+ *ac_credit_spent = 0;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_sent_packets++;
+#endif
+ }
+ if (p)
+ _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p)));
+ }
+ return p;
+}
+
+/*
+ * Apply an add/update/delete action to a MAC descriptor table entry.
+ * ADD and UPDATE (re)initialize the same set of fields; ADD additionally
+ * initializes the entry's power-save queue.  Always returns BCME_OK.
+ */
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) ||
+		(action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+		/* only a fresh ADD sets up the power-save queue */
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD)
+			pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+	}
+	else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		entry->occupied = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		entry->requested_credit = 0;
+		/* enable after packets are queued-deqeued properly.
+		pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0);
+		*/
+	}
+	return BCME_OK;
+}
+
+/*
+ * Borrow one FIFO credit for 'borrower_ac' from the lowest-priority AC
+ * (including the BC/MC slot, lender_ac == AC_COUNT) whose bit is set in
+ * 'available_credit_map' and which still has credit left.  Records the
+ * loan in credits_borrowed[][] so it can be repaid on txstatus/creditback.
+ * Returns BCME_OK when a credit was borrowed, BCME_ERROR when no lender
+ * had credit, BCME_BADARG on bad arguments.
+ */
+int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac)
+{
+	int lender_ac;
+	int rc = BCME_ERROR;
+
+	if (ctx == NULL || available_credit_map == 0) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	/* Borrow from lowest priority available AC (including BC/MC credits) */
+	for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) {
+		/* BUGFIX: test the lender's bit with bitwise '&'; the previous
+		 * logical '&&' made any non-zero map match every lender AC,
+		 * allowing borrowing from ACs not flagged as available.
+		 */
+		if ((available_credit_map & (1 << lender_ac)) &&
+			(ctx->FIFO_credit[lender_ac] > 0)) {
+			ctx->credits_borrowed[borrower_ac][lender_ac]++;
+			ctx->FIFO_credit[lender_ac]--;
+			rc = BCME_OK;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Public entry point: add/update/delete the interface descriptor for
+ * interface 'ifid'.  Returns BCME_BADARG on a NULL state or out-of-range
+ * interface id, otherwise the result of _dhd_wlfc_mac_entry_update().
+ */
+int
+dhd_wlfc_interface_entry_update(void* state,
+	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	/* Guard against an uninitialized flow-control state, consistent with
+	 * the checks in dhd_wlfc_enque_sendq()/dhd_wlfc_commit_packets();
+	 * previously a NULL state was dereferenced.
+	 */
+	if ((ctx == NULL) || (ifid >= WLFC_MAX_IFNUM))
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea);
+}
+
+/*
+ * Refresh the AC FIFO credit map from a firmware credit report:
+ * indices 0..3 are the four ACs, index 4 is the bc/mc credit and
+ * index 5 (ATIM FIFO) is forced to zero as it is not used yet.
+ */
+int
+dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	int i;
+
+	/* four AC credits plus the bc/mc credit at index 4 */
+	for (i = 0; i < 5; i++)
+		ctx->FIFO_credit[i] = credits[i];
+	/* credit for ATIM FIFO is not used yet. */
+	ctx->FIFO_credit[5] = 0;
+	return BCME_OK;
+}
+
+/*
+ * Enqueue a fresh host packet onto SENDQ at precedence 'prec'.
+ * Returns BCME_BADARG on bad arguments, BCME_ERROR (after freeing the
+ * packet) when the queue is full, BCME_OK otherwise.
+ */
+int
+dhd_wlfc_enque_sendq(void* state, int prec, void* p)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	/* prec == AC_COUNT is the bc/mc queue; anything above is invalid */
+	if ((state == NULL) || (prec > AC_COUNT) || (p == NULL)) {
+		WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	if (dhd_prec_enq(ctx->dhdp, &ctx->SENDQ, p, prec) == FALSE) {
+		/* SENDQ full: account for the drop and free the packet */
+		ctx->stats.sendq_full_error++;
+		WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, prec);
+		WLFC_DBGMESG(("Q"));
+		PKTFREE(ctx->osh, p, TRUE);
+		return BCME_ERROR;
+	}
+
+	ctx->stats.pktin++;
+	return BCME_OK;
+}
+
+/*
+ * Push one packet to the bus: tag whether it spends an AC FIFO credit,
+ * run pre-tx processing (wl header push / hanger bookkeeping), then hand
+ * the packet to the bus commit callback.  On a bus failure the packet is
+ * rolled back to the appropriate queue and BCME_ERROR is returned.
+ */
+int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+ dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ uint32 hslot;
+ int rc;
+
+ /*
+ if ac_fifo_credit_spent = 0
+
+ This packet will not count against the FIFO credit.
+ To ensure the txstatus corresponding to this packet
+ does not provide an implied credit (default behavior)
+ mark the packet accordingly.
+
+ if ac_fifo_credit_spent = 1
+
+ This is a normal packet and it counts against the FIFO
+ credit count.
+ */
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+ rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+ commit_info->needs_hdr, &hslot);
+
+ if (rc == BCME_OK)
+ rc = fcommit(commit_ctx, commit_info->p);
+ else
+ ctx->stats.generic_error++;
+
+ if (rc == BCME_OK) {
+ ctx->stats.pkt2bus++;
+ if (commit_info->ac_fifo_credit_spent) {
+ ctx->stats.sendq_pkts[ac]++;
+ WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+ }
+ }
+ else {
+ /*
+ bus commit has failed, rollback.
+ - remove wl-header for a delayed packet
+ - save wl-header header for suppressed packets
+ */
+ rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p,
+ (commit_info->pkt_type), hslot);
+ if (rc != BCME_OK)
+ ctx->stats.rollback_failed++;
+
+ rc = BCME_ERROR;
+ }
+
+ return rc;
+}
+
+/*
+ * Main transmit scheduler.  For each AC (highest priority first) drain
+ * first the delay/suppress queues and then SENDQ while that AC's FIFO
+ * credit lasts.  Afterwards, if only AC_BE (and possibly bc/mc) traffic
+ * has been seen for the defer period, borrow the idle ACs' unused
+ * credits for AC_BE.  Returns BCME_OK, or the bus error code after
+ * BUS_RETRIES failed commits.
+ */
+int
+dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx)
+{
+ int ac;
+ int credit;
+ int rc;
+ dhd_wlfc_commit_info_t commit_info;
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ int credit_count = 0;
+ int bus_retry_count = 0;
+ uint8 ac_available = 0;  /* Bitmask for 4 ACs + BC/MC */
+
+ if ((state == NULL) ||
+ (fcommit == NULL)) {
+ WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ memset(&commit_info, 0, sizeof(commit_info));
+
+ /*
+ Commit packets for regular AC traffic. Higher priority first.
+ First, use up FIFO credits available to each AC. Based on distribution
+ and credits left, borrow from other ACs as applicable
+
+ -NOTE:
+ If the bus between the host and firmware is overwhelmed by the
+ traffic from host, it is possible that higher priority traffic
+ starves the lower priority queue. If that occurs often, we may
+ have to employ weighted round-robin or ucode scheme to avoid
+ low priority packet starvation.
+ */
+
+ for (ac = AC_COUNT; ac >= 0; ac--) {
+
+ int initial_credit_count = ctx->FIFO_credit[ac];
+
+ /* pass 1: previously delayed/suppressed packets */
+ for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ credit++;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ ctx->FIFO_credit[ac] -= credit;
+ return rc;
+ }
+ }
+ }
+
+ ctx->FIFO_credit[ac] -= credit;
+
+ /* packets from SENDQ are fresh and they'd need header and have no MAC entry */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ /* pass 2: fresh packets from SENDQ */
+ for (credit = 0; credit < ctx->FIFO_credit[ac];) {
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent));
+ if (commit_info.p == NULL)
+ break;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ credit++;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ ctx->FIFO_credit[ac] -= credit;
+ return rc;
+ }
+ }
+ }
+
+ ctx->FIFO_credit[ac] -= credit;
+
+ /* If no credits were used, the queue is idle and can be re-used
+ Note that resv credits cannot be borrowed
+ */
+ if (initial_credit_count == ctx->FIFO_credit[ac]) {
+ ac_available |= (1 << ac);
+ credit_count += ctx->FIFO_credit[ac];
+ }
+ }
+
+ /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD
+
+ Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to:
+ a) ignore BC/MC for deferring borrow
+ b) ignore AC_BE being available along with other ACs
+ (this should happen only for pure BC/MC traffic)
+
+ i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. no traffic) and
+ we do not care if AC_BE and BC/MC are available or not
+ */
+ if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) {
+
+ if (ctx->allow_credit_borrow) {
+ ac = 1;  /* Set ac to AC_BE and borrow credits */
+ }
+ else {
+ int delta;
+ int curr_t = OSL_SYSUPTIME();
+
+ /* NOTE(review): wrap-around handling of the ms uptime is
+ computed in a signed int - presumably wraps are rare
+ enough in practice; confirm before relying on it. */
+ if (curr_t > ctx->borrow_defer_timestamp)
+ delta = curr_t - ctx->borrow_defer_timestamp;
+ else
+ delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp;
+
+ if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+ /* Reset borrow but defer to next iteration (defensive borrowing) */
+ ctx->allow_credit_borrow = TRUE;
+ ctx->borrow_defer_timestamp = 0;
+ }
+ return BCME_OK;
+ }
+ }
+ else {
+ /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+ ctx->allow_credit_borrow = FALSE;
+ ctx->borrow_defer_timestamp = OSL_SYSUPTIME();
+ return BCME_OK;
+ }
+
+ /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE)
+ Generically use "ac" only in case we extend to all ACs in future
+ */
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry));
+ if (commit_info.p == NULL)
+ break;
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+ /* packets from SENDQ are fresh and they'd need header and have no MAC entry */
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
+
+ for (; (credit_count > 0);) {
+
+ commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent));
+ if (commit_info.p == NULL)
+ break;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent) {
+ (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
+ credit_count--;
+ }
+ }
+ else {
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n"));
+ return rc;
+ }
+ }
+ }
+
+ return BCME_OK;
+}
+
+/*
+ * Linear scan of the node descriptor table for an occupied entry with
+ * MAC address 'ea'.  Returns the table index on a match, otherwise
+ * WLFC_MAC_DESC_ID_INVALID (also for a NULL 'ea').
+ */
+static uint8
+dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 idx;
+
+	if (ea == NULL)
+		return WLFC_MAC_DESC_ID_INVALID;
+
+	for (idx = 0; idx < WLFC_MAC_DESC_TABLE_SIZE; idx++) {
+		if (table[idx].occupied &&
+			(memcmp(ea, &table[idx].ea[0], ETHER_ADDR_LEN) == 0))
+			return idx;
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
+/*
+ * Bus-layer completion callback for a transmitted packet.  Signal-only
+ * packets are simply freed.  On a bus failure the packet is popped from
+ * the hanger, the upper layer is notified of the failure, and any FIFO
+ * credit it consumed is returned (repaying a borrowed credit first).
+ * On success nothing is done here - presumably the firmware txstatus
+ * path performs the accounting and frees the packet; confirm against
+ * dhd_wlfc_txstatus_update().
+ */
+void
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ void* p;
+ int fifo_id;
+
+ if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.signal_only_pkts_freed++;
+#endif
+ /* is this a signal-only packet? */
+ PKTFREE(wlfc->osh, txp, TRUE);
+ return;
+ }
+ if (!success) {
+ WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+ __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG
+ (PKTTAG(txp))), &p, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, txp, FALSE);
+
+ /* return the credit, if necessary */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) {
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp));
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+
+ PKTFREE(wlfc->osh, txp, TRUE);
+ }
+ return;
+}
+
+/* Handle discard or suppress indication */
+/*
+ * Process one TXSTATUS TLV from firmware: pop (or, for suppress flags,
+ * peek) the packet from the hanger, perform implied/requested credit
+ * accounting depending on the flow-control mode, and either re-queue a
+ * suppressed packet on its delay queue or complete and free it.
+ */
+static int
+dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info)
+{
+ uint8 status_flag;
+ uint32 status;
+ int ret;
+ int remove_from_hanger = 1;
+ void* pktbuf;
+ uint8 fifo_id;
+ wlfc_mac_descriptor_t* entry = NULL;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ memcpy(&status, pkt_info, sizeof(uint32));
+ status_flag = WL_TXSTATUS_GET_FLAGS(status);
+ wlfc->stats.txstatus_in++;
+
+ if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+ wlfc->stats.pkt_freed++;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+ wlfc->stats.d11_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+ wlfc->stats.wl_suppress++;
+ remove_from_hanger = 0;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+ wlfc->stats.wlc_tossed_pkts++;
+ }
+
+ ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
+ WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
+ if (ret != BCME_OK) {
+ /* do something */
+ return ret;
+ }
+
+ if (!remove_from_hanger) {
+ /* this packet was suppressed */
+
+ /* NOTE(review): assumes a table entry exists for a suppressed
+ packet; entry would be NULL if the lookup could fail - confirm
+ _dhd_wlfc_find_table_entry() always returns an entry here. */
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ entry->generation = WLFC_PKTID_GEN(status);
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ uint32 new_t = OSL_SYSUPTIME();
+ uint32 old_t;
+ uint32 delta;
+ old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
+ WLFC_PKTID_HSLOT_GET(status)].push_time;
+
+
+ wlfc->stats.latency_sample_count++;
+ if (new_t > old_t)
+ delta = new_t - old_t;
+ else
+ delta = 0xffffffff + new_t - old_t;
+ wlfc->stats.total_status_latency += delta;
+ wlfc->stats.latency_most_recent = delta;
+
+ wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+ if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+ wlfc->stats.idx_delta = 0;
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+ /* pick up the implicit credit from this packet */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
+
+ int lender, credit_returned = 0; /* Note that borrower is fifo_id */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+ }
+ else {
+ /*
+ if this packet did not count against FIFO credit, it must have
+ taken a requested_credit from the destination entry (for pspoll etc.)
+ */
+ if (!entry) {
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+ }
+ if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+ entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+ entry->dstncredit_acks++;
+#endif
+ }
+ if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+ (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+ ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+ if (ret != BCME_OK) {
+ /* delay q is full, drop this packet */
+ dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
+ &pktbuf, 1);
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, pktbuf, FALSE);
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ }
+ else {
+ dhd_txcomplete(dhd, pktbuf, TRUE);
+ /* free the packet */
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
+ }
+ return BCME_OK;
+}
+
+/*
+ * FIFO credit-back TLV handler.  In explicit-credit mode each returned
+ * credit first repays credits that AC 'i' borrowed from other ACs
+ * (highest-priority lender first); any remainder is credited back to
+ * the AC itself.  In other modes the credits are only (optionally)
+ * recorded in debug stats.
+ */
+static int
+dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+ int i;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+ /* update FIFO credits */
+ if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+ {
+ int lender; /* Note that borrower is i */
+
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+ if (wlfc->credits_borrowed[i][lender] > 0) {
+ if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+ credits[i] -= wlfc->credits_borrowed[i][lender];
+ wlfc->FIFO_credit[lender] +=
+ wlfc->credits_borrowed[i][lender];
+ wlfc->credits_borrowed[i][lender] = 0;
+ }
+ else {
+ wlfc->credits_borrowed[i][lender] -= credits[i];
+ wlfc->FIFO_credit[lender] += credits[i];
+ credits[i] = 0;
+ }
+ }
+ }
+
+ /* If we have more credits left over, these must belong to the AC */
+ if (credits[i] > 0) {
+ wlfc->FIFO_credit[i] += credits[i];
+ }
+ }
+ }
+
+ return BCME_OK;
+}
+
+/* RSSI signal TLV: intentionally a no-op on the host for now; parameters
+ * are referenced only to silence unused-parameter warnings.
+ */
+static int
+dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+ (void)dhd;
+ (void)rssi;
+ return BCME_OK;
+}
+
+/*
+ * MACDESC add/del TLV handler: maintains the node descriptor table.
+ * TLV layout: value[0] = mac handle (embeds lookup index), value[1] =
+ * interface id, value[2..7] = MAC address.  On ADD, create the entry at
+ * the firmware-supplied lookup index, or release a stale entry found at
+ * a different index; on DEL, tear the entry down.
+ */
+static int
+dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+ int rc;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ uint8 existing_index;
+ uint8 table_index;
+ uint8 ifid;
+ uint8* ea;
+
+ WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+ __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+ ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+ WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+ table = wlfc->destination_entries.nodes;
+ table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+ ifid = value[1];
+ ea = &value[2];
+
+ if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+ existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+ if (existing_index == WLFC_MAC_DESC_ID_INVALID) {
+ /* this MAC entry does not exist, create one */
+ if (!table[table_index].occupied) {
+ table[table_index].mac_handle = value[0];
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been empty, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ else {
+ /*
+ there is an existing entry, move it to new index
+ if necessary.
+ */
+ if (existing_index != table_index) {
+ /* if we already have an entry, free the old one */
+ table[existing_index].occupied = 0;
+ table[existing_index].state = WLFC_STATE_CLOSE;
+ table[existing_index].requested_credit = 0;
+ table[existing_index].interface_id = 0;
+ /* enable after packets are queued-deqeued properly.
+ pktq_flush(dhd->osh, &table[existing_index].psq, FALSE, NULL, 0);
+ */
+ }
+ }
+ }
+ if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+ if (table[table_index].occupied) {
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea);
+ }
+ else {
+ /* the space should have been occupied, but it's not */
+ wlfc->stats.mac_update_failed++;
+ }
+ }
+ /* rc is informational only on some paths; silence unused warning */
+ BCM_REFERENCE(rc);
+ return BCME_OK;
+}
+
+/*
+ * MAC_OPEN/MAC_CLOSE TLV handler (power-save on/off for one node).
+ * value[0] is the firmware mac handle.  Opening or closing a node wipes
+ * its outstanding ps credits; on close, pending-traffic signals are
+ * (re)sent to the firmware for every AC.
+ */
+static int
+dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle = value[0];
+
+	desc = &wlfc->destination_entries.nodes[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (!desc->occupied) {
+		wlfc->stats.psmode_update_failed++;
+		return BCME_OK;
+	}
+
+	/* a fresh PS mode should wipe old ps credits? */
+	desc->requested_credit = 0;
+	if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+		desc->state = WLFC_STATE_OPEN;
+		DHD_WLFC_CTRINC_MAC_OPEN(desc);
+	}
+	else {
+		int i;
+
+		desc->state = WLFC_STATE_CLOSE;
+		DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+		/* Indicate to firmware if there is any traffic pending. */
+		for (i = AC_BE; i < AC_COUNT; i++)
+			_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+	}
+	return BCME_OK;
+}
+
+/*
+ * INTERFACE_OPEN/INTERFACE_CLOSE TLV handler: flip the state of the
+ * interface descriptor value[0].  Unknown or unoccupied interfaces only
+ * bump the failure counter; the return value is always BCME_OK.
+ */
+static int
+dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	uint8 if_id = value[0];
+	wlfc_mac_descriptor_t* iface;
+
+	if (if_id >= WLFC_MAX_IFNUM) {
+		wlfc->stats.interface_update_failed++;
+		return BCME_OK;
+	}
+
+	iface = &wlfc->destination_entries.interfaces[if_id];
+	if (!iface->occupied) {
+		wlfc->stats.interface_update_failed++;
+		return BCME_OK;
+	}
+
+	iface->state = (type == WLFC_CTL_TYPE_INTERFACE_OPEN) ?
+		WLFC_STATE_OPEN : WLFC_STATE_CLOSE;
+	return BCME_OK;
+}
+
+/*
+ * MAC_REQUEST_CREDIT TLV handler: the firmware asks for value[0]
+ * destination-specific credits for the node identified by mac handle
+ * value[1], applicable to the ACs in value[2].
+ */
+static int
+dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* desc;
+	uint8 credit = value[0];
+	uint8 mac_handle = value[1];
+
+	desc = &wlfc->destination_entries.nodes[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+		desc->ac_bitmap = value[2];
+	}
+	else {
+		wlfc->stats.credit_request_failed++;
+	}
+	return BCME_OK;
+}
+
+/*
+ * MAC_REQUEST_PACKET TLV handler: the firmware asks for value[0]
+ * packets for the node identified by mac handle value[1], applicable
+ * to the ACs in value[2].
+ */
+static int
+dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+		dhd->wlfc_state;
+	wlfc_mac_descriptor_t* desc;
+	uint8 packet_count = value[0];
+	uint8 mac_handle = value[1];
+
+	desc = &wlfc->destination_entries.nodes[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+		desc->ac_bitmap = value[2];
+	}
+	else {
+		wlfc->stats.packet_request_failed++;
+	}
+	return BCME_OK;
+}
+
+/*
+ * Copy the host rx-reorder TLV payload out for the caller.  When no
+ * destination buffer was supplied the reported length is forced to 0;
+ * a NULL length pointer makes the call a no-op.
+ */
+static void
+dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+	if (info_len == NULL)
+		return;
+
+	if (info_buf == NULL) {
+		*info_len = 0;
+		return;
+	}
+
+	bcopy(val, info_buf, len);
+	*info_len = len;
+}
+
+/*
+ * Walk the BDC signal TLVs at the front of 'pktbuf' (tlv_hdr_len bytes)
+ * and dispatch each one to its handler: txstatus, rx-reorder info,
+ * credit-back, RSSI, credit/packet requests, MAC and interface state
+ * changes.  A non-zero remainder after parsing indicates a malformed
+ * TLV stream and is counted in stats.
+ */
+static int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+ uint *reorder_info_len)
+{
+ uint8 type, len;
+ uint8* value;
+ uint8* tmpbuf;
+ uint16 remainder = tlv_hdr_len;
+ uint16 processed = 0;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+ if (remainder) {
+ while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+ type = tmpbuf[processed];
+ if (type == WLFC_CTL_TYPE_FILLER) {
+ /* single filler byte, no length/value */
+ remainder -= 1;
+ processed += 1;
+ continue;
+ }
+
+ /* TLV layout: [type][len][value...] */
+ len = tmpbuf[processed + 1];
+ value = &tmpbuf[processed + 2];
+
+ if (remainder < (2 + len))
+ break;
+
+ remainder -= 2 + len;
+ processed += 2 + len;
+ if (type == WLFC_CTL_TYPE_TXSTATUS)
+ dhd_wlfc_txstatus_update(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+ dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+ reorder_info_len);
+ else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+ dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_RSSI)
+ dhd_wlfc_rssi_indicate(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+ dhd_wlfc_credit_request(dhd, value);
+
+ else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+ dhd_wlfc_packet_request(dhd, value);
+
+ else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+ (type == WLFC_CTL_TYPE_MAC_CLOSE))
+ dhd_wlfc_psmode_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+ (type == WLFC_CTL_TYPE_MACDESC_DEL))
+ dhd_wlfc_mac_table_update(dhd, value, type);
+
+ else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+ (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+ dhd_wlfc_interface_update(dhd, value, type);
+ }
+ }
+ if (remainder != 0) {
+ /* trouble..., something is not right */
+ wlfc->stats.tlv_parse_failed++;
+ }
+ }
+ return BCME_OK;
+}
+
+/*
+ * Negotiate bdcv2 TLV signaling with the firmware via the "tlv" iovar.
+ * When dhd->wlfc_enabled, requests RSSI, xon/xoff, credit-status and
+ * host rx-reorder signals; otherwise sends 0 to disable signaling.
+ * A failed ioctl is only logged - the driver falls back to running
+ * without flow control.  Always returns BCME_OK.
+ */
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+ char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */
+ /* enable all signals & indicate host proptxstatus logic is active */
+ uint32 tlv = dhd->wlfc_enabled?
+ WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0;
+ /* WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0; */
+
+
+ /*
+ try to enable/disable signaling by sending "tlv" iovar. if that fails,
+ fallback to no flow control? Print a message for now.
+ */
+
+ /* enable proptxtstatus signaling by default */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+ }
+ else {
+ /*
+ Leaving the message for now, it should be removed after a while; once
+ the tlv situation is stable.
+ */
+ DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ }
+ return BCME_OK;
+}
+
+/*
+ * Allocate and initialize the proptxstatus state block
+ * (athost_wl_status_info_t) and its packet hanger, set up SENDQ and the
+ * bc/mc destination entry, and enable explicit-credit mode with credit
+ * borrowing allowed.  No-op when flow control is disabled or already
+ * enabled.  Returns BCME_OK or BCME_NOMEM.
+ */
+int
+dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+ int i;
+ athost_wl_status_info_t* wlfc;
+
+ if (!dhd->wlfc_enabled || dhd->wlfc_state)
+ return BCME_OK;
+
+ /* allocate space to track txstatus propagated from firmware */
+ dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+ if (dhd->wlfc_state == NULL)
+ return BCME_NOMEM;
+
+ /* initialize state space */
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+ /* remember osh & dhdp */
+ wlfc->osh = dhd->osh;
+ wlfc->dhdp = dhd;
+
+ wlfc->hanger =
+ dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+ if (wlfc->hanger == NULL) {
+ /* hanger allocation failed: release the state block again */
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return BCME_NOMEM;
+ }
+
+ /* initialize all interfaces to accept traffic */
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ wlfc->hostif_flow_state[i] = OFF;
+ }
+
+ /*
+ create the SENDQ containing
+ sub-queues for all AC precedences + 1 for bc/mc traffic
+ */
+ pktq_init(&wlfc->SENDQ, (AC_COUNT + 1), WLFC_SENDQ_LEN);
+
+ wlfc->destination_entries.other.state = WLFC_STATE_OPEN;
+ /* bc/mc FIFO is always open [credit aside], i.e. b[5] */
+ wlfc->destination_entries.other.ac_bitmap = 0x1f;
+ wlfc->destination_entries.other.interface_id = 0;
+
+ wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+
+ wlfc->allow_credit_borrow = TRUE;
+ wlfc->borrow_defer_timestamp = 0;
+
+ return BCME_OK;
+}
+
+/* release all packet resources */
+/*
+ * Flush every delay queue (all destination entries), the SENDQ, and free
+ * any packets still held in the hanger.  The state block itself is NOT
+ * freed here - see dhd_wlfc_deinit().  No-op when flow control is not
+ * active.
+ */
+void
+dhd_wlfc_cleanup(dhd_pub_t *dhd)
+{
+ int i;
+ int total_entries;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_hanger_t* h;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ /* search all entries, include nodes as well as interfaces */
+ table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+ for (i = 0; i < total_entries; i++) {
+ if (table[i].occupied) {
+ if (table[i].psq.len) {
+ WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n",
+ __FUNCTION__, i, table[i].psq.len));
+ /* release packets held in DELAYQ */
+ pktq_flush(wlfc->osh, &table[i].psq, TRUE, NULL, 0);
+ }
+ table[i].occupied = 0;
+ }
+ }
+ /* release packets held in SENDQ */
+ if (wlfc->SENDQ.len)
+ pktq_flush(wlfc->osh, &wlfc->SENDQ, TRUE, NULL, 0);
+ /* any in the hanger? */
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
+ }
+ }
+ return;
+}
+
+void
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+ /* cleanup all psq related resources */
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
+ dhd->wlfc_state;
+
+ if (dhd->wlfc_state == NULL)
+ return;
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+ WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+ __FUNCTION__, i, h->items[i].pkt,
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+ }
+ }
+ }
+#endif
+ /* delete hanger */
+ dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+
+ /* free top structure */
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ return;
+}
+#endif /* PROP_TXSTATUS */
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state)
+ dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif /* BDC */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ /* Push BDC header used to convey priority for buses that don't */
+
+ PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN);
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+ if (PKTSUMNEEDED(pktbuf))
+ h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+ h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+ h->flags2 = 0;
+ h->dataOffset = 0;
+ BDC_SET_IF_IDX(h, ifidx); /* fix: moved inside #ifdef BDC -- 'h' does not exist otherwise */
+#endif /* BDC */
+}
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+ uint *reorder_info_len)
+{
+#ifdef BDC
+ struct bdc_header *h;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+ if (reorder_info_len)
+ *reorder_info_len = 0;
+ /* Pop BDC header used to convey priority for buses that don't */
+
+ if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+ return BCME_ERROR;
+ }
+
+ h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+ if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+ __FUNCTION__, *ifidx));
+ return BCME_ERROR;
+ }
+
+#if defined(NDIS630)
+ h->dataOffset = 0;
+#endif
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+ DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+ h->dataOffset = 0;
+ else
+ return BCME_ERROR;
+ }
+
+ if (h->flags & BDC_FLAG_SUM_GOOD) {
+ DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ dhd_ifname(dhd, *ifidx), h->flags));
+ PKTSETSUMGOOD(pktbuf, TRUE);
+ }
+
+ PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+ PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+#if !defined(NDIS630) /* NOTE(review): 'h' is declared only under BDC; code below presumes BDC is defined -- confirm */
+ if (PKTLEN(dhd->osh, pktbuf) < (uint32) (h->dataOffset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), (h->dataOffset * 4)));
+ return BCME_ERROR;
+ }
+#endif
+#ifdef PROP_TXSTATUS
+ if (dhd->wlfc_state &&
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
+ != WLFC_FCMODE_NONE &&
+ (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) {
+ /*
+ - parse txstatus only for packets that came from the firmware
+ */
+ dhd_os_wlfc_block(dhd);
+ dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2),
+ reorder_buf_info, reorder_info_len);
+ ((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++;
+ dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+ (void *)dhd->bus);
+ dhd_os_wlfc_unblock(dhd);
+ }
+#endif /* PROP_TXSTATUS */
+#if !defined(NDIS630)
+ PKTPULL(dhd->osh, pktbuf, (h->dataOffset << 2));
+#endif
+ return 0;
+}
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *cdc;
+
+ if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT,
+ sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(cdc, 0, sizeof(dhd_prot_t));
+
+ /* ensure that the msg buf directly follows the cdc msg struct */
+ if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+ DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ goto fail;
+ }
+
+ dhd->prot = cdc;
+#ifdef BDC
+ dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+ dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ return 0;
+
+fail:
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ if (cdc != NULL)
+ MFREE(dhd->osh, cdc, sizeof(dhd_prot_t)); /* NOTE(review): buffer came from DHD_OS_PREALLOC -- confirm it maps to MALLOC when static bufs are off */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_deinit(dhd);
+#endif
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ /* No stats from dongle added yet, copy bus stats */
+ dhd->dstats.tx_packets = dhd->tx_packets;
+ dhd->dstats.tx_errors = dhd->tx_errors;
+ dhd->dstats.rx_packets = dhd->rx_packets;
+ dhd->dstats.rx_errors = dhd->rx_errors;
+ dhd->dstats.rx_dropped = dhd->rx_dropped;
+ dhd->dstats.multicast = dhd->rx_multicast;
+ return;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0)
+ goto done;
+
+
+#ifdef PROP_TXSTATUS
+ ret = dhd_wlfc_init(dhd);
+#endif
+
+#if defined(WL_CFG80211)
+ if (dhd_download_fw_on_driverload)
+#endif /* defined(WL_CFG80211) */
+ ret = dhd_preinit_ioctls(dhd);
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+
+done:
+ return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+ /* Nothing to do for CDC */
+}
+
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+ uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+ uint i;
+ void *plast = NULL, *p;
+ uint32 pkt_cnt = 0;
+
+ if (ptr->pend_pkts == 0) {
+ DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+ *pplast = NULL;
+ *pkt_count = 0;
+ *pkt = NULL;
+ return;
+ }
+ if (start == end)
+ i = ptr->max_idx + 1;
+ else {
+ if (start > end)
+ i = (ptr->max_idx - end) + start;
+ else
+ i = end - start;
+ }
+ while (i) {
+ p = (void *)(ptr->p[start]);
+ ptr->p[start] = NULL;
+
+ if (p != NULL) {
+ if (plast == NULL)
+ *pkt = p;
+ else
+ PKTSETNEXT(osh, plast, p);
+
+ plast = p;
+ pkt_cnt++;
+ }
+ i--;
+ if (start++ == ptr->max_idx)
+ start = 0;
+ }
+ *pplast = plast;
+ *pkt_count = (uint32)pkt_cnt;
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+ void **pkt, uint32 *pkt_count)
+{
+ uint8 flow_id, max_idx, cur_idx, exp_idx;
+ struct reorder_info *ptr;
+ uint8 flags;
+ void *cur_pkt, *plast = NULL;
+ uint32 cnt = 0;
+
+ if (pkt == NULL) {
+ if (pkt_count != NULL)
+ *pkt_count = 0;
+ return 0;
+ }
+ cur_pkt = *pkt;
+ *pkt = NULL;
+
+ flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+ flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+ DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+ reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+ reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+
+ ptr = dhd->reorder_bufs[flow_id];
+ if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+ uint32 buf_size = sizeof(struct reorder_info);
+
+ DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+ __FUNCTION__, flow_id));
+
+ if (ptr == NULL) {
+ DHD_ERROR(("%s: received flags to cleanup, but no flow (%d) yet\n",
+ __FUNCTION__, flow_id));
+ *pkt_count = 1;
+ return 0;
+ }
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+ /* set it to the last packet */
+ if (plast) {
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ cnt++;
+ }
+ else {
+ if (cnt != 0) {
+ DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+ __FUNCTION__, cnt));
+ }
+ *pkt = cur_pkt;
+ cnt = 1;
+ }
+ buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+ MFREE(dhd->osh, ptr, buf_size);
+ dhd->reorder_bufs[flow_id] = NULL;
+ *pkt_count = cnt;
+ return 0;
+ }
+ /* all the other cases depend on the existance of the reorder struct for that flow id */
+ if (ptr == NULL) {
+ uint32 buf_size_alloc = sizeof(reorder_info_t);
+ max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+ buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+ /* allocate space to hold the buffers, index etc */
+
+ DHD_REORDER(("%s: alloc buffer of size %d size, reorder info id %d, maxidx %d\n",
+ __FUNCTION__, buf_size_alloc, flow_id, max_idx));
+ ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+ if (ptr == NULL) {
+ DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+ *pkt_count = 1;
+ return 0;
+ }
+ bzero(ptr, buf_size_alloc);
+ dhd->reorder_bufs[flow_id] = ptr;
+ ptr->p = (void *)(ptr+1);
+ ptr->max_idx = max_idx;
+ }
+ if (flags & WLHOST_REORDERDATA_NEW_HOLE) {
+ DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__));
+ if (ptr->pend_pkts) {
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, ptr->exp_idx);
+ ptr->pend_pkts = 0;
+ }
+ ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+ ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+ ptr->p[ptr->cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ *pkt_count = cnt;
+ }
+ else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+ cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+ if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ ptr->cur_idx = cur_idx;
+ DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+ __FUNCTION__, ptr->pend_pkts));
+ *pkt_count = 0;
+ *pkt = NULL;
+ }
+ else if (ptr->exp_idx == cur_idx) {
+ /* got the right one ..flush from cur to exp and update exp */
+ DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+ __FUNCTION__, cur_idx));
+ if (ptr->p[cur_idx] != NULL) {
+ DHD_REORDER(("%s: Error buffer pending..free it\n",
+ __FUNCTION__));
+ PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+ ptr->p[cur_idx] = NULL;
+ }
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+
+ ptr->cur_idx = cur_idx;
+ ptr->exp_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ cur_idx, exp_idx);
+ ptr->pend_pkts -= (uint8)cnt;
+ *pkt_count = cnt;
+ DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+ __FUNCTION__, cnt, ptr->pend_pkts));
+ }
+ else {
+ uint8 end_idx;
+ bool flush_current = FALSE;
+ /* both cur and exp are moved now .. */
+ DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+ __FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+ ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+ ptr->exp_idx, end_idx);
+
+ if (cur_idx == ptr->max_idx) {
+ if (exp_idx == 0)
+ flush_current = TRUE;
+ } else {
+ if (exp_idx == cur_idx + 1)
+ flush_current = TRUE;
+ }
+ if (flush_current) {
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ }
+ else {
+ ptr->p[cur_idx] = cur_pkt;
+ ptr->pend_pkts++;
+ }
+ ptr->exp_idx = exp_idx;
+ ptr->cur_idx = cur_idx;
+ *pkt_count = cnt;
+ }
+ }
+ else {
+ uint8 end_idx;
+ /* no real packet but update to exp_seq...that means explicit window move */
+ exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+ DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+ __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+ if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+ end_idx = ptr->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+ ptr->pend_pkts -= (uint8)cnt;
+ if (plast)
+ PKTSETNEXT(dhd->osh, plast, cur_pkt);
+ else
+ *pkt = cur_pkt;
+ cnt++;
+ *pkt_count = cnt;
+ /* set the new expected idx */
+ ptr->exp_idx = exp_idx;
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
new file mode 100644
index 00000000000000..f654d3113fb099
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c
@@ -0,0 +1,594 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+
+extern struct wl_priv *wlcfg_drv_priv;
+static int dhd_dongle_up = FALSE;
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+s32 dhd_cfg80211_down(struct wl_priv *wl)
+{
+ dhd_dongle_up = FALSE;
+ return 0;
+}
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+{
+ s32 err = 0;
+
+ err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ }
+ return err;
+}
+s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
+#endif
+ struct net_device *ndev;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (dhd_dongle_up) {
+ WL_ERR(("Dongle is already up\n"));
+ return err;
+ }
+
+ ndev = wl_to_prmry_ndev(wl);
+
+ if (need_lock)
+ rtnl_lock();
+
+ err = wl_dongle_up(ndev, 0);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
+ }
+ dhd_dongle_up = true;
+
+default_conf_out:
+ if (need_lock)
+ rtnl_unlock();
+ return err;
+
+}
+
+
+/* TODO: clean up the BT-Coex code, it still have some legacy ioctl/iovar functions */
+#define COEX_DHCP
+
+#if defined(COEX_DHCP)
+
+/* use New SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boost wifi pkt priority to max, caution: -not fair to sco */
+#define BT_DHCP_USE_FLAGS
+/* T1 start SCO/ESCo priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME 2500
+/* T2: timeout after which SCO/eSCO suppression is turned off */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+ BT_DHCP_IDLE,
+ BT_DHCP_START,
+ BT_DHCP_OPPR_WIN,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+};
+
+/*
+ * get named driver variable to uint register value and return error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+ uint reg, int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+ (char *)(&var), sizeof(var.buf));
+ error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf_local[1024];
+#else
+ static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+ bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+ return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+set named driver variable to uint register value and return error indication
+calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value)
+*/
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE(("%s, sample[%d], btc params: 27:%x\n",
+ __FUNCTION__, i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERR(("%s ioc read btc params error\n", __FUNCTION__));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) { /* count both sco & esco */
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ __FUNCTION__, sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
+ char buf_reg50va_dhcp_on[8] =
+ { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] =
+ { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] =
+ { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] =
+ { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] =
+ { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+ /* this should reduce eSCO agressive retransmit
+ * w/o breaking it
+ */
+
+ /* 1st save current */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+ saved_status = TRUE;
+ WL_TRACE(("%s saved bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+ } else {
+ WL_ERR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE(("override with [50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+ } else if (saved_status) {
+ /* restore previously saved bt params */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERR((":%s att to restore not saved BTCOEX params\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+ /* set = 1, save & turn on 0 - off & restore prev settings */
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE)
+ /* Forcing bt_flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0],
+ sizeof(buf_flag7_dhcp_on));
+ else
+ /* Restoring default bt flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0],
+ sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+ struct btcoex_info *bt_local = (struct btcoex_info *)data; /* timer.data carries the btcoex_info (set in btcoex_init) */
+ WL_TRACE(("%s\n", __FUNCTION__));
+ bt_local->timer_on = 0;
+ schedule_work(&bt_local->work); /* defer the BT-coex state machine to process context */
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+ struct btcoex_info *btcx_inf;
+
+ btcx_inf = container_of(work, struct btcoex_info, work);
+
+ if (btcx_inf->timer_on) {
+ btcx_inf->timer_on = 0;
+ del_timer_sync(&btcx_inf->timer);
+ }
+
+ switch (btcx_inf->bt_state) {
+ case BT_DHCP_START:
+ /* DHCP started
+ * provide OPPORTUNITY window to get DHCP address
+ */
+ WL_TRACE(("%s bt_dhcp stm: started \n",
+ __FUNCTION__));
+ btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_OPPR_WIN_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPR_WIN:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T1 expiration\n",
+ __FUNCTION__));
+ goto btc_coex_idle;
+ }
+
+ /* DHCP is not over yet, start lowering BT priority
+ * enforce btc_params + flags if necessary
+ */
+ WL_TRACE(("%s DHCP T1:%d expired\n", __FUNCTION__,
+ BT_DHCP_OPPR_WIN_TIME));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+ btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&btcx_inf->timer,
+ jiffies + BT_DHCP_FLAG_FORCE_TIME*HZ/1000);
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("%s DHCP Done before T2 expiration\n",
+ __FUNCTION__));
+ } else {
+ /* Noo dhcp during T1+T2, restore BT priority */
+ WL_TRACE(("%s DHCP wait interval T2:%d"
+ "msec expired\n", __FUNCTION__,
+ BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ /* Restoring default bt priority */
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+
+ default:
+ WL_ERR(("%s error g_status=%d !!!\n", __FUNCTION__,
+ btcx_inf->bt_state));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(btcx_inf->dev);
+}
+
+int wl_cfg80211_btcoex_init(struct wl_priv *wl)
+{
+ struct btcoex_info *btco_inf = NULL;
+
+ btco_inf = kzalloc(sizeof(struct btcoex_info), GFP_KERNEL); /* fix: zero the struct -- timer_on/dhcp_done are read before first set */
+ if (!btco_inf)
+ return -ENOMEM;
+
+ btco_inf->bt_state = BT_DHCP_IDLE;
+ btco_inf->ts_dhcp_start = 0;
+ btco_inf->ts_dhcp_ok = 0;
+ /* Set up timer for BT */
+ btco_inf->timer_ms = 10;
+ init_timer(&btco_inf->timer);
+ btco_inf->timer.data = (ulong)btco_inf;
+ btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+ btco_inf->dev = wl->wdev->netdev;
+
+ INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+ wl->btcoex_info = btco_inf;
+ return 0;
+}
+
+void wl_cfg80211_btcoex_deinit(struct wl_priv *wl)
+{
+ if (!wl->btcoex_info)
+ return;
+
+ if (wl->btcoex_info->timer_on) { /* fix: condition was inverted ('!timer_on'), leaving an armed timer alive past kfree */
+ wl->btcoex_info->timer_on = 0;
+ del_timer_sync(&wl->btcoex_info->timer);
+ }
+
+ cancel_work_sync(&wl->btcoex_info->work);
+
+ kfree(wl->btcoex_info);
+ wl->btcoex_info = NULL;
+}
+#endif
+
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command)
+{
+
+ struct wl_priv *wl = wlcfg_drv_priv;
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+ struct btcoex_info *btco_inf = wl->btcoex_info;
+#endif /* COEX_DHCP */
+
+ /* Figure out powermode 1 or o command */
+ strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+ /* Retrieve and saved orig regs value */
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+ /* Disable PM mode during dhpc session */
+
+ /* Disable PM mode during dhpc session */
+#ifdef COEX_DHCP
+ /* Start BT timer only for SCO connection */
+ if (btcoex_is_sco_active(dev)) {
+ /* btc_params 66 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+ /* btc_params 41 0x33 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+ /* btc_params 68 0x190 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ btco_inf->bt_state = BT_DHCP_START;
+ btco_inf->timer_on = 1;
+ mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+ WL_TRACE(("%s enable BT DHCP Timer\n",
+ __FUNCTION__));
+ }
+#endif /* COEX_DHCP */
+ }
+ else if (saved_status == TRUE) {
+ WL_ERR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+ /* Restoring PM mode */
+
+#ifdef COEX_DHCP
+ /* Stop any bt timer because DHCP session is done */
+ WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (btco_inf->timer_on) {
+ btco_inf->timer_on = 0;
+ del_timer_sync(&btco_inf->timer);
+
+ if (btco_inf->bt_state != BT_DHCP_IDLE) {
+ /* need to restore original btc flags & extra btc params */
+ WL_TRACE(("%s bt->bt_state:%d\n",
+ __FUNCTION__, btco_inf->bt_state));
+ /* wake up btcoex thread to restore btlags+params */
+ schedule_work(&btco_inf->work);
+ }
+ }
+
+ /* Restoring btc_flag paramter anyway */
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif /* COEX_DHCP */
+
+ /* Restore original values */
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+ WL_ERR(("%s Unkwown yet power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ snprintf(command, 3, "OK");
+
+ return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
new file mode 100644
index 00000000000000..18b213fa57166e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h
@@ -0,0 +1,42 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+s32 dhd_cfg80211_init(struct wl_priv *wl);
+s32 dhd_cfg80211_deinit(struct wl_priv *wl);
+s32 dhd_cfg80211_down(struct wl_priv *wl);
+s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock);
+
+int wl_cfg80211_btcoex_init(struct wl_priv *wl);
+void wl_cfg80211_btcoex_deinit(struct wl_priv *wl);
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c
new file mode 100644
index 00000000000000..84c3335cb29788
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_common.c
@@ -0,0 +1,2323 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c 321870 2012-03-17 00:43:35Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+
+#include <proto/bcmevent.h>
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef WLBTAMP
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+/* Firmware and NVRAM image paths; seeded by dhd_common_init() from Kconfig
+ * (or left empty for module parameters / callers to fill in).
+ */
+char fw_path[MOD_PARAM_PATHLEN];
+char nv_path[MOD_PARAM_PATHLEN];
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];	/* alternate firmware path for SoftAP builds */
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+/* Helpers implemented elsewhere in the driver (iscan, MTU change). */
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+/* AP mode bookkeeping flags shared across the driver. */
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+
+/* Version banner returned by the "version" iovar; debug builds append the
+ * build timestamp.
+ */
+#ifdef DHD_DEBUG
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
+	__DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#endif
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+#ifdef WLBTAMP
+	IOV_HCI_CMD,		/* HCI command */
+	IOV_HCI_ACL_DATA,	/* HCI data packet */
+#endif
+#if defined(DHD_DEBUG)
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+#endif
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_HOSTREORDER_FLOWS,
+	IOV_LAST
+};
+
+/* Name -> id/type/length table consumed by bcm_iovar_lookup() from
+ * dhd_iovar_op(); entries must stay in sync with the enum above.
+ */
+const bcm_iovar_t dhd_iovars[] = {
+	{"version",	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+#ifdef WLBTAMP
+	{"HCI_cmd",	IOV_HCI_CMD,	0,	IOVT_BUFFER,	0},
+	{"HCI_ACL_data", IOV_HCI_ACL_DATA, 0,	IOVT_BUFFER,	0},
+#endif
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	IOVT_UINT32,	0 },
+	/*
+	set the proptxtstatus operation mode:
+	0 - Do not do any proptxtstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	IOVT_UINT32,	0 },
+#endif
+	{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
+	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+	{NULL, 0, 0, 0, 0 }
+};
+
+/*
+ * Initialize module-scope defaults shared by all DHD instances: seed the
+ * firmware/NVRAM paths from the build-time Kconfig values when configured,
+ * otherwise leave them empty so module parameters can supply them later.
+ * The osh handle is unused in this body.
+ */
+void
+dhd_common_init(osl_t *osh)
+{
+	/* Clear the watchdog timer */
+#ifdef CONFIG_BCMDHD_FW_PATH
+	bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_FW_PATH */
+	fw_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+	bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+#else /* CONFIG_BCMDHD_NVRAM_PATH */
+	nv_path[0] = '\0';
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+#ifdef SOFTAP
+	fw_path2[0] = '\0';
+#endif
+}
+
+/*
+ * Render a human-readable snapshot of DHD state into buf (at most buflen
+ * bytes): version banner, public state, dongle stats, bus stats, then
+ * protocol- and bus-layer dumps.  Returns BCME_BUFTOOSHORT if the buffer
+ * was exhausted, 0 otherwise.
+ */
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+	            dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+	/* strbuf->size counts down as bcm_bprintf consumes it; 0 means overflow */
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+/*
+ * Convenience wrapper around dhd_wl_ioctl(): package the discrete command
+ * arguments into a wl_ioctl_t on the stack and forward it.  The same
+ * buffer serves as both input and output.
+ */
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+{
+	wl_ioctl_t ioc = {
+		.cmd = cmd,
+		.buf = arg,
+		.len = len,
+		.set = set,
+	};
+
+	return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+}
+
+
+/*
+ * Issue a wl ioctl to the dongle through the protocol layer, serialized by
+ * the per-instance protocol lock.  The hang check runs only when the ioctl
+ * returned zero (preserved from the original logic; confirm intent against
+ * dhd_os_check_hang's contract).
+ */
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int status;
+
+	dhd_os_proto_block(dhd_pub);
+	status = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
+	if (status == 0)
+		dhd_os_check_hang(dhd_pub, ifindex, status);
+	dhd_os_proto_unblock(dhd_pub);
+
+	return status;
+}
+
+/*
+ * Execute one DHD iovar get/set that was already looked up in dhd_iovars[].
+ * vi/actionid identify the operation, params/plen carry set-qualifiers,
+ * arg/len is the in/out buffer, val_size the integer payload width.
+ * Returns a BCME_* code (0 on success).
+ *
+ * Fix: IOV_GVAL(IOV_BUS_TYPE) previously assigned BUS_TYPE_USB under
+ * #ifdef BCMDHDUSB and then unconditionally overwrote it with
+ * BUS_TYPE_SDIO, so USB builds always reported SDIO.  Now uses #else.
+ */
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	/* Preload the integer convenience value when the caller supplied one */
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		bcm_strncpy_s((char*)arg, len, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		dhd_msg_level = int_val;
+#ifdef WL_CFG80211
+		/* Enable DHD and WL logs in oneshot */
+		if (dhd_msg_level & DHD_WL_VAL)
+			wl_cfg80211_enable_trace(dhd_msg_level);
+#endif
+		break;
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* DHD_DEBUG */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		if (dhd_pub->wlfc_state) {
+			athost_wl_status_info_t *wlfc =
+				(athost_wl_status_info_t*)dhd_pub->wlfc_state;
+			wlfc_hanger_t* hanger;
+
+			memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+			hanger = (wlfc_hanger_t*)wlfc->hanger;
+			hanger->pushed = 0;
+			hanger->popped = 0;
+			hanger->failed_slotfind = 0;
+			hanger->failed_to_pop = 0;
+			hanger->failed_to_push = 0;
+		}
+#endif /* PROP_TXSTATUS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+#ifdef WLBTAMP
+	case IOV_SVAL(IOV_HCI_CMD): {
+		amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
+
+		/* sanity check: command preamble present */
+		if (len < HCI_CMD_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: command parameters are present */
+		if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_docmd(dhd_pub, cmd, len);
+		break;
+	}
+
+	case IOV_SVAL(IOV_HCI_ACL_DATA): {
+		amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
+
+		/* sanity check: HCI header present */
+		if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
+			return BCME_BUFTOOSHORT;
+
+		/* sanity check: ACL data is present */
+		if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
+			return BCME_BUFTOOSHORT;
+
+		dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
+		break;
+	}
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
+		int_val = dhd_pub->wlfc_enabled? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
+		dhd_pub->wlfc_enabled = int_val? 1 : 0;
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
+		athost_wl_status_info_t *wlfc =
+			(athost_wl_status_info_t*)dhd_pub->wlfc_state;
+		int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		if (dhd_pub->wlfc_state) {
+			athost_wl_status_info_t *wlfc =
+				(athost_wl_status_info_t*)dhd_pub->wlfc_state;
+			wlfc->proptxstatus_mode = int_val & 0xff;
+		}
+		break;
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check if its usb or sdio. */
+#ifdef BCMDHDUSB
+		int_val = BUS_TYPE_USB;
+#else
+		int_val = BUS_TYPE_SDIO;
+#endif
+		bcopy(&int_val, arg, val_size);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+	{
+		uint i = 0;
+		uint8 *ptr = (uint8 *)arg;
+		uint8 count = 0;
+
+		/* first byte of the reply is the flow count, written last */
+		ptr++;
+		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+			if (dhd_pub->reorder_bufs[i] != NULL) {
+				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
+				ptr++;
+				count++;
+			}
+		}
+		ptr = (uint8 *)arg;
+		*ptr = count;
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Record the outcome of the latest connection attempt in the module-scope
+ * dhd_conn_* globals for later retrieval by an iovar.
+ */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	    dhd_conn_event == WLC_E_PRUNE)
+		return;
+
+	dhd_conn_event = event;
+	dhd_conn_status = status;
+	dhd_conn_reason = reason;
+}
+
+/*
+ * Enqueue pkt on precedence queue q at precedence prec, evicting a
+ * lower-value packet when the queue is full.  Returns TRUE if pkt was
+ * queued, FALSE if it was refused (caller keeps ownership in that case).
+ */
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	int evict_prec = -1;	/* precedence to evict from */
+	void *victim;
+	bool discard_oldest;
+
+	/* Fast path: room in both the per-precedence queue and the queue as
+	 * a whole.
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Choose the precedence to evict from, if any */
+	if (pktq_pfull(q, prec)) {
+		evict_prec = prec;
+	} else if (pktq_full(q)) {
+		victim = pktq_peek_tail(q, &evict_prec);
+		ASSERT(victim);
+		/* never evict a higher precedence to admit a lower one */
+		if (evict_prec > prec || evict_prec < 0)
+			return FALSE;
+	}
+
+	if (evict_prec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, evict_prec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, evict_prec);
+		if (evict_prec == prec && !discard_oldest)
+			return FALSE;	/* refuse newer (incoming) packet */
+		/* Evict per the discard policy for this precedence */
+		victim = discard_oldest ?
+			pktq_pdeq(q, evict_prec) : pktq_pdeq_tail(q, evict_prec);
+		ASSERT(victim);
+		PKTFREE(dhdp->osh, victim, TRUE);
+	}
+
+	/* Enqueue */
+	victim = pktq_penq(q, prec, pkt);
+	ASSERT(victim);
+
+	return TRUE;
+}
+
+/*
+ * Look up 'name' in the DHD iovar table and dispatch the get/set to
+ * dhd_doiovar().  Returns BCME_UNSUPPORTED when the name is not in
+ * dhd_iovars[] so the caller can fall back to the protocol/bus tables.
+ */
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi;
+	uint32 actionid;
+	int val_size;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	vi = bcm_iovar_lookup(dhd_iovars, name);
+	if (vi == NULL)
+		return BCME_UNSUPPORTED;
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* Sets carry no separate params buffer; alias it to arg so the
+	 * integer/bool convenience code is common to set and get.
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	switch (vi->type) {
+	case IOVT_VOID:
+		val_size = 0;
+		break;
+	case IOVT_BUFFER:
+		val_size = len;
+		break;
+	default:
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+		break;
+	}
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	return dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+}
+
+/*
+ * Handle DHD-private ioctls (magic/version queries and iovar get/set).
+ * buf/buflen is the in/out payload; for GET_VAR/SET_VAR it holds the
+ * NUL-terminated iovar name followed by its arguments.  Returns a BCME_*
+ * code (0 on success).
+ *
+ * Fixes: DHD_GET_VERSION returned -BCME_BUFTOOSHORT (BCME codes are
+ * already negative, and DHD_GET_MAGIC above used the unnegated form);
+ * and the name scan could read one byte past buf when the name was not
+ * NUL-terminated within buflen.
+ */
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
+		char *arg;
+		uint arglen;
+
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		/* arglen == 0 means the name was not NUL-terminated within the
+		 * buffer; checking *arg in that case would read past its end.
+		 */
+		if (arglen == 0 || *arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			                        buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+			                             arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+			                             NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+			                            arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+			                            NULL, 0, arg, arglen, IOV_SET);
+		}
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+/*
+ * Pretty-print one dongle event (and any appended payload bytes) through
+ * the DHD_EVENT log macro.  Debug-only: compiled under SHOW_EVENTS.
+ * All fields are converted from network byte order before formatting.
+ */
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	BCM_REFERENCE(reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	/* linear scan of the shared bcmevent name table */
+	event_name = "UNKNOWN";
+	for (i = 0; i < (uint)bcmevent_names_size; i++)
+		if (bcmevent_names[i].event == event_type)
+			event_name = bcmevent_names[i].name;
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		       event_name, (int)status, (int)reason));
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+	case WLC_E_TRACE: {
+		/* firmware message-trace stream: validate header, report any
+		 * dropped data, then print the buffer line by line
+		 */
+		static uint32 seqnum_prev = 0;
+		msgtrace_hdr_t hdr;
+		uint32 nblost;
+		char *s, *p;
+
+		buf = (uchar *) event_data;
+		memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+		if (hdr.version != MSGTRACE_VERSION) {
+			printf("\nMACEVENT: %s [unsupported version --> "
+			       "dhd version:%d dongle version:%d]\n",
+			       event_name, MSGTRACE_VERSION, hdr.version);
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+			break;
+		}
+
+		/* There are 2 bytes available at the end of data */
+		buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+		if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+			printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+			       "discarded_bytes %d discarded_printf %d]\n",
+			       ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+		}
+
+		nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+		if (nblost > 0) {
+			printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
+			       ntoh32(hdr.seqnum), nblost);
+		}
+		seqnum_prev = ntoh32(hdr.seqnum);
+
+		/* Display the trace buffer. Advance from \n to \n to avoid display big
+		 * printf (issue with Linux printk )
+		 */
+		p = (char *)&buf[MSGTRACE_HDRLEN];
+		while ((s = strstr(p, "\n")) != NULL) {
+			*s = '\0';
+			printf("%s\n", p);
+			p = s+1;
+		}
+		printf("%s\n", p);
+
+		/* Reset datalen to avoid display below */
+		datalen = 0;
+		break;
+	}
+
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (datalen) {
+		buf = (uchar *) event_data;
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+/*
+ * Validate and dispatch one Broadcom event packet received from the dongle.
+ * Checks the BRCM OUI and event subtype, copies the (possibly unaligned)
+ * event header into *event, points *data_ptr at the payload, handles
+ * interface add/del/change and proptxstatus events internally, and pushes
+ * the raw event up to external supplicant/authenticator consumers.
+ * Sets *ifidx to the interface the event belongs to.  Returns BCME_OK or
+ * BCME_ERROR on malformed input.
+ */
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+              wl_event_msg_t *event, void **data_ptr)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	uint8 *event_data;
+	uint32 type, status, datalen;
+	uint16 flags;
+	int evlen;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* payload starts immediately after the bcm_event_t header */
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	datalen = ntoh32_ua((void *)&event->datalen);
+	evlen = datalen + sizeof(bcm_event_t);
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_event(dhd_pub->info);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+#endif
+
+	case WLC_E_IF:
+		{
+		/* interface add/delete/change notification from the dongle */
+		dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+#ifdef PROP_TXSTATUS
+		{
+			uint8* ea = pvt_data->eth.ether_dhost;
+			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+			              "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+			              ifevent->ifidx,
+			              ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
+			              ((ifevent->is_AP == 0) ? "STA":"AP "),
+			              ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+			(void)ea;
+			if (ifevent->action == WLC_E_IF_CHANGE)
+				dhd_wlfc_interface_event(dhd_pub->info,
+					eWLFC_MAC_ENTRY_ACTION_UPDATE,
+					ifevent->ifidx, ifevent->is_AP, ea);
+			else
+				dhd_wlfc_interface_event(dhd_pub->info,
+					((ifevent->action == WLC_E_IF_ADD) ?
+					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+					ifevent->ifidx, ifevent->is_AP, ea);
+
+
+			/* dhd already has created an interface by default, for 0 */
+			if (ifevent->ifidx == 0)
+				break;
+		}
+#endif /* PROP_TXSTATUS */
+
+#ifdef WL_CFG80211
+		if (wl_cfg80211_is_progress_ifchange()) {
+			DHD_ERROR(("%s: ifidx %d for %s action %d\n",
+				__FUNCTION__, ifevent->ifidx,
+				event->ifname, ifevent->action));
+			if (ifevent->action == WLC_E_IF_ADD)
+				wl_cfg80211_notify_ifchange();
+			return (BCME_OK);
+		}
+#endif /* WL_CFG80211 */
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->action == WLC_E_IF_ADD) {
+				if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
+					NULL, event->ifname,
+					event->addr.octet,
+					ifevent->flags, ifevent->bssidx)) {
+					DHD_ERROR(("%s: dhd_add_if failed!!"
+							" ifidx: %d for %s\n",
+							__FUNCTION__,
+							ifevent->ifidx,
+							event->ifname));
+					return (BCME_ERROR);
+				}
+			}
+			else if (ifevent->action == WLC_E_IF_DEL)
+				dhd_del_if(dhd_pub->info, ifevent->ifidx);
+		} else {
+#ifndef PROP_TXSTATUS
+			DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+			           __FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+		}
+		}
+		/* send up the if event: btamp user needs it */
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+#if defined(NDIS630)
+	case WLC_E_NDIS_LINK:
+		break;
+#else /* defined(NDIS630) && defined(BCMDONGLEHOST) */
+	case WLC_E_NDIS_LINK: {
+		/* temporarily present as WLC_E_LINK for the default handler,
+		 * converted back after dispatch below
+		 */
+		uint32 temp = hton32(WLC_E_LINK);
+
+		memcpy((void *)(&pvt_data->event.event_type), &temp,
+		       sizeof(pvt_data->event.event_type));
+	}
+#endif
+	/* These are what external supplicant/authenticator wants */
+	/* fall through */
+	case WLC_E_LINK:
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		/* fall through */
+	default:
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		BCM_REFERENCE(flags);
+		BCM_REFERENCE(status);
+
+		/* put it back to WLC_E_NDIS_LINK */
+		if (type == WLC_E_NDIS_LINK) {
+			uint32 temp;
+
+			temp = ntoh32_ua((void *)&event->event_type);
+			DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+
+			temp = ntoh32(WLC_E_NDIS_LINK);
+			memcpy((void *)(&pvt_data->event.event_type), &temp,
+			       sizeof(pvt_data->event.event_type));
+		}
+		break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(event, (void *)event_data);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+/*
+ * Convert every field of a dongle event message from network byte order
+ * to host order, in place.  Field order here is irrelevant; each swap is
+ * independent.
+ */
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	evt->version = ntoh16(evt->version);
+	evt->flags = ntoh16(evt->flags);
+	evt->event_type = ntoh32(evt->event_type);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+}
+
+/*
+ * Hex-dump len bytes of pbuf to the console, bytes_per_line per row
+ * (0 means everything on one row), bytes separated by ':'.  Compiles to a
+ * no-op unless DHD_DEBUG is defined.
+ */
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	const unsigned char *bytes = pbuf;
+	int per_line = (bytes_per_line == 0) ? len : bytes_per_line;
+	int col = 0;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", bytes[i]);
+		if (++col == per_line) {
+			printf("\n");
+			col = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+
+/* Convert a user-supplied "0x..."/"0X..." hex string into raw bytes in dst.
+ * Returns the number of bytes written, or -1 on a format error.  The caller
+ * must size dst for strlen(src)/2 bytes; no bound is passed in.
+ */
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int nbytes;
+
+	if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src += 2;	/* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (nbytes = 0; *src != '\0'; nbytes++, src += 2) {
+		/* convert one two-digit hex pair at a time */
+		char pair[3] = { src[0], src[1], '\0' };
+		dst[nbytes] = (uint8)strtoul(pair, NULL, 16);
+	}
+	return nbytes;
+}
+
/* Enable or disable a previously installed packet filter, then program
 * the global filter mode.
 *
 * 'arg'         - string whose first whitespace-separated token is the
 *                 numeric filter id (only one token is consumed here).
 * 'enable'      - nonzero enables the filter, zero disables it.
 * 'master_mode' - value written to the "pkt_filter_mode" iovar.
 *
 * Failures are only traced; no status is returned to the caller.
 */
void
dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
{
	char *argv[8];
	int i = 0;
	const char *str;
	int buf_len;
	int str_len;
	char *arg_save = 0, *arg_org = 0;
	int rc;
	char buf[128];
	wl_pkt_filter_enable_t enable_parm;
	wl_pkt_filter_enable_t * pkt_filterp;

	if (!arg)
		return;

	/* Work on a private copy: bcmstrtok() modifies the string. */
	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
		goto fail;
	}
	arg_org = arg_save;
	memcpy(arg_save, arg, strlen(arg) + 1);

	argv[i] = bcmstrtok(&arg_save, " ", 0);

	i = 0;
	if (argv[i] == NULL) {
		DHD_ERROR(("No args provided\n"));
		goto fail;
	}

	/* Build "pkt_filter_enable\0<wl_pkt_filter_enable_t>" in buf. */
	str = "pkt_filter_enable";
	str_len = strlen(str);
	bcm_strncpy_s(buf, sizeof(buf), str, str_len);
	buf[str_len] = '\0';
	buf_len = str_len + 1;

	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);

	/* Parse packet filter id. */
	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));

	/* Parse enable/disable value. */
	enable_parm.enable = htod32(enable);

	buf_len += sizeof(enable_parm);
	/* memcpy rather than direct struct assignment: buf + str_len + 1
	 * may be misaligned for wl_pkt_filter_enable_t. */
	memcpy((char *)pkt_filterp,
	       &enable_parm,
	       sizeof(enable_parm));

	/* Enable/disable the specified filter. */
	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc)
		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
			__FUNCTION__, arg, rc));
	else
		DHD_TRACE(("%s: successfully added pktfilter %s\n",
			__FUNCTION__, arg));

	/* Control the master mode */
	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc)
		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
			__FUNCTION__, arg, rc));

fail:
	if (arg_org)
		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc;
+ uint32 mask_size;
+ uint32 pattern_size;
+ char *argv[8], * buf = 0;
+ int i = 0;
+ char *arg_save = 0, *arg_org = 0;
+#define BUF_SIZE 2048
+
+ if (!arg)
+ return;
+
+ if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ arg_org = arg_save;
+
+ if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ memcpy(arg_save, arg, strlen(arg) + 1);
+
+ if (strlen(arg) > BUF_SIZE) {
+ DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+ goto fail;
+ }
+
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+ while (argv[i++])
+ argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+ i = 0;
+ if (argv[i] == NULL) {
+ DHD_ERROR(("No args provided\n"));
+ goto fail;
+ }
+
+ str = "pkt_filter_add";
+ str_len = strlen(str);
+ bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+ buf[ str_len ] = '\0';
+ buf_len = str_len + 1;
+
+ pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+ /* Parse packet filter id. */
+ pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Polarity not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter polarity. */
+ pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Filter type not provided\n"));
+ goto fail;
+ }
+
+ /* Parse filter type. */
+ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Offset not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter offset. */
+ pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Bitmask not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter mask. */
+ mask_size =
+ htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+ if (argv[++i] == NULL) {
+ DHD_ERROR(("Pattern not provided\n"));
+ goto fail;
+ }
+
+ /* Parse pattern filter pattern. */
+ pattern_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+ if (mask_size != pattern_size) {
+ DHD_ERROR(("Mask and pattern not the same size\n"));
+ goto fail;
+ }
+
+ pkt_filter.u.pattern.size_bytes = mask_size;
+ buf_len += WL_PKT_FILTER_FIXED_LEN;
+ buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+ /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
+ ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
+ ** guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)pkt_filterp,
+ &pkt_filter,
+ WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+ rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+ rc = rc >= 0 ? 0 : rc;
+
+ if (rc)
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __FUNCTION__, arg, rc));
+ else
+ DHD_TRACE(("%s: successfully added pktfilter %s\n",
+ __FUNCTION__, arg));
+
+fail:
+ if (arg_org)
+ MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+ if (buf)
+ MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ __FUNCTION__, arp_mode, retcode));
+ else
+ DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+ char iovbuf[32];
+ int retcode;
+
+ bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ retcode = retcode >= 0 ? 0 : retcode;
+ if (retcode)
+ DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+ __FUNCTION__, arp_enable, retcode));
+ else
+ DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+ __FUNCTION__, arp_enable));
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ int iov_len = 0;
+ char iovbuf[128];
+
+ if (dhd == NULL) return;
+
+ iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
/* Add one IPv4 address to the firmware ARP-offload host-IP table via
 * the "arp_hostip" iovar.
 * NOTE(review): the expected byte order of 'ipaddr' is not visible here
 * — confirm against callers.  The outcome is only traced; no status is
 * returned.
 */
void
dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr)
{
	int iov_len = 0;
	char iovbuf[32];
	int retcode;

	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);

	if (retcode)
		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
			__FUNCTION__, retcode));
	else
		DHD_TRACE(("%s: sARP H ipaddr entry added \n",
			__FUNCTION__));
}
+
/* Read the firmware ARP-offload host-IP table into 'buf' (treated as an
 * array of uint32 addresses) and zero every slot after the first zero
 * entry so stale data past the terminator is cleared.
 * Returns 0 on success, -1 on a NULL buffer or ioctl failure.
 * NOTE(review): the cleanup loop walks MAX_IPV4_ENTRIES words regardless
 * of 'buflen' — callers must supply at least MAX_IPV4_ENTRIES * 4 bytes;
 * confirm at the call sites.
 */
int
dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
{
	int retcode, i;
	int iov_len;
	uint32 *ptr32 = buf;
	bool clr_bottom = FALSE;

	if (!buf)
		return -1;

	/* Build the "arp_hostip" query in place, then fetch the table. */
	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
	BCM_REFERENCE(iov_len);
	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, 0);

	if (retcode) {
		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
			__FUNCTION__, retcode));

		return -1;
	}

	/* clean up the buf, ascii reminder */
	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
		if (!clr_bottom) {
			if (*ptr32 == 0)
				clr_bottom = TRUE;
		} else {
			*ptr32 = 0;
		}
		ptr32++;
	}

	return 0;
}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+ switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+ case WLC_E_BTA_HCI_EVENT:
+ break;
+#endif /* WLBTAMP */
+ default:
+ break;
+ }
+
+ /* Call per-port handler. */
+ dhd_sendup_event(dhdp, event, data);
+}
+
+#ifdef SIMPLE_ISCAN
+
/* NOTE(review): appears to track the id of the thread driving iscan;
 * only declared here — confirm its users. */
uint iscan_thread_id = 0;
/* Head of the singly-linked list of per-iteration iscan result buffers;
 * list manipulation below runs under dhd_iscan_lock(). */
iscan_buf_t * iscan_chain = 0;
+
/* Allocate a new iscan result node and append it to the tail of the
 * list headed by *iscanbuf (installing it as the head when the list is
 * empty).  Runs entirely under dhd_iscan_lock().
 * Returns the new node, or NULL if allocation failed.
 */
iscan_buf_t *
dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
{
	iscan_buf_t *iscanbuf_alloc = 0;
	iscan_buf_t *iscanbuf_head;

	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
	dhd_iscan_lock();

	iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
	if (iscanbuf_alloc == NULL)
		goto fail;

	iscanbuf_alloc->next = NULL;
	iscanbuf_head = *iscanbuf;

	DHD_ISCAN(("%s: addr of allocated node = 0x%X"
		"addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
		__FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));

	if (iscanbuf_head == NULL) {
		*iscanbuf = iscanbuf_alloc;
		DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
		/* Not an error: "fail" doubles as the common unlock-and-return
		 * path once the new node has become the list head. */
		goto fail;
	}

	while (iscanbuf_head->next)
		iscanbuf_head = iscanbuf_head->next;

	iscanbuf_head->next = iscanbuf_alloc;

fail:
	dhd_iscan_unlock();
	return iscanbuf_alloc;
}
+
+void
+dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
+{
+ iscan_buf_t *iscanbuf_free = 0;
+ iscan_buf_t *iscanbuf_prv = 0;
+ iscan_buf_t *iscanbuf_cur;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
+
+ dhd_iscan_lock();
+
+ iscanbuf_cur = iscan_chain;
+
+ /* If iscan_delete is null then delete the entire
+ * chain or else delete specific one provided
+ */
+ if (!iscan_delete) {
+ while (iscanbuf_cur) {
+ iscanbuf_free = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ iscanbuf_free->next = 0;
+ MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
+ }
+ iscan_chain = 0;
+ } else {
+ while (iscanbuf_cur) {
+ if (iscanbuf_cur == iscan_delete)
+ break;
+ iscanbuf_prv = iscanbuf_cur;
+ iscanbuf_cur = iscanbuf_cur->next;
+ }
+ if (iscanbuf_prv)
+ iscanbuf_prv->next = iscan_delete->next;
+
+ iscan_delete->next = 0;
+ MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
+
+ if (!iscanbuf_prv)
+ iscan_chain = 0;
+ }
+ dhd_iscan_unlock();
+}
+
/* Return the head of the accumulated iscan result chain (may be NULL).
 * NOTE(review): walkers presumably take dhd_iscan_lock() — confirm at
 * the call sites. */
iscan_buf_t *
dhd_iscan_result_buf(void)
{
	return iscan_chain;
}
+
+int
+dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
+{
+ int rc = -1;
+ dhd_pub_t *dhd = dhd_bus_pub(dhdp);
+ char *buf;
+ char iovar[] = "iscan";
+ uint32 allocSize = 0;
+ wl_ioctl_t ioctl;
+
+ if (pParams) {
+ allocSize = (size + strlen(iovar) + 1);
+ if ((allocSize < size) || (allocSize < strlen(iovar)))
+ {
+ DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
+ __FUNCTION__, allocSize, size, strlen(iovar)));
+ goto cleanUp;
+ }
+ buf = MALLOC(dhd->osh, allocSize);
+
+ if (buf == NULL)
+ {
+ DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
+ goto cleanUp;
+ }
+ ioctl.cmd = WLC_SET_VAR;
+ bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
+ rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize);
+ }
+
+cleanUp:
+ if (buf) {
+ MFREE(dhd->osh, buf, allocSize);
+ }
+
+ return rc;
+}
+
/* Fetch one batch of iscan results from the firmware into a freshly
 * allocated node on iscan_chain.  On success, *scan_count receives the
 * number of results in the batch and the dongle's iscan status word is
 * returned; -1 is returned when no node could be allocated.
 */
static int
dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
{
	wl_iscan_results_t *list_buf;
	wl_iscan_results_t list;
	wl_scan_results_t *results;
	iscan_buf_t *iscan_cur;
	int status = -1;
	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
	int rc;
	wl_ioctl_t ioctl;

	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));

	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
	if (!iscan_cur) {
		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
		/* Allocation failed: drop any partial chain and abort the scan. */
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		goto fail;
	}

	dhd_iscan_lock();

	/* Pre-fill the reply area with an empty, fixed-size result header. */
	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
	results = &list_buf->results;
	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
	results->version = 0;
	results->count = 0;

	/* Query "iscanresults"; the reply lands in iscan_cur->iscan_buf. */
	memset(&list, 0, sizeof(list));
	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
	bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
	ioctl.cmd = WLC_GET_VAR;
	ioctl.set = FALSE;
	/* NOTE(review): rc is never checked — on ioctl failure the zeroed
	 * header yields count 0 and a status of 0; confirm this fallback is
	 * intended. */
	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);

	results->buflen = dtoh32(results->buflen);
	results->version = dtoh32(results->version);
	*scan_count = results->count = dtoh32(results->count);
	status = dtoh32(list_buf->status);
	DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));

	dhd_iscan_unlock();

	if (!(*scan_count)) {
		/* TODO: race condition when FLUSH already called */
		dhd_iscan_free_buf(dhdp, 0);
	}
fail:
	return status;
}
+
+#endif /* SIMPLE_ISCAN */
+
+/*
+ * returns = TRUE if associated, FALSE if not associated
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf)
+{
+ char bssid[6], zbuf[6];
+ int ret = -1;
+
+ bzero(bssid, 6);
+ bzero(zbuf, 6);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+ DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+ if (ret == BCME_NOTASSOCIATED) {
+ DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+ }
+
+ if (ret < 0)
+ return FALSE;
+
+ if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+ /* STA is assocoated BSSID is non zero */
+
+ if (bss_buf) {
+ /* return bss if caller provided buf */
+ memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+ }
+ return TRUE;
+ } else {
+ DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+ return FALSE;
+ }
+}
+
+
/* Function to estimate possible DTIM_SKIP value.
 * Computes a beacon-listen multiplier (bcn_li_dtim) that fits inside
 * LISTEN_INTERVAL given the associated AP's DTIM period: defaults to 3
 * (or the configured dhd->dtim_skip), drops to 1 when the AP's DTIM is
 * already larger than the listen interval, and otherwise is clamped so
 * bcn_li_dtim * DTIM <= LISTEN_INTERVAL.  Returns the chosen multiplier.
 */
int
dhd_get_dtim_skip(dhd_pub_t *dhd)
{
	int bcn_li_dtim;
	int ret = -1;
	int dtim_assoc = 0;

	/* Default to skipping 3 DTIMs when no sensible value is configured. */
	if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
		bcn_li_dtim = 3;
	else
		bcn_li_dtim = dhd->dtim_skip;

	/* Check if associated */
	if (dhd_is_associated(dhd, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* if assoc grab ap's dtim value */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		&dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));

	/* AP reported no DTIM period: keep the default/configured value. */
	if (dtim_assoc == 0) {
		goto exit;
	}

	/* check if sta listen interval fits into AP dtim */
	if (dtim_assoc > LISTEN_INTERVAL) {
		/* AP DTIM too big for our listen interval: no DTIM skipping */
		bcn_li_dtim = 1;
		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
			__FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
		goto exit;
	}

	if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
		/* Round dtim_skip down to fit into the STA's listen interval */
		bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
		DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
	}

exit:
	return bcn_li_dtim;
}
+
+/* Check if HostAPD or WFD mode setup */
+bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd)
+{
+#ifdef WL_CFG80211
+ if (((dhd->op_mode & HOSTAPD_MASK) == HOSTAPD_MASK) ||
+ ((dhd->op_mode & WFD_MASK) == WFD_MASK))
+ return TRUE;
+ else
+#endif /* WL_CFG80211 */
+ return FALSE;
+}
+
+#if defined(PNO_SUPPORT)
/* Disable PNO in the firmware ("pfn" = 0) and then flush its scan list
 * ("pfnclear").  Returns the last ioctl/iovar status; negative on error.
 */
int
dhd_pno_clean(dhd_pub_t *dhd)
{
	char iovbuf[128];
	int pfn_enabled = 0;
	int iov_len = 0;
	int ret;

	/* Disable pfn */
	iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) {
		/* clear pfn */
		iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
		if (iov_len) {
			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
				iov_len, TRUE, 0)) < 0) {
				DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
			}
		}
		else {
			/* iovar construction failed (did not fit in iovbuf) */
			ret = -1;
			DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
		}
	}
	else
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));

	return ret;
}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
+{
+ char iovbuf[128];
+ int ret = -1;
+
+ if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ return ret;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (ret);
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+
+ if ((pfn_enabled) && (dhd_is_associated(dhd, NULL) == TRUE)) {
+ DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__));
+ return ret;
+ }
+
+ /* Enable/disable PNO */
+ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ dhd->pno_enable = pfn_enabled;
+ DHD_TRACE(("%s set pno as %s\n",
+ __FUNCTION__, dhd->pno_enable ? "Enable" : "Disable"));
+ }
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
+
+ return ret;
+}
+
+/* Function to execute combined scan */
+int
+dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr,
+ int pno_repeat, int pno_freq_expo_max)
+{
+ int err = -1;
+ char iovbuf[128];
+ int k, i;
+ wl_pfn_param_t pfn_param;
+ wl_pfn_t pfn_element;
+ uint len = 0;
+
+ DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
+
+ if ((!dhd) && (!ssids_local)) {
+ DHD_ERROR(("%s error exit\n", __FUNCTION__));
+ err = -1;
+ return err;
+ }
+
+ if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
+ return (err);
+
+ /* Check for broadcast ssid */
+ for (k = 0; k < nssid; k++) {
+ if (!ssids_local[k].SSID_len) {
+ DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
+ return err;
+ }
+ }
+/* #define PNO_DUMP 1 */
+#ifdef PNO_DUMP
+ {
+ int j;
+ for (j = 0; j < nssid; j++) {
+ DHD_ERROR(("%d: scan for %s size =%d\n", j,
+ ssids_local[j].SSID, ssids_local[j].SSID_len));
+ }
+ }
+#endif /* PNO_DUMP */
+
+ /* clean up everything */
+ if ((err = dhd_pno_clean(dhd)) < 0) {
+ DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
+ return err;
+ }
+ memset(iovbuf, 0, sizeof(iovbuf));
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ memset(&pfn_element, 0, sizeof(pfn_element));
+
+ /* set pfn parameters */
+ pfn_param.version = htod32(PFN_VERSION);
+ pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
+
+ /* check and set extra pno params */
+ if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
+ pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = (uchar) (pno_repeat);
+ pfn_param.exp = (uchar) (pno_freq_expo_max);
+ }
+ /* set up pno scan fr */
+ if (scan_fr != 0)
+ pfn_param.scan_freq = htod32(scan_fr);
+
+ if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
+ DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
+ return err;
+ }
+ if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
+ DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ return err;
+ }
+
+ len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
+ if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s pfn_set failed for error=%d\n",
+ __FUNCTION__, err));
+ return err;
+ }
+
+ /* set all pfn ssid */
+ for (i = 0; i < nssid; i++) {
+
+ pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+ pfn_element.auth = (DOT11_OPEN_SYSTEM);
+ pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+ pfn_element.wsec = htod32(0);
+ pfn_element.infra = htod32(1);
+ pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
+ pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
+
+ if ((len =
+ bcm_mkiovar("pfn_add", (char *)&pfn_element,
+ sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
+ if ((err =
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
+ DHD_ERROR(("%s failed for i=%d error=%d\n",
+ __FUNCTION__, i, err));
+ return err;
+ }
+ else
+ DHD_TRACE(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n",
+ __FUNCTION__, pfn_param.scan_freq,
+ pfn_param.repeat, pfn_param.exp));
+ }
+ else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
+ }
+
+ /* Enable PNO */
+ /* dhd_pno_enable(dhd, 1); */
+ return err;
+}
+
+int
+dhd_pno_get_status(dhd_pub_t *dhd)
+{
+ int ret = -1;
+
+ if (!dhd)
+ return ret;
+ else
+ return (dhd->pno_enable);
+}
+
+#endif /* OEM_ANDROID && PNO_SUPPORT */
+
+#if defined(KEEP_ALIVE)
/* Arm the firmware null-packet keep-alive (id 0) with period
 * KEEP_ALIVE_PERIOD via the "mkeep_alive" iovar.  Refused in AP/WFD
 * mode.  Returns the ioctl status; negative on error.
 */
int dhd_keep_alive_onoff(dhd_pub_t *dhd)
{
	char buf[256];
	const char *str;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
	int buf_len;
	int str_len;
	int res = -1;

	if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
		return (res);

	DHD_TRACE(("%s execution\n", __FUNCTION__));

	/* Build "mkeep_alive\0<wl_mkeep_alive_pkt_t>" in buf. */
	str = "mkeep_alive";
	str_len = strlen(str);
	strncpy(buf, str, str_len);
	buf[ str_len ] = '\0';
	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
	mkeep_alive_pkt.period_msec = KEEP_ALIVE_PERIOD;
	buf_len = str_len + 1;
	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
	/* Setup keep alive zero for null packet generation */
	mkeep_alive_pkt.keep_alive_id = 0;
	mkeep_alive_pkt.len_bytes = 0;
	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
	/* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
	 * guarantee that the buffer is properly aligned.
	 */
	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);

	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);

	return res;
}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+ int input_size, int *bytes_left)
+{
+ char* str = *list_str;
+ uint16 short_temp;
+ uint32 int_temp;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ /* Clean all dest bytes */
+ memset(dst, 0, dst_size);
+ while (*bytes_left > 0) {
+
+ if (str[0] != token) {
+ DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+ __FUNCTION__, token, str[0], *bytes_left));
+ return -1;
+ }
+
+ *bytes_left -= 1;
+ str += 1;
+
+ if (input_size == 1) {
+ memcpy(dst, str, input_size);
+ }
+ else if (input_size == 2) {
+ memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
+ input_size);
+ }
+ else if (input_size == 4) {
+ memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
+ input_size);
+ }
+
+ *bytes_left -= input_size;
+ str += input_size;
+ *list_str = str;
+ return 1;
+ }
+ return 1;
+}
+
+/*
+ * channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+ int channel_num, int *bytes_left)
+{
+ char* str = *list_str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+ *list_str = str;
+ DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+ /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* All channels */
+ channel_list[idx] = 0x0;
+ }
+ else {
+ channel_list[idx] = (uint16)str[0];
+ DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+ }
+ *bytes_left -= 1;
+ str += 1;
+
+ if (idx++ > 255) {
+ DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/*
+ * SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+ char* str;
+ int idx = 0;
+
+ if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+ DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+ return -1;
+ }
+ str = *list_str;
+ while (*bytes_left > 0) {
+
+ if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+ *list_str = str;
+ DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+ return idx;
+ }
+
+ /* Get proper CSCAN_TLV_TYPE_SSID_IE */
+ *bytes_left -= 1;
+ str += 1;
+
+ if (str[0] == 0) {
+ /* Broadcast SSID */
+ ssid[idx].SSID_len = 0;
+ memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+ *bytes_left -= 1;
+ str += 1;
+
+ DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+ }
+ else if (str[0] <= DOT11_MAX_SSID_LEN) {
+ /* Get proper SSID size */
+ ssid[idx].SSID_len = str[0];
+ *bytes_left -= 1;
+ str += 1;
+
+ /* Get SSID */
+ if (ssid[idx].SSID_len > *bytes_left) {
+ DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+ __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+ return -1;
+ }
+
+ memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+ *bytes_left -= ssid[idx].SSID_len;
+ str += ssid[idx].SSID_len;
+
+ DHD_TRACE(("%s :size=%d left=%d\n",
+ (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+ }
+ else {
+ DHD_ERROR(("### SSID size more that %d\n", str[0]));
+ return -1;
+ }
+
+ if (idx++ > max) {
+ DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+ return -1;
+ }
+ }
+
+ *list_str = str;
+ return idx;
+}
+
+/* Parse a comma-separated list from list_str into ssid array, starting
+ * at index idx. Max specifies size of the ssid array. Parses ssids
+ * and returns updated idx; if idx >= max not all fit, the excess have
+ * not been copied. Returns -1 on empty string, or on ssid too long.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+ char* str, *ptr;
+
+ if ((list_str == NULL) || (*list_str == NULL))
+ return -1;
+
+ for (str = *list_str; str != NULL; str = ptr) {
+
+ /* check for next TAG */
+ if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+ *list_str = str + strlen(GET_CHANNEL);
+ return idx;
+ }
+
+ if ((ptr = strchr(str, ',')) != NULL) {
+ *ptr++ = '\0';
+ }
+
+ if (strlen(str) > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+ return -1;
+ }
+
+ if (strlen(str) == 0)
+ ssid[idx].SSID_len = 0;
+
+ if (idx < max) {
+ bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str);
+ ssid[idx].SSID_len = strlen(str);
+ }
+ idx++;
+ }
+ return idx;
+}
+
/*
 * Parse channel list from iwpriv CSCAN.
 * Reads decimal channel numbers (separated by spaces/commas) from
 * *list_str into channel_list until the GET_NPROBE tag is reached,
 * storing at most channel_num entries.  Advances *list_str and returns
 * the number of channels parsed, or -1 on a parse error or overflow.
 */
int
wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
{
	int num;
	int val;
	char* str;
	char* endptr = NULL;

	if ((list_str == NULL)||(*list_str == NULL))
		return -1;

	str = *list_str;
	num = 0;
	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
		val = (int)strtoul(str, &endptr, 0);
		if (endptr == str) {
			printf("could not parse channel number starting at"
				" substring \"%s\" in list:\n%s\n",
				str, *list_str);
			return -1;
		}
		/* Skip the separators behind the number just consumed. */
		str = endptr + strspn(endptr, " ,");

		/* Capacity check precedes the store, so channel_list cannot
		 * overflow. */
		if (num == channel_num) {
			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
				channel_num, *list_str));
			return -1;
		}

		channel_list[num++] = (uint16)val;
	}
	*list_str = str;
	return num;
}
diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
new file mode 100644
index 00000000000000..9a9d182e6b8e03
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c
@@ -0,0 +1,293 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c 291086 2011-10-21 01:17:24Z $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern void bcm_wlan_power_off(int);
+extern void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+#if defined(CUSTOMER_HW2)
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#else
+int wifi_set_power(int on, unsigned long msec) { return -1; }
+int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
+int wifi_get_mac_addr(unsigned char *buf) { return -1; }
+void *wifi_get_country_code(char *ccode) { return NULL; }
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#endif /* CUSTOMER_HW2 */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC) */
+
+#ifdef CUSTOMER_HW3
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function will return:
+ * 1) return : Host gpio interrupt number per customer platform
+ * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge
+ *
+ * NOTE :
+ * Customer should check his platform definitions
+ * and his Host Interrupt spec
+ * to figure out the proper setting for his platform.
+ * Broadcom provides just reference settings as example.
+ *
+ */
+int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+{
+ int host_oob_irq = 0;
+
+#ifdef CUSTOMER_HW2
+ host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
+
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW
+ host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
+#elif defined CUSTOMER_HW3
+ gpio_request(dhd_oob_gpio_num, "oob irq");
+ host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+ gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW */
+#endif /* CUSTOMER_HW2 */
+
+ return (host_oob_irq);
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Customer function to control hw specific wlan gpios */
+void
+dhd_customer_gpio_wlan_ctrl(int onoff)
+{
+ switch (onoff) {
+ case WLAN_RESET_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(0, 0);
+#endif
+ WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+ break;
+
+ case WLAN_RESET_ON:
+ WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(2);
+#endif /* CUSTOMER_HW */
+#ifdef CUSTOMER_HW2
+ wifi_set_power(1, 0);
+#endif
+ WL_ERROR(("=========== WLAN going back to live ========\n"));
+ break;
+
+ case WLAN_POWER_OFF:
+ WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_off(1);
+#endif /* CUSTOMER_HW */
+ break;
+
+ case WLAN_POWER_ON:
+ WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+ __FUNCTION__));
+#ifdef CUSTOMER_HW
+ bcm_wlan_power_on(1);
+ /* Lets customer power to get stable */
+ OSL_DELAY(200);
+#endif /* CUSTOMER_HW */
+ break;
+ }
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+ int ret = 0;
+
+ WL_TRACE(("%s Enter\n", __FUNCTION__));
+ if (!buf)
+ return -EINVAL;
+
+ /* Customer access to MAC address stored outside of DHD driver */
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ ret = wifi_get_mac_addr(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+ /* EXAMPLE code */
+ {
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+ bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+ }
+#endif /* EXAMPLE_GET_MAC */
+
+ return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+#ifdef EXAMPLE_TABLE
+ {"", "XY", 4}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */
+ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */
+ {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */
+ {"AT", "EU", 5},
+ {"BE", "EU", 5},
+ {"BG", "EU", 5},
+ {"CY", "EU", 5},
+ {"CZ", "EU", 5},
+ {"DK", "EU", 5},
+ {"EE", "EU", 5},
+ {"FI", "EU", 5},
+ {"FR", "EU", 5},
+ {"DE", "EU", 5},
+ {"GR", "EU", 5},
+ {"HU", "EU", 5},
+ {"IE", "EU", 5},
+ {"IT", "EU", 5},
+ {"LV", "EU", 5},
+ {"LI", "EU", 5},
+ {"LT", "EU", 5},
+ {"LU", "EU", 5},
+ {"MT", "EU", 5},
+ {"NL", "EU", 5},
+ {"PL", "EU", 5},
+ {"PT", "EU", 5},
+ {"RO", "EU", 5},
+ {"SK", "EU", 5},
+ {"SI", "EU", 5},
+ {"ES", "EU", 5},
+ {"SE", "EU", 5},
+ {"GB", "EU", 5},
+ {"KR", "XY", 3},
+ {"AU", "XY", 3},
+ {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */
+ {"TW", "XY", 3},
+ {"AR", "XY", 3},
+ {"MX", "XY", 3},
+ {"IL", "IL", 0},
+ {"CH", "CH", 0},
+ {"TR", "TR", 0},
+ {"NO", "NO", 0},
+#endif /* EXAMPLE_TABLE */
+};
+
+
+/* Customized Locale convertor
+* input : ISO 3166-1 country abbreviation
+* output: customized cspec
+*/
+void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+ struct cntry_locales_custom *cloc_ptr;
+
+ if (!cspec)
+ return;
+
+ cloc_ptr = wifi_get_country_code(country_iso_code);
+ if (cloc_ptr) {
+ strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = cloc_ptr->custom_locale_rev;
+ }
+ return;
+#else
+ int size, i;
+
+ size = ARRAYSIZE(translate_custom_table);
+
+ if (cspec == 0)
+ return;
+
+ if (size == 0)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+ memcpy(cspec->ccode,
+ translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[i].custom_locale_rev;
+ return;
+ }
+ }
+#ifdef EXAMPLE_TABLE
+ /* if no country code matched return first universal code from translate_custom_table */
+ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ);
+ cspec->rev = translate_custom_table[0].custom_locale_rev;
+#endif /* EXAMPLE_TABLE */
+ return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h
new file mode 100644
index 00000000000000..2b40d2738c9db4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h
@@ -0,0 +1,110 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h 308299 2012-01-14 01:36:58Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+ printf args;} while (0)
+#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+
+#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL)
+
+#else /* defined(BCMDBG) || defined(DHD_DEBUG) */
+
+#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+
+#define DHD_ERROR_ON() 0
+#define DHD_TRACE_ON() 0
+#define DHD_INFO_ON() 0
+#define DHD_DATA_ON() 0
+#define DHD_CTL_ON() 0
+#define DHD_TIMER_ON() 0
+#define DHD_HDRS_ON() 0
+#define DHD_BYTES_ON() 0
+#define DHD_INTR_ON() 0
+#define DHD_GLOM_ON() 0
+#define DHD_EVENT_ON() 0
+#define DHD_BTA_ON() 0
+#define DHD_ISCAN_ON() 0
+#define DHD_ARPOE_ON() 0
+#define DHD_REORDER_ON() 0
+#endif
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c
new file mode 100644
index 00000000000000..22e95cbcbd432c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.c
@@ -0,0 +1,5327 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c 324874 2012-03-30 18:29:52Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#ifdef WLBTAMP
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dhd_bta.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200 /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
+#define TSMAX 1000 /* max no. of timing record kept */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+ uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add);
+static int dhd_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr);
+
+static struct notifier_block dhd_notifier = {
+ .notifier_call = dhd_device_event
+};
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#include <dhd_bus.h>
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+ return "";
+}
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(CONFIG_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t g_wl_iw_params;
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
+extern int dhd_get_dtim_skip(dhd_pub_t *dhd);
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ dhd_if_state_t state; /* interface state */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool set_multicast;
+ bool event2cfg80211; /* To determine if pass event to cfg80211 */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+ uint32 low;
+ uint32 high;
+} tsf_t;
+
+typedef struct {
+ uint32 last_cycle;
+ uint32 last_sec;
+ uint32 last_tsf;
+ uint32 coef; /* scaling factor */
+ uint32 coefdec1; /* first decimal */
+ uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif /* WLMEDIA_HTSF */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(CONFIG_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+ dhd_pub_t pub;
+
+ /* For supporting multiple interfaces */
+ dhd_if_t *iflist[DHD_MAX_IFS];
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+ htsf_t htsf;
+#endif
+ wait_queue_head_t ioctl_resp_wait;
+ struct timer_list timer;
+ bool wd_timer_valid;
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t dhd_lock;
+#ifdef DHDTHREAD
+ /* Thread based operation */
+ bool threads_only;
+ struct semaphore sdsem;
+
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+#endif /* DHDTHREAD */
+ bool dhd_tasklet_create;
+ tsk_ctl_t thr_sysioc_ctl;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+#endif
+ spinlock_t wakelock_spinlock;
+ int wakelock_counter;
+ int wakelock_timeout_enable;
+
+ /* Thread to issue ioctl for multicast */
+ unsigned char set_macaddress;
+ struct ether_addr macvalue;
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+ void *rpc_th;
+ void *rpc_osh;
+ struct timer_list rpcth_timer;
+ bool rpcth_timer_active;
+ bool fdaggr;
+#endif
+} dhd_info_t;
+
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+int op_mode = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+extern int net_os_send_hang_message(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+struct semaphore dhd_registration_sem;
+struct semaphore dhd_chipup_sem;
+
+#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* Spawn a thread for system ioctls (set mac, set mcast) */
+uint dhd_sysioc = TRUE;
+module_param(dhd_sysioc, uint, 0);
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+
+/* Watchdog interval */
+uint dhd_watchdog_ms = 10;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+uint dhd_arp_mode = 0xb;
+module_param(dhd_arp_mode, uint, 0);
+
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+uint dhd_master_mode = TRUE;
+module_param(dhd_master_mode, uint, 1);
+
+#ifdef DHDTHREAD
+/* Watchdog thread priority, -1 to use kernel timer */
+int dhd_watchdog_prio = 97;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int dhd_dpc_prio = 98;
+module_param(dhd_dpc_prio, int, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+extern int dhd_dongle_memsize;
+module_param(dhd_dongle_memsize, int, 0);
+#endif /* DHDTHREAD */
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network inteface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#else
+#define DHD_COMPILED
+#endif /* DHD_DEBUG */
+
+static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+#ifdef DHD_DEBUG
+"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
+#endif
+;
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ dhd_mmc_suspend = TRUE;
+ ret = NOTIFY_OK;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ dhd_mmc_suspend = FALSE;
+ ret = NOTIFY_OK;
+ break;
+ }
+ smp_mb();
+#endif
+ return ret;
+}
+
+static struct notifier_block dhd_sleep_pm_notifier = {
+ .notifier_call = dhd_sleep_pm_callback,
+ .priority = 0
+};
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+ DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
+ /* 1 - Enable packet filter, only allow unicast packet to send up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable) {
+ int i;
+
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
+ }
+#endif
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+ int power_mode = PM_MAX;
+ /* wl_pkt_filter_enable_t enable_parm; */
+ char iovbuf[32];
+ int bcn_li_dtim = 3;
+ uint roamvar = 1;
+
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+ __FUNCTION__, value, dhd->in_suspend));
+
+ if (dhd && dhd->up) {
+ if (value && dhd->in_suspend) {
+
+ /* Kernel suspended */
+ DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* Enable packet filter, only allow unicast packet to send up */
+ dhd_set_packet_filter(1, dhd);
+
+ /* If DTIM skip is set up as default, force it to wake
+ * each third DTIM for better power savings. Note that
+ * one side effect is a chance to miss BC/MC packet.
+ */
+ bcn_li_dtim = dhd_get_dtim_skip(dhd);
+ bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* Disable firmware roaming during suspend */
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ } else {
+
+ /* Kernel resumed */
+ DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
+
+ power_mode = PM_FAST;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+
+ /* disable pkt filter */
+ dhd_set_packet_filter(0, dhd);
+
+ /* restore pre-suspend setting for dtim_skip */
+ bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
+ 4, iovbuf, sizeof(iovbuf));
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ roamvar = dhd_roam_disable;
+ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+ sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
+{
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if ((!dhdp->suspend_disable_flag) && (dhd_check_ap_wfd_mode_set(dhdp) == FALSE))
+ dhd_set_suspend(val, dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 1);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 0);
+}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+/*
+ * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
+
+/*
+ * Arm a dhd_timeout_t for 'usec' microseconds.  'increment' starts at 0 so
+ * the first dhd_timeout_expired() call is a free pass, and 'tick' caches
+ * the length of one jiffy in microseconds, the spin/sleep crossover point.
+ */
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = 1000000 / HZ;
+}
+
+/*
+ * Poll-with-backoff step for a dhd_timeout_t.  Returns 1 once the budget
+ * set by dhd_timeout_start() is exhausted (or a signal is pending), else
+ * delays and returns 0.  Delays spin via OSL_DELAY with exponential
+ * back-off until they reach one jiffy, then switch to a scheduled
+ * one-jiffy sleep on a private wait queue.
+ */
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* Does nothing the first call */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
+	if (tmo->increment < tmo->tick) {
+		/* Sub-jiffy: busy-wait, then double the step (capped at a tick) */
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		/* One-jiffy interruptible sleep; a pending signal ends the wait
+		 * early and is reported as expiry.
+		 */
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		int pending;
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		pending = signal_pending(current);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+		if (pending)
+			return 1;	/* Interrupted */
+	}
+
+	return 0;
+}
+
+/*
+ * Translate a net_device pointer into its DHD interface index.
+ * Returns DHD_BAD_IF when the device is not owned by this driver instance.
+ */
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int idx;
+
+	ASSERT(dhd);
+	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+		if (dhd->iflist[idx] && (dhd->iflist[idx]->net == net))
+			return idx;
+	}
+
+	return DHD_BAD_IF;
+}
+
+/*
+ * Return the net_device backing interface index 'ifidx', or NULL when the
+ * handle or index is invalid or the interface slot is unpopulated.
+ */
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+	struct dhd_pub *pubp = (struct dhd_pub *)pub;
+	struct dhd_info *info;
+
+	if (pubp == NULL)
+		return NULL;
+	if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS))
+		return NULL;
+
+	info = pubp->info;
+	if ((info == NULL) || (info->iflist[ifidx] == NULL))
+		return NULL;
+
+	return info->iflist[ifidx]->net;
+}
+
+/*
+ * Map an interface name to its index.  Scans slots DHD_MAX_IFS-1 down to 1;
+ * when no virtual interface matches (or 'name' is NULL/empty) the loop
+ * falls through to 0, the primary interface.
+ */
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+			break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+/*
+ * Return the Linux netdev name for interface 'ifidx', mainly for logging.
+ * Never returns NULL: out-of-range, unpopulated, or netdev-less interfaces
+ * yield the placeholder strings "<if_bad>", "<if_null>" and "<if_none>".
+ */
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+/*
+ * Map a BSS index to the MAC address of the matching virtual interface.
+ * Returns NULL when no interface in iflist carries the requested bssidx.
+ */
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	/* The iflist lives in the private dhd_info_t, reached through
+	 * dhdp->info (as dhd_ifname() et al. do) -- a direct cast of the
+	 * public handle to dhd_info_t would index into the wrong struct.
+	 */
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+			return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
+/*
+ * Push the current Linux multicast configuration for interface 'ifidx'
+ * down to the dongle: first the "mcast_list" iovar with the filtered
+ * address list, then the "allmulti" iovar, and finally WLC_SET_PROMISC
+ * to mirror IFF_PROMISC.  Runs from the sysioc thread.
+ */
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	/* Snapshot the multicast count under the address lock */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
+
+
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	/* iovar layout: "mcast_list\0" + LE32 count + count MAC addresses */
+	strcpy(bufp, "mcast_list");
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+	/* NOTE(review): cnt was byte-swapped by htol32 above yet is still
+	 * used as the loop counter below -- correct only on little-endian
+	 * hosts; confirm for big-endian targets.
+	 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		/* Fall back to allmulti if the dongle rejected the list */
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+			dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+/*
+ * Program 'addr' as the dongle's MAC via the "cur_etheraddr" iovar and,
+ * on success, mirror it into the interface's net_device and the cached
+ * public-handle MAC.  Returns the ioctl status (<0 on failure, -1 when
+ * the iovar could not even be built).
+ */
+static int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */
+#endif
+
+/*
+ * Process a pending state change on a *virtual* interface (idx != 0):
+ * DHD_IF_ADD allocates/registers a netdev (possibly deferring to
+ * cfg80211), DHD_IF_DEL unregisters it.  On any failure the interface
+ * slot is torn down and freed.  Runs from the sysioc thread.
+ */
+static void
+dhd_op_if(dhd_if_t *ifp)
+{
+	dhd_info_t *dhd;
+	int ret = 0, err = 0;
+#ifdef SOFTAP
+	unsigned long flags;
+#endif
+
+	if (!ifp || !ifp->info || !ifp->idx)
+		return;
+	ASSERT(ifp && ifp->info && ifp->idx);	/* Virtual interfaces only */
+	dhd = ifp->info;
+
+	DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_is_progress_ifchange())
+		return;
+
+#endif
+	switch (ifp->state) {
+	case DHD_IF_ADD:
+		/*
+		 * Delete the existing interface before overwriting it
+		 * in case we missed the WLC_E_IF_DEL event.
+		 */
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
+			 __FUNCTION__, ifp->net->name));
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			free_netdev(ifp->net);
+		}
+		/* Allocate etherdev, including space for private structure.
+		 * Note: netdev priv holds only the dhd_info_t *pointer*
+		 * (sizeof(dhd), not sizeof(*dhd)); see the memcpy below.
+		 */
+		if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
+			DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+			ret = -ENOMEM;
+		}
+		if (ret == 0) {
+			strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
+			ifp->net->name[IFNAMSIZ - 1] = '\0';
+			memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
+#ifdef WL_CFG80211
+			/* Let cfg80211 complete the add asynchronously if it
+			 * accepts the notification.
+			 */
+			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
+				if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx,
+					(void*)dhd_net_attach)) {
+					ifp->state = DHD_IF_NONE;
+					ifp->event2cfg80211 = TRUE;
+					return;
+				}
+#endif
+			if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
+				DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
+					__FUNCTION__, err));
+				ret = -EOPNOTSUPP;
+			} else {
+#if defined(SOFTAP)
+				if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+					/* semaphore that the soft AP CODE waits on */
+					flags = dhd_os_spin_lock(&dhd->pub);
+
+					/* save ptr to wl0.1 netdev for use in wl_iw.c */
+					ap_net_dev = ifp->net;
+					/* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
+					up(&ap_eth_ctl.sema);
+					dhd_os_spin_unlock(&dhd->pub, flags);
+				}
+#endif
+				DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
+					current->pid, ifp->net->name));
+				ifp->state = DHD_IF_NONE;
+			}
+		}
+		break;
+	case DHD_IF_DEL:
+		/* Make sure that we don't enter again here if .. */
+		/* dhd_op_if is called again from some other context */
+		ifp->state = DHD_IF_DELETING;
+		if (ifp->net != NULL) {
+			DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__));
+#ifdef WL_CFG80211
+			if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+				wl_cfg80211_notify_ifdel(ifp->net);
+			}
+#endif
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			ret = DHD_DEL_IF;	/* Make sure the free_netdev() is called */
+		}
+		break;
+	case DHD_IF_DELETING:
+		break;
+	default:
+		DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
+		ASSERT(!ifp->state);
+		break;
+	}
+
+	/* Error (or completed delete): release the netdev and the slot */
+	if (ret < 0) {
+		ifp->set_multicast = FALSE;
+		if (ifp->net) {
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		dhd->iflist[ifp->idx] = NULL;
+#ifdef SOFTAP
+		flags = dhd_os_spin_lock(&dhd->pub);
+		if (ifp->net == ap_net_dev)
+			ap_net_dev = NULL;	/* NULL SOFTAP global wl0.1 as well */
+		dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+		MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+	}
+}
+
+/*
+ * System-ioctl worker thread.  Woken via tsk->sema whenever some interface
+ * has a pending state change, multicast-list update, or MAC-address change
+ * (set_macaddress holds ifidx+1, 0 meaning "none").  In SOFTAP mode,
+ * MAC/multicast updates are blocked while the AP netdev exists.
+ */
+static int
+_dhd_sysioc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+
+	int i;
+#ifdef SOFTAP
+	bool in_ap = FALSE;
+	unsigned long flags;
+#endif
+
+	DAEMONIZE("dhd_sysioc");
+
+	complete(&tsk->completed);
+
+	while (down_interruptible(&tsk->sema) == 0) {
+
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated) {
+			break;
+		}
+
+		dhd_net_if_lock_local(dhd);
+		DHD_OS_WAKE_LOCK(&dhd->pub);
+
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
+#ifdef SOFTAP
+				flags = dhd_os_spin_lock(&dhd->pub);
+				in_ap = (ap_net_dev != NULL);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+#endif /* SOFTAP */
+				/* Process add/delete first; it may free the slot */
+				if (dhd->iflist[i] && dhd->iflist[i]->state)
+					dhd_op_if(dhd->iflist[i]);
+
+				if (dhd->iflist[i] == NULL) {
+					DHD_TRACE(("\n\n %s: interface %d just been removed,"
+						"!\n\n", __FUNCTION__, i));
+					continue;
+				}
+#ifdef SOFTAP
+				if (in_ap && dhd->set_macaddress == i+1)  {
+					DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
+						"blocked. \n", dhd->iflist[i]->net->name));
+					dhd->set_macaddress = 0;
+					continue;
+				}
+
+				if (in_ap && dhd->iflist[i]->set_multicast)  {
+					DHD_TRACE(("attempt to set MULTICAST list for %s"
+					 "in AP Mode, blocked. \n", dhd->iflist[i]->net->name));
+					dhd->iflist[i]->set_multicast = FALSE;
+					continue;
+				}
+#endif /* SOFTAP */
+				if (dhd->iflist[i]->set_multicast) {
+					dhd->iflist[i]->set_multicast = FALSE;
+					_dhd_set_multicast_list(dhd, i);
+				}
+				if (dhd->set_macaddress == i+1) {
+					dhd->set_macaddress = 0;
+					_dhd_set_mac_address(dhd, i, &dhd->macvalue);
+				}
+			}
+		}
+
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		dhd_net_if_unlock_local(dhd);
+	}
+	DHD_TRACE(("%s: stopped\n", __FUNCTION__));
+	complete_and_exit(&tsk->completed, 0);
+}
+
+/*
+ * net_device ndo_set_mac_address handler.  Stashes the requested address
+ * in dhd->macvalue and defers the actual iovar work to the sysioc thread
+ * (signalled via its semaphore).  Returns 0, or -1 for an unknown netdev.
+ */
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	/* Assert on the pid value itself -- taking its address ('&') made
+	 * the original check a tautology.
+	 */
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+	dhd->set_macaddress = ifidx+1;
+	up(&dhd->thr_sysioc_ctl.sema);
+
+	return ret;
+}
+
+/*
+ * net_device ndo_set_rx_mode handler.  Marks the interface as needing a
+ * multicast-list refresh and wakes the sysioc thread, which performs the
+ * actual dongle update in _dhd_set_multicast_list().
+ */
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	/* Assert on the pid value itself -- taking its address ('&') made
+	 * the original check a tautology.
+	 */
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	dhd->iflist[ifidx]->set_multicast = TRUE;
+	up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#ifdef PROP_TXSTATUS
+/* Enter the wlfc (proptxstatus) critical section; always returns 1. */
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_lock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+/* Leave the wlfc (proptxstatus) critical section; always returns 1. */
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+	(void)di;
+	ASSERT(di != NULL);
+	spin_unlock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+/* WME access category for each hardware TX FIFO */
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+/* 802.1D priority to TX FIFO mapping */
+uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+/* Resolve an 802.1D priority to its WME access category */
+#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
+
+#endif /* PROP_TXSTATUS */
+/*
+ * Transmit one packet on interface 'ifidx'.  Takes ownership of pktbuf:
+ * it is either handed to the bus/wlfc layer or freed here on early
+ * rejection (bus down, runt frame).  Tracks multicast and pending-802.1X
+ * counters, assigns a priority, pushes the protocol header (non-wlfc
+ * path), and submits via the bus module.  Returns 0 or a negative/BCME
+ * error code.
+ */
+int
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		/* 802.1X frames are counted so dhd_wait_pend8021x() can
+		 * block until they complete.
+		 */
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+	} else {
+		PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+		return BCME_ERROR;
+	}
+
+	/* Look into the packet and update the packet priority */
+	if (PKTPRIO(pktbuf) == 0)
+		pktsetprio(pktbuf, FALSE);
+
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
+		!= WLFC_FCMODE_NONE) {
+		/* wlfc path: enqueue, then let the commit logic drive txdata */
+		dhd_os_wlfc_block(dhdp);
+		ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)),
+			pktbuf);
+		dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus);
+		if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
+			((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
+		}
+		dhd_os_wlfc_unblock(dhdp);
+	}
+	else
+		/* non-proptxstatus way */
+		ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* PROP_TXSTATUS */
+
+	return ret;
+}
+
+/*
+ * net_device ndo_start_xmit handler.  Rejects (and stops the queue) when
+ * the bus is down or the netdev is unknown, guarantees headroom for the
+ * protocol/bus headers, converts the skb to a native packet and hands it
+ * to dhd_sendpkt().  Always consumes the skb; returns NETDEV_TX_OK on
+ * the normal path and NETDEV_TX_BUSY only on the rejection paths above.
+ */
+int
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	void *pktbuf;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	int ifidx;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	/* Reject if down */
+	if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.busstate == DHD_BUS_DOWN)  {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	/* Make sure there's enough room for any header */
+
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		/* Original skb is always released; on realloc failure the
+		 * packet is dropped and counted below.
+		 */
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+#ifdef WLMEDIA_HTSF
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+
+done:
+	if (ret)
+		dhd->pub.dstats.tx_dropped++;
+	else
+		dhd->pub.tx_packets++;
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+	return 0;
+#else
+	return NETDEV_TX_OK;
+#endif
+}
+
+/*
+ * Apply TX flow control from the bus layer: stop (state == ON) or wake
+ * the netdev queue for one interface, or for every populated interface
+ * when ifidx == ALL_INTERFACES.  Also records the state in dhdp->txoff.
+ */
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhdp->txoff = state;
+	ASSERT(dhd);
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	}
+	else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+/*
+ * Deliver a chain of received packets ('numpkt' buffers linked via
+ * PKTNEXT) up the Linux stack for interface 'ifidx'.  Drops packets for
+ * missing/unregistered interfaces, strips the ethernet header, diverts
+ * BRCM event frames to dhd_wl_host_event(), and hands the rest to
+ * netif_rx()/netif_rx_ni().  Finishes by arming a wake-lock timeout.
+ *
+ * Fix: all packet accessors now use dhdp->osh -- the previous
+ * 'wl->sh.osh' referenced a symbol that does not exist in this file
+ * (copy/paste from wl_linux.c); every other call here uses dhdp->osh.
+ */
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+	int tout = DHD_PACKET_TIMEOUT_MS;
+
+
+	BCM_REFERENCE(tout);
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+#ifdef WLBTAMP
+		struct ether_header *eh;
+		struct dot11_llc_snap_header *lsh;
+#endif
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			PKTFREE(dhdp->osh, pktbuf, TRUE);
+			continue;
+		}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+		/* Dropping packets before registering net device to avoid kernel panic */
+		if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED ||
+			!dhd->pub.up) {
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+			__FUNCTION__));
+			PKTFREE(dhdp->osh, pktbuf, TRUE);
+			continue;
+		}
+#endif
+
+		/* Detach this buffer from the chain before delivering it */
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+#ifdef WLBTAMP
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+		lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
+		    (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
+		    bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    lsh->type == HTON16(BTA_PROT_L2CAP)) {
+			amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
+			        ((uint8 *)eh + RFC1042_HDR_LEN);
+			ACL_data = NULL;
+		}
+#endif /* WLBTAMP */
+
+#ifdef PROP_TXSTATUS
+		if (dhdp->wlfc_state && PKTLEN(dhdp->osh, pktbuf) == 0) {
+			/* WLFC may send header only packet when
+			there is an urgent message but no packet to
+			piggy-back on
+			*/
+			((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
+			PKTFREE(dhdp->osh, pktbuf, TRUE);
+			continue;
+		}
+#endif
+
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		/* Get the protocol, maintain skb around eth_type_trans()
+		 * The main reason for this hack is for the limitation of
+		 * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
+		 * to perform skb_pull inside vs ETH_HLEN. Since to avoid
+		 * coping of the packet coming from the network stack to add
+		 * BDC, Hardware header etc, during network interface registration
+		 * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
+		 * for BDC, Hardware header etc. and not just the ETH_HLEN
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+		}
+
+		/* Restore data/len clobbered by eth_type_trans() */
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb->mac_header,
+#else
+			skb->mac.raw,
+#endif
+			&event,
+			&data);
+
+#ifdef WLBTAMP
+			wl_event_to_host_order(&event);
+			if (event.event_type == WLC_E_BTA_HCI_EVENT) {
+				dhd_bta_doevt(dhdp, data, event.datalen);
+			}
+			tout = DHD_EVENT_TIMEOUT_MS;
+#endif /* WLBTAMP */
+		}
+
+		/* ifidx may have been remapped by the event handler */
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
+			ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		dhdp->dstats.rx_bytes += skb->len;
+		dhdp->rx_packets++; /* Local count */
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			/* If the receive is not processed inside an ISR,
+			 * the softirqd must be woken explicitly to service
+			 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+			 * by netif_rx_ni(), but in earlier kernels, we need
+			 * to do it manually.
+			 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+			netif_rx_ni(skb);
+#else
+			ulong flags;
+			netif_rx(skb);
+			local_irq_save(flags);
+			RAISE_RX_SOFTIRQ();
+			local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+		}
+	}
+	DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(dhdp, tout);
+}
+
+/* Per-OS event hook: the Linux port needs no extra event bookkeeping. */
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+}
+
+/*
+ * TX-completion callback from the bus layer.  Pops the protocol header,
+ * decrements the pending-802.1X counter for EAPOL frames, and (WLBTAMP)
+ * synthesizes a local HCI ACL completion event when appropriate.
+ */
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint ifidx;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+#ifdef WLBTAMP
+	uint len;
+#endif
+
+	dhd_prot_hdrpull(dhdp, &ifidx, txp, NULL, NULL);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	/* Balance the atomic_inc done in dhd_sendpkt() */
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+#ifdef WLBTAMP
+	/* Crack open the packet and check to see if it is BT HCI ACL data packet.
+	 * If yes generate packet completion event.
+	 */
+	len = PKTLEN(dhdp->osh, txp);
+
+	/* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
+	if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
+		struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
+
+		if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+		    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+
+			dhd_bta_tx_hcidata_complete(dhdp, txp, success);
+		}
+	}
+#endif /* WLBTAMP */
+}
+
+/*
+ * net_device get_stats handler: refresh the dongle counters via the
+ * protocol layer (when up) and mirror them into the per-interface
+ * net_device_stats.  Returns NULL for a netdev this driver doesn't own.
+ */
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	/* NOTE(review): dhd is dereferenced via netdev_priv before the
+	 * ASSERT(dhd && ifp) below -- the assert cannot catch a NULL dhd.
+	 */
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+		return NULL;
+	}
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+
+	/* Copy dongle stats to net device stats */
+	ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
+	ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
+	ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
+	ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
+	ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
+	ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
+	ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
+	ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
+	ifp->stats.multicast = dhd->pub.dstats.multicast;
+
+	return &ifp->stats;
+}
+
+#ifdef DHDTHREAD
+/*
+ * Dedicated watchdog thread (DHDTHREAD builds).  Optionally raised to
+ * SCHED_FIFO, then loops on the task semaphore: each wakeup runs the bus
+ * watchdog under the SD lock, bumps the tick counter, and re-arms the
+ * kernel timer that drives the next wakeup.  Exits on signal/terminate.
+ */
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_watchdog");
+
+	/* Run until signal received */
+	complete(&tsk->completed);
+
+	while (1)
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			dhd_os_sdlock(&dhd->pub);
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+				flags = dhd_os_spin_lock(&dhd->pub);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+					jiffies + dhd_watchdog_ms * HZ / 1000);
+				dhd_os_spin_unlock(&dhd->pub, flags);
+			}
+			dhd_os_sdunlock(&dhd->pub);
+			/* Balances the wake lock taken in dhd_watchdog() */
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		} else {
+			break;
+		}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+/*
+ * Kernel timer callback for the watchdog.  With DHDTHREAD it only wakes
+ * the watchdog thread (which also inherits the wake lock taken here);
+ * otherwise it runs the bus watchdog inline and re-arms itself.
+ */
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	if (dhd->pub.dongle_reset) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return;
+	}
+
+#ifdef DHDTHREAD
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		/* Thread path: the thread releases the wake lock */
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+#endif /* DHDTHREAD */
+
+	dhd_os_sdlock(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	flags = dhd_os_spin_lock(&dhd->pub);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	dhd_os_sdunlock(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHDTHREAD
+/*
+ * Dedicated DPC thread (DHDTHREAD builds).  Optionally SCHED_FIFO.  Each
+ * semaphore wakeup runs the bus DPC; when the DPC reports more work the
+ * semaphore is re-upped to run again, otherwise the wake lock taken by
+ * dhd_sched_dpc() is released.  A down bus triggers a clean bus stop.
+ */
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_dpc");
+	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */
+
+	/*  signal: thread has started */
+	complete(&tsk->completed);
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				if (dhd_bus_dpc(dhd->pub.bus)) {
+					/* More work pending: reschedule ourselves */
+					up(&tsk->sema);
+				}
+				else {
+					DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				}
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+/*
+ * Tasklet flavour of the DPC (used when no DPC thread is running).
+ * Re-schedules itself while the bus DPC reports pending work; releases
+ * the wake lock taken by dhd_sched_dpc() when done or when the bus is
+ * down (after a clean bus stop).
+ */
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
+	 * down below , wake lock is set,
+	 * the tasklet is initialized in dhd_attach()
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
+/*
+ * Kick the DPC from interrupt/bus context.  Takes a wake lock that the
+ * DPC thread or tasklet releases when all work is drained, then wakes
+ * whichever mechanism is active.
+ * NOTE(review): if neither the thread nor the tasklet exists the wake
+ * lock taken here is never released -- confirm that cannot happen.
+ */
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+#ifdef DHDTHREAD
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		up(&dhd->thr_dpc_ctl.sema);
+		return;
+	}
+#endif /* DHDTHREAD */
+
+	if (dhd->dhd_tasklet_create)
+		tasklet_schedule(&dhd->tasklet);
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar.
+ * Returns 0 and fills *toe_ol on success, -EOPNOTSUPP when the dongle
+ * image predates toe_ol, or the raw ioctl error otherwise.
+ */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strcpy(buf, "toe_ol");
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar.
+ * The global "toe" switch is turned on iff any component bit is set.
+ * Returns 0 on success or the first failing ioctl status.
+ */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
+	strcpy(buf, "toe_ol");
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+/* ethtool get_drvinfo callback: report driver name "wl" and version. */
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	sprintf(info->driver, "wl");
+	sprintf(info->version, "%lu", dhd->pub.drv_version);
+}
+
+/* Minimal ethtool ops table; only drvinfo is implemented here. */
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+/* Legacy SIOCETHTOOL handler (pre-ethtool_ops path).
+ * @dhd:   driver info struct
+ * @uaddr: user-space pointer to the ethtool request (first word is the cmd)
+ * Supports GDRVINFO plus, when TOE is compiled in, get/set of rx/tx
+ * checksum offload which is mapped onto the dongle toe_ol iovar.
+ * Returns 0 or a negative errno.
+ */
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		/* strncpy does not guarantee termination; force it */
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		/* "?dhd" is a private probe string from user tools asking
+		 * whether this interface is driven by DHD */
+		if (strcmp(drvname, "?dhd") == 0) {
+			sprintf(info.driver, "dhd");
+			strcpy(info.version, EPI_VERSION_STR);
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			sprintf(info.driver, "wl");
+		else
+			sprintf(info.driver, "xx");
+
+		sprintf(info.version, "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		/* the primary interface (index 0) carries the feature flag */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+/* Detect a wedged dongle and, if found, push a HANG event up to the
+ * framework.  A hang is declared on an ioctl timeout, or when the bus is
+ * down without a user-requested dongle reset being in progress.
+ * Returns TRUE when the HANG message was sent, FALSE otherwise.
+ */
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+	bool timed_out;
+	bool bus_dead;
+
+	if (dhdp == NULL)
+		return FALSE;
+
+	timed_out = (error == -ETIMEDOUT);
+	bus_dead = (dhdp->busstate == DHD_BUS_DOWN) && !dhdp->dongle_reset;
+	if (!timed_out && !bus_dead)
+		return FALSE;
+
+	DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+		dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+	net_os_send_hang_message(net);
+	return TRUE;
+}
+
+/* net_device ioctl entry point.
+ * Dispatches wireless-extensions, ethtool and Android private commands, then
+ * handles the DHD/WL private ioctl (SIOCDEVPRIVATE): the control structure
+ * and payload are copied in from user space, routed either to the local DHD
+ * handler (DHD_IOCTL_MAGIC) or to the dongle, and the payload is copied back
+ * on success.  A wake lock is held for the whole call.
+ *
+ * Fixes vs. original:
+ *  - iovar-name comparisons and the WLMEDIA_HTSF shortcuts now inspect the
+ *    kernel copy 'buf' instead of dereferencing the raw user pointer
+ *    ioc.buf (direct user-pointer dereference in kernel context);
+ *  - the HTSF shortcuts exit via 'done' instead of returning directly,
+ *    which previously leaked the wake lock and the MALLOCed ioctl buffer.
+ */
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int buflen = 0;
+	void *buf = NULL;
+	uint driver = 0;
+	int ifidx;
+	int ret;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(CONFIG_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		dhd_check_hang(net, &dhd->pub, ret);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	/* Copy the ioc control structure part of ioctl request */
+	if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	/* Copy out any buffer passed */
+	if (ioc.buf) {
+		if (ioc.len == 0) {
+			DHD_TRACE(("%s: ioc.len=0, returns BCME_BADARG \n", __FUNCTION__));
+			bcmerror = -BCME_BADARG;
+			goto done;
+		}
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		/* optimization for direct ioctl calls from kernel */
+		/*
+		if (segment_eq(get_fs(), KERNEL_DS)) {
+			buf = ioc.buf;
+		} else {
+		*/
+		{
+			if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
+				bcmerror = -BCME_NOMEM;
+				goto done;
+			}
+			if (copy_from_user(buf, ioc.buf, buflen)) {
+				bcmerror = -BCME_BADADDR;
+				goto done;
+			}
+		}
+	}
+
+	/* To differentiate between wl and dhd read 4 more byes */
+	if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+		sizeof(uint)) != 0)) {
+		bcmerror = -BCME_BADADDR;
+		goto done;
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = -BCME_EPERM;
+		goto done;
+	}
+
+	/* check for local dhd ioctl and handle it */
+	if (driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+		if (bcmerror)
+			dhd->pub.bcmerror = bcmerror;
+		goto done;
+	}
+
+	/* send to dongle (must be up, and wl). */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!dhd->pub.iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	/* compare against the kernel copy of the payload, never ioc.buf */
+	if (ioc.cmd == WLC_SET_KEY ||
+	    (ioc.cmd == WLC_SET_VAR && buf != NULL &&
+	     strncmp("wsec_key", buf, 9) == 0) ||
+	    (ioc.cmd == WLC_SET_VAR && buf != NULL &&
+	     strncmp("bsscfg:wsec_key", buf, 15) == 0) ||
+	    ioc.cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+	/* Short-circuit HTSF debug commands here.  All exits go through
+	 * 'done' so the wake lock and the ioctl buffer are released.
+	 */
+	if (buf) {
+		/* short cut wl ioctl calls here */
+		if (strcmp("htsf", buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			bcmerror = BCME_OK;
+			goto done;
+		}
+
+		if (strcmp("htsflate", buf) == 0) {
+			if (ioc.set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			bcmerror = BCME_OK;
+			goto done;
+		}
+		if (strcmp("htsfclear", buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			bcmerror = BCME_OK;
+			goto done;
+		}
+		if (strcmp("htsfhis", buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			bcmerror = BCME_OK;
+			goto done;
+		}
+		if (strcmp("tsport", buf) == 0) {
+			if (ioc.set) {
+				/* NOTE(review): assumes ioc.len >= 11 so the 4-byte
+				 * port value fits after "tsport\0" - confirm callers */
+				memcpy(&tsport, buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			bcmerror = BCME_OK;
+			goto done;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	if ((ioc.cmd == WLC_SET_VAR || ioc.cmd == WLC_GET_VAR) &&
+		buf != NULL && strncmp("rpc_", buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+		bcmerror = dhd_fdaggr_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+#else
+		bcmerror = BCME_UNSUPPORTED;
+#endif
+		goto done;
+	}
+	bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+
+done:
+	dhd_check_hang(net, &dhd->pub, bcmerror);
+
+	/* copy the (possibly updated) payload back to user space */
+	if (!bcmerror && buf && ioc.buf) {
+		if (copy_to_user(ioc.buf, buf, buflen))
+			bcmerror = -EFAULT;
+	}
+
+	if (buf)
+		MFREE(dhd->pub.osh, buf, buflen);
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
+
+#ifdef WL_CFG80211
+/* Tear down all virtual (non-primary) interfaces, ifidx 1..DHD_MAX_IFS-1.
+ * Deletion is delegated to the sysioc thread via dhd_op_if() after marking
+ * each interface DHD_IF_DEL.  The rtnl lock is temporarily dropped (and
+ * restored) because unregister_netdev must not run under it here.
+ * Always returns 0.
+ */
+static int
+dhd_cleanup_virt_ifaces(dhd_info_t *dhd)
+{
+	int i = 1; /* Leave ifidx 0 [Primary Interface] */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int rollback_lock = FALSE;
+#endif
+
+	DHD_TRACE(("%s: Enter \n", __func__));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/* release lock for unregister_netdev */
+	if (rtnl_is_locked()) {
+		rtnl_unlock();
+		rollback_lock = TRUE;
+	}
+#endif
+
+	for (i = 1; i < DHD_MAX_IFS; i++) {
+		if (dhd->iflist[i]) {
+			DHD_TRACE(("Deleting IF: %d \n", i));
+			/* skip interfaces already deleted or being deleted */
+			if ((dhd->iflist[i]->state != DHD_IF_DEL) &&
+				(dhd->iflist[i]->state != DHD_IF_DELETING)) {
+				dhd->iflist[i]->state = DHD_IF_DEL;
+				dhd->iflist[i]->idx = i;
+				dhd_net_if_lock_local(dhd);
+				dhd_op_if(dhd->iflist[i]);
+				dhd_net_if_unlock_local(dhd);
+			}
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/* re-acquire rtnl if we dropped it above */
+	if (rollback_lock)
+		rtnl_lock();
+#endif
+
+	return 0;
+}
+#endif /* WL_CFG80211 */
+
+/* net_device .ndo_stop: bring the interface down.
+ * Order matters: cfg80211 teardown (and virtual-iface cleanup) first, then
+ * wlfc cleanup, then mark the driver down and stop the TX queue, then stop
+ * the protocol layer, and finally power the chip off for non-built-in
+ * firmware loads.  Always returns 0.
+ */
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+	/* already down: nothing to do */
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		wl_cfg80211_down(NULL);
+
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+			(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+			dhd_cleanup_virt_ifaces(dhd);
+		}
+	}
+#endif
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub);
+#endif
+	/* Set state and stop OS transmissions */
+	dhd->pub.up = 0;
+	netif_stop_queue(net);
+
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+#if defined(WL_CFG80211)
+	/* power down the chip only when firmware is loaded per-open */
+	if (ifidx == 0 && !dhd_download_fw_on_driverload)
+		wl_android_wifi_off(net);
+#endif
+	/* reset hang/timeout bookkeeping so the next open starts clean */
+	dhd->pub.hang_was_sent = 0;
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	return 0;
+}
+
+/* net_device .ndo_open: bring the interface up.
+ * For the primary interface (ifidx 0) this powers the chip on (when
+ * firmware is loaded per-open), starts the bus, copies the dongle MAC into
+ * the netdev, syncs TOE features, brings up cfg80211 and finally fires
+ * WLC_UP to enable RF.  Returns 0 on success, negative on failure.
+ */
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	uint up = 0;
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+	int32 ret = 0;
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	/* Update FW path if it was changed */
+	/* NOTE(review): strcpy assumes fw_path is large enough to hold
+	 * firmware_path; sizes are declared elsewhere - confirm bounds */
+	if ((firmware_path != NULL) && (firmware_path[0] != '\0')) {
+		/* strip a trailing newline left by echo > module param */
+		if (firmware_path[strlen(firmware_path)-1] == '\n')
+			firmware_path[strlen(firmware_path)-1] = '\0';
+		strcpy(fw_path, firmware_path);
+		firmware_path[0] = '\0';
+	}
+
+#if !defined(WL_CFG80211)
+	/*
+	 * Force start if ifconfig_up gets called before START command
+	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
+	 * This should be removed in the future
+	 */
+	wl_control_wl_start(net);
+#endif
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (ifidx == 0) {
+		/* primary interface: full bring-up sequence */
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+		DHD_ERROR(("\n%s\n", dhd_version));
+		if (!dhd_download_fw_on_driverload) {
+			ret = wl_android_wifi_on(net);
+			if (ret != 0) {
+				DHD_ERROR(("wl_android_wifi_on failed (%d)\n", ret));
+				goto exit;
+			}
+		}
+#endif
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+
+			/* try to bring up bus */
+			if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+
+		}
+
+		/* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up(NULL))) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+#endif /* WL_CFG80211 */
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+	/* Fire a WLC_UP for primary interface to enable RF */
+	if (ifidx == 0)
+		dhd_wl_ioctl_cmd(&dhd->pub, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
+
+#ifdef BCMDBGFS
+	dhd_dbg_init(&dhd->pub);
+#endif
+
+	OLD_MOD_INC_USE_COUNT;
+exit:
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	return ret;
+}
+
+/* Bring up the driver on @net if it is not already running.
+ * Used by external callers that need the bus in DHD_BUS_DATA state before
+ * issuing commands.  Returns 0 on success (or if already initialized),
+ * -EINVAL for a NULL netdev, -1 if dhd_open() fails.
+ * Fix vs. original: corrected log-message typo ("Inititalized") and added
+ * the missing trailing newline.
+ */
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+	dhd = *(dhd_info_t **)netdev_priv(net);
+
+	/* If driver is already initialized, do nothing
+	 */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already Initialized. Nothing to do\n"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Bus glue: create an OS abstraction layer handle for @pdev on @bustype.
+ * The TRUE argument enables OSL packet translation.
+ */
+osl_t *
+dhd_osl_attach(void *pdev, uint bustype)
+{
+	osl_t *osh;
+
+	osh = osl_attach(pdev, bustype, TRUE);
+	return osh;
+}
+
+/* Bus glue: destroy the OSL handle created by dhd_osl_attach().
+ * Logs (but does not free) any memory still tracked by the OSL - a leak
+ * indicator - then releases the registration semaphores so a blocked
+ * module init/exit path can proceed.
+ */
+void
+dhd_osl_detach(osl_t *osh)
+{
+	if (MALLOCED(osh)) {
+		DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+	}
+	osl_detach(osh);
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	up(&dhd_registration_sem);
+#if defined(BCMLXSDMMC)
+	up(&dhd_chipup_sem);
+#endif
+#endif
+}
+
+/* Create (or recycle) the dhd_if_t slot for @ifidx.
+ * If @handle is NULL the actual net_device creation is deferred to the
+ * sysioc thread (state DHD_IF_ADD + semaphore kick); otherwise @handle is
+ * an already-allocated net_device adopted directly.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * Fix vs. original: the ASSERT compared the *address* of thr_pid against 0
+ * (always true); it now checks the pid value itself.
+ */
+int
+dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+	uint8 *mac_addr, uint32 flags, uint8 bssidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+
+	ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+
+	ifp = dhd->iflist[ifidx];
+	if (ifp != NULL) {
+		/* slot reused: release the previous net_device first */
+		if (ifp->net != NULL) {
+			netif_stop_queue(ifp->net);
+			unregister_netdev(ifp->net);
+			free_netdev(ifp->net);
+		}
+	} else
+		if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
+			return -ENOMEM;
+		}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->event2cfg80211 = FALSE;
+	ifp->info = dhd;
+	dhd->iflist[ifidx] = ifp;
+	strncpy(ifp->name, name, IFNAMSIZ);
+	/* NOTE(review): assumes ifp->name is declared char[IFNAMSIZ+1]
+	 * so index IFNAMSIZ is in bounds - confirm in dhd_if_t */
+	ifp->name[IFNAMSIZ] = '\0';
+	if (mac_addr != NULL)
+		memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+	if (handle == NULL) {
+		/* defer net_device creation to the sysioc thread */
+		ifp->state = DHD_IF_ADD;
+		ifp->idx = ifidx;
+		ifp->bssidx = bssidx;
+		ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+		up(&dhd->thr_sysioc_ctl.sema);
+	} else
+		ifp->net = (struct net_device *)handle;
+
+	if (ifidx == 0) {
+		ifp->event2cfg80211 = TRUE;
+	}
+
+	return 0;
+}
+
+/* Schedule deletion of the interface at @ifidx (never the primary, idx 0).
+ * The slot is marked DHD_IF_DEL and the sysioc thread is kicked to perform
+ * the actual unregister/free asynchronously.
+ * Fix vs. original: the ASSERT compared the *address* of thr_pid against 0
+ * (always true); it now checks the pid value itself.
+ */
+void
+dhd_del_if(dhd_info_t *dhd, int ifidx)
+{
+	dhd_if_t *ifp;
+
+	DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
+	ifp = dhd->iflist[ifidx];
+	if (!ifp) {
+		DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
+		return;
+	}
+
+	ifp->state = DHD_IF_DEL;
+	ifp->idx = ifidx;
+	ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
+	up(&dhd->thr_sysioc_ctl.sema);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+/* net_device_ops for the primary interface: includes open/stop so
+ * ifconfig up/down drives the full chip bring-up/teardown.
+ */
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+/* net_device_ops for virtual interfaces: same as primary but without
+ * open/stop, since virtual-iface lifetime is managed by the driver.
+ */
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+/* Allocate and initialize the per-device driver state.
+ * Progress is tracked bit-by-bit in dhd_state so the fail: path can hand a
+ * partially-built object to dhd_detach() for rollback.
+ * Returns the public dhd_pub_t on success, NULL on failure.
+ */
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* updates firmware nvram path if it was provided as module parameters */
+	if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
+		strcpy(fw_path, firmware_path);
+	if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
+		strcpy(nv_path, nvram_path);
+
+	/* Allocate etherdev, including space for private structure */
+	/* netdev priv holds only a pointer to dhd_info_t (sizeof(dhd) is
+	 * pointer-sized by design), the real struct is MALLOCed below */
+	if (!(net = alloc_etherdev(sizeof(dhd)))) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+
+	/* Allocate primary dhd_info */
+	if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
+		DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+
+#ifdef DHDTHREAD
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#endif /* DHDTHREAD */
+	dhd->dhd_tasklet_create = FALSE;
+	dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
+	dhd->pub.osh = osh;
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(net->name, iface_name, IFNAMSIZ);
+		net->name[IFNAMSIZ - 1] = 0;
+		len = strlen(net->name);
+		ch = net->name[len - 1];
+		/* append "%d" so the kernel picks a unit number, unless the
+		 * caller already ended the name with a digit */
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(net->name, "%d");
+	}
+
+	/* NOTE(review): dhd_add_if returns 0 or -ENOMEM, not DHD_BAD_IF;
+	 * this check may miss an allocation failure - confirm intent */
+	if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+		goto fail;
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+	dhd->pub.wlfc_enabled = TRUE;
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	mutex_init(&dhd->dhd_net_if_mutex);
+#endif
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+
+	dhd_monitor_init(&dhd->pub);
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(CONFIG_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	/* WEXT is only attached when cfg80211 is not in use */
+	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+			DHD_ERROR(("wl_iw_attach failed\n"));
+			goto fail;
+		}
+		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+
+#ifdef DHDTHREAD
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+	if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
+		dhd->threads_only = TRUE;
+	}
+	else {
+		dhd->threads_only = FALSE;
+	}
+
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0);
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0);
+	} else {
+		/* use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+#else
+	/* Set up the bottom half handler */
+	tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+	dhd->dhd_tasklet_create = TRUE;
+#endif /* DHDTHREAD */
+
+	if (dhd_sysioc) {
+		PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0);
+	} else {
+		dhd->thr_sysioc_ctl.thr_pid = -1;
+	}
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	/* repeated from above; the priv pointer is unchanged here */
+	memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+	register_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	register_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+	return &dhd->pub;
+
+fail:
+	/* before DHD_ALLOC only the netdev exists; afterwards hand the
+	 * partial state to dhd_detach for stage-by-stage rollback */
+	if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
+		if (net) free_netdev(net);
+	} else {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+
+	return NULL;
+}
+
+/* Bring the bus from DOWN/LOAD to DATA state:
+ * download firmware + nvram (when paths are set), start the watchdog,
+ * initialize the bus, register the OOB interrupt (when configured), and
+ * finally run protocol init.  Every early-exit path releases the sdlock
+ * taken at entry when running in threads-only mode.
+ * Returns 0 on success or a negative error code.
+ */
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+
+
+	/* try to download image and nvram to the dongle */
+	if ((dhd->pub.busstate == DHD_BUS_DOWN) &&
+		(fw_path != NULL) && (fw_path[0] != '\0') &&
+		(nv_path != NULL) && (nv_path[0] != '\0')) {
+		/* wake lock moved to dhdsdio_download_firmware */
+		if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+			fw_path, nv_path))) {
+			DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
+				__FUNCTION__, fw_path, nv_path));
+#ifdef DHDTHREAD
+			if (dhd->threads_only)
+				dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+			return -1;
+		}
+	}
+	/* firmware must be loaded (LOAD state) before the bus can start */
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+#ifdef DHDTHREAD
+		if (dhd->threads_only)
+			dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+		return -ENETDOWN;
+	}
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef DHDTHREAD
+		if (dhd->threads_only)
+			dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (bcmsdh_register_oob_intr(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+#ifdef DHDTHREAD
+		if (dhd->threads_only)
+			dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+#ifdef DHDTHREAD
+		if (dhd->threads_only)
+			dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+		return -ENODEV;
+	}
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+
+#ifdef READ_MACADDR
+	dhd_read_macaddr(dhd);
+#endif
+
+	/* Bus is ready, do any protocol initialization */
+	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+		return ret;
+
+#ifdef WRITE_MACADDR
+	dhd_write_macaddr(dhd->pub.mac.octet);
+#endif
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* flush any IP address recorded before the bus was up into the
+	 * dongle's ARP-offload host table */
+	if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		dhd->pend_ipaddr = 0;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+#if !defined(AP) && defined(WLP2P)
+/* For Android ICS MR2 release, the concurrent mode is enabled by default and the firmware
+ * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
+ * would still be named as fw_bcmdhd_apsta.
+ */
+static u32
+dhd_concurrent_fw(dhd_pub_t *dhd)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	int err;
+
+	/* Only probe when no explicit op_mode was requested and the firmware
+	 * path does not already identify a P2P or APSTA image.
+	 */
+	if (op_mode || (strstr(fw_path, "_p2p") != NULL) ||
+	    (strstr(fw_path, "_apsta") != NULL))
+		return 0;
+
+	/* Query the "p2p" iovar: buf[0] == 1 means the STA image carries
+	 * P2P support, so concurrent mode can be applied.
+	 */
+	memset(buf, 0, sizeof(buf));
+	bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
+	if (err < 0) {
+		DHD_TRACE(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, err));
+		return 0;
+	}
+	if (buf[0] == 1) {
+		DHD_TRACE(("%s: P2P is supported\n", __FUNCTION__));
+		return 1;
+	}
+	return 0;
+}
+#endif
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ uint power_mode = PM_OFF; /* PM_FAST */
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = 0;
+ uint bcn_timeout = 4;
+ uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ int arpoe = 1;
+#endif
+ int scan_assoc_time = DHD_SCAN_ACTIVE_TIME;
+ int scan_unassoc_time = 40;
+ int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+ uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+ uint16 chipID;
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+ uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+#endif
+
+#if defined(AP) || defined(WLP2P)
+ uint32 apsta = 1; /* Enable APSTA mode */
+#endif /* defined(AP) || defined(WLP2P) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ dhd->op_mode = 0;
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = dhd_custom_get_mac_address(ea_addr.octet);
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ } else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+ return BCME_NOTUP;
+ }
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef SET_RANDOM_MAC_SOFTAP
+ if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == 0x02)) {
+ uint rand_mac;
+
+ srandom32((uint)jiffies);
+ rand_mac = random32();
+ iovbuf[0] = 0x02; /* locally administered bit */
+ iovbuf[1] = 0x1A;
+ iovbuf[2] = 0x11;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+ bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+ }
+#endif /* SET_RANDOM_MAC_SOFTAP */
+
+ DHD_TRACE(("Firmware = %s\n", fw_path));
+#if !defined(AP) && defined(WLP2P)
+ /* Check if firmware with WFD support used */
+ if ((!op_mode && strstr(fw_path, "_p2p") != NULL) || (op_mode == 0x04) ||
+ (dhd_concurrent_fw(dhd))) {
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s APSTA for WFD failed ret= %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= WFD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ }
+ }
+#endif
+
+#if !defined(AP) && defined(WL_CFG80211)
+ /* Check if firmware with HostAPD support used */
+ if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == 0x02)) {
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
+ } else {
+ dhd->op_mode |= HOSTAPD_MASK;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* (ARP_OFFLOAD_SUPPORT) */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ }
+ }
+#endif
+
+ if ((dhd->op_mode != WFD_MASK) && (dhd->op_mode != HOSTAPD_MASK)) {
+ /* STA only operation mode */
+ dhd->op_mode |= STA_MASK;
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#endif
+ }
+
+ DHD_ERROR(("Firmware up: op_mode=%d, "
+ "Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+ dhd->op_mode,
+ dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2],
+ dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5]));
+
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+ sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ }
+
+ /* Set Listen Interval */
+ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+ /* Set PowerSave mode */
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+ /* Match Host and Dongle rx alignment */
+ bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ /* disable glom option for some chips */
+ chipID = (uint16)dhd_bus_chip_id(dhd);
+ if ((chipID == BCM4330_CHIP_ID) || (chipID == BCM4329_CHIP_ID)) {
+ DHD_INFO(("%s disable glom for chipID=0x%X\n", __FUNCTION__, chipID));
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+
+ /* Setup timeout if Beacons are lost and roam is off to report link down */
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ /* Setup assoc_retry_max count to reconnect target AP in dongle */
+ bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+ /* Turn off MPC in AP mode */
+ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif
+
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ int res;
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded == FALSE)
+#endif
+ if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+ DHD_ERROR(("%s set keeplive failed %d\n",
+ __FUNCTION__, res));
+ }
+#endif /* defined(KEEP_ALIVE) */
+
+ /* Read event_msgs mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+ /* Setup event_msgs */
+ setbit(eventmask, WLC_E_SET_SSID);
+ setbit(eventmask, WLC_E_PRUNE);
+ setbit(eventmask, WLC_E_AUTH);
+ setbit(eventmask, WLC_E_REASSOC);
+ setbit(eventmask, WLC_E_REASSOC_IND);
+ setbit(eventmask, WLC_E_DEAUTH);
+ setbit(eventmask, WLC_E_DEAUTH_IND);
+ setbit(eventmask, WLC_E_DISASSOC_IND);
+ setbit(eventmask, WLC_E_DISASSOC);
+ setbit(eventmask, WLC_E_JOIN);
+ setbit(eventmask, WLC_E_ASSOC_IND);
+ setbit(eventmask, WLC_E_PSK_SUP);
+ setbit(eventmask, WLC_E_LINK);
+ setbit(eventmask, WLC_E_NDIS_LINK);
+ setbit(eventmask, WLC_E_MIC_ERROR);
+ setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+ setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+ setbit(eventmask, WLC_E_PMKID_CACHE);
+ setbit(eventmask, WLC_E_TXFAIL);
+ setbit(eventmask, WLC_E_JOIN_START);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+ setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+ setbit(eventmask, WLC_E_PFN_NET_FOUND);
+#endif /* PNO_SUPPORT */
+ /* enable dongle roaming event */
+ setbit(eventmask, WLC_E_ROAM);
+#ifdef WL_CFG80211
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+ if ((dhd->op_mode & WFD_MASK) == WFD_MASK) {
+ setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+ setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE);
+ setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE);
+ setbit(eventmask, WLC_E_P2P_PROBREQ_MSG);
+ setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ }
+#endif /* WL_CFG80211 */
+
+ /* Write updated Event mask */
+ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+ sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Set and enable ARP offload feature for STA only */
+#if defined(SOFTAP)
+ if (arpoe && !ap_fw_loaded) {
+#else
+ if (arpoe) {
+#endif
+ dhd_arp_offload_enable(dhd, TRUE);
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ } else {
+ dhd_arp_offload_enable(dhd, FALSE);
+ dhd_arp_offload_set(dhd, 0);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+ /* Setup defintions for pktfilter , enable in suspend */
+ dhd->pktfilter_count = 4;
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[0] = "100 0 0 0 0x01 0x00";
+ dhd->pktfilter[1] = NULL;
+ dhd->pktfilter[2] = NULL;
+ dhd->pktfilter[3] = NULL;
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+ int i;
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ 0, dhd_master_mode);
+ }
+ }
+#endif /* defined(SOFTAP) */
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ /* Print fw version info */
+ DHD_ERROR(("Firmware version = %s\n", buf));
+ }
+
+done:
+ return ret;
+}
+
+
+/* Format a named iovar ("name" + parameters from cmd_buf) and issue it to
+ * the dongle on interface ifidx.
+ *
+ * set != 0: WLC_SET_VAR using cmd_buf as the value.
+ * set == 0: WLC_GET_VAR; on success the response is copied back into
+ *           cmd_buf (cmd_len bytes).
+ *
+ * Returns the dhd_wl_ioctl() result (< 0 on error).
+ */
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	/* VLA sized for "name\0" plus the parameter block */
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	/* Fix: honour the caller's direction. The old code forced ioc.set to
+	 * TRUE even when issuing WLC_GET_VAR, contradicting the query path.
+	 */
+	ioc.set = set ? TRUE : FALSE;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
+
+/* Validate and apply a new MTU for interface ifidx.
+ * The netdev must be down; accepted range is [DHD_MIN_MTU, DHD_MAX_MTU].
+ * Returns 0, BCME_NOTDOWN, or BCME_BADARG.
+ */
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+	struct dhd_info *drvr = dhdp->info;
+	struct net_device *ndev;
+
+	ASSERT(drvr && drvr->iflist[ifidx]);
+	ndev = drvr->iflist[ifidx]->net;
+	ASSERT(ndev);
+
+	if (netif_running(ndev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU", ndev->name));
+		return BCME_NOTDOWN;
+	}
+
+	if (new_mtu < DHD_MIN_MTU || new_mtu > DHD_MAX_MTU) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	ndev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* Add (add=TRUE) or remove (add=FALSE) host IP 'ipa' in the dongle's ARP
+ * offload (AOE) host_ip table, which holds up to MAX_IPV4_ENTRIES entries.
+ * The table is read back, patched locally, then rewritten entry by entry.
+ */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add)
+{
+	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+	int i;
+	int ret;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now we saved hoste_ip table, clr it in the dongle AOE */
+	/* NOTE(review): the dongle table is cleared before 'ret' is checked,
+	 * so a failed read still wipes the dongle entries - confirm intended.
+	 */
+	dhd_aoe_hostip_clr(dhd_pub);
+
+	if (ret) {
+		DHD_ERROR(("%s failed\n", __FUNCTION__));
+		return;
+	}
+
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		/* on add: place the new address in the first free (zero) slot */
+		if (add && (ipv4_buf[i] == 0)) {
+			ipv4_buf[i] = ipa;
+			add = FALSE; /* added ipa to local table  */
+			DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+		} else if (ipv4_buf[i] == ipa) {
+			/* on remove (or duplicate add): zero the matching slot */
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/* inetaddr notifier callback: mirrors IPv4 address add/remove events on the
+ * primary DHD interface into the dongle's ARP-offload host IP table. If the
+ * bus is not up yet, the address is parked in pend_ipaddr instead.
+ */
+static int dhd_device_event(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+
+	if (!ifa)
+		return NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
+#else
+	if (ifa->ifa_dev->dev) {
+#endif
+		/* Fix: look up our private data only after the device has been
+		 * identified as a DHD netdev; netdev_priv() of an arbitrary
+		 * interface does not hold a dhd_info_t pointer, so the old
+		 * unconditional dereference read through foreign private data.
+		 */
+		dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+		dhd_pub = &dhd->pub;
+
+		switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			if (dhd->pub.busstate != DHD_BUS_DATA) {
+				/* bus not ready: remember the address for later */
+				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+				if (dhd->pend_ipaddr) {
+					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+						__FUNCTION__, dhd->pend_ipaddr));
+				}
+				dhd->pend_ipaddr = ifa->ifa_address;
+				break;
+			}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			/* ':' in the label means an aliased (secondary) address */
+			if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) {
+				DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+					__FUNCTION__));
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+			}
+			else
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE);
+#endif
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+			dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+			if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) {
+				/* primary interface down: clear everything */
+				DHD_ARPOE(("%s: primary interface is down, AOE clr all\n",
+					__FUNCTION__));
+				dhd_aoe_hostip_clr(&dhd->pub);
+				dhd_aoe_arp_clr(&dhd->pub);
+			} else
+				aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE);
+#else
+			dhd_aoe_hostip_clr(&dhd->pub);
+			dhd_aoe_arp_clr(&dhd->pub);
+#endif
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+				__func__, ifa->ifa_label, event));
+			break;
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Register dhd->iflist[ifidx]->net with the network stack: install netdev
+ * ops (per kernel version), choose the MAC address (virtual interfaces use
+ * the primary MAC, with the locally-administered bit set if it would
+ * otherwise collide), then register_netdev(). Returns 0 or the negative
+ * errno from registration.
+ */
+int
+dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+
+	net = dhd->iflist[ifidx]->net;
+	ASSERT(net);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+	} else {
+		/*
+		 * We have to use the primary MAC for virtual interfaces
+		 */
+		memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+		/*
+		 * Android sets the locally administered bit to indicate that this is a
+		 * portable hotspot.  This will not work in simultaneous AP/STA mode,
+		 * nor with P2P.  Need to set the Donlge's MAC address, and then use that.
+		 */
+		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+			ETHER_ADDR_LEN)) {
+			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+			__func__, net->name));
+			temp_addr[0] |= 0x02;
+		}
+	}
+
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if ((err = register_netdev(net)) != 0) {
+		DHD_ERROR(("couldn't register the net device, err %d\n", err));
+		goto fail;
+	}
+	printf("Broadcom Dongle Host Driver: register interface [%s]"
+		" MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+		net->name,
+		net->dev_addr[0], net->dev_addr[1], net->dev_addr[2],
+		net->dev_addr[3], net->dev_addr[4], net->dev_addr[5]);
+
+#if defined(SOFTAP) && defined(CONFIG_WIRELESS_EXT) && !defined(WL_CFG80211)
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (ifidx == 0) {
+		/* release dhd_module_init(), which waits on this semaphore */
+		up(&dhd_registration_sem);
+	}
+#endif
+	return 0;
+
+fail:
+	/* undo the ops installation so a retry starts from a clean netdev */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+/* Stop the protocol layer and the bus (unless the bus is already down,
+ * e.g. after dhd_stop under cfg80211, where stopping again would cause SD
+ * read/write errors), then release any OOB interrupt registration.
+ */
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/*
+			 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
+			 *  calling stop again will cuase SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+			}
+
+#if defined(OOB_INTR_ONLY)
+			bcmsdh_unregister_oob_intr();
+#endif /* defined(OOB_INTR_ONLY) */
+		}
+	}
+}
+
+
+/* Tear down everything dhd_attach() set up, in reverse dependency order,
+ * guided by the dhd_state attach-progress flags. Safe to call on a
+ * partially constructed instance (attach failure path).
+ */
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+	int timer_valid = FALSE;
+
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		osl_delay(1000*100);
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	unregister_inetaddr_notifier(&dhd_notifier);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+
+
+#if defined(CONFIG_WIRELESS_EXT)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detatch and unlink in the iw */
+		wl_iw_detach();
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+	/* Fix: the old test took the address of thr_pid ("&dhd->...thr_pid"),
+	 * which is always non-NULL, so PROC_STOP ran even when the sysioc
+	 * thread was never started. Compare the pid value itself.
+	 */
+	if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
+		PROC_STOP(&dhd->thr_sysioc_ctl);
+	}
+
+	/* delete all interfaces, start with virtual  */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		/* Cleanup virtual interfaces */
+		for (i = 1; i < DHD_MAX_IFS; i++)
+			if (dhd->iflist[i]) {
+				dhd->iflist[i]->state = DHD_IF_DEL;
+				dhd->iflist[i]->idx = i;
+				dhd_op_if(dhd->iflist[i]);
+			}
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+		ASSERT(ifp->net);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		if (ifp->net->open)
+#else
+		if (ifp->net->netdev_ops == &dhd_ops_pri)
+#endif
+		{
+			if (ifp->net) {
+				unregister_netdev(ifp->net);
+				free_netdev(ifp->net);
+				ifp->net = NULL;
+			}
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			dhd->iflist[0] = NULL;
+
+		}
+	}
+
+	/* Clear the watchdog timer */
+	flags = dhd_os_spin_lock(&dhd->pub);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHDTHREAD
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		}
+		else
+#endif /* DHDTHREAD */
+		tasklet_kill(&dhd->tasklet);
+	}
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+		dhd_bus_detach(dhdp);
+
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+	}
+
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+		wl_cfg80211_detach(NULL);
+		dhd_monitor_uninit();
+	}
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+	unregister_pm_notifier(&dhd_sleep_pm_notifier);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+	/* && defined(CONFIG_PM_SLEEP) */
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock_destroy(&dhd->wl_wifi);
+		wake_lock_destroy(&dhd->wl_rxwake);
+#endif
+	}
+}
+
+
+/* Release the driver-private dhd_info_t and any packet-reorder buffers
+ * still attached to the public descriptor. Safe to call with dhdp == NULL.
+ */
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	int idx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhdp)
+		return;
+
+	for (idx = 0; idx < ARRAYSIZE(dhdp->reorder_bufs); idx++) {
+		reorder_info_t *rinfo = dhdp->reorder_bufs[idx];
+		uint32 alloc_len;
+
+		if (!rinfo)
+			continue;
+
+		/* allocation = header + one slot pointer per reorder index */
+		alloc_len = sizeof(struct reorder_info) +
+			((rinfo->max_idx + 1) * sizeof(void*));
+		DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+			idx, rinfo->max_idx, alloc_len));
+
+		MFREE(dhdp->osh, dhdp->reorder_bufs[idx], alloc_len);
+		dhdp->reorder_bufs[idx] = NULL;
+	}
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (dhd)
+		MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+}
+
+/* Module exit: unregister the bus driver, tear down the Android WiFi
+ * control hooks, then power the chip off via WL_REG_ON (order matters:
+ * the bus must be gone before power is cut).
+ */
+static void __exit
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	wl_android_wifictrl_func_del();
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+	wl_android_exit();
+
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+}
+
+
+/* Module entry point: power the WLAN chip (retrying the power-on while
+ * waiting for the SDIO card-detect notification), register the bus driver,
+ * then wait for dhd_net_attach() to signal that the primary interface
+ * registered. Returns 0 or a negative errno.
+ */
+static int __init
+dhd_module_init(void)
+{
+	int error = 0;
+
+#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int retry = POWERUP_MAX_RETRY;
+	int chip_up = 0;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	wl_android_init();
+
+#if defined(DHDTHREAD)
+	/* Sanity check on the module parameters */
+	do {
+		/* Both watchdog and DPC as tasklets are ok */
+		if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
+			break;
+
+		/* If both watchdog and DPC are threads, TX must be deferred */
+		if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
+			break;
+
+		DHD_ERROR(("Invalid module parameters.\n"));
+		return -EINVAL;
+	} while (0);
+#endif
+
+#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/* Power-up loop: dhd_chipup_sem is posted by the SDIO notify hook
+	 * once the chip enumerates; on timeout, power off and retry.
+	 */
+	do {
+		sema_init(&dhd_chipup_sem, 0);
+		dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+		dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+		if (wl_android_wifictrl_func_add() < 0)
+			goto fail_1;
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+		if (down_timeout(&dhd_chipup_sem,
+			msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+			dhd_bus_unreg_sdio_notify();
+			chip_up = 1;
+			break;
+		}
+		DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+			retry+1));
+		dhd_bus_unreg_sdio_notify();
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+		wl_android_wifictrl_func_del();
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+		dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+	} while (retry-- > 0);
+
+	if (!chip_up) {
+		DHD_ERROR(("\nfailed to power up wifi chip, max retry reached, exits **\n\n"));
+		return -ENODEV;
+	}
+#else
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	if (wl_android_wifictrl_func_add() < 0)
+		goto fail_1;
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
+
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/* posted by dhd_net_attach() when the primary netdev registers */
+	sema_init(&dhd_registration_sem, 0);
+#endif
+
+
+	error = dhd_bus_register();
+
+	if (!error)
+		printf("\n%s\n", dhd_version);
+	else {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail_1;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/*
+	 * Wait till MMC sdio_register_driver callback called and made driver attach.
+	 * It's needed to make sync up exit from dhd insmod  and
+	 * Kernel MMC sdio device callback registration
+	 */
+	if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
+		error = -ENODEV;
+		DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
+		goto fail_2;
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if defined(WL_CFG80211)
+	wl_android_post_init();
+#endif /* defined(WL_CFG80211) */
+
+	return error;
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+fail_2:
+	dhd_bus_unregister();
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+fail_1:
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+	wl_android_wifictrl_func_del();
+#endif
+
+	/* Call customer gpio to turn off power with WL_REG_ON signal */
+	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+
+	return error;
+}
+
+/* Use late_initcall on >= 2.6.0 so built-in initialization runs after
+ * earlier-stage subsystems (e.g. the MMC/SDIO host); older kernels fall
+ * back to plain module_init.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+late_initcall(dhd_module_init);
+#else
+module_init(dhd_module_init);
+#endif
+
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+/* Acquire the protocol-layer semaphore.
+ * Returns 1 on success, 0 if no driver instance is attached.
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t *drvr = (dhd_info_t *)(pub->info);
+
+	if (!drvr)
+		return 0;
+
+	down(&drvr->proto_sem);
+	return 1;
+}
+
+/* Release the protocol-layer semaphore.
+ * Returns 1 on success, 0 if no driver instance is attached.
+ */
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *drvr = (dhd_info_t *)(pub->info);
+
+	if (!drvr)
+		return 0;
+
+	up(&drvr->proto_sem);
+	return 1;
+}
+
+/* Current dongle ioctl response timeout, in milliseconds. */
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return (unsigned int)dhd_ioctl_timeout_msec;
+}
+
+/* Set the dongle ioctl response timeout (milliseconds). */
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+/* Block the caller (interruptibly) until *condition becomes non-zero, a
+ * signal arrives, or the ioctl timeout expires. Returns the remaining
+ * timeout in jiffies (0 means timed out); *pending is set TRUE when a
+ * signal interrupted the wait.
+ */
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	DECLARE_WAITQUEUE(wait, current);
+	int timeout = dhd_ioctl_timeout_msec;
+
+	/* Convert timeout in millsecond to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(timeout);
+#else
+	timeout = timeout * HZ / 1000;
+#endif
+
+	/* Wait until control frame is available */
+	add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	/* Memory barrier to support multi-processing
+	 * As the variable "condition", which points to dhd->rxlen (dhd_bus_rxctl[dhd_sdio.c])
+	 * Can be changed by another processor.
+	 */
+	smp_mb();
+	while (!(*condition) && (!signal_pending(current) && timeout)) {
+		timeout = schedule_timeout(timeout);
+		smp_mb();
+	}
+
+	if (signal_pending(current))
+		*pending = TRUE;
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+
+	return timeout;
+}
+
+/* Wake any thread sleeping in dhd_os_ioctl_resp_wait(). Always returns 0. */
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *drvr = (dhd_info_t *)(pub->info);
+
+	if (waitqueue_active(&drvr->ioctl_resp_wait))
+		wake_up_interruptible(&drvr->ioctl_resp_wait);
+
+	return 0;
+}
+
+/* Arm, re-arm, or cancel the driver watchdog timer. wdtick is the new
+ * period in milliseconds (0 stops the timer). State changes are made
+ * under the dhd spinlock; note the lock is released before
+ * del_timer_sync(), which may sleep.
+ */
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	flags = dhd_os_spin_lock(pub);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		dhd_os_spin_unlock(pub, flags);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(pub, flags);
+#ifdef DHDTHREAD
+		del_timer_sync(&dhd->timer);
+#else
+		del_timer(&dhd->timer);
+#endif /* DHDTHREAD */
+		return;
+	}
+
+	if (wdtick) {
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re arm the timer, at last watchdog period */
+		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
+		dhd->wd_timer_valid = TRUE;
+	}
+	dhd_os_spin_unlock(pub, flags);
+}
+
+/* Open a firmware/nvram image file read-only.
+ * Returns an opaque handle (struct file *) or NULL on failure; callers
+ * expect NULL rather than an ERR_PTR.
+ */
+void *
+dhd_os_open_image(char *filename)
+{
+	struct file *fp = filp_open(filename, O_RDONLY, 0);
+
+	return IS_ERR(fp) ? NULL : fp;
+}
+
+/* Read up to len bytes from the image handle into buf, advancing the file
+ * position. Returns the number of bytes read, 0 for a NULL handle, or a
+ * negative error from kernel_read().
+ */
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int nread;
+
+	if (fp == NULL)
+		return 0;
+
+	nread = kernel_read(fp, fp->f_pos, buf, len);
+	if (nread > 0)
+		fp->f_pos += nread;
+
+	return nread;
+}
+
+/* Close a firmware image handle obtained from dhd_os_open_image(). */
+void
+dhd_os_close_image(void *image)
+{
+	struct file *fp = (struct file *)image;
+
+	if (fp)
+		filp_close(fp, NULL);
+}
+
+
+/* Acquire the SD bus lock: a semaphore when watchdog/DPC run as threads
+ * (threads_only), otherwise a bottom-half spinlock.
+ */
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		down(&dhd->sdsem);
+	else
+#endif /* DHDTHREAD */
+	spin_lock_bh(&dhd->sdlock);
+}
+
+/* Release the SD bus lock taken by dhd_os_sdlock(); must match the
+ * primitive chosen there (semaphore vs bottom-half spinlock).
+ */
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+#ifdef DHDTHREAD
+	if (dhd->threads_only)
+		up(&dhd->sdsem);
+	else
+#endif /* DHDTHREAD */
+	spin_unlock_bh(&dhd->sdlock);
+}
+
+/* Take the TX queue bottom-half spinlock. */
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *drvr = (dhd_info_t *)(pub->info);
+
+	spin_lock_bh(&drvr->txqlock);
+}
+
+/* Release the TX queue bottom-half spinlock. */
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *drvr = (dhd_info_t *)(pub->info);
+
+	spin_unlock_bh(&drvr->txqlock);
+}
+
+/* Intentionally empty: no RX-queue lock is taken in this port. */
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+/* Intentionally empty: counterpart of the no-op dhd_os_sdlock_rxq(). */
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+/* The TX path shares the general SD bus lock. */
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+	dhd_os_sdlock(pub);
+}
+
+/* Counterpart of dhd_os_sdtxlock(): release the shared SD bus lock. */
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+	dhd_os_sdunlock(pub);
+}
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+/* Hand out a section of the statically pre-allocated WiFi buffer pool. */
+uint8* dhd_os_prealloc(void *osh, int section, uint size)
+{
+	return (uint8*)wl_android_prealloc(section, size);
+}
+
+/* Static pool memory is never returned to the system; intentional no-op. */
+void dhd_os_prefree(void *osh, void *addr, uint size)
+{
+}
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+#if defined(CONFIG_WIRELESS_EXT)
+/* Refresh and return the cached wireless-extension statistics for dev.
+ * Returns NULL if the driver is down or the stats query fails.
+ */
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	dhd_info_t *drvr = *(dhd_info_t **)netdev_priv(dev);
+
+	if (!drvr->pub.up)
+		return NULL;
+
+	if (wl_iw_get_wireless_stats(dev, &drvr->iw.wstats) != 0)
+		return NULL;
+
+	return &drvr->iw.wstats;
+}
+#endif /* defined(CONFIG_WIRELESS_EXT) */
+
+/* Decode a raw dongle event via wl_host_event() and forward it to the
+ * wireless-ext and/or cfg80211 listeners of the matching interface.
+ * cfg80211 delivery is suppressed for wl-utility-initiated WLC_E_IF ADD
+ * events and for virtual-interface events while an I/F add/change is in
+ * progress. Returns a BCME_ status code.
+ */
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+	ASSERT(dhd != NULL);
+
+	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(CONFIG_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+		ASSERT(dhd->iflist[*ifidx] != NULL);
+		ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+		if (dhd->iflist[*ifidx]->net) {
+			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(CONFIG_WIRELESS_EXT)  */
+
+#ifdef WL_CFG80211
+	if ((ntoh32(event->event_type) == WLC_E_IF) &&
+		(((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD))
+		/* If ADD_IF has been called directly by wl utility then we
+		 * should not report this. In case if ADD_IF was called from
+		 * CFG stack, then too this event need not be reported back
+		 */
+		return (BCME_OK);
+	if ((wl_cfg80211_is_progress_ifchange() ||
+		wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
+		/*
+		 * If IF_ADD/CHANGE operation is going on,
+		 *  discard any event received on the virtual I/F
+		 */
+		return (BCME_OK);
+	}
+
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+	if (dhd->iflist[*ifidx]->event2cfg80211 && dhd->iflist[*ifidx]->net) {
+		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+	}
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* Deliver a locally generated event to the network stack. Only
+ * WLC_E_BTA_HCI_EVENT (AMP HCI, under WLBTAMP) is handled: the event is
+ * wrapped in a bcm_event_t ethernet frame and injected via netif_rx[_ni].
+ */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+#ifdef WLBTAMP
+	/* Send up locally generated AMP HCI Events */
+	case WLC_E_BTA_HCI_EVENT: {
+		struct sk_buff *p, *skb;
+		bcm_event_t *msg;
+		wl_event_msg_t *p_bcm_event;
+		char *ptr;
+		uint32 len;
+		uint32 pktlen;
+		dhd_if_t *ifp;
+		dhd_info_t *dhd;
+		uchar *eth;
+		int ifidx;
+
+		len = ntoh32(event->datalen);
+		pktlen = sizeof(bcm_event_t) + len + 2;
+		dhd = dhdp->info;
+		ifidx = dhd_ifname2idx(dhd, event->ifname);
+
+		if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+			ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+			msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
+
+			/* dongle MAC as both src and dst, src toggled local */
+			bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
+			bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
+			ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
+
+			msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+			/* BCM Vendor specific header... */
+			msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
+			msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
+			bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
+
+			/* vendor spec header length + pvt data length (private indication
+			 *  hdr + actual message itself)
+			 */
+			msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
+				BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
+			msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
+
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			/* copy wl_event_msg_t into sk_buf */
+
+			/* pointer to wl_event_msg_t in sk_buf */
+			p_bcm_event = &msg->event;
+			bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
+
+			/* copy hci event into sk_buf */
+			bcopy(data, (p_bcm_event + 1), len);
+
+			/* NOTE(review): bcm_hdr.length and PKTSETLEN are both
+			 * adjusted a second time here - confirm the extra
+			 * sizeof(wl_event_msg_t) accumulation is intended.
+			 */
+			msg->bcm_hdr.length  = hton16(sizeof(wl_event_msg_t) +
+				ntoh16(msg->bcm_hdr.length));
+			PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
+
+			ptr = (char *)(msg + 1);
+			/* Last 2 bytes of the message are 0x00 0x00 to signal that there
+			 * are no ethertypes which are following this
+			 */
+			ptr[len+0] = 0x00;
+			ptr[len+1] = 0x00;
+
+			skb = PKTTONATIVE(dhdp->osh, p);
+			eth = skb->data;
+			len = skb->len;
+
+			ifp = dhd->iflist[ifidx];
+			if (ifp == NULL)
+			     ifp = dhd->iflist[0];
+
+			ASSERT(ifp);
+			skb->dev = ifp->net;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+
+			skb->data = eth;
+			skb->len = len;
+
+			/* Strip header, count, deliver upward */
+			skb_pull(skb, ETH_HLEN);
+
+			/* Send the packet */
+			if (in_interrupt()) {
+				netif_rx(skb);
+			} else {
+				netif_rx_ni(skb);
+			}
+		}
+		else {
+			/* Could not allocate a sk_buf */
+			DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
+		}
+		break;
+	} /* case WLC_E_BTA_HCI_EVENT */
+#endif /* WLBTAMP */
+
+	default:
+		break;
+	}
+}
+
+/* Sleep (up to 2 s) until *lockvar is cleared; the SD bus lock is dropped
+ * across the wait so the thread clearing the flag can make progress.
+ */
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	dhd_os_sdunlock(dhd);
+	wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
+	dhd_os_sdlock(dhd);
+#endif
+	return;
+}
+
+/* Wake anyone blocked in dhd_wait_for_event() on the ctrl_wait queue. */
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up_interruptible(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
+/* Linux wrapper for dhd_bus_devreset(): toggle the bus/chip reset state.
+ * Returns the bus-layer result; failures are logged.
+ */
+int
+dhd_dev_reset(struct net_device *dev, uint8 flag)
+{
+	dhd_info_t *drvr = *(dhd_info_t **)netdev_priv(dev);
+	int err = dhd_bus_devreset(&drvr->pub, flag);
+
+	if (err)
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, err));
+
+	return err;
+}
+
+/* Set the suspend-disable flag; returns its previous value (0 if no dhd). */
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *drvr = *(dhd_info_t **)netdev_priv(dev);
+	int prev = 0;
+
+	if (drvr != NULL) {
+		prev = drvr->pub.suspend_disable_flag;
+		drvr->pub.suspend_disable_flag = val;
+	}
+	return prev;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val)
+{
+ int ret = 0;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd) {
+ ret = dhd_set_suspend(val, &dhd->pub);
+ }
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+ return ret;
+}
+
+int net_os_set_dtim_skip(struct net_device *dev, int val)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ if (dhd)
+ dhd->pub.dtim_skip = val;
+
+ return 0;
+}
+
+/*
+ * Install (add_remove != 0) or clear (add_remove == 0) one of the fixed
+ * receive packet-filter slots.  The unicast slot is never modified here.
+ * The strings are in the driver's text filter format (presumably
+ * "id offset ... mask pattern" - confirm against the pkt_filter parser).
+ * Returns 0, or -EINVAL for an out-of-range/unknown slot.
+ */
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	char *filterp = NULL;
+	int ret = 0;
+
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
+		return ret;
+	if (num >= dhd->pub.pktfilter_count)
+		return -EINVAL;
+	if (add_remove) {
+		switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	/* Clearing a slot stores NULL (filterp untouched on remove). */
+	dhd->pub.pktfilter[num] = filterp;
+	return ret;
+}
+
+/*
+ * Enable (val != 0) or disable (val == 0) the configured packet filters,
+ * but only while the interface is up and in early-suspend.  Always
+ * returns 0.
+ */
+int net_os_set_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	/* Packet filtering is set only if we still in early-suspend and
+	 * we need either to turn it ON or turn it OFF
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set
+	 */
+	if (dhd && dhd->pub.up) {
+		if (dhd->pub.in_suspend) {
+			if (!val || (val && !dhd->pub.suspend_disable_flag))
+				dhd_set_packet_filter(val, &dhd->pub);
+		}
+	}
+	return ret;
+}
+
+
+/* Run the post-attach firmware pre-init ioctls for this net_device. */
+void
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	dhd_preinit_ioctls(&dhd->pub);
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_clean */
+int
+dhd_dev_pno_reset(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_clean(&dhd->pub));
+}
+
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable(&dhd->pub, pfn_enabled));
+}
+
+
+/* Linux wrapper to call common dhd_pno_set: program the preferred-network
+ * offload SSID list, scan frequency and repeat/backoff parameters.
+ */
+int
+dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
+}
+
+/* Linux wrapper to get pno status */
+int
+dhd_dev_get_pno_status(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_get_status(&dhd->pub));
+}
+
+#endif /* PNO_SUPPORT */
+
+/*
+ * Notify userspace (at most once, guarded by hang_was_sent) that the
+ * dongle has hung: a "HANG" private event over WEXT and/or a cfg80211
+ * disconnect followed by bouncing the interface.
+ */
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		if (!dhd->pub.hang_was_sent) {
+			dhd->pub.hang_was_sent = 1;
+#if defined(CONFIG_WIRELESS_EXT)
+			ret = wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+			/* NOTE(review): WL_CFG80211 overwrites the WEXT ret,
+			 * and dev_close()/dev_open() returns are ignored. */
+			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+			dev_close(dev);
+			dev_open(dev);
+#endif
+		}
+	}
+	return ret;
+}
+
+/* Cache the country spec in dhd_pub (only while the interface is up);
+ * it is consumed elsewhere when the country code is (re)applied.
+ */
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd && dhd->pub.up)
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+}
+
+/* Public net_device-based wrappers around the per-driver interface mutex. */
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+/* Take dhd_net_if_mutex; no-op on pre-2.6.25 kernels or a NULL dhd. */
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+/* Take dhd_lock with IRQs disabled; the returned flags word must be
+ * passed back to dhd_os_spin_unlock().
+ */
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+/* Release dhd_lock and restore the IRQ state saved by dhd_os_spin_lock(). */
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+/* Number of in-flight 802.1X (EAPOL) frames not yet completed by the bus. */
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
+/*
+ * Poll up to MAX_WAIT_FOR_8021X_TX times for pending 802.1X frames to
+ * drain, sleeping ~10ms between polls.  Returns the count still pending
+ * (0 on success).
+ */
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	/* NOTE(review): 10 * HZ / 1000 truncates to 0 jiffies when HZ < 100,
+	 * turning schedule_timeout() into an immediate return. */
+	int timeout = 10 * HZ / 1000;
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		if (pend) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(timeout);
+			set_current_state(TASK_RUNNING);
+			ntimes--;
+		}
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+/*
+ * Debug helper: dump `size` bytes of `buf` to /tmp/mem_dump.  Takes
+ * ownership of `buf` and frees it on every path.  Returns 0 on success,
+ * -1 if the file could not be opened.
+ */
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	/* NOTE(review): filp_open() returns ERR_PTR() on failure, never
+	 * NULL, so this check cannot trip and filp_close() below may be
+	 * handed an error pointer - should be IS_ERR(fp). */
+	if (!fp) {
+		printf("%s: open file error\n", __FUNCTION__);
+		ret = -1;
+		goto exit;
+	}
+
+	/* Write buf to file (return value ignored - best-effort dump) */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+/*
+ * Arm the rx wakelock for the currently requested timeout (in ms) and
+ * clear the request.  Returns the timeout value that was pending (0 if
+ * none).  wakelock_spinlock serialises against the enable path below.
+ */
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake,
+				msecs_to_jiffies(dhd->wakelock_timeout_enable));
+#endif
+		dhd->wakelock_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+/* net_device front-end for dhd_os_wake_lock_timeout(). */
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+/* Request a timed wakelock of at least `val` ms; only ever raises the
+ * currently pending request.  Always returns 0.
+ */
+int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_timeout_enable)
+			dhd->wakelock_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+/* net_device front-end for dhd_os_wake_lock_timeout_enable(). */
+int net_os_wake_lock_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+/*
+ * Take a reference on the driver wakelock; the Android wakelock itself is
+ * only grabbed on the 0 -> 1 transition.  Returns the new refcount.
+ */
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+		if (!dhd->wakelock_counter)
+			wake_lock(&dhd->wl_wifi);
+#endif
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+/* net_device front-end for dhd_os_wake_lock(). */
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+/*
+ * Drop a wakelock reference (releasing the wakelock on 1 -> 0) and also
+ * flush any pending timed-wakelock request first.  Returns the remaining
+ * refcount (0 when already unlocked).
+ */
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter) {
+			dhd->wakelock_counter--;
+#ifdef CONFIG_HAS_WAKELOCK
+			if (!dhd->wakelock_counter)
+				wake_unlock(&dhd->wl_wifi);
+#endif
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+/* Return 1 when the driver wakelock is currently held (always 0 without
+ * CONFIG_HAS_WAKELOCK or with a NULL pub/info).
+ */
+int dhd_os_check_wakelock(void *dhdp)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+	dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd && wake_lock_active(&dhd->wl_wifi))
+		return 1;
+#endif
+	return 0;
+}
+
+/* net_device front-end for dhd_os_wake_unlock(). */
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+/* Return the pub->up flag (0 for a NULL pub). */
+int dhd_os_check_if_up(void *dhdp)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+
+	if (!pub)
+		return 0;
+	return pub->up;
+}
+/*
+ * In-kernel ioctl path: resolve the interface index for `net`, issue the
+ * wl ioctl under a wakelock, and feed the result into the hang detector.
+ * `cmd` is currently unused.
+ */
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+	int ifidx;
+	int ret = 0;
+	dhd_info_t *dhd = NULL;
+
+	/* NOTE(review): netdev_priv() is plain pointer arithmetic and never
+	 * returns NULL, so the second clause is ineffective. */
+	if (!net || !netdev_priv(net)) {
+		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd = *(dhd_info_t **)netdev_priv(net);
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+	dhd_check_hang(net, &dhd->pub, ret);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return ret;
+}
+
+/* ifidx-based wrapper: map the index to its net_device and run the hang
+ * check on the given ioctl result.
+ */
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+	struct net_device *net;
+
+	net = dhd_idx2net(dhdp, ifidx);
+	return dhd_check_hang(net, dhdp, ret);
+}
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid,
+	uint8 iftype, uint8* ea);
+extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
+
+/* Forward an interface add/remove event to the wlfc module; a no-op
+ * (BCME_OK) when flow control has not been brought up yet.
+ */
+int dhd_wlfc_interface_event(struct dhd_info *dhd,
+	ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	if (dhd->pub.wlfc_state == NULL)
+		return BCME_OK;
+
+	return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
+}
+
+/* Forward a FIFO credit-map update from the dongle; no-op until wlfc is up. */
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
+{
+	if (dhd->pub.wlfc_state == NULL)
+		return BCME_OK;
+
+	return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
+}
+
+/* Enable proptx flow control when the dongle signals support for it. */
+int dhd_wlfc_event(struct dhd_info *dhd)
+{
+	return dhd_wlfc_enable(&dhd->pub);
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+/* Singleton state for the /sys/kernel/debug/dhd/mem register window. */
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t	*dhdp;
+	uint32		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * Read one aligned 32-bit backplane register at the current file offset.
+ * NOTE(review): bounds are checked against *ppos but the register address
+ * comes from file->f_pos, and copy_to_user() returns the number of bytes
+ * NOT copied (so `ret == count` only catches a total failure, and *ppos
+ * may advance past the 4 bytes actually produced) - review before reuse.
+ */
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+/*
+ * Write one aligned 32-bit backplane register at the current file offset.
+ * NOTE(review): always copies sizeof(uint32) from userspace regardless of
+ * `count`, so a short write may over-read the user buffer.
+ */
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+/*
+ * llseek for the register window.  NOTE(review): the SEEK_END case
+ * computes size - off, so a positive offset seeks backwards from the
+ * end - confirm this is the intended convention.
+ */
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case 0: /* SEEK_SET */
+			pos = off;
+			break;
+		case 1: /* SEEK_CUR */
+			pos = file->f_pos + off;
+			break;
+		case 2: /* SEEK_END */
+			pos = g_dbgfs.size - off;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
+
+/* Create the "mem" node under the dhd debugfs directory (if it exists). */
+static void dhd_dbg_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+/* Set up the debugfs register window covering 0x20000000 bytes of cores. */
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+	int err;
+
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		err = PTR_ERR(g_dbgfs.debugfs_dir);
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbg_create();
+
+	return;
+}
+
+/* Tear down the debugfs nodes and reset the singleton state. */
+void dhd_dbg_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+/*
+ * Stamp an outgoing packet with the high-resolution TSF for latency
+ * measurement.  Fixed byte offsets into the frame (40, 44, 82, 84,
+ * HTSF_HOSTOFFSET) assume a specific UDP test-traffic layout - confirm
+ * against the iperf setup this instrumentation targets.
+ */
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/* timestamp packet */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4); 	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF; */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if icmp or udb iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+/*
+ * Print a latency histogram: one count per 500-unit bin, then the stored
+ * max (bin[NUMBIN-2]) and negative-sample count (bin[NUMBIN-1]).
+ * NOTE(review): `curval` is accumulated but never used.
+ */
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+/*
+ * Add one latency sample to the histogram: negative values go to
+ * bin[NUMBIN-1], the running maximum is kept in bin[NUMBIN-2], and other
+ * values fall into 500-unit buckets.  NOTE(review): a value larger than
+ * the last bucket boundary lands in bin[NUMBIN-3], the same bin as the
+ * last in-range bucket - there is no distinct overflow bin.
+ */
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500m s bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
+
+/*
+ * On receive, complete the timestamp record started by dhd_htsf_addtxts():
+ * fill in the end-to-end TSF, sort the per-hop deltas into histograms and
+ * track the worst-case latency.
+ * NOTE(review): the PKTLEN() call below passes a bare `osh` where every
+ * other call site uses dhdp->osh - this looks like it cannot compile
+ * unless a global `osh` exists; verify.
+ */
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	}
+	else
+		return;
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
+/*
+ * Extrapolate the dongle TSF to "now": measure CPU cycles elapsed since
+ * the last TSF sync (with 32-bit wrap handling), convert to microseconds
+ * using the calibrated coef/coefdec factors from htsf_update(), and add
+ * the bus-delay fudge.  Returns 0 if calibration has not run yet.
+ */
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first digit */
+		factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+		baseval  = (delta*10)/factor;
+		baseval2 = (delta*10)/(factor+1);
+		delta_us  = (baseval -  (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) +  dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	}
+	else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
+
+/*
+ * Dump the whole timestamp ring: per-entry hop deltas, the running worst
+ * entry, and the overall worst-case latency summary recorded by
+ * dhd_htsf_addrxts().
+ */
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+/*
+ * Fetch the dongle's raw TSF via the "tsf" iovar and print it alongside
+ * the host-extrapolated value (sampled just before and after the ioctl)
+ * so the calibration error is visible.  Returns 0, -EOPNOTSUPP when the
+ * firmware lacks the iovar, or the ioctl error.
+ */
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&ioc, 0, sizeof(ioc));
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strcpy(buf, "tsf");
+	s1 = dhd_get_htsf(dhd, 0);
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
+/*
+ * Recalibrate the cycles-per-TSF-tick conversion used by dhd_get_htsf():
+ * compare the CPU cycle delta against the dongle TSF delta since the last
+ * update and derive an integer factor plus three decimal digits (with
+ * rounding).  `data` points at the new tsf_t sample from the dongle.
+ * NOTE(review): locals `a`/`b` are assigned but never used, and the
+ * `htsf` computed at the end is discarded.
+ */
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	ulong b, a;
+	cycles_t t;
+
+	/* cycles_t in inlcude/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		b = cur_cycle;
+		a = prev_cycle;
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL)
+		printf(" tsf update ata point er is null \n");
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
+		}
+		else
+			return; /* do not update */
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = 	(cyc_delta - (hfactor * tsf_delta))*10;
+		dec1 =  tmp/tsf_delta;
+		dec2 =  ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+		tmp  = 	(tmp   - (dec1*tsf_delta))*10;
+		dec3 =  ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+		/* round the third decimal digit into the first two */
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				}
+				else {
+					dec1++;
+				}
+			}
+			else
+				dec2++;
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	}
+	else {
+		htsf = prev_tsf.low;
+	}
+}
+
+#endif /* WLMEDIA_HTSF */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
new file mode 100644
index 00000000000000..290caf7e658cb1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c
@@ -0,0 +1,39 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c 291086 2011-10-21 01:17:24Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+/* Thin wrapper over sched_setscheduler() so the driver can raise the
+ * priority/policy of its worker threads; returns its status (0 pre-2.6).
+ */
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h
new file mode 100644
index 00000000000000..e420166a95a9ea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_proto.h
@@ -0,0 +1,109 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h 303834 2011-12-20 06:17:39Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT 20000 /* In milli second */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_enque_sendq(void* state, int prec, void* p);
+extern int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx);
+extern void dhd_wlfc_cleanup(dhd_pub_t *dhd);
+#endif /* PROP_TXSTATUS */
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+ uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#elif defined(RNDIS)
+#define DHD_PROTOCOL "rndis"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c
new file mode 100644
index 00000000000000..70e664ef7597fc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c
@@ -0,0 +1,6939 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c 325483 2012-04-03 13:22:22Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hndrte_armtrap.h>
+#include <hndrte_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
+#endif
+
+#define QLEN 256 /* bulk rx and tx queue lengths */
+#define FCHI (QLEN - 10)
+#define FCLOW (FCHI / 2)
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
+
+#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
+
+#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD 32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#ifdef SDTEST
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ 32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at a 1 second (1000000us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applicable unconditionally for sdio and sdspi. Conditional if
+ * bufpool was present for gspi bus.
+ */
+#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+#if defined(OOB_INTR_ONLY)
+extern void bcmsdh_set_irq(int flag);
+#endif /* defined(OOB_INTR_ONLY) */
+#ifdef PROP_TXSTATUS
+extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+#endif
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192
+#define CONSOLE_BUFFER_MAX 2024
+/* Host-side snapshot of the dongle's console log: the device address of
+ * the log descriptor, a host copy of that descriptor, and a host buffer
+ * plus read index used when the watchdog polls the console for output.
+ */
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hndrte_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+#define REMAP_ENAB(bus) ((bus)->remap)
+#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define KSO_ENAB(bus) ((bus)->kso)
+#define SR_ENAB(bus) ((bus)->_srenab)
+#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto))
+#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618)
+#define MIN_RSRC_SR 0x3
+#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c)
+#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1)
+
+#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup)
+#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */
+#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */
+#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */
+#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0)
+
+
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+
+ bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+
+ sdpcmd_regs_t *regs; /* Registers for SDIO core */
+ uint sdpcmrev; /* SDIO core revision */
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 srmemsize; /* Size of SRMEM */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+ const char *nvram_params; /* user specified nvram params. */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Queue length used for flow-control */
+ uint8 flowcontrol; /* per prio flow control bitmask */
+ uint8 tx_seq; /* Transmit sequence number (next) */
+ uint8 tx_max; /* Maximum transmit sequence allowed */
+
+ uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+ uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ uint16 nextlen; /* Next Read Len from last header */
+ uint8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ void *glomd; /* Packet containing glomming descriptor */
+ void *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ uint8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ uint8 *rxctl; /* Aligned pointer into rxbuf */
+ uint8 *databuf; /* Buffer for receiving big glom packet */
+ uint8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ uint8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef DHD_DEBUG
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+ uint regfails; /* Count of R_REG/W_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ int32 idletime; /* Control for activity timeout */
+ int32 idlecount; /* Activity timeout counter */
+ int32 idleclock; /* How to set bus driver when idle */
+ int32 sd_divisor; /* Speed control to bus driver */
+ int32 sd_mode; /* Mode control to bus driver */
+ int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
+ bool use_rxchain; /* If dhd should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ uint rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+ /* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+ bool usebufpool;
+
+#ifdef SDTEST
+ /* external loopback */
+ bool ext_loop;
+ uint8 loopid;
+
+ /* pktgen configuration */
+ uint pktgen_freq; /* Ticks between bursts */
+ uint pktgen_count; /* Packets to send each burst */
+ uint pktgen_print; /* Bursts between count displays */
+ uint pktgen_total; /* Stop after this many */
+ uint pktgen_minlen; /* Minimum packet data len */
+ uint pktgen_maxlen; /* Maximum packet data len */
+ uint pktgen_mode; /* Configured mode: tx, rx, or echo */
+ uint pktgen_stop; /* Number of tx failures causing stop */
+
+ /* active pktgen fields */
+ uint pktgen_tick; /* Tick counter for bursts */
+ uint pktgen_ptick; /* Burst counter for printing */
+ uint pktgen_sent; /* Number of test packets generated */
+ uint pktgen_rcvd; /* Number of test packets received */
+ uint pktgen_fail; /* Number of failed send attempts */
+ uint16 pktgen_len; /* Length of next packet to send */
+#define PKTGEN_RCV_IDLE (0)
+#define PKTGEN_RCV_ONGOING (1)
+ uint16 pktgen_rcv_state; /* receive state */
+ uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ uint8 *ctrl_frame_buf;
+ uint32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+ uint32 rxint_mode; /* rx interrupt mode */
+ bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
+ * Available with socram rev 16
+ * Remap region not DMA-able
+ */
+ bool kso;
+ bool _slpauto;
+ bool _oobwakeup;
+ bool _srenab;
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Not used yet */
+#define CLK_AVAIL 3
+
+#define DHD_NOPMU(dhd) (FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_MEMSIZE (128 *1024)
+int dhd_dongle_memsize;
+
+static bool dhd_doflow;
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static const uint watermark = 8;
+static const uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+#define HDATLEN (firstread - (SDPCM_HDRLEN))
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+#define ALIGNMENT 4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align) \
+ do { \
+ uint datalign; \
+ datalign = (uintptr)PKTDATA((osh), (p)); \
+ datalign = ROUNDUP(datalign, (align)) - datalign; \
+ ASSERT(datalign < (align)); \
+ ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
+ if (datalign) \
+ PKTPULL((osh), (p), datalign); \
+ PKTSETLEN((osh), (p), (len)); \
+ } while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+ (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+ (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhdsdio_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ regvar = R_REG(bus->dhd->osh, regaddr); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) { \
+ DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ regvar = 0; \
+ } \
+ } \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+ retryvar = 0; \
+ do { \
+ W_REG(bus->dhd->osh, regaddr, regval); \
+ } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+ if (retryvar) { \
+ bus->regfails += (retryvar-1); \
+ if (retryvar > retry_limit) \
+ DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+ __FUNCTION__, __LINE__)); \
+ } \
+} while (0)
+
+#define BUS_WAKE(bus) \
+ do { \
+ if ((bus)->sleeping) \
+ dhdsdio_bussleep((bus), FALSE); \
+ } while (0);
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in dongle to transmit to host.
+ *
+ * Mode 0: Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1: (sdiod core rev >= 4)
+ * Device sets a new bit in the intstatus whenever there is a packet
+ * available in fifo. Host can't clear this specific status bit until all the
+ * packets are read from the FIFO. No need to ack dongle intstatus.
+ * Mode 2: (sdiod core rev >= 4)
+ * Device sets a bit in the intstatus, and host acks this by writing
+ * one to this bit. Dongle won't generate anymore packet interrupts
+ * until host reads all the packets from the dongle and reads a zero to
+ * figure that there are no more packets. No need to disable host ints.
+ * Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) \
+ ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
+
+#define DHD_BUS SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count);
+#endif
+
+#ifdef DHD_DEBUG
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+ void * regsva, uint16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+ bool reset_flag);
+
+static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+
+static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+/* Clamp the dongle RAM size visible to the host.
+ * Shrinks bus->ramsize to the global user override dhd_dongle_memsize,
+ * but only when that override lies strictly between DONGLE_MIN_MEMSIZE
+ * and the probed orig_ramsize; out-of-range values leave ramsize alone.
+ * NOTE(review): the mem_size parameter is unused here -- the global
+ * dhd_dongle_memsize is consulted instead; confirm that is intentional.
+ */
+static void
+dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size = DONGLE_MIN_MEMSIZE;
+	/* Restrict the memsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_memsize, min_size));
+	if ((dhd_dongle_memsize > min_size) &&
+		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_memsize;
+}
+
+/* Program the SDIO function-1 backplane address window.
+ * Writes bits [31:8] of 'address' into the SBADDRLOW/MID/HIGH config
+ * registers (the low 8 bits are presumably the offset addressed directly
+ * through the window).  Stops at the first failing write.
+ * Returns 0 on success or the bcmsdh error code from the failing access.
+ */
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+		(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+			(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+			(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+
+#ifdef USE_OOB_GPIO1
+/* Configure the dongle so its GPIO1 can wake the host out-of-band.
+ * Enables output on the device wakeup GPIO, then read-modify-writes the
+ * CC_CHIPCTRL2_GPIO1_WAKEUP bit through the chipcommon
+ * chipcontrol_addr/chipcontrol_data indirect register pair (value 2
+ * written to chipcontrol_addr selects chipcontrol register 2).
+ * Sets bus->_oobwakeup so OOB_WAKEUP_ENAB() reports TRUE.
+ * Always returns 0; register access errors are not checked.
+ */
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query whether the chip/firmware supports save-restore (SR) power mode.
+ * Capability requires either the SR-supported bit in the core
+ * capability-extension register or a BCM4324 part, AND the chip's
+ * minimum-resource mask being programmed to the SR value.  BCM4334
+ * parts earlier than rev B2 (chiprev < 3) are explicitly excluded.
+ * Returns TRUE when SR is usable, FALSE otherwise.
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32 min = 0, core_capext;
+
+	core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
+	if (!(core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK) && !(bus->sih->chip == BCM4324_CHIP_ID))
+		return FALSE;
+
+	min = bcmsdh_reg_read(bus->sdh, MIN_RSRC_ADDR, 4);
+	if (min == MIN_RSRC_SR) {
+		cap = TRUE;
+
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev < 3)) {
+			cap = FALSE;
+
+			DHD_ERROR(("Only 4334 >= B2 supports SR: curr rev %d\n",
+				bus->sih->chiprev));
+		}
+	}
+
+	return cap;
+}
+
+/* Save-restore workaround setup (applied by dhdsdio_sr_init to
+ * BCM4334 rev 2 parts): initializes host-side GPIO access and, when
+ * built with USE_OOB_GPIO1, the GPIO1 out-of-band wakeup path.
+ * Always returns 0.
+ */
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+
+	bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+	dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+	return 0;
+}
+
+/* Enable save-restore (SR) operation on the dongle:
+ *  - apply the SR workaround for BCM4334 rev 2;
+ *  - set the HT-wait bit in the function-1 wakeup-control register
+ *    (the surrounding reads of 'val' appear to be readback/settling
+ *    only -- their results are never used; confirm intent);
+ *  - advertise CMD14 sleep support in the card-capability register;
+ *  - force the HT clock request in ChipClockCSR;
+ *  - latch dhd_slpauto into bus->_slpauto and mark bus->_srenab.
+ * Always returns 0; individual register errors are not propagated.
+ */
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+		dhdsdio_srwar_init(bus);
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+		1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+		SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+
+/*
+ * FIX: Be sure the KeepSdioOn (KSO) bit is enabled.
+ * On 4334 A0 parts the bit powers up as 0 where it should be 1
+ * (fixed in B0), so read SLEEPCSR and set the bit only when it is
+ * currently clear.  Also records kso = TRUE in the bus state.
+ * Always returns 0; a failing write is only logged.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+/* Blind-write the KSO bit to 'on'.
+ * The device may be asleep (a read could fail), so SLEEPCSR is written
+ * without a prior read; note that when 'on' is FALSE the register is
+ * written as 0, clearing every field, not just KSO.
+ * Returns the bcmsdh write status (0 == ok); failures are only traced,
+ * and retrying is left to the caller.
+ */
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 val = 0;
+	int err = 0;
+
+	/* Don't read here since sdio could be off so just write only */
+	val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+
+	if (err)
+		DHD_TRACE(("%s: KSO toggle %d failed: %d\n", __FUNCTION__, on, err));
+	return err;
+}
+
+/* "kso" iovar backend: manually force the KSO state (debug/test path).
+ * on == FALSE: wake the bus, bring the backplane clock up, log the
+ *   current ChipClockCSR, then clear KSO.
+ * on == TRUE: ensure at least the SD clock is running, write KSO twice
+ *   (double-write guards against the AOS transition, per the inline
+ *   comment), delay 4 ms, then spin until SLEEPCSR reports both KSO and
+ *   device-on, logging the final value.
+ * Records the requested state in bus->kso.  Always returns 0; register
+ * errors are swallowed (err is only BCM_REFERENCE'd).
+ */
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		/* Double-write to be safe in case transition of AOS */
+		dhdsdio_clk_kso_enab(bus, TRUE);
+		dhdsdio_clk_kso_enab(bus, TRUE);
+		OSL_DELAY(4000);
+
+		/* Wait for device ready during transition to wake-up */
+		SPINWAIT(((dhdsdio_sleepcsr_get(bus)) !=
+			(SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
+			SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)),
+			(10000));
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+/* Read and return the function-1 SLEEPCSR register.
+ * A failed read is only traced; whatever value bcmsdh_cfg_read returned
+ * is passed back to the caller regardless.
+ */
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+/* Return the Broadcom card-capability byte from CCCR (function 0).
+ * Read errors are ignored (NULL error pointer).
+ */
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+/* Write 'cap' to the Broadcom card-capability CCCR register (function 0).
+ * A failing write is logged but not propagated: always returns 0.
+ */
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+/* Put the dongle to sleep (on == TRUE) or wake it (on == FALSE).
+ * Sleep entry: make sure a clock request is already latched -- so the
+ *   wake path finds the request set and the device cannot drop straight
+ *   back to sleep -- then either issue CMD14 sleep (USE_CMD14 builds) or
+ *   clear KSO and drive the wakeup GPIO low.
+ * Sleep exit: ensure at least the SD clock, wait out the 4334 rev-2
+ *   SR-state GPIO workaround, then either CMD14-wake (with two
+ *   sleep/wake resync retries) or raise the wakeup GPIO and re-assert
+ *   KSO (up to 3 attempts); finally spin until SLEEPCSR reports DEVON
+ *   and ChipClockCSR reports HT available.
+ * On success bus->kso tracks the new state (TRUE == awake).
+ * Returns 0 on success, else a BCME_/bcmsdh error code.
+ */
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set
+		 * else device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE);  /* GPIO_1 is off */
+#endif
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		/* 4334 rev 2 WAR: wait for the device to report SR exit
+		 * on the dedicated SR-state GPIO before touching it.
+		 */
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+			SPINWAIT((bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+				GPIO_DEV_SRSTATE_TIMEOUT);
+
+			if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+				DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+			}
+		}
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			if (err) {
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device non-response!\n",
+						__FUNCTION__));
+					/* give up but report success so caller proceeds */
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE);  /* GPIO_1 is on */
+
+		do {
+			err = dhdsdio_clk_kso_enab(bus, TRUE);
+			OSL_DELAY(10000);
+		} while ((err != 0) && (++retry < 3));
+
+		if (err != 0) {
+			DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+			err = 0; /* continue anyway */
+		}
+#endif /* !USE_CMD14 */
+
+		if (err == 0) {
+			uint8 csr;
+
+			/* Wait for device ready during transition to wake-up */
+			SPINWAIT((((csr = dhdsdio_sleepcsr_get(bus)) &
+				SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+				(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (10000));
+
+			DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+			if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+				DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+
+			/* Then wait for HT clock availability (result of this
+			 * second spinwait is not itself checked).
+			 */
+			SPINWAIT((((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+				(SBSDIO_HT_AVAIL)), (10000));
+
+		}
+	}
+
+	/* Update if successful */
+	if (err == 0)
+		bus->kso = on ? FALSE : TRUE;
+	else {
+		DHD_ERROR(("%s: Sleep request failed: on:%d err:%d\n", __FUNCTION__, on, err));
+	}
+
+	return err;
+}
+
+/* Turn the backplane HT (or ALP-only) clock on or off.
+ * No-op (BCME_OK) when KSO is disabled; when SLPAUTO is enabled only
+ * the software clkstate is updated.
+ * Turning on: request HT/ALP via ChipClockCSR, then either arm a
+ * clock-available interrupt and return with clkstate == CLK_PENDING
+ * (when 'pendok', forced off for OOB_INTR_ONLY builds) or poll up to
+ * PMU_MAX_TRANSITION_DLY for the clock; on success marks CLK_AVAIL and
+ * sets the activity flag.
+ * Turning off: cancel any pending CA-only interrupt filter, drop
+ * clkstate to CLK_SDONLY and, unless SR is enabled, clear the clock
+ * request in ChipClockCSR.
+ * Returns BCME_OK on success, BCME_ERROR on register-access failure or
+ * clock timeout.
+ */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#if defined(OOB_INTR_ONLY)
+	pendok = FALSE;
+#endif
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (!KSO_ENAB(bus))
+		return BCME_OK;
+
+	if (SLPAUTO_ENAB(bus)) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return BCME_OK;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		/* Dummy backplane read flushes the request on PCMCIA
+		 * buscore rev 9 (result intentionally discarded).
+		 */
+		if (pendok &&
+		    ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) {
+			uint32 dummy, retries;
+			R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+			BCM_REFERENCE(dummy);
+		}
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+					__FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+				!SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+				__FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+	} else {
+		clkreq = 0;
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+					__FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state.
+ * on == TRUE restores the SD bus clock (and, per bus->idleclock policy,
+ * the bus mode or divisor) and leaves clkstate at CLK_SDONLY.
+ * on == FALSE stops or slows the SD clock per the same policy and leaves
+ * clkstate at CLK_NONE.
+ * Returns BCME_OK or BCME_ERROR on any failing bcmsdh iovar operation.
+ */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+ int err;
+ int32 iovalue;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (on) {
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ /* Turn on clock and restore mode */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+
+ iovalue = bus->sd_mode;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Restore clock speed */
+ iovalue = bus->sd_divisor;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_SDONLY;
+ } else {
+ /* Stop or slow the SD clock itself */
+ if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+ DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+ __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+ return BCME_ERROR;
+ }
+ if (bus->idleclock == DHD_IDLE_STOP) {
+ if (sd1idle) {
+ /* Change to SD1 mode and turn off clock */
+ iovalue = 1;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ /* Fixed message: the failing iovar here is sd_mode,
+ * not sd_clock (copy-paste error).
+ */
+ DHD_ERROR(("%s: error changing sd_mode: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+
+ iovalue = 0;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+ /* Set divisor to idle value */
+ iovalue = bus->idleclock;
+ err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+ &iovalue, sizeof(iovalue), TRUE);
+ if (err) {
+ DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+ __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ }
+ bus->clkstate = CLK_NONE;
+ }
+
+ return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness.
+ * target is one of CLK_NONE / CLK_SDONLY / CLK_AVAIL; pendok lets the
+ * HT request be left pending rather than polled to completion.
+ * Also arms/disarms the OS watchdog timer as a side effect.
+ * Returns BCME_OK or an error from the underlying clock helpers.
+ */
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+ int ret = BCME_OK;
+#ifdef DHD_DEBUG
+ uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ return ret;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ dhdsdio_sdclk(bus, TRUE);
+ /* Now request HT Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+ }
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ }
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ /* NOTE(review): any error returned by the HT teardown above is
+ * overwritten here; only the sdclk result is reported to callers.
+ */
+ ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+ if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+ if (bus->poll == 0)
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
+/* Request bus SLEEP or WAKE state.
+ * sleep == TRUE puts the bus to sleep; refused with BCME_BUSY if the DPC
+ * is scheduled, rx is skipped, or tx packets are queued. sleep == FALSE
+ * wakes it. Without SLPAUTO the transition is driven via mailbox
+ * signaling plus clock control; with SLPAUTO it uses the devsleep iovar.
+ * Returns BCME_OK, BCME_BUSY, or the devsleep iovar error.
+ */
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ int err = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return BCME_BUSY;
+
+
+ if (!SLPAUTO_ENAB(bus)) {
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->sih->chip != BCM4329_CHIP_ID &&
+ bus->sih->chip != BCM4319_CHIP_ID) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+ } else {
+ /* Leave interrupts enabled since device can exit sleep and
+ * interrupt host
+ */
+ err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+ }
+
+ /* Change state */
+ /* NOTE(review): in the !SLPAUTO path, sleeping is set TRUE even
+ * when the mailbox signaling above failed (errors are only logged).
+ */
+ bus->sleeping = TRUE;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ if (!SLPAUTO_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ } else {
+ err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+ }
+
+ if (err == 0) {
+ /* Change state */
+ bus->sleeping = FALSE;
+ }
+ }
+
+ return err;
+}
+
+#if defined(OOB_INTR_ONLY)
+/* Enable or disable out-of-band wakeup signaling from the dongle.
+ * With HW_OOB the host controller handles it directly; otherwise the
+ * dongle is told via the to-SB mailbox (with the backplane clock held
+ * available for the duration of the register writes).
+ */
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+/* chan selects the SDPCM channel; free_pkt controls whether the packet
+ * is freed here after completion. Returns BCME_OK or a bcmsdh/BCME
+ * error code; on all paths the tx-complete callback is invoked.
+ */
+static int
+dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+{
+ int ret;
+ osl_t *osh;
+ uint8 *frame;
+ uint16 len, pad1 = 0;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh;
+ void *new;
+ int i;
+#ifdef WLMEDIA_HTSF
+ char *p;
+ htsfts_t *htsf_ts;
+#endif
+
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+ if (PKTLEN(osh, pkt) >= 100) {
+ p = PKTDATA(osh, pkt);
+ htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->c20 = get_cycles();
+ htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+ /* Add alignment padding, allocate new packet if needed */
+ if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
+ if (PKTHEADROOM(osh, pkt) < pad1) {
+ DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
+ __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
+ bus->dhd->tx_realloc++;
+ new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+ if (!new) {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ /* Copy payload into the aligned replacement packet */
+ PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ /* free the pkt if canned one is not used */
+ free_pkt = TRUE;
+ pkt = new;
+ frame = (uint8*)PKTDATA(osh, pkt);
+ ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+ pad1 = 0;
+ } else {
+ /* Enough headroom: absorb the misalignment into the header */
+ PKTPUSH(osh, pkt, pad1);
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
+ bzero(frame, pad1 + SDPCM_HDRLEN);
+ }
+ }
+ ASSERT(pad1 < DHD_SDALIGN);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (uint16)PKTLEN(osh, pkt);
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
+ tx_packets[PKTPRIO(pkt)]++;
+ }
+ if (DHD_BYTES_ON() &&
+ (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad2 = bus->blocksize - (len % bus->blocksize);
+ if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
+#ifdef NOTUSED
+ if (pad2 <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+ len += pad2;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+ if (PKTTAILROOM(osh, pkt))
+#endif
+ len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+ else
+ DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+ }
+
+ /* Send the frame; on SDIO error, abort F2 and terminate the frame,
+ * then drain the write-frame byte count before retrying.
+ */
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, pkt, NULL, NULL);
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
+ dhd_os_sdlock(bus->dhd);
+ } else {
+#endif /* PROP_TXSTATUS */
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+
+#ifdef PROP_TXSTATUS
+ }
+#endif
+ return ret;
+}
+
+/* Entry point for transmitting a data packet over the SDIO bus.
+ * Pushes the SDPCM header onto the packet, then either enqueues it on
+ * the priority tx queue (deferring transmission to the DPC) or sends it
+ * immediately when nothing is pending and the HT clock is available.
+ * Returns BCME_OK, BCME_NORESOURCE (queue full), or the txpkt result.
+ * Lock ordering here is delicate: txqlock must be released before
+ * sdlock in the failure path (see comment below) to avoid deadlock
+ * with the DPC.
+ */
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#endif /* SDTEST */
+
+ /* Add space for the header */
+ PKTPUSH(osh, pkt, SDPCM_HDRLEN);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+#ifndef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
+ pktq_len(&bus->txq)));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
+ /* Enqueue failed: undo the header push and complete the pkt */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+#ifndef DHDTHREAD
+ /* Need to also release txqlock before releasing sdlock.
+ * This thread still has txqlock and releases sdlock.
+ * Deadlock happens when dpc() grabs sdlock first then
+ * attempts to grab txqlock.
+ */
+ dhd_os_sdunlock_txq(bus->dhd);
+ dhd_os_sdunlock(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state)
+ dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
+ else
+#endif
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+#ifndef DHDTHREAD
+ dhd_os_sdlock(bus->dhd);
+ dhd_os_sdlock_txq(bus->dhd);
+#endif
+#ifdef PROP_TXSTATUS
+ /* let the caller decide whether to free the packet */
+ if (!bus->dhd->wlfc_state)
+#endif
+ PKTFREE(osh, pkt, TRUE);
+ ret = BCME_NORESOURCE;
+ }
+ else
+ ret = BCME_OK;
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ /* Assert host flow control when queue depth crosses high water */
+ if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+
+#ifdef DHD_DEBUG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+#ifdef DHDTHREAD
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure back plane ht clk is on, no pending allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+#ifdef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+ }
+
+#ifndef DHDTHREAD
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHDTHREAD */
+
+ return ret;
+}
+
+/* Drain up to maxframes packets from the priority tx queue onto the bus.
+ * Returns the number of frames sent; also releases host flow control
+ * once the queue depth falls below the low-water mark.
+ * NOTE(review): the "device asleep" early exit returns BCME_NODEVICE,
+ * a negative code, from a uint-typed function -- callers see a very
+ * large frame count; confirm callers tolerate this.
+ */
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ void *pkt;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ uint8 tx_prec_map;
+
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ /* Flow-controlled precedences are masked out of the dequeue */
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+ dhd_os_sdlock_txq(bus->dhd);
+ if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+ dhd_os_sdunlock_txq(bus->dhd);
+ break;
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+ datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE);
+#else
+ ret = dhdsdio_txpkt(bus, pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+ }
+
+ /* Deflow-control stack if needed */
+ if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+ dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
+ return cnt;
+}
+
+/* Send a control message to the dongle.
+ * msg points at the payload; SDPCM_HDRLEN bytes of headroom must exist
+ * in front of it -- the hardware/software header is built in place at
+ * msg - SDPCM_HDRLEN (plus optional alignment padding).
+ * When no tx credit is available the frame is handed to the DPC and
+ * this call blocks on ctrl_frame_stat.
+ * Returns 0 on success, -ETIMEDOUT after repeated timeouts, -EIO on
+ * error or when the dongle is in reset.
+ */
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ uint retries = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint8 doff = 0;
+ int ret = -1;
+ int i;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Back the pointer to make a room for bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + SDPCM_HDRLEN);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ /* doff becomes the data offset carried in the software header */
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!TXCTLOK(bus)) {
+ DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq));
+ bus->ctrl_frame_stat = TRUE;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == FALSE) {
+ /* DPC sent the frame for us; skip the send loop below */
+ DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+ ret = 0;
+ } else {
+ bus->dhd->txcnt_timeout++;
+ if (!bus->dhd->hang_was_sent)
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout));
+ ret = -1;
+ bus->ctrl_frame_stat = FALSE;
+ goto done;
+ }
+ }
+
+ bus->dhd->txcnt_timeout = 0;
+
+ /* ret is still -1 only when the frame was not sent by the DPC above */
+ if (ret == -1) {
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ do {
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL);
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ } while ((ret < 0) && retries++ < TXRETRIES);
+ }
+
+done:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ if (ret)
+ bus->dhd->tx_ctlerrs++;
+ else
+ bus->dhd->tx_ctlpkts++;
+
+ if (bus->dhd->txcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return ret ? -EIO : 0;
+}
+
+/* Receive (wait for) a control response from the dongle.
+ * Blocks until the ioctl response arrives or the wait times out, then
+ * copies up to msglen bytes of it into msg.
+ * Returns the received length, or -ETIMEDOUT / -ERESTARTSYS (wait
+ * cancelled) / -EIO on failure.
+ */
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+ /* Snapshot and consume the response under the bus lock */
+ dhd_os_sdlock(bus->dhd);
+ rxlen = bus->rxlen;
+ bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+ bus->rxlen = 0;
+ dhd_os_sdunlock(bus->dhd);
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __FUNCTION__, rxlen, msglen));
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ if (!SLPAUTO_ENAB(bus)) {
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+ }
+#endif /* DHD_DEBUG */
+ } else if (pending == TRUE) {
+ DHD_CTL(("%s: canceled\n", __FUNCTION__));
+ return -ERESTARTSYS;
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+ }
+ if (timeleft == 0) {
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+ }
+ else
+ bus->dhd->rxcnt_timeout = 0;
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+/* Identifiers for the bus-level iovars; each value is paired with an
+ * entry in the dhdsdio_iovars[] table below.
+ */
+enum {
+ IOV_INTR = 1,
+ IOV_POLLRATE,
+ IOV_SDREG,
+ IOV_SBREG,
+ IOV_SDCIS,
+ IOV_MEMBYTES,
+ IOV_MEMSIZE,
+#ifdef DHD_DEBUG
+ IOV_CHECKDIED,
+ IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+ IOV_SET_DOWNLOAD_STATE,
+ IOV_SOCRAM_STATE,
+ IOV_FORCEEVEN,
+ IOV_SDIOD_DRIVE,
+ IOV_READAHEAD,
+ IOV_SDRXCHAIN,
+ IOV_ALIGNCTL,
+ IOV_SDALIGN,
+ IOV_DEVRESET,
+ IOV_CPU,
+#ifdef SDTEST
+ IOV_PKTGEN,
+ IOV_EXTLOOP,
+#endif /* SDTEST */
+ IOV_SPROM,
+ IOV_TXBOUND,
+ IOV_RXBOUND,
+ IOV_TXMINMAX,
+ IOV_IDLETIME,
+ IOV_IDLECLOCK,
+ IOV_SD1IDLE,
+ IOV_SLEEP,
+ IOV_DONGLEISOLATION,
+ IOV_KSO,
+ IOV_DEVSLEEP,
+ IOV_DEVCAP,
+ IOV_VARS,
+#ifdef SOFTAP
+ IOV_FWPATH
+#endif
+};
+
+/* Name/ID/type table for the bus-level iovars; entries give the iovar
+ * string, its IOV_* identifier, flags, value type, and minimum length.
+ * The table is NULL-terminated.
+ */
+const bcm_iovar_t dhdsdio_iovars[] = {
+ {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
+ {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 },
+ {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 },
+ {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
+ {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 },
+ {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
+ {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 },
+ {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 },
+ {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 },
+ {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 },
+ {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 },
+ {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
+#ifdef DHD_DEBUG
+ {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 },
+ {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
+ {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
+ {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 },
+ {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 },
+/* NOTE(review): this inner #ifdef DHD_DEBUG is nested inside an
+ * identical #ifdef above and is therefore redundant.
+ */
+#ifdef DHD_DEBUG
+ {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 },
+ {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 },
+#endif /* DHD_DEBUG */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+ {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
+ {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+ {"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
+ {"kso", IOV_KSO, 0, IOVT_UINT32, 0 },
+ {"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 },
+#ifdef SOFTAP
+ {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+ {NULL, 0, 0, 0, 0 }
+};
+
+/* Print desc followed by num/div as a percentage-style value with two
+ * decimal places; prints "N/A" when the divisor is zero.
+ */
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+ uint whole, frac;
+
+ if (div == 0) {
+ bcm_bprintf(strbuf, "%s N/A", desc);
+ return;
+ }
+
+ whole = num / div;
+ frac = (100 * (num - (whole * div))) / div;
+ bcm_bprintf(strbuf, "%s %d.%02d", desc, whole, frac);
+}
+
+/* Format bus state, counters, and derived per-packet ratios into strbuf. */
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+ bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+ bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+ bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
+ bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->rxlen, bus->rx_seq);
+ bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+ bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+ bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+ bus->pollrate, bus->pollcnt, bus->regfails);
+
+ bcm_bprintf(strbuf, "\nAdditional counters:\n");
+ bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
+ bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+ bus->rxc_errors);
+ bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+ bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+ bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+ bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+ bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+ bus->f2txdata, bus->f1regdata);
+ /* Derived efficiency ratios (packets per bus access / interrupt) */
+ {
+ dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->dhd->rx_packets);
+ dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+ dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ (bus->f2txdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+ dhd_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+ dhd_dump_pct(strbuf, ", pkts/sd",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+ dhd_dump_pct(strbuf, ", pkts/int",
+ (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+ bcm_bprintf(strbuf, "\n\n");
+ }
+
+#ifdef SDTEST
+ if (bus->pktgen_count) {
+ bcm_bprintf(strbuf, "pktgen config and count:\n");
+ bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n",
+ bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+ bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+ bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+ bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+ }
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+ bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+ bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+ bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+ bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+ bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+ bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+ bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+ bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+ bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+ bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+/* Report the current pktgen settings and counters into arg as a packed
+ * dhd_pktgen_t. Always returns 0.
+ */
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t report;
+
+ /* Snapshot configuration */
+ report.version = DHD_PKTGEN_VERSION;
+ report.freq = bus->pktgen_freq;
+ report.count = bus->pktgen_count;
+ report.print = bus->pktgen_print;
+ report.total = bus->pktgen_total;
+ report.minlen = bus->pktgen_minlen;
+ report.maxlen = bus->pktgen_maxlen;
+ /* Snapshot live counters */
+ report.numsent = bus->pktgen_sent;
+ report.numrcvd = bus->pktgen_rcvd;
+ report.numfail = bus->pktgen_fail;
+ report.mode = bus->pktgen_mode;
+ report.stop = bus->pktgen_stop;
+
+ bcopy(&report, arg, sizeof(report));
+
+ return 0;
+}
+
+/* Apply new pktgen settings from arg (a packed dhd_pktgen_t).
+ * Counters are cleared when a stopped or mode-changed generator is
+ * (re)started. Returns 0, or BCME_BADARG on version mismatch.
+ */
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+ dhd_pktgen_t req;
+ uint prev_count, prev_mode;
+
+ bcopy(arg, &req, sizeof(req));
+ if (req.version != DHD_PKTGEN_VERSION)
+ return BCME_BADARG;
+
+ /* Remember prior settings to decide whether to clear counters */
+ prev_count = bus->pktgen_count;
+ prev_mode = bus->pktgen_mode;
+
+ bus->pktgen_freq = req.freq;
+ bus->pktgen_count = req.count;
+ bus->pktgen_print = req.print;
+ bus->pktgen_total = req.total;
+ bus->pktgen_minlen = req.minlen;
+ bus->pktgen_maxlen = req.maxlen;
+ bus->pktgen_mode = req.mode;
+ bus->pktgen_stop = req.stop;
+
+ bus->pktgen_tick = bus->pktgen_ptick = 0;
+ /* Clamp the working length into [minlen, maxlen] */
+ bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+ bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+ /* Clear counts for a new pktgen (mode change, or was stopped) */
+ if (bus->pktgen_count && (!prev_count || prev_mode != bus->pktgen_mode))
+ bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0;
+
+ return 0;
+}
+#endif /* SDTEST */
+
+/* Toggle the socdevram remap bit, preserving the current enable/protect
+ * settings read back from the chip.
+ */
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+ uint8 ena, prot, rmap;
+
+ /* Query current state, then write back with only remap changed. */
+ si_socdevram(bus->sih, FALSE, &ena, &prot, &rmap);
+ rmap = val ? TRUE : FALSE;
+ si_socdevram(bus->sih, TRUE, &ena, &prot, &rmap);
+}
+
+/* Read or write a span of dongle memory over the SDIO backplane.
+ * write: TRUE writes 'size' bytes of 'data' to 'address'; FALSE reads.
+ * The transfer is split at backplane window boundaries, moving the
+ * window with dhdsdio_set_siaddr_window() as needed; on exit the window
+ * is always restored to the current core base so register access works.
+ * Returns 0 on success or a negative BCME error.
+ */
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint32 sdaddr;
+ uint dsize;
+
+ /* In remap mode, adjust address beyond socram and redirect
+ * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+ * is not backplane accessible
+ */
+ if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+ address -= bus->orig_ramsize;
+ address += SOCDEVRAM_BP_ADDR;
+ }
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ /* First chunk is capped at the end of the current window. */
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
+
+ /* Set the backplane window to include the start address */
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+ (address & SBSDIO_SBWINDOW_MASK)));
+ if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+ DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+ DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+ break;
+ }
+ /* Subsequent chunks start at the window base. */
+ sdaddr = 0;
+ dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+ DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+ bcmsdh_cur_sbwad(bus->sdh)));
+ }
+
+ return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+/* Locate and read the sdpcm_shared_t structure from dongle RAM into *sh.
+ * The shared-struct address is stored in the last word of RAM; if that
+ * word looks invalid and save/restore memory exists, retry once just
+ * below the srmem region. Fields are byte-swapped to host order and the
+ * version is validated. Returns BCME_OK or a BCME error.
+ */
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+ uint32 addr;
+ int rv, i;
+ uint32 shaddr = 0;
+
+ shaddr = bus->ramsize - 4;
+
+ i = 0;
+ do {
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+ return rv;
+
+ addr = ltoh32(addr);
+
+ DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+ /* Invalid: retry once below srmem, else give up. */
+ if ((bus->srmemsize > 0) && (i++ == 0)) {
+ shaddr -= bus->srmemsize;
+ } else {
+ DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+ __FUNCTION__, addr));
+ return BCME_ERROR;
+ }
+ } else
+ break;
+ } while (i < 2);
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+ /* NOTE(review): dongle shared version 3 is accepted when the host was
+ * built against version 1 -- presumably a deliberate compatibility
+ * exception; confirm against the firmware release notes.
+ */
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+ return BCME_OK;
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __FUNCTION__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX 192
+
+/* Drain new output from the dongle's in-RAM console ring buffer and
+ * print complete lines to the host log ("CONSOLE: ...").
+ * c->last tracks the host read position; c->log.idx is the dongle write
+ * position. A line straddling the current write index is deferred to
+ * the next call by backing up c->last. Returns BCME_OK or an error.
+ */
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ /* Device must be awake (KSO) for backplane reads. */
+ if (!KSO_ENAB(bus))
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ /* Strip a trailing carriage return before printing. */
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+
+/* Inspect the dongle's shared state for an assert or trap after an
+ * apparent firmware death (e.g. an rx ctrl timeout), and dump the
+ * diagnosis (assert expression/file/line, trap registers, and the
+ * firmware console buffer) to the host log.
+ * data/size: caller-provided dump buffer; when data is NULL a temporary
+ * 512-byte buffer is allocated. Returns 0 or a BCME error.
+ */
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ char *console_buffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ trap_t tr;
+ sdpcm_shared_t sdpcm_shared;
+ struct bcmstrbuf strbuf;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, i, addr;
+ int rv;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (data == NULL) {
+ /*
+ * Called after a rx ctrl timeout. "data" is NULL.
+ * allocate memory to trace the trap or assert.
+ */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ /* Scratch string for assert expression/file names read from the dongle. */
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+ if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ /* Pull the assert expression string from dongle memory. */
+ if (sdpcm_shared.assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ /* Pull the assert source-file name from dongle memory. */
+ if (sdpcm_shared.assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+ }
+
+ if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ /* Read the saved trap record and dump the CPU registers. */
+ if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+ sdpcm_shared.trap_addr,
+ (uint8*)&tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bcm_bprintf(&strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+ ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+ ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+ ltoh32(sdpcm_shared.trap_addr),
+ ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+ ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+ /* Fetch the console ring descriptor (pointer, size, index). */
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0)
+ goto printbuf;
+
+ addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+ if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0)
+ goto printbuf;
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ /* Sanity-bound the size before allocating a host-side copy. */
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ goto printbuf;
+
+ if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0)
+ goto printbuf;
+
+ /* Replay the ring starting at the write index (oldest data first). */
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ if (dhd_msg_level & DHD_ERROR_VAL)
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+ }
+ }
+
+printbuf:
+ if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+
+ return bcmerror;
+}
+#endif /* #ifdef DHD_DEBUG */
+
+
+/* Replace the bus's stored NVRAM variable image with the caller's blob.
+ * arg/len: new variable list, expected to end with a double null.
+ * Only legal while the dongle is down. Returns BCME_OK or an error.
+ */
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Variables may only be downloaded while the interface is down. */
+ if (bus->dhd->up)
+ return BCME_NOTDOWN;
+
+ /* An empty blob is not a usable variable list. */
+ if (!len)
+ return BCME_BUFTOOSHORT;
+
+ /* Drop any previously stored variables. */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL)
+ return BCME_NOMEM;
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+
+ return BCME_OK;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24)
+#define CC_CHIPCTRL_JTAG_SEL (1 << 3)
+#define CC_CHIPCTRL_GPIO_SEL (0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28)
+
+/* Query or configure the dongle's serial (UART) console routing via the
+ * chipcommon PMU chip-control registers.
+ * set: FALSE just reads the current state; TRUE writes 'enable'.
+ * Returns the UART-enable bit state on success, or -1 with *bcmerror
+ * set to BCME_SDIO_ERROR when a register access fails.
+ * NOTE(review): only BCM4330 and BCM4334 are handled; on other chips
+ * uart_enab stays 0 and the function effectively reports "disabled".
+ */
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+ int int_val;
+ uint32 addr, data, uart_enab = 0;
+ uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+ uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+ /* Indirect access pair: write index to chipcontrol_addr, then
+ * read/write chipcontrol_data.
+ */
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+ *bcmerror = 0;
+
+ /* Select PMU chipcontrol register 1. */
+ bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+ }
+ else if (bus->sih->chip == BCM4334_CHIP_ID) {
+ if (enable) {
+ /* Moved to PMU chipcontrol 1 from 4330 */
+ int_val &= ~gpio_sel;
+ int_val |= jtag_sel;
+ } else {
+ int_val |= gpio_sel;
+ int_val &= ~jtag_sel;
+ }
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+ }
+
+ /* Read-only query: report current state without writing. */
+ if (!set)
+ return (int_val & uart_enab);
+ if (enable)
+ int_val |= uart_enab;
+ else
+ int_val &= ~uart_enab;
+ bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+ if (bcmsdh_regfail(bus->sdh)) {
+ *bcmerror = BCME_SDIO_ERROR;
+ return -1;
+ }
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ /* 4330 additionally muxes JTAG/GPIO pins in chipcontrol. */
+ uint32 chipcontrol;
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+ chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+ chipcontrol &= ~jtag_sel;
+ if (enable) {
+ chipcontrol |= jtag_sel;
+ chipcontrol &= ~gpio_sel;
+ }
+ bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+ }
+
+ return (int_val & uart_enab);
+}
+#endif
+
+/* Dispatch a bus-layer iovar get/set.
+ * vi/actionid/name identify the iovar; params/plen carry set qualifiers,
+ * arg/len the get/set buffer, val_size the convenience int width.
+ * Takes the sdlock around bus access and may wake the bus / enable the
+ * backplane clock. Returns BCME_OK or a BCME error code.
+ */
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ /* FIX: return directly rather than "goto exit" -- the sdlock is not
+ * taken until below, and the exit path unconditionally unlocks it,
+ * which would unbalance the lock on a length-check failure.
+ */
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ return bcmerror;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+
+ /* Some ioctls use the bus */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ /*
+ * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+ */
+ if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+ dhdsdio_clk_kso_iovar(bus, bool_val);
+ goto exit;
+ } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+ {
+ dhdsdio_clk_devsleep_iovar(bus, bool_val);
+ if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+ DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+ bus->dpc_sched));
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ }
+ }
+ goto exit;
+ }
+
+ /* Handle sleep stuff before any clock mucking */
+ if (vi->varid == IOV_SLEEP) {
+ if (IOV_ISSET(actionid)) {
+ bcmerror = dhdsdio_bussleep(bus, bool_val);
+ } else {
+ int_val = (int32)bus->sleeping;
+ bcopy(&int_val, arg, val_size);
+ }
+ goto exit;
+ }
+
+ /* Request clock to allow SDIO accesses */
+ if (!bus->dhd->dongle_reset) {
+ BUS_WAKE(bus);
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ }
+
+ switch (actionid) {
+ case IOV_GVAL(IOV_INTR):
+ int_val = (int32)bus->intr;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_INTR):
+ bus->intr = bool_val;
+ bus->intdis = FALSE;
+ if (bus->dhd->up) {
+ if (bus->intr) {
+ DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+ bcmsdh_intr_enable(bus->sdh);
+ } else {
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+ bcmsdh_intr_disable(bus->sdh);
+ }
+ }
+ break;
+
+ case IOV_GVAL(IOV_POLLRATE):
+ int_val = (int32)bus->pollrate;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POLLRATE):
+ bus->pollrate = (uint)int_val;
+ bus->poll = (bus->pollrate != 0);
+ break;
+
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+
+ case IOV_GVAL(IOV_IDLECLOCK):
+ int_val = (int32)bus->idleclock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLECLOCK):
+ bus->idleclock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SD1IDLE):
+ int_val = (int32)sd1idle;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SD1IDLE):
+ sd1idle = bool_val;
+ break;
+
+
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address;
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+ (set ? "write" : "read"), size, address));
+
+ /* If we know about SOCRAM, check for a fit */
+ if ((bus->orig_ramsize) &&
+ ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+ {
+ uint8 enable, protect, remap;
+ si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+ if (!enable || protect) {
+ DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+ __FUNCTION__, bus->orig_ramsize, size, address));
+ DHD_ERROR(("%s: socram enable %d, protect %d\n",
+ __FUNCTION__, enable, protect));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+ uint32 devramsize = si_socdevram_size(bus->sih);
+ if ((address < SOCDEVRAM_ARM_ADDR) ||
+ (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+ DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+ __FUNCTION__, address, size));
+ DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+ __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ /* move it such that address is real now */
+ address -= SOCDEVRAM_ARM_ADDR;
+ address += SOCDEVRAM_BP_ADDR;
+ DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+ __FUNCTION__, (set ? "write" : "read"), size, address));
+ } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+ /* Can not access remap region while devram remap bit is set
+ * ROM content would be returned in this case
+ */
+ DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+ __FUNCTION__, address));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_SDIOD_DRIVE):
+ int_val = (int32)dhd_sdiod_drive_strength;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDIOD_DRIVE):
+ dhd_sdiod_drive_strength = int_val;
+ si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+ break;
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_SOCRAM_STATE):
+ bcmerror = dhdsdio_download_state(bus, bool_val);
+ break;
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_GVAL(IOV_READAHEAD):
+ int_val = (int32)dhd_readahead;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_READAHEAD):
+ if (bool_val && !dhd_readahead)
+ bus->nextlen = 0;
+ dhd_readahead = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDRXCHAIN):
+ int_val = (int32)bus->use_rxchain;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDRXCHAIN):
+ if (bool_val && !bus->sd_rxchain)
+ bcmerror = BCME_UNSUPPORTED;
+ else
+ bus->use_rxchain = bool_val;
+ break;
+ case IOV_GVAL(IOV_ALIGNCTL):
+ int_val = (int32)dhd_alignctl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ALIGNCTL):
+ dhd_alignctl = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_SDALIGN):
+ int_val = DHD_SDALIGN;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_VARS):
+ if (bus->varsz < (uint)len)
+ bcopy(bus->vars, arg, bus->varsz);
+ else
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+ case IOV_GVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SDREG):
+ {
+ sdreg_t *sd_ptr;
+ uint32 addr, size;
+
+ sd_ptr = (sdreg_t *)params;
+
+ addr = (uintptr)bus->regs + sd_ptr->offset;
+ size = sd_ptr->func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ /* Same as above, but offset is not backplane (not SDIO core) */
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = SI_ENUM_BASE + sdreg.offset;
+ size = sdreg.func;
+ bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+ if (bcmsdh_regfail(bus->sdh))
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ case IOV_GVAL(IOV_SDCIS):
+ {
+ *(char *)arg = 0;
+
+ bcmstrcat(arg, "\nFunc 0\n");
+ bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 1\n");
+ bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ bcmstrcat(arg, "\nFunc 2\n");
+ bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+ break;
+ }
+
+ case IOV_GVAL(IOV_FORCEEVEN):
+ int_val = (int32)forcealign;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FORCEEVEN):
+ forcealign = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_TXBOUND):
+ int_val = (int32)dhd_txbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXBOUND):
+ dhd_txbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_RXBOUND):
+ int_val = (int32)dhd_rxbound;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RXBOUND):
+ dhd_rxbound = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_TXMINMAX):
+ int_val = (int32)dhd_txminmax;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXMINMAX):
+ dhd_txminmax = (uint)int_val;
+ break;
+
+ case IOV_GVAL(IOV_SERIALCONS):
+ int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+ if (bcmerror != 0)
+ break;
+
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SERIALCONS):
+ dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+ break;
+
+
+
+#endif /* DHD_DEBUG */
+
+
+#ifdef SDTEST
+ case IOV_GVAL(IOV_EXTLOOP):
+ int_val = (int32)bus->ext_loop;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_EXTLOOP):
+ bus->ext_loop = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_get(bus, arg);
+ break;
+
+ case IOV_SVAL(IOV_PKTGEN):
+ bcmerror = dhdsdio_pktgen_set(bus, arg);
+ break;
+#endif /* SDTEST */
+
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+ break;
+#ifdef SOFTAP
+ case IOV_GVAL(IOV_FWPATH):
+ {
+ uint32 fw_path_len;
+
+ fw_path_len = strlen(bus->fw_path);
+ DHD_INFO(("[softap] get fwpath, l=%d\n", len));
+
+ if (fw_path_len > len-1) {
+ bcmerror = BCME_BUFTOOSHORT;
+ break;
+ }
+
+ if (fw_path_len) {
+ bcopy(bus->fw_path, arg, fw_path_len);
+ ((uchar*)arg)[fw_path_len] = 0;
+ }
+ break;
+ }
+
+ case IOV_SVAL(IOV_FWPATH):
+ DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
+
+ switch (int_val) {
+ case 1:
+ bus->fw_path = fw_path; /* ordinary one */
+ break;
+ case 2:
+ bus->fw_path = fw_path2;
+ break;
+ default:
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
+ break;
+
+#endif /* SOFTAP */
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ case IOV_GVAL(IOV_KSO):
+ int_val = dhdsdio_sleepcsr_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DEVCAP):
+ int_val = dhdsdio_devcap_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DEVCAP):
+ dhdsdio_devcap_set(bus, (uint8) int_val);
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ /* Drop the backplane clock immediately if idle policy demands it. */
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return bcmerror;
+}
+
+/* Write the stored NVRAM variable image to the top of dongle RAM and
+ * terminate it with the length/checksum token in the last word.
+ * With DHD_DEBUG, the downloaded image is read back and compared.
+ * Returns BCME_OK or a BCME error code.
+ */
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+ /* Even if there are no vars are to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ if (bus->vars) {
+ if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+ if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+ DHD_ERROR(("PR85623WAR in place\n"));
+ varsize += 4;
+ varaddr -= 4;
+ }
+ }
+
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+ if (!nvram_ularray) {
+ /* FIX: free vbuffer before bailing out -- the original
+ * early return leaked it on this path.
+ */
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ return BCME_NOMEM;
+ }
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ phys_size, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((phys_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
+/* Move the dongle into or out of firmware-download state.
+ * enter=TRUE: disable the ARM core, reset SOCRAM, disable devram remap,
+ * and clear the top word of RAM so stale tokens are not misread.
+ * enter=FALSE: verify SOCRAM is up, write NVRAM vars, re-enable remap,
+ * clear pending SDIO interrupts, and reset the ARM to boot the image.
+ * Always returns with the SDIOD (or PCMCIA) core selected.
+ */
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+ uint retries;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ /* Restrict to ALP clock while the ARM is held in reset. */
+ bus->alp_only = TRUE;
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_disable(bus->sih, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Disable remap for download */
+ if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+ dhdsdio_devram_remap(bus, FALSE);
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if ((bcmerror = dhdsdio_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* Enable remap before ARM reset but after vars.
+ * No backplane access in remap mode
+ */
+ if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+ dhdsdio_devram_remap(bus, TRUE);
+
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ /* Clear any stale interrupt status before the ARM starts. */
+ W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+ if (bcmsdh_regfail(bus->sdh)) {
+ DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to SDIOD core */
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+ si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+/*
+ * Get or set a named bus-layer iovar.
+ *
+ * The name is first looked up in the local dhdsdio_iovars table; if it is
+ * not found the request is forwarded to the lower bcmsdh (SDIO host) layer.
+ * After a forwarded "set" of sd_divisor / sd_mode / sd_blocksize, the new
+ * value is read back into the bus state so cached copies stay consistent.
+ *
+ * For a set: params/plen must be unused (asserted); for a get: arg/len is
+ * the mandatory result space. Returns a BCME_* code (0 on success).
+ */
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				/* -1 marks the cached divisor as unknown */
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			/* fnum = 2: query the block size of SDIO function 2 (data) */
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+			}
+		}
+		/* blocksize may just have changed; keep roundup consistent with it */
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		/* Drop the bus clock right away if configured for immediate idle */
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+/*
+ * Take the bus down.
+ *
+ * Marks the bus state DHD_BUS_DOWN, then (if the device is awake / KSO
+ * enabled) masks and clears device interrupts, forces the backplane HT
+ * clock so the F2 disable propagates, disables SDIO function 2, flushes
+ * the tx queue and any partial glom state, and finally wakes any pending
+ * ioctl waiters. enforce_mutex selects whether this function takes the
+ * sdlock itself (FALSE when the caller already holds it).
+ */
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;	/* consumed/updated by the W_SDREG macro */
+	int err;
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(NULL);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Change our idea of bus state */
+	bus->dhd->busstate = DHD_BUS_DOWN;
+
+	if (KSO_ENAB(bus)) {
+
+		/* Enable clock for device interrupts */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Disable and clear interrupts at the chip level also */
+		W_SDREG(0, &bus->regs->hostintmask, retries);
+		/* remember the mask so the matching pending bits can be cleared below */
+		local_hostintmask = bus->hostintmask;
+		bus->hostintmask = 0;
+
+		/* Force clocks on backplane to be sure F2 interrupt propagates */
+		saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (!err) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+			                 (saveclk | SBSDIO_FORCE_HT), &err);
+		}
+		if (err) {
+			DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		}
+
+		/* Turn off the bus (F2), free any pending packets */
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+		bcmsdh_intr_disable(bus->sdh);
+		/* leave only F1 enabled; this disables F2 */
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+		/* Clear any pending interrupts now that F2 is disabled */
+		W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+	}
+
+	/* Turn off the backplane clock (only) */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	/* Clear the data packet queues */
+	pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+
+/*
+ * Bring up the dongle data path.
+ *
+ * Sequence: make the backplane clock available and force HT, announce the
+ * SDPCM protocol version via the to-SB mailbox, enable SDIO function 2,
+ * then poll IOR2 for up to DHD_WAIT_F2RDY ms. If F2 comes ready, program
+ * the host interrupt mask (with the corerev-4 rx-interrupt variant where
+ * applicable), set the watermark, mark the bus DHD_BUS_DATA and enable
+ * host interrupts. Otherwise F2 is disabled again and the backplane clock
+ * released.
+ *
+ * NOTE(review): ret is never set non-zero, so this always returns 0 even
+ * on the early-exit error paths — callers apparently detect failure via
+ * dhdp->busstate instead; confirm before relying on the return value.
+ */
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		goto exit;
+	}
+
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	/* First tell the dongle which SDPCM protocol version the host speaks */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+		ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+	DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err);
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	}
+
+
+	else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	/* With save/restore capability, init it; otherwise restore the clock
+	 * register we overwrote with FORCE_HT above.
+	 */
+	if (dhdsdio_sr_cap(bus))
+		dhdsdio_sr_init(bus);
+	else
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
<parameter>+/*
+ * Recover from a failed frame read.
+ *
+ * Optionally aborts the in-progress F2 transfer, terminates the pending
+ * rx frame via the SFC_RF_TERM frame-control bit, then polls the receive
+ * frame byte count (RFRAMEBC hi/lo) until the device reports the frame
+ * flushed. If rtx is set, a NAK is sent through the to-SB mailbox so the
+ * dongle retransmits, and rxskip is set to resynchronize the sequence
+ * number. If register access itself fails, the bus is marked down.
+ */
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return;
+	}
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+		bus->f1regdata += 2;
+
+		/* zero byte count means the frame is gone */
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		/* count should shrink monotonically; growth is suspicious */
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		/* NOTE(review): retries here is updated by the W_SDREG macro, not
+		 * the loop counter above — confirm macro semantics before changing.
+		 */
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}</parameter>
+
+/*
+ * Receive the body of a control frame.
+ *
+ * The first 'firstread' bytes of the frame (hdr) have already been read
+ * by the caller. This routine aligns bus->rxctl inside bus->rxbuf, copies
+ * the already-read portion over, rounds the remaining read length up for
+ * SDIO block efficiency, validates it against dhd->maxctl, and reads the
+ * rest of the frame into rxctl. On exit rxctl/rxlen point past the data
+ * offset (doff) at the payload, and the ioctl response waiter is woken
+ * (on both success and failure paths).
+ *
+ * gSPI special cases: the full frame may already be in hand, in which
+ * case the read is skipped entirely.
+ */
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		/* align the post-header position, then back up so the header
+		 * copy below lands in the right place
+		 */
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+/*
+ * Read and process a "glom" (a superframe containing multiple subframes).
+ *
+ * Two phases, either or both of which may run:
+ *  1. If a glom descriptor (bus->glomd) is pending, parse its list of
+ *     subframe lengths and build a matching chain of packet buffers,
+ *     saved in bus->glom.
+ *  2. If a chain exists (bus->glom), read the whole superframe in one
+ *     SDIO transfer (chained, or via bus->dataptr bounce buffer),
+ *     validate the superframe header and every subframe header, then
+ *     strip headers and push the surviving subframes up the stack
+ *     (honoring firmware packet-reorder info when present).
+ *
+ * Errors allow up to 3 retries (via NAK) before the chain is dropped.
+ * Returns the number of frames delivered, i.e. sequence numbers consumed.
+ */
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext, *save_pfirst;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		/* descriptor is a list of 16-bit lengths: must be non-empty and even */
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			/* first entry covers the superframe header too, hence 2x */
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			/* allocation or descriptor failure: drop any partial chain */
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			/* bounce-buffer path: read flat, then scatter into the chain */
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				/* out of retries: drop the whole chain */
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		/* HW header: 16-bit length followed by its ones-complement check */
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+		errcode = 0;
+		/* non-zero means len and check are not ones-complements */
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+			           __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		save_pfirst = pfirst;
+		bus->glom = NULL;
+		plast = NULL;
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			/* detach current subframe; re-linked below if it survives */
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			/* trim to the header-declared length, strip the SDPCM header */
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			reorder_info_len = sizeof(reorder_info_buf);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				/* empty payload: free it and splice it out of the chain */
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+				&reorder_info_len) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				if (plast) {
+					PKTSETNEXT(osh, plast, pnext);
+				} else {
+					ASSERT(save_pfirst == pfirst);
+					save_pfirst = pnext;
+				}
+				continue;
+			}
+			if (reorder_info_len) {
+				uint32 free_buf_count;
+				void *ppfirst;
+
+				ppfirst = pfirst;
+				/* Reordering info from the firmware */
+				dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+					reorder_info_len, &ppfirst, &free_buf_count);
+
+				if (free_buf_count == 0) {
+					/* reorder engine kept the packet(s); splice out */
+					if (plast) {
+						PKTSETNEXT(osh, plast, pnext);
+					} else {
+						ASSERT(save_pfirst == pfirst);
+						save_pfirst = pnext;
+					}
+					continue;
+				}
+				else {
+					void *temp;
+
+					/* go to the end of the chain and attach the pnext there */
+					temp = ppfirst;
+					while (PKTNEXT(osh, temp) != NULL) {
+						temp = PKTNEXT(osh, temp);
+					}
+					pfirst = temp;
+					if (plast) {
+						PKTSETNEXT(osh, plast, ppfirst);
+					}
+					else {
+						/* first one in the chain */
+						save_pfirst = ppfirst;
+					}
+
+					PKTSETNEXT(osh, pfirst, pnext);
+					plast = pfirst;
+				}
+
+				num += (uint8)free_buf_count;
+			}
+			else {
+				/* this packet will go up, link back into chain and count it */
+				PKTSETNEXT(osh, pfirst, pnext);
+				plast = pfirst;
+				num++;
+			}
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+		if (num) {
+			/* drop the bus lock while handing packets to the network stack */
+			dhd_os_sdunlock(bus->dhd);
+			dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num, 0);
+			dhd_os_sdlock(bus->dhd);
+		}
+
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+
+/* Return TRUE if there may be more frames to read */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+ osl_t *osh = bus->dhd->osh;
+ bcmsdh_info_t *sdh = bus->sdh;
+
+ uint16 len, check; /* Extracted hardware header fields */
+ uint8 chan, seq, doff; /* Extracted software header fields */
+ uint8 fcbits; /* Extracted fcbits from software header */
+ uint8 delta;
+
+ void *pkt; /* Packet for event or data frames */
+ uint16 pad; /* Number of pad bytes to read */
+ uint16 rdlen; /* Total number of bytes to read */
+ uint8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from bcmsdh calls */
+ uint8 txmax; /* Maximum tx sequence offered */
+ bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+ uint8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+ uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+ uint reorder_info_len;
+ uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+ bool sdtest = FALSE; /* To limit message spew from test mode */
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+ return 0;
+ }
+
+ ASSERT(maxframes);
+
+#ifdef SDTEST
+ /* Allow pktgen to override maxframes */
+ if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ maxframes = bus->pktgen_count;
+ sdtest = TRUE;
+ }
+#endif
+
+ /* Not finished unless we encounter no more frames indication */
+ *finished = FALSE;
+
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+#ifdef DHDTHREAD
+ /* tx more to improve rx performance */
+ if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) {
+ dhdsdio_sendfromq(bus, dhd_txbound);
+ }
+#endif /* DHDTHREAD */
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ uint8 cnt;
+ DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __FUNCTION__, bus->glomd, bus->glom));
+ cnt = dhdsdio_rxglom(bus, rxseq);
+ DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (dhd_readahead && bus->nextlen) {
+ uint16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ if (bus->bus == SPI_BUS) {
+ rdlen = len = nextlen;
+ }
+ else {
+ rdlen = len = nextlen << 4;
+
+ /* Pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+ }
+
+ /* We use bus->rxctl buffer in WinXP for initial control pkt receives.
+ * Later we use buffer-poll for data as well as control packets.
+ * This is required because dhd receives full frame in gSPI unlike SDIO.
+ * After the frame is received we have to distinguish whether it is data
+ * or non-data frame.
+ */
+ /* Allocate a packet buffer */
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+ if (bus->bus == SPI_BUS) {
+ bus->usebufpool = FALSE;
+ bus->rxctl = bus->rxbuf;
+ if (dhd_alignctl) {
+ bus->rxctl += firstread;
+ if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+ bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl -= firstread;
+ }
+ ASSERT(bus->rxctl >= bus->rxbuf);
+ rxbuf = bus->rxctl;
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus,
+ bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ NULL, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ /* dhd.rx_ctlerrs is higher level */
+ bus->rxc_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ } else {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+ "expected rxseq %d\n",
+ __FUNCTION__, len, rdlen, rxseq));
+ /* Just go try again w/normal header read */
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ } else {
+ if (bus->bus == SPI_BUS)
+ bus->usebufpool = TRUE;
+
+ ASSERT(!PKTLINK(pkt));
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+ rxbuf = (uint8 *)PKTDATA(osh, pkt);
+ /* Read the entire frame */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ SDIO_FUNC_2,
+ F2SYNC, rxbuf, rdlen,
+ pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+ __FUNCTION__, rdlen, sdret));
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ bus->dhd->rx_errors++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ /* Force retry w/normal header read. Don't attempt NAK for
+ * gSPI
+ */
+ dhdsdio_rxfail(bus, TRUE,
+ (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ continue;
+ }
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ /* Now check the header */
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means readahead info was bad */
+ if (!(len|check)) {
+ DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+ __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+ len, check));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+ __FUNCTION__, len));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+ /* Check for consistency with readahead info */
+ len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+ if (len_consistent) {
+ /* Mismatch, force retry w/normal header (may be >4K) */
+ DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+ GSPI_PR55150_BAILOUT;
+ continue;
+ }
+
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+ " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+ seq));
+ bus->nextlen = 0;
+ }
+
+ bus->dhd->rx_readahead_cnt ++;
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+ __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", rxbuf, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ if (bus->bus == SPI_BUS) {
+ dhdsdio_read_control(bus, rxbuf, len, doff);
+ if (bus->usebufpool) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ }
+ continue;
+ } else {
+ DHD_ERROR(("%s (nextlen): readahead on control"
+ " packet %d?\n", __FUNCTION__, seq));
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ dhdsdio_rxfail(bus, FALSE, TRUE);
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ }
+ }
+
+ if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+ DHD_ERROR(("Received %d bytes on %d channel. Running out of "
+ "rx pktbuf's or not yet malloced.\n", len, chan));
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE2();
+ dhd_os_sdunlock_rxq(bus->dhd);
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+ /* gSPI frames should not be handled in fractions */
+ if (bus->bus == SPI_BUS) {
+ break;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr, firstread, NULL, NULL, NULL);
+ bus->f2rxhdrs++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+ bus->rx_hdrfail++;
+ dhdsdio_rxfail(bus, TRUE, TRUE);
+ continue;
+ }
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = ltoh16_ua(bus->rxhdr);
+ check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+ /* All zeros means no more frames */
+ if (!(len|check)) {
+ *finished = TRUE;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((uint16)~(len^check)) {
+ DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+ __FUNCTION__, len, check));
+ bus->rx_badhdr++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+ bus->rx_badhdr++;
+ ASSERT(0);
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ __FUNCTION__, bus->nextlen, seq));
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ delta = 0;
+ if (~bus->flowcontrol & fcbits) {
+ bus->fc_xoff++;
+ delta = 1;
+ }
+ if (bus->flowcontrol & ~fcbits) {
+ bus->fc_xon++;
+ delta = 1;
+ }
+
+ if (delta) {
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+ __FUNCTION__, txmax, bus->tx_seq));
+ txmax = bus->tx_max;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+ (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+ /* Length to read */
+ rdlen = (len > firstread) ? (len - firstread) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % DHD_SDALIGN) {
+ rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (rdlen & (ALIGNMENT - 1)))
+ rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+ if ((rdlen + firstread) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+ bus->dhd->rx_errors++; bus->rx_toolong++;
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ continue;
+ }
+
+ dhd_os_sdlock_rxq(bus->dhd);
+ if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+ /* Give up on data, request rtx of events */
+ DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+ __FUNCTION__, rdlen, chan));
+ bus->dhd->rx_dropped++;
+ dhd_os_sdunlock_rxq(bus->dhd);
+ dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+ continue;
+ }
+ dhd_os_sdunlock_rxq(bus->dhd);
+
+ ASSERT(!PKTLINK(pkt));
+
+ /* Leave room for what we already read, and align remainder */
+ ASSERT(firstread < (PKTLEN(osh, pkt)));
+ PKTPULL(osh, pkt, firstread);
+ PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+ bus->f2rxdata++;
+ ASSERT(sdret != BCME_PENDING);
+
+ if (sdret < 0) {
+ DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+ ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ PKTPUSH(osh, pkt, firstread);
+ bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+ if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+ prhex("Rx Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+ __FUNCTION__, len));
+#ifdef DHD_DEBUG
+ if (DHD_GLOM_ON()) {
+ prhex("Glom Data", PKTDATA(osh, pkt), len);
+ }
+#endif
+ PKTSETLEN(osh, pkt, len);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+ dhdsdio_rxfail(bus, FALSE, FALSE);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ PKTSETLEN(osh, pkt, len);
+ PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+ /* Test channel packets are processed separately */
+ if (chan == SDPCM_TEST_CHANNEL) {
+ dhdsdio_testrcv(bus, pkt, seq);
+ continue;
+ }
+#endif /* SDTEST */
+
+ if (PKTLEN(osh, pkt) == 0) {
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ continue;
+ } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+ &reorder_info_len) != 0) {
+ DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+ dhd_os_sdlock_rxq(bus->dhd);
+ PKTFREE(bus->dhd->osh, pkt, FALSE);
+ dhd_os_sdunlock_rxq(bus->dhd);
+ bus->dhd->rx_errors++;
+ continue;
+ }
+ if (reorder_info_len) {
+ /* Reordering info from the firmware */
+ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+ &pkt, &pkt_count);
+ if (pkt_count == 0)
+ continue;
+ }
+ else
+ pkt_count = 1;
+
+
+ /* Unlock during rx call */
+ dhd_os_sdunlock(bus->dhd);
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+ dhd_os_sdlock(bus->dhd);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+ /* Message if we hit the limit */
+ if (!rxleft && !sdtest)
+ DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+ else
+#endif /* DHD_DEBUG */
+ DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ return rxcount;
+}
+
+/*
+ * Read and acknowledge the dongle's to-host mailbox, then translate the
+ * mailbox content into synthetic interrupt-status bits for the caller
+ * (dhdsdio_dpc) to merge into its pending-work mask.
+ *
+ * Returns the intstatus bits to OR in (currently only FRAME_AVAIL_MASK
+ * after a handled NAK), or 0 when no frame work results.
+ */
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	/* Only ack when the read itself stayed within the retry budget */
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		/* Resume frame reads; tell the DPC frames may be available */
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		/* Extract the SDPCM protocol version advertised by the dongle */
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			/* Read-modify-write corecontrol; the trailing read flushes
+			 * the write (value intentionally unused).
+			 */
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out of band
+	 * method isn't used any more. Leave this here for possibly remaining backward
+	 * compatible with older dongles
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+#ifdef DHD_DEBUG
+	/* At least print a message if FW halted */
+	if (hmb_data & HMB_DATA_FWHALT) {
+		DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED\n"));
+		dhdsdio_checkdied(bus, NULL, 0);
+	}
+#endif /* DHD_DEBUG */
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+/*
+ * Deferred procedure call: the main bottom-half of the SDIO bus driver.
+ * Collects interrupt status (pending + newly read), services mailbox and
+ * flow-control events, reads available rx frames, flushes a pending
+ * control frame, and sends queued tx frames, all under the sdlock.
+ *
+ * Returns TRUE when more work remains and the DPC should be rescheduled.
+ */
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+		bus->intstatus = 0;
+		/* NOTE(review): returns 0 from a bool function; FALSE would be clearer */
+		return 0;
+	}
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	dhd_os_sdlock(bus->dhd);
+
+	/* Without sleep-auto, register access requires KSO; bail if asleep */
+	if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* If waiting for HTAVAIL, check status */
+	if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			/* HT clock became available: clear clock-avail-interrupt-only
+			 * mode and mark the clock state available.
+			 */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			/* Still pending: re-enable interrupts and wait */
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			/* In RXDATAINT_MODE_0, XMTDATA_AVAIL alone is not acked here */
+			if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+				(newstatus == I_XMTDATA_AVAIL)) {
+			}
+			else
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	/* NOTE(review): I_CHIPACTIVE was already cleared above, so this
+	 * second branch can never be taken — dead code.
+	 */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY)
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+		bcmsdh_intr_enable(sdh);
+	}
+
+	/* Flush a pending host control frame while credit/clock allow it */
+	if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+		int ret, i;
+
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                      (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+			NULL, NULL, NULL);
+		ASSERT(ret != BCME_PENDING);
+		if (ret == BCME_NODEVICE) {
+			DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+		} else if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
+			          __FUNCTION__, ret));
+			bus->tx_sderrs++;
+
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+			                 SFC_WF_TERM, NULL);
+			bus->f1regdata++;
+
+			/* Drain the write frame byte counter (up to 3 polls) */
+			for (i = 0; i < 3; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+				                     SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+		if (ret == 0) {
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+		}
+
+		bus->ctrl_frame_stat = FALSE;
+		dhd_wait_event_wakeup(bus->dhd);
+	}
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	         pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+	/* Resched the DPC if ctrl cmd is pending on bus credit */
+	if (bus->ctrl_frame_stat)
+		resched = TRUE;
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		if ((bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+			SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+			/* Bus failed because of KSO */
+			DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+			bus->kso = FALSE;
+		} else {
+			DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+				__FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			bus->intstatus = 0;
+		}
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+exit:
+	dhd_os_sdunlock(bus->dhd);
+	return resched;
+}
+
+/*
+ * Public DPC entry point: run the bus DPC synchronously and report
+ * whether it asked to be rescheduled.
+ */
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	return dhdsdio_dpc(bus);
+}
+
+/*
+ * SDIO interrupt service routine.  Records the interrupt, disables
+ * further SDIO interrupts, and either runs the DPC inline (threaded-ISR
+ * builds) or schedules it for later execution.
+ *
+ * arg: opaque dhd_bus_t pointer registered with the SDIO stack.
+ */
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	/* NOTE(review): duplicate of the Enter trace above */
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (!SLPAUTO_ENAB(bus)) {
+		if (bus->sleeping) {
+			/* NOTE(review): returns with ipend set but without
+			 * disabling/re-enabling interrupts or scheduling the DPC
+			 */
+			DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+			return;
+		} else if (!KSO_ENAB(bus)) {
+			DHD_ERROR(("ISR in devsleep 1\n"));
+		}
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+	/* Threaded ISR: drain the DPC inline until no resched is requested */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	while (dhdsdio_dpc(bus));
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+#endif
+
+}
+
+#ifdef SDTEST
+/*
+ * Initialize SDTEST packet-generator state from the module parameters
+ * (dhd_pktgen_len, dhd_pktgen, dhd_watchdog_ms).
+ *
+ * NOTE(review): divides by dhd_watchdog_ms — assumes the watchdog
+ * period is non-zero when SDTEST is enabled; confirm at call sites.
+ */
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = 10000 / dhd_watchdog_ms;
+	/* Packets per watchdog tick, rounded up */
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+/*
+ * SDTEST packet generator, run from the watchdog.  Depending on
+ * bus->pktgen_mode it echoes, sends-and-discards, or requests an rx
+ * burst from the dongle, building each test frame by hand (test header
+ * cmd/extra, 16-bit little-endian length, then fill pattern).
+ */
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d rcvd %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd);
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, (uint8)bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		len = bus->pktgen_len;
+		/* NOTE(review): stray ';' after '{' below — harmless empty statement */
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {;
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count;
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field (little-endian 16-bit) */
+		*data++ = (len >> 0);
+		*data++ = (len >> 8);
+
+		/* Then fill in the remainder -- N/A for burst, but who cares... */
+		for (fillbyte = 0; fillbyte < len; fillbyte++)
+			*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) {
+			bus->pktgen_fail++;
+			/* Stop generating after pktgen_stop consecutive-total failures */
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+/*
+ * Send an SDPCM_TEST_SEND control frame to the dongle asking it to
+ * transmit 'count' test packets of up to pktgen_maxlen bytes.
+ * A count of 0 tells the dongle to stop sending.
+ */
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header: cmd, count, little-endian max length */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = count;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE))
+		bus->pktgen_fail++;
+}
+
+
+/*
+ * Process a received SDTEST frame: parse the test header (cmd, extra,
+ * 16-bit little-endian length), validate lengths, then act per command
+ * (echo back, verify echo response, verify-and-discard, etc.).
+ * Takes ownership of 'pkt' — it is freed or re-transmitted here.
+ */
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		/* NOTE(review): "restrcv" in the message is a typo for "testrcv"
+		 * (left as-is: runtime string)
+		 */
+		DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		/* Verify the echoed fill pattern; stop at first mismatch */
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			/* NOTE(review): prn/testval advance only on mismatch, so
+			 * after the first matching byte the same byte is re-compared
+			 * for the rest of the loop — looks suspect; confirm intent.
+			 */
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						i, bus->pktgen_rcvd_rcvsession, testval, *prn));
+					prn++; testval++;
+				}
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+				(bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			DHD_ERROR(("Pktgen:rcv test complete!\n"));
+			bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+			/* count 0 (FALSE) tells the dongle to stop sending */
+			dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+/* Disable SDIO interrupt delivery for the bus behind this public handle. */
+extern void
+dhd_disable_intr(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bcmsdh_intr_disable(bus->sdh);
+}
+
+/*
+ * Periodic watchdog tick for the SDIO bus.  When interrupts appear
+ * stalled it polls CCCR INTPEND and schedules the DPC; it also drives
+ * periodic console readout (DHD_DEBUG), test packet generation (SDTEST),
+ * and the idle-timeout clock shutoff.
+ *
+ * Returns bus->ipend (whether an interrupt is pending), FALSE when the
+ * dongle is in reset, the bus is sleeping, or the bus is down.
+ */
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	/* Ignore the timer if simulating bus down */
+	if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+		return FALSE;
+
+	if (dhdp->busstate == DHD_BUS_DOWN)
+		return FALSE;
+
+	/* Poll period: check device if appropriate. */
+	if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			/* Only probe the device when no DPC is already queued */
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (SLPAUTO_ENAB(bus))
+				dhdsdio_bussleep(bus, FALSE);
+			else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				if (SLPAUTO_ENAB(bus))
+					dhdsdio_bussleep(bus, TRUE);
+				else
+					dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+/*
+ * Inject a console command into the dongle's virtual console: zero the
+ * circular-buffer index, copy the message into the shared cbuf, write
+ * its length to vcons_in, then "bump" the dongle with an empty event
+ * packet so it notices the input.
+ *
+ * msg/msglen: command bytes to write (caller-sized; no bounds check
+ *             against the dongle cbuf is done here).
+ * Returns 0 on success or a negative BCME_* error.
+ */
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* No pend allowed since txpkt is called later, ht clk has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 * Best-effort: a failed PKTGET is silently skipped.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE);
+
+done:
+	/* Release the clock immediately when idletime demands it */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+/*
+ * Hex-dump the CIS (Card Information Structure) of SDIO function 'fn',
+ * 16 bytes per line, stopping at the 0xff end-of-chain tuple.
+ *
+ * 'tdata' counts down the remaining bytes of the current tuple; when it
+ * wraps (the '!tdata--' test fires on zero, then decrements), the current
+ * byte is interpreted as the next tuple tag and 'tdata' is reloaded from
+ * the following length byte (+1 for the length byte itself). Because
+ * 'tdata' is unsigned, the post-decrement deliberately relies on wraparound
+ * to stay non-zero until the tuple is consumed.
+ */
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+/* Return TRUE iff 'chipid' identifies a dongle chip this driver supports. */
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	switch (chipid) {
+	case BCM4325_CHIP_ID:
+	case BCM4329_CHIP_ID:
+	case BCM4315_CHIP_ID:
+	case BCM4319_CHIP_ID:
+	case BCM4336_CHIP_ID:
+	case BCM4330_CHIP_ID:
+	case BCM43237_CHIP_ID:
+	case BCM43362_CHIP_ID:
+	case BCM4314_CHIP_ID:
+	case BCM4334_CHIP_ID:
+	case BCM43239_CHIP_ID:
+	case BCM4324_CHIP_ID:
+	case BCM4335_CHIP_ID:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}
+
+/*
+ * Bus-layer probe callback, invoked by the bcmsdh layer when an SDIO
+ * device appears. Validates vendor/device IDs, allocates and zeroes the
+ * dhd_bus_t state, attaches to the dongle, allocates buffers, performs
+ * bus init, registers the SDIO ISR (unless polling), optionally applies
+ * a platform MAC address, optionally downloads firmware, and finally
+ * attaches the network interface.
+ *
+ * Returns the new bus handle, or NULL on failure (everything allocated
+ * so far is torn down via dhdsdio_release).
+ */
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+	dhd_doflow = FALSE;
+	dhd_dongle_memsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+	case 0x0000:
+	case VENDOR_BROADCOM:
+		break;
+	default:
+		DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+		           __FUNCTION__, venid));
+		return NULL;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+	case BCM4325_D11DUAL_ID:	/* 4325 802.11a/g id */
+	case BCM4325_D11G_ID:	/* 4325 802.11g 2.4Ghz band id */
+	case BCM4325_D11A_ID:	/* 4325 802.11a 5Ghz band id */
+		DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+		break;
+	case BCM4329_D11N_ID:	/* 4329 802.11n dualband device */
+	case BCM4329_D11N2G_ID:	/* 4329 802.11n 2.4G device */
+	case BCM4329_D11N5G_ID:	/* 4329 802.11n 5G device */
+	case 0x4329:
+		DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+		break;
+	case BCM4315_D11DUAL_ID:	/* 4315 802.11a/g id */
+	case BCM4315_D11G_ID:	/* 4315 802.11g id */
+	case BCM4315_D11A_ID:	/* 4315 802.11a id */
+		DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+		break;
+	case BCM4319_D11N_ID:	/* 4319 802.11n id */
+	case BCM4319_D11N2G_ID:	/* 4319 802.11n2g id */
+	case BCM4319_D11N5G_ID:	/* 4319 802.11n5g id */
+		DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+		break;
+	case 0:
+		DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+		          __FUNCTION__));
+		break;
+
+	default:
+		DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+		           __FUNCTION__, venid, devid));
+		return NULL;
+	}
+
+	if (osh == NULL) {
+		/* Ask the OS interface part for an OSL handle */
+		if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
+			DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
+			return NULL;
+		}
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attach the common module */
+	dhd_common_init(osh);
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+		bcmsdh_intr_disable(sdh);
+		if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+			DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+			           __FUNCTION__, ret));
+			goto fail;
+		}
+		DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+		           __FUNCTION__));
+	}
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	/* Read MAC address from external customer place */
+	memset(&ea_addr, 0, sizeof(ea_addr));
+	ret = dhd_custom_get_mac_address(ea_addr.octet);
+	if (!ret) {
+		memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+	/* if firmware path present try to download and bring up bus */
+	/* NOTE(review): only BCME_NOTUP aborts the probe here; other
+	 * dhd_bus_start failures deliberately fall through to net attach.
+	 */
+	if (dhd_download_fw_on_driverload && (ret = dhd_bus_start(bus->dhd)) != 0) {
+		DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+		if (ret == BCME_NOTUP)
+			goto fail;
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_net_attach(bus->dhd, 0) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+	return NULL;
+}
+
+/*
+ * First stage of probe: bring the chip's backplane up far enough to
+ * identify it and attach the SI (silicon interconnect) handle.
+ *
+ * Steps: reset the F1 address window, force the initial clock state via
+ * ChipClkCSR, (debug builds) dump each function's CIS, attach si handle,
+ * verify the chip is supported, size ARM/SOCRAM cores, select the SDPCM
+ * device core, and initialize the TX queue and poll/interrupt flags.
+ *
+ * Returns TRUE on success; on failure detaches any si handle and
+ * returns FALSE.
+ */
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+	uint16 devid)
+{
+	int err = 0;
+	uint8 clkctl = 0;
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+	           bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+
+#endif /* DHD_DEBUG */
+
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+
+
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+	if (!err)
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+	if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+		DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+		           err, DHD_INIT_CLKCTL1, clkctl));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		/* NOTE(review): this 'err' shadows the outer 'err' above;
+		 * only the inner one is consulted after the CIS loop.
+		 */
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		/* NOTE(review): the loop below runs fn = 0..numfn inclusive,
+		 * i.e. numfn+1 entries of cis[SDIOD_MAX_IOFUNCS]; this ASSERT
+		 * permits numfn == SDIOD_MAX_IOFUNCS, which would overflow.
+		 * Confirm numfn < SDIOD_MAX_IOFUNCS in practice.
+		 */
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		/* Free every buffer allocated below the failing index */
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	if (bus->sih->buscorerev >= 12)
+		dhdsdio_clk_kso_init(bus);
+	else
+		bus->kso = TRUE;
+
+	/* NOTE(review): intentionally empty placeholder for 4330-SDIOD-mode
+	 * handling; nothing is done here in this version.
+	 */
+	if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+	}
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_memsize)
+			dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+		           bus->ramsize, bus->orig_ramsize));
+
+		bus->srmemsize = si_socram_srmem_size(bus->sih);
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+	/* Reconfigure the "transmit data available" signalling for newer
+	 * SDIOD cores when RX-data interrupt mode 1 is selected.
+	 */
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+	    (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL)
+		si_detach(bus->sih);
+	return FALSE;
+}
+
+/*
+ * Second stage of probe: allocate the bus's receive buffers.
+ *
+ * - rxbuf:   control-frame receive buffer, sized from the protocol's
+ *            maximum control message plus the SDPCM header, rounded up
+ *            for bus alignment (only when maxctl is non-zero).
+ * - databuf: buffer for reassembling glommed data packets; dataptr is
+ *            the DHD_SDALIGN-aligned view into it.
+ *
+ * Returns TRUE on success, FALSE on allocation failure (nothing this
+ * function allocated is left behind on failure).
+ */
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* Release rxbuf which was already allocated above.
+		 * BUGFIX: the original tested '!bus->rxblen', which skipped
+		 * the free exactly when rxbuf HAD been allocated (rxblen is
+		 * non-zero then), leaking it. Free only when rxbuf exists.
+		 */
+		if (bus->rxbuf) {
+			DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
+			bus->rxbuf = NULL;
+			bus->rxblen = 0;
+		}
+		goto fail;
+	}
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+/*
+ * Third stage of probe: put the bus into a known-down state and query
+ * the SD host driver's operating parameters (divisor, bus mode, F2
+ * block size, packet-chaining capability) via bcmsdh iovars.
+ *
+ * Always returns TRUE; iovar query failures are logged and recorded as
+ * sentinel values (-1 / 0) rather than treated as fatal.
+ */
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+
+	return TRUE;
+}
+
+/*
+ * Record the firmware and NVRAM image paths on the bus handle, then
+ * run the actual download over SDIO.
+ * Returns TRUE on success, FALSE otherwise.
+ */
+bool
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+	char *pfw_path, char *pnv_path)
+{
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	return dhdsdio_download_firmware(bus, osh, bus->sdh);
+}
+
+/*
+ * Hold a wake lock, raise the backplane clock, download firmware and
+ * NVRAM, then drop back to SD-only clock.
+ * Returns TRUE when the download helper reported success (0).
+ */
+static bool
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	bool ok;
+
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* Firmware download requires the full backplane clock */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	ok = (_dhdsdio_download_firmware(bus) == 0);
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+
+	return ok;
+}
+
+/* Detach and free everything.
+ *
+ * Full teardown of a bus instance: detach the net/OS layer, unhook the
+ * SDIO interrupt, release the dongle (clocks/si handle), free buffers
+ * and the debug console buffer, then free the bus struct itself and
+ * finally the OSL handle. Safe to call with bus == NULL (e.g. from the
+ * probe failure path before allocation succeeded).
+ */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			/* Capture the isolation setting before dhd_detach */
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+		}
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	if (osh)
+		dhd_osl_detach(osh);
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+/*
+ * Free the receive/data buffers and downloaded NVRAM vars attached to
+ * 'bus'. Skipped entirely while the dongle is held in reset, since the
+ * buffers will be reused when the dongle comes back up. When static
+ * preallocated buffers are in use, only the pointers are cleared.
+ */
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf != NULL) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = NULL;
+		bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf != NULL) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->vars != NULL && bus->varsz != 0) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+}
+
+
+/*
+ * Release the silicon-interconnect attachment to the dongle.
+ *
+ * Unless the dongle is isolated, arms the dongle watchdog (si_watchdog
+ * with a small count -- presumably to force a dongle self-reset; confirm
+ * against siutils) before detaching the si handle and freeing any
+ * downloaded NVRAM vars. When 'reset_flag' is set and the dongle is
+ * already in reset, this is a no-op.
+ */
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+	           bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+		/* Need the backplane clock up to touch the watchdog */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+#if !defined(BCMLXSDMMC)
+		if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+/* Bus-driver disconnect callback: tear down the whole bus instance. */
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus != NULL) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+/* Probe/disconnect callback pair handed to the bcmsdh (SDIO host) layer. */
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect
+};
+
+/* Hook this bus driver into the bcmsdh layer so probe/disconnect fire. */
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	return bcmsdh_register(&dhd_sdio);
+}
+
+/* Detach this bus driver from the bcmsdh layer. */
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+/* 'semaphore' is signalled by the bcmsdh layer when a device appears. */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+	return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+/* Tear down the dummy notification client registered above. */
+void dhd_bus_unreg_sdio_notify(void)
+{
+	bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+/*
+ * Download the compiled-in firmware image (dlarray) into dongle RAM in
+ * MEMBLOCK-sized chunks. In debug builds the image is read back and
+ * compared byte-for-byte to verify the download.
+ *
+ * Returns 0 on success or a negative BCME_* / -1 error code.
+ */
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			           __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	/* Write the final partial block, if any */
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			           __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		/* BUGFIX: the original passed ularray straight to memset,
+		 * dereferencing NULL on allocation failure.
+		 */
+		if (ularray == NULL) {
+			DHD_ERROR(("%s: MALLOC of %d-byte upload buffer failed\n",
+			           __FUNCTION__, bus->ramsize));
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+				           __FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+				           __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+/*
+ * Stream a firmware image file into dongle RAM.
+ *
+ * Reads the file in MEMBLOCK-sized chunks through the OS image API into
+ * a DHD_SDALIGN-aligned scratch buffer and writes each chunk to dongle
+ * memory at increasing offsets.
+ *
+ * Returns 0 on success; -1 if the file could not be opened, the scratch
+ * buffer could not be allocated, or the file was empty; otherwise the
+ * dhdsdio_membytes error code.
+ */
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	uint len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL)
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	/* Align the write pointer for the SDIO transfer */
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			/* BUGFIX: report the actual chunk length that failed,
+			 * not the MEMBLOCK constant (the last chunk is short).
+			 */
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			           __FUNCTION__, bcmerror, len, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+ EXAMPLE: nvram_array
+ nvram_array format:
+ name=value
+ Use carriage return at the end of each assignment, and an empty string with
+ carriage return at the end of array.
+
+ For example:
+ unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+ Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+ Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
+
+/* Record an in-memory NVRAM parameter string (see the nvram_array example
+ * above) to be used by dhdsdio_download_nvram when no NVRAM file is given.
+ * The string is not copied; the caller must keep it alive.
+ */
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
+
+/*
+ * Download NVRAM variables into the dongle.
+ *
+ * The variables come either from the file at bus->nv_path (preferred) or
+ * from the in-memory string set via dhd_bus_set_nvram_params. The raw
+ * text is normalized by process_nvram_vars, padded to a 4-byte boundary,
+ * double-NUL terminated, and handed to dhdsdio_downloadvars.
+ *
+ * Returns 0 when there is nothing to download or on success, otherwise a
+ * negative error / BCME_SDIO_ERROR.
+ */
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	/* Nothing to do if neither a file path nor an in-memory string is set */
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		/* Strip comments/whitespace; returns the compacted length */
+		len = process_nvram_vars(bufp, len);
+		/* Pad to a 32-bit boundary, then append the terminating NUL */
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+ * Core firmware-download sequence (clock already raised by caller):
+ * hold the ARM in reset, write the firmware image (external file takes
+ * precedence over the compiled-in array), write NVRAM variables, then
+ * release the ARM from reset.
+ *
+ * Returns 0 on success, negative on any failure.
+ */
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			/* Fall back to the compiled-in image */
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+/* Receive wrapper: refuse bus traffic while the device is asleep (KSO off). */
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	return bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+}
+
+/* Transmit wrapper: refuse bus traffic while the device is asleep (KSO off). */
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int rc;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	rc = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+	return rc;
+}
+
+/* Return the attached chip's ID; requires a valid si handle. */
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+/* Accessor: the public dhd handle associated with this bus. */
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+/* Accessor: the bus's transmit packet queue. */
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+/* Accessor: per-packet SDPCM header length reserved by this bus. */
+uint
+dhd_bus_hdrlen(struct dhd_bus *bus)
+{
+	return SDPCM_HDRLEN;
+}
+
+/*
+ * Put the dongle into, or bring it out of, reset (WLAN OFF / WLAN ON).
+ *
+ * flag == TRUE:  stop the watchdog, flow-control all interfaces, stop
+ *                the bus, release the dongle, and mark it in reset so
+ *                the platform may cut power.
+ * flag == FALSE: after power is restored, reset the SD client, re-attach,
+ *                re-download firmware and re-init the bus; if the dongle
+ *                was not actually in reset, fall back to dhd_bus_start.
+ *
+ * Returns BCME_OK or BCME_SDIO_ERROR (or the dhd_bus_init/start result).
+ */
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop come before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			bcmsdh_set_irq(FALSE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+#ifdef DHDTHREAD
+			dhd_os_sdlock(dhdp);
+#endif /* DHDTHREAD */
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+			                         (uint32 *)SI_ENUM_BASE,
+			                         bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+				    dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						bcmsdh_set_irq(TRUE);
+						dhd_enable_oob_intr(bus, TRUE);
+#endif /* defined(OOB_INTR_ONLY) */
+
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control */
+						dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						/* Bus init failed: undo the bring-up */
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+						                       TRUE, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+#ifdef DHDTHREAD
+			dhd_os_sdunlock(dhdp);
+#endif /* DHDTHREAD */
+		} else {
+			bcmerror = BCME_SDIO_ERROR;
+			DHD_INFO(("%s called when dongle is not in reset\n",
+			          __FUNCTION__));
+			DHD_INFO(("Will call dhd_bus_start instead\n"));
+			sdioh_start(NULL, 1);
+			if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+				DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+				           __FUNCTION__, bcmerror));
+		}
+	}
+	return bcmerror;
+}
+
+/* Get Chip ID version */
+/* Like dhd_bus_chip, but keyed off the public dhd handle (no ASSERT). */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chip;
+}
+/* Read (set == FALSE) or write (set == TRUE) 'size' bytes of dongle
+ * memory at 'address' via the bus-internal membytes helper.
+ */
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
new file mode 100644
index 00000000000000..09a3d3457d4ee6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h
@@ -0,0 +1,277 @@
+/*
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h 322459 2012-03-20 22:36:07Z $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 1024
+
+#define WLFC_HANGER_ITEM_STATE_FREE 1
+#define WLFC_HANGER_ITEM_STATE_INUSE 2
+
+#define WLFC_PKTID_HSLOT_MASK 0xffff /* allow 16 bits only */
+#define WLFC_PKTID_HSLOT_SHIFT 8
+
+/* x -> TXSTATUS TAG to/from firmware */
+#define WLFC_PKTID_HSLOT_GET(x) \
+ (((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK)
+#define WLFC_PKTID_HSLOT_SET(var, slot) \
+ ((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \
+ (((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT))
+
+#define WLFC_PKTID_FREERUNCTR_MASK 0xff
+
+#define WLFC_PKTID_FREERUNCTR_GET(x) ((x) & WLFC_PKTID_FREERUNCTR_MASK)
+#define WLFC_PKTID_FREERUNCTR_SET(var, ctr) \
+ ((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \
+ (((ctr) & WLFC_PKTID_FREERUNCTR_MASK))))
+
+#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \
+ NULL : pktq_penq((pq), (prec), (p)))
+#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \
+ NULL : pktq_penq_head((pq), (prec), (p)))
+
+typedef enum ewlfc_packet_state {
+ eWLFC_PKTTYPE_NEW,
+ eWLFC_PKTTYPE_DELAYED,
+ eWLFC_PKTTYPE_SUPPRESSED,
+ eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+ eWLFC_MAC_ENTRY_ACTION_ADD,
+ eWLFC_MAC_ENTRY_ACTION_DEL,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+/* One slot in the packet "hanger": holds a tx packet handed to the
+ * firmware until its txstatus is returned.
+ */
+typedef struct wlfc_hanger_item {
+	uint8	state;		/* WLFC_HANGER_ITEM_STATE_FREE or _INUSE */
+	uint8	pad[3];		/* explicit padding; aligns 'identifier' */
+	uint32	identifier;	/* packet id (see WLFC_PKTID_* macros above) */
+	void*	pkt;		/* packet parked in this slot */
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;	/* timestamp at push, for latency accounting */
+#endif
+} wlfc_hanger_item_t;
+
+/* Table of hanger items plus push/pop statistics.
+ * 'items' is a C89-style variable-length tail: allocate with
+ * WLFC_HANGER_SIZE(n), which subtracts the [1] placeholder and adds
+ * room for n entries.
+ */
+typedef struct wlfc_hanger {
+	int max_items;			/* usable entries in items[] */
+	uint32 pushed;			/* successful pushes */
+	uint32 popped;			/* successful pops */
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;		/* no free slot available */
+	wlfc_hanger_item_t items[1];	/* actually max_items entries */
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \
+ sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
+
+#define WLFC_STATE_OPEN 1
+#define WLFC_STATE_CLOSE 2
+
+#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_PSQ_LEN 64
+#define WLFC_SENDQ_LEN 256
+
+#define WLFC_FLOWCONTROL_DELTA 8
+#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - WLFC_FLOWCONTROL_DELTA)
+#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER - WLFC_FLOWCONTROL_DELTA)
+
+typedef struct wlfc_mac_descriptor {
+ uint8 occupied;
+ uint8 interface_id;
+ uint8 iftype;
+ uint8 state;
+ uint8 ac_bitmap; /* for APSD */
+ uint8 requested_credit;
+ uint8 requested_packet;
+ uint8 ea[ETHER_ADDR_LEN];
+ /*
+ maintain (MAC,AC) based seq count for
+ packets going to the device. As well as bc/mc.
+ */
+ uint8 seq[AC_COUNT + 1];
+ uint8 generation;
+ struct pktq psq;
+ /* The AC pending bitmap that was reported to the fw at last change */
+ uint8 traffic_lastreported_bmp;
+ /* The new AC pending bitmap */
+ uint8 traffic_pending_bmp;
+ /* 1= send on next opportunity */
+ uint8 send_tim_signal;
+ uint8 mac_handle;
+#ifdef PROP_TXSTATUS_DEBUG
+ uint32 dstncredit_sent_packets;
+ uint32 dstncredit_acks;
+ uint32 opened_ct;
+ uint32 closed_ct;
+#endif
+} wlfc_mac_descriptor_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+ entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
+
+typedef struct athost_wl_stat_counters {
+ uint32 pktin;
+ uint32 pkt2bus;
+ uint32 pktdropped;
+ uint32 tlv_parse_failed;
+ uint32 rollback;
+ uint32 rollback_failed;
+ uint32 sendq_full_error;
+ uint32 delayq_full_error;
+ uint32 credit_request_failed;
+ uint32 packet_request_failed;
+ uint32 mac_update_failed;
+ uint32 psmode_update_failed;
+ uint32 interface_update_failed;
+ uint32 wlfc_header_only_pkt;
+ uint32 txstatus_in;
+ uint32 d11_suppress;
+ uint32 wl_suppress;
+ uint32 bad_suppress;
+ uint32 pkt_freed;
+ uint32 pkt_free_err;
+ uint32 psq_wlsup_retx;
+ uint32 psq_wlsup_enq;
+ uint32 psq_d11sup_retx;
+ uint32 psq_d11sup_enq;
+ uint32 psq_hostq_retx;
+ uint32 psq_hostq_enq;
+ uint32 mac_handle_notfound;
+ uint32 wlc_tossed_pkts;
+ uint32 dhd_hdrpulls;
+ uint32 generic_error;
+ /* an extra one for bc/mc traffic */
+ uint32 sendq_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+ /* all pkt2bus -> txstatus latency accumulated */
+ uint32 latency_sample_count;
+ uint32 total_status_latency;
+ uint32 latency_most_recent;
+ int idx_delta;
+ uint32 deltas[10];
+ uint32 fifo_credits_sent[6];
+ uint32 fifo_credits_back[6];
+ uint32 dropped_qfull[6];
+ uint32 signal_only_pkts_sent;
+ uint32 signal_only_pkts_freed;
+#endif
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+ (ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+ (ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE 0
+#define WLFC_FCMODE_IMPLIED_CREDIT 1
+#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+
+/* How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+/* Mask to check for only on-going AC_BE traffic */
+#define WLFC_AC_BE_TRAFFIC_ONLY 0xD
+
+typedef struct athost_wl_status_info {
+ uint8 last_seqid_to_wlc;
+
+ /* OSL handle */
+ osl_t* osh;
+ /* dhd pub */
+ void* dhdp;
+
+ /* stats */
+ athost_wl_stat_counters_t stats;
+
+ /* the additional ones are for bc/mc and ATIM FIFO */
+ int FIFO_credit[AC_COUNT + 2];
+
+ /* Credit borrow counts for each FIFO from each of the other FIFOs */
+ int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+ struct pktq SENDQ;
+
+ /* packet hanger and MAC->handle lookup table */
+ void* hanger;
+ struct {
+ /* table for individual nodes */
+ wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE];
+ /* table for interfaces */
+ wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM];
+ /* OS may send packets to unknown (unassociated) destinations */
+ /* A place holder for bc/mc and packets to unknown destinations */
+ wlfc_mac_descriptor_t other;
+ } destination_entries;
+ /* token position for different priority packets */
+ uint8 token_pos[AC_COUNT+1];
+ /* ON/OFF state for flow control to the host network interface */
+ uint8 hostif_flow_state[WLFC_MAX_IFNUM];
+ uint8 host_ifidx;
+ /* to flow control an OS interface */
+ uint8 toggle_host_if;
+
+ /*
+ Mode in which the dhd flow control shall operate. Must be set before
+ traffic starts to the device.
+ 0 - Do not do any proptxtstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ */
+ uint8 proptxstatus_mode;
+
+ /* To borrow credits */
+ uint8 allow_credit_borrow;
+
+ /* Timestamp to compute how long to defer borrowing for */
+ uint32 borrow_defer_timestamp;
+
+} athost_wl_status_info_t;
+
+int dhd_wlfc_enable(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(struct dhd_info *,
+ ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data);
+int dhd_wlfc_event(struct dhd_info *dhd);
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h
new file mode 100644
index 00000000000000..5e5a2e2e53140f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+/* Per-interface traffic counters reported by the dongle; field set
+ * mirrors the common subset of Linux struct net_device_stats.
+ */
+typedef struct {
+	unsigned long	rx_packets;	/* total packets received */
+	unsigned long	tx_packets;	/* total packets transmitted */
+	unsigned long	rx_bytes;	/* total bytes received */
+	unsigned long	tx_bytes;	/* total bytes transmitted */
+	unsigned long	rx_errors;	/* bad packets received */
+	unsigned long	tx_errors;	/* packet transmit problems */
+	unsigned long	rx_dropped;	/* packets dropped by dongle */
+	unsigned long	tx_dropped;	/* packets dropped by dongle */
+	unsigned long	multicast;	/* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
new file mode 100644
index 00000000000000..0e37df6e19eff3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+/* Fixed 4-byte metadata header the dongle prepends on the WL channel
+ * (type/version checked against WL_HEADER_TYPE / WL_HEADER_VER below).
+ */
+typedef struct wl_header {
+	uint8 type;		/* Header type */
+	uint8 version;		/* Header version */
+	int8 rssi;		/* RSSI */
+	uint8 pad;		/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN sizeof(wl_header_t)
+#define WL_HEADER_TYPE 0
+#define WL_HEADER_VER 1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c
new file mode 100644
index 00000000000000..eeee5ea9888a96
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/hndpmu.c
@@ -0,0 +1,197 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c 324060 2012-03-27 23:26:47Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define PMU_ERROR(args)
+
+#define PMU_MSG(args)
+
+/* For verbose debugging messages that are not intended to be enabled
+ * except in private builds.
+ */
+#define PMU_NONE(args)
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+ {32, 0x7},
+ {26, 0x6},
+ {22, 0x5},
+ {16, 0x4},
+ {12, 0x3},
+ {8, 0x2},
+ {4, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+/*
+ * Program the SDIO pad drive strength for chips whose PMU exposes it
+ * through chipcontrol word 1.
+ *
+ * sih:           SI attach handle (supplies chip id and PMU revision)
+ * osh:           OSL handle for register access
+ * drivestrength: requested strength in mA; 0 requests tri-state
+ *
+ * No-op when the chip has no PMU or the (chip, pmurev) pair is not in
+ * the tables above. Interrupt state is preserved across the core switch.
+ */
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	chipcregs_t *cc;
+	uint origidx, intr_val = 0;
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;
+	uint32 str_shift = 0;
+
+	/* Drive strength control lives behind the PMU; bail out if absent. */
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	/* Remember original core before switch to chipc */
+	cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+	/* Pick the per-chip/per-PMU-rev table and the chipcontrol bit field
+	 * (mask + shift) that holds the select value.
+	 */
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		/* 4336 appears with two PMU revs that use different tables. */
+		if (sih->pmurev == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		}
+		else if (sih->pmurev == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	default:
+		/* NOTE(review): 'chn' is not declared in this scope; this only
+		 * compiles because PMU_MSG() expands to nothing -- confirm before
+		 * enabling PMU messages.
+		 */
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+
+		break;
+	}
+
+	if (str_tab != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal or greater than the
+		 * requested strength. Drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		/* Tables are sorted descending and end in 0, so the loop always
+		 * terminates; back up one entry if there was no exact match.
+		 */
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
+
+		/* Select chipcontrol word 1, then read-modify-write the field. */
+		W_REG(osh, &cc->chipcontrol_addr, 1);
+		cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+
+	/* Return to original core */
+	si_restore_core(sih, origidx, intr_val);
+}
diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile
new file mode 100644
index 00000000000000..42b3b689b561b1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/Makefile
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# This Makefile serves the following purposes:
+#
+# 1. It generates native version information by querying
+# automerger maintained database to see where src/include
+# came from
+# 2. For select components, as listed in compvers.sh
+# it generates component version files
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 241686 2011-02-19 00:22:45Z $
+#
+
+SRCBASE := ..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch version
+epivers:
+ bash epivers.sh
+
+# Generate epivers.h for native branch version
+compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "Generating component versions, if any"; \
+ bash compvers.sh; \
+ else \
+ echo "Skipping component version generation"; \
+ fi
+
+# Generate epivers.h for native branch version
+clean_compvers:
+ @if [ -s "compvers.sh" ]; then \
+ echo "bash compvers.sh clean"; \
+ bash compvers.sh clean; \
+ else \
+ echo "Skipping component version clean"; \
+ fi
+
+clean:
+ rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 00000000000000..63513e6f6e2996
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,375 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _AIDMP_H
+#define _AIDMP_H
+
+
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+
+#define CC_SIM 0
+#define CC_EROM 1
+#define CC_CORESIGHT 9
+#define CC_VERIF 0xb
+#define CC_OPTIMO 0xd
+#define CC_GEN 0xe
+#define CC_PRIMECELL 0xf
+
+
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+
+#define MPD_MUI_MASK 0x0000ff00
+#define MPD_MUI_SHIFT 8
+#define MPD_MP_MASK 0x000000f0
+#define MPD_MP_SHIFT 4
+
+
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000
+
+
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* Register layout of an AMBA DMP (wrapper) block; offsets correspond to
+ * the AI_* #defines later in this file. Declared volatile because it is
+ * overlaid on memory-mapped hardware registers.
+ *
+ * NOTE(review): the repeated 'PAD' members rely on PAD being a macro that
+ * expands to a unique field name per use (typically via __LINE__) -- confirm
+ * its definition in this tree, otherwise this struct would not compile.
+ */
+typedef volatile struct _aidmp {
+	uint32	oobselina30;
+	uint32	oobselina74;
+	uint32	PAD[6];
+	uint32	oobselinb30;
+	uint32	oobselinb74;
+	uint32	PAD[6];
+	uint32	oobselinc30;
+	uint32	oobselinc74;
+	uint32	PAD[6];
+	uint32	oobselind30;
+	uint32	oobselind74;
+	uint32	PAD[38];
+	uint32	oobselouta30;
+	uint32	oobselouta74;
+	uint32	PAD[6];
+	uint32	oobseloutb30;
+	uint32	oobseloutb74;
+	uint32	PAD[6];
+	uint32	oobseloutc30;
+	uint32	oobseloutc74;
+	uint32	PAD[6];
+	uint32	oobseloutd30;
+	uint32	oobseloutd74;
+	uint32	PAD[38];
+	uint32	oobsynca;
+	uint32	oobseloutaen;
+	uint32	PAD[6];
+	uint32	oobsyncb;
+	uint32	oobseloutben;
+	uint32	PAD[6];
+	uint32	oobsyncc;
+	uint32	oobseloutcen;
+	uint32	PAD[6];
+	uint32	oobsyncd;
+	uint32	oobseloutden;
+	uint32	PAD[38];
+	uint32	oobaextwidth;
+	uint32	oobainwidth;
+	uint32	oobaoutwidth;
+	uint32	PAD[5];
+	uint32	oobbextwidth;
+	uint32	oobbinwidth;
+	uint32	oobboutwidth;
+	uint32	PAD[5];
+	uint32	oobcextwidth;
+	uint32	oobcinwidth;
+	uint32	oobcoutwidth;
+	uint32	PAD[5];
+	uint32	oobdextwidth;
+	uint32	oobdinwidth;
+	uint32	oobdoutwidth;
+	uint32	PAD[37];
+	uint32	ioctrlset;
+	uint32	ioctrlclear;
+	uint32	ioctrl;
+	uint32	PAD[61];
+	uint32	iostatus;
+	uint32	PAD[127];
+	uint32	ioctrlwidth;
+	uint32	iostatuswidth;
+	uint32	PAD[62];
+	uint32	resetctrl;
+	uint32	resetstatus;
+	uint32	resetreadid;
+	uint32	resetwriteid;
+	uint32	PAD[60];
+	uint32	errlogctrl;
+	uint32	errlogdone;
+	uint32	errlogstatus;
+	uint32	errlogaddrlo;
+	uint32	errlogaddrhi;
+	uint32	errlogid;
+	uint32	errloguser;
+	uint32	errlogflags;
+	uint32	PAD[56];
+	uint32	intstatus;
+	uint32	PAD[255];
+	uint32	config;
+	uint32	PAD[63];
+	uint32	itcr;
+	uint32	PAD[3];
+	uint32	itipooba;
+	uint32	itipoobb;
+	uint32	itipoobc;
+	uint32	itipoobd;
+	uint32	PAD[4];
+	uint32	itipoobaout;
+	uint32	itipoobbout;
+	uint32	itipoobcout;
+	uint32	itipoobdout;
+	uint32	PAD[4];
+	uint32	itopooba;
+	uint32	itopoobb;
+	uint32	itopoobc;
+	uint32	itopoobd;
+	uint32	PAD[4];
+	uint32	itopoobain;
+	uint32	itopoobbin;
+	uint32	itopoobcin;
+	uint32	itopoobdin;
+	uint32	PAD[4];
+	uint32	itopreset;
+	uint32	PAD[15];
+	uint32	peripherialid4;
+	uint32	peripherialid5;
+	uint32	peripherialid6;
+	uint32	peripherialid7;
+	uint32	peripherialid0;
+	uint32	peripherialid1;
+	uint32	peripherialid2;
+	uint32	peripherialid3;
+	uint32	componentid0;
+	uint32	componentid1;
+	uint32	componentid2;
+	uint32	componentid3;
+} aidmp_t;
+
+#endif
+
+
+#define OOB_BUSCONFIG 0x020
+#define OOB_STATUSA 0x100
+#define OOB_STATUSB 0x104
+#define OOB_STATUSC 0x108
+#define OOB_STATUSD 0x10c
+#define OOB_ENABLEA0 0x200
+#define OOB_ENABLEA1 0x204
+#define OOB_ENABLEA2 0x208
+#define OOB_ENABLEA3 0x20c
+#define OOB_ENABLEB0 0x280
+#define OOB_ENABLEB1 0x284
+#define OOB_ENABLEB2 0x288
+#define OOB_ENABLEB3 0x28c
+#define OOB_ENABLEC0 0x300
+#define OOB_ENABLEC1 0x304
+#define OOB_ENABLEC2 0x308
+#define OOB_ENABLEC3 0x30c
+#define OOB_ENABLED0 0x380
+#define OOB_ENABLED1 0x384
+#define OOB_ENABLED2 0x388
+#define OOB_ENABLED3 0x38c
+#define OOB_ITCR 0xf00
+#define OOB_ITIPOOBA 0xf10
+#define OOB_ITIPOOBB 0xf14
+#define OOB_ITIPOOBC 0xf18
+#define OOB_ITIPOOBD 0xf1c
+#define OOB_ITOPOOBA 0xf30
+#define OOB_ITOPOOBB 0xf34
+#define OOB_ITOPOOBC 0xf38
+#define OOB_ITOPOOBD 0xf3c
+
+
+#define AI_OOBSELINA30 0x000
+#define AI_OOBSELINA74 0x004
+#define AI_OOBSELINB30 0x020
+#define AI_OOBSELINB74 0x024
+#define AI_OOBSELINC30 0x040
+#define AI_OOBSELINC74 0x044
+#define AI_OOBSELIND30 0x060
+#define AI_OOBSELIND74 0x064
+#define AI_OOBSELOUTA30 0x100
+#define AI_OOBSELOUTA74 0x104
+#define AI_OOBSELOUTB30 0x120
+#define AI_OOBSELOUTB74 0x124
+#define AI_OOBSELOUTC30 0x140
+#define AI_OOBSELOUTC74 0x144
+#define AI_OOBSELOUTD30 0x160
+#define AI_OOBSELOUTD74 0x164
+#define AI_OOBSYNCA 0x200
+#define AI_OOBSELOUTAEN 0x204
+#define AI_OOBSYNCB 0x220
+#define AI_OOBSELOUTBEN 0x224
+#define AI_OOBSYNCC 0x240
+#define AI_OOBSELOUTCEN 0x244
+#define AI_OOBSYNCD 0x260
+#define AI_OOBSELOUTDEN 0x264
+#define AI_OOBAEXTWIDTH 0x300
+#define AI_OOBAINWIDTH 0x304
+#define AI_OOBAOUTWIDTH 0x308
+#define AI_OOBBEXTWIDTH 0x320
+#define AI_OOBBINWIDTH 0x324
+#define AI_OOBBOUTWIDTH 0x328
+#define AI_OOBCEXTWIDTH 0x340
+#define AI_OOBCINWIDTH 0x344
+#define AI_OOBCOUTWIDTH 0x348
+#define AI_OOBDEXTWIDTH 0x360
+#define AI_OOBDINWIDTH 0x364
+#define AI_OOBDOUTWIDTH 0x368
+
+
+#define AI_IOCTRLSET 0x400
+#define AI_IOCTRLCLEAR 0x404
+#define AI_IOCTRL 0x408
+#define AI_IOSTATUS 0x500
+#define AI_RESETCTRL 0x800
+#define AI_RESETSTATUS 0x804
+
+#define AI_IOCTRLWIDTH 0x700
+#define AI_IOSTATUSWIDTH 0x704
+
+#define AI_RESETREADID 0x808
+#define AI_RESETWRITEID 0x80c
+#define AI_ERRLOGCTRL 0xa00
+#define AI_ERRLOGDONE 0xa04
+#define AI_ERRLOGSTATUS 0xa08
+#define AI_ERRLOGADDRLO 0xa0c
+#define AI_ERRLOGADDRHI 0xa10
+#define AI_ERRLOGID 0xa14
+#define AI_ERRLOGUSER 0xa18
+#define AI_ERRLOGFLAGS 0xa1c
+#define AI_INTSTATUS 0xa00
+#define AI_CONFIG 0xe00
+#define AI_ITCR 0xf00
+#define AI_ITIPOOBA 0xf10
+#define AI_ITIPOOBB 0xf14
+#define AI_ITIPOOBC 0xf18
+#define AI_ITIPOOBD 0xf1c
+#define AI_ITIPOOBAOUT 0xf30
+#define AI_ITIPOOBBOUT 0xf34
+#define AI_ITIPOOBCOUT 0xf38
+#define AI_ITIPOOBDOUT 0xf3c
+#define AI_ITOPOOBA 0xf50
+#define AI_ITOPOOBB 0xf54
+#define AI_ITOPOOBC 0xf58
+#define AI_ITOPOOBD 0xf5c
+#define AI_ITOPOOBAIN 0xf70
+#define AI_ITOPOOBBIN 0xf74
+#define AI_ITOPOOBCIN 0xf78
+#define AI_ITOPOOBDIN 0xf7c
+#define AI_ITOPRESET 0xf90
+#define AI_PERIPHERIALID4 0xfd0
+#define AI_PERIPHERIALID5 0xfd4
+#define AI_PERIPHERIALID6 0xfd8
+#define AI_PERIPHERIALID7 0xfdc
+#define AI_PERIPHERIALID0 0xfe0
+#define AI_PERIPHERIALID1 0xfe4
+#define AI_PERIPHERIALID2 0xfe8
+#define AI_PERIPHERIALID3 0xfec
+#define AI_COMPONENTID0 0xff0
+#define AI_COMPONENTID1 0xff4
+#define AI_COMPONENTID2 0xff8
+#define AI_COMPONENTID3 0xffc
+
+
+#define AIRC_RESET 1
+
+
+#define AICFG_OOB 0x00000020
+#define AICFG_IOS 0x00000010
+#define AICFG_IOC 0x00000008
+#define AICFG_TO 0x00000004
+#define AICFG_ERRL 0x00000002
+#define AICFG_RST 0x00000001
+
+
+#define OOB_SEL_OUTEN_B_5 15
+#define OOB_SEL_OUTEN_B_6 23
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_android_types.h b/drivers/net/wireless/bcmdhd/include/bcm_android_types.h
new file mode 100644
index 00000000000000..fd8aea72539239
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_android_types.h
@@ -0,0 +1,44 @@
+/*
+ * Android related remote wl declarations
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ * $Id: bcm_android_types.h 241182 2011-02-17 21:50:03Z $
+ *
+ */
+#ifndef _wlu_android_h
+#define _wlu_android_h
+#define __fd_mask unsigned long
+typedef struct
+ {
+
+#ifdef __USE_XOPEN
+ __fd_mask fds_bits[__FD_SETSIZE / __NFDBITS];
+# define __FDS_BITS(set) ((set)->fds_bits)
+#else
+ __fd_mask __fds_bits[__FD_SETSIZE / __NFDBITS];
+# define __FDS_BITS(set) ((set)->__fds_bits)
+#endif
+ } fd_set1;
+#define fd_set fd_set1
+
+#define htons(x) BCMSWAP16(x)
+#define htonl(x) BCMSWAP32(x)
+#define __FD_ZERO(s) \
+ do { \
+ unsigned int __i; \
+ fd_set *__arr = (s); \
+ for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i) \
+ __FDS_BITS(__arr)[__i] = 0; \
+ } while (0)
+#define __FD_SET(d, s) (__FDS_BITS (s)[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, s) (__FDS_BITS (s)[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, s) ((__FDS_BITS (s)[__FDELT(d)] & __FDMASK(d)) != 0)
+#define MCL_CURRENT 1
+#define MCL_FUTURE 2
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
new file mode 100644
index 00000000000000..26da752186a20b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h
@@ -0,0 +1,29 @@
+/*
+ * BCM common config options
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_cfg.h 294399 2011-11-07 03:31:22Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
new file mode 100644
index 00000000000000..8fe3de7afb7249
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h
@@ -0,0 +1,361 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ * pool manager: A singleton object that acts as a factory for
+ * pool allocators. It also is used for global
+ * instrumentation, such as reporting all blocks
+ * in use across all data structures. The pool manager
+ * creates and provides individual memory pools
+ * upon request to application code.
+ *
+ * memory pool: An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ * bcm_mpm_* Memory pool manager
+ * bcm_mp_* Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ * prealloc: The contiguous memory block of objects can either be supplied
+ * by the client or malloc'ed by the memory manager. The objects are
+ * allocated out of a block of memory and freed back to the block.
+ *
+ * heap: The memory pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing statistics
+ * and instrumentation on top of the heap, without modifying the heap
+ * allocation implementation.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+ char name[BCM_MP_NAMELEN]; /* Name of this pool. */
+ unsigned int objsz; /* Object size allocated in this pool */
+ uint16 nobj; /* Total number of objects in this pool */
+ uint16 num_alloc; /* Number of objects currently allocated */
+ uint16 high_water; /* Max number of allocated objects. */
+ uint16 failed_alloc; /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ * osh: INPUT Operating system handle. Needed for heap memory allocation.
+ * max_pools: INPUT Maximum number of mempools supported.
+ * mgr: OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ * BCME_OK Object initialized successfully. May be used.
+ * BCME_NOMEM Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ * mgr: INPUT Pointer to pool manager handle.
+ *
+ * Returns:
+ * BCME_OK Memory pool manager successfully de-initialized.
+ * other Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ * pool uses a contiguous block of pre-alloced
+ * memory. The memory block may either be provided
+ * by the client or dynamically allocated by the
+ * pool manager.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * Must be >= sizeof(void *).
+ * nobj: INPUT Maximum number of concurrently existing objects to support
+ * memstart INPUT Pointer to the memory to use, or NULL to malloc()
+ * memsize INPUT Number of bytes referenced from memstart (for error checking).
+ * Must be 0 if 'memstart' is NULL.
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+ unsigned int obj_sz,
+ int nobj,
+ void *memstart,
+ unsigned int memsize,
+ char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ * pool allocator uses the heap (malloc/free) for memory.
+ * In this case, the pool allocator is just providing
+ * statistics and instrumentation on top of the heap,
+ * without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pool manager
+ * obj_sz: INPUT Size of objects that will be allocated by the new pool
+ * poolname INPUT For instrumentation, the name of the pool
+ * newp: OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ * BCME_OK Pool created ok.
+ * other Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+ char poolname[BCM_MP_NAMELEN],
+ bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ * all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * pool: INPUT The handle of the pool to delete
+ *
+ * Returns:
+ * BCME_OK Pool deleted ok.
+ * other Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * stats: OUTPUT Array of pool statistics.
+ * nentries: MOD Max elements in 'stats' array on INPUT. Actual number
+ * of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager
+ * b: OUTPUT Output buffer.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ * compensate for alignment requirements of the objects.
+ * This function provides the padded object size. If clients
+ * pre-allocate a memory slab for a memory pool, the
+ * padded object size should be used by the client to allocate
+ * the memory slab (in order to provide sufficient space for
+ * the maximum number of objects).
+ *
+ * Parameters:
+ * mgr: INPUT The handle to the pools manager.
+ * obj_sz: INPUT Input object size.
+ * padded_obj_sz: OUTPUT Padded object size.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * BCME_BADARG Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ *
+ * Returns:
+ * A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool.
+ * objp: INPUT A pointer to the object to free.
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * stats: OUTPUT Pool statistics
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error getting statistics.
+ *
+ */
+int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ * pool: INPUT The handle to the pool
+ * b OUTPUT Output buffer
+ *
+ * Returns:
+ * BCME_OK Ok
+ * other Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 00000000000000..9bae1c20a9bfdb
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,126 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ */
+#ifndef _bcmcdc_h_
+#define _bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+ uint32 cmd;
+ uint32 len;
+ uint32 flags;
+ uint32 status;
+} cdc_ioctl_t;
+
+
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF
+
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000
+#define CDCL_IOC_INLEN_SHIFT 16
+
+
+#define CDCF_IOC_ERROR 0x01
+#define CDCF_IOC_SET 0x02
+#define CDCF_IOC_OVL_IDX_MASK 0x3c
+#define CDCF_IOC_OVL_RSV 0x40
+#define CDCF_IOC_OVL 0x80
+#define CDCF_IOC_ACTION_MASK 0xfe
+#define CDCF_IOC_ACTION_SHIFT 1
+#define CDCF_IOC_IF_MASK 0xF000
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000
+#define CDCF_IOC_ID_SHIFT 16
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+
+
+struct bdc_header {
+ uint8 flags;
+ uint8 priority;
+ uint8 flags2;
+ uint8 dataOffset;
+};
+
+#define BDC_HEADER_LEN 4
+
+
+#define BDC_FLAG_80211_PKT 0x01
+#define BDC_FLAG_SUM_GOOD 0x04
+#define BDC_FLAG_SUM_NEEDED 0x08
+#define BDC_FLAG_EVENT_MSG 0x08
+#define BDC_FLAG_VER_MASK 0xf0
+#define BDC_FLAG_VER_SHIFT 4
+
+
+#define BDC_PRIORITY_MASK 0x07
+#define BDC_PRIORITY_FC_MASK 0xf0
+#define BDC_PRIORITY_FC_SHIFT 4
+
+
+#define BDC_FLAG2_IF_MASK 0x0f
+#define BDC_FLAG2_IF_SHIFT 0
+#define BDC_FLAG2_FC_FLAG 0x10
+
+
+
+#define BDC_PROTO_VER_1 1
+#define BDC_PROTO_VER 2
+
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK 0xf0
+#define BDC_FLAG_PAD_MASK 0x03
+#define BDC_FLAG2_PAD_SHIFT 2
+#define BDC_FLAG_PAD_SHIFT 0
+#define BDC_FLAG2_PAD_IDX 0x3c
+#define BDC_FLAG_PAD_IDX 0x03
+#define BDC_GET_PAD_LEN(hdr) \
+ ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+ ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+ (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+ ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+ (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
new file mode 100644
index 00000000000000..a35ed72aa85752
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h
@@ -0,0 +1,239 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h 316830 2012-02-23 20:29:22Z $
+ */
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+
+
+
+#define BCM_REFERENCE(data) ((void)(data))
+
+
+#define STATIC_ASSERT(expr) { \
+ \
+ typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e; \
+ \
+ typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1]; \
+}
+
+
+
+#define bcmreclaimed 0
+#define _data _data
+#define _fn _fn
+#define BCMPREATTACHDATA(_data) _data
+#define BCMPREATTACHFN(_fn) _fn
+#define _data _data
+#define _fn _fn
+#define _fn _fn
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#define CONST const
+#ifndef BCMFASTPATH
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+
+
+
+#define _data _data
+#define BCMROMDAT_NAME(_data) _data
+#define _fn _fn
+#define _fn _fn
+#define STATIC static
+#define BCMROMDAT_ARYSIZ(data) ARRAYSIZE(data)
+#define BCMROMDAT_SIZEOF(data) sizeof(data)
+#define BCMROMDAT_APATCH(data)
+#define BCMROMDAT_SPATCH(data)
+
+
+#define SI_BUS 0
+#define PCI_BUS 1
+#define PCMCIA_BUS 2
+#define SDIO_BUS 3
+#define JTAG_BUS 4
+#define USB_BUS 5
+#define SPI_BUS 6
+#define RPC_BUS 7
+
+
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+
+
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS (PCMCIA_BUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev) (BCMCHIPREV)
+#else
+#define CHIPREV(rev) (rev)
+#endif
+
+
+#define DMADDR_MASK_32 0x0
+#define DMADDR_MASK_30 0xc0000000
+#define DMADDR_MASK_0 0xffffffff
+
+#define DMADDRWIDTH_30 30
+#define DMADDRWIDTH_32 32
+#define DMADDRWIDTH_63 63
+#define DMADDRWIDTH_64 64
+
+#ifdef BCMDMA64OSL
+typedef struct {
+ uint32 loaddr;
+ uint32 hiaddr;
+} dma64addr_t;
+
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) ((_pa).hiaddr)
+#define PHYSADDRHISET(_pa, _val) \
+ do { \
+ (_pa).hiaddr = (_val); \
+ } while (0)
+#define PHYSADDRLO(_pa) ((_pa).loaddr)
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa).loaddr = (_val); \
+ } while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa) = (_val); \
+ } while (0)
+#endif
+
+
+typedef struct {
+ dmaaddr_t addr;
+ uint32 length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 4
+
+
+typedef struct {
+ void *oshdmah;
+ uint origsize;
+ uint nsegs;
+ hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+
+#define BCMEXTRAHDROOM 220
+#else
+#define BCMEXTRAHDROOM 172
+#endif
+
+
+#ifndef SDALIGN
+#define SDALIGN 32
+#endif
+
+
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif
+
+
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
+
+
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE
+#else
+#define BCMSPACE
+#define bcmspace TRUE
+#endif
+
+
+#define MAXSZ_NVRAM_VARS 4096
+
+
+
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS
+#endif
+
+#ifdef BCMUSBDEV_ENABLED
+extern uint32 gFWID;
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 00000000000000..7b0f9b165b3f0c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,487 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h 327007 2012-04-11 22:45:50Z $
+ */
+
+#ifndef _BCMDEVS_H
+#define _BCMDEVS_H
+
+
+#define VENDOR_EPIGRAM 0xfeda
+#define VENDOR_BROADCOM 0x14e4
+#define VENDOR_3COM 0x10b7
+#define VENDOR_NETGEAR 0x1385
+#define VENDOR_DIAMOND 0x1092
+#define VENDOR_INTEL 0x8086
+#define VENDOR_DELL 0x1028
+#define VENDOR_HP 0x103c
+#define VENDOR_HP_COMPAQ 0x0e11
+#define VENDOR_APPLE 0x106b
+#define VENDOR_SI_IMAGE 0x1095
+#define VENDOR_BUFFALO 0x1154
+#define VENDOR_TI 0x104c
+#define VENDOR_RICOH 0x1180
+#define VENDOR_JMICRON 0x197b
+
+
+
+#define VENDOR_BROADCOM_PCMCIA 0x02d0
+
+
+#define VENDOR_BROADCOM_SDIO 0x00BF
+
+
+#define BCM_DNGL_VID 0x0a5c
+#define BCM_DNGL_BL_PID_4328 0xbd12
+#define BCM_DNGL_BL_PID_4322 0xbd13
+#define BCM_DNGL_BL_PID_4319 0xbd16
+#define BCM_DNGL_BL_PID_43236 0xbd17
+#define BCM_DNGL_BL_PID_4332 0xbd18
+#define BCM_DNGL_BL_PID_4330 0xbd19
+#define BCM_DNGL_BL_PID_4334 0xbd1a
+#define BCM_DNGL_BL_PID_43239 0xbd1b
+#define BCM_DNGL_BL_PID_4324 0xbd1c
+#define BCM_DNGL_BL_PID_4360 0xbd1d
+
+#define BCM_DNGL_BDC_PID 0x0bdc
+#define BCM_DNGL_JTAG_PID 0x4a44
+
+
+#define BCM_HWUSB_PID_43239 43239
+
+
+#define BCM4210_DEVICE_ID 0x1072
+#define BCM4230_DEVICE_ID 0x1086
+#define BCM4401_ENET_ID 0x170c
+#define BCM3352_DEVICE_ID 0x3352
+#define BCM3360_DEVICE_ID 0x3360
+#define BCM4211_DEVICE_ID 0x4211
+#define BCM4231_DEVICE_ID 0x4231
+#define BCM4303_D11B_ID 0x4303
+#define BCM4311_D11G_ID 0x4311
+#define BCM4311_D11DUAL_ID 0x4312
+#define BCM4311_D11A_ID 0x4313
+#define BCM4328_D11DUAL_ID 0x4314
+#define BCM4328_D11G_ID 0x4315
+#define BCM4328_D11A_ID 0x4316
+#define BCM4318_D11G_ID 0x4318
+#define BCM4318_D11DUAL_ID 0x4319
+#define BCM4318_D11A_ID 0x431a
+#define BCM4325_D11DUAL_ID 0x431b
+#define BCM4325_D11G_ID 0x431c
+#define BCM4325_D11A_ID 0x431d
+#define BCM4306_D11G_ID 0x4320
+#define BCM4306_D11A_ID 0x4321
+#define BCM4306_UART_ID 0x4322
+#define BCM4306_V90_ID 0x4323
+#define BCM4306_D11DUAL_ID 0x4324
+#define BCM4306_D11G_ID2 0x4325
+#define BCM4321_D11N_ID 0x4328
+#define BCM4321_D11N2G_ID 0x4329
+#define BCM4321_D11N5G_ID 0x432a
+#define BCM4322_D11N_ID 0x432b
+#define BCM4322_D11N2G_ID 0x432c
+#define BCM4322_D11N5G_ID 0x432d
+#define BCM4329_D11N_ID 0x432e
+#define BCM4329_D11N2G_ID 0x432f
+#define BCM4329_D11N5G_ID 0x4330
+#define BCM4315_D11DUAL_ID 0x4334
+#define BCM4315_D11G_ID 0x4335
+#define BCM4315_D11A_ID 0x4336
+#define BCM4319_D11N_ID 0x4337
+#define BCM4319_D11N2G_ID 0x4338
+#define BCM4319_D11N5G_ID 0x4339
+#define BCM43231_D11N2G_ID 0x4340
+#define BCM43221_D11N2G_ID 0x4341
+#define BCM43222_D11N_ID 0x4350
+#define BCM43222_D11N2G_ID 0x4351
+#define BCM43222_D11N5G_ID 0x4352
+#define BCM43224_D11N_ID 0x4353
+#define BCM43224_D11N_ID_VEN1 0x0576
+#define BCM43226_D11N_ID 0x4354
+#define BCM43236_D11N_ID 0x4346
+#define BCM43236_D11N2G_ID 0x4347
+#define BCM43236_D11N5G_ID 0x4348
+#define BCM43225_D11N2G_ID 0x4357
+#define BCM43421_D11N_ID 0xA99D
+#define BCM4313_D11N2G_ID 0x4727
+#define BCM4330_D11N_ID 0x4360
+#define BCM4330_D11N2G_ID 0x4361
+#define BCM4330_D11N5G_ID 0x4362
+#define BCM4336_D11N_ID 0x4343
+#define BCM6362_D11N_ID 0x435f
+#define BCM4331_D11N_ID 0x4331
+#define BCM4331_D11N2G_ID 0x4332
+#define BCM4331_D11N5G_ID 0x4333
+#define BCM43237_D11N_ID 0x4355
+#define BCM43237_D11N5G_ID 0x4356
+#define BCM43227_D11N2G_ID 0x4358
+#define BCM43228_D11N_ID 0x4359
+#define BCM43228_D11N5G_ID 0x435a
+#define BCM43362_D11N_ID 0x4363
+#define BCM43239_D11N_ID 0x4370
+#define BCM4324_D11N_ID 0x4374
+#define BCM43217_D11N2G_ID 0x43a9
+#define BCM43131_D11N2G_ID 0x43aa
+#define BCM4314_D11N2G_ID 0x4364
+#define BCM43142_D11N2G_ID 0x4365
+#define BCM4334_D11N_ID 0x4380
+#define BCM4334_D11N2G_ID 0x4381
+#define BCM4334_D11N5G_ID 0x4382
+#define BCM4360_D11AC_ID 0x43a0
+#define BCM4360_D11AC2G_ID 0x43a1
+#define BCM4360_D11AC5G_ID 0x43a2
+
+
+#define BCM943228HMB_SSID_VEN1 0x0607
+#define BCM94313HMGBL_SSID_VEN1 0x0608
+#define BCM94313HMG_SSID_VEN1 0x0609
+
+
+#define BCM4335_D11AC_ID 0x43ae
+#define BCM4335_D11AC2G_ID 0x43af
+#define BCM4335_D11AC5G_ID 0x43b0
+#define BCM4352_D11AC_ID 0x43b1
+#define BCM4352_D11AC2G_ID 0x43b2
+#define BCM4352_D11AC5G_ID 0x43b3
+
+#define BCMGPRS_UART_ID 0x4333
+#define BCMGPRS2_UART_ID 0x4344
+#define FPGA_JTAGM_ID 0x43f0
+#define BCM_JTAGM_ID 0x43f1
+#define SDIOH_FPGA_ID 0x43f2
+#define BCM_SDIOH_ID 0x43f3
+#define SDIOD_FPGA_ID 0x43f4
+#define SPIH_FPGA_ID 0x43f5
+#define BCM_SPIH_ID 0x43f6
+#define MIMO_FPGA_ID 0x43f8
+#define BCM_JTAGM2_ID 0x43f9
+#define SDHCI_FPGA_ID 0x43fa
+#define BCM4402_ENET_ID 0x4402
+#define BCM4402_V90_ID 0x4403
+#define BCM4410_DEVICE_ID 0x4410
+#define BCM4412_DEVICE_ID 0x4412
+#define BCM4430_DEVICE_ID 0x4430
+#define BCM4432_DEVICE_ID 0x4432
+#define BCM4704_ENET_ID 0x4706
+#define BCM4710_DEVICE_ID 0x4710
+#define BCM47XX_AUDIO_ID 0x4711
+#define BCM47XX_V90_ID 0x4712
+#define BCM47XX_ENET_ID 0x4713
+#define BCM47XX_EXT_ID 0x4714
+#define BCM47XX_GMAC_ID 0x4715
+#define BCM47XX_USBH_ID 0x4716
+#define BCM47XX_USBD_ID 0x4717
+#define BCM47XX_IPSEC_ID 0x4718
+#define BCM47XX_ROBO_ID 0x4719
+#define BCM47XX_USB20H_ID 0x471a
+#define BCM47XX_USB20D_ID 0x471b
+#define BCM47XX_ATA100_ID 0x471d
+#define BCM47XX_SATAXOR_ID 0x471e
+#define BCM47XX_GIGETH_ID 0x471f
+#define BCM4712_MIPS_ID 0x4720
+#define BCM4716_DEVICE_ID 0x4722
+#define BCM47XX_SMBUS_EMU_ID 0x47fe
+#define BCM47XX_XOR_EMU_ID 0x47ff
+#define EPI41210_DEVICE_ID 0xa0fa
+#define EPI41230_DEVICE_ID 0xa10e
+#define JINVANI_SDIOH_ID 0x4743
+#define BCM27XX_SDIOH_ID 0x2702
+#define PCIXX21_FLASHMEDIA_ID 0x803b
+#define PCIXX21_SDIOH_ID 0x803c
+#define R5C822_SDIOH_ID 0x0822
+#define JMICRON_SDIOH_ID 0x2381
+
+
+#define BCM4306_CHIP_ID 0x4306
+#define BCM4311_CHIP_ID 0x4311
+#define BCM43111_CHIP_ID 43111
+#define BCM43112_CHIP_ID 43112
+#define BCM4312_CHIP_ID 0x4312
+#define BCM4313_CHIP_ID 0x4313
+#define BCM43131_CHIP_ID 43131
+#define BCM4315_CHIP_ID 0x4315
+#define BCM4318_CHIP_ID 0x4318
+#define BCM4319_CHIP_ID 0x4319
+#define BCM4320_CHIP_ID 0x4320
+#define BCM4321_CHIP_ID 0x4321
+#define BCM43217_CHIP_ID 43217
+#define BCM4322_CHIP_ID 0x4322
+#define BCM43221_CHIP_ID 43221
+#define BCM43222_CHIP_ID 43222
+#define BCM43224_CHIP_ID 43224
+#define BCM43225_CHIP_ID 43225
+#define BCM43227_CHIP_ID 43227
+#define BCM43228_CHIP_ID 43228
+#define BCM43226_CHIP_ID 43226
+#define BCM43231_CHIP_ID 43231
+#define BCM43234_CHIP_ID 43234
+#define BCM43235_CHIP_ID 43235
+#define BCM43236_CHIP_ID 43236
+#define BCM43237_CHIP_ID 43237
+#define BCM43238_CHIP_ID 43238
+#define BCM43239_CHIP_ID 43239
+#define BCM43420_CHIP_ID 43420
+#define BCM43421_CHIP_ID 43421
+#define BCM43428_CHIP_ID 43428
+#define BCM43431_CHIP_ID 43431
+#define BCM43460_CHIP_ID 43460
+#define BCM4325_CHIP_ID 0x4325
+#define BCM4328_CHIP_ID 0x4328
+#define BCM4329_CHIP_ID 0x4329
+#define BCM4331_CHIP_ID 0x4331
+#define BCM4336_CHIP_ID 0x4336
+#define BCM43362_CHIP_ID 43362
+#define BCM4330_CHIP_ID 0x4330
+#define BCM6362_CHIP_ID 0x6362
+#define BCM4314_CHIP_ID 0x4314
+#define BCM43142_CHIP_ID 43142
+#define BCM4324_CHIP_ID 0x4324
+#define BCM43242_CHIP_ID 43242
+#define BCM4334_CHIP_ID 0x4334
+#define BCM4360_CHIP_ID 0x4360
+#define BCM4352_CHIP_ID 0x4352
+#define BCM43526_CHIP_ID 0xAA06
+
+#define BCM4335_CHIP_ID 0x4335
+
+#define BCM4342_CHIP_ID 4342
+#define BCM4402_CHIP_ID 0x4402
+#define BCM4704_CHIP_ID 0x4704
+#define BCM4706_CHIP_ID 0x5300
+#define BCM4710_CHIP_ID 0x4710
+#define BCM4712_CHIP_ID 0x4712
+#define BCM4716_CHIP_ID 0x4716
+#define BCM47162_CHIP_ID 47162
+#define BCM4748_CHIP_ID 0x4748
+#define BCM4749_CHIP_ID 0x4749
+#define BCM4785_CHIP_ID 0x4785
+#define BCM5350_CHIP_ID 0x5350
+#define BCM5352_CHIP_ID 0x5352
+#define BCM5354_CHIP_ID 0x5354
+#define BCM5365_CHIP_ID 0x5365
+#define BCM5356_CHIP_ID 0x5356
+#define BCM5357_CHIP_ID 0x5357
+#define BCM53572_CHIP_ID 53572
+
+
+#define BCM4303_PKG_ID 2
+#define BCM4309_PKG_ID 1
+#define BCM4712LARGE_PKG_ID 0
+#define BCM4712SMALL_PKG_ID 1
+#define BCM4712MID_PKG_ID 2
+#define BCM4328USBD11G_PKG_ID 2
+#define BCM4328USBDUAL_PKG_ID 3
+#define BCM4328SDIOD11G_PKG_ID 4
+#define BCM4328SDIODUAL_PKG_ID 5
+#define BCM4329_289PIN_PKG_ID 0
+#define BCM4329_182PIN_PKG_ID 1
+#define BCM5354E_PKG_ID 1
+#define BCM4716_PKG_ID 8
+#define BCM4717_PKG_ID 9
+#define BCM4718_PKG_ID 10
+#define BCM5356_PKG_NONMODE 1
+#define BCM5358U_PKG_ID 8
+#define BCM5358_PKG_ID 9
+#define BCM47186_PKG_ID 10
+#define BCM5357_PKG_ID 11
+#define BCM5356U_PKG_ID 12
+#define BCM53572_PKG_ID 8
+#define BCM5357C0_PKG_ID 8
+#define BCM47188_PKG_ID 9
+#define BCM5358C0_PKG_ID 0xa
+#define BCM5356C0_PKG_ID 0xb
+#define BCM4331TT_PKG_ID 8
+#define BCM4331TN_PKG_ID 9
+#define BCM4331TNA0_PKG_ID 0xb
+#define BCM4706L_PKG_ID 1
+
+#define HDLSIM5350_PKG_ID 1
+#define HDLSIM_PKG_ID 14
+#define HWSIM_PKG_ID 15
+#define BCM43224_FAB_CSM 0x8
+#define BCM43224_FAB_SMIC 0xa
+#define BCM4336_WLBGA_PKG_ID 0x8
+#define BCM4330_WLBGA_PKG_ID 0x0
+#define BCM4314PCIE_ARM_PKG_ID (8 | 0)
+#define BCM4314SDIO_PKG_ID (8 | 1)
+#define BCM4314PCIE_PKG_ID (8 | 2)
+#define BCM4314SDIO_ARM_PKG_ID (8 | 3)
+#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4)
+#define BCM4314DEV_PKG_ID (8 | 6)
+
+#define PCIXX21_FLASHMEDIA0_ID 0x8033
+#define PCIXX21_SDIOH0_ID 0x8034
+
+
+#define BFL_BTC2WIRE 0x00000001
+#define BFL_BTCOEX 0x00000001
+#define BFL_PACTRL 0x00000002
+#define BFL_AIRLINEMODE 0x00000004
+#define BFL_ADCDIV 0x00000008
+#define BFL_RFPLL 0x00000008
+#define BFL_ENETROBO 0x00000010
+#define BFL_NOPLLDOWN 0x00000020
+#define BFL_CCKHIPWR 0x00000040
+#define BFL_ENETADM 0x00000080
+#define BFL_ENETVLAN 0x00000100
+#define BFL_UNUSED 0x00000200
+#define BFL_NOPCI 0x00000400
+#define BFL_FEM 0x00000800
+#define BFL_EXTLNA 0x00001000
+#define BFL_HGPA 0x00002000
+#define BFL_BTC2WIRE_ALTGPIO 0x00004000
+#define BFL_ALTIQ 0x00008000
+#define BFL_NOPA 0x00010000
+#define BFL_RSSIINV 0x00020000
+#define BFL_PAREF 0x00040000
+#define BFL_3TSWITCH 0x00080000
+#define BFL_PHASESHIFT 0x00100000
+#define BFL_BUCKBOOST 0x00200000
+#define BFL_FEM_BT 0x00400000
+#define BFL_NOCBUCK 0x00800000
+#define BFL_CCKFAVOREVM 0x01000000
+#define BFL_PALDO 0x02000000
+#define BFL_LNLDO2_2P5 0x04000000
+#define BFL_FASTPWR 0x08000000
+#define BFL_UCPWRCTL_MININDX 0x08000000
+#define BFL_EXTLNA_5GHz 0x10000000
+#define BFL_TRSW_1by2 0x20000000
+#define BFL_LO_TRSW_R_5GHz 0x40000000
+#define BFL_ELNA_GAINDEF 0x80000000
+#define BFL_EXTLNA_TX 0x20000000
+
+
+#define BFL2_RXBB_INT_REG_DIS 0x00000001
+#define BFL2_APLL_WAR 0x00000002
+#define BFL2_TXPWRCTRL_EN 0x00000004
+#define BFL2_2X4_DIV 0x00000008
+#define BFL2_5G_PWRGAIN 0x00000010
+#define BFL2_PCIEWAR_OVR 0x00000020
+#define BFL2_CAESERS_BRD 0x00000040
+#define BFL2_BTC3WIRE 0x00000080
+#define BFL2_BTCLEGACY 0x00000080
+#define BFL2_SKWRKFEM_BRD 0x00000100
+#define BFL2_SPUR_WAR 0x00000200
+#define BFL2_GPLL_WAR 0x00000400
+#define BFL2_TRISTATE_LED 0x00000800
+#define BFL2_SINGLEANT_CCK 0x00001000
+#define BFL2_2G_SPUR_WAR 0x00002000
+#define BFL2_BPHY_ALL_TXCORES 0x00004000
+#define BFL2_FCC_BANDEDGE_WAR 0x00008000
+#define BFL2_GPLL_WAR2 0x00010000
+#define BFL2_IPALVLSHIFT_3P3 0x00020000
+#define BFL2_INTERNDET_TXIQCAL 0x00040000
+#define BFL2_XTALBUFOUTEN 0x00080000
+
+
+
+#define BFL2_ANAPACTRL_2G 0x00100000
+#define BFL2_ANAPACTRL_5G 0x00200000
+#define BFL2_ELNACTRL_TRSW_2G 0x00400000
+#define BFL2_BT_SHARE_ANT0 0x00800000
+#define BFL2_TEMPSENSE_HIGHER 0x01000000
+#define BFL2_BTC3WIREONLY 0x02000000
+#define BFL2_PWR_NOMINAL 0x04000000
+#define BFL2_EXTLNA_PWRSAVE 0x08000000
+
+#define BFL2_4313_RADIOREG 0x10000000
+
+#define BFL2_SDR_EN 0x20000000
+
+
+#define BOARD_GPIO_BTC3W_IN 0x850
+#define BOARD_GPIO_BTC3W_OUT 0x020
+#define BOARD_GPIO_BTCMOD_IN 0x010
+#define BOARD_GPIO_BTCMOD_OUT 0x020
+#define BOARD_GPIO_BTC_IN 0x080
+#define BOARD_GPIO_BTC_OUT 0x100
+#define BOARD_GPIO_PACTRL 0x200
+#define BOARD_GPIO_12 0x1000
+#define BOARD_GPIO_13 0x2000
+#define BOARD_GPIO_BTC4_IN 0x0800
+#define BOARD_GPIO_BTC4_BT 0x2000
+#define BOARD_GPIO_BTC4_STAT 0x4000
+#define BOARD_GPIO_BTC4_WLAN 0x8000
+#define BOARD_GPIO_1_WLAN_PWR 0x02
+#define BOARD_GPIO_3_WLAN_PWR 0x08
+#define BOARD_GPIO_4_WLAN_PWR 0x10
+
+#define GPIO_BTC4W_OUT_4312 0x010
+#define GPIO_BTC4W_OUT_43224 0x020
+#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0
+#define GPIO_BTC4W_OUT_43225 0x0e0
+#define GPIO_BTC4W_OUT_43421 0x020
+#define GPIO_BTC4W_OUT_4313 0x060
+#define GPIO_BTC4W_OUT_4331_SHARED 0x010
+
+#define PCI_CFG_GPIO_SCS 0x10
+#define PCI_CFG_GPIO_HWRAD 0x20
+#define PCI_CFG_GPIO_XTAL 0x40
+#define PCI_CFG_GPIO_PLL 0x80
+
+
+#define PLL_DELAY 150
+#define FREF_DELAY 200
+#define MIN_SLOW_CLK 32
+#define XTAL_ON_DELAY 1000
+
+
+
+#define GPIO_NUMPINS 32
+
+
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329 0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+
+
+#define MUXENAB_UART 0x00000001
+#define MUXENAB_GPIO 0x00000002
+#define MUXENAB_ERCX 0x00000004
+#define MUXENAB_JTAG 0x00000008
+#define MUXENAB_HOST_WAKE 0x00000010
+#define MUXENAB_I2S_EN 0x00000020
+#define MUXENAB_I2S_MASTER 0x00000040
+#define MUXENAB_I2S_FULL 0x00000080
+#define MUXENAB_SFLASH 0x00000100
+#define MUXENAB_RFSWCTRL0 0x00000200
+#define MUXENAB_RFSWCTRL1 0x00000400
+#define MUXENAB_RFSWCTRL2 0x00000800
+#define MUXENAB_SECI 0x00001000
+#define MUXENAB_BT_LEGACY 0x00002000
+#define MUXENAB_HOST_WAKE1 0x00004000
+
+
+#define FLASH_KERNEL_NFLASH 0x00000001
+#define FLASH_BOOT_NFLASH 0x00000002
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h
new file mode 100644
index 00000000000000..22eb7dbcb9523e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h
@@ -0,0 +1,278 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmendian.h 241182 2011-02-17 21:50:03Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+
+#define BCMSWAP16(val) \
+ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+ (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+
+#define BCMSWAP32(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
+ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
+ (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+
+#define BCMSWAP32BY16(val) \
+ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+ (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define hton16(i) bcmswap16(i)
+#define HTON32(i) BCMSWAP32(i)
+#define hton32(i) bcmswap32(i)
+#define NTOH16(i) BCMSWAP16(i)
+#define ntoh16(i) bcmswap16(i)
+#define NTOH32(i) BCMSWAP32(i)
+#define ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#endif
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+
+#define load32_ua(a) ltoh32_ua(a)
+#define store32_ua(a, v) htol32_ua_store(v, a)
+#define load16_ua(a) ltoh16_ua(a)
+#define store16_ua(a, v) htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+ sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+ sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+ *(uint8 *)0)
+
+#ifdef __GNUC__
+
+
+
+#define bcmswap16(val) ({ \
+ uint16 _val = (val); \
+ BCMSWAP16(_val); \
+})
+
+#define bcmswap32(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+ uint32 _val = (val); \
+ BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+ uint16 *_buf = (uint16 *)(buf); \
+ uint _wds = (len) / 2; \
+ while (_wds--) { \
+ *_buf = bcmswap16(*_buf); \
+ _buf++; \
+ } \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val & 0xff; \
+ _bytes[1] = (_val >> 8) & 0xff; \
+ _bytes[2] = (_val >> 16) & 0xff; \
+ _bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+ uint16 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 8; \
+ _bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+ uint32 _val = (val); \
+ uint8 *_bytes = (uint8 *)(bytes); \
+ _bytes[0] = _val >> 24; \
+ _bytes[1] = (_val >> 16) & 0xff; \
+ _bytes[2] = (_val >> 8) & 0xff; \
+ _bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+ const uint8 *_bytes = (const uint8 *)(bytes); \
+ _NTOH32_UA(_bytes); \
+})
+
+#else
+
+
+static INLINE uint16
+bcmswap16(uint16 val) /* reverse the byte order of a 16-bit value (non-__GNUC__ fallback) */
+{
+ return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val) /* reverse the byte order of a 32-bit value (non-__GNUC__ fallback) */
+{
+ return BCMSWAP32(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val) /* swap the upper and lower 16-bit halves of a 32-bit value */
+{
+ return BCMSWAP32BY16(val);
+}
+
+
+
+
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len) /* byte-swap each 16-bit word in buf; len is in BYTES */
+{
+ len = len / 2; /* bytes -> 16-bit word count; a trailing odd byte is silently ignored */
+
+ while (len--) {
+ *buf = bcmswap16(*buf);
+ buf++;
+ }
+}
+
+
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes) /* store host 16-bit value little-endian; byte writes, so unaligned-safe */
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = val >> 8;
+}
+
+
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes) /* store host 32-bit value little-endian; byte writes, so unaligned-safe */
+{
+ bytes[0] = val & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = (val >> 16) & 0xff;
+ bytes[3] = val >> 24;
+}
+
+
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes) /* store host 16-bit value big-endian (network order); unaligned-safe */
+{
+ bytes[0] = val >> 8;
+ bytes[1] = val & 0xff;
+}
+
+
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes) /* store host 32-bit value big-endian (network order); unaligned-safe */
+{
+ bytes[0] = val >> 24;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+
+static INLINE uint16
+ltoh16_ua(const void *bytes) /* load a little-endian 16-bit value from a possibly unaligned address */
+{
+ return _LTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ltoh32_ua(const void *bytes) /* load a little-endian 32-bit value from a possibly unaligned address */
+{
+ return _LTOH32_UA((const uint8 *)bytes); /* NOTE(review): macro shifts promoted int by 24; top byte >= 0x80 shifts into the sign bit -- verify on target compilers */
+}
+
+
+static INLINE uint16
+ntoh16_ua(const void *bytes) /* load a big-endian (network order) 16-bit value from an unaligned address */
+{
+ return _NTOH16_UA((const uint8 *)bytes);
+}
+
+
+static INLINE uint32
+ntoh32_ua(const void *bytes) /* load a big-endian (network order) 32-bit value from an unaligned address */
+{
+ return _NTOH32_UA((const uint8 *)bytes); /* NOTE(review): macro shifts promoted int by 24; first byte >= 0x80 shifts into the sign bit -- verify on target compilers */
+}
+
+#endif
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
new file mode 100644
index 00000000000000..ce0e0350874320
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h
@@ -0,0 +1,179 @@
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmnvram.h 320632 2012-03-12 19:22:42Z $
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+struct nvram_header { /* on-flash NVRAM image header; NVRAM_HEADER_SIZE below says 20 bytes */
+ uint32 magic; /* NVRAM_MAGIC for a valid image */
+ uint32 len; /* image length in bytes -- presumably includes this header; TODO confirm */
+ uint32 crc_ver_init; /* packed CRC/version/init -- low byte is CRC per NVRAM_CRC_VER_MASK (0xffffff00) */
+ uint32 config_refresh; /* SDRAM config/refresh words -- semantics not visible here; confirm against nvram code */
+ uint32 config_ncdl;
+};
+
+struct nvram_tuple { /* singly-linked list node holding one name=value variable */
+ char *name; /* variable name */
+ char *value; /* variable value */
+ struct nvram_tuple *next; /* next tuple in the chain, NULL-terminated */
+};
+
+
+extern char *nvram_default_get(const char *name);
+
+
+extern int nvram_init(void *sih);
+
+
+extern int nvram_append(void *si, char *vars, uint varsz);
+
+extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
+
+
+extern int nvram_reset(void *sih);
+
+
+extern void nvram_exit(void *sih);
+
+
+extern char * nvram_get(const char *name);
+
+
+extern int nvram_resetgpio_init(void *sih);
+
+
+static INLINE char *
+nvram_safe_get(const char *name) /* like nvram_get() but never returns NULL */
+{
+ char *p = nvram_get(name);
+ return p ? p : ""; /* NOTE(review): returns a string literal as char *; callers must not write through it */
+}
+
+
+static INLINE int
+nvram_match(char *name, char *match) /* nonzero iff the variable exists and equals match exactly */
+{
+ const char *value = nvram_get(name);
+ return (value && !strcmp(value, match)); /* this header does not include <string.h>; relies on the includer for strcmp */
+}
+
+
+static INLINE int
+nvram_invmatch(char *name, char *invmatch) /* nonzero iff the variable exists and DIFFERS from invmatch */
+{
+ const char *value = nvram_get(name);
+ return (value && strcmp(value, invmatch)); /* unset variable yields 0, same as nvram_match() */
+}
+
+
+extern int nvram_set(const char *name, const char *value);
+
+
+extern int nvram_unset(const char *name);
+
+
+extern int nvram_commit(void);
+
+
+extern int nvram_getall(char *nvram_buf, int count);
+
+
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+#endif
+
+
+#define NVRAM_SOFTWARE_VERSION "1"
+
+#define NVRAM_MAGIC 0x48534C46
+#define NVRAM_CLEAR_MAGIC 0x0
+#define NVRAM_INVALID_MAGIC 0xFFFFFFFF
+#define NVRAM_VERSION 1
+#define NVRAM_HEADER_SIZE 20
+#define NVRAM_SPACE 0x8000
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION 9
+#define NVRAM_CRC_VER_MASK 0xffffff00
+
+
+#define NVRAM_START_COMPRESSED 0x400
+#define NVRAM_START 0x1000
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
+ defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
+#define IMAGE_SIZE "image_size"
+#define BOOTPARTITION "bootpartition"
+#define IMAGE_BOOT BOOTPARTITION
+#define PARTIALBOOTS "partialboots"
+#define MAXPARTIALBOOTS "maxpartialboots"
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#endif
+
+#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
+ defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
+
+#define IMAGE_BOOT "image_boot"
+#define BOOTPARTITION IMAGE_BOOT
+
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_SIZE "image_size"
+
+
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+
+
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#define POLICY_TOGGLE "toggle"
+#define LINUX_PART_TO_FLASH "linux_to_flash"
+#define LINUX_FLASH_POLICY "linux_flash_policy"
+
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
new file mode 100644
index 00000000000000..44b263cec6a1a7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h
@@ -0,0 +1,181 @@
+/*
+ * Broadcom PCI-SPI Host Controller Register Definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_PCI_SPI_H
+#define _BCM_PCI_SPI_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct { /* PCI-SPI host controller (Wishbone side) register map; each PAD expands to a unique pad<__LINE__> filler member */
+ uint32 spih_ctrl; /* 0x00 SPI Control Register */
+ uint32 spih_stat; /* 0x04 SPI Status Register */
+ uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */
+ uint32 spih_ext; /* 0x0C SPI Extension Register */
+ uint32 PAD[4]; /* 0x10-0x1F PADDING */
+
+ uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */
+ uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */
+ uint32 PAD[6]; /* 0x28-0x3F PADDING */
+
+ uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */
+ uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */
+ /* 1=Active High) */
+ uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */
+ uint32 spih_int_status; /* 0x4C SPI Interrupt Status */
+ uint32 PAD[4]; /* 0x50-0x5F PADDING */
+
+ uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */
+ uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */
+ uint32 PAD[1]; /* 0x68 PADDING */
+ uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */
+ uint32 PAD[4]; /* 0x70-0x7F PADDING */
+ uint32 PAD[8]; /* 0x80-0x9F PADDING */
+ uint32 PAD[8]; /* 0xA0-0xBF PADDING */
+ uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */
+ uint32 spih_pll_status; /* 0xC4 PLL Status Register */
+ uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */
+ uint32 spih_clk_count; /* 0xCC External Clock Count Register */
+
+} spih_regs_t; /* volatile: fields are memory-mapped hardware registers */
+
+typedef volatile struct { /* PCI bridge core register map (offsets in field comments); PAD expands per-line via __LINE__ */
+ uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */
+ uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */
+
+ uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */
+ uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */
+ uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */
+ uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */
+ uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */
+ uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */
+ uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */
+ uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */
+ uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */
+ uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */
+ uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */
+ uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */
+ uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */
+ uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */
+ uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */
+ uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */
+ uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */
+ uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */
+ uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */
+ uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */
+ uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */
+ uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */
+ uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */
+ uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */
+ uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */
+ uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */
+
+ uint32 PAD[5]; /* 0x16C-0x17F PADDING */
+
+ uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */
+ uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */
+ uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */
+ uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */
+ uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */
+ uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */
+ uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */
+ uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */
+ uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */
+ uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */
+ uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */
+ uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */
+ uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */
+ uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */
+ uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */
+ uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */
+ uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */
+ uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */
+ uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */
+ uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */
+ uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */
+ uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */
+ uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */
+ uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */
+ uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */
+ uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */
+
+ uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */
+ uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */
+ uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */
+} spih_pciregs_t; /* volatile: fields are memory-mapped hardware registers */
+
+/*
+ * PCI Core interrupt enable and status bit definitions.
+ */
+
+/* PCI Core ICR Register bit definitions */
+#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */
+#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */
+#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */
+#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */
+#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
+#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
+
+/* PCI Core ISR Register bit definitions */
+#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
+#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
+#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */
+#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
+#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
+
+/* Registers on the Wishbone bus */
+#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
+#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
+#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */
+
+/* GPIO Bit definitions */
+#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */
+#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */
+#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */
+
+/* SPI Status Register Bit definitions */
+#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */
+#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */
+#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */
+#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */
+#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */
+#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */
+
+#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */
+
+#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */
+#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */
+
+/* Spin bit loop bound check */
+#define SPI_SPIN_BOUND 0xf4240 /* 1 million */
+
+#endif /* _BCM_PCI_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h
new file mode 100644
index 00000000000000..743830768899ba
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ */
+/* "essai" (French: "trial") -- leftover scratch comment from the vendor drop */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define BCMPERF_GETICACHE_MISS(x) ((x) = 0)
+#define BCMPERF_GETICACHE_HIT(x) ((x) = 0)
+#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
new file mode 100644
index 00000000000000..36c3604ca5044e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h
@@ -0,0 +1,131 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h 320190 2012-03-09 19:13:53Z $
+ */
+
+#ifndef _sdio_api_h_
+#define _sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS (0x00)
+#define SDIOH_API_RC_FAIL (0x01)
+#define SDIOH_API_SUCCESS(status) (status == 0)
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+
+typedef int SDIOH_API_RC;
+
+/* SDio Host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+/* attach, return handler on success, NULL if failed.
+ * The handler shall be provided by all subsequent calls. No local cache
+ * cfghdl points to the starting address of pci device mapped memory
+ */
+extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+ uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+/* Helper function */
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+
+
+#if defined(BCMSDIOH_STD)
+ #define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
new file mode 100644
index 00000000000000..4c62dba18f81bc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h
@@ -0,0 +1,238 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ * export functions to client drivers
+ * abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h 326276 2012-04-06 23:16:42Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef _bcmsdh_h_
+#define _bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL 0x0001 /* Error */
+#define BCMSDH_INFO_VAL 0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+
+#if (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+/* Attach and build an interface to the underlying SD host driver.
+ * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
+ * - Returns the bcmsdh handle and virtual address base for register access.
+ * The returned handle should be used in all subsequent calls, but the bcmsdh
+ * implementation may maintain a single "default" handle (e.g. the first or
+ * most recent one) to enable single-instance implementations to pass NULL.
+ */
+
+#if defined(NDISVER) && (NDISVER >= 0x0630) && 1
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
+ void **regsva, uint irq, shared_info_t *sh);
+#else
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+#endif
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
+
+/* callback functions: client driver hooks, registered via bcmsdh_register() */
+typedef struct {
+ /* attach the client driver to the device */
+ void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ uint16 func, uint bustype, void * regsva, osl_t * osh,
+ void * param);
+ /* detach the client driver from the device */
+ void (*detach)(void *ch);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_register_oob_intr(void * dhdp);
+extern void bcmsdh_unregister_oob_intr(void);
+extern void bcmsdh_oob_intr_set(bool enable);
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif /* _bcmsdh_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
new file mode 100644
index 00000000000000..80f39003ea4a11
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h
@@ -0,0 +1,123 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h 313732 2012-02-08 19:49:00Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+
+/* Allocate/init/free per-OS private data */
+extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
+extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+/* Report (but do not halt on) a failed assertion with its location. */
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s line %d\n", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+struct sdioh_info {
+ osl_t *osh; /* osh handler */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ uint16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr; /* Common (function 0) CIS pointer */
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; /* Per-function CIS pointers */
+
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
+ struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; /* scatter/gather entries for rx */
+ bool use_rxchain; /* use sg_list-based rx chaining */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+typedef struct _BCMSDH_SDMMC_INSTANCE {
+ sdioh_info_t *sd; /* host controller state (struct sdioh_info above) */
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS]; /* Linux MMC-stack handle per IO function */
+} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
+
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
new file mode 100644
index 00000000000000..80c0a3d13eea5b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h
@@ -0,0 +1,274 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h 291086 2011-10-21 01:17:24Z $
+ */
+
+#ifndef _bcmsdpcm_h_
+#define _bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY 0x02 /* we're ready to talk to host after enable */
+#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET 6 /* Version # */
+#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET 7 /* Spare */
+#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL 15
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0 0x01
+#define SDPCM_FLAG_RESVD1 0x02
+#define SDPCM_FLAG_GSPI_TXENAB 0x04
+#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on command.
+ * Len is current or requested frame length, not
+ * including test header; sent little-endian.
+ */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */
+#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+ uint32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
+ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
+ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
+ uint32 rxdescuflo; /* receive descriptor underflows */
+ uint32 rxfifooflo; /* receive fifo overflows */
+ uint32 txfifouflo; /* transmit fifo underflows */
+ uint32 runt; /* runt (too short) frames recv'd from bus */
+ uint32 badlen; /* frame's rxh len does not match its hw tag len */
+ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
+ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */
+ uint32 rxfcrc; /* frame rx header indicates crc error */
+ uint32 rxfwoos; /* frame rx header indicates write out of sync */
+ uint32 rxfwft; /* frame rx header indicates write frame termination */
+ uint32 rxfabort; /* frame rx header indicates frame aborted */
+ uint32 woosint; /* write out of sync interrupt */
+ uint32 roosint; /* read out of sync interrupt */
+ uint32 rftermint; /* read frame terminate interrupt */
+ uint32 wftermint; /* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val) ((var) == (val))
+#define SDIODREV_GE(var, val) ((var) >= (val))
+#define SDIODREV_GT(var, val) ((var) > (val))
+#define SDIODREV_LT(var, val) ((var) < (val))
+#define SDIODREV_LE(var, val) ((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+ (SDIODREV_LT((h)->corerev, 1) ? \
+ SDIODDMAREG32((h), (dir), (chnl)) : \
+ SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+ ((dir) == DMA_TX ? \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODDMAREG(h, dir, chnl) : \
+ PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+ (SDIODREV_LT((corerev), 1) ? \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+ ((coreid) == SDIOD_CORE_ID ? \
+ SDIODFIFOREG(h, corerev) : \
+ PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0001
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+#define SDPCM_SHARED_IN_BRPT 0x0800
+#define SDPCM_SHARED_SET_BRPT 0x1000
+#define SDPCM_SHARED_PENDING_BRPT 0x2000
+
+typedef struct {
+ uint32 flags; /* SDPCM_SHARED_* version/status bits */
+ uint32 trap_addr; /* Dongle address of trap info, if any */
+ uint32 assert_exp_addr; /* Dongle address of asserted expression string */
+ uint32 assert_file_addr; /* Dongle address of assert file-name string */
+ uint32 assert_line; /* Line number of the assert */
+ uint32 console_addr; /* Address of hndrte_cons_t */
+ uint32 msgtrace_addr; /* Address of message-trace buffer, presumably -- confirm */
+ uint32 brpt_addr; /* NOTE(review): breakpoint info (see SDPCM_SHARED_*_BRPT)? confirm */
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+/* Function can be used to notify host of FW halt */
+extern void sdpcmd_fwhalt(void);
+
+#endif /* _bcmsdpcm_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
new file mode 100644
index 00000000000000..3d444f3ba2650d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ */
+#ifndef _BCM_SD_SPI_H
+#define _BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+/* Report (but do not halt on) a failed assertion with its location. */
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s line %d\n", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+
+ uint lockcount; /* nest count of sdspi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initialized */
+ bool card_init_done; /* Client SDIO interface initialized */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ bool got_hcint; /* Host Controller interrupt. */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current register transfer size */
+ uint32 cmd53_wr_data; /* Used to pass CMD53 write data */
+ uint32 card_response; /* Used to pass back response status byte */
+ uint32 card_rsp_data; /* Used to pass back response data word */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr; /* Common (function 0) CIS pointer */
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; /* Per-function CIS pointers */
+ void *dma_buf; /* DMA buffer */
+ ulong dma_phys; /* DMA buffer physical address */
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
new file mode 100644
index 00000000000000..8acc0044d5027f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h
@@ -0,0 +1,248 @@
+/*
+ * 'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h 324797 2012-03-30 11:02:00Z $
+ */
+#ifndef _BCM_SD_STD_H
+#define _BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd);
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+#define SDIOH_MODE_SD1 1
+#define SDIOH_MODE_SD4 2
+
+#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK 1
+#define SDIOH_TYPE_BCM27XX 2
+#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS 0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd) do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+#define USE_FIFO 0x8 /* Fifo vs non-fifo */
+
+#define CLIENT_INTR 0x100 /* Get rid of this! */
+
+#define HC_INTR_RETUNING 0x1000
+
+
+/* Per-instance state for the 'standard' SDIO host controller driver. */
+struct sdioh_info {
+	uint cfg_bar;			/* pci cfg address for bar */
+	uint32 caps;			/* cached value of capabilities reg */
+	uint32 curr_caps;		/* max current capabilities reg */
+
+	osl_t *osh;			/* osh handler */
+	volatile char *mem_space;	/* pci device memory va */
+	uint lockcount;			/* nest count of sdstd_lock() calls */
+	bool client_intr_enabled;	/* client interrupt connected/enabled flag */
+	bool intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t intr_handler;	/* registered interrupt handler */
+	void *intr_handler_arg;		/* argument to call interrupt handler */
+	bool initialized;		/* card initialized */
+	uint target_dev;		/* Target device ID */
+	uint16 intmask;			/* Current active interrupts */
+	void *sdos_info;		/* Pointer to per-OS private data */
+
+	uint32 controller_type;		/* Host controller type */
+	uint8 version;			/* Host Controller Spec Compliance Version */
+	uint irq;			/* Client irq */
+	int intrcount;			/* Client interrupts */
+	int local_intrcount;		/* Controller interrupts */
+	bool host_init_done;		/* Controller initialized */
+	bool card_init_done;		/* Client SDIO interface initialized */
+	bool polled_mode;		/* polling for command completion */
+
+	bool sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+					/* Must be on for sd_multiblock to be effective */
+	bool use_client_ints;		/* If this is false, make sure to restore */
+					/* polling hack in wl_linux.c:wl_timer() */
+	int adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int sd_mode;			/* SD1/SD4/SPI */
+	int client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 data_xfer_count;		/* Current transfer */
+	uint16 card_rca;		/* Current Address */
+	int8 sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8 num_funcs;		/* Supported funcs on client */
+	uint32 com_cis_ptr;		/* common CIS pointer */
+	uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];	/* per-function CIS pointers */
+	void *dma_buf;			/* DMA Buffer virtual address */
+	ulong dma_phys;			/* DMA Buffer physical address */
+	void *adma2_dscr_buf;		/* ADMA2 Descriptor Buffer virtual address */
+	ulong adma2_dscr_phys;		/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void *dma_start_buf;
+	ulong dma_start_phys;
+	uint alloced_dma_size;
+	void *adma2_dscr_start_buf;
+	ulong adma2_dscr_start_phys;
+	uint alloced_adma2_dscr_size;
+
+	int r_cnt;			/* rx count */
+	int t_cnt;			/* tx count */
+	bool got_hcint;			/* local interrupt flag */
+	uint16 last_intrstatus;		/* to cache intrstatus */
+	int	host_UHSISupported;	/* whether UHSI is supported for HC. */
+	int	card_UHSI_voltage_Supported;	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
+					 */
+	volatile int	sd3_dat_state;	/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state;	/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd;	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+};
+
+#define DMA_MODE_NONE 0
+#define DMA_MODE_SDMA 1
+#define DMA_MODE_ADMA1 2
+#define DMA_MODE_ADMA2 3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO -1
+
+#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 0
+#define TUNING_START 1
+#define TUNING_START_AFTER_DAT 2
+#define TUNING_ONGOING 3
+
+#define DATA_TRANSFER_IDLE 0
+#define DATA_TRANSFER_ONGOING 1
+
+#define CHECK_TUNING_PRE_DATA 1
+#define CHECK_TUNING_POST_DATA 2
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h
new file mode 100644
index 00000000000000..e226cb102292e3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_SPI_H
+#define _BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
new file mode 100644
index 00000000000000..0975dc8acdad93
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h
@@ -0,0 +1,139 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCM_SPI_BRCM_H
+#define _BCM_SPI_BRCM_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+/* Per-instance state for the BCMSDH->gSPI translation layer driver. */
+struct sdioh_info {
+	uint cfg_bar;			/* pci cfg address for bar */
+	uint32 caps;			/* cached value of capabilities reg */
+	void *bar0;			/* BAR0 for PCI Device */
+	osl_t *osh;			/* osh handler */
+	void *controller;		/* Pointer to SPI Controller's private data struct */
+
+	uint lockcount;			/* nest count of spi_lock() calls */
+	bool client_intr_enabled;	/* client interrupt connected/enabled flag */
+	bool intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t intr_handler;	/* registered interrupt handler */
+	void *intr_handler_arg;		/* argument to call interrupt handler */
+	bool initialized;		/* card initialized */
+	uint32 target_dev;		/* Target device ID */
+	uint32 intmask;			/* Current active interrupts */
+	void *sdos_info;		/* Pointer to per-OS private data */
+
+	uint32 controller_type;		/* Host controller type */
+	uint8 version;			/* Host Controller Spec Compliance Version */
+	uint irq;			/* Client irq */
+	uint32 intrcount;		/* Client interrupts */
+	uint32 local_intrcount;		/* Controller interrupts */
+	bool host_init_done;		/* Controller initialized */
+	bool card_init_done;		/* Client SDIO interface initialized */
+	bool polled_mode;		/* polling for command completion */
+
+	bool sd_use_dma;		/* DMA on CMD53 */
+	bool sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+					/* Must be on for sd_multiblock to be effective */
+	bool use_client_ints;		/* If this is false, make sure to restore */
+					/* polling hack in wl_linux.c:wl_timer() */
+	int adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int sd_mode;			/* SD1/SD4/SPI */
+	int client_block_size[SPI_MAX_IOFUNCS];		/* Blocksize */
+	uint32 data_xfer_count;		/* Current transfer */
+	uint16 card_rca;		/* Current Address */
+	uint8 num_funcs;		/* Supported funcs on client */
+	uint32 card_dstatus;		/* 32bit device status */
+	uint32 com_cis_ptr;		/* common CIS pointer */
+	uint32 func_cis_ptr[SPI_MAX_IOFUNCS];	/* per-function CIS pointers */
+	void *dma_buf;			/* DMA buffer virtual address */
+	ulong dma_phys;			/* DMA buffer physical address */
+	int r_cnt;			/* rx count */
+	int t_cnt;			/* tx count */
+	uint32 wordlen;			/* host processor 16/32bits */
+	uint32 prev_fun;		/* previously accessed function number */
+	uint32 chip;			/* NOTE(review): presumably client chip id — confirm */
+	uint32 chiprev;			/* NOTE(review): presumably client chip rev — confirm */
+	bool resp_delay_all;		/* response-delay applies to all functions (gSPI) */
+	bool dwordmode;			/* 32-bit (dword) gSPI transfer mode flag */
+	bool resp_delay_new;		/* alternate response-delay scheme flag */
+
+	struct spierrstats_t spierrstats;	/* gSPI error statistics */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
+
+#endif /* _BCM_SPI_BRCM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
new file mode 100644
index 00000000000000..074601f5ec0355
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h
@@ -0,0 +1,606 @@
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_fmt.h 326494 2012-04-09 13:29:57Z $
+ */
+
+#ifndef _bcmsrom_fmt_h_
+#define _bcmsrom_fmt_h_
+
+#define	SROM_MAXREV		11	/* max revision supported by driver */
+
+/* Maximum srom: 6 Kilobits == 768 bytes */
+#define SROM_MAX 768
+#define SROM_MAXW 384
+#define VARS_MAX 4096
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+
+#define SROM_WORDS 64
+
+#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
+
+#define SROM_SSID 2
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET0 39
+#define SROM_MACMID_ET0 40
+#define SROM_MACLO_ET0 41
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM2_ENETPHY 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIE functions and 440 bytes
+ * of useable srom i.e. the useable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+#define SROM4_AA2G_MASK 0x00ff
+#define SROM4_AA2G_SHIFT 0
+#define SROM4_AA5G_MASK 0xff00
+#define SROM4_AA5G_SHIFT 8
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define B2G_MAXP_MASK 0xff
+#define B2G_ITT_SHIFT 8
+#define B5G_MAXP_MASK 0xff
+#define B5G_ITT_SHIFT 8
+#define B5GH_MAXP_MASK 0xff
+#define B5GL_MAXP_SHIFT 8
+
+/* All the myriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+
+/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4Kilobit OTP with hardware redundancy.
+ */
+
+#define SROM8_SIGN 64
+
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+#define SROM8_EXTLNAGAIN 93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 94
+
+/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */
+#define SROM8_MPWR_1_AND_2 95
+
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the myriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 spaces */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA parameters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
+
+#define SROM8_CRCREV 219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+#define SROM8_RXGAINERR_2G 205
+#define SROM8_RXGAINERR_5GL 206
+#define SROM8_RXGAINERR_5GM 207
+#define SROM8_RXGAINERR_5GH 208
+#define SROM8_RXGAINERR_5GU 209
+#define SROM8_SUBBAND_PPR 210
+#define SROM8_PCIEINGRESS_WAR 211
+#define SROM9_SAR 212
+
+#define SROM8_NOISELVL_2G 213
+#define SROM8_NOISELVL_5GL 214
+#define SROM8_NOISELVL_5GM 215
+#define SROM8_NOISELVL_5GH 216
+#define SROM8_NOISELVL_5GU 217
+
+#define SROM9_REV_CRC 219
+
+#define SROM10_CCKPWROFFSET 218
+#define SROM10_SIGN 219
+#define SROM10_SWCTRLMAP_2G 220
+#define SROM10_CRCREV 229
+
+#define SROM10_WORDS 230
+#define SROM10_SIGNATURE SROM4_SIGNATURE
+
+
+/* SROM REV 11 */
+#define SROM11_BREV 65
+
+#define SROM11_BFL0 66
+#define SROM11_BFL1 67
+#define SROM11_BFL2 68
+#define SROM11_BFL3 69
+#define SROM11_BFL4 70
+#define SROM11_BFL5 71
+
+#define SROM11_MACHI 72
+#define SROM11_MACMID 73
+#define SROM11_MACLO 74
+
+#define SROM11_CCODE 75
+#define SROM11_REGREV 76
+
+#define SROM11_LEDBH10 77
+#define SROM11_LEDBH32 78
+
+#define SROM11_LEDDC 79
+
+#define SROM11_AA 80
+
+#define SROM11_AGBG10 81
+#define SROM11_AGBG2A0 82
+#define SROM11_AGA21 83
+
+#define SROM11_TXRXC 84
+
+#define SROM11_FEM_CFG1 85
+#define SROM11_FEM_CFG2 86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK 0xf800
+#define SROM11_FEMCTRL_SHIFT 11
+#define SROM11_PAPDCAP_MASK 0x0400
+#define SROM11_PAPDCAP_SHIFT 10
+#define SROM11_TWORANGETSSI_MASK 0x0200
+#define SROM11_TWORANGETSSI_SHIFT 9
+#define SROM11_PDGAIN_MASK 0x01f0
+#define SROM11_PDGAIN_SHIFT 4
+#define SROM11_EPAGAIN_MASK 0x000e
+#define SROM11_EPAGAIN_SHIFT 1
+#define SROM11_TSSIPOSSLOPE_MASK 0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT 0
+#define SROM11_GAINCTRLSPH_MASK 0xf800
+#define SROM11_GAINCTRLSPH_SHIFT 11
+
+#define SROM11_THERMAL 87
+#define SROM11_MPWR_RAWTS 88
+#define SROM11_TS_SLP_OPT_CORRX 89
+#define SROM11_PHYCAL_TEMPDELTA 92
+#define SROM11_MPWR_1_AND_2 93
+
+/* Masks and offsets for Thermal parameters */
+#define SROM11_TEMPS_PERIOD_MASK 0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT 4
+#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT 0
+#define SROM11_TEMPCORRX_MASK 0xfc
+#define SROM11_TEMPCORRX_SHIFT 2
+#define SROM11_TEMPSENSE_OPTION_MASK 0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT 0
+
+#define SROM11_PDOFF_40M_A0 101
+#define SROM11_PDOFF_40M_A1 102
+#define SROM11_PDOFF_40M_A2 103
+#define SROM11_PDOFF_80M_A0 104
+#define SROM11_PDOFF_80M_A1 105
+#define SROM11_PDOFF_80M_A2 106
+
+#define SROM11_SUBBAND5GVER 107
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_11 3
+#define SROM11_PATH0 108
+#define SROM11_PATH1 128
+#define SROM11_PATH2 148
+
+#define SROM11_2G_MAXP 0
+#define SROM11_2G_PA 1
+#define SROM11_RXGAINS 5
+#define SROM11_5GB1B0_MAXP 6
+#define SROM11_5GB3B2_MAXP 7
+#define SROM11_5GB0_PA 8
+#define SROM11_5GB1_PA 11
+#define SROM11_5GB2_PA 14
+#define SROM11_5GB3_PA 17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS2GTRISOA_MASK 0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT 3
+#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT 3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO 168
+#define SROM11_CCKBW20UL2GPO 169
+#define SROM11_MCSBW202GPO 170
+#define SROM11_MCSBW202GPO_1 171
+#define SROM11_MCSBW402GPO 172
+#define SROM11_MCSBW402GPO_1 173
+#define SROM11_DOT11AGOFDMHRBW202GPO 174
+#define SROM11_OFDMLRBW202GPO 175
+
+#define SROM11_MCSBW205GLPO 176
+#define SROM11_MCSBW205GLPO_1 177
+#define SROM11_MCSBW405GLPO 178
+#define SROM11_MCSBW405GLPO_1 179
+#define SROM11_MCSBW805GLPO 180
+#define SROM11_MCSBW805GLPO_1 181
+#define SROM11_MCSBW1605GLPO 182
+#define SROM11_MCSBW1605GLPO_1 183
+#define SROM11_MCSBW205GMPO 184
+#define SROM11_MCSBW205GMPO_1 185
+#define SROM11_MCSBW405GMPO 186
+#define SROM11_MCSBW405GMPO_1 187
+#define SROM11_MCSBW805GMPO 188
+#define SROM11_MCSBW805GMPO_1 189
+#define SROM11_MCSBW1605GMPO 190
+#define SROM11_MCSBW1605GMPO_1 191
+#define SROM11_MCSBW205GHPO 192
+#define SROM11_MCSBW205GHPO_1 193
+#define SROM11_MCSBW405GHPO 194
+#define SROM11_MCSBW405GHPO_1 195
+#define SROM11_MCSBW805GHPO 196
+#define SROM11_MCSBW805GHPO_1 197
+#define SROM11_MCSBW1605GHPO 198
+#define SROM11_MCSBW1605GHPO_1 199
+
+#define SROM11_MCSLR5GLPO 200
+#define SROM11_MCSLR5GMPO 201
+#define SROM11_MCSLR5GHPO 202
+
+#define SROM11_SB20IN40HRPO 203
+#define SROM11_SB20IN80AND160HR5GLPO 204
+#define SROM11_SB40AND80HR5GLPO 205
+#define SROM11_SB20IN80AND160HR5GMPO 206
+#define SROM11_SB40AND80HR5GMPO 207
+#define SROM11_SB20IN80AND160HR5GHPO 208
+#define SROM11_SB40AND80HR5GHPO 209
+#define SROM11_SB20IN40LRPO 210
+#define SROM11_SB20IN80AND160LR5GLPO 211
+#define SROM11_SB40AND80LR5GLPO 212
+#define SROM11_SB20IN80AND160LR5GMPO 213
+#define SROM11_SB40AND80LR5GMPO 214
+#define SROM11_SB20IN80AND160LR5GHPO 215
+#define SROM11_SB40AND80LR5GHPO 216
+
+#define SROM11_DOT11AGDUPHRPO 217
+#define SROM11_DOT11AGDUPLRPO 218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR 220
+#define SROM11_SAR 221
+
+#define SROM11_NOISELVL_2G 222
+#define SROM11_NOISELVL_5GL 223
+#define SROM11_NOISELVL_5GM 224
+#define SROM11_NOISELVL_5GH 225
+#define SROM11_NOISELVL_5GU 226
+
+#define SROM11_RXGAINERR_2G 227
+#define SROM11_RXGAINERR_5GL 228
+#define SROM11_RXGAINERR_5GM 229
+#define SROM11_RXGAINERR_5GH 230
+#define SROM11_RXGAINERR_5GU 231
+
+#define SROM11_SIGN 64
+#define SROM11_CRCREV 233
+
+#define SROM11_WORDS 234
+#define SROM11_SIGNATURE 0x0634
+
+/* Decoded front-end module (FEM) configuration fields (see SROM8_FEM_* masks above). */
+typedef struct {
+	uint8 tssipos;		/* TSSI positive slope, 1: positive, 0: negative */
+	uint8 extpagain;	/* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+	uint8 pdetrange;	/* support 32 combinations of different Pdet dynamic ranges */
+	uint8 triso;		/* TR switch isolation */
+	uint8 antswctrllut;	/* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
+
+#endif /* _bcmsrom_fmt_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
new file mode 100644
index 00000000000000..682b187f92b8c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h
@@ -0,0 +1,874 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsrom_tbl.h 326655 2012-04-10 05:23:43Z $
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+
+/* One row of the SROM-to-NVRAM variable map tables below. */
+typedef struct {
+	const char *name;	/* NVRAM variable name; "" continues the previous entry
+				 * (SRFL_MORE/SRFL_ARRAY), NULL terminates the table */
+	uint32 revmask;		/* SROM revisions this entry applies to — presumably one
+				 * bit per revision number, judging by table usage; confirm */
+	uint32 flags;		/* SRFL_* formatting/continuation flags */
+	uint16 off;		/* 16-bit word offset of the field within the SROM image */
+	uint16 mask;		/* bitmask selecting the field inside that word */
+} sromvar_t;
+
+#define SRFL_MORE 1 /* value continues as described by the next entry */
+#define SRFL_NOFFS 2 /* value bits can't be all one's */
+#define SRFL_PRHEX 4 /* value is in hexadecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
+#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */
+#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
+ * ONE in the array should have this flag set.
+ */
+
+/* Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ * (even multiple fields in the same word) with each entry except the last having
+ * its SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ * entries must have non-NULL name.
+ */
+
+/* PCI/PCIe NIC SROM variable map: one row per (variable, srom-revision-set).
+ * "" names continue the previous entry (see table rules above); a NULL name
+ * terminates the table. */
+static const sromvar_t pci_sromvars[] = {
+	{"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
+	{"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
+	{"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+	{"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+	{"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+	{"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+	{"", 0, 0, SROM_BFL2, 0xffff},
+	{"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+	{"", 0, 0, SROM3_BFL2, 0xffff},
+	{"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff},
+	{"", 0, 0, SROM4_BFL1, 0xffff},
+	{"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff},
+	{"", 0, 0, SROM5_BFL1, 0xffff},
+	{"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff},
+	{"", 0, 0, SROM8_BFL1, 0xffff},
+	{"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff},
+	{"", 0, 0, SROM4_BFL3, 0xffff},
+	{"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff},
+	{"", 0, 0, SROM5_BFL3, 0xffff},
+	{"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff},
+	{"", 0, 0, SROM8_BFL3, 0xffff},
+	{"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+
+	/* board serial number, country code, regulatory rev, LED behavior — per srom rev */
+	{"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+	{"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
+	{"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
+	{"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
+	{"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff},
+	{"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+	{"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
+	{"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff},
+	{"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff},
+	{"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff},
+	{"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
+	{"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
+	{"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
+	{"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
+	{"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
+	{"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
+	{"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
+	{"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
+	{"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
+	{"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
+	{"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
+	{"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
+	{"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
+	{"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
+	{"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
+	{"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
+	{"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
+	{"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
+	{"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
+	{"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff},
+	{"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
+	{"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
+	{"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
+	{"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
+	{"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00},
+	{"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff},
+	{"opo", 0x0000000c, 0, SROM_OPO, 0x00ff},
+	{"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff},
+	{"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
+	{"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff},
+	{"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff},
+	{"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
+	{"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00},
+	{"aa5g", 0x00000700, 0, SROM8_AA, 0xff00},
+	{"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff},
+	{"ag1", 0x0000000e, 0, SROM_AG10, 0xff00},
+	{"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff},
+	{"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00},
+	{"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff},
+	{"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00},
+	{"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff},
+	{"ag1", 0x00000700, 0, SROM8_AG10, 0xff00},
+	{"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff},
+	{"ag3", 0x00000700, 0, SROM8_AG32, 0xff00},
+	{"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
+	{"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
+	{"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
+	{"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
+	{"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
+	{"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
+	{"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
+	{"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
+	{"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
+	{"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00},
+	{"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
+	{"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
+	{"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
+	{"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
+	{"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
+	{"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
+	{"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
+	{"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
+	{"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
+	{"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
+	{"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
+	{"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
+	{"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00},
+	{"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff},
+	{"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00},
+	{"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
+	{"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
+	{"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
+	{"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
+	{"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
+	{"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800},
+	{"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700},
+	{"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0},
+	{"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f},
+	{"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
+	{"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
+	{"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
+	{"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
+	{"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800},
+	{"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700},
+	{"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0},
+	{"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f},
+	{"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff},
+	{"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00},
+	{"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
+	{"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00},
+	{"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff},
+	{"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00},
+	{"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff},
+	{"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00},
+	{"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
+	{"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
+	{"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
+	{"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
+	{"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK},
+	{"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK},
+	{"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK},
+	{"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK},
+	{"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK},
+	{"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK},
+	{"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK},
+	{"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
+	{"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK},
+	{"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK},
+	{"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK},
+	{"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK},
+	{"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
+	{"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK},
+	{"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
+	{"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
+	{"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
+	{"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
+	{"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
+	{"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
+	{"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
+	{"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
+	{"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
+	{"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
+	{"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
+	{"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
+	{"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
+	{"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
+	{"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
+	{"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
+
+	/* country code and MAC addresses (SRFL_ETHADDR implies a multi-word value) */
+	{"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
+	{"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
+	{"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+	{"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff},
+	{"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
+	{"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
+	{"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
+	{"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
+	{"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff},
+	{"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff},
+	{"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff},
+	{"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff},
+	{"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff},
+	{"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff},
+
+	/* SROM8 thermal / temperature-sense calibration fields */
+	{"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00},
+	{"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff},
+	{"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff},
+	{"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00},
+	{"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff},
+	{"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00},
+	{"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300},
+	{"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f},
+	{"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010},
+	{"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020},
+	{"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff},
+	{"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00},
+	{"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff},
+	{"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00},
+	{"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000},
+	{"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f},
+	{"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80},
+
+	/* per-rate power offsets (SROM4/SROM8 field layouts) */
+	{"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
+	{"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
+	{"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
+	{"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
+	{"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
+	{"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
+	{"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
+	{"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
+	{"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
+	{"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
+	{"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
+	{"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
+	{"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
+	{"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
+	{"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
+	{"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
+	{"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
+	{"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
+	{"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
+	{"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
+	{"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
+	{"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
+	{"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
+	{"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
+	{"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
+	{"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
+	{"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
+	{"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
+	{"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
+	{"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
+	{"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
+	{"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
+	{"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
+	{"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
+	{"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
+	{"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
+	{"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
+	{"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
+	{"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
+	{"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
+	{"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
+	{"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
+	{"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
+	{"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
+	{"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
+	{"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
+	{"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
+	{"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
+	{"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
+	{"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
+	{"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
+	{"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
+	{"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
+	{"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
+	{"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
+	{"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
+	{"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
+	{"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
+	{"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
+	{"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
+	{"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
+	{"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
+	{"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
+	{"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
+	{"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
+	{"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
+	{"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
+	{"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
+	{"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
+	{"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
+	{"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
+	{"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
+	{"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
+	{"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
+	{"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
+	{"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
+	{"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
+	{"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
+	{"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
+	{"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
+	{"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
+	{"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
+	{"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff},
+	{"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff},
+	{"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff},
+	{"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
+	{"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff},
+	{"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff},
+	{"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff},
+	{"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
+
+	/* power per rate from sromrev 9 */
+	{"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff},
+	{"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
+	{"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff},
+	{"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
+	{"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff},
+	{"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
+	{"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff},
+	{"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
+	{"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+	{"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff},
+	{"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+	{"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+	{"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff},
+	{"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+	{"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+	{"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
+	{"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+	{"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
+	{"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+	{"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
+	{"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+	{"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
+	{"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+	{"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+	{"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
+	{"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+	{"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
+	{"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+	{"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+	{"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
+	{"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+	{"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
+	{"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+	{"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff},
+	{"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+	{"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
+	{"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+	{"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff},
+	{"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+	{"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf},
+	{"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+	{"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0},
+	{"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800},
+	{"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f},
+	{"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0},
+	{"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800},
+	{"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f},
+	{"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0},
+	{"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800},
+	{"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f},
+	{"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0},
+	{"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800},
+	{"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f},
+	{"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0},
+	{"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800},
+	{"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff},
+	{"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00},
+	{"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f},
+	{"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0},
+	{"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00},
+	{"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f},
+	{"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0},
+	{"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00},
+	{"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f},
+	{"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0},
+	{"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00},
+	{"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f},
+	{"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0},
+	{"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00},
+	{"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f},
+	{"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0},
+	{"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00},
+	{"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7},
+
+	{"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff},
+	/* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */
+	{"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+	{"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff},
+	{"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff},
+	{"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff},
+	{"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff},
+	{"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff},
+	{"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff},
+	{"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff},
+	{"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff},
+
+	/* sromrev 11 */
+	/* NOTE(review): this SRFL_MORE pair reuses SROM11_BFL3 for both halves, unlike the
+	 * boardflags entries above which use consecutive BFLn/BFLn+1 words — verify upstream. */
+	{"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL3, 0xffff},
+	{"", 0, 0, SROM11_BFL3, 0xffff},
+	{"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff},
+	{"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff},
+	{"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff},
+	{"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff},
+	{"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff},
+	{"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00},
+	{"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff},
+	{"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00},
+	{"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff},
+	{"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff},
+	{"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00},
+	{"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0x00ff},
+	{"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0xff00},
+	{"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff},
+	{"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00},
+	{"aga1", 0xfffff800, 0, SROM11_AGA21, 0x00ff},
+	{"aga2", 0xfffff800, 0, SROM11_AGA21, 0xff00},
+	{"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK},
+	{"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK},
+	{"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK},
+
+	{"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001},
+	{"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e},
+	{"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0},
+	{"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200},
+	{"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400},
+	{"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800},
+
+	{"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001},
+	{"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e},
+	{"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0},
+	{"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200},
+	{"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400},
+	{"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800},
+
+	{"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00},
+	{"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff},
+	{"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff},
+	{"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00},
+	{"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+	{"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+	{"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300},
+	{"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+	{"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+	{"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000},
+	{"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f},
+	{"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80},
+
+	{"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff},
+
+	/* power per rate */
+	{"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff},
+	{"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff},
+	{"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff},
+	{"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff},
+	{"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff},
+	{"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff},
+	{"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff},
+	{"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff},
+	{"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff},
+	{"mcsbw1605glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GLPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW1605GLPO_1, 0xffff},
+	{"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff},
+	{"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff},
+	{"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff},
+	{"mcsbw1605gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GMPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW1605GMPO_1, 0xffff},
+	{"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff},
+	{"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff},
+	{"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff},
+	{"mcsbw1605ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW1605GHPO, 0xffff},
+	{"", 0xfffff800, 0, SROM11_MCSBW1605GHPO_1, 0xffff},
+	{"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xffff},
+	{"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff},
+	{"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff},
+	{"sb20in40hrrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff},
+	{"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff},
+	{"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff},
+	{"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff},
+	{"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff},
+	{"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff},
+	{"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff},
+	{"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff},
+	{"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff},
+	{"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff},
+	{"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff},
+	{"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff},
+	{"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff},
+	{"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff},
+	{"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff},
+	{"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff},
+
+	/* Misc */
+	{"pcieingress_war", 0xfffff800, 0, SROM11_PCIEINGRESS_WAR, 0xf},
+	{"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff},
+	{"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00},
+	{"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f},
+	{"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0},
+	{"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00},
+	{"noiselvl5gla0", 0xfffff800, 0, SROM11_NOISELVL_5GL, 0x001f},
+	{"noiselvl5gla1", 0xfffff800, 0, SROM11_NOISELVL_5GL, 0x03e0},
+	{"noiselvl5gla2", 0xfffff800, 0, SROM11_NOISELVL_5GL, 0x7c00},
+	{"noiselvl5gma0", 0xfffff800, 0, SROM11_NOISELVL_5GM, 0x001f},
+	{"noiselvl5gma1", 0xfffff800, 0, SROM11_NOISELVL_5GM, 0x03e0},
+	{"noiselvl5gma2", 0xfffff800, 0, SROM11_NOISELVL_5GM, 0x7c00},
+	{"noiselvl5gha0", 0xfffff800, 0, SROM11_NOISELVL_5GH, 0x001f},
+	{"noiselvl5gha1", 0xfffff800, 0, SROM11_NOISELVL_5GH, 0x03e0},
+	{"noiselvl5gha2", 0xfffff800, 0, SROM11_NOISELVL_5GH, 0x7c00},
+	{"noiselvl5gua0", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f},
+	{"noiselvl5gua1", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0},
+	{"noiselvl5gua2", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00},
+	{"rxgainerr2g", 0xfffff800, SRFL_PRHEX, SROM11_RXGAINERR_2G, 0xffff},
+	{"rxgainerr5g", 0xfffff800, SRFL_PRHEX|SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX|SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX|SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xffff},
+	{"", 0xfffff800, SRFL_PRHEX, SROM11_RXGAINERR_5GU, 0xffff},
+	{NULL, 0, 0, 0, 0} /* end-of-table sentinel */
+};
+
+/*
+ * Per-path (per-antenna-core) SROM variable descriptors.  Each entry maps
+ * an NVRAM variable name to: a bitmask of the sromrevs it applies to, its
+ * SRFL_* flags, the 16-bit word offset within the SROM image, and the bit
+ * mask within that word.  An entry with an empty name ("") continues the
+ * preceding SRFL_ARRAY variable at a further word offset.  The list is
+ * terminated by the all-NULL/zero sentinel.
+ */
+static const sromvar_t perpath_pci_sromvars[] = {
+	/* revmask 0x000000f0: SROM4-layout entries */
+ {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
+ {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
+ {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
+ {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
+ {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
+ {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
+ {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff},
+ {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff},
+ {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff},
+ {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff},
+	/* revmask 0x00000700: SROM8-layout entries (2-coefficient PA curves) */
+ {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
+ {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
+ {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
+ {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
+ {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff},
+ {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff},
+
+ /* sromrev 11 */
+ {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff},
+ {"pa2ga", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff},
+ {"rxgains2gelnagaina", 0xfffff800, 0, SROM11_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0xfffff800, 0, SROM11_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0xfffff800, 0, SROM11_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0xfffff800, 0, SROM11_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0xfffff800, 0, SROM11_RXGAINS, 0x8000},
+	/* maxp5ga: 4-element array, two sub-bands packed per 16-bit word */
+ {"maxp5ga", 0xfffff800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff},
+ {"", 0xfffff800, 0, SROM11_5GB3B2_MAXP, 0xff00},
+	/* pa5ga: 3 PA coefficients for each of the 4 5G sub-bands (12 words) */
+ {"pa5ga", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff},
+
+ {NULL, 0, 0, 0, 0}
+};
+
+/* Fallback PHY type codes, used only when the PHY headers that normally
+ * define them are not included in this build.
+ */
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP))
+#define PHY_TYPE_HT 7 /* HT-Phy value */
+#define PHY_TYPE_N 4 /* N-Phy value */
+#define PHY_TYPE_LP 5 /* LP-Phy value */
+#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N) && defined(PHY_TYPE_LP)) */
+#if !defined(PHY_TYPE_AC)
+#define PHY_TYPE_AC 11 /* AC-Phy value */
+#endif /* !defined(PHY_TYPE_AC) */
+#if !defined(PHY_TYPE_NULL)
+#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
+#endif /* !defined(PHY_TYPE_NULL) */
+
+/* Key for looking up the PA (power-amplifier) parameter variable names
+ * that apply to a given (PHY type, band range, tx chain) combination.
+ */
+typedef struct {
+ uint16 phy_type; /* PHY_TYPE_* code */
+ uint16 bandrange; /* WL_CHAN_FREQ_RANGE_* code */
+ uint16 chain; /* tx chain / antenna core index */
+ const char *vars; /* space-separated NVRAM variable names */
+} pavars_t;
+
+/*
+ * PA-parameter variable-name table: for each (phy_type, bandrange, chain)
+ * triple, the space-separated list of NVRAM variables holding that chain's
+ * PA coefficients.  Terminated by the PHY_TYPE_NULL sentinel entry.
+ */
+static const pavars_t pavars[] = {
+ /* HTPHY */
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+ /* BAND3 reuses the w*a3 variables of the other bands' name families */
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ /* LPPHY */
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_2G, 0, "pa0b0 pa0b1 pa0b2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GL, 0, "pa1lob0 pa1lob1 pa1lob2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GM, 0, "pa1b0 pa1b1 pa1b2"},
+ {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GH, 0, "pa1hib0 pa1hib1 pa1hib2"},
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* Key for the per-band MCS power-offset variable-name table below. */
+typedef struct {
+ uint16 phy_type; /* PHY_TYPE_* code */
+ uint16 bandrange; /* WL_CHAN_FREQ_RANGE_* code */
+ const char *vars; /* space-separated NVRAM variable names */
+} povars_t;
+
+/*
+ * Power-offset variable names per (PHY type, band range).  Only NPHY has
+ * per-band mcs*po0..7 variables here; terminated by PHY_TYPE_NULL.
+ */
+static const povars_t povars[] = {
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+ "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+ "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+ "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+ "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+ {PHY_TYPE_NULL, 0, ""}
+};
+
+/* Descriptor for one Broadcom vendor CIS tuple (see cis_hnbuvars below). */
+typedef struct {
+ uint8 tag; /* Broadcom subtag name */
+ uint32 revmask; /* Supported cis_sromrev */
+ uint8 len; /* Length field of the tuple, note that it includes the
+ * subtag name (1 byte): 1 + tuple content length
+ */
+ /* Encoded parameter list; each token appears to be a byte-length digit
+ * (with optional "*count" repeat) followed by the variable name --
+ * NOTE(review): format inferred from the entries below, confirm against
+ * the CIS parsing code.
+ */
+ const char *params;
+} cis_tuple_t;
+
+/* Pseudo-tuple tags reserved for special handling (not real HNBU_* tags). */
+#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
+#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
+#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+
+/*
+ * CIS tuple descriptor table: maps each Broadcom HNBU_* subtag (and the
+ * OTP_* pseudo-tags) to the sromrev mask it applies to, its tuple length
+ * (1 + content bytes), and the encoded list of NVRAM variables packed in
+ * the tuple body.  Variable names here must match the sromvar tables
+ * above, or the values written via CIS can never be read back.
+ * Terminated by the 0xFF sentinel entry.
+ */
+static const cis_tuple_t cis_hnbuvars[] = {
+ {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */
+ {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */
+ {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */
+ /* Unified OTP: tupple to embed USB manfid inside SDIO CIS */
+ {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"},
+ {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"},
+ /* NOTE: subdevid is also written to boardtype.
+ * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */
+ {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+ {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"},
+ {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+ {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"},
+ {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+ {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+ {HNBU_BOARDFLAGS, 0xffffffff, 13, "4boardflags 4boardflags2 4boardflags3"},
+ {HNBU_LEDS, 0xffffffff, 5, "1ledbh0 1ledbh1 1ledbh2 1ledbh3"},
+ {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"},
+ {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"},
+ {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"},
+ {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+ "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+ "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+ {HNBU_RDLID, 0xffffffff, 3, "2rdlid"},
+ {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+ "0rssisav2g 0bxa2g"}, /* special case */
+ {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+ "0rssisav5g 0bxa5g"}, /* special case */
+ {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"},
+ {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"},
+ {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+ {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"},
+ {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"},
+ {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"},
+ {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */
+ {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"},
+ {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"},
+ {HNBU_LEDDC, 0xffffffff, 3, "2leddc"},
+ {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"},
+ {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+ {HNBU_REGREV, 0xffffffff, 2, "1regrev"},
+ {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+ "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+ "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+ "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+ {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+ "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+ "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+ {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+ "4ofdm5ghpo"},
+ {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+ "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+ {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+ "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+ {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+ "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+ "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+ "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+ {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"},
+ {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"},
+ {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"},
+ {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"},
+ {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"},
+ {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"},
+ {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"},
+ {HNBU_USBFS, 0xffffffff, 2, "1usbfs"},
+ {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"},
+ {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"},
+ {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"},
+ {OTP_RAW, 0xffffffff, 0, ""}, /* special case */
+ {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+ {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"},
+ {HNBU_CCKBW202GPO, 0xffffffff, 5, "2cckbw202gpo 2cckbw20ul2gpo"},
+ /* NOTE(review): "4legofdmbw20ul2gp" looks truncated (elsewhere this
+ * family is spelled "...2gpo"); left unchanged because the matching
+ * sromvar entry is not visible here -- confirm against pci_sromvars.
+ */
+ {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gp"},
+ {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+ "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+ {HNBU_MCS2GPO, 0xffffffff, 13, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo"},
+ {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+ {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+ {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+ {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"},
+ {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"},
+ {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis "
+ "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+ "1phycal_tempdelta"}, /* special case */
+ {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"},
+ {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+ "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+ "0tssiposslope5g"}, /* special case */
+ {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
+ "1*4maxp5ga0 2*12pa5ga0"},
+ {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+ {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+ {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"},
+ {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+ "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+ {HNBU_ACPPR_2GPO, 0xfffff800, 5, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo"},
+ /* FIX: "2mcslr5rlpo" -> "2mcslr5glpo".  The SROM variable table defines
+ * mcslr5glpo/mcslr5gmpo/mcslr5ghpo; the old misspelled name could never
+ * match, so the 5G-low low-rate power offset written via this tuple was
+ * silently dropped.
+ */
+ {HNBU_ACPPR_5GPO, 0xfffff800, 31, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+ "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo"},
+ {HNBU_ACPPR_SBPO, 0xfffff800, 33, "2sb20in40hrrpo 2sb20in80and160hr5glpo "
+ "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+ "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+ "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+ "2dot11agduphrpo 2dot11agduplrpo"},
+ {HNBU_NOISELVL, 0xfffff800, 11, "2noiselvl2g 2noiselvl5gl 2noiselvl5gm "
+ "2noiselvl5gh 2noiselvl5gu"},
+ {HNBU_RXGAIN_ERR, 0xfffff800, 11, "2rxgainerr2g 2*4rxgainerr5g"},
+ {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+ {HNBU_UUID, 0xffffffff, 17, "16uuid"},
+ {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"},
+ {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+ "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+ "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+ "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */
+ {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+ "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+ "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+ "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */
+ {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+ "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+ "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+ "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */
+ {0xFF, 0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h
new file mode 100644
index 00000000000000..0225124d75e253
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h
@@ -0,0 +1,775 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h 315959 2012-02-20 18:04:48Z $
+ */
+
+#ifndef _bcmutils_h_
+#define _bcmutils_h_
+
+/* Annex-K-style *_s wrappers mapped onto the plain libc routines.
+ * NOTE(review): noOfElements is ignored, so strcpy/strcat remain
+ * unbounded -- these macros add no overflow protection.
+ */
+#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef PKTQ_LOG
+#include <wlioctl.h>
+#endif
+
+
+/* Character-class bits for the bcm_ctype[] lookup table (libc-independent
+ * <ctype.h> replacement).
+ */
+#define _BCM_U 0x01
+#define _BCM_L 0x02
+#define _BCM_D 0x04
+#define _BCM_C 0x08
+#define _BCM_P 0x10
+#define _BCM_S 0x20
+#define _BCM_X 0x40
+#define _BCM_SP 0x80
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+/* NOTE: these two evaluate their argument twice -- no side effects in c */
+#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+
+
+/* Bounded string-builder cursor used by the bcm_b* print helpers.
+ * buf/size presumably track the current write position and remaining
+ * space, origbuf/origsize the full underlying buffer -- confirm against
+ * bcm_binit/bcm_bprintf implementations.
+ */
+struct bcmstrbuf {
+ char *buf;
+ unsigned int size;
+ char *origbuf;
+ unsigned int origsize;
+};
+
+
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 0x20
+
+
+/* Busy-poll: re-evaluate exp every 10us until it is false or roughly
+ * `us` microseconds have elapsed.  NOTE(review): bare-brace multi-statement
+ * macro (not do { } while (0)) -- beware use next to if/else.
+ */
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ OSL_DELAY(10); \
+ countdown -= 10; \
+ } \
+}
+
+
+/* Packet-queue sizing defaults (overridable by the build). */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16
+#endif
+
+/* One precedence level of a packet queue: a singly linked packet list
+ * with its current length and length limit.
+ */
+typedef struct pktq_prec {
+ void *head; /* first packet to dequeue */
+ void *tail; /* last packet queued */
+ uint16 len; /* packets currently queued */
+ uint16 max; /* maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+/* Per-precedence statistics kept only in PKTQ_LOG builds. */
+typedef struct {
+ uint32 requested;
+ uint32 stored;
+ uint32 saved;
+ uint32 selfsaved;
+ uint32 full_dropped;
+ uint32 dropped;
+ uint32 sacrificed;
+ uint32 busy;
+ uint32 retry;
+ uint32 ps_retry;
+ uint32 retry_drop;
+ uint32 max_avail;
+ uint32 max_used;
+ uint32 queue_capacity;
+} pktq_counters_t;
+#endif
+
+
+/* Fields shared by struct pktq and struct spktq (must stay first so an
+ * spktq can be treated as a single-precedence pktq).
+ */
+#define PKTQ_COMMON \
+ uint16 num_prec; \
+ uint16 hi_prec; \
+ uint16 max; \
+ uint16 len;
+
+
+/* Multi-precedence packet queue. */
+struct pktq {
+ PKTQ_COMMON
+
+ struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+ pktq_counters_t _prec_cnt[PKTQ_MAX_PREC];
+#endif
+};
+
+
+/* Simple (single-precedence) packet queue; shares PKTQ_COMMON layout. */
+struct spktq {
+ PKTQ_COMMON
+
+ struct pktq_prec q[1];
+};
+
+/* Iterate precedences from highest (num_prec - 1) down to 0. */
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+
+/* Per-interface packet filter callback used by the flush routines. */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool) ((pool) && (pool)->inited)
+#if defined(BCM4329C0)
+#define SHARED_POOL (pktpool_shared_ptr)
+#else
+#define SHARED_POOL (pktpool_shared)
+#endif
+#else
+#define POOL_ENAB(bus) 0
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX 40
+#endif
+#define PKTPOOL_CB_MAX 3 /* max registered callbacks per pool */
+
+struct pktpool;
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+/* A registered callback together with its opaque argument. */
+typedef struct {
+ pktpool_cb_t cb;
+ void *arg;
+} pktpool_cbinfo_t;
+
+#ifdef BCMDBG_POOL
+
+/* Packet lifecycle states tracked by the pool debug facility. */
+#define POOL_IDLE 0
+#define POOL_RXFILL 1
+#define POOL_RXDH 2
+#define POOL_RXD11 3
+#define POOL_TXDH 4
+#define POOL_TXD11 5
+#define POOL_AMPDU 6
+#define POOL_TXENQ 7
+
+typedef struct {
+ void *p;
+ uint32 cycles;
+ uint32 dur;
+} pktpool_dbg_t;
+
+/* Counts of packets currently in each lifecycle state. */
+typedef struct {
+ uint8 txdh;
+ uint8 txd11;
+ uint8 enq;
+ uint8 rxdh;
+ uint8 rxd11;
+ uint8 rxfill;
+ uint8 idle;
+} pktpool_stats_t;
+#endif
+
+/* Preallocated packet pool.  q[] holds the free packets; r and w are
+ * presumably ring read/write indices (one slot of the PKTPOOL_LEN_MAX + 1
+ * array stays unused to distinguish full from empty -- see pktpool_len
+ * below, which reports len - 1).
+ */
+typedef struct pktpool {
+ bool inited; /* pool is initialized (checked by POOL_ENAB) */
+ uint16 r;
+ uint16 w;
+ uint16 len;
+ uint16 maxlen;
+ uint16 plen; /* size of each packet in the pool */
+ bool istx;
+ bool empty;
+ uint8 cbtoggle;
+ uint8 cbcnt; /* number of entries used in cbs[] */
+ uint8 ecbcnt; /* number of entries used in ecbs[] */
+ bool emptycb_disable;
+ pktpool_cbinfo_t *availcb_excl; /* exclusive avail callback, if any */
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX]; /* avail callbacks */
+ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; /* empty callbacks */
+ void *q[PKTPOOL_LEN_MAX + 1];
+
+#ifdef BCMDBG_POOL
+ uint8 dbg_cbcnt;
+ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+ uint16 dbg_qlen;
+ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+/* The system-wide shared pool referenced by SHARED_POOL. */
+#if defined(BCM4329C0)
+extern pktpool_t *pktpool_shared_ptr;
+#else
+extern pktpool_t *pktpool_shared;
+#endif
+
+/* Packet-pool API (implemented in bcmutils.c). */
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern uint16 pktpool_avail(pktpool_t *pktp);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+
+#define POOLPTR(pp) ((pktpool_t *)(pp))
+#define pktpool_len(pp) (POOLPTR(pp)->len - 1)
+#define pktpool_plen(pp) (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif
+
+
+
+struct ether_addr;
+
+/* Ethernet address tests (broadcast / all-zero). */
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+
+
+/* Per-precedence packet-queue accessors. */
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+/* Per-precedence enqueue/dequeue (head or tail). */
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+ ifpkt_cb_t fn, int arg);
+
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+
+
+/* Operations over a bitmap of precedences. */
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+
+
+/* Whole-queue accessors (aggregate over all precedences). */
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+
+/* Simple-queue (spktq) wrappers: operate on precedence 0 only. */
+#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+
+/* Dequeue/peek across all precedences; *prec_out receives the precedence. */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
+
+
+
+/* Packet-chain helpers (copy to/from flat buffers, length, segments). */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktoffset(osl_t *osh, void *p, uint offset);
+
+
+/* Flags reported by pktsetprio describing how the priority was derived. */
+#define PKTPRIO_VDSCP 0x100
+#define PKTPRIO_VLAN 0x200
+#define PKTPRIO_UPD 0x400
+#define PKTPRIO_DSCP 0x800
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+
+
+/* String conversion / search helpers (libc-independent). */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+
+/* Ethernet address <-> ASCII conversion. */
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+
+
+extern void bcm_mdelay(uint ms);
+
+#define NVRAM_RECLAIM_CHECK(name)
+
+/* NVRAM variable lookup helpers (vars is a NUL-separated name=value blob). */
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+/* Logging/perf stubs: compiled out in this build (expand to nothing or
+ * trivial expressions).
+ */
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define bcmlog(fmt, a1, a2)
+#define bcmdumplog(buf, size) *buf = '\0'
+#define bcmdumplogent(buf, idx) -1
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(buf, size)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+
+
+
+/* Descriptor for one iovar (I/O control variable) in a module's table. */
+typedef struct bcm_iovar {
+ const char *name; /* iovar name (lookup key) */
+ uint16 varid; /* numeric id; see IOV_GVAL/IOV_SVAL below */
+ uint16 flags;
+ uint16 type; /* IOVT_* value type */
+ uint16 minlen; /* minimum buffer length for the value */
+} bcm_iovar_t;
+
+
+
+
+#define IOV_GET 0
+#define IOV_SET 1
+
+
+/* An "actionid" packs (varid << 1) | get/set bit; these macros build and
+ * decompose it.
+ */
+#define IOV_GVAL(id) ((id) * 2)
+#define IOV_SVAL(id) ((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid) (actionid >> 1)
+
+
+
+/* Iovar table lookup and buffer-length validation. */
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif
+#endif /* BCMDRIVER */
+
+
+/* Iovar value type codes (bcm_iovar_t.type). */
+#define IOVT_VOID 0
+#define IOVT_BOOL 1
+#define IOVT_INT8 2
+#define IOVT_UINT8 3
+#define IOVT_INT16 4
+#define IOVT_UINT16 5
+#define IOVT_INT32 6
+#define IOVT_UINT32 7
+#define IOVT_BUFFER 8
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+
+/* Printable names for the IOVT_* codes, in the same order. */
+#define BCM_IOV_TYPE_INIT { \
+ "void", \
+ "bool", \
+ "int8", \
+ "uint8", \
+ "int16", \
+ "uint16", \
+ "int32", \
+ "uint32", \
+ "buffer", \
+ "" }
+
+/* True for every integer-valued IOVT_* (everything except void/buffer).
+ * NOTE: evaluates `type` multiple times.
+ */
+#define BCM_IOVT_IS_INT(type) (\
+ (type == IOVT_BOOL) || \
+ (type == IOVT_INT8) || \
+ (type == IOVT_UINT8) || \
+ (type == IOVT_INT16) || \
+ (type == IOVT_UINT16) || \
+ (type == IOVT_INT32) || \
+ (type == IOVT_UINT32))
+
+
+
+#define BCME_STRLEN 64 /* max length of a BCME error string */
+#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
+
+
+
+
+/* Broadcom error codes: 0 is success, failures are negative and dense so
+ * -code can index BCMERRSTRINGTABLE below.  BCME_LAST must track the most
+ * negative code.
+ */
+#define BCME_OK 0
+#define BCME_ERROR -1
+#define BCME_BADARG -2
+#define BCME_BADOPTION -3
+#define BCME_NOTUP -4
+#define BCME_NOTDOWN -5
+#define BCME_NOTAP -6
+#define BCME_NOTSTA -7
+#define BCME_BADKEYIDX -8
+#define BCME_RADIOOFF -9
+#define BCME_NOTBANDLOCKED -10
+#define BCME_NOCLK -11
+#define BCME_BADRATESET -12
+#define BCME_BADBAND -13
+#define BCME_BUFTOOSHORT -14
+#define BCME_BUFTOOLONG -15
+#define BCME_BUSY -16
+#define BCME_NOTASSOCIATED -17
+#define BCME_BADSSIDLEN -18
+#define BCME_OUTOFRANGECHAN -19
+#define BCME_BADCHAN -20
+#define BCME_BADADDR -21
+#define BCME_NORESOURCE -22
+#define BCME_UNSUPPORTED -23
+#define BCME_BADLEN -24
+#define BCME_NOTREADY -25
+#define BCME_EPERM -26
+#define BCME_NOMEM -27
+#define BCME_ASSOCIATED -28
+#define BCME_RANGE -29
+#define BCME_NOTFOUND -30
+#define BCME_WME_NOT_ENABLED -31
+#define BCME_TSPEC_NOTFOUND -32
+#define BCME_ACM_NOTSUPPORTED -33
+#define BCME_NOT_WME_ASSOCIATION -34
+#define BCME_SDIO_ERROR -35
+#define BCME_DONGLE_DOWN -36
+#define BCME_VERSION -37
+#define BCME_TXFAIL -38
+#define BCME_RXFAIL -39
+#define BCME_NODEVICE -40
+#define BCME_NMODE_DISABLED -41
+#define BCME_NONRESIDENT -42
+#define BCME_LAST BCME_NONRESIDENT
+
+
+#define BCMERRSTRINGTABLE { \
+ "OK", \
+ "Undefined error", \
+ "Bad Argument", \
+ "Bad Option", \
+ "Not up", \
+ "Not down", \
+ "Not AP", \
+ "Not STA", \
+ "Bad Key Index", \
+ "Radio Off", \
+ "Not band locked", \
+ "No clock", \
+ "Bad Rate valueset", \
+ "Bad Band", \
+ "Buffer too short", \
+ "Buffer too long", \
+ "Busy", \
+ "Not Associated", \
+ "Bad SSID len", \
+ "Out of Range Channel", \
+ "Bad Channel", \
+ "Bad Address", \
+ "Not Enough Resources", \
+ "Unsupported", \
+ "Bad length", \
+ "Not Ready", \
+ "Not Permitted", \
+ "No Memory", \
+ "Associated", \
+ "Not In Range", \
+ "Not Found", \
+ "WME Not Enabled", \
+ "TSPEC Not Found", \
+ "ACM Not Supported", \
+ "Not WME Association", \
+ "SDIO Bus Error", \
+ "Dongle Not Accessible", \
+ "Incorrect version", \
+ "TX Failure", \
+ "RX Failure", \
+ "Device Not Present", \
+ "NMODE Disabled", \
+ "Nonresident overlay access", \
+}
+
/* Generic arithmetic helpers.  NOTE: these are function-like macros
 * that may evaluate their arguments more than once -- do not pass
 * expressions with side effects (e.g. MIN(i++, j)).
 */
#ifndef ABS
#define ABS(a) (((a) < 0) ? -(a) : (a))
#endif

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

/* Integer ceiling of x/y, and x rounded up to a multiple of y. */
#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* Alignment helpers: the boundary argument must be a power of two. */
#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0)
#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
	& ~((boundary) - 1))
#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
	& ~((boundary) - 1))
/* True for powers of two (note: also true for zero). */
#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0)
/* True iff 'mask' is a contiguous run of low-order one bits. */
#define VALID_MASK(mask) !((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+
+#include <stddef.h>
+#define OFFSETOF(type, member) offsetof(type, member)
+#else
+#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+#endif
+#endif
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
+#endif
+
+
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f))
+
+
+#ifndef setbit
+#ifndef NBBY
+#define NBBY 8
+#endif
+#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+
+#define CRC8_INIT_VALUE 0xff
+#define CRC8_GOOD_VALUE 0x9f
+#define CRC16_INIT_VALUE 0xffff
+#define CRC16_GOOD_VALUE 0xf0b8
+#define CRC32_INIT_VALUE 0xffffffff
+#define CRC32_GOOD_VALUE 0xdebb20e3
+
+
+#define MACF "%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \
+ ((struct ether_addr *) (ea))->octet[1], \
+ ((struct ether_addr *) (ea))->octet[2], \
+ ((struct ether_addr *) (ea))->octet[3], \
+ ((struct ether_addr *) (ea))->octet[4], \
+ ((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea) (ea).octet[0], \
+ (ea).octet[1], \
+ (ea).octet[2], \
+ (ea).octet[3], \
+ (ea).octet[4], \
+ (ea).octet[5]
+
+
/* Maps a single bit of a flags word to a human-readable name
 * (consumed by bcm_format_flags()).
 */
typedef struct bcm_bit_desc {
	uint32 bit;
	const char* name;
} bcm_bit_desc_t;


/* Type-Length-Value element as used in 802.11 information elements:
 * one id byte, one length byte, then 'len' bytes of data.  data[1] is
 * a pre-C99 variable-length trailer (not a true one-byte array).
 */
typedef struct bcm_tlv {
	uint8 id;
	uint8 len;
	uint8 data[1];
} bcm_tlv_t;
+
+
+#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
+
+
+#define ETHER_ADDR_STR_LEN 18
+
+
+
/* XOR two 16-byte (128-bit) blocks: dst[k] = src1[k] ^ src2[k] for
 * k in [0, 16).
 * NOTE(review): partial overlap between dst and a source would corrupt
 * the word-at-a-time path -- callers presumably pass distinct buffers;
 * confirm before reusing.
 */
static INLINE void
xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
{
	if (
#ifdef __i386__
	1 ||
#endif
	(((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
		/* Fast path: all three pointers are 4-byte aligned, so XOR
		 * four 32-bit words at a time.  On i386 the "1 ||" forces
		 * this path even when unaligned (x86 permits unaligned
		 * word access).
		 * NOTE(review): the uint32 casts are a strict-aliasing
		 * violation in ISO C; this relies on the compiler being
		 * lenient here.
		 */
		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
	} else {
		/* Slow path: byte-by-byte XOR for unaligned buffers. */
		int k;
		for (k = 0; k < 16; k++)
			dst[k] = src1[k] ^ src2[k];
	}
}
+
+
+
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC)
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+ defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+
+extern const char *bcmerrorstr(int bcmerror);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+
+
+typedef uint32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit))
+#define mboolclr(mb, bit) ((mb) &= ~(bit))
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
+
+
+struct fielddesc {
+ const char *nameandfmt;
+ uint32 offset;
+ uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+ char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h
new file mode 100644
index 00000000000000..c5ea223ec8c89c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -0,0 +1,571 @@
+/*
+ * Dongle BUS interface Abstraction layer
+ * target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dbus.h 323680 2012-03-26 17:52:31Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+
/* Debug/trace logging hooks -- compiled out (no-ops) in this build.
 * Callers invoke them with a double-parenthesized printf-style argument
 * list, e.g. DBUSERR(("tx failed: %d\n", err)).
 * (The original header defined DBUSTRACE twice; the duplicate is removed.)
 */
#define DBUSTRACE(args)
#define DBUSERR(args)
#define DBUSINFO(args)
#define DBUSDBGLOCK(args)
+
/* DBUS status/error codes.  DBUS_OK is zero (success); errors start at
 * -200 and count upward from there, so every code below remains
 * negative and does not collide with the BCME_* error space.
 */
enum {
	DBUS_OK = 0,
	DBUS_ERR = -200,	/* generic failure */
	DBUS_ERR_TIMEOUT,
	DBUS_ERR_DISCONNECT,
	DBUS_ERR_NODEVICE,
	DBUS_ERR_UNSUPPORTED,
	DBUS_ERR_PENDING,
	DBUS_ERR_NOMEM,
	DBUS_ERR_TXFAIL,
	DBUS_ERR_TXTIMEOUT,
	DBUS_ERR_TXDROP,
	DBUS_ERR_RXFAIL,
	DBUS_ERR_RXDROP,
	DBUS_ERR_TXCTLFAIL,
	DBUS_ERR_RXCTLFAIL,
	DBUS_ERR_REG_PARAM,
	DBUS_STATUS_CANCELLED,
	DBUS_ERR_NVRAM,
	DBUS_JUMBO_NOMATCH,
	DBUS_JUMBO_BAD_FORMAT,
	DBUS_NVRAM_NONTXT
};
+
+#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */
+#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */
+
+#define ERR_CBMASK_TXFAIL 0x00000001
+#define ERR_CBMASK_RXFAIL 0x00000002
+#define ERR_CBMASK_ALL 0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE 0
+#define DBUS_CBCTL_READ 1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL 2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX 16000
+#define DBUS_BUFFER_SIZE_RX 5000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
+
+/* DBUS types */
+enum {
+ DBUS_USB,
+ DBUS_SDIO,
+ DBUS_SPI,
+ DBUS_UNKNOWN
+};
+
+enum dbus_state {
+ DBUS_STATE_DL_PENDING,
+ DBUS_STATE_DL_DONE,
+ DBUS_STATE_UP,
+ DBUS_STATE_DOWN,
+ DBUS_STATE_PNP_FWDL,
+ DBUS_STATE_DISCONNECT,
+ DBUS_STATE_SLEEP
+};
+
+enum dbus_pnp_state {
+ DBUS_PNP_DISCONNECT,
+ DBUS_PNP_SLEEP,
+ DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+ DBUS_FIRMWARE,
+ DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+ INVALID_SPEED = -1,
+ LOW_SPEED = 1, /* USB 1.1: 1.5 Mbps */
+ FULL_SPEED, /* USB 1.1: 12 Mbps */
+ HIGH_SPEED, /* USB 2.0: 480 Mbps */
+ SUPER_SPEED, /* USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
/* Static attributes of the attached device, queried via
 * dbus_get_attrib().
 */
typedef struct {
	int bustype;		/* one of the DBUS_* bus type codes */
	int vid;		/* vendor id */
	int pid;		/* product id */
	int devid;
	int chiprev;		/* chip revision number */
	int mtu;
	int nchan;		/* Data Channels */
	int has_2nd_bulk_in_ep;	/* nonzero if a second bulk-in endpoint exists */
} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+ uint32 rx_errors;
+ uint32 tx_errors;
+ uint32 rx_dropped;
+ uint32 tx_dropped;
+} dbus_stats_t;
+
+/*
+ * Configurable BUS parameters
+ */
+typedef struct {
+ bool rxctl_deferrespok;
+} dbus_config_t;
+
+/*
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+ uint8 *fw;
+ int fwlen;
+ uint8 *vars;
+ int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
/* Client (upper-layer) callbacks registered during dbus_attach().
 * 'cbarg' is the opaque argument the client passed to dbus_attach().
 */
typedef struct dbus_callbacks {
	void (*send_complete)(void *cbarg, void *info, int status);	/* tx finished */
	void (*recv_buf)(void *cbarg, uint8 *buf, int len);	/* rx data, flat buffer form */
	void (*recv_pkt)(void *cbarg, void *pkt);		/* rx data, packet form */
	void (*txflowcontrol)(void *cbarg, bool onoff);		/* throttle/unthrottle tx */
	void (*errhandler)(void *cbarg, int err);		/* DBUS_ERR_* notification */
	void (*ctl_complete)(void *cbarg, int type, int status);
	void (*state_change)(void *cbarg, int state);		/* enum dbus_state transition */
	void *(*pktget)(void *cbarg, uint len, bool send);	/* allocate a packet */
	void (*pktfree)(void *cbarg, void *p, bool send);	/* release a packet */
} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+ void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+ void (*detach)(struct dbus_pub *pub, void *bus);
+
+ int (*up)(void *bus);
+ int (*down)(void *bus);
+ int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+ int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*send_ctl)(void *bus, uint8 *buf, int len);
+ int (*recv_ctl)(void *bus, uint8 *buf, int len);
+ int (*get_stats)(void *bus, dbus_stats_t *stats);
+ int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+ int (*pnp)(void *bus, int evnt);
+ int (*remove)(void *bus);
+ int (*resume)(void *bus);
+ int (*suspend)(void *bus);
+ int (*stop)(void *bus);
+ int (*reset)(void *bus);
+
+ /* Access to bus buffers directly */
+ void *(*pktget)(void *bus, int len);
+ void (*pktfree)(void *bus, void *pkt);
+
+ int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+ bool set);
+ void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+ int (*set_config)(void *bus, dbus_config_t *config);
+ int (*get_config)(void *bus, dbus_config_t *config);
+
+ bool (*device_exists)(void *bus);
+ bool (*dlneeded)(void *bus);
+ int (*dlstart)(void *bus, uint8 *fw, int len);
+ int (*dlrun)(void *bus);
+ bool (*recv_needed)(void *bus);
+
+ void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+ void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+ int (*tx_timer_init)(void *bus);
+ int (*tx_timer_start)(void *bus, uint timeout);
+ int (*tx_timer_stop)(void *bus);
+
+ int (*sched_dpc)(void *bus);
+ int (*lock)(void *bus);
+ int (*unlock)(void *bus);
+ int (*sched_probe_cb)(void *bus);
+
+ int (*shutdown)(void *bus);
+
+ int (*recv_stop)(void *bus);
+ int (*recv_resume)(void *bus);
+
+ int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+ int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+ /* Add from the bottom */
+} dbus_intf_t;
+
/* Public DBUS state shared between the common layer and its clients. */
typedef struct dbus_pub {
	struct osl_info *osh;		/* OS abstraction layer handle */
	dbus_stats_t stats;		/* error/drop counters */
	dbus_attrib_t attrib;		/* device attributes */
	enum dbus_state busstate;	/* current bus state */
	DEVICE_SPEED device_speed;	/* negotiated USB speed */
	int ntxq, nrxq, rxsize;		/* queue depths / rx buffer size (see dbus_attach) */
	void *bus;			/* opaque bus-specific state */
	struct shared_info *sh;
	void *dev_info;
} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define ALIGNED_LOCAL_VARIABLE(var, align) \
+ uint8 buffer[SDALIGN+64]; \
+ uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there better way to pass OS/Host handles to DBUS but still
+ * maintain common interface for all OS??
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ void *param1, void *param2);
+extern int dbus_deregister(void);
+
+extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq,
+ void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh);
+extern void dbus_detach(dbus_pub_t *pub);
+
+extern int dbus_up(dbus_pub_t *pub);
+extern int dbus_down(dbus_pub_t *pub);
+extern int dbus_stop(dbus_pub_t *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/* IO Request Block (IRB) */
+typedef struct dbus_irb {
+ struct dbus_irb *next; /* it's casted from dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int buf_len;
+ int actual_len;
+ void *pkt;
+ void *info;
+ void *arg;
+} dbus_irb_rx_t;
+
/* Transmit I/O request block. */
typedef struct dbus_irb_tx {
	struct dbus_irb irb;	/* Must be first */
	uint8 *buf;		/* flat data buffer */
	int len;		/* length of data in 'buf' */
	void *pkt;		/* packet form of the payload */
	int retry_count;	/* retry counter (see DBUS_TX_RETRY_LIMIT) */
	void *info;
	void *arg;
	void *send_buf;		/* linear buffer for LINUX when aggregation is enabled */
} dbus_irb_tx_t;
+
+/* DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+ void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+ void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+ void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ bool (*isr)(void *cbarg, bool *wantdpc);
+ bool (*dpc)(void *cbarg, bool bounded);
+ void (*watchdog)(void *cbarg);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+ struct dbus_irb* (*getirb)(void *cbarg, bool send);
+ void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, uint8 **fw, int *fwlen, int type,
+ uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ /* Backward compatibility */
+ typedef unsigned int gfp_t;
+
+ #define dma_pool pci_pool
+ #define dma_pool_create(name, dev, size, align, alloc) \
+ pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+ #define dma_pool_destroy(pool) pci_pool_destroy(pool)
+ #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+ #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+ #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir)
+ #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir)
+ #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+ #define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+ #define hc32_to_cpu(x) le32_to_cpu(x)
+ #define cpu_to_hc32(x) cpu_to_le32(x)
+ typedef unsigned int __hc32;
+#else
+ #error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH 0x31
+#define EHCI_SET_EP_BYPASS EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that is does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS 5
+#define EHCI_QTD_ALIGN 32
+#define EHCI_BULK_PACKET_SIZE 512
+#define EHCI_QTD_XACTERR_MAX 32
+
/* EHCI Queue Transfer Descriptor.  The fields up to qtd_buffer_hi[]
 * mirror the controller's hardware layout (hence volatile); the fields
 * after "Implementation extension" are driver-private.  Instances are
 * EHCI_QTD_ALIGN (32-byte) aligned.
 */
struct ehci_qtd {
	/* Hardware map */
	volatile uint32_t qtd_next;
	volatile uint32_t qtd_altnext;
	volatile uint32_t qtd_status;
/* Accessors/flags for the qtd_status word. */
#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff)
#define EHCI_QTD_IOC 0x00008000
#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3)
#define EHCI_QTD_SET_CERR(x) ((x) << 10)
#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3)
#define EHCI_QTD_SET_PID(x) ((x) << 8)
#define EHCI_QTD_ACTIVE 0x80
#define EHCI_QTD_HALTED 0x40
#define EHCI_QTD_BUFERR 0x20
#define EHCI_QTD_BABBLE 0x10
#define EHCI_QTD_XACTERR 0x08
#define EHCI_QTD_MISSEDMICRO 0x04
	volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
	volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];

	/* Implementation extension */
	dma_addr_t qtd_self;		/* own hardware address */
	struct ehci_qtd *obj_next;	/* software link to the next QTD */
	void *rpc;			/* pointer to the rpc buffer */
	size_t length;			/* length of the data in the buffer */
	void *buff;			/* pointer to the reassembly buffer */
	int xacterrs;			/* retry counter for qtd xact error */
} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/* Queue Head */
+/* NOTE This structure is slightly different from the one in the kernel; but needs to stay
+ * compatible
+ */
+struct ehci_qh {
+ /* Hardware map */
+ volatile uint32_t qh_link;
+ volatile uint32_t qh_endp;
+ volatile uint32_t qh_endphub;
+ volatile uint32_t qh_curqtd;
+
+ /* QTD overlay */
+ volatile uint32_t ow_next;
+ volatile uint32_t ow_altnext;
+ volatile uint32_t ow_status;
+ volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS];
+ volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+ /* Extension (should match the kernel layout) */
+ dma_addr_t unused0;
+ void *unused1;
+ struct list_head unused2;
+ struct ehci_qtd *dummy;
+ struct ehci_qh *unused3;
+
+ struct ehci_hcd *unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct kref unused5;
+ unsigned unused6;
+
+ uint8_t unused7;
+
+ /* periodic schedule info */
+ uint8_t unused8;
+ uint8_t unused9;
+ uint8_t unused10;
+ uint16_t unused11;
+ uint16_t unused12;
+ uint16_t unused13;
+ struct usb_device *unused14;
+#else
+ unsigned unused5;
+
+ u8 unused6;
+
+ /* periodic schedule info */
+ u8 unused7;
+ u8 unused8;
+ u8 unused9;
+ unsigned short unused10;
+ unsigned short unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+ struct usb_device *unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+ struct ehci_qtd *first_qtd;
+ /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+ /* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* The corresponding structure in the kernel is used to get the QH */
struct hcd_dev {	/* usb_device.hcpriv points to this */
	struct list_head unused0;
	struct list_head unused1;

	/* array of QH pointers */
	void *ep[32];
};
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+ int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+ int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
new file mode 100644
index 00000000000000..21a5ecb86da15c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h
@@ -0,0 +1,135 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h 326276 2012-04-06 23:16:42Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define _dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ bool set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+ uint driver; /* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+ BUS_TYPE_USB = 0, /* for USB dongles */
+ BUS_TYPE_SDIO /* for SDIO dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION 1
+
+#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC 0
+#define DHD_GET_VERSION 1
+#define DHD_GET_VAR 2
+#define DHD_SET_VAR 3
+
+/* message levels */
+#define DHD_ERROR_VAL 0x0001
+#define DHD_TRACE_VAL 0x0002
+#define DHD_INFO_VAL 0x0004
+#define DHD_DATA_VAL 0x0008
+#define DHD_CTL_VAL 0x0010
+#define DHD_TIMER_VAL 0x0020
+#define DHD_HDRS_VAL 0x0040
+#define DHD_BYTES_VAL 0x0080
+#define DHD_INTR_VAL 0x0100
+#define DHD_LOG_VAL 0x0200
+#define DHD_GLOM_VAL 0x0400
+#define DHD_EVENT_VAL 0x0800
+#define DHD_BTA_VAL 0x1000
+#if defined(NDISVER) && (NDISVER >= 0x0630) && 1
+#define DHD_SCAN_VAL 0x2000
+#else
+#define DHD_ISCAN_VAL 0x2000
+#endif
+#define DHD_ARPOE_VAL 0x4000
+#define DHD_REORDER_VAL 0x8000
+#define DHD_WL_VAL 0x10000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+ uint version; /* To allow structure change tracking */
+ uint freq; /* Max ticks between tx/rx attempts */
+ uint count; /* Test packets to send/rcv each attempt */
+ uint print; /* Print counts every <print> attempts */
+ uint total; /* Total packets (or bursts) */
+ uint minlen; /* Minimum length of packets to send */
+ uint maxlen; /* Maximum length of packets to send */
+ uint numsent; /* Count of test packets sent */
+ uint numrcvd; /* Count of test packets received */
+ uint numfail; /* Count of test send failures */
+ uint mode; /* Test mode (type of test packets) */
+ uint stop; /* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE (-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h
new file mode 100644
index 00000000000000..8141510e78ac9a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define EPI_MAJOR_VERSION 1
+
+#define EPI_MINOR_VERSION 26
+
+#define EPI_RC_NUMBER 0
+
+#define EPI_INCREMENTAL_NUMBER 0
+
+#define EPI_BUILD_NUMBER 0
+
+#define EPI_VERSION 1, 26, 0, 0
+
+#define EPI_VERSION_NUM 0x011a0000
+
+#define EPI_VERSION_DEV 1.26.0
+
+
+#define EPI_VERSION_STR "1.26 (r327295)"
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h
new file mode 100644
index 00000000000000..c41def6c7d8abf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h
@@ -0,0 +1,36 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
new file mode 100644
index 00000000000000..90d979929381b7
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h 261365 2011-05-24 20:42:23Z $
+ */
+
+#ifndef _hndrte_armtrap_h
+#define _hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0; /* a1 */
+ uint32 r1; /* a2 */
+ uint32 r2; /* a3 */
+ uint32 r3; /* a4 */
+ uint32 r4; /* v1 */
+ uint32 r5; /* v2 */
+ uint32 r6; /* v3 */
+ uint32 r7; /* v4 */
+ uint32 r8; /* v5 */
+ uint32 r9; /* sb/v6 */
+ uint32 r10; /* sl/v7 */
+ uint32 r11; /* fp/v8 */
+ uint32 r12; /* ip */
+ uint32 r13; /* sp */
+ uint32 r14; /* lr */
+ uint32 pc; /* r15 */
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hndrte_armtrap_h */
diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_cons.h b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
new file mode 100644
index 00000000000000..57abbbd5b67bde
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h
@@ -0,0 +1,67 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h 300516 2011-12-04 17:39:44Z $
+ */
+#ifndef _HNDRTE_CONS_H
+#define _HNDRTE_CONS_H
+
+#include <typedefs.h>
+
+#define CBUF_LEN (128)
+
+#define LOG_BUF_LEN 1024
+
+typedef struct {
+ uint32 buf; /* Can't be pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ char *_buf_compat; /* redundant pointer for backward compat. */
+} hndrte_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hndrte_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hndrte_cons_t;
+
+#endif /* _HNDRTE_CONS_H */
diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h
new file mode 100644
index 00000000000000..66640c3b0cbd57
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h
@@ -0,0 +1,235 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _HNDSOC_H
+#define _HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
+#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
+#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+
+#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
+#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+
+#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
+#define SI_FASTRAM_SWAPPED 0x19800000
+
+#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
+#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
+#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */
+#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
+#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
+#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */
+#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */
+#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */
+#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
+#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
+#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
+#define EROM_CORE_ID 0x366 /* EROM core ID */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+#define CC_4706_CORE_ID 0x500 /* chipcommon core */
+#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */
+#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */
+#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */
+#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */
+#define ALTA_CORE_ID 0x534 /* I2S core */
+#define DDR23_PHY_CORE_ID 0x5dd
+
+#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */
+#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define SI_CC_IDX 0
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+#define SOCI_UBUS 2
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
+/* A register that is common to all cores to
+ * communicate w/PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+
+/* clk_ctl_st register */
+#define CCS_FORCEALP 0x00000001 /* force ALP request */
+#define CCS_FORCEHT 0x00000002 /* force HT request */
+#define CCS_FORCEILP 0x00000004 /* force ILP request */
+#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
+#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
+#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
+#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */
+#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
+#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT 8
+#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
+#define CCS_HTAVAIL 0x00020000 /* HT is available */
+#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
+#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
+#define CCS_ERSRC_STS_SHIFT 24
+
+#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size */
+#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
+#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
+#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
+#define BISZ_TXTST_IDX 1 /* 1: text start */
+#define BISZ_TXTEND_IDX 2 /* 2: text end */
+#define BISZ_DATAST_IDX 3 /* 3: data start */
+#define BISZ_DATAEND_IDX 4 /* 4: data end */
+#define BISZ_BSSST_IDX 5 /* 5: bss start */
+#define BISZ_BSSEND_IDX 6 /* 6: bss end */
+#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h
new file mode 100644
index 00000000000000..257aaf642f8a18
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h
@@ -0,0 +1,422 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h 326751 2012-04-10 20:13:19Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+
+
+#ifdef BCMDRIVER
+
+
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+
+extern uint32 g_assert_type;
+
+
+#if defined(BCMASSERT_LOG)
+ #define ASSERT(exp) \
+ do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+ #ifdef __GNUC__
+ #define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+ #if GCC_VERSION > 30100
+ #define ASSERT(exp) do {} while (0)
+ #else
+
+ #define ASSERT(exp)
+ #endif
+ #endif
+#endif
+
+
+#define OSL_DELAY(usec) osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+ osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+
+#define OSL_PCI_READ_CONFIG(osh, offset, size) \
+ osl_pci_read_config((osh), (offset), (size))
+#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+ osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+
+#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+
+typedef struct {
+ bool pkttag;
+ uint pktalloced;
+ bool mmbus;
+ pktfree_cb_fn_t tx_fn;
+ void *tx_ctx;
+ void *unused[3];
+} osl_pubinfo_t;
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
+ do { \
+ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
+ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
+ } while (0)
+
+
+
+#define BUS_SWAP32(v) (v)
+
+ #define MALLOC(osh, size) osl_malloc((osh), (size))
+ #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
+ #define MALLOCED(osh) osl_malloced((osh))
+ extern void *osl_malloc(osl_t *osh, uint size);
+ extern void osl_mfree(osl_t *osh, void *addr, uint size);
+ extern uint osl_malloced(osl_t *osh);
+
+#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC)
+#define NATIVE_MFREE(osh, addr, size) kfree(addr)
+
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+
+#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
+#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+
+
+#define DMA_TX 1
+#define DMA_RX 2
+
+
+#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+
+
+ #include <bcmsdh.h>
+ #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
+ #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+ mmap_op else bus_op
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+ mmap_op : bus_op
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+
+#define PKTBUFSZ 2048
+
+
+#include <linuxver.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
+#define printf(fmt, args...) printk(fmt , ## args)
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ BCM_REFERENCE(osh); \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+ BCM_REFERENCE(osh); \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+
+
+#define bcopy(src, dst, len) memcpy((dst), (src), (len))
+#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
+#define bzero(b, len) memset((b), '\0', (len))
+
+
+#define OSL_UNCACHED(va) ((void *)va)
+#define OSL_CACHED(va) ((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz)
+#define OSL_PREF_RANGE_ST(va, sz)
+
+
+#if defined(__i386__)
+#define OSL_GETCYCLES(x) rdtscl((x))
+#else
+#define OSL_GETCYCLES(x) ((x) = 0)
+#endif
+
+
+#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
+
+
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size) (void *)(0)
+#endif
+#define REG_UNMAP(va) iounmap((va))
+
+
+#define R_SM(r) *(r)
+#define W_SM(r, v) (*(r) = (v))
+#define BZERO_SM(r, len) memset((r), '\0', (len))
+
+
+#include <linuxver.h>
+
+
+#define PKTGET(osh, len, send) osl_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#define PKTLIST_DUMP(osh, buf)
+#define PKTDBG_TRACE(osh, pkt, bit)
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#endif
+#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
+#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
+#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
+#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
+#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced
+#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
+#define PKTPOOL(osh, skb) FALSE
+#define PKTSHRINK(osh, m) (m)
+
+#ifdef CTFPOOL
+#define CTFPOOL_REFILL_THRESH 3
+typedef struct ctfpool {
+ void *head;
+ spinlock_t lock;
+ uint max_obj;
+ uint curr_obj;
+ uint obj_size;
+ uint refills;
+ uint fast_allocs;
+ uint fast_frees;
+ uint slow_allocs;
+} ctfpool_t;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define FASTBUF (1 << 16)
+#define CTFBUF (1 << 17)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
+#else
+#define FASTBUF (1 << 0)
+#define CTFBUF (1 << 1)
+#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= FASTBUF)
+#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF))
+#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= CTFBUF)
+#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~CTFBUF))
+#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) & FASTBUF)
+#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) & CTFBUF)
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
+#endif
+
+#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk)
+#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#endif
+
+
+#ifdef HNDCTF
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define SKIPCT (1 << 18)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len & SKIPCT)
+#else
+#define SKIPCT (1 << 2)
+#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused |= SKIPCT)
+#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~SKIPCT))
+#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused & SKIPCT)
+#endif
+#else
+#define PKTSETSKIPCT(osh, skb)
+#define PKTCLRSKIPCT(osh, skb)
+#define PKTSKIPCT(osh, skb)
+#endif
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+#define DMA_MAP(osh, va, size, direction, p, dmah) \
+ osl_dma_map((osh), (va), (size), (direction))
+
+#ifdef PKTC
+
+struct chain_node {
+ struct sk_buff *link;
+ unsigned int flags:3, pkts:9, bytes:20;
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#define CHAIN_NODE(skb) ((struct chain_node*)&(((struct sk_buff*)skb)->tstamp))
+#else
+#define CHAIN_NODE(skb) ((struct chain_node*)&(((struct sk_buff*)skb)->stamp))
+#endif
+
+#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts)
+#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes)
+#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags)
+#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c) & ((1 << 9) - 1))
+#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l) & ((1 << 20) - 1))
+#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb))
+#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb))
+#define PKTCLINK(skb) (CHAIN_NODE(skb)->link)
+#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define PKTISCHAINED(skb) (PKTCLINK(skb) != NULL)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for (; (skb) != NULL; (skb) = (nskb)) \
+ if ((nskb) = PKTCLINK(skb), PKTSETCLINK((skb), NULL), 1)
+#define PKTCFREE(osh, skb, send) \
+do { \
+ void *nskb; \
+ ASSERT((skb) != NULL); \
+ FOREACH_CHAINED_PKT((skb), nskb) { \
+ PKTFREE((osh), (skb), (send)); \
+ } \
+} while (0)
+#endif
+
+#else
+
+
+
+ #define ASSERT(exp) do {} while (0)
+
+
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+
+#include <string.h>
+
+
+#include <stdio.h>
+
+
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h
new file mode 100644
index 00000000000000..e59ac886f75f1e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/linuxver.h
@@ -0,0 +1,614 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h 315203 2012-02-16 00:58:00Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#endif
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else /* Linux 2.4 (w/o preemption patch): daemonize() takes no name, so copy it
+ * into current->comm ourselves (bounded, incl. the NUL terminator). */
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
+#else
+#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+ (RHEL_MAJOR == 5))
+
+typedef void (*work_func_t)(void *work);
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef CONFIG_NET_RADIO
+#define CONFIG_WIRELESS_EXT
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#define __devinit __init
+#endif
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x) x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev) (dev)->sysdata
+#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
+
+
+
+struct pci_device_id {
+ unsigned int vendor, device;
+ unsigned int subvendor, subdevice;
+ unsigned int class, class_mask;
+ unsigned long driver_data;
+};
+
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table;
+ int (*probe)(struct pci_dev *dev,
+ const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *dev);
+ void (*suspend)(struct pci_dev *dev);
+ void (*resume)(struct pci_dev *dev);
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x) __initcall(x);
+#define module_exit(x) __exitcall(x);
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+
+
+#ifndef PCI_DMA_TODEVICE
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#endif
+
+typedef u32 dma_addr_t;
+
+
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a) dev_kfree_skb(a)
+#define netif_down(dev) do { (dev)->start = 0; } while (0)
+
+
+#ifndef _COMPAT_NETDEVICE_H
+
+
+
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+}
+
+#define netif_queue_stopped(dev) (dev)->tbusy
+#define netif_running(dev) (dev)->start
+
+#endif
+
+#define netif_device_attach(dev) netif_start_queue(dev)
+#define netif_device_detach(dev) netif_stop_queue(dev)
+
+
+#define tasklet_struct tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+ queue_task(tasklet, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+ void (*func)(unsigned long),
+ unsigned long data)
+{
+ tasklet->next = NULL;
+ tasklet->sync = 0;
+ tasklet->routine = (void (*)(void *))func;
+ tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet) { do {} while (0); }
+
+
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+
+
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+#endif
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define PCI_SAVE_STATE(a, b) pci_save_state(a)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
+#else
+#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
+#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &buffer[i]);
+ }
+ return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+ int i;
+
+ if (buffer) {
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(dev, i * 4, buffer[i]);
+ }
+
+ else {
+ for (i = 0; i < 6; i ++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+
+#define af_packet_priv data
+#endif
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+ void *parent;
+ struct task_struct *p_task;
+ long thr_pid;
+ int prio;
+ struct semaphore sema;
+ int terminated;
+ struct completion completed;
+} tsk_ctl_t;
+
+
+
+
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags) \
+{ \
+ sema_init(&((tsk_ctl)->sema), 0); \
+ init_completion(&((tsk_ctl)->completed)); \
+ (tsk_ctl)->parent = owner; \
+ (tsk_ctl)->terminated = FALSE; \
+ (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
+ if ((tsk_ctl)->thr_pid > 0) \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+ (tsk_ctl)->terminated = TRUE; \
+ smp_wmb(); \
+ up(&((tsk_ctl)->sema)); \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
+ (tsk_ctl)->thr_pid = -1; \
+}
+
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid; \
+pid = find_get_pid((pid_t)nr); \
+tsk = pid_task(pid, PIDTYPE_PID); \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+ struct task_struct *tsk; \
+ tsk = find_task_by_vpid(pid); \
+ if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+ kill_proc(pid, sig, 1); \
+}
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret) \
+do { \
+ wait_queue_t __wait; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ ret = schedule_timeout(ret); \
+ if (!ret) \
+ break; \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!(condition)) \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev) (dev->priv)
+#else
+#define DEV_PRIV(dev) netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p) wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h
new file mode 100644
index 00000000000000..c1eca68a602d61
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY 128 /* Max options */
+typedef struct miniopt {
+
+ /* These are persistent after miniopt_init() */
+ const char* name; /* name for prompt in error strings */
+ const char* flags; /* option chars that take no args */
+ bool longflags; /* long options may be flags */
+ bool opt_end; /* at end of options (passed a "--") */
+
+ /* These are per-call to miniopt() */
+
+ int consumed; /* number of argv entries consumed in
+ * the most recent call to miniopt()
+ */
+ bool positional;
+ bool good_int; /* 'val' member is the result of a successful
+ * strtol conversion of the option value
+ */
+ char opt;
+ char key[MINIOPT_MAXKEY];
+ char* valstr; /* positional param, or value for the option,
+ * or null if the option had
+ * no accompanying value
+ */
+ uint uval; /* strtol translation of valstr */
+ int val; /* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* MINI_OPT_H */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 00000000000000..7c5fd8106f3ff8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,74 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h 281527 2011-09-02 17:12:53Z $
+ */
+
+#ifndef _MSGTRACE_H
+#define _MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+ uint8 version;
+ uint8 spare;
+ uint16 len; /* Len of the trace */
+ uint32 seqnum; /* Sequence number of message. Useful if the message has been lost
+ * because of DMA error or a bus reset (ex: SDIO Func2)
+ */
+ uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */
+ uint32 discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message. This causes endless traces.
+ * This flag must be set to TRUE in any hbus traces. The flag is reset in the function msgtrace_put.
+ * This prevents endless traces but may cause a hazardous loss of traces, only in bus device code.
+ * It is recommended to set this flag in macro SD_TRACE but not in SD_ERROR, to avoid missing
+ * hbus error traces. hbus error traces should not generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+ uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern void msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 00000000000000..ca171d8acd6931
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,88 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h 320905 2012-03-13 15:33:25Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ 32
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)
+#endif
+
+#define PKTCTFMAP(osh, p)
+
+
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif
+
+#if !defined(PKTC)
+#define PKTCCNT(skb) (0)
+#define PKTCLEN(skb) (0)
+#define PKTCFLAGS(skb) (0)
+#define PKTCSETCNT(skb, c)
+#define PKTCSETLEN(skb, l)
+#define PKTCSETFLAG(skb, fb)
+#define PKTCCLRFLAG(skb, fb)
+#define PKTCLINK(skb) PKTLINK(skb)
+#define PKTSETCLINK(skb, x) PKTSETLINK((skb), (x))
+#define PKTISCHAINED(skb) FALSE
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define PKTCFREE PKTFREE
+#endif
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 00000000000000..24ff4672734fad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,53 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 00000000000000..7fce0ddfd56e0b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,60 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h 286783 2011-09-29 06:18:57Z $
+ */
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__) || defined(__lint)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 00000000000000..5f7df6a7a7e693
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,90 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _h_pcicfg_
+#define _h_pcicfg_
+
+
+#define PCI_CFG_VID 0
+#define PCI_CFG_DID 2
+#define PCI_CFG_CMD 4
+#define PCI_CFG_STAT 6
+#define PCI_CFG_REV 8
+#define PCI_CFG_PROGIF 9
+#define PCI_CFG_SUBCL 0xa
+#define PCI_CFG_BASECL 0xb
+#define PCI_CFG_CLSZ 0xc
+#define PCI_CFG_LATTIM 0xd
+#define PCI_CFG_HDR 0xe
+#define PCI_CFG_BIST 0xf
+#define PCI_CFG_BAR0 0x10
+#define PCI_CFG_BAR1 0x14
+#define PCI_CFG_BAR2 0x18
+#define PCI_CFG_BAR3 0x1c
+#define PCI_CFG_BAR4 0x20
+#define PCI_CFG_BAR5 0x24
+#define PCI_CFG_CIS 0x28
+#define PCI_CFG_SVID 0x2c
+#define PCI_CFG_SSID 0x2e
+#define PCI_CFG_ROMBAR 0x30
+#define PCI_CFG_CAPPTR 0x34
+#define PCI_CFG_INT 0x3c
+#define PCI_CFG_PIN 0x3d
+#define PCI_CFG_MINGNT 0x3e
+#define PCI_CFG_MAXLAT 0x3f
+#define PCI_BAR0_WIN 0x80
+#define PCI_BAR1_WIN 0x84
+#define PCI_SPROM_CONTROL 0x88
+#define PCI_BAR1_CONTROL 0x8c
+#define PCI_INT_STATUS 0x90
+#define PCI_INT_MASK 0x94
+#define PCI_TO_SB_MB 0x98
+#define PCI_BACKPLANE_ADDR 0xa0
+#define PCI_BACKPLANE_DATA 0xa4
+#define PCI_CLK_CTL_ST 0xa8
+#define PCI_BAR0_WIN2 0xac
+#define PCI_GPIO_IN 0xb0
+#define PCI_GPIO_OUT 0xb4
+#define PCI_GPIO_OUTEN 0xb8
+
+#define PCI_BAR0_SHADOW_OFFSET (2 * 1024)
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+
+#define PCIE2_BAR0_WIN2 0x70
+#define PCIE2_BAR0_CORE2_WIN 0x74
+#define PCIE2_BAR0_CORE2_WIN2 0x78
+
+#define PCI_BAR0_WINSZ (16 * 1024)
+
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+#define PCI_16KBB0_WINSZ (16 * 1024)
+
+
+#define PCI_CONFIG_SPACE_SIZE 256
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
new file mode 100644
index 00000000000000..84f86671e1602c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h
@@ -0,0 +1,2244 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h 320583 2012-03-12 15:09:36Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US 1024
+
+
+#define DOT11_A3_HDR_LEN 24
+#define DOT11_A4_HDR_LEN 30
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
+#define DOT11_FCS_LEN 4
+#define DOT11_ICV_LEN 4
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_HTC_LEN 4
+
+#define DOT11_KEY_INDEX_SHIFT 6
+#define DOT11_IV_LEN 4
+#define DOT11_IV_TKIP_LEN 8
+#define DOT11_IV_AES_OCB_LEN 4
+#define DOT11_IV_AES_CCM_LEN 8
+#define DOT11_IV_MAX_LEN 8
+
+
+#define DOT11_MAX_MPDU_BODY_LEN 2304
+
+#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ DOT11_IV_AES_CCM_LEN + \
+ DOT11_MAX_MPDU_BODY_LEN + \
+ DOT11_ICV_LEN + \
+ DOT11_FCS_LEN)
+
+#define DOT11_MAX_SSID_LEN 32
+
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_MAX_RTS_LEN 2347
+
+
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_MAX_FRAG_LEN 2346
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+
+
+#define DOT11_MIN_DTIM_PERIOD 1
+#define DOT11_MAX_DTIM_PERIOD 0xFF
+
+
+#define DOT11_LLC_SNAP_HDR_LEN 8
+#define DOT11_OUI_LEN 3
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+ uint8 dsap;
+ uint8 ssap;
+ uint8 ctl;
+ uint8 oui[DOT11_OUI_LEN];
+ uint16 type;
+} BWL_POST_PACKED_STRUCT;
+
+
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr a1;
+ struct ether_addr a2;
+ struct ether_addr a3;
+ uint16 seq;
+ struct ether_addr a4;
+} BWL_POST_PACKED_STRUCT;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_RTS_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTS_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACK_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr bssid;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_PS_POLL_LEN 16
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CS_END_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+#define DOT11_ACTION_VS_HDR_LEN 6
+
+#define BCM_ACTION_OUI_BYTE0 0x00
+#define BCM_ACTION_OUI_BYTE1 0x90
+#define BCM_ACTION_OUI_BYTE2 0x4c
+
+
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001
+#define DOT11_BA_CTL_POLICY_MASK 0x0001
+
+#define DOT11_BA_CTL_MTID 0x0002
+#define DOT11_BA_CTL_COMPRESSED 0x0004
+
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6
+
+#define DOT11_BA_CTL_TID_MASK 0xF000
+#define DOT11_BA_CTL_TID_SHIFT 12
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr ra;
+ struct ether_addr ta;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+ uint16 bar_control;
+ uint16 seqnum;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN 4
+
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_BA_CMP_BITMAP_LEN 8
+
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+ uint16 ba_control;
+ uint16 seqnum;
+ uint8 bitmap[DOT11_BA_BITMAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+ uint16 fc;
+ uint16 durid;
+ struct ether_addr da;
+ struct ether_addr sa;
+ struct ether_addr bssid;
+ uint16 seq;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_HDR_LEN 24
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+ uint32 timestamp[2];
+ uint16 beacon_interval;
+ uint16 capability;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BCN_PRB_LEN 12
+#define DOT11_BCN_PRB_FIXED_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+ uint16 alg;
+ uint16 seq;
+ uint16 status;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+ uint16 capability;
+ uint16 listen;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+ uint16 capability;
+ uint16 listen;
+ struct ether_addr ap;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN 10
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+ uint16 capability;
+ uint16 status;
+ uint16 aid;
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN 6
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+ uint8 category;
+ uint8 action;
+ uint8 ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+ uint8 category;
+ uint8 action;
+ uint8 control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+ uint8 category;
+ uint8 action;
+ uint16 id;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE 1
+#define SM_PWRSAVE_MODE 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+ uint8 id;
+ uint8 len;
+ uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+ uint8 min;
+ uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+ uint8 id;
+ uint8 len;
+ uint8 tx_pwr;
+ uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+ uint8 id;
+ uint8 len;
+ uint8 first_channel;
+ uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+ uint8 id;
+ uint8 len;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ uint8 extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN 5
+#define BRCM_EXTCH_IE_TYPE 53
+#define DOT11_EXTCH_IE_LEN 1
+#define DOT11_EXT_CH_MASK 0x03
+#define DOT11_EXT_CH_UPPER 0x01
+#define DOT11_EXT_CH_LOWER 0x03
+#define DOT11_EXT_CH_NONE 0x00
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+ uint8 category;
+ uint8 action;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN 2
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+ uint8 id;
+ uint8 len;
+ uint8 mode;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN 3
+
+#define DOT11_CSA_MODE_ADVISORY 0
+#define DOT11_CSA_MODE_NO_TX 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+ uint8 category;
+ uint8 action;
+ dot11_chan_switch_ie_t chan_switch_ie;
+ dot11_brcm_extch_ie_t extch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+ uint8 mode;
+ uint8 reg;
+ uint8 channel;
+ uint8 count;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+ uint8 id;
+ uint8 len;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ dot11_ext_csa_ie_t chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+ uint8 category;
+ uint8 action;
+ struct dot11_csa_body b;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+ uint8 id;
+ uint8 len;
+ uint8 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN 1
+
+#define DOT11_OBSS_COEX_INFO_REQ 0x01
+#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
+#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+ uint8 id;
+ uint8 len;
+ uint8 regclass;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_MAX 7
+#define DOT11_EXTCAP_LEN_COEX 1
+#define DOT11_EXTCAP_LEN_BT 3
+#define DOT11_EXTCAP_LEN_IW 4
+#define DOT11_EXTCAP_LEN_SI 6
+
+#define DOT11_EXTCAP_LEN_TDLS 5
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+ uint8 extcap[DOT11_EXTCAP_LEN_TDLS];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+
+#define TDLS_CAP_TDLS 37
+#define TDLS_CAP_PU_BUFFER_STA 28
+#define TDLS_CAP_PEER_PSM 20
+#define TDLS_CAP_CH_SW 30
+#define TDLS_CAP_PROH 38
+#define TDLS_CAP_CH_SW_PROH 39
+
+#define TDLS_CAP_MAX_BIT 39
+
+
+
+#define DOT11_MEASURE_TYPE_BASIC 0
+#define DOT11_MEASURE_TYPE_CCA 1
+#define DOT11_MEASURE_TYPE_RPI 2
+#define DOT11_MEASURE_TYPE_CHLOAD 3
+#define DOT11_MEASURE_TYPE_NOISE 4
+#define DOT11_MEASURE_TYPE_BEACON 5
+#define DOT11_MEASURE_TYPE_FRAME 6
+#define DOT11_MEASURE_TYPE_STATS 7
+#define DOT11_MEASURE_TYPE_LCI 8
+#define DOT11_MEASURE_TYPE_TXSTREAM 9
+#define DOT11_MEASURE_TYPE_PAUSE 255
+
+
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0)
+#define DOT11_MEASURE_MODE_ENABLE (1<<1)
+#define DOT11_MEASURE_MODE_REQUEST (1<<2)
+#define DOT11_MEASURE_MODE_REPORT (1<<3)
+#define DOT11_MEASURE_MODE_DUR (1<<4)
+
+#define DOT11_MEASURE_MODE_LATE (1<<0)
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1)
+#define DOT11_MEASURE_MODE_REFUSED (1<<2)
+
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0))
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1))
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2))
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3))
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4))
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14
+
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+ } BWL_POST_PACKED_STRUCT basic;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+ uint8 channel;
+ uint8 start_time[8];
+ uint16 duration;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+ uint8 id;
+ uint8 len;
+ uint8 count;
+ uint8 period;
+ uint16 duration;
+ uint16 offset;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+ uint8 channel;
+ uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+ uint8 id;
+ uint8 len;
+ uint8 eaddr[ETHER_ADDR_LEN];
+ uint8 interval;
+ chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+
+#define WME_OUI "\x00\x50\xf2"
+#define WME_OUI_LEN 3
+#define WME_OUI_TYPE 2
+#define WME_TYPE 2
+#define WME_SUBTYPE_IE 0
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_SUBTYPE_TSPEC 2
+#define WME_VER 1
+
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+#define AC_COUNT 4
+
+typedef uint8 ac_bitmap_t;
+
+#define AC_BITMAP_NONE 0x0
+#define AC_BITMAP_ALL 0xf
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
+
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+ uint8 ACI;
+ uint8 ECW;
+ uint16 TXOP;
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+ uint8 oui[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 version;
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN 24
+
+
+#define WME_QI_AP_APSD_MASK 0x80
+#define WME_QI_AP_APSD_SHIFT 7
+#define WME_QI_AP_COUNT_MASK 0x0f
+#define WME_QI_AP_COUNT_SHIFT 0
+
+
+#define WME_QI_STA_MAXSPLEN_MASK 0x60
+#define WME_QI_STA_MAXSPLEN_SHIFT 5
+#define WME_QI_STA_APSD_ALL_MASK 0xf
+#define WME_QI_STA_APSD_ALL_SHIFT 0
+#define WME_QI_STA_APSD_BE_MASK 0x8
+#define WME_QI_STA_APSD_BE_SHIFT 3
+#define WME_QI_STA_APSD_BK_MASK 0x4
+#define WME_QI_STA_APSD_BK_SHIFT 2
+#define WME_QI_STA_APSD_VI_MASK 0x2
+#define WME_QI_STA_APSD_VI_SHIFT 1
+#define WME_QI_STA_APSD_VO_MASK 0x1
+#define WME_QI_STA_APSD_VO_SHIFT 0
+
+
+#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MAX 15
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_ACM_MASK 0x10
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_AIFSN_SHIFT 12
+
+
+#define EDCF_ECW_MIN 0
+#define EDCF_ECW_MAX 15
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_MASK 0xf0
+#define EDCF_ECWMAX_SHIFT 4
+
+
+#define EDCF_TXOP_MIN 0
+#define EDCF_TXOP_MAX 65535
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+
+
+#define NON_EDCF_AC_BE_ACI_STA 0x02
+
+
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VO_TXOP_STA 0x002f
+
+
+#define EDCF_AC_BE_ACI_AP 0x03
+#define EDCF_AC_BE_ECW_AP 0x64
+#define EDCF_AC_BE_TXOP_AP 0x0000
+#define EDCF_AC_BK_ACI_AP 0x27
+#define EDCF_AC_BK_ECW_AP 0xA4
+#define EDCF_AC_BK_TXOP_AP 0x0000
+#define EDCF_AC_VI_ACI_AP 0x41
+#define EDCF_AC_VI_ECW_AP 0x43
+#define EDCF_AC_VI_TXOP_AP 0x005e
+#define EDCF_AC_VO_ACI_AP 0x61
+#define EDCF_AC_VO_ECW_AP 0x32
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+ uint8 qosinfo;
+ uint8 rsvd;
+ edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+ uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+ uint8 id;
+ uint8 length;
+ uint16 station_count;
+ uint8 channel_utilization;
+ uint16 aac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 7
+
+
+#define FIXED_MSDU_SIZE 0x8000
+#define MSDU_SIZE_MASK 0x7fff
+
+
+
+#define INTEGER_SHIFT 13
+#define FRACTION_MASK 0x1FFF
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+ uint8 ti_type;
+ uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE 1
+#define TI_TYPE_KEY_LIFETIME 2
+
+
+#define WME_ADDTS_REQUEST 0
+#define WME_ADDTS_RESPONSE 1
+#define WME_DELTS_REQUEST 2
+
+
+#define WME_ADMISSION_ACCEPTED 0
+#define WME_INVALID_PARAMETERS 1
+#define WME_ADMISSION_REFUSED 3
+
+
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
+
+
+#define DOT11_OPEN_SYSTEM 0
+#define DOT11_SHARED_KEY 1
+#define DOT11_FAST_BSS 2
+#define DOT11_CHALLENGE_LEN 128
+
+
+#define FC_PVER_MASK 0x3
+#define FC_PVER_SHIFT 0
+#define FC_TYPE_MASK 0xC
+#define FC_TYPE_SHIFT 2
+#define FC_SUBTYPE_MASK 0xF0
+#define FC_SUBTYPE_SHIFT 4
+#define FC_TODS 0x100
+#define FC_TODS_SHIFT 8
+#define FC_FROMDS 0x200
+#define FC_FROMDS_SHIFT 9
+#define FC_MOREFRAG 0x400
+#define FC_MOREFRAG_SHIFT 10
+#define FC_RETRY 0x800
+#define FC_RETRY_SHIFT 11
+#define FC_PM 0x1000
+#define FC_PM_SHIFT 12
+#define FC_MOREDATA 0x2000
+#define FC_MOREDATA_SHIFT 13
+#define FC_WEP 0x4000
+#define FC_WEP_SHIFT 14
+#define FC_ORDER 0x8000
+#define FC_ORDER_SHIFT 15
+
+
+#define SEQNUM_SHIFT 4
+#define SEQNUM_MAX 0x1000
+#define FRAGNUM_MASK 0xF
+
+
+
+
+#define FC_TYPE_MNG 0
+#define FC_TYPE_CTL 1
+#define FC_TYPE_DATA 2
+
+
+#define FC_SUBTYPE_ASSOC_REQ 0
+#define FC_SUBTYPE_ASSOC_RESP 1
+#define FC_SUBTYPE_REASSOC_REQ 2
+#define FC_SUBTYPE_REASSOC_RESP 3
+#define FC_SUBTYPE_PROBE_REQ 4
+#define FC_SUBTYPE_PROBE_RESP 5
+#define FC_SUBTYPE_BEACON 8
+#define FC_SUBTYPE_ATIM 9
+#define FC_SUBTYPE_DISASSOC 10
+#define FC_SUBTYPE_AUTH 11
+#define FC_SUBTYPE_DEAUTH 12
+#define FC_SUBTYPE_ACTION 13
+#define FC_SUBTYPE_ACTION_NOACK 14
+
+
+#define FC_SUBTYPE_CTL_WRAPPER 7
+#define FC_SUBTYPE_BLOCKACK_REQ 8
+#define FC_SUBTYPE_BLOCKACK 9
+#define FC_SUBTYPE_PS_POLL 10
+#define FC_SUBTYPE_RTS 11
+#define FC_SUBTYPE_CTS 12
+#define FC_SUBTYPE_ACK 13
+#define FC_SUBTYPE_CF_END 14
+#define FC_SUBTYPE_CF_END_ACK 15
+
+
+#define FC_SUBTYPE_DATA 0
+#define FC_SUBTYPE_DATA_CF_ACK 1
+#define FC_SUBTYPE_DATA_CF_POLL 2
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3
+#define FC_SUBTYPE_NULL 4
+#define FC_SUBTYPE_CF_ACK 5
+#define FC_SUBTYPE_CF_POLL 6
+#define FC_SUBTYPE_CF_ACK_POLL 7
+#define FC_SUBTYPE_QOS_DATA 8
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11
+#define FC_SUBTYPE_QOS_NULL 12
+#define FC_SUBTYPE_QOS_CF_POLL 14
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15
+
+
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+
+
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK)
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)
+
+
+
+
+#define QOS_PRIO_SHIFT 0
+#define QOS_PRIO_MASK 0x0007
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)
+
+
+#define QOS_TID_SHIFT 0
+#define QOS_TID_MASK 0x000f
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)
+
+
+#define QOS_EOSP_SHIFT 4
+#define QOS_EOSP_MASK 0x0010
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)
+
+
+#define QOS_ACK_NORMAL_ACK 0
+#define QOS_ACK_NO_ACK 1
+#define QOS_ACK_NO_EXP_ACK 2
+#define QOS_ACK_BLOCK_ACK 3
+#define QOS_ACK_SHIFT 5
+#define QOS_ACK_MASK 0x0060
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)
+
+
+#define QOS_AMSDU_SHIFT 7
+#define QOS_AMSDU_MASK 0x0080
+
+
+
+
+
+
+#define DOT11_MNG_AUTH_ALGO_LEN 2
+#define DOT11_MNG_AUTH_SEQ_LEN 2
+#define DOT11_MNG_BEACON_INT_LEN 2
+#define DOT11_MNG_CAP_LEN 2
+#define DOT11_MNG_AP_ADDR_LEN 6
+#define DOT11_MNG_LISTEN_INT_LEN 2
+#define DOT11_MNG_REASON_LEN 2
+#define DOT11_MNG_AID_LEN 2
+#define DOT11_MNG_STATUS_LEN 2
+#define DOT11_MNG_TIMESTAMP_LEN 8
+
+
+#define DOT11_AID_MASK 0x3fff
+
+
+#define DOT11_RC_RESERVED 0
+#define DOT11_RC_UNSPECIFIED 1
+#define DOT11_RC_AUTH_INVAL 2
+#define DOT11_RC_DEAUTH_LEAVING 3
+#define DOT11_RC_INACTIVITY 4
+#define DOT11_RC_BUSY 5
+#define DOT11_RC_INVAL_CLASS_2 6
+#define DOT11_RC_INVAL_CLASS_3 7
+#define DOT11_RC_DISASSOC_LEAVING 8
+#define DOT11_RC_NOT_AUTH 9
+#define DOT11_RC_BAD_PC 10
+#define DOT11_RC_BAD_CHANNELS 11
+
+
+
+#define DOT11_RC_UNSPECIFIED_QOS 32
+#define DOT11_RC_INSUFFCIENT_BW 33
+#define DOT11_RC_EXCESSIVE_FRAMES 34
+#define DOT11_RC_TX_OUTSIDE_TXOP 35
+#define DOT11_RC_LEAVING_QBSS 36
+#define DOT11_RC_BAD_MECHANISM 37
+#define DOT11_RC_SETUP_NEEDED 38
+#define DOT11_RC_TIMEOUT 39
+
+#define DOT11_RC_MAX 23
+
+#define DOT11_RC_TDLS_PEER_UNREACH 25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
+
+
+#define DOT11_SC_SUCCESS 0
+#define DOT11_SC_FAILURE 1
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2
+
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3
+#define DOT11_SC_TDLS_SEC_DISABLED 5
+#define DOT11_SC_LIFETIME_REJ 6
+#define DOT11_SC_NOT_SAME_BSS 7
+#define DOT11_SC_CAP_MISMATCH 10
+#define DOT11_SC_REASSOC_FAIL 11
+#define DOT11_SC_ASSOC_FAIL 12
+#define DOT11_SC_AUTH_MISMATCH 13
+#define DOT11_SC_AUTH_SEQ 14
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15
+#define DOT11_SC_AUTH_TIMEOUT 16
+#define DOT11_SC_ASSOC_BUSY_FAIL 17
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25
+#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26
+#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28
+#define DOT11_SC_ASSOC_TRY_LATER 30
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31
+
+#define DOT11_SC_DECLINED 37
+#define DOT11_SC_INVALID_PARAMS 38
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42
+#define DOT11_SC_INVALID_AKMP 43
+#define DOT11_SC_INVALID_RSNIE_CAP 45
+#define DOT11_SC_DLS_NOT_ALLOWED 48
+#define DOT11_SC_INVALID_PMKID 53
+#define DOT11_SC_INVALID_MDID 54
+#define DOT11_SC_INVALID_FTIE 55
+
+#define DOT11_SC_UNEXP_MSG 70
+#define DOT11_SC_INVALID_SNONCE 71
+#define DOT11_SC_INVALID_RSNIE 72
+
+
+#define DOT11_MNG_DS_PARAM_LEN 1
+#define DOT11_MNG_IBSS_PARAM_LEN 2
+
+
+#define DOT11_MNG_TIM_FIXED_LEN 3
+#define DOT11_MNG_TIM_DTIM_COUNT 0
+#define DOT11_MNG_TIM_DTIM_PERIOD 1
+#define DOT11_MNG_TIM_BITMAP_CTL 2
+#define DOT11_MNG_TIM_PVB 3
+
+
+#define TLV_TAG_OFF 0
+#define TLV_LEN_OFF 1
+#define TLV_HDR_LEN 2
+#define TLV_BODY_OFF 2
+
+
+#define DOT11_MNG_SSID_ID 0
+#define DOT11_MNG_RATES_ID 1
+#define DOT11_MNG_FH_PARMS_ID 2
+#define DOT11_MNG_DS_PARMS_ID 3
+#define DOT11_MNG_CF_PARMS_ID 4
+#define DOT11_MNG_TIM_ID 5
+#define DOT11_MNG_IBSS_PARMS_ID 6
+#define DOT11_MNG_COUNTRY_ID 7
+#define DOT11_MNG_HOPPING_PARMS_ID 8
+#define DOT11_MNG_HOPPING_TABLE_ID 9
+#define DOT11_MNG_REQUEST_ID 10
+#define DOT11_MNG_QBSS_LOAD_ID 11
+#define DOT11_MNG_EDCA_PARAM_ID 12
+#define DOT11_MNG_CHALLENGE_ID 16
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32
+#define DOT11_MNG_PWR_CAP_ID 33
+#define DOT11_MNG_TPC_REQUEST_ID 34
+#define DOT11_MNG_TPC_REPORT_ID 35
+#define DOT11_MNG_SUPP_CHANNELS_ID 36
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37
+#define DOT11_MNG_MEASURE_REQUEST_ID 38
+#define DOT11_MNG_MEASURE_REPORT_ID 39
+#define DOT11_MNG_QUIET_ID 40
+#define DOT11_MNG_IBSS_DFS_ID 41
+#define DOT11_MNG_ERP_ID 42
+#define DOT11_MNG_TS_DELAY_ID 43
+#define DOT11_MNG_HT_CAP 45
+#define DOT11_MNG_QOS_CAP_ID 46
+#define DOT11_MNG_NONERP_ID 47
+#define DOT11_MNG_RSN_ID 48
+#define DOT11_MNG_EXT_RATES_ID 50
+#define DOT11_MNG_AP_CHREP_ID 51
+#define DOT11_MNG_NBR_REP_ID 52
+#define DOT11_MNG_MDIE_ID 54
+#define DOT11_MNG_FTIE_ID 55
+#define DOT11_MNG_FT_TI_ID 56
+#define DOT11_MNG_REGCLASS_ID 59
+#define DOT11_MNG_EXT_CSA_ID 60
+#define DOT11_MNG_HT_ADD 61
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62
+#define DOT11_MNG_WAPI_ID 68
+#define DOT11_MNG_TIME_ADVERTISE_ID 69
+#define DOT11_MNG_RRM_CAP_ID 70
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73
+#define DOT11_MNG_HT_OBSS_ID 74
+#define DOT11_MNG_CHANNEL_USAGE 97
+#define DOT11_MNG_TIME_ZONE_ID 98
+#define DOT11_MNG_LINK_IDENTIFIER_ID 101
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104
+#define DOT11_MNG_PTI_CONTROL_ID 105
+#define DOT11_MNG_PU_BUFFER_STATUS_ID 106
+#define DOT11_MNG_INTERWORKING_ID 107
+#define DOT11_MNG_ADVERTISEMENT_ID 108
+#define DOT11_MNG_EXP_BW_REQ_ID 109
+#define DOT11_MNG_QOS_MAP_ID 110
+#define DOT11_MNG_ROAM_CONSORT_ID 111
+#define DOT11_MNG_EMERGCY_ALERT_ID 112
+#define DOT11_MNG_EXT_CAP_ID 127
+#define DOT11_MNG_VHT_CAP_ID 191
+#define DOT11_MNG_VHT_OPERATION_ID 192
+
+#define DOT11_MNG_WPA_ID 221
+#define DOT11_MNG_PROPR_ID 221
+
+#define DOT11_MNG_VS_ID 221
+
+
+#define DOT11_RATE_BASIC 0x80
+#define DOT11_RATE_MASK 0x7F
+
+
+#define DOT11_MNG_ERP_LEN 1
+#define DOT11_MNG_NONERP_PRESENT 0x01
+#define DOT11_MNG_USE_PROTECTION 0x02
+#define DOT11_MNG_BARKER_PREAMBLE 0x04
+
+#define DOT11_MGN_TS_DELAY_LEN 4
+#define TS_DELAY_FIELD_SIZE 4
+
+
+#define DOT11_CAP_ESS 0x0001
+#define DOT11_CAP_IBSS 0x0002
+#define DOT11_CAP_POLLABLE 0x0004
+#define DOT11_CAP_POLL_RQ 0x0008
+#define DOT11_CAP_PRIVACY 0x0010
+#define DOT11_CAP_SHORT 0x0020
+#define DOT11_CAP_PBCC 0x0040
+#define DOT11_CAP_AGILITY 0x0080
+#define DOT11_CAP_SPECTRUM 0x0100
+#define DOT11_CAP_SHORTSLOT 0x0400
+#define DOT11_CAP_RRM 0x1000
+#define DOT11_CAP_CCK_OFDM 0x2000
+
+
+
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0
+
+#define DOT11_EXT_CAP_SPSMP 6
+
+#define DOT11_EXT_CAP_BSS_TRANSITION_MGMT 19
+
+#define DOT11_EXT_CAP_IW 31
+
+#define DOT11_EXT_CAP_SI 41
+#define DOT11_EXT_CAP_SI_MASK 0x0E
+
+
+#define DOT11_ACTION_HDR_LEN 2
+#define DOT11_ACTION_CAT_OFF 0
+#define DOT11_ACTION_ACT_OFF 1
+
+
+#define DOT11_ACTION_CAT_ERR_MASK 0x80
+#define DOT11_ACTION_CAT_MASK 0x7F
+#define DOT11_ACTION_CAT_SPECT_MNG 0
+#define DOT11_ACTION_CAT_QOS 1
+#define DOT11_ACTION_CAT_DLS 2
+#define DOT11_ACTION_CAT_BLOCKACK 3
+#define DOT11_ACTION_CAT_PUBLIC 4
+#define DOT11_ACTION_CAT_RRM 5
+#define DOT11_ACTION_CAT_FBT 6
+#define DOT11_ACTION_CAT_HT 7
+#define DOT11_ACTION_CAT_SA_QUERY 8
+#define DOT11_ACTION_CAT_PDPA 9
+#define DOT11_ACTION_CAT_BSSMGMT 10
+#define DOT11_ACTION_NOTIFICATION 17
+#define DOT11_ACTION_CAT_VSP 126
+#define DOT11_ACTION_CAT_VS 127
+
+
+#define DOT11_SM_ACTION_M_REQ 0
+#define DOT11_SM_ACTION_M_REP 1
+#define DOT11_SM_ACTION_TPC_REQ 2
+#define DOT11_SM_ACTION_TPC_REP 3
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4
+#define DOT11_SM_ACTION_EXT_CSA 5
+
+
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0
+#define DOT11_ACTION_ID_HT_MIMO_PS 1
+
+
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4
+
+
+#define DOT11_BA_ACTION_ADDBA_REQ 0
+#define DOT11_BA_ACTION_ADDBA_RESP 1
+#define DOT11_BA_ACTION_DELBA 2
+
+
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6
+
+#define DOT11_ADDBA_POLICY_DELAYED 0
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1
+
+
+#define DOT11_FT_ACTION_FT_RESERVED 0
+#define DOT11_FT_ACTION_FT_REQ 1
+#define DOT11_FT_ACTION_FT_RES 2
+#define DOT11_FT_ACTION_FT_CON 3
+#define DOT11_FT_ACTION_FT_ACK 4
+
+
+#define DOT11_DLS_ACTION_REQ 0
+#define DOT11_DLS_ACTION_RESP 1
+#define DOT11_DLS_ACTION_TD 2
+
+
+#define DOT11_WNM_ACTION_EVENT_REQ 0
+#define DOT11_WNM_ACTION_EVENT_REP 1
+#define DOT11_WNM_ACTION_DIAG_REQ 2
+#define DOT11_WNM_ACTION_DIAG_REP 3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ 4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP 5
+#define DOT11_WNM_ACTION_BSS_TRANS_QURY 6
+#define DOT11_WNM_ACTION_BSS_TRANS_REQ 7
+#define DOT11_WNM_ACTION_BSS_TRANS_RESP 8
+#define DOT11_WNM_ACTION_FMS_REQ 9
+#define DOT11_WNM_ACTION_FMS_RESP 10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12
+#define DOT11_WNM_ACTION_TFS_REQ 13
+#define DOT11_WNM_ACTION_TFS_RESP 14
+#define DOT11_WNM_ACTION_TFS_NOTIFY 15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17
+#define DOT11_WNM_ACTION_TIM_BCAST_REQ 18
+#define DOT11_WNM_ACTION_TIM_BCAST_RESP 19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22
+#define DOT11_WNM_ACTION_DMS_REQ 23
+#define DOT11_WNM_ACTION_DMS_RESP 24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ 26
+#define DOT11_WNM_ACTION_NOTFCTN_RES 27
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+ uint8 category;
+ uint8 action;
+ struct ether_addr da;
+ struct ether_addr sa;
+ uint16 cap;
+ uint16 timeout;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+ uint8 category;
+ uint8 action;
+ uint16 status;
+ struct ether_addr da;
+ struct ether_addr sa;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_query {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 reason;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_query dot11_bss_trans_query_t;
+#define DOT11_BSS_TRANS_QUERY_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 reqmode;
+ uint16 disassoc_tmr;
+ uint8 validity_intrvl;
+ uint8 data[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_req dot11_bss_trans_req_t;
+#define DOT11_BSS_TRANS_REQ_LEN 7
+
+#define DOT11_BSS_TERM_DUR_LEN 12
+
+
+
+#define DOT11_BSS_TRNS_REQMODE_PREF_LIST_INCL 0x01
+#define DOT11_BSS_TRNS_REQMODE_ABRIDGED 0x02
+#define DOT11_BSS_TRNS_REQMODE_DISASSOC_IMMINENT 0x04
+#define DOT11_BSS_TRNS_REQMODE_BSS_TERM_INCL 0x08
+#define DOT11_BSS_TRNS_REQMODE_ESS_DISASSOC_IMNT 0x10
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_res {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 status;
+ uint8 term_delay;
+ uint8 data[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_trans_res dot11_bss_trans_res_t;
+#define DOT11_BSS_TRANS_RES_LEN 5
+
+
+#define DOT11_BSS_TRNS_RES_STATUS_ACCEPT 0
+#define DOT11_BSS_TRNS_RES_STATUS_REJECT 1
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_BCN 2
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_CAP 3
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_UNDESIRED 4
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_DELAY_REQ 5
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_BSS_LIST_PROVIDED 6
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_NO_SUITABLE_BSS 7
+#define DOT11_BSS_TRNS_RES_STATUS_REJ_LEAVING_ESS 8
+
+
+
+#define DOT11_NBR_RPRT_BSSID_INFO_REACHABILTY 0x0003
+#define DOT11_NBR_RPRT_BSSID_INFO_SEC 0x0004
+#define DOT11_NBR_RPRT_BSSID_INFO_KEY_SCOPE 0x0008
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP 0x03f0
+
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_SPEC_MGMT 0x0010
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_QOS 0x0020
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_APSD 0x0040
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_RDIO_MSMT 0x0080
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_DEL_BA 0x0100
+#define DOT11_NBR_RPRT_BSSID_INFO_CAP_IMM_BA 0x0200
+
+
+#define DOT11_NBR_RPRT_SUBELEM_BSS_CANDDT_PREF_ID 3
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 addba_param_set;
+ uint16 timeout;
+ uint16 start_seqnum;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 status;
+ uint16 addba_param_set;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9
+
+
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000
+#define DOT11_DELBA_PARAM_TID_SHIFT 12
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category;
+ uint8 action;
+ uint16 delba_param_set;
+ uint16 reason;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6
+
+
+#define SA_QUERY_REQUEST 0
+#define SA_QUERY_RESPONSE 1
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+ uint8 category;
+ uint8 action;
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+ uint8 category;
+ uint8 action;
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint16 status;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+
+
+
+
+
+#define DOT11_RRM_CAP_LEN 5
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+ uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+
+#define DOT11_RRM_CAP_LINK 0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
+#define DOT11_RRM_CAP_PARALLEL 2
+#define DOT11_RRM_CAP_REPEATED 3
+#define DOT11_RRM_CAP_BCN_PASSIVE 4
+#define DOT11_RRM_CAP_BCN_ACTIVE 5
+#define DOT11_RRM_CAP_BCN_TABLE 6
+#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_AP_CHANREP 16
+
+
+
+#define DOT11_OP_CLASS_NONE 255
+
+
+
+#define DOT11_RM_ACTION_RM_REQ 0
+#define DOT11_RM_ACTION_RM_REP 1
+#define DOT11_RM_ACTION_LM_REQ 2
+#define DOT11_RM_ACTION_LM_REP 3
+#define DOT11_RM_ACTION_NR_REQ 4
+#define DOT11_RM_ACTION_NR_REP 5
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint16 reps;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN 5
+
+
+#define DOT11_RMREQ_MODE_PARALLEL 1
+#define DOT11_RMREQ_MODE_ENABLE 2
+#define DOT11_RMREQ_MODE_REQUEST 4
+#define DOT11_RMREQ_MODE_REPORT 8
+#define DOT11_RMREQ_MODE_DURMAND 0x10
+
+
+#define DOT11_RMREP_MODE_LATE 1
+#define DOT11_RMREP_MODE_INCAPABLE 2
+#define DOT11_RMREP_MODE_REFUSED 4
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 reg;
+ uint8 channel;
+ uint16 interval;
+ uint16 duration;
+ uint8 bcn_mode;
+ struct ether_addr bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN 18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+ uint8 reg;
+ uint8 channel;
+ uint32 starttime[2];
+ uint16 duration;
+ uint8 frame_info;
+ uint8 rcpi;
+ uint8 rsni;
+ struct ether_addr bssid;
+ uint8 antenna_id;
+ uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN 26
+
+
+#define DOT11_RMREQ_BCN_PASSIVE 0
+#define DOT11_RMREQ_BCN_ACTIVE 1
+#define DOT11_RMREQ_BCN_TABLE 2
+
+
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID 1
+#define DOT11_RMREQ_BCN_REPDET_ID 2
+#define DOT11_RMREQ_BCN_REQUEST_ID 10
+#define DOT11_RMREQ_BCN_APCHREP_ID 51
+
+
+#define DOT11_RMREQ_BCN_REPDET_FIXED 0
+#define DOT11_RMREQ_BCN_REPDET_REQUEST 1
+#define DOT11_RMREQ_BCN_REPDET_ALL 2
+
+
+#define DOT11_RMREP_BCN_FRM_BODY 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr {
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+ uchar sub_elements[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t;
+#define DOT11_RMREP_NBR_LEN 13
+
+
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0
+#define DOT11_BSSTYPE_INDEPENDENT 1
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_SCANTYPE_ACTIVE 0
+#define DOT11_SCANTYPE_PASSIVE 1
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ uint8 txpwr;
+ uint8 maxtxpwr;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN 5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+ uint8 category;
+ uint8 action;
+ uint8 token;
+ dot11_tpc_rep_t tpc;
+ uint8 rxant;
+ uint8 txant;
+ uint8 rcpi;
+ uint8 rsni;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN 11
+
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
+
+
+#define RIFS_11N_TIME 2
+
+
+
+#define HT_SIG1_MCS_MASK 0x00007F
+#define HT_SIG1_CBW 0x000080
+#define HT_SIG1_HT_LENGTH 0xFFFF00
+
+
+#define HT_SIG2_SMOOTHING 0x000001
+#define HT_SIG2_NOT_SOUNDING 0x000002
+#define HT_SIG2_RESERVED 0x000004
+#define HT_SIG2_AGGREGATION 0x000008
+#define HT_SIG2_STBC_MASK 0x000030
+#define HT_SIG2_STBC_SHIFT 4
+#define HT_SIG2_FEC_CODING 0x000040
+#define HT_SIG2_SHORT_GI 0x000080
+#define HT_SIG2_ESS_MASK 0x000300
+#define HT_SIG2_ESS_SHIFT 8
+#define HT_SIG2_CRC 0x03FC00
+#define HT_SIG2_TAIL 0x1C0000
+
+
+#define APHY_SLOT_TIME 9
+#define APHY_SIFS_TIME 16
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SYMBOL_TIME 4
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define APHY_CWMIN 15
+
+
+#define BPHY_SLOT_TIME 20
+#define BPHY_SIFS_TIME 10
+#define BPHY_DIFS_TIME 50
+#define BPHY_PLCP_TIME 192
+#define BPHY_PLCP_SHORT_TIME 96
+#define BPHY_CWMIN 31
+
+
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+
+#define PHY_CWMAX 1023
+
+#define DOT11_MAXNUMFRAGS 16
+
+
+
+typedef int vht_group_id_t;
+
+
+
+#define VHT_SIGA1_CONST_MASK 0x800004
+
+#define VHT_SIGA1_20MHZ_VAL 0x000000
+#define VHT_SIGA1_40MHZ_VAL 0x000001
+#define VHT_SIGA1_80MHZ_VAL 0x000002
+#define VHT_SIGA1_160MHZ_VAL 0x000003
+
+#define VHT_SIGA1_STBC 0x000008
+
+#define VHT_SIGA1_GID_MAX_GID 0x3f
+#define VHT_SIGA1_GID_SHIFT 4
+#define VHT_SIGA1_GID_TO_AP 0x00
+#define VHT_SIGA1_GID_NOT_TO_AP 0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT 10
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+
+#define VHT_SIGA1_PARTIAL_AID_SHIFT 13
+
+
+#define VHT_SIGA2_GI_NONE 0x000000
+#define VHT_SIGA2_GI_SHORT 0x000001
+#define VHT_SIGA2_GI_W_MOD10 0x000002
+#define VHT_SIGA2_CODING_LDPC 0x000004
+#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100
+#define VHT_SIGA2_MCS_SHIFT 4
+
+#define VHT_SIGA2_B9_RESERVED 0x000200
+#define VHT_SIGA2_TAIL_MASK 0xfc0000
+#define VHT_SIGA2_TAIL_VALUE 0x000000
+
+#define VHT_SIGA2_SVC_BITS 16
+#define VHT_SIGA2_TAIL_BITS 6
+
+
+
+typedef struct d11cnt {
+ uint32 txfrag;
+ uint32 txmulti;
+ uint32 txfail;
+ uint32 txretry;
+ uint32 txretrie;
+ uint32 rxdup;
+ uint32 txrts;
+ uint32 txnocts;
+ uint32 txnoack;
+ uint32 rxfrag;
+ uint32 rxmulti;
+ uint32 rxcrc;
+ uint32 txfrmsnt;
+ uint32 rxundec;
+} d11cnt_t;
+
+
+#define BRCM_PROP_OUI "\x00\x90\x4C"
+
+
+
+#define BRCM_OUI "\x00\x10\x18"
+
+
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 ver;
+ uint8 assoc;
+ uint8 flags;
+ uint8 flags1;
+ uint16 amsdu_mtu_pref;
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 11
+#define BRCM_IE_VER 2
+#define BRCM_IE_LEGACY_AES_VER 1
+
+
+#define BRF_LZWDS 0x4
+#define BRF_BLOCKACK 0x8
+
+
+#define BRF1_AMSDU 0x1
+#define BRF1_WMEPS 0x4
+#define BRF1_PSOFIX 0x8
+#define BRF1_RX_LARGE_AGG 0x10
+#define BRF1_RFAWARE_DCS 0x20
+#define BRF1_SOFTAP 0x40
+
+
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2
+#define VNDR_IE_MIN_LEN 3
+#define VNDR_IE_MAX_LEN 256
+
+
+#define MCSSET_LEN 16
+#define MAX_MCS_NUM (128)
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD 4
+#define HT_CAP_IE_LEN 26
+#define HT_CAP_IE_TYPE 51
+
+#define HT_CAP_LDPC_CODING 0x0001
+#define HT_CAP_40MHZ 0x0002
+#define HT_CAP_MIMO_PS_MASK 0x000C
+#define HT_CAP_MIMO_PS_SHIFT 0x0002
+#define HT_CAP_MIMO_PS_OFF 0x0003
+#define HT_CAP_MIMO_PS_RTS 0x0001
+#define HT_CAP_MIMO_PS_ON 0x0000
+#define HT_CAP_GF 0x0010
+#define HT_CAP_SHORT_GI_20 0x0020
+#define HT_CAP_SHORT_GI_40 0x0040
+#define HT_CAP_TX_STBC 0x0080
+#define HT_CAP_RX_STBC_MASK 0x0300
+#define HT_CAP_RX_STBC_SHIFT 8
+#define HT_CAP_DELAYED_BA 0x0400
+#define HT_CAP_MAX_AMSDU 0x0800
+
+#define HT_CAP_DSSS_CCK 0x1000
+#define HT_CAP_PSMP 0x2000
+#define HT_CAP_40MHZ_INTOLERANT 0x4000
+#define HT_CAP_LSIG_TXOP 0x8000
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3
+
+#define VHT_MAX_MPDU 11454
+#define VHT_MPDU_MSDU_DELTA 56
+
+#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU 7935
+#define HT_MIN_AMSDU 3835
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03
+#define HT_PARAMS_DENSITY_MASK 0x1C
+#define HT_PARAMS_DENSITY_SHIFT 2
+
+
+#define AMPDU_MAX_MPDU_DENSITY 7
+#define AMPDU_RX_FACTOR_8K 0
+#define AMPDU_RX_FACTOR_16K 1
+#define AMPDU_RX_FACTOR_32K 2
+#define AMPDU_RX_FACTOR_64K 3
+#define AMPDU_RX_FACTOR_BASE 8*1024
+
+#define AMPDU_DELIMITER_LEN 4
+#define AMPDU_DELIMITER_LEN_MAX 63
+
+#define HT_CAP_EXT_PCO 0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT 1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8
+#define HT_CAP_EXT_HTC 0x0400
+#define HT_CAP_EXT_RD_RESP 0x0800
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+ uint8 ctl_ch;
+ uint8 byte1;
+ uint16 opmode;
+ uint16 misc_bits;
+ uint8 basic_mcs[MCSSET_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type;
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22
+#define HT_ADD_IE_TYPE 52
+
+
+#define HT_BW_ANY 0x04
+#define HT_RIFS_PERMITTED 0x08
+
+
+#define HT_OPMODE_MASK 0x0003
+#define HT_OPMODE_SHIFT 0
+#define HT_OPMODE_PURE 0x0000
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+#define HT_BASIC_STBC_MCS 0x007f
+#define HT_DUAL_STBC_PROT 0x0080
+#define HT_SECOND_BCN 0x0100
+#define HT_LSIG_TXOP 0x0200
+#define HT_PCO_ACTIVE 0x0400
+#define HT_PCO_PHASE 0x0800
+#define HT_DUALCTS_PROTECTION 0x0080
+
+
+#define DOT11N_2G_TXBURST_LIMIT 6160
+#define DOT11N_5G_TXBURST_LIMIT 3080
+
+
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED)
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40)
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL)
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie)))
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF)
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST)
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT)
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t)
+
+
+#define HT_CTRL_LA_TRQ 0x00000002
+#define HT_CTRL_LA_MAI 0x0000003C
+#define HT_CTRL_LA_MAI_SHIFT 2
+#define HT_CTRL_LA_MAI_MRQ 0x00000004
+#define HT_CTRL_LA_MAI_MSI 0x00000038
+#define HT_CTRL_LA_MFSI 0x000001C0
+#define HT_CTRL_LA_MFSI_SHIFT 6
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000
+#define HT_CTRL_CAL_POS 0x00030000
+#define HT_CTRL_CAL_SEQ 0x000C0000
+#define HT_CTRL_CSI_STEERING 0x00C00000
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB 0
+#define HT_CTRL_CSI_STEER_CSI 1
+#define HT_CTRL_CSI_STEER_NCOM 2
+#define HT_CTRL_CSI_STEER_COM 3
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000
+#define HT_CTRL_AC_CONSTRAINT 0x40000000
+#define HT_CTRL_RDG_MOREPPDU 0x80000000
+
+#define HT_OPMODE_OPTIONAL 0x0001
+#define HT_OPMODE_HT20IN40 0x0002
+#define HT_OPMODE_MIXED 0x0003
+#define HT_OPMODE_NONGF 0x0004
+#define DOT11N_TXBURST 0x0008
+#define DOT11N_OBSS_NONHT 0x0010
+
+
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+ uint32 vht_cap_info;
+
+ uint16 rx_mcs_map;
+ uint16 rx_max_rate;
+ uint16 tx_mcs_map;
+ uint16 tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+#define VHT_CAP_IE_LEN 12
+
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c
+#define VHT_CAP_INFO_LDPC 0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ 0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ 0x00000040
+#define VHT_CAP_INFO_TX_STBC 0x00000080
+
+#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT 8
+#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13
+
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16
+#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000
+#define VHT_CAP_INFO_TXOPPS 0x00200000
+#define VHT_CAP_INFO_HTCVHT 0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23
+
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26
+
+
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_MCS_MAP_0_7 0
+#define VHT_CAP_MCS_MAP_0_8 1
+#define VHT_CAP_MCS_MAP_0_9 2
+#define VHT_CAP_MCS_MAP_NONE 3
+
+#define VHT_CAP_MCS_MAP_NSS_MAX 8
+
+
+typedef enum vht_cap_chan_width {
+ VHT_CAP_CHAN_WIDTH_20_40 = 0x00,
+ VHT_CAP_CHAN_WIDTH_80 = 0x04,
+ VHT_CAP_CHAN_WIDTH_160 = 0x08
+} vht_cap_chan_width_t;
+
+
+typedef enum vht_cap_max_mpdu_len {
+ VHT_CAP_MPDU_MAX_4K = 0x00,
+ VHT_CAP_MPDU_MAX_8K = 0x01,
+ VHT_CAP_MPDU_MAX_11K = 0x02
+} vht_cap_max_mpdu_len_t;
+
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+ uint8 chan_width;
+ uint8 chan1;
+ uint8 chan2;
+ uint16 supp_mcs;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+ VHT_OP_CHAN_WIDTH_20_40 = 0,
+ VHT_OP_CHAN_WIDTH_80 = 1,
+ VHT_OP_CHAN_WIDTH_160 = 2,
+ VHT_OP_CHAN_WIDTH_80_80 = 3
+} vht_op_chan_width_t;
+
+
+#define VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams) ((numSpatialStreams-1)*2)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(numSpatialStreams, mcsMap) \
+ ((mcsMap >> VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams)) & 0x3)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(numSpatialStreams, numMcs, mcsMap) \
+ (mcsMap |= ((numMcs & 0x3) << VHT_MCS_MAP_GET_SS_IDX(numSpatialStreams)))
+
+
+#define WPA_OUI "\x00\x50\xF2"
+#define WPA_OUI_LEN 3
+#define WPA_OUI_TYPE 1
+#define WPA_VERSION 1
+#define WPA2_OUI "\x00\x0F\xAC"
+#define WPA2_OUI_LEN 3
+#define WPA2_VERSION 1
+#define WPA2_VERSION_LEN 2
+
+
+#define WPS_OUI "\x00\x50\xF2"
+#define WPS_OUI_LEN 3
+#define WPS_OUI_TYPE 4
+
+
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI MAC_OUI
+#else
+#define WFA_OUI "\x50\x6F\x9A"
+#endif
+#define WFA_OUI_LEN 3
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_P2P 9
+#endif
+
+#define WFA_OUI_TYPE_TPC 8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_WFD 10
+#endif
+
+
+#define RSN_AKM_NONE 0
+#define RSN_AKM_UNSPECIFIED 1
+#define RSN_AKM_PSK 2
+#define RSN_AKM_FBT_1X 3
+#define RSN_AKM_FBT_PSK 4
+#define RSN_AKM_MFP_1X 5
+#define RSN_AKM_MFP_PSK 6
+#define RSN_AKM_TPK 7
+
+
+#define DOT11_MAX_DEFAULT_KEYS 4
+#define DOT11_MAX_KEY_SIZE 32
+#define DOT11_MAX_IV_SIZE 16
+#define DOT11_EXT_IV_FLAG (1<<5)
+#define DOT11_WPA_KEY_RSC_LEN 8
+
+#define WEP1_KEY_SIZE 5
+#define WEP1_KEY_HEX_SIZE 10
+#define WEP128_KEY_SIZE 13
+#define WEP128_KEY_HEX_SIZE 26
+#define TKIP_MIC_SIZE 8
+#define TKIP_EOM_SIZE 7
+#define TKIP_EOM_FLAG 0x5a
+#define TKIP_KEY_SIZE 32
+#define TKIP_MIC_AUTH_TX 16
+#define TKIP_MIC_AUTH_RX 24
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX
+#define AES_KEY_SIZE 16
+#define AES_MIC_SIZE 8
+#define BIP_KEY_SIZE 16
+
+
+#define WCN_OUI "\x00\x50\xf2"
+#define WCN_TYPE 4
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN 16
+#define SMS4_WPI_CBC_MAC_LEN 16
+#endif
+
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mdid;
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS 0x01
+#define FBT_MDID_CAP_RRP 0x02
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control;
+ uint8 mic[16];
+ uint8 anonce[32];
+ uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED 0
+#define TIE_TYPE_REASSOC_DEADLINE 1
+#define TIE_TYPE_KEY_LIEFTIME 2
+#define TIE_TYPE_ASSOC_COMEBACK 3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+ uint8 id;
+ uint8 len;
+ uint8 type;
+ uint32 value;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+ uint8 id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_len;
+ uint8 rsc[8];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 20
+#define WAPI_VERSION 1
+#define WAPI_VERSION_LEN 2
+#define WAPI_OUI "\x00\x14\x72"
+#define WAPI_OUI_LEN DOT11_OUI_LEN
+#endif
+
+
+#define WMM_OUI "\x00\x50\xF2"
+#define WMM_OUI_LEN 3
+#define WMM_OUI_TYPE 2
+#define WMM_VERSION 1
+#define WMM_VERSION_LEN 1
+
+
+#define WMM_OUI_SUBTYPE_PARAMETER 1
+#define WMM_PARAMETER_IE_LEN 24
+
+
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ struct ether_addr tdls_init_mac;
+ struct ether_addr tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+ uint8 id;
+ uint8 len;
+ uint32 offset;
+ uint32 interval;
+ uint32 awake_win_slots;
+ uint32 max_wake_win;
+ uint16 idle_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN 18
+
+
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+ uint8 id;
+ uint8 len;
+ uint16 switch_time;
+ uint16 switch_timeout;
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4
+
+
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+ uint8 id;
+ uint8 len;
+ uint8 tid;
+ uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN 3
+
+
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+ uint8 id;
+ uint8 len;
+ uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN 1
+#define TDLS_PU_BUFFER_STATUS_AC_BK 1
+#define TDLS_PU_BUFFER_STATUS_AC_BE 2
+#define TDLS_PU_BUFFER_STATUS_AC_VI 4
+#define TDLS_PU_BUFFER_STATUS_AC_VO 8
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 00000000000000..3ee5a748695a64
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h 294267 2011-11-04 23:41:52Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP 1
+#define BTA_PROT_ACTIVITY_REPORT 2
+#define BTA_PROT_SECURITY 3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST 4
+#define BTA_PROT_LINK_SUPERVISION_REPLY 5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS 1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS 2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS 3
+#define BTA_TYPE_ID_CAPABILITIES 4
+#define BTA_TYPE_ID_VERSION 5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 00000000000000..f391e68c1104c4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,131 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+ uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+ uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */
+ uint8 type; /* WME_TYPE */
+ uint8 subtype; /* WME_SUBTYPE_TSPEC */
+ uint8 version; /* WME_VERSION */
+ tsinfo_t tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint32 min_srv_interval; /* Minimum Service Interval (us) */
+ uint32 max_srv_interval; /* Maximum Service Interval (us) */
+ uint32 inactivity_interval; /* Inactivity Interval (us) */
+ uint32 suspension_interval; /* Suspension Interval (us) */
+ uint32 srv_start_time; /* Service Start Time (us) */
+ uint32 min_data_rate; /* Minimum Data Rate (bps) */
+ uint32 mean_data_rate; /* Mean Data Rate (bps) */
+ uint32 peak_data_rate; /* Peak Data Rate (bps) */
+ uint32 max_burst_size; /* Maximum Burst Size (bytes) */
+ uint32 delay_bound; /* Delay Bound (us) */
+ uint32 min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */
+#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */
+#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */
+#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \
+ TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \
+ TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+ ((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \
+ ((prio) << TS_INFO_USER_PRIO_SHIFT))
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
+#define DOT11E_STATUS_END_TS 37 /* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 00000000000000..116a226b1b424e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+
+#define PRIO_8021D_NONE 2
+#define PRIO_8021D_BK 1
+#define PRIO_8021D_BE 0
+#define PRIO_8021D_EE 3
+#define PRIO_8021D_CL 4
+#define PRIO_8021D_VI 5
+#define PRIO_8021D_VO 6
+#define PRIO_8021D_NC 7
+#define MAXPRIO 7
+#define NUMPRIO (MAXPRIO + 1)
+
+#define ALLPRIO -1
+
+
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))	/* maps NONE(2)<->BE(0) */
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 00000000000000..e54b2e38187280
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,82 @@
+/*
+ * Broadcom Ethernet-type protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h 294352 2011-11-06 19:23:00Z $
+ */
+
+
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+
+
+
+
+#define BCMILCP_SUBTYPE_RATE 1
+#define BCMILCP_SUBTYPE_LINK 2
+#define BCMILCP_SUBTYPE_CSA 3
+#define BCMILCP_SUBTYPE_LARQ 4
+#define BCMILCP_SUBTYPE_VENDOR 5
+#define BCMILCP_SUBTYPE_FLH 17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
+#define BCMILCP_SUBTYPE_CERT 32770
+#define BCMILCP_SUBTYPE_SES 32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED 0
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BCMILCP_BCM_SUBTYPE_SES 2
+
+
+#define BCMILCP_BCM_SUBTYPE_DPT 4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+ uint16 subtype;
+ uint16 length;
+ uint8 version;
+ uint8 oui[3];
+
+ uint16 usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 00000000000000..3bbbbbc5b4b43e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,328 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h 326276 2012-04-06 23:16:42Z $
+ *
+ */
+
+
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION 2
+#define BCM_MSG_IFNAME_MAX 16
+
+
+#define WLC_EVENT_MSG_LINK 0x01
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02
+#define WLC_EVENT_MSG_GROUP 0x04
+#define WLC_EVENT_MSG_UNKBSS 0x08
+#define WLC_EVENT_MSG_UNKIF 0x10
+
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint16 version;
+ uint16 flags;
+ uint32 event_type;
+ uint32 status;
+ uint32 reason;
+ uint32 auth_type;
+ uint32 datalen;
+ struct ether_addr addr;
+ char ifname[BCM_MSG_IFNAME_MAX];
+ uint8 ifidx;
+ uint8 bsscfgidx;
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+ struct ether_header eth;
+ bcmeth_hdr_t bcm_hdr;
+ wl_event_msg_t event;
+
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
+
+
+#define WLC_E_SET_SSID 0
+#define WLC_E_JOIN 1
+#define WLC_E_START 2
+#define WLC_E_AUTH 3
+#define WLC_E_AUTH_IND 4
+#define WLC_E_DEAUTH 5
+#define WLC_E_DEAUTH_IND 6
+#define WLC_E_ASSOC 7
+#define WLC_E_ASSOC_IND 8
+#define WLC_E_REASSOC 9
+#define WLC_E_REASSOC_IND 10
+#define WLC_E_DISASSOC 11
+#define WLC_E_DISASSOC_IND 12
+#define WLC_E_QUIET_START 13
+#define WLC_E_QUIET_END 14
+#define WLC_E_BEACON_RX 15
+#define WLC_E_LINK 16
+#define WLC_E_MIC_ERROR 17
+#define WLC_E_NDIS_LINK 18
+#define WLC_E_ROAM 19
+#define WLC_E_TXFAIL 20
+#define WLC_E_PMKID_CACHE 21
+#define WLC_E_RETROGRADE_TSF 22
+#define WLC_E_PRUNE 23
+#define WLC_E_AUTOAUTH 24
+#define WLC_E_EAPOL_MSG 25
+#define WLC_E_SCAN_COMPLETE 26
+#define WLC_E_ADDTS_IND 27
+#define WLC_E_DELTS_IND 28
+#define WLC_E_BCNSENT_IND 29
+#define WLC_E_BCNRX_MSG 30
+#define WLC_E_BCNLOST_MSG 31
+#define WLC_E_ROAM_PREP 32
+#define WLC_E_PFN_NET_FOUND 33
+#define WLC_E_PFN_NET_LOST 34
+#define WLC_E_RESET_COMPLETE 35
+#define WLC_E_JOIN_START 36
+#define WLC_E_ROAM_START 37
+#define WLC_E_ASSOC_START 38
+#define WLC_E_IBSS_ASSOC 39
+#define WLC_E_RADIO 40
+#define WLC_E_PSM_WATCHDOG 41
+#define WLC_E_PROBREQ_MSG 44
+#define WLC_E_SCAN_CONFIRM_IND 45
+#define WLC_E_PSK_SUP 46
+#define WLC_E_COUNTRY_CODE_CHANGED 47
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48
+#define WLC_E_ICV_ERROR 49
+#define WLC_E_UNICAST_DECODE_ERROR 50
+#define WLC_E_MULTICAST_DECODE_ERROR 51
+#define WLC_E_TRACE 52
+#ifdef WLBTAMP
+#define WLC_E_BTA_HCI_EVENT 53
+#endif
+#define WLC_E_IF 54
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55
+#define WLC_E_RSSI 56
+#define WLC_E_PFN_SCAN_COMPLETE 57
+#define WLC_E_EXTLOG_MSG 58
+#define WLC_E_ACTION_FRAME 59
+#define WLC_E_ACTION_FRAME_COMPLETE 60
+#define WLC_E_PRE_ASSOC_IND 61
+#define WLC_E_PRE_REASSOC_IND 62
+#define WLC_E_CHANNEL_ADOPTED 63
+#define WLC_E_AP_STARTED 64
+#define WLC_E_DFS_AP_STOP 65
+#define WLC_E_DFS_AP_RESUME 66
+#define WLC_E_WAI_STA_EVENT 67
+#define WLC_E_WAI_MSG 68
+#define WLC_E_ESCAN_RESULT 69
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
+#define WLC_E_PROBRESP_MSG 71
+#define WLC_E_P2P_PROBREQ_MSG 72
+#define WLC_E_DCS_REQUEST 73
+
+#define WLC_E_FIFO_CREDIT_MAP 74
+
+#define WLC_E_ACTION_FRAME_RX 75
+#define WLC_E_WAKE_EVENT 76
+#define WLC_E_RM_COMPLETE 77
+#define WLC_E_HTSFSYNC 78
+#define WLC_E_OVERLAY_REQ 79
+#define WLC_E_CSA_COMPLETE_IND 80
+#define WLC_E_EXCESS_PM_WAKE_EVENT 81
+#define WLC_E_PFN_SCAN_NONE 82
+#define WLC_E_PFN_SCAN_ALLGONE 83
+#define WLC_E_GTK_PLUMBED 84
+#define WLC_E_ASSOC_IND_NDIS 85
+#define WLC_E_REASSOC_IND_NDIS 86
+#define WLC_E_ASSOC_REQ_IE 87
+#define WLC_E_ASSOC_RESP_IE 88
+#define WLC_E_ASSOC_RECREATED 89
+#define WLC_E_ACTION_FRAME_RX_NDIS 90
+#define WLC_E_AUTH_REQ 91
+#define WLC_E_TDLS_PEER_EVENT 92
+#define WLC_E_SPEEDY_RECREATE_FAIL 93
+#define WLC_E_LAST 94
+
+
+typedef struct {
+ uint event;
+ const char *name;
+} bcmevent_name_t;
+
+extern const bcmevent_name_t bcmevent_names[];
+extern const int bcmevent_names_size;
+
+
+#define WLC_E_STATUS_SUCCESS 0
+#define WLC_E_STATUS_FAIL 1
+#define WLC_E_STATUS_TIMEOUT 2
+#define WLC_E_STATUS_NO_NETWORKS 3
+#define WLC_E_STATUS_ABORT 4
+#define WLC_E_STATUS_NO_ACK 5
+#define WLC_E_STATUS_UNSOLICITED 6
+#define WLC_E_STATUS_ATTEMPT 7
+#define WLC_E_STATUS_PARTIAL 8
+#define WLC_E_STATUS_NEWSCAN 9
+#define WLC_E_STATUS_NEWASSOC 10
+#define WLC_E_STATUS_11HQUIET 11
+#define WLC_E_STATUS_SUPPRESS 12
+#define WLC_E_STATUS_NOCHANS 13
+#define WLC_E_STATUS_CS_ABORT 15
+#define WLC_E_STATUS_ERROR 16
+
+
+#define WLC_E_REASON_INITIAL_ASSOC 0
+#define WLC_E_REASON_LOW_RSSI 1
+#define WLC_E_REASON_DEAUTH 2
+#define WLC_E_REASON_DISASSOC 3
+#define WLC_E_REASON_BCNS_LOST 4
+#define WLC_E_REASON_MINTXRATE 9
+#define WLC_E_REASON_TXFAIL 10
+
+
+#define WLC_E_REASON_FAST_ROAM_FAILED 5
+#define WLC_E_REASON_DIRECTED_ROAM 6
+#define WLC_E_REASON_TSPEC_REJECTED 7
+#define WLC_E_REASON_BETTER_AP 8
+
+
+#define WLC_E_REASON_REQUESTED_ROAM 11
+
+
+#define WLC_E_PRUNE_ENCR_MISMATCH 1
+#define WLC_E_PRUNE_BCAST_BSSID 2
+#define WLC_E_PRUNE_MAC_DENY 3
+#define WLC_E_PRUNE_MAC_NA 4
+#define WLC_E_PRUNE_REG_PASSV 5
+#define WLC_E_PRUNE_SPCT_MGMT 6
+#define WLC_E_PRUNE_RADAR 7
+#define WLC_E_RSN_MISMATCH 8
+#define WLC_E_PRUNE_NO_COMMON_RATES 9
+#define WLC_E_PRUNE_BASIC_RATES 10
+#define WLC_E_PRUNE_CIPHER_NA 12
+#define WLC_E_PRUNE_KNOWN_STA 13
+#define WLC_E_PRUNE_WDS_PEER 15
+#define WLC_E_PRUNE_QBSS_LOAD 16
+#define WLC_E_PRUNE_HOME_AP 17
+
+
+#define WLC_E_SUP_OTHER 0
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1
+#define WLC_E_SUP_BAD_UCAST_WEP128 2
+#define WLC_E_SUP_BAD_UCAST_WEP40 3
+#define WLC_E_SUP_UNSUP_KEY_LEN 4
+#define WLC_E_SUP_PW_KEY_CIPHER 5
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7
+#define WLC_E_SUP_NO_INSTALL_FLAG 8
+#define WLC_E_SUP_MSG3_NO_GTK 9
+#define WLC_E_SUP_GRP_KEY_CIPHER 10
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
+#define WLC_E_SUP_SEND_FAIL 13
+#define WLC_E_SUP_DEAUTH 14
+#define WLC_E_SUP_WPA_PSK_TMO 15
+
+
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+ uint16 version;
+ uint16 channel;
+ int32 rssi;
+ uint32 mactime;
+ uint32 rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+
+typedef struct wl_event_data_if {
+ uint8 ifidx;
+ uint8 opcode;
+ uint8 reserved;
+ uint8 bssidx;
+ uint8 role;
+} wl_event_data_if_t;
+
+
+#define WLC_E_IF_ADD 1
+#define WLC_E_IF_DEL 2
+#define WLC_E_IF_CHANGE 3
+
+
+#define WLC_E_IF_ROLE_STA 0
+#define WLC_E_IF_ROLE_AP 1
+#define WLC_E_IF_ROLE_WDS 2
+#define WLC_E_IF_ROLE_P2P_GO 3
+#define WLC_E_IF_ROLE_P2P_CLIENT 4
+#ifdef WLBTAMP
+#define WLC_E_IF_ROLE_BTA_CREATOR 5
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6
+#endif
+
+
+#define WLC_E_LINK_BCN_LOSS 1
+#define WLC_E_LINK_DISASSOC 2
+#define WLC_E_LINK_ASSOC_REC 3
+#define WLC_E_LINK_BSSCFG_DIS 4
+
+
+#define WLC_E_OVL_DOWNLOAD 0
+#define WLC_E_OVL_UPDATE_IND 1
+
+
+#define WLC_E_TDLS_PEER_DISCOVERED 0
+#define WLC_E_TDLS_PEER_CONNECTED 1
+#define WLC_E_TDLS_PEER_DISCONNECTED 2
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
new file mode 100644
index 00000000000000..d5c3b76da5a835
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h 290206 2011-10-17 19:13:51Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define IP_VER_OFFSET 0x0
+#define IP_VER_MASK 0xf0
+#define IP_VER_SHIFT 4
+#define IP_VER_4 4
+#define IP_VER_6 6
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1
+#define IP_PROT_IGMP 0x2
+#define IP_PROT_TCP 0x6
+#define IP_PROT_UDP 0x11
+#define IP_PROT_ICMP6 0x3a
+
+
+#define IPV4_VER_HL_OFFSET 0
+#define IPV4_TOS_OFFSET 1
+#define IPV4_PKTLEN_OFFSET 2
+#define IPV4_PKTFLAG_OFFSET 6
+#define IPV4_PROT_OFFSET 9
+#define IPV4_CHKSUM_OFFSET 10
+#define IPV4_SRC_IP_OFFSET 12
+#define IPV4_DEST_IP_OFFSET 16
+#define IPV4_OPTIONS_OFFSET 20
+
+
+#define IPV4_VER_MASK 0xf0
+#define IPV4_VER_SHIFT 4
+
+#define IPV4_HLEN_MASK 0x0f
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN 4
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define IPV4_TOS_DSCP_MASK 0xfc
+#define IPV4_TOS_DSCP_SHIFT 2
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0
+#define IPV4_TOS_PREC_SHIFT 5
+
+#define IPV4_TOS_LOWDELAY 0x10
+#define IPV4_TOS_THROUGHPUT 0x8
+#define IPV4_TOS_RELIABILITY 0x4
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV 0x8000
+#define IPV4_FRAG_DONT 0x4000
+#define IPV4_FRAG_MORE 0x2000
+#define IPV4_FRAG_OFFSET_MASK 0x1fff
+
+#define IPV4_ADDR_STR_LEN 16
+
+
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+ uint8 addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+ uint8 version_ihl;
+ uint8 tos;
+ uint16 tot_len;
+ uint16 id;
+ uint16 frag;
+ uint8 ttl;
+ uint8 prot;
+ uint16 hdr_chksum;
+ uint8 src_ip[IPV4_ADDR_LEN];
+ uint8 dst_ip[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+
+#define IPV6_PAYLOAD_LEN_OFFSET 4
+#define IPV6_NEXT_HDR_OFFSET 6
+#define IPV6_HOP_LIMIT_OFFSET 7
+#define IPV6_SRC_IP_OFFSET 8
+#define IPV6_DEST_IP_OFFSET 24
+
+
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16
+
+
+#define IP_TOS46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+
+#define IPV6_EXTHDR_HOP 0
+#define IPV6_EXTHDR_ROUTING 43
+#define IPV6_EXTHDR_FRAGMENT 44
+#define IPV6_EXTHDR_AUTH 51
+#define IPV6_EXTHDR_NONE 59
+#define IPV6_EXTHDR_DEST 60
+
+#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \
+ ((prot) == IPV6_EXTHDR_ROUTING) || \
+ ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+ ((prot) == IPV6_EXTHDR_AUTH) || \
+ ((prot) == IPV6_EXTHDR_NONE) || \
+ ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 40
+
+#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+ uint8 nexthdr;
+ uint8 hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+ uint8 nexthdr;
+ uint8 rsvd;
+ uint16 frag_off;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)	/* total ext-header bytes; sets *proto to final next-header */
+{
+	uint16 len = 0, hlen;
+	struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+	while (IPV6_EXTHDR(eh->nexthdr)) {	/* walk the chained extension headers */
+		if (eh->nexthdr == IPV6_EXTHDR_NONE)
+			return -1;	/* "no next header": no upper-layer protocol follows */
+		else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+			hlen = 8;	/* fragment header has a fixed 8-byte size */
+		else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+			hlen = (eh->hdrlen + 2) << 2;	/* AH: size = (hdrlen + 2) * 4 bytes */
+		else
+			hlen = IPV6_EXTHDR_LEN(eh);	/* generic: (hdrlen + 1) * 8 bytes */
+
+		len += hlen;
+		eh = (struct ipv6_exthdr *)(h + len);	/* NOTE(review): no bounds check vs packet end -- caller must guarantee headers fit */
+	}
+
+	*proto = eh->nexthdr;	/* first non-extension next-header value */
+	return len;	/* total length of all extension headers */
+}
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
new file mode 100644
index 00000000000000..9533391827418b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * $Id: bcmipv6.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define ICMPV6_HEADER_TYPE 0x3A
+#define ICMPV6_PKT_TYPE_NS 135
+#define ICMPV6_PKT_TYPE_NA 136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1
+
+#define IPV6_VERSION 6
+#define IPV6_HOP_LIMIT 255
+
+#define IPV6_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | ((uint8 *)(a))[2] | \
+	((uint8 *)(a))[3] | ((uint8 *)(a))[4] | ((uint8 *)(a))[5] | ((uint8 *)(a))[6] | \
+	((uint8 *)(a))[7] | ((uint8 *)(a))[8] | ((uint8 *)(a))[9] | ((uint8 *)(a))[10] | \
+	((uint8 *)(a))[11] | ((uint8 *)(a))[12] | ((uint8 *)(a))[13] | ((uint8 *)(a))[14] | ((uint8 *)(a))[15]) == 0)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+ uint8 addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+ uint8 icmp6_type;
+ uint8 icmp6_code;
+ uint16 icmp6_cksum;
+ BWL_PRE_PACKED_STRUCT union {
+ uint32 reserved;
+ BWL_PRE_PACKED_STRUCT struct nd_advt {
+ uint32 reserved1:5,
+ override:1,
+ solicited:1,
+ router:1,
+ reserved2:24;
+ } BWL_POST_PACKED_STRUCT nd_advt;
+ } BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+ uint8 priority:4,
+ version:4;
+ uint8 flow_lbl[3];
+ uint16 payload_len;
+ uint8 nexthdr;
+ uint8 hop_limit;
+ struct ipv6_addr saddr;
+ struct ipv6_addr daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighbor Solicitation/Advertisement Option Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+ uint8 type;
+ uint8 len;
+ uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* !defined(_bcmipv6_h_) */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
new file mode 100644
index 00000000000000..8617985dd36d23
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h
@@ -0,0 +1,441 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h 294267 2011-11-04 23:41:52Z $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+ uint16 opcode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE 255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF)
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+ uint8 plh;
+ uint8 offset[2]; /* length so far */
+ uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+ uint8 plh;
+ uint8 offset[2];
+ uint8 len[2];
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+ uint8 plh;
+ uint8 key_length;
+ uint8 key_type;
+ uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+ uint8 plh;
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+ uint8 id;
+ uint8 service_type;
+ uint8 max_sdu[2];
+ uint8 sdu_ia_time[4];
+ uint8 access_latency[4];
+ uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+ uint8 llh[2];
+ uint8 txflow[16];
+ uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+ uint8 plh;
+ uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+ uint16 bredr;
+ plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+ hci_handle_t handle;
+ uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+ uint8 llh[2];
+ uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+ uint8 llh[2];
+ uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC 0
+#define EFS_SVCTYPE_BEST_EFFORT 1
+#define EFS_SVCTYPE_GUARANTEED 2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+ uint8 ecode;
+ uint8 plen;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete 0x0E
+#define HCI_Command_Status 0x0F
+#define HCI_Flush_Occurred 0x11
+#define HCI_Enhanced_Flush_Complete 0x39
+#define HCI_Physical_Link_Complete 0x40
+#define HCI_Channel_Select 0x41
+#define HCI_Disconnect_Physical_Link_Complete 0x42
+#define HCI_Logical_Link_Complete 0x45
+#define HCI_Disconnect_Logical_Link_Complete 0x46
+#define HCI_Flow_Spec_Modify_Complete 0x47
+#define HCI_Number_of_Completed_Data_Blocks 0x48
+#define HCI_Short_Range_Mode_Change_Complete 0x4C
+#define HCI_Status_Change_Event 0x4D
+#define HCI_Vendor_Specific 0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask 0x0001
+#define HCI_Channel_Select_Event_Mask 0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004
+#define HCI_Logical_Link_Complete_Event_Mask 0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000
+#define HCI_Status_Change_Event_Mask 0x2000
+#define HCI_All_Event_Mask 0x31e7
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+ uint8 status;
+ uint8 cmdpkts;
+ uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+ uint8 cmdpkts;
+ uint16 opcode;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint16 len;
+ uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+ uint8 status;
+ uint8 AMP_status;
+ uint32 bandwidth;
+ uint32 gbandwidth;
+ uint32 latency;
+ uint32 PDU_size;
+ uint8 ctrl_type;
+ uint16 PAL_cap;
+ uint16 AMP_ASSOC_len;
+ uint32 max_flush_timeout;
+ uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+ uint8 status;
+ uint16 llh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+ uint8 status;
+ uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+ uint8 status;
+ uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+ uint8 status;
+ uint16 ACL_pkt_len;
+ uint16 data_block_len;
+ uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+ uint16 handle;
+ uint16 pkts;
+ uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+ uint16 num_blocks;
+ uint8 num_handles;
+ data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+ uint8 status;
+ uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+ uint8 status;
+ uint8 plh;
+ uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+ uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+ uint8 status;
+ uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+ uint8 status;
+ hci_handle_t handle;
+ uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+ uint8 status;
+ uint8 ld_aware;
+ uint8 ld[2];
+ uint8 ld_opts;
+ uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+ uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+ uint8 len;
+ uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+ uint8 status;
+ uint8 hci_version;
+ uint16 hci_revision;
+ uint8 pal_version;
+ uint16 mfg_name;
+ uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE 64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+ uint8 status;
+ uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+ uint8 status;
+ uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS 0x00
+#define HCI_ERR_ILLEGAL_COMMAND 0x01
+#define HCI_ERR_NO_CONNECTION 0x02
+#define HCI_ERR_MEMORY_FULL 0x07
+#define HCI_ERR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09
+#define HCI_ERR_CONNECTION_EXISTS 0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED 0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10
+#define HCI_ERR_UNSUPPORTED_VALUE 0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16
+#define HCI_ERR_UNSPECIFIED 0x1F
+#define HCI_ERR_UNIT_KEY_USED 0x26
+#define HCI_ERR_QOS_REJECTED 0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39
+#define HCI_ERR_CHANNEL_MOVE 0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+ uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+ uint16 dlen; /* data total length */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12)
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+ uint8 ScheduleKnown;
+ uint8 NumReports;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+ uint32 StartTime;
+ uint32 Duration;
+ uint32 Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN 0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
new file mode 100644
index 00000000000000..8936d1641a3de2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h
@@ -0,0 +1,193 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * $Id: eapol.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_header eth; /* 802.3/Ethernet header */
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+ unsigned char body[1]; /* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
+
+typedef struct {
+ unsigned char version; /* EAPOL protocol version */
+ unsigned char type; /* EAPOL type */
+ unsigned short length; /* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION 2
+#define WPA_EAPOL_VERSION 1
+#define LEAP_EAPOL_VERSION 1
+#define SES_EAPOL_VERSION 1
+
+/* EAPOL types */
+#define EAP_PACKET 0
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN 8
+#define EAPOL_KEY_IV_LEN 16
+#define EAPOL_KEY_SIG_LEN 16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short length; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */
+ unsigned char index; /* Key Flags & Index */
+ unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */
+ unsigned char key[1]; /* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 44
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK 0x80
+#define EAPOL_KEY_BROADCAST 0
+#define EAPOL_KEY_UNICAST 0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK 0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN 8
+#define EAPOL_WPA_KEY_NONCE_LEN 32
+#define EAPOL_WPA_KEY_IV_LEN 16
+#define EAPOL_WPA_KEY_RSC_LEN 8
+#define EAPOL_WPA_KEY_ID_LEN 8
+#define EAPOL_WPA_KEY_MIC_LEN 16
+#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE 32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ unsigned char type; /* Key Descriptor Type */
+ unsigned short key_info; /* Key Information (unaligned) */
+ unsigned short key_len; /* Key Length (unaligned) */
+ unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
+ unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
+ unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
+ unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
+ unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
+ unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */
+ unsigned short data_len; /* Key Data Length */
+ unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 95
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1 0x01
+#define WPA_KEY_DESC_V2 0x02
+#define WPA_KEY_DESC_V3 0x03
+#define WPA_KEY_PAIRWISE 0x08
+#define WPA_KEY_INSTALL 0x40
+#define WPA_KEY_ACK 0x80
+#define WPA_KEY_MIC 0x100
+#define WPA_KEY_SECURE 0x200
+#define WPA_KEY_ERROR 0x400
+#define WPA_KEY_REQ 0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0 0x00
+#define WPA_KEY_INDEX_1 0x10
+#define WPA_KEY_INDEX_2 0x20
+#define WPA_KEY_INDEX_3 0x30
+#define WPA_KEY_INDEX_MASK 0x30
+#define WPA_KEY_INDEX_SHIFT 0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA 0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 type;
+ uint8 length;
+ uint8 oui[3];
+ uint8 subtype;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK 1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2
+#define WPA2_KEY_DATA_SUBTYPE_MAC 3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID 4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK 9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 flags;
+ uint8 reserved;
+ uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2
+
+#define WPA2_GTK_INDEX_MASK 0x03
+#define WPA2_GTK_INDEX_SHIFT 0x00
+
+#define WPA2_GTK_TRANSMIT 0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 key_id;
+ uint8 ipn[6];
+ uint8 key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 reserved[2];
+ uint8 mac[ETHER_ADDR_LEN];
+ uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD 0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
new file mode 100644
index 00000000000000..e45518564c1713
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h
@@ -0,0 +1,162 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+
+#include <packed_section_start.h>
+
+
+
+#define ETHER_ADDR_LEN 6
+
+
+#define ETHER_TYPE_LEN 2
+
+
+#define ETHER_CRC_LEN 4
+
+
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+
+#define ETHER_MIN_LEN 64
+
+
+#define ETHER_MIN_DATA 46
+
+
+#define ETHER_MAX_LEN 1518
+
+
+#define ETHER_MAX_DATA 1500
+
+
+#define ETHER_TYPE_MIN 0x0600
+#define ETHER_TYPE_IP 0x0800
+#define ETHER_TYPE_ARP 0x0806
+#define ETHER_TYPE_8021Q 0x8100
+#define ETHER_TYPE_IPV6 0x86dd
+#define ETHER_TYPE_BRCM 0x886c
+#define ETHER_TYPE_802_1X 0x888e
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
+#define ETHER_TYPE_WAI 0x88b4
+#define ETHER_TYPE_89_0D 0x890d
+
+#define ETHER_TYPE_IPV6 0x86dd
+
+
+#define ETHER_BRCM_SUBTYPE_LEN 4
+
+
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+
+
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \
+ ((uint8 *)ea)[0] = 0x01; \
+ ((uint8 *)ea)[1] = 0x00; \
+ ((uint8 *)ea)[2] = 0x5e; \
+ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \
+ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \
+ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
+}
+
+#ifndef __INCif_etherh
+
+BWL_PRE_PACKED_STRUCT struct ether_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+
+BWL_PRE_PACKED_STRUCT struct ether_addr {
+ uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif
+
+
+#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+
+#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+
+#define ether_cmp(a, b) (!(((short*)(a))[0] == ((short*)(b))[0]) | \
+ !(((short*)(a))[1] == ((short*)(b))[1]) | \
+ !(((short*)(a))[2] == ((short*)(b))[2]))
+
+
+#define ether_copy(s, d) { \
+ ((short*)(d))[0] = ((const short*)(s))[0]; \
+ ((short*)(d))[1] = ((const short*)(s))[1]; \
+ ((short*)(d))[2] = ((const short*)(s))[2]; }
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+
+#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \
+ ((uint8 *)(ea))[1] & \
+ ((uint8 *)(ea))[2] & \
+ ((uint8 *)(ea))[3] & \
+ ((uint8 *)(ea))[4] & \
+ ((uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \
+ ((uint8 *)(ea))[1] | \
+ ((uint8 *)(ea))[2] | \
+ ((uint8 *)(ea))[3] | \
+ ((uint8 *)(ea))[4] | \
+ ((uint8 *)(ea))[5]) == 0)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+ struct ether_header t; \
+ t = *(struct ether_header *)(s); \
+ *(struct ether_header *)(d) = t; \
+} while (0)
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
new file mode 100644
index 00000000000000..6a0518b0c91320
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h
@@ -0,0 +1,564 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h 311270 2012-01-28 00:11:54Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+
+#include <packed_section_start.h>
+
+
+
+#define P2P_OUI WFA_OUI
+#define P2P_VER WFA_OUI_TYPE_P2P
+
+#define P2P_IE_ID 0xdd
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+ uint8 id;
+ uint8 len;
+ uint8 OUI[3];
+ uint8 oui_type;
+ uint8 subelts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN 6
+
+#define P2P_ATTR_ID_OFF 0
+#define P2P_ATTR_LEN_OFF 1
+#define P2P_ATTR_DATA_OFF 3
+
+#define P2P_ATTR_ID_LEN 1
+#define P2P_ATTR_LEN_LEN 2
+#define P2P_ATTR_HDR_LEN 3
+
+
+#define P2P_SEID_STATUS 0
+#define P2P_SEID_MINOR_RC 1
+#define P2P_SEID_P2P_INFO 2
+#define P2P_SEID_DEV_ID 3
+#define P2P_SEID_INTENT 4
+#define P2P_SEID_CFG_TIMEOUT 5
+#define P2P_SEID_CHANNEL 6
+#define P2P_SEID_GRP_BSSID 7
+#define P2P_SEID_XT_TIMING 8
+#define P2P_SEID_INTINTADDR 9
+#define P2P_SEID_P2P_MGBTY 10
+#define P2P_SEID_CHAN_LIST 11
+#define P2P_SEID_ABSENCE 12
+#define P2P_SEID_DEV_INFO 13
+#define P2P_SEID_GROUP_INFO 14
+#define P2P_SEID_GROUP_ID 15
+#define P2P_SEID_P2P_IF 16
+#define P2P_SEID_OP_CHANNEL 17
+#define P2P_SEID_INVITE_FLAGS 18
+#define P2P_SEID_VNDR 221
+
+#define P2P_SE_VS_ID_SERVICES 0x1b
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 dev;
+ uint8 group;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+
+#define P2P_CAPSE_DEV_SERVICE_DIS 0x1
+#define P2P_CAPSE_DEV_CLIENT_DIS 0x2
+#define P2P_CAPSE_DEV_CONCURRENT 0x4
+#define P2P_CAPSE_DEV_INFRA_MAN 0x8
+#define P2P_CAPSE_DEV_LIMIT 0x10
+#define P2P_CAPSE_INVITE_PROC 0x20
+
+
+#define P2P_CAPSE_GRP_OWNER 0x1
+#define P2P_CAPSE_PERSIST_GRP 0x2
+#define P2P_CAPSE_GRP_LIMIT 0x4
+#define P2P_CAPSE_GRP_INTRA_BSS 0x8
+#define P2P_CAPSE_GRP_X_CONNECT 0x10
+#define P2P_CAPSE_GRP_PERSISTENT 0x20
+#define P2P_CAPSE_GRP_FORMATION 0x40
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 intent;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 go_tmo;
+ uint8 client_tmo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 op_class;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint8 ssid[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint8 ifaddrs;
+ uint8 ifaddr[1][6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+
+#define P2P_STATSE_SUCCESS 0
+
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1
+
+#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2
+
+#define P2P_STATSE_FAIL_LIMIT_REACHED 3
+
+#define P2P_STATSE_FAIL_INVALID_PARAMS 4
+
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5
+
+#define P2P_STATSE_FAIL_PROTO_ERROR 6
+
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7
+
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8
+
+#define P2P_STATSE_FAIL_INTENT 9
+
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10
+
+#define P2P_STATSE_FAIL_USER_REJECT 11
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 avail[2];
+ uint8 interval[2];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN 10
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 band;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+ uint8 band;
+ uint8 num_channels;
+ uint8 channels[WL_NUMCHANNELS];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 num_entries;
+ wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+ uint16 cat_id;
+ uint8 OUI[3];
+ uint8 oui_type;
+ uint16 sub_cat_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mac[6];
+ uint16 wps_cfg_meths;
+ uint8 pri_devtype[8];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN 8
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+ uint8 len;
+ uint8 devaddr[ETHER_ADDR_LEN];
+ uint8 ifaddr[ETHER_ADDR_LEN];
+ uint8 devcap;
+ uint8 cfg_meths[2];
+ uint8 pridt[P2P_DEV_TYPE_LEN];
+ uint8 secdts;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ struct ether_addr addr;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 mg_bitmap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+ uint8 eltId;
+ uint8 len[2];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 country[3];
+ uint8 op_class;
+ uint8 channel;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 flags;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+ uint8 category;
+ uint8 OUI[3];
+ uint8 type;
+ uint8 subtype;
+ uint8 dialog_token;
+ uint8 elts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY 0x7f
+
+#define P2P_AF_FIXED_LEN 7
+
+
+#define P2P_AF_NOTICE_OF_ABSENCE 0
+#define P2P_AF_PRESENCE_REQ 1
+#define P2P_AF_PRESENCE_RSP 2
+#define P2P_AF_GO_DISC_REQ 3
+
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+ uint8 category;
+ uint8 action;
+ uint8 oui[3];
+ uint8 oui_type;
+ uint8 subtype;
+ uint8 dialog_token;
+ uint8 elts[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN 8
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+
+
+#define P2P_PAF_GON_REQ 0
+#define P2P_PAF_GON_RSP 1
+#define P2P_PAF_GON_CONF 2
+#define P2P_PAF_INVITE_REQ 3
+#define P2P_PAF_INVITE_RSP 4
+#define P2P_PAF_DEVDIS_REQ 5
+#define P2P_PAF_DEVDIS_RSP 6
+#define P2P_PAF_PROVDIS_REQ 7
+#define P2P_PAF_PROVDIS_RSP 8
+
+
+#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+ uint8 cnt_type;
+ uint32 duration;
+ uint32 interval;
+ uint32 start;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+ uint8 eltId;
+ uint8 len[2];
+ uint8 index;
+ uint8 ops_ctw_parms;
+ wifi_p2p_noa_desc_t desc[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN 5
+
+
+#define P2P_NOA_DESC_CNT_RESERVED 0
+#define P2P_NOA_DESC_CNT_REPEAT 255
+#define P2P_NOA_DESC_TYPE_PREFERRED 1
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2
+
+
+#define P2P_NOA_CTW_MASK 0x7f
+#define P2P_NOA_OPS_MASK 0x80
+#define P2P_NOA_OPS_SHIFT 7
+
+#define P2P_CTW_MIN 10
+
+
+#define P2PSD_ACTION_CATEGORY 0x04
+
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a
+
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
+
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
+
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
+
+#define P2PSD_AD_EID 0x6c
+
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
+
+#define P2PSD_ADP_PROTO_ID 0x00
+
+#define P2PSD_GAS_OUI P2P_OUI
+
+#define P2PSD_GAS_OUI_SUBTYPE P2P_VER
+
+#define P2PSD_GAS_NQP_INFOID 0xDDDD
+
+#define P2PSD_GAS_COMEBACKDEALY 0x00
+
+
+
+typedef enum p2psd_svc_protype {
+ SVC_RPOTYPE_ALL = 0,
+ SVC_RPOTYPE_BONJOUR = 1,
+ SVC_RPOTYPE_UPNP = 2,
+ SVC_RPOTYPE_WSD = 3,
+ SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+
+typedef enum {
+ P2PSD_RESP_STATUS_SUCCESS = 0,
+ P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+ P2PSD_RESP_STATUS_DATA_NA = 2,
+ P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+ uint8 llm_pamebi;
+ uint8 adp_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+ uint8 id;
+ uint8 len;
+ wifi_p2psd_adp_tpl_t adp_tpl;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+ uint8 oui_subtype;
+ uint16 svc_updi;
+ uint8 svc_tlvs[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+ uint16 len;
+ uint8 svc_prot;
+ uint8 svc_tscid;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+ uint16 info_id;
+ uint16 len;
+ uint8 oui[3];
+ uint8 qreq_vsc[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qreq_len;
+ uint8 qreq_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+ uint16 len;
+ uint8 svc_prot;
+ uint8 svc_tscid;
+ uint8 status;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+ uint16 info_id;
+ uint16 len;
+ uint8 oui[3];
+ uint8 qresp_vsc[1];
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+ uint16 status;
+ uint16 cb_delay;
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qresp_len;
+ uint8 qresp_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+ uint16 status;
+ uint8 fragment_id;
+ uint16 cb_delay;
+ wifi_p2psd_adp_ie_t adp_ie;
+ uint16 qresp_len;
+ uint8 qresp_frm[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+ uint8 category;
+ uint8 action;
+ uint8 dialog_token;
+ uint8 query_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
new file mode 100644
index 00000000000000..a4900edd4ac6d1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h
@@ -0,0 +1,75 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _SD_SPI_H
+#define _SD_SPI_H
+
+#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */
+#define SPI_START_S 31
+#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */
+#define SPI_DIR_S 30
+#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S 24
+#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */
+#define SPI_RW_S 23
+#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */
+#define SPI_FUNC_S 20
+#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */
+#define SPI_RAW_S 19
+#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */
+#define SPI_STUFF_S 18
+#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */
+#define SPI_BLKMODE_S 19
+#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */
+#define SPI_OPCODE_S 18
+#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */
+#define SPI_ADDR_S 1
+#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */
+#define SPI_STUFF0_S 0
+
+#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */
+#define SPI_RSP_START_S 7
+#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */
+#define SPI_RSP_PARAM_ERR_S 6
+#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */
+#define SPI_RSP_RFU5_S 5
+#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */
+#define SPI_RSP_FUNC_ERR_S 4
+#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */
+#define SPI_RSP_CRC_ERR_S 3
+#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */
+#define SPI_RSP_ILL_CMD_S 2
+#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */
+#define SPI_RSP_RFU1_S 1
+#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */
+#define SPI_RSP_IDLE_S 0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */
+#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK 0x80
+
+#endif /* _SD_SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
new file mode 100644
index 00000000000000..9c94985cf9528f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h
@@ -0,0 +1,69 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+#include <packed_section_start.h>
+
+#define VLAN_VID_MASK 0xfff
+#define VLAN_CFI_SHIFT 12
+#define VLAN_PRI_SHIFT 13
+
+#define VLAN_PRI_MASK 7
+
+#define VLAN_TAG_LEN 4
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN)
+
+#define VLAN_TPID 0x8100
+
+struct ethervlan_header {
+ uint8 ether_dhost[ETHER_ADDR_LEN];
+ uint8 ether_shost[ETHER_ADDR_LEN];
+ uint16 vlan_type;
+ uint16 vlan_tag;
+ uint16 ether_type;
+};
+
+#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+ struct ethervlan_header t; \
+ t = *(struct ethervlan_header *)(s); \
+ *(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 00000000000000..cc2ff5b315d7dd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,203 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h 261155 2011-05-23 23:51:32Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+
+#include <packed_section_start.h>
+
+
+
+
+#define DOT11_RC_INVALID_WPA_IE 13
+#define DOT11_RC_MIC_FAILURE 14
+#define DOT11_RC_4WH_TIMEOUT 15
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
+#define DOT11_RC_WPA_IE_MISMATCH 17
+#define DOT11_RC_INVALID_MC_CIPHER 18
+#define DOT11_RC_INVALID_UC_CIPHER 19
+#define DOT11_RC_INVALID_AKMP 20
+#define DOT11_RC_BAD_WPA_VERSION 21
+#define DOT11_RC_INVALID_WPA_CAP 22
+#define DOT11_RC_8021X_AUTH_FAIL 23
+
+#define WPA2_PMKID_LEN 16
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 tag;
+ uint8 length;
+ uint8 oui[3];
+ uint8 oui_type;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN 4
+#define WPA_IE_FIXED_LEN 8
+#define WPA_IE_TAG_FIXED_LEN 6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 tag;
+ uint8 length;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT version;
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN 4
+#define WPA_RSN_IE_TAG_FIXED_LEN 2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ uint8 oui[3];
+ uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN 4
+
+
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN 2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 low;
+ uint8 high;
+ } BWL_POST_PACKED_STRUCT count;
+ wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+
+#define WPA_CIPHER_NONE 0
+#define WPA_CIPHER_WEP_40 1
+#define WPA_CIPHER_TKIP 2
+#define WPA_CIPHER_AES_OCB 3
+#define WPA_CIPHER_AES_CCM 4
+#define WPA_CIPHER_WEP_104 5
+#define WPA_CIPHER_BIP 6
+#define WPA_CIPHER_TPK 7
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4 11
+
+#define WAPI_CSE_WPI_SMS4 1
+#endif
+
+#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
+ (cipher) == WPA_CIPHER_WEP_40 || \
+ (cipher) == WPA_CIPHER_WEP_104 || \
+ (cipher) == WPA_CIPHER_TKIP || \
+ (cipher) == WPA_CIPHER_AES_OCB || \
+ (cipher) == WPA_CIPHER_AES_CCM || \
+ (cipher) == WPA_CIPHER_TPK)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
+ (cipher) == WAPI_CSE_WPI_SMS4)
+
+
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+ WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+ WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif
+
+
+#define WPA_TKIP_CM_DETECT 60
+#define WPA_TKIP_CM_BLOCK 60
+
+
+#define RSN_CAP_LEN 2
+
+
+#define RSN_CAP_PREAUTH 0x0001
+#define RSN_CAP_NOPAIRWISE 0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4
+#define RSN_CAP_1_REPLAY_CNTR 0
+#define RSN_CAP_2_REPLAY_CNTRS 1
+#define RSN_CAP_4_REPLAY_CNTRS 2
+#define RSN_CAP_16_REPLAY_CNTRS 3
+#ifdef MFP
+#define RSN_CAP_MFPR 0x0040
+#define RSN_CAP_MFPC 0x0080
+#endif
+
+
+#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+
+#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1)
+
+
+#define WPA_CAP_LEN RSN_CAP_LEN
+#define WPA_PMKID_CNT_LEN 2
+
+#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN 2
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
+
+
+#define WAPI_WAI_REQUEST 0x00F1
+#define WAPI_UNICAST_REKEY 0x00F2
+#define WAPI_STA_AGING 0x00F3
+#define WAPI_MUTIL_REKEY 0x00F4
+#define WAPI_STA_STATS 0x00F5
+
+#define WAPI_USK_REKEY_COUNT 0x4000000
+#define WAPI_MSK_REKEY_COUNT 0x4000000
+#endif
+
+
+#include <packed_section_end.h>
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wps.h b/drivers/net/wireless/bcmdhd/include/proto/wps.h
new file mode 100644
index 00000000000000..cccbfff4cfbb1e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wps.h
@@ -0,0 +1,379 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id$
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL 0x1001
+#define WPS_ID_ASSOC_STATE 0x1002
+#define WPS_ID_AUTH_TYPE 0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS 0x1004
+#define WPS_ID_AUTHENTICATOR 0x1005
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_CONFIG_ERROR 0x1009
+#define WPS_ID_CONF_URL4 0x100A
+#define WPS_ID_CONF_URL6 0x100B
+#define WPS_ID_CONN_TYPE 0x100C
+#define WPS_ID_CONN_TYPE_FLAGS 0x100D
+#define WPS_ID_CREDENTIAL 0x100E
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_E_HASH1 0x1014
+#define WPS_ID_E_HASH2 0x1015
+#define WPS_ID_E_SNONCE1 0x1016
+#define WPS_ID_E_SNONCE2 0x1017
+#define WPS_ID_ENCR_SETTINGS 0x1018
+#define WPS_ID_ENCR_TYPE 0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS 0x1010
+#define WPS_ID_ENROLLEE_NONCE 0x101A
+#define WPS_ID_FEATURE_ID 0x101B
+#define WPS_ID_IDENTITY 0x101C
+#define WPS_ID_IDENTITY_PROOF 0x101D
+#define WPS_ID_KEY_WRAP_AUTH 0x101E
+#define WPS_ID_KEY_IDENTIFIER 0x101F
+#define WPS_ID_MAC_ADDR 0x1020
+#define WPS_ID_MANUFACTURER 0x1021
+#define WPS_ID_MSG_TYPE 0x1022
+#define WPS_ID_MODEL_NAME 0x1023
+#define WPS_ID_MODEL_NUMBER 0x1024
+#define WPS_ID_NW_INDEX 0x1026
+#define WPS_ID_NW_KEY 0x1027
+#define WPS_ID_NW_KEY_INDEX 0x1028
+#define WPS_ID_NEW_DEVICE_NAME 0x1029
+#define WPS_ID_NEW_PWD 0x102A
+#define WPS_ID_OOB_DEV_PWD 0x102C
+#define WPS_ID_OS_VERSION 0x102D
+#define WPS_ID_POWER_LEVEL 0x102F
+#define WPS_ID_PSK_CURRENT 0x1030
+#define WPS_ID_PSK_MAX 0x1031
+#define WPS_ID_PUBLIC_KEY 0x1032
+#define WPS_ID_RADIO_ENABLED 0x1033
+#define WPS_ID_REBOOT 0x1034
+#define WPS_ID_REGISTRAR_CURRENT 0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST 0x1037
+#define WPS_ID_REGISTRAR_MAX 0x1038
+#define WPS_ID_REGISTRAR_NONCE 0x1039
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_RESP_TYPE 0x103B
+#define WPS_ID_RF_BAND 0x103C
+#define WPS_ID_R_HASH1 0x103D
+#define WPS_ID_R_HASH2 0x103E
+#define WPS_ID_R_SNONCE1 0x103F
+#define WPS_ID_R_SNONCE2 0x1040
+#define WPS_ID_SEL_REGISTRAR 0x1041
+#define WPS_ID_SERIAL_NUM 0x1042
+#define WPS_ID_SC_STATE 0x1044
+#define WPS_ID_SSID 0x1045
+#define WPS_ID_TOT_NETWORKS 0x1046
+#define WPS_ID_UUID_E 0x1047
+#define WPS_ID_UUID_R 0x1048
+#define WPS_ID_VENDOR_EXT 0x1049
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_X509_CERT_REQ 0x104B
+#define WPS_ID_X509_CERT 0x104C
+#define WPS_ID_EAP_IDENTITY 0x104D
+#define WPS_ID_MSG_COUNTER 0x104E
+#define WPS_ID_PUBKEY_HASH 0x104F
+#define WPS_ID_REKEY_KEY 0x1050
+#define WPS_ID_KEY_LIFETIME 0x1051
+#define WPS_ID_PERM_CFG_METHODS 0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ID_PORTABLE_DEVICE 0x1056
+#define WPS_ID_AP_SETUP_LOCKED 0x1057
+#define WPS_ID_APP_LIST 0x1058
+#define WPS_ID_EAP_TYPE 0x1059
+#define WPS_ID_INIT_VECTOR 0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO 0x1061
+#define WPS_ID_8021X_ENABLED 0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY 0x1064
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2 0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID "\x00\x01\x37"
+#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested.
+ * Not supported in Windows 7
+ */
+#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S 2
+#define WPS_ID_ASSOC_STATE_S 2
+#define WPS_ID_AUTH_TYPE_S 2
+#define WPS_ID_AUTH_TYPE_FLAGS_S 2
+#define WPS_ID_AUTHENTICATOR_S 8
+#define WPS_ID_CONFIG_METHODS_S 2
+#define WPS_ID_CONFIG_ERROR_S 2
+#define WPS_ID_CONN_TYPE_S 1
+#define WPS_ID_CONN_TYPE_FLAGS_S 1
+#define WPS_ID_DEVICE_PWD_ID_S 2
+#define WPS_ID_ENCR_TYPE_S 2
+#define WPS_ID_ENCR_TYPE_FLAGS_S 2
+#define WPS_ID_FEATURE_ID_S 4
+#define WPS_ID_MAC_ADDR_S 6
+#define WPS_ID_MSG_TYPE_S 1
+#define WPS_ID_SC_STATE_S 1
+#define WPS_ID_RF_BAND_S 1
+#define WPS_ID_OS_VERSION_S 4
+#define WPS_ID_VERSION_S 1
+#define WPS_ID_SEL_REGISTRAR_S 1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S 1
+#define WPS_ID_RESP_TYPE_S 1
+#define WPS_ID_AP_SETUP_LOCKED_S 1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S 1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED 0
+#define WPS_ASSOC_CONN_SUCCESS 1
+#define WPS_ASSOC_CONFIG_FAIL 2
+#define WPS_ASSOC_ASSOC_FAIL 3
+#define WPS_ASSOC_IP_FAIL 4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN 0x0001
+#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2 0x0010
+#define WPS_AUTHTYPE_WPA2PSK 0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL 0x0004
+#define WPS_CONFMET_DISPLAY 0x0008
+#define WPS_CONFMET_EXT_NFC_TOK 0x0010
+#define WPS_CONFMET_INT_NFC_TOK 0x0020
+#define WPS_CONFMET_NFC_INTF 0x0040
+#define WPS_CONFMET_PBC 0x0080
+#define WPS_CONFMET_KEYPAD 0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC 0x0280
+#define WPS_CONFMET_PHY_PBC 0x0480
+#define WPS_CONFMET_VIRT_DISPLAY 0x2008
+#define WPS_CONFMET_PHY_DISPLAY 0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR 0
+#define WPS_ERROR_OOB_INT_READ_ERR 1
+#define WPS_ERROR_DECRYPT_CRC_FAIL 2
+#define WPS_ERROR_CHAN24_NOT_SUPP 3
+#define WPS_ERROR_CHAN50_NOT_SUPP 4
+#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR 11
+#define WPS_ERROR_MULTI_PBC_DETECTED 12
+#define WPS_ERROR_ROGUE_SUSPECTED 13
+#define WPS_ERROR_DEVICE_BUSY 14
+#define WPS_ERROR_SETUP_LOCKED 15
+#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS 0x01
+#define WPS_CONNTYPE_IBSS 0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT 0x0000
+#define WPS_DEVICEPWDID_USER_SPEC 0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002
+#define WPS_DEVICEPWDID_REKEY 0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN 0x0004
+#define WPS_DEVICEPWDID_REG_SPEC 0x0005
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE 0x0001
+#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only
+ * be advertised on the AP when Mixed Mode
+ * is enabled (Encryption Type is 0x000c).
+ */
+#define WPS_ENCRTYPE_AES 0x0008
+
+
+/* WPS Message Types */
+#define WPS_ID_BEACON 0x01
+#define WPS_ID_PROBE_REQ 0x02
+#define WPS_ID_PROBE_RESP 0x03
+#define WPS_ID_MESSAGE_M1 0x04
+#define WPS_ID_MESSAGE_M2 0x05
+#define WPS_ID_MESSAGE_M2D 0x06
+#define WPS_ID_MESSAGE_M3 0x07
+#define WPS_ID_MESSAGE_M4 0x08
+#define WPS_ID_MESSAGE_M5 0x09
+#define WPS_ID_MESSAGE_M6 0x0A
+#define WPS_ID_MESSAGE_M7 0x0B
+#define WPS_ID_MESSAGE_M8 0x0C
+#define WPS_ID_MESSAGE_ACK 0x0D
+#define WPS_ID_MESSAGE_NACK 0x0E
+#define WPS_ID_MESSAGE_DONE 0x0F
+
+/* WSP private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6)
+
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER 1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2
+#define WPS_DEVICE_TYPE_CAT_PRINTER 3
+#define WPS_DEVICE_TYPE_CAT_CAMERA 4
+#define WPS_DEVICE_TYPE_CAT_STORAGE 5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */
+
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_MSGTYPE_REGISTRAR 0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR 0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ 0x01
+#define WPS_RFBAND_50GHZ 0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED 0x01
+#define WPS_SCSTATE_CONFIGURED 0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
diff --git a/drivers/net/wireless/bcmdhd/include/rwl_wifi.h b/drivers/net/wireless/bcmdhd/include/rwl_wifi.h
new file mode 100644
index 00000000000000..187c2bc6337db6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/rwl_wifi.h
@@ -0,0 +1,96 @@
+/*
+ * RWL definitions of
+ * Broadcom 802.11bang Networking Device Driver
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: rwl_wifi.h 281527 2011-09-02 17:12:53Z $
+ *
+ */
+
+#ifndef _rwl_wifi_h_
+#define _rwl_wifi_h_
+
+#if defined(RWL_WIFI) || defined(WIFI_REFLECTOR) || defined(RFAWARE)
+
+#define RWL_ACTION_WIFI_CATEGORY 127 /* Vendor-specific category value for WiFi */
+#define RWL_WIFI_OUI_BYTE0 0x00 /* BRCM-specific public OUI */
+#define RWL_WIFI_OUI_BYTE1 0x90
+#define RWL_WIFI_OUI_BYTE2 0x4c
+#define RWL_WIFI_ACTION_FRAME_SIZE sizeof(struct dot11_action_wifi_vendor_specific)
+#define RWL_WIFI_FIND_MY_PEER 0x09 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER 0x0A /* Server response to the client */
+#define RWL_WIFI_DEFAULT 0x00
+#define RWL_ACTION_WIFI_FRAG_TYPE 0x55 /* Fragment indicator for receiver */
+
+/*
+ * Information about the action frame data fields in the dot11_action_wifi_vendor_specific
+ * cdc structure (1 to 16). This does not include the status flag. Since this
+ * is not directly visible to the driver code, we can't use sizeof(struct cdc_ioctl).
+ * Hence Ref MAC address offset starts from byte 17.
+ * REF MAC ADDR (6 bytes (MAC Address len) from byte 17 to 22)
+ * DUT MAC ADDR (6 bytes after the REF MAC Address byte 23 to 28)
+ * unused (byte 29 to 49)
+ * REF/Client Channel offset (50)
+ * DUT/Server channel offset (51)
+ * ---------------------------------------------------------------------------------------
+ * cdc struct|REF MAC ADDR|DUT_MAC_ADDR|un used|REF Channel|DUT channel|Action frame Data|
+ * 1---------17-----------23-------------------50----------51----------52----------------1040
+ * REF MAC addr after CDC struct without status flag (status flag not used by wifi)
+ */
+
+#define RWL_REF_MAC_ADDRESS_OFFSET 17
+#define RWL_DUT_MAC_ADDRESS_OFFSET 23
+#define RWL_WIFI_CLIENT_CHANNEL_OFFSET 50
+#define RWL_WIFI_SERVER_CHANNEL_OFFSET 51
+
+#ifdef WIFI_REFLECTOR
+#include <bcmcdc.h>
+#define REMOTE_FINDSERVER_CMD 16
+#define RWL_WIFI_ACTION_CMD "wifiaction"
+#define RWL_WIFI_ACTION_CMD_LEN 11 /* With the NULL terminator */
+#define REMOTE_SET_CMD 1
+#define REMOTE_GET_CMD 2
+#define REMOTE_REPLY 4
+#define RWL_WIFI_DEFAULT_TYPE 0x00
+#define RWL_WIFI_DEFAULT_SUBTYPE 0x00
+#define RWL_ACTION_FRAME_DATA_SIZE 1024 /* fixed size for the wifi frame data */
+#define RWL_WIFI_CDC_HEADER_OFFSET 0
+#define RWL_WIFI_FRAG_DATA_SIZE 960 /* max size of the frag data */
+#define RWL_DEFAULT_WIFI_FRAG_COUNT 127 /* maximum fragment count */
+#define RWL_WIFI_RETRY 5 /* CMD retry count for wifi */
+#define RWL_WIFI_SEND 5 /* WIFI frame sent count */
+#define RWL_WIFI_SEND_DELAY 100 /* delay between two frames */
+#define MICROSEC_CONVERTOR_VAL 1000
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+typedef struct rem_packet {
+ rem_ioctl_t rem_cdc;
+ uchar message [RWL_ACTION_FRAME_DATA_SIZE];
+} rem_packet_t;
+
+struct send_packet {
+ char command [RWL_WIFI_ACTION_CMD_LEN];
+ dot11_action_wifi_vendor_specific_t response;
+} PACKED;
+typedef struct send_packet send_packet_t;
+
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+#endif /* WIFI_REFLECTOR */
+
+typedef struct rwl_request {
+ struct rwl_request* next_request;
+ struct dot11_action_wifi_vendor_specific action_frame;
+} rwl_request_t;
+
+
+#endif /* defined(RWL_WIFI) || defined(WIFI_REFLECTOR) */
+#endif /* _rwl_wifi_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h
new file mode 100644
index 00000000000000..f6e93df2c234b5
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h
@@ -0,0 +1,2235 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 325465 2012-04-03 11:16:11Z $
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+typedef struct eci_prerev35 {
+ uint32 eci_output;
+ uint32 eci_control;
+ uint32 eci_inputlo;
+ uint32 eci_inputmi;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolaritymi;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskmi;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventmi;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskmi;
+ uint32 eci_eventmaskhi;
+ uint32 PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+ uint32 eci_outputlo;
+ uint32 eci_outputhi;
+ uint32 eci_controllo;
+ uint32 eci_controlhi;
+ uint32 eci_inputlo;
+ uint32 eci_inputhi;
+ uint32 eci_inputintpolaritylo;
+ uint32 eci_inputintpolarityhi;
+ uint32 eci_intmasklo;
+ uint32 eci_intmaskhi;
+ uint32 eci_eventlo;
+ uint32 eci_eventhi;
+ uint32 eci_eventmasklo;
+ uint32 eci_eventmaskhi;
+ uint32 eci_auxtx;
+ uint32 eci_auxrx;
+ uint32 eci_datatag;
+ uint32 eci_uartescvalue;
+ uint32 eci_autobaudctr;
+ uint32 eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+ uint32 PAD[19];
+
+ uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+ uint32 chipid;
+ uint32 capabilities;
+ uint32 corecontrol;
+ uint32 bist;
+
+
+ uint32 otpstatus;
+ uint32 otpcontrol;
+ uint32 otpprog;
+ uint32 otplayout;
+
+
+ uint32 intstatus;
+ uint32 intmask;
+
+
+ uint32 chipcontrol;
+ uint32 chipstatus;
+
+
+ uint32 jtagcmd;
+ uint32 jtagir;
+ uint32 jtagdr;
+ uint32 jtagctrl;
+
+
+ uint32 flashcontrol;
+ uint32 flashaddress;
+ uint32 flashdata;
+ uint32 otplayoutextension;
+
+
+ uint32 broadcastaddress;
+ uint32 broadcastdata;
+
+
+ uint32 gpiopullup;
+ uint32 gpiopulldown;
+ uint32 gpioin;
+ uint32 gpioout;
+ uint32 gpioouten;
+ uint32 gpiocontrol;
+ uint32 gpiointpolarity;
+ uint32 gpiointmask;
+
+
+ uint32 gpioevent;
+ uint32 gpioeventintmask;
+
+
+ uint32 watchdog;
+
+
+ uint32 gpioeventintpolarity;
+
+
+ uint32 gpiotimerval;
+ uint32 gpiotimeroutmask;
+
+
+ uint32 clockcontrol_n;
+ uint32 clockcontrol_sb;
+ uint32 clockcontrol_pci;
+ uint32 clockcontrol_m2;
+ uint32 clockcontrol_m3;
+ uint32 clkdiv;
+ uint32 gpiodebugsel;
+ uint32 capabilities_ext;
+
+
+ uint32 pll_on_delay;
+ uint32 fref_sel_delay;
+ uint32 slow_clk_ctl;
+ uint32 PAD;
+
+
+ uint32 system_clk_ctl;
+ uint32 clkstatestretch;
+ uint32 PAD[2];
+
+
+ uint32 bp_addrlow;
+ uint32 bp_addrhigh;
+ uint32 bp_data;
+ uint32 PAD;
+ uint32 bp_indaccess;
+
+ uint32 gsioctrl;
+ uint32 gsioaddress;
+ uint32 gsiodata;
+
+
+ uint32 clkdiv2;
+
+ uint32 otpcontrol1;
+ uint32 fabid;
+
+
+ uint32 eromptr;
+
+
+ uint32 pcmcia_config;
+ uint32 pcmcia_memwait;
+ uint32 pcmcia_attrwait;
+ uint32 pcmcia_iowait;
+ uint32 ide_config;
+ uint32 ide_memwait;
+ uint32 ide_attrwait;
+ uint32 ide_iowait;
+ uint32 prog_config;
+ uint32 prog_waitcount;
+ uint32 flash_config;
+ uint32 flash_waitcount;
+ uint32 SECI_config;
+ uint32 SECI_status;
+ uint32 SECI_statusmask;
+ uint32 SECI_rxnibchanged;
+
+ uint32 PAD[20];
+
+
+ uint32 sromcontrol;
+ uint32 sromaddress;
+ uint32 sromdata;
+ uint32 PAD[1];
+
+ uint32 nflashctrl;
+ uint32 nflashconf;
+ uint32 nflashcoladdr;
+ uint32 nflashrowaddr;
+ uint32 nflashdata;
+ uint32 nflashwaitcnt0;
+ uint32 PAD[2];
+
+ uint32 seci_uart_data;
+ uint32 seci_uart_bauddiv;
+ uint32 seci_uart_fcr;
+ uint32 seci_uart_lcr;
+ uint32 seci_uart_mcr;
+ uint32 seci_uart_lsr;
+ uint32 seci_uart_msr;
+ uint32 seci_uart_baudadj;
+
+ uint32 clk_ctl_st;
+ uint32 hw_war;
+ uint32 PAD[70];
+
+
+ uint8 uart0data;
+ uint8 uart0imr;
+ uint8 uart0fcr;
+ uint8 uart0lcr;
+ uint8 uart0mcr;
+ uint8 uart0lsr;
+ uint8 uart0msr;
+ uint8 uart0scratch;
+ uint8 PAD[248];
+
+ uint8 uart1data;
+ uint8 uart1imr;
+ uint8 uart1fcr;
+ uint8 uart1lcr;
+ uint8 uart1mcr;
+ uint8 uart1lsr;
+ uint8 uart1msr;
+ uint8 uart1scratch;
+ uint32 PAD[126];
+
+
+
+ uint32 pmucontrol;
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel;
+ uint32 gpioenable;
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 PAD;
+ uint32 chipcontrol_addr;
+ uint32 chipcontrol_data;
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt;
+ uint32 pmu_xtalfreq;
+ uint32 PAD[100];
+ uint16 sromotp[512];
+
+ uint32 nand_revision;
+ uint32 nand_cmd_start;
+ uint32 nand_cmd_addr_x;
+ uint32 nand_cmd_addr;
+ uint32 nand_cmd_end_addr;
+ uint32 nand_cs_nand_select;
+ uint32 nand_cs_nand_xor;
+ uint32 PAD;
+ uint32 nand_spare_rd0;
+ uint32 nand_spare_rd4;
+ uint32 nand_spare_rd8;
+ uint32 nand_spare_rd12;
+ uint32 nand_spare_wr0;
+ uint32 nand_spare_wr4;
+ uint32 nand_spare_wr8;
+ uint32 nand_spare_wr12;
+ uint32 nand_acc_control;
+ uint32 PAD;
+ uint32 nand_config;
+ uint32 PAD;
+ uint32 nand_timing_1;
+ uint32 nand_timing_2;
+ uint32 nand_semaphore;
+ uint32 PAD;
+ uint32 nand_devid;
+ uint32 nand_devid_x;
+ uint32 nand_block_lock_status;
+ uint32 nand_intfc_status;
+ uint32 nand_ecc_corr_addr_x;
+ uint32 nand_ecc_corr_addr;
+ uint32 nand_ecc_unc_addr_x;
+ uint32 nand_ecc_unc_addr;
+ uint32 nand_read_error_count;
+ uint32 nand_corr_stat_threshold;
+ uint32 PAD[2];
+ uint32 nand_read_addr_x;
+ uint32 nand_read_addr;
+ uint32 nand_page_program_addr_x;
+ uint32 nand_page_program_addr;
+ uint32 nand_copy_back_addr_x;
+ uint32 nand_copy_back_addr;
+ uint32 nand_block_erase_addr_x;
+ uint32 nand_block_erase_addr;
+ uint32 nand_inv_read_addr_x;
+ uint32 nand_inv_read_addr;
+ uint32 PAD[2];
+ uint32 nand_blk_wr_protect;
+ uint32 PAD[3];
+ uint32 nand_acc_control_cs1;
+ uint32 nand_config_cs1;
+ uint32 nand_timing_1_cs1;
+ uint32 nand_timing_2_cs1;
+ uint32 PAD[20];
+ uint32 nand_spare_rd16;
+ uint32 nand_spare_rd20;
+ uint32 nand_spare_rd24;
+ uint32 nand_spare_rd28;
+ uint32 nand_cache_addr;
+ uint32 nand_cache_data;
+ uint32 nand_ctrl_config;
+ uint32 nand_ctrl_status;
+} chipcregs_t;
+
+#endif
+
+
+#define CC_CHIPID 0
+#define CC_CAPABILITIES 4
+#define CC_CHIPST 0x2c
+#define CC_EROMPTR 0xfc
+
+#define CC_OTPST 0x10
+#define CC_JTAGCMD 0x30
+#define CC_JTAGIR 0x34
+#define CC_JTAGDR 0x38
+#define CC_JTAGCTRL 0x3c
+#define CC_GPIOPU 0x58
+#define CC_GPIOPD 0x5c
+#define CC_GPIOIN 0x60
+#define CC_GPIOOUT 0x64
+#define CC_GPIOOUTEN 0x68
+#define CC_GPIOCTRL 0x6c
+#define CC_GPIOPOL 0x70
+#define CC_GPIOINTM 0x74
+#define CC_WATCHDOG 0x80
+#define CC_CLKC_N 0x90
+#define CC_CLKC_M0 0x94
+#define CC_CLKC_M1 0x98
+#define CC_CLKC_M2 0x9c
+#define CC_CLKC_M3 0xa0
+#define CC_CLKDIV 0xa4
+#define CC_SYS_CLK_CTL 0xc0
+#define CC_CLK_CTL_ST SI_CLK_CTL_ST
+#define PMU_CTL 0x600
+#define PMU_CAP 0x604
+#define PMU_ST 0x608
+#define PMU_RES_STATE 0x60c
+#define PMU_TIMER 0x614
+#define PMU_MIN_RES_MASK 0x618
+#define PMU_MAX_RES_MASK 0x61c
+#define CC_CHIPCTL_ADDR 0x650
+#define CC_CHIPCTL_DATA 0x654
+#define PMU_REG_CONTROL_ADDR 0x658
+#define PMU_REG_CONTROL_DATA 0x65C
+#define PMU_PLL_CONTROL_ADDR 0x660
+#define PMU_PLL_CONTROL_DATA 0x664
+#define CC_SROM_OTP 0x800
+
+#ifdef NFLASH_SUPPORT
+
+#define CC_NAND_REVISION 0xC00
+#define CC_NAND_CMD_START 0xC04
+#define CC_NAND_CMD_ADDR 0xC0C
+#define CC_NAND_SPARE_RD_0 0xC20
+#define CC_NAND_SPARE_RD_4 0xC24
+#define CC_NAND_SPARE_RD_8 0xC28
+#define CC_NAND_SPARE_RD_C 0xC2C
+#define CC_NAND_CONFIG 0xC48
+#define CC_NAND_DEVID 0xC60
+#define CC_NAND_DEVID_EXT 0xC64
+#define CC_NAND_INTFC_STATUS 0xC6C
+#endif
+
+
+#define CID_ID_MASK 0x0000ffff
+#define CID_REV_MASK 0x000f0000
+#define CID_REV_SHIFT 16
+#define CID_PKG_MASK 0x00f00000
+#define CID_PKG_SHIFT 20
+#define CID_CC_MASK 0x0f000000
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000
+#define CID_TYPE_SHIFT 28
+
+
+#define CC_CAP_UARTS_MASK 0x00000003
+#define CC_CAP_MIPSEB 0x00000004
+#define CC_CAP_UCLKSEL 0x00000018
+#define CC_CAP_UINTCLK 0x00000008
+#define CC_CAP_UARTGPIO 0x00000020
+#define CC_CAP_EXTBUS_MASK 0x000000c0
+#define CC_CAP_EXTBUS_NONE 0x00000000
+#define CC_CAP_EXTBUS_FULL 0x00000040
+#define CC_CAP_EXTBUS_PROG 0x00000080
+#define CC_CAP_FLASH_MASK 0x00000700
+#define CC_CAP_PLL_MASK 0x00038000
+#define CC_CAP_PWR_CTL 0x00040000
+#define CC_CAP_OTPSIZE 0x00380000
+#define CC_CAP_OTPSIZE_SHIFT 19
+#define CC_CAP_OTPSIZE_BASE 5
+#define CC_CAP_JTAGP 0x00400000
+#define CC_CAP_ROM 0x00800000
+#define CC_CAP_BKPLN64 0x08000000
+#define CC_CAP_PMU 0x10000000
+#define CC_CAP_ECI 0x20000000
+#define CC_CAP_SROM 0x40000000
+#define CC_CAP_NFLASH 0x80000000
+
+#define CC_CAP2_SECI 0x00000001
+#define CC_CAP2_GSIO 0x00000002
+
+
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001
+
+
+#define PLL_NONE 0x00000000
+#define PLL_TYPE1 0x00010000
+#define PLL_TYPE2 0x00020000
+#define PLL_TYPE3 0x00030000
+#define PLL_TYPE4 0x00008000
+#define PLL_TYPE5 0x00018000
+#define PLL_TYPE6 0x00028000
+#define PLL_TYPE7 0x00038000
+
+
+#define ILP_CLOCK 32000
+
+
+#define ALP_CLOCK 20000000
+
+
+#define HT_CLOCK 80000000
+
+
+#define CC_UARTCLKO 0x00000001
+#define CC_SE 0x00000002
+#define CC_ASYNCGPIO 0x00000004
+#define CC_UARTCLKEN 0x00000008
+
+
+#define CHIPCTRL_4321A0_DEFAULT 0x3a4
+#define CHIPCTRL_4321A1_DEFAULT 0x0a4
+#define CHIPCTRL_4321_PLL_DOWN 0x800000
+
+
+#define OTPS_OL_MASK 0x000000ff
+#define OTPS_OL_MFG 0x00000001
+#define OTPS_OL_OR1 0x00000002
+#define OTPS_OL_OR2 0x00000004
+#define OTPS_OL_GU 0x00000008
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100
+#define OTPS_GUP_SW 0x00000200
+#define OTPS_GUP_CI 0x00000400
+#define OTPS_GUP_FUSE 0x00000800
+#define OTPS_READY 0x00001000
+#define OTPS_RV(x) (1 << (16 + (x)))
+#define OTPS_RV_MASK 0x0fff0000
+#define OTPS_PROGOK 0x40000000
+
+
+#define OTPC_PROGSEL 0x00000001
+#define OTPC_PCOUNT_MASK 0x0000000e
+#define OTPC_PCOUNT_SHIFT 1
+#define OTPC_VSEL_MASK 0x000000f0
+#define OTPC_VSEL_SHIFT 4
+#define OTPC_TMM_MASK 0x00000700
+#define OTPC_TMM_SHIFT 8
+#define OTPC_ODM 0x00000800
+#define OTPC_PROGEN 0x80000000
+
+
+#define OTPC_40NM_PROGSEL_SHIFT 0
+#define OTPC_40NM_PCOUNT_SHIFT 1
+#define OTPC_40NM_PCOUNT_WR 0xA
+#define OTPC_40NM_PCOUNT_V1X 0xB
+#define OTPC_40NM_REGCSEL_SHIFT 5
+#define OTPC_40NM_REGCSEL_DEF 0x4
+#define OTPC_40NM_PROGIN_SHIFT 8
+#define OTPC_40NM_R2X_SHIFT 10
+#define OTPC_40NM_ODM_SHIFT 11
+#define OTPC_40NM_DF_SHIFT 15
+#define OTPC_40NM_VSEL_SHIFT 16
+#define OTPC_40NM_VSEL_WR 0xA
+#define OTPC_40NM_VSEL_V1X 0xA
+#define OTPC_40NM_VSEL_R1X 0x5
+#define OTPC_40NM_COFAIL_SHIFT 30
+
+#define OTPC1_CPCSEL_SHIFT 0
+#define OTPC1_CPCSEL_DEF 6
+#define OTPC1_TM_SHIFT 8
+#define OTPC1_TM_WR 0x84
+#define OTPC1_TM_V1X 0x84
+#define OTPC1_TM_R1X 0x4
+
+
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000
+
+
+#define OTPL_HWRGN_OFF_MASK 0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT 0
+#define OTPL_WRAP_REVID_MASK 0x00F80000
+#define OTPL_WRAP_REVID_SHIFT 19
+#define OTPL_WRAP_TYPE_MASK 0x00070000
+#define OTPL_WRAP_TYPE_SHIFT 16
+#define OTPL_WRAP_TYPE_65NM 0
+#define OTPL_WRAP_TYPE_40NM 1
+
+
+#define OTP_CISFORMAT_NEW 0x80000000
+
+
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+
+#define OTPPOC_READ_40NM 0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM 2
+#define OTPPOC_VERIFY_40NM 3
+#define OTPPOC_WORD_VERIFY_1_40NM 4
+#define OTPPOC_ROW_LOCK_40NM 5
+#define OTPPOC_STBY_40NM 6
+#define OTPPOC_WAKEUP_40NM 7
+#define OTPPOC_WORD_VERIFY_0_40NM 8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM 10
+#define OTPPOC_WORDPROG_40NM 11
+#define OTPPOC_BURNIN_40NM 12
+#define OTPPOC_AUTORELOAD_40NM 13
+#define OTPPOC_OVST_READ_40NM 14
+#define OTPPOC_OVST_PROG_40NM 15
+
+
+#define OTPLAYOUTEXT_FUSE_MASK 0x3FF
+
+
+
+#define JTAGM_CREV_OLD 10
+#define JTAGM_CREV_IRP 22
+#define JTAGM_CREV_RTI 28
+
+
+#define JCMD_START 0x80000000
+#define JCMD_BUSY 0x80000000
+#define JCMD_STATE_MASK 0x60000000
+#define JCMD_STATE_TLR 0x00000000
+#define JCMD_STATE_PIR 0x20000000
+#define JCMD_STATE_PDR 0x40000000
+#define JCMD_STATE_RTI 0x60000000
+#define JCMD0_ACC_MASK 0x0000f000
+#define JCMD0_ACC_IRDR 0x00000000
+#define JCMD0_ACC_DR 0x00001000
+#define JCMD0_ACC_IR 0x00002000
+#define JCMD0_ACC_RESET 0x00003000
+#define JCMD0_ACC_IRPDR 0x00004000
+#define JCMD0_ACC_PDR 0x00005000
+#define JCMD0_IRW_MASK 0x00000f00
+#define JCMD_ACC_MASK 0x000f0000
+#define JCMD_ACC_IRDR 0x00000000
+#define JCMD_ACC_DR 0x00010000
+#define JCMD_ACC_IR 0x00020000
+#define JCMD_ACC_RESET 0x00030000
+#define JCMD_ACC_IRPDR 0x00040000
+#define JCMD_ACC_PDR 0x00050000
+#define JCMD_ACC_PIR 0x00060000
+#define JCMD_ACC_IRDR_I 0x00070000
+#define JCMD_ACC_DR_I 0x00080000
+#define JCMD_IRW_MASK 0x00001f00
+#define JCMD_IRW_SHIFT 8
+#define JCMD_DRW_MASK 0x0000003f
+
+
+#define JCTRL_FORCE_CLK 4
+#define JCTRL_EXT_EN 2
+#define JCTRL_EN 1
+
+
+#define CLKD_SFLASH 0x0f000000
+#define CLKD_SFLASH_SHIFT 24
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+#define CLKD_JTAG 0x00000f00
+#define CLKD_JTAG_SHIFT 8
+#define CLKD_UART 0x000000ff
+
+#define CLKD2_SROM 0x00000003
+
+
+#define CI_GPIO 0x00000001
+#define CI_EI 0x00000002
+#define CI_TEMP 0x00000004
+#define CI_SIRQ 0x00000008
+#define CI_ECI 0x00000010
+#define CI_PMU 0x00000020
+#define CI_UART 0x00000040
+#define CI_WDRESET 0x80000000
+
+
+#define SCC_SS_MASK 0x00000007
+#define SCC_SS_LPO 0x00000000
+#define SCC_SS_XTAL 0x00000001
+#define SCC_SS_PCI 0x00000002
+#define SCC_LF 0x00000200
+#define SCC_LP 0x00000400
+#define SCC_FS 0x00000800
+#define SCC_IP 0x00001000
+#define SCC_XC 0x00002000
+#define SCC_XP 0x00004000
+#define SCC_CD_MASK 0xffff0000
+#define SCC_CD_SHIFT 16
+
+
+#define SYCC_IE 0x00000001
+#define SYCC_AE 0x00000002
+#define SYCC_FP 0x00000004
+#define SYCC_AR 0x00000008
+#define SYCC_HR 0x00000010
+#define SYCC_CD_MASK 0xffff0000
+#define SYCC_CD_SHIFT 16
+
+
+#define BPIA_BYTEEN 0x0000000f
+#define BPIA_SZ1 0x00000001
+#define BPIA_SZ2 0x00000003
+#define BPIA_SZ4 0x00000007
+#define BPIA_SZ8 0x0000000f
+#define BPIA_WRITE 0x00000100
+#define BPIA_START 0x00000200
+#define BPIA_BUSY 0x00000200
+#define BPIA_ERROR 0x00000400
+
+
+#define CF_EN 0x00000001
+#define CF_EM_MASK 0x0000000e
+#define CF_EM_SHIFT 1
+#define CF_EM_FLASH 0
+#define CF_EM_SYNC 2
+#define CF_EM_PCMCIA 4
+#define CF_DS 0x00000010
+#define CF_BS 0x00000020
+#define CF_CD_MASK 0x000000c0
+#define CF_CD_SHIFT 6
+#define CF_CD_DIV2 0x00000000
+#define CF_CD_DIV3 0x00000040
+#define CF_CD_DIV4 0x00000080
+#define CF_CE 0x00000100
+#define CF_SB 0x00000200
+
+
+#define PM_W0_MASK 0x0000003f
+#define PM_W1_MASK 0x00001f00
+#define PM_W1_SHIFT 8
+#define PM_W2_MASK 0x001f0000
+#define PM_W2_SHIFT 16
+#define PM_W3_MASK 0x1f000000
+#define PM_W3_SHIFT 24
+
+
+#define PA_W0_MASK 0x0000003f
+#define PA_W1_MASK 0x00001f00
+#define PA_W1_SHIFT 8
+#define PA_W2_MASK 0x001f0000
+#define PA_W2_SHIFT 16
+#define PA_W3_MASK 0x1f000000
+#define PA_W3_SHIFT 24
+
+
+#define PI_W0_MASK 0x0000003f
+#define PI_W1_MASK 0x00001f00
+#define PI_W1_SHIFT 8
+#define PI_W2_MASK 0x001f0000
+#define PI_W2_SHIFT 16
+#define PI_W3_MASK 0x1f000000
+#define PI_W3_SHIFT 24
+
+
+#define PW_W0_MASK 0x0000001f
+#define PW_W1_MASK 0x00001f00
+#define PW_W1_SHIFT 8
+#define PW_W2_MASK 0x001f0000
+#define PW_W2_SHIFT 16
+#define PW_W3_MASK 0x1f000000
+#define PW_W3_SHIFT 24
+
+#define PW_W0 0x0000000c
+#define PW_W1 0x00000a00
+#define PW_W2 0x00020000
+#define PW_W3 0x01000000
+
+
+#define FW_W0_MASK 0x0000003f
+#define FW_W1_MASK 0x00001f00
+#define FW_W1_SHIFT 8
+#define FW_W2_MASK 0x001f0000
+#define FW_W2_SHIFT 16
+#define FW_W3_MASK 0x1f000000
+#define FW_W3_SHIFT 24
+
+
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
+
+
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_PLL_PLLCTL_UPD 0x00000400
+#define PCTL_NOILP_ON_WAIT 0x00000200
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+
+#define CSTRETCH_HT 0xffff0000
+#define CSTRETCH_ALP 0x0000ffff
+
+
+#define GPIO_ONTIME_SHIFT 16
+
+
+#define CN_N1_MASK 0x3f
+#define CN_N2_MASK 0x3f00
+#define CN_N2_SHIFT 8
+#define CN_PLLC_MASK 0xf0000
+#define CN_PLLC_SHIFT 16
+
+
+#define CC_M1_MASK 0x3f
+#define CC_M2_MASK 0x3f00
+#define CC_M2_SHIFT 8
+#define CC_M3_MASK 0x3f0000
+#define CC_M3_SHIFT 16
+#define CC_MC_MASK 0x1f000000
+#define CC_MC_SHIFT 24
+
+
+#define CC_F6_2 0x02
+#define CC_F6_3 0x03
+#define CC_F6_4 0x05
+#define CC_F6_5 0x09
+#define CC_F6_6 0x11
+#define CC_F6_7 0x21
+
+#define CC_F5_BIAS 5
+
+#define CC_MC_BYPASS 0x08
+#define CC_MC_M1 0x04
+#define CC_MC_M1M2 0x02
+#define CC_MC_M1M2M3 0x01
+#define CC_MC_M1M3 0x11
+
+
+#define CC_T2_BIAS 2
+#define CC_T2M2_BIAS 3
+
+#define CC_T2MC_M1BYP 1
+#define CC_T2MC_M2BYP 2
+#define CC_T2MC_M3BYP 4
+
+
+#define CC_T6_MMASK 1
+#define CC_T6_M0 120000000
+#define CC_T6_M1 100000000
+#define SB2MIPS_T6(sb) (2 * (sb))
+
+
+#define CC_CLOCK_BASE1 24000000
+#define CC_CLOCK_BASE2 12500000
+
+
+#define CLKC_5350_N 0x0311
+#define CLKC_5350_M 0x04020009
+
+
+#define FLASH_NONE 0x000
+#define SFLASH_ST 0x100
+#define SFLASH_AT 0x200
+#define NFLASH 0x300
+#define PFLASH 0x700
+
+
+#define CC_CFG_EN 0x0001
+#define CC_CFG_EM_MASK 0x000e
+#define CC_CFG_EM_ASYNC 0x0000
+#define CC_CFG_EM_SYNC 0x0002
+#define CC_CFG_EM_PCMCIA 0x0004
+#define CC_CFG_EM_IDE 0x0006
+#define CC_CFG_DS 0x0010
+#define CC_CFG_CD_MASK 0x00e0
+#define CC_CFG_CE 0x0100
+#define CC_CFG_SB 0x0200
+#define CC_CFG_IS 0x0400
+
+
+#define CC_EB_BASE 0x1a000000
+#define CC_EB_PCMCIA_MEM 0x1a000000
+#define CC_EB_PCMCIA_IO 0x1a200000
+#define CC_EB_PCMCIA_CFG 0x1a400000
+#define CC_EB_IDE 0x1a800000
+#define CC_EB_PCMCIA1_MEM 0x1a800000
+#define CC_EB_PCMCIA1_IO 0x1aa00000
+#define CC_EB_PCMCIA1_CFG 0x1ac00000
+#define CC_EB_PROGIF 0x1b000000
+
+
+
+#define SFLASH_OPCODE 0x000000ff
+#define SFLASH_ACTION 0x00000700
+#define SFLASH_CS_ACTIVE 0x00001000
+#define SFLASH_START 0x80000000
+#define SFLASH_BUSY SFLASH_START
+
+
+#define SFLASH_ACT_OPONLY 0x0000
+#define SFLASH_ACT_OP1D 0x0100
+#define SFLASH_ACT_OP3A 0x0200
+#define SFLASH_ACT_OP3A1D 0x0300
+#define SFLASH_ACT_OP3A4D 0x0400
+#define SFLASH_ACT_OP3A4X4D 0x0500
+#define SFLASH_ACT_OP3A1X4D 0x0700
+
+
+#define SFLASH_ST_WREN 0x0006
+#define SFLASH_ST_WRDIS 0x0004
+#define SFLASH_ST_RDSR 0x0105
+#define SFLASH_ST_WRSR 0x0101
+#define SFLASH_ST_READ 0x0303
+#define SFLASH_ST_PP 0x0302
+#define SFLASH_ST_SE 0x02d8
+#define SFLASH_ST_BE 0x00c7
+#define SFLASH_ST_DP 0x00b9
+#define SFLASH_ST_RES 0x03ab
+#define SFLASH_ST_CSA 0x1000
+#define SFLASH_ST_SSE 0x0220
+
+#define SFLASH_MXIC_RDID 0x0390
+#define SFLASH_MXIC_MFID 0xc2
+
+
+#define SFLASH_ST_WIP 0x01
+#define SFLASH_ST_WEL 0x02
+#define SFLASH_ST_BP_MASK 0x1c
+#define SFLASH_ST_BP_SHIFT 2
+#define SFLASH_ST_SRWD 0x80
+
+
+#define SFLASH_AT_READ 0x07e8
+#define SFLASH_AT_PAGE_READ 0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS 0x01d7
+#define SFLASH_AT_BUF1_WRITE 0x0384
+#define SFLASH_AT_BUF2_WRITE 0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
+#define SFLASH_AT_BUF1_PROGRAM 0x0288
+#define SFLASH_AT_BUF2_PROGRAM 0x0289
+#define SFLASH_AT_PAGE_ERASE 0x0281
+#define SFLASH_AT_BLOCK_ERASE 0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define SFLASH_AT_BUF1_LOAD 0x0253
+#define SFLASH_AT_BUF2_LOAD 0x0255
+#define SFLASH_AT_BUF1_COMPARE 0x0260
+#define SFLASH_AT_BUF2_COMPARE 0x0261
+#define SFLASH_AT_BUF1_REPROGRAM 0x0258
+#define SFLASH_AT_BUF2_REPROGRAM 0x0259
+
+
+#define SFLASH_AT_READY 0x80
+#define SFLASH_AT_MISMATCH 0x40
+#define SFLASH_AT_ID_MASK 0x38
+#define SFLASH_AT_ID_SHIFT 3
+
+
+#define GSIO_START 0x80000000
+#define GSIO_BUSY GSIO_START
+
+
+
+#define UART_RX 0
+#define UART_TX 0
+#define UART_DLL 0
+#define UART_IER 1
+#define UART_DLM 1
+#define UART_IIR 2
+#define UART_FCR 2
+#define UART_LCR 3
+#define UART_MCR 4
+#define UART_LSR 5
+#define UART_MSR 6
+#define UART_SCR 7
+#define UART_LCR_DLAB 0x80
+#define UART_LCR_WLEN8 0x03
+#define UART_MCR_OUT2 0x08
+#define UART_MCR_LOOP 0x10
+#define UART_LSR_RX_FIFO 0x80
+#define UART_LSR_TDHR 0x40
+#define UART_LSR_THRE 0x20
+#define UART_LSR_BREAK 0x10
+#define UART_LSR_FRAMING 0x08
+#define UART_LSR_PARITY 0x04
+#define UART_LSR_OVERRUN 0x02
+#define UART_LSR_RXRDY 0x01
+#define UART_FCR_FIFO_ENABLE 1
+
+
+#define UART_IIR_FIFO_MASK 0xc0
+#define UART_IIR_INT_MASK 0xf
+#define UART_IIR_MDM_CHG 0x0
+#define UART_IIR_NOINT 0x1
+#define UART_IIR_THRE 0x2
+#define UART_IIR_RCVD_DATA 0x4
+#define UART_IIR_RCVR_STATUS 0x6
+#define UART_IIR_CHAR_TIME 0xc
+
+
+#define UART_IER_EDSSI 8
+#define UART_IER_ELSI 4
+#define UART_IER_ETBEI 2
+#define UART_IER_ERBFI 1
+
+
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+
+
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
+
+
+
+#define PRRT_TIME_MASK 0x03ff
+#define PRRT_INTEN 0x0400
+#define PRRT_REQ_ACTIVE 0x0800
+#define PRRT_ALP_REQ 0x1000
+#define PRRT_HT_REQ 0x2000
+#define PRRT_HQ_REQ 0x4000
+
+
+#define PMURES_BIT(bit) (1 << (bit))
+
+
+#define PMURES_MAX_RESNUM 30
+
+
+#define PMU_CHIPCTL0 0
+
+
+#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
+#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
+
+
+#define PMU_CHIPCTL1 1
+#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
+
+#define PMU_CC1_IF_TYPE_MASK 0x00000030
+#define PMU_CC1_IF_TYPE_RMII 0x00000000
+#define PMU_CC1_IF_TYPE_MII 0x00000010
+#define PMU_CC1_IF_TYPE_RGMII 0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK 0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY 0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
+#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
+
+
+#define PMU_CHIPCTL2 2
+
+
+#define PMU_CHIPCTL3 3
+
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19
+#define PMU_CC3_ENABLE_RF_SHIFT 22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23
+
+
+
+
+
+#define PMU0_PLL0_PLLCTL0 0
+#define PMU0_PLL0_PC0_PDIV_MASK 1
+#define PMU0_PLL0_PC0_PDIV_FREQ 25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
+
+
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
+
+
+#define PMU0_PLL0_PLLCTL1 1
+#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
+#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
+#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
+#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
+#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
+
+
+#define PMU0_PLL0_PLLCTL2 2
+#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
+#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
+
+
+
+#define PMU1_PLL0_PLLCTL0 0
+#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
+#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
+
+
+#define PMU1_PLL0_PLLCTL1 1
+#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
+#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT 8
+#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT 16
+#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT 24
+#define PMU1_PLL0_PC1_M4DIV_BY_9 9
+#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+
+#define PMU1_PLL0_PLLCTL2 2
+#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
+#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT 8
+#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2
+#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU1_PLL0_PLLCTL3 3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU1_PLL0_PLLCTL4 4
+
+
+#define PMU1_PLL0_PLLCTL5 5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+
+#define PMU2_PHY_PLL_PLLCTL 4
+#define PMU2_SI_PLL_PLLCTL 10
+
+
+
+
+#define PMU2_PLL_PLLCTL0 0
+#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT 20
+#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL1 1
+#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT 0
+#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT 8
+#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT 16
+#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT 24
+
+
+#define PMU2_PLL_PLLCTL2 2
+#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT 0
+#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT 8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17
+#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
+
+
+#define PMU2_PLL_PLLCTL3 3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
+
+
+#define PMU2_PLL_PLLCTL4 4
+
+
+#define PMU2_PLL_PLLCTL5 5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
+
+
+#define PMU5_PLL_P1P2_OFF 0
+#define PMU5_PLL_P1_MASK 0x0f000000
+#define PMU5_PLL_P1_SHIFT 24
+#define PMU5_PLL_P2_MASK 0x00f00000
+#define PMU5_PLL_P2_SHIFT 20
+#define PMU5_PLL_M14_OFF 1
+#define PMU5_PLL_MDIV_MASK 0x000000ff
+#define PMU5_PLL_MDIV_WIDTH 8
+#define PMU5_PLL_NM5_OFF 2
+#define PMU5_PLL_NDIV_MASK 0xfff00000
+#define PMU5_PLL_NDIV_SHIFT 20
+#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000
+#define PMU5_PLL_NDIV_MODE_SHIFT 17
+#define PMU5_PLL_FMAB_OFF 3
+#define PMU5_PLL_MRAT_MASK 0xf0000000
+#define PMU5_PLL_MRAT_SHIFT 28
+#define PMU5_PLL_ABRAT_MASK 0x08000000
+#define PMU5_PLL_ABRAT_SHIFT 27
+#define PMU5_PLL_FDIV_MASK 0x07ffffff
+#define PMU5_PLL_PLLCTL_OFF 4
+#define PMU5_PLL_PCHI_OFF 5
+#define PMU5_PLL_PCHI_MASK 0x0000003f
+
+
+#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
+#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
+#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
+
+
+#define PMU5_MAINPLL_CPU 1
+#define PMU5_MAINPLL_MEM 2
+#define PMU5_MAINPLL_SI 3
+
+
+#define PMU4706_MAINPLL_PLL0 0
+#define PMU6_4706_PROCPLL_OFF 4
+#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT 16
+#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT 12
+#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT 3
+#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0
+
+#define PMU7_PLL_PLLCTL7 7
+#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 24
+#define PMU7_PLL_CTL7_M4DIV_BY_6 6
+#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL8 8
+#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT 0
+#define PMU7_PLL_CTL8_M5DIV_BY_8 8
+#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT 8
+#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18
+#define PMU7_PLL_PLLCTL11 11
+#define PMU7_PLL_PLLCTL11_MASK 0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL 0x22222200
+
+
+#define PMU15_PLL_PLLCTL0 0
+#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT 0
+#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT 2
+#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT 22
+#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT 24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27
+#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT 30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31
+
+#define PMU15_PLL_PLLCTL1 1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30
+
+#define PMU15_PLL_PLLCTL2 2
+#define PMU15_PLL_PC2_CTEN_MASK 0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT 0
+
+#define PMU15_PLL_PLLCTL3 3
+#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3
+
+#define PMU15_PLL_PLLCTL4 4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6
+#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT 9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13
+#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT 20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25
+#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT 26
+
+#define PMU15_PLL_PLLCTL5 5
+#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT 27
+
+#define PMU15_PLL_PLLCTL6 6
+#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT 0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20
+#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT 27
+
+#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
+#define PMU15_ARM_96MHZ 96000000
+#define PMU15_ARM_98MHZ 98400000
+#define PMU15_ARM_97MHZ 97000000
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT 0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN 0
+#define PMU17_PLLCTL0_BBPLL_DRST 3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8
+
+
+#define PMU4716_MAINPLL_PLL0 12
+
+
+#define PMU5356_MAINPLL_PLL0 0
+#define PMU5357_MAINPLL_PLL0 0
+
+
+#define RES4716_PROC_PLL_ON 0x00000040
+#define RES4716_PROC_HT_AVAIL 0x00000080
+
+
+#define CCTRL_471X_I2S_PINS_ENABLE 0x0080
+
+
+
+#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000
+#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000
+
+
+#define RES5354_EXT_SWITCHER_PWM 0
+#define RES5354_BB_SWITCHER_PWM 1
+#define RES5354_BB_SWITCHER_BURST 2
+#define RES5354_BB_EXT_SWITCHER_BURST 3
+#define RES5354_ILP_REQUEST 4
+#define RES5354_RADIO_SWITCHER_PWM 5
+#define RES5354_RADIO_SWITCHER_BURST 6
+#define RES5354_ROM_SWITCH 7
+#define RES5354_PA_REF_LDO 8
+#define RES5354_RADIO_LDO 9
+#define RES5354_AFE_LDO 10
+#define RES5354_PLL_LDO 11
+#define RES5354_BG_FILTBYP 12
+#define RES5354_TX_FILTBYP 13
+#define RES5354_RX_FILTBYP 14
+#define RES5354_XTAL_PU 15
+#define RES5354_XTAL_EN 16
+#define RES5354_BB_PLL_FILTBYP 17
+#define RES5354_RF_PLL_FILTBYP 18
+#define RES5354_BB_PLL_PU 19
+
+
+#define CCTRL5357_EXTPA (1<<14)
+#define CCTRL5357_ANT_MUX_2o3 (1<<15)
+#define CCTRL5357_NFLASH (1<<16)
+
+
+#define CCTRL43217_EXTPA_C0 (1<<13)
+#define CCTRL43217_EXTPA_C1 (1<<8)
+
+
+#define RES4328_EXT_SWITCHER_PWM 0
+#define RES4328_BB_SWITCHER_PWM 1
+#define RES4328_BB_SWITCHER_BURST 2
+#define RES4328_BB_EXT_SWITCHER_BURST 3
+#define RES4328_ILP_REQUEST 4
+#define RES4328_RADIO_SWITCHER_PWM 5
+#define RES4328_RADIO_SWITCHER_BURST 6
+#define RES4328_ROM_SWITCH 7
+#define RES4328_PA_REF_LDO 8
+#define RES4328_RADIO_LDO 9
+#define RES4328_AFE_LDO 10
+#define RES4328_PLL_LDO 11
+#define RES4328_BG_FILTBYP 12
+#define RES4328_TX_FILTBYP 13
+#define RES4328_RX_FILTBYP 14
+#define RES4328_XTAL_PU 15
+#define RES4328_XTAL_EN 16
+#define RES4328_BB_PLL_FILTBYP 17
+#define RES4328_RF_PLL_FILTBYP 18
+#define RES4328_BB_PLL_PU 19
+
+
+#define RES4325_BUCK_BOOST_BURST 0
+#define RES4325_CBUCK_BURST 1
+#define RES4325_CBUCK_PWM 2
+#define RES4325_CLDO_CBUCK_BURST 3
+#define RES4325_CLDO_CBUCK_PWM 4
+#define RES4325_BUCK_BOOST_PWM 5
+#define RES4325_ILP_REQUEST 6
+#define RES4325_ABUCK_BURST 7
+#define RES4325_ABUCK_PWM 8
+#define RES4325_LNLDO1_PU 9
+#define RES4325_OTP_PU 10
+#define RES4325_LNLDO3_PU 11
+#define RES4325_LNLDO4_PU 12
+#define RES4325_XTAL_PU 13
+#define RES4325_ALP_AVAIL 14
+#define RES4325_RX_PWRSW_PU 15
+#define RES4325_TX_PWRSW_PU 16
+#define RES4325_RFPLL_PWRSW_PU 17
+#define RES4325_LOGEN_PWRSW_PU 18
+#define RES4325_AFE_PWRSW_PU 19
+#define RES4325_BBPLL_PWRSW_PU 20
+#define RES4325_HT_AVAIL 21
+
+
+#define RES4325B0_CBUCK_LPOM 1
+#define RES4325B0_CBUCK_BURST 2
+#define RES4325B0_CBUCK_PWM 3
+#define RES4325B0_CLDO_PU 4
+
+
+#define RES4325C1_LNLDO2_PU 12
+
+
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0
+#define CST4325_SPROM_SEL 1
+#define CST4325_OTP_SEL 2
+#define CST4325_OTP_PWRDN 3
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200
+#define CST4325_PMUTOP_2B_SHIFT 9
+
+#define RES4329_RESERVED0 0
+#define RES4329_CBUCK_LPOM 1
+#define RES4329_CBUCK_BURST 2
+#define RES4329_CBUCK_PWM 3
+#define RES4329_CLDO_PU 4
+#define RES4329_PALDO_PU 5
+#define RES4329_ILP_REQUEST 6
+#define RES4329_RESERVED7 7
+#define RES4329_RESERVED8 8
+#define RES4329_LNLDO1_PU 9
+#define RES4329_OTP_PU 10
+#define RES4329_RESERVED11 11
+#define RES4329_LNLDO2_PU 12
+#define RES4329_XTAL_PU 13
+#define RES4329_ALP_AVAIL 14
+#define RES4329_RX_PWRSW_PU 15
+#define RES4329_TX_PWRSW_PU 16
+#define RES4329_RFPLL_PWRSW_PU 17
+#define RES4329_LOGEN_PWRSW_PU 18
+#define RES4329_AFE_PWRSW_PU 19
+#define RES4329_BBPLL_PWRSW_PU 20
+#define RES4329_HT_AVAIL 21
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0
+#define CST4329_SPROM_SEL 1
+#define CST4329_OTP_SEL 2
+#define CST4329_OTP_PWRDN 3
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+
+#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4312_DEFCIS_SEL 0
+#define CST4312_SPROM_SEL 1
+#define CST4312_OTP_SEL 2
+#define CST4312_OTP_BAD 3
+
+
+#define RES4312_SWITCHER_BURST 0
+#define RES4312_SWITCHER_PWM 1
+#define RES4312_PA_REF_LDO 2
+#define RES4312_CORE_LDO_BURST 3
+#define RES4312_CORE_LDO_PWM 4
+#define RES4312_RADIO_LDO 5
+#define RES4312_ILP_REQUEST 6
+#define RES4312_BG_FILTBYP 7
+#define RES4312_TX_FILTBYP 8
+#define RES4312_RX_FILTBYP 9
+#define RES4312_XTAL_PU 10
+#define RES4312_ALP_AVAIL 11
+#define RES4312_BB_PLL_FILTBYP 12
+#define RES4312_RF_PLL_FILTBYP 13
+#define RES4312_HT_AVAIL 14
+
+
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0
+#define CST4322_SPROM_PRESENT 1
+#define CST4322_OTP_PRESENT 2
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0
+#define CST4322_BOOT_FROM_ROM 1
+#define CST4322_BOOT_FROM_FLASH 2
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0
+#define CST4322_RES_INIT_MODE_ILPREQ 1
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2
+#define CST4322_RES_INIT_MODE_HTAVAIL 3
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
+
+
+#define CCTRL43224_GPIO_TOGGLE 0x8000
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
+
+
+#define RES43236_REGULATOR 0
+#define RES43236_ILP_REQUEST 1
+#define RES43236_XTAL_PU 2
+#define RES43236_ALP_AVAIL 3
+#define RES43236_SI_PLL_ON 4
+#define RES43236_HT_SI_AVAIL 5
+
+
+#define CCTRL43236_BT_COEXIST (1<<0)
+#define CCTRL43236_SECI (1<<1)
+#define CCTRL43236_EXT_LNA (1<<2)
+#define CCTRL43236_ANT_MUX_2o3 (1<<3)
+#define CCTRL43236_GSIO (1<<4)
+
+
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_SEL_MASK 0x00000080
+#define CST43236_OTP_SEL_SHIFT 7
+#define CST43236_HSIC_MASK 0x00000100
+#define CST43236_BP_CLK 0x00000200
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0
+#define CST43236_BOOT_FROM_ROM 1
+#define CST43236_BOOT_FROM_FLASH 2
+#define CST43236_BOOT_FROM_INVALID 3
+
+
+#define RES43237_REGULATOR 0
+#define RES43237_ILP_REQUEST 1
+#define RES43237_XTAL_PU 2
+#define RES43237_ALP_AVAIL 3
+#define RES43237_SI_PLL_ON 4
+#define RES43237_HT_SI_AVAIL 5
+
+
+#define CCTRL43237_BT_COEXIST (1<<0)
+#define CCTRL43237_SECI (1<<1)
+#define CCTRL43237_EXT_LNA (1<<2)
+#define CCTRL43237_ANT_MUX_2o3 (1<<3)
+#define CCTRL43237_GSIO (1<<4)
+
+
+#define CST43237_SFLASH_MASK 0x00000040
+#define CST43237_OTP_SEL_MASK 0x00000080
+#define CST43237_OTP_SEL_SHIFT 7
+#define CST43237_HSIC_MASK 0x00000100
+#define CST43237_BP_CLK 0x00000200
+#define CST43237_BOOT_MASK 0x00001800
+#define CST43237_BOOT_SHIFT 11
+#define CST43237_BOOT_FROM_SRAM 0
+#define CST43237_BOOT_FROM_ROM 1
+#define CST43237_BOOT_FROM_FLASH 2
+#define CST43237_BOOT_FROM_INVALID 3
+
+
+#define RES43239_OTP_PU 9
+#define RES43239_MACPHY_CLKAVAIL 23
+#define RES43239_HT_AVAIL 24
+
+
+#define CST43239_SPROM_MASK 0x00000002
+#define CST43239_SFLASH_MASK 0x00000004
+#define CST43239_RES_INIT_MODE_SHIFT 7
+#define CST43239_RES_INIT_MODE_MASK 0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15))
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15))
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0)
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0))
+
+
+#define RES4324_OTP_PU 10
+#define RES4324_HT_AVAIL 29
+#define RES4324_MACPHY_CLKAVAIL 30
+
+
+#define CST4324_SPROM_MASK 0x00000080
+#define CST4324_SFLASH_MASK 0x00400000
+#define CST4324_RES_INIT_MODE_SHIFT 10
+#define CST4324_RES_INIT_MODE_MASK 0x00000c00
+#define CST4324_CHIPMODE_MASK 0x7
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2))
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6)
+
+
+#define RES4331_REGULATOR 0
+#define RES4331_ILP_REQUEST 1
+#define RES4331_XTAL_PU 2
+#define RES4331_ALP_AVAIL 3
+#define RES4331_SI_PLL_ON 4
+#define RES4331_HT_SI_AVAIL 5
+
+
+#define CCTRL4331_BT_COEXIST (1<<0)
+#define CCTRL4331_SECI (1<<1)
+#define CCTRL4331_EXT_LNA_G (1<<2)
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
+#define CCTRL4331_EXTPA_EN (1<<4)
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
+#define CCTRL4331_EXTPA_EN2 (1<<12)
+#define CCTRL4331_EXT_LNA_A (1<<13)
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
+#define CCTRL4331_EXTPA_ANA_EN (1<<24)
+
+
+#define CST4331_XTAL_FREQ 0x00000001
+#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
+#define CST4331_SPROM_OTP_SEL_SHIFT 1
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+
+#define RES4315_CBUCK_LPOM 1
+#define RES4315_CBUCK_BURST 2
+#define RES4315_CBUCK_PWM 3
+#define RES4315_CLDO_PU 4
+#define RES4315_PALDO_PU 5
+#define RES4315_ILP_REQUEST 6
+#define RES4315_LNLDO1_PU 9
+#define RES4315_OTP_PU 10
+#define RES4315_LNLDO2_PU 12
+#define RES4315_XTAL_PU 13
+#define RES4315_ALP_AVAIL 14
+#define RES4315_RX_PWRSW_PU 15
+#define RES4315_TX_PWRSW_PU 16
+#define RES4315_RFPLL_PWRSW_PU 17
+#define RES4315_LOGEN_PWRSW_PU 18
+#define RES4315_AFE_PWRSW_PU 19
+#define RES4315_BBPLL_PWRSW_PU 20
+#define RES4315_HT_AVAIL 21
+
+
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4315_DEFCIS_SEL 0x00000000
+#define CST4315_SPROM_SEL 0x00000001
+#define CST4315_OTP_SEL 0x00000002
+#define CST4315_OTP_PWRDN 0x00000003
+#define CST4315_SDIO_MODE 0x00000004
+#define CST4315_RCAL_VALID 0x00000008
+#define CST4315_RCAL_VALUE_MASK 0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT 4
+#define CST4315_PALDO_EXTPNP 0x00000200
+#define CST4315_CBUCK_MODE_MASK 0x00000c00
+#define CST4315_CBUCK_MODE_BURST 0x00000400
+#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
+
+
+#define RES4319_CBUCK_LPOM 1
+#define RES4319_CBUCK_BURST 2
+#define RES4319_CBUCK_PWM 3
+#define RES4319_CLDO_PU 4
+#define RES4319_PALDO_PU 5
+#define RES4319_ILP_REQUEST 6
+#define RES4319_LNLDO1_PU 9
+#define RES4319_OTP_PU 10
+#define RES4319_LNLDO2_PU 12
+#define RES4319_XTAL_PU 13
+#define RES4319_ALP_AVAIL 14
+#define RES4319_RX_PWRSW_PU 15
+#define RES4319_TX_PWRSW_PU 16
+#define RES4319_RFPLL_PWRSW_PU 17
+#define RES4319_LOGEN_PWRSW_PU 18
+#define RES4319_AFE_PWRSW_PU 19
+#define RES4319_BBPLL_PWRSW_PU 20
+#define RES4319_HT_AVAIL 21
+
+
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+#define CST4319_DEFCIS_SEL 0x00000000
+#define CST4319_SPROM_SEL 0x00000040
+#define CST4319_OTP_SEL 0x00000080
+#define CST4319_OTP_PWRDN 0x000000c0
+#define CST4319_SDIO_USB_MODE 0x00000100
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+#define CST4319_PALDO_EXTPNP 0x00010000
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
+#define PMU1_PLL0_CHIPCTL0 0
+#define PMU1_PLL0_CHIPCTL1 1
+#define PMU1_PLL0_CHIPCTL2 2
+#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT 19
+#define CCTL_4319USB_48MHZ_PLL_SEL 1
+#define CCTL_4319USB_24MHZ_PLL_SEL 2
+
+
+#define RES4336_CBUCK_LPOM 0
+#define RES4336_CBUCK_BURST 1
+#define RES4336_CBUCK_LP_PWM 2
+#define RES4336_CBUCK_PWM 3
+#define RES4336_CLDO_PU 4
+#define RES4336_DIS_INT_RESET_PD 5
+#define RES4336_ILP_REQUEST 6
+#define RES4336_LNLDO_PU 7
+#define RES4336_LDO3P3_PU 8
+#define RES4336_OTP_PU 9
+#define RES4336_XTAL_PU 10
+#define RES4336_ALP_AVAIL 11
+#define RES4336_RADIO_PU 12
+#define RES4336_BG_PU 13
+#define RES4336_VREG1p4_PU_PU 14
+#define RES4336_AFE_PWRSW_PU 15
+#define RES4336_RX_PWRSW_PU 16
+#define RES4336_TX_PWRSW_PU 17
+#define RES4336_BB_PWRSW_PU 18
+#define RES4336_SYNTH_PWRSW_PU 19
+#define RES4336_MISC_PWRSW_PU 20
+#define RES4336_LOGEN_PWRSW_PU 21
+#define RES4336_BBPLL_PWRSW_PU 22
+#define RES4336_MACPHY_CLKAVAIL 23
+#define RES4336_HT_AVAIL 24
+#define RES4336_RSVD 25
+
+
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+
+#define PCTL_4336_SERIAL_ENAB (1 << 24)
+
+
+#define RES4330_CBUCK_LPOM 0
+#define RES4330_CBUCK_BURST 1
+#define RES4330_CBUCK_LP_PWM 2
+#define RES4330_CBUCK_PWM 3
+#define RES4330_CLDO_PU 4
+#define RES4330_DIS_INT_RESET_PD 5
+#define RES4330_ILP_REQUEST 6
+#define RES4330_LNLDO_PU 7
+#define RES4330_LDO3P3_PU 8
+#define RES4330_OTP_PU 9
+#define RES4330_XTAL_PU 10
+#define RES4330_ALP_AVAIL 11
+#define RES4330_RADIO_PU 12
+#define RES4330_BG_PU 13
+#define RES4330_VREG1p4_PU_PU 14
+#define RES4330_AFE_PWRSW_PU 15
+#define RES4330_RX_PWRSW_PU 16
+#define RES4330_TX_PWRSW_PU 17
+#define RES4330_BB_PWRSW_PU 18
+#define RES4330_SYNTH_PWRSW_PU 19
+#define RES4330_MISC_PWRSW_PU 20
+#define RES4330_LOGEN_PWRSW_PU 21
+#define RES4330_BBPLL_PWRSW_PU 22
+#define RES4330_MACPHY_CLKAVAIL 23
+#define RES4330_HT_AVAIL 24
+#define RES4330_5gRX_PWRSW_PU 25
+#define RES4330_5gTX_PWRSW_PU 26
+#define RES4330_5g_LOGEN_PWRSW_PU 27
+
+
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6)
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6)
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0)
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4)
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6)
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7)
+#define CST4330_OTP_PRESENT 0x00000010
+#define CST4330_LPO_AUTODET_EN 0x00000020
+#define CST4330_ARMREMAP_0 0x00000040
+#define CST4330_SPROM_PRESENT 0x00000080
+#define CST4330_ILPDIV_EN 0x00000100
+#define CST4330_LPO_SEL 0x00000200
+#define CST4330_RES_INIT_MODE_SHIFT 10
+#define CST4330_RES_INIT_MODE_MASK 0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT 12
+#define CST4330_CBUCK_MODE_MASK 0x00003000
+#define CST4330_CBUCK_POWER_OK 0x00004000
+#define CST4330_BB_PLL_LOCKED 0x00008000
+#define SOCDEVRAM_BP_ADDR 0x1E000000
+#define SOCDEVRAM_ARM_ADDR 0x00800000
+
+
+#define PCTL_4330_SERIAL_ENAB (1 << 24)
+
+
+#define CCTRL_4330_GPIO_SEL 0x00000001
+#define CCTRL_4330_ERCX_SEL 0x00000002
+#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004
+#define CCTRL_4330_JTAG_DISABLE 0x00000008
+
+#define PMU_VREG0_ADDR 0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3
+
+
+#define RES4334_LPLDO_PU 0
+#define RES4334_RESET_PULLDN_DIS 1
+#define RES4334_PMU_BG_PU 2
+#define RES4334_HSIC_LDO_PU 3
+#define RES4334_CBUCK_LPOM_PU 4
+#define RES4334_CBUCK_PFM_PU 5
+#define RES4334_CLDO_PU 6
+#define RES4334_LPLDO2_LVM 7
+#define RES4334_LNLDO_PU 8
+#define RES4334_LDO3P3_PU 9
+#define RES4334_OTP_PU 10
+#define RES4334_XTAL_PU 11
+#define RES4334_WL_PWRSW_PU 12
+#define RES4334_LQ_AVAIL 13
+#define RES4334_LOGIC_RET 14
+#define RES4334_MEM_SLEEP 15
+#define RES4334_MACPHY_RET 16
+#define RES4334_WL_CORE_READY 17
+#define RES4334_ILP_REQ 18
+#define RES4334_ALP_AVAIL 19
+#define RES4334_MISC_PWRSW_PU 20
+#define RES4334_SYNTH_PWRSW_PU 21
+#define RES4334_RX_PWRSW_PU 22
+#define RES4334_RADIO_PU 23
+#define RES4334_WL_PMU_PU 24
+#define RES4334_VCO_LDO_PU 25
+#define RES4334_AFE_LDO_PU 26
+#define RES4334_RX_LDO_PU 27
+#define RES4334_TX_LDO_PU 28
+#define RES4334_HT_AVAIL 29
+#define RES4334_MACPHY_CLK_AVAIL 30
+
+
+#define CST4334_CHIPMODE_MASK 7
+#define CST4334_SDIO_MODE 0x00000000
+#define CST4334_SPI_MODE 0x00000004
+#define CST4334_HSIC_MODE 0x00000006
+#define CST4334_BLUSB_MODE 0x00000007
+#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT 0x00000010
+#define CST4334_LPO_AUTODET_EN 0x00000020
+#define CST4334_ARMREMAP_0 0x00000040
+#define CST4334_SPROM_PRESENT 0x00000080
+#define CST4334_ILPDIV_EN_MASK 0x00000100
+#define CST4334_ILPDIV_EN_SHIFT 8
+#define CST4334_LPO_SEL_MASK 0x00000200
+#define CST4334_LPO_SEL_SHIFT 9
+#define CST4334_RES_INIT_MODE_MASK 0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT 10
+
+
+#define PCTL_4334_GPIO3_ENAB (1 << 3)
+
+
+#define CCTRL4334_HSIC_LDO_PU (1 << 23)
+
+
+#define CCTRL1_4324_GPIO_SEL (1 << 0)
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)
+
+
+
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
+
+
+#define RES4314_LPLDO_PU 0
+#define RES4314_PMU_SLEEP_DIS 1
+#define RES4314_PMU_BG_PU 2
+#define RES4314_CBUCK_LPOM_PU 3
+#define RES4314_CBUCK_PFM_PU 4
+#define RES4314_CLDO_PU 5
+#define RES4314_LPLDO2_LVM 6
+#define RES4314_WL_PMU_PU 7
+#define RES4314_LNLDO_PU 8
+#define RES4314_LDO3P3_PU 9
+#define RES4314_OTP_PU 10
+#define RES4314_XTAL_PU 11
+#define RES4314_WL_PWRSW_PU 12
+#define RES4314_LQ_AVAIL 13
+#define RES4314_LOGIC_RET 14
+#define RES4314_MEM_SLEEP 15
+#define RES4314_MACPHY_RET 16
+#define RES4314_WL_CORE_READY 17
+#define RES4314_ILP_REQ 18
+#define RES4314_ALP_AVAIL 19
+#define RES4314_MISC_PWRSW_PU 20
+#define RES4314_SYNTH_PWRSW_PU 21
+#define RES4314_RX_PWRSW_PU 22
+#define RES4314_RADIO_PU 23
+#define RES4314_VCO_LDO_PU 24
+#define RES4314_AFE_LDO_PU 25
+#define RES4314_RX_LDO_PU 26
+#define RES4314_TX_LDO_PU 27
+#define RES4314_HT_AVAIL 28
+#define RES4314_MACPHY_CLK_AVAIL 29
+
+
+#define CST4314_OTP_ENABLED 0x00200000
+
+
+#define RES43228_NOT_USED 0
+#define RES43228_ILP_REQUEST 1
+#define RES43228_XTAL_PU 2
+#define RES43228_ALP_AVAIL 3
+#define RES43228_PLL_EN 4
+#define RES43228_HT_PHY_AVAIL 5
+
+
+#define CST43228_ILP_DIV_EN 0x1
+#define CST43228_OTP_PRESENT 0x2
+#define CST43228_SERDES_REFCLK_PADSEL 0x4
+#define CST43228_SDIO_MODE 0x8
+#define CST43228_SDIO_OTP_PRESENT 0x10
+#define CST43228_SDIO_RESET 0x20
+
+
+#define CST4706_PKG_OPTION (1<<0)
+#define CST4706_SFLASH_PRESENT (1<<1)
+#define CST4706_SFLASH_TYPE (1<<2)
+#define CST4706_MIPS_BENDIAN (1<<3)
+#define CST4706_PCIE1_DISABLE (1<<5)
+
+
+#define FLSTRCF4706_MASK 0x000000ff
+#define FLSTRCF4706_SF1 0x00000001
+#define FLSTRCF4706_PF1 0x00000002
+#define FLSTRCF4706_SF1_TYPE 0x00000004
+#define FLSTRCF4706_NF1 0x00000008
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070
+
+
+#define CCTRL4360_SECI_MODE (1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE (1 << 3)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8)
+#define CCTRL4360_BT_LGCY_MODE (1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21)
+
+
+#define RES4360_REGULATOR 0
+#define RES4360_ILP_AVAIL 1
+#define RES4360_ILP_REQ 2
+#define RES4360_XTAL_PU 3
+#define RES4360_ALP_AVAIL 4
+#define RES4360_BBPLLPWRSW_PU 5
+#define RES4360_HT_AVAIL 6
+#define RES4360_OTP_PU 7
+#define RES4360_USBLDO_PU 8
+#define RES4360_USBPLL_PWRSW_PU 9
+#define RES4360_LQ_AVAIL 10
+
+#define CST4360_XTAL_40MZ 0x00000001
+#define CST4360_SFLASH 0x00000002
+#define CST4360_SPROM_PRESENT 0x00000004
+#define CST4360_SFLASH_TYPE 0x00000004
+#define CST4360_OTP_ENABLED 0x00000008
+#define CST4360_REMAP_ROM 0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK 0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT 5
+#define CST4360_ILP_DIVEN 0x00000080
+#define CST4360_MODE_USB 0x00000100
+#define CST4360_SPROM_SIZE_MASK 0x00000600
+#define CST4360_SPROM_SIZE_SHIFT 9
+#define CST4360_BBPLL_LOCK 0x00000800
+#define CST4360_AVBBPLL_LOCK 0x00001000
+#define CST4360_USBBBPLL_LOCK 0x00002000
+
+#define CCTRL_4360_UART_SEL 0x2
+
+
+#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) & CST4360_MODE_USB)
+
+
+#define PMU_MAX_TRANSITION_DLY 15000
+
+
+#define PMURES_UP_TRANSITION 2
+
+
+
+#define SECI_MODE_UART 0x0
+#define SECI_MODE_SECI 0x1
+#define SECI_MODE_LEGACY_3WIRE_BT 0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3
+#define SECI_MODE_HALF_SECI 0x4
+
+#define SECI_RESET (1 << 0)
+#define SECI_RESET_BAR_UART (1 << 1)
+#define SECI_ENAB_SECI_ECI (1 << 2)
+#define SECI_ENAB_SECIOUT_DIS (1 << 3)
+#define SECI_MODE_MASK 0x7
+#define SECI_MODE_SHIFT 4
+#define SECI_UPD_SECI (1 << 7)
+
+#define SECI_SIGNOFF_0 0xDB
+#define SECI_SIGNOFF_1 0
+
+
+#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE (1 << 0)
+#define SECI_UART_MSR_RTS_STATE (1 << 1)
+#define SECI_UART_SECI_IN_STATE (1 << 2)
+#define SECI_UART_SECI_IN2_STATE (1 << 3)
+
+
+#define SECI_UART_LCR_STOP_BITS (1 << 0)
+#define SECI_UART_LCR_PARITY_EN (1 << 1)
+#define SECI_UART_LCR_PARITY (1 << 2)
+#define SECI_UART_LCR_RX_EN (1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4)
+#define SECI_UART_LCR_TXO_EN (1 << 5)
+#define SECI_UART_LCR_RTSO_EN (1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN (1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK (1 << 8)
+#define SECI_UART_LCR_TXCRC_INV (1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF (1 << 10)
+#define SECI_UART_LCR_TXCRC_EN (1 << 11)
+
+#define SECI_UART_MCR_TX_EN (1 << 0)
+#define SECI_UART_MCR_PRTS (1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN (1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN (1 << 4)
+#define SECI_UART_MCR_AUTO_RTS (1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT (1 << 9)
+
+
+
+
+#define ECI_BW_20 0x0
+#define ECI_BW_25 0x1
+#define ECI_BW_30 0x2
+#define ECI_BW_35 0x3
+#define ECI_BW_40 0x4
+#define ECI_BW_45 0x5
+#define ECI_BW_50 0x6
+#define ECI_BW_ALL 0x7
+
+
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 00000000000000..44d68329e660d0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,275 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+#define SB_BUS_SIZE 0x10000
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE)
+
+
+#define SBCONFIGOFF 0xf00
+#define SBCONFIGSIZE 256
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48
+#define SBTMERRLOG 0x50
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/*
+ * Sonics SiliconBackplane per-core configuration register image.
+ * Maps the 256-byte window at SBCONFIGOFF (0xf00) in each core's register
+ * space; member offsets line up with the SBxxx offset defines above.
+ * Each "PAD" member expands (via the PAD/_XSTR/__LINE__ macros at the top
+ * of this header) to a uniquely named reserved field, used only to keep
+ * the real registers at their hardware offsets.
+ */
+typedef volatile struct _sbconfig {
+ uint32 PAD[2]; /* 0x00-0x07: reserved */
+ uint32 sbipsflag; /* 0x08 (SBIPSFLAG) */
+ uint32 PAD[3]; /* 0x0c-0x17 */
+ uint32 sbtpsflag; /* 0x18 (SBTPSFLAG) */
+ uint32 PAD[11]; /* 0x1c-0x47 */
+ uint32 sbtmerrloga; /* 0x48 (SBTMERRLOGA) */
+ uint32 PAD; /* 0x4c */
+ uint32 sbtmerrlog; /* 0x50 (SBTMERRLOG) */
+ uint32 PAD[3]; /* 0x54-0x5f */
+ uint32 sbadmatch3; /* 0x60 (SBADMATCH3) */
+ uint32 PAD; /* 0x64 */
+ uint32 sbadmatch2; /* 0x68 (SBADMATCH2) */
+ uint32 PAD; /* 0x6c */
+ uint32 sbadmatch1; /* 0x70 (SBADMATCH1) */
+ uint32 PAD[7]; /* 0x74-0x8f */
+ uint32 sbimstate; /* 0x90 (SBIMSTATE) */
+ uint32 sbintvec; /* 0x94 (SBINTVEC) */
+ uint32 sbtmstatelow; /* 0x98 (SBTMSTATELOW) */
+ uint32 sbtmstatehigh; /* 0x9c (SBTMSTATEHIGH) */
+ uint32 sbbwa0; /* 0xa0 (SBBWA0) */
+ uint32 PAD; /* 0xa4 */
+ uint32 sbimconfiglow; /* 0xa8 (SBIMCONFIGLOW) */
+ uint32 sbimconfighigh; /* 0xac (SBIMCONFIGHIGH) */
+ uint32 sbadmatch0; /* 0xb0 (SBADMATCH0) */
+ uint32 PAD; /* 0xb4 */
+ uint32 sbtmconfiglow; /* 0xb8 (SBTMCONFIGLOW) */
+ uint32 sbtmconfighigh; /* 0xbc (SBTMCONFIGHIGH) */
+ uint32 sbbconfig; /* 0xc0 (SBBCONFIG) */
+ uint32 PAD; /* 0xc4 */
+ uint32 sbbstate; /* 0xc8 (SBBSTATE) */
+ uint32 PAD[3]; /* 0xcc-0xd7 */
+ uint32 sbactcnfg; /* 0xd8 (SBACTCNFG) */
+ uint32 PAD[3]; /* 0xdc-0xe7 */
+ uint32 sbflagst; /* 0xe8 (SBFLAGST) */
+ uint32 PAD[3]; /* 0xec-0xf7 */
+ uint32 sbidlow; /* 0xf8 (SBIDLOW) */
+ uint32 sbidhigh; /* 0xfc (SBIDHIGH) */
+} sbconfig_t;
+
+#endif
+
+
+#define SBIPS_INT1_MASK 0x3f
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000
+#define SBIPS_INT4_SHIFT 24
+
+
+#define SBTPS_NUM0_MASK 0x3f
+#define SBTPS_F0EN0 0x40
+
+
+#define SBTMEL_CM 0x00000007
+#define SBTMEL_CI 0x0000ff00
+#define SBTMEL_EC 0x0f000000
+#define SBTMEL_ME 0x80000000
+
+
+#define SBIM_PC 0xf
+#define SBIM_AP_MASK 0x30
+#define SBIM_AP_BOTH 0x00
+#define SBIM_AP_TS 0x10
+#define SBIM_AP_TK 0x20
+#define SBIM_AP_RSV 0x30
+#define SBIM_IBE 0x20000
+#define SBIM_TO 0x40000
+#define SBIM_BY 0x01800000
+#define SBIM_RJ 0x02000000
+
+
+#define SBTML_RESET 0x0001
+#define SBTML_REJ_MASK 0x0006
+#define SBTML_REJ 0x0002
+#define SBTML_TMPREJ 0x0004
+
+#define SBTML_SICF_SHIFT 16
+
+
+#define SBTMH_SERR 0x0001
+#define SBTMH_INT 0x0002
+#define SBTMH_BUSY 0x0004
+#define SBTMH_TO 0x0020
+
+#define SBTMH_SISF_SHIFT 16
+
+
+#define SBBWA_TAB0_MASK 0xffff
+#define SBBWA_TAB1_MASK 0xffff
+#define SBBWA_TAB1_SHIFT 16
+
+
+#define SBIMCL_STO_MASK 0x7
+#define SBIMCL_RTO_MASK 0x70
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000
+#define SBIMCL_CID_SHIFT 16
+
+
+#define SBIMCH_IEM_MASK 0xc
+#define SBIMCH_TEM_MASK 0x30
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0
+#define SBIMCH_BEM_SHIFT 6
+
+
+#define SBAM_TYPE_MASK 0x3
+#define SBAM_AD64 0x4
+#define SBAM_ADINT0_MASK 0xf8
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400
+#define SBAM_ADNEG 0x800
+#define SBAM_BASE0_MASK 0xffffff00
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000
+#define SBAM_BASE2_SHIFT 16
+
+
+#define SBTMCL_CD_MASK 0xff
+#define SBTMCL_CO_MASK 0xf800
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000
+#define SBTMCL_IM_SHIFT 24
+
+
+#define SBTMCH_BM_MASK 0x3
+#define SBTMCH_RM_MASK 0x3
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00
+#define SBTMCH_IM_SHIFT 10
+
+
+#define SBBC_LAT_MASK 0x3
+#define SBBC_MAX0_MASK 0xf0000
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000
+#define SBBC_MAX1_SHIFT 20
+
+
+#define SBBS_SRD 0x1
+#define SBBS_HRD 0x2
+
+
+#define SBIDL_CS_MASK 0x3
+#define SBIDL_AR_MASK 0x38
+#define SBIDL_AR_SHIFT 3
+#define SBIDL_SYNCH 0x40
+#define SBIDL_INIT 0x80
+#define SBIDL_MINLAT_MASK 0xf00
+#define SBIDL_MINLAT_SHIFT 8
+#define SBIDL_MAXLAT 0xf000
+#define SBIDL_MAXLAT_SHIFT 12
+#define SBIDL_FIRST 0x10000
+#define SBIDL_CW_MASK 0xc0000
+#define SBIDL_CW_SHIFT 18
+#define SBIDL_TP_MASK 0xf00000
+#define SBIDL_TP_SHIFT 20
+#define SBIDL_IP_MASK 0xf000000
+#define SBIDL_IP_SHIFT 24
+#define SBIDL_RV_MASK 0xf0000000
+#define SBIDL_RV_SHIFT 28
+#define SBIDL_RV_2_2 0x00000000
+#define SBIDL_RV_2_3 0x10000000
+
+
+#define SBIDH_RC_MASK 0x000f
+#define SBIDH_RCE_MASK 0x7000
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000
+#define SBIDH_VC_SHIFT 16
+
+#define SB_COMMIT 0xfd8
+
+
+#define SB_VEND_BCM 0x4243
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
new file mode 100644
index 00000000000000..da1f1a10a65379
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h
@@ -0,0 +1,370 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx .
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+
+
+
+
+
+
+/*
+ * 32-bit HND DMA engine register layouts.  Per-bit meanings of 'control'
+ * and 'status' are given by the XC_*/XS_* (transmit) and RC_*/RS_*
+ * (receive) defines below.
+ */
+typedef volatile struct {
+ uint32 control;
+ uint32 addr;
+ uint32 ptr;
+ uint32 status;
+} dma32regs_t;
+
+/* Paired transmit/receive register sets for one DMA channel. */
+typedef volatile struct {
+ dma32regs_t xmt; /* transmit direction */
+ dma32regs_t rcv; /* receive direction */
+} dma32regp_t;
+
+/* Diagnostic FIFO access window; select values for 'fifoaddr' are the
+ * FA_SEL_* defines below. */
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma32diag_t;
+
+
+/* One 32-bit DMA descriptor: control word (CTRL_* bits below) plus a
+ * 32-bit buffer address. */
+typedef volatile struct {
+ uint32 ctrl;
+ uint32 addr;
+} dma32dd_t;
+
+
+#define D32RINGALIGN_BITS 12
+#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
+#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
+
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
+
+
+#define XC_XE ((uint32)1 << 0)
+#define XC_SE ((uint32)1 << 1)
+#define XC_LE ((uint32)1 << 2)
+#define XC_FL ((uint32)1 << 4)
+#define XC_MR_MASK 0x000000C0
+#define XC_MR_SHIFT 6
+#define XC_PD ((uint32)1 << 11)
+#define XC_AE ((uint32)3 << 16)
+#define XC_AE_SHIFT 16
+#define XC_BL_MASK 0x001C0000
+#define XC_BL_SHIFT 18
+#define XC_PC_MASK 0x00E00000
+#define XC_PC_SHIFT 21
+#define XC_PT_MASK 0x03000000
+#define XC_PT_SHIFT 24
+
+
+#define DMA_MR_1 0
+#define DMA_MR_2 1
+
+
+
+#define DMA_BL_16 0
+#define DMA_BL_32 1
+#define DMA_BL_64 2
+#define DMA_BL_128 3
+#define DMA_BL_256 4
+#define DMA_BL_512 5
+#define DMA_BL_1024 6
+
+
+#define DMA_PC_0 0
+#define DMA_PC_4 1
+#define DMA_PC_8 2
+#define DMA_PC_16 3
+
+
+
+#define DMA_PT_1 0
+#define DMA_PT_2 1
+#define DMA_PT_4 2
+#define DMA_PT_8 3
+
+
+#define XP_LD_MASK 0xfff
+
+
+#define XS_CD_MASK 0x0fff
+#define XS_XS_MASK 0xf000
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000
+#define XS_XS_ACTIVE 0x1000
+#define XS_XS_IDLE 0x2000
+#define XS_XS_STOPPED 0x3000
+#define XS_XS_SUSP 0x4000
+#define XS_XE_MASK 0xf0000
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000
+#define XS_XE_DPE 0x10000
+#define XS_XE_DFU 0x20000
+#define XS_XE_BEBR 0x30000
+#define XS_XE_BEDA 0x40000
+#define XS_AD_MASK 0xfff00000
+#define XS_AD_SHIFT 20
+
+
+#define RC_RE ((uint32)1 << 0)
+#define RC_RO_MASK 0xfe
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8)
+#define RC_SH ((uint32)1 << 9)
+#define RC_OC ((uint32)1 << 10)
+#define RC_PD ((uint32)1 << 11)
+#define RC_AE ((uint32)3 << 16)
+#define RC_AE_SHIFT 16
+#define RC_BL_MASK 0x001C0000
+#define RC_BL_SHIFT 18
+#define RC_PC_MASK 0x00E00000
+#define RC_PC_SHIFT 21
+#define RC_PT_MASK 0x03000000
+#define RC_PT_SHIFT 24
+
+
+#define RP_LD_MASK 0xfff
+
+
+#define RS_CD_MASK 0x0fff
+#define RS_RS_MASK 0xf000
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000
+#define RS_RS_ACTIVE 0x1000
+#define RS_RS_IDLE 0x2000
+#define RS_RS_STOPPED 0x3000
+#define RS_RE_MASK 0xf0000
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000
+#define RS_RE_DPE 0x10000
+#define RS_RE_DFO 0x20000
+#define RS_RE_BEBW 0x30000
+#define RS_RE_BEDA 0x40000
+#define RS_AD_MASK 0xfff00000
+#define RS_AD_SHIFT 20
+
+
+#define FA_OFF_MASK 0xffff
+#define FA_SEL_MASK 0xf0000
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000
+#define FA_SEL_XDP 0x10000
+#define FA_SEL_RDD 0x40000
+#define FA_SEL_RDP 0x50000
+#define FA_SEL_XFD 0x80000
+#define FA_SEL_XFP 0x90000
+#define FA_SEL_RFD 0xc0000
+#define FA_SEL_RFP 0xd0000
+#define FA_SEL_RSD 0xe0000
+#define FA_SEL_RSP 0xf0000
+
+
+#define CTRL_BC_MASK 0x00001fff
+#define CTRL_AE ((uint32)3 << 16)
+#define CTRL_AE_SHIFT 16
+#define CTRL_PARITY ((uint32)3 << 18)
+#define CTRL_EOT ((uint32)1 << 28)
+#define CTRL_IOC ((uint32)1 << 29)
+#define CTRL_EOF ((uint32)1 << 30)
+#define CTRL_SOF ((uint32)1 << 31)
+
+
+#define CTRL_CORE_MASK 0x0ff00000
+
+
+
+
+/*
+ * 64-bit HND DMA engine register layouts.  Per-bit meanings of 'control'
+ * and the status words are the D64_XC_*/D64_XS0_*/D64_XS1_* (transmit)
+ * and D64_RC_*/D64_RS0_*/D64_RS1_* (receive) defines below.  Buffer
+ * addresses are split across 'addrlow'/'addrhigh'.
+ */
+typedef volatile struct {
+ uint32 control;
+ uint32 ptr;
+ uint32 addrlow;
+ uint32 addrhigh;
+ uint32 status0;
+ uint32 status1;
+} dma64regs_t;
+
+/* Paired transmit/receive register sets for one 64-bit DMA channel. */
+typedef volatile struct {
+ dma64regs_t tx;
+ dma64regs_t rx;
+} dma64regp_t;
+
+/* Diagnostic FIFO access window; select values for 'fifoaddr' are the
+ * D64_FA_SEL_* defines below. */
+typedef volatile struct {
+ uint32 fifoaddr;
+ uint32 fifodatalow;
+ uint32 fifodatahigh;
+ uint32 pad;
+} dma64diag_t;
+
+
+/* One 64-bit DMA descriptor: two control words (D64_CTRL1_*/D64_CTRL2_*
+ * bits below) plus a 64-bit buffer address split low/high. */
+typedef volatile struct {
+ uint32 ctrl1;
+ uint32 ctrl2;
+ uint32 addrlow;
+ uint32 addrhigh;
+} dma64dd_t;
+
+
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+
+#define D64_XC_XE 0x00000001
+#define D64_XC_SE 0x00000002
+#define D64_XC_LE 0x00000004
+#define D64_XC_FL 0x00000010
+#define D64_XC_MR_MASK 0x000000C0
+#define D64_XC_MR_SHIFT 6
+#define D64_XC_PD 0x00000800
+#define D64_XC_AE 0x00030000
+#define D64_XC_AE_SHIFT 16
+#define D64_XC_BL_MASK 0x001C0000
+#define D64_XC_BL_SHIFT 18
+#define D64_XC_PC_MASK 0x00E00000
+#define D64_XC_PC_SHIFT 21
+#define D64_XC_PT_MASK 0x03000000
+#define D64_XC_PT_SHIFT 24
+
+
+#define D64_XP_LD_MASK 0x00001fff
+
+
+#define D64_XS0_CD_MASK 0x00001fff
+#define D64_XS0_XS_MASK 0xf0000000
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000
+#define D64_XS0_XS_ACTIVE 0x10000000
+#define D64_XS0_XS_IDLE 0x20000000
+#define D64_XS0_XS_STOPPED 0x30000000
+#define D64_XS0_XS_SUSP 0x40000000
+
+#define D64_XS1_AD_MASK 0x00001fff
+#define D64_XS1_XE_MASK 0xf0000000
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000
+#define D64_XS1_XE_DPE 0x10000000
+#define D64_XS1_XE_DFU 0x20000000
+#define D64_XS1_XE_DTE 0x30000000
+#define D64_XS1_XE_DESRE 0x40000000
+#define D64_XS1_XE_COREE 0x50000000
+
+
+#define D64_RC_RE 0x00000001
+#define D64_RC_RO_MASK 0x000000fe
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100
+#define D64_RC_SH 0x00000200
+#define D64_RC_OC 0x00000400
+#define D64_RC_PD 0x00000800
+#define D64_RC_AE 0x00030000
+#define D64_RC_AE_SHIFT 16
+#define D64_RC_BL_MASK 0x001C0000
+#define D64_RC_BL_SHIFT 18
+#define D64_RC_PC_MASK 0x00E00000
+#define D64_RC_PC_SHIFT 21
+#define D64_RC_PT_MASK 0x03000000
+#define D64_RC_PT_SHIFT 24
+
+
+#define DMA_CTRL_PEN (1 << 0)
+#define DMA_CTRL_ROC (1 << 1)
+#define DMA_CTRL_RXMULTI (1 << 2)
+#define DMA_CTRL_UNFRAMED (1 << 3)
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)
+
+
+#define D64_RP_LD_MASK 0x00001fff
+
+
+#define D64_RS0_CD_MASK 0x00001fff
+#define D64_RS0_RS_MASK 0xf0000000
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000
+#define D64_RS0_RS_ACTIVE 0x10000000
+#define D64_RS0_RS_IDLE 0x20000000
+#define D64_RS0_RS_STOPPED 0x30000000
+#define D64_RS0_RS_SUSP 0x40000000
+
+#define D64_RS1_AD_MASK 0x0001ffff
+#define D64_RS1_RE_MASK 0xf0000000
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000
+#define D64_RS1_RE_DPO 0x10000000
+#define D64_RS1_RE_DFU 0x20000000
+#define D64_RS1_RE_DTE 0x30000000
+#define D64_RS1_RE_DESRE 0x40000000
+#define D64_RS1_RE_COREE 0x50000000
+
+
+#define D64_FA_OFF_MASK 0xffff
+#define D64_FA_SEL_MASK 0xf0000
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000
+#define D64_FA_SEL_XDP 0x10000
+#define D64_FA_SEL_RDD 0x40000
+#define D64_FA_SEL_RDP 0x50000
+#define D64_FA_SEL_XFD 0x80000
+#define D64_FA_SEL_XFP 0x90000
+#define D64_FA_SEL_RFD 0xc0000
+#define D64_FA_SEL_RFP 0xd0000
+#define D64_FA_SEL_RSD 0xe0000
+#define D64_FA_SEL_RSP 0xf0000
+
+
+#define D64_CTRL_COREFLAGS 0x0ff00000
+#define D64_CTRL1_EOT ((uint32)1 << 28)
+#define D64_CTRL1_IOC ((uint32)1 << 29)
+#define D64_CTRL1_EOF ((uint32)1 << 30)
+#define D64_CTRL1_SOF ((uint32)1 << 31)
+
+
+#define D64_CTRL2_BC_MASK 0x00007fff
+#define D64_CTRL2_AE 0x00030000
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000
+
+
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff
+#define D64_RX_FRM_STS_OVFL 0x00800000
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000
+
+
+/* Receive header prepended by the DMA engine: frame length plus flags.
+ * NOTE(review): flag bit meanings are not defined in this header --
+ * presumably the D64_RX_FRM_STS_* fields above; confirm against users. */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} dma_rxh_t;
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 00000000000000..36ef491beefe8f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,108 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h 323197 2012-03-23 09:57:58Z $
+ */
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+
+
+
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2)
+#define SROM_INFO (0x07be / 2)
+
+
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+
+
+#define SBTML_INT_ACK 0x40000
+#define SBTML_INT_EN 0x20000
+
+
+#define SBTMH_INT_STATUS 0x40000
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 00000000000000..4b5a1cf465fc77
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,187 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h 308945 2012-01-18 02:15:27Z $
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1
+#define SBSDIO_FUNC1_SLEEPCSR 0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When RX FIFO has less entries than this & MBE is set
+ * => busy signal is asserted between data blocks.
+*/
+#define SBSDIO_MESBUSYCTRL_MASK 0x7f
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
+
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 00000000000000..2c5953568051af
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,293 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
+ uint32 PAD[2];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[3];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[7];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
+
+#endif /* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 00000000000000..852d1151bc5a9c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,193 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $
+ */
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[15];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sbsocramregs_t;
+
+#endif
+
+
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+
+#define SRCI_PT_MASK 0x00070000
+#define SRCI_PT_SHIFT 16
+
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+
+#define SOCRAM_BANKINFO_SZMASK 0x7f
+#define SOCRAM_BANKIDX_ROM_MASK 0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_R0M 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_REG 0x40
+#define SOCRAM_BANKIDX_REG 0x10
+#define SOCRAM_BANKINFO_STDBY_MASK 0x400
+#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
+
+
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT 17
+#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+
+#define SOCRAM_DEVRAMBANK_MASK 0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT 12
+
+
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SOCRAM_BANKSIZE_SHIFT 13
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 00000000000000..b8eee1ffb40576
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,617 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h 308973 2012-01-18 04:19:34Z $
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
+#define SDIOD_CCCR_BRCM_CARDCTL 0xf1
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS 8
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_3 3
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
+
+
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_11 11
+#define SDIOH_CMD_14 14
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_19 19
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD5_S18R_M BITFIELD_MASK(1)
+#define CMD5_S18R_S 24
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+
+#define CMD14_RCA_M BITFIELD_MASK(16)
+#define CMD14_RCA_S 16
+#define CMD14_SLEEP_M BITFIELD_MASK(1)
+#define CMD14_SLEEP_S 15
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+
+#define RSP4_S18A_M BITFIELD_MASK(1) /* Bit 24 - S18A (1.8V signaling) status */
+#define RSP4_S18A_S 24
+
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
+#define SPIRSP5_IDLE_STATE_S 24
+#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S 26
+#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S 27
+#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP5_FUNC_NUM_ERROR_S 28
+#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S 30
+#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP5_START_BIT_S 31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
+ */
+#define RSP6STAT_AKE_SEQ_ERROR_S 3
+#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP6STAT_APP_CMD_S 5
+#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
+ * (buff empty)
+ */
+#define RSP6STAT_READY_FOR_DATA_S 8
+#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
+ * Cmd reception
+ */
+#define RSP6STAT_CURR_STATE_S 9
+#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
+ */
+#define RSP6STAT_ERROR_S 13
+#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
+ * card state Bit 22
+ */
+#define RSP6STAT_ILLEGAL_CMD_S 14
+#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
+ * failed Bit 23
+ */
+#define RSP6STAT_COM_CRC_ERROR_S 15
+
+#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT 0
+#define CMD_OPTION_TUNING 1
+#endif /* _SDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h
new file mode 100644
index 00000000000000..e847a52babed45
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdioh.h
@@ -0,0 +1,441 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h 299859 2011-12-01 03:53:27Z $
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C
+#define SD_TimeoutCntrl 0x02E
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+#define SD_GPIO_Reg 0x100
+#define SD_GPIO_OE 0x104
+#define SD_GPIO_Enable 0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* preset-indiv regs */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits wide, with the
+ top 2 bits reserved. We use 8 bits here, as required for SDIO 3.0.
+*/
+#define CAP_BASECLK_M BITFIELD_MASK(8)
+#define CAP_BASECLK_S 8
+#define CAP_MAXBLOCK_M BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S 16
+#define CAP_ADMA2_M BITFIELD_MASK(1)
+#define CAP_ADMA2_S 19
+#define CAP_ADMA1_M BITFIELD_MASK(1)
+#define CAP_ADMA1_S 20
+#define CAP_HIGHSPEED_M BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S 21
+#define CAP_DMA_M BITFIELD_MASK(1)
+#define CAP_DMA_S 22
+#define CAP_SUSPEND_M BITFIELD_MASK(1)
+#define CAP_SUSPEND_S 23
+#define CAP_VOLT_3_3_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S 24
+#define CAP_VOLT_3_0_M BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S 25
+#define CAP_VOLT_1_8_M BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S 26
+#define CAP_64BIT_HOST_M BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S 28
+
+#define SDIO_OCR_READ_FAIL (2)
+
+
+#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S 29
+
+#define CAP_SLOTTYPE_M BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S 30
+
+#define CAP3_MSBits_OFFSET (32)
+/* Note: the following are the upper 32 capability bits, so each bit
+ position is numbered starting from 0 instead of 32; that is why
+ CAP3_MSBits_OFFSET is subtracted.
+*/
+#define CAP3_SDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S 14
+
+#define PRESET_CLK_DIV_M BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S 0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S 0
+#define CAP_CURR_3_0_M BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S 8
+#define CAP_CURR_1_8_M BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S 16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S 0
+#define BLKSZ_BNDRY_M BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S 12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S 0
+#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S 1
+#define XFER_CMD_12_EN_M BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 2
+#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S 4
+#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S 5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 0
+#define RESP_TYPE_136 1
+#define RESP_TYPE_48 2
+#define RESP_TYPE_48_BUSY 3
+/* type field */
+#define CMD_TYPE_NORMAL 0
+#define CMD_TYPE_SUSPEND 1
+#define CMD_TYPE_RESUME 2
+#define CMD_TYPE_ABORT 3
+
+#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */
+#define CMD_RESP_TYPE_S 0
+#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */
+#define CMD_CRC_EN_S 3
+#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */
+#define CMD_INDEX_EN_S 4
+#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */
+#define CMD_DATA_EN_S 5
+#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc
+ */
+#define CMD_TYPE_S 6
+#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */
+#define CMD_INDEX_S 8
+
+/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */
+/* SD_PresentState : Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */
+#define PRES_CMD_INHIBIT_S 0
+#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */
+#define PRES_DAT_INHIBIT_S 1
+#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */
+#define PRES_DAT_BUSY_S 2
+#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */
+#define PRES_PRESENT_RSVD_S 3
+#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */
+#define PRES_WRITE_ACTIVE_S 8
+#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */
+#define PRES_READ_ACTIVE_S 9
+#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S 10
+#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */
+#define PRES_READ_DATA_RDY_S 11
+#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */
+#define PRES_CARD_PRESENT_S 16
+#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */
+#define PRES_CARD_STABLE_S 17
+#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */
+#define PRES_CARD_PRESENT_RAW_S 18
+#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? */
+#define PRES_WRITE_ENABLED_S 19
+#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */
+#define PRES_DAT_SIGNAL_S 20
+#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */
+#define PRES_CMD_SIGNAL_S 24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */
+#define HOST_LED_S 0
+#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */
+#define HOST_DATA_WIDTH_S 1
+#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */
+#define HOST_DMA_SEL_S 3
+#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */
+#define HOST_HI_SPEED_EN_S 2
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */
+
+#define HOST_CONTR_VER_2 (1)
+#define HOST_CONTR_VER_3 (2)
+
+/* misc defines */
+#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */
+#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */
+#define PWR_BUS_EN_S 0
+#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */
+#define PWR_VOLTS_S 1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */
+#define SW_RESET_ALL_S 0
+#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */
+#define SW_RESET_CMD_S 1
+#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */
+#define SW_RESET_DAT_S 2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S 0
+#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S 1
+#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S 2
+#define INTSTAT_DMA_INT_M BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S 3
+#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S 4
+#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S 5
+#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S 6
+#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S 7
+#define INTSTAT_CARD_INT_M BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S 8
+#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */
+#define INTSTAT_RETUNING_INT_S 12
+#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */
+#define INTSTAT_ERROR_INT_S 15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S 0
+#define ERRINT_CMD_CRC_M BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S 1
+#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S 2
+#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S 3
+#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S 4
+#define ERRINT_DATA_CRC_M BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S 5
+#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S 6
+#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S 7
+#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S 8
+#define ERRINT_VENDOR_M BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S 12
+#define ERRINT_ADMA_M BITFIELD_MASK(1)
+#define ERRINT_ADMA_S 9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT 0x0001
+#define ERRINT_CMD_CRC_BIT 0x0002
+#define ERRINT_CMD_ENDBIT_BIT 0x0004
+#define ERRINT_CMD_INDEX_BIT 0x0008
+#define ERRINT_DATA_TIMEOUT_BIT 0x0010
+#define ERRINT_DATA_CRC_BIT 0x0020
+#define ERRINT_DATA_ENDBIT_BIT 0x0040
+#define ERRINT_CURRENT_LIMIT_BIT 0x0080
+#define ERRINT_AUTO_CMD12_BIT 0x0100
+#define ERRINT_ADMA_BIT 0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+ ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+ ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl : Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */
+/* SD_IntrStatus : Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */
+/* SD_Capabilities : Offset 0x040 , size = bytes */
+/* SD_MaxCurCap : Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE 0
+#define SDIOH_ADMA1_MODE 1
+#define SDIOH_ADMA2_MODE 2
+#define SDIOH_ADMA2_64_MODE 3
+
+#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 00000000000000..83f82de2649755
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 00000000000000..ac52bc1b84e3ef
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,313 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h 321982 2012-03-19 06:58:08Z $
+ */
+
+#ifndef _siutils_h_
+#define _siutils_h_
+
+
+struct si_pub {
+ uint socitype;
+
+ uint bustype;
+ uint buscoretype;
+ uint buscorerev;
+ uint buscoreidx;
+ int ccrev;
+ uint32 cccaps;
+ uint32 cccaps_ext;
+ int pmurev;
+ uint32 pmucaps;
+ uint boardtype;
+ uint boardrev;
+ uint boardvendor;
+ uint boardflags;
+ uint boardflags2;
+ uint chip;
+ uint chiprev;
+ uint chippkg;
+ uint32 chipst;
+ bool issim;
+ uint socirev;
+ bool pci_pr32414;
+
+};
+
+
+typedef const struct si_pub si_t;
+
+
+
+#define SI_OSH NULL
+
+#define BADIDX (SI_MAXCORES + 1)
+
+
+#define XTAL 0x1
+#define PLL 0x2
+
+
+#define CLK_FAST 0
+#define CLK_DYNAMIC 2
+
+
+#define GPIO_DRV_PRIORITY 0
+#define GPIO_APP_PRIORITY 1
+#define GPIO_HI_PRIORITY 2
+
+
+#define GPIO_PULLUP 0
+#define GPIO_PULLDN 1
+
+
+#define GPIO_REGEVT 0
+#define GPIO_REGEVT_INTMSK 1
+#define GPIO_REGEVT_INTPOL 2
+
+
+#define SI_DEVPATH_BUFSZ 16
+
+
+#define SI_DOATTACH 1
+#define SI_PCIDOWN 2
+#define SI_PCIUP 3
+
+#define ISSIM_ENAB(sih) 0
+
+
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih) (BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih) (0)
+#define CCPLL_ENAB(sih) (0)
+#else
+#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+
+#define CC_BTCOEX_EN_MASK 0x01
+
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+
+
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+ uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val) (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a) do {} while (0)
+static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+
+
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on);
+
+
+extern bool si_is_sprom_available(si_t *sih);
+extern bool si_is_sprom_enabled(si_t *sih);
+extern void si_sprom_enable(si_t *sih, bool enable);
+
+
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT 0
+#define CIS_SROM 1
+#define CIS_OTP 2
+
+
+#define DEFAULT_FAB 0x0
+#define CSM_FAB7 0x1
+#define TSMC_FAB12 0x2
+#define SMIC_FAB4 0x3
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+
+
+extern int si_devpath(si_t *sih, char *path, int size);
+
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern uint si_pll_reset(si_t *sih);
+
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h
new file mode 100644
index 00000000000000..ccae9779cdae88
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/spid.h
@@ -0,0 +1,153 @@
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: spid.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _SPI_H
+#define _SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+ uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */
+ uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */
+ uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay
+ * function selection, command/data error check
+ */
+ uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+ uint16 intr_reg; /* 0x04, Intr status register */
+ uint16 intr_en_reg; /* 0x06, Intr mask register */
+ uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */
+ uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */
+ uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */
+ uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */
+ uint32 test_read; /* 0x14, RO 0xfeedbead signature */
+ uint32 test_rw; /* 0x18, RW */
+ uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+ uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+ uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+ uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
+
+/* SPI device register offsets */
+#define SPID_CONFIG 0x00
+#define SPID_RESPONSE_DELAY 0x01
+#define SPID_STATUS_ENABLE 0x02
+#define SPID_RESET_BP 0x03 /* (corerev >= 1) */
+#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG 0x08 /* 32 bits */
+#define SPID_F1_INFO_REG 0x0C /* 16 bits */
+#define SPID_F2_INFO_REG 0x0E /* 16 bits */
+#define SPID_F3_INFO_REG 0x10 /* 16 bits */
+#define SPID_TEST_READ 0x14 /* 32 bits */
+#define SPID_TEST_RW 0x18 /* 32 bits */
+#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */
+#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */
+#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */
+#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */
+#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */
+#define RESET_SPI 0x80 /* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW 0x0002
+#define F2_F3_FIFO_WR_OVERFLOW 0x0004
+#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */
+#define DATA_ERROR 0x0010 /* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE 0x0020
+#define F3_PACKET_AVAILABLE 0x0040
+#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0 0x0100
+#define MISC_INTR1 0x0200
+#define MISC_INTR2 0x0400
+#define MISC_INTR3 0x0800
+#define MISC_INTR4 0x1000
+#define F1_INTR 0x2000
+#define F2_INTR 0x4000
+#define F3_INTR 0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE 0x00000001
+#define STATUS_UNDERFLOW 0x00000002
+#define STATUS_OVERFLOW 0x00000004
+#define STATUS_F2_INTR 0x00000008
+#define STATUS_F3_INTR 0x00000010
+#define STATUS_F2_RX_READY 0x00000020
+#define STATUS_F3_RX_READY 0x00000040
+#define STATUS_HOST_CMD_DATA_ERR 0x00000080
+#define STATUS_F2_PKT_AVAILABLE 0x00000100
+#define STATUS_F2_PKT_LEN_MASK 0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT 9
+#define STATUS_F3_PKT_AVAILABLE 0x00100000
+#define STATUS_F3_PKT_LEN_MASK 0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT 21
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 0x0001
+#define F1_RDY_FOR_DATA_TRANSFER 0x0002
+#define F1_MAX_PKT_SIZE 0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 0x0001
+#define F2_RDY_FOR_DATA_TRANSFER 0x0002
+#define F2_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 0x0001
+#define F3_RDY_FOR_DATA_TRANSFER 0x0002
+#define F3_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+
+#define SPI_MAX_PKT_LEN (2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0 0
+#define SPI_FUNC_1 1
+#define SPI_FUNC_2 2
+#define SPI_FUNC_3 3
+
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h
new file mode 100644
index 00000000000000..bf92a5651af0d9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h
@@ -0,0 +1,53 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h 260898 2011-05-20 23:11:12Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1 /* Version 1 */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+#define TRX_MAX_OFFSET 3 /* Max number of individual files */
+
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+};
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h
new file mode 100644
index 00000000000000..4eee5bab8ce01d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/typedefs.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h 286783 2011-09-29 06:18:57Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+
+
+#include "site_typedefs.h"
+
+#else
+
+
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE false
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+
+#else
+
+
+#endif
+
+#if defined(__x86_64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif
+#endif
+#endif
+
+
+
+
+
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif
+
+
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif
+
+#if !defined(__DJGPP__)
+
+
+#if defined(__KERNEL__)
+
+
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>
+#endif
+
+#else
+
+
+#include <sys/types.h>
+
+#endif
+
+#endif
+
+
+
+
+#define USE_TYPEDEF_DEFAULTS
+
+#endif
+
+
+
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef unsigned char bool;
+#endif
+
+
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long ulong;
+#endif
+
+
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT32
+typedef float float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double float64;
+#endif
+
+
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else
+typedef float64 float_t;
+#endif
+
+#endif
+
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1
+#endif
+
+#define AUTO (-1)
+
+
+
+#ifndef PTRSZ
+#define PTRSZ sizeof(char*)
+#endif
+
+
+
+#if defined(__GNUC__) || defined(__lint)
+ #define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+ #define BWL_COMPILER_ARMCC
+#else
+ #error "Unknown compiler!"
+#endif
+
+
+#ifndef INLINE
+ #if defined(BWL_COMPILER_MICROSOFT)
+ #define INLINE __inline
+ #elif defined(BWL_COMPILER_GNU)
+ #define INLINE __inline__
+ #elif defined(BWL_COMPILER_ARMCC)
+ #define INLINE __inline
+ #else
+ #define INLINE
+ #endif
+#endif
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif
+
+
+#define UNUSED_PARAMETER(x) (void)(x)
+
+
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+
+#include <bcmdefs.h>
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/usbrdl.h b/drivers/net/wireless/bcmdhd/include/usbrdl.h
new file mode 100644
index 00000000000000..c90dccdcfad5df
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/usbrdl.h
@@ -0,0 +1,203 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: usbrdl.h 296577 2011-11-16 03:09:51Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE 0 /* returns the rdl_state_t struct */
+#define DL_CHECK_CRC 1 /* currently unused */
+#define DL_GO 2 /* execute downloaded image */
+#define DL_START 3 /* initialize dl state */
+#define DL_REBOOT 4 /* reboot the device in 2 seconds */
+#define DL_GETVER 5 /* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED 6 /* execute the downloaded code and set reset event
+ * to occur in 2 seconds. It is the responsibility
+ * of the downloaded code to clear this event
+ */
+#define DL_EXEC 7 /* jump to a supplied address */
+#define DL_RESETCFG 8 /* To support single enum on dongle
+ * - Not used by bootloader
+ */
+#define DL_DEFER_RESP_OK 9 /* Potentially defer the response to setup
+ * if resp unavailable
+ */
+
+#define DL_HWCMD_MASK 0xfc /* Mask for hardware read commands: */
+#define DL_RDHW 0x10 /* Read a hardware address (Ctl-in) */
+#define DL_RDHW32 0x10 /* Read a 32 bit word */
+#define DL_RDHW16 0x11 /* Read 16 bits */
+#define DL_RDHW8 0x12 /* Read an 8 bit byte */
+#define DL_WRHW 0x14 /* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK 0x13 /* Block write to hardware access */
+
+#define DL_CMD_RDHW 1 /* read data from a backplane address */
+#define DL_CMD_WRHW 2 /* write data to a backplane address */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define DL_JTCONF 0x15 /* Get JTAG configuration (Ctl_in)
+ * Set JTAG configuration (Ctl-out)
+ */
+#define DL_JTON 0x16 /* Turn on jtag master (Ctl-in) */
+#define DL_JTOFF 0x17 /* Turn on jtag master (Ctl-in) */
+#define DL_RDRJT 0x18 /* Read a JTAG register (Ctl-in) */
+#define DL_WRJT 0x19 /* Write a hardware address over JTAG (Ctl/Bulk-out) */
+#define DL_WRRJT 0x1a /* Write a JTAG register (Ctl/Bulk-out) */
+#define DL_JTRST 0x1b /* Reset jtag fsm on jtag DUT (Ctl-in) */
+
+#define DL_RDJT 0x1c /* Read a hardware address over JTAG (Ctl-in) */
+#define DL_RDJT32 0x1c /* Read 32 bits */
+#define DL_RDJT16 0x1e /* Read 16 bits (sz = 4 - low bits) */
+#define DL_RDJT8 0x1f /* Read 8 bits */
+
+#define DL_MRDJT 0x20 /* Multiple read over JTAG (Ctl-out+Bulk-in) */
+#define DL_MRDJT32 0x20 /* M-read 32 bits */
+#define DL_MRDJT16 0x22 /* M-read 16 bits (sz = 4 - low bits) */
+#define DL_MRDJT6 0x23 /* M-read 8 bits */
+#define DL_MRDIJT 0x24 /* M-read over JTAG (Ctl-out+Bulk-in) with auto-increment */
+#define DL_MRDIJT32 0x24 /* M-read 32 bits w/ai */
+#define DL_MRDIJT16 0x26 /* M-read 16 bits w/ai (sz = 4 - low bits) */
+#define DL_MRDIJT8 0x27 /* M-read 8 bits w/ai */
+#define DL_MRDDJT 0x28 /* M-read over JTAG (Ctl-out+Bulk-in) with auto-decrement */
+#define DL_MRDDJT32 0x28 /* M-read 32 bits w/ad */
+#define DL_MRDDJT16 0x2a /* M-read 16 bits w/ad (sz = 4 - low bits) */
+#define DL_MRDDJT8 0x2b /* M-read 8 bits w/ad */
+#define DL_MWRJT 0x2c /* Multiple write over JTAG (Bulk-out) */
+#define DL_MWRIJT 0x2d /* With auto-increment */
+#define DL_MWRDJT 0x2e /* With auto-decrement */
+#define DL_VRDJT 0x2f /* Vector read over JTAG (Bulk-out+Bulk-in) */
+#define DL_VWRJT 0x30 /* Vector write over JTAG (Bulk-out+Bulk-in) */
+#define DL_SCJT 0x31 /* Jtag scan (Bulk-out+Bulk-in) */
+
+#define DL_CFRD 0x33 /* Reserved for dmamem use */
+#define DL_CFWR 0x34 /* Reserved for dmamem use */
+#define DL_GET_NVRAM 0x35 /* Query nvram parameter */
+
+#define DL_DBGTRIG 0xFF /* Trigger bRequest type to aid debug */
+
+#define DL_JTERROR 0x80000000
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* states */
+#define DL_WAITING 0 /* waiting to rx first pkt that includes the hdr info */
+#define DL_READY 1 /* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR 2 /* hdr was corrupted */
+#define DL_BAD_CRC 3 /* compressed image was corrupted */
+#define DL_RUNNABLE 4 /* download was successful, waiting for go cmd */
+#define DL_START_FAIL 5 /* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG 6 /* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG 7 /* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT 5000 /* Timeout for usb commands */
+
+struct bcm_device_id {
+ char *name;
+ uint32 vend;
+ uint32 prod;
+};
+
+typedef struct {
+ uint32 state;
+ uint32 bytes;
+} rdl_state_t;
+
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ramsize; /* Size of RAM */
+ uint32 remapbase; /* Current remap base address */
+ uint32 boardtype; /* Type of board */
+ uint32 boardrev; /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint32 data; /* data to write */
+} hwacc_t;
+
+/* struct for backplane */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 addr; /* backplane address for write */
+ uint32 len; /* length of data: 1, 2, 4 bytes */
+ uint8 data[1]; /* data to write */
+} hwacc_blk_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct {
+ uint32 chip; /* Chip id */
+ uint32 chiprev; /* Chip rev */
+ uint32 ccrev; /* Chipcommon core rev */
+ uint32 siclock; /* Backplane clock */
+} jtagd_id_t;
+
+/* Jtag configuration structure */
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint8 clkd; /* Jtag clock divisor */
+ uint8 disgpio; /* Gpio to disable external driver */
+ uint8 irsz; /* IR size for readreg/writereg */
+ uint8 drsz; /* DR size for readreg/writereg */
+
+ uint8 bigend; /* Big endian */
+ uint8 mode; /* Current mode */
+ uint16 delay; /* Delay between jtagm "simple commands" */
+
+ uint32 retries; /* Number of retries for jtagm operations */
+ uint32 ctrl; /* Jtag control reg copy */
+ uint32 ir_lvbase; /* Bits to add to IR values in LV tap */
+ uint32 dretries; /* Number of retries for dma operations */
+} jtagconf_t;
+
+/* struct for jtag scan */
+#define MAX_USB_IR_BITS 256
+#define MAX_USB_DR_BITS 3072
+#define USB_IR_WORDS (MAX_USB_IR_BITS / 32)
+#define USB_DR_WORDS (MAX_USB_DR_BITS / 32)
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ uint32 irsz; /* IR size in bits */
+ uint32 drsz; /* DR size in bits */
+ uint32 ts; /* Terminal state (def, pause, rti) */
+ uint32 data[USB_IR_WORDS + USB_DR_WORDS]; /* IR & DR data */
+} scjt_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+ uint32 cmd; /* tag to identify the cmd */
+ char var[QUERY_STRING_MAX]; /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
+
+#define RDL_CHUNK 1500 /* size of each dl transfer */
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX 0 /* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX 1 /* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX 2 /* Length of appended NVRAM data */
+
+#define TRX_OFFSETS_DLBASE_IDX 0 /* RAM start address for download */
+
+#endif /* _USB_RDL_H */
diff --git a/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h b/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h
new file mode 100644
index 00000000000000..95f616a5fc72d2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlc_extlog_idstr.h
@@ -0,0 +1,117 @@
+/*
+ * EXTLOG Module log ID to log Format String mapping table
+ *
+ * Copyright (C) 2012, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: wlc_extlog_idstr.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _WLC_EXTLOG_IDSTR_H_
+#define _WLC_EXTLOG_IDSTR_H_
+
+#include "wlioctl.h"
+
+/* Strings corresponding to the IDs defined in wlioctl.h
+ * This file is only included by the apps and not included by the external driver
+ * Formats of pre-existing ids should NOT be changed
+ */
+log_idstr_t extlog_fmt_str[ ] = {
+ {FMTSTR_DRIVER_UP_ID, 0, LOG_ARGTYPE_NULL,
+ "Driver is Up\n"},
+
+ {FMTSTR_DRIVER_DOWN_ID, 0, LOG_ARGTYPE_NULL,
+ "Driver is Down\n"},
+
+ {FMTSTR_SUSPEND_MAC_FAIL_ID, 0, LOG_ARGTYPE_INT,
+ "wlc_suspend_mac_and_wait() failed with psmdebug 0x%08x\n"},
+
+ {FMTSTR_NO_PROGRESS_ID, 0, LOG_ARGTYPE_INT,
+ "No Progress on TX for %d seconds\n"},
+
+ {FMTSTR_RFDISABLE_ID, 0, LOG_ARGTYPE_INT,
+ "Detected a change in RF Disable Input 0x%x\n"},
+
+ {FMTSTR_REG_PRINT_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Register %s = 0x%x\n"},
+
+ {FMTSTR_EXPTIME_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Strong RF interference detected\n"},
+
+ {FMTSTR_JOIN_START_ID, FMTSTRF_USER, LOG_ARGTYPE_STR,
+ "Searching for networks with ssid %s\n"},
+
+ {FMTSTR_JOIN_COMPLETE_ID, FMTSTRF_USER, LOG_ARGTYPE_STR,
+ "Successfully joined network with BSSID %s\n"},
+
+ {FMTSTR_NO_NETWORKS_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "No networks found. Please check if the network exists and is in range\n"},
+
+ {FMTSTR_SECURITY_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "AP rejected due to security mismatch. Change the security settings and try again...\n"},
+
+ {FMTSTR_RATE_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "AP rejected due to rate mismatch\n"},
+
+ {FMTSTR_AP_PRUNED_ID, 0, LOG_ARGTYPE_INT,
+ "AP rejected due to reason %d\n"},
+
+ {FMTSTR_KEY_INSERTED_ID, 0, LOG_ARGTYPE_INT,
+ "Inserting keys for algorithm %d\n"},
+
+ {FMTSTR_DEAUTH_ID, FMTSTRF_USER, LOG_ARGTYPE_STR_INT,
+ "Received Deauth from %s with Reason %d\n"},
+
+ {FMTSTR_DISASSOC_ID, FMTSTRF_USER, LOG_ARGTYPE_STR_INT,
+ "Received Disassoc from %s with Reason %d\n"},
+
+ {FMTSTR_LINK_UP_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Link Up\n"},
+
+ {FMTSTR_LINK_DOWN_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Link Down\n"},
+
+ {FMTSTR_RADIO_HW_OFF_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Radio button is turned OFF. Please turn it on...\n"},
+
+ {FMTSTR_RADIO_HW_ON_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Hardware Radio button is turned ON\n"},
+
+ {FMTSTR_EVENT_DESC_ID, 0, LOG_ARGTYPE_INT_STR,
+ "Generated event id %d: (result status) is (%s)\n"},
+
+ {FMTSTR_PNP_SET_POWER_ID, 0, LOG_ARGTYPE_INT,
+ "Device going into power state %d\n"},
+
+ {FMTSTR_RADIO_SW_OFF_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Software Radio is disabled. Please enable it through the UI...\n"},
+
+ {FMTSTR_RADIO_SW_ON_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Software Radio is enabled\n"},
+
+ {FMTSTR_PWD_MISMATCH_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Potential passphrase mismatch. Please try a different one...\n"},
+
+ {FMTSTR_FATAL_ERROR_ID, 0, LOG_ARGTYPE_INT,
+ "Fatal Error: intstatus 0x%x\n"},
+
+ {FMTSTR_AUTH_FAIL_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Authentication to %s Failed with status %d\n"},
+
+ {FMTSTR_ASSOC_FAIL_ID, 0, LOG_ARGTYPE_STR_INT,
+ "Association to %s Failed with status %d\n"},
+
+ {FMTSTR_IBSS_FAIL_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Unable to start IBSS since PeerNet is already active\n"},
+
+ {FMTSTR_EXTAP_FAIL_ID, FMTSTRF_USER, LOG_ARGTYPE_NULL,
+ "Unable to start Ext-AP since PeerNet is already active\n"},
+
+ {FMTSTR_MAX_ID, 0, 0, "\0"}
+};
+
+#endif /* _WLC_EXTLOG_IDSTR_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 00000000000000..237c30786c689f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,225 @@
+/*
+* Copyright (C) 1999-2012, Broadcom Corporation
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+*
+* As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module. An independent module is a module which is not
+* derived from this software. The special exception does not apply to any
+* modifications of the software.
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h 322519 2012-03-21 01:15:45Z $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+ /* Use TLV to convey WLFC information.
+ ---------------------------------------------------------------------------
+ | Type | Len | value | Description
+ ---------------------------------------------------------------------------
+ | 1 | 1 | (handle) | MAC OPEN
+ ---------------------------------------------------------------------------
+ | 2 | 1 | (handle) | MAC CLOSE
+ ---------------------------------------------------------------------------
+ | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+ ---------------------------------------------------------------------------
+ | 4 | 4 | see pkttag comments | TXSTATUS
+ ---------------------------------------------------------------------------
+ | 5 | 4 | see pkttag comments | PKTTAG [host->firmware]
+ ---------------------------------------------------------------------------
+ | 6 | 8 | (handle, ifid, MAC) | MAC ADD
+ ---------------------------------------------------------------------------
+ | 7 | 8 | (handle, ifid, MAC) | MAC DEL
+ ---------------------------------------------------------------------------
+ | 8 | 1 | (rssi) | RSSI - RSSI value for the packet.
+ ---------------------------------------------------------------------------
+ | 9 | 1 | (interface ID) | Interface OPEN
+ ---------------------------------------------------------------------------
+ | 10 | 1 | (interface ID) | Interface CLOSE
+ ---------------------------------------------------------------------------
+ | 11 | 8 | fifo credit returns map | FIFO credits back to the host
+ | | | |
+ | | | | --------------------------------------
+ | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+ | | | | --------------------------------------
+ | | | |
+ ---------------------------------------------------------------------------
+ | 12 | 2 | MAC handle, | Host provides a bitmap of pending
+ | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn.
+ | | | | [host->firmware]
+ ---------------------------------------------------------------------------
+ | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific
+ | | | | MAC destination.
+ ---------------------------------------------------------------------------
+ | 15 | 1 | interface ID | NIC period start
+ ---------------------------------------------------------------------------
+ | 16 | 7 | interface ID | NIC period end
+ ---------------------------------------------------------------------------
+ | 255 | N/A | N/A | FILLER - This is a special type
+ | | | | that has no length or value.
+ | | | | Typically used for padding.
+ ---------------------------------------------------------------------------
+ */
+
+#define WLFC_CTL_TYPE_MAC_OPEN 1
+#define WLFC_CTL_TYPE_MAC_CLOSE 2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3
+#define WLFC_CTL_TYPE_TXSTATUS 4
+#define WLFC_CTL_TYPE_PKTTAG 5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD 6
+#define WLFC_CTL_TYPE_MACDESC_DEL 7
+#define WLFC_CTL_TYPE_RSSI 8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN 9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14
+
+#define WLFC_CTL_TYPE_NIC_PRD_START 15
+#define WLFC_CTL_TYPE_NIC_PRD_END 16
+
+#define WLFC_CTL_TYPE_FILLER 255
+
+#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI 1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE 1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS 4
+#define WLFC_CTL_VALUE_LEN_PKTTAG 4
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
+#define WLFC_CTL_VALUE_LEN_NIC_PRD_START 1
+#define WLFC_CTL_VALUE_LEN_NIC_PRD_END 7
+
+
+#define WLFC_PKTID_GEN_MASK 0x80000000
+#define WLFC_PKTID_GEN_SHIFT 31
+
+#define WLFC_PKTID_GEN(x) (((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT)
+#define WLFC_PKTID_SETGEN(x, gen) (x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \
+ (((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK)
+
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+
+#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT 27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+ WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT 24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+ (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \
+ ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK)
+
+/* 32 STA should be enough??, 5 bits (b[4:0] lookup index); Must be power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE 32
+#define WLFC_MAX_IFNUM 16
+#define WLFC_MAC_DESC_ID_INVALID 0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
+ (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
+ ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WL_TXSTATUS_GENERATION_MASK 1
+#define WL_TXSTATUS_GENERATION_SHIFT 31
+
+#define WLFC_PKTFLAG_SET_GENERATION(x, gen) ((x) = \
+ ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+ (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WLFC_PKTFLAG_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+ WL_TXSTATUS_GENERATION_MASK)
+
+#define WLFC_MAX_PENDING_DATALEN 120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD 0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1
+/* WL firmware suppressed a packet because MAC is
+ already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+
+#define WLFC_D11_STATUS_INTERPRET(txs) \
+ (((txs)->status.suppr_ind != 0) ? WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD)
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+ {printf("WLFC: %s():%d:caller:%p\n", \
+ __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+ banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS 256
+#define WLHOST_REORDERDATA_LEN 10
+#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET 0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET 4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET 6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8
+
+#define WLHOST_REORDERDATA_DEL_FLOW 0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL 0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID 0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08
+#define WLHOST_REORDERDATA_NEW_HOLE 0x10
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h
new file mode 100644
index 00000000000000..44cce45b4d9a33
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h
@@ -0,0 +1,5002 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h 326733 2012-04-10 18:54:41Z $
+ */
+
+#ifndef _wlioctl_h_
+#define _wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ 16
+#endif
+
+/* Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+ cdc_ioctl_t msg;
+ uint data_len;
+ char intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE sizeof(rem_ioctl_t)
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+ struct ether_addr da;
+ uint16 len;
+ uint32 packetId;
+ uint8 data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+ uint8 ssid_len; /* the length of SSID */
+ uint8 ssid[32]; /* SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+ uint32 channel;
+ int32 dwell_time;
+ struct ether_addr BSSID;
+ wl_action_frame_t action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL 0
+#define MFP_TEST_FLAG_ANY_KEY 1
+typedef struct wl_sa_query {
+ uint32 flag;
+ uint8 action;
+ uint16 id;
+ struct ether_addr da;
+} wl_sa_query_t;
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Legacy structure to help keep backward compatible wl tool and tray app */
+
+#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ uint8 channel; /* Channel no. */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+ uint32 ie_length; /* byte length of Information Elements */
+ /* variable length Information Elements */
+} wl_bss_info_107_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ chanspec_t chanspec; /* chanspec for bss */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+
+ uint8 n_cap; /* BSS is 802.11N Capable */
+ uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
+ uint8 ctl_ch; /* 802.11N BSS control channel number */
+ uint32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ uint8 flags; /* flags */
+ uint8 reserved[3]; /* Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /* offset at which IEs start, from beginning */
+ uint32 ie_length; /* byte length of Information Elements */
+ /* Add new fields here */
+ /* variable length Information Elements */
+} wl_bss_info_108_t;
+
+#define WL_BSS_INFO_VERSION 109 /* current version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+ uint32 version; /* version field */
+ uint32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ struct ether_addr BSSID;
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ chanspec_t chanspec; /* chanspec for bss */
+ uint16 atim_window; /* units are Kusec */
+ uint8 dtim_period; /* DTIM period */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ int8 phy_noise; /* noise (in dBm) */
+
+ uint8 n_cap; /* BSS is 802.11N Capable */
+ uint32 nbss_cap; /* 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /* 802.11N BSS control channel number */
+ uint8 padding1[3]; /* explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /* VHT rx mcs map */
+ uint16 vht_txmcsmap; /* VHT tx mcs map */
+ uint8 flags; /* flags */
+ uint8 vht_cap; /* BSS is vht capable */
+ uint8 reserved[2]; /* Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /* offset at which IEs start, from beginning */
+ uint32 ie_length; /* byte length of Information Elements */
+ int16 SNR; /* average SNR of during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+} wl_bss_info_t;
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info was received on channel (vs offchannel) */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ 0x00000100
+
+typedef struct wl_bsscfg {
+ uint32 wsec;
+ uint32 WPA_auth;
+ uint32 wsec_index;
+ uint32 associated;
+ uint32 BSS;
+ uint32 phytest_on;
+ struct ether_addr prev_BSSID;
+ struct ether_addr BSSID;
+ uint32 targetbss_wpa2_flags;
+ uint32 assoc_type;
+ uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_bss_config {
+ uint32 atim_window;
+ uint32 beacon_period;
+ uint32 chanspec;
+} wl_bss_config_t;
+
+#define DLOAD_HANDLER_VER 1 /* Downloader version */
+#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 0x0001
+
+/* generic download types & flags */
+enum {
+ DL_TYPE_UCODE = 1,
+ DL_TYPE_CLM = 2
+};
+
+/* ucode type values */
+enum {
+ UCODE_FW,
+ INIT_VALS,
+ BS_INIT_VALS
+};
+
+struct wl_dload_data {
+ uint16 flag;
+ uint16 dload_type;
+ uint32 len;
+ uint32 crc;
+ uint8 data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+ uint32 ucode_type;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_num;
+ uint8 data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+ uint32 ds_id;
+ uint32 clm_total_len;
+ uint32 num_chunks;
+ uint32 chunk_len;
+ uint32 chunk_offset;
+ uint8 data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+typedef struct wlc_ssid {
+ uint32 SSID_len;
+ uchar SSID[32];
+} wlc_ssid_t;
+
+#define MAX_PREFERRED_AP_NUM 5
+typedef struct wlc_fastssidinfo {
+ uint32 SSID_channel[MAX_PREFERRED_AP_NUM];
+ wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT wnm_url_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct chan_scandata {
+ uint8 txpower;
+ uint8 pad;
+ chanspec_t channel; /* Channel num, bw, ctrl_sb and band */
+ uint32 channel_mintime;
+ uint32 channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+ EXTDSCAN_FOREGROUND_SCAN,
+ EXTDSCAN_BACKGROUND_SCAN,
+ EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID 5
+
+typedef struct wl_extdscan_params {
+ int8 nprobes; /* 0, passive, otherwise active */
+ int8 split_scan; /* split scan */
+ int8 band; /* band */
+ int8 pad;
+ wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
+ uint32 tx_rate; /* in 500ksec units */
+ wl_scan_type_t scan_type; /* enum */
+ int32 channel_num;
+ chan_scandata_t channel_list[1]; /* list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY 2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
+#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
+#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+
+typedef struct wl_scan_params {
+ wlc_ssid_t ssid; /* default: {0, ""} */
+ struct ether_addr bssid; /* default: bcast */
+ int8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ uint8 scan_type; /* flags, 0 use default */
+ int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /* -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ int32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in channel_list, 0
+ * means default (use all available channels)
+ *
+ * high half is entries in wlc_ssid_t array that
+ * follows channel_list, aligned for int32 (4 bytes)
+ * meaning an odd channel count implies a 2-byte pad
+ * between end of channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the fixed
+ * parameter portion is assumed, otherwise ssid in
+ * the fixed portion is ignored
+ */
+ uint16 channel_list[1]; /* list of chanspecs */
+} wl_scan_params_t;
+
+/* size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define ISCAN_REQ_VERSION 1
+
+/* incremental scan struct */
+typedef struct wl_iscan_params {
+ uint32 version;
+ uint16 action;
+ uint16 scan_duration;
+ wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/* 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+/* size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+/* Used in EXT_STA */
+#define DNGL_RXCTXT_SIZE 45
+
+#if defined(SIMPLE_ISCAN)
+#define ISCAN_RETRY_CNT 5
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+#define ISCAN_STATE_PENDING 2
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN 2048
+typedef struct iscan_buf {
+ struct iscan_buf * next;
+ char iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+#endif /* SIMPLE_ISCAN */
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+ uint32 version;
+ uint16 action;
+ uint16 sync_id;
+ wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_escan_result {
+ uint32 buflen;
+ uint32 version;
+ uint16 sync_id;
+ uint16 bss_count;
+ wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+/* incremental scan results struct */
+typedef struct wl_iscan_results {
+ uint32 status;
+ wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/* size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+typedef struct wl_probe_params {
+ wlc_ssid_t ssid;
+ struct ether_addr bssid;
+ struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_MAXRATES_IN_SET 16 /* max # of rates in a rateset */
+typedef struct wl_rateset {
+ uint32 count; /* # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
+
+typedef struct wl_rateset_args {
+ uint32 count; /* # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
+} wl_rateset_args_t;
+
+/* uint32 list */
+typedef struct wl_uint32_list {
+ /* in - # of elements, out - # of entries */
+ uint32 count;
+ /* variable length uint32 list */
+ uint32 element[1];
+} wl_uint32_list_t;
+
+/* used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+ struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */
+ int32 chanspec_num; /* 0: all available channels,
+ * otherwise count of chanspecs in chanspec_list
+ */
+ chanspec_t chanspec_list[1]; /* list of chanspecs */
+} wl_assoc_params_t;
+#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
+
+/* used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+ wlc_ssid_t ssid;
+ wl_assoc_params_t params; /* optional field, but it must include the fixed portion
+ * of the wl_assoc_params_t struct when it does present.
+ */
+} wl_join_params_t;
+#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \
+ WL_ASSOC_PARAMS_FIXED_SIZE)
+/* scan params for extended join */
+typedef struct wl_join_scan_params {
+ uint8 scan_type; /* 0 use default, active or passive scan */
+ int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ int32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ int32 home_time; /* -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+} wl_join_scan_params_t;
+
+/* extended join params */
+typedef struct wl_extjoin_params {
+ wlc_ssid_t ssid; /* {0, ""}: wildcard scan */
+ wl_join_scan_params_t scan;
+ wl_join_assoc_params_t assoc; /* optional field, but it must include the fixed portion
+ * of the wl_join_assoc_params_t struct when it does
+ * present.
+ */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
+ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#ifndef D11AC_IOTYPES
+
+/* defines used by the nrate iovar */
+#define NRATE_MCS_INUSE 0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
+#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define NRATE_STF_SHIFT 8 /* stf mode shift */
+#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
+
+#else /* D11AC_IOTYPES */
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK 0x00000300
+#define WL_RSPEC_TXEXP_SHIFT 8
+#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */
+#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */
+#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ 0x00010000
+#define WL_RSPEC_BW_40MHZ 0x00020000
+#define WL_RSPEC_BW_80MHZ 0x00030000
+#define WL_RSPEC_BW_160MHZ 0x00040000
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */
+#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define OLD_NRATE_SGI 0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
+#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
+#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
+#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
+
+#endif /* D11AC_IOTYPES */
+
+#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
+#define ANTENNA_NUM_2 2
+#define ANTENNA_NUM_3 3
+#define ANTENNA_NUM_4 4
+
+#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
+#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
+#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
+
+#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
+
+typedef struct {
+ uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
+ uint8 num_antcfg; /* number of available antenna configurations */
+} wlc_antselcfg_t;
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
+
+#define IBSS_MED 15 /* Medium in-bss congestion percentage */
+#define IBSS_HI 25 /* Hi in-bss congestion percentage */
+#define OBSS_MED 12 /* Medium out-of-bss congestion percentage */
+#define OBSS_HI 25 /* Hi out-of-bss congestion percentage */
+#define INTERFER_MED 5 /* Medium non-802.11 interference percentage */
+#define INTERFER_HI 10 /* Hi non-802.11 interference percentage */
+
+#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 GHz band */
+#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11 0x10 /* Prefer channels 1, 6 and 11 (non-overlapping in 2.4 GHz) */
+#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exclude channel based on interfer level */
+
+#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
+
+typedef struct {
+ uint32 duration; /* millisecs spent sampling this channel */
+ uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
+ /* move if cur bss moves channels) */
+ uint32 congest_obss; /* traffic not in our bss */
+ uint32 interference; /* millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /* second timestamp */
+} cca_congest_t;
+
+typedef struct {
+ chanspec_t chanspec; /* Which channel? */
+ uint8 num_secs; /* How many secs worth of data */
+ cca_congest_t secs[1]; /* Data */
+} cca_congest_channel_req_t;
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE 0 /* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */
+
+/* interference sources */
+enum interference_source {
+ ITFR_NONE = 0, /* no interference */
+ ITFR_PHONE, /* wireless phone */
+ ITFR_VIDEO_CAMERA, /* wireless video camera */
+ ITFR_MICROWAVE_OVEN, /* microwave oven */
+ ITFR_BABY_MONITOR, /* wireless baby monitor */
+ ITFR_BLUETOOTH, /* bluetooth */
+ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /* wireless camera or baby monitor */
+ ITFR_BLUETOOTH_OR_BABY_MONITOR, /* bluetooth or baby monitor */
+ ITFR_VIDEO_CAMERA_OR_PHONE, /* video camera or phone */
+ ITFR_UNIDENTIFIED /* interference from unidentified source */
+};
+
+/* structure for interference source report */
+typedef struct {
+ uint32 flags; /* flags. bit definitions below */
+ uint32 source; /* last detected interference source (enum interference_source) */
+ uint32 timestamp; /* second timestamp on interferenced flag change */
+} interference_source_rep_t;
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED 1 /* interference detected */
+#define ITFR_HOME_CHANNEL 2 /* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environment so feature stopped */
+
+#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
+
+typedef struct wl_country {
+ char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in
+ * the Country IE
+ */
+ int32 rev; /* revision specifier for ccode
+ * on set, -1 indicates unspecified.
+ * on get, rev >= 0
+ */
+ char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code.
+ * variable length, but fixed size in
+ * struct allows simple allocation for
+ * expected country strings <= 3 chars.
+ */
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+ uint32 buflen;
+ uint32 band;
+ char country_abbrev[WLC_CNTRY_BUF_SZ];
+ uint32 count;
+ uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+ uint32 buflen;
+ uint32 band_set;
+ uint32 band;
+ uint32 count;
+ char country_abbrev[1];
+} wl_country_list_t;
+
+#define WL_NUM_RPI_BINS 8
+#define WL_RM_TYPE_BASIC 1
+#define WL_RM_TYPE_CCA 2
+#define WL_RM_TYPE_RPI 3
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE (1<<1)
+#define WL_RM_FLAG_INCAPABLE (1<<2)
+#define WL_RM_FLAG_REFUSED (1<<3)
+
+typedef struct wl_rm_req_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /* token for this measurement */
+ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /* TSF low 32-bits */
+ uint32 dur; /* TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+ uint32 token; /* overall measurement set token */
+ uint32 count; /* number of measurement requests */
+ void *cb; /* completion callback function: may be NULL */
+ void *cb_arg; /* arg to completion callback function */
+ wl_rm_req_elt_t req[1]; /* variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+ int8 type;
+ int8 flags;
+ chanspec_t chanspec;
+ uint32 token; /* token for this measurement */
+ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /* TSF low 32-bits */
+ uint32 dur; /* TUs */
+ uint32 len; /* byte length of data block */
+ uint8 data[1]; /* variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+ uint8 rpi[WL_RPI_REP_BIN_NUM];
+ int8 rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+ uint32 token; /* overall measurement set token */
+ uint32 len; /* length of measurement report block */
+ wl_rm_rep_elt_t rep[1]; /* variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN 8
+
+
+typedef enum sup_auth_status {
+ /* Basic supplicant authentication states */
+ WLC_SUP_DISCONNECTED = 0,
+ WLC_SUP_CONNECTING,
+ WLC_SUP_IDREQUIRED,
+ WLC_SUP_AUTHENTICATING,
+ WLC_SUP_AUTHENTICATED,
+ WLC_SUP_KEYXCHANGE,
+ WLC_SUP_KEYED,
+ WLC_SUP_TIMEOUT,
+ WLC_SUP_LAST_BASIC_STATE,
+
+ /* Extended supplicant authentication states */
+ /* Waiting to receive handshake msg M1 */
+ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+ /* Preparing to send handshake msg M2 */
+ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+ /* Waiting to receive handshake msg M3 */
+ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+ WLC_SUP_KEYXCHANGE_PREP_M4, /* Preparing to send handshake msg M4 */
+ WLC_SUP_KEYXCHANGE_WAIT_G1, /* Waiting to receive handshake msg G1 */
+ WLC_SUP_KEYXCHANGE_PREP_G2 /* Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#define CRYPTO_ALGO_NALG 7
+#ifdef BCMWAPI_WPI
+#define CRYPTO_ALGO_SMS4 11
+#endif /* BCMWAPI_WPI */
+#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+#define WSEC_GEN_MFP_ACT_ERROR 0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020
+
+#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
+#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
+
+typedef struct wl_wsec_key {
+ uint32 index; /* key index */
+ uint32 len; /* key length */
+ uint8 data[DOT11_MAX_KEY_SIZE]; /* key data */
+ uint32 pad_1[18];
+ uint32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ uint32 flags; /* misc flags */
+ uint32 pad_2[2];
+ int pad_3;
+ int iv_initialized; /* has IV been initialized already? */
+ int pad_4;
+ /* Rx IV */
+ struct {
+ uint32 hi; /* upper 32 bits of IV */
+ uint16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ uint32 pad_5[2];
+ struct ether_addr ea; /* per station */
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN 8
+#define WSEC_MAX_PSK_LEN 64
+
+/* Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE (1<<0)
+
+/* receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct {
+ ushort key_len; /* octets in key material */
+ ushort flags; /* key handling qualification */
+ uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */
+} wsec_pmk_t;
+
+/* wireless security bitvec */
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED 0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
+
+#ifdef MFP
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
+#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
+#endif /* MFP */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI 0x0400
+#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
+#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+/* pmkid */
+#define MAXPMKID 16
+
+typedef struct _pmkid {
+ struct ether_addr BSSID;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+ uint32 npmkid;
+ pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+ struct ether_addr BSSID;
+ uint8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ uint32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+typedef struct wl_assoc_info {
+ uint32 req_len;
+ uint32 resp_len;
+ uint32 flags;
+ struct dot11_assoc_req req;
+ struct ether_addr reassoc_bssid; /* used in reassoc's */
+ struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct wl_led_info {
+ uint32 index; /* led index */
+ uint32 behavior;
+ uint8 activehi;
+} wl_led_info_t;
+
+
+/* srom read/write struct passed through ioctl */
+typedef struct {
+ uint byteoff; /* byte offset */
+ uint nbytes; /* number of bytes */
+ uint16 buf[1];
+} srom_rw_t;
+
+/* similar cis (srom or otp) struct [iovar: may not be aligned] */
+typedef struct {
+ uint32 source; /* cis source */
+ uint32 byteoff; /* byte offset */
+ uint32 nbytes; /* number of bytes */
+ /* data follows here */
+} cis_rw_t;
+
+#define WLC_CIS_DEFAULT 0 /* built-in default */
+#define WLC_CIS_SROM 1 /* source is sprom */
+#define WLC_CIS_OTP 2 /* source is otp */
+
+/* R_REG and W_REG struct passed through ioctl */
+typedef struct {
+ uint32 byteoff; /* byte offset of the field in d11regs_t */
+ uint32 val; /* read/write value of the field */
+ uint32 size; /* sizeof the field */
+ uint band; /* band (optional) */
+} rw_reg_t;
+
+/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
+
+typedef struct {
+ uint16 auto_ctrl; /* WL_ATTEN_XX */
+ uint16 bb; /* Baseband attenuation */
+ uint16 radio; /* Radio attenuation */
+ uint16 txctl1; /* Radio TX_CTL1 value */
+} atten_t;
+
+/* Per-AC retry parameters */
+struct wme_tx_params_s {
+ uint8 short_retry;
+ uint8 short_fallback;
+ uint8 long_retry;
+ uint8 long_fallback;
+ uint16 max_rate; /* In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
+#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
+/* value >= 0 causes
+ * - input to be set to that value
+ * - PCL to be off
+ */
+
+/* Used to get specific link/ac parameters */
+typedef struct {
+ int ac;
+ uint8 val;
+ struct ether_addr ea;
+} link_val_t;
+
+#define BCM_MAC_STATUS_INDICATION (0x40010200L)
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+typedef struct {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ uint16 cap; /* sta's advertised capabilities */
+ uint32 flags; /* flags defined below */
+ uint32 idle; /* time since data pkt rx'd from sta */
+ struct ether_addr ea; /* Station address */
+ wl_rateset_t rateset; /* rateset in use */
+ uint32 in; /* seconds elapsed since associated */
+ uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */
+ uint32 tx_pkts; /* # of packets transmitted */
+ uint32 tx_failures; /* # of packets failed */
+ uint32 rx_ucast_pkts; /* # of unicast packets received */
+ uint32 rx_mcast_pkts; /* # of multicast packets received */
+ uint32 tx_rate; /* Rate of last successful tx frame */
+ uint32 rx_rate; /* Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts)
+
+#define WL_STA_VER 3
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM 0x1 /* Running a Broadcom driver */
+#define WL_STA_WME 0x2 /* WMM association */
+#define WL_STA_UNUSED 0x4
+#define WL_STA_AUTHE 0x8 /* Authenticated */
+#define WL_STA_ASSOC 0x10 /* Associated */
+#define WL_STA_AUTHO 0x20 /* Authorized */
+#define WL_STA_WDS 0x40 /* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP 0x80 /* WDS traffic/probes flowing properly */
+#define WL_STA_PS 0x100 /* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE 0x200 /* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK 0x400 /* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI 0x800 /* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO 0x1000 /* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP 0x2000 /* STA 802.11n capable */
+#define WL_STA_SCBSTATS 0x4000 /* Per STA debug stats */
+
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+/* Used to get specific STA parameters */
+typedef struct {
+ uint32 val;
+ struct ether_addr ea;
+} scb_val_t;
+
+/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+ uint32 code;
+ scb_val_t ioctl_args;
+} authops_t;
+
+/* channel encoding */
+typedef struct channel_info {
+ int hw_channel;
+ int target_channel;
+ int scan_channel;
+} channel_info_t;
+
+/* For ioctls that take a list of MAC addresses */
+struct maclist {
+ uint count; /* number of MAC addresses */
+ struct ether_addr ea[1]; /* variable length array of MAC addresses */
+};
+
+/* get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+ uint rx_good_pkt;
+ uint rx_bad_pkt;
+ uint tx_good_pkt;
+ uint tx_bad_pkt;
+ uint rx_ocast_good_pkt; /* unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN 0
+#define LQ_IDX_MAX 1
+#define LQ_IDX_AVG 2
+#define LQ_IDX_SUM 2
+#define LQ_IDX_LAST 3
+#define LQ_STOP_MONITOR 0
+#define LQ_START_MONITOR 1
+
+/* Get averages RSSI, Rx PHY rate and SNR values */
+typedef struct {
+ int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */
+ int snr[LQ_IDX_LAST]; /* Array to keep min, max, avg snr */
+ int isvalid; /* Flag indicating whether above data is valid */
+} wl_lq_t; /* Link Quality */
+
+typedef enum wl_wakeup_reason_type {
+ LCD_ON = 1,
+ LCD_OFF,
+ DRC1_WAKE,
+ DRC2_WAKE,
+ REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+/* Unique filter id */
+ uint32 id;
+
+/* stores the reason for the last wake up */
+ uint8 reason;
+} wl_wr_t;
+
+/* Get MAC specific rate histogram command */
+typedef struct {
+ struct ether_addr ea; /* MAC Address */
+ uint8 ac_cat; /* Access Category */
+ uint8 num_pkts; /* Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t; /* MAC Specific Rate Histogram command */
+
+/* Get MAC rate histogram response */
+typedef struct {
+ uint32 rate[WLC_MAXRATE + 1]; /* Rates */
+ uint32 mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX]; /* MCS counts */
+ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /* VHT counts */
+ uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */
+} wl_mac_ratehisto_res_t; /* MAC Specific Rate Histogram Response */
+
+/* Values for TX Filter override mode */
+/* NOTE(review): exact duplicates of the definitions earlier in this file;
+ * identical redefinition is legal C, but the repeated pair could be dropped.
+ */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
+
+/* Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ uint8 set; /* 1=set IOCTL; 0=query IOCTL */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+} wl_ioctl_t;
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype set /* subtype param */
+#define ioctl_pid used /* pid param */
+#define ioctl_status needed /* status param */
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+ uint vendorid; /* PCI vendor id */
+ uint deviceid; /* device id of chip */
+ uint radiorev; /* radio revision */
+ uint chiprev; /* chip revision */
+ uint corerev; /* core revision */
+ uint boardid; /* board identifier (usu. PCI sub-device id) */
+ uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */
+ uint boardrev; /* board revision */
+ uint driverrev; /* driver version */
+ uint ucoderev; /* microcode version */
+ uint bus; /* bus type */
+ uint chipnum; /* chip number */
+ uint phytype; /* phy type */
+ uint phyrev; /* phy revision */
+ uint anarev; /* anacore rev */
+ uint chippkg; /* chip package info */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH 48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+ uint instance;
+ char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+/* structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+ uint16 magic;
+ uint16 fifo;
+ uint16 size;
+} wl_txfifo_sz_t;
+/* magic pattern used for mismatch driver and wl */
+#define WL_TXFIFO_SZ_MAGIC 0xa5a5
+
+/* Transfer info about an IOVar from the driver */
+/* Max supported IOV name size in bytes, + 1 for nul termination */
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+ uint8 module;
+ uint8 type;
+ char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+/* bump this number if you change the ioctl interface */
+#ifdef D11AC_IOTYPES
+#define WLC_IOCTL_VERSION 2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1
+#else
+#define WLC_IOCTL_VERSION 1
+#endif /* D11AC_IOTYPES */
+
+#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
+#endif
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */
+#define WLC_GET_RATE 12
+#define WLC_GET_MAX_RATE 13
+#define WLC_GET_INSTANCE 14
+/* #define WLC_GET_FRAG 15 */ /* no longer supported */
+/* #define WLC_SET_FRAG 16 */ /* no longer supported */
+/* #define WLC_GET_RTS 17 */ /* no longer supported */
+/* #define WLC_SET_RTS 18 */ /* no longer supported */
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+#define WLC_TERMINATED 28
+/* #define WLC_DUMP_SCB 28 */ /* no longer supported */
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+#define WLC_GET_FIXRATE 42
+#define WLC_SET_FIXRATE 43
+/* #define WLC_GET_WEP 42 */ /* no longer supported */
+/* #define WLC_SET_WEP 43 */ /* no longer supported */
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60 /* diag */
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+/* #define WLC_GET_TXPWR 65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR 66 */ /* no longer supported */
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+/* #define WLC_GET_LOCALE 73 */ /* no longer supported */
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK 90 /* ndis only */
+#define WLC_SET_FORCELINK 91 /* ndis only */
+#define WLC_FREQ_ACCURACY 92 /* diag */
+#define WLC_CARRIER_SUPPRESS 93 /* diag */
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
+/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114 /* current rateset */
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+/* #define WLC_GET_MRATE 153 */ /* no longer supported */
+/* #define WLC_SET_MRATE 154 */ /* no longer supported */
+/* WLC ioctl command codes (continued). Values are part of the published driver
+ * ABI; commented-out entries are retired IDs kept only to preserve numbering.
+ */
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#define WLC_GET_PHYLIST 180
+#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
+#define WLC_DECRYPT_STATUS 182 /* ndis only */
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196 /* diag */
+#define WLC_SET_SHMEM 197 /* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST 200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+#define WLC_SET_LED 209
+#define WLC_GET_LED 210
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+/* NOTE: the "MANAGMENT" misspelling below is part of the published API; do not rename. */
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#define WLC_GET_VAR 262 /* get value of named variable */
+#define WLC_SET_VAR 263 /* set named variable to value */
+#define WLC_NVRAM_GET 264 /* deprecated */
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#define WLC_SET_WSEC_PMK 268
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */
+/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */
+#define WLC_GET_CMD 309
+/* #define WLC_LAST 310 */ /* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE 317
+#define WLC_LAST 318 /* one past the highest assigned ioctl code */
+
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE 0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE 0xFFE41420
+
+/* NDIS overrides: each OID is WL_OID_BASE plus the matching WLC_* ioctl code */
+#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE)
+
+/* result codes for OID_WL_DECRYPT_STATUS */
+#define WL_DECRYPT_STATUS_SUCCESS 1
+#define WL_DECRYPT_STATUS_FAILURE 2
+#define WL_DECRYPT_STATUS_UNKNOWN 3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS 0
+#define WLC_UPGRADE_PENDING 1
+#ifdef CONFIG_USBRNDIS_RETAIL
+/* struct passed in for WLC_NDCONFIG_ITEM: a named configuration item
+ * and an opaque pointer to its value (interpretation is item-specific).
+ */
+typedef struct {
+	char *name;	/* NUL-terminated item name */
+	void *param;	/* item value; type depends on the named item */
+} ndconfig_item_t;
+#endif
+
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
+#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+
+/* spur avoidance modes */
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE (1U<<31)
+#define WL_TXPWR_NEG (1U<<30)
+
+#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
+
+#define WL_PHY_PAVAR_VER 1 /* pavars version */
+/* Per-band power offsets (for the "po" iovar). Field layout is ABI; do not reorder. */
+typedef struct wl_po {
+	uint16 phy_type; /* Phy type */
+	uint16 band; /* band this set of offsets applies to */
+	uint16 cckpo; /* presumably CCK per-rate power offset (SROM format) — confirm */
+	uint32 ofdmpo; /* presumably OFDM per-rate power offset (SROM format) — confirm */
+	uint16 mcspo[8]; /* presumably MCS per-rate power offsets — confirm */
+} wl_po_t;
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
+#define WL_DIAG_MEMORY 3 /* d11 memory test */
+#define WL_DIAG_LED 4 /* LED test */
+#define WL_DIAG_REG 5 /* d11/phy register test */
+#define WL_DIAG_SROM 6 /* srom read/crc test */
+#define WL_DIAG_DMA 7 /* DMA test */
+#define WL_DIAG_LOOPBACK_EXT 8 /* enhanced d11 loopback data test */
+
+/* "diag" iovar error codes */
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
+#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
+#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */
+
+/* band types */
+#define WLC_BAND_AUTO 0 /* auto-select */
+#define WLC_BAND_5G 1 /* 5 GHz */
+#define WLC_BAND_2G 2 /* 2.4 GHz */
+#define WLC_BAND_ALL 3 /* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0 1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
+
+/* phy types (returned by WLC_GET_PHYTPE); note gaps (3, 9) are unassigned here */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_LCN40 10
+#define WLC_PHY_TYPE_AC 11
+#define WLC_PHY_TYPE_NULL 0xf
+#define WLC_PHY_TYPE_NULL 0xf
+
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
+#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+#define WLC_N_PREAMBLE_GF_BRCM 2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+/* bandwidth bits used to build the WLC_BW_CAP_* masks below */
+#define WLC_BW_20MHZ_BIT (1<<0)
+#define WLC_BW_40MHZ_BIT (1<<1)
+#define WLC_BW_80MHZ_BIT (1<<2)
+
+/* Bandwidth capabilities: each wider cap implies all narrower widths */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
+/* predicates over a bw_cap bitmask; evaluate their argument once */
+#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+#define WLC_VHT_SGI_80 0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL 0x02
+
+/* Values for PM */
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3 /* use this bit to force PM off even when BT is active */
+
+#define LISTEN_INTERVAL 10
+/* interference mitigation options */
+#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
+#define INTERFERE_NONE 0 /* off */
+#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */
+#define WLAN_MANUAL 2 /* ACI: no auto detection */
+#define WLAN_AUTO 3 /* ACI: auto detect */
+#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */
+#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
+
+/* Adjacent-channel-interference (ACI) mitigation tuning parameters
+ * (WLC_GET_ACI_ARGS / WLC_SET_ACI_ARGS). Layout is ABI; do not reorder.
+ */
+typedef struct wl_aci_args {
+	int enter_aci_thresh; /* Trigger level to start detecting ACI */
+	int exit_aci_thresh; /* Trigger level to exit ACI mode */
+	int usec_spin; /* microsecs to delay between rssi samples */
+	int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+	uint16 nphy_adcpwr_enter_thresh; /* ADC power to enter ACI mitigation mode */
+	uint16 nphy_adcpwr_exit_thresh; /* ADC power to exit ACI mitigation mode */
+	uint16 nphy_repeat_ctr; /* Number of tries per channel to compute power */
+	uint16 nphy_num_samples; /* Number of samples to compute power on one channel */
+	uint16 nphy_undetect_window_sz; /* num of undetects to exit ACI Mitigation mode */
+	uint16 nphy_b_energy_lo_aci; /* low ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_md_aci; /* mid ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_hi_aci; /* high ACI power energy threshold for bphy */
+	uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+/* trigger condition bitflags for phy sample collect (wl_samplecollect_args_t.trigger) */
+#define TRIGGER_NOW 0
+#define TRIGGER_CRS 0x01
+#define TRIGGER_CRSDEASSERT 0x02
+#define TRIGGER_GOODFCS 0x04
+#define TRIGGER_BADFCS 0x08
+#define TRIGGER_BADPLCP 0x10
+#define TRIGGER_CRSGLITCH 0x20
+#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */
+#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */
+/* Arguments for WLC_PHY_SAMPLE_COLLECT. Fields are appended per version;
+ * existing field order is ABI and must not change.
+ */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us; /* presumably collection duration in microseconds — confirm */
+	int cores; /* which rx core(s) to collect from */
+	/* add'l version 1 fields */
+	uint16 version; /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length; /* length of entire structure */
+	int8 trigger; /* TRIGGER_* bitflags selecting the capture trigger */
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	bool downsamp;
+	bool be_deaf;
+	bool agc; /* loop from init gain and going down */
+	bool filter; /* override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+} wl_samplecollect_args_t;
+
+/* sample-data transfer framing (see wl_sampledata_t below) */
+#define WL_SAMPLEDATA_HEADER_TYPE 1
+#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
+#define WL_SAMPLEDATA_TYPE 2
+#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */
+#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */
+#define WL_SAMPLEDATA_T_VERSION 1 /* version of wl_sampledata_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+/* Framing header for sample-collect data transfers. Layout is ABI. */
+typedef struct wl_sampledata {
+	uint16 version; /* structure version */
+	uint16 size; /* size of structure */
+	uint16 tag; /* Header/Data */
+	uint16 length; /* data length */
+	uint32 flag; /* bit def */
+} wl_sampledata_t;
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* wl_radar_args_t: radar-detection tuning parameters (WLC_GET_RADAR / WLC_SET_RADAR).
+ * Layout is ABI; do not reorder fields.
+ */
+typedef struct {
+	int npulses; /* required number of pulses at n * t_int */
+	int ncontig; /* required number of pulses at t_int */
+	int min_pw; /* minimum pulse width (20 MHz clocks) */
+	int max_pw; /* maximum pulse width (20 MHz clocks) */
+	uint16 thresh0; /* Radar detection, thresh 0 */
+	uint16 thresh1; /* Radar detection, thresh 1 */
+	uint16 blank; /* Radar detection, blank control */
+	uint16 fmdemodcfg; /* Radar detection, fmdemod config */
+	int npulses_lp; /* Radar detection, minimum long pulses */
+	int min_pw_lp; /* Minimum pulsewidth for long pulses */
+	int max_pw_lp; /* Maximum pulsewidth for long pulses */
+	int min_fm_lp; /* Minimum fm for long pulses */
+	int max_span_lp; /* Maximum deltat for long pulses */
+	int min_deltat; /* Minimum spacing between pulses */
+	int max_deltat; /* Maximum spacing between pulses */
+	uint16 autocorr; /* Radar detection, autocorr on or off */
+	uint16 st_level_time; /* Radar detection, start_timing level */
+	uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+	uint32 version; /* version */
+	uint32 fra_pulse_err; /* sample error margin for detecting French radar pulses */
+	int npulses_fra; /* Radar detection, minimum French pulses set */
+	int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */
+	int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask; /* defines which period cal is masked from radar detection */
+	int quant; /* quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp; /* minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp; /* maximum burst to burst interval for bin3 radar */
+	int nskip_rst_lp; /* number of skipped pulses before resetting lp buffer */
+	int max_pw_tol; /* maximum tolerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+/* Radar detection thresholds, per bandwidth and per 5 GHz sub-band
+ * (lo = 5250-5350MHz, hi = 5470-5725MHz). Layout is ABI.
+ */
+typedef struct {
+	uint32 version; /* version */
+	uint16 thresh0_20_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION 2
+#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */
+#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */
+#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
+ * detection once
+ */
+#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+/* guard: wl_rssi_ant_t's array size is ABI, so any prior definition must agree */
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* RSSI per antenna */
+typedef struct {
+	uint32 version; /* version field */
+	uint32 count; /* number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */
+} wl_rssi_ant_t;
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */
+#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */
+#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */
+#define WL_DFS_CACSTATE_CSA 3 /* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */
+#define WL_DFS_CACSTATES 7 /* this many states exist */
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint state; /* noted by WL_DFS_CACSTATE_XX. */
+	uint duration; /* time spent in ms in state. */
+	/* as dfs enters ISM state, it removes the operational channel from quiet channel
+	 * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/* chanspec cleared used to be a uint, add another to uint16 to maintain size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+/* Legacy tx power report. All power values are uint8; units/scale are driver
+ * convention (presumably quarter-dBm, matching WLC_TXPWR_MAX above — confirm).
+ */
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */
+	uint8 txpwr_local_max; /* local max according to the AP */
+	uint8 txpwr_local_constraint; /* local constraint according to the AP */
+	uint8 txpwr_chan_reg_max; /* Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */
+	uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */
+	uint8 txpwr_bphy_ofdm_max; /* Max OFDM power for this band (SROM) */
+	uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */
+	int8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */
+	uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+/* indices into the legacy 45-entry per-rate power arrays below */
+#define WL_TX_POWER_RATES_LEGACY 45
+#define WL_TX_POWER_MCS20_FIRST 12
+#define WL_TX_POWER_MCS20_NUM 16
+#define WL_TX_POWER_MCS40_FIRST 28
+#define WL_TX_POWER_MCS40_NUM 17
+
+/* Second-generation legacy tx power report, indexed per rate by the
+ * WL_TX_POWER_* defines above. Layout is ABI; do not reorder.
+ */
+typedef struct {
+	uint32 flags;
+	chanspec_t chanspec; /* txpwr report for this channel */
+	chanspec_t local_chanspec; /* channel on which we are associated */
+	uint8 local_max; /* local max according to the AP */
+	uint8 local_constraint; /* local constraint according to the AP */
+	int8 antgain[2]; /* Ant gain for each band - from SROM */
+	uint8 rf_cores; /* count of RF Cores being reported */
+	uint8 est_Pout[4]; /* Latest tx power out estimate per RF
+	 * chain without adjustment
+	 */
+	uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
+	uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */
+	uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */
+	uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
+	uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */
+} tx_power_legacy2_t;
+
+/* TX Power index defines */
+#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_MCS32 1
+
+/* legacy aliases kept for source compatibility with older callers */
+#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK
+#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM
+#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS_2_STREAM WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS32 WL_NUM_RATES_MCS32
+#define WL_TX_POWER_CCK_NUM WL_NUM_RATES_CCK
+#define WL_TX_POWER_OFDM_NUM WL_NUM_RATES_OFDM
+#define WL_TX_POWER_MCS_1_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_2_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_32_NUM WL_NUM_RATES_MCS32
+
+#define WL_NUM_2x2_ELEMENTS 4
+#define WL_NUM_3x3_ELEMENTS 6
+
+typedef struct txppr {
+ /* start of 20MHz tx power limits */
+ uint8 b20_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 40MHz tx power limits */
+ uint8 b40_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b40_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b40_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 20in40MHz tx power limits */
+ uint8 b20in40_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20in40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20in40_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20in40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20in40_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* 20 in 40 MHz Legacy OFDM CDD */
+ uint8 b20in40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20in40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20in40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20in40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20in40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20in40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20in40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20in40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20in40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20in40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20in40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 80MHz tx power limits */
+ uint8 b80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 20in80MHz tx power limits */
+ uint8 b20in80_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b20in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b20in80_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b20in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b20in80_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b20in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b20in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b20in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b20in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b20in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b20in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b20in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b20in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b20in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b20in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b20in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b20in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b20in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ /* start of 40in80MHz tx power limits */
+ uint8 b40in80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
+ uint8 b40in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
+
+ uint8 b40in80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+ uint8 b40in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
+ uint8 b40in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
+ uint8 b40in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
+
+ uint8 b40in80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
+	uint8 b40in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM];	/* 40 in 80 MHz Legacy OFDM CDD */
+ uint8 b40in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
+ uint8 b40in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
+ uint8 b40in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
+ uint8 b40in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
+
+ uint8 b40in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
+ uint8 b40in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
+ uint8 b40in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
+ uint8 b40in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
+ uint8 b40in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
+ uint8 b40in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
+ uint8 b40in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
+ uint8 b40in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
+
+ uint8 mcs32; /* C_CHECK - THIS NEEDS TO BE REMOVED THROUGHOUT THE CODE */
+} txppr_t;
+
+/* 20MHz */
+#define WL_TX_POWER_CCK_FIRST OFFSETOF(txppr_t, b20_1x1dsss)
+#define WL_TX_POWER_OFDM20_FIRST OFFSETOF(txppr_t, b20_1x1ofdm)
+#define WL_TX_POWER_MCS20_SISO_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
+#define WL_TX_POWER_20_S1x1_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
+
+#define WL_TX_POWER_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2dsss)
+#define WL_TX_POWER_OFDM20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
+#define WL_TX_POWER_20_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS20_STBC_FIRST OFFSETOF(txppr_t, b20_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS20_SDM_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
+#define WL_TX_POWER_20_S2x2_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3dsss)
+#define WL_TX_POWER_OFDM20_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_ofdm)
+#define WL_TX_POWER_20_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_mcs0)
+#define WL_TX_POWER_20_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3stbc_mcs0)
+#define WL_TX_POWER_20_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3sdm_mcs8)
+#define WL_TX_POWER_20_S3x3_FIRST OFFSETOF(txppr_t, b20_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20_S1X1_VHT OFFSETOF(txppr_t, b20_1x1vht)
+#define WL_TX_POWER_20_S1X2_CDD_VHT OFFSETOF(txppr_t, b20_1x2cdd_vht)
+#define WL_TX_POWER_20_S2X2_STBC_VHT OFFSETOF(txppr_t, b20_2x2stbc_vht)
+#define WL_TX_POWER_20_S2X2_VHT OFFSETOF(txppr_t, b20_2x2sdm_vht)
+#define WL_TX_POWER_20_S1X3_CDD_VHT OFFSETOF(txppr_t, b20_1x3cdd_vht)
+#define WL_TX_POWER_20_S2X3_STBC_VHT OFFSETOF(txppr_t, b20_2x3stbc_vht)
+#define WL_TX_POWER_20_S2X3_VHT OFFSETOF(txppr_t, b20_2x3sdm_vht)
+#define WL_TX_POWER_20_S3X3_VHT OFFSETOF(txppr_t, b20_3x3sdm_vht)
+
+/* 40MHz */
+#define WL_TX_POWER_40_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40_dummy1x1dsss)
+#define WL_TX_POWER_OFDM40_FIRST OFFSETOF(txppr_t, b40_1x1ofdm)
+#define WL_TX_POWER_MCS40_SISO_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
+#define WL_TX_POWER_40_S1x1_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
+
+#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40_dummy1x2dsss)
+#define WL_TX_POWER_OFDM40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
+#define WL_TX_POWER_40_S1x2_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS40_STBC_FIRST OFFSETOF(txppr_t, b40_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS40_SDM_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
+#define WL_TX_POWER_40_S2x2_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
+
+#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_dummy1x3dsss)
+#define WL_TX_POWER_OFDM40_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_ofdm)
+#define WL_TX_POWER_40_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_mcs0)
+#define WL_TX_POWER_40_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3stbc_mcs0)
+#define WL_TX_POWER_40_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3sdm_mcs8)
+#define WL_TX_POWER_40_S3x3_FIRST OFFSETOF(txppr_t, b40_3x3sdm_mcs16)
+
+#define WL_TX_POWER_40_S1X1_VHT OFFSETOF(txppr_t, b40_1x1vht)
+#define WL_TX_POWER_40_S1X2_CDD_VHT OFFSETOF(txppr_t, b40_1x2cdd_vht)
+#define WL_TX_POWER_40_S2X2_STBC_VHT OFFSETOF(txppr_t, b40_2x2stbc_vht)
+#define WL_TX_POWER_40_S2X2_VHT OFFSETOF(txppr_t, b40_2x2sdm_vht)
+#define WL_TX_POWER_40_S1X3_CDD_VHT OFFSETOF(txppr_t, b40_1x3cdd_vht)
+#define WL_TX_POWER_40_S2X3_STBC_VHT OFFSETOF(txppr_t, b40_2x3stbc_vht)
+#define WL_TX_POWER_40_S2X3_VHT OFFSETOF(txppr_t, b40_2x3sdm_vht)
+#define WL_TX_POWER_40_S3X3_VHT OFFSETOF(txppr_t, b40_3x3sdm_vht)
+
+/* 20 in 40MHz */
+#define WL_TX_POWER_20UL_CCK_FIRST OFFSETOF(txppr_t, b20in40_1x1dsss)
+#define WL_TX_POWER_20UL_OFDM_FIRST OFFSETOF(txppr_t, b20in40_1x1ofdm)
+#define WL_TX_POWER_20UL_S1x1_FIRST OFFSETOF(txppr_t, b20in40_1x1mcs0)
+
+#define WL_TX_POWER_CCK_20U_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2dsss)
+#define WL_TX_POWER_20UL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_ofdm)
+#define WL_TX_POWER_20UL_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_mcs0)
+#define WL_TX_POWER_20UL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2stbc_mcs0)
+#define WL_TX_POWER_20UL_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_20U_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3dsss)
+#define WL_TX_POWER_20UL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_ofdm)
+#define WL_TX_POWER_20UL_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_mcs0)
+#define WL_TX_POWER_20UL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3stbc_mcs0)
+#define WL_TX_POWER_20UL_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3sdm_mcs8)
+#define WL_TX_POWER_20UL_S3x3_FIRST OFFSETOF(txppr_t, b20in40_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20UL_S1X1_VHT OFFSETOF(txppr_t, b20in40_1x1vht)
+#define WL_TX_POWER_20UL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in40_1x2cdd_vht)
+#define WL_TX_POWER_20UL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in40_2x2stbc_vht)
+#define WL_TX_POWER_20UL_S2X2_VHT OFFSETOF(txppr_t, b20in40_2x2sdm_vht)
+#define WL_TX_POWER_20UL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in40_1x3cdd_vht)
+#define WL_TX_POWER_20UL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in40_2x3stbc_vht)
+#define WL_TX_POWER_20UL_S2X3_VHT OFFSETOF(txppr_t, b20in40_2x3sdm_vht)
+#define WL_TX_POWER_20UL_S3X3_VHT OFFSETOF(txppr_t, b20in40_3x3sdm_vht)
+
+/* 80MHz */
+#define WL_TX_POWER_80_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b80_dummy1x1dsss)
+#define WL_TX_POWER_OFDM80_FIRST OFFSETOF(txppr_t, b80_1x1ofdm)
+#define WL_TX_POWER_MCS80_SISO_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
+#define WL_TX_POWER_80_S1x1_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
+
+#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b80_dummy1x2dsss)
+#define WL_TX_POWER_OFDM80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_ofdm)
+#define WL_TX_POWER_MCS80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
+#define WL_TX_POWER_80_S1x2_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
+#define WL_TX_POWER_MCS80_STBC_FIRST OFFSETOF(txppr_t, b80_2x2stbc_mcs0)
+#define WL_TX_POWER_MCS80_SDM_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
+#define WL_TX_POWER_80_S2x2_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_dummy1x3dsss)
+#define WL_TX_POWER_OFDM80_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_ofdm)
+#define WL_TX_POWER_80_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_mcs0)
+#define WL_TX_POWER_80_STBC_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3stbc_mcs0)
+#define WL_TX_POWER_80_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3sdm_mcs8)
+#define WL_TX_POWER_80_S3x3_FIRST OFFSETOF(txppr_t, b80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_80_S1X1_VHT OFFSETOF(txppr_t, b80_1x1vht)
+#define WL_TX_POWER_80_S1X2_CDD_VHT OFFSETOF(txppr_t, b80_1x2cdd_vht)
+#define WL_TX_POWER_80_S2X2_STBC_VHT OFFSETOF(txppr_t, b80_2x2stbc_vht)
+#define WL_TX_POWER_80_S2X2_VHT OFFSETOF(txppr_t, b80_2x2sdm_vht)
+#define WL_TX_POWER_80_S1X3_CDD_VHT OFFSETOF(txppr_t, b80_1x3cdd_vht)
+#define WL_TX_POWER_80_S2X3_STBC_VHT OFFSETOF(txppr_t, b80_2x3stbc_vht)
+#define WL_TX_POWER_80_S2X3_VHT OFFSETOF(txppr_t, b80_2x3sdm_vht)
+#define WL_TX_POWER_80_S3X3_VHT OFFSETOF(txppr_t, b80_3x3sdm_vht)
+
+/* 20 in 80MHz */
+#define WL_TX_POWER_20UUL_CCK_FIRST OFFSETOF(txppr_t, b20in80_1x1dsss)
+#define WL_TX_POWER_20UUL_OFDM_FIRST OFFSETOF(txppr_t, b20in80_1x1ofdm)
+#define WL_TX_POWER_20UUL_S1x1_FIRST OFFSETOF(txppr_t, b20in80_1x1mcs0)
+
+#define WL_TX_POWER_CCK_20UU_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2dsss)
+#define WL_TX_POWER_20UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_ofdm)
+#define WL_TX_POWER_20UUL_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_mcs0)
+#define WL_TX_POWER_20UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2stbc_mcs0)
+#define WL_TX_POWER_20UUL_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_20UU_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3dsss)
+#define WL_TX_POWER_20UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_ofdm)
+#define WL_TX_POWER_20UUL_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_mcs0)
+#define WL_TX_POWER_20UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3stbc_mcs0)
+#define WL_TX_POWER_20UUL_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3sdm_mcs8)
+#define WL_TX_POWER_20UUL_S3x3_FIRST OFFSETOF(txppr_t, b20in80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_20UUL_S1X1_VHT OFFSETOF(txppr_t, b20in80_1x1vht)
+#define WL_TX_POWER_20UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in80_1x2cdd_vht)
+#define WL_TX_POWER_20UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in80_2x2stbc_vht)
+#define WL_TX_POWER_20UUL_S2X2_VHT OFFSETOF(txppr_t, b20in80_2x2sdm_vht)
+#define WL_TX_POWER_20UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in80_1x3cdd_vht)
+#define WL_TX_POWER_20UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in80_2x3stbc_vht)
+#define WL_TX_POWER_20UUL_S2X3_VHT OFFSETOF(txppr_t, b20in80_2x3sdm_vht)
+#define WL_TX_POWER_20UUL_S3X3_VHT OFFSETOF(txppr_t, b20in80_3x3sdm_vht)
+
+/* 40 in 80MHz */
+#define WL_TX_POWER_40UUL_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40in80_dummy1x1dsss)
+#define WL_TX_POWER_40UUL_OFDM_FIRST OFFSETOF(txppr_t, b40in80_1x1ofdm)
+#define WL_TX_POWER_40UUL_S1x1_FIRST OFFSETOF(txppr_t, b40in80_1x1mcs0)
+
+#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40in80_dummy1x2dsss)
+#define WL_TX_POWER_40UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_ofdm)
+#define WL_TX_POWER_40UUL_S1x2_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_mcs0)
+#define WL_TX_POWER_40UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2stbc_mcs0)
+#define WL_TX_POWER_40UUL_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2sdm_mcs8)
+
+#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_dummy1x3dsss)
+#define WL_TX_POWER_40UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_ofdm)
+#define WL_TX_POWER_40UUL_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_mcs0)
+#define WL_TX_POWER_40UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3stbc_mcs0)
+#define WL_TX_POWER_40UUL_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3sdm_mcs8)
+#define WL_TX_POWER_40UUL_S3x3_FIRST OFFSETOF(txppr_t, b40in80_3x3sdm_mcs16)
+
+#define WL_TX_POWER_40UUL_S1X1_VHT OFFSETOF(txppr_t, b40in80_1x1vht)
+#define WL_TX_POWER_40UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b40in80_1x2cdd_vht)
+#define WL_TX_POWER_40UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b40in80_2x2stbc_vht)
+#define WL_TX_POWER_40UUL_S2X2_VHT OFFSETOF(txppr_t, b40in80_2x2sdm_vht)
+#define WL_TX_POWER_40UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b40in80_1x3cdd_vht)
+#define WL_TX_POWER_40UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b40in80_2x3stbc_vht)
+#define WL_TX_POWER_40UUL_S2X3_VHT OFFSETOF(txppr_t, b40in80_2x3sdm_vht)
+#define WL_TX_POWER_40UUL_S3X3_VHT OFFSETOF(txppr_t, b40in80_3x3sdm_vht)
+
+#define WL_TX_POWER_MCS_32 OFFSETOF(txppr_t, mcs32) /* C_CHECK remove later */
+
+#define WL_TX_POWER_RATES sizeof(struct txppr)
+
+/* sslpnphy specifics */
+#define WL_TX_POWER_MCS20_SISO_FIRST_SSN WL_TX_POWER_MCS20_SISO_FIRST
+#define WL_TX_POWER_MCS40_SISO_FIRST_SSN WL_TX_POWER_MCS40_SISO_FIRST
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+
+/* Per-rate target tx power report; ppr[] is sized by WL_TX_POWER_RATES
+ * (== sizeof(struct txppr)) and indexed by the WL_TX_POWER_* offsets above.
+ */
+typedef struct {
+	uint16 ver;				/* version of this struct */
+	uint16 len;				/* length in bytes of this structure */
+	uint32 flags;				/* presumably WL_TX_POWER_F_* bits - TODO confirm */
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 ppr[WL_TX_POWER_RATES];		/* Latest target power */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION 0
+#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION 43
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ 0
+#define WL_BW_40MHZ 1
+#define WL_BW_80MHZ 2
+
+/* tx_power_t.flags bits */
+#ifdef PPR_API
+#define WL_TX_POWER2_F_ENABLED 1
+#define WL_TX_POWER2_F_HW 2
+#define WL_TX_POWER2_F_MIMO 4
+#define WL_TX_POWER2_F_SISO 8
+#define WL_TX_POWER2_F_HT 0x10
+#else
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+#endif
+/* Full "curpower" report: per-rate user/board/target limits plus regulatory
+ * (CLM) limits for the main channel and its sub-channels. The struct is named
+ * tx_power_new_t when PPR_API is defined, tx_power_t otherwise; the layout is
+ * identical in both cases.
+ */
+typedef struct {
+	uint32 flags;				/* WL_TX_POWER_F_* / WL_TX_POWER2_F_* bits */
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 local_max;			/* local max according to the AP */
+	uint8 local_constraint;			/* local constraint according to the AP */
+	int8  antgain[2];			/* Ant gain for each band - from SROM */
+	uint8 rf_cores;				/* count of RF Cores being reported */
+	uint8 est_Pout[4];			/* Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+	                        * without adjustment
+	                        */
+	uint8 est_Pout_cck;			/* Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];			/* Maximum target power among all rates */
+	uint tx_power_max_rate_ind[4];		/* Index of the rate with the max target power */
+	uint8 user_limit[WL_TX_POWER_RATES];	/* User limit */
+	int8 board_limit[WL_TX_POWER_RATES];	/* Max power board can support (SROM) */
+	int8 target[WL_TX_POWER_RATES];		/* Latest target power */
+	int8 clm_limits[WL_NUMRATES];		/* regulatory limits - 20, 40 or 80MHz */
+	int8 clm_limits_subchan1[WL_NUMRATES];	/* regulatory limits - 20in40 or 40in80 */
+	int8 clm_limits_subchan2[WL_NUMRATES];	/* regulatory limits - 20in80MHz */
+	int8 sar;					/* SAR limit for display by wl executable */
+	int8 channel_bandwidth;		/* 20, 40 or 80 MHz bandwidth? */
+	uint8 version;				/* Version of the data format wlu <--> driver */
+	uint8 display_core;			/* Displayed curpower core */
+#ifdef PPR_API
+} tx_power_new_t;
+#else
+} tx_power_t;
+#endif
+
+/* Instantaneous tx power estimates, one per band plus a separate
+ * 2.4 GHz OFDM value.
+ */
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 Ghz */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_inst_power_t;
+
+
+/* Older-style tx power report: same header fields as tx_power_t but the
+ * per-rate tables are carried as whole txppr_t structures instead of
+ * flat WL_TX_POWER_RATES byte arrays.
+ */
+typedef struct {
+	uint32 flags;				/* presumably WL_TX_POWER_F_* bits - TODO confirm */
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 local_max;			/* local max according to the AP */
+	uint8 local_constraint;			/* local constraint according to the AP */
+	int8  antgain[2];			/* Ant gain for each band - from SROM */
+	uint8 rf_cores;				/* count of RF Cores being reported */
+	uint8 est_Pout[4];			/* Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+	                        * without adjustment
+	                        */
+	uint8 est_Pout_cck;			/* Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];			/* Maximum target power among all rates */
+	uint tx_power_max_rate_ind[4];	/* Index of the rate with the max target power */
+	txppr_t user_limit;			/* User limit */
+	txppr_t reg_limit;			/* Regulatory power limit */
+	txppr_t board_limit;			/* Max power board can support (SROM) */
+	txppr_t target;				/* Latest target power */
+} wl_txpwr_t;
+
+#define WL_NUM_TXCHAIN_MAX	4
+/* Per-chain power offsets applied on top of the per-rate targets. */
+typedef struct wl_txchain_pwr_offsets {
+	int8 offset[WL_NUM_TXCHAIN_MAX];	/* quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC 1
+#define WLC_MEASURE_CHANNEL_BASIC 2
+#define WLC_MEASURE_CHANNEL_CCA 3
+#define WLC_MEASURE_CHANNEL_RPI 4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
+
+#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define WL_BTC_DISABLE 0 /* disable BT coexistence */
+#define WL_BTC_FULLTDM 1 /* full TDM COEX */
+#define	WL_BTC_ENABLE	1	/* full TDM COEX to maintain backward compatibility */
+#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */
+#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */
+#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */
+#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT 8 /* set the default mode for the device */
+#define WL_INF_BTC_DISABLE 0
+#define WL_INF_BTC_ENABLE 1
+#define WL_INF_BTC_AUTO 3
+
+/* BTC wire used by "btc_wire" iovar */
+#define WL_BTC_DEFWIRE 0 /* use default wire setting */
+#define WL_BTC_2WIRE 2 /* use 2-wire BTC */
+#define WL_BTC_3WIRE 3 /* use 3-wire BTC */
+#define WL_BTC_4WIRE 4 /* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT (1 << 0)
+#define WL_BTC_FLAG_BT_DEF (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP (1 << 5)
+#define WL_BTC_FLAG_ECI (1 << 6)
+#define WL_BTC_FLAG_LIGHT (1 << 7)
+#define WL_BTC_FLAG_PARALLEL (1 << 8)
+#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */
+
+/* Message levels */
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_PHYCAL_VAL 0x00100000 /* retired in TOT on 6/10/2009 */
+#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL 0x04000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_PROTO_VAL 0x00000020
+#define WL_BTA_VAL 0x00000040
+#define WL_CHANINT_VAL 0x00000080
+#define WL_THERMAL_VAL 0x00000100 /* retired in TOT on 6/10/2009 */
+#define WL_P2P_VAL 0x00000200
+#define WL_ITFR_VAL 0x00000400
+#define WL_MCHAN_VAL 0x00000800
+#define WL_TDLS_VAL 0x00001000
+#define WL_MCNX_VAL 0x00002000
+#define WL_PROT_VAL 0x00004000
+#define WL_PSTA_VAL 0x00008000
+#define WL_TBTT_VAL 0x00010000
+#define WL_NIC_VAL 0x00020000
+#define WL_PWRSEL_VAL 0x00040000
+/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL 0x80000000
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define WL_LED_OFF 0 /* always off */
+#define WL_LED_ON 1 /* always on */
+#define WL_LED_ACTIVITY 2 /* activity */
+#define WL_LED_RADIO 3 /* radio enabled */
+#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */
+#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */
+#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
+#define WL_LED_ASSOC 10 /* associated state indicator */
+#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
+#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
+#define WL_LED_WI4 13
+#define WL_LED_WI5 14
+#define WL_LED_BLINKSLOW 15 /* blink slow */
+#define WL_LED_BLINKMED 16 /* blink med */
+#define WL_LED_BLINKFAST 17 /* blink fast */
+#define WL_LED_BLINKCUSTOM 18 /* blink custom */
+#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */
+ /* keep on for 300 sec */
+#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
+#define WL_LED_NUMBEHAVIOR 22
+
+/* led behavior numeric value format */
+#define WL_LED_BEH_MASK 0x7f /* behavior mask */
+#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#define WL_NUMCHANSPECS 110
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
+#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */
+
+/* number of bytes needed to define a 128-bit mask for MAC event reporting */
+#define WL_EVENTING_MASK_LEN 16
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value. RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ * uint8 akm[DOT11_OUI_LEN+1]; akm suite
+ * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ *	offset 1: +ve RSSI boost value in dB
+ */
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI 1 /* by RSSI */
+#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */
+#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
+
+/* Raw 3-octet TS Info field carried as-is in iovar arguments
+ * (NOTE(review): presumably the 802.11 TSPEC TS Info field - confirm).
+ */
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+#define NFIFO 6 /* # tx/rx fifopairs */
+
+#define WL_CNT_T_VERSION 8 /* current version of wl_cnt_t struct */
+
+typedef struct {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors (derived: sum of others) */
+ uint32 txctl; /* tx management frames */
+ uint32 txprshort; /* tx short preamble frames */
+ uint32 txserr; /* tx status errors */
+ uint32 txnobuf; /* tx out of buffers errors */
+ uint32 txnoassoc; /* tx discard because we're not associated */
+ uint32 txrunt; /* tx runt frames */
+ uint32 txchit; /* tx header cache hit (fastpath) */
+ uint32 txcmiss; /* tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /* tx fifo underflows */
+ uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txphycrs;
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+ uint32 rxerror; /* rx data errors (derived: sum of others) */
+ uint32 rxctl; /* rx management frames */
+ uint32 rxnobuf; /* rx out of buffers errors */
+ uint32 rxnondata; /* rx non data frames in the data channel errors */
+ uint32 rxbadds; /* rx bad DS errors */
+ uint32 rxbadcm; /* rx bad control or management frames */
+ uint32 rxfragerr; /* rx fragmentation errors */
+ uint32 rxrunt; /* rx runt frames */
+ uint32 rxgiant; /* rx giant frames */
+ uint32 rxnoscb; /* rx no scb error */
+ uint32 rxbadproto; /* rx invalid frames */
+ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
+ uint32 rxbadda; /* rx frames tossed for invalid da */
+ uint32 rxfilter; /* rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /* rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /* tx/rx dma descriptor errors */
+ uint32 dmada; /* tx/rx dma data errors */
+ uint32 dmape; /* tx/rx dma descriptor protocol errors */
+ uint32 reset; /* reset count */
+ uint32 tbtt; /* cnts the TBTT int's */
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail; /* callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /* number of RTS sent out by the MAC */
+ uint32 txctsfrm; /* number of CTS sent out by the MAC */
+ uint32 txackfrm; /* number of ACK frames sent out */
+ uint32 txdnlfrm; /* Not used */
+ uint32 txbcnfrm; /* beacons transmitted */
+ uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /* parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /* Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /* beacons received from other BSS */
+ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /* Number of PMQ overflows */
+ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */
+ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /* obsolete */
+ uint32 frmscons; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 txglitch_nack; /* obsolete */
+ uint32 txburst; /* obsolete */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /* TKIPReplays */
+ uint32 ccmpfmterr; /* CCMPFormatErrors */
+ uint32 ccmpreplay; /* CCMPReplays */
+ uint32 ccmpundec; /* CCMPDecryptErrors */
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
+ uint32 wepundec; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr; /* dot11WEPICVErrorCount */
+ uint32 decsuccess; /* DecryptSuccessCount */
+ uint32 tkipicverr; /* TKIPICVErrorCount */
+ uint32 wepexcluded; /* dot11WEPExcludedCount */
+
+ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /* Count PSM watchdogs */
+ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /* PRQ entries read in */
+ uint32 prq_undirected_entries; /* which were bcast bss & ssid */
+ uint32 prq_bad_entries; /* which could not be translated to info */
+ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /* count of radio disables */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+
+ uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /* count for sgi transmit */
+ uint32 rxmpdu_sgi; /* count for sgi received */
+ uint32 txmpdu_stbc; /* count for stbc transmit */
+ uint32 rxmpdu_stbc; /* count for stbc received */
+
+ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /* TKIPReplays */
+ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /* CCMPReplays */
+ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /* DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
+
+ uint32 dma_hang; /* count for dma hang */
+ uint32 reinit; /* count for reinit */
+
+ uint32 pstatxucast; /* count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /* count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /* count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /* count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */
+
+ uint32 cso_passthrough; /* hw cso required but passthrough */
+} wl_cnt_t;
+
+/* WL counter block, fixed version-six layout (distinct from wl_cnt_t above) */
+typedef struct {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txerror; /* tx data errors (derived: sum of others) */
+ uint32 txctl; /* tx management frames */
+ uint32 txprshort; /* tx short preamble frames */
+ uint32 txserr; /* tx status errors */
+ uint32 txnobuf; /* tx out of buffers errors */
+ uint32 txnoassoc; /* tx discard because we're not associated */
+ uint32 txrunt; /* tx runt frames */
+ uint32 txchit; /* tx header cache hit (fastpath) */
+ uint32 txcmiss; /* tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /* tx fifo underflows */
+ uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txphycrs;
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+ uint32 rxerror; /* rx data errors (derived: sum of others) */
+ uint32 rxctl; /* rx management frames */
+ uint32 rxnobuf; /* rx out of buffers errors */
+ uint32 rxnondata; /* rx non data frames in the data channel errors */
+ uint32 rxbadds; /* rx bad DS errors */
+ uint32 rxbadcm; /* rx bad control or management frames */
+ uint32 rxfragerr; /* rx fragmentation errors */
+ uint32 rxrunt; /* rx runt frames */
+ uint32 rxgiant; /* rx giant frames */
+ uint32 rxnoscb; /* rx no scb error */
+ uint32 rxbadproto; /* rx invalid frames */
+ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
+ uint32 rxbadda; /* rx frames tossed for invalid da */
+ uint32 rxfilter; /* rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /* rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /* tx/rx dma descriptor errors */
+ uint32 dmada; /* tx/rx dma data errors */
+ uint32 dmape; /* tx/rx dma descriptor protocol errors */
+ uint32 reset; /* reset count */
+ uint32 tbtt; /* cnts the TBTT int's */
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail; /* callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /* number of RTS sent out by the MAC */
+ uint32 txctsfrm; /* number of CTS sent out by the MAC */
+ uint32 txackfrm; /* number of ACK frames sent out */
+ uint32 txdnlfrm; /* Not used */
+ uint32 txbcnfrm; /* beacons transmitted */
+ uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /* parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /* Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /* beacons received from other BSS */
+ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /* Number of PMQ overflows */
+ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+ uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */
+ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /* obsolete */
+ uint32 frmscons; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 txglitch_nack; /* obsolete */
+ uint32 txburst; /* obsolete */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /* TKIPReplays */
+ uint32 ccmpfmterr; /* CCMPFormatErrors */
+ uint32 ccmpreplay; /* CCMPReplays */
+ uint32 ccmpundec; /* CCMPDecryptErrors */
+ uint32 fourwayfail; /* FourWayHandshakeFailures */
+ uint32 wepundec; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr; /* dot11WEPICVErrorCount */
+ uint32 decsuccess; /* DecryptSuccessCount */
+ uint32 tkipicverr; /* TKIPICVErrorCount */
+ uint32 wepexcluded; /* dot11WEPExcludedCount */
+
+ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /* TKIPReplays */
+ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /* CCMPReplays */
+ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /* DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
+
+ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
+ uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+ uint32 psmwds; /* Count PSM watchdogs */
+ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /* PRQ entries read in */
+ uint32 prq_undirected_entries; /* which were bcast bss & ssid */
+ uint32 prq_bad_entries; /* which could not be translated to info */
+ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ /* pkteng rx frame stats */
+ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+ uint32 rfdisable; /* count of radio disables */
+ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+
+ uint32 txmpdu_sgi; /* count for sgi transmit */
+ uint32 rxmpdu_sgi; /* count for sgi received */
+ uint32 txmpdu_stbc; /* count for stbc transmit */
+ uint32 rxmpdu_stbc; /* count for stbc received */
+} wl_cnt_ver_six_t;
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WL_DELTA_STATS_T_VERSION 1 /* current version of wl_delta_stats_t struct */
+
+/* Delta (since last query) snapshot of selected tx/rx counters */
+typedef struct {
+ uint16 version; /* see definition of WL_DELTA_STATS_T_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /* tx data frames */
+ uint32 txbyte; /* tx data bytes */
+ uint32 txretrans; /* tx mac retransmits */
+ uint32 txfail; /* tx failures */
+
+ /* receive stat counters */
+ uint32 rxframe; /* rx data frames */
+ uint32 rxbyte; /* rx data bytes */
+
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+} wl_delta_stats_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
+
+/* Simple packet/byte counter pair, used per access category in wl_wme_cnt_t */
+typedef struct {
+ uint32 packets; /* number of packets */
+ uint32 bytes; /* number of bytes */
+} wl_traffic_stats_t;
+
+/* Per-access-category (WME) traffic counters, indexed 0..AC_COUNT-1 */
+typedef struct {
+ uint16 version; /* see definition of WL_WME_CNT_VERSION */
+ uint16 length; /* length of entire structure */
+
+ wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */
+ wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */
+ wl_traffic_stats_t rx[AC_COUNT]; /* Packets received */
+ wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */
+
+ wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */
+
+ wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+/* 64-bit debug message level split across two 32-bit words
+ * NOTE(review): low/high word semantics presumed from names — confirm against users
+ */
+struct wl_msglevel2 {
+ uint32 low; /* lower 32 bits of the message-level bitmap */
+ uint32 high; /* upper 32 bits of the message-level bitmap */
+};
+
+/* Parameters for a periodic (multi) keep-alive packet */
+typedef struct wl_mkeep_alive_pkt {
+ uint16 version; /* Version for mkeep_alive */
+ uint16 length; /* length of fixed parameters in the structure */
+ uint32 period_msec; /* repeat interval, in milliseconds */
+ uint16 len_bytes; /* length of data[] payload, in bytes */
+ uint8 keep_alive_id; /* 0 - 3 for N = 4 */
+ uint8 data[1]; /* variable-length packet contents (allocated past the struct) */
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION 1
+#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION 500
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#ifdef WLBA
+
+#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */
+
+/* block ack (BA) related stats */
+typedef struct wlc_ba_cnt {
+ uint16 version; /* WLC_BA_CNT_VERSION */
+ uint16 length; /* length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txpdu; /* pdus sent */
+ uint32 txsdu; /* sdus sent */
+ uint32 txfc; /* tx side flow controlled packets */
+ uint32 txfci; /* tx side flow control initiated */
+ uint32 txretrans; /* retransmitted pdus */
+ uint32 txbatimer; /* ba resend due to timer */
+ uint32 txdrop; /* dropped packets */
+ uint32 txaddbareq; /* addba req sent */
+ uint32 txaddbaresp; /* addba resp sent */
+ uint32 txdelba; /* delba sent */
+ uint32 txba; /* ba sent */
+ uint32 txbar; /* bar sent */
+ uint32 txpad[4]; /* reserved for future use */
+
+ /* receive side counters */
+ uint32 rxpdu; /* pdus recd */
+ uint32 rxqed; /* pdus buffered before sending up */
+ uint32 rxdup; /* duplicate pdus */
+ uint32 rxnobuf; /* pdus discarded due to no buf */
+ uint32 rxaddbareq; /* addba req recd */
+ uint32 rxaddbaresp; /* addba resp recd */
+ uint32 rxdelba; /* delba recd */
+ uint32 rxba; /* ba recd */
+ uint32 rxbar; /* bar recd */
+ uint32 rxinvba; /* invalid ba recd */
+ uint32 rxbaholes; /* ba recd with holes */
+ uint32 rxunexp; /* unexpected packets */
+ uint32 rxpad[4]; /* reserved for future use */
+} wlc_ba_cnt_t;
+#endif /* WLBA */
+
+/* structure for per-tid ampdu control */
+struct ampdu_tid_control {
+ uint8 tid; /* traffic identifier (TID) */
+ uint8 enable; /* enable/disable */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+ struct ether_addr ea; /* Station address */
+ uint8 tid; /* traffic identifier (TID) */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+ uint8 tid; /* traffic identifier (TID) */
+ uint8 retry; /* retry value */
+};
+
+/* Different discovery modes for dpt */
+#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */
+#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */
+#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */
+#define DPT_PATHSEL_APPATH 2 /* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+
+/* structure for dpt iovars */
+typedef struct dpt_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* mode: depends on iovar (see DPT_* defines above) */
+ uint32 pad; /* reserved for future use */
+} dpt_iovar_t;
+
+/* flags to indicate DPT status */
+#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */
+#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */
+#define DPT_STATUS_FAILED 0x04 /* DPT link failed */
+
+#define DPT_FNAME_LEN 48 /* Max length of friendly name */
+
+/* per-station DPT link status (see DPT_STATUS_* flags above) */
+typedef struct dpt_status {
+ uint8 status; /* flags to indicate status (DPT_STATUS_*) */
+ uint8 fnlen; /* length of friendly name */
+ uchar name[DPT_FNAME_LEN]; /* friendly name */
+ uint32 rssi; /* RSSI of the link */
+ sta_info_t sta; /* sta info */
+} dpt_status_t;
+
+/* structure for dpt list */
+typedef struct dpt_list {
+ uint32 num; /* number of entries in struct */
+ dpt_status_t status[1]; /* per station info (variable-length, num entries) */
+} dpt_list_t;
+
+/* structure for dpt friendly name */
+typedef struct dpt_fname {
+ uint8 len; /* length of friendly name */
+ uchar name[DPT_FNAME_LEN]; /* friendly name */
+} dpt_fname_t;
+
+#define BDD_FNAME_LEN 32 /* Max length of friendly name */
+/* friendly-name container (BDD), bounded by BDD_FNAME_LEN */
+typedef struct bdd_fname {
+ uint8 len; /* length of friendly name */
+ uchar name[BDD_FNAME_LEN]; /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPEC */
+struct tslist {
+ int count; /* number of tspecs */
+ struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo (count entries) */
+};
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */
+
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* mode: depends on iovar */
+ chanspec_t chanspec;
+ uint32 pad; /* future */
+} tdls_iovar_t;
+
+/* modes */
+#define TDLS_WFD_IE_TX 0
+#define TDLS_WFD_IE_RX 1
+#define TDLS_WFD_IE_SIZE 255
+/* structure for tdls wfd ie */
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* TDLS_WFD_IE_TX or TDLS_WFD_IE_RX */
+ uint8 length; /* number of valid bytes in data[] */
+ uint8 data[TDLS_WFD_IE_SIZE]; /* WFD IE contents */
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+ uint16 version; /* see definition of TSPEC_ARG_VERSION */
+ uint16 length; /* length of entire structure */
+ uint flag; /* bit field (status bits: TSPEC_PENDING..TSPEC_STATUS_MASK) */
+ /* TSPEC Arguments */
+ struct tsinfo_arg tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint min_srv_interval; /* Minimum Service Interval (us) */
+ uint max_srv_interval; /* Maximum Service Interval (us) */
+ uint inactivity_interval; /* Inactivity Interval (us) */
+ uint suspension_interval; /* Suspension Interval (us) */
+ uint srv_start_time; /* Service Start Time (us) */
+ uint min_data_rate; /* Minimum Data Rate (bps) */
+ uint mean_data_rate; /* Mean Data Rate (bps) */
+ uint peak_data_rate; /* Peak Data Rate (bps) */
+ uint max_burst_size; /* Maximum Burst Size (bytes) */
+ uint delay_bound; /* Delay Bound (us) */
+ uint min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+ uint8 dialog_token; /* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+/* tspec arg for desired station */
+typedef struct tspec_per_sta_arg {
+ struct ether_addr ea; /* station address */
+ struct tspec_arg ts; /* TSPEC for this station */
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+/* structure for max bandwidth for each access category */
+typedef struct wme_max_bandwidth {
+ uint32 ac[AC_COUNT]; /* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+/* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
+#define WLC_WOWL_MAX_KEEPALIVE 2
+
+/* define for flag */
+#define TSPEC_PENDING 0 /* TSPEC pending */
+#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
+#define TSPEC_REJECTED 2 /* TSPEC rejected */
+#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
+#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
+
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO 0x0004
+#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+/* Packet lifetime configuration per ac */
+/* Packet lifetime configuration per ac (see WL_LIFETIME_MAX above) */
+typedef struct wl_lifetime {
+ uint32 ac; /* access class */
+ uint32 lifetime; /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/* Channel Switch Announcement param */
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+ uint8 mode; /* value 0 or 1 */
+ uint8 count; /* count # of beacons before switching */
+ chanspec_t chspec; /* chanspec */
+ uint8 reg; /* regulatory class */
+} wl_chan_switch_t;
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0) value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ * the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+/* PFN result sort criteria (presumed; used with SORT_CRITERIA_BIT — confirm) */
+enum {
+ PFN_LIST_ORDER, /* sort by configured list order */
+ PFN_RSSI /* sort by RSSI */
+};
+
+/* generic off/on values (DISABLE = 0, ENABLE = 1) */
+enum {
+ DISABLE,
+ ENABLE
+};
+
+/* adaptive-scan modes (presumed; paired with ENABLE_ADAPTSCAN_BIT — confirm) */
+enum {
+ OFF_ADAPT, /* adaptive scan off */
+ SMART_ADAPT,
+ STRICT_ADAPT,
+ SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+/* PFN network info structure */
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+ struct ether_addr BSSID; /* BSSID of the found network */
+ uint8 channel; /* channel number only */
+ uint8 SSID_len; /* number of valid bytes in SSID[] */
+ uint8 SSID[32]; /* SSID, not NUL-terminated (32 = 802.11 max SSID length) */
+} wl_pfn_subnet_info_t;
+
+/* one PFN scan result entry */
+typedef struct wl_pfn_net_info {
+ wl_pfn_subnet_info_t pfnsubnet; /* identity of the found network */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ uint16 timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+/* PFN scan results container */
+typedef struct wl_pfn_scanresults {
+ uint32 version; /* PFN_SCANRESULT_VERSION */
+ uint32 status; /* PFN_COMPLETE / PFN_INCOMPLETE (presumed from defines above) */
+ uint32 count; /* number of entries in netinfo[] */
+ wl_pfn_net_info_t netinfo[1]; /* variable-length array of results */
+} wl_pfn_scanresults_t;
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+ int32 version; /* PNO parameters version */
+ int32 scan_freq; /* Scan frequency */
+ int32 lost_network_timeout; /* Timeout in sec. to declare
+ * discovered network as lost
+ */
+ int16 flags; /* Bit field to control features
+ * of PFN such as sort criteria auto
+ * enable switch and background scan
+ */
+ int16 rssi_margin; /* Margin to avoid jitter for choosing a
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /* number of best networks in each scan */
+ uint8 mscan; /* number of scans recorded */
+ uint8 repeat; /* Minimum number of scan intervals
+ *before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /* Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+ struct ether_addr macaddr;
+ /* Bit4: suppress_lost, Bit3: suppress_found */
+ uint16 flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+
+typedef struct wl_pfn_cfg {
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid; /* ssid name and its length */
+ int32 flags; /* bit2: hidden */
+ int32 infra; /* BSS Vs IBSS */
+ int32 auth; /* Open Vs Closed */
+ int32 wpa_auth; /* WPA type */
+ int32 wsec; /* wsec value */
+} wl_pfn_t;
+#define WL_PFN_HIDDEN_BIT 2 /* bit2 of wl_pfn_t.flags: hidden (non-broadcast) SSID */
+/* Parenthesized so the macros expand safely inside larger expressions:
+ * previously "x / PNO_SCAN_MAX_FW" expanded to "x / 508*1000".
+ */
+#define PNO_SCAN_MAX_FW (508*1000) /* max time scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC (PNO_SCAN_MAX_FW/1000) /* max time scan time in SEC */
+#define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */
+#define WL_PFN_HIDDEN_MASK 0x4 /* mask form of WL_PFN_HIDDEN_BIT */
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+struct toe_ol_stats_t {
+ /* Num of tx packets that don't need to be checksummed */
+ uint32 tx_summed;
+
+ /* Num of tx packets where checksum is filled by offload engine */
+ uint32 tx_iph_fill;
+ uint32 tx_tcp_fill;
+ uint32 tx_udp_fill;
+ uint32 tx_icmp_fill;
+
+ /* Num of rx packets where toe finds out if checksum is good or bad */
+ uint32 rx_iph_good;
+ uint32 rx_iph_bad;
+ uint32 rx_tcp_good;
+ uint32 rx_tcp_bad;
+ uint32 rx_udp_good;
+ uint32 rx_udp_bad;
+ uint32 rx_icmp_good;
+ uint32 rx_icmp_bad;
+
+ /* Num of tx packets in which csum error is injected */
+ uint32 tx_tcp_errinj;
+ uint32 tx_udp_errinj;
+ uint32 tx_icmp_errinj;
+
+ /* Num of rx packets in which csum error is injected */
+ uint32 rx_tcp_errinj;
+ uint32 rx_udp_errinj;
+ uint32 rx_icmp_errinj;
+};
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+
+/* Arp offload statistic counts */
+struct arp_ol_stats_t {
+ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */
+
+ uint32 arp_table_entries; /* ARP table entries */
+ uint32 arp_table_overflow; /* ARP table additions skipped due to overflow */
+
+ uint32 host_request; /* ARP requests from host */
+ uint32 host_reply; /* ARP replies from host */
+ uint32 host_service; /* ARP requests from host serviced by ARP Agent */
+
+ uint32 peer_request; /* ARP requests received from network */
+ uint32 peer_request_drop; /* ARP requests from network that were dropped */
+ uint32 peer_reply; /* ARP replies received from network */
+ uint32 peer_reply_drop; /* ARP replies from network that were dropped */
+ uint32 peer_service; /* ARP request from host serviced by ARP Agent */
+};
+
+/* NS offload statistic counts */
+struct nd_ol_stats_t {
+ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */
+ uint32 peer_request; /* NS requests received from network */
+ uint32 peer_request_drop; /* NS requests from network that were dropped */
+ uint32 peer_reply_drop; /* NA replies from network that were dropped */
+ uint32 peer_service; /* NS request from host serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+ uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */
+ uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */
+ uint8 data[1]; /* Variable length packet to transmit. Contents should include
+ * entire ethernet packet (enet header, IP header, UDP header,
+ * and UDP payload) in network byte order.
+ */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH /* Pattern matching filter */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset; /* Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
+ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
+ * at offset 0. Pattern immediately follows mask.
+ */
+} wl_pkt_filter_pattern_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+ uint32 id; /* Unique filter id, specified by app. */
+ uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ uint32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */
+ } u;
+} wl_pkt_filter_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+ uint32 id; /* Unique filter id */
+ uint32 enable; /* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+ uint32 num; /* Number of installed packet filters */
+ wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched; /* # filter matches for specified filter id */
+ uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */
+ uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd; /* common ioctl definition */
+ uint32 len; /* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
+
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay; /* Inter-packet delay */
+ uint32 nframes; /* Number of frames */
+ uint32 length; /* Packet length */
+ uint8 seqno; /* Enable/disable sequence no. */
+ struct ether_addr dest; /* Destination address */
+ struct ether_addr src; /* Source address */
+} wl_pkteng_t;
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+typedef struct wl_pkteng_stats {
+ uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */
+ int32 rssi; /* RSSI */
+ int32 snr; /* signal to noise ratio */
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+} wl_pkteng_stats_t;
+
+
+#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */
+#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */
+#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */
+#define WL_WOWL_TST (1 << 5) /* Wakeup after test */
+#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotaton */
+#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */
+
+#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */
+
+/* Wake-on-wireless (WOWL) net-pattern descriptor.  The mask bytes follow
+ * this header immediately; the pattern bytes start at 'patternoffset'
+ * from the start of the structure (see trailing comments).
+ */
+typedef struct {
+ uint32 masksize; /* Size of the mask in #of bytes */
+ uint32 offset; /* Offset to start looking for the packet in # of bytes */
+ uint32 patternoffset; /* Offset of start of pattern in the structure */
+ uint32 patternsize; /* Size of the pattern itself in #of bytes */
+ uint32 id; /* id */
+ uint32 reasonsize; /* Size of the wakeup reason code */
+ uint32 flags; /* Flags to tell the pattern type and other properties */
+ /* Mask follows the structure above */
+ /* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
+
+/* variable-length list of WOWL patterns; 'count' entries follow */
+typedef struct {
+ uint count;
+ wl_wowl_pattern_t pattern[1]; /* variable-length trailing array */
+} wl_wowl_pattern_list_t;
+
+/* wakeup indication reported after resume */
+typedef struct {
+ uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */
+ uint16 ucode_wakeind; /* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */
+ int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event
+ * will be posted each time the RSSI of received
+ * beacons/packets crosses a level.
+ */
+} wl_rssi_event_t;
+
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN 4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef struct {
+ uint32 num_addrs;
+ char addr_type[WL_IOV_MAC_PARAM_LEN];
+ struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_params_t;
+
+
+/* Parameter block for PKTQ_LOG statistics */
+typedef struct {
+ uint32 requested; /* packets requested to be stored */
+ uint32 stored; /* packets stored */
+ uint32 saved; /* packets saved,
+ because a lowest priority queue has given away one packet
+ */
+ uint32 selfsaved; /* packets saved,
+ because an older packet from the same queue has been dropped
+ */
+ uint32 full_dropped; /* packets dropped,
+ because pktq is full with higher precedence packets
+ */
+ uint32 dropped; /* packets dropped because pktq per that precedence is full */
+ uint32 sacrificed; /* packets dropped,
+ in order to save one from a queue of a highest priority
+ */
+ uint32 busy; /* packets droped because of hardware/transmission error */
+ uint32 retry; /* packets re-sent because they were not received */
+ uint32 ps_retry; /* packets retried again prior to moving power save mode */
+ uint32 retry_drop; /* packets finally dropped after retry limit */
+ uint32 max_avail; /* the high-water mark of the queue capacity for packets -
+ goes to zero as queue fills
+ */
+ uint32 max_used; /* the high-water mark of the queue utilisation for packets -
+ increases with use ('inverse' of max_avail)
+ */
+ uint32 queue_capacity; /* the maximum capacity of the queue */
+} pktq_log_counters_v01_t;
+
+/* back-compat alias: preserves the historical misspelling of 'sacrificed' */
+#define sacrified sacrificed
+
+typedef struct {
+ uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
+ pktq_log_counters_v01_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ char headings[1];
+} pktq_log_format_v01_t;
+
+
+typedef struct {
+ uint32 version;
+ wl_iov_mac_params_t params;
+ union {
+ pktq_log_format_v01_t v01;
+ } pktq_log;
+} wl_iov_pktq_log_t;
+
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER 0x0100
+
+#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON 0x0001
+#define LOG_MODULE_ASSOC 0x0002
+#define LOG_MODULE_EVENT 0x0004
+#define LOG_MODULE_MAX 3 /* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE 0
+#define WL_LOG_LEVEL_ERR 1
+#define WL_LOG_LEVEL_WARN 2
+#define WL_LOG_LEVEL_INFO 3
+#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT 1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL 0
+#define LOG_ARGTYPE_STR 1 /* %s */
+#define LOG_ARGTYPE_INT 2 /* %d */
+#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */
+#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */
+
+typedef struct wlc_extlog_cfg {
+ int max_number;
+ uint16 module; /* bitmap */
+ uint8 level;
+ uint8 flag;
+ uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+ uint32 time;
+ uint16 module;
+ uint16 id;
+ uint8 level;
+ uint8 sub_unit;
+ uint8 seq_num;
+ int32 arg;
+ char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+ uint32 from_last;
+ uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+ uint16 id;
+ uint16 flag;
+ uint8 arg_type;
+ const char *fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER 1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+ FMTSTR_DRIVER_UP_ID = 0,
+ FMTSTR_DRIVER_DOWN_ID = 1,
+ FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+ FMTSTR_NO_PROGRESS_ID = 3,
+ FMTSTR_RFDISABLE_ID = 4,
+ FMTSTR_REG_PRINT_ID = 5,
+ FMTSTR_EXPTIME_ID = 6,
+ FMTSTR_JOIN_START_ID = 7,
+ FMTSTR_JOIN_COMPLETE_ID = 8,
+ FMTSTR_NO_NETWORKS_ID = 9,
+ FMTSTR_SECURITY_MISMATCH_ID = 10,
+ FMTSTR_RATE_MISMATCH_ID = 11,
+ FMTSTR_AP_PRUNED_ID = 12,
+ FMTSTR_KEY_INSERTED_ID = 13,
+ FMTSTR_DEAUTH_ID = 14,
+ FMTSTR_DISASSOC_ID = 15,
+ FMTSTR_LINK_UP_ID = 16,
+ FMTSTR_LINK_DOWN_ID = 17,
+ FMTSTR_RADIO_HW_OFF_ID = 18,
+ FMTSTR_RADIO_HW_ON_ID = 19,
+ FMTSTR_EVENT_DESC_ID = 20,
+ FMTSTR_PNP_SET_POWER_ID = 21,
+ FMTSTR_RADIO_SW_OFF_ID = 22,
+ FMTSTR_RADIO_SW_ON_ID = 23,
+ FMTSTR_PWD_MISMATCH_ID = 24,
+ FMTSTR_FATAL_ERROR_ID = 25,
+ FMTSTR_AUTH_FAIL_ID = 26,
+ FMTSTR_ASSOC_FAIL_ID = 27,
+ FMTSTR_IBSS_FAIL_ID = 28,
+ FMTSTR_EXTAP_FAIL_ID = 29,
+ FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+ uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */
+ uint32 offset; /* offset into overlay region to write code */
+ uint32 len; /* overlay code len */
+ /* overlay code follows this struct */
+} wl_ioctl_overlay_t;
+
+#define OVERLAY_IDX_MASK 0x000000ff
+#define OVERLAY_IDX_SHIFT 0
+#define OVERLAY_FLAGS_MASK 0xffffff00
+#define OVERLAY_FLAGS_SHIFT 8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD 0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL 0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP 0x400
+
+#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
+#endif /* DONGLEOVERLAYS */
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+/* require strict packing */
+#include <packed_section_start.h>
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN 4 /* length of the set command string:
+ * "add", "del" (+ NUL)
+ */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */
+
+#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ vndr_ie_t vndr_ie_data; /* vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int iecount; /* number of entries in the vndr_ie_list[] array */
+ vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */
+ vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ tlv_t ie_data; /* IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int iecount; /* number of entries in the ie_list[] array */
+ ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */
+ ie_buf_t ie_buffer; /* buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */
+ uint8 id; /* IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+ struct ether_addr staAddr;
+ uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+ sta_prbreq_wps_ie_hdr_t hdr;
+ uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+ uint32 totLen;
+ uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+ char dest[ETHER_ADDR_LEN]; /* destination MAC */
+ uint8 prio; /* Packet Priority */
+ uint8 flags; /* Flags */
+ uint32 tsf_l; /* TSF timer low */
+ uint32 tsf_h; /* TSF timer high */
+ uint16 rates; /* Main Rates */
+ uint16 txstatus; /* TX Status */
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif /* WLMEDIA_TXFAILEVENT */
+
+/* no strict structure packing */
+#include <packed_section_end.h>
+
+/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER 0x0100
+#define MAX_ASSRTSTR_LEN 64
+
+typedef struct assert_record {
+ uint32 time;
+ uint8 seq_num;
+ char str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN 8 /* fixed header bytes preceding log records in an ioctl buffer */
+/* Number of 'type'-sized records that fit in a 'len'-byte buffer.
+ * 'len' is parenthesized for macro hygiene (it may be an expression).
+ * NOTE(review): the division by sizeof() promotes the subtraction to an
+ * unsigned type, so callers must ensure len >= LOGRRC_FIX_LEN.
+ */
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) (((len) - LOGRRC_FIX_LEN)/sizeof(type))
+
+#ifdef BCMWAPI_WAI
+#define IV_LEN 16
+struct wapi_sta_msg_t
+{
+ uint16 msg_type;
+ uint16 datalen;
+ uint8 vap_mac[6];
+ uint8 reserve_data1[2];
+ uint8 sta_mac[6];
+ uint8 reserve_data2[2];
+ uint8 gsn[IV_LEN];
+ uint8 wie[256];
+};
+#endif /* BCMWAPI_WAI */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE 0 /* disabled */
+#define CHANIM_DETECT 1 /* detection only */
+#define CHANIM_EXT 2 /* external state machine */
+#define CHANIM_ACT 3 /* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT 0
+#define APCS_IOCTL 1
+#define APCS_CHANIM 2
+#define APCS_CSTIMER 3
+#define APCS_BTA 4
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD 10
+
+/* CHANIM */
+#define CCASTATS_TXDUR 0
+#define CCASTATS_INBSS 1
+#define CCASTATS_OBSS 2
+#define CCASTATS_NOCTG 3
+#define CCASTATS_NOPKT 4
+#define CCASTATS_DOZE 5
+#define CCASTATS_TXOP 6
+#define CCASTATS_GDTXDUR 7
+#define CCASTATS_BDTXDUR 8
+#define CCASTATS_MAX 9
+
+/* channel-interference (chanim) automatic-channel-selection record */
+typedef struct {
+ bool valid; /* record holds data */
+ uint8 trigger; /* cf. APCS_* reason codes above */
+ chanspec_t selected_chspc; /* channel chosen by ACS */
+ int8 bgnoise; /* background noise level -- presumably dBm, as in chanim_stats_t */
+ uint32 glitch_cnt;
+ uint8 ccastats; /* cf. CCASTATS_* indices above */
+ uint timestamp;
+} chanim_acs_record_t;
+
+/* ring of the last CHANIM_ACS_RECORD ACS decisions */
+typedef struct {
+ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+ uint8 count; /* number of valid records */
+ uint timestamp;
+} wl_acs_record_t;
+
+typedef struct chanim_stats {
+ uint32 glitchcnt; /* normalized as per second count */
+ uint32 badplcp; /* normalized as per second count */
+ uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */
+ int8 bgnoise; /* background noise level (in dBm) */
+ chanspec_t chanspec;
+ uint32 timestamp;
+} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 1
+#define WL_CHANIM_COUNT_ALL 0xff
+#define WL_CHANIM_COUNT_ONE 0x1
+
+typedef struct {
+ uint32 buflen;
+ uint32 version;
+ uint32 count;
+ chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
+
+/* Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE 0x1
+
+/* scb probe parameter */
+typedef struct {
+ uint32 scb_timeout;
+ uint32 scb_activity_time;
+ uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* ap tpc (transmit power control) modes.
+ * NOTE(review): this block appeared twice verbatim; the identical second
+ * copy has been removed (benign redefinitions, but pure duplication).
+ */
+#define AP_TPC_OFF 0
+#define AP_TPC_BSS_PWR 1 /* BSS power control */
+#define AP_TPC_AP_PWR 2 /* AP power control */
+#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN 127 /* maximum link margin value */
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/* selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+ uint32 count;
+ uint16 code; /* SC or RC code */
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+ uint32 version;
+ uint16 length; /* reserved for future usage */
+ uint8 type;
+ uint8 codetype;
+ uint32 ignored_cnt;
+ uint32 malformed_cnt;
+ uint32 count_total; /* count included the interested group */
+ wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+/* size of wl_smf_stats_t up to the variable-length 'elem' array.
+ * NOTE(review): stray trailing ';' removed -- it made the macro expand to
+ * "OFFSETOF(...);" and break any use inside an expression
+ * (e.g. "len = WL_SMFSTATS_FIXED_LEN + n * sizeof(wl_smfs_elem_t);").
+ */
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+ SMFS_CODETYPE_SC,
+ SMFS_CODETYPE_RC
+};
+
+/* reuse two number in the sc/rc space */
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED 0xFFFD
+
+typedef enum smfs_type {
+ SMFS_TYPE_AUTH,
+ SMFS_TYPE_ASSOC,
+ SMFS_TYPE_REASSOC,
+ SMFS_TYPE_DISASSOC_TX,
+ SMFS_TYPE_DISASSOC_RX,
+ SMFS_TYPE_DEAUTH_TX,
+ SMFS_TYPE_DEAUTH_RX,
+ SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+ /* Tx IQ/LO calibration coeffs */
+ int16 tx_iqlocal_a;
+ int16 tx_iqlocal_b;
+ int8 tx_iqlocal_ci;
+ int8 tx_iqlocal_cq;
+ int8 tx_iqlocal_di;
+ int8 tx_iqlocal_dq;
+ int8 tx_iqlocal_ei;
+ int8 tx_iqlocal_eq;
+ int8 tx_iqlocal_fi;
+ int8 tx_iqlocal_fq;
+
+ /* Rx IQ calibration coeffs */
+ int16 rx_iqcal_a;
+ int16 rx_iqcal_b;
+
+ uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */
+ uint32 papd_epsilon_table[64]; /* PAPD epsilon table */
+ int16 papd_epsilon_offset; /* PAPD epsilon offset */
+ uint8 curr_tx_pwrindex; /* Tx power index */
+ int8 idle_tssi; /* Idle TSSI */
+ int8 est_tx_pwr; /* Estimated Tx Power (dB) */
+ int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */
+ uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */
+ uint16 init_gaincode; /* initgain required for ACI */
+ int8 estirr_tx;
+ int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+ int version;
+ int8 num_phy_cores; /* number of cores */
+ int8 curr_temperature; /* on-chip temperature sensor reading */
+ chanspec_t chspec; /* channspec for this state */
+ bool aci_state; /* ACI state: ON/OFF */
+ uint16 crsminpower; /* crsminpower required for ACI */
+ uint16 crsminpowerl; /* crsminpowerl required for ACI */
+ uint16 crsminpoweru; /* crsminpoweru required for ACI */
+ wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif /* PHYMON */
+
+/* discovery state */
+typedef struct wl_p2p_disc_st {
+ uint8 state; /* see state */
+ chanspec_t chspec; /* valid in listen state */
+ uint16 dwell; /* valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN 1
+#define WL_P2P_DISC_ST_SEARCH 2
+
+/* scan request */
+typedef struct wl_p2p_scan {
+ uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */
+ uint8 reserved[3];
+ /* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/* i/f request */
+typedef struct wl_p2p_if {
+ struct ether_addr addr;
+ uint8 type; /* see i/f type */
+ chanspec_t chspec; /* for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT 0
+#define WL_P2P_IF_GO 1
+#define WL_P2P_IF_DYNBCN_GO 2
+#define WL_P2P_IF_DEV 3
+
+/* i/f query */
+typedef struct wl_p2p_ifq {
+ uint bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/* OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+ uint8 ops; /* 0: disable 1: enable */
+ uint8 ctw; /* >= 10 */
+} wl_p2p_ops_t;
+
+/* absence and presence request */
+typedef struct wl_p2p_sched_desc {
+ uint32 start;
+ uint32 interval;
+ uint32 duration;
+ uint32 count; /* see count */
+} wl_p2p_sched_desc_t;
+
+/* count */
+#define WL_P2P_SCHED_RSVD 0
+#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
+
+typedef struct wl_p2p_sched {
+ uint8 type; /* see schedule type */
+ uint8 action; /* see schedule action */
+ uint8 option; /* see schedule option */
+ wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+#define WL_P2P_SCHED_FIXED_LEN 3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
+#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with
+ * start being an offset of the 'current' TSF
+ */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe
+ * requests
+ */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */
+
+#ifdef WLNIC
+/* nic_cnx iovar */
+typedef struct wl_nic_cnx {
+ uint8 opcode;
+ struct ether_addr addr;
+ /* the following are valid for WL_NIC_CNX_CONN */
+ uint8 SSID_len;
+ uint8 SSID[32];
+ struct ether_addr abssid;
+ uint8 join_period;
+} wl_nic_cnx_t;
+
+/* opcode */
+#define WL_NIC_CNX_ADD 0 /* add NIC connection */
+#define WL_NIC_CNX_DEL 1 /* delete NIC connection */
+#define WL_NIC_CNX_IDX 2 /* query NIC connection index */
+#define WL_NIC_CNX_CONN 3 /* join/create network */
+#define WL_NIC_CNX_DIS 4 /* disconnect from network */
+
+/* nic_cfg iovar */
+typedef struct wl_nic_cfg {
+ uint8 version;
+ uint8 beacon_mode;
+ uint16 beacon_interval;
+ uint8 diluted_beacon_period;
+ uint8 repeat_EQC;
+ uint8 scan_length;
+ uint8 scan_interval;
+ uint8 scan_probability;
+ uint8 awake_window_length;
+ int8 TSF_correction;
+ uint8 ASID;
+ uint8 channel_usage_mode;
+} wl_nic_cfg_t;
+
+/* version */
+#define WL_NIC_CFG_VER 1
+
+/* beacon_mode */
+#define WL_NIC_BCN_NORM 0
+#define WL_NIC_BCN_DILUTED 1
+
+/* channel_usage_mode */
+#define WL_NIC_CHAN_STATIC 0
+#define WL_NIC_CHAN_CYCLE 1
+
+/* nic_cfg iovar */
+typedef struct wl_nic_frm {
+ uint8 type;
+ struct ether_addr da;
+ uint8 body[1];
+} wl_nic_frm_t;
+
+/* type */
+#define WL_NIC_FRM_MYNET 1
+#define WL_NIC_FRM_ACTION 2
+
+/* i/f query */
+typedef struct wl_nic_ifq {
+ uint bsscfgidx;
+ char ifname[BCM_MSG_IFNAME_MAX];
+} wl_nic_ifq_t;
+#endif /* WLNIC */
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE 0x77
+#define BCM_ACTION_RFAWARE_DCS 0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR 0x1
+#define BCM_DCS_UNKNOWN 0xFF
+
+typedef struct wl_bcmdcs_data {
+ uint reason;
+ chanspec_t chspec;
+} wl_bcmdcs_data_t;
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED 0
+#define PSTA_MODE_PROXY 1
+#define PSTA_MODE_REPEATER 2
+
+
+/* NAT configuration */
+typedef struct {
+ uint32 ipaddr; /* interface ip address */
+ uint32 ipaddr_mask; /* interface ip address mask */
+ uint32 ipaddr_gateway; /* gateway ip address */
+ uint8 mac_gateway[6]; /* gateway mac address */
+ uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */
+ uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */
+ uint8 GUID[38]; /* interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+ uint op; /* operation code */
+ bool pub_if; /* set for public if, clear for private if */
+ nat_if_info_t if_info; /* interface info */
+} nat_cfg_t;
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
+#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED 1 /* NAT is enabled */
+#define NAT_STATE_DISABLED 2 /* NAT is disabled */
+
+typedef struct {
+ int state; /* NAT state returned */
+} nat_state_t;
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS 0x0001
+
+/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
+/* NOTE(review): "RXRERODER" is a misspelling of "reorder"; the name is
+ * referenced elsewhere in the driver so it must stay as-is for ABI/API
+ * compatibility.
+ */
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040
+#endif /* PROP_TXSTATUS */
+
+#define BTA_STATE_LOG_SZ 64
+
+/* BTAMP Statemachine states */
+enum {
+ HCIReset = 1,
+ HCIReadLocalAMPInfo,
+ HCIReadLocalAMPASSOC,
+ HCIWriteRemoteAMPASSOC,
+ HCICreatePhysicalLink,
+ HCIAcceptPhysicalLinkRequest,
+ HCIDisconnectPhysicalLink,
+ HCICreateLogicalLink,
+ HCIAcceptLogicalLink,
+ HCIDisconnectLogicalLink,
+ HCILogicalLinkCancel,
+ HCIAmpStateChange,
+ HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+ uint32 txfifobmp;
+ uint32 hwtxfifoflush;
+ struct ether_addr ea;
+} flush_txfifo_t;
+
+#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */
+
+enum {
+ SPATIAL_MODE_2G_IDX = 0,
+ SPATIAL_MODE_5G_LOW_IDX,
+ SPATIAL_MODE_5G_MID_IDX,
+ SPATIAL_MODE_5G_HIGH_IDX,
+ SPATIAL_MODE_5G_UPPER_IDX,
+ SPATIAL_MODE_MAX_IDX
+};
+
+/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+ int num; /* Number of memory pools */
+ bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER 0x0001
+#define IPV4_NETBT_FILTER 0x0002
+#define IPV4_LLMNR_FILTER 0x0004
+#define IPV4_SSDP_FILTER 0x0008
+#define IPV4_WSD_FILTER 0x0010
+#define IPV6_NETBT_FILTER 0x0200
+#define IPV6_LLMNR_FILTER 0x0400
+#define IPV6_SSDP_FILTER 0x0800
+#define IPV6_WSD_FILTER 0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE 0x00000001
+
+typedef struct {
+ uint32 ipaddr;
+ uint32 ipaddr_netmask;
+ uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Only support traffic clasification */
+#define TRF_MGMT_FLAG_DISABLE_PRIORITY_TAGGING 0x0004 /* Don't override packet's priority */
+
+/* Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+ trf_mgmt_priority_low = 0, /* Maps to 802.1p BO */
+ trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */
+ trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */
+ trf_mgmt_priority_invalid = (trf_mgmt_priority_high + 1)
+} trf_mgmt_priority_class_t;
+
+/* Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+ uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */
+ uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */
+ uint32 host_ip_addr; /* My IP address to determine subnet */
+ uint32 host_subnet_mask; /* My subnet mask */
+ uint32 downlink_bandwidth; /* In units of kbps */
+ uint32 uplink_bandwidth; /* In units of kbps */
+ uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed tx bandwidth */
+ uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/* Traffic management filter */
+typedef struct trf_mgmt_filter {
+ struct ether_addr dst_ether_addr; /* His L2 address */
+ uint32 dst_ip_addr; /* His IP address */
+ uint16 dst_port; /* His L4 port */
+ uint16 src_port; /* My L4 port */
+ uint16 prot; /* L4 protocol (only TCP or UDP) */
+ uint16 flags; /* TBD. For now, this must be zero. */
+ trf_mgmt_priority_class_t priority; /* Priority for filtered packets */
+} trf_mgmt_filter_t;
+
+/* Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list {
+ uint32 num_filters;
+ trf_mgmt_filter_t filter[1];
+} trf_mgmt_filter_list_t;
+
+/* Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+ uint32 maximum_bytes_per_second;
+ uint32 maximum_bytes_per_sampling_period;
+ uint32 total_bytes_consumed_per_second;
+ uint32 total_bytes_consumed_per_sampling_period;
+ uint32 total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/* Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+	/* NOTE(review): "gauranteed" is misspelled but is part of the
+	 * published struct layout/name - do not rename without auditing
+	 * all users.
+	 */
+	uint32 gauranteed_bandwidth_percentage;
+	uint32 guaranteed_bytes_per_second;
+	uint32 guaranteed_bytes_per_sampling_period;
+	uint32 num_bytes_produced_per_second;
+	uint32 num_bytes_consumed_per_second;
+	uint32 num_queued_packets;	/* Number of packets in queue */
+	uint32 num_queued_bytes;	/* Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/* Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+ trf_mgmt_global_info_t tx_global_shaping_info;
+ trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_global_info_t rx_global_shaping_info;
+ trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/* Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+ uint32 num_processed_packets; /* Number of packets processed */
+ uint32 num_processed_bytes; /* Number of bytes processed */
+ uint32 num_discarded_packets; /* Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/* Traffic management statisics array */
+typedef struct trf_mgmt_stats_array {
+ trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+typedef struct powersel_params {
+ /* LPC Params exposed via IOVAR */
+ int32 tp_ratio_thresh; /* Throughput ratio threshold */
+ uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */
+ uint8 pwr_stab_thresh; /* Number of successes before power step down */
+ uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */
+} powersel_params_t;
+
+#endif /* _wlioctl_h_ */
diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c
new file mode 100644
index 00000000000000..ef9c733742af19
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/linux_osl.c
@@ -0,0 +1,1053 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c 311099 2012-01-27 14:46:59Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+#ifdef BCMASSERT_LOG
+#include <bcm_assert_log.h>
+#endif
+
+
+#include <linux/fs.h>
+
+/* Max re-reads of a PCI config word that came back as all-ones
+ * (the pattern a not-yet-ready bus returns).
+ */
+#define PCI_CFG_RETRY 10
+
+/* Stamped into osl_info.magic so stale/corrupt handles can be detected */
+#define OS_HANDLE_MAGIC 0x1234abcd
+#define BCM_MEM_FILENAME_LEN 24	/* max stored source-file name in mem debug links */
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+/* Preallocated-buffer pool: STATIC_BUF_MAX_NUM slots of STATIC_BUF_SIZE
+ * bytes each, handed out by osl_malloc() for mid-size requests.
+ */
+#define STATIC_BUF_MAX_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+	struct semaphore static_sem;	/* guards buf_use[] */
+	unsigned char *buf_ptr;		/* start of the slot area */
+	unsigned char buf_use[STATIC_BUF_MAX_NUM];	/* 1 = slot in use */
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define STATIC_PKT_MAX_NUM 8
+
+/* Preallocated skb pool: 4K and 8K buffers, tracked by pkt_use[]
+ * (first STATIC_PKT_MAX_NUM entries = 4K pool, rest = 8K pool).
+ */
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
+	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+	struct semaphore osl_pkt_sem;	/* guards pkt_use[] */
+	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+#endif
+
+/* Node of the doubly linked list used by the memory debug build to track
+ * live allocations (size and allocation site).
+ */
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint size;			/* bytes requested */
+	int line;			/* allocation site line */
+	void *osh;			/* owning OS handle */
+	char file[BCM_MEM_FILENAME_LEN];	/* allocation site file (truncated) */
+} bcm_mem_link_t;
+
+/* Private OS abstraction handle backing the opaque osl_t. */
+struct osl_info {
+	osl_pubinfo_t pub;		/* public part, shared with callers */
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;		/* fast skb recycle pool */
+#endif
+	uint magic;			/* OS_HANDLE_MAGIC while valid */
+	void *pdev;			/* bus device (e.g. struct pci_dev *) */
+	atomic_t malloced;		/* bytes outstanding via osl_malloc() */
+	uint failed;			/* count of failed osl_malloc() calls */
+	uint bustype;			/* PCI_BUS/SDIO_BUS/... from attach */
+	bcm_mem_link_t *dbgmem_list;	/* debug allocation list head */
+	spinlock_t dbgmem_lock;		/* guards dbgmem_list */
+	spinlock_t pktalloc_lock;	/* guards pub.pktalloced counter */
+};
+
+
+
+
+/* Assert behavior selector; 0/FALSE here. Exact semantics of the other
+ * values are defined by the consumers of this global - not visible in
+ * this file.
+ */
+uint32 g_assert_type = FALSE;
+
+/* Translation table from Broadcom BCME_xxx error codes to Linux errno
+ * values. Indexed by the positive magnitude of the code (-bcmerror),
+ * so entry 0 is success and the table must have exactly
+ * ABS(BCME_LAST)+1 entries - enforced by the #if below and by the
+ * ASSERT in osl_attach().
+ */
+static int16 linuxbcmerrormap[] =
+{ 0,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -E2BIG,
+ -E2BIG,
+ -EBUSY,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EFAULT,
+ -ENOMEM,
+ -EOPNOTSUPP,
+ -EMSGSIZE,
+ -EINVAL,
+ -EPERM,
+ -ENOMEM,
+ -EINVAL,
+ -ERANGE,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EINVAL,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -EIO,
+ -EIO,
+ -ENODEV,
+ -EINVAL,
+ -ENODATA,
+
+
+
+#if BCME_LAST != -42
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+
+
+/*
+ * Translate a BCME_xxx error code into a Linux errno value.
+ * Positive codes are treated as success; codes below BCME_LAST (i.e.
+ * unknown) collapse to the generic BCME_ERROR slot.
+ */
+int
+osl_error(int bcmerror)
+{
+	int idx;
+
+	if (bcmerror > 0)
+		idx = 0;		/* success */
+	else if (bcmerror < BCME_LAST)
+		idx = -BCME_ERROR;	/* out of range -> generic error */
+	else
+		idx = -bcmerror;
+
+	/* table is indexed by the positive magnitude of the code */
+	return linuxbcmerrormap[idx];
+}
+
+/* Platform hook returning preallocated memory sections
+ * (only used with CONFIG_DHD_USE_STATIC_BUF).
+ */
+extern uint8* dhd_os_prealloc(void *osh, int section, int size);
+
+/*
+ * Allocate and initialise an OS abstraction handle.
+ *
+ * pdev    - bus device handle (e.g. struct pci_dev *), stored for later use
+ * bustype - PCI_BUS/SI_BUS/...; decides whether the bus is memory mapped
+ * pkttag  - whether the skb control buffer is used as a packet tag
+ *           (cleared on native<->osl packet conversion)
+ *
+ * Returns the handle, or NULL when allocation fails.
+ */
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	osl_t *osh;
+
+	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+	ASSERT(osh);
+	if (osh == NULL)
+		return NULL;	/* fix: don't bzero/dereference a failed allocation */
+
+	bzero(osh, sizeof(osl_t));
+
+	/* error map must cover every BCME_xxx code (see bcmutils.h) */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->magic = OS_HANDLE_MAGIC;
+	atomic_set(&osh->malloced, 0);
+	osh->failed = 0;
+	osh->dbgmem_list = NULL;
+	spin_lock_init(&(osh->dbgmem_lock));
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+
+	switch (bustype) {
+	case PCI_BUS:
+	case SI_BUS:
+	case PCMCIA_BUS:
+		osh->pub.mmbus = TRUE;
+		break;
+	case JTAG_BUS:
+	case SDIO_BUS:
+	case USB_BUS:
+	case SPI_BUS:
+	case RPC_BUS:
+		osh->pub.mmbus = FALSE;
+		break;
+	default:
+		ASSERT(FALSE);
+		break;
+	}
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+	if (!bcm_static_buf) {
+		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
+			STATIC_BUF_TOTAL_LEN))) {
+			printk("can not alloc static buf!\n");
+		} else {
+			/* fix: was %x on a pointer; use %p */
+			printk("alloc static buf at %p!\n", bcm_static_buf);
+
+			/* fix: only initialise the pool on success - the old
+			 * code fell through and called sema_init() on a NULL
+			 * bcm_static_buf when prealloc failed.
+			 */
+			sema_init(&bcm_static_buf->static_sem, 1);
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+	}
+
+	/* fix: bcm_static_skb lives inside the static buf area, so only set
+	 * it up when bcm_static_buf exists and the skb array was prealloced.
+	 */
+	if (bcm_static_buf && !bcm_static_skb) {
+		int i;
+		void *skb_buff_ptr = 0;
+		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+		skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
+
+		if (skb_buff_ptr) {
+			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
+			for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
+				bcm_static_skb->pkt_use[i] = 0;
+
+			sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+		} else {
+			/* no prealloced skb array: leave the pool disabled
+			 * instead of bcopy()ing from NULL
+			 */
+			bcm_static_skb = NULL;
+		}
+	}
+#endif
+
+	spin_lock_init(&(osh->pktalloc_lock));
+
+	return osh;
+}
+
+/*
+ * Tear down a handle created by osl_attach(). A NULL handle is ignored.
+ */
+void
+osl_detach(osl_t *osh)
+{
+	if (!osh)
+		return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	/* drop (but do not free - they are preallocated) the pool pointers */
+	bcm_static_buf = 0;
+	bcm_static_skb = 0;
+#endif
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	kfree(osh);
+}
+
+/* Single skb allocation point so the GFP policy lives in one place. */
+static struct sk_buff *osl_alloc_skb(unsigned int len)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	return __dev_alloc_skb(len, GFP_ATOMIC);
+#else
+	/* older kernels: dev_alloc_skb() is already GFP_ATOMIC */
+	return dev_alloc_skb(len);
+#endif
+}
+
+#ifdef CTFPOOL
+
+/* Pool locking: irqsave variant when the pool can be touched from hard
+ * irq context (CTFPOOL_SPINLOCK), otherwise bottom-half disabling is
+ * enough. 'flags' is unused in the _bh variant.
+ */
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
+#endif
+
+/*
+ * Allocate one skb and push it onto the fast pool's free list.
+ * Returns the skb, or NULL when the pool is full/absent or the
+ * allocation failed. Takes the pool lock for the whole operation.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	/* pool already at capacity */
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* NOTE: allocation happens under the pool lock (GFP_ATOMIC path) */
+	skb = osl_alloc_skb(osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* push onto the singly linked free list */
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	/* remember the owning pool so osl_pktfastfree() can find it */
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	/* mark the skb as pool-managed */
+	PKTFAST(osh, skb) = FASTBUF;
+
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	return skb;
+}
+
+
+/*
+ * Top up the pool with up to 'thresh' buffers, bounded by the number of
+ * outstanding refill requests accumulated by osl_pktdup().
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	/* note: refills is decremented even if osl_ctfpool_add() fails;
+	 * the request is simply dropped in that case
+	 */
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+
+/*
+ * Create the fast skb pool and populate it with 'numobj' buffers of
+ * 'size' bytes each. Returns 0 on success, -1 on failure.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
+	ASSERT(osh->ctfpool);
+	if (osh->ctfpool == NULL)
+		return -1;	/* fix: don't bzero/use a failed allocation */
+	bzero(osh->ctfpool, sizeof(ctfpool_t));
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		/* osl_ctfpool_add() counts a "fast free"; undo that for the
+		 * initial population so the counters start at zero.
+		 */
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Free every buffer on the pool's free list, then the pool itself.
+ * Buffers currently handed out are not tracked here; the final ASSERT
+ * expects them all to have been returned first.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+
+	skb = osh->ctfpool->head;
+
+	/* walk and free the singly linked free list */
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+/*
+ * Dump pool counters into the caller-supplied bcmstrbuf.
+ *
+ * Fix: the original version also cleared the bcm_static_buf /
+ * bcm_static_skb globals here - an apparent copy/paste from
+ * osl_detach() that silently disabled the static buffer pools whenever
+ * stats were read. A stats query must not mutate allocator state, so
+ * that block is removed. The redundant post-dereference NULL ASSERT is
+ * gone too (the guard above already handles osh == NULL).
+ */
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	bb = b;
+	ASSERT(bb != NULL);
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+/*
+ * Pop a recycled skb from the fast pool and reset the fields a fresh
+ * dev_alloc_skb() would have initialised. Returns NULL when the pool is
+ * absent or empty (caller then falls back to a real allocation).
+ */
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif
+
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* pool buffers are fixed-size; the request must fit */
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	/* unlink the head of the free list */
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	/* re-initialise like a freshly allocated skb.
+	 * NOTE(review): the +16 offset presumably mirrors the headroom
+	 * reserved by dev_alloc_skb(); confirm it matches NET_SKB_PAD on
+	 * the target kernel.
+	 */
+	skb->next = skb->prev = NULL;
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	return skb;
+}
+#endif
+
+/*
+ * Convert an osl packet back to a native skb: clears the packet tag (if
+ * enabled) and removes every chained skb from the osl allocation count.
+ * The pointer itself is returned unchanged - osl packets ARE skbs.
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+#ifndef WL_UMK
+	struct sk_buff *nskb;
+	unsigned long flags;
+#endif
+
+	if (osh->pub.pkttag)
+		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
+
+#ifndef WL_UMK
+	/* walk the chain; each segment was counted separately */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		spin_lock_irqsave(&osh->pktalloc_lock, flags);
+		osh->pub.pktalloced--;
+		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+	}
+#endif
+	return (struct sk_buff *)pkt;
+}
+
+
+/*
+ * Mirror of osl_pkt_tonative(): adopt a native skb (chain) into the osl
+ * accounting and clear the packet tag area.
+ */
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+{
+#ifndef WL_UMK
+	struct sk_buff *nskb;
+	unsigned long flags;
+#endif
+
+	if (osh->pub.pkttag)
+		bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
+
+#ifndef WL_UMK
+	/* count every segment of the chain */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		spin_lock_irqsave(&osh->pktalloc_lock, flags);
+		osh->pub.pktalloced++;
+		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+	}
+#endif
+	return (void *)pkt;
+}
+
+
+/*
+ * Allocate a packet of 'len' bytes: fast pool first (CTFPOOL), then a
+ * regular skb. Returns NULL on failure. The data area is skb_put() to
+ * the full length immediately.
+ */
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+
+#ifdef CTFPOOL
+	/* try the recycle pool, fall back to a fresh allocation */
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
+#else
+	if ((skb = osl_alloc_skb(len))) {
+#endif
+		skb_put(skb, len);
+		skb->priority = 0;
+
+		spin_lock_irqsave(&osh->pktalloc_lock, flags);
+		osh->pub.pktalloced++;
+		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+	}
+
+	return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+/*
+ * Return an skb to its owning fast pool instead of freeing it: scrub the
+ * fields a recycled skb must not carry over, then push it on the pool's
+ * free list.
+ */
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif
+
+	/* NOTE(review): field names here (tstamp.tv.sec / stamp.tv_sec,
+	 * skb->dst) only exist on the specific kernel versions this driver
+	 * targets; verify against the build kernel.
+	 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	/* scrub state so the next user sees a clean skb */
+	skb->dev = NULL;
+	skb->dst = NULL;
+	memset(skb->cb, 0, sizeof(skb->cb));
+	skb->ip_summed = 0;
+	skb->destructor = NULL;
+
+	/* pool pointer was stashed at allocation time (osl_ctfpool_add) */
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	/* push back onto the free list */
+	CTFPOOL_LOCK(ctfpool, flags);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif
+
+
+/*
+ * Free a packet (chain). Each segment goes back to the fast pool when it
+ * is pool-managed and unshared, otherwise to the kernel; the osl
+ * allocation counter is decremented per segment. 'send' triggers the
+ * registered tx completion hook before freeing.
+ */
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+	unsigned long flags;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+	/* walk and free the chain one segment at a time */
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+#ifdef CTFPOOL
+		/* recycle only if pool-managed and we hold the last ref */
+		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
+			osl_pktfastfree(osh, skb);
+		else {
+#else
+		{
+#endif
+			/* destructor present -> may be freed from irq context */
+			if (skb->destructor)
+				dev_kfree_skb_any(skb);
+			else
+				dev_kfree_skb(skb);
+		}
+		spin_lock_irqsave(&osh->pktalloc_lock, flags);
+		osh->pub.pktalloced--;
+		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
+		skb = nskb;
+	}
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+/*
+ * Allocate a packet from the preallocated static skb pools (4K pool for
+ * requests up to a page, 8K pool otherwise). Requests larger than two
+ * pages, requests when the pools are exhausted, and requests made before
+ * the pool exists all fall back to the regular osl_pktget() path.
+ */
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+
+	/* fix: the pool may never have been set up (prealloc failed in
+	 * osl_attach) - fall back instead of dereferencing NULL.
+	 */
+	if (!bcm_static_skb)
+		return osl_pktget(osh, len);
+
+	if (len > (PAGE_SIZE*2)) {
+		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+		return osl_pktget(osh, len);
+	}
+
+	down(&bcm_static_skb->osl_pkt_sem);
+
+	if (len <= PAGE_SIZE) {
+		/* first choice: a 4K slot */
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if (i != STATIC_PKT_MAX_NUM) {
+			bcm_static_skb->pkt_use[i] = 1;
+			up(&bcm_static_skb->osl_pkt_sem);
+			skb = bcm_static_skb->skb_4k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+			return skb;
+		}
+	}
+
+	/* 4K pool exhausted, or request needs more than a page: 8K pool
+	 * (usage flags for the 8K pool live in the second half of pkt_use[])
+	 */
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
+			break;
+	}
+
+	if (i != STATIC_PKT_MAX_NUM) {
+		bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
+		up(&bcm_static_skb->osl_pkt_sem);
+		skb = bcm_static_skb->skb_8k[i];
+		skb->tail = skb->data + len;
+		skb->len = len;
+		return skb;
+	}
+
+	up(&bcm_static_skb->osl_pkt_sem);
+	printk("%s: all static pkt in use!\n", __FUNCTION__);
+	return osl_pktget(osh, len);
+}
+
+/*
+ * Release a packet obtained from osl_pktget_static(): if it belongs to
+ * one of the static pools, just clear its in-use flag; otherwise hand it
+ * to the normal osl_pktfree() path.
+ */
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+
+	/* fix: guard against a pool that was never initialised */
+	if (bcm_static_skb) {
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (p == bcm_static_skb->skb_4k[i]) {
+				down(&bcm_static_skb->osl_pkt_sem);
+				bcm_static_skb->pkt_use[i] = 0;
+				up(&bcm_static_skb->osl_pkt_sem);
+				return;
+			}
+		}
+
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (p == bcm_static_skb->skb_8k[i]) {
+				down(&bcm_static_skb->osl_pkt_sem);
+				bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+				up(&bcm_static_skb->osl_pkt_sem);
+				return;
+			}
+		}
+	}
+
+	/* fix: was 'return osl_pktfree(...)' - returning a void expression
+	 * from a void function is an ISO C constraint violation.
+	 */
+	osl_pktfree(osh, p, send);
+}
+#endif
+
+/*
+ * Read a 32-bit PCI config register. An all-ones result usually means
+ * the device is not ready, so the read is retried up to PCI_CFG_RETRY
+ * times. Only size == 4 is supported.
+ */
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only dword accesses are implemented */
+	ASSERT(size == 4);
+
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+	/* may still be 0xffffffff if every retry failed */
+	return (val);
+}
+
+/*
+ * Write a 32-bit PCI config register. Writes to the BAR0 window register
+ * are read back and retried until they stick (hardware workaround).
+ * Only size == 4 is supported.
+ */
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only dword accesses are implemented */
+	ASSERT(size == 4);
+
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		/* only the BAR0 window write needs verification */
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+
+}
+
+
+/* Bus number of the PCI device stored at attach time. */
+uint
+osl_pci_bus(osl_t *osh)
+{
+	struct pci_dev *dev;
+
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	dev = (struct pci_dev *)osh->pdev;
+	return dev->bus->number;
+}
+
+
+/* Slot number of the PCI device stored at attach time. */
+uint
+osl_pci_slot(osl_t *osh)
+{
+	struct pci_dev *dev;
+
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	dev = (struct pci_dev *)osh->pdev;
+	return PCI_SLOT(dev->devfn);
+}
+
+
+/* Raw pci_dev pointer stored at attach time. */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return osh->pdev;
+}
+
+/* PCMCIA attribute-space access: intentionally a no-op on this platform;
+ * the wrappers below exist only to satisfy the OSL interface.
+ */
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+/*
+ * Allocate 'size' bytes. With CONFIG_DHD_USE_STATIC_BUF, requests in
+ * [PAGE_SIZE, STATIC_BUF_SIZE] are served from the preallocated slot
+ * pool when a slot is free; everything else (and pool exhaustion) falls
+ * through to atomic kmalloc. Accounts bytes in osh->malloced and counts
+ * failures in osh->failed. Returns NULL on failure.
+ */
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+
+	/* osh may legitimately be NULL here (early boot-strap allocations) */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			down(&bcm_static_buf->static_sem);
+
+			/* find a free slot */
+			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+
+			if (i == STATIC_BUF_MAX_NUM)
+			{
+				up(&bcm_static_buf->static_sem);
+				printk("all static buff in use!\n");
+				goto original;
+			}
+
+			bcm_static_buf->buf_use[i] = 1;
+			up(&bcm_static_buf->static_sem);
+
+			/* static slots are handed out zeroed */
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				atomic_add(size, &osh->malloced);
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif
+
+	/* regular path: atomic allocation (callers may be in irq context) */
+	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh)
+		atomic_add(size, &osh->malloced);
+
+	return (addr);
+}
+
+/*
+ * Free memory obtained from osl_malloc(). Pointers inside the static
+ * buffer pool only have their slot flag cleared (the pool itself is
+ * never freed); anything else goes to kfree(). 'size' must match the
+ * original request - it is subtracted from the accounting counter.
+ */
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		/* does addr fall inside the static pool's address range? */
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+			down(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			up(&bcm_static_buf->static_sem);
+
+			if (osh) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				atomic_sub(size, &osh->malloced);
+			}
+			return;
+		}
+	}
+#endif
+	if (osh) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+		atomic_sub(size, &osh->malloced);
+	}
+	kfree(addr);
+}
+
+/* Bytes currently accounted as allocated through osl_malloc(). */
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC));
+	return atomic_read(&osh->malloced);
+}
+
+/* Number of osl_malloc() calls that have failed since attach. */
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC));
+	return osh->failed;
+}
+
+
+/* Alignment guaranteed by osl_dma_alloc_consistent(). */
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+/*
+ * Allocate DMA-coherent memory. 'alloced' returns the (possibly padded)
+ * size actually requested; the bus address comes back in *pap.
+ * NOTE(review): the ISALIGNED() test checks the compile-time constant
+ * DMA_CONSISTENT_ALIGN against 'align', then pads by 'align' - verify
+ * this padding scheme actually guarantees the caller's alignment.
+ */
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
+{
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+	return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+/* Free memory from osl_dma_alloc_consistent(); size/pa must match. */
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+/* Map a buffer for streaming DMA; direction DMA_TX = to device. */
+uint BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+/* Unmap a buffer mapped with osl_dma_map(); arguments must match. */
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+#if defined(BCMASSERT_LOG)
+/*
+ * Record a failed assertion ("expr": file, line) in the bcm assert log.
+ * The directory part of the path is stripped to keep entries short.
+ */
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+	char tempbuf[256];
+	const char *basename;
+
+	/* keep only the file name component of the path */
+	basename = strrchr(file, '/');
+	if (basename)
+		basename++;
+	else
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	/* fix: bound the format by the real buffer size - the original
+	 * hard-coded 64 and wasted/truncated the rest of tempbuf.
+	 */
+	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+
+	bcm_assert_log(tempbuf);
+#endif
+
+}
+#endif
+
+/*
+ * Busy-wait for 'usec' microseconds, split into <=1000us chunks so each
+ * individual udelay() stays within the range the kernel supports.
+ */
+void
+osl_delay(uint usec)
+{
+	while (usec) {
+		uint chunk = MIN(usec, 1000);
+
+		udelay(chunk);
+		usec -= chunk;
+	}
+}
+
+
+
+/*
+ * Clone a packet (shared data, separate skb header). The clone is
+ * deliberately NOT pool-managed - a cloned fast buffer must not be
+ * recycled while its twin is live - so both copies are demoted from the
+ * fast pool and a refill request is queued instead. Returns NULL if the
+ * clone fails.
+ */
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+	void * p;
+	unsigned long irqflags;
+
+	/* flush any deferred pool mapping state before cloning */
+	PKTCTFMAP(osh, skb);
+
+	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		/* clear the fast flag on both skbs and ask the pool to
+		 * replenish the buffer we just took out of circulation
+		 */
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif
+
+	/* the clone gets a clean packet tag */
+	if (osh->pub.pkttag)
+		bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+	/* account for the new packet */
+	spin_lock_irqsave(&osh->pktalloc_lock, irqflags);
+	osh->pub.pktalloced++;
+	spin_unlock_irqrestore(&osh->pktalloc_lock, irqflags);
+	return (p);
+}
+
+
+
+
+
+
+
+/*
+ * Open a firmware/nvram image file for reading. Returns an opaque
+ * handle (struct file *) or NULL on failure.
+ */
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+
+	/* filp_open() returns an ERR_PTR, not NULL, on failure -
+	 * normalise so callers only need a NULL check
+	 */
+	if (IS_ERR(fp))
+		fp = NULL;
+
+	return fp;
+}
+
+/*
+ * Read up to 'len' bytes from the image into 'buf', advancing the file
+ * position. Returns the number of bytes read, 0 at EOF or for a NULL
+ * image, negative errno on error.
+ * NOTE(review): this uses the old kernel_read(fp, pos, buf, len)
+ * signature; newer kernels take (fp, buf, len, &pos).
+ */
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+/* Close a handle from osl_os_open_image(); NULL is ignored. */
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c
new file mode 100644
index 00000000000000..68cfcb27a9c444
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/sbutils.c
@@ -0,0 +1,1001 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c 310902 2012-01-26 19:45:33Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+ uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+/*
+ * Read a 32-bit sbconfig register.  On PCMCIA the 12th address bit is
+ * supplied through the MEM_SEG attribute register (the CF window is only
+ * 11 bits wide), so MEM_SEG is set around the access, bit 11 is masked
+ * out of the address, and interrupts are blocked for the duration.
+ */
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+
+	/*
+	 * compact flash only has 11 bits address, while we needs 12 bits address.
+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+/*
+ * Write a 32-bit sbconfig register.  Uses the same PCMCIA MEM_SEG
+ * window trick as sb_read_sbreg(); on the PCMCIA bus the 32-bit value
+ * is additionally split into two 16-bit writes, each preceded by a
+ * dummy read (hardware ordering requirement on that bus).
+ */
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+
+	/*
+	 * compact flash only has 11 bits address, while we needs 12 bits address.
+	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
+	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
+	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		/* low half-word then high half-word, each after a dummy read */
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+/* Return the core id (sbidhigh core-code field) of the current core. */
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+
+	return (R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
+}
+
+/*
+ * Return the chipcommon sbflagst register (backplane interrupt flag
+ * status).  Temporarily switches focus to the chipcommon core with
+ * interrupts off, then restores the original core before returning.
+ */
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii;
+	void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	sii = SI_INFO(sih);
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+/* Return the target-port slave flag number of the current core. */
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+
+	return (R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK);
+}
+
+/* Program the current core's interrupt vector; siflag == -1 clears it. */
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+	uint32 vec = (siflag == -1) ? 0 : (1 << siflag);
+
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* Map backplane core address 'sba' to its core index, or BADIDX. */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint idx;
+
+	for (idx = 0; idx < sii->numcores; idx++) {
+		if (sii->coresba[idx] == sba)
+			return idx;
+	}
+
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		/* on the native bus, read it from our own sbadmatch0 */
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		/* the BAR0 window points at the current core */
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		/* assemble address bits 12..31 from the three attr registers */
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* the current mapping itself doubles as the core address */
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+/* Return the vendor code (sbidhigh vendor field) of the current core. */
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+
+	return (R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT;
+}
+
+/* Return the core revision of the current core (decoded from sbidhigh). */
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+	uint sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return SBCOREREV(sbidh);
+}
+
+/* Set core-specific control flags, write-only: unlike sb_core_cflags()
+ * the result is not read back and nothing is returned.
+ */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* Set/clear core-specific control flags (SICF field of sbtmstatelow).
+ * Returns the updated flag field; mask == val == 0 is a pure read.
+ */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* return the new value
+	 * for write operation, the following readback ensures the completion of write opration.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* Set/clear core-specific status flags (SISF field of sbtmstatehigh).
+ * Only bits within SISF_CORE_BITS may be masked; returns the new field.
+ */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+/* A core is "up" iff reset and reject are deasserted and its clock is on. */
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+	uint32 tml = R_SBREG(sii, &sb->sbtmstatelow);
+
+	return (tml & (SBTML_RESET | SBTML_REJ_MASK |
+	               (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	       (SICF_CLOCK_EN << SBTML_SICF_SHIFT);
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;	/* TRUE when no core switch is needed */
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are at either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13 at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		/* slow path: must actually switch focus to the target core */
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			/* NOTE(review): the 5354 watchdog is deliberately not
+			 * read back — register appears read-sensitive; confirm
+			 */
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES 2	/* root bus + at most one bridged bus */
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;	/* cores found on bridged sub-buses */
+	uint i;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			sii->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		sii->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (sii->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6)))
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			else {
+				/* Older chips: chipid has no core count; use a table */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (sii->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			/* only recurse into enumeration-space buses we have
+			 * not already visited
+			 */
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			/* bridged cores were counted in the chip total */
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii;
+	uint32 origsba;
+	sbconfig_t *sb;
+
+	/* NOTE(review): 'devid' is unused here; presumably kept for API
+	 * symmetry with the AI-interconnect scan — confirm
+	 */
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* record the Sonics backplane revision from sbidlow */
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info and validate it later till we know
+	 * for sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	/* no such core: leave focus unchanged and report failure */
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	/* returns the (possibly newly mapped) register base of the core */
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	uint32 sbaddr = sii->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		regs = sii->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		/* program address bits 12..31 into the three attr registers */
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!sii->regs[coreidx]) {
+			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(sii->regs[coreidx]));
+		}
+		regs = sii->regs[coreidx];
+		break;
+
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of the sbadmatch0/1/2/3 register selected by 'asidx'
+ * for the current core, or NULL if 'asidx' is out of range (0..3).
+ */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm = &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm = &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm = &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm = &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		/* was 'return 0': use NULL for a pointer return */
+		return NULL;
+	}
+
+	return (addrm);
+}
+
+/* Number of address spaces in the current core, +1 for the enum space. */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	sbconfig_t *sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Base address of the nth (asidx = 0..3) address space of the current core. */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	return sb_base(R_SBREG(sii, sb_admatch(sii, asidx)));
+}
+
+/* Size of the nth (asidx = 0..3) address space of the current core. */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	return sb_size(R_SBREG(sii, sb_admatch(sii, asidx)));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ origidx = sii->curidx;
+ ASSERT(GOODIDX(origidx));
+
+ INTR_OFF(sii, intr_val);
+
+ /* switch over to chipcommon core if there is one, else use pci */
+ if (sii->pub.ccrev != NOREV) {
+ chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(ccregs != NULL);
+
+ /* do the buffer registers update */
+ W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+ W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+ } else
+ ASSERT(0);
+
+ /* restore core index */
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Put the current core into reset with 'bits' left in its SICF field.
+ * Sequence: assert target reject, wait for busy to clear, optionally
+ * assert initiator reject, then reset+reject with clocks forced on,
+ * and finally leave reset and reject asserted with clocks off.
+ * No-op if the core is already in reset.
+ */
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	/* initiator cores also need their initiator reject asserted */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		BCM_REFERENCE(dummy);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* clear any stale serror / inband-error / timeout state */
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (Because of dma *any* core can access memeory).
+ *
+ * The routine uses the bus to decide who is the master:
+ * SI_BUS => mips
+ * JTAG_BUS => chipc
+ * PCI_BUS => pci or pcie
+ * PCMCIA_BUS => pcmcia
+ * SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == error */
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	/* only the request/service timeout bits may be set */
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+		case SDIO_BUS:
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	/* swap in the new timeout bits, returning the previous ones */
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
+
+/* Decode the base address encoded in an sbadmatch register value. */
+uint32
+sb_base(uint32 admatch)
+{
+	uint type = admatch & SBAM_TYPE_MASK;
+
+	ASSERT(type < 3);
+
+	switch (type) {
+	case 0:
+		return (admatch & SBAM_BASE0_MASK);
+	case 1:
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		return (admatch & SBAM_BASE1_MASK);
+	case 2:
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		return (admatch & SBAM_BASE2_MASK);
+	}
+
+	return 0;
+}
+
+/* Decode the region size encoded in an sbadmatch register value. */
+uint32
+sb_size(uint32 admatch)
+{
+	uint type = admatch & SBAM_TYPE_MASK;
+	uint32 size = 0;
+
+	ASSERT(type < 3);
+
+	switch (type) {
+	case 0:
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+		break;
+	case 1:
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+		break;
+	case 2:
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+		break;
+	}
+
+	return (size);
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c
new file mode 100644
index 00000000000000..2d2b7db68045e3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils.c
@@ -0,0 +1,2356 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c 323774 2012-03-27 00:16:45Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs);
+
+
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+int do_4360_pcie2_war = 0;
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
+ *
+ * Returns an opaque si_t* on success, NULL on allocation or attach failure.
+ * Ownership: the returned handle must be released with si_detach().
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ return (NULL);
+ }
+
+ /* si_doattach zeroes and fills *sii; on failure we must free it here */
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ /* cache caller's vars/varsz (both optional; may be NULL) */
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+ /* si_info_t begins with the public si_t, so the cast is safe */
+ return (si_t *)sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
+
+/* generic kernel variant of si_attach() */
+/* Attach to the single in-kernel backplane instance (static ksii).
+ * Idempotent after the first success; NOTE(review): no locking is visible
+ * here, so concurrent first-time callers are presumably excluded by the
+ * caller/init order -- confirm.
+ */
+si_t *
+si_kattach(osl_t *osh)
+{
+ static bool ksii_attached = FALSE;
+
+ if (!ksii_attached) {
+ void *regs;
+ /* temporary mapping of the chipcommon enumeration space */
+ regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+
+ if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+ SI_BUS, NULL,
+ osh != SI_OSH ? &ksii.vars : NULL,
+ osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+ wd_msticks = ALP_CLOCK / 1000;
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ ksii.pub.ccrev, wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+
+/* Pre-attach bus preparation: make backplane registers accessible before
+ * any core scan. For PCMCIA this just sets the memseg flag; for SDIO it
+ * forces the ALP (always-available low-power) clock via the function-1
+ * CHIPCLKCSR register and disables the extra SDIO pull-ups.
+ * Returns FALSE only on ALP-available timeout; all other paths succeed.
+ */
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+ /* need to set memseg flag for CF card first before any sb registers access */
+ if (BUSTYPE(bustype) == PCMCIA_BUS)
+ sii->memseg = TRUE;
+
+
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ /* settle delay after forcing ALP; 65us per vendor code */
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+
+ return TRUE;
+}
+
+/* Post-scan bus/core bookkeeping: read chipcommon rev/caps/PMU info,
+ * identify the host-bus core (PCI/PCIE/PCIE2/PCMCIA/SDIOD) and record its
+ * id/rev/index in sii->pub, recover the caller's original core index, and
+ * make sure any on-chip ARM is disabled on SDIO/SPI buses.
+ * NOTE(review): the incoming 'cc' argument is immediately overwritten by
+ * si_setcoreidx() below, so its value is never used -- the parameter is
+ * effectively write-only.
+ * Always returns TRUE.
+ */
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs)
+{
+ bool pci, pcie, pcie_gen2 = FALSE;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ /* switch focus to chipcommon (core 0); clobbers caller's cc */
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+ /* get chipcommon capabilites */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+ /* figure out bus/orignal core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = (uint)NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = (uint)NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ /* walk every discovered core once, noting bus-interface candidates */
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, sii->coresba[i], sii->regs[i]));
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ if (cid == PCIE2_CORE_ID)
+ pcie_gen2 = TRUE;
+ }
+ } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+ (cid == PCMCIA_CORE_ID)) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) ||
+ (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == sii->coresba[i])) ||
+ (regs == sii->regs[i]))
+ *origidx = i;
+ }
+
+ /* on PCI hosts prefer a plain PCI core over PCIE if both exist */
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+ /* 4712 small-package chip-rev<=3 workaround: force xtal as slow clock */
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+ (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+ OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+ /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
+ * already running.
+ */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+
+
+
+/* Core attach worker shared by si_attach() and si_kattach().
+ * Zeroes *sii, maps/locates chipcommon for the given bus, reads the chip id
+ * word, scans the interconnect (SB/AI/UBUS), then runs bus-core setup and a
+ * handful of chip-specific workarounds.
+ * Returns sii on success, NULL on failure. Does not free sii -- the caller
+ * owns the allocation (see si_attach).
+ */
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+ uint bustype, void *sdh, char **vars, uint *varsz)
+{
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ /* start from a clean slate; also clears all cached core info */
+ bzero((uchar*)sii, sizeof(si_info_t));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ /* save the host's current BAR0 window so setup can restore focus */
+ savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ if (!regs)
+ return NULL;
+ cc = (chipcregs_t *)regs;
+ } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+ cc = (chipcregs_t *)sii->curmap;
+ } else {
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ }
+
+ sih->bustype = bustype;
+ /* a compile-time BUSTYPE() restriction must match the runtime bus */
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+ bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ if (!cc) {
+ SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+ return NULL;
+ }
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as wll fill in chip id rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ /* 4329 rev0 reports a bogus package id; normalize to the 182-pin pkg */
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+ (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ }
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+ /* pass chipc address instead of original core base */
+ ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+ /* 4322 must not strap both SPROM and OTP simultaneously */
+ if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+ >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+ CST4322_SPROM_PRESENT))) {
+ SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+ return NULL;
+ }
+
+ /* assume current core is CC */
+ /* 4323x family ccrev 0x25 workaround: keep the OTP clock below 9 MHz */
+ if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+ (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+ if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+ uint clkdiv;
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ /* otp_clk_div is even number, 120/14 < 9mhz */
+ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+ }
+ OSL_DELAY(10);
+ }
+
+ if (bustype == PCI_BUS) {
+
+ }
+
+ /* NVRAM variables are not fetched in this build; pvars stays NULL */
+ pvars = NULL;
+ BCM_REFERENCE(pvars);
+
+
+
+ if (sii->pub.ccrev >= 20) {
+ uint32 gpiopullup = 0, gpiopulldown = 0;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ /* 4314/43142 has pin muxing, don't clear gpio bits */
+ if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+ gpiopullup |= 0x402e0;
+ gpiopulldown |= 0x20500;
+ }
+
+ W_REG(osh, &cc->gpiopullup, gpiopullup);
+ W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+ si_setcoreidx(sih, origidx);
+ }
+
+
+ /* clear any previous epidiag-induced target abort */
+ /* NOTE(review): si_taclear() is called inside ASSERT(), so it is skipped
+ * in non-debug builds; harmless today because si_taclear() below is a
+ * stub that always returns FALSE, but a side effect in ASSERT is fragile.
+ */
+ ASSERT(!si_taclear(sih, FALSE));
+
+ return (sii);
+
+exit:
+
+ return NULL;
+}
+
+/* may be called with core in reset */
+/* Tear down a handle from si_attach(): unmap per-core register windows
+ * (SI_BUS only) and free the si_info_t -- except for the static kernel
+ * handle ksii, which is never freed.
+ */
+void
+si_detach(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+
+
+ sii = SI_INFO(sih);
+
+ if (sii == NULL)
+ return;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (sii->regs[idx]) {
+ REG_UNMAP(sii->regs[idx]);
+ sii->regs[idx] = NULL;
+ }
+
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+/* Return the OS handle stored at attach time. */
+void *
+si_osh(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+/* Install the OS handle on a handle that does not have one yet.
+ * Overwriting an existing handle is flagged (error log + debug ASSERT)
+ * but still performed.
+ */
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+/* The function pointers are stored as void* and cast to the si_intrs*_t
+ * types; intr_arg is passed back to each callback. Must be called while
+ * the driver's own core (wl/et/il) is the current core, since that core id
+ * is captured as dev_coreid.
+ */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+ /* save current core id. when this function called, the current core
+ * must be the core which provides driver functions(il, et, wl, etc.)
+ */
+ sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+/* Unregister the interrupt callbacks; clearing intrsoff_fn alone disables
+ * the INTR_OFF/INTR_RESTORE machinery (the other pointers are left stale
+ * but unreachable).
+ */
+void
+si_deregister_intr_callback(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+}
+
+/* Return the backplane interrupt flag status word; SB chips delegate to
+ * sb_intflag(), AI chips read the OOB router STATUSA register, other
+ * interconnects are unsupported (debug ASSERT, returns 0).
+ */
+uint
+si_intflag(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_intflag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return R_REG(sii->osh, ((uint32 *)(uintptr)
+ (sii->oob_router + OOB_STATUSA)));
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: return the current core's interrupt flag number. */
+uint
+si_flag(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_flag(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: route backplane interrupt 'siflag' to the
+ * current core.
+ */
+void
+si_setint(si_t *sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_setint(sih, siflag);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+/* Return the core id of the currently selected core (cached, no register access). */
+uint
+si_coreid(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->coreid[sii->curidx];
+}
+
+/* Return the index of the currently selected core. */
+uint
+si_coreidx(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+/* i.e. how many earlier-indexed cores share the current core's id;
+ * the first instance of a core type is unit 0.
+ */
+uint
+si_coreunit(si_t *sih)
+{
+ si_info_t *sii;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ sii = SI_INFO(sih);
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (sii->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
+
+/* Interconnect dispatcher: return the current core's vendor id. */
+uint
+si_corevendor(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corevendor(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* True if chipcommon advertises a 64-bit backplane (CC_CAP_BKPLN64). */
+bool
+si_backplane64(si_t *sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+/* Interconnect dispatcher: return the current core's revision. */
+uint
+si_corerev(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corerev(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* return index of coreid or BADIDX if not found */
+/* 'coreunit' selects the Nth (0-based) instance of 'coreid' in scan order. */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii;
+ uint found;
+ uint i;
+
+ sii = SI_INFO(sih);
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+
+ return (BADIDX);
+}
+
+/* return list of found cores */
+/* Copies all discovered core ids into caller-supplied 'coreid' array,
+ * which must hold at least sii->numcores (<= SI_MAXCORES) entries.
+ * Returns the number of cores copied.
+ */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/* return current register mapping */
+/* i.e. the virtual address of the currently selected core's registers. */
+void *
+si_coreregs(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ *
+ * Returns the new core's register pointer, or NULL if no such
+ * (coreid, coreunit) pair exists.
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx))
+ return (NULL);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, idx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, idx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/* Interconnect dispatcher: switch focus to core index 'coreidx' and
+ * return its register pointer. Caller must hold interrupts off
+ * (same contract as si_setcore).
+ */
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, coreidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
+/* Turn off interrupt as required by sb_setcore, before switch core */
+/* Fast-path handles (SI_FAST) return fixed mappings for chipcommon and the
+ * bus core without touching interrupts; in that case *origidx is overloaded
+ * to hold the core id rather than an index (see comment below), and
+ * *intr_val is left unmodified. Pair every call with si_restore_core().
+ */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+ void *cc;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (SI_FAST(sii)) {
+ /* Overloading the origidx variable to remember the coreid,
+ * this works because the core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return (void *)CCREGS_FAST(sii);
+ else if (coreid == sih->buscoretype)
+ return (void *)PCIEREGS(sii);
+ }
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+/* Counterpart of si_switch_core(); 'coreid' is the *origidx value that
+ * si_switch_core returned (an index normally, a core id on the SI_FAST
+ * path, where nothing needs restoring).
+ */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+ return;
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Interconnect dispatcher: number of address spaces of the current core. */
+int
+si_numaddrspaces(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_numaddrspaces(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: backplane address of the current core's
+ * address space 'asidx'.
+ */
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspace(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspace(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: size in bytes of the current core's
+ * address space 'asidx'.
+ */
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspacesize(sih, asidx);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_addrspacesize(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Fetch address/size of the current core's extended address space 'asidx'
+ * into *addr/*size. AI interconnect only; other types report *size = 0
+ * (and leave *addr untouched).
+ */
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+ /* Only supported for SOCI_AI */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_coreaddrspaceX(sih, asidx, addr, size);
+ else
+ *size = 0;
+}
+
+/* Interconnect dispatcher: read-modify-write the current core's control
+ * flags -- bits in 'mask' are replaced by 'val'; returns the new value
+ * ((mask,val) == (0,0) is a pure read).
+ */
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_cflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Write-only variant of si_core_cflags(): same mask/val semantics but no
+ * read-back of the resulting value.
+ */
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_cflags_wo(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+/* Interconnect dispatcher: read-modify-write the current core's status
+ * flags; same (mask, val) contract as si_core_cflags().
+ */
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_sflags(sih, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: TRUE if the current core is out of reset and clocked. */
+bool
+si_iscoreup(si_t *sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_iscoreup(sih);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+/* Read-modify-write a register in the current core's AI wrapper space.
+ * No-op (returns 0) on non-AI interconnects, which have no wrappers.
+ */
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ /* only for AI back plane chips */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return (ai_wrap_reg(sih, offset, mask, val));
+ return 0;
+}
+
+/* Interconnect dispatcher: read-modify-write register 'regoff' of core
+ * 'coreidx' (mask == 0 is a pure read); returns the register value.
+ */
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/* Interconnect dispatcher: put the current core into reset, asserting the
+ * extra control bits in 'bits' while doing so.
+ */
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_disable(sih, bits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_disable(sih, bits);
+}
+
+/* Interconnect dispatcher: reset the current core; 'bits' are held through
+ * the whole sequence, 'resetbits' only while reset is asserted.
+ */
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ sb_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_reset(sih, bits, resetbits);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ ub_core_reset(sih, bits, resetbits);
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+/* Returns 0 on pass, BCME_ERROR on BIST error. NOTE(review): if the
+ * 100ms SPINWAIT expires without SISF_BIST_DONE, the error bit alone
+ * decides the result -- a hung BIST can read as success; confirm intent.
+ */
+int
+si_corebist(si_t *sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
+
+/* Map a 3-bit clockcontrol "factor6" field encoding to its numeric divisor
+ * (2..7); returns 0 for an unrecognized encoding.
+ */
+static uint32
+factor6(uint32 x)
+{
+ switch (x) {
+ case CC_F6_2: return 2;
+ case CC_F6_3: return 3;
+ case CC_F6_4: return 4;
+ case CC_F6_5: return 5;
+ case CC_F6_6: return 6;
+ case CC_F6_7: return 7;
+ default: return 0;
+ }
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+/* 'n' and 'm' are the raw clockcontrol_n / clockcontrol_m register values;
+ * 'pll_type' selects the decode scheme (PLL_TYPE1..7). Returns the clock in
+ * Hz, or 0 for invalid encodings. Type 5 is fixed 100 MHz; type 6 is a
+ * two-value mux. NOTE(review): a zero divisor from factor6() on a bad
+ * encoding would divide by zero here -- presumed unreachable for sane
+ * register values.
+ */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25Mhz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ /* mc selects which of the m-dividers are in the clock path */
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return (clock / m1);
+ case CC_MC_M1M2: return (clock / (m1 * m2));
+ case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
+ case CC_MC_M1M3: return (clock / (m1 * m3));
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ /* type 2: each m-divider is individually bypassable */
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
+
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+/* ticks == 0 disables the watchdog. PMU-capable chips use the PMU watchdog
+ * register (counter width depends on ccrev); others use the 28-bit
+ * chipcommon watchdog. Values are clamped to the counter's maximum.
+ */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+ uint nb, maxt;
+
+ if (PMUCTL_ENAB(sih)) {
+
+ /* 4319 rev0 workaround: force HT off and disable the USB core
+ * before arming the PMU watchdog
+ */
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+ (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+ si_setcore(sih, USB20D_CORE_ID, 0);
+ si_core_disable(sih, 1);
+ si_setcore(sih, CC_CORE_ID, 0);
+ }
+
+ /* PMU watchdog width: 16 bits (<rev26), 24 bits, or 32 bits (>=rev37) */
+ nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+ /* The mips compiler uses the sllv instruction,
+ * so we specially handle the 32-bit case.
+ */
+ if (nb == 32)
+ maxt = 0xffffffff;
+ else
+ maxt = ((1 << nb) - 1);
+
+ /* a value of 1 would fire before the write completes; round up to 2 */
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ } else {
+ maxt = (1 << 28) - 1;
+ if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ }
+}
+
+/* trigger watchdog reset after ms milliseconds */
+/* Uses wd_msticks (ticks-per-ms, set in si_kattach); ms == 0 disables. */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+ si_watchdog(sih, wd_msticks * ms);
+}
+
+/* Expose the watchdog ticks-per-millisecond conversion factor. */
+uint32 si_watchdog_msticks(void)
+{
+ return wd_msticks;
+}
+
+/* Clear any pending backplane target-abort. Stubbed out in this build:
+ * always reports "no target abort seen" (FALSE); 'details' is ignored.
+ */
+bool
+si_taclear(si_t *sih, bool details)
+{
+ return FALSE;
+}
+
+
+
+/* return the slow clock source - LPO, XTAL, or PCI */
+/* Decode depends on chipcommon rev: <6 infers it from a PCI config GPIO
+ * strap, 6..9 reads slow_clk_ctl, >=10 ("Insta-clock") is always XTAL.
+ * Caller must have chipcommon selected (or be an SI_FAST handle).
+ */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+ chipcregs_t *cc;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ if (sii->pub.ccrev < 6) {
+ if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+ PCI_CFG_GPIO_SCS))
+ return (SCC_SS_PCI);
+ else
+ return (SCC_SS_XTAL);
+ } else if (sii->pub.ccrev < 10) {
+ cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+ return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+ } else /* Insta-clock */
+ return (SCC_SS_XTAL);
+}
+
+/* return the ILP (slowclock) min or max frequency */
+/* 'max_freq' picks the worst-case bound to use (max for delay sizing);
+ * 'cc' must point at the chipcommon registers. Requires dynamic clock
+ * control (CC_CAP_PWR_CTL) -- asserted below.
+ */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+ uint32 slowclk;
+ uint div;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ /* shouldn't be here unless we've established the chip has dynamic clk control */
+ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+ slowclk = si_slowclk_src(sii);
+ if (sii->pub.ccrev < 6) {
+ /* fixed divider: /64 from PCI clock, /32 from xtal */
+ if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+ else
+ return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+ } else if (sii->pub.ccrev < 10) {
+ /* programmable divider from slow_clk_ctl */
+ div = 4 *
+ (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+ if (slowclk == SCC_SS_LPO)
+ return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+ else if (slowclk == SCC_SS_XTAL)
+ return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+ else if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+ else
+ ASSERT(0);
+ } else {
+ /* Chipc rev 10 is InstaClock */
+ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+ div = 4 * (div + 1);
+ return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+ }
+ return (0);
+}
+
+/* Program chipcommon's PLL-on and fref-select delay registers, expressed
+ * in slow-clock ticks, from the PLL_DELAY/FREF_DELAY microsecond budgets.
+ * 'chipcregs' is the chipcommon register block (passed as void* for the
+ * caller's convenience).
+ */
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+ chipcregs_t *cc = (chipcregs_t *)chipcregs;
+ uint slowmaxfreq, pll_delay, slowclk;
+ uint pll_on_delay, fref_sel_delay;
+
+ pll_delay = PLL_DELAY;
+
+ /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+ * since the xtal will also be powered down by dynamic clk control logic.
+ */
+
+ slowclk = si_slowclk_src(sii);
+ if (slowclk != SCC_SS_XTAL)
+ pll_delay += XTAL_ON_DELAY;
+
+ /* Starting with 4318 it is ILP that is used for the delays */
+ slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
+ /* convert us to ticks, rounding up (ceil(freq * us / 1e6)) */
+ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+ W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+ W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/* initialize power control delay registers */
+/* No-op unless the chip has chipcommon-based clock control (CCCTL_ENAB).
+ * Non-fast handles temporarily switch focus to chipcommon and restore the
+ * caller's core index before returning.
+ */
+void
+si_clkctl_init(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx = 0;
+ chipcregs_t *cc;
+ bool fast;
+
+ if (!CCCTL_ENAB(sih))
+ return;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return;
+ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+ return;
+ ASSERT(cc != NULL);
+
+ /* set all Instaclk chip ILP to 1 MHz */
+ if (sih->ccrev >= 10)
+ SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+ si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+}
+
+
+/* change logical "focus" to the gpio core for optimized access */
+/* GPIOs live in chipcommon, so this simply selects core SI_CC_IDX. */
+void *
+si_gpiosetcore(si_t *sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/*
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ * to some chip-specific purpose.
+ *
+ * Returns the register value after the read-modify-write via si_corereg().
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ /* restrict mask to reserved gpios (app callers) or
+ * unreserved gpios (normal callers)
+ */
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ /* same reservation filtering as si_gpiocontrol() */
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ /* same reservation filtering as si_gpiocontrol() */
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio
+ * gpio_bitmask must have exactly one bit set; returns the updated
+ * reservation bitmap, or 0xffffffff on any invalid/duplicate request.
+ */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ /* only cores on SI_BUS share GPIO's and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set (power-of-two check) */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* release one gpio */
+/*
+ * releasing the gpio doesn't change the current value on the GPIO;
+ * the last written value persists till someone overwrites it
+ */
+
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ /* only cores on SI_BUS share GPIO's and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set (power-of-two check) */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* return the current gpioin register value (read-only: mask/val are 0) */
+uint32
+si_gpioin(si_t *sih)
+{
+ uint regoff;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms;
+ * NOTE: unlike si_gpiocontrol(), there is no GPIO_HI_PRIORITY bypass here
+ */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms;
+ * same reservation filtering as si_gpiointpolarity()
+ */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led; requires chipcommon rev >= 16,
+ * returns 0xffffffff on older revs
+ */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ /* gpio led powersave reg */
+ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val; requires chipcommon rev >= 16 */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+/* mask&set gpio pull-up (updown==FALSE) or pull-down (updown==TRUE) bits;
+ * requires chipcommon rev >= 20
+ */
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 20)
+ return 0xffffffff;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+/* mask&set one of the gpio event registers selected by regtype
+ * (GPIO_REGEVT / GPIO_REGEVT_INTMSK / GPIO_REGEVT_INTPOL);
+ * requires chipcommon rev >= 11, returns 0xffffffff otherwise
+ */
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+/* Register a GPIO event callback.
+ * event:  bitmask of GPIO bits the handler cares about
+ * level:  TRUE for level-triggered, FALSE for edge-triggered dispatch
+ * Returns an opaque handle for si_gpio_handler_unregister(), or NULL
+ * on old chipcommon (< rev 11) or allocation failure.
+ */
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+ bool level, gpio_handler_t cb, void *arg)
+{
+ si_info_t *sii;
+ gpioh_item_t *gi;
+
+ ASSERT(event);
+ ASSERT(cb != NULL);
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return NULL;
+
+ if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+ return NULL;
+
+ bzero(gi, sizeof(gpioh_item_t));
+ gi->event = event;
+ gi->handler = cb;
+ gi->arg = arg;
+ gi->level = level;
+
+ /* push onto the head of the singly-linked handler list */
+ gi->next = sii->gpioh_head;
+ sii->gpioh_head = gi;
+
+ return (void *)(gi);
+}
+
+/* Unlink and free a handler previously returned by
+ * si_gpio_handler_register(); asserts if the handle is not in the list.
+ */
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+ si_info_t *sii;
+ gpioh_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return;
+
+ ASSERT(sii->gpioh_head != NULL);
+ if ((void*)sii->gpioh_head == gpioh) {
+ /* removing the list head */
+ sii->gpioh_head = sii->gpioh_head->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ } else {
+ /* walk with a trailing pointer to splice the node out */
+ p = sii->gpioh_head;
+ n = p->next;
+ while (n) {
+ if ((void*)n == gpioh) {
+ p->next = n->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+
+ ASSERT(0); /* Not found in list */
+}
+
+/* Dispatch GPIO interrupts: snapshot level and edge status/polarity,
+ * invoke every registered handler whose event bits fired, then ack the
+ * edge-trigger status.
+ */
+void
+si_gpio_handler_process(si_t *sih)
+{
+ si_info_t *sii;
+ gpioh_item_t *h;
+ uint32 level = si_gpioin(sih);
+ uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+ uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+ uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+ sii = SI_INFO(sih);
+ for (h = sii->gpioh_head; h != NULL; h = h->next) {
+ if (h->handler) {
+ uint32 status = (h->level ? level : edge) & h->event;
+ uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+ /* polarity bitval is opposite of status bitval */
+ if (status ^ polarity)
+ h->handler(status, h->arg);
+ }
+ }
+
+ si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
+/* Enable/disable the GPIO interrupt in the chipcommon intmask;
+ * requires chipcommon rev >= 11
+ */
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/* Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ /* bankidx selects both the bank number and the memory type */
+ uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ /* size field is encoded as (units - 1) in SZBASE granularity */
+ banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+/* Get or set the devram enable/protect/remap configuration of the
+ * SOCRAM core (corerev >= 10 only).
+ * set==TRUE:  program every devram bank from *enable/*protect/*remap
+ *             (remap needs corerev >= 16).
+ * set==FALSE: report bank 0's current state through the out-pointers.
+ * Runs with function-core interrupts blocked and restores the original core.
+ */
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ if (!set)
+ *enable = *protect = *remap = 0;
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (set) {
+ /* clear, then re-apply the requested control bits */
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+ if (*enable) {
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+ if (*protect)
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+ if ((corerev >= 16) && *remap)
+ bankinfo |=
+ (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+ }
+ W_REG(sii->osh, &regs->bankinfo, bankinfo);
+ }
+ else if (i == 0) {
+ /* query path: only bank 0 is reported */
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+ *enable = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+ *protect = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+ *remap = 1;
+ }
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Return TRUE if any devram bank of the SOCRAM core (corerev >= 16)
+ * currently has its remap bit set; FALSE otherwise or when no SOCRAM
+ * core is present.
+ */
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup, remap = FALSE;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ /* one remapped bank is enough — stop scanning */
+ remap = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+ return remap;
+}
+
+/* TRUE when the package has any devram at all */
+bool
+si_socdevram_pkg(si_t *sih)
+{
+ if (si_socdevram_size(sih) > 0)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+/* Total devram size in bytes summed over all devram banks of the
+ * SOCRAM core (corerev >= 10); 0 when absent or unsupported.
+ */
+uint32
+si_socdevram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Size in bytes of the leading run of consecutive remap-enabled devram
+ * banks (SOCRAM corerev >= 16); 0 when absent or unsupported.
+ */
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0, banksz;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+ /*
+ * FIX: A0 Issue: Max addressable is 512KB, instead 640KB
+ * Only four banks are accessible to ARM
+ */
+ if ((corerev == 16) && (nb == 5))
+ nb = 4;
+
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ memsize += banksz;
+ } else {
+ /* Account only consecutive banks for now */
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Return the RAM size of the SOCRAM core
+ * The coreinfo encoding changed across core revisions, hence the
+ * four-way decode below; 0 is returned when no SOCRAM core exists.
+ */
+uint32
+si_socram_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ /* lss != 0 means the last bank is a smaller, separately-sized one */
+ if (lss != 0)
+ nb --;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ /* rev >= 8 (except 12): per-bank sizes via the bankinfo register */
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Total size in bytes of the retention-capable SOCRAM banks
+ * (corerev >= 16). 4334 rev < 2 is special-cased to a fixed 32 KB.
+ */
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+ return (32 * 1024);
+ }
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev >= 16) {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++) {
+ W_REG(sii->osh, &regs->bankidx, i);
+ /* only count banks flagged as retention RAM */
+ if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+
+/* BT-coex GPIO workaround: force uart0mcr bit 2 on parts whose UART
+ * GPIO strapping is enabled (CC_CAP_UARTGPIO).
+ */
+void
+si_btcgpiowar(si_t *sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ chipcregs_t *cc;
+
+ sii = SI_INFO(sih);
+
+ /* Make sure that there is ChipCommon core present &&
+ * UART_TX is strapped to 1
+ */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Route (on=TRUE) or un-route (on=FALSE) the 4331 bt_shd0 signal onto
+ * gpio4 via the chipcontrol register.
+ */
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ /* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+ if (on) {
+ /* Enable bt_shd0 on gpio4: */
+ val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+/* Write a previously saved raw value back to the chipcontrol register */
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ si_setcoreidx(sih, origidx);
+}
+
+/* Read and return the raw chipcontrol register value */
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ val = R_REG(sii->osh, &cc->chipcontrol);
+ si_setcoreidx(sih, origidx);
+ return val;
+}
+
+/* Enable/disable the external PA controls on 4331; the exact control
+ * bits depend on package (12x9 vs 12x12) and chip revision.
+ */
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ /* Ext PA Controls for 4331 12x9 Package */
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ /* Ext PA Controls for 4331 12x12 Package */
+ if (sih->chiprev > 0) {
+ W_REG(sii->osh, &cc->chipcontrol, val |
+ (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+ } else {
+ W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+ }
+ }
+ } else {
+ val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* switch muxed pins, on: SROM, off: FEMCTRL
+ * NOTE: the off path is intentionally empty (no bits are restored).
+ */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ val &= ~(CCTRL4360_SECI_MODE |
+ CCTRL4360_BTSWCTRL_MODE |
+ CCTRL4360_EXTRA_FEMCTRL_MODE |
+ CCTRL4360_BT_LGCY_MODE |
+ CCTRL4360_CORE2FEMCTRL4_ON);
+
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* Adjust 4331/43431 (12x9 package only) external-PA controls on
+ * wake-on-wireless-LAN entry/exit.
+ */
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ bool sel_chip;
+
+ sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43431_CHIP_ID);
+ sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+ if (!sel_chip)
+ return;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (enter_wowl) {
+ val |= CCTRL4331_EXTPA_EN;
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+ si_setcoreidx(sih, origidx);
+}
+
+/* Stub: PLL reset is a no-op in this build; always returns 0 (no error) */
+uint
+si_pll_reset(si_t *sih)
+{
+ uint err = 0;
+
+ return (err);
+}
+
+/* Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ /* EPA Fix: hand the EPA-enable GPIOs over to chip control */
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* Stub: PMU HT-avail forcing is not implemented in this build */
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+/* WL/BT control for 4313 btcombo boards >= P250:
+ * give GPIO 5/6 to chip control and drive them as outputs
+ */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+ W_REG(sii->osh, &cc->gpioouten,
+ R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+/* Set the BT-coex enable bit in chipcontrol */
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ /* BT fix */
+ W_REG(sii->osh, &cc->chipcontrol,
+ R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+/* 43228 btcombo workaround: enable GPIO 6/7 as outputs and drive GPIO 7
+ * high (absolute writes, not read-modify-write)
+ */
+void
+si_btcombo_43228_war(si_t *sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+ W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+ W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* check if the device is removed
+ * For PCI: the device is considered gone when the config-space vendor
+ * ID no longer reads back as Broadcom. Other bus types report FALSE.
+ */
+bool
+si_deviceremoved(si_t *sih)
+{
+ uint32 w;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(sii->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ break;
+ }
+ return FALSE;
+}
+
+/* TRUE if an SPROM (vs OTP/sflash) is available as the NVRAM source.
+ * ccrev >= 31 chips report it via sromcontrol's SRC_PRESENT bit;
+ * older chips are decoded per-chip from the chip-status register.
+ */
+bool
+si_is_sprom_available(si_t *sih)
+{
+ if (sih->ccrev >= 31) {
+ si_info_t *sii;
+ uint origidx;
+ chipcregs_t *cc;
+ uint32 sromctrl;
+
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return FALSE;
+
+ sii = SI_INFO(sih);
+ origidx = sii->curidx;
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+ si_setcoreidx(sih, origidx);
+ return (sromctrl & SRC_PRESENT);
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4312_CHIP_ID:
+ return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
+ case BCM4325_CHIP_ID:
+ return (sih->chipst & CST4325_SPROM_SEL) != 0;
+ case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID:
+ case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID:
+ case BCM4342_CHIP_ID: {
+ uint32 spromotp;
+ spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+ CST4322_SPROM_OTP_SEL_SHIFT;
+ return (spromotp & CST4322_SPROM_PRESENT) != 0;
+ }
+ case BCM4329_CHIP_ID:
+ return (sih->chipst & CST4329_SPROM_SEL) != 0;
+ case BCM4315_CHIP_ID:
+ return (sih->chipst & CST4315_SPROM_SEL) != 0;
+ case BCM4319_CHIP_ID:
+ return (sih->chipst & CST4319_SPROM_SEL) != 0;
+ case BCM4336_CHIP_ID:
+ case BCM43362_CHIP_ID:
+ return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+ case BCM4330_CHIP_ID:
+ return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ case BCM4331_CHIP_ID:
+ case BCM43431_CHIP_ID:
+ return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+ case BCM43239_CHIP_ID:
+ return ((sih->chipst & CST43239_SPROM_MASK) &&
+ !(sih->chipst & CST43239_SFLASH_MASK));
+ case BCM4324_CHIP_ID:
+ return ((sih->chipst & CST4324_SPROM_MASK) &&
+ !(sih->chipst & CST4324_SFLASH_MASK));
+ case BCM43131_CHIP_ID:
+ case BCM43217_CHIP_ID:
+ case BCM43227_CHIP_ID:
+ case BCM43228_CHIP_ID:
+ case BCM43428_CHIP_ID:
+ return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+ default:
+ /* unknown chips are assumed to have an SPROM */
+ return TRUE;
+ }
+}
diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h
new file mode 100644
index 00000000000000..9a3270f74242c0
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/siutils_priv.h
@@ -0,0 +1,246 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+#define SI_ERROR(args)
+
+#define SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define SI_VMSG(args) printf args
+#else
+#define SI_VMSG(args)
+#endif
+
+#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+/* One registered GPIO event handler; nodes form a singly-linked list
+ * headed by si_info.gpioh_head (see si_gpio_handler_register()).
+ */
+typedef struct gpioh_item {
+ void *arg;
+ bool level;
+ gpio_handler_t handler;
+ uint32 event;
+ struct gpioh_item *next;
+} gpioh_item_t;
+
+/* misc si info needed by some of the routines
+ * NOTE: si_pub must remain the first field — SI_INFO() casts a si_t*
+ * (public handle) directly to si_info_t*.
+ */
+typedef struct si_info {
+ struct si_pub pub; /* back plane public state (must be first field) */
+
+ void *osh; /* osl os handle */
+ void *sdh; /* bcmsdh handle */
+
+ uint dev_coreid; /* the core provides driver functions */
+ void *intr_arg; /* interrupt callback function arg */
+ si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
+ si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+ si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+ void *pch; /* PCI/E core handle */
+
+ gpioh_item_t *gpioh_head; /* GPIO event handlers list */
+
+ bool memseg; /* flag to toggle MEM_SEG register */
+
+ char *vars; /* nvram variable buffer */
+ uint varsz; /* size of vars buffer */
+
+ void *curmap; /* current regs va */
+ void *regs[SI_MAXCORES]; /* other regs va */
+
+ uint curidx; /* current core index */
+ uint numcores; /* # discovered cores */
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
+ uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
+ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
+
+ void *curwrap; /* current wrapper va */
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+
+ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+ uint32 cib[SI_MAXCORES]; /* erom cib entry for each core */
+ uint32 oob_router; /* oob router registers for axi */
+} si_info_t;
+
+#define SI_INFO(sih) (si_info_t *)(uintptr)sih
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
+#define NOREV -1 /* Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access PCI/PCIE and CC core without requiring to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts before/
+ * after core switching to avoid invalid register access inside an ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+#define PCI_FORCEHT(si) \
+ (((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+ ((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b) (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif /* _siutils_priv_h_ */
diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h
new file mode 100644
index 00000000000000..673dce08aad73a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/uamp_api.h
@@ -0,0 +1,176 @@
+/*
+ * Name: uamp_api.h
+ *
+ * Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h 294267 2011-11-04 23:41:52Z $
+ *
+ */
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+** Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool BOOLEAN;
+typedef uint8 UINT8;
+typedef uint16 UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1 1
+#define UAMP_ID_2 2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */
+#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */
+#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */
+#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+    tUAMP_CH        channel;        /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function: UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters amp_id: AMP device identifier that generated the event
+** amp_evt: event id
+** p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+** external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function: UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters p_cback: Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters app_id: Application specific AMP identifier. This value
+** will be included in AMP messages sent to the
+** BTU task, to identify source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function: UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters app_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters: app_id: AMP identifier.
+** p_buf: pointer to buffer to write
+** num_bytes: number of bytes to write
+** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD
+**
+** Returns: number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function: UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+** UAMP_EVT_RX_READY callback event.
+**
+** Parameters: app_id: AMP identifier.
+** p_buf: pointer to buffer for holding incoming AMP data
+** buf_size: size of p_buf
+** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT
+**
+** Returns: number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 00000000000000..850694f86f9f12
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,845 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c 323797 2012-03-27 01:27:20Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <bcmsdbus.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/platform_device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#include <linux/wlan_plat.h>
+#else
+#include <linux/wifi_tiwlan.h>
+#endif
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START "START"
+#define CMD_STOP "STOP"
+#define CMD_SCAN_ACTIVE "SCAN-ACTIVE"
+#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
+#define CMD_RSSI "RSSI"
+#define CMD_LINKSPEED "LINKSPEED"
+#define CMD_RXFILTER_START "RXFILTER-START"
+#define CMD_RXFILTER_STOP "RXFILTER-STOP"
+#define CMD_RXFILTER_ADD "RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE "BTCOEXMODE"
+#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
+#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
+#define CMD_SETFWPATH "SETFWPATH"
+#define CMD_SETBAND "SETBAND"
+#define CMD_GETBAND "GETBAND"
+#define CMD_COUNTRY "COUNTRY"
+#define CMD_P2P_SET_NOA "P2P_SET_NOA"
+#define CMD_P2P_SET_PS "P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
+
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
+#define CMD_PNOSETUP_SET "PNOSETUP "
+#define CMD_PNOENABLE_SET "PNOFORCE"
+#define CMD_PNODEBUG_SET "PNODEBUG"
+
+#define PNO_TLV_PREFIX 'S'
+#define PNO_TLV_VERSION '1'
+#define PNO_TLV_SUBVERSION '2'
+#define PNO_TLV_RESERVED '0'
+#define PNO_TLV_TYPE_SSID_IE 'S'
+#define PNO_TLV_TYPE_TIME 'T'
+#define PNO_TLV_FREQ_REPEAT 'R'
+#define PNO_TLV_FREQ_EXPO_MAX 'M'
+
+typedef struct cmd_tlv {
+ char prefix;
+ char version;
+ char subver;
+ char reserved;
+} cmd_tlv_t;
+#endif /* PNO_SUPPORT */
+
+typedef struct android_wifi_priv_cmd {
+ char *buf;
+ int used_len;
+ int total_len;
+} android_wifi_priv_cmd;
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+void dhd_customer_gpio_wlan_ctrl(int onoff);
+uint dhd_dev_reset(struct net_device *dev, uint8 flag);
+void dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command);
+#else
+/* cfg80211 disabled at build time: provide no-op stubs so the private
+ * command dispatch below still links; each reports success (0) without
+ * doing anything.
+ */
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif
+extern int dhd_os_check_if_up(void *dhdp);
+extern void *bcmsdh_get_drvdata(void);
+#ifdef PROP_TXSTATUS
+extern int dhd_wlfc_init(dhd_pub_t *dhd);
+extern void dhd_wlfc_deinit(dhd_pub_t *dhd);
+#endif
+
+extern bool ap_fw_loaded;
+#ifdef CUSTOMER_HW2
+extern char iface_name[IFNAMSIZ];
+#endif
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open, subsequent wifi on will be handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+/* Handle the Android "LINKSPEED" private command: query the link speed
+ * through wldev, convert from Kbps to the Mbps value Android expects,
+ * and format "LinkSpeed <n>" into 'command'.
+ * Returns the number of bytes written, or -1 on wldev error.
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error)
+		return -1;
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+/* Handle the Android "RSSI" private command: format "<ssid> rssi <value>"
+ * into 'command'.  If the SSID cannot be obtained (zero or over-long
+ * length) only the " rssi <value>" part is emitted.
+ * Returns the number of bytes written, or -1 if the RSSI or SSID query
+ * fails.
+ */
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid = {0};
+	int rssi;
+	int bytes_written = 0;
+	int error;
+
+	error = wldev_get_rssi(net, &rssi);
+	if (error)
+		return -1;
+
+	error = wldev_get_ssid(net, &ssid);
+	if (error)
+		return -1;
+	if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+		DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+	} else {
+		memcpy(command, ssid.SSID, ssid.SSID_len);
+		bytes_written = ssid.SSID_len;
+	}
+	/* Bound the snprintf by the space remaining after the SSID bytes,
+	 * not by the full buffer size, so a long SSID cannot push the
+	 * formatted tail past the end of 'command'.
+	 */
+	bytes_written += snprintf(&command[bytes_written],
+		total_len - bytes_written, " rssi %d", rssi);
+	DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+	return bytes_written;
+}
+
+/* Handle the Android "SETSUSPENDOPT" private command.  Parses the single
+ * ASCII digit that follows the command name ('0' -> 0, anything else -> 1)
+ * and applies it through net_os_set_suspend_disable() / net_os_set_suspend().
+ * Returns 0 on success or the net_os_set_suspend() error code.
+ */
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	/* digit immediately after "SETSUSPENDOPT " */
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	/* only push the new setting down when it actually changed */
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now)))
+			DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+				__FUNCTION__, ret_now, suspend_flag));
+		else
+			DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+	return ret;
+}
+
+/* Handle the Android "GETBAND" private command: query the current band
+ * through wldev and format "Band <n>" into 'command'.
+ * Returns the number of bytes written, or -1 on wldev error.
+ */
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+#ifdef PNO_SUPPORT
+/* Handle the Android "PNOSETUP " private command.  The payload after the
+ * command name is a small TLV stream: a cmd_tlv_t header ('S', version,
+ * subversion, reserved), then one or more SSID records, a mandatory scan
+ * time ('T') and optional repeat ('R') / frequency-expansion-max ('M')
+ * values, all numeric fields parsed as hex.  On success the parsed
+ * parameters are handed to dhd_dev_pno_set(); returns its result, or -1
+ * on any parse error.
+ */
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	int res = -1;
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time = 0;
+	int pno_repeat = 0;
+	int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+	/* canned TLV payload used to exercise the parser below */
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x05,
+		'd', 'l', 'i', 'n', 'k',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'0', 'B',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+	};
+#endif /* PNO_SET_DEBUG */
+
+	DHD_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	/* must at least hold the command name plus the TLV header */
+	if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+		DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
+		goto exit_proc;
+	}
+
+
+#ifdef PNO_SET_DEBUG
+	/* debug build: overwrite the user payload with the canned example */
+	memcpy(command, pno_in_example, sizeof(pno_in_example));
+	for (i = 0; i < sizeof(pno_in_example); i++)
+		printf("%02X ", command[i]);
+	printf("\n");
+	total_len = sizeof(pno_in_example);
+#endif
+
+	str_ptr = command + strlen(CMD_PNOSETUP_SET);
+	tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+
+	/* validate the TLV header before consuming the stream */
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) {
+
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid));
+			goto exit_proc;
+		} else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				DHD_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			DHD_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			/* optional trailing 'R' (repeat) and 'M' (expo max) fields */
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					DHD_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_INFO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	} else {
+		DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max);
+
+exit_proc:
+	return res;
+}
+#endif /* PNO_SUPPORT */
+
+/* Handle the Android "P2P_DEV_ADDR" private command: copy the P2P device
+ * MAC address (binary, not textual) into 'command'.
+ * Returns sizeof(struct ether_addr) on success; note that failure is
+ * reported as 0 bytes written, not as a negative value.
+ */
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+	int ret;
+	int bytes_written = 0;
+
+	ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+	if (ret)
+		return 0;
+	bytes_written = sizeof(struct ether_addr);
+	return bytes_written;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+/* Power on the WLAN chip for the Android "START" command.  Under the
+ * net-interface lock: toggle WLAN_RESET_ON and start SDIO, retrying on
+ * failure, then bring the firmware up (dhd_dev_reset/dhd_dev_init_ioctl)
+ * and mark g_wifi_on.  No-op if wifi is already on.
+ * Returns 0 on success or a negative errno / sdioh_start error.
+ */
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+	int retry = POWERUP_MAX_RETRY;
+
+	printk("%s in\n", __FUNCTION__);
+	if (!dev) {
+		DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (!g_wifi_on) {
+		/* NOTE: with "retry-- >= 0" the power-up is attempted
+		 * POWERUP_MAX_RETRY + 1 times in total.
+		 */
+		do {
+			dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
+			ret = sdioh_start(NULL, 0);
+			if (ret == 0)
+				break;
+			DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+				retry+1));
+			dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+		} while (retry-- >= 0);
+		if (ret != 0) {
+			DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+			goto exit;
+		}
+		ret = dhd_dev_reset(dev, FALSE);
+		sdioh_start(NULL, 1);
+		dhd_dev_init_ioctl(dev);
+#ifdef PROP_TXSTATUS
+		dhd_wlfc_init(bcmsdh_get_drvdata());
+#endif
+		g_wifi_on = TRUE;
+	}
+
+exit:
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+/* Power off the WLAN chip for the Android "STOP" command.  Under the
+ * net-interface lock: tear down wlfc (if enabled), put the dongle in
+ * reset, stop SDIO, drop WLAN_RESET and clear g_wifi_on.  No-op if wifi
+ * is already off.  Returns 0, or -EINVAL for a NULL device.
+ */
+int wl_android_wifi_off(struct net_device *dev)
+{
+	int ret = 0;
+
+	printk("%s in\n", __FUNCTION__);
+	if (!dev) {
+		DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (g_wifi_on) {
+#ifdef PROP_TXSTATUS
+		dhd_wlfc_deinit(bcmsdh_get_drvdata());
+#endif
+		dhd_dev_reset(dev, 1);
+		sdioh_stop(NULL);
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+		g_wifi_on = FALSE;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+/* Handle the Android "SETFWPATH" private command: copy the firmware path
+ * that follows the command name into the global fw_path, and set
+ * ap_fw_loaded according to whether the path names an "apsta" image.
+ * Returns 0 on success, -1 if the path exceeds MOD_PARAM_PATHLEN.
+ */
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	bcm_strncpy_s(fw_path, sizeof(fw_path),
+		command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1);
+	if (strstr(fw_path, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+	return 0;
+}
+
+/* Handle SIOCDEVPRIVATE ioctls carrying Android private commands.
+ * Copies an android_wifi_priv_cmd descriptor and its command string in
+ * from user space, dispatches on the command prefix, and copies any
+ * textual result back to the caller's buffer (priv_cmd.used_len).
+ * Returns 0 on success or a negative errno.
+ *
+ * Security: priv_cmd.total_len is attacker-controlled, so it is bounds
+ * checked before being used for allocation/copy, and the copied-in
+ * buffer is explicitly NUL-terminated before any of the str* parsing
+ * below touches it.
+ */
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+
+	net_os_wake_lock(net);
+
+	if (!ifr->ifr_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	/* Reject absurd lengths before trusting them; 8192 comfortably
+	 * covers every command handled below.
+	 */
+	if (priv_cmd.total_len < 1 || priv_cmd.total_len > 8192) {
+		DHD_ERROR(("%s: invalid length %d\n", __FUNCTION__, priv_cmd.total_len));
+		ret = -EINVAL;
+		goto exit;
+	}
+	/* +1 so the buffer can always be NUL-terminated */
+	command = kmalloc(priv_cmd.total_len + 1, GFP_KERNEL);
+	if (!command)
+	{
+		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	command[priv_cmd.total_len] = '\0';
+
+	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	/* START and SETFWPATH are allowed even while the interface is down */
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+		bytes_written = wl_android_wifi_on(net);
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+	}
+
+	if (!g_wifi_on) {
+		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+			__FUNCTION__, command, ifr->ifr_name));
+		ret = 0;
+		goto exit;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		/* TBD: SCAN-ACTIVE */
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		/* TBD: SCAN-PASSIVE */
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_set_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_set_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_set_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_set_packet_filter(net, 1); /* DHCP ends */
+#ifdef WL_CFG80211
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
+#endif
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		bytes_written = wldev_set_band(net, band);
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		bytes_written = wldev_set_country(net, country_code);
+	}
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_reset(net);
+	}
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = dhd_dev_pno_enable(net, pfn_enabled);
+	}
+#endif
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+		int skip = strlen(CMD_P2P_SET_PS) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+	}
+#endif /* WL_CFG80211 */
+	else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	/* positive bytes_written == textual result to copy back; the +1
+	 * accounts for the NUL terminator when there is room for it.
+	 */
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+			command[0] = '\0';
+		if (bytes_written >= priv_cmd.total_len) {
+			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+			bytes_written = priv_cmd.total_len;
+		} else {
+			bytes_written++;
+		}
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	kfree(command); /* kfree(NULL) is a no-op */
+
+	return ret;
+}
+
+/* Module-init hook (called from dhd_module_init): optionally disable
+ * firmware download at driver load, and default the interface name to
+ * "wlan" on CUSTOMER_HW2 builds when none was supplied.  Returns 0.
+ */
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#ifdef CUSTOMER_HW2
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+#endif /* CUSTOMER_HW2 */
+	return ret;
+}
+
+/* Module-exit hook (called from dhd_module_cleanup).  Currently nothing
+ * to tear down; always returns 0.
+ */
+int wl_android_exit(void)
+{
+	int ret = 0;
+
+	return ret;
+}
+
+/* Called after driver init completes: if firmware is not downloaded at
+ * driver load, power the chip back off (WL_REG_ON low) and mark wifi as
+ * off so the first "START" command brings it up.
+ */
+void wl_android_post_init(void)
+{
+	if (!dhd_download_fw_on_driverload) {
+		/* Call customer gpio to turn off power with WL_REG_ON signal */
+		dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+		g_wifi_on = 0;
+	}
+}
+/**
+ * Functions for Android WiFi card detection
+ */
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+
+static int g_wifidev_registered = 0;
+static struct semaphore wifi_control_sem;
+static struct wifi_platform_data *wifi_control_data = NULL;
+static struct resource *wifi_irqres = NULL;
+
+static int wifi_add_dev(void);
+static void wifi_del_dev(void);
+
+/* Register the WiFi platform drivers and wait (up to 1s) for the probe
+ * callback to signal wifi_control_sem.  Returns 0 on success, -EINVAL if
+ * probe did not complete in time, or the registration error.
+ */
+int wl_android_wifictrl_func_add(void)
+{
+	int ret = 0;
+	sema_init(&wifi_control_sem, 0);
+
+	ret = wifi_add_dev();
+	if (ret) {
+		DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
+		return ret;
+	}
+	g_wifidev_registered = 1;
+
+	/* Waiting callback after platform_driver_register is done or exit with error */
+	if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) {
+		ret = -EINVAL;
+		DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
+	}
+
+	return ret;
+}
+
+/* Unregister the WiFi platform drivers if they were registered. */
+void wl_android_wifictrl_func_del(void)
+{
+	if (g_wifidev_registered)
+	{
+		wifi_del_dev();
+		g_wifidev_registered = 0;
+	}
+}
+
+/* Fetch a preallocated buffer for 'section' from the board's
+ * wifi_platform_data mem_prealloc hook, zeroing it before returning.
+ * Returns NULL when no hook is available or the section has no buffer.
+ */
+void* wl_android_prealloc(int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	if (wifi_control_data && wifi_control_data->mem_prealloc) {
+		alloc_ptr = wifi_control_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+
+	DHD_ERROR(("can't alloc section %d\n", section));
+	return NULL;
+}
+
+/* Return the OOB wakeup IRQ number from the platform resource (also
+ * storing its trigger flags in *irq_flags_ptr), falling back to
+ * CUSTOM_OOB_GPIO_NUM when defined, or -1 when no IRQ is available.
+ */
+int wifi_get_irq_number(unsigned long *irq_flags_ptr)
+{
+	if (wifi_irqres) {
+		*irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
+		return (int)wifi_irqres->start;
+	}
+#ifdef CUSTOM_OOB_GPIO_NUM
+	return CUSTOM_OOB_GPIO_NUM;
+#else
+	return -1;
+#endif
+}
+
+/* Drive the board's WiFi power control (set_power hook) and optionally
+ * sleep 'msec' milliseconds afterwards for settle time.  Returns 0.
+ */
+int wifi_set_power(int on, unsigned long msec)
+{
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (wifi_control_data && wifi_control_data->set_power) {
+		wifi_control_data->set_power(on);
+	}
+	if (msec)
+		msleep(msec);
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+/* Fetch the board-provided MAC address into 'buf' via the platform
+ * get_mac_addr hook.  Returns the hook's result, -EINVAL for a NULL
+ * buffer, or -EOPNOTSUPP when no hook is registered.
+ */
+int wifi_get_mac_addr(unsigned char *buf)
+{
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+	if (wifi_control_data && wifi_control_data->get_mac_addr) {
+		return wifi_control_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+/* Translate a country code through the board's get_country_code hook.
+ * Returns the hook's result, or NULL when 'ccode' is NULL or no hook is
+ * registered.
+ */
+void *wifi_get_country_code(char *ccode)
+{
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (!ccode)
+		return NULL;
+	if (wifi_control_data && wifi_control_data->get_country_code) {
+		return wifi_control_data->get_country_code(ccode);
+	}
+	return NULL;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+/* Signal SDIO card insertion/removal through the board's set_carddetect
+ * hook so the MMC stack rescans the slot.  Returns 0.
+ */
+static int wifi_set_carddetect(int on)
+{
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (wifi_control_data && wifi_control_data->set_carddetect) {
+		wifi_control_data->set_carddetect(on);
+	}
+	return 0;
+}
+
+/* Platform-driver probe: capture the board's wifi_platform_data and OOB
+ * IRQ resource (new "bcmdhd_wlan_irq" name first, legacy
+ * "bcm4329_wlan_irq" as fallback), power the card on, trigger card
+ * detect, and release wl_android_wifictrl_func_add() via the semaphore.
+ */
+static int wifi_probe(struct platform_device *pdev)
+{
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+	if (wifi_irqres == NULL)
+		wifi_irqres = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "bcm4329_wlan_irq");
+	wifi_control_data = wifi_ctrl;
+	wifi_set_power(1, 0);	/* Power On */
+	wifi_set_carddetect(1);	/* CardDetect (0->1) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+/* Platform-driver remove: power the card off, de-assert card detect, and
+ * signal the control semaphore so any waiter is released.
+ */
+static int wifi_remove(struct platform_device *pdev)
+{
+	struct wifi_platform_data *wifi_ctrl =
+		(struct wifi_platform_data *)(pdev->dev.platform_data);
+
+	DHD_ERROR(("## %s\n", __FUNCTION__));
+	wifi_control_data = wifi_ctrl;
+
+	wifi_set_power(0, 0);	/* Power Off */
+	wifi_set_carddetect(0);	/* CardDetect (1->0) */
+
+	up(&wifi_control_sem);
+	return 0;
+}
+
+/* Platform-driver suspend: on OOB-interrupt builds (pre-3.0 kernels)
+ * mask the out-of-band interrupt.  Returns 0.
+ */
+static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+/* Platform-driver resume: on OOB-interrupt builds re-enable the
+ * out-of-band interrupt, but only if the dongle is up.  Returns 0.
+ */
+static int wifi_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
+	if (dhd_os_check_if_up(bcmsdh_get_drvdata()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+/* Platform driver matched against the "bcmdhd_wlan" device registered by
+ * board code; probe/remove power the card and signal wifi_control_sem.
+ */
+static struct platform_driver wifi_device = {
+	.probe = wifi_probe,
+	.remove = wifi_remove,
+	.suspend = wifi_suspend,
+	.resume = wifi_resume,
+	.driver = {
+	.name = "bcmdhd_wlan",
+	}
+};
+
+/* Same driver under the legacy "bcm4329_wlan" name for older boards. */
+static struct platform_driver wifi_device_legacy = {
+	.probe = wifi_probe,
+	.remove = wifi_remove,
+	.suspend = wifi_suspend,
+	.resume = wifi_resume,
+	.driver = {
+	.name = "bcm4329_wlan",
+	}
+};
+
+/* Register both platform drivers (current and legacy names).  Note that
+ * the registration return values are not checked here; always returns 0.
+ */
+static int wifi_add_dev(void)
+{
+	DHD_TRACE(("## Calling platform_driver_register\n"));
+	platform_driver_register(&wifi_device);
+	platform_driver_register(&wifi_device_legacy);
+	return 0;
+}
+
+/* Unregister both platform drivers registered by wifi_add_dev(). */
+static void wifi_del_dev(void)
+{
+	DHD_TRACE(("## Unregister platform_driver_register\n"));
+	platform_driver_unregister(&wifi_device);
+	platform_driver_unregister(&wifi_device_legacy);
+}
+#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 00000000000000..583a16735ecd1c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.h
@@ -0,0 +1,57 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.h 307885 2012-01-12 23:30:48Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/**
+ * Android platform dependent functions, feel free to add Android specific functions here
+ * (save the macros in dhd). Please do NOT declare functions that are NOT exposed to dhd
+ * or cfg, define them as static in wl_android.c
+ */
+
+/**
+ * wl_android_init will be called from module init function (dhd_module_init now), similarly
+ * wl_android_exit will be called from module exit function (dhd_module_cleanup now)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+int wl_android_wifictrl_func_add(void);
+void wl_android_wifictrl_func_del(void);
+void* wl_android_prealloc(int section, unsigned long size);
+
+int wifi_get_irq_number(unsigned long *irq_flags_ptr);
+int wifi_set_power(int on, unsigned long msec);
+int wifi_get_mac_addr(unsigned char *buf);
+void *wifi_get_country_code(char *ccode);
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
new file mode 100644
index 00000000000000..068f96da696573
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c
@@ -0,0 +1,7302 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c 323022 2012-03-22 17:48:58Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the Linux system header-file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+#ifdef BCMWAPI_WPI
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+
+/* Parent device used when registering the wiphy; set from outside this
+ * chunk (setter not visible here).
+ */
+static struct device *cfg80211_parent_dev = NULL;
+/* Presumably a firmware multi-channel-concurrency capability flag -- when
+ * non-zero, wl_cfg80211_get_shared_freq() always picks a temp channel.
+ * TODO confirm where it is set.
+ */
+static int vsdb_supported = 0;
+/* Driver-private singleton, shared with dhd glue code (non-static). */
+struct wl_priv *wlcfg_drv_priv = NULL;
+
+/* Runtime debug verbosity; only errors by default. */
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAX_WAIT_TIME 1500
+#define WL_SCAN_ACTIVE_TIME 40 /* ms : Embedded default Active setting from DHD Driver */
+#define WL_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD Driver */
+#define WL_FRAME_LEN 300
+
+/* Alias: "no sideband" maps onto the lower-lower-lower 80 MHz sideband code. */
+#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
+
+
+#define DNGL_FUNC(func, parameters) func parameters;
+#define COEX_DHCP
+
+/* This is to override regulatory domains defined in cfg80211 module (reg.c)
+ * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
+ * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
+ * All the changes to the world regulatory domain are to be done here.
+ */
+/* NOTE(review): "99" looks like the conventional placeholder alpha2 for a
+ * driver-custom regdomain (not a real ISO country code) -- confirm against
+ * the cfg80211 custom-regulatory API. n_reg_rules must match the number of
+ * REG_RULE entries below.
+ */
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 5,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+		/* IEEE 802.11b/g, channels 12..13. No HT40
+		 * channel fits here.
+		 */
+		REG_RULE(2467-10, 2472+10, 20, 6, 20,
+		NL80211_RRF_PASSIVE_SCAN |
+		NL80211_RRF_NO_IBSS),
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20,
+		NL80211_RRF_PASSIVE_SCAN |
+		NL80211_RRF_NO_IBSS |
+		NL80211_RRF_NO_OFDM),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+
+
+/* Data Element Definitions -- WPS attribute TLV ids parsed by
+ * wl_validate_wps_ie() below.
+ */
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+
+/* Device Password ID values (WPS spec); DEV_PW_PUSHBUTTON is the one
+ * wl_validate_wps_ie() tests for PBC mode.
+ */
+#define DEV_PW_DEFAULT 0x0000
+/* Fix: the original definition carried a stray trailing comma, which would
+ * inject a ',' token wherever DEV_PW_USER_SPECIFIED was expanded in an
+ * expression.
+ */
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods bitmask values (WPS). The VIRT_/PHY_ variants combine a
+ * base method bit with a WPS-2.0 virtual/physical qualifier bit.
+ */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid);
+static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac,
+ struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev, bool enabled,
+ s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code);
+static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ s32 dbm);
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ void *cookie, void (*callback) (void *cookie,
+ struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 wl_notify_escan_complete(struct wl_priv *wl,
+ struct net_device *ndev, bool aborted, bool fw_abort);
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct wl_priv *wl);
+static void wl_destroy_event_handler(struct wl_priv *wl);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct wl_priv *wl);
+static void wl_flush_eq(struct wl_priv *wl);
+static unsigned long wl_lock_eq(struct wl_priv *wl);
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags);
+static void wl_init_eq_lock(struct wl_priv *wl);
+static void wl_init_event_handler(struct wl_priv *wl);
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
+static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type,
+ const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct wl_priv *wl);
+static s32 wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct wl_priv *wl,
+ struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * wl profile utilities
+ */
+static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, s32 item);
+static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+#endif
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+ struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct wl_priv *wl);
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct wl_priv *wl);
+
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev);
+static void wl_free_wdev(struct wl_priv *wl);
+
+static s32 wl_inform_bss(struct wl_priv *wl);
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr,
+ struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * wl_priv memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct wl_priv *wl);
+static void wl_deinit_priv_mem(struct wl_priv *wl);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct wl_priv *wl);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct wl_priv *wl);
+static s32 __wl_cfg80211_down(struct wl_priv *wl);
+static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev);
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
+static void wl_link_up(struct wl_priv *wl);
+static void wl_link_down(struct wl_priv *wl);
+static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_update_wiphybands(struct wl_priv *wl);
+
+/*
+ * iscan handler
+ */
+static void wl_iscan_timer(unsigned long data);
+static void wl_term_iscan(struct wl_priv *wl);
+static s32 wl_init_scan(struct wl_priv *wl);
+static s32 wl_iscan_thread(void *data);
+static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request,
+ u16 action);
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request);
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
+static s32 wl_invoke_iscan(struct wl_priv *wl);
+static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+ struct wl_scan_results **bss_list);
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan);
+static s32 wl_iscan_done(struct wl_priv *wl);
+static s32 wl_iscan_pending(struct wl_priv *wl);
+static s32 wl_iscan_inprogress(struct wl_priv *wl);
+static s32 wl_iscan_aborted(struct wl_priv *wl);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+ int nprobes, int *out_params_size);
+static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac);
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+/* Bail out of the enclosing cfg80211 op with -EIO unless the primary
+ * interface has been marked READY. Note: expands a 'return' inside the
+ * caller, so it is only usable in functions returning an int/s32.
+ */
+#define CHECK_SYS_UP(wlpriv)						\
+do {									\
+	struct net_device *ndev = wl_to_prmry_ndev(wlpriv);		\
+	if (unlikely(!wl_get_drv_status(wlpriv, READY, ndev))) {	\
+		WL_INFO(("device is not ready\n"));			\
+		return -EIO;						\
+	}								\
+} while (0)
+
+
+/* True for the AKM suite values that select WPA (as opposed to WPA2) IEs. */
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || 			\
+				 (akm) == RSN_AKM_UNSPECIFIED || 	\
+				 (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX 50
+/* Human-readable names for firmware event codes, presumably indexed by the
+ * WLC_E_* event number -- the ordering must track the firmware event enum.
+ * TODO confirm against the event header when new events are added.
+ */
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+	"IBSS_ASSOC",
+	"RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+	"PROBREQ_MSG",
+	"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+	"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+	"UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+	"WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+	"RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+	"ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+	"WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+	"WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+	"WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+	"WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+	"WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif				/* WL_DBG_LEVEL */
+
+/* Initializer for a 2.4 GHz struct ieee80211_channel entry. */
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+/* Initializer for a 5 GHz channel; center freq derived as 5000 + 5*chan. */
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+/* WLC rate ids are in 500 kbps units; cfg80211 bitrates are in 100 kbps. */
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),		\
+		.hw_value	= (_rateid),				\
+		.flags	  = (_flags),					\
+	}
+
+/* Legacy bitrate table. The first four entries are the 2.4 GHz-only
+ * DSSS/CCK rates; wl_a_rates starts at offset 4 so the 5 GHz band
+ * advertises only the eight OFDM rates, while wl_g_rates covers all 12.
+ */
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(WLC_RATE_1M, 0),
+	RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(WLC_RATE_6M, 0),
+	RATETAB_ENT(WLC_RATE_9M, 0),
+	RATETAB_ENT(WLC_RATE_12M, 0),
+	RATETAB_ENT(WLC_RATE_18M, 0),
+	RATETAB_ENT(WLC_RATE_24M, 0),
+	RATETAB_ENT(WLC_RATE_36M, 0),
+	RATETAB_ENT(WLC_RATE_48M, 0),
+	RATETAB_ENT(WLC_RATE_54M, 0)
+};
+
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+/* All 2.4 GHz channels (1-14); regulatory flags are left 0 here and are
+ * presumably applied later via the regdomain -- TODO confirm in setup code.
+ */
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+/* 5 GHz channel set, including UNII-1 through UNII-3 and Japan's ch 34-46. */
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(149, 0),
+	CHAN5G(153, 0), CHAN5G(157, 0),
+	CHAN5G(161, 0), CHAN5G(165, 0)
+};
+
+/* Band descriptors handed to cfg80211: 2.4 GHz uses the full b/g rate set,
+ * 5 GHz uses the OFDM-only alias into __wl_rates.
+ */
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+/* Cipher suites advertised to cfg80211; SMS4 (WAPI) only when built with
+ * BCMWAPI_WPI.
+ */
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+#ifdef BCMWAPI_WPI
+	WLAN_CIPHER_SUITE_SMS4
+#endif
+};
+
+/* IOCtl version read from targeted driver; 1 selects the legacy (pre-11ac)
+ * chanspec layout in the wl_chspec_* converters below.
+ */
+static int ioctl_version;
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ * ("legacy" = the pre-11ac layout used when ioctl_version == 1; see
+ * wl_chspec_driver_to_host().)
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	/* refuse to hand back a malformed result */
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+			chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ * Only 20 and 40 MHz chanspecs can be expressed in the legacy format; any
+ * other bandwidth falls through to the error branch below.
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	/* validate the input before converting */
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+			chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		WL_ERR((
+			"wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+			"to pre-11ac format\n",
+			wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ * (ioctl_version == 1 means the target firmware speaks the legacy chanspec
+ * layout, so version-convert before the byte swap.)
+ */
+static chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	/* host -> dongle byte order */
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ * Builds a plain 20 MHz chanspec for the bare channel number, inferring the
+ * band from the channel range.
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	/* channels above CH_MAX_2G_CHANNEL are treated as 5 GHz */
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	/* single 20 MHz channel, no control sideband */
+	chanspec |= WL_CHANSPEC_BW_20;
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(chanspec);
+}
+
+/* given a chanspec value from the driver, do the endian and chanspec version
+ * conversion to a chanspec_t value
+ * Returns INVCHANSPEC on error
+ * (Inverse of wl_chspec_host_to_driver(): byte-swap first, then lift a
+ * legacy-format value when ioctl_version == 1.)
+ */
+static chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/* Management-frame subtypes that may be transmitted/received per interface
+ * type. TX is unrestricted (0xffff) -- "you can transmit anything you
+ * like"; the RX masks list which subtypes the driver forwards to cfg80211.
+ */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	}
+};
+
+/* Convert a wsec key struct from host byte order to the dongle's before it
+ * is sent down via ioctl (htod* = host-to-dongle).
+ * NOTE(review): rxiv.lo is swapped with htod16 while the other fields use
+ * htod32 -- presumably rxiv.lo is a 16-bit field; confirm against the
+ * wl_wsec_key definition.
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+/* Convert a wsec key struct received from the dongle back to host byte
+ * order (dtoh* = dongle-to-host); field-for-field inverse of
+ * swap_key_from_BE().
+ */
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+/* For debug: dump the contents of the encoded WPS IE buffer.
+ * Walks the attribute TLV list, prints the attributes it recognizes and
+ * sets *pbc to true when the Device Password ID attribute indicates
+ * push-button configuration.
+ */
+static void
+wl_validate_wps_ie(char *wps_ie, bool *pbc)
+{
+	#define WPS_IE_FIXED_LEN 6
+	u16 len = (u16) wps_ie[TLV_LEN_OFF];
+	u8 *subel = wps_ie + WPS_IE_FIXED_LEN;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8 *) &val;
+
+	WL_DBG(("wps_ie len=%d\n", len));
+
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;	/* for the attr id, attr len fields */
+
+		/* Fix: a peer-supplied attribute length larger than the remaining
+		 * IE length would underflow the u16 counter and walk far past the
+		 * end of the buffer. Stop parsing instead.
+		 */
+		if (subelt_len > len) {
+			WL_DBG((" truncated attr 0x%x: len %u > remaining %u\n",
+				subelt_id, subelt_len, len));
+			break;
+		}
+		len -= subelt_len;	/* for the remaining fields in this attribute */
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			/* Fix: bound the copy -- the attribute length comes from
+			 * the peer and may exceed the local buffer (stack buffer
+			 * overflow in the original code).
+			 */
+			u16 namelen = subelt_len;
+			if (namelen > sizeof(devname) - 1)
+				namelen = sizeof(devname) - 1;
+			memcpy(devname, subel, namelen);
+			devname[namelen] = '\0';
+			WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+				devname, subelt_len));
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+			*pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+				": cat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
+
+/* Return the chanspec a new virtual interface should start on: the current
+ * BSS's channel when the STA is associated (to avoid multi-channel
+ * operation), otherwise a temporary channel that cfg80211 will later
+ * replace via set_channel. VSDB-capable firmware always gets the temp
+ * channel since it can operate on multiple channels.
+ */
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+	if (vsdb_supported) {
+		return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+	else {
+		chanspec_t chspec;
+		int err = 0;
+		struct wl_priv *wl = wiphy_priv(wiphy);
+		struct net_device *dev = wl_to_prmry_ndev(wl);
+		struct ether_addr bssid;
+		struct wl_bss_info *bss = NULL;
+
+		if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
+			/* STA interface is not associated. So start the new interface on a temp
+			 * channel . Later proper channel will be applied by the above framework
+			 * via set_channel (cfg80211 API).
+			 */
+			WL_DBG(("Not associated. Return a temp channel. \n"));
+			return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+		}
+
+		*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+		/* Fix: the original passed sizeof(WL_EXTRA_BUF_MAX) -- i.e.
+		 * sizeof(int), 4 bytes -- as the buffer length, even though
+		 * WL_EXTRA_BUF_MAX bytes were requested in the length word
+		 * above. Pass the real buffer size.
+		 */
+		if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, wl->extra_buf,
+			WL_EXTRA_BUF_MAX, false))) {
+			WL_ERR(("Failed to get associated bss info, use temp channel \n"));
+			chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+		}
+		else {
+			bss = (struct wl_bss_info *) (wl->extra_buf + 4);
+			/* NOTE(review): bss->chanspec arrives in dongle byte
+			 * order; presumably wl_chspec_driver_to_host() belongs
+			 * here -- confirm against the callers before changing.
+			 */
+			chspec = bss->chanspec;
+			WL_DBG(("Valid BSS Found. chanspec:%d \n", bss->chanspec));
+		}
+		return chspec;
+	}
+}
+
+/* Create a monitor-mode net_device through the dhd layer.
+ * Returns the new device, or NULL when dhd_add_monitor() fails (callers
+ * must handle a NULL return).
+ */
+static struct net_device* wl_cfg80211_add_monitor_if(char *name)
+{
+	struct net_device* ndev = NULL;
+	int err;
+
+	/* Fix: the original discarded dhd_add_monitor()'s return code and
+	 * relied solely on ndev staying NULL; log the failure explicitly.
+	 */
+	err = dhd_add_monitor(name, &ndev);
+	if (err)
+		WL_ERR(("wl_cfg80211_add_monitor_if: dhd_add_monitor failed (%d)\n", err));
+	WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+	return ndev;
+}
+
+static struct net_device *
wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name,
	enum nl80211_iftype type, u32 *flags,
	struct vif_params *params)
{
	/*
	 * cfg80211 add_virtual_intf handler: create a P2P virtual interface
	 * (client or GO) in the dongle via "p2p_ifadd" and attach a Linux
	 * net_device to it.  Monitor interfaces are delegated to
	 * wl_cfg80211_add_monitor_if(); other iftypes are rejected.
	 * Returns the new net_device, or NULL / ERR_PTR() on failure.
	 */
	s32 err;
	s32 timeout = -1;
	s32 wlif_type = -1;
	s32 mode = 0;
	chanspec_t chspec;
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_device *_ndev;
	struct ether_addr primary_mac;
	int (*net_attach)(void *dhdp, int ifidx);
	bool rollback_lock = false;

	/* Use primary I/F for sending cmds down to firmware */
	_ndev = wl_to_prmry_ndev(wl);

	WL_DBG(("if name: %s, type: %d\n", name, type));
	switch (type) {
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_MESH_POINT:
		/* Not supported by this driver; the WL_MODE_IBSS assignment
		 * below has no effect since we return immediately.
		 */
		WL_ERR(("Unsupported interface type\n"));
		mode = WL_MODE_IBSS;
		return NULL;
	case NL80211_IFTYPE_MONITOR:
		return wl_cfg80211_add_monitor_if(name);
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_STATION:
		wlif_type = WL_P2P_IF_CLIENT;
		mode = WL_MODE_BSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
	case NL80211_IFTYPE_AP:
		wlif_type = WL_P2P_IF_GO;
		mode = WL_MODE_AP;
		break;
	default:
		WL_ERR(("Unsupported interface type\n"));
		return NULL;
		/* NOTE(review): unreachable break after return */
		break;
	}

	if (!name) {
		WL_ERR(("name is NULL\n"));
		return NULL;
	}
	if (wl->iface_cnt == IFACE_MAX_CNT)
		return ERR_PTR(-ENOMEM);
	if (wl->p2p_supported && (wlif_type != -1)) {
		if (wl_get_p2p_status(wl, IF_DELETING)) {
			/* wait till IF_DEL is complete
			 * release the lock for the unregister to proceed
			 */
			if (rtnl_is_locked()) {
				rtnl_unlock();
				rollback_lock = true;
			}
			WL_INFO(("%s: Released the lock and wait till IF_DEL is complete\n",
				__func__));
			timeout = wait_event_interruptible_timeout(wl->netif_change_event,
				(wl_get_p2p_status(wl, IF_DELETING) == false),
				msecs_to_jiffies(MAX_WAIT_TIME));

			/* put back the rtnl_lock again */
			if (rollback_lock) {
				rtnl_lock();
				rollback_lock = false;
			}
			if (timeout > 0) {
				WL_ERR(("IF DEL is Success\n"));

			} else {
				/* Timed out or interrupted while waiting for
				 * the previous delete; let the caller retry.
				 */
				WL_ERR(("timeount < 0, return -EAGAIN\n"));
				return ERR_PTR(-EAGAIN);
			}
		}
		/* First P2P interface: enable P2P in firmware, start
		 * discovery, and derive the P2P device/interface MACs from
		 * the primary MAC.
		 */
		if (wl->p2p && !wl->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
			p2p_on(wl) = true;
			wl_cfgp2p_set_firm_p2p(wl);
			wl_cfgp2p_init_discovery(wl);
			get_primary_mac(wl, &primary_mac);
			wl_cfgp2p_generate_bss_mac(&primary_mac,
				&wl->p2p->dev_addr, &wl->p2p->int_addr);
		}

		/* Remember the requested name; applied to the netdev in
		 * wl_cfg80211_notify_ifadd() when the IF_ADD event arrives.
		 */
		memset(wl->p2p->vir_ifname, 0, IFNAMSIZ);
		strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1);


		/* Abort any scan in progress before touching interfaces */
		wl_notify_escan_complete(wl, _ndev, true, true);

		/* In concurrency case, STA may be already associated in a particular channel.
		 * so retrieve the current channel of primary interface and then start the virtual
		 * interface on that.
		 */
		chspec = wl_cfg80211_get_shared_freq(wiphy);

		/* For P2P mode, use P2P-specific driver features to create the
		 * bss: "wl p2p_ifadd"
		 */
		wl_set_p2p_status(wl, IF_ADD);
		err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);

		if (unlikely(err)) {
			WL_ERR((" virtual iface add failed (%d) \n", err));
			return ERR_PTR(-ENOMEM);
		}

		/* The IF_ADD firmware event (handled in notify_ifadd) clears
		 * the IF_ADD status bit and wakes us.
		 */
		timeout = wait_event_interruptible_timeout(wl->netif_change_event,
			(wl_get_p2p_status(wl, IF_ADD) == false),
			msecs_to_jiffies(MAX_WAIT_TIME));
		if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) {

			struct wireless_dev *vwdev;
			vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
			if (unlikely(!vwdev)) {
				WL_ERR(("Could not allocate wireless device\n"));
				return ERR_PTR(-ENOMEM);
			}
			vwdev->wiphy = wl->wdev->wiphy;
			WL_INFO((" virtual interface(%s) is created memalloc done \n",
				wl->p2p->vir_ifname));
			vwdev->iftype = type;
			/* notify_ifadd stored the new netdev in the
			 * CONNECTION bsscfg slot; wire it up to cfg80211.
			 */
			_ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
			_ndev->ieee80211_ptr = vwdev;
			SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy));
			vwdev->netdev = _ndev;
			wl_set_drv_status(wl, READY, _ndev);
			wl->p2p->vif_created = true;
			wl_set_mode_by_netdev(wl, _ndev, mode);
			net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION);
			/* register_netdev inside net_attach must not run
			 * under rtnl_lock; drop it temporarily.
			 */
			if (rtnl_is_locked()) {
				rtnl_unlock();
				rollback_lock = true;
			}
			if (net_attach && !net_attach(wl->pub, _ndev->ifindex)) {
				wl_alloc_netinfo(wl, _ndev, vwdev, mode);
				WL_DBG((" virtual interface(%s) is "
					"created net attach done\n", wl->p2p->vir_ifname));
			} else {
				/* put back the rtnl_lock again */
				if (rollback_lock)
					rtnl_lock();
				/* NOTE(review): vwdev allocated above is not
				 * freed on this path — verify for leak.
				 */
				goto fail;
			}
			/* put back the rtnl_lock again */
			if (rollback_lock)
				rtnl_lock();
			return _ndev;

		} else {
			/* IF_ADD never confirmed; roll back local state */
			wl_clr_p2p_status(wl, IF_ADD);
			WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname));
			memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
			wl->p2p->vif_created = false;
		}
	}
fail:
	return ERR_PTR(-ENODEV);
}
+
static s32
wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev)
{
	/*
	 * cfg80211 del_virtual_intf handler: tear down the P2P virtual
	 * interface via "p2p_ifdel" and wait for the firmware IF_DEL event
	 * (processed in wl_cfg80211_notify_ifdel) to finish the cleanup.
	 * Returns 0 on success or a negative error from dhd_del_monitor().
	 */
	struct ether_addr p2p_mac;
	struct wl_priv *wl = wiphy_priv(wiphy);
	s32 timeout = -1;
	s32 ret = 0;
	WL_DBG(("Enter\n"));

	if (wl->p2p_net == dev) {
		/* Since there is no ifidx corresponding to p2p0, cmds to
		 * firmware should be routed through primary I/F
		 */
		dev = wl_to_prmry_ndev(wl);
	}

	if (wl->p2p_supported) {
		memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN);
		if (wl->p2p->vif_created) {
			/* Abort any scan running on this interface first */
			if (wl_get_drv_status(wl, SCANNING, dev)) {
				wl_notify_escan_complete(wl, dev, true, true);
			}
			/* Re-enable minimum power consumption before delete */
			wldev_iovar_setint(dev, "mpc", 1);
			wl_set_p2p_status(wl, IF_DELETING);
			ret = wl_cfgp2p_ifdel(wl, &p2p_mac);
			/* Firmware could not delete the interface so we will not get WLC_E_IF
			 * event for cleaning the dhd virtual nw interace
			 * So lets do it here. Failures from fw will ensure the application to do
			 * ifconfig <inter> down and up sequnce, which will reload the fw
			 * however we should cleanup the linux network virtual interfaces
			 */
			/* Request framework to RESET and clean up */
			if (ret) {
				struct net_device *ndev = wl_to_prmry_ndev(wl);
				WL_ERR(("Firmware returned an error (%d) from p2p_ifdel"
					"HANG Notification sent to %s\n", ret, ndev->name));
				wl_cfg80211_hang(ndev, WLAN_REASON_UNSPECIFIED);
			}

			/* Wait for any pending scan req to get aborted from the sysioc context */
			timeout = wait_event_interruptible_timeout(wl->netif_change_event,
				(wl_get_p2p_status(wl, IF_DELETING) == false),
				msecs_to_jiffies(MAX_WAIT_TIME));
			if (timeout > 0 && !wl_get_p2p_status(wl, IF_DELETING)) {
				WL_DBG(("IFDEL operation done\n"));
			} else {
				/* Proceed anyway; notify_ifdel clears state */
				WL_ERR(("IFDEL didn't complete properly\n"));
			}
			ret = dhd_del_monitor(dev);
		}
	}
	return ret;
}
+
static s32
wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
	enum nl80211_iftype type, u32 *flags,
	struct vif_params *params)
{
	/*
	 * cfg80211 change_virtual_intf handler: switch an existing interface
	 * between STA/AP/P2P-client/P2P-GO modes.  For a created P2P vif,
	 * issues "p2p_ifupd" and waits for the IF_CHANGED firmware event;
	 * for the primary interface, begins SoftAP creation bookkeeping.
	 */
	s32 ap = 0;
	s32 infra = 0;
	s32 wlif_type;
	s32 mode = 0;
	chanspec_t chspec;
	struct wl_priv *wl = wiphy_priv(wiphy);

	WL_DBG(("Enter \n"));
	switch (type) {
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_MESH_POINT:
		/* Unsupported, but not rejected here: falls through to the
		 * ap branch below with mode == 0.
		 */
		ap = 1;
		WL_ERR(("type (%d) : currently we do not support this type\n",
			type));
		break;
	case NL80211_IFTYPE_ADHOC:
		mode = WL_MODE_IBSS;
		break;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		mode = WL_MODE_BSS;
		infra = 1;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
		mode = WL_MODE_AP;
		ap = 1;
		break;
	default:
		return -EINVAL;
	}

	if (ap) {
		wl_set_mode_by_netdev(wl, ndev, mode);
		if (wl->p2p_supported && wl->p2p->vif_created) {
			WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p->vif_created,
				p2p_on(wl)));
			/* Disable minimum power consumption while acting
			 * as GO, then abort any running scan.
			 */
			wldev_iovar_setint(ndev, "mpc", 0);
			wl_notify_escan_complete(wl, ndev, true, true);

			/* In concurrency case, STA may be already associated in a particular
			 * channel. so retrieve the current channel of primary interface and
			 * then start the virtual interface on that.
			 */
			chspec = wl_cfg80211_get_shared_freq(wiphy);

			/* ap is always 1 in this branch, so this is GO */
			wlif_type = ap ? WL_P2P_IF_GO : WL_P2P_IF_CLIENT;
			WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
				ndev->name, ap, infra, type));
			wl_set_p2p_status(wl, IF_CHANGING);
			wl_clr_p2p_status(wl, IF_CHANGED);
			wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
			/* Wait for notify_ifchange() to flag IF_CHANGED;
			 * timeout is ignored — state is cleared regardless.
			 */
			wait_event_interruptible_timeout(wl->netif_change_event,
				(wl_get_p2p_status(wl, IF_CHANGED) == true),
				msecs_to_jiffies(MAX_WAIT_TIME));
			wl_set_mode_by_netdev(wl, ndev, mode);
			wl_clr_p2p_status(wl, IF_CHANGING);
			wl_clr_p2p_status(wl, IF_CHANGED);
		} else if (ndev == wl_to_prmry_ndev(wl) &&
			!wl_get_drv_status(wl, AP_CREATED, ndev)) {
			/* SoftAP on the primary interface: allocate the
			 * saved-IE store used during AP bring-up.
			 */
			wl_set_drv_status(wl, AP_CREATING, ndev);
			if (!wl->ap_info &&
				!(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
				WL_ERR(("struct ap_saved_ie allocation failed\n"));
				return -ENOMEM;
			}
		} else {
			WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
			return -EINVAL;
		}
	}

	ndev->ieee80211_ptr->iftype = type;
	return 0;
}
+
s32
wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
	void* _net_attach)
{
	/*
	 * Called from the driver core when the firmware reports a new
	 * interface (IF_ADD).  Records the netdev/bssidx/attach callback in
	 * the P2P CONNECTION bsscfg slot and wakes the waiter in
	 * wl_cfg80211_add_virtual_iface().  Returns BCME_OK when consumed,
	 * BCME_NOTREADY when no add was pending.
	 */
	struct wl_priv *wl = wlcfg_drv_priv;
	s32 ret = BCME_OK;
	WL_DBG(("Enter"));
	if (!ndev) {
		WL_ERR(("net is NULL\n"));
		return 0;
	}
	if (wl->p2p_supported && wl_get_p2p_status(wl, IF_ADD)) {
		WL_DBG(("IF_ADD event called from dongle, old interface name: %s,"
			"new name: %s\n", ndev->name, wl->p2p->vir_ifname));
		/* Assign the net device to CONNECT BSSCFG */
		/* NOTE(review): strncpy does not NUL-terminate if vir_ifname
		 * is exactly IFNAMSIZ-1 chars; vir_ifname itself was built
		 * the same way, so termination holds in practice — verify.
		 */
		strncpy(ndev->name, wl->p2p->vir_ifname, IFNAMSIZ - 1);
		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev;
		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx;
		wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach;
		ndev->ifindex = idx;
		wl_clr_p2p_status(wl, IF_ADD);

		wake_up_interruptible(&wl->netif_change_event);
	} else {
		ret = BCME_NOTREADY;
	}
	return ret;
}
+
+s32
+wl_cfg80211_notify_ifdel(struct net_device *ndev)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ bool rollback_lock = false;
+ s32 index = 0;
+ if (!ndev || !ndev->name) {
+ WL_ERR(("net is NULL\n"));
+ return 0;
+ }
+
+ if (p2p_is_on(wl) && wl->p2p->vif_created &&
+ wl_get_p2p_status(wl, IF_DELETING)) {
+ if (wl->scan_request &&
+ (wl->escan_info.ndev == ndev)) {
+ /* Abort any pending scan requests */
+ wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (!rtnl_is_locked()) {
+ rtnl_lock();
+ rollback_lock = true;
+ }
+ WL_DBG(("ESCAN COMPLETED\n"));
+ wl_notify_escan_complete(wl, ndev, true, false);
+ if (rollback_lock)
+ rtnl_unlock();
+ }
+ WL_ERR(("IF_DEL event called from dongle, net %x, vif name: %s\n",
+ (unsigned int)ndev, wl->p2p->vir_ifname));
+
+ memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
+ index = wl_cfgp2p_find_idx(wl, ndev);
+ wl_to_p2p_bss_ndev(wl, index) = NULL;
+ wl_to_p2p_bss_bssidx(wl, index) = 0;
+ wl->p2p->vif_created = false;
+ wl_cfgp2p_clear_management_ie(wl,
+ index);
+ wl_clr_p2p_status(wl, IF_DELETING);
+ WL_DBG(("index : %d\n", index));
+
+ }
+ /* Wake up any waiting thread */
+ wake_up_interruptible(&wl->netif_change_event);
+
+ return 0;
+}
+
+s32
+wl_cfg80211_is_progress_ifadd(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_ADD))
+ is_progress = 1;
+ return is_progress;
+}
+
+s32
+wl_cfg80211_is_progress_ifchange(void)
+{
+ s32 is_progress = 0;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING))
+ is_progress = 1;
+ return is_progress;
+}
+
+
+s32
+wl_cfg80211_notify_ifchange(void)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ if (wl_get_p2p_status(wl, IF_CHANGING)) {
+ wl_set_p2p_status(wl, IF_CHANGED);
+ wake_up_interruptible(&wl->netif_change_event);
+ }
+ return 0;
+}
+
static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
{
	/*
	 * Fill a firmware wl_scan_params structure from a cfg80211 scan
	 * request: broadcast BSSID, default timings, then the channel list
	 * (as chanspecs) followed by the SSID list.  A NULL request leaves
	 * the defaults, meaning an all-channel broadcast scan.
	 */
	u32 n_ssids;
	u32 n_channels;
	u16 channel;
	chanspec_t chanspec;
	s32 i, offset;
	char *ptr;
	wlc_ssid_t ssid;

	/* -1 timing fields tell the firmware to use its defaults */
	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
	params->bss_type = DOT11_BSSTYPE_ANY;
	params->scan_type = 0;
	params->nprobes = -1;
	params->active_time = -1;
	params->passive_time = -1;
	params->home_time = -1;
	params->channel_num = 0;
	memset(&params->ssid, 0, sizeof(wlc_ssid_t));

	WL_SCAN(("Preparing Scan request\n"));
	WL_SCAN(("nprobes=%d\n", params->nprobes));
	WL_SCAN(("active_time=%d\n", params->active_time));
	WL_SCAN(("passive_time=%d\n", params->passive_time));
	WL_SCAN(("home_time=%d\n", params->home_time));
	WL_SCAN(("scan_type=%d\n", params->scan_type));

	params->nprobes = htod32(params->nprobes);
	params->active_time = htod32(params->active_time);
	params->passive_time = htod32(params->passive_time);
	params->home_time = htod32(params->home_time);

	/* if request is null just exit so it will be all channel broadcast scan */
	if (!request)
		return;

	n_ssids = request->n_ssids;
	n_channels = request->n_channels;

	/* Copy channel array if applicable */
	WL_SCAN(("### List of channelspecs to scan ###\n"));
	if (n_channels > 0) {
		for (i = 0; i < n_channels; i++) {
			/* Build a chanspec: band + bandwidth + sideband */
			chanspec = 0;
			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
			if (request->channels[i]->band == IEEE80211_BAND_2GHZ)
				chanspec |= WL_CHANSPEC_BAND_2G;
			else
				chanspec |= WL_CHANSPEC_BAND_5G;

			if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40) {
				chanspec |= WL_CHANSPEC_BW_20;
				chanspec |= WL_CHANSPEC_CTL_SB_NONE;
			} else {
				chanspec |= WL_CHANSPEC_BW_40;
				if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40PLUS)
					chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
				else
					chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
			}

			params->channel_list[i] = channel;
			params->channel_list[i] &= WL_CHANSPEC_CHAN_MASK;
			params->channel_list[i] |= chanspec;
			WL_SCAN(("Chan : %d, Channel spec: %x \n",
				channel, params->channel_list[i]));
			params->channel_list[i] = wl_chspec_host_to_driver(params->channel_list[i]);
		}
	} else {
		WL_SCAN(("Scanning all channels\n"));
	}

	/* Copy ssid array if applicable */
	WL_SCAN(("### List of SSIDs to scan ###\n"));
	if (n_ssids > 0) {
		/* SSIDs follow the channel list, rounded up to 32-bit
		 * alignment — the caller sized the buffer the same way.
		 */
		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
		offset = roundup(offset, sizeof(u32));
		ptr = (char*)params + offset;
		for (i = 0; i < n_ssids; i++) {
			memset(&ssid, 0, sizeof(wlc_ssid_t));
			ssid.SSID_len = request->ssids[i].ssid_len;
			memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
			if (!ssid.SSID_len)
				WL_SCAN(("%d: Broadcast scan\n", i));
			else
				WL_SCAN(("%d: scan for %s size =%d\n", i,
					ssid.SSID, ssid.SSID_len));
			memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
			ptr += sizeof(wlc_ssid_t);
		}
	} else {
		WL_SCAN(("Broadcast scan\n"));
	}
	/* Adding mask to channel numbers */
	params->channel_num =
		htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
			(n_channels & WL_SCAN_PARAMS_COUNT_MASK));
}
+
+static s32
+wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action)
+{
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size =
+ (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
+ struct wl_iscan_params *params;
+ s32 err = 0;
+
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ /* Allocate space for populating ssids in wl_iscan_params struct */
+ if (n_channels % 2)
+ /* If n_channels is odd, add a padd of u16 */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
+
+ /* Allocate space for populating ssids in wl_iscan_params struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
+ }
+ params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (request != NULL)
+ wl_scan_prep(&params->params, request);
+
+ params->version = htod32(ISCAN_REQ_VERSION);
+ params->action = htod16(action);
+ params->scan_duration = htod16(0);
+
+ if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length is not sufficient\n"));
+ err = -ENOMEM;
+ goto done;
+ }
+ err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (unlikely(err)) {
+ if (err == -EBUSY) {
+ WL_ERR(("system busy : iscan canceled\n"));
+ } else {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ kfree(params);
+done:
+ return err;
+}
+
+static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request)
+{
+ struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 passive_scan;
+ s32 err = 0;
+
+ iscan->state = WL_ISCAN_STATE_SCANING;
+
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_DBG(("error (%d)\n", err));
+ return err;
+ }
+ wl->iscan_kickstart = true;
+ wl_run_iscan(iscan, request, WL_SCAN_ACTION_START);
+ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
+ iscan->timer_on = 1;
+
+ return err;
+}
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+ wl_uint32_list_t *list;
+ s32 err = BCME_OK;
+ if (valid_chan_list == NULL || size <= 0)
+ return -ENOMEM;
+
+ memset(valid_chan_list, 0, size);
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+ if (err != 0) {
+ WL_ERR(("get channels failed with %d\n", err));
+ }
+
+ return err;
+}
static s32
wl_run_escan(struct wl_priv *wl, struct net_device *ndev,
	struct cfg80211_scan_request *request, uint16 action)
{
	/*
	 * Issue an event-driven scan (escan).  Two paths:
	 *  - legacy: build wl_escan_params from the request and send the
	 *    "escan" iovar on ndev;
	 *  - P2P: filter the requested channels against the regulatory
	 *    list, drop DFS channels, and hand off to wl_cfgp2p_escan()
	 *    (SEARCH state for the social channels 1/6/11 or when already
	 *    acting as GO, SCAN state otherwise).
	 */
	s32 err = BCME_OK;
	u32 n_channels;
	u32 n_ssids;
	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
	wl_escan_params_t *params = NULL;
	struct cfg80211_scan_request *scan_request = wl->scan_request;
	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
	u32 num_chans = 0;
	s32 channel;
	s32 n_valid_chan;
	s32 search_state = WL_P2P_DISC_ST_SCAN;
	u32 i, j, n_nodfs = 0;
	u16 *default_chan_list = NULL;
	wl_uint32_list_t *list;
	struct net_device *dev = NULL;
	WL_DBG(("Enter \n"));


	if (!wl->p2p_supported || ((ndev == wl_to_prmry_ndev(wl)) &&
		!p2p_scan(wl))) {
		/* LEGACY SCAN TRIGGER */
		WL_SCAN((" LEGACY E-SCAN START\n"));

		if (request != NULL) {
			n_channels = request->n_channels;
			n_ssids = request->n_ssids;
			/* Channel entries are u16; pad to keep the SSID
			 * array 32-bit aligned when the count is odd.
			 */
			if (n_channels % 2)
				/* If n_channels is odd, add a padd of u16 */
				params_size += sizeof(u16) * (n_channels + 1);
			else
				params_size += sizeof(u16) * n_channels;

			/* Allocate space for populating ssids in wl_iscan_params struct */
			params_size += sizeof(struct wlc_ssid) * n_ssids;
		}
		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
		if (params == NULL) {
			err = -ENOMEM;
			goto exit;
		}

		if (request != NULL)
			wl_scan_prep(&params->params, request);
		params->version = htod32(ESCAN_REQ_VERSION);
		params->action = htod16(action);
		/* sync_id correlates escan result events with this request */
		params->sync_id = htod16(0x1234);
		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
			WL_ERR(("ioctl buffer length not sufficient\n"));
			kfree(params);
			err = -ENOMEM;
			goto exit;
		}
		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
			wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
		if (unlikely(err))
			WL_ERR((" Escan set error (%d)\n", err));
		kfree(params);
	}
	else if (p2p_is_on(wl) && p2p_scan(wl)) {
		/* P2P SCAN TRIGGER */
		s32 _freq = 0;
		n_nodfs = 0;
		if (scan_request && scan_request->n_channels) {
			num_chans = scan_request->n_channels;
			WL_SCAN((" chann number : %d\n", num_chans));
			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
				GFP_KERNEL);
			if (default_chan_list == NULL) {
				WL_ERR(("channel list allocation failed \n"));
				err = -ENOMEM;
				goto exit;
			}
			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
				list = (wl_uint32_list_t *) chan_buf;
				n_valid_chan = dtoh32(list->count);
				for (i = 0; i < num_chans; i++)
				{
					_freq = scan_request->channels[i]->center_freq;
					channel = ieee80211_frequency_to_channel(_freq);
					/* remove DFS channels */
					if (channel < 52 || channel > 140) {
						for (j = 0; j < n_valid_chan; j++) {
							/* allows only supported channel on
							 * current reguatory
							 */
							if (channel == (dtoh32(list->element[j])))
								default_chan_list[n_nodfs++] =
									channel;
						}
					}

				}
			}
			if (num_chans == 3 && (
				(default_chan_list[0] == SOCIAL_CHAN_1) &&
				(default_chan_list[1] == SOCIAL_CHAN_2) &&
				(default_chan_list[2] == SOCIAL_CHAN_3))) {
				/* SOCIAL CHANNELS 1, 6, 11 */
				search_state = WL_P2P_DISC_ST_SEARCH;
				WL_INFO(("P2P SEARCH PHASE START \n"));
			} else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) &&
				(wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP)) {
				/* If you are already a GO, then do SEARCH only */
				WL_INFO(("Already a GO. Do SEARCH Only"));
				search_state = WL_P2P_DISC_ST_SEARCH;
				num_chans = n_nodfs;

			} else {
				WL_INFO(("P2P SCAN STATE START \n"));
				num_chans = n_nodfs;
			}

		}
		/* default_chan_list may be NULL with num_chans == 0 when the
		 * scan_request had no channels; kfree(NULL) below is a no-op.
		 */
		err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list,
			search_state, action,
			wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
		kfree(default_chan_list);
	}
exit:
	if (unlikely(err)) {
		WL_ERR(("error (%d)\n", err));
	}
	return err;
}
+
+
+static s32
+wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = BCME_OK;
+ s32 passive_scan;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+ wl->escan_info.ndev = ndev;
+ wl->escan_info.wiphy = wiphy;
+ wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = wl->active_scan ? 0 : 1;
+ err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan), false);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ results = (wl_scan_results_t *) wl->escan_info.escan_buf;
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START);
+ return err;
+}
+
static s32
__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_scan_request *request,
	struct cfg80211_ssid *this_ssid)
{
	/*
	 * Core scan dispatcher.  Depending on driver configuration and the
	 * request contents, runs one of: iscan (incremental), escan
	 * (event-driven, legacy or P2P flavour), or a plain WLC_SCAN ioctl
	 * (IBSS single-SSID case via this_ssid).  Exactly one of request /
	 * this_ssid is used.  On success the async scan is left running and
	 * wl->scan_request records it; on failure SCANNING is cleared.
	 */
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct cfg80211_ssid *ssids;
	struct wl_scan_req *sr = wl_to_sr(wl);
	struct ether_addr primary_mac;
	wpa_ie_fixed_t *wps_ie;
	s32 passive_scan;
	bool iscan_req;
	bool escan_req = false;
	bool p2p_ssid;
	s32 err = 0;
	s32 i;
	u32 wpsie_len = 0;
	u8 wpsie[IE_MAX_LEN];

	/* If scan req comes for p2p0, send it over primary I/F
	 * Scan results will be delivered corresponding to cfg80211_scan_request
	 */
	if (ndev == wl->p2p_net) {
		ndev = wl_to_prmry_ndev(wl);
	}

	WL_DBG(("Enter wiphy (%p)\n", wiphy));
	if (wl_get_drv_status_all(wl, SCANNING)) {
		WL_ERR(("Scanning already\n"));
		return -EAGAIN;
	}
	if (wl_get_drv_status(wl, SCAN_ABORTING, ndev)) {
		WL_ERR(("Scanning being aborted\n"));
		return -EAGAIN;
	}
	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
		WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
		return -EOPNOTSUPP;
	}

	/* Arm scan timeout timer */
	mod_timer(&wl->scan_timeout, jiffies + WL_SCAN_TIMER_INTERVAL_MS * HZ / 1000);
	iscan_req = false;
	if (request) {		/* scan bss */
		ssids = request->ssids;
		/* iscan only handles broadcast (no specific SSID) scans */
		if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) {
			iscan_req = true;
		} else if (wl->escan_on) {
			escan_req = true;
			p2p_ssid = false;
			/* A "DIRECT-" SSID in the request marks a P2P scan */
			for (i = 0; i < request->n_ssids; i++) {
				if (ssids[i].ssid_len && IS_P2P_SSID(ssids[i].ssid)) {
					p2p_ssid = true;
					break;
				}
			}
			if (p2p_ssid) {
				if (wl->p2p_supported) {
					/* p2p scan trigger */
					if (p2p_on(wl) == false) {
						/* p2p on at the first time */
						p2p_on(wl) = true;
						wl_cfgp2p_set_firm_p2p(wl);
						get_primary_mac(wl, &primary_mac);
						wl_cfgp2p_generate_bss_mac(&primary_mac,
							&wl->p2p->dev_addr, &wl->p2p->int_addr);
					}
					p2p_scan(wl) = true;
				}
			} else {
				/* legacy scan trigger
				 * So, we have to disable p2p discovery if p2p discovery is on
				 */
				if (wl->p2p_supported) {
					p2p_scan(wl) = false;
					/* If Netdevice is not equals to primary and p2p is on
					 * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
					 */
					if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl)))
						p2p_scan(wl) = true;

					if (p2p_scan(wl) == false) {
						if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
							err = wl_cfgp2p_discover_enable_search(wl,
								false);
							if (unlikely(err)) {
								goto scan_out;
							}

						}
					}
				}
				if (!wl->p2p_supported || !p2p_scan(wl)) {
					if (ndev == wl_to_prmry_ndev(wl)) {
						/* find the WPSIE */
						/* Forward any WPS IE from the
						 * request into probe requests.
						 */
						memset(wpsie, 0, sizeof(wpsie));
						if ((wps_ie = wl_cfgp2p_find_wpsie(
							(u8 *)request->ie,
							request->ie_len)) != NULL) {
							wpsie_len =
								wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
							memcpy(wpsie, wps_ie, wpsie_len);
						} else {
							wpsie_len = 0;
						}
						err = wl_cfgp2p_set_management_ie(wl, ndev, -1,
							VNDR_IE_PRBREQ_FLAG, wpsie, wpsie_len);
						if (unlikely(err)) {
							goto scan_out;
						}
					}
				}
			}
		}
	} else {		/* scan in ibss */
		/* we don't do iscan in ibss */
		ssids = this_ssid;
	}
	wl->scan_request = request;
	wl_set_drv_status(wl, SCANNING, ndev);
	if (iscan_req) {
		err = wl_do_iscan(wl, request);
		if (likely(!err))
			return err;
		else
			goto scan_out;
	} else if (escan_req) {
		if (wl->p2p_supported) {
			if (p2p_on(wl) && p2p_scan(wl)) {

				err = wl_cfgp2p_enable_discovery(wl, ndev,
					request->ie, request->ie_len);

				if (unlikely(err)) {
					goto scan_out;
				}
			}
		}
		err = wl_do_escan(wl, wiphy, ndev, request);
		if (likely(!err))
			return err;
		else
			goto scan_out;


	} else {
		/* Synchronous single-SSID scan via WLC_SCAN (IBSS path) */
		memset(&sr->ssid, 0, sizeof(sr->ssid));
		sr->ssid.SSID_len =
			min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
		if (sr->ssid.SSID_len) {
			memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
			sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
			WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n",
				sr->ssid.SSID, sr->ssid.SSID_len));
		} else {
			WL_SCAN(("Broadcast scan\n"));
		}
		WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
		passive_scan = wl->active_scan ? 0 : 1;
		err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
			&passive_scan, sizeof(passive_scan), false);
		if (unlikely(err)) {
			WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
			goto scan_out;
		}
		err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid,
			sizeof(sr->ssid), false);
		if (err) {
			if (err == -EBUSY) {
				WL_ERR(("system busy : scan for \"%s\" "
					"canceled\n", sr->ssid.SSID));
			} else {
				WL_ERR(("WLC_SCAN error (%d)\n", err));
			}
			goto scan_out;
		}
	}

	return 0;

scan_out:
	/* Roll back the state set above so a later scan can proceed */
	wl_clr_drv_status(wl, SCANNING, ndev);
	wl->scan_request = NULL;
	return err;
}
+
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ WL_DBG(("Enter \n"));
+ CHECK_SYS_UP(wl);
+
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+ s32 err = 0;
+
+ err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
+ u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+ retry = htod32(retry);
+ err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), false);
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
+ }
+ return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ WL_DBG(("Enter\n"));
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (wl->conf->rts_threshold != wiphy->rts_threshold)) {
+ wl->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, wl->conf->rts_threshold);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (wl->conf->frag_threshold != wiphy->frag_threshold)) {
+ wl->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, wl->conf->frag_threshold);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (wl->conf->retry_long != wiphy->retry_long)) {
+ wl->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, wl->conf->retry_long, true);
+ if (!err)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (wl->conf->retry_short != wiphy->retry_short)) {
+ wl->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, wl->conf->retry_short, false);
+ if (!err) {
+ return err;
+ }
+ }
+
+ return err;
+}
+
static s32
wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
	struct cfg80211_ibss_params *params)
{
	/*
	 * cfg80211 join_ibss handler: look for an existing IBSS with the
	 * requested SSID (scanning for it if needed), then issue
	 * WLC_SET_SSID to join or start it.  A caller-specified BSSID is
	 * rejected as unsupported.
	 */
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct cfg80211_bss *bss;
	struct ieee80211_channel *chan;
	struct wl_join_params join_params;
	struct cfg80211_ssid ssid;
	s32 scan_retry = 0;
	s32 err = 0;
	bool rollback_lock = false;

	WL_TRACE(("In\n"));
	CHECK_SYS_UP(wl);
	if (params->bssid) {
		WL_ERR(("Invalid bssid\n"));
		return -EOPNOTSUPP;
	}
	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
	if (!bss) {
		/* Not in the cfg80211 BSS cache: scan for it, retrying
		 * while the firmware reports busy.
		 */
		memcpy(ssid.ssid, params->ssid, params->ssid_len);
		ssid.ssid_len = params->ssid_len;
		do {
			if (unlikely
				(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
				 -EBUSY)) {
				wl_delay(150);
			} else {
				break;
			}
		} while (++scan_retry < WL_SCAN_RETRY_MAX);
		/* to allow scan_inform to propagate to cfg80211 plane */
		if (rtnl_is_locked()) {
			rtnl_unlock();
			rollback_lock = true;
		}

		/* wait 4 seconds till scan done.... */
		schedule_timeout_interruptible(4 * HZ);
		if (rollback_lock)
			rtnl_lock();
		bss = cfg80211_get_ibss(wiphy, NULL,
			params->ssid, params->ssid_len);
	}
	if (bss) {
		/* Someone else already started this IBSS; we just join */
		wl->ibss_starter = false;
		WL_DBG(("Found IBSS\n"));
	} else {
		wl->ibss_starter = true;
	}
	chan = params->channel;
	if (chan)
		wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
	/*
	 * Join with specific BSSID and cached SSID
	 * If SSID is zero join based on BSSID only
	 */
	memset(&join_params, 0, sizeof(join_params));
	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
		params->ssid_len);
	join_params.ssid.SSID_len = htod32(params->ssid_len);
	/* NOTE(review): params->bssid is always NULL here (rejected above),
	 * so this branch is dead and the bssid is always zeroed.
	 */
	if (params->bssid)
		memcpy(&join_params.params.bssid, params->bssid,
			ETHER_ADDR_LEN);
	else
		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);

	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
		sizeof(join_params), false);
	if (unlikely(err)) {
		WL_ERR(("Error (%d)\n", err));
		return err;
	}
	return err;
}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ wl_link_down(wl);
+
+ return err;
+}
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ val = WPA2_AUTH_PSK| WPA2_AUTH_UNSPECIFIED;
+ else
+ val = WPA_AUTH_DISABLED;
+
+ if (is_wps_conn(sme))
+ val = WPA_AUTH_DISABLED;
+
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+ WL_DBG((" * wl_set_wpa_version, set wpa_auth"
+ " to WPA_AUTH_WAPI 0x400"));
+ val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+ }
+#endif
+ WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wpa_auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->wpa_versions = sme->crypto.wpa_versions;
+ return err;
+}
+
#ifdef BCMWAPI_WPI
static s32
wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
{
	/*
	 * Push the WAPI IE from a connect request down to the firmware via
	 * the "wapiie" iovar; a no-op unless the request selects WAPI v1.
	 */
	struct wl_priv *wl = wlcfg_drv_priv;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
	s32 err = 0;

	WL_DBG((" %s \n", __FUNCTION__));

	if (!(sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)) {
		WL_DBG((" * skip \n"));
		return err;
	}

	err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie,
		sme->ie_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
	if (unlikely(err))
		WL_ERR(("===> set_wapi_ie Error (%d)\n", err));

	return err;
}
#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ switch (sme->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ val = 0;
+ WL_DBG(("open system\n"));
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ val = 1;
+ WL_DBG(("shared key\n"));
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ val = 2;
+ WL_DBG(("automatic\n"));
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ WL_DBG(("network eap\n"));
+ default:
+ val = 2;
+ WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+ break;
+ }
+
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->auth_type = sme->auth_type;
+ return err;
+}
+
+/* Program the firmware "wsec" (wireless security) bitmask from the pairwise
+ * and group cipher suites in the connect request, then cache both suites in
+ * the interface profile. WPS connections force wsec = 4 (SES/OW enabled);
+ * under BCMWAPI_WPI an SMS4 group cipher selects the WAPI path instead.
+ * Returns 0, a negative iovar error, or -EINVAL for an unknown cipher.
+ */
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 pval = 0;
+ s32 gval = 0;
+ s32 err = 0;
+#ifdef BCMWAPI_WPI
+ s32 val = 0;
+#endif
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ /* map the pairwise (unicast) cipher suite to its wsec flag */
+ if (sme->crypto.n_ciphers_pairwise) {
+ switch (sme->crypto.ciphers_pairwise[0]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ pval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ val = SMS4_ENABLED;
+ pval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ }
+ /* map the group (multicast) cipher suite to its wsec flag */
+ if (sme->crypto.cipher_group) {
+ switch (sme->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ gval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ gval = AES_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ gval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ val = SMS4_ENABLED;
+ gval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+
+ WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+ if (is_wps_conn(sme)) {
+ /* WPS handshake: wsec = 4 regardless of advertised ciphers */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+ } else {
+#ifdef BCMWAPI_WPI
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+ WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
+ } else {
+#endif
+ WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec",
+ pval | gval, bssidx);
+#ifdef BCMWAPI_WPI
+ }
+#endif
+ }
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ /* cache the negotiated suites for later key handling */
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+ sec->cipher_group = sme->crypto.cipher_group;
+
+ return err;
+}
+
+/* Refine the "wpa_auth" firmware setting according to the AKM suite in the
+ * connect request. Reads back the coarse wpa_auth value set earlier by
+ * wl_set_wpa_version(), narrows it to the specific PSK/802.1X (or WAPI)
+ * variant, writes it back, and caches the AKM suite in the profile.
+ * Returns 0, a negative iovar error, or -EINVAL for an unknown suite.
+ */
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ s32 val = 0;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (sme->crypto.n_akm_suites) {
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ return err;
+ }
+ /* WPA1 family previously selected */
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ /* WPA2/RSN family previously selected */
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+#ifdef BCMWAPI_WPI
+ else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_WAPI_CERT:
+ val = WAPI_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_WAPI_PSK:
+ val = WAPI_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
+ }
+ }
+#endif
+ WL_DBG(("setting wpa_auth to %d\n", val));
+
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("could not set wpa_auth (%d)\n", err));
+ return err;
+ }
+ }
+ /* NOTE(review): akm_suites[0] is read here even when n_akm_suites is 0;
+  * the array itself exists, but the cached value is then meaningless —
+  * confirm callers always populate at least one AKM suite.
+  */
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec->wpa_auth = sme->crypto.akm_suites[0];
+
+ return err;
+}
+
+/* Plumb a static WEP (or, with BCMWAPI_WPI, SMS4) key supplied inline in the
+ * connect request. Only acts when no WPA/WPA2/WAPI version was negotiated and
+ * the cached pairwise cipher is a WEP/SMS4 suite; otherwise does nothing.
+ * Also flips the auth mode to shared-key when it was left at open-system.
+ * Returns 0, a negative iovar error, or -EINVAL on bad key length/algorithm.
+ */
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct wl_security *sec;
+ struct wl_wsec_key key;
+ s32 val;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key len (%d)\n", sme->key_len));
+ if (sme->key_len) {
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise));
+ /* static keys apply only to non-WPA, WEP/SMS4 configurations */
+ if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+#ifdef BCMWAPI_WPI
+ NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
+#else
+ NL80211_WPA_VERSION_2)) &&
+#endif
+ (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+#ifdef BCMWAPI_WPI
+ WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
+#else
+ WLAN_CIPHER_SUITE_WEP104)))
+#endif
+ {
+ memset(&key, 0, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = WL_PRIMARY_KEY;
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo));
+ /* NOTE(review): this debug print exposes raw key material in the
+  * log and key.data is not guaranteed NUL-terminated — consider
+  * removing it in a follow-up change.
+  */
+ WL_DBG(("key \"%s\"\n", key.data));
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
+ WL_DBG(("set auth_type to shared key\n"));
+ val = 1; /* shared key */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set auth failed (%d)\n", err));
+ return err;
+ }
+ }
+ }
+ }
+ return err;
+}
+
+/* cfg80211 .connect hook: perform a full association sequence.
+ * Order matters and mirrors the firmware's expectations:
+ *  1. cancel any in-flight scan, clear the cached BSSID;
+ *  2. install WPS/P2P/WPA/RSN IEs into the assoc request;
+ *  3. program security (WAPI IE, wpa version, auth type, ciphers, AKM,
+ *     optional static WEP key);
+ *  4. try the "join" iovar with extended scan parameters, falling back to
+ *     the legacy WLC_SET_SSID ioctl when the firmware lacks "join".
+ * Returns 0 on success or a negative error code.
+ */
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct ieee80211_channel *chan = sme->channel;
+ wl_extjoin_params_t *ext_join_params;
+ struct wl_join_params join_params;
+ size_t join_params_size;
+ s32 err = 0;
+ wpa_ie_fixed_t *wpa_ie;
+ wpa_ie_fixed_t *wps_ie;
+ bcm_tlv_t *wpa2_ie;
+ u8* wpaie = 0;
+ u32 wpaie_len = 0;
+ u32 wpsie_len = 0;
+ u32 chan_cnt = 0;
+ u8 wpsie[IE_MAX_LEN];
+ struct ether_addr bssid;
+ WL_DBG(("In\n"));
+ CHECK_SYS_UP(wl);
+
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ /* Clean BSSID */
+ bzero(&bssid, sizeof(bssid));
+ wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+ if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) {
+ /* we only allow to connect using virtual interface in case of P2P */
+ if (p2p_is_on(wl) && is_wps_conn(sme)) {
+ WL_DBG(("ASSOC1 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ /* Have to apply WPS IE + P2P IE in assoc req frame */
+ wl_cfgp2p_set_management_ie(wl, dev,
+ wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG,
+ wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie,
+ wl_to_p2p_bss_saved_ie(wl,
+ P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ } else if (p2p_is_on(wl) && (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
+ /* This is the connect req after WPS is done [credentials exchanged]
+ * currently identified with WPA_VERSION_2 .
+ * Update the previously set IEs with
+ * the newly received IEs from Supplicant. This will remove the WPS IE from
+ * the Assoc Req.
+ */
+ WL_DBG(("ASSOC2 p2p index : %d sme->ie_len %d\n",
+ wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
+ wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev),
+ VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+ }
+
+ } else if (dev == wl_to_prmry_ndev(wl)) {
+ /* Infrastructure STA path: extract security IEs from the request
+  * and hand them to the firmware before joining.
+  */
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if (wpa_ie != NULL || wpa2_ie != NULL) {
+ /* prefer the WPA IE when both are present */
+ wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+ wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+ wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+ wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ } else {
+ /* no security IE: clear any previously installed one */
+ wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ }
+
+ /* find the WPSIE */
+ memset(wpsie, 0, sizeof(wpsie));
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie,
+ sme->ie_len)) != NULL) {
+ wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN;
+ memcpy(wpsie, wps_ie, wpsie_len);
+ } else {
+ wpsie_len = 0;
+ }
+ err = wl_cfgp2p_set_management_ie(wl, dev, -1,
+ VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len);
+ if (unlikely(err)) {
+ return err;
+ }
+ }
+ if (unlikely(!sme->ssid)) {
+ WL_ERR(("Invalid ssid\n"));
+ return -EOPNOTSUPP;
+ }
+ if (chan) {
+ /* a specific channel was requested: restrict the join scan to it */
+ wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ chan_cnt = 1;
+ WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
+ chan->center_freq));
+ } else
+ wl->channel = 0;
+#ifdef BCMWAPI_WPI
+ WL_DBG(("1. enable wapi auth\n"));
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+ WL_DBG(("2. set wapi ie \n"));
+ err = wl_set_set_wapi_ie(dev, sme);
+ if (unlikely(err))
+ return err;
+ } else
+ WL_DBG(("2. Not wapi ie \n"));
+#endif
+ WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+ WL_DBG(("3. set wapi version \n"));
+ err = wl_set_wpa_version(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid wpa_version\n"));
+ return err;
+ }
+#ifdef BCMWAPI_WPI
+ /* WAPI connections skip the auth-type setting entirely */
+ if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
+ WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
+ else {
+ WL_DBG(("4. wl_set_auth_type\n"));
+#endif
+ err = wl_set_auth_type(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid auth type\n"));
+ return err;
+ }
+#ifdef BCMWAPI_WPI
+ }
+#endif
+
+ err = wl_set_set_cipher(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid ciper\n"));
+ return err;
+ }
+
+ err = wl_set_key_mgmt(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid key mgmt\n"));
+ return err;
+ }
+
+ err = wl_set_set_sharedkey(dev, sme);
+ if (unlikely(err)) {
+ WL_ERR(("Invalid shared key\n"));
+ return err;
+ }
+
+ /*
+ * Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ goto exit;
+ }
+ ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+ ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+ ext_join_params->scan.nprobes = 2;
+ /* increate dwell time to receive probe response or detect Beacon
+ * from target AP at a noisy air only during connect command
+ */
+ ext_join_params->scan.active_time = WL_SCAN_ACTIVE_TIME*3;
+ ext_join_params->scan.passive_time = WL_SCAN_PASSIVE_TIME*3;
+ ext_join_params->scan.home_time = -1;
+
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ if (chan_cnt) {
+ /* build a 20 MHz chanspec for the requested channel */
+ u16 channel, band, bw, ctl_sb;
+ chanspec_t chspec;
+ channel = wl->channel;
+ band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+ }
+ ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING, dev);
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, wl_cfgp2p_find_idx(wl, dev), &wl->ioctl_buf_sync);
+ kfree(ext_join_params);
+ if (err) {
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ if (err == BCME_UNSUPPORTED) {
+ /* older firmware: fall back to the legacy join ioctl */
+ WL_DBG(("join iovar is not supported\n"));
+ goto set_ssid;
+ } else
+ WL_ERR(("error (%d)\n", err));
+ } else
+ goto exit;
+
+set_ssid:
+ /* legacy fallback path using WLC_SET_SSID */
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
+
+ join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ if (sme->bssid)
+ memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+ else
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+ wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
+ WL_DBG(("join_param_size %d\n", join_params_size));
+
+ if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len));
+ }
+ wl_set_drv_status(wl, CONNECTING, dev);
+ err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+ if (err) {
+ WL_ERR(("error (%d)\n", err));
+ wl_clr_drv_status(wl, CONNECTING, dev);
+ }
+exit:
+ return err;
+}
+
+/* cfg80211 .disconnect hook: if the profile shows an active association,
+ * cancel any in-flight scan and issue WLC_DISASSOC toward the cached BSSID
+ * with the given reason code. Returns 0 or a negative ioctl error.
+ */
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scbval;
+ bool act = false;
+ s32 err = 0;
+ u8 *curbssid;
+ WL_ERR(("Reason %d\n", reason_code));
+ CHECK_SYS_UP(wl);
+ act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT);
+ curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID);
+ if (act) {
+ /*
+ * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+ */
+ if (wl->scan_request) {
+ wl_notify_escan_complete(wl, dev, true, true);
+ }
+ wl_set_drv_status(wl, DISCONNECTING, dev);
+ scbval.val = reason_code;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (unlikely(err)) {
+ /* disassoc failed: roll back the DISCONNECTING status */
+ wl_clr_drv_status(wl, DISCONNECTING, dev);
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ }
+
+ return err;
+}
+
+/* cfg80211 .set_tx_power hook: validate the requested setting, make sure the
+ * software radio switch is in a known state, then program "qtxpower".
+ * NOTE(review): dbm is treated as milliwatts when fed to bcm_mw_to_qdbm()
+ * despite the parameter name, and the switch has no default case — both
+ * mirror the original behavior; confirm before changing.
+ * Returns 0, -EINVAL for a negative level, or a negative ioctl/iovar error.
+ */
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ u16 txpwrmw;
+ s32 err = 0;
+ s32 disable = 0;
+
+ CHECK_SYS_UP(wl);
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+ return -EINVAL;
+ }
+ break;
+ case NL80211_TX_POWER_FIXED:
+ if (dbm < 0) {
+ WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+ return -EINVAL;
+ }
+ break;
+ }
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ disable = htod32(disable);
+ err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ return err;
+ }
+
+ /* clamp to the 16-bit range accepted by the conversion helper */
+ if (dbm > 0xffff)
+ txpwrmw = 0xffff;
+ else
+ txpwrmw = (u16) dbm;
+ err = wldev_iovar_setint(ndev, "qtxpower",
+ (s32) (bcm_mw_to_qdbm(txpwrmw)));
+ if (unlikely(err)) {
+ WL_ERR(("qtxpower error (%d)\n", err));
+ return err;
+ }
+ /* remember the requested level in the driver configuration */
+ wl->conf->tx_power = dbm;
+
+ return err;
+}
+
+/* cfg80211 .get_tx_power hook: read "qtxpower" from the firmware, strip the
+ * override flag and convert the qdBm value for cfg80211.
+ * Returns 0 with *dbm filled in, or a negative iovar error.
+ */
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 txpwrdbm;
+ u8 result;
+ s32 err = 0;
+
+ CHECK_SYS_UP(wl);
+ err = wldev_iovar_getint(ndev, "qtxpower", &txpwrdbm);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
+ *dbm = (s32) bcm_qdbm_to_mw(result);
+
+ return err;
+}
+
+/* cfg80211 .set_default_key hook: when WEP is active on the bsscfg, select
+ * key_idx as the primary (transmit) key via WLC_SET_KEY_PRIMARY. For other
+ * security modes this is a no-op. Returns 0 or a negative error.
+ */
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool unicast, bool multicast)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ u32 index;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ if (wsec & WEP_ENABLED) {
+ /* Just select a new current key */
+ index = (u32) key_idx;
+ index = htod32(index);
+ err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+ sizeof(index), true);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ }
+ }
+ return err;
+}
+
+/* Install (or, for a zero-length key, delete) a per-peer pairwise key for
+ * mac_addr via the "wsec_key" iovar. Handles the TKIP MIC-key byte swap that
+ * wpa_supplicant applies in BSS mode and loads the receive IV when supplied.
+ * Fix: the return values of both wldev_iovar_setbuf_bsscfg() calls were
+ * previously discarded, so "err" stayed 0 and firmware failures were
+ * silently ignored; they are now assigned and propagated.
+ * Returns 0, -EINVAL for a bad length/cipher, or a negative iovar error.
+ */
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_wsec_key key;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+ s32 mode = wl_get_mode_by_netdev(wl, dev);
+ memset(&key, 0, sizeof(key));
+ key.index = (u32) key_idx;
+
+ if (!ETHER_ISMULTI(mac_addr))
+ memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+ key.len = (u32) params->key_len;
+
+ /* check for key index change */
+ if (key.len == 0) {
+ /* key delete */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("key delete error (%d)\n", err));
+ return err;
+ }
+ } else {
+ if (key.len > sizeof(key.data)) {
+ WL_ERR(("Invalid key length (%d)\n", key.len));
+ return -EINVAL;
+ }
+ WL_DBG(("Setting the key index %d\n", key.index));
+ memcpy(key.data, params->key, key.len);
+
+ /* wpa_supplicant swaps the TX/RX MIC halves of a TKIP key in
+  * BSS mode; swap them back for the firmware.
+  */
+ if ((mode == WL_MODE_BSS) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ u8 keybuf[8];
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
+
+ /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+ if (params->seq && params->seq_len == 6) {
+ /* rx iv */
+ u8 *ivptr;
+ ivptr = (u8 *) params->seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = true;
+ }
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+ swap_key_from_BE(&key);
+#if defined(CONFIG_WIRELESS_EXT)
+ /* let pending 802.1x frames drain before installing the key */
+ dhd_wait_pend8021x(dev);
+#endif
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+ }
+ return err;
+}
+
+/* cfg80211 .add_key hook: install a group (mac_addr == NULL) or pairwise
+ * (mac_addr set, delegated to wl_add_keyext) key, then OR the matching
+ * cipher flag into the bsscfg "wsec" bitmask.
+ * Fix: the return value of wl_add_keyext() was previously discarded, so
+ * pairwise-key failures were never reported to cfg80211; it is now
+ * captured and propagated before the wsec update.
+ * Returns 0, -EINVAL for a bad key/cipher, or a negative iovar error.
+ */
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wl_wsec_key key;
+ s32 val = 0;
+ s32 wsec = 0;
+ s32 err = 0;
+ u8 keybuf[8];
+ s32 bssidx = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 mode = wl_get_mode_by_netdev(wl, dev);
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ if (mac_addr) {
+ /* per-peer pairwise key: propagate failures instead of
+  * silently ignoring them as before
+  */
+ err = wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+ if (unlikely(err)) {
+ WL_ERR(("wl_add_keyext error (%d)\n", err));
+ return err;
+ }
+ goto exit;
+ }
+ memset(&key, 0, sizeof(key));
+
+ key.len = (u32) params->key_len;
+ key.index = (u32) key_idx;
+
+ if (unlikely(key.len > sizeof(key.data))) {
+ WL_ERR(("Too long key length (%u)\n", key.len));
+ return -EINVAL;
+ }
+ memcpy(key.data, params->key, key.len);
+
+ key.flags = WL_PRIMARY_KEY;
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ val = TKIP_ENABLED;
+ /* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+ if (mode == WL_MODE_BSS) {
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case WLAN_CIPHER_SUITE_SMS4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ val = SMS4_ENABLED;
+ break;
+#endif /* BCMWAPI_WPI */
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
+ }
+
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ return err;
+ }
+
+exit:
+ /* merge the new cipher flag into the current wsec bitmask */
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("get wsec error (%d)\n", err));
+ return err;
+ }
+
+ wsec |= val;
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("set wsec error (%d)\n", err));
+ return err;
+ }
+
+ return err;
+}
+
+/* cfg80211 .del_key hook: clear the key at key_idx by writing a zeroed
+ * wsec_key record with CRYPTO_ALGO_OFF.
+ * Fix: the wldev_iovar_setbuf_bsscfg() return value was previously
+ * discarded, leaving "err" at 0 and the whole error-handling branch dead;
+ * it is now assigned so failures propagate to cfg80211.
+ * Returns 0 or a negative iovar error.
+ */
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("Enter\n"));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+
+ key.flags = WL_PRIMARY_KEY;
+ key.algo = CRYPTO_ALGO_OFF;
+ key.index = (u32) key_idx;
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ /* Set the new key/index */
+ swap_key_from_BE(&key);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ if (unlikely(err)) {
+ if (err == -EINVAL) {
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ WL_DBG(("invalid key index (%d)\n", key_idx));
+ }
+ } else {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ }
+ return err;
+ }
+ return err;
+}
+
+/* cfg80211 .get_key hook: report the cipher currently active on the bsscfg
+ * for key_idx via the supplied callback. No key material is returned (the
+ * local key record is zeroed, so params.key_len is always 0).
+ * Fixes: (1) the "wsec" iovar result was discarded, so err stayed 0 and
+ * wsec could be used uninitialized on failure — it is now assigned;
+ * (2) under BCMWAPI_WPI the case label was WLAN_CIPHER_SUITE_SMS4 in a
+ * switch over wsec flag bits and wrote key.algo instead of reporting the
+ * cipher — it is now "case SMS4_ENABLED" setting params.cipher.
+ * Returns 0, -EINVAL for an unrecognized wsec value, or a negative error.
+ */
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie, struct key_params * params))
+{
+ struct key_params params;
+ struct wl_wsec_key key;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_security *sec;
+ s32 wsec;
+ s32 err = 0;
+ s32 bssidx = wl_cfgp2p_find_idx(wl, dev);
+
+ WL_DBG(("key index (%d)\n", key_idx));
+ CHECK_SYS_UP(wl);
+ memset(&key, 0, sizeof(key));
+ key.index = key_idx;
+ swap_key_to_BE(&key);
+ memset(&params, 0, sizeof(params));
+ /* key was just zeroed, so key.len is 0 and no key bytes are copied */
+ params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+ memcpy(params.key, key.data, params.key_len);
+
+ err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+ return err;
+ }
+ switch (wsec & ~SES_OW_ENABLED) {
+ case WEP_ENABLED:
+ /* distinguish WEP40/WEP104 from the cached profile */
+ sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP40;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+ params.cipher = WLAN_CIPHER_SUITE_WEP104;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ }
+ break;
+ case TKIP_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case AES_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+#ifdef BCMWAPI_WPI
+ case SMS4_ENABLED:
+ params.cipher = WLAN_CIPHER_SUITE_SMS4;
+ WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+ break;
+#endif
+ default:
+ WL_ERR(("Invalid algo (0x%x)\n", wsec));
+ return -EINVAL;
+ }
+
+ callback(cookie, &params);
+ return err;
+}
+
+/* cfg80211 .set_default_mgmt_key hook: management-frame (IGTK) default keys
+ * are not supported by this driver.
+ */
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+ WL_INFO(("Not supported\n"));
+ return -EOPNOTSUPP;
+}
+
+/* cfg80211 .get_station hook. In AP mode, query per-STA info ("sta_info")
+ * and report inactive/connected time. In STA mode, verify the association,
+ * then report tx bitrate (WLC_GET_RATE, units of 500 kbit/s) and RSSI
+ * (WLC_GET_RSSI); on failure, force a cfg80211 disconnect so the stack and
+ * firmware state stay in sync. Returns 0 or a negative error.
+ */
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ scb_val_t scb_val;
+ s32 rssi;
+ s32 rate;
+ s32 err = 0;
+ sta_info_t *sta;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+ dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ CHECK_SYS_UP(wl);
+ if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) {
+ err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+ ETHER_ADDR_LEN, wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ return err;
+ }
+ sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sta = (sta_info_t *)wl->ioctl_buf;
+ /* sta_info fields arrive in device byte order */
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->connected_time = sta->in;
+ }
+ WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+ sta->idle * 1000));
+#endif
+ } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS) {
+ u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID);
+ if (!wl_get_drv_status(wl, CONNECTED, dev) ||
+ (dhd_is_associated(dhd, NULL) == FALSE)) {
+ WL_ERR(("NOT assoc\n"));
+ err = -ENODEV;
+ goto get_station_err;
+ }
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ /* mismatch is logged but not treated as fatal */
+ WL_ERR(("Wrong Mac address: "MACSTR" != "MACSTR"\n",
+ MAC2STR(mac), MAC2STR(curmacp)));
+ }
+
+ /* Report the current tx rate */
+ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
+ /* firmware reports in 500 kbit/s units; cfg80211 wants 100 kbit/s */
+ rate = dtoh32(rate);
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+ }
+
+ memset(&scb_val, 0, sizeof(scb_val));
+ scb_val.val = 0;
+ err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+ sizeof(scb_val_t), false);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
+ }
+ rssi = dtoh32(scb_val.val);
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+
+get_station_err:
+ if (err) {
+ /* Disconnect due to zero BSSID or error to get RSSI */
+ WL_ERR(("force cfg80211_disconnected\n"));
+ wl_clr_drv_status(wl, CONNECTED, dev);
+ cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+ wl_link_down(wl);
+ }
+ }
+
+ return err;
+}
+
+/* cfg80211 .set_power_mgmt hook. Note that the "enabled" argument is
+ * deliberately ignored: power save is forced to PM_OFF (see the in-line
+ * comment below), and the request is a no-op on the P2P control netdev.
+ * Returns 0 or a negative ioctl error (-ENODEV is only logged as debug).
+ */
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, s32 timeout)
+{
+ s32 pm;
+ s32 err = 0;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ CHECK_SYS_UP(wl);
+
+ if (wl->p2p_net == dev) {
+ return err;
+ }
+
+ pm = PM_OFF; /* enabled ? PM_FAST : PM_OFF; */
+ /* Do not enable the power save after assoc if it is p2p interface */
+ if (wl->p2p && wl->p2p->vif_created) {
+ WL_DBG(("Do not enable the power save for p2p interfaces even after assoc\n"));
+ pm = PM_OFF;
+ }
+ pm = htod32(pm);
+ WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+ err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+ if (unlikely(err)) {
+ if (err == -ENODEV)
+ WL_DBG(("net_device is not ready yet\n"));
+ else
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ return err;
+}
+
+/* Return the 1-based position of the most significant set bit in bit16,
+ * or 0 when no bit is set (e.g. 0x0001 -> 1, 0x8000 -> 16). Behaves
+ * identically to the previous unrolled binary-search implementation.
+ */
+static __used u32 wl_find_msb(u16 bit16)
+{
+ u32 pos = 0;
+
+ while (bit16) {
+ pos++;
+ bit16 >>= 1;
+ }
+
+ return pos;
+}
+
+/* cfg80211 .resume hook: if the driver reached READY state, restart any
+ * interrupted internal scan machinery. Always returns 0.
+ */
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ s32 err = 0;
+
+ if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
+ WL_INFO(("device is not ready\n"));
+ return 0;
+ }
+
+ /* kick the internal iscan engine back into action */
+ wl_invoke_iscan(wl);
+
+ return err;
+}
+
+/* cfg80211 .suspend hook (signature gained a wowlan argument in 2.6.39+).
+ * When DHD_CLEAR_ON_SUSPEND is set: abort internal scans, complete any
+ * pending cfg80211 scan request as aborted (under cfgdrv_lock), clear
+ * scan-related status bits on every interface, and fail any half-finished
+ * connection attempts. Otherwise a no-op. Always returns 0.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_info *iter, *next;
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+ unsigned long flags;
+ if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
+ WL_INFO(("device is not ready : status (%d)\n",
+ (int)wl->status));
+ return 0;
+ }
+ for_each_ndev(wl, iter, next)
+ wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ wl_term_iscan(wl);
+ /* cfgdrv_lock guards scan_request against the event path */
+ spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ if (wl->scan_request) {
+ cfg80211_scan_done(wl->scan_request, true);
+ wl->scan_request = NULL;
+ }
+ for_each_ndev(wl, iter, next) {
+ wl_clr_drv_status(wl, SCANNING, iter->ndev);
+ wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ }
+ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ /* fail any connection that was still in flight */
+ for_each_ndev(wl, iter, next) {
+ if (wl_get_drv_status(wl, CONNECTING, iter->ndev)) {
+ wl_bss_connect_done(wl, iter->ndev, NULL, NULL, false);
+ }
+ }
+#endif /* DHD_CLEAR_ON_SUSPEND */
+ return 0;
+}
+
+/* Push the driver's cached PMKID list to the firmware via "pmkid_info".
+ * Only the primary (STA) interface supports PMKID caching; requests for
+ * virtual interfaces are logged and ignored. The incoming "err" lets a
+ * caller chain a prior failure through without issuing the iovar.
+ * Fix: the NULL-list diagnostic used a bare printk() with no KERN_ level;
+ * it now uses the driver's WL_ERR macro like every other error path here.
+ * Returns err, -EINVAL for a NULL list, or a negative iovar error.
+ */
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+ s32 err)
+{
+ int i, j;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ struct net_device *primary_dev = wl_to_prmry_ndev(wl);
+
+ if (!pmk_list) {
+ WL_ERR(("pmk_list is NULL\n"));
+ return -EINVAL;
+ }
+ /* pmk list is supported only for STA interface i.e. primary interface
+ * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
+ */
+ if (primary_dev != dev) {
+ WL_INFO(("Not supporting Flushing pmklist on virtual"
+ " interfaces than primary interface\n"));
+ return err;
+ }
+
+ WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ WL_DBG(("PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID));
+ for (j = 0; j < WPA2_PMKID_LEN; j++) {
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+ }
+ }
+ /* only push to firmware when the caller's chained status is clean */
+ if (likely(!err)) {
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list), wl->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ }
+
+ return err;
+}
+
+/*
+ * cfg80211 .set_pmksa handler: add (or refresh) one PMKSA cache entry.
+ * Looks for an existing entry with the same BSSID; if found it is
+ * overwritten in place, otherwise the entry is appended when there is
+ * room (i < WL_NUM_PMKIDS_MAX). Returns -EINVAL when the cache is full.
+ * The updated list is then synced to firmware via wl_update_pmklist().
+ */
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
+ /* Find existing entry for this BSSID; i == npmkid means "append". */
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ if (i < WL_NUM_PMKIDS_MAX) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+ WPA2_PMKID_LEN);
+ if (i == wl->pmk_list->pmkids.npmkid)
+ wl->pmk_list->pmkids.npmkid++;
+ } else {
+ err = -EINVAL;
+ }
+ /* NOTE(review): this debug dump indexes npmkid - 1 unconditionally;
+ * it relies on npmkid being non-zero on every path reaching here.
+ */
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n",
+ wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].
+ PMKID[i]));
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+}
+
+/*
+ * cfg80211 .del_pmksa handler: remove one PMKSA cache entry by BSSID.
+ * The matching entry is zeroed and the tail of the array is shifted
+ * down one slot to keep the list compact; npmkid is decremented.
+ * Returns -EINVAL when no entry matches. The resulting list is synced
+ * to firmware via wl_update_pmklist().
+ */
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct _pmkid_list pmkid;
+ s32 err = 0;
+ int i;
+
+ CHECK_SYS_UP(wl);
+ /* Local copy used only for the debug dump below. */
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+ memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+ }
+
+ for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp
+ (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+
+ if ((wl->pmk_list->pmkids.npmkid > 0) &&
+ (i < wl->pmk_list->pmkids.npmkid)) {
+ memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+ /* Compact the array: shift every later entry down one slot. */
+ for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ ETHER_ADDR_LEN);
+ memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
+ &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ wl->pmk_list->pmkids.npmkid--;
+ } else {
+ err = -EINVAL;
+ }
+
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+
+ return err;
+
+}
+
+/*
+ * cfg80211 .flush_pmksa handler: clear the entire PMKSA cache and push
+ * the now-empty list to firmware via wl_update_pmklist().
+ */
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ s32 err = 0;
+ CHECK_SYS_UP(wl);
+ memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
+ err = wl_update_pmklist(dev, wl->pmk_list, err);
+ return err;
+
+}
+
+/*
+ * Allocate and populate a minimal wl_scan_params_t for a single-channel,
+ * zero-SSID scan. On success *out_params_size receives the allocated
+ * size and the caller owns (and must kfree) the returned buffer; on
+ * allocation failure NULL is returned and *out_params_size stays 0.
+ * channel == -1 is passed through htodchanspec() (wildcard), otherwise
+ * the channel number is converted with wl_ch_host_to_driver();
+ * channel == 0 encodes "no channel" via num_chans.
+ */
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+ wl_scan_params_t *params;
+ int params_size;
+ int num_chans;
+
+ *out_params_size = 0;
+
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__, params_size));
+ return params;
+ }
+ /* NOTE(review): redundant — kzalloc already zeroed the buffer. */
+ memset(params, 0, params_size);
+ /* NOTE(review): this assignment is dead; it is overwritten by
+ * htod32(1) below, so the 'nprobes' argument is effectively unused.
+ */
+ params->nprobes = nprobes;
+
+ num_chans = (channel == 0) ? 0 : 1;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ if (channel == -1)
+ params->channel_list[0] = htodchanspec(channel);
+ else
+ params->channel_list[0] = wl_ch_host_to_driver(channel);
+
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
+}
+
+/*
+ * cfg80211 .remain_on_channel handler. Records the requested channel,
+ * allocates a non-zero cookie (last_roc_id, skipping 0 on wrap),
+ * immediately reports ready-on-channel to cfg80211, turns on P2P
+ * discovery if it is not up yet, and finally starts a P2P LISTEN on the
+ * target channel for 'duration'. Requests arriving on the virtual
+ * p2p_net are redirected to the primary interface, and any scan in
+ * progress is aborted first.
+ */
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel * channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ s32 target_channel;
+ u32 id;
+ struct ether_addr primary_mac;
+ struct net_device *ndev = NULL;
+
+ s32 err = BCME_OK;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
+
+ if (wl->p2p_net == dev) {
+ ndev = wl_to_prmry_ndev(wl);
+ } else {
+ ndev = dev;
+ }
+
+ /* A listen cannot start while a scan is running; abort it. */
+ if (wl_get_drv_status(wl, SCANNING, ndev)) {
+ wl_notify_escan_complete(wl, ndev, true, true);
+ }
+
+ target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+ memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+ wl->remain_on_chan_type = channel_type;
+ /* Cookie must be non-zero; skip 0 when the counter wraps. */
+ id = ++wl->last_roc_id;
+ if (id == 0)
+ id = ++wl->last_roc_id;
+ *cookie = id;
+ cfg80211_ready_on_channel(dev, *cookie, channel,
+ channel_type, duration, GFP_KERNEL);
+ if (wl->p2p && !wl->p2p->on) {
+ get_primary_mac(wl, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+ /* In case of p2p_listen command, supplicant send remain_on_channel
+ * without turning on P2P
+ */
+
+ p2p_on(wl) = true;
+ err = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0);
+
+ if (unlikely(err)) {
+ goto exit;
+ }
+ }
+ if (p2p_is_on(wl))
+ wl_cfgp2p_discover_listen(wl, target_channel, duration);
+
+
+exit:
+ return err;
+}
+
+/*
+ * cfg80211 .cancel_remain_on_channel handler. Intentionally a no-op in
+ * this driver (the listen simply times out in firmware); always
+ * returns 0.
+ */
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+ u64 cookie)
+{
+ s32 err = 0;
+ WL_DBG((" enter ) netdev_ifidx: %d \n", dev->ifindex));
+ return err;
+}
+/*
+ * Transmit the action frame queued in wl->afx_hdl once the peer's
+ * channel has been discovered. Clears the pending pointer first, stops
+ * any scan and P2P search-listen so the radio stays on the peer's
+ * channel, retargets the frame to afx_hdl->peer_chan, and records
+ * whether the peer ACKed in afx_hdl->ack_recv. Returns -1 when P2P is
+ * not on, 0 otherwise.
+ */
+static s32
+wl_cfg80211_send_pending_tx_act_frm(struct wl_priv *wl)
+{
+ wl_af_params_t *tx_act_frm;
+ struct net_device *dev = wl->afx_hdl->dev;
+ if (!p2p_is_on(wl))
+ return -1;
+
+ /* The virtual p2p_net cannot issue ioctls; use the primary ndev. */
+ if (dev == wl->p2p_net) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+
+ tx_act_frm = wl->afx_hdl->pending_tx_act_frm;
+ WL_DBG(("Sending the action frame\n"));
+ wl->afx_hdl->pending_tx_act_frm = NULL;
+ if (tx_act_frm != NULL) {
+ /* Suspend P2P discovery's search-listen to prevent it from
+ * starting a scan or changing the channel.
+ */
+ wl_clr_drv_status(wl, SENDING_ACT_FRM, wl->afx_hdl->dev);
+ wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ wl_notify_escan_complete(wl, dev, true, true);
+ wl_cfgp2p_discover_enable_search(wl, false);
+ tx_act_frm->channel = wl->afx_hdl->peer_chan;
+ /* wl_cfgp2p_tx_action_frame() returns 0 on ACK, non-zero on no ACK. */
+ wl->afx_hdl->ack_recv = (wl_cfgp2p_tx_action_frame(wl, dev,
+ tx_act_frm, wl->afx_hdl->bssidx)) ? false : true;
+ }
+ return 0;
+}
+/*
+ * Workqueue handler scheduled by wl_cfg80211_send_at_common_channel():
+ * runs one action-frame peer search iteration on the discovery bsscfg.
+ * NOTE(review): 'afx_instance' is recovered via container_of() and
+ * NULL-checked, but the body then uses the global wl->afx_hdl instead
+ * of afx_instance — presumably they are the same object; verify.
+ */
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_instance;
+ struct wl_priv *wl = wlcfg_drv_priv;
+ afx_instance = container_of(work, struct afx_hdl, work);
+ if (afx_instance != NULL) {
+ wl_cfgp2p_act_frm_search(wl, wl->afx_hdl->dev,
+ wl->afx_hdl->bssidx, 0);
+ }
+}
+
+/*
+ * Channel-offload TX path: find the channel the peer is listening on,
+ * then send the pending action frame there. Initializes wl->afx_hdl
+ * with the frame and destination, then repeatedly (up to
+ * WL_CHANNEL_SYNC_RETRY times) schedules wl_cfg80211_afx_handler() to
+ * search for the peer, blocking on act_frm_scan between attempts.
+ * When the peer channel is found, the frame is sent via
+ * wl_cfg80211_send_pending_tx_act_frm(). Returns true if the peer
+ * ACKed the frame, false otherwise.
+ */
+static bool
+wl_cfg80211_send_at_common_channel(struct wl_priv *wl,
+ struct net_device *dev,
+ wl_af_params_t *af_params)
+{
+ WL_DBG((" enter ) \n"));
+ /* initialize afx_hdl */
+ wl->afx_hdl->pending_tx_act_frm = af_params;
+ wl->afx_hdl->bssidx = wl_cfgp2p_find_idx(wl, dev);
+ wl->afx_hdl->dev = dev;
+ wl->afx_hdl->retry = 0;
+ wl->afx_hdl->peer_chan = WL_INVALID;
+ wl->afx_hdl->ack_recv = false;
+ memcpy(wl->afx_hdl->pending_tx_dst_addr.octet,
+ af_params->action_frame.da.octet,
+ sizeof(wl->afx_hdl->pending_tx_dst_addr.octet));
+ /* Loop to wait until we have sent the pending tx action frame or the
+ * pending action frame tx is cancelled.
+ */
+ while ((wl->afx_hdl->retry < WL_CHANNEL_SYNC_RETRY) &&
+ (wl->afx_hdl->peer_chan == WL_INVALID)) {
+ wl_set_drv_status(wl, SENDING_ACT_FRM, dev);
+ wl_set_drv_status(wl, SCANNING, dev);
+ WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+ wl->afx_hdl->retry));
+ /* Do find_peer_for_action */
+ schedule_work(&wl->afx_hdl->work);
+ /* Completed by the search/scan-result path once a pass finishes. */
+ wait_for_completion(&wl->act_frm_scan);
+ wl->afx_hdl->retry++;
+ }
+ if (wl->afx_hdl->peer_chan != WL_INVALID)
+ wl_cfg80211_send_pending_tx_act_frm(wl);
+ else {
+ WL_ERR(("Couldn't find the peer after %d retries\n",
+ wl->afx_hdl->retry));
+ }
+ wl->afx_hdl->dev = NULL;
+ wl->afx_hdl->bssidx = WL_INVALID;
+ wl_clr_drv_status(wl, SENDING_ACT_FRM, dev);
+ if (wl->afx_hdl->ack_recv)
+ return true; /* ACK */
+ else
+ return false; /* NO ACK */
+}
+
+/*
+ * cfg80211 .mgmt_tx handler. Three cases by frame type:
+ * - probe response: extract WPS/P2P/WFD IEs from the payload, save them
+ * as the probe-response vendor IEs, and report TX success immediately
+ * (firmware answers probe requests itself);
+ * - disassoc/deauth: translate into a WLC_SCB_DEAUTHENTICATE_FOR_REASON
+ * ioctl against the destination STA and report success;
+ * - action frame: build a wl_af_params_t and transmit it, either via
+ * the channel-offload path (find the peer's listen channel first) or
+ * directly with retries, then report the ACK result.
+ * Cookies are allocated from send_action_id, skipping 0 on wrap.
+ * Returns BCME_OK/0 on the normal paths, -ENODEV for an unknown dev,
+ * -EFAULT if discovery cannot be suspended, -ENOMEM on allocation
+ * failure.
+ */
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev,
+ struct ieee80211_channel *channel, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8* buf, size_t len,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+ bool no_cck, bool dont_wait_for_ack,
+#endif
+ u64 *cookie)
+{
+ wl_action_frame_t *action_frame;
+ wl_af_params_t *af_params;
+ wifi_p2p_ie_t *p2p_ie;
+ wpa_ie_fixed_t *wps_ie;
+ scb_val_t scb_val;
+ wifi_wfd_ie_t *wfd_ie;
+ const struct ieee80211_mgmt *mgmt;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct net_device *dev = NULL;
+ s32 err = BCME_OK;
+ s32 bssidx = 0;
+ u32 p2pie_len = 0;
+ u32 wpsie_len = 0;
+ u32 wfdie_len = 0;
+ u32 id;
+ u32 retry = 0;
+ bool ack = false;
+ wifi_p2p_pub_act_frame_t *act_frm = NULL;
+ wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+
+ WL_DBG(("Enter \n"));
+
+ /* The virtual p2p_net cannot issue ioctls; use the primary ndev. */
+ if (ndev == wl->p2p_net) {
+ dev = wl_to_prmry_ndev(wl);
+ } else {
+ /* If TX req is for any valid ifidx. Use as is */
+ dev = ndev;
+ }
+
+ /* find bssidx based on ndev */
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+ if (bssidx == -1) {
+
+ WL_ERR(("Can not find the bssidx for dev( %p )\n", dev));
+ return -ENODEV;
+ }
+ if (p2p_is_on(wl)) {
+ /* Suspend P2P discovery search-listen to prevent it from changing the
+ * channel.
+ */
+ if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ return -EFAULT;
+ }
+ }
+ /* Allocate a non-zero cookie; skip 0 when the counter wraps. */
+ *cookie = 0;
+ id = wl->send_action_id++;
+ if (id == 0)
+ id = wl->send_action_id++;
+ *cookie = id;
+ mgmt = (const struct ieee80211_mgmt *)buf;
+ if (ieee80211_is_mgmt(mgmt->frame_control)) {
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ s32 ie_len = len - ie_offset;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ }
+ if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Total length of WFD Information Element */
+ wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id);
+ }
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len))
+ != NULL) {
+ /* Order of Vendor IE is 1) WPS IE +
+ * 2) P2P IE created by supplicant
+ * So, it is ok to find start address of WPS IE
+ * to save IEs
+ */
+ wpsie_len = wps_ie->length + sizeof(wps_ie->length) +
+ sizeof(wps_ie->tag);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ VNDR_IE_PRBRSP_FLAG,
+ (u8 *)wps_ie, wpsie_len + p2pie_len + wfdie_len);
+ }
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+ } else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+ ieee80211_is_deauth(mgmt->frame_control)) {
+ memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+ scb_val.val = mgmt->u.disassoc.reason_code;
+ wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t), true);
+ WL_DBG(("Disconnect STA : %s\n",
+ bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf)));
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL);
+ goto exit;
+
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ /* Abort the dwell time of any previous off-channel
+ * action frame that may be still in effect. Sending
+ * off-channel action frames relies on the driver's
+ * scan engine. If a previous off-channel action frame
+ * tx is still in progress (including the dwell time),
+ * then this new action frame will not be sent out.
+ */
+ wl_notify_escan_complete(wl, dev, true, true);
+
+ }
+
+ } else {
+ WL_ERR(("Driver only allows MGMT packet type\n"));
+ goto exit;
+ }
+
+ af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+ /* NOTE(review): this error return leaves discovery search disabled
+ * (suspended above) — confirm whether it should be re-enabled here.
+ */
+ if (af_params == NULL)
+ {
+ WL_ERR(("unable to allocate frame\n"));
+ return -ENOMEM;
+ }
+
+ action_frame = &af_params->action_frame;
+
+ /* Add the packet Id */
+ action_frame->packetId = *cookie;
+ WL_DBG(("action frame %d\n", action_frame->packetId));
+ /* Add BSSID */
+ memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+ memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+ /* Add the length exepted for 802.11 header */
+ action_frame->len = len - DOT11_MGMT_HDR_LEN;
+ WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+ /* Add the channel */
+ af_params->channel =
+ ieee80211_frequency_to_channel(channel->center_freq);
+
+ if (channel->band == IEEE80211_BAND_5GHZ) {
+ WL_DBG(("5GHz channel %d", af_params->channel));
+ err = wldev_ioctl(dev, WLC_SET_CHANNEL,
+ &af_params->channel, sizeof(af_params->channel), true);
+ if (err < 0) {
+ WL_ERR(("WLC_SET_CHANNEL error %d\n", err));
+ }
+ }
+
+ /* Add the dwell time
+ * Dwell time to stay off-channel to wait for a response action frame
+ * after transmitting an GO Negotiation action frame
+ */
+ af_params->dwell_time = WL_DWELL_TIME;
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+ /* Classify the action frame (P2P public action / P2P action / GAS)
+ * purely for logging and dwell-time selection below.
+ */
+ if (wl_cfgp2p_is_pub_action(action_frame->data, action_frame->len)) {
+ act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data);
+ WL_DBG(("P2P PUB action_frame->len: %d chan %d category %d subtype %d\n",
+ action_frame->len, af_params->channel,
+ act_frm->category, act_frm->subtype));
+ } else if (wl_cfgp2p_is_p2p_action(action_frame->data, action_frame->len)) {
+ p2p_act_frm = (wifi_p2p_action_frame_t *) (action_frame->data);
+ WL_DBG(("P2P action_frame->len: %d chan %d category %d subtype %d\n",
+ action_frame->len, af_params->channel,
+ p2p_act_frm->category, p2p_act_frm->subtype));
+ } else if (wl_cfgp2p_is_gas_action(action_frame->data, action_frame->len)) {
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) (action_frame->data);
+ WL_DBG(("Service Discovery action_frame->len: %d chan %d category %d action %d\n",
+ action_frame->len, af_params->channel,
+ sd_act_frm->category, sd_act_frm->action));
+
+ }
+ wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len);
+ /*
+ * To make sure to send successfully action frame, we have to turn off mpc
+ */
+
+ if (act_frm && ((act_frm->subtype == P2P_PAF_GON_REQ) ||
+ (act_frm->subtype == P2P_PAF_GON_RSP) ||
+ (act_frm->subtype == P2P_PAF_GON_CONF) ||
+ (act_frm->subtype == P2P_PAF_PROVDIS_REQ))) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ }
+
+ /* Pick a dwell time matched to how long a response is expected. */
+ if (act_frm && act_frm->subtype == P2P_PAF_DEVDIS_REQ) {
+ af_params->dwell_time = WL_LONG_DWELL_TIME;
+ } else if (act_frm &&
+ (act_frm->subtype == P2P_PAF_PROVDIS_REQ ||
+ act_frm->subtype == P2P_PAF_PROVDIS_RSP ||
+ act_frm->subtype == P2P_PAF_GON_RSP)) {
+ af_params->dwell_time = WL_MED_DWELL_TIME;
+ }
+
+ if (IS_P2P_SOCIAL(af_params->channel) &&
+ (IS_P2P_PUB_ACT_REQ(act_frm, action_frame->len) ||
+ IS_GAS_REQ(sd_act_frm, action_frame->len)) &&
+ wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+ /* channel offload require P2P IE for Probe request
+ * otherwise, we will use wl_cfgp2p_tx_action_frame directly.
+ * channel offload for action request frame
+ */
+
+ /* channel offload for action request frame */
+ ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params);
+ } else {
+ ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ? false : true;
+ if (!ack) {
+ if (wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+ /* if the NO ACK occurs, the peer device will be on
+ * listen channel of the peer
+ * So, we have to find the peer and send action frame on
+ * that channel.
+ */
+ ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params);
+ } else {
+ for (retry = 0; retry < WL_CHANNEL_SYNC_RETRY; retry++) {
+ ack = (wl_cfgp2p_tx_action_frame(wl, dev,
+ af_params, bssidx)) ? false : true;
+ if (ack)
+ break;
+ }
+
+ }
+
+ }
+
+ }
+ cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, ack, GFP_KERNEL);
+ /* GON_CONF ends the negotiation; re-enable minimum power control. */
+ if (act_frm && act_frm->subtype == P2P_PAF_GON_CONF) {
+ wldev_iovar_setint(dev, "mpc", 1);
+ }
+ kfree(af_params);
+exit:
+ return err;
+}
+
+
+/*
+ * cfg80211 .mgmt_frame_register handler. Effectively a stub: it only
+ * logs the request and returns; no driver state is changed for any
+ * frame type, including probe requests.
+ */
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+
+ WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg));
+
+ if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+
+ return;
+}
+
+
+/*
+ * cfg80211 .change_bss handler. Placeholder: every supported BSS
+ * parameter is inspected but deliberately ignored (all branches are
+ * empty); the call always succeeds.
+ */
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct bss_parameters *params)
+{
+ if (params->use_cts_prot >= 0) {
+ }
+
+ if (params->use_short_preamble >= 0) {
+ }
+
+ if (params->use_short_slot_time >= 0) {
+ }
+
+ if (params->basic_rates) {
+ }
+
+ if (params->ap_isolate >= 0) {
+ }
+
+ if (params->ht_opmode >= 0) {
+ }
+
+ return 0;
+}
+
+/*
+ * cfg80211 .set_channel handler: convert the requested center frequency
+ * to a channel number and push it to firmware with WLC_SET_CHANNEL.
+ * channel_type (HT20/HT40±) is logged but not otherwise used. Requests
+ * on the virtual p2p_net are redirected to the primary interface.
+ */
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ s32 channel;
+ s32 err = BCME_OK;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+
+ if (wl->p2p_net == dev) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+ dev->ifindex, channel_type, channel));
+ err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), true);
+ if (err < 0) {
+ WL_ERR(("WLC_SET_CHANNEL error %d chip may not be supporting this channel\n", err));
+ }
+ return err;
+}
+
+/*
+ * Configure the given bsscfg for open (no security) operation:
+ * auth = 0 (open system), wsec = 0 (no encryption),
+ * wpa_auth = WPA_AUTH_NONE. Returns 0 on success or BCME_ERROR if any
+ * of the three iovars fails.
+ */
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+{
+ s32 err = BCME_OK;
+
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse an RSN (WPA2) IE and program the corresponding security
+ * settings on the given bsscfg. Extracts the group (mcast) cipher, the
+ * first pairwise (ucast) cipher and the first AKM suite, maps them to
+ * wsec/wpa_auth values, and applies them via the "auth", "wsec" and
+ * "wpa_auth" iovars (SES_OW_ENABLED is OR'd into wsec for WPS).
+ * A NULL IE is accepted as a no-op. Returns 0 on success/no-op or
+ * BCME_ERROR if an iovar fails.
+ * NOTE(review): only the first entry of the ucast and AKM suite lists
+ * is examined; the counts are byte-swapped but not used as loop bounds.
+ */
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ s32 len = 0;
+ s32 err = BCME_OK;
+ u16 auth = 0; /* d11 open authentication */
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u8* tmp;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ if (wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpa2ie->len;
+ /* check the mcast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ tmp = mcast->oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ gval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ gval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ gval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ gval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("No Security Info\n"));
+ break;
+ }
+ len -= WPA_SUITE_LEN;
+ /* check the unicast cipher */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ ltoh16_ua(&ucast->count);
+ tmp = ucast->list[0].oui;
+ switch (tmp[DOT11_OUI_LEN]) {
+ case WPA_CIPHER_NONE:
+ pval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ pval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ pval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ pval = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ pval = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ /* FOR WPS , set SEC_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* check the AKM */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1];
+ ltoh16_ua(&mgmt->count);
+ tmp = (u8 *)&mgmt->list[0];
+ switch (tmp[DOT11_OUI_LEN]) {
+ case RSN_AKM_NONE:
+ wpa_auth = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ wpa_auth = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth = WPA2_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+/*
+ * Parse a WPA (vendor-specific) IE and program the corresponding
+ * security settings on the given bsscfg. Unlike wl_validate_wpa2ie(),
+ * this walks the full multicast/unicast/AKM suite lists, OR-ing every
+ * recognized WPA-OUI suite into gval/pval/wpa_auth, with length checks
+ * before each section. Results are applied via the "auth", "wsec" and
+ * "wpa_auth" iovars (SES_OW_ENABLED OR'd into wsec for WPS). A NULL or
+ * truncated IE falls through to exit as a no-op. Returns 0 on
+ * success/no-op or BCME_ERROR if an iovar fails.
+ */
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ s32 len = 0;
+ u32 i;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 tmp = 0;
+
+ if (wpaie == NULL)
+ goto exit;
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFO(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ tmp = 0;
+ switch (mcast->type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ gval |= tmp;
+ }
+ }
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO(("no unicast suite\n"));
+ goto exit;
+ }
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ tmp = 0;
+ switch (ucast->list[i].type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ pval |= tmp;
+ }
+ }
+ }
+ /* Account for any unwalked suites so the AKM length check is exact. */
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFO((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ tmp = 0;
+ switch (mgmt->list[i].type) {
+ case RSN_AKM_NONE:
+ tmp = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ tmp = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ tmp = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ wpa_auth |= tmp;
+ }
+ }
+
+ }
+ /* FOR WPS , set SEC_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+#if 0
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ s32 err = BCME_OK;
+ bcm_tlv_t *ssid_ie;
+ wlc_ssid_t ssid;
+ struct wl_priv *wl = wiphy_priv(wiphy);
+ struct wl_join_params join_params;
+ wpa_ie_fixed_t *wps_ie;
+ wpa_ie_fixed_t *wpa_ie;
+ bcm_tlv_t *wpa2_ie;
+ wifi_p2p_ie_t *p2p_ie;
+ wifi_wfd_ie_t *wfd_ie;
+ bool is_bssup = false;
+ bool update_bss = false;
+ bool pbc = false;
+ u16 wpsie_len = 0;
+ u16 p2pie_len = 0;
+ u32 wfdie_len = 0;
+ u8 beacon_ie[IE_MAX_LEN];
+ s32 ie_offset = 0;
+ s32 bssidx = 0;
+ s32 infra = 1;
+ s32 join_params_size = 0;
+ s32 ap = 0;
+ WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+ info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+ if (wl->p2p_net == dev) {
+ dev = wl_to_prmry_ndev(wl);
+ }
+
+ bssidx = wl_cfgp2p_find_idx(wl, dev);
+ if (p2p_is_on(wl) &&
+ (bssidx == wl_to_p2p_bss_bssidx(wl,
+ P2PAPI_BSSCFG_CONNECTION))) {
+ memset(beacon_ie, 0, sizeof(beacon_ie));
+ /* We don't need to set beacon for P2P_GO,
+ * but need to parse ssid from beacon_parameters
+ * because there is no way to set ssid
+ */
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+ wl->p2p->ssid.SSID_len = ssid_ie->len;
+ WL_DBG(("SSID (%s) in Head \n", ssid_ie->data));
+
+ } else {
+ WL_ERR(("No SSID in beacon \n"));
+ }
+
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+
+
+ /* find the P2PIE */
+ if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail, info->tail_len)) != NULL) {
+ /* Total length of P2P Information Element */
+ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id);
+ memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len);
+
+ } else {
+ WL_ERR(("No P2PIE in beacon \n"));
+ }
+
+ /* find the WFD IEs */
+ if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)info->tail, info->tail_len)) != NULL) {
+ /* Total length of P2P Information Element */
+ wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id);
+ if ((wpsie_len + p2pie_len + wfdie_len) < IE_MAX_LEN) {
+ memcpy(&beacon_ie[wpsie_len + p2pie_len], wfd_ie, wfdie_len);
+ } else {
+ WL_ERR(("Found WFD IE but there is no space, (%d)(%d)(%d)\n",
+ wpsie_len, p2pie_len, wfdie_len));
+ wfdie_len = 0;
+ }
+ } else {
+ WL_ERR(("No WFDIE in beacon \n"));
+ }
+ /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len + p2pie_len + wfdie_len);
+
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+
+ if (!is_bssup && (wpa2_ie != NULL)) {
+ wldev_iovar_setint(dev, "mpc", 0);
+ if ((err = wl_validate_wpa2ie(dev, wpa2_ie, bssidx)) < 0) {
+ WL_ERR(("WPA2 IE parsing error"));
+ goto exit;
+ }
+ err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ goto exit;
+ }
+ err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p->ssid,
+ sizeof(wl->p2p->ssid), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &wl->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GO SSID setting error %d\n", err));
+ goto exit;
+ }
+ if ((err = wl_cfgp2p_bss(wl, dev, bssidx, 1)) < 0) {
+ WL_ERR(("GO Bring up error %d\n", err));
+ goto exit;
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATING, dev)) {
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ ap = 1;
+ /* find the SSID */
+ if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+ info->head_len - ie_offset,
+ DOT11_MNG_SSID_ID)) != NULL) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ memcpy(ssid.SSID, ssid_ie->data, ssid_ie->len);
+ WL_DBG(("SSID is (%s) in Head \n", ssid.SSID));
+ ssid.SSID_len = ssid_ie->len;
+ wldev_iovar_setint(dev, "mpc", 0);
+ wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+ wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ wl->ap_info->security_mode = false;
+ return BCME_ERROR;
+ }
+ wl->ap_info->security_mode = true;
+ if (wl->ap_info->rsn_ie) {
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ if (wl->ap_info->wpa_ie) {
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ if (wl->ap_info->wps_ie) {
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = NULL;
+ }
+ if (wpa_ie != NULL) {
+ /* WPAIE */
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ /* RSNIE */
+ wl->ap_info->wpa_ie = NULL;
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else {
+ wl_validate_opensecurity(dev, bssidx);
+ wl->ap_info->security_mode = false;
+ }
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else {
+ WL_DBG(("No WPSIE in beacon \n"));
+ }
+ if (info->interval) {
+ if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+ &info->interval, sizeof(s32), true)) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32), true)) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ return err;
+ }
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters starts with ssid */
+ join_params_size = sizeof(join_params.ssid);
+ memcpy(join_params.ssid.SSID, ssid.SSID, ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(ssid.SSID_len);
+ /* create softap */
+ if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+ join_params_size, true)) == 0) {
+ wl_clr_drv_status(wl, AP_CREATING, dev);
+ wl_set_drv_status(wl, AP_CREATED, dev);
+ }
+ }
+ } else if (wl_get_drv_status(wl, AP_CREATED, dev)) {
+ ap = 1;
+ /* find the WPSIE */
+ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) {
+ wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+ /*
+ * Should be compared with saved ie before saving it
+ */
+ wl_validate_wps_ie((char *) wps_ie, &pbc);
+ memcpy(beacon_ie, wps_ie, wpsie_len);
+ wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
+ beacon_ie, wpsie_len);
+ if (wl->ap_info->wps_ie &&
+ memcmp(wl->ap_info->wps_ie, wps_ie, wpsie_len)) {
+ WL_DBG((" WPS IE is changed\n"));
+ kfree(wl->ap_info->wps_ie);
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ } else if (wl->ap_info->wps_ie == NULL) {
+ WL_DBG((" WPS IE is added\n"));
+ wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL);
+ /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
+ }
+ /* find the RSN_IE */
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
+ DOT11_MNG_RSN_ID)) != NULL) {
+ WL_DBG((" WPA2 IE is found\n"));
+ }
+ /* find the WPA_IE */
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
+ info->tail_len)) != NULL) {
+ WL_DBG((" WPA IE is found\n"));
+ }
+ if ((wpa_ie != NULL || wpa2_ie != NULL)) {
+ if (!wl->ap_info->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ if (wpa_ie != NULL) {
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ } else if (wl->ap_info->wpa_ie) {
+ /* change from WPA mode to WPA2 mode */
+ if (wpa2_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->wpa_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ else if (memcmp(wl->ap_info->wpa_ie,
+ wpa_ie, wpa_ie->length +
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+ kfree(wl->ap_info->wpa_ie);
+ update_bss = true;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->rsn_ie = NULL;
+ }
+ } else {
+ /* change from WPA2 mode to WPA mode */
+ if (wpa_ie != NULL) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = NULL;
+ wl->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else if (memcmp(wl->ap_info->rsn_ie,
+ wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ kfree(wl->ap_info->rsn_ie);
+ wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
+ wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ wl->ap_info->wpa_ie = NULL;
+ }
+ }
+ if (update_bss) {
+ wl->ap_info->security_mode = true;
+ wl_cfgp2p_bss(wl, dev, bssidx, 0);
+ if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfgp2p_bss(wl, dev, bssidx, 1);
+ }
+ }
+ } else {
+ WL_ERR(("No WPSIE in beacon \n"));
+ }
+ }
+exit:
+ if (err)
+ wldev_iovar_setint(dev, "mpc", 1);
+ return err;
+}
+#endif
+
+/* cfg80211 callback table handed to wiphy_new() in wl_setup_wiphy().
+ * Note the beacon hooks are compiled out (#if 0) in this build.
+ */
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+	.set_channel = wl_cfg80211_set_channel,
+#if 0
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+#endif
+};
+
+/*
+ * wl_mode_to_nl80211_iftype() - map a driver WL_MODE_* value to the
+ * matching nl80211 interface type.
+ *
+ * Returns NL80211_IFTYPE_UNSPECIFIED for any unrecognized mode.
+ *
+ * Cleanup: the original declared "s32 err = 0" and ended with an
+ * unreachable "return err" after a switch in which every case returns;
+ * both have been removed.  Behavior is unchanged.
+ */
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+}
+
+/*
+ * wl_setup_wiphy() - allocate the wiphy (with struct wl_priv as its
+ * private area), describe the driver's capabilities and register it
+ * with cfg80211.
+ *
+ * @wdev:         wireless_dev whose ->wiphy is allocated here
+ * @sdiofunc_dev: underlying bus device, installed as the wiphy parent
+ *
+ * Returns 0 on success or a negative errno.  On registration failure
+ * the freshly allocated wiphy is freed before returning.
+ *
+ * Fix: corrected the "Couldn not" typos in the two error messages and
+ * the stray ')' in the custom-regulatory debug message.
+ */
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev)
+{
+	s32 err = 0;
+
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Could not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report how many SSIDs Driver can support per Scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+	wdev->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_STATION)
+	    | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR);
+
+	/* only the 2.4 GHz band is advertised at setup time */
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif /* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
+	WL_DBG(("Registering custom regulatory\n"));
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+	return err;
+}
+
+/*
+ * wl_free_wdev() - unregister and free the wiphy owned by wl->wdev.
+ *
+ * Teardown order matters: the wiphy must be unregistered before it is
+ * freed, and per-netdev info is released in between.  The wiphy pointer
+ * is cached locally because freeing it also frees wl (wl is the wiphy's
+ * private area).
+ */
+static void wl_free_wdev(struct wl_priv *wl)
+{
+	struct wireless_dev *wdev = wl->wdev;
+	struct wiphy *wiphy;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	wiphy = wdev->wiphy;
+	wiphy_unregister(wdev->wiphy);
+	/* detach from the bus device before the wiphy is destroyed */
+	wdev->wiphy->dev.parent = NULL;
+
+	wl_delete_all_netinfo(wl);
+	wiphy_free(wiphy);
+	/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "wl",
+	 * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
+	 */
+}
+
+/*
+ * wl_inform_bss() - report every entry of the latest scan result list
+ * (wl->bss_list) to cfg80211 via wl_inform_single_bss().
+ *
+ * Iteration stops at the first entry that fails to be reported; that
+ * error (or 0) is returned.
+ */
+static s32 wl_inform_bss(struct wl_priv *wl)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = wl->bss_list;
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	/* prime the cursor, then walk the variable-length result records */
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		err = wl_inform_single_bss(wl, bi);
+		if (unlikely(err))
+			break;
+	}
+	return err;
+}
+
+/*
+ * wl_inform_single_bss() - convert one firmware wl_bss_info record into a
+ * synthetic beacon/probe-response management frame and hand it to
+ * cfg80211_inform_bss_frame().
+ *
+ * During a P2P scan, frames without a P2P IE are silently dropped.
+ * Returns 0 on success (including the drop cases) or a negative errno.
+ */
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+{
+	struct wiphy *wiphy = wiphy_from_scan(wl);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(wl);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+
+	/* oversized records cannot fit in frame_buf; drop, not an error */
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	/* header + mgmt frame storage; -sizeof(u8) accounts for the
+	 * one-byte placeholder array inside the struct
+	 */
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	/* prefer the control channel; fall back to the chanspec channel */
+	notif_bss_info->channel =
+		bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	notif_bss_info->rssi = dtoh16(bi->RSSI);
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	/* active scans yield probe responses, passive ones beacons */
+	mgmt_type = wl->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+	    mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = wl->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	/* rebuild the IE scratch buffer from this record's IEs */
+	wl_rst_ie(wl);
+
+	wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(wl);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	channel = ieee80211_get_channel(wiphy, freq);
+
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
+			"mgmt_type %d frame_len %d\n", bi->SSID,
+			notif_bss_info->rssi, notif_bss_info->channel,
+			mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+			notif_bss_info->frame_len));
+
+	/* cfg80211 expects signal in mBm when CFG80211_SIGNAL_TYPE_MBM */
+	signal = notif_bss_info->rssi * 100;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+	if (wl->p2p_net && wl->scan_request &&
+		((wl->scan_request->dev == wl->p2p_net) ||
+		(wl->scan_request->dev == wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)))) {
+#else
+	/* NOTE(review): this branch dereferences wl->scan_request without a
+	 * NULL check; confirm scan_request is always set when p2p_scan(wl)
+	 * is false and p2p_is_on(wl) is true.
+	 */
+	if (p2p_is_on(wl) && (p2p_scan(wl) ||
+		(wl->scan_request->dev == wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)))) {
+#endif
+		/* find the P2PIE, if we do not find it, we will discard this frame */
+		wifi_p2p_ie_t * p2p_ie;
+		if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)beacon_proberesp->variable,
+			wl_get_ielen(wl))) == NULL) {
+			WL_ERR(("Couldn't find P2PIE in probe response/beacon\n"));
+			kfree(notif_bss_info);
+			return err;
+		}
+	}
+
+	/* NOTE(review): frame_len was stored host-order above, yet it is
+	 * narrowed through le16_to_cpu() here — a no-op on little-endian,
+	 * suspicious on big-endian.  TODO confirm.
+	 */
+	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu(notif_bss_info->frame_len), signal, GFP_KERNEL);
+	if (unlikely(!cbss)) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+
+	cfg80211_put_bss(cbss);
+	kfree(notif_bss_info);
+
+	return err;
+}
+
+/*
+ * wl_is_linkup() - classify a firmware event as a link-up transition.
+ *
+ * True for a successful WLC_E_SET_SSID on a non-IBSS interface, or for
+ * a WLC_E_LINK event that carries the LINK flag; false otherwise.
+ */
+static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	u16 flags = ntoh16(e->flags);
+
+	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+
+	switch (event) {
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS &&
+		    !wl_is_ibssmode(wl, ndev))
+			return true;
+		break;
+	case WLC_E_LINK:
+		if (flags & WLC_EVENT_MSG_LINK)
+			return true;
+		break;
+	default:
+		break;
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+/*
+ * wl_is_linkdown() - classify a firmware event as a link-down transition.
+ *
+ * Any deauth/disassoc event counts, as does a WLC_E_LINK event whose
+ * LINK flag is clear.
+ */
+static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	switch (event) {
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DEAUTH:
+		return true;
+	case WLC_E_LINK:
+		return !(flags & WLC_EVENT_MSG_LINK);
+	default:
+		return false;
+	}
+}
+
+/*
+ * wl_is_nonetwork() - true when a firmware event reports a failed
+ * association attempt: a WLC_E_LINK with status "no networks", or a
+ * WLC_E_SET_SSID that did not complete successfully.
+ */
+static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	return (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS) ||
+	       (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS);
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	/* Report station (dis)association to cfg80211 while operating as
+	 * AP/GO.  Two compile paths: pre-3.2 kernels (without
+	 * WL_CFG80211_STA_EVENT) reconstruct a raw management frame and
+	 * feed it through cfg80211_rx_mgmt(); newer kernels use the
+	 * cfg80211_new_sta()/cfg80211_del_sta() events directly.
+	 */
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	bool isfree = false;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	s32 freq;
+	s32 channel;
+	u8 body[WL_FRAME_LEN];
+	u16 fc = 0;
+
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	channel_info_t ci;
+#else
+	struct station_info sinfo;
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !WL_CFG80211_STA_EVENT */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	memset(body, 0, sizeof(body));
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+	if (wl_get_mode_by_netdev(wl, ndev) == WL_INVALID)
+		return WL_INVALID;
+
+	/* firmware payload must fit the stack copy buffer */
+	if (len > WL_FRAME_LEN) {
+		WL_ERR(("Received frame length %d from dongle is greater than"
+			" allocated body buffer len %d", len, WL_FRAME_LEN));
+		goto exit;
+	}
+	memcpy(body, data, len);
+	/* DA of the synthesized frame is our own MAC for this bsscfg */
+	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync);
+	memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+	err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+	/* NOTE(review): deauth events are mapped to FC_DISASSOC rather
+	 * than a deauth frame type — confirm this is intentional.
+	 */
+	switch (event) {
+		case WLC_E_ASSOC_IND:
+			fc = FC_ASSOC_REQ;
+			break;
+		case WLC_E_REASSOC_IND:
+			fc = FC_REASSOC_REQ;
+			break;
+		case WLC_E_DISASSOC_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH:
+			fc = FC_DISASSOC;
+			break;
+		default:
+			fc = 0;
+			goto exit;
+	}
+	if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false)))
+		return err;
+
+	channel = dtoh32(ci.hw_channel);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+	/* allocates mgmt_frame; freed below once isfree is set */
+	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+	if (err < 0)
+		goto exit;
+	isfree = true;
+
+	if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+	}
+
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return err;
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
+	sinfo.filled = 0;
+	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+		reason == DOT11_SC_SUCCESS) {
+		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+		if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = len;
+		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	}
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
+	return err;
+}
+
+static s32
+wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	/* Central dispatcher for connection-state firmware events.
+	 * AP-mode events are delegated to wl_notify_connect_status_ap();
+	 * otherwise the event is classified as link-up, link-down or
+	 * no-network and the matching cfg80211 notification is emitted.
+	 */
+	bool act;
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+
+	if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+		wl_notify_connect_status_ap(wl, ndev, e, data);
+	} else {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+			ntoh32(e->event_type), ntoh32(e->status), ndev));
+		if (wl_is_linkup(wl, e, ndev)) {
+			wl_link_up(wl);
+			act = true;
+			wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
+			wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			if (wl_is_ibssmode(wl, ndev)) {
+				printk("cfg80211_ibss_joined\n");
+				cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
+					GFP_KERNEL);
+				WL_DBG(("joined in IBSS network\n"));
+			} else {
+				/* skip the completion if a disconnect is
+				 * already in flight
+				 */
+				if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+					printk("wl_bss_connect_done succeeded\n");
+					wl_bss_connect_done(wl, ndev, e, data, true);
+					WL_DBG(("joined in BSS network \"%s\"\n",
+						((struct wlc_ssid *)
+						 wl_read_prof(wl, ndev, WL_PROF_SSID))->SSID));
+				}
+			}
+
+		} else if (wl_is_linkdown(wl, e)) {
+			/* abort any scan still in progress */
+			if (wl->scan_request) {
+				if (wl->escan_on) {
+					wl_notify_escan_complete(wl, ndev, true, true);
+				} else {
+					del_timer_sync(&wl->scan_timeout);
+					wl_iscan_aborted(wl);
+				}
+			}
+			if (wl_get_drv_status(wl, CONNECTED, ndev)) {
+				scb_val_t scbval;
+				u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+				printk("link down, call cfg80211_disconnected\n");
+				wl_clr_drv_status(wl, CONNECTED, ndev);
+				if (! wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+					/* To make sure disconnect, explictly send dissassoc
+					*  for BSSID 00:00:00:00:00:00 issue
+					*/
+					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+					scbval.val = htod32(scbval.val);
+					wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t), true);
+					cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+					wl_link_down(wl);
+					wl_init_prof(wl, ndev);
+				}
+			}
+			else if (wl_get_drv_status(wl, CONNECTING, ndev)) {
+				printk("link down, during connecting\n");
+				/* link dropped mid-association: report failure */
+				wl_bss_connect_done(wl, ndev, e, data, false);
+			}
+			wl_clr_drv_status(wl, DISCONNECTING, ndev);
+
+		} else if (wl_is_nonetwork(wl, e)) {
+			printk("connect failed event=%d e->status 0x%x\n",
+				event, (int)ntoh32(e->status));
+			/* Clean up any pending scan request */
+			if (wl->scan_request) {
+				if (wl->escan_on) {
+					wl_notify_escan_complete(wl, ndev, true, true);
+				} else {
+					del_timer_sync(&wl->scan_timeout);
+					wl_iscan_aborted(wl);
+				}
+			}
+			if (wl_get_drv_status(wl, CONNECTING, ndev))
+				wl_bss_connect_done(wl, ndev, e, data, false);
+		} else {
+			printk("%s nothing\n", __FUNCTION__);
+		}
+	}
+	return err;
+}
+
+/*
+ * wl_notify_roaming_status() - handle a successful WLC_E_ROAM event.
+ *
+ * Reports a roam (or, if we were not yet connected, a completed
+ * connection) to cfg80211 and refreshes the activity/BSSID profile.
+ * All other events/statuses are ignored.  Always returns 0.
+ */
+static s32
+wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool active = true;
+
+	WL_DBG(("Enter \n"));
+
+	if (be32_to_cpu(e->event_type) != WLC_E_ROAM ||
+	    be32_to_cpu(e->status) != WLC_E_STATUS_SUCCESS)
+		return 0;
+
+	if (wl_get_drv_status(wl, CONNECTED, ndev))
+		wl_bss_roaming_done(wl, ndev, e, data);
+	else
+		wl_bss_connect_done(wl, ndev, e, data, true);
+
+	wl_update_prof(wl, ndev, e, &active, WL_PROF_ACT);
+	wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+	return 0;
+}
+
+/*
+ * wl_get_assoc_ies() - fetch the (re)association request/response IEs
+ * from firmware via the "assoc_info" / "assoc_req_ies" /
+ * "assoc_resp_ies" iovars and cache them in wl's connect info.
+ *
+ * Previously cached IEs are cleared first.  Oversized IEs are logged
+ * and skipped.  Returns 0 on success or the iovar error code.
+ */
+static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, wl->extra_buf,
+		WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
+	/* NOTE(review): htod32() is used where the data came *from* the
+	 * dongle (dtoh32 semantics); the two are the same byte-swap, but
+	 * the naming should be confirmed/corrected.
+	 */
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	/* drop any IEs cached from an earlier association */
+	if (conn_info->req_ie_len) {
+		conn_info->req_ie_len = 0;
+		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+	}
+	if (conn_info->resp_ie_len) {
+		conn_info->resp_ie_len = 0;
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+	}
+	if (assoc_info.req_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, wl->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		/* strip the fixed assoc-request header (and the extra AP
+		 * address present in a reassoc request) to keep IEs only.
+		 * NOTE(review): no underflow guard if req_len is smaller
+		 * than the fixed header — TODO confirm firmware guarantees.
+		 */
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		if (conn_info->req_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len);
+		else {
+			WL_ERR(("%s IE size %d above max %d size \n",
+				__FUNCTION__, conn_info->req_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->req_ie_len = 0;
+	}
+	if (assoc_info.resp_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, wl->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp);
+		if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len);
+		else {
+			WL_ERR(("%s IE size %d above max %d size \n",
+				__FUNCTION__, conn_info->resp_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->resp_ie_len = 0;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
+/*
+ * wl_ch_to_chanspec() - if a channel was requested (ch != 0), encode it
+ * as a single-entry chanspec list inside @join_params and grow
+ * *@join_params_size to cover the appended association parameters.
+ *
+ * The chanspec is built as: band bit (2G/5G by channel number),
+ * 20 MHz bandwidth, no sideband, then converted to dongle byte order.
+ * A ch of 0 leaves join_params and the size untouched.
+ */
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+	size_t *join_params_size)
+{
+	chanspec_t chanspec = 0;
+
+	if (ch != 0) {
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		/* keep only the channel bits, then merge band/bw/sideband */
+		join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+
+		WL_DBG(("%s join_params->params.chanspec_list[0]= %X\n",
+			__FUNCTION__, join_params->params.chanspec_list[0]));
+
+	}
+}
+
+/*
+ * wl_update_bss_info() - refresh the cached beacon interval and DTIM
+ * period for the current (non-IBSS) association.
+ *
+ * The BSS is looked up in cfg80211's cache first; on a miss, a fresh
+ * WLC_GET_BSS_INFO is issued and re-injected via wl_inform_single_bss().
+ * If the IEs carry no TIM element (active-scan probe responses don't),
+ * the DTIM period is queried directly with WLC_GET_DTIMPRD.
+ * Runs under wl->usr_sync.  Returns 0 or a negative errno.
+ */
+static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	s32 beacon_interval;
+	s32 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	u8 *curbssid;
+	s32 err = 0;
+	struct wiphy *wiphy;
+
+	wiphy = wl_to_wiphy(wl);
+
+	if (wl_is_ibssmode(wl, ndev))
+		return err;
+
+	ssid = (struct wlc_ssid *)wl_read_prof(wl, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+	bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+		WLAN_CAPABILITY_ESS);
+
+	mutex_lock(&wl->usr_sync);
+	if (!bss) {
+		WL_DBG(("Could not find the AP\n"));
+		/* first u32 of the buffer carries its size, dongle order */
+		*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+		err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+			wl->extra_buf, WL_EXTRA_BUF_MAX, false);
+		if (unlikely(err)) {
+			WL_ERR(("Could not get bss info %d\n", err));
+			goto update_bss_info_out;
+		}
+		bi = (struct wl_bss_info *)(wl->extra_buf + 4);
+		/* sanity: the record must describe the BSSID we joined */
+		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(wl, bi);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		/* NOTE(review): cpu_to_le16 assigned into a host-order s32
+		 * looks wrong on big-endian — confirm.
+		 */
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+		beacon_interval = bss->beacon_interval;
+		cfg80211_put_bss(bss);
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		/* second byte of the TIM element is the DTIM period */
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		* active scan was done so we could not get dtim
+		* information out of probe response.
+		* so we speficially query dtim information.
+		*/
+		err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period), false);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(wl, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(wl, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	mutex_unlock(&wl->usr_sync);
+	return err;
+}
+
+static s32
+wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	/* Completed roam: refresh assoc IEs, BSSID profile, cached BSS
+	 * info and PMK list, then report the roam to cfg80211 and mark
+	 * the interface CONNECTED.  Always returns 0.
+	 */
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+	u8 *curbssid;
+
+	wl_get_assoc_ies(wl, ndev);
+	wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+	curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+	wl_update_bss_info(wl, ndev);
+	wl_update_pmklist(ndev, wl->pmk_list, err);
+	/* cfg80211_roamed() gained a channel argument after 2.6.39 */
+	cfg80211_roamed(ndev,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
+		NULL,
+#endif
+		curbssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	WL_DBG(("Report roaming result\n"));
+
+	wl_set_drv_status(wl, CONNECTED, ndev);
+
+	return err;
+}
+
+static s32
+wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	/* Finish a pending connect attempt: abort any outstanding scan,
+	 * clear CONNECTING, and report success or failure (mapped to
+	 * WLAN_STATUS_AUTH_TIMEOUT) via cfg80211_connect_result().
+	 * Profile/IE caches are only refreshed on success.
+	 * No-op (beyond the scan abort) unless CONNECTING was set.
+	 */
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+	s32 err = 0;
+	u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+
+	WL_DBG((" enter\n"));
+	if (wl->scan_request) {
+		wl_notify_escan_complete(wl, ndev, true, true);
+	}
+	if (wl_get_drv_status(wl, CONNECTING, ndev)) {
+		wl_clr_drv_status(wl, CONNECTING, ndev);
+		if (completed) {
+			wl_get_assoc_ies(wl, ndev);
+			wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+			curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+			wl_update_bss_info(wl, ndev);
+			wl_update_pmklist(ndev, wl->pmk_list, err);
+			wl_set_drv_status(wl, CONNECTED, ndev);
+		}
+		cfg80211_connect_result(ndev,
+			curbssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
+			GFP_KERNEL);
+		if (completed)
+			WL_INFO(("Report connect result - connection succeeded\n"));
+		else
+			WL_ERR(("Report connect result - connection failed\n"));
+	}
+	return err;
+}
+
+/*
+ * wl_notify_mic_status() - forward a TKIP Michael MIC failure event to
+ * cfg80211, classifying it as a group or pairwise key failure from the
+ * event flags.  Serialized against other user operations via
+ * wl->usr_sync.  Always returns 0.
+ */
+static s32
+wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	enum nl80211_key_type key_type;
+	u16 flags = ntoh16(e->flags);
+
+	mutex_lock(&wl->usr_sync);
+	key_type = (flags & WLC_EVENT_MSG_GROUP) ?
+		NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE;
+	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	mutex_unlock(&wl->usr_sync);
+
+	return 0;
+}
+
+static s32
+wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	/* Scan-complete handler for the legacy (non-escan) path: pull the
+	 * full result list with WLC_SCAN_RESULTS, report it to cfg80211,
+	 * and finish the pending scan_request.  iscan completions are
+	 * redirected to the iscan thread instead.
+	 */
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	if (!wl_get_drv_status(wl, SCANNING, ndev)) {
+		WL_ERR(("scan is not ready \n"));
+		return err;
+	}
+	if (wl->iscan_on && wl->iscan_kickstart)
+		return wl_wakeup_iscan(wl_to_iscan(wl));
+
+	mutex_lock(&wl->usr_sync);
+	wl_clr_drv_status(wl, SCANNING, ndev);
+	err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform), false);
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	wl->bss_list = wl->scan_results;
+	bss_list = wl->bss_list;
+	memset(bss_list, 0, len);
+	/* dongle reads buflen to know how much it may write back */
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+	if (unlikely(err)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(wl);
+
+scan_done_out:
+	/* stop the watchdog, then complete the request under cfgdrv_lock */
+	del_timer_sync(&wl->scan_timeout);
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	if (wl->scan_request) {
+		WL_DBG(("cfg80211_scan_done\n"));
+		cfg80211_scan_done(wl->scan_request, false);
+		wl->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+	mutex_unlock(&wl->usr_sync);
+	return err;
+}
+/*
+ * wl_frame_get_mgmt() - synthesize an 802.11 management frame from an
+ * event body.
+ *
+ * @fc:       frame-control value (FC_ASSOC_REQ / FC_REASSOC_REQ add the
+ *            corresponding fixed-length fields before the body)
+ * @da/sa/bssid: addresses copied into the management header
+ * @pheader:  out — newly kzalloc'd frame; the CALLER must kfree() it
+ * @body_len: in: length of @pbody; out: total frame length
+ * @pbody:    event body copied after header + fixed fields
+ *
+ * Returns 0 or -ENOMEM.
+ */
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability , listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen inteval, ap address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	/* kzalloc leaves the fixed fields zeroed */
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	/* body goes after the header and any fixed fields */
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+/*
+ * Deliver a received management/action frame event to cfg80211.
+ * For WLC_E_ACTION_FRAME_RX the event payload carries only the frame body,
+ * so a full 802.11 management header is reconstructed via wl_frame_get_mgmt()
+ * (DA = our current MAC, SA = event source, BSSID = associated BSS); the
+ * kzalloc'd copy is freed after delivery.  Other events (probe requests)
+ * already contain a complete frame and are forwarded in place.
+ * Always returns 0: errors are logged, not propagated to the event loop.
+ */
+static s32
+wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = false;
+	s32 err = 0;
+	s32 freq;
+	struct net_device *dev = NULL;
+	wifi_p2p_pub_act_frame_t *act_frm = NULL;
+	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+	wl_event_rx_frame_data_t *rxframe =
+		(wl_event_rx_frame_data_t*)data;
+	u32 event = ntoh32(e->event_type);
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	/* frame payload follows the rx_frame_data header in the event data */
+	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+
+	/* firmware ioctls must target the primary interface, not virtual p2p0 */
+	if (wl->p2p_net == ndev) {
+		dev = wl_to_prmry_ndev(wl);
+	} else {
+		dev = ndev;
+	}
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+	/* mainline 2.6.38 took only (channel); older/newer and compat
+	 * variants take (channel, band)
+	 */
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		/* look up our own MAC and the current BSSID to rebuild the
+		 * management header around the received body
+		 */
+		wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr",
+			NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync);
+
+		wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+		memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n",
+				__func__, mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = true;
+		/* classify the action frame purely for logging/mpc decisions */
+		if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			act_frm = (wifi_p2p_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+		} else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			p2p_act_frm = (wifi_p2p_action_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) p2p_act_frm;
+		} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) sd_act_frm;
+		}
+		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN);
+		/*
+		 * After complete GO Negotiation, roll back to mpc mode
+		 */
+		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
+			(act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
+			wldev_iovar_setint(dev, "mpc", 1);
+		}
+	} else {
+		/* probe request events already carry a full 802.11 frame */
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+	}
+
+	cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+	WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", __func__,
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+
+	if (isfree)
+		kfree(mgmt_frame);
+exit:
+	return 0;
+}
+
+/* Reset every cached configuration value to "unspecified" (-1). */
+static void wl_init_conf(struct wl_conf *conf)
+{
+	WL_DBG(("Enter \n"));
+	/* -1 means "not configured / use firmware default" */
+	conf->tx_power = -1;
+	conf->retry_long = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->frag_threshold = (u32)-1;
+}
+
+/* Zero the per-netdev connection profile under the cfgdrv spinlock. */
+static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct wl_profile *prof = wl_get_profile_by_netdev(wl, ndev);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&wl->cfgdrv_lock, irq_flags);
+	memset(prof, 0, sizeof(*prof));
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, irq_flags);
+}
+
+/*
+ * Route firmware events to their cfg80211 handlers; unlisted event types
+ * stay NULL and are silently ignored by the dispatch thread.
+ */
+static void wl_init_event_handler(struct wl_priv *wl)
+{
+	memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
+
+	/* connection life-cycle */
+	wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	/* scanning */
+	wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	/* management frames / p2p */
+	wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	wl->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+}
+
+/*
+ * Allocate every dynamically-sized buffer hanging off struct wl_priv
+ * (scan results/request, ioctl scratch buffers, extra/iscan/pmk/sta state,
+ * action-frame handler).  On any allocation failure all previously
+ * allocated members are released via wl_deinit_priv_mem() and -ENOMEM is
+ * returned.  Once afx_hdl exists, its completion and work item are
+ * initialized here as well.
+ */
+static s32 wl_init_priv_mem(struct wl_priv *wl)
+{
+	WL_DBG(("Enter \n"));
+	wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!wl->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+	if (unlikely(!wl->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->scan_req_int =
+	    (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+	if (unlikely(!wl->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!wl->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!wl->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!wl->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+	if (unlikely(!wl->iscan)) {
+		WL_ERR(("Iscan buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+	if (unlikely(!wl->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL);
+	if (unlikely(!wl->sta_info)) {
+		WL_ERR(("sta info  alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl->afx_hdl = (void *)kzalloc(sizeof(*wl->afx_hdl), GFP_KERNEL);
+	if (unlikely(!wl->afx_hdl)) {
+		WL_ERR(("afx hdl  alloc failed\n"));
+		goto init_priv_mem_out;
+	} else {
+		/* action-frame scan machinery is ready once afx_hdl exists */
+		init_completion(&wl->act_frm_scan);
+		INIT_WORK(&wl->afx_hdl->work, wl_cfg80211_afx_handler);
+	}
+	return 0;
+
+init_priv_mem_out:
+	/* kfree(NULL) is a no-op, so partial teardown is safe */
+	wl_deinit_priv_mem(wl);
+
+	return -ENOMEM;
+}
+
+/*
+ * Free all buffers allocated by wl_init_priv_mem().  Safe on a partially
+ * initialized wl_priv: kfree(NULL) is a no-op and every pointer is reset
+ * to NULL to prevent double-free on repeated calls.  Pending afx work is
+ * cancelled before its context is freed; cached AP IEs are released too.
+ */
+static void wl_deinit_priv_mem(struct wl_priv *wl)
+{
+	kfree(wl->scan_results);
+	wl->scan_results = NULL;
+	kfree(wl->conf);
+	wl->conf = NULL;
+	kfree(wl->scan_req_int);
+	wl->scan_req_int = NULL;
+	kfree(wl->ioctl_buf);
+	wl->ioctl_buf = NULL;
+	kfree(wl->escan_ioctl_buf);
+	wl->escan_ioctl_buf = NULL;
+	kfree(wl->extra_buf);
+	wl->extra_buf = NULL;
+	kfree(wl->iscan);
+	wl->iscan = NULL;
+	kfree(wl->pmk_list);
+	wl->pmk_list = NULL;
+	kfree(wl->sta_info);
+	wl->sta_info = NULL;
+	if (wl->afx_hdl) {
+		/* the work item references afx_hdl - stop it before freeing */
+		cancel_work_sync(&wl->afx_hdl->work);
+		kfree(wl->afx_hdl);
+		wl->afx_hdl = NULL;
+	}
+
+	if (wl->ap_info) {
+		kfree(wl->ap_info->wpa_ie);
+		kfree(wl->ap_info->rsn_ie);
+		kfree(wl->ap_info->wps_ie);
+		kfree(wl->ap_info);
+		wl->ap_info = NULL;
+	}
+}
+
+/*
+ * Spawn the event dispatch thread (wl_event_handler) via PROC_START.
+ * Returns 0 on success, -ENOMEM if the thread could not be created.
+ */
+static s32 wl_create_event_handler(struct wl_priv *wl)
+{
+	WL_DBG(("Enter \n"));
+
+	/* Do not use DHD in cfg driver */
+	wl->event_tsk.thr_pid = -1;
+	PROC_START(wl_event_handler, wl, &wl->event_tsk, 0);
+	return (wl->event_tsk.thr_pid < 0) ? -ENOMEM : 0;
+}
+
+/* Stop the event dispatch thread, but only if it was actually started. */
+static void wl_destroy_event_handler(struct wl_priv *wl)
+{
+	if (wl->event_tsk.thr_pid < 0)
+		return;
+	PROC_STOP(&wl->event_tsk);
+}
+
+/* Terminate the iscan worker thread: signal it, then join via kthread_stop. */
+static void wl_term_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl_to_iscan(wl);
+
+	WL_TRACE(("In\n"));
+	if (!wl->iscan_on || !scan_ctl->tsk)
+		return;
+	scan_ctl->state = WL_ISCAN_STATE_IDLE;
+	WL_INFO(("SIGTERM\n"));
+	/* SIGTERM breaks the thread out of its interruptible down() */
+	send_sig(SIGTERM, scan_ctl->tsk, 1);
+	WL_DBG(("kthread_stop\n"));
+	kthread_stop(scan_ctl->tsk);
+	scan_ctl->tsk = NULL;
+}
+
+/*
+ * Complete (or abort) an in-progress iscan: clear the SCANNING driver
+ * status and finish any pending cfg80211 scan request.
+ * @aborted is passed straight through to cfg80211_scan_done().
+ */
+static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
+{
+	struct wl_priv *wl = iscan_to_wl(iscan);
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	if (!wl_get_drv_status(wl, SCANNING, ndev)) {
+		/* NOTE(review): clearing SCANNING in the "not scanning"
+		 * branch, and outside cfgdrv_lock, looks redundant -
+		 * confirm whether this was intended.
+		 */
+		wl_clr_drv_status(wl, SCANNING, ndev);
+		WL_ERR(("Scan complete while device not scanning\n"));
+		return;
+	}
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	wl_clr_drv_status(wl, SCANNING, ndev);
+	if (likely(wl->scan_request)) {
+		cfg80211_scan_done(wl->scan_request, aborted);
+		wl->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+	wl->iscan_kickstart = false;
+}
+
+/*
+ * Kick the iscan worker thread via its semaphore.
+ * Returns 0 when woken, -EIO if the scan engine is idle.
+ */
+static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
+{
+	if (iscan->state == WL_ISCAN_STATE_IDLE)
+		return -EIO;
+
+	WL_DBG(("wake up iscan\n"));
+	up(&iscan->sync);
+	return 0;
+}
+
+/*
+ * Fetch the current partial results of an in-progress iscan from the
+ * firmware into iscan->scan_buf.
+ * @status:   out - firmware scan status (WL_SCAN_RESULTS_*)
+ * @bss_list: out - points INTO iscan->scan_buf (no ownership transfer)
+ * Returns 0 on success or the wldev_iovar_getbuf() error code.
+ * All multi-byte fields are converted from dongle to host endianness.
+ */
+static s32
+wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
+	struct wl_scan_results **bss_list)
+{
+	struct wl_iscan_results list;
+	struct wl_scan_results *results;
+	struct wl_iscan_results *list_buf;
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
+	list_buf = (struct wl_iscan_results *)iscan->scan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	/* the request body tells the firmware how big our buffer is */
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WL_ISCAN_BUF_MAX);
+	err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list,
+		WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
+		WL_ISCAN_BUF_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_DBG(("results->count = %d\n", results->count));
+	WL_DBG(("results->buflen = %d\n", results->buflen));
+	*status = dtoh32(list_buf->status);
+	*bss_list = results;
+
+	return err;
+}
+
+/* Scan finished: publish the collected BSS list and complete the request. */
+static s32 wl_iscan_done(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl->iscan;
+
+	scan_ctl->state = WL_ISCAN_STATE_IDLE;
+	mutex_lock(&wl->usr_sync);
+	wl_inform_bss(wl);
+	wl_notify_iscan_complete(scan_ctl, false);
+	mutex_unlock(&wl->usr_sync);
+	return 0;
+}
+
+/* Results not ready yet: re-arm the rescan timer and poll again later. */
+static s32 wl_iscan_pending(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl->iscan;
+
+	mod_timer(&scan_ctl->timer, jiffies + scan_ctl->timer_ms * HZ / 1000);
+	scan_ctl->timer_on = 1;
+	return 0;
+}
+
+/*
+ * Partial results arrived: publish what we have, ask the firmware to
+ * continue the scan, then re-arm the rescan timer.
+ */
+static s32 wl_iscan_inprogress(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl->iscan;
+
+	mutex_lock(&wl->usr_sync);
+	wl_inform_bss(wl);
+	wl_run_iscan(scan_ctl, NULL, WL_SCAN_ACTION_CONTINUE);
+	mutex_unlock(&wl->usr_sync);
+	mod_timer(&scan_ctl->timer, jiffies + scan_ctl->timer_ms * HZ / 1000);
+	scan_ctl->timer_on = 1;
+	return 0;
+}
+
+/* Drop the scan engine to idle and report the scan as aborted. */
+static s32 wl_iscan_aborted(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl->iscan;
+
+	scan_ctl->state = WL_ISCAN_STATE_IDLE;
+	mutex_lock(&wl->usr_sync);
+	wl_notify_iscan_complete(scan_ctl, true);
+	mutex_unlock(&wl->usr_sync);
+	return 0;
+}
+
+/*
+ * Dedicated iscan worker thread.  Wakes on iscan->sync (posted by the
+ * rescan timer or wl_wakeup_iscan), pulls the latest partial results
+ * from the firmware and dispatches to the per-status handler table.
+ * Exits when kthread_stop() is called or SIGTERM interrupts the down().
+ */
+static s32 wl_iscan_thread(void *data)
+{
+	struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+	struct wl_priv *wl = iscan_to_wl(iscan);
+	u32 status;
+	int err = 0;
+
+	allow_signal(SIGTERM);
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (likely(!down_interruptible(&iscan->sync))) {
+		if (kthread_should_stop())
+			break;
+		/* timer rescheduling is done by the status handlers */
+		if (iscan->timer_on) {
+			del_timer_sync(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+		mutex_lock(&wl->usr_sync);
+		err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
+		if (unlikely(err)) {
+			status = WL_SCAN_RESULTS_ABORTED;
+			WL_ERR(("Abort iscan\n"));
+		}
+		mutex_unlock(&wl->usr_sync);
+		iscan->iscan_handler[status] (wl);
+	}
+	if (iscan->timer_on) {
+		del_timer_sync(&iscan->timer);
+		iscan->timer_on = 0;
+	}
+	WL_DBG(("%s was terminated\n", __func__));
+
+	return 0;
+}
+
+/* Scan watchdog: force-complete whichever scan flavor is still active. */
+static void wl_scan_timeout(unsigned long data)
+{
+	struct wl_priv *wl = (struct wl_priv *)data;
+
+	if (!wl->scan_request)
+		return;
+
+	WL_ERR(("timer expired\n"));
+	if (wl->escan_on)
+		wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true);
+	else
+		wl_notify_iscan_complete(wl_to_iscan(wl), true);
+}
+/* Rescan timer callback: wake the iscan thread for another results poll. */
+static void wl_iscan_timer(unsigned long data)
+{
+	struct wl_iscan_ctrl *scan_ctl = (struct wl_iscan_ctrl *)data;
+
+	if (!scan_ctl)
+		return;
+
+	scan_ctl->timer_on = 0;
+	WL_DBG(("timer expired\n"));
+	wl_wakeup_iscan(scan_ctl);
+}
+
+/*
+ * Lazily start the iscan worker thread (idempotent: a no-op when iscan
+ * is disabled or the thread already exists).
+ * Returns 0 on success, -ENOMEM if the thread cannot be created.
+ */
+static s32 wl_invoke_iscan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *scan_ctl = wl_to_iscan(wl);
+
+	if (!wl->iscan_on || scan_ctl->tsk)
+		return 0;
+
+	scan_ctl->state = WL_ISCAN_STATE_IDLE;
+	sema_init(&scan_ctl->sync, 0);
+	scan_ctl->tsk = kthread_run(wl_iscan_thread, scan_ctl, "wl_iscan");
+	if (IS_ERR(scan_ctl->tsk)) {
+		WL_ERR(("Could not create iscan thread\n"));
+		scan_ctl->tsk = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * Map each iscan result status to its state handler; both ABORTED and
+ * NO_MEM terminate the scan through wl_iscan_aborted().
+ */
+static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan)
+{
+	memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler));
+	iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
+	iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
+	iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
+	iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
+	iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
+}
+
+/*
+ * netdevice notifier: keeps cfg80211 private state consistent with
+ * virtual interface teardown.  The primary interface is ignored.
+ * NETDEV_UNREGISTER frees the per-ndev net_info; NETDEV_GOING_DOWN
+ * force-completes a running escan so that wdev_cleanup_work does not
+ * trip its WARN_ON on a still-pending scan.
+ */
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+	unsigned long state,
+	void *ndev)
+{
+	struct net_device *dev = ndev;
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wl_priv *wl = wlcfg_drv_priv;
+
+	WL_DBG(("Enter \n"));
+	if (!wdev || !wl || dev == wl_to_prmry_ndev(wl))
+		return NOTIFY_DONE;
+	switch (state) {
+		case NETDEV_UNREGISTER:
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_dealloc_netinfo(wl, ndev);
+			break;
+		case NETDEV_GOING_DOWN:
+			/* At NETDEV_DOWN state, wdev_cleanup_work work will be called.
+			 * In front of door, the function checks
+			 * whether current scan is working or not.
+			 * If the scanning is still working, wdev_cleanup_work call WARN_ON and
+			 * make the scan done forcibly.
+			 */
+			if (wl_get_drv_status(wl, SCANNING, dev)) {
+				if (wl->escan_on) {
+					wl_notify_escan_complete(wl, dev, true, true);
+				}
+			}
+			break;
+	}
+	return NOTIFY_DONE;
+}
+/* Registered in wl_cfg80211_attach(); unregistered in wl_deinit_priv(). */
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+
+/*
+ * Finish an escan: optionally abort the firmware scan engine, stop the
+ * scan watchdog and complete any pending cfg80211 scan request.
+ * @aborted:  passed through to cfg80211_scan_done()
+ * @fw_abort: when true, a 1-channel/0-ssid WLC_SCAN is issued to force
+ *            the firmware to stop its scan engine
+ * Returns BCME_OK, or a negative error from allocation / the abort ioctl.
+ */
+static s32 wl_notify_escan_complete(struct wl_priv *wl,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+
+	WL_DBG(("Enter \n"));
+
+	if (wl->scan_request) {
+		/* scan requests on virtual p2p0 are serviced by the primary netdev */
+		if (wl->scan_request->dev == wl->p2p_net)
+			dev = wl_to_prmry_ndev(wl);
+		else
+			dev = wl->scan_request->dev;
+	}
+	else {
+		/* message previously rendered without a separator/newline */
+		WL_ERR(("wl->scan_request is NULL may be internal scan. "
+			"doing scan_abort for ndev %p primary %p p2p_net %p\n",
+			ndev, wl_to_prmry_ndev(wl), wl->p2p_net));
+		dev = ndev;
+	}
+	if (fw_abort) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+		if (params == NULL) {
+			WL_ERR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+			if (err < 0) {
+				WL_ERR(("scan abort  failed \n"));
+			}
+		}
+	}
+	del_timer_sync(&wl->scan_timeout);
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	if (likely(wl->scan_request)) {
+		cfg80211_scan_done(wl->scan_request, aborted);
+		wl->scan_request = NULL;
+	}
+	if (p2p_is_on(wl))
+		wl_clr_p2p_status(wl, SCANNING);
+	wl_clr_drv_status(wl, SCANNING, dev);
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+	/* kfree(NULL) is a no-op - no guard needed */
+	kfree(params);
+
+	return err;
+}
+
+/*
+ * Firmware escan event handler (WLC_E_ESCAN_RESULT).
+ * WLC_E_STATUS_PARTIAL events each carry exactly one bss_info record,
+ * which is validated and merged (with RSSI de-duplication) into
+ * wl->escan_info.escan_buf; SUCCESS/ABORT/other statuses finish the scan
+ * and notify cfg80211.  While an action-frame peer search is running
+ * (SENDING_ACT_FRM), results are instead matched against the pending
+ * peer address to discover the peer's channel.
+ */
+static s32 wl_escan_handler(struct wl_priv *wl,
+	struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	u32 bi_length;
+	u32 i;
+	u8 *p2p_dev_addr = NULL;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(wl, SCANNING)) {
+		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM))
+			ndev = wl->afx_hdl->dev;
+		else
+			ndev = wl->escan_info.ndev;
+
+	}
+	if (!ndev || !wl->escan_on ||
+		!wl_get_drv_status(wl, SCANNING, ndev)) {
+		WL_ERR(("escan is not ready ndev %p wl->escan_on %d drv_status 0x%x\n",
+			ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev)));
+		return err;
+	}
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+		escan_result = (wl_escan_result_t *) data;
+		/* validate the record before trusting any length field */
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+
+		/* drop IBSS results when adhoc mode is not advertised */
+		if (!(wl_to_wiphy(wl)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_ERR(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+			/* peer search: match the P2P device address embedded
+			 * in the scan result against the pending tx target
+			 */
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				wl->afx_hdl->pending_tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = CHSPEC_CHANNEL(
+					wl_chspec_driver_to_host(bi->chanspec));
+				WL_DBG(("ACTION FRAME SCAN : Peer found, channel : %d\n", channel));
+				wl_clr_p2p_status(wl, SCANNING);
+				wl->afx_hdl->peer_chan = channel;
+				complete(&wl->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+			}
+#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
+			/* de-duplicate: same BSSID + band + SSID keeps one
+			 * entry, preferring the better/on-channel RSSI
+			 */
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+					if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+						(bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						 * both on-channel or both off-channel
+						 */
+						bss->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+						(bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						 * if the new measurement is off channel
+						 */
+						bss->RSSI = bi->RSSI;
+						bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+					}
+
+					goto exit;
+				}
+			}
+			/* new BSS: append it to the accumulation buffer */
+			memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+		}
+
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(wl, SCANNING);
+			wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+			/* peer never found: unblock the waiting sender */
+			if (wl->afx_hdl->peer_chan == WL_INVALID)
+				complete(&wl->act_frm_scan);
+		} else if (likely(wl->scan_request)) {
+			mutex_lock(&wl->usr_sync);
+			WL_INFO(("ESCAN COMPLETED\n"));
+			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+			wl_inform_bss(wl);
+			wl_notify_escan_complete(wl, ndev, false, false);
+			mutex_unlock(&wl->usr_sync);
+		}
+	}
+	else if (status == WLC_E_STATUS_ABORT) {
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+			wl_clr_p2p_status(wl, SCANNING);
+			if (wl->afx_hdl->peer_chan == WL_INVALID)
+				complete(&wl->act_frm_scan);
+		} else if (likely(wl->scan_request)) {
+			mutex_lock(&wl->usr_sync);
+			WL_INFO(("ESCAN ABORTED\n"));
+			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+			wl_inform_bss(wl);
+			wl_notify_escan_complete(wl, ndev, true, false);
+			mutex_unlock(&wl->usr_sync);
+		}
+	}
+	else {
+		/* any unknown status is treated the same as an abort */
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(wl, SCANNING);
+			wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+			if (wl->afx_hdl->peer_chan == WL_INVALID)
+				complete(&wl->act_frm_scan);
+		} else if (likely(wl->scan_request)) {
+			mutex_lock(&wl->usr_sync);
+			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
+			wl_inform_bss(wl);
+			wl_notify_escan_complete(wl, ndev, true, false);
+			mutex_unlock(&wl->usr_sync);
+		}
+	}
+exit:
+	return err;
+}
+
+/*
+ * Initialize whichever scan engine is enabled: a kernel thread plus
+ * rescan timer for iscan, or the WLC_E_ESCAN_RESULT handler for escan.
+ * Also arms the common scan watchdog (wl_scan_timeout).
+ * Returns 0, or -ENOMEM if the iscan thread cannot be created.
+ */
+static s32 wl_init_scan(struct wl_priv *wl)
+{
+	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+	int err = 0;
+
+	if (wl->iscan_on) {
+		iscan->dev = wl_to_prmry_ndev(wl);
+		iscan->state = WL_ISCAN_STATE_IDLE;
+		wl_init_iscan_handler(iscan);
+		iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
+		init_timer(&iscan->timer);
+		iscan->timer.data = (unsigned long) iscan;
+		iscan->timer.function = wl_iscan_timer;
+		sema_init(&iscan->sync, 0);
+		iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+		if (IS_ERR(iscan->tsk)) {
+			WL_ERR(("Could not create iscan thread\n"));
+			iscan->tsk = NULL;
+			return -ENOMEM;
+		}
+		iscan->data = wl;
+	} else if (wl->escan_on) {
+		/* escan results arrive as firmware events, no thread needed */
+		wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	}
+	/* Init scan_timeout timer */
+	init_timer(&wl->scan_timeout);
+	wl->scan_timeout.data = (unsigned long) wl;
+	wl->scan_timeout.function = wl_scan_timeout;
+
+	return err;
+}
+
+/*
+ * One-time initialization of the driver private context: default flags
+ * (escan enabled, iscan/roam disabled), locks, the event queue + dispatch
+ * thread, the scan engine and the per-netdev profile.
+ * Returns 0 or a negative error; partially initialized state is left for
+ * the caller's cleanup path.
+ */
+static s32 wl_init_priv(struct wl_priv *wl)
+{
+	struct wiphy *wiphy = wl_to_wiphy(wl);
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	s32 err = 0;
+
+	wl->scan_request = NULL;
+	wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+	/* escan is the default scan engine; iscan support is kept but off */
+	wl->iscan_on = false;
+	wl->escan_on = true;
+	wl->roam_on = false;
+	wl->iscan_kickstart = false;
+	wl->active_scan = true;
+	wl->rf_blocked = false;
+	spin_lock_init(&wl->cfgdrv_lock);
+	mutex_init(&wl->ioctl_buf_sync);
+	init_waitqueue_head(&wl->netif_change_event);
+	wl_init_eq(wl);
+	err = wl_init_priv_mem(wl);
+	if (err)
+		return err;
+	if (wl_create_event_handler(wl))
+		return -ENOMEM;
+	wl_init_event_handler(wl);
+	mutex_init(&wl->usr_sync);
+	err = wl_init_scan(wl);
+	if (err)
+		return err;
+	wl_init_conf(wl->conf);
+	wl_init_prof(wl, ndev);
+	wl_link_down(wl);
+	DNGL_FUNC(dhd_cfg80211_init, (wl));
+
+	return err;
+}
+
+/*
+ * Undo wl_init_priv(): stop the event thread, flush the event queue,
+ * bring the link down, kill scan timers/threads, release all private
+ * buffers and drop the netdev notifier.
+ */
+static void wl_deinit_priv(struct wl_priv *wl)
+{
+	DNGL_FUNC(dhd_cfg80211_deinit, (wl));
+	wl_destroy_event_handler(wl);
+	wl_flush_eq(wl);
+	wl_link_down(wl);
+	del_timer_sync(&wl->scan_timeout);
+	wl_term_iscan(wl);
+	wl_deinit_priv_mem(wl);
+	unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+}
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+/*
+ * Register the virtual p2p0 net_device with the cfgp2p layer.
+ * Returns 0 on success, -ENODEV if registration fails.
+ */
+static s32 wl_cfg80211_attach_p2p(void)
+{
+	struct wl_priv *wl = wlcfg_drv_priv;
+
+	WL_TRACE(("Enter \n"));
+
+	if (wl_cfgp2p_register_ndev(wl) < 0) {
+		WL_ERR(("%s: P2P attach failed. \n", __func__));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Unregister the virtual p2p0 net_device and free its wireless_dev.
+ * Returns 0 on success, -EINVAL if the private context or the p2p wdev
+ * is missing.
+ */
+static s32 wl_cfg80211_detach_p2p(void)
+{
+	struct wl_priv *wl = wlcfg_drv_priv;
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	/* Validate wl BEFORE dereferencing it: the original read
+	 * wl->p2p_wdev ahead of the NULL test that was meant to guard it.
+	 */
+	if (!wl) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+	wdev = wl->p2p_wdev;
+	if (!wdev) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	wl_cfgp2p_unregister_ndev(wl);
+
+	wl->p2p_wdev = NULL;
+	wl->p2p_net = NULL;
+	WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev));
+	kfree(wdev);
+
+	return 0;
+}
+#endif /* defined(WLP2P) && defined(WL_ENABLE_P2P_IF) */
+
+/*
+ * Second-stage attach, run once the firmware is up: advertises the P2P
+ * interface modes, initializes cfgp2p private data and derives the p2p0
+ * MAC address (primary MAC with the locally-administered bit set).
+ * Marks the interface READY on success.
+ * Returns 0, or -ENODEV / a cfgp2p init error.
+ */
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct wl_priv * wl = NULL;
+	s32 err = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invaild\n"));
+		return -ENODEV;
+	}
+	wl = wlcfg_drv_priv;
+	if (wl && !wl_get_drv_status(wl, READY, ndev)) {
+		if (wl->wdev &&
+			wl_cfgp2p_supported(wl, ndev)) {
+#if !defined(WL_ENABLE_P2P_IF)
+			wl->wdev->wiphy->interface_modes |=
+				(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+				BIT(NL80211_IFTYPE_P2P_GO));
+#endif
+			if ((err = wl_cfgp2p_init_priv(wl)) != 0)
+				goto fail;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+			if (wl->p2p_net) {
+				/* Update MAC addr for p2p0 interface here. */
+				memcpy(wl->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+				wl->p2p_net->dev_addr[0] |= 0x02;
+				printk("%s: p2p_dev_addr="MACSTR "\n",
+					wl->p2p_net->name, MAC2STR(wl->p2p_net->dev_addr));
+			} else {
+				WL_ERR(("p2p_net not yet populated."
+					" Couldn't update the MAC Address for p2p0 \n"));
+				return -ENODEV;
+			}
+#endif /* defined(WLP2P) && (WL_ENABLE_P2P_IF) */
+
+			wl->p2p_supported = true;
+		}
+	} else
+		/* NOTE(review): also reached when READY is already set, in
+		 * which case -ENODEV looks odd - confirm callers expect it.
+		 */
+		return -ENODEV;
+	wl_set_drv_status(wl, READY, ndev);
+fail:
+	return err;
+}
+
+/*
+ * Allocate the wiphy/wireless_dev pair for @ndev, initialize the driver
+ * private context (published through wlcfg_drv_priv) and register the
+ * auxiliary facilities: rfkill, the netdev notifier, btcoex and the
+ * optional virtual p2p interface.
+ * Returns 0 on success or a negative error; on failure all partially
+ * initialized state is torn down.
+ */
+s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+{
+	struct wireless_dev *wdev;
+	struct wl_priv *wl;
+	s32 err = 0;
+	struct device *dev;
+
+	WL_TRACE(("In\n"));
+	if (!ndev) {
+		WL_ERR(("ndev is invaild\n"));
+		return -ENODEV;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+	dev = wl_cfg80211_get_parent_dev();
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return -ENOMEM;
+	}
+	err = wl_setup_wiphy(wdev, dev);
+	if (unlikely(err)) {
+		kfree(wdev);
+		return -ENOMEM;
+	}
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+	wl = (struct wl_priv *)wiphy_priv(wdev->wiphy);
+	wl->wdev = wdev;
+	wl->pub = data;
+	INIT_LIST_HEAD(&wl->net_list);
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+	err = wl_alloc_netinfo(wl, ndev, wdev, WL_MODE_BSS);
+	if (err) {
+		WL_ERR(("Failed to alloc net_info (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = wl_init_priv(wl);
+	if (err) {
+		WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+
+	err = wl_setup_rfkill(wl, TRUE);
+	if (err) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+	if (err) {
+		WL_ERR(("Failed to register notifierl %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#if defined(COEX_DHCP)
+	if (wl_cfg80211_btcoex_init(wl)) {
+		/* err was still 0 here in the original, so a btcoex init
+		 * failure was reported to the caller as success
+		 */
+		err = -ENOMEM;
+		goto cfg80211_attach_out;
+	}
+#endif
+
+	wlcfg_drv_priv = wl;
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+	err = wl_cfg80211_attach_p2p();
+	if (err)
+		goto cfg80211_attach_out;
+#endif
+
+	return err;
+
+cfg80211_attach_out:
+	/* teardown must not clobber the original error code in err */
+	wl_setup_rfkill(wl, FALSE);
+	wl_free_wdev(wl);
+	return err;
+}
+
+/*
+ * Tear down everything wl_cfg80211_attach() set up, in reverse order.
+ * @para is unused.  "wl" must not be touched after wl_free_wdev() - see
+ * the warning at the end of the function.
+ */
+void wl_cfg80211_detach(void *para)
+{
+	struct wl_priv *wl;
+
+	(void)para;
+	wl = wlcfg_drv_priv;
+
+	WL_TRACE(("In\n"));
+
+#if defined(COEX_DHCP)
+	wl_cfg80211_btcoex_deinit(wl);
+#endif
+
+#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_detach_p2p();
+#endif
+	wl_setup_rfkill(wl, FALSE);
+	if (wl->p2p_supported)
+		wl_cfgp2p_deinit_priv(wl);
+	wl_deinit_priv(wl);
+	wlcfg_drv_priv = NULL;
+	wl_cfg80211_clear_parent_dev();
+	wl_free_wdev(wl);
+	/* PLEASE do NOT call any function after wl_free_wdev, the driver's private structure "wl",
+	 * which is the private part of wiphy, has been freed in wl_free_wdev !!!!!!!!!!!
+	 */
+}
+
+/*
+ * Kick the event dispatch thread.  The wake lock taken here is released
+ * by wl_event_handler() once the queue has been drained.
+ */
+static void wl_wakeup_event(struct wl_priv *wl)
+{
+	if (wl->event_tsk.thr_pid < 0)
+		return;
+
+	DHD_OS_WAKE_LOCK(wl->pub);
+	up(&wl->event_tsk.sema);
+}
+
+/*
+ * Decide whether a queued event should be delivered through the virtual
+ * p2p0 interface: only P2P-related event types received on the primary
+ * interface (ifidx 0) are re-routed.  Returns TRUE/FALSE.
+ */
+static int wl_is_p2p_event(struct wl_event_q *e)
+{
+	switch (e->etype) {
+	/* We have to seperate out the P2P events received
+	 * on primary interface so that it can be send up
+	 * via p2p0 interface.
+	*/
+	case WLC_E_P2P_PROBREQ_MSG:
+	case WLC_E_P2P_DISC_LISTEN_COMPLETE:
+	case WLC_E_ACTION_FRAME_RX:
+	case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
+	case WLC_E_ACTION_FRAME_COMPLETE:
+
+		if (e->emsg.ifidx != 0) {
+			WL_TRACE(("P2P Event on Virtual I/F (ifidx:%d) \n",
+				e->emsg.ifidx));
+			/* We are only bothered about the P2P events received
+			 * on primary interface. For rest of them return false
+			 * so that it is sent over the interface corresponding
+			 * to the ifidx.
+			 */
+			return FALSE;
+		} else {
+			WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)."
+				" Sent it to p2p0 \n", e->emsg.ifidx));
+			return TRUE;
+		}
+		break;
+
+	default:
+		WL_TRACE(("NON-P2P Event %d on ifidx (ifidx:%d) \n",
+			e->etype, e->emsg.ifidx));
+		return FALSE;
+	}
+}
+
+/*
+ * Event dispatch thread (started via PROC_START from
+ * wl_create_event_handler).  Drains the event queue, maps each event to
+ * its destination netdev (p2p0 for primary-interface P2P events) and
+ * invokes the matching wl->evt_handler entry.  Releases the wake lock
+ * taken in wl_wakeup_event() after each queue drain.
+ */
+static s32 wl_event_handler(void *data)
+{
+	struct net_device *netdev;
+	struct wl_priv *wl = NULL;
+	struct wl_event_q *e;
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+	wl = (struct wl_priv *)tsk->parent;
+	DAEMONIZE("dhd_cfg80211_event");
+	complete(&tsk->completed);
+
+	while (down_interruptible (&tsk->sema) == 0) {
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated)
+			break;
+		while ((e = wl_deq_event(wl))) {
+			WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+			/* All P2P device address related events comes on primary interface since
+			 * there is no corresponding bsscfg for P2P interface. Map it to p2p0
+			 * interface.
+			 */
+			if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_net)) {
+				netdev = wl->p2p_net;
+			} else {
+				netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx);
+			}
+			if (!netdev)
+				netdev = wl_to_prmry_ndev(wl);
+			/* unknown or unhandled event types are dropped */
+			if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) {
+				wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata);
+			} else {
+				WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+			}
+			wl_put_event(e);
+		}
+		/* balances the DHD_OS_WAKE_LOCK in wl_wakeup_event() */
+		DHD_OS_WAKE_UNLOCK(wl->pub);
+	}
+	WL_ERR(("%s was terminated\n", __func__));
+	complete_and_exit(&tsk->completed, 0);
+	return 0;
+}
+
+/*
+ * Entry point called from the bus/DHD layer for every firmware event:
+ * enqueue the event and wake the dispatch thread.  PFN found/lost
+ * events are only logged here.
+ */
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct wl_priv *wl = wlcfg_drv_priv;
+
+#if (WL_DBG_LEVEL > 0)
+	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
+	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	/* only wake the thread if the event was actually queued */
+	if (likely(!wl_enq_event(wl, ndev, event_type, e, data)))
+		wl_wakeup_event(wl);
+}
+
+/* Initialize the firmware event queue and the lock protecting it. */
+static void wl_init_eq(struct wl_priv *wl)
+{
+	wl_init_eq_lock(wl);
+	INIT_LIST_HEAD(&wl->eq_list);
+}
+
+/* Drain and free every queued event under the event-queue lock. */
+static void wl_flush_eq(struct wl_priv *wl)
+{
+	struct wl_event_q *entry;
+	unsigned long irq_flags;
+
+	irq_flags = wl_lock_eq(wl);
+	while (!list_empty(&wl->eq_list)) {
+		entry = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+		list_del(&entry->eq_list);
+		kfree(entry);
+	}
+	wl_unlock_eq(wl, irq_flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+{
+	struct wl_event_q *entry = NULL;
+	unsigned long irq_flags;
+
+	/* Pop the head of the event queue; NULL when the queue is empty.
+	 * Ownership of the element passes to the caller (see wl_put_event).
+	 */
+	irq_flags = wl_lock_eq(wl);
+	if (!list_empty(&wl->eq_list)) {
+		entry = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+		list_del(&entry->eq_list);
+	}
+	wl_unlock_eq(wl, irq_flags);
+
+	return entry;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+/*
+ * Allocate a queue element (header plus a copy of the event data) and
+ * append it to the event list.  Uses GFP_ATOMIC when called from atomic
+ * context, since events can arrive from the bus bottom half.
+ * Returns 0 or -ENOMEM.
+ */
+static s32
+wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg,
+	void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	/* edata is a trailing flexible buffer inside struct wl_event_q */
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(wl);
+	list_add_tail(&e->eq_list, &wl->eq_list);
+	wl_unlock_eq(wl, flags);
+
+	return err;
+}
+
+/* Release a queue element obtained from wl_deq_event(). */
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
+/*
+ * Translate an nl80211 interface type into the driver's WL_MODE_* value
+ * and program the firmware's infrastructure mode (WLC_SET_INFRA)
+ * accordingly.  MONITOR and WDS are rejected with -EINVAL.
+ */
+static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	/* firmware expects dongle byte order */
+	infra = htod32(infra);
+	err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	wl_set_mode_by_netdev(wl, ndev, mode);
+
+	return 0;
+}
+
+/* Enable (@add true) or disable a single firmware event in the
+ * dongle's "event_msgs" bitmask via a read-modify-write of the iovar.
+ * Returns 0 on success or the wldev_ioctl() error.
+ */
+static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+
+	/* Read the current event mask from the firmware. */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		return err;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	/* Write the updated mask back. */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(err))
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+
+	return err;
+}
+
+/* Query the dongle for its supported band list and populate
+ * wiphy->bands, enabling HT capabilities when the chip reports 11n
+ * (nmode) and 40 MHz (mimo_bw_cap) support.  Returns 0 or a negative
+ * error from the initial WLC_GET_BANDLIST ioctl.
+ */
+s32 wl_update_wiphybands(struct wl_priv *wl)
+{
+	struct wiphy *wiphy;
+	u32 bandlist[3];
+	u32 nband = 0;
+	u32 i = 0;
+	s32 err = 0;
+	int nmode = 0;
+	int bw_40 = 0;
+	int index = 0;
+
+	WL_DBG(("Entry"));
+	memset(bandlist, 0, sizeof(bandlist));
+	err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_BANDLIST, bandlist,
+		sizeof(bandlist), false);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	wiphy = wl_to_wiphy(wl);
+	nband = bandlist[0];	/* element 0 holds the number of bands */
+	wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+	wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+
+	err = wldev_iovar_getint(wl_to_prmry_ndev(wl), "nmode", &nmode);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+	}
+
+	err = wldev_iovar_getint(wl_to_prmry_ndev(wl), "mimo_bw_cap", &bw_40);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+	}
+
+	/* Bound the walk by the ELEMENT count of bandlist, not by
+	 * sizeof(bandlist): the byte size (12) would let a bogus nband
+	 * reported by the firmware index past the 3-entry array.
+	 */
+	for (i = 1; i <= nband && i < sizeof(bandlist) / sizeof(bandlist[0]);
+		i++) {
+		index = -1;
+		if (bandlist[i] == WLC_BAND_5G) {
+			wiphy->bands[IEEE80211_BAND_5GHZ] =
+				&__wl_band_5ghz_a;
+			index = IEEE80211_BAND_5GHZ;
+		} else if (bandlist[i] == WLC_BAND_2G) {
+			wiphy->bands[IEEE80211_BAND_2GHZ] =
+				&__wl_band_2ghz;
+			index = IEEE80211_BAND_2GHZ;
+		}
+
+		if ((index >= 0) && nmode) {
+			wiphy->bands[index]->ht_cap.cap =
+				IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40
+				| IEEE80211_HT_CAP_MAX_AMSDU;
+			wiphy->bands[index]->ht_cap.ht_supported = TRUE;
+			wiphy->bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+			wiphy->bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+		}
+
+		if ((index >= 0) && bw_40) {
+			wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+	}
+
+	wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+	return err;
+}
+
+/* Core bring-up under usr_sync: configure the dongle, set the current
+ * interface mode, refresh wiphy band data, start monitor/iscan support
+ * and finally mark the primary interface READY.
+ */
+static s32 __wl_cfg80211_up(struct wl_priv *wl)
+{
+	s32 err = 0;
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+	WL_DBG(("In\n"));
+
+	err = dhd_config_dongle(wl, false);
+	if (unlikely(err))
+		return err;
+
+	/* -EINPROGRESS is tolerated: the mode switch completes later. */
+	err = wl_config_ifmode(wl, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS)) {
+		WL_ERR(("wl_config_ifmode failed\n"));
+	}
+	err = wl_update_wiphybands(wl);
+	if (unlikely(err)) {
+		WL_ERR(("wl_update_wiphybands failed\n"));
+	}
+
+	/* NOTE(review): dhd_monitor_init()'s status is overwritten by
+	 * wl_invoke_iscan() below - only the latter is reported.
+	 */
+	err = dhd_monitor_init(wl->pub);
+	err = wl_invoke_iscan(wl);
+	wl_set_drv_status(wl, READY, ndev);
+	return err;
+}
+
+/* Core tear-down under usr_sync: abort scanning, complete any pending
+ * cfg80211 scan request, clear every per-interface status bit, reset
+ * the primary interface back to station mode and shut down the p2p /
+ * monitor sub-layers.
+ */
+static s32 __wl_cfg80211_down(struct wl_priv *wl)
+{
+	s32 err = 0;
+	unsigned long flags;
+	struct net_info *iter, *next;
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+	WL_DBG(("In\n"));
+	/* Check if cfg80211 interface is already down */
+	if (!wl_get_drv_status(wl, READY, ndev))
+		return err;	/* it is even not ready */
+	for_each_ndev(wl, iter, next)
+		wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
+
+	wl_term_iscan(wl);
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	if (wl->scan_request) {
+		/* Tell cfg80211 the outstanding scan was aborted. */
+		cfg80211_scan_done(wl->scan_request, true);
+		wl->scan_request = NULL;
+	}
+	for_each_ndev(wl, iter, next) {
+		wl_clr_drv_status(wl, READY, iter->ndev);
+		wl_clr_drv_status(wl, SCANNING, iter->ndev);
+		wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
+		wl_clr_drv_status(wl, CONNECTING, iter->ndev);
+		wl_clr_drv_status(wl, CONNECTED, iter->ndev);
+		wl_clr_drv_status(wl, DISCONNECTING, iter->ndev);
+		wl_clr_drv_status(wl, AP_CREATED, iter->ndev);
+		wl_clr_drv_status(wl, AP_CREATING, iter->ndev);
+	}
+	wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype =
+		NL80211_IFTYPE_STATION;
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+
+	DNGL_FUNC(dhd_cfg80211_down, (wl));
+	wl_flush_eq(wl);
+	wl_link_down(wl);
+	if (wl->p2p_supported)
+		wl_cfgp2p_down(wl);
+	dhd_monitor_uninit();
+
+	return err;
+}
+
+/* Bring the cfg80211 layer up: verify the dongle's ioctl API version,
+ * finish deferred attach work and run __wl_cfg80211_up() under the
+ * usr_sync mutex.  Returns 0, a negative errno, or BCME_VERSION on an
+ * ioctl version mismatch.
+ */
+s32 wl_cfg80211_up(void *para)
+{
+	struct wl_priv *wl;
+	s32 err = 0;
+	int val = 1;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	wl = wlcfg_drv_priv;
+
+	/* Fix: the original wrote (err = ioctl(...) < 0), which bound
+	 * the comparison inside the assignment and stored a boolean in
+	 * err instead of the real error code.
+	 */
+	err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_VERSION, &val,
+		sizeof(int), false);
+	if (err < 0) {
+		WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+		return err;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	ioctl_version = val;
+	WL_ERR(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+	mutex_lock(&wl->usr_sync);
+	wl_cfg80211_attach_post(wl_to_prmry_ndev(wl));
+	err = __wl_cfg80211_up(wl);
+	if (err)
+		WL_ERR(("__wl_cfg80211_up failed\n"));
+	mutex_unlock(&wl->usr_sync);
+
+	return err;
+}
+
+/* Private event to the supplicant indicating that the chip hung:
+ * report a disconnect to cfg80211 and drop the driver's link state.
+ */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+	struct wl_priv *drv = wlcfg_drv_priv;
+
+	WL_ERR(("In : chip crash eventing\n"));
+	cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+	if (drv)
+		wl_link_down(drv);
+	return 0;
+}
+
+/* Public down entry point: runs __wl_cfg80211_down() under the
+ * usr_sync mutex.  @para is unused.
+ */
+s32 wl_cfg80211_down(void *para)
+{
+	struct wl_priv *wl;
+	s32 err = 0;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	wl = wlcfg_drv_priv;
+	mutex_lock(&wl->usr_sync);
+	err = __wl_cfg80211_down(wl);
+	mutex_unlock(&wl->usr_sync);
+
+	return err;
+}
+
+/* Return a pointer to the requested field (WL_PROF_*) of @ndev's
+ * profile, or NULL for an unknown item.  NOTE(review): the pointer
+ * aims into the live profile and is returned after the lock is
+ * dropped - callers must tolerate concurrent updates.
+ */
+static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item)
+{
+	unsigned long flags;
+	void *rptr = NULL;
+	struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+
+	if (!profile)
+		return NULL;
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SEC:
+		rptr = &profile->sec;
+		break;
+	case WL_PROF_ACT:
+		rptr = &profile->active;
+		break;
+	case WL_PROF_BSSID:
+		rptr = profile->bssid;
+		break;
+	case WL_PROF_SSID:
+		rptr = &profile->ssid;
+		break;
+	}
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+	if (!rptr)
+		WL_ERR(("invalid item (%d)\n", item));
+	return rptr;
+}
+
+/* Store one field (selected by @item, WL_PROF_*) into @ndev's profile
+ * under cfgdrv_lock.  @data is interpreted per item; for WL_PROF_BSSID
+ * a NULL @data clears the stored BSSID.  Returns 0, WL_INVALID when no
+ * profile exists, or -EOPNOTSUPP for an unknown item.
+ */
+static s32
+wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item)
+{
+	s32 err = 0;
+	struct wlc_ssid *ssid;
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+
+	if (!profile)
+		return WL_INVALID;
+	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (wlc_ssid_t *) data;
+		memset(profile->ssid.SSID, 0,
+			sizeof(profile->ssid.SSID));
+		memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+		profile->ssid.SSID_len = ssid->SSID_len;
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&profile->sec, data, sizeof(profile->sec));
+		break;
+	case WL_PROF_ACT:
+		profile->active = *(bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		profile->beacon_interval = *(u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		profile->dtim_period = *(u8 *)data;
+		break;
+	default:
+		WL_ERR(("unsupported item (%d)\n", item));
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+	return err;
+}
+
+/* Deliberate no-op: runtime changes to the debug level are disabled;
+ * the level is fixed at compile time (see the comment below).
+ */
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	 * prohibit to change debug level
+	 * by insmod parameter.
+	 * eventually debug level will be configured
+	 * in compile time by using CONFIG_XXX
+	 */
+	/* wl_dbg_level = level; */
+}
+
+/* True when @ndev currently operates as an IBSS (ad-hoc) node. */
+static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev)
+{
+	s32 cur_mode = wl_get_mode_by_netdev(wl, ndev);
+
+	return cur_mode == WL_MODE_IBSS;
+}
+
+/* True when this station started the current IBSS. */
+static __used bool wl_is_ibssstarter(struct wl_priv *wl)
+{
+	return wl->ibss_starter;
+}
+
+/* Reset the scratch IE buffer so subsequent wl_add_ie()/wl_mrg_ie()
+ * calls start from an empty stream.
+ */
+static void wl_rst_ie(struct wl_priv *wl)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+
+	ie->offset = 0;
+}
+
+/* Append one TLV (type @t, length @l, value @v) to the scratch IE
+ * buffer.  Returns -ENOSPC when the buffer would overflow.
+ */
+static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
+/* Append a pre-formed IE stream of @ie_size bytes to the scratch IE
+ * buffer.  Returns -ENOSPC when the buffer would overflow.
+ */
+static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+/* Copy the accumulated IE stream into @dst.  Returns -ENOSPC when the
+ * destination buffer (@dst_size bytes) is too small.
+ */
+static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+/* Current length, in bytes, of the accumulated IE stream. */
+static u32 wl_get_ielen(struct wl_priv *wl)
+{
+	struct wl_ie *ie = wl_to_ie(wl);
+
+	return ie->offset;
+}
+
+/* Mark the link as established. */
+static void wl_link_up(struct wl_priv *wl)
+{
+	wl->link_up = true;
+}
+
+/* Mark the link as lost and discard any cached (re)assoc IEs. */
+static void wl_link_down(struct wl_priv *wl)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(wl);
+
+	WL_DBG(("In\n"));
+	wl->link_up = false;
+	conn_info->req_ie_len = 0;
+	conn_info->resp_ie_len = 0;
+}
+
+/* Take the event-queue spinlock; returns the saved IRQ flags to pass
+ * back to wl_unlock_eq().
+ */
+static unsigned long wl_lock_eq(struct wl_priv *wl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->eq_lock, flags);
+	return flags;
+}
+
+/* Release the event-queue spinlock taken by wl_lock_eq(). */
+static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags)
+{
+	spin_unlock_irqrestore(&wl->eq_lock, flags);
+}
+
+/* One-time initialisation of the event-queue spinlock. */
+static void wl_init_eq_lock(struct wl_priv *wl)
+{
+	spin_lock_init(&wl->eq_lock);
+}
+
+/* Delay for @ms milliseconds.  Delays shorter than one jiffy busy-wait
+ * with mdelay() (msleep() would round up to a whole jiffy); longer
+ * delays sleep.  Both cond_resched() and msleep() may sleep, so this
+ * must not be called from atomic context.
+ */
+static void wl_delay(u32 ms)
+{
+	if (ms < 1000 / HZ) {
+		cond_resched();
+		mdelay(ms);
+	} else {
+		msleep(ms);
+	}
+}
+
+/* Report the P2P device MAC address into @p2pdev_addr.  When the p2p
+ * core is not yet running the address is derived from the primary MAC;
+ * otherwise the cached p2p device address is returned.  Returns 0, or
+ * -1 when p2p support was never allocated.
+ */
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+	struct wl_priv *wl = wlcfg_drv_priv;
+	struct ether_addr p2pif_addr;
+	struct ether_addr primary_mac;
+	if (!wl->p2p)
+		return -1;
+	if (!p2p_is_on(wl)) {
+		get_primary_mac(wl, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+	} else {
+		memcpy(p2pdev_addr->octet,
+			wl->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+	}
+
+
+	return 0;
+}
+/* Forward a P2P Notice-of-Absence set request to the p2p layer. */
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	return wl_cfgp2p_set_p2p_noa(wlcfg_drv_priv, net, buf, len);
+}
+
+/* Forward a P2P Notice-of-Absence get request to the p2p layer. */
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	return wl_cfgp2p_get_p2p_noa(wlcfg_drv_priv, net, buf, len);
+}
+
+/* Forward a P2P power-save configuration request to the p2p layer. */
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+	return wl_cfgp2p_set_p2p_ps(wlcfg_drv_priv, net, buf, len);
+}
+
+/* Install WPS/P2P vendor IEs of class @type (beacon / probe response /
+ * assoc response) on the active AP or P2P-GO interface.  Silently does
+ * nothing when no suitable interface exists or @type is unknown
+ * (pktflag stays 0).  Returns 0 or the wl_cfgp2p error.
+ */
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type)
+{
+	struct wl_priv *wl;
+	struct net_device *ndev = NULL;
+	s32 ret = 0;
+	s32 bssidx = 0;
+	s32 pktflag = 0;
+	wl = wlcfg_drv_priv;
+	/* Prefer the P2P connection interface when a virtual interface
+	 * exists; otherwise fall back to a soft-AP on @net.
+	 */
+	if (wl->p2p && wl->p2p->vif_created) {
+		ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+		bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION);
+	} else if (wl_get_drv_status(wl, AP_CREATING, net) ||
+		wl_get_drv_status(wl, AP_CREATED, net)) {
+		ndev = net;
+		bssidx = 0;
+	}
+	if (ndev != NULL) {
+		switch (type) {
+		case WL_BEACON:
+			pktflag = VNDR_IE_BEACON_FLAG;
+			break;
+		case WL_PROBE_RESP:
+			pktflag = VNDR_IE_PRBRSP_FLAG;
+			break;
+		case WL_ASSOC_RESP:
+			pktflag = VNDR_IE_ASSOCRSP_FLAG;
+			break;
+		}
+		if (pktflag)
+			ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len);
+	}
+
+	return ret;
+}
+
+/* rfkill framework hooks: only the block/unblock callback is used. */
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+/* rfkill set_block callback: record the requested RF state in the
+ * driver-private structure.  @data is the wl_priv registered at
+ * rfkill_alloc() time.
+ */
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	struct wl_priv *priv = data;
+
+	WL_DBG(("Enter \n"));
+	WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+	if (priv == NULL)
+		return -EINVAL;
+
+	priv->rf_blocked = blocked;
+
+	return 0;
+}
+
+/* Register (@setup true) or unregister the rfkill switch for this
+ * adapter.  NOTE(review): the teardown path returns -ENOMEM when no
+ * rfkill was ever allocated - confirm callers treat that as benign.
+ */
+static int wl_setup_rfkill(struct wl_priv *wl, bool setup)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	if (!wl)
+		return -EINVAL;
+	if (setup) {
+		wl->rfkill = rfkill_alloc("brcmfmac-wifi",
+			wl_cfg80211_get_parent_dev(),
+			RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl);
+
+		if (!wl->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		err = rfkill_register(wl->rfkill);
+
+		/* Registration failed: free the allocated switch. */
+		if (err)
+			rfkill_destroy(wl->rfkill);
+	} else {
+		if (!wl->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		rfkill_unregister(wl->rfkill);
+		rfkill_destroy(wl->rfkill);
+	}
+
+err_out:
+	return err;
+}
+
+/* Return the parent device recorded by wl_cfg80211_set_parent_dev(). */
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+	return cfg80211_parent_dev;
+}
+
+/* Record the bus/parent device used when allocating wiphy and rfkill. */
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+	cfg80211_parent_dev = dev;
+}
+
+/* Forget the recorded parent device (used on detach). */
+static void wl_cfg80211_clear_parent_dev(void)
+{
+	cfg80211_parent_dev = NULL;
+}
+
+/* Fetch the primary interface MAC ("cur_etheraddr" iovar) into @mac.
+ * NOTE(review): the iovar's return status is ignored - on failure the
+ * stale contents of ioctl_buf are copied.
+ */
+static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac)
+{
+	wldev_iovar_getbuf_bsscfg(wl_to_prmry_ndev(wl), "cur_etheraddr", NULL,
+		0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
+	memcpy(mac->octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+}
+
+/* Kick the dhd driver-init sequence for the wiphy's primary netdev.
+ * Returns 0 on success, -EINVAL when the private data is missing, or
+ * -1 when dhd_do_driver_init() fails.
+ */
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+	struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
+
+	if (!wl || !wl->wdev)
+		return -EINVAL;
+
+	if (dhd_do_driver_init(wl->wdev->netdev) < 0)
+		return -1;
+
+	return 0;
+}
+
+/* Unconditionally enable DBG-class trace output.  NOTE(review): the
+ * @level argument is ignored here - confirm whether callers expect it
+ * to be honoured.
+ */
+void wl_cfg80211_enable_trace(int level)
+{
+	wl_dbg_level |= WL_DBG_DBG;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
new file mode 100644
index 00000000000000..edf8a300daad7b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h
@@ -0,0 +1,658 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h 316895 2012-02-24 00:05:41Z $
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct wl_priv;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WL_DBG_NONE 0
+#define WL_DBG_TRACE (1 << 4)
+#define WL_DBG_SCAN (1 << 3)
+#define WL_DBG_DBG (1 << 2)
+#define WL_DBG_INFO (1 << 1)
+#define WL_DBG_ERR (1 << 0)
+
+/* 0 invalidates all debug messages. default is 1 */
+#define WL_DBG_LEVEL 0xFF
+
+#define WL_ERR(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_INFO
+#undef WL_INFO
+#endif
+#define WL_INFO(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFG80211-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define WL_SCAN(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_SCAN) { \
+ printk(KERN_ERR "CFG80211-SCAN) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define WL_TRACE(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_TRACE) { \
+ printk(KERN_ERR "CFG80211-TRACE) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFG80211-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+} while (0)
+#else /* !(WL_DBG_LEVEL > 0) */
+#define WL_DBG(args)
+#endif /* (WL_DBG_LEVEL > 0) */
+
+
+#define WL_SCAN_RETRY_MAX 3
+#define WL_NUM_PMKIDS_MAX MAXPMKID
+#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_TLV_INFO_MAX 1024
+#define WL_SCAN_IE_LEN_MAX 2048
+#define WL_BSS_INFO_MAX 2048
+#define WL_ASSOC_INFO_MAX 512
+#define WL_IOCTL_LEN_MAX 1024
+#define WL_EXTRA_BUF_MAX 2048
+#define WL_ISCAN_BUF_MAX 2048
+#define WL_ISCAN_TIMER_INTERVAL_MS 3000
+#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX 256
+#define WL_FILE_NAME_MAX 256
+#define WL_DWELL_TIME 200
+#define WL_MED_DWELL_TIME 400
+#define WL_LONG_DWELL_TIME 1000
+#define IFACE_MAX_CNT 2
+
+#define WL_SCAN_TIMER_INTERVAL_MS 8000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 5
+#define WL_INVALID -1
+
+/* driver status */
+enum wl_status {
+ WL_STATUS_READY = 0,
+ WL_STATUS_SCANNING,
+ WL_STATUS_SCAN_ABORTING,
+ WL_STATUS_CONNECTING,
+ WL_STATUS_CONNECTED,
+ WL_STATUS_DISCONNECTING,
+ WL_STATUS_AP_CREATING,
+ WL_STATUS_AP_CREATED,
+ WL_STATUS_SENDING_ACT_FRM
+};
+
+/* wi-fi mode */
+enum wl_mode {
+ WL_MODE_BSS,
+ WL_MODE_IBSS,
+ WL_MODE_AP
+};
+
+/* driver profile list */
+enum wl_prof_list {
+ WL_PROF_MODE,
+ WL_PROF_SSID,
+ WL_PROF_SEC,
+ WL_PROF_IBSS,
+ WL_PROF_BAND,
+ WL_PROF_BSSID,
+ WL_PROF_ACT,
+ WL_PROF_BEACONINT,
+ WL_PROF_DTIMPERIOD
+};
+
+/* driver iscan state */
+enum wl_iscan_state {
+ WL_ISCAN_STATE_IDLE,
+ WL_ISCAN_STATE_SCANING
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+ WL_FW_LOADING_DONE,
+ WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+ WL_BEACON = 0x1,
+ WL_PROBE_RESP = 0x2,
+ WL_ASSOC_RESP = 0x4
+};
+/* beacon / probe_response */
+struct beacon_proberesp {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+ u32 frag_threshold;
+ u32 rts_threshold;
+ u32 retry_short;
+ u32 retry_long;
+ s32 tx_power;
+ struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct wl_priv *wl,
+ struct net_device *ndev, const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+ u16 band;
+ u16 channel;
+ s16 rssi;
+ u16 frame_len;
+ u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+ struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+ u16 offset;
+ u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+ u32 wpa_versions;
+ u32 auth_type;
+ u32 cipher_pairwise;
+ u32 cipher_group;
+ u32 wpa_auth;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+ u8 beacon_interval; /* in millisecond */
+ u8 atim; /* in millisecond */
+ s8 join_only;
+ u8 band;
+ u8 channel;
+};
+
+/* wl driver profile */
+struct wl_profile {
+ u32 mode;
+ s32 band;
+ struct wlc_ssid ssid;
+ struct wl_security sec;
+ struct wl_ibss ibss;
+ u8 bssid[ETHER_ADDR_LEN];
+ u16 beacon_interval;
+ u8 dtim_period;
+ bool active;
+};
+
+struct net_info {
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wl_profile profile;
+ s32 mode;
+ unsigned long sme_state;
+ struct list_head list; /* list of all net_info structure */
+};
+typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl);
+
+/* iscan controller */
+struct wl_iscan_ctrl {
+ struct net_device *dev;
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+ s32 state;
+ struct task_struct *tsk;
+ struct semaphore sync;
+ ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST];
+ void *data;
+ s8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s8 scan_buf[WL_ISCAN_BUF_MAX];
+};
+
+/* association inform */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+ u8 req_ie[MAX_REQ_LINE];
+ s32 req_ie_len;
+ u8 resp_ie[MAX_REQ_LINE];
+ s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+ const struct firmware *fw_entry;
+ unsigned long status;
+ u32 ptr;
+ s8 fw_name[WL_FILE_NAME_MAX];
+ s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+ u32 req_len;
+ u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+ u32 escan_state;
+ u8 escan_buf[ESCAN_BUF_SIZE];
+ struct wiphy *wiphy;
+ struct net_device *ndev;
+};
+
+struct ap_info {
+/* Structure to hold WPS, WPA IEs for a AP */
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ u8 *wps_ie;
+ bool security_mode;
+};
+struct btcoex_info {
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+ u32 ts_dhcp_start; /* ms ts ecord time stats */
+ u32 ts_dhcp_ok; /* ms ts ecord time stats */
+ bool dhcp_done; /* flag, indicates that host done with
+ * dhcp before t1/t2 expiration
+ */
+ s32 bt_state;
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+struct sta_info {
+ /* Structure to hold WPS IE for a STA */
+ u8 probe_req_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
+ u32 assoc_req_ie_len;
+};
+
+struct afx_hdl {
+ wl_af_params_t *pending_tx_act_frm;
+ struct ether_addr pending_tx_dst_addr;
+ struct net_device *dev;
+ struct work_struct work;
+ u32 bssidx;
+ u32 retry;
+ s32 peer_chan;
+ bool ack_recv;
+};
+
+/* private data of cfg80211 interface */
+struct wl_priv {
+ struct wireless_dev *wdev; /* representing wl cfg80211 device */
+
+ struct wireless_dev *p2p_wdev; /* representing wl cfg80211 device for P2P */
+ struct net_device *p2p_net; /* reference to p2p0 interface */
+
+ struct wl_conf *conf;
+ struct cfg80211_scan_request *scan_request; /* scan request object */
+ EVENT_HANDLER evt_handler[WLC_E_LAST];
+ struct list_head eq_list; /* used for event queue */
+ struct list_head net_list; /* used for struct net_info */
+ spinlock_t eq_lock; /* for event queue synchronization */
+ spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */
+ struct completion act_frm_scan;
+ struct mutex usr_sync; /* maily for up/down synchronization */
+ struct wl_scan_results *bss_list;
+ struct wl_scan_results *scan_results;
+
+ /* scan request object for internal purpose */
+ struct wl_scan_req *scan_req_int;
+ /* information element object for internal purpose */
+ struct wl_ie ie;
+ struct wl_iscan_ctrl *iscan; /* iscan controller */
+
+ /* association information container */
+ struct wl_connect_info conn_info;
+
+ struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
+ tsk_ctl_t event_tsk; /* task of main event handler thread */
+ void *pub;
+ u32 iface_cnt;
+ u32 channel; /* current channel */
+ bool iscan_on; /* iscan on/off switch */
+ bool iscan_kickstart; /* indicate iscan already started */
+ bool escan_on; /* escan on/off switch */
+ struct escan_info escan_info; /* escan information */
+ bool active_scan; /* current scan mode */
+ bool ibss_starter; /* indicates this sta is ibss starter */
+ bool link_up; /* link/connection up flag */
+
+ /* indicate whether chip to support power save mode */
+ bool pwr_save;
+ bool roam_on; /* on/off switch for self-roaming */
+ bool scan_tried; /* indicates if first scan attempted */
+ u8 *ioctl_buf; /* ioctl buffer */
+ struct mutex ioctl_buf_sync;
+ u8 *escan_ioctl_buf;
+ u8 *extra_buf; /* maily to grab assoc information */
+ struct dentry *debugfsdir;
+ struct rfkill *rfkill;
+ bool rf_blocked;
+ struct ieee80211_channel remain_on_chan;
+ enum nl80211_channel_type remain_on_chan_type;
+ u64 send_action_id;
+ u64 last_roc_id;
+ wait_queue_head_t netif_change_event;
+ struct afx_hdl *afx_hdl;
+ struct ap_info *ap_info;
+ struct sta_info *sta_info;
+ struct p2p_info *p2p;
+ bool p2p_supported;
+ struct btcoex_info *btcoex_info;
+ struct timer_list scan_timeout; /* Timer for catch scan event timeout */
+};
+
+
+/* Advance to the next wl_bss_info record in a scan-result list; when
+ * @bss is NULL the first record is returned.  No bounds checking here;
+ * callers rely on for_each_bss()'s count limit.
+ */
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+/* Allocate a net_info record for (@ndev, @wdev, @mode) and link it
+ * onto wl->net_list.  Returns -ENOMEM when the interface limit
+ * (IFACE_MAX_CNT) is reached or allocation fails.
+ */
+static inline s32
+wl_alloc_netinfo(struct wl_priv *wl, struct net_device *ndev,
+	struct wireless_dev * wdev, s32 mode)
+{
+	struct net_info *_net_info;
+	s32 err = 0;
+	if (wl->iface_cnt == IFACE_MAX_CNT)
+		return -ENOMEM;
+	_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+	if (!_net_info)
+		err = -ENOMEM;
+	else {
+		_net_info->mode = mode;
+		_net_info->ndev = ndev;
+		_net_info->wdev = wdev;
+		wl->iface_cnt++;
+		list_add(&_net_info->list, &wl->net_list);
+	}
+	return err;
+}
+/* Unlink and free the net_info record for @ndev, also freeing its
+ * wireless_dev and clearing ndev->ieee80211_ptr when one was attached.
+ */
+static inline void
+wl_dealloc_netinfo(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			list_del(&_net_info->list);
+			wl->iface_cnt--;
+			if (_net_info->wdev) {
+				kfree(_net_info->wdev);
+				ndev->ieee80211_ptr = NULL;
+			}
+			kfree(_net_info);
+		}
+	}
+
+}
+/* Free every net_info record (and attached wireless_dev) and reset the
+ * interface count to zero.  Used on full detach.
+ */
+static inline void
+wl_delete_all_netinfo(struct wl_priv *wl)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		list_del(&_net_info->list);
+		if (_net_info->wdev)
+			kfree(_net_info->wdev);
+		kfree(_net_info);
+	}
+	wl->iface_cnt = 0;
+}
+/* True when at least one registered interface has @status (a
+ * WL_STATUS_* bit index) set in its sme_state.
+ */
+static inline bool
+wl_get_status_all(struct wl_priv *wl, s32 status)
+
+{
+	struct net_info *_net_info, *next;
+	u32 cnt = 0;
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(status, &_net_info->sme_state))
+			cnt++;
+	}
+	return cnt? true: false;
+}
+/* Modify @ndev's sme_state bit @status according to @op:
+ * 1 = set, 2 = clear, 4 = toggle (matching the wl_set_drv_status /
+ * wl_clr_drv_status / wl_chg_drv_status macros).  Unknown @op values
+ * are ignored.
+ */
+static inline void
+wl_set_status_by_netdev(struct wl_priv *wl, s32 status,
+	struct net_device *ndev, u32 op)
+{
+
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			switch (op) {
+				case 1:
+					set_bit(status, &_net_info->sme_state);
+					break;
+				case 2:
+					clear_bit(status, &_net_info->sme_state);
+					break;
+				case 4:
+					change_bit(status, &_net_info->sme_state);
+					break;
+			}
+		}
+
+	}
+
+}
+
+/* Test @ndev's sme_state bit @status; returns 0 when @ndev is not
+ * registered in the net_list.
+ */
+static inline u32
+wl_get_status_by_netdev(struct wl_priv *wl, s32 status,
+	struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return test_bit(status, &_net_info->sme_state);
+	}
+	return 0;
+}
+
+/* Return @ndev's recorded WL_MODE_* value, or -1 when @ndev is not
+ * registered in the net_list.
+ */
+static inline s32
+wl_get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return _net_info->mode;
+	}
+	return -1;
+}
+
+
+/* Record WL_MODE_* value @mode for @ndev.  Silently does nothing when
+ * @ndev is not registered in the net_list.
+ */
+static inline void
+wl_set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev,
+	s32 mode)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			_net_info->mode = mode;
+	}
+}
+/* Return a pointer to @ndev's embedded wl_profile, or NULL when @ndev
+ * is not registered in the net_list.
+ */
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return &_net_info->profile;
+	}
+	return NULL;
+}
+#define wl_to_wiphy(w) (w->wdev->wiphy)
+#define wl_to_prmry_ndev(w) (w->wdev->netdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define wl_to_sr(w) (w->scan_req_int)
+#define wl_to_ie(w) (&w->ie)
+#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
+#define wl_to_iscan(w) (w->iscan)
+#define wl_to_conn(w) (&w->conn_info)
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(wl, stat) \
+ (wl_get_status_all(wl, WL_STATUS_ ## stat))
+#define wl_get_drv_status(wl, stat, ndev) \
+ (wl_get_status_by_netdev(wl, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 2))
+#define wl_chg_drv_status(wl, stat, ndev) \
+ (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 4))
+
+#define for_each_bss(list, bss, __i) \
+ for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(wl, iter, next) \
+ list_for_each_entry_safe(iter, next, &wl->net_list, list)
+
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1.
+ */
+#define is_wps_conn(_sme) \
+ ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+ (!_sme->crypto.n_ciphers_pairwise) && \
+ (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void *para);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+ void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+extern s32 wl_cfg80211_up(void *para);
+extern s32 wl_cfg80211_down(void *para);
+extern s32 wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
+ void* _net_attach);
+extern s32 wl_cfg80211_ifdel_ops(struct net_device *net);
+extern s32 wl_cfg80211_notify_ifdel(struct net_device *ndev);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_is_progress_ifchange(void);
+extern s32 wl_cfg80211_is_progress_ifadd(void);
+extern s32 wl_cfg80211_notify_ifchange(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+ enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(int level);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+
+#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 00000000000000..eb76fd0078d3d9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,1967 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.c 321498 2012-03-15 12:54:13Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+
+/* Shared scratch buffer used to assemble "p2p_scan" iovar parameters in
+ * wl_cfgp2p_escan(). NOTE(review): file-scope and unguarded — assumes
+ * escan requests are serialized by the caller; confirm.
+ */
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+
+/* Forward declarations for file-local helpers defined later in this file. */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static s32
+wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+	s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete);
+
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+/* net_device callbacks backing the standalone P2P network interface. */
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open = wl_cfgp2p_if_open,
+	.ndo_stop = wl_cfgp2p_if_stop,
+	.ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+
+/* Return true when @frame (of @frame_len bytes) looks like a P2P public
+ * action frame: long enough, correct category/action, P2P OUI and version.
+ */
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *frm = (wifi_p2p_pub_act_frame_t *)frame;
+
+	if (frm == NULL)
+		return false;
+	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) - 1)
+		return false;
+
+	return (frm->category == P2P_PUB_AF_CATEGORY) &&
+	       (frm->action == P2P_PUB_AF_ACTION) &&
+	       (frm->oui_type == P2P_VER) &&
+	       (memcmp(frm->oui, P2P_OUI, sizeof(frm->oui)) == 0);
+}
+
+/* Return true when @frame (of @frame_len bytes) looks like a P2P (non-public)
+ * action frame: long enough, P2P category, P2P version and OUI.
+ */
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_action_frame_t *frm = (wifi_p2p_action_frame_t *)frame;
+
+	if (frm == NULL)
+		return false;
+	if (frame_len < sizeof(wifi_p2p_action_frame_t) - 1)
+		return false;
+
+	return (frm->category == P2P_AF_CATEGORY) &&
+	       (frm->type == P2P_VER) &&
+	       (memcmp(frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0);
+}
+/* Return true when @frame (of @frame_len bytes) is a GAS public action frame
+ * of one of the four service-discovery subtypes (initial/comeback req/resp).
+ */
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+	wifi_p2psd_gas_pub_act_frame_t *gas_frm;
+
+	if (frame == NULL)
+		return false;
+
+	gas_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)
+		return false;
+	if (gas_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	switch (gas_frm->action) {
+	case P2PSD_ACTION_ID_GAS_IREQ:
+	case P2PSD_ACTION_ID_GAS_IRESP:
+	case P2PSD_ACTION_ID_GAS_CREQ:
+	case P2PSD_ACTION_ID_GAS_CRESP:
+		return true;
+	default:
+		return false;
+	}
+}
+/* Log, at CFGP2P debug level, a human-readable name for a P2P action frame
+ * about to be transmitted (tx == true) or just received (tx == false).
+ * The frame is classified with the wl_cfgp2p_is_*_action() helpers above;
+ * frames shorter than 3 bytes are ignored, unmatched subtypes are reported
+ * as "Unknown". Purely diagnostic — no side effects on @frame.
+ */
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		/* P2P public action frame: branch on its subtype field. */
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+		case P2P_PAF_GON_REQ:
+			CFGP2P_DBG(("%s P2P Group Owner Negotiation Req Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_GON_RSP:
+			CFGP2P_DBG(("%s P2P Group Owner Negotiation Rsp Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_GON_CONF:
+			CFGP2P_DBG(("%s P2P Group Owner Negotiation Confirm Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_INVITE_REQ:
+			CFGP2P_DBG(("%s P2P Invitation Request  Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_INVITE_RSP:
+			CFGP2P_DBG(("%s P2P Invitation Response Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_DEVDIS_REQ:
+			CFGP2P_DBG(("%s P2P Device Discoverability Request Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_DEVDIS_RSP:
+			CFGP2P_DBG(("%s P2P Device Discoverability Response Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_PROVDIS_REQ:
+			CFGP2P_DBG(("%s P2P Provision Discovery Request Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_PAF_PROVDIS_RSP:
+			CFGP2P_DBG(("%s P2P Provision Discovery Response Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		default:
+			CFGP2P_DBG(("%s Unknown P2P Public Action Frame\n",
+				(tx)? "TX": "RX"));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		/* P2P (vendor-specific) action frame. */
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+		case P2P_AF_NOTICE_OF_ABSENCE:
+			CFGP2P_DBG(("%s P2P Notice of Absence Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_AF_PRESENCE_REQ:
+			CFGP2P_DBG(("%s P2P Presence Request Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_AF_PRESENCE_RSP:
+			CFGP2P_DBG(("%s P2P Presence Response Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		case P2P_AF_GO_DISC_REQ:
+			CFGP2P_DBG(("%s P2P Discoverability Request Frame\n",
+				(tx)? "TX": "RX"));
+			break;
+		default:
+			CFGP2P_DBG(("%s Unknown P2P Action Frame\n",
+				(tx)? "TX": "RX"));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		/* GAS public action frame (service discovery). */
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+		case P2PSD_ACTION_ID_GAS_IREQ:
+			CFGP2P_DBG(("%s P2P GAS Initial Request\n",
+				(tx)? "TX" : "RX"));
+			break;
+		case P2PSD_ACTION_ID_GAS_IRESP:
+			CFGP2P_DBG(("%s P2P GAS Initial Response\n",
+				(tx)? "TX" : "RX"));
+			break;
+		case P2PSD_ACTION_ID_GAS_CREQ:
+			CFGP2P_DBG(("%s P2P GAS Comback Request\n",
+				(tx)? "TX" : "RX"));
+			break;
+		case P2PSD_ACTION_ID_GAS_CRESP:
+			CFGP2P_DBG(("%s P2P GAS Comback Response\n",
+				(tx)? "TX" : "RX"));
+			break;
+		default:
+			CFGP2P_DBG(("%s Unknown P2P GAS Frame\n",
+				(tx)? "TX" : "RX"));
+		}
+
+
+	}
+
+}
+
+/*
+ * Allocate and reset wl->p2p: zero every saved management IE for the three
+ * BSSCFG slots (primary / discovery device / connection), bind the primary
+ * slot to the primary net_device at bssidx 0, and leave the device and
+ * connection slots unassigned until discovery is enabled or a GO/client
+ * interface is created. Returns BCME_OK on success, -ENOMEM on allocation
+ * failure.
+ */
+s32
+wl_cfgp2p_init_priv(struct wl_priv *wl)
+{
+	if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+/* Zero one saved-IE buffer and its length for the given BSSCFG slot. */
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {					\
+		memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
+	/* Primary slot is always the primary interface at bsscfg index 0;
+	 * device/connection slots are filled lazily (see init_discovery). */
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
+	spin_lock_init(&wl->p2p->timer_lock);
+	return BCME_OK;
+
+}
+/*
+ * Free wl->p2p (allocated by wl_cfgp2p_init_priv) and mark P2P as
+ * unsupported. Safe to call when wl->p2p is already NULL.
+ */
+void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl)
+{
+	CFGP2P_DBG(("In\n"));
+	/* kfree(NULL) is a no-op in the kernel, so the former NULL guard
+	 * was redundant; reset the pointer to defend against reuse. */
+	kfree(wl->p2p);
+	wl->p2p = NULL;
+	wl->p2p_supported = 0;
+}
+/*
+ * Prepare the firmware for P2P operation: make sure "apsta" is enabled
+ * (requires a WLC_DOWN/WLC_UP cycle), disable firmware roaming, and push
+ * a null device address through "p2p_da_override" so the firmware derives
+ * the P2P device address from the current MAC.
+ * Returns the "p2p_da_override" result (BCME_UNSUPPORTED is tolerated).
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
+{
+	struct net_device *ndev = wl_to_prmry_ndev(wl);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	/* NOTE(review): the return values of the getint/ioctl/setint calls
+	 * below are ignored; a failed "apsta" toggle goes unnoticed. */
+	wldev_iovar_getint(ndev, "apsta", &val);
+	if (val == 0) {
+		val = 1;
+		/* "apsta" can only be changed while the interface is down. */
+		wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+		wldev_iovar_setint(ndev, "apsta", val);
+		wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+	}
+	val = 1;
+	/* Disable firmware roaming for P2P */
+	wldev_iovar_setint(ndev, "roam_off", val);
+	/* In case of COB type, firmware has default mac address
+	 * After Initializing firmware, we have to set current mac address to
+	 * firmware for P2P device address
+	 */
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+		sizeof(null_eth_addr), wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
+	if (ret && ret != BCME_UNSUPPORTED) {
+		CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+	}
+	return ret;
+}
+
+/* Create a new P2P BSS in the firmware via the "p2p_ifadd" iovar.
+ * Parameters:
+ * @mac     : MAC address for the new BSS
+ * @if_type : WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec  : chanspec to use when creating a GO BSS
+ * Returns 0 on success.
+ */
+s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+	chanspec_t chspec)
+{
+	struct net_device *primary = wl_to_prmry_ndev(wl);
+	wl_p2p_if_t add_req;
+
+	memcpy(add_req.addr.octet, mac->octet, sizeof(add_req.addr.octet));
+	add_req.type = if_type;
+	add_req.chspec = chspec;
+
+	CFGP2P_DBG(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+		add_req.addr.octet[0], add_req.addr.octet[1], add_req.addr.octet[2],
+		add_req.addr.octet[3], add_req.addr.octet[4], add_req.addr.octet[5],
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	return wldev_iovar_setbuf(primary, "p2p_ifadd", &add_req, sizeof(add_req),
+		wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+}
+
+/* Delete a P2P BSS from the firmware via the "p2p_ifdel" iovar.
+ * Parameters:
+ * @mac : MAC address of the BSS to delete
+ * Returns 0 on success.
+ */
+s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
+{
+	struct net_device *primary = wl_to_prmry_ndev(wl);
+	s32 status;
+
+	CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n",
+		primary->ifindex, mac->octet[0], mac->octet[1], mac->octet[2],
+		mac->octet[3], mac->octet[4], mac->octet[5]));
+	status = wldev_iovar_setbuf(primary, "p2p_ifdel", mac, sizeof(*mac),
+		wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+	if (unlikely(status < 0)) {
+		printk("'wl p2p_ifdel' error %d\n", status);
+	}
+	return status;
+}
+
+/* Change the role (GO <-> client) of the P2P connection BSS via the
+ * "p2p_ifupd" iovar, issued on the connection interface itself.
+ * Parameters:
+ * @mac     : MAC address of the BSS whose role changes
+ * @if_type : WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec  : chanspec to apply
+ * Returns 0 on success.
+ */
+s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+	chanspec_t chspec)
+{
+	struct net_device *conn_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+	wl_p2p_if_t upd_req;
+	s32 status;
+
+	memcpy(upd_req.addr.octet, mac->octet, sizeof(upd_req.addr.octet));
+	upd_req.type = if_type;
+	upd_req.chspec = chspec;
+
+	CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n",
+		upd_req.addr.octet[0], upd_req.addr.octet[1], upd_req.addr.octet[2],
+		upd_req.addr.octet[3], upd_req.addr.octet[4], upd_req.addr.octet[5],
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	status = wldev_iovar_setbuf(conn_dev, "p2p_ifupd", &upd_req, sizeof(upd_req),
+		wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+	if (unlikely(status < 0)) {
+		printk("'wl p2p_ifupd' error %d\n", status);
+	}
+	return status;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac   : MAC address of the created BSS
+ * @index : output: bsscfg index of the created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = wl_to_prmry_ndev(wl);
+
+	CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n",
+		mac->octet[0], mac->octet[1], mac->octet[2],
+		mac->octet[3], mac->octet[4], mac->octet[5]));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+		sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+	if (ret == 0) {
+		/* BUGFIX: was sizeof(index) — the size of the POINTER (8 bytes
+		 * on 64-bit), overflowing the caller's s32. Copy exactly the
+		 * size of the pointed-to object. */
+		memcpy(index, getbuf, sizeof(*index));
+		CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+/* Enable (@on != 0) or disable the firmware P2P discovery BSSCFG by
+ * setting the "p2p_disc" iovar on the primary interface.
+ */
+static s32
+wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
+{
+	struct net_device *primary = wl_to_prmry_ndev(wl);
+	s32 status;
+
+	CFGP2P_DBG(("enter\n"));
+
+	status = wldev_iovar_setint(primary, "p2p_disc", on);
+	if (unlikely(status < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, status));
+	}
+	return status;
+}
+
+/* Set the WL driver's P2P discovery state via the "p2p_state" iovar.
+ * Parameters :
+ * @mode      : one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}
+ * @channel   : host channel number to listen on
+ * @listen_ms : dwell time in milliseconds
+ * @bssidx    : bsscfg index to issue the iovar against
+ * Returns 0 on success.
+ */
+s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t state_req;
+	struct net_device *dev;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_to_p2p_bss_ndev(wl, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+	/* Put the WL driver into the requested discovery state so it can
+	 * respond to (or issue) P2P probe requests. */
+	state_req.state = mode;
+	state_req.chspec = wl_ch_host_to_driver(channel);
+	state_req.dwell = listen_ms;
+
+	return wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &state_req,
+		sizeof(state_req), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &wl->ioctl_buf_sync);
+}
+
+/* Query the firmware ("p2p_dev") for the bsscfg index of the P2P
+ * discovery BSS and store it in *@index. Returns the iovar status.
+ */
+static s32
+wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
+{
+	struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+	s32 status = wldev_iovar_getint(dev, "p2p_dev", index);
+
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, status));
+
+	if (unlikely(status < 0)) {
+		CFGP2P_ERR(("'p2p_dev' error %d\n", status));
+	}
+	return status;
+}
+
+/* Create the firmware discovery BSSCFG (idempotent), record its
+ * net_device/bssidx in the DEVICE slot, and put it into SCAN state.
+ * Returns BCME_OK when already initialized or on success; on failure to
+ * reach SCAN state the BSSCFG is torn down again.
+ */
+s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl)
+{
+
+	s32 index = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	/* A non-zero DEVICE bssidx means discovery is already up. */
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(wl, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discover error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(wl, &index);
+
+	if (ret < 0) {
+		return ret;
+	}
+	/* Discovery shares the primary net_device but has its own bssidx. */
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(wl, 0);
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+		/* NOTE(review): returns 0 (success) even though the SCAN-state
+		 * set failed and discovery was rolled back — callers cannot
+		 * detect this; confirm it is intentional. */
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @wl : wl_private data
+ * Returns 0 if success; -1 when discovery was never initialized.
+ * Puts the discovery BSSCFG back into SCAN state, then deletes it, and
+ * clears the DEVICE slot bookkeeping.
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG(("enter\n"));
+
+	/* bssidx 0 means the discovery BSSCFG does not exist. */
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+	/* Set the discovery state to SCAN */
+	/* NOTE(review): this result is immediately overwritten by the next
+	 * call, so a failure here is silently ignored — confirm intended. */
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(wl, 0);
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
+	 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+	 * BSS.
+	 */
+
+	/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+	 * have no discovery BSS.
+	 */
+	wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @wl     : wl_private data
+ * @dev    : net device to install the probe-request IEs on
+ * @ie     : probe request ie (WPS IE + P2P IE)
+ * @ie_len : probe request ie length
+ * Returns 0 if success.
+ * If discovery is already on, only refreshes the probe-request IEs.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	/* NOTE(review): DISCOVERY_ON is set before init succeeds and is not
+	 * cleared on the error path below — confirm that is intentional. */
+	wl_set_p2p_status(wl, DISCOVERY_ON);
+
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wl_cfgp2p_init_discovery(wl);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
+		"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		/* Non-fatal: fall through and still install the IEs. */
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	ret = wl_cfgp2p_set_management_ie(wl, dev,
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE),
+		VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @wl : wl_private_data
+ * Returns 0 if success.
+ * Clears the DISCOVERY_ON status, parks the discovery BSSCFG in SCAN
+ * state, then deletes it via wl_cfgp2p_deinit_discovery().
+ */
+s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(wl, DISCOVERY_ON);
+
+	if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret < 0)) {
+
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+#ifdef NOT_YET
+	if (wl_get_p2p_status(wl, SCANNING)) {
+		p2pwlu_scan_abort(hdl, FALSE);
+	}
+#endif
+	/* NOTE(review): DISCOVERY_ON was already cleared at function entry;
+	 * this second clear is redundant but harmless. */
+	wl_clr_p2p_status(wl, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(wl);
+
+exit:
+	return ret;
+}
+
+/* Build a "p2p_scan" (escan) request in the shared scratch buffer and hand
+ * it to the firmware through the primary interface.
+ * Parameters:
+ * @active       : non-zero for an active scan, zero for passive
+ * @num_chans    : number of entries in @channels
+ * @channels     : host channel numbers to scan
+ * @search_state : WL_P2P_DISC_ST_SEARCH (dongle applies the P2P wildcard
+ *                 SSID itself) or WL_P2P_DISC_ST_SCAN (wildcard SSID is set
+ *                 explicitly here)
+ * @action       : WL_SCAN_ACTION_* opcode for the escan engine
+ * @bssidx       : bsscfg index to issue the iovar against
+ * Returns 0 on success, negative on error.
+ */
+s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 50
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+
+	/* BUGFIX: ssid.SSID_len is read unconditionally further down, but was
+	 * only written in the SEARCH/SCAN branches; zero the struct so any
+	 * other search_state no longer reads an uninitialized value. */
+	memset(&ssid, 0, sizeof(ssid));
+
+	wl_set_p2p_status(wl, SCANNING);
+	/* Size the iovar block: P2P scan header + escan params + channel list
+	 * (0 SSIDs). */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %u)\n",
+			memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(wl->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/* In SEARCH state the dongle uses the P2P WILDCARD SSID
+		 * internally by default, so no SSID is set explicitly. */
+		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		ssid.SSID_len = htod32(0);
+
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN STATE 802.11 SCAN: p2p_find with type=progressive needs
+		 * the P2P WILDCARD SSID set here, otherwise this would be a
+		 * plain broadcast scan. */
+		strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID);
+		ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN);
+		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+	}
+
+	/* Fill in the P2P scan structure at the start of the iovar param block
+	 * ('E' selects escan). */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES);
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+	/* Shorter dwell while associated; social-channel scans (3 channels)
+	 * get their own dwell time. */
+	if (wl_get_drv_status_all(wl, CONNECTED))
+		eparams->params.active_time = htod32(-1);
+	else if (num_chans == 3)
+		eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+	else
+		eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+	eparams->params.passive_time = htod32(-1);
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action = htod16(action);
+	eparams->sync_id = htod16(0x1234);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+	return ret;
+}
+
+/* Search for a common channel on which to exchange action frames: run a
+ * SEARCH-state escan over either one explicit channel or the three social
+ * channels.
+ * Parameters:
+ * @wl      : wl_private data
+ * @ndev    : net device for bssidx
+ * @bssidx  : bssidx for BSS (PRIMARY is redirected to the discovery BSS)
+ * @channel : specific channel to search, or 0 for the social channels
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+	s32 bssidx, s32 channel)
+{
+	s32 ret = 0;
+	u32 chan_cnt = 0;
+	u16 *default_chan_list = NULL;
+	if (!p2p_is_on(wl))
+		return -BCME_ERROR;
+	CFGP2P_ERR((" Enter\n"));
+	if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+		bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+	if (channel)
+		chan_cnt = 1;
+	else
+		chan_cnt = SOCIAL_CHAN_CNT;
+	default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		CFGP2P_ERR(("channel list allocation failed \n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (channel) {
+		default_chan_list[0] = channel;
+	} else {
+		default_chan_list[0] = SOCIAL_CHAN_1;
+		default_chan_list[1] = SOCIAL_CHAN_2;
+		default_chan_list[2] = SOCIAL_CHAN_3;
+	}
+	/* BUGFIX: the channel count was hard-coded to SOCIAL_CHAN_CNT (3);
+	 * with a single-channel request that made the escan read two entries
+	 * past the 1-element allocation. Pass the actual count. */
+	ret = wl_cfgp2p_escan(wl, ndev, true, chan_cnt,
+		default_chan_list, WL_P2P_DISC_ST_SEARCH,
+		WL_SCAN_ACTION_START, bssidx);
+	kfree(default_chan_list);
+exit:
+	return ret;
+}
+
+/* Predicate wrappers over wl_cfgp2p_has_ie(): each tests one vendor IE at
+ * *ie and, on mismatch, advances the (tlvs, len) cursor to the next TLV.
+ */
+/* Check whether pointed-to IE looks like WPA. */
+/* NOTE(review): matches WPS_OUI/WPS_OUI_LEN with WPA_OUI_TYPE — this mixed
+ * pairing looks odd; confirm against the OUI definitions in the headers. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+/* Delete and Set a management vndr ie to firmware
+ * Parameters:
+ * @wl      : wl_private data
+ * @ndev    : net device for bssidx
+ * @bssidx  : bssidx for BSS; -1 selects the AP/STA buffers by driver mode
+ * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+ * VNDR_IE_ASSOCREQ_FLAG)
+ * @vndr_ie : VNDR IE blob (such as P2P IE , WPS IE) — a sequence of TLVs
+ * @vndr_ie_len : VNDR IE Length
+ * Behaviour: selects the saved-IE buffer matching (mode, pktflag), deletes
+ * from firmware every WPS/P2P/WFD vendor IE previously saved there, then
+ * stores the new blob and adds its WPS/P2P/WFD vendor IEs to firmware.
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+	s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+	/* Vendor-specific Information Element ID */
+#define VNDR_SPEC_ELEMENT_ID 0xdd
+	s32 ret = BCME_OK;
+	u32 pos;
+	u8 *ie_buf;
+	u8 *mgmt_ie_buf = NULL;
+	u32 mgmt_ie_buf_len = 0;
+	u32 *mgmt_ie_len = 0;
+	u8 ie_id, ie_len;
+	u8 delete = 0;
+/* Accessors for the per-bsscfg saved-IE buffer and its length. */
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
+	/* Case 1: P2P is on — use the per-bsscfg P2P saved-IE buffers. */
+	if (p2p_is_on(wl) && bssidx != -1) {
+		if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+			bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_req, bssidx);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx));
+				break;
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_res, bssidx);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx));
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_req, bssidx);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx));
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_res, bssidx);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx));
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = IE_TYPE(beacon, bssidx);
+				mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx));
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return -1;
+		}
+	/* Case 2: soft-AP mode — use the AP saved-IE buffers on bsscfg 0. */
+	} else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+		switch (pktflag) {
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = wl->ap_info->probe_res_ie;
+				mgmt_ie_len = &wl->ap_info->probe_res_ie_len;
+				mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie);
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = wl->ap_info->beacon_ie;
+				mgmt_ie_len = &wl->ap_info->beacon_ie_len;
+				mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return -1;
+		}
+		bssidx = 0;
+	/* Case 3: STA (BSS) mode with bssidx -1 — use the STA buffers. */
+	} else if (bssidx == -1 && wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = wl->sta_info->probe_req_ie;
+				mgmt_ie_len = &wl->sta_info->probe_req_ie_len;
+				mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie);
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = wl->sta_info->assoc_req_ie;
+				mgmt_ie_len = &wl->sta_info->assoc_req_ie_len;
+				mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return -1;
+		}
+		bssidx = 0;
+	} else {
+		CFGP2P_ERR(("not suitable type\n"));
+		return -1;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		CFGP2P_ERR(("extra IE size too big\n"));
+		ret = -ENOMEM;
+	} else {
+		if (mgmt_ie_buf != NULL) {
+			/* Unchanged IE blob: nothing to do. */
+			if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) &&
+			     (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) {
+				CFGP2P_INFO(("Previous mgmt IE is equals to current IE"));
+				goto exit;
+			}
+			/* Walk the previously saved blob as (id, len, body) TLVs
+			 * and delete each WPS/P2P/WFD vendor IE from firmware.
+			 * NOTE(review): assumes well-formed TLVs — a corrupt
+			 * ie_len could run pos past *mgmt_ie_len; confirm the
+			 * saved blob is always validated upstream. */
+			pos = 0;
+			delete = 1;
+			ie_buf = (u8 *) mgmt_ie_buf;
+			while (pos < *mgmt_ie_len) {
+				ie_id = ie_buf[pos++];
+				ie_len = ie_buf[pos++];
+				if ((ie_id == DOT11_MNG_VS_ID) &&
+				   (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+				    wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) ||
+				    wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) {
+					CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :"
+						"%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+						ie_buf[pos+1], ie_buf[pos+2]));
+					/* Body starts 3 bytes in: skip the OUI. */
+					ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag,
+						ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3,
+						ie_len-3, delete);
+				}
+				pos += ie_len;
+			}
+
+		}
+		*mgmt_ie_len = 0;
+		/* Add if there is any extra IE */
+		if (vndr_ie && vndr_ie_len) {
+			/* save the current IE in wl struct */
+			memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len);
+			*mgmt_ie_len = vndr_ie_len;
+			pos = 0;
+			ie_buf = (u8 *) vndr_ie;
+			delete = 0;
+			/* Same TLV walk as above, now adding the new IEs. */
+			while (pos < vndr_ie_len) {
+				ie_id = ie_buf[pos++];
+				ie_len = ie_buf[pos++];
+				if ((ie_id == DOT11_MNG_VS_ID) &&
+				   (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) ||
+				    wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) ||
+				    wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) {
+					CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :"
+						"%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos],
+						ie_buf[pos+1], ie_buf[pos+2]));
+					ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag,
+						ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3,
+						ie_len-3, delete);
+				}
+				pos += ie_len;
+			}
+		}
+	}
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+	return ret;
+}
+
+/* Clear the management IE buffers of a BSSCFG.
+ * Parameters:
+ * @wl     : wl_private data
+ * @bssidx : bssidx for BSS; must be within [0, P2PAPI_BSSCFG_MAX)
+ *
+ * Returns BCME_OK on success, BCME_BADARG on an out-of-range index.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
+{
+/* Zero one saved-IE buffer and its length for the given BSSCFG slot. */
+#define INIT_IE(IE_TYPE, BSS_TYPE) \
+	do { \
+		memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0)
+	/* ROBUSTNESS: also reject indices at or past P2PAPI_BSSCFG_MAX —
+	 * the previous check only caught negative values, so a too-large
+	 * index would have written past the saved-IE array. */
+	if (bssidx < 0 || bssidx >= P2PAPI_BSSCFG_MAX) {
+		CFGP2P_ERR(("invalid bssidx\n"));
+		return BCME_BADARG;
+	}
+	INIT_IE(probe_req, bssidx);
+	INIT_IE(probe_res, bssidx);
+	INIT_IE(assoc_req, bssidx);
+	INIT_IE(assoc_res, bssidx);
+	INIT_IE(beacon, bssidx);
+#undef INIT_IE
+	return BCME_OK;
+}
+
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ * Tests whether the TLV at @ie carries the given vendor @oui (of @oui_len
+ * bytes) followed by @type; on mismatch, and when @tlvs is non-NULL,
+ * advances *tlvs past this TLV and shrinks *tlvs_len accordingly so the
+ * caller can continue scanning (see the wl_cfgp2p_find_* loops below).
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+		type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	/* NOTE(review): if a malformed TLV length pushes ie past the buffer
+	 * end, *tlvs_len (u32) underflows to a huge value — confirm callers
+	 * always pass length-validated buffers. */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+/* Scan the TLV buffer @parse (of @len bytes) for the first vendor IE that
+ * matches the WPA predicate; returns it, or NULL when none is found.
+ */
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *tlv;
+
+	for (;;) {
+		tlv = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID);
+		if (tlv == NULL)
+			return NULL;
+		if (wl_cfgp2p_is_wpa_ie((u8*)tlv, &parse, &len))
+			return (wpa_ie_fixed_t *)tlv;
+	}
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+ /* Scan every vendor-specific IE in the buffer and return the first one
+ * wl_cfgp2p_is_wps_ie() recognizes as a WPS IE, or NULL when none match.
+ */
+ for (;;) {
+ bcm_tlv_t *elt = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID);
+
+ if (elt == NULL)
+ return NULL;
+ if (wl_cfgp2p_is_wps_ie((u8 *)elt, &parse, &len))
+ return (wpa_ie_fixed_t *)elt;
+ }
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+ /* Scan every vendor-specific IE in the buffer and return the first one
+ * wl_cfgp2p_is_p2p_ie() recognizes as a P2P IE, or NULL when none match.
+ */
+ for (;;) {
+ bcm_tlv_t *elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID);
+
+ if (elt == NULL)
+ return NULL;
+ if (wl_cfgp2p_is_p2p_ie((uint8 *)elt, &parse, &len))
+ return (wifi_p2p_ie_t *)elt;
+ }
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+ /* Scan every vendor-specific IE in the buffer and return the first one
+ * wl_cfgp2p_is_wfd_ie() recognizes as a WFD IE, or NULL when none match.
+ */
+ for (;;) {
+ bcm_tlv_t *elt = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID);
+
+ if (elt == NULL)
+ return NULL;
+ if (wl_cfgp2p_is_wfd_ie((uint8 *)elt, &parse, &len))
+ return (wifi_wfd_ie_t *)elt;
+ }
+}
+static s32
+wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete)
+{
+ /* Add or delete (per the delete flag) a single vendor IE on the given
+ * bsscfg through the "vndr_ie" iovar. oui must point at 3 bytes; data
+ * is the IE body that follows the OUI.
+ */
+ s32 err = BCME_OK;
+ s32 buf_len;
+ s32 iecount;
+
+ vndr_ie_setbuf_t *ie_setbuf;
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+ CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+ return -1;
+ }
+
+ /* NOTE(review): the -1 assumes vndr_ie_setbuf_t already carries one
+ * byte of IE data -- confirm against the struct definition.
+ */
+ buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1;
+ ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+ CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len));
+ if (!ie_setbuf) {
+
+ CFGP2P_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+ if (delete)
+ strcpy(ie_setbuf->cmd, "del");
+ else
+ strcpy(ie_setbuf->cmd, "add");
+ /* Buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int));
+ pktflag = htod32(pktflag);
+ memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag,
+ &pktflag, sizeof(uint32));
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+ /* On-the-wire IE length: OUI (VNDR_IE_MIN_LEN) plus the body */
+ ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len
+ = (uchar)(data_len + VNDR_IE_MIN_LEN);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3);
+ memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len);
+ err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len,
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+
+ CFGP2P_INFO(("vndr_ie iovar returns %d\n", err));
+ kfree(ie_setbuf);
+ return err;
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @wl : wl_private data
+ * @ndev : net device to search bssidx
+ * Returns bssidx for ndev
+ */
+s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev)
+{
+ /* Map a net_device to its P2P bsscfg index. Returns -1 for a NULL
+ * ndev, and the primary bsscfg when P2P is unsupported or the device
+ * is not one of the tracked P2P BSS entries.
+ */
+ u32 type;
+
+ if (ndev == NULL) {
+ CFGP2P_ERR((" ndev is NULL\n"));
+ return -1;
+ }
+ if (!wl->p2p_supported)
+ return P2PAPI_BSSCFG_PRIMARY;
+
+ for (type = 0; type < P2PAPI_BSSCFG_MAX; type++) {
+ if (wl_to_p2p_bss_ndev(wl, type) == ndev)
+ return wl_to_p2p_bss_bssidx(wl, type);
+ }
+ return P2PAPI_BSSCFG_PRIMARY;
+}
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
+ /* First arrival: mark expired and cancel the guard timer.
+ * NOTE(review): del_timer_sync() runs while timer_lock is held
+ * (spin_lock_bh); this is safe only as long as the timer
+ * callback never takes timer_lock itself -- verify.
+ */
+ wl_set_p2p_status(wl, LISTEN_EXPIRED);
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ spin_lock_bh(&wl->p2p->timer_lock);
+ del_timer_sync(&wl->p2p->listen_timer);
+ spin_unlock_bh(&wl->p2p->timer_lock);
+ }
+ cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id, &wl->remain_on_chan,
+ wl->remain_on_chan_type, GFP_KERNEL);
+ } else
+ /* Second arrival (timer + firmware event): just clear the flag */
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ return ret;
+
+}
+
+/*
+ * Timer expire callback function for LISTEN
+ * We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ * so lets do it from thread context.
+ */
+static void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+ /* Guard-timer callback armed by wl_cfgp2p_discover_listen(): synthesize
+ * a WLC_E_P2P_DISC_LISTEN_COMPLETE event so completion is reported from
+ * thread context (cfg80211_remain_on_channel_expired cannot be called
+ * from timer/ISR context).
+ */
+ wl_event_msg_t msg;
+ struct wl_priv *wl = (struct wl_priv *) data;
+
+ CFGP2P_DBG((" Enter\n"));
+ /* Zero the whole event first: only event_type is filled in below, and
+ * the remaining fields must not reach the event handler uninitialized.
+ */
+ memset(&msg, 0, sizeof(wl_event_msg_t));
+ msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+}
+
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters :
+ * @wl : wl_private data
+ * @channel : channel to listen
+ * @duration_ms : the time (milli seconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
+{
+/* One-shot guard timer: fires wl_cfgp2p_listen_expired() if the firmware's
+ * WLC_E_P2P_DISC_LISTEN_COMPLETE event never arrives.
+ */
+#define INIT_TIMER(timer, func, duration, extra_delay) \
+ do { \
+ init_timer(timer); \
+ timer->function = func; \
+ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ timer->data = (unsigned long) wl; \
+ add_timer(timer); \
+ } while (0);
+
+ s32 ret = BCME_OK;
+ struct timer_list *_timer;
+ CFGP2P_DBG((" Enter Channel : %d, Duration : %d\n", channel, duration_ms));
+ if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
+
+ CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
+
+ ret = BCME_NOTREADY;
+ goto exit;
+ }
+ /* A previous listen session is still in flight: report success and
+ * let it finish rather than re-arming the timer.
+ */
+ if (timer_pending(&wl->p2p->listen_timer)) {
+ CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+ goto exit;
+
+ } else
+ wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+
+ /* NOTE(review): the return value of set_p2p_mode is ignored here */
+ wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ _timer = &wl->p2p->listen_timer;
+
+ /* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle ,
+ * otherwise we will wait up to duration_ms + 200ms
+ */
+ INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, 200);
+
+#undef INIT_TIMER
+exit:
+ return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
+{
+ /* Toggle the SEARCH_ENABLED state bit. When search is being turned
+ * off, also drop the SCANNING flag and put the firmware's discovery
+ * state machine back into WL_P2P_DISC_ST_SCAN.
+ */
+ s32 err = BCME_OK;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ CFGP2P_DBG((" do nothing, discovery is off\n"));
+ return err;
+ }
+ if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
+ CFGP2P_DBG(("already : %d\n", enable));
+ return err;
+ }
+
+ wl_chg_p2p_status(wl, SEARCH_ENABLED);
+
+ if (enable)
+ return err;
+
+ /* Disabling search: reset the WL driver's p2p discovery state to
+ * WL_P2P_DISC_ST_SCAN.
+ */
+ wl_clr_p2p_status(wl, SCANNING);
+ return wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 ret = BCME_OK;
+ u32 event_type = ntoh32(e->event_type);
+ u32 status = ntoh32(e->status);
+ CFGP2P_DBG((" Enter\n"));
+ if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+ /* Record the ack / no-ack outcome for wl_cfgp2p_tx_action_frame() */
+ if (status == WLC_E_STATUS_SUCCESS) {
+ wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+ }
+ else {
+ wl_set_p2p_status(wl, ACTION_TX_NOACK);
+ CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+ }
+ } else {
+ /* Off-channel dwell finished: wake any waiter on netif_change_event */
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+ "status : %d\n", status));
+ wake_up_interruptible(&wl->netif_change_event);
+ }
+ return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx)
+{
+ s32 ret = BCME_OK;
+ s32 timeout = 0;
+
+
+ CFGP2P_INFO(("\n"));
+ CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+ af_params->channel, af_params->dwell_time));
+
+ /* Reset both completion flags before firing the frame */
+ wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(wl, ACTION_TX_NOACK);
+#define MAX_WAIT_TIME 2000
+ /* Frames targeted at the primary bsscfg go out through the P2P device
+ * discovery bsscfg instead.
+ */
+ if (bssidx == P2PAPI_BSSCFG_PRIMARY)
+ bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+
+ ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+
+ if (ret < 0) {
+
+ CFGP2P_ERR((" sending action frame is failed\n"));
+ goto exit;
+ }
+ /* Block (up to 2 s) until wl_cfgp2p_action_tx_complete() records an
+ * ack (ACTION_TX_COMPLETED) or a no-ack (ACTION_TX_NOACK).
+ */
+ timeout = wait_event_interruptible_timeout(wl->netif_change_event,
+ (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) || wl_get_p2p_status(wl, ACTION_TX_NOACK)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+
+ if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
+ CFGP2P_INFO(("tx action frame operation is completed\n"));
+ ret = BCME_OK;
+ } else {
+ ret = BCME_ERROR;
+ CFGP2P_INFO(("tx action frame operation is failed\n"));
+ }
+exit:
+ CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+#undef MAX_WAIT_TIME
+ return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+ struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+ /* P2P Device Address: the primary MAC with the locally administered
+ * bit set. (The copy fully overwrites the output, so no pre-clearing
+ * is needed.)
+ */
+ memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+ out_dev_addr->octet[0] |= 0x02;
+
+ /* P2P Interface Address: derived from the device address with bit 7
+ * of octet 4 flipped, so it differs from the device address and the
+ * discovery and connection BSSCFGs can coexist.
+ */
+ memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+ out_int_addr->octet[4] ^= 0x80;
+}
+
+/* P2P IF Address change to Virtual Interface MAC Address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+ wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+ u16 len = ie->len;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+ CFGP2P_DBG((" Enter\n"));
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ /* NOTE(review): len is u16 and the subtractions below are unchecked; a
+ * corrupt subelt_len larger than the remainder makes len wrap and keeps
+ * the loop running. Callers must hand in a well-formed IE.
+ */
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ if (subelt_id == P2P_SEID_INTINTADDR) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_DEV_INFO) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+ } else if (subelt_id == P2P_SEID_GROUP_ID) {
+ memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+ CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+ } return; /* done after the first matching attribute id */
+ } else {
+ CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+ }
+ subel += subelt_len;
+ }
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup(). DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
+
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+ if (result != 0) {
+ CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result));
+ CFGP2P_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ /* First int of the response is the up/down flag (dongle-endian) */
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
+ }
+ return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+ /* Bring the given bsscfg up (up != 0) or down via the "bss" iovar.
+ * Returns the iovar status (0 on success).
+ */
+ s32 ret = BCME_OK;
+ s32 val = up ? 1 : 0;
+
+ /* Payload: two dongle-endian words, bsscfg index then up/down flag */
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+ CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+
+ if (ret != 0) {
+ CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+ }
+
+ return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
+{
+ /* Query the "p2p" iovar: returns 1 when the firmware supports P2P,
+ * 0 when it does not or when the query itself fails.
+ */
+ s32 p2p_supported = 0;
+ s32 err;
+
+ err = wldev_iovar_getint(ndev, "p2p", &p2p_supported);
+ if (err < 0) {
+ CFGP2P_ERR(("wl p2p error %d\n", err));
+ return 0;
+ }
+
+ if (p2p_supported == 1) {
+ CFGP2P_INFO(("p2p is supported\n"));
+ } else {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ p2p_supported = 0;
+ }
+ return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct wl_priv *wl)
+{
+ /* Teardown: cancel any in-flight listen guard timer, then release the
+ * P2P private state. Always returns 0.
+ */
+ if (timer_pending(&wl->p2p->listen_timer))
+ del_timer_sync(&wl->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(wl);
+ return 0;
+}
+s32
+wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ /* Parse "count start duration" from buf and program a P2P GO
+ * Notice-of-Absence schedule via the "p2p_noa" iovar. A field of -1
+ * means "leave the previously stored value unchanged".
+ * Returns the iovar status, or -1 when no P2P vif exists.
+ */
+ s32 ret = -1;
+ /* Initialize to -1 so fields missing from a short or malformed input
+ * string are treated as "not provided" instead of being read
+ * uninitialized after a partial sscanf().
+ */
+ int count = -1, start = -1, duration = -1;
+ wl_p2p_sched_t dongle_noa;
+
+ CFGP2P_DBG((" Enter\n"));
+
+ memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+ if (wl->p2p && wl->p2p->vif_created) {
+
+ wl->p2p->noa.desc[0].start = 0;
+
+ sscanf(buf, "%d %d %d", &count, &start, &duration);
+ CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+ count, start, duration));
+ if (count != -1)
+ wl->p2p->noa.desc[0].count = count;
+
+ /* supplicant gives interval as start */
+ if (start != -1)
+ wl->p2p->noa.desc[0].interval = start;
+
+ if (duration != -1)
+ wl->p2p->noa.desc[0].duration = duration;
+
+ if (wl->p2p->noa.desc[0].count != 255) {
+ /* Finite absence count: request an absolute schedule */
+ wl->p2p->noa.desc[0].start = 200;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+ }
+ else {
+ /* Continuous NoA interval. */
+ dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ if ((wl->p2p->noa.desc[0].interval == 102) ||
+ (wl->p2p->noa.desc[0].interval == 100)) {
+ wl->p2p->noa.desc[0].start = 100 -
+ wl->p2p->noa.desc[0].duration;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+ }
+ else {
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ }
+ }
+ /* Put the noa descriptor in dongle format for dongle:
+ * BCNPCT works in beacon percentages, everything else in ms
+ * (converted to us below).
+ */
+ dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count);
+ if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration);
+ }
+ else {
+ dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000);
+ dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000);
+ }
+ dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000);
+
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_noa", &dongle_noa, sizeof(dongle_noa), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
+ &wl->ioctl_buf_sync);
+
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+ }
+ return ret;
+}
+s32
+wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len)
+{
+ /* Format the currently stored NoA / opportunistic power-save settings
+ * as a hex string into buf. Returns the string length (two characters
+ * per byte), 0 when there is nothing to report, or -1 on error.
+ */
+ wifi_p2p_noa_desc_t *noa_desc;
+ int len = 0, i;
+ char _buf[200];
+
+ CFGP2P_DBG((" Enter\n"));
+ buf[0] = '\0';
+ if (wl->p2p && wl->p2p->vif_created) {
+ if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) {
+ _buf[0] = 1; /* noa index */
+ _buf[1] = (wl->p2p->ops.ops ? 0x80: 0) |
+ (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */
+ len += 2;
+ if (wl->p2p->noa.desc[0].count) {
+ noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+ noa_desc->cnt_type = wl->p2p->noa.desc[0].count;
+ noa_desc->duration = wl->p2p->noa.desc[0].duration;
+ noa_desc->interval = wl->p2p->noa.desc[0].interval;
+ noa_desc->start = wl->p2p->noa.desc[0].start;
+ len += sizeof(wifi_p2p_noa_desc_t);
+ }
+ if (buf_len <= len * 2) {
+ CFGP2P_ERR(("ERROR: buf_len %d in not enough for"
+ "returning noa in string format\n", buf_len));
+ return -1;
+ }
+ /* We have to convert the buffer data into ASCII strings.
+ * Mask with 0xff: _buf is plain char, and a negative value
+ * fed to %02x would print a sign-extended 8-digit number and
+ * overrun the two characters reserved per byte. snprintf
+ * bounds each write to the reserved slot.
+ */
+ for (i = 0; i < len; i++) {
+ snprintf(buf, 3, "%02x", _buf[i] & 0xff);
+ buf += 2;
+ }
+ /* buf now points just past the last hex digit; terminate
+ * there (the original buf[i*2] indexed well past the end
+ * of the caller's buffer since buf had already advanced).
+ */
+ *buf = '\0';
+ }
+ }
+ else {
+ CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+ return -1;
+ }
+ return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+{
+ /* Parse "legacy_ps ops ctw" from buf and program opportunistic
+ * power-save / CTWindow (the "p2p_ops" iovar) plus the legacy PM mode
+ * on the P2P connection interface. A field of -1 means "leave
+ * unchanged". Returns 0/iovar status on success, -1 otherwise.
+ */
+ /* Initialize to -1 so fields missing from a short or malformed input
+ * string are treated as "not provided" instead of being read
+ * uninitialized after a partial sscanf().
+ */
+ int ps = -1, ctw = -1;
+ int ret = -1;
+ s32 legacy_ps = -1;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl->p2p && wl->p2p->vif_created) {
+ sscanf(buf, "%d %d %d", &legacy_ps, &ps, &ctw);
+ CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+ if (ctw != -1) {
+ wl->p2p->ops.ctw = ctw;
+ ret = 0;
+ }
+ if (ps != -1) {
+ wl->p2p->ops.ops = ps;
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops),
+ wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ if (ret < 0) {
+ CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+ }
+ }
+
+ if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+ ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+ if (unlikely(ret)) {
+ CFGP2P_ERR(("error (%d)\n", ret));
+ }
+ }
+ else
+ CFGP2P_ERR(("ilegal setting\n"));
+ }
+ else {
+ CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+ ret = -1;
+ }
+ return ret;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+ /* Walk the attribute TLVs inside a P2P IE and return a pointer to the
+ * body (just past the id byte and 2-byte length) of the first
+ * attribute matching element_id, or NULL if it is absent.
+ */
+ wifi_p2p_ie_t *ie = NULL;
+ u16 len = 0;
+ u8 *subel;
+ u8 subelt_id;
+ u16 subelt_len;
+
+ if (!buf) {
+ WL_ERR(("P2P IE not present"));
+ return 0;
+ }
+
+ ie = (wifi_p2p_ie_t*) buf;
+ len = ie->len;
+
+ /* Point subel to the P2P IE's subelt field.
+ * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+ */
+ subel = ie->subelts;
+ len -= 4; /* exclude OUI + OUI_TYPE */
+
+ /* NOTE(review): as in wl_cfg80211_change_ifaddr(), the unsigned len
+ * subtractions are unchecked; a corrupt subelt_len wraps len. Callers
+ * must hand in a well-formed IE.
+ */
+ while (len >= 3) {
+ /* attribute id */
+ subelt_id = *subel;
+ subel += 1;
+ len -= 1;
+
+ /* 2-byte little endian */
+ subelt_len = *subel++;
+ subelt_len |= *subel++ << 8;
+
+ len -= 2;
+ len -= subelt_len; /* for the remaining subelt fields */
+
+ if (subelt_id == element_id) {
+ /* This will point to start of subelement attrib after
+ * attribute id & len
+ */
+ return subel;
+ }
+
+ /* Go to next subelement */
+ subel += subelt_len;
+ }
+
+ /* Not Found */
+ return NULL;
+}
+
+#define P2P_GROUP_CAPAB_GO_BIT 0x01
+/* Extract the peer's P2P Device Address from a scan result.
+ * For a non-GO peer the BSSID itself is the device address; for a GO it is
+ * pulled from the DEVICE_INFO (probe response) or DEVICE_ID (beacon)
+ * attribute. Returns NULL when the P2P IE or required attributes are
+ * missing.
+ */
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+ wifi_p2p_ie_t * p2p_ie = NULL;
+ u8 *capability = NULL;
+ bool p2p_go = 0;
+ u8 *ptr = NULL;
+
+ if (!(p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, bi->ie_length))) {
+ WL_ERR(("P2P IE not found"));
+ return NULL;
+ }
+
+ if (!(capability = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_P2P_INFO))) {
+ WL_ERR(("P2P Capability attribute not found"));
+ return NULL;
+ }
+
+ /* Check Group capability for Group Owner bit */
+ p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+ if (!p2p_go) {
+ return bi->BSSID.octet;
+ }
+
+ /* In probe responses, DEVICE INFO attribute will be present */
+ if (!(ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_INFO))) {
+ /* If DEVICE_INFO is not found, this might be a beacon frame.
+ * check for DEVICE_ID in the beacon frame.
+ */
+ ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_ID);
+ }
+
+ if (!ptr)
+ WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE "));
+
+ return ptr;
+}
+
+s32
+wl_cfgp2p_register_ndev(struct wl_priv *wl)
+{
+ /* Create and register the virtual "p2p0" net_device used by the
+ * supplicant. Returns 0 on success or a negative errno.
+ *
+ * Fix vs. original: the shared fail path dereferenced net after a
+ * failed alloc_etherdev() (NULL deref), called unregister_netdev()
+ * on a device that was never registered, and leaked wdev/net on the
+ * -ENOMEM/-EINVAL mid paths.
+ */
+ int ret = 0;
+ struct net_device* net = NULL;
+ struct wireless_dev *wdev = NULL;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+ /* Allocate etherdev, including space for private structure */
+ if (!(net = alloc_etherdev(sizeof(wl)))) {
+ CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ strcpy(net->name, "p2p%d");
+ net->name[IFNAMSIZ - 1] = '\0';
+
+ /* Copy the reference to wl_priv */
+ memcpy((void *)netdev_priv(net), &wl, sizeof(wl));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->do_ioctl = wl_cfgp2p_do_ioctl;
+ net->hard_start_xmit = wl_cfgp2p_start_xmit;
+ net->open = wl_cfgp2p_if_open;
+ net->stop = wl_cfgp2p_if_stop;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+ /* Register with a dummy MAC addr */
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ wdev->wiphy = wl->wdev->wiphy;
+
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+ net->ieee80211_ptr = wdev;
+
+ SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+ /* Associate p2p0 network interface with new wdev */
+ wdev->netdev = net;
+
+ /* store p2p net ptr for further reference. Note that iflist won't have
+ * this entry as the corresponding firmware interface is a "Hidden"
+ * interface.
+ */
+ if (wl->p2p_net) {
+ CFGP2P_ERR(("p2p_net defined already.\n"));
+ ret = -EINVAL;
+ goto fail;
+ }
+ wl->p2p_wdev = wdev;
+ wl->p2p_net = net;
+
+ ret = register_netdev(net);
+ if (ret) {
+ CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
+ wl->p2p_wdev = NULL;
+ wl->p2p_net = NULL;
+ goto fail;
+ }
+
+ printk("%s: P2P Interface Registered\n", net->name);
+
+ return ret;
+
+fail:
+ /* The netdev was never successfully registered on any path reaching
+ * here, so only free it -- do not call unregister_netdev().
+ */
+ kfree(wdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+ free_netdev(net);
+
+ return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct wl_priv *wl)
+{
+ /* Tear down the virtual p2p0 interface created by
+ * wl_cfgp2p_register_ndev().
+ */
+ if (wl == NULL || wl->p2p_net == NULL) {
+ CFGP2P_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+
+ unregister_netdev(wl->p2p_net);
+ free_netdev(wl->p2p_net);
+ return 0;
+}
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* p2p0 is a management-only interface: data frames are dropped.
+ * Returning 0 (NETDEV_TX_OK) transfers skb ownership to us, so the
+ * buffer must be freed here -- the original leaked every skb.
+ */
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ CFGP2P_DBG(("(%s) is not used for data operations. Droping the packet. \n", ndev->name));
+ return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ /* p2p0's private area holds a pointer to wl_priv (stored by
+ * wl_cfgp2p_register_ndev); resolve it to reach the primary interface.
+ */
+ struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
+ struct net_device *ndev = wl_to_prmry_ndev(wl);
+
+ /* There is no ifidx corresponding to p2p0 in our firmware. So we should
+ * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
+ * For Android PRIV CMD handling map it to primary I/F
+ */
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+ } else {
+ CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
+ __FUNCTION__, cmd));
+ return -1;
+ }
+
+ return ret;
+}
+
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+
+ if (!wdev)
+ return -EINVAL;
+
+ /* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now,
+ * do it here. This will make sure that in concurrent mode, supplicant
+ * is not dependent on a particular order of interface initialization.
+ * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+ * -iwlan0.
+ */
+ wl_cfg80211_do_driver_init(net);
+
+ /* Advertise P2P client/GO support only while p2p0 is up
+ * (removed again in wl_cfgp2p_if_stop).
+ */
+ wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+ | BIT(NL80211_IFTYPE_P2P_GO));
+
+ return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+ /* Remove the P2P client/GO modes from the wiphy when p2p0 goes down */
+ struct wireless_dev *wdev = net->ieee80211_ptr;
+
+ if (!wdev)
+ return -EINVAL;
+
+ wdev->wiphy->interface_modes &= ~(BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO));
+ return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
new file mode 100644
index 00000000000000..427cb4a11bd149
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h
@@ -0,0 +1,284 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.h 316197 2012-02-21 13:16:39Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct wl_priv;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not
+ * confuse this with a bsscfg index. This value is an index into the
+ * saved_ie[] array of structures which in turn contains a bsscfg index field.
+ */
+typedef enum {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
+
+#define IE_MAX_LEN 300
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+ u8 p2p_probe_req_ie[IE_MAX_LEN];
+ u8 p2p_probe_res_ie[IE_MAX_LEN];
+ u8 p2p_assoc_req_ie[IE_MAX_LEN];
+ u8 p2p_assoc_res_ie[IE_MAX_LEN];
+ u8 p2p_beacon_ie[IE_MAX_LEN];
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_res_ie_len;
+ u32 p2p_assoc_req_ie_len;
+ u32 p2p_assoc_res_ie_len;
+ u32 p2p_beacon_ie_len;
+};
+
+/* Per-BSS bookkeeping: firmware bsscfg index, its net_device, and the
+ * vendor IEs last programmed for it.
+ */
+struct p2p_bss {
+ u32 bssidx;
+ struct net_device *dev;
+ struct p2p_saved_ie saved_ie;
+ void *private_data;
+};
+
+/* Top-level P2P state, reached through struct wl_priv */
+struct p2p_info {
+ bool on; /* p2p on/off switch */
+ bool scan;
+ bool vif_created; /* a P2P virtual interface exists */
+ s8 vir_ifname[IFNAMSIZ];
+ unsigned long status; /* bitmask of enum wl_cfgp2p_status */
+ struct ether_addr dev_addr;
+ struct ether_addr int_addr;
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer; /* LISTEN-phase guard timer */
+ wl_p2p_sched_t noa;
+ wl_p2p_ops_t ops;
+ wlc_ssid_t ssid;
+ spinlock_t timer_lock;
+};
+
+/* dongle status
+ * Bit positions in p2p_info.status, manipulated through the
+ * wl_{get,set,clr,chg}_p2p_status() macros via test_bit()/set_bit().
+ */
+enum wl_cfgp2p_status {
+ WLP2P_STATUS_DISCOVERY_ON = 0,
+ WLP2P_STATUS_SEARCH_ENABLED,
+ WLP2P_STATUS_IF_ADD,
+ WLP2P_STATUS_IF_DEL,
+ WLP2P_STATUS_IF_DELETING,
+ WLP2P_STATUS_IF_CHANGING,
+ WLP2P_STATUS_IF_CHANGED,
+ WLP2P_STATUS_LISTEN_EXPIRED,
+ WLP2P_STATUS_ACTION_TX_COMPLETED,
+ WLP2P_STATUS_ACTION_TX_NOACK,
+ WLP2P_STATUS_SCANNING
+};
+
+
+/* Accessors for per-BSS P2P state and the p2p status bitmask.
+ * Fix: the first four macros declared their parameter as "w" while the
+ * body expanded "(wl)", silently capturing whatever caller-local variable
+ * was named "wl" instead of using the argument. The parameter is renamed
+ * to "wl" so the argument is actually the one expanded.
+ */
+#define wl_to_p2p_bss_ndev(wl, type) ((wl)->p2p->bss_idx[type].dev)
+#define wl_to_p2p_bss_bssidx(wl, type) ((wl)->p2p->bss_idx[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(wl, type) ((wl)->p2p->bss_idx[type].saved_ie)
+#define wl_to_p2p_bss_private(wl, type) ((wl)->p2p->bss_idx[type].private_data)
+#define wl_to_p2p_bss(wl, type) ((wl)->p2p->bss_idx[type])
+#define wl_get_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : test_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_set_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : set_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_clr_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : clear_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define wl_chg_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:change_bit(WLP2P_STATUS_ ## stat, \
+ &(wl)->p2p->status))
+#define p2p_on(wl) ((wl)->p2p->on)
+#define p2p_scan(wl) ((wl)->p2p->scan)
+#define p2p_is_on(wl) ((wl)->p2p && (wl)->p2p->on)
+
+/* dword align allocation */
+#define WLC_IOCTL_MAXLEN 8192
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+#define CFGP2P_ERR(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_ERR "CFGP2P-ERROR) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_INFO(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_INFO) { \
+ printk(KERN_ERR "CFGP2P-INFO) %s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#define CFGP2P_DBG(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_ERR "CFGP2P-DEBUG) %s :", __func__); \
+ printk args; \
+ } \
+ } while (0)
+
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len);
+extern s32
+wl_cfgp2p_init_priv(struct wl_priv *wl);
+extern void
+wl_cfgp2p_deinit_priv(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode,
+ u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct wl_priv *wl);
+extern s32
+wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans,
+ u16 *channels,
+ s32 search_state, u16 action, u32 bssidx);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+ s32 bssidx, s32 channel);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+ s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+ wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+ struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct wl_priv *wl);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct wl_priv *wl);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct wl_priv *wl);
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define SOCIAL_CHAN_CNT 3
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+ ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+ (frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+#define IS_P2P_PUB_ACT_REQ(frame, len) (wl_cfgp2p_is_pub_action(frame, len) && \
+ ((frame->subtype == P2P_PAF_GON_REQ) || \
+ (frame->subtype == P2P_PAF_INVITE_REQ) || \
+ (frame->subtype == P2P_PAF_PROVDIS_REQ)))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid) (memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0)
+#endif /* _wl_cfgp2p_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h
new file mode 100644
index 00000000000000..7050508c5e4db6
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_dbg.h
@@ -0,0 +1,63 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h 324578 2012-03-29 21:30:09Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+/* Message-level bitmasks consulted by verbose builds; defined once in the
+ * driver proper.  (The duplicate extern declarations that used to sit at
+ * the bottom of this header have been removed.)
+ */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#if 0 && (VERSION_MAJOR > 9)
+#include <IOKit/apple80211/IO8Log.h>
+#define WL_PRINT(args)		do { printf args; IO8Log args; } while (0)
+#else
+#define WL_PRINT(args)		do { WL_TIMESTAMP(); printf args; } while (0)
+#endif
+
+
+/* Release build: all trace macros compile to nothing unless WLMSG_WSEC
+ * explicitly turns the security-related ones back on.
+ */
+#define WL_NONE(args)
+
+#define	WL_ERROR(args)
+#define WL_TRACE(args)
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args)		WL_PRINT(args)
+#define WL_WSEC_DUMP(args)	WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+
+#endif /* _wl_dbg_h_ */
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c
new file mode 100644
index 00000000000000..be080250ce5b6b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.c
@@ -0,0 +1,3737 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c 312290 2012-02-02 02:52:18Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+
+typedef const struct si_pub si_t;
+#include <wlioctl.h>
+
+
+#include <wl_dbg.h>
+#include <wl_iw.h>
+
+#ifdef BCMWAPI_WPI
+
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1 0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4 0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;
+#endif
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+ uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define DAEMONIZE(a) daemonize(a); \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM);
+#else
+#define RAISE_RX_SOFTIRQ() \
+ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) daemonize(); \
+ do { if (a) \
+ strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+ } while (0);
+#endif
+
+#define ISCAN_STATE_IDLE 0
+#define ISCAN_STATE_SCANING 1
+
+
+#define WLC_IW_ISCAN_MAXLEN   2048
+/* One node of the incremental-scan result chain; results are accumulated
+ * into fixed-size buffers linked through ->next.
+ */
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+/* Per-device incremental-scan state: the watchdog timer, the result-buffer
+ * list, and the sysioc kernel-thread bookkeeping.
+ */
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;	/* ISCAN_STATE_IDLE or ISCAN_STATE_SCANING */
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	/* pid/sem/completion for the sysioc worker thread */
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+
+	/* scratch buffer for iovar construction */
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+
+
+typedef struct priv_link {
+ wl_iw_t *wliw;
+} priv_link_t;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev)
+#endif
+
+
+#define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
+
+/* Convert a wl_wsec_key_t from host order to dongle order before sending
+ * it to the firmware.  The htod* macros are defined as identity earlier in
+ * this file, so on little-endian hosts this is a structured no-op.
+ * NOTE(review): rxiv.lo uses htod16 while its neighbours use htod32 —
+ * presumably rxiv.lo is a 16-bit field; confirm against wlioctl.h.
+ */
+static void swap_key_from_BE(
+	wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+/* Inverse of swap_key_from_BE(): convert a key structure received from the
+ * dongle back into host byte order.  dtoh* are identity macros here, so
+ * the conversion only matters on big-endian hosts.
+ */
+static void
+swap_key_to_BE(wl_wsec_key_t *key)
+{
+	key->index          = dtoh32(key->index);
+	key->len            = dtoh32(key->len);
+	key->algo           = dtoh32(key->algo);
+	key->flags          = dtoh32(key->flags);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+	key->rxiv.hi        = dtoh32(key->rxiv.hi);
+	key->rxiv.lo        = dtoh16(key->rxiv.lo);
+}
+
+/*
+ * Issue a WLC ioctl to the dongle through the net_device's private ioctl
+ * entry point (SIOCDEVPRIVATE), temporarily lifting the user/kernel
+ * address-space check so the kernel-resident wl_ioctl_t can be passed.
+ * Returns whatever the driver's ioctl handler returns (0 on success).
+ */
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+
+	/* Bounded copy: ifr_name is IFNAMSIZ bytes and dev->name may legally
+	 * fill it without a terminator — the previous strcpy() could overrun
+	 * the ifreq.  Zeroing first also guarantees NUL termination and no
+	 * stack garbage in the unused ifreq fields.
+	 */
+	memset(&ifr, 0, sizeof(ifr));
+	strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name) - 1);
+	ifr.ifr_data = (caddr_t) &ioc;
+
+#ifndef LINUX_HYBRID
+	/* make sure the interface is up before poking it */
+	dev_open(dev);
+#endif
+
+	fs = get_fs();
+	set_fs(get_ds());
+#if defined(WL_USE_NETDEV_OPS)
+	ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+	set_fs(fs);
+
+	return ret;
+}
+
+
+
+/*
+ * Set integer iovar <name> to <val> (converted to dongle byte order) via
+ * WLC_SET_VAR.  Returns the dev_wlc_ioctl() result.
+ */
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+/*
+ * Build iovar <iovar> with <paramlen> bytes of <param> into the caller's
+ * <bufptr> and push it to the dongle with WLC_SET_VAR.
+ */
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+/*
+ * Query iovar <iovar> (with optional <param> input) via WLC_GET_VAR; the
+ * dongle overwrites <bufptr> with the result.  Note the full <buflen> is
+ * passed to the ioctl so the reply can be larger than the request.
+ */
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+
+#if WIRELESS_EXT > 17
+/*
+ * Set buffer-valued iovar <name> from <buf>/<len>, staging the iovar in a
+ * kmalloc'd scratch buffer (MAX_WLIW_IOCTL_LEN bytes).  Returns -ENOMEM on
+ * allocation failure, otherwise the WLC_SET_VAR ioctl result.
+ */
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	char *ioctlbuf;
+	uint buflen;
+	int error;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(buflen);
+	error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+	kfree(ioctlbuf);
+	return error;
+}
+#endif
+
+
+
+/*
+ * Read buffer-valued iovar <name> into <buf> (at most <buflen> bytes),
+ * going through a kmalloc'd scratch buffer.  On success the first <buflen>
+ * bytes of the reply are copied out; errors leave <buf> untouched.
+ */
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	char *ioctlbuf;
+	int error;
+
+	uint len;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(len);
+	BCM_REFERENCE(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	kfree(ioctlbuf);
+	return (error);
+}
+
+
+
+/*
+ * Read integer iovar <name> from the dongle into *retval.
+ * Returns 0 on success.  On failure *retval is now left untouched — the
+ * old code unconditionally copied var.val back, publishing uninitialized
+ * stack memory to the caller when the ioctl failed.
+ */
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	/* only publish the result when the ioctl actually succeeded */
+	if (!error)
+		*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+ __u16 cmd;
+ __u16 flags;
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ void *wrqu, char *extra);
+#endif
+
+#if WIRELESS_EXT > 12
+/* Private WE handler: program the LED duty cycle ("leddc" iovar). */
+static int
+wl_iw_set_leddc(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int duty_cycle = *(int *)extra;
+
+	return dev_wlc_intvar_set(dev, "leddc", duty_cycle);
+}
+
+/* Private WE handler: set the driver VLAN mode ("vlan_mode" iovar). */
+static int
+wl_iw_set_vlanmode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int vlan_mode = htod32(*(int *)extra);
+
+	return dev_wlc_intvar_set(dev, "vlan_mode", vlan_mode);
+}
+
+/* Private WE handler: set the 802.11 power-save mode (WLC_SET_PM). */
+static int
+wl_iw_set_pm(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int pm_mode = htod32(*(int *)extra);
+
+	return dev_wlc_ioctl(dev, WLC_SET_PM, &pm_mode, sizeof(pm_mode));
+}
+#endif
+
+/*
+ * Deliver a driver-private status string to user space as an IWEVCUSTOM
+ * wireless event.  Returns 0 on success, -1 if <flag> does not fit in the
+ * event payload.
+ */
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	/* ">=" so the terminating NUL always fits: the previous ">" check let
+	 * a string of exactly sizeof(extra) characters pass, after which
+	 * strcpy() wrote one byte past the end of 'extra'.
+	 */
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
+
+/*
+ * SIOCSIWCOMMIT handler: if an SSID is currently configured, re-trigger
+ * association by issuing WLC_REASSOC with a zeroed BSSID.
+ */
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	/* nothing configured yet — nothing to commit */
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * SIOCGIWNAME handler: report the protocol name ("IEEE 802.11<caps>")
+ * derived from the PHY type and the band list.  Always returns 0; on
+ * ioctl failure the capability suffix is simply left empty.
+ */
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *cwrq,
+	char *extra
+)
+{
+	int phytype, err;
+	uint band[3];
+	char cap[5];
+
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	cap[0] = 0;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+		goto done;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+		goto done;
+
+	/* band[0] is the number of bands; >= 2 implies 5 GHz ("a") support */
+	band[0] = dtoh32(band[0]);
+	switch (phytype) {
+		case WLC_PHY_TYPE_A:
+			strcpy(cap, "a");
+			break;
+		case WLC_PHY_TYPE_B:
+			strcpy(cap, "b");
+			break;
+		case WLC_PHY_TYPE_LP:
+		case WLC_PHY_TYPE_G:
+			if (band[0] >= 2)
+				strcpy(cap, "abg");
+			else
+				strcpy(cap, "bg");
+			break;
+		case WLC_PHY_TYPE_N:
+			if (band[0] >= 2)
+				strcpy(cap, "abgn");
+			else
+				strcpy(cap, "bgn");
+			break;
+	}
+done:
+	snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+	return 0;
+}
+
+/*
+ * SIOCSIWFREQ handler: accept either a raw channel number (e == 0,
+ * m < MAXCHANNEL) or a frequency, which is normalized to MHz and converted
+ * to a channel with wf_mhz2channel().  Returns -EINPROGRESS on success per
+ * WE convention (the change completes asynchronously).
+ */
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+	/* direct channel number */
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+
+	/* frequency: scale mantissa/exponent to MHz first */
+	else {
+
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+
+		/* 4.9 GHz band needs a different channel-start factor */
+		if (fwrq->m > 4000 && fwrq->m < 5000)
+			sf = WF_CHAN_FACTOR_4_G;
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+
+	return -EINPROGRESS;
+}
+
+/*
+ * SIOCGIWFREQ handler: report the current channel number (exponent 0, so
+ * user space interprets fwrq->m as a channel, not a frequency).
+ */
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+/*
+ * SIOCSIWMODE handler: map the WE operating mode onto the driver's
+ * (infra, ap) flag pair — MASTER = AP+infra, INFRA = infra only,
+ * ADHOC/AUTO = neither.  Returns -EINPROGRESS on success per WE
+ * convention.
+ */
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+
+	return -EINPROGRESS;
+}
+
+/*
+ * SIOCGIWMODE handler: inverse of wl_iw_set_mode — reads the (infra, ap)
+ * flags back and reports MASTER / INFRA / ADHOC accordingly.
+ */
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
+/*
+ * SIOCGIWRANGE handler: populate struct iw_range with the device's
+ * capabilities — valid channels/frequencies, quality scale, supported bit
+ * rates (including 802.11n MCS-derived rates when nmode is enabled),
+ * encryption sizes, tx-power range and WE event/capability flags.
+ */
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	static int channels[MAXCHANNEL+1];
+	wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+	wl_rateset_t rateset;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	/* 11n rate tables (units of 500 kbit/s): rows are
+	 * {20MHz, 20MHz+SGI, 40MHz, 40MHz+SGI} for MCS0-7.
+	 */
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	/* network-ID range not applicable */
+	range->min_nwid = range->max_nwid = 0;
+
+	/* valid channels and their centre frequencies */
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+		return error;
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	/* quality scale: see rssi_to_qual() for the 0..5 mapping */
+	range->max_qual.qual = 5;
+
+	range->max_qual.level = 0x100 - 200;
+
+	range->max_qual.noise = 0x100 - 200;
+
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+
+	range->avg_qual.qual = 3;
+
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+
+	range->avg_qual.noise = 0x100 - 75;
+#endif
+
+	/* legacy rate set, 500 kbit/s units -> bit/s */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000;
+	dev_wlc_intvar_get(dev, "nmode", &nmode);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+		return error;
+
+	/* 11n: pick the MCS rate row matching bandwidth cap and SGI setting */
+	if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
+		(phytype == WLC_PHY_TYPE_LCN40))) {
+		dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
+		dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
+		dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t));
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	/* nominal throughput estimate per PHY type */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+		return error;
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;
+	else
+		range->throughput = 1500000;
+
+	/* RTS / fragmentation threshold limits */
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	/* power-management attributes not exposed through WE */
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	/* tx power reported in mW */
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	/* retry limits (lifetime not supported) */
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+
+	range->min_retry = 1;
+	range->max_retry = 255;
+
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+#if (defined(BCMSUP_PSK) && defined(WLFBT))
+	/* in-driver supplicant handles the 4-way handshake */
+	range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+#endif
+
+	/* advertise the wireless events this driver generates */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+	/* directed (per-ESSID) scans supported */
+	range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif
+
+	return 0;
+}
+
+/* Map an RSSI value (dBm) onto the 0..5 Wireless-Extensions quality
+ * scale; the WL_IW_RSSI_* thresholds are in ascending order.
+ */
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi > WL_IW_RSSI_VERY_GOOD)
+		return 5;
+	if (rssi > WL_IW_RSSI_GOOD)
+		return 4;
+	if (rssi > WL_IW_RSSI_LOW)
+		return 3;
+	if (rssi > WL_IW_RSSI_VERY_LOW)
+		return 2;
+	if (rssi > WL_IW_RSSI_NO_SIGNAL)
+		return 1;
+	return 0;
+}
+
+/*
+ * SIOCSIWSPY handler: record up to ARRAYSIZE(iw->spy_addr) MAC addresses
+ * to track link quality for, and reset their quality statistics.
+ */
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+/*
+ * SIOCGIWSPY handler: return the tracked addresses followed by their
+ * iw_quality records (WE layout: addresses first, then qualities), and
+ * clear each record's "updated" flag once read.
+ */
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * SIOCSIWAP handler: a broadcast or null BSSID means "disconnect"
+ * (WLC_DISASSOC); any other address triggers a reassociation to that
+ * BSSID (WLC_REASSOC).
+ */
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* broadcast/null BSSID == request to disassociate */
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		/* best effort: disassoc failure is logged but not returned */
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+		}
+		return 0;
+	}
+
+
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * SIOCGIWAP handler: report the currently associated BSSID, or all-zeros
+ * when not associated (WLC_GET_BSSID failure is deliberately ignored).
+ */
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	/* best effort: on failure the zeroed address is returned */
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+/*
+ * SIOCSIWMLME handler: translate IW_MLME_DISASSOC / IW_MLME_DEAUTH into
+ * the corresponding driver ioctls, carrying the peer address and reason
+ * code in an scb_val_t.
+ */
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+		return error;
+	}
+
+	return error;
+}
+#endif
+
+/*
+ * SIOCGIWAPLIST handler: fetch the latest scan results (WLC_SCAN_RESULTS)
+ * and return, for each ESS-capable BSS, its BSSID plus an iw_quality
+ * record derived from RSSI and noise.  WE layout: addresses first, then
+ * the matching quality array; dwrq->flags = 1 signals quality is present.
+ */
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* scratch buffer sized by the caller-provided length */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	/* walk the variable-length bss_info records */
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* infrastructure BSSes only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* quality information follows the addresses */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Incremental-scan variant of wl_iw_get_aplist(): walks the chained iscan
+ * result buffers instead of issuing WLC_SCAN_RESULTS.  Falls back to the
+ * plain handler when iscan is not running.
+ * NOTE(review): dwrq->length is reset inside the per-buffer loop, so only
+ * the last buffer's entries survive — matches upstream, but looks
+ * suspicious; verify before relying on multi-buffer results.
+ */
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* iscan thread not running: use the synchronous path */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_aplist(dev, info, dwrq, extra);
+	}
+
+	buf = iscan->list_hdr;
+
+	while (buf) {
+		list = &((wl_iscan_results_t*)buf->iscan_buf)->results;
+		ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+		bi = NULL;
+		for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+				WLC_IW_ISCAN_MAXLEN));
+
+			/* infrastructure BSSes only */
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+
+
+			memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			addr[dwrq->length].sa_family = ARPHRD_ETHER;
+			qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+			qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+			qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+			qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+			qual[dwrq->length].updated = 7;
+#endif
+
+			dwrq->length++;
+		}
+		buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* quality information follows the addresses */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 13
+/*
+ * SIOCSIWSCAN handler (synchronous path): kick off a scan, optionally
+ * directed at one ESSID when user space set IW_SCAN_THIS_ESSID.  The
+ * ioctl result is deliberately ignored — results are collected later via
+ * SIOCGIWSCAN.
+ */
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* default: broadcast scan (empty SSID) */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* directed scan requested? */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+	return 0;
+}
+
+/*
+ * SIOCSIWSCAN handler (incremental path): start an iscan, arm the result
+ * polling timer, and mark the state machine SCANING.  Falls back to the
+ * synchronous handler when the iscan thread is not running; a request
+ * made while a scan is in flight is silently accepted.
+ */
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* iscan thread not running: use the synchronous path */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_set_scan(dev, info, wrqu, extra);
+	}
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		return 0;
+	}
+
+	/* default: broadcast scan (empty SSID) */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* directed scan requested? */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+	/* enable scan events, then start the incremental scan */
+	wl_iw_set_event_mask(dev);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+	/* poll for partial results every timer_ms milliseconds */
+	iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+	add_timer(&iscan->timer);
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+/*
+ * Return TRUE if *wpaie points at a WPA IE (vendor-specific IE carrying
+ * WPA_OUI + subtype 0x01 and length >= 6).  Otherwise advance *tlvs /
+ * *tlvs_len past this IE so the caller can continue scanning.
+ */
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+
+	/* ie[0] = id, ie[1] = length, ie[2..] = payload */
+	uint8 *ie = *wpaie;
+
+	/* WPA IE: OUI followed by OUI-type 0x01 */
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	/* not a match: skip header (2 bytes) plus payload */
+	ie += ie[1] + 2;
+
+	*tlvs_len -= (int)(ie - *tlvs);
+
+	*tlvs = ie;
+	return FALSE;
+}
+
+/*
+ * Return TRUE if *wpsie points at a WPS IE (vendor-specific IE carrying
+ * WPA_OUI + subtype 0x04 and length >= 4).  Otherwise advance *tlvs /
+ * *tlvs_len past this IE so the caller can continue scanning.
+ */
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+
+	/* ie[0] = id, ie[1] = length, ie[2..] = payload */
+	uint8 *ie = *wpsie;
+
+	/* WPS IE: OUI followed by OUI-type 0x04 */
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	/* not a match: skip header (2 bytes) plus payload */
+	ie += ie[1] + 2;
+
+	*tlvs_len -= (int)(ie - *tlvs);
+
+	*tlvs = ie;
+	return FALSE;
+}
+#endif
+
+#ifdef BCMWAPI_WPI
+/*
+ * Render <len> bytes of <data> as hex into <buf> (at most buf_size-1
+ * chars plus NUL), upper- or lowercase per <uppercase>.  Returns the
+ * number of characters written; truncates silently when buf is too
+ * small.  Mirrors wpa_supplicant's helper of the same name.
+ */
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+	size_t len, int uppercase)
+{
+	size_t i;
+	char *pos = buf, *end = buf + buf_size;
+	int ret;
+	if (buf_size == 0)
+		return 0;
+	for (i = 0; i < len; i++) {
+		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+			data[i]);
+		/* snprintf truncated (or failed): terminate and stop */
+		if (ret < 0 || ret >= end - pos) {
+			end[-1] = '\0';
+			return pos - buf;
+		}
+		pos += ret;
+	}
+	end[-1] = '\0';
+	return pos - buf;
+}
+
+
+/* Lowercase convenience wrapper around _wpa_snprintf_hex(). */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
+#endif
+
+/*
+ * Append the interesting IEs of one scan result (RSN, optional MDIE, WPS,
+ * WPA and - when built with BCMWAPI_WPI - WAPI) to the wireless-event
+ * stream at *event_p, advancing the pointer.  The IE blob follows the
+ * fixed wl_bss_info_t header and is bi->ie_length bytes long.
+ */
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+#ifdef BCMWAPI_WPI
+	char *buf;
+	int custom_event_len;
+#endif
+
+	event = *event_p;
+	if (bi->ie_length) {
+		/* IEs start right after the fixed bss_info header */
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+		/* RSN (WPA2) IE */
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+#if defined(WLFBT)
+		/* 802.11r Mobility Domain IE */
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+#endif
+
+		/* WPS IE: scan vendor-specific IEs for the WPS OUI subtype */
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		/* WPA IE: rescan from the start for the WPA OUI subtype */
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+#ifdef BCMWAPI_WPI
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else
+			/* emit as "wapi_ie=<hex>" custom event instead */
+			iwe.cmd = IWEVCUSTOM;
+			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+			iwe.u.data.length = custom_event_len;
+
+			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+			if (buf == NULL)
+			{
+				WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+				break;
+			}
+
+			/* "wapi_ie=" + hex(id) + hex(len) + hex(payload) */
+			memcpy(buf, "wapi_ie=", 8);
+			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+#endif
+			break;
+		}
+#endif
+		*event_p = event;
+	}
+
+#endif
+	return 0;
+}
+/*
+ * SIOCGIWSCAN handler (non-iscan path): fetch the driver's scan-result
+ * list and translate each BSS into wireless-extensions events written to
+ * 'extra'.  Returns -EAGAIN while a scan is still in progress.
+ */
+static int
+wl_iw_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ channel_info_t ci;
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int error, i, j;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ uint buflen = dwrq->length;
+
+ WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* fix: reject a user buffer too small to hold even the list header;
+ * the list->buflen store below writes sizeof(uint32) bytes */
+ if (buflen < sizeof(list->buflen))
+ return -EINVAL;
+
+ /* A non-zero scan channel means a scan is still running */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+ return error;
+ ci.scan_channel = dtoh32(ci.scan_channel);
+ if (ci.scan_channel)
+ return -EAGAIN;
+
+ /* idiom: kzalloc replaces the original kmalloc + memset pair */
+ list = kzalloc(buflen, GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+ list->buflen = htod32(buflen);
+ if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+ kfree(list);
+ return error;
+ }
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+ for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+ /* bss_info records are variable length; step by bi->length */
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ buflen));
+
+ /* BSSID */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Operating mode (infrastructure vs ad-hoc) */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel / frequency */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Link quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ /* Security / WPS information elements */
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption capability flag (no key material exposed) */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Supported rates, emitted as a value sub-stream */
+ if (bi->rateset.count) {
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+
+ kfree(list);
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+ return 0;
+}
+
+/*
+ * SIOCGIWSCAN handler (incremental-scan path): walk the chain of iscan
+ * result buffers and translate each BSS into wireless-extensions events in
+ * 'extra'.  Falls back to wl_iw_get_scan() when iscan is unavailable.
+ * Returns -EAGAIN while scanning, -E2BIG when 'extra' fills up.
+ */
+static int
+wl_iw_iscan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_scan_results_t *list;
+ struct iw_event iwe;
+ wl_bss_info_t *bi = NULL;
+ int ii, j;
+ int apcnt;
+ char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_info_t *iscan = g_iscan;
+ iscan_buf_t * p_buf;
+
+ WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* No iscan machinery (or its thread is gone): use the blocking path */
+ if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ return wl_iw_get_scan(dev, info, dwrq, extra);
+ }
+
+ /* Results are not stable while a scan is in flight */
+ if (iscan->iscan_state == ISCAN_STATE_SCANING)
+ return -EAGAIN;
+
+ apcnt = 0;
+ p_buf = iscan->list_hdr;
+
+ /* One iscan_buf_t per partial result batch; list_cur is the sentinel */
+ while (p_buf != iscan->list_cur) {
+ list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+ if (list->version != WL_BSS_INFO_VERSION) {
+ WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+ }
+
+ bi = NULL;
+ for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+ /* bss_info records are variable length; step by bi->length */
+ bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+ /* Rough worst-case space check before emitting this BSS */
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end)
+ return -E2BIG;
+
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Operating mode (infrastructure vs ad-hoc) */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel / frequency */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Link quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+ iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ /* Security / WPS information elements */
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption capability flag (no key material exposed) */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Supported rates, emitted as a value sub-stream */
+ if (bi->rateset.count) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+ return -E2BIG;
+
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
+ }
+ }
+ p_buf = p_buf->next;
+ }
+
+ dwrq->length = event - extra;
+ dwrq->flags = 0;
+
+ return 0;
+}
+
+#endif
+
+
+/* SIOCSIWESSID handler: a non-empty SSID starts a join; an empty request
+ * disassociates from the current BSS. */
+static int
+wl_iw_set_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int ret;
+
+ WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+ memset(&ssid, 0, sizeof(ssid));
+
+ if (!dwrq->length || !extra) {
+ /* Empty SSID: drop the current association */
+ scb_val_t scbval;
+ bzero(&scbval, sizeof(scb_val_t));
+ ret = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ return ret ? ret : 0;
+ }
+
+#if WIRELESS_EXT > 20
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+ /* Pre-WE21 the reported length includes the trailing NUL */
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+ memcpy(ssid.SSID, extra, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+
+ ret = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid));
+ return ret ? ret : 0;
+}
+
+/*
+ * SIOCGIWESSID handler: report the currently configured SSID into 'extra'.
+ * dwrq->length is set to the SSID byte count; flags=1 marks it active.
+ */
+static int
+wl_iw_get_essid(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wlc_ssid_t ssid;
+ int error;
+
+ WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("Error getting the SSID\n"));
+ return error;
+ }
+
+ ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+ /* fix: clamp the firmware-reported length to the SSID buffer size so a
+ * corrupt value cannot overrun 'extra' or leak stack memory */
+ if (ssid.SSID_len > sizeof(ssid.SSID))
+ ssid.SSID_len = sizeof(ssid.SSID);
+
+ memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+ dwrq->length = ssid.SSID_len;
+
+ dwrq->flags = 1;
+
+ return 0;
+}
+
+/*
+ * SIOCSIWNICKN handler: store the station nickname in the per-device
+ * wl_iw_t state.  The stored string is always NUL-terminated.
+ */
+static int
+wl_iw_set_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+ WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+ if (!extra)
+ return -EINVAL;
+
+ /* fix: a zero-length request would underflow nickname[-1] below */
+ if (dwrq->length == 0)
+ return -EINVAL;
+
+ if (dwrq->length > sizeof(iw->nickname))
+ return -E2BIG;
+
+ memcpy(iw->nickname, extra, dwrq->length);
+ iw->nickname[dwrq->length - 1] = '\0';
+
+ return 0;
+}
+
+/* SIOCGIWNICKN handler: return the stored nickname (NUL-terminated by
+ * wl_iw_set_nick); dwrq->length includes the terminator. */
+static int
+wl_iw_get_nick(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+ if (extra == NULL)
+ return -EINVAL;
+
+ strcpy(extra, iw->nickname);
+ dwrq->length = strlen(iw->nickname) + 1;
+
+ return 0;
+}
+
+/*
+ * SIOCSIWRATE handler.  vwrq->value is interpreted three ways: negative =
+ * highest supported rate, < rateset.count = index into the current rateset,
+ * otherwise an absolute bit rate in bit/s (converted to 500 kbit/s units).
+ * 'fixed' pins that single rate; otherwise all rates up to it are allowed.
+ */
+static int wl_iw_set_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ wl_rateset_t rateset;
+ int error, rate, i, error_bg, error_a;
+
+ WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+ /* Need the current rateset both to resolve indices and to trim it */
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+ return error;
+
+ rateset.count = dtoh32(rateset.count);
+
+ if (vwrq->value < 0) {
+ /* Highest rate in the set (0x7f masks the "basic rate" bit) */
+ rate = rateset.rates[rateset.count - 1] & 0x7f;
+ } else if (vwrq->value < rateset.count) {
+ /* Small values are indices into the rateset */
+ rate = rateset.rates[vwrq->value] & 0x7f;
+ } else {
+ /* Absolute bit/s, converted to 500 kbit/s units */
+ rate = vwrq->value / 500000;
+ }
+
+ if (vwrq->fixed) {
+ /* Pin both bands to the single requested rate */
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+ error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+ } else {
+ /* Auto-rate mode: clear any pinned rate first (0 = auto) */
+ error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+
+ error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+ if (error_bg && error_a)
+ return (error_bg | error_a);
+
+ /* Keep only the rates up to and including the requested one */
+ for (i = 0; i < rateset.count; i++)
+ if ((rateset.rates[i] & 0x7f) > rate)
+ break;
+ rateset.count = htod32(i);
+
+ /* Push the trimmed rateset back to the driver */
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+ return error;
+ }
+
+ return 0;
+}
+
+/* SIOCGIWRATE handler: the driver reports the current rate in 500 kbit/s
+ * units; convert to bit/s for wireless extensions. */
+static int wl_iw_get_rate(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, rate_500k;
+
+ WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate_500k, sizeof(rate_500k));
+ if (ret)
+ return ret;
+
+ vwrq->value = dtoh32(rate_500k) * 500000;
+
+ return 0;
+}
+
+/* SIOCSIWRTS handler: program the RTS threshold; 'disabled' restores the
+ * 802.11 default (DOT11_DEFAULT_RTS_LEN). */
+static int
+wl_iw_set_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int threshold;
+
+ WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+ if (vwrq->disabled) {
+ threshold = DOT11_DEFAULT_RTS_LEN;
+ } else {
+ if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+ return -EINVAL;
+ threshold = vwrq->value;
+ }
+
+ return dev_wlc_intvar_set(dev, "rtsthresh", threshold);
+}
+
+/* SIOCGIWRTS handler: report the RTS threshold; a value at or above the
+ * 802.11 default means RTS/CTS is effectively disabled. */
+static int
+wl_iw_get_rts(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, threshold;
+
+ WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+ ret = dev_wlc_intvar_get(dev, "rtsthresh", &threshold);
+ if (ret)
+ return ret;
+
+ vwrq->value = threshold;
+ vwrq->disabled = (threshold >= DOT11_DEFAULT_RTS_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/* SIOCSIWFRAG handler: program the fragmentation threshold; 'disabled'
+ * restores the 802.11 default (DOT11_DEFAULT_FRAG_LEN). */
+static int
+wl_iw_set_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int threshold;
+
+ WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+ if (vwrq->disabled) {
+ threshold = DOT11_DEFAULT_FRAG_LEN;
+ } else {
+ if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+ return -EINVAL;
+ threshold = vwrq->value;
+ }
+
+ return dev_wlc_intvar_set(dev, "fragthresh", threshold);
+}
+
+/* SIOCGIWFRAG handler: report the fragmentation threshold; a value at or
+ * above the 802.11 default means fragmentation is effectively off. */
+static int
+wl_iw_get_frag(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, threshold;
+
+ WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+ ret = dev_wlc_intvar_get(dev, "fragthresh", &threshold);
+ if (ret)
+ return ret;
+
+ vwrq->value = threshold;
+ vwrq->disabled = (threshold >= DOT11_DEFAULT_FRAG_LEN);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*
+ * SIOCSIWTXPOW handler: toggles the software radio-disable bit, then (if
+ * the radio stays on) programs the transmit power.  Only milliwatt units
+ * are accepted; the value is converted to quarter-dBm for the driver.
+ */
+static int
+wl_iw_set_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, disable;
+ uint16 txpwrmw;
+ WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+ /* Low half = new SW-disable state, high half = mask of bits to change */
+ disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+ disable += WL_RADIO_SW_DISABLE << 16;
+
+ disable = htod32(disable);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+ return error;
+
+ /* NOTE(review): 'disable' is tested after htod32(); on a big-endian
+ * host the bit position would differ — verify if BE is supported */
+ if (disable & WL_RADIO_SW_DISABLE)
+ return 0;
+
+ /* Only mW units are supported by this handler */
+ if (!(vwrq->flags & IW_TXPOW_MWATT))
+ return -EINVAL;
+
+ /* Negative power requests are silently ignored */
+ if (vwrq->value < 0)
+ return 0;
+
+ if (vwrq->value > 0xffff) txpwrmw = 0xffff;
+ else txpwrmw = (uint16)vwrq->value;
+
+ /* Driver expects quarter-dBm ("qtxpower") */
+ error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+ return error;
+}
+
+/* SIOCGIWTXPOW handler: report the transmit power in milliwatts together
+ * with the radio-disable state. */
+static int
+wl_iw_get_txpow(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, radio_state, qdbm;
+ uint8 quarter_dbm;
+
+ WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_RADIO, &radio_state, sizeof(radio_state));
+ if (!ret)
+ ret = dev_wlc_intvar_get(dev, "qtxpower", &qdbm);
+ if (ret)
+ return ret;
+
+ radio_state = dtoh32(radio_state);
+ /* Strip the override flag before converting quarter-dBm to mW */
+ quarter_dbm = (uint8)(qdbm & ~WL_TXPWR_OVERRIDE);
+ vwrq->value = (int32)bcm_qdbm_to_mw(quarter_dbm);
+ vwrq->fixed = 0;
+ vwrq->disabled = (radio_state & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+ vwrq->flags = IW_TXPOW_MWATT;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 10
+/*
+ * SIOCSIWRETRY handler: program the long (LRL) and/or short (SRL) retry
+ * limits.  Which limit(s) a request targets depends on the WEXT version:
+ * WE>20 uses LONG/SHORT flags, older versions use MAX/MIN.  Lifetime
+ * requests are not supported.
+ */
+static int
+wl_iw_set_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error, lrl, srl;
+
+ WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+ /* Retry limits cannot be disabled and lifetimes are unsupported */
+ if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+ return -EINVAL;
+
+ /* Only limit requests are honoured */
+ if (vwrq->flags & IW_RETRY_LIMIT) {
+ /* Long retry limit: explicit LONG/MAX flag, or no flag at all */
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+ !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif
+
+ lrl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+ return error;
+ }
+
+ /* Short retry limit: explicit SHORT/MIN flag, or no flag at all */
+#if WIRELESS_EXT > 20
+ if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+ !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+ if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif
+
+ srl = htod32(vwrq->value);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/* SIOCGIWRETRY handler: report the long or short retry limit depending on
+ * which one was asked for; lifetime queries are unsupported. */
+static int
+wl_iw_get_retry(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, long_limit, short_limit;
+
+ WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+ vwrq->disabled = 0;
+
+ /* Lifetime retry values are not implemented */
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ return -EINVAL;
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_LRL, &long_limit, sizeof(long_limit));
+ if (!ret)
+ ret = dev_wlc_ioctl(dev, WLC_GET_SRL, &short_limit, sizeof(short_limit));
+ if (ret)
+ return ret;
+
+ long_limit = dtoh32(long_limit);
+ short_limit = dtoh32(short_limit);
+
+ if (vwrq->flags & IW_RETRY_MAX) {
+ vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ vwrq->value = long_limit;
+ } else {
+ vwrq->flags = IW_RETRY_LIMIT;
+ vwrq->value = short_limit;
+ if (short_limit != long_limit)
+ vwrq->flags |= IW_RETRY_MIN;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * SIOCSIWENCODE handler (legacy WEP).  Resolves the target key index
+ * (explicit, or the current primary key), enables/disables WEP, installs
+ * key material when provided, and sets open vs shared-key authentication.
+ */
+static int
+wl_iw_set_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec;
+
+ WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+ /* No explicit index: probe the driver for the primary key slot */
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ /* No primary key found: default to slot 0 */
+ if (key.index == DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+ } else {
+ /* WEXT key indices are 1-based; driver slots are 0-based */
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ return -EINVAL;
+ }
+
+ /* Enable or disable WEP according to the request */
+ wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+ return error;
+
+ /* No key material: just (re)select the primary key slot */
+ if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+
+ val = htod32(key.index);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ } else {
+ key.len = dwrq->length;
+
+ if (dwrq->length > sizeof(key.data))
+ return -EINVAL;
+
+ memcpy(key.data, extra, dwrq->length);
+
+ key.flags = WL_PRIMARY_KEY;
+ /* Infer the cipher from the key length */
+ switch (key.len) {
+ case WEP1_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WEP128_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ case TKIP_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+#endif
+ case AES_KEY_SIZE:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Driver expects the key structure in its own byte order */
+ swap_key_from_BE(&key);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+ return error;
+ }
+
+ /* 1 = shared-key authentication, 0 = open system */
+ val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+ val = htod32(val);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+ return error;
+
+ return 0;
+}
+
+/*
+ * SIOCGIWENCODE handler.  Reports the active key index and the enabled/
+ * restricted flags.  Note that 'key' is only zero-initialized and never
+ * fetched from the driver, so key.len stays 0 and no key material is ever
+ * copied out — key bytes are deliberately not exposed to userspace.
+ */
+static int
+wl_iw_get_encode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error, val, wsec, auth;
+
+ WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+ /* Local key struct stays zeroed; see function comment */
+ bzero(&key, sizeof(wl_wsec_key_t));
+
+ if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+ /* No explicit index: probe for the current primary key slot */
+ for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+ val = key.index;
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+ return error;
+ val = dtoh32(val);
+ if (val)
+ break;
+ }
+ } else
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+ key.index = 0;
+
+ /* Current security mode and authentication type */
+
+ if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+ (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+ return error;
+
+ swap_key_to_BE(&key);
+
+ wsec = dtoh32(wsec);
+ auth = dtoh32(auth);
+
+ /* key.len is still 0 here, so the reported length is always 0 */
+ dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+ /* WEXT key indices are 1-based */
+ dwrq->flags = key.index + 1;
+ if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+ /* No cipher enabled */
+ dwrq->flags |= IW_ENCODE_DISABLED;
+ }
+ if (auth) {
+ /* Shared-key authentication */
+ dwrq->flags |= IW_ENCODE_RESTRICTED;
+ }
+
+ /* Never true in practice since dwrq->length is 0 (see above) */
+ if (dwrq->length && extra)
+ memcpy(extra, key.data, dwrq->length);
+
+ return 0;
+}
+
+/* SIOCSIWPOWER handler: 'disabled' selects constant-awake (PM_OFF),
+ * anything else selects maximum power save (PM_MAX). */
+static int
+wl_iw_set_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, pm_mode;
+
+ WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+ pm_mode = htod32(vwrq->disabled ? PM_OFF : PM_MAX);
+
+ ret = dev_wlc_ioctl(dev, WLC_SET_PM, &pm_mode, sizeof(pm_mode));
+ return ret ? ret : 0;
+}
+
+/* SIOCGIWPOWER handler: report whether power management is active; any
+ * non-zero PM mode counts as enabled. */
+static int
+wl_iw_get_power(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int ret, pm_mode;
+
+ WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+ ret = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_mode, sizeof(pm_mode));
+ if (ret)
+ return ret;
+
+ pm_mode = dtoh32(pm_mode);
+ vwrq->disabled = pm_mode ? 0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+/*
+ * SIOCSIWGENIE handler: push the supplicant's generic IE to the driver.
+ * A WAPI IE goes through the "wapiie" iovar (staged in a bounce buffer);
+ * everything else goes through "wpaie" directly from 'extra'.
+ */
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+ uchar buf[WLC_IOCTL_SMLEN] = {0};
+ uchar *p = buf;
+ int wapi_ie_size;
+
+ WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+ if (extra[0] == DOT11_MNG_WAPI_ID)
+ {
+ /* fix: iwp->length is caller-controlled; bound the copy so it
+ * cannot overrun the WLC_IOCTL_SMLEN stack buffer */
+ if (iwp->length > sizeof(buf))
+ return -E2BIG;
+ wapi_ie_size = iwp->length;
+ memcpy(p, extra, iwp->length);
+ dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+ }
+ else
+#endif
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+/* SIOCGIWGENIE handler: read back the driver's "wpaie" buffer into 'extra'
+ * using a fixed 64-byte window. */
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ const int wpaie_len = 64;
+
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+
+ iwp->length = wpaie_len;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, wpaie_len);
+
+ return 0;
+}
+
+/*
+ * SIOCSIWENCODEEXT handler (WPA/WPA2/WAPI key management).  Three cases:
+ * key_len == 0 selects/clears a key slot; IW_ENCODE_ALG_PMK installs a
+ * pre-shared key (WLFBT builds); otherwise key material is installed with
+ * the algorithm mapped from the WEXT 'alg' field.
+ */
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+ /* Disable requests are intentionally ignored here */
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+ }
+
+ /* WEXT key indices are 1-based; driver slots are 0-based */
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+ /* Pairwise keys carry the peer address; group keys leave it zeroed */
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+ /* No key material: select the TX key slot or delete the key */
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("Changing the the primary Key to %d\n", key.index));
+ /* Make this slot the transmit (primary) key */
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+ /* Zero-length key with no TX flag: delete the key in this slot */
+ else {
+ swap_key_from_BE(&key);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ }
+#if (defined(BCMSUP_PSK) && defined(WLFBT))
+ /* Pre-shared key: hex-encode and hand to the internal supplicant */
+ else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+ int j;
+ wsec_pmk_t pmk;
+ char keystring[WSEC_MAX_PSK_LEN + 1];
+ char* charptr = keystring;
+ uint len;
+
+ /* NOTE(review): this loop reads WSEC_MAX_PSK_LEN/2 bytes of
+ * iwe->key regardless of iwe->key_len — confirm callers always
+ * supply a full-size PMK */
+ for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+ sprintf(charptr, "%02x", iwe->key[j]);
+ charptr += 2;
+ }
+ len = strlen(keystring);
+ pmk.key_len = htod16(len);
+ bcopy(keystring, pmk.key, len);
+ pmk.flags = htod16(WSEC_PASSPHRASE);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+ if (error)
+ return error;
+ }
+#endif
+ /* Normal case: install the supplied key material */
+ else {
+ if (iwe->key_len > sizeof(key.data))
+ return -EINVAL;
+
+ WL_WSEC(("Setting the key index %d\n", key.index));
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("key is a Primary Key\n"));
+ key.flags = WL_PRIMARY_KEY;
+ }
+
+ bcopy((void *)iwe->key, key.data, iwe->key_len);
+
+ /* TKIP: WEXT orders TX MIC before RX MIC; the driver expects
+ * the opposite, so swap the two 8-byte MIC halves */
+ if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+ uint8 keybuf[8];
+ bcopy(&key.data[24], keybuf, sizeof(keybuf));
+ bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+ bcopy(keybuf, &key.data[16], sizeof(keybuf));
+ }
+
+ /* Seed the receive IV from the supplied RSC (little-endian) */
+ if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ uchar *ivptr;
+ ivptr = (uchar *)iwe->rx_seq;
+ key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+ (ivptr[3] << 8) | ivptr[2];
+ key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+ key.iv_initialized = TRUE;
+ }
+
+ /* Map the WEXT algorithm id to the driver's crypto algorithm */
+ switch (iwe->alg) {
+ case IW_ENCODE_ALG_NONE:
+ key.algo = CRYPTO_ALGO_OFF;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (iwe->key_len == WEP1_KEY_SIZE)
+ key.algo = CRYPTO_ALGO_WEP1;
+ else
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ break;
+#ifdef BCMWAPI_WPI
+ case IW_ENCODE_ALG_SM4:
+ key.algo = CRYPTO_ALGO_SMS4;
+ if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ key.flags &= ~WL_PRIMARY_KEY;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ swap_key_from_BE(&key);
+
+ /* Let pending 802.1x frames drain before installing the key */
+ dhd_wait_pend8021x(dev);
+
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+
+#if WIRELESS_EXT > 17
+/* File-global PMKID cache pushed to the driver via the "pmkid_info" iovar.
+ * 'foo' extends the single pmkid entry embedded in pmkid_list_t so that
+ * MAXPMKID entries are contiguous in one allocation. */
+struct {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+/*
+ * SIOCSIWPMKSA handler: maintain the global PMKID cache (flush / remove /
+ * add) and push the whole cache to the driver via "pmkid_info".
+ */
+static int
+wl_iw_set_pmksa(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ struct iw_pmksa *iwpmksa;
+ uint i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+ WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+ iwpmksa = (struct iw_pmksa *)extra;
+ bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+ if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+ WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+ bzero((char *)&pmkid_list, sizeof(pmkid_list));
+ }
+ if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+ pmkid_list_t pmkid, *pmkidptr;
+ pmkidptr = &pmkid;
+ bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+ {
+ uint j;
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+ if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
+ /* fix: only compact and shrink the cache when a matching entry
+ * was actually found; the old code decremented npmkid
+ * unconditionally (underflowing on an empty list / miss) and
+ * its shift loop read one slot past the array end */
+ if (i < pmkid_list.pmkids.npmkid) {
+ for (; i < pmkid_list.pmkids.npmkid - 1; i++) {
+ bcopy(&pmkid_array[i+1].BSSID,
+ &pmkid_array[i].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&pmkid_array[i+1].PMKID,
+ &pmkid_array[i].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ pmkid_list.pmkids.npmkid--;
+ }
+ }
+ if (iwpmksa->cmd == IW_PMKSA_ADD) {
+ /* fix: refuse to add beyond the cache capacity (MAXPMKID) */
+ if (pmkid_list.pmkids.npmkid < MAXPMKID) {
+ bcopy(&iwpmksa->bssid.sa_data[0],
+ &pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
+ ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+ WPA2_PMKID_LEN);
+ {
+ uint j;
+ uint k;
+ k = pmkid_list.pmkids.npmkid;
+ BCM_REFERENCE(k);
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_array[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ pmkid_list.pmkids.npmkid++;
+ }
+ }
+ WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+ uint j;
+ WL_TRACE(("PMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_array[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+ printf("\n");
+ }
+ WL_TRACE(("\n"));
+ dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+ return 0;
+}
+#endif
+
+/* SIOCGIWENCODEEXT handler: intentionally a no-op — installed key material
+ * is never read back to userspace. */
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+
+ return 0;
+}
+
+/*
+ * SIOCSIWAUTH handler: translate each wireless-extensions auth parameter
+ * (WPA version, cipher selection, key management, auth algorithm, WPS
+ * privacy, optional WAPI) into the corresponding driver iovars.
+ */
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ uint32 cipher_combined;
+ int val = 0;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval));
+
+ switch (paramid) {
+
+ case IW_AUTH_WPA_VERSION:
+ /* Map the WEXT WPA version onto the driver's wpa_auth bitmask */
+ if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+ val = WPA_AUTH_DISABLED;
+ else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+ else if (paramval & IW_AUTH_WAPI_VERSION_1)
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ /* Remember the requested cipher, then rebuild wsec from the
+ * combination of pairwise and group selections */
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+ iw->pwsec = paramval;
+ }
+ else {
+ iw->gwsec = paramval;
+ }
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+
+ cipher_combined = iw->gwsec | iw->pwsec;
+ val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+ if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ val |= WEP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+ val |= TKIP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+ val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+ val &= ~SMS4_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+ val |= SMS4_ENABLED;
+#endif
+
+ /* Privacy requested but no cipher: treat as a WPS enrollee */
+ if (iw->privacy_invoked && !val) {
+ WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else if (val) {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+#ifdef WLFBT
+ /* fix: the original tested (val | AES_ENABLED), which is always
+ * non-zero and made the else-if branch unreachable; the intent
+ * is to enable the internal supplicant when AES is selected */
+ if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+ return error;
+ }
+ else if (val == 0) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+ return error;
+ }
+#endif
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ /* Narrow the current wpa_auth mask to PSK or 802.1X as asked */
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA_AUTH_PSK;
+ else
+ val = WPA_AUTH_UNSPECIFIED;
+ }
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ if (paramval & IW_AUTH_KEY_MGMT_PSK)
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA2_AUTH_UNSPECIFIED;
+ }
+#ifdef BCMWAPI_WPI
+ if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+ val = WAPI_AUTH_UNSPECIFIED;
+#endif
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ /* 0 = open system, 1 = shared key; LEAP etc. unsupported */
+ WL_ERROR(("Setting the D11auth %d\n", paramval));
+ if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ val = 0;
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+ return error;
+ }
+ else {
+ /* Enabling is handled by IW_AUTH_WPA_VERSION */
+ }
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+ /* Roaming is always handled by the driver */
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED: {
+ int wsec;
+
+ if (paramval == 0) {
+ iw->privacy_invoked = FALSE;
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ iw->privacy_invoked = TRUE;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ if (!WSEC_ENABLED(wsec)) {
+ /* Privacy with no cipher: WPS enrollee handshake */
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+ }
+ break;
+ }
+
+
+#endif
+
+#ifdef BCMWAPI_WPI
+
+ case IW_AUTH_WAPI_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+ if (paramval) {
+ val |= SMS4_ENABLED;
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+ __FUNCTION__, val, error));
+ return error;
+ }
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+ WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
+ __FUNCTION__, WAPI_AUTH_UNSPECIFIED,
+ error));
+ return error;
+ }
+ }
+
+ break;
+
+#endif
+
+ default:
+ break;
+ }
+ return 0;
+}
+/* True when the wpa_auth bitmask selects PSK key management (WPA or WPA2) */
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+/*
+ * SIOCGIWAUTH handler: report the current value of one authentication
+ * parameter, selected by vwrq->flags & IW_AUTH_INDEX, in vwrq->value.
+ * Returns 0 on success or the negative error from the firmware query.
+ */
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+
+		/* Translate the firmware "wpa_auth" bitmask into a WE version code */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		/* Cached by the corresponding set handler, not re-read from firmware */
+		paramval = iw->pwsec;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gwsec;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+
+		/* Firmware "auth": 0 = open system, non-zero = shared key
+		 * (mirrors the mapping in the set handler above)
+		 */
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		/* Roaming control query is not implemented; paramval stays 0 */
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif
+
+/*
+ * Wireless Extensions standard handler table.  Entries are positional:
+ * wl_iw_ioctl() indexes this array with IW_IOCTL_IDX(cmd), so the slot
+ * order follows the SIOCxIWxxx ioctl numbering and a NULL entry means
+ * the corresponding ioctl is unsupported.
+ */
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,
+	(iw_handler) wl_iw_get_name,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_set_freq,
+	(iw_handler) wl_iw_get_freq,
+	(iw_handler) wl_iw_set_mode,
+	(iw_handler) wl_iw_get_mode,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_get_range,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_set_spy,
+	(iw_handler) wl_iw_get_spy,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_set_wap,
+	(iw_handler) wl_iw_get_wap,
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,
+#else
+	(iw_handler) NULL,
+#endif
+	(iw_handler) wl_iw_iscan_get_aplist,
+#if WIRELESS_EXT > 13
+	(iw_handler) wl_iw_iscan_set_scan,
+	(iw_handler) wl_iw_iscan_get_scan,
+#else
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+#endif
+	(iw_handler) wl_iw_set_essid,
+	(iw_handler) wl_iw_get_essid,
+	(iw_handler) wl_iw_set_nick,
+	(iw_handler) wl_iw_get_nick,
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_set_rate,
+	(iw_handler) wl_iw_get_rate,
+	(iw_handler) wl_iw_set_rts,
+	(iw_handler) wl_iw_get_rts,
+	(iw_handler) wl_iw_set_frag,
+	(iw_handler) wl_iw_get_frag,
+	(iw_handler) wl_iw_set_txpow,
+	(iw_handler) wl_iw_get_txpow,
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,
+	(iw_handler) wl_iw_get_retry,
+#endif
+	(iw_handler) wl_iw_set_encode,
+	(iw_handler) wl_iw_get_encode,
+	(iw_handler) wl_iw_set_power,
+	(iw_handler) wl_iw_get_power,
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,
+	(iw_handler) NULL,
+	(iw_handler) wl_iw_set_wpaie,
+	(iw_handler) wl_iw_get_wpaie,
+	(iw_handler) wl_iw_set_wpaauth,
+	(iw_handler) wl_iw_get_wpaauth,
+	(iw_handler) wl_iw_set_encodeext,
+	(iw_handler) wl_iw_get_encodeext,
+	(iw_handler) wl_iw_set_pmksa,
+#endif
+};
+
+#if WIRELESS_EXT > 12
+/* Driver-private ioctl numbers, their handlers, and the iwpriv-visible
+ * argument descriptions, all exported through wl_iw_handler_def below.
+ */
+enum {
+	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+	WL_IW_SET_VLANMODE,
+	WL_IW_SET_PM
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+	wl_iw_set_leddc,
+	wl_iw_set_vlanmode,
+	wl_iw_set_pm
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_LEDDC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_leddc"
+	},
+	{
+		WL_IW_SET_VLANMODE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_vlanmode"
+	},
+	{
+		WL_IW_SET_PM,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_pm"
+	}
+};
+
+/* Registered with the net_device so cfg80211-less kernels can reach the
+ * standard and private WE handlers above.
+ * Use ARRAY_SIZE (kernel macro) consistently for all three counts, and
+ * C99 designated initializers throughout (the original mixed the GNU
+ * obsolete "field:" form for get_wireless_stats).
+ */
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAY_SIZE(wl_iw_handler),
+	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.standard = (iw_handler *) wl_iw_handler,
+	.private = wl_iw_priv_handler,
+	.private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+	.get_wireless_stats = dhd_get_wireless_stats,
+#endif
+};
+#endif
+
+/*
+ * Wireless Extensions ioctl dispatcher.  Looks up the handler for @cmd
+ * in wl_iw_handler[], sizes and copies in any userspace payload, invokes
+ * the handler, then copies the payload back.  Returns 0 or -errno.
+ */
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;		/* bytes per token; payload = tokens * size */
+	int max_tokens = 0, ret = 0;
+
+	/* Reject commands outside the table or with no registered handler */
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+		return -EOPNOTSUPP;
+
+	/* Per-command payload geometry: max_tokens == 0 means no payload */
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = IW_ENCODING_TOKEN_MAX;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range);
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+		/* iscan results can exceed IW_SCAN_MAX_DATA; trust the caller's
+		 * buffer size when incremental scan is active
+		 */
+		if (g_iscan)
+			max_tokens = wrq->u.data.length;
+		else
+			max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens)
+			return -E2BIG;
+
+		/* Kernel bounce buffer sized for the worst case, not just length */
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	/* Copy back using the handler-updated length (set as well as get paths) */
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+
+/*
+ * Format a short human-readable connection-status string ("<name> <cause>
+ * <status> <reason>") for the given event triple.  Returns TRUE and fills
+ * stringBuf when the event matches a known table row, FALSE otherwise.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* event to match, or a specific event code */
+		uint32 inStatus;		/* status to match, or WL_IW_DONT_CARE */
+		uint32 inReason;		/* reason to match, or WL_IW_DONT_CARE */
+		const char* outName;
+		const char* outCause;
+	} conn_fail_event_map_t;
+
+	/* Sentinel meaning "match any status/reason" in the table below */
+# define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		/* Rows are tried in order; first match wins */
+
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	/* Linear scan for the first matching row */
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	/* NOTE(review): status/reason are uint32 printed with %02d -- works on
+	 * Linux ABIs but %u would be the matching conversion; confirm before
+	 * changing, as the string format may be parsed by userspace.
+	 */
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02d %02d",
+			name, cause, status, reason);
+		WL_TRACE(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#if (WIRELESS_EXT > 14)
+
+/* Decode the big-endian event fields and ask the connection-status
+ * formatter whether this event maps to a known connection outcome;
+ * on a match stringBuf receives the formatted description.
+ */
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 evt = ntoh32(e->event_type);
+	uint32 st = ntoh32(e->status);
+	uint32 rsn = ntoh32(e->reason);
+
+	return wl_iw_conn_status_str(evt, st, rsn, stringBuf, buflen) ? TRUE : FALSE;
+}
+#endif
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256
+#endif
+
+/*
+ * Translate a firmware event into a Wireless Extensions event and push it
+ * to userspace via wireless_send_event().  Also wakes the iscan worker on
+ * scan completion and emits an IWEVCUSTOM string for connection failures.
+ * Compiled out entirely for WIRELESS_EXT <= 13.
+ */
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	/* Event fields arrive in network byte order */
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+
+	switch (event_type) {
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		/* Report disconnection as SIOCGIWAP with a zeroed BSSID
+		 * (extra was just zeroed, so this length is 0)
+		 */
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		/* Link down: signal with a zeroed BSSID; link up keeps e->addr */
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		break;
+	case WLC_E_ACTION_FRAME:
+		/* Custom event: first byte is the event id, rest is the frame */
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			WL_TRACE(("wl_iw_event status %d  \n", status));
+		}
+		break;
+#endif
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		/* Build the iw_michaelmicfailure record in-place in extra[] */
+		struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+
+	case WLC_E_ASSOC_REQ_IE:
+		/* NOTE(review): wrqu.data.length is set to datalen even when
+		 * datalen >= sizeof(extra), in which case extra stays zeroed --
+		 * confirm receivers tolerate length > copied bytes
+		 */
+		cmd = IWEVASSOCREQIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_ASSOC_RESP_IE:
+		/* Same truncation caveat as WLC_E_ASSOC_REQ_IE above */
+		cmd = IWEVASSOCRESPIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_PMKID_CACHE: {
+		struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+		pmkid_cand_list_t *pmkcandlist;
+		pmkid_cand_t	*pmkidcand;
+		int count;
+
+		if (data == NULL)
+			break;
+
+		/* One IWEVPMKIDCAND event per candidate; sent inline here,
+		 * the cmd!=0 path below would only resend the last one
+		 */
+		cmd = IWEVPMKIDCAND;
+		pmkcandlist = data;
+		count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+		wrqu.data.length = sizeof(struct iw_pmkid_cand);
+		pmkidcand = pmkcandlist->pmkid_cand;
+		while (count) {
+			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+			if (pmkidcand->preauth)
+				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+				ETHER_ADDR_LEN);
+			wireless_send_event(dev, cmd, &wrqu, extra);
+			pmkidcand++;
+			count--;
+		}
+		break;
+	}
+#endif
+
+	case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+		cmd = SIOCGIWSCAN;
+#endif
+		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+		/* Wake the iscan worker so it fetches the pending results */
+		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+			up(&g_iscan->sysioc_sem);
+		break;
+
+	default:
+		/* Events with no WE mapping are dropped silently */
+		break;
+	}
+
+	if (cmd) {
+		/* SIOCGIWSCAN carries no payload; results are fetched separately */
+		if (cmd == SIOCGIWSCAN)
+			wireless_send_event(dev, cmd, &wrqu, NULL);
+		else
+			wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+
+#if WIRELESS_EXT > 14
+	/* Additionally surface connection failures as a custom string event */
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif
+
+#endif
+}
+
+/*
+ * Fill @wstats (for /proc/net/wireless and SIOCGIWSTATS) from firmware:
+ * PHY noise, RSSI mapped to a 0-5 quality score, and (WE > 11) the
+ * firmware counter block for the discard statistics.  Returns 0 or the
+ * first ioctl/iovar error encountered.
+ */
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+	scb_val.val = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	/* Bucket the dBm RSSI into the coarse 0..5 quality scale */
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	/* 0x100 + dBm: 8-bit wrap encoding of the negative dBm values
+	 * (paired with IW_QUAL_DBM below on WE > 18 -- presumably what
+	 * userspace expects; confirm against iwlib decoding)
+	 */
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+		goto done;
+	}
+
+	/* Bail out silently (res == 0) on a counter-struct version mismatch */
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif
+
+done:
+	return res;
+}
+
+/* iscan watchdog expiry: clear the armed flag and, if a scan is still in
+ * progress, kick the sysioc worker thread to poll for more results.
+ */
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *info = (iscan_info_t *)data;
+
+	info->timer_on = 0;
+	if (info->iscan_state == ISCAN_STATE_IDLE)
+		return;
+
+	WL_TRACE(("timer trigger\n"));
+	up(&info->sysioc_sem);
+}
+
+/*
+ * Read-modify-write the firmware "event_msgs" bitmask so that
+ * WLC_E_SCAN_COMPLETE events are delivered to the host (needed by the
+ * iscan machinery in wl_iw_event()).
+ */
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	/* iovar buffer: mask plus room for the "event_msgs" name/overhead */
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+
+}
+
+/*
+ * Initialize @params for an incremental scan: broadcast BSSID, any BSS
+ * type, firmware-default timings (-1), all channels, plus the optional
+ * target @ssid.  Integer fields are converted to dongle byte order.
+ * Always returns 0.
+ */
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	/* -1 here selects the firmware defaults for probes/dwell times */
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;	/* 0 = scan all channels */
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+/*
+ * Issue an "iscan" iovar to the firmware with the given @action
+ * (start/continue/suspend), optionally scoped to @ssid.  Allocates the
+ * variable-size parameter block, fills it via wl_iw_iscan_prep(), sends
+ * it, and frees it.  Returns 0 or -ENOMEM / a prep error.
+ */
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+	wl_iscan_params_t *params;
+	int err = 0;
+
+	/* One extra wlc_ssid_t when a directed scan is requested */
+	if (ssid && ssid->SSID_len) {
+		params_size += sizeof(wlc_ssid_t);
+	}
+	params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		return -ENOMEM;
+	}
+	memset(params, 0, params_size);
+	ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+	err = wl_iw_iscan_prep(&params->params, ssid);
+
+	if (!err) {
+		params->version = htod32(ISCAN_REQ_VERSION);
+		params->action = htod16(action);
+		params->scan_duration = htod16(0);
+
+		/* Fire-and-forget: the iovar status is intentionally ignored */
+		(void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+			iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+	}
+
+	kfree(params);
+	return err;
+}
+
+/*
+ * Fetch the next chunk of incremental-scan results from the firmware
+ * into a per-chunk buffer appended to iscan->list_hdr (reusing buffers
+ * from list_cur when available).  Returns the dongle-order-corrected
+ * scan status (WL_SCAN_RESULTS_*), or WL_SCAN_RESULTS_ABORTED on OOM.
+ */
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+
+	/* Reuse a previously-allocated buffer if one is available ... */
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		/* ... otherwise allocate one and link it at the list tail */
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf)
+			return WL_SCAN_RESULTS_ABORTED;
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	/* Request up to WLC_IW_ISCAN_MAXLEN bytes of results into the buffer */
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	(void) dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	/* Fix up dongle byte order in the header fields */
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_TRACE(("results->count = %d\n", results->count));
+
+	WL_TRACE(("results->buflen = %d\n", results->buflen));
+	status = dtoh32(list_buf->status);
+	return status;
+}
+
+/* Tell userspace that scan results are ready by emitting an empty
+ * SIOCGIWSCAN wireless event on the iscan device.
+ */
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+	union iwreq_data req;
+	char payload[IW_CUSTOM_MAX + 1];
+
+	memset(&req, 0, sizeof(req));
+	memset(payload, 0, sizeof(payload));
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &req, payload);
+}
+
+/*
+ * iscan worker thread: woken via iscan->sysioc_sem (by wl_iw_event on
+ * WLC_E_SCAN_COMPLETE or by the watchdog timer), it pulls the next batch
+ * of results and either continues the scan, re-arms the timer, or signals
+ * completion to userspace.  Exits when the semaphore wait is interrupted
+ * (KILL_PROC from wl_iw_detach).
+ */
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+		/* Cancel a pending watchdog; we are handling the wakeup now */
+		if (iscan->timer_on) {
+			del_timer(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+
+		/* rtnl lock required around dev_iw_iovar_* on >= 2.6.27 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		switch (status) {
+		case WL_SCAN_RESULTS_PARTIAL:
+			WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			rtnl_lock();
+#endif
+			/* Ask the firmware for the next batch, then wait again */
+			wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			rtnl_unlock();
+#endif
+
+			iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+			add_timer(&iscan->timer);
+			iscan->timer_on = 1;
+			break;
+		case WL_SCAN_RESULTS_SUCCESS:
+			WL_TRACE(("iscanresults complete\n"));
+			iscan->iscan_state = ISCAN_STATE_IDLE;
+			wl_iw_send_scan_complete(iscan);
+			break;
+		case WL_SCAN_RESULTS_PENDING:
+			WL_TRACE(("iscanresults pending\n"));
+			/* Firmware still scanning: re-arm the poll timer only */
+			iscan->timer.expires = jiffies + iscan->timer_ms*HZ/1000;
+			add_timer(&iscan->timer);
+			iscan->timer_on = 1;
+			break;
+		case WL_SCAN_RESULTS_ABORTED:
+			WL_TRACE(("iscanresults aborted\n"));
+			iscan->iscan_state = ISCAN_STATE_IDLE;
+			wl_iw_send_scan_complete(iscan);
+			break;
+		default:
+			WL_TRACE(("iscanresults returned unknown status %d\n", status));
+			break;
+		}
+	}
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
+/*
+ * Allocate and initialize the global iscan state (g_iscan) for @dev and
+ * start the _iscan_sysioc_thread worker.  Returns 0 on success (or when
+ * @dev is NULL, in which case nothing is set up) and -ENOMEM on failure.
+ */
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+	iscan_info_t *iscan = NULL;
+
+	if (!dev)
+		return 0;
+
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+	iscan->sysioc_pid = -1;
+
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+	/* Poll/watchdog re-arm period for the iscan timer, in milliseconds */
+	iscan->timer_ms    = 2000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+	if (iscan->sysioc_pid < 0) {
+		/* Fix: the original leaked iscan and left g_iscan pointing at
+		 * it on thread-creation failure, so a later wl_iw_detach()
+		 * would wait on a completion nobody signals.
+		 */
+		g_iscan = NULL;
+		kfree(iscan);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * Tear down the iscan state created by wl_iw_attach(): stop the worker
+ * thread (SIGTERM + wait for its exit completion), free the chained
+ * result buffers, then free the state and clear the global.
+ */
+void wl_iw_detach(void)
+{
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+
+	/* Walk and free the singly-linked list of scan-result chunks */
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan);
+	g_iscan = NULL;
+}
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h
new file mode 100644
index 00000000000000..2afb5a683bbf7e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_iw.h
@@ -0,0 +1,161 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h 291086 2011-10-21 01:17:24Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 10
+#define GET_SSID "SSID="
+#define GET_CHANNEL "CH="
+#define GET_NPROBE "NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
+#define GET_HOME_DWELL "HOME="
+#define GET_SCAN_TYPE "TYPE="
+
+#define BAND_GET_CMD "GETBAND"
+#define BAND_SET_CMD "SETBAND"
+#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
+#define SETSUSPEND_CMD "SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
+
+#define PNOSETUP_SET_CMD "PNOSETUP "
+#define PNOENABLE_SET_CMD "PNOFORCE"
+#define PNODEBUG_SET_CMD "PNODEBUG"
+#define TXPOWER_SET_CMD "TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
+
+/* Extra per-request parameters passed alongside WE handler calls */
+typedef struct wl_iw_extra_params {
+	int 	target_channel;	/* channel to use for the operation */
+} wl_iw_extra_params_t;
+
+/* Mapping of an ISO country abbreviation to a firmware locale/revision */
+struct cntry_locales_custom {
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];	/* ISO 3166-1 country abbreviation */
+	char custom_locale[WLC_CNTRY_BUF_SZ];	/* locale name understood by firmware */
+	int32 custom_locale_rev;		/* locale revision */
+};
+
+
+#define WL_IW_RSSI_MINVAL -200
+#define WL_IW_RSSI_NO_SIGNAL -91
+#define WL_IW_RSSI_VERY_LOW -80
+#define WL_IW_RSSI_LOW -70
+#define WL_IW_RSSI_GOOD -68
+#define WL_IW_RSSI_VERY_GOOD -58
+#define WL_IW_RSSI_EXCELLENT -57
+#define WL_IW_RSSI_INVALID 0
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN ((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
+
+#define G_SCAN_RESULTS 8*1024
+#define WE_ADD_EVENT_FIX 0x80
+#define G_WLAN_SET_ON 0
+#define G_WLAN_SET_OFF 1
+
+
+/* Per-device Wireless Extensions state (retrieved via IW_DEV_IF(dev)) */
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];	/* set/get via SIOCSIWNICKN/SIOCGIWNICKN */
+
+	struct iw_statistics wstats;		/* cached stats for SIOCGIWSTATS */
+
+	int spy_num;				/* number of valid entries in spy_addr[] */
+	uint32 pwsec;				/* cached pairwise cipher (IW_AUTH_CIPHER_PAIRWISE) */
+	uint32 gwsec;				/* cached group cipher (IW_AUTH_CIPHER_GROUP) */
+	bool privacy_invoked;			/* tracks IW_AUTH_PRIVACY_INVOKED / WPS enrollee state */
+	struct ether_addr spy_addr[IW_MAX_SPY];	/* addresses monitored for link quality */
+	struct iw_quality spy_qual[IW_MAX_SPY];	/* per-spy-address quality samples */
+	void  *wlinfo;
+} wl_iw_t;
+
+/* Control block for a driver worker thread (timer + semaphore + exit sync) */
+struct wl_ctrl {
+	struct timer_list *timer;
+	struct net_device *dev;
+	long sysioc_pid;			/* worker pid, or -1 when not running */
+	struct semaphore sysioc_sem;		/* wakes the worker */
+	struct completion sysioc_exited;	/* signaled when the worker exits */
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+
+void wl_iw_detach(void);
+
+#define CSCAN_COMMAND "CSCAN "
+#define CSCAN_TLV_PREFIX 'S'
+#define CSCAN_TLV_VERSION 1
+#define CSCAN_TLV_SUBVERSION 0
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE 'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE 'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE 'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE 'P'
+#define CSCAN_TLV_TYPE_HOME_IE 'H'
+#define CSCAN_TLV_TYPE_STYPE_IE 'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+ iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+ iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
new file mode 100644
index 00000000000000..af2586326c2084
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c
@@ -0,0 +1,422 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_mon.c 280623 2011-08-30 14:49:39Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+/* Lifecycle states of the driver-wide monitor bookkeeping (g_monitor). */
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+/* Exported entry points; dhd_start_xmit is the real device's transmit
+ * routine, used to forward frames injected on a monitor interface.
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+/* One monitor interface and the real interface it shadows. */
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;	/* The ARPHRD_IEEE80211_RADIOTAP monitor device */
+} monitor_interface;
+
+/* Driver-wide monitor state; a single static instance (g_monitor) below. */
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+ .ndo_open = dhd_mon_if_open,
+ .ndo_stop = dhd_mon_if_stop,
+ .ndo_start_xmit = dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+ .ndo_set_mac_address = dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0"
+ * "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+	struct net_device *ndev_found = NULL;
+
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+
+	/* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0",
+	 * Once mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and corresponding mon
+	 * iface would be mon-p2p0-0.
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in monitor interface name. If it
+		 * it matches, then this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+			/* if p2p- is not present, then the IFNAMSIZ have reached and name
+			 * would have got reset. In this case, look for p2p0-x in mon-p2p0-x
+			 */
+			len = 0;
+		}
+		/* Longest-suffix match wins: prefer the candidate whose (trimmed)
+		 * name is the longest substring of the requested monitor name.
+		 * Returns NULL when nothing matches — callers must handle that.
+		 */
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
+
+/* Map a monitor net_device back to its slot in g_monitor.mon_if[],
+ * or NULL if @ndev is not a registered monitor interface.
+ */
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+/* .ndo_open — no per-open setup needed; trace-only stub, always succeeds. */
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+/* .ndo_stop — no per-close teardown needed; trace-only stub, always succeeds. */
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+/* .ndo_start_xmit for the monitor interface: strip the radiotap header and
+ * the 802.11 data header from an injected frame, rebuild an Ethernet-style
+ * dst/src MAC prefix in front of the payload, and hand the result to the
+ * real interface's dhd_start_xmit().  Malformed frames and non-data frames
+ * fall through to the fail: path and are silently dropped (returns 0).
+ */
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;		/* base 3-address 802.11 data header */
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	/* Validate the radiotap header before trusting its length field */
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))	/* only radiotap version 0 exists */
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Check if the QoS bit is set */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* Check if this is a Wireless Distribution System (WDS) frame
+		 * which has 4 MAC addresses
+		 */
+		/* NOTE(review): the two tests below use the raw __le16
+		 * frame_control rather than the byte-swapped frame_ctl —
+		 * correct on little-endian only; confirm this is intended.
+		 * 0x0080 = QoS-data subtype bit (adds a 2-byte QoS field);
+		 * 0x0300 = ToDS|FromDS both set (4-address frame, +6 bytes).
+		 */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
+		 * for two MAC addresses
+		 */
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+		PKTSETPRIO(skb, 0);
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+/* .ndo_set_rx_mode / .ndo_set_multicast_list — trace-only stub; multicast
+ * filtering is not implemented for the monitor interface.
+ */
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+			ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+/* .ndo_set_mac_address — trace-only stub; the new address in @addr is
+ * ignored and 0 is returned unconditionally.
+ */
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+			ndev->name, mon_if->real_ndev->name);
+	}
+	return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+/*
+ * Create and register a monitor net_device named @name and bind it to the
+ * matching real interface.  On success *new_ndev receives the registered
+ * device and 0 is returned; otherwise a negative errno is returned and no
+ * device is left allocated.  register_netdevice() requires the caller to
+ * hold rtnl_lock.
+ *
+ * Fix: the final log line dereferenced real_ndev->name unconditionally,
+ * but lookup_real_netdev() returns NULL when no real interface matches,
+ * which would oops.  Parameters are now also validated before being fed
+ * to the trace printk.
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+	int i;
+	int idx = -1;
+	int ret = 0;
+	struct net_device* ndev = NULL;
+	dhd_linux_monitor_t **dhd_mon;
+
+	if (!name || !new_ndev) {
+		MON_PRINT("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_monitor.lock);
+
+	MON_TRACE("enter, if name: %s\n", name);
+
+	/*
+	 * Find a vacancy
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (g_monitor.mon_if[i].mon_ndev == NULL) {
+			idx = i;
+			break;
+		}
+	if (idx == -1) {
+		MON_PRINT("exceeds maximum interfaces\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* priv area holds a back-pointer to g_monitor */
+	ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+	if (!ndev) {
+		MON_PRINT("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+	strncpy(ndev->name, name, IFNAMSIZ);
+	ndev->name[IFNAMSIZ - 1] = 0;
+	ndev->netdev_ops = &dhd_mon_if_ops;
+
+	ret = register_netdevice(ndev);
+	if (ret) {
+		MON_PRINT(" register_netdevice failed (%d)\n", ret);
+		goto out;
+	}
+
+	*new_ndev = ndev;
+	g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+	g_monitor.mon_if[idx].mon_ndev = ndev;
+	g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+	dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+	*dhd_mon = &g_monitor;
+	g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+	MON_PRINT("net device returned: 0x%p\n", ndev);
+	/* lookup_real_netdev() may legitimately fail — don't deref NULL */
+	if (g_monitor.mon_if[idx].real_ndev)
+		MON_PRINT("found a matched net device, name %s\n",
+			g_monitor.mon_if[idx].real_ndev->name);
+	else
+		MON_PRINT("no matching real net device found for %s\n", name);
+
+out:
+	if (ret && ndev)
+		free_netdev(ndev);
+
+	mutex_unlock(&g_monitor.lock);
+	return ret;
+
+}
+
+/* Unregister and free the monitor interface matching @ndev (either the
+ * monitor device itself or its bound real device).  Returns 0 always;
+ * -EINVAL only for a NULL argument.
+ */
+int dhd_del_monitor(struct net_device *ndev)
+{
+	int i;
+	bool rollback_lock = false;
+	if (!ndev)
+		return -EINVAL;
+	mutex_lock(&g_monitor.lock);
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev ||
+			g_monitor.mon_if[i].real_ndev == ndev) {
+			g_monitor.mon_if[i].real_ndev = NULL;
+			/* unregister_netdev() takes rtnl internally, so drop
+			 * it here if the caller already holds it, and retake
+			 * it below.  NOTE(review): rtnl_is_locked() only says
+			 * *someone* holds rtnl, not necessarily this thread —
+			 * confirm against the callers.
+			 */
+			if (rtnl_is_locked()) {
+				rtnl_unlock();
+				rollback_lock = true;
+			}
+			unregister_netdev(g_monitor.mon_if[i].mon_ndev);
+			free_netdev(g_monitor.mon_if[i].mon_ndev);
+			g_monitor.mon_if[i].mon_ndev = NULL;
+			g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+			break;
+		}
+	}
+	if (rollback_lock) {
+		rtnl_lock();
+		rollback_lock = false;
+	}
+
+	/* monitor_state is global: this check reports "not found" based on
+	 * the last recorded state rather than this call's outcome.
+	 */
+	if (g_monitor.monitor_state !=
+		MONITOR_STATE_INTERFACE_DELETED)
+		MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n",
+			ndev);
+	mutex_unlock(&g_monitor.lock);
+
+	return 0;
+}
+
+/* One-time initialisation of the global monitor state: records the DHD
+ * public handle and initialises the lock.  Idempotent — a second call
+ * while already initialised is a no-op.  Always returns 0.
+ */
+int dhd_monitor_init(void *dhd_pub)
+{
+	if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+		g_monitor.dhd_pub = dhd_pub;
+		mutex_init(&g_monitor.lock);
+		g_monitor.monitor_state = MONITOR_STATE_INIT;
+	}
+	return 0;
+}
+
+/* Tear down every registered monitor interface and return the global
+ * state to MONITOR_STATE_DEINIT.  Idempotent; always returns 0.
+ */
+int dhd_monitor_uninit(void)
+{
+	int i;
+	struct net_device *ndev;
+	bool rollback_lock = false;
+	mutex_lock(&g_monitor.lock);
+	if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			ndev = g_monitor.mon_if[i].mon_ndev;
+			if (ndev) {
+				/* unregister_netdev() takes rtnl itself; drop
+				 * and retake it around the call if held (same
+				 * caveat as in dhd_del_monitor).
+				 */
+				if (rtnl_is_locked()) {
+					rtnl_unlock();
+					rollback_lock = true;
+				}
+				unregister_netdev(ndev);
+				free_netdev(ndev);
+				g_monitor.mon_if[i].real_ndev = NULL;
+				g_monitor.mon_if[i].mon_ndev = NULL;
+				if (rollback_lock) {
+					rtnl_lock();
+					rollback_lock = false;
+				}
+			}
+		}
+		g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+	}
+	mutex_unlock(&g_monitor.lock);
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c
new file mode 100644
index 00000000000000..f83df791e13357
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.c
@@ -0,0 +1,368 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+#define WLDEV_ERROR(args) \
+ do { \
+ printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \
+ printk args; \
+ } while (0)
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+/* Issue a WLC_* ioctl to the dongle via the DHD local ioctl entry point.
+ * @arg/@len describe the in/out buffer; @set selects set (TRUE) vs get.
+ * Returns whatever dhd_ioctl_entry_local() returns.
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	s32 ret = 0;
+	struct wl_ioctl ioc;
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+	return ret;
+}
+
+/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+	s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	/* Thin wrapper around bcm_mkiovar(); returns the formatted length
+	 * (presumably 0 on failure — confirm against bcm_mkiovar).
+	 */
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+/* Read a named iovar: format the request into @buf, issue WLC_GET_VAR and
+ * return the result in @buf.  @buf_sync, if non-NULL, serialises use of the
+ * shared buffer.
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+
+/* Write a named iovar: format name+param into @buf and issue WLC_SET_VAR.
+ * NOTE(review): the return of wldev_mkiovar() is not checked — a failed
+ * format would issue the ioctl with length 0.
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+/* Set an s32-valued iovar using a small on-stack scratch buffer. */
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);	/* host to dongle byte order (identity here) */
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), NULL);
+}
+
+
+/* Get an s32-valued iovar; on success *pval receives the value converted
+ * from dongle byte order.
+ */
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+/* Format a bsscfg-indexed iovar buffer as
+ *   "bsscfg:" <name> '\0' <bssidx:u32> <param...>
+ * For bssidx 0 this degenerates to a plain (non-indexed) iovar.
+ * Returns the total formatted length, or BCME_BUFTOOSHORT if @iovar_buf
+ * cannot hold it.
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	if (bssidx == 0) {
+		return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+			(s8 *) iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
+
+/* bsscfg-indexed variant of wldev_iovar_getbuf(): format the indexed
+ * request into @buf and issue WLC_GET_VAR.
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+
+}
+
+/* bsscfg-indexed variant of wldev_iovar_setbuf().
+ * NOTE(review): if wldev_mkiovar_bsscfg() fails it returns the negative
+ * BCME_BUFTOOSHORT, which is then passed as the ioctl length unchecked.
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+/* Set an s32-valued, bsscfg-indexed iovar. */
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+/* Get an s32-valued, bsscfg-indexed iovar; on success *pval receives the
+ * value converted from dongle byte order.
+ */
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/* Read the current link rate into *plink_speed, converted from the
+ * dongle's 500 Kbps units to Kbps.
+ * NOTE(review): returns -ENOMEM for a NULL out-pointer — -EINVAL would be
+ * more conventional, but this matches the sibling getters below.
+ */
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+	if (unlikely(error))
+		return error;
+
+	/* Convert internal 500Kbps to Kbps */
+	*plink_speed *= 500;
+	return error;
+}
+
+/* Read the current RSSI into *prssi via WLC_GET_RSSI (scb_val_t.val). */
+int wldev_get_rssi(
+	struct net_device *dev, int *prssi)
+{
+	scb_val_t scb_val;
+	int error;
+
+	if (!prssi)
+		return -ENOMEM;
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+	if (unlikely(error))
+		return error;
+
+	*prssi = dtoh32(scb_val.val);
+	return error;
+}
+
+/* Read the currently associated SSID into *pssid and fix up SSID_len
+ * byte order.
+ */
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+/* Read the current radio band into *pband.  NOTE(review): unlike the
+ * sibling getters, @pband is not NULL-checked before use.
+ */
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+	return error;
+}
+
+/* Set the radio band; only AUTO, 5G and 2G are accepted.  Returns the
+ * ioctl result, or -1 for an unsupported band value.
+ */
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1);
+	}
+	return error;
+}
+
+/* Program the regulatory country code into the dongle.  If the requested
+ * code differs from the current one (or the current one cannot be read),
+ * the STA is first disassociated, the customized country spec is looked
+ * up, the "country" iovar is set, and the bus layer is notified.
+ * Returns 0 on success or a negative error.
+ */
+int wldev_set_country(
+	struct net_device *dev, char *country_code)
+{
+	int error = -1;
+	wl_country_t cspec = {{0}, 0, {0}};
+	scb_val_t scbval;
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	if (!country_code)
+		return error;
+
+	error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec),
+		smbuf, sizeof(smbuf), NULL);
+	if (error < 0)
+		WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+
+	/* Skip the whole sequence only when the dongle already reports the
+	 * requested code.
+	 */
+	if ((error < 0) ||
+		(strncmp(country_code, smbuf, WLC_CNTRY_BUF_SZ) != 0)) {
+		bzero(&scbval, sizeof(scb_val_t));
+		error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), 1);
+		if (error < 0) {
+			WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+				__FUNCTION__, error));
+			return error;
+		}
+	}
+	cspec.rev = -1;	/* let get_customized_country_code pick the revision */
+	memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+	memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+	get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+	error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+		smbuf, sizeof(smbuf), NULL);
+	if (error < 0) {
+		WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+		return error;
+	}
+	dhd_bus_country_set(dev, &cspec);
+	WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
+		__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+	return 0;
+}
diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h
new file mode 100644
index 00000000000000..16a071f5e5a69c
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wldev_common.h
@@ -0,0 +1,110 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2012, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or
+ * netdev_ops->ndo_do_ioctl in new kernels)
+ * @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+ struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve named IOVARs, this function calls wl_dev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs, this function calls wl_dev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+ struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+ struct net_device *dev, s8 *iovar, s32 *pval);
+
+/** The following function can be implemented if there is a need for bsscfg
+ * indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+ const s8 *iovar_name, s8 *param, s32 paramlen,
+ s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ * WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
+ * WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+ struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec);
+extern int wldev_set_country(struct net_device *dev, char *country_code);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+ int max, int *bytes_left);
+
+/* Get the link speed from dongle, speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/parrot/Kconfig b/drivers/parrot/Kconfig
new file mode 100644
index 00000000000000..30b57cd2b7fa6a
--- /dev/null
+++ b/drivers/parrot/Kconfig
@@ -0,0 +1,30 @@
+menu "Parrot drivers"
+source "drivers/parrot/serial/Kconfig"
+source "drivers/parrot/block/Kconfig"
+source "drivers/parrot/nand/Kconfig"
+source "drivers/parrot/i2c/Kconfig"
+source "drivers/parrot/video/Kconfig"
+source "drivers/parrot/media/Kconfig"
+source "drivers/parrot/uio/Kconfig"
+source "drivers/parrot/gpu/Kconfig"
+source "drivers/parrot/dma/Kconfig"
+source "drivers/parrot/net/Kconfig"
+source "drivers/parrot/mmc/Kconfig"
+source "drivers/parrot/regulator/Kconfig"
+source "drivers/parrot/mfd/Kconfig"
+source "drivers/parrot/rtc/Kconfig"
+source "drivers/parrot/gpio/Kconfig"
+source "drivers/parrot/pwm/Kconfig"
+source "drivers/parrot/spi/Kconfig"
+source "drivers/parrot/sound/p7_aai/Kconfig"
+source "drivers/parrot/pinctrl/Kconfig"
+source "drivers/parrot/usb/serial/blackberry/Kconfig"
+source "drivers/parrot/input/Kconfig"
+source "drivers/parrot/input/touchscreen/Kconfig"
+source "drivers/parrot/gator/Kconfig"
+source "drivers/parrot/iio/Kconfig"
+source "drivers/parrot/crypto/Kconfig"
+source "drivers/parrot/power/Kconfig"
+source "drivers/parrot/lttng/Kconfig"
+source "drivers/parrot/cpufreq/Kconfig"
+endmenu
diff --git a/drivers/parrot/Makefile b/drivers/parrot/Makefile
new file mode 100644
index 00000000000000..432ef8b4dd064e
--- /dev/null
+++ b/drivers/parrot/Makefile
@@ -0,0 +1,32 @@
+#add parrot drivers here
+obj-y += pwm/
+obj-y += serial/
+obj-y += block/
+obj-y += usb/serial/blackberry/
+obj-y += i2c/
+obj-y += video/
+obj-y += media/
+obj-y += uio/
+obj-y += gpu/
+#obj-y += dma/
+obj-y += net/
+obj-y += mmc/
+obj-y += regulator/
+obj-y += spi/
+obj-y += gpio/
+obj-y += sound/p7_aai/
+obj-y += mfd/
+obj-y += rtc/
+obj-y += pwm/
+obj-y += pinctrl/
+obj-y += input/
+obj-y += gator/
+obj-y += iio/
+obj-y += clk/
+obj-y += crypto/
+obj-y += power/
+obj-y += lttng/
+obj-y += cpufreq/
+# Without this, built-in.o won't be created when it's empty, and the
+# final vmlinux link will fail.
+obj-y += dummy.o
diff --git a/drivers/parrot/block/Kconfig b/drivers/parrot/block/Kconfig
new file mode 100644
index 00000000000000..92e140e7dd18f8
--- /dev/null
+++ b/drivers/parrot/block/Kconfig
@@ -0,0 +1,11 @@
+config BLK_DEV_PHYBRD
+	tristate "RAM block device support for physically independent ram block"
+ depends on BLOCK
+ default MACH_PARROT_ZEBU
+ help
+ This driver is for Parrot7 Zebu emulation environment. It helps to set up
+	  a file system on a physically independent ram block which allows modification
+ of file system (ddr partial dump -> file system mount and modification
+ on host machine -> ddr reinsertion -> execution resumes) without needing to
+ restart the linux boot sequence. This will accelerate the linux debugging
+ process under Zebu environment.
diff --git a/drivers/parrot/block/Makefile b/drivers/parrot/block/Makefile
new file mode 100644
index 00000000000000..d086673a2d86a2
--- /dev/null
+++ b/drivers/parrot/block/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BLK_DEV_PHYBRD) += phy-brd.o
diff --git a/drivers/parrot/block/phy-brd.c b/drivers/parrot/block/phy-brd.c
new file mode 100644
index 00000000000000..e8a4059650c818
--- /dev/null
+++ b/drivers/parrot/block/phy-brd.c
@@ -0,0 +1,209 @@
+/**
+ * linux/driver/parrot/block/phy-brd.c - Physical Block RamDisk implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Guangye Tian <guangye.tian@parrot.com>
+ * date: 26-Jan-2011
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+#include <mach/memory.h>
+#include "phy-brd.h"
+
+#define KERNEL_SECTOR_SIZE 512
+
+static int brd_phy_major = 0;
+static int hardsect_size = 512;
+
+/* Reserve last 128MB of system memory as ram disk */
+static unsigned long brd_ram_start;
+static unsigned int brd_ram_size; /* ram block size in byte */
+
+struct brd_phy_device {
+ int size; /* Device size in bytes */
+ void __iomem *data;
+ struct resource *res;
+ struct request_queue *queue;
+ struct gendisk *gd;
+ spinlock_t lock;
+};
+
+static struct brd_phy_device *brd_phy_dev = NULL;
+
+/*
+ * Handle an I/O request
+ */
+static void brd_phy_transfer(struct brd_phy_device *dev, unsigned long sector,
+ unsigned long nsect, char *buffer, int write)
+{
+ unsigned long offset = sector * KERNEL_SECTOR_SIZE;
+ unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;
+
+ if ((offset + nbytes) > dev->size) {
+ printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
+ return;
+ }
+
+ if (write)
+ memcpy(dev->data + offset, buffer, nbytes);
+ else
+ memcpy(buffer, dev->data + offset, nbytes);
+}
+
+/*
+ * Transfer a single BIO
+ */
+static int brd_phy_xfer_bio(struct brd_phy_device *dev, struct bio *bio)
+{
+ int i;
+ struct bio_vec *bvec;
+ sector_t sector = bio->bi_sector;
+
+ bio_for_each_segment(bvec, bio, i) {
+ char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
+ brd_phy_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
+ buffer, bio_data_dir(bio) == WRITE);
+ sector += bio_cur_bytes(bio) >> 9;
+ __bio_kunmap_atomic(bio, KM_USER0);
+ }
+ return 0;
+}
+
+static void brd_phy_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct brd_phy_device *dev = q->queuedata;
+ int status;
+
+ status = brd_phy_xfer_bio(dev, bio);
+
+ bio_endio(bio, status);
+}
+
+/* Minimal getgeo: fills in nothing and merely satisfies callers of the
+ * HDIO_GETGEO ioctl; the stray global "getgeo" function pointer was unused. */
+int brd_phy_getgeo(struct block_device *dev, struct hd_geometry *geo)
+{
+	return 0;
+}
+
+static const struct block_device_operations brd_phy_fops = {
+ .owner = THIS_MODULE,
+ .getgeo = brd_phy_getgeo
+};
+
+static int __devinit brd_phy_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	struct resource* const res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENOENT;
+
+	brd_phy_major = register_blkdev(brd_phy_major, PHYBRD_DRV_NAME);
+	if (brd_phy_major < 0)
+		return brd_phy_major;
+
+	brd_phy_dev = kzalloc(sizeof(struct brd_phy_device), GFP_KERNEL);
+	if (brd_phy_dev == NULL) {
+		err = -ENOMEM;
+		goto out_unregister;
+	}
+
+	brd_ram_start = res->start;
+	brd_ram_size = resource_size(res);
+	brd_phy_dev->res = request_mem_region(brd_ram_start, brd_ram_size, PHYBRD_DRV_NAME);
+	if (brd_phy_dev->res == NULL) {
+		err = -EBUSY;
+		goto no_mem_region;
+	}
+
+	brd_phy_dev->data = ioremap_wc(brd_ram_start, brd_ram_size);
+	if (brd_phy_dev->data == NULL) {
+		err = -EIO;
+		goto no_ioremap;
+	}
+
+	brd_phy_dev->size = brd_ram_size;
+
+	spin_lock_init(&brd_phy_dev->lock);
+
+	brd_phy_dev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (brd_phy_dev->queue == NULL) {
+		err = -ENOMEM;
+		goto no_request_queue;
+	}
+	blk_queue_make_request(brd_phy_dev->queue, brd_phy_make_request);
+	blk_queue_physical_block_size(brd_phy_dev->queue, hardsect_size);
+	brd_phy_dev->queue->queuedata = brd_phy_dev;
+
+	brd_phy_dev->gd = alloc_disk(1);
+	if (brd_phy_dev->gd == NULL) {
+		err = -ENOMEM;
+		/* unlike the path above, the request queue exists here */
+		goto no_disk;
+	}
+
+	brd_phy_dev->gd->major = brd_phy_major;
+	brd_phy_dev->gd->first_minor = 0;
+	brd_phy_dev->gd->fops = &brd_phy_fops;
+	brd_phy_dev->gd->queue = brd_phy_dev->queue;
+	brd_phy_dev->gd->private_data = brd_phy_dev;
+	snprintf(brd_phy_dev->gd->disk_name, sizeof(brd_phy_dev->gd->disk_name), "phyram");
+	set_capacity(brd_phy_dev->gd, brd_ram_size/KERNEL_SECTOR_SIZE);
+	add_disk(brd_phy_dev->gd);
+	dev_info(&pdev->dev, "loaded\n");
+	return 0;
+
+no_disk:
+	blk_cleanup_queue(brd_phy_dev->queue);
+no_request_queue:
+	iounmap(brd_phy_dev->data);
+no_ioremap:
+	release_mem_region(brd_phy_dev->res->start, resource_size(brd_phy_dev->res));
+no_mem_region:
+	kfree(brd_phy_dev);
+out_unregister:
+	unregister_blkdev(brd_phy_major, PHYBRD_DRV_NAME);
+	return err;
+}
+
+static int __devexit brd_phy_remove(struct platform_device *pdev)
+{
+	if (brd_phy_dev->gd) {
+		del_gendisk(brd_phy_dev->gd);
+		put_disk(brd_phy_dev->gd);
+	}
+	if (brd_phy_dev->queue) {
+		blk_cleanup_queue(brd_phy_dev->queue);
+	}
+
+	iounmap(brd_phy_dev->data);
+	release_mem_region(brd_phy_dev->res->start, resource_size(brd_phy_dev->res));
+	unregister_blkdev(brd_phy_major, PHYBRD_DRV_NAME);
+	kfree(brd_phy_dev);
+	return 0;
+}
+
+static struct platform_driver brd_phy_driver = {
+ .probe = brd_phy_probe,
+ .remove = __devexit_p(brd_phy_remove),
+ .driver = {
+ .name = PHYBRD_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(brd_phy_driver);
+
+MODULE_AUTHOR("Guangye Tian <guangye.tian@parrot.com>");
+MODULE_DESCRIPTION("Physical block ramdisk driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/block/phy-brd.h b/drivers/parrot/block/phy-brd.h
new file mode 100644
index 00000000000000..05be847d6861a3
--- /dev/null
+++ b/drivers/parrot/block/phy-brd.h
@@ -0,0 +1,22 @@
+/**
+ * linux/driver/parrot/block/phy-brd.h - Physical Block RamDisk interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 18-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _PHY_BRD_H
+#define _PHY_BRD_H
+
+#if defined(CONFIG_BLK_DEV_PHYBRD) || \
+ defined(CONFIG_BLK_DEV_PHYBRD_MODULE)
+
+#define PHYBRD_DRV_NAME "phy-brd"
+
+#endif
+
+#endif
diff --git a/drivers/parrot/clk/Makefile b/drivers/parrot/clk/Makefile
new file mode 100644
index 00000000000000..43cc9679812a00
--- /dev/null
+++ b/drivers/parrot/clk/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_COMMON_CLK) += clock.o
+obj-$(CONFIG_COMMON_CLK) += clk-pll.o
+obj-$(CONFIG_COMMON_CLK) += clk-avipll.o
+obj-$(CONFIG_COMMON_CLK) += clk-gbc.o
+obj-$(CONFIG_COMMON_CLK) += clk-xin.o
+obj-$(CONFIG_COMMON_CLK) += clk-sd.o
diff --git a/drivers/parrot/clk/clk-avipll.c b/drivers/parrot/clk/clk-avipll.c
new file mode 100644
index 00000000000000..09b3392be043ee
--- /dev/null
+++ b/drivers/parrot/clk/clk-avipll.c
@@ -0,0 +1,272 @@
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include "p7clk-provider.h"
+#include "clock.h"
+
+
+#define to_p7clk_avipll(_hw) container_of(_hw, struct clk_p7_avipll, hw)
+
+/**
+ * p7_prepare_avipll() - Prepare a two-level PLL
+ * @clk_hw: a pointer to @clk_hw in a @clk_p7_avipll.
+ */
+static int p7_prepare_avipll(struct clk_hw* clk_hw)
+{
+ struct clk_p7_avipll const* const pll = to_p7clk_avipll(clk_hw);
+ unsigned long const master_msk = BIT(pll->lock_bit_idx);
+ unsigned long const slave_msk = BIT(pll->lock_bit_idx + 1);
+ unsigned long const status_addr = pll->lock_reg;
+ unsigned long const master_addr = pll->pll_conf_reg;
+ unsigned long const slave_addr = master_addr + sizeof(u32);
+ union p7_pll_reg master = { .word = readl(master_addr) };
+ union p7_pll_reg slave = { .word = readl(slave_addr) };
+ int err = 0;
+
+ if (! master.fields.reset &&
+ (readl(status_addr) & master_msk)) {
+ /* Master PLL is powered on, and locked: already enabled.
+ * Note that the pll should not be gated at this point.
+ */
+ if (! slave.fields.reset &&
+ (readl(status_addr) & slave_msk))
+ /* Slave is also enabled: nothing to do. */
+ return err;
+ }
+ else {
+ master.fields.reset = 0; /* power on */
+ p7_write_pll_conf(master_addr, master.word);
+ if (! master.fields.bypass) {
+ err = p7_activate_pll(status_addr, master_msk);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Setup slave PLL. */
+ slave.fields.reset = 0; /* power on */
+ p7_write_pll_conf(slave_addr, slave.word);
+ if (! slave.fields.bypass)
+ err = p7_activate_pll(status_addr, slave_msk);
+
+ return err;
+}
+
+/**
+ * p7_unprepare_avipll() - Unrepare a two-level PLL
+ * @clk_hw: a pointer to @clk_hw in a @clk_p7_avipll.
+ */
+static void p7_unprepare_avipll(struct clk_hw* clk_hw)
+{
+ struct clk_p7_avipll const* const pll = to_p7clk_avipll(clk_hw);
+ unsigned long const master_addr = pll->pll_conf_reg;
+ unsigned long const slave_addr = master_addr + sizeof(u32);
+ union p7_pll_reg master = { .word = readl(master_addr) };
+ union p7_pll_reg slave = { .word = readl(slave_addr) };
+
+ /* power off slave PLL. */
+ slave.fields.reset = 1;
+ writel(slave.word, slave_addr);
+
+ /* Power off master PLL. */
+ master.fields.reset = 1;
+ writel(master.word, master_addr);
+}
+
+/**
+ * p7_recalc_avipll_rate() - Return two level/AVI PLL rate.
+ * @clk_hw: a pointer to @clk_hw in a @clk_p7_avipll.
+ * @parent_rate: rate of the parent clock.
+ */
+static unsigned long p7_recalc_avipll_rate(struct clk_hw* clk_hw,
+                                           unsigned long parent_rate)
+{
+	struct clk_p7_avipll * const pll = to_p7clk_avipll(clk_hw);
+	unsigned long const master_addr = pll->pll_conf_reg;
+	unsigned long const slave_addr = master_addr + sizeof(u32);
+	union p7_pll_reg master;
+	union p7_pll_reg slave;
+	unsigned long rate;
+	unsigned long flags = 0;
+	/* pll->lock may be NULL (p7_set_avipll_rate checks it): guard here too */
+	if (pll->lock)
+		spin_lock_irqsave(pll->lock, flags);
+	master.word = readl(master_addr);
+	slave.word = readl(slave_addr);
+	if (pll->lock)
+		spin_unlock_irqrestore(pll->lock, flags);
+	rate = p7_pll_rate(master, parent_rate);
+	rate = p7_pll_rate(slave, rate);
+	return rate;
+}
+
+/**
+ * p7_set_avipll_rate() - Set two level/AVI PLL frequency.
+ * @clk_hw: a pointer to @clk_hw in a @clk_p7_avipll.
+ * @rate: frequency to configure.
+ * @parent_rate: parent clock's rate.
+ *
+ * Warning: frequency passed in argument will likely be rounded to a possible
+ * PLL hierarchy operation point.
+ */
+static int p7_set_avipll_rate(struct clk_hw* clk_hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_p7_avipll * const pll = to_p7clk_avipll(clk_hw);
+ unsigned long const master_addr = pll->pll_conf_reg;
+ unsigned long const slave_addr = master_addr + sizeof(u32);
+ union p7_pll_reg master, new_master;
+ union p7_pll_reg slave, new_slave;
+ int idx, gate;
+ unsigned long flags = 0;
+ int err = 0;
+
+ idx = p7_get_clkcfg(pll->cfg_idx, rate, pll->cfg_nr);
+ if (idx < 0)
+ return idx;
+ new_master.word = pll->master_cfg[idx];
+ new_slave.word = pll->slave_cfg[idx];
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+ master.word = readl(master_addr);
+ slave.word = readl(slave_addr);
+
+
+ if (master.fields.cfg == new_master.fields.cfg &&
+ master.fields.bypass == new_master.fields.bypass &&
+ slave.fields.cfg == new_slave.fields.cfg &&
+ slave.fields.bypass == new_slave.fields.bypass)
+ goto unlock;
+
+ gate = !! master.fields.enable;
+ if (gate) {
+ /*
+ * Drivers are responsible for calling clk_disable (gating the
+ * clock) before changing the rate. We do as sanity check here,
+ * but we shouldn't even look at this bit here...
+ */
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * write the config in both PLL and activate after
+ * the spinlock is released
+ */
+ if (master.fields.cfg != new_master.fields.cfg ||
+ master.fields.bypass != new_master.fields.bypass) {
+ master.fields.cfg = new_master.fields.cfg;
+ master.fields.bypass = new_master.fields.bypass;
+ p7_write_pll_conf(master_addr, master.word);
+ }
+
+ if (slave.fields.cfg != new_slave.fields.cfg ||
+ slave.fields.bypass != new_slave.fields.bypass) {
+ slave.fields.cfg = new_slave.fields.cfg;
+ slave.fields.bypass = new_slave.fields.bypass;
+ p7_write_pll_conf(slave_addr, slave.word);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ if (__clk_get_prepare_count(clk_hw->clk)) {
+ err = p7_activate_pll(pll->lock_reg, BIT(pll->lock_bit_idx));
+ if (err)
+ return err;
+ err = p7_activate_pll(pll->lock_reg, BIT(pll->lock_bit_idx + 1));
+ }
+ return err;
+
+unlock:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ return err;
+}
+
+/**
+ * p7_round_avipll_rate() - Round two level/AVI PLL frequency.
+ * @clk_hw: a pointer to @clk_hw in a @clk_p7_avipll.
+ * @rate: frequency to configure.
+ * @parent_rate: parent clock's rate pointer.
+ *
+ * Round rate to the closest supported value given the parent clock rate.
+ */
+static long p7_round_avipll_rate(struct clk_hw* clk_hw, unsigned long rate,
+ unsigned long* parent_rate)
+{
+ struct clk_p7_avipll* const pll = to_p7clk_avipll(clk_hw);
+ int const idx = p7_get_clkcfg(pll->cfg_idx,
+ rate,
+ pll->cfg_nr);
+ if (idx < 0)
+ return idx;
+
+ return (long) pll->cfg_idx[idx];
+}
+
+
+struct clk_ops const p7_clk_avipll_ops = {
+ .prepare = &p7_prepare_avipll,
+ .unprepare = &p7_unprepare_avipll,
+ .recalc_rate = &p7_recalc_avipll_rate,
+ .set_rate = &p7_set_avipll_rate,
+ .round_rate = &p7_round_avipll_rate,
+};
+
+struct clk *clk_register_avipll(struct device *dev, const char *name,
+		const char *parent_name, const unsigned long flags,
+		const unsigned long lock_reg, const u8 lock_bit_idx,
+		const unsigned long pll_reg, unsigned long const * const cfg_idx,
+		unsigned long const * const master_cfg, unsigned long const * const slave_cfg,
+		const size_t cfg_nr, spinlock_t *lock)
+{
+	struct clk_p7_avipll *pll;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	/* Allocate the avipll descriptor itself: the former
+	 * sizeof(struct clk_p7_pll) undersizes the allocation whenever
+	 * the two struct layouts differ. */
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+	if (!pll) {
+		pr_err("%s: could not allocate clk pll\n", __func__);
+		clk = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	init.name = name;
+	init.ops = &p7_clk_avipll_ops;
+	init.flags = flags | CLK_SET_RATE_GATE;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	pll->lock_reg = lock_reg;
+	pll->lock_bit_idx = lock_bit_idx;
+	pll->pll_conf_reg = pll_reg;
+	pll->cfg_idx = cfg_nr ? kmemdup(cfg_idx, cfg_nr * sizeof(unsigned long), GFP_KERNEL) : NULL;
+	pll->master_cfg = cfg_nr ? kmemdup(master_cfg, cfg_nr * sizeof(unsigned long), GFP_KERNEL) : NULL;
+	pll->slave_cfg = cfg_nr ? kmemdup(slave_cfg, cfg_nr * sizeof(unsigned long), GFP_KERNEL) : NULL;
+	pll->cfg_nr = cfg_nr;
+	pll->lock = lock;
+	pll->hw.init = &init;
+
+	if (cfg_nr && ((!pll->cfg_idx) || (!pll->master_cfg) || (!pll->slave_cfg))) {
+		pr_err("%s: could not allocate memory for pll info\n", __func__);
+		clk = ERR_PTR(-ENOMEM);
+		goto free_pll_content;
+	}
+
+	clk = clk_register(dev, &pll->hw);
+	if (! IS_ERR(clk))
+		return clk;
+
+free_pll_content:
+	/* kfree(NULL) is a no-op: no need to test the pointers first */
+	kfree(pll->cfg_idx);
+	kfree(pll->master_cfg);
+	kfree(pll->slave_cfg);
+	kfree(pll);
+err:
+	return clk;
+}
+EXPORT_SYMBOL(clk_register_avipll);
diff --git a/drivers/parrot/clk/clk-gbc.c b/drivers/parrot/clk/clk-gbc.c
new file mode 100644
index 00000000000000..53e36bf8349aa8
--- /dev/null
+++ b/drivers/parrot/clk/clk-gbc.c
@@ -0,0 +1,285 @@
+/*
+ * drivers/parrot/clk/clk-gbc.c
+ *
+ * Copyright (C) 2010-2013 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+//#define DEBUG
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "p7clk-provider.h"
+#include "clock.h"
+
+#define to_p7clk_gbc(_hw) container_of(_hw, struct clk_p7_gbc, hw)
+
+/**
+ * p7_enable_clkgbc() - Enable clock at the GBC level and reset child block if
+ * required.
+ * @gbc: GBC clock descriptor
+ */
+static int p7_enable_clkgbc(struct p7_clk_gbc const* gbc)
+{
+ /* /!\ WARNING /!\
+ *
+ * This code could be written using __raw_(read|write)l, however, due to
+ * a hardware bug, bursts to the AVI R2 GBC registers can cause a lock
+ * up of the AXI interconnect. Other revisions of the IP are not
+ * concerned.
+ */
+ u32 clk;
+ u32 clk_on;
+ u32 clk_off;
+
+ pr_devel("p7 gdc enable %lx\n", gbc->clk_reg);
+ if (!gbc->clk_reg)
+ return 0;
+
+ clk = readl(gbc->clk_reg);
+ clk_on = clk | gbc->clk_msk;
+ clk_off = clk & ~gbc->clk_msk;
+
+ if (gbc->reset_reg) {
+ u32 reset;
+ u32 reset_on;
+ u32 reset_off;
+
+ reset = readl(gbc->reset_reg);
+ reset_on = reset | (1 << gbc->reset_offset);
+ reset_off = reset & ~(1 << gbc->reset_offset);
+
+ if (clk == clk_on && reset == reset_off)
+ /* if the clock is already enabled and the IP is not
+ * reset we don't have anything left to do. */
+ return 0;
+
+ /* We have to disable the clock when we (un)assert the reset
+ * (see section 3.2.4: Clock and reset sequence of the user
+ * manual). */
+ writel(clk_off, gbc->clk_reg);
+
+ /* Force reset of the GBC child block */
+ writel(reset_on, gbc->reset_reg);
+
+ /* Enable clock to make the synchronous reset take effect */
+ writel(clk_on, gbc->clk_reg);
+ cpu_relax();
+ /* Then disable once again to de-reset the block */
+ writel(clk_off, gbc->clk_reg);
+ writel(reset_off, gbc->reset_reg);
+
+ /* At this point we're sure the block is correctly reset and the
+ * clock is disabled */
+ }
+
+ /* Enable clock */
+ writel(clk_on, gbc->clk_reg);
+
+ return 0;
+}
+
+/**
+ * p7_disable_clkgbc() - Disable clock at the GBC level.
+ * @gbc: GBC clock descriptor
+ */
+static void p7_disable_clkgbc(struct p7_clk_gbc const* gbc)
+{
+ pr_devel("p7 gdc disable %lx\n", gbc->clk_reg);
+ /* Disable clock. */
+ writel(readl(gbc->clk_reg) & ~gbc->clk_msk, gbc->clk_reg);
+}
+
+/**
+ * p7_gbc_enable_clk() - Enable a gbc clock.
+ * @clk_hw: a pointer to a clk_hw embedded in a @clk_p7_gbc struct.
+ */
+static int p7_gbc_enable_clk(struct clk_hw* clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+ int ret;
+
+ if (clk->lock)
+ spin_lock(clk->lock);
+ ret = p7_enable_clkgbc(&clk->gbc);
+ if (clk->lock)
+ spin_unlock(clk->lock);
+
+ return ret;
+}
+
+/**
+ * p7_gbc_disable_clk() - Disable a gbc clock.
+ * @clk_hw: a pointer to a clk_hw embedded in a @clk_p7_gbc struct.
+ */
+static void p7_gbc_disable_clk(struct clk_hw* clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+
+ if (clk->lock)
+ spin_lock(clk->lock);
+ p7_disable_clkgbc(&clk->gbc);
+ if (clk->lock)
+ spin_unlock(clk->lock);
+}
+
+static int p7_gbc_is_enabled_clk(struct clk_hw *clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+ struct p7_clk_gbc const* gbc = &clk->gbc;
+ int ret = 0;
+
+ /* this callback is used to disable unused clock, return
+ 1 if at least one bit is enabled
+ */
+ if (readl(gbc->clk_reg) & gbc->clk_msk)
+ ret = 1;
+
+ pr_devel("p7 is gbc %s %lx\n", ret?"enabled":"disabled",
+ gbc->clk_reg);
+ return ret;
+}
+
+static int p7_gbc_is_prepared_clk(struct clk_hw *clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+ struct p7_clk_gbc const* gbc = &clk->gbc;
+ int ret = 0;
+ u32 power;
+
+ if (!gbc->power_reg)
+ return 1;
+
+ power = readl(gbc->power_reg);
+
+ if (power & (P7_POWER_ON << (gbc->reset_offset * 4)))
+ ret = 1;
+ else if (power & (P7_POWER_OFF << (gbc->reset_offset * 4)))
+ ret = 0;
+ else {
+ ret = 1;
+ pr_warn("p7 gbc invalid prepared state 0x%x %lx\n",
+ power, gbc->clk_reg);
+ }
+
+ pr_devel("p7 is gbc %s %lx\n", ret?"prepared":"unprepared",
+ gbc->clk_reg);
+
+ return ret;
+}
+
+static int p7_gbc_prepare_clk(struct clk_hw *clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+ struct p7_clk_gbc const* gbc = &clk->gbc;
+ unsigned int timeout = 0;
+ u32 power;
+
+ if (!gbc->power_reg)
+ return 0;
+
+ pr_devel("p7 gbc prepare 0x%lx\n", gbc->power_reg);
+ power = readl(gbc->power_reg);
+ if ((power & 0x6) == 0)
+ pr_warn("p7 gbc invalid prepared state 0x%x %lx\n",
+ power, gbc->clk_reg);
+
+ writel(power |
+ (P7_REQUEST_POWER_ON << (gbc->reset_offset * 4)),
+ gbc->power_reg);
+ /*wait until power on flag is on*/
+ do {
+ if (timeout++ > 0x8ffff) {
+ pr_warn("p7 gbc prepare timeout 0x%lx = 0x%x\n",
+ gbc->power_reg, readl(gbc->power_reg));
+ return -ENODEV;
+ }
+ cpu_relax();
+ } while (!(readl(gbc->power_reg) & (P7_POWER_ON << (gbc->reset_offset * 4))));
+
+ return 0;
+}
+
+static void p7_gbc_unprepare_clk(struct clk_hw *clk_hw)
+{
+ struct clk_p7_gbc *clk = to_p7clk_gbc(clk_hw);
+ struct p7_clk_gbc const* gbc = &clk->gbc;
+ unsigned int timeout = 0;
+
+ if (!gbc->power_reg)
+ return;
+
+ pr_devel("p7 gbc unprepare 0x%lx\n", gbc->power_reg);
+ writel(readl(gbc->power_reg) &
+ ~(P7_REQUEST_POWER_ON << (gbc->reset_offset * 4)),
+ gbc->power_reg);
+ /*wait until power off flag is on*/
+ do {
+ if (timeout++ > 0x8ffff) {
+ pr_warn("p7 gbc unprepare timeout 0x%lx = 0x%x\n",
+ gbc->power_reg, readl(gbc->power_reg));
+ break;
+ }
+ cpu_relax();
+ } while (!(readl(gbc->power_reg) & (P7_POWER_OFF << (gbc->reset_offset * 4))));
+}
+
+
+struct clk_ops const p7_clk_gbc_ops = {
+ .enable = p7_gbc_enable_clk,
+ .disable = p7_gbc_disable_clk,
+ .is_enabled = p7_gbc_is_enabled_clk,
+ .prepare = p7_gbc_prepare_clk,
+ .unprepare = p7_gbc_unprepare_clk,
+ .is_prepared= p7_gbc_is_prepared_clk,
+};
+
+struct clk *clk_register_gbc(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long clk_reg, u32 clk_msk,
+ unsigned long reset_reg, u32 reset_offset,
+ unsigned long power_reg,
+ spinlock_t* lock)
+{
+ struct clk_p7_gbc *gbc;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gbc = kzalloc(sizeof(struct clk_p7_gbc), GFP_KERNEL);
+ if (!gbc) {
+ pr_err("%s: could not allocate gbc clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &p7_clk_gbc_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ gbc->gbc.clk_reg = clk_reg;
+ gbc->gbc.clk_msk = clk_msk;
+ gbc->gbc.power_reg = power_reg;
+ gbc->gbc.reset_reg = reset_reg;
+ gbc->gbc.reset_offset = reset_offset;
+ gbc->lock = lock;
+ gbc->hw.init = &init;
+
+ pr_devel("p7 gbc register %s 0x%lx 0x%lx\n", name, reset_reg, power_reg);
+ /* if there is no reset : use clk_register_gate ! */
+ WARN_ON(reset_reg == 0);
+
+ clk = clk_register(dev, &gbc->hw);
+
+ if (IS_ERR(clk))
+ kfree(gbc);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_register_gbc);
diff --git a/drivers/parrot/clk/clk-pll.c b/drivers/parrot/clk/clk-pll.c
new file mode 100644
index 00000000000000..aedcd47e445bc8
--- /dev/null
+++ b/drivers/parrot/clk/clk-pll.c
@@ -0,0 +1,321 @@
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include "p7clk-provider.h"
+#include "clock.h"
+
+
+#define to_p7clk_pll(_hw) container_of(_hw, struct clk_p7_pll, hw)
+
+/**
+ * p7_prepare_pll() - Prepare PLL
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ */
+static int p7_prepare_pll(struct clk_hw* clk_hw)
+{
+ struct clk_p7_pll const* const pll = to_p7clk_pll(clk_hw);
+ unsigned long const msk = BIT(pll->lock_bit_idx);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg curr = { .word = readl(addr) };
+ int err = 0;
+
+ pr_debug("p7_prepare_pll %p %x\n", clk_hw, curr.word);
+ if (curr.fields.reset || /* powered on ? */
+ ! (readl(pll->lock_reg) & msk)) { /* locked ? */
+ curr.fields.reset = 0;
+ p7_write_pll_conf(addr, curr.word);
+ if (curr.fields.bypass)
+ return err;
+
+ err = p7_activate_pll(pll->lock_reg, msk);
+ if (err) {
+ /*
+ * The PLL is supposed to be locked when they are not
+ * reset and not bypassed, but we may have to update
+ * the config bits to restart the lock sequence. Toggle
+ * the last bit of config field to do so.
+ */
+ curr.fields.cfg ^= 1;
+ p7_write_pll_conf(addr, curr.word);
+ curr.fields.cfg ^= 1;
+ p7_write_pll_conf(addr, curr.word);
+ err = p7_activate_pll(pll->lock_reg, msk);
+ }
+
+ }
+
+ return err;
+}
+
+/**
+ * p7_unprepare_pll() - Unprepare PLL
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ */
+static void p7_unprepare_pll(struct clk_hw* clk_hw)
+{
+ struct clk_p7_pll const* const pll = to_p7clk_pll(clk_hw);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg curr = { .word = readl(addr) };
+
+ pr_debug("p7_unprepare_pll %p\n", clk_hw);
+ /* power off */
+ curr.fields.reset = 1;
+ writel(curr.word, addr);
+}
+
+/**
+ * p7_enable_pll() - Enable PLL
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ *
+ * If the PLL is not gateable, no-op.
+ */
+static int p7_enable_pll(struct clk_hw *clk_hw)
+{
+ struct clk_p7_pll const* const pll = to_p7clk_pll(clk_hw);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg pll_conf;
+ unsigned long flags = 0;
+
+ pr_debug("p7_enable_pll %p\n", clk_hw);
+ if (! pll->gateable)
+ goto ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+ pll_conf.word = readl(addr);
+ if (pll_conf.fields.enable)
+ goto unlock;
+ pll_conf.fields.enable = 1;
+ writel(pll_conf.word, addr);
+
+unlock:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ret:
+ return 0;
+}
+
+/**
+ * p7_disable_pll() - Disable PLL
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ */
+static void p7_disable_pll(struct clk_hw *clk_hw)
+{
+ struct clk_p7_pll const* const pll = to_p7clk_pll(clk_hw);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg pll_conf;
+ unsigned long flags = 0;
+
+ pr_debug("p7_disable_pll %p\n", clk_hw);
+ if (! pll->gateable)
+ return;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+ pll_conf.word = readl(addr);
+ if (! pll_conf.fields.enable)
+ goto ret;
+ pll_conf.fields.enable = 0;
+ writel(pll_conf.word, addr);
+ret:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+/**
+ * p7_get_pll_rate() - Return master / one level pll rate in HZ.
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ * @parent_rate: rate of the parent clock
+ */
+static unsigned long p7_get_pll_rate(struct clk_hw* clk_hw,
+ unsigned long parent_rate)
+{
+ struct clk_p7_pll* const pll = to_p7clk_pll(clk_hw);
+ unsigned long rate;
+ union p7_pll_reg curr;
+
+ curr.word = readl(pll->pll_conf_reg);
+ rate = p7_pll_rate(curr, parent_rate);
+
+ pr_debug("p7_get_pll_rate %p rate=%lu (word=%x)\n", clk_hw, rate, curr.word);
+ return rate;
+}
+
+/**
+ * p7_set_pll_rate() - Set master / one level pll frequency in HZ.
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ * @rate: rate to configure in HZ.
+ * @parent_rate: rate of the parent clock
+ *
+ * Warning: frequency passed in argument will likely be rounded to a possible
+ * PLL operation point.
+ */
+static int p7_set_pll_rate(struct clk_hw* clk_hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_p7_pll *pll = to_p7clk_pll(clk_hw);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg curr, new;
+ int idx;
+ unsigned long flags = 0;
+ int err = 0;
+
+ pr_debug("p7_set_pll_rate %p rate=%lu parent=%lu\n",
+ clk_hw, rate, parent_rate);
+ idx = p7_get_clkcfg(pll->cfg_idx, rate, pll->cfg_nr);
+ if (idx < 0)
+ return idx;
+ new.word = pll->pll_cfg[idx];
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+ curr.word = readl(addr);
+ if (curr.fields.cfg == new.fields.cfg &&
+ curr.fields.bypass == new.fields.bypass)
+ goto unlock;
+
+ curr.fields.cfg = new.fields.cfg;
+ curr.fields.bypass = new.fields.bypass;
+ p7_write_pll_conf(addr, curr.word);
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ /*
+ * If the PLL is reset, the lock bit will never be set to 1. So
+ * we avoid locking if this operation doesn't make sense.
+ */
+ if ((!curr.fields.reset) && (!curr.fields.bypass))
+ err = p7_activate_pll(pll->lock_reg, BIT(pll->lock_bit_idx));
+ return err;
+
+unlock:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ return err;
+}
+
+/**
+ * p7_set_pll_rate() - Set master / one level pll frequency in HZ.
+ * @clk_hw: a @clk_hw in a @clk_p7_pll
+ * @rate: rate to configure in HZ.
+ * @parent_rate: pointer to the rate of the parent clock
+ */
+static long p7_round_pll_rate(struct clk_hw* clk_hw, unsigned long rate,
+ unsigned long* parent_rate)
+{
+ struct clk_p7_pll* const pll = to_p7clk_pll(clk_hw);
+ int const idx = p7_get_clkcfg(pll->cfg_idx,
+ rate,
+ pll->cfg_nr);
+ pr_debug("p7_round_pll_rate %p rate=%lu parent=%lu : idx=%d\n",
+ clk_hw, rate, *parent_rate, idx);
+ if (idx < 0)
+ return 0;
+
+ return (long) pll->cfg_idx[idx];
+}
+
+static void p7_pll_init(struct clk_hw *clk_hw)
+{
+ struct clk_p7_pll* const pll = to_p7clk_pll(clk_hw);
+ unsigned long const addr = pll->pll_conf_reg;
+ union p7_pll_reg curr;
+ unsigned long flags = 0;
+
+
+ if (!pll->gateable)
+ return;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+ curr.word = readl(addr);
+ if (curr.fields.enable) {
+ curr.fields.enable = 0;
+ writel(curr.word, addr);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+
+struct clk_ops const p7_clk_pll_ops = {
+ .prepare = &p7_prepare_pll,
+ .unprepare = &p7_unprepare_pll,
+ .enable = &p7_enable_pll,
+ .disable = &p7_disable_pll,
+ .recalc_rate = &p7_get_pll_rate,
+ .set_rate = &p7_set_pll_rate,
+ .round_rate = &p7_round_pll_rate,
+ .init = &p7_pll_init,
+};
+
+struct clk_ops const p7_clk_fixed_pll_ops = {
+ .prepare = &p7_prepare_pll,
+ .unprepare = &p7_unprepare_pll,
+ .enable = &p7_enable_pll,
+ .disable = &p7_disable_pll,
+ .recalc_rate = &p7_get_pll_rate,
+ .init = &p7_pll_init,
+};
+
+struct clk *clk_register_pll(struct device *dev, const char *name,
+ const char *parent_name, const bool gateable, const unsigned long flags,
+ const unsigned long lock_reg, const u8 lock_bit_idx,
+ const unsigned long pll_reg, unsigned long const * const cfg_idx,
+ unsigned long const * const pll_cfg, const size_t cfg_nr,
+ spinlock_t *lock)
+{
+ struct clk_p7_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(struct clk_p7_pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate clk pll\n", __func__);
+ clk = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ init.name = name;
+ if (cfg_nr)
+ init.ops = &p7_clk_pll_ops;
+ else
+ init.ops = &p7_clk_fixed_pll_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->gateable = gateable;
+ pll->lock_reg = lock_reg;
+ pll->lock_bit_idx = lock_bit_idx;
+ pll->pll_conf_reg = pll_reg;
+ pll->cfg_idx = cfg_nr ? kmemdup(cfg_idx, cfg_nr * sizeof(unsigned long), GFP_KERNEL) : NULL;
+ pll->pll_cfg = cfg_nr ? kmemdup(pll_cfg, cfg_nr * sizeof(unsigned long), GFP_KERNEL) : NULL;
+ pll->cfg_nr = cfg_nr;
+ pll->lock = lock;
+ pll->hw.init = &init;
+
+ if (cfg_nr && ((!pll->cfg_idx) || (!pll->pll_cfg))) {
+ pr_err("%s: could not allocate memory for pll info\n", __func__);
+ clk = ERR_PTR(-ENOMEM);
+ goto free_pll_content;
+ }
+
+ clk = clk_register(dev, &pll->hw);
+
+ pr_debug("clk_register_pll %p for %s\n", &pll->hw, name);
+ if (! IS_ERR(clk))
+ return clk;
+
+free_pll_content:
+ if (pll->cfg_idx)
+ kfree(pll->cfg_idx);
+ if (pll->pll_cfg)
+ kfree(pll->pll_cfg);
+ kfree(pll);
+err:
+ return clk;
+}
+EXPORT_SYMBOL(clk_register_pll);
diff --git a/drivers/parrot/clk/clk-sd.c b/drivers/parrot/clk/clk-sd.c
new file mode 100644
index 00000000000000..0a69aed6e01a56
--- /dev/null
+++ b/drivers/parrot/clk/clk-sd.c
@@ -0,0 +1,120 @@
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "p7clk-provider.h"
+
+/*
+ * SD clock (ACS3 internal clock derived from XIN clock)
+ */
+
+/*
+ * Given a GBC (pre-)divisor value, compute XIN clock rate in Hz depending on
+ * the parent clock frequency passed in argument.
+ */
+static unsigned long p7_sdclk_div2rate(unsigned long prate,
+ u32 xin_div)
+{
+ unsigned long const xrate = (prate + xin_div) / (xin_div + 1);
+
+#ifdef DEBUG
+ BUG_ON(xrate > SDIOx_XIN_CLK_RATE_MAX);
+#endif
+ return xrate;
+}
+
/*
 * Given a parent clock frequency in Hz, compute the GBC (pre-)divisor value
 * best suited to reach the target SD clock rate passed in argument.
 */
static u32 p7_sdclk_rate2div(unsigned long prate, unsigned long rate,
		unsigned long *newrate)
{
	unsigned long nrate = 0; /* Best SD clock rate (<= rate) found so far. */
	unsigned long sdiv; /* SDIO controller divisor. */
	u32 xdiv = 10, xdiv_; /* SDIO XIN clock GBC (pre-)divisor. */

	/*
	 * We test the different values of the SDIO controller divisor.
	 * With sdiv = 1, we cannot control the duty cycle
	 */
	for (sdiv = 2; sdiv <= 256; sdiv = sdiv * 2) {
		/* If we cannot reach a higher rate with this (larger)
		 * divisor, we stop the loop. */
		if ((prate / sdiv) < nrate) break;

		/* We test the different values of the SDIO XIN clock GBC
		 * (pre-)divisor. */
		for (xdiv_ = 0; xdiv_ <= 10; xdiv_ ++) {
			unsigned long xin_rate = prate / (xdiv_ + 1);
			unsigned long sd_rate = xin_rate / sdiv;

			/* SDIO XIN clock can't be higher than 260MHz */
			if (xin_rate > SDIOx_XIN_CLK_RATE_MAX) continue;

			/* While the computed rate is still too high we try the
			 * next pre-divisor; once it fits, keep it when it
			 * improves on the best rate so far, then move on to
			 * the next sdiv. */
			if (sd_rate <= rate) {
				if (sd_rate >= nrate) {
					nrate = sd_rate;
					xdiv = xdiv_;
				}
				break;
			}
		}
	}
	/* NOTE(review): xdiv falls back to 10 (the largest pre-divisor tried)
	 * when no combination satisfies the request — confirm intended. */
	*newrate = nrate;
	return xdiv;
}
+
+/*
+ * Determine SD clock rate and XIN clock rate.
+ */
+static long p7_determine_sdclk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_clk)
+{
+ u32 xin_div;
+ long nrate;
+ unsigned long fastrate = clk_get_rate(clk_get_parent(*best_parent_clk));
+
+ xin_div = p7_sdclk_rate2div(fastrate, rate, &nrate);
+ *best_parent_rate = p7_sdclk_div2rate(fastrate, xin_div);
+
+ return nrate;
+}
+
/*
 * SD clock operations.
 *
 * Only .determine_rate is implemented: the SD clock delegates actual rate
 * changes to its XIN parent (registered with CLK_SET_RATE_PARENT below).
 */
struct clk_ops const p7_clk_sd_ops = {
	.determine_rate = p7_determine_sdclk_rate,
};

/*
 * clk_register_sd() - Register the virtual ACS3 SD clock on top of the XIN
 * clock named @parent_name.
 *
 * Returns a valid struct clk pointer or an ERR_PTR() value on allocation
 * or registration failure.
 */
struct clk *clk_register_sd(struct device *dev, const char *name,
		const char *parent_name)
{
	struct clk_hw *hw;
	struct clk *clk;
	struct clk_init_data init;

	hw = kzalloc(sizeof(struct clk_hw), GFP_KERNEL);
	if (! hw) {
		pr_err("%s: could not allocate sdclk hw\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &p7_clk_sd_ops;
	/* Rate requests propagate to the XIN parent clock. */
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	hw->init = &init;

	clk = clk_register(dev, hw);
	if (IS_ERR(clk))
		kfree(hw);

	return clk;
}
EXPORT_SYMBOL(clk_register_sd);
diff --git a/drivers/parrot/clk/clk-xin.c b/drivers/parrot/clk/clk-xin.c
new file mode 100644
index 00000000000000..8d93c7b5b523f7
--- /dev/null
+++ b/drivers/parrot/clk/clk-xin.c
@@ -0,0 +1,158 @@
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "p7clk-provider.h"
+
+/*
+ * SDIO GBC based XIN clock.
+ *
 * Depicts a leaf clock in the overall clock hierarchy. It behaves as a
 * gated clock divider, providing the SDIO controller with an
 * ASYMMETRIC input clock used to derive the external SDIO bus clock.
 * The parent clock MUST support enable/disable counting.
 *
 * Warning: the asymmetric nature of the SDIO XIN clock forces it to be further
 * divided (using the SDIO controller divisor) to generate the needed symmetric bus clock.
+ */
+
+#define to_p7clk_xin(_hw) container_of(_hw, struct clk_p7_xin, hw)
+
+/*
+ * Enable GBC based SDIO XIN clock.
+ */
+static int p7_enable_xinclk(struct clk_hw* clk_hw)
+{
+ struct clk_p7_xin const* const xin = to_p7clk_xin(clk_hw);
+
+ /* Enable clock */
+ __raw_writel(readl(xin->clk_reg) | SDIOx_XIN_CLK_EN, xin->clk_reg);
+ return 0;
+}
+
+/*
+ * Disable GBC based SDIO XIN clock.
+ */
+static void p7_disable_xinclk(struct clk_hw* clk_hw)
+{
+ struct clk_p7_xin const* const xin = to_p7clk_xin(clk_hw);
+
+ /* Disable clock */
+ __raw_writel(readl(xin->clk_reg) & ~SDIOx_XIN_CLK_EN, xin->clk_reg);
+}
+
+/*
+ * Return current SDIO XIN clock frequency in HZ.
+ */
static unsigned long p7_recalc_xinclk_rate(struct clk_hw* clk_hw,
		unsigned long parent_rate)
{
	struct clk_p7_xin* const xin = to_p7clk_xin(clk_hw);
	/* Extract the 4-bit (pre-)divisor field from the GBC register. */
	unsigned int const div = (readl(xin->clk_reg) >> SDIOx_XIN_CLK_DIV_SFT) &
		SDIOx_XIN_CLK_DIV_MSK;
	/* XIN rate is parent / (div + 1); adding div first absorbs the
	 * integer-division remainder (same rounding as p7_sdclk_div2rate()). */
	return (parent_rate + div) / (div + 1);
}
+
+/*
+ * Return best rounded XIN clock frequency in Hz.
+ *
 * Round the frequency passed in argument so that, once divided by the SD host
 * divisor, the produced SD clock complies with the following constraints:
 * - the clock is symmetric (the XIN clock is not);
 * - the SD clock output (i.e., once divided by the host controller) is as
 *   close as possible to the desired rate.
 * Host controller divisor values are limited to powers of 2 to comply with
 * legacy SD specifications.
+ */
static long p7_round_xinclk_rate(struct clk_hw* clk_hw, unsigned long rate,
		unsigned long *parent_rate)
{
	unsigned long const prate = *parent_rate;

	/* Exact divisor available: the requested rate can be produced as-is. */
	if (rate && !(prate % rate))
		return rate;
	else {
		int i;
		/* Otherwise return the first divided rate strictly below the
		 * request that also honours the 260MHz XIN limit.
		 * NOTE(review): the search stops at divisor 11 although the
		 * hardware divisor field masks 4 bits — confirm intended. */
		for (i = 0; i <= 10; i++) {
			unsigned long nrate = prate / (i + 1);
			if ((rate > nrate) && (nrate <= SDIOx_XIN_CLK_RATE_MAX))
				return nrate;
		}

		/* Fall back to the slowest rate the loop can reach. */
		return prate / 11;
	}
}
+
+/*
 * Set the SDIO XIN clock frequency in Hz.
+ *
+ * Warning: frequency passed in argument will be rounded using the SDIO
+ * controller and GBC divisors combination closest to targeted
+ * frequency. Be aware that SDIO XIN clock is asymetric (see @p7_sin_clk).
+ */
static int p7_set_xinclk_rate(struct clk_hw* clk_hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_p7_xin* const xin = to_p7clk_xin(clk_hw);
	/* Truncating division — assumes @rate is non-zero and was previously
	 * rounded by p7_round_xinclk_rate(); TODO confirm callers guarantee
	 * this. */
	u32 const xdiv = parent_rate / rate - 1;
	int const enabled = __raw_readl(xin->clk_reg) & SDIOx_XIN_CLK_EN;

	/* Clock must be disabled before modifying divisor. */
	if (enabled)
		/* Disable clock. */
		__raw_writel(0, xin->clk_reg);

	/* Setup divisor. */
	__raw_writel(xdiv << SDIOx_XIN_CLK_DIV_SFT, xin->clk_reg);

	/*
	 * Re-enabling the clock has to be implemented using a separate write
	 * instruction.
	 */
	if (enabled)
		/* Re-enable clock if needed. */
		__raw_writel((xdiv << SDIOx_XIN_CLK_DIV_SFT) | SDIOx_XIN_CLK_EN,
				xin->clk_reg);

	return 0;
}
+
/*
 * XIN clock operations: gating, divisor readback and rate programming.
 */
struct clk_ops const p7_clk_xin_ops = {
	.enable = &p7_enable_xinclk,
	.disable = &p7_disable_xinclk,
	.recalc_rate = &p7_recalc_xinclk_rate,
	.set_rate = &p7_set_xinclk_rate,
	.round_rate = &p7_round_xinclk_rate
};
+
+struct clk *clk_register_xin(struct device *dev, const char *name,
+ const char *parent_name, const unsigned long clk_reg)
+{
+ struct clk_p7_xin *xin;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ xin = kzalloc(sizeof(struct clk_p7_xin), GFP_KERNEL);
+ if (! xin) {
+ pr_err("%s: could not allocate clk xin\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &p7_clk_xin_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ xin->clk_reg = clk_reg;
+ xin->hw.init = &init;
+
+ clk = clk_register(dev, &xin->hw);
+ if (IS_ERR(clk))
+ kfree(xin);
+
+ return clk;
+}
+EXPORT_SYMBOL(clk_register_xin);
diff --git a/drivers/parrot/clk/clock.c b/drivers/parrot/clk/clock.c
new file mode 100644
index 00000000000000..c4dd2ce26cf3aa
--- /dev/null
+++ b/drivers/parrot/clk/clock.c
@@ -0,0 +1,98 @@
+/*
+ * drivers/parrot/clk/clock.c
+ *
+ * Copyright (C) 2010-2013 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include "clock.h"
+
+
+/**
 * p7_get_clkcfg() - Get the clock frequency setup descriptor index for the
 * frequency passed in argument.
 * @config: a table of clock frequency descriptors sorted in strict ascending
 * order.
 * @frequency: frequency in Hz
 * @count: number of elements in the clock frequency table
 *
 * @p7_get_clkcfg is used to find the best configuration of a divisor
 * (or PLL) amongst the possible frequencies it may handle.
+ */
int p7_get_clkcfg(unsigned long const* config, unsigned int frequency,
		size_t count)
{
	unsigned int min = 0;
	unsigned int max = count - 1;

	BUG_ON(! config);
	BUG_ON(! frequency);
	BUG_ON(! count);

	/* Requested frequency below the smallest configured one. */
	if (frequency < config[0])
		return -ERANGE;

	/* Binary search: narrow [min, max] around @frequency. */
	while (max - min > 1) {
		unsigned int const idx = (min + max) / 2;

		if (frequency == config[idx])
			return idx;
		else if (frequency > config[idx])
			min = idx;
		else
			max = idx;
	}

	/* min and max are now adjacent: pick the closest entry, favouring
	 * the lower one on ties.
	 * NOTE(review): this may return an entry whose frequency is above
	 * the request when it is closer — confirm callers accept overshoot. */
	if (frequency > config[max])
		return max;
	else if ((config[max] - frequency) < (frequency - config[min]))
		return max;
	else
		return min;
}
+
/**
 * p7_activate_pll() - Wait for a PLL to report the locked/ready state.
 * @status_reg: virtual address of the PLL status register
 * @status_msk: mask used to poll for PLL ready condition
 *
 * Busy-polls @status_reg (with cpu_relax()) for at most 10 jiffies.
 *
 * Returns 0 once a bit of @status_msk is set, -ETIME on timeout.
 */
int p7_activate_pll(unsigned long status_reg, u32 status_msk)
{
	unsigned long const timeout = jiffies + 10;

	do {
		if (readl(status_reg) & status_msk)
			return 0;

		cpu_relax();
	} while (time_before(jiffies, timeout));

	return -ETIME;
}
+
+unsigned long p7_pll_rate(union p7_pll_reg cfg, unsigned long in_hz)
+{
+ if (cfg.fields.bypass)
+ return in_hz;
+
+ return (((in_hz / 1000 *
+ ((cfg.fields.cfg & ((1UL << 8) - 1)) + 1)) /* DIVf + 1 */
+ /
+ (((cfg.fields.cfg >> 8) & ((1UL << 6) - 1)) + 1)) /* DIVr + 1 */
+ >> ((cfg.fields.cfg >> 14) & ((1UL << 3) - 1))) /* 2 ^ DIVq */
+ * 1000;
+}
+
/*
 * Write a PLL configuration word and issue a write barrier so the value
 * reaches the hardware before any subsequent access.
 */
void p7_write_pll_conf(unsigned long address, u32 val)
{
	writel(val, address);
	wmb();
}
diff --git a/drivers/parrot/clk/clock.h b/drivers/parrot/clk/clock.h
new file mode 100644
index 00000000000000..a967b9c77e8af8
--- /dev/null
+++ b/drivers/parrot/clk/clock.h
@@ -0,0 +1,38 @@
+/*
+ * drivers/parrot/clk/clock.h
+ *
+ * Copyright (C) 2010-2013 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @author Damien Riegel <damien.riegel.ext@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
#ifndef _P7_CLOCK_COMMON_H
#define _P7_CLOCK_COMMON_H

#include "p7clk-provider.h"

/**
 * union p7_pll_reg - PLL register descriptor.
 *
 * @word gives raw access to the 32-bit register value; @fields is the
 * bitfield view of the same word. @cfg packs the PLL divisors (DIVf, DIVr,
 * DIVq — decoded by p7_pll_rate()), @enable gates the PLL, @bypass
 * forwards the input clock unchanged and @reset holds the PLL in reset.
 */
union p7_pll_reg {
	u32 word;
	struct {
		unsigned cfg:20;
		unsigned enable:1;
		unsigned bypass:1;
		unsigned reset:1;
	} fields;
};

/* Binary search for the configuration index closest to @frequency. */
int p7_get_clkcfg(unsigned long const* config, unsigned int frequency, size_t count);
/* Poll @status_reg until @status_msk is set (PLL locked) or timeout. */
int p7_activate_pll(unsigned long status_reg, u32 status_msk);
/* Compute the output rate of a PLL configured with @cfg fed by @in_hz. */
unsigned long p7_pll_rate(union p7_pll_reg cfg, unsigned long in_hz);
/* Write a PLL configuration word followed by a write barrier. */
void p7_write_pll_conf(unsigned long addr, u32 val);

#endif
diff --git a/drivers/parrot/clk/p7clk-provider.h b/drivers/parrot/clk/p7clk-provider.h
new file mode 100644
index 00000000000000..32aab9b24b01c2
--- /dev/null
+++ b/drivers/parrot/clk/p7clk-provider.h
@@ -0,0 +1,146 @@
+/*
+ * drivers/parrot/clk/p7clk-provider.h
+ *
+ * Copyright (c) 2010-2013
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @author Damien Riegel <damien.riegel.ext@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _P7_CLK_PROVIDER_H
+#define _P7_CLK_PROVIDER_H
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_COMMON_CLK
+
+/**
+ * struct p7_clk_gbc - GBC clock descriptor.
+ * @clk_reg: virtual address of register used to enable clock
+ * @clk_msk: mask used to enable/disable clock
+ * @reset_reg: virtual address of register used to reset child block
+ *
 * @p7_clk_gbc holds the GBC information required to perform clock and
 * reset related operations.
 * @reset_reg is optional and unused if null.
+ */
+struct p7_clk_gbc {
+ unsigned long clk_reg;
+ u32 clk_msk;
+ unsigned long reset_reg;
+ u32 reset_offset;
+ unsigned long power_reg;
+ u32 power_msk;
+};
+
+/**
+ * struct clk_p7_gbc - Gate and reset individual IPs
+ * @gbc: gbc info for this clock
+ * @lock: spinlock pointer
+ */
+struct clk_p7_gbc {
+ struct clk_hw hw;
+ struct p7_clk_gbc gbc;
+ spinlock_t* lock;
+};
+
+extern struct clk_ops const p7_clk_gbc_ops;
+extern struct clk *clk_register_gbc(struct device*, const char *,
+ const char *, unsigned long,
+ unsigned long, u32, unsigned long, u32,
+ unsigned long, spinlock_t*);
+
+
+/**
+ * struct clk_p7_pll - One level (master) PLL descriptor.
+ * @gateable: boolean to indicate if this PLL is gateable
+ * @lock_reg: address of the lock register
+ * @lock_bit_idx: lock-bit index in @lock_reg
+ * @pll_conf_reg: PLL configuration register address
+ * @cfg_nr: @pll_cfg number of entries
+ * @cfg_idx: PLLs frequency table
+ * @pll_cfg: table of possible PLL's frequency configurations
+ * @lock: spinlock pointer
+ */
+struct clk_p7_pll {
+ struct clk_hw hw;
+ bool gateable;
+ unsigned long lock_reg;
+ u8 lock_bit_idx;
+ unsigned long pll_conf_reg;
+ size_t cfg_nr;
+ unsigned long const* cfg_idx;
+ unsigned long const* pll_cfg;
+ spinlock_t* lock;
+};
+
+extern struct clk_ops const p7_clk_pll_ops;
+extern struct clk_ops const p7_clk_fixed_pll_ops;
+extern struct clk *clk_register_pll(struct device *, const char *,
+ const char *, const bool, const unsigned long,
+ const unsigned long, const u8,
+ const unsigned long, unsigned long const * const,
+ unsigned long const * const, const size_t, spinlock_t *);
+/**
+ * struct p7_avipll - Two level AVI PLL descriptor.
+ * @lock_reg: address of the lock register
+ * @lock_bit_idx: lock-bit index in @lock_reg
+ * @pll_conf_reg: PLL configuration register address
+ * @cfg_nr: PLLs configuration table number of entries
+ * @cfg_idx: PLLs frequency table
+ * @master_cfg: table of first level possible PLL's frequency configurations
+ * @slave_cfg: table of second level possible PLL's frequency configurations
+ * @lock: spinlock pointer
+ */
+struct clk_p7_avipll {
+ struct clk_hw hw;
+ unsigned long lock_reg;
+ u8 lock_bit_idx;
+ unsigned long pll_conf_reg;
+ size_t cfg_nr;
+ unsigned long const* cfg_idx;
+ unsigned long const* master_cfg;
+ unsigned long const* slave_cfg;
+ spinlock_t* lock;
+};
+
+extern struct clk_ops const p7_clk_avipll_ops;
+extern struct clk *clk_register_avipll(struct device *, const char *,
+ const char *, const unsigned long,
+ const unsigned long, const u8,
+ const unsigned long, unsigned long const * const,
+ unsigned long const * const, unsigned long const * const,
+ const size_t, spinlock_t *);
+
+/**
+ * struct clk_p7_xin - SDIO Xin clock
+ * @clk_reg: register address
+ */
+struct clk_p7_xin {
+ struct clk_hw hw;
+ unsigned long clk_reg;
+};
+
+extern struct clk_ops const p7_clk_xin_ops;
+extern struct clk *clk_register_xin(struct device *, const char *,
+ const char *, const unsigned long);
+
+#define SDIOx_XIN_CLK_EN (1UL)
+#define SDIOx_XIN_CLK_DIV_SFT (1UL)
+#define SDIOx_XIN_CLK_DIV_MSK (0xfUL)
+#define SDIOx_XIN_CLK_RATE_MAX (260000000UL)
+
+extern struct clk *clk_register_sd(struct device *, const char *, const char *);
+
+#define P7_POWER_OFF (1 << 2)
+#define P7_POWER_ON (1 << 1)
+#define P7_POWER_REQ (1 << 0)
+#define P7_REQUEST_POWER_ON 1
+#define P7_REQUEST_POWER_OFF 0
+
+#endif /* CONFIG_COMMON_CLK */
+#endif /* _P7_CLK_PROVIDER_H */
diff --git a/drivers/parrot/cpufreq/Kconfig b/drivers/parrot/cpufreq/Kconfig
new file mode 100644
index 00000000000000..97a85791adea1b
--- /dev/null
+++ b/drivers/parrot/cpufreq/Kconfig
@@ -0,0 +1,4 @@
+config ARM_P7_CPUFREQ
+ bool "P7 CPU Frequency scaling support"
+ depends on ARCH_PARROT7
+ select CPU_FREQ
diff --git a/drivers/parrot/cpufreq/Makefile b/drivers/parrot/cpufreq/Makefile
new file mode 100644
index 00000000000000..f00ad1fbd31412
--- /dev/null
+++ b/drivers/parrot/cpufreq/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_P7_CPUFREQ) += cpufreq-p7.o
diff --git a/drivers/parrot/cpufreq/cpufreq-p7.c b/drivers/parrot/cpufreq/cpufreq-p7.c
new file mode 100644
index 00000000000000..bb523b08115845
--- /dev/null
+++ b/drivers/parrot/cpufreq/cpufreq-p7.c
@@ -0,0 +1,179 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#define NUM_CPUS 2
+
+static struct clk* p7_cpu_clk;
+
/*
 * Candidate CPU frequencies in kHz; each entry halves the previous one.
 * Entries below the current sys_clk rate are truncated at init time by
 * p7_cpu_init().
 */
static struct cpufreq_frequency_table freq_table[] = {
	{ 0, 780000 },
	{ 1, 390000 },
	{ 2, 195000 },
	{ 3, 97500 },
	{ 4, CPUFREQ_TABLE_END },
};
+
/* Clamp a policy's limits against the supported frequency table. */
static int p7_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}
+
/*
 * Current CPU clock rate in kHz (both cores share cpu_clk), or 0 for an
 * out-of-range cpu number.
 * NOTE(review): the bound check uses NR_CPUS while the local NUM_CPUS (2)
 * is defined but unused — confirm which limit is intended.
 */
static unsigned int p7_getspeed(unsigned int cpu)
{
	unsigned long rate;

	if (cpu >= NR_CPUS)
		return 0;

	rate = clk_get_rate(p7_cpu_clk) / 1000;

	return rate;
}
+
+static int p7_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+ unsigned int i;
+ unsigned int freq;
+ struct cpufreq_freqs freqs;
+
+ cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &i);
+
+ freqs.new = freq_table[i].frequency;
+ freqs.old = p7_getspeed(policy->cpu);
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new && policy->cur == freqs.new)
+ return 0;
+
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ freq = freqs.new * 1000;
+
+ pr_debug("%s: %u MHz --> %u MHz\n", __func__,
+ freqs.old / 1000, freqs.new / 1000);
+
+ ret = clk_set_rate(p7_cpu_clk, freq);
+
+ freqs.new = freq / 1000;
+
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return ret;
+}
+
/*
 * Per-policy initialization: bound the frequency table by the sys_clk
 * rate, publish it, and take a reference on cpu_clk.
 * Returns 0 on success or -EINVAL when a clock cannot be obtained.
 */
static int p7_cpu_init(struct cpufreq_policy *policy)
{
	int i;
	struct clk *p7_sys_clk;
	unsigned long rate;

	/* sys_clk is only needed to bound the table; drop it right after. */
	p7_sys_clk = clk_get_sys("sys_clk", NULL);
	if (IS_ERR(p7_sys_clk)) {
		pr_err("cpufreq-p7: failed to get sys_clk\n");
		return -EINVAL;
	}

	rate = clk_get_rate(p7_sys_clk);
	rate /= 1000;
	clk_put(p7_sys_clk);

	/*
	 * (From P7 UserManual)
	 * cpu_clk / sys_clk
	 * These clocks are generated from fast_clk, and can be dynamically
	 * divided. So the only thing needed to do is to configure the division
	 * ratio. For now, the recommended ratios are sys_clk=fast_clk/2 and
	 * cpu_clk=fast_clk or cpu_clk=fast_clk/2. Other ratios require extra
	 * care. This is done in SYSTEM_CLKGEN_CPU_DIV and SYSTEM_CLKGEN_SYS_DIV
	 *
	 * Note : sys_clk frequency must not be greater than cpu_clk frequency.
	 */
	/* Truncate the table at the first entry below the sys_clk rate. */
	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
	{
		if (freq_table[i].frequency < rate)
		{
			freq_table[i].frequency = CPUFREQ_TABLE_END;
			break;
		}
	}

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = p7_getspeed(policy->cpu);

	/*
	 * Both processors share the voltage and clock. So both CPUs
	 * needs to be scaled together and hence needs software
	 * co-ordination. Use cpufreq affected_cpus interface to
	 * handle this scenario.
	 */
	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
	cpumask_setall(policy->cpus);

	p7_cpu_clk = clk_get_sys("cpu_clk", NULL);
	if (IS_ERR(p7_cpu_clk)) {
		pr_err("cpufreq-p7: failed to get cpu_clk\n");
		return -EINVAL;
	}

	return 0;
}
+
+static int p7_cpu_exit(struct cpufreq_policy *policy)
+{
+ clk_put(p7_cpu_clk);
+
+ return 0;
+}
+
/* Expose scaling_available_frequencies in sysfs. */
static struct freq_attr *p7_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver entry points. */
static struct cpufreq_driver p7_cpufreq_driver = {
	.flags = CPUFREQ_STICKY,
	.verify = p7_verify_speed,
	.target = p7_target,
	.get = p7_getspeed,
	.init = p7_cpu_init,
	.exit = p7_cpu_exit,
	.name = "p7",
	.attr = p7_cpufreq_attr,
};

/* Register/unregister the driver with the cpufreq core. */
static int __init p7_cpufreq_init(void)
{
	return cpufreq_register_driver(&p7_cpufreq_driver);
}

static void __exit p7_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&p7_cpufreq_driver);
}

MODULE_AUTHOR("Aurelien Lefebvre <aurelien.lefebvre@parrot.com>");
MODULE_DESCRIPTION("cpufreq driver for p7");
MODULE_LICENSE("GPL");
module_init(p7_cpufreq_init);
module_exit(p7_cpufreq_exit);
diff --git a/drivers/parrot/crypto/Kconfig b/drivers/parrot/crypto/Kconfig
new file mode 100644
index 00000000000000..0b62ccf30b209d
--- /dev/null
+++ b/drivers/parrot/crypto/Kconfig
@@ -0,0 +1,11 @@
+
+config CRYPTO_DEV_P7
+ tristate "Support for P7 crypto hw accelerator"
+ depends on NET
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_USER_API_HASH
+ select CRYPTO_USER_API_SKCIPHER
+ help
	  P7 processors have an AES hardware accelerator module. Select this
	  if you want to use the P7 module for AES algorithms.
+
diff --git a/drivers/parrot/crypto/Makefile b/drivers/parrot/crypto/Makefile
new file mode 100644
index 00000000000000..3b974c073a8c34
--- /dev/null
+++ b/drivers/parrot/crypto/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for CRYPTO drivers for PARROT
+#
+
+ccflags-y += -I$(srctree)/drivers/parrot
+obj-$(CONFIG_CRYPTO_DEV_P7) += p7-crypto.o
diff --git a/drivers/parrot/crypto/p7-crypto.c b/drivers/parrot/crypto/p7-crypto.c
new file mode 100644
index 00000000000000..7a3b67d5830ee0
--- /dev/null
+++ b/drivers/parrot/crypto/p7-crypto.c
@@ -0,0 +1,1427 @@
+/**
+ *************************************************
+ * @file p7-crypto.c
 * @brief P7 crypto hardware accelerator driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-19
+ ************************************************
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include "p7-crypto_regs.h"
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+
+#define IDLE_TIMEOUT 200 /* msec */
+
+#define ACRYPTO_OP_DECODE 0
+#define ACRYPTO_OP_ENCODE 1
+
+#define ACRYPTO_MODE_ECB 0
+#define ACRYPTO_MODE_CBC 1
+#define ACRYPTO_MODE_OFB 2
+#define ACRYPTO_MODE_CTR 3
+
+#define ACRYPTO_TYPE_AES_128 0
+#define ACRYPTO_TYPE_AES_192 1
+#define ACRYPTO_TYPE_AES_256 2
+#define ACRYPTO_TYPE_3DES 3
+#define ACRYPTO_TYPE_DES 4
+
+#define P7CA_NR_KEYSLOTS 8
+struct p7ca_key_slot {
+ struct list_head node;
+ int slot_num;
+};
+
+#define P7CA_NR_DESCSLOTS 16
+struct p7ca_desc_slot {
+ int slot_num;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+
+ size_t buflen;
+ void *buf_in;
+ size_t dma_size;
+ dma_addr_t dma_addr_in;
+ void *buf_out;
+ dma_addr_t dma_addr_out;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+};
+
+struct p7ca_ctx {
+ struct p7ca_dev *dd;
+ struct p7ca_key_slot *slot;
+ int keylen;
+ u8 key[AES_KEYSIZE_256];
+ unsigned long flags;
+};
+
+struct p7ca_reqctx {
+ struct p7ca_desc_slot *slot;
+ u8 *iv;
+ unsigned int ivsize;
+ u8 op, type, mode, unused;
+ struct ablkcipher_walk walk;
+ int count;
+};
+
+#define FLAGS_CMD_FAST BIT(0)
+#define FLAGS_SUSPENDED BIT(1)
+#define FLAGS_NEW_KEY BIT(2)
+#define FLAGS_DESC_UNUSED BIT(3)
+#define FLAGS_DESC_USED BIT(4)
+
+#define P7CA_QUEUE_LENGTH 50
+#define P7CA_CACHE_SIZE 0
+
+struct p7ca_dev {
+ unsigned long phys_base;
+ void __iomem *io_base;
+ struct clk *iclk;
+ struct device *dev;
+ unsigned long flags;
+ int err;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct work_struct done_task;
+ struct work_struct queue_task;
+ struct p7ca_key_slot *key_slots;
+ struct p7ca_desc_slot *desc_slots;
+ int cur_desc;
+ int irq;
+ unsigned long irq_flags;
+};
+
+static struct p7ca_dev *p7ca_dev;
+
+/* keep registered devices data here */
+static struct list_head dev_list;
+static DEFINE_SPINLOCK(list_lock);
+
/* Read the 32-bit crypto block register at byte @offset. */
static inline u32 p7ca_read(struct p7ca_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

/* Write @value to the crypto block register at byte @offset. */
static inline void p7ca_write(struct p7ca_dev *dd, u32 offset,
		u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

/* Read-modify-write: clear the @mask bits, then OR in @value. */
static inline void p7ca_write_mask(struct p7ca_dev *dd, u32 offset,
		u32 value, u32 mask)
{
	u32 val;

	val = p7ca_read(dd, offset);
	val &= ~mask;
	val |= value;
	p7ca_write(dd, offset, val);
}

/* Copy @count consecutive 32-bit words to registers starting at @offset. */
static void p7ca_write_n(struct p7ca_dev *dd, u32 offset,
		u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		p7ca_write(dd, offset, *value);
}
+
+static int p7ca_write_ctrl(struct p7ca_dev *dd, struct p7ca_desc_slot *slot)
+{
+ unsigned long end, flags;
+ struct p7ca_ctx *ctx = crypto_tfm_ctx(slot->req->base.tfm);
+ struct p7ca_reqctx *rctx = ablkcipher_request_ctx(slot->req);
+ u32 val = 0;
+
+ if (!ctx->slot)
+ return -EINVAL;
+
+ val |= P7CA_DESC_CTRL_INT_ENABLE;
+
+ if (rctx->op == ACRYPTO_OP_DECODE)
+ val |= P7CA_DESC_CTRL_DECODE;
+
+ switch (rctx->mode) {
+ case ACRYPTO_MODE_ECB:
+ val |= P7CA_DESC_CTRL_CIPH_MODE_ECB;
+ break;
+ case ACRYPTO_MODE_CBC:
+ val |= P7CA_DESC_CTRL_CIPH_MODE_CBC;
+ break;
+ case ACRYPTO_MODE_OFB:
+ val |= P7CA_DESC_CTRL_CIPH_MODE_OFB;
+ break;
+ case ACRYPTO_MODE_CTR:
+ val |= P7CA_DESC_CTRL_CIPH_MODE_CTR;
+ break;
+ default:
+ goto err_out;
+ }
+
+ switch (rctx->type) {
+ case ACRYPTO_TYPE_AES_128:
+ if (ctx->keylen != AES_KEYSIZE_128)
+ goto err_out;
+ val |= P7CA_DESC_CTRL_CIPH_ALG_AES_128;
+ break;
+ case ACRYPTO_TYPE_AES_192:
+ if (ctx->keylen != AES_KEYSIZE_192)
+ goto err_out;
+ val |= P7CA_DESC_CTRL_CIPH_ALG_AES_192;
+ break;
+ case ACRYPTO_TYPE_AES_256:
+ if (ctx->keylen != AES_KEYSIZE_256)
+ goto err_out;
+ val |= P7CA_DESC_CTRL_CIPH_ALG_AES_256;
+ break;
+ case ACRYPTO_TYPE_3DES:
+ if (ctx->keylen != DES3_EDE_KEY_SIZE)
+ goto err_out;
+ val |= P7CA_DESC_CTRL_CIPH_ALG_TDEA;
+ break;
+ case ACRYPTO_TYPE_DES:
+ if (ctx->keylen != DES_KEY_SIZE)
+ goto err_out;
+ val |= P7CA_DESC_CTRL_CIPH_ALG_DES;
+ break;
+ default:
+ goto err_out;
+ }
+
+ if (ctx->flags & FLAGS_NEW_KEY) {
+ /* Workaround to fix a IP Crypto bug:
+ * If you try to configure new descriptors
+ * when one or more descriptors are active,
+ * dma result may be corrupted */
+ end = msecs_to_jiffies(IDLE_TIMEOUT) + jiffies;
+ while (((p7ca_read(dd, P7CA_REG_CTRL_STATUS)
+ & CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_MASK) != 0)) {
+ if (time_after_eq(jiffies, end)) {
+ dev_err(dd->dev, "Idle ip crypto request timed out\n");
+ goto err_out;
+ }
+ cond_resched();
+ }
+
+ /* copy the key to the key slot */
+ p7ca_write_n(dd, P7CA_REG_KEY(ctx->slot->slot_num),
+ (u32 *)ctx->key,
+ ctx->keylen / sizeof(u32));
+
+ val |= P7CA_DESC_CTRL_NEW_KEY_SET |
+ P7CA_DESC_CTRL_KEY_INDEX(ctx->slot->slot_num);
+ ctx->flags &= ~FLAGS_NEW_KEY;
+ }
+
+ if (((rctx->mode == ACRYPTO_MODE_CBC)
+ || (rctx->mode == ACRYPTO_MODE_OFB)
+ || (rctx->mode == ACRYPTO_MODE_CTR))
+ && rctx->iv) {
+ /* Workaround to fix a IP Crypto bug:
+ * If you try to configure new descriptors
+ * when one or more descriptors are active,
+ * dma result may be corrupted */
+ end = msecs_to_jiffies(IDLE_TIMEOUT) + jiffies;
+ while (((p7ca_read(dd, P7CA_REG_CTRL_STATUS)
+ & CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_MASK) != 0)) {
+ if (time_after_eq(jiffies, end)) {
+ dev_err(dd->dev, "Idle ip crypto request timed out\n");
+ goto err_out;
+ }
+ cond_resched();
+ }
+
+ /* copy the IV to the iv slot */
+ p7ca_write_n(dd, P7CA_REG_IV(ctx->slot->slot_num), (u32 *)rctx->iv,
+ rctx->ivsize / sizeof(u32));
+
+ /* Needed to reload initial vector */
+ val |= P7CA_DESC_CTRL_NEW_KEY_SET |
+ P7CA_DESC_CTRL_KEY_INDEX(ctx->slot->slot_num);
+ }
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (++dd->cur_desc >= P7CA_NR_DESCSLOTS)
+ dd->cur_desc = 0;
+ slot->slot_num = dd->cur_desc;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ p7ca_write(dd, P7CA_REG_DESC_CTRL(slot->slot_num), val);
+
+ return 0;
+
+err_out:
+ return -EINVAL;
+}
+
/* Put @slot back on the global free-key-slot list. */
static void p7ca_release_key_slot(struct p7ca_key_slot *slot)
{
	spin_lock(&list_lock);
	list_add_tail(&slot->node, &dev_list);
	spin_unlock(&list_lock);
}
+
+static struct p7ca_key_slot *p7ca_find_key_slot(void)
+{
+ struct p7ca_key_slot *slot = NULL;
+ int empty;
+
+ spin_lock(&list_lock);
+ empty = list_empty(&dev_list);
+ if (!empty) {
+ slot = list_first_entry(&dev_list, struct p7ca_key_slot, node);
+ list_del(&slot->node);
+ }
+ spin_unlock(&list_lock);
+
+ return slot;
+}
+
/*
 * Release whichever DMA bounce buffers were allocated for @slot by
 * p7ca_dma_alloc_desc().
 */
static void p7ca_dma_cleanup(struct p7ca_dev *dd, struct p7ca_desc_slot *slot)
{
	if (slot->buf_in != NULL)
		dma_free_coherent(dd->dev, PAGE_SIZE << P7CA_CACHE_SIZE,
				  slot->buf_in, slot->dma_addr_in);
	if (slot->buf_out != NULL)
		dma_free_coherent(dd->dev, PAGE_SIZE << P7CA_CACHE_SIZE,
				  slot->buf_out, slot->dma_addr_out);
}
+
/*
 * Crypto block interrupt handler: clear the interrupt memory, latch the
 * status bits into dd->irq_flags (ACKing them in the process), then defer
 * the completion work to the done_task work item.
 */
static irqreturn_t p7ca_interrupt(int irq, void *dev_id)
{
	unsigned long reg, flags;
	struct p7ca_dev *dd = dev_id;

	spin_lock_irqsave(&dd->lock, flags);

	p7ca_write(dd,
		   P7CA_REG_IRQ_MEM,
		   P7CA_IRQ_MEM_CLEAR);

	/* Read status and ACK all interrupts */
	reg = p7ca_read(dd, P7CA_REG_IRQ_STATUS);
	p7ca_write(dd, P7CA_REG_IRQ_STATUS, reg);
	dd->irq_flags |= reg;

	schedule_work(&dd->done_task);

	spin_unlock_irqrestore(&dd->lock, flags);

	return IRQ_HANDLED;
}
+
/*
 * Allocate the input/output DMA bounce buffers (one page each) used when a
 * request's scatterlists cannot be mapped directly. @buflen is truncated
 * to a multiple of AES_BLOCK_SIZE. Returns 0 or -ENOMEM.
 */
static int p7ca_dma_alloc_desc(struct p7ca_dev *dd, struct p7ca_desc_slot *slot)
{
	int err = -ENOMEM;

	slot->buf_in = dma_alloc_coherent(dd->dev, PAGE_SIZE << P7CA_CACHE_SIZE,
					  &slot->dma_addr_in, GFP_KERNEL);
	slot->buf_out = dma_alloc_coherent(dd->dev, PAGE_SIZE << P7CA_CACHE_SIZE,
					   &slot->dma_addr_out, GFP_KERNEL);

	if (!slot->buf_in || !slot->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}
	/* Work in whole cipher blocks only. */
	slot->buflen = PAGE_SIZE;
	slot->buflen &= ~(AES_BLOCK_SIZE - 1);

	return 0;

err_alloc:
	/* Frees whichever of the two buffers did get allocated. */
	p7ca_dma_cleanup(dd, slot);

	dev_err(dd->dev, "error: %d\n", err);

	return err;
}
+
/*
 * Return @slot to the unused pool and detach it from any hardware
 * descriptor index.
 * NOTE(review): @dd and @isLast are unused — presumably kept for symmetry
 * with the other slot helpers; confirm before removing.
 */
static void p7ca_free_desc(struct p7ca_dev *dd, struct p7ca_desc_slot *slot, int isLast)
{
	if (!slot)
		return;

	slot->flags = FLAGS_DESC_UNUSED;
	slot->slot_num = -1;
}
+
+static struct p7ca_desc_slot *p7ca_get_desc_by_num(struct p7ca_dev *dd, int num)
+{
+ int i;
+ for (i = 0; i < P7CA_NR_DESCSLOTS; i++) {
+ if (dd->desc_slots[i].slot_num == num)
+ return &dd->desc_slots[i];
+ }
+
+ return NULL;
+}
+
+static struct p7ca_desc_slot *p7ca_get_desc(struct p7ca_dev *dd)
+{
+ int i;
+
+ for (i = 0; i < P7CA_NR_DESCSLOTS; i++) {
+ if (dd->desc_slots[i].flags == FLAGS_DESC_UNUSED) {
+ dd->desc_slots[i].flags = FLAGS_DESC_USED;
+ return &dd->desc_slots[i];
+ }
+ }
+
+ return NULL;
+}
+
/*
 * Copy @nbytes between the linear buffer @buf and the scatterlist @sg,
 * starting @start bytes into the list. The direction is selected by @out,
 * which is forwarded to scatterwalk_copychunks().
 */
static void sg_copy_buf(struct p7ca_dev *dd, void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
+
/*
 * Stream up to min(@buflen, @total) bytes between the scatterlist cursor
 * (*sg, *offset) and the linear buffer @buf, advancing the cursor across
 * scatterlist entries as needed. Direction is given by @out (forwarded to
 * sg_copy_buf()). Returns the number of bytes actually copied.
 */
static int sg_copy(struct p7ca_dev *dd, struct scatterlist **sg,
		   size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {

		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */
		sg_copy_buf(dd, buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		/* Current entry exhausted: move to the next one, or stop if
		 * the list ends. */
		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
+
+/*
+ * Program one hardware descriptor with source/destination bus addresses
+ * and the transfer size (in 32-bit words), then set DESC_VALID to start
+ * the DMA.  Always returns 0.
+ */
+static int p7ca_crypt_dma(struct p7ca_dev *dd,
+ dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length,
+ struct p7ca_desc_slot *slot)
+{
+ int len32;
+
+ slot->dma_size = length;
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+ p7ca_write(dd, P7CA_REG_DESC_SIZE(slot->slot_num), len32);
+ p7ca_write(dd, P7CA_REG_DESC_SOURCE(slot->slot_num), dma_addr_in);
+ p7ca_write(dd, P7CA_REG_DESC_DESTINATION(slot->slot_num), dma_addr_out);
+
+ /* Start DMA */
+ p7ca_write_mask(dd, P7CA_REG_DESC_CTRL(slot->slot_num), P7CA_DESC_CTRL_DESC_VALID,
+ P7CA_DESC_CTRL_DESC_VALID);
+
+ return 0;
+}
+
+/*
+ * Kick one DMA transfer for 'slot'.  When both scatterlists are single,
+ * word-aligned entries they are DMA-mapped directly (the "fast" path);
+ * otherwise the input is staged through the slot's coherent bounce
+ * buffers.  On success the remaining byte count in slot->total is
+ * decremented.  Returns 0 or a negative errno.
+ *
+ * Fix vs. original: the error path unmapped slot->out_sg with
+ * DMA_TO_DEVICE although it was mapped DMA_FROM_DEVICE, and it also
+ * unmapped scatterlists on the slow path where nothing had been mapped.
+ */
+static int p7ca_crypt_dma_start(struct p7ca_dev *dd, struct p7ca_desc_slot *slot)
+{
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
+	if (sg_is_last(slot->in_sg) && sg_is_last(slot->out_sg)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)slot->in_sg->offset, sizeof(u32));
+		out = IS_ALIGNED((u32)slot->out_sg->offset, sizeof(u32));
+
+		fast = in && out;
+	}
+
+	if (fast) {
+		count = min(slot->total, sg_dma_len(slot->in_sg));
+		count = min(count, sg_dma_len(slot->out_sg));
+
+		if (count != slot->total) {
+			dev_err(dd->dev, "request length != buffer length\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, slot->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, slot->out_sg, 1, DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, slot->in_sg, 1, DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(slot->in_sg);
+		addr_out = sg_dma_address(slot->out_sg);
+
+		dd->flags |= FLAGS_CMD_FAST;
+	} else {
+		/* stage the input through the coherent bounce buffer */
+		count = sg_copy(dd, &slot->in_sg, &slot->in_offset, slot->buf_in,
+				slot->buflen, slot->total, 0);
+
+		addr_in = slot->dma_addr_in;
+		addr_out = slot->dma_addr_out;
+
+		dd->flags &= ~FLAGS_CMD_FAST;
+	}
+
+	slot->total -= count;
+
+	err = p7ca_crypt_dma(dd, addr_in, addr_out, count, slot);
+	if (err && (dd->flags & FLAGS_CMD_FAST)) {
+		/* Only the fast path mapped anything; undo with the same
+		 * directions used at map time. */
+		dma_unmap_sg(dd->dev, slot->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, slot->out_sg, 1, DMA_FROM_DEVICE);
+	}
+
+	return err;
+}
+
+/*
+ * Report completion of the slot's request to the crypto layer via the
+ * request's completion callback (if one was supplied).
+ */
+static void p7ca_finish_req(struct p7ca_dev *dd,
+ struct p7ca_desc_slot *slot,
+ int err)
+{
+ if (slot->req->base.complete)
+ slot->req->base.complete(&slot->req->base, err);
+}
+
+/*
+ * Tear down one finished DMA transfer: unmap the scatterlists on the
+ * fast path, or copy the bounce buffer back out on the slow path and,
+ * for chaining modes (CBC/OFB/CTR) with data still pending, preserve
+ * the last ciphertext block as the next IV.  Returns 0 or -EINVAL when
+ * fewer bytes than expected could be copied back.
+ *
+ * Fix vs. original: 'count' is a size_t, so it must be printed with
+ * %zu, not %u.
+ */
+static int p7ca_crypt_dma_stop(struct p7ca_dev *dd, struct p7ca_desc_slot *slot)
+{
+	int err = 0;
+	size_t count;
+	struct p7ca_reqctx *rctx = ablkcipher_request_ctx(slot->req);
+
+	if (dd->flags & FLAGS_CMD_FAST) {
+		dma_unmap_sg(dd->dev, slot->out_sg, 1, DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, slot->in_sg, 1, DMA_TO_DEVICE);
+	} else {
+		/* copy data */
+		count = sg_copy(dd, &slot->out_sg, &slot->out_offset, slot->buf_out,
+				slot->buflen, slot->dma_size, 1);
+
+		if (count != slot->dma_size) {
+			err = -EINVAL;
+			dev_err(dd->dev, "not all data converted: %zu\n", count);
+		} else {
+			if (rctx != NULL && slot->buf_out != NULL) {
+				/* carry the IV forward for chaining modes */
+				if (((rctx->mode == ACRYPTO_MODE_CBC)
+					|| (rctx->mode == ACRYPTO_MODE_OFB)
+					|| (rctx->mode == ACRYPTO_MODE_CTR))
+						&& slot->total
+						&& rctx->ivsize) {
+					memcpy(rctx->iv, slot->buf_out + slot->dma_size - rctx->ivsize,
+							rctx->ivsize);
+				}
+			}
+		}
+	}
+
+	return err;
+}
+
+/*
+ * Enqueue 'req' (if non-NULL) and try to start the next queued request.
+ * Returns the enqueue status (or -EAGAIN while suspended); hardware
+ * completion is reported asynchronously through p7ca_done_task().
+ *
+ * NOTE(review): when no key or descriptor slot is free the dequeued
+ * request is pushed back onto the queue, which re-orders it behind any
+ * requests enqueued meanwhile -- confirm callers tolerate this.
+ */
+static int p7ca_handle_queue(struct p7ca_dev *dd,
+ struct ablkcipher_request *req)
+{
+ struct p7ca_desc_slot *slot;
+ struct crypto_async_request *async_req, *backlog;
+ struct p7ca_key_slot *key_slot;
+ unsigned long flags;
+ int err, ret = 0;
+ struct p7ca_ctx *ctx;
+
+ spin_lock_irqsave(&dd->lock, flags);
+
+ /* refuse new work while the controller clock is off */
+ if (dd->flags & FLAGS_SUSPENDED) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return -EAGAIN;
+ }
+
+ if (req)
+ ret = ablkcipher_enqueue_request(&dd->queue, req);
+
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ req = ablkcipher_request_cast(async_req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ ctx->dd = dd;
+
+ /* bind a hardware key slot to this transform on first use */
+ if (!ctx->slot) {
+ key_slot = p7ca_find_key_slot();
+ if (key_slot)
+ ctx->slot = key_slot;
+ else {
+ dev_dbg(dd->dev, "%s:no empty key slot\n", __func__);
+ spin_lock_irqsave(&dd->lock, flags);
+ ablkcipher_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&dd->lock, flags);
+ slot = p7ca_get_desc(dd);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!slot) {
+ dev_dbg(dd->dev, "%s:no empty desc slot\n", __func__);
+ spin_lock_irqsave(&dd->lock, flags);
+ ablkcipher_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ /* assign new request to device */
+ slot->req = req;
+ slot->total = req->nbytes;
+ slot->in_offset = 0;
+ slot->in_sg = req->src;
+ slot->out_offset = 0;
+ slot->out_sg = req->dst;
+
+ err = p7ca_write_ctrl(dd, slot);
+ if (!err)
+ err = p7ca_crypt_dma_start(dd, slot);
+
+ if (err) {
+
+ /* aes_task will not finish it, so do it here */
+ p7ca_finish_req(dd, slot, err);
+ p7ca_free_desc(dd, slot, 1);
+ schedule_work(&dd->queue_task);
+ return ret;
+ }
+
+ return ret; /* return ret, which is enqueue return value */
+}
+
+/*
+ * Bottom half run after a completion interrupt: for every hardware slot
+ * flagged in dd->irq_flags, finish its DMA, restart it if bytes remain,
+ * otherwise complete the request and release the slot.  Finally pump
+ * the software queue again.
+ *
+ * Fix vs. original: p7ca_get_desc_by_num() can return NULL (e.g. a
+ * spurious or stale status bit); the original dereferenced the result
+ * unconditionally.
+ */
+static void p7ca_done_task(struct work_struct *work)
+{
+	struct p7ca_dev *dd = container_of(work, struct p7ca_dev, done_task);
+	unsigned long flags;
+	struct p7ca_desc_slot *slot;
+	int i, err;
+
+	for (i = 0; i < P7CA_NR_DESCSLOTS; i++) {
+
+		if (!(dd->irq_flags & P7CA_IRQ_STATUS_EN(i)))
+			continue;
+
+		spin_lock_irqsave(&dd->lock, flags);
+		slot = p7ca_get_desc_by_num(dd, i);
+		dd->irq_flags &= ~P7CA_IRQ_STATUS_EN(i);
+		spin_unlock_irqrestore(&dd->lock, flags);
+
+		if (!slot) {
+			/* no descriptor bound to this slot: ignore the bit */
+			dev_dbg(dd->dev, "%s: no desc for slot %d\n",
+					__func__, i);
+			continue;
+		}
+
+		err = p7ca_crypt_dma_stop(dd, slot);
+
+		if (slot->total && !err) {
+
+			err = p7ca_write_ctrl(dd, slot);
+			if (!err) {
+				err = p7ca_crypt_dma_start(dd, slot);
+				if (!err)
+					continue; /* DMA started. Not fininishing. */
+			}
+		}
+
+		p7ca_finish_req(dd, slot, err);
+		p7ca_free_desc(dd, slot, 0);
+	}
+
+	p7ca_handle_queue(dd, NULL);
+}
+
+/* Deferred work item that simply pumps the request queue. */
+static void p7ca_queue_task(struct work_struct *work)
+{
+ struct p7ca_dev *dd = container_of(work, struct p7ca_dev, queue_task);
+ p7ca_handle_queue(dd, NULL);
+}
+
+/*
+ * Common entry point for every algorithm wrapper: validate that the
+ * request length is a whole number of cipher blocks, widen the AES type
+ * to match the key actually set (192/256 bit), record the operation in
+ * the per-request context and hand it to the queue.  Returns the
+ * enqueue status or -EINVAL on a misaligned request.
+ */
+static int p7ca_crypt(struct ablkcipher_request *req, u8 op,
+ u8 type, u8 mode)
+{
+ struct p7ca_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct p7ca_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct p7ca_dev *dd = p7ca_dev;
+ u32 blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+ unsigned ivsize;
+
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+
+ if (!IS_ALIGNED(req->nbytes, blocksize)) {
+ dev_err(dd->dev, "request size(%d) is not exact amount of blocks nbytes(%d)\n",
+ req->nbytes,
+ blocksize);
+ return -EINVAL;
+ }
+
+ /* callers always pass AES_128; upgrade from the stored key length */
+ if (ctx->keylen != AES_KEYSIZE_128 && type == ACRYPTO_TYPE_AES_128) {
+ if (ctx->keylen == AES_KEYSIZE_192)
+ type = ACRYPTO_TYPE_AES_192;
+ else if (ctx->keylen == AES_KEYSIZE_256)
+ type = ACRYPTO_TYPE_AES_256;
+ }
+
+ rctx->op = op;
+ rctx->mode = mode;
+ rctx->type = type;
+ rctx->iv = req->info;
+ rctx->ivsize = ivsize;
+
+ return p7ca_handle_queue(dd, req);
+}
+
+/* ********************** ALG API ************************************ */
+
+/*
+ * Shared .setkey for every registered cipher: store the key in the
+ * transform context and mark it for (re)loading into hardware.
+ *
+ * NOTE(review): the length check accepts the union of DES, 3DES and
+ * AES key sizes regardless of which algorithm the tfm belongs to, and
+ * no DES weak-key check is performed -- confirm this is acceptable.
+ */
+static int p7ca_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p7ca_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE
+ && keylen != AES_KEYSIZE_128
+ && keylen != AES_KEYSIZE_192
+ && keylen != AES_KEYSIZE_256) {
+ return -EINVAL;
+ }
+
+ if (key) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ }
+
+ ctx->flags |= FLAGS_NEW_KEY;
+
+ return 0;
+}
+
+/*
+ * Per-algorithm entry points referenced by the algs[] table: each one
+ * only tags the request with its (op, type, mode) triple and defers to
+ * p7ca_crypt().
+ */
+static int p7ca_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_OFB);
+}
+
+static int p7ca_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_OFB);
+}
+
+static int p7ca_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_CTR);
+}
+
+static int p7ca_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_AES_128,
+ ACRYPTO_MODE_CTR);
+}
+
+static int p7ca_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_des_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_OFB);
+}
+
+static int p7ca_des_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_DES,
+ ACRYPTO_MODE_OFB);
+}
+
+static int p7ca_3des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_3des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_ECB);
+}
+
+static int p7ca_3des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_3des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_CBC);
+}
+
+static int p7ca_3des_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_ENCODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_OFB);
+}
+
+static int p7ca_3des_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return p7ca_crypt(req,
+ ACRYPTO_OP_DECODE,
+ ACRYPTO_TYPE_3DES,
+ ACRYPTO_MODE_OFB);
+}
+
+/* Per-tfm init: reserve room for the per-request context. */
+static int p7ca_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct p7ca_reqctx);
+
+ return 0;
+}
+
+/*
+ * Per-tfm teardown: give the hardware key slot back to the pool.
+ * NOTE(review): the crypto_tfm is cast straight to crypto_ablkcipher --
+ * this relies on the context layout being identical for both views;
+ * confirm against this kernel's crypto internals.
+ */
+static void p7ca_cra_exit(struct crypto_tfm *tfm)
+{
+ struct p7ca_ctx *ctx =
+ crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
+
+ if (ctx && ctx->slot)
+ p7ca_release_key_slot(ctx->slot);
+}
+
+/* ********************** ALGS ************************************ */
+
+/*
+ * Algorithm descriptors registered with the crypto API at probe time.
+ * All entries share the p7ca_setkey/p7ca_cra_init/p7ca_cra_exit hooks
+ * and differ only in block size, key sizes, IV size and the
+ * encrypt/decrypt entry points.
+ */
+static struct crypto_alg algs[] = {
+ /* 3DES: ECB CBC and OFB are supported */
+ {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-3des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_3des_ecb_encrypt,
+ .decrypt = p7ca_3des_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_3des_cbc_encrypt,
+ .decrypt = p7ca_3des_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "ofb-3des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_3des_ofb_encrypt,
+ .decrypt = p7ca_3des_ofb_decrypt,
+ }
+ },
+ /* DES: ECB CBC and OFB are supported */
+ {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_des_ecb_encrypt,
+ .decrypt = p7ca_des_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_des_cbc_encrypt,
+ .decrypt = p7ca_des_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "ofb-des-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_des_ofb_encrypt,
+ .decrypt = p7ca_des_ofb_decrypt,
+ }
+ },
+ /* AES: ECB CBC OFB and CTR are supported */
+ {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_aes_ecb_encrypt,
+ .decrypt = p7ca_aes_ecb_decrypt,
+ }
+ },
+ {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_aes_cbc_encrypt,
+ .decrypt = p7ca_aes_cbc_decrypt,
+ }
+ },
+ {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_aes_ofb_encrypt,
+ .decrypt = p7ca_aes_ofb_decrypt,
+ }
+ },
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-p7",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p7ca_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = p7ca_cra_init,
+ .cra_exit = p7ca_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = p7ca_setkey,
+ .encrypt = p7ca_aes_ctr_encrypt,
+ .decrypt = p7ca_aes_ctr_decrypt,
+ }
+ },
+};
+
+/*
+ * Probe: allocate driver state, map resources, install the interrupt
+ * handler, pre-allocate per-descriptor DMA bounce buffers, populate the
+ * key-slot free list and register the algorithms.
+ *
+ * Fixes vs. original:
+ *  - dev_err(dd->dev, ...) was called with dd == NULL on allocation
+ *    failure, and again after kfree(dd) on the common error path;
+ *  - the clock was left enabled when request_irq()/platform_get_irq()
+ *    failed (err_putclk only did clk_put);
+ *  - goto err_res jumped past the kfree() of key_slots/desc_slots,
+ *    leaking both arrays;
+ *  - platform_get_irq() return value was never checked.
+ */
+static int p7ca_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct p7ca_dev *dd;
+	struct resource *res;
+	int err = -ENOMEM, i, j;
+
+	dd = kzalloc(sizeof(struct p7ca_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		/* dd is NULL here: report through the device we do have */
+		dev_err(dev, "unable to alloc data struct.\n");
+		return err;
+	}
+
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	dd->key_slots = kzalloc(sizeof(struct p7ca_key_slot) *
+			P7CA_NR_KEYSLOTS, GFP_KERNEL);
+	if (dd->key_slots == NULL) {
+		dev_err(dd->dev, "unable to alloc slot struct.\n");
+		goto err_free;
+	}
+
+	dd->desc_slots = kzalloc(sizeof(struct p7ca_desc_slot) *
+			P7CA_NR_DESCSLOTS, GFP_KERNEL);
+	if (dd->desc_slots == NULL) {
+		dev_err(dd->dev, "unable to alloc slot struct.\n");
+		goto err_free;
+	}
+	dd->cur_desc = -1;
+
+	spin_lock_init(&dd->lock);
+	crypto_init_queue(&dd->queue, P7CA_QUEUE_LENGTH);
+
+	/* Get the base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dd->dev, "invalid resource type\n");
+		err = -ENODEV;
+		goto err_free;
+	}
+	dd->phys_base = res->start;
+
+	dd->iclk = clk_get(dev, NULL);
+	if (IS_ERR(dd->iclk)) {
+		dev_err(dd->dev, "clock initialization failed.\n");
+		err = PTR_ERR(dd->iclk);
+		goto err_free;
+	}
+
+	err = clk_prepare_enable(dd->iclk);
+	if (err) {
+		dev_err(dd->dev, "Cant start clock\n");
+		goto err_putclk;
+	}
+
+	dd->irq = platform_get_irq(pdev, 0);
+	if (dd->irq < 0) {
+		err = dd->irq;
+		goto err_disclk;
+	}
+
+	err = request_irq(dd->irq,
+			p7ca_interrupt,
+			0,
+			dev_name(&pdev->dev),
+			dd);
+	if (err) {
+		dev_err(dd->dev, "request irq failed (%d)\n", err);
+		goto err_disclk;
+	}
+
+	dd->io_base = ioremap(dd->phys_base, SZ_4K);
+	if (!dd->io_base) {
+		dev_err(dd->dev, "can't ioremap\n");
+		err = -ENOMEM;
+		goto err_irq;
+	}
+
+	INIT_WORK(&dd->done_task, p7ca_done_task);
+	INIT_WORK(&dd->queue_task, p7ca_queue_task);
+
+	for (i = 0; i < P7CA_NR_DESCSLOTS; i++) {
+		dd->desc_slots[i].slot_num = -1;
+		dd->desc_slots[i].flags = FLAGS_DESC_UNUSED;
+		err = p7ca_dma_alloc_desc(dd, &dd->desc_slots[i]);
+		if (err) {
+			dev_err(dd->dev, "error dma alloc(%d)\n", err);
+			goto err_dma;
+		}
+	}
+
+	INIT_LIST_HEAD(&dev_list);
+
+	spin_lock_init(&list_lock);
+	spin_lock(&list_lock);
+	for (i = 0; i < P7CA_NR_KEYSLOTS; i++) {
+		dd->key_slots[i].slot_num = i;
+		INIT_LIST_HEAD(&dd->key_slots[i].node);
+		list_add_tail(&dd->key_slots[i].node, &dev_list);
+	}
+	spin_unlock(&list_lock);
+
+	p7ca_dev = dd;
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		INIT_LIST_HEAD(&algs[i].cra_list);
+		err = crypto_register_alg(&algs[i]);
+		if (err)
+			goto err_algs;
+	}
+
+	p7ca_write(dd, P7CA_REG_IRQ_EN, P7CA_IRQ_EN_ENABLE);
+
+	dev_info(dev, "registered\n");
+
+	return 0;
+
+err_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+err_dma:
+	for (i = 0; i < P7CA_NR_DESCSLOTS; i++)
+		p7ca_dma_cleanup(dd, &dd->desc_slots[i]);
+
+	cancel_work_sync(&dd->done_task);
+	cancel_work_sync(&dd->queue_task);
+	iounmap(dd->io_base);
+err_irq:
+	free_irq(dd->irq, dd);
+err_disclk:
+	clk_disable_unprepare(dd->iclk);
+err_putclk:
+	clk_put(dd->iclk);
+err_free:
+	/* kfree(NULL) is a no-op, so partial allocations are safe here */
+	kfree(dd->desc_slots);
+	kfree(dd->key_slots);
+	dev_err(dev, "initialization failed.\n");
+	kfree(dd);
+	p7ca_dev = NULL;
+
+	return err;
+}
+
+/*
+ * Remove: unregister algorithms, quiesce the work items, mask the
+ * interrupt register, and release DMA buffers, IRQ, mapping, clock and
+ * memory in reverse probe order.
+ */
+static int p7ca_remove(struct platform_device *pdev)
+{
+ struct p7ca_dev *dd = platform_get_drvdata(pdev);
+ int i;
+
+ if (!dd)
+ return -ENODEV;
+
+ /*
+ * NOTE(review): this deletes the list *head* itself, not the
+ * key-slot nodes chained on it -- confirm this is the intent.
+ */
+ spin_lock(&list_lock);
+ list_del(&dev_list);
+ spin_unlock(&list_lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_alg(&algs[i]);
+
+ cancel_work_sync(&dd->done_task);
+ cancel_work_sync(&dd->queue_task);
+
+ /* mask all interrupts before tearing the mapping down */
+ p7ca_write(dd,
+ P7CA_REG_IRQ_EN, 0);
+
+ for (i = 0; i < P7CA_NR_DESCSLOTS; i++)
+ p7ca_dma_cleanup(dd,
+ &dd->desc_slots[i]);
+
+ free_irq(dd->irq, dd);
+
+ iounmap(dd->io_base);
+ clk_disable_unprepare(dd->iclk);
+ clk_put(dd->iclk);
+ kfree(dd->key_slots);
+ kfree(dd->desc_slots);
+ kfree(dd);
+ p7ca_dev = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System suspend: flag the device as suspended (p7ca_handle_queue()
+ * then refuses new work), mask hardware interrupts and gate the clock.
+ */
+static int p7ca_suspend(struct device *dev)
+{
+ struct p7ca_dev *dd = p7ca_dev;
+
+ dd->flags |= FLAGS_SUSPENDED;
+ p7ca_write(dd,
+ P7CA_REG_IRQ_EN, 0);
+ clk_disable_unprepare(dd->iclk);
+
+ return 0;
+}
+
+/*
+ * System resume: re-enable the clock, unmask interrupts, clear the
+ * suspended flag and restart the queue.
+ *
+ * Fix vs. original: clk_prepare_enable() can fail and its return value
+ * was ignored; registers must not be touched with the clock off.
+ */
+static int p7ca_resume(struct device *dev)
+{
+	struct p7ca_dev *dd = p7ca_dev;
+	int err;
+
+	err = clk_prepare_enable(dd->iclk);
+	if (err)
+		return err;
+
+	p7ca_write(dd,
+			P7CA_REG_IRQ_EN,
+			P7CA_IRQ_EN_ENABLE);
+	dd->flags &= ~FLAGS_SUSPENDED;
+	p7ca_handle_queue(dd, NULL);
+
+	return 0;
+}
+#else
+#define p7ca_suspend NULL
+#define p7ca_resume NULL
+#endif
+
+/* PM callbacks (NULL when CONFIG_PM_SLEEP is off, see above). */
+static const struct dev_pm_ops p7ca_dev_pm_ops = {
+ .suspend = p7ca_suspend,
+ .resume = p7ca_resume,
+};
+
+/* Platform glue: binds to the "p7-crypto" platform device. */
+static struct platform_driver p7ca_driver = {
+ .probe = p7ca_probe,
+ .remove = p7ca_remove,
+ .driver = {
+ .name = "p7-crypto",
+ .owner = THIS_MODULE,
+ .pm = &p7ca_dev_pm_ops,
+ },
+};
+
+/* Module entry point: announce and register the platform driver. */
+static int __init p7ca_mod_init(void)
+{
+	pr_info("loading %s driver\n", "p7-crypto");
+
+	/* propagate the registration status directly */
+	return platform_driver_register(&p7ca_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit p7ca_mod_exit(void)
+{
+ platform_driver_unregister(&p7ca_driver);
+}
+
+module_init(p7ca_mod_init);
+module_exit(p7ca_mod_exit);
+
+MODULE_DESCRIPTION("P7 AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Karl Leplat");
+
diff --git a/drivers/parrot/crypto/p7-crypto_regs.h b/drivers/parrot/crypto/p7-crypto_regs.h
new file mode 100644
index 00000000000000..877699461415d8
--- /dev/null
+++ b/drivers/parrot/crypto/p7-crypto_regs.h
@@ -0,0 +1,70 @@
+/**
+********************************************************************************
+* @file p7-crypto_regs.h
+* @brief P7 cryptographic accelerator register definitions
+*
+* Copyright (C) 2013 Parrot S.A.
+*
+* @author Karl Leplat <karl.leplat@parrot.com>
+* @date 2013-09-19
+********************************************************************************
+*/
+#ifndef __REG_P7CA_H__
+#define __REG_P7CA_H__
+
+#define P7CA_REG_CTRL_STATUS 0x00
+#define P7CA_REG_IRQ_STATUS 0x04
+#define P7CA_REG_FIFO_DATA_OUTPUT_FILL_LEVEL 0x08
+#define P7CA_REG_FIFO_DATA_INPUT_FILL_LEVEL 0x0c
+#define P7CA_REG_DMA_WRITE_COUNT_DOWN 0x10
+#define P7CA_REG_DMA_READ_COUNT_DOWN 0x14
+
+#define P7CA_REG_DESC_CTRL(_id_) (0x100 + 16*(_id_))
+#define P7CA_REG_DESC_SIZE(_id_) (0x104 + 16*(_id_))
+#define P7CA_REG_DESC_SOURCE(_id_) (0x108 + 16*(_id_))
+#define P7CA_REG_DESC_DESTINATION(_id_) (0x10c + 16*(_id_))
+
+#define P7CA_REG_KEY(_id_) (0x200 + 32*(_id_))
+#define P7CA_REG_IV(_id_) (0x300 + 16*(_id_))
+
+#define P7CA_REG_IRQ_MEM 0x400
+#define P7CA_REG_IRQ_EN 0x404
+
+#define P7CA_IRQ_EN_ENABLE 1
+#define P7CA_IRQ_MEM_CLEAR 1
+
+#define P7CA_CTRL_STATUS_CURRENT_DESC (1 << 0)
+#define P7CA_CTRL_STATUS_NB_OF_ACTIVE_DESC (1 << 8)
+#define P7CA_CTRL_STATUS_ACTIVE_DESC_INT_THRES (1 << 16)
+
+#define P7CA_IRQ_STATUS_EN(_id_) (1 << (_id_))
+#define P7CA_IRQ_STATUS_THRESH (1 << 16)
+
+#define P7CA_DESC_CTRL_CIPH_ALG_IDX 1
+#define P7CA_DESC_CTRL_CIPH_MODE_IDX 4
+#define P7CA_DESC_CTRL_DECODE (1 << 0)
+#define P7CA_DESC_CTRL_NEW_KEY_SET (1 << 7)
+#define P7CA_DESC_CTRL_INT_ENABLE (1 << 11)
+#define P7CA_DESC_CTRL_DESC_VALID (1 << 12)
+#define P7CA_DESC_CTRL_DESC_DEST_ENDIAN (1 << 13)
+#define P7CA_DESC_CTRL_DESC_SRC_ENDIAN (1 << 14)
+
+#define P7CA_DESC_CTRL_CIPH_ALG_DES (0x00 << P7CA_DESC_CTRL_CIPH_ALG_IDX)
+#define P7CA_DESC_CTRL_CIPH_ALG_TDEA (0x01 << P7CA_DESC_CTRL_CIPH_ALG_IDX)
+#define P7CA_DESC_CTRL_CIPH_ALG_AES_128 (0x02 << P7CA_DESC_CTRL_CIPH_ALG_IDX)
+#define P7CA_DESC_CTRL_CIPH_ALG_AES_192 (0x03 << P7CA_DESC_CTRL_CIPH_ALG_IDX)
+#define P7CA_DESC_CTRL_CIPH_ALG_AES_256 (0x04 << P7CA_DESC_CTRL_CIPH_ALG_IDX)
+
+#define P7CA_DESC_CTRL_CIPH_MODE_ECB (0x00 << P7CA_DESC_CTRL_CIPH_MODE_IDX)
+#define P7CA_DESC_CTRL_CIPH_MODE_CBC (0x01 << P7CA_DESC_CTRL_CIPH_MODE_IDX)
+#define P7CA_DESC_CTRL_CIPH_MODE_CTR (0x02 << P7CA_DESC_CTRL_CIPH_MODE_IDX)
+#define P7CA_DESC_CTRL_CIPH_MODE_OFB (0x03 << P7CA_DESC_CTRL_CIPH_MODE_IDX)
+#define P7CA_DESC_CTRL_CIPH_MODE_F8 (0x04 << P7CA_DESC_CTRL_CIPH_MODE_IDX)
+
+#define P7CA_DESC_CTRL_KEY_INDEX(x) ((x) << 8)
+
+#define CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_WIDTH 5
+#define CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_SHIFT 8
+#define CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_MASK ((uint32_t) ((1<<CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_WIDTH) - 1) << CRYPTO_CTRL_AND_STATUS_NB_OF_ACTIVE_DESC_SHIFT)
+#endif /* __REG_P7CA_H__ */
+
diff --git a/drivers/parrot/dma/Kconfig b/drivers/parrot/dma/Kconfig
new file mode 100644
index 00000000000000..9ca4dc472a0ba4
--- /dev/null
+++ b/drivers/parrot/dma/Kconfig
@@ -0,0 +1,7 @@
+config DMA_PARROT7
+ bool "Parrot7 PL330 DMA controller workarounds"
+ depends on PL330_DMA
+ default ARCH_VEXPRESS_P7FPGA || ARCH_PARROT7
+ help
+ Enable workarounds for Parrot7 only implementation of PL330 based DMA controller.
+
diff --git a/drivers/parrot/dummy.c b/drivers/parrot/dummy.c
new file mode 100644
index 00000000000000..dae2744e6d9bef
--- /dev/null
+++ b/drivers/parrot/dummy.c
@@ -0,0 +1 @@
+/* I don't want to be deleted by distclean */
diff --git a/drivers/parrot/gator/Kconfig b/drivers/parrot/gator/Kconfig
new file mode 100644
index 00000000000000..433057d572f153
--- /dev/null
+++ b/drivers/parrot/gator/Kconfig
@@ -0,0 +1,39 @@
+config GATOR
+ tristate "Gator module for ARM's Streamline Performance Analyzer"
+ default m if (ARM || ARM64)
+ depends on PROFILING
+ depends on HIGH_RES_TIMERS
+ depends on LOCAL_TIMERS || !(ARM && SMP)
+ depends on PERF_EVENTS
+ depends on HW_PERF_EVENTS || !(ARM || ARM64)
+ select TRACING
+ help
+ Gator module for ARM's Streamline Performance Analyzer
+
+config GATOR_WITH_MALI_SUPPORT
+ bool
+
+choice
+ prompt "Enable Mali GPU support in Gator"
+ depends on GATOR
+ optional
+ help
+ Enable Mali GPU support in Gator
+
+config GATOR_MALI_4XXMP
+ bool "Mali-400MP or Mali-450MP"
+ select GATOR_WITH_MALI_SUPPORT
+
+config GATOR_MALI_MIDGARD
+ bool "Mali-T60x, Mali-T62x, Mali-T72x or Mali-T76x"
+ select GATOR_WITH_MALI_SUPPORT
+
+endchoice
+
+config GATOR_MALI_PATH
+ string "Path to Mali driver"
+ depends on GATOR_WITH_MALI_SUPPORT
+ default "drivers/parrot/gpu/mali400"
+ help
+ The gator code adds this to its include path so it can get the Mali
+ trace headers with: #include "linux/mali_linux_trace.h"
diff --git a/drivers/parrot/gator/LICENSE b/drivers/parrot/gator/LICENSE
new file mode 100644
index 00000000000000..d159169d105089
--- /dev/null
+++ b/drivers/parrot/gator/LICENSE
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/drivers/parrot/gator/Makefile b/drivers/parrot/gator/Makefile
new file mode 100644
index 00000000000000..524749ec48d5a6
--- /dev/null
+++ b/drivers/parrot/gator/Makefile
@@ -0,0 +1,78 @@
+ifneq ($(KERNELRELEASE),)
+
+# Uncomment the following line to enable kernel stack unwinding within gator, or update gator_backtrace.c
+# EXTRA_CFLAGS += -DGATOR_KERNEL_STACK_UNWINDING
+
+obj-$(CONFIG_GATOR) := gator.o
+
+gator-y := gator_main.o \
+ gator_events_block.o \
+ gator_events_irq.o \
+ gator_events_meminfo.o \
+ gator_events_mmapped.o \
+ gator_events_net.o \
+ gator_events_perf_pmu.o \
+ gator_events_sched.o \
+
+# Convert the old GATOR_WITH_MALI_SUPPORT to the new kernel flags
+ifneq ($(GATOR_WITH_MALI_SUPPORT),)
+ CONFIG_GATOR_WITH_MALI_SUPPORT := y
+ ifeq ($(GATOR_WITH_MALI_SUPPORT),MALI_MIDGARD)
+ CONFIG_GATOR_MALI_4XXMP := n
+ CONFIG_GATOR_MALI_MIDGARD := y
+ else
+ CONFIG_GATOR_MALI_4XXMP := y
+ CONFIG_GATOR_MALI_MIDGARD := n
+ endif
+ EXTRA_CFLAGS += -DMALI_SUPPORT=$(GATOR_WITH_MALI_SUPPORT)
+ ifneq ($(GATOR_MALI_INTERFACE_STYLE),)
+ EXTRA_CFLAGS += -DGATOR_MALI_INTERFACE_STYLE=$(GATOR_MALI_INTERFACE_STYLE)
+ endif
+endif
+
+ifeq ($(CONFIG_GATOR_WITH_MALI_SUPPORT),y)
+ ifeq ($(CONFIG_GATOR_MALI_MIDGARD),y)
+ gator-y += gator_events_mali_midgard.o \
+ gator_events_mali_midgard_hw.o
+ include $(src)/mali_midgard.mk
+ else
+ gator-y += gator_events_mali_4xx.o
+ endif
+ gator-y += gator_events_mali_common.o
+
+ ifneq ($(CONFIG_GATOR_MALI_PATH),)
+ ccflags-y += -I$(CONFIG_GATOR_MALI_PATH)
+ endif
+ ccflags-$(CONFIG_GATOR_MALI_4XXMP) += -DMALI_SUPPORT=MALI_4xx
+ ccflags-$(CONFIG_GATOR_MALI_MIDGARD) += -DMALI_SUPPORT=MALI_MIDGARD
+endif
+
+# GATOR_TEST controls whether to include (=1) or exclude (=0) test code.
+GATOR_TEST ?= 0
+EXTRA_CFLAGS += -DGATOR_TEST=$(GATOR_TEST)
+
+# Should the original or new block_rq_complete API be used?
+OLD_BLOCK_RQ_COMPLETE := $(shell grep -A3 block_rq_complete $(srctree)/include/trace/events/block.h | grep nr_bytes -q; echo $$?)
+EXTRA_CFLAGS += -DOLD_BLOCK_RQ_COMPLETE=$(OLD_BLOCK_RQ_COMPLETE)
+
+gator-$(CONFIG_ARM) += gator_events_armv6.o \
+ gator_events_armv7.o \
+ gator_events_l2c-310.o \
+ gator_events_scorpion.o
+
+gator-$(CONFIG_ARM64) +=
+
+else
+
+all:
+ @echo
+ @echo "usage:"
+ @echo " make -C <kernel_build_dir> M=\`pwd\` ARCH=arm CROSS_COMPILE=<...> modules"
+ @echo
+ $(error)
+
+clean:
+ rm -f *.o .*.cmd modules.order Module.symvers gator.ko gator.mod.c
+ rm -rf .tmp_versions
+
+endif
diff --git a/drivers/parrot/gator/gator.h b/drivers/parrot/gator/gator.h
new file mode 100644
index 00000000000000..5cc73a388c4f71
--- /dev/null
+++ b/drivers/parrot/gator/gator.h
@@ -0,0 +1,152 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GATOR_H_
+#define GATOR_H_
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+
+#define GATOR_PERF_SUPPORT (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+#define GATOR_PERF_PMU_SUPPORT (GATOR_PERF_SUPPORT && defined(CONFIG_PERF_EVENTS) && (!(defined(__arm__) || defined(__aarch64__)) || defined(CONFIG_HW_PERF_EVENTS)))
+#define GATOR_NO_PERF_SUPPORT (!(GATOR_PERF_SUPPORT))
+#define GATOR_CPU_FREQ_SUPPORT ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)) && defined(CONFIG_CPU_FREQ))
+#define GATOR_IKS_SUPPORT defined(CONFIG_BL_SWITCHER)
+
+/* cpu ids */
+#define ARM1136 0xb36
+#define ARM1156 0xb56
+#define ARM1176 0xb76
+#define ARM11MPCORE 0xb02
+#define CORTEX_A5 0xc05
+#define CORTEX_A7 0xc07
+#define CORTEX_A8 0xc08
+#define CORTEX_A9 0xc09
+#define CORTEX_A15 0xc0f
+#define CORTEX_A17 0xc0e
+#define SCORPION 0x00f
+#define SCORPIONMP 0x02d
+#define KRAITSIM 0x049
+#define KRAIT 0x04d
+#define KRAIT_S4_PRO 0x06f
+#define CORTEX_A53 0xd03
+#define CORTEX_A57 0xd07
+#define AARCH64 0xd0f
+#define OTHER 0xfff
+
+/* gpu enums */
+#define MALI_4xx 1
+#define MALI_MIDGARD 2
+
+#define MAXSIZE_CORE_NAME 32
+
+struct gator_cpu {
+ const int cpuid;
+ /* Human readable name */
+ const char core_name[MAXSIZE_CORE_NAME];
+ /* gatorfs event and Perf PMU name */
+ const char *const pmnc_name;
+ /* compatible from Documentation/devicetree/bindings/arm/cpus.txt */
+ const char *const dt_name;
+ const int pmnc_counters;
+};
+
+const struct gator_cpu *gator_find_cpu_by_cpuid(const u32 cpuid);
+const struct gator_cpu *gator_find_cpu_by_pmu_name(const char *const name);
+
+/******************************************************************************
+ * Filesystem
+ ******************************************************************************/
+struct dentry *gatorfs_mkdir(struct super_block *sb, struct dentry *root,
+ char const *name);
+
+int gatorfs_create_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val);
+
+int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val);
+
+/******************************************************************************
+ * Tracepoints
+ ******************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+# error Kernels prior to 2.6.32 not supported
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+# define GATOR_DEFINE_PROBE(probe_name, proto) \
+ static void probe_##probe_name(PARAMS(proto))
+# define GATOR_REGISTER_TRACE(probe_name) \
+ register_trace_##probe_name(probe_##probe_name)
+# define GATOR_UNREGISTER_TRACE(probe_name) \
+ unregister_trace_##probe_name(probe_##probe_name)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
+# define GATOR_DEFINE_PROBE(probe_name, proto) \
+ static void probe_##probe_name(void *data, PARAMS(proto))
+# define GATOR_REGISTER_TRACE(probe_name) \
+ register_trace_##probe_name(probe_##probe_name, NULL)
+# define GATOR_UNREGISTER_TRACE(probe_name) \
+ unregister_trace_##probe_name(probe_##probe_name, NULL)
+#else
+# define GATOR_DEFINE_PROBE(probe_name, proto) \
+ extern struct tracepoint *gator_tracepoint_##probe_name; \
+ static void probe_##probe_name(void *data, PARAMS(proto))
+# define GATOR_REGISTER_TRACE(probe_name) \
+ ((gator_tracepoint_##probe_name == NULL) || tracepoint_probe_register(gator_tracepoint_##probe_name, probe_##probe_name, NULL))
+# define GATOR_UNREGISTER_TRACE(probe_name) \
+ tracepoint_probe_unregister(gator_tracepoint_##probe_name, probe_##probe_name, NULL)
+#endif
+
+/******************************************************************************
+ * Events
+ ******************************************************************************/
+struct gator_interface {
+ /* Complementary function to init */
+ void (*shutdown)(void);
+ int (*create_files)(struct super_block *sb, struct dentry *root);
+ int (*start)(void);
+ /* Complementary function to start */
+ void (*stop)(void);
+ int (*online)(int **buffer, bool migrate);
+ int (*offline)(int **buffer, bool migrate);
+ /* called in process context but may not be running on core 'cpu' */
+ void (*online_dispatch)(int cpu, bool migrate);
+ /* called in process context but may not be running on core 'cpu' */
+ void (*offline_dispatch)(int cpu, bool migrate);
+ int (*read)(int **buffer, bool sched_switch);
+ int (*read64)(long long **buffer);
+ int (*read_proc)(long long **buffer, struct task_struct *);
+ struct list_head list;
+};
+
+int gator_events_install(struct gator_interface *interface);
+int gator_events_get_key(void);
+u32 gator_cpuid(void);
+
+void gator_backtrace_handler(struct pt_regs *const regs);
+
+void gator_marshal_activity_switch(int core, int key, int activity, int pid);
+
+#if !GATOR_IKS_SUPPORT
+
+#define get_physical_cpu() smp_processor_id()
+#define lcpu_to_pcpu(lcpu) lcpu
+#define pcpu_to_lcpu(pcpu) pcpu
+
+#else
+
+#define get_physical_cpu() lcpu_to_pcpu(get_logical_cpu())
+int lcpu_to_pcpu(const int lcpu);
+int pcpu_to_lcpu(const int pcpu);
+
+#endif
+
+#define get_logical_cpu() smp_processor_id()
+#define on_primary_core() (get_logical_cpu() == 0)
+
+#endif /* GATOR_H_ */
diff --git a/drivers/parrot/gator/gator_annotate.c b/drivers/parrot/gator/gator_annotate.c
new file mode 100644
index 00000000000000..ff9a3cef7b2e3b
--- /dev/null
+++ b/drivers/parrot/gator/gator_annotate.c
@@ -0,0 +1,189 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/current.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(annotate_lock);
+static bool collect_annotations;
+
+static int annotate_copy(struct file *file, char const __user *buf, size_t count)
+{
+ int cpu = 0;
+ int write = per_cpu(gator_buffer_write, cpu)[ANNOTATE_BUF];
+
+ if (file == NULL) {
+ /* copy from kernel */
+ memcpy(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count);
+ } else {
+ /* copy from user space */
+ if (copy_from_user(&per_cpu(gator_buffer, cpu)[ANNOTATE_BUF][write], buf, count) != 0)
+ return -1;
+ }
+ per_cpu(gator_buffer_write, cpu)[ANNOTATE_BUF] = (write + count) & gator_buffer_mask[ANNOTATE_BUF];
+
+ return 0;
+}
+
+static ssize_t annotate_write(struct file *file, char const __user *buf, size_t count_orig, loff_t *offset)
+{
+ int pid, cpu, header_size, available, contiguous, length1, length2, size, count = count_orig & 0x7fffffff;
+ bool interrupt_context;
+
+ if (*offset)
+ return -EINVAL;
+
+ interrupt_context = in_interrupt();
+ /* Annotations are not supported in interrupt context, but may work
+	 * if you comment out the next four lines of code. By doing so,
+ * annotations in interrupt context can result in deadlocks and lost
+ * data.
+ */
+ if (interrupt_context) {
+ pr_warning("gator: Annotations are not supported in interrupt context. Edit gator_annotate.c in the gator driver to enable annotations in interrupt context.\n");
+ return -EINVAL;
+ }
+
+ retry:
+ /* synchronize between cores and with collect_annotations */
+ spin_lock(&annotate_lock);
+
+ if (!collect_annotations) {
+ /* Not collecting annotations, tell the caller everything was written */
+ size = count_orig;
+ goto annotate_write_out;
+ }
+
+ /* Annotation only uses a single per-cpu buffer as the data must be in order to the engine */
+ cpu = 0;
+
+ if (current == NULL)
+ pid = 0;
+ else
+ pid = current->pid;
+
+ /* determine total size of the payload */
+ header_size = MAXSIZE_PACK32 * 3 + MAXSIZE_PACK64;
+ available = buffer_bytes_available(cpu, ANNOTATE_BUF) - header_size;
+ size = count < available ? count : available;
+
+ if (size <= 0) {
+ /* Buffer is full, wait until space is available */
+ spin_unlock(&annotate_lock);
+
+ /* Drop the annotation as blocking is not allowed in interrupt context */
+ if (interrupt_context)
+ return -EINVAL;
+
+ wait_event_interruptible(gator_annotate_wait, buffer_bytes_available(cpu, ANNOTATE_BUF) > header_size || !collect_annotations);
+
+ /* Check to see if a signal is pending */
+ if (signal_pending(current))
+ return -EINTR;
+
+ goto retry;
+ }
+
+ /* synchronize shared variables annotateBuf and annotatePos */
+ if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF]) {
+ u64 time = gator_get_time();
+
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
+ gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, time);
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, size);
+
+ /* determine the sizes to capture, length1 + length2 will equal size */
+ contiguous = contiguous_space_available(cpu, ANNOTATE_BUF);
+ if (size < contiguous) {
+ length1 = size;
+ length2 = 0;
+ } else {
+ length1 = contiguous;
+ length2 = size - contiguous;
+ }
+
+ if (annotate_copy(file, buf, length1) != 0) {
+ size = -EINVAL;
+ goto annotate_write_out;
+ }
+
+ if (length2 > 0 && annotate_copy(file, &buf[length1], length2) != 0) {
+ size = -EINVAL;
+ goto annotate_write_out;
+ }
+
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, ANNOTATE_BUF, time);
+ }
+
+annotate_write_out:
+ spin_unlock(&annotate_lock);
+
+ /* return the number of bytes written */
+ return size;
+}
+
+#include "gator_annotate_kernel.c"
+
+static int annotate_release(struct inode *inode, struct file *file)
+{
+ int cpu = 0;
+
+ /* synchronize between cores */
+ spin_lock(&annotate_lock);
+
+ if (per_cpu(gator_buffer, cpu)[ANNOTATE_BUF] && buffer_check_space(cpu, ANNOTATE_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
+ uint32_t pid = current->pid;
+
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, get_physical_cpu());
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, pid);
+ /* time */
+ gator_buffer_write_packed_int64(cpu, ANNOTATE_BUF, 0);
+ /* size */
+ gator_buffer_write_packed_int(cpu, ANNOTATE_BUF, 0);
+ }
+
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, ANNOTATE_BUF, gator_get_time());
+
+ spin_unlock(&annotate_lock);
+
+ return 0;
+}
+
+static const struct file_operations annotate_fops = {
+ .write = annotate_write,
+ .release = annotate_release
+};
+
+static int gator_annotate_create_files(struct super_block *sb, struct dentry *root)
+{
+ return gatorfs_create_file_perm(sb, root, "annotate", &annotate_fops, 0666);
+}
+
+static int gator_annotate_start(void)
+{
+ collect_annotations = true;
+ return 0;
+}
+
+static void gator_annotate_stop(void)
+{
+ /* the spinlock here will ensure that when this function exits, we are not in the middle of an annotation */
+ spin_lock(&annotate_lock);
+ collect_annotations = false;
+ wake_up(&gator_annotate_wait);
+ spin_unlock(&annotate_lock);
+}
diff --git a/drivers/parrot/gator/gator_annotate_kernel.c b/drivers/parrot/gator/gator_annotate_kernel.c
new file mode 100644
index 00000000000000..69471f99e5fb37
--- /dev/null
+++ b/drivers/parrot/gator/gator_annotate_kernel.c
@@ -0,0 +1,200 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define ESCAPE_CODE 0x1c
+#define STRING_ANNOTATION 0x06
+#define NAME_CHANNEL_ANNOTATION 0x07
+#define NAME_GROUP_ANNOTATION 0x08
+#define VISUAL_ANNOTATION 0x04
+#define MARKER_ANNOTATION 0x05
+
+static void kannotate_write(const char *ptr, unsigned int size)
+{
+ int retval;
+ int pos = 0;
+ loff_t offset = 0;
+
+ while (pos < size) {
+ retval = annotate_write(NULL, &ptr[pos], size - pos, &offset);
+ if (retval < 0) {
+ pr_warning("gator: kannotate_write failed with return value %d\n", retval);
+ return;
+ }
+ pos += retval;
+ }
+}
+
+static void marshal_u16(char *buf, u16 val)
+{
+ buf[0] = val & 0xff;
+ buf[1] = (val >> 8) & 0xff;
+}
+
+static void marshal_u32(char *buf, u32 val)
+{
+ buf[0] = val & 0xff;
+ buf[1] = (val >> 8) & 0xff;
+ buf[2] = (val >> 16) & 0xff;
+ buf[3] = (val >> 24) & 0xff;
+}
+
+void gator_annotate_channel(int channel, const char *str)
+{
+ const u16 str_size = strlen(str) & 0xffff;
+ char header[8];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = STRING_ANNOTATION;
+ marshal_u32(header + 2, channel);
+ marshal_u16(header + 6, str_size);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+EXPORT_SYMBOL(gator_annotate_channel);
+
+void gator_annotate(const char *str)
+{
+ gator_annotate_channel(0, str);
+}
+EXPORT_SYMBOL(gator_annotate);
+
+void gator_annotate_channel_color(int channel, int color, const char *str)
+{
+ const u16 str_size = (strlen(str) + 4) & 0xffff;
+ char header[12];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = STRING_ANNOTATION;
+ marshal_u32(header + 2, channel);
+ marshal_u16(header + 6, str_size);
+ marshal_u32(header + 8, color);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size - 4);
+}
+EXPORT_SYMBOL(gator_annotate_channel_color);
+
+void gator_annotate_color(int color, const char *str)
+{
+ gator_annotate_channel_color(0, color, str);
+}
+EXPORT_SYMBOL(gator_annotate_color);
+
+void gator_annotate_channel_end(int channel)
+{
+ char header[8];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = STRING_ANNOTATION;
+ marshal_u32(header + 2, channel);
+ marshal_u16(header + 6, 0);
+ kannotate_write(header, sizeof(header));
+}
+EXPORT_SYMBOL(gator_annotate_channel_end);
+
+void gator_annotate_end(void)
+{
+ gator_annotate_channel_end(0);
+}
+EXPORT_SYMBOL(gator_annotate_end);
+
+void gator_annotate_name_channel(int channel, int group, const char *str)
+{
+ const u16 str_size = strlen(str) & 0xffff;
+ char header[12];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = NAME_CHANNEL_ANNOTATION;
+ marshal_u32(header + 2, channel);
+ marshal_u32(header + 6, group);
+ marshal_u16(header + 10, str_size);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+EXPORT_SYMBOL(gator_annotate_name_channel);
+
+void gator_annotate_name_group(int group, const char *str)
+{
+ const u16 str_size = strlen(str) & 0xffff;
+ char header[8];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = NAME_GROUP_ANNOTATION;
+ marshal_u32(header + 2, group);
+ marshal_u16(header + 6, str_size);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+EXPORT_SYMBOL(gator_annotate_name_group);
+
+void gator_annotate_visual(const char *data, unsigned int length, const char *str)
+{
+ const u16 str_size = strlen(str) & 0xffff;
+ char header[4];
+ char header_length[4];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = VISUAL_ANNOTATION;
+ marshal_u16(header + 2, str_size);
+ marshal_u32(header_length, length);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size);
+ kannotate_write(header_length, sizeof(header_length));
+ kannotate_write(data, length);
+}
+EXPORT_SYMBOL(gator_annotate_visual);
+
+void gator_annotate_marker(void)
+{
+ char header[4];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = MARKER_ANNOTATION;
+ marshal_u16(header + 2, 0);
+ kannotate_write(header, sizeof(header));
+}
+EXPORT_SYMBOL(gator_annotate_marker);
+
+void gator_annotate_marker_str(const char *str)
+{
+ const u16 str_size = strlen(str) & 0xffff;
+ char header[4];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = MARKER_ANNOTATION;
+ marshal_u16(header + 2, str_size);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size);
+}
+EXPORT_SYMBOL(gator_annotate_marker_str);
+
+void gator_annotate_marker_color(int color)
+{
+ char header[8];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = MARKER_ANNOTATION;
+ marshal_u16(header + 2, 4);
+ marshal_u32(header + 4, color);
+ kannotate_write(header, sizeof(header));
+}
+EXPORT_SYMBOL(gator_annotate_marker_color);
+
+void gator_annotate_marker_color_str(int color, const char *str)
+{
+ const u16 str_size = (strlen(str) + 4) & 0xffff;
+ char header[8];
+
+ header[0] = ESCAPE_CODE;
+ header[1] = MARKER_ANNOTATION;
+ marshal_u16(header + 2, str_size);
+ marshal_u32(header + 4, color);
+ kannotate_write(header, sizeof(header));
+ kannotate_write(str, str_size - 4);
+}
+EXPORT_SYMBOL(gator_annotate_marker_color_str);
diff --git a/drivers/parrot/gator/gator_backtrace.c b/drivers/parrot/gator/gator_backtrace.c
new file mode 100644
index 00000000000000..76c941d009a9af
--- /dev/null
+++ b/drivers/parrot/gator/gator_backtrace.c
@@ -0,0 +1,208 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * EABI backtrace stores {fp,lr} on the stack.
+ */
+struct stack_frame_eabi {
+ union {
+ struct {
+ unsigned long fp;
+ /* May be the fp in the case of a leaf function or clang */
+ unsigned long lr;
+ /* If lr is really the fp, lr2 is the corresponding lr */
+ unsigned long lr2;
+ };
+ /* Used to read 32 bit fp/lr from a 64 bit kernel */
+ struct {
+ u32 fp_32;
+ /* same as lr above */
+ u32 lr_32;
+ /* same as lr2 above */
+ u32 lr2_32;
+ };
+ };
+};
+
+static void gator_add_trace(int cpu, unsigned long address)
+{
+ off_t offset = 0;
+ unsigned long cookie = get_address_cookie(cpu, current, address & ~1, &offset);
+
+ if (cookie == NO_COOKIE || cookie == UNRESOLVED_COOKIE)
+ offset = address;
+
+ marshal_backtrace(offset & ~1, cookie, 0);
+}
+
+static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int depth)
+{
+#if defined(__arm__) || defined(__aarch64__)
+ struct stack_frame_eabi *curr;
+ struct stack_frame_eabi bufcurr;
+#if defined(__arm__)
+ const bool is_compat = false;
+ unsigned long fp = regs->ARM_fp;
+ unsigned long sp = regs->ARM_sp;
+ unsigned long lr = regs->ARM_lr;
+ const int gcc_frame_offset = sizeof(unsigned long);
+#else
+ /* Is userspace aarch32 (32 bit) */
+ const bool is_compat = compat_user_mode(regs);
+ unsigned long fp = (is_compat ? regs->regs[11] : regs->regs[29]);
+ unsigned long sp = (is_compat ? regs->compat_sp : regs->sp);
+ unsigned long lr = (is_compat ? regs->compat_lr : regs->regs[30]);
+ const int gcc_frame_offset = (is_compat ? sizeof(u32) : 0);
+#endif
+ /* clang frame offset is always zero */
+ int is_user_mode = user_mode(regs);
+
+ /* pc (current function) has already been added */
+
+ if (!is_user_mode)
+ return;
+
+ /* Add the lr (parent function), entry preamble may not have
+ * executed
+ */
+ gator_add_trace(cpu, lr);
+
+ /* check fp is valid */
+ if (fp == 0 || fp < sp)
+ return;
+
+ /* Get the current stack frame */
+ curr = (struct stack_frame_eabi *)(fp - gcc_frame_offset);
+ if ((unsigned long)curr & 3)
+ return;
+
+ while (depth-- && curr) {
+ if (!access_ok(VERIFY_READ, curr, sizeof(struct stack_frame_eabi)) ||
+ __copy_from_user_inatomic(&bufcurr, curr, sizeof(struct stack_frame_eabi))) {
+ return;
+ }
+
+ fp = (is_compat ? bufcurr.fp_32 : bufcurr.fp);
+ lr = (is_compat ? bufcurr.lr_32 : bufcurr.lr);
+
+#define calc_next(reg) ((reg) - gcc_frame_offset)
+ /* Returns true if reg is a valid fp */
+#define validate_next(reg, curr) \
+ ((reg) != 0 && (calc_next(reg) & 3) == 0 && (unsigned long)(curr) < calc_next(reg))
+
+ /* Try lr from the stack as the fp because gcc leaf functions do
+ * not push lr. If gcc_frame_offset is non-zero, the lr will also
+ * be the clang fp. This assumes code is at a lower address than
+ * the stack
+ */
+ if (validate_next(lr, curr)) {
+ fp = lr;
+ lr = (is_compat ? bufcurr.lr2_32 : bufcurr.lr2);
+ }
+
+ gator_add_trace(cpu, lr);
+
+ if (!validate_next(fp, curr))
+ return;
+
+ /* Move to the next stack frame */
+ curr = (struct stack_frame_eabi *)calc_next(fp);
+ }
+#endif
+}
+
+#if defined(__arm__) || defined(__aarch64__)
+static int report_trace(struct stackframe *frame, void *d)
+{
+ unsigned int *depth = d, cookie = NO_COOKIE;
+ unsigned long addr = frame->pc;
+
+ if (*depth) {
+#if defined(MODULE)
+ unsigned int cpu = get_physical_cpu();
+ struct module *mod = __module_address(addr);
+
+ if (mod) {
+ cookie = get_cookie(cpu, current, mod->name, false);
+ addr = addr - (unsigned long)mod->module_core;
+ }
+#endif
+ marshal_backtrace(addr & ~1, cookie, 1);
+ (*depth)--;
+ }
+
+ return *depth == 0;
+}
+#endif
+
+/* Uncomment the following line to enable kernel stack unwinding within gator, note it can also be defined from the Makefile */
+/* #define GATOR_KERNEL_STACK_UNWINDING */
+
+#if (defined(__arm__) || defined(__aarch64__)) && !defined(GATOR_KERNEL_STACK_UNWINDING)
+/* Disabled by default */
+MODULE_PARM_DESC(kernel_stack_unwinding, "Allow kernel stack unwinding.");
+static bool kernel_stack_unwinding;
+module_param(kernel_stack_unwinding, bool, 0644);
+#endif
+
+static void kernel_backtrace(int cpu, struct pt_regs *const regs)
+{
+#if defined(__arm__) || defined(__aarch64__)
+#ifdef GATOR_KERNEL_STACK_UNWINDING
+ int depth = gator_backtrace_depth;
+#else
+ int depth = (kernel_stack_unwinding ? gator_backtrace_depth : 1);
+#endif
+ struct stackframe frame;
+
+ if (depth == 0)
+ depth = 1;
+#if defined(__arm__)
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+#else
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+#endif
+ walk_stackframe(&frame, report_trace, &depth);
+#else
+ marshal_backtrace(PC_REG & ~1, NO_COOKIE, 1);
+#endif
+}
+
+static void gator_add_sample(int cpu, struct pt_regs *const regs, u64 time)
+{
+ bool in_kernel;
+ unsigned long exec_cookie;
+
+ if (!regs)
+ return;
+
+ in_kernel = !user_mode(regs);
+ exec_cookie = get_exec_cookie(cpu, current);
+
+ if (!marshal_backtrace_header(exec_cookie, current->tgid, current->pid, time))
+ return;
+
+ if (in_kernel) {
+ kernel_backtrace(cpu, regs);
+ } else {
+ /* Cookie+PC */
+ gator_add_trace(cpu, PC_REG);
+
+ /* Backtrace */
+ if (gator_backtrace_depth)
+ arm_backtrace_eabi(cpu, regs, gator_backtrace_depth);
+ }
+
+ marshal_backtrace_footer(time);
+}
diff --git a/drivers/parrot/gator/gator_buffer.c b/drivers/parrot/gator/gator_buffer.c
new file mode 100644
index 00000000000000..910d5aa1506697
--- /dev/null
+++ b/drivers/parrot/gator/gator_buffer.c
@@ -0,0 +1,171 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+static void marshal_frame(int cpu, int buftype)
+{
+ int frame;
+ bool write_cpu;
+
+ if (!per_cpu(gator_buffer, cpu)[buftype])
+ return;
+
+ switch (buftype) {
+ case SUMMARY_BUF:
+ write_cpu = false;
+ frame = FRAME_SUMMARY;
+ break;
+ case BACKTRACE_BUF:
+ write_cpu = true;
+ frame = FRAME_BACKTRACE;
+ break;
+ case NAME_BUF:
+ write_cpu = true;
+ frame = FRAME_NAME;
+ break;
+ case COUNTER_BUF:
+ write_cpu = false;
+ frame = FRAME_COUNTER;
+ break;
+ case BLOCK_COUNTER_BUF:
+ write_cpu = true;
+ frame = FRAME_BLOCK_COUNTER;
+ break;
+ case ANNOTATE_BUF:
+ write_cpu = false;
+ frame = FRAME_ANNOTATE;
+ break;
+ case SCHED_TRACE_BUF:
+ write_cpu = true;
+ frame = FRAME_SCHED_TRACE;
+ break;
+ case IDLE_BUF:
+ write_cpu = false;
+ frame = FRAME_IDLE;
+ break;
+ case ACTIVITY_BUF:
+ write_cpu = false;
+ frame = FRAME_ACTIVITY;
+ break;
+ default:
+ write_cpu = false;
+ frame = -1;
+ break;
+ }
+
+ /* add response type */
+ if (gator_response_type > 0)
+ gator_buffer_write_packed_int(cpu, buftype, gator_response_type);
+
+ /* leave space for 4-byte unpacked length */
+ per_cpu(gator_buffer_write, cpu)[buftype] = (per_cpu(gator_buffer_write, cpu)[buftype] + sizeof(s32)) & gator_buffer_mask[buftype];
+
+ /* add frame type and core number */
+ gator_buffer_write_packed_int(cpu, buftype, frame);
+ if (write_cpu)
+ gator_buffer_write_packed_int(cpu, buftype, cpu);
+}
+
+static int buffer_bytes_available(int cpu, int buftype)
+{
+ int remaining, filled;
+
+ filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_read, cpu)[buftype];
+ if (filled < 0)
+ filled += gator_buffer_size[buftype];
+
+ remaining = gator_buffer_size[buftype] - filled;
+
+ if (per_cpu(buffer_space_available, cpu)[buftype])
+ /* Give some extra room; also allows space to insert the overflow error packet */
+ remaining -= 200;
+ else
+ /* Hysteresis, prevents multiple overflow messages */
+ remaining -= 2000;
+
+ return remaining;
+}
+
+static bool buffer_check_space(int cpu, int buftype, int bytes)
+{
+ int remaining = buffer_bytes_available(cpu, buftype);
+
+ if (remaining < bytes)
+ per_cpu(buffer_space_available, cpu)[buftype] = false;
+ else
+ per_cpu(buffer_space_available, cpu)[buftype] = true;
+
+ return per_cpu(buffer_space_available, cpu)[buftype];
+}
+
+static int contiguous_space_available(int cpu, int buftype)
+{
+ int remaining = buffer_bytes_available(cpu, buftype);
+ int contiguous = gator_buffer_size[buftype] - per_cpu(gator_buffer_write, cpu)[buftype];
+
+ if (remaining < contiguous)
+ return remaining;
+ return contiguous;
+}
+
+static void gator_commit_buffer(int cpu, int buftype, u64 time)
+{
+ int type_length, commit, length, byte;
+ unsigned long flags;
+
+ if (!per_cpu(gator_buffer, cpu)[buftype])
+ return;
+
+ /* post-populate the length, which does not include the response type length nor the length itself, i.e. only the length of the payload */
+ local_irq_save(flags);
+ type_length = gator_response_type ? 1 : 0;
+ commit = per_cpu(gator_buffer_commit, cpu)[buftype];
+ length = per_cpu(gator_buffer_write, cpu)[buftype] - commit;
+ if (length < 0)
+ length += gator_buffer_size[buftype];
+ length = length - type_length - sizeof(s32);
+
+ if (length <= FRAME_HEADER_SIZE) {
+ /* Nothing to write, only the frame header is present */
+ local_irq_restore(flags);
+ return;
+ }
+
+ for (byte = 0; byte < sizeof(s32); byte++)
+ per_cpu(gator_buffer, cpu)[buftype][(commit + type_length + byte) & gator_buffer_mask[buftype]] = (length >> byte * 8) & 0xFF;
+
+ per_cpu(gator_buffer_commit, cpu)[buftype] = per_cpu(gator_buffer_write, cpu)[buftype];
+
+ if (gator_live_rate > 0) {
+ while (time > per_cpu(gator_buffer_commit_time, cpu))
+ per_cpu(gator_buffer_commit_time, cpu) += gator_live_rate;
+ }
+
+ marshal_frame(cpu, buftype);
+ local_irq_restore(flags);
+
+ /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
+ if (per_cpu(in_scheduler_context, cpu)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /* mod_timer can not be used in interrupt context in RT-Preempt full */
+ mod_timer(&gator_buffer_wake_up_timer, jiffies + 1);
+#endif
+ } else {
+ up(&gator_buffer_wake_sem);
+ }
+}
+
+static void buffer_check(int cpu, int buftype, u64 time)
+{
+ int filled = per_cpu(gator_buffer_write, cpu)[buftype] - per_cpu(gator_buffer_commit, cpu)[buftype];
+
+ if (filled < 0)
+ filled += gator_buffer_size[buftype];
+ if (filled >= ((gator_buffer_size[buftype] * 3) / 4))
+ gator_commit_buffer(cpu, buftype, time);
+}
diff --git a/drivers/parrot/gator/gator_buffer_write.c b/drivers/parrot/gator/gator_buffer_write.c
new file mode 100644
index 00000000000000..654ec606cfad66
--- /dev/null
+++ b/drivers/parrot/gator/gator_buffer_write.c
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+static void gator_buffer_write_packed_int(int cpu, int buftype, int x)
+{
+ uint32_t write = per_cpu(gator_buffer_write, cpu)[buftype];
+ uint32_t mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+ int packedBytes = 0;
+ int more = true;
+
+ while (more) {
+ /* low order 7 bits of x */
+ char b = x & 0x7f;
+
+ x >>= 7;
+
+ if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0))
+ more = false;
+ else
+ b |= 0x80;
+
+ buffer[(write + packedBytes) & mask] = b;
+ packedBytes++;
+ }
+
+ per_cpu(gator_buffer_write, cpu)[buftype] = (write + packedBytes) & mask;
+}
+
+static void gator_buffer_write_packed_int64(int cpu, int buftype, long long x)
+{
+ uint32_t write = per_cpu(gator_buffer_write, cpu)[buftype];
+ uint32_t mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+ int packedBytes = 0;
+ int more = true;
+
+ while (more) {
+ /* low order 7 bits of x */
+ char b = x & 0x7f;
+
+ x >>= 7;
+
+ if ((x == 0 && (b & 0x40) == 0) || (x == -1 && (b & 0x40) != 0))
+ more = false;
+ else
+ b |= 0x80;
+
+ buffer[(write + packedBytes) & mask] = b;
+ packedBytes++;
+ }
+
+ per_cpu(gator_buffer_write, cpu)[buftype] = (write + packedBytes) & mask;
+}
+
+static void gator_buffer_write_bytes(int cpu, int buftype, const char *x, int len)
+{
+ int i;
+ u32 write = per_cpu(gator_buffer_write, cpu)[buftype];
+ u32 mask = gator_buffer_mask[buftype];
+ char *buffer = per_cpu(gator_buffer, cpu)[buftype];
+
+ for (i = 0; i < len; i++) {
+ buffer[write] = x[i];
+ write = (write + 1) & mask;
+ }
+
+ per_cpu(gator_buffer_write, cpu)[buftype] = write;
+}
+
+static void gator_buffer_write_string(int cpu, int buftype, const char *x)
+{
+ int len = strlen(x);
+
+ gator_buffer_write_packed_int(cpu, buftype, len);
+ gator_buffer_write_bytes(cpu, buftype, x, len);
+}
diff --git a/drivers/parrot/gator/gator_cookies.c b/drivers/parrot/gator/gator_cookies.c
new file mode 100644
index 00000000000000..c43cce81522649
--- /dev/null
+++ b/drivers/parrot/gator/gator_cookies.c
@@ -0,0 +1,446 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/* must be power of 2 */
+#define COOKIEMAP_ENTRIES 1024
+/* must be a power of 2 - 512/4 = 128 entries */
+#define TRANSLATE_BUFFER_SIZE 512
+#define TRANSLATE_TEXT_SIZE 256
+#define MAX_COLLISIONS 2
+
+static uint32_t *gator_crc32_table;
+static unsigned int translate_buffer_mask;
+
+struct cookie_args {
+ struct task_struct *task;
+ const char *text;
+};
+
+static DEFINE_PER_CPU(char *, translate_text);
+static DEFINE_PER_CPU(uint32_t, cookie_next_key);
+static DEFINE_PER_CPU(uint64_t *, cookie_keys);
+static DEFINE_PER_CPU(uint32_t *, cookie_values);
+static DEFINE_PER_CPU(int, translate_buffer_read);
+static DEFINE_PER_CPU(int, translate_buffer_write);
+static DEFINE_PER_CPU(struct cookie_args *, translate_buffer);
+
+static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq);
+static void wq_cookie_handler(struct work_struct *unused);
+static DECLARE_WORK(cookie_work, wq_cookie_handler);
+static struct timer_list app_process_wake_up_timer;
+static void app_process_wake_up_handler(unsigned long unused_data);
+
+static uint32_t cookiemap_code(uint64_t value64)
+{
+ uint32_t value = (uint32_t)((value64 >> 32) + value64);
+ uint32_t cookiecode = (value >> 24) & 0xff;
+
+ cookiecode = cookiecode * 31 + ((value >> 16) & 0xff);
+ cookiecode = cookiecode * 31 + ((value >> 8) & 0xff);
+ cookiecode = cookiecode * 31 + ((value >> 0) & 0xff);
+ cookiecode &= (COOKIEMAP_ENTRIES - 1);
+ return cookiecode * MAX_COLLISIONS;
+}
+
+static uint32_t gator_chksum_crc32(const char *data)
+{
+ register unsigned long crc;
+ const unsigned char *block = data;
+ int i, length = strlen(data);
+
+ crc = 0xFFFFFFFF;
+ for (i = 0; i < length; i++)
+ crc = ((crc >> 8) & 0x00FFFFFF) ^ gator_crc32_table[(crc ^ *block++) & 0xFF];
+
+ return (crc ^ 0xFFFFFFFF);
+}
+
+/*
+ * Exists
+ * Pre: [0][1][v][3]..[n-1]
+ * Post: [v][0][1][3]..[n-1]
+ */
+static uint32_t cookiemap_exists(uint64_t key)
+{
+ unsigned long x, flags, retval = 0;
+ int cpu = get_physical_cpu();
+ uint32_t cookiecode = cookiemap_code(key);
+ uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
+ uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
+
+ /* Can be called from interrupt handler or from work queue */
+ local_irq_save(flags);
+ for (x = 0; x < MAX_COLLISIONS; x++) {
+ if (keys[x] == key) {
+ uint32_t value = values[x];
+
+ for (; x > 0; x--) {
+ keys[x] = keys[x - 1];
+ values[x] = values[x - 1];
+ }
+ keys[0] = key;
+ values[0] = value;
+ retval = value;
+ break;
+ }
+ }
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+/*
+ * Add
+ * Pre: [0][1][2][3]..[n-1]
+ * Post: [v][0][1][2]..[n-2]
+ */
+static void cookiemap_add(uint64_t key, uint32_t value)
+{
+ int cpu = get_physical_cpu();
+ int cookiecode = cookiemap_code(key);
+ uint64_t *keys = &(per_cpu(cookie_keys, cpu)[cookiecode]);
+ uint32_t *values = &(per_cpu(cookie_values, cpu)[cookiecode]);
+ int x;
+
+ for (x = MAX_COLLISIONS - 1; x > 0; x--) {
+ keys[x] = keys[x - 1];
+ values[x] = values[x - 1];
+ }
+ keys[0] = key;
+ values[0] = value;
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+static void translate_buffer_write_args(int cpu, struct task_struct *task, const char *text)
+{
+ unsigned long flags;
+ int write;
+ int next_write;
+ struct cookie_args *args;
+
+ local_irq_save(flags);
+
+ write = per_cpu(translate_buffer_write, cpu);
+ next_write = (write + 1) & translate_buffer_mask;
+
+ /* At least one entry must always remain available as when read == write, the queue is empty not full */
+ if (next_write != per_cpu(translate_buffer_read, cpu)) {
+ args = &per_cpu(translate_buffer, cpu)[write];
+ args->task = task;
+ args->text = text;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+ get_task_struct(task);
+#endif
+ per_cpu(translate_buffer_write, cpu) = next_write;
+ }
+
+ local_irq_restore(flags);
+}
+#endif
+
+static void translate_buffer_read_args(int cpu, struct cookie_args *args)
+{
+ unsigned long flags;
+ int read;
+
+ local_irq_save(flags);
+
+ read = per_cpu(translate_buffer_read, cpu);
+ *args = per_cpu(translate_buffer, cpu)[read];
+ per_cpu(translate_buffer_read, cpu) = (read + 1) & translate_buffer_mask;
+
+ local_irq_restore(flags);
+}
+
+static void wq_cookie_handler(struct work_struct *unused)
+{
+ struct cookie_args args;
+ int cpu = get_physical_cpu(), cookie;
+
+ mutex_lock(&start_mutex);
+
+ if (gator_started != 0) {
+ while (per_cpu(translate_buffer_read, cpu) != per_cpu(translate_buffer_write, cpu)) {
+ translate_buffer_read_args(cpu, &args);
+ cookie = get_cookie(cpu, args.task, args.text, true);
+ marshal_link(cookie, args.task->tgid, args.task->pid);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+ put_task_struct(args.task);
+#endif
+ }
+ }
+
+ mutex_unlock(&start_mutex);
+}
+
+static void app_process_wake_up_handler(unsigned long unused_data)
+{
+ /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
+ schedule_work(&cookie_work);
+}
+
+/* Retrieve full name from proc/pid/cmdline for java processes on Android */
+static int translate_app_process(const char **text, int cpu, struct task_struct *task, bool from_wq)
+{
+ void *maddr;
+ unsigned int len;
+ unsigned long addr;
+ struct mm_struct *mm;
+ struct page *page = NULL;
+ struct vm_area_struct *page_vma;
+ int bytes, offset, retval = 0;
+ char *buf = per_cpu(translate_text, cpu);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Push work into a work queue if in atomic context as the kernel
+ * functions below might sleep. Rely on the in_interrupt variable
+ * rather than in_irq() or in_interrupt() kernel functions, as the
+ * value of these functions seems inconsistent during a context
+ * switch between android/linux versions
+ */
+ if (!from_wq) {
+ /* Check if already in buffer */
+ int pos = per_cpu(translate_buffer_read, cpu);
+
+ while (pos != per_cpu(translate_buffer_write, cpu)) {
+ if (per_cpu(translate_buffer, cpu)[pos].task == task)
+ goto out;
+ pos = (pos + 1) & translate_buffer_mask;
+ }
+
+ translate_buffer_write_args(cpu, task, *text);
+
+ /* Not safe to call in RT-Preempt full in schedule switch context */
+ mod_timer(&app_process_wake_up_timer, jiffies + 1);
+ goto out;
+ }
+#endif
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto outmm;
+ addr = mm->arg_start;
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > TRANSLATE_TEXT_SIZE)
+ len = TRANSLATE_TEXT_SIZE;
+
+ down_read(&mm->mmap_sem);
+ while (len) {
+ if (get_user_pages(task, mm, addr, 1, 0, 1, &page, &page_vma) <= 0)
+ goto outsem;
+
+ maddr = kmap(page);
+ offset = addr & (PAGE_SIZE - 1);
+ bytes = len;
+ if (bytes > PAGE_SIZE - offset)
+ bytes = PAGE_SIZE - offset;
+
+ copy_from_user_page(page_vma, page, addr, buf, maddr + offset, bytes);
+
+ /* release page allocated by get_user_pages() */
+ kunmap(page);
+ page_cache_release(page);
+
+ len -= bytes;
+ buf += bytes;
+ addr += bytes;
+
+ *text = per_cpu(translate_text, cpu);
+ retval = 1;
+ }
+
+ /* On app_process startup, /proc/pid/cmdline is initially "zygote" then "<pre-initialized>" but changes after an initial startup period */
+ if (strcmp(*text, "zygote") == 0 || strcmp(*text, "<pre-initialized>") == 0)
+ retval = 0;
+
+outsem:
+ up_read(&mm->mmap_sem);
+outmm:
+ mmput(mm);
+out:
+ return retval;
+}
+
+static const char APP_PROCESS[] = "app_process";
+
+static uint32_t get_cookie(int cpu, struct task_struct *task, const char *text, bool from_wq)
+{
+ unsigned long flags, cookie;
+ uint64_t key;
+
+ key = gator_chksum_crc32(text);
+ key = (key << 32) | (uint32_t)task->tgid;
+
+ cookie = cookiemap_exists(key);
+ if (cookie)
+ return cookie;
+
+ /* On 64-bit android app_process can be app_process32 or app_process64 */
+ if (strncmp(text, APP_PROCESS, sizeof(APP_PROCESS) - 1) == 0) {
+ if (!translate_app_process(&text, cpu, task, from_wq))
+ return UNRESOLVED_COOKIE;
+ }
+
+ /* Can be called from interrupt handler or from work queue or from scheduler trace */
+ local_irq_save(flags);
+
+ cookie = UNRESOLVED_COOKIE;
+ if (marshal_cookie_header(text)) {
+ cookie = per_cpu(cookie_next_key, cpu) += nr_cpu_ids;
+ cookiemap_add(key, cookie);
+ marshal_cookie(cookie, text);
+ }
+
+ local_irq_restore(flags);
+
+ return cookie;
+}
+
+static int get_exec_cookie(int cpu, struct task_struct *task)
+{
+ struct mm_struct *mm = task->mm;
+ const char *text;
+
+ /* kernel threads have no address space */
+ if (!mm)
+ return NO_COOKIE;
+
+ if (task && task->mm && task->mm->exe_file) {
+ text = task->mm->exe_file->f_path.dentry->d_name.name;
+ return get_cookie(cpu, task, text, false);
+ }
+
+ return UNRESOLVED_COOKIE;
+}
+
+static unsigned long get_address_cookie(int cpu, struct task_struct *task, unsigned long addr, off_t *offset)
+{
+ unsigned long cookie = NO_COOKIE;
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+ const char *text;
+
+ if (!mm)
+ return cookie;
+
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ continue;
+
+ if (vma->vm_file) {
+ text = vma->vm_file->f_path.dentry->d_name.name;
+ cookie = get_cookie(cpu, task, text, false);
+ *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start;
+ } else {
+ /* must be an anonymous map */
+ *offset = addr;
+ }
+
+ break;
+ }
+
+ if (!vma)
+ cookie = UNRESOLVED_COOKIE;
+
+ return cookie;
+}
+
+static int cookies_initialize(void)
+{
+ uint32_t crc, poly;
+ int i, j, cpu, size, err = 0;
+
+ translate_buffer_mask = TRANSLATE_BUFFER_SIZE / sizeof(per_cpu(translate_buffer, 0)[0]) - 1;
+
+ for_each_present_cpu(cpu) {
+ per_cpu(cookie_next_key, cpu) = nr_cpu_ids + cpu;
+
+ size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint64_t);
+ per_cpu(cookie_keys, cpu) = kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(cookie_keys, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ memset(per_cpu(cookie_keys, cpu), 0, size);
+
+ size = COOKIEMAP_ENTRIES * MAX_COLLISIONS * sizeof(uint32_t);
+ per_cpu(cookie_values, cpu) = kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(cookie_values, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ memset(per_cpu(cookie_values, cpu), 0, size);
+
+ per_cpu(translate_buffer, cpu) = kmalloc(TRANSLATE_BUFFER_SIZE, GFP_KERNEL);
+ if (!per_cpu(translate_buffer, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+
+ per_cpu(translate_buffer_write, cpu) = 0;
+ per_cpu(translate_buffer_read, cpu) = 0;
+
+ per_cpu(translate_text, cpu) = kmalloc(TRANSLATE_TEXT_SIZE, GFP_KERNEL);
+ if (!per_cpu(translate_text, cpu)) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ }
+
+ /* build CRC32 table */
+ poly = 0x04c11db7;
+ gator_crc32_table = kmalloc(256 * sizeof(*gator_crc32_table), GFP_KERNEL);
+ if (!gator_crc32_table) {
+ err = -ENOMEM;
+ goto cookie_setup_error;
+ }
+ for (i = 0; i < 256; i++) {
+ crc = i;
+ for (j = 8; j > 0; j--) {
+ if (crc & 1)
+ crc = (crc >> 1) ^ poly;
+ else
+ crc >>= 1;
+ }
+ gator_crc32_table[i] = crc;
+ }
+
+ setup_timer(&app_process_wake_up_timer, app_process_wake_up_handler, 0);
+
+cookie_setup_error:
+ return err;
+}
+
+static void cookies_release(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ kfree(per_cpu(cookie_keys, cpu));
+ per_cpu(cookie_keys, cpu) = NULL;
+
+ kfree(per_cpu(cookie_values, cpu));
+ per_cpu(cookie_values, cpu) = NULL;
+
+ kfree(per_cpu(translate_buffer, cpu));
+ per_cpu(translate_buffer, cpu) = NULL;
+ per_cpu(translate_buffer_read, cpu) = 0;
+ per_cpu(translate_buffer_write, cpu) = 0;
+
+ kfree(per_cpu(translate_text, cpu));
+ per_cpu(translate_text, cpu) = NULL;
+ }
+
+ del_timer_sync(&app_process_wake_up_timer);
+ kfree(gator_crc32_table);
+ gator_crc32_table = NULL;
+}
diff --git a/drivers/parrot/gator/gator_events_armv6.c b/drivers/parrot/gator/gator_events_armv6.c
new file mode 100644
index 00000000000000..a157a0013302e8
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_armv6.c
@@ -0,0 +1,234 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+/* gator_events_perf_pmu.c is used if perf is supported */
+#if GATOR_NO_PERF_SUPPORT
+
+static const char *pmnc_name;
+
+/*
+ * Per-CPU PMCR
+ */
+#define PMCR_E (1 << 0) /* Enable */
+#define PMCR_P (1 << 1) /* Count reset */
+#define PMCR_C (1 << 2) /* Cycle counter reset */
+#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
+#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
+#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
+
+#define PMN0 0
+#define PMN1 1
+#define CCNT 2
+#define CNTMAX (CCNT+1)
+
+static int pmnc_counters;
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+static inline void armv6_pmnc_write(u32 val)
+{
+ /* upper 4 bits and bits 7, 11 are write-as-0 */
+ val &= 0x0ffff77f;
+ asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
+}
+
+static inline u32 armv6_pmnc_read(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
+ return val;
+}
+
+static void armv6_pmnc_reset_counter(unsigned int cnt)
+{
+ u32 val = 0;
+
+ switch (cnt) {
+ case CCNT:
+ asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
+ break;
+ case PMN0:
+ asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
+ break;
+ case PMN1:
+ asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
+ break;
+ }
+}
+
+int gator_events_armv6_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ pmnc_counters = 3;
+
+ for (i = PMN0; i <= CCNT; i++) {
+ char buf[40];
+
+ if (i == CCNT)
+ snprintf(buf, sizeof(buf), "ARM_%s_ccnt", pmnc_name);
+ else
+ snprintf(buf, sizeof(buf), "ARM_%s_cnt%d", pmnc_name, i);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i != CCNT)
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+
+ return 0;
+}
+
+static int gator_events_armv6_online(int **buffer, bool migrate)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+ u32 pmnc;
+
+ if (armv6_pmnc_read() & PMCR_E)
+ armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
+
+ /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
+ armv6_pmnc_write(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
+ PMCR_C | PMCR_P);
+
+ /* configure control register */
+ for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ event = pmnc_event[cnt] & 255;
+
+ /* Set event (if destined for PMNx counters) */
+ if (cnt == PMN0)
+ pmnc |= event << 20;
+ else if (cnt == PMN1)
+ pmnc |= event << 12;
+
+ /* Reset counter */
+ armv6_pmnc_reset_counter(cnt);
+ }
+ armv6_pmnc_write(pmnc | PMCR_E);
+
+ /* return zero values, no need to read as the counters were just reset */
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static int gator_events_armv6_offline(int **buffer, bool migrate)
+{
+ unsigned int cnt;
+
+ armv6_pmnc_write(armv6_pmnc_read() & ~PMCR_E);
+ for (cnt = PMN0; cnt <= CCNT; cnt++)
+ armv6_pmnc_reset_counter(cnt);
+
+ return 0;
+}
+
+static void gator_events_armv6_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+static int gator_events_armv6_read(int **buffer, bool sched_switch)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+ if (!(armv6_pmnc_read() & PMCR_E))
+ return 0;
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ u32 value = 0;
+
+ switch (cnt) {
+ case CCNT:
+ asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r" (value));
+ break;
+ case PMN0:
+ asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r" (value));
+ break;
+ case PMN1:
+ asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r" (value));
+ break;
+ }
+ armv6_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_armv6_interface = {
+ .create_files = gator_events_armv6_create_files,
+ .stop = gator_events_armv6_stop,
+ .online = gator_events_armv6_online,
+ .offline = gator_events_armv6_offline,
+ .read = gator_events_armv6_read,
+};
+
+int gator_events_armv6_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case ARM1136:
+ case ARM1156:
+ case ARM1176:
+ pmnc_name = "ARM11";
+ break;
+ case ARM11MPCORE:
+ pmnc_name = "ARM11MPCore";
+ break;
+ default:
+ return -1;
+ }
+
+ for (cnt = PMN0; cnt <= CCNT; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_armv6_interface);
+}
+
+#endif
diff --git a/drivers/parrot/gator/gator_events_armv7.c b/drivers/parrot/gator/gator_events_armv7.c
new file mode 100644
index 00000000000000..09c94220114ca0
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_armv7.c
@@ -0,0 +1,314 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Disabling interrupts
+ * Many of the functions below disable interrupts via local_irq_save(). This disabling of interrupts is done to prevent any race conditions
+ * between multiple entities (e.g. hrtimer interrupts and event based interrupts) calling the same functions. As accessing the pmu involves
+ * several steps (disable, select, read, enable), these steps must be performed atomically. Normal synchronization routines cannot be used
+ * as these functions are being called from interrupt context.
+ */
+
+#include "gator.h"
+
+/* gator_events_perf_pmu.c is used if perf is supported */
+#if GATOR_NO_PERF_SUPPORT
+
+/* Per-CPU PMNC: config reg */
+#define PMNC_E (1 << 0) /* Enable all counters */
+#define PMNC_P (1 << 1) /* Reset all counters */
+#define PMNC_C (1 << 2) /* Cycle counter reset */
+#define PMNC_MASK 0x3f /* Mask for writable bits */
+
+/* ccnt reg */
+#define CCNT_REG (1 << 31)
+
+#define CCNT 0
+#define CNT0 1
+#define CNTMAX (6+1)
+
+static const char *pmnc_name;
+static int pmnc_counters;
+
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+inline void armv7_pmnc_write(u32 val)
+{
+ val &= PMNC_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+inline u32 armv7_pmnc_read(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+inline u32 armv7_ccnt_read(u32 reset_value)
+{
+ unsigned long flags;
+ u32 newval = -reset_value;
+ u32 den = CCNT_REG;
+ u32 val;
+
+ local_irq_save(flags);
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den)); /* disable */
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); /* read */
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (newval)); /* new value */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den)); /* enable */
+ local_irq_restore(flags);
+
+ return val;
+}
+
+inline u32 armv7_cntn_read(unsigned int cnt, u32 reset_value)
+{
+ unsigned long flags;
+ u32 newval = -reset_value;
+ u32 sel = (cnt - CNT0);
+ u32 den = 1 << sel;
+ u32 oldval;
+
+ local_irq_save(flags);
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (den)); /* disable */
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (sel)); /* select */
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (oldval)); /* read */
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (newval)); /* new value */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (den)); /* enable */
+ local_irq_restore(flags);
+
+ return oldval;
+}
+
+static inline void armv7_pmnc_disable_interrupt(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : (1 << 31);
+
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+}
+
+inline u32 armv7_pmnc_reset_interrupt(void)
+{
+ /* Get and reset overflow status flags */
+ u32 flags;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (flags));
+ flags &= 0x8000003f;
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (flags));
+ return flags;
+}
+
+static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+ return cnt;
+}
+
+static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
+{
+ u32 val = cnt ? (1 << (cnt - CNT0)) : CCNT_REG;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+ return cnt;
+}
+
+static inline int armv7_pmnc_select_counter(unsigned int cnt)
+{
+ u32 val = (cnt - CNT0);
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+ return cnt;
+}
+
+static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
+{
+ if (armv7_pmnc_select_counter(cnt) == cnt)
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+}
+
+static int gator_events_armv7_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < pmnc_counters; i++) {
+ char buf[40];
+
+ if (i == 0)
+ snprintf(buf, sizeof(buf), "%s_ccnt", pmnc_name);
+ else
+ snprintf(buf, sizeof(buf), "%s_cnt%d", pmnc_name, i - 1);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i > 0)
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+
+ return 0;
+}
+
+static int gator_events_armv7_online(int **buffer, bool migrate)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+
+ if (armv7_pmnc_read() & PMNC_E)
+ armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
+
+ /* Initialize & Reset PMNC: C bit and P bit */
+ armv7_pmnc_write(PMNC_P | PMNC_C);
+
+ /* Reset overflow flags */
+ armv7_pmnc_reset_interrupt();
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(cnt);
+
+ event = pmnc_event[cnt] & 255;
+
+ /* Set event (if destined for PMNx counters), we don't need to set the event if it's a cycle count */
+ if (cnt != CCNT)
+ armv7_pmnc_write_evtsel(cnt, event);
+
+ armv7_pmnc_disable_interrupt(cnt);
+
+ /* Reset counter */
+ cnt ? armv7_cntn_read(cnt, 0) : armv7_ccnt_read(0);
+
+ /* Enable counter */
+ armv7_pmnc_enable_counter(cnt);
+ }
+
+ /* enable */
+ armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
+
+ /* return zero values, no need to read as the counters were just reset */
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static int gator_events_armv7_offline(int **buffer, bool migrate)
+{
+ /* disable all counters, including PMCCNTR; overflow IRQs will not be signaled */
+ armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
+
+ return 0;
+}
+
+static void gator_events_armv7_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+static int gator_events_armv7_read(int **buffer, bool sched_switch)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+ if (!(armv7_pmnc_read() & PMNC_E))
+ return 0;
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ int value;
+
+ if (cnt == CCNT)
+ value = armv7_ccnt_read(0);
+ else
+ value = armv7_cntn_read(cnt, 0);
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_armv7_interface = {
+ .create_files = gator_events_armv7_create_files,
+ .stop = gator_events_armv7_stop,
+ .online = gator_events_armv7_online,
+ .offline = gator_events_armv7_offline,
+ .read = gator_events_armv7_read,
+};
+
+int gator_events_armv7_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case CORTEX_A5:
+ pmnc_name = "ARMv7_Cortex_A5";
+ pmnc_counters = 2;
+ break;
+ case CORTEX_A7:
+ pmnc_name = "ARMv7_Cortex_A7";
+ pmnc_counters = 4;
+ break;
+ case CORTEX_A8:
+ pmnc_name = "ARMv7_Cortex_A8";
+ pmnc_counters = 4;
+ break;
+ case CORTEX_A9:
+ pmnc_name = "ARMv7_Cortex_A9";
+ pmnc_counters = 6;
+ break;
+ case CORTEX_A15:
+ pmnc_name = "ARMv7_Cortex_A15";
+ pmnc_counters = 6;
+ break;
+ /* ARM Cortex A17 is not supported by versions of Linux before 3.0 */
+ default:
+ return -1;
+ }
+
+ pmnc_counters++; /* CNT[n] + CCNT */
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_armv7_interface);
+}
+
+#endif
diff --git a/drivers/parrot/gator/gator_events_block.c b/drivers/parrot/gator/gator_events_block.c
new file mode 100644
index 00000000000000..a352a54afa025d
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_block.c
@@ -0,0 +1,160 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/block.h>
+
+#define BLOCK_RQ_WR 0
+#define BLOCK_RQ_RD 1
+
+#define BLOCK_TOTAL (BLOCK_RQ_RD+1)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+#define EVENTWRITE REQ_RW
+#else
+#define EVENTWRITE REQ_WRITE
+#endif
+
+static ulong block_rq_wr_enabled;
+static ulong block_rq_rd_enabled;
+static ulong block_rq_wr_key;
+static ulong block_rq_rd_key;
+static atomic_t blockCnt[BLOCK_TOTAL];
+static int blockGet[BLOCK_TOTAL * 4];
+
+/* Tracepoint changed in 3.15 and backported to older kernels. The Makefile tries to autodetect the correct value, but if it fails, change the #if below */
+#if OLD_BLOCK_RQ_COMPLETE
+GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq))
+#else
+GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq, unsigned int nr_bytes))
+#endif
+{
+ int write;
+ unsigned int size;
+
+ if (!rq)
+ return;
+
+ write = rq->cmd_flags & EVENTWRITE;
+#if OLD_BLOCK_RQ_COMPLETE
+ size = rq->resid_len;
+#else
+ size = nr_bytes;
+#endif
+
+ if (!size)
+ return;
+
+ if (write) {
+ if (block_rq_wr_enabled)
+ atomic_add(size, &blockCnt[BLOCK_RQ_WR]);
+ } else {
+ if (block_rq_rd_enabled)
+ atomic_add(size, &blockCnt[BLOCK_RQ_RD]);
+ }
+}
+
+static int gator_events_block_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* block_complete_wr */
+ dir = gatorfs_mkdir(sb, root, "Linux_block_rq_wr");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &block_rq_wr_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_wr_key);
+
+ /* block_complete_rd */
+ dir = gatorfs_mkdir(sb, root, "Linux_block_rq_rd");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &block_rq_rd_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &block_rq_rd_key);
+
+ return 0;
+}
+
+static int gator_events_block_start(void)
+{
+ /* register tracepoints */
+ if (block_rq_wr_enabled || block_rq_rd_enabled)
+ if (GATOR_REGISTER_TRACE(block_rq_complete))
+ goto fail_block_rq_exit;
+ pr_debug("gator: registered block event tracepoints\n");
+
+ return 0;
+
+ /* unregister tracepoints on error */
+fail_block_rq_exit:
+ pr_err("gator: block event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+static void gator_events_block_stop(void)
+{
+ if (block_rq_wr_enabled || block_rq_rd_enabled)
+ GATOR_UNREGISTER_TRACE(block_rq_complete);
+ pr_debug("gator: unregistered block event tracepoints\n");
+
+ block_rq_wr_enabled = 0;
+ block_rq_rd_enabled = 0;
+}
+
+static int gator_events_block_read(int **buffer, bool sched_switch)
+{
+ int len, value, data = 0;
+
+ if (!on_primary_core())
+ return 0;
+
+ len = 0;
+ if (block_rq_wr_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_WR])) > 0) {
+ atomic_sub(value, &blockCnt[BLOCK_RQ_WR]);
+ blockGet[len++] = block_rq_wr_key;
+ /* Indicates to Streamline that value bytes were written now, not since the last message */
+ blockGet[len++] = 0;
+ blockGet[len++] = block_rq_wr_key;
+ blockGet[len++] = value;
+ data += value;
+ }
+ if (block_rq_rd_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_RD])) > 0) {
+ atomic_sub(value, &blockCnt[BLOCK_RQ_RD]);
+ blockGet[len++] = block_rq_rd_key;
+ /* Indicates to Streamline that value bytes were read now, not since the last message */
+ blockGet[len++] = 0;
+ blockGet[len++] = block_rq_rd_key;
+ blockGet[len++] = value;
+ data += value;
+ }
+
+ if (buffer)
+ *buffer = blockGet;
+
+ return len;
+}
+
+static struct gator_interface gator_events_block_interface = {
+ .create_files = gator_events_block_create_files,
+ .start = gator_events_block_start,
+ .stop = gator_events_block_stop,
+ .read = gator_events_block_read,
+};
+
+int gator_events_block_init(void)
+{
+ block_rq_wr_enabled = 0;
+ block_rq_rd_enabled = 0;
+
+ block_rq_wr_key = gator_events_get_key();
+ block_rq_rd_key = gator_events_get_key();
+
+ return gator_events_install(&gator_events_block_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_irq.c b/drivers/parrot/gator/gator_events_irq.c
new file mode 100644
index 00000000000000..5221aac581b332
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_irq.c
@@ -0,0 +1,163 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/irq.h>
+
+#define HARDIRQ 0
+#define SOFTIRQ 1
+#define TOTALIRQ (SOFTIRQ+1)
+
+static ulong hardirq_enabled;
+static ulong softirq_enabled;
+static ulong hardirq_key;
+static ulong softirq_key;
+static DEFINE_PER_CPU(atomic_t[TOTALIRQ], irqCnt);
+static DEFINE_PER_CPU(int[TOTALIRQ * 2], irqGet);
+
+GATOR_DEFINE_PROBE(irq_handler_exit,
+ TP_PROTO(int irq, struct irqaction *action, int ret))
+{
+ atomic_inc(&per_cpu(irqCnt, get_physical_cpu())[HARDIRQ]);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+GATOR_DEFINE_PROBE(softirq_exit, TP_PROTO(struct softirq_action *h, struct softirq_action *vec))
+#else
+GATOR_DEFINE_PROBE(softirq_exit, TP_PROTO(unsigned int vec_nr))
+#endif
+{
+ atomic_inc(&per_cpu(irqCnt, get_physical_cpu())[SOFTIRQ]);
+}
+
+static int gator_events_irq_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* irq */
+ dir = gatorfs_mkdir(sb, root, "Linux_irq_irq");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &hardirq_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &hardirq_key);
+
+ /* soft irq */
+ dir = gatorfs_mkdir(sb, root, "Linux_irq_softirq");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &softirq_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &softirq_key);
+
+ return 0;
+}
+
+static int gator_events_irq_online(int **buffer, bool migrate)
+{
+ int len = 0, cpu = get_physical_cpu();
+
+ /* synchronization with the irq_exit functions is not necessary as the values are being reset */
+ if (hardirq_enabled) {
+ atomic_set(&per_cpu(irqCnt, cpu)[HARDIRQ], 0);
+ per_cpu(irqGet, cpu)[len++] = hardirq_key;
+ per_cpu(irqGet, cpu)[len++] = 0;
+ }
+
+ if (softirq_enabled) {
+ atomic_set(&per_cpu(irqCnt, cpu)[SOFTIRQ], 0);
+ per_cpu(irqGet, cpu)[len++] = softirq_key;
+ per_cpu(irqGet, cpu)[len++] = 0;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(irqGet, cpu);
+
+ return len;
+}
+
+static int gator_events_irq_start(void)
+{
+ /* register tracepoints */
+ if (hardirq_enabled)
+ if (GATOR_REGISTER_TRACE(irq_handler_exit))
+ goto fail_hardirq_exit;
+ if (softirq_enabled)
+ if (GATOR_REGISTER_TRACE(softirq_exit))
+ goto fail_softirq_exit;
+ pr_debug("gator: registered irq tracepoints\n");
+
+ return 0;
+
+ /* unregister tracepoints on error */
+fail_softirq_exit:
+ if (hardirq_enabled)
+ GATOR_UNREGISTER_TRACE(irq_handler_exit);
+fail_hardirq_exit:
+ pr_err("gator: irq tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+static void gator_events_irq_stop(void)
+{
+ if (hardirq_enabled)
+ GATOR_UNREGISTER_TRACE(irq_handler_exit);
+ if (softirq_enabled)
+ GATOR_UNREGISTER_TRACE(softirq_exit);
+ pr_debug("gator: unregistered irq tracepoints\n");
+
+ hardirq_enabled = 0;
+ softirq_enabled = 0;
+}
+
+static int gator_events_irq_read(int **buffer, bool sched_switch)
+{
+ int len, value;
+ int cpu = get_physical_cpu();
+
+ len = 0;
+ if (hardirq_enabled) {
+ value = atomic_read(&per_cpu(irqCnt, cpu)[HARDIRQ]);
+ atomic_sub(value, &per_cpu(irqCnt, cpu)[HARDIRQ]);
+
+ per_cpu(irqGet, cpu)[len++] = hardirq_key;
+ per_cpu(irqGet, cpu)[len++] = value;
+ }
+
+ if (softirq_enabled) {
+ value = atomic_read(&per_cpu(irqCnt, cpu)[SOFTIRQ]);
+ atomic_sub(value, &per_cpu(irqCnt, cpu)[SOFTIRQ]);
+
+ per_cpu(irqGet, cpu)[len++] = softirq_key;
+ per_cpu(irqGet, cpu)[len++] = value;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(irqGet, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_irq_interface = {
+ .create_files = gator_events_irq_create_files,
+ .online = gator_events_irq_online,
+ .start = gator_events_irq_start,
+ .stop = gator_events_irq_stop,
+ .read = gator_events_irq_read,
+};
+
+int gator_events_irq_init(void)
+{
+ hardirq_key = gator_events_get_key();
+ softirq_key = gator_events_get_key();
+
+ hardirq_enabled = 0;
+ softirq_enabled = 0;
+
+ return gator_events_install(&gator_events_irq_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_l2c-310.c b/drivers/parrot/gator/gator_events_l2c-310.c
new file mode 100644
index 00000000000000..73aaac32327e8e
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_l2c-310.c
@@ -0,0 +1,208 @@
+/**
+ * l2c310 (L2 Cache Controller) event counters for gator
+ *
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#if defined(CONFIG_OF)
+#include <linux/of.h>
+#include <linux/of_address.h>
+#endif
+#include <asm/hardware/cache-l2x0.h>
+
+#include "gator.h"
+
+#define L2C310_COUNTERS_NUM 2
+
+static struct {
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long key;
+} l2c310_counters[L2C310_COUNTERS_NUM];
+
+static int l2c310_buffer[L2C310_COUNTERS_NUM * 2];
+
+static void __iomem *l2c310_base;
+
+static void gator_events_l2c310_reset_counters(void)
+{
+ u32 val = readl(l2c310_base + L2X0_EVENT_CNT_CTRL);
+
+ val |= ((1 << L2C310_COUNTERS_NUM) - 1) << 1;
+
+ writel(val, l2c310_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static int gator_events_l2c310_create_files(struct super_block *sb,
+ struct dentry *root)
+{
+ int i;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ char buf[16];
+ struct dentry *dir;
+
+ snprintf(buf, sizeof(buf), "L2C-310_cnt%d", i);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (WARN_ON(!dir))
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled",
+ &l2c310_counters[i].enabled);
+ gatorfs_create_ulong(sb, dir, "event",
+ &l2c310_counters[i].event);
+ gatorfs_create_ro_ulong(sb, dir, "key",
+ &l2c310_counters[i].key);
+ }
+
+ return 0;
+}
+
+static int gator_events_l2c310_start(void)
+{
+ static const unsigned long l2x0_event_cntx_cfg[L2C310_COUNTERS_NUM] = {
+ L2X0_EVENT_CNT0_CFG,
+ L2X0_EVENT_CNT1_CFG,
+ };
+ int i;
+
+ /* Counter event sources */
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++)
+ writel((l2c310_counters[i].event & 0xf) << 2,
+ l2c310_base + l2x0_event_cntx_cfg[i]);
+
+ gator_events_l2c310_reset_counters();
+
+ /* Event counter enable */
+ writel(1, l2c310_base + L2X0_EVENT_CNT_CTRL);
+
+ return 0;
+}
+
+static void gator_events_l2c310_stop(void)
+{
+ /* Event counter disable */
+ writel(0, l2c310_base + L2X0_EVENT_CNT_CTRL);
+}
+
+static int gator_events_l2c310_read(int **buffer, bool sched_switch)
+{
+ static const unsigned long l2x0_event_cntx_val[L2C310_COUNTERS_NUM] = {
+ L2X0_EVENT_CNT0_VAL,
+ L2X0_EVENT_CNT1_VAL,
+ };
+ int i;
+ int len = 0;
+
+ if (!on_primary_core())
+ return 0;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ if (l2c310_counters[i].enabled) {
+ l2c310_buffer[len++] = l2c310_counters[i].key;
+ l2c310_buffer[len++] = readl(l2c310_base +
+ l2x0_event_cntx_val[i]);
+ }
+ }
+
+ /* l2c310 counters are saturating, not wrapping in case of overflow */
+ gator_events_l2c310_reset_counters();
+
+ if (buffer)
+ *buffer = l2c310_buffer;
+
+ return len;
+}
+
+static struct gator_interface gator_events_l2c310_interface = {
+ .create_files = gator_events_l2c310_create_files,
+ .start = gator_events_l2c310_start,
+ .stop = gator_events_l2c310_stop,
+ .read = gator_events_l2c310_read,
+};
+
+#define L2C310_ADDR_PROBE (~0)
+
+MODULE_PARM_DESC(l2c310_addr, "L2C310 physical base address (0 to disable)");
+static unsigned long l2c310_addr = L2C310_ADDR_PROBE;
+module_param(l2c310_addr, ulong, 0444);
+
+static void __iomem *gator_events_l2c310_probe(void)
+{
+ phys_addr_t variants[] = {
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_S5PV310)
+ 0x10502000,
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ 0x48242000,
+#endif
+#if defined(CONFIG_ARCH_TEGRA)
+ 0x50043000,
+#endif
+#if defined(CONFIG_ARCH_U8500)
+ 0xa0412000,
+#endif
+#if defined(CONFIG_ARCH_VEXPRESS)
+ 0x1e00a000, /* A9x4 core tile (HBI-0191) */
+ 0x2c0f0000, /* New memory map tiles */
+#endif
+ };
+ int i;
+ void __iomem *base;
+#if defined(CONFIG_OF)
+ struct device_node *node = of_find_all_nodes(NULL);
+
+ if (node) {
+ of_node_put(node);
+
+ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
+ base = of_iomap(node, 0);
+ of_node_put(node);
+
+ return base;
+ }
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(variants); i++) {
+ base = ioremap(variants[i], SZ_4K);
+ if (base) {
+ u32 cache_id = readl(base + L2X0_CACHE_ID);
+
+ if ((cache_id & 0xff0003c0) == 0x410000c0)
+ return base;
+
+ iounmap(base);
+ }
+ }
+
+ return NULL;
+}
+
+int gator_events_l2c310_init(void)
+{
+ int i;
+
+ if (gator_cpuid() != CORTEX_A5 && gator_cpuid() != CORTEX_A9)
+ return -1;
+
+ if (l2c310_addr == L2C310_ADDR_PROBE)
+ l2c310_base = gator_events_l2c310_probe();
+ else if (l2c310_addr)
+ l2c310_base = ioremap(l2c310_addr, SZ_4K);
+
+ if (!l2c310_base)
+ return -1;
+
+ for (i = 0; i < L2C310_COUNTERS_NUM; i++) {
+ l2c310_counters[i].enabled = 0;
+ l2c310_counters[i].key = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_l2c310_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_mali_4xx.c b/drivers/parrot/gator/gator_events_mali_4xx.c
new file mode 100644
index 00000000000000..9cf43fe2c29b35
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_4xx.c
@@ -0,0 +1,622 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#include "linux/mali_linux_trace.h"
+
+#include "gator_events_mali_common.h"
+#include "gator_events_mali_4xx.h"
+
+/*
+* There have been four different variants of the comms between gator and Mali depending on driver version:
+* # | DDK vsn range | Support | Notes
+*
+* 1 | (obsolete) | No software counter support | Obsolete patches
+* 2 | (obsolete) | Tracepoint called for each separate s/w counter value as it appears | Obsolete patches
+* 3 | r3p0-04rel0 - r3p2-01rel2 | Single tracepoint for all s/w counters in a bundle. |
+* 4 | r3p2-01rel3 - present | As above but with extensions for MP devices (Mali-450) | At least r4p0-00rel1
+*/
+
+#if !defined(GATOR_MALI_INTERFACE_STYLE)
+#define GATOR_MALI_INTERFACE_STYLE (4)
+#endif
+
+#if GATOR_MALI_INTERFACE_STYLE == 1
+#error GATOR_MALI_INTERFACE_STYLE 1 is obsolete
+#elif GATOR_MALI_INTERFACE_STYLE == 2
+#error GATOR_MALI_INTERFACE_STYLE 2 is obsolete
+#elif GATOR_MALI_INTERFACE_STYLE >= 3
+/* Valid GATOR_MALI_INTERFACE_STYLE */
+#else
+#error Unknown GATOR_MALI_INTERFACE_STYLE option.
+#endif
+
+#if GATOR_MALI_INTERFACE_STYLE < 4
+#include "mali/mali_mjollnir_profiling_gator_api.h"
+#else
+#include "mali/mali_utgard_profiling_gator_api.h"
+#endif
+
+/*
+ * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
+ */
+#if (MALI_SUPPORT != MALI_4xx)
+#error MALI_SUPPORT set to an invalid device code: expecting MALI_4xx
+#endif
+
+static const char mali_name[] = "4xx";
+
+/* gatorfs variables for counter enable state,
+ * the event the counter should count and the
+ * 'key' (a unique id set by gatord and returned
+ * by gator.ko)
+ */
+static unsigned long counter_enabled[NUMBER_OF_EVENTS];
+static unsigned long counter_event[NUMBER_OF_EVENTS];
+static unsigned long counter_key[NUMBER_OF_EVENTS];
+
+/* The data we have recorded */
+static u32 counter_data[NUMBER_OF_EVENTS];
+/* The address to sample (or 0 if samples are sent to us) */
+static u32 *counter_address[NUMBER_OF_EVENTS];
+
+/* An array used to return the data we recorded
+ * as key,value pairs hence the *2
+ */
+static int counter_dump[NUMBER_OF_EVENTS * 2];
+static int counter_prev[NUMBER_OF_EVENTS];
+static bool prev_set[NUMBER_OF_EVENTS];
+
+/* Note whether tracepoints have been registered */
+static int trace_registered;
+
+/*
+ * These numbers define the actual numbers of each block type that exist in the system. Initially
+ * these are set to the maxima defined above; if the driver is capable of being queried (newer
+ * drivers only) then the values may be revised.
+ */
+static unsigned int n_vp_cores = MAX_NUM_VP_CORES;
+static unsigned int n_l2_cores = MAX_NUM_L2_CACHE_CORES;
+static unsigned int n_fp_cores = MAX_NUM_FP_CORES;
+
+extern struct mali_counter mali_activity[2];
+static const char *const mali_activity_names[] = {
+ "fragment",
+ "vertex",
+};
+
+/**
+ * Returns non-zero if the given counter ID is an activity counter.
+ */
+static inline int is_activity_counter(unsigned int event_id)
+{
+ return (event_id >= FIRST_ACTIVITY_EVENT &&
+ event_id <= LAST_ACTIVITY_EVENT);
+}
+
+/**
+ * Returns non-zero if the given counter ID is a hardware counter.
+ */
+static inline int is_hw_counter(unsigned int event_id)
+{
+ return (event_id >= FIRST_HW_COUNTER && event_id <= LAST_HW_COUNTER);
+}
+
+/* Probe for hardware counter events */
+GATOR_DEFINE_PROBE(mali_hw_counter, TP_PROTO(unsigned int event_id, unsigned int value))
+{
+ if (is_hw_counter(event_id))
+ counter_data[event_id] = value;
+}
+
+GATOR_DEFINE_PROBE(mali_sw_counters, TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters))
+{
+ u32 i;
+
+ /* Copy over the values for those counters which are enabled. */
+ for (i = FIRST_SW_COUNTER; i <= LAST_SW_COUNTER; i++) {
+ if (counter_enabled[i])
+ counter_data[i] = (u32)(counters[i - FIRST_SW_COUNTER]);
+ }
+}
+
+/**
+ * Create a single filesystem entry for a specified event.
+ * @param sb the superblock
+ * @param root Filesystem root
+ * @param name The name of the entry to create
+ * @param event The ID of the event
+ * @param create_event_item boolean indicating whether to create an 'event' filesystem entry. True to create.
+ *
+ * @return 0 if ok, non-zero if the create failed.
+ */
+static int create_fs_entry(struct super_block *sb, struct dentry *root, const char *name, int event, int create_event_item)
+{
+ struct dentry *dir;
+
+ dir = gatorfs_mkdir(sb, root, name);
+
+ if (!dir)
+ return -1;
+
+ if (create_event_item)
+ gatorfs_create_ulong(sb, dir, "event", &counter_event[event]);
+
+ gatorfs_create_ulong(sb, dir, "enabled", &counter_enabled[event]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &counter_key[event]);
+
+ return 0;
+}
+
+#if GATOR_MALI_INTERFACE_STYLE > 3
+/*
+ * Read the version info structure if available
+ */
+static void initialise_version_info(void)
+{
+ void (*mali_profiling_get_mali_version_symbol)(struct _mali_profiling_mali_version *values);
+
+ mali_profiling_get_mali_version_symbol = symbol_get(_mali_profiling_get_mali_version);
+
+ if (mali_profiling_get_mali_version_symbol) {
+ struct _mali_profiling_mali_version version_info;
+
+ pr_debug("gator: mali online _mali_profiling_get_mali_version symbol @ %p\n",
+ mali_profiling_get_mali_version_symbol);
+
+ /*
+ * Revise the number of each different core type using information derived from the DDK.
+ */
+ mali_profiling_get_mali_version_symbol(&version_info);
+
+ n_fp_cores = version_info.num_of_fp_cores;
+ n_vp_cores = version_info.num_of_vp_cores;
+ n_l2_cores = version_info.num_of_l2_cores;
+
+ /* Release the function - we're done with it. */
+ symbol_put(_mali_profiling_get_mali_version);
+ } else {
+ pr_err("gator: mali online _mali_profiling_get_mali_version symbol not found\n");
+ pr_err("gator: check your Mali DDK version versus the GATOR_MALI_INTERFACE_STYLE setting\n");
+ }
+}
+#endif
+
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+ int event;
+
+ char buf[40];
+ int core_id;
+ int counter_number;
+
+ pr_debug("gator: Initialising counters with style = %d\n", GATOR_MALI_INTERFACE_STYLE);
+
+#if GATOR_MALI_INTERFACE_STYLE > 3
+ /*
+ * Initialise first: this sets up the number of cores available (on compatible DDK versions).
+ * Ideally this would not need guarding but other parts of the code depend on the interface style being set
+ * correctly; if it is not then the system can enter an inconsistent state.
+ */
+ initialise_version_info();
+#endif
+
+ mali_activity[0].cores = n_fp_cores;
+ mali_activity[1].cores = n_vp_cores;
+ for (event = 0; event < ARRAY_SIZE(mali_activity); event++) {
+ if (gator_mali_create_file_system(mali_name, mali_activity_names[event], sb, root, &mali_activity[event], NULL) != 0)
+ return -1;
+ }
+
+ /* Vertex processor counters */
+ for (core_id = 0; core_id < n_vp_cores; core_id++) {
+ int activity_counter_id = ACTIVITY_VP_0;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+ if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0)
+ return -1;
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ int counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+ if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
+ return -1;
+ }
+ }
+
+ /* Fragment processors' counters */
+ for (core_id = 0; core_id < n_fp_cores; core_id++) {
+ int activity_counter_id = ACTIVITY_FP_0 + core_id;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+ if (create_fs_entry(sb, root, buf, activity_counter_id, 0) != 0)
+ return -1;
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ int counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+ if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
+ return -1;
+ }
+ }
+
+ /* L2 Cache counters */
+ for (core_id = 0; core_id < n_l2_cores; core_id++) {
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ int counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+ if (create_fs_entry(sb, root, buf, counter_id, 1) != 0)
+ return -1;
+ }
+ }
+
+ /* Now set up the software counter entries */
+ for (event = FIRST_SW_COUNTER; event <= LAST_SW_COUNTER; event++) {
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_SW_%d", mali_name, event - FIRST_SW_COUNTER);
+
+ if (create_fs_entry(sb, root, buf, event, 0) != 0)
+ return -1;
+ }
+
+ /* Now set up the special counter entries */
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_Filmstrip_cnt0", mali_name);
+ if (create_fs_entry(sb, root, buf, COUNTER_FILMSTRIP, 1) != 0)
+ return -1;
+
+#ifdef DVFS_REPORTED_BY_DDK
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_Frequency", mali_name);
+ if (create_fs_entry(sb, root, buf, COUNTER_FREQUENCY, 1) != 0)
+ return -1;
+
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_Voltage", mali_name);
+ if (create_fs_entry(sb, root, buf, COUNTER_VOLTAGE, 1) != 0)
+ return -1;
+#endif
+
+ return 0;
+}
+
+/*
+ * Local store for the get_counters entry point into the DDK.
+ * This is stored here since it is used very regularly.
+ */
+static void (*mali_get_counters)(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
+static u32 (*mali_get_l2_counters)(struct _mali_profiling_l2_counter_values *values);
+
+/*
+ * Examine list of counters between two index limits and determine if any one is enabled.
+ * Returns 1 if any counter is enabled, 0 if none is.
+ */
+static int is_any_counter_enabled(unsigned int first_counter, unsigned int last_counter)
+{
+ unsigned int i;
+
+ for (i = first_counter; i <= last_counter; i++) {
+ if (counter_enabled[i])
+ return 1; /* At least one counter is enabled */
+ }
+
+ return 0; /* No s/w counters enabled */
+}
+
+static void init_counters(unsigned int from_counter, unsigned int to_counter)
+{
+ unsigned int counter_id;
+
+ /* If a Mali driver is present and exporting the appropriate symbol
+ * then we can request the HW counters (of which there are only 2)
+ * be configured to count the desired events
+ */
+ mali_profiling_set_event_type *mali_set_hw_event;
+
+ mali_set_hw_event = symbol_get(_mali_profiling_set_event);
+
+ if (mali_set_hw_event) {
+ pr_debug("gator: mali online _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
+
+ for (counter_id = from_counter; counter_id <= to_counter; counter_id++) {
+ if (counter_enabled[counter_id])
+ mali_set_hw_event(counter_id, counter_event[counter_id]);
+ else
+ mali_set_hw_event(counter_id, 0xFFFFFFFF);
+ }
+
+ symbol_put(_mali_profiling_set_event);
+ } else {
+ pr_err("gator: mali online _mali_profiling_set_event symbol not found\n");
+ }
+}
+
+static void mali_counter_initialize(void)
+{
+ int i;
+
+ mali_profiling_control_type *mali_control;
+
+ init_counters(COUNTER_L2_0_C0, COUNTER_L2_0_C0 + (2 * n_l2_cores) - 1);
+ init_counters(COUNTER_VP_0_C0, COUNTER_VP_0_C0 + (2 * n_vp_cores) - 1);
+ init_counters(COUNTER_FP_0_C0, COUNTER_FP_0_C0 + (2 * n_fp_cores) - 1);
+
+ /* Generic control interface for Mali DDK. */
+ mali_control = symbol_get(_mali_profiling_control);
+ if (mali_control) {
+ /* The event attribute in the XML file keeps the actual frame rate. */
+ unsigned int rate = counter_event[COUNTER_FILMSTRIP] & 0xff;
+ unsigned int resize_factor = (counter_event[COUNTER_FILMSTRIP] >> 8) & 0xff;
+
+ pr_debug("gator: mali online _mali_profiling_control symbol @ %p\n", mali_control);
+
+ mali_control(SW_COUNTER_ENABLE, (is_any_counter_enabled(FIRST_SW_COUNTER, LAST_SW_COUNTER) ? 1 : 0));
+ mali_control(FBDUMP_CONTROL_ENABLE, (counter_enabled[COUNTER_FILMSTRIP] ? 1 : 0));
+ mali_control(FBDUMP_CONTROL_RATE, rate);
+ mali_control(FBDUMP_CONTROL_RESIZE_FACTOR, resize_factor);
+
+ pr_debug("gator: sent mali_control enabled=%d, rate=%d\n", (counter_enabled[COUNTER_FILMSTRIP] ? 1 : 0), rate);
+
+ symbol_put(_mali_profiling_control);
+ } else {
+ pr_err("gator: mali online _mali_profiling_control symbol not found\n");
+ }
+
+ mali_get_counters = symbol_get(_mali_profiling_get_counters);
+ if (mali_get_counters)
+ pr_debug("gator: mali online _mali_profiling_get_counters symbol @ %p\n", mali_get_counters);
+ else
+ pr_debug("gator WARNING: mali _mali_profiling_get_counters symbol not defined\n");
+
+ mali_get_l2_counters = symbol_get(_mali_profiling_get_l2_counters);
+ if (mali_get_l2_counters)
+ pr_debug("gator: mali online _mali_profiling_get_l2_counters symbol @ %p\n", mali_get_l2_counters);
+ else
+ pr_debug("gator WARNING: mali _mali_profiling_get_l2_counters symbol not defined\n");
+
+ if (!mali_get_counters && !mali_get_l2_counters) {
+ pr_debug("gator: WARNING: no L2 counters available\n");
+ n_l2_cores = 0;
+ }
+
+	/* Clear counters at the start */
+ for (i = 0; i < NUMBER_OF_EVENTS; i++) {
+ counter_data[i] = 0;
+ prev_set[i] = false;
+ }
+}
+
+static void mali_counter_deinitialize(void)
+{
+ mali_profiling_set_event_type *mali_set_hw_event;
+ mali_profiling_control_type *mali_control;
+
+ mali_set_hw_event = symbol_get(_mali_profiling_set_event);
+
+ if (mali_set_hw_event) {
+ int i;
+
+ pr_debug("gator: mali offline _mali_profiling_set_event symbol @ %p\n", mali_set_hw_event);
+ for (i = FIRST_HW_COUNTER; i <= LAST_HW_COUNTER; i++)
+ mali_set_hw_event(i, 0xFFFFFFFF);
+
+ symbol_put(_mali_profiling_set_event);
+ } else {
+ pr_err("gator: mali offline _mali_profiling_set_event symbol not found\n");
+ }
+
+ /* Generic control interface for Mali DDK. */
+ mali_control = symbol_get(_mali_profiling_control);
+
+ if (mali_control) {
+ pr_debug("gator: mali offline _mali_profiling_control symbol @ %p\n", mali_control);
+
+ /* Reset the DDK state - disable counter collection */
+ mali_control(SW_COUNTER_ENABLE, 0);
+
+ mali_control(FBDUMP_CONTROL_ENABLE, 0);
+
+ symbol_put(_mali_profiling_control);
+ } else {
+ pr_err("gator: mali offline _mali_profiling_control symbol not found\n");
+ }
+
+ if (mali_get_counters)
+ symbol_put(_mali_profiling_get_counters);
+
+ if (mali_get_l2_counters)
+ symbol_put(_mali_profiling_get_l2_counters);
+}
+
+static int start(void)
+{
+ /* register tracepoints */
+ if (GATOR_REGISTER_TRACE(mali_hw_counter)) {
+ pr_err("gator: mali_hw_counter tracepoint failed to activate\n");
+ return -1;
+ }
+
+ /* For Mali drivers with built-in support. */
+ if (GATOR_REGISTER_TRACE(mali_sw_counters)) {
+ pr_err("gator: mali_sw_counters tracepoint failed to activate\n");
+ return -1;
+ }
+
+ trace_registered = 1;
+
+ mali_counter_initialize();
+ return 0;
+}
+
+static void stop(void)
+{
+ unsigned int cnt;
+
+ pr_debug("gator: mali stop\n");
+
+ if (trace_registered) {
+ GATOR_UNREGISTER_TRACE(mali_hw_counter);
+
+ /* For Mali drivers with built-in support. */
+ GATOR_UNREGISTER_TRACE(mali_sw_counters);
+
+ pr_debug("gator: mali timeline tracepoint deactivated\n");
+
+ trace_registered = 0;
+ }
+
+ for (cnt = 0; cnt < NUMBER_OF_EVENTS; cnt++) {
+ counter_enabled[cnt] = 0;
+ counter_event[cnt] = 0;
+ counter_address[cnt] = NULL;
+ }
+
+ mali_counter_deinitialize();
+}
+
+static void dump_counters(unsigned int from_counter, unsigned int to_counter, unsigned int *len)
+{
+ unsigned int counter_id;
+
+ for (counter_id = from_counter; counter_id <= to_counter; counter_id++) {
+ if (counter_enabled[counter_id]) {
+ counter_dump[(*len)++] = counter_key[counter_id];
+ counter_dump[(*len)++] = counter_data[counter_id];
+
+ counter_data[counter_id] = 0;
+ }
+ }
+}
+
+static int read(int **buffer, bool sched_switch)
+{
+ int len = 0;
+
+ if (!on_primary_core())
+ return 0;
+
+ /* Read the L2 C0 and C1 here. */
+ if (n_l2_cores > 0 && is_any_counter_enabled(COUNTER_L2_0_C0, COUNTER_L2_0_C0 + (2 * n_l2_cores))) {
+ unsigned int unavailable_l2_caches = 0;
+ struct _mali_profiling_l2_counter_values cache_values;
+ unsigned int cache_id;
+ struct _mali_profiling_core_counters *per_core;
+
+ /* Poke the driver to get the counter values - older style; only one L2 cache */
+ if (mali_get_l2_counters) {
+ unavailable_l2_caches = mali_get_l2_counters(&cache_values);
+ } else if (mali_get_counters) {
+ per_core = &cache_values.cores[0];
+ mali_get_counters(&per_core->source0, &per_core->value0, &per_core->source1, &per_core->value1);
+ } else {
+			/* This should never happen, as n_l2_cores is only set > 0 if one of the above functions is found. */
+ }
+
+ /* Fill in the two cache counter values for each cache block. */
+ for (cache_id = 0; cache_id < n_l2_cores; cache_id++) {
+ unsigned int counter_id_0 = COUNTER_L2_0_C0 + (2 * cache_id);
+ unsigned int counter_id_1 = counter_id_0 + 1;
+
+ if ((1 << cache_id) & unavailable_l2_caches)
+ continue; /* This cache is unavailable (powered-off, possibly). */
+
+ per_core = &cache_values.cores[cache_id];
+
+ if (counter_enabled[counter_id_0] && prev_set[counter_id_0]) {
+ /* Calculate and save src0's counter val0 */
+ counter_dump[len++] = counter_key[counter_id_0];
+ counter_dump[len++] = per_core->value0 - counter_prev[counter_id_0];
+ }
+
+ if (counter_enabled[counter_id_1] && prev_set[counter_id_1]) {
+ /* Calculate and save src1's counter val1 */
+ counter_dump[len++] = counter_key[counter_id_1];
+ counter_dump[len++] = per_core->value1 - counter_prev[counter_id_1];
+ }
+
+ /* Save the previous values for the counters. */
+ counter_prev[counter_id_0] = per_core->value0;
+ prev_set[counter_id_0] = true;
+ counter_prev[counter_id_1] = per_core->value1;
+ prev_set[counter_id_1] = true;
+ }
+ }
+
+ /* Process other (non-timeline) counters. */
+ dump_counters(COUNTER_VP_0_C0, COUNTER_VP_0_C0 + (2 * n_vp_cores) - 1, &len);
+ dump_counters(COUNTER_FP_0_C0, COUNTER_FP_0_C0 + (2 * n_fp_cores) - 1, &len);
+
+ dump_counters(FIRST_SW_COUNTER, LAST_SW_COUNTER, &len);
+
+#ifdef DVFS_REPORTED_BY_DDK
+ {
+ int cnt;
+ /*
+ * Add in the voltage and frequency counters if enabled. Note
+ * that, since these are actually passed as events, the counter
+ * value should not be cleared.
+ */
+ cnt = COUNTER_FREQUENCY;
+ if (counter_enabled[cnt]) {
+ counter_dump[len++] = counter_key[cnt];
+ counter_dump[len++] = counter_data[cnt];
+ }
+
+ cnt = COUNTER_VOLTAGE;
+ if (counter_enabled[cnt]) {
+ counter_dump[len++] = counter_key[cnt];
+ counter_dump[len++] = counter_data[cnt];
+ }
+ }
+#endif
+
+ if (buffer)
+ *buffer = counter_dump;
+
+ return len;
+}
+
+static struct gator_interface gator_events_mali_interface = {
+ .create_files = create_files,
+ .start = start,
+ .stop = stop,
+ .read = read,
+};
+
+extern void gator_events_mali_log_dvfs_event(unsigned int frequency_mhz, unsigned int voltage_mv)
+{
+#ifdef DVFS_REPORTED_BY_DDK
+ counter_data[COUNTER_FREQUENCY] = frequency_mhz;
+ counter_data[COUNTER_VOLTAGE] = voltage_mv;
+#endif
+}
+
+int gator_events_mali_init(void)
+{
+ unsigned int cnt;
+
+ pr_debug("gator: mali init\n");
+
+ gator_mali_initialise_counters(mali_activity, ARRAY_SIZE(mali_activity));
+
+ for (cnt = 0; cnt < NUMBER_OF_EVENTS; cnt++) {
+ counter_enabled[cnt] = 0;
+ counter_event[cnt] = 0;
+ counter_key[cnt] = gator_events_get_key();
+ counter_address[cnt] = NULL;
+ counter_data[cnt] = 0;
+ }
+
+ trace_registered = 0;
+
+ return gator_events_install(&gator_events_mali_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_mali_4xx.h b/drivers/parrot/gator/gator_events_mali_4xx.h
new file mode 100644
index 00000000000000..976ca8c4cfa1af
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_4xx.h
@@ -0,0 +1,18 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * Header contains common definitions for the Mali-4xx processors.
+ */
+#if !defined(GATOR_EVENTS_MALI_4xx_H)
+#define GATOR_EVENTS_MALI_4xx_H
+
+extern void gator_events_mali_log_dvfs_event(unsigned int d0, unsigned int d1);
+
+#endif /* GATOR_EVENTS_MALI_4xx_H */
diff --git a/drivers/parrot/gator/gator_events_mali_common.c b/drivers/parrot/gator/gator_events_mali_common.c
new file mode 100644
index 00000000000000..1af87d649afe0f
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_common.c
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include "gator_events_mali_common.h"
+
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, struct mali_counter *counter, unsigned long *event)
+{
+ int err;
+ char buf[255];
+ struct dentry *dir;
+
+ /* If the counter name is empty ignore it */
+ if (strlen(event_name) != 0) {
+ /* Set up the filesystem entry for this event. */
+ if (mali_name == NULL)
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s", event_name);
+ else
+ snprintf(buf, sizeof(buf), "ARM_Mali-%s_%s", mali_name, event_name);
+
+ dir = gatorfs_mkdir(sb, root, buf);
+
+ if (dir == NULL) {
+ pr_debug("gator: %s: error creating file system for: %s (%s)\n", mali_name, event_name, buf);
+ return -1;
+ }
+
+ err = gatorfs_create_ulong(sb, dir, "enabled", &counter->enabled);
+ if (err != 0) {
+ pr_debug("gator: %s: error calling gatorfs_create_ulong for: %s (%s)\n", mali_name, event_name, buf);
+ return -1;
+ }
+ err = gatorfs_create_ro_ulong(sb, dir, "key", &counter->key);
+ if (err != 0) {
+ pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
+ return -1;
+ }
+ if (counter->cores != -1) {
+ err = gatorfs_create_ro_ulong(sb, dir, "cores", &counter->cores);
+ if (err != 0) {
+ pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
+ return -1;
+ }
+ }
+ if (event != NULL) {
+ err = gatorfs_create_ulong(sb, dir, "event", event);
+ if (err != 0) {
+ pr_debug("gator: %s: error calling gatorfs_create_ro_ulong for: %s (%s)\n", mali_name, event_name, buf);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+extern void gator_mali_initialise_counters(struct mali_counter counters[], unsigned int n_counters)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < n_counters; cnt++) {
+ struct mali_counter *counter = &counters[cnt];
+
+ counter->key = gator_events_get_key();
+ counter->enabled = 0;
+ counter->cores = -1;
+ }
+}
diff --git a/drivers/parrot/gator/gator_events_mali_common.h b/drivers/parrot/gator/gator_events_mali_common.h
new file mode 100644
index 00000000000000..e7082e62fe88c3
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_common.h
@@ -0,0 +1,77 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#if !defined(GATOR_EVENTS_MALI_COMMON_H)
+#define GATOR_EVENTS_MALI_COMMON_H
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/* Ensure that MALI_SUPPORT has been defined to something. */
+#ifndef MALI_SUPPORT
+#error MALI_SUPPORT not defined!
+#endif
+
+/* Values for the supported activity event types */
+#define ACTIVITY_START (1)
+#define ACTIVITY_STOP (2)
+
+/*
+ * Runtime state information for a counter.
+ */
+struct mali_counter {
+ /* 'key' (a unique id set by gatord and returned by gator.ko) */
+ unsigned long key;
+ /* counter enable state */
+ unsigned long enabled;
+ /* for activity counters, the number of cores, otherwise -1 */
+ unsigned long cores;
+};
+
+/*
+ * Mali-4xx
+ */
+typedef int mali_profiling_set_event_type(unsigned int, int);
+typedef void mali_profiling_control_type(unsigned int, unsigned int);
+
+/*
+ * Driver entry points for functions called directly by gator.
+ */
+extern int _mali_profiling_set_event(unsigned int, int);
+extern void _mali_profiling_control(unsigned int, unsigned int);
+extern void _mali_profiling_get_counters(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
+
+/**
+ * Creates a filesystem entry under /dev/gator relating to the specified event name and key, and
+ * associate the key/enable values with this entry point.
+ *
+ * @param mali_name The name of the Mali device variant, or NULL for no variant prefix.
+ * @param event_name The name of the event.
+ * @param sb Linux super block
+ * @param root Directory under which the entry will be created.
+ * @param counter The counter whose key/enabled (and optional cores and event) values are exposed.
+ *
+ * @return 0 if entry point was created, non-zero if not.
+ */
+extern int gator_mali_create_file_system(const char *mali_name, const char *event_name, struct super_block *sb, struct dentry *root, struct mali_counter *counter, unsigned long *event);
+
+/**
+ * Initializes the counter array.
+ *
+ * @param counters The array of counters to initialise.
+ * @param n_counters The number of entries in each of the arrays.
+ */
+extern void gator_mali_initialise_counters(struct mali_counter counters[], unsigned int n_counters);
+
+#endif /* GATOR_EVENTS_MALI_COMMON_H */
diff --git a/drivers/parrot/gator/gator_events_mali_midgard.c b/drivers/parrot/gator/gator_events_mali_midgard.c
new file mode 100644
index 00000000000000..0aec906d7ae5c2
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_midgard.c
@@ -0,0 +1,562 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#ifdef MALI_DIR_MIDGARD
+/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard*/
+#include "mali_linux_trace.h"
+#else
+/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx*/
+#include "linux/mali_linux_trace.h"
+#endif
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Check that the MALI_SUPPORT define is set to one of the allowable device codes.
+ */
+#if (MALI_SUPPORT != MALI_MIDGARD)
+#error MALI_SUPPORT set to an invalid device code: expecting MALI_MIDGARD
+#endif
+
+static const char mali_name[] = "Midgard";
+
+/* Counters for Mali-Midgard:
+ *
+ * - Timeline events
+ * They are tracepoints, but instead of reporting a number they report a START/STOP event.
+ * They are reported in Streamline as number of microseconds while that particular counter was active.
+ *
+ * - SW counters
+ * They are tracepoints reporting a particular number.
+ * They are accumulated in sw_counter_data array until they are passed to Streamline, then they are zeroed.
+ *
+ * - Accumulators
+ * They are the same as software counters but their value is not zeroed.
+ */
+
+/* Timeline (start/stop) activity */
+static const char *const timeline_event_names[] = {
+ "PM_SHADER_0",
+ "PM_SHADER_1",
+ "PM_SHADER_2",
+ "PM_SHADER_3",
+ "PM_SHADER_4",
+ "PM_SHADER_5",
+ "PM_SHADER_6",
+ "PM_SHADER_7",
+ "PM_TILER_0",
+ "PM_L2_0",
+ "PM_L2_1",
+ "MMU_AS_0",
+ "MMU_AS_1",
+ "MMU_AS_2",
+ "MMU_AS_3"
+};
+
+enum {
+ PM_SHADER_0 = 0,
+ PM_SHADER_1,
+ PM_SHADER_2,
+ PM_SHADER_3,
+ PM_SHADER_4,
+ PM_SHADER_5,
+ PM_SHADER_6,
+ PM_SHADER_7,
+ PM_TILER_0,
+ PM_L2_0,
+ PM_L2_1,
+ MMU_AS_0,
+ MMU_AS_1,
+ MMU_AS_2,
+ MMU_AS_3
+};
+/* The number of shader blocks in the enum above */
+#define NUM_PM_SHADER (8)
+
+/* Software Counters */
+static const char *const software_counter_names[] = {
+ "MMU_PAGE_FAULT_0",
+ "MMU_PAGE_FAULT_1",
+ "MMU_PAGE_FAULT_2",
+ "MMU_PAGE_FAULT_3"
+};
+
+enum {
+ MMU_PAGE_FAULT_0 = 0,
+ MMU_PAGE_FAULT_1,
+ MMU_PAGE_FAULT_2,
+ MMU_PAGE_FAULT_3
+};
+
+/* Accumulators (reported to Streamline but, unlike SW counters, not zeroed between samples) */
+static const char *const accumulators_names[] = {
+	"TOTAL_ALLOC_PAGES"
+};
+
+enum {
+	TOTAL_ALLOC_PAGES = 0
+};
+
+/*
+ * Layout of the counters[] array: timeline events first, then software
+ * counters, then accumulators, with the filmstrip pseudo-counter last
+ * (hence the "+ 1" in NUMBER_OF_EVENTS).
+ */
+#define FIRST_TIMELINE_EVENT (0)
+#define NUMBER_OF_TIMELINE_EVENTS (sizeof(timeline_event_names) / sizeof(timeline_event_names[0]))
+#define FIRST_SOFTWARE_COUNTER (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS)
+#define NUMBER_OF_SOFTWARE_COUNTERS (sizeof(software_counter_names) / sizeof(software_counter_names[0]))
+#define FIRST_ACCUMULATOR (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS)
+#define NUMBER_OF_ACCUMULATORS (sizeof(accumulators_names) / sizeof(accumulators_names[0]))
+#define FILMSTRIP (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS)
+#define NUMBER_OF_EVENTS (NUMBER_OF_TIMELINE_EVENTS + NUMBER_OF_SOFTWARE_COUNTERS + NUMBER_OF_ACCUMULATORS + 1)
+
+/*
+ * gatorfs variables for counter enable state
+ */
+static struct mali_counter counters[NUMBER_OF_EVENTS];
+static unsigned long filmstrip_event;
+
+/* An array used to return the data we recorded
+ * as key,value pairs hence the *2
+ */
+static int counter_dump[NUMBER_OF_EVENTS * 2];
+
+/*
+ * Array holding counter start times (in ns) for each counter. A zero
+ * here indicates that the activity monitored by this counter is not
+ * running.
+ */
+static struct timespec timeline_event_starttime[NUMBER_OF_TIMELINE_EVENTS];
+
+/* The data we have recorded */
+static unsigned int timeline_data[NUMBER_OF_TIMELINE_EVENTS];
+static unsigned int sw_counter_data[NUMBER_OF_SOFTWARE_COUNTERS];
+static unsigned int accumulators_data[NUMBER_OF_ACCUMULATORS];
+
+/* Hold the previous timestamp, used to calculate the sample interval. */
+static struct timespec prev_timestamp;
+
+/**
+ * Returns the timespan (in microseconds) between the two specified timestamps.
+ *
+ * @param start Ptr to the start timestamp
+ * @param end Ptr to the end timestamp
+ *
+ * @return Number of microseconds between the two timestamps (can be negative if start follows end).
+ *
+ * NOTE(review): on 32-bit kernels 'long' overflows once the interval exceeds
+ * roughly 35 minutes; harmless for the millisecond-scale sample periods used
+ * here, but worth confirming if this helper is ever reused.
+ */
+static inline long get_duration_us(const struct timespec *start, const struct timespec *end)
+{
+	long event_duration_us = (end->tv_nsec - start->tv_nsec) / 1000;
+
+	event_duration_us += (end->tv_sec - start->tv_sec) * 1000000;
+
+	return event_duration_us;
+}
+
+/**
+ * Accumulates the active time of a timeline (START/STOP) activity.
+ *
+ * On ACTIVITY_START the current time is latched (unless the activity is
+ * already running); on ACTIVITY_STOP the elapsed microseconds are added to
+ * timeline_data[] and the start time is cleared. A tv_sec of 0 in
+ * timeline_event_starttime[] means "not running".
+ *
+ * @param timeline_index Index into timeline_event_starttime[]/timeline_data[].
+ * @param type ACTIVITY_START or ACTIVITY_STOP; any other value is ignored.
+ */
+static void record_timeline_event(unsigned int timeline_index, unsigned int type)
+{
+	struct timespec event_timestamp;
+	struct timespec *event_start = &timeline_event_starttime[timeline_index];
+
+	switch (type) {
+	case ACTIVITY_START:
+		/* Get the event time... */
+		getnstimeofday(&event_timestamp);
+
+		/* Remember the start time if the activity is not already started */
+		if (event_start->tv_sec == 0)
+			*event_start = event_timestamp; /* Structure copy */
+		break;
+
+	case ACTIVITY_STOP:
+		/* if the counter was started... */
+		if (event_start->tv_sec != 0) {
+			/* Get the event time... */
+			getnstimeofday(&event_timestamp);
+
+			/* Accumulate the duration in us */
+			timeline_data[timeline_index] += get_duration_us(event_start, &event_timestamp);
+
+			/* Reset the start time to indicate the activity is stopped. */
+			event_start->tv_sec = 0;
+		}
+		break;
+
+	default:
+		/* Other activity events are ignored. */
+		break;
+	}
+}
+
+/*
+ * Documentation about the following tracepoints is in mali_linux_trace.h
+ */
+
+/*
+ * Power-management status tracepoint: translates changes in the core-presence
+ * bitmasks into START/STOP timeline events for shader, tiler and L2 blocks.
+ * Previous bitmasks are kept in function-static state so only bit transitions
+ * are reported.
+ */
+GATOR_DEFINE_PROBE(mali_pm_status, TP_PROTO(unsigned int event_id, unsigned long long value))
+{
+#define SHADER_PRESENT_LO 0x100	/* (RO) Shader core present bitmap, low word */
+#define TILER_PRESENT_LO 0x110	/* (RO) Tiler core present bitmap, low word */
+#define L2_PRESENT_LO 0x120	/* (RO) Level 2 cache present bitmap, low word */
+#define BIT_AT(value, pos) ((value >> pos) & 1)
+
+	static unsigned long long previous_shader_bitmask;
+	static unsigned long long previous_tiler_bitmask;
+	static unsigned long long previous_l2_bitmask;
+
+	switch (event_id) {
+	case SHADER_PRESENT_LO:
+		{
+			unsigned long long changed_bitmask = previous_shader_bitmask ^ value;
+			int pos;
+
+			/* One timeline event per shader core whose bit flipped. */
+			for (pos = 0; pos < NUM_PM_SHADER; ++pos) {
+				if (BIT_AT(changed_bitmask, pos))
+					record_timeline_event(PM_SHADER_0 + pos, BIT_AT(value, pos) ? ACTIVITY_START : ACTIVITY_STOP);
+			}
+
+			previous_shader_bitmask = value;
+			break;
+		}
+
+	case TILER_PRESENT_LO:
+		{
+			unsigned long long changed = previous_tiler_bitmask ^ value;
+
+			if (BIT_AT(changed, 0))
+				record_timeline_event(PM_TILER_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+
+			previous_tiler_bitmask = value;
+			break;
+		}
+
+	case L2_PRESENT_LO:
+		{
+			unsigned long long changed = previous_l2_bitmask ^ value;
+
+			/* NOTE(review): the second L2 slice is assumed to sit at bit 4
+			 * of the presence bitmap — confirm against the GPU register
+			 * layout for the targeted Midgard variants. */
+			if (BIT_AT(changed, 0))
+				record_timeline_event(PM_L2_0, BIT_AT(value, 0) ? ACTIVITY_START : ACTIVITY_STOP);
+			if (BIT_AT(changed, 4))
+				record_timeline_event(PM_L2_1, BIT_AT(value, 4) ? ACTIVITY_START : ACTIVITY_STOP);
+
+			previous_l2_bitmask = value;
+			break;
+		}
+
+	default:
+		/* No other blocks are supported at present */
+		break;
+	}
+
+#undef SHADER_PRESENT_LO
+#undef TILER_PRESENT_LO
+#undef L2_PRESENT_LO
+#undef BIT_AT
+}
+
+/* Page-fault tracepoint: event_id offsets into the MMU_PAGE_FAULT_* counters. */
+GATOR_DEFINE_PROBE(mali_page_fault_insert_pages, TP_PROTO(int event_id, unsigned long value))
+{
+	/* We add to the previous since we may receive many tracepoints in one sample period */
+	sw_counter_data[MMU_PAGE_FAULT_0 + event_id] += value;
+}
+
+/* MMU address-space claimed: starts the matching MMU_AS_* timeline counter. */
+GATOR_DEFINE_PROBE(mali_mmu_as_in_use, TP_PROTO(int event_id))
+{
+	record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_START);
+}
+
+/* MMU address-space released: stops the matching MMU_AS_* timeline counter. */
+GATOR_DEFINE_PROBE(mali_mmu_as_released, TP_PROTO(int event_id))
+{
+	record_timeline_event(MMU_AS_0 + event_id, ACTIVITY_STOP);
+}
+
+/* Total-allocated-pages tracepoint. Despite its name, 'event_id' carries the
+ * new total value, which overwrites (does not add to) the accumulator. */
+GATOR_DEFINE_PROBE(mali_total_alloc_pages_change, TP_PROTO(long long int event_id))
+{
+	accumulators_data[TOTAL_ALLOC_PAGES] = event_id;
+}
+
+/**
+ * Creates the gatorfs entries for all timeline, software and accumulator
+ * counters, plus the filmstrip counter when the Mali DDK control interface
+ * (_mali_profiling_control) is available.
+ *
+ * @param sb Linux super block
+ * @param root Directory under which the entries will be created.
+ *
+ * @return 0 on success, -1 if any entry could not be created.
+ */
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+	int event;
+	/*
+	 * Create the filesystem for all events
+	 */
+	int counter_index = 0;
+	mali_profiling_control_type *mali_control;
+
+	for (event = FIRST_TIMELINE_EVENT; event < FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS; event++) {
+		if (gator_mali_create_file_system(mali_name, timeline_event_names[counter_index], sb, root, &counters[event], NULL) != 0)
+			return -1;
+		counter_index++;
+	}
+	counter_index = 0;
+	for (event = FIRST_SOFTWARE_COUNTER; event < FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS; event++) {
+		if (gator_mali_create_file_system(mali_name, software_counter_names[counter_index], sb, root, &counters[event], NULL) != 0)
+			return -1;
+		counter_index++;
+	}
+	counter_index = 0;
+	for (event = FIRST_ACCUMULATOR; event < FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS; event++) {
+		if (gator_mali_create_file_system(mali_name, accumulators_names[counter_index], sb, root, &counters[event], NULL) != 0)
+			return -1;
+		counter_index++;
+	}
+
+	mali_control = symbol_get(_mali_profiling_control);
+	if (mali_control) {
+		if (gator_mali_create_file_system(mali_name, "Filmstrip_cnt0", sb, root, &counters[FILMSTRIP], &filmstrip_event) != 0) {
+			/* Fix: drop the module reference taken by symbol_get() on
+			 * the error path too; previously it leaked here. */
+			symbol_put(_mali_profiling_control);
+			return -1;
+		}
+		symbol_put(_mali_profiling_control);
+	}
+
+	return 0;
+}
+
+/**
+ * Registers all the Mali-Midgard tracepoints used by this event source.
+ *
+ * @return 1 if every tracepoint activated, 0 otherwise.
+ *
+ * NOTE(review): on failure, tracepoints registered earlier in this function
+ * are left registered. stop() unregisters unconditionally, so this is fine
+ * provided gator always calls stop() after a failed start() — confirm.
+ */
+static int register_tracepoints(void)
+{
+	if (GATOR_REGISTER_TRACE(mali_pm_status)) {
+		pr_debug("gator: Mali-Midgard: mali_pm_status tracepoint failed to activate\n");
+		return 0;
+	}
+
+	if (GATOR_REGISTER_TRACE(mali_page_fault_insert_pages)) {
+		pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages tracepoint failed to activate\n");
+		return 0;
+	}
+
+	if (GATOR_REGISTER_TRACE(mali_mmu_as_in_use)) {
+		pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use tracepoint failed to activate\n");
+		return 0;
+	}
+
+	if (GATOR_REGISTER_TRACE(mali_mmu_as_released)) {
+		pr_debug("gator: Mali-Midgard: mali_mmu_as_released tracepoint failed to activate\n");
+		return 0;
+	}
+
+	if (GATOR_REGISTER_TRACE(mali_total_alloc_pages_change)) {
+		pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change tracepoint failed to activate\n");
+		return 0;
+	}
+
+	pr_debug("gator: Mali-Midgard: start\n");
+	pr_debug("gator: Mali-Midgard: mali_pm_status probe is at %p\n", &probe_mali_pm_status);
+	pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages probe is at %p\n", &probe_mali_page_fault_insert_pages);
+	pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use probe is at %p\n", &probe_mali_mmu_as_in_use);
+	pr_debug("gator: Mali-Midgard: mali_mmu_as_released probe is at %p\n", &probe_mali_mmu_as_released);
+	pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change probe is at %p\n", &probe_mali_total_alloc_pages_change);
+
+	return 1;
+}
+
+/**
+ * Starts a capture: clears all per-sample data, registers the tracepoints
+ * and pushes the filmstrip configuration (enable/rate/resize factor packed
+ * into filmstrip_event by gatord) to the Mali DDK via _mali_profiling_control
+ * when that symbol is available.
+ *
+ * @return 0 on success, -1 if the tracepoints could not be registered.
+ */
+static int start(void)
+{
+	unsigned int cnt;
+	mali_profiling_control_type *mali_control;
+
+	/* Clean all data for the next capture */
+	for (cnt = 0; cnt < NUMBER_OF_TIMELINE_EVENTS; cnt++) {
+		timeline_event_starttime[cnt].tv_sec = timeline_event_starttime[cnt].tv_nsec = 0;
+		timeline_data[cnt] = 0;
+	}
+
+	for (cnt = 0; cnt < NUMBER_OF_SOFTWARE_COUNTERS; cnt++)
+		sw_counter_data[cnt] = 0;
+
+	for (cnt = 0; cnt < NUMBER_OF_ACCUMULATORS; cnt++)
+		accumulators_data[cnt] = 0;
+
+	/* Register tracepoints */
+	if (register_tracepoints() == 0)
+		return -1;
+
+	/* Generic control interface for Mali DDK. */
+	mali_control = symbol_get(_mali_profiling_control);
+	if (mali_control) {
+		/* The event attribute in the XML file keeps the actual frame rate. */
+		unsigned int enabled = counters[FILMSTRIP].enabled ? 1 : 0;
+		unsigned int rate = filmstrip_event & 0xff;
+		unsigned int resize_factor = (filmstrip_event >> 8) & 0xff;
+
+		pr_debug("gator: mali online _mali_profiling_control symbol @ %p\n", mali_control);
+
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+		mali_control(FBDUMP_CONTROL_ENABLE, enabled);
+		mali_control(FBDUMP_CONTROL_RATE, rate);
+		mali_control(FBDUMP_CONTROL_RESIZE_FACTOR, resize_factor);
+
+		pr_debug("gator: sent mali_control enabled=%d, rate=%d, resize_factor=%d\n", enabled, rate, resize_factor);
+
+		symbol_put(_mali_profiling_control);
+	} else {
+		pr_err("gator: mali online _mali_profiling_control symbol not found\n");
+	}
+
+	/*
+	 * Set the first timestamp for calculating the sample interval. The first interval could be quite long,
+	 * since it will be the time between 'start' and the first 'read'.
+	 * This means that timeline values will be divided by a big number for the first sample.
+	 */
+	getnstimeofday(&prev_timestamp);
+
+	return 0;
+}
+
+/**
+ * Stops a capture: unregisters all tracepoints and disables filmstrip
+ * dumping in the Mali DDK.
+ *
+ * Note: FBDUMP_CONTROL_ENABLE is the macro defined inside start() above;
+ * as a preprocessor definition it remains visible here.
+ */
+static void stop(void)
+{
+	mali_profiling_control_type *mali_control;
+
+	pr_debug("gator: Mali-Midgard: stop\n");
+
+	/*
+	 * It is safe to unregister traces even if they were not successfully
+	 * registered, so no need to check.
+	 */
+	GATOR_UNREGISTER_TRACE(mali_pm_status);
+	pr_debug("gator: Mali-Midgard: mali_pm_status tracepoint deactivated\n");
+
+	GATOR_UNREGISTER_TRACE(mali_page_fault_insert_pages);
+	pr_debug("gator: Mali-Midgard: mali_page_fault_insert_pages tracepoint deactivated\n");
+
+	GATOR_UNREGISTER_TRACE(mali_mmu_as_in_use);
+	pr_debug("gator: Mali-Midgard: mali_mmu_as_in_use tracepoint deactivated\n");
+
+	GATOR_UNREGISTER_TRACE(mali_mmu_as_released);
+	pr_debug("gator: Mali-Midgard: mali_mmu_as_released tracepoint deactivated\n");
+
+	GATOR_UNREGISTER_TRACE(mali_total_alloc_pages_change);
+	pr_debug("gator: Mali-Midgard: mali_total_alloc_pages_change tracepoint deactivated\n");
+
+	/* Generic control interface for Mali DDK. */
+	mali_control = symbol_get(_mali_profiling_control);
+	if (mali_control) {
+		pr_debug("gator: mali offline _mali_profiling_control symbol @ %p\n", mali_control);
+
+		mali_control(FBDUMP_CONTROL_ENABLE, 0);
+
+		symbol_put(_mali_profiling_control);
+	} else {
+		pr_err("gator: mali offline _mali_profiling_control symbol not found\n");
+	}
+}
+
+/**
+ * Reads all enabled counters into counter_dump as key/value pairs.
+ *
+ * Timeline counters are converted to a percent-of-sample-interval value and
+ * zeroed; software counters are reported and zeroed; accumulators are
+ * reported without being zeroed. Runs only on the primary core.
+ *
+ * @param buffer If non-NULL, receives a pointer to the static counter_dump array.
+ * @param sched_switch Unused by this event source.
+ *
+ * @return Number of ints written to the buffer (two per enabled counter).
+ */
+static int read(int **buffer, bool sched_switch)
+{
+	int cnt;
+	int len = 0;
+	long sample_interval_us = 0;
+	struct timespec read_timestamp;
+
+	if (!on_primary_core())
+		return 0;
+
+	/* Get the start of this sample period. */
+	getnstimeofday(&read_timestamp);
+
+	/*
+	 * Calculate the sample interval if the previous sample time is valid.
+	 * We use tv_sec since it will not be 0.
+	 */
+	if (prev_timestamp.tv_sec != 0)
+		sample_interval_us = get_duration_us(&prev_timestamp, &read_timestamp);
+
+	/* Structure copy. Update the previous timestamp. */
+	prev_timestamp = read_timestamp;
+
+	/*
+	 * Report the timeline counters (ACTIVITY_START/STOP)
+	 */
+	for (cnt = FIRST_TIMELINE_EVENT; cnt < (FIRST_TIMELINE_EVENT + NUMBER_OF_TIMELINE_EVENTS); cnt++) {
+		struct mali_counter *counter = &counters[cnt];
+
+		if (counter->enabled) {
+			const int index = cnt - FIRST_TIMELINE_EVENT;
+			unsigned int value;
+
+			/* If the activity is still running, reset its start time to the
+			 * start of this sample period to correct the count. Add the
+			 * time up to the end of the sample onto the count.
+			 */
+			if (timeline_event_starttime[index].tv_sec != 0) {
+				const long event_duration = get_duration_us(&timeline_event_starttime[index], &read_timestamp);
+
+				timeline_data[index] += event_duration;
+				timeline_event_starttime[index] = read_timestamp;	/* Activity is still running. */
+			}
+
+			if (sample_interval_us != 0) {
+				/* Convert the counter to a percent-of-sample value */
+				value = (timeline_data[index] * 100) / sample_interval_us;
+			} else {
+				pr_debug("gator: Mali-Midgard: setting value to zero\n");
+				value = 0;
+			}
+
+			/* Clear the counter value ready for the next sample. */
+			timeline_data[index] = 0;
+
+			counter_dump[len++] = counter->key;
+			counter_dump[len++] = value;
+		}
+	}
+
+	/* Report the software counters */
+	for (cnt = FIRST_SOFTWARE_COUNTER; cnt < (FIRST_SOFTWARE_COUNTER + NUMBER_OF_SOFTWARE_COUNTERS); cnt++) {
+		const struct mali_counter *counter = &counters[cnt];
+
+		if (counter->enabled) {
+			const int index = cnt - FIRST_SOFTWARE_COUNTER;
+
+			counter_dump[len++] = counter->key;
+			counter_dump[len++] = sw_counter_data[index];
+			/* Set the value to zero for the next time */
+			sw_counter_data[index] = 0;
+		}
+	}
+
+	/* Report the accumulators */
+	for (cnt = FIRST_ACCUMULATOR; cnt < (FIRST_ACCUMULATOR + NUMBER_OF_ACCUMULATORS); cnt++) {
+		const struct mali_counter *counter = &counters[cnt];
+
+		if (counter->enabled) {
+			const int index = cnt - FIRST_ACCUMULATOR;
+
+			counter_dump[len++] = counter->key;
+			counter_dump[len++] = accumulators_data[index];
+			/* Do not zero the accumulator */
+		}
+	}
+
+	/* Update the buffer */
+	if (buffer)
+		*buffer = counter_dump;
+
+	return len;
+}
+
+/* gator hooks for the Mali-Midgard software/timeline event source. */
+static struct gator_interface gator_events_mali_midgard_interface = {
+	.create_files = create_files,
+	.start = start,
+	.stop = stop,
+	.read = read
+};
+
+/* Entry point called by gator: initialise the counter array and register this
+ * event source. NOTE(review): 'extern' on a function definition is redundant
+ * (kept as-is; it only documents that the symbol is referenced elsewhere). */
+extern int gator_events_mali_midgard_init(void)
+{
+	pr_debug("gator: Mali-Midgard: sw_counters init\n");
+
+	gator_mali_initialise_counters(counters, NUMBER_OF_EVENTS);
+
+	return gator_events_install(&gator_events_mali_midgard_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_mali_midgard_hw.c b/drivers/parrot/gator/gator_events_mali_midgard_hw.c
new file mode 100644
index 00000000000000..cada130a694c45
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_midgard_hw.c
@@ -0,0 +1,974 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+/* Mali Midgard DDK includes */
+#if defined(MALI_SIMPLE_API)
+/* Header with wrapper functions to kbase structures and functions */
+#include "mali/mali_kbase_gator_api.h"
+#elif defined(MALI_DIR_MIDGARD)
+/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard */
+#include "mali_linux_trace.h"
+#include "mali_kbase.h"
+#include "mali_kbase_mem_linux.h"
+#else
+/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx */
+#include "linux/mali_linux_trace.h"
+#include "kbase/src/common/mali_kbase.h"
+#include "kbase/src/linux/mali_kbase_mem_linux.h"
+#endif
+
+/* If API version is not specified then assume API version 1. */
+#ifndef MALI_DDK_GATOR_API_VERSION
+#define MALI_DDK_GATOR_API_VERSION 1
+#endif
+
+#if (MALI_DDK_GATOR_API_VERSION != 1) && (MALI_DDK_GATOR_API_VERSION != 2) && (MALI_DDK_GATOR_API_VERSION != 3)
+#error MALI_DDK_GATOR_API_VERSION is invalid (must be 1 for r1/r2 DDK, or 2 for r3/r4 DDK, or 3 for r5 and later DDK).
+#endif
+
+#include "gator_events_mali_common.h"
+
+/*
+ * Mali-Midgard
+ */
+#if MALI_DDK_GATOR_API_VERSION == 3
+static uint32_t (*kbase_gator_instr_hwcnt_dump_irq_symbol)(struct kbase_gator_hwcnt_handles *);
+static uint32_t (*kbase_gator_instr_hwcnt_dump_complete_symbol)(struct kbase_gator_hwcnt_handles *, uint32_t *const);
+static struct kbase_gator_hwcnt_handles *(*kbase_gator_hwcnt_init_symbol)(struct kbase_gator_hwcnt_info *);
+static void (*kbase_gator_hwcnt_term_symbol)(struct kbase_gator_hwcnt_info *, struct kbase_gator_hwcnt_handles *);
+
+#else
+static struct kbase_device *(*kbase_find_device_symbol)(int);
+static struct kbase_context *(*kbase_create_context_symbol)(struct kbase_device *);
+static void (*kbase_destroy_context_symbol)(struct kbase_context *);
+
+#if MALI_DDK_GATOR_API_VERSION == 1
+static void *(*kbase_va_alloc_symbol)(struct kbase_context *, u32);
+static void (*kbase_va_free_symbol)(struct kbase_context *, void *);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+static void *(*kbase_va_alloc_symbol)(struct kbase_context *, u32, struct kbase_hwc_dma_mapping *);
+static void (*kbase_va_free_symbol)(struct kbase_context *, struct kbase_hwc_dma_mapping *);
+#endif
+
+static mali_error (*kbase_instr_hwcnt_enable_symbol)(struct kbase_context *, struct kbase_uk_hwcnt_setup *);
+static mali_error (*kbase_instr_hwcnt_disable_symbol)(struct kbase_context *);
+static mali_error (*kbase_instr_hwcnt_clear_symbol)(struct kbase_context *);
+static mali_error (*kbase_instr_hwcnt_dump_irq_symbol)(struct kbase_context *);
+static mali_bool (*kbase_instr_hwcnt_dump_complete_symbol)(struct kbase_context *, mali_bool *);
+
+static long shader_present_low;
+#endif
+
+/** The interval between reads, in ns.
+ *
+ * Earlier we introduced a 'hold off for 1ms after last read' to
+ * resolve MIDBASE-2178 and MALINE-724. However, the 1ms hold off is
+ * too long if no context switches occur as there is a race between
+ * this value and the tick of the read clock in gator which is also
+ * 1ms. If we 'miss' the current read, the counter values are
+ * effectively 'spread' over 2ms and the values seen are half what
+ * they should be (since Streamline averages over sample time). In the
+ * presence of context switches this spread can vary and markedly
+ * affect the counters. Currently there is no 'proper' solution to
+ * this, but empirically we have found that reducing the minimum read
+ * interval to 950us causes the counts to be much more stable.
+ */
+static const int READ_INTERVAL_NSEC = 950000;
+
+#if GATOR_TEST
+#include "gator_events_mali_midgard_hw_test.c"
+#endif
+
+#if MALI_DDK_GATOR_API_VERSION != 3
+/* Blocks for HW counters */
+enum {
+ JM_BLOCK = 0,
+ TILER_BLOCK,
+ SHADER_BLOCK,
+ MMU_BLOCK
+};
+#endif
+
+static const char *mali_name;
+
+/* Counters for Mali-Midgard:
+ *
+ * For HW counters we need strings to create /dev/gator/events files.
+ * Enums are not needed because the position of the HW name in the array is the same
+ * of the corresponding value in the received block of memory.
+ * HW counters are requested by calculating a bitmask, passed then to the driver.
+ * Every millisecond a HW counters dump is requested, and if the previous has been completed they are read.
+ */
+
+/* Hardware Counters */
+#if MALI_DDK_GATOR_API_VERSION == 3
+
+static const char *const *hardware_counter_names;
+static int number_of_hardware_counters;
+
+#else
+
+static const char *const hardware_counter_names[] = {
+ /* Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "MESSAGES_SENT",
+ "MESSAGES_RECEIVED",
+ "GPU_ACTIVE", /* 6 */
+ "IRQ_ACTIVE",
+ "JS0_JOBS",
+ "JS0_TASKS",
+ "JS0_ACTIVE",
+ "",
+ "JS0_WAIT_READ",
+ "JS0_WAIT_ISSUE",
+ "JS0_WAIT_DEPEND",
+ "JS0_WAIT_FINISH",
+ "JS1_JOBS",
+ "JS1_TASKS",
+ "JS1_ACTIVE",
+ "",
+ "JS1_WAIT_READ",
+ "JS1_WAIT_ISSUE",
+ "JS1_WAIT_DEPEND",
+ "JS1_WAIT_FINISH",
+ "JS2_JOBS",
+ "JS2_TASKS",
+ "JS2_ACTIVE",
+ "",
+ "JS2_WAIT_READ",
+ "JS2_WAIT_ISSUE",
+ "JS2_WAIT_DEPEND",
+ "JS2_WAIT_FINISH",
+ "JS3_JOBS",
+ "JS3_TASKS",
+ "JS3_ACTIVE",
+ "",
+ "JS3_WAIT_READ",
+ "JS3_WAIT_ISSUE",
+ "JS3_WAIT_DEPEND",
+ "JS3_WAIT_FINISH",
+ "JS4_JOBS",
+ "JS4_TASKS",
+ "JS4_ACTIVE",
+ "",
+ "JS4_WAIT_READ",
+ "JS4_WAIT_ISSUE",
+ "JS4_WAIT_DEPEND",
+ "JS4_WAIT_FINISH",
+ "JS5_JOBS",
+ "JS5_TASKS",
+ "JS5_ACTIVE",
+ "",
+ "JS5_WAIT_READ",
+ "JS5_WAIT_ISSUE",
+ "JS5_WAIT_DEPEND",
+ "JS5_WAIT_FINISH",
+ "JS6_JOBS",
+ "JS6_TASKS",
+ "JS6_ACTIVE",
+ "",
+ "JS6_WAIT_READ",
+ "JS6_WAIT_ISSUE",
+ "JS6_WAIT_DEPEND",
+ "JS6_WAIT_FINISH",
+
+ /*Tiler */
+ "",
+ "",
+ "",
+ "JOBS_PROCESSED",
+ "TRIANGLES",
+ "QUADS",
+ "POLYGONS",
+ "POINTS",
+ "LINES",
+ "VCACHE_HIT",
+ "VCACHE_MISS",
+ "FRONT_FACING",
+ "BACK_FACING",
+ "PRIM_VISIBLE",
+ "PRIM_CULLED",
+ "PRIM_CLIPPED",
+ "LEVEL0",
+ "LEVEL1",
+ "LEVEL2",
+ "LEVEL3",
+ "LEVEL4",
+ "LEVEL5",
+ "LEVEL6",
+ "LEVEL7",
+ "COMMAND_1",
+ "COMMAND_2",
+ "COMMAND_3",
+ "COMMAND_4",
+ "COMMAND_4_7",
+ "COMMAND_8_15",
+ "COMMAND_16_63",
+ "COMMAND_64",
+ "COMPRESS_IN",
+ "COMPRESS_OUT",
+ "COMPRESS_FLUSH",
+ "TIMESTAMPS",
+ "PCACHE_HIT",
+ "PCACHE_MISS",
+ "PCACHE_LINE",
+ "PCACHE_STALL",
+ "WRBUF_HIT",
+ "WRBUF_MISS",
+ "WRBUF_LINE",
+ "WRBUF_PARTIAL",
+ "WRBUF_STALL",
+ "ACTIVE",
+ "LOADING_DESC",
+ "INDEX_WAIT",
+ "INDEX_RANGE_WAIT",
+ "VERTEX_WAIT",
+ "PCACHE_WAIT",
+ "WRBUF_WAIT",
+ "BUS_READ",
+ "BUS_WRITE",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "UTLB_STALL",
+ "UTLB_REPLAY_MISS",
+ "UTLB_REPLAY_FULL",
+ "UTLB_NEW_MISS",
+ "UTLB_HIT",
+
+ /* Shader Core */
+ "",
+ "",
+ "",
+ "SHADER_CORE_ACTIVE",
+ "FRAG_ACTIVE",
+ "FRAG_PRIMATIVES",
+ "FRAG_PRIMATIVES_DROPPED",
+ "FRAG_CYCLE_DESC",
+ "FRAG_CYCLES_PLR",
+ "FRAG_CYCLES_VERT",
+ "FRAG_CYCLES_TRISETUP",
+ "FRAG_CYCLES_RAST",
+ "FRAG_THREADS",
+ "FRAG_DUMMY_THREADS",
+ "FRAG_QUADS_RAST",
+ "FRAG_QUADS_EZS_TEST",
+ "FRAG_QUADS_EZS_KILLED",
+ "FRAG_QUADS_LZS_TEST",
+ "FRAG_QUADS_LZS_KILLED",
+ "FRAG_CYCLE_NO_TILE",
+ "FRAG_NUM_TILES",
+ "FRAG_TRANS_ELIM",
+ "COMPUTE_ACTIVE",
+ "COMPUTE_TASKS",
+ "COMPUTE_THREADS",
+ "COMPUTE_CYCLES_DESC",
+ "TRIPIPE_ACTIVE",
+ "ARITH_WORDS",
+ "ARITH_CYCLES_REG",
+ "ARITH_CYCLES_L0",
+ "ARITH_FRAG_DEPEND",
+ "LS_WORDS",
+ "LS_ISSUES",
+ "LS_RESTARTS",
+ "LS_REISSUES_MISS",
+ "LS_REISSUES_VD",
+ "LS_REISSUE_ATTRIB_MISS",
+ "LS_NO_WB",
+ "TEX_WORDS",
+ "TEX_BUBBLES",
+ "TEX_WORDS_L0",
+ "TEX_WORDS_DESC",
+ "TEX_THREADS",
+ "TEX_RECIRC_FMISS",
+ "TEX_RECIRC_DESC",
+ "TEX_RECIRC_MULTI",
+ "TEX_RECIRC_PMISS",
+ "TEX_RECIRC_CONF",
+ "LSC_READ_HITS",
+ "LSC_READ_MISSES",
+ "LSC_WRITE_HITS",
+ "LSC_WRITE_MISSES",
+ "LSC_ATOMIC_HITS",
+ "LSC_ATOMIC_MISSES",
+ "LSC_LINE_FETCHES",
+ "LSC_DIRTY_LINE",
+ "LSC_SNOOPS",
+ "AXI_TLB_STALL",
+ "AXI_TLB_MIESS",
+ "AXI_TLB_TRANSACTION",
+ "LS_TLB_MISS",
+ "LS_TLB_HIT",
+ "AXI_BEATS_READ",
+ "AXI_BEATS_WRITTEN",
+
+ /*L2 and MMU */
+ "",
+ "",
+ "",
+ "",
+ "MMU_HIT",
+ "MMU_NEW_MISS",
+ "MMU_REPLAY_FULL",
+ "MMU_REPLAY_MISS",
+ "MMU_TABLE_WALK",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "UTLB_HIT",
+ "UTLB_NEW_MISS",
+ "UTLB_REPLAY_FULL",
+ "UTLB_REPLAY_MISS",
+ "UTLB_STALL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "L2_WRITE_BEATS",
+ "L2_READ_BEATS",
+ "L2_ANY_LOOKUP",
+ "L2_READ_LOOKUP",
+ "L2_SREAD_LOOKUP",
+ "L2_READ_REPLAY",
+ "L2_READ_SNOOP",
+ "L2_READ_HIT",
+ "L2_CLEAN_MISS",
+ "L2_WRITE_LOOKUP",
+ "L2_SWRITE_LOOKUP",
+ "L2_WRITE_REPLAY",
+ "L2_WRITE_SNOOP",
+ "L2_WRITE_HIT",
+ "L2_EXT_READ_FULL",
+ "L2_EXT_READ_HALF",
+ "L2_EXT_WRITE_FULL",
+ "L2_EXT_WRITE_HALF",
+ "L2_EXT_READ",
+ "L2_EXT_READ_LINE",
+ "L2_EXT_WRITE",
+ "L2_EXT_WRITE_LINE",
+ "L2_EXT_WRITE_SMALL",
+ "L2_EXT_BARRIER",
+ "L2_EXT_AR_STALL",
+ "L2_EXT_R_BUF_FULL",
+ "L2_EXT_RD_BUF_FULL",
+ "L2_EXT_R_RAW",
+ "L2_EXT_W_STALL",
+ "L2_EXT_W_BUF_FULL",
+ "L2_EXT_R_W_HAZARD",
+ "L2_TAG_HAZARD",
+ "L2_SNOOP_FULL",
+ "L2_REPLAY_FULL"
+};
+
+static const int number_of_hardware_counters = ARRAY_SIZE(hardware_counter_names);
+
+#endif
+
+#define GET_HW_BLOCK(c) (((c) >> 6) & 0x3)
+#define GET_COUNTER_OFFSET(c) ((c) & 0x3f)
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+/* Opaque handles for kbase_context and kbase_hwc_dma_mapping */
+static struct kbase_gator_hwcnt_handles *handles;
+
+/* Information about hardware counters */
+static struct kbase_gator_hwcnt_info *in_out_info;
+
+#else
+/* Memory to dump hardware counters into */
+static void *kernel_dump_buffer;
+
+#if MALI_DDK_GATOR_API_VERSION == 2
+/* DMA state used to manage lifetime of the buffer */
+struct kbase_hwc_dma_mapping kernel_dump_buffer_handle;
+#endif
+
+/* kbase context and device */
+static struct kbase_context *kbcontext;
+static struct kbase_device *kbdevice;
+
+/*
+ * The following function has no external prototype in older DDK
+ * revisions. When the DDK is updated then this should be removed.
+ */
+struct kbase_device *kbase_find_device(int minor);
+#endif
+
+static volatile bool kbase_device_busy;
+static unsigned int num_hardware_counters_enabled;
+
+/* gatorfs variables for counter enable state */
+static struct mali_counter *counters;
+
+/* An array used to return the data we recorded as key,value pairs */
+static int *counter_dump;
+
+extern struct mali_counter mali_activity[3];
+
+static const char *const mali_activity_names[] = {
+ "fragment",
+ "vertex",
+ "opencl",
+};
+
+/*
+ * Takes a module reference to the exported symbol FUNCTION and stores it in
+ * FUNCTION_symbol. Increments ERROR_COUNT if the symbol is missing or a
+ * reference is already held (pair with SYMBOL_CLEANUP to release).
+ */
+#define SYMBOL_GET(FUNCTION, ERROR_COUNT) \
+	do { \
+		if (FUNCTION ## _symbol) { \
+			pr_err("gator: mali " #FUNCTION " symbol was already registered\n"); \
+			(ERROR_COUNT)++; \
+		} else { \
+			FUNCTION ## _symbol = symbol_get(FUNCTION); \
+			if (!FUNCTION ## _symbol) { \
+				pr_err("gator: mali online " #FUNCTION " symbol not found\n"); \
+				(ERROR_COUNT)++; \
+			} \
+		} \
+	} while (0)
+
+/* Releases the reference taken by SYMBOL_GET; a no-op when no reference is held. */
+#define SYMBOL_CLEANUP(FUNCTION) \
+	do { \
+		if (FUNCTION ## _symbol) { \
+			symbol_put(FUNCTION); \
+			FUNCTION ## _symbol = NULL; \
+		} \
+	} while (0)
+
+/**
+ * Execute symbol_get for all the Mali symbols and check for success.
+ * @return the number of symbols not loaded.
+ *
+ * On partial failure the symbols that did load remain referenced; callers
+ * must invoke clean_symbols() to release them (as start() does).
+ */
+static int init_symbols(void)
+{
+	int error_count = 0;
+#if MALI_DDK_GATOR_API_VERSION == 3
+	SYMBOL_GET(kbase_gator_instr_hwcnt_dump_irq, error_count);
+	SYMBOL_GET(kbase_gator_instr_hwcnt_dump_complete, error_count);
+	SYMBOL_GET(kbase_gator_hwcnt_init, error_count);
+	SYMBOL_GET(kbase_gator_hwcnt_term, error_count);
+#else
+	SYMBOL_GET(kbase_find_device, error_count);
+	SYMBOL_GET(kbase_create_context, error_count);
+	SYMBOL_GET(kbase_va_alloc, error_count);
+	SYMBOL_GET(kbase_instr_hwcnt_enable, error_count);
+	SYMBOL_GET(kbase_instr_hwcnt_clear, error_count);
+	SYMBOL_GET(kbase_instr_hwcnt_dump_irq, error_count);
+	SYMBOL_GET(kbase_instr_hwcnt_dump_complete, error_count);
+	SYMBOL_GET(kbase_instr_hwcnt_disable, error_count);
+	SYMBOL_GET(kbase_va_free, error_count);
+	SYMBOL_GET(kbase_destroy_context, error_count);
+#endif
+
+	return error_count;
+}
+
+/**
+ * Execute symbol_put for all the registered Mali symbols.
+ * Symbols that were never acquired are skipped (SYMBOL_CLEANUP checks for
+ * NULL), so this is safe after a partially failed init_symbols().
+ */
+static void clean_symbols(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+	SYMBOL_CLEANUP(kbase_gator_instr_hwcnt_dump_irq);
+	SYMBOL_CLEANUP(kbase_gator_instr_hwcnt_dump_complete);
+	SYMBOL_CLEANUP(kbase_gator_hwcnt_init);
+	SYMBOL_CLEANUP(kbase_gator_hwcnt_term);
+#else
+	SYMBOL_CLEANUP(kbase_find_device);
+	SYMBOL_CLEANUP(kbase_create_context);
+	SYMBOL_CLEANUP(kbase_va_alloc);
+	SYMBOL_CLEANUP(kbase_instr_hwcnt_enable);
+	SYMBOL_CLEANUP(kbase_instr_hwcnt_clear);
+	SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_irq);
+	SYMBOL_CLEANUP(kbase_instr_hwcnt_dump_complete);
+	SYMBOL_CLEANUP(kbase_instr_hwcnt_disable);
+	SYMBOL_CLEANUP(kbase_va_free);
+	SYMBOL_CLEANUP(kbase_destroy_context);
+#endif
+}
+
+/**
+ * Determines whether a read should take place
+ * @param current_time The current time, obtained from getnstimeofday()
+ * @param prev_time_s The number of seconds at the previous read attempt.
+ * @param next_read_time_ns The time (in ns) when the next read should be allowed.
+ *
+ * @return 1 if a read should take place now (also schedules the next read
+ *         READ_INTERVAL_NSEC from now), 0 if it is too early.
+ *
+ * Note that this function has been separated out here to allow it to be tested.
+ *
+ * NOTE(review): if tv_sec jumps by more than one second between calls, only
+ * one NSEC_PER_SEC is subtracted; the worst case is an immediate (early)
+ * read, which is benign.
+ */
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns)
+{
+	/* If the current ns count rolls over a second, roll the next read time too. */
+	if (current_time->tv_sec != *prev_time_s)
+		*next_read_time_ns = *next_read_time_ns - NSEC_PER_SEC;
+
+	/* Abort the read if the next read time has not arrived. */
+	if (current_time->tv_nsec < *next_read_time_ns)
+		return 0;
+
+	/* Set the next read some fixed time after this one, and update the read timestamp. */
+	*next_read_time_ns = current_time->tv_nsec + READ_INTERVAL_NSEC;
+
+	*prev_time_s = current_time->tv_sec;
+	return 1;
+}
+
+/**
+ * Start collection of the enabled Mali Midgard hardware counters.
+ *
+ * Builds the per-block enable bitmasks from the counters[] table, resolves
+ * the Mali driver entry points, and (on the pre-v3 API) creates a kbase
+ * context plus dump buffer for reading the counters.
+ *
+ * @return 0 on success (including when the Mali driver is simply absent,
+ *         which is not a fault), -1 on failure.
+ */
+static int start(void)
+{
+#if MALI_DDK_GATOR_API_VERSION != 3
+	struct kbase_uk_hwcnt_setup setup;
+	unsigned long long shadersPresent = 0;
+	u16 bitmask[] = { 0, 0, 0, 0 };
+	mali_error err;
+#endif
+	int cnt;
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+	/* Setup HW counters */
+	num_hardware_counters_enabled = 0;
+
+	/* Declare and initialise kbase_gator_hwcnt_info structure */
+	in_out_info = kmalloc(sizeof(*in_out_info), GFP_KERNEL);
+	/* Fix: the allocation was previously dereferenced unchecked. */
+	if (in_out_info == NULL)
+		return -1;
+	for (cnt = 0; cnt < ARRAY_SIZE(in_out_info->bitmask); cnt++)
+		in_out_info->bitmask[cnt] = 0;
+
+	/* Calculate enable bitmasks based on counters_enabled array */
+	for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+		if (counters[cnt].enabled) {
+			int block = GET_HW_BLOCK(cnt);
+			int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
+
+			in_out_info->bitmask[block] |= (1 << enable_bit);
+			pr_debug("gator: Mali-Midgard: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
+			num_hardware_counters_enabled++;
+		}
+	}
+
+	/*
+	 * Fix: in_out_info was leaked on every early-exit path below; it is
+	 * only kept (and later freed by stop()) once handles is valid.
+	 */
+	if (num_hardware_counters_enabled == 0) {
+		kfree(in_out_info);
+		in_out_info = NULL;
+		return 0;
+	}
+
+	/* Create a kbase context for HW counters */
+	if (init_symbols() > 0) {
+		clean_symbols();
+		/* No Mali driver code entrypoints found - not a fault. */
+		kfree(in_out_info);
+		in_out_info = NULL;
+		return 0;
+	}
+
+	handles = kbase_gator_hwcnt_init_symbol(in_out_info);
+
+	if (handles == NULL) {
+		kfree(in_out_info);
+		in_out_info = NULL;
+		goto out;
+	}
+
+	kbase_device_busy = false;
+
+	return 0;
+#else
+	/* Setup HW counters */
+	num_hardware_counters_enabled = 0;
+
+	/* Calculate enable bitmasks based on counters_enabled array */
+	for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+		const struct mali_counter *counter = &counters[cnt];
+
+		if (counter->enabled) {
+			int block = GET_HW_BLOCK(cnt);
+			int enable_bit = GET_COUNTER_OFFSET(cnt) / 4;
+
+			bitmask[block] |= (1 << enable_bit);
+			pr_debug("gator: Mali-Midgard: hardware counter %s selected [%d]\n", hardware_counter_names[cnt], cnt);
+			num_hardware_counters_enabled++;
+		}
+	}
+
+	/* Create a kbase context for HW counters */
+	if (num_hardware_counters_enabled > 0) {
+		if (init_symbols() > 0) {
+			clean_symbols();
+			/* No Mali driver code entrypoints found - not a fault. */
+			return 0;
+		}
+
+		kbdevice = kbase_find_device_symbol(-1);
+
+		/* If we already got a context, fail */
+		if (kbcontext) {
+			pr_debug("gator: Mali-Midgard: error context already present\n");
+			goto out;
+		}
+
+		/* kbcontext will only be valid after all the Mali symbols are loaded successfully */
+		kbcontext = kbase_create_context_symbol(kbdevice);
+		if (!kbcontext) {
+			pr_debug("gator: Mali-Midgard: error creating kbase context\n");
+			goto out;
+		}
+
+		/* See if we can get the number of shader cores */
+		shadersPresent = kbdevice->shader_present_bitmap;
+		shader_present_low = (unsigned long)shadersPresent;
+
+		/*
+		 * The amount of memory needed to store the dump (bytes)
+		 * DUMP_SIZE = number of core groups
+		 * * number of blocks (always 8 for midgard)
+		 * * number of counters per block (always 64 for midgard)
+		 * * number of bytes per counter (always 4 in midgard)
+		 * For a Mali-Midgard with a single core group = 1 * 8 * 64 * 4 = 2048
+		 * For a Mali-Midgard with a dual core group = 2 * 8 * 64 * 4 = 4096
+		 */
+#if MALI_DDK_GATOR_API_VERSION == 1
+		kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+		kernel_dump_buffer = kbase_va_alloc_symbol(kbcontext, 4096, &kernel_dump_buffer_handle);
+#endif
+		if (!kernel_dump_buffer) {
+			pr_debug("gator: Mali-Midgard: error trying to allocate va\n");
+			goto destroy_context;
+		}
+
+		setup.dump_buffer = (uintptr_t)kernel_dump_buffer;
+		setup.jm_bm = bitmask[JM_BLOCK];
+		setup.tiler_bm = bitmask[TILER_BLOCK];
+		setup.shader_bm = bitmask[SHADER_BLOCK];
+		setup.mmu_l2_bm = bitmask[MMU_BLOCK];
+		/* These counters do not exist on Mali-T60x */
+		setup.l3_cache_bm = 0;
+
+		/* Use kbase API to enable hardware counters and provide dump buffer */
+		err = kbase_instr_hwcnt_enable_symbol(kbcontext, &setup);
+		if (err != MALI_ERROR_NONE) {
+			pr_debug("gator: Mali-Midgard: can't setup hardware counters\n");
+			goto free_buffer;
+		}
+		pr_debug("gator: Mali-Midgard: hardware counters enabled\n");
+		kbase_instr_hwcnt_clear_symbol(kbcontext);
+		pr_debug("gator: Mali-Midgard: hardware counters cleared\n");
+
+		kbase_device_busy = false;
+	}
+
+	return 0;
+
+free_buffer:
+#if MALI_DDK_GATOR_API_VERSION == 1
+	kbase_va_free_symbol(kbcontext, kernel_dump_buffer);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+	kbase_va_free_symbol(kbcontext, &kernel_dump_buffer_handle);
+#endif
+
+destroy_context:
+	kbase_destroy_context_symbol(kbcontext);
+#endif
+
+/* Shared failure exit: drop the symbol references taken by init_symbols(). */
+out:
+	clean_symbols();
+	return -1;
+}
+
+/*
+ * Stop counter collection and release the kbase context / hwcnt handles.
+ *
+ * NOTE: the "if (...) {" opened in each preprocessor branch below is closed
+ * by the "}" after the #endif, so the pr_debug/clean_symbols tail is shared
+ * by both API versions.  Keep brace balance in mind when editing.
+ */
+static void stop(void)
+{
+	unsigned int cnt;
+#if MALI_DDK_GATOR_API_VERSION == 3
+	struct kbase_gator_hwcnt_handles *temp_hand;
+#else
+	struct kbase_context *temp_kbcontext;
+#endif
+
+	pr_debug("gator: Mali-Midgard: stop\n");
+
+	/* Set all counters as disabled */
+	for (cnt = 0; cnt < number_of_hardware_counters; cnt++)
+		counters[cnt].enabled = 0;
+
+	/* Destroy the context for HW counters */
+#if MALI_DDK_GATOR_API_VERSION == 3
+	if (num_hardware_counters_enabled > 0 && handles != NULL) {
+		/*
+		 * Set the global variable to NULL before destroying it, because
+		 * other function will check this before using it.
+		 */
+		temp_hand = handles;
+		handles = NULL;
+
+		kbase_gator_hwcnt_term_symbol(in_out_info, temp_hand);
+
+		kfree(in_out_info);
+
+#else
+	if (num_hardware_counters_enabled > 0 && kbcontext != NULL) {
+		/*
+		 * Set the global variable to NULL before destroying it, because
+		 * other function will check this before using it.
+		 */
+		temp_kbcontext = kbcontext;
+		kbcontext = NULL;
+
+		/* Disable instrumentation, then release the dump buffer and context. */
+		kbase_instr_hwcnt_disable_symbol(temp_kbcontext);
+
+#if MALI_DDK_GATOR_API_VERSION == 1
+		kbase_va_free_symbol(temp_kbcontext, kernel_dump_buffer);
+#elif MALI_DDK_GATOR_API_VERSION == 2
+		kbase_va_free_symbol(temp_kbcontext, &kernel_dump_buffer_handle);
+#endif
+
+		kbase_destroy_context_symbol(temp_kbcontext);
+#endif
+
+		pr_debug("gator: Mali-Midgard: hardware counters stopped\n");
+
+		/* Drop the kbase symbol references taken in start(). */
+		clean_symbols();
+	}
+}
+
+/*
+ * Read one hardware counter out of the kernel dump buffer and append the
+ * (key, value) pair to counter_dump at offset @len.
+ *
+ * Shader-block counters are summed across all present cores and averaged.
+ * Returns the number of ints written (always 2).
+ */
+static int read_counter(const int cnt, const int len, const struct mali_counter *counter)
+{
+	const int block = GET_HW_BLOCK(cnt);
+	const int counter_offset = GET_COUNTER_OFFSET(cnt);
+	u32 value = 0;
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+	const char *block_base_address = (char *)in_out_info->kernel_dump_buffer;
+	int i;
+	int shader_core_count = 0;
+
+	/* Sum this counter over every 0x100-byte hw block of the right type. */
+	for (i = 0; i < in_out_info->nr_hwc_blocks; i++) {
+		if (block == in_out_info->hwc_layout[i]) {
+			value += *((u32 *)(block_base_address + (0x100 * i)) + counter_offset);
+			if (block == SHADER_BLOCK)
+				++shader_core_count;
+		}
+	}
+
+	/* Average over the shader cores that contributed. */
+	if (shader_core_count > 1)
+		value /= shader_core_count;
+#else
+	/* Byte offsets of each block type inside the dump buffer. */
+	const unsigned int vithar_blocks[] = {
+		0x700, /* VITHAR_JOB_MANAGER, Block 0 */
+		0x400, /* VITHAR_TILER, Block 1 */
+		0x000, /* VITHAR_SHADER_CORE, Block 2 */
+		0x500 /* VITHAR_MEMORY_SYSTEM, Block 3 */
+	};
+	const char *block_base_address = (char *)kernel_dump_buffer + vithar_blocks[block];
+
+	/* If counter belongs to shader block need to take into account all cores */
+	if (block == SHADER_BLOCK) {
+		int i = 0;
+		int shader_core_count = 0;
+
+		value = 0;
+
+		/* First core group: cores 0-3, one 0x100 slice each. */
+		for (i = 0; i < 4; i++) {
+			if ((shader_present_low >> i) & 1) {
+				value += *((u32 *)(block_base_address + (0x100 * i)) + counter_offset);
+				shader_core_count++;
+			}
+		}
+
+		/* Second core group: cores 4-7, offset by 0x800 in the dump. */
+		for (i = 0; i < 4; i++) {
+			if ((shader_present_low >> (i+4)) & 1) {
+				value += *((u32 *)(block_base_address + (0x100 * i) + 0x800) + counter_offset);
+				shader_core_count++;
+			}
+		}
+
+		/* Need to total by number of cores to produce an average */
+		if (shader_core_count != 0)
+			value /= shader_core_count;
+	} else {
+		value = *((u32 *)block_base_address + counter_offset);
+	}
+#endif
+
+	/* Emit the (key, value) pair consumed by gatord. */
+	counter_dump[len + 0] = counter->key;
+	counter_dump[len + 1] = value;
+
+	return 2;
+}
+
+/*
+ * gator read callback: harvest the latest completed hardware counter dump
+ * and kick off the next one.  Runs only on the primary core, and throttles
+ * itself via is_read_scheduled() to limit GPU load.
+ *
+ * NOTE: the "if (...dump_complete... == MALI_TRUE) {" opened inside the
+ * #if/#else below is closed by the "}" after the #endif - both branches
+ * share the body that follows.
+ *
+ * Returns the number of ints placed in *buffer, 0 if nothing was read,
+ * or -1 if the counter context has disappeared.
+ */
+static int read(int **buffer, bool sched_switch)
+{
+	int cnt;
+	int len = 0;
+	uint32_t success;
+
+	struct timespec current_time;
+	/* Persisted between calls: throttling state for is_read_scheduled(). */
+	static u32 prev_time_s;
+	static s32 next_read_time_ns;
+
+	if (!on_primary_core() || sched_switch)
+		return 0;
+
+	getnstimeofday(&current_time);
+
+	/*
+	 * Discard reads unless a respectable time has passed. This
+	 * reduces the load on the GPU without sacrificing accuracy on
+	 * the Streamline display.
+	 */
+	if (!is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns))
+		return 0;
+
+	/*
+	 * Report the HW counters
+	 * Only process hardware counters if at least one of the hardware counters is enabled.
+	 */
+	if (num_hardware_counters_enabled > 0) {
+#if MALI_DDK_GATOR_API_VERSION == 3
+		if (!handles)
+			return -1;
+
+		/* Mali symbols can be called safely since a kbcontext is valid */
+		if (kbase_gator_instr_hwcnt_dump_complete_symbol(handles, &success) == MALI_TRUE) {
+#else
+		if (!kbcontext)
+			return -1;
+
+		/* Mali symbols can be called safely since a kbcontext is valid */
+		if (kbase_instr_hwcnt_dump_complete_symbol(kbcontext, &success) == MALI_TRUE) {
+#endif
+			kbase_device_busy = false;
+
+			if (success == MALI_TRUE) {
+				/* Cycle through hardware counters and accumulate totals */
+				for (cnt = 0; cnt < number_of_hardware_counters; cnt++) {
+					const struct mali_counter *counter = &counters[cnt];
+
+					if (counter->enabled)
+						len += read_counter(cnt, len, counter);
+				}
+			}
+		}
+
+		/* Request the next dump (IRQ-driven) if none is in flight. */
+		if (!kbase_device_busy) {
+			kbase_device_busy = true;
+#if MALI_DDK_GATOR_API_VERSION == 3
+			kbase_gator_instr_hwcnt_dump_irq_symbol(handles);
+#else
+			kbase_instr_hwcnt_dump_irq_symbol(kbcontext);
+#endif
+		}
+	}
+
+	/* Update the buffer */
+	if (buffer)
+		*buffer = counter_dump;
+
+	return len;
+}
+
+/*
+ * Populate gatorfs with one node per software activity event and one per
+ * hardware counter.  Returns 0 on success, -1 if any node fails to appear.
+ */
+static int create_files(struct super_block *sb, struct dentry *root)
+{
+	unsigned int idx;
+
+	/* Software activity events first. */
+	for (idx = 0; idx < ARRAY_SIZE(mali_activity); idx++)
+		if (gator_mali_create_file_system("Midgard", mali_activity_names[idx], sb, root, &mali_activity[idx], NULL) != 0)
+			return -1;
+
+	/* Then the hardware counters discovered at init time. */
+	for (idx = 0; idx < number_of_hardware_counters; idx++)
+		if (gator_mali_create_file_system(mali_name, hardware_counter_names[idx], sb, root, &counters[idx], NULL) != 0)
+			return -1;
+
+	return 0;
+}
+
+/*
+ * Module shutdown: free the counter tables and (API v3) hand the counter
+ * name table back to the Mali driver.
+ */
+static void shutdown(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+	void (*kbase_gator_hwcnt_term_names_symbol)(void) = NULL;
+	int error_count = 0;
+#endif
+
+	kfree(counters);
+	kfree(counter_dump);
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+	/* Resolve the term function from the Mali driver; may legitimately fail. */
+	SYMBOL_GET(kbase_gator_hwcnt_term_names, error_count);
+
+	number_of_hardware_counters = -1;
+	hardware_counter_names = NULL;
+	if (kbase_gator_hwcnt_term_names_symbol != NULL) {
+		kbase_gator_hwcnt_term_names_symbol();
+		/* NOTE(review): informational message at pr_err level - consider pr_debug. */
+		pr_err("Released symbols\n");
+	}
+
+	SYMBOL_CLEANUP(kbase_gator_hwcnt_term_names);
+#endif
+}
+
+/* Callback table registered with the gator core via gator_events_install(). */
+static struct gator_interface gator_events_mali_midgard_interface = {
+	.shutdown = shutdown,
+	.create_files = create_files,
+	.start = start,
+	.stop = stop,
+	.read = read
+};
+
+/**
+ * Module init: discover the hardware counter names (API v3) or use the
+ * static table, allocate the counter state and dump buffer, and register
+ * this event source with gator.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int gator_events_mali_midgard_hw_init(void)
+{
+#if MALI_DDK_GATOR_API_VERSION == 3
+	const char *const *(*kbase_gator_hwcnt_init_names_symbol)(uint32_t *) = NULL;
+	int error_count = 0;
+#endif
+
+	pr_debug("gator: Mali-Midgard: sw_counters init\n");
+
+#if GATOR_TEST
+	test_all_is_read_scheduled();
+#endif
+
+#if MALI_DDK_GATOR_API_VERSION == 3
+	SYMBOL_GET(kbase_gator_hwcnt_init_names, error_count);
+	if (error_count > 0) {
+		SYMBOL_CLEANUP(kbase_gator_hwcnt_init_names);
+		return 1;
+	}
+
+	number_of_hardware_counters = -1;
+	hardware_counter_names = kbase_gator_hwcnt_init_names_symbol(&number_of_hardware_counters);
+
+	SYMBOL_CLEANUP(kbase_gator_hwcnt_init_names);
+
+	if ((hardware_counter_names == NULL) || (number_of_hardware_counters <= 0)) {
+		pr_err("gator: Error reading hardware counters names: got %d names\n", number_of_hardware_counters);
+		return -1;
+	}
+#else
+	mali_name = "Midgard";
+#endif
+
+	counters = kmalloc(sizeof(*counters)*number_of_hardware_counters, GFP_KERNEL);
+	counter_dump = kmalloc(sizeof(*counter_dump)*number_of_hardware_counters*2, GFP_KERNEL);
+
+	/* Fix: both allocations were used unchecked; fail cleanly on OOM. */
+	if (counters == NULL || counter_dump == NULL) {
+		kfree(counters);
+		counters = NULL;
+		kfree(counter_dump);
+		counter_dump = NULL;
+		return -1;
+	}
+
+	gator_mali_initialise_counters(mali_activity, ARRAY_SIZE(mali_activity));
+	gator_mali_initialise_counters(counters, number_of_hardware_counters);
+
+	return gator_events_install(&gator_events_mali_midgard_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_mali_midgard_hw_test.c b/drivers/parrot/gator/gator_events_mali_midgard_hw_test.c
new file mode 100644
index 00000000000000..31a91e1c72b28c
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mali_midgard_hw_test.c
@@ -0,0 +1,55 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/**
+ * Test functions for mali_t600_hw code.
+ */
+
+static int is_read_scheduled(const struct timespec *current_time, u32 *prev_time_s, s32 *next_read_time_ns);
+
+/*
+ * Run one scenario against is_read_scheduled() and verify both the
+ * accept/reject decision and the updated deadline.
+ * Returns 1 on pass, 0 on fail (logged via pr_err).
+ */
+static int test_is_read_scheduled(u32 s, u32 ns, u32 prev_s, s32 next_ns, int expected_result, s32 expected_next_ns)
+{
+	struct timespec current_time;
+	u32 prev_time_s = prev_s;
+	s32 next_read_time_ns = next_ns;
+
+	current_time.tv_sec = s;
+	current_time.tv_nsec = ns;
+
+	if (is_read_scheduled(&current_time, &prev_time_s, &next_read_time_ns) != expected_result) {
+		/* Fix: message previously named the wrong function ("do_read"). */
+		pr_err("Failed is_read_scheduled(%u, %u, %u, %d): expected %d\n", s, ns, prev_s, next_ns, expected_result);
+		return 0;
+	}
+
+	if (next_read_time_ns != expected_next_ns) {
+		pr_err("Failed: next_read_ns expected=%d, actual=%d\n", expected_next_ns, next_read_time_ns);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Exercise is_read_scheduled() across the interesting boundary cases:
+ * initial state, deadline not yet reached, second rollover, and a stale
+ * deadline from a previous second.  Results are reported via pr_err so
+ * they appear regardless of the debug log level.
+ */
+static void test_all_is_read_scheduled(void)
+{
+	const int HIGHEST_NS = 999999999;
+	int n_tests_passed = 0;
+
+	pr_err("gator: running tests on %s\n", __FILE__);
+
+	n_tests_passed += test_is_read_scheduled(0, 0, 0, 0, 1, READ_INTERVAL_NSEC);	/* Null time */
+	n_tests_passed += test_is_read_scheduled(100, 1000, 0, 0, 1, READ_INTERVAL_NSEC + 1000);	/* Initial values */
+
+	/* Same second, deadline in the future: read must be rejected. */
+	n_tests_passed += test_is_read_scheduled(100, HIGHEST_NS, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500);
+	/* Second rolled over: deadline shifts back by NSEC_PER_SEC, still not due. */
+	n_tests_passed += test_is_read_scheduled(101, 0001, 100, HIGHEST_NS + 500, 0, HIGHEST_NS + 500 - NSEC_PER_SEC);
+	/* Deadline (already shifted) has passed: read accepted, new deadline set. */
+	n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500 - NSEC_PER_SEC, 1, 600 + READ_INTERVAL_NSEC);
+
+	/* Rollover and acceptance in a single call. */
+	n_tests_passed += test_is_read_scheduled(101, 600, 100, HIGHEST_NS + 500, 1, 600 + READ_INTERVAL_NSEC);
+
+	pr_err("gator: %d tests passed\n", n_tests_passed);
+}
diff --git a/drivers/parrot/gator/gator_events_meminfo.c b/drivers/parrot/gator/gator_events_meminfo.c
new file mode 100644
index 00000000000000..c625ac5af9cd37
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_meminfo.c
@@ -0,0 +1,470 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+
+#include <linux/hardirq.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <trace/events/kmem.h>
+
+/*
+ * On PREEMPT_RT_FULL a dedicated kernel thread performs the reads;
+ * otherwise a timer + workqueue is used.  Expand to 0/1 here rather than
+ * to "defined(...)": a 'defined' token produced by macro expansion and
+ * then tested with #if is undefined behaviour (C11 6.10.1p4) and trips
+ * GCC's -Wexpansion-to-defined.
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define USE_THREAD 1
+#else
+#define USE_THREAD 0
+#endif
+
+enum {
+ MEMINFO_MEMFREE,
+ MEMINFO_MEMUSED,
+ MEMINFO_BUFFERRAM,
+ MEMINFO_TOTAL,
+};
+
+enum {
+ PROC_SIZE,
+ PROC_SHARE,
+ PROC_TEXT,
+ PROC_DATA,
+ PROC_COUNT,
+};
+
+static const char * const meminfo_names[] = {
+ "Linux_meminfo_memfree",
+ "Linux_meminfo_memused",
+ "Linux_meminfo_bufferram",
+};
+
+static const char * const proc_names[] = {
+ "Linux_proc_statm_size",
+ "Linux_proc_statm_share",
+ "Linux_proc_statm_text",
+ "Linux_proc_statm_data",
+};
+
+static bool meminfo_global_enabled;
+static ulong meminfo_enabled[MEMINFO_TOTAL];
+static ulong meminfo_keys[MEMINFO_TOTAL];
+static long long meminfo_buffer[2 * (MEMINFO_TOTAL + 2)];
+static int meminfo_length;
+static bool new_data_avail;
+
+static bool proc_global_enabled;
+static ulong proc_enabled[PROC_COUNT];
+static ulong proc_keys[PROC_COUNT];
+static DEFINE_PER_CPU(long long, proc_buffer[2 * (PROC_COUNT + 3)]);
+
+#if USE_THREAD
+
+static int gator_meminfo_func(void *data);
+static bool gator_meminfo_run;
+/* Initialize semaphore unlocked to initialize memory values */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+static DECLARE_MUTEX(gator_meminfo_sem);
+#else
+static DEFINE_SEMAPHORE(gator_meminfo_sem);
+#endif
+
+/* Thread variant: wake the worker thread to refresh the meminfo snapshot. */
+static void notify(void)
+{
+	up(&gator_meminfo_sem);
+}
+
+#else
+
+static unsigned int mem_event;
+static void wq_sched_handler(struct work_struct *wsptr);
+DECLARE_WORK(work, wq_sched_handler);
+static struct timer_list meminfo_wake_up_timer;
+static void meminfo_wake_up_handler(unsigned long unused_data);
+
+/* Timer variant: bump the event counter; read() arms the wake-up timer. */
+static void notify(void)
+{
+	mem_event++;
+}
+
+#endif
+
+/*
+ * Tracepoint probes on page free/alloc: each fires notify() so the meminfo
+ * snapshot is refreshed.  Tracepoint names changed in kernel 3.3, hence
+ * the version switches.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+GATOR_DEFINE_PROBE(mm_page_free_direct, TP_PROTO(struct page *page, unsigned int order))
+#else
+GATOR_DEFINE_PROBE(mm_page_free, TP_PROTO(struct page *page, unsigned int order))
+#endif
+{
+	notify();
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+GATOR_DEFINE_PROBE(mm_pagevec_free, TP_PROTO(struct page *page, int cold))
+#else
+GATOR_DEFINE_PROBE(mm_page_free_batched, TP_PROTO(struct page *page, int cold))
+#endif
+{
+	notify();
+}
+
+GATOR_DEFINE_PROBE(mm_page_alloc, TP_PROTO(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype))
+{
+	notify();
+}
+
+/*
+ * Create the enabled/key gatorfs nodes for every meminfo counter and every
+ * per-process (statm) counter.  Returns 0 on success, -1 on failure.
+ */
+static int gator_events_meminfo_create_files(struct super_block *sb, struct dentry *root)
+{
+	struct dentry *dir;
+	int idx;
+
+	for (idx = 0; idx < MEMINFO_TOTAL; idx++) {
+		dir = gatorfs_mkdir(sb, root, meminfo_names[idx]);
+		if (dir == NULL)
+			return -1;
+		gatorfs_create_ulong(sb, dir, "enabled", &meminfo_enabled[idx]);
+		gatorfs_create_ro_ulong(sb, dir, "key", &meminfo_keys[idx]);
+	}
+
+	for (idx = 0; idx < PROC_COUNT; idx++) {
+		dir = gatorfs_mkdir(sb, root, proc_names[idx]);
+		if (dir == NULL)
+			return -1;
+		gatorfs_create_ulong(sb, dir, "enabled", &proc_enabled[idx]);
+		gatorfs_create_ro_ulong(sb, dir, "key", &proc_keys[idx]);
+	}
+
+	return 0;
+}
+
+/*
+ * Start meminfo collection: compute the global enable flags, register the
+ * page alloc/free tracepoints, and start the read mechanism (worker thread
+ * on PREEMPT_RT_FULL, timer + workqueue otherwise).
+ *
+ * Unwinds via the goto ladder below if any registration fails.
+ * Returns 0 on success, -1 on failure.
+ */
+static int gator_events_meminfo_start(void)
+{
+	int i;
+
+	new_data_avail = false;
+	meminfo_global_enabled = 0;
+	for (i = 0; i < MEMINFO_TOTAL; i++) {
+		if (meminfo_enabled[i]) {
+			meminfo_global_enabled = 1;
+			break;
+		}
+	}
+
+	proc_global_enabled = 0;
+	for (i = 0; i < PROC_COUNT; ++i) {
+		if (proc_enabled[i]) {
+			proc_global_enabled = 1;
+			break;
+		}
+	}
+	/* System-wide memused also needs per-process data (see read_proc). */
+	if (meminfo_enabled[MEMINFO_MEMUSED])
+		proc_global_enabled = 1;
+
+	if (meminfo_global_enabled == 0)
+		return 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+	if (GATOR_REGISTER_TRACE(mm_page_free_direct))
+#else
+	if (GATOR_REGISTER_TRACE(mm_page_free))
+#endif
+		goto mm_page_free_exit;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+	if (GATOR_REGISTER_TRACE(mm_pagevec_free))
+#else
+	if (GATOR_REGISTER_TRACE(mm_page_free_batched))
+#endif
+		goto mm_page_free_batched_exit;
+	if (GATOR_REGISTER_TRACE(mm_page_alloc))
+		goto mm_page_alloc_exit;
+
+#if USE_THREAD
+	/* Start worker thread */
+	gator_meminfo_run = true;
+	/* Since the mutex starts unlocked, memory values will be initialized */
+	if (IS_ERR(kthread_run(gator_meminfo_func, NULL, "gator_meminfo")))
+		goto kthread_run_exit;
+#else
+	setup_timer(&meminfo_wake_up_timer, meminfo_wake_up_handler, 0);
+#endif
+
+	return 0;
+
+	/* Error unwind: unregister in reverse order of registration. */
+#if USE_THREAD
+kthread_run_exit:
+	GATOR_UNREGISTER_TRACE(mm_page_alloc);
+#endif
+mm_page_alloc_exit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+	GATOR_UNREGISTER_TRACE(mm_pagevec_free);
+#else
+	GATOR_UNREGISTER_TRACE(mm_page_free_batched);
+#endif
+mm_page_free_batched_exit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+	GATOR_UNREGISTER_TRACE(mm_page_free_direct);
+#else
+	GATOR_UNREGISTER_TRACE(mm_page_free);
+#endif
+mm_page_free_exit:
+	return -1;
+}
+
+/*
+ * Stop meminfo collection: unregister the tracepoints and shut down the
+ * read mechanism started by gator_events_meminfo_start().
+ */
+static void gator_events_meminfo_stop(void)
+{
+	if (!meminfo_global_enabled)
+		return;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+	GATOR_UNREGISTER_TRACE(mm_page_free_direct);
+	GATOR_UNREGISTER_TRACE(mm_pagevec_free);
+#else
+	GATOR_UNREGISTER_TRACE(mm_page_free);
+	GATOR_UNREGISTER_TRACE(mm_page_free_batched);
+#endif
+	GATOR_UNREGISTER_TRACE(mm_page_alloc);
+
+#if USE_THREAD
+	/* Clear the run flag, then wake the worker so it can observe it and exit. */
+	gator_meminfo_run = false;
+	up(&gator_meminfo_sem);
+#else
+	del_timer_sync(&meminfo_wake_up_timer);
+#endif
+}
+
+/*
+ * Snapshot system memory statistics into meminfo_buffer.
+ * Must run in process context: si_meminfo() can sleep.
+ *
+ * MEMUSED is emitted as a per-pid record (pid -1 = system wide) so gatord
+ * can combine it with the per-process values from read_proc.
+ */
+static void do_read(void)
+{
+	struct sysinfo info;
+	int i, len;
+	unsigned long long value;
+
+	meminfo_length = len = 0;
+
+	si_meminfo(&info);
+	for (i = 0; i < MEMINFO_TOTAL; i++) {
+		if (meminfo_enabled[i]) {
+			switch (i) {
+			case MEMINFO_MEMFREE:
+				value = info.freeram * PAGE_SIZE;
+				break;
+			case MEMINFO_MEMUSED:
+				/* pid -1 means system wide */
+				meminfo_buffer[len++] = 1;
+				meminfo_buffer[len++] = -1;
+				/* Emit value */
+				meminfo_buffer[len++] = meminfo_keys[MEMINFO_MEMUSED];
+				meminfo_buffer[len++] = (info.totalram - info.freeram) * PAGE_SIZE;
+				/* Clear pid */
+				meminfo_buffer[len++] = 1;
+				meminfo_buffer[len++] = 0;
+				/* Record fully emitted above; skip the common emit below. */
+				continue;
+			case MEMINFO_BUFFERRAM:
+				value = info.bufferram * PAGE_SIZE;
+				break;
+			default:
+				value = 0;
+				break;
+			}
+			meminfo_buffer[len++] = meminfo_keys[i];
+			meminfo_buffer[len++] = value;
+		}
+	}
+
+	meminfo_length = len;
+	/* Publish: read() consumes the buffer when this flag is set. */
+	new_data_avail = true;
+}
+
+#if USE_THREAD
+
+/*
+ * Worker thread (PREEMPT_RT_FULL builds): sleep on the semaphore until
+ * notify()/stop() wakes us, coalesce any queued wake-ups, then refresh the
+ * meminfo snapshot.  Exits when gator_meminfo_run is cleared or the thread
+ * is killed.
+ */
+static int gator_meminfo_func(void *data)
+{
+	for (;;) {
+		if (down_killable(&gator_meminfo_sem))
+			break;
+
+		/* Eat up any pending events */
+		while (!down_trylock(&gator_meminfo_sem))
+			;
+
+		if (!gator_meminfo_run)
+			break;
+
+		do_read();
+	}
+
+	return 0;
+}
+
+#else
+
+/* Must be run in process context as the kernel function si_meminfo() can sleep */
+static void wq_sched_handler(struct work_struct *wsptr)
+{
+	do_read();
+}
+
+/* Timer callback: defer the actual read to the workqueue (process context). */
+static void meminfo_wake_up_handler(unsigned long unused_data)
+{
+	/* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
+	schedule_work(&work);
+}
+
+#endif
+
+/*
+ * gator read64 callback: hand over the latest meminfo snapshot, if any.
+ * In the timer variant, also arms the wake-up timer when a page alloc/free
+ * event has occurred since the previous call.
+ *
+ * Returns the number of entries placed in *buffer, or 0 if no new data.
+ */
+static int gator_events_meminfo_read(long long **buffer)
+{
+#if !USE_THREAD
+	/* Last mem_event value we reacted to; persists between calls. */
+	static unsigned int last_mem_event;
+#endif
+
+	if (!on_primary_core() || !meminfo_global_enabled)
+		return 0;
+
+#if !USE_THREAD
+	if (last_mem_event != mem_event) {
+		last_mem_event = mem_event;
+		mod_timer(&meminfo_wake_up_timer, jiffies + 1);
+	}
+#endif
+
+	if (!new_data_avail)
+		return 0;
+
+	new_data_avail = false;
+
+	if (buffer)
+		*buffer = meminfo_buffer;
+
+	return meminfo_length;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+
+/*
+ * Compatibility shim for get_mm_counter() on kernels 2.6.34 - 3.3, where
+ * the rss_stat layout varies with SPLIT_RSS_COUNTING and kernel version.
+ */
+static inline unsigned long gator_get_mm_counter(struct mm_struct *mm, int member)
+{
+#ifdef SPLIT_RSS_COUNTING
+	long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+	/* Per-task deltas may make the split counter transiently negative. */
+	if (val < 0)
+		val = 0;
+	return (unsigned long)val;
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+	return mm->rss_stat.count[member];
+#else
+	return atomic_long_read(&mm->rss_stat.count[member]);
+#endif
+#endif
+}
+
+#define get_mm_counter(mm, member) gator_get_mm_counter(mm, member)
+
+#endif
+
+/*
+ * gator read_proc callback: emit per-process statm-style counters for
+ * @task into this CPU's proc_buffer.  Values are derived from the logic
+ * of task_statm() in fs/proc/task_mmu.c.
+ *
+ * Buffer layout: a (1, pid) header, then (key, value) pairs, then a
+ * (1, 0) trailer that clears the pid for subsequent records.
+ * Returns the number of entries placed in *buffer, or 0 if disabled or
+ * the task has no mm.
+ */
+static int gator_events_meminfo_read_proc(long long **buffer, struct task_struct *task)
+{
+	struct mm_struct *mm;
+	u64 share = 0;
+	int i;
+	long long value;
+	int len = 0;
+	int cpu = get_physical_cpu();
+	long long *buf = per_cpu(proc_buffer, cpu);
+
+	if (!proc_global_enabled)
+		return 0;
+
+	/* Collect the memory stats of the process instead of the thread */
+	if (task->group_leader != NULL)
+		task = task->group_leader;
+
+	/* get_task_mm/mmput is not needed in this context because the task and it's mm are required as part of the sched_switch */
+	mm = task->mm;
+	if (mm == NULL)
+		return 0;
+
+	/* Derived from task_statm in fs/proc/task_mmu.c */
+	if (meminfo_enabled[MEMINFO_MEMUSED] || proc_enabled[PROC_SHARE]) {
+		share = get_mm_counter(mm,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
+				       file_rss
+#else
+				       MM_FILEPAGES
+#endif
+				       );
+	}
+
+	/* key of 1 indicates a pid */
+	buf[len++] = 1;
+	buf[len++] = task->pid;
+
+	for (i = 0; i < PROC_COUNT; ++i) {
+		if (proc_enabled[i]) {
+			switch (i) {
+			case PROC_SIZE:
+				value = mm->total_vm;
+				break;
+			case PROC_SHARE:
+				value = share;
+				break;
+			case PROC_TEXT:
+				value = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT;
+				break;
+			case PROC_DATA:
+				value = mm->total_vm - mm->shared_vm;
+				break;
+			}
+
+			/* All values are in pages; scale to bytes for gatord. */
+			buf[len++] = proc_keys[i];
+			buf[len++] = value * PAGE_SIZE;
+		}
+	}
+
+	if (meminfo_enabled[MEMINFO_MEMUSED]) {
+		/* Resident set = file-backed pages (share) + anonymous pages. */
+		value = share + get_mm_counter(mm,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
+					       anon_rss
+#else
+					       MM_ANONPAGES
+#endif
+					       );
+		/* Send resident for this pid */
+		buf[len++] = meminfo_keys[MEMINFO_MEMUSED];
+		buf[len++] = value * PAGE_SIZE;
+	}
+
+	/* Clear pid */
+	buf[len++] = 1;
+	buf[len++] = 0;
+
+	if (buffer)
+		*buffer = buf;
+
+	return len;
+}
+
+/* Callback table registered with the gator core via gator_events_install(). */
+static struct gator_interface gator_events_meminfo_interface = {
+	.create_files = gator_events_meminfo_create_files,
+	.start = gator_events_meminfo_start,
+	.stop = gator_events_meminfo_stop,
+	.read64 = gator_events_meminfo_read,
+	.read_proc = gator_events_meminfo_read_proc,
+};
+
+/*
+ * Module init: allocate gator keys for every meminfo and per-process
+ * counter (all initially disabled) and register this event source.
+ */
+int gator_events_meminfo_init(void)
+{
+	int idx;
+
+	meminfo_global_enabled = 0;
+	for (idx = 0; idx < MEMINFO_TOTAL; idx++) {
+		meminfo_enabled[idx] = 0;
+		meminfo_keys[idx] = gator_events_get_key();
+	}
+
+	proc_global_enabled = 0;
+	for (idx = 0; idx < PROC_COUNT; idx++) {
+		proc_enabled[idx] = 0;
+		proc_keys[idx] = gator_events_get_key();
+	}
+
+	return gator_events_install(&gator_events_meminfo_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_mmapped.c b/drivers/parrot/gator/gator_events_mmapped.c
new file mode 100644
index 00000000000000..6b2af995ed41e7
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_mmapped.c
@@ -0,0 +1,209 @@
+/*
+ * Example events provider
+ *
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Similar entries to those below must be present in the events.xml file.
+ * To add them to the events.xml, create an events-mmap.xml with the
+ * following contents and rebuild gatord:
+ *
+ * <category name="mmapped">
+ * <event counter="mmapped_cnt0" title="Simulated1" name="Sine" display="maximum" class="absolute" description="Sort-of-sine"/>
+ * <event counter="mmapped_cnt1" title="Simulated2" name="Triangle" display="maximum" class="absolute" description="Triangular wave"/>
+ * <event counter="mmapped_cnt2" title="Simulated3" name="PWM" display="maximum" class="absolute" description="PWM Signal"/>
+ * </category>
+ *
+ * When adding custom events, be sure to do the following:
+ * - add any needed .c files to the gator driver Makefile
+ * - call gator_events_install in the events init function
+ * - add the init function to GATOR_EVENTS_LIST in gator_main.c
+ * - add a new events-*.xml file to the gator daemon and rebuild
+ *
+ * Troubleshooting:
+ * - verify the new events are part of events.xml, which is created when building the daemon
+ * - verify the new events exist at /dev/gator/events/ once gatord is launched
+ * - verify the counter name in the XML matches the name at /dev/gator/events
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+
+#include "gator.h"
+
+#define MMAPPED_COUNTERS_NUM 3
+
+static int mmapped_global_enabled;
+
+static struct {
+ unsigned long enabled;
+ unsigned long key;
+} mmapped_counters[MMAPPED_COUNTERS_NUM];
+
+static int mmapped_buffer[MMAPPED_COUNTERS_NUM * 2];
+
+static s64 prev_time;
+
+/* Adds mmapped_cntX directories and enabled, event, and key files to /dev/gator/events */
+static int gator_events_mmapped_create_files(struct super_block *sb,
+					     struct dentry *root)
+{
+	int idx;
+
+	for (idx = 0; idx < MMAPPED_COUNTERS_NUM; idx++) {
+		struct dentry *dir;
+		char name[16];
+
+		snprintf(name, sizeof(name), "mmapped_cnt%d", idx);
+		dir = gatorfs_mkdir(sb, root, name);
+		if (WARN_ON(!dir))
+			return -1;
+		gatorfs_create_ulong(sb, dir, "enabled",
+				     &mmapped_counters[idx].enabled);
+		gatorfs_create_ro_ulong(sb, dir, "key",
+					&mmapped_counters[idx].key);
+	}
+
+	return 0;
+}
+
+/*
+ * Record the start timestamp and compute the global enable flag (set when
+ * at least one simulated counter is enabled).
+ */
+static int gator_events_mmapped_start(void)
+{
+	struct timespec now;
+	int idx;
+
+	getnstimeofday(&now);
+	prev_time = timespec_to_ns(&now);
+
+	mmapped_global_enabled = 0;
+	for (idx = 0; idx < MMAPPED_COUNTERS_NUM; idx++)
+		if (mmapped_counters[idx].enabled) {
+			mmapped_global_enabled = 1;
+			break;
+		}
+
+	return 0;
+}
+
+/* Nothing to tear down: the simulated counters hold no resources. */
+static void gator_events_mmapped_stop(void)
+{
+}
+
+/* This function "simulates" counters, generating values of fancy
+ * functions like sine or triangle... Each case keeps its waveform state
+ * in static locals, advanced by @delta_in_us microseconds per call, so
+ * the output is a function of accumulated call time. */
+static int mmapped_simulate(int counter, int delta_in_us)
+{
+	int result = 0;
+
+	switch (counter) {
+	case 0:		/* sort-of-sine */
+		{
+			/* t: phase in us, wraps every 2.048 s. */
+			static int t;
+			int x;
+
+			t += delta_in_us;
+			if (t > 2048000)
+				t = 0;
+
+			/* Triangle wave on x, then squared to round the peaks. */
+			if (t % 1024000 < 512000)
+				x = 512000 - (t % 512000);
+			else
+				x = t % 512000;
+
+			result = 32 * x / 512000;
+			result = result * result;
+
+			/* Invert the first half-period to approximate a sine. */
+			if (t < 1024000)
+				result = 1922 - result;
+		}
+		break;
+	case 1:		/* triangle */
+		{
+			/* v: current value, d: direction (+1 rising, -1 falling). */
+			static int v, d = 1;
+
+			v = v + d * delta_in_us;
+			if (v < 0) {
+				v = 0;
+				d = 1;
+			} else if (v > 1000000) {
+				v = 1000000;
+				d = -1;
+			}
+
+			result = v;
+		}
+		break;
+	case 2:		/* PWM signal */
+		{
+			/* dc: duty-cycle threshold, t: phase, x: total elapsed. */
+			static int dc, x, t;
+
+			t += delta_in_us;
+			if (t > 1000000)
+				t = 0;
+			/* Step the duty cycle once per simulated second. */
+			if (x / 1000000 != (x + delta_in_us) / 1000000)
+				dc = (dc + 100000) % 1000000;
+			x += delta_in_us;
+
+			result = t < dc ? 0 : 10;
+		}
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * gator read callback: advance the simulated waveforms by the elapsed time
+ * and emit a (key, value) pair for each enabled counter.
+ * Returns the number of ints placed in *buffer.
+ */
+static int gator_events_mmapped_read(int **buffer, bool sched_switch)
+{
+	int i;
+	int len = 0;
+	int delta_in_us;
+	struct timespec ts;
+	s64 time;
+
+	/* System wide counters - read from one core only */
+	if (!on_primary_core() || !mmapped_global_enabled)
+		return 0;
+
+	/* Elapsed time since the previous read, in microseconds. */
+	getnstimeofday(&ts);
+	time = timespec_to_ns(&ts);
+	delta_in_us = (int)(time - prev_time) / 1000;
+	prev_time = time;
+
+	for (i = 0; i < MMAPPED_COUNTERS_NUM; i++) {
+		if (mmapped_counters[i].enabled) {
+			mmapped_buffer[len++] = mmapped_counters[i].key;
+			mmapped_buffer[len++] =
+			    mmapped_simulate(i, delta_in_us);
+		}
+	}
+
+	if (buffer)
+		*buffer = mmapped_buffer;
+
+	return len;
+}
+
+static struct gator_interface gator_events_mmapped_interface = {
+ .create_files = gator_events_mmapped_create_files,
+ .start = gator_events_mmapped_start,
+ .stop = gator_events_mmapped_stop,
+ .read = gator_events_mmapped_read,
+};
+
+/* Must not be static! */
+int __init gator_events_mmapped_init(void)
+{
+	int idx;
+
+	/* Allocate a gator key per counter; all start disabled. */
+	for (idx = 0; idx < MMAPPED_COUNTERS_NUM; idx++) {
+		mmapped_counters[idx].enabled = 0;
+		mmapped_counters[idx].key = gator_events_get_key();
+	}
+
+	return gator_events_install(&gator_events_mmapped_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_net.c b/drivers/parrot/gator/gator_events_net.c
new file mode 100644
index 00000000000000..d21b4db7b77cde
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_net.c
@@ -0,0 +1,172 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <linux/netdevice.h>
+#include <linux/hardirq.h>
+
+#define NETRX 0
+#define NETTX 1
+#define TOTALNET 2
+
+static ulong netrx_enabled;
+static ulong nettx_enabled;
+static ulong netrx_key;
+static ulong nettx_key;
+/* Cumulative byte totals captured by the last workqueue snapshot */
+static int rx_total, tx_total;
+/* Totals already accounted for, indexed by NETRX/NETTX */
+static ulong netPrev[TOTALNET];
+/* Output buffer: up to two key/value pairs per direction */
+static int netGet[TOTALNET * 4];
+
+static struct timer_list net_wake_up_timer;
+
+/* Must be run in process context as the kernel function dev_get_stats() can sleep */
+static void get_network_stats(struct work_struct *wsptr)
+{
+ int rx = 0, tx = 0;
+ struct net_device *dev;
+
+ /* Sum rx/tx byte counts over every net device in init_net */
+ for_each_netdev(&init_net, dev) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ const struct net_device_stats *stats = dev_get_stats(dev);
+#else
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+#endif
+ rx += stats->rx_bytes;
+ tx += stats->tx_bytes;
+ }
+ rx_total = rx;
+ tx_total = tx;
+}
+
+DECLARE_WORK(wq_get_stats, get_network_stats);
+
+/* Timer callback: defer the (sleeping) stats read to process context */
+static void net_wake_up_handler(unsigned long unused_data)
+{
+ /* had to delay scheduling work as attempting to schedule work during the context switch is illegal in kernel versions 3.5 and greater */
+ schedule_work(&wq_get_stats);
+}
+
+/* Bytes transferred since the previous read; negative deltas (e.g. after
+ * a device counter reset or int wrap) are clamped to zero.
+ */
+static void calculate_delta(int *rx, int *tx)
+{
+ int rx_calc, tx_calc;
+
+ rx_calc = (int)(rx_total - netPrev[NETRX]);
+ if (rx_calc < 0)
+ rx_calc = 0;
+ netPrev[NETRX] += rx_calc;
+
+ tx_calc = (int)(tx_total - netPrev[NETTX]);
+ if (tx_calc < 0)
+ tx_calc = 0;
+ netPrev[NETTX] += tx_calc;
+
+ *rx = rx_calc;
+ *tx = tx_calc;
+}
+
+/* Expose Linux_net_rx / Linux_net_tx enable+key files in gatorfs. */
+static int gator_events_net_create_files(struct super_block *sb, struct dentry *root)
+{
+ /* Network counters are not currently supported in RT-Preempt full because mod_timer is used */
+#ifndef CONFIG_PREEMPT_RT_FULL
+ struct dentry *dir;
+
+ dir = gatorfs_mkdir(sb, root, "Linux_net_rx");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &netrx_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &netrx_key);
+
+ dir = gatorfs_mkdir(sb, root, "Linux_net_tx");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &nettx_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &nettx_key);
+#endif
+
+ return 0;
+}
+
+/* Take an initial synchronous snapshot so the first delta is zero,
+ * then arm the wake-up timer used by the read path.
+ */
+static int gator_events_net_start(void)
+{
+ get_network_stats(0);
+ netPrev[NETRX] = rx_total;
+ netPrev[NETTX] = tx_total;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ setup_timer(&net_wake_up_timer, net_wake_up_handler, 0);
+#else
+ setup_deferrable_timer_on_stack(&net_wake_up_timer, net_wake_up_handler, 0);
+#endif
+ return 0;
+}
+
+/* Stop sampling: kill the timer and disable both directions. */
+static void gator_events_net_stop(void)
+{
+ del_timer_sync(&net_wake_up_timer);
+ netrx_enabled = 0;
+ nettx_enabled = 0;
+}
+
+/* Report rx/tx deltas. Values are emitted as two pairs per direction
+ * (key,0 then key,delta) so Streamline attributes the bytes to "now"
+ * rather than spreading them since the last message. Unchanged deltas
+ * are suppressed to save buffer space.
+ */
+static int gator_events_net_read(int **buffer, bool sched_switch)
+{
+ int len, rx_delta, tx_delta;
+ static int last_rx_delta, last_tx_delta;
+
+ if (!on_primary_core())
+ return 0;
+
+ if (!netrx_enabled && !nettx_enabled)
+ return 0;
+
+ /* Kick the timer so a fresh snapshot is taken for the next read */
+ mod_timer(&net_wake_up_timer, jiffies + 1);
+
+ calculate_delta(&rx_delta, &tx_delta);
+
+ len = 0;
+ if (netrx_enabled && last_rx_delta != rx_delta) {
+ last_rx_delta = rx_delta;
+ netGet[len++] = netrx_key;
+ /* indicates to Streamline that rx_delta bytes were transmitted now, not since the last message */
+ netGet[len++] = 0;
+ netGet[len++] = netrx_key;
+ netGet[len++] = rx_delta;
+ }
+
+ if (nettx_enabled && last_tx_delta != tx_delta) {
+ last_tx_delta = tx_delta;
+ netGet[len++] = nettx_key;
+ /* indicates to Streamline that tx_delta bytes were transmitted now, not since the last message */
+ netGet[len++] = 0;
+ netGet[len++] = nettx_key;
+ netGet[len++] = tx_delta;
+ }
+
+ if (buffer)
+ *buffer = netGet;
+
+ return len;
+}
+
+static struct gator_interface gator_events_net_interface = {
+ .create_files = gator_events_net_create_files,
+ .start = gator_events_net_start,
+ .stop = gator_events_net_stop,
+ .read = gator_events_net_read,
+};
+
+/* Register the network counters with gator and allocate their keys. */
+int gator_events_net_init(void)
+{
+ netrx_key = gator_events_get_key();
+ nettx_key = gator_events_get_key();
+
+ netrx_enabled = 0;
+ nettx_enabled = 0;
+
+ return gator_events_install(&gator_events_net_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_perf_pmu.c b/drivers/parrot/gator/gator_events_perf_pmu.c
new file mode 100644
index 00000000000000..47cf278e508b11
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_perf_pmu.c
@@ -0,0 +1,574 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+/* gator_events_armvX.c is used for Linux 2.6.x */
+#if GATOR_PERF_PMU_SUPPORT
+
+#include <linux/io.h>
+#ifdef CONFIG_OF
+#include <linux/of_address.h>
+#endif
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+extern bool event_based_sampling;
+
+/* Maximum number of per-core counters - currently reserves enough space for two full hardware PMUs for big.LITTLE */
+#define CNTMAX 16
+#define CCI_400 4
+#define CCN_5XX 8
+/* Maximum number of uncore counters */
+/* + 1 for the cci-400 cycles counter */
+/* + 1 for the CCN-5xx cycles counter */
+#define UCCNT (CCI_400 + 1 + CCN_5XX + 1)
+
+/* Default to 0 if unable to probe the revision which was the previous behavior */
+#define DEFAULT_CCI_REVISION 0
+
+/* A gator_attr is needed for every counter */
+struct gator_attr {
+ /* Set once in gator_events_perf_pmu_*_init - the name of the event in the gatorfs */
+ char name[40];
+ /* Exposed in gatorfs - set by gatord to enable this counter */
+ unsigned long enabled;
+ /* Set once in gator_events_perf_pmu_*_init - the perf type to use, see perf_type_id in the perf_event.h header file. */
+ unsigned long type;
+ /* Exposed in gatorfs - set by gatord to select the event to collect */
+ unsigned long event;
+ /* Exposed in gatorfs - set by gatord with the sample period to use and enable EBS for this counter */
+ unsigned long count;
+ /* Exposed as read only in gatorfs - set once in __attr_init as the key to use in the APC data */
+ unsigned long key;
+};
+
+/* Per-core counter attributes */
+static struct gator_attr attrs[CNTMAX];
+/* Number of initialized per-core counters */
+static int attr_count;
+/* Uncore counter attributes */
+static struct gator_attr uc_attrs[UCCNT];
+/* Number of initialized uncore counters */
+static int uc_attr_count;
+
+/* Runtime state of one perf counter instance */
+struct gator_event {
+ int curr; /* latest raw value read from perf */
+ int prev; /* raw value at the previous report */
+ int prev_delta; /* delta reported last time (for dedup) */
+ bool zero; /* true right after (re)creation: report 0 once */
+ struct perf_event *pevent; /* live perf counter, NULL if offline */
+ struct perf_event_attr *pevent_attr; /* config; NULL when disabled */
+};
+
+static DEFINE_PER_CPU(struct gator_event[CNTMAX], events);
+static struct gator_event uc_events[UCCNT];
+static DEFINE_PER_CPU(int[(CNTMAX + UCCNT)*2], perf_cnt);
+
+static void gator_events_perf_pmu_stop(void);
+
+/* Create the gatorfs directory for one counter attr; counters with an
+ * empty name (uninitialized slots) are silently skipped.
+ */
+static int __create_files(struct super_block *sb, struct dentry *root, struct gator_attr *const attr)
+{
+ struct dentry *dir;
+
+ if (attr->name[0] == '\0')
+ return 0;
+ dir = gatorfs_mkdir(sb, root, attr->name);
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &attr->enabled);
+ gatorfs_create_ulong(sb, dir, "count", &attr->count);
+ gatorfs_create_ro_ulong(sb, dir, "key", &attr->key);
+ gatorfs_create_ulong(sb, dir, "event", &attr->event);
+
+ return 0;
+}
+
+/* Expose all per-core and uncore counters in gatorfs. */
+static int gator_events_perf_pmu_create_files(struct super_block *sb, struct dentry *root)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < attr_count; cnt++) {
+ if (__create_files(sb, root, &attrs[cnt]) != 0)
+ return -1;
+ }
+
+ for (cnt = 0; cnt < uc_attr_count; cnt++) {
+ if (__create_files(sb, root, &uc_attrs[cnt]) != 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/* EBS overflow handler: capture a backtrace on each sample period. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+static void ebs_overflow_handler(struct perf_event *event, int unused, struct perf_sample_data *data, struct pt_regs *regs)
+#else
+static void ebs_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+#endif
+{
+ gator_backtrace_handler(regs);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+static void dummy_handler(struct perf_event *event, int unused, struct perf_sample_data *data, struct pt_regs *regs)
+#else
+static void dummy_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
+#endif
+{
+ /* Required as perf_event_create_kernel_counter() requires an overflow handler, even though all we do is poll */
+}
+
+static int gator_events_perf_pmu_read(int **buffer, bool sched_switch);
+
+/* Online hook: emit an initial read so the new core's counters baseline. */
+static int gator_events_perf_pmu_online(int **buffer, bool migrate)
+{
+ return gator_events_perf_pmu_read(buffer, false);
+}
+
+/* Create the perf counter for one (cpu, attr) pair when the cpu comes
+ * online. Marks the event for a zero baseline report (see __read).
+ * No-op when the counter already exists, is not configured, or the
+ * cpu is only migrating.
+ */
+static void __online_dispatch(int cpu, bool migrate, struct gator_attr *const attr, struct gator_event *const event)
+{
+ perf_overflow_handler_t handler;
+
+ event->zero = true;
+
+ if (event->pevent != NULL || event->pevent_attr == 0 || migrate)
+ return;
+
+ /* EBS counters (count > 0) sample backtraces on overflow */
+ if (attr->count > 0)
+ handler = ebs_overflow_handler;
+ else
+ handler = dummy_handler;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+ event->pevent = perf_event_create_kernel_counter(event->pevent_attr, cpu, 0, handler);
+#else
+ event->pevent = perf_event_create_kernel_counter(event->pevent_attr, cpu, 0, handler, 0);
+#endif
+ if (IS_ERR(event->pevent)) {
+ pr_debug("gator: unable to online a counter on cpu %d\n", cpu);
+ event->pevent = NULL;
+ return;
+ }
+
+ if (event->pevent->state != PERF_EVENT_STATE_ACTIVE) {
+ pr_debug("gator: inactive counter on cpu %d\n", cpu);
+ perf_event_release_kernel(event->pevent);
+ event->pevent = NULL;
+ return;
+ }
+}
+
+/* CPU-online: bring up per-core counters; uncore counters are owned
+ * by logical cpu 0 only.
+ */
+static void gator_events_perf_pmu_online_dispatch(int cpu, bool migrate)
+{
+ int cnt;
+
+ cpu = pcpu_to_lcpu(cpu);
+
+ for (cnt = 0; cnt < attr_count; cnt++)
+ __online_dispatch(cpu, migrate, &attrs[cnt], &per_cpu(events, cpu)[cnt]);
+
+ if (cpu == 0) {
+ for (cnt = 0; cnt < uc_attr_count; cnt++)
+ __online_dispatch(cpu, migrate, &uc_attrs[cnt], &uc_events[cnt]);
+ }
+}
+
+/* Release one perf counter; clears the pointer before releasing. */
+static void __offline_dispatch(int cpu, struct gator_event *const event)
+{
+ struct perf_event *pe = NULL;
+
+ if (event->pevent) {
+ pe = event->pevent;
+ event->pevent = NULL;
+ }
+
+ if (pe)
+ perf_event_release_kernel(pe);
+}
+
+/* CPU-offline: tear down per-core counters (and uncore for cpu 0),
+ * unless the cpu is merely migrating.
+ */
+static void gator_events_perf_pmu_offline_dispatch(int cpu, bool migrate)
+{
+ int cnt;
+
+ if (migrate)
+ return;
+ cpu = pcpu_to_lcpu(cpu);
+
+ for (cnt = 0; cnt < attr_count; cnt++)
+ __offline_dispatch(cpu, &per_cpu(events, cpu)[cnt]);
+
+ if (cpu == 0) {
+ for (cnt = 0; cnt < uc_attr_count; cnt++)
+ __offline_dispatch(cpu, &uc_events[cnt]);
+ }
+}
+
+/* Enforce the single-EBS-counter restriction: the first attr with a
+ * sample count claims event-based sampling; a second one is an error.
+ */
+static int __check_ebs(struct gator_attr *const attr)
+{
+ if (attr->count > 0) {
+ if (!event_based_sampling) {
+ event_based_sampling = true;
+ } else {
+ pr_warning("gator: Only one ebs counter is allowed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Allocate and fill the perf_event_attr for one enabled counter.
+ * The counter itself is created later in __online_dispatch.
+ * On allocation failure all counters are torn down and -1 returned.
+ */
+static int __start(struct gator_attr *const attr, struct gator_event *const event)
+{
+ u32 size = sizeof(struct perf_event_attr);
+
+ event->pevent = NULL;
+ /* Skip disabled counters */
+ if (!attr->enabled)
+ return 0;
+
+ event->prev = 0;
+ event->curr = 0;
+ event->prev_delta = 0;
+ event->pevent_attr = kmalloc(size, GFP_KERNEL);
+ if (!event->pevent_attr) {
+ gator_events_perf_pmu_stop();
+ return -1;
+ }
+
+ memset(event->pevent_attr, 0, size);
+ event->pevent_attr->type = attr->type;
+ event->pevent_attr->size = size;
+ event->pevent_attr->config = attr->event;
+ event->pevent_attr->sample_period = attr->count;
+ /* pinned: counter must stay on the PMU, never multiplexed out */
+ event->pevent_attr->pinned = 1;
+
+ return 0;
+}
+
+/* Capture start: validate EBS configuration, then prepare the perf
+ * attributes for every enabled counter on every present cpu plus the
+ * uncore counters. Returns -1 on any failure.
+ */
+static int gator_events_perf_pmu_start(void)
+{
+ int cnt, cpu;
+
+ event_based_sampling = false;
+ for (cnt = 0; cnt < attr_count; cnt++) {
+ if (__check_ebs(&attrs[cnt]) != 0)
+ return -1;
+ }
+
+ for (cnt = 0; cnt < uc_attr_count; cnt++) {
+ if (__check_ebs(&uc_attrs[cnt]) != 0)
+ return -1;
+ }
+
+ for_each_present_cpu(cpu) {
+ for (cnt = 0; cnt < attr_count; cnt++) {
+ if (__start(&attrs[cnt], &per_cpu(events, cpu)[cnt]) != 0)
+ return -1;
+ }
+ }
+
+ for (cnt = 0; cnt < uc_attr_count; cnt++) {
+ if (__start(&uc_attrs[cnt], &uc_events[cnt]) != 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Free one event's perf attribute (kfree(NULL) is a no-op). */
+static void __event_stop(struct gator_event *const event)
+{
+ kfree(event->pevent_attr);
+ event->pevent_attr = NULL;
+}
+
+/* Reset one counter's gatorfs-visible configuration. */
+static void __attr_stop(struct gator_attr *const attr)
+{
+ attr->enabled = 0;
+ attr->event = 0;
+ attr->count = 0;
+}
+
+/* Capture stop: release all event attributes and reset all configs.
+ * Also called from __start on allocation failure, so it must be safe
+ * on partially-initialized state.
+ */
+static void gator_events_perf_pmu_stop(void)
+{
+ unsigned int cnt, cpu;
+
+ for_each_present_cpu(cpu) {
+ for (cnt = 0; cnt < attr_count; cnt++)
+ __event_stop(&per_cpu(events, cpu)[cnt]);
+ }
+
+ for (cnt = 0; cnt < uc_attr_count; cnt++)
+ __event_stop(&uc_events[cnt]);
+
+ for (cnt = 0; cnt < attr_count; cnt++)
+ __attr_stop(&attrs[cnt]);
+
+ for (cnt = 0; cnt < uc_attr_count; cnt++)
+ __attr_stop(&uc_attrs[cnt]);
+}
+
+/* Poll one active perf counter and append [key, delta] to this cpu's
+ * perf_cnt buffer. The first read after __online_dispatch (zero flag)
+ * establishes the baseline and reports 0.
+ */
+static void __read(int *const len, int cpu, struct gator_attr *const attr, struct gator_event *const event)
+{
+ int delta;
+ struct perf_event *const ev = event->pevent;
+
+ if (ev != NULL && ev->state == PERF_EVENT_STATE_ACTIVE) {
+ /* After creating the perf counter in __online_dispatch, there
+ * is a race condition between gator_events_perf_pmu_online and
+ * gator_events_perf_pmu_read. So have
+ * gator_events_perf_pmu_online call gator_events_perf_pmu_read
+ * and in __read check to see if it's the first call after
+ * __online_dispatch and if so, run the online code.
+ */
+ if (event->zero) {
+ ev->pmu->read(ev);
+ event->prev = event->curr = local64_read(&ev->count);
+ event->prev_delta = 0;
+ per_cpu(perf_cnt, cpu)[(*len)++] = attr->key;
+ per_cpu(perf_cnt, cpu)[(*len)++] = 0;
+ event->zero = false;
+ } else {
+ ev->pmu->read(ev);
+ event->curr = local64_read(&ev->count);
+ delta = event->curr - event->prev;
+ /* Suppress only repeated zero deltas; everything else
+ * is reported (condition is false solely when delta
+ * and prev_delta are both 0).
+ */
+ if (delta != 0 || delta != event->prev_delta) {
+ event->prev_delta = delta;
+ event->prev = event->curr;
+ per_cpu(perf_cnt, cpu)[(*len)++] = attr->key;
+ /* report the magnitude if the counter wrapped */
+ if (delta < 0)
+ delta *= -1;
+ per_cpu(perf_cnt, cpu)[(*len)++] = delta;
+ }
+ }
+ }
+}
+
+/* Read all per-core counters for this cpu (plus uncore on cpu 0). */
+static int gator_events_perf_pmu_read(int **buffer, bool sched_switch)
+{
+ int cnt, len = 0;
+ const int cpu = get_logical_cpu();
+
+ for (cnt = 0; cnt < attr_count; cnt++)
+ __read(&len, cpu, &attrs[cnt], &per_cpu(events, cpu)[cnt]);
+
+ if (cpu == 0) {
+ for (cnt = 0; cnt < uc_attr_count; cnt++)
+ __read(&len, cpu, &uc_attrs[cnt], &uc_events[cnt]);
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perf_cnt, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_perf_pmu_interface = {
+ .create_files = gator_events_perf_pmu_create_files,
+ .start = gator_events_perf_pmu_start,
+ .stop = gator_events_perf_pmu_stop,
+ .online = gator_events_perf_pmu_online,
+ .online_dispatch = gator_events_perf_pmu_online_dispatch,
+ .offline_dispatch = gator_events_perf_pmu_offline_dispatch,
+ .read = gator_events_perf_pmu_read,
+};
+
+/* Reset one counter attr and allocate its unique APC key. */
+static void __attr_init(struct gator_attr *const attr)
+{
+ attr->name[0] = '\0';
+ attr->enabled = 0;
+ attr->type = 0;
+ attr->event = 0;
+ attr->count = 0;
+ attr->key = gator_events_get_key();
+}
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id arm_cci_matches[] = {
+ {.compatible = "arm,cci-400" },
+ {},
+};
+
+/* Probe the CCI-400 revision from its Peripheral ID2 register
+ * (offset 0xfe8, revision in bits [7:4]). Returns 0 for r0 parts
+ * (rev <= 4), 1 for r1 parts (rev 5..6), and DEFAULT_CCI_REVISION
+ * when the node, resource or mapping is unavailable or the revision
+ * is unrecognized.
+ */
+static int probe_cci_revision(void)
+{
+ struct device_node *np;
+ struct resource res;
+ void __iomem *cci_ctrl_base;
+ int rev;
+ int ret = DEFAULT_CCI_REVISION;
+
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!np)
+ return ret;
+
+ if (of_address_to_resource(np, 0, &res))
+ goto node_put;
+
+ cci_ctrl_base = ioremap(res.start, resource_size(&res));
+ /* ioremap can fail; reading through a NULL mapping would oops */
+ if (!cci_ctrl_base)
+ goto node_put;
+
+ rev = (readl_relaxed(cci_ctrl_base + 0xfe8) >> 4) & 0xf;
+
+ if (rev <= 4)
+ ret = 0;
+ else if (rev <= 6)
+ ret = 1;
+
+ iounmap(cci_ctrl_base);
+
+ node_put:
+ of_node_put(np);
+
+ return ret;
+}
+
+#else
+
+static int probe_cci_revision(void)
+{
+ return DEFAULT_CCI_REVISION;
+}
+
+#endif
+
+/* Register one uncore PMU: a <name>_ccnt cycle counter followed by
+ * `count` event counters <name>_cnt0..N, all with the given perf type.
+ * NOTE(review): writes uc_attrs[uc_attr_count] without a bounds check;
+ * callers appear to keep within UCCNT (checked later in init) - verify.
+ */
+static void gator_events_perf_pmu_uncore_init(const char *const name, const int type, const int count)
+{
+ int cnt;
+
+ snprintf(uc_attrs[uc_attr_count].name, sizeof(uc_attrs[uc_attr_count].name), "%s_ccnt", name);
+ uc_attrs[uc_attr_count].type = type;
+ ++uc_attr_count;
+
+ for (cnt = 0; cnt < count; ++cnt, ++uc_attr_count) {
+ struct gator_attr *const attr = &uc_attrs[uc_attr_count];
+
+ snprintf(attr->name, sizeof(attr->name), "%s_cnt%d", name, cnt);
+ attr->type = type;
+ }
+}
+
+/* Register the CCI-400 counters under a revision-specific name so
+ * Streamline picks the matching events XML.
+ */
+static void gator_events_perf_pmu_cci_init(const int type)
+{
+ const char *cci_name;
+
+ switch (probe_cci_revision()) {
+ case 0:
+ cci_name = "CCI_400";
+ break;
+ case 1:
+ cci_name = "CCI_400-r1";
+ break;
+ default:
+ pr_debug("gator: unrecognized cci-400 revision\n");
+ return;
+ }
+
+ gator_events_perf_pmu_uncore_init(cci_name, type, CCI_400);
+}
+
+/* Register one CPU PMU: <pmnc_name>_ccnt plus its event counters. */
+static void gator_events_perf_pmu_cpu_init(const struct gator_cpu *const gator_cpu, const int type)
+{
+ int cnt;
+
+ snprintf(attrs[attr_count].name, sizeof(attrs[attr_count].name), "%s_ccnt", gator_cpu->pmnc_name);
+ attrs[attr_count].type = type;
+ ++attr_count;
+
+ for (cnt = 0; cnt < gator_cpu->pmnc_counters; ++cnt, ++attr_count) {
+ struct gator_attr *const attr = &attrs[attr_count];
+
+ snprintf(attr->name, sizeof(attr->name), "%s_cnt%d", gator_cpu->pmnc_name, cnt);
+ attr->type = type;
+ }
+}
+
+/* Discover available PMUs by probing perf dynamic types (PERF_TYPE_MAX
+ * up to 0x20) with trial counters, registering CCI-400, CCN-5xx and
+ * known CPU PMUs. Falls back to cpuid lookup with PERF_TYPE_RAW when
+ * no CPU PMU was found via perf. Returns -1 on failure, otherwise the
+ * result of gator_events_install().
+ */
+int gator_events_perf_pmu_init(void)
+{
+ struct perf_event_attr pea;
+ struct perf_event *pe;
+ const struct gator_cpu *gator_cpu;
+ int type;
+ int cpu;
+ int cnt;
+ bool found_cpu = false;
+
+ for (cnt = 0; cnt < CNTMAX; cnt++)
+ __attr_init(&attrs[cnt]);
+ for (cnt = 0; cnt < UCCNT; cnt++)
+ __attr_init(&uc_attrs[cnt]);
+
+ /* 0xFF is an arbitrary trial config used only to test whether a
+ * counter can be created for the candidate type
+ */
+ memset(&pea, 0, sizeof(pea));
+ pea.size = sizeof(pea);
+ pea.config = 0xFF;
+ attr_count = 0;
+ uc_attr_count = 0;
+ for (type = PERF_TYPE_MAX; type < 0x20; ++type) {
+ pea.type = type;
+
+ /* A particular PMU may work on some but not all cores, so try on each core */
+ pe = NULL;
+ for_each_present_cpu(cpu) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+ pe = perf_event_create_kernel_counter(&pea, cpu, 0, dummy_handler);
+#else
+ pe = perf_event_create_kernel_counter(&pea, cpu, 0, dummy_handler, 0);
+#endif
+ if (!IS_ERR(pe))
+ break;
+ }
+ /* Assume that valid PMUs are contiguous */
+ if (IS_ERR(pe)) {
+ /* retry with an uncore-style config before giving up */
+ pea.config = 0xff00;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
+ pe = perf_event_create_kernel_counter(&pea, 0, 0, dummy_handler);
+#else
+ pe = perf_event_create_kernel_counter(&pea, 0, 0, dummy_handler, 0);
+#endif
+ if (IS_ERR(pe))
+ break;
+ }
+
+ if (pe->pmu != NULL && type == pe->pmu->type) {
+ if (strcmp("CCI", pe->pmu->name) == 0 || strcmp("CCI_400", pe->pmu->name) == 0 || strcmp("CCI_400-r1", pe->pmu->name) == 0) {
+ gator_events_perf_pmu_cci_init(type);
+ } else if (strcmp("ccn", pe->pmu->name) == 0) {
+ gator_events_perf_pmu_uncore_init("ARM_CCN_5XX", type, CCN_5XX);
+ } else if ((gator_cpu = gator_find_cpu_by_pmu_name(pe->pmu->name)) != NULL) {
+ found_cpu = true;
+ gator_events_perf_pmu_cpu_init(gator_cpu, type);
+ }
+ /* Initialize gator_attrs for dynamic PMUs here */
+ }
+
+ perf_event_release_kernel(pe);
+ }
+
+ if (!found_cpu) {
+ const struct gator_cpu *const gator_cpu = gator_find_cpu_by_cpuid(gator_cpuid());
+
+ if (gator_cpu == NULL)
+ return -1;
+ gator_events_perf_pmu_cpu_init(gator_cpu, PERF_TYPE_RAW);
+ }
+
+ /* Initialize gator_attrs for non-dynamic PMUs here */
+
+ /* NOTE(review): these bounds are checked only after the arrays have
+ * been filled by the *_init helpers above - confirm the registered
+ * PMUs can never exceed CNTMAX/UCCNT before this point.
+ */
+ if (attr_count > CNTMAX) {
+ pr_err("gator: Too many perf counters\n");
+ return -1;
+ }
+
+ if (uc_attr_count > UCCNT) {
+ pr_err("gator: Too many perf uncore counters\n");
+ return -1;
+ }
+
+ return gator_events_install(&gator_events_perf_pmu_interface);
+}
+
+#endif
diff --git a/drivers/parrot/gator/gator_events_sched.c b/drivers/parrot/gator/gator_events_sched.c
new file mode 100644
index 00000000000000..637107d6af1d6c
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_sched.c
@@ -0,0 +1,113 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "gator.h"
+#include <trace/events/sched.h>
+
+#define SCHED_SWITCH 0
+#define SCHED_TOTAL (SCHED_SWITCH+1)
+
+static ulong sched_switch_enabled;
+static ulong sched_switch_key;
+/* Per-cpu event tally and per-cpu [key,value] output buffer */
+static DEFINE_PER_CPU(int[SCHED_TOTAL], schedCnt);
+static DEFINE_PER_CPU(int[SCHED_TOTAL * 2], schedGet);
+
+/* Tracepoint probe: count one context switch on the current cpu. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
+#else
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
+#endif
+{
+ unsigned long flags;
+
+ /* disable interrupts to synchronize with gator_events_sched_read()
+ * spinlocks not needed since percpu buffers are used
+ */
+ local_irq_save(flags);
+ per_cpu(schedCnt, get_physical_cpu())[SCHED_SWITCH]++;
+ local_irq_restore(flags);
+}
+
+/* Expose the Linux_sched_switch enable/key files in gatorfs. */
+static int gator_events_sched_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* switch */
+ dir = gatorfs_mkdir(sb, root, "Linux_sched_switch");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &sched_switch_enabled);
+ gatorfs_create_ro_ulong(sb, dir, "key", &sched_switch_key);
+
+ return 0;
+}
+
+/* Attach the sched_switch tracepoint if the counter is enabled. */
+static int gator_events_sched_start(void)
+{
+ /* register tracepoints */
+ if (sched_switch_enabled)
+ if (GATOR_REGISTER_TRACE(sched_switch))
+ goto sched_switch_exit;
+ pr_debug("gator: registered scheduler event tracepoints\n");
+
+ return 0;
+
+ /* unregister tracepoints on error */
+sched_switch_exit:
+ pr_err("gator: scheduler event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+/* Detach the tracepoint (if attached) and disable the counter. */
+static void gator_events_sched_stop(void)
+{
+ if (sched_switch_enabled)
+ GATOR_UNREGISTER_TRACE(sched_switch);
+ pr_debug("gator: unregistered scheduler event tracepoints\n");
+
+ sched_switch_enabled = 0;
+}
+
+/* Read and reset this cpu's context-switch count; irqs are disabled
+ * around the read-and-clear to synchronize with the tracepoint probe.
+ */
+static int gator_events_sched_read(int **buffer, bool sched_switch)
+{
+ unsigned long flags;
+ int len, value;
+ int cpu = get_physical_cpu();
+
+ len = 0;
+ if (sched_switch_enabled) {
+ local_irq_save(flags);
+ value = per_cpu(schedCnt, cpu)[SCHED_SWITCH];
+ per_cpu(schedCnt, cpu)[SCHED_SWITCH] = 0;
+ local_irq_restore(flags);
+ per_cpu(schedGet, cpu)[len++] = sched_switch_key;
+ per_cpu(schedGet, cpu)[len++] = value;
+ }
+
+ if (buffer)
+ *buffer = per_cpu(schedGet, cpu);
+
+ return len;
+}
+
+static struct gator_interface gator_events_sched_interface = {
+ .create_files = gator_events_sched_create_files,
+ .start = gator_events_sched_start,
+ .stop = gator_events_sched_stop,
+ .read = gator_events_sched_read,
+};
+
+/* Register the scheduler counter with gator and allocate its key. */
+int gator_events_sched_init(void)
+{
+ sched_switch_enabled = 0;
+
+ sched_switch_key = gator_events_get_key();
+
+ return gator_events_install(&gator_events_sched_interface);
+}
diff --git a/drivers/parrot/gator/gator_events_scorpion.c b/drivers/parrot/gator/gator_events_scorpion.c
new file mode 100644
index 00000000000000..49219362db0925
--- /dev/null
+++ b/drivers/parrot/gator/gator_events_scorpion.c
@@ -0,0 +1,674 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+/* gator_events_perf_pmu.c is used if perf is supported */
+#if GATOR_NO_PERF_SUPPORT
+
+static const char *pmnc_name;
+static int pmnc_counters;
+
+/* Per-CPU PMNC: config reg */
+#define PMNC_E (1 << 0) /* Enable all counters */
+#define PMNC_P (1 << 1) /* Reset all counters */
+#define PMNC_C (1 << 2) /* Cycle counter reset */
+#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define PMNC_X (1 << 4) /* Export to ETM */
+#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define PMNC_MASK 0x3f /* Mask for writable bits */
+
+/* ccnt reg */
+#define CCNT_REG (1 << 31)
+
+/* Counter indices: slot 0 is the cycle counter, then 4 event counters */
+#define CCNT 0
+#define CNT0 1
+#define CNTMAX (4+1)
+
+/* Per-counter gatorfs state, indexed by CCNT/CNT0.. */
+static unsigned long pmnc_enabled[CNTMAX];
+static unsigned long pmnc_event[CNTMAX];
+static unsigned long pmnc_key[CNTMAX];
+
+/* Per-cpu [key,value] output buffer */
+static DEFINE_PER_CPU(int[CNTMAX * 2], perfCnt);
+
+/* Qualcomm Scorpion implementation-defined PMU event numbers as exposed
+ * to gatord; values below 0x4c are the standard ARMv7 events. The 0x8e+
+ * block differs between single-core Scorpion and ScorpionMP parts.
+ */
+enum scorpion_perf_types {
+ SCORPION_ICACHE_EXPL_INV = 0x4c,
+ SCORPION_ICACHE_MISS = 0x4d,
+ SCORPION_ICACHE_ACCESS = 0x4e,
+ SCORPION_ICACHE_CACHEREQ_L2 = 0x4f,
+ SCORPION_ICACHE_NOCACHE_L2 = 0x50,
+ SCORPION_HIQUP_NOPED = 0x51,
+ SCORPION_DATA_ABORT = 0x52,
+ SCORPION_IRQ = 0x53,
+ SCORPION_FIQ = 0x54,
+ SCORPION_ALL_EXCPT = 0x55,
+ SCORPION_UNDEF = 0x56,
+ SCORPION_SVC = 0x57,
+ SCORPION_SMC = 0x58,
+ SCORPION_PREFETCH_ABORT = 0x59,
+ SCORPION_INDEX_CHECK = 0x5a,
+ SCORPION_NULL_CHECK = 0x5b,
+ SCORPION_EXPL_ICIALLU = 0x5c,
+ SCORPION_IMPL_ICIALLU = 0x5d,
+ SCORPION_NONICIALLU_BTAC_INV = 0x5e,
+ SCORPION_ICIMVAU_IMPL_ICIALLU = 0x5f,
+ SCORPION_SPIPE_ONLY_CYCLES = 0x60,
+ SCORPION_XPIPE_ONLY_CYCLES = 0x61,
+ SCORPION_DUAL_CYCLES = 0x62,
+ SCORPION_DISPATCH_ANY_CYCLES = 0x63,
+ SCORPION_FIFO_FULLBLK_CMT = 0x64,
+ SCORPION_FAIL_COND_INST = 0x65,
+ SCORPION_PASS_COND_INST = 0x66,
+ SCORPION_ALLOW_VU_CLK = 0x67,
+ SCORPION_VU_IDLE = 0x68,
+ SCORPION_ALLOW_L2_CLK = 0x69,
+ SCORPION_L2_IDLE = 0x6a,
+ SCORPION_DTLB_IMPL_INV_SCTLR_DACR = 0x6b,
+ SCORPION_DTLB_EXPL_INV = 0x6c,
+ SCORPION_DTLB_MISS = 0x6d,
+ SCORPION_DTLB_ACCESS = 0x6e,
+ SCORPION_ITLB_MISS = 0x6f,
+ SCORPION_ITLB_IMPL_INV = 0x70,
+ SCORPION_ITLB_EXPL_INV = 0x71,
+ SCORPION_UTLB_D_MISS = 0x72,
+ SCORPION_UTLB_D_ACCESS = 0x73,
+ SCORPION_UTLB_I_MISS = 0x74,
+ SCORPION_UTLB_I_ACCESS = 0x75,
+ SCORPION_UTLB_INV_ASID = 0x76,
+ SCORPION_UTLB_INV_MVA = 0x77,
+ SCORPION_UTLB_INV_ALL = 0x78,
+ SCORPION_S2_HOLD_RDQ_UNAVAIL = 0x79,
+ SCORPION_S2_HOLD = 0x7a,
+ SCORPION_S2_HOLD_DEV_OP = 0x7b,
+ SCORPION_S2_HOLD_ORDER = 0x7c,
+ SCORPION_S2_HOLD_BARRIER = 0x7d,
+ SCORPION_VIU_DUAL_CYCLE = 0x7e,
+ SCORPION_VIU_SINGLE_CYCLE = 0x7f,
+ SCORPION_VX_PIPE_WAR_STALL_CYCLES = 0x80,
+ SCORPION_VX_PIPE_WAW_STALL_CYCLES = 0x81,
+ SCORPION_VX_PIPE_RAW_STALL_CYCLES = 0x82,
+ SCORPION_VX_PIPE_LOAD_USE_STALL = 0x83,
+ SCORPION_VS_PIPE_WAR_STALL_CYCLES = 0x84,
+ SCORPION_VS_PIPE_WAW_STALL_CYCLES = 0x85,
+ SCORPION_VS_PIPE_RAW_STALL_CYCLES = 0x86,
+ SCORPION_EXCEPTIONS_INV_OPERATION = 0x87,
+ SCORPION_EXCEPTIONS_DIV_BY_ZERO = 0x88,
+ SCORPION_COND_INST_FAIL_VX_PIPE = 0x89,
+ SCORPION_COND_INST_FAIL_VS_PIPE = 0x8a,
+ SCORPION_EXCEPTIONS_OVERFLOW = 0x8b,
+ SCORPION_EXCEPTIONS_UNDERFLOW = 0x8c,
+ SCORPION_EXCEPTIONS_DENORM = 0x8d,
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
+ SCORPIONMP_NUM_BARRIERS = 0x8e,
+ SCORPIONMP_BARRIER_CYCLES = 0x8f,
+#else
+ SCORPION_BANK_AB_HIT = 0x8e,
+ SCORPION_BANK_AB_ACCESS = 0x8f,
+ SCORPION_BANK_CD_HIT = 0x90,
+ SCORPION_BANK_CD_ACCESS = 0x91,
+ SCORPION_BANK_AB_DSIDE_HIT = 0x92,
+ SCORPION_BANK_AB_DSIDE_ACCESS = 0x93,
+ SCORPION_BANK_CD_DSIDE_HIT = 0x94,
+ SCORPION_BANK_CD_DSIDE_ACCESS = 0x95,
+ SCORPION_BANK_AB_ISIDE_HIT = 0x96,
+ SCORPION_BANK_AB_ISIDE_ACCESS = 0x97,
+ SCORPION_BANK_CD_ISIDE_HIT = 0x98,
+ SCORPION_BANK_CD_ISIDE_ACCESS = 0x99,
+ SCORPION_ISIDE_RD_WAIT = 0x9a,
+ SCORPION_DSIDE_RD_WAIT = 0x9b,
+ SCORPION_BANK_BYPASS_WRITE = 0x9c,
+ SCORPION_BANK_AB_NON_CASTOUT = 0x9d,
+ SCORPION_BANK_AB_L2_CASTOUT = 0x9e,
+ SCORPION_BANK_CD_NON_CASTOUT = 0x9f,
+ SCORPION_BANK_CD_L2_CASTOUT = 0xa0,
+#endif
+ MSM_MAX_EVT
+};
+
+/* Mapping entry from a public event number to hardware programming */
+struct scorp_evt {
+ u32 evt_type; /* scorpion_perf_types value */
+ u32 val; /* value written to the LPM selection register */
+ u8 grp; /* LPM register group (0-4) the event lives in */
+ u32 evt_type_act; /* event code actually programmed into PMNC */
+};
+
+/* Event-to-LPM mapping table; entries are grouped by LPM register
+ * (grp 0..4) matching the order of enum scorpion_perf_types.
+ */
+static const struct scorp_evt sc_evt[] = {
+ {SCORPION_ICACHE_EXPL_INV, 0x80000500, 0, 0x4d},
+ {SCORPION_ICACHE_MISS, 0x80050000, 0, 0x4e},
+ {SCORPION_ICACHE_ACCESS, 0x85000000, 0, 0x4f},
+ {SCORPION_ICACHE_CACHEREQ_L2, 0x86000000, 0, 0x4f},
+ {SCORPION_ICACHE_NOCACHE_L2, 0x87000000, 0, 0x4f},
+ {SCORPION_HIQUP_NOPED, 0x80080000, 0, 0x4e},
+ {SCORPION_DATA_ABORT, 0x8000000a, 0, 0x4c},
+ {SCORPION_IRQ, 0x80000a00, 0, 0x4d},
+ {SCORPION_FIQ, 0x800a0000, 0, 0x4e},
+ {SCORPION_ALL_EXCPT, 0x8a000000, 0, 0x4f},
+ {SCORPION_UNDEF, 0x8000000b, 0, 0x4c},
+ {SCORPION_SVC, 0x80000b00, 0, 0x4d},
+ {SCORPION_SMC, 0x800b0000, 0, 0x4e},
+ {SCORPION_PREFETCH_ABORT, 0x8b000000, 0, 0x4f},
+ {SCORPION_INDEX_CHECK, 0x8000000c, 0, 0x4c},
+ {SCORPION_NULL_CHECK, 0x80000c00, 0, 0x4d},
+ {SCORPION_EXPL_ICIALLU, 0x8000000d, 0, 0x4c},
+ {SCORPION_IMPL_ICIALLU, 0x80000d00, 0, 0x4d},
+ {SCORPION_NONICIALLU_BTAC_INV, 0x800d0000, 0, 0x4e},
+ {SCORPION_ICIMVAU_IMPL_ICIALLU, 0x8d000000, 0, 0x4f},
+
+ {SCORPION_SPIPE_ONLY_CYCLES, 0x80000600, 1, 0x51},
+ {SCORPION_XPIPE_ONLY_CYCLES, 0x80060000, 1, 0x52},
+ {SCORPION_DUAL_CYCLES, 0x86000000, 1, 0x53},
+ {SCORPION_DISPATCH_ANY_CYCLES, 0x89000000, 1, 0x53},
+ {SCORPION_FIFO_FULLBLK_CMT, 0x8000000d, 1, 0x50},
+ {SCORPION_FAIL_COND_INST, 0x800d0000, 1, 0x52},
+ {SCORPION_PASS_COND_INST, 0x8d000000, 1, 0x53},
+ {SCORPION_ALLOW_VU_CLK, 0x8000000e, 1, 0x50},
+ {SCORPION_VU_IDLE, 0x80000e00, 1, 0x51},
+ {SCORPION_ALLOW_L2_CLK, 0x800e0000, 1, 0x52},
+ {SCORPION_L2_IDLE, 0x8e000000, 1, 0x53},
+
+ {SCORPION_DTLB_IMPL_INV_SCTLR_DACR, 0x80000001, 2, 0x54},
+ {SCORPION_DTLB_EXPL_INV, 0x80000100, 2, 0x55},
+ {SCORPION_DTLB_MISS, 0x80010000, 2, 0x56},
+ {SCORPION_DTLB_ACCESS, 0x81000000, 2, 0x57},
+ {SCORPION_ITLB_MISS, 0x80000200, 2, 0x55},
+ {SCORPION_ITLB_IMPL_INV, 0x80020000, 2, 0x56},
+ {SCORPION_ITLB_EXPL_INV, 0x82000000, 2, 0x57},
+ {SCORPION_UTLB_D_MISS, 0x80000003, 2, 0x54},
+ {SCORPION_UTLB_D_ACCESS, 0x80000300, 2, 0x55},
+ {SCORPION_UTLB_I_MISS, 0x80030000, 2, 0x56},
+ {SCORPION_UTLB_I_ACCESS, 0x83000000, 2, 0x57},
+ {SCORPION_UTLB_INV_ASID, 0x80000400, 2, 0x55},
+ {SCORPION_UTLB_INV_MVA, 0x80040000, 2, 0x56},
+ {SCORPION_UTLB_INV_ALL, 0x84000000, 2, 0x57},
+ {SCORPION_S2_HOLD_RDQ_UNAVAIL, 0x80000800, 2, 0x55},
+ {SCORPION_S2_HOLD, 0x88000000, 2, 0x57},
+ {SCORPION_S2_HOLD_DEV_OP, 0x80000900, 2, 0x55},
+ {SCORPION_S2_HOLD_ORDER, 0x80090000, 2, 0x56},
+ {SCORPION_S2_HOLD_BARRIER, 0x89000000, 2, 0x57},
+
+ {SCORPION_VIU_DUAL_CYCLE, 0x80000001, 4, 0x5c},
+ {SCORPION_VIU_SINGLE_CYCLE, 0x80000100, 4, 0x5d},
+ {SCORPION_VX_PIPE_WAR_STALL_CYCLES, 0x80000005, 4, 0x5c},
+ {SCORPION_VX_PIPE_WAW_STALL_CYCLES, 0x80000500, 4, 0x5d},
+ {SCORPION_VX_PIPE_RAW_STALL_CYCLES, 0x80050000, 4, 0x5e},
+ {SCORPION_VX_PIPE_LOAD_USE_STALL, 0x80000007, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAR_STALL_CYCLES, 0x80000008, 4, 0x5c},
+ {SCORPION_VS_PIPE_WAW_STALL_CYCLES, 0x80000800, 4, 0x5d},
+ {SCORPION_VS_PIPE_RAW_STALL_CYCLES, 0x80080000, 4, 0x5e},
+ {SCORPION_EXCEPTIONS_INV_OPERATION, 0x8000000b, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_DIV_BY_ZERO, 0x80000b00, 4, 0x5d},
+ {SCORPION_COND_INST_FAIL_VX_PIPE, 0x800b0000, 4, 0x5e},
+ {SCORPION_COND_INST_FAIL_VS_PIPE, 0x8b000000, 4, 0x5f},
+ {SCORPION_EXCEPTIONS_OVERFLOW, 0x8000000c, 4, 0x5c},
+ {SCORPION_EXCEPTIONS_UNDERFLOW, 0x80000c00, 4, 0x5d},
+ {SCORPION_EXCEPTIONS_DENORM, 0x8c000000, 4, 0x5f},
+
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
+ {SCORPIONMP_NUM_BARRIERS, 0x80000e00, 3, 0x59},
+ {SCORPIONMP_BARRIER_CYCLES, 0x800e0000, 3, 0x5a},
+#else
+ {SCORPION_BANK_AB_HIT, 0x80000001, 3, 0x58},
+ {SCORPION_BANK_AB_ACCESS, 0x80000100, 3, 0x59},
+ {SCORPION_BANK_CD_HIT, 0x80010000, 3, 0x5a},
+ {SCORPION_BANK_CD_ACCESS, 0x81000000, 3, 0x5b},
+ {SCORPION_BANK_AB_DSIDE_HIT, 0x80000002, 3, 0x58},
+ {SCORPION_BANK_AB_DSIDE_ACCESS, 0x80000200, 3, 0x59},
+ {SCORPION_BANK_CD_DSIDE_HIT, 0x80020000, 3, 0x5a},
+ {SCORPION_BANK_CD_DSIDE_ACCESS, 0x82000000, 3, 0x5b},
+ {SCORPION_BANK_AB_ISIDE_HIT, 0x80000003, 3, 0x58},
+ {SCORPION_BANK_AB_ISIDE_ACCESS, 0x80000300, 3, 0x59},
+ {SCORPION_BANK_CD_ISIDE_HIT, 0x80030000, 3, 0x5a},
+ {SCORPION_BANK_CD_ISIDE_ACCESS, 0x83000000, 3, 0x5b},
+ {SCORPION_ISIDE_RD_WAIT, 0x80000009, 3, 0x58},
+ {SCORPION_DSIDE_RD_WAIT, 0x80090000, 3, 0x5a},
+ {SCORPION_BANK_BYPASS_WRITE, 0x8000000a, 3, 0x58},
+ {SCORPION_BANK_AB_NON_CASTOUT, 0x8000000c, 3, 0x58},
+ {SCORPION_BANK_AB_L2_CASTOUT, 0x80000c00, 3, 0x59},
+ {SCORPION_BANK_CD_NON_CASTOUT, 0x800c0000, 3, 0x5a},
+ {SCORPION_BANK_CD_L2_CASTOUT, 0x8c000000, 3, 0x5b},
+#endif
+};
+
+/* Write the Scorpion PMNC control register (cp15 c9,c12,0); val is masked to the architecturally valid bits. */
+static inline void scorpion_pmnc_write(u32 val)
+{
+ val &= PMNC_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+/* Read the PMNC control register (cp15 c9,c12,0). */
+static inline u32 scorpion_pmnc_read(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+/* Read the cycle counter CCNT (cp15 c9,c13,0). */
+static inline u32 scorpion_ccnt_read(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ return val;
+}
+
+/* Read the currently selected event counter PMXEVCNTR (cp15 c9,c13,2);
+ * scorpion_pmnc_select_counter() must have chosen the counter first.
+ */
+static inline u32 scorpion_cntn_read(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+ return val;
+}
+
+/* Enable PMU counter cnt via PMCNTENSET (cp15 c9,c12,1).
+ * Returns cnt on success; NOTE(review): returns -1 on a bad index despite
+ * the u32 return type, so callers should compare the result against cnt.
+ */
+static inline u32 scorpion_pmnc_enable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u enabling wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ if (cnt == CCNT)
+ val = CCNT_REG;
+ else
+ val = (1 << (cnt - CNT0));
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+
+ return cnt;
+}
+
+/* Disable PMU counter cnt via PMCNTENCLR (cp15 c9,c12,2); same
+ * return convention as scorpion_pmnc_enable_counter().
+ */
+static inline u32 scorpion_pmnc_disable_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u disabling wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ if (cnt == CCNT)
+ val = CCNT_REG;
+ else
+ val = (1 << (cnt - CNT0));
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+
+ return cnt;
+}
+
+/* Select event counter cnt in PMSELR (cp15 c9,c12,5) so subsequent
+ * PMXEVCNTR/PMXEVTYPER accesses target it. The cycle counter cannot be
+ * selected. Returns cnt on success, -1 on a bad index.
+ */
+static inline int scorpion_pmnc_select_counter(unsigned int cnt)
+{
+ u32 val;
+
+ if ((cnt == CCNT) || (cnt >= CNTMAX)) {
+ pr_err("gator: CPU%u selecting wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ return -1;
+ }
+
+ val = (cnt - CNT0);
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+
+ return cnt;
+}
+
+/*
+ * Accessors for the Scorpion implementation-defined Local Performance
+ * Monitor (LPM) registers, one read/write pair per event group:
+ * LPM0-LPM2 (cp15 c15,c0,0 at opc1 0/1/2), L2LPM (cp15 c15,c2,0 at
+ * opc1 3) and the VeNum VLPM (cp10 c11,c0,0 at opc1 7). The VLPM pair
+ * requires CP10 access to be enabled first (see scorpion_pre_vlpm()).
+ */
+static u32 scorpion_read_lpm0(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm0(u32 val)
+{
+ asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm1(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm1(u32 val)
+{
+ asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_lpm2(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_lpm2(u32 val)
+{
+ asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_l2lpm(void)
+{
+ u32 val;
+
+ asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_l2lpm(u32 val)
+{
+ asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+}
+
+static u32 scorpion_read_vlpm(void)
+{
+ u32 val;
+
+ asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void scorpion_write_vlpm(u32 val)
+{
+ asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
+}
+
+/* Read/write function pair for one LPM register group. */
+struct scorpion_access_funcs {
+ u32 (*read)(void);
+ void (*write)(u32);
+};
+
+/* Indexed by event group number (0-4): LPM0, LPM1, LPM2, L2LPM, VLPM. */
+struct scorpion_access_funcs scor_func[] = {
+ {scorpion_read_lpm0, scorpion_write_lpm0},
+ {scorpion_read_lpm1, scorpion_write_lpm1},
+ {scorpion_read_lpm2, scorpion_write_lpm2},
+ {scorpion_read_l2lpm, scorpion_write_l2lpm},
+ {scorpion_read_vlpm, scorpion_write_vlpm},
+};
+
+/* Saved CPACR and FPEXC values, restored by scorpion_post_vlpm(). */
+u32 venum_orig_val;
+u32 fp_orig_val;
+
+/* Temporarily grant CP10 (VFP/VeNum) access so the VLPM register can be
+ * touched: enable CP10 in CPACR and set the EN bit in FPEXC. Must be
+ * paired with scorpion_post_vlpm().
+ */
+static void scorpion_pre_vlpm(void)
+{
+ u32 venum_new_val;
+ u32 fp_new_val;
+
+ /* CPACR Enable CP10 access */
+ asm volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (venum_orig_val));
+ venum_new_val = venum_orig_val | 0x00300000;
+ asm volatile("mcr p15, 0, %0, c1, c0, 2" : : "r" (venum_new_val));
+ /* Enable FPEXC */
+ asm volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (fp_orig_val));
+ fp_new_val = fp_orig_val | 0x40000000;
+ asm volatile("mcr p10, 7, %0, c8, c0, 0" : : "r" (fp_new_val));
+}
+
+/* Undo scorpion_pre_vlpm(): restore the saved FPEXC and CPACR values. */
+static void scorpion_post_vlpm(void)
+{
+ /* Restore FPEXC */
+ asm volatile("mcr p10, 7, %0, c8, c0, 0" : : "r" (fp_orig_val));
+ /* Restore CPACR */
+ asm volatile("mcr p15, 0, %0, c1, c0, 2" : : "r" (venum_orig_val));
+}
+
+#define COLMN0MASK 0x000000ff
+#define COLMN1MASK 0x0000ff00
+#define COLMN2MASK 0x00ff0000
+
+/* Given an encoded event value, return the mask that preserves every LPM
+ * byte column except the one the event occupies (top byte case also
+ * clears bits 30-24).
+ */
+static u32 scorpion_get_columnmask(u32 setval)
+{
+ if (setval & COLMN0MASK)
+ return 0xffffff00;
+ if (setval & COLMN1MASK)
+ return 0xffff00ff;
+ if (setval & COLMN2MASK)
+ return 0xff00ffff;
+ return 0x80ffffff;
+}
+
+/* Merge setval into the LPM register of group gr (read-modify-write of
+ * the relevant byte column). Group 4 is the VeNum VLPM, which needs
+ * CP10 access enabled around the access.
+ */
+static void scorpion_evt_setup(u32 gr, u32 setval)
+{
+ u32 val;
+
+ if (gr == 4)
+ scorpion_pre_vlpm();
+ val = scorpion_get_columnmask(setval) & scor_func[gr].read();
+ val = val | setval;
+ scor_func[gr].write(val);
+ if (gr == 4)
+ scorpion_post_vlpm();
+}
+
+/* Look up an extended Scorpion event (types 0x4c..MSM_MAX_EVT-1) in the
+ * sc_evt table, which is indexed by (evt_type - 0x4c). On a match, fill
+ * *evtinfo with the LPM value, group and actual event type and return 1;
+ * return 0 for out-of-range or unmapped event types.
+ */
+static int get_scorpion_evtinfo(unsigned int evt_type, struct scorp_evt *evtinfo)
+{
+ u32 idx;
+
+ if ((evt_type < 0x4c) || (evt_type >= MSM_MAX_EVT))
+ return 0;
+ idx = evt_type - 0x4c;
+ if (sc_evt[idx].evt_type == evt_type) {
+ evtinfo->val = sc_evt[idx].val;
+ evtinfo->grp = sc_evt[idx].grp;
+ evtinfo->evt_type_act = sc_evt[idx].evt_type_act;
+ return 1;
+ }
+ return 0;
+}
+
+/* Program the event type for counter cnt. Architectural events (< 0x40)
+ * are written straight to PMXEVTYPER; extended Scorpion events are
+ * translated via get_scorpion_evtinfo() and additionally configure the
+ * matching LPM group. Silently does nothing for an unmapped event or a
+ * failed counter select.
+ */
+static inline void scorpion_pmnc_write_evtsel(unsigned int cnt, u32 val)
+{
+ if (scorpion_pmnc_select_counter(cnt) == cnt) {
+ if (val < 0x40) {
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+ } else {
+ u32 zero = 0;
+ struct scorp_evt evtinfo;
+ /* extract evtinfo.grp and evtinfo.tevt_type_act from val */
+ if (get_scorpion_evtinfo(val, &evtinfo) == 0)
+ return;
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (evtinfo.evt_type_act));
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (zero));
+ scorpion_evt_setup(evtinfo.grp, val);
+ }
+ }
+}
+
+/* Zero counter cnt (cycle counter or event counter), disabling it around
+ * the write and re-enabling it only if it was marked enabled in
+ * pmnc_enabled[]. Logs and does nothing for an out-of-range index.
+ */
+static void scorpion_pmnc_reset_counter(unsigned int cnt)
+{
+ u32 val = 0;
+
+ if (cnt == CCNT) {
+ scorpion_pmnc_disable_counter(cnt);
+
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+
+ if (pmnc_enabled[cnt] != 0)
+ scorpion_pmnc_enable_counter(cnt);
+
+ } else if (cnt >= CNTMAX) {
+ pr_err("gator: CPU%u resetting wrong PMNC counter %d\n", smp_processor_id(), cnt);
+ } else {
+ scorpion_pmnc_disable_counter(cnt);
+
+ if (scorpion_pmnc_select_counter(cnt) == cnt)
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
+
+ if (pmnc_enabled[cnt] != 0)
+ scorpion_pmnc_enable_counter(cnt);
+ }
+}
+
+/* Create one gatorfs directory per counter ("<name>_ccnt" for the cycle
+ * counter, "<name>_cnt<i>" for event counters) with "enabled", "key" and
+ * (event counters only) "event" control files. Returns 0 on success,
+ * -1 if a directory cannot be created.
+ */
+static int gator_events_scorpion_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int i;
+
+ for (i = 0; i < pmnc_counters; i++) {
+ char buf[40];
+
+ if (i == 0)
+ snprintf(buf, sizeof(buf), "%s_ccnt", pmnc_name);
+ else
+ snprintf(buf, sizeof(buf), "%s_cnt%d", pmnc_name, i - 1);
+ dir = gatorfs_mkdir(sb, root, buf);
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &pmnc_enabled[i]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &pmnc_key[i]);
+ if (i > 0)
+ gatorfs_create_ulong(sb, dir, "event", &pmnc_event[i]);
+ }
+
+ return 0;
+}
+
+/* Per-CPU online hook: reprogram and arm the PMU. Disables the PMU if it
+ * was running, resets it (P and C bits), configures/enables every counter
+ * marked in pmnc_enabled[], re-enables the PMU, then discards the first
+ * (invalid) reading of each counter and emits a (key, 0) pair per enabled
+ * counter into this CPU's perfCnt buffer. Returns the number of ints
+ * written and, via *buffer, the buffer itself.
+ */
+static int gator_events_scorpion_online(int **buffer, bool migrate)
+{
+ unsigned int cnt, len = 0, cpu = smp_processor_id();
+
+ if (scorpion_pmnc_read() & PMNC_E)
+ scorpion_pmnc_write(scorpion_pmnc_read() & ~PMNC_E);
+
+ /* Initialize & Reset PMNC: C bit and P bit */
+ scorpion_pmnc_write(PMNC_P | PMNC_C);
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ unsigned long event;
+
+ if (!pmnc_enabled[cnt])
+ continue;
+
+ /* disable counter */
+ scorpion_pmnc_disable_counter(cnt);
+
+ event = pmnc_event[cnt] & 255;
+
+ /* Set event (if destined for PMNx counters), We don't need to set the event if it's a cycle count */
+ if (cnt != CCNT)
+ scorpion_pmnc_write_evtsel(cnt, event);
+
+ /* reset counter */
+ scorpion_pmnc_reset_counter(cnt);
+
+ /* Enable counter, do not enable interrupt for this counter */
+ scorpion_pmnc_enable_counter(cnt);
+ }
+
+ /* enable */
+ scorpion_pmnc_write(scorpion_pmnc_read() | PMNC_E);
+
+ /* read the counters and toss the invalid data, return zero instead */
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ if (cnt == CCNT)
+ scorpion_ccnt_read();
+ else if (scorpion_pmnc_select_counter(cnt) == cnt)
+ scorpion_cntn_read();
+ scorpion_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = 0;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+/* Per-CPU offline hook: stop the PMU by clearing the enable bit.
+ * Emits no counter data (returns 0).
+ */
+static int gator_events_scorpion_offline(int **buffer, bool migrate)
+{
+ scorpion_pmnc_write(scorpion_pmnc_read() & ~PMNC_E);
+ return 0;
+}
+
+/* Capture-stop hook: clear the enabled flag and event selection for every
+ * counter so the next session starts from a clean configuration.
+ */
+static void gator_events_scorpion_stop(void)
+{
+ unsigned int cnt;
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ }
+}
+
+/* Sample hook: read every enabled counter, reset it, and emit (key, value)
+ * int pairs into this CPU's perfCnt buffer. Returns the number of ints
+ * written (0 if the PMU is not yet enabled, e.g. before the online hook
+ * has run).
+ */
+static int gator_events_scorpion_read(int **buffer, bool sched_switch)
+{
+ int cnt, len = 0;
+ int cpu = smp_processor_id();
+
+ /* a context switch may occur before the online hotplug event, thus need to check that the pmu is enabled */
+ if (!(scorpion_pmnc_read() & PMNC_E))
+ return 0;
+
+ for (cnt = 0; cnt < pmnc_counters; cnt++) {
+ if (pmnc_enabled[cnt]) {
+ int value;
+
+ if (cnt == CCNT)
+ value = scorpion_ccnt_read();
+ else if (scorpion_pmnc_select_counter(cnt) == cnt)
+ value = scorpion_cntn_read();
+ else
+ value = 0;
+ scorpion_pmnc_reset_counter(cnt);
+
+ per_cpu(perfCnt, cpu)[len++] = pmnc_key[cnt];
+ per_cpu(perfCnt, cpu)[len++] = value;
+ }
+ }
+
+ if (buffer)
+ *buffer = per_cpu(perfCnt, cpu);
+
+ return len;
+}
+
+/* Hook table registered with the gator core for Scorpion PMU events. */
+static struct gator_interface gator_events_scorpion_interface = {
+ .create_files = gator_events_scorpion_create_files,
+ .stop = gator_events_scorpion_stop,
+ .online = gator_events_scorpion_online,
+ .offline = gator_events_scorpion_offline,
+ .read = gator_events_scorpion_read,
+};
+
+/* Detect a Scorpion/ScorpionMP CPU, size the counter set (4 event
+ * counters plus CCNT), initialize the per-counter bookkeeping and
+ * register the event source with gator. Returns -1 on a non-Scorpion
+ * CPU, otherwise the result of gator_events_install().
+ */
+int gator_events_scorpion_init(void)
+{
+ unsigned int cnt;
+
+ switch (gator_cpuid()) {
+ case SCORPION:
+ pmnc_name = "Scorpion";
+ pmnc_counters = 4;
+ break;
+ case SCORPIONMP:
+ pmnc_name = "ScorpionMP";
+ pmnc_counters = 4;
+ break;
+ default:
+ return -1;
+ }
+
+ /* CNT[n] + CCNT */
+ pmnc_counters++;
+
+ for (cnt = CCNT; cnt < CNTMAX; cnt++) {
+ pmnc_enabled[cnt] = 0;
+ pmnc_event[cnt] = 0;
+ pmnc_key[cnt] = gator_events_get_key();
+ }
+
+ return gator_events_install(&gator_events_scorpion_interface);
+}
+
+#endif
diff --git a/drivers/parrot/gator/gator_fs.c b/drivers/parrot/gator/gator_fs.c
new file mode 100644
index 00000000000000..d8fb357b9edac1
--- /dev/null
+++ b/drivers/parrot/gator/gator_fs.c
@@ -0,0 +1,370 @@
+/**
+ * @file gatorfs.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * A simple filesystem for configuration and
+ * access of oprofile.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+
+#define gatorfs_MAGIC 0x24051020
+#define TMPBUFSIZE 50
+/* Serializes parsing into the caller-supplied value in the *_from_user helpers. */
+DEFINE_SPINLOCK(gatorfs_lock);
+
+/* Allocate a new gatorfs inode with the given mode and current timestamps.
+ * Returns NULL on allocation failure.
+ */
+static struct inode *gatorfs_get_inode(struct super_block *sb, int mode)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
+ inode->i_ino = get_next_ino();
+#endif
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ }
+ return inode;
+}
+
+/* Minimal superblock ops: libfs statfs, drop inodes immediately on last use. */
+static const struct super_operations s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+/* Format val as a decimal line and copy it to userspace, honoring the
+ * file offset. Returns the number of bytes copied or a negative errno.
+ */
+static ssize_t gatorfs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
+{
+ char tmpbuf[TMPBUFSIZE];
+ size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+
+ if (maxlen > TMPBUFSIZE)
+ maxlen = TMPBUFSIZE;
+ return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+}
+
+/* 64-bit variant of gatorfs_ulong_to_user(). */
+static ssize_t gatorfs_u64_to_user(u64 val, char __user *buf, size_t count, loff_t *offset)
+{
+ char tmpbuf[TMPBUFSIZE];
+ size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%llu\n", val);
+
+ if (maxlen > TMPBUFSIZE)
+ maxlen = TMPBUFSIZE;
+ return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+}
+
+/* Parse an unsigned long written by userspace into *val. The update is
+ * done under gatorfs_lock so concurrent writers cannot interleave.
+ * Returns 0 on success (including a zero-length write, which leaves *val
+ * untouched), -EINVAL for oversized input, -EFAULT on copy failure.
+ */
+static int gatorfs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
+{
+ char tmpbuf[TMPBUFSIZE];
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ if (count > TMPBUFSIZE - 1)
+ return -EINVAL;
+
+ memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
+
+ spin_lock_irqsave(&gatorfs_lock, flags);
+ *val = simple_strtoul(tmpbuf, NULL, 0);
+ spin_unlock_irqrestore(&gatorfs_lock, flags);
+ return 0;
+}
+
+/* 64-bit variant of gatorfs_ulong_from_user(). */
+static int gatorfs_u64_from_user(u64 *val, char const __user *buf, size_t count)
+{
+ char tmpbuf[TMPBUFSIZE];
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ if (count > TMPBUFSIZE - 1)
+ return -EINVAL;
+
+ memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
+
+ spin_lock_irqsave(&gatorfs_lock, flags);
+ *val = simple_strtoull(tmpbuf, NULL, 0);
+ spin_unlock_irqrestore(&gatorfs_lock, flags);
+ return 0;
+}
+
+/* file_operations backends: each file's private_data points at the
+ * kernel variable (set in default_open() from inode->i_private).
+ */
+
+/* Read the unsigned long behind this file as decimal text. */
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long *val = file->private_data;
+
+ return gatorfs_ulong_to_user(*val, buf, count, offset);
+}
+
+/* Read the u64 behind this file as decimal text. */
+static ssize_t u64_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ u64 *val = file->private_data;
+
+ return gatorfs_u64_to_user(*val, buf, count, offset);
+}
+
+/* Overwrite the unsigned long behind this file; only whole writes at
+ * offset 0 are accepted. Returns count on success or a negative errno.
+ */
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+ unsigned long *value = file->private_data;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = gatorfs_ulong_from_user(value, buf, count);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+/* 64-bit variant of ulong_write_file(). */
+static ssize_t u64_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+ u64 *value = file->private_data;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = gatorfs_u64_from_user(value, buf, count);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+/* Stash the inode's private value pointer on the file for the read/write ops. */
+static int default_open(struct inode *inode, struct file *filp)
+{
+ if (inode->i_private)
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/* Read/write file ops for an unsigned long-backed file. */
+static const struct file_operations ulong_fops = {
+ .read = ulong_read_file,
+ .write = ulong_write_file,
+ .open = default_open,
+};
+
+/* Read/write file ops for a u64-backed file. */
+static const struct file_operations u64_fops = {
+ .read = u64_read_file,
+ .write = u64_write_file,
+ .open = default_open,
+};
+
+/* Read-only variants of the above. */
+static const struct file_operations ulong_ro_fops = {
+ .read = ulong_read_file,
+ .open = default_open,
+};
+
+static const struct file_operations u64_ro_fops = {
+ .read = u64_read_file,
+ .open = default_open,
+};
+
+/* Create a regular file named name under root with the given fops and
+ * permissions. Returns the new dentry, or NULL on allocation failure
+ * (the dentry is dropped if inode allocation fails).
+ */
+static struct dentry *__gatorfs_create_file(struct super_block *sb,
+ struct dentry *root,
+ char const *name,
+ const struct file_operations *fops,
+ int perm)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ return NULL;
+ inode = gatorfs_get_inode(sb, S_IFREG | perm);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+ inode->i_fop = fops;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+/* Create a read/write file exposing *val as an unsigned long.
+ * Returns 0 on success, -EFAULT if the file could not be created.
+ */
+int gatorfs_create_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &ulong_fops, 0644);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+/* Read/write u64 variant of gatorfs_create_ulong(). */
+static int gatorfs_create_u64(struct super_block *sb, struct dentry *root,
+ char const *name, u64 *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &u64_fops, 0644);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+/* Read-only unsigned long variant. */
+int gatorfs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
+{
+ struct dentry *d = __gatorfs_create_file(sb, root, name,
+ &ulong_ro_fops, 0444);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+/* Read-only u64 variant. */
+static int gatorfs_create_ro_u64(struct super_block *sb, struct dentry *root,
+ char const *name, u64 *val)
+{
+ struct dentry *d =
+ __gatorfs_create_file(sb, root, name, &u64_ro_fops, 0444);
+ if (!d)
+ return -EFAULT;
+
+ d->d_inode->i_private = val;
+ return 0;
+}
+
+/* Read an atomic_t-backed file as decimal text. */
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ atomic_t *val = file->private_data;
+
+ return gatorfs_ulong_to_user(atomic_read(val), buf, count, offset);
+}
+
+/* Read-only file ops for an atomic_t-backed file. */
+static const struct file_operations atomic_ro_fops = {
+ .read = atomic_read_file,
+ .open = default_open,
+};
+
+/* Create a 0644 file with caller-supplied fops (no backing value pointer).
+ * Returns 0 on success, -EFAULT on failure.
+ */
+static int gatorfs_create_file(struct super_block *sb, struct dentry *root,
+ char const *name, const struct file_operations *fops)
+{
+ if (!__gatorfs_create_file(sb, root, name, fops, 0644))
+ return -EFAULT;
+ return 0;
+}
+
+/* Same as gatorfs_create_file() but with explicit permissions. */
+static int gatorfs_create_file_perm(struct super_block *sb, struct dentry *root,
+ char const *name,
+ const struct file_operations *fops, int perm)
+{
+ if (!__gatorfs_create_file(sb, root, name, fops, perm))
+ return -EFAULT;
+ return 0;
+}
+
+/* Create a 0755 directory named name under root, backed by the libfs
+ * simple dir operations. Returns the new dentry or NULL on failure.
+ */
+struct dentry *gatorfs_mkdir(struct super_block *sb,
+ struct dentry *root, char const *name)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ return NULL;
+ inode = gatorfs_get_inode(sb, S_IFDIR | 0755);
+ if (!inode) {
+ dput(dentry);
+ return NULL;
+ }
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ d_add(dentry, inode);
+ return dentry;
+}
+
+/* Populate the gatorfs superblock: set block size/magic/ops, build the
+ * root directory inode and dentry (API differs across kernel versions),
+ * then let the gator core create its control files under the root.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int gatorfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode;
+ struct dentry *root_dentry;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = gatorfs_MAGIC;
+ sb->s_op = &s_ops;
+ sb->s_time_gran = 1;
+
+ root_inode = gatorfs_get_inode(sb, S_IFDIR | 0755);
+ if (!root_inode)
+ return -ENOMEM;
+ root_inode->i_op = &simple_dir_inode_operations;
+ root_inode->i_fop = &simple_dir_operations;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+ root_dentry = d_alloc_root(root_inode);
+#else
+ root_dentry = d_make_root(root_inode);
+#endif
+
+ if (!root_dentry) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+ /* d_make_root() drops the inode itself on failure; d_alloc_root() does not */
+ iput(root_inode);
+#endif
+ return -ENOMEM;
+ }
+
+ sb->s_root = root_dentry;
+
+ gator_op_create_files(sb, root_dentry);
+
+ return 0;
+}
+
+/* Mount entry points: .get_sb on kernels before 2.6.39, .mount after. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+static int gatorfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_single(fs_type, flags, data, gatorfs_fill_super, mnt);
+}
+#else
+static struct dentry *gatorfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_nodev(fs_type, flags, data, gatorfs_fill_super);
+}
+#endif
+
+/* The "gatorfs" pseudo-filesystem type. */
+static struct file_system_type gatorfs_type = {
+ .owner = THIS_MODULE,
+ .name = "gatorfs",
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+ .get_sb = gatorfs_get_sb,
+#else
+ .mount = gatorfs_mount,
+#endif
+
+ .kill_sb = kill_litter_super,
+};
+
+/* Register/unregister gatorfs with the VFS. */
+static int __init gatorfs_register(void)
+{
+ return register_filesystem(&gatorfs_type);
+}
+
+static void gatorfs_unregister(void)
+{
+ unregister_filesystem(&gatorfs_type);
+}
diff --git a/drivers/parrot/gator/gator_hrtimer_gator.c b/drivers/parrot/gator/gator_hrtimer_gator.c
new file mode 100644
index 00000000000000..c1525e10a8da37
--- /dev/null
+++ b/drivers/parrot/gator/gator_hrtimer_gator.c
@@ -0,0 +1,80 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/* Sampling callback invoked from each timer tick. */
+void (*callback)(void);
+/* Per-CPU timer, its next expiry, and whether it is currently armed. */
+DEFINE_PER_CPU(struct hrtimer, percpu_hrtimer);
+DEFINE_PER_CPU(ktime_t, hrtimer_expire);
+DEFINE_PER_CPU(int, hrtimer_is_active);
+/* Tick period; .tv64 == 0 means sampling is disabled. */
+static ktime_t profiling_interval;
+static void gator_hrtimer_online(void);
+static void gator_hrtimer_offline(void);
+
+/* hrtimer handler: advance the timer by one period, track the expected
+ * expiry, invoke the sampling callback, and keep the timer running.
+ */
+static enum hrtimer_restart gator_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ int cpu = get_logical_cpu();
+
+ hrtimer_forward(hrtimer, per_cpu(hrtimer_expire, cpu), profiling_interval);
+ per_cpu(hrtimer_expire, cpu) = ktime_add(per_cpu(hrtimer_expire, cpu), profiling_interval);
+ (*callback)();
+ return HRTIMER_RESTART;
+}
+
+/* Arm this CPU's sampling timer (absolute, CPU-pinned). No-op if the
+ * timer is already active or sampling is disabled (interval == 0).
+ */
+static void gator_hrtimer_online(void)
+{
+ int cpu = get_logical_cpu();
+ struct hrtimer *hrtimer = &per_cpu(percpu_hrtimer, cpu);
+
+ if (per_cpu(hrtimer_is_active, cpu) || profiling_interval.tv64 == 0)
+ return;
+
+ per_cpu(hrtimer_is_active, cpu) = 1;
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer->function = gator_hrtimer_notify;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ /* On RT kernels run the handler in hard-irq context */
+ hrtimer->irqsafe = 1;
+#endif
+ per_cpu(hrtimer_expire, cpu) = ktime_add(hrtimer->base->get_time(), profiling_interval);
+ hrtimer_start(hrtimer, per_cpu(hrtimer_expire, cpu), HRTIMER_MODE_ABS_PINNED);
+}
+
+/* Cancel this CPU's sampling timer if it is active. */
+static void gator_hrtimer_offline(void)
+{
+ int cpu = get_logical_cpu();
+ struct hrtimer *hrtimer = &per_cpu(percpu_hrtimer, cpu);
+
+ if (!per_cpu(hrtimer_is_active, cpu))
+ return;
+
+ per_cpu(hrtimer_is_active, cpu) = 0;
+ hrtimer_cancel(hrtimer);
+}
+
+/* Record the sampling callback, mark every present CPU's timer inactive,
+ * and derive the tick period from the requested rate (interval is in Hz;
+ * <= 0 disables sampling). Always returns 0.
+ */
+static int gator_hrtimer_init(int interval, void (*func)(void))
+{
+ int cpu;
+
+ (callback) = (func);
+
+ for_each_present_cpu(cpu) {
+ per_cpu(hrtimer_is_active, cpu) = 0;
+ }
+
+ /* calculate profiling interval */
+ if (interval > 0)
+ profiling_interval = ns_to_ktime(1000000000UL / interval);
+ else
+ profiling_interval.tv64 = 0;
+
+ return 0;
+}
+
+/* Nothing to release; timers are torn down via gator_hrtimer_offline(). */
+static void gator_hrtimer_shutdown(void)
+{
+ /* empty */
+}
diff --git a/drivers/parrot/gator/gator_iks.c b/drivers/parrot/gator/gator_iks.c
new file mode 100644
index 00000000000000..fb78c10fd98745
--- /dev/null
+++ b/drivers/parrot/gator/gator_iks.c
@@ -0,0 +1,197 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#if GATOR_IKS_SUPPORT
+
+#include <linux/of.h>
+#include <asm/bL_switcher.h>
+#include <asm/smp_plat.h>
+#include <trace/events/power_cpu_migrate.h>
+
+static bool map_cpuids;
+static int mpidr_cpuids[NR_CPUS];
+static const struct gator_cpu *mpidr_cpus[NR_CPUS];
+static int __lcpu_to_pcpu[NR_CPUS];
+
+/* Scan the gator_cpus table (terminated by a zero cpuid entry) for the
+ * entry whose device-tree name matches name. Returns NULL if none match.
+ */
+static const struct gator_cpu *gator_find_cpu_by_dt_name(const char *const name)
+{
+ int i;
+
+ for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
+ const struct gator_cpu *const gator_cpu = &gator_cpus[i];
+
+ if (gator_cpu->dt_name != NULL && strcmp(gator_cpu->dt_name, name) == 0)
+ return gator_cpu;
+ }
+
+ return NULL;
+}
+
+/* Walk the device tree's cpu nodes, recording each CPU's MPIDR ("reg"
+ * property) and matching gator_cpu descriptor (from "compatible").
+ * Enables logical-to-physical cpu mapping (map_cpuids) only if a valid
+ * entry was found for every possible CPU.
+ */
+static void calc_first_cluster_size(void)
+{
+ int len;
+ const u32 *val;
+ const char *compatible;
+ struct device_node *cn = NULL;
+ int mpidr_cpuids_count = 0;
+
+ /* Zero is a valid cpuid, so initialize the array to 0xff's */
+ memset(&mpidr_cpuids, 0xff, sizeof(mpidr_cpuids));
+ memset(&mpidr_cpus, 0, sizeof(mpidr_cpus));
+
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+ BUG_ON(mpidr_cpuids_count >= NR_CPUS);
+
+ val = of_get_property(cn, "reg", &len);
+ if (!val || len != 4) {
+ pr_err("%s missing reg property\n", cn->full_name);
+ continue;
+ }
+ compatible = of_get_property(cn, "compatible", NULL);
+ if (compatible == NULL) {
+ pr_err("%s missing compatible property\n", cn->full_name);
+ continue;
+ }
+
+ mpidr_cpuids[mpidr_cpuids_count] = be32_to_cpup(val);
+ mpidr_cpus[mpidr_cpuids_count] = gator_find_cpu_by_dt_name(compatible);
+ ++mpidr_cpuids_count;
+ }
+
+ map_cpuids = (mpidr_cpuids_count == nr_cpu_ids);
+}
+
+/* Map an MPIDR value to its index in mpidr_cpuids[]. BUGs if the MPIDR
+ * was not recorded by calc_first_cluster_size().
+ */
+static int linearize_mpidr(int mpidr)
+{
+ int i;
+
+ for (i = 0; i < nr_cpu_ids; ++i) {
+ if (mpidr_cpuids[i] == mpidr)
+ return i;
+ }
+
+ BUG();
+}
+
+/* Translate a logical cpu number to the physical cpu it currently runs
+ * on. Identity mapping when IKS cpu mapping is not in effect.
+ */
+int lcpu_to_pcpu(const int lcpu)
+{
+ int pcpu;
+
+ if (!map_cpuids)
+ return lcpu;
+
+ BUG_ON(lcpu >= nr_cpu_ids || lcpu < 0);
+ pcpu = __lcpu_to_pcpu[lcpu];
+ BUG_ON(pcpu >= nr_cpu_ids || pcpu < 0);
+ return pcpu;
+}
+
+/* Inverse of lcpu_to_pcpu(): linear search of the mapping table; BUGs if
+ * the physical cpu has no logical owner.
+ */
+int pcpu_to_lcpu(const int pcpu)
+{
+ int lcpu;
+
+ if (!map_cpuids)
+ return pcpu;
+
+ BUG_ON(pcpu >= nr_cpu_ids || pcpu < 0);
+ for (lcpu = 0; lcpu < nr_cpu_ids; ++lcpu) {
+ if (__lcpu_to_pcpu[lcpu] == pcpu) {
+ BUG_ON(lcpu >= nr_cpu_ids || lcpu < 0);
+ return lcpu;
+ }
+ }
+ BUG();
+}
+
+/* Record that the current logical cpu now runs on the physical cpu
+ * identified by cpu_hwid (an MPIDR value).
+ */
+static void gator_update_cpu_mapping(u32 cpu_hwid)
+{
+ int lcpu = smp_processor_id();
+ int pcpu = linearize_mpidr(cpu_hwid & MPIDR_HWID_BITMASK);
+
+ BUG_ON(lcpu >= nr_cpu_ids || lcpu < 0);
+ BUG_ON(pcpu >= nr_cpu_ids || pcpu < 0);
+ __lcpu_to_pcpu[lcpu] = pcpu;
+}
+
+/* IKS switch is starting: tear down timers and event sources on the
+ * outgoing physical cpu ((void *)1 flags the migrate path).
+ */
+GATOR_DEFINE_PROBE(cpu_migrate_begin, TP_PROTO(u64 timestamp, u32 cpu_hwid))
+{
+ const int cpu = get_physical_cpu();
+
+ gator_timer_offline((void *)1);
+ gator_timer_offline_dispatch(cpu, true);
+}
+
+/* IKS switch finished: update the cpu mapping, then bring timers and
+ * event sources up on the incoming physical cpu.
+ */
+GATOR_DEFINE_PROBE(cpu_migrate_finish, TP_PROTO(u64 timestamp, u32 cpu_hwid))
+{
+ int cpu;
+
+ gator_update_cpu_mapping(cpu_hwid);
+
+ /* get_physical_cpu must be called after gator_update_cpu_mapping */
+ cpu = get_physical_cpu();
+ gator_timer_online_dispatch(cpu, true);
+ gator_timer_online((void *)1);
+}
+
+/* Snapshot of the current placement (emitted by bL_switcher_trace_trigger). */
+GATOR_DEFINE_PROBE(cpu_migrate_current, TP_PROTO(u64 timestamp, u32 cpu_hwid))
+{
+ gator_update_cpu_mapping(cpu_hwid);
+}
+
+/* Report the core name of every cpu with a known gator_cpu descriptor. */
+static void gator_send_iks_core_names(void)
+{
+ int cpu;
+ /* Send the cpu names */
+ preempt_disable();
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+ if (mpidr_cpus[cpu] != NULL)
+ gator_send_core_name(cpu, mpidr_cpus[cpu]->cpuid);
+ }
+ preempt_enable();
+}
+
+/* Register the three cpu_migrate tracepoints and seed the logical-to-
+ * physical map by triggering a bL switcher trace. Returns 0 when IKS
+ * mapping is unused or registration succeeds, else the first error.
+ */
+static int gator_migrate_start(void)
+{
+ int retval = 0;
+
+ if (!map_cpuids)
+ return retval;
+
+ if (retval == 0)
+ retval = GATOR_REGISTER_TRACE(cpu_migrate_begin);
+ if (retval == 0)
+ retval = GATOR_REGISTER_TRACE(cpu_migrate_finish);
+ if (retval == 0)
+ retval = GATOR_REGISTER_TRACE(cpu_migrate_current);
+ if (retval == 0) {
+ /* Initialize the logical to physical cpu mapping */
+ memset(&__lcpu_to_pcpu, 0xff, sizeof(__lcpu_to_pcpu));
+ bL_switcher_trace_trigger();
+ }
+ return retval;
+}
+
+/* Unregister the migrate tracepoints (no-op without IKS mapping). */
+static void gator_migrate_stop(void)
+{
+ if (!map_cpuids)
+ return;
+
+ GATOR_UNREGISTER_TRACE(cpu_migrate_current);
+ GATOR_UNREGISTER_TRACE(cpu_migrate_finish);
+ GATOR_UNREGISTER_TRACE(cpu_migrate_begin);
+}
+
+#else
+
+/* Stubs when IKS support is compiled out. */
+#define calc_first_cluster_size()
+#define gator_send_iks_core_names()
+#define gator_migrate_start() 0
+#define gator_migrate_stop()
+
+#endif
diff --git a/drivers/parrot/gator/gator_main.c b/drivers/parrot/gator/gator_main.c
new file mode 100644
index 00000000000000..d3814c7547ce60
--- /dev/null
+++ b/drivers/parrot/gator/gator_main.c
@@ -0,0 +1,1491 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/* This version must match the gator daemon version */
+#define PROTOCOL_VERSION 20
+static unsigned long gator_protocol_version = PROTOCOL_VERSION;
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/utsname.h>
+#include <linux/kthread.h>
+#include <asm/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include "gator.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#error kernels prior to 2.6.32 are not supported
+#endif
+
+#if defined(MODULE) && !defined(CONFIG_MODULES)
+#error Cannot build a module against a kernel that does not support modules. To resolve, either rebuild the kernel to support modules or build gator as part of the kernel.
+#endif
+
+#if !defined(CONFIG_GENERIC_TRACER) && !defined(CONFIG_TRACING)
+#error gator requires the kernel to have CONFIG_GENERIC_TRACER or CONFIG_TRACING defined
+#endif
+
+#ifndef CONFIG_PROFILING
+#error gator requires the kernel to have CONFIG_PROFILING defined
+#endif
+
+#ifndef CONFIG_HIGH_RES_TIMERS
+#error gator requires the kernel to have CONFIG_HIGH_RES_TIMERS defined to support PC sampling
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && defined(__arm__) && defined(CONFIG_SMP) && !defined(CONFIG_LOCAL_TIMERS)
+#error gator requires the kernel to have CONFIG_LOCAL_TIMERS defined on SMP systems
+#endif
+
+#if (GATOR_PERF_SUPPORT) && (!(GATOR_PERF_PMU_SUPPORT))
+#ifndef CONFIG_PERF_EVENTS
+#error gator requires the kernel to have CONFIG_PERF_EVENTS defined to support pmu hardware counters
+#elif !defined CONFIG_HW_PERF_EVENTS
+#error gator requires the kernel to have CONFIG_HW_PERF_EVENTS defined to support pmu hardware counters
+#endif
+#endif
+
+/******************************************************************************
+ * DEFINES
+ ******************************************************************************/
+#define SUMMARY_BUFFER_SIZE (1*1024)
+#define BACKTRACE_BUFFER_SIZE (128*1024)
+#define NAME_BUFFER_SIZE (64*1024)
+#define COUNTER_BUFFER_SIZE (64*1024) /* counters have the core as part of the data and the core value in the frame header may be discarded */
+#define BLOCK_COUNTER_BUFFER_SIZE (128*1024)
+#define ANNOTATE_BUFFER_SIZE (128*1024) /* annotate counters have the core as part of the data and the core value in the frame header may be discarded */
+#define SCHED_TRACE_BUFFER_SIZE (128*1024)
+#define IDLE_BUFFER_SIZE (32*1024) /* idle counters have the core as part of the data and the core value in the frame header may be discarded */
+#define ACTIVITY_BUFFER_SIZE (128*1024)
+
+#define NO_COOKIE 0U
+#define UNRESOLVED_COOKIE ~0U
+
+#define FRAME_SUMMARY 1
+#define FRAME_BACKTRACE 2
+#define FRAME_NAME 3
+#define FRAME_COUNTER 4
+#define FRAME_BLOCK_COUNTER 5
+#define FRAME_ANNOTATE 6
+#define FRAME_SCHED_TRACE 7
+#define FRAME_IDLE 9
+#define FRAME_ACTIVITY 13
+
+#define MESSAGE_END_BACKTRACE 1
+
+/* Name Frame Messages */
+#define MESSAGE_COOKIE 1
+#define MESSAGE_THREAD_NAME 2
+#define MESSAGE_LINK 4
+
+/* Scheduler Trace Frame Messages */
+#define MESSAGE_SCHED_SWITCH 1
+#define MESSAGE_SCHED_EXIT 2
+
+/* Idle Frame Messages */
+#define MESSAGE_IDLE_ENTER 1
+#define MESSAGE_IDLE_EXIT 2
+
+/* Summary Frame Messages */
+#define MESSAGE_SUMMARY 1
+#define MESSAGE_CORE_NAME 3
+
+/* Activity Frame Messages */
+#define MESSAGE_SWITCH 2
+#define MESSAGE_EXIT 3
+
+#define MAXSIZE_PACK32 5
+#define MAXSIZE_PACK64 10
+
+#define FRAME_HEADER_SIZE 3
+
+#if defined(__arm__)
+#define PC_REG regs->ARM_pc
+#elif defined(__aarch64__)
+#define PC_REG regs->pc
+#else
+#define PC_REG regs->ip
+#endif
+
+/* Per-cpu buffer indices; each pairs with the *_BUFFER_SIZE constant above
+ * and the FRAME_* type written into its frame headers. NUM_GATOR_BUFS is
+ * the array bound used throughout.
+ */
+enum {
+ SUMMARY_BUF,
+ BACKTRACE_BUF,
+ NAME_BUF,
+ COUNTER_BUF,
+ BLOCK_COUNTER_BUF,
+ ANNOTATE_BUF,
+ SCHED_TRACE_BUF,
+ IDLE_BUF,
+ ACTIVITY_BUF,
+ NUM_GATOR_BUFS
+};
+
+/******************************************************************************
+ * Globals
+ ******************************************************************************/
+static unsigned long gator_cpu_cores;
+/* Size of the largest buffer. Effectively constant, set in gator_op_create_files */
+static unsigned long userspace_buffer_size;
+static unsigned long gator_backtrace_depth;
+/* How often to commit the buffers for live in nanoseconds */
+static u64 gator_live_rate;
+
+static unsigned long gator_started;
+static u64 gator_monotonic_started;
+static u64 gator_sync_time;
+static u64 gator_hibernate_time;
+static unsigned long gator_buffer_opened;
+static unsigned long gator_timer_count;
+static unsigned long gator_response_type;
+static DEFINE_MUTEX(start_mutex);
+static DEFINE_MUTEX(gator_buffer_mutex);
+
+bool event_based_sampling;
+
+static DECLARE_WAIT_QUEUE_HEAD(gator_buffer_wait);
+static DECLARE_WAIT_QUEUE_HEAD(gator_annotate_wait);
+static struct timer_list gator_buffer_wake_up_timer;
+static bool gator_buffer_wake_run;
+/* Initialize semaphore unlocked to initialize memory values */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+static DECLARE_MUTEX(gator_buffer_wake_sem);
+#else
+static DEFINE_SEMAPHORE(gator_buffer_wake_sem);
+#endif
+static struct task_struct *gator_buffer_wake_thread;
+static LIST_HEAD(gator_events);
+
+static DEFINE_PER_CPU(u64, last_timestamp);
+
+static bool printed_monotonic_warning;
+
+static u32 gator_cpuids[NR_CPUS];
+static bool sent_core_name[NR_CPUS];
+
+static DEFINE_PER_CPU(bool, in_scheduler_context);
+
+/******************************************************************************
+ * Prototypes
+ ******************************************************************************/
+static u64 gator_get_time(void);
+static void gator_emit_perf_time(u64 time);
+static void gator_op_create_files(struct super_block *sb, struct dentry *root);
+
+/* gator_buffer is protected by being per_cpu and by having IRQs
+ * disabled when writing to it. Most marshal_* calls take care of this
+ * except for marshal_cookie*, marshal_backtrace* and marshal_frame
+ * where the caller is responsible for doing so. No synchronization is
+ * needed with the backtrace buffer as it is per cpu and is only used
+ * from the hrtimer. The annotate_lock must be held when using the
+ * annotation buffer as it is not per cpu. collect_counters which is
+ * the sole writer to the block counter frame is additionally
+ * protected by the per cpu collecting flag.
+ */
+
+/* Size of the buffer, must be a power of 2. Effectively constant, set in gator_op_setup. */
+static uint32_t gator_buffer_size[NUM_GATOR_BUFS];
+/* gator_buffer_size - 1, bitwise and with pos to get offset into the array. Effectively constant, set in gator_op_setup. */
+static uint32_t gator_buffer_mask[NUM_GATOR_BUFS];
+/* Read position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are read by userspace in userspace_buffer_read */
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_read);
+/* Write position in the buffer. Initialized to zero in gator_op_setup and incremented after bytes are written to the buffer */
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_write);
+/* Commit position in the buffer. Initialized to zero in gator_op_setup and incremented after a frame is ready to be read by userspace */
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], gator_buffer_commit);
+/* If set to false, decreases the number of bytes returned by
+ * buffer_bytes_available. Set in buffer_check_space if no space is
+ * remaining. Initialized to true in gator_op_setup. This means that
+ * if we run out of space, continue to report that no space is
+ * available until bytes are read by userspace
+ */
+static DEFINE_PER_CPU(int[NUM_GATOR_BUFS], buffer_space_available);
+/* The buffer. Allocated in gator_op_setup */
+static DEFINE_PER_CPU(char *[NUM_GATOR_BUFS], gator_buffer);
+/* The time after which the buffer should be committed for live display */
+static DEFINE_PER_CPU(u64, gator_buffer_commit_time);
+
+/* List of all gator events - new events must be added to this list.
+ * Each init function is declared __weak, so events compiled out of this
+ * build resolve to NULL; gator_init() skips NULL entries before calling.
+ */
+#define GATOR_EVENTS_LIST \
+ GATOR_EVENT(gator_events_armv6_init) \
+ GATOR_EVENT(gator_events_armv7_init) \
+ GATOR_EVENT(gator_events_block_init) \
+ GATOR_EVENT(gator_events_ccn504_init) \
+ GATOR_EVENT(gator_events_irq_init) \
+ GATOR_EVENT(gator_events_l2c310_init) \
+ GATOR_EVENT(gator_events_mali_init) \
+ GATOR_EVENT(gator_events_mali_midgard_hw_init) \
+ GATOR_EVENT(gator_events_mali_midgard_init) \
+ GATOR_EVENT(gator_events_meminfo_init) \
+ GATOR_EVENT(gator_events_mmapped_init) \
+ GATOR_EVENT(gator_events_net_init) \
+ GATOR_EVENT(gator_events_perf_pmu_init) \
+ GATOR_EVENT(gator_events_sched_init) \
+ GATOR_EVENT(gator_events_scorpion_init) \
+
+/* Weak declarations: absent init functions become NULL pointers */
+#define GATOR_EVENT(EVENT_INIT) __weak int EVENT_INIT(void);
+GATOR_EVENTS_LIST
+#undef GATOR_EVENT
+
+/* Table of init-function pointers iterated by gator_init() */
+static int (*gator_events_list[])(void) = {
+#define GATOR_EVENT(EVENT_INIT) EVENT_INIT,
+GATOR_EVENTS_LIST
+#undef GATOR_EVENT
+};
+
+/******************************************************************************
+ * Application Includes
+ ******************************************************************************/
+#include "gator_fs.c"
+#include "gator_buffer_write.c"
+#include "gator_buffer.c"
+#include "gator_marshaling.c"
+#include "gator_hrtimer_gator.c"
+#include "gator_cookies.c"
+#include "gator_annotate.c"
+#include "gator_trace_sched.c"
+#include "gator_trace_power.c"
+#include "gator_trace_gpu.c"
+#include "gator_backtrace.c"
+
+/******************************************************************************
+ * Misc
+ ******************************************************************************/
+
+/* Table of known CPU implementations, terminated by an all-zero entry
+ * (cpuid 0 is the sentinel the lookup loops test for). Entries without a
+ * .dt_name have no device-tree compatible string.
+ */
+static const struct gator_cpu gator_cpus[] = {
+ {
+ .cpuid = ARM1136,
+ .core_name = "ARM1136",
+ .pmnc_name = "ARM_ARM11",
+ .dt_name = "arm,arm1136",
+ .pmnc_counters = 3,
+ },
+ {
+ .cpuid = ARM1156,
+ .core_name = "ARM1156",
+ .pmnc_name = "ARM_ARM11",
+ .dt_name = "arm,arm1156",
+ .pmnc_counters = 3,
+ },
+ {
+ .cpuid = ARM1176,
+ .core_name = "ARM1176",
+ .pmnc_name = "ARM_ARM11",
+ .dt_name = "arm,arm1176",
+ .pmnc_counters = 3,
+ },
+ {
+ .cpuid = ARM11MPCORE,
+ .core_name = "ARM11MPCore",
+ .pmnc_name = "ARM_ARM11MPCore",
+ .dt_name = "arm,arm11mpcore",
+ .pmnc_counters = 3,
+ },
+ {
+ .cpuid = CORTEX_A5,
+ .core_name = "Cortex-A5",
+ .pmnc_name = "ARMv7_Cortex_A5",
+ .dt_name = "arm,cortex-a5",
+ .pmnc_counters = 2,
+ },
+ {
+ .cpuid = CORTEX_A7,
+ .core_name = "Cortex-A7",
+ .pmnc_name = "ARMv7_Cortex_A7",
+ .dt_name = "arm,cortex-a7",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A8,
+ .core_name = "Cortex-A8",
+ .pmnc_name = "ARMv7_Cortex_A8",
+ .dt_name = "arm,cortex-a8",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A9,
+ .core_name = "Cortex-A9",
+ .pmnc_name = "ARMv7_Cortex_A9",
+ .dt_name = "arm,cortex-a9",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = CORTEX_A15,
+ .core_name = "Cortex-A15",
+ .pmnc_name = "ARMv7_Cortex_A15",
+ .dt_name = "arm,cortex-a15",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = CORTEX_A17,
+ .core_name = "Cortex-A17",
+ .pmnc_name = "ARMv7_Cortex_A17",
+ .dt_name = "arm,cortex-a17",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = SCORPION,
+ .core_name = "Scorpion",
+ .pmnc_name = "Scorpion",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = SCORPIONMP,
+ .core_name = "ScorpionMP",
+ .pmnc_name = "ScorpionMP",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAITSIM,
+ .core_name = "KraitSIM",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAIT,
+ .core_name = "Krait",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = KRAIT_S4_PRO,
+ .core_name = "Krait S4 Pro",
+ .pmnc_name = "Krait",
+ .pmnc_counters = 4,
+ },
+ {
+ .cpuid = CORTEX_A53,
+ .core_name = "Cortex-A53",
+ .pmnc_name = "ARM_Cortex-A53",
+ .dt_name = "arm,cortex-a53",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = CORTEX_A57,
+ .core_name = "Cortex-A57",
+ .pmnc_name = "ARM_Cortex-A57",
+ .dt_name = "arm,cortex-a57",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = AARCH64,
+ .core_name = "AArch64",
+ .pmnc_name = "ARM_AArch64",
+ .pmnc_counters = 6,
+ },
+ {
+ .cpuid = OTHER,
+ .core_name = "Other",
+ .pmnc_name = "Other",
+ .pmnc_counters = 6,
+ },
+ {}
+};
+
+/* Linear search of gator_cpus for an exact cpuid match.
+ * Returns the matching entry, or NULL if the cpuid is unknown.
+ */
+const struct gator_cpu *gator_find_cpu_by_cpuid(const u32 cpuid)
+{
+ int i;
+
+ for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
+ const struct gator_cpu *const gator_cpu = &gator_cpus[i];
+
+ if (gator_cpu->cpuid == cpuid)
+ return gator_cpu;
+ }
+
+ return NULL;
+}
+
+/* Historical ("ARMv7 Cortex-") and current ("ARMv7_Cortex_") PMU name
+ * prefixes; both are accepted by the lookup below for compatibility.
+ */
+static const char OLD_PMU_PREFIX[] = "ARMv7 Cortex-";
+static const char NEW_PMU_PREFIX[] = "ARMv7_Cortex_";
+
+/* Find a gator_cpus entry by its PMU name, case-insensitively.
+ * Accepts either an exact match or a name using the old prefix against a
+ * table entry using the new prefix. Returns NULL when nothing matches.
+ */
+const struct gator_cpu *gator_find_cpu_by_pmu_name(const char *const name)
+{
+ int i;
+
+ for (i = 0; gator_cpus[i].cpuid != 0; ++i) {
+ const struct gator_cpu *const gator_cpu = &gator_cpus[i];
+
+ if (gator_cpu->pmnc_name != NULL &&
+ /* Do the names match exactly? */
+ (strcasecmp(gator_cpu->pmnc_name, name) == 0 ||
+ /* Do these names match but have the old vs new prefix? */
+ ((strncasecmp(name, OLD_PMU_PREFIX, sizeof(OLD_PMU_PREFIX) - 1) == 0 &&
+ strncasecmp(gator_cpu->pmnc_name, NEW_PMU_PREFIX, sizeof(NEW_PMU_PREFIX) - 1) == 0 &&
+ strcasecmp(name + sizeof(OLD_PMU_PREFIX) - 1, gator_cpu->pmnc_name + sizeof(NEW_PMU_PREFIX) - 1) == 0))))
+ return gator_cpu;
+ }
+
+ return NULL;
+}
+
+/* Read the CPU identity of the current core. On ARM/AArch64 this returns
+ * bits [15:4] of the MIDR (the part-number field); elsewhere it returns the
+ * OTHER placeholder.
+ */
+u32 gator_cpuid(void)
+{
+#if defined(__arm__) || defined(__aarch64__)
+ u32 val;
+#if !defined(__aarch64__)
+ asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (val));
+#else
+ asm volatile("mrs %0, midr_el1" : "=r" (val));
+#endif
+ return (val >> 4) & 0xfff;
+#else
+ return OTHER;
+#endif
+}
+
+/* Wake any reader blocked in userspace_buffer_read; the unused argument
+ * matches the timer-callback signature of gator_buffer_wake_up_timer.
+ */
+static void gator_buffer_wake_up(unsigned long data)
+{
+ wake_up(&gator_buffer_wait);
+}
+
+/* Body of the gator_bwake kthread: sleep on the semaphore until poked,
+ * coalesce any queued pokes, then wake the buffer waiters. Exits when
+ * killed or when gator_buffer_wake_run is cleared (see gator_stop).
+ */
+static int gator_buffer_wake_func(void *data)
+{
+ for (;;) {
+ if (down_killable(&gator_buffer_wake_sem))
+ break;
+
+ /* Eat up any pending events */
+ while (!down_trylock(&gator_buffer_wake_sem))
+ ;
+
+ if (!gator_buffer_wake_run)
+ break;
+
+ gator_buffer_wake_up(0);
+ }
+
+ return 0;
+}
+
+/******************************************************************************
+ * Commit interface
+ ******************************************************************************/
+/* Scan every present cpu's buffers for committed data not yet consumed by
+ * userspace (commit position != read position). On success stores the first
+ * such cpu/buffer pair and returns true; otherwise stores -1/-1 and returns
+ * false.
+ */
+static bool buffer_commit_ready(int *cpu, int *buftype)
+{
+ int cpu_x, x;
+
+ for_each_present_cpu(cpu_x) {
+ for (x = 0; x < NUM_GATOR_BUFS; x++)
+ if (per_cpu(gator_buffer_commit, cpu_x)[x] != per_cpu(gator_buffer_read, cpu_x)[x]) {
+ *cpu = cpu_x;
+ *buftype = x;
+ return true;
+ }
+ }
+ *cpu = -1;
+ *buftype = -1;
+ return false;
+}
+
+/******************************************************************************
+ * hrtimer interrupt processing
+ ******************************************************************************/
+/* hrtimer tick: sample whatever was interrupted on this core */
+static void gator_timer_interrupt(void)
+{
+ struct pt_regs *const regs = get_irq_regs();
+
+ gator_backtrace_handler(regs);
+}
+
+/* Record one sample on the current core: emit a backtrace for the
+ * interrupted context, then gather counters unless this core is already
+ * mid-collection (per-cpu "collecting" flag guards the block counter frame).
+ */
+void gator_backtrace_handler(struct pt_regs *const regs)
+{
+ u64 time = gator_get_time();
+ int cpu = get_physical_cpu();
+
+ /* Output backtrace */
+ gator_add_sample(cpu, regs, time);
+
+ /* Collect counters */
+ if (!per_cpu(collecting, cpu))
+ collect_counters(time, current, false);
+
+ /* No buffer flushing occurs during sched switch for RT-Preempt full. The block counter frame will be flushed by collect_counters, but the sched buffer needs to be explicitly flushed */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ buffer_check(cpu, SCHED_TRACE_BUF, time);
+#endif
+}
+
+/* Non-zero while the timer/event machinery is live (see gator_timer_start/stop) */
+static int gator_running;
+
+/* This function runs in interrupt context and on the appropriate core.
+ * Tears down sampling on the current core: stops sched/power tracing, the
+ * hrtimer (unless this is an IKS migration -- callers pass a non-NULL
+ * migrate, see gator_iks.c), emits each event source's final counters, and
+ * commits every buffer owned by this core.
+ */
+static void gator_timer_offline(void *migrate)
+{
+ struct gator_interface *gi;
+ int i, len, cpu = get_physical_cpu();
+ int *buffer;
+ u64 time;
+
+ gator_trace_sched_offline();
+ gator_trace_power_offline();
+
+ if (!migrate)
+ gator_hrtimer_offline();
+
+ /* Offline any events and output counters */
+ time = gator_get_time();
+ if (marshal_event_header(time)) {
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->offline) {
+ len = gi->offline(&buffer, migrate);
+ marshal_event(len, buffer);
+ }
+ }
+ /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
+ buffer_check(cpu, BLOCK_COUNTER_BUF, time);
+ }
+
+ /* Flush all buffers on this core */
+ for (i = 0; i < NUM_GATOR_BUFS; i++)
+ gator_commit_buffer(cpu, i, time);
+}
+
+/* This function runs in interrupt context and may be running on a core other
+ * than core 'cpu'. Invokes each event source's offline_dispatch hook for the
+ * given (physical) cpu.
+ */
+static void gator_timer_offline_dispatch(int cpu, bool migrate)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->offline_dispatch)
+ gi->offline_dispatch(cpu, migrate);
+ }
+}
+
+/* Stop sampling everywhere: run gator_timer_offline on every online core,
+ * dispatch per-cpu offline hooks, then shut the hrtimer down. Idempotent
+ * via the gator_running flag.
+ */
+static void gator_timer_stop(void)
+{
+ int cpu;
+
+ if (gator_running) {
+ on_each_cpu(gator_timer_offline, NULL, 1);
+ for_each_online_cpu(cpu) {
+ gator_timer_offline_dispatch(lcpu_to_pcpu(cpu), false);
+ }
+
+ gator_running = 0;
+ gator_hrtimer_shutdown();
+ }
+}
+
+/* Marshal a core-name record for 'cpu', but only when the name has not been
+ * sent yet or the cpuid changed (e.g. after an IKS migration). ARM-only; a
+ * no-op on other architectures.
+ */
+static void gator_send_core_name(const int cpu, const u32 cpuid)
+{
+#if defined(__arm__) || defined(__aarch64__)
+ if (!sent_core_name[cpu] || (cpuid != gator_cpuids[cpu])) {
+ const struct gator_cpu *const gator_cpu = gator_find_cpu_by_cpuid(cpuid);
+ const char *core_name = NULL;
+ char core_name_buf[32];
+
+ /* Save off this cpuid */
+ gator_cpuids[cpu] = cpuid;
+ if (gator_cpu != NULL) {
+ core_name = gator_cpu->core_name;
+ } else {
+ /* u32 vs -1: relies on the usual implicit conversion of -1 to 0xffffffff */
+ if (cpuid == -1)
+ snprintf(core_name_buf, sizeof(core_name_buf), "Unknown");
+ else
+ snprintf(core_name_buf, sizeof(core_name_buf), "Unknown (0x%.3x)", cpuid);
+ core_name = core_name_buf;
+ }
+
+ marshal_core_name(cpu, cpuid, core_name);
+ sent_core_name[cpu] = true;
+ }
+#endif
+}
+
+/* Per-cpu cross-call helper: cache the current core's cpuid */
+static void gator_read_cpuid(void *arg)
+{
+ gator_cpuids[get_physical_cpu()] = gator_cpuid();
+}
+
+/* This function runs in interrupt context and on the appropriate core.
+ * Brings sampling up on the current core: records the task currently
+ * running, restarts power tracing, emits each event source's initial
+ * counters, starts the hrtimer (unless this is an IKS migration) and sends
+ * the core name.
+ */
+static void gator_timer_online(void *migrate)
+{
+ struct gator_interface *gi;
+ int len, cpu = get_physical_cpu();
+ int *buffer;
+ u64 time;
+
+ /* Send what is currently running on this core */
+ marshal_sched_trace_switch(current->pid, 0);
+
+ gator_trace_power_online();
+
+ /* online any events and output counters */
+ time = gator_get_time();
+ if (marshal_event_header(time)) {
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->online) {
+ len = gi->online(&buffer, migrate);
+ marshal_event(len, buffer);
+ }
+ }
+ /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
+ buffer_check(cpu, BLOCK_COUNTER_BUF, time);
+ }
+
+ if (!migrate)
+ gator_hrtimer_online();
+
+ gator_send_core_name(cpu, gator_cpuid());
+}
+
+/* This function runs in interrupt context and may be running on a core other
+ * than core 'cpu'. Invokes each event source's online_dispatch hook for the
+ * given (physical) cpu.
+ */
+static void gator_timer_online_dispatch(int cpu, bool migrate)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->online_dispatch)
+ gi->online_dispatch(cpu, migrate);
+ }
+}
+
+#include "gator_iks.c"
+
+/* Start sampling on all cores at 'sample_rate' Hz (0 disables the hrtimer;
+ * event-based sampling forces 0). Sends core names, dispatches per-cpu
+ * online hooks and runs gator_timer_online everywhere.
+ * Returns 0 on success, -1 if the hrtimer could not be initialized.
+ */
+static int gator_timer_start(unsigned long sample_rate)
+{
+ int cpu;
+
+ if (gator_running) {
+ pr_notice("gator: already running\n");
+ return 0;
+ }
+
+ gator_running = 1;
+
+ /* event based sampling trumps hr timer based sampling */
+ if (event_based_sampling)
+ sample_rate = 0;
+
+ if (gator_hrtimer_init(sample_rate, gator_timer_interrupt) == -1)
+ return -1;
+
+ /* Send off the previously saved cpuids */
+ for_each_present_cpu(cpu) {
+ preempt_disable();
+ gator_send_core_name(cpu, gator_cpuids[cpu]);
+ preempt_enable();
+ }
+
+ gator_send_iks_core_names();
+ for_each_online_cpu(cpu) {
+ gator_timer_online_dispatch(lcpu_to_pcpu(cpu), false);
+ }
+ on_each_cpu(gator_timer_online, NULL, 1);
+
+ return 0;
+}
+
+/* Return nanoseconds elapsed since gator_monotonic_started, based on the
+ * raw monotonic clock (to match CLOCK_MONOTONIC_RAW in the daemon). Backward
+ * jumps are clamped per-cpu to the last value seen; jumps larger than 0.5ms
+ * are logged once. Callers must have preemption disabled (uses
+ * smp_processor_id and per-cpu state).
+ */
+static u64 gator_get_time(void)
+{
+ struct timespec ts;
+ u64 timestamp;
+ u64 prev_timestamp;
+ u64 delta;
+ int cpu = smp_processor_id();
+
+ /* Match clock_gettime(CLOCK_MONOTONIC_RAW, &ts) from userspace */
+ getrawmonotonic(&ts);
+ timestamp = timespec_to_ns(&ts);
+
+ /* getrawmonotonic is not monotonic on all systems. Detect and
+ * attempt to correct these cases. up to 0.5ms delta has been seen
+ * on some systems, which can skew Streamline data when viewing at
+ * high resolution. This doesn't work well with interrupts, but that
+ * it's OK - the real concern is to catch big jumps in time
+ */
+ prev_timestamp = per_cpu(last_timestamp, cpu);
+ if (prev_timestamp <= timestamp) {
+ per_cpu(last_timestamp, cpu) = timestamp;
+ } else {
+ delta = prev_timestamp - timestamp;
+ /* Log the error once */
+ if (!printed_monotonic_warning && delta > 500000) {
+ pr_err("%s: getrawmonotonic is not monotonic cpu: %i delta: %lli\nSkew in Streamline data may be present at the fine zoom levels\n", __func__, cpu, delta);
+ printed_monotonic_warning = true;
+ }
+ timestamp = prev_timestamp;
+ }
+
+ return timestamp - gator_monotonic_started;
+}
+
+/* At most once per second (tracked by gator_sync_time), emit a sync record
+ * pairing gator time with local_clock() so perf timestamps can be
+ * correlated. When not live-streaming, also commit the counter buffer so
+ * the record is not held back. Only on kernels >= 3.10 (local_clock use).
+ */
+static void gator_emit_perf_time(u64 time)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+ if (time >= gator_sync_time) {
+ marshal_event_single64(0, -1, local_clock());
+ gator_sync_time += NSEC_PER_SEC;
+ if (gator_live_rate <= 0) {
+ gator_commit_buffer(get_physical_cpu(), COUNTER_BUF, time);
+ }
+ }
+#endif
+}
+
+/******************************************************************************
+ * cpu hotplug and pm notifiers
+ ******************************************************************************/
+/* CPU hotplug callback: offline a core's sampling before it goes down and
+ * re-online it after it comes up, translating the logical cpu from hcpu to
+ * its physical counterpart first.
+ */
+static int __cpuinit gator_hotcpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ int cpu = lcpu_to_pcpu((long)hcpu);
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ smp_call_function_single(cpu, gator_timer_offline, NULL, 1);
+ gator_timer_offline_dispatch(cpu, false);
+ break;
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ gator_timer_online_dispatch(cpu, false);
+ smp_call_function_single(cpu, gator_timer_online, NULL, 1);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata gator_hotcpu_notifier = {
+ .notifier_call = gator_hotcpu_notify,
+};
+
+/* n.b. calling "on_each_cpu" only runs on those that are online.
+ * Registered linux events are not disabled, so their counters will
+ * continue to collect
+ */
+/* PM callback: quiesce sampling across suspend/hibernate and restart it on
+ * resume, compensating gator_monotonic_started for wall-clock time spent
+ * asleep (the raw monotonic clock used by gator_get_time does not advance
+ * during sleep).
+ */
+static int gator_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy)
+{
+ int cpu;
+ struct timespec ts;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ /* Stop reacting to hotplug/sched events while suspended */
+ unregister_hotcpu_notifier(&gator_hotcpu_notifier);
+ unregister_scheduler_tracepoints();
+ on_each_cpu(gator_timer_offline, NULL, 1);
+ for_each_online_cpu(cpu) {
+ gator_timer_offline_dispatch(lcpu_to_pcpu(cpu), false);
+ }
+
+ /* Record the wallclock hibernate time */
+ getnstimeofday(&ts);
+ gator_hibernate_time = timespec_to_ns(&ts) - gator_get_time();
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ /* Adjust gator_monotonic_started for the time spent sleeping, as gator_get_time does not account for it */
+ if (gator_hibernate_time > 0) {
+ getnstimeofday(&ts);
+ gator_monotonic_started += gator_hibernate_time + gator_get_time() - timespec_to_ns(&ts);
+ gator_hibernate_time = 0;
+ }
+
+ for_each_online_cpu(cpu) {
+ gator_timer_online_dispatch(lcpu_to_pcpu(cpu), false);
+ }
+ on_each_cpu(gator_timer_online, NULL, 1);
+ register_scheduler_tracepoints();
+ register_hotcpu_notifier(&gator_hotcpu_notifier);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gator_pm_notifier = {
+ .notifier_call = gator_pm_notify,
+};
+
+/* Register the hotplug and PM notifiers; returns 0 or the first
+ * registration error. NOTE(review): if register_pm_notifier fails, the
+ * hotcpu notifier stays registered until gator_notifier_stop runs in the
+ * caller's unwind path -- confirm that ordering is intended.
+ */
+static int gator_notifier_start(void)
+{
+ int retval;
+
+ retval = register_hotcpu_notifier(&gator_hotcpu_notifier);
+ if (retval == 0)
+ retval = register_pm_notifier(&gator_pm_notifier);
+ return retval;
+}
+
+/* Unregister both notifiers, in reverse order */
+static void gator_notifier_stop(void)
+{
+ unregister_pm_notifier(&gator_pm_notifier);
+ unregister_hotcpu_notifier(&gator_hotcpu_notifier);
+}
+
+/******************************************************************************
+ * Main
+ ******************************************************************************/
+/* Emit the capture's summary frame: uname string, wall-clock timestamp,
+ * system uptime and the monotonic base that all subsequent gator timestamps
+ * are measured from (gator_monotonic_started is reset here so gator_get_time
+ * restarts at zero).
+ */
+static void gator_summary(void)
+{
+ u64 timestamp, uptime;
+ struct timespec ts;
+ char uname_buf[512];
+
+ snprintf(uname_buf, sizeof(uname_buf), "%s %s %s %s %s GNU/Linux", utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine);
+
+ getnstimeofday(&ts);
+ timestamp = timespec_to_ns(&ts);
+
+ /* Similar to reading /proc/uptime from fs/proc/uptime.c, calculate uptime */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+ {
+ void (*m2b)(struct timespec *ts);
+
+ do_posix_clock_monotonic_gettime(&ts);
+ /* monotonic_to_bootbased is not defined for some versions of Android */
+ m2b = symbol_get(monotonic_to_bootbased);
+ if (m2b)
+ m2b(&ts);
+ }
+#else
+ get_monotonic_boottime(&ts);
+#endif
+ uptime = timespec_to_ns(&ts);
+
+ /* Disable preemption as gator_get_time calls smp_processor_id to verify time is monotonic */
+ preempt_disable();
+ /* Set monotonic_started to zero as gator_get_time is uptime minus monotonic_started */
+ gator_monotonic_started = 0;
+ gator_monotonic_started = gator_get_time();
+
+ marshal_summary(timestamp, uptime, gator_monotonic_started, uname_buf);
+ gator_sync_time = 0;
+ gator_emit_perf_time(gator_monotonic_started);
+ preempt_enable();
+}
+
+/* Register an event source with gator; always succeeds (returns 0) */
+int gator_events_install(struct gator_interface *interface)
+{
+ list_add_tail(&interface->list, &gator_events);
+
+ return 0;
+}
+
+/* Allocate the next driver-side counter key (odd numbers only) */
+int gator_events_get_key(void)
+{
+ /* key 0 is reserved as a timestamp. key 1 is reserved as the marker
+ * for thread specific counters. key 2 is reserved as the marker for
+ * core. Odd keys are assigned by the driver, even keys by the
+ * daemon.
+ */
+ static int key = 3;
+ const int ret = key;
+
+ key += 2;
+ return ret;
+}
+
+/* One-time module init: size the IKS cluster map, call every event source's
+ * init hook (entries compiled out are NULL, thanks to the __weak
+ * declarations above) and initialize sched/power tracing state.
+ */
+static int gator_init(void)
+{
+ int i;
+
+ calc_first_cluster_size();
+
+ /* events sources */
+ for (i = 0; i < ARRAY_SIZE(gator_events_list); i++)
+ if (gator_events_list[i])
+ gator_events_list[i]();
+
+ gator_trace_sched_init();
+ gator_trace_power_init();
+
+ return 0;
+}
+
+/* Module teardown: give each event source a chance to clean up */
+static void gator_exit(void)
+{
+ struct gator_interface *gi;
+
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->shutdown)
+ gi->shutdown();
+}
+
+/* Bring the whole capture pipeline up, in dependency order: wake thread,
+ * IKS migration probes, per-cpu frame headers, summary frame, event sources,
+ * cookies, annotate, sched/power/gpu tracing, timers, notifiers. On any
+ * failure the goto ladder unwinds exactly the stages already started, in
+ * reverse order, and -1 is returned. Returns 0 on success.
+ */
+static int gator_start(void)
+{
+ unsigned long cpu, i;
+ struct gator_interface *gi;
+
+ gator_buffer_wake_run = true;
+ gator_buffer_wake_thread = kthread_run(gator_buffer_wake_func, NULL, "gator_bwake");
+ if (IS_ERR(gator_buffer_wake_thread))
+ goto bwake_failure;
+
+ if (gator_migrate_start())
+ goto migrate_failure;
+
+ /* Initialize the buffer with the frame type and core */
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < NUM_GATOR_BUFS; i++)
+ marshal_frame(cpu, i);
+ per_cpu(last_timestamp, cpu) = 0;
+ }
+ printed_monotonic_warning = false;
+
+ /* Capture the start time */
+ gator_summary();
+
+ /* start all events */
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->start && gi->start() != 0) {
+ struct list_head *ptr = gi->list.prev;
+
+ /* Walk back and stop the events already started */
+ while (ptr != &gator_events) {
+ gi = list_entry(ptr, struct gator_interface, list);
+
+ if (gi->stop)
+ gi->stop();
+
+ ptr = ptr->prev;
+ }
+ goto events_failure;
+ }
+ }
+
+ /* cookies shall be initialized before trace_sched_start() and gator_timer_start() */
+ if (cookies_initialize())
+ goto cookies_failure;
+ if (gator_annotate_start())
+ goto annotate_failure;
+ if (gator_trace_sched_start())
+ goto sched_failure;
+ if (gator_trace_power_start())
+ goto power_failure;
+ if (gator_trace_gpu_start())
+ goto gpu_failure;
+ if (gator_timer_start(gator_timer_count))
+ goto timer_failure;
+ if (gator_notifier_start())
+ goto notifier_failure;
+
+ return 0;
+
+notifier_failure:
+ gator_timer_stop();
+timer_failure:
+ gator_trace_gpu_stop();
+gpu_failure:
+ gator_trace_power_stop();
+power_failure:
+ gator_trace_sched_stop();
+sched_failure:
+ gator_annotate_stop();
+annotate_failure:
+ cookies_release();
+cookies_failure:
+ /* stop all events */
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->stop)
+ gi->stop();
+events_failure:
+ gator_migrate_stop();
+migrate_failure:
+ gator_buffer_wake_run = false;
+ up(&gator_buffer_wake_sem);
+ gator_buffer_wake_thread = NULL;
+bwake_failure:
+
+ return -1;
+}
+
+/* Tear the capture pipeline down, mirroring gator_start's unwind order, and
+ * finally signal the wake thread to exit via gator_buffer_wake_run.
+ */
+static void gator_stop(void)
+{
+ struct gator_interface *gi;
+
+ gator_annotate_stop();
+ gator_trace_sched_stop();
+ gator_trace_power_stop();
+ gator_trace_gpu_stop();
+
+ /* stop all interrupt callback reads before tearing down other interfaces */
+ gator_notifier_stop(); /* should be called before gator_timer_stop to avoid re-enabling the hrtimer after it has been offlined */
+ gator_timer_stop();
+
+ /* stop all events */
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->stop)
+ gi->stop();
+
+ gator_migrate_stop();
+
+ gator_buffer_wake_run = false;
+ up(&gator_buffer_wake_sem);
+ gator_buffer_wake_thread = NULL;
+}
+
+/******************************************************************************
+ * Filesystem
+ ******************************************************************************/
+/* fopen("buffer") */
+/* Allocate and reset every per-cpu buffer (annotate uses a single buffer on
+ * cpu 0 only; other cpus get NULL). Sizes must be powers of two since
+ * offsets are computed with gator_buffer_mask. Returns 0, -ENOEXEC for a
+ * bad size, or -ENOMEM. NOTE(review): on failure, buffers allocated so far
+ * are not freed here -- presumably gator_shutdown reclaims them; confirm.
+ */
+static int gator_op_setup(void)
+{
+ int err = 0;
+ int cpu, i;
+
+ mutex_lock(&start_mutex);
+
+ gator_buffer_size[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE;
+ gator_buffer_mask[SUMMARY_BUF] = SUMMARY_BUFFER_SIZE - 1;
+
+ gator_buffer_size[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE;
+ gator_buffer_mask[BACKTRACE_BUF] = BACKTRACE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[NAME_BUF] = NAME_BUFFER_SIZE;
+ gator_buffer_mask[NAME_BUF] = NAME_BUFFER_SIZE - 1;
+
+ gator_buffer_size[COUNTER_BUF] = COUNTER_BUFFER_SIZE;
+ gator_buffer_mask[COUNTER_BUF] = COUNTER_BUFFER_SIZE - 1;
+
+ gator_buffer_size[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE;
+ gator_buffer_mask[BLOCK_COUNTER_BUF] = BLOCK_COUNTER_BUFFER_SIZE - 1;
+
+ gator_buffer_size[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE;
+ gator_buffer_mask[ANNOTATE_BUF] = ANNOTATE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE;
+ gator_buffer_mask[SCHED_TRACE_BUF] = SCHED_TRACE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[IDLE_BUF] = IDLE_BUFFER_SIZE;
+ gator_buffer_mask[IDLE_BUF] = IDLE_BUFFER_SIZE - 1;
+
+ gator_buffer_size[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE;
+ gator_buffer_mask[ACTIVITY_BUF] = ACTIVITY_BUFFER_SIZE - 1;
+
+ /* Initialize percpu per buffer variables */
+ for (i = 0; i < NUM_GATOR_BUFS; i++) {
+ /* Verify buffers are a power of 2 */
+ if (gator_buffer_size[i] & (gator_buffer_size[i] - 1)) {
+ err = -ENOEXEC;
+ goto setup_error;
+ }
+
+ for_each_present_cpu(cpu) {
+ per_cpu(gator_buffer_read, cpu)[i] = 0;
+ per_cpu(gator_buffer_write, cpu)[i] = 0;
+ per_cpu(gator_buffer_commit, cpu)[i] = 0;
+ per_cpu(buffer_space_available, cpu)[i] = true;
+ per_cpu(gator_buffer_commit_time, cpu) = gator_live_rate;
+
+ /* Annotation is a special case that only uses a single buffer */
+ if (cpu > 0 && i == ANNOTATE_BUF) {
+ per_cpu(gator_buffer, cpu)[i] = NULL;
+ continue;
+ }
+
+ per_cpu(gator_buffer, cpu)[i] = vmalloc(gator_buffer_size[i]);
+ if (!per_cpu(gator_buffer, cpu)[i]) {
+ err = -ENOMEM;
+ goto setup_error;
+ }
+ }
+ }
+
+setup_error:
+ mutex_unlock(&start_mutex);
+ return err;
+}
+
+/* Actually start profiling (echo 1>/dev/gator/enable) */
+/* Serialized by start_mutex; fails with -EINVAL if already started or if
+ * gator_start() fails. Returns 0 on success.
+ */
+static int gator_op_start(void)
+{
+ int err = 0;
+
+ mutex_lock(&start_mutex);
+
+ if (gator_started || gator_start())
+ err = -EINVAL;
+ else
+ gator_started = 1;
+
+ mutex_unlock(&start_mutex);
+
+ return err;
+}
+
+/* echo 0>/dev/gator/enable */
+/* Stop a running capture: tear the pipeline down, reset the started state
+ * and time base, release cookies, and wake any blocked readers so they see
+ * end-of-capture. No-op if not started.
+ */
+static void gator_op_stop(void)
+{
+ mutex_lock(&start_mutex);
+
+ if (gator_started) {
+ gator_stop();
+
+ mutex_lock(&gator_buffer_mutex);
+
+ gator_started = 0;
+ gator_monotonic_started = 0;
+ cookies_release();
+ wake_up(&gator_buffer_wait);
+
+ mutex_unlock(&gator_buffer_mutex);
+ }
+
+ mutex_unlock(&start_mutex);
+}
+
+/* Free every per-cpu buffer and reset all buffer positions; also clears the
+ * sent-core-name markers so names are re-sent on the next capture. Called
+ * when the buffer device is released.
+ */
+static void gator_shutdown(void)
+{
+ int cpu, i;
+
+ mutex_lock(&start_mutex);
+
+ for_each_present_cpu(cpu) {
+ mutex_lock(&gator_buffer_mutex);
+ for (i = 0; i < NUM_GATOR_BUFS; i++) {
+ /* vfree(NULL) is a no-op, so the annotate buffers on cpu > 0 are fine */
+ vfree(per_cpu(gator_buffer, cpu)[i]);
+ per_cpu(gator_buffer, cpu)[i] = NULL;
+ per_cpu(gator_buffer_read, cpu)[i] = 0;
+ per_cpu(gator_buffer_write, cpu)[i] = 0;
+ per_cpu(gator_buffer_commit, cpu)[i] = 0;
+ per_cpu(buffer_space_available, cpu)[i] = true;
+ per_cpu(gator_buffer_commit_time, cpu) = 0;
+ }
+ mutex_unlock(&gator_buffer_mutex);
+ }
+
+ memset(&sent_core_name, 0, sizeof(sent_core_name));
+
+ mutex_unlock(&start_mutex);
+}
+
+/* Set the backtrace unwind depth.  Rejected with -EBUSY while a
+ * capture is running, since the depth sizes backtrace records.
+ */
+static int gator_set_backtrace(unsigned long val)
+{
+	int ret;
+
+	mutex_lock(&start_mutex);
+	if (gator_started) {
+		ret = -EBUSY;
+	} else {
+		gator_backtrace_depth = val;
+		ret = 0;
+	}
+	mutex_unlock(&start_mutex);
+
+	return ret;
+}
+
+/* Read handler for /dev/gator/enable: reports 1 while a capture is
+ * running, 0 otherwise. */
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ return gatorfs_ulong_to_user(gator_started, buf, count, offset);
+}
+
+/* Write handler for /dev/gator/enable: a non-zero value starts the
+ * capture, zero stops it.  Returns count on success or a negative
+ * errno (parse failure or start failure).
+ */
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int err;
+
+	/* Only whole writes at offset 0 are meaningful. */
+	if (*offset)
+		return -EINVAL;
+
+	err = gatorfs_ulong_from_user(&val, buf, count);
+	if (err)
+		return err;
+
+	if (!val) {
+		gator_op_stop();
+		return count;
+	}
+
+	err = gator_op_start();
+	return err ? err : count;
+}
+
+/* File operations for /dev/gator/enable. */
+static const struct file_operations enable_fops = {
+ .read = enable_read,
+ .write = enable_write,
+};
+
+/* Open handler for /dev/gator/buffer.  Root-only and single-opener:
+ * the gator_buffer_opened bit serialises access, and gator_op_setup()
+ * allocates the per-cpu capture buffers.  Returns 0, -EPERM, -EBUSY,
+ * or the gator_op_setup() error.
+ */
+static int userspace_buffer_open(struct inode *inode, struct file *file)
+{
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/* Only one reader may hold the buffers at a time. */
+	if (test_and_set_bit_lock(0, &gator_buffer_opened))
+		return -EBUSY;
+
+	err = gator_op_setup();
+	if (err)
+		goto fail;
+
+	/* NB: the actual start happens from userspace
+	 * echo 1 >/dev/gator/enable
+	 */
+
+	return 0;
+
+fail:
+	__clear_bit_unlock(0, &gator_buffer_opened);
+	return err;
+}
+
+/* Release handler for /dev/gator/buffer: stop any running capture,
+ * free the buffers and clear the single-opener bit so a new session
+ * can begin. */
+static int userspace_buffer_release(struct inode *inode, struct file *file)
+{
+ gator_op_stop();
+ gator_shutdown();
+ __clear_bit_unlock(0, &gator_buffer_opened);
+ return 0;
+}
+
+/* Read handler for /dev/gator/buffer.  Blocks until some per-cpu
+ * buffer has committed data (or the capture stops), then drains as
+ * many committed frames as fit in the user buffer.  Each buffer is a
+ * ring, so a frame may be split into two segments on wrap-around.
+ * buffer_commit_ready() appears to select the next cpu/buffer pair
+ * with pending data, setting both to -1 when there is none -- TODO
+ * confirm against its definition.
+ */
+static ssize_t userspace_buffer_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ int commit, length1, length2, read;
+ char *buffer1;
+ char *buffer2;
+ int cpu, buftype;
+ int written = 0;
+
+ /* ensure there is enough space for a whole frame */
+ if (count < userspace_buffer_size || *offset)
+ return -EINVAL;
+
+ /* sleep until the condition is true or a signal is received the
+ * condition is checked each time gator_buffer_wait is woken up
+ */
+ wait_event_interruptible(gator_buffer_wait, buffer_commit_ready(&cpu, &buftype) || !gator_started);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* Nothing ready (e.g. woken because the capture stopped). */
+ if (buftype == -1 || cpu == -1)
+ return 0;
+
+ mutex_lock(&gator_buffer_mutex);
+
+ do {
+ read = per_cpu(gator_buffer_read, cpu)[buftype];
+ commit = per_cpu(gator_buffer_commit, cpu)[buftype];
+
+ /* May happen if the buffer is freed during pending reads. */
+ if (!per_cpu(gator_buffer, cpu)[buftype])
+ break;
+
+ /* determine the size of two halves */
+ length1 = commit - read;
+ length2 = 0;
+ buffer1 = &(per_cpu(gator_buffer, cpu)[buftype][read]);
+ buffer2 = &(per_cpu(gator_buffer, cpu)[buftype][0]);
+ if (length1 < 0) {
+ /* Commit pointer has wrapped: copy tail then head. */
+ length1 = gator_buffer_size[buftype] - read;
+ length2 = commit;
+ }
+
+ if (length1 + length2 > count - written)
+ break;
+
+ /* start, middle or end */
+ if (length1 > 0 && copy_to_user(&buf[written], buffer1, length1))
+ break;
+
+ /* possible wrap around */
+ if (length2 > 0 && copy_to_user(&buf[written + length1], buffer2, length2))
+ break;
+
+ per_cpu(gator_buffer_read, cpu)[buftype] = commit;
+ written += length1 + length2;
+
+ /* Wake up annotate_write if more space is available */
+ if (buftype == ANNOTATE_BUF)
+ wake_up(&gator_buffer_wait);
+ } while (buffer_commit_ready(&cpu, &buftype));
+
+ mutex_unlock(&gator_buffer_mutex);
+
+ /* kick just in case we've lost an SMP event */
+ wake_up(&gator_buffer_wait);
+
+ /* NOTE(review): any early break with written == 0 reports -EFAULT,
+ * even when the cause was a freed buffer or a too-small user buffer
+ * rather than a copy_to_user() fault. */
+ return written > 0 ? written : -EFAULT;
+}
+
+/* File operations for /dev/gator/buffer (the capture data stream). */
+static const struct file_operations gator_event_buffer_fops = {
+ .open = userspace_buffer_open,
+ .release = userspace_buffer_release,
+ .read = userspace_buffer_read,
+};
+
+/* Read handler for /dev/gator/backtrace_depth: current unwind depth. */
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+ return gatorfs_ulong_to_user(gator_backtrace_depth, buf, count, offset);
+}
+
+/* Write handler for /dev/gator/backtrace_depth: parse the value and
+ * apply it via gator_set_backtrace().  Returns count on success or a
+ * negative errno (parse failure, or -EBUSY while capturing).
+ */
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+	unsigned long val;
+	int err;
+
+	if (*offset)
+		return -EINVAL;
+
+	err = gatorfs_ulong_from_user(&val, buf, count);
+	if (!err)
+		err = gator_set_backtrace(val);
+
+	return err ? err : count;
+}
+
+/* File operations for /dev/gator/backtrace_depth. */
+static const struct file_operations depth_fops = {
+ .read = depth_read,
+ .write = depth_write
+};
+
+/* Populate the gatorfs tree.  Called on each mount, so the tunables
+ * are reset to their defaults here before the control files are
+ * created; per-event files are added under "events" by each
+ * registered gator_interface. */
+static void gator_op_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ struct gator_interface *gi;
+ int cpu;
+
+ /* reinitialize default values */
+ gator_cpu_cores = 0;
+ for_each_present_cpu(cpu) {
+ gator_cpu_cores++;
+ }
+ userspace_buffer_size = BACKTRACE_BUFFER_SIZE;
+ gator_response_type = 1;
+ gator_live_rate = 0;
+
+ gatorfs_create_file(sb, root, "enable", &enable_fops);
+ gatorfs_create_file(sb, root, "buffer", &gator_event_buffer_fops);
+ gatorfs_create_file(sb, root, "backtrace_depth", &depth_fops);
+ gatorfs_create_ro_ulong(sb, root, "cpu_cores", &gator_cpu_cores);
+ gatorfs_create_ro_ulong(sb, root, "buffer_size", &userspace_buffer_size);
+ gatorfs_create_ulong(sb, root, "tick", &gator_timer_count);
+ gatorfs_create_ulong(sb, root, "response_type", &gator_response_type);
+ gatorfs_create_ro_ulong(sb, root, "version", &gator_protocol_version);
+ gatorfs_create_ro_u64(sb, root, "started", &gator_monotonic_started);
+ gatorfs_create_u64(sb, root, "live_rate", &gator_live_rate);
+
+ /* Annotate interface */
+ gator_annotate_create_files(sb, root);
+
+ /* Linux Events */
+ dir = gatorfs_mkdir(sb, root, "events");
+ list_for_each_entry(gi, &gator_events, list)
+ if (gi->create_files)
+ gi->create_files(sb, dir);
+
+ /* Sched Events */
+ sched_trace_create_files(sb, dir);
+
+ /* Power interface */
+ gator_trace_power_create_files(sb, dir);
+}
+
+/******************************************************************************
+ * Module
+ ******************************************************************************/
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+
+#define GATOR_TRACEPOINTS \
+ GATOR_HANDLE_TRACEPOINT(block_rq_complete); \
+ GATOR_HANDLE_TRACEPOINT(cpu_frequency); \
+ GATOR_HANDLE_TRACEPOINT(cpu_idle); \
+ GATOR_HANDLE_TRACEPOINT(cpu_migrate_begin); \
+ GATOR_HANDLE_TRACEPOINT(cpu_migrate_current); \
+ GATOR_HANDLE_TRACEPOINT(cpu_migrate_finish); \
+ GATOR_HANDLE_TRACEPOINT(irq_handler_exit); \
+ GATOR_HANDLE_TRACEPOINT(mali_hw_counter); \
+ GATOR_HANDLE_TRACEPOINT(mali_job_slots_event); \
+ GATOR_HANDLE_TRACEPOINT(mali_mmu_as_in_use); \
+ GATOR_HANDLE_TRACEPOINT(mali_mmu_as_released); \
+ GATOR_HANDLE_TRACEPOINT(mali_page_fault_insert_pages); \
+ GATOR_HANDLE_TRACEPOINT(mali_pm_status); \
+ GATOR_HANDLE_TRACEPOINT(mali_sw_counter); \
+ GATOR_HANDLE_TRACEPOINT(mali_sw_counters); \
+ GATOR_HANDLE_TRACEPOINT(mali_timeline_event); \
+ GATOR_HANDLE_TRACEPOINT(mali_total_alloc_pages_change); \
+ GATOR_HANDLE_TRACEPOINT(mm_page_alloc); \
+ GATOR_HANDLE_TRACEPOINT(mm_page_free); \
+ GATOR_HANDLE_TRACEPOINT(mm_page_free_batched); \
+ GATOR_HANDLE_TRACEPOINT(sched_process_exec); \
+ GATOR_HANDLE_TRACEPOINT(sched_process_fork); \
+ GATOR_HANDLE_TRACEPOINT(sched_process_free); \
+ GATOR_HANDLE_TRACEPOINT(sched_switch); \
+ GATOR_HANDLE_TRACEPOINT(softirq_exit); \
+ GATOR_HANDLE_TRACEPOINT(task_rename); \
+
+#define GATOR_HANDLE_TRACEPOINT(probe_name) \
+ struct tracepoint *gator_tracepoint_##probe_name
+GATOR_TRACEPOINTS;
+#undef GATOR_HANDLE_TRACEPOINT
+
+/* Callback for for_each_kernel_tracepoint() (kernels >= 3.15, per the
+ * surrounding #if): records the struct tracepoint pointer for every
+ * tracepoint gator probes, by string-matching tp->name against the
+ * GATOR_TRACEPOINTS list. */
+static void gator_save_tracepoint(struct tracepoint *tp, void *priv)
+{
+#define GATOR_HANDLE_TRACEPOINT(probe_name) \
+ do { \
+ if (strcmp(tp->name, #probe_name) == 0) { \
+ gator_tracepoint_##probe_name = tp; \
+ return; \
+ } \
+ } while (0)
+GATOR_TRACEPOINTS;
+#undef GATOR_HANDLE_TRACEPOINT
+}
+
+#else
+
+#define for_each_kernel_tracepoint(fct, priv)
+
+#endif
+
+/* Module init: locate the needed tracepoints (no-op before 3.15),
+ * register the gatorfs filesystem, run driver init, and record each
+ * present cpu's cpuid.  Returns 0 on success, -1 on failure. */
+static int __init gator_module_init(void)
+{
+ for_each_kernel_tracepoint(gator_save_tracepoint, NULL);
+
+ if (gatorfs_register())
+ return -1;
+
+ if (gator_init()) {
+ /* Undo the filesystem registration on partial failure. */
+ gatorfs_unregister();
+ return -1;
+ }
+
+ setup_timer(&gator_buffer_wake_up_timer, gator_buffer_wake_up, 0);
+
+ /* Initialize the list of cpuids */
+ memset(gator_cpuids, -1, sizeof(gator_cpuids));
+ on_each_cpu(gator_read_cpuid, NULL, 1);
+
+ return 0;
+}
+
+/* Module exit: tear down in reverse of init -- stop the wake-up timer,
+ * wait for in-flight tracepoint probes to finish, then unwind the
+ * driver and the filesystem. */
+static void __exit gator_module_exit(void)
+{
+ del_timer_sync(&gator_buffer_wake_up_timer);
+ tracepoint_synchronize_unregister();
+ gator_exit();
+ gatorfs_unregister();
+}
+
+module_init(gator_module_init);
+module_exit(gator_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("Gator system profiler");
+#define STRIFY2(ARG) #ARG
+#define STRIFY(ARG) STRIFY2(ARG)
+MODULE_VERSION(STRIFY(PROTOCOL_VERSION));
diff --git a/drivers/parrot/gator/gator_marshaling.c b/drivers/parrot/gator/gator_marshaling.c
new file mode 100644
index 00000000000000..0d116764364296
--- /dev/null
+++ b/drivers/parrot/gator/gator_marshaling.c
@@ -0,0 +1,371 @@
+/**
+ * Copyright (C) ARM Limited 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define NEWLINE_CANARY \
+ /* Unix */ \
+ "1\n" \
+ /* Windows */ \
+ "2\r\n" \
+ /* Mac OS */ \
+ "3\r" \
+ /* RISC OS */ \
+ "4\n\r" \
+ /* Add another character so the length isn't 0x0a bytes */ \
+ "5"
+
+#ifdef MALI_SUPPORT
+#include "gator_events_mali_common.h"
+#endif
+
+/* Emit the SUMMARY frame: newline canary (so the host can detect
+ * newline translation), timestamps, uname, and optional feature
+ * key/value pairs terminated by an empty string.  Always written for
+ * cpu 0 and committed immediately so Streamline sees it first. */
+static void marshal_summary(long long timestamp, long long uptime, long long monotonic_delta, const char *uname)
+{
+ unsigned long flags;
+ int cpu = 0;
+
+ local_irq_save(flags);
+ gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_SUMMARY);
+ gator_buffer_write_string(cpu, SUMMARY_BUF, NEWLINE_CANARY);
+ gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, timestamp);
+ gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, uptime);
+ gator_buffer_write_packed_int64(cpu, SUMMARY_BUF, monotonic_delta);
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "uname");
+ gator_buffer_write_string(cpu, SUMMARY_BUF, uname);
+#if GATOR_IKS_SUPPORT
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "iks");
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+#endif
+#ifdef CONFIG_PREEMPT_RTB
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rtb");
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "preempt_rt_full");
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+#endif
+ /* Let Streamline know which GPU is used so that it can label the GPU Activity appropriately. This is a temporary fix, to be improved in a future release. */
+#ifdef MALI_SUPPORT
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "mali_type");
+#if (MALI_SUPPORT == MALI_4xx)
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "4xx");
+#elif (MALI_SUPPORT == MALI_MIDGARD)
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "6xx");
+#else
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "unknown");
+#endif
+#endif
+ /* Empty string terminates the key/value list. */
+ gator_buffer_write_string(cpu, SUMMARY_BUF, "");
+ /* Commit the buffer now so it can be one of the first frames read by Streamline */
+ local_irq_restore(flags);
+ gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
+}
+
+/* Return true if the name buffer has room for a cookie record holding
+ * "text" (string bytes plus up to three packed 32-bit values). */
+static bool marshal_cookie_header(const char *text)
+{
+ int cpu = get_physical_cpu();
+
+ return buffer_check_space(cpu, NAME_BUF, strlen(text) + 3 * MAXSIZE_PACK32);
+}
+
+/* Emit a MESSAGE_COOKIE record binding the integer cookie to its
+ * string (e.g. an image name). */
+static void marshal_cookie(int cookie, const char *text)
+{
+ int cpu = get_physical_cpu();
+ /* buffer_check_space already called by marshal_cookie_header */
+ gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_COOKIE);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
+ gator_buffer_write_string(cpu, NAME_BUF, text);
+ buffer_check(cpu, NAME_BUF, gator_get_time());
+}
+
+/* Emit a MESSAGE_THREAD_NAME record (timestamp, pid, comm).  The
+ * record is silently dropped if the name buffer lacks space; writes
+ * happen with irqs off so the per-cpu buffer is not interleaved. */
+static void marshal_thread_name(int pid, char *name)
+{
+ unsigned long flags, cpu;
+ u64 time;
+
+ local_irq_save(flags);
+ cpu = get_physical_cpu();
+ time = gator_get_time();
+ if (buffer_check_space(cpu, NAME_BUF, TASK_COMM_LEN + 3 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+ gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_THREAD_NAME);
+ gator_buffer_write_packed_int64(cpu, NAME_BUF, time);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
+ gator_buffer_write_string(cpu, NAME_BUF, name);
+ }
+ local_irq_restore(flags);
+ buffer_check(cpu, NAME_BUF, time);
+}
+
+/* Emit a MESSAGE_LINK record associating an exec cookie with a
+ * tgid/pid pair.  Dropped silently when the name buffer is full. */
+static void marshal_link(int cookie, int tgid, int pid)
+{
+ unsigned long cpu = get_physical_cpu(), flags;
+ u64 time;
+
+ local_irq_save(flags);
+ time = gator_get_time();
+ if (buffer_check_space(cpu, NAME_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, NAME_BUF, MESSAGE_LINK);
+ gator_buffer_write_packed_int64(cpu, NAME_BUF, time);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, cookie);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, tgid);
+ gator_buffer_write_packed_int(cpu, NAME_BUF, pid);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, NAME_BUF, time);
+}
+
+/* Begin a backtrace record: reserve space for the header plus the
+ * worst-case frame count (gator_backtrace_depth frames of two packed
+ * ints each) and write the header fields.  Returns false (after a
+ * commit check) if there is not enough room, in which case the caller
+ * must not emit any frames. */
+static bool marshal_backtrace_header(int exec_cookie, int tgid, int pid, u64 time)
+{
+ int cpu = get_physical_cpu();
+
+ if (!buffer_check_space(cpu, BACKTRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32 + gator_backtrace_depth * 2 * MAXSIZE_PACK32)) {
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, BACKTRACE_BUF, time);
+
+ return false;
+ }
+
+ gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, time);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, exec_cookie);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, tgid);
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, pid);
+
+ return true;
+}
+
+/* Append one backtrace frame (cookie + address).  Space was reserved
+ * by marshal_backtrace_header(). */
+static void marshal_backtrace(unsigned long address, int cookie, int in_kernel)
+{
+	int cpu = get_physical_cpu();
+
+	/* A userspace frame with no known image maps to the unresolved cookie. */
+	if (!in_kernel && cookie == 0)
+		cookie = UNRESOLVED_COOKIE;
+
+	gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, cookie);
+	gator_buffer_write_packed_int64(cpu, BACKTRACE_BUF, address);
+}
+
+/* Terminate the current backtrace record with MESSAGE_END_BACKTRACE
+ * and run the periodic commit check. */
+static void marshal_backtrace_footer(u64 time)
+{
+ int cpu = get_physical_cpu();
+
+ gator_buffer_write_packed_int(cpu, BACKTRACE_BUF, MESSAGE_END_BACKTRACE);
+
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, BACKTRACE_BUF, time);
+}
+
+/* Begin a block-counter event group by writing a timestamp record
+ * (key 0).  Returns false if the buffer is full, in which case the
+ * caller should skip the event data. */
+static bool marshal_event_header(u64 time)
+{
+ unsigned long flags, cpu = get_physical_cpu();
+ bool retval = false;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, BLOCK_COUNTER_BUF, MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
+ gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, 0); /* key of zero indicates a timestamp */
+ gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, time);
+ retval = true;
+ }
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+/* Write `len` ints of 32-bit counter data as (key, value) pairs into
+ * the per-cpu block-counter buffer.  An odd length is rejected as
+ * malformed; pairs that no longer fit are dropped. */
+static void marshal_event(int len, int *buffer)
+{
+	unsigned long flags, cpu = get_physical_cpu();
+	int idx;
+
+	if (len <= 0)
+		return;
+
+	/* length must be even since all data is a (key, value) pair */
+	if (len & 0x1) {
+		pr_err("gator: invalid counter data detected and discarded\n");
+		return;
+	}
+
+	/* events must be written in key,value pairs */
+	local_irq_save(flags);
+	for (idx = 0; idx < len; idx += 2) {
+		if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK32))
+			break;
+		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[idx]);
+		gator_buffer_write_packed_int(cpu, BLOCK_COUNTER_BUF, buffer[idx + 1]);
+	}
+	local_irq_restore(flags);
+}
+
+/* 64-bit variant of marshal_event(): write `len` long longs of
+ * counter data as (key, value) pairs; odd lengths are rejected and
+ * pairs that no longer fit are dropped. */
+static void marshal_event64(int len, long long *buffer64)
+{
+	unsigned long flags, cpu = get_physical_cpu();
+	int idx;
+
+	if (len <= 0)
+		return;
+
+	/* length must be even since all data is a (key, value) pair */
+	if (len & 0x1) {
+		pr_err("gator: invalid counter data detected and discarded\n");
+		return;
+	}
+
+	/* events must be written in key,value pairs */
+	local_irq_save(flags);
+	for (idx = 0; idx < len; idx += 2) {
+		if (!buffer_check_space(cpu, BLOCK_COUNTER_BUF, 2 * MAXSIZE_PACK64))
+			break;
+		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[idx]);
+		gator_buffer_write_packed_int64(cpu, BLOCK_COUNTER_BUF, buffer64[idx + 1]);
+	}
+	local_irq_restore(flags);
+}
+
+/* Emit one (core, key, value) counter sample with a timestamp into
+ * the per-cpu counter buffer; dropped silently when full. */
+static void __maybe_unused marshal_event_single(int core, int key, int value)
+{
+ unsigned long flags, cpu;
+ u64 time;
+
+ local_irq_save(flags);
+ cpu = get_physical_cpu();
+ time = gator_get_time();
+ if (buffer_check_space(cpu, COUNTER_BUF, MAXSIZE_PACK64 + 3 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, value);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, COUNTER_BUF, time);
+}
+
+/* 64-bit-value variant of marshal_event_single(): one (core, key,
+ * value) sample with a timestamp; dropped silently when full. */
+static void __maybe_unused marshal_event_single64(int core, int key, long long value)
+{
+ unsigned long flags, cpu;
+ u64 time;
+
+ local_irq_save(flags);
+ cpu = get_physical_cpu();
+ time = gator_get_time();
+ if (buffer_check_space(cpu, COUNTER_BUF, 2 * MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int64(cpu, COUNTER_BUF, time);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, core);
+ gator_buffer_write_packed_int(cpu, COUNTER_BUF, key);
+ gator_buffer_write_packed_int64(cpu, COUNTER_BUF, value);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, COUNTER_BUF, time);
+}
+
+/* Emit a MESSAGE_SCHED_SWITCH record (timestamp, pid, state) into the
+ * per-cpu scheduler-trace buffer.  No-op if that buffer was never
+ * allocated. */
+static void marshal_sched_trace_switch(int pid, int state)
+{
+ unsigned long cpu = get_physical_cpu(), flags;
+ u64 time;
+
+ if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ time = gator_get_time();
+ if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 5 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_SWITCH);
+ gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, state);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, SCHED_TRACE_BUF, time);
+}
+
+/* Emit a MESSAGE_SCHED_EXIT record (timestamp, pid).  No-op if the
+ * scheduler-trace buffer was never allocated.  NOTE(review): the tgid
+ * parameter is accepted but never written. */
+static void marshal_sched_trace_exit(int tgid, int pid)
+{
+ unsigned long cpu = get_physical_cpu(), flags;
+ u64 time;
+
+ if (!per_cpu(gator_buffer, cpu)[SCHED_TRACE_BUF])
+ return;
+
+ local_irq_save(flags);
+ time = gator_get_time();
+ if (buffer_check_space(cpu, SCHED_TRACE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, MESSAGE_SCHED_EXIT);
+ gator_buffer_write_packed_int64(cpu, SCHED_TRACE_BUF, time);
+ gator_buffer_write_packed_int(cpu, SCHED_TRACE_BUF, pid);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, SCHED_TRACE_BUF, time);
+}
+
+#if GATOR_CPU_FREQ_SUPPORT
+/* Emit a cpu-idle record (state, timestamp, core) into the per-cpu
+ * idle buffer; dropped silently when full. */
+static void marshal_idle(int core, int state)
+{
+ unsigned long flags, cpu;
+ u64 time;
+
+ local_irq_save(flags);
+ cpu = get_physical_cpu();
+ time = gator_get_time();
+ if (buffer_check_space(cpu, IDLE_BUF, MAXSIZE_PACK64 + 2 * MAXSIZE_PACK32)) {
+ gator_buffer_write_packed_int(cpu, IDLE_BUF, state);
+ gator_buffer_write_packed_int64(cpu, IDLE_BUF, time);
+ gator_buffer_write_packed_int(cpu, IDLE_BUF, core);
+ }
+ local_irq_restore(flags);
+ /* Check and commit; commit is set to occur once buffer is 3/4 full */
+ buffer_check(cpu, IDLE_BUF, time);
+}
+#endif
+
+#if defined(__arm__) || defined(__aarch64__)
+/* Emit a MESSAGE_CORE_NAME record (core index, cpuid, name string)
+ * into the summary buffer and commit it immediately so the name shows
+ * up in live captures. */
+static void marshal_core_name(const int core, const int cpuid, const char *name)
+{
+ int cpu = get_physical_cpu();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (buffer_check_space(cpu, SUMMARY_BUF, MAXSIZE_PACK32 + MAXSIZE_CORE_NAME)) {
+ gator_buffer_write_packed_int(cpu, SUMMARY_BUF, MESSAGE_CORE_NAME);
+ gator_buffer_write_packed_int(cpu, SUMMARY_BUF, core);
+ gator_buffer_write_packed_int(cpu, SUMMARY_BUF, cpuid);
+ gator_buffer_write_string(cpu, SUMMARY_BUF, name);
+ }
+ /* Commit core names now so that they can show up in live */
+ local_irq_restore(flags);
+ gator_commit_buffer(cpu, SUMMARY_BUF, gator_get_time());
+}
+#endif
+
+/* Emit a MESSAGE_SWITCH activity record (core, key, activity, pid,
+ * state) into the per-cpu activity buffer.  No-op if that buffer was
+ * never allocated; dropped silently when full. */
+static void marshal_activity_switch(int core, int key, int activity, int pid, int state)
+{
+	unsigned long cpu = get_physical_cpu(), flags;
+	u64 time;
+
+	if (!per_cpu(gator_buffer, cpu)[ACTIVITY_BUF])
+		return;
+
+	local_irq_save(flags);
+	time = gator_get_time();
+	/* Six packed 32-bit values plus the 64-bit timestamp are written
+	 * below; reserving only 5 * MAXSIZE_PACK32 (as before) was one
+	 * packed int short of the worst case. */
+	if (buffer_check_space(cpu, ACTIVITY_BUF, MAXSIZE_PACK64 + 6 * MAXSIZE_PACK32)) {
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, MESSAGE_SWITCH);
+		gator_buffer_write_packed_int64(cpu, ACTIVITY_BUF, time);
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, core);
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, key);
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, activity);
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, pid);
+		gator_buffer_write_packed_int(cpu, ACTIVITY_BUF, state);
+	}
+	local_irq_restore(flags);
+	/* Check and commit; commit is set to occur once buffer is 3/4 full */
+	buffer_check(cpu, ACTIVITY_BUF, time);
+}
+
+/* Public wrapper used by the GPU trace code: emits an activity switch
+ * with state forced to 0. */
+void gator_marshal_activity_switch(int core, int key, int activity, int pid)
+{
+ /* state is reserved for cpu use only */
+ marshal_activity_switch(core, key, activity, pid, 0);
+}
diff --git a/drivers/parrot/gator/gator_trace_gpu.c b/drivers/parrot/gator/gator_trace_gpu.c
new file mode 100644
index 00000000000000..5de9152e365aa4
--- /dev/null
+++ b/drivers/parrot/gator/gator_trace_gpu.c
@@ -0,0 +1,321 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "gator.h"
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+
+#ifdef MALI_SUPPORT
+#ifdef MALI_DIR_MIDGARD
+/* New DDK Directory structure with kernel/drivers/gpu/arm/midgard*/
+#include "mali_linux_trace.h"
+#else
+/* Old DDK Directory structure with kernel/drivers/gpu/arm/t6xx*/
+#include "linux/mali_linux_trace.h"
+#endif
+#endif
+
+/*
+ * Taken from MALI_PROFILING_EVENT_TYPE_* items in Mali DDK.
+ */
+#define EVENT_TYPE_SINGLE 0
+#define EVENT_TYPE_START 1
+#define EVENT_TYPE_STOP 2
+#define EVENT_TYPE_SUSPEND 3
+#define EVENT_TYPE_RESUME 4
+
+/* Note whether tracepoints have been registered */
+static int mali_timeline_trace_registered;
+static int mali_job_slots_trace_registered;
+
+enum {
+ GPU_UNIT_NONE = 0,
+ GPU_UNIT_VP,
+ GPU_UNIT_FP,
+ GPU_UNIT_CL,
+ NUMBER_OF_GPU_UNITS
+};
+
+#if defined(MALI_SUPPORT)
+
+struct mali_activity {
+ int core;
+ int key;
+ int count;
+ int last_activity;
+ int last_pid;
+};
+
+#define NUMBER_OF_GPU_CORES 16
+static struct mali_activity mali_activities[NUMBER_OF_GPU_UNITS*NUMBER_OF_GPU_CORES];
+static DEFINE_SPINLOCK(mali_activities_lock);
+
+/* Only one event should be running on a unit and core at a time (ie,
+ * a start event can only be followed by a stop and vice versa), but
+ * because the kernel only knows when a job is enqueued and not
+ * started, it is possible for a start1, start2, stop1, stop2. Change
+ * it back into start1, stop1, start2, stop2 by queueing up start2 and
+ * releasing it when stop1 is received.
+ */
+
+/* Locate (or claim) the mali_activities[] slot tracking (core, key).
+ * An unused slot has core == 0 and key == 0.  BUGs if the table is
+ * full.  Caller must hold mali_activities_lock. */
+static int mali_activity_index(int core, int key)
+{
+	int idx = 0;
+
+	while (idx < ARRAY_SIZE(mali_activities)) {
+		struct mali_activity *slot = &mali_activities[idx];
+
+		if (slot->core == core && slot->key == key)
+			break;
+		if (slot->core == 0 && slot->key == 0) {
+			/* Free slot: claim it for this (core, key). */
+			slot->core = core;
+			slot->key = key;
+			break;
+		}
+		++idx;
+	}
+	BUG_ON(idx >= ARRAY_SIZE(mali_activities));
+
+	return idx;
+}
+
+/* Record a GPU job start for (core, key).  If the unit is idle the
+ * switch is emitted immediately; otherwise the activity/pid are
+ * queued (see the reordering comment above) and emitted when the
+ * current job stops. */
+static void mali_activity_enqueue(int core, int key, int activity, int pid)
+{
+ int i;
+ int count;
+
+ spin_lock(&mali_activities_lock);
+ i = mali_activity_index(core, key);
+
+ count = mali_activities[i].count;
+ BUG_ON(count < 0);
+ ++mali_activities[i].count;
+ if (count) {
+ mali_activities[i].last_activity = activity;
+ mali_activities[i].last_pid = pid;
+ }
+ spin_unlock(&mali_activities_lock);
+
+ /* Emit outside the lock; only the first enqueue switches the unit on. */
+ if (!count)
+ gator_marshal_activity_switch(core, key, activity, pid);
+}
+
+/* Record a GPU job stop for (core, key): emit an idle switch and, if
+ * a start was queued while this job ran, immediately emit the queued
+ * activity so out-of-order start/stop pairs are serialized.  A stop
+ * with no matching start is ignored. */
+static void mali_activity_stop(int core, int key)
+{
+ int i;
+ int count;
+ int last_activity = 0;
+ int last_pid = 0;
+
+ spin_lock(&mali_activities_lock);
+ i = mali_activity_index(core, key);
+
+ if (mali_activities[i].count == 0) {
+ spin_unlock(&mali_activities_lock);
+ return;
+ }
+ --mali_activities[i].count;
+ count = mali_activities[i].count;
+ if (count) {
+ last_activity = mali_activities[i].last_activity;
+ last_pid = mali_activities[i].last_pid;
+ }
+ spin_unlock(&mali_activities_lock);
+
+ gator_marshal_activity_switch(core, key, 0, 0);
+ if (count)
+ gator_marshal_activity_switch(core, key, last_activity, last_pid);
+}
+
+/* Emit an idle (activity 0, pid 0) switch for every core of every
+ * enabled counter in mali_activity[], so each GPU unit starts the
+ * capture in a known state.
+ *
+ * The loop counter is size_t to match mali_activity_size (the old int
+ * counter triggered -Wsign-compare), and the invariant enabled check
+ * is hoisted out of the per-core loop; behavior is unchanged.
+ */
+void mali_activity_clear(struct mali_counter mali_activity[], size_t mali_activity_size)
+{
+	size_t activity;
+	int cores;
+	int core;
+
+	for (activity = 0; activity < mali_activity_size; ++activity) {
+		if (!mali_activity[activity].enabled)
+			continue;
+
+		/* A negative core count means the count is unknown; treat as one. */
+		cores = mali_activity[activity].cores;
+		if (cores < 0)
+			cores = 1;
+
+		for (core = 0; core < cores; ++core) {
+			preempt_disable();
+			gator_marshal_activity_switch(core, mali_activity[activity].key, 0, 0);
+			preempt_enable();
+		}
+	}
+}
+
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
+#include "gator_events_mali_4xx.h"
+
+/*
+ * Taken from MALI_PROFILING_EVENT_CHANNEL_* in Mali DDK.
+ */
+enum {
+ EVENT_CHANNEL_SOFTWARE = 0,
+ EVENT_CHANNEL_VP0 = 1,
+ EVENT_CHANNEL_FP0 = 5,
+ EVENT_CHANNEL_FP1,
+ EVENT_CHANNEL_FP2,
+ EVENT_CHANNEL_FP3,
+ EVENT_CHANNEL_FP4,
+ EVENT_CHANNEL_FP5,
+ EVENT_CHANNEL_FP6,
+ EVENT_CHANNEL_FP7,
+ EVENT_CHANNEL_GPU = 21
+};
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+enum {
+ EVENT_REASON_SINGLE_GPU_NONE = 0,
+ EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+};
+
+struct mali_counter mali_activity[2];
+
+/* Probe for the Mali 4xx timeline tracepoint.  Decodes the component
+ * and state packed into event_id, then maps vertex-processor (VP0)
+ * and fragment-processor (FP0..FP7) start/stop events onto activity
+ * enqueue/stop, and forwards GPU DVFS single events.  d1 carries the
+ * pid for start events; d0/d2..d4 are unused here except for DVFS. */
+GATOR_DEFINE_PROBE(mali_timeline_event, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3, unsigned int d4))
+{
+ unsigned int component, state;
+
+ /* do as much work as possible before disabling interrupts */
+ component = (event_id >> 16) & 0xFF; /* component is an 8-bit field */
+ state = (event_id >> 24) & 0xF; /* state is a 4-bit field */
+
+ switch (state) {
+ case EVENT_TYPE_START:
+ if (component == EVENT_CHANNEL_VP0) {
+ /* tgid = d0; pid = d1; */
+ if (mali_activity[1].enabled)
+ mali_activity_enqueue(0, mali_activity[1].key, 1, d1);
+ } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
+ /* tgid = d0; pid = d1; */
+ if (mali_activity[0].enabled)
+ mali_activity_enqueue(component - EVENT_CHANNEL_FP0, mali_activity[0].key, 1, d1);
+ }
+ break;
+
+ case EVENT_TYPE_STOP:
+ if (component == EVENT_CHANNEL_VP0) {
+ if (mali_activity[1].enabled)
+ mali_activity_stop(0, mali_activity[1].key);
+ } else if (component >= EVENT_CHANNEL_FP0 && component <= EVENT_CHANNEL_FP7) {
+ if (mali_activity[0].enabled)
+ mali_activity_stop(component - EVENT_CHANNEL_FP0, mali_activity[0].key);
+ }
+ break;
+
+ case EVENT_TYPE_SINGLE:
+ if (component == EVENT_CHANNEL_GPU) {
+ unsigned int reason = (event_id & 0xffff);
+
+ if (reason == EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE)
+ gator_events_mali_log_dvfs_event(d0, d1);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+
+struct mali_counter mali_activity[3];
+
+/* Probe for the Midgard job-slots tracepoint.  component selects the
+ * activity counter (0 = FP, 1 = VP, 2 = CL); start events enqueue an
+ * activity attributed to pid (falling back to tgid), and any other
+ * state -- including soft-stops -- terminates it. */
+#if defined(MALI_JOB_SLOTS_EVENT_CHANGED)
+GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid, unsigned char job_id))
+#else
+GATOR_DEFINE_PROBE(mali_job_slots_event, TP_PROTO(unsigned int event_id, unsigned int tgid, unsigned int pid))
+#endif
+{
+ unsigned int component, state, unit;
+#if !defined(MALI_JOB_SLOTS_EVENT_CHANGED)
+ unsigned char job_id = 0;
+#endif
+
+ component = (event_id >> 16) & 0xFF; /* component is an 8-bit field */
+ state = (event_id >> 24) & 0xF; /* state is a 4-bit field */
+
+ switch (component) {
+ case 0:
+ unit = GPU_UNIT_FP;
+ break;
+ case 1:
+ unit = GPU_UNIT_VP;
+ break;
+ case 2:
+ unit = GPU_UNIT_CL;
+ break;
+ default:
+ unit = GPU_UNIT_NONE;
+ }
+
+ if (unit != GPU_UNIT_NONE) {
+ switch (state) {
+ case EVENT_TYPE_START:
+ if (mali_activity[component].enabled)
+ mali_activity_enqueue(0, mali_activity[component].key, 1, (pid != 0 ? pid : tgid));
+ break;
+ case EVENT_TYPE_STOP:
+ default: /* Some jobs can be soft-stopped, so ensure that this terminates the activity trace. */
+ if (mali_activity[component].enabled)
+ mali_activity_stop(0, mali_activity[component].key);
+ break;
+ }
+ }
+}
+#endif
+
+/* Begin GPU tracing: reset the activity table, emit initial idle
+ * switches, and register whichever Mali tracepoint applies to the
+ * configured GPU generation.  Always returns 0 -- a missing GPU
+ * tracepoint is not an error. */
+static int gator_trace_gpu_start(void)
+{
+ /*
+ * Returns nonzero for installation failed
+ * Absence of gpu trace points is not an error
+ */
+
+#if defined(MALI_SUPPORT)
+ memset(&mali_activities, 0, sizeof(mali_activities));
+#endif
+ mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
+ mali_activity_clear(mali_activity, ARRAY_SIZE(mali_activity));
+ if (!GATOR_REGISTER_TRACE(mali_timeline_event))
+ mali_timeline_trace_registered = 1;
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+ mali_activity_clear(mali_activity, ARRAY_SIZE(mali_activity));
+ if (!GATOR_REGISTER_TRACE(mali_job_slots_event))
+ mali_job_slots_trace_registered = 1;
+#endif
+
+ return 0;
+}
+
+/* End GPU tracing: unregister whichever Mali tracepoint was attached
+ * by gator_trace_gpu_start() and clear the registration flags. */
+static void gator_trace_gpu_stop(void)
+{
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT != MALI_MIDGARD)
+ if (mali_timeline_trace_registered)
+ GATOR_UNREGISTER_TRACE(mali_timeline_event);
+#endif
+
+#if defined(MALI_SUPPORT) && (MALI_SUPPORT == MALI_MIDGARD)
+ if (mali_job_slots_trace_registered)
+ GATOR_UNREGISTER_TRACE(mali_job_slots_event);
+#endif
+
+ mali_timeline_trace_registered = mali_job_slots_trace_registered = 0;
+}
diff --git a/drivers/parrot/gator/gator_trace_power.c b/drivers/parrot/gator/gator_trace_power.c
new file mode 100644
index 00000000000000..46e04b29a18748
--- /dev/null
+++ b/drivers/parrot/gator/gator_trace_power.c
@@ -0,0 +1,192 @@
+/**
+ * Copyright (C) ARM Limited 2011-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <trace/events/power.h>
+
+#if defined(__arm__)
+
+#include <asm/mach-types.h>
+
+#define implements_wfi() (!machine_is_omap3_beagle())
+
+#else
+
+#define implements_wfi() false
+
+#endif
+
+/* cpu_frequency and cpu_idle trace points were introduced in Linux
+ * kernel v2.6.38; the now-deprecated power_frequency trace point was
+ * available prior to 2.6.38, but only for x86.
+ */
+#if GATOR_CPU_FREQ_SUPPORT
+enum {
+ POWER_CPU_FREQ,
+ POWER_TOTAL
+};
+
+static DEFINE_PER_CPU(ulong, idle_prev_state);
+static ulong power_cpu_enabled[POWER_TOTAL];
+static ulong power_cpu_key[POWER_TOTAL];
+static ulong power_cpu_cores;
+
+/* Populate gatorfs with the cpu-frequency counter controls.
+ * The directory is only created when cpufreq actually reports a
+ * non-zero frequency on at least one online cpu.
+ * Returns 0 on success, -1 if a gatorfs directory cannot be created.
+ */
+static int gator_trace_power_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+ int cpu;
+ bool found_nonzero_freq = false;
+
+ /* Even if CONFIG_CPU_FREQ is defined, it still may not be
+ * used. Check for non-zero values from cpufreq_quick_get
+ */
+ for_each_online_cpu(cpu) {
+ if (cpufreq_quick_get(cpu) > 0) {
+ found_nonzero_freq = true;
+ break;
+ }
+ }
+
+ if (found_nonzero_freq) {
+ /* cpu_frequency */
+ dir = gatorfs_mkdir(sb, root, "Linux_power_cpu_freq");
+ if (!dir)
+ return -1;
+ /* "enabled" is writable by the host tool; "key" is read-only. */
+ gatorfs_create_ulong(sb, dir, "enabled", &power_cpu_enabled[POWER_CPU_FREQ]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &power_cpu_key[POWER_CPU_FREQ]);
+ }
+
+ return 0;
+}
+
+/* 'cpu' may not equal smp_processor_id(), i.e. may not be running on the core that is having the freq/idle state change */
+GATOR_DEFINE_PROBE(cpu_frequency, TP_PROTO(unsigned int frequency, unsigned int cpu))
+{
+ /* Map the kernel's logical cpu id to gator's physical cpu id. */
+ cpu = lcpu_to_pcpu(cpu);
+ /* cpufreq reports kHz; scale by 1000 so the capture carries Hz. */
+ marshal_event_single64(cpu, power_cpu_key[POWER_CPU_FREQ], frequency * 1000L);
+}
+
+GATOR_DEFINE_PROBE(cpu_idle, TP_PROTO(unsigned int state, unsigned int cpu))
+{
+ cpu = lcpu_to_pcpu(cpu);
+
+ /* Suppress duplicate notifications for the same idle state. */
+ if (state == per_cpu(idle_prev_state, cpu))
+ return;
+
+ /* Only report WFI transitions on platforms known to implement WFI. */
+ if (implements_wfi()) {
+ if (state == PWR_EVENT_EXIT) {
+ /* transition from wfi to non-wfi */
+ marshal_idle(cpu, MESSAGE_IDLE_EXIT);
+ } else {
+ /* transition from non-wfi to wfi */
+ marshal_idle(cpu, MESSAGE_IDLE_ENTER);
+ }
+ }
+
+ per_cpu(idle_prev_state, cpu) = state;
+}
+
+/* Hotplug-online hook: emit the cpu's current frequency so the capture
+ * has a baseline value for this core.
+ */
+static void gator_trace_power_online(void)
+{
+ int pcpu = get_physical_cpu();
+ int lcpu = get_logical_cpu();
+
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ /* cpufreq_quick_get() returns kHz; scale to Hz as elsewhere. */
+ marshal_event_single64(pcpu, power_cpu_key[POWER_CPU_FREQ], cpufreq_quick_get(lcpu) * 1000L);
+}
+
+/* Hotplug-offline hook: report a frequency of zero for the departing cpu. */
+static void gator_trace_power_offline(void)
+{
+ /* Set frequency to zero on an offline */
+ int cpu = get_physical_cpu();
+
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ marshal_event_single(cpu, power_cpu_key[POWER_CPU_FREQ], 0);
+}
+
+/* Attach the power tracepoints for a capture session.
+ * cpu_frequency is only registered when the host enabled it via gatorfs;
+ * cpu_idle is always registered (needed for WFI detection).
+ * Returns 0 on success, -1 if a tracepoint could not be registered.
+ */
+static int gator_trace_power_start(void)
+{
+ int cpu;
+
+ /* register tracepoints */
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ if (GATOR_REGISTER_TRACE(cpu_frequency))
+ goto fail_cpu_frequency_exit;
+
+ /* Always register for cpu_idle for detecting WFI */
+ if (GATOR_REGISTER_TRACE(cpu_idle))
+ goto fail_cpu_idle_exit;
+ pr_debug("gator: registered power event tracepoints\n");
+
+ /* Reset the per-cpu dedup state used by the cpu_idle probe. */
+ for_each_present_cpu(cpu) {
+ per_cpu(idle_prev_state, cpu) = 0;
+ }
+
+ return 0;
+
+ /* unregister tracepoints on error */
+fail_cpu_idle_exit:
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ GATOR_UNREGISTER_TRACE(cpu_frequency);
+fail_cpu_frequency_exit:
+ pr_err("gator: power event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n")
;
+
+ return -1;
+}
+
+/* Detach the power tracepoints and clear the enable flags so the next
+ * session starts from a clean state.
+ */
+static void gator_trace_power_stop(void)
+{
+ int i;
+
+ if (power_cpu_enabled[POWER_CPU_FREQ])
+ GATOR_UNREGISTER_TRACE(cpu_frequency);
+ GATOR_UNREGISTER_TRACE(cpu_idle);
+ pr_debug("gator: unregistered power event tracepoints\n");
+
+ for (i = 0; i < POWER_TOTAL; i++)
+ power_cpu_enabled[i] = 0;
+}
+
+/* One-time module init: record the core count and allocate a gator
+ * counter key for each power counter (all disabled by default).
+ */
+static void gator_trace_power_init(void)
+{
+ int i;
+
+ power_cpu_cores = nr_cpu_ids;
+ for (i = 0; i < POWER_TOTAL; i++) {
+ power_cpu_enabled[i] = 0;
+ power_cpu_key[i] = gator_events_get_key();
+ }
+}
+#else
+/* No-op stubs used when GATOR_CPU_FREQ_SUPPORT is unavailable: power
+ * tracing is compiled out but callers still link against these names.
+ */
+static int gator_trace_power_create_files(struct super_block *sb, struct dentry *root)
+{
+ return 0;
+}
+
+static void gator_trace_power_online(void)
+{
+}
+
+static void gator_trace_power_offline(void)
+{
+}
+
+static int gator_trace_power_start(void)
+{
+ return 0;
+}
+
+static void gator_trace_power_stop(void)
+{
+}
+
+static void gator_trace_power_init(void)
+{
+}
+#endif
diff --git a/drivers/parrot/gator/gator_trace_sched.c b/drivers/parrot/gator/gator_trace_sched.c
new file mode 100644
index 00000000000000..6d7cbd7348e151
--- /dev/null
+++ b/drivers/parrot/gator/gator_trace_sched.c
@@ -0,0 +1,321 @@
+/**
+ * Copyright (C) ARM Limited 2010-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <trace/events/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+#include <trace/events/task.h>
+#endif
+
+#include "gator.h"
+
+#define TASK_MAP_ENTRIES 1024 /* must be power of 2 */
+#define TASK_MAX_COLLISIONS 2
+
+enum {
+ STATE_WAIT_ON_OTHER = 0,
+ STATE_CONTENTION,
+ STATE_WAIT_ON_IO,
+ CPU_WAIT_TOTAL
+};
+
+static DEFINE_PER_CPU(uint64_t *, taskname_keys);
+static DEFINE_PER_CPU(int, collecting);
+
+/* this array is never read as the cpu wait charts are derived
+ * counters the files are needed, nonetheless, to show that these
+ * counters are available
+ */
+static ulong cpu_wait_enabled[CPU_WAIT_TOTAL];
+static ulong sched_cpu_key[CPU_WAIT_TOTAL];
+
+/* Expose the CPU-wait derived counters in gatorfs.
+ * Note only CONTENTION and WAIT_ON_IO get entries; STATE_WAIT_ON_OTHER
+ * has no file (see the comment on cpu_wait_enabled above).
+ * Returns 0 on success, -1 if a directory cannot be created.
+ */
+static int sched_trace_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *dir;
+
+ /* CPU Wait - Contention */
+ dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_contention");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_CONTENTION]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_CONTENTION]);
+
+ /* CPU Wait - I/O */
+ dir = gatorfs_mkdir(sb, root, "Linux_cpu_wait_io");
+ if (!dir)
+ return -1;
+ gatorfs_create_ulong(sb, dir, "enabled", &cpu_wait_enabled[STATE_WAIT_ON_IO]);
+ gatorfs_create_ro_ulong(sb, dir, "key", &sched_cpu_key[STATE_WAIT_ON_IO]);
+
+ return 0;
+}
+
+/* Send a thread's name to the host tool, at most once per (name, pid)
+ * pair, using a small per-cpu hash cache keyed by
+ * (crc32(comm) << 32 | pid) to suppress duplicates.
+ */
+static void emit_pid_name(const char *comm, struct task_struct *task)
+{
+ bool found = false;
+ char taskcomm[TASK_COMM_LEN + 3];
+ unsigned long x, cpu = get_physical_cpu();
+ /* Bucket of TASK_MAX_COLLISIONS slots for this pid.
+ * NOTE(review): the 0xFF mask addresses only 256 buckets although
+ * TASK_MAP_ENTRIES is 1024 -- confirm whether the mask is stale.
+ */
+ uint64_t *keys = &(per_cpu(taskname_keys, cpu)[(task->pid & 0xFF) * TASK_MAX_COLLISIONS]);
+ uint64_t value;
+
+ value = gator_chksum_crc32(comm);
+ value = (value << 32) | (uint32_t)task->pid;
+
+ /* determine if the thread name was emitted already */
+ for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
+ if (keys[x] == value) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* shift values, new value always in front */
+ uint64_t oldv, newv = value;
+
+ for (x = 0; x < TASK_MAX_COLLISIONS; x++) {
+ oldv = keys[x];
+ keys[x] = newv;
+ newv = oldv;
+ }
+
+ /* emit pid names, cannot use get_task_comm, as it's not exported on all kernel versions */
+ if (strlcpy(taskcomm, comm, TASK_COMM_LEN) == TASK_COMM_LEN - 1) {
+ /* append ellipses if comm has length of TASK_COMM_LEN - 1 */
+ strcat(taskcomm, "...");
+ }
+
+ marshal_thread_name(task->pid, taskcomm);
+ }
+}
+
+/* Sample every registered gator event source into the capture buffers at
+ * the given timestamp. Called from the sched_switch probe (sched_switch
+ * true) and from other sampling contexts; 'task' may be NULL, in which
+ * case per-process read_proc sources are skipped.
+ */
+static void collect_counters(u64 time, struct task_struct *task, bool sched_switch)
+{
+ int *buffer, len, cpu = get_physical_cpu();
+ long long *buffer64;
+ struct gator_interface *gi;
+
+ if (marshal_event_header(time)) {
+ /* Each event source supplies exactly one of read/read64. */
+ list_for_each_entry(gi, &gator_events, list) {
+ if (gi->read) {
+ len = gi->read(&buffer, sched_switch);
+ marshal_event(len, buffer);
+ } else if (gi->read64) {
+ len = gi->read64(&buffer64);
+ marshal_event64(len, buffer64);
+ }
+ if (gi->read_proc && task != NULL) {
+ len = gi->read_proc(&buffer64, task);
+ marshal_event64(len, buffer64);
+ }
+ }
+ /* Perf-time sync is emitted from cpu 0 only. */
+ if (cpu == 0)
+ gator_emit_perf_time(time);
+ /* Only check after writing all counters so that time and corresponding counters appear in the same frame */
+ buffer_check(cpu, BLOCK_COUNTER_BUF, time);
+
+ /* Commit buffers on timeout */
+ if (gator_live_rate > 0 && time >= per_cpu(gator_buffer_commit_time, cpu)) {
+ static const int buftypes[] = { NAME_BUF, COUNTER_BUF, BLOCK_COUNTER_BUF, SCHED_TRACE_BUF, ACTIVITY_BUF };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(buftypes); ++i)
+ gator_commit_buffer(cpu, buftypes[i], time);
+
+ /* spinlocks are noops on uniprocessor machines and mutexes do
+ * not work in sched_switch context in RT-Preempt full, so
+ * disable proactive flushing of the annotate frame on
+ * uniprocessor machines.
+ */
+#ifdef CONFIG_SMP
+ /* Try to preemptively flush the annotate buffer to reduce the chance of the buffer being full */
+ if (on_primary_core() && spin_trylock(&annotate_lock)) {
+ gator_commit_buffer(0, ANNOTATE_BUF, time);
+ spin_unlock(&annotate_lock);
+ }
+#endif
+ }
+ }
+}
+
+/* special case used during a suspend of the system */
+/* special case used during a suspend of the system */
+static void trace_sched_insert_idle(void)
+{
+ /* pid 0 (idle) with state 0 marks this cpu as idle in the trace. */
+ marshal_sched_trace_switch(0, 0);
+}
+
+/* Emit the metadata linking a task to its executable image: the exec
+ * cookie, the thread name, and the (cookie, tgid, pid) association.
+ */
+static void gator_trace_emit_link(struct task_struct *p)
+{
+ int cookie;
+ int cpu = get_physical_cpu();
+
+ cookie = get_exec_cookie(cpu, p);
+ emit_pid_name(p->comm, p);
+
+ marshal_link(cookie, p->tgid, p->pid);
+}
+
+/* New task created: emit its exec cookie/name link immediately. */
+GATOR_DEFINE_PROBE(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child))
+{
+ gator_trace_emit_link(child);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+/* Task exec'd a new image: refresh its cookie/name link. */
+GATOR_DEFINE_PROBE(sched_process_exec, TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm))
+{
+ gator_trace_emit_link(p);
+}
+
+/* task_rename's comm parameter became const in v3.15; both variants just
+ * re-emit the (possibly new) thread name.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
+GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, char *comm))
+#else
+GATOR_DEFINE_PROBE(task_rename, TP_PROTO(struct task_struct *task, const char *comm))
+#endif
+{
+ emit_pid_name(comm, task);
+}
+#endif
+
+/* Context-switch probe: classify why 'prev' stopped running, sample all
+ * counters, and record the switch to 'next'. The tracepoint prototype
+ * lost its rq argument in v2.6.35.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct rq *rq, struct task_struct *prev, struct task_struct *next))
+#else
+GATOR_DEFINE_PROBE(sched_switch, TP_PROTO(struct task_struct *prev, struct task_struct *next))
+#endif
+{
+ int state;
+ int cpu = get_physical_cpu();
+
+ per_cpu(in_scheduler_context, cpu) = true;
+
+ /* do as much work as possible before disabling interrupts */
+ /* Still TASK_RUNNING means prev was preempted (contention);
+ * otherwise it blocked, on I/O or on something else.
+ */
+ if (prev->state == TASK_RUNNING)
+ state = STATE_CONTENTION;
+ else if (prev->in_iowait)
+ state = STATE_WAIT_ON_IO;
+ else
+ state = STATE_WAIT_ON_OTHER;
+
+ per_cpu(collecting, cpu) = 1;
+ collect_counters(gator_get_time(), prev, true);
+ per_cpu(collecting, cpu) = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)
+ /* No sched_process_exec tracepoint before v3.4, so emit the link here. */
+ gator_trace_emit_link(next);
+#endif
+ marshal_sched_trace_switch(next->pid, state);
+
+ per_cpu(in_scheduler_context, cpu) = false;
+}
+
+/* Task struct is being freed: record the thread's exit in the trace. */
+GATOR_DEFINE_PROBE(sched_process_free, TP_PROTO(struct task_struct *p))
+{
+ marshal_sched_trace_exit(p->tgid, p->pid);
+}
+
+/* Empty IPI callback; running it forces a context switch on each cpu. */
+static void do_nothing(void *info)
+{
+ /* Intentionally do nothing */
+ (void)info;
+}
+
+/* Attach all scheduler tracepoints, unwinding in reverse order if any
+ * registration fails. Returns 0 on success, -1 on failure.
+ */
+static int register_scheduler_tracepoints(void)
+{
+ /* register tracepoints */
+ if (GATOR_REGISTER_TRACE(sched_process_fork))
+ goto fail_sched_process_fork;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ if (GATOR_REGISTER_TRACE(sched_process_exec))
+ goto fail_sched_process_exec;
+ if (GATOR_REGISTER_TRACE(task_rename))
+ goto fail_task_rename;
+#endif
+ if (GATOR_REGISTER_TRACE(sched_switch))
+ goto fail_sched_switch;
+ if (GATOR_REGISTER_TRACE(sched_process_free))
+ goto fail_sched_process_free;
+ pr_debug("gator: registered tracepoints\n");
+
+ /* Now that the scheduler tracepoint is registered, force a context
+ * switch on all cpus to capture what is currently running.
+ */
+ on_each_cpu(do_nothing, NULL, 0);
+
+ return 0;
+
+ /* unregister tracepoints on error */
+fail_sched_process_free:
+ GATOR_UNREGISTER_TRACE(sched_switch);
+fail_sched_switch:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ GATOR_UNREGISTER_TRACE(task_rename);
+fail_task_rename:
+ GATOR_UNREGISTER_TRACE(sched_process_exec);
+fail_sched_process_exec:
+#endif
+ GATOR_UNREGISTER_TRACE(sched_process_fork);
+fail_sched_process_fork:
+ pr_err("gator: tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n");
+
+ return -1;
+}
+
+/* Detach every tracepoint attached by register_scheduler_tracepoints(). */
+static void unregister_scheduler_tracepoints(void)
+{
+ GATOR_UNREGISTER_TRACE(sched_process_fork);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+ GATOR_UNREGISTER_TRACE(sched_process_exec);
+ GATOR_UNREGISTER_TRACE(task_rename);
+#endif
+ GATOR_UNREGISTER_TRACE(sched_switch);
+ GATOR_UNREGISTER_TRACE(sched_process_free);
+ pr_debug("gator: unregistered tracepoints\n");
+}
+
+/* End-of-session teardown: detach tracepoints and free the per-cpu
+ * thread-name caches allocated by gator_trace_sched_start().
+ */
+static void gator_trace_sched_stop(void)
+{
+ int cpu;
+
+ unregister_scheduler_tracepoints();
+
+ for_each_present_cpu(cpu) {
+ kfree(per_cpu(taskname_keys, cpu));
+ }
+}
+
+/* Start-of-session setup: allocate and zero the per-cpu thread-name
+ * caches, then attach the scheduler tracepoints.
+ * Returns 0 on success, -1 on allocation or registration failure.
+ * NOTE(review): on a mid-loop allocation failure the buffers already
+ * allocated are not freed here -- presumably the caller is expected to
+ * invoke gator_trace_sched_stop(); confirm.
+ */
+static int gator_trace_sched_start(void)
+{
+ int cpu, size;
+ int ret;
+
+ for_each_present_cpu(cpu) {
+ size = TASK_MAP_ENTRIES * TASK_MAX_COLLISIONS * sizeof(uint64_t);
+ per_cpu(taskname_keys, cpu) = kmalloc(size, GFP_KERNEL);
+ if (!per_cpu(taskname_keys, cpu))
+ return -1;
+ memset(per_cpu(taskname_keys, cpu), 0, size);
+ }
+
+ ret = register_scheduler_tracepoints();
+
+ return ret;
+}
+
+/* Hotplug-offline hook: mark this cpu as idle in the sched trace. */
+static void gator_trace_sched_offline(void)
+{
+ trace_sched_insert_idle();
+}
+
+/* One-time module init: allocate a gator counter key for each CPU-wait
+ * class, all disabled until enabled through gatorfs.
+ */
+static void gator_trace_sched_init(void)
+{
+ int i;
+
+ for (i = 0; i < CPU_WAIT_TOTAL; i++) {
+ cpu_wait_enabled[i] = 0;
+ sched_cpu_key[i] = gator_events_get_key();
+ }
+}
diff --git a/drivers/parrot/gator/mali/mali_kbase_gator_api.h b/drivers/parrot/gator/mali/mali_kbase_gator_api.h
new file mode 100644
index 00000000000000..5ed069797e36f8
--- /dev/null
+++ b/drivers/parrot/gator/mali/mali_kbase_gator_api.h
@@ -0,0 +1,219 @@
+/**
+ * Copyright (C) ARM Limited 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to collect hardware counters data from a Mali device.
+ */
+
+/* This define is used by the gator kernel module compile to select which DDK
+ * API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ * Version 1 API: DDK versions r1px, r2px
+ * Version 2 API: DDK versions r3px, r4px
+ * Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_hwcnt_dump_irq() to queue a non-blocking request for a
+ * counter dump. If this returns a non-zero value the request has been queued,
+ * otherwise the driver has been unable to do so (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_hwcnt_dump_complete() to test whether the previously
+ * requested dump has been successful. If this returns non-zero the counter dump
+ * has resolved, but the value of *success must also be tested as the dump
+ * may have not been successful. If it returns zero the counter dump was
+ * abandoned due to the device being busy (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ * kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ * In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ * hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ * hwcnt_name # pointer to name list
+ *
+ * u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ * # Iterate over each 64-counter block in this GPU configuration
+ * for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ * hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ * # Skip reserved type blocks - they contain no counters at all
+ * if( type == RESERVED_BLOCK ) {
+ * continue;
+ * }
+ *
+ * size_t name_offset = type * 64;
+ * size_t data_offset = i * 64;
+ *
+ * # Iterate over the names of the counters in this block type
+ * for( j = 0; j < 64; j++) {
+ * const char * name = hwcnt_name[name_offset+j];
+ *
+ * # Skip empty name strings - there is no counter here
+ * if( name[0] == '\0' ) {
+ * continue;
+ * }
+ *
+ * u32 data = hwcnt_data[data_offset+j];
+ *
+ * printk( "COUNTER: %s DATA: %u\n", name, data );
+ * }
+ * }
+ *
+ *
+ * Note that in most implementations you typically want to either SUM or
+ * AVERAGE multiple instances of the same counter if, for example, you have
+ * multiple shader cores or multiple L2 caches. The most sensible view for
+ * analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ * counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling kbase_gator_hwcnt_term_names().
+ * This function must only be called if init_names() returned a non-NULL value.
+ **/
+
+#define MALI_DDK_GATOR_API_VERSION 3
+
+#if !defined(MALI_TRUE)
+ #define MALI_TRUE ((uint32_t)1)
+#endif
+
+#if !defined(MALI_FALSE)
+ #define MALI_FALSE ((uint32_t)0)
+#endif
+
+/* Category of a 64-counter hardware block; indexes the names table in
+ * 64-entry strides (see the pseudo code above).
+ */
+enum hwc_type {
+ JM_BLOCK = 0, /* job manager */
+ TILER_BLOCK, /* tiler */
+ SHADER_BLOCK, /* shader core */
+ MMU_L2_BLOCK, /* MMU / L2 cache */
+ RESERVED_BLOCK /* padding block containing no counters */
+};
+
+struct kbase_gator_hwcnt_info {
+
+ /* Passed from Gator to kbase */
+
+ /* the bitmask of enabled hardware counters for each counter block */
+ uint16_t bitmask[4];
+
+ /* Passed from kbase to Gator */
+
+ /* ptr to counter dump memory */
+ void *kernel_dump_buffer;
+
+ /* size of counter dump memory */
+ uint32_t size;
+
+ /* the ID of the Mali device */
+ uint32_t gpu_id;
+
+ /* the number of shader cores in the GPU */
+ uint32_t nr_cores;
+
+ /* the number of core groups */
+ uint32_t nr_core_groups;
+
+ /* the memory layout of the performance counters */
+ enum hwc_type *hwc_layout;
+
+ /* the total number of hardware counter blocks */
+ uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ * specific information that will be returned to Gator. On entry Gator must have populated the
+ * 'bitmask' field with the counters it wishes to enable for each class of counter block.
+ * Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ * enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ * the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ * kbase_gator_hwcnt_get_names() for the index values of each counter for the current GPU.
+ *
+ * @return Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info A pointer to a structure containing the enabled counters passed from Gator and all the
+ * Mali specific information that will be returned to Gator.
+ * @param opaque_handles A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ * @param[out] success Non-zero on success, zero on failure.
+ *
+ * @return Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ * completed dump may not have dumped successfully, so the caller must test for both
+ * a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles A wrapper structure for kbase structures.
+ *
+ * @return Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_number_of_counters The total number of counters short names in the Mali devices' list.
+ *
+ * @return Pointer to an array of strings of length *total_number_of_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_number_of_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/drivers/parrot/gator/mali/mali_mjollnir_profiling_gator_api.h b/drivers/parrot/gator/mali/mali_mjollnir_profiling_gator_api.h
new file mode 100644
index 00000000000000..2bc0b037eee6c6
--- /dev/null
+++ b/drivers/parrot/gator/mali/mali_mjollnir_profiling_gator_api.h
@@ -0,0 +1,159 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MALI_MJOLLNIR_PROFILING_GATOR_API_H__
+#define __MALI_MJOLLNIR_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/*
+ * The number of processor cores. Update to suit your hardware implementation.
+ */
+#define MAX_NUM_FP_CORES (4)
+#define MAX_NUM_VP_CORES (1)
+#define MAX_NUM_L2_CACHE_CORES (1)
+
+enum counters {
+ /* Timeline activity */
+ ACTIVITY_VP_0 = 0,
+ ACTIVITY_FP_0,
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0,
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+
+ /* EGL Software Counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES Software Counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ COUNTER_FILMSTRIP,
+ COUNTER_FREQUENCY,
+ COUNTER_VOLTAGE,
+
+ NUMBER_OF_EVENTS
+};
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_3
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_3_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+/* Signifies that the system is able to report voltage and frequency numbers. */
+#define DVFS_REPORTED_BY_DDK 1
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+struct _mali_profiling_core_counters {
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+};
+
+/*
+ * For compatibility with utgard.
+ */
+struct _mali_profiling_l2_counter_values {
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+};
+
+struct _mali_profiling_mali_version {
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+};
+
+extern void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+extern u32 _mali_profiling_get_l2_counters(struct _mali_profiling_l2_counter_values *values);
+
+/*
+ * List of possible actions allowing DDK to be controlled by Streamline.
+ * The following numbers are used by DDK to control the frame buffer dumping.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MJOLLNIR_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gator/mali/mali_utgard_profiling_gator_api.h b/drivers/parrot/gator/mali/mali_utgard_profiling_gator_api.h
new file mode 100644
index 00000000000000..d6465312628ed1
--- /dev/null
+++ b/drivers/parrot/gator/mali/mali_utgard_profiling_gator_api.h
@@ -0,0 +1,197 @@
+/**
+ * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+/** The list of events supported by the Mali DDK. */
+enum {
+ /* Vertex processor activity */
+ ACTIVITY_VP_0 = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP_0, /* 1 */
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+ ACTIVITY_FP_4,
+ ACTIVITY_FP_5,
+ ACTIVITY_FP_6,
+ ACTIVITY_FP_7,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+ COUNTER_L2_1_C0,
+ COUNTER_L2_1_C1,
+ COUNTER_L2_2_C0,
+ COUNTER_L2_2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0, /*15*/
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+ COUNTER_FP_4_C0,
+ COUNTER_FP_4_C1,
+ COUNTER_FP_5_C0,
+ COUNTER_FP_5_C1,
+ COUNTER_FP_6_C0,
+ COUNTER_FP_6_C1,
+ COUNTER_FP_7_C0,
+ COUNTER_FP_7_C1, /* 32 */
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+struct _mali_profiling_core_counters {
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+};
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+struct _mali_profiling_l2_counter_values {
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+};
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+struct _mali_profiling_mali_version {
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+};
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+void _mali_profiling_control(u32 action, u32 value);
+
+u32 _mali_profiling_get_l2_counters(struct _mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gator/mali_midgard.mk b/drivers/parrot/gator/mali_midgard.mk
new file mode 100644
index 00000000000000..1b784d5c3d589f
--- /dev/null
+++ b/drivers/parrot/gator/mali_midgard.mk
@@ -0,0 +1,39 @@
+# Defines for Mali-Midgard driver
+EXTRA_CFLAGS += -DMALI_USE_UMP=1 \
+ -DMALI_LICENSE_IS_GPL=1 \
+ -DMALI_BASE_TRACK_MEMLEAK=0 \
+ -DMALI_DEBUG=0 \
+ -DMALI_ERROR_INJECT_ON=0 \
+ -DMALI_CUSTOMER_RELEASE=1 \
+ -DMALI_UNIT_TEST=0 \
+ -DMALI_BACKEND_KERNEL=1 \
+ -DMALI_NO_MALI=0
+
+# Root of the Mali DDK tree; defaults to the current directory.
+DDK_DIR ?= .
+# Older DDK layout (t6xx).
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/t6xx),)
+KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase
+OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/t6xx/kbase/osk
+endif
+
+# Newer DDK layout (midgard); overrides the t6xx paths when both exist.
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/midgard),)
+KBASE_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard
+OSK_DIR = $(DDK_DIR)/drivers/gpu/arm/midgard/osk
+EXTRA_CFLAGS += -DMALI_DIR_MIDGARD=1
+endif
+
+# DDKs shipping mali_kbase_gator_api.h expose the simplified gator API.
+ifneq ($(wildcard $(DDK_DIR)/drivers/gpu/arm/midgard/mali_kbase_gator_api.h),)
+EXTRA_CFLAGS += -DMALI_SIMPLE_API=1
+endif
+
+UMP_DIR = $(DDK_DIR)/include/linux
+
+# Include directories in the DDK
+EXTRA_CFLAGS += -I$(KBASE_DIR)/ \
+ -I$(KBASE_DIR)/.. \
+ -I$(OSK_DIR)/.. \
+ -I$(UMP_DIR)/.. \
+ -I$(DDK_DIR)/include \
+ -I$(KBASE_DIR)/osk/src/linux/include \
+ -I$(KBASE_DIR)/platform_dummy \
+ -I$(KBASE_DIR)/src
+
diff --git a/drivers/parrot/gpio/Kconfig b/drivers/parrot/gpio/Kconfig
new file mode 100644
index 00000000000000..b7b80cb1c7fcea
--- /dev/null
+++ b/drivers/parrot/gpio/Kconfig
@@ -0,0 +1,20 @@
+config GPIO_P7MU
+ tristate "Parrot P7MU GPIO"
+ depends on GPIOLIB && MFD_P7MU
+ default m
+ help
+ Builds the Parrot7 power Management Unit GPIO block driver.
+
+# Promptless symbol: presumably selected by platform code that needs
+# GPIO_P7MU forced on — confirm against mach-parrot7 board files.
+config GPIO_P7MU_BUILTIN
+ bool
+ select GPIO_P7MU
+
+config GPIO_HAVE_PARROT7
+ bool
+ depends on GPIOLIB && ARCH_PARROT7
+ default y
+ help
+ Builds the Parrot7 GPIO controller driver.
+
+config GPIO_PARROT7
+ bool "Parrot7 GPIO controller"
diff --git a/drivers/parrot/gpio/Makefile b/drivers/parrot/gpio/Makefile
new file mode 100644
index 00000000000000..d2a1b47d692ff3
--- /dev/null
+++ b/drivers/parrot/gpio/Makefile
@@ -0,0 +1,6 @@
+ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
+
+obj-$(CONFIG_GPIO_PARROT7) += p7-gpio.o
+
+CFLAGS_p7mu-gpio.o += -I$(srctree)/drivers/parrot
+obj-$(CONFIG_GPIO_P7MU) += p7mu-gpio.o
diff --git a/drivers/parrot/gpio/p7-gpio.c b/drivers/parrot/gpio/p7-gpio.c
new file mode 100644
index 00000000000000..fd416dd28a84df
--- /dev/null
+++ b/drivers/parrot/gpio/p7-gpio.c
@@ -0,0 +1,1411 @@
+/**
+ * linux/drivers/parrot/gpio/p7-gpio.c - Parrot7 GPIO pin controller driver
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Lionel Flandrin <lionel.flandrin@parrot.com>
+ * date: 06-Apr-2012
+ *
+ * This file is released under the GPL
+ *
+ * TODO:
+ * - implement power management support
+ * - optional: implement driver removal once irq_domain deletion is supported
+ * - optional: implement a better GPIO <-> interrupt mapping scheme to prevent
+ * platforms from providing mandatory mappings (using
+ * irq_create_mapping() to perform dynamic mapping at gpio_to_irq()
+ * time would have the side effect of creating mappings each time
+ * gpio_export() is called, particularly when browsing sysfs
+ * tree...).
+ */
+
+#include <linux/gpio.h>
+#include <asm/gpio.h>
+#include <asm/mach/irq.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/sysfs.h>
+#include <mach/irqs.h>
+#include "p7-gpio.h"
+
+#define P7_GPIO_FILTER
+
+#define P7_GPIO_START_FILTER_IRQ (0x8)
+#define P7_GPIO_PORT_DATA_BASE (0x0)
+#define P7_GPIO_PORT_DIR_BASE (0x7000)
+#define P7_GPIO_STATUS (0x7400)
+#define P7_GPIO_DEBOUNCE (0x7404)
+#define P7_GPIO_PHASE_IT (0x7420)
+#define P7_GPIO_PHASE (0x7600)
+#define P7_GPIO_PHASE_FROM_IRQ(IRQ) ((IRQ - P7_GPIO_START_FILTER_IRQ) / 2)
+#define P7_GPIO_PHASE_VAL(PHASE) (P7_GPIO_PHASE + (PHASE * 0x20))
+#define P7_GPIO_PHASE_ITEN(PHASE) (P7_GPIO_PHASE + (PHASE * 0x20) + 0x4)
+#define P7_GPIO_PHASE_ITACK(PHASE) (P7_GPIO_PHASE + (PHASE * 0x20) + 0x8)
+#define P7_GPIO_PHASE_IFILTER(PHASE) (P7_GPIO_PHASE + (PHASE * 0x20) + 0xC)
+#define P7_GPIO_INTC_BASE (0x7800)
+
+#define P7_GPIO_INTC_POSEDGE (0 << 16)
+#define P7_GPIO_INTC_ALLEDGE (1 << 16)
+#define P7_GPIO_INTC_NEGEDGE (2 << 16)
+#define P7_GPIO_INTC_LEVEL (3 << 16)
+#define P7_GPIO_INTC_HIGH (P7_GPIO_INTC_LEVEL | (0 << 18))
+#define P7_GPIO_INTC_LOW (P7_GPIO_INTC_LEVEL | (1 << 18))
+#define P7_GPIO_INTC_TYPE_MASK (7 << 16)
+#define P7_GPIO_INTC_ITCLR (1 << 19)
+#define P7_GPIO_INTC_ITEN (1 << 20)
+#define P7_GPIO_INTC_DEBOUNCE (1 << 21)
+#define P7_GPIO_INTC_FILTER_MODE (1 << 22)
+#define P7_GPIO_INTC_ANA_MEASURE (1 << 23)
+#define P7_GPIO_INTC_PHASE_MEASURE (1 << 24)
+#define P7_GPIO_ITFILTER_MODE_POS 24
+
+#define P7_GPIO_STATUS_IRQ_MASK ((1 << P7_GPIO_IRQS) - 1)
+#define P7_GPIO_STATUS_PHASE (1 << 22)
+
+#define P7_GPIO_INVAL -1
+
+/* Per-hardware-interrupt bookkeeping for the controller's 20 IRQ slots. */
+struct gpio_irq {
+ int gpio;    /* gpio mapped to this hw irq, or P7_GPIO_INVAL when free */
+ u8 filter;   /* non-zero when this slot uses the phase/filter block */
+ u32 measure; /* last phase measure value latched by the irq ack path */
+ struct timespec ts; /* monotonic timestamp of that measure */
+};
+
+/* Driver state for one P7 GPIO controller instance. */
+struct p7gpio {
+ unsigned long base;     /* ioremapped register base */
+ struct resource *ioarea; /* claimed MMIO region */
+ struct clk *clk;        /* controller clock (used for debounce timing) */
+ int irq;                /* single upstream interrupt line */
+ struct resource *mux_irqs;
+ struct gpio_chip chip;  /* gpiolib interface */
+ spinlock_t lock;        /* protects registers and irq_map */
+ const struct p7gpio_plat_data *pdata;
+ struct gpio_irq irq_map[P7_GPIO_IRQS]; /* hw irq slot -> gpio mapping */
+ struct irq_domain *irq_domain;
+#ifdef CONFIG_PM_SLEEP
+ u32 *saved_regs;
+#endif
+};
+
+/*
+ * GPIO chip callbacks
+ */
+
+/* gpiolib .get: read the current level of @gpio; returns 0 or 1. */
+static int p7gpio_get(struct gpio_chip *c, unsigned gpio)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 port = p7gpio->base + P7_GPIO_PORT_DATA_BASE;
+
+ /* Access to the data register is a bit peculiar: each port contains 8
+ * GPIOs and can be accessed through 255 different addresses. The bits 2
+ * to 10 of these addresses is actually a mask applied to the value
+ * present in this register (bits [1:0] are not part of the mask and are
+ * always 0).
+ */
+
+ /* first we compute the base address for the port containing this
+ * GPIO */
+ port += (gpio / 8) * 0x400;
+ /* Then we add the mask */
+ port |= 1U << ((gpio % 8) + 2);
+
+ /* since all the other GPIOs in this port are masked, we don't have to
+ * check for the specific bit */
+ return !!(__raw_readl(port) & 0xff);
+}
+
+/* gpiolib .set: drive @gpio to level @v (address-encoded mask, see p7gpio_get). */
+static void p7gpio_set(struct gpio_chip *c, unsigned gpio, int v)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 port = p7gpio->base + P7_GPIO_PORT_DATA_BASE;
+
+ port += (gpio / 8) * 0x400;
+ port |= 1U << ((gpio % 8) + 2);
+
+ /* the address mask selects only this GPIO, so writing all ones or all
+ * zeroes touches just the targeted bit */
+ __raw_writel(v ? 0xff : 0x00, port);
+}
+
+/* spinlock is taken by caller */
+/* gpiolib .direction_input: clear @gpio's bit in its port direction register. */
+/* spinlock is taken by caller */
+static int p7gpio_direction_input(struct gpio_chip *c, unsigned gpio)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 port = p7gpio->base + P7_GPIO_PORT_DIR_BASE;
+ u32 mask;
+
+ /* There are 8 GPIOs per port */
+ port += (gpio / 8) * 4;
+ mask = ~(1U << (gpio % 8)) & 0xff;
+
+ __raw_writel(__raw_readl(port) & mask, port);
+
+ return 0;
+}
+
+/* spinlock is taken by caller */
+/* gpiolib .direction_output: preset the level, then flip @gpio to output. */
+/* spinlock is taken by caller */
+static int p7gpio_direction_output(struct gpio_chip *c, unsigned gpio, int v)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 port = p7gpio->base + P7_GPIO_PORT_DIR_BASE;
+ u32 mask;
+
+ /* There are 8 GPIOs per port */
+ port += (gpio / 8) * 4;
+ mask = 1U << (gpio % 8);
+
+ /* Set the GPIO value before we enable it. */
+ p7gpio_set(c, gpio, v);
+
+ __raw_writel((__raw_readl(port) | mask) & 0xff, port);
+
+ return 0;
+}
+
+/* Return non-zero when @gpio is currently configured as an output. */
+static int p7gpio_is_output(struct gpio_chip *c, unsigned gpio)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 port = p7gpio->base + P7_GPIO_PORT_DIR_BASE;
+ u32 mask;
+
+ /* There are 8 GPIOs per port */
+ port += (gpio / 8) * 4;
+ mask = 1U << (gpio % 8);
+
+ return !!(__raw_readl(port) & mask);
+}
+
+/*
+ * Recover the internal interrupt number for a gpio.
+ */
+/*
+ * Recover the internal interrupt number for a gpio.
+ * Returns the hw irq slot [0, P7_GPIO_IRQS), or -1 when @gpio has no
+ * interrupt mapping. Takes the driver lock for the irq_map scan.
+ */
+static inline int p7gpio_to_hw_irq(struct p7gpio *p7gpio, unsigned gpio)
+{
+ unsigned i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ for (i = 0; i < P7_GPIO_IRQS; i++)
+ if (p7gpio->irq_map[i].gpio == gpio)
+ break;
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ return i == P7_GPIO_IRQS ? -1: i;
+}
+
+/*
+ * Configure the debounce delay.
+ * d is the debounce delay in microseconds.
+ *
+ * Hardware limitations: only the first 8 interruptions can be debounced, and
+ * the debouncing delay is global for all those GPIOs. This means that the
+ * debounce value for *all* debounced GPIO will in fact be the latest non-zero
+ * value given to this function.
+ *
+ * spinlock is taken by caller
+ */
+/* XXX this not working for p7r3 : we have a 8 bit prediv and per gpio debounce */
+/*
+ * Configure the debounce delay for @gpio (delay @d in microseconds).
+ * Only the first 8 hw interrupts are debounce-capable and the delay is
+ * global: the last non-zero value programmed wins for all of them.
+ * d == 0 only clears the per-interrupt debounce flag.
+ *
+ * Fixes vs. original: hw_irq was declared unsigned and compared against
+ * -1 (worked only through implicit conversion); d * clk_khz could wrap
+ * before the 24-bit range check, silently programming a wrong delay.
+ */
+static int p7gpio_set_debounce(struct gpio_chip *c, unsigned gpio, unsigned d)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ u32 *addr = (u32 *)(p7gpio->base + P7_GPIO_INTC_BASE);
+ int hw_irq;
+
+ hw_irq = p7gpio_to_hw_irq(p7gpio, gpio);
+ if (hw_irq < 0)
+ /* This GPIO is not mapped to any IRQ */
+ return -EINVAL;
+
+ /* Only the first 8 interrupts are debounce-capable */
+ if (hw_irq >= 8)
+ return -EINVAL;
+
+ addr += hw_irq;
+
+ if (d != 0) {
+ unsigned long clk_khz = clk_get_rate(p7gpio->clk) / 1000;
+ unsigned long ticks;
+
+ /* reject delays whose tick conversion would overflow */
+ if (clk_khz == 0 || d > ULONG_MAX / clk_khz)
+ return -EINVAL;
+
+ /* convert the delay in a number of ticks from the GPIO clock */
+ ticks = d * clk_khz / 1000;
+
+ /* debounce register is only 24 bit wide */
+ if (ticks > 0xffffff || ticks == 0)
+ return -EINVAL;
+
+ /* reconfigure the global debounce delay with the new value. */
+ __raw_writel(ticks, p7gpio->base + P7_GPIO_DEBOUNCE);
+
+ /* enable debouncing on this interrupt */
+ __raw_writel(__raw_readl(addr) | P7_GPIO_INTC_DEBOUNCE,
+ addr);
+ } else {
+ /* if delay is 0 we simply remove the debounce flag from the
+ * interrupt register and leave the debounce value untouched. */
+ __raw_writel(__raw_readl(addr) & ~P7_GPIO_INTC_DEBOUNCE,
+ addr);
+ }
+
+ return 0;
+}
+
+/*
+ * gpiolib .to_irq: translate @gpio to its Linux virtual interrupt.
+ * Returns -ENXIO when the gpio has no hw interrupt slot.
+ *
+ * Fix vs. original: hw_irq was unsigned yet compared against -1, which
+ * relied on implicit integer conversion; use a signed int matching
+ * p7gpio_to_hw_irq()'s return type.
+ */
+static int p7gpio_to_irq(struct gpio_chip *c, unsigned gpio)
+{
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ int hw_irq;
+
+ hw_irq = p7gpio_to_hw_irq(p7gpio, gpio);
+
+ if (hw_irq < 0)
+ return -ENXIO;
+
+ return irq_linear_revmap(p7gpio->irq_domain, hw_irq);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+/* debugfs dump: one line per requested gpio (direction, level, mapped IRQ). */
+static void p7gpio_dbg_show(struct seq_file *s, struct gpio_chip *c)
+{
+ unsigned i;
+ const char *label;
+
+ for (i = 0; i < c->ngpio; i++) {
+ label = gpiochip_is_requested(c, i);
+
+ /* only report gpios somebody has claimed */
+ if (label) {
+ int irq;
+
+ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
+ c->base + i, label,
+ p7gpio_is_output(c, i) ? "out" : "in ",
+ p7gpio_get(c, i) ? "hi" : "lo");
+
+ irq = gpio_to_irq(c->base + i);
+ if (irq >= 0)
+ seq_printf(s, " [IRQ %d]", irq);
+
+ seq_printf(s, "\n");
+ }
+ }
+}
+
+#define P7GPIO_DBG_SHOW p7gpio_dbg_show
+
+#else /* CONFIG_DEBUG_FS */
+
+#define P7GPIO_DBG_SHOW NULL
+
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_PINCTRL
+
+#include <linux/pinctrl/consumer.h>
+
+/* Request pinctrl subsystem to switch pin as gpio. */
+/* Request pinctrl subsystem to switch pin as gpio. */
+static int p7gpio_request(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+/* Tell pinctrl subsystem we are not using pin as a gpio anymore. */
+/* Tell pinctrl subsystem we are not using pin as a gpio anymore. */
+static void p7gpio_free(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ /* When a PAD is disabled it's put back into GPIO function. We have to
+ * make sure we're not driving anything. */
+ p7gpio_direction_input(chip, offset);
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+#else /* CONFIG_PINCTRL */
+
+#define p7gpio_request NULL
+#define p7gpio_free NULL
+
+#endif /* CONFIG_PINCTRL */
+
+/* Template gpio_chip copied into each probed instance (ngpio/dev set in probe). */
+static const struct gpio_chip p7gpio_chip_defaults = {
+ .request = p7gpio_request,
+ .free = p7gpio_free,
+ .get = p7gpio_get,
+ .set = p7gpio_set,
+ .direction_input = p7gpio_direction_input,
+ .direction_output = p7gpio_direction_output,
+ .set_debounce = p7gpio_set_debounce,
+ .to_irq = p7gpio_to_irq,
+ .dbg_show = P7GPIO_DBG_SHOW,
+ .base = P7_FIRST_GPIO,
+ .label = P7GPIO_DRV_NAME,
+ .can_sleep = 0, /* P7 GPIO functions are IRQ-safe */
+};
+
+/*
+ * GPIO IRQ callbacks
+ */
+
+
+/*
+ * p7gpio_filter_irq_reg: Enable, disable, ack filtered gpios
+ */
+/*
+ * Acknowledge a phase/filter interrupt in its GPIO_PHASEx_ITACK register
+ * and, for "measure done" irqs, latch the measure value and a timestamp
+ * into the companion (odd) irq_map slot for later sysfs readout.
+ */
+static inline void p7gpio_filter_irq_ack(struct p7gpio *p7gpio, struct irq_data *d)
+{
+ unsigned long flags;
+ unsigned int reg;
+ u32 addr;
+ int phase;
+
+ phase = P7_GPIO_PHASE_FROM_IRQ(d->hwirq);
+ addr = p7gpio->base + P7_GPIO_PHASE_ITACK(phase);
+ /*
+ * even hwirq means measure done ->
+ * bit 0 of gpio_phasex_itack register
+ * odd hwirq means filter it available ->
+ * bit 1 of gpio_phasex_itack register
+ */
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ reg = __raw_readl(addr);
+ reg |= 1 << d->hwirq % 2;
+ __raw_writel(reg, addr);
+
+ if (!(d->hwirq % 2)) { /*measure done*/
+ addr = p7gpio->base + P7_GPIO_PHASE_VAL(phase);
+ p7gpio->irq_map[d->hwirq + 1].measure = __raw_readl(addr);
+ ktime_get_ts(&p7gpio->irq_map[d->hwirq + 1].ts);
+ }
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/*
+ * Enable (val != 0) or disable (val == 0) the phase/filter interrupt for
+ * @d in the relevant GPIO_PHASEx_ITEN register.
+ *
+ * even hwirq -> "measure done" enable bit (bit 0)
+ * odd hwirq -> "filter it available" enable bit (bit 1)
+ *
+ * Fix vs. original: "reg &= ~(val << bit)" was a no-op for val == 0
+ * (~(0 << bit) == ~0), so masking never actually disabled the
+ * interrupt. Clear the bit unconditionally, then set it when enabling.
+ */
+static inline void p7gpio_filter_irq_msk(struct p7gpio *p7gpio, struct irq_data *d, u8 val)
+{
+ unsigned long flags;
+ unsigned int reg;
+ u32 addr;
+ int phase;
+
+ phase = P7_GPIO_PHASE_FROM_IRQ(d->hwirq);
+ addr = p7gpio->base + P7_GPIO_PHASE_ITEN(phase);
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ reg = __raw_readl(addr);
+ reg &= ~(1U << (d->hwirq % 2));
+ if (val)
+ reg |= 1U << (d->hwirq % 2);
+ __raw_writel(reg, addr);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/* irq_chip .irq_ack: clear a pending interrupt (filtered irqs go through
+ * the phase ITACK path instead of the INTC register). */
+static void p7gpio_irq_ack(struct irq_data *d)
+{
+ struct p7gpio *p7gpio = irq_data_get_irq_chip_data(d);
+ u32 *addr = (u32 *)(p7gpio->base + P7_GPIO_INTC_BASE);
+ unsigned long flags;
+
+ BUG_ON(d->hwirq >= P7_GPIO_IRQS);
+
+ if (p7gpio->irq_map[d->hwirq].filter){
+ p7gpio_filter_irq_ack(p7gpio, d);
+ return;
+ }
+ addr += d->hwirq;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ /* in order to clear the interrupt, we have to write a 1 to INTCLR and
+ * then a 0 to re-enable the interrupt. */
+ __raw_writel(__raw_readl(addr) | P7_GPIO_INTC_ITCLR, addr);
+ __raw_writel(__raw_readl(addr) & ~P7_GPIO_INTC_ITCLR, addr);
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/* irq_chip .irq_unmask: set ITEN for plain irqs, or enable via the
+ * phase/filter path for filtered irqs. */
+static void p7gpio_irq_unmask(struct irq_data *d)
+{
+ struct p7gpio *p7gpio = irq_data_get_irq_chip_data(d);
+ u32 *addr = (u32 *)(p7gpio->base + P7_GPIO_INTC_BASE);
+ unsigned long flags;
+
+ BUG_ON(d->hwirq >= P7_GPIO_IRQS);
+
+ if (p7gpio->irq_map[d->hwirq].filter) {
+ p7gpio_filter_irq_msk(p7gpio, d, 1);
+ return;
+ }
+
+ addr += d->hwirq;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ __raw_writel(__raw_readl(addr) | P7_GPIO_INTC_ITEN, addr);
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/* irq_chip .irq_mask: clear ITEN for plain irqs, or disable via the
+ * phase/filter path for filtered irqs. */
+static void p7gpio_irq_mask(struct irq_data *d)
+{
+ struct p7gpio *p7gpio = irq_data_get_irq_chip_data(d);
+ u32 *addr;
+ unsigned long flags;
+
+ BUG_ON(d->hwirq >= P7_GPIO_IRQS);
+ BUG_ON(!p7gpio);
+
+ if (p7gpio->irq_map[d->hwirq].filter) {
+ p7gpio_filter_irq_msk(p7gpio, d, 0);
+ return;
+ }
+
+ addr = (u32 *)(p7gpio->base + P7_GPIO_INTC_BASE) + d->hwirq;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ __raw_writel(__raw_readl(addr) & ~P7_GPIO_INTC_ITEN, addr);
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/*
+ * irq_chip .irq_set_type: program the trigger condition in the INTC
+ * register and switch the flow handler between edge and level handling.
+ * Returns -EINVAL for unsupported trigger types.
+ */
+static int p7gpio_irq_type(struct irq_data *d, unsigned trigger)
+{
+ struct p7gpio *p7gpio = irq_data_get_irq_chip_data(d);
+ u32 *addr = (u32 *)(p7gpio->base + P7_GPIO_INTC_BASE);
+ u32 val;
+ unsigned long flags;
+ irq_flow_handler_t irqf;
+
+ BUG_ON(d->hwirq >= P7_GPIO_IRQS);
+
+ addr += d->hwirq;
+
+ switch (trigger & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = P7_GPIO_INTC_ALLEDGE;
+ irqf = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = P7_GPIO_INTC_POSEDGE;
+ irqf = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = P7_GPIO_INTC_NEGEDGE;
+ irqf = handle_edge_irq;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = P7_GPIO_INTC_HIGH;
+ irqf = handle_level_irq;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = P7_GPIO_INTC_LOW;
+ irqf = handle_level_irq;
+ break;
+ default:
+ /* uh? */
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ /* set the appropriate IRQ handler */
+ __irq_set_handler_locked(d->irq, irqf);
+ /* configure the GPIO controller's interrupt */
+ __raw_writel((__raw_readl(addr) & ~P7_GPIO_INTC_TYPE_MASK) | val,
+ addr);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Demultiplex phase/filter interrupts: when the global status flags a
+ * phase event, walk GPIO_PHASE_IT and fire the virtual irq of each set
+ * bit. Phase hw irqs start at slot 8 (hence the initial offset).
+ */
+static inline void p7gpio_irq_phase_handler(struct p7gpio *p7gpio, u32 status)
+{
+ unsigned long hw_irq = 8;
+ u32 phase_status;
+
+ if (status & P7_GPIO_STATUS_PHASE) {
+ phase_status = __raw_readl(p7gpio->base + P7_GPIO_PHASE_IT);
+ } else
+ return;
+
+ while (phase_status) {
+ int first_bit = ffs(phase_status);
+ unsigned virq;
+
+ hw_irq += first_bit;
+ phase_status >>= first_bit;
+
+ virq = irq_linear_revmap(p7gpio->irq_domain, hw_irq - 1);
+ generic_handle_irq(virq);
+ }
+}
+
+/*
+ * Chained handler for the controller's single upstream IRQ line: first
+ * dispatch phase/filter events, then walk the plain-interrupt status
+ * bits and fire each mapped virtual irq.
+ */
+static void p7gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct p7gpio *p7gpio = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long hw_irq = 0;
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ status = __raw_readl(p7gpio->base + P7_GPIO_STATUS);
+
+ p7gpio_irq_phase_handler(p7gpio, status);
+
+ status &= P7_GPIO_STATUS_IRQ_MASK;
+
+ while (status) {
+ int first_bit = ffs(status);
+ unsigned virq;
+
+ hw_irq += first_bit;
+ status >>= first_bit;
+
+ virq = irq_linear_revmap(p7gpio->irq_domain, hw_irq - 1);
+
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+/* irq_chip shared by all gpio interrupts of this controller. */
+static struct irq_chip p7gpio_irq_chip = {
+ .name = P7GPIO_DRV_NAME,
+ .irq_ack = p7gpio_irq_ack,
+ .irq_unmask = p7gpio_irq_unmask,
+ .irq_mask = p7gpio_irq_mask,
+ .irq_set_type = p7gpio_irq_type,
+};
+
+/*
+ * IRQ domain interface
+ */
+
+/* irq_domain .map: wire a virtual irq to our irq_chip with edge handling
+ * as the default flow (hardware default is POSEDGE). */
+static int p7gpio_irq_domain_map(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ struct p7gpio *p7gpio = domain->host_data;
+
+ /* Register the GPIO interrupt */
+ irq_set_chip_data(virq, p7gpio);
+ /* By default the interrupt triggers on POSEDGE */
+ irq_set_chip_and_handler(virq, &p7gpio_irq_chip, handle_edge_irq);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+/* irq_domain .unmap: detach the chip and invalidate the virtual irq. */
+static void p7gpio_irq_domain_unmap(struct irq_domain *domain,
+ unsigned int virq)
+{
+ irq_set_chip(virq, NULL);
+ set_irq_flags(virq, 0);
+}
+
+/* Linear domain ops covering the controller's P7_GPIO_IRQS slots. */
+static struct irq_domain_ops p7gpio_irq_domain_ops = {
+ .map = p7gpio_irq_domain_map,
+ .unmap = p7gpio_irq_domain_unmap,
+};
+
+/*
+ * GPIO <=> IRQ mapping
+ *
+ * The P7 GPIO Controller can monitor up to 20 interruption conditions. Each one
+ * of these interrupts can be mapped to any single GPIO.
+ *
+ * So for instance we can have GPIO interrupt 2 mapped to the falling edge of
+ * GPIO 40, and interrupt 5 mapped to a high level of gpio 8, or any other
+ * combination. This mapping is static and should be declared in the
+ * platform_data. In this fictional case it'd give us:
+ *
+ * static struct p7gpio_irq_map my_gpio_irq_map[] = {
+ * { .hw_irq = 2, .gpio = 40 },
+ * { .hw_irq = 5, .gpio = 8 },
+ * };
+ *
+ * The linux gpiolib interface doesn't let us change this mapping at runtime,
+ * but the gpio_to_irq function can be used to find the IRQ mapped to a GPIO (if
+ * it exists).
+ */
+/*
+ * Claim a free hw irq slot for @gpio, program the INTC register and
+ * create the irq_domain mapping. Returns 0 on success (or when @gpio is
+ * already mapped), -EINVAL for a bad gpio or mapping failure, -EBUSY
+ * when all slots are taken.
+ */
+static int p7gpio_interrupt_register(struct p7gpio *p7gpio, unsigned gpio)
+{
+ unsigned hw_irq;
+ unsigned long flags;
+ int ret = 0;
+ u32 addr = p7gpio->base + P7_GPIO_INTC_BASE;
+ u32 reg;
+
+ if (gpio >= p7gpio->chip.ngpio) {
+ return -EINVAL;
+ }
+
+ /* check if it is already mapped */
+ if (p7gpio_to_hw_irq(p7gpio, gpio) >= 0)
+ return 0;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ for (hw_irq = 0; hw_irq < P7_GPIO_IRQS; hw_irq++) {
+ if (p7gpio->irq_map[hw_irq].gpio == P7_GPIO_INVAL)
+ break;
+ }
+ if (hw_irq == P7_GPIO_IRQS) {
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+ goto undo;
+ }
+
+ /* update the IRQ <=> GPIO mapping */
+ p7gpio->irq_map[hw_irq].gpio = gpio;
+ p7gpio->irq_map[hw_irq].filter = 0;
+
+ /* Compute the address of the interrupt register */
+ addr += hw_irq * 4;
+
+ /* configure the interrupt in the GPIO controller and clear any
+ * pending IT */
+ reg = (__raw_readl(addr) & ~0xff) | gpio | P7_GPIO_INTC_ITCLR;
+ __raw_writel(reg, addr);
+
+ /* We have to put the ITCLR bit back to 0 (see
+ * p7gpio_irq_ack above) */
+ reg &= ~P7_GPIO_INTC_ITCLR;
+ __raw_writel(reg, addr);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ /* Create the mapping for this interrupt in our irq_domain */
+ if (irq_create_mapping(p7gpio->irq_domain, hw_irq) == 0) {
+ ret = -EINVAL;
+ goto undo;
+ }
+ return 0;
+
+/* NOTE(review): "undo" performs no rollback — on irq_create_mapping
+ * failure the irq_map slot stays claimed; confirm whether that is
+ * intentional. */
+undo:
+ return ret;
+}
+
+/* Release @gpio's hw irq slot (if any) and dispose its domain mapping. */
+static void p7gpio_interrupt_free(struct p7gpio *p7gpio, unsigned gpio)
+{
+ unsigned hw_irq;
+ unsigned long flags;
+
+ if (gpio >= p7gpio->chip.ngpio) {
+ return;
+ }
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+ for (hw_irq = 0; hw_irq < P7_GPIO_IRQS; hw_irq++) {
+ if (p7gpio->irq_map[hw_irq].gpio == gpio) {
+ p7gpio->irq_map[hw_irq].gpio = P7_GPIO_INVAL;
+ irq_dispose_mapping(irq_find_mapping(
+ p7gpio->irq_domain, hw_irq));
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+}
+
+/* gpiochip_find() match callback: pick the chip labelled P7GPIO_DRV_NAME. */
+static int p7gpio_match_chip (struct gpio_chip *chip, const void *data)
+{
+ return strcmp(chip->label, P7GPIO_DRV_NAME) == 0;
+}
+
+/*
+ * Public entry point: map a global gpio number onto one of the
+ * controller's interrupt slots. Returns 0 on success, -EINVAL when the
+ * chip is absent or the gpio is out of range.
+ */
+int p7_gpio_interrupt_register(unsigned gpio)
+{
+ /* we use gpiochip_find instead of gpio_to_chip because it take a lock
+ and can be used on a gpio that is not requested
+ */
+ struct gpio_chip *c = gpiochip_find(NULL, p7gpio_match_chip);
+ if (c) {
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ if (gpio >= c->base && gpio < c->base + c->ngpio)
+ return p7gpio_interrupt_register(p7gpio, gpio - c->base);
+ }
+ return -EINVAL;
+}
+
+#ifdef P7_GPIO_FILTER
+/*
+ * Record @gpio in irq_map slot @hw_irq and, when a measure mode is
+ * requested, program the INTC register with the gpio number and the
+ * PHASE_MEASURE flag. Caller holds the driver spinlock.
+ *
+ * NOTE(review): the composed intC value is written to hardware only
+ * when mode != GPIO_NO_MEASURE — for pure filtering the register is
+ * left untouched; confirm this is intentional.
+ */
+static inline void p7gpio_interrupt_register_gpio(struct p7gpio *p7gpio,
+ int gpio, u8 filter, s8 mode, unsigned hw_irq)
+{
+ u32 addr_intC;
+ u32 intC = 0;
+
+ addr_intC = p7gpio->base + P7_GPIO_INTC_BASE + (hw_irq * 4);
+
+ /* read regs */
+ intC = (__raw_readl(addr_intC) & P7_GPIO_INTC_TYPE_MASK);
+
+ /* set gpio */
+ intC |= (gpio & 0xff);
+
+ /* mode */
+ if (mode != GPIO_NO_MEASURE) {
+ intC |= P7_GPIO_INTC_PHASE_MEASURE;
+ __raw_writel(intC, addr_intC);
+ }
+
+ p7gpio->irq_map[hw_irq].gpio = gpio;
+ p7gpio->irq_map[hw_irq].filter = filter;
+}
+
+/*
+ * Program the phase block for @phase: measure mode bits and filter
+ * threshold in GPIO_PHASEx_IFILTER, then the matching enable bits
+ * (bit 0 = measure done, bit 1 = filter) in GPIO_PHASEx_ITEN.
+ * Caller holds the driver spinlock.
+ */
+static inline void p7gpio_interrupt_configure(struct p7gpio *p7gpio,
+ int phase, u8 filter, s8 mode)
+{
+ u32 addr_itFilter, addr_itEn;
+
+ /* regs to write */
+ u32 itFilter = 0;
+ u32 itEn = 0;
+
+ addr_itFilter = p7gpio->base + P7_GPIO_PHASE_IFILTER(phase);
+ addr_itEn = p7gpio->base + P7_GPIO_PHASE_ITEN(phase);
+
+ /* keep the current filter register content; itEn is rebuilt from scratch */
+ itFilter = __raw_readl(addr_itFilter);
+
+ /* mode */
+ if (mode != GPIO_NO_MEASURE) {
+ itFilter |= (mode & 0x3) << P7_GPIO_ITFILTER_MODE_POS;
+ itEn = 1 << 0;
+ }
+
+ /* filter */
+ if (filter) {
+ itFilter |= (filter & 0xff);
+ itEn |= 1 << 1;
+ }
+
+ __raw_writel(itFilter, addr_itFilter);
+ __raw_writel(itEn, addr_itEn);
+}
+
+/*
+ * Reserve a pair of consecutive phase-capable hw irq slots ([8..19])
+ * for a phase measure between @gpio1 and @gpio2, program the phase
+ * block and create both irq_domain mappings.
+ *
+ * Returns 0 on success (or when @gpio1 is already mapped), -EINVAL on
+ * bad gpio or mapping failure, -EBUSY when no slot pair is free.
+ *
+ * Fix vs. original: the -EBUSY path returned with the spinlock still
+ * held and interrupts disabled, deadlocking the next lock acquisition.
+ */
+static int p7gpio_measure_interrupt_register(struct p7gpio *p7gpio,
+ unsigned gpio1, unsigned gpio2, u16 filter, s8 mode)
+{
+ unsigned hw_irq;
+ unsigned long flags;
+ int ret = 0;
+ int phase;
+
+ if (gpio1 >= p7gpio->chip.ngpio) {
+ return -EINVAL;
+ }
+
+ /* check if it is already mapped */
+ if (p7gpio_to_hw_irq(p7gpio, gpio1) >= 0)
+ return 0;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+
+ /* If gpio2 is set, it means that phase measure is enabled.
+ * If phase measure is enabled, it should be associated to irq range [8-19]
+ * Phase measure uses two consecutives irqs.
+ */
+ for (hw_irq = 8; hw_irq < P7_GPIO_IRQS; hw_irq+=2) {
+ if ((p7gpio->irq_map[hw_irq].gpio == P7_GPIO_INVAL) &&
+ (p7gpio->irq_map[hw_irq+1].gpio == P7_GPIO_INVAL))
+ break;
+ }
+
+ if (hw_irq == P7_GPIO_IRQS) {
+ /* drop the lock before bailing out (the original leaked it) */
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+ return -EBUSY;
+ }
+
+ p7gpio_interrupt_register_gpio(p7gpio, gpio1,
+ filter, mode, hw_irq);
+
+ p7gpio_interrupt_register_gpio(p7gpio, gpio2,
+ filter, mode, hw_irq + 1);
+
+ phase = P7_GPIO_PHASE_FROM_IRQ(hw_irq);
+ p7gpio_interrupt_configure(p7gpio, phase, filter, mode);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ /* Create the mapping for this interrupt in our irq_domain */
+ if (irq_create_mapping(p7gpio->irq_domain, hw_irq) == 0) {
+ ret = -EINVAL;
+ }
+ /* Create the mapping for this interrupt in our irq_domain */
+ if (irq_create_mapping(p7gpio->irq_domain, hw_irq+1) == 0) {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * This function is called instead of p7gpio_interrupt_register when need to
+ * register filtred gpios
+ */
+/*
+ * Register @gpio1 as a filtered interrupt: reserve one phase-capable hw
+ * irq slot ([8..19]), program the phase block and create the domain
+ * mapping. Returns 0 on success (or when already mapped), -EINVAL on
+ * bad gpio or mapping failure, -EBUSY when no slot is free.
+ *
+ * Fix vs. original: the -EBUSY path returned with the spinlock still
+ * held and interrupts disabled, deadlocking the next lock acquisition.
+ */
+static int p7gpio_filter_interrupt_register(struct p7gpio *p7gpio,
+ unsigned gpio1, u16 filter, s8 mode)
+{
+ unsigned hw_irq;
+ unsigned long flags;
+ int ret = 0;
+ int phase;
+
+ if (gpio1 >= p7gpio->chip.ngpio) {
+ return -EINVAL;
+ }
+
+ /* check if it is already mapped */
+ if (p7gpio_to_hw_irq(p7gpio, gpio1) >= 0)
+ return 0;
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+
+ /* Filtered irqs live in the phase-capable range [8-19]. */
+ for (hw_irq = 8; hw_irq < P7_GPIO_IRQS; hw_irq++) {
+ if (p7gpio->irq_map[hw_irq].gpio == P7_GPIO_INVAL)
+ break;
+ }
+
+ if (hw_irq == P7_GPIO_IRQS) {
+ /* drop the lock before bailing out (the original leaked it) */
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+ return -EBUSY;
+ }
+
+ p7gpio_interrupt_register_gpio(p7gpio, gpio1,
+ filter, mode, hw_irq);
+
+ phase = P7_GPIO_PHASE_FROM_IRQ(hw_irq);
+ p7gpio_interrupt_configure(p7gpio, phase, filter, mode);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ if (irq_create_mapping(p7gpio->irq_domain, hw_irq) == 0) {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * Parse the gpio number embedded in a sysfs attribute name suffix and
+ * look up its hw irq slot. Returns the slot index, or -1 when the gpio
+ * has no interrupt mapping.
+ */
+static int p7gpio_sysfs_get_hwirq(struct p7gpio *p7gpio, const char *str_gpio)
+{
+ int hw_irq;
+ int gpio;
+
+ gpio = simple_strtoul(str_gpio, NULL, 10);
+ for (hw_irq = 0; hw_irq < P7_GPIO_IRQS; hw_irq++) {
+ if (p7gpio->irq_map[hw_irq].gpio == gpio)
+ break;
+ }
+
+ /* reached the end of the table: no slot maps this gpio */
+ if (hw_irq == P7_GPIO_IRQS)
+ return -1;
+
+ return hw_irq;
+}
+
+/* sysfs show for reset_%d: print the filter value of the mapped slot. */
+static ssize_t p7gpio_sysfs_reset_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct p7gpio *p7gpio = dev_get_drvdata(device);
+ int hw_irq;
+
+ /* Attribute name format is reset_%d */
+ hw_irq = p7gpio_sysfs_get_hwirq(p7gpio, attr->attr.name + 6);
+ if (hw_irq < 0)
+ return 0;
+
+ return snprintf(buf,PAGE_SIZE, "%d", p7gpio->irq_map[hw_irq].filter);
+}
+
+/*
+ * sysfs store for reset_%d: acknowledge any pending phase interrupt and
+ * rewrite the filter register. The written value is ignored — the write
+ * itself triggers the rearm.
+ */
+static ssize_t p7gpio_sysfs_reset_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct p7gpio *p7gpio = dev_get_drvdata(device);
+ int hw_irq;
+ u32 addr_itFilter;
+ u32 addr_itAck;
+ u32 itFilter = 0;
+ u32 itAck = 0;
+ int phase;
+ unsigned long flags;
+
+ /* Fetch HW IRQ */
+ hw_irq = p7gpio_sysfs_get_hwirq(p7gpio, attr->attr.name + 6);
+ if (hw_irq < 0)
+ return count;
+
+ phase = P7_GPIO_PHASE_FROM_IRQ(hw_irq);
+ addr_itFilter = p7gpio->base + P7_GPIO_PHASE_IFILTER(phase);
+ addr_itAck = p7gpio->base + P7_GPIO_PHASE_ITACK(phase);
+
+ spin_lock_irqsave(&p7gpio->lock, flags);
+
+ /* Ack any pending interrupt */
+ itAck = __raw_readl(addr_itAck);
+ itAck |= 1 << (hw_irq % 2);
+ __raw_writel(itAck, addr_itAck);
+
+ /* Reset filter counter: presumably re-writing the register value
+ * restarts the HW counter — confirm against the P7 datasheet. */
+ itFilter = __raw_readl(addr_itFilter);
+ __raw_writel(itFilter, addr_itFilter);
+
+ spin_unlock_irqrestore(&p7gpio->lock, flags);
+
+ return count;
+}
+
+/* Shared attribute for reset_%d files; .name is filled in at export time. */
+static struct device_attribute p7gpio_sysfs_reset_attr = {
+ .attr = { .mode = S_IRUGO | S_IWUSR },
+ .show = &p7gpio_sysfs_reset_show,
+ .store = &p7gpio_sysfs_reset_store
+};
+
+/*
+ * Select which gpio of a phase-measure pair carries the sysfs "reset"
+ * attribute, depending on the measure mode. On success the gpio number
+ * is stored in *gpio and 0 is returned; modes without a reset gpio
+ * yield -EINVAL.
+ */
+static int p7_gpio_filter_get_reset_gpio(struct p7gpio_filter_phase *data,
+ u32 *gpio)
+{
+ switch (data->mode) {
+ case GPIO_MEASURE_START:
+ case GPIO_MEASURE_START_AFTER_STOP:
+ *gpio = data->start;
+ return 0;
+
+ case GPIO_MEASURE_STOP:
+ case GPIO_MEASURE_STOP_AFTER_START:
+ *gpio = data->stop;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Expose a sysfs "reset_<gpio>" attribute on the gpio chip device so
+ * userspace can rearm a phase measure. The name buffer is kept for the
+ * driver's lifetime on success (built-in driver, never unloaded).
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original: a device_create_file() failure leaked the name
+ * buffer and was reported as success.
+ */
+static int p7_gpio_filter_interrupt_export_reset(
+ struct p7gpio_filter_phase *data)
+{
+ int err;
+ u32 gpio;
+ const int len = 20;
+ char *name;
+ struct gpio_chip *c;
+ struct p7gpio *p7gpio;
+
+ c = gpiochip_find(NULL, p7gpio_match_chip);
+ if (!c)
+ return -EINVAL;
+
+ p7gpio = container_of(c, struct p7gpio, chip);
+
+ err = p7_gpio_filter_get_reset_gpio(data, &gpio);
+ if (err < 0)
+ return err;
+
+ name = kzalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ snprintf(name, len, "reset_%d", gpio);
+ p7gpio_sysfs_reset_attr.attr.name = name;
+
+ err = device_create_file(p7gpio->chip.dev, &p7gpio_sysfs_reset_attr);
+ if (err) {
+ printk(KERN_ERR "p7gpio: failed to create reset sysfs file (%d)\n",
+ err);
+ p7gpio_sysfs_reset_attr.attr.name = NULL;
+ kfree(name);
+ return err;
+ }
+
+ return 0;
+}
+
+/* sysfs show for phase_%d: print "<measure>:<timestamp_ns>" of the slot. */
+static ssize_t p7gpio_sysfs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct p7gpio *p7gpio = dev_get_drvdata(device);
+ int hw_irq;
+
+ /* Attribute name format is phase_%d */
+ hw_irq = p7gpio_sysfs_get_hwirq(p7gpio, attr->attr.name + 6);
+ if (hw_irq < 0)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "0x%x:%lld",
+ p7gpio->irq_map[hw_irq].measure,
+ timespec_to_ns(&p7gpio->irq_map[hw_irq].ts));
+}
+
+/*
+ * sysfs store for phase_%d: parse (and discard) a numeric value; the
+ * write has no effect beyond input validation.
+ *
+ * Fixes vs. original: kstrtoul() was fed a u32 through an
+ * (unsigned long *) cast — an out-of-bounds store on 64-bit builds —
+ * and 0 was returned on success, which makes the sysfs core call the
+ * handler again forever. Use the properly sized helper and return the
+ * number of consumed bytes.
+ */
+static ssize_t p7gpio_sysfs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 reg;
+ int err;
+
+ err = kstrtou32(buf, 0, &reg);
+ if (err)
+ return err;
+
+ return count;
+}
+
+/* Shared attribute for phase_%d files; .name is filled in at export time. */
+static struct device_attribute p7gpio_sysfs_attr = {
+ .attr = {
+ .mode = S_IRUGO | S_IWUSR},
+ .show = &p7gpio_sysfs_show,
+ .store = &p7gpio_sysfs_store
+};
+
+/*DEVICE_ATTR(phase, 0644, p7gpio_sysfs_show, p7gpio_sysfs_store);*/
+
+/* If filterring is performed on only one gpio,
+ * leave start or stop field with 0.
+ */
+/*
+ * Public entry point: register a filtered and/or phase-measured gpio
+ * pair described by @data. Creates one phase_%d sysfs attribute per
+ * involved gpio, then reserves the hw irq slot(s). Returns 0 on
+ * success, a negative errno otherwise.
+ *
+ * NOTE(review): the sysfs attributes are created before the gpio range
+ * check, so they can exist for gpios that were never registered.
+ */
+int p7_gpio_filter_interrupt_register(struct p7gpio_filter_phase data)
+{
+ int err = 0;
+
+ char *name1, *name2;
+ const int len = 20;
+ struct gpio_chip *c = gpiochip_find(NULL, p7gpio_match_chip);
+
+
+ if (c) {
+ struct p7gpio *p7gpio = container_of(c, struct p7gpio, chip);
+ name1 = (char *) kzalloc(len, GFP_KERNEL);
+ if (name1 == NULL) {
+ return -ENOMEM;
+ }
+
+ /*FIXME kfree is never performed! Built-in driver*/
+ snprintf(name1, len, "phase_%d", data.start);
+ p7gpio_sysfs_attr.attr.name = name1;
+ err = device_create_file(p7gpio->chip.dev, &p7gpio_sysfs_attr);
+ if (err){
+ printk("Erreur de creation de device file\n");
+ }
+ if (data.stop){
+ name2 = (char *) kzalloc(len, GFP_KERNEL);
+ if (name2 == NULL) {
+ return -ENOMEM;
+ }
+
+ snprintf(name2, len, "phase_%d", data.stop);
+ p7gpio_sysfs_attr.attr.name = name2;
+ err = device_create_file(p7gpio->chip.dev, &p7gpio_sysfs_attr);
+ if (err){
+ printk("Erreur de creation de device file\n");
+ }
+ }
+
+ /* Test gpio range */
+ if (data.start >= c->base && data.start < c->base + c->ngpio){
+ if((data.stop) &&
+ (data.stop >= c->base && data.stop < c->base + c->ngpio)){
+ /* both gpios valid: full phase measure */
+ err = p7gpio_measure_interrupt_register(
+ p7gpio,
+ data.start - c->base,
+ data.stop - c->base,
+ data.filter, data.mode);
+ } else {
+ /* single gpio: filtering only */
+ err = p7gpio_filter_interrupt_register(
+ p7gpio,
+ data.start - c->base,
+ data.filter, data.mode);
+ }
+
+ if (err < 0)
+ return err;
+
+ if (data.export_reset)
+ return p7_gpio_filter_interrupt_export_reset(
+ &data);
+ else
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+#endif
+
+/* Release every interrupt slot declared in the platform data. */
+static void p7gpio_unmap_irqs(struct p7gpio *p7gpio)
+{
+ unsigned hw_irq;
+
+ BUG_ON(p7gpio->pdata->irq_gpios_sz > P7_GPIO_IRQS);
+
+ for (hw_irq = 0; hw_irq < p7gpio->pdata->irq_gpios_sz; hw_irq++) {
+ unsigned long gpio = p7gpio->pdata->irq_gpios[hw_irq];
+
+ p7gpio_interrupt_free(p7gpio, gpio);
+ }
+}
+
+/*
+ * Register every gpio interrupt declared in the platform data; on any
+ * failure, undo all slots registered so far. Returns 0 or a negative
+ * errno (-ERANGE when more gpios than hw slots are declared).
+ */
+static int p7gpio_map_irqs(struct p7gpio *p7gpio)
+{
+ unsigned hw_irq;
+ int ret = 0;
+
+ if (p7gpio->pdata->irq_gpios_sz > P7_GPIO_IRQS)
+ /* We can't have that many GPIOs as interrupt */
+ return -ERANGE;
+
+ for (hw_irq = 0; hw_irq < p7gpio->pdata->irq_gpios_sz; hw_irq++) {
+ unsigned long gpio = p7gpio->pdata->irq_gpios[hw_irq];
+
+ ret = p7gpio_interrupt_register(p7gpio, gpio);
+ if (ret < 0) {
+ goto undo;
+ }
+ }
+
+ return 0;
+
+undo:
+ /* Undo any partial initialization */
+ p7gpio_unmap_irqs(p7gpio);
+ return ret;
+}
+
+/*
+ * Probe the P7 GPIO controller: map registers, register the gpio_chip,
+ * set up the chained IRQ handler and its linear IRQ domain, and (with
+ * CONFIG_PM_SLEEP) allocate the register save area used across suspend.
+ *
+ * Fix vs. original: the error path after irq_domain_add_linear() succeeded
+ * now removes the domain and clears the chained-handler data (both were
+ * previously leaked), and the broken "memory""to save" message string
+ * gets its missing space.
+ */
+static int __devinit p7gpio_probe(struct platform_device *pdev)
+{
+	struct p7gpio *p7gpio;
+	struct resource *res;
+	int ret;
+	unsigned i;
+	unsigned int num_saved_regs __maybe_unused;
+
+	p7gpio = kzalloc(sizeof(*p7gpio), GFP_KERNEL);
+	if (!p7gpio) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	spin_lock_init(&p7gpio->lock);
+
+	p7gpio->pdata = dev_get_platdata(&pdev->dev);
+	if (!p7gpio->pdata) {
+		ret = -ENODEV;
+		goto kfree;
+	}
+
+	p7gpio->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(p7gpio->clk)) {
+		ret = PTR_ERR(p7gpio->clk);
+		goto kfree;
+	}
+
+	ret = clk_prepare_enable(p7gpio->clk);
+	if (ret)
+		goto put_clock;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENOENT;
+		goto stop_clock;
+	}
+
+	p7gpio->ioarea = request_mem_region(res->start,
+					    resource_size(res),
+					    pdev->name);
+	if (!p7gpio->ioarea) {
+		ret = -EBUSY;
+		goto stop_clock;
+	}
+
+	p7gpio->base = (unsigned long)ioremap(res->start, resource_size(res));
+	if (!p7gpio->base) {
+		ret = -EBUSY;
+		goto release_region;
+	}
+
+	/* Register the GPIO chip callbacks */
+	p7gpio->chip = p7gpio_chip_defaults;
+	p7gpio->chip.ngpio = p7gpio->pdata->gpio_nr;
+	p7gpio->chip.dev = &pdev->dev;
+
+	/* Mark all IRQ slots unused until p7gpio_map_irqs() fills them. */
+	for (i = 0; i < P7_GPIO_IRQS; i++) {
+		p7gpio->irq_map[i].gpio = P7_GPIO_INVAL;
+		p7gpio->irq_map[i].filter = 0;
+	}
+
+	/* XXX this should be done last
+	   in theory, user can try to use irq before we finish
+	 */
+	ret = gpiochip_add(&p7gpio->chip);
+	if (ret)
+		goto unmap;
+
+	p7gpio->irq = platform_get_irq(pdev, 0);
+
+	if (p7gpio->irq < 0) {
+		ret = -ENOENT;
+		goto gpio_remove;
+	}
+
+	p7gpio->irq_domain =
+		irq_domain_add_linear(NULL,
+				      P7_GPIO_IRQS,
+				      &p7gpio_irq_domain_ops,
+				      p7gpio);
+
+	if (!p7gpio->irq_domain) {
+		ret = -ENOMEM;
+		goto gpio_remove;
+	}
+
+	/* p7gpio->irq is the only IRQ line going out of the GPIO
+	 * controller. The per-GPIO interrupt will be triggered in
+	 * software from this IRQ handler. */
+	irq_set_chained_handler(p7gpio->irq, p7gpio_irq_handler);
+	irq_set_handler_data(p7gpio->irq, p7gpio);
+
+	ret = p7gpio_map_irqs(p7gpio);
+	if (ret)
+		goto remove_irq;
+
+#ifdef CONFIG_PM_SLEEP
+	/* 2 words (data + direction) per line, one per IRQ slot, plus the
+	 * debounce register: must match the layout used by suspend/resume. */
+	num_saved_regs = 2 * roundup(p7gpio->chip.ngpio, 8) +
+			 P7_GPIO_IRQS + 1; /* 1 is for debounce reg */
+
+	p7gpio->saved_regs = kmalloc(num_saved_regs * sizeof(u32), GFP_KERNEL);
+	if (! p7gpio->saved_regs) {
+		dev_err(&pdev->dev, "Failed to allocate memory "
+			"to save registers\n");
+		ret = -ENOMEM;
+		goto unmap_irqs;
+	}
+#endif
+
+	platform_set_drvdata(pdev, p7gpio);
+
+	dev_info(&pdev->dev, "GPIO driver initialized\n");
+
+	return 0;
+
+#ifdef CONFIG_PM_SLEEP
+unmap_irqs:
+	p7gpio_unmap_irqs(p7gpio);
+#endif
+remove_irq:
+	irq_set_chained_handler(p7gpio->irq, NULL);
+	irq_set_handler_data(p7gpio->irq, NULL);
+	/* The domain is always allocated by the time we get here: release it
+	 * (it was previously leaked on this path). */
+	irq_domain_remove(p7gpio->irq_domain);
+gpio_remove:
+	/* this shouldn't fail since the gpiochip shouldn't be busy by this
+	 * point */
+	BUG_ON(gpiochip_remove(&p7gpio->chip));
+unmap:
+	iounmap((void *)p7gpio->base);
+release_region:
+	release_mem_region(p7gpio->ioarea->start, resource_size(p7gpio->ioarea));
+stop_clock:
+	clk_disable_unprepare(p7gpio->clk);
+put_clock:
+	clk_put(p7gpio->clk);
+kfree:
+	kfree(p7gpio);
+exit:
+	dev_info(&pdev->dev, "Init failed: %d\n", ret);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Save the full controller state (data, direction, interrupt and debounce
+ * registers) into saved_regs, then gate the clock. Runs in the noirq
+ * suspend phase. The write order here must match the read order in
+ * p7gpio_resume_noirq() exactly.
+ */
+static int p7gpio_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct p7gpio *p7gpio = platform_get_drvdata(pdev);
+	struct gpio_chip *chip = &p7gpio->chip;
+	int i, j = 0;
+
+	/* Two words saved per iteration; probe sized saved_regs for
+	 * 2 * roundup(ngpio, 8) words here. NOTE(review): the data port is
+	 * read 8 bits at once, so iterations within a group of 8 re-read the
+	 * same value — redundant but consistent with the resume path. */
+	for (i = 0; i < roundup(chip->ngpio, 8); i++) {
+		u32 data_port = p7gpio->base + P7_GPIO_PORT_DATA_BASE;
+		u32 dir_port = p7gpio->base + P7_GPIO_PORT_DIR_BASE;
+
+		data_port += (i / 8) * 0x400;
+		data_port |= 0xffU << 2; /* read 8 bits at once */
+
+		dir_port += i * 4;
+
+		p7gpio->saved_regs[j++] = __raw_readl(data_port);
+		p7gpio->saved_regs[j++] = __raw_readl(dir_port);
+	}
+
+	/* Save the per-slot interrupt controller configuration. */
+	for (i = 0; i < P7_GPIO_IRQS; i++) {
+		u32 port = p7gpio->base + P7_GPIO_INTC_BASE;
+		port += i * 4;
+		p7gpio->saved_regs[j++] = __raw_readl(port);
+	}
+
+	/* Debounce configuration goes last, then the clock can be gated. */
+	p7gpio->saved_regs[j++] = __raw_readl(p7gpio->base + P7_GPIO_DEBOUNCE);
+
+	clk_disable_unprepare(p7gpio->clk);
+
+	return 0;
+}
+
+/*
+ * Re-enable the clock and restore the controller state saved by
+ * p7gpio_suspend_noirq(); the restore order mirrors the save order.
+ */
+static int p7gpio_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct p7gpio *p7gpio = platform_get_drvdata(pdev);
+	struct gpio_chip *chip = &p7gpio->chip;
+	int i, j = 0;
+
+	/* Clock must be running before any register access. */
+	clk_prepare_enable(p7gpio->clk);
+
+	for (i = 0; i < roundup(chip->ngpio, 8); i++) {
+		u32 data_port = p7gpio->base + P7_GPIO_PORT_DATA_BASE;
+		u32 dir_port = p7gpio->base + P7_GPIO_PORT_DIR_BASE;
+
+		data_port += (i / 8) * 0x400;
+		data_port |= 0xff << 2; /* NOTE(review): suspend uses 0xffU — same value */
+
+		dir_port += i * 4;
+
+		__raw_writel(p7gpio->saved_regs[j++], data_port);
+		__raw_writel(p7gpio->saved_regs[j++], dir_port);
+	}
+
+	/* Restore per-slot interrupt controller configuration. */
+	for (i = 0; i < P7_GPIO_IRQS; i++) {
+		u32 port = p7gpio->base + P7_GPIO_INTC_BASE;
+		port += i * 4;
+		__raw_writel(p7gpio->saved_regs[j++], port);
+	}
+
+	/* Debounce configuration was saved last: restore it last too. */
+	__raw_writel(p7gpio->saved_regs[j++], p7gpio->base + P7_GPIO_DEBOUNCE);
+
+	return 0;
+}
+#else
+#define p7gpio_suspend_noirq NULL
+#define p7gpio_resume_noirq NULL
+#endif
+
+/* noirq callbacks: the controller state is saved/restored while device
+ * interrupts are disabled. Made const: driver.pm takes a pointer to
+ * const struct dev_pm_ops and the table is never modified. */
+static const struct dev_pm_ops p7gpio_dev_pm_ops = {
+	.suspend_noirq = p7gpio_suspend_noirq,
+	.resume_noirq = p7gpio_resume_noirq,
+};
+
+static struct platform_driver p7gpio_driver = {
+	.driver = {
+		.name = P7GPIO_DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &p7gpio_dev_pm_ops,
+	},
+	.probe = p7gpio_probe,
+};
+
+/* postcore: GPIOs must be available before most other drivers probe. */
+static int __init p7gpio_init(void)
+{
+	return platform_driver_register(&p7gpio_driver);
+}
+postcore_initcall(p7gpio_init);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("P7 GPIO controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/gpio/p7-gpio.h b/drivers/parrot/gpio/p7-gpio.h
new file mode 100644
index 00000000000000..f184ec3fe30cbd
--- /dev/null
+++ b/drivers/parrot/gpio/p7-gpio.h
@@ -0,0 +1,37 @@
+/**
+ * linux/drivers/parrot/gpio/p7-gpio.h - Parrot7 GPIO pin controller driver
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Lionel Flandrin <lionel.flandrin@parrot.com>
+ * date: 06-Apr-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7_GPIO_H_
+#define _P7_GPIO_H_
+
+#define P7GPIO_DRV_NAME "p7-gpio"
+
+#if defined(CONFIG_GPIO_PARROT7) || defined(CONFIG_GPIO_PARROT7_MODULE)
+
+/**
+ * struct p7gpio_plat_data - Parrot7 GPIO controller platform specific data
+ *
+ * @irq_gpios:    array of GPIOs to be used as IRQs.
+ * @irq_gpios_sz: number of entries in array @irq_gpios
+ * @gpio_nr:      total number of GPIO lines handled by the controller
+ *
+ * Note: @irq_gpios_sz is limited to %P7_GPIO_IRQS
+ */
+struct p7gpio_plat_data {
+	const unsigned *irq_gpios;
+	unsigned irq_gpios_sz;
+	u16 gpio_nr;
+};
+
+#endif /* defined(CONFIG_GPIO_PARROT7) ||
+ defined(CONFIG_GPIO_PARROT7_MODULE) */
+
+#endif /* _P7_GPIO_H_ */
diff --git a/drivers/parrot/gpio/p7mu-gpio.c b/drivers/parrot/gpio/p7mu-gpio.c
new file mode 100644
index 00000000000000..fa55dc92b939df
--- /dev/null
+++ b/drivers/parrot/gpio/p7mu-gpio.c
@@ -0,0 +1,301 @@
+/**
+ * linux/drivers/parrot/gpio/p7mu-gpio.c - Parrot7 power management unit GPIOs
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ *
+ * This file is released under the GPL
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <mfd/p7mu-pin.h>
+
+/* Serializes read-modify-write access to the shared GPIO data word.
+ * Made static: the mutex is private to this file and must not leak
+ * into the kernel-wide namespace. */
+static DEFINE_MUTEX(p7mu_gpio_lck);
+/* I/O region holding the P7MU GPIO register, claimed at probe time. */
+static struct resource const* p7mu_gpio_res;
+
+struct p7mu_gpio_chip {
+	struct gpio_chip gpio_chip;
+};
+
+
+#ifdef DEBUG
+#define P7MU_ASSERT_GPIO(_chip, _gpio) \
+ BUG_ON(! p7mu_gpio_res); \
+ BUG_ON(_gpio >= _chip->ngpio)
+#else
+#define P7MU_ASSERT_GPIO(_chip, _gpio)
+#endif
+
+/*
+ * Reserve a GPIO line for future usage (gpiolib .request hook).
+ *
+ * Returns the p7mu_request_pin() result (0 on success).
+ */
+static int p7mu_request_gpio(struct gpio_chip* chip, unsigned int gpio)
+{
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	/* Forward call to P7MU pin management layer; the pin is muxed as an
+	 * input GPIO by default. */
+	return p7mu_request_pin(gpio, P7MU_INGPIO_MUX);
+}
+
+/*
+ * Release a GPIO line previously reserved with p7mu_request_gpio()
+ * (gpiolib .free hook).
+ */
+static void p7mu_free_gpio(struct gpio_chip* chip, unsigned int gpio)
+{
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	/* Forward call to P7MU pin management layer. */
+	p7mu_free_pin(gpio);
+}
+
+/*
+ * Switch a GPIO line to input mode (gpiolib .direction_input hook).
+ *
+ * Returns the p7mu_setup_pin() result (0 on success).
+ */
+static int p7mu_gpio_input(struct gpio_chip* chip, unsigned int gpio)
+{
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	/* Forward call to P7MU pin management layer. */
+	return p7mu_setup_pin(gpio, P7MU_INGPIO_MUX);
+}
+
+/*
+ * Read the current level of a GPIO line (gpiolib .get hook).
+ *
+ * Returns 0 or 1 on success, or the negative p7mu_read16() error code.
+ */
+static int p7mu_get_gpio(struct gpio_chip* chip, unsigned int gpio)
+{
+	u16 word;
+	int err;
+
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	/* All GPIO lines live in a single 16-bit register: fetch it whole
+	 * and extract the requested bit. */
+	err = p7mu_read16(p7mu_gpio_res->start, &word);
+	if (err)
+		return err;
+
+	return !! (word & (u16) (1U << gpio));
+}
+
+/*
+ * Set the output level of a GPIO line (gpiolib .set hook).
+ *
+ * All lines share one 16-bit register, so the read-modify-write sequence
+ * is serialized with p7mu_gpio_lck. Failures are only warned about since
+ * the gpiolib .set callback returns void.
+ */
+static void p7mu_set_gpio(struct gpio_chip* chip,
+                          unsigned int gpio,
+                          int value)
+{
+	u16 word;
+	int err;
+
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	mutex_lock(&p7mu_gpio_lck);
+
+	/* Fetch all GPIO lines values, flip the requested bit, write back. */
+	err = p7mu_read16(p7mu_gpio_res->start, &word);
+	if (! err) {
+		if (value)
+			word |= (u16) (1U << gpio);
+		else
+			word &= (u16) ~(1U << gpio);
+
+		err = p7mu_write16(p7mu_gpio_res->start, word);
+	}
+
+	if (err)
+		dev_warn(chip->dev,
+		         "failed to set GPIO %d value (%d)\n",
+		         gpio,
+		         err);
+
+	mutex_unlock(&p7mu_gpio_lck);
+}
+
+/*
+ * Switch a GPIO line to output mode with an initial level
+ * (gpiolib .direction_output hook).
+ *
+ * Returns 0 on success, otherwise the p7mu_setup_pin() error code.
+ */
+static int p7mu_gpio_output(struct gpio_chip* chip, unsigned int gpio, int value)
+{
+	int err;
+
+	P7MU_ASSERT_GPIO(chip, gpio);
+
+	err = p7mu_setup_pin(gpio, P7MU_OUTGPIO_MUX);
+	if (err)
+		return err;
+
+	/* Mux succeeded: latch the requested initial level. */
+	p7mu_set_gpio(chip, gpio, value);
+	return 0;
+}
+
+/* Map a GPIO offset to its virtual IRQ number (gpiolib .to_irq hook). */
+static int p7mu_gpio2irq(struct gpio_chip* chip, unsigned int gpio)
+{
+	return p7mu_gpio_to_irq(gpio);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+/*
+ * Show requested GPIO lines current state (gpiolib .dbg_show hook,
+ * debugfs only).
+ *
+ * Fix vs. original: "dir" was self-initialized (char const* dir = dir),
+ * so in non-DEBUG builds an unexpected mux value left it uninitialized
+ * before being passed to seq_printf() — undefined behavior. It now falls
+ * back to a visible "?" marker instead.
+ */
+static void p7mu_show_gpio(struct seq_file* seq, struct gpio_chip* chip)
+{
+	unsigned int gpio;
+
+	for (gpio = 0; gpio < chip->ngpio; gpio++) {
+		char const* const label = gpiochip_is_requested(chip, gpio);
+
+		P7MU_ASSERT_GPIO(chip, gpio);
+
+		if (label) {
+			/* Safe fallback in case of unexpected mux state. */
+			char const* dir = "?  ";
+			int irq;
+
+			switch (p7mu_pin_mux(gpio)) {
+			case P7MU_INGPIO_MUX:
+				dir = "in ";
+				break;
+
+			case P7MU_OUTGPIO_MUX:
+				dir = "out";
+				break;
+#ifdef DEBUG
+			default:
+				BUG();
+#endif
+			}
+
+			seq_printf(seq,
+			           " gpio-%-3d (%-20.20s) %s %s",
+			           chip->base + gpio,
+			           label,
+			           dir,
+			           p7mu_get_gpio(chip, gpio) ? "hi" : "lo");
+
+			irq = gpio_to_irq(chip->base + gpio);
+			if (irq >= 0)
+				seq_printf(seq, " [IRQ %d]", irq);
+
+			seq_printf(seq, "\n");
+		}
+	}
+}
+
+#define P7MU_SHOW_GPIO p7mu_show_gpio
+
+#else /* CONFIG_DEBUG_FS */
+
+#define P7MU_SHOW_GPIO NULL
+
+#endif /* CONFIG_DEBUG_FS */
+
+#define P7MU_GPIO_DRV_NAME "p7mu-gpio"
+
+/* Singleton chip descriptor wired into gpiolib at probe time. */
+static struct p7mu_gpio_chip p7mu_gpio_chip = {
+	.gpio_chip = {
+		.label            = P7MU_GPIO_DRV_NAME,
+		.request          = p7mu_request_gpio,
+		.free             = p7mu_free_gpio,
+		.direction_input  = p7mu_gpio_input,
+		.get              = p7mu_get_gpio,
+		.direction_output = p7mu_gpio_output,
+		.set              = p7mu_set_gpio,
+		.base             = P7MU_FIRST_GPIO,
+		.to_irq           = p7mu_gpio2irq,
+		.ngpio            = P7MU_GPIOS_MAX,
+		.dbg_show         = P7MU_SHOW_GPIO,
+		/* Accessors may sleep: not usable from atomic context. */
+		.can_sleep        = 1,
+	},
+};
+
+/*
+ * Probe: claim the P7MU I/O region holding the GPIO register and register
+ * the gpio_chip with gpiolib.
+ *
+ * Returns 0 on success or a negative errno; any claimed region is released
+ * on failure.
+ */
+static int __devinit p7mu_probe_gpio(struct platform_device* pdev)
+{
+	int err;
+
+	p7mu_gpio_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (! p7mu_gpio_res) {
+		dev_err(&pdev->dev, "failed to find I/O region address\n");
+		err = -ENXIO;
+		goto err;
+	}
+
+	/* Claim the region; on success p7mu_gpio_res is replaced by the
+	 * region pointer owned by the P7MU core. */
+	p7mu_gpio_res = p7mu_request_region(pdev, p7mu_gpio_res);
+	if (IS_ERR(p7mu_gpio_res)) {
+		err = PTR_ERR(p7mu_gpio_res);
+		goto err;
+	}
+
+	p7mu_gpio_chip.gpio_chip.dev = &pdev->dev;
+	err = gpiochip_add(&p7mu_gpio_chip.gpio_chip);
+	if (! err)
+		return 0;
+
+	/* gpiochip_add failed: give the region back before reporting. */
+	p7mu_release_region(p7mu_gpio_res);
+
+err:
+	dev_err(&pdev->dev,
+	        "failed to register gpio chip (%d)\n",
+	        err);
+	return err;
+}
+
+/*
+ * Remove: unregister the gpio_chip and release the claimed I/O region.
+ *
+ * If gpiochip_remove() fails (e.g. lines still requested), the error is
+ * reported and the region is kept so the chip stays usable.
+ */
+static int __devexit p7mu_remove_gpio(struct platform_device* pdev)
+{
+	int const err = gpiochip_remove(&p7mu_gpio_chip.gpio_chip);
+	if (err) {
+		dev_err(&pdev->dev,
+		        "failed to unregister gpio chip (%d)\n",
+		        err);
+		return err;
+	}
+
+#ifdef DEBUG
+	BUG_ON(! p7mu_gpio_res);
+	BUG_ON(IS_ERR(p7mu_gpio_res));
+#endif
+	p7mu_release_region(p7mu_gpio_res);
+
+	return 0;
+}
+
+static struct platform_driver p7mu_gpio_driver = {
+	.driver = {
+		.name  = P7MU_GPIO_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe  = p7mu_probe_gpio,
+	/* Function designator decays to a pointer: no '&' needed. */
+	.remove = __devexit_p(p7mu_remove_gpio),
+};
+
+/* postcore: P7MU GPIOs must be available before most drivers probe. */
+static int __init p7mu_init_gpio(void)
+{
+	return platform_driver_register(&p7mu_gpio_driver);
+}
+postcore_initcall(p7mu_init_gpio);
+
+static void __exit p7mu_exit_gpio(void)
+{
+	platform_driver_unregister(&p7mu_gpio_driver);
+}
+module_exit(p7mu_exit_gpio);
+
+MODULE_DESCRIPTION("Parrot Power Management Unit GPIO driver")
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/gpu/Kconfig b/drivers/parrot/gpu/Kconfig
new file mode 100644
index 00000000000000..354c29654432ae
--- /dev/null
+++ b/drivers/parrot/gpu/Kconfig
@@ -0,0 +1,29 @@
+menu "Parrot7 GPU"
+
+config GPU_PARROT7
+ tristate "Parrot7 GPU support"
+ default n
+ ---help---
+ This will just enable support for Mali 200 GPU.
+	  It requires Mali-200 or Mali-400 ARM drivers to be enabled.
+
+config UMP_PARROT7
+ tristate "Parrot7 UMP interface"
+ depends on UMP
+ default n
+ ---help---
+ Driver to allow memory sharing between APIs by tracking UMP memory tokens.
+ Used with Mali200 GPU.
+
+comment "ARM UMP memory sharing driver"
+source "drivers/parrot/gpu/ump/Kconfig"
+
+comment "ARM Mali200 driver"
+source "drivers/parrot/gpu/mali200/Kconfig"
+
+comment "ARM Mali400 driver"
+source "drivers/parrot/gpu/mali400/Kconfig"
+
+endmenu
+
+source "drivers/parrot/gpu/ion/Kconfig"
diff --git a/drivers/parrot/gpu/Makefile b/drivers/parrot/gpu/Makefile
new file mode 100644
index 00000000000000..9c2c6cc3474d38
--- /dev/null
+++ b/drivers/parrot/gpu/Makefile
@@ -0,0 +1,18 @@
+ccflags-y += -I$(srctree)/drivers/parrot/gpu/ump/include/ump
+obj-$(CONFIG_GPU_PARROT7) += p7gpu-pm.o
+obj-$(CONFIG_UMP_PARROT7) += p7ump.o
+
+ifneq ($(CONFIG_MALI400_SUPPORT),y)
+obj-$(CONFIG_MALI200) += mali200/
+obj-$(CONFIG_UMP) += ump/
+endif
+
+ifneq ($(CONFIG_MALI400_LEGACY),y)
+ obj-$(CONFIG_UMP) += ump/
+ obj-$(CONFIG_MALI400) += mali400/
+else
+ obj-$(CONFIG_UMP) += ump_legacy/
+ obj-$(CONFIG_MALI400) += mali400_legacy/
+endif
+
+obj-y += ion/
diff --git a/drivers/parrot/gpu/ion/Kconfig b/drivers/parrot/gpu/ion/Kconfig
new file mode 100644
index 00000000000000..3208f8b1256b66
--- /dev/null
+++ b/drivers/parrot/gpu/ion/Kconfig
@@ -0,0 +1,7 @@
+config ION_PARROT7
+ bool "Parrot7 ION allocator"
+ depends on ION && ARCH_PARROT7
+ default ION
+ help
+ Enable support for Parrot7 ION allocator.
+
diff --git a/drivers/parrot/gpu/ion/Makefile b/drivers/parrot/gpu/ion/Makefile
new file mode 100644
index 00000000000000..3aaa7927c66259
--- /dev/null
+++ b/drivers/parrot/gpu/ion/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -I$(srctree)/drivers/gpu/ion
+obj-$(CONFIG_ION_PARROT7) += p7-ion.o
diff --git a/drivers/parrot/gpu/ion/p7-ion.c b/drivers/parrot/gpu/ion/p7-ion.c
new file mode 100644
index 00000000000000..2946bddf4f3d1b
--- /dev/null
+++ b/drivers/parrot/gpu/ion/p7-ion.c
@@ -0,0 +1,115 @@
+/**
+ * linux/driver/parrot/gpu/ion/p7-ion.c - Parrot7 ION allocator implementation
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 16-Jan-2014
+ *
+ * This file is released under the GPL.
+ *
+ * TODO:
+ * - review me !!
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ion.h>
+#include "ion_priv.h"
+#include <asm/uaccess.h>
+#include "p7-ion.h"
+
+/* Module-wide state: a single p7-ion platform device instance is handled. */
+static struct ion_device* p7ion_dev;
+static unsigned int p7ion_heaps_nr;
+static struct ion_heap** p7ion_heaps;
+
+/*
+ * Custom ioctl: P7ION_GET_BUFFER_INFO resolves a userspace ION handle id
+ * into the buffer's physical address and length.
+ *
+ * Fix vs. original: the handle id is userspace-controlled, so the
+ * ion_handle_get_by_id() result is now validated before use, and the
+ * ion_phys() return code is no longer ignored.
+ */
+static long p7ion_custom_ioctl(struct ion_client *client,
+                               unsigned int cmd,
+                               unsigned long arg)
+{
+	switch (cmd) {
+	case P7ION_GET_BUFFER_INFO:
+	{
+		struct p7ion_buffer_info data;
+		struct ion_handle *handle;
+		int err;
+
+		if (copy_from_user(&data, (void __user *)arg, sizeof(struct p7ion_buffer_info)))
+			return -EFAULT;
+
+		handle = ion_handle_get_by_id(client, (int)data.handle);
+		if (IS_ERR_OR_NULL(handle))
+			return handle ? PTR_ERR(handle) : -EINVAL;
+
+		err = ion_phys(client, handle, &data.phys_address, &data.len);
+		ion_handle_put(handle);
+		if (err)
+			return err;
+
+		if(copy_to_user ((void __user *)arg, &data, sizeof(struct p7ion_buffer_info)))
+			return -EFAULT;
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+/*
+ * Probe: create the ION device and one heap per platform-data entry.
+ *
+ * Fixes vs. original: PTR_ERR(NULL) is 0, so a NULL return from
+ * ion_device_create()/ion_heap_create() previously made probe report
+ * success with a half-initialized driver — NULL now maps to -ENOMEM.
+ * The ION device is also destroyed when a heap fails to create (it was
+ * leaked before).
+ */
+static int __devinit p7ion_probe(struct platform_device* pdev)
+{
+	struct ion_platform_data* const pdata = dev_get_platdata(&pdev->dev);
+	int err;
+	unsigned int h;
+
+	BUG_ON(! pdata);
+	BUG_ON(! pdata->nr);
+
+	p7ion_heaps_nr = pdata->nr;
+	p7ion_heaps = kcalloc(p7ion_heaps_nr, sizeof(p7ion_heaps[0]), GFP_KERNEL);
+	if (! p7ion_heaps)
+		return -ENOMEM;
+
+	p7ion_dev = ion_device_create(p7ion_custom_ioctl);
+	if (IS_ERR_OR_NULL(p7ion_dev)) {
+		err = p7ion_dev ? PTR_ERR(p7ion_dev) : -ENOMEM;
+		goto err;
+	}
+
+	for (h = 0; h < p7ion_heaps_nr; h++) {
+		p7ion_heaps[h] = ion_heap_create(&pdata->heaps[h]);
+		if (IS_ERR_OR_NULL(p7ion_heaps[h])) {
+			err = p7ion_heaps[h] ? PTR_ERR(p7ion_heaps[h]) : -ENOMEM;
+			goto free;
+		}
+
+		ion_device_add_heap(p7ion_dev, p7ion_heaps[h]);
+	}
+
+	return 0;
+
+free:
+	while (h--)
+		ion_heap_destroy(p7ion_heaps[h]);
+	ion_device_destroy(p7ion_dev);
+err:
+	kfree(p7ion_heaps);
+	return err;
+}
+
+/* Remove: tear down every heap, then the ION device itself. */
+static int __devexit p7ion_remove(struct platform_device* pdev)
+{
+	unsigned int h;
+
+	for (h = 0; h < p7ion_heaps_nr; h++)
+		ion_heap_destroy(p7ion_heaps[h]);
+
+	ion_device_destroy(p7ion_dev);
+	kfree(p7ion_heaps);
+
+	return 0;
+}
+
+/* Module init/exit boilerplate is generated by module_platform_driver(). */
+static struct platform_driver p7ion_driver = {
+	.probe = p7ion_probe,
+	.remove = __devexit_p(p7ion_remove),
+	.driver = {
+		.name = "p7-ion",
+		.owner = THIS_MODULE,
+	}
+};
+module_platform_driver(p7ion_driver);
diff --git a/drivers/parrot/gpu/ion/p7-ion.h b/drivers/parrot/gpu/ion/p7-ion.h
new file mode 100644
index 00000000000000..52741a7c8d1766
--- /dev/null
+++ b/drivers/parrot/gpu/ion/p7-ion.h
@@ -0,0 +1,15 @@
+#ifndef P7_ION_H
+#define P7_ION_H
+
+/* Custom ioctl command(s) dispatched by the p7-ion driver. */
+enum{
+	P7ION_GET_BUFFER_INFO
+};
+
+/* Exchanged with userspace by the P7ION_GET_BUFFER_INFO ioctl.
+ * @handle:       ION handle id (in) — cast to int on the kernel side
+ * @phys_address: physical address of the buffer (out)
+ * @len:          buffer length in bytes (out) */
+struct p7ion_buffer_info{
+	struct ion_handle* handle;
+	unsigned long int phys_address;
+	size_t len;
+};
+
+
+#endif /*P7_ION_H*/
diff --git a/drivers/parrot/gpu/mali200/Kbuild b/drivers/parrot/gpu/mali200/Kbuild
new file mode 100644
index 00000000000000..731abefbb6deae
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/Kbuild
@@ -0,0 +1,317 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+ifneq (,$(filter $(CONFIG_MALI200),y m))
+OSKOS=linux
+
+ifneq ($(.DEFAULT_GOAL),__clean)
+ifneq ($(CONFIG_ARCH_PARROT7),y)
+$(error MALI configuration not supported on this architecture.)
+endif
+CONFIG=P7
+endif
+
+# set up defaults if not defined by the user
+ifneq (,$(filter $(CONFIG_UMP),y m))
+USING_UMP ?= 1
+else
+USING_UMP ?= 0
+endif
+USING_OS_MEMORY ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+ifeq ($(CONFIG_MALI200_PROFILING),y)
+USING_PROFILING ?= 1
+else
+ifeq ($(CONFIG_MALI200_PROFILING),m)
+USING_PROFILING ?= 1
+else
+USING_PROFILING ?= 0
+endif
+endif
+USING_INTERNAL_PROFILING ?= 0
+DISABLE_PP0 ?= 0
+DISABLE_PP1 ?= 0
+DISABLE_PP2 ?= 0
+DISABLE_PP3 ?= 0
+TIMESTAMP ?= default
+ifneq (,$(filter $(CONFIG_MALI400_DEBUG),y m))
+BUILD ?= debug
+else
+BUILD ?= release
+endif
+TARGET_PLATFORM ?= default
+KERNEL_RUNTIME_PM_ENABLED ?= 0
+CONFIG ?= pb-virtex5-m200
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+
+DEFINES := $(EXTRA_DEFINES)
+
+# Get path to driver source from Linux build system
+DRIVER_DIR=$(KBUILD_SRC)/$(src)
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary
+# Disable profiling for proprietary
+override USING_PROFILING := 0
+$(warning "USING_PROFILING not supported, disabling.")
+else
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(USING_INTERNAL_PROFILING),0)
+ifndef CONFIG_TRACEPOINTS
+# Should default to gator profiling, but we dont have the kernel feature required, so disable profiling
+override USING_PROFILING = 0
+$(warning "CONFIG_TRACEPOINTS required for USING_PROFILING")
+endif
+endif
+endif
+
+ifeq ($(USING_PROFILING),0)
+# make sure user hasnt selected incompatible flags
+override USING_INTERNAL_PROFILING = 0
+endif
+
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+
+# Check if a Mali Core sub module should be enabled, true or false returned
+# Version customized for P7.
+submodule_enabled = $(shell grep type $(KBUILD_SRC)/arch/arm/mach-parrot7/gpu-m200.c | grep -c $(2))
+
+ifeq ($(CONFIG_UMP),y)
+OSKFILES=\
+ $(OSKOS)/mali_osk_irq.o \
+ $(OSKOS)/mali_osk_locks.o \
+ $(OSKOS)/mali_osk_wait_queue.o \
+ $(OSKOS)/mali_osk_low_level_mem.o \
+ $(OSKOS)/mali_osk_mali.o \
+ $(OSKOS)/mali_osk_notification.o \
+ $(OSKOS)/mali_osk_time.o \
+ $(OSKOS)/mali_osk_timers.o
+else
+OSKFILES=\
+ $(OSKOS)/mali_osk_atomics.o \
+ $(OSKOS)/mali_osk_irq.o \
+ $(OSKOS)/mali_osk_locks.o \
+ $(OSKOS)/mali_osk_wait_queue.o \
+ $(OSKOS)/mali_osk_low_level_mem.o \
+ $(OSKOS)/mali_osk_math.o \
+ $(OSKOS)/mali_osk_memory.o \
+ $(OSKOS)/mali_osk_misc.o \
+ $(OSKOS)/mali_osk_mali.o \
+ $(OSKOS)/mali_osk_notification.o \
+ $(OSKOS)/mali_osk_time.o \
+ $(OSKOS)/mali_osk_timers.o
+endif
+
+
+UKKFILES = \
+ $(OSKOS)/mali_ukk_mem.c \
+ $(OSKOS)/mali_ukk_gp.c \
+ $(OSKOS)/mali_ukk_pp.c \
+ $(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES += \
+ $(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE = platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV :=
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=1
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_INTERNAL_TIMELINE_PROFILING_ENABLED=$(USING_INTERNAL_PROFILING)
+DEFINES += -DDISABLE_PP0=$(DISABLE_PP0)
+DEFINES += -DDISABLE_PP1=$(DISABLE_PP1)
+DEFINES += -DDISABLE_PP2=$(DISABLE_PP2)
+DEFINES += -DDISABLE_PP3=$(DISABLE_PP3)
+DEFINES += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+DEFINES += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+DEFINES += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+ifeq ($(shell test $(SUBLEVEL) -gt 32 -a $(PATCHLEVEL) = 6 -a $(VERSION) = 2 -o $(VERSION) -gt 2 && echo "OK"),OK)
+# MALI_STATE_TRACKING is only supported on Linux kernels from version 2.6.32.
+DEFINES += -DMALI_STATE_TRACKING=1
+else
+DEFINES += -DMALI_STATE_TRACKING=0
+endif
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+
+MALI_PLATFORM_FILE = platform/$(TARGET_PLATFORM)/mali_platform.c
+
+
+ifdef CONFIG_PM
+ifdef CONFIG_PM_RUNTIME
+ KERNEL_RUNTIME_PM_ENABLED = 1
+endif
+endif
+
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(KERNEL_RUNTIME_PM_ENABLED)
+
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+DEFINES += -DSVN_REV=$(SVN_REV)
+DEFINES += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_memory.c)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+ifeq ($(USING_UMP),1)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1
+ ccflags-y += -I$(DRIVER_DIR)/../ump/include/ump
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# Source files which always are included in a build
+SRC = \
+ common/mali_kernel_core.c \
+ linux/mali_kernel_linux.c \
+ $(OSKOS)/mali_osk_indir_mmap.c \
+ common/mali_kernel_descriptor_mapping.c \
+ common/mali_session.c \
+ common/mali_device_pause_resume.c \
+ common/mali_kernel_vsync.c \
+ linux/mali_ukk_vsync.c \
+ linux/mali_kernel_sysfs.c \
+ common/mali_mmu.c \
+ common/mali_mmu_page_directory.c \
+ common/mali_memory.c \
+ common/mali_kernel_memory_engine.c \
+ common/mali_block_allocator.c \
+ common/mali_kernel_mem_os.c \
+ common/mali_mem_validation.c \
+ common/mali_hw_core.c \
+ common/mali_gp.c \
+ common/mali_pp.c \
+ common/mali_pp_job.c \
+ common/mali_gp_job.c \
+ common/mali_scheduler.c \
+ common/mali_gp_scheduler.c \
+ common/mali_pp_scheduler.c \
+ common/mali_cluster.c \
+ common/mali_group.c \
+ common/mali_dlbu.c \
+ common/mali_pm.c \
+ common/mali_pmu.c \
+ common/mali_user_settings_db.c \
+ $(OSKOS)/mali_osk_pm.c \
+ linux/mali_kernel_pm.c \
+ linux/mali_pmu_power_up_down.c \
+ $(MALI_PLATFORM_FILE) \
+ $(OSKFILES) \
+ $(UKKFILES) \
+ __malidrv_build_info.c
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_INTERNAL_PROFILING),1)
+PROFILING_BACKEND_SOURCES = \
+ linux/mali_osk_profiling_internal.c \
+ timestamp-$(TIMESTAMP)/mali_timestamp.c
+ccflags-y += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+else
+ifeq ($(USING_PROFILING),1)
+PROFILING_BACKEND_SOURCES = \
+ linux/mali_osk_profiling_gator.c
+endif
+endif
+
+# Add the profiling sources
+SRC += $(PROFILING_BACKEND_SOURCES)
+
+ifeq ($(USING_MALI_PMM_TESTSUITE),1)
+ccflags-y += -I$(DRIVER_DIR)/platform/mali_pmu_testing
+endif
+
+mali-$(CONFIG_MALI200_GPU_UTILIZATION) += common/mali_kernel_utilization.o
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI400PP),0)
+ # Mali-400 PP in use
+ ccflags-y += -DUSING_MALI400
+endif
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI300PP),0)
+ # Mali-400 PP in use
+ ccflags-y += -DUSING_MALI400
+endif
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI200),0)
+ # Mali200 in use
+ ccflags-y += -DUSING_MALI200
+endif
+
+# Always build in support for Mali L2 cache
+SRC += common/mali_l2_cache.c
+
+# Tell the Linux build system to enable building of our .c files
+mali200-y += $(SRC:.c=.o)
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI200) := mali200.o
+
+
+VERSION_STRINGS :=
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+#VERSION_STRINGS += BUILD_DATE=$(shell date)
+
+VERSION_STRINGS += BUILD=$(shell echo $(BUILD) | tr a-z A-Z)
+VERSION_STRINGS += CPU=$(CPU)
+VERSION_STRINGS += USING_UMP=$(USING_UMP)
+VERSION_STRINGS += USING_MALI200=$(call submodule_enabled, $(DRIVER_DIR), MALI200)
+VERSION_STRINGS += USING_MALI400=$(call submodule_enabled, $(DRIVER_DIR), MALI400)
+VERSION_STRINGS += USING_MALI400_L2_CACHE=$(call submodule_enabled, $(DRIVER_DIR), MALI400L2)
+VERSION_STRINGS += USING_GP2=$(call submodule_enabled, $(DRIVER_DIR), MALIGP2)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += MALI_PLATFORM_FILE=$(MALI_PLATFORM_FILE)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_PROFILING=$(USING_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(USING_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(CONFIG_MALI200_GPU_UTILIZATION)
+
+# Create file with Mali driver configuration
+$(DRIVER_DIR)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(DRIVER_DIR)/__malidrv_build_info.c
+endif
+
diff --git a/drivers/parrot/gpu/mali200/Kbuild.original b/drivers/parrot/gpu/mali200/Kbuild.original
new file mode 100644
index 00000000000000..a3f63ae4f43ea8
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/Kbuild.original
@@ -0,0 +1,288 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+OSKOS=linux
+
+# set up defaults if not defined by the user
+USING_UMP ?= 0
+USING_OS_MEMORY ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+DISABLE_PP0 ?= 0
+DISABLE_PP1 ?= 0
+DISABLE_PP2 ?= 0
+DISABLE_PP3 ?= 0
+TIMESTAMP ?= default
+BUILD ?= debug
+TARGET_PLATFORM ?= default
+KERNEL_RUNTIME_PM_ENABLED ?= 0
+CONFIG ?= pb-virtex5-m200
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+
+DEFINES := $(EXTRA_DEFINES)
+
+# Get path to driver source from Linux build system
+DRIVER_DIR=$(src)
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary
+# Disable profiling for proprietary
+override USING_PROFILING := 0
+$(warning "USING_PROFILING not supported, disabling.")
+else
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(USING_INTERNAL_PROFILING),0)
+ifndef CONFIG_TRACEPOINTS
+# Should default to gator profiling, but we don't have the kernel feature required, so disable profiling
+override USING_PROFILING = 0
+$(warning "CONFIG_TRACEPOINTS required for USING_PROFILING")
+endif
+endif
+endif
+
+ifeq ($(USING_PROFILING),0)
+# make sure user hasn't selected incompatible flags
+override USING_INTERNAL_PROFILING = 0
+endif
+
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+
+# Check if a Mali Core sub module should be enabled, true or false returned
+submodule_enabled = $(shell gcc $(DEFINES) -E $1/arch/config.h | grep type | grep -c $(2))
+
+OSKFILES = \
+ $(OSKOS)/mali_osk_atomics.c \
+ $(OSKOS)/mali_osk_irq.c \
+ $(OSKOS)/mali_osk_locks.c \
+ $(OSKOS)/mali_osk_wait_queue.c \
+ $(OSKOS)/mali_osk_low_level_mem.c \
+ $(OSKOS)/mali_osk_math.c \
+ $(OSKOS)/mali_osk_memory.c \
+ $(OSKOS)/mali_osk_misc.c \
+ $(OSKOS)/mali_osk_mali.c \
+ $(OSKOS)/mali_osk_notification.c \
+ $(OSKOS)/mali_osk_time.c \
+ $(OSKOS)/mali_osk_timers.c
+
+UKKFILES = \
+ $(OSKOS)/mali_ukk_mem.c \
+ $(OSKOS)/mali_ukk_gp.c \
+ $(OSKOS)/mali_ukk_pp.c \
+ $(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES += \
+ $(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE = platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV := $(shell (cd $(DRIVER_DIR); (svnversion | grep -qv exported && svnversion) || git svn info | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+endif
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=1
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_INTERNAL_TIMELINE_PROFILING_ENABLED=$(USING_INTERNAL_PROFILING)
+DEFINES += -DDISABLE_PP0=$(DISABLE_PP0)
+DEFINES += -DDISABLE_PP1=$(DISABLE_PP1)
+DEFINES += -DDISABLE_PP2=$(DISABLE_PP2)
+DEFINES += -DDISABLE_PP3=$(DISABLE_PP3)
+DEFINES += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+DEFINES += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+DEFINES += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+ifeq ($(shell test $(SUBLEVEL) -gt 32 -a $(PATCHLEVEL) = 6 -a $(VERSION) = 2 -o $(VERSION) -gt 2 && echo "OK"),OK)
+# MALI_STATE_TRACKING is only supported on Linux kernels newer than version 2.6.32.
+DEFINES += -DMALI_STATE_TRACKING=1
+else
+DEFINES += -DMALI_STATE_TRACKING=0
+endif
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+
+MALI_PLATFORM_FILE = platform/$(TARGET_PLATFORM)/mali_platform.c
+
+
+ifdef CONFIG_PM
+ifdef CONFIG_PM_RUNTIME
+ KERNEL_RUNTIME_PM_ENABLED = 1
+endif
+endif
+
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(KERNEL_RUNTIME_PM_ENABLED)
+
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+DEFINES += -DSVN_REV=$(SVN_REV)
+DEFINES += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_memory.c)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+ifeq ($(USING_UMP),1)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1
+ ccflags-y += -I$(DRIVER_DIR)/../../ump/include/ump
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Use our defines when compiling
+ccflags-y += $(DEFINES) -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# Source files which always are included in a build
+SRC = \
+ common/mali_kernel_core.c \
+ linux/mali_kernel_linux.c \
+ $(OSKOS)/mali_osk_indir_mmap.c \
+ common/mali_kernel_descriptor_mapping.c \
+ common/mali_session.c \
+ common/mali_device_pause_resume.c \
+ common/mali_kernel_vsync.c \
+ linux/mali_ukk_vsync.c \
+ linux/mali_kernel_sysfs.c \
+ common/mali_mmu.c \
+ common/mali_mmu_page_directory.c \
+ common/mali_memory.c \
+ common/mali_kernel_memory_engine.c \
+ common/mali_block_allocator.c \
+ common/mali_kernel_mem_os.c \
+ common/mali_mem_validation.c \
+ common/mali_hw_core.c \
+ common/mali_gp.c \
+ common/mali_pp.c \
+ common/mali_pp_job.c \
+ common/mali_gp_job.c \
+ common/mali_scheduler.c \
+ common/mali_gp_scheduler.c \
+ common/mali_pp_scheduler.c \
+ common/mali_cluster.c \
+ common/mali_group.c \
+ common/mali_dlbu.c \
+ common/mali_pm.c \
+ common/mali_pmu.c \
+ common/mali_user_settings_db.c \
+ $(OSKOS)/mali_osk_pm.c \
+ linux/mali_kernel_pm.c \
+ linux/mali_pmu_power_up_down.c \
+ $(MALI_PLATFORM_FILE) \
+ $(OSKFILES) \
+ $(UKKFILES) \
+ __malidrv_build_info.c
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_INTERNAL_PROFILING),1)
+PROFILING_BACKEND_SOURCES = \
+ linux/mali_osk_profiling_internal.c \
+ timestamp-$(TIMESTAMP)/mali_timestamp.c
+ccflags-y += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+else
+ifeq ($(USING_PROFILING),1)
+PROFILING_BACKEND_SOURCES = \
+ linux/mali_osk_profiling_gator.c
+endif
+endif
+
+# Add the profiling sources
+SRC += $(PROFILING_BACKEND_SOURCES)
+
+ifeq ($(USING_MALI_PMM_TESTSUITE),1)
+ccflags-y += -I$(DRIVER_DIR)/platform/mali_pmu_testing
+endif
+
+mali-$(CONFIG_MALI400_GPU_UTILIZATION) += common/mali_kernel_utilization.o
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI400PP),0)
+ # Mali-400 PP in use
+ ccflags-y += -DUSING_MALI400
+endif
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI300PP),0)
+	# Mali-300 PP in use
+ ccflags-y += -DUSING_MALI400
+endif
+
+ifneq ($(call submodule_enabled, $(DRIVER_DIR), MALI200),0)
+ # Mali200 in use
+ ccflags-y += -DUSING_MALI200
+endif
+
+# Always build in support for Mali L2 cache
+SRC += common/mali_l2_cache.c
+
+# Tell the Linux build system to enable building of our .c files
+mali-y += $(SRC:.c=.o)
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+
+VERSION_STRINGS :=
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+
+VERSION_STRINGS += BUILD=$(shell echo $(BUILD) | tr a-z A-Z)
+VERSION_STRINGS += CPU=$(CPU)
+VERSION_STRINGS += USING_UMP=$(USING_UMP)
+VERSION_STRINGS += USING_MALI200=$(call submodule_enabled, $(DRIVER_DIR), MALI200)
+VERSION_STRINGS += USING_MALI400=$(call submodule_enabled, $(DRIVER_DIR), MALI400)
+VERSION_STRINGS += USING_MALI400_L2_CACHE=$(call submodule_enabled, $(DRIVER_DIR), MALI400L2)
+VERSION_STRINGS += USING_GP2=$(call submodule_enabled, $(DRIVER_DIR), MALIGP2)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += MALI_PLATFORM_FILE=$(MALI_PLATFORM_FILE)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_PROFILING=$(USING_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(USING_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(CONFIG_MALI400_GPU_UTILIZATION)
+
+# Create file with Mali driver configuration
+$(DRIVER_DIR)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(DRIVER_DIR)/__malidrv_build_info.c
diff --git a/drivers/parrot/gpu/mali200/Kconfig b/drivers/parrot/gpu/mali200/Kconfig
new file mode 100644
index 00000000000000..e34f635ba29355
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/Kconfig
@@ -0,0 +1,31 @@
+config MALI200
+ tristate "Mali-200 support"
+ depends on ARM && !MALI400
+ depends on GPU_PARROT7
+ select FB
+ ---help---
+ This enables support for the Mali-200 GPUs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mali.
+
+config MALI200_DEBUG
+ bool "Enable debug in Mali driver"
+ depends on MALI200
+ ---help---
+	  This enables extra debug checks and messages in the Mali-200
+	  driver.
+
+config MALI200_PROFILING
+ bool "Enable Mali200 profiling"
+ depends on MALI200 && TRACEPOINTS
+ ---help---
+ This enables gator profiling of Mali GPU events.
+
+config MALI200_GPU_UTILIZATION
+ bool "Enable Mali GPU utilization tracking"
+ depends on MALI200
+ ---help---
+ This enables gathering and processing of the utilization of Mali GPU.
+ This data can be used as a basis to change GPU operating frequency.
+
diff --git a/drivers/parrot/gpu/mali200/Makefile b/drivers/parrot/gpu/mali200/Makefile
new file mode 100644
index 00000000000000..4fb02263da8905
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/Makefile
@@ -0,0 +1,89 @@
+#
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Check that required parameters are supplied.
+ifeq ($(CONFIG),)
+$(error "CONFIG must be specified.")
+endif
+ifeq ($(CPU)$(KDIR),)
+$(error "KDIR or CPU must be specified.")
+endif
+
+ifeq ($(USING_UMP),1)
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+endif
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning Config $(CONFIG))
+$(warning Host CPU $(CPU))
+$(warning OS_MEMORY $(USING_OS_MEMORY))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+
+ifeq ($(USING_GPU_UTILIZATION),1)
+export EXTRA_DEFINES += -DCONFIG_MALI400_GPU_UTILIZATION=1
+export CONFIG_MALI400_GPU_UTILIZATION := y
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/parrot/gpu/mali200/__malidrv_build_info.c b/drivers/parrot/gpu/mali200/__malidrv_build_info.c
new file mode 100644
index 00000000000000..fc8bc2ec78beeb
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/__malidrv_build_info.c
@@ -0,0 +1 @@
+const char *__malidrv_build_info(void) { return "malidrv: CONFIG=P7 USING_OS_MEMORY=0 API_VERSION=14 REPO_URL=r3p0-04rel0 REVISION=r3p0-04rel0 CHANGED_REVISION=r3p0-04rel0 CHANGE_DATE=r3p0-04rel0 BUILD=RELEASE CPU= USING_UMP=1 USING_MALI200=1 USING_MALI400=0 USING_MALI400_L2_CACHE=0 USING_GP2=1 KDIR= MALI_PLATFORM_FILE=platform/default/mali_platform.c OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=6 USING_PROFILING=0 USING_INTERNAL_PROFILING=0 USING_GPU_UTILIZATION=";}
diff --git a/drivers/parrot/gpu/mali200/arch-pb-virtex5-m200/config.h b/drivers/parrot/gpu/mali200/arch-pb-virtex5-m200/config.h
new file mode 100644
index 00000000000000..9b3d0c7cf1c88f
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/arch-pb-virtex5-m200/config.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Note: IRQ auto detection (setting irq to -1) only works if the IRQ is not shared with any other hardware resource */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALIGP2,
+ .description = "MALI GP2",
+ .base = 0xc0002000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xc0003000,
+ .irq = -1, /*105*/
+ .description = "Mali MMU",
+ .mmu_id = 1
+ },
+ {
+ .type = MALI200,
+ .base = 0xc0000000,
+ .irq = -1/*106*/,
+ .description = "Mali 200 (GX525)",
+ .mmu_id = 1
+ },
+#if USING_OS_MEMORY
+ {
+ .type = OS_MEMORY,
+ .description = "OS Memory",
+ .alloc_order = 10, /* Lowest preference for this memory */
+ .size = 0x04000000, /* 64MB, to avoid OOM-killer */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+#endif /** USING_OS_MEMORY */
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000, /* 256 MB */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/mali200/arch-pb-virtex5-m400-1-pmu/config.h b/drivers/parrot/gpu/mali200/arch-pb-virtex5-m400-1-pmu/config.h
new file mode 100644
index 00000000000000..d85c0907aa3689
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/arch-pb-virtex5-m400-1-pmu/config.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/mali200/arch-ve-virtex6-m450-8/config.h b/drivers/parrot/gpu/mali200/arch-ve-virtex6-m450-8/config.h
new file mode 100644
index 00000000000000..9b38f359b59b99
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/arch-ve-virtex6-m450-8/config.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the Versatile Express platform */
+
+#define MALI_BASE_ADDRESS 0xFC040000
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ /* GP cluster */
+ {
+ .type = MALI400L2,
+ .base = MALI_BASE_ADDRESS + 0x10000,
+ .description = "Mali-450 L2 cache for GP"
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-450 GP",
+ .base = MALI_BASE_ADDRESS,
+ .irq = -1,
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x3000,
+ .irq = 70,
+ .description = "Mali-450 MMU for GP",
+ },
+
+ /* PP0-3 cluster */
+ {
+ .type = MALI400L2,
+ .base = MALI_BASE_ADDRESS + 0x1000,
+ .description = "Mali-450 L2 cache for PP0-3"
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0x8000,
+ .irq = 70,
+ .description = "Mali-450 PP0",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x4000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP0",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0xA000,
+ .irq = 70,
+ .description = "Mali-450 PP1",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x5000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP1",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0xC000,
+ .irq = 70,
+ .description = "Mali-450 PP2",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x6000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP2",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0xE000,
+ .irq = 70,
+ .description = "Mali-450 PP3",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x7000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP3",
+ },
+
+ /* PP4-7 cluster */
+ {
+ .type = MALI400L2,
+ .base = MALI_BASE_ADDRESS + 0x11000,
+ .description = "Mali-450 L2 cache for PP4-7"
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0x28000,
+ .irq = 70,
+ .description = "Mali-450 PP4",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x1C000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP4",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0x2A000,
+ .irq = 70,
+ .description = "Mali-450 PP5",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x1D000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP5",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0x2C000,
+ .irq = 70,
+ .description = "Mali-450 PP6",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x1E000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP6",
+ },
+ {
+ .type = MALI400PP,
+ .base = MALI_BASE_ADDRESS + 0x2E000,
+ .irq = 70,
+ .description = "Mali-450 PP7",
+ },
+ {
+ .type = MMU,
+ .base = MALI_BASE_ADDRESS + 0x1F000,
+ .irq = 70,
+ .description = "Mali-450 MMU for PP7",
+ },
+
+ /* Memory */
+ {
+ .type = OS_MEMORY,
+ .description = "Mali OS memory",
+ .cpu_usage_adjust = 0,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0x0,
+ .size = 256 * 1024 * 1024,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/mali200/arch/config.h b/drivers/parrot/gpu/mali200/arch/config.h
new file mode 100644
index 00000000000000..fa27ba65a59341
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/arch/config.h
@@ -0,0 +1,12 @@
+/**
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 20-10-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <mach/gpu-p7.h>
+
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_block_allocator.c b/drivers/parrot/gpu/mali200/common/mali_block_allocator.c
new file mode 100644
index 00000000000000..269e662d818a4a
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_block_allocator.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->stat = block_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle) /* return an allocation's blocks to the free-list and unmap it */
+{
+	block_allocator * info;
+	block_info * block, * next;
+	block_allocator_allocation *allocation;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(handle);
+
+	info = (block_allocator*)ctx;
+	allocation = (block_allocator_allocation*)handle;
+	block = allocation->last_allocated;
+
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return;
+	}
+
+	/* unmap */
+	mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+	while (block)
+	{
+		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks)))); /* >=: one-past-the-end is not a valid element */
+
+		next = block->next;
+
+		/* relink into free-list */
+		block->next = info->first_free;
+		info->first_free = block;
+
+		/* advance the loop */
+		block = next;
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	_mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block) /* hand one free block to the MMU as page-table backing */
+{
+	block_allocator * info;
+	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(block);
+	info = (block_allocator*)ctx;
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	if (NULL != info->first_free) /* pop the head of the free-list */
+	{
+		void * virt;
+		u32 phys;
+		u32 size;
+		block_info * alloc;
+		alloc = info->first_free;
+
+		phys = get_phys(info, alloc); /* Does not modify info or alloc */
+		size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+		virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+		/* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+		 * because it's unlikely another allocator will be able to map in. */
+
+		if ( NULL != virt )
+		{
+			block->ctx = info; /* same as incoming ctx */
+			block->handle = alloc;
+			block->phys_base = phys;
+			block->size = size;
+			block->release = block_allocator_release_page_table_block;
+			block->mapping = virt;
+
+			info->first_free = alloc->next;
+
+			alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+			result = MALI_MEM_ALLOC_FINISHED;
+		}
+	}
+	else result = MALI_MEM_ALLOC_NONE; /* free-list empty: let another allocator try */
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block ) /* unmap a page-table block and return its block_info(s) to the free-list */
+{
+	block_allocator * info;
+	block_info * block, * next;
+
+	MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+	info = (block_allocator*)page_table_block->ctx;
+	block = (block_info*)page_table_block->handle;
+
+	MALI_DEBUG_ASSERT_POINTER(info);
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return;
+	}
+
+	/* Unmap all the physical memory at once */
+	_mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+	/** @note This loop handles the case where more than one block_info was linked.
+	 * Probably unnecessary for page table block releasing. */
+	while (block)
+	{
+		next = block->next;
+
+		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks)))); /* >=: one-past-the-end is not a valid element */
+
+		block->next = info->first_free;
+		info->first_free = block;
+
+		block = next;
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator) /* bytes currently allocated from this block allocator */
+{
+	block_allocator * info;
+	block_info *block;
+	u32 free_blocks = 0;
+
+	MALI_DEBUG_ASSERT_POINTER(allocator);
+
+	info = (block_allocator*)allocator->ctx;
+	block = info->first_free;
+
+	while(block) /* NOTE(review): free-list walked without the mutex — count may race with alloc/release; confirm this is acceptable for a stat */
+	{
+		free_blocks++;
+		block = block->next;
+	}
+	return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE; /* in-use blocks times block size */
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_block_allocator.h b/drivers/parrot/gpu/mali200/common/mali_block_allocator.h
new file mode 100644
index 00000000000000..d3f0f9be60ad7d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_cluster.c b/drivers/parrot/gpu/mali200/common/mali_cluster.c
new file mode 100644
index 00000000000000..f0fb2b6137b123
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_cluster.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_cluster.h"
+#include "mali_osk.h"
+#include "mali_group.h"
+#include "mali_l2_cache.h"
+#include "mali_scheduler.h"
+
+static struct mali_cluster *mali_global_clusters[MALI_MAX_NUMBER_OF_CLUSTERS] = { NULL, NULL, NULL };
+static u32 mali_global_num_clusters = 0;
+
+/**
+ * The structure represents a render cluster
+ * A render cluster is defined by all the cores that share the same Mali L2 cache
+ */
+struct mali_cluster
+{
+ struct mali_l2_cache_core *l2;
+ u32 number_of_groups;
+ struct mali_group* groups[MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER];
+ u32 last_invalidated_id;
+ mali_bool power_is_enabled;
+};
+
+struct mali_cluster *mali_cluster_create(struct mali_l2_cache_core *l2_cache) /* allocate a cluster and register it in the global table; NULL on OOM or when the table is full */
+{
+	struct mali_cluster *cluster = NULL;
+
+	if (mali_global_num_clusters >= MALI_MAX_NUMBER_OF_CLUSTERS)
+	{
+		MALI_PRINT_ERROR(("Mali cluster: Too many cluster objects created\n"));
+		return NULL;
+	}
+
+	cluster = _mali_osk_malloc(sizeof(struct mali_cluster));
+	if (NULL != cluster)
+	{
+		_mali_osk_memset(cluster, 0, sizeof(struct mali_cluster)); /* zero groups[], number_of_groups */
+		cluster->l2 = l2_cache; /* This cluster now owns this L2 cache object */
+		cluster->last_invalidated_id = 0;
+		cluster->power_is_enabled = MALI_TRUE;
+
+		mali_global_clusters[mali_global_num_clusters] = cluster;
+		mali_global_num_clusters++;
+
+		return cluster;
+	}
+
+	return NULL; /* allocation failed */
+}
+
+void mali_cluster_power_is_enabled_set(struct mali_cluster * cluster, mali_bool power_is_enabled) /* record the cluster's power state; no NULL assert here (unlike siblings) — caller must pass a valid cluster */
+{
+	cluster->power_is_enabled = power_is_enabled;
+}
+
+mali_bool mali_cluster_power_is_enabled_get(struct mali_cluster * cluster) /* read back the flag set by mali_cluster_power_is_enabled_set() */
+{
+	return cluster->power_is_enabled;
+}
+
+
+void mali_cluster_add_group(struct mali_cluster *cluster, struct mali_group *group) /* take ownership of a group; silently ignored when the cluster is already full */
+{
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	if (cluster->number_of_groups < MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER)
+	{
+		/* This cluster now owns the group object */
+		cluster->groups[cluster->number_of_groups] = group;
+		cluster->number_of_groups++;
+	}
+}
+
+void mali_cluster_delete(struct mali_cluster *cluster) /* tear down a cluster: its groups, its L2, its global-table slot, then the object itself */
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	/* Free all the resources we own */
+	for (i = 0; i < cluster->number_of_groups; i++)
+	{
+		mali_group_delete(cluster->groups[i]);
+	}
+
+	if (NULL != cluster->l2)
+	{
+		mali_l2_cache_delete(cluster->l2);
+	}
+
+	for (i = 0; i < mali_global_num_clusters; i++)
+	{
+		if (mali_global_clusters[i] == cluster)
+		{
+			mali_global_clusters[i] = NULL; /* NOTE(review): leaves a NULL hole while the count shrinks; create() indexes by count — confirm clusters are only ever deleted in LIFO order */
+			mali_global_num_clusters--;
+			break;
+		}
+	}
+
+	_mali_osk_free(cluster);
+}
+
+void mali_cluster_reset(struct mali_cluster *cluster) /* hardware-reset every owned group, then the shared L2 cache; frees nothing */
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	/* Reset every group owned by this cluster (nothing is freed here) */
+	for (i = 0; i < cluster->number_of_groups; i++)
+	{
+		struct mali_group *group = cluster->groups[i];
+
+		mali_group_reset(group);
+	}
+
+	if (NULL != cluster->l2)
+	{
+		mali_l2_cache_reset(cluster->l2);
+	}
+}
+
+struct mali_l2_cache_core* mali_cluster_get_l2_cache_core(struct mali_cluster *cluster) /* accessor; the cluster keeps ownership of the returned L2 object */
+{
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+	return cluster->l2;
+}
+
+struct mali_group *mali_cluster_get_group(struct mali_cluster *cluster, u32 index) /* bounds-checked group lookup; NULL when index is out of range */
+{
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	if (index < cluster->number_of_groups)
+	{
+		return cluster->groups[index];
+	}
+
+	return NULL;
+}
+
+struct mali_cluster *mali_cluster_get_global_cluster(u32 index) /* global registry lookup; may return NULL for an empty slot */
+{
+	if (MALI_MAX_NUMBER_OF_CLUSTERS > index)
+	{
+		return mali_global_clusters[index];
+	}
+
+	return NULL;
+}
+
+u32 mali_cluster_get_glob_num_clusters(void) /* number of clusters currently registered in the global table */
+{
+	return mali_global_num_clusters;
+}
+
+mali_bool mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id) /* invalidate L2 unless a newer job already did; returns MALI_TRUE when it flushed (or there is no L2) */
+{
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	if (NULL != cluster->l2)
+	{
+		/* If the last cache invalidation was done by a job with a higher id we
+		 * don't have to flush. Since user space will store jobs w/ their
+		 * corresponding memory in sequence (first job #0, then job #1, ...),
+		 * we don't have to flush for job n-1 if job n has already invalidated
+		 * the cache since we know for sure that job n-1's memory was already
+		 * written when job n was started. */
+		if (((s32)id) <= ((s32)cluster->last_invalidated_id)) /* signed compare handles id wrap-around */
+		{
+			return MALI_FALSE;
+		}
+		else
+		{
+			cluster->last_invalidated_id = mali_scheduler_get_new_id(); /* stamp with a fresh id so older jobs skip their flush */
+		}
+
+		mali_l2_cache_invalidate_all(cluster->l2);
+	}
+	return MALI_TRUE;
+}
+
+void mali_cluster_l2_cache_invalidate_all_force(struct mali_cluster *cluster) /* unconditional L2 invalidate; also advances last_invalidated_id */
+{
+	MALI_DEBUG_ASSERT_POINTER(cluster);
+
+	if (NULL != cluster->l2)
+	{
+		cluster->last_invalidated_id = mali_scheduler_get_new_id();
+		mali_l2_cache_invalidate_all(cluster->l2);
+	}
+}
+
+void mali_cluster_invalidate_pages(u32 *pages, u32 num_pages) /* invalidate the given physical pages in every registered cluster's L2 */
+{
+	u32 i;
+
+	for (i = 0; i < mali_global_num_clusters; i++)
+	{
+		/*additional check for cluster*/
+		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_clusters[i]->l2)) /* MALI_TRUE means the L2 is powered and safe to touch */
+		{
+			mali_l2_cache_invalidate_pages(mali_global_clusters[i]->l2, pages, num_pages);
+		}
+		mali_l2_cache_unlock_power_state(mali_global_clusters[i]->l2); /* NOTE(review): unlock is unconditional — confirm lock_power_state always takes the lock even when returning MALI_FALSE */
+		/*check for failed power locking???*/
+	}
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_cluster.h b/drivers/parrot/gpu/mali200/common/mali_cluster.h
new file mode 100644
index 00000000000000..33debdb2d50cd8
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_cluster.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CLUSTER_H__
+#define __MALI_CLUSTER_H__
+
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+
+/* Maximum 1 GP and 4 PP for a cluster (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER 5
+#define MALI_MAX_NUMBER_OF_CLUSTERS 3
+
+struct mali_cluster;
+struct mali_group;
+
+struct mali_cluster *mali_cluster_create(struct mali_l2_cache_core *l2_cache);
+void mali_cluster_add_group(struct mali_cluster *cluster, struct mali_group *group);
+void mali_cluster_delete(struct mali_cluster *cluster);
+
+void mali_cluster_power_is_enabled_set(struct mali_cluster * cluster, mali_bool power_is_enabled);
+mali_bool mali_cluster_power_is_enabled_get(struct mali_cluster * cluster);
+
+void mali_cluster_reset(struct mali_cluster *cluster);
+
+struct mali_l2_cache_core* mali_cluster_get_l2_cache_core(struct mali_cluster *cluster);
+struct mali_group *mali_cluster_get_group(struct mali_cluster *cluster, u32 index);
+
+struct mali_cluster *mali_cluster_get_global_cluster(u32 index);
+u32 mali_cluster_get_glob_num_clusters(void);
+
+/* Returns MALI_TRUE if it did the flush */
+mali_bool mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id);
+void mali_cluster_l2_cache_invalidate_all_force(struct mali_cluster *cluster);
+void mali_cluster_invalidate_pages(u32 *pages, u32 num_pages);
+
+#endif /* __MALI_CLUSTER_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.c b/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.c
new file mode 100644
index 00000000000000..6af12790d6c1b7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.c
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
+
+void mali_dev_pause(mali_bool *power_is_on) /* suspend GP/PP scheduling and hold the PM state-change lock until mali_dev_resume() */
+{
+	mali_gp_scheduler_suspend();
+	mali_pp_scheduler_suspend();
+
+	/*
+	 * Take and hold the PM lock to be sure we don't change power state as well.
+	 * (it might be unsafe to for instance change frequency if Mali GPU is powered off)
+	 */
+	mali_pm_execute_state_change_lock();
+	if (NULL != power_is_on) /* optional out-parameter */
+	{
+		*power_is_on = mali_pm_is_powered_on();
+	}
+}
+
+void mali_dev_resume(void) /* release the PM lock and resume the schedulers; must follow mali_dev_pause() */
+{
+	mali_pm_execute_state_change_unlock();
+	mali_gp_scheduler_resume();
+	mali_pp_scheduler_resume();
+}
+
+/*
+EXPORT_SYMBOL(mali_dev_pause);
+EXPORT_SYMBOL(mali_dev_resume);
+*/
diff --git a/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.h b/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.h
new file mode 100644
index 00000000000000..6be75b0510f342
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_device_pause_resume.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#include "mali_osk.h"
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ *
+ * @param power_is_on Receives the power current status of Mali GPU. MALI_TRUE if GPU is powered on
+ */
+void mali_dev_pause(mali_bool *power_is_on);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_dlbu.c b/drivers/parrot/gpu/mali200/common/mali_dlbu.c
new file mode 100644
index 00000000000000..fcc51fac694a45
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_dlbu.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = 0;
+
+static u32 mali_dlbu_tile_position;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - set this bit to 1 enables the AXI bus
+ between PPs and L2s, setting to 0 disables the router and
+ no further transactions are sent to DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum
+{
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU
+ see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void) /* grab one MMU table page to hold the DLBU master tile list */
+{
+
+	MALI_DEBUG_PRINT(2, ("Dynamic Load Balancing Unit initializing\n"));
+
+	if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr))
+	{
+		MALI_SUCCESS;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void) /* return the master tile list page to the MMU page pool */
+{
+	MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+	mali_mmu_release_table_page(mali_dlbu_phys_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource) /* map the DLBU register block, reset it, program the master tile list virtual address */
+{
+	struct mali_dlbu_core *core = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+	core = _mali_osk_calloc(1, sizeof(struct mali_dlbu_core)); /* zeroed: mali_dlbu_reset() below writes pp_cores_mask to HW before any group has been added */
+	if (NULL != core)
+	{
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE))
+		{
+			if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core))
+			{
+				mali_hw_core_register_write(&core->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR, MALI_DLB_VIRT_ADDR);
+
+				return core;
+			}
+			MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+	}
+
+	return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu) /* reset HW, unmap the register block, free the core object */
+{
+	mali_dlbu_reset(dlbu);
+	mali_hw_core_delete(&dlbu->hw_core);
+	_mali_osk_free(dlbu);
+}
+
+void mali_dlbu_enable(struct mali_dlbu_core *dlbu) /* set bit 0 of the master tile list register: routes PP<->L2 traffic through the DLBU */
+{
+	u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR);
+
+	wval |= 0x1;
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, wval);
+}
+
+void mali_dlbu_disable(struct mali_dlbu_core *dlbu) /* clear bit 0 (DLBU enable) of the master tile list register */
+{
+	u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR);
+
+	wval &= 0xFFFFFFFE; /* was "wval |= (wval & 0xFFFFFFFE)" — a no-op that never cleared the enable bit */
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, wval);
+}
+
+_mali_osk_errcode_t mali_dlbu_enable_pp_core(struct mali_dlbu_core *dlbu, u32 pp_core_enable, u32 val) /* set (val==1) or clear (val==0) the enable-mask bit for PP core pp_core_enable */
+{
+	u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK);
+
+	if((pp_core_enable < mali_pp_get_glob_num_pp_cores()) && ((0 == val) || (1 == val))) /* check for valid input parameters */
+	{
+		if (val == 1)
+		{
+			wval |= (0x1 << pp_core_enable); /* bit N enables PP core N (was "pp_core_enable <<= 0x1", which shifted the core id itself) */
+		}
+		if (val == 0)
+		{
+			wval &= ~(0x1 << pp_core_enable); /* clear bit N (the old "wval |= val" could never clear a bit) */
+		}
+		/* wval now holds the updated enable mask */
+		mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, wval);
+		dlbu->pp_cores_mask = wval;
+
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_enable_all_pp_cores(struct mali_dlbu_core *dlbu) /* restore the cached per-PP enable mask to hardware */
+{
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_disable_all_pp_cores(struct mali_dlbu_core *dlbu) /* zero the HW enable mask; the cached dlbu->pp_cores_mask is deliberately kept for later restore */
+{
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, 0x0);
+}
+
+void mali_dlbu_setup(struct mali_dlbu_core *dlbu, u8 fb_xdim, u8 fb_ydim, u8 xtilesize, u8 ytilesize, u8 blocksize, u8 xgr0, u8 ygr0, u8 xgr1, u8 ygr1) /* program framebuffer dims, tile list config and start positions; shift operands were reversed (e.g. "16 << fb_ydim") */
+{
+	u32 wval = 0x0;
+
+	/* write the framebuffer dimensions: 23:16 = Y tiles-1, 7:0 = X tiles-1 */
+	wval = ((u32)fb_ydim << 16) | (u32)fb_xdim;
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_FB_DIM, wval);
+
+	/* write the tile list configuration: 29:28 block size, 21:16 Y binning, 5:0 X binning */
+	wval = 0x0;
+	wval = ((u32)blocksize << 28) | ((u32)ytilesize << 16) | ((u32)xtilesize);
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_CONF, wval);
+
+	/* write the start tile position: 31:24 ygr1, 23:16 xgr1, 15:8 ygr0, 7:0 xgr0 */
+	wval = 0x0;
+	wval = ((u32)ygr1 << 24) | ((u32)xgr1 << 16) | ((u32)ygr0 << 8) | (u32)xgr0;
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, wval);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu) /* reprogram all DLBU registers to a known state; always returns _MALI_OSK_ERR_OK */
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+	MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, mali_dlbu_phys_addr); /* presumably page-aligned, so bit 0 (enable) ends up cleared — confirm */
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, 0x00);
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_FB_DIM, 0x00);
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_CONF, 0x00);
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, 0x00);
+
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask); /* restore the cached PP enable mask */
+
+	err = _MALI_OSK_ERR_OK;
+
+	return err;
+}
+
+_mali_osk_errcode_t mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group) /* enable the group's PP core in the DLBU mask; FAULT if the group has no PP core */
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	u32 wval, rval;
+	struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+
+	/* find the core id and set the corresponding enable-mask bit */
+
+	if (NULL != pp_core)
+	{
+		wval = mali_pp_core_get_id(pp_core);
+		rval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK);
+		mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, (0x1 << wval) | rval); /* bit N enables PP core N (was "wval << 0x1", i.e. core_id*2) */
+		err = _MALI_OSK_ERR_OK;
+	}
+
+	return err;
+}
+
+void mali_dlbu_set_tllist_base_address(struct mali_dlbu_core *dlbu, u32 val) /* virtual base used when computing the call address sent to PPs (TLLIST_VBASEADDR) */
+{
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, val);
+}
+
+void mali_dlbu_pp_jobs_stop(struct mali_dlbu_core *dlbu) /* stop load-balanced PP work: mask all cores and latch the current tile position */
+{
+	/* this function to implement (see documentation):
+	 * 1) clear all bits in the enable register
+	 * 2) wait until all PPs have finished - mali_pp_scheduler.c code - this done in interrupts call?
+	 * 3) read the current tile position registers to get current tile positions -
+	 * note that current tile position register is the same as start tile position - perhaps the name should be changed!!! */
+
+	/* 1) */
+	mali_dlbu_disable_all_pp_cores(dlbu);
+
+	/* 3) */
+	mali_dlbu_tile_position = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS); /* saved for mali_dlbu_pp_jobs_restart() */
+}
+
+void mali_dlbu_pp_jobs_restart(struct mali_dlbu_core *dlbu) /* resume from mali_dlbu_pp_jobs_stop(): reset the unit, then restore the latched tile position */
+{
+	/* this function to implement (see the document):
+	 * 1) configure the dynamic load balancing unit as normal
+	 * 2) set the current tile position registers as read when stopping the job
+	 * 3) configure the PPs to start the job as normal - done by another part of the system - scheduler */
+
+	/* 1) */
+	mali_dlbu_reset(dlbu);
+	/* ++ setup the needed values - see this */
+
+	/* 2) */
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, mali_dlbu_tile_position);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_dlbu.h b/drivers/parrot/gpu/mali200/common/mali_dlbu.h
new file mode 100644
index 00000000000000..e3c3b9db977589
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_dlbu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#include "mali_osk.h"
+#include "mali_group.h"
+
+#define MALI_DLB_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_enable(struct mali_dlbu_core *dlbu);
+void mali_dlbu_disable(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_enable_pp_core(struct mali_dlbu_core *dlbu, u32 pp_core_enable, u32 val);
+void mali_dlbu_enable_all_pp_cores(struct mali_dlbu_core *dlbu);
+void mali_dlbu_disable_all_pp_cores(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+void mali_dlbu_setup(struct mali_dlbu_core *dlbu, u8 fb_xdim, u8 fb_ydim, u8 xtilesize, u8 ytilesize, u8 blocksize, u8 xgr0, u8 ygr0, u8 xgr1, u8 ygr1);
+
+_mali_osk_errcode_t mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_set_tllist_base_address(struct mali_dlbu_core *dlbu, u32 val);
+
+void mali_dlbu_pp_jobs_stop(struct mali_dlbu_core *dlbu);
+void mali_dlbu_pp_jobs_restart(struct mali_dlbu_core *dlbu);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp.c b/drivers/parrot/gpu/mali200/common/mali_gp.c
new file mode 100644
index 00000000000000..d03721c3ebaaff
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ * Exactly one instance exists at a time (see mali_global_gp_core;
+ * mali_gp_create() asserts it is NULL before creating a new core).
+ */
+struct mali_gp_core
+{
+	struct mali_hw_core hw_core; /**< Common for all HW cores */
+	struct mali_group *group; /**< Parent group for this core */
+	_mali_osk_irq_t *irq; /**< IRQ handler */
+	struct mali_gp_job *running_job; /**< Current running job */
+	_mali_osk_timer_t *timeout_timer; /**< timeout timer for this core */
+	u32 timeout_job_id; /**< job id for the timed out job - relevant only if gp_core_timed_out == MALI_TRUE */
+	mali_bool core_timed_out; /**< if MALI_TRUE, this gp core has timed out; if MALI_FALSE, no timeout on this gp core */
+	u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+	u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+	u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+	u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static _mali_osk_errcode_t mali_gp_upper_half(void *data);
+static void mali_gp_bottom_half(void *data);
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend);
+static void mali_gp_timeout(void *data);
+
+/**
+ * Allocate and initialise the (single) GP core object.
+ *
+ * Resets the core, installs the IRQ handlers and creates the SW timeout
+ * timer. On success the new core is published through mali_global_gp_core
+ * and returned; on any failure the partially acquired resources are
+ * released in reverse order and NULL is returned.
+ */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+{
+	struct mali_gp_core* core = NULL;
+
+	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+	if (NULL != core)
+	{
+		core->group = group;
+		core->running_job = NULL;
+		core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+		core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+		core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+		core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE))
+		{
+			_mali_osk_errcode_t ret;
+
+			/* mali_gp_reset() asserts the group lock is held */
+			mali_group_lock(group);
+			ret = mali_gp_reset(core);
+			mali_group_unlock(group);
+
+			if (_MALI_OSK_ERR_OK == ret)
+			{
+				/* Setup IRQ handlers (which will do IRQ probing if needed) */
+				core->irq = _mali_osk_irq_init(resource->irq,
+				                               mali_gp_upper_half,
+				                               mali_gp_bottom_half,
+				                               mali_gp_irq_probe_trigger,
+				                               mali_gp_irq_probe_ack,
+				                               core,
+				                               "mali_gp_irq_handlers");
+				if (NULL != core->irq)
+				{
+					/* Initialise the timeout timer */
+					core->timeout_timer = _mali_osk_timer_init();
+					if(NULL != core->timeout_timer)
+					{
+						_mali_osk_timer_setcallback(core->timeout_timer, mali_gp_timeout, (void *)core);
+						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+						mali_global_gp_core = core;
+
+						return core;
+					}
+					else
+					{
+						MALI_PRINT_ERROR(("Failed to setup timeout timer for GP core %s\n", core->hw_core.description));
+						/* Release IRQ handlers */
+						_mali_osk_irq_term(core->irq);
+					}
+				}
+				else
+				{
+					MALI_PRINT_ERROR(("Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+				}
+			}
+			/* Reset, IRQ or timer setup failed: undo the HW core mapping */
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+	}
+
+	return NULL;
+}
+
+/**
+ * Tear down a GP core created by mali_gp_create(): stop the timeout timer,
+ * remove the IRQ handlers, unmap the HW core, clear the global reference
+ * and free the object.
+ */
+void mali_gp_delete(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	_mali_osk_timer_term(core->timeout_timer);
+	_mali_osk_irq_term(core->irq);
+	mali_hw_core_delete(&core->hw_core);
+	mali_global_gp_core = NULL;
+	_mali_osk_free(core);
+}
+
+/* Issue the STOP_BUS command; does not wait for completion (see mali_gp_stop_bus_wait()). */
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/**
+ * Issue a bus stop and poll the status register until the bus reports
+ * stopped. Polls up to 20 times with a 10 us busy-delay between reads;
+ * returns _MALI_OSK_ERR_FAULT if the bus never stops.
+ */
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+	const int max_polls = 20;
+	int polls_left;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Send the stop bus command. */
+	mali_gp_stop_bus(core);
+
+	for (polls_left = max_polls; polls_left > 0; polls_left--)
+	{
+		u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+		if (status & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+		{
+			return _MALI_OSK_ERR_OK;
+		}
+		_mali_osk_time_ubusydelay(10);
+	}
+
+	MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Perform a hard reset of the core. Caller must hold the group lock;
+ * any running job is post-processed first.
+ *
+ * Completion is detected by writing a probe value to WRITE_BOUND_LOW and
+ * re-reading it until the write sticks (presumably register writes do not
+ * take effect until the reset has finished - inferred from this loop,
+ * confirm against the Mali TRM). Interrupts are re-enabled afterwards.
+ */
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+	const int reset_finished_loop_count = 15;
+	const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+	const u32 reset_invalid_value = 0xC0FFE000;
+	const u32 reset_check_value = 0xC01A0000;
+	const u32 reset_default_value = 0;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	mali_gp_post_process_job(core, MALI_FALSE);
+
+	/* Park a sentinel value in the probe register before resetting */
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+	for (i = 0; i < reset_finished_loop_count; i++)
+	{
+		mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register))
+		{
+			break;
+		}
+	}
+
+	if (i == reset_finished_loop_count)
+	{
+		MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+	}
+
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+/**
+ * Reset the GP core. Caller must hold the group lock; any running job is
+ * post-processed first and IRQs are masked during the reset.
+ *
+ * Mali-200 has no soft reset: the bus is stopped and then a hard reset is
+ * issued. Mali-300/400 use the dedicated SOFT_RESET command and wait for
+ * its RESET_COMPLETED interrupt. Interrupts are re-enabled on success.
+ */
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+	int i;
+	const int request_loop_count = 20;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	mali_gp_post_process_job(core, MALI_FALSE);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+	/* On Mali-200, stop the bus, then do a hard reset of the core */
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+	/* Poll (max 20 x 10 us) for the bus-stopped status bit */
+	for (i = 0; i < request_loop_count; i++)
+	{
+		if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+		{
+			break;
+		}
+		_mali_osk_time_ubusydelay(10);
+	}
+
+	if (request_loop_count == i)
+	{
+		MALI_PRINT_ERROR(("Mali GP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* the bus was stopped OK, do the hard reset */
+	mali_gp_hard_reset(core);
+
+#elif defined(USING_MALI400)
+
+	/* Mali-300 and Mali-400 have a safe reset command which we use */
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+	/* Poll (max 20 x 10 us) for the reset-completed raw interrupt */
+	for (i = 0; i < request_loop_count; i++)
+	{
+		if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED)
+		{
+			break;
+		}
+		_mali_osk_time_ubusydelay(10);
+	}
+
+	if (request_loop_count == i)
+	{
+		MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, unable to recover\n", core->hw_core.description));
+		return _MALI_OSK_ERR_FAULT;
+	}
+#else
+#error "no supported mali core defined"
+#endif
+
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Start @job on @core. Caller must hold the group lock.
+ *
+ * Writes the job's frame registers, programs the performance counters
+ * (counter sources configured on the core override sources requested by
+ * the job), issues the VS/PLBU start command and arms the SW timeout
+ * timer for this job.
+ */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+	u32 startcmd = 0;
+	u32 *frame_registers;
+
+	/* Validate arguments before dereferencing them (the original code read
+	 * core/job fields before the asserts). */
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	frame_registers = mali_gp_job_get_frame_registers(job);
+	core->counter_src0_used = core->counter_src0;
+	core->counter_src1_used = core->counter_src1;
+
+	if (mali_gp_job_has_vs_job(job))
+	{
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+	}
+
+	if (mali_gp_job_has_plbu_job(job))
+	{
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+	}
+
+	MALI_DEBUG_ASSERT(0 != startcmd);
+
+	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+	/*
+	 * This selects which performance counters we are reading.
+	 * Fix: the second operand previously re-tested counter_src0_used, so a
+	 * core-level src1-only selection never reached the override branch.
+	 */
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+	{
+		/* global_config has enabled HW counters, this will override anything specified by user space */
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+		{
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+		}
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+		{
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+		}
+	}
+	else
+	{
+		/* Use HW counters from job object, if any */
+		u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+		if (0 != perf_counter_flag)
+		{
+			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+			{
+				core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
+				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+			}
+
+			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+			{
+				core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
+				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+			}
+		}
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core. */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+
+	/* Setup the timeout timer value and save the job id for the job running on the gp core */
+
+	_mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+	core->timeout_job_id = mali_gp_job_get_id(job);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+	                              job->frame_builder_id, job->flush_id, 0, 0, 0);
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), job->pid, job->tid, 0, 0, 0);
+#endif
+
+	core->running_job = job;
+}
+
+/**
+ * Give the PLBU a new heap extent [start_addr, end_addr) and resume a job
+ * that stopped on PLBU_OUT_OF_MEM. Caller must hold the group lock.
+ * If the OOM interrupt is no longer pending (core was reset meanwhile),
+ * this is a no-op - see the comment at the end.
+ */
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+	u32 irq_readout;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+	if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+	{
+		/* Clear the OOM (and any pending HANG) IRQ, unmask, program the new heap bounds */
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+		MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+		_mali_osk_write_mem_barrier();
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+#endif
+	}
+	/*
+	 * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+	 * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+	 */
+}
+
+/**
+ * Abort whatever is running on the core by resetting it. The SW timeout
+ * timer is only cancelled when the reset did not fault. Caller must hold
+ * the group lock.
+ */
+void mali_gp_abort_job(struct mali_gp_core *core)
+{
+	_mali_osk_errcode_t reset_result;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	reset_result = mali_gp_reset(core);
+	if (reset_result != _MALI_OSK_ERR_FAULT)
+	{
+		_mali_osk_timer_del(core->timeout_timer);
+	}
+}
+
+/* Read the VERSION register of the GP management interface. */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+	u32 version;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	version = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+	return version;
+}
+
+/* Select performance counter source 0; takes effect when the next job starts. */
+mali_bool mali_gp_core_set_counter_src0(struct mali_gp_core *core, u32 counter)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	core->counter_src0 = counter;
+
+	return MALI_TRUE;
+}
+
+/* Select performance counter source 1; takes effect when the next job starts. */
+mali_bool mali_gp_core_set_counter_src1(struct mali_gp_core *core, u32 counter)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	core->counter_src1 = counter;
+
+	return MALI_TRUE;
+}
+
+/* Currently configured counter source 0 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_gp_core_get_counter_src0(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	return core->counter_src0;
+}
+
+/* Currently configured counter source 1 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_gp_core_get_counter_src1(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	return core->counter_src1;
+}
+
+/* The single system-wide GP core, or NULL if none has been created. */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+	return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/**
+ * IRQ upper half (interrupt context). If this core raised an interrupt,
+ * mask all of its IRQs (re-enabled later by the bottom half or a reset)
+ * and schedule the bottom half; otherwise report the IRQ as not ours.
+ */
+static _mali_osk_errcode_t mali_gp_upper_half(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout)
+	{
+		/* Mask out all IRQs from this core until IRQ is handled */
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+		/* We do need to handle this in a bottom half */
+		_mali_osk_irq_schedulework(core->irq);
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * IRQ bottom half (process context). Runs with all core IRQs masked by the
+ * upper half; classifies the pending interrupt as job completion, error,
+ * SW timeout, PLBU out-of-memory or ignorable HANG. The group lock is taken
+ * here and released either by mali_group_bottom_half() or explicitly below.
+ */
+static void mali_gp_bottom_half(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 irq_readout;
+	u32 irq_errors;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
+#endif
+#endif
+
+	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */
+
+	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
+	{
+		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
+		mali_group_unlock(core->group);
+		return;
+	}
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+	MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));
+
+	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
+	{
+		/* An END_CMD_LST alone is not completion: the core must also be idle */
+		u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+		{
+			mali_gp_post_process_job(core, MALI_FALSE);
+			MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
+			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
+			return;
+		}
+	}
+
+	/*
+	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
+	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
+	 */
+	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+	if (0 != irq_errors)
+	{
+		mali_gp_post_process_job(core, MALI_FALSE);
+		MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
+		mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
+		return;
+	}
+	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
+	{
+		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
+		{
+			mali_gp_post_process_job(core, MALI_FALSE);
+			/* NOTE(review): post_process above clears running_job, so this prints job id 0 */
+			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
+			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT); /* Will release group lock */
+		}
+		/*
+		 * NOTE(review): when the timed-out job id no longer matches the running
+		 * job, this path returns without releasing the group lock or restoring
+		 * the IRQ mask - confirm this is recovered elsewhere (e.g. by a reset).
+		 */
+		core->core_timed_out = MALI_FALSE;
+		return;
+	}
+	else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+	{
+		/* GP wants more memory in order to continue.
+		 *
+		 * This must be handled prior to HANG because this actually can
+		 * generate a HANG while waiting for more memory.
+		 * And it must be handled before the completion interrupts,
+		 * since the PLBU can run out of memory after VS is complete;
+		 * in which case the OOM must be handled before to complete the
+		 * PLBU work.
+		 */
+		mali_gp_post_process_job(core, MALI_TRUE);
+		MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
+		mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
+		return;
+	}
+	else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
+	{
+		/* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
+	}
+
+	/*
+	 * The only way to get here is if we got a HANG interrupt, which we ignore, or only one of two needed END_CMD_LST interrupts.
+	 * Re-enable interrupts and let core continue to run.
+	 */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_group_unlock(core->group);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
+#endif
+#endif
+}
+
+/* IRQ probe helper: force a HANG interrupt (via RAWSTAT) so the OSK layer can detect the IRQ line. */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* @@@@ This should not be needed */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/* IRQ probe helper: acknowledge the forced HANG raised by mali_gp_irq_probe_trigger().
+ * Returns OK if the expected interrupt was pending, FAULT otherwise. */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+		_mali_osk_mem_barrier();
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+
+/**
+ * Retrieve job results from the core when a job ends, is suspended (PLBU
+ * OOM) or is aborted: read back any enabled performance counters into the
+ * job object, record the current PLBU heap address, and - unless
+ * suspending - clear running_job and cancel the SW timeout timer.
+ * Caller must hold the group lock. No-op when no job is running.
+ */
+static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend)
+{
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	if (NULL != core->running_job)
+	{
+		u32 val0 = 0;
+		u32 val1 = 0;
+#if MALI_TIMELINE_PROFILING_ENABLED
+		u32 event_id;
+#endif
+
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+		{
+			val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+			/* Bitwise '&' (was erroneously logical '&&'): the job must have requested SRC0 explicitly */
+			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) &&
+			    mali_gp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
+			{
+				/* We retrieved the counter that user space asked for, so return the value through the job object */
+				mali_gp_job_set_perf_counter_value0(core->running_job, val0);
+			}
+			else
+			{
+				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+				mali_gp_job_set_perf_counter_value0(core->running_job, MALI_HW_CORE_INVALID_VALUE);
+			}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C0, val0);
+#endif
+
+		}
+
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+		{
+			val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+			/* Bitwise '&' (was erroneously logical '&&'): the job must have requested SRC1 explicitly */
+			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) &&
+			    mali_gp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
+			{
+				/* We retrieved the counter that user space asked for, so return the value through the job object */
+				mali_gp_job_set_perf_counter_value1(core->running_job, val1);
+			}
+			else
+			{
+				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+				mali_gp_job_set_perf_counter_value1(core->running_job, MALI_HW_CORE_INVALID_VALUE);
+			}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C1, val1);
+#endif
+		}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+		if (MALI_TRUE == suspend)
+		{
+			event_id = MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
+		}
+		else
+		{
+			event_id = MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
+		}
+		_mali_osk_profiling_add_event(event_id, val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
+#endif
+
+		/* Save the heap watermark so an OOM-suspended job can be resumed where it stopped */
+		mali_gp_job_set_current_heap_addr(core->running_job,
+		                                  mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR));
+
+		if (MALI_TRUE != suspend)
+		{
+			/* We are no longer running a job... */
+			core->running_job = NULL;
+			_mali_osk_timer_del(core->timeout_timer);
+		}
+	}
+}
+
+/* Timer callback fired when a job exceeds mali_max_job_runtime: flag the
+ * core as timed out and defer the actual handling to the bottom half. */
+static void mali_gp_timeout(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	MALI_DEBUG_PRINT(3, ("Mali GP: TIMEOUT callback \n"));
+	core->core_timed_out = MALI_TRUE;
+	_mali_osk_irq_schedulework(core->irq);
+}
+
+#if 0
+void mali_gp_print_state(struct mali_gp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali GP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+#if MALI_STATE_TRACKING
+/* Append a one-line description of this GP core to the state buffer; returns bytes written. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+	int written = 0;
+
+	written += _mali_osk_snprintf(buf + written, size - written, "\tGP: %s\n", core->hw_core.description);
+
+	return written;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp.h b/drivers/parrot/gpu/mali200/common/mali_gp.h
new file mode 100644
index 00000000000000..3175b7548acb72
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+
+struct mali_gp_core;
+struct mali_group;
+
+/* Subsystem setup/teardown. */
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+/* Core object lifetime; only a single GP core instance is supported. */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+/* Bus stop and reset operations (reset requires the owning group's lock). */
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+/* Job control (callers hold the group lock). */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+void mali_gp_abort_job(struct mali_gp_core *core);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+/* Performance counter source selection; takes effect at the next job start. */
+mali_bool mali_gp_core_set_counter_src0(struct mali_gp_core *core, u32 counter);
+mali_bool mali_gp_core_set_counter_src1(struct mali_gp_core *core, u32 counter);
+u32 mali_gp_core_get_counter_src0(struct mali_gp_core *core);
+u32 mali_gp_core_get_counter_src1(struct mali_gp_core *core);
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp_job.c b/drivers/parrot/gpu/mali200/common/mali_gp_job.c
new file mode 100644
index 00000000000000..abe1d933c87138
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp_job.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+/**
+ * Allocate and populate a GP job object from the user-space start-job
+ * arguments. Returns NULL on allocation failure. The caller owns the
+ * returned job and releases it with mali_gp_job_delete().
+ */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
+{
+	struct mali_gp_job *job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+
+	if (NULL == job)
+	{
+		return NULL;
+	}
+
+	_mali_osk_list_init(&job->list);
+	job->session = session;
+	job->id = id;
+	job->user_id = args->user_job_ptr;
+	_mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
+	/* Frame register 4 carries the initial PLBU heap address */
+	job->heap_current_addr = args->frame_registers[4];
+	job->perf_counter_flag = args->perf_counter_flag;
+	job->perf_counter_src0 = args->perf_counter_src0;
+	job->perf_counter_src1 = args->perf_counter_src1;
+	job->perf_counter_value0 = 0;
+	job->perf_counter_value1 = 0;
+
+	job->pid = _mali_osk_get_pid();
+	job->tid = _mali_osk_get_tid();
+	job->frame_builder_id = args->frame_builder_id;
+	job->flush_id = args->flush_id;
+
+	return job;
+}
+
+/* Release a job object allocated by mali_gp_job_create(). */
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+	_mali_osk_free(job);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp_job.h b/drivers/parrot/gpu/mali200/common/mali_gp_job.h
new file mode 100644
index 00000000000000..9c29f1c04f4830
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp_job.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_gp_job
+{
+	_mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+	struct mali_session_data *session; /**< Session which submitted this job */
+	u32 id; /**< identifier for this job in kernel space (sequential numbering) */
+	u32 user_id; /**< identifier for the job in user space */
+	u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< core specific registers associated with this job, see ARM DDI0415A */
+	u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+	u32 perf_counter_flag; /**< bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+	u32 perf_counter_src0; /**< source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter_src1; /**< source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+	u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+	u32 pid; /**< Process ID of submitting process */
+	u32 tid; /**< Thread ID of submitting thread */
+	u32 frame_builder_id; /**< id of the originating frame builder */
+	u32 flush_id; /**< flush id within the originating frame builder */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+/* Kernel-space id of @job; returns 0 for a NULL job (callers may pass a cleared running_job pointer). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+	return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ return job->user_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ return job->frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ return job->flush_id;
+}
+
+MALI_STATIC_INLINE u32* mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ return job->frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ return (job->frame_registers[0] != job->frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ return (job->frame_registers[2] != job->frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ return job->perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ return job->perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ return job->perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value1 = value;
+}
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.c b/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.c
new file mode 100644
index 00000000000000..05f00fcfb94545
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_cluster.h"
+
+enum mali_gp_slot_state
+{
+	MALI_GP_SLOT_STATE_IDLE,
+	MALI_GP_SLOT_STATE_WORKING,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot
+{
+	struct mali_group *group;
+	/*
+	 * We keep track of the state here as well as in the group object
+	 * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+	 */
+	enum mali_gp_slot_state state;
+	/* Cookie handed to user space in the last OOM notification; used to
+	 * validate the cookie echoed back in the suspend response. */
+	u32 returned_cookie;
+};
+
+/* HW version of the (single) GP core, cached at init for the UK interface. */
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
+/* There is only one GP core in these devices, hence a single static slot. */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static mali_bool mali_gp_scheduler_is_suspended(void);
+
+static _mali_osk_lock_t *gp_scheduler_lock = NULL;
+/* NOTE(review): the comment below looks stale — the lock object, not a tid,
+ * is stored here. Kept for reference:
+ * Contains tid of thread that locked the scheduler or 0, if not locked */
+
+/*
+ * Set up the GP scheduler: create its lock and pause wait queue, then scan
+ * all clusters/groups for the (single) GP core and bind it to the slot.
+ * Returns _MALI_OSK_ERR_NOMEM if either OSK object cannot be allocated.
+ */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+	u32 i;
+
+	_MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+	gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+	gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+
+	if (NULL == gp_scheduler_lock)
+	{
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	if (NULL == gp_scheduler_working_wait_queue)
+	{
+		/* Undo the lock allocation before failing. */
+		_mali_osk_lock_term(gp_scheduler_lock);
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	/* Find all the available GP cores */
+	for (i = 0; i < mali_cluster_get_glob_num_clusters(); i++)
+	{
+		u32 group_id = 0;
+		struct mali_cluster *curr_cluster = mali_cluster_get_global_cluster(i);
+		struct mali_group *group = mali_cluster_get_group(curr_cluster, group_id);
+		while (NULL != group)
+		{
+			struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+			if (NULL != gp_core)
+			{
+				if (0 == gp_version)
+				{
+					/* Retrieve GP version */
+					gp_version = mali_gp_core_get_version(gp_core);
+				}
+				slot.group = group;
+				slot.state = MALI_GP_SLOT_STATE_IDLE;
+				break; /* There are only one GP, no point in looking for more */
+			}
+			group_id++;
+			group = mali_cluster_get_group(curr_cluster, group_id);
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the scheduler's OSK objects. Assumes no jobs are queued or
+ * running and no thread still holds/waits on the scheduler lock. */
+void mali_gp_scheduler_terminate(void)
+{
+	_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+	_mali_osk_lock_term(gp_scheduler_lock);
+}
+
+/* Acquire the scheduler lock. The lock is non-interruptable, so a failure
+ * here can only indicate an OSK bug — hence the hard assert. */
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+	if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+	{
+		/* Non-interruptable lock failed: this should never happen. */
+		MALI_DEBUG_ASSERT(0);
+	}
+	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+/* Release the scheduler lock taken by mali_gp_scheduler_lock(). */
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+	_mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+/* Debug-only helper so internal functions can assert they are called with
+ * the scheduler lock held; compiles away in release builds. */
+MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
+{
+	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+}
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
+#endif
+
+/*
+ * Try to start the oldest queued job on the single GP slot.
+ * Must be called with the scheduler lock held. Does nothing when the
+ * scheduler is paused, the slot is busy, or the queue is empty. On a
+ * failed HW start the job stays at the head of the queue for a retry.
+ */
+static void mali_gp_scheduler_schedule(void)
+{
+	struct mali_gp_job *job;
+
+	MALI_ASSERT_GP_SCHEDULER_LOCKED();
+
+	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+	{
+		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+		pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+		return; /* Nothing to do, so early out */
+	}
+
+	/* FIFO: take the job at the head of the queue. */
+	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+
+	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+	if (_MALI_OSK_ERR_OK == mali_group_start_gp_job(slot.group, job))
+	{
+		/* Mark slot as busy */
+		slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+		/* Remove from queue of unscheduled jobs */
+		_mali_osk_list_del(&job->list);
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
+	}
+}
+
+/*
+ * Send the job-finished notification (status, heap usage, perf counters)
+ * to the owning session, then free the job object. The job is deleted even
+ * when the notification cannot be allocated, so the caller must not touch
+ * it afterwards. Called without the scheduler lock held.
+ */
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+	_mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+	if (NULL != notobj)
+	{
+		_mali_uk_gp_job_finished_s *jobres = notobj->result_buffer;
+		_mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+		jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+		if (MALI_TRUE == success)
+		{
+			jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+		}
+		else
+		{
+			jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+		}
+
+		jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+		jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+		jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+		mali_session_send_notification(mali_gp_job_get_session(job), notobj);
+	}
+	else
+	{
+		/* Best effort: user space misses one notification, but we still
+		 * reclaim the job below to avoid leaking it. */
+		MALI_PRINT_ERROR(("Mali GP scheduler: Unable to allocate notification object\n"));
+	}
+
+	mali_gp_job_delete(job);
+}
+
+
+/* Public entry point: run one scheduling pass under the scheduler lock. */
+void mali_gp_scheduler_do_schedule(void)
+{
+	mali_gp_scheduler_lock();
+
+	mali_gp_scheduler_schedule();
+
+	mali_gp_scheduler_unlock();
+}
+
+/*
+ * Completion callback from the group layer. Frees the slot, either wakes
+ * a thread blocked in mali_gp_scheduler_suspend() (when paused) or starts
+ * the next queued job, and finally returns the result to user space.
+ * The notification is sent outside the lock on purpose.
+ */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+	mali_gp_scheduler_lock();
+
+	/* Mark slot as idle again */
+	slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+	/* If paused, then this was the last job, so wake up sleeping workers */
+	if (pause_count > 0)
+	{
+		_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+	}
+	else
+	{
+		mali_gp_scheduler_schedule();
+	}
+
+	mali_gp_scheduler_unlock();
+
+	mali_gp_scheduler_return_job_to_user(job, success);
+}
+
+/*
+ * The running GP job ran out of PLBU heap. Notify user space (which may
+ * respond with a new heap or an abort via _mali_ukk_gp_suspend_response)
+ * and remember the job id as the cookie to validate that response against.
+ */
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+	_mali_osk_notification_t *notobj;
+
+	notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+	if (NULL != notobj)
+	{
+		_mali_uk_gp_job_suspended_s * jobres;
+
+		mali_gp_scheduler_lock();
+
+		jobres = (_mali_uk_gp_job_suspended_s *)notobj->result_buffer;
+
+		jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+		jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
+		jobres->cookie = mali_gp_job_get_id(job);
+		slot.returned_cookie = jobres->cookie;
+
+		mali_session_send_notification(mali_gp_job_get_session(job), notobj);
+
+		mali_gp_scheduler_unlock();
+	}
+
+	/*
+	 * If this function failed, then we could return the job to user space right away,
+	 * but there is a job timer anyway that will do that eventually.
+	 * This is not exactly a common case anyway.
+	 */
+}
+
+/*
+ * Block new job starts and wait until the GP slot is idle. Suspend calls
+ * nest via pause_count; each suspend must be paired with a resume.
+ */
+void mali_gp_scheduler_suspend(void)
+{
+	mali_gp_scheduler_lock();
+	pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+	mali_gp_scheduler_unlock();
+
+	/* Sleeps until mali_gp_scheduler_is_suspended() reports idle+paused. */
+	_mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
+}
+
+/* Undo one mali_gp_scheduler_suspend(); kicks the scheduler when the last
+ * pause is dropped. */
+void mali_gp_scheduler_resume(void)
+{
+	mali_gp_scheduler_lock();
+	pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+	if (0 == pause_count)
+	{
+		mali_gp_scheduler_schedule();
+	}
+	mali_gp_scheduler_unlock();
+}
+
+/*
+ * UK entry point: create a GP job from the user-space arguments, queue it
+ * (FIFO) and attempt to schedule it immediately.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS for a missing context and
+ * _MALI_OSK_ERR_NOMEM when the job cannot be allocated.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
+{
+	struct mali_session_data *session;
+	struct mali_gp_job *job;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	if (NULL == args->ctx)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	session = (struct mali_session_data*)args->ctx;
+	/* NOTE(review): this second NULL test can never fire — args->ctx was
+	 * already checked above and the cast cannot produce NULL. */
+	if (NULL == session)
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
+	if (NULL == job)
+	{
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	mali_gp_scheduler_lock();
+
+	_mali_osk_list_addtail(&job->list, &job_queue);
+
+	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+	mali_gp_scheduler_schedule();
+
+	mali_gp_scheduler_unlock();
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* UK query: these devices always have exactly one GP core. */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+	args->number_of_cores = 1;
+	return _MALI_OSK_ERR_OK;
+}
+
+/* UK query: report the GP HW version cached at scheduler init time. */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+	args->version = gp_version;
+	return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * UK entry point: user space's answer to a GP_STALLED (OOM) notification.
+ * Validates the echoed cookie against the one recorded in mali_gp_scheduler_oom(),
+ * then either resumes the job with a new heap range or aborts it.
+ *
+ * NOTE(review): the cookie is validated under the scheduler lock, but the
+ * lock is dropped before acting on it, so the job could in principle
+ * complete/abort in between; the group layer re-checks the job id, which
+ * appears to be the intended safety net — confirm.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+	struct mali_session_data *session;
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	if (NULL == args->ctx)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	session = (struct mali_session_data*)args->ctx;
+	if (NULL == session)
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_gp_scheduler_lock();
+
+	/* Make sure that the cookie returned by user space is the same as we provided in the first place */
+	if (args->cookie != slot.returned_cookie)
+	{
+		MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Got an illegal cookie from user space, expected %u but got %u (job id)\n", slot.returned_cookie, args->cookie)) ;
+		mali_gp_scheduler_unlock();
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_gp_scheduler_unlock();
+
+	switch (args->code)
+	{
+		case _MALIGP_JOB_RESUME_WITH_NEW_HEAP:
+			MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+			mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+			ret = _MALI_OSK_ERR_OK;
+			break;
+
+		case _MALIGP_JOB_ABORT:
+			MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+			mali_group_abort_gp_job(slot.group, args->cookie);
+			ret = _MALI_OSK_ERR_OK;
+			break;
+
+		default:
+			MALI_PRINT_ERROR(("Mali GP scheduler: Wrong suspend response from user space\n"));
+			ret = _MALI_OSK_ERR_FAULT;
+			break;
+	}
+
+	return ret;
+
+}
+
+/*
+ * Session teardown: drop all of this session's queued jobs (freed without
+ * notifying user space — the session is going away), then abort any job it
+ * has running on the GP core.
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+	struct mali_gp_job *job, *tmp;
+
+	mali_gp_scheduler_lock();
+	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+	/* Check queue for jobs and remove */
+	_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list)
+	{
+		if (mali_gp_job_get_session(job) == session)
+		{
+			MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job));
+			_mali_osk_list_del(&(job->list));
+			mali_gp_job_delete(job);
+		}
+	}
+
+	mali_gp_scheduler_unlock();
+
+	/* Abort running jobs from this session. It is safe to do this outside
+	 * the scheduler lock as there is only one GP core, and the queue has
+	 * already been emptied, as long as there are no new jobs coming in
+	 * from user space. */
+	mali_group_abort_session(slot.group, session);
+}
+
+/* Wait-queue predicate for mali_gp_scheduler_suspend(): true once the
+ * scheduler is paused and the single GP slot is idle. */
+static mali_bool mali_gp_scheduler_is_suspended(void)
+{
+	mali_bool ret;
+
+	mali_gp_scheduler_lock();
+	ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE;
+	mali_gp_scheduler_unlock();
+
+	return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+/*
+ * Append a human-readable dump of the GP scheduler state to buf.
+ * Returns the number of characters written.
+ * NOTE(review): n is int while size is u32; if the helpers ever write more
+ * than size, (size - n) wraps to a huge unsigned value — assumed the
+ * _mali_osk_snprintf contract prevents this, but worth confirming.
+ */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+	n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+
+	n += mali_group_dump_state(slot.group, buf + n, size - n);
+	n += _mali_osk_snprintf(buf + n, size - n, "\t\tState: %d\n", mali_group_gp_state(slot.group));
+	n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.h b/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.h
new file mode 100644
index 00000000000000..ef5850944589d6
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_gp_scheduler.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_gp_job.h"
+
+/* Lifecycle: initialize allocates the scheduler lock/wait queue and binds
+ * the single GP core; terminate releases them. */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+/* Run one scheduling pass / completion and OOM callbacks from the group
+ * layer / flush all jobs belonging to a dying session. */
+void mali_gp_scheduler_do_schedule(void);
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+/* Debug/state-tracking dump into buf; returns chars written. */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+/* Nested pause/unpause of job scheduling; suspend blocks until idle. */
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_group.c b/drivers/parrot/gpu/mali200/common/mali_group.c
new file mode 100644
index 00000000000000..9202bc17a30521
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_group.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group
+{
+	struct mali_cluster *cluster;
+
+	struct mali_mmu_core *mmu;
+	/* Session whose page directory is (or was last) active on the MMU. */
+	struct mali_session_data *session;
+	/* Number of running jobs referencing the active page directory. */
+	int page_dir_ref_count;
+	mali_bool power_is_on;
+#if defined(USING_MALI200)
+	/* Set when a job start failed because the MMU was busy with another
+	 * session; tells the completing core's path to kick the other scheduler. */
+	mali_bool pagedir_activation_failed;
+#endif
+
+	struct mali_gp_core *gp_core;
+	enum mali_group_core_state gp_state;
+	struct mali_gp_job *gp_running_job;
+
+	struct mali_pp_core *pp_core;
+	enum mali_group_core_state pp_state;
+	struct mali_pp_job *pp_running_job;
+	u32 pp_running_sub_job;
+
+	_mali_osk_lock_t *lock;
+};
+
+/* Global registry of groups, maintained by create/delete. */
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS];
+static u32 mali_global_num_groups = 0;
+
+enum mali_group_activate_pd_status
+{
+	MALI_GROUP_ACTIVATE_PD_STATUS_FAILED,
+	MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD,
+	MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD,
+};
+
+/* local helper functions */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_complete_jobs(struct mali_group *group, mali_bool complete_gp, mali_bool complete_pp, bool success);
+
+/* Acquire a group's lock (IRQ spinlock). Must be taken AFTER the GP/PP
+ * scheduler locks per the ordering documented at the top of this file. */
+void mali_group_lock(struct mali_group *group)
+{
+	if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(group->lock, _MALI_OSK_LOCKMODE_RW))
+	{
+		/* Non-interruptable lock failed: this should never happen. */
+		MALI_DEBUG_ASSERT(0);
+	}
+	MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+/* Release the lock taken by mali_group_lock(). */
+void mali_group_unlock(struct mali_group *group)
+{
+	MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+	_mali_osk_lock_signal(group->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+/* Debug-only: assert the caller holds this group's lock. */
+void mali_group_assert_locked(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+/*
+ * Allocate a group bound to the given cluster and MMU (ownership of the MMU
+ * transfers to the group) and register it in the global array.
+ * Returns NULL on allocation failure or when MALI_MAX_NUMBER_OF_GROUPS
+ * groups already exist. Cores are attached later via the add_*_core calls.
+ */
+struct mali_group *mali_group_create(struct mali_cluster *cluster, struct mali_mmu_core *mmu)
+{
+	struct mali_group *group = NULL;
+
+	if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS)
+	{
+		MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+		return NULL;
+	}
+
+	group = _mali_osk_malloc(sizeof(struct mali_group));
+	if (NULL != group)
+	{
+		_mali_osk_memset(group, 0, sizeof(struct mali_group));
+		group->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_GROUP);
+		if (NULL != group->lock)
+		{
+			group->cluster = cluster;
+			group->mmu = mmu; /* This group object now owns the MMU object */
+			group->session = NULL;
+			group->page_dir_ref_count = 0;
+			group->power_is_on = MALI_TRUE;
+
+			group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+			group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+#if defined(USING_MALI200)
+			group->pagedir_activation_failed = MALI_FALSE;
+#endif
+			mali_global_groups[mali_global_num_groups] = group;
+			mali_global_num_groups++;
+
+			return group;
+		}
+		/* Lock init failed: free the half-constructed group. */
+		_mali_osk_free(group);
+	}
+
+	return NULL;
+}
+
+/* Attach a GP core; ownership transfers to the group (freed in delete). */
+void mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
+{
+	/* This group object now owns the GP core object */
+	group->gp_core = gp_core;
+}
+
+/* Attach a PP core; ownership transfers to the group (freed in delete). */
+void mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
+{
+	/* This group object now owns the PP core object */
+	group->pp_core = pp_core;
+}
+
+/*
+ * Destroy a group: free the cores and MMU it owns, unregister it from the
+ * global registry and release the group itself. Must not be called while
+ * any job is running on the group.
+ */
+void mali_group_delete(struct mali_group *group)
+{
+	u32 i;
+
+	/* Delete the resources that this group owns */
+	if (NULL != group->gp_core)
+	{
+		mali_gp_delete(group->gp_core);
+	}
+
+	if (NULL != group->pp_core)
+	{
+		mali_pp_delete(group->pp_core);
+	}
+
+	if (NULL != group->mmu)
+	{
+		mali_mmu_delete(group->mmu);
+	}
+
+	/*
+	 * Remove the group from the global registry, compacting the array.
+	 * Fix: the original left a NULL hole while decrementing
+	 * mali_global_num_groups, which made every iteration over
+	 * [0, mali_global_num_groups) — mali_group_get_glob_group(),
+	 * mali_group_power_on()/off() — skip or dereference NULL for the
+	 * groups stored after the deleted one.
+	 */
+	for (i = 0; i < mali_global_num_groups; i++)
+	{
+		if (mali_global_groups[i] == group)
+		{
+			u32 j;
+			for (j = i; j + 1 < mali_global_num_groups; j++)
+			{
+				mali_global_groups[j] = mali_global_groups[j + 1];
+			}
+			mali_global_num_groups--;
+			mali_global_groups[mali_global_num_groups] = NULL;
+			break;
+		}
+	}
+
+	_mali_osk_lock_term(group->lock);
+
+	_mali_osk_free(group);
+}
+
+/* Called from mali_cluster_reset() when the system is re-turned on */
+void mali_group_reset(struct mali_group *group)
+{
+ mali_group_lock(group);
+
+ group->session = NULL;
+
+ if (NULL != group->mmu)
+ {
+ mali_mmu_reset(group->mmu);
+ }
+
+ if (NULL != group->gp_core)
+ {
+ mali_gp_reset(group->gp_core);
+ }
+
+ if (NULL != group->pp_core)
+ {
+ mali_pp_reset(group->pp_core);
+ }
+
+ mali_group_unlock(group);
+}
+
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
+{
+ return group->gp_core;
+}
+
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
+{
+ return group->pp_core;
+}
+
+/*
+ * Start a GP job on this group's GP core. Invalidates the L2, activates the
+ * job's session page directory on the MMU and kicks the HW.
+ * Returns _MALI_OSK_ERR_FAULT (and balances the PM start event) when the
+ * page directory cannot be activated — i.e. the MMU is pinned to another
+ * session by a running job.
+ */
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+	struct mali_session_data *session;
+	enum mali_group_activate_pd_status activate_status;
+
+	MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+
+	/* Tell PM a GP core is about to become busy (cancelled on failure below). */
+	mali_pm_core_event(MALI_CORE_EVENT_GP_START);
+
+	session = mali_gp_job_get_session(job);
+
+	mali_group_lock(group);
+
+	mali_cluster_l2_cache_invalidate_all(group->cluster, mali_gp_job_get_id(job));
+
+	activate_status = mali_group_activate_page_directory(group, session);
+	if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+	{
+		/* if session is NOT kept Zapping is done as part of session switch */
+		if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+		{
+			mali_mmu_zap_tlb_without_stall(group->mmu);
+		}
+		mali_gp_job_start(group->gp_core, job);
+		group->gp_running_job = job;
+		group->gp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+		mali_group_unlock(group);
+
+		return _MALI_OSK_ERR_OK;
+	}
+
+#if defined(USING_MALI200)
+	/* Remember the PD clash so the other scheduler is rescheduled on completion. */
+	group->pagedir_activation_failed = MALI_TRUE;
+#endif
+
+	mali_group_unlock(group);
+
+	mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_GP_START */
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/*
+ * Start one sub-job of a PP job on this group's PP core. Mirrors
+ * mali_group_start_gp_job(): L2 invalidate, activate the session's page
+ * directory, kick the HW; _MALI_OSK_ERR_FAULT when the MMU is held by
+ * another session (PM start event is then cancelled).
+ */
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+	struct mali_session_data *session;
+	enum mali_group_activate_pd_status activate_status;
+
+	MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+
+	mali_pm_core_event(MALI_CORE_EVENT_PP_START);
+
+	session = mali_pp_job_get_session(job);
+
+	mali_group_lock(group);
+
+	mali_cluster_l2_cache_invalidate_all(group->cluster, mali_pp_job_get_id(job));
+
+	activate_status = mali_group_activate_page_directory(group, session);
+	if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+	{
+		/* if session is NOT kept Zapping is done as part of session switch */
+		if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+		{
+			MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
+			mali_mmu_zap_tlb_without_stall(group->mmu);
+		}
+		mali_pp_job_start(group->pp_core, job, sub_job);
+		group->pp_running_job = job;
+		group->pp_running_sub_job = sub_job;
+		group->pp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+		mali_group_unlock(group);
+
+		return _MALI_OSK_ERR_OK;
+	}
+
+#if defined(USING_MALI200)
+	group->pagedir_activation_failed = MALI_TRUE;
+#endif
+
+	mali_group_unlock(group);
+
+	mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_PP_START */
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/*
+ * Resume an OOM-suspended GP job with a fresh heap range supplied by user
+ * space. Silently ignored unless the group is still in the OOM state AND
+ * still running the job identified by job_id (guards against stale cookies
+ * after the job was aborted or completed).
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+	mali_group_lock(group);
+
+	if (group->gp_state != MALI_GROUP_CORE_STATE_OOM ||
+	    mali_gp_job_get_id(group->gp_running_job) != job_id)
+	{
+		mali_group_unlock(group);
+		return; /* Illegal request or job has already been aborted */
+	}
+
+	/* New heap pages may alias stale cache/TLB contents; flush both. */
+	mali_cluster_l2_cache_invalidate_all_force(group->cluster);
+	mali_mmu_zap_tlb_without_stall(group->mmu);
+
+	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+	group->gp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+	mali_group_unlock(group);
+}
+
+/* Abort the running GP job iff it is still the one identified by job_id.
+ * mali_group_complete_jobs() releases the group lock taken here. */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+	mali_group_lock(group);
+
+	if (group->gp_state == MALI_GROUP_CORE_STATE_IDLE ||
+	    mali_gp_job_get_id(group->gp_running_job) != job_id)
+	{
+		mali_group_unlock(group);
+		return; /* No need to cancel or job has already been aborted or completed */
+	}
+
+	mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* Will release group lock */
+}
+
+/* PP counterpart of mali_group_abort_gp_job(); same stale-id guard. */
+void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+	mali_group_lock(group);
+
+	if (group->pp_state == MALI_GROUP_CORE_STATE_IDLE ||
+	    mali_pp_job_get_id(group->pp_running_job) != job_id)
+	{
+		mali_group_unlock(group);
+		return; /* No need to cancel or job has already been aborted or completed */
+	}
+
+	mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* Will release group lock */
+}
+
+/*
+ * Abort any GP/PP job this session has running on the group, then drop the
+ * session's page directory from the MMU if nothing references it anymore.
+ * Job ids are snapshotted under the lock and the aborts are done lock-free
+ * because mali_group_abort_*_job() re-take the lock and re-validate the id.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+	struct mali_gp_job *gp_job;
+	struct mali_pp_job *pp_job;
+	u32 gp_job_id = 0;
+	u32 pp_job_id = 0;
+	mali_bool abort_pp = MALI_FALSE;
+	mali_bool abort_gp = MALI_FALSE;
+
+	mali_group_lock(group);
+
+	gp_job = group->gp_running_job;
+	pp_job = group->pp_running_job;
+
+	if (gp_job && mali_gp_job_get_session(gp_job) == session)
+	{
+		MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
+
+		gp_job_id = mali_gp_job_get_id(gp_job);
+		abort_gp = MALI_TRUE;
+	}
+
+	if (pp_job && mali_pp_job_get_session(pp_job) == session)
+	{
+		MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
+
+		pp_job_id = mali_pp_job_get_id(pp_job);
+		abort_pp = MALI_TRUE;
+	}
+
+	mali_group_unlock(group);
+
+	/* These functions takes and releases the group lock */
+	if (0 != abort_gp)
+	{
+		mali_group_abort_gp_job(group, gp_job_id);
+	}
+	if (0 != abort_pp)
+	{
+		mali_group_abort_pp_job(group, pp_job_id);
+	}
+
+	mali_group_lock(group);
+	mali_group_remove_session_if_unused(group, session);
+	mali_group_unlock(group);
+}
+
+/* Unlocked snapshot of the GP core state (may be stale by return time). */
+enum mali_group_core_state mali_group_gp_state(struct mali_group *group)
+{
+	return group->gp_state;
+}
+
+/* Unlocked snapshot of the PP core state (may be stale by return time). */
+enum mali_group_core_state mali_group_pp_state(struct mali_group *group)
+{
+	return group->pp_state;
+}
+
+/* group lock need to be taken before calling mali_group_bottom_half */
+void mali_group_bottom_half(struct mali_group *group, enum mali_group_event_t event)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ switch (event)
+ {
+ case GROUP_EVENT_PP_JOB_COMPLETED:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_TRUE); /* PP job SUCCESS */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_PP_JOB_FAILED:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_PP_JOB_TIMED_OUT:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job TIMEOUT */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_COMPLETED:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_TRUE); /* GP job SUCCESS */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_FAILED:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_TIMED_OUT:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job TIMEOUT */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_OOM:
+ group->gp_state = MALI_GROUP_CORE_STATE_OOM;
+ mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+ mali_gp_scheduler_oom(group, group->gp_running_job);
+ break;
+ case GROUP_EVENT_MMU_PAGE_FAULT:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_TRUE, MALI_FALSE); /* GP and PP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ default:
+ break;
+ }
+}
+
+/* Simple unlocked accessors for the group's MMU, active session, and the
+ * global group registry. */
+struct mali_mmu_core *mali_group_get_mmu(struct mali_group *group)
+{
+	return group->mmu;
+}
+
+struct mali_session_data *mali_group_get_session(struct mali_group *group)
+{
+	return group->session;
+}
+
+/* Returns NULL for an out-of-range index. */
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+	if(mali_global_num_groups > index)
+	{
+		return mali_global_groups[index];
+	}
+
+	return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+	return mali_global_num_groups;
+}
+
+/* Used to check if scheduler for the other core type needs to be called on job completion.
+ *
+ * Used only for Mali-200, where job start may fail if the only MMU is busy
+ * with another session's address space.
+ */
+/* Test-and-clear of the Mali-200 "page directory activation failed" flag:
+ * returns MALI_TRUE exactly once per recorded failure so the completing
+ * core's path knows to kick the other scheduler. Always MALI_FALSE on
+ * non-Mali-200 builds. Group lock must be held. */
+static inline mali_bool mali_group_other_reschedule_needed(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+#if defined(USING_MALI200)
+	if (group->pagedir_activation_failed)
+	{
+		group->pagedir_activation_failed = MALI_FALSE;
+		return MALI_TRUE;
+	}
+	else
+#endif
+	{
+		return MALI_FALSE;
+	}
+}
+
+/*
+ * Make the session's page directory the active one on the group's MMU and
+ * take a reference on it. With the refcount non-zero only the already
+ * active session may be re-activated (KEPT_PD); any other session fails.
+ * With a zero refcount a different session's PD is programmed into the MMU
+ * (SWITCHED_PD). Group lock must be held.
+ */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+	enum mali_group_activate_pd_status retval;
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+	MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+
+	if (0 != group->page_dir_ref_count)
+	{
+		if (group->session != session)
+		{
+			MALI_DEBUG_PRINT(4, ("Mali group: Activating session FAILED: 0x%08x on group 0x%08X. Existing session: 0x%08x\n", session, group, group->session));
+			return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(4, ("Mali group: Activating session already activated: 0x%08x on group 0x%08X. New Ref: %d\n", session, group, 1+group->page_dir_ref_count));
+			retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+
+		}
+	}
+	else
+	{
+		/* There might be another session here, but it is ok to overwrite it since group->page_dir_ref_count==0 */
+		if (group->session != session)
+		{
+			mali_bool activate_success;
+			MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X. Ref: %d\n", session, group->session, group, 1+group->page_dir_ref_count));
+
+			activate_success = mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+			MALI_DEBUG_ASSERT(activate_success);
+			/*
+			 * Fix: the original did "return MALI_FALSE;" here — a mali_bool
+			 * escaping through an enum-typed return. It only behaved because
+			 * MALI_FALSE happens to equal MALI_GROUP_ACTIVATE_PD_STATUS_FAILED
+			 * (both 0). Return the proper enumerator, and do not take the
+			 * reference below on failure.
+			 */
+			if (MALI_FALSE == activate_success)
+			{
+				return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+			}
+			group->session = session;
+			retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD;
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X. Ref: %d\n", session->page_directory, group, 1+group->page_dir_ref_count));
+			retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+		}
+	}
+
+	group->page_dir_ref_count++;
+	return retval;
+}
+
+/* Drop one reference on the active page directory. The MMU deliberately
+ * keeps pointing at the session's PD even at refcount 0 (see comment
+ * below); mali_group_remove_session_if_unused() clears it when needed.
+ * Group lock must be held. */
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	MALI_DEBUG_ASSERT(0 < group->page_dir_ref_count);
+	MALI_DEBUG_ASSERT(session == group->session);
+
+	group->page_dir_ref_count--;
+
+	/* As an optimization, the MMU still points to the group->session even if (0 == group->page_dir_ref_count),
+	   and we do not call mali_mmu_activate_empty_page_directory(group->mmu); */
+	MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+}
+
+/* If the session is the group's lingering MMU session and nothing holds a
+ * PD reference, swap in the empty page directory so the dying session's
+ * PD is no longer reachable from HW. Group lock must be held. */
+void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (0 == group->page_dir_ref_count)
+	{
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+
+		if (group->session == session)
+		{
+			MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+			MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+			mali_mmu_activate_empty_page_directory(group->mmu);
+			group->session = NULL;
+		}
+	}
+}
+
+/* Mark every global group (and its cluster) as powered on.
+ * All groups are expected to be idle when this runs; each group is locked
+ * while its power state is updated.
+ */
+void mali_group_power_on(void)
+{
+	int i;
+	for (i = 0; i < mali_global_num_groups; i++)
+	{
+		struct mali_group *group = mali_global_groups[i];
+		mali_group_lock(group);
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+		MALI_DEBUG_ASSERT_POINTER(group->cluster);
+		group->power_is_on = MALI_TRUE;
+		mali_cluster_power_is_enabled_set(group->cluster, MALI_TRUE);
+		mali_group_unlock(group);
+	}
+	MALI_DEBUG_PRINT(3,("group: POWER ON\n"));
+}
+
+/* Return whether the group is currently powered on. Group lock must be held. */
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+	return group->power_is_on;
+}
+
+/* Mark every global group (and its cluster) as powered off.
+ * All groups must be idle; the reasons for clearing session and power state
+ * are given in the comments below.
+ */
+void mali_group_power_off(void)
+{
+	int i;
+	/* It is necessary to set group->session = NULL; so that the powered off MMU is not written to on map /unmap */
+	/* It is necessary to set group->power_is_on=MALI_FALSE so that pending bottom_halves does not access powered off cores. */
+	for (i = 0; i < mali_global_num_groups; i++)
+	{
+		struct mali_group *group = mali_global_groups[i];
+		mali_group_lock(group);
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+		MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+		MALI_DEBUG_ASSERT_POINTER(group->cluster);
+		group->session = NULL;
+		MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+		group->power_is_on = MALI_FALSE;
+		mali_cluster_power_is_enabled_set(group->cluster, MALI_FALSE);
+		mali_group_unlock(group);
+	}
+	MALI_DEBUG_PRINT(3,("group: POWER OFF\n"));
+}
+
+
+/* Recover the whole group when a normal core reset is not enough (used by
+ * mali_group_complete_jobs() when a single-core reset fails): stop the
+ * GP/PP buses, flush the MMU via the fault-flush page directory, wait for
+ * the buses to stop, hard-reset the cores, then reset the MMU and drop the
+ * current session. Group lock must be held.
+ */
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	/* Stop cores, bus stop */
+	if (NULL != group->pp_core)
+	{
+		mali_pp_stop_bus(group->pp_core);
+	}
+	if (NULL != group->gp_core)
+	{
+		mali_gp_stop_bus(group->gp_core);
+	}
+
+	/* Flush MMU */
+	mali_mmu_activate_fault_flush_page_directory(group->mmu);
+	mali_mmu_page_fault_done(group->mmu);
+
+	/* Wait for cores to stop bus */
+	if (NULL != group->pp_core)
+	{
+		mali_pp_stop_bus_wait(group->pp_core);
+	}
+	if (NULL != group->gp_core)
+	{
+		mali_gp_stop_bus_wait(group->gp_core);
+	}
+
+	/* Reset cores */
+	if (NULL != group->pp_core)
+	{
+		mali_pp_hard_reset(group->pp_core);
+	}
+	if (NULL != group->gp_core)
+	{
+		mali_gp_hard_reset(group->gp_core);
+	}
+
+	/* Reset MMU */
+	mali_mmu_reset(group->mmu);
+	group->session = NULL;
+}
+
+/* Group lock need to be taken before calling mali_group_complete_jobs. Will release the lock here. */
+/*
+ * Complete the running GP job (complete_gp), PP job (complete_pp) or both
+ * on this group, reporting @success to the matching scheduler(s).
+ *
+ * If a single-core reset fails, or both jobs must be completed at once,
+ * the entire group is recovery-reset and both running jobs (if any) are
+ * handed back to their schedulers. The group lock is always released
+ * before this function returns, and the lock is always dropped BEFORE the
+ * scheduler job_done/do_schedule calls.
+ */
+static void mali_group_complete_jobs(struct mali_group *group, mali_bool complete_gp, mali_bool complete_pp, bool success)
+{
+	mali_bool need_group_reset = MALI_FALSE;
+	mali_bool gp_success = success;
+	mali_bool pp_success = success;
+
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (complete_gp && !complete_pp)
+	{
+		MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+		if (_MALI_OSK_ERR_OK == mali_gp_reset(group->gp_core))
+		{
+			/* GP reset succeeded: detach the running job and return it. */
+			struct mali_gp_job *gp_job_to_return = group->gp_running_job;
+			group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+			group->gp_running_job = NULL;
+
+			MALI_DEBUG_ASSERT_POINTER(gp_job_to_return);
+
+			mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));
+
+			if(mali_group_other_reschedule_needed(group))
+			{
+				mali_group_unlock(group);
+				mali_pp_scheduler_do_schedule();
+			}
+			else
+			{
+				mali_group_unlock(group);
+			}
+
+			mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
+			mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+
+			return;
+		}
+		else
+		{
+			need_group_reset = MALI_TRUE;
+			MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset GP, need to reset entire group\n"));
+			pp_success = MALI_FALSE; /* This might kill PP as well, so this should fail */
+		}
+	}
+	if (complete_pp && !complete_gp)
+	{
+		MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+		if (_MALI_OSK_ERR_OK == mali_pp_reset(group->pp_core))
+		{
+			/* PP reset succeeded: detach the running job/sub-job and return it. */
+			struct mali_pp_job *pp_job_to_return = group->pp_running_job;
+			u32 pp_sub_job_to_return = group->pp_running_sub_job;
+			group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+			group->pp_running_job = NULL;
+
+			MALI_DEBUG_ASSERT_POINTER(pp_job_to_return);
+
+			mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));
+
+			if(mali_group_other_reschedule_needed(group))
+			{
+				mali_group_unlock(group);
+				mali_gp_scheduler_do_schedule();
+			}
+			else
+			{
+				mali_group_unlock(group);
+			}
+
+			mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
+			mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+
+			return;
+		}
+		else
+		{
+			need_group_reset = MALI_TRUE;
+			MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset PP, need to reset entire group\n"));
+			gp_success = MALI_FALSE; /* This might kill GP as well, so this should fail */
+		}
+	}
+	else if (complete_gp && complete_pp)
+	{
+		need_group_reset = MALI_TRUE;
+	}
+
+	if (MALI_TRUE == need_group_reset)
+	{
+		struct mali_gp_job *gp_job_to_return = group->gp_running_job;
+		struct mali_pp_job *pp_job_to_return = group->pp_running_job;
+		u32 pp_sub_job_to_return = group->pp_running_sub_job;
+		mali_bool schedule_other = MALI_FALSE;
+
+		MALI_DEBUG_PRINT(3, ("Mali group: Resetting entire group\n"));
+
+		group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+		group->gp_running_job = NULL;
+		if (NULL != gp_job_to_return)
+		{
+			mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));
+		}
+
+		group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+		group->pp_running_job = NULL;
+		if (NULL != pp_job_to_return)
+		{
+			mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));
+		}
+
+		/* The reset has to be done after mali_group_deactivate_page_directory() */
+		mali_group_recovery_reset(group);
+
+		/* Only one core had a job: the other scheduler may still need a kick. */
+		if (mali_group_other_reschedule_needed(group) && (NULL == gp_job_to_return || NULL == pp_job_to_return))
+		{
+			schedule_other = MALI_TRUE;
+		}
+
+		mali_group_unlock(group);
+
+		if (NULL != gp_job_to_return)
+		{
+			mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
+			mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+		}
+		else if (schedule_other)
+		{
+			mali_pp_scheduler_do_schedule();
+		}
+
+		if (NULL != pp_job_to_return)
+		{
+			mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
+			mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+		}
+		else if (schedule_other)
+		{
+			mali_gp_scheduler_do_schedule();
+		}
+
+		return;
+	}
+
+	mali_group_unlock(group);
+}
+
+#if MALI_STATE_TRACKING
+/* Append a human-readable dump of the group's core states and running jobs
+ * to @buf (at most @size bytes). Returns the number of bytes written.
+ */
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+	if (group->gp_core)
+	{
+		n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tGP state: %d\n", group->gp_state);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+	}
+	if (group->pp_core)
+	{
+		n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tPP state: %d\n", group->pp_state);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
+		                        group->pp_running_job, group->pp_running_sub_job);
+	}
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_group.h b/drivers/parrot/gpu/mali200/common/mali_group.h
new file mode 100644
index 00000000000000..1561d6bf8ef7cd
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_group.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/* max runtime [ms] for a core job - used by timeout timers */
+#define MAX_RUNTIME 5000
+/** @brief A mali group object represents a MMU and a PP and/or a GP core.
+ *
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 9
+
+struct mali_group;
+
+/* Events delivered to mali_group_bottom_half() from the GP/PP/MMU bottom halves. */
+enum mali_group_event_t
+{
+	GROUP_EVENT_PP_JOB_COMPLETED, /**< PP job completed successfully */
+	GROUP_EVENT_PP_JOB_FAILED, /**< PP job completed with failure */
+	GROUP_EVENT_PP_JOB_TIMED_OUT, /**< PP job reached max runtime */
+	GROUP_EVENT_GP_JOB_COMPLETED, /**< GP job completed successfully */
+	GROUP_EVENT_GP_JOB_FAILED, /**< GP job completed with failure */
+	GROUP_EVENT_GP_JOB_TIMED_OUT, /**< GP job reached max runtime */
+	GROUP_EVENT_GP_OOM, /**< GP job ran out of heap memory */
+	GROUP_EVENT_MMU_PAGE_FAULT, /**< MMU page fault */
+};
+
+/* Scheduling state of a single core (GP or PP) within a group. */
+enum mali_group_core_state
+{
+	MALI_GROUP_CORE_STATE_IDLE, /**< No job running on the core */
+	MALI_GROUP_CORE_STATE_WORKING, /**< A job is currently running */
+	MALI_GROUP_CORE_STATE_OOM /**< GP job suspended waiting for more heap memory (see mali_group_resume_gp_with_new_heap) */
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_cluster *cluster, struct mali_mmu_core *mmu);
+void mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+void mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The exception is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+/** @brief Start fragment of PP job
+ */
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocate more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+enum mali_group_core_state mali_group_gp_state(struct mali_group *group);
+enum mali_group_core_state mali_group_pp_state(struct mali_group *group);
+
+/** @brief The common group bottom half interrupt handler
+ *
+ * This is only called from the GP and PP bottom halves.
+ *
+ * The action taken is dictated by the \a event.
+ *
+ * @param event The event code
+ */
+void mali_group_bottom_half(struct mali_group *group, enum mali_group_event_t event);
+
+struct mali_mmu_core *mali_group_get_mmu(struct mali_group *group);
+struct mali_session_data *mali_group_get_session(struct mali_group *group);
+
+void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session_data);
+
+void mali_group_power_on(void);
+void mali_group_power_off(void);
+mali_bool mali_group_power_is_on(struct mali_group *group);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_hw_core.c b/drivers/parrot/gpu/mali200/common/mali_hw_core.c
new file mode 100644
index 00000000000000..0b0862257c958f
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_hw_core.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Request and map the register region described by @resource, filling in the
+ * common fields of @core (phys_addr, size, description, mapped_registers).
+ *
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if the region
+ *         could not be requested or mapped. On failure nothing is left
+ *         mapped or claimed.
+ */
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+	core->phys_addr = resource->base;
+	core->description = resource->description;
+	core->size = reg_size;
+	if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description))
+	{
+		core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+		if (NULL != core->mapped_registers)
+		{
+			return _MALI_OSK_ERR_OK;
+		}
+		else
+		{
+			MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+		}
+		/* Mapping failed: give back the region claimed above. */
+		_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Unmap the register window and release the memory region claimed by
+ * mali_hw_core_create(). */
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+	_mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+	core->mapped_registers = NULL;
+	_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_hw_core.h b/drivers/parrot/gpu/mali200/common/mali_hw_core.h
new file mode 100644
index 00000000000000..c797804f4d63d9
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_hw_core.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+/* Fields are initialized by mali_hw_core_create() and torn down by
+ * mali_hw_core_delete(). */
+struct mali_hw_core
+{
+	u32 phys_addr; /**< Physical address of the registers */
+	u32 size; /**< Size of registers */
+	mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+	const char* description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+#define MALI_HW_CORE_INVALID_VALUE ((u32)-1)
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+/* Read a 32-bit register at byte offset @relative_address in the core's
+ * mapped register window. */
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+	u32 read_val;
+	read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+	MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+	                     core->description, relative_address, read_val));
+	return read_val;
+}
+
+/* Write a 32-bit register via the relaxed (unordered) I/O write primitive. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+	                     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Write a 32-bit register via the ordered I/O write primitive. */
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+	                     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+/* Write @nr_of_regs consecutive 32-bit registers starting at
+ * @relative_address, one relaxed write per register (deliberately no burst
+ * access -- see comment below). */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+	                     core->description,relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i< nr_of_regs; i++)
+	{
+		mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+	}
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_common.h b/drivers/parrot/gpu/mali200/common/mali_kernel_common.h
new file mode 100644
index 00000000000000..81da173f5b0255
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_common.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#ifndef KERN_EMERG
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+#endif
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* The file include several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argumentlist.
+ *
+ * The printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF((KERN_ERR "Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((KERN_ERR " %s()%4d\n"KERN_ERR" ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF((KERN_ERR "\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF((KERN_INFO "Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+	if((level) <= mali_debug_level)\
+	{MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+	} while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+	if((condition)&&((level) <= mali_debug_level))\
+	{MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+	else if((level) <= mali_debug_level)\
+	{ MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+/* Release builds: all debug macros compile away to no-ops.
+ * Parameter lists MUST match the DEBUG variants exactly so that call sites
+ * compile identically in both configurations. (Previously the release
+ * MALI_DEBUG_PRINT_ELSE took an extra 'condition' argument, so any caller
+ * matching the two-argument DEBUG signature failed to compile in release
+ * builds.) */
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_core.c b/drivers/parrot/gpu/mali200/common/mali_kernel_core.c
new file mode 100644
index 00000000000000..1d8d07779563f5
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_core.c
@@ -0,0 +1,1185 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_cluster.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#include "mali_l2_cache.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+static _mali_osk_errcode_t build_system_info(void);
+static void cleanup_system_info(_mali_system_info *cleanup);
+
+/* system info variables */
+static _mali_osk_lock_t *system_info_lock = NULL;
+static _mali_system_info *system_info = NULL;
+static u32 system_info_size = 0;
+static u32 first_pp_offset = 0;
+
+#define WATCHDOG_MSECS_DEFAULT 4000 /* 4 s */
+
+/* timer related */
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+/* Look up the resource entry of the given type whose base register address
+ * is global_gpu_base_address + offset. Returns NULL when no such entry
+ * exists in the arch configuration table. */
+static _mali_osk_resource_t *mali_find_resource(_mali_osk_resource_type_t type, u32 offset)
+{
+	u32 wanted_base = global_gpu_base_address + offset;
+	int idx;
+
+	for (idx = 0; idx < num_resources; idx++)
+	{
+		_mali_osk_resource_t *res = &arch_configuration[idx];
+
+		if (res->type == type && res->base == wanted_base)
+		{
+			return res;
+		}
+	}
+
+	return NULL;
+}
+
+/* Count how many entries of the given type exist in the resource table. */
+static u32 mali_count_resources(_mali_osk_resource_type_t type)
+{
+	u32 count = 0;
+	int idx;
+
+	for (idx = 0; idx < num_resources; idx++)
+	{
+		if (arch_configuration[idx].type == type)
+		{
+			count++;
+		}
+	}
+
+	return count;
+}
+
+
+/* Determine the GPU register window base address and the offset of the first
+ * PP core. The lowest-addressed GP and PP resources are located; whichever
+ * sits lower is the window base. GP-first layouts are Mali-300/400/450
+ * (first PP at +0x8000); PP-first is Mali-200 (PP at the base itself). */
+static _mali_osk_errcode_t mali_parse_gpu_base_and_first_pp_offset_address(void)
+{
+	_mali_osk_resource_t *lowest_gp = NULL;
+	_mali_osk_resource_t *lowest_pp = NULL;
+	int idx;
+
+	for (idx = 0; idx < num_resources; idx++)
+	{
+		_mali_osk_resource_t *res = &arch_configuration[idx];
+
+		switch (res->type)
+		{
+		case MALI_GP:
+			if (NULL == lowest_gp || res->base < lowest_gp->base)
+			{
+				lowest_gp = res;
+			}
+			break;
+		case MALI_PP:
+			if (NULL == lowest_pp || res->base < lowest_pp->base)
+			{
+				lowest_pp = res;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (NULL == lowest_gp || NULL == lowest_pp)
+	{
+		MALI_PRINT_ERROR(("No GP+PP core specified in config file\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	if (lowest_gp->base < lowest_pp->base)
+	{
+		/* GP is first, so we are dealing with Mali-300, Mali-400 or Mali-450 */
+		global_gpu_base_address = lowest_gp->base;
+		first_pp_offset = 0x8000;
+	}
+	else
+	{
+		/* PP is first, so we are dealing with Mali-200 */
+		global_gpu_base_address = lowest_pp->base;
+		first_pp_offset = 0x0;
+	}
+	MALI_SUCCESS;
+}
+
+/* Identify which Mali GPU is present. A throw-away group + PP core pair is
+ * created on the first PP resource purely so that the PP version register
+ * can be read; the product id and the major/minor revision globals are
+ * filled in from that word. Returns _MALI_OSK_ERR_FAULT when the first PP
+ * is missing, any temporary object cannot be created, or the product id is
+ * unknown. */
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+	_mali_osk_resource_t *first_pp_resource;
+	struct mali_group *group;
+	struct mali_pp_core *pp_core;
+	u32 pp_version;
+
+	/* Find the first PP core */
+	first_pp_resource = mali_find_resource(MALI_PP, first_pp_offset);
+	if (NULL == first_pp_resource)
+	{
+		MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Create a dummy PP object for this core so that we can read the version register */
+	group = mali_group_create(NULL, NULL);
+	if (NULL == group)
+	{
+		MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	pp_core = mali_pp_create(first_pp_resource, group);
+	if (NULL == pp_core)
+	{
+		MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+		mali_group_delete(group); /* fix: the temporary group was leaked on this path */
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	pp_version = mali_pp_core_get_version(pp_core);
+
+	/* The dummy objects have served their purpose. */
+	mali_pp_delete(pp_core);
+	mali_group_delete(group);
+
+	/* Version word: product id in the top 16 bits, then major (15..8) and
+	 * minor (7..0) revision. */
+	global_gpu_major_version = (pp_version >> 8) & 0xFF;
+	global_gpu_minor_version = pp_version & 0xFF;
+
+	switch (pp_version >> 16)
+	{
+	case MALI200_PP_PRODUCT_ID:
+		global_product_id = _MALI_PRODUCT_ID_MALI200;
+		MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+		break;
+	case MALI300_PP_PRODUCT_ID:
+		global_product_id = _MALI_PRODUCT_ID_MALI300;
+		MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+		break;
+	case MALI400_PP_PRODUCT_ID:
+		global_product_id = _MALI_PRODUCT_ID_MALI400;
+		MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+		break;
+	case MALI450_PP_PRODUCT_ID:
+		global_product_id = _MALI_PRODUCT_ID_MALI450;
+		MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+		break;
+	default:
+		MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down every cluster object registered with the cluster module. */
+static void mali_delete_clusters(void)
+{
+	u32 cluster_count = mali_cluster_get_glob_num_clusters();
+	u32 idx;
+
+	for (idx = 0; idx < cluster_count; idx++)
+	{
+		mali_cluster_delete(mali_cluster_get_global_cluster(idx));
+	}
+}
+
+/* Create one cluster object. A non-NULL resource wraps the described L2
+ * cache in a new cluster; a NULL resource creates a dummy cluster with no
+ * L2 (used for Mali-200, which has no L2 cache). */
+static _mali_osk_errcode_t mali_create_cluster(_mali_osk_resource_t *resource)
+{
+ if (NULL != resource)
+ {
+ struct mali_l2_cache_core *l2_cache;
+
+ /* Reject configs that declare more L2 caches than the driver supports. */
+ if (mali_l2_cache_core_get_glob_num_l2_cores() >= mali_l2_cache_core_get_max_num_l2_cores())
+ {
+ MALI_PRINT_ERROR(("Found too many L2 cache core objects, max %u is supported\n", mali_l2_cache_core_get_max_num_l2_cores()));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s, starting new cluster\n", resource->description));
+
+ /*l2_cache = mali_l2_cache_create(resource, global_num_l2_cache_cores);*/
+ l2_cache = mali_l2_cache_create(resource);
+ if (NULL == l2_cache)
+ {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* The cluster takes ownership of the L2 on success; free it ourselves
+ * only when cluster creation fails. */
+ if (NULL == mali_cluster_create(l2_cache))
+ {
+ MALI_PRINT_ERROR(("Failed to create cluster object\n"));
+ mali_l2_cache_delete(l2_cache);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ else
+ {
+ /* Dummy cluster: success is detected via the global cluster table
+ * rather than the (discarded) return value. */
+ mali_cluster_create(NULL);
+ if (NULL == mali_cluster_get_global_cluster(0))
+ {
+ MALI_PRINT_ERROR(("Failed to create cluster object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Created cluster object\n"));
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Create the cluster objects required by the detected product:
+ * - Mali-200: one dummy cluster, no L2.
+ * - Mali-300/400: one cluster around the single L2 at +0x1000.
+ * - Mali-450: a cluster per L2 — GP L2 at +0x10000, PP group 0 L2 at
+ *   +0x01000, and an optional PP group 1 L2 at +0x11000. */
+static _mali_osk_errcode_t mali_parse_config_cluster(void)
+{
+	if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+	{
+		/* Create dummy cluster without L2 cache */
+		return mali_create_cluster(NULL);
+	}
+	else if (_MALI_PRODUCT_ID_MALI300 == global_product_id || _MALI_PRODUCT_ID_MALI400 == global_product_id)
+	{
+		_mali_osk_resource_t *l2_resource = mali_find_resource(MALI_L2, 0x1000);
+		if (NULL == l2_resource)
+		{
+			MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+			return _MALI_OSK_ERR_FAULT;
+		}
+
+		return mali_create_cluster(l2_resource);
+	}
+	else if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+	{
+		/*
+		 * L2 for GP at 0x10000
+		 * L2 for PP0-3 at 0x01000
+		 * L2 for PP4-7 at 0x11000 (optional)
+		 */
+
+		_mali_osk_resource_t *l2_gp_resource;
+		_mali_osk_resource_t *l2_pp_grp0_resource;
+		_mali_osk_resource_t *l2_pp_grp1_resource;
+
+		/* Make cluster for GP's L2 */
+		l2_gp_resource = mali_find_resource(MALI_L2, 0x10000);
+		if (NULL != l2_gp_resource)
+		{
+			_mali_osk_errcode_t ret;
+			MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for GP\n"));
+			ret = mali_create_cluster(l2_gp_resource);
+			if (_MALI_OSK_ERR_OK != ret)
+			{
+				return ret;
+			}
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+			return _MALI_OSK_ERR_FAULT;
+		}
+
+		/* Make cluster for first PP core group */
+		l2_pp_grp0_resource = mali_find_resource(MALI_L2, 0x1000);
+		if (NULL != l2_pp_grp0_resource)
+		{
+			_mali_osk_errcode_t ret;
+			MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for PP group 0\n"));
+			ret = mali_create_cluster(l2_pp_grp0_resource);
+			if (_MALI_OSK_ERR_OK != ret)
+			{
+				return ret;
+			}
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+			return _MALI_OSK_ERR_FAULT;
+		}
+
+		/* Second PP core group is optional, don't fail if we don't find it */
+		l2_pp_grp1_resource = mali_find_resource(MALI_L2, 0x11000);
+		if (NULL != l2_pp_grp1_resource)
+		{
+			_mali_osk_errcode_t ret;
+			/* Fixed copy/paste: this message previously said "PP group 0". */
+			MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for PP group 1\n"));
+			ret = mali_create_cluster(l2_pp_grp1_resource);
+			if (_MALI_OSK_ERR_OK != ret)
+			{
+				return ret;
+			}
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Build one group — an MMU plus at most one GP and one PP core — and attach
+ * it to "cluster". resource_gp / resource_pp may be NULL when the group has
+ * no core of that kind. Once the group is linked into the cluster, partial
+ * failures rely on cluster teardown for cleanup (see in-line comments). */
+static _mali_osk_errcode_t mali_create_group(struct mali_cluster *cluster,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+ struct mali_pp_core *pp;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the MMU object */
+ mmu = mali_mmu_create(resource_mmu);
+ if (NULL == mmu)
+ {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+ group = mali_group_create(cluster, mmu);
+ if (NULL == group)
+ {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ mali_mmu_delete(mmu);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Set pointer back to group in mmu.*/
+ mali_mmu_set_group(mmu, group);
+
+ /* Add this group to current cluster */
+ mali_cluster_add_group(cluster, group);
+
+ if (NULL != resource_gp)
+ {
+ /* Create the GP core object inside this group */
+ /* global_gp_core = mali_gp_create(resource_gp, group); */
+ if (NULL == mali_gp_create(resource_gp, group))
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add GP object to this group */
+ MALI_DEBUG_PRINT(3, ("Adding GP %s to group\n", resource_gp->description));
+ mali_group_add_gp_core(group, mali_gp_get_global_gp_core());
+ }
+
+ if (NULL != resource_pp)
+ {
+ /* Create the PP core object inside this group */
+ pp = mali_pp_create(resource_pp, group);
+
+ if (NULL == pp)
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add PP object to this group */
+ MALI_DEBUG_PRINT(3, ("Adding PP %s to group\n", resource_pp->description));
+ mali_group_add_pp_core(group, pp);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Instantiate the MMU/GP/PP groups for the detected product and attach them
+ * to the clusters made by mali_parse_config_cluster(). The register offsets
+ * below are the fixed Mali-200/300/400/450 memory-map locations; only the
+ * first GP and the first PP (each with its MMU) are mandatory. */
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+ {
+ _mali_osk_resource_t *resource_gp;
+ _mali_osk_resource_t *resource_pp;
+ _mali_osk_resource_t *resource_mmu;
+
+ MALI_DEBUG_ASSERT(1 == mali_cluster_get_glob_num_clusters());
+
+ resource_gp = mali_find_resource(MALI_GP, 0x02000);
+ resource_pp = mali_find_resource(MALI_PP, 0x00000);
+ resource_mmu = mali_find_resource(MMU, 0x03000);
+
+ if (NULL == resource_mmu || NULL == resource_gp || NULL == resource_pp)
+ {
+ /* Missing mandatory core(s) */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /*return mali_create_group(global_clusters[0], resource_mmu, resource_gp, resource_pp);*/
+ return mali_create_group(mali_cluster_get_global_cluster(0), resource_mmu, resource_gp, resource_pp);
+ }
+ else if (_MALI_PRODUCT_ID_MALI300 == global_product_id ||
+ _MALI_PRODUCT_ID_MALI400 == global_product_id ||
+ _MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ _mali_osk_errcode_t err;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+ _mali_osk_resource_t *resource_gp;
+ _mali_osk_resource_t *resource_gp_mmu;
+ /* Variable-length arrays sized by the driver's max PP core count. */
+ _mali_osk_resource_t *resource_pp[mali_pp_get_max_num_pp_cores()];
+ _mali_osk_resource_t *resource_pp_mmu[mali_pp_get_max_num_pp_cores()];
+ u32 max_num_pp_cores = mali_pp_get_max_num_pp_cores();
+
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ /* Mali-450 has separate L2s for GP, and PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ resource_gp = mali_find_resource(MALI_GP, 0x00000);
+ resource_gp_mmu = mali_find_resource(MMU, 0x03000);
+ resource_pp[0] = mali_find_resource(MALI_PP, 0x08000);
+ resource_pp[1] = mali_find_resource(MALI_PP, 0x0A000);
+ resource_pp[2] = mali_find_resource(MALI_PP, 0x0C000);
+ resource_pp[3] = mali_find_resource(MALI_PP, 0x0E000);
+ resource_pp[4] = mali_find_resource(MALI_PP, 0x28000);
+ resource_pp[5] = mali_find_resource(MALI_PP, 0x2A000);
+ resource_pp[6] = mali_find_resource(MALI_PP, 0x2C000);
+ resource_pp[7] = mali_find_resource(MALI_PP, 0x2E000);
+ resource_pp_mmu[0] = mali_find_resource(MMU, 0x04000);
+ resource_pp_mmu[1] = mali_find_resource(MMU, 0x05000);
+ resource_pp_mmu[2] = mali_find_resource(MMU, 0x06000);
+ resource_pp_mmu[3] = mali_find_resource(MMU, 0x07000);
+ resource_pp_mmu[4] = mali_find_resource(MMU, 0x1C000);
+ resource_pp_mmu[5] = mali_find_resource(MMU, 0x1D000);
+ resource_pp_mmu[6] = mali_find_resource(MMU, 0x1E000);
+ resource_pp_mmu[7] = mali_find_resource(MMU, 0x1F000);
+
+ if (NULL == resource_gp || NULL == resource_gp_mmu || NULL == resource_pp[0] || NULL == resource_pp_mmu[0])
+ {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU (0x%08X, 0x%08X, 0x%08X, 0x%08X)\n",
+ resource_gp, resource_gp_mmu, resource_pp[0], resource_pp_mmu[0]));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_cluster_get_glob_num_clusters());
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_gp), resource_gp_mmu, resource_gp, NULL);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_cluster_get_glob_num_clusters() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp0), resource_pp_mmu[0], NULL, resource_pp[0]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) /* First half of the PP cores belong to first core group */
+ {
+ if (NULL != resource_pp[i])
+ {
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp0), resource_pp_mmu[i], NULL, resource_pp[i]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < max_num_pp_cores; i++) /* Second half of the PP cores belong to second core group */
+ {
+ if (NULL != resource_pp[i])
+ {
+ MALI_DEBUG_ASSERT(mali_cluster_get_glob_num_clusters() >= 2); /* Only Mali-450 have more than 4 PPs, and these cores belong to second core group */
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp1), resource_pp_mmu[i], NULL, resource_pp[i]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Create the PMU object when a PMU resource is declared at +0x02000; a
+ * configuration without a PMU is not an error. */
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+	_mali_osk_resource_t *pmu_resource = mali_find_resource(PMU, 0x02000);
+
+	if (NULL != pmu_resource)
+	{
+		u32 pp_core_count = mali_count_resources(MALI_PP);
+		u32 l2_cache_count = mali_count_resources(MALI_L2);
+
+		if (NULL == mali_pmu_create(pmu_resource, pp_core_count, l2_cache_count))
+		{
+			return _MALI_OSK_ERR_FAULT;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Register every memory-related resource with the memory subsystem:
+ * OS_MEMORY pools, dedicated MEMORY banks and MEM_VALIDATION ranges. On any
+ * failure the memory subsystem is torn down before the error is returned. */
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+ int i;
+ _mali_osk_errcode_t ret;
+
+ for(i = 0; i < num_resources; i++)
+ {
+ switch(arch_configuration[i].type)
+ {
+ case OS_MEMORY:
+ ret = mali_memory_core_resource_os_memory(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register OS_MEMORY\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ case MEMORY:
+ ret = mali_memory_core_resource_dedicated_memory(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register MEMORY\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ case MEM_VALIDATION:
+ ret = mali_mem_validation_add_range(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register MEM_VALIDATION\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ default:
+ /* Non-memory resources are handled elsewhere. */
+ break;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Bring up the whole driver: system-info lock, session layer, (optional)
+ * profiling, resource parsing, memory, PMU, power management, product
+ * detection, MMU, DLBU (Mali-450 only), clusters + groups, the schedulers
+ * and (optionally) GPU utilization tracking. On any failure the subsystems
+ * already initialized are unwound in reverse order through the label chain
+ * at the bottom.
+ * NOTE(review): system_info_lock does not appear to be terminated on the
+ * error paths below (only in mali_terminate_subsystems) — confirm whether
+ * that is an intentional leak-on-failed-probe. */
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+ _mali_osk_errcode_t err;
+ mali_bool is_pmu_enabled;
+
+ MALI_CHECK_NON_NULL(system_info_lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0 ), _MALI_OSK_ERR_FAULT);
+
+ err = mali_session_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ /* No biggie if we wheren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ /* Build dummy system info. Will be removed in the future. */
+ err = build_system_info();
+ if (_MALI_OSK_ERR_OK != err) goto build_system_info_failed;
+
+ /* Get data from config.h */
+ err = _mali_osk_resources_init(&arch_configuration, &num_resources);
+ if (_MALI_OSK_ERR_OK != err) goto osk_resources_init_failed;
+
+ /* Initialize driver subsystems */
+ err = mali_memory_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+ /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+ err = mali_parse_config_memory();
+ if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+ /* Parsing the GPU base address and first pp offset */
+ err = mali_parse_gpu_base_and_first_pp_offset_address();
+ if (_MALI_OSK_ERR_OK != err) goto parse_gpu_base_address_failed;
+
+ /* Initialize the MALI PMU */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+ /* Remember whether a PMU exists so the unwind path knows what to delete. */
+ is_pmu_enabled = mali_pmu_get_global_pmu_core() != NULL ? MALI_TRUE : MALI_FALSE;
+
+ /* Initialize the power management module */
+ err = mali_pm_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+ /* Make sure the power stays on for the rest of this function */
+ mali_pm_always_on(MALI_TRUE);
+
+ /* Detect which Mali GPU we are dealing with */
+ err = mali_parse_product_info();
+ if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+ /* The global_product_id is now populated with the correct Mali GPU */
+
+ /* Initialize MMU module */
+ err = mali_mmu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+ /* Initialize the DLBU module for Mali-450 */
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ err = mali_dlbu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+ }
+
+ /* Start configuring the actual Mali hardware. */
+ err = mali_parse_config_cluster();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ err = mali_parse_config_groups();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+ /* Initialize the schedulers */
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+ err = mali_gp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+ err = mali_pp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+ /* Initialize the GPU utilization tracking */
+ err = mali_utilization_init();
+ if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+#endif
+
+ /* We no longer need to stay */
+ mali_pm_always_on(MALI_FALSE);
+ MALI_SUCCESS; /* all ok */
+
+ /* Error handling: each label undoes everything initialized above it and
+ * then falls through to the next label. */
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+utilization_init_failed:
+ mali_pp_scheduler_terminate();
+#endif
+pp_scheduler_init_failed:
+ mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+ mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+ mali_delete_clusters(); /* Delete clusters even if config parsing failed. */
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ mali_dlbu_terminate();
+ }
+dlbu_init_failed:
+ mali_mmu_terminate();
+mmu_init_failed:
+ /* Nothing to roll back */
+product_info_parsing_failed:
+ mali_pm_terminate();
+pm_init_failed:
+ if (is_pmu_enabled)
+ {
+ mali_pmu_delete(mali_pmu_get_global_pmu_core());
+ }
+parse_pmu_config_failed:
+parse_gpu_base_address_failed:
+parse_memory_config_failed:
+ mali_memory_terminate();
+memory_init_failed:
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+osk_resources_init_failed:
+ cleanup_system_info(system_info);
+build_system_info_failed:
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_term();
+#endif
+ mali_session_terminate();
+session_init_failed:
+ return err;
+}
+
+/* Shut down every driver subsystem in reverse order of bring-up; see
+ * mali_initialize_subsystems() for the corresponding startup sequence. */
+void mali_terminate_subsystems(void)
+{
+ struct mali_pmu_core *pmu;
+
+ MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+ /* shut down subsystems in reverse order from startup */
+
+ mali_pm_always_on(MALI_TRUE); /* Mali will be powered off once PM subsystem terminates */
+
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ mali_pp_scheduler_terminate();
+ mali_gp_scheduler_terminate();
+ mali_scheduler_terminate();
+
+ mali_delete_clusters(); /* Delete clusters even if config parsing failed. */
+
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ mali_dlbu_terminate();
+ }
+
+ mali_mmu_terminate();
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu)
+ {
+ mali_pmu_delete(pmu);
+ }
+
+ mali_pm_terminate();
+
+ mali_memory_terminate();
+
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+
+ cleanup_system_info(system_info);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_term();
+#endif
+
+ mali_session_terminate();
+
+ if (NULL != system_info_lock)
+ {
+ _mali_osk_lock_term( system_info_lock );
+ }
+}
+
+/* The product id detected by mali_parse_product_info(), or
+ * _MALI_PRODUCT_ID_UNKNOWN before detection has run. */
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+ return global_product_id;
+}
+
+/* Reset every cluster so the hardware is in a known state after wakeup. */
+void mali_kernel_core_wakeup(void)
+{
+	u32 cluster_count = mali_cluster_get_glob_num_clusters();
+	u32 idx;
+
+	for (idx = 0; idx < cluster_count; idx++)
+	{
+		mali_cluster_reset(mali_cluster_get_global_cluster(idx));
+	}
+}
+
+/* Free a system info struct together with its core and memory bank lists.
+ * Tolerates a NULL argument (fix: the previous version dereferenced
+ * "cleanup" unconditionally) so error paths may call it without checking. */
+static void cleanup_system_info(_mali_system_info *cleanup)
+{
+	_mali_core_info *current_core;
+	_mali_mem_info *current_mem;
+
+	if (NULL == cleanup)
+	{
+		return;
+	}
+
+	/* delete all the core info structs */
+	while (NULL != cleanup->core_info)
+	{
+		current_core = cleanup->core_info;
+		cleanup->core_info = current_core->next;
+		_mali_osk_free(current_core);
+	}
+
+	/* delete all the mem info structs */
+	while (NULL != cleanup->mem_info)
+	{
+		current_mem = cleanup->mem_info;
+		cleanup->mem_info = current_mem->next;
+		_mali_osk_free(current_mem);
+	}
+
+	/* delete the system info struct itself */
+	_mali_osk_free(cleanup);
+}
+
+/* Build a dummy system info struct. User space still need this.
+ * Reports one fictitious 1 GiB memory bank readable/writable by every agent;
+ * core_info is intentionally left empty. The new struct is published to the
+ * global system_info/system_info_size under system_info_lock.
+ * NOTE(review): a previously published system_info is not freed here — only
+ * safe while this runs exactly once during init; confirm. */
+static _mali_osk_errcode_t build_system_info(void)
+{
+ _mali_system_info * new_info;
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ u32 new_size = 0;
+
+ /* create a new system info struct */
+ MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(new_info, 0, sizeof(_mali_system_info));
+
+ /* fill in the info */
+ new_info->has_mmu = 1;
+ new_info->drivermode = _MALI_DRIVER_MODE_NORMAL;
+
+ new_info->core_info = NULL; /* Not used by user space */
+
+ new_info->mem_info = _mali_osk_calloc(1, sizeof(_mali_mem_info));
+ if(NULL == new_info->mem_info)
+ {
+ _mali_osk_free(new_info);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ new_info->mem_info->size = 1024 * 1024 * 1024; /* 1GiB */
+ new_info->mem_info->flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE;
+ new_info->mem_info->maximum_order_supported = 30;
+ new_info->mem_info->identifier = 0;
+ new_info->mem_info->next = NULL;
+
+ /* building succeeded, calculate the size */
+
+ /* size needed of the system info struct itself */
+ new_size = sizeof(_mali_system_info);
+
+ /* size needed for the cores */
+ for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+ new_size += sizeof(_mali_core_info);
+ }
+
+ /* size needed for the memory banks */
+ for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ new_size += sizeof(_mali_mem_info);
+ }
+
+ /* lock system info access so a user wont't get a corrupted version */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* set new info */
+ system_info = new_info;
+ system_info_size = new_size;
+
+ /* we're safe */
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* ok result */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Report the kernel-side UK API version and whether it matches the version
+ * the calling user-space library was built against. Succeeds either way;
+ * the caller inspects args->compatible. */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check compatability */
+	args->compatible = (_MALI_UK_API_VERSION == args->version) ? 1 : 0;
+
+	args->version = _MALI_UK_API_VERSION; /* report our version */
+
+	/* success regardless of being compatible or not */
+	MALI_SUCCESS;
+}
+
+/* Report the buffer size a subsequent _mali_ukk_get_system_info() needs.
+ * NOTE(review): reads system_info_size without taking system_info_lock —
+ * presumably benign for a single u32 read; confirm. */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ args->size = system_info_size;
+ MALI_SUCCESS;
+}
+
+/* Copy the current system info into the caller-supplied buffer, rewriting
+ * the embedded list pointers so they are valid relative to adjust_ptr_base
+ * (the destination buffer itself, or the caller's ukk_private address when
+ * the buffer will later be copied into user space). Fails when the caller's
+ * buffer is smaller than system_info_size. */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ void * current_write_pos, ** current_patch_pos;
+ u32 adjust_ptr_base;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* lock the system info */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* first check size */
+ if (args->size < system_info_size) goto exit_when_locked;
+
+ /* we build a copy of system_info in the user space buffer specified by the user and
+ * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
+ * indicate a different base address for patching the pointers (normally the
+ * address of the provided system_info buffer would be used). This is helpful when
+ * the system_info buffer needs to get copied to user space and the pointers need
+ * to be in user space.
+ */
+ if (0 == args->ukk_private)
+ {
+ adjust_ptr_base = (u32)args->system_info;
+ }
+ else
+ {
+ adjust_ptr_base = args->ukk_private;
+ }
+
+ /* copy each struct into the buffer, and update its pointers */
+ current_write_pos = (void *)args->system_info;
+
+ /* first, the master struct */
+ _mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));
+
+ /* advance write pointer */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));
+
+ /* first we write the core info structs, patch starts at master's core_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));
+
+ for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ /* then we write the mem info structs, patch starts at master's mem_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));
+
+ for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ err = _MALI_OSK_ERR_OK;
+exit_when_locked:
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+ MALI_ERROR(err);
+}
+
+/* Block until a notification is available on the calling session's ioctl
+ * queue, then copy its payload into args->data. When the session has no
+ * queue (shutdown in progress) a CORE_SHUTDOWN_IN_PROGRESS notification is
+ * synthesized so user space stops polling. */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * Post an empty notification (type only, zero-sized payload) to the calling
+ * session's own queue, waking any thread blocked in
+ * _mali_ukk_wait_for_notification().
+ *
+ * @param args args->ctx identifies the session, args->type is the
+ *             notification type to post
+ * @return _MALI_OSK_ERR_OK on success or when the queue is already gone
+ *         (shutdown), _MALI_OSK_ERR_NOMEM if the notification object could
+ *         not be created
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+	_mali_osk_notification_t * notification;
+	_mali_osk_notification_queue_t *queue;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+	/* if the queue does not exist we're currently shutting down */
+	if (NULL == queue)
+	{
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		MALI_SUCCESS;
+	}
+
+	notification = _mali_osk_notification_create(args->type, 0);
+	if ( NULL == notification)
+	{
+		MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	/* the queue takes ownership of the notification object */
+	_mali_osk_notification_queue_send(queue, notification);
+
+	MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * Open a new driver session.
+ *
+ * Allocates the per-session bookkeeping, creates its notification queue and
+ * MMU page directory, maps the DLB page, starts the memory session and
+ * finally registers the session in the global session list. Each failure
+ * path unwinds exactly the resources acquired before it.
+ *
+ * @param context Out: receives the new session pointer, later passed back
+ *                as args->ctx by the uk calls and to _mali_ukk_close()
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on any failure
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+	struct mali_session_data *session_data;
+
+	/* allocated struct to track this session */
+	session_data = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+	MALI_DEBUG_PRINT(2, ("Session starting\n"));
+
+	/* create a response queue for this session */
+	session_data->ioctl_queue = _mali_osk_notification_queue_init();
+	if (NULL == session_data->ioctl_queue)
+	{
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session_data->page_directory = mali_mmu_pagedir_alloc();
+	if (NULL == session_data->page_directory)
+	{
+		_mali_osk_notification_queue_term(session_data->ioctl_queue);
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* reserve the fixed DLB virtual address in this session's page tables */
+	if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session_data->page_directory, MALI_DLB_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE))
+	{
+		MALI_PRINT_ERROR(("Failed to map DLB page into session\n"));
+		_mali_osk_notification_queue_term(session_data->ioctl_queue);
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* point the reserved entry at the DLB unit, if one has been probed */
+	if (0 != mali_dlbu_phys_addr)
+	{
+		mali_mmu_pagedir_update(session_data->page_directory, MALI_DLB_VIRT_ADDR, mali_dlbu_phys_addr, _MALI_OSK_MALI_PAGE_SIZE);
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session_data))
+	{
+		mali_mmu_pagedir_free(session_data->page_directory);
+		_mali_osk_notification_queue_term(session_data->ioctl_queue);
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	*context = (void*)session_data;
+
+	/* Add session to the list of all sessions. */
+	mali_session_add(session_data);
+
+	MALI_DEBUG_PRINT(3, ("Session started\n"));
+	MALI_SUCCESS;
+}
+
+/**
+ * Close a driver session opened by _mali_ukk_open().
+ *
+ * Teardown order matters: the session is first unregistered and its jobs
+ * aborted, then pending bottom halves are flushed, and only then are the
+ * session's memory and data structures freed.
+ *
+ * @param context In/out: session pointer; set to NULL on return
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS for a NULL context
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+	struct mali_session_data *session;
+	MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+	session = (struct mali_session_data *)*context;
+
+	MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+	/* Remove session from list of all sessions. */
+	mali_session_remove(session);
+
+	/* Abort queued and running jobs */
+	mali_gp_scheduler_abort_session(session);
+	mali_pp_scheduler_abort_session(session);
+
+	/* Flush pending work.
+	 * Needed to make sure all bottom half processing related to this
+	 * session has been completed, before we free internal data structures.
+	 */
+	_mali_osk_flush_workqueue(NULL);
+
+	/* Free remaining memory allocated to this session */
+	mali_memory_session_end(session);
+
+	/* Free session data structures */
+	mali_mmu_pagedir_free(session->page_directory);
+	_mali_osk_notification_queue_term(session->ioctl_queue);
+	_mali_osk_free(session);
+
+	*context = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+	MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+/**
+ * Dump GP and PP scheduler state into @a buf for debugging.
+ *
+ * @param buf Destination text buffer
+ * @param size Size of @a buf in bytes
+ * @return Number of bytes written to @a buf
+ *
+ * NOTE(review): assumes each dump_state call writes no more than the space
+ * it is offered; if n ever exceeded size, the unsigned expression size - n
+ * would wrap to a huge value -- confirm the implementations bound their
+ * output.
+ */
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+	int n = 0; /* Number of bytes written to buf */
+
+	n += mali_gp_scheduler_dump_state(buf + n, size - n);
+	n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_core.h b/drivers/parrot/gpu/mali200/common/mali_kernel_core.h
new file mode 100644
index 00000000000000..0ac967eabe0b24
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_core.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+extern int mali_max_job_runtime;
+
+typedef enum
+{
+ _MALI_PRODUCT_ID_UNKNOWN,
+ _MALI_PRODUCT_ID_MALI200,
+ _MALI_PRODUCT_ID_MALI300,
+ _MALI_PRODUCT_ID_MALI400,
+ _MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+void mali_kernel_core_wakeup(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.c b/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000000..b9f05ca4965a00
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+/**
+ * Create a descriptor mapping able to hold init_entries descriptors,
+ * growable up to max_entries. Both counts are rounded up to a multiple of
+ * BITS_PER_LONG so the usage bitmap always consists of whole words.
+ *
+ * @param init_entries Number of entries to preallocate
+ * @param max_entries Upper bound on the number of entries
+ * @return New mapping object, or NULL on allocation/lock-init failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+	mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+	init_entries = MALI_PAD_INT(init_entries);
+	max_entries = MALI_PAD_INT(max_entries);
+
+	if (NULL != map)
+	{
+		map->table = descriptor_table_alloc(init_entries);
+		if (NULL != map->table)
+		{
+			map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+			if (NULL != map->lock)
+			{
+				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+				map->max_nr_mappings_allowed = max_entries;
+				map->current_nr_mappings = init_entries;
+				return map;
+			}
+			/* lock init failed: unwind the partially constructed object */
+			descriptor_table_free(map->table);
+		}
+		_mali_osk_free(map);
+	}
+	return NULL;
+}
+
+/**
+ * Destroy a descriptor mapping object created by
+ * mali_descriptor_mapping_create(), releasing its resources in reverse
+ * order of construction: the table, the lock, then the object itself.
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * mapping)
+{
+	descriptor_table_free(mapping->table);
+	_mali_osk_lock_term(mapping->lock);
+	_mali_osk_free(mapping);
+}
+
+/**
+ * Allocate a new descriptor in @a map, mapping it to @a target.
+ *
+ * Grows the mapping table by BITS_PER_LONG entries (up to
+ * max_nr_mappings_allowed) when no free slot exists.
+ *
+ * Fixes relative to the previous version: the old-table copy was sized with
+ * the already-incremented entry count (out-of-bounds read of the old,
+ * smaller allocation), and current_nr_mappings was bumped before the new
+ * table allocation succeeded, leaving the count larger than the live table
+ * on failure.
+ *
+ * @param map The map to allocate a new entry in
+ * @param target The value the new descriptor will map to
+ * @param odescriptor Receives the allocated descriptor ID on success
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	int new_descriptor;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (new_descriptor == map->current_nr_mappings)
+	{
+		/* no free descriptor, try to expand the table */
+		mali_descriptor_table * new_table, * old_table;
+		int old_nr_mappings = map->current_nr_mappings;
+		int new_nr_mappings = old_nr_mappings + BITS_PER_LONG;
+
+		if (old_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+		new_table = descriptor_table_alloc(new_nr_mappings);
+		if (NULL == new_table) goto unlock_and_exit;
+
+		/* Copy using the OLD size: sizing the copy by the new (larger)
+		 * count would read past the end of the old table's allocation. */
+		old_table = map->table;
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*old_nr_mappings) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, old_nr_mappings * sizeof(void*));
+
+		/* Publish the grown table only after the allocation succeeded, so
+		 * current_nr_mappings never exceeds the size of the live table. */
+		map->table = new_table;
+		map->current_nr_mappings = new_nr_mappings;
+		descriptor_table_free(old_table);
+	}
+
+	/* we have found a valid descriptor, set the value and usage bit */
+	_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+	map->table->mappings[new_descriptor] = target;
+	*odescriptor = new_descriptor;
+	err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	MALI_ERROR(err);
+}
+
+/**
+ * Invoke @a callback for every live descriptor in @a map while holding the
+ * mapping lock in read mode.
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+	int id;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(callback);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* Descriptor 0 is a reserved ID which never maps to anything, so start at 1. */
+	for (id = 1; id < map->current_nr_mappings; ++id)
+	{
+		if (!_mali_osk_test_bit(id, map->table->usage)) continue;
+
+		callback(id, map->table->mappings[id]);
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+/**
+ * Look up the value mapped to by @a descriptor.
+ *
+ * @param map The map to look the descriptor up in
+ * @param descriptor The descriptor ID to look up
+ * @param target Receives the stored value on success, NULL on failure
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	if ( (descriptor < 0) || (descriptor >= map->current_nr_mappings) || !_mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		*target = NULL;
+	}
+	else
+	{
+		*target = map->table->mappings[descriptor];
+		result = _MALI_OSK_ERR_OK;
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	MALI_ERROR(result);
+}
+
+/**
+ * Replace the value mapped to by @a descriptor.
+ *
+ * Fix relative to the previous version: the table entry is mutated, so the
+ * mapping lock must be taken in RW (writer) mode; the old RO (reader) mode
+ * allowed the store to race with concurrent readers of the same slot. A
+ * pointer assert is also added for parity with mali_descriptor_mapping_get.
+ *
+ * @param map The map to look the descriptor up in
+ * @param descriptor The descriptor ID whose value is replaced
+ * @param target The new value to store
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		map->table->mappings[descriptor] = target;
+		result = _MALI_OSK_ERR_OK;
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	MALI_ERROR(result);
+}
+
+/**
+ * Release @a descriptor so its ID can be reused.
+ *
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ * @return The value the descriptor mapped to, or NULL if it was not live
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+	void *previous = NULL;
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ( (0 <= descriptor) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		previous = map->table->mappings[descriptor];
+		map->table->mappings[descriptor] = NULL;
+		_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return previous;
+}
+
+/**
+ * Allocate a descriptor table able to hold @a count entries. The header,
+ * the usage bitmap and the mapping array live in one zeroed allocation;
+ * usage and mappings are interior pointers into it.
+ *
+ * @param count Number of entries (a multiple of BITS_PER_LONG)
+ * @return New table, or NULL on allocation failure
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+	const u32 usage_bytes = (sizeof(unsigned long) * count) / BITS_PER_LONG;
+	const u32 mapping_bytes = sizeof(void*) * count;
+	mali_descriptor_table * table;
+
+	table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + usage_bytes + mapping_bytes);
+	if (NULL != table)
+	{
+		u8 * base = (u8*)table;
+		table->usage = (u32*)(base + sizeof(mali_descriptor_table));
+		table->mappings = (void**)(base + sizeof(mali_descriptor_table) + usage_bytes);
+	}
+
+	return table;
+}
+
+/**
+ * Free a table returned by descriptor_table_alloc(). The usage bitmap and
+ * mapping array live inside the same allocation, so one free releases all.
+ */
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+	_mali_osk_free(table);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.h b/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000000..82ed94d6ffef27
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code; the
+ *         allocated descriptor ID is stored through @a descriptor
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.c b/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.c
new file mode 100644
index 00000000000000..167b01fd3e0dbd
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+/** Bookkeeping for one allocation handed out by the OS allocator. */
+typedef struct os_allocation
+{
+	u32 num_pages;                       /**< Number of CPU pages actually mapped */
+	u32 offset_start;                    /**< Offset into the descriptor where this allocation begins */
+	mali_allocation_engine * engine;     /**< Engine used to map; needed for the unmap call on release */
+	mali_memory_allocation * descriptor; /**< Descriptor the pages were mapped into */
+} os_allocation;
+
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator);
+
+/**
+ * Create an OS-memory-backed physical memory allocator.
+ *
+ * @param max_allocation Max bytes to allocate from the OS (rounded up here
+ *                       to a whole number of CPU pages)
+ * @param cpu_usage_adjust Added to mali physical addresses to obtain CPU
+ *                         physical addresses
+ * @param name Description of the allocator; stored by reference, so the
+ *             caller must keep the string alive for the allocator's lifetime
+ * @return New allocator, or NULL on allocation/lock-init failure
+ */
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+	mali_physical_memory_allocator * allocator;
+	os_allocator * info;
+
+	/* round the byte budget up to a whole number of CPU pages */
+	max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+	MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+	allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+	if (NULL != allocator)
+	{
+		info = _mali_osk_malloc(sizeof(os_allocator));
+		if (NULL != info)
+		{
+			info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+			info->num_pages_allocated = 0;
+			info->cpu_usage_adjust = cpu_usage_adjust;
+
+			info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+			if (NULL != info->mutex)
+			{
+				allocator->allocate = os_allocator_allocate;
+				allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+				allocator->destroy = os_allocator_destroy;
+				allocator->stat = os_allocator_stat;
+				allocator->ctx = info;
+				allocator->name = name;
+
+				return allocator;
+			}
+			/* lock init failed: unwind in reverse order of construction */
+			_mali_osk_free(info);
+		}
+		_mali_osk_free(allocator);
+	}
+
+	return NULL;
+}
+
+/**
+ * Report the number of bytes currently allocated from the OS.
+ *
+ * NOTE(review): pages are accounted in CPU-page units elsewhere in this
+ * file, but the byte total here is computed with the MALI page size --
+ * confirm the two sizes are equal on this platform or this statistic is off.
+ */
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+	os_allocator * info;
+	info = (os_allocator*)allocator->ctx;
+	return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+/**
+ * Destroy an allocator created by mali_os_allocator_create(): tear down the
+ * private state (mutex first) and then the allocator wrapper itself.
+ */
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+	os_allocator * state;
+
+	MALI_DEBUG_ASSERT_POINTER(allocator);
+	MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+
+	state = (os_allocator*)allocator->ctx;
+
+	_mali_osk_lock_term(state->mutex);
+	_mali_osk_free(state);
+	_mali_osk_free(allocator);
+}
+
+/**
+ * Allocate OS pages to back the range [*offset, descriptor->size) of
+ * @a descriptor.
+ *
+ * Maps one CPU page at a time through the allocation engine until the range
+ * is covered, the allocator's page budget runs out, or mapping fails.
+ * *offset is advanced by the number of bytes actually mapped; on success
+ * alloc_info is filled in so the pages can later be released via
+ * os_allocator_release().
+ *
+ * @return MALI_MEM_ALLOC_FINISHED when the whole range was covered,
+ *         MALI_MEM_ALLOC_PARTIAL when only some pages could be mapped,
+ *         MALI_MEM_ALLOC_NONE when no page was mapped,
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE on lock or fatal mapping errors
+ */
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+	u32 left;
+	os_allocator * info;
+	os_allocation * allocation;
+	int pages_allocated = 0;
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(engine);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT_POINTER(offset);
+	MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+	info = (os_allocator*)ctx;
+	left = descriptor->size - *offset;
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	/** @note this code may not work on Linux, or may require a more complex Linux implementation */
+	allocation = _mali_osk_malloc(sizeof(os_allocation));
+	if (NULL != allocation)
+	{
+		u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+		allocation->offset_start = *offset;
+		/* upper-bound page count; only used for the debug print below and
+		 * overwritten with the actual count once the loop finishes */
+		allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+		MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+		/* map page by page while bytes remain and the budget allows */
+		while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+		{
+			err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+			if ( _MALI_OSK_ERR_OK != err)
+			{
+				if ( _MALI_OSK_ERR_NOMEM == err)
+				{
+					/* 'Partial' allocation (or, out-of-memory on first page) */
+					break;
+				}
+
+				MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+				/* Fatal error, cleanup any previous pages allocated. */
+				if ( pages_allocated > 0 )
+				{
+					mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+					/* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+				}
+
+				pages_allocated = 0;
+
+				result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+				break;
+			}
+
+			/* Loop iteration */
+			if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+			else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+			pages_allocated++;
+
+			*offset += _MALI_OSK_CPU_PAGE_SIZE;
+		}
+
+		if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB Configured maximum OS memory usage: %d kB\n",
+				 (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+		/* Loop termination; decide on result */
+		if (pages_allocated)
+		{
+			MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+			if (left) result = MALI_MEM_ALLOC_PARTIAL;
+			else result = MALI_MEM_ALLOC_FINISHED;
+
+			/* Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+			 * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+			 * This is required for MALI to have the correct view of the memory.
+			 */
+			_mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+			allocation->num_pages = pages_allocated;
+			allocation->engine = engine;         /* Necessary to make the engine's unmap call */
+			allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+			info->num_pages_allocated += pages_allocated;
+
+			MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+			alloc_info->ctx = info;
+			alloc_info->handle = allocation;
+			alloc_info->release = os_allocator_release;
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+			_mali_osk_free( allocation );
+		}
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	return result;
+}
+
+/**
+ * Release callback for an allocation made by os_allocator_allocate():
+ * returns the pages to the allocator's budget, unmaps them through the
+ * engine recorded in the allocation and frees the bookkeeping struct.
+ */
+static void os_allocator_release(void * ctx, void * handle)
+{
+	os_allocator * info;
+	os_allocation * allocation;
+	mali_allocation_engine * engine;
+	mali_memory_allocation * descriptor;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(handle);
+
+	info = (os_allocator*)ctx;
+	allocation = (os_allocation*)handle;
+	engine = allocation->engine;
+	descriptor = allocation->descriptor;
+
+	MALI_DEBUG_ASSERT_POINTER( engine );
+	MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+	/* NOTE(review): on lock failure the allocation is deliberately leaked
+	 * rather than unmapped without serialization */
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return;
+	}
+
+	MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+	MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+	info->num_pages_allocated -= allocation->num_pages;
+
+	mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	_mali_osk_free(allocation);
+}
+
+/**
+ * Allocate a physically contiguous block of OS memory for Mali page tables.
+ *
+ * Starts at 2^6 CPU pages, shrinking the requested order first to respect
+ * the allocator's page budget and then again if the contiguous allocation
+ * itself fails, down to a single page.
+ *
+ * @return MALI_MEM_ALLOC_FINISHED on success (block filled in),
+ *         MALI_MEM_ALLOC_NONE when out of memory/budget,
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE when the mutex cannot be taken
+ */
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+	int allocation_order = 6; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+	void *virt = NULL;
+	u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+	os_allocator * info;
+
+	u32 cpu_phys_base;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	info = (os_allocator*)ctx;
+
+	/* Ensure we don't allocate more than we're supposed to from the ctx */
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	/* if the number of pages to be requested lead to exceeding the memory
+	 * limit in info->num_pages_max, reduce the size that is to be requested. */
+	while ( (info->num_pages_allocated + (1 << allocation_order) > info->num_pages_max)
+			&& _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+	{
+		if ( allocation_order > 0 ) {
+			--allocation_order;
+		} else {
+			/* return OOM */
+			_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+			return MALI_MEM_ALLOC_NONE;
+		}
+	}
+
+	/* try to allocate 2^(allocation_order) pages, if that fails, try
+	 * allocation_order-1 to allocation_order 0 (inclusive) */
+	while ( allocation_order >= 0 )
+	{
+		size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+		virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+		if (NULL != virt) break;
+
+		--allocation_order;
+	}
+
+	if ( NULL == virt )
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to allocate consistent memory. Is CONSISTENT_DMA_SIZE set too low?\n"));
+		/* return OOM */
+		_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+		return MALI_MEM_ALLOC_NONE;
+	}
+
+	MALI_DEBUG_PRINT(5, ("os_allocator_allocate_page_table_block: Allocation of order %i succeeded\n",
+			allocation_order));
+
+	/* we now know the size of the allocation since we know for what
+	 * allocation_order the allocation succeeded */
+	size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+
+
+	block->release = os_allocator_page_table_block_release;
+	block->ctx = ctx;
+	/* the order is smuggled through the opaque handle as a plain integer;
+	 * os_allocator_page_table_block_release casts it back */
+	block->handle = (void*)allocation_order;
+	block->size = size;
+	/* convert CPU physical address to a mali physical address */
+	block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+	block->mapping = virt;
+
+	info->num_pages_allocated += (1 << allocation_order);
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	return MALI_MEM_ALLOC_FINISHED;
+}
+
+/**
+ * Release callback for a page table block obtained from
+ * os_allocator_allocate_page_table_block(): recovers the allocation order
+ * from the handle, returns the pages to the budget and frees the IO region.
+ */
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+	os_allocator * info;
+	u32 allocation_order;
+	u32 pages_allocated;
+
+	MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+	info = (os_allocator*)page_table_block->ctx;
+
+	MALI_DEBUG_ASSERT_POINTER( info );
+
+	/* the allocate side stored the order directly in the handle pointer */
+	allocation_order = (u32)page_table_block->handle;
+
+	pages_allocated = 1 << allocation_order;
+
+	MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return;
+	}
+
+	MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+	info->num_pages_allocated -= pages_allocated;
+
+	/* Adjust phys_base from mali physical address to CPU physical address */
+	_mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.h b/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.h
new file mode 100644
index 00000000000000..0946169f79d07c
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.c b/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.c
new file mode 100644
index 00000000000000..137756060fe756
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+/* Private state behind the opaque mali_allocation_engine handle: the pair of
+ * address managers this engine dispatches to. */
+typedef struct memory_engine
+{
+    mali_kernel_mem_address_manager * mali_address;    /**< Manager for the Mali-side virtual address space */
+    mali_kernel_mem_address_manager * process_address; /**< Manager for the user-process address space */
+} memory_engine;
+
+/**
+ * Create an allocation engine bound to the given pair of address managers.
+ *
+ * Both managers must provide allocate/release/map_physical; only the process
+ * address manager must additionally provide unmap_physical (needed to back out
+ * user-space mappings on OS-allocation error paths).
+ *
+ * @param mali_address_manager    manager for the Mali virtual address space
+ * @param process_address_manager manager for the user process address space
+ * @return opaque engine handle, or NULL if allocation of the engine failed
+ */
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+    memory_engine * engine;
+
+    /* Mali Address Manager need not support unmap_physical */
+    MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+    MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+    MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+    MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+    /* Process Address Manager must support unmap_physical for OS allocation
+     * error path handling */
+    MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+    MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+    MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+    MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+    MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+    engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+    if (NULL == engine) return NULL;
+
+    engine->mali_address = mali_address_manager;
+    engine->process_address = process_address_manager;
+
+    return (mali_allocation_engine)engine;
+}
+
+/**
+ * Destroy an engine created by mali_allocation_engine_create().
+ * Frees only the engine object itself; the address managers are owned by the
+ * caller and are not touched.
+ */
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+    MALI_DEBUG_ASSERT_POINTER(engine);
+    _mali_osk_free(engine);
+}
+
+/**
+ * Allocate and map memory for a descriptor.
+ *
+ * Sequence: (1) reserve a Mali address range, (2) optionally reserve a
+ * user-space range (MAP_INTO_USERSPACE flag), (3) commit physical memory by
+ * walking the chain of physical allocators until one reports FINISHED.
+ * Allocators reporting PARTIAL contribute pages and the walk continues with
+ * the next allocator and a fresh allocation tracker.
+ *
+ * On success the descriptor may be linked into tracking_list and the function
+ * returns via MALI_SUCCESS from inside the switch below. Any failure unwinds
+ * in reverse order: release partial physical allocations, free the extra
+ * trackers, release the process range, release the Mali range.
+ *
+ * @param mem_engine          engine handle from mali_allocation_engine_create()
+ * @param descriptor          allocation descriptor; its list member must already
+ *                            be initialized (checked below)
+ * @param physical_allocators ordered chain of physical memory providers
+ * @param tracking_list       optional session list to link the descriptor into
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on failure
+ */
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+    memory_engine * engine = (memory_engine*)mem_engine;
+
+    MALI_DEBUG_ASSERT_POINTER(engine);
+    MALI_DEBUG_ASSERT_POINTER(descriptor);
+    MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+    /* ASSERT that the list member has been initialized, even if it won't be
+     * used for tracking. We need it to be initialized to see if we need to
+     * delete it from a list in the release function. */
+    MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+    if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+    {
+        _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+        if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+        {
+            res = engine->process_address->allocate(descriptor);
+        }
+        if ( _MALI_OSK_ERR_OK == res )
+        {
+            /* address space setup OK, commit physical memory to the allocation */
+            mali_physical_memory_allocator * active_allocator = physical_allocators;
+            struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+            u32 offset = 0;
+
+            while ( NULL != active_allocator )
+            {
+                switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+                {
+                    case MALI_MEM_ALLOC_FINISHED:
+                        if ( NULL != tracking_list )
+                        {
+                            /* Insert into the memory session list */
+                            /* ASSERT that it is not already part of a list */
+                            MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+                            _mali_osk_list_add( &descriptor->list, tracking_list );
+                        }
+
+                        /* NOTE: MALI_SUCCESS returns from the function here - the only success exit */
+                        MALI_SUCCESS; /* all done */
+                    case MALI_MEM_ALLOC_NONE:
+                        /* reuse current active_allocation_tracker */
+                        MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+                                              ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+                                              ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+                        active_allocator = active_allocator->next;
+                        break;
+                    case MALI_MEM_ALLOC_PARTIAL:
+                        if (NULL != active_allocator->next)
+                        {
+                            /* need a new allocation tracker */
+                            active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+                            if (NULL != active_allocation_tracker->next)
+                            {
+                                active_allocation_tracker = active_allocation_tracker->next;
+                                MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+                                                      ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+                                                      ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+                                active_allocator = active_allocator->next;
+                                break;
+                            }
+                        }
+                        /* FALL THROUGH */
+                    case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+                        active_allocator = NULL; /* end the while loop */
+                        break;
+                }
+            }
+
+            MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));
+
+            /* allocation failure, start cleanup */
+            /* loop over any potential partial allocations */
+            active_allocation_tracker = &descriptor->physical_allocation;
+            while (NULL != active_allocation_tracker)
+            {
+                /* handle blank trackers which will show up during failure */
+                if (NULL != active_allocation_tracker->release)
+                {
+                    active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+                }
+                active_allocation_tracker = active_allocation_tracker->next;
+            }
+
+            /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+            for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+            {
+                void * buf = active_allocation_tracker;
+                active_allocation_tracker = active_allocation_tracker->next;
+                _mali_osk_free(buf);
+            }
+
+            /* release the address spaces */
+
+            if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+            {
+                engine->process_address->release(descriptor);
+            }
+        }
+        engine->mali_address->release(descriptor);
+    }
+
+    MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+/**
+ * Full release of an allocation: unmap from Mali page tables (phase 1), then
+ * free the backing physical memory and user-space mapping (phase 2).
+ * The two phases are also callable separately by users needing staged teardown.
+ */
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+    mali_allocation_engine_release_pt1_mali_pagetables_unmap(mem_engine, descriptor);
+    mali_allocation_engine_release_pt2_physical_memory_free(mem_engine, descriptor);
+}
+
+/**
+ * Release phase 1: remove the allocation from the Mali address space.
+ * Idempotent - safe to call multiple times; only the first call has effect
+ * (guarantee provided by the underlying address manager, per the note below).
+ */
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+    memory_engine * engine = (memory_engine*)mem_engine;
+
+    MALI_DEBUG_ASSERT_POINTER(engine);
+    MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+    /* Calling: mali_address_manager_release() */
+    /* This function is allowed to be called several times, and it only does the release on the first call. */
+    engine->mali_address->release(descriptor);
+}
+
+/**
+ * Release phase 2: unlink from the session tracking list, release every
+ * physical allocation in the tracker chain, free the chained tracker objects
+ * (the first tracker is embedded in the descriptor and is not freed), and
+ * finally release the user-space address range if one was mapped.
+ *
+ * Unlike the failure path in allocate_memory(), every tracker here must have
+ * a release callback (asserted below) since the allocation succeeded.
+ */
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+    memory_engine * engine = (memory_engine*)mem_engine;
+    mali_physical_memory_allocation * active_allocation_tracker;
+
+    /* Remove this from a tracking list in session_data->memory_head */
+    if ( ! _mali_osk_list_empty( &descriptor->list ) )
+    {
+        _mali_osk_list_del( &descriptor->list );
+        /* Clear the list for debug mode, catch use-after-free */
+        MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+    }
+
+    active_allocation_tracker = &descriptor->physical_allocation;
+    while (NULL != active_allocation_tracker)
+    {
+        MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+        active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+        active_allocation_tracker = active_allocation_tracker->next;
+    }
+
+    /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+    for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+    {
+        void * buf = active_allocation_tracker;
+        active_allocation_tracker = active_allocation_tracker->next;
+        _mali_osk_free(buf);
+    }
+
+    if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+    {
+        engine->process_address->release(descriptor);
+    }
+}
+
+/**
+ * Map one physical block into the allocation at the given offset, first into
+ * the process (CPU) address space when requested, then into the Mali address
+ * space. If the Mali-side mapping fails, the already-established user-space
+ * mapping is rolled back.
+ *
+ * @param phys  Mali physical address of the block, or the magic value
+ *              MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC to ask the
+ *              process address manager to allocate the page itself (in which
+ *              case size must be one CPU page - asserted below).
+ * @param cpu_usage_adjust  delta between CPU and Mali physical addresses.
+ * @return _MALI_OSK_ERR_OK on success, otherwise the failing manager's error.
+ */
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+    _mali_osk_errcode_t err;
+    memory_engine * engine = (memory_engine*)mem_engine;
+    _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+    MALI_DEBUG_ASSERT_POINTER(engine);
+    MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+    MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+    MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+    MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+    /* Handle process address manager first, because we may need them to
+     * allocate the physical page */
+    if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+    {
+        /* Handle OS-allocated specially, since an adjustment may be required */
+        if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+        {
+            MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+            /* Set flags to use on error path */
+            unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+            /* map_physical writes the OS-chosen CPU physical address back
+             * through &phys on success */
+            err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+            /* Adjust for cpu physical address to mali physical address */
+            /* (harmless when err != OK: phys is not used on the error return below) */
+            phys -= cpu_usage_adjust;
+        }
+        else
+        {
+            u32 cpu_phys;
+            /* Adjust mali physical address to cpu physical address */
+            cpu_phys = phys + cpu_usage_adjust;
+            err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+        }
+
+        if ( _MALI_OSK_ERR_OK != err )
+        {
+            MALI_ERROR( err );
+        }
+    }
+
+    MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+    /* Mali address manager must use the physical address - no point in asking
+     * it to allocate another one for us */
+    MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+    err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+    if ( _MALI_OSK_ERR_OK != err )
+    {
+        if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+        {
+            MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+            engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+        }
+
+        MALI_ERROR( err );
+    }
+
+    MALI_SUCCESS;
+}
+
+/**
+ * Unmap a physical block from both address spaces (the reverse of
+ * mali_allocation_engine_map_physical).
+ *
+ * @param unmap_flags per-page flags; must include
+ *        _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR for pages the OS
+ *        allocated at map time.
+ */
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+    memory_engine * engine = (memory_engine*)mem_engine;
+
+    MALI_DEBUG_ASSERT_POINTER(engine);
+    MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+    MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+    MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+    MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+    if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+    {
+        /* Mandatory for process_address manager to have an unmap function */
+        engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+    }
+
+    /* Optional for mali_address manager to have an unmap function */
+    if ( NULL != engine->mali_address->unmap_physical )
+    {
+        engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+    }
+}
+
+
+/**
+ * Allocate a block of Mali page tables by walking the chain of physical
+ * providers until one reports FINISHED. PARTIAL is invalid for page-table
+ * allocation (logged below) and aborts the walk like INTERNAL_FAILURE.
+ * Note the engine parameter is unused here; allocation goes straight to the
+ * providers.
+ *
+ * @return _MALI_OSK_ERR_OK on success (via MALI_SUCCESS inside the switch),
+ *         _MALI_OSK_ERR_FAULT if no provider could satisfy the request.
+ */
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+    mali_physical_memory_allocator * active_allocator = physical_provider;
+
+    MALI_DEBUG_ASSERT_POINTER(descriptor);
+    MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+    while ( NULL != active_allocator )
+    {
+        switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+        {
+            case MALI_MEM_ALLOC_FINISHED:
+                MALI_SUCCESS; /* all done */
+            case MALI_MEM_ALLOC_NONE:
+                /* try next */
+                MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+                                      ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+                                      ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+                active_allocator = active_allocator->next;
+                break;
+            case MALI_MEM_ALLOC_PARTIAL:
+                MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+                /* FALL THROUGH */
+            case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+                MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+                active_allocator = NULL; /* end the while loop */
+                break;
+        }
+    }
+
+    MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+/**
+ * Debug helper: print the allocator chain in preference order (alloc_order
+ * and name of each allocator). Output only appears in debug builds via
+ * MALI_DEBUG_PRINT.
+ */
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+    mali_physical_memory_allocator * active_allocator = physical_provider;
+    MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+    MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+    while ( NULL != active_allocator )
+    {
+        if ( NULL != active_allocator->name )
+        {
+            MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+        }
+        else
+        {
+            MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+        }
+        active_allocator = active_allocator->next;
+    }
+
+}
+
+/**
+ * Sum the reported memory usage over an allocator chain.
+ * Allocators without a stat callback contribute zero.
+ *
+ * @param allocator head of the allocator chain (may be NULL - returns 0)
+ * @return total usage in the units reported by the allocators' stat functions
+ */
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
+{
+    u32 sum = 0;
+    while(NULL != allocator)
+    {
+        /* Only count allocators that have set up a stat function. */
+        if(allocator->stat)
+            sum += allocator->stat(allocator);
+
+        allocator = allocator->next;
+    }
+
+    return sum;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.h b/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.h
new file mode 100644
index 00000000000000..cda74c30e85b74
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_memory_engine.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+/* Opaque handle to an allocation engine (see mali_allocation_engine_create). */
+typedef void * mali_allocation_engine;
+
+/* Result of a physical allocator callback: fully satisfied, partially
+ * satisfied (continue with next allocator), nothing allocated, or hard error. */
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation;
+
+typedef struct mali_physical_memory_allocation
+{
+    void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+    void * ctx;                                 /**< Allocator-private context passed back to release */
+    void * handle;                              /**< Allocator-private handle passed back to release */
+    struct mali_physical_memory_allocation * next; /**< Next tracker when several allocators contributed */
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+    void (*release)(struct mali_page_table_block *page_table_block);
+    void * ctx;
+    void * handle;
+    u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+    u32 phys_base; /**< Mali physical address */
+    mali_io_address mapping; /**< Kernel-side mapping of the block */
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/* Per-allocation behavior flags (bitmask). */
+typedef enum
+{
+    MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1, /**< Also map the allocation into the calling process */
+    MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,     /**< Reserve a guard page after the allocation */
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+/* Descriptor for one memory allocation, shared between the allocation engine
+ * and the address managers. */
+typedef struct mali_memory_allocation
+{
+    /* Information about the allocation */
+    void * mapping; /**< CPU virtual address where the memory is mapped at */
+    u32 mali_address; /**< The Mali seen address of the memory allocation */
+    u32 size; /**< Size of the allocation */
+    u32 permission; /**< Permission settings */
+    mali_memory_allocation_flag flags;
+
+    _mali_osk_lock_t * lock;
+
+    /* Manager specific information pointers */
+    void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+    void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+    mali_physical_memory_allocation physical_allocation; /**< Head of the physical allocation tracker chain */
+
+    _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/* One provider of physical memory. Providers are chained via 'next' and tried
+ * in alloc_order preference by the allocation engine. */
+typedef struct mali_physical_memory_allocator
+{
+    mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+    mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+    void (*destroy)(struct mali_physical_memory_allocator * allocator);
+    u32 (*stat)(struct mali_physical_memory_allocator * allocator); /**< Optional usage-report hook; may be NULL */
+    void * ctx;
+    const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+    u32 alloc_order; /**< Order in which the allocations should happen */
+    struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
+
+/* Interface to one address space (Mali-side or process-side). */
+typedef struct mali_kernel_mem_address_manager
+{
+    _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+    void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+    /**
+     * Function called for each physical sub allocation.
+     * Called for each physical block allocated by the physical memory manager.
+     * @param[in] descriptor The memory descriptor in question
+     * @param[in] off Offset from the start of range
+     * @param[in,out] phys_addr A pointer to the physical address of the start of the
+     * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+     * is used, this requests the function to allocate the physical page
+     * itself, and return it through the pointer provided.
+     * @param[in] size Length in bytes of the physical block
+     * @return _MALI_OSK_ERR_OK on success.
+     * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+     * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+     * does not support allocating physical pages itself.
+     */
+    _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+    /**
+     * Function called to remove a physical sub allocation.
+     * Called on error paths where one of the address managers fails.
+     *
+     * @note this is optional. For address managers where this is not
+     * implemented, the value of this member is NULL. The memory engine
+     * currently does not require the mali address manager to be able to
+     * unmap individual pages, but the process address manager must have this
+     * capability.
+     *
+     * @param[in] descriptor The memory descriptor in question
+     * @param[in] off Offset from the start of range
+     * @param[in] size Length in bytes of the physical block
+     * @param[in] flags flags to use on a per-page basis. For OS-allocated
+     * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+     */
+    void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+/* Return types declared as _mali_osk_errcode_t (not plain int) so the
+ * prototypes match the definitions in mali_kernel_memory_engine.c; the type
+ * is already in use elsewhere in this header. */
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.c b/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.c
new file mode 100644
index 00000000000000..a374dbfed7009f
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 1000
+
+/* IRQ-safe spinlock guarding the time accounting variables below. */
+static _mali_osk_lock_t *time_data_lock;
+
+/* Number of cores currently executing work; the 0<->1 transitions drive the
+ * busy-period bookkeeping in core_start/core_end. */
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;      /* ns timestamp when the current report period began */
+static u64 work_start_time = 0;        /* ns timestamp when the GPU last became busy; 0 = idle */
+static u64 accumulated_work_time = 0;  /* ns of busy time accumulated in the current period */
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE; /* whether the periodic timer is armed (guarded by time_data_lock) */
+
+
+/**
+ * Timer callback: compute GPU utilization over the elapsed period as a value
+ * out of 256 and report it via mali_gpu_utilization_handler().
+ *
+ * If no work was seen this period, reports 0 and lets the timer lapse (it is
+ * re-armed by mali_utilization_core_start when new work arrives); otherwise
+ * starts a new period and re-arms the timer.
+ */
+static void calculate_gpu_utilization(void* arg)
+{
+    u64 time_now;
+    u64 time_period;
+    u32 leading_zeroes;
+    u32 shift_val;
+    u32 work_normalized;
+    u32 period_normalized;
+    u32 utilization;
+
+    _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+    if (accumulated_work_time == 0 && work_start_time == 0)
+    {
+        /* Don't reschedule timer, this will be started if new work arrives */
+        timer_running = MALI_FALSE;
+
+        _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+        /* No work done for this period, report zero usage */
+        mali_gpu_utilization_handler(0);
+
+        return;
+    }
+
+    time_now = _mali_osk_time_get_ns();
+    time_period = time_now - period_start_time;
+
+    /* If we are currently busy, update working period up to now */
+    if (work_start_time != 0)
+    {
+        accumulated_work_time += (time_now - work_start_time);
+        work_start_time = time_now;
+    }
+
+    /*
+     * We have two 64-bit values, a dividend and a divisor.
+     * To avoid dependencies to a 64-bit divider, we shift down the two values
+     * equally first.
+     * We shift the dividend up and possibly the divisor down, making the result X in 256.
+     */
+
+    /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+    leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+    shift_val = 32 - leading_zeroes;
+    work_normalized = (u32)(accumulated_work_time >> shift_val);
+    period_normalized = (u32)(time_period >> shift_val);
+
+    /*
+     * Now, we should report the usage in parts of 256
+     * this means we must shift up the dividend or down the divisor by 8
+     * (we could do a combination, but we just use one for simplicity,
+     * but the end result should be good enough anyway)
+     */
+    if (period_normalized > 0x00FFFFFF)
+    {
+        /* The divisor is so big that it is safe to shift it down */
+        period_normalized >>= 8;
+    }
+    else
+    {
+        /*
+         * The divisor is so small that we can shift up the dividend, without losing any data.
+         * (dividend is always smaller than the divisor)
+         */
+        work_normalized <<= 8;
+    }
+
+    utilization = work_normalized / period_normalized;
+
+    accumulated_work_time = 0;
+    period_start_time = time_now; /* starting a new period */
+
+    _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+    _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+
+    mali_gpu_utilization_handler(utilization);
+}
+
+/**
+ * Initialize the GPU utilization metrics reporting.
+ *
+ * Creates the time-accounting lock, the running-core counter and the periodic
+ * utilization timer. The timer is not armed here; it is first armed by
+ * mali_utilization_core_start() when work arrives.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if a resource
+ *         could not be created (anything created earlier is torn down again).
+ */
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+    time_data_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |
+                                         _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+    if (NULL == time_data_lock)
+    {
+        return _MALI_OSK_ERR_FAULT;
+    }
+
+    _mali_osk_atomic_init(&num_running_cores, 0);
+
+    utilization_timer = _mali_osk_timer_init();
+    if (NULL == utilization_timer)
+    {
+        /* Fix: also undo the atomic counter init so this error path releases
+         * everything created above, mirroring mali_utilization_term(). */
+        _mali_osk_atomic_term(&num_running_cores);
+        _mali_osk_lock_term(time_data_lock);
+        return _MALI_OSK_ERR_FAULT;
+    }
+    _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+    return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Stop the utilization timer, used during system suspend.
+ *
+ * NOTE(review): timer_running is cleared here without holding time_data_lock,
+ * unlike everywhere else it is written - presumably safe because callers
+ * quiesce the GPU before suspending; confirm against the suspend path.
+ */
+void mali_utilization_suspend(void)
+{
+    if (NULL != utilization_timer)
+    {
+        _mali_osk_timer_del(utilization_timer);
+        timer_running = MALI_FALSE;
+    }
+}
+
+/**
+ * Tear down the utilization metrics reporting: cancel and destroy the timer,
+ * then release the core counter and the time-accounting lock (reverse order
+ * of mali_utilization_init).
+ */
+void mali_utilization_term(void)
+{
+    if (NULL != utilization_timer)
+    {
+        _mali_osk_timer_del(utilization_timer);
+        timer_running = MALI_FALSE;
+        _mali_osk_timer_term(utilization_timer);
+        utilization_timer = NULL;
+    }
+
+    _mali_osk_atomic_term(&num_running_cores);
+
+    _mali_osk_lock_term(time_data_lock);
+}
+
+/**
+ * Called when a core starts executing a job.
+ *
+ * Only the 0 -> 1 transition of the running-core count matters: it marks the
+ * whole GPU as busy, records the busy-start timestamp, and (re)arms the
+ * utilization timer if it lapsed while idle. Note the timer is armed after
+ * dropping the lock, matching the lock ordering in calculate_gpu_utilization.
+ *
+ * @param time_now caller-sampled timestamp in ns (sampled before taking the lock)
+ */
+void mali_utilization_core_start(u64 time_now)
+{
+    if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
+    {
+        /*
+         * We went from zero cores working, to one core working,
+         * we now consider the entire GPU for being busy
+         */
+
+        _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+        if (time_now < period_start_time)
+        {
+            /*
+             * This might happen if the calculate_gpu_utilization() was able
+             * to run between the sampling of time_now and us grabbing the lock above
+             */
+            time_now = period_start_time;
+        }
+
+        work_start_time = time_now;
+        if (timer_running != MALI_TRUE)
+        {
+            timer_running = MALI_TRUE;
+            period_start_time = work_start_time; /* starting a new period */
+
+            _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+            _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+        }
+        else
+        {
+            _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+        }
+    }
+}
+
+/**
+ * Called when a core finishes executing a job.
+ *
+ * Only the 1 -> 0 transition matters: it marks the whole GPU idle, folds the
+ * just-ended busy interval into accumulated_work_time and clears
+ * work_start_time (0 means idle).
+ *
+ * @param time_now caller-sampled timestamp in ns (sampled before taking the lock)
+ */
+void mali_utilization_core_end(u64 time_now)
+{
+    if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
+    {
+        /*
+         * No more cores are working, so accumulate the time we were busy.
+         */
+        _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+        if (time_now < work_start_time)
+        {
+            /*
+             * This might happen if the calculate_gpu_utilization() was able
+             * to run between the sampling of time_now and us grabbing the lock above
+             */
+            time_now = work_start_time;
+        }
+
+        accumulated_work_time += (time_now - work_start_time);
+        work_start_time = 0;
+
+        _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+    }
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.h b/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.h
new file mode 100644
index 00000000000000..1f605179d8fc7d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to start executing a job
+ *
+ * @param time_now current timestamp in nanoseconds
+ */
+void mali_utilization_core_start(u64 time_now);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has completed executing a job
+ *
+ * @param time_now current timestamp in nanoseconds
+ */
+void mali_utilization_core_end(u64 time_now);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_kernel_vsync.c b/drivers/parrot/gpu/mali200/common/mali_kernel_vsync.c
new file mode 100644
index 00000000000000..63c9f5b71c5893
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_kernel_vsync.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/**
+ * User-kernel call: record a vsync wait begin/end event from user space.
+ *
+ * With timeline profiling enabled, translates the event into a profiling
+ * suspend/resume entry (pid/tid added here so user space only needs one
+ * kernel call). Without profiling the event is only debug-printed.
+ *
+ * @param args uk call arguments carrying the vsync event type
+ * @return always _MALI_OSK_ERR_OK (via MALI_SUCCESS)
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+    _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+    MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+    /*
+     * Manually generate user space events in kernel space.
+     * This saves user space from calling kernel space twice in this case.
+     * We just need to remember to add pid and tid manually.
+     */
+    if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
+    {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+                                      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+    }
+
+    if (event==_MALI_UK_VSYNC_EVENT_END_WAIT)
+    {
+
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+                                      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+    }
+#endif
+
+    MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+    MALI_SUCCESS;
+}
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_l2_cache.c b/drivers/parrot/gpu/mali200/common/mali_l2_cache.c
new file mode 100644
index 00000000000000..aa5cc54d8e9aaf
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_l2_cache.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_pm.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /*unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 core_id; /**< Unique core ID */
+ _mali_osk_lock_t *command_lock; /**< Serialize all L2 cache commands */
+ _mali_osk_lock_t *counter_lock; /**< Synchronize L2 cache counter access */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+};
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+/* Allocate and initialise one L2 cache core object for @resource.
+ *
+ * Acquisition order: core struct -> HW register mapping -> command lock
+ * -> counter lock -> HW reset. On full success the core is registered in
+ * mali_global_l2_cache_cores[] and returned. On any failure the nested
+ * error paths tear down everything acquired so far in reverse order and
+ * NULL is returned. At most MALI_MAX_NUMBER_OF_L2_CACHE_CORES cores may
+ * be created. */
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+	struct mali_l2_cache_core *cache = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+	if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES)
+	{
+		MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+		return NULL;
+	}
+
+	cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+	if (NULL != cache)
+	{
+		/* core_id doubles as the registration index in the global table */
+		cache->core_id = mali_global_num_l2_cache_cores;
+		cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+		cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE))
+		{
+			/* Spinlocks: both locks may be taken from non-sleepable context */
+			cache->command_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+			                                          0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+			if (NULL != cache->command_lock)
+			{
+				cache->counter_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+				                                          0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
+				if (NULL != cache->counter_lock)
+				{
+					if (_MALI_OSK_ERR_OK == mali_l2_cache_reset(cache))
+					{
+						mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+						mali_global_num_l2_cache_cores++;
+
+						return cache;
+					}
+					else
+					{
+						MALI_PRINT_ERROR(("Mali L2 cache: Failed to reset L2 cache core %s\n", cache->hw_core.description));
+					}
+
+					_mali_osk_lock_term(cache->counter_lock);
+				}
+				else
+				{
+					MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+				}
+
+				_mali_osk_lock_term(cache->command_lock);
+			}
+			else
+			{
+				MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+			}
+
+			mali_hw_core_delete(&cache->hw_core);
+		}
+
+		_mali_osk_free(cache);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+	}
+
+	return NULL;
+}
+
+/* Destroy an L2 cache core: restore the HW registers to power-on defaults,
+ * release both locks, unmap the register region and remove the entry from
+ * the global core table before freeing the object.
+ * NOTE(review): the matching slot is NULLed but trailing entries are not
+ * compacted while the count is decremented, leaving a hole — looks safe
+ * only if cores are deleted in reverse creation order; confirm callers. */
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+	u32 i;
+
+	/* reset to defaults */
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+	_mali_osk_lock_term(cache->counter_lock);
+	_mali_osk_lock_term(cache->command_lock);
+	mali_hw_core_delete(&cache->hw_core);
+
+	for (i = 0; i < mali_global_num_l2_cache_cores; i++)
+	{
+		if (mali_global_l2_cache_cores[i] == cache)
+		{
+			mali_global_l2_cache_cores[i] = NULL;
+			mali_global_num_l2_cache_cores--;
+		}
+	}
+
+	_mali_osk_free(cache);
+}
+
+/* Return the unique ID assigned to this core at creation time. */
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+	return cache->core_id;
+}
+
+/* Select the event source for HW performance counter 0 of @cache.
+ * Passing MALI_HW_CORE_NO_COUNTER disables it (SRC0 register written 0).
+ * The cached source and the register write are serialised by the counter
+ * lock. Always returns MALI_TRUE. */
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+	u32 value = 0; /* disabled src */
+
+	MALI_DEBUG_ASSERT_POINTER(cache);
+	MALI_DEBUG_ASSERT(counter < (1 << 7)); /* the possible values are 0-127 */
+
+	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+	cache->counter_src0 = counter;
+
+	if (counter != MALI_HW_CORE_NO_COUNTER)
+	{
+		value = counter;
+	}
+
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+
+	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+	return MALI_TRUE;
+}
+
+/* Select the event source for HW performance counter 1 of @cache.
+ * Mirror of mali_l2_cache_core_set_counter_src0 for the SRC1 register.
+ * Always returns MALI_TRUE. */
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+	u32 value = 0; /* disabled src */
+
+	MALI_DEBUG_ASSERT_POINTER(cache);
+	MALI_DEBUG_ASSERT(counter < (1 << 7)); /* the possible values are 0-127 */
+
+	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+	cache->counter_src1 = counter;
+
+	if (counter != MALI_HW_CORE_NO_COUNTER)
+	{
+		value = counter;
+	}
+
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+
+	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+	return MALI_TRUE;
+}
+
+/* Return the currently configured source for performance counter 0
+ * (MALI_HW_CORE_NO_COUNTER when disabled). Lock-free read of the cache. */
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+	return cache->counter_src0;
+}
+
+/* Return the currently configured source for performance counter 1
+ * (MALI_HW_CORE_NO_COUNTER when disabled). Lock-free read of the cache. */
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+	return cache->counter_src1;
+}
+
+/* Snapshot both performance counter sources and, for each counter that is
+ * enabled, its current HW value, all under the counter lock so source and
+ * value stay consistent. *value0/*value1 are left untouched when the
+ * corresponding counter is disabled.
+ * Caller must hold the PM lock and know that we are powered on. */
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+	MALI_DEBUG_ASSERT(NULL != src0);
+	MALI_DEBUG_ASSERT(NULL != value0);
+	MALI_DEBUG_ASSERT(NULL != src1);
+	MALI_DEBUG_ASSERT(NULL != value1);
+
+	/* Caller must hold the PM lock and know that we are powered on */
+
+	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+	*src0 = cache->counter_src0;
+	*src1 = cache->counter_src1;
+
+	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+	{
+		*value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+	}
+
+	/* Fixed: this guard tested counter_src0 (copy-paste), so VAL1 was read
+	 * or skipped based on counter 0's enable state instead of counter 1's. */
+	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+	{
+		*value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+	}
+
+	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Return the registered L2 cache core at @index, or NULL when the index
+ * is out of range (the slot itself may also be NULL after a delete). */
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+	if (MALI_MAX_NUMBER_OF_L2_CACHE_CORES > index)
+	{
+		return mali_global_l2_cache_cores[index];
+	}
+
+	return NULL;
+}
+
+/* Return the number of L2 cache cores currently registered. */
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+	return mali_global_num_l2_cache_cores;
+}
+
+/* Return the compile-time upper bound on the number of L2 cache cores. */
+u32 mali_l2_cache_core_get_max_num_l2_cores(void)
+{
+	return MALI_MAX_NUMBER_OF_L2_CACHE_CORES;
+}
+
+/* Bring the L2 cache HW to a known state: invalidate all lines, enable
+ * cacheable accesses and read allocation, program the outstanding-read
+ * limit, and re-program any performance counter sources that were enabled
+ * before the reset. Always returns _MALI_OSK_ERR_OK. */
+_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+	/* Invalidate cache (just to keep it in a known state at startup) */
+	mali_l2_cache_invalidate_all(cache);
+
+	/* Enable cache */
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+	mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+	/* Restart any performance counters (if enabled) */
+	_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+	{
+		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+	}
+
+	if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+	{
+		mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+	}
+
+	_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Invalidate the entire L2 cache by issuing the CLEAR_ALL command.
+ * Returns the result of the (serialised) command submission. */
+_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache)
+{
+	return mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+/* Invalidate @num_pages individual pages (addresses in @pages) via the
+ * CLEAR_PAGE command register. All pages are attempted even if some fail;
+ * the last failing error code (or _MALI_OSK_ERR_OK) is returned. */
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages)
+{
+	u32 i;
+	_mali_osk_errcode_t ret1, ret = _MALI_OSK_ERR_OK;
+
+	for (i = 0; i < num_pages; i++)
+	{
+		ret1 = mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[i]);
+		if (_MALI_OSK_ERR_OK != ret1)
+		{
+			/* remember the failure but keep invalidating the rest */
+			ret = ret1;
+		}
+	}
+
+	return ret;
+}
+
+/* Acquire the global PM lock and report whether the GPU is powered on.
+ * The power state cannot change until mali_l2_cache_unlock_power_state()
+ * releases the lock, so register access is safe while MALI_TRUE. */
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
+{
+	/*
+	 * Take PM lock and check power state.
+	 * Returns MALI_TRUE if module is powered on.
+	 * Power state will not change until mali_l2_cache_unlock_power_state() is called.
+	 */
+	mali_pm_lock();
+	return mali_pm_is_powered_on();
+}
+
+/* Counterpart of mali_l2_cache_lock_power_state(): drop the PM lock,
+ * allowing power-state transitions again. @cache is unused. */
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+	/* Release PM lock */
+	mali_pm_unlock();
+}
+
+/* -------- local helper functions below -------- */
+
+
+/* Write @val to command register @reg once the HW command handler is idle.
+ * Serialised by the command lock because the L2 cache silently ignores
+ * commands while busy. Polls the status register up to loop_count times
+ * (a pure busy-wait, no delay between reads) and fails with
+ * _MALI_OSK_ERR_FAULT if the handler never goes idle. */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+	int i = 0;
+	const int loop_count = 100000;
+
+	/*
+	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+	 * The L2 cache will ignore commands if it is busy.
+	 */
+	_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+	/* First, wait for L2 cache command handler to go idle */
+
+	for (i = 0; i < loop_count; i++)
+	{
+		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+		{
+			break;
+		}
+	}
+
+	if (i == loop_count)
+	{
+		_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+		MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+		MALI_ERROR( _MALI_OSK_ERR_FAULT );
+	}
+
+	/* then issue the command */
+	mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+	_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_l2_cache.h b/drivers/parrot/gpu/mali200/common/mali_l2_cache.h
new file mode 100644
index 00000000000000..5a8e4dacd7e924
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_l2_cache.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+
+struct mali_l2_cache_core;
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+u32 mali_l2_cache_core_get_max_num_l2_cores(void);
+
+_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+
+_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache);
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_mem_validation.c b/drivers/parrot/gpu/mali200/common/mali_mem_validation.c
new file mode 100644
index 00000000000000..ea9c4289a1a157
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mem_validation.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+/* Install the single allowed MEM_VALIDATION range from @resource.
+ * Rejects a second registration (only one range is supported — the
+ * validator is a module-level singleton) and any base/size that is not
+ * CPU-page aligned. Returns _MALI_OSK_ERR_OK on success. */
+_mali_osk_errcode_t mali_mem_validation_add_range(const _mali_osk_resource_t *resource)
+{
+	/* Check that no other MEM_VALIDATION resources exist */
+	if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base)
+	{
+		MALI_PRINT_ERROR(("Failed to add MEM_VALIDATION resource %s; another range is already specified\n", resource->description));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Check restrictions on page alignment */
+	if ((0 != (resource->base & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+	    (0 != (resource->size & (~_MALI_OSK_CPU_PAGE_MASK))))
+	{
+		MALI_PRINT_ERROR(("Failed to add MEM_VALIDATION resource %s; incorrect alignment\n", resource->description));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_mem_validator.phys_base = resource->base;
+	mali_mem_validator.size = resource->size;
+	MALI_DEBUG_PRINT(2, ("Memory Validator '%s' installed for Mali physical address base=0x%08X, size=0x%08X\n",
+	                     resource->description, mali_mem_validator.phys_base, mali_mem_validator.size));
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Validate that [phys_addr, phys_addr+size) is a non-empty, non-wrapping,
+ * page-aligned range lying entirely inside the registered validator range.
+ * Returns _MALI_OSK_ERR_OK when accepted, _MALI_OSK_ERR_FAULT otherwise
+ * (including when size is zero or phys_addr+size overflows u32). */
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+	if (phys_addr < (phys_addr + size)) /* Don't allow overflow (or zero size) */
+	{
+		if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+		    (0 == ( size     & (~_MALI_OSK_CPU_PAGE_MASK))))
+		{
+			/* Both first and last byte of the request must fall inside the
+			 * installed range; -1 arithmetic keeps the end check inclusive. */
+			if ((phys_addr          >= mali_mem_validator.phys_base) &&
+			    ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+			    (phys_addr          <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+			    ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) )
+			{
+				MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+				return _MALI_OSK_ERR_OK;
+			}
+		}
+	}
+
+	MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+	return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_mem_validation.h b/drivers/parrot/gpu/mali200/common/mali_mem_validation.h
new file mode 100644
index 00000000000000..2043b44f89c1e4
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(const _mali_osk_resource_t * resource);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_memory.c b/drivers/parrot/gpu/mali200/common/mali_memory.c
new file mode 100644
index 00000000000000..7a11d1a9187011
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_memory.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_memory.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_session.h"
+#include "mali_l2_cache.h"
+#include "mali_cluster.h"
+#include "mali_group.h"
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_list.h"
+#include "mali_osk_bitops.h"
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+typedef struct dedicated_memory_info
+{
+ u32 base;
+ u32 size;
+ struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size_allocated;
+ ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size;
+} external_mem_allocation;
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * This function will fail if it is unable to put the MMU in stall mode (which
+ * might be the case if a page fault is also being processed).
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list;
+ u32 * usage_map;
+ u32 usage_count;
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial;
+ _mali_osk_list_t full;
+ /* we never hold on to a empty allocation */
+} mali_mmu_page_table_allocations;
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
+
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+static void mali_mmu_page_table_cache_destroy(void);
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+/* called during module init */
+/* Module-init entry point for the memory subsystem: create the MMU page
+ * table cache, then the allocation engine wired to the Mali address
+ * manager and the process (CPU mapping) address manager. Propagates the
+ * page-table-cache error code on failure. */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_PRINT(2, ("Memory system initializing\n"));
+
+	err = mali_mmu_page_table_cache_create();
+	if(_MALI_OSK_ERR_OK != err)
+	{
+		MALI_ERROR(err);
+	}
+
+	memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+	MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+	MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+/* Module-unload counterpart of mali_memory_initialize(): destroy the page
+ * table cache, release every registered dedicated memory region, destroy
+ * all physical memory allocators, and finally the allocation engine. */
+void mali_memory_terminate(void)
+{
+	MALI_DEBUG_PRINT(2, ("Memory system terminating\n"));
+
+	mali_mmu_page_table_cache_destroy();
+
+	/* walk and free the dedicated-memory registration list */
+	while ( NULL != mem_region_registrations)
+	{
+		dedicated_memory_info * m;
+		m = mem_region_registrations;
+		mem_region_registrations = m->next;
+		_mali_osk_mem_unreqregion(m->base, m->size);
+		_mali_osk_free(m);
+	}
+
+	/* each allocator frees itself via its destroy callback */
+	while ( NULL != physical_memory_allocators)
+	{
+		mali_physical_memory_allocator * m;
+		m = physical_memory_allocators;
+		physical_memory_allocators = m->next;
+		m->destroy(m);
+	}
+
+	if (NULL != memory_engine)
+	{
+		mali_allocation_engine_destroy(memory_engine);
+		memory_engine = NULL;
+	}
+}
+
+/* Per-session memory setup: create the descriptor mapping table, the
+ * session memory lock, and the empty allocation list.
+ * NOTE(review): on lock-creation failure this frees @session_data itself,
+ * not just what was created here — looks like ownership transfers to this
+ * function on error; confirm the caller does not also free it. */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+{
+	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+	/* create descriptor mapping table */
+	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+	if (NULL == session_data->descriptor_mapping)
+	{
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session_data->memory_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+	                                                | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+	if (NULL == session_data->memory_lock)
+	{
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* Init the session's memory allocation list */
+	_MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+	MALI_SUCCESS;
+}
+
+/* Per-entry callback for mali_descriptor_mapping_call_for_each() during
+ * session teardown: release the allocation behind @map_target through the
+ * memory engine and free the descriptor itself. */
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+	mali_memory_allocation * descriptor;
+
+	descriptor = (mali_memory_allocation*)map_target;
+
+	MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+	MALI_DEBUG_ASSERT(descriptor);
+
+	mali_allocation_engine_release_memory(memory_engine, descriptor);
+	_mali_osk_free(descriptor);
+}
+
+/* Tear down all memory state of a dying session: unmap every allocation
+ * still on the session list (retrying with a short delay whenever the MMU
+ * cannot be stalled, e.g. during page-fault handling), clean out and
+ * destroy the descriptor mapping table, and free the session lock. */
+void mali_memory_session_end(struct mali_session_data *session_data)
+{
+	MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+	if (NULL == session_data)
+	{
+		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+		return;
+	}
+
+#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
+#else
+	{
+		_mali_osk_errcode_t err;
+		err = _MALI_OSK_ERR_BUSY;
+		while (err == _MALI_OSK_ERR_BUSY)
+		{
+			/* Lock the session so we can modify the memory list */
+			_mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+			err = _MALI_OSK_ERR_OK;
+
+			/* Free all memory engine allocations */
+			if (0 == _mali_osk_list_empty(&session_data->memory_head))
+			{
+				mali_memory_allocation *descriptor;
+				mali_memory_allocation *temp;
+				_mali_uk_mem_munmap_s unmap_args;
+
+				MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+				unmap_args.ctx = session_data;
+
+				/* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+				_MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+				{
+					/* Fixed: the format string has three conversions but four
+					 * arguments were passed (descriptor->size twice), so
+					 * "at 0x%x" printed the size instead of the mapping. */
+					MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+							descriptor->mali_address, descriptor->size, descriptor->mapping)
+							);
+					/* ASSERT that the descriptor's lock references the correct thing */
+					MALI_DEBUG_ASSERT( descriptor->lock == session_data->memory_lock );
+					/* Therefore, we have already locked the descriptor */
+
+					unmap_args.size = descriptor->size;
+					unmap_args.mapping = descriptor->mapping;
+					unmap_args.cookie = (u32)descriptor;
+
+					/*
+					 * This removes the descriptor from the list, and frees the descriptor
+					 *
+					 * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+					 * the only OS we are aware of that requires indirect MMAP also has
+					 * implicit mmap cleanup.
+					 */
+					err = _mali_ukk_mem_munmap_internal( &unmap_args );
+
+					if (err == _MALI_OSK_ERR_BUSY)
+					{
+						_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+						/*
+						 * Reason for this;
+						 * We where unable to stall the MMU, probably because we are in page fault handling.
+						 * Sleep for a while with the session lock released, then try again.
+						 * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+						 */
+						_mali_osk_time_ubusydelay(10);
+						break; /* Will jump back into: "while (err == _MALI_OSK_ERR_BUSY)" */
+					}
+				}
+			}
+		}
+		/* Assert that we really did free everything */
+		MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+	}
+#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
+#else
+	/* Lock the session so we can modify the memory list */
+	_mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */
+
+	if (NULL != session_data->descriptor_mapping)
+	{
+		mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		session_data->descriptor_mapping = NULL;
+	}
+
+	_mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+	/**
+	 * @note Could the VMA close handler mean that we use the session data after it was freed?
+	 * In which case, would need to refcount the session data, and free on VMA close
+	 */
+
+	/* Free the lock */
+	_mali_osk_lock_term( session_data->memory_lock );
+
+	return;
+}
+
+/* Register an OS-memory resource: create an OS allocator of the requested
+ * size and splice it into the global physical_memory_allocators list,
+ * which is kept ordered by alloc_order (equal orders insert last-in-first). */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+	mali_physical_memory_allocator * allocator;
+	mali_physical_memory_allocator ** next_allocator_list;
+
+	u32 alloc_order = resource->alloc_order;
+
+	allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+	if (NULL == allocator)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	allocator->alloc_order = alloc_order;
+
+	/* link in the allocator: insertion into ordered list
+	 * resources of the same alloc_order will be Last-in-first */
+	next_allocator_list = &physical_memory_allocators;
+
+	while (NULL != *next_allocator_list &&
+	       (*next_allocator_list)->alloc_order < alloc_order )
+	{
+		next_allocator_list = &((*next_allocator_list)->next);
+	}
+
+	allocator->next = (*next_allocator_list);
+	(*next_allocator_list) = allocator;
+
+	MALI_SUCCESS;
+}
+
+/* Register a dedicated (carved-out) memory bank: claim the physical region
+ * from the OS, wrap it in a block allocator, record base/size on the
+ * cleanup list used by mali_memory_terminate(), and insert the allocator
+ * into the global list ordered by alloc_order (same order: last-in-first).
+ * Every failure path releases the region claim before returning. */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
+{
+	mali_physical_memory_allocator * allocator;
+	mali_physical_memory_allocator ** next_allocator_list;
+	dedicated_memory_info * cleanup_data;
+
+	u32 alloc_order = resource->alloc_order;
+
+	/* do the low level linux operation first */
+
+	/* Request ownership of the memory */
+	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* create generic block allocator object to handle it */
+	allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
+
+	if (NULL == allocator)
+	{
+		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+		_mali_osk_mem_unreqregion(resource->base, resource->size);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* save low level cleanup info */
+	allocator->alloc_order = alloc_order;
+
+	cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+	if (NULL == cleanup_data)
+	{
+		_mali_osk_mem_unreqregion(resource->base, resource->size);
+		allocator->destroy(allocator);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	cleanup_data->base = resource->base;
+	cleanup_data->size = resource->size;
+
+	/* push onto the cleanup list consumed by mali_memory_terminate() */
+	cleanup_data->next = mem_region_registrations;
+	mem_region_registrations = cleanup_data;
+
+	/* link in the allocator: insertion into ordered list
+	 * resources of the same alloc_order will be Last-in-first */
+	next_allocator_list = &physical_memory_allocators;
+
+	while ( NULL != *next_allocator_list &&
+	        (*next_allocator_list)->alloc_order < alloc_order )
+	{
+		next_allocator_list = &((*next_allocator_list)->next);
+	}
+
+	allocator->next = (*next_allocator_list);
+	(*next_allocator_list) = allocator;
+
+	MALI_SUCCESS;
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** Physical-allocator commit callback that maps a UMP allocation into Mali.
+ *
+ * @param ctx        the ump_dd_handle (passed as allocator context)
+ * @param engine     allocation engine used for the physical mapping calls
+ * @param descriptor Mali memory allocation being populated
+ * @param offset     in/out: running offset into the descriptor; advanced by
+ *                   the size of every block mapped (and the guard page)
+ * @param alloc_info out: release callback (ump_memory_release) and handle
+ * @return MALI_MEM_ALLOC_FINISHED on success, otherwise
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE (all partial mappings undone)
+ */
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* temporary array holding the physical block list queried from UMP */
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ /* map each UMP physical block contiguously into the Mali address range */
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA.
+ * The first physical block is reused as backing for the guard page. */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ /* record everything ump_memory_release() needs to undo the mapping */
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/** Release callback for a UMP-backed allocation created by ump_memory_commit.
+ *
+ * Unmaps the committed range, frees the bookkeeping structure and drops the
+ * UMP reference taken at attach time.
+ *
+ * @param ctx    unused (set to NULL by ump_memory_commit)
+ * @param handle the ump_mem_allocation recorded in alloc_info->handle
+ */
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+
+ ump_dd_reference_release(ump_mem) ;
+ return;
+}
+
+/** User-kernel entry point: attach a UMP buffer into the Mali address space.
+ *
+ * Resolves args->secure_id to a UMP handle (taking a reference), builds a
+ * memory descriptor for args->mali_address/args->size, registers it in the
+ * session's descriptor mapping and commits the physical mapping under the
+ * session memory lock. The descriptor map index is returned in args->cookie
+ * for use by _mali_ukk_release_ump_mem().
+ *
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS for a zero
+ *         or non-page-multiple size; _MALI_OSK_ERR_FAULT/_MALI_OSK_ERR_NOMEM
+ *         on failure (UMP reference and descriptor released on all error
+ *         paths)
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ /* takes a UMP reference; released on every error path below and in
+ * ump_memory_release() on the success path */
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = session_data->memory_lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* one-shot allocator whose commit callback maps the UMP blocks */
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/** User-kernel entry point: detach a UMP buffer previously attached with
+ * _mali_ukk_attach_ump_mem().
+ *
+ * Looks up the descriptor via args->cookie, removes it from the session's
+ * descriptor mapping and releases the physical mapping (which also drops the
+ * UMP reference via ump_memory_release) under the session memory lock.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT for an invalid
+ *         cookie
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* validate the cookie first; the actual descriptor is taken from the
+ * mapping_free() call below, which also removes the table entry */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ descriptor = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != descriptor)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
+/** Physical-allocator commit callback for externally supplied physical memory.
+ *
+ * @param ctx        pointer to a u32[2]: ctx[0] = physical base address,
+ *                   ctx[1] = size in bytes (set up by
+ *                   _mali_ukk_map_external_mem)
+ * @param engine     allocation engine used for the physical mapping calls
+ * @param descriptor Mali memory allocation being populated
+ * @param offset     in/out: running offset into the descriptor; advanced by
+ *                   the mapped size (and the optional guard page)
+ * @param alloc_info out: release callback (external_memory_release) + handle
+ * @return MALI_MEM_ALLOC_FINISHED on success, otherwise
+ *         MALI_MEM_ALLOC_INTERNAL_FAILURE (partial mappings undone)
+ */
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ /* record what external_memory_release() needs to undo the mapping */
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(5, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA,
+ * backed by the start of the external range */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/** Release callback for an allocation created by external_memory_commit.
+ *
+ * Unmaps the committed range and frees the bookkeeping structure.
+ *
+ * @param ctx    unused (set to NULL by external_memory_commit)
+ * @param handle the external_mem_allocation recorded in alloc_info->handle
+ */
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+/** User-kernel entry point: map an external physical range into Mali.
+ *
+ * Validates the physical range, builds a memory descriptor, commits the
+ * mapping under the session memory lock (external_memory_commit reads
+ * phys_addr/size from the stack-local info[] array, which is only used
+ * during that call), then registers the descriptor and returns its map
+ * index in args->cookie for _mali_ukk_unmap_external_mem().
+ *
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_INVALID_ARGS for a zero
+ *         or non-page-multiple size; _MALI_OSK_ERR_FAULT for an invalid
+ *         physical range or descriptor-map failure; _MALI_OSK_ERR_NOMEM on
+ *         allocation failure
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* one-shot allocator; ctx points at the local info[] filled in below */
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = session_data->memory_lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* NOTE(review): the descriptor map entry is created after the memory
+ * lock is dropped; on failure the mapping is rolled back below */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/** User-kernel entry point: unmap memory previously mapped with
+ * _mali_ukk_map_external_mem().
+ *
+ * Looks up the descriptor via args->cookie, removes it from the session's
+ * descriptor mapping and releases the physical mapping under the session
+ * memory lock.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT for an invalid
+ *         cookie
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ void* old_value;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* removes the table entry; only release if it was still present */
+ old_value = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != old_value)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+}
+
+/** User-kernel entry point: report the Mali virtual address space layout.
+ *
+ * Fills in args->memory_size (2 GB) and args->mali_address_base (1 GB);
+ * no state is allocated here.
+ *
+ * @return _MALI_OSK_ERR_OK always (after argument validation)
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, causing this layout: (0-1GB unused)(1GB-3G usage by Mali)(3G-4G unused) */
+ MALI_SUCCESS;
+}
+
+/** User-kernel entry point: terminate memory usage for a session.
+ *
+ * Intentionally a no-op beyond argument validation; per-session cleanup is
+ * handled elsewhere (e.g. at session end).
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+/** Address-manager callback: reserve page tables for a descriptor's range.
+ *
+ * Maps descriptor->size bytes (plus one extra page when the guard-page flag
+ * is set) at descriptor->mali_address in the session's page directory.
+ *
+ * @return result of mali_mmu_pagedir_map()
+ */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ struct mali_session_data *session_data;
+ u32 actual_size;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+
+ actual_size = descriptor->size;
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* the guard page needs its own page table entry */
+ actual_size += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return mali_mmu_pagedir_map(session_data->page_directory, descriptor->mali_address, actual_size);
+}
+
+/** Address-manager callback: unmap a descriptor's Mali address range.
+ *
+ * Idempotent: the first call poisons descriptor->mali_address with a sentinel
+ * so repeated calls return early without touching the page directory.
+ */
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ const u32 illegal_mali_address = 0xffffffff;
+ struct mali_session_data *session_data;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* It is allowed to call this function several times on the same descriptor.
+ When memory is released we set the illegal_mali_address so we can early out here. */
+ if ( illegal_mali_address == descriptor->mali_address) return;
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ mali_mmu_pagedir_unmap(session_data->page_directory, descriptor->mali_address, descriptor->size);
+
+ descriptor->mali_address = illegal_mali_address ;
+}
+
+/** Address-manager callback: map physical pages into a descriptor's range.
+ *
+ * Updates the session's page directory so that
+ * descriptor->mali_address + offset points at *phys_addr for @size bytes.
+ *
+ * @param offset    byte offset inside the descriptor's Mali range
+ * @param phys_addr pointer to the physical address to map
+ * @param size      number of bytes to map
+ * @return _MALI_OSK_ERR_OK always (pagedir update has no error return here)
+ */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ struct mali_session_data *session_data;
+ u32 mali_address;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(phys_addr);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+
+ MALI_DEBUG_PRINT(7, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", *phys_addr, mali_address, size));
+
+ mali_mmu_pagedir_update(session_data->page_directory, mali_address, *phys_addr, size);
+
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_mmap for MMU builds */
+/** Allocate Mali memory and map it into the calling process (mmap path).
+ *
+ * Builds a descriptor for args->size bytes at Mali address args->phys_addr,
+ * flags it for userspace mapping, and allocates backing through the chain of
+ * registered physical allocators while holding the session memory lock. On
+ * success the CPU mapping and a cookie (the descriptor pointer) are returned
+ * for the matching munmap.
+ *
+ * NOTE(review): args->cookie carries a kernel pointer cast to u32 -- assumes
+ * a 32-bit kernel address fits; confirm for this platform.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on descriptor
+ *         allocation failure, _MALI_OSK_ERR_FAULT on backing-memory OOM
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ session_data = (struct mali_session_data *)args->ctx;
+
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->lock = session_data->memory_lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ /* We do not FLUSH nor TLB_ZAP on MMAP, since we do both of those on job start*/
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation, userspace client the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+/** Core of the munmap path; caller holds the session memory lock.
+ *
+ * Unmaps the descriptor from the Mali page tables, optionally force-flushes
+ * every powered L2 cache, zaps the MMU TLB of every group still using this
+ * session, then frees the physical memory and the descriptor.
+ *
+ * @param args args->cookie carries the descriptor pointer from mmap
+ * @return _MALI_OSK_ERR_OK on completion (the caller retries on
+ *         _MALI_OSK_ERR_BUSY, though this body currently only returns OK)
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ u32 num_groups = mali_group_get_glob_num_groups();
+ struct mali_group *group;
+ u32 i;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Unmapping the memory from the mali virtual address space.
+ It is allowed to call this function several times, which might happen if zapping below fails. */
+ mali_allocation_engine_release_pt1_mali_pagetables_unmap(memory_engine, descriptor);
+
+#ifdef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ {
+ /* force-invalidate every powered L2 so no stale lines reference
+ * the pages about to be freed */
+ u32 number_of_clusters = mali_cluster_get_glob_num_clusters();
+ for (i = 0; i < number_of_clusters; i++)
+ {
+ struct mali_cluster *cluster;
+ cluster = mali_cluster_get_global_cluster(i);
+ if( mali_cluster_power_is_enabled_get(cluster) )
+ {
+ mali_cluster_l2_cache_invalidate_all_force(cluster);
+ }
+ }
+ }
+#endif
+
+ for (i = 0; i < num_groups; i++)
+ {
+ group = mali_group_get_glob_group(i);
+ mali_group_lock(group);
+ mali_group_remove_session_if_unused(group, session_data);
+ if (mali_group_get_session(group) == session_data)
+ {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(mali_group_get_mmu(group));
+ if (MALI_TRUE != zap_success)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+ mali_group_bottom_half(group, GROUP_EVENT_MMU_PAGE_FAULT);
+ /* The bottom half will also do the unlock */
+ continue;
+ }
+ }
+ mali_group_unlock(group);
+ }
+
+ /* Removes the descriptor from the session's memory list, releases physical memory, releases descriptor */
+ mali_allocation_engine_release_pt2_physical_memory_free(memory_engine, descriptor);
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Handler for unmapping memory for MMU builds */
+/** Unmap Mali memory allocated through _mali_ukk_mem_mmap().
+ *
+ * Takes the descriptor's session lock (read from the descriptor before the
+ * retry loop) and calls _mali_ukk_mem_munmap_internal(), retrying with a
+ * short busy-wait while it reports _MALI_OSK_ERR_BUSY -- e.g. when the MMU
+ * could not be stalled because page fault handling is in progress.
+ *
+ * @param args args->cookie carries the descriptor pointer from mmap
+ * @return result of the last _mali_ukk_mem_munmap_internal() attempt
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+ _mali_osk_errcode_t err;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ err = _MALI_OSK_ERR_BUSY;
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ err = _mali_ukk_mem_munmap_internal( args );
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ /*
+ * Reason for this;
+ * We were unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ }
+ }
+
+ return err;
+}
+
+/** Report total memory usage across all registered physical allocators.
+ *
+ * @return usage in the unit defined by mali_allocation_engine_memory_usage()
+ *         (presumably bytes -- not visible here)
+ */
+u32 _mali_ukk_report_memory_usage(void)
+{
+ return mali_allocation_engine_memory_usage(physical_memory_allocators);
+}
+
+/** Get one MMU page table page (Mali physical address + kernel mapping).
+ *
+ * Pages are served from a cache of multi-page allocations. A partially used
+ * allocation is preferred; when none exists a new allocation is requested
+ * from the physical allocators, its usage bitmap is created, and its first
+ * page is handed out.
+ *
+ * @param table_page out: Mali physical address of the page
+ *                   (MALI_INVALID_PAGE on failure)
+ * @param mapping    out: kernel virtual mapping of the page
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on failure
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ /* One bit per page, rounded up to whole unsigned longs.
+ * The previous expression applied '/' before '&' due to C operator
+ * precedence, degenerating to (num_pages + BITS_PER_LONG - 1) longs
+ * and over-allocating the bitmap by a factor of BITS_PER_LONG. */
+ alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long));
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* first page is handed out immediately */
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ if (alloc->num_pages > 1)
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+ }
+ else
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.full);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated: VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
+
+/** Return an MMU page table page to the cache.
+ *
+ * Finds the cache allocation containing physical address @pa (searching the
+ * partial list, then the full list), clears its usage bit, zeroes the page,
+ * and either moves the allocation to the partial list or frees it entirely
+ * when no pages remain in use. Logs (but does not fail) if @pa is unknown.
+ *
+ * @param pa Mali physical address of the page (must be page aligned)
+ */
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+ MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ /* scrub the page so stale PTEs never leak to the next user */
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ /* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ else
+ {
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/** Initialize the MMU page table cache (lock + partial/full lists).
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the lock
+ *         cannot be created
+ */
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+/** Tear down the MMU page table cache.
+ *
+ * Releases every allocation on the partial and full lists (warning in debug
+ * builds if any pages are still marked in use -- both lists should be empty
+ * by now) and destroys the cache lock. No locking: assumes no concurrent
+ * cache users at termination time.
+ */
+static void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ _mali_osk_lock_term(page_table_cache.lock);
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_memory.h b/drivers/parrot/gpu/mali200/common/mali_memory.h
new file mode 100644
index 00000000000000..53a994c0e5ed04
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_memory.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+struct mali_cluster;
+struct mali_group;
+
+/** @brief Initialize Mali memory subsystem
+ *
+ * Allocate and initialize internal data structures. Must be called before
+ * allocating Mali memory.
+ *
+ * @return On success _MALI_OSK_ERR_OK, otherwise some error code describing the error.
+ */
+_mali_osk_errcode_t mali_memory_initialize(void);
+
+/** @brief Terminate Mali memory system
+ *
+ * Clean up and release internal data structures.
+ */
+void mali_memory_terminate(void);
+
+/** @brief Start new Mali memory session
+ *
+ * Allocate and prepare session specific memory allocation data. The
+ * session page directory, lock, and descriptor map is set up.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *mali_session_data);
+
+/** @brief Close a Mali memory session
+ *
+ * Release session specific memory allocation related data.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+void mali_memory_session_end(struct mali_session_data *mali_session_data);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+void mali_mmu_release_table_page(u32 pa);
+
+
+/** @brief Parse resource and prepare the OS memory allocator
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_mmu.c b/drivers/parrot/gpu/mali200/common/mali_mmu.c
new file mode 100644
index 00000000000000..2f2fa4dce81784
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mmu.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+	MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+	MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+	MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+	MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+	MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+	MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+	MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+	MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+	MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+	MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+	MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+	MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+	MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+	MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+	MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+	MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+/**
+ * Bits of the MMU STATUS register.
+ */
+typedef enum mali_mmu_status_bits
+{
+	MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+	MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+	MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+	MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core
+{
+	struct mali_hw_core hw_core; /**< Common for all HW cores */
+	struct mali_group *group; /**< Parent core group */
+	_mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt function supports IRQ sharing, so it probes the MMU in question.
+ * @param data Points to the mali_mmu_core object being handled
+ * @return _MALI_OSK_ERR_OK if the interrupt was from this MMU and was processed,
+ *         _MALI_OSK_ERR_FAULT if it was not ours (lets a shared IRQ propagate)
+ */
+static _mali_osk_errcode_t mali_mmu_upper_half(void * data);
+
+/**
+ * The MMU reset handler
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param data Points to the mali_mmu_core object being handled
+ */
+static void mali_mmu_bottom_half(void *data);
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+/** Allocate the global MMU helper pages: the empty page directory used by
+ * idle MMUs and the page-fault flush page directory/table/data page.
+ * @return _MALI_OSK_ERR_OK on success; on failure nothing is left allocated. */
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+	/* allocate the helper pages */
+	mali_empty_page_directory = mali_allocate_empty_page();
+	if(0 == mali_empty_page_directory)
+	{
+		mali_empty_page_directory = MALI_INVALID_PAGE;
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+	                        &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page))
+	{
+		/* roll back the empty page directory so nothing leaks */
+		mali_free_empty_page(mali_empty_page_directory);
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Release the global helper pages allocated by mali_mmu_initialize(). */
+void mali_mmu_terminate(void)
+{
+	MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+	/* Free global helper pages */
+	mali_free_empty_page(mali_empty_page_directory);
+
+	/* Free the page fault flush pages */
+	mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+	                            &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page);
+}
+
+/** Create an MMU core object from a platform resource.
+ * Maps the register window, resets the hardware, and installs the IRQ
+ * handlers (which may perform IRQ probing).
+ * @param resource describes the register base and IRQ of the MMU unit
+ * @return the new core, or NULL on failure (everything rolled back) */
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource)
+{
+	struct mali_mmu_core* mmu = NULL;
+
+	MALI_DEBUG_ASSERT_POINTER(resource);
+
+	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+	mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+	if (NULL != mmu)
+	{
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
+		{
+			/* Note: mmu->group is still NULL here; the reset/stall helpers
+			 * below tolerate that. */
+			if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
+			{
+				/* Setup IRQ handlers (which will do IRQ probing if needed) */
+				mmu->irq = _mali_osk_irq_init(resource->irq,
+				                              mali_mmu_upper_half,
+				                              mali_mmu_bottom_half,
+				                              mali_mmu_probe_trigger,
+				                              mali_mmu_probe_ack,
+				                              mmu,
+				                              "mali_mmu_irq_handlers");
+				if (NULL != mmu->irq)
+				{
+					return mmu;
+				}
+				else
+				{
+					MALI_PRINT_ERROR(("Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+				}
+			}
+			mali_hw_core_delete(&mmu->hw_core);
+		}
+
+		_mali_osk_free(mmu);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+	}
+
+	return NULL;
+}
+
+/** Destroy an MMU core created by mali_mmu_create().
+ * NOTE(review): mmu must be non-NULL and fully constructed — no guard here. */
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+	_mali_osk_irq_term(mmu->irq);
+	mali_hw_core_delete(&mmu->hw_core);
+	_mali_osk_free(mmu);
+}
+
+/** Attach the MMU to its parent group; group may be NULL during setup. */
+void mali_mmu_set_group(struct mali_mmu_core *mmu, struct mali_group *group)
+{
+	mmu->group = group;
+}
+
+/** Issue ENABLE_PAGING and busy-wait (up to ~100us) until the STATUS
+ * register reports paging enabled; logs an error on timeout. */
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+	const int max_loop_count = 100;
+	const int delay_in_usecs = 1;
+	int i;
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+	for (i = 0; i < max_loop_count; ++i)
+	{
+		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
+		{
+			break;
+		}
+		_mali_osk_time_ubusydelay(delay_in_usecs);
+	}
+	if (max_loop_count == i)
+	{
+		MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+	}
+}
+
+/** Issue ENABLE_STALL and busy-wait until the MMU reports the stall (or a
+ * page fault, which also blocks further memory traffic).
+ * A stall is implicit when paging is disabled, so that case succeeds
+ * immediately; a pending page fault makes the request fail.
+ * @param mmu the MMU to stall
+ * @return MALI_TRUE if the MMU is stalled (or paging is off),
+ *         MALI_FALSE on timeout or if the MMU is in page fault state. */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+	const int max_loop_count = 100;
+	const int delay_in_usecs = 999; /* NOTE(review): much longer poll interval than the other helpers (1us) — confirm intentional */
+	int i;
+	u32 mmu_status;
+
+	/* There are no group when it is called from mali_mmu_create */
+	if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
+	{
+		/* Fixed typo in debug message ("enebled") */
+		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+		return MALI_TRUE;
+	}
+
+	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+	{
+		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+		return MALI_FALSE;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+	for (i = 0; i < max_loop_count; ++i)
+	{
+		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if ( mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE))
+		{
+			break;
+		}
+		if ( 0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
+		{
+			/* Paging was turned off while we waited: stall becomes implicit. */
+			break;
+		}
+		_mali_osk_time_ubusydelay(delay_in_usecs);
+	}
+	if (max_loop_count == i)
+	{
+		MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+		return MALI_FALSE;
+	}
+
+	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+	{
+		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n"));
+		return MALI_FALSE;
+	}
+
+	return MALI_TRUE;
+}
+
+/** Issue DISABLE_STALL and busy-wait until the stall bit clears.
+ * A no-op when paging is disabled; aborts (stall cannot be lifted) when the
+ * MMU is in page fault state.
+ * @param mmu the MMU to un-stall */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+	const int max_loop_count = 100;
+	const int delay_in_usecs = 1;
+	int i;
+	u32 mmu_status;
+	/* There are no group when it is called from mali_mmu_create */
+	if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+	{
+		MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+		return;
+	}
+	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
+	{
+		MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+		return;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+	for (i = 0; i < max_loop_count; ++i)
+	{
+		u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) )
+		{
+			break;
+		}
+		if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+		{
+			break;
+		}
+		/* Fix: test the freshly-read status, not the stale pre-command
+		 * mmu_status (which is known to have PAGING_ENABLED set, making
+		 * the old check dead code). Mirrors the loop in enable_stall. */
+		if ( 0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+		{
+			break;
+		}
+		_mali_osk_time_ubusydelay(delay_in_usecs);
+	}
+	if (max_loop_count == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
+/** Tell the MMU the page fault has been handled; it leaves fault mode and
+ * resumes the faulting transaction. Caller must hold the group lock. */
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+/** Hard-reset the MMU and wait for completion.
+ * Writes a sentinel (0xCAFEBABE) to DTE_ADDR, issues HARD_RESET, and polls
+ * until the hardware clears DTE_ADDR back to 0 — the sign reset finished.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on timeout */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+	const int max_loop_count = 100;
+	const int delay_in_usecs = 1;
+	int i;
+	/* The _if_ is neccessary when called from mali_mmu_create and NULL==group */
+	if (mmu->group)MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+	for (i = 0; i < max_loop_count; ++i)
+	{
+		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0)
+		{
+			break;
+		}
+		_mali_osk_time_ubusydelay(delay_in_usecs);
+	}
+	if (max_loop_count == i)
+	{
+		MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Full MMU reset sequence: stall, hard-reset, re-arm interrupts, activate
+ * the empty page directory, re-enable paging, then lift the stall.
+ * Safe to call from mali_mmu_create while mmu->group is still NULL.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the raw reset failed */
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	mali_bool stall_success; /* only consumed by the debug assert below */
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+	/* The _if_ is neccessary when called from mali_mmu_create and NULL==group */
+	if (mmu->group)
+	{
+		MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	}
+
+	stall_success = mali_mmu_enable_stall(mmu);
+
+	/* The stall can not fail in current hw-state */
+	MALI_DEBUG_ASSERT(stall_success);
+
+	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
+	{
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+		/* no session is active, so just activate the empty page directory */
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+		mali_mmu_enable_paging(mmu);
+		err = _MALI_OSK_ERR_OK;
+	}
+	mali_mmu_disable_stall(mmu);
+
+	return err;
+}
+
+
+/* ------------- interrupt handling below ------------------ */
+
+/** Top-half IRQ handler. Reads the masked INT_STATUS to see if this MMU
+ * raised the interrupt (the line may be shared). Page faults are masked
+ * and deferred to the bottom half; read bus errors are cleared and
+ * re-enabled inline.
+ * @param data the mali_mmu_core that registered this handler
+ * @return _MALI_OSK_ERR_OK if handled, _MALI_OSK_ERR_FAULT if not ours */
+static _mali_osk_errcode_t mali_mmu_upper_half(void * data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+	u32 int_stat;
+
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	/* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+	if (0 != int_stat)
+	{
+		/* Mask all MMU interrupts; the bottom half / fault handling re-arms them. */
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+		/* Dummy STATUS read — presumably flushes the write above; confirm against HW docs. */
+		mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+		if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+		{
+			_mali_osk_irq_schedulework(mmu->irq);
+		}
+
+		if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+		{
+			/* clear interrupt flag */
+			mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+			/* reenable it */
+			mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK,
+			                            mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+			MALI_PRINT_ERROR(("Mali MMU: Read bus error\n"));
+		}
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/** Bottom-half page fault handler, run from the deferred-work context.
+ * Takes the group lock (released inside mali_group_bottom_half), verifies
+ * the core is powered and that a fault is really pending, logs the fault
+ * address/type, and hands off to the group for recovery.
+ * @param data the mali_mmu_core whose top half scheduled this work */
+static void mali_mmu_bottom_half(void * data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core*)data;
+	u32 raw, status, fault_address;
+
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	MALI_DEBUG_PRINT(3, ("Mali MMU: Page fault bottom half: Locking subsystems\n"));
+
+	mali_group_lock(mmu->group); /* Unlocked in mali_group_bottom_half */
+
+	if ( MALI_FALSE == mali_group_power_is_on(mmu->group) )
+	{
+		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.",mmu->hw_core.description));
+		mali_group_unlock(mmu->group);
+		return;
+	}
+
+	raw = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+	status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+	{
+		MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault bottom half: No Irq found.\n"));
+		mali_group_unlock(mmu->group);
+		/* MALI_DEBUG_ASSERT(0); */
+		return;
+	}
+
+	/* An actual page fault has occurred. */
+
+	fault_address = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+
+	/* Fix: pass the u32 itself for %x — the old (void*) cast mismatched the
+	 * format specifier (undefined behavior, wrong output on 64-bit). */
+	MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%08X from bus id %d of type %s on %s\n",
+	                 fault_address,
+	                 (status >> 6) & 0x1F,
+	                 (status & 32) ? "write" : "read",
+	                 mmu->hw_core.description));
+
+	mali_group_bottom_half(mmu->group, GROUP_EVENT_MMU_PAGE_FAULT); /* Unlocks the group lock */
+}
+
+/** Invalidate the whole TLB under a stall.
+ * @return MALI_FALSE if the stall failed (MMU in page fault state — the
+ *         stall is then NOT disabled), MALI_TRUE otherwise. */
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+	mali_bool stall_success;
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	stall_success = mali_mmu_enable_stall(mmu);
+
+	/* ZAP is issued even if the stall failed. */
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+	if (MALI_FALSE == stall_success)
+	{
+		/* False means that it is in Pagefault state. Not possible to disable_stall then */
+		return MALI_FALSE;
+	}
+
+	mali_mmu_disable_stall(mmu);
+	return MALI_TRUE;
+}
+
+/** Invalidate the whole TLB without stalling first (caller must know the
+ * MMU state makes that safe). Caller holds the group lock. */
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+/** Invalidate the TLB entry covering one Mali virtual address.
+ * NOTE(review): ZAP_ONE_LINE is fed MALI_MMU_PDE_ENTRY(mali_address) — i.e.
+ * the directory-entry index — confirm against the HW spec that this is the
+ * expected operand rather than the page address. */
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+/** Point the MMU at a new page directory and zap the TLB.
+ * Precondition: the MMU is stalled or in page fault mode — DTE_ADDR writes
+ * are only honored then (asserted in debug builds). */
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	/* The MMU must be in stalled or page fault mode, for this writing to work */
+	MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+	                          & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+/** Switch the MMU to a session's page directory (stall, swap, un-stall).
+ * @return MALI_FALSE if the MMU could not be stalled (page fault pending) —
+ *         the directory is NOT switched in that case.
+ * NOTE(review): debug print passes the pagedir pointer for %x — format
+ * mismatch on 64-bit; harmless in release builds where the print compiles out. */
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+	stall_success = mali_mmu_enable_stall(mmu);
+
+	if ( MALI_FALSE==stall_success ) return MALI_FALSE;
+	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+	mali_mmu_disable_stall(mmu);
+	return MALI_TRUE;
+}
+
+/** Park the MMU on the global empty page directory (no valid mappings).
+ * Must only be called when the core is idle, so the stall cannot fail. */
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+{
+	mali_bool stall_success; /* only consumed by the debug assert below */
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+	stall_success = mali_mmu_enable_stall(mmu);
+	/* This function can only be called when the core is idle, so it could not fail. */
+	MALI_DEBUG_ASSERT( stall_success );
+	mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+	mali_mmu_disable_stall(mmu);
+}
+
+/** Switch to the page-fault flush page directory, used to drain the fault
+ * queue. The stall may legitimately fail here (MMU likely already in page
+ * fault mode); the directory is switched either way, and the stall is only
+ * lifted if it was actually taken. */
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+{
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+	MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+	stall_success = mali_mmu_enable_stall(mmu);
+	/* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
+	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+	if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
+}
+
+/* IRQ-probe trigger: force both interrupt causes by writing them into
+ * INT_RAWSTAT, so the probe can verify the IRQ line actually fires. */
+static void mali_mmu_probe_trigger(void *data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* IRQ-probe acknowledge: checks that BOTH interrupt causes injected by
+ * mali_mmu_probe_trigger are visible in INT_STATUS, clearing each as it is
+ * seen. Returns _MALI_OSK_ERR_OK only if both were observed. */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+	u32 int_stat;
+
+	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+	MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+	if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+	{
+		MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+	}
+
+	if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+	{
+		MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+	}
+
+	/* success only when both injected causes were seen */
+	if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+	     (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+	{
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0 /* debug helper, currently unused — kept for hand-enabling during bring-up */
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+	MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_mmu.h b/drivers/parrot/gpu/mali200/common/mali_mmu.h
new file mode 100644
index 00000000000000..c7274b8edd3ec5
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+struct mali_mmu_core;
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource);
+void mali_mmu_set_group(struct mali_mmu_core *mmu, struct mali_group *group);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to stall
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
+ */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu);
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to un-stall
+ */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.c b/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.c
new file mode 100644
index 00000000000000..43a4cf449a3d74
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+
+#include "mali_cluster.h"
+#include "mali_group.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+/** Allocate a zero-filled 4 KiB page from the MMU table-page pool.
+ *
+ * @return Physical address of the zeroed page, or 0 on failure.
+ */
+u32 mali_allocate_empty_page(void)
+{
+	_mali_osk_errcode_t err;
+	mali_io_address mapping;
+	u32 address;
+
+	if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping))
+	{
+		/* Allocation failed */
+		return 0;
+	}
+
+	MALI_DEBUG_ASSERT_POINTER( mapping );
+
+	err = fill_page(mapping, 0);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		/* Zero-fill failed: release the page and report failure. Previously
+		 * the address of the just-released page was returned, handing the
+		 * caller a freed page. */
+		mali_mmu_release_table_page(address);
+		return 0;
+	}
+	return address;
+}
+
+/** Release a page obtained from mali_allocate_empty_page().
+ *
+ * @param address Physical page address; MALI_INVALID_PAGE is a safe no-op.
+ */
+void mali_free_empty_page(u32 address)
+{
+	if (MALI_INVALID_PAGE != address)
+	{
+		mali_mmu_release_table_page(address);
+	}
+}
+
+/** Allocate the three pages used to service/flush page faults: a zeroed
+ * data page, a page table whose every entry maps the data page RW, and a
+ * page directory whose every entry points at that page table.
+ *
+ * The three allocations are chained; on failure of a later allocation the
+ * earlier pages are released and their out-parameters reset to
+ * MALI_INVALID_PAGE (rollback runs innermost-to-outermost).
+ *
+ * @param page_directory [out] physical address of the flush page directory
+ * @param page_table     [out] physical address of the flush page table
+ * @param data_page      [out] physical address of the flush data page
+ * @return _MALI_OSK_ERR_OK on success, otherwise the failing allocation's error
+ */
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+	_mali_osk_errcode_t err;
+	mali_io_address page_directory_mapping;
+	mali_io_address page_table_mapping;
+	mali_io_address data_page_mapping;
+
+	err = mali_mmu_get_table_page(data_page, &data_page_mapping);
+	if (_MALI_OSK_ERR_OK == err)
+	{
+		err = mali_mmu_get_table_page(page_table, &page_table_mapping);
+		if (_MALI_OSK_ERR_OK == err)
+		{
+			err = mali_mmu_get_table_page(page_directory, &page_directory_mapping);
+			if (_MALI_OSK_ERR_OK == err)
+			{
+				fill_page(data_page_mapping, 0);
+				fill_page(page_table_mapping, *data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+				fill_page(page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+				MALI_SUCCESS;
+			}
+			mali_mmu_release_table_page(*page_table);
+			*page_table = MALI_INVALID_PAGE;
+		}
+		mali_mmu_release_table_page(*data_page);
+		*data_page = MALI_INVALID_PAGE;
+	}
+	return err;
+}
+
+/** Release the pages created by mali_create_fault_flush_pages().
+ *
+ * Each pointer is reset to MALI_INVALID_PAGE after release, so the
+ * function is idempotent and tolerates partially-created sets.
+ */
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+	if (MALI_INVALID_PAGE != *page_directory)
+	{
+		mali_mmu_release_table_page(*page_directory);
+		*page_directory = MALI_INVALID_PAGE;
+	}
+
+	if (MALI_INVALID_PAGE != *page_table)
+	{
+		mali_mmu_release_table_page(*page_table);
+		*page_table = MALI_INVALID_PAGE;
+	}
+
+	if (MALI_INVALID_PAGE != *data_page)
+	{
+		mali_mmu_release_table_page(*data_page);
+		*data_page = MALI_INVALID_PAGE;
+	}
+}
+
+/* Write 'data' to every 32-bit word of a MALI_MMU_PAGE_SIZE page using
+ * relaxed IO writes, then issue a memory barrier so the page contents are
+ * visible to the MMU before the caller proceeds. Always succeeds. */
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+	int i;
+	MALI_DEBUG_ASSERT_POINTER( mapping );
+
+	for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+	{
+		_mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+	}
+	_mali_osk_mem_barrier();
+	MALI_SUCCESS;
+}
+
+/** Ensure page tables exist for the Mali virtual range [mali_address,
+ * mali_address + size). For each 4 MiB-aligned PDE in range, allocate and
+ * install a page table if absent, otherwise bump its reference count.
+ * Reference counts are balanced by mali_mmu_pagedir_unmap().
+ *
+ * NOTE(review): on allocation failure midway, PDEs already processed keep
+ * their incremented refcounts — caller is presumably expected to unmap the
+ * range on error; confirm against callers.
+ *
+ * @return _MALI_OSK_ERR_OK, or the error from page-table allocation
+ */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+	_mali_osk_errcode_t err;
+	mali_io_address pde_mapping;
+	u32 pde_phys;
+	int i;
+
+	for(i = first_pde; i <= last_pde; i++)
+	{
+		if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT))
+		{
+			/* Page table not present */
+			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+			if(_MALI_OSK_ERR_OK != err)
+			{
+				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+				return err;
+			}
+			pagedir->page_entries_mapped[i] = pde_mapping;
+
+			/* Update PDE, mark as present */
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
+			                                pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+			pagedir->page_entries_usage_count[i] = 1;
+		}
+		else
+		{
+			pagedir->page_entries_usage_count[i]++;
+		}
+	}
+	/* Make the new PDEs visible before the range is used */
+	_mali_osk_write_mem_barrier();
+
+	MALI_SUCCESS;
+}
+
+/* Zero the PTEs covering [mali_address, mali_address + size) within a
+ * single page table page (caller guarantees the range does not cross a
+ * 4 MiB PDE boundary). No barrier here; callers batch one afterwards. */
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+	int i;
+	const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+	const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+	for (i = first_pte; i <= last_pte; i++)
+	{
+		_mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+	}
+}
+
+/** Unmap the Mali virtual range [mali_address, mali_address + size),
+ * dropping one reference per covered PDE (balances mali_mmu_pagedir_map).
+ * A page table whose refcount reaches zero is released and its PDE
+ * cleared; otherwise only the affected PTEs are zeroed. Unless
+ * MALI_UNMAP_FLUSH_ALL_MALI_L2 is defined, physical pages touched here are
+ * collected and invalidated from the Mali L2 cache (skipped on Mali-200,
+ * which has no L2).
+ *
+ * NOTE(review): MALI_DEBUG_ASSERT(num_pages_inv<3) fires after the third
+ * (still in-bounds) store into pages_to_invalidate[3]; looks like it
+ * should be <= 3 — confirm intended capacity.
+ */
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+	u32 left = size;
+	int i;
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+	mali_bool pd_changed = MALI_FALSE;
+	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+	u32 num_pages_inv = 0;
+#endif
+
+	/* For all page directory entries in range. */
+	for (i = first_pde; i <= last_pde; i++)
+	{
+		u32 size_in_pde, offset;
+
+		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+		/* Offset into page table, 0 if mali_address is 4MiB aligned */
+		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset)
+		{
+			size_in_pde = left;
+		}
+		else
+		{
+			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+		}
+
+		pagedir->page_entries_usage_count[i]--;
+
+		/* If entire page table is unused, free it */
+		if (0 == pagedir->page_entries_usage_count[i])
+		{
+			u32 page_address;
+			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+			/* last reference removed, no need to zero out each PTE */
+
+			page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+			pagedir->page_entries_mapped[i] = NULL;
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+
+			mali_mmu_release_table_page(page_address);
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+			pd_changed = MALI_TRUE;
+#endif
+		}
+		else
+		{
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+			pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+			num_pages_inv++;
+			MALI_DEBUG_ASSERT(num_pages_inv<3);
+#endif
+
+			/* If part of the page table is still in use, zero the relevant PTEs */
+			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+		}
+
+		left -= size_in_pde;
+		mali_address += size_in_pde;
+	}
+	/* Ensure all PTE/PDE clears are visible before L2 invalidation */
+	_mali_osk_write_mem_barrier();
+
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+	/* L2 pages invalidation */
+	if (MALI_TRUE == pd_changed)
+	{
+		pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+		num_pages_inv++;
+		MALI_DEBUG_ASSERT(num_pages_inv<3);
+	}
+
+	if (_MALI_PRODUCT_ID_MALI200 != mali_kernel_core_get_product_id())
+	{
+		mali_cluster_invalidate_pages(pages_to_invalidate, num_pages_inv);
+	}
+#endif
+
+	MALI_SUCCESS;
+}
+
+/** Allocate and zero a new page directory (descriptor plus its backing
+ * page-directory page).
+ *
+ * @return The new page directory, or NULL on allocation failure.
+ */
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+	struct mali_page_directory *pagedir;
+
+	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+	if(NULL == pagedir)
+	{
+		return NULL;
+	}
+
+	if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped))
+	{
+		_mali_osk_free(pagedir);
+		return NULL;
+	}
+
+	/* Zero page directory */
+	fill_page(pagedir->page_directory_mapped, 0);
+
+	return pagedir;
+}
+
+/** Free a page directory: release every page table still referenced by a
+ * present PDE, then the page-directory page itself, then the descriptor.
+ * The MMU must no longer be using this directory when this is called.
+ */
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+	const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+	int i;
+
+	/* Free referenced page tables and zero PDEs. */
+	for (i = 0; i < num_page_table_entries; i++)
+	{
+		if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+		{
+			/* Strip the flag bits to recover the page table's physical address */
+			mali_mmu_release_table_page( _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+		}
+	}
+	_mali_osk_write_mem_barrier();
+
+	/* Free the page directory page. */
+	mali_mmu_release_table_page(pagedir->page_directory);
+
+	_mali_osk_free(pagedir);
+}
+
+
+/** Back the Mali virtual range [mali_address, mali_address + size) with
+ * physically contiguous pages starting at phys_address, marking each PTE
+ * readable, writeable and present. The page tables for the range must
+ * already exist (see mali_mmu_pagedir_map); addresses are 4 KiB aligned.
+ */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size)
+{
+	u32 end_address = mali_address + size;
+
+	/* Map physical pages into MMU page tables */
+	for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE)
+	{
+		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+		                                MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+		                                phys_address | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+	}
+	_mali_osk_write_mem_barrier();
+}
+
+/** Return the physical address stored in PDE 'index', with the flag bits
+ * masked off. The PDE is assumed to be present. */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+	return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+/* For instrumented */
+/* State carried through a page-table dump. Used in two passes: a sizing
+ * pass (buffer == NULL, only the *_size counters accumulate) and a copy
+ * pass (buffer set, data is written and buffer_left consumed). */
+struct dump_info
+{
+	u32 buffer_left;          /* bytes of space remaining in 'buffer' */
+	u32 register_writes_size; /* accumulated size of register-write records, in bytes */
+	u32 page_table_dump_size; /* accumulated size of dumped pages, in bytes */
+	u32 *buffer;              /* output cursor; NULL during the sizing pass */
+};
+
+/* Record one register write (address, value pair) in the dump. During the
+ * sizing pass only register_writes_size grows; during the copy pass the
+ * pair is appended to info->buffer. 'comment' is documentation for the
+ * call site only and is not emitted.
+ *
+ * Returns _MALI_OSK_ERR_NOMEM if the copy pass runs out of buffer space. */
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+	if (NULL != info)
+	{
+		info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+		if (NULL != info->buffer)
+		{
+			/* check that we have enough space */
+			if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+			*info->buffer = where;
+			info->buffer++;
+
+			*info->buffer = what;
+			info->buffer++;
+
+			info->buffer_left -= sizeof(u32)*2;
+		}
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Record one MMU page in the dump: its physical address (4 bytes) followed
+ * by the full 4 KiB page contents. Sizing pass accumulates
+ * page_table_dump_size; copy pass writes into info->buffer.
+ *
+ * Returns _MALI_OSK_ERR_NOMEM if the copy pass runs out of buffer space. */
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+{
+	if (NULL != info)
+	{
+		/* 4096 for the page and 4 bytes for the address */
+		const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+		const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+		const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+		info->page_table_dump_size += dump_size_in_bytes;
+
+		if (NULL != info->buffer)
+		{
+			if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+			*info->buffer = phys_addr;
+			info->buffer++;
+
+			_mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+			info->buffer += page_size_in_elements;
+
+			info->buffer_left -= dump_size_in_bytes;
+		}
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Dump the page directory page followed by every mapped page table page
+ * (each with its physical address, flags stripped). Works for both the
+ * sizing and copy passes via dump_page(). */
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+	MALI_DEBUG_ASSERT_POINTER(pagedir);
+	MALI_DEBUG_ASSERT_POINTER(info);
+
+	if (NULL != pagedir->page_directory_mapped)
+	{
+		int i;
+
+		MALI_CHECK_NO_ERROR(
+			dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+		);
+
+		/* 1024 PDEs per page directory */
+		for (i = 0; i < 1024; i++)
+		{
+			if (NULL != pagedir->page_entries_mapped[i])
+			{
+				MALI_CHECK_NO_ERROR(
+					dump_page(pagedir->page_entries_mapped[i],
+					          _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+					                                 i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+				);
+			}
+		}
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Record the register writes needed to re-activate this page directory on
+ * an MMU: set the DTE address, then the command register (raw offsets
+ * 0x0 and 0x8). Works for both the sizing and copy passes. */
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+	MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+	                    "set the page directory address", info));
+	MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+	MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+	MALI_SUCCESS;
+}
+
+/** User/kernel entry point: compute the buffer size a full MMU dump for
+ * the calling session would need (sizing pass only — info.buffer stays
+ * NULL, so nothing is copied). Result is returned in args->size. */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+	struct dump_info info = { 0, 0, 0, NULL };
+	struct mali_session_data * session_data;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = (struct mali_session_data *)(args->ctx);
+
+	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+	args->size = info.register_writes_size + info.page_table_dump_size;
+	MALI_SUCCESS;
+}
+
+/** User/kernel entry point: copy the session's MMU register-write records
+ * and page-table pages into args->buffer (sized via
+ * _mali_ukk_query_mmu_page_table_dump_size). On return, args points at
+ * the two sections within the buffer and carries their sizes.
+ * Fails with _MALI_OSK_ERR_NOMEM if args->size is too small. */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+	struct dump_info info = { 0, 0, 0, NULL };
+	struct mali_session_data * session_data;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+	MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = (struct mali_session_data *)(args->ctx);
+
+	info.buffer_left = args->size;
+	info.buffer = args->buffer;
+
+	/* Register writes land first in the buffer... */
+	args->register_writes = info.buffer;
+	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+	/* ...followed by the page dump, starting at the advanced cursor. */
+	args->page_table_dump = info.buffer;
+	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+	args->register_writes_size = info.register_writes_size;
+	args->page_table_dump_size = info.page_table_dump_size;
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.h b/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.h
new file mode 100644
index 00000000000000..8aababe0edb703
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_mmu_page_directory.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Flag bits stored in the low bits of MMU PDE/PTE entries.
+ */
+typedef enum mali_mmu_entry_flags
+{
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_MASK = 0x07
+} mali_mmu_entry_flags;
+
+
+struct mali_page_directory
+{
+	u32 page_directory; /**< Physical address of the memory session's page directory */
+	mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+	mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+	u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size);
+
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(void);
+void mali_free_empty_page(u32 address);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_osk.h b/drivers/parrot/gpu/mali200/common/mali_osk.h
new file mode 100644
index 00000000000000..05d54fca03cd94
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_osk.h
@@ -0,0 +1,1800 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * Holding more than one lock of the same order at the same time is not
+ * allowed.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCK_ORDER_LAST = 0,
+
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+ _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+
+ _MALI_OSK_LOCK_ORDER_FIRST
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other that RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner(l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock );
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes (order 12 gives 4KB pages, matching the CPU page size assumption above). */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page (0xFFFFF000 for 4KB pages) */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+/* Keep these values as distinct powers of two: the documentation above states
+ * that the flags may be OR'd together. */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ /* NOTE(review): ownership of result_buffer (who allocates and frees it) is
+  * not visible here -- confirm against the notification queue implementation. */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+/* Circular list: a head set up with _MALI_OSK_INIT_LIST_HEAD() points back to
+ * itself via next/prev when empty, and _MALI_OSK_LIST_FOREACHENTRY()
+ * terminates when traversal wraps back around to the head. */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+/* Implementation note: (ptr) is parenthesized in the expansion so that
+ * expression arguments (e.g. a conditional or comma expression) bind
+ * correctly -- standard macro-argument hygiene (CERT PRE01-C). No type or
+ * memory checking is performed; see the documentation above. */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)(ptr)) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that is contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+/* Thin alias of _MALI_OSK_CONTAINER_OF(): recovering the containing structure
+ * from an embedded list member is exactly the container-of operation. */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+/* NOTE(review): "safe" enumeration covers removal of the *current* entry
+ * (ptr) only -- the lookahead cached in tmp must stay linked while the loop
+ * body runs. Confirm callers never delete tmp's entry from inside the loop. */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C-Pre-processor does \em not process enums.
+ */
+/* Several enumerators below intentionally share a numeric value: the MALI_*
+ * names are canonical and the rest are compatibility aliases for older
+ * core names. Values must stay fixed (see the note above). */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+
+ MALI_PP =2, /**< Mali Pixel Processor core */
+ MALI450PP =2, /**< Compatibility option */
+ MALI400PP =2, /**< Compatibility option */
+ MALI300PP =2, /**< Compatibility option */
+ MALI200 =2, /**< Compatibility option */
+
+ MALI_GP =3, /**< Mali Geometry Processor core */
+ MALI450GP =3, /**< Compatibility option */
+ MALI400GP =3, /**< Compatibility option */
+ MALI300GP =3, /**< Compatibility option */
+ MALIGP2 =3, /**< Compatibility option */
+
+ MMU =4, /**< Mali MMU (Memory Management Unit) */
+
+ FPGA_FRAMEWORK =5, /**< Mali registers specific to FPGA implementations */
+
+ MALI_L2 =6, /**< Mali Level 2 cache core */
+ MALI450L2 =6, /**< Compatibility option */
+ MALI400L2 =6, /**< Compatibility option */
+ MALI300L2 =6, /**< Compatibility option */
+
+ MEM_VALIDATION =7, /**< External Memory Validator */
+
+ PMU =8, /**< Power Management Unit */
+
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ /* NOTE(review): irq is unsigned yet documented to accept -1 ("probe"), so -1
+  * is stored as 0xFFFFFFFF -- confirm all comparisons treat it consistently. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+/* (still within the _mali_osk_irq Doxygen group opened above; the previous
+ * duplicate @addtogroup/@{ here was unbalanced -- only one @} follows) */
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to a ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @brief flushing workqueue.
+ *
+ * This will flush the workqueue.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_flush_workqueue( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OS bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This write a 32-bit word to a 32-bit aligned location without using memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This write a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system is detected that needs to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#ifdef MALI_STATE_TRACKING
+#if MALI_STATE_TRACKING
+/** @brief Receive a notification from a queue
+ *
+ * Check if a notification queue is empty.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+/** @} */ /* end group _mali_osk_math */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) );
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identificator for calling process.
+ *
+ * @return Identificator for calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identificator for calling thread.
+ *
+ * @return Identificator for calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Tells the OS that device is now idle
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_idle(void);
+
+/** @brief Tells the OS that the device is about to become active
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_activate(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_osk_bitops.h b/drivers/parrot/gpu/mali200/common/mali_osk_bitops.h
new file mode 100644
index 00000000000000..f262f7dad37d50
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @brief Clear bit @a bit (0..31) in the single 32-bit word at @a addr.
+ *
+ * Non-atomic: callers must provide their own locking if atomicity is needed.
+ */
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	/* Use an unsigned constant: (1 << 31) shifts into the sign bit of a
+	 * signed int, which is undefined behaviour in C. */
+	(*addr) &= ~(1u << bit);
+}
+
+/** @brief Set bit @a bit (0..31) in the single 32-bit word at @a addr.
+ *
+ * Non-atomic: callers must provide their own locking if atomicity is needed.
+ */
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	/* Use an unsigned constant: (1 << 31) shifts into the sign bit of a
+	 * signed int, which is undefined behaviour in C. */
+	(*addr) |= (1u << bit);
+}
+
+/** @brief Test bit @a bit (0..31) of @a value.
+ *
+ * @return zero if the bit is clear, non-zero otherwise (not necessarily 1).
+ */
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	/* 1u avoids the undefined signed (1 << 31) shift when bit == 31. */
+	return value & (1u << bit);
+}
+
+/** @brief Find the (little-endian) index of the first zero bit in @a value.
+ *
+ * @return the bit number (0..31) of the lowest clear bit, or a negative value
+ * when @a value has no zero bit (value == 0xFFFFFFFF) -- callers test
+ * `result >= 0`. NOTE(review): the all-ones case relies on _mali_osk_clz(0)
+ * returning 32; confirm against the OS implementation of _mali_osk_clz.
+ */
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+	u32 inverted;
+	u32 negated;
+	u32 isolated;
+	u32 leading_zeros;
+
+	/* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+	inverted = ~value; /* zzz...z1000...0 */
+	/* Using count_trailing_zeros on inverted value -
+	 * See ARM System Developers Guide for details of count_trailing_zeros */
+
+	/* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+	negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+	/* negated = xxx...x1000...0 */
+
+	isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+	/* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+	 * Note that the output is zero if value was all 1s */
+
+	leading_zeros = _mali_osk_clz( isolated );
+
+	/* clz maps the isolated bit back to its index: bit k has 31-k leading zeros */
+	return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+	/* Locate the 32-bit word that holds bit nr, and the bit's position in it. */
+	u32 word = nr / 32;
+	u32 bit  = nr % 32;
+
+	_mali_internal_clear_bit( bit, addr + word );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+	/* Locate the 32-bit word that holds bit nr, and the bit's position in it. */
+	u32 word = nr / 32;
+	u32 bit  = nr % 32;
+
+	_mali_internal_set_bit( bit, addr + word );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+	/* Locate the 32-bit word that holds bit nr, and the bit's position in it. */
+	u32 word = nr / 32;
+	u32 bit  = nr % 32;
+
+	return _mali_internal_test_bit( bit, addr[word] );
+}
+
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+	u32 total;
+
+	/* Scan one 32-bit word per iteration; 'total' tracks the bit index of the
+	 * current word's bit 0. */
+	for ( total = 0; total < maxbit; total += 32, ++addr )
+	{
+		int result;
+		result = _mali_internal_find_first_zero_bit( *addr );
+
+		/* non-negative signifies the bit was found */
+		if ( result >= 0 )
+		{
+			total += (u32)result;
+			break;
+		}
+	}
+
+	/* Now check if we reached maxbit or above: also clamps a hit found in the
+	 * last word but beyond maxbit (when maxbit is not a multiple of 32). */
+	if ( total >= maxbit )
+	{
+		total = maxbit;
+	}
+
+	return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_osk_list.h b/drivers/parrot/gpu/mali200/common/mali_osk_list.h
new file mode 100644
index 00000000000000..a8d15f2a107ca9
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Link new_entry between the two adjacent nodes prev and next. */
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ new_entry->prev = prev;
+ new_entry->next = next;
+ prev->next = new_entry;
+ next->prev = new_entry;
+}
+
+/* Drop whatever lies between prev and next by joining them directly. */
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ prev->next = next;
+ next->prev = prev;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Prepare a list element for use.
+ *
+ * Every element must be initialized before it takes part in any list
+ * operation. Calling this on an element that is still linked into a list
+ * corrupts that list; unlink it with _mali_osk_list_del first.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+ /* A lone element is a circular list of one: both links point to itself. */
+ list->prev = list;
+ list->next = list;
+}
+
+/** @brief Link a single element in directly after a given entry.
+ *
+ * Inserting after the head of a list therefore makes the element the first
+ * entry of that list. Do not use this on an element that is still linked
+ * elsewhere: the list it came from would be corrupted.
+ *
+ * @param new_entry the element to link in
+ * @param list the entry to insert after; new_entry becomes its next entry
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Link a single element in directly before a given entry.
+ *
+ * Inserting before the head of a list therefore makes the element the last
+ * entry of that list. Do not use this on an element that is still linked
+ * elsewhere: the list it came from would be corrupted.
+ *
+ * @param new_entry the element to link in
+ * @param list the entry to insert before; new_entry becomes its previous entry
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Unlink a single element from whatever list holds it.
+ *
+ * The element's own link fields are left stale afterwards, so the element
+ * must not be traversed and must be re-initialized (_mali_osk_list_init)
+ * before any further use.
+ *
+ * @param list the element to unlink
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Unlink a single element and immediately re-initialize it.
+ *
+ * Unlike _mali_osk_list_del, the removed element is left as a valid empty
+ * list and can be reused without further setup.
+ *
+ * @param list the element to unlink and re-initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+ _mali_osk_list_del(list);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Check whether a list holds no elements.
+ *
+ * An empty list is a head whose next link points back at itself.
+ *
+ * @param list the list head to check
+ * @return non-zero when the list is empty, zero otherwise
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+ return (list == list->next);
+}
+
+/** @brief Transfer an element from its current list to another.
+ *
+ * The element must be initialized. Moving it to the head of the target
+ * list makes it the first entry there.
+ *
+ * @param move_entry the element to transfer
+ * @param list the entry in the target list to insert after
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+ _mali_osk_list_del(move_entry);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Splice one list into another.
+ *
+ * All elements of @a list are linked in directly after the entry @a at.
+ * Both lists must be initialized. Note that @a list itself is not
+ * re-initialized and still references the moved elements afterwards.
+ *
+ * @param list the donor list whose elements are moved
+ * @param at the entry after which the donor's elements are inserted
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+ _mali_osk_list_t *head;
+ _mali_osk_list_t *tail;
+ _mali_osk_list_t *rest;
+
+ if (_mali_osk_list_empty(list))
+ {
+ return; /* nothing to move */
+ }
+
+ head = list->next; /* first donor element */
+ tail = list->prev; /* last donor element */
+ rest = at->next; /* everything that used to follow 'at' */
+
+ at->next = head;
+ head->prev = at;
+
+ tail->next = rest;
+ rest->prev = tail;
+}
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_osk_mali.h b/drivers/parrot/gpu/mali200/common/mali_osk_mali.h
new file mode 100644
index 00000000000000..427fcc8de335be
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_osk_mali.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_arch_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer to the store the pointer to the resources
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_arch_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored the pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_osk_profiling.h b/drivers/parrot/gpu/mali200/common/mali_osk_profiling.h
new file mode 100644
index 00000000000000..fd9a8fb81449b1
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_osk_profiling.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED */
+
+#include "mali_profiling_events.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+/* Sentinel meaning "no hardware counter assigned" (all bits set).
+ * Fix: the original had a stray '=' after the macro name, which made it an
+ * object-like macro expanding to "= ((u32)-1)" -- a syntax error at any
+ * use site. */
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+#else
+/* Internal profiling is handled like a plain function call */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+#endif
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+#else
+/* Internal profiling is handled like a plain function call */
+void _mali_osk_profiling_report_hw_counter(u32 counter_id, u32 value);
+#endif
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_pm.c b/drivers/parrot/gpu/mali200/common/mali_pm.c
new file mode 100644
index 00000000000000..cfd89adc857543
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pm.c
@@ -0,0 +1,547 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_platform.h"
+#include "mali_kernel_utilization.h"
+#include "mali_kernel_core.h"
+#include "mali_group.h"
+
+#define MALI_PM_LIGHT_SLEEP_TIMEOUT 1000
+
+enum mali_pm_scheme
+{
+ MALI_PM_SCHEME_DYNAMIC,
+ MALI_PM_SCHEME_OS_SUSPENDED,
+ MALI_PM_SCHEME_ALWAYS_ON
+};
+
+enum mali_pm_level
+{
+ MALI_PM_LEVEL_1_ON,
+ MALI_PM_LEVEL_2_STANDBY,
+ MALI_PM_LEVEL_3_LIGHT_SLEEP,
+ MALI_PM_LEVEL_4_DEEP_SLEEP
+};
+static _mali_osk_lock_t *mali_pm_lock_set_next_state;
+static _mali_osk_lock_t *mali_pm_lock_set_core_states;
+static _mali_osk_lock_t *mali_pm_lock_execute_state_change;
+static _mali_osk_irq_t *wq_irq;
+
+static _mali_osk_timer_t *idle_timer = NULL;
+static mali_bool idle_timer_running = MALI_FALSE;
+static u32 mali_pm_event_number = 0;
+
+static u32 num_active_gps = 0;
+static u32 num_active_pps = 0;
+
+static enum mali_pm_scheme current_scheme = MALI_PM_SCHEME_DYNAMIC;
+static enum mali_pm_level current_level = MALI_PM_LEVEL_1_ON;
+static enum mali_pm_level next_level_dynamic = MALI_PM_LEVEL_2_STANDBY; /* Should be the state we go to when we go out of MALI_PM_SCHEME_ALWAYS_ON during init */
+
+
+
+static _mali_osk_errcode_t mali_pm_upper_half(void *data);
+static void mali_pm_bottom_half(void *data);
+static void mali_pm_powerup(void);
+static void mali_pm_powerdown(mali_power_mode power_mode);
+
+static void timeout_light_sleep(void* arg);
+#if 0
+/* Deep sleep timeout not supported */
+static void timeout_deep_sleep(void* arg);
+#endif
+static u32 mali_pm_event_number_get(void);
+static void mali_pm_event(enum mali_pm_event pm_event, mali_bool schedule_work, u32 timer_time );
+
+/** @brief Bring up the Mali power-management core.
+ *
+ * Creates the three PM locks, the idle timer and the deferred-work handler
+ * used by the PM state machine, then initialises the platform layer. On any
+ * failure everything already created is released in reverse order, so the
+ * caller must NOT call mali_pm_terminate() after an error.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+ /* Fix: _MALI_OSK_LOCKFLAG_ORDERED was OR'ed in twice; the duplicate flag
+ * is redundant and has been dropped. */
+ mali_pm_lock_execute_state_change = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PM_EXECUTE);
+
+ if (NULL != mali_pm_lock_execute_state_change )
+ {
+ mali_pm_lock_set_next_state = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ONELOCK| _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_LAST);
+
+ if (NULL != mali_pm_lock_set_next_state)
+ {
+ mali_pm_lock_set_core_states = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PM_CORE_STATE);
+
+ if (NULL != mali_pm_lock_set_core_states)
+ {
+ idle_timer = _mali_osk_timer_init();
+ if (NULL != idle_timer)
+ {
+ wq_irq = _mali_osk_irq_init(_MALI_OSK_IRQ_NUMBER_PMM,
+ mali_pm_upper_half,
+ mali_pm_bottom_half,
+ NULL,
+ NULL,
+ (void *)NULL,
+ "Mali PM deferred work");
+ if (NULL != wq_irq)
+ {
+ if (_MALI_OSK_ERR_OK == mali_platform_init())
+ {
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pm_dev_enable();
+ mali_pm_powerup();
+#endif
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* Unwind in reverse order of creation. */
+ _mali_osk_irq_term(wq_irq);
+ }
+
+ _mali_osk_timer_del(idle_timer);
+ _mali_osk_timer_term(idle_timer);
+ }
+ _mali_osk_lock_term(mali_pm_lock_set_core_states);
+ }
+ _mali_osk_lock_term(mali_pm_lock_set_next_state);
+ }
+ _mali_osk_lock_term(mali_pm_lock_execute_state_change);
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* Tear down the PM core; inverse of mali_pm_initialize(). */
+void mali_pm_terminate(void)
+{
+ mali_platform_deinit();
+ _mali_osk_irq_term(wq_irq);
+ _mali_osk_timer_del(idle_timer); /* cancel any pending idle timeout */
+ _mali_osk_timer_term(idle_timer);
+ _mali_osk_lock_term(mali_pm_lock_execute_state_change);
+ _mali_osk_lock_term(mali_pm_lock_set_next_state);
+ _mali_osk_lock_term(mali_pm_lock_set_core_states);
+}
+
+
+/* Take the next-state spinlock (mali_pm_lock_set_next_state). */
+inline void mali_pm_lock(void)
+{
+ _mali_osk_lock_wait(mali_pm_lock_set_next_state, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Release the next-state spinlock taken by mali_pm_lock(). */
+inline void mali_pm_unlock(void)
+{
+ _mali_osk_lock_signal(mali_pm_lock_set_next_state, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Take the lock that serialises power-state transitions. */
+inline void mali_pm_execute_state_change_lock(void)
+{
+ _mali_osk_lock_wait(mali_pm_lock_execute_state_change,_MALI_OSK_LOCKMODE_RW);
+}
+
+/* Release the state-transition lock taken by mali_pm_execute_state_change_lock(). */
+inline void mali_pm_execute_state_change_unlock(void)
+{
+ _mali_osk_lock_signal(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Power the GPU fully on: platform layer first, then (with runtime job
+ * control) take a device PM reference, and finally power the core groups. */
+static void mali_pm_powerup(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_ON\n"));
+ mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ /* Acquire our reference */
+ MALI_DEBUG_PRINT(4, ("Mali PM: Getting device PM reference (=> requesting MALI_POWER_MODE_ON)\n"));
+ _mali_osk_pm_dev_activate();
+#endif
+
+ mali_group_power_on();
+}
+
+/* Take the GPU to the requested sleep mode. Core groups are only powered
+ * off when leaving an awake level (ON/STANDBY); deeper-to-deeper
+ * transitions skip that step. */
+static void mali_pm_powerdown(mali_power_mode power_mode)
+{
+ if ( (MALI_PM_LEVEL_1_ON == current_level) || (MALI_PM_LEVEL_2_STANDBY == current_level) )
+ {
+ mali_group_power_off();
+ }
+ mali_platform_power_mode_change(power_mode);
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pm_dev_idle(); /* counterpart of _mali_osk_pm_dev_activate() in mali_pm_powerup() */
+#endif
+}
+
+/* The GPU counts as powered on only when the active scheme keeps power
+ * available (dynamic or always-on) AND both the current and the pending
+ * dynamic level are awake levels (ON or STANDBY). */
+mali_bool mali_pm_is_powered_on(void)
+{
+ mali_bool scheme_on = (MALI_PM_SCHEME_ALWAYS_ON == current_scheme) || (MALI_PM_SCHEME_DYNAMIC == current_scheme);
+ mali_bool level_on = (MALI_PM_LEVEL_1_ON == current_level) || (MALI_PM_LEVEL_2_STANDBY == current_level);
+ mali_bool next_on = (MALI_PM_LEVEL_1_ON == next_level_dynamic) || (MALI_PM_LEVEL_2_STANDBY == next_level_dynamic);
+
+ return (scheme_on && level_on && next_on) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_DEBUG_CODE(
+/* Debug-build-only helper: printable name for a power level, used by the
+ * MALI_DEBUG_PRINT calls in mali_pm_process_next(). */
+static const char *state_as_string(enum mali_pm_level level)
+{
+ switch(level)
+ {
+ case MALI_PM_LEVEL_1_ON:
+ return "MALI_PM_LEVEL_1_ON";
+ case MALI_PM_LEVEL_2_STANDBY:
+ return "MALI_PM_LEVEL_2_STANDBY";
+ case MALI_PM_LEVEL_3_LIGHT_SLEEP:
+ return "MALI_PM_LEVEL_3_LIGHT_SLEEP";
+ case MALI_PM_LEVEL_4_DEEP_SLEEP:
+ return "MALI_PM_LEVEL_4_DEEP_SLEEP";
+ default:
+ return "UNKNOWN LEVEL";
+ }
+});
+
+/* This could be used from another thread (work queue), if we need that */
+/* Core PM state machine: moves the GPU from current_level to the level
+ * implied by the active scheme, starting or cancelling the idle timer as
+ * needed. Runs entirely under mali_pm_lock_execute_state_change. */
+static void mali_pm_process_next(void)
+{
+ enum mali_pm_level pm_level_to_set;
+
+ _mali_osk_lock_wait(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+
+ pm_level_to_set = current_level;
+
+ if (MALI_PM_SCHEME_DYNAMIC == current_scheme)
+ {
+ pm_level_to_set = next_level_dynamic;
+
+ MALI_DEBUG_PRINT(4, ("Mali PM: Dynamic scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(pm_level_to_set)));
+
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ /* Start timers according to new state, so we get STANDBY -> LIGHT_SLEEP -> DEEP_SLEEP */
+
+ if (MALI_TRUE == idle_timer_running)
+ {
+ /* There is an existing timeout, so delete it */
+ _mali_osk_timer_del(idle_timer);
+ idle_timer_running = MALI_FALSE;
+ }
+
+ /* Making sure that we turn on through the platform file
+ Since it was turned OFF directly through the platform file.
+ This might lead to double turn-on, but the platform file supports that.*/
+ if ( current_level == MALI_PM_LEVEL_4_DEEP_SLEEP)
+ {
+ mali_pm_powerup();
+ mali_kernel_core_wakeup();
+
+ }
+ if (MALI_PM_LEVEL_1_ON == pm_level_to_set)
+ {
+ if (MALI_PM_LEVEL_2_STANDBY != current_level)
+ {
+ /* We only need to do anything if we came from one of the sleeping states */
+ mali_pm_powerup();
+
+ /* Wake up Mali cores since we came from a sleep state */
+ mali_kernel_core_wakeup();
+ }
+ }
+ else if (MALI_PM_LEVEL_2_STANDBY == pm_level_to_set)
+ {
+ /* This is just an internal state, so we don't bother to report it to the platform file */
+ idle_timer_running = MALI_TRUE;
+ /* Timer carries the current event number so stale timeouts are ignored. */
+ _mali_osk_timer_setcallback(idle_timer, timeout_light_sleep, (void*) mali_pm_event_number_get());
+ _mali_osk_timer_add(idle_timer, _mali_osk_time_mstoticks(MALI_PM_LIGHT_SLEEP_TIMEOUT));
+ }
+ else if (MALI_PM_LEVEL_3_LIGHT_SLEEP == pm_level_to_set)
+ {
+ mali_pm_powerdown(MALI_POWER_MODE_LIGHT_SLEEP);
+ }
+ else if (MALI_PM_LEVEL_4_DEEP_SLEEP == pm_level_to_set)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_DEEP_SLEEP\n"));
+ mali_pm_powerdown(MALI_POWER_MODE_DEEP_SLEEP);
+ }
+ }
+ else if (MALI_PM_SCHEME_OS_SUSPENDED == current_scheme)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PM: OS scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(MALI_PM_LEVEL_4_DEEP_SLEEP)));
+
+ pm_level_to_set = MALI_PM_LEVEL_4_DEEP_SLEEP;
+
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ /* Cancel any timers */
+ if (MALI_TRUE == idle_timer_running)
+ {
+ /* There is an existing timeout, so delete it */
+ _mali_osk_timer_del(idle_timer);
+ idle_timer_running = MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_DEEP_SLEEP\n"));
+ mali_pm_powerdown(MALI_POWER_MODE_DEEP_SLEEP);
+ /* NOTE(review): presumably records the pre-suspend level so the resume
+ * path can restore it -- confirm against the resume handling. */
+ next_level_dynamic = current_level;
+ }
+ else if (MALI_PM_SCHEME_ALWAYS_ON == current_scheme)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PM: Always on scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(MALI_PM_LEVEL_1_ON)));
+
+ pm_level_to_set = MALI_PM_LEVEL_1_ON;
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_ON\n"));
+ mali_pm_powerup();
+ if (MALI_PM_LEVEL_2_STANDBY != current_level)
+ {
+ /* Wake up Mali cores since we came from a sleep state */
+ mali_kernel_core_wakeup();
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("MALI PM: Illegal scheme"));
+ }
+
+ current_level = pm_level_to_set;
+
+end_function:
+ _mali_osk_lock_signal(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+/**
+ * Select or deselect the "always on" power scheme.
+ *
+ * @param enable MALI_TRUE forces the GPU permanently powered
+ *               (ALWAYS_ON scheme); MALI_FALSE returns to the normal
+ *               DYNAMIC scheme.
+ *
+ * The scheme-change event is handled synchronously in the calling
+ * thread (schedule_work == MALI_FALSE, timer_time == 0).
+ */
+void mali_pm_always_on(mali_bool enable)
+{
+ if (MALI_TRUE == enable)
+ {
+ /* The event is processed in current thread synchronously */
+ mali_pm_event(MALI_PM_EVENT_SCHEME_ALWAYS_ON, MALI_FALSE, 0 );
+ }
+ else
+ {
+ /* The event is processed in current thread synchronously */
+ mali_pm_event(MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL, MALI_FALSE, 0 );
+ }
+}
+
+/* Upper half required by the _mali_osk_irq API used for the PM work
+ * queue; PM has no hardware interrupt, so this is a no-op that always
+ * reports success. */
+static _mali_osk_errcode_t mali_pm_upper_half(void *data)
+{
+ /* not used */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Bottom half: runs on the OSK work queue (scheduled through
+ * _mali_osk_irq_schedulework) and performs the deferred state change. */
+static void mali_pm_bottom_half(void *data)
+{
+ mali_pm_process_next();
+}
+
+/**
+ * Return a fresh, non-zero PM event sequence number.
+ *
+ * The counter advances under the PM spinlock and the value 0 is
+ * skipped on wrap-around, so 0 can serve as a "no timer event"
+ * sentinel.  Timer callbacks capture the returned number when armed;
+ * mali_pm_event() later compares it against the live counter to
+ * detect and discard stale timer events.
+ */
+static u32 mali_pm_event_number_get(void)
+{
+ u32 retval;
+
+ mali_pm_lock(); /* spinlock: mali_pm_lock_set_next_state */
+ retval = ++mali_pm_event_number;
+ if (0==retval ) retval = ++mali_pm_event_number;
+ mali_pm_unlock();
+
+ return retval;
+}
+
+/**
+ * Central PM event dispatcher.
+ *
+ * Under the PM spinlock this records the requested next dynamic power
+ * level and/or scheme change, then either defers the actual state
+ * change to the work queue or performs it synchronously in the
+ * calling thread.
+ *
+ * @param pm_event      event to process
+ * @param schedule_work MALI_TRUE to run the state change on the
+ *                      worker thread, MALI_FALSE to run it inline
+ * @param timer_time    0 for non-timer events; for timer events, the
+ *                      event number captured when the timer was armed
+ *                      (stale timer callbacks are silently dropped)
+ */
+static void mali_pm_event(enum mali_pm_event pm_event, mali_bool schedule_work, u32 timer_time )
+{
+ mali_pm_lock(); /* spinlock: mali_pm_lock_set_next_state */
+ /* Only timer events should set this variable, all other events must set it to zero. */
+ if ( 0 != timer_time )
+ {
+ MALI_DEBUG_ASSERT( (pm_event==MALI_PM_EVENT_TIMER_LIGHT_SLEEP) || (pm_event==MALI_PM_EVENT_TIMER_DEEP_SLEEP) );
+ if ( mali_pm_event_number != timer_time )
+ {
+ /* In this case there have been processed newer events since the timer event was set up.
+ If so we always ignore the timing event */
+ mali_pm_unlock();
+ return;
+ }
+ }
+ else
+ {
+ /* Delete possible ongoing timers
+ if ( (MALI_PM_LEVEL_2_STANDBY==current_level) || (MALI_PM_LEVEL_3_LIGHT_SLEEP==current_level) )
+ {
+ _mali_osk_timer_del(idle_timer);
+ }
+ */
+ }
+ /* Bumping the counter invalidates any timer armed before this point
+ * (see mali_pm_event_number_get()). */
+ mali_pm_event_number++;
+ switch (pm_event)
+ {
+ case MALI_PM_EVENT_CORES_WORKING:
+ next_level_dynamic = MALI_PM_LEVEL_1_ON;
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ break;
+ case MALI_PM_EVENT_CORES_IDLE:
+ next_level_dynamic = MALI_PM_LEVEL_2_STANDBY;
+ /*MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );*/
+ break;
+ case MALI_PM_EVENT_TIMER_LIGHT_SLEEP:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ next_level_dynamic = MALI_PM_LEVEL_3_LIGHT_SLEEP;
+ break;
+ case MALI_PM_EVENT_TIMER_DEEP_SLEEP:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ next_level_dynamic = MALI_PM_LEVEL_4_DEEP_SLEEP;
+ break;
+ case MALI_PM_EVENT_OS_SUSPEND:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ current_scheme = MALI_PM_SCHEME_OS_SUSPENDED;
+ next_level_dynamic = MALI_PM_LEVEL_4_DEEP_SLEEP; /* Dynamic scheme will go into level when we are resumed */
+ break;
+ case MALI_PM_EVENT_OS_RESUME:
+ MALI_DEBUG_ASSERT(MALI_PM_SCHEME_OS_SUSPENDED == current_scheme );
+ current_scheme = MALI_PM_SCHEME_DYNAMIC;
+ break;
+ case MALI_PM_EVENT_SCHEME_ALWAYS_ON:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ current_scheme = MALI_PM_SCHEME_ALWAYS_ON;
+ break;
+ case MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON == current_scheme || MALI_PM_SCHEME_DYNAMIC == current_scheme );
+ current_scheme = MALI_PM_SCHEME_DYNAMIC;
+ break;
+ default:
+ MALI_DEBUG_PRINT_ERROR(("Unknown next state."));
+ mali_pm_unlock();
+ return;
+ }
+ mali_pm_unlock();
+
+ if (MALI_TRUE == schedule_work)
+ {
+ _mali_osk_irq_schedulework(wq_irq);
+ }
+ else
+ {
+ mali_pm_process_next();
+ }
+}
+
+/* Timer callback for the light-sleep timeout.  arg carries the event
+ * number captured when the timer was armed; mali_pm_event() discards
+ * the event if newer PM events have arrived since.  The real work is
+ * deferred to the worker thread (schedule_work == MALI_TRUE). */
+static void timeout_light_sleep(void* arg)
+{
+ /* State change only if no newer power events have happend from the time in arg.
+ Actual work will be scheduled on worker thread. */
+ mali_pm_event(MALI_PM_EVENT_TIMER_LIGHT_SLEEP, MALI_TRUE, (u32) arg);
+}
+
+/**
+ * Track GP/PP core activity and emit the matching PM event.
+ *
+ * Counts active GP and PP cores under mali_pm_lock_set_core_states
+ * and reports only the edge transitions: 0 -> 1 active cores
+ * (CORES_WORKING) and 1 -> 0 active cores (CORES_IDLE).  Both events
+ * are processed synchronously in the calling thread.
+ */
+void mali_pm_core_event(enum mali_core_event core_event)
+{
+ mali_bool transition_working = MALI_FALSE;
+ mali_bool transition_idle = MALI_FALSE;
+
+ _mali_osk_lock_wait(mali_pm_lock_set_core_states, _MALI_OSK_LOCKMODE_RW);
+
+ /* No default case: all four mali_core_event values are handled. */
+ switch (core_event)
+ {
+ case MALI_CORE_EVENT_GP_START:
+ if (num_active_pps + num_active_gps == 0)
+ {
+ transition_working = MALI_TRUE;
+ }
+ num_active_gps++;
+ break;
+ case MALI_CORE_EVENT_GP_STOP:
+ if (num_active_pps + num_active_gps == 1)
+ {
+ transition_idle = MALI_TRUE;
+ }
+ num_active_gps--;
+ break;
+ case MALI_CORE_EVENT_PP_START:
+ if (num_active_pps + num_active_gps == 0)
+ {
+ transition_working = MALI_TRUE;
+ }
+ num_active_pps++;
+ break;
+ case MALI_CORE_EVENT_PP_STOP:
+ if (num_active_pps + num_active_gps == 1)
+ {
+ transition_idle = MALI_TRUE;
+ }
+ num_active_pps--;
+ break;
+ }
+
+ if (transition_working == MALI_TRUE)
+ {
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+ mali_utilization_core_start(_mali_osk_time_get_ns());
+#endif
+ mali_pm_event(MALI_PM_EVENT_CORES_WORKING, MALI_FALSE, 0); /* process event in same thread */
+ }
+ else if (transition_idle == MALI_TRUE)
+ {
+#ifdef CONFIG_MALI200_GPU_UTILIZATION
+ mali_utilization_core_end(_mali_osk_time_get_ns());
+#endif
+ mali_pm_event(MALI_PM_EVENT_CORES_IDLE, MALI_FALSE, 0); /* process event in same thread */
+ }
+
+ _mali_osk_lock_signal(mali_pm_lock_set_core_states, _MALI_OSK_LOCKMODE_RW);
+}
+
+/**
+ * Prepare for OS (system) suspend: stop the GP and PP schedulers
+ * first so no new jobs are issued, then drive the PM state machine
+ * into the OS_SUSPENDED scheme (deep sleep) synchronously.
+ */
+void mali_pm_os_suspend(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS suspending...\n"));
+
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+ mali_pm_event(MALI_PM_EVENT_OS_SUSPEND, MALI_FALSE, 0); /* process event in same thread */
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS suspend completed\n"));
+}
+
+/**
+ * Undo mali_pm_os_suspend(): return to the DYNAMIC scheme
+ * (synchronously), then restart the GP and PP schedulers — the
+ * mirror order of the suspend path.
+ */
+void mali_pm_os_resume(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS resuming...\n"));
+
+ mali_pm_event(MALI_PM_EVENT_OS_RESUME, MALI_FALSE, 0); /* process event in same thread */
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS resume completed\n"));
+}
+
+/* Runtime-PM hooks: currently debug-print only — the actual power
+ * transitions are driven by the PM state machine above. */
+void mali_pm_runtime_suspend(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS runtime suspended\n"));
+}
+
+void mali_pm_runtime_resume(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS runtime resumed\n"));
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_pm.h b/drivers/parrot/gpu/mali200/common/mali_pm.h
new file mode 100644
index 00000000000000..d4ccfde490bbc5
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pm.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+/* Core activity notifications fed into mali_pm_core_event(). */
+enum mali_core_event
+{
+ MALI_CORE_EVENT_GP_START,
+ MALI_CORE_EVENT_GP_STOP,
+ MALI_CORE_EVENT_PP_START,
+ MALI_CORE_EVENT_PP_STOP
+};
+
+/* Events consumed by the PM state machine (see mali_pm.c).
+ * NOTE(review): "CONTROLL" is misspelled; renaming would touch every
+ * user of the enum, so it is left as-is here. */
+enum mali_pm_event
+{
+ MALI_PM_EVENT_CORES_WORKING,
+ MALI_PM_EVENT_CORES_IDLE,
+ MALI_PM_EVENT_TIMER_LIGHT_SLEEP,
+ MALI_PM_EVENT_TIMER_DEEP_SLEEP,
+ MALI_PM_EVENT_OS_SUSPEND,
+ MALI_PM_EVENT_OS_RESUME,
+ MALI_PM_EVENT_SCHEME_ALWAYS_ON,
+ MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL,
+};
+
+/* Setup / teardown of the PM subsystem. */
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+void mali_pm_always_on(mali_bool enable);
+
+/* Locking helpers shared between the PM implementation and callers. */
+void mali_pm_lock(void);
+void mali_pm_unlock(void);
+void mali_pm_execute_state_change_lock(void);
+
+void mali_pm_execute_state_change_unlock(void);
+
+mali_bool mali_pm_is_powered_on(void);
+
+/* Event entry points (core activity, OS and runtime PM callbacks). */
+void mali_pm_core_event(enum mali_core_event core_event);
+
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_pmu.c b/drivers/parrot/gpu/mali200/common/mali_pmu.c
new file mode 100644
index 00000000000000..348b5dc74dd0a3
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pmu.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
+ */
+struct mali_pmu_core
+{
+ struct mali_hw_core hw_core; /* register mapping for the PMU bank */
+ u32 mali_registered_cores_power_mask; /* see mali_pmu_detect_mask() for the bit layout */
+};
+
+/* Singleton: set by mali_pmu_create(), read back through
+ * mali_pmu_get_global_pmu_core(). */
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x10, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+/**
+ * Allocate and initialise the (single) PMU core object.
+ *
+ * Computes the power mask from the core/cache counts, maps the PMU
+ * register bank and masks PMU interrupts via mali_pmu_reset().  On
+ * success the new object is published through mali_global_pmu_core.
+ *
+ * @return the new PMU object, or NULL on failure (every partially
+ *         completed step is rolled back before returning).
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+ struct mali_pmu_core* pmu;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+ if (NULL != pmu)
+ {
+ pmu->mali_registered_cores_power_mask = mali_pmu_detect_mask(number_of_pp_cores, number_of_l2_caches);
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE))
+ {
+ if (_MALI_OSK_ERR_OK == mali_pmu_reset(pmu))
+ {
+ mali_global_pmu_core = pmu;
+ return pmu;
+ }
+ mali_hw_core_delete(&pmu->hw_core);
+ }
+ _mali_osk_free(pmu);
+ }
+
+ return NULL;
+}
+
+/**
+ * Destroy a PMU core object created by mali_pmu_create().
+ *
+ * Unmaps the register bank and frees the object.  Also unregisters the
+ * global singleton so a later mali_pmu_create() does not trip its
+ * NULL assertion or hand out a dangling pointer via
+ * mali_pmu_get_global_pmu_core().  (The previous `pmu = NULL;` after
+ * the free only cleared the by-value parameter — a dead store.)
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ if (pmu == mali_global_pmu_core)
+ {
+ mali_global_pmu_core = NULL;
+ }
+
+ mali_hw_core_delete(&pmu->hw_core);
+ _mali_osk_free(pmu);
+}
+
+/**
+ * Put the PMU in its operating state: mask all PMU interrupts so that
+ * power transitions are observed purely by polling the STATUS
+ * register (see powerdown/powerup below).  Always succeeds.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ /* Don't use interrupts - just poll status */
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Power down every core covered by the PMU's power mask.
+ *
+ * Writes the mask to POWER_DOWN, then busy-polls STATUS until all
+ * masked cores report asleep.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_TIMEOUT if the
+ *         cores did not reach the sleep state within the poll budget.
+ */
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 );
+ MALI_DEBUG_PRINT( 4, ("Mali PMU: power down (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, pmu->mali_registered_cores_power_mask);
+
+ /* Wait for cores to be powered down (100 x 100us = 10ms budget) */
+ timeout = 100;
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->mali_registered_cores_power_mask;
+ if( stat == pmu->mali_registered_cores_power_mask ) break; /* All cores we wanted are now asleep */
+ _mali_osk_time_ubusydelay(100);
+ timeout--;
+ } while( timeout > 0 );
+
+ /* NOTE(review): the final delay is not followed by a re-read, so a
+ * transition completing on the last iteration reports TIMEOUT. */
+ if( timeout == 0 )
+ {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Power up every core covered by the PMU's power mask.
+ *
+ * Writes the mask to POWER_UP, then busy-polls STATUS until no masked
+ * core still reports asleep (STATUS bit set == sleeping).
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_TIMEOUT if the
+ *         cores did not wake within the poll budget.
+ */
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 ); /* Shouldn't be zero */
+ MALI_DEBUG_PRINT( 4, ("Mali PMU: power up (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, pmu->mali_registered_cores_power_mask);
+
+ /* Wait for cores to be powered up (100 x 100us = 10ms budget) */
+ timeout = 100;
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core,PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->mali_registered_cores_power_mask;
+ if( stat == 0 ) break; /* All cores we wanted are now awake */
+ _mali_osk_time_ubusydelay(100);
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 )
+ {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Return the singleton PMU object, or NULL if none was created. */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
+
+/**
+ * Compute the PMU power-domain mask from the detected configuration.
+ *
+ * One L2 cache => Mali-300/400 layout: bit 0 = GP, bit 1 = L2,
+ * bits 2.. = one bit per PP core.  More than one L2 => Mali-450
+ * layout with grouped PP domains.  Zero L2 caches yields an empty
+ * mask (the caller asserts against that before use).
+ */
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+ u32 mask = 0;
+
+ if (number_of_l2_caches == 1)
+ {
+ /* Mali-300 or Mali-400 */
+ u32 i;
+
+ /* GP */
+ mask = 0x01;
+
+ /* L2 cache */
+ mask |= 0x01<<1;
+
+ /* Set bit for each PP core */
+ for (i = 0; i < number_of_pp_cores; i++)
+ {
+ mask |= 0x01<<(i+2);
+ }
+ }
+ else if (number_of_l2_caches > 1)
+ {
+ /* Mali-450 */
+
+ /* GP (including its L2 cache) */
+ mask = 0x01;
+
+ /* There is always at least one PP (including its L2 cache) */
+ mask |= 0x01<<1;
+
+ /* Additional PP cores in same L2 cache */
+ if (number_of_pp_cores >= 2)
+ {
+ mask |= 0x01<<2;
+ }
+
+ /* Additional PP cores in a third L2 cache */
+ if (number_of_pp_cores >= 5)
+ {
+ mask |= 0x01<<3;
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Power mask is 0x%08X (%u + %u)\n", mask, number_of_pp_cores, number_of_l2_caches));
+
+ return mask;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_pmu.h b/drivers/parrot/gpu/mali200/common/mali_pmu.h
new file mode 100644
index 00000000000000..fd10c080cbdcf8
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pmu.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali-400 in-built PMU hardware
+ */
+
+/* Include guard added: this header had none (unlike mali_pm.h), so a
+ * double inclusion would redeclare everything below. */
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from entry point of the driver in order to create and intialize the PMU resource
+ *
+ * @param resource it will be a pointer to a PMU resource
+ * @param number_of_pp_cores Number of found PP resources in configuration
+ * @param number_of_l2_caches Number of found L2 cache resources in configuration
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief It deallocates the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu);
+
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp.c b/drivers/parrot/gpu/mali200/common/mali_pp.c
new file mode 100644
index 00000000000000..2b2846bed31b02
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/* See mali_gp.c file for description on how to handle the interrupt mask.
+ * This is how to do it on PP: mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ */
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 8
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ struct mali_group *group; /**< Parent group for this core */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ struct mali_pp_job *running_job; /**< Current running (super) job */
+ u32 running_sub_job; /**< Current running sub job */
+ _mali_osk_timer_t *timeout_timer; /**< timeout timer for this core */
+ u32 timeout_job_id; /**< job id for the timed out job - relevant only if pp_core_timed_out == MALI_TRUE */
+ mali_bool core_timed_out; /**< if MALI_TRUE, this pp core has timed out; if MALI_FALSE, no timeout on this pp core */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+ u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+/* Global registry of PP cores: slots are filled in creation order by
+ * mali_pp_create() and NULLed by mali_pp_delete(). */
+static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES];
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static _mali_osk_errcode_t mali_pp_upper_half(void *data);
+static void mali_pp_bottom_half(void *data);
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+static void mali_pp_post_process_job(struct mali_pp_core *core);
+static void mali_pp_timeout(void *data);
+
+/**
+ * Allocate and initialise one PP core object.
+ *
+ * Sequence: allocate -> map registers -> reset the core (under the
+ * group lock) -> install IRQ handlers -> create the timeout timer ->
+ * register the core in the global list.  Each failure path unwinds
+ * exactly the steps already completed and returns NULL.
+ *
+ * NOTE(review): core_timed_out and timeout_job_id are not initialised
+ * here — presumably they are written by the timeout path before first
+ * use; confirm against mali_pp_timeout() and the bottom half.
+ */
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+ struct mali_pp_core* core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+ if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+ return NULL;
+ }
+
+ core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+ if (NULL != core)
+ {
+ core->group = group;
+ core->core_id = mali_global_num_pp_cores;
+ core->running_job = NULL;
+ core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK))
+ {
+ _mali_osk_errcode_t ret;
+
+ mali_group_lock(group);
+ ret = mali_pp_reset(core);
+ mali_group_unlock(group);
+
+ if (_MALI_OSK_ERR_OK == ret)
+ {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_pp_upper_half,
+ mali_pp_bottom_half,
+ mali_pp_irq_probe_trigger,
+ mali_pp_irq_probe_ack,
+ core,
+ "mali_pp_irq_handlers");
+ if (NULL != core->irq)
+ {
+ /* Initialise the timeout timer */
+ core->timeout_timer = _mali_osk_timer_init();
+ if(NULL != core->timeout_timer)
+ {
+ _mali_osk_timer_setcallback(core->timeout_timer, mali_pp_timeout, (void *)core);
+
+ mali_global_pp_cores[mali_global_num_pp_cores] = core;
+ mali_global_num_pp_cores++;
+
+ return core;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to setup timeout timer for PP core %s\n", core->hw_core.description));
+ /* Release IRQ handlers */
+ _mali_osk_irq_term(core->irq);
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+ }
+
+ return NULL;
+}
+
+/**
+ * Tear down a PP core: stop its timeout timer, release the IRQ,
+ * unmap its registers, remove it from the global list and free it.
+ *
+ * NOTE(review): removal NULLs the core's slot while decrementing
+ * mali_global_num_pp_cores, so index-based walks bounded by
+ * mali_pp_get_glob_num_pp_cores() can miss cores stored after the
+ * hole.  Presumably cores are only deleted in reverse creation order
+ * at shutdown — confirm with callers.
+ */
+void mali_pp_delete(struct mali_pp_core *core)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_timer_term(core->timeout_timer);
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+
+ /* Remove core from global list */
+ for (i = 0; i < mali_global_num_pp_cores; i++)
+ {
+ if (mali_global_pp_cores[i] == core)
+ {
+ mali_global_pp_cores[i] = NULL;
+ mali_global_num_pp_cores--;
+ break;
+ }
+ }
+
+ _mali_osk_free(core);
+}
+
+/* Issue the STOP_BUS command without waiting for completion; pair
+ * with mali_pp_stop_bus_wait() when completion must be observed. */
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ /* Will only send the stop bus command, and not wait for it to complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/**
+ * Stop the core's bus and poll STATUS until BUS_STOPPED is reported.
+ *
+ * Must be called with the group lock held.  Polls at most
+ * 20 x 10us = 200us of busy-wait.
+ *
+ * @return _MALI_OSK_ERR_OK once stopped, _MALI_OSK_ERR_FAULT on
+ *         timeout (status is logged).
+ */
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ /* Send the stop bus command. */
+ mali_pp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Force-reset the PP core.  The bus must already be stopped.
+ *
+ * Reset completion is detected with a register handshake: a bogus
+ * value is written to WRITE_BOUNDARY_LOW, FORCE_RESET is issued, and
+ * then a check value is repeatedly written and read back — once the
+ * read returns the check value the core has come out of reset.
+ * Interrupts are cleared and re-enabled before returning.  Always
+ * returns _MALI_OSK_ERR_OK (a failed handshake is only logged).
+ */
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+ /* Bus must be stopped before calling this function */
+ const int reset_finished_loop_count = 15;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_pp_post_process_job(core); /* @@@@ is there some cases where it is unsafe to post process the job here? */
+
+ /* Set register to a bogus value. The register will be used to detect when reset is complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+
+ /* Force core to reset */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ /* Wait for reset to be complete */
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Reset the PP core, selecting the method at compile time:
+ * Mali-200 (USING_MALI200) stops the bus and performs a hard reset;
+ * Mali-300/400 (USING_MALI400) use the SOFT_RESET command and poll
+ * RAWSTAT for RESET_COMPLETED.  Interrupts are re-enabled on success.
+ *
+ * Must be called with the group lock held.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the
+ *         bus stop / soft reset did not complete in time.
+ */
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_pp_post_process_job(core); /* @@@@ is there some cases where it is unsafe to post process the job here? */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ /* On Mali-200, stop the bus, then do a hard reset of the core */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT ;
+ }
+
+ /* the bus was stopped OK, do the hard reset */
+ mali_pp_hard_reset(core);
+
+#elif defined(USING_MALI400)
+
+ /* Mali-300 and Mali-400 have a safe reset command which we use */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali PP: Failed to reset core %s, Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+#error "no supported mali core defined"
+#endif
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Program the PP core registers for @job / @sub_job and start
+ * rendering.
+ *
+ * Must be called with the owning group lock held.  Writes the frame,
+ * write-back and (optionally) performance-counter registers, issues
+ * START_RENDERING, arms the per-core timeout timer and records the
+ * running (sub) job on the core.
+ *
+ * Fix: the original dereferenced core (counter fields) and job
+ * (register array getters) before the MALI_DEBUG_ASSERT_POINTER /
+ * group-lock assertions ran; the loads now happen after the checks.
+ */
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
+{
+ u32 *frame_registers;
+ u32 *wb0_registers;
+ u32 *wb1_registers;
+ u32 *wb2_registers;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ /* Fetch job/core state only after the assertions above. */
+ frame_registers = mali_pp_job_get_frame_registers(job);
+ wb0_registers = mali_pp_job_get_wb0_registers(job);
+ wb1_registers = mali_pp_job_get_wb1_registers(job);
+ wb2_registers = mali_pp_job_get_wb2_registers(job);
+ core->counter_src0_used = core->counter_src0;
+ core->counter_src1_used = core->counter_src1;
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, frame_registers, MALI200_NUM_REGS_FRAME);
+ if (0 != sub_job)
+ {
+ /*
+ * There are two frame registers which are different for each sub job.
+ * For the first sub job, these are correctly represented in the frame register array,
+ * but we need to patch these for all other sub jobs
+ */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job));
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job));
+ }
+
+ if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ /* This selects which performance counters we are reading */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ /* global_config has enabled HW counters, this will override anything specified by user space */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ else
+ {
+ /* Use HW counters from job object, if any */
+ u32 perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+ if (0 != perf_counter_flag)
+ {
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+ /* Adding barrier to make sure all register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core. */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+ /* Adding barrier to make sure previous register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+ core->timeout_job_id = mali_pp_job_get_id(job);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job->frame_builder_id, job->flush_id, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), job->pid, job->tid, 0, 0, 0);
+#endif
+
+ core->running_job = job;
+ core->running_sub_job = sub_job;
+}
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core) /* Raw value of the core's VERSION register */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+u32 mali_pp_core_get_id(struct mali_pp_core *core) /* Index of this core among the global PP cores */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return core->core_id;
+}
+
+mali_bool mali_pp_core_set_counter_src0(struct mali_pp_core *core, u32 counter) /* Select HW perf counter 0 source; applied on next job start */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	core->counter_src0 = counter; /* only cached here; written to HW when a job is started */
+	return MALI_TRUE;
+}
+
+mali_bool mali_pp_core_set_counter_src1(struct mali_pp_core *core, u32 counter) /* Select HW perf counter 1 source; applied on next job start */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	core->counter_src1 = counter; /* only cached here; written to HW when a job is started */
+	return MALI_TRUE;
+}
+
+u32 mali_pp_core_get_counter_src0(struct mali_pp_core *core) /* Currently configured (cached) source for perf counter 0 */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return core->counter_src0;
+}
+
+u32 mali_pp_core_get_counter_src1(struct mali_pp_core *core) /* Currently configured (cached) source for perf counter 1 */
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return core->counter_src1;
+}
+
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index) /* Look up a PP core by index; NULL when out of range */
+{
+	if (MALI_MAX_NUMBER_OF_PP_CORES > index)
+	{
+		return mali_global_pp_cores[index]; /* may itself be NULL if the slot was never created */
+	}
+
+	return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void) /* Number of PP cores actually created */
+{
+	return mali_global_num_pp_cores;
+}
+
+u32 mali_pp_get_max_num_pp_cores(void) /* Compile-time upper bound on PP core count */
+{
+	return MALI_MAX_NUMBER_OF_PP_CORES;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static _mali_osk_errcode_t mali_pp_upper_half(void *data) /* Runs in IRQ context: claim the interrupt, mask, defer to bottom half */
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+	if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout)
+	{
+		/* Mask out all IRQs from this core until IRQ is handled */
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+		/* We do need to handle this in a bottom half */
+		_mali_osk_irq_schedulework(core->irq);
+		return _MALI_OSK_ERR_OK; /* interrupt was ours */
+	}
+
+	return _MALI_OSK_ERR_FAULT; /* not our interrupt (shared IRQ line) */
+}
+
+static void mali_pp_bottom_half(void *data) /* Deferred IRQ work: dispatch job completion / error / timeout to the group handler */
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	u32 irq_readout;
+	u32 irq_errors;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+#endif
+#endif
+
+	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */
+
+	/* The core may have been powered down between the upper and bottom half */
+	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
+	{
+		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
+		mali_group_unlock(core->group);
+		return;
+	}
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+
+	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));
+
+	if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
+	{
+		mali_pp_post_process_job(core); /* harvest perf counters and clear running_job */
+		MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
+		return;
+	}
+
+	/*
+	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
+	 * END_OF_FRAME and HANG interrupts are not considered error.
+	 */
+	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
+	if (0 != irq_errors)
+	{
+		mali_pp_post_process_job(core);
+		MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n",
+		                  irq_readout, core->hw_core.description));
+		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
+		return;
+	}
+	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
+	{
+		/* Only treat the timeout as current if it refers to the job still running */
+		if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
+		{
+			mali_pp_post_process_job(core);
+			MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n",
+			                     mali_pp_job_get_id(core->running_job), core->hw_core.description));
+			mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
+		}
+		else
+		{
+			/* Stale timeout for an already-finished job; nothing to do */
+			mali_group_unlock(core->group);
+		}
+		core->core_timed_out = MALI_FALSE;
+		return;
+	}
+	else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
+	{
+		/* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+	}
+
+	/*
+	 * We get here on a HANG interrupt (which we ignore) or a spurious wakeup with no flags set.
+	 * Re-enable interrupts and let core continue to run
+	 */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+	mali_group_unlock(core->group);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+#endif
+#endif
+}
+
<br>
+static void mali_pp_irq_probe_trigger(void *data) /* Force a HANG interrupt so IRQ-line probing can observe it */
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); /* @@@@ This should not be needed */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG); /* writing RAWSTAT raises the IRQ */
+	_mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data) /* Acknowledge the forced HANG raised by the probe trigger */
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+	if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+		_mali_osk_mem_barrier();
+		return _MALI_OSK_ERR_OK; /* interrupt originated from this core */
+	}
+
+	return _MALI_OSK_ERR_FAULT; /* not our interrupt */
+}
+
+
+/* ------ local helper functions below --------- */
+static void mali_pp_post_process_job(struct mali_pp_core *core) /* Harvest perf counters, clear running_job, cancel timeout. Group lock held. */
+{
+	MALI_ASSERT_GROUP_LOCKED(core->group);
+
+	if (NULL != core->running_job)
+	{
+		u32 val0 = 0;
+		u32 val1 = 0;
+#if MALI_TIMELINE_PROFILING_ENABLED
+		int counter_index = COUNTER_FP0_C0 + (2 * core->core_id);
+#endif
+
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+		{
+			val0 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+			/* BUGFIX: was '&&', which made the SRC0 enable bit test always pass; use bitwise '&' */
+			if ((mali_pp_job_get_perf_counter_flag(core->running_job) &
+			     _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) && mali_pp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
+			{
+				/* We retrieved the counter that user space asked for, so return the value through the job object */
+				mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, val0);
+			}
+			else
+			{
+				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+				mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE);
+			}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+			_mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+		}
+
+		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+		{
+			val1 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+			/* BUGFIX: was '&&', which made the SRC1 enable bit test always pass; use bitwise '&' */
+			if ((mali_pp_job_get_perf_counter_flag(core->running_job) &
+			     _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) && mali_pp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
+			{
+				/* We retrieved the counter that user space asked for, so return the value through the job object */
+				mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, val1);
+			}
+			else
+			{
+				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+				mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE);
+			}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+			_mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+		}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id),
+		                              val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
+#endif
+
+		/* We are no longer running a job... */
+		core->running_job = NULL;
+		_mali_osk_timer_del(core->timeout_timer); /* cancel the SW watchdog for this job */
+	}
+}
+
+/* callback function for pp core timeout */
+/* callback function for pp core timeout: runs in timer context, so just flag and defer to bottom half */
+static void mali_pp_timeout(void *data)
+{
+	struct mali_pp_core * core = ((struct mali_pp_core *)data);
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: TIMEOUT callback \n"));
+	core->core_timed_out = MALI_TRUE; /* inspected by mali_pp_bottom_half */
+	_mali_osk_irq_schedulework(core->irq);
+}
+
+#if 0 /* debug-only register dump, compiled out */
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0 /* debug-only state print, compiled out */
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+	MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size) /* Append a one-line core description to buf; returns bytes written */
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp.h b/drivers/parrot/gpu/mali200/common/mali_pp.h
new file mode 100644
index 00000000000000..9b425a08d36f02
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+
+struct mali_pp_core;
+struct mali_group;
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+u32 mali_pp_core_get_id(struct mali_pp_core *core);
+
+mali_bool mali_pp_core_set_counter_src0(struct mali_pp_core *core, u32 counter);
+mali_bool mali_pp_core_set_counter_src1(struct mali_pp_core *core, u32 counter);
+u32 mali_pp_core_get_counter_src0(struct mali_pp_core *core);
+u32 mali_pp_core_get_counter_src1(struct mali_pp_core *core);
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+u32 mali_pp_get_max_num_pp_cores(void);
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp_job.c b/drivers/parrot/gpu/mali200/common/mali_pp_job.c
new file mode 100644
index 00000000000000..1efcfdaed9044d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp_job.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *args, u32 id) /* Allocate and populate a PP job from user-space args; NULL on failure */
+{
+	struct mali_pp_job *job;
+
+	if (args->num_cores > _MALI_PP_MAX_SUB_JOBS) /* NOTE(review): num_cores == 0 is not rejected here -- confirm callers guarantee at least one sub job */
+	{
+		MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+		return NULL;
+	}
+
+	job = _mali_osk_malloc(sizeof(struct mali_pp_job));
+	if (NULL != job)
+	{
+		u32 i;
+		_mali_osk_list_init(&job->list);
+		job->session = session;
+		job->id = id;
+		job->user_id = args->user_job_ptr;
+		_mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
+		_mali_osk_memcpy(job->frame_registers_addr_frame, args->frame_registers_addr_frame, sizeof(job->frame_registers_addr_frame));
+		_mali_osk_memcpy(job->frame_registers_addr_stack, args->frame_registers_addr_stack, sizeof(job->frame_registers_addr_stack));
+
+		/* Only copy write back registers for the units that are enabled */
+		job->wb0_registers[0] = 0;
+		job->wb1_registers[0] = 0;
+		job->wb2_registers[0] = 0;
+		if (args->wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+		{
+			_mali_osk_memcpy(job->wb0_registers, args->wb0_registers, sizeof(job->wb0_registers));
+		}
+		if (args->wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+		{
+			_mali_osk_memcpy(job->wb1_registers, args->wb1_registers, sizeof(job->wb1_registers));
+		}
+		if (args->wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+		{
+			_mali_osk_memcpy(job->wb2_registers, args->wb2_registers, sizeof(job->wb2_registers));
+		}
+
+		job->perf_counter_flag = args->perf_counter_flag;
+		job->perf_counter_src0 = args->perf_counter_src0;
+		job->perf_counter_src1 = args->perf_counter_src1;
+		for (i = 0; i < args->num_cores; i++) /* only the first num_cores entries are zeroed; the rest stay uninitialized */
+		{
+			job->perf_counter_value0[i] = 0;
+			job->perf_counter_value1[i] = 0;
+		}
+		job->sub_job_count = args->num_cores;
+		job->sub_jobs_started = 0;
+		job->sub_jobs_completed = 0;
+		job->sub_job_errors = 0;
+
+		job->pid = _mali_osk_get_pid();
+		job->tid = _mali_osk_get_tid();
+		job->frame_builder_id = args->frame_builder_id;
+		job->flush_id = args->flush_id;
+
+		return job;
+	}
+
+	return NULL; /* allocation failed */
+}
+
+void mali_pp_job_delete(struct mali_pp_job *job) /* Free a job created by mali_pp_job_create; caller must have unlinked it */
+{
+	_mali_osk_free(job);
+}
+
+_mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job) /* Basic sanity check: first two frame registers must be non-zero */
+{
+	if ((0 == job->frame_registers[0]) || (0 == job->frame_registers[1]))
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp_job.h b/drivers/parrot/gpu/mali200/common/mali_pp_job.h
new file mode 100644
index 00000000000000..7fe87f85d3c07a
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp_job.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+
+/**
+ * The structure represents a PP job, including all sub-jobs.
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job
+{
+	_mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+	struct mali_session_data *session;                 /**< Session which submitted this job */
+	u32 id;                                            /**< identifier for this job in kernel space (sequential numbering) */
+	u32 user_id;                                       /**< identifier for the job in user space */
+	u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< core specific registers associated with this job, see ARM DDI0415A */
+	u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< ADDR_FRAME registers for sub job 1-7 */
+	u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< ADDR_STACK registers for sub job 1-7 */
+	u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];      /**< Write back unit 0 registers */
+	u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];      /**< Write back unit 1 registers */
+	u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];      /**< Write back unit 2 registers */
+	u32 perf_counter_flag;                             /**< bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+	u32 perf_counter_src0;                             /**< Source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter_src1;                             /**< Source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+	u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+	u32 sub_job_count;                                 /**< Total number of sub-jobs in this superjob */
+	u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
+	u32 sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
+	u32 sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+	u32 pid;                                           /**< Process ID of submitting process */
+	u32 tid;                                           /**< Thread ID of submitting thread */
+	u32 frame_builder_id;                              /**< id of the originating frame builder */
+	u32 flush_id;                                      /**< flush id within the originating frame builder */
+};
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *args, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+_mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job);
+
+/******************************************************
+ * simple utility functions for dealing with pp jobs:
+ *****************************************************/
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job) /* NULL-safe: returns 0 for a NULL job */
+{
+	return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+	return job->user_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+	return job->frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+	return job->flush_id;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+	return job->frame_registers;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job) /* sub job 0 uses the frame registers; later sub jobs use the dedicated arrays */
+{
+	if (sub_job == 0)
+	{
+		return job->frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+	}
+	else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+	{
+		return job->frame_registers_addr_frame[sub_job - 1];
+	}
+
+	return 0; /* out-of-range sub_job */
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job) /* same indexing scheme as mali_pp_job_get_addr_frame */
+{
+	if (sub_job == 0)
+	{
+		return job->frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+	}
+	else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+	{
+		return job->frame_registers_addr_stack[sub_job - 1];
+	}
+
+	return 0; /* out-of-range sub_job */
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+	return job->wb0_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+	return job->wb1_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+	return job->wb2_registers;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job) /* clearing SOURCE_SELECT disables the write back unit */
+{
+	job->wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+	job->wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+	job->wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+	return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+	return (job->sub_jobs_started < job->sub_job_count) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Function used when we are terminating a session with jobs. Returns TRUE if it has a rendering job.
+   Makes sure that no new subjobs are started. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_currently_rendering_and_if_so_abort_new_starts(struct mali_pp_job *job)
+{
+	/* All can not be started, since then it would not be in the job queue */
+	MALI_DEBUG_ASSERT( job->sub_jobs_started != job->sub_job_count );
+
+	/* If at least one job is started */
+	if ( (job->sub_jobs_started > 0) )
+	{
+		/* If at least one job is currently being rendered, and thus assigned to a group and core */
+		if (job->sub_jobs_started > job->sub_jobs_completed )
+		{
+			u32 jobs_remaining = job->sub_job_count - job->sub_jobs_started;
+			/* Mark all remaining sub jobs as started, completed and failed so they are never scheduled */
+			job->sub_jobs_started += jobs_remaining;
+			job->sub_jobs_completed += jobs_remaining;
+			job->sub_job_errors += jobs_remaining;
+			/* Returning TRUE indicating that we can not delete this job which is being rendered */
+			return MALI_TRUE;
+		}
+	}
+	/* The job is not being rendered at the moment and can then safely be deleted */
+	return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job) /* TRUE when every sub job has completed */
+{
+	return (job->sub_job_count == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job) /* valid because sub jobs are started in ascending order */
+{
+	return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+	return job->sub_job_count;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+	/* Assert that we are marking the "first unstarted sub job" as started */
+	MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+	job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success) /* completion counter; failures accumulate in sub_job_errors */
+{
+	job->sub_jobs_completed++;
+	if ( MALI_FALSE == success )
+	{
+		job->sub_job_errors++;
+	}
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job) /* TRUE only if no sub job reported an error */
+{
+	if ( 0 == job->sub_job_errors )
+	{
+		return MALI_TRUE;
+	}
+	return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+	return job->perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job)
+{
+	return job->perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job)
+{
+	return job->perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job) /* caller must keep sub_job < sub_job_count */
+{
+	return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job) /* caller must keep sub_job < sub_job_count */
+{
+	return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+	job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+	job->perf_counter_value1[sub_job] = value;
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.c b/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.c
new file mode 100644
index 00000000000000..8cdbcff3a35403
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_cluster.h"
+
+/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 8
+
+static mali_bool mali_pp_scheduler_is_suspended(void);
+
+/* Scheduling state of a render slot. */
+enum mali_pp_slot_state
+{
+ MALI_PP_SLOT_STATE_IDLE, /* the slot's group has no PP sub job running */
+ MALI_PP_SLOT_STATE_WORKING, /* the slot's group is executing a PP sub job */
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_pp_slot
+{
+ struct mali_group *group;
+ /*
+ * We keep track of the state here as well as in the group object
+ * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+ */
+ enum mali_pp_slot_state state;
+};
+
+static u32 pp_version = 0; /* PP core version, taken from the first PP core found */
+static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
+static struct mali_pp_slot slots[MALI_MAX_NUMBER_OF_PP_GROUPS];
+static u32 num_slots = 0; /* Number of entries in slots[] actually backed by a PP group */
+static u32 num_slots_idle = 0; /* Count of slots currently in MALI_PP_SLOT_STATE_IDLE */
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0; /* Scheduling is suppressed while this is non-zero */
+
+static _mali_osk_lock_t *pp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+MALI_DEBUG_CODE(static u32 pp_scheduler_lock_owner = 0);
+
+/* Set up the PP scheduler: create its lock and the suspend wait queue, then
+ * register one render slot for every group that contains a PP core.
+ * Returns _MALI_OSK_ERR_NOMEM if either the lock or the wait queue cannot
+ * be created (already-created resources are released on the error path). */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+ u32 i;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+ pp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == pp_scheduler_lock)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == pp_scheduler_working_wait_queue)
+ {
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Find all the available PP cores */
+ for (i = 0; i < mali_cluster_get_glob_num_clusters(); i++)
+ {
+ u32 group_id = 0;
+ struct mali_cluster *curr_cluster = mali_cluster_get_global_cluster(i);
+ struct mali_group *group = mali_cluster_get_group(curr_cluster, group_id);
+ while (NULL != group)
+ {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core)
+ {
+ if (0 == pp_version)
+ {
+ /* Retrieve PP version from first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+ slots[num_slots].group = group;
+ slots[num_slots].state = MALI_PP_SLOT_STATE_IDLE;
+ num_slots++;
+ num_slots_idle++;
+ }
+ group_id++;
+ group = mali_cluster_get_group(curr_cluster, group_id);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Release the wait queue and lock created by mali_pp_scheduler_initialize(). */
+void mali_pp_scheduler_terminate(void)
+{
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+}
+
+/* Take the scheduler lock (blocking). In DEBUG builds, records the owning
+ * thread id so lock ownership can be asserted elsewhere. */
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptable lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n"));
+ MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid());
+}
+
+/* Release the scheduler lock; the calling thread must currently hold it. */
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock\n"));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = 0);
+ _mali_osk_lock_signal(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* MALI_ASSERT_PP_SCHEDULER_LOCKED() asserts in DEBUG builds that the calling
+ * thread holds the scheduler lock; it compiles to nothing otherwise. */
+#ifdef DEBUG
+MALI_STATIC_INLINE void mali_pp_scheduler_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+}
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() mali_pp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED()
+#endif
+
+/* Core scheduling pass: hand unstarted sub jobs from the head of the job
+ * queue to idle render slots. Must be called with the scheduler lock held.
+ * Returns early when the scheduler is paused, when no slot is idle, or when
+ * the queue is empty; the compile-time MALI_PP_SCHEDULER_* policies below
+ * can additionally restrict when a job may start. */
+static void mali_pp_scheduler_schedule(void)
+{
+ u32 i;
+ struct mali_pp_job *job;
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ struct mali_session_data * session;
+#endif
+
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (0 < pause_count || 0 == num_slots_idle || _mali_osk_list_empty(&job_queue))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, num_slots_idle));
+ return; /* Nothing to do, so early out */
+ }
+
+
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP
+ /* Policy: only start work when every core is idle. */
+ if ( num_slots_idle < num_slots )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job not started, since only %d/%d cores are available\n", num_slots_idle,num_slots));
+ return;
+ }
+#endif
+
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ /* Finding initial session for the PP cores */
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+ session = job->session;
+ if ( num_slots != num_slots_idle )
+ {
+ /* Some slot is busy: adopt the session of the first busy slot instead. */
+ for (i = 0; (i < num_slots) ; i++)
+ {
+ if ( slots[i].state == MALI_PP_SLOT_STATE_IDLE )
+ {
+ continue;
+ }
+ session = mali_group_get_session(slots[i].group);
+ break;
+ }
+ }
+#endif
+
+ /* Walk the slots, assigning one sub job to each idle slot. */
+ for (i = 0; (i < num_slots) && (0 < num_slots_idle); i++)
+ {
+ u32 sub_job;
+
+ if (_mali_osk_list_empty(&job_queue)) /* move this check down to where we know we have started all sub jobs for this job??? */
+ {
+ break; /* No more jobs to schedule, so early out */
+ }
+
+ if (MALI_PP_SLOT_STATE_IDLE != slots[i].state)
+ {
+ continue;
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job)); /* All jobs on the job_queue should have unstarted sub jobs */
+
+ #if MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED
+ /* Policy: do not begin a multi-sub-job job unless enough cores are free. */
+ if ( (0==job->sub_jobs_started) && (num_slots_idle < num_slots) && (job->sub_job_count > num_slots_idle))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job with %d subjobs not started, since only %d/%d cores are available\n", job->sub_job_count, num_slots_idle,num_slots));
+ return;
+ }
+ #endif
+
+ #if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ /* Policy: never mix jobs from different sessions on the cores. */
+ if ( job->session != session )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job not started since existing job is from another application\n"));
+ return;
+ }
+ #endif
+
+ sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Starting job %u (0x%08X) part %u/%u\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+ if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(slots[i].group, job, sub_job))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job %u (0x%08X) part %u/%u started\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+
+ /* Mark this sub job as started */
+ mali_pp_job_mark_sub_job_started(job, sub_job);
+
+ /* Mark slot as busy */
+ slots[i].state = MALI_PP_SLOT_STATE_WORKING;
+ num_slots_idle--;
+
+ if (!mali_pp_job_has_unstarted_sub_jobs(job))
+ {
+ /*
+ * All sub jobs have now started for this job, remove this job from the job queue.
+ * The job will now only be referred to by the slots which are running it.
+ * The last slot to complete will make sure it is returned to user space.
+ */
+ _mali_osk_list_del(&job->list);
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP
+ MALI_DEBUG_PRINT(6, ("Mali PP scheduler: Skip scheduling more jobs when MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP is set.\n"));
+ return;
+#endif
+ }
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Failed to start PP job\n"));
+ return;
+ }
+ }
+}
+
+/* Build a job-finished notification (status plus per-sub-job performance
+ * counter values), send it to the job's session, then delete the job object.
+ * If the notification object cannot be allocated the job is still deleted,
+ * but user space is never notified. */
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job)
+{
+ _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL != notobj)
+ {
+ u32 i;
+ u32 sub_jobs = mali_pp_job_get_sub_job_count(job);
+ mali_bool success = mali_pp_job_was_success(job);
+
+ _mali_uk_pp_job_finished_s *jobres = notobj->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == success)
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ }
+ else
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ /* Copy the captured perf counter values for every sub job. */
+ for (i = 0; i < sub_jobs; i++)
+ {
+ jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+ }
+
+ mali_session_send_notification(mali_pp_job_get_session(job), notobj);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP scheduler: Unable to allocate notification object\n"));
+ }
+
+ mali_pp_job_delete(job);
+}
+
+/* Run one scheduling pass under the scheduler lock. */
+void mali_pp_scheduler_do_schedule(void)
+{
+ mali_pp_scheduler_lock();
+
+ mali_pp_scheduler_schedule();
+
+ mali_pp_scheduler_unlock();
+}
+
+/* Called when @group has finished sub job @sub_job of @job: free the slot,
+ * record the result, then either wake suspend waiters (if paused) or run
+ * another scheduling pass. Once every sub job of the job has completed the
+ * job is returned to user space (outside the scheduler lock). */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success)
+{
+ u32 i;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) part %u/%u completed (%s)\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job), success ? "success" : "failure"));
+
+ mali_pp_scheduler_lock();
+
+ /* Find slot which was running this job */
+ for (i = 0; i < num_slots; i++)
+ {
+ if (slots[i].group == group)
+ {
+ MALI_DEBUG_ASSERT(MALI_PP_SLOT_STATE_WORKING == slots[i].state);
+ slots[i].state = MALI_PP_SLOT_STATE_IDLE;
+ num_slots_idle++;
+ mali_pp_job_mark_sub_job_completed(job, success);
+ }
+ }
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0)
+ {
+ /* Wake up sleeping workers. Their wake-up condition is that
+ * num_slots == num_slots_idle, so unless we are done working, no
+ * threads will actually be woken up.
+ */
+ _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+ }
+ else
+ {
+ mali_pp_scheduler_schedule();
+ }
+
+ job_is_done = mali_pp_job_is_complete(job);
+
+ mali_pp_scheduler_unlock();
+
+ if (job_is_done)
+ {
+ /* Send notification back to user space */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for job %u (0x%08X)\n", mali_pp_job_get_id(job), job));
+ mali_pp_scheduler_return_job_to_user(job);
+ }
+}
+
+/* Block scheduling of new jobs and wait until all slots are idle.
+ * Each call must be balanced by a call to mali_pp_scheduler_resume(). */
+void mali_pp_scheduler_suspend(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_pp_scheduler_unlock();
+
+ /*mali_pp_scheduler_working_lock();*/
+ /* We have now acquired the working lock, which means that we have successfully paused the scheduler */
+ /*mali_pp_scheduler_working_unlock();*/
+
+ /* go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+ * mali_pp_scheduler_suspended() function will be called. This will return true
+ * iff state is idle and pause_count > 0, so if the core is active this
+ * will not do anything.
+ */
+ _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended);
+}
+
+/* Undo one mali_pp_scheduler_suspend(); when the last pause is lifted,
+ * immediately run a scheduling pass to restart queued work. */
+void mali_pp_scheduler_resume(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ if (0 == pause_count)
+ {
+ mali_pp_scheduler_schedule();
+ }
+ mali_pp_scheduler_unlock();
+}
+
+/* User/kernel entry point: create a PP job from @args, validate it, append
+ * it to the job queue and trigger a scheduling pass.
+ * An invalid job is marked failed and returned to user space immediately via
+ * a notification; the call itself still reports _MALI_OSK_ERR_OK in that
+ * case. _MALI_OSK_ERR_NOMEM is returned only if the job cannot be created. */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(_mali_uk_pp_start_job_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+ session = (struct mali_session_data*)args->ctx;
+
+ job = mali_pp_job_create(session, args, mali_scheduler_get_new_id());
+ if (NULL == job)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job))
+ {
+ /* Not a valid job, return to user immediately */
+ mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+ mali_pp_scheduler_return_job_to_user(job); /* This will also delete the job object */
+ return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+ }
+
+ mali_pp_scheduler_lock();
+
+ _mali_osk_list_addtail(&job->list, &job_queue);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) with %u parts queued\n", mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+ mali_pp_scheduler_schedule();
+
+ mali_pp_scheduler_unlock();
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* User/kernel entry point: report the number of PP cores (render slots). */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+ args->number_of_cores = num_slots;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* User/kernel entry point: report the PP core version cached at init time. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* User/kernel entry point: disable selected write-back units (WB0/WB1/WB2,
+ * chosen by args->wbx) on the first queued job that matches the caller's
+ * session, frame builder id and flush id. Jobs no longer in the queue
+ * (i.e. already fully started) are not affected. */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+ session = (struct mali_session_data*)args->ctx;
+
+ mali_pp_scheduler_lock();
+
+ /* Check queue for jobs that match */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+ {
+ if (mali_pp_job_get_session(job) == session &&
+ mali_pp_job_get_frame_builder_id(job) == (u32)args->fb_id &&
+ mali_pp_job_get_flush_id(job) == (u32)args->flush_id)
+ {
+ if (args->wbx & _MALI_UK_PP_JOB_WB0)
+ {
+ mali_pp_job_disable_wb0(job);
+ }
+ if (args->wbx & _MALI_UK_PP_JOB_WB1)
+ {
+ mali_pp_job_disable_wb1(job);
+ }
+ if (args->wbx & _MALI_UK_PP_JOB_WB2)
+ {
+ mali_pp_job_disable_wb2(job);
+ }
+ break;
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+}
+
+/* Abort all PP jobs belonging to @session. Queued jobs are unlinked and
+ * deleted, except jobs that are partially running: those are left for the
+ * mali_group_abort_session() calls below to clean up. Finally every working
+ * slot's group is told to abort work for the session. */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_pp_job *job, *tmp;
+ int i;
+
+ mali_pp_scheduler_lock();
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+ /* Check queue for jobs and remove */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+ {
+ if (mali_pp_job_get_session(job) == session)
+ {
+ _mali_osk_list_del(&(job->list));
+
+ if ( mali_pp_job_is_currently_rendering_and_if_so_abort_new_starts(job) )
+ {
+ /* The job is in the render pipeline, we can not delete it yet. */
+ /* It will be deleted in the mali_group_abort_session() call below */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Keeping partially started PP job 0x%08x in queue\n", job));
+ continue;
+ }
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Removing PP job 0x%08x from queue\n", job));
+ mali_pp_job_delete(job);
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+
+ /* Abort running jobs from this session */
+ for (i = 0; i < num_slots; i++)
+ {
+ struct mali_group *group = slots[i].group;
+
+ MALI_DEBUG_PRINT(5, ("PP sched abort: Looking at group 0x%08x\n", group));
+
+ if (MALI_PP_SLOT_STATE_WORKING == slots[i].state)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Aborting session 0x%08x from group 0x%08x\n", session, group));
+
+ mali_group_abort_session(group, session);
+ }
+ }
+}
+
+/* Wake-up predicate used by mali_pp_scheduler_suspend(): true when the
+ * scheduler is paused and every slot is idle. */
+static mali_bool mali_pp_scheduler_is_suspended(void)
+{
+ mali_bool ret;
+
+ mali_pp_scheduler_lock();
+ ret = pause_count > 0 && num_slots == num_slots_idle;
+ mali_pp_scheduler_unlock();
+
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+/* Write a human-readable dump of the queue and each slot's state into @buf.
+ * Returns the number of bytes written. Compiled in only when
+ * MALI_STATE_TRACKING is enabled. */
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ int i;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ for (i = 0; i < num_slots; i++)
+ {
+ n += mali_group_dump_state(slots[i].group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\t\tState: %d\n", slots[i].state);
+ }
+
+ return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.h b/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.h
new file mode 100644
index 00000000000000..48eb3bd63dde5d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_pp_scheduler.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_pp_job.h"
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+void mali_pp_scheduler_do_schedule(void);
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/** @brief Abort all PP jobs from session running or queued
+ *
+ * This function aborts all PP jobs from the specified session. Queued jobs are removed from the queue and jobs
+ * currently running on a core will be aborted.
+ *
+ * @param session Pointer to session whose jobs should be aborted
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_scheduler.c b/drivers/parrot/gpu/mali200/common/mali_scheduler.c
new file mode 100644
index 00000000000000..f36020937e9ec9
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_scheduler.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static _mali_osk_atomic_t mali_job_autonumber;
+
+/* Initialize the global atomic job id counter (starts at 0). */
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
+ {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the global job id counter. */
+void mali_scheduler_terminate(void)
+{
+ _mali_osk_atomic_term(&mali_job_autonumber);
+}
+
+/* Return a fresh job id by atomically incrementing the global counter. */
+u32 mali_scheduler_get_new_id(void)
+{
+ u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
+ return job_id;
+}
+
diff --git a/drivers/parrot/gpu/mali200/common/mali_scheduler.h b/drivers/parrot/gpu/mali200/common/mali_scheduler.h
new file mode 100644
index 00000000000000..74f0947b5516e7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_scheduler.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_session.c b/drivers/parrot/gpu/mali200/common/mali_session.c
new file mode 100644
index 00000000000000..2394bb90889e71
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_session.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+/* Global list of all sessions and the lock protecting it; both are
+ * declared extern in mali_session.h for use by other modules. */
+_MALI_OSK_LIST_HEAD(mali_sessions);
+
+_mali_osk_lock_t *mali_sessions_lock;
+
+/* Initialize the global session list and create its reader/writer lock.
+ * Returns _MALI_OSK_ERR_NOMEM if the lock cannot be created. */
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+ mali_sessions_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+ if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Destroy the session list lock created by mali_session_initialize(). */
+void mali_session_terminate(void)
+{
+ _mali_osk_lock_term(mali_sessions_lock);
+}
+
+/* Link @session into the global session list (takes the sessions lock). */
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_unlock();
+}
+
+/* Unlink @session from the global session list (takes the sessions lock). */
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_unlock();
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_session.h b/drivers/parrot/gpu/mali200/common/mali_session.h
new file mode 100644
index 00000000000000..c8640b5f9dea55
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_session.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+/* Per-open-file driver state for one user-space session. */
+struct mali_session_data
+{
+ _mali_osk_notification_queue_t * ioctl_queue; /**< Queue of events delivered back to user space */
+
+ _mali_osk_lock_t *memory_lock; /**< Lock protecting the vm manipulation */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_lock_t *mali_sessions_lock;
+
+/* Take the global sessions lock in read/write mode. */
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_lock_wait(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Release the global sessions lock. */
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_lock_signal(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+/* Iterate over all sessions; callers should hold the sessions lock (it
+ * protects both modification and access to the list, see above). */
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+/* Return the MMU page directory associated with @session. */
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+/* Queue @object on the session's ioctl notification queue for user space. */
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_ukk.h b/drivers/parrot/gpu/mali200/common/mali_ukk.h
new file mode 100644
index 00000000000000..9aab792deb0fd0
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_ukk.h
@@ -0,0 +1,661 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementation that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Returns the size of the buffer needed for a _mali_ukk_get_system_info call
+ *
+ * This function must be called before a call is made to
+ * _mali_ukk_get_system_info, so that memory of the correct size can be
+ * allocated, and a pointer to this memory written into the system_info member
+ * of _mali_uk_get_system_info_s.
+ *
+ * @param args see _mali_uk_get_system_info_size_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size( _mali_uk_get_system_info_size_s *args );
+
+/** @brief Returns information about the system (cores and memory banks)
+ *
+ * A buffer for this needs to be allocated by the caller. The size of the buffer required is returned by
+ * _mali_ukk_get_system_info_size(). The user is responsible for freeing the buffer.
+ *
+ * The _mali_system_info structure will be written to the start of this buffer,
+ * and the core_info and mem_info lists will be written to locations inside
+ * the buffer, and will be suitably aligned.
+ *
+ * Under OS implementations of the U/K interface we need to pack/unpack
+ * pointers across the user/kernel boundary. This has required that we malloc()
+ * an intermediate buffer inside the kernel-side U/K interface, and free it
+ * before returning to user-side. To avoid modifying common code, we do the
+ * following pseudo-code, which we shall call 'pointer switching':
+ *
+ * @code
+ * {
+ * Copy_From_User(kargs, args, ... );
+ * void __user * local_ptr = kargs->system_info;
+ * kargs->system_info = _mali_osk_malloc( ... );
+ * _mali_ukk_get_system_info( kargs );
+ * Copy_To_User( local_ptr, kargs->system_info, ... );
+ * _mali_osk_free( kargs->system_info );
+ * }
+ * @endcode
+ * @note The user-side's args->system_info member was unmodified here.
+ *
+ * However, the current implementation requires an extra ukk_private word so that the common code can work out
+ * how to patch pointers to user-mode for an OS's U/K implementation, this should be set to the user-space
+ * destination address for pointer-patching to occur. When NULL, it is unused, and no pointer-patching occurs in the
+ * common code.
+ *
+ * @param args see _mali_uk_get_system_info_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args );
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/parrot/gpu/mali200/common/mali_user_settings_db.c b/drivers/parrot/gpu/mali200/common/mali_user_settings_db.c
new file mode 100644
index 00000000000000..681c2b0d285121
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_user_settings_db.c
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+/** @brief Broadcast a changed user setting to every active session.
+ *
+ * Walks the global session list under the session lock and queues a
+ * _MALI_NOTIFICATION_SETTINGS_CHANGED notification for each session.
+ *
+ * @param setting the setting that changed
+ * @param value the new value of the setting
+ */
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+	struct mali_session_data *session, *tmp;
+
+	mali_session_lock();
+	MALI_SESSION_FOREACH(session, tmp, link)
+	{
+		_mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED, sizeof(_mali_uk_settings_changed_s));
+		_mali_uk_settings_changed_s *data;
+
+		/* The original code dereferenced notobj unconditionally; the
+		 * allocation can fail under memory pressure. Skip this session
+		 * instead of crashing - it merely misses one change event. */
+		if (NULL == notobj)
+		{
+			continue;
+		}
+
+		data = notobj->result_buffer;
+		data->setting = setting;
+		data->value = value;
+
+		mali_session_send_notification(session, notobj);
+	}
+	mali_session_unlock();
+}
+
+/** @brief Update one entry in the user-settings DB.
+ *
+ * Stores @a value for @a setting and, when the stored value actually
+ * changed, notifies every running session of the new value.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+	mali_bool changed;
+
+	MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+	changed = (mali_user_settings[setting] != value) ? MALI_TRUE : MALI_FALSE;
+	mali_user_settings[setting] = value;
+
+	if (MALI_TRUE == changed)
+	{
+		mali_user_settings_notify(setting, value);
+	}
+}
+
+/** @brief Get the current value of a setting from the DB.
+ *
+ * @param setting the setting to read
+ * @return the stored value for @a setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+	u32 stored;
+
+	MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+	stored = mali_user_settings[setting];
+	return stored;
+}
+
+/** @brief U/K handler: fetch a single user setting.
+ *
+ * @param args in: args->setting selects the entry; out: args->value on success
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS when the
+ *         requested setting index is out of range
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+	_mali_uk_user_setting_t setting;
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	setting = args->setting;
+
+	/* Reject indices outside the settings table. */
+	if (setting < 0 || setting >= _MALI_UK_USER_SETTING_MAX)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	args->value = mali_user_settings[setting];
+	return _MALI_OSK_ERR_OK;
+}
+
+/** @brief U/K handler: copy the whole user-settings table into @a args.
+ *
+ * @param args out: args->settings receives all _MALI_UK_USER_SETTING_MAX values
+ * @return always _MALI_OSK_ERR_OK
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	/* sizeof(mali_user_settings) == sizeof(u32) * _MALI_UK_USER_SETTING_MAX */
+	_mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali200/common/mali_user_settings_db.h b/drivers/parrot/gpu/mali200/common/mali_user_settings_db.h
new file mode 100644
index 00000000000000..fbb9415729c64c
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/common/mali_user_settings_db.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value differs from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard.h b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard.h
new file mode 100644
index 00000000000000..7c78947e70d3ae
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ */
+int mali_pmu_powerdown(void);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ */
+int mali_pmu_powerup(void);
+
+#endif /* __MALI_UTGARD_H__ */
+
diff --git a/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_counters.h b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_counters.h
new file mode 100644
index 00000000000000..40822f739722ac
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_counters.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_COUNTERS_H_
+#define _MALI_UTGARD_COUNTERS_H_
+
+/* Placeholder type kept for instrumentation API compatibility; carries no data. */
+typedef struct
+{
+	void *unused;
+} mali_cinstr_counter_info;
+
+/* Base ID for each counter source; each source owns a block of 1000 IDs
+ * (see the *_FIRST_COUNTER/*_LAST_COUNTER macros below). These values are
+ * shared with userspace instrumentation - do not renumber. */
+typedef enum
+{
+	MALI_CINSTR_COUNTER_SOURCE_EGL = 0,
+	MALI_CINSTR_COUNTER_SOURCE_OPENGLES = 1000,
+	MALI_CINSTR_COUNTER_SOURCE_OPENVG = 2000,
+	MALI_CINSTR_COUNTER_SOURCE_GP = 3000,
+	MALI_CINSTR_COUNTER_SOURCE_PP = 4000,
+} cinstr_counter_source;
+
+#define MALI_CINSTR_EGL_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_EGL
+#define MALI_CINSTR_EGL_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_EGL + 999)
+
+#define MALI_CINSTR_GLES_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENGLES
+#define MALI_CINSTR_GLES_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 999)
+
+#define MALI_CINSTR_VG_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENVG
+#define MALI_CINSTR_VG_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENVG + 999)
+
+#define MALI_CINSTR_GP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_GP
+#define MALI_CINSTR_GP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_GP + 999)
+
+#define MALI_CINSTR_PP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_PP
+#define MALI_CINSTR_PP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_PP + 999)
+
+
+/* Counter IDs for all sources. The numeric values are an interface shared
+ * with the userspace DDK/instrumentation tools - do not renumber. */
+typedef enum
+{
+	/* EGL counters */
+
+	MALI_CINSTR_EGL_BLIT_TIME = MALI_CINSTR_COUNTER_SOURCE_EGL + 0,
+
+	/* Last counter in the EGL set */
+	MALI_CINSTR_EGL_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_EGL + 1,
+
+	/* GLES counters */
+
+	MALI_CINSTR_GLES_DRAW_ELEMENTS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 0,
+	MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_INDICES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 1,
+	MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 2,
+	MALI_CINSTR_GLES_DRAW_ARRAYS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 3,
+	MALI_CINSTR_GLES_DRAW_ARRAYS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 4,
+	MALI_CINSTR_GLES_DRAW_POINTS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 5,
+	MALI_CINSTR_GLES_DRAW_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 6,
+	MALI_CINSTR_GLES_DRAW_LINE_LOOP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 7,
+	MALI_CINSTR_GLES_DRAW_LINE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 8,
+	MALI_CINSTR_GLES_DRAW_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 9,
+	MALI_CINSTR_GLES_DRAW_TRIANGLE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 10,
+	MALI_CINSTR_GLES_DRAW_TRIANGLE_FAN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 11,
+	MALI_CINSTR_GLES_NON_VBO_DATA_COPY_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 12,
+	MALI_CINSTR_GLES_UNIFORM_BYTES_COPIED_TO_MALI = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 13,
+	MALI_CINSTR_GLES_UPLOAD_TEXTURE_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 14,
+	MALI_CINSTR_GLES_UPLOAD_VBO_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 15,
+	MALI_CINSTR_GLES_NUM_FLUSHES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 16,
+	MALI_CINSTR_GLES_NUM_VSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 17,
+	MALI_CINSTR_GLES_NUM_FSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 18,
+	MALI_CINSTR_GLES_VSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 19,
+	MALI_CINSTR_GLES_FSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 20,
+	MALI_CINSTR_GLES_INPUT_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 21,
+	MALI_CINSTR_GLES_VXCACHE_HIT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 22,
+	MALI_CINSTR_GLES_VXCACHE_MISS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 23,
+	MALI_CINSTR_GLES_VXCACHE_COLLISION = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 24,
+	MALI_CINSTR_GLES_CULLED_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 25,
+	MALI_CINSTR_GLES_CULLED_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 26,
+	MALI_CINSTR_GLES_BACKFACE_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 27,
+	MALI_CINSTR_GLES_GBCLIP_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 28,
+	MALI_CINSTR_GLES_GBCLIP_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 29,
+	MALI_CINSTR_GLES_TRIANGLES_DRAWN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 30,
+	MALI_CINSTR_GLES_DRAWCALL_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 31,
+	MALI_CINSTR_GLES_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 32,
+	MALI_CINSTR_GLES_INDEPENDENT_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 33,
+	MALI_CINSTR_GLES_STRIP_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 34,
+	MALI_CINSTR_GLES_FAN_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 35,
+	MALI_CINSTR_GLES_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 36,
+	MALI_CINSTR_GLES_INDEPENDENT_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 37,
+	MALI_CINSTR_GLES_STRIP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 38,
+	MALI_CINSTR_GLES_LOOP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 39,
+	MALI_CINSTR_GLES_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 40,
+
+	/* Last counter in the GLES set */
+	MALI_CINSTR_GLES_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 41,
+
+	/* OpenVG counters */
+
+	MALI_CINSTR_VG_MASK_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 0,
+	MALI_CINSTR_VG_CLEAR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 1,
+	MALI_CINSTR_VG_APPEND_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 2,
+	MALI_CINSTR_VG_APPEND_PATH_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 3,
+	MALI_CINSTR_VG_MODIFY_PATH_COORDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 4,
+	MALI_CINSTR_VG_TRANSFORM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 5,
+	MALI_CINSTR_VG_INTERPOLATE_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 6,
+	MALI_CINSTR_VG_PATH_LENGTH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 7,
+	MALI_CINSTR_VG_POINT_ALONG_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 8,
+	MALI_CINSTR_VG_PATH_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 9,
+	MALI_CINSTR_VG_PATH_TRANSFORMED_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 10,
+	MALI_CINSTR_VG_DRAW_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 11,
+	MALI_CINSTR_VG_CLEAR_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 12,
+	MALI_CINSTR_VG_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 13,
+	MALI_CINSTR_VG_GET_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 14,
+	MALI_CINSTR_VG_COPY_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 15,
+	MALI_CINSTR_VG_DRAW_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 16,
+	MALI_CINSTR_VG_SET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 17,
+	MALI_CINSTR_VG_WRITE_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 18,
+	MALI_CINSTR_VG_GET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 19,
+	MALI_CINSTR_VG_READ_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 20,
+	MALI_CINSTR_VG_COPY_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 21,
+	MALI_CINSTR_VG_COLOR_MATRIX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 22,
+	MALI_CINSTR_VG_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 23,
+	MALI_CINSTR_VG_SEPARABLE_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 24,
+	MALI_CINSTR_VG_GAUSSIAN_BLUR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 25,
+	MALI_CINSTR_VG_LOOKUP_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 26,
+	MALI_CINSTR_VG_LOOKUP_SINGLE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 27,
+	MALI_CINSTR_VG_CONTEXT_CREATE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 28,
+	MALI_CINSTR_VG_STROKED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 29,
+	MALI_CINSTR_VG_STROKED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 30,
+	MALI_CINSTR_VG_STROKED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 31,
+	MALI_CINSTR_VG_STROKED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 32,
+	MALI_CINSTR_VG_FILLED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 33,
+	MALI_CINSTR_VG_FILLED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 34,
+	MALI_CINSTR_VG_FILLED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 35,
+	MALI_CINSTR_VG_FILLED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 36,
+	MALI_CINSTR_VG_DRAW_PATH_CALLS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 37,
+	MALI_CINSTR_VG_TRIANGLES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 38,
+	MALI_CINSTR_VG_VERTICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 39,
+	MALI_CINSTR_VG_INDICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 40,
+	MALI_CINSTR_VG_FILLED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 41,
+	MALI_CINSTR_VG_STROKED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 42,
+	MALI_CINSTR_VG_FILL_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 43,
+	MALI_CINSTR_VG_DRAW_FILLED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 44,
+	MALI_CINSTR_VG_STROKE_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 45,
+	MALI_CINSTR_VG_DRAW_STROKED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 46,
+	MALI_CINSTR_VG_DRAW_PAINT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 47,
+	MALI_CINSTR_VG_DATA_STRUCTURES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 48,
+	MALI_CINSTR_VG_MEM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 49,
+	MALI_CINSTR_VG_RSW_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 50,
+
+	/* Last counter in the VG set */
+	MALI_CINSTR_VG_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 51,
+
+	/* Mali GP counters */
+
+	MALI_CINSTR_GP_DEPRECATED_0 = MALI_CINSTR_COUNTER_SOURCE_GP + 0,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_GP = MALI_CINSTR_COUNTER_SOURCE_GP + 1,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 2,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_STORER = MALI_CINSTR_COUNTER_SOURCE_GP + 3,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_LOADER = MALI_CINSTR_COUNTER_SOURCE_GP + 4,
+	MALI_CINSTR_GP_CYCLES_VERTEX_LOADER_WAITING_FOR_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 5,
+	MALI_CINSTR_GP_NUMBER_OF_WORDS_READ = MALI_CINSTR_COUNTER_SOURCE_GP + 6,
+	MALI_CINSTR_GP_NUMBER_OF_WORDS_WRITTEN = MALI_CINSTR_COUNTER_SOURCE_GP + 7,
+	MALI_CINSTR_GP_NUMBER_OF_READ_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 8,
+	MALI_CINSTR_GP_NUMBER_OF_WRITE_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 9,
+	MALI_CINSTR_GP_NUMBER_OF_VERTICES_PROCESSED = MALI_CINSTR_COUNTER_SOURCE_GP + 10,
+	MALI_CINSTR_GP_NUMBER_OF_VERTICES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 11,
+	MALI_CINSTR_GP_NUMBER_OF_PRIMITIVES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 12,
+	MALI_CINSTR_GP_RESERVED_13 = MALI_CINSTR_COUNTER_SOURCE_GP + 13,
+	MALI_CINSTR_GP_NUMBER_OF_BACKFACE_CULLINGS_DONE = MALI_CINSTR_COUNTER_SOURCE_GP + 14,
+	MALI_CINSTR_GP_NUMBER_OF_COMMANDS_WRITTEN_TO_TILES = MALI_CINSTR_COUNTER_SOURCE_GP + 15,
+	MALI_CINSTR_GP_NUMBER_OF_MEMORY_BLOCKS_ALLOCATED = MALI_CINSTR_COUNTER_SOURCE_GP + 16,
+	MALI_CINSTR_GP_RESERVED_17 = MALI_CINSTR_COUNTER_SOURCE_GP + 17,
+	MALI_CINSTR_GP_RESERVED_18 = MALI_CINSTR_COUNTER_SOURCE_GP + 18,
+	MALI_CINSTR_GP_NUMBER_OF_VERTEX_LOADER_CACHE_MISSES = MALI_CINSTR_COUNTER_SOURCE_GP + 19,
+	MALI_CINSTR_GP_RESERVED_20 = MALI_CINSTR_COUNTER_SOURCE_GP + 20,
+	MALI_CINSTR_GP_RESERVED_21 = MALI_CINSTR_COUNTER_SOURCE_GP + 21,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 22,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 23,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_LIST_WRITER = MALI_CINSTR_COUNTER_SOURCE_GP + 24,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_THROUGH_THE_PREPARE_LIST_COMMANDS = MALI_CINSTR_COUNTER_SOURCE_GP + 25,
+	MALI_CINSTR_GP_RESERVED_26 = MALI_CINSTR_COUNTER_SOURCE_GP + 26,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_PRIMITIVE_ASSEMBLY = MALI_CINSTR_COUNTER_SOURCE_GP + 27,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_VERTEX_FETCHER = MALI_CINSTR_COUNTER_SOURCE_GP + 28,
+	MALI_CINSTR_GP_RESERVED_29 = MALI_CINSTR_COUNTER_SOURCE_GP + 29,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_BOUNDINGBOX_AND_COMMAND_GENERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 30,
+	MALI_CINSTR_GP_RESERVED_31 = MALI_CINSTR_COUNTER_SOURCE_GP + 31,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_SCISSOR_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 32,
+	MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 33,
+	MALI_CINSTR_GP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_GP + 900,
+
+	/* Mali PP counters */
+
+	MALI_CINSTR_PP_ACTIVE_CLOCK_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 0,
+	MALI_CINSTR_PP_TOTAL_CLOCK_CYCLES_COUNT_REMOVED = MALI_CINSTR_COUNTER_SOURCE_PP + 1,
+	MALI_CINSTR_PP_TOTAL_BUS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 2,
+	MALI_CINSTR_PP_TOTAL_BUS_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 3,
+	MALI_CINSTR_PP_BUS_READ_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 4,
+	MALI_CINSTR_PP_BUS_WRITE_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 5,
+	MALI_CINSTR_PP_BUS_READ_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 6,
+	MALI_CINSTR_PP_BUS_WRITE_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 7,
+	MALI_CINSTR_PP_RESERVED_08 = MALI_CINSTR_COUNTER_SOURCE_PP + 8,
+	MALI_CINSTR_PP_TILE_WRITEBACK_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 9,
+	MALI_CINSTR_PP_STORE_UNIT_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 10,
+	MALI_CINSTR_PP_RESERVED_11 = MALI_CINSTR_COUNTER_SOURCE_PP + 11,
+	MALI_CINSTR_PP_PALETTE_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 12,
+	MALI_CINSTR_PP_TEXTURE_CACHE_UNCOMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 13,
+	MALI_CINSTR_PP_POLYGON_LIST_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 14,
+	MALI_CINSTR_PP_RSW_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 15,
+	MALI_CINSTR_PP_VERTEX_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 16,
+	MALI_CINSTR_PP_UNIFORM_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 17,
+	/* NOTE(review): PROGRAM_CACHE_READS duplicates VARYING_READS (both PP + 19,
+	 * leaving PP + 18 unused); other Mali driver releases assign PP + 18 here.
+	 * These IDs are shared with the userspace DDK, so confirm against the
+	 * matching userspace sources before renumbering. */
+	MALI_CINSTR_PP_PROGRAM_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+	MALI_CINSTR_PP_VARYING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+	MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 20,
+	MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 21,
+	MALI_CINSTR_PP_TEXTURE_CACHE_COMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 22,
+	MALI_CINSTR_PP_LOAD_UNIT_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 23,
+	MALI_CINSTR_PP_POLYGON_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 24,
+	MALI_CINSTR_PP_PIXEL_RECTANGLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 25,
+	MALI_CINSTR_PP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 26,
+	MALI_CINSTR_PP_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 27,
+	MALI_CINSTR_PP_STALL_CYCLES_POLYGON_LIST_READER = MALI_CINSTR_COUNTER_SOURCE_PP + 28,
+	MALI_CINSTR_PP_STALL_CYCLES_TRIANGLE_SETUP = MALI_CINSTR_COUNTER_SOURCE_PP + 29,
+	MALI_CINSTR_PP_QUAD_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 30,
+	MALI_CINSTR_PP_FRAGMENT_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 31,
+	MALI_CINSTR_PP_FRAGMENT_REJECTED_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 32,
+	MALI_CINSTR_PP_FRAGMENT_REJECTED_FWD_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 33,
+	MALI_CINSTR_PP_FRAGMENT_PASSED_ZSTENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 34,
+	MALI_CINSTR_PP_PATCHES_REJECTED_EARLY_Z_STENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 35,
+	MALI_CINSTR_PP_PATCHES_EVALUATED = MALI_CINSTR_COUNTER_SOURCE_PP + 36,
+	MALI_CINSTR_PP_INSTRUCTION_COMPLETED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 37,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_RENDEZVOUS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 38,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_VARYING_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 39,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_TEXTURE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 40,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_LOAD_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 41,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_TILE_READ_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 42,
+	MALI_CINSTR_PP_INSTRUCTION_FAILED_STORE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 43,
+	MALI_CINSTR_PP_RENDEZVOUS_BREAKAGE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 44,
+	MALI_CINSTR_PP_PIPELINE_BUBBLES_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 45,
+	MALI_CINSTR_PP_TEXTURE_MAPPER_MULTIPASS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 46,
+	MALI_CINSTR_PP_TEXTURE_MAPPER_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 47,
+	MALI_CINSTR_PP_VERTEX_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 48,
+	MALI_CINSTR_PP_VERTEX_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 49,
+	MALI_CINSTR_PP_VARYING_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 50,
+	MALI_CINSTR_PP_VARYING_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 51,
+	MALI_CINSTR_PP_VARYING_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 52,
+	MALI_CINSTR_PP_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 53,
+	MALI_CINSTR_PP_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 54,
+	MALI_CINSTR_PP_TEXTURE_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 55,
+	/* IDs 56/57 are intentionally shared: their meaning differs per GPU family. */
+	MALI_CINSTR_PP_PALETTE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 200 only */
+	MALI_CINSTR_PP_PALETTE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 200 only */
+	MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 400 class only */
+	MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 400 class only */
+	MALI_CINSTR_PP_LOAD_STORE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 58,
+	MALI_CINSTR_PP_LOAD_STORE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 59,
+	MALI_CINSTR_PP_PROGRAM_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 60,
+	MALI_CINSTR_PP_PROGRAM_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 61,
+	MALI_CINSTR_PP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 900,
+} cinstr_counters_m200_t;
+
+#endif /*_MALI_UTGARD_COUNTERS_H_*/
diff --git a/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_ioctl.h b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644
index 00000000000000..31af4cf7d75228
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_ioctl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_GET_SYSTEM_INFO_SIZE _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO_SIZE, _mali_uk_get_system_info_s *)
+#define MALI_IOC_GET_SYSTEM_INFO _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO, _mali_uk_get_system_info_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_profiling_events.h b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644
index 00000000000000..129526f856fa53
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_profiling_events.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32 bit value consisting of different fields
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16 bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum
+{
+	/* The type occupies the 4-bit field at shift 24 of the 32-bit event ID
+	 * (see the field layout comment at the top of this file). */
+	MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+	MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+	MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+	MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+	MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum
+{
+	/* The channel occupies the 8-bit field at shift 16 of the event ID.
+	 * Channel numbers 2-4 and 13-20 are unassigned here - presumably
+	 * reserved (e.g. for additional GP cores); confirm before reusing. */
+	MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum
+{
+	/* Reason codes live in the 16-bit data field of the event ID. The
+	 * numbering is sparse (gaps at 5-9, 12-52) - presumably matching IDs
+	 * used by the userspace instrumentation; do not renumber. */
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum
+{
+	MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_START_STOP_MALI = 1, /* marks a Mali job start/stop pair */
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum
+{
+	/* Sparse numbering (gap at 2-25) - presumably aligned with userspace
+	 * instrumentation IDs; do not renumber. */
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE= 33,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34,
+	MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35,
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum
+{
+	MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1,
+	MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum
+{
+	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1, /* DVFS operating point changed */
+} cinstr_profiling_event_reason_single_gpu_t;
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_uk_types.h b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644
index 00000000000000..512b1e2f3e3fed
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/include/linux/mali/mali_utgard_uk_types.h
@@ -0,0 +1,1133 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_GET_SYSTEM_INFO_SIZE, /**< _mali_ukk_get_system_info_size() */
+ _MALI_UK_GET_SYSTEM_INFO, /**< _mali_ukk_get_system_info() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+ _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */
+ _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+ _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+ _MALI_UK_PROFILING_GET_CONFIG, /**< __mali_uku_profiling_get_config() */
+ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+/** @brief Information about each Mali Core
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Both Fragment Processor (PP) and Vertex Processor (GP) cores are represented
+ * by this struct.
+ *
+ * The type is reported by the type field, _mali_core_info::_mali_core_type.
+ *
+ * Each core is given a unique Sequence number identifying it, the core_nr
+ * member.
+ *
+ * Flags are taken directly from the resource's flags, and are currently unused.
+ *
+ * Multiple mali_core_info structs are linked in a single linked list using the next field
+ */
+typedef struct _mali_core_info
+{
+ _mali_core_type type; /**< Type of core */
+ _mali_core_version version; /**< Core Version, as reported by the Core's Version Register */
+ u32 reg_address; /**< Address of Registers */
+ u32 core_nr; /**< Sequence number */
+ u32 flags; /**< Flags. Currently Unused. */
+ struct _mali_core_info * next; /**< Next core in Linked List */
+} _mali_core_info;
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+ _MALI_PP_READABLE = (1<<0), /** Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1<<1), /** Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1<<2), /** Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1<<3), /** Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1<<4), /** Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1<<5), /** Writeable by the CPU */
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /** Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * Each bank is uniquely identified by its identifier member. On Mali-nonMMU
+ * systems, to allocate from this bank, the value of identifier must be passed
+ * as the type_id member of the _mali_uk_get_big_block_s arguments to
+ * _mali_ukk_get_big_block.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /**< Unique identifier, to be used in allocate calls */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @brief Info about the whole Mali system.
+ *
+ * This Contains a linked list of the cores and memory banks available. Each
+ * list pointer will remain inside the system_info buffer supplied in the
+ * _mali_uk_get_system_info_s arguments to a _mali_ukk_get_system_info call.
+ *
+ * The has_mmu member must be inspected to ensure the correct group of
+ * Memory function calls is obtained - that is, those for either Mali-MMU
+ * or Mali-nonMMU. @see _mali_uk_memory
+ */
+typedef struct _mali_system_info
+{
+ _mali_core_info * core_info; /**< List of _mali_core_info structures */
+ _mali_mem_info * mem_info; /**< List of _mali_mem_info structures */
+ u32 has_mmu; /**< Non-zero if Mali-MMU present. Zero otherwise. */
+ _mali_driver_mode drivermode; /**< Reserved. Must always be _MALI_DRIVER_MODE_NORMAL */
+} _mali_system_info;
+
+/** @brief Arguments to _mali_ukk_get_system_info()
+ *
+ * A buffer of the size returned by _mali_ukk_get_system_info_size() must be
+ * allocated, and the pointer to this buffer must be written into the
+ * system_info member. The buffer must be suitably aligned for storage of
+ * the _mali_system_info structure - for example, one returned by
+ * _mali_osk_malloc(), which will be suitably aligned for any structure.
+ *
+ * The ukk_private member must be set to zero by the user-side. Under an OS
+ * implementation, the U/K interface must write in the user-side base address
+ * into the ukk_private member, so that the common code in
+ * _mali_ukk_get_system_info() can determine how to adjust the pointers such
+ * that they are sensible from user space. Leaving ukk_private as NULL implies
+ * that no pointer adjustment is necessary - which will be the case on a
+ * bare-metal/RTOS system.
+ *
+ * @see _mali_system_info
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer provided to store system information data */
+ _mali_system_info * system_info; /**< [in,out] pointer to buffer to store system information data. No initialisation of buffer required on input. */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+} _mali_uk_get_system_info_s;
+/** @} */ /* end group _mali_uk_getsysteminfo */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-4) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+} _mali_uk_pp_job_finished_s;
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum
+{
+ _MALI_UK_PP_JOB_WB0 = 1,
+ _MALI_UK_PP_JOB_WB1 = 2,
+ _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
+ u32 flush_id; /**< [in] Flush ID of job to disable WB units for */
+ _mali_uk_pp_job_wbx_flag wbx; /**< [in] write-back units to disable */
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+ _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ *
+ *
+ */
+typedef enum
+{
+ _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+ _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+ _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+ _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+{ \
+ "sw_events_enable", \
+ "colorbuffer_capture_enable", \
+ "depthbuffer_capture_enable", \
+ "stencilbuffer_capture_enable", \
+ "per_tile_counters_enable", \
+ "buffer_capture_compositor", \
+ "buffer_capture_window", \
+ "buffer_capture_other", \
+ "buffer_capture_n_frames", \
+ "buffer_capture_resize_factor", \
+ "sw_counters_enable", \
+};
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct
+{
+ _mali_uk_user_setting_t setting;
+ u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the mali_uk_notification_type in the code.type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 14
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are given reference to the context pointed to by the ctx pointer.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_user_setting_t setting; /**< [in] setting to get */
+ u32 value; /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s and _mali_uk_attach_ump_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+/** @note Mali-MMU only; will be supported in future version */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32* counters; /**< [in] The array of counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/license/gpl/mali_kernel_license.h b/drivers/parrot/gpu/mali200/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 00000000000000..e9e5e55a082227
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.c b/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.c
new file mode 100644
index 00000000000000..2750820dc38d0c
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.c
@@ -0,0 +1,523 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_pm.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_platform.h"
+#include "mali_kernel_license.h"
+#include "arch/config.h"
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && MALI_TIMELINE_PROFILING_ENABLED
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS */
+
+static _mali_osk_errcode_t initialize_kernel_device(void);
+static int initialize_sysfs(void);
+static void terminate_kernel_device(void);
+
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int mali_major = 0;
+module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(mali_major, "Device major number");
+
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* the mali device */
+static struct mali_dev device;
+
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+/* Linux char file operations provided by the Mali module */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+int mali_driver_init(void)
+{
+ int ret = 0;
+
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+ if (arch_configuration[0].base == -1)
+ return -ENODEV;
+
+ ret = _mali_dev_platform_register();
+ if (0 != ret) goto platform_register_failed;
+ ret = map_errcode(initialize_kernel_device());
+ if (0 != ret) goto initialize_kernel_device_failed;
+
+ ret = map_errcode(mali_platform_init());
+ if (0 != ret) goto platform_init_failed;
+
+ mali_osk_low_level_mem_init();
+
+ ret = map_errcode(mali_initialize_subsystems());
+ if (0 != ret) goto initialize_subsystems_failed;
+
+ ret = initialize_sysfs();
+ if (0 != ret) goto initialize_sysfs_failed;
+
+ MALI_PRINT(("Mali device driver loaded\n"));
+
+ return 0; /* Success */
+
+ /* Error handling */
+initialize_sysfs_failed:
+ mali_terminate_subsystems();
+initialize_subsystems_failed:
+ mali_osk_low_level_mem_term();
+ mali_platform_deinit();
+platform_init_failed:
+ terminate_kernel_device();
+initialize_kernel_device_failed:
+ _mali_dev_platform_unregister();
+platform_register_failed:
+ return ret;
+}
+
+void mali_driver_exit(void)
+{
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+
+ /* No need to terminate sysfs, this will be done automatically along with device termination */
+
+ mali_terminate_subsystems();
+
+ mali_osk_low_level_mem_term();
+
+ mali_platform_deinit();
+
+ terminate_kernel_device();
+ _mali_dev_platform_unregister();
+
+#if MALI_LICENSE_IS_GPL
+ /* @@@@ clean up the work queues! This should not be terminated here, since it isn't inited in the function above! */
+ flush_workqueue(mali_wq);
+ destroy_workqueue(mali_wq);
+ mali_wq = NULL;
+#endif
+
+ MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+static int initialize_kernel_device(void)
+{
+ int err;
+ dev_t dev = 0;
+ if (0 == mali_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
+ mali_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(mali_major, 0);
+ err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
+ }
+
+ if (err)
+ {
+ goto init_chrdev_err;
+ }
+
+ memset(&device, 0, sizeof(device));
+
+ /* initialize our char dev data */
+ cdev_init(&device.cdev, &mali_fops);
+ device.cdev.owner = THIS_MODULE;
+ device.cdev.ops = &mali_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&device.cdev, dev, 1/*count*/);
+ if (err)
+ {
+ goto init_cdev_err;
+ }
+
+ /* Success! */
+ return 0;
+
+init_cdev_err:
+ unregister_chrdev_region(dev, 1/*count*/);
+init_chrdev_err:
+ return err;
+}
+
+static int initialize_sysfs(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+ return mali_sysfs_register(&device, dev, mali_dev_name);
+}
+
+static void terminate_kernel_device(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+
+ mali_sysfs_unregister(&device, dev, mali_dev_name);
+
+ /* unregister char device */
+ cdev_del(&device.cdev);
+ /* free major */
+ unregister_chrdev_region(dev, 1/*count*/);
+ return;
+}
+
+/** @note munmap handler is done by vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start)) );
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+#ifdef CONFIG_UMP
+extern int map_errcode( _mali_osk_errcode_t err );
+#else
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+#endif
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_GET_SYSTEM_INFO_SIZE:
+ err = get_system_info_size_wrapper(session_data, (_mali_uk_get_system_info_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_SYSTEM_INFO:
+ err = get_system_info_wrapper(session_data, (_mali_uk_get_system_info_s __user *)arg);
+ break;
+
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_USER_SETTINGS:
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_CONFIG:
+ /* Deprecated: still compatible with get_user_settings */
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_PROFILING_START: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_STOP: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_CLEAR: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_CONFIG: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+ err = -ENOTTY;
+ break;
+
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_DISABLE_WB:
+ err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
+ err = -ENOTTY;
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ };
+
+ return err;
+}
+
+
+module_init(mali_driver_init);
+module_exit(mali_driver_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.h b/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.h
new file mode 100644
index 00000000000000..22dc9a431b15c6
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_linux.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk.h"
+
+/* Per-driver device bookkeeping: the character device plus, on GPL
+ * builds, the sysfs class used to expose the device node. */
+struct mali_dev
+{
+	struct cdev cdev;
+#if MALI_LICENSE_IS_GPL
+	struct class * mali_class;
+#endif
+};
+
+#if MALI_LICENSE_IS_GPL
+/* Defined in mali_osk_irq.h */
+extern struct workqueue_struct * mali_wq;
+#endif
+
+/* Low-level memory subsystem bring-up / teardown. */
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.c b/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.c
new file mode 100644
index 00000000000000..2cb23bfbf66c41
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.c
@@ -0,0 +1,265 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_pm.c
+ * Linux Power Management integration
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <asm/current.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_linux_pm.h"
+#include "mali_pm.h"
+#include "mali_platform.h"
+
+#if ! MALI_LICENSE_IS_GPL
+#undef CONFIG_PM_RUNTIME
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_runtime_suspend(struct device *dev);
+static int mali_runtime_resume(struct device *dev);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+static int mali_os_suspend(struct platform_device *pdev, pm_message_t state);
+static int mali_os_resume(struct platform_device *pdev);
+#else
+static int mali_os_suspend(struct device *dev);
+static int mali_os_resume(struct device *dev);
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+/* dev_pm_ops for kernels with the modern PM framework. With runtime PM
+ * the runtime hooks are used; otherwise the legacy suspend/resume
+ * callbacks are wired in. Hibernation (freeze/thaw/poweroff/restore)
+ * always maps onto the OS suspend/resume paths. */
+static const struct dev_pm_ops mali_dev_pm_ops =
+{
+#ifdef CONFIG_PM_RUNTIME
+	.runtime_suspend = mali_runtime_suspend,
+	.runtime_resume = mali_runtime_resume,
+	.runtime_idle = NULL,
+#else
+	.suspend = mali_os_suspend,
+	.resume = mali_os_resume,
+#endif
+
+	.freeze = mali_os_suspend,
+	.poweroff = mali_os_suspend,
+	.thaw = mali_os_resume,
+	.restore = mali_os_resume,
+};
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/* Pre-2.6.29 extended PM operations (hibernation hooks only). */
+struct pm_ext_ops mali_ext_pm_operations =
+{
+	.base =
+	{
+		.freeze = mali_os_suspend,
+		.thaw = mali_os_resume,
+		.poweroff = mali_os_suspend,
+		.restore = mali_os_resume,
+	},
+};
+#endif
+
+
+/* Platform driver bound to the statically declared "mali_dev" device. */
+static struct platform_driver mali_plat_driver =
+{
+	.probe = mali_probe,
+	.remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+	.suspend = mali_os_suspend,
+	.resume = mali_os_resume,
+	.pm = &mali_ext_pm_operations,
+#endif
+
+	.driver =
+	{
+		.name = "mali_dev",
+		.owner = THIS_MODULE,
+#if MALI_LICENSE_IS_GPL
+		.bus = &platform_bus_type,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+		.pm = &mali_dev_pm_ops,
+#endif
+	},
+};
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block =
+{
+ .notifier_call = mali_pwr_suspend_notifier
+};
+#endif
+
+/** This function is called when platform device is unregistered. This function
+ * is necessary when the platform device is unregistered.
+ */
+static void _mali_release_pm(struct device *device)
+{
+	/* Intentionally empty: nothing to release, but the driver core
+	 * warns if a platform device has no release callback. */
+}
+
+/* Statically declared Mali GPU platform device ("mali_dev", id 0). */
+struct platform_device mali_gpu_device =
+{
+	.name = "mali_dev",
+	.id = 0,
+	.dev.release = _mali_release_pm
+};
+
+/** This function is called when the device is probed */
+static int mali_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* Platform driver remove: only disables runtime PM for the device. */
+static int mali_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_disable(&pdev->dev);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/** System-sleep PM notifier: quiesce the GPU before system suspend and
+ * wake it again after resume. Registered in _mali_dev_platform_register().
+ *
+ * @return 0 (never blocks the suspend/resume sequence).
+ */
+static int mali_pwr_suspend_notifier(struct notifier_block *nb, unsigned long event, void *dummy)
+{
+	switch (event)
+	{
+		case PM_SUSPEND_PREPARE:
+			MALI_DEBUG_PRINT(2, ("mali_pwr_suspend_notifier(PM_SUSPEND_PREPARE) called\n"));
+			mali_pm_os_suspend();
+			break;
+		case PM_POST_SUSPEND:
+			/* Bug fix: this branch previously logged PM_SUSPEND_PREPARE,
+			 * making suspend/resume traces indistinguishable. */
+			MALI_DEBUG_PRINT(2, ("mali_pwr_suspend_notifier(PM_POST_SUSPEND) called\n"));
+			mali_pm_os_resume();
+			break;
+		default:
+			break;
+	}
+	return 0;
+}
+#endif
+
+
+#ifdef CONFIG_PM_RUNTIME
+
+/* Runtime-PM suspend hook: forwards to the common Mali PM core. */
+static int mali_runtime_suspend(struct device *dev)
+{
+	MALI_DEBUG_PRINT(3, ("mali_runtime_suspend() called\n"));
+	mali_pm_runtime_suspend();
+	return 0; /* all ok */
+}
+
+/* Runtime-PM resume hook: forwards to the common Mali PM core. */
+static int mali_runtime_resume(struct device *dev)
+{
+	MALI_DEBUG_PRINT(3, ("mali_runtime_resume() called\n"));
+	mali_pm_runtime_resume();
+	return 0; /* all ok */
+}
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+
+/* Legacy (pre-2.6.29) platform-device suspend callback. */
+static int mali_os_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	MALI_DEBUG_PRINT(3, ("mali_os_suspend(old) called\n"));
+	mali_pm_os_suspend();
+	return 0; /* all ok */
+}
+
+/* Legacy (pre-2.6.29) platform-device resume callback. */
+static int mali_os_resume(struct platform_device *pdev)
+{
+	MALI_DEBUG_PRINT(3, ("mali_os_resume(old) called\n"));
+	mali_pm_os_resume();
+	return 0; /* all ok */
+}
+
+#else
+
+/* dev_pm_ops-style suspend callback (2.6.29 and newer kernels). */
+static int mali_os_suspend(struct device *dev)
+{
+	MALI_DEBUG_PRINT(3, ("mali_os_suspend(new) called\n"));
+	mali_pm_os_suspend();
+	return 0; /* all ok */
+}
+
+/* dev_pm_ops-style resume callback (2.6.29 and newer kernels). */
+static int mali_os_resume(struct device *dev)
+{
+	MALI_DEBUG_PRINT(3, ("mali_os_resume(new) called\n"));
+	mali_pm_os_resume();
+	return 0; /* all ok */
+}
+
+#endif
+
+/** Register the Mali platform device and driver.
+ *
+ * With runtime PM enabled this also hooks the system-sleep notifier;
+ * on any failure everything registered so far is rolled back.
+ *
+ * @return 0 on success, negative errno otherwise.
+ */
+int _mali_dev_platform_register(void)
+{
+	/* Bug fix: err was previously uninitialized and returned as-is
+	 * when both CONFIG_PM_RUNTIME and MALI_LICENSE_IS_GPL are off. */
+	int err = 0;
+
+#ifdef CONFIG_PM_RUNTIME
+	set_mali_parent_power_domain((void *)&mali_gpu_device);
+
+	err = register_pm_notifier(&mali_pwr_notif_block);
+	if (err)
+	{
+		return err;
+	}
+#endif
+
+#if MALI_LICENSE_IS_GPL
+	err = platform_device_register(&mali_gpu_device);
+	if (!err)
+	{
+		err = platform_driver_register(&mali_plat_driver);
+		if (err)
+		{
+#ifdef CONFIG_PM_RUNTIME
+			unregister_pm_notifier(&mali_pwr_notif_block);
+#endif
+			platform_device_unregister(&mali_gpu_device);
+		}
+	}
+#endif
+
+	return err;
+}
+
+/** This function is called when Mali GPU device is unloaded
+ */
+void _mali_dev_platform_unregister(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+	/* Tear down in reverse order of _mali_dev_platform_register(). */
+	unregister_pm_notifier(&mali_pwr_notif_block);
+#endif
+
+#if MALI_LICENSE_IS_GPL
+	platform_driver_unregister(&mali_plat_driver);
+	platform_device_unregister(&mali_gpu_device);
+#endif
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.h b/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.h
new file mode 100644
index 00000000000000..6ef72706256c39
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_pm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PM_H__
+#define __MALI_KERNEL_PM_H__
+
+int _mali_dev_platform_register(void);
+void _mali_dev_platform_unregister(void);
+
+#endif /* __MALI_KERNEL_PM_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.c b/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.c
new file mode 100644
index 00000000000000..13b4ef6f789528
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.c
@@ -0,0 +1,1281 @@
+/**
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include "mali_kernel_sysfs.h"
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+#include "mali_pm.h"
+#include "mali_cluster.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_device_pause_resume.h"
+
+/* Size of the cached power-status string ("0".."2" plus NUL). */
+#define POWER_BUFFER_SIZE 3
+
+/* Root of the mali debugfs tree; NULL when debugfs is unavailable. */
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum
+{
+	_MALI_DEVICE_SUSPEND,
+	_MALI_DEVICE_RESUME,
+	_MALI_DEVICE_DVFS_PAUSE,
+	_MALI_DEVICE_DVFS_RESUME,
+	_MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+/* Command strings accepted by the "power_events" debugfs file. */
+static const char* const mali_power_events[_MALI_MAX_EVENTS] = {
+	[_MALI_DEVICE_SUSPEND] = "suspend",
+	[_MALI_DEVICE_RESUME] = "resume",
+	[_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+	[_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+/* Last power-event status (0..2) and its string form for readers. */
+static u32 virtual_power_status_register=0;
+static char pwr_buf[POWER_BUFFER_SIZE];
+
+/* Generic debugfs open: stash inode->i_private as file private data. */
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+/* Read the selected GP performance counter source (src_id 0 or 1) for
+ * the core stored in filp->private_data; prints "-1" when disabled. */
+static ssize_t gp_gpx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+	char buf[64];
+	int r;
+	u32 val;
+	struct mali_gp_core *gp_core = (struct mali_gp_core *)filp->private_data;
+
+	if (0 == src_id)
+	{
+		val = mali_gp_core_get_counter_src0(gp_core);
+	}
+	else
+	{
+		val = mali_gp_core_get_counter_src1(gp_core);
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER == val)
+	{
+		r = sprintf(buf, "-1\n");
+	}
+	else
+	{
+		r = sprintf(buf, "%u\n", val);
+	}
+	return simple_read_from_buffer(ubuf, cnt, gpos, buf, r);
+}
+
+/* Set the selected GP counter source on one core; a negative value
+ * disables the counter. Returns 0 (short write) if the core rejects
+ * the source, cnt otherwise. */
+static ssize_t gp_gpx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+	struct mali_gp_core *gp_core = (struct mali_gp_core *)filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	if (0 == src_id)
+	{
+		if (MALI_TRUE != mali_gp_core_set_counter_src0(gp_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+	else
+	{
+		if (MALI_TRUE != mali_gp_core_set_counter_src1(gp_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+
+	*gpos += cnt;
+	return cnt;
+}
+
+/* Set the selected GP counter source on every GP core in every cluster.
+ * Stops and returns 0 at the first core that rejects the source. */
+static ssize_t gp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+	char buf[64];
+	long val;
+	int ret;
+	u32 ci;
+	struct mali_cluster *cluster;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	/* Walk every cluster, then every group in it, applying the new
+	 * source to each GP core found. */
+	ci = 0;
+	cluster = mali_cluster_get_global_cluster(ci);
+	while (NULL != cluster)
+	{
+		u32 gi = 0;
+		struct mali_group *group = mali_cluster_get_group(cluster, gi);
+		while (NULL != group)
+		{
+			struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+			if (NULL != gp_core)
+			{
+				if (0 == src_id)
+				{
+					if (MALI_TRUE != mali_gp_core_set_counter_src0(gp_core, (u32)val))
+					{
+						return 0;
+					}
+				}
+				else
+				{
+					if (MALI_TRUE != mali_gp_core_set_counter_src1(gp_core, (u32)val))
+					{
+						return 0;
+					}
+				}
+			}
+
+			/* try next group */
+			gi++;
+			group = mali_cluster_get_group(cluster, gi);
+		}
+
+		/* try next cluster */
+		ci++;
+		cluster = mali_cluster_get_global_cluster(ci);
+	}
+
+	*gpos += cnt;
+	return cnt;
+}
+
+/* Thin src0/src1 wrappers and their debugfs file_operations tables. */
+static ssize_t gp_gpx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_gpx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+	return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static const struct file_operations gp_gpx_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = gp_gpx_counter_src0_read,
+	.write = gp_gpx_counter_src0_write,
+};
+
+static const struct file_operations gp_gpx_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = gp_gpx_counter_src1_read,
+	.write = gp_gpx_counter_src1_write,
+};
+
+static const struct file_operations gp_all_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.write = gp_all_counter_src0_write,
+};
+
+static const struct file_operations gp_all_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.write = gp_all_counter_src1_write,
+};
+
+/* Read the selected PP performance counter source (src_id 0 or 1) for
+ * the core stored in filp->private_data; prints "-1" when disabled. */
+static ssize_t pp_ppx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	int r;
+	u32 val;
+	struct mali_pp_core *pp_core = (struct mali_pp_core *)filp->private_data;
+
+	if (0 == src_id)
+	{
+		val = mali_pp_core_get_counter_src0(pp_core);
+	}
+	else
+	{
+		val = mali_pp_core_get_counter_src1(pp_core);
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER == val)
+	{
+		r = sprintf(buf, "-1\n");
+	}
+	else
+	{
+		r = sprintf(buf, "%u\n", val);
+	}
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* Set the selected PP counter source on one core; negative disables.
+ * Returns 0 (short write) if the core rejects the source. */
+static ssize_t pp_ppx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	struct mali_pp_core *pp_core = (struct mali_pp_core *)filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	if (0 == src_id)
+	{
+		if (MALI_TRUE != mali_pp_core_set_counter_src0(pp_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+	else
+	{
+		if (MALI_TRUE != mali_pp_core_set_counter_src1(pp_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Set the selected PP counter source on every PP core in every cluster.
+ * Stops and returns 0 at the first core that rejects the source. */
+static ssize_t pp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	long val;
+	int ret;
+	u32 ci;
+	struct mali_cluster *cluster;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	/* Walk every cluster, then every group, applying the new source
+	 * to each PP core found. */
+	ci = 0;
+	cluster = mali_cluster_get_global_cluster(ci);
+	while (NULL != cluster)
+	{
+		u32 gi = 0;
+		struct mali_group *group = mali_cluster_get_group(cluster, gi);
+		while (NULL != group)
+		{
+			struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+			if (NULL != pp_core)
+			{
+				if (0 == src_id)
+				{
+					if (MALI_TRUE != mali_pp_core_set_counter_src0(pp_core, (u32)val))
+					{
+						return 0;
+					}
+				}
+				else
+				{
+					if (MALI_TRUE != mali_pp_core_set_counter_src1(pp_core, (u32)val))
+					{
+						return 0;
+					}
+				}
+			}
+
+			/* try next group */
+			gi++;
+			group = mali_cluster_get_group(cluster, gi);
+		}
+
+		/* try next cluster */
+		ci++;
+		cluster = mali_cluster_get_global_cluster(ci);
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Thin src0/src1 wrappers and their debugfs file_operations tables. */
+static ssize_t pp_ppx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_ppx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations pp_ppx_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = pp_ppx_counter_src0_read,
+	.write = pp_ppx_counter_src0_write,
+};
+
+static const struct file_operations pp_ppx_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = pp_ppx_counter_src1_read,
+	.write = pp_ppx_counter_src1_write,
+};
+
+static const struct file_operations pp_all_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.write = pp_all_counter_src0_write,
+};
+
+static const struct file_operations pp_all_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.write = pp_all_counter_src1_write,
+};
+
+
+
+
+
+
+/* Read the selected L2 cache counter source (src_id 0 or 1) for the
+ * cache stored in filp->private_data; prints "-1" when disabled. */
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	int r;
+	u32 val;
+	struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+	if (0 == src_id)
+	{
+		val = mali_l2_cache_core_get_counter_src0(l2_core);
+	}
+	else
+	{
+		val = mali_l2_cache_core_get_counter_src1(l2_core);
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER == val)
+	{
+		r = sprintf(buf, "-1\n");
+	}
+	else
+	{
+		r = sprintf(buf, "%u\n", val);
+	}
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* Set the selected L2 counter source on one cache; negative disables.
+ * Returns 0 (short write) if the cache rejects the source. */
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	if (0 == src_id)
+	{
+		if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+	else
+	{
+		if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_core, (u32)val))
+		{
+			return 0;
+		}
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Set the selected counter source on every global L2 cache instance.
+ * Stops and returns 0 at the first cache that rejects the source. */
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+	char buf[64];
+	long val;
+	int ret;
+	u32 l2_id;
+	struct mali_l2_cache_core *l2_cache;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val < 0)
+	{
+		/* any negative input will disable counter */
+		val = MALI_HW_CORE_NO_COUNTER;
+	}
+
+	l2_id = 0;
+	l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+	while (NULL != l2_cache)
+	{
+		if (0 == src_id)
+		{
+			if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val))
+			{
+				return 0;
+			}
+		}
+		else
+		{
+			if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val))
+			{
+				return 0;
+			}
+		}
+
+		/* try next L2 */
+		l2_id++;
+		l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Thin src0/src1 wrappers and their debugfs file_operations tables. */
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = l2_l2x_counter_src0_read,
+	.write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.open = open_copy_private_data,
+	.read = l2_l2x_counter_src1_read,
+	.write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+	.owner = THIS_MODULE,
+	.write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+	.owner = THIS_MODULE,
+	.write = l2_all_counter_src1_write,
+};
+
+/** debugfs "power_events" write handler.
+ *
+ * Accepts the commands "suspend", "resume", "dvfs_pause" and
+ * "dvfs_resume" and drives the corresponding PM transition. The
+ * resulting status (0..2) is cached in pwr_buf for power_events_read().
+ */
+static ssize_t power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char event_buf[32];
+	size_t copy_cnt = min(cnt, sizeof(event_buf) - 1);
+
+	/* Bug fix: ubuf is a userspace pointer and must be copied with
+	 * copy_from_user() before parsing; the previous code called
+	 * strncmp() on it directly, which is an invalid user access. */
+	if (copy_from_user(event_buf, ubuf, copy_cnt))
+	{
+		return -EFAULT;
+	}
+	event_buf[copy_cnt] = '\0';
+
+	memset(pwr_buf, 0, POWER_BUFFER_SIZE);
+	virtual_power_status_register = 0;
+	if (!strncmp(event_buf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND])))
+	{
+		mali_pm_os_suspend();
+		/* @@@@ assuming currently suspend is successful later on to tune as per previous*/
+		virtual_power_status_register = 1;
+	}
+	else if (!strncmp(event_buf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME])))
+	{
+		mali_pm_os_resume();
+		/* @@@@ assuming currently resume is successful later on to tune as per previous */
+		virtual_power_status_register = 1;
+	}
+	else if (!strncmp(event_buf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE])))
+	{
+		mali_bool power_on;
+		mali_dev_pause(&power_on);
+		if (!power_on)
+		{
+			/* GPU was already powered off: undo the pause. */
+			virtual_power_status_register = 2;
+			mali_dev_resume();
+		}
+		else
+		{
+			/* @@@@ assuming currently resume is successful later on to tune as per previous */
+			virtual_power_status_register = 1;
+		}
+	}
+	else if (!strncmp(event_buf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME])))
+	{
+		mali_dev_resume();
+		/* @@@@ assuming currently resume is successful later on to tune as per previous */
+		virtual_power_status_register = 1;
+	}
+	*ppos += cnt;
+	sprintf(pwr_buf, "%d", virtual_power_status_register); /* value 0..2 fits in 3 bytes */
+	return cnt;
+}
+
+/* Return the cached status string produced by power_events_write(). */
+static ssize_t power_events_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	return simple_read_from_buffer(ubuf, cnt, ppos, pwr_buf, POWER_BUFFER_SIZE);
+}
+
+/* Unconditionally accept any seek offset (tiny fixed-size buffer). */
+static loff_t power_events_seek(struct file *file, loff_t offset, int orig)
+{
+	file->f_pos = offset;
+	return 0;
+}
+
+static const struct file_operations power_events_fops = {
+	.owner = THIS_MODULE,
+	.read = power_events_read,
+	.write = power_events_write,
+	.llseek = power_events_seek,
+};
+
+
+#if MALI_STATE_TRACKING
+/* seq_file show: dump driver version, license and full internal state. */
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+	u32 len = 0;
+	u32 size;
+	char *buf;
+
+	size = seq_get_buf(seq_file, &buf);
+
+	if(!size)
+	{
+		return -ENOMEM;
+	}
+
+	/* Create the internal state dump. */
+	len = snprintf(buf+len, size-len, "Mali device driver %s\n", SVN_REV_STRING);
+	len += snprintf(buf+len, size-len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+	len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+	seq_commit(seq_file, len);
+
+	return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+	.owner = THIS_MODULE,
+	.open = mali_seq_internal_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+/* Report whether a profiling recording is currently in progress. */
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	r = sprintf(buf, "%u\n", _mali_osk_profiling_is_recording() ? 1 : 0);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* Start (non-zero write) or stop (zero write) profiling recording.
+ * Starting clears any previous recording first. */
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val != 0)
+	{
+		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+		/* check if we are already recording */
+		if (MALI_TRUE == _mali_osk_profiling_is_recording())
+		{
+			MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+			return -EFAULT;
+		}
+
+		/* check if we need to clear out an old recording first */
+		if (MALI_TRUE == _mali_osk_profiling_have_recording())
+		{
+			if (_MALI_OSK_ERR_OK != _mali_osk_profiling_clear())
+			{
+				MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+				return -EFAULT;
+			}
+		}
+
+		/* start recording profiling data */
+		if (_MALI_OSK_ERR_OK != _mali_osk_profiling_start(&limit))
+		{
+			MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+	}
+	else
+	{
+		/* stop recording profiling data */
+		u32 count = 0;
+		if (_MALI_OSK_ERR_OK != _mali_osk_profiling_stop(&count))
+		{
+			MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+	.owner = THIS_MODULE,
+	.read = profiling_record_read,
+	.write = profiling_record_write,
+};
+
+/* seq_file iterator start: allocate and return the cursor position,
+ * or NULL when there is no recorded data. */
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+	loff_t *spos;
+
+	/* check if we have data available */
+	if (MALI_TRUE != _mali_osk_profiling_have_recording())
+	{
+		return NULL;
+	}
+
+	spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+	if (NULL == spos)
+	{
+		return NULL;
+	}
+
+	*spos = *pos;
+	return spos;
+}
+
+/* seq_file iterator next: advance the cursor, or NULL at the end. */
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	loff_t *spos = v;
+
+	/* check if we have data available */
+	if (MALI_TRUE != _mali_osk_profiling_have_recording())
+	{
+		return NULL;
+	}
+
+	/* check if the next entry actually is available */
+	if (_mali_osk_profiling_get_count() <= (u32)(*spos + 1))
+	{
+		return NULL;
+	}
+
+	*pos = ++*spos;
+	return spos;
+}
+
+/* seq_file iterator stop: free the cursor allocated in start(). */
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+	kfree(v);
+}
+
+/* seq_file show: print one event as "timestamp id d0 d1 d2 d3 d4". */
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+	loff_t *spos = v;
+	u32 index;
+	u64 timestamp;
+	u32 event_id;
+	u32 data[5];
+
+	index = (u32)*spos;
+
+	/* Retrieve all events */
+	if (_MALI_OSK_ERR_OK == _mali_osk_profiling_get_event(index, &timestamp, &event_id, data))
+	{
+		seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+		return 0;
+	}
+
+	return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+	.start = profiling_events_start,
+	.next = profiling_events_next,
+	.stop = profiling_events_stop,
+	.show = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+	.owner = THIS_MODULE,
+	.open = profiling_events_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+#endif
+
+/* Report current Mali memory usage (bytes, as reported by the UKK). */
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	size_t r;
+	u32 mem = _mali_ukk_report_memory_usage();
+
+	r = snprintf(buf, 64, "%u\n", mem);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations memory_usage_fops = {
+	.owner = THIS_MODULE,
+	.read = memory_used_read,
+};
+
+
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ _mali_uk_user_setting_t setting;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (0 != ret)
+ {
+ return ret;
+ }
+
+ /* Update setting */
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ mali_set_user_setting(setting, val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* debugfs read handler for a per-setting "userspace_settings" file:
+ * print the current value of the setting identified by
+ * filp->private_data as a decimal string. */
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	size_t r;
+	u32 value;
+	_mali_uk_user_setting_t setting;
+
+	setting = (_mali_uk_user_setting_t)(filp->private_data);
+	value = mali_get_user_setting(setting);
+
+	r = snprintf(buf, 64, "%u\n", value);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+	.owner = THIS_MODULE,
+	/* open_copy_private_data copies inode private data (the setting id)
+	 * into filp->private_data for the read/write handlers above */
+	.open = open_copy_private_data,
+	.read = user_settings_read,
+	.write = user_settings_write,
+};
+
+/* Create the "userspace_settings" debugfs directory with one file per
+ * user setting. The setting index is smuggled through the file's
+ * private data as (void*)i. Always returns 0; debugfs failures are
+ * non-fatal (the directory is simply absent). */
+static int mali_sysfs_user_settings_register(void)
+{
+	struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+	if (mali_user_settings_dir != NULL)
+	{
+		int i;
+		for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++)
+		{
+			debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void*)i, &user_settings_fops);
+		}
+	}
+
+	return 0;
+}
+
+/* Create the Mali device class/device node and populate the debugfs
+ * tree: power events, per-core GP/PP/L2 hardware-counter controls,
+ * memory_usage, optional profiling controls, optional state dump, and
+ * the userspace settings directory. debugfs being unsupported
+ * (-ENODEV) or unavailable is tolerated silently; only
+ * class_create()/device_create() failures are fatal and unwound via
+ * the goto labels at the bottom. */
+int mali_sysfs_register(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	int err = 0;
+	struct device * mdev;
+
+	device->mali_class = class_create(THIS_MODULE, mali_dev_name);
+	if (IS_ERR(device->mali_class))
+	{
+		err = PTR_ERR(device->mali_class);
+		goto init_class_err;
+	}
+	mdev = device_create(device->mali_class, NULL, dev, NULL, mali_dev_name);
+	if (IS_ERR(mdev))
+	{
+		err = PTR_ERR(mdev);
+		goto init_mdev_err;
+	}
+
+	mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+	if(ERR_PTR(-ENODEV) == mali_debugfs_dir)
+	{
+		/* Debugfs not supported. */
+		mali_debugfs_dir = NULL;
+	}
+	else
+	{
+		if(NULL != mali_debugfs_dir)
+		{
+			/* Debugfs directory created successfully; create files now */
+			struct dentry *mali_power_dir;
+			struct dentry *mali_gp_dir;
+			struct dentry *mali_pp_dir;
+			struct dentry *mali_l2_dir;
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+			struct dentry *mali_profiling_dir;
+#endif
+
+			mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+			if (mali_power_dir != NULL)
+			{
+				debugfs_create_file("power_events", 0400, mali_power_dir, NULL, &power_events_fops);
+			}
+
+			mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+			if (mali_gp_dir != NULL)
+			{
+				struct dentry *mali_gp_all_dir;
+				u32 ci;
+				struct mali_cluster *cluster;
+
+				mali_gp_all_dir = debugfs_create_dir("all", mali_gp_dir);
+				if (mali_gp_all_dir != NULL)
+				{
+					debugfs_create_file("counter_src0", 0400, mali_gp_all_dir, NULL, &gp_all_counter_src0_fops);
+					debugfs_create_file("counter_src1", 0400, mali_gp_all_dir, NULL, &gp_all_counter_src1_fops);
+				}
+
+				/* walk clusters/groups to find the (single) GP core;
+				 * its directory is hard-coded "gp0" since at most one
+				 * GP core exists (see the break below) */
+				ci = 0;
+				cluster = mali_cluster_get_global_cluster(ci);
+				while (NULL != cluster)
+				{
+					u32 gi = 0;
+					struct mali_group *group = mali_cluster_get_group(cluster, gi);
+					while (NULL != group)
+					{
+						struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+						if (NULL != gp_core)
+						{
+							struct dentry *mali_gp_gpx_dir;
+							mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+							if (NULL != mali_gp_gpx_dir)
+							{
+								debugfs_create_file("counter_src0", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src0_fops);
+								debugfs_create_file("counter_src1", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src1_fops);
+							}
+							break; /* no need to look for any other GP cores */
+						}
+
+						/* try next group */
+						gi++;
+						group = mali_cluster_get_group(cluster, gi);
+					}
+
+					/* try next cluster */
+					ci++;
+					cluster = mali_cluster_get_global_cluster(ci);
+				}
+			}
+
+			mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+			if (mali_pp_dir != NULL)
+			{
+				struct dentry *mali_pp_all_dir;
+				u32 ci;
+				struct mali_cluster *cluster;
+
+				mali_pp_all_dir = debugfs_create_dir("all", mali_pp_dir);
+				if (mali_pp_all_dir != NULL)
+				{
+					debugfs_create_file("counter_src0", 0400, mali_pp_all_dir, NULL, &pp_all_counter_src0_fops);
+					debugfs_create_file("counter_src1", 0400, mali_pp_all_dir, NULL, &pp_all_counter_src1_fops);
+				}
+
+				/* one "pp<N>" directory per pixel-processor core */
+				ci = 0;
+				cluster = mali_cluster_get_global_cluster(ci);
+				while (NULL != cluster)
+				{
+					u32 gi = 0;
+					struct mali_group *group = mali_cluster_get_group(cluster, gi);
+					while (NULL != group)
+					{
+						struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+						if (NULL != pp_core)
+						{
+							char buf[16];
+							struct dentry *mali_pp_ppx_dir;
+							_mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+							mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+							if (NULL != mali_pp_ppx_dir)
+							{
+								debugfs_create_file("counter_src0", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src0_fops);
+								debugfs_create_file("counter_src1", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src1_fops);
+							}
+						}
+
+						/* try next group */
+						gi++;
+						group = mali_cluster_get_group(cluster, gi);
+					}
+
+					/* try next cluster */
+					ci++;
+					cluster = mali_cluster_get_global_cluster(ci);
+				}
+			}
+
+			mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+			if (mali_l2_dir != NULL)
+			{
+				struct dentry *mali_l2_all_dir;
+				u32 l2_id;
+				struct mali_l2_cache_core *l2_cache;
+
+				mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+				if (mali_l2_all_dir != NULL)
+				{
+					debugfs_create_file("counter_src0", 0400, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+					debugfs_create_file("counter_src1", 0400, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+				}
+
+				/* one "l2<N>" directory per L2 cache instance */
+				l2_id = 0;
+				l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+				while (NULL != l2_cache)
+				{
+					char buf[16];
+					struct dentry *mali_l2_l2x_dir;
+					_mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+					mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+					if (NULL != mali_l2_l2x_dir)
+					{
+						debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+						debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+					}
+
+					/* try next L2 */
+					l2_id++;
+					l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+				}
+			}
+
+			debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+			mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+			if (mali_profiling_dir != NULL)
+			{
+				struct dentry *mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+				if (mali_profiling_proc_dir != NULL)
+				{
+					struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+					if (mali_profiling_proc_default_dir != NULL)
+					{
+						debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void*)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+					}
+				}
+				debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+				debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+			}
+#endif
+
+#if MALI_STATE_TRACKING
+			debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+			if (mali_sysfs_user_settings_register())
+			{
+				/* Failed to create the debugfs entries for the user settings DB. */
+				MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+			}
+		}
+	}
+
+	/* Success! */
+	return 0;
+
+	/* Error handling */
+init_mdev_err:
+	class_destroy(device->mali_class);
+init_class_err:
+
+	return err;
+}
+
+/* Undo mali_sysfs_register(): tear down the whole debugfs subtree (if it
+ * was created) and destroy the device node and class. Always returns 0. */
+int mali_sysfs_unregister(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	if(NULL != mali_debugfs_dir)
+	{
+		debugfs_remove_recursive(mali_debugfs_dir);
+	}
+	device_destroy(device->mali_class, dev);
+	class_destroy(device->mali_class);
+
+	return 0;
+}
+
+#else
+
+/* Dummy implementations for non-GPL */
+
+/* Non-GPL build: debugfs/class registration is compiled out entirely,
+ * so both entry points are no-op successes. */
+int mali_sysfs_register(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	return 0;
+}
+
+int mali_sysfs_unregister(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	return 0;
+}
+
+
+#endif
diff --git a/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.h b/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.h
new file mode 100644
index 00000000000000..d79a886226943b
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_kernel_sysfs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_PROC_DIR "driver/mali"
+
+int mali_sysfs_register(struct mali_dev *mali_class, dev_t dev, const char *mali_dev_name);
+
+int mali_sysfs_unregister(struct mali_dev *mali_class, dev_t dev, const char *mali_dev_name);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_linux_pm.h b/drivers/parrot/gpu/mali200/linux/mali_linux_pm.h
new file mode 100644
index 00000000000000..10f633e4177cb7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_linux_pm.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_LINUX_PM_H__
+#define __MALI_LINUX_PM_H__
+
+#ifdef CONFIG_PM
+/* Number of power states supported for making power up and down */
+typedef enum
+{
+ _MALI_DEVICE_SUSPEND, /* Suspend */
+ _MALI_DEVICE_RESUME, /* Resume */
+ _MALI_DEVICE_MAX_POWER_STATES, /* Maximum power states */
+} _mali_device_power_states;
+
+/* Number of DVFS events */
+typedef enum
+{
+ _MALI_DVFS_PAUSE_EVENT = _MALI_DEVICE_MAX_POWER_STATES, /* DVFS Pause event */
+ _MALI_DVFS_RESUME_EVENT, /* DVFS Resume event */
+ _MALI_MAX_DEBUG_OPERATIONS,
+} _mali_device_dvfs_events;
+
+extern _mali_device_power_states mali_device_state;
+extern _mali_device_power_states mali_dvfs_device_state;
+extern _mali_osk_lock_t *lock;
+extern short is_wake_up_needed;
+extern int timeout_fired;
+extern struct platform_device mali_gpu_device;
+
+/* dvfs pm thread */
+extern struct task_struct *dvfs_pm_thread;
+
+/* Power management thread */
+extern struct task_struct *pm_thread;
+
+int mali_device_suspend(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_device_resume(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_get_ospmm_thread_state(void);
+
+#endif /* CONFIG_PM */
+#endif /* __MALI_LINUX_PM_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_linux_pm_testsuite.h b/drivers/parrot/gpu/mali200/linux/mali_linux_pm_testsuite.h
new file mode 100644
index 00000000000000..7d811bd787da03
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_linux_pm_testsuite.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+#if MALI_POWER_MGMT_TEST_SUITE && defined(CONFIG_PM)
+
+typedef enum
+{
+ _MALI_DEVICE_PMM_TIMEOUT_EVENT,
+ _MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+ _MALI_DEVICE_PMM_REGISTERED_CORES,
+ _MALI_DEVICE_MAX_PMM_EVENTS
+
+} _mali_device_pmm_recording_events;
+
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE && defined(CONFIG_PM) */
+
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_linux_trace.h b/drivers/parrot/gpu/mali200/linux/mali_linux_trace.h
new file mode 100644
index 00000000000000..5329ba360704e7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_linux_trace.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) /* was misspelled "__stringfy" */
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ * encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+ TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+ unsigned int d2, unsigned int d3, unsigned int d4),
+
+ TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned int, d0)
+ __field(unsigned int, d1)
+ __field(unsigned int, d2)
+ __field(unsigned int, d3)
+ __field(unsigned int, d4)
+ ),
+
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->d0 = d0;
+ __entry->d1 = d1;
+ __entry->d2 = d2;
+ __entry->d3 = d3;
+ __entry->d4 = d4;
+ ),
+
+ TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+ TP_PROTO(unsigned int counter_id, unsigned int value),
+
+ TP_ARGS(counter_id, value),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, counter_id)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __entry->counter_id = counter_id;
+ ),
+
+ TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+);
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+ TP_PROTO(pid_t pid, pid_t tid, void * surface_id, unsigned int * counters),
+
+ TP_ARGS(pid, tid, surface_id, counters),
+
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(pid_t, tid)
+ __field(void *, surface_id)
+ __field(unsigned int *, counters)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->tid = tid;
+ __entry->surface_id = surface_id;
+ __entry->counters = counters;
+ ),
+
+ TP_printk("counters were %s", __entry->counters == NULL? "NULL" : "not NULL")
+);
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_atomics.c b/drivers/parrot/gpu/mali200/linux/mali_osk_atomics.c
new file mode 100644
index 00000000000000..05831c54272382
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+ MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ atomic_set((atomic_t *)&atom->u.val, val);
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+ MALI_IGNORE(atom);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.c b/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.c
new file mode 100644
index 00000000000000..7297218b9b643d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * @file mali_osk_specific.c
+ * Implementation of per-OS Kernel level specifics
+ */
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
+{
+ /* args->ctx ignored here; args->ukk_private required instead */
+ /* we need to lock the mmap semaphore before calling the do_mmap function */
+ down_write(&current->mm->mmap_sem);
+
+ args->mapping = (void __user *)do_mmap(
+ (struct file *)args->ukk_private,
+ 0, /* start mapping from any address after NULL */
+ args->size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ args->phys_addr
+ );
+
+ /* and unlock it after the call */
+ up_write(&current->mm->mmap_sem);
+
+ /* No cookie required here */
+ args->cookie = 0;
+ /* uku_private meaningless, so zero */
+ args->uku_private = NULL;
+
+ if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Success */
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args )
+{
+ /* args->ctx and args->cookie ignored here */
+
+ if ((NULL != current) && (NULL != current->mm))
+ {
+ /* remove mapping of mali memory from the process' view */
+ /* lock mmap semaphore before call */
+ /* lock mmap_sem before calling do_munmap */
+ down_write(&current->mm->mmap_sem);
+ do_munmap(
+ current->mm,
+ (unsigned long)args->mapping,
+ args->size
+ );
+ /* and unlock after call */
+ up_write(&current->mm->mmap_sem);
+ MALI_DEBUG_PRINT(5, ("unmapped\n"));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Freeing of a big block while no user process attached, assuming crash cleanup in progress\n"));
+ }
+
+ return _MALI_OSK_ERR_OK; /* always succeeds */
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.h b/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.h
new file mode 100644
index 00000000000000..f87739bb8f9861
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_indir_mmap.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_INDIR_MMAP_H__
+#define __MALI_OSK_INDIR_MMAP_H__
+
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Linux specific means for calling _mali_ukk_mem_mmap/munmap
+ *
+ * The presence of _MALI_OSK_SPECIFIC_INDIRECT_MMAP indicates that
+ * _mali_osk_specific_indirect_mmap and _mali_osk_specific_indirect_munmap
+ * should be used instead of _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * The arguments are the same as _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * In ALL operating system other than Linux, it is expected that common code
+ * should be able to call _mali_ukk_mem_mmap/_mali_ukk_mem_munmap directly.
+ * Such systems should NOT define _MALI_OSK_SPECIFIC_INDIRECT_MMAP.
+ */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_INDIR_MMAP_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_irq.c b/drivers/parrot/gpu/mali200/linux/mali_osk_irq.c
new file mode 100644
index 00000000000000..ddfe5642a5ca64
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_irq.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+#include "linux/interrupt.h"
+
+typedef struct _mali_osk_irq_t_struct
+{
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+ _mali_osk_irq_bhandler_t bhandler;
+ struct work_struct work_queue_irq_handle; /* Workqueue for the bottom half of the IRQ-handling. This job is activated when this core gets an IRQ.*/
+} mali_osk_irq_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *pmm_wq = NULL;
+struct workqueue_struct *mali_wq = NULL;
+#endif
+
+typedef void (*workqueue_func_t)(void *);
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work );
+#else
+static void irq_handler_bottom_half ( void * input );
+#endif
+
+/**
+ * Linux kernel version has marked SA_SHIRQ as deprecated, IRQF_SHARED should be used.
+ * This is to handle older kernels which haven't done this swap.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description )
+{
+ mali_osk_irq_object_t *irq_object;
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) return NULL;
+
+#if MALI_LICENSE_IS_GPL
+ if (NULL == mali_wq)
+ {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+ mali_wq = alloc_workqueue("mali", WQ_UNBOUND, 0);
+#else
+ mali_wq = create_workqueue("mali");
+#endif
+ if(NULL == mali_wq)
+ {
+ MALI_PRINT_ERROR(("Unable to create Mali workqueue\n"));
+ kfree(irq_object);
+ return NULL;
+ }
+ }
+#endif
+
+ /* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+ /* New syntax: INIT_WORK( struct work_struct *work, void (*function)(struct work_struct *)) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half);
+#else
+ /* Old syntax: INIT_WORK( struct work_struct *work, void (*function)(void *), void *data) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half, irq_object);
+#endif /* defined(INIT_DELAYED_WORK) */
+
+ if (-1 == irqnum)
+ {
+ /* Probe for IRQ */
+ if ( (NULL != trigger_func) && (NULL != ack_func) )
+ {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do
+ {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(data);
+ }
+ while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ }
+ else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum)
+ {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->bhandler = bhandler;
+ irq_object->data = data;
+
+ /* Is this a real IRQ handler we need? */
+ if (irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+ {
+ if (-1 == irqnum)
+ {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+ {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+ }
+
+#if MALI_LICENSE_IS_GPL
+ if ( _MALI_OSK_IRQ_NUMBER_PMM == irqnum )
+ {
+ pmm_wq = create_singlethread_workqueue("mali-pmm-wq");
+ }
+#endif
+
+ return irq_object;
+}
+
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+#if MALI_LICENSE_IS_GPL
+ if ( irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ queue_work( pmm_wq,&irq_object->work_queue_irq_handle );
+ }
+ else
+ {
+ queue_work(mali_wq, &irq_object->work_queue_irq_handle);
+ }
+#else
+ schedule_work(&irq_object->work_queue_irq_handle);
+#endif
+}
+
+void _mali_osk_flush_workqueue( _mali_osk_irq_t *irq )
+{
+#if MALI_LICENSE_IS_GPL
+ if (NULL != irq)
+ {
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+ if(irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ flush_workqueue(pmm_wq);
+ }
+ else
+ {
+ flush_workqueue(mali_wq);
+ }
+ }
+ else
+ {
+ flush_workqueue(mali_wq);
+ }
+#endif
+}
+
+/* Tear down an IRQ object created by _mali_osk_irq_init().
+ * free_irq() must only be called for real hardware interrupts:
+ * _mali_osk_irq_init() never calls request_irq() for the FAKE and PMM
+ * pseudo interrupt numbers, so freeing them here would WARN on an
+ * IRQ that was never requested. */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+
+#if MALI_LICENSE_IS_GPL
+	if(irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+	{
+		/* drain and destroy the private PMM workqueue; clear the
+		 * pointer so a second term cannot double-destroy it */
+		flush_workqueue(pmm_wq);
+		destroy_workqueue(pmm_wq);
+		pmm_wq = NULL;
+	}
+#endif
+	if (irq_object->irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irq_object->irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+	{
+		free_irq(irq_object->irqnum, irq_object);
+	}
+	kfree(irq_object);
+	flush_scheduled_work();
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU get the hw-irq from mali, or other devices on the same IRQ-channel.
+ * It is registered one of these function for each mali core. When an interrupt
+ * arrives this function will be called equal times as registered mali cores.
+ * That means that we only check one mali core in one function call, and the
+ * core we check for each turn is given by the \a dev_id variable.
+ * If we detect an pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule the mali_core_irq_handler_bottom_half to run as high priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+ {
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* Is executed when an interrupt occur on one core */
+/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work )
+#else
+static void irq_handler_bottom_half ( void * input )
+#endif
+{
+ mali_osk_irq_object_t *irq_object;
+
+#if defined(INIT_DELAYED_WORK)
+ irq_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_irq_object_t, work_queue_irq_handle);
+#else
+ if ( NULL == input )
+ {
+ MALI_PRINT_ERROR(("IRQ: Null pointer! Illegal!"));
+ return; /* Error */
+ }
+ irq_object = (mali_osk_irq_object_t *) input;
+#endif
+
+ irq_object->bhandler(irq_object->data);
+}
+
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_locks.c b/drivers/parrot/gpu/mali200/linux/mali_osk_locks.c
new file mode 100644
index 00000000000000..0297c776990a41
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_locks.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the locks we implement: */
+typedef enum
+{
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN, /* Mutex, implicitly non-interruptible, use spin_lock/spin_unlock */
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ, /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX, /* Interruptible, use up()/down_interruptible() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT, /* Non-interruptible, use up()/down() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptible, Reader/Writer, use {up,down}_{read,write}() */
+
+ /* Linux supports, but we do not support:
+ * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+ */
+
+ /* Linux does not support:
+ * One-locks, of any sort - no optimization for this fact will be made.
+ */
+
+} _mali_osk_internal_locktype;
+
+struct _mali_osk_lock_t_struct
+{
+ _mali_osk_internal_locktype type;
+ unsigned long flags;
+ union
+ {
+ spinlock_t spinlock;
+ struct semaphore sema;
+ struct rw_semaphore rw_sema;
+ } obj;
+ MALI_DEBUG_CODE(
+ /** original flags for debug checking */
+ _mali_osk_lock_flags_t orig_flags;
+
+ /* id of the thread currently holding this lock, 0 if no
+ * threads hold it. */
+ u32 owner;
+ /* number of owners this lock currently has (can be > 1 if
+ * taken in R/O mode). */
+ u32 nOwners;
+ /* what mode the lock was taken in */
+ _mali_osk_lock_mode_t mode;
+ ); /* MALI_DEBUG_CODE */
+};
+
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+ _mali_osk_lock_t *lock = NULL;
+
+ /* Validate parameters: */
+ /* Flags acceptable */
+ MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+ | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+ | _MALI_OSK_LOCKFLAG_READERWRITER
+ | _MALI_OSK_LOCKFLAG_ORDERED
+ | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+ /* Spinlocks are always non-interruptable */
+ MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+ || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+ /* Parameter initial SBZ - for future expansion */
+ MALI_DEBUG_ASSERT( 0 == initial );
+
+ lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+ if ( NULL == lock )
+ {
+ return lock;
+ }
+
+ /* Determine type of mutex: */
+ /* defaults to interruptable mutex if no flags are specified */
+
+ if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+ {
+ /* Non-interruptable Spinlocks override all others */
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+ lock->flags = 0;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+ && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+ init_rwsem( &lock->obj.rw_sema );
+ }
+ else
+ {
+ /* Usual mutex types */
+ if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+ }
+ else
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+ }
+
+ /* Initially unlocked */
+ sema_init( &lock->obj.sema, 1 );
+ }
+
+#ifdef DEBUG
+ /* Debug tracking of flags */
+ lock->orig_flags = flags;
+
+ /* Debug tracking of lock owner */
+ lock->owner = 0;
+ lock->nOwners = 0;
+#endif /* DEBUG */
+
+ return lock;
+}
+
+#ifdef DEBUG
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock )
+{
+ return lock->owner;
+}
+
+u32 _mali_osk_lock_get_number_owners( _mali_osk_lock_t *lock )
+{
+ return lock->nOwners;
+}
+
+u32 _mali_osk_lock_get_mode( _mali_osk_lock_t *lock )
+{
+ return lock->mode;
+}
+#endif /* DEBUG */
+
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock
+ * Since information is lost on the internal locktype, we use the original
+ * information, which is only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_lock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_lock_irqsave(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ if ( down_interruptible(&lock->obj.sema) )
+ {
+ MALI_PRINT_ERROR(("Can not lock mutex\n"));
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ down(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ down_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ down_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+
+#ifdef DEBUG
+ /* This thread is now the owner of this lock */
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ if (mode == _MALI_OSK_LOCKMODE_RW)
+ {
+ /*MALI_DEBUG_ASSERT(0 == lock->owner);*/
+ if (0 != lock->owner)
+ {
+ printk(KERN_ERR "%d: ERROR: Lock %p already has owner %d\n", _mali_osk_get_tid(), lock, lock->owner);
+ dump_stack();
+ }
+ lock->owner = _mali_osk_get_tid();
+ lock->mode = mode;
+ ++lock->nOwners;
+ }
+ else /* mode == _MALI_OSK_LOCKMODE_RO */
+ {
+ lock->owner |= _mali_osk_get_tid();
+ lock->mode = mode;
+ ++lock->nOwners;
+ }
+ }
+#endif
+
+ return err;
+}
+
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock
+ * Since information is lost on the internal locktype, we use the original
+ * information, which is only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+#ifdef DEBUG
+ /* make sure the thread releasing the lock actually was the owner */
+ if (mode == _MALI_OSK_LOCKMODE_RW)
+ {
+ /*MALI_DEBUG_ASSERT(_mali_osk_get_tid() == lock->owner);*/
+ if (_mali_osk_get_tid() != lock->owner)
+ {
+ printk(KERN_ERR "%d: ERROR: Lock %p owner was %d\n", _mali_osk_get_tid(), lock, lock->owner);
+ dump_stack();
+ }
+ /* This lock now has no owner */
+ lock->owner = 0;
+ --lock->nOwners;
+ }
+ else /* mode == _MALI_OSK_LOCKMODE_RO */
+ {
+ if ((_mali_osk_get_tid() & lock->owner) != _mali_osk_get_tid())
+ {
+ printk(KERN_ERR "%d: ERROR: Not an owner of %p lock.\n", _mali_osk_get_tid(), lock);
+ dump_stack();
+ }
+
+ /* if this is the last thread holding this lock in R/O mode, set owner
+ * back to 0 */
+ if (0 == --lock->nOwners)
+ {
+ lock->owner = 0;
+ }
+ }
+#endif /* DEBUG */
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_unlock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ /* FALLTHROUGH */
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ up(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ up_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ up_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+}
+
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_low_level_mem.c b/drivers/parrot/gpu/mali200/linux/mali_osk_low_level_mem.c
new file mode 100644
index 00000000000000..5767912ef632a9
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+typedef struct mali_vma_usage_tracker
+{
+ int references;
+ u32 cookie;
+} mali_vma_usage_tracker;
+
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+ struct AllocationList *next;
+ u32 offset;
+ u32 physaddr;
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+ struct vm_area_struct *vma;
+ struct AllocationList *list;
+};
+
+typedef struct MappingInfo MappingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+static DEFINE_SPINLOCK(allocation_list_spinlock);
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+ static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+ static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
+#endif
+
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+ .open = mali_kernel_memory_vma_open,
+ .close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+ .nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+
+void mali_osk_low_level_mem_init(void)
+{
+ pre_allocated_memory = (AllocationList*) NULL ;
+}
+
+void mali_osk_low_level_mem_term(void)
+{
+ while ( NULL != pre_allocated_memory )
+ {
+ AllocationList *item;
+ item = pre_allocated_memory;
+ pre_allocated_memory = item->next;
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+ }
+ pre_allocated_memory_size_current = 0;
+}
+
+static u32 _kernel_page_allocate(void)
+{
+ struct page *new_page;
+ u32 linux_phys_addr;
+
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+ if ( NULL == new_page )
+ {
+ return 0;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ return linux_phys_addr;
+}
+
+static void _kernel_page_release(u32 physical_address)
+{
+ struct page *unmap_page;
+
+ #if 1
+ dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ #endif
+
+ unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+ MALI_DEBUG_ASSERT_POINTER( unmap_page );
+ __free_page( unmap_page );
+}
+
+static AllocationList * _allocation_list_item_get(void)
+{
+ AllocationList *item = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory )
+ {
+ item = pre_allocated_memory;
+ pre_allocated_memory = pre_allocated_memory->next;
+ pre_allocated_memory_size_current -= PAGE_SIZE;
+
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return item;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ item = _mali_osk_malloc( sizeof(AllocationList) );
+ if ( NULL == item)
+ {
+ return NULL;
+ }
+
+ item->physaddr = _kernel_page_allocate();
+ if ( 0 == item->physaddr )
+ {
+ /* Non-fatal error condition, out of memory. Upper levels will handle this. */
+ _mali_osk_free( item );
+ return NULL;
+ }
+ return item;
+}
+
+static void _allocation_list_item_release(AllocationList * item)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+ {
+ item->next = pre_allocated_memory;
+ pre_allocated_memory = item;
+ pre_allocated_memory_size_current += PAGE_SIZE;
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ /*
+ * We always fail the call since all memory is pre-faulted when assigned to the process.
+ * Only the Mali cores can use page faults to extend buffers.
+ */
+
+ MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+ MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker->references++;
+
+ return;
+}
+
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+ _mali_uk_mem_munmap_s args = {0, };
+ mali_memory_allocation * descriptor;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+ BUG_ON(!vma_usage_tracker);
+ BUG_ON(0 == vma_usage_tracker->references);
+
+ vma_usage_tracker->references--;
+
+ if (0 != vma_usage_tracker->references)
+ {
+ MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+ return;
+ }
+
+ /** @note args->context unused, initialized to 0.
+ * Instead, we use the memory_session from the cookie */
+
+ descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+ args.cookie = (u32)descriptor;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ _mali_ukk_mem_munmap( &args );
+
+ /* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+ * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
+}
+
+
+void _mali_osk_mem_barrier( void )
+{
+ mb();
+}
+
+void _mali_osk_write_mem_barrier( void )
+{
+ wmb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ iounmap((void*)virt);
+}
+
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+ void * virt;
+ MALI_DEBUG_ASSERT_POINTER( phys );
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+ MALI_DEBUG_ASSERT( 0 != size );
+
+ /* dma_alloc_* uses a limited region of address space. On most architectures
+ * 2 to 14 MiB is available. This should be enough for the page tables, which
+ * currently is the only user of this function. */
+ virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+ MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return (mali_io_address)virt;
+}
+
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+ MALI_DEBUG_ASSERT( 0 != size );
+ MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+ dma_free_coherent(NULL, size, virt, phys);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+}
+
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+ release_mem_region(phys, size);
+}
+
+void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
+{
+ __raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+ return ioread32(((u8*)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+ iowrite32(val, ((u8*)addr) + offset);
+}
+
+void _mali_osk_cache_flushall( void )
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+ _mali_osk_write_mem_barrier();
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct *vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* Re-write the process_addr_mapping_info */
+ mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+ if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+ if (NULL == vma_usage_tracker)
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+ _mali_osk_free( mappingInfo );
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo->vma = vma;
+ descriptor->process_addr_mapping_info = mappingInfo;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+ /* list member is already NULL */
+
+ /*
+ set some bits which indicate that:
+ The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
+ The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+ vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+ vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+ vma->vm_private_data = vma_usage_tracker;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = mappingInfo->vma;
+
+ MALI_DEBUG_ASSERT_POINTER( vma );
+
+ /* ASSERT that there are no allocations on the list. Unmap should've been
+ * called on all OS allocations. */
+ MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ _mali_osk_free(vma_usage_tracker);
+
+ _mali_osk_free( mappingInfo );
+ return;
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+ struct vm_area_struct *vma;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+ if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
+ *phys_addr, size, descriptor->mapping, offset));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ vma = mappingInfo->vma;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+ {
+ _mali_osk_errcode_t ret;
+ AllocationList *alloc_item;
+ u32 linux_phys_frame_num;
+
+ alloc_item = _allocation_list_item_get();
+ if (NULL == alloc_item)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+ ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ if ( ret != _MALI_OSK_ERR_OK)
+ {
+ _allocation_list_item_release(alloc_item);
+ return ret;
+ }
+
+ /* Put our alloc_item into the list of allocations on success */
+ alloc_item->next = mappingInfo->list;
+ alloc_item->offset = offset;
+
+ /*alloc_item->physaddr = linux_phys_addr;*/
+ mappingInfo->list = alloc_item;
+
+ /* Write out new physical address on success */
+ *phys_addr = alloc_item->physaddr;
+
+ return ret;
+ }
+
+ /* Otherwise, Use the supplied physical address */
+
+ /* ASSERT that supplied phys_addr is page aligned */
+ MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ if (NULL == descriptor->mapping) return;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+ size, descriptor->mapping, offset));
+ return;
+ }
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+ {
+ /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+ * so needs to be unmapped
+ */
+ while (size)
+ {
+ /* First find the allocation in the list of allocations */
+ AllocationList *alloc = mappingInfo->list;
+ AllocationList **prev = &(mappingInfo->list);
+ while (NULL != alloc && alloc->offset != offset)
+ {
+ prev = &(alloc->next);
+ alloc = alloc->next;
+ }
+ if (alloc == NULL) {
+ MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ continue;
+ }
+
+ _kernel_page_release(alloc->physaddr);
+
+ /* Remove the allocation from the list */
+ *prev = alloc->next;
+ _mali_osk_free( alloc );
+
+ /* Move onto the next allocation */
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+ }
+
+ /* Linux does the right thing as part of munmap to remove the mapping */
+
+ return;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_mali.c b/drivers/parrot/gpu/mali200/linux/mali_osk_mali.c
new file mode 100644
index 00000000000000..a6195a28f87fcf
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_mali.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "arch/config.h" /* contains the configuration of the arch we are compiling for */
+
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources )
+{
+ u32 i = 0;
+ bool marker_found = 0;
+ for (i = 0; marker_found == 0; i++)
+ if (arch_configuration[i].base == 0xffffffff && arch_configuration[i].size == 0xffffffff)
+ marker_found = 1;
+ *num_resources = i-1;
+ *arch_config = arch_configuration;
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources )
+{
+ /* Nothing to do */
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_math.c b/drivers/parrot/gpu/mali200/linux/mali_osk_math.c
new file mode 100644
index 00000000000000..3e62e519cbac37
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 inline _mali_osk_clz( u32 input )
+{
+ return 32-fls(input);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_memory.c b/drivers/parrot/gpu/mali200/linux/mali_osk_memory.c
new file mode 100644
index 00000000000000..7bb470f8828d66
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc( u32 size )
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free( void *ptr )
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_valloc( u32 size )
+{
+ return vmalloc(size);
+}
+
+void inline _mali_osk_vfree( void *ptr )
+{
+ vfree(ptr);
+}
+
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_misc.c b/drivers/parrot/gpu/mali200/linux/mali_osk_misc.c
new file mode 100644
index 00000000000000..ad486db6f5e44b
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_misc.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+{
+ int res;
+ va_list args;
+ va_start(args, fmt);
+
+ res = vscnprintf(buf, (size_t)size, fmt, args);
+
+ va_end(args);
+ return res;
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ dump_stack();
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ return (u32)current->pid;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_notification.c b/drivers/parrot/gpu/mali200/linux/mali_osk_notification.c
new file mode 100644
index 00000000000000..c14c0d5c28b086
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_notification.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notification pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl
+ * When a new notification is posted a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+ struct semaphore mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+ _mali_osk_notification_queue_t * result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ sema_init(&result->mutex, 1);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size,
+ GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+ if (NULL == notification)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size)
+ {
+ notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ }
+ else
+ {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* lock queue access */
+ down(&queue->mutex);
+ /* add to list */
+ list_add_tail(&notification->list, &queue->head);
+ /* unlock the queue */
+ up(&queue->mutex);
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+static int _mali_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ int ret;
+
+ down(&queue->mutex);
+ ret = list_empty(&queue->head);
+ up(&queue->mutex);
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ return _mali_notification_queue_is_empty(queue) ? MALI_TRUE : MALI_FALSE;
+}
+#endif
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+ down(&queue->mutex);
+
+ if (!list_empty(&queue->head))
+ {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ up(&queue->mutex);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( result );
+
+ /* default result */
+ *result = NULL;
+
+ while (_MALI_OSK_ERR_OK != _mali_osk_notification_queue_dequeue(queue, result))
+ {
+ if (wait_event_interruptible(queue->receive_queue, !_mali_notification_queue_is_empty(queue)))
+ {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_pm.c b/drivers/parrot/gpu/mali200/linux/mali_osk_pm.c
new file mode 100644
index 00000000000000..491a6038dd448e
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_pm.c
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_linux_pm.h"
+#include "mali_kernel_license.h"
+
+#if ! MALI_LICENSE_IS_GPL
+#undef CONFIG_PM_RUNTIME
+#endif
+
+extern struct platform_device mali_gpu_device;
+
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool have_runtime_reference = MALI_FALSE;
+#endif
+
+void _mali_osk_pm_dev_enable(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+}
+
+/* NB: Function is not thread safe */
+_mali_osk_errcode_t _mali_osk_pm_dev_idle(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ if (MALI_TRUE == have_runtime_reference)
+ {
+ int err;
+ err = pm_runtime_put_sync(&(mali_gpu_device.dev));
+ if (0 > err)
+ {
+ MALI_PRINT_ERROR(("OSK PM: pm_runtime_put_sync() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ have_runtime_reference = MALI_FALSE;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
+
+/* NB: Function is not thread safe */
+_mali_osk_errcode_t _mali_osk_pm_dev_activate(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ if (MALI_TRUE != have_runtime_reference)
+ {
+ int err;
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ if (0 > err)
+ {
+ MALI_PRINT_ERROR(("OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ have_runtime_reference = MALI_TRUE;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_gator.c b/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_gator.c
new file mode 100644
index 00000000000000..fcc40caa668835
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_gator.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ if (MALI_TRUE == auto_start)
+ {
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+ /* Nothing to do */
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_osk_profiling_get_count(void)
+{
+ return 0;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_clear(void)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_osk_profiling_is_recording(void)
+{
+ return MALI_FALSE;
+}
+
+mali_bool _mali_osk_profiling_have_recording(void)
+{
+ return MALI_FALSE;
+}
+
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+ trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ return _mali_osk_profiling_start(&args->limit);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+	/* Always add the process and thread identifiers in the first two data elements for events from user space */
+ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ return _mali_osk_profiling_stop(&args->count);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ return _mali_osk_profiling_clear();
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+ _mali_osk_profiling_report_sw_counters(args->counters);
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+
+ if (counter_id == COUNTER_VP_C0)
+ {
+ struct mali_gp_core* gp_core = mali_gp_get_global_gp_core();
+ if (NULL != gp_core)
+ {
+ if (MALI_TRUE == mali_gp_core_set_counter_src0(gp_core, event_id))
+ {
+ return 1;
+ }
+ }
+ }
+ else if (counter_id == COUNTER_VP_C1)
+ {
+ struct mali_gp_core* gp_core = mali_gp_get_global_gp_core();
+ if (NULL != gp_core)
+ {
+ if (MALI_TRUE == mali_gp_core_set_counter_src1(gp_core, event_id))
+ {
+ return 1;
+ }
+ }
+ }
+ else if (counter_id >= COUNTER_FP0_C0 && counter_id <= COUNTER_FP3_C1)
+ {
+ u32 core_id = (counter_id - COUNTER_FP0_C0) >> 1;
+ struct mali_pp_core* pp_core = mali_pp_get_global_pp_core(core_id);
+ if (NULL != pp_core)
+ {
+ u32 counter_src = (counter_id - COUNTER_FP0_C0) & 1;
+ if (0 == counter_src)
+ {
+ if (MALI_TRUE == mali_pp_core_set_counter_src0(pp_core, event_id))
+ {
+ return 1;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE == mali_pp_core_set_counter_src1(pp_core, event_id))
+ {
+ return 1;
+ }
+ }
+ }
+ }
+ else if (counter_id >= COUNTER_L2_C0 && counter_id <= COUNTER_L2_C1)
+ {
+ u32 core_id = (counter_id - COUNTER_L2_C0) >> 1;
+ struct mali_l2_cache_core* l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+ if (NULL != l2_cache_core)
+ {
+ u32 counter_src = (counter_id - COUNTER_L2_C0) & 1;
+ if (0 == counter_src)
+ {
+ if (MALI_TRUE == mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id))
+ {
+ return 1;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE == mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id))
+ {
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for the first L2 cache.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param src0 First L2 cache counter ID.
+ * @param val0 First L2 cache counter value.
+ * @param src1 Second L2 cache counter ID.
+ * @param val1 Second L2 cache counter value.
+ */
+void _mali_profiling_get_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+ struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(0);
+ if (NULL != l2_cache)
+ {
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache))
+ {
+ /* It is now safe to access the L2 cache core in order to retrieve the counters */
+ mali_l2_cache_core_get_counter_values(l2_cache, src0, val0, src1, val1);
+ }
+ mali_l2_cache_unlock_power_state(l2_cache);
+ }
+}
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+/**
+ * Called by gator to control the production of profiling information at runtime.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+ switch(action)
+ {
+ case FBDUMP_CONTROL_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+ break;
+ case FBDUMP_CONTROL_RATE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+ break;
+ case SW_COUNTER_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+ break;
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+ break;
+ default:
+ break; /* Ignore unimplemented actions */
+ }
+}
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_internal.c b/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_internal.c
new file mode 100644
index 00000000000000..9a423d2186b370
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_profiling_internal.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;
+static u32 profile_entry_count = 0;
+static _mali_osk_atomic_t profile_insert_index;
+static _mali_osk_atomic_t profile_entries_written;
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+
+ lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PROFILING);
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start)
+ {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ if (_MALI_OSK_ERR_OK != _mali_osk_profiling_start(&limit))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ /* wait for all elements to be completely inserted into array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock)
+ {
+ _mali_osk_lock_term(lock);
+ lock = NULL;
+ }
+}
+
+inline _mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+{
+ _mali_osk_errcode_t ret;
+
+ mali_profiling_entry *new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+ if(NULL == new_profile_entries)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_IDLE)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (*limit > MALI_PROFILING_MAX_BUFFER_ENTRIES)
+ {
+ *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ profile_entries = new_profile_entries;
+ profile_entry_count = *limit;
+
+ ret = _mali_timestamp_reset();
+
+ if (ret == _MALI_OSK_ERR_OK)
+ {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ }
+ else
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+inline void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
+ {
+ /*
+ * Not in recording mode, or buffer is full
+ * Decrement index again, and early out
+ */
+ _mali_osk_atomic_dec(&profile_insert_index);
+ return;
+ }
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ _mali_osk_atomic_inc(&profile_entries_written);
+}
+
+inline void _mali_osk_profiling_report_hw_counter(u32 counter_id, u32 value)
+{
+ /* Not implemented */
+}
+
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+ /* Not implemented */
+}
+
+inline _mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+	/* go into return state (user to retrieve events), no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* wait for all elements to be completely inserted into array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+inline u32 _mali_osk_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (prof_state == MALI_PROFILING_STATE_RETURN)
+ {
+ retval = _mali_osk_atomic_read(&profile_entries_written);
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ return retval;
+}
+
+inline _mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= _mali_osk_atomic_read(&profile_entries_written))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+inline _mali_osk_errcode_t _mali_osk_profiling_clear(void)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+ if (NULL != profile_entries)
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_osk_profiling_is_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+mali_bool _mali_osk_profiling_have_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ return _mali_osk_profiling_start(&args->limit);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+	/* Always add the process and thread identifiers in the first two data elements for events from user space */
+ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ return _mali_osk_profiling_stop(&args->count);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ return _mali_osk_profiling_clear();
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+ _mali_osk_profiling_report_sw_counters(args->counters);
+ return _MALI_OSK_ERR_OK;
+}
+
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_specific.h b/drivers/parrot/gpu/mali200/linux/mali_osk_specific.h
new file mode 100644
index 00000000000000..83ee906df4e7ea
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_specific.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef __cplusplus
+}
+#endif
+
+/** The list of events supported by the Mali DDK. */
+typedef enum
+{
+ /* Vertex processor activity */
+ ACTIVITY_VP = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP0,
+ ACTIVITY_FP1,
+ ACTIVITY_FP2,
+ ACTIVITY_FP3,
+
+ /* L2 cache counters */
+ COUNTER_L2_C0,
+ COUNTER_L2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_C0,
+ COUNTER_VP_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP0_C0,
+ COUNTER_FP0_C1,
+ COUNTER_FP1_C0,
+ COUNTER_FP1_C1,
+ COUNTER_FP2_C0,
+ COUNTER_FP2_C1,
+ COUNTER_FP3_C0,
+ COUNTER_FP3_C1,
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP3
+
+#define FIRST_HW_COUNTER COUNTER_L2_C0
+#define LAST_HW_COUNTER COUNTER_FP3_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_time.c b/drivers/parrot/gpu/mali200/linux/mali_osk_time.c
new file mode 100644
index 00000000000000..da9b8656b703e6
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_timers.c b/drivers/parrot/gpu/mali200/linux/mali_osk_timers.c
new file mode 100644
index 00000000000000..e5829a3e734c08
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_timers.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer; /* wrapped Linux kernel timer */
+};
+ /* Signature expected by timer_list.function; callbacks are cast to it in setcallback(). */
+typedef void (*timer_timeout_function_t)(unsigned long);
+ /* Allocate and initialise a timer object; returns NULL if allocation fails. */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+ /* Arm the timer to fire ticks_to_expire jiffies from now. */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = _mali_osk_time_tickcount() + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+ /* (Re)arm the timer for an absolute expiry tick; also arms it if it was not pending. */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), expiry_tick);
+}
+ /* Cancel the timer and wait for a running callback to finish (may sleep). */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+ /* Install the expiry callback and its opaque argument. */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+ /* Free the timer object. NOTE(review): does not cancel the timer — confirm callers call del() first. */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_osk_wait_queue.c b/drivers/parrot/gpu/mali200/linux/mali_osk_wait_queue.c
new file mode 100644
index 00000000000000..ce0561d51832ba
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_osk_wait_queue.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct
+{
+ wait_queue_head_t wait_queue; /* wrapped Linux wait queue head */
+};
+ /* Allocate and initialise a wait queue object; returns NULL if allocation fails. */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
+{
+ _mali_osk_wait_queue_t* ret = NULL;
+
+ ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+ if (NULL == ret)
+ {
+ return ret;
+ }
+
+ init_waitqueue_head(&ret->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+ return ret;
+}
+ /* Block the caller (uninterruptibly) until condition() evaluates true. */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition());
+}
+ /* Wake every task currently sleeping on the queue. */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return; /* NOTE(review): unlocked emptiness check can race with a waiter about to sleep — confirm callers make this safe */
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+ /* Free the wait queue object. */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_pmu_power_up_down.c b/drivers/parrot/gpu/mali200/linux/mali_pmu_power_up_down.c
new file mode 100644
index 00000000000000..f3b0a2c95a0139
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_pmu_power_up_down.c
@@ -0,0 +1,65 @@
+/**
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_pmu.h"
+#include "linux/mali/mali_utgard.h"
+
+/* Mali PMU power up/down APIs */
+
+int mali_pmu_powerup(void) /* Power up all PMU-managed Mali domains; 0 on success, negative errno on failure. */
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
+
+ if (NULL == pmu)
+ {
+ return -ENXIO; /* no global PMU core registered */
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_powerup_all(pmu))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerup);
+
+int mali_pmu_powerdown(void) /* Power down all PMU-managed Mali domains; 0 on success, negative errno on failure. */
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
+
+ if (NULL == pmu)
+ {
+ return -ENXIO; /* no global PMU core registered */
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_powerdown_all(pmu))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerdown);
diff --git a/drivers/parrot/gpu/mali200/linux/mali_profiling_events.h b/drivers/parrot/gpu/mali200/linux/mali_profiling_events.h
new file mode 100644
index 00000000000000..2639a404d47578
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_profiling_events.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_uk_types.h b/drivers/parrot/gpu/mali200/linux/mali_uk_types.h
new file mode 100644
index 00000000000000..fbe902a02d4329
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_uk_types.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_core.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_core.c
new file mode 100644
index 00000000000000..6eb2df39bb154d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_core.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memory allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs) /* negotiate driver/user API version; writes version and compatibility flag back */
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+ /* Report the buffer size user space must allocate before calling get_system_info. */
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs)
+{
+ _mali_uk_get_system_info_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_system_info_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+ /* Fill a user-supplied buffer with the system info structure (built in a kernel temp buffer). */
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs)
+{
+ _mali_uk_get_system_info_s kargs;
+ _mali_osk_errcode_t err;
+ _mali_system_info *system_info_user;
+ _mali_system_info *system_info_kernel;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.system_info, &uargs->system_info)) return -EFAULT;
+ if (0 != get_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ /* A temporary kernel buffer for the system_info datastructure is passed through the system_info
+ * member. The ukk_private member will point to the user space destination of this buffer so
+ * that _mali_ukk_get_system_info() can correct the pointers in the system_info correctly
+ * for user space.
+ */
+ system_info_kernel = kmalloc(kargs.size, GFP_KERNEL); /* NOTE(review): size is user-controlled and unbounded here — confirm it is validated downstream */
+ if (NULL == system_info_kernel) return -EFAULT;
+
+ system_info_user = kargs.system_info;
+ kargs.system_info = system_info_kernel;
+ kargs.ukk_private = (u32)system_info_user; /* NOTE(review): pointer stored in u32 — assumes 32-bit kernel */
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_system_info(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ kfree(system_info_kernel);
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(system_info_user, system_info_kernel, kargs.size))
+ {
+ kfree(system_info_kernel);
+ return -EFAULT;
+ }
+
+ kfree(system_info_kernel);
+ return 0;
+}
+ /* Block until a notification is queued for this session, then copy it to user space. */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT; /* on shutdown only the type field is returned */
+ }
+
+ return 0;
+}
+ /* Queue a notification of the user-supplied type back to this session. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type))
+ {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+ /* Copy the per-session user-settings table to user space. */
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_gp.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_gp.c
new file mode 100644
index 00000000000000..7070016de2714c
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_gp.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs) /* submit a GP (vertex) job and copy the updated job state back to user space */
+{
+ _mali_uk_gp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ /*
+ * If this happens, then user space will not know that the job was actually started,
+ * and if we return a queued job, then user space will still think that one is still queued.
+ * This will typically lead to a deadlock in user space.
+ * This could however only happen if user space deliberately passes a user buffer which
+ * passes the access_ok(VERIFY_WRITE) check, but isn't fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space should not be affected.
+ * That is why we do not bother to do a complex rollback in this very very very rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+ /* Return the GP core hardware version to user space. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+ /* Forward user space's response to a suspended GP job; echoes the cookie back. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+ /* Return the number of GP cores to user space. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_mem.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_mem.c
new file mode 100644
index 00000000000000..260f257c2bab6b
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_mem.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs) /* initialise session memory; returns Mali address base and size, rolling back on copy-out failure */
+{
+ _mali_uk_init_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_init_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+ if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+ return 0;
+
+mem_init_rollback:
+ { /* undo the successful init because user space never received the results */
+ _mali_uk_term_mem_s kargs;
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+}
+
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs) /* tear down session memory; uargs carries no data beyond the context */
+{
+ _mali_uk_term_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument) /* map externally-allocated physical memory into the session; returns a cookie for later unmap */
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback: mapping succeeded but the cookie could not be delivered, so undo the mapping */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument) /* unmap a previously mapped external memory range identified by its cookie */
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument) /* release a UMP memory attachment identified by its cookie */
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument) /* attach a UMP buffer to the session; returns a cookie for later release */
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback: attach succeeded but the cookie could not be delivered, so release it again */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs) /* report the buffer size needed for an MMU page table dump */
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ kargs.buffer = _mali_osk_valloc(kargs.size);
+ if (NULL == kargs.buffer)
+ {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size) ) goto err_exit;
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+ return rc;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_pp.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_pp.c
new file mode 100644
index 00000000000000..c11c61b9002286
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_pp.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs) /* submit a PP (fragment) job; NOTE(review): VERIFY_WRITE check suggests write-back happens elsewhere — confirm */
+{
+ _mali_uk_pp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_pp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_pp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+ /* Return the number of PP cores to user space. */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
+ /* Return the PP core hardware version to user space. */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+ /* Disable write-back units for a queued PP job; fire-and-forget (no error from the core call). */
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+ _mali_uk_pp_disable_wb_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_pp_job_disable_wb(&kargs);
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_profiling.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_profiling.c
new file mode 100644
index 00000000000000..f4e31c92723e9d
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_profiling.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs) /* start event profiling; writes the effective event limit back to user space */
+{
+ _mali_uk_profiling_start_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_start(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.limit, &uargs->limit))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+ /* Record a single user-supplied profiling event. */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+ /* Stop profiling; writes the number of recorded events back to user space. */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+ _mali_uk_profiling_stop_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_stop(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.count, &uargs->count))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+ /* Fetch one recorded profiling event by index and copy it to user space. */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+ _mali_uk_profiling_get_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.index, &uargs->index))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_profiling_get_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+ /* Discard all recorded profiling events. */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+ _mali_uk_profiling_clear_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_clear(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+ /* Copy a user-supplied array of software counters into a kernel buffer and report it. */
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+ _mali_uk_sw_counters_report_s kargs;
+ _mali_osk_errcode_t err;
+ u32 *counter_buffer;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ /* make sure that kargs.num_counters is [at least somewhat] sane */
+ if (kargs.num_counters > 10000) {
+ MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+ return -EINVAL;
+ }
+
+ counter_buffer = (u32*)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+ if (NULL == counter_buffer)
+ {
+ return -ENOMEM;
+ }
+
+ if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters))
+ {
+ kfree(counter_buffer);
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ kargs.counters = counter_buffer; /* swap the user pointer for the kernel copy before the core call */
+
+ err = _mali_ukk_sw_counters_report(&kargs);
+
+ kfree(counter_buffer);
+
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_vsync.c b/drivers/parrot/gpu/mali200/linux/mali_ukk_vsync.c
new file mode 100644
index 00000000000000..f9b5a3e3851876
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/parrot/gpu/mali200/linux/mali_ukk_wrappers.h b/drivers/parrot/gpu/mali200/linux/mali_ukk_wrappers.h
new file mode 100644
index 00000000000000..f70daa5bd2718f
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/linux/mali_ukk_wrappers.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs);
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs);
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/mali200/platform/default/mali_platform.c b/drivers/parrot/gpu/mali200/platform/default/mali_platform.c
new file mode 100644
index 00000000000000..9e64ce75344b53
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/platform/default/mali_platform.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+
+_mali_osk_errcode_t mali_platform_init(void)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(void)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
+{
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+void set_mali_parent_power_domain(void* dev)
+{
+}
+
+
diff --git a/drivers/parrot/gpu/mali200/platform/mali_platform.h b/drivers/parrot/gpu/mali200/platform/mali_platform.h
new file mode 100644
index 00000000000000..8c51a972535298
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/platform/mali_platform.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#ifndef __MALI_PLATFORM_H__
+#define __MALI_PLATFORM_H__
+
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+ MALI_POWER_MODE_ON, /**< Power Mali on */
+ MALI_POWER_MODE_LIGHT_SLEEP, /**< Mali has been idle for a short time, or runtime PM suspend */
+ MALI_POWER_MODE_DEEP_SLEEP, /**< Mali has been idle for a long time, or OS suspend */
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(void);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(void);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Notification from the Mali device driver stating the new desired power mode.
+ * MALI_POWER_MODE_ON must be obeyed, while the other modes are optional.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
+
+/** @brief Setting the power domain of MALI
+ *
+ * This function sets the power domain of MALI if Linux run time power management is enabled
+ *
+ * @param dev Reference to struct platform_device (defined in linux) used by MALI GPU
+ */
+void set_mali_parent_power_domain(void* dev);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/parrot/gpu/mali200/platform/mali_platform_pmu_testing/mali_platform.c b/drivers/parrot/gpu/mali200/platform/mali_platform_pmu_testing/mali_platform.c
new file mode 100644
index 00000000000000..cb95dc69276570
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/platform/mali_platform_pmu_testing/mali_platform.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_pmu.h"
+#include "linux/mali/mali_utgard.h"
+
+static u32 bPowerOff = 1;
+
+_mali_osk_errcode_t mali_platform_init(void)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(void)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
+{
+ switch (power_mode)
+ {
+ case MALI_POWER_MODE_ON:
+ if (bPowerOff == 1)
+ {
+ mali_pmu_powerup();
+ bPowerOff = 0;
+ }
+ break;
+ case MALI_POWER_MODE_LIGHT_SLEEP:
+ case MALI_POWER_MODE_DEEP_SLEEP:
+
+ if (bPowerOff == 0)
+ {
+ mali_pmu_powerdown();
+ bPowerOff = 1;
+ }
+
+ break;
+ }
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+void set_mali_parent_power_domain(void* dev)
+{
+}
+
+
diff --git a/drivers/parrot/gpu/mali200/readme.txt b/drivers/parrot/gpu/mali200/readme.txt
new file mode 100644
index 00000000000000..3acc51c824a889
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> \
+TARGET_PLATFORM=<target_platform> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ ump_option: 1 = Enable UMP support(*)
+ 0 = disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+ target_platform: Name of the sub-folder in platform/ that contains the
+ required mali_platform.c file.
+ your_config: Name of the sub-folder to find the required config.h(**) file
+ ("arch-" will be prepended)
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+(**) The config.h file contains the configuration parameters needed, like where the
+ Mali GPU is located, interrupts it uses, memory and so on.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
diff --git a/drivers/parrot/gpu/mali200/regs/mali_200_regs.h b/drivers/parrot/gpu/mali200/regs/mali_200_regs.h
new file mode 100644
index 00000000000000..59e92c858cd255
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/regs/mali_200_regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+#if defined(USING_MALI200)
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+#endif
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+#if defined(USING_MALI400) || defined(USING_MALI450)
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7),
+#endif
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+ MALI200_REG_VAL_IRQ_HANG = (1<<2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#if defined(USING_MALI200)
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined(USING_MALI400) || defined(USING_MALI450)
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+#else
+#error "No supported mali core defined"
+#endif
+
+#if defined(USING_MALI200)
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined(USING_MALI400) || defined(USING_MALI450)
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+#else
+#error "No supported mali core defined"
+#endif
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+enum mali200_render_unit
+{
+ MALI200_REG_ADDR_FRAME = 0x0000,
+ MALI200_REG_ADDR_STACK = 0x0030
+};
+
+#if defined(USING_MALI200)
+#define MALI200_NUM_REGS_FRAME ((0x04C/4)+1)
+#elif defined(USING_MALI400)
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#elif defined(USING_MALI450)
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#else
+#error "No supported mali core defined"
+#endif
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+ MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+};
+
+/** The number of registers in one single writeback unit */
+#ifndef MALI200_NUM_REGS_WBx
+#define MALI200_NUM_REGS_WBx ((0x02C/4)+1)
+#endif
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/parrot/gpu/mali200/regs/mali_gp_regs.h b/drivers/parrot/gpu/mali200/regs/mali_gp_regs.h
new file mode 100644
index 00000000000000..21c83c0246a525
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/regs/mali_gp_regs.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+ MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+#if defined(USING_MALI400) || defined(USING_MALI450)
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10),
+#endif
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#if defined(USING_MALI400) || defined(USING_MALI450)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+#elif !defined USING_MALI200
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining all IRQs in MaliGP2 */
+#if defined(USING_MALI200)
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined(USING_MALI400) || defined(USING_MALI450)
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining the IRQs in MaliGP2 which we use*/
+#if defined(USING_MALI200)
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined(USING_MALI400) || defined(USING_MALI450)
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** }@ defgroup MALIGP2_IRQ*/
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** }@ defgroup MALIGP2_STATUS*/
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different sources for instrumented on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.c b/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 00000000000000..a6b1d76d567fc7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.h b/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 00000000000000..3279daede45999
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.c b/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.c
new file mode 100644
index 00000000000000..a6b1d76d567fc7
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.h b/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.h
new file mode 100644
index 00000000000000..94b842a10088bc
--- /dev/null
+++ b/drivers/parrot/gpu/mali200/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/mali400/Kbuild b/drivers/parrot/gpu/mali400/Kbuild
new file mode 100755
index 00000000000000..a4481dd8f03b9d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/Kbuild
@@ -0,0 +1,209 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 1
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+MALI_ENABLE_CPU_CYCLES ?= 0
+
+# Set path to driver source
+DRIVER_DIR=$(KBUILD_SRC)/$(src)
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary
+ ifeq ($(CONFIG_MALI400_PROFILING),y)
+ $(error Profiling is incompatible with non-GPL license)
+ endif
+ ifeq ($(CONFIG_PM_RUNTIME),y)
+ $(error Runtime PM is incompatible with non-GPL license)
+ endif
+ ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
+ $(error DMA-BUF is incompatible with non-GPL license)
+ endif
+ $(error Linux Device integration is incompatible with non-GPL license)
+else
+ ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+
+mali-y += \
+ linux/mali_osk_atomics.o \
+ linux/mali_osk_irq.o \
+ linux/mali_osk_wq.o \
+ linux/mali_osk_locks.o \
+ linux/mali_osk_wait_queue.o \
+ linux/mali_osk_low_level_mem.o \
+ linux/mali_osk_math.o \
+ linux/mali_osk_memory.o \
+ linux/mali_osk_misc.o \
+ linux/mali_osk_mali.o \
+ linux/mali_osk_notification.o \
+ linux/mali_osk_time.o \
+ linux/mali_osk_timers.o
+
+mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
+mali-y += linux/mali_memory_external.o
+mali-y += linux/mali_memory_block_alloc.o
+
+mali-y += \
+ linux/mali_ukk_mem.o \
+ linux/mali_ukk_gp.o \
+ linux/mali_ukk_pp.o \
+ linux/mali_ukk_core.o \
+ linux/mali_ukk_soft_job.o \
+ linux/mali_ukk_timeline.o
+
+# Source files which always are included in a build
+mali-y += \
+ common/mali_kernel_core.o \
+ linux/mali_kernel_linux.o \
+ common/mali_kernel_descriptor_mapping.o \
+ common/mali_session.o \
+ linux/mali_device_pause_resume.o \
+ common/mali_kernel_vsync.o \
+ linux/mali_ukk_vsync.o \
+ linux/mali_kernel_sysfs.o \
+ common/mali_mmu.o \
+ common/mali_mmu_page_directory.o \
+ common/mali_mem_validation.o \
+ common/mali_hw_core.o \
+ common/mali_gp.o \
+ common/mali_pp.o \
+ common/mali_pp_job.o \
+ common/mali_gp_job.o \
+ common/mali_soft_job.o \
+ common/mali_scheduler.o \
+ common/mali_gp_scheduler.o \
+ common/mali_pp_scheduler.o \
+ common/mali_group.o \
+ common/mali_dlbu.o \
+ common/mali_broadcast.o \
+ common/mali_pm.o \
+ common/mali_pmu.o \
+ common/mali_user_settings_db.o \
+ common/mali_kernel_utilization.o \
+ common/mali_l2_cache.o \
+ common/mali_dma.o \
+ common/mali_timeline.o \
+ common/mali_timeline_fence_wait.o \
+ common/mali_timeline_sync_fence.o \
+ common/mali_spinlock_reentrant.o \
+ common/mali_pm_domain.o \
+ linux/mali_osk_pm.o \
+ linux/mali_pmu_power_up_down.o \
+ __malidrv_build_info.o
+
+ifneq ($(MALI_PLATFORM_FILES),)
+ mali-y += $(MALI_PLATFORM_FILES:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o
+
+mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
+
+mali-$(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) += common/mali_power_performance_policy.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+ ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+ccflags-$(CONFIG_MALI400_UMP) += -I$(DRIVER_DIR)/../ump/include/ump
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(DRIVER_DIR); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+#GIT_REV := $(shell cd $(DRIVER_DIR); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(DRIVER_DIR); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(DRIVER_DIR); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+#VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += USING_POWER_PERFORMANCE_POLICY=$(CONFIG_POWER_PERFORMANCE_POLICY)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(DRIVER_DIR)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(DRIVER_DIR)/__malidrv_build_info.c
diff --git a/drivers/parrot/gpu/mali400/Kconfig b/drivers/parrot/gpu/mali400/Kconfig
new file mode 100755
index 00000000000000..7d9f339b2aff3e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/Kconfig
@@ -0,0 +1,94 @@
+config MALI400
+ tristate "Mali-300/400/450 support"
+ depends on ARM
+ depends on GPU_PARROT7
+ select DMA_SHARED_BUFFER
+ select MALI400_SUPPORT
+ ---help---
+ This enables support for the ARM Mali-300, Mali-400, and Mali-450
+ GPUs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mali.
+
+config MALI400_LEGACY
+ bool "Legacy driver for mali400"
+ depends on MALI400
+ ---help---
+ This enables the use of an older version of the mali driver
+ to be compatible with android 2.3
+
+config MALI400_SUPPORT
+ bool "Support for mali 400"
+ depends on MALI400
+
+config MALI450
+ bool "Enable Mali-450 support"
+ depends on MALI400
+ ---help---
+ This enables support for Mali-450 specific features.
+
+config MALI400_DEBUG
+ bool "Enable debug in Mali driver"
+ depends on MALI400
+ ---help---
+	  This enables extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+ bool "Enable Mali profiling"
+ depends on MALI400
+ select TRACEPOINTS
+ default y
+ ---help---
+ This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+ bool "Enable internal Mali profiling API"
+ depends on MALI400_PROFILING
+ default n
+ ---help---
+ This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+ bool "Enable UMP support"
+ depends on MALI400
+ ---help---
+ This enables support for the UMP memory sharing API in the Mali driver.
+
+config MALI400_POWER_PERFORMANCE_POLICY
+ bool "Enable Mali power performance policy"
+ depends on ARM
+ default n
+ ---help---
+ This enables support for dynamic performance scaling of Mali with the goal of lowering power consumption.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+ bool "Map dma-buf attachments on attach"
+ depends on MALI400 && DMA_SHARED_BUFFER
+ default y
+ ---help---
+	  This makes the Mali driver map dma-buf attachments after doing
+	  attach. If this is not set, the dma-buf attachments will be mapped
+	  every time the GPU needs to access the buffer.
+
+ Mapping for each access can cause lower performance.
+
+config MALI_SHARED_INTERRUPTS
+ bool "Support for shared interrupts"
+ depends on MALI400
+ default n
+ ---help---
+ Adds functionality required to properly support shared interrupts. Without this support,
+ the device driver will fail during insmod if it detects shared interrupts. This also
+ works when the GPU is not using shared interrupts, but might have a slight performance
+ impact.
+
+config MALI_PMU_PARALLEL_POWER_UP
+ bool "Power up Mali PMU domains in parallel"
+ depends on MALI400
+ default n
+ ---help---
+ This makes the Mali driver power up all PMU power domains in parallel, instead of
+ powering up domains one by one, with a slight delay in between. Powering on all power
+ domains at the same time may cause peak currents higher than what some systems can handle.
+ These systems must not enable this option.
diff --git a/drivers/parrot/gpu/mali400/Makefile b/drivers/parrot/gpu/mali400/Makefile
new file mode 100755
index 00000000000000..1edee7cd26e329
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/Makefile
@@ -0,0 +1,170 @@
+#
+# Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_POWER_PERFORMANCE_POLICY ?= 0
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give warning of old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+export CONFIG_MALI450=y
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+export EXTRA_DEFINES += -DCONFIG_MALI450=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_POWER_PERFORMANCE_POLICY),1)
+export CONFIG_MALI400_POWER_PERFORMANCE_POLICY=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_POWER_PERFORMANCE_POLICY
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifneq ($(BUILD),release)
+# Debug
+export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/parrot/gpu/mali400/__malidrv_build_info.c b/drivers/parrot/gpu/mali400/__malidrv_build_info.c
new file mode 100644
index 00000000000000..69420368741eec
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/__malidrv_build_info.c
@@ -0,0 +1 @@
+const char *__malidrv_build_info(void) { return "malidrv: API_VERSION=401 REPO_URL= REVISION=r4p0-00rel0 CHANGED_REVISION=r4p0-00rel0 CHANGE_DATE=r4p0-00rel0 BUILD=release TARGET_PLATFORM= MALI_PLATFORM= KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP=y USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=1 USING_POWER_PERFORMANCE_POLICY= MALI_UPPER_HALF_SCHEDULING=1";}
diff --git a/drivers/parrot/gpu/mali400/common/mali_broadcast.c b/drivers/parrot/gpu/mali400/common/mali_broadcast.c
new file mode 100644
index 00000000000000..774f04efec5e7b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_broadcast.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static const int bcast_unit_reg_size = 0x1000;
+static const int bcast_unit_addr_broadcast_mask = 0x0;
+static const int bcast_unit_addr_irq_override_mask = 0x4;
+
+struct mali_bcast_unit {
+ struct mali_hw_core hw_core;
+ u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_bcast_unit *bcast_unit = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+ MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+
+ bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+ if (NULL == bcast_unit) {
+ MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+ return NULL;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+ bcast_unit->current_mask = 0;
+ mali_bcast_reset(bcast_unit);
+
+ return bcast_unit;
+ } else {
+ MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+ }
+
+ _mali_osk_free(bcast_unit);
+
+ return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ mali_hw_core_delete(&bcast_unit->hw_core);
+ _mali_osk_free(bcast_unit);
+}
+
+/* Call this function to add the @group's id to the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+ broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+/* Call this function to remove @group's id from the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_broadcast_mask,
+ bcast_unit->current_mask);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_irq_override_mask,
+ bcast_unit->current_mask & 0xFF);
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_broadcast_mask,
+ 0x0);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ bcast_unit_addr_irq_override_mask,
+ 0x0);
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_broadcast.h b/drivers/parrot/gpu/mali400/common/mali_broadcast.h
new file mode 100644
index 00000000000000..6c7472ce32a5c7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_broadcast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 x (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ * setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+ mali_bcast_reset(bcast_unit);
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_dlbu.c b/drivers/parrot/gpu/mali400/common/mali_dlbu.c
new file mode 100644
index 00000000000000..f70a5c7d7ac414
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_dlbu.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - set this bit to 1 enables the AXI bus
+ between PPs and L2s, setting to 0 disables the router and
+ no further transactions are sent to DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum {
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU
+ see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+ mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_dlbu_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+ core->pp_cores_mask = 0;
+ if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+ return core;
+ }
+ MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_dlbu_reset(dlbu);
+ mali_hw_core_delete(&dlbu->hw_core);
+ _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+ u32 dlbu_registers[7];
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+ dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+ dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+ dlbu_registers[2] = 0;
+ dlbu_registers[3] = 0;
+ dlbu_registers[4] = 0;
+ dlbu_registers[5] = 0;
+ dlbu_registers[6] = dlbu->pp_cores_mask;
+
+ /* write reset values to core registers */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+ err = _MALI_OSK_ERR_OK;
+
+ return err;
+}
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask |= bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask &= ~bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+ u32 *registers;
+ MALI_DEBUG_ASSERT(job);
+ registers = mali_pp_job_get_dlbu_registers(job);
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+ /* Writing 4 registers:
+ * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_dlbu.h b/drivers/parrot/gpu/mali400/common/mali_dlbu.h
new file mode 100644
index 00000000000000..b77657b51bf1f4
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_dlbu.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_dma.c b/drivers/parrot/gpu/mali400/common/mali_dma.c
new file mode 100644
index 00000000000000..8147883672ede3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_dma.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+/**
+ * Size of the Mali-450 DMA unit registers in bytes.
+ */
+#define MALI450_DMA_REG_SIZE 0x08
+
+/**
+ * Value that appears in MEMSIZE if an error occurs when reading the command list.
+ */
+#define MALI450_DMA_BUS_ERR_VAL 0xffffffff
+
+/**
+ * Mali DMA registers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register.
+ */
+typedef enum mali_dma_register {
+
+ MALI450_DMA_REG_SOURCE_ADDRESS = 0x0000,
+ MALI450_DMA_REG_SOURCE_SIZE = 0x0004,
+} mali_dma_register;
+
+struct mali_dma_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_spinlock_t *lock; /**< Lock protecting access to DMA core */
+ mali_dma_pool pool; /**< Memory pool for command buffers */
+};
+
+static struct mali_dma_core *mali_global_dma_core = NULL;
+
+/* Create and initialize the (single, global) Mali-450 APB DMA unit.
+ * Acquires, in order: core struct, spinlock, command-buffer pool, HW core
+ * mapping. On any failure the goto chain unwinds exactly the resources
+ * acquired so far. Returns the DMA object, or NULL on failure. */
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
+{
+	struct mali_dma_core *dma;
+	_mali_osk_errcode_t err;
+
+	/* Only one DMA unit is supported; creating a second is a driver bug. */
+	MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);
+
+	dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
+	if (dma == NULL) goto alloc_failed;
+
+	dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
+	if (NULL == dma->lock) goto lock_init_failed;
+
+	/* Pool of MALI_DMA_CMD_BUF_SIZE-byte, 4-byte-aligned command buffers. */
+	dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
+	if (NULL == dma->pool) goto dma_pool_failed;
+
+	err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
+	if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;
+
+	mali_global_dma_core = dma;
+	MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
+	return dma;
+
+	/* Error handling: unwind in reverse order of acquisition. */
+
+hw_core_failed:
+	mali_dma_pool_destroy(dma->pool);
+dma_pool_failed:
+	_mali_osk_spinlock_term(dma->lock);
+lock_init_failed:
+	_mali_osk_free(dma);
+alloc_failed:
+	MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
+	return NULL;
+}
+
+/* Tear down a DMA unit created by mali_dma_create(), releasing the HW core
+ * mapping, the spinlock, the command-buffer pool and the struct itself.
+ * NOTE(review): mali_global_dma_core is not cleared here — presumably only
+ * called at driver exit; confirm no re-create happens afterwards. */
+void mali_dma_delete(struct mali_dma_core *dma)
+{
+	MALI_DEBUG_ASSERT_POINTER(dma);
+
+	MALI_DEBUG_PRINT(2, ("Mali DMA: Deleted Mali APB DMA unit\n"));
+
+	mali_hw_core_delete(&dma->hw_core);
+	_mali_osk_spinlock_term(dma->lock);
+	mali_dma_pool_destroy(dma->pool);
+	_mali_osk_free(dma);
+}
+
+static void mali_dma_bus_error(struct mali_dma_core *dma)
+{
+ u32 addr = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS);
+
+ MALI_PRINT_ERROR(("Mali DMA: Bus error when reading command list from 0x%lx\n", addr));
+ MALI_IGNORE(addr);
+
+ /* Clear the bus error */
+ mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, 0);
+}
+
+/* Query whether the DMA unit is still transferring.
+ * A MEMSIZE readback of all-ones signals a bus error while fetching the
+ * command list; that error is cleared and treated as "not busy". */
+static mali_bool mali_dma_is_busy(struct mali_dma_core *dma)
+{
+	u32 remaining;
+
+	MALI_DEBUG_ASSERT_POINTER(dma);
+
+	remaining = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE);
+
+	if (MALI450_DMA_BUS_ERR_VAL == remaining) {
+		/* Bus error reading command list */
+		mali_dma_bus_error(dma);
+		return MALI_FALSE;
+	}
+
+	/* Non-zero MEMSIZE means bytes are still outstanding. */
+	return (remaining > 0) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Kick off a DMA transfer of \a buf. Caller must hold dma->lock and have
+ * verified the unit is idle. Writing MEMSIZE (second write) starts the
+ * transfer, so the source address must be programmed first. */
+static void mali_dma_start_transfer(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
+{
+	u32 memsize = buf->size * 4; /* buf->size is in words; HW wants bytes */
+	u32 addr = buf->phys_addr;
+
+	MALI_DEBUG_ASSERT_POINTER(dma);
+	MALI_DEBUG_ASSERT(memsize < (1 << 16)); /* MEMSIZE field is 16 bits wide */
+	MALI_DEBUG_ASSERT(0 == (memsize & 0x3)); /* 4 byte aligned */
+
+	MALI_DEBUG_ASSERT(!mali_dma_is_busy(dma));
+
+	/* Writes the physical source memory address of chunk containing command headers and data */
+	mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS, addr);
+
+	/* Writes the length of transfer */
+	mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, memsize);
+}
+
+/* Back \a buf with memory from the global DMA pool.
+ * On success both virt_addr and phys_addr are valid and size is reset to 0;
+ * returns _MALI_OSK_ERR_NOMEM if the pool is exhausted. */
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf)
+{
+	void *cpu_addr;
+
+	MALI_DEBUG_ASSERT_POINTER(buf);
+
+	cpu_addr = mali_dma_pool_alloc(mali_global_dma_core->pool, &buf->phys_addr);
+	if (NULL == cpu_addr) {
+		return _MALI_OSK_ERR_NOMEM;
+	}
+	buf->virt_addr = (u32 *)cpu_addr;
+
+	/* size counts prepared words and grows as commands are appended. */
+	buf->size = 0;
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Return \a buf's backing memory to the global DMA pool.
+ * Safe to call on a buffer that was never allocated (virt_addr == NULL);
+ * virt_addr is cleared so a double put is also harmless. */
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf)
+{
+	MALI_DEBUG_ASSERT_POINTER(buf);
+
+	if (NULL != buf->virt_addr) {
+		mali_dma_pool_free(mali_global_dma_core->pool, buf->virt_addr, buf->phys_addr);
+		buf->virt_addr = NULL;
+	}
+}
+
+/* Start command buffer \a buf on the DMA unit, serialized by dma->lock.
+ * Returns _MALI_OSK_ERR_BUSY without starting anything if a transfer is
+ * already in flight, otherwise _MALI_OSK_ERR_OK. */
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
+{
+	_mali_osk_errcode_t result;
+
+	_mali_osk_spinlock_lock(dma->lock);
+
+	if (mali_dma_is_busy(dma)) {
+		result = _MALI_OSK_ERR_BUSY;
+	} else {
+		mali_dma_start_transfer(dma, buf);
+		result = _MALI_OSK_ERR_OK;
+	}
+
+	_mali_osk_spinlock_unlock(dma->lock);
+	return result;
+}
+
+/* Dump the two DMA unit registers (source address, remaining size) to the
+ * debug log at the highest-priority level. Diagnostic aid only. */
+void mali_dma_debug(struct mali_dma_core *dma)
+{
+	MALI_DEBUG_ASSERT_POINTER(dma);
+	MALI_DEBUG_PRINT(1, ("DMA unit registers:\n\t%08x, %08x\n",
+			     mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS),
+			     mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE)
+			    ));
+
+}
+
+/* Accessor for the singleton DMA unit; NULL if mali_dma_create() has not
+ * run (e.g. on Mali-400, which has no APB DMA unit). */
+struct mali_dma_core *mali_dma_get_global_dma_core(void)
+{
+	/* Returns the global dma core object */
+	return mali_global_dma_core;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_dma.h b/drivers/parrot/gpu/mali400/common/mali_dma.h
new file mode 100644
index 00000000000000..61eef113a940c3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_dma.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DMA_H__
+#define __MALI_DMA_H__
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_hw_core.h"
+
+#define MALI_DMA_CMD_BUF_SIZE 1024
+
+typedef struct mali_dma_cmd_buf {
+ u32 *virt_addr; /**< CPU address of command buffer */
+ u32 phys_addr; /**< Physical address of command buffer */
+ u32 size; /**< Number of prepared words in command buffer */
+} mali_dma_cmd_buf;
+
+/** @brief Create a new DMA unit
+ *
+ * This is called from entry point of the driver in order to create and
+ * intialize the DMA resource
+ *
+ * @param resource it will be a pointer to a DMA resource
+ * @return DMA object on success, NULL on failure
+ */
+struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource);
+
+/** @brief Delete DMA unit
+ *
+ * This is called on entry point of driver if the driver initialization fails
+ * after initialization of the DMA unit. It is also called on the exit of the
+ * driver to delete the DMA resource
+ *
+ * @param dma Pointer to DMA unit object
+ */
+void mali_dma_delete(struct mali_dma_core *dma);
+
+/** @brief Retrieves the MALI DMA core object (if there is)
+ *
+ * @return The Mali DMA object otherwise NULL
+ */
+struct mali_dma_core *mali_dma_get_global_dma_core(void);
+
+/**
+ * @brief Run a command buffer on the DMA unit
+ *
+ * @param dma Pointer to the DMA unit to use
+ * @param buf Pointer to the command buffer to use
+ * @return _MALI_OSK_ERR_OK if the buffer was started successfully,
+ * _MALI_OSK_ERR_BUSY if the DMA unit is busy.
+ */
+_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Create a DMA command
+ *
+ * Encodes a command-header word: bits 31:20 hold the word count, bits 19:0
+ * hold the absolute register address (core offset + register offset).
+ *
+ * @param core Mali core
+ * @param reg offset to register of core
+ * @param n number of registers to write
+ * @return the encoded command-header word
+ */
+MALI_STATIC_INLINE u32 mali_dma_command_write(struct mali_hw_core *core, u32 reg, u32 n)
+{
+	u32 core_offset = core->phys_offset;
+
+	/* Field-width sanity checks: count and address must fit their bit fields. */
+	MALI_DEBUG_ASSERT(reg < 0x2000);
+	MALI_DEBUG_ASSERT(n < 0x800);
+	MALI_DEBUG_ASSERT(core_offset < 0x30000);
+	MALI_DEBUG_ASSERT(0 == ((core_offset + reg) & ~0x7FFFF));
+
+	return (n << 20) | (core_offset + reg);
+}
+
+/**
+ * @brief Add a array write to DMA command buffer
+ *
+ * Appends one command-header word followed by \a count data words.
+ * The capacity assert runs before any word is stored.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ */
+MALI_STATIC_INLINE void mali_dma_write_array(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+		u32 reg, u32 *data, u32 count)
+{
+	MALI_DEBUG_ASSERT((buf->size + 1 + count) < MALI_DMA_CMD_BUF_SIZE / 4);
+
+	buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, count);
+
+	_mali_osk_memcpy(buf->virt_addr + buf->size, data, count * sizeof(*buf->virt_addr));
+
+	buf->size += count;
+}
+
+/**
+ * @brief Add a conditional array write to DMA command buffer
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Pointer to data to write
+ * @param count Number of 4 byte words to write
+ * @param ref Pointer to reference data that can be skipped if equal
+ *            (currently unused — see below)
+ */
+MALI_STATIC_INLINE void mali_dma_write_array_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+		u32 reg, u32 *data, u32 count, const u32 *ref)
+{
+	/* Conditional array writes are not yet implemented; fall back to a
+	 * normal array write. Explicitly discard ref so builds with
+	 * -Wunused-parameter stay clean. */
+	(void)ref;
+	mali_dma_write_array(buf, core, reg, data, count);
+}
+
+/**
+ * @brief Add a conditional register write to the DMA command buffer
+ *
+ * If the data matches the reference the command will be skipped.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Value to write
+ * @param ref Reference value; the write is skipped if equal to \a data
+ */
+MALI_STATIC_INLINE void mali_dma_write_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+		u32 reg, u32 data, const u32 ref)
+{
+	/* Skip write if reference value is equal to data. */
+	if (data == ref) return;
+
+	/* Check capacity for the two words BEFORE storing them; the original
+	 * asserted only after the (potentially out-of-bounds) writes. */
+	MALI_DEBUG_ASSERT(buf->size + 2 < MALI_DMA_CMD_BUF_SIZE / 4);
+
+	buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+	buf->virt_addr[buf->size++] = data;
+}
+
+/**
+ * @brief Add a register write to the DMA command buffer
+ *
+ * Appends a one-word command header followed by the value itself.
+ *
+ * @param buf DMA command buffer to fill in
+ * @param core Core to do DMA to
+ * @param reg Register on core to start writing to
+ * @param data Value to write
+ */
+MALI_STATIC_INLINE void mali_dma_write(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
+		u32 reg, u32 data)
+{
+	/* Check capacity for the two words BEFORE storing them; the original
+	 * asserted only after the (potentially out-of-bounds) writes. */
+	MALI_DEBUG_ASSERT(buf->size + 2 < MALI_DMA_CMD_BUF_SIZE / 4);
+
+	buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
+
+	buf->virt_addr[buf->size++] = data;
+}
+
+/**
+ * @brief Prepare DMA command buffer for use
+ *
+ * This function allocates the DMA buffer itself.
+ *
+ * @param buf The mali_dma_cmd_buf to prepare
+ * @return _MALI_OSK_ERR_OK if the \a buf is ready to use
+ */
+_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf);
+
+/**
+ * @brief Check if a DMA command buffer is ready for use
+ *
+ * @param buf The mali_dma_cmd_buf to check
+ * @return MALI_TRUE if buffer is usable, MALI_FALSE otherwise
+ */
+MALI_STATIC_INLINE mali_bool mali_dma_cmd_buf_is_valid(mali_dma_cmd_buf *buf)
+{
+ return NULL != buf->virt_addr;
+}
+
+/**
+ * @brief Return a DMA command buffer
+ *
+ * @param buf Pointer to DMA command buffer to return
+ */
+void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf);
+
+#endif /* __MALI_DMA_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp.c b/drivers/parrot/gpu/mali400/common/mali_gp.c
new file mode 100644
index 00000000000000..1e633f2444497b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+/* Create the (single, global) GP core from \a resource and attach it to
+ * \a group. Steps, each undone on later failure: allocate struct, map HW
+ * registers, reset core, add to group, install IRQ handlers. Only after all
+ * succeed is mali_global_gp_core set. Returns the core, or NULL on failure. */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+	struct mali_gp_core *core = NULL;
+
+	/* Mali-400/450 has exactly one geometry processor. */
+	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+	if (NULL != core) {
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+			_mali_osk_errcode_t ret;
+
+			/* Bring the core to a known state before exposing it. */
+			ret = mali_gp_reset(core);
+
+			if (_MALI_OSK_ERR_OK == ret) {
+				ret = mali_group_add_gp_core(group, core);
+				if (_MALI_OSK_ERR_OK == ret) {
+					/* Setup IRQ handlers (which will do IRQ probing if needed) */
+					core->irq = _mali_osk_irq_init(resource->irq,
+								       mali_group_upper_half_gp,
+								       group,
+								       mali_gp_irq_probe_trigger,
+								       mali_gp_irq_probe_ack,
+								       core,
+								       resource->description);
+					if (NULL != core->irq) {
+						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+						mali_global_gp_core = core;
+
+						return core;
+					} else {
+						MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+					}
+					/* Undo the group attachment made above. */
+					mali_group_remove_gp_core(group);
+				} else {
+					MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+				}
+			}
+			/* Reached on reset/group/IRQ failure: unmap the HW core. */
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	} else {
+		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+	}
+
+	return NULL;
+}
+
+/* Tear down a GP core created by mali_gp_create(): release the IRQ handler,
+ * unmap the HW registers, clear the global pointer and free the struct. */
+void mali_gp_delete(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	_mali_osk_irq_term(core->irq);
+	mali_hw_core_delete(&core->hw_core);
+	mali_global_gp_core = NULL;
+	_mali_osk_free(core);
+}
+
+/* Issue the asynchronous stop-bus command to the GP core. Does not wait for
+ * completion; use mali_gp_stop_bus_wait() for the blocking variant. */
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/* Issue the stop-bus command and busy-poll (bounded by
+ * MALI_REG_POLL_COUNT_FAST iterations) until the status register reports the
+ * bus as stopped. Returns _MALI_OSK_ERR_FAULT if the poll budget runs out. */
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+	int attempt;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Send the stop bus command. */
+	mali_gp_stop_bus(core);
+
+	/* Wait for bus to be stopped */
+	for (attempt = 0; attempt < MALI_REG_POLL_COUNT_FAST; attempt++) {
+		u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+
+		if (status & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+			return _MALI_OSK_ERR_OK;
+		}
+	}
+
+	MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Hard-reset the GP core. Because the reset command gives no completion
+ * interrupt here, a scratch register (WRITE_BOUND_LOW) is used as a probe:
+ * an invalid value is written pre-reset, then a check value is repeatedly
+ * written post-reset; once the readback matches, the core is accepting
+ * register writes again. Interrupts are re-enabled before returning. */
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+	const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+	const u32 reset_invalid_value = 0xC0FFE000;
+	const u32 reset_check_value = 0xC01A0000;
+	const u32 reset_default_value = 0;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+	/* Poison the scratch register so a stale readback can't satisfy the poll. */
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+		mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+			break;
+		}
+	}
+
+	if (MALI_REG_POLL_COUNT_FAST == i) {
+		MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+	}
+
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+/* Start a soft reset without waiting: mask all interrupts, clear any stale
+ * reset-completed flag, then issue the soft-reset command. Completion is
+ * observed later by mali_gp_reset_wait(). */
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+/* Busy-poll RAWSTAT (bounded by MALI_REG_POLL_COUNT_FAST) for the
+ * reset-completed flag raised by a prior mali_gp_reset_async(). On success
+ * interrupts are re-enabled and _MALI_OSK_ERR_OK is returned; otherwise the
+ * last RAWSTAT value is logged and _MALI_OSK_ERR_FAULT is returned. */
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+	u32 rawstat = 0;
+	int attempt;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	for (attempt = 0; attempt < MALI_REG_POLL_COUNT_FAST; attempt++) {
+		rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+		if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+			/* Re-enable interrupts */
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+			return _MALI_OSK_ERR_OK;
+		}
+	}
+
+	MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+			  core->hw_core.description, rawstat));
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Synchronous soft reset: start the reset and block until it completes (or
+ * the poll in mali_gp_reset_wait() times out). */
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+	mali_gp_reset_async(core);
+	return mali_gp_reset_wait(core);
+}
+
+/* Program the GP core with \a job's frame registers and optional performance
+ * counters, then start the VS and/or PLBU units. The final start command is
+ * separated from the preceding relaxed writes by an explicit write barrier. */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+	u32 startcmd = 0;
+	u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+	u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+	u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Build the start command from which sub-jobs (VS/PLBU) are present. */
+	if (mali_gp_job_has_vs_job(job)) {
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+	}
+
+	if (mali_gp_job_has_plbu_job(job)) {
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+	}
+
+	/* A job with neither VS nor PLBU work is a scheduler bug. */
+	MALI_DEBUG_ASSERT(0 != startcmd);
+
+	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+	/* Only program a perf counter if the job requested one. */
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core.
+	 *
+	 * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set, just
+	 * force core to assert the completion interrupt.
+	 */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+	{
+		u32 bits = 0;
+
+		if (mali_gp_job_has_vs_job(job))
+			bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+		if (mali_gp_job_has_plbu_job(job))
+			bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+		/* Fake completion by raising the end-of-command-list IRQ bits. */
+		mali_hw_core_register_write_relaxed(&core->hw_core,
+						    MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+	}
+#endif
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+}
+
+/* Resume a PLBU job that stalled with an out-of-memory interrupt, giving it
+ * a new heap [start_addr, end_addr). Only acts if the OOM flag is still
+ * pending; otherwise the core was reset in the meantime and the response is
+ * silently dropped (see comment at the end). */
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+	u32 irq_readout;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+	if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+		MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+		_mali_osk_write_mem_barrier();
+	}
+	/*
+	 * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+	 * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+	 */
+}
+
+/* Read the GP core's hardware VERSION register (product id and revision). */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+/* Accessor for the singleton GP core; NULL before mali_gp_create() succeeds
+ * or after mali_gp_delete(). */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+	return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* ------------- interrupt handling below ------------------ */
+/* IRQ-probe trigger: unmask the used interrupts, then artificially raise the
+ * force-hang bit in RAWSTAT so the interrupt line fires for probing. */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/* IRQ-probe acknowledge: the probe succeeds only if the force-hang interrupt
+ * raised by mali_gp_irq_probe_trigger() is pending; if so it is cleared and
+ * _MALI_OSK_ERR_OK is returned, otherwise _MALI_OSK_ERR_FAULT. */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 pending;
+
+	pending = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (0 == (MALIGP2_REG_VAL_IRQ_FORCE_HANG & pending)) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+	return _MALI_OSK_ERR_OK;
+}
+
+/* ------ local helper functions below --------- */
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+/* Append a one-line description of the GP core to \a buf (capacity \a size).
+ * Returns the number of characters written. Compiled only when state
+ * tracking is enabled. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+	return n;
+}
+#endif
+
+/* Read back any performance counters that were enabled for \a job and store
+ * the values on the job (and report them to the profiling subsystem when
+ * CONFIG_MALI400_PROFILING is set). A counter whose source is
+ * MALI_HW_CORE_NO_COUNTER was never programmed and is skipped. */
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+{
+	u32 val0 = 0;
+	u32 val1 = 0;
+	u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+	u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+	/* suspend is currently unused in this implementation; keep the
+	 * parameter for interface compatibility but silence the warning. */
+	MALI_IGNORE(suspend);
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+		mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+#endif
+
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+		mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+#endif
+	}
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp.h b/drivers/parrot/gpu/mali400/common/mali_gp.h
new file mode 100644
index 00000000000000..ee5a44d71b6057
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+
+/*** Accessor functions ***/
+/* Human-readable description of the core, taken from the platform resource. */
+MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+{
+	return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+/* Read the masked interrupt status (INT_STAT = RAWSTAT & INT_MASK). */
+MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+}
+
+/* Mask out every GP interrupt source. */
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+/* Read the raw (unmasked) interrupt status, filtered to the bits the driver
+ * actually uses. */
+MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+}
+
+/* Read the core's STATUS register (busy/bus-stopped flags etc.). */
+MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+{
+	/* Enable all interrupts, except those specified in irq_exceptions */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
+				    MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+}
+
+/* Read the current PLBU heap allocation start address (used when handling
+ * PLBU out-of-memory to compute how much heap was consumed). */
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp_job.c b/drivers/parrot/gpu/mali400/common/mali_gp_job.c
new file mode 100644
index 00000000000000..1d3d94299d8c07
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp_job.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/*
+ * Allocate and initialize a GP job from a userspace start-job request.
+ *
+ * Pre-allocates both the "finished" and the "OOM" notifications so that job
+ * completion / heap-out-of-memory reporting can never fail due to allocation
+ * at a point where recovery would be hard. Copies the user arguments into the
+ * job, optionally takes a reference on a dependent PP job's timeline tracker,
+ * and initializes the job's own timeline tracker.
+ *
+ * Returns the new job, or NULL on allocation/copy failure (all partially
+ * acquired resources are released in reverse order on each error path).
+ */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+ struct mali_gp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+ if (NULL != job) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL == job->finished_notification) {
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+ if (NULL == job->oom_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+ _mali_osk_notification_delete(job->finished_notification);
+ _mali_osk_notification_delete(job->oom_notification);
+ _mali_osk_free(job);
+ return NULL;
+ }
+
+ perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+ /* case when no counters came from user space
+ * so pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+ mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ /* NOTE(review): frame_registers[4] appears to carry the initial PLBU heap
+ * address from userspace -- confirm against the uk interface definition. */
+ job->heap_current_addr = job->uargs.frame_registers[4];
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ job->pp_tracker = pp_tracker;
+ if (NULL != job->pp_tracker) {
+ /* Take a reference on PP job's tracker that will be released when the GP
+ job is done. */
+ mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ }
+
+ return NULL;
+}
+
+/*
+ * Free a GP job and any notifications it still owns.
+ *
+ * The PP tracker reference must already have been released via
+ * mali_gp_job_signal_pp_tracker() -- asserted below. Notifications may have
+ * been handed off to userspace already (fields set to NULL at hand-off), so
+ * only the ones still attached are deleted here.
+ */
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+
+ /* de-allocate the pre-allocated oom notifications */
+ if (NULL != job->oom_notification) {
+ _mali_osk_notification_delete(job->oom_notification);
+ job->oom_notification = NULL;
+ }
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+ _mali_osk_free(job);
+}
+
+/* Get the global (debugfs/DS-5 configured) performance counter 0 source. */
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+ return gp_counter_src0;
+}
+
+/* Set the global performance counter 0 source (MALI_HW_CORE_NO_COUNTER disables it). */
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+ gp_counter_src0 = counter;
+}
+
+/* Get the global (debugfs/DS-5 configured) performance counter 1 source. */
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+ return gp_counter_src1;
+}
+
+/* Set the global performance counter 1 source (MALI_HW_CORE_NO_COUNTER disables it). */
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+ gp_counter_src1 = counter;
+}
+
+/*
+ * Release the reference this GP job holds on its dependent PP job's tracker.
+ *
+ * If @success is MALI_FALSE, the tracker is put with the "activation error"
+ * flag set. Idempotent: pp_tracker is cleared so a second call is a no-op.
+ * Returns a scheduling mask indicating which schedulers (if any) now have
+ * runnable work.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (NULL != job->pp_tracker) {
+ schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+ job->pp_tracker = NULL;
+ }
+
+ return schedule_mask;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp_job.h b/drivers/parrot/gpu/mali400/common/mali_gp_job.h
new file mode 100644
index 00000000000000..f404089068df23
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp_job.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs.
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works.)
+ *
+ * Lifetime: created by mali_gp_job_create() and freed by mali_gp_job_delete();
+ * the two notification pointers are handed off to userspace (and set to NULL)
+ * when the corresponding event is reported.
+ */
+struct mali_gp_job {
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+/* Kernel-side job id; NULL-tolerant (returns 0) so it can be used in trace/compare paths. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+/* L2 cache flush ordering number; NULL-tolerant (returns 0). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+/* Opaque userspace job handle, echoed back in job-finished notifications. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ return job->uargs.user_job_ptr;
+}
+
+/* Frame builder id supplied by userspace (used for profiling events). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ return job->uargs.frame_builder_id;
+}
+
+/* Flush id supplied by userspace (used for profiling events). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ return job->uargs.flush_id;
+}
+
+/* Process id of the submitting process (captured at job creation). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+ return job->pid;
+}
+
+/* Thread id of the submitting thread (captured at job creation). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+ return job->tid;
+}
+
+/* Pointer to the job's frame register array (owned by the job; do not free). */
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ return job->uargs.frame_registers;
+}
+
+/* Session that submitted this job. */
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ return job->session;
+}
+
+/* MALI_TRUE if the job has vertex shader work (VS command list start != end).
+ * NOTE(review): assumes frame_registers[0]/[1] are the VS start/end pointers -- confirm
+ * against the uk interface definition. */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* MALI_TRUE if the job has PLBU work (PLBU command list start != end).
+ * NOTE(review): assumes frame_registers[2]/[3] are the PLBU start/end pointers -- confirm
+ * against the uk interface definition. */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Current PLBU heap address for the job (updated as the job runs / grows its heap). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ return job->heap_current_addr;
+}
+
+/* Record the job's current PLBU heap address. */
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ job->heap_current_addr = heap_addr;
+}
+
+/* Performance counter enable flags from userspace (_MALI_PERFORMANCE_COUNTER_FLAG_*). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_flag;
+}
+
+/* Event source selected for performance counter 0. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_src0;
+}
+
+/* Event source selected for performance counter 1. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ return job->uargs.perf_counter_src1;
+}
+
+/* Captured value of performance counter 0 (returned to userspace on completion). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ return job->perf_counter_value0;
+}
+
+/* Captured value of performance counter 1 (returned to userspace on completion). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ return job->perf_counter_value1;
+}
+
+/* Override the event source for performance counter 0 (e.g. from the global debugfs setting). */
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src0 = src;
+}
+
+/* Override the event source for performance counter 1 (e.g. from the global debugfs setting). */
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src1 = src;
+}
+
+/* Store the value read from performance counter 0. */
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value0 = value;
+}
+
+/* Store the value read from performance counter 1. */
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value1 = value;
+}
+
+/**
+ * Returns MALI_TRUE if first job is after the second job, ordered by job ID.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+{
+ /* A span is used to handle job ID wrapping: the unsigned subtraction stays
+ * within MALI_SCHEDULER_JOB_ID_SPAN when "first" was issued after "second",
+ * even if the sequential id counter has wrapped around in between. */
+ return (mali_gp_job_get_id(first) - mali_gp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN;
+}
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.c b/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.c
new file mode 100644
index 00000000000000..ff973f8736008c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+enum mali_gp_slot_state {
+ MALI_GP_SLOT_STATE_IDLE,
+ MALI_GP_SLOT_STATE_WORKING,
+ MALI_GP_SLOT_STATE_DISABLED,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot {
+ struct mali_group *group;
+ /*
+ * We keep track of the state here as well as in the group object
+ * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+ */
+ enum mali_gp_slot_state state;
+ u32 returned_cookie;
+};
+
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue); /* List of unscheduled jobs. */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue_high); /* List of unscheduled high priority jobs. */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static mali_bool mali_gp_scheduler_is_suspended(void *data);
+static void mali_gp_scheduler_job_queued(void);
+static void mali_gp_scheduler_job_completed(void);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *gp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *gp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+/*
+ * Initialize the GP scheduler: create its lock and pause wait queue, then
+ * locate the single group that owns a GP core and claim it as the one
+ * render slot.
+ *
+ * On any failure all partially created resources are torn down via the
+ * cleanup path. Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM
+ * on allocation failure, _MALI_OSK_ERR_ITEM_NOT_FOUND if a group lookup
+ * returns NULL.
+ *
+ * NOTE(review): if every group exists but none has a GP core, the loop
+ * completes without setting slot.group and the function still returns OK --
+ * verify callers can tolerate an unset slot in that configuration.
+ */
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+ u32 num_groups;
+ u32 i;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ gp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#else
+ gp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ if (NULL == gp_scheduler_lock) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto cleanup;
+ }
+
+ gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == gp_scheduler_working_wait_queue) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto cleanup;
+ }
+
+ /* Find all the available GP cores */
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+ MALI_DEBUG_ASSERT(NULL != group);
+ if (NULL != group) {
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core) {
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+ slot.group = group;
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ break; /* There is only one GP, no point in looking for more */
+ }
+ } else {
+ ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ goto cleanup;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+
+cleanup:
+ if (NULL != gp_scheduler_working_wait_queue) {
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+ gp_scheduler_working_wait_queue = NULL;
+ }
+
+ if (NULL != gp_scheduler_lock) {
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ gp_scheduler_lock = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Tear down the GP scheduler. The slot must be idle or disabled (no job in
+ * flight); the group, the pause wait queue and the scheduler lock created in
+ * mali_gp_scheduler_initialize() are released in that order.
+ */
+void mali_gp_scheduler_terminate(void)
+{
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state
+ || MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_ASSERT_POINTER(slot.group);
+ mali_group_delete(slot.group);
+
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_term(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_term(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+/* Take the GP scheduler lock (IRQ-safe variant when upper-half scheduling is enabled). */
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_lock(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_lock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+/* Release the GP scheduler lock taken by mali_gp_scheduler_lock(). */
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_unlock(gp_scheduler_lock);
+#else
+ _mali_osk_spinlock_unlock(gp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+#if defined(DEBUG)
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock)
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+/* Group and scheduler must be locked when entering this function. Both will be unlocked before
+ * exiting. */
+static void mali_gp_scheduler_schedule_internal_and_unlock(void)
+{
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(slot.group->lock);
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ /* Nothing to do if scheduling is paused, the GP slot is busy/disabled,
+ * or both job queues are empty. */
+ if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state ||
+ (_mali_osk_list_empty(&job_queue) && _mali_osk_list_empty(&job_queue_high))) {
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(slot.group);
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ /* Fix: there is no local "group" variable in this function; the GP group
+ * lives in the file-static slot. The previous code did not compile with
+ * CONFIG_GPU_TRACEPOINTS enabled. */
+ trace_gpu_sched_switch(mali_gp_get_hw_core_desc(mali_group_get_gp_core(slot.group)), sched_clock(), 0, 0, 0);
+#endif
+ return; /* Nothing to do, so early out */
+ }
+
+ /* Get next job in queue; high priority queue is always drained first. */
+ if (!_mali_osk_list_empty(&job_queue_high)) {
+ job = _MALI_OSK_LIST_ENTRY(job_queue_high.next, struct mali_gp_job, list);
+ } else {
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job_queue));
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Remove the job from queue */
+ _mali_osk_list_del(&job->list);
+
+ /* Mark slot as busy */
+ slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+ mali_gp_scheduler_unlock();
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+ mali_group_start_gp_job(slot.group, job);
+ mali_group_unlock(slot.group);
+}
+
+/*
+ * Public entry point for kicking the GP scheduler. Takes the group lock
+ * before the scheduler lock (the required ordering); both are released by
+ * mali_gp_scheduler_schedule_internal_and_unlock().
+ */
+void mali_gp_scheduler_schedule(void)
+{
+ mali_group_lock(slot.group);
+ mali_gp_scheduler_lock();
+
+ mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+/*
+ * Fill in the pre-allocated "finished" notification with the job result
+ * (status, final heap address, perf counters), send it to the owning session,
+ * then free the job and drop the per-job PM/utilization accounting.
+ * Ownership of finished_notification passes to the session, so the field is
+ * cleared before mali_gp_job_delete() runs.
+ */
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
+ job->finished_notification = NULL;
+
+ mali_gp_job_delete(job);
+ mali_gp_scheduler_job_completed();
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+/* Group must be locked when entering this function. Will be unlocked before exiting.
+ *
+ * Completion handler for a GP job: releases the job's timeline tracker and
+ * its PP dependency reference, marks the slot idle, wakes anyone waiting in
+ * mali_gp_scheduler_suspend(), schedules the next queued GP job, propagates
+ * any non-GP scheduling work produced by the tracker release, and finally
+ * reports the result to userspace and frees the job.
+ */
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(slot.group == group);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+ /* Release tracker. */
+ schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job. */
+ schedule_mask |= mali_gp_job_signal_pp_tracker(job, success);
+
+ mali_gp_scheduler_lock();
+
+ /* Mark slot as idle again */
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0) {
+ _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+ }
+
+ /* Schedule any queued GP jobs on this group. */
+ mali_gp_scheduler_schedule_internal_and_unlock();
+
+ /* GP is now scheduled, removing it from the mask. */
+ schedule_mask &= ~MALI_SCHEDULER_MASK_GP;
+
+ if (MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
+ /* Releasing the tracker activated other jobs that need scheduling. */
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ }
+
+ /* Sends the job end message to user space and free the job object */
+ mali_gp_scheduler_return_job_to_user(job, success);
+}
+
+/*
+ * Handle a GP "out of heap memory" event: hand the job's pre-allocated OOM
+ * notification to userspace so it can supply a new heap, and record the job
+ * id as the cookie userspace must quote back in the suspend response.
+ * The notification pointer is taken from the job under the scheduler lock
+ * (ownership moves to the session), the actual send happens unlocked.
+ */
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ mali_gp_scheduler_lock();
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+ slot.returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = mali_gp_job_get_id(job);
+
+ mali_gp_scheduler_unlock();
+
+ mali_session_send_notification(mali_gp_job_get_session(job), notification);
+
+ /*
+ * If this function failed, then we could return the job to user space right away,
+ * but there is a job timer anyway that will do that eventually.
+ * This is not exactly a common case anyway.
+ */
+}
+
+/*
+ * Pause the GP scheduler and block until the GP core is no longer working.
+ * Each call increments pause_count; pair with mali_gp_scheduler_resume().
+ */
+void mali_gp_scheduler_suspend(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_gp_scheduler_unlock();
+
+ _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended, NULL);
+}
+
+/*
+ * Undo one mali_gp_scheduler_suspend() and kick the scheduler if this was
+ * the last outstanding pause.
+ *
+ * NOTE(review): pause_count is re-read here without the lock; a concurrent
+ * suspend could race this check. It appears benign because the schedule path
+ * re-checks pause_count under the lock before starting any job -- confirm.
+ */
+void mali_gp_scheduler_resume(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ mali_gp_scheduler_unlock();
+ if (0 == pause_count) {
+ mali_gp_scheduler_schedule();
+ }
+}
+
+/*
+ * Submit a GP job to the Timeline system (it will be queued for execution
+ * when its fence dependencies are met, via mali_gp_scheduler_activate_job()).
+ * Takes the per-job PM reference that is dropped in
+ * mali_gp_scheduler_job_completed() or on abort. Returns the timeline point
+ * assigned to the job.
+ */
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* We hold a PM reference for every job we hold queued (and running) */
+ _mali_osk_pm_dev_ref_add();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_GP);
+
+ return point;
+}
+
+/*
+ * uk (ioctl) entry point: create a GP job from userspace arguments, submit
+ * it to the timeline, and write the resulting timeline point back to the
+ * user-provided pointer.
+ *
+ * @param ctx   The submitting session (struct mali_session_data *).
+ * @param uargs Userspace job description (copied by mali_gp_job_create()).
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_NOMEM if the job could
+ *         not be created; _MALI_OSK_ERR_ITEM_NOT_FOUND if writing the
+ *         timeline point back to userspace failed (the job is already
+ *         submitted and will still run in that case).
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *timeline_point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+
+ point = mali_gp_scheduler_submit_job(session, job);
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the job was started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* uk entry point: report the number of GP cores (this driver supports exactly one). */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* uk entry point: report the GP core version cached during scheduler initialization. */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * uk entry point: userspace's answer to an OOM suspension.
+ *
+ * If the response is "resume with new heap", a replacement OOM notification
+ * is allocated up front (so a later OOM on the same job can still be
+ * reported); on allocation failure the job is aborted instead. The job is
+ * then resumed with the new heap bounds (args->arguments[0]/[1]) or, for any
+ * other response code, aborted. args->cookie identifies the suspended job.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *resumed_job;
+ _mali_osk_notification_t *new_notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ session = (struct mali_session_data *)args->ctx;
+ if (NULL == session) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL == new_notification) {
+ MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
+ mali_group_lock(slot.group);
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ mali_group_lock(slot.group);
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+
+ resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+ if (NULL != resumed_job) {
+ /* Replace the notification consumed by mali_gp_scheduler_oom(). */
+ resumed_job->oom_notification = new_notification;
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+ } else {
+ mali_group_unlock(slot.group);
+ _mali_osk_notification_delete(new_notification);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Abort all GP jobs belonging to @session.
+ *
+ * Queued jobs are moved onto a local list under the scheduler lock, then
+ * released (tracker, PP dependency, job memory, PM/utilization accounting)
+ * outside the lock; a job currently running on the group is aborted last via
+ * mali_group_abort_session(). The session must already be flagged as
+ * aborting so no new jobs can be queued behind this sweep.
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08X.\n", session));
+
+ mali_gp_scheduler_lock();
+
+ /* Find all jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list) {
+ if (job->session == session) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+ _mali_osk_list_move(&job->list, &removed_jobs);
+ }
+ }
+
+ /* Find all high priority jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue_high, struct mali_gp_job, list) {
+ if (job->session == session) {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
+ _mali_osk_list_move(&job->list, &removed_jobs);
+ }
+ }
+
+ mali_gp_scheduler_unlock();
+
+ /* Release and delete all found jobs from the aborting session. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &removed_jobs, struct mali_gp_job, list) {
+ mali_timeline_tracker_release(&job->tracker);
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+ mali_gp_job_delete(job);
+ mali_gp_scheduler_job_completed();
+ }
+
+ /* Abort any running jobs from the session. */
+ mali_group_abort_session(slot.group, session);
+}
+
+/*
+ * Wait-queue predicate for mali_gp_scheduler_suspend(): true once the
+ * scheduler is paused and the GP slot is no longer working.
+ */
+static mali_bool mali_gp_scheduler_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_gp_scheduler_lock();
+ ret = pause_count > 0 && (slot.state == MALI_GP_SLOT_STATE_IDLE || slot.state == MALI_GP_SLOT_STATE_DISABLED);
+ mali_gp_scheduler_unlock();
+
+ return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+/*
+ * Append a human-readable GP scheduler state summary (queue emptiness plus
+ * the group's own state dump) to @buf. Returns the number of bytes written.
+ */
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue_high) ? "empty" : "not empty");
+
+ n += mali_group_dump_state(slot.group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+/* Reset the GP group (if one was found at init); called after HW power-on. */
+void mali_gp_scheduler_reset_all_groups(void)
+{
+ if (NULL != slot.group) {
+ mali_group_lock(slot.group);
+ mali_group_reset(slot.group);
+ mali_group_unlock(slot.group);
+ }
+}
+
+/* Zap the given session's MMU/TLB state on the GP group (if one exists). */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ if (NULL != slot.group) {
+ mali_group_zap_session(slot.group, session);
+ }
+}
+
+/*
+ * Re-enable a GP group previously disabled with
+ * mali_gp_scheduler_disable_group(): power it back on, reset it, and
+ * immediately schedule any jobs queued while it was disabled.
+ * No-op if the group is not in the DISABLED state.
+ * Lock order: group lock first, then scheduler lock (released together by
+ * the schedule_internal_and_unlock call).
+ */
+void mali_gp_scheduler_enable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group));
+
+ mali_group_lock(group);
+
+ if (MALI_GROUP_STATE_DISABLED != group->state) {
+ mali_group_unlock(group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group));
+ return;
+ }
+
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ mali_group_power_on_group(group);
+ mali_group_reset(group);
+
+ /* Pick up any jobs that might have been queued while the GP group was disabled. */
+ mali_gp_scheduler_schedule_internal_and_unlock();
+}
+
+/*
+ * Disable the GP group: first suspend the scheduler (waits for any running
+ * job to finish), then mark slot and group DISABLED and power the group off.
+ * Already-disabled groups are left untouched. The suspend/resume bracket
+ * guarantees the slot is idle when the state change happens.
+ */
+void mali_gp_scheduler_disable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group));
+
+ mali_gp_scheduler_suspend();
+ mali_group_lock(group);
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ if (MALI_GROUP_STATE_DISABLED == group->state) {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group));
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_DISABLED;
+ group->state = MALI_GROUP_STATE_DISABLED;
+
+ mali_group_power_off_group(group, MALI_TRUE);
+ }
+
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(group);
+ mali_gp_scheduler_resume();
+}
+
+/*
+ * Insert an activated job into the normal or high-priority queue, keeping the
+ * queue sorted by wrap-safe job id (see mali_gp_job_is_after()). Must be
+ * called with the scheduler lock held. Returns MALI_SCHEDULER_MASK_GP if the
+ * GP slot is idle and a scheduling pass should follow, otherwise an empty
+ * mask.
+ */
+static mali_scheduler_mask mali_gp_scheduler_queue_job(struct mali_gp_job *job)
+{
+ _mali_osk_list_t *queue = NULL;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_gp_job *iter, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
+
+ job->cache_order = mali_scheduler_get_new_cache_order();
+
+ /* Determine which queue the job should be added to. */
+ if (job->session->use_high_priority_job_queue) {
+ queue = &job_queue_high;
+ } else {
+ queue = &job_queue;
+ }
+
+ /* Find position in queue where job should be added: scan backwards for the
+ * last entry this job is ordered after. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_gp_job, list) {
+ if (mali_gp_job_is_after(job, iter)) {
+ break;
+ }
+ }
+
+ /* Add job to queue. */
+ _mali_osk_list_add(&job->list, &iter->list);
+
+ /* Set schedule bitmask if the GP core is idle. */
+ if (MALI_GP_SLOT_STATE_IDLE == slot.state) {
+ schedule_mask |= MALI_SCHEDULER_MASK_GP;
+ }
+
+ mali_gp_scheduler_job_queued();
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+ return schedule_mask;
+}
+
+/*
+ * Timeline callback: a GP job's fence dependencies are met.
+ *
+ * If the owning session is aborting, the job is discarded here (tracker
+ * released, PP dependency signalled as failed, job freed, PM reference
+ * dropped) and an empty mask is returned. Otherwise the job is queued and
+ * the resulting scheduling mask (possibly MALI_SCHEDULER_MASK_GP) is
+ * returned for the caller to act on.
+ */
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", mali_gp_job_get_id(job), job));
+
+ mali_gp_scheduler_lock();
+
+ if (unlikely(job->session->is_aborting)) {
+ /* Before checking if the session is aborting, the scheduler must be locked. */
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) activated while session is aborting.\n", mali_gp_job_get_id(job), job));
+
+ /* This job should not be on any list. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+
+ mali_gp_scheduler_unlock();
+
+ /* Release tracker and delete job. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+ mali_gp_job_delete(job);
+
+ /* Release the PM ref taken in mali_gp_scheduler_submit_job */
+ _mali_osk_pm_dev_ref_dec();
+
+ /* Since we are aborting we ignore the scheduler mask. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ /* GP job is ready to run, queue it. */
+ schedule_mask = mali_gp_scheduler_queue_job(job);
+
+ mali_gp_scheduler_unlock();
+
+ return schedule_mask;
+}
+
+/* Utilization bookkeeping when a GP job enters the queue. */
+static void mali_gp_scheduler_job_queued(void)
+{
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the time a job is
+ * queued. This is fine because we only lose the tiny idle gap between
+ * jobs, and in return do less utilization work (fewer locks taken).
+ */
+ mali_utilization_gp_start();
+ }
+}
+
+/*
+ * Per-job teardown accounting: drop the PM reference taken at submit time
+ * (see mali_gp_scheduler_submit_job()) and end the utilization interval
+ * opened by mali_gp_scheduler_job_queued().
+ */
+static void mali_gp_scheduler_job_completed(void)
+{
+ /* Release the PM reference we got in the mali_gp_scheduler_job_queued() function */
+ _mali_osk_pm_dev_ref_dec();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.h b/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.h
new file mode 100644
index 00000000000000..84a096c1fac927
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_gp_scheduler.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued GP jobs from session.
+ *
+ * This function aborts all GP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the GP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ */
+void mali_gp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Re-enable a group that has been disabled with mali_gp_scheduler_disable_group
+ *
+ * If a Mali PMU is present, the group will be powered back on and added back
+ * into the GP scheduler.
+ *
+ * @param group Pointer to the group to enable
+ */
+void mali_gp_scheduler_enable_group(struct mali_group *group);
+
+/**
+ * @brief Disable a group
+ *
+ * The group will be taken out of the GP scheduler and powered off, if a Mali
+ * PMU is present.
+ *
+ * @param group Pointer to the group to disable
+ */
+void mali_gp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job);
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_gp_scheduler_schedule(void);
+
+/**
+ * @brief Submit a GP job to the GP scheduler.
+ *
+ * This will add the GP job to the Timeline system.
+ *
+ * @param session Session this job belongs to.
+ * @param job GP job that will be submitted
+ * @return Point on GP timeline for job.
+ */
+mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_group.c b/drivers/parrot/gpu/mali400/common/mali_group.c
new file mode 100644
index 00000000000000..922f012508ab98
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_group.c
@@ -0,0 +1,2048 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* timer related */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
+
+static void mali_group_post_process_job_pp(struct mali_group *group);
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
+
+void mali_group_lock(struct mali_group *group)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(group->lock);
+#else
+ _mali_osk_spinlock_lock(group->lock);
+#endif
+ MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+void mali_group_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(group->lock);
+#else
+ _mali_osk_spinlock_unlock(group->lock);
+#endif
+}
+
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+{
+ struct mali_group *group = NULL;
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_calloc(1, sizeof(struct mali_group));
+ if (NULL != group) {
+ group->timeout_timer = _mali_osk_timer_init();
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_lock_order_t order;
+ _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+ if (NULL != dlbu) {
+ order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
+ } else {
+ order = _MALI_OSK_LOCK_ORDER_GROUP;
+ }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#else
+ group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
+#endif
+
+ if (NULL != group->lock) {
+ group->l2_cache_core[0] = core;
+ group->session = NULL;
+ group->power_is_on = MALI_TRUE;
+ group->state = MALI_GROUP_STATE_IDLE;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->pp_scheduler_list);
+ group->parent_group = NULL;
+ group->l2_cache_core_ref_count[0] = 0;
+ group->l2_cache_core_ref_count[1] = 0;
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
+
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
+{
+ /* This group object now owns the MMU core object */
+ group->mmu = mmu_core;
+ group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+ if (NULL == group->bottom_half_work_mmu) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+ /* This group object no longer owns the MMU core object */
+ group->mmu = NULL;
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+ group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+ if (NULL == group->bottom_half_work_gp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the GP core object */
+ group->gp_core = NULL;
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+ group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+ if (NULL == group->bottom_half_work_pp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the PP core object */
+ group->pp_core = NULL;
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+}
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+{
+ group->pm_domain = domain;
+}
+
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core) {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ if (mali_global_groups[i] == group) {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups) {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(group->lock);
+#else
+ _mali_osk_spinlock_term(group->lock);
+#endif
+ _mali_osk_free(group);
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+ u32 i;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+ i = 0;
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+ MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+ i++;
+ }
+})
+
+/**
+ * @brief Add child group to virtual group parent
+ *
+ * Before calling this function, child must have its state set to JOINING_VIRTUAL
+ * to ensure it's not touched during the transition period. When this function returns,
+ * child's state will be IN_VIRTUAL.
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+{
+ mali_bool found;
+ u32 i;
+ struct mali_session_data *child_session;
+
+ MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(NULL == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
+
+ _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+ child->state = MALI_GROUP_STATE_IN_VIRTUAL;
+ child->parent_group = parent;
+
+ MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+ MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+ MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+ /* Keep track of the L2 cache cores of child groups */
+ found = MALI_FALSE;
+ for (i = 0; i < 2; i++) {
+ if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+ MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+ parent->l2_cache_core_ref_count[i]++;
+ found = MALI_TRUE;
+ }
+ }
+
+ if (!found) {
+ /* First time we see this L2 cache, add it to our list */
+ i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+ MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core[i] = child->l2_cache_core[0];
+ parent->l2_cache_core_ref_count[i]++;
+ }
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_add_group(parent->bcast_core, child);
+ mali_dlbu_add_group(parent->dlbu_core, child);
+
+ child_session = child->session;
+ child->session = NULL;
+
+ /* Above this comment, only software state is updated and the HW is not
+ * touched. Now, check if Mali is powered and skip the rest if it isn't
+ * powered.
+ */
+
+ if (!update_hw) {
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+ return;
+ }
+
+ /* Update MMU */
+ if (parent->session == child_session) {
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+ if (NULL == parent->session) {
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ } else {
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+ }
+
+ mali_dlbu_update_mask(parent->dlbu_core);
+
+ /* Start job on child when parent is active */
+ if (NULL != parent->pp_running_job) {
+ struct mali_pp_job *job = parent->pp_running_job;
+ u32 subjob = -1;
+
+ if (mali_pp_job_is_with_dlbu(parent->pp_running_job)) {
+ subjob = mali_pp_core_get_id(child->pp_core);
+ }
+
+ /* Taking the next unstarted sub job directly without the scheduler lock should be
+ * safe here, because: 1) the virtual group is the only consumer of this job, and
+ * 2) taking the next unstarted sub job doesn't change the job queue itself
+ */
+ if (mali_pp_job_has_unstarted_sub_jobs(job)) {
+ subjob = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, subjob);
+ }
+
+ if (-1 != subjob) {
+ MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+ child, mali_pp_job_get_id(job), parent));
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
+ /* Reset broadcast unit only when it will help run subjob */
+ mali_bcast_reset(parent->bcast_core);
+
+ mali_group_start_job_on_group(child, job, subjob);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ *
+ * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
+ * to IDLE before it can be used.
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+ u32 i;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(parent == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
+ /* Removing groups while running is not yet supported. */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
+
+ mali_group_lock(child);
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_remove_group(parent->bcast_core, child);
+ mali_dlbu_remove_group(parent->dlbu_core, child);
+
+ /* Update HW only if power is on */
+ if (mali_pm_is_power_on()) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ _mali_osk_list_delinit(&child->group_list);
+
+ child->session = parent->session;
+ child->parent_group = NULL;
+ child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+ /* Keep track of the L2 cache cores of child groups */
+ i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core_ref_count[i]--;
+
+ if (parent->l2_cache_core_ref_count[i] == 0) {
+ parent->l2_cache_core[i] = NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+
+ mali_group_unlock(child);
+}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+ struct mali_group *child;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+
+ mali_group_remove_group(parent, child);
+
+ return child;
+}
+
+void mali_group_reset(struct mali_group *group)
+{
+ /*
+ * This function should not be used to abort jobs,
+ * currently only called during insmod and PM resume
+ */
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+ group->session = NULL;
+
+ if (NULL != group->dlbu_core) {
+ mali_dlbu_reset(group->dlbu_core);
+ }
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_reset(group->bcast_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_group_reset_mmu(group);
+ }
+
+ if (NULL != group->gp_core) {
+ mali_gp_reset(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_group_reset_pp(group);
+ }
+}
+
+/* This function is called before running a job on a virtual group.
+ * It removes unneeded child groups from the bcast mask as necessary
+ * and sets each child group's specific registers.
+ */
+static void mali_group_job_prepare_virtual(struct mali_group *group, struct mali_pp_job *job,
+ u32 first_subjob, u32 last_subjob)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 subjob = first_subjob;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
+
+ /* Set each core specific registers:
+ * 1. Renderer List Address
+ * 2. Fragment Shader Stack Address
+ * Other general registers are set through Broadcast Unit in one go.
+ * Note: any additional, temporarily unused group cores in the virtual group
+ * must be removed from the Broadcast Unit before starting the job in
+ * this virtual group; otherwise we may never get the Frame_end interrupt.
+ */
+ if (!mali_pp_job_is_with_dlbu(job)) {
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (subjob <= last_subjob) {
+ /* Write specific Renderer List Address for each group */
+ mali_pp_write_addr_renderer_list(child->pp_core, job, subjob);
+ /* Write specific stack address for each child group */
+ mali_pp_write_addr_stack(child->pp_core, job, subjob);
+ subjob++;
+ MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual group job %u (0x%08X) part %u/%u started.\n",
+ mali_pp_job_get_id(job), job, subjob,
+ mali_pp_job_get_sub_job_count(job)));
+ } else {
+ /* Some physical groups are just redundant for this run;
+ * remove them from the broadcast unit
+ */
+ mali_bcast_remove_group(group->bcast_core, child);
+ MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %0x remove from bcast_core\n", (int)child));
+ }
+ }
+
+ /* Reset broadcast */
+ mali_bcast_reset(group->bcast_core);
+ } else {
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job, child->pp_core->core_id);
+ mali_bcast_add_group(group->bcast_core, child);
+ }
+
+ /* Reset broadcast */
+ mali_bcast_reset(group->bcast_core);
+
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write Renderer List Address for each child group */
+ mali_pp_write_addr_renderer_list(group->pp_core, job, 0);
+
+ MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
+ mali_pp_job_get_id(job), job, 1,
+ mali_pp_job_get_sub_job_count(job)));
+ }
+}
+
+/* Call this function to make sure group->group_list is consistent with the group->bcast_core mask */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group)
+{
+ struct mali_group *child, *temp;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list, struct mali_group, group_list) {
+ mali_bcast_add_group(group->bcast_core, child);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali group: New physical groups added in virtual group at non dlbu job done"));
+ /**
+ * When new physical groups are added to the virtual group, they may have a different
+ * page directory than the virtual group. Activate the empty page directory
+ * for the virtual group here to avoid a potentially inconsistent page directory.
+ */
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+}
+
+struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ return group->gp_core;
+}
+
+struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ return group->pp_core;
+}
+
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_gp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+ }
+
+ mali_group_activate_page_directory(group, session);
+
+ mali_gp_job_start(group->gp_core, job);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
+ mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+#endif
+
+ group->gp_running_job = job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ /* Setup the timeout timer value and save the job id for the job running on the gp core */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+/* Used to set all the registers except frame renderer list address and fragment shader stack address
+ * It means the caller must set these two registers properly before calling this function
+ */
+static void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+ struct mali_session_data *session;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_pp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+ }
+
+ if (NULL != group->l2_cache_core[1]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+ }
+
+ mali_group_activate_page_directory(group, session);
+
+ if (mali_group_is_virtual(group)) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+ /* Try to use DMA unit to start job, fallback to writing directly to the core */
+ MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
+ if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
+ mali_pp_job_start(group->pp_core, job, sub_job);
+ }
+ } else {
+ mali_pp_job_start(group->pp_core, job, sub_job);
+ }
+
+ /* if the group is virtual, loop through physical groups which belong to this group
+ * and call profiling events for its cores as virtual */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+#if defined(CONFIG_MALI400_PROFILING)
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ } else { /* group is physical - call profiling events for physical cores */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+#endif
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+/* Start a PP job on a virtual group, covering sub jobs
+ * [first_subjob, last_subjob] on the member cores.  Prepares each member
+ * core for its assigned sub job, then issues the common PP start sequence.
+ * Caller must hold the group lock.
+ */
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job,
+				     u32 first_subjob, u32 last_subjob)
+{
+	MALI_DEBUG_ASSERT_POINTER(job);
+	MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
+
+	/* Prepare the group for running this job */
+	mali_group_job_prepare_virtual(group, job, first_subjob, last_subjob);
+
+	/* Start job. General setting for all the PP cores */
+	mali_group_start_pp_job(group, job, first_subjob);
+}
+
+/* Start one PP sub job on a single physical group.  Writes the two
+ * per-sub-job frame registers, then either kicks the core directly (when
+ * the group is in the middle of joining a virtual group, where accounting
+ * is owned by the virtual group) or runs the full start sequence.
+ */
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state || MALI_GROUP_STATE_IN_VIRTUAL == group->state);
+
+	/*
+	 * There are two frame registers which are different for each sub job:
+	 * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+	 * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+	 */
+	mali_pp_write_addr_renderer_list(group->pp_core, job, subjob);
+
+	/* Write specific stack address for each child group */
+	mali_pp_write_addr_stack(group->pp_core, job, subjob);
+
+	/* For start a job in a group which is just joining the virtual group
+	 * just start the job directly, all the accouting information and state
+	 * updates have been covered by virtual group state
+	 */
+	if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
+		mali_pp_job_start(group->pp_core, job, subjob);
+		return;
+	}
+
+	/* Start job. General setting for all the PP cores */
+	mali_group_start_pp_job(group, job, subjob);
+}
+
+
+
+/* Resume a GP job that stopped with a PLBU out-of-memory condition, handing
+ * it a new heap region [start_addr, end_addr].  Returns the running GP job
+ * on success, or NULL when the request is stale (group no longer in OOM
+ * state, or job_id does not match the running job).
+ * Caller must hold the group lock.
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (group->state != MALI_GROUP_STATE_OOM ||
+	    mali_gp_job_get_id(group->gp_running_job) != job_id) {
+		return NULL; /* Illegal request or job has already been aborted */
+	}
+
+	/* Invalidate L2 and zap the MMU TLB before the core touches the new
+	 * heap, so no stale translations/lines are used. */
+	if (NULL != group->l2_cache_core[0]) {
+		mali_l2_cache_invalidate(group->l2_cache_core[0]);
+	}
+
+	mali_mmu_zap_tlb_without_stall(group->mmu);
+
+	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+	trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
+			       mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+	group->state = MALI_GROUP_STATE_WORKING;
+
+	return group->gp_running_job;
+}
+
+/* Reset the MMU(s) belonging to this group.  For a virtual group, falls
+ * back to resetting each member's MMU if the broadcast reset fails. */
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+	struct mali_group *child;
+	struct mali_group *temp;
+	_mali_osk_errcode_t err;
+
+	if (!mali_group_is_virtual(group)) {
+		/* This is a physical group or an idle virtual group -- simply wait for
+		 * the reset to complete. */
+		/* NOTE(review): the comment above mirrors mali_group_reset_pp();
+		 * this branch only runs for physical groups -- confirm wording. */
+		err = mali_mmu_reset(group->mmu);
+		MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+	} else { /* virtual group */
+		err = mali_mmu_reset(group->mmu);
+		if (_MALI_OSK_ERR_OK == err) {
+			return;
+		}
+
+		/* Loop through all members of this virtual group and wait
+		 * until they are done resetting.
+		 */
+		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+			err = mali_mmu_reset(child->mmu);
+			MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+		}
+	}
+}
+
+/* Asynchronously reset the PP core(s) of this group, then wait for
+ * completion.  A busy virtual group waits on each member core; otherwise
+ * a single wait on the group's own PP core suffices. */
+static void mali_group_reset_pp(struct mali_group *group)
+{
+	struct mali_group *child;
+	struct mali_group *temp;
+
+	mali_pp_reset_async(group->pp_core);
+
+	if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+		/* This is a physical group or an idle virtual group -- simply wait for
+		 * the reset to complete. */
+		mali_pp_reset_wait(group->pp_core);
+	} else { /* virtual group */
+		/* Loop through all members of this virtual group and wait until they
+		 * are done resetting.
+		 */
+		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+			mali_pp_reset_wait(child->pp_core);
+		}
+	}
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+/* Finish the running PP job: collect counters/profiling, reset the core
+ * (soft reset on success, full recovery reset on failure or reset timeout),
+ * clear the running-job state, and hand the job back to the PP scheduler.
+ * mali_pp_scheduler_job_done() releases the group lock. */
+static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
+{
+	struct mali_pp_job *pp_job_to_return;
+	u32 pp_sub_job_to_return;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+	MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	mali_group_post_process_job_pp(group);
+
+	if (success) {
+		/* Only do soft reset for successful jobs, a full recovery
+		 * reset will be done for failed jobs. */
+		mali_pp_reset_async(group->pp_core);
+	}
+
+	/* Snapshot job identity before clearing it, so it can be returned
+	 * after the state update. */
+	pp_job_to_return = group->pp_running_job;
+	pp_sub_job_to_return = group->pp_running_sub_job;
+	group->state = MALI_GROUP_STATE_IDLE;
+	group->pp_running_job = NULL;
+
+	if (!success) {
+		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+		mali_group_recovery_reset(group);
+	} else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+		mali_group_recovery_reset(group);
+	}
+
+	/* Return job to user, schedule and unlock group. */
+	mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
+}
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+/* GP counterpart of mali_group_complete_pp_and_unlock(): post-process the
+ * running GP job, reset the core (soft on success, recovery on failure),
+ * clear running-job state and hand the job to the GP scheduler, which
+ * releases the group lock. */
+static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
+{
+	struct mali_gp_job *gp_job_to_return;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	mali_group_post_process_job_gp(group, MALI_FALSE);
+
+	if (success) {
+		/* Only do soft reset for successful jobs, a full recovery
+		 * reset will be done for failed jobs. */
+		mali_gp_reset_async(group->gp_core);
+	}
+
+	gp_job_to_return = group->gp_running_job;
+	group->state = MALI_GROUP_STATE_IDLE;
+	group->gp_running_job = NULL;
+
+	if (!success) {
+		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+		mali_group_recovery_reset(group);
+	} else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+		mali_group_recovery_reset(group);
+	}
+
+	/* Return job to user, schedule and unlock group. */
+	mali_gp_scheduler_job_done(group, gp_job_to_return, success);
+}
+
+/* Abort the GP job identified by job_id if it is still running on this
+ * group.  No-op when the group is idle or a different job is running.
+ * Caller must hold the group lock; it is still held on return. */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (MALI_GROUP_STATE_IDLE == group->state ||
+	    mali_gp_job_get_id(group->gp_running_job) != job_id) {
+		return; /* No need to cancel or job has already been aborted or completed */
+	}
+
+	/* Function will unlock the group, so we need to lock it again */
+	mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+	mali_group_lock(group);
+}
+
+/* PP counterpart of mali_group_abort_gp_job(): abort the PP job identified
+ * by job_id if it is still running on this group.  Caller must hold the
+ * group lock; the completion path drops it, so it is re-taken before
+ * returning. */
+static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (MALI_GROUP_STATE_IDLE == group->state ||
+	    mali_pp_job_get_id(group->pp_running_job) != job_id) {
+		return; /* No need to cancel or job has already been aborted or completed */
+	}
+
+	mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+	mali_group_lock(group);
+}
+
+/* Abort any GP/PP jobs belonging to @session that are running on this
+ * group, and drop the group's page-directory binding to the session if the
+ * group ends up unused.  Groups that are members of a virtual group are
+ * left alone (the virtual group owns their state). */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+	struct mali_gp_job *gp_job;
+	struct mali_pp_job *pp_job;
+	u32 gp_job_id = 0;
+	u32 pp_job_id = 0;
+	mali_bool abort_pp = MALI_FALSE;
+	mali_bool abort_gp = MALI_FALSE;
+
+	mali_group_lock(group);
+
+	if (mali_group_is_in_virtual(group)) {
+		/* Group is member of a virtual group, don't touch it! */
+		mali_group_unlock(group);
+		return;
+	}
+
+	gp_job = group->gp_running_job;
+	pp_job = group->pp_running_job;
+
+	/* Record the job ids first; the abort helpers temporarily drop the
+	 * group lock, so decisions are based on this snapshot. */
+	if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
+		MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
+
+		gp_job_id = mali_gp_job_get_id(gp_job);
+		abort_gp = MALI_TRUE;
+	}
+
+	if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
+		MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
+
+		pp_job_id = mali_pp_job_get_id(pp_job);
+		abort_pp = MALI_TRUE;
+	}
+
+	if (abort_gp) {
+		mali_group_abort_gp_job(group, gp_job_id);
+	}
+	if (abort_pp) {
+		mali_group_abort_pp_job(group, pp_job_id);
+	}
+
+	mali_group_remove_session_if_unused(group, session);
+
+	mali_group_unlock(group);
+}
+
+/* Look up the global group table.  Returns the group registered at
+ * @index, or NULL when @index is out of range. */
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+	if (index >= mali_global_num_groups) {
+		return NULL;
+	}
+
+	return mali_global_groups[index];
+}
+
+/* Return the number of groups registered in the global group table. */
+u32 mali_group_get_glob_num_groups(void)
+{
+	return mali_global_num_groups;
+}
+
+/* Bind @session's page directory to this group's MMU.  If the same session
+ * is already bound, only the TLB is zapped (cheaper than a full switch).
+ * Caller must hold the group lock. */
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+
+	if (group->session != session) {
+		/* Different session than last time, so we need to do some work */
+		MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
+		mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+		group->session = session;
+	} else {
+		/* Same session as last time, so no work required */
+		MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
+		mali_mmu_zap_tlb_without_stall(group->mmu);
+	}
+}
+
+/* If this idle group still has @session's page directory bound, replace it
+ * with the empty page directory so the session can be torn down safely.
+ * Caller must hold the group lock. */
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (MALI_GROUP_STATE_IDLE == group->state) {
+		if (group->session == session) {
+			MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
+			MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+			MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+			mali_mmu_activate_empty_page_directory(group->mmu);
+			group->session = NULL;
+		}
+	}
+}
+
+/* Return whether this group is currently powered on.
+ * Caller must hold the group lock. */
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+	return group->power_is_on;
+}
+
+/* Mark a single group as powered on.  Only bookkeeping; the actual power
+ * transition is handled elsewhere.  Caller must hold the group lock. */
+void mali_group_power_on_group(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+			  || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_DISABLED == group->state);
+
+	MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
+
+	group->power_is_on = MALI_TRUE;
+}
+
+/* Mark a single group as powered off.  Clears the session binding so the
+ * powered-off MMU is never written; the power flag itself is only cleared
+ * when @do_power_change is set.  Caller must hold the group lock. */
+void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
+			  || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+			  || MALI_GROUP_STATE_DISABLED == group->state);
+
+	MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
+
+	/* It is necessary to set group->session = NULL so that the powered off MMU is not written
+	 * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
+	 * pending bottom_halves does not access powered off cores. */
+
+	group->session = NULL;
+
+	if (do_power_change) {
+		group->power_is_on = MALI_FALSE;
+	}
+}
+
+/* Mark every enabled group in the global table as powered on.
+ * Disabled groups are expected to remain off. */
+void mali_group_power_on(void)
+{
+	int i;
+	/* NOTE(review): i is int while mali_global_num_groups is u32 --
+	 * signed/unsigned comparison; harmless for realistic group counts. */
+	for (i = 0; i < mali_global_num_groups; i++) {
+		struct mali_group *group = mali_global_groups[i];
+
+		mali_group_lock(group);
+		if (MALI_GROUP_STATE_DISABLED == group->state) {
+			MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+		} else {
+			mali_group_power_on_group(group);
+		}
+		mali_group_unlock(group);
+	}
+	MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
+}
+
+/* Mark every enabled group in the global table as powered off; see
+ * mali_group_power_off_group() for the meaning of @do_power_change. */
+void mali_group_power_off(mali_bool do_power_change)
+{
+	int i;
+
+	for (i = 0; i < mali_global_num_groups; i++) {
+		struct mali_group *group = mali_global_groups[i];
+
+		mali_group_lock(group);
+		if (MALI_GROUP_STATE_DISABLED == group->state) {
+			MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+		} else {
+			mali_group_power_off_group(group, do_power_change);
+		}
+		mali_group_unlock(group);
+	}
+	MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
+}
+
+/* Full recovery reset after a failed job or page fault: stop the core bus,
+ * flush the MMU and clear any page fault, hard-reset the core(s) (member
+ * cores individually for a virtual group, with the broadcast unit disabled
+ * meanwhile), then reset the MMU and drop the session binding.
+ * Caller must hold the group lock. */
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	/* Stop cores, bus stop */
+	if (NULL != group->pp_core) {
+		mali_pp_stop_bus(group->pp_core);
+	} else {
+		mali_gp_stop_bus(group->gp_core);
+	}
+
+	/* Flush MMU and clear page fault (if any) */
+	mali_mmu_activate_fault_flush_page_directory(group->mmu);
+	mali_mmu_page_fault_done(group->mmu);
+
+	/* Wait for cores to stop bus, then do a hard reset on them */
+	if (NULL != group->pp_core) {
+		if (mali_group_is_virtual(group)) {
+			struct mali_group *child, *temp;
+
+			/* Disable the broadcast unit while we do reset directly on the member cores. */
+			mali_bcast_disable(group->bcast_core);
+
+			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+				mali_pp_stop_bus_wait(child->pp_core);
+				mali_pp_hard_reset(child->pp_core);
+			}
+
+			mali_bcast_enable(group->bcast_core);
+		} else {
+			mali_pp_stop_bus_wait(group->pp_core);
+			mali_pp_hard_reset(group->pp_core);
+		}
+	} else {
+		mali_gp_stop_bus_wait(group->gp_core);
+		mali_gp_hard_reset(group->gp_core);
+	}
+
+	/* Reset MMU */
+	err = mali_mmu_reset(group->mmu);
+	MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+	MALI_IGNORE(err);
+
+	group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+/* Append a human-readable dump of this group's state (and its GP/PP core
+ * state and running jobs) to @buf, at most @size bytes.  Returns the
+ * number of characters written.
+ * NOTE(review): n is int and size is u32; if a write truncates so that
+ * n > size, the later (size - n) underflows -- confirm _mali_osk_snprintf
+ * semantics bound n appropriately. */
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+	n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
+	if (group->gp_core) {
+		n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+	}
+	if (group->pp_core) {
+		n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
+					group->pp_running_job, group->pp_running_sub_job);
+	}
+
+	return n;
+}
+#endif
+
+/* Group must be locked when entering this function. Will be unlocked before exiting. */
+/* Handle an MMU page fault for the running job: post-process the job,
+ * clear running state, perform a recovery reset (which also clears the
+ * page fault), and report the job as failed to the relevant scheduler.
+ * The scheduler's job_done callback releases the group lock. */
+static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	if (NULL != group->pp_core) {
+		struct mali_pp_job *pp_job_to_return;
+		u32 pp_sub_job_to_return;
+
+		MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+
+		mali_group_post_process_job_pp(group);
+
+		pp_job_to_return = group->pp_running_job;
+		pp_sub_job_to_return = group->pp_running_sub_job;
+		group->state = MALI_GROUP_STATE_IDLE;
+		group->pp_running_job = NULL;
+
+		mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+		/* Will unlock group. */
+		mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
+	} else {
+		struct mali_gp_job *gp_job_to_return;
+
+		MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+
+		mali_group_post_process_job_gp(group, MALI_FALSE);
+
+		gp_job_to_return = group->gp_running_job;
+		group->state = MALI_GROUP_STATE_IDLE;
+		group->gp_running_job = NULL;
+
+		mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+		/* Will unlock group. */
+		mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+	}
+}
+
+/* MMU interrupt upper half (IRQ context).  Checks whether this MMU raised
+ * the interrupt (the IRQ line may be shared), masks further MMU interrupts
+ * and defers handling to the bottom-half work item -- on the parent group's
+ * work queue when this group is a member of a virtual group.
+ * Returns _MALI_OSK_ERR_OK when the interrupt was ours, _MALI_OSK_ERR_FAULT
+ * otherwise. */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	struct mali_group *group = (struct mali_group *)data;
+	struct mali_mmu_core *mmu = group->mmu;
+	u32 int_stat;
+
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+		goto out;
+	}
+#endif
+
+	/* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+	int_stat = mali_mmu_get_int_status(mmu);
+	if (0 != int_stat) {
+		struct mali_group *parent = group->parent_group;
+
+		/* page fault or bus error, we thread them both in the same way */
+		mali_mmu_mask_all_interrupts(mmu);
+		if (NULL == parent) {
+			_mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+		} else {
+			_mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
+		}
+		err = _MALI_OSK_ERR_OK;
+		goto out;
+	}
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+	return err;
+}
+
+/* MMU interrupt bottom half (work-queue context).  Reads the raw interrupt
+ * status under the group lock and, on a page fault or read bus error,
+ * aborts the running job via mali_group_mmu_page_fault_and_unlock() (which
+ * releases the lock).  Bails out if the core was powered off while the
+ * work item was pending. */
+static void mali_group_bottom_half_mmu(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+	struct mali_mmu_core *mmu = group->mmu;
+	u32 rawstat;
+	MALI_DEBUG_CODE(u32 status);
+
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	mali_group_lock(group);
+
+	/* Bottom halves of virtual-group members are always routed to the
+	 * parent, so this must be a top-level group. */
+	MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+	if (MALI_FALSE == mali_group_power_is_on(group)) {
+		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
+		mali_group_unlock(group);
+		return;
+	}
+
+	rawstat = mali_mmu_get_rawstat(mmu);
+	MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
+
+	MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
+
+	if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+		/* An actual page fault has occurred. */
+#ifdef DEBUG
+		u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
+		/* Pass the u32 fault address directly: the previous
+		 * (void *) cast mismatched the %x conversion, which is
+		 * undefined behaviour in varargs and prints garbage on
+		 * 64-bit kernels. */
+		MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+				     fault_address,
+				     (status >> 6) & 0x1F,
+				     (status & 32) ? "write" : "read",
+				     mmu->hw_core.description));
+#endif
+
+		mali_group_mmu_page_fault_and_unlock(group);
+		return;
+	}
+
+	mali_group_unlock(group);
+}
+
+/* GP interrupt upper half (IRQ context).  If this GP core raised an
+ * interrupt, masks all its IRQs, emits a profiling event and defers the
+ * real handling to the bottom-half work item.  Returns _MALI_OSK_ERR_OK
+ * when the interrupt was ours, _MALI_OSK_ERR_FAULT otherwise (shared IRQ
+ * line). */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	struct mali_group *group = (struct mali_group *)data;
+	struct mali_gp_core *core = group->gp_core;
+	u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+		goto out;
+	}
+#endif
+
+	irq_readout = mali_gp_get_int_stat(core);
+
+	if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+		/* Mask out all IRQs from this core until IRQ is handled */
+		mali_gp_mask_all_interrupts(core);
+
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+
+		/* We do need to handle this in a bottom half */
+		_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+
+		err = _MALI_OSK_ERR_OK;
+		goto out;
+	}
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+	return err;
+}
+
+/* GP interrupt bottom half (work-queue context).  Under the group lock,
+ * reads the raw IRQ status and dispatches: job completion, unexpected
+ * error IRQ (abort), SW timeout (abort), PLBU out-of-memory (hand to the
+ * GP scheduler for a new heap), or a lone END_CMD_LST (re-enable the
+ * remaining interrupt and keep running).  Completion/abort paths release
+ * the group lock via mali_group_complete_gp_and_unlock(). */
+static void mali_group_bottom_half_gp(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+	u32 irq_readout;
+	u32 irq_errors;
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+
+	mali_group_lock(group);
+
+	if (MALI_FALSE == mali_group_power_is_on(group)) {
+		MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
+		mali_group_unlock(group);
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+		return;
+	}
+
+	irq_readout = mali_gp_read_rawstat(group->gp_core);
+
+	MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+
+	/* A GP job is done only when both VS and PLBU have finished and the
+	 * core reports itself inactive. */
+	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
+		u32 core_status = mali_gp_read_core_status(group->gp_core);
+		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
+			MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
+			group->core_timed_out = MALI_FALSE;
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+						      0, _mali_osk_get_tid(), 0, 0, 0);
+
+			mali_group_complete_gp_and_unlock(group, MALI_TRUE);
+			return;
+		}
+	}
+
+	/*
+	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
+	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
+	 */
+	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+	if (0 != irq_errors) {
+		MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+		group->core_timed_out = MALI_FALSE;
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+					      0, _mali_osk_get_tid(), 0, 0, 0);
+
+		mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+		return;
+	} else if (group->core_timed_out) { /* SW timeout */
+		group->core_timed_out = MALI_FALSE;
+		/* Only abort if the timeout timer really expired (it may have
+		 * been re-armed) and a job is still running. */
+		if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
+			MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
+
+			mali_group_complete_gp_and_unlock(group, MALI_FALSE);
+			return;
+		}
+	} else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+		/* GP wants more memory in order to continue. */
+		MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
+
+		group->state = MALI_GROUP_STATE_OOM;
+		mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+		mali_gp_scheduler_oom(group, group->gp_running_job);
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+		return;
+	}
+
+	/*
+	 * The only way to get here is if we only got one of two needed END_CMD_LST
+	 * interrupts. Enable all but not the complete interrupt that has been
+	 * received and continue to run.
+	 */
+	mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
+	mali_group_unlock(group);
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Post-process the running GP job (on completion, suspend or abort): stop
+ * the timeout timer, harvest performance counters, emit the matching
+ * profiling events (SUSPEND vs STOP depending on @suspend), and record the
+ * current PLBU heap allocation pointer so the job can be resumed later.
+ * No-op beyond the timer if no GP job is running. */
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+{
+	/* Stop the timeout timer. */
+	_mali_osk_timer_del_async(group->timeout_timer);
+
+	if (NULL == group->gp_running_job) {
+		/* Nothing to do */
+		return;
+	}
+
+	mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+
+#if defined(CONFIG_MALI400_PROFILING)
+	if (suspend) {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+					      0, 0);
+	} else {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+					      0, 0);
+
+		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+			mali_group_report_l2_cache_counters_per_core(group, 0);
+	}
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+	trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+			       mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+	/* Remember where the PLBU heap allocator got to, needed to resume
+	 * the job after an OOM suspend. */
+	mali_gp_job_set_current_heap_addr(group->gp_running_job,
+					  mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+}
+
+/* PP interrupt upper half (IRQ context).  Masks this core's IRQs when it
+ * raised the interrupt, and either completes the job directly in the upper
+ * half (when MALI_UPPER_HALF_SCHEDULING is enabled and the job finished
+ * cleanly) or defers to the bottom-half work item.  Returns
+ * _MALI_OSK_ERR_OK when the interrupt was handled by us,
+ * _MALI_OSK_ERR_FAULT otherwise (shared IRQ line, or the interrupt belongs
+ * to the parent virtual group). */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	struct mali_group *group = (struct mali_group *)data;
+	struct mali_pp_core *core = group->pp_core;
+	u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+		goto out;
+	}
+#endif
+
+	/*
+	 * For Mali-450 there is one particular case we need to watch out for:
+	 *
+	 * Criteria 1) this function call can be due to a shared interrupt,
+	 * and not necessary because this core signaled an interrupt.
+	 * Criteria 2) this core is a part of a virtual group, and thus it should
+	 * not do any post processing.
+	 * Criteria 3) this core has actually indicated that is has completed by
+	 * having set raw_stat/int_stat registers to != 0
+	 *
+	 * If all this criteria is meet, then we could incorrectly start post
+	 * processing on the wrong group object (this should only happen on the
+	 * parent group)
+	 */
+#if !defined(MALI_UPPER_HALF_SCHEDULING)
+	if (mali_group_is_in_virtual(group)) {
+		/*
+		 * This check is done without the group lock held, which could lead to
+		 * a potential race. This is however ok, since we will safely re-check
+		 * this with the group lock held at a later stage. This is just an
+		 * early out which will strongly benefit shared IRQ systems.
+		 */
+		err = _MALI_OSK_ERR_OK;
+		goto out;
+	}
+#endif
+
+	irq_readout = mali_pp_get_int_stat(core);
+	if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
+		/* Mask out all IRQs from this core until IRQ is handled */
+		mali_pp_mask_all_interrupts(core);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		/* Currently no support for this interrupt event for the virtual PP core */
+		if (!mali_group_is_virtual(group)) {
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
+						      MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
+						      irq_readout, 0, 0, 0, 0);
+		}
+#endif
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+		/* Check if job is complete without errors */
+		if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+						      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+			MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
+
+			mali_group_lock(group);
+
+			/* Check if job is complete without errors, again, after taking the group lock */
+			irq_readout = mali_pp_read_rawstat(core);
+			if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
+				/* Raced with another interrupt source -- let
+				 * the bottom half sort it out. */
+				mali_pp_enable_interrupts(core);
+				mali_group_unlock(group);
+				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+							      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+							      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+							      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+				err = _MALI_OSK_ERR_OK;
+				goto out;
+			}
+
+			if (mali_group_is_virtual(group)) {
+				u32 status_readout = mali_pp_read_status(group->pp_core);
+				if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
+					MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+					mali_pp_enable_interrupts(core);
+					mali_group_unlock(group);
+					_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+								      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+								      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+								      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+					err = _MALI_OSK_ERR_OK;
+					goto out;
+				}
+			}
+
+			if (mali_group_is_in_virtual(group)) {
+				/* We're member of a virtual group, so interrupt should be handled by the virtual group */
+				mali_pp_enable_interrupts(core);
+				mali_group_unlock(group);
+				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+							      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+							      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+							      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+				err = _MALI_OSK_ERR_FAULT;
+				goto out;
+			}
+
+			group->core_timed_out = MALI_FALSE;
+
+			mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
+
+			/* No need to enable interrupts again, since the core will be reset while completing the job */
+
+			MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
+
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+						      0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+			err = _MALI_OSK_ERR_OK;
+			goto out;
+		}
+#endif
+
+		/* We do need to handle this in a bottom half */
+		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+		err = _MALI_OSK_ERR_OK;
+		goto out;
+	}
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+	return err;
+}
+
+static void mali_group_bottom_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_pp_core *core = group->pp_core;
+ u32 irq_readout;
+ u32 irq_errors;
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ mali_group_lock(group);
+
+ if (mali_group_is_in_virtual(group)) {
+ /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ if (MALI_FALSE == mali_group_power_is_on(group)) {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ irq_readout = mali_pp_read_rawstat(group->pp_core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+
+ /* Check if job is complete without errors */
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
+ if (mali_group_is_virtual(group)) {
+ u32 status_readout = mali_pp_read_status(group->pp_core);
+
+ if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
+ MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ }
+
+ if (!group->core_timed_out) {
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+ group->core_timed_out = MALI_FALSE;
+
+ mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ }
+
+ /*
+ * Now lets look at the possible error cases (IRQ indicating error or timeout)
+ * END_OF_FRAME and HANG interrupts are not considered error.
+ */
+ irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
+ if (0 != irq_errors) {
+ MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
+ irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+ group->core_timed_out = MALI_FALSE;
+
+ mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ } else if (group->core_timed_out) { /* SW timeout */
+ group->core_timed_out = MALI_FALSE;
+ if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
+ MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
+ mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
+
+ mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
+ } else {
+ mali_group_unlock(group);
+ }
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ /*
+ * We should never get here, re-enable interrupts and continue
+ */
+ if (0 == irq_readout) {
+ MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ } else {
+ MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ }
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Common PP-job post-processing: stop the SW timeout timer, read back
+ * per-core performance counters and, when CONFIG_MALI400_PROFILING is
+ * set, emit the HW stop events and L2 cache counter reports.
+ * Must be called with the group lock held (asserted below). */
+static void mali_group_post_process_job_pp(struct mali_group *group)
+{
+	MALI_ASSERT_GROUP_LOCKED(group);
+
+	/* Stop the timeout timer. */
+	_mali_osk_timer_del_async(group->timeout_timer);
+
+	/* Nothing to do when no PP job is attached to this group. */
+	if (NULL != group->pp_running_job) {
+		if (MALI_TRUE == mali_group_is_virtual(group)) {
+			struct mali_group *child;
+			struct mali_group *temp;
+
+			/* update performance counters from each physical pp core within this virtual group */
+			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+				mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+			}
+
+#if defined(CONFIG_MALI400_PROFILING)
+			/* send profiling data per physical core */
+			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+				/* NOTE: counter values are per child core, while the
+				 * counter sources are taken from the running sub job. */
+				_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+							      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+							      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+							      mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+							      mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+							      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+							      0, 0);
+
+				trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+						       0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+						       mali_pp_job_get_frame_builder_id(group->pp_running_job),
+						       mali_pp_job_get_flush_id(group->pp_running_job));
+			}
+			/* Report counters for each referenced L2 cache, but only
+			 * when both of its counter sources are enabled. */
+			if (0 != group->l2_cache_core_ref_count[0]) {
+				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+				}
+			}
+			if (0 != group->l2_cache_core_ref_count[1]) {
+				if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+				    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+					mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+				}
+			}
+
+#endif
+		} else {
+			/* update performance counters for a physical group's pp core */
+			mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+						      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+						      mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+						      mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+						      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+						      0, 0);
+
+			trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+					       mali_pp_job_get_frame_builder_id(group->pp_running_job), mali_pp_job_get_flush_id(group->pp_running_job));
+
+			/* A physical group uses l2_cache_core[0] only. */
+			if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+			    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+				mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+			}
+#endif
+		}
+	}
+}
+
+/* Timeout timer callback: flag the group as timed out and defer the
+ * actual handling to the GP or PP interrupt bottom-half worker, which
+ * checks core_timed_out to distinguish a SW timeout from a real IRQ. */
+static void mali_group_timeout(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+
+	group->core_timed_out = MALI_TRUE;
+
+	/* A group drives either a GP core or a PP core; pick the
+	 * matching bottom-half work item. */
+	if (NULL != group->gp_core) {
+		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
+		_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+	} else {
+		MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
+		_mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+	}
+}
+
+/* Zap (invalidate) the MMU TLB of this group if \a session is the one
+ * currently bound to it.  If the zap fails, fall back to the page
+ * fault handling path, which unlocks the group itself. */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	/* Early out - safe even if mutex is not held */
+	if (group->session != session) return;
+
+	mali_group_lock(group);
+
+	mali_group_remove_session_if_unused(group, session);
+
+	/* Re-check under the lock: the call above may have detached
+	 * the session if it was unused. */
+	if (group->session == session) {
+		/* The Zap also does the stall and disable_stall */
+		mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+		if (MALI_TRUE != zap_success) {
+			MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+
+			/* Unlocks the group before returning. */
+			mali_group_mmu_page_fault_and_unlock(group);
+			return;
+		}
+	}
+
+	mali_group_unlock(group);
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+/* Emit a single profiling event carrying the counter sources/values of
+ * the L2 cache instance identified by \a core_num (0, 1 or 2).  Unknown
+ * core numbers fall back to the L20 event channel. */
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+	u32 source0 = 0;
+	u32 value0 = 0;
+	u32 source1 = 0;
+	u32 value1 = 0;
+	u32 profiling_channel = 0;
+
+	/* Select the profiling event channel dedicated to this L2 core. */
+	switch (core_num) {
+	case 0:
+		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+				    MALI_PROFILING_EVENT_CHANNEL_GPU |
+				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+		break;
+	case 1:
+		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+				    MALI_PROFILING_EVENT_CHANNEL_GPU |
+				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+		break;
+	case 2:
+		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+				    MALI_PROFILING_EVENT_CHANNEL_GPU |
+				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+		break;
+	default:
+		profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+				    MALI_PROFILING_EVENT_CHANNEL_GPU |
+				    MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+		break;
+	}
+
+	/* Fetch the counter values from whichever of the (up to two) L2
+	 * cache cores attached to this group carries the requested id.
+	 * L2 core 0 is always expected in slot 0. */
+	if (0 == core_num) {
+		mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+	}
+	if (1 == core_num) {
+		if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+		} else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+		}
+	}
+	if (2 == core_num) {
+		if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+		} else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+			mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+		}
+	}
+
+	/* Both counter sources are packed into the second event word. */
+	_mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/* Query whether the group is usable by the schedulers.  The group lock
+ * is taken so the state is read consistently. */
+mali_bool mali_group_is_enabled(struct mali_group *group)
+{
+	mali_bool enabled = MALI_TRUE;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	mali_group_lock(group);
+	/* Only the explicit DISABLED state counts as not enabled. */
+	if (MALI_GROUP_STATE_DISABLED == group->state) {
+		enabled = MALI_FALSE;
+	}
+	mali_group_unlock(group);
+
+	return enabled;
+}
+
+/* Enable the group by handing it back to the scheduler that matches its
+ * core type: PP groups go to the PP scheduler, otherwise the GP
+ * scheduler.  The group must own at least one core (asserted). */
+void mali_group_enable(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
+			  || NULL != mali_group_get_gp_core(group));
+
+	if (NULL != mali_group_get_pp_core(group)) {
+		mali_pp_scheduler_enable_group(group);
+	} else {
+		mali_gp_scheduler_enable_group(group);
+	}
+}
+
+/* Disable the group via the scheduler matching its core type (mirror
+ * of mali_group_enable).  The group must own at least one core. */
+void mali_group_disable(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
+			  || NULL != mali_group_get_gp_core(group));
+
+	if (NULL != mali_group_get_pp_core(group)) {
+		mali_pp_scheduler_disable_group(group);
+	} else {
+		mali_gp_scheduler_disable_group(group);
+	}
+}
+
+/* Return the PM domain associated with the group's L2 cache.  Only
+ * valid for groups with a single L2 cache core (asserted). */
+static struct mali_pm_domain *mali_group_get_l2_domain(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+
+	/* l2_cache_core[0] stores the related l2 domain */
+	return group->l2_cache_core[0]->pm_domain;
+}
+
+/* Take a reference on both PM domains the group depends on: first the
+ * L2 cache domain, then the group's own core domain. */
+void mali_group_get_pm_domain_ref(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Get group used l2 domain ref */
+	mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
+	/* Get group used core domain ref */
+	mali_pm_domain_ref_get(group->pm_domain);
+}
+
+/* Drop the PM domain references taken by mali_group_get_pm_domain_ref,
+ * in reverse order: core domain first, then the L2 cache domain. */
+void mali_group_put_pm_domain_ref(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Put group used core domain ref */
+	mali_pm_domain_ref_put(group->pm_domain);
+	/* Put group used l2 domain ref */
+	mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_group.h b/drivers/parrot/gpu/mali400/common/mali_group.h
new file mode 100644
index 00000000000000..c7301f252a16ab
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_group.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+
+/** @brief A mali group object represents a MMU and a PP and/or a GP core.
+ *
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+
+enum mali_group_core_state {
+ MALI_GROUP_STATE_IDLE,
+ MALI_GROUP_STATE_WORKING,
+ MALI_GROUP_STATE_OOM,
+ MALI_GROUP_STATE_IN_VIRTUAL,
+ MALI_GROUP_STATE_JOINING_VIRTUAL,
+ MALI_GROUP_STATE_LEAVING_VIRTUAL,
+ MALI_GROUP_STATE_DISABLED,
+};
+
+/* Forward declaration from mali_pm_domain.h */
+struct mali_pm_domain;
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group {
+	struct mali_mmu_core *mmu;		/* MMU shared by the cores in this group */
+	struct mali_session_data *session;	/* Session currently bound to the group */
+
+	mali_bool power_is_on;
+	enum mali_group_core_state state;
+
+	/* A group owns a GP core and/or a PP core; the unused one is NULL. */
+	struct mali_gp_core *gp_core;
+	struct mali_gp_job *gp_running_job;	/* GP job in flight, or NULL */
+
+	struct mali_pp_core *pp_core;
+	struct mali_pp_job *pp_running_job;	/* PP job in flight, or NULL */
+	u32 pp_running_sub_job;			/* Sub job index of pp_running_job */
+
+	/* Up to two L2 caches may back the group, with per-cache refcounts. */
+	struct mali_l2_cache_core *l2_cache_core[2];
+	u32 l2_cache_core_ref_count[2];
+
+	/* Mali-450 only: DLBU/broadcast units owned by virtual groups. */
+	struct mali_dlbu_core *dlbu_core;
+	struct mali_bcast_unit *bcast_core;
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+	_mali_osk_spinlock_irq_t *lock;
+#else
+	_mali_osk_spinlock_t *lock;
+#endif
+
+	_mali_osk_list_t pp_scheduler_list;
+
+	/* List used for virtual groups. For a virtual group, the list represents the
+	 * head element. */
+	_mali_osk_list_t group_list;
+
+	struct mali_group *pm_domain_list;
+	struct mali_pm_domain *pm_domain;	/* PM domain of the group's cores */
+
+	/* Parent virtual group (if any) */
+	struct mali_group *parent_group;
+
+	/* Deferred interrupt handling work items, one per unit type. */
+	_mali_osk_wq_work_t *bottom_half_work_mmu;
+	_mali_osk_wq_work_t *bottom_half_work_gp;
+	_mali_osk_wq_work_t *bottom_half_work_pp;
+
+	/* SW job timeout timer; core_timed_out is set by its callback and
+	 * consumed by the bottom halves. */
+	_mali_osk_timer_t *timeout_timer;
+	mali_bool core_timed_out;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
+
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+
+/** @brief Check if a group is a virtual group.
+ *
+ * A group is virtual when it owns a DLBU core; builds without
+ * CONFIG_MALI450 have no virtual groups at all. */
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+	return (NULL != group->dlbu_core);
+#else
+	return MALI_FALSE;
+#endif
+}
+
+/** @brief Check if a group is considered as part of a virtual group
+ *
+ * @note A group is considered to be "part of" a virtual group also during the transition
+ * in to / out of the virtual group.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+#if defined(CONFIG_MALI450)
+ return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The expection is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+
+/** @brief Start virtual group Job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a subjob from a particular on a specific PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief remove all the unused groups in tmp_unused group list, so that the group is in consistent status.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocte more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+mali_bool mali_group_power_is_on(struct mali_group *group);
+void mali_group_power_on_group(struct mali_group *group);
+void mali_group_power_off_group(struct mali_group *group, mali_bool power_status);
+void mali_group_power_on(void);
+
+/** @brief Prepare group for power off
+ *
+ * Update the group's state and prepare for the group to be powered off.
+ *
+ * If do_power_change is MALI_FALSE group session will be set to NULL so that
+ * no more activity will happen to this group, but the power state flag will be
+ * left unchanged.
+ *
+ * @do_power_change MALI_TRUE if power status is to be updated
+ */
+void mali_group_power_off(mali_bool do_power_change);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+/* MMU-related functions */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+
+/* GP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+
+/* PP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+/** @brief Check if group is enabled
+ *
+ * @param group group to check
+ * @return MALI_TRUE if enabled, MALI_FALSE if not
+ */
+mali_bool mali_group_is_enabled(struct mali_group *group);
+
+/** @brief Enable group
+ *
+ * An enabled job is put on the idle scheduler list and can be used to handle jobs. Does nothing if
+ * group is already enabled.
+ *
+ * @param group group to enable
+ */
+void mali_group_enable(struct mali_group *group);
+
+/** @brief Disable group
+ *
+ * A disabled group will no longer be used by the scheduler. If part of a virtual group, the group
+ * will be removed before being disabled. Cores part of a disabled group is safe to power down.
+ *
+ * @param group group to disable
+ */
+void mali_group_disable(struct mali_group *group);
+
+/** @brief Disable a virtual group if it has no member groups left.
+ *
+ * Must be called with the group lock held on a virtual group (both
+ * asserted).  When empty, the group is moved to the DISABLED state and
+ * detached from its session.
+ *
+ * @return MALI_TRUE if the group was empty (and thus disabled) */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+{
+	mali_bool empty = MALI_FALSE;
+
+	MALI_ASSERT_GROUP_LOCKED(group);
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+	if (_mali_osk_list_empty(&group->group_list)) {
+		group->state = MALI_GROUP_STATE_DISABLED;
+		group->session = NULL;
+
+		empty = MALI_TRUE;
+	}
+
+	return empty;
+}
+
+/** @brief Re-enable an empty, previously disabled virtual group.
+ *
+ * Must be called with the group lock held on a virtual group.  An
+ * empty group is expected to be in the DISABLED state (asserted) and
+ * is moved back to IDLE.
+ *
+ * @return MALI_TRUE if the group was empty (and thus re-enabled) */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+{
+	mali_bool empty = MALI_FALSE;
+
+	MALI_ASSERT_GROUP_LOCKED(group);
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+	if (_mali_osk_list_empty(&group->group_list)) {
+		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+		group->state = MALI_GROUP_STATE_IDLE;
+
+		empty = MALI_TRUE;
+	}
+
+	return empty;
+}
+
+
+/* Get group used l2 domain and core domain ref */
+void mali_group_get_pm_domain_ref(struct mali_group *group);
+/* Put group used l2 domain and core domain ref */
+void mali_group_put_pm_domain_ref(struct mali_group *group);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_hw_core.c b/drivers/parrot/gpu/mali400/common/mali_hw_core.c
new file mode 100644
index 00000000000000..e5004abdb12542
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_hw_core.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
+/* Request and map the register window described by \a resource and
+ * fill in the common HW core bookkeeping.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the
+ *         memory region could not be requested or mapped */
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+	core->phys_addr = resource->base;
+	core->phys_offset = resource->base - _mali_osk_resource_base_address();
+	core->description = resource->description;
+	core->size = reg_size;
+
+	/* Sanity check: the core must sit above the Mali base address. */
+	MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+		core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+		if (NULL != core->mapped_registers) {
+			return _MALI_OSK_ERR_OK;
+		} else {
+			MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+		}
+		/* Mapping failed: release the requested region again. */
+		_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+	} else {
+		MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Tear down a HW core created with mali_hw_core_create: unmap the
+ * register window and release the requested memory region. */
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+	_mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+	core->mapped_registers = NULL;
+	_mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_hw_core.h b/drivers/parrot/gpu/mali400/common/mali_hw_core.h
new file mode 100644
index 00000000000000..341d3037b80861
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_hw_core.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+ u32 phys_addr; /**< Physical address of the registers */
+ u32 phys_offset; /**< Offset from start of Mali to registers */
+ u32 size; /**< Size of registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char *description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+/* Read a 32-bit register at \a relative_address within the core's
+ * mapped register window, tracing the access at debug level 6. */
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+	u32 read_val;
+	read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+	MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, read_val));
+	return read_val;
+}
+
+/* Write a 32-bit register through the relaxed I/O write primitive
+ * (_mali_osk_mem_iowrite32_relaxed), tracing at debug level 6. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register is only written when new_val differs from old_val.
+ * Note: old_val is passed by value and is NOT updated here; the caller
+ * is responsible for refreshing its cached old value after the write. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+	/* Trace with the correct function name (was "register_write_relaxed",
+	 * a copy-paste from the unconditional variant). */
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed_conditional for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	/* Skip the bus access entirely when the value is unchanged. */
+	if (old_val != new_val) {
+		_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+	}
+}
+
+
+/* Write a 32-bit register through the ordered I/O write primitive
+ * (_mali_osk_mem_iowrite32), tracing at debug level 6. */
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	_mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+/* Write \a nr_of_regs consecutive 32-bit registers starting at
+ * \a relative_address, one relaxed write per register. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description, relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i < nr_of_regs; i++) {
+		mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+	}
+}
+
+/* Conditionally write a set of registers.
+ * Each register is only written when its new value differs from the
+ * corresponding entry in old_array.  Note: old_array is const and is
+ * NOT updated here; the caller is responsible for refreshing its
+ * cached old values after the write. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+	u32 i;
+	/* Trace with the correct function name (was "register_write_array:",
+	 * identical to the unconditional variant's message). */
+	MALI_DEBUG_PRINT(6, ("register_write_array_relaxed_conditional: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description, relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i < nr_of_regs; i++) {
+		if (old_array[i] != write_array[i]) {
+			mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+		}
+	}
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_common.h b/drivers/parrot/gpu/mali400/common/mali_kernel_common.h
new file mode 100644
index 00000000000000..f6e14f7810b403
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_common.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(condition, (X)) If the asserted condition is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function: MALI_PRINTF(...) is routed to _mali_osk_dbgmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+/**
+* Fundamental error macro. Reports an error code. This is abstracted to allow us to
+* easily switch to a different error reporting method if we want, and also to allow
+* us to search for error returns easily.
+*
+* Note no closing semicolon - this is supplied in typical usage:
+*
+* MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than
+ * _MALI_OSK_NO_ERROR, then the value is returned from the enclosing function
+ * as an error code. This effectively acts as a guard clause, and propagates
+ * error values up the call stack. This uses a temporary value to ensure that
+ * the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error,
+ * this error will be returned without evaluating the expression in
+ * MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(string,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_core.c b/drivers/parrot/gpu/mali400/common/mali_kernel_core.c
new file mode 100644
index 00000000000000..770cf662119dea
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_core.c
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_dma.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+unsigned int mali_shared_mem_size = 0;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+ global_gpu_base_address = _mali_osk_resource_base_address();
+ if (0 == global_gpu_base_address) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+ switch (resource_pp->base - global_gpu_base_address) {
+ case 0x08000:
+ case 0x20000: /* fall-through for aliased mapping */
+ return 0x01;
+ case 0x0A000:
+ case 0x22000: /* fall-through for aliased mapping */
+ return 0x02;
+ case 0x0C000:
+ case 0x24000: /* fall-through for aliased mapping */
+ return 0x04;
+ case 0x0E000:
+ case 0x26000: /* fall-through for aliased mapping */
+ return 0x08;
+ case 0x28000:
+ return 0x10;
+ case 0x2A000:
+ return 0x20;
+ case 0x2C000:
+ return 0x40;
+ case 0x2E000:
+ return 0x80;
+ default:
+ return 0;
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ /*
+ * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
+ * Look at the version register for the first PP core in order to determine the GPU HW revision.
+ */
+
+ u32 first_pp_offset;
+ _mali_osk_resource_t first_pp_resource;
+
+ /* Find out where the first PP core is located */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
+ /* Mali-300/400/450 */
+ first_pp_offset = 0x8000;
+ } else {
+ /* Mali-200 */
+ first_pp_offset = 0x0000;
+ }
+
+ /* Find the first PP core resource (again) */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+ if (NULL != pp_core) {
+ u32 pp_version = mali_pp_core_get_version(pp_core);
+ mali_group_delete(group);
+
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16) {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+ _mali_osk_abort();
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+static void mali_resource_count(u32 *pp_count, u32 *l2_count)
+{
+ *pp_count = 0;
+ *l2_count = 0;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
+ ++(*pp_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
+ ++(*pp_count);
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
+ ++(*l2_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
+ ++(*l2_count);
+ }
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
+ ++(*l2_count);
+ }
+}
+
+static void mali_delete_groups(void)
+{
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(0);
+ while (NULL != group) {
+ mali_group_delete(group);
+ group = mali_group_get_glob_group(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+ struct mali_l2_cache_core *l2;
+
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ while (NULL != l2) {
+ mali_l2_cache_delete(l2);
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (NULL != resource) {
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+ l2_cache = mali_l2_cache_create(resource);
+ if (NULL == l2_cache) {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return NULL;
+ }
+ }
+ MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+
+ return l2_cache;
+}
+
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (mali_is_mali400()) {
+ _mali_osk_resource_t l2_resource;
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ l2_cache = mali_create_l2_cache_core(&l2_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+ } else if (mali_is_mali450()) {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t l2_gp_resource;
+ _mali_osk_resource_t l2_pp_grp0_resource;
+ _mali_osk_resource_t l2_pp_grp1_resource;
+
+ /* Make cluster for GP's L2 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Find corresponding l2 domain */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the group object */
+ group = mali_group_create(cache, NULL, NULL);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ return NULL;
+ }
+
+ /* Create the MMU object inside group */
+ mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+ if (NULL == mmu) {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+
+ if (NULL != resource_gp) {
+ /* Create the GP core object inside this group */
+ struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+ if (NULL == gp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ if (NULL != resource_pp) {
+ struct mali_pp_core *pp_core;
+
+ /* Create the PP core object inside this group */
+ pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+ if (NULL == pp_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ /* Reset group */
+ mali_group_lock(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+
+ return group;
+}
+
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+ _mali_osk_resource_t *resource_pp_bcast,
+ _mali_osk_resource_t *resource_dlbu,
+ _mali_osk_resource_t *resource_bcast)
+{
+ struct mali_mmu_core *mmu_pp_bcast_core;
+ struct mali_pp_core *pp_bcast_core;
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+ /* Create the DLBU core object */
+ dlbu_core = mali_dlbu_create(resource_dlbu);
+ if (NULL == dlbu_core) {
+ MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the Broadcast unit core */
+ bcast_core = mali_bcast_unit_create(resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+#if defined(DEBUG)
+ /* Get a physical PP group to temporarily add to broadcast unit. IRQ
+ * verification needs a physical group in the broadcast unit to test
+ * the broadcast unit interrupt line. */
+ {
+ struct mali_group *phys_group = NULL;
+ int i;
+ for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+ phys_group = mali_group_get_glob_group(i);
+ if (NULL != mali_group_get_pp_core(phys_group)) break;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+ /* Add the group temporarily to the broadcast, and update the
+ * broadcast HW. Since the HW is not updated when removing the
+ * group the IRQ check will work when the virtual PP is created
+ * later.
+ *
+ * When the virtual group gets populated, the actually used
+ * groups will be added to the broadcast unit and the HW will
+ * be updated.
+ */
+ mali_bcast_add_group(bcast_core, phys_group);
+ mali_bcast_reset(bcast_core);
+ mali_bcast_remove_group(bcast_core, phys_group);
+ }
+#endif /* DEBUG */
+ group = mali_group_create(NULL, dlbu_core, bcast_core);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+ mali_bcast_unit_delete(bcast_core);
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the MMU object inside group */
+ mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+ if (NULL == mmu_pp_bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the PP core object inside this group */
+ pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+ if (NULL == pp_bcast_core) {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ struct mali_group *group;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+
+ _mali_osk_resource_t resource_gp;
+ _mali_osk_resource_t resource_gp_mmu;
+ _mali_osk_resource_t resource_pp[8];
+ _mali_osk_resource_t resource_pp_mmu[8];
+ _mali_osk_resource_t resource_pp_mmu_bcast;
+ _mali_osk_resource_t resource_pp_bcast;
+ _mali_osk_resource_t resource_dlbu;
+ _mali_osk_resource_t resource_bcast;
+ _mali_osk_errcode_t resource_gp_found;
+ _mali_osk_errcode_t resource_gp_mmu_found;
+ _mali_osk_errcode_t resource_pp_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+ _mali_osk_errcode_t resource_pp_bcast_found;
+ _mali_osk_errcode_t resource_dlbu_found;
+ _mali_osk_errcode_t resource_bcast_found;
+
+ if (!(mali_is_mali400() || mali_is_mali450())) {
+ /* No known HW core */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+ /* Group settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.max_job_runtime) {
+ mali_max_job_runtime = data.max_job_runtime;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+ /* Mali-450 have separate L2s for GP, and PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+
+
+ if (mali_is_mali450()) {
+ resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+
+ if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_dlbu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+ /* Missing mandatory core(s) for Mali-450 */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != resource_gp_found ||
+ _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add GP in group, for PMU ref count */
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Find corresponding pp domain */
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
+
+ mali_inited_pp_cores_group_1++;
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+ if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
+ mali_inited_pp_cores_group_1++;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+ if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+ mali_inited_pp_cores_group_2++;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+ _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+
+ mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+ mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+ MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+ MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_create_pm_domains(void)
+{
+ int i;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == mali_pmu_get_domain_mask(i)) continue;
+
+ if (NULL == mali_pm_domain_create(mali_pmu_get_domain_mask(i))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Probe the fixed register-block offsets of each Mali core and fill in the
+ * driver's default PMU power-domain mask table.  Mali-400 and Mali-450 use
+ * different PMU bit layouts; note that on Mali-450, PP1-PP3 all share
+ * domain bit 2 and PP4-PP7 plus their L2 all share domain bit 3.
+ * Only used when the platform supplied no pmu_domain_config of its own. */
+static void mali_use_default_pm_domain_config(void)
+{
+	u32 pp_count_gr1 = 0;
+	u32 pp_count_gr2 = 0;
+	u32 l2_count = 0;
+
+	MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+	/* GP core */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
+		mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
+	}
+
+	/* PP0 - PP3 core */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
+		++pp_count_gr1;
+
+		if (mali_is_mali400()) {
+			mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 2);
+		} else if (mali_is_mali450()) {
+			mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 1);
+		}
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
+		++pp_count_gr1;
+
+		if (mali_is_mali400()) {
+			mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 3);
+		} else if (mali_is_mali450()) {
+			mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 2);
+		}
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
+		++pp_count_gr1;
+
+		if (mali_is_mali400()) {
+			mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 4);
+		} else if (mali_is_mali450()) {
+			mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 2);
+		}
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
+		++pp_count_gr1;
+
+		if (mali_is_mali400()) {
+			mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 5);
+		} else if (mali_is_mali450()) {
+			mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 2);
+		}
+	}
+
+	/* PP4 - PP7 */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
+		++pp_count_gr2;
+
+		mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01 << 3);
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
+		++pp_count_gr2;
+
+		mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01 << 3);
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
+		++pp_count_gr2;
+
+		mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01 << 3);
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
+		++pp_count_gr2;
+
+		mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01 << 3);
+	}
+
+	/* L2gp/L2PP0/L2PP4 */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
+		++l2_count;
+
+		if (mali_is_mali400()) {
+			mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 1);
+		} else if (mali_is_mali450()) {
+			mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 0);
+		}
+	}
+
+	/* NOTE(review): offset 0x1000 for the second L2 looks unlike the other
+	 * 0x10000-range L2 offsets — presumably correct for Mali-450's L2.1,
+	 * but worth confirming against the TRM. */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
+		++l2_count;
+
+		mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01 << 1);
+	}
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
+		++l2_count;
+
+		mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01 << 3);
+	}
+
+	MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
+}
+
+static void mali_set_pmu_global_domain_config(void)
+{
+	_mali_osk_device_data data = { 0, };
+	int i = 0;
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+		/* Scan for the first non-zero entry to decide whether the platform
+		 * supplied a customized PMU domain configuration. */
+		while (i < MALI_MAX_NUMBER_OF_DOMAINS && 0 == data.pmu_domain_config[i]) {
+			i++;
+		}
+
+		if (i == MALI_MAX_NUMBER_OF_DOMAINS) {
+			/* All entries zero: fall back to the built-in defaults. */
+			mali_use_default_pm_domain_config();
+		} else {
+			/* Adopt the platform-provided configuration wholesale. */
+			mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
+		}
+	}
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+	_mali_osk_resource_t resource_pmu;
+
+	MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+	/* The PMU register block, when present, lives at offset 0x02000. */
+	if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+		/* It's ok if the PMU doesn't exist */
+		return _MALI_OSK_ERR_OK;
+	}
+
+	/* Resolve the domain-mask table before creating the PMU object. */
+	mali_set_pmu_global_domain_config();
+
+	if (NULL == mali_pmu_create(&resource_pmu)) {
+		MALI_PRINT_ERROR(("Failed to create PMU\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_dma(void)
+{
+	_mali_osk_resource_t resource_dma;
+
+	/* The DMA unit register block lives at offset 0x12000. */
+	if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
+		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+	}
+
+	if (NULL == mali_dma_create(&resource_dma)) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Resolve the driver's memory configuration and register it with the memory
+ * subsystem.  Module parameters take precedence; when all of them are zero,
+ * platform device data is consulted instead.  Registers (in order) dedicated
+ * memory, shared OS memory, and the external frame-buffer range.  On any
+ * registration failure the memory subsystem is torn down before returning. */
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+	_mali_osk_errcode_t ret;
+
+	if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+		/* Memory settings are not overridden by module parameters, so use device settings */
+		_mali_osk_device_data data = { 0, };
+
+		if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+			/* Use device specific settings (if defined) */
+			mali_dedicated_mem_start = data.dedicated_mem_start;
+			mali_dedicated_mem_size = data.dedicated_mem_size;
+			mali_shared_mem_size = data.shared_mem_size;
+		}
+
+		if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+			/* No GPU memory specified */
+			return _MALI_OSK_ERR_INVALID_ARGS;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+				     mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+	} else {
+		MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+				     mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+	}
+
+	if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+		/* Dedicated memory */
+		ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+		if (_MALI_OSK_ERR_OK != ret) {
+			MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	if (0 < mali_shared_mem_size) {
+		/* Shared OS memory */
+		ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+		if (_MALI_OSK_ERR_OK != ret) {
+			MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	if (0 == mali_fb_start && 0 == mali_fb_size) {
+		/* Frame buffer settings are not overridden by module parameters, so use device settings */
+		_mali_osk_device_data data = { 0, };
+
+		if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+			/* Use device specific settings (if defined) */
+			mali_fb_start = data.fb_start;
+			mali_fb_size = data.fb_size;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+				     mali_fb_size, mali_fb_start));
+	} else {
+		MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+				     mali_fb_size, mali_fb_start));
+	}
+
+	if (0 != mali_fb_size) {
+		/* Register frame buffer */
+		ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+		if (_MALI_OSK_ERR_OK != ret) {
+			MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+static void mali_detect_gpu_class(void)
+{
+	u32 pp_cores = 0;
+	u32 l2_caches = 0;
+
+	/* More than one L2 cache identifies the GPU as a Mali-450. */
+	mali_resource_count(&pp_cores, &l2_caches);
+	if (1 < l2_caches) {
+		mali_gpu_class_is_mali450 = MALI_TRUE;
+	}
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if defined(CONFIG_MALI450)
+	_mali_osk_resource_t resource_bcast;
+
+	/* Ensure broadcast unit is in a good state before we start creating
+	 * groups and cores.
+	 */
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast)) {
+		struct mali_bcast_unit *bcast_core = mali_bcast_unit_create(&resource_bcast);
+
+		if (NULL == bcast_core) {
+			MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+			return _MALI_OSK_ERR_FAULT;
+		}
+
+		/* Only the create/delete side effect is wanted here; the unit is
+		 * recreated for real later during group setup. */
+		mali_bcast_unit_delete(bcast_core);
+	}
+#endif /* CONFIG_MALI450 */
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Bring up all kernel-side Mali subsystems.
+ *
+ * Initialization happens in strict dependency order (session -> memory ->
+ * schedulers -> PM/PMU -> HW probing -> MMU -> per-core config -> GPU
+ * utilization).  A PM device reference is held across the HW-touching part
+ * so the GPU stays powered.  Any failure jumps into the label chain at the
+ * bottom, which unwinds, in reverse order, exactly the steps that completed
+ * before the failure, then returns the original error code. */
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+	_mali_osk_errcode_t err;
+	struct mali_pmu_core *pmu;
+
+	mali_pp_job_initialize();
+
+	err = mali_session_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if defined(CONFIG_MALI400_PROFILING)
+	err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+	if (_MALI_OSK_ERR_OK != err) {
+		/* No biggie if we weren't able to initialize the profiling */
+		MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+	}
+#endif
+
+	err = mali_memory_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+	/* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+	err = mali_parse_config_memory();
+	if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+	err = mali_set_global_gpu_base_address();
+	if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+
+	/* Detect gpu class according to l2 cache number */
+	mali_detect_gpu_class();
+
+	err = mali_check_shared_interrupts();
+	if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+
+	err = mali_pp_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+	/* Initialize the power management module */
+	err = mali_pm_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+	/* Initialize the MALI PMU */
+	err = mali_parse_config_pmu();
+	if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+	/* Make sure the power stays on for the rest of this function */
+	err = _mali_osk_pm_dev_ref_add();
+	if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
+
+	/*
+	 * If run-time PM is used, then the mali_pm module has now already been
+	 * notified that the power now is on (through the resume callback functions).
+	 * However, if run-time PM is not used, then there will probably not be any
+	 * calls to the resume callback functions, so we need to explicitly tell it
+	 * that the power is on.
+	 */
+	mali_pm_set_power_is_on();
+
+	/* Reset PMU HW and ensure all Mali power domains are on */
+	pmu = mali_pmu_get_global_pmu_core();
+	if (NULL != pmu) {
+		err = mali_pmu_reset(pmu);
+		if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+	}
+
+	/* Ensure HW is in a good state before starting to access cores. */
+	err = mali_init_hw_reset();
+	if (_MALI_OSK_ERR_OK != err) goto init_hw_reset_failed;
+
+	/* Detect which Mali GPU we are dealing with */
+	err = mali_parse_product_info();
+	if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+	/* The global_product_id is now populated with the correct Mali GPU */
+
+	/* Create PM domains only if PMU exists */
+	if (NULL != pmu) {
+		err = mali_create_pm_domains();
+		if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
+	}
+
+	/* Initialize MMU module */
+	err = mali_mmu_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+	if (mali_is_mali450()) {
+		err = mali_dlbu_initialize();
+		if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+
+		err = mali_parse_config_dma();
+		if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+	}
+
+	/* Start configuring the actual Mali hardware. */
+	err = mali_parse_config_l2_cache();
+	if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+	err = mali_parse_config_groups();
+	if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+	/* Initialize the schedulers */
+	err = mali_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+	err = mali_gp_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+
+	/* PP scheduler population can't fail */
+	mali_pp_scheduler_populate();
+
+	/* Initialize the GPU utilization tracking */
+	err = mali_utilization_init();
+	if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+
+	MALI_SUCCESS; /* all ok */
+
+	/* Error handling */
+
+utilization_init_failed:
+	mali_pp_scheduler_depopulate();
+	mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+	mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+	mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
+	mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
+	{
+		struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+		if (NULL != dma) mali_dma_delete(dma);
+	}
+dma_parsing_failed:
+	mali_dlbu_terminate();
+dlbu_init_failed:
+	mali_mmu_terminate();
+mmu_init_failed:
+	mali_pm_domain_terminate();
+pm_domain_failed:
+	/* Nothing to roll back */
+product_info_parsing_failed:
+	/* Nothing to roll back */
+init_hw_reset_failed:
+	/* Nothing to roll back */
+pmu_reset_failed:
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+pm_always_on_failed:
+	pmu = mali_pmu_get_global_pmu_core();
+	if (NULL != pmu) {
+		mali_pmu_delete(pmu);
+	}
+parse_pmu_config_failed:
+	mali_pm_terminate();
+pm_init_failed:
+	mali_pp_scheduler_terminate();
+pp_scheduler_init_failed:
+check_shared_interrupts_failed:
+	global_gpu_base_address = 0;
+set_global_gpu_base_address_failed:
+	/* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
+parse_memory_config_failed:
+	mali_memory_terminate();
+memory_init_failed:
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_term();
+#endif
+	mali_session_terminate();
+session_init_failed:
+	mali_pp_job_terminate();
+	return err;
+}
+
+/* Shut down everything brought up by mali_initialize_subsystems(), in
+ * reverse start-up order.  A PM device reference keeps the GPU powered for
+ * the duration of the teardown. */
+void mali_terminate_subsystems(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+	struct mali_dma_core *dma = mali_dma_get_global_dma_core();
+
+	MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+	/* shut down subsystems in reverse order from startup */
+
+	/* We need the GPU to be powered up for the terminate sequence */
+	_mali_osk_pm_dev_ref_add();
+
+	mali_utilization_term();
+	mali_pp_scheduler_depopulate();
+	mali_gp_scheduler_terminate();
+	mali_scheduler_terminate();
+	mali_delete_l2_cache_cores();
+	if (mali_is_mali450()) {
+		mali_dlbu_terminate();
+	}
+	mali_mmu_terminate();
+	if (NULL != pmu) {
+		mali_pmu_delete(pmu);
+	}
+	if (NULL != dma) {
+		mali_dma_delete(dma);
+	}
+	mali_pm_terminate();
+	mali_memory_terminate();
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_term();
+#endif
+
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+
+	mali_pp_scheduler_terminate();
+	mali_session_terminate();
+
+	mali_pp_job_terminate();
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+	/* Set during init by mali_parse_product_info(). */
+	return global_product_id;
+}
+
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+	/* Major HW version detected during product-info parsing. */
+	return global_gpu_major_version;
+}
+
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+	/* Minor HW version detected during product-info parsing. */
+	return global_gpu_minor_version;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check compatability */
+	args->compatible = (_MALI_UK_API_VERSION == args->version) ? 1 : 0;
+
+	args->version = _MALI_UK_API_VERSION; /* report our version */
+
+	/* success regardless of being compatible or not */
+	MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+	_mali_osk_errcode_t err;
+	_mali_osk_notification_t *notification;
+	_mali_osk_notification_queue_t *queue;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+	if (NULL == queue) {
+		/* No queue means the session is shutting down: tell userspace to stop polling. */
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+		MALI_SUCCESS;
+	}
+
+	/* Block until a notification arrives (may sleep). */
+	err = _mali_osk_notification_queue_receive(queue, &notification);
+	if (_MALI_OSK_ERR_OK != err) {
+		MALI_ERROR(err); /* errcode returned, pass on to caller */
+	}
+
+	/* Hand the payload back to userspace, then release the notification. */
+	args->type = (_mali_uk_notification_type)notification->notification_type;
+	_mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+	_mali_osk_notification_delete(notification);
+
+	MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+	_mali_osk_notification_t *notification;
+	_mali_osk_notification_queue_t *queue;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+	if (NULL == queue) {
+		/* Session is shutting down: accept silently and tell userspace to stop. */
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		MALI_SUCCESS;
+	}
+
+	/* Wrap the requested type in a payload-less notification and queue it. */
+	notification = _mali_osk_notification_create(args->type, 0);
+	if (NULL == notification) {
+		MALI_PRINT_ERROR(("Failed to create notification object\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	_mali_osk_notification_queue_send(queue, notification);
+
+	MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
+{
+	struct mali_session_data *session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *) args->ctx;
+
+	/* Idempotent: only flip the flag (and log) on the first request. */
+	if (MALI_FALSE == session->use_high_priority_job_queue) {
+		session->use_high_priority_job_queue = MALI_TRUE;
+		MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Open a new Mali session (one per userspace file descriptor).
+ * Acquires, in order: notification queue, page directory, DLBU page
+ * mapping, memory session, soft-job system, timeline system (and, under
+ * CONFIG_MALI400_POWER_PERFORMANCE_POLICY, the window-job counter).  Each
+ * failure path frees, inline and in reverse order, everything acquired
+ * before it.  On success *context receives the session pointer used by all
+ * other _mali_ukk_* entry points. */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+	u32 i;
+	struct mali_session_data *session;
+
+	/* allocated struct to track this session */
+	session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+	MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+	/* create a response queue for this session */
+	session->ioctl_queue = _mali_osk_notification_queue_init();
+	if (NULL == session->ioctl_queue) {
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session->page_directory = mali_mmu_pagedir_alloc();
+	if (NULL == session->page_directory) {
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+		MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Point the DLBU virtual page at the real DLBU when one exists. */
+	if (0 != mali_dlbu_phys_addr) {
+		mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+					_MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Create soft system. */
+	session->soft_job_system = mali_soft_job_system_create(session);
+	if (NULL == session->soft_job_system) {
+		mali_memory_session_end(session);
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Create timeline system. */
+	session->timeline_system = mali_timeline_system_create(session);
+	if (NULL == session->timeline_system) {
+		mali_soft_job_system_destroy(session->soft_job_system);
+		mali_memory_session_end(session);
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+	if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
+		MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
+		mali_timeline_system_destroy(session->timeline_system);
+		mali_soft_job_system_destroy(session->soft_job_system);
+		mali_memory_session_end(session);
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		return _MALI_OSK_ERR_FAULT;
+	}
+#endif
+
+	session->use_high_priority_job_queue = MALI_FALSE;
+
+	/* Initialize list of PP jobs on this session. */
+	_MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+	/* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+	for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+		_MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+	}
+
+	*context = (void *)session;
+
+	/* Add session to the list of all sessions. */
+	mali_session_add(session);
+
+	MALI_DEBUG_PRINT(2, ("Session started\n"));
+	MALI_SUCCESS;
+}
+
+/* Tear down a session created by _mali_ukk_open().
+ * Ordering matters: the session is first removed from the global list and
+ * marked aborting, then queued/running GP, PP and soft jobs are aborted and
+ * all bottom-half work is flushed, and only after the timeline system has
+ * drained are the per-session subsystems destroyed and the memory freed. */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+	struct mali_session_data *session;
+	MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+	session = (struct mali_session_data *)*context;
+
+	MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+	MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+	MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+	/* Remove session from list of all sessions. */
+	mali_session_remove(session);
+
+	/* This flag is used to prevent queueing of jobs due to activation. */
+	session->is_aborting = MALI_TRUE;
+
+	/* Stop the soft job timer. */
+	mali_timeline_system_stop_timer(session->timeline_system);
+
+	/* Abort queued and running GP and PP jobs. */
+	mali_gp_scheduler_abort_session(session);
+	mali_pp_scheduler_abort_session(session);
+
+	/* Abort the soft job system. */
+	mali_soft_job_system_abort(session->soft_job_system);
+
+	/* Force execution of all pending bottom half processing for GP and PP. */
+	_mali_osk_wq_flush();
+
+	/* The session PP list should now be empty. */
+	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+	/* At this point the GP and PP scheduler no longer has any jobs queued or running from this
+	 * session, and all soft jobs in the soft job system has been destroyed. */
+
+	/* Any trackers left in the timeline system are directly or indirectly waiting on external
+	 * sync fences. Cancel all sync fence waiters to trigger activation of all remaining
+	 * trackers. This call will sleep until all timelines are empty. */
+	mali_timeline_system_abort(session->timeline_system);
+
+	/* Flush pending work.
+	 * Needed to make sure all bottom half processing related to this
+	 * session has been completed, before we free internal data structures.
+	 */
+	_mali_osk_wq_flush();
+
+	/* Destroy timeline system. */
+	mali_timeline_system_destroy(session->timeline_system);
+	session->timeline_system = NULL;
+
+	/* Destroy soft system. */
+	mali_soft_job_system_destroy(session->soft_job_system);
+	session->soft_job_system = NULL;
+
+	MALI_DEBUG_CODE({
+		/* Check that the pp_job_fb_lookup_list array is empty. */
+		u32 i;
+		for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
+		{
+			MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i]));
+		}
+	});
+
+	/* Free remaining memory allocated to this session */
+	mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+	_mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+	/* Free session data structures */
+	mali_mmu_pagedir_free(session->page_directory);
+	_mali_osk_notification_queue_term(session->ioctl_queue);
+	_mali_osk_free(session);
+
+	*context = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+	MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+	int written = 0; /* Number of bytes written to buf */
+
+	/* Append the GP scheduler dump, then the PP scheduler dump. */
+	written += mali_gp_scheduler_dump_state(buf + written, size - written);
+	written += mali_pp_scheduler_dump_state(buf + written, size - written);
+
+	return written;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_core.h b/drivers/parrot/gpu/mali400/common/mali_kernel_core.h
new file mode 100644
index 00000000000000..88dc80ed134908
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_core.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+/* Mali GPU product families; filled in during product-info parsing and
+ * exposed through mali_kernel_core_get_product_id(). */
+typedef enum {
+	_MALI_PRODUCT_ID_UNKNOWN,
+	_MALI_PRODUCT_ID_MALI200,
+	_MALI_PRODUCT_ID_MALI300,
+	_MALI_PRODUCT_ID_MALI400,
+	_MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+/* True when the detected GPU class is Mali-450.  Compiles to a constant
+ * MALI_FALSE if the driver was built without CONFIG_MALI450. */
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+#if defined(CONFIG_MALI450)
+	return mali_gpu_class_is_mali450;
+#else
+	return MALI_FALSE;
+#endif
+}
+
+/* True when the GPU is NOT a Mali-450 (i.e. Mali-300/400 class).  Compiles
+ * to a constant MALI_TRUE if Mali-450 support was not built in. */
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+#if !defined(CONFIG_MALI450)
+	return MALI_TRUE;
+#else
+	return !mali_gpu_class_is_mali450;
+#endif
+}
+
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.c b/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000000..893994e777f455
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+/* Round x up to the next multiple of BITS_PER_LONG (word-aligns a bit count
+ * so the usage bitmap always occupies whole unsigned longs). */
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table *descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table *table);
+
+/* Allocate a descriptor mapping holding init_entries, growable to
+ * max_entries (both padded up to whole bitmap words).  Returns NULL if any
+ * of the three allocations (mapping, table, lock) fails; partial
+ * allocations are released before returning. */
+mali_descriptor_mapping *mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+	mali_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+	init_entries = MALI_PAD_INT(init_entries);
+	max_entries = MALI_PAD_INT(max_entries);
+
+	if (NULL != map) {
+		map->table = descriptor_table_alloc(init_entries);
+		if (NULL != map->table) {
+			map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+			if (NULL != map->lock) {
+				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+				map->max_nr_mappings_allowed = max_entries;
+				map->current_nr_mappings = init_entries;
+				return map;
+			}
+			/* Lock init failed: unwind the table allocation. */
+			descriptor_table_free(map->table);
+		}
+		_mali_osk_free(map);
+	}
+	return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping *map)
+{
+	/* Release the table, the lock, and the mapping object itself. */
+	descriptor_table_free(map->table);
+	_mali_osk_mutex_rw_term(map->lock);
+	_mali_osk_free(map);
+}
+
+/* Allocate a free descriptor in 'map' and bind it to 'target'.
+ * Grows the table by one word of descriptors (BITS_PER_LONG) when full,
+ * up to max_nr_mappings_allowed.  On success *odescriptor receives the new
+ * descriptor and _MALI_OSK_ERR_OK is returned; on failure (table full or
+ * allocation failure) _MALI_OSK_ERR_FAULT is returned. */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *odescriptor)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	int new_descriptor;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (new_descriptor == map->current_nr_mappings) {
+		/* no free descriptor, try to expand the table */
+		mali_descriptor_table *new_table, *old_table;
+		int nr_mappings_new, nr_mappings_old;
+
+		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+		nr_mappings_old = map->current_nr_mappings;
+		nr_mappings_new = nr_mappings_old + BITS_PER_LONG;
+
+		new_table = descriptor_table_alloc(nr_mappings_new);
+		if (NULL == new_table) goto unlock_and_exit;
+
+		old_table = map->table;
+		/* Copy only the OLD table's contents.  Sizing these copies by the
+		 * already-incremented count (as before) read past the end of the
+		 * old usage bitmap and mappings array, and could seed the new
+		 * bitmap with garbage "used" bits. */
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*nr_mappings_old) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, nr_mappings_old * sizeof(void *));
+		map->table = new_table;
+		map->current_nr_mappings = nr_mappings_new;
+		descriptor_table_free(old_table);
+	}
+
+	/* we have found a valid descriptor, set the value and usage bit */
+	_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+	map->table->mappings[new_descriptor] = target;
+	*odescriptor = new_descriptor;
+	err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping *map, void (*callback)(int, void *))
+{
+	int id;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(callback);
+
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* id 0 is skipped as it's an reserved ID not mapping to anything */
+	for (id = 1; id < map->current_nr_mappings; ++id) {
+		if (0 == _mali_osk_test_bit(id, map->table->usage)) {
+			continue;
+		}
+		callback(id, map->table->mappings[id]);
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping *map, int descriptor, void **target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* A descriptor is valid when it is in range and its usage bit is set. */
+	if ((0 <= descriptor) && (map->current_nr_mappings > descriptor) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		*target = map->table->mappings[descriptor];
+		result = _MALI_OSK_ERR_OK;
+	} else {
+		*target = NULL;
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	MALI_ERROR(result);
+}
+
+/* Overwrite the target pointer of an existing, in-use descriptor.
+ * Returns _MALI_OSK_ERR_FAULT if the descriptor is out of range or unused.
+ * NOTE(review): the lock is taken in RO mode although the mappings array is
+ * written — presumably relying on callers' own serialization per
+ * descriptor; confirm this is intentional. */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping *map, int descriptor, void *target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		map->table->mappings[descriptor] = target;
+		result = _MALI_OSK_ERR_OK;
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	MALI_ERROR(result);
+}
+
+void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor)
+{
+	void *previous = NULL;
+
+	/* Write lock: both the usage bit and the mapping slot are cleared. */
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ((0 <= descriptor) && (map->current_nr_mappings > descriptor) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		previous = map->table->mappings[descriptor];
+		map->table->mappings[descriptor] = NULL;
+		_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return previous;
+}
+
+static mali_descriptor_table *descriptor_table_alloc(int count)
+{
+	mali_descriptor_table *table;
+
+	/* Single allocation holding, in order: the header struct, the usage
+	 * bitmap (count bits packed into unsigned longs; count is always a
+	 * multiple of BITS_PER_LONG thanks to MALI_PAD_INT), and the array of
+	 * 'count' mapping pointers.  calloc zero-fills, so every descriptor
+	 * starts out unused and every mapping starts out NULL. */
+	table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));
+
+	if (NULL != table) {
+		/* Point the members into the trailing storage. */
+		table->usage = (u32 *)((u8 *)table + sizeof(mali_descriptor_table));
+		table->mappings = (void **)((u8 *)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
+	}
+
+	return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table *table)
+{
+	/* usage and mappings live inside the same allocation as the header
+	 * (see descriptor_table_alloc), so one free releases everything. */
+	_mali_osk_free(table);
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.h b/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000000..de2b8f5512421d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table {
+ u32 *usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void **mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping {
+ _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table *table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping *mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping *map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return _MALI_OSK_ERR_OK on success (allocated ID stored in *descriptor), otherwise an error code
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping *map, int descriptor, void **target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping *map, int descriptor, void *target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping *map, void (*callback)(int, void *));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.c b/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.c
new file mode 100644
index 00000000000000..19a9aae652eceb
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+static _mali_osk_spinlock_irq_t *time_data_lock;
+
+static u32 num_running_gp_cores;
+static u32 num_running_pp_cores;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u64 period_start_time = 0;
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 last_utilization_gpu = 0 ;
+static u32 last_utilization_gp = 0 ;
+static u32 last_utilization_pp = 0 ;
+
+static u32 mali_utilization_timeout = 1000;
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
+
+static u32 calculate_window_render_fps(u64 time_period)
+{
+ u32 max_window_number;
+ u64 tmp;
+ u64 max = time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 time_period_shift;
+ u32 max_window_number_shift;
+ u32 ret_val;
+
+ max_window_number = mali_session_max_window_num();
+ /* To avoid float division, extend the dividend to ns unit */
+ tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+ if (tmp > time_period) {
+ max = tmp;
+ }
+
+ /*
+ * We may have 64-bit values, a dividend or a divisor or both
+ * To avoid dependencies to a 64-bit divider, we shift down the two values
+ * equally first.
+ */
+ leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+ shift_val = 32 - leading_zeroes;
+
+ time_period_shift = (u32)(time_period >> shift_val);
+ max_window_number_shift = (u32)(tmp >> shift_val);
+
+ ret_val = max_window_number_shift / time_period_shift;
+
+ return ret_val;
+}
+#endif /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
+
+static void calculate_gpu_utilization(void *arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized_gpu;
+ u32 work_normalized_gp;
+ u32 work_normalized_pp;
+ u32 period_normalized;
+ u32 utilization_gpu;
+ u32 utilization_gp;
+ u32 utilization_pp;
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ u32 window_render_fps;
+#endif
+
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+ /*
+ * No work done for this period
+ * - No need to reschedule timer
+ * - Report zero usage
+ */
+ timer_running = MALI_FALSE;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ if (NULL != mali_utilization_callback) {
+ struct mali_gpu_utilization_data data = { 0, };
+ mali_utilization_callback(&data);
+ }
+
+ mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time_gpu != 0) {
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = time_now;
+
+ /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+ if (work_start_time_gp != 0) {
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = time_now;
+ }
+
+ if (work_start_time_pp != 0) {
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = time_now;
+ }
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid dependencies to a 64-bit divider, we shift down the two values
+ * equally first.
+ * We shift the dividend up and possibly the divisor down, making the result X in 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+ work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+ work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift up the dividend or down the divisor by 8
+ * (we could do a combination, but we just use one for simplicity,
+ * but the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF) {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ } else {
+ /*
+ * The divisor is so small that we can shift up the dividend, without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized_gpu <<= 8;
+ work_normalized_gp <<= 8;
+ work_normalized_pp <<= 8;
+ }
+
+ utilization_gpu = work_normalized_gpu / period_normalized;
+ utilization_gp = work_normalized_gp / period_normalized;
+ utilization_pp = work_normalized_pp / period_normalized;
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ window_render_fps = calculate_window_render_fps(time_period);
+#endif
+
+ last_utilization_gpu = utilization_gpu;
+ last_utilization_gp = utilization_gp;
+ last_utilization_pp = utilization_pp;
+
+ if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+ (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+ mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+ } else {
+ mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ }
+
+ /* starting a new period */
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+ period_start_time = time_now;
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+
+ if (NULL != mali_utilization_callback) {
+ struct mali_gpu_utilization_data data = {
+ utilization_gpu, utilization_gp, utilization_pp,
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ window_render_fps, window_render_fps
+#endif
+ };
+ mali_utilization_callback(&data);
+ }
+}
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.utilization_interval) {
+ mali_utilization_timeout = data.utilization_interval;
+ }
+ if (NULL != data.utilization_callback) {
+ mali_utilization_callback = data.utilization_callback;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+ }
+ }
+#endif
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ if (mali_utilization_callback == NULL) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
+ mali_utilization_callback = mali_power_performance_policy_callback;
+ }
+#endif
+
+ if (NULL == mali_utilization_callback) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+ }
+
+ time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+ if (NULL == time_data_lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ num_running_gp_cores = 0;
+ num_running_pp_cores = 0;
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer) {
+ _mali_osk_spinlock_irq_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ _mali_osk_timer_del(utilization_timer);
+ return;
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer) {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_spinlock_irq_term(time_data_lock);
+}
+
+void mali_utilization_gp_start(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ ++num_running_gp_cores;
+ if (1 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First GP core started, consider GP busy from now and onwards */
+ work_start_time_gp = time_now;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ mali_session_max_window_num();
+#endif
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ } else {
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+ } else {
+ /* Nothing to do */
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+}
+
+void mali_utilization_pp_start(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ ++num_running_pp_cores;
+ if (1 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First PP core started, consider PP busy from now and onwards */
+ work_start_time_pp = time_now;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ /* Clear session->number_of_window_jobs */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ mali_session_max_window_num();
+#endif
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ } else {
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+ } else {
+ /* Nothing to do */
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+ }
+}
+
+void mali_utilization_gp_end(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ --num_running_gp_cores;
+ if (0 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last GP core ended, consider GP idle from now and onwards */
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = 0;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+void mali_utilization_pp_end(void)
+{
+ _mali_osk_spinlock_irq_lock(time_data_lock);
+
+ --num_running_pp_cores;
+ if (0 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last PP core ended, consider PP idle from now and onwards */
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = 0;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_spinlock_irq_unlock(time_data_lock);
+}
+
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+ return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+ return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+ return last_utilization_pp;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.h b/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.h
new file mode 100644
index 00000000000000..7d588ebf00ef81
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_utilization.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
+{
+ return (NULL != mali_utilization_callback);
+}
+
+/**
+ * Should be called when a job is about to execute a GP job
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a job has completed executing a GP job
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a job is about to execute a PP job
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a job has completed executing a PP job
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_kernel_vsync.c b/drivers/parrot/gpu/mali400/common/mali_kernel_vsync.c
new file mode 100644
index 00000000000000..d067937cdda38d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_kernel_vsync.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/parrot/gpu/mali400/common/mali_l2_cache.c b/drivers/parrot/gpu/mali400/common/mali_l2_cache.c
new file mode 100644
index 00000000000000..3893f27e4e5f96
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_l2_cache.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /*unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the flags that can be used to enable features in the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable {
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(cache->counter_lock);
+#else
+ _mali_osk_spinlock_lock(cache->counter_lock);
+#endif
+}
+
+static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(cache->counter_lock);
+#else
+ _mali_osk_spinlock_unlock(cache->counter_lock);
+#endif
+}
+
+static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_lock(cache->command_lock);
+#else
+ _mali_osk_spinlock_lock(cache->command_lock);
+#endif
+}
+
+static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
+{
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_unlock(cache->command_lock);
+#else
+ _mali_osk_spinlock_unlock(cache->command_lock);
+#endif
+}
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *cache = NULL;
+
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+ if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL != cache) {
+ cache->core_id = mali_global_num_l2_cache_cores;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->pm_domain = NULL;
+ cache->mali_l2_status = MALI_L2_NORMAL;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+ cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+ if (NULL != cache->command_lock) {
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#else
+ cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+#endif
+ if (NULL != cache->counter_lock) {
+ mali_l2_cache_reset(cache);
+
+ cache->last_invalidated_id = 0;
+
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+ mali_global_num_l2_cache_cores++;
+
+ return cache;
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+ _mali_osk_spinlock_term(cache->command_lock);
+#endif
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ mali_hw_core_delete(&cache->hw_core);
+ }
+
+ _mali_osk_free(cache);
+ } else {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+
+ /* reset to defaults */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+#ifdef MALI_UPPER_HALF_SCHEDULING
+ _mali_osk_spinlock_irq_term(cache->counter_lock);
+ _mali_osk_spinlock_irq_term(cache->command_lock);
+#else
+ _mali_osk_spinlock_term(cache->command_lock);
+ _mali_osk_spinlock_term(cache->counter_lock);
+#endif
+
+ mali_hw_core_delete(&cache->hw_core);
+
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
+ if (mali_global_l2_cache_cores[i] == cache) {
+ mali_global_l2_cache_cores[i] = NULL;
+ mali_global_num_l2_cache_cores--;
+
+ if (i != mali_global_num_l2_cache_cores) {
+ /* We removed a l2 cache from the middle of the array -- move the last
+ * l2 cache to the current position to close the gap */
+ mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(cache);
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ return cache->core_id;
+}
+
+static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ u32 reg_offset = 0;
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ mali_l2_cache_counter_lock(cache);
+
+ switch (source_id) {
+ case 0:
+ cache->counter_src0 = counter;
+ reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ break;
+
+ case 1:
+ cache->counter_src1 = counter;
+ reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ break;
+
+ default:
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock_power_state(cache);
+ return;
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on) {
+ mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock_power_state(cache);
+}
+
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+ mali_l2_cache_core_set_counter_internal(cache, 0, counter);
+}
+
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+ mali_l2_cache_core_set_counter_internal(cache, 1, counter);
+}
+
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src0;
+}
+
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ /* Caller must hold the PM lock and know that we are powered on */
+
+ mali_l2_cache_counter_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+
+ return;
+ }
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+}
+
/**
 * Re-program the performance counter source registers on every L2 core.
 *
 * Called when resuming from a global pause (see mali_l2_cache_pause_all())
 * so the HW selection matches the SW state in counter_src0/counter_src1.
 *
 * Note: finding a core still in MALI_L2_PAUSE aborts the whole walk (after
 * releasing that core's locks); since mali_l2_cache_pause_all() sets the
 * status on all cores together, no later core could be programmed either.
 */
static void mali_l2_cache_reset_counters_all(void)
{
	int i;
	u32 value;
	struct mali_l2_cache_core *cache;
	u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();

	for (i = 0; i < num_cores; i++) {
		cache = mali_l2_cache_core_get_glob_l2_core(i);
		if (!cache)
			continue;

		if (mali_l2_cache_lock_power_state(cache)) {
			mali_l2_cache_counter_lock(cache);

			if (MALI_L2_PAUSE == cache->mali_l2_status) {
				mali_l2_cache_counter_unlock(cache);
				mali_l2_cache_unlock_power_state(cache);
				return;
			}

			/* Reset performance counters */
			if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
				value = 0;
			} else {
				value = cache->counter_src0;
			}
			mali_hw_core_register_write(&cache->hw_core,
						    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);

			if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
				value = 0;
			} else {
				value = cache->counter_src1;
			}
			mali_hw_core_register_write(&cache->hw_core,
						    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);

			mali_l2_cache_counter_unlock(cache);
		}

		/* NOTE(review): unlocked even when the lock call returned false;
		 * this mirrors mali_l2_cache_invalidate_all() below -- confirm it
		 * matches the mali_pm_domain_lock_state() contract. */
		mali_l2_cache_unlock_power_state(cache);
	}
}
+
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (mali_global_num_l2_cache_cores > index) {
+ return mali_global_l2_cache_cores[index];
+ }
+
+ return NULL;
+}
+
/** @return number of L2 cache cores registered in the system. */
u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
	return mali_global_num_l2_cache_cores;
}
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ mali_l2_cache_counter_lock(cache);
+
+ if (MALI_L2_PAUSE == cache->mali_l2_status) {
+ mali_l2_cache_counter_unlock(cache);
+
+ return;
+ }
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+ }
+
+ mali_l2_cache_counter_unlock(cache);
+}
+
+void mali_l2_cache_reset_all(void)
+{
+ int i;
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ for (i = 0; i < num_cores; i++) {
+ mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
+ }
+}
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache) {
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+}
+
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache) {
+ /* If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started. */
+ if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
+ return MALI_FALSE;
+ } else {
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ }
+
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+ return MALI_TRUE;
+}
+
/**
 * Invalidate (CLEAR_ALL) every L2 cache core in the system.
 *
 * The command is only sent while the core's power state is locked and
 * reported on; each core's last_invalidated_id is bumped so later
 * conditional invalidations can be skipped.
 */
void mali_l2_cache_invalidate_all(void)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
		/*additional check*/
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
			_mali_osk_errcode_t ret;
			mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
			ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
			if (_MALI_OSK_ERR_OK != ret) {
				MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
			}
		}
		/* Unlock is unconditional, paired with the lock call above. */
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
+
/**
 * Invalidate a list of pages in every L2 cache core.
 *
 * @param pages     array of page values passed to the CLEAR_PAGE register
 * @param num_pages number of entries in @pages
 */
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
		/*additional check*/
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
			u32 j;
			for (j = 0; j < num_pages; j++) {
				_mali_osk_errcode_t ret;
				ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
				if (_MALI_OSK_ERR_OK != ret) {
					MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
				}
			}
		}
		/* Unlock is unconditional, paired with the lock call above. */
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
+
/**
 * Lock the core's PM domain state.
 *
 * @return MALI_TRUE when the domain reports the core as powered (registers
 *         may be accessed).  Call sites in this file call
 *         mali_l2_cache_unlock_power_state() afterwards regardless of the
 *         return value.
 */
mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
{
	return mali_pm_domain_lock_state(cache->pm_domain);
}
+
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+ return mali_pm_domain_unlock_state(cache->pm_domain);
+}
+
+/* -------- local helper functions below -------- */
+
+
/**
 * Send a single command to the L2 cache command interface.
 *
 * @param cache L2 cache core to talk to
 * @param reg   register offset to write (e.g. MALI400_L2_CACHE_REGISTER_COMMAND)
 * @param val   value/command to write
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_BUSY when the L2 is
 *         paused, _MALI_OSK_ERR_FAULT when the command interface never went
 *         idle within the busy-wait bound.  (MALI_ERROR/MALI_SUCCESS are
 *         return macros.)
 */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
	int i = 0;
	const int loop_count = 100000; /* pure busy-wait bound; no sleeping here */

	/*
	 * Grab lock in order to send commands to the L2 cache in a serialized fashion.
	 * The L2 cache will ignore commands if it is busy.
	 */
	mali_l2_cache_command_lock(cache);

	if (MALI_L2_PAUSE == cache->mali_l2_status) {
		mali_l2_cache_command_unlock(cache);
		MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for L2 come back\n"));

		MALI_ERROR(_MALI_OSK_ERR_BUSY);
	}

	/* First, wait for L2 cache command handler to go idle */

	for (i = 0; i < loop_count; i++) {
		if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
			break;
		}
	}

	if (i == loop_count) {
		mali_l2_cache_command_unlock(cache);
		MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* then issue the command */
	mali_hw_core_register_write(&cache->hw_core, reg, val);

	mali_l2_cache_command_unlock(cache);

	MALI_SUCCESS;
}
+
+void mali_l2_cache_pause_all(mali_bool pause)
+{
+ int i;
+ struct mali_l2_cache_core *cache;
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ mali_l2_power_status status = MALI_L2_NORMAL;
+
+ if (pause) {
+ status = MALI_L2_PAUSE;
+ }
+
+ for (i = 0; i < num_cores; i++) {
+ cache = mali_l2_cache_core_get_glob_l2_core(i);
+ if (NULL != cache) {
+ cache->mali_l2_status = status;
+
+ /* Take and release the counter and command locks to
+ * ensure there are no active threads that didn't get
+ * the status flag update.
+ *
+ * The locks will also ensure the necessary memory
+ * barriers are done on SMP systems.
+ */
+ mali_l2_cache_counter_lock(cache);
+ mali_l2_cache_counter_unlock(cache);
+
+ mali_l2_cache_command_lock(cache);
+ mali_l2_cache_command_unlock(cache);
+ }
+ }
+
+ /* Resume from pause: do the cache invalidation here to prevent any
+ * loss of cache operation during the pause period to make sure the SW
+ * status is consistent with L2 cache status.
+ */
+ if (!pause) {
+ mali_l2_cache_invalidate_all();
+ mali_l2_cache_reset_counters_all();
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_l2_cache.h b/drivers/parrot/gpu/mali400/common/mali_l2_cache.h
new file mode 100644
index 00000000000000..4882e3141537c9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_l2_cache.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+struct mali_group;
+struct mali_pm_domain;
+
/* Flags describing state of the L2 */
typedef enum mali_l2_power_status {
	MALI_L2_NORMAL, /**< L2 is in normal state and operational */
	MALI_L2_PAUSE, /**< L2 may not be accessed and may be powered off */
} mali_l2_power_status;

/**
 * Definition of the L2 cache core struct
 * Used to track a L2 cache unit in the system.
 * Contains information about the mapping of the registers
 */
struct mali_l2_cache_core {
	struct mali_hw_core hw_core; /**< Common for all HW cores */
	u32 core_id; /**< Unique core ID */
#ifdef MALI_UPPER_HALF_SCHEDULING
	_mali_osk_spinlock_irq_t *command_lock; /**< Serialize all L2 cache commands */
	_mali_osk_spinlock_irq_t *counter_lock; /**< Synchronize L2 cache counter access */
#else
	_mali_osk_spinlock_t *command_lock; /**< Serialize all L2 cache commands (non-IRQ variant) */
	_mali_osk_spinlock_t *counter_lock; /**< Synchronize L2 cache counter access (non-IRQ variant) */
#endif
	u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
	u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
	u32 last_invalidated_id; /**< Cache order id of the most recent invalidation (scheduler-assigned) */
	struct mali_pm_domain *pm_domain; /**< PM domain used for power state locking */
	mali_l2_power_status mali_l2_status; /**< Indicate whether the L2 is paused or not */
};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+/**
+ * L2 pause is just a status that the L2 can't be accessed temporarily.
+*/
+void mali_l2_cache_pause_all(mali_bool pause);
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
/** Associate @cache with the PM domain used by the power state lock/unlock helpers. */
MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
{
	cache->pm_domain = domain;
}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+void mali_l2_cache_reset_all(void);
+
+struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_mem_validation.c b/drivers/parrot/gpu/mali400/common/mali_mem_validation.c
new file mode 100644
index 00000000000000..b2c3c175b856bc
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mem_validation.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
/**
 * Register the single physical memory range (e.g. a frame buffer) that may
 * later be validated via mali_mem_validation_check().
 *
 * @param start Mali physical base address, must be page aligned
 * @param size  range size in bytes, must be a multiple of the page size
 * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT when a range is
 *         already registered or the alignment restrictions are violated
 */
_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
{
	/* Check that no other MEM_VALIDATION resources exist */
	if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
		MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	/* Check restrictions on page alignment */
	if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
	    (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
		MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	mali_mem_validator.phys_base = start;
	mali_mem_validator.size = size;
	MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
			     mali_mem_validator.phys_base, mali_mem_validator.size));

	return _MALI_OSK_ERR_OK;
}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+ if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+ if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_mem_validation.h b/drivers/parrot/gpu/mali400/common/mali_mem_validation.h
new file mode 100644
index 00000000000000..f0a76d1c8bc9fd
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_mmu.c b/drivers/parrot/gpu/mali400/common/mali_mmu.c
new file mode 100644
index 00000000000000..715fd09799c835
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mmu.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command {
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
/**
 * Allocate the MMU subsystem's global helper pages: the empty page
 * directory (activated on idle MMUs) and the page fault flush pages.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on allocation failure
 */
_mali_osk_errcode_t mali_mmu_initialize(void)
{
	/* allocate the helper pages */
	mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
	if (0 == mali_empty_page_directory_phys) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		return _MALI_OSK_ERR_NOMEM;
	}

	if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
			&mali_page_fault_flush_page_directory_mapping,
			&mali_page_fault_flush_page_table,
			&mali_page_fault_flush_page_table_mapping,
			&mali_page_fault_flush_data_page,
			&mali_page_fault_flush_data_page_mapping)) {
		MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
		/* roll back the empty page directory allocation */
		mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
		mali_empty_page_directory_phys = MALI_INVALID_PAGE;
		mali_empty_page_directory_virt = NULL;
		return _MALI_OSK_ERR_NOMEM;
	}

	return _MALI_OSK_ERR_OK;
}
+
/**
 * Tear down the MMU subsystem: release the global helper pages allocated
 * by mali_mmu_initialize().
 */
void mali_mmu_terminate(void)
{
	MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));

	/* Free global helper pages */
	mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
	mali_empty_page_directory_phys = MALI_INVALID_PAGE;
	mali_empty_page_directory_virt = NULL;

	/* Free the page fault flush pages */
	mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping,
				       &mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping,
				       &mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
}
+
/**
 * Allocate and initialize an MMU core from a probed resource.
 *
 * The core is registered with @group; unless @is_virtual, it is also reset
 * and its IRQ handler installed (IRQ probing is done via the trigger/ack
 * callbacks at the bottom of this file).  On any failure the partially
 * constructed state is unwound in reverse order and NULL is returned.
 *
 * @param resource   OS resource describing register window and IRQ
 * @param group      group this MMU belongs to
 * @param is_virtual MALI_TRUE to skip reset and IRQ setup (virtual MMU)
 * @return the new core, or NULL on failure
 */
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
{
	struct mali_mmu_core *mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
	if (NULL != mmu) {
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
			if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
				if (is_virtual) {
					/* Skip reset and IRQ setup for virtual MMU */
					return mmu;
				}

				if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					mmu->irq = _mali_osk_irq_init(resource->irq,
								      mali_group_upper_half_mmu,
								      group,
								      mali_mmu_probe_trigger,
								      mali_mmu_probe_ack,
								      mmu,
								      resource->description);
					if (NULL != mmu->irq) {
						return mmu;
					} else {
						MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
					}
				}
				/* unwind: detach from group, then delete HW core, then free */
				mali_group_remove_mmu_core(group);
			} else {
				MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
			}
			mali_hw_core_delete(&mmu->hw_core);
		}

		_mali_osk_free(mmu);
	} else {
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}
+
/**
 * Destroy an MMU core created by mali_mmu_create(): terminate its IRQ
 * handler (when one was installed), release the register mapping and
 * free the object.
 */
void mali_mmu_delete(struct mali_mmu_core *mmu)
{
	if (NULL != mmu->irq) {
		_mali_osk_irq_term(mmu->irq);
	}

	mali_hw_core_delete(&mmu->hw_core);
	_mali_osk_free(mmu);
}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
/**
 * Issues the enable stall command to the MMU and waits for HW to complete the request
 * @param mmu The MMU to enable stall on
 * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
 */
static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
{
	int i;
	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);

	if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
		return MALI_TRUE;
	}

	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
		return MALI_FALSE;
	}

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);

	/* Poll until one of: a page fault appears, the stall is fully engaged
	 * (STALL_ACTIVE set and STALL_NOT_ACTIVE clear), or paging got
	 * disabled (stall then implicit). */
	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
		if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
			break;
		}
		if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
			break;
		}
		if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return MALI_FALSE;
	}

	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
		MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
		return MALI_FALSE;
	}

	return MALI_TRUE;
}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+ return;
+ }
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+ return;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+ break;
+ }
+ if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
/** Acknowledge a handled page fault so the MMU leaves page fault mode. */
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
{
	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
}
+
/**
 * Hard-reset the MMU without any stall handshake.
 *
 * A 0xCAFEBABE test pattern is written to DTE_ADDR first; the debug assert
 * expects it to read back as 0xCAFEB000 (NOTE(review): presumably the
 * register masks the low address bits -- confirm against the TRM).  Then
 * HARD_RESET is issued and DTE_ADDR is polled until it reads zero.
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on poll timeout
 */
MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
{
	int i;

	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
	MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);

	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
			break;
		}
	}
	if (MALI_REG_POLL_COUNT_FAST == i) {
		MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
		return _MALI_OSK_ERR_FAULT;
	}

	return _MALI_OSK_ERR_OK;
}
+
/**
 * Reset an MMU core into a known state: interrupts unmasked for page fault
 * and bus error, empty page directory active, paging enabled.
 *
 * @return _MALI_OSK_ERR_OK when the raw reset succeeded,
 *         _MALI_OSK_ERR_FAULT / _MALI_OSK_ERR_BUSY otherwise.
 *         NOTE(review): a stall failure sets err to BUSY but a successful
 *         raw reset then overwrites it with OK -- presumably intentional,
 *         since the reset outcome is what matters; confirm.
 */
_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	stall_success = mali_mmu_enable_stall(mmu);
	if (!stall_success) {
		err = _MALI_OSK_ERR_BUSY;
	}

	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));

	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* no session is active, so just activate the empty page directory */
		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
		mali_mmu_enable_paging(mmu);
		err = _MALI_OSK_ERR_OK;
	}
	mali_mmu_disable_stall(mmu);

	return err;
}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+ if (MALI_FALSE == stall_success) {
+ /* False means that it is in Pagefault state. Not possible to disable_stall then */
+ return MALI_FALSE;
+ }
+
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
/** Issue ZAP_CACHE directly, skipping the stall handshake used by mali_mmu_zap_tlb(). */
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}
+
+
/** Invalidate the TLB entry covering @mali_address by writing its PDE index to ZAP_ONE_LINE. */
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
+
/**
 * Point the MMU at a new page directory and zap the TLB.
 * Only valid while the MMU is stalled or in page fault mode (asserted).
 */
static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
{
	/* The MMU must be in stalled or page fault mode, for this writing to work */
	MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
				& (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

}
+
/**
 * Switch the MMU to a session's page directory (stall, program, unstall).
 * Must only be called when stalling cannot fail (asserted).
 */
void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);

	/* NOTE(review): pagedir (a pointer) is printed with %x -- works on
	 * 32-bit targets only; %p would be portable. */
	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);
	MALI_DEBUG_ASSERT(stall_success);
	MALI_IGNORE(stall_success);
	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
	mali_mmu_disable_stall(mmu);
}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ /* This function can only be called when the core is idle, so it could not fail. */
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+ /* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
+ mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+ if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400/common/mali_mmu.h b/drivers/parrot/gpu/mali400/common/mali_mmu.h
new file mode 100644
index 00000000000000..f3040da219aebd
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mmu.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers (byte offsets from the core's register base).
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/** Bit meanings of the MALI_MMU_REGISTER_STATUS register. */
+typedef enum mali_mmu_status_bits {
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+ MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+/* Writing 0 to INT_MASK disables all MMU interrupt sources. */
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.c b/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.c
new file mode 100644
index 00000000000000..b11bbe94d30e7a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+/* Allocate one zeroed MMU table page. Returns its physical address and
+ * stores the kernel mapping in *virt_addr; returns 0 on failure.
+ * NOTE(review): 0 doubles as the failure sentinel — presumably physical
+ * page 0 is never handed out by mali_mmu_get_table_page; confirm there. */
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+ u32 address;
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+ /* Allocation failed */
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err) {
+ /* Zeroing failed: return the page before reporting failure. */
+ mali_mmu_release_table_page(address, mapping);
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+ return 0;
+ }
+
+ *virt_addr = mapping;
+ return address;
+}
+
+/* Release a page obtained from mali_allocate_empty_page.
+ * Safe to call with MALI_INVALID_PAGE (no-op). */
+void mali_free_empty_page(u32 address, mali_io_address virt_addr)
+{
+ if (MALI_INVALID_PAGE != address) {
+ mali_mmu_release_table_page(address, virt_addr);
+ }
+}
+
+/* Build the three-page "fault flush" hierarchy: a data page of zeros, a page
+ * table whose every entry points at the data page, and a page directory whose
+ * every entry points at that page table. On partial failure, already-acquired
+ * pages are released (nested unwind) and the outputs are reset to
+ * MALI_INVALID_PAGE. */
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+ u32 *page_table, mali_io_address *page_table_mapping,
+ u32 *data_page, mali_io_address *data_page_mapping)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(data_page, data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_table, page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ fill_page(*data_page_mapping, 0);
+ fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+ fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ }
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+/* Tear down the pages created by mali_create_fault_flush_pages.
+ * Each slot is released only if valid, then reset to MALI_INVALID_PAGE/NULL,
+ * so the function is idempotent per slot. */
+void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+ u32 *page_table, mali_io_address *page_table_mapping,
+ u32 *data_page, mali_io_address *data_page_mapping)
+{
+ if (MALI_INVALID_PAGE != *page_directory) {
+ mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+ *page_directory = MALI_INVALID_PAGE;
+ *page_directory_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *page_table) {
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ *page_table_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *data_page) {
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ *data_page_mapping = NULL;
+ }
+}
+
+/* Fill an entire MMU page (MALI_MMU_PAGE_SIZE bytes) with the same 32-bit
+ * word using relaxed I/O writes, then issue a memory barrier.
+ * Always returns success (MALI_SUCCESS). */
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+ _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
+/* Ensure page tables exist for the Mali virtual range [mali_address,
+ * mali_address + size). For each covered PDE: allocate a page table page if
+ * absent and mark the PDE present, otherwise just bump its refcount.
+ * NOTE(review): on allocation failure mid-range, PDEs already processed in
+ * this call keep their incremented refcounts — presumably the caller unmaps
+ * on error; confirm at call sites. */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ _mali_osk_errcode_t err;
+ mali_io_address pde_mapping;
+ u32 pde_phys;
+ int i;
+
+ /* Guards against address wrap-around / zero-length input. */
+ if (last_pde < first_pde) {
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ for (i = first_pde; i <= last_pde; i++) {
+ if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+ /* Page table not present */
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+ return err;
+ }
+ pagedir->page_entries_mapped[i] = pde_mapping;
+
+ /* Update PDE, mark as present */
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+ pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ pagedir->page_entries_usage_count[i] = 1;
+ } else {
+ pagedir->page_entries_usage_count[i]++;
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ MALI_SUCCESS;
+}
+
+/* Zero the PTEs covering [mali_address, mali_address + size) within a single
+ * page table page. Caller guarantees the range does not cross a PDE boundary. */
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+ int i;
+ const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+ for (i = first_pte; i <= last_pte; i++) {
+ _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+ }
+}
+
+/* Undo a mali_mmu_pagedir_map of [mali_address, mali_address + size): drop
+ * one reference per covered PDE, freeing whole page tables whose refcount
+ * hits zero and zeroing just the affected PTEs otherwise. Finally invalidate
+ * the touched physical pages in the GPU L2 caches — at most two page-table
+ * pages (first/last PDE) plus the page directory itself, or everything if
+ * the bookkeeping overflows that bound. */
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ u32 left = size;
+ int i;
+ mali_bool pd_changed = MALI_FALSE;
+ u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+ u32 num_pages_inv = 0;
+ mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+ /* For all page directory entries in range. */
+ for (i = first_pde; i <= last_pde; i++) {
+ u32 size_in_pde, offset;
+
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+ /* Offset into page table, 0 if mali_address is 4MiB aligned */
+ offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+ if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+ size_in_pde = left;
+ } else {
+ size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+ }
+
+ pagedir->page_entries_usage_count[i]--;
+
+ /* If entire page table is unused, free it */
+ if (0 == pagedir->page_entries_usage_count[i]) {
+ u32 page_phys;
+ void *page_virt;
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+
+ page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
+ page_virt = pagedir->page_entries_mapped[i];
+ pagedir->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+
+ mali_mmu_release_table_page(page_phys, page_virt);
+ pd_changed = MALI_TRUE;
+ } else {
+ MALI_DEBUG_ASSERT(num_pages_inv < 2);
+ if (num_pages_inv < 2) {
+ pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+
+ /* If part of the page table is still in use, zero the relevant PTEs */
+ mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+ }
+
+ left -= size_in_pde;
+ mali_address += size_in_pde;
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* L2 pages invalidation */
+ if (MALI_TRUE == pd_changed) {
+ MALI_DEBUG_ASSERT(num_pages_inv < 3);
+ if (num_pages_inv < 3) {
+ pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+ }
+
+ if (invalidate_all) {
+ mali_l2_cache_invalidate_all();
+ } else {
+ mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Allocate and initialize a page directory: zeroed tracking struct plus one
+ * zero-filled table page for the directory itself. Returns NULL on any
+ * allocation failure (the struct is freed before returning). */
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+ struct mali_page_directory *pagedir;
+
+ pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+ if (NULL == pagedir) {
+ return NULL;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
+ _mali_osk_free(pagedir);
+ return NULL;
+ }
+
+ /* Zero page directory */
+ fill_page(pagedir->page_directory_mapped, 0);
+
+ return pagedir;
+}
+
+/* Destroy a page directory: release every page table still marked present
+ * in the directory, then the directory page itself, then the struct. */
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+ const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+ int i;
+
+ /* Free referenced page tables and zero PDEs. */
+ for (i = 0; i < num_page_table_entries; i++) {
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+ u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+ mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Free the page directory page. */
+ mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+ _mali_osk_free(pagedir);
+}
+
+
+/* Write PTEs mapping [mali_address, mali_address + size) to the physically
+ * contiguous range starting at phys_address, OR-ing in permission_bits.
+ * The covering page tables must already exist (see mali_mmu_pagedir_map);
+ * no memory barrier is issued here — presumably the caller flushes. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
+{
+ u32 end_address = mali_address + size;
+
+ /* Map physical pages into MMU page tables */
+ for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+ MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+ phys_address | permission_bits);
+ }
+}
+
+/* Return the physical address stored in PDE 'index' (flag bits stripped). */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+/* For instrumented */
+/* Accumulator shared by the dump helpers below. When 'buffer' is NULL the
+ * helpers only tally sizes; otherwise they also write into 'buffer',
+ * advancing it and decrementing 'buffer_left'. */
+struct dump_info {
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+/* Record one (register offset, value) pair in the dump. In size-only mode
+ * (info->buffer == NULL) just adds 8 bytes to register_writes_size; with a
+ * buffer it also appends the pair, failing with NOMEM if space runs out.
+ * The 'comment' argument is documentation only — it is never emitted. */
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info) {
+ info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
+
+ if (NULL != info->buffer) {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32) * 2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Record one table page in the dump: its physical address (4 bytes) followed
+ * by the full 4 KiB page contents. Size-only mode when info->buffer is NULL;
+ * otherwise fails with NOMEM if the remaining buffer cannot hold the entry. */
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
+{
+ if (NULL != info) {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer) {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Dump the whole translation hierarchy of 'pagedir': first the page
+ * directory page, then every mapped page table (all 1024 PDE slots are
+ * scanned). Propagates the first error from mali_mmu_dump_page. */
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != pagedir->page_directory_mapped) {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+ );
+
+ for (i = 0; i < 1024; i++) {
+ if (NULL != pagedir->page_entries_mapped[i]) {
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+ _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Record the register-write sequence needed to replay this page directory:
+ * offsets 0x0 and 0x8 match MALI_MMU_REGISTER_DTE_ADDR and
+ * MALI_MMU_REGISTER_COMMAND in the mali_mmu_register enum. */
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+ "set the page directory address", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+ MALI_SUCCESS;
+}
+
+/* UK call: compute the buffer size a full MMU dump would need, by running
+ * both dump passes in size-only mode (dump_info.buffer == NULL). */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)(args->ctx);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+/* UK call: write the MMU dump into the caller-supplied buffer of args->size
+ * bytes. Register writes come first, then the page-table pages; the start of
+ * each section and its byte size are returned through args.
+ * NOTE(review): args->buffer appears to be used directly as a kernel pointer
+ * — presumably the UK layer has already copied/validated it; confirm there. */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)(args->ctx);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.h b/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.h
new file mode 100644
index 00000000000000..c49e93ea4f07a9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_mmu_page_directory.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ *
+ */
+typedef enum mali_mmu_entry_flags {
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+ MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+ MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+ MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+ MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+ MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be releases on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
+
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(mali_io_address *virtual);
+void mali_free_empty_page(u32 address, mali_io_address virtual);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+ u32 *page_table, mali_io_address *page_table_mapping,
+ u32 *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
+ u32 *page_table, mali_io_address *page_table_mapping,
+ u32 *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk.h b/drivers/parrot/gpu/mali400/common/mali_osk.h
new file mode 100644
index 00000000000000..edff2760d0d282
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk.h
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h" /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We do signal a lot of other (low priority) work also as part of the job being finished, and if we
+ * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs
+ * random things instead of starting the next GPU job when the GPU is idle. So setting the gpu
+ * scheduler to high priority does give a visually more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * Function is the same as \a _mali_osk_wq_schedule_work() with the only
+ * difference that it runs in a high (real time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+*/
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer will be started and the \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to a ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new val to atomic counter, and return the old atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider to use _mali_osk_vmalloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OS bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines an write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(u32 phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems wants to send to user space has to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system is detected that needs to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * space delivery and to retrieve queued messages from it.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire 0 the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em absolute time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after(u32 ticka, u32 tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(u32 ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount(void);
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
+
+/** @brief find last (most-significant) bit set
+ *
+ * @param val 32-bit words to count last bit set on
+ * @return last bit set.
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. Will return if time
+ * exceeds timeout.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identificator for calling process.
+ *
+ * @return Identificator for calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identificator for calling thread.
+ *
+ * @return Identificator for calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Disable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_disable(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * When function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
+
+
+/** @brief Release the reference to the power manger system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ */
+void _mali_osk_pm_dev_ref_dec(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * Will leave the cores powered off if they are already powered off.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ *
+ * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
+
+
+/** @brief Releasing the reference to the power manger system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+
+/** @brief Block untill pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk_bitops.h b/drivers/parrot/gpu/mali400/common/mali_osk_bitops.h
new file mode 100644
index 00000000000000..525b944e985284
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk_bitops.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz(isolated);
+
+ return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+ u32 total;
+
+ for (total = 0; total < maxbit; total += 32, ++addr) {
+ int result;
+ result = _mali_internal_find_first_zero_bit(*addr);
+
+ /* non-negative signifies the bit was found */
+ if (result >= 0) {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if (total >= maxbit) {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk_list.h b/drivers/parrot/gpu/mali400/common/mali_osk_list.h
new file mode 100644
index 00000000000000..2cd190e7d49738
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk_list.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+ /* Destination must be empty (debug-checked only); a non-empty
+ * new_list would silently lose its existing elements. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+ if (!_mali_osk_list_empty(old_list)) {
+ /* Splice the whole chain onto the new head... */
+ new_list->next = old_list->next;
+ new_list->prev = old_list->prev;
+ /* ...fix the first/last elements to point at the new head... */
+ new_list->next->prev = new_list;
+ new_list->prev->next = new_list;
+ /* ...and leave the old head as a valid empty list. */
+ old_list->next = old_list;
+ old_list->prev = old_list;
+ }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that is contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+/* Thin alias: recovering the enclosing structure from a list member is
+ * exactly a container-of computation. */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+/* 'tmp' always holds the entry AFTER 'ptr', so the loop body may free or
+ * unlink 'ptr' without breaking the traversal. */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+/* Mirror of _MALI_OSK_LIST_FOREACHENTRY walking via 'prev' links; 'tmp'
+ * keeps the entry BEFORE 'ptr' so 'ptr' may be removed inside the body. */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk_mali.h b/drivers/parrot/gpu/mali400/common/mali_osk_mali.h
new file mode 100644
index 00000000000000..24356c0fcbc6b1
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk_mali.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return The base address of the Mali GPU component with the lowest address if resources are found, otherwise 0.
+ */
+u32 _mali_osk_resource_base_address(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk_profiling.h b/drivers/parrot/gpu/mali400/common/mali_osk_profiling.h
new file mode 100644
index 00000000000000..6463177a4dfebf
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk_profiling.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+/* Sentinel meaning "no HW counter assigned"; same form as
+ * MALI_HW_CORE_NO_COUNTER. Note: no '=' in an object-like macro, or it
+ * would expand to '= ((u32)-1)' and be unusable in expressions. */
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add an profiling event
+ *
+ * @param event_id The event identificator.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/parrot/gpu/mali400/common/mali_osk_types.h b/drivers/parrot/gpu/mali400/common/mali_osk_types.h
new file mode 100644
index 00000000000000..feeefb92b34bd1
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_osk_types.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char u8;
+typedef signed char s8;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+ union {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ * allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ * is not allowed.
+ *
+ */
+typedef enum {
+ /* || Locks || */
+ /* || must be || */
+ /* _||_ taken in _||_ */
+ /* \ / this \ / */
+ /* \/ order! \/ */
+
+ _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
+ _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+ _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+ _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+ _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
+ _MALI_OSK_LOCK_ORDER_PMU,
+
+ _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+ _MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x2,
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks when we call
+ * functions _mali_osk_mutex_rw_init/wait/signal/term/. In this case, the RO mode can
+ * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait/signal().
+ *
+ */
+typedef enum {
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address *mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void *result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ *_mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+ const char *description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pm.c b/drivers/parrot/gpu/mali400/common/mali_pm.c
new file mode 100644
index 00000000000000..57c9790859720c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pm.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+static mali_bool mali_power_on = MALI_FALSE;
+
+/** Enable the OS runtime-PM hooks for the Mali device. Always succeeds. */
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+ _mali_osk_pm_dev_enable();
+ return _MALI_OSK_ERR_OK;
+}
+
+/** Tear down the PM domains, then disable the OS runtime-PM hooks. */
+void mali_pm_terminate(void)
+{
+ mali_pm_domain_terminate();
+ _mali_osk_pm_dev_disable();
+}
+
+/* Reset GPU state after power up: hardware contents are undefined after a
+ * power cycle, so both the L2 caches and all groups must be re-reset. */
+static void mali_pm_reset_gpu(void)
+{
+ /* Reset all L2 caches */
+ mali_l2_cache_reset_all();
+
+ /* Reset all groups */
+ mali_scheduler_reset_all_groups();
+}
+
+/* System (OS) suspend: drain both schedulers, stop utilization tracking,
+ * power the groups off and record that GPU power is lost. */
+void mali_pm_os_suspend(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+ mali_utilization_suspend();
+ mali_group_power_off(MALI_TRUE);
+ mali_power_on = MALI_FALSE;
+}
+
+/* Shared resume path for OS resume and runtime resume: reset the PMU (if
+ * one is present), mark power as restored (with a write barrier so other
+ * CPUs see the flag before any post-resume work), and, if power had
+ * actually been lost, reset and re-power the GPU groups. */
+static void mali_pm_do_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ mali_bool do_reset = MALI_FALSE;
+
+ if (MALI_TRUE != mali_power_on) {
+ do_reset = MALI_TRUE;
+ }
+
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+
+ mali_power_on = MALI_TRUE;
+ _mali_osk_write_mem_barrier();
+
+ if (do_reset) {
+ mali_pm_reset_gpu();
+ mali_group_power_on();
+ }
+}
+
+/* System (OS) resume: restore power state, then restart the schedulers
+ * that mali_pm_os_suspend() drained. */
+void mali_pm_os_resume(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ mali_pm_do_resume();
+
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+}
+
+/* Runtime suspend: power the groups off and record that power is lost.
+ * Schedulers are left running; they will simply find no powered groups. */
+void mali_pm_runtime_suspend(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+ mali_group_power_off(MALI_TRUE);
+ mali_power_on = MALI_FALSE;
+}
+
+/* Runtime resume: restore power state only; no scheduler restart needed
+ * since runtime suspend does not stop the schedulers. */
+void mali_pm_runtime_resume(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+
+ mali_pm_do_resume();
+}
+
+/* Record that GPU power is on without performing any reset; used when the
+ * power state is known to be good (no write barrier is issued here). */
+void mali_pm_set_power_is_on(void)
+{
+ mali_power_on = MALI_TRUE;
+}
+
+/* @return MALI_TRUE if GPU power is currently flagged as on. */
+mali_bool mali_pm_is_power_on(void)
+{
+ return mali_power_on;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_pm.h b/drivers/parrot/gpu/mali400/common/mali_pm.h
new file mode 100644
index 00000000000000..4f4d03617b828f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
#ifndef __MALI_PM_H__
#define __MALI_PM_H__

#include "mali_osk.h"

/* Driver-lifetime setup/teardown of the power management module. */
_mali_osk_errcode_t mali_pm_initialize(void);
void mali_pm_terminate(void);

/* Callback functions registered for the runtime PMM system */
void mali_pm_os_suspend(void);
void mali_pm_os_resume(void);
void mali_pm_runtime_suspend(void);
void mali_pm_runtime_resume(void);

/* Record/query the module-wide "GPU is powered" flag. */
void mali_pm_set_power_is_on(void);
mali_bool mali_pm_is_power_on(void);

#endif /* __MALI_PM_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pm_domain.c b/drivers/parrot/gpu/mali400/common/mali_pm_domain.c
new file mode 100644
index 00000000000000..8d9d7ccddddb36
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pm_domain.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+
/* Acquire the per-domain IRQ-safe spinlock. */
static void mali_pm_domain_lock(struct mali_pm_domain *domain)
{
	_mali_osk_spinlock_irq_lock(domain->lock);
}
+
/* Release the per-domain IRQ-safe spinlock. */
static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
{
	_mali_osk_spinlock_irq_unlock(domain->lock);
}
+
/* Set the cached power state. Callers in this file invoke this with
 * domain->lock held. */
MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
{
	domain->state = state;
}
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+ struct mali_pm_domain *domain = NULL;
+ u32 domain_id = 0;
+
+ domain = mali_pm_domain_get_from_mask(pmu_mask);
+ if (NULL != domain) return domain;
+
+ MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+ if (NULL != domain) {
+ domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
+ if (NULL == domain->lock) {
+ _mali_osk_free(domain);
+ return NULL;
+ }
+
+ domain->state = MALI_PM_DOMAIN_ON;
+ domain->pmu_mask = pmu_mask;
+ domain->use_count = 0;
+ domain->group_list = NULL;
+ domain->group_count = 0;
+ domain->l2 = NULL;
+
+ domain_id = _mali_osk_fls(pmu_mask) - 1;
+ /* Verify the domain_id */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+ /* Verify that pmu_mask only one bit is set */
+ MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+ mali_pm_domains[domain_id] = domain;
+
+ return domain;
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+ }
+
+ return NULL;
+}
+
/* Free a domain and its lock. NULL is accepted and ignored, which lets
 * callers pass unused table slots directly. */
void mali_pm_domain_delete(struct mali_pm_domain *domain)
{
	if (NULL == domain) {
		return;
	}
	_mali_osk_spinlock_irq_term(domain->lock);

	_mali_osk_free(domain);
}
+
+void mali_pm_domain_terminate(void)
+{
+ int i;
+
+ /* Delete all domains */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ }
+}
+
/* Attach a group to the domain identified by mask. No-op when no domain
 * exists for the mask. The group is pushed onto the head of the domain's
 * singly linked group list (chained through group->pm_domain_list). */
void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
{
	struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
	struct mali_group *next;

	if (NULL == domain) return;

	MALI_DEBUG_ASSERT_POINTER(group);

	++domain->group_count;
	next = domain->group_list;

	domain->group_list = group;

	group->pm_domain_list = next;

	mali_group_set_pm_domain(group, domain);

	/* Get pm domain ref after mali_group_set_pm_domain */
	mali_group_get_pm_domain_ref(group);
}
+
/* Associate an L2 cache with the domain identified by mask. No-op when
 * no domain exists for the mask; a domain holds at most one L2 cache
 * (asserted below). */
void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
{
	struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);

	if (NULL == domain) return;

	MALI_DEBUG_ASSERT(NULL == domain->l2);
	MALI_DEBUG_ASSERT(NULL != l2);

	domain->l2 = l2;

	mali_l2_cache_set_pm_domain(l2, domain);
}
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+ u32 id = 0;
+
+ if (0 == mask) return NULL;
+
+ id = _mali_osk_fls(mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+ /* Verify that pmu_mask only one bit is set */
+ MALI_DEBUG_ASSERT((1 << id) == mask);
+
+ return mali_pm_domains[id];
+}
+
/* Look up a domain by table index; may return NULL for unused slots. */
struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
{
	MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);

	return mali_pm_domains[id];
}
+
/* Take a reference on the domain, powering it up through the PMU if it
 * is not already ON. NULL domains are ignored. */
void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
{
	if (NULL == domain) return;

	mali_pm_domain_lock(domain);
	++domain->use_count;

	if (MALI_PM_DOMAIN_ON != domain->state) {
		/* Power on */
		struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

		MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));

		if (NULL != pmu) {
			_mali_osk_errcode_t err;

			err = mali_pmu_power_up(pmu, domain->pmu_mask);

			/* _MALI_OSK_ERR_BUSY means the PMU deliberately skipped
			 * the operation because all of Mali is off — not an error. */
			if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
				MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
						  domain->pmu_mask));
			}
		}
		mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
	} else {
		MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
	}

	mali_pm_domain_unlock(domain);
}
+
/* Drop a reference on the domain; when the use count reaches zero the
 * domain is powered down through the PMU. NULL domains are ignored. */
void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
{
	if (NULL == domain) return;

	mali_pm_domain_lock(domain);
	--domain->use_count;

	if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
		/* Power off */
		struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

		MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));

		/* Mark OFF before touching the PMU so readers under the lock
		 * see a consistent state. */
		mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);

		if (NULL != pmu) {
			_mali_osk_errcode_t err;

			err = mali_pmu_power_down(pmu, domain->pmu_mask);

			/* _MALI_OSK_ERR_BUSY means the PMU deliberately skipped
			 * the operation because all of Mali is off — not an error. */
			if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
				MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
						  domain->pmu_mask));
			}
		}
	}
	mali_pm_domain_unlock(domain);
}
+
/* Take a "no power on" reference on both the domain (if non-NULL) and
 * the OS runtime-PM device, and report whether everything is currently
 * powered. Must be paired with mali_pm_domain_unlock_state(). */
mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
{
	mali_bool is_powered = MALI_TRUE;

	/* Take a reference without powering on */
	if (NULL != domain) {
		mali_pm_domain_lock(domain);
		++domain->use_count;

		if (MALI_PM_DOMAIN_ON != domain->state) {
			is_powered = MALI_FALSE;
		}
		mali_pm_domain_unlock(domain);
	}

	/* The OS runtime-PM reference must also report "powered". */
	if (!_mali_osk_pm_dev_ref_add_no_power_on()) {
		is_powered = MALI_FALSE;
	}

	return is_powered;
}
+
/* Release the references taken by mali_pm_domain_lock_state(). */
void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
{
	_mali_osk_pm_dev_ref_dec_no_power_on();

	if (NULL != domain) {
		mali_pm_domain_ref_put(domain);
	}
}
diff --git a/drivers/parrot/gpu/mali400/common/mali_pm_domain.h b/drivers/parrot/gpu/mali400/common/mali_pm_domain.h
new file mode 100644
index 00000000000000..1915105d6b9dd3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pm_domain.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
#ifndef __MALI_PM_DOMAIN_H__
#define __MALI_PM_DOMAIN_H__

#include "mali_kernel_common.h"
#include "mali_osk.h"

#include "mali_l2_cache.h"
#include "mali_group.h"
#include "mali_pmu.h"

/* Power state of a PM domain. */
typedef enum {
	MALI_PM_DOMAIN_ON,
	MALI_PM_DOMAIN_OFF,
} mali_pm_domain_state;

/* A power domain: the groups and (optional) L2 cache controlled by one
 * bit of the PMU mask. */
struct mali_pm_domain {
	mali_pm_domain_state state;	/* cached power state */
	_mali_osk_spinlock_irq_t *lock;	/* protects state and use_count */

	s32 use_count;			/* references held; 0 allows power off */

	u32 pmu_mask;			/* single PMU register bit for this domain */

	int group_count;		/* number of groups in group_list */
	struct mali_group *group_list;	/* groups chained via pm_domain_list */

	struct mali_l2_cache_core *l2;	/* L2 cache in this domain, or NULL */
};

/* Create (or return the already registered) domain for a single-bit mask. */
struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);

void mali_pm_domain_add_group(u32 mask, struct mali_group *group);

void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
void mali_pm_domain_delete(struct mali_pm_domain *domain);

void mali_pm_domain_terminate(void);

/** Get PM domain from PMU mask / table index (may return NULL).
 */
struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);

/* Ref counting */
void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
void mali_pm_domain_ref_put(struct mali_pm_domain *domain);

/* NOTE: domain must be non-NULL; no NULL check is performed here. */
MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
{
	return domain->l2;
}

/* NOTE: domain must be non-NULL; no NULL check is performed here. */
MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
{
	return domain->state;
}

mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);

/* Iterate all groups in a domain (follows the pm_domain_list links). */
#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
		NULL != (group); (group) = (group)->pm_domain_list)

#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pmu.c b/drivers/parrot/gpu/mali400/common/mali_pmu.c
new file mode 100644
index 00000000000000..d3f14728eb98c1
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pmu.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {0};
+
+static u32 mali_pmu_detect_mask(void);
+
/** @brief Mali in-built PMU instance: register bank plus bookkeeping of
 * which power domains exist and which are meant to be powered.
 */
struct mali_pmu_core {
	struct mali_hw_core hw_core;	/* mapped PMU register bank */
	_mali_osk_spinlock_t *lock;	/* serializes PMU command sequences */
	u32 registered_cores_mask;	/* all domain bits in the HW config */
	u32 active_cores_mask;		/* domains requested to be powered on */
	u32 switch_delay;		/* value written to the SW_DELAY register */
};
+
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
/** @brief Register layout for hardware PMU (byte offsets from the base)
 */
typedef enum {
	PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
	PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
	PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register (bit set = domain powered down) */
	PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
	PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
	PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
	PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
	PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
} pmu_reg_addr_mgmt_addr;
+
+#define PMU_REG_VAL_IRQ 1
+
/* Create the single global PMU object: detect the configured domain
 * mask, map the register bank and read the switch delay from the OS
 * device data. On any failure the partially built object is torn down
 * in reverse order and NULL is returned. */
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
{
	struct mali_pmu_core *pmu;

	MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
	MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));

	pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
	if (NULL != pmu) {
		pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
		if (NULL != pmu->lock) {
			pmu->registered_cores_mask = mali_pmu_detect_mask();
			/* Initially request everything powered. */
			pmu->active_cores_mask = pmu->registered_cores_mask;

			if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
				_mali_osk_errcode_t err;
				_mali_osk_device_data data = { 0, };

				err = _mali_osk_device_data_get(&data);
				if (_MALI_OSK_ERR_OK == err) {
					pmu->switch_delay = data.pmu_switch_delay;
					mali_global_pmu_core = pmu;
					return pmu;
				}
				mali_hw_core_delete(&pmu->hw_core);
			}
			_mali_osk_spinlock_term(pmu->lock);
		}
		_mali_osk_free(pmu);
	}

	return NULL;
}
+
/* Destroy the global PMU object created by mali_pmu_create() and clear
 * the global pointer. */
void mali_pmu_delete(struct mali_pmu_core *pmu)
{
	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
	MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));

	_mali_osk_spinlock_term(pmu->lock);
	mali_hw_core_delete(&pmu->hw_core);
	_mali_osk_free(pmu);
	mali_global_pmu_core = NULL;
}
+
/* Serialize PMU command sequences. */
static void mali_pmu_lock(struct mali_pmu_core *pmu)
{
	_mali_osk_spinlock_lock(pmu->lock);
}
/* Release the PMU command lock. */
static void mali_pmu_unlock(struct mali_pmu_core *pmu)
{
	_mali_osk_spinlock_unlock(pmu->lock);
}
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+{
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+ MALI_DEBUG_ASSERT(pmu);
+
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+ MALI_DEBUG_ASSERT(0 < timeout);
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+ return _MALI_OSK_ERR_OK;
+}
+
/* Power up the domains in mask that STATUS reports as powered down
 * (set bit = asleep). Caller holds pmu->lock. With
 * CONFIG_MALI_PMU_PARALLEL_POWER_UP all domains are raised with one
 * register write; otherwise one command is issued per domain bit,
 * waiting for the completion IRQ each time. */
static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
{
	u32 stat;
	_mali_osk_errcode_t err;
#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
	u32 current_domain;
#endif

	MALI_DEBUG_ASSERT_POINTER(pmu);
	/* No stale completion IRQ may be pending when a command is issued. */
	MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
				& PMU_REG_VAL_IRQ));

	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
	stat &= pmu->registered_cores_mask;
	/* Nothing to do if every requested domain is already powered. */
	if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;

#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
	mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);

	err = mali_pmu_wait_for_command_finish(pmu);
	if (_MALI_OSK_ERR_OK != err) {
		return err;
	}
#else
	for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
		if (current_domain & mask & stat) {
			mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);

			err = mali_pmu_wait_for_command_finish(pmu);
			if (_MALI_OSK_ERR_OK != err) {
				return err;
			}
		}
	}
#endif

#if defined(DEBUG)
	/* Get power status of cores */
	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
	stat &= pmu->registered_cores_mask;

	MALI_DEBUG_ASSERT(0 == (stat & mask));
	MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
#endif /* defined(DEBUG) */

	return _MALI_OSK_ERR_OK;
}
+
/* Power down the domains in mask that STATUS reports as powered
 * (clear bit = awake). Caller holds pmu->lock. */
static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
{
	u32 stat;
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	/* No stale completion IRQ may be pending when a command is issued. */
	MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
				& PMU_REG_VAL_IRQ));

	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
	stat &= pmu->registered_cores_mask;

	/* Nothing to do if every requested domain is already down. */
	if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;

	mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);

	/* Do not wait for interrupt on Mali-300/400 if all domains are powered off
	 * by our power down command, because the HW will simply not generate an
	 * interrupt in this case.*/
	if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
		err = mali_pmu_wait_for_command_finish(pmu);
		if (_MALI_OSK_ERR_OK != err) {
			return err;
		}
	} else {
		mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
	}
#if defined(DEBUG)
	/* Get power status of cores */
	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
	stat &= pmu->registered_cores_mask;

	MALI_DEBUG_ASSERT(mask == (stat & mask));
#endif

	return _MALI_OSK_ERR_OK;
}
+
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ _mali_osk_errcode_t err;
+ u32 cores_off_mask, cores_on_mask, stat;
+
+ mali_pmu_lock(pmu);
+
+ /* Setup the desired defaults */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+ /* Get power status of cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+
+ cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
+ cores_on_mask = pmu->registered_cores_mask & (stat & pmu->active_cores_mask);
+
+ if (0 != cores_off_mask) {
+ err = mali_pmu_power_down_internal(pmu, cores_off_mask);
+ if (_MALI_OSK_ERR_OK != err) return err;
+ }
+
+ if (0 != cores_on_mask) {
+ err = mali_pmu_power_up_internal(pmu, cores_on_mask);
+ if (_MALI_OSK_ERR_OK != err) return err;
+ }
+
+#if defined(DEBUG)
+ {
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
+ }
+#endif /* defined(DEBUG) */
+
+ mali_pmu_unlock(pmu);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Make sure we have a valid power domain mask */
+ if (mask > pmu->registered_cores_mask) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ mali_pmu_lock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
+
+ pmu->active_cores_mask &= ~mask;
+
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ if (!mali_pm_is_power_on()) {
+ /* Don't touch hardware if all of Mali is powered off. */
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
+
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ err = mali_pmu_power_down_internal(pmu, mask);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+ mali_pmu_unlock(pmu);
+
+ return err;
+}
+
/* Power up the domains in mask and record them in active_cores_mask.
 * Returns _MALI_OSK_ERR_BUSY without touching hardware if all of Mali is
 * currently off; the recorded mask is applied by the next
 * reset / power-up-all. */
_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	/* Make sure we have a valid power domain mask */
	if (mask & ~pmu->registered_cores_mask) {
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	mali_pmu_lock(pmu);

	MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));

	pmu->active_cores_mask |= mask;

	_mali_osk_pm_dev_ref_add_no_power_on();
	if (!mali_pm_is_power_on()) {
		/* Don't touch hardware if all of Mali is powered off. */
		_mali_osk_pm_dev_ref_dec_no_power_on();
		mali_pmu_unlock(pmu);

		MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));

		return _MALI_OSK_ERR_BUSY;
	}

	err = mali_pmu_power_up_internal(pmu, mask);

	_mali_osk_pm_dev_ref_dec_no_power_on();
	mali_pmu_unlock(pmu);

	return err;
}
+
/* Power down every registered domain. active_cores_mask is deliberately
 * left untouched so the previous state can be restored later. */
_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	mali_pmu_lock(pmu);

	/* Setup the desired defaults in case we were called before mali_pmu_reset() */
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);

	err = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);

	mali_pmu_unlock(pmu);

	return err;
}
+
/* Power up the domains recorded in active_cores_mask, restoring the
 * state requested via mali_pmu_power_up()/mali_pmu_power_down(). */
_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
{
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	mali_pmu_lock(pmu);

	/* Setup the desired defaults in case we were called before mali_pmu_reset() */
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);

	err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);

	mali_pmu_unlock(pmu);
	return err;
}
+
/* Return the global PMU instance, or NULL if none was created. */
struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
{
	return mali_global_pmu_core;
}
+
+static u32 mali_pmu_detect_mask(void)
+{
+ int dynamic_config_pp = 0;
+ int dynamic_config_l2 = 0;
+ int i = 0;
+ u32 mask = 0;
+
+ /* Check if PM domain compatible with actually pp core and l2 cache and collection info about domain */
+ mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+
+ for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
+ mask |= mali_pmu_get_domain_mask(i);
+
+ if (0x0 != mali_pmu_get_domain_mask(i)) {
+ dynamic_config_pp++;
+ }
+ }
+
+ for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
+ mask |= mali_pmu_get_domain_mask(i);
+
+ if (0x0 != mali_pmu_get_domain_mask(i)) {
+ dynamic_config_l2++;
+ }
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+
+ return mask;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_pmu.h b/drivers/parrot/gpu/mali400/common/mali_pmu.h
new file mode 100644
index 00000000000000..55715f0071f073
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pmu.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
/**
 * @file mali_pmu.h
 * Mali driver API for the Mali 400 in-built PMU hardware
 */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+
+#define MALI_GP_DOMAIN_INDEX 0
+#define MALI_PP0_DOMAIN_INDEX 1
+#define MALI_PP1_DOMAIN_INDEX 2
+#define MALI_PP2_DOMAIN_INDEX 3
+#define MALI_PP3_DOMAIN_INDEX 4
+#define MALI_PP4_DOMAIN_INDEX 5
+#define MALI_PP5_DOMAIN_INDEX 6
+#define MALI_PP6_DOMAIN_INDEX 7
+#define MALI_PP7_DOMAIN_INDEX 8
+#define MALI_L20_DOMAIN_INDEX 9
+#define MALI_L21_DOMAIN_INDEX 10
+#define MALI_L22_DOMAIN_INDEX 11
+
+#define MALI_MAX_NUMBER_OF_DOMAINS 12
+
+/* Record the domain config from the customer or default config */
+extern u16 mali_pmu_global_domain_config[];
+
+static inline u16 mali_pmu_get_domain_mask(u32 index)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+ return mali_pmu_global_domain_config[index];
+}
+
+static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+
+ mali_pmu_global_domain_config[index] = value;
+}
+
+static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
+{
+ _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
+}
+
+struct mali_pmu_core;
+
/** @brief Initialisation of MALI PMU
 *
 * This is called from the entry point of the driver in order to create and initialize the PMU resource
 *
 * @param resource it will be a pointer to a PMU resource
 * @return The created PMU object, or NULL in case of failure.
 */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief It deallocates the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_up.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_down.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp.c b/drivers/parrot/gpu/mali400/common/mali_pp.c
new file mode 100644
index 00000000000000..cd172201bcdc52
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+/**
+ * Create and register a new PP core object.
+ *
+ * Maps the core's register bank, soft-resets the core (unless it is the
+ * virtual/broadcast core), attaches it to the given group and installs the
+ * interrupt handlers.  On success the core is stored in the global PP core
+ * array.  Every intermediate step is unwound in reverse order on failure.
+ *
+ * @param resource   OS resource describing base address, IRQ and description
+ * @param group      Group the new core is added to
+ * @param is_virtual MALI_TRUE for the virtual (broadcast) core, which is not
+ *                   reset here
+ * @param bcast_id   "Flag" value used by the Mali-450 broadcast/DLBU unit
+ * @return Pointer to the new core on success, NULL on any failure
+ */
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+	struct mali_pp_core *core = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+	/* The global core array has a fixed capacity */
+	if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+		MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+		return NULL;
+	}
+
+	core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+	if (NULL != core) {
+		core->core_id = mali_global_num_pp_cores;
+		core->bcast_id = bcast_id;
+
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+			_mali_osk_errcode_t ret;
+
+			/* The virtual core has no real HW of its own to reset */
+			if (!is_virtual) {
+				ret = mali_pp_reset(core);
+			} else {
+				ret = _MALI_OSK_ERR_OK;
+			}
+
+			if (_MALI_OSK_ERR_OK == ret) {
+				ret = mali_group_add_pp_core(group, core);
+				if (_MALI_OSK_ERR_OK == ret) {
+					/* Setup IRQ handlers (which will do IRQ probing if needed) */
+					MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+					core->irq = _mali_osk_irq_init(resource->irq,
+								       mali_group_upper_half_pp,
+								       group,
+								       mali_pp_irq_probe_trigger,
+								       mali_pp_irq_probe_ack,
+								       core,
+								       resource->description);
+					if (NULL != core->irq) {
+						/* Fully initialized: publish in the global array */
+						mali_global_pp_cores[mali_global_num_pp_cores] = core;
+						mali_global_num_pp_cores++;
+
+						return core;
+					} else {
+						MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+					}
+					mali_group_remove_pp_core(group);
+				} else {
+					MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+				}
+			}
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	} else {
+		MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+	}
+
+	return NULL;
+}
+
+/**
+ * Destroy a PP core object created by mali_pp_create().
+ *
+ * Tears down the IRQ handler and the HW core mapping, removes the core from
+ * the global core array (compacting the array so it stays contiguous) and
+ * frees the object.
+ *
+ * @param core Core to delete; must not be NULL
+ */
+void mali_pp_delete(struct mali_pp_core *core)
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	_mali_osk_irq_term(core->irq);
+	mali_hw_core_delete(&core->hw_core);
+
+	/* Remove core from global list */
+	for (i = 0; i < mali_global_num_pp_cores; i++) {
+		if (mali_global_pp_cores[i] == core) {
+			mali_global_pp_cores[i] = NULL;
+			mali_global_num_pp_cores--;
+
+			if (i != mali_global_num_pp_cores) {
+				/* We removed a PP core from the middle of the array -- move the last
+				 * PP core to the current position to close the gap */
+				mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+				mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+			}
+
+			break;
+		}
+	}
+
+	_mali_osk_free(core);
+}
+
+/**
+ * Request the core to stop its bus (asynchronous).
+ *
+ * Only issues the STOP_BUS command; use mali_pp_stop_bus_wait() to wait
+ * for the bus to actually stop.
+ *
+ * @param core Core whose bus should be stopped
+ */
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	/* Will only send the stop bus command, and not wait for it to complete */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/**
+ * Stop the core's bus and busy-poll until the STATUS register reports
+ * BUS_STOPPED, or MALI_REG_POLL_COUNT_FAST iterations have elapsed.
+ *
+ * @param core Core whose bus should be stopped
+ * @return _MALI_OSK_ERR_OK when the bus stopped, _MALI_OSK_ERR_FAULT on
+ *         poll timeout
+ */
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Send the stop bus command. */
+	mali_pp_stop_bus(core);
+
+	/* Wait for bus to be stopped */
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+			break;
+	}
+
+	/* i only reaches the poll count when the loop never hit "break" */
+	if (MALI_REG_POLL_COUNT_FAST == i) {
+		MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+		return _MALI_OSK_ERR_FAULT;
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+ 0x0, /* Renderer List Address Register */
+ 0x0, /* Renderer State Word Base Address Register */
+ 0x0, /* Renderer Vertex Base Register */
+ 0x2, /* Feature Enable Register */
+ 0x0, /* Z Clear Value Register */
+ 0x0, /* Stencil Clear Value Register */
+ 0x0, /* ABGR Clear Value 0 Register */
+ 0x0, /* ABGR Clear Value 1 Register */
+ 0x0, /* ABGR Clear Value 2 Register */
+ 0x0, /* ABGR Clear Value 3 Register */
+ 0x0, /* Bounding Box Left Right Register */
+ 0x0, /* Bounding Box Bottom Register */
+ 0x0, /* FS Stack Address Register */
+ 0x0, /* FS Stack Size and Initial Value Register */
+ 0x0, /* Reserved */
+ 0x0, /* Reserved */
+ 0x0, /* Origin Offset X Register */
+ 0x0, /* Origin Offset Y Register */
+ 0x75, /* Subpixel Specifier Register */
+ 0x0, /* Tiebreak mode Register */
+ 0x0, /* Polygon List Format Register */
+ 0x0, /* Scaling Register */
+ 0x0 /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+ 0x0, /* WBx Source Select Register */
+ 0x0, /* WBx Target Address Register */
+ 0x0, /* WBx Target Pixel Format Register */
+ 0x0, /* WBx Target AA Format Register */
+ 0x0, /* WBx Target Layout */
+ 0x0, /* WBx Target Scanline Length */
+ 0x0, /* WBx Target Flags Register */
+ 0x0, /* WBx MRT Enable Register */
+ 0x0, /* WBx MRT Offset Register */
+ 0x0, /* WBx Global Test Enable Register */
+ 0x0, /* WBx Global Test Reference Value Register */
+ 0x0 /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+/**
+ * Force-reset the core and wait for the reset to take effect.
+ *
+ * Detects completion by writing a sentinel to WRITE_BOUNDARY_LOW and
+ * polling until a freshly written check value reads back (i.e. the
+ * register file responds again).  Interrupts are masked during the reset
+ * and re-enabled afterwards.
+ *
+ * @param core Core to reset; the caller must have stopped the bus first
+ * @return Always _MALI_OSK_ERR_OK (a poll timeout is only logged)
+ */
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+	/* Bus must be stopped before calling this function */
+	const u32 reset_invalid_value = 0xC0FFE000;
+	const u32 reset_check_value = 0xC01A0000;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+	/* Set register to a bogus value. The register will be used to detect when reset is complete */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+	/* Force core to reset */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+	/* Wait for reset to be complete */
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) {
+			break;
+		}
+	}
+
+	if (MALI_REG_POLL_COUNT_FAST == i) {
+		MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+	}
+
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Start a soft reset of the core without waiting for completion.
+ *
+ * Masks all interrupts, clears pending raw interrupt state and issues the
+ * SOFT_RESET command.  Pair with mali_pp_reset_wait() to wait for the
+ * RESET_COMPLETED interrupt.
+ *
+ * @param core Core to reset
+ */
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+/**
+ * Wait for a soft reset started with mali_pp_reset_async() to complete.
+ *
+ * Busy-polls until the core is no longer rendering and RAWSTAT equals
+ * RESET_COMPLETED, then re-enables the used interrupts.
+ *
+ * NOTE(review): unlike the sibling functions, this one does not
+ * MALI_DEBUG_ASSERT_POINTER(core) before dereferencing it — confirm
+ * whether that is intentional.
+ *
+ * @param core Core being reset
+ * @return _MALI_OSK_ERR_OK on completion, _MALI_OSK_ERR_FAULT on poll
+ *         timeout
+ */
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+	int i;
+	u32 rawstat = 0;
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+		if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+			rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+			if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
+				break;
+			}
+		}
+	}
+
+	if (i == MALI_REG_POLL_COUNT_FAST) {
+		MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+				  core->hw_core.description, rawstat));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Synchronous soft reset: start the reset and wait for it to complete.
+ *
+ * @param core Core to reset
+ * @return Result of mali_pp_reset_wait()
+ */
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+	mali_pp_reset_async(core);
+	return mali_pp_reset_wait(core);
+}
+
+/**
+ * Append to a DMA command buffer the register writes needed to start
+ * (sub-)job @sub_job of @job on @core.
+ *
+ * Mirrors mali_pp_job_start(), but instead of writing the registers
+ * directly it queues conditional writes (skipped when the value equals the
+ * register's reset value) into @buf, ending with the START_RENDERING
+ * command.
+ *
+ * @param core    Core the commands target
+ * @param job     PP job supplying the frame/WB/perf-counter register values
+ * @param sub_job Sub-job index within @job
+ * @param buf     DMA command buffer the writes are appended to
+ */
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+				 mali_dma_cmd_buf *buf)
+{
+	u32 relative_address;
+	u32 start_index;
+	u32 nr_of_regs;
+	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+	u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+	u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+	relative_address = MALI200_REG_ADDR_RSW;
+	start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+	nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+	mali_dma_write_array_conditional(buf, &core->hw_core,
+					 relative_address, &frame_registers[start_index],
+					 nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* MALI200_REG_ADDR_STACK_SIZE */
+	relative_address = MALI200_REG_ADDR_STACK_SIZE;
+	start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+	mali_dma_write_conditional(buf, &core->hw_core,
+				   relative_address, frame_registers[start_index],
+				   mali_frame_registers_reset_values[start_index]);
+
+	/* Skip 2 reserved registers */
+
+	/* Write remaining registers */
+	relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+	start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+	nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+	mali_dma_write_array_conditional(buf, &core->hw_core,
+					 relative_address, &frame_registers[start_index],
+					 nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* Write WBx registers only for write-back units with a source selected */
+	if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+		mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+		mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+		mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	/* Configure the performance counters the job asked for */
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+		mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+		mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+
+	/* This is the command that starts the core.
+	 *
+	 * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just
+	 * force core to assert the completion interrupt.
+	 */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+	mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+	mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+}
+
+/**
+ * Program the core's registers for (sub-)job @sub_job of @job and start
+ * rendering.
+ *
+ * Direct-write counterpart of mali_pp_job_dma_cmd_prepare(): frame
+ * registers, active write-back units and requested performance counters
+ * are written (conditionally, skipping values equal to the reset value),
+ * followed by the START_RENDERING command.
+ *
+ * @param core    Core to start the job on
+ * @param job     PP job supplying the register values
+ * @param sub_job Sub-job index within @job
+ */
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
+{
+	u32 relative_address;
+	u32 start_index;
+	u32 nr_of_regs;
+	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+	u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+	u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+	relative_address = MALI200_REG_ADDR_RSW;
+	start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+	nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+			relative_address, &frame_registers[start_index],
+			nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* MALI200_REG_ADDR_STACK_SIZE */
+	relative_address = MALI200_REG_ADDR_STACK_SIZE;
+	start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+	mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+			relative_address, frame_registers[start_index],
+			mali_frame_registers_reset_values[start_index]);
+
+	/* Skip 2 reserved registers */
+
+	/* Write remaining registers */
+	relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+	start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+	nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+			relative_address, &frame_registers[start_index],
+			nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* Write WBx registers only for write-back units with a source selected */
+	if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	/* Configure the performance counters the job asked for */
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+	if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+	}
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+	/* Adding barrier to make sure all register writes are finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core.
+	 *
+	 * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just
+	 * force core to assert the completion interrupt.
+	 */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+
+	/* Adding barrier to make sure previous register writes are finished */
+	_mali_osk_write_mem_barrier();
+}
+
+/**
+ * Read the core's VERSION register.
+ *
+ * @param core Core to query
+ * @return Raw value of MALI200_REG_ADDR_MGMT_VERSION
+ */
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+/**
+ * Look up a PP core in the global core array.
+ *
+ * @param index Zero-based core index
+ * @return The core at @index, or NULL if @index is out of range
+ */
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
+{
+	if (mali_global_num_pp_cores > index) {
+		return mali_global_pp_cores[index];
+	}
+
+	return NULL;
+}
+
+/** @return Number of PP cores currently registered in the global array */
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+	return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/**
+ * IRQ-probe trigger callback: unmask the used interrupts and force the
+ * FORCE_HANG interrupt in RAWSTAT so the OS layer can detect which IRQ
+ * line the core is wired to.
+ *
+ * @param data Opaque pointer to the struct mali_pp_core being probed
+ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/**
+ * IRQ-probe acknowledge callback: check whether the FORCE_HANG interrupt
+ * raised by mali_pp_irq_probe_trigger() is pending, and clear it if so.
+ *
+ * @param data Opaque pointer to the struct mali_pp_core being probed
+ * @return _MALI_OSK_ERR_OK when the probe interrupt was pending (and is
+ *         now cleared), _MALI_OSK_ERR_FAULT otherwise
+ */
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+	if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+		_mali_osk_mem_barrier();
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+}
+#endif
+
+/**
+ * Read instrumented HW counter values from @child and store them in @job.
+ *
+ * Whether each counter was enabled is determined from the job's counter
+ * sources for @subjob; values are always read from @child (which differs
+ * from @parent only for virtual jobs, per the declaration in mali_pp.h).
+ * When profiling is built in, the values are also reported to the
+ * profiling layer, indexed by the child's core ID.
+ *
+ * @param parent Core the counters were enabled on
+ * @param child  Core the counter values are read from
+ * @param job    Job object updated with the counter values
+ * @param subjob Sub-job the counters apply to
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+	u32 val0 = 0;
+	u32 val1 = 0;
+	u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+	u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+	/* Two profiling counter slots per PP core */
+	int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+		val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+		mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+		val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+		mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+	}
+}
+
+#if MALI_STATE_TRACKING
+/**
+ * Append a one-line description of @core (ID and description string) to
+ * @buf for the state-tracking dump.
+ *
+ * @param core Core to describe
+ * @param buf  Output buffer
+ * @param size Remaining space in @buf
+ * @return Number of characters written
+ */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp.h b/drivers/parrot/gpu/mali400/common/mali_pp.h
new file mode 100644
index 00000000000000..42a12532bcae57
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+#include "mali_dma.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
+
+/**
+ * @brief Add commands to DMA command buffer to start PP job on core.
+ */
+void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
+ mali_dma_cmd_buf *buf);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->core_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->bcast_id;
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child is always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters was enabled
+ * @param child The core to actually read the values from
+ * @param job Job object to update with counter values (if enabled)
+ * @param subjob Which subjob the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+{
+ return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+}
+
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+ struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
+}
+
+
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_stack(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp_job.c b/drivers/parrot/gpu/mali400/common/mali_pp_job.c
new file mode 100644
index 00000000000000..f000df287c7550
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp_job.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_dma.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_pp_scheduler.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+/** One-time module setup: initialize the atomic that counts enabled per-sub-job counters. */
+void mali_pp_job_initialize(void)
+{
+ _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+/** One-time module teardown; counterpart of mali_pp_job_initialize(). */
+void mali_pp_job_terminate(void)
+{
+ _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+/**
+ * Allocate and initialize a PP job from a user space job description.
+ *
+ * Copies the job arguments from user space, validates the sub job count and
+ * memory cookie count, falls back to the global/per-sub-job performance
+ * counters when user space enabled none, copies in the memory cookies, and
+ * pre-builds the Mali-450 DMA command buffer for virtual group jobs.
+ *
+ * @param session Submitting session (stored, not reference counted here — TODO confirm lifetime).
+ * @param uargs   User space pointer to the job arguments.
+ * @param id      Kernel-side sequential job id.
+ * @return The new job, or NULL on any failure (partial allocations are freed).
+ */
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+{
+ struct mali_pp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+ if (NULL != job) {
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+ goto fail;
+ }
+
+ if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+ goto fail;
+ }
+
+ /* Only allocate a completion notification if user space asked for one. */
+ if (!mali_pp_job_use_no_notification(job)) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL == job->finished_notification) goto fail;
+ }
+
+ perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+ /* case when no counters came from user space
+ * so pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+ /* These counters apply for all virtual jobs, and where no per sub job counter is specified */
+ job->uargs.perf_counter_src0 = pp_counter_src0;
+ job->uargs.perf_counter_src1 = pp_counter_src1;
+
+ /* We only copy the per sub job array if it is enabled with at least one counter */
+ if (0 < sub_job_count) {
+ job->perf_counter_per_sub_job_count = sub_job_count;
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+ }
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ _mali_osk_list_init(&job->session_list);
+ job->id = id;
+
+ /* num_cores == 0 means the DLBU distributes the work; it still counts as one sub job. */
+ job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ job->num_memory_cookies = job->uargs.num_memory_cookies;
+ if (job->num_memory_cookies > 0) {
+ u32 size;
+
+ /* A cookie count larger than the session's descriptor map cannot be valid. */
+ if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
+ goto fail;
+ }
+
+ size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+
+ job->memory_cookies = _mali_osk_malloc(size);
+ if (NULL == job->memory_cookies) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+ goto fail;
+ }
+
+ if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+ goto fail;
+ }
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* With map-on-demand dma-buf, reserve one attachment slot per cookie. */
+ job->num_dma_bufs = job->num_memory_cookies;
+ job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
+ if (NULL == job->dma_bufs) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+ goto fail;
+ }
+#endif
+ }
+
+ /* Prepare DMA command buffer to start job, if it is virtual. */
+ if (mali_pp_job_is_virtual_group_job(job)) {
+ struct mali_pp_core *core;
+ _mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
+ goto fail;
+ }
+
+ core = mali_pp_scheduler_get_virtual_pp();
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+ /* Not a valid job. */
+ goto fail;
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ }
+
+fail:
+ /* Reached both when the initial calloc failed (job == NULL) and on any
+ * later validation/allocation failure; mali_pp_job_delete() releases all
+ * partially allocated resources. */
+ if (NULL != job) {
+ mali_pp_job_delete(job);
+ }
+
+ return NULL;
+}
+
+/**
+ * Free a (possibly partially constructed) PP job and everything it owns:
+ * the DMA command buffer, the finished notification, the memory cookie
+ * array and (with map-on-demand dma-buf) the dma_buf attachment array.
+ * Safe to call from mali_pp_job_create()'s failure path because the job
+ * object is calloc'ed and all of these frees tolerate NULL/zero state.
+ */
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+ mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ }
+
+ _mali_osk_free(job->memory_cookies);
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Unmap buffers attached to job */
+ if (0 < job->num_dma_bufs) {
+ mali_dma_buf_unmap_job(job);
+ }
+
+ _mali_osk_free(job->dma_bufs);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+ _mali_osk_free(job);
+}
+
+/** Resolve the effective performance counter source 0 for one sub job. */
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter; so do jobs that have
+ * no per sub job counters enabled at all */
+ if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src0;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+ return job->perf_counter_per_sub_job_src0[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src0;
+}
+
+/** Resolve the effective performance counter source 1 for one sub job. */
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter; so do jobs that have
+ * no per sub job counters enabled at all */
+ if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src1;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+ return job->perf_counter_per_sub_job_src1[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src1;
+}
+
+/** Set the global (debugfs/DS-5 provided) counter source 0 used by future jobs. */
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+ pp_counter_src0 = counter;
+}
+
+/** Set the global (debugfs/DS-5 provided) counter source 1 used by future jobs. */
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+ pp_counter_src1 = counter;
+}
+
+/**
+ * Set counter source 0 for one specific sub job slot.
+ * Keeps pp_counter_per_sub_job_count in step with the number of enabled
+ * entries across both the src0 and src1 arrays.
+ */
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
+
+ pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+/**
+ * Set counter source 1 for one specific sub job slot.
+ * Mirrors mali_pp_job_set_pp_counter_sub_job_src0() for the src1 array.
+ */
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
+
+ pp_counter_per_sub_job_src1[sub_job] = counter;
+}
+
+/** Get the current global counter source 0. */
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+ return pp_counter_src0;
+}
+
+/** Get the current global counter source 1. */
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+ return pp_counter_src1;
+}
+
+/** Get the per-sub-job counter source 0 for one slot. */
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src0[sub_job];
+}
+
+/** Get the per-sub-job counter source 1 for one slot. */
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src1[sub_job];
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp_job.h b/drivers/parrot/gpu/mali400/common/mali_pp_job.h
new file mode 100644
index 00000000000000..978eadd749c347
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp_job.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dma.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+/**
+ * The structure represents a PP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ *
+ * NOTE(review): fields are written at creation time in mali_pp_job_create()
+ * and later by the scheduler; the locking discipline protecting the mutable
+ * counters (sub_jobs_started etc.) is not visible here — see the PP
+ * scheduler for the lock that serializes them (TODO confirm).
+ */
+struct mali_pp_job {
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_osk_list_t session_list; /**< Used to link jobs together in the session job list */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
+ mali_dma_cmd_buf dma_cmd_buf; /**< Command buffer for starting job using Mali-450 DMA unit */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+ u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ u32 sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ u32 num_memory_cookies; /**< Number of memory cookies attached to job */
+ u32 *memory_cookies; /**< Memory cookies attached to job */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ struct mali_dma_buf_attachment **dma_bufs; /**< Array of DMA-bufs used by job */
+ u32 num_dma_bufs; /**< Number of DMA-bufs used by job */
+#endif
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+};
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+/** Kernel-side sequential job id; NULL-safe (returns 0 for a NULL job). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+/** L2 cache flush order; NULL-safe (returns 0 for a NULL job). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+/* The accessors below expect a non-NULL job. */
+
+/** User space handle for this job (opaque pointer value from uargs). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+ return job->uargs.dlbu_registers;
+}
+
+/** TRUE when the job targets the Mali-450 virtual group (num_cores != 1).
+ * NOTE(review): this uses a *runtime* mali_is_mali450() check while
+ * mali_pp_job_is_with_dlbu() below uses compile-time CONFIG_MALI450 —
+ * verify the two are intentionally different. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual_group_job(struct mali_pp_job *job)
+{
+ if (mali_is_mali450()) {
+ return 1 != job->uargs.num_cores;
+ }
+
+ return MALI_FALSE;
+}
+
+/** TRUE when the job uses the DLBU to distribute work (num_cores == 0). */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_with_dlbu(struct mali_pp_job *job)
+{
+#if defined(CONFIG_MALI450)
+ return 0 == job->uargs.num_cores;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** Frame (renderer list) address for @sub_job: the DLBU virtual address for
+ * DLBU jobs, the user-supplied frame register for sub job 0, otherwise the
+ * per-sub-job address array; 0 for an out-of-range sub job index. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ if (mali_pp_job_is_with_dlbu(job)) {
+ return MALI_DLBU_VIRT_ADDR;
+ } else if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+/** Stack address for @sub_job, resolved like mali_pp_job_get_addr_frame()
+ * but with no DLBU special case. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+/* Write-back (WB) unit register accessors. */
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb2_registers;
+}
+
+/* Disabling a WB unit is done by zeroing its source-select register value
+ * in the job's register image (applied when the job is started). */
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+/** TRUE when every write-back unit is disabled, i.e. the job produces no
+ * output and can safely be aborted. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+ ) {
+ /* At least one output unit active */
+ return MALI_FALSE;
+ }
+
+ /* All outputs are disabled - we can abort the job */
+ return MALI_TRUE;
+}
+
+/** Hash-bucket index for the session's frame-builder lookup list,
+ * derived from the frame builder id. */
+MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ return job->session;
+}
+
+/** TRUE while at least one sub job has not yet been handed to a core. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has queued jobs: marks every
+ * unstarted sub job as started, completed and failed in one step, so no new
+ * sub jobs will be started and the job counts as complete.
+ * (NOTE(review): the original comment claimed a TRUE/FALSE return value,
+ * but this function returns void.) */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+ u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+ job->sub_job_errors += jobs_remaining;
+}
+
+/* Same as mali_pp_job_mark_unstarted_failed() but counts the remaining
+ * sub jobs as successful instead of failed. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
+{
+ u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+}
+
+/** TRUE once every sub job has completed (successfully or not). */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+/** Index of the next sub job to start (sub jobs start in ascending order). */
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ return job->sub_jobs_num;
+}
+
+/** TRUE when the job carries memory cookies and therefore may need dma-buf
+ * mapping before it can run. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT(job);
+
+ if (0 != job->num_memory_cookies) {
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+/** Record that @sub_job has been handed to a core; sub jobs must be started
+ * strictly in order. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+ job->sub_jobs_started++;
+}
+
+/** Record one completed sub job and accumulate its error status. */
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ job->sub_jobs_completed++;
+ if (MALI_FALSE == success) {
+ job->sub_job_errors++;
+ }
+}
+
+/** TRUE when no sub job reported an error. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ if (0 == job->sub_job_errors) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+/** TRUE when user space opted out of the job-finished notification. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+ return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ return job->uargs.perf_counter_flag;
+}
+
+
+/* Per-sub-job performance counter result accessors (values to be returned
+ * to user space on completion). */
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value1[sub_job] = value;
+}
+
+/** Validate job invariants: a DLBU job must have exactly one sub job. */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ if (mali_pp_job_is_with_dlbu(job) && job->sub_jobs_num != 1) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if first job should be started after second job.
+ *
+ * @param first First job.
+ * @param second Second job.
+ * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
+{
+ MALI_DEBUG_ASSERT_POINTER(first);
+ MALI_DEBUG_ASSERT_POINTER(second);
+
+ /* First job should be started after second job if second job is in progress. */
+ if (0 < second->sub_jobs_started) {
+ return MALI_TRUE;
+ }
+
+ /* First job should be started after second job if first job has a higher job id. A span is
+ used to handle job id wrapping (the u32 subtraction wraps modulo 2^32,
+ so an id difference within the span still compares correctly). */
+ if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
+ return MALI_TRUE;
+ }
+
+ /* Second job should be started after first job. */
+ return MALI_FALSE;
+}
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ *
+ * Only meaningful for physical jobs (asserted not virtual).
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
+
+ return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.c b/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.c
new file mode 100644
index 00000000000000..993f6d868fdb6e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.c
@@ -0,0 +1,2127 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_session.h"
+#include "mali_pm_domain.h"
+#include "linux/mali/mali_utgard.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+/* Queue type used for physical and virtual job queues. */
+struct mali_pp_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
+ _MALI_OSK_LIST_HEAD(high_pri); /* List of high priority jobs with some unscheduled work. */
+ u32 depth; /* Depth of combined queues. */
+};
+
+/* If dma_buf with map on demand is used, we defer job deletion and job queue if in atomic context,
+ * since both might sleep. */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+static void mali_pp_scheduler_job_queued(void);
+static void mali_pp_scheduler_job_completed(void);
+
+/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
+
+static mali_bool mali_pp_scheduler_is_suspended(void *data);
+
+static u32 pp_version = 0;
+
+/* Physical job queue */
+static struct mali_pp_scheduler_job_queue job_queue;
+
+/* Physical groups */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); /* List of physical groups with working jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); /* List of physical groups with idle jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); /* List of disabled physical groups */
+
+/* Virtual job queue (Mali-450 only) */
+static struct mali_pp_scheduler_job_queue virtual_group_job_queue;
+
+/**
+ * Add job to scheduler queue.
+ *
+ * @param job Job to queue.
+ * @return Schedule mask.
+ */
+static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
+
+/* Virtual group (Mali-450 only) */
+static struct mali_group *virtual_group = NULL; /* Virtual group (if any) */
+static enum {
+ VIRTUAL_GROUP_IDLE,
+ VIRTUAL_GROUP_WORKING,
+ VIRTUAL_GROUP_DISABLED,
+}
+virtual_group_state = VIRTUAL_GROUP_IDLE; /* Flag which indicates whether the virtual group is working or idle */
+
+/* Number of physical cores */
+static u32 num_cores = 0;
+
+/* Number of physical cores which are enabled */
+static u32 enabled_cores = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
+#else
+static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+
+/** Take the PP scheduler lock (IRQ-safe spinlock when upper-half scheduling
+ * is enabled, plain spinlock otherwise). */
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_lock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
+}
+
+/** Release the PP scheduler lock taken by mali_pp_scheduler_lock(). */
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
+#else
+ _mali_osk_spinlock_unlock(pp_scheduler_lock);
+#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
+}
+
+#if defined(DEBUG)
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
+#endif /* defined(DEBUG) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
+
+/** Queue @job for deletion on the work queue instead of deleting inline —
+ * used because deletion may sleep (dma-buf unmap) and callers can be in
+ * atomic context. */
+static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+ /* This job object should not be on any lists. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
+}
+
+/** Work queue handler: delete all jobs queued by
+ * mali_pp_scheduler_deferred_job_delete(), outside any spinlock. */
+static void mali_pp_scheduler_do_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
+ * we start deleting the job objects (without any locks held)
+ */
+ _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
+
+/** Hand @job to the work queue for queueing, because queueing may sleep
+ * (dma-buf map on demand) and the caller can be in atomic context. */
+static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+ _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
+}
+
+/** Work queue handler: move all deferred jobs into the scheduler queues and
+ * then kick the scheduler with the accumulated schedule mask. */
+static void mali_pp_scheduler_do_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
+ * we start queueing the job objects (without any locks held)
+ */
+ _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
+
+ _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
+ _mali_osk_list_delinit(&job->list);
+ schedule_mask |= mali_pp_scheduler_queue_job(job);
+ }
+
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+/** TRUE when a Mali-450 virtual group was registered with the scheduler. */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
+{
+#if defined(CONFIG_MALI450)
+ return NULL != virtual_group;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
/**
 * Allocate all PP scheduler resources: the job queues, the scheduler lock,
 * the suspend wait queue and (when configured) the deferred-delete and
 * deferred-queue work items with their IRQ-safe spinlocks.
 *
 * On any allocation failure, everything allocated so far is released again
 * (goto-cleanup chain, in reverse order of allocation).
 *
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on failure.
 */
_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
{
	/* Both job queues (physical and virtual-group) start out empty. */
	_MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
	_MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
	job_queue.depth = 0;

	_MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.normal_pri);
	_MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.high_pri);
	virtual_group_job_queue.depth = 0;

	/* With upper-half scheduling the lock may be taken from interrupt
	 * context, so an IRQ-safe spinlock is required. */
#if defined(MALI_UPPER_HALF_SCHEDULING)
	pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
#else
	pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
	if (NULL == pp_scheduler_lock) goto cleanup;

	pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
	if (NULL == pp_scheduler_working_wait_queue) goto cleanup;

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
	if (NULL == pp_scheduler_wq_job_delete) goto cleanup;

	pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
	if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
	pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
	if (NULL == pp_scheduler_wq_job_queue) goto cleanup;

	pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
	if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */

	return _MALI_OSK_ERR_OK;

cleanup:
	/* Tear down in reverse allocation order; each pointer is NULL-checked
	 * because we can arrive here from any of the failure points above. */
#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
	if (NULL != pp_scheduler_job_queue_lock) {
		_mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
		pp_scheduler_job_queue_lock = NULL;
	}

	if (NULL != pp_scheduler_wq_job_queue) {
		_mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
		pp_scheduler_wq_job_queue = NULL;
	}
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	if (NULL != pp_scheduler_job_delete_lock) {
		_mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
		pp_scheduler_job_delete_lock = NULL;
	}

	if (NULL != pp_scheduler_wq_job_delete) {
		_mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
		pp_scheduler_wq_job_delete = NULL;
	}
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */

	if (NULL != pp_scheduler_working_wait_queue) {
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		pp_scheduler_working_wait_queue = NULL;
	}

	if (NULL != pp_scheduler_lock) {
#if defined(MALI_UPPER_HALF_SCHEDULING)
		_mali_osk_spinlock_irq_term(pp_scheduler_lock);
#else
		_mali_osk_spinlock_term(pp_scheduler_lock);
#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
		pp_scheduler_lock = NULL;
	}

	return _MALI_OSK_ERR_NOMEM;
}
+
/**
 * Release every resource allocated by mali_pp_scheduler_initialize(),
 * in reverse order of allocation. Assumes initialization succeeded
 * (no NULL checks here, unlike the init cleanup path).
 */
void mali_pp_scheduler_terminate(void)
{
#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
	_mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
	_mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	_mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
	_mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */

	_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);

	/* The lock flavor must match the one chosen in initialize(). */
#if defined(MALI_UPPER_HALF_SCHEDULING)
	_mali_osk_spinlock_irq_term(pp_scheduler_lock);
#else
	_mali_osk_spinlock_term(pp_scheduler_lock);
#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
}
+
/**
 * Discover all PP groups and hand them to the scheduler.
 *
 * First locates the virtual group (if any), then walks all physical PP
 * groups: the PP core version is sampled from the first core found, and
 * each physical group is either merged into the virtual group or placed
 * on the idle list. Updates num_cores/enabled_cores.
 */
void mali_pp_scheduler_populate(void)
{
	struct mali_group *group;
	struct mali_pp_core *pp_core;
	u32 num_groups;
	u32 i;

	num_groups = mali_group_get_glob_num_groups();

	/* Do we have a virtual group? */
	for (i = 0; i < num_groups; i++) {
		group = mali_group_get_glob_group(i);

		if (mali_group_is_virtual(group)) {
			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));

			virtual_group = group;
			break;
		}
	}

	/* Find all the available PP cores */
	for (i = 0; i < num_groups; i++) {
		group = mali_group_get_glob_group(i);
		pp_core = mali_group_get_pp_core(group);

		if (NULL != pp_core && !mali_group_is_virtual(group)) {
			if (0 == pp_version) {
				/* Retrieve PP version from the first available PP core */
				pp_version = mali_pp_core_get_version(pp_core);
			}

			if (mali_pp_scheduler_has_virtual_group()) {
				/* Add all physical PP cores to the virtual group */
				/* JOINING_VIRTUAL blocks interrupt handling for the
				 * group while the membership change is in flight. */
				mali_group_lock(virtual_group);
				group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
				mali_group_add_group(virtual_group, group, MALI_TRUE);
				mali_group_unlock(virtual_group);
			} else {
				_mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
			}

			num_cores++;
		}
	}

	/* All discovered cores start out enabled. */
	enabled_cores = num_cores;
}
+
/**
 * Delete every group owned by the scheduler (virtual group plus all idle
 * and disabled physical groups). Must only be called when nothing is
 * running: the working list must be empty and the virtual group must not
 * be in the WORKING state.
 */
void mali_pp_scheduler_depopulate(void)
{
	struct mali_group *group, *temp;

	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
	MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);

	/* Delete all groups owned by scheduler */
	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_delete(virtual_group);
	}

	/* Safe-iteration variant: mali_group_delete unlinks the entry. */
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
		mali_group_delete(group);
	}
	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
		mali_group_delete(group);
	}
}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_disable_if_empty(virtual_group)) {
+ MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_DISABLED;
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_enable_if_empty(virtual_group)) {
+ MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_IDLE;
+ }
+}
+
+static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
+{
+ struct mali_pp_job *job = NULL;
+
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Check if we have a normal priority job. */
+ if (!_mali_osk_list_empty(&queue->normal_pri)) {
+ MALI_DEBUG_ASSERT(queue->depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
+ }
+
+ /* Prefer normal priority job if it is in progress. */
+ if (NULL != job && 0 < job->sub_jobs_started) {
+ return job;
+ }
+
+ /* Check if we have a high priority job. */
+ if (!_mali_osk_list_empty(&queue->high_pri)) {
+ MALI_DEBUG_ASSERT(queue->depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
+ }
+
+ return job;
+}
+
+/**
+ * Returns a physical job if a physical job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ return mali_pp_scheduler_get_job(&job_queue);
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(job_queue.depth > 0);
+
+ /* Remove job from queue */
+ if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* All sub jobs have been started: remove job from queue */
+ _mali_osk_list_delinit(&job->list);
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+ }
+
+ --job_queue.depth;
+}
+
+/**
+ * Returns a virtual job if a virtual job is ready to run
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_group_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ return mali_pp_scheduler_get_job(&virtual_group_job_queue);
+}
+
+static void mali_pp_scheduler_dequeue_virtual_group_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);
+
+ /* Remove job from queue */
+ if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
+ _mali_osk_list_delinit(&job->list);
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+ --virtual_group_job_queue.depth;
+ }
+}
+
/**
 * Mark the sub jobs the virtual group will run in one go, transition the
 * virtual group to WORKING, and dequeue @job from the virtual queue once
 * it has no unstarted sub jobs left.
 *
 * On return, [*first_subjob, *last_subjob] is the inclusive sub job index
 * range the caller must start on the virtual group.
 *
 * @note Both the virtual group lock and the scheduler lock must be held.
 */
MALI_STATIC_INLINE void mali_pp_scheduler_pick_virtual_group_job(struct mali_pp_job *job,
		u32 *first_subjob, u32 *last_subjob)
{
	MALI_ASSERT_GROUP_LOCKED(virtual_group);
	MALI_ASSERT_PP_SCHEDULER_LOCKED();

	MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));

	MALI_DEBUG_ASSERT_POINTER(first_subjob);
	MALI_DEBUG_ASSERT_POINTER(last_subjob);

	MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);

	MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));

	/* Start with a single-subjob range; widened below for non-DLBU jobs. */
	*first_subjob = *last_subjob =
		mali_pp_job_get_first_unstarted_sub_job(job);

	if (mali_pp_job_is_with_dlbu(job)) {
		/* A DLBU job has exactly one sub job covering the whole frame. */
		MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
		mali_pp_job_mark_sub_job_started(job, 0);
	} else {
		/* Without the DLBU, start one sub job per child group currently
		 * in the virtual group (or fewer if the job runs out of them). */
		struct mali_group *child, *temp;
		_MALI_OSK_LIST_FOREACHENTRY(child, temp,
					    &virtual_group->group_list, struct mali_group, group_list) {
			if (mali_pp_job_has_unstarted_sub_jobs(job)) {
				*last_subjob = mali_pp_job_get_first_unstarted_sub_job(job);
				mali_pp_job_mark_sub_job_started(job, *last_subjob);
			} else {
				break;
			}
		}
	}

	/* Virtual group is now working. */
	virtual_group_state = VIRTUAL_GROUP_WORKING;

	/* Unlinks the job from the queue if all sub jobs are now started. */
	mali_pp_scheduler_dequeue_virtual_group_job(job);
}
+
+
+/**
+ * Checks if the criteria is met for removing a physical core from virtual group
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ /*
+ * The criteria for taking out a physical group from a virtual group are the following:
+ * - There virtual group is idle
+ * - There are currently no physical groups (idle and working)
+ * - There are physical jobs to be scheduled
+ */
+ return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
+ _mali_osk_list_empty(&group_list_idle) &&
+ _mali_osk_list_empty(&group_list_working) &&
+ (NULL != mali_pp_scheduler_get_physical_job());
+}
+
+MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (!_mali_osk_list_empty(&group_list_idle)) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
+ return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+ } else if (mali_pp_scheduler_has_virtual_group()) {
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ if (mali_pp_scheduler_can_move_virtual_to_physical()) {
+ struct mali_group *group;
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
+ group = mali_group_acquire_group(virtual_group);
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_pp_scheduler_disable_empty_virtual();
+ }
+
+ return group;
+ }
+ }
+
+ return NULL;
+}
+
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
+{
+ if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
+ u32 i;
+ u32 num_counters_to_copy;
+ mali_bool success = mali_pp_job_was_success(job);
+
+ _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_with_dlbu(job)) {
+ num_counters_to_copy = num_cores; /* Number of physical cores available */
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+ for (i = 0; i < num_counters_to_copy; i++) {
+ jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+ jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
+ jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
+ }
+
+ mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ if (MALI_TRUE == deferred) {
+ /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
+ mali_pp_scheduler_deferred_job_delete(job);
+ } else {
+ mali_pp_job_delete(job);
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
+ mali_pp_job_delete(job);
+#endif
+}
+
/**
 * Final bookkeeping for a fully completed job: notify user space, delete
 * the job (deferred when so configured), update the per-session window-job
 * counter for the power/performance policy, and release the scheduler's
 * per-job references (PM ref etc. via mali_pp_scheduler_job_completed()).
 */
static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job)
{
	/* This job object should not be on any lists. */
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));

	/* Send notification back to user space */
	/* Deletion must be deferred when this can run in atomic context. */
#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
#else
	mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
#endif

#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
	/* Window-surface jobs feed the power/performance policy heuristics. */
	if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
		_mali_osk_atomic_inc(&job->session->number_of_window_jobs);
	}
#endif

	mali_pp_scheduler_job_completed();
}
+
/**
 * Schedule as many queued jobs as possible onto available groups.
 *
 * Physical jobs are matched with physical groups first (possibly pulling a
 * group out of an idle virtual group), then a virtual job is started on the
 * virtual group if it is idle. The actual register writes for physical jobs
 * are deliberately postponed until all scheduler locks have been dropped.
 *
 * Lock order: virtual group lock (if any) is taken before the scheduler
 * lock; both are released before the physical jobs are started.
 */
void mali_pp_scheduler_schedule(void)
{
	struct mali_group *physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
	struct mali_pp_job *physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
	u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
	int num_physical_jobs_to_start = 0;
	int i;

	if (mali_pp_scheduler_has_virtual_group()) {
		/* Lock the virtual group since we might have to grab physical groups. */
		mali_group_lock(virtual_group);
	}

	mali_pp_scheduler_lock();
	if (pause_count > 0) {
		/* Scheduler is suspended, don't schedule any jobs. */
		mali_pp_scheduler_unlock();
		if (mali_pp_scheduler_has_virtual_group()) {
			mali_group_unlock(virtual_group);
		}
		return;
	}

	/* Find physical job(s) to schedule first. */
	while (1) {
		struct mali_group *group;
		struct mali_pp_job *job;
		u32 sub_job;

		job = mali_pp_scheduler_get_physical_job();
		if (NULL == job) {
			break; /* No job, early out. */
		}

		if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
		    mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
			/* Since not all groups are idle, don't schedule yet. */
			break;
		}

		MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
		MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
		MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));

		/* Acquire a physical group, either from the idle list or from the virtual group.
		 * In case the group was acquired from the virtual group, it's state will be
		 * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
		group = mali_pp_scheduler_acquire_physical_group();
		if (NULL == group) {
			/* Could not get a group to run the job on, early out. */
			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
			break;
		}

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));

		/* Mark sub job as started. */
		sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
		mali_pp_job_mark_sub_job_started(job, sub_job);

		/* Remove job from queue (if this was the last sub job). */
		mali_pp_scheduler_dequeue_physical_job(job);

		/* Move group to working list. */
		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);

		/* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
		physical_groups_to_start[num_physical_jobs_to_start] = group;
		physical_jobs_to_start[num_physical_jobs_to_start] = job;
		physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
		++num_physical_jobs_to_start;

		MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
	}

	if (mali_pp_scheduler_has_virtual_group()) {
		if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
			/* We have a virtual group and it is idle. */

			struct mali_pp_job *job;

			/* Find a virtual job we can start. */
			job = mali_pp_scheduler_get_virtual_group_job();

			if (NULL != job) {
				u32 first_subjob, last_subjob;

				/* To mark necessary subjobs status of this job and remove the job from job queue
				 * when all the subjobs will be run by this virtual group in one go
				 */
				mali_pp_scheduler_pick_virtual_group_job(job, &first_subjob, &last_subjob);

				/* We no longer need the scheduler lock, but we still need the virtual lock
				 * in order to start the virtual job.
				 */
				mali_pp_scheduler_unlock();

				/* Start job. */
				mali_group_start_job_on_virtual(virtual_group, job, first_subjob, last_subjob);
			} else {
				/* No virtual job to start. */
				mali_pp_scheduler_unlock();
			}
		} else {
			/* We have a virtual group, but it is busy or disabled. */
			MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);

			mali_pp_scheduler_unlock();
		}
		mali_group_unlock(virtual_group);
	} else {
		/* There is no virtual group. */
		mali_pp_scheduler_unlock();
	}

	/* We have now released the scheduler lock, and we are ready to start the physical jobs.
	 * The reason we want to wait until we have released the scheduler lock is that job start
	 * may take quite a bit of time (many registers have to be written). This will allow new
	 * jobs from user space to come in, and post-processing of other PP jobs to happen at the
	 * same time as we start jobs. */
	for (i = 0; i < num_physical_jobs_to_start; i++) {
		struct mali_group *group = physical_groups_to_start[i];
		struct mali_pp_job *job = physical_jobs_to_start[i];
		u32 sub_job = physical_sub_jobs_to_start[i];

		MALI_DEBUG_ASSERT_POINTER(group);
		MALI_DEBUG_ASSERT_POINTER(job);
		MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
		MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));

		mali_group_lock(group);

		/* Set state to IDLE if group was acquired from the virtual group. */
		group->state = MALI_GROUP_STATE_IDLE;

		mali_group_start_job_on_group(group, job, sub_job);

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
				     mali_pp_job_get_id(job), job, sub_job + 1,
				     mali_pp_job_get_sub_job_count(job)));

		mali_group_unlock(group);
	}
}
+
+/**
+ * Set group idle.
+ *
+ * If @ref group is the virtual group, nothing is done since the virtual group should be idle
+ * already.
+ *
+ * If @ref group is a physical group we rejoin the virtual group, if it exists. If not, we move the
+ * physical group to the idle list.
+ *
+ * @note The group and the scheduler must both be locked when entering this function. Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to set idle.
+ */
static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	MALI_ASSERT_GROUP_LOCKED(group);
	MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

	if (mali_group_is_virtual(group)) {
		/* The virtual group should have been set to non-working already. */
		MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);

		mali_pp_scheduler_unlock();
		mali_group_unlock(group);

		return;
	} else {
		if (mali_pp_scheduler_has_virtual_group()) {
			/* Rejoin virtual group. */

			/* We're no longer needed on the scheduler list. */
			_mali_osk_list_delinit(&(group->pp_scheduler_list));

			/* Make sure no interrupts are handled for this group during the transition
			 * from physical to virtual. */
			group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;

			/* Both locks must be dropped before taking the virtual group
			 * lock, to respect the established lock ordering. */
			mali_pp_scheduler_unlock();
			mali_group_unlock(group);

			mali_group_lock(virtual_group);

			if (mali_pp_scheduler_has_virtual_group()) {
				mali_pp_scheduler_enable_empty_virtual();
			}

			/* We need to recheck the group state since it is possible that someone has
			 * modified the group before we locked the virtual group. */
			if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
				mali_group_add_group(virtual_group, group, MALI_TRUE);
			}

			mali_group_unlock(virtual_group);
		} else {
			/* Move physical group back to idle list. */
			_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
			/* Report the core as idle (pid 0) to the GPU tracepoints. */
			trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
#endif

			mali_pp_scheduler_unlock();
			mali_group_unlock(group);
		}
	}
}
+
+/**
+ * Schedule job on locked group.
+ *
+ * @note The group and the scheduler must both be locked when entering this function. Both will be
+ * unlocked before exiting.
+ *
+ * @param group The group to schedule on.
+ */
static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	MALI_ASSERT_GROUP_LOCKED(group);
	MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

	if (mali_group_is_virtual(group)) {
		/* Now that the virtual group is idle, check if we should reconfigure. */

		struct mali_pp_job *virtual_job = NULL;
		struct mali_pp_job *physical_job = NULL;
		struct mali_group *physical_group = NULL;
		u32 physical_sub_job = 0;

		MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);

		if (mali_pp_scheduler_can_move_virtual_to_physical()) {
			/* There is a runnable physical job and we can acquire a physical group. */
			physical_job = mali_pp_scheduler_get_physical_job();
			MALI_DEBUG_ASSERT_POINTER(physical_job);
			MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));

			/* Mark sub job as started. */
			physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
			mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);

			/* Remove job from queue (if this was the last sub job). */
			mali_pp_scheduler_dequeue_physical_job(physical_job);

			/* Acquire a physical group from the virtual group. Its state will
			 * be LEAVING_VIRTUAL and must be set to IDLE before it can be
			 * used. */
			physical_group = mali_group_acquire_group(virtual_group);

			/* Move physical group to the working list, as we will soon start a job on it. */
			_mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);

			mali_pp_scheduler_disable_empty_virtual();
		}

		/* Get next virtual job. */
		virtual_job = mali_pp_scheduler_get_virtual_group_job();
		if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
			u32 first_subjob, last_subjob;
			/* To mark necessary subjobs status of this job and remove the job from job queue
			 * when all the subjobs will be run by this virtual group in one go
			 */
			mali_pp_scheduler_pick_virtual_group_job(virtual_job, &first_subjob, &last_subjob);

			/* We no longer need the scheduler lock, but we still need the virtual lock
			 * in order to start the virtual job.
			 */
			mali_pp_scheduler_unlock();

			/* Start job. */
			mali_group_start_job_on_virtual(group, virtual_job, first_subjob, last_subjob);

			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
					     mali_pp_job_get_id(virtual_job), virtual_job, 1,
					     mali_pp_job_get_sub_job_count(virtual_job)));
		} else {
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
			/* Report the virtual core as idle (pid 0) to the GPU tracepoints. */
			trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
#endif

			mali_pp_scheduler_unlock();
		}

		/* Releasing the virtual group lock that was held when entering the function. */
		mali_group_unlock(group);

		/* Start a physical job (if we acquired a physical group earlier). */
		/* Done after all other locks are dropped; only the physical
		 * group's own lock is taken for the register writes. */
		if (NULL != physical_job && NULL != physical_group) {
			mali_group_lock(physical_group);

			/* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
			physical_group->state = MALI_GROUP_STATE_IDLE;

			/* Start job. */
			mali_group_start_job_on_group(physical_group, physical_job, physical_sub_job);

			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
					     mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
					     mali_pp_job_get_sub_job_count(physical_job)));

			mali_group_unlock(physical_group);
		}
	} else {
		/* Physical group. */
		struct mali_pp_job *job = NULL;
		u32 sub_job = 0;

		job = mali_pp_scheduler_get_physical_job();
		if (NULL != job) {
			/* There is a runnable physical job. */
			MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));

			/* Mark sub job as started. */
			sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
			mali_pp_job_mark_sub_job_started(job, sub_job);

			/* Remove job from queue (if this was the last sub job). */
			mali_pp_scheduler_dequeue_physical_job(job);

			mali_pp_scheduler_unlock();

			/* Group is already on the working list, so start the new job. */
			mali_group_start_job_on_group(group, job, sub_job);

			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
					     mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));

			mali_group_unlock(group);
		} else {
			/* Nothing to run: idle the group (it may rejoin the virtual group). */
			mali_pp_scheduler_set_group_idle_and_unlock(group);
		}
	}
}
+
/**
 * Handle completion of @sub_job of @job on @group.
 *
 * Marks the finished sub job(s) completed, finalizes the job when all parts
 * are done (dequeue, session-list removal, timeline tracker release), then
 * either reschedules directly on this group or sets the group idle. When
 * the scheduler is paused, the group is parked and sleeping workers are
 * woken instead.
 *
 * @param group         Group that raised the completion (locked on entry;
 *                      unlocked before return).
 * @param job           The job the sub job belongs to.
 * @param sub_job       Index of the completed sub job.
 * @param success       MALI_TRUE if the sub job completed successfully.
 * @param in_upper_half MALI_TRUE when called from upper-half (interrupt)
 *                      context; forwarded to the mask scheduler.
 */
void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
{
	mali_bool job_is_done = MALI_FALSE;
	mali_bool schedule_on_group = MALI_FALSE;
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
			     mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
			     mali_pp_job_get_id(job),
			     job, sub_job + 1,
			     mali_pp_job_get_sub_job_count(job),
			     success ? "success" : "failure"));

	MALI_ASSERT_GROUP_LOCKED(group);
	mali_pp_scheduler_lock();

	if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
		u32 subjobs;

		/* Get how many subjobs are running parallel in this virtual group */
		subjobs = mali_pp_job_get_first_unstarted_sub_job(job) - group->pp_running_sub_job;
		MALI_DEBUG_ASSERT(subjobs > 0);

		/* One completion callback covers all sub jobs run in this batch. */
		for (; 0 < subjobs; subjobs--) {
			mali_pp_job_mark_sub_job_completed(job, success);
		}

		mali_group_non_dlbu_job_done_virtual(group);
	} else {
		mali_pp_job_mark_sub_job_completed(job, success);
	}

	MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job) == mali_group_is_virtual(group));

	job_is_done = mali_pp_job_is_complete(job);

	if (job_is_done) {
		/* Some groups added into the virtual group may take some subjobs to run but
		 * without dequeuing the job, here do necessary dequeue under scheduler lock
		 */
		if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
			if (!_mali_osk_list_empty(&job->list)) {
				mali_pp_scheduler_dequeue_virtual_group_job(job);
			}
		}

		MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
		MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));

		/* Remove job from session list. */
		_mali_osk_list_delinit(&job->session_list);

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
				     mali_pp_job_is_virtual_group_job(job) ? "virtual group" : "physical",
				     mali_pp_job_get_id(job), job));

		/* The scheduler lock is dropped around the tracker release,
		 * which may activate other trackers waiting on this one. */
		mali_pp_scheduler_unlock();

		/* Release tracker. If other trackers are waiting on this tracker, this could
		 * trigger activation. The returned scheduling mask can be used to determine if we
		 * have to schedule GP, PP or both. */
		schedule_mask = mali_timeline_tracker_release(&job->tracker);

		mali_pp_scheduler_lock();
	}

	if (mali_group_is_virtual(group)) {
		/* Obey the policy. */
		virtual_group_state = VIRTUAL_GROUP_IDLE;
	}

	/* If paused, then this was the last job, so wake up sleeping workers and return. */
	if (pause_count > 0) {
		/* Wake up sleeping workers. Their wake-up condition is that
		 * num_slots == num_slots_idle, so unless we are done working, no
		 * threads will actually be woken up.
		 */
		if (!mali_group_is_virtual(group)) {
			/* Move physical group to idle list. */
			_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
		}

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
		trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
#endif

		_mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);

		mali_pp_scheduler_unlock();
		mali_group_unlock(group);

		if (job_is_done) {
			/* Return job to user and delete it. */
			mali_pp_scheduler_finalize_job(job);
		}

		/* A GP job might be queued by tracker release above,
		 * make sure GP scheduler has a chance to schedule this (if possible)
		 */
		mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);

		return;
	}

	/* Since this group just finished running a job, we can reschedule a new job on it
	 * immediately. */

	/* By default, don't schedule on group. */
	schedule_on_group = MALI_FALSE;

	if (mali_group_is_virtual(group)) {
		/* Always schedule immediately on virtual group. */
		schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
		schedule_on_group = MALI_TRUE;
	} else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
		struct mali_pp_job *next_job = NULL;

		next_job = mali_pp_scheduler_get_physical_job();
		MALI_DEBUG_ASSERT_POINTER(next_job);

		/* If no new jobs have been queued or if this group is the only idle group, we can
		 * schedule immediately on this group, unless we are GP bound and the next job would
		 * benefit from all its sub jobs being started concurrently. */

		if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
			/* We are GP bound and the job would benefit from all sub jobs being started
			 * concurrently. Postpone scheduling until after group has been unlocked. */
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
			schedule_on_group = MALI_FALSE;
		} else {
			/* Schedule job immediately since we are not GP bound. */
			schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
			schedule_on_group = MALI_TRUE;
		}
	} else if ((0 < virtual_group_job_queue.depth) && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP))) {
		/* This case is rare, only in virtual job has sub jobs case,
		 * the last "pilot job" here might not be the real pilot job,
		 * it may be a real pp job with only 1 subjob i.e. only 1 region inform,
		 * so this job may not trigger any virtual job queued in virtual queue,
		 * so it may suspend the pp scheduler, even when there are already
		 * some jobs in the virtual queue, so in this case we need to explicit
		 * set the schedule_mask */
		schedule_mask |= MALI_SCHEDULER_MASK_PP;
	}

	/* Both branches below release the scheduler lock and the group lock. */
	if (schedule_on_group) {
		/* Schedule a new job on this group. */
		mali_pp_scheduler_schedule_on_group_and_unlock(group);
	} else {
		/* Set group idle. Will rejoin virtual group, under appropriate conditions. */
		mali_pp_scheduler_set_group_idle_and_unlock(group);
	}

	if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
		if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
			/* Schedule PP directly. */
			mali_pp_scheduler_schedule();
			schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
		}

		/* Schedule other jobs that were activated. */
		mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
	}

	if (job_is_done) {
		/* Return job to user and delete it. */
		mali_pp_scheduler_finalize_job(job);
	}
}
+
+void mali_pp_scheduler_suspend(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_pp_scheduler_unlock();
+
+ /* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+ * mali_pp_scheduler_suspended() function will be called. This will return true
+ * if state is idle and pause_count > 0, so if the core is active this
+ * will not do anything.
+ */
+ _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
+}
+
+void mali_pp_scheduler_resume(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ mali_pp_scheduler_unlock();
+ if (0 == pause_count) {
+ mali_pp_scheduler_schedule();
+ }
+}
+
/**
 * Hand a newly created PP job over to the scheduler and the Timeline system.
 *
 * Adds the job to the session's frame-builder lookup list (used by
 * _mali_ukk_pp_job_disable_wb() to quickly find queued jobs for a given
 * frame builder), takes a PM device reference that is held for the lifetime
 * of the job, and registers the job's tracker with the Timeline system.
 *
 * @param session Session the job belongs to.
 * @param job     Job to submit; ownership is transferred by this call.
 * @return The timeline point assigned to the job's tracker.
 *
 * NOTE(review): once mali_timeline_system_add_tracker() has been called the
 * job may be activated, executed and freed at any time - the caller must not
 * touch @job after this function returns.
 */
static mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
{
	mali_timeline_point point;
	u32 fb_lookup_id = 0;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(job);

	mali_pp_scheduler_lock();

	fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
	MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);

	/* Adding job to the lookup list used to quickly discard writeback units of queued jobs. */
	_mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);

	mali_pp_scheduler_unlock();

	/* We hold a PM reference for every job we hold queued (and running) */
	_mali_osk_pm_dev_ref_add();

	/* Add job to Timeline system. */
	point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);

	return point;
}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *timeline_point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+
+ point = mali_pp_scheduler_submit_job(session, job);
+ job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the job was started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *timeline_point_ptr = NULL;
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *) ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_job = mali_pp_job_create(session, kargs.pp_args, mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_job = mali_gp_job_create(session, kargs.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ timeline_point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;
+
+ /* Submit GP job. */
+ mali_gp_scheduler_submit_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ point = mali_pp_scheduler_submit_job(session, pp_job);
+ pp_job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
+ /* Let user space know that something failed after the jobs were started. */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
/**
 * User-space query: report the total and currently enabled number of PP
 * cores.  NOTE(review): num_cores/enabled_cores are read without the
 * scheduler lock; values may be momentarily stale during core scaling.
 */
_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
{
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT_POINTER(args->ctx);
	args->number_of_total_cores = num_cores;
	args->number_of_enabled_cores = enabled_cores;
	return _MALI_OSK_ERR_OK;
}
+
/** Total number of PP cores known to the scheduler (enabled or not). */
u32 mali_pp_scheduler_get_num_cores_total(void)
{
	return num_cores;
}
+
/** Number of PP cores currently enabled (may change via core scaling). */
u32 mali_pp_scheduler_get_num_cores_enabled(void)
{
	return enabled_cores;
}
+
/** User-space query: report the PP core hardware version. */
_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
{
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT_POINTER(args->ctx);
	args->version = pp_version;
	return _MALI_OSK_ERR_OK;
}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+ session = (struct mali_session_data *)args->ctx;
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_pp_scheduler_lock();
+
+ /* Iterate over all jobs for given frame builder_id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+ if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+ if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
+ }
+ if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
+ } else {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+}
+
/**
 * Abort all PP work belonging to a dying session.
 *
 * Under the scheduler lock: every queued job from the session is removed
 * from its queue (with queue depth accounting corrected), unlinked from the
 * frame-builder lookup list and marked failed; fully complete jobs are moved
 * to a local list.  A snapshot of all working and idle groups is also taken.
 * Outside the lock: the collected jobs are released/deleted, and each
 * snapshotted group plus the virtual group is told to abort the session.
 *
 * @param session The aborting session; session->is_aborting must be set.
 */
void mali_pp_scheduler_abort_session(struct mali_session_data *session)
{
	u32 i = 0;
	struct mali_pp_job *job, *tmp_job;
	struct mali_group *group, *tmp_group;
	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
	_MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT(session->is_aborting);

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));

	mali_pp_scheduler_lock();

	/* Find all jobs from the aborting session. */
	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
		/* Remove job from queue. */
		if (mali_pp_job_is_virtual_group_job(job)) {
			/* Virtual-group jobs count as one queue entry while any
			 * sub job remains unstarted. */
			if (mali_pp_job_has_unstarted_sub_jobs(job))
				--virtual_group_job_queue.depth;
		} else {
			/* Physical jobs contribute one entry per unstarted sub job. */
			job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
		}

		_mali_osk_list_delinit(&job->list);
		_mali_osk_list_delinit(&job->session_fb_lookup_list);

		mali_pp_job_mark_unstarted_failed(job);

		if (mali_pp_job_is_complete(job)) {
			/* Job is complete, remove from session list. */
			_mali_osk_list_delinit(&job->session_list);

			/* Move job to local list for release and deletion. */
			_mali_osk_list_add(&job->list, &removed_jobs);

			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
		} else {
			/* Still-running sub jobs will finish via the normal
			 * completion path; keep the job in the session. */
			MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
		}
	}

	/* Snapshot groups so they can be aborted outside the scheduler lock. */
	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
		groups[i++] = group;
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
		groups[i++] = group;
	}

	mali_pp_scheduler_unlock();

	/* Release and delete all found jobs from the aborting session. */
	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
		mali_timeline_tracker_release(&job->tracker);
		mali_pp_job_delete(job);
		mali_pp_scheduler_job_completed();
	}

	/* Abort any running jobs from the session. */
	while (i > 0) {
		mali_group_abort_session(groups[--i], session);
	}

	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_abort_session(virtual_group, session);
	}
}
+
+static mali_bool mali_pp_scheduler_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_pp_scheduler_lock();
+
+ ret = pause_count > 0
+ && _mali_osk_list_empty(&group_list_working)
+ && VIRTUAL_GROUP_WORKING != virtual_group_state;
+
+ mali_pp_scheduler_unlock();
+
+ return ret;
+}
+
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
+{
+ if (mali_pp_scheduler_has_virtual_group()) {
+ return mali_group_get_pp_core(virtual_group);
+ } else {
+ return NULL;
+ }
+}
+
#if MALI_STATE_TRACKING
/**
 * Write a human-readable dump of PP scheduler state into @buf.
 *
 * Covers queue emptiness, then per-group state for working, idle and
 * disabled groups, and the virtual group if present.
 *
 * @param buf  Destination buffer.
 * @param size Size of @buf in bytes.
 * @return Number of bytes written.
 *
 * NOTE(review): `size - n` is unsigned; if the accumulated length n ever
 * reached/exceeded size this would underflow - presumably buffers are sized
 * large enough in practice, but verify _mali_osk_snprintf's return contract.
 */
u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
{
	int n = 0;
	struct mali_group *group;
	struct mali_group *temp;

	n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
	n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
	n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
	n += _mali_osk_snprintf(buf + n, size - n, "\n");

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
		n += mali_group_dump_state(group, buf + n, size - n);
	}

	if (mali_pp_scheduler_has_virtual_group()) {
		n += mali_group_dump_state(virtual_group, buf + n, size - n);
	}

	n += _mali_osk_snprintf(buf + n, size - n, "\n");
	return n;
}
#endif
+
+/* This function is intended for power on reset of all cores.
+ * No locking is done for the list iteration, which can only be safe if the
+ * scheduler is paused and all cores idle. That is always the case on init and
+ * power on. */
+void mali_pp_scheduler_reset_all_groups(void)
+{
+ struct mali_group *group, *temp;
+ struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+ s32 i = 0;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_lock(virtual_group);
+ mali_group_reset(virtual_group);
+ mali_group_unlock(virtual_group);
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+ mali_pp_scheduler_lock();
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+ mali_pp_scheduler_unlock();
+
+ while (i > 0) {
+ group = groups[--i];
+
+ mali_group_lock(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+ }
+}
+
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group, *temp;
+ struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+ s32 i = 0;
+
+ if (mali_pp_scheduler_has_virtual_group()) {
+ mali_group_zap_session(virtual_group, session);
+ }
+
+ mali_pp_scheduler_lock();
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
+ groups[i++] = group;
+ }
+ mali_pp_scheduler_unlock();
+
+ while (i > 0) {
+ mali_group_zap_session(groups[--i], session);
+ }
+}
+
/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
 * before calling this function to avoid Mali powering down as HW is accessed.
 */
/**
 * Enable a previously disabled PP group.
 *
 * If a virtual group exists, the group is handed over to it (state
 * JOINING_VIRTUAL, then added under the virtual group's lock); otherwise it
 * is powered/reset as needed and placed on the idle list.  No-op if the
 * group is not currently disabled.
 *
 * Locking: takes group lock, then scheduler lock; both are dropped before
 * the virtual group lock is taken to respect lock ordering.
 */
static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	mali_group_lock(group);

	if (MALI_GROUP_STATE_DISABLED != group->state) {
		/* Already enabled; nothing to do. */
		mali_group_unlock(group);
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
		return;
	}

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));

	mali_pp_scheduler_lock();

	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
	++enabled_cores;

	if (mali_pp_scheduler_has_virtual_group()) {
		mali_bool update_hw;

		/* Add group to virtual group. */
		_mali_osk_list_delinit(&(group->pp_scheduler_list));
		group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;

		/* Drop scheduler and group locks before taking the virtual
		 * group lock. */
		mali_pp_scheduler_unlock();
		mali_group_unlock(group);

		mali_group_lock(virtual_group);

		update_hw = mali_pm_is_power_on();
		/* Get ref of group domain */
		mali_group_get_pm_domain_ref(group);

		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
				  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));

		if (update_hw) {
			/* Power is on: bring the group's HW to a known state
			 * before it joins the virtual group. */
			mali_group_lock(group);
			mali_group_power_on_group(group);
			mali_group_reset(group);
			mali_group_unlock(group);
		}

		mali_pp_scheduler_enable_empty_virtual();
		mali_group_add_group(virtual_group, group, update_hw);
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));

		mali_group_unlock(virtual_group);
	} else {
		/* Get ref of group domain */
		mali_group_get_pm_domain_ref(group);

		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
				  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));

		/* Put group on idle list. */
		if (mali_pm_is_power_on()) {
			mali_group_power_on_group(group);
			mali_group_reset(group);
		}

		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
		group->state = MALI_GROUP_STATE_IDLE;

		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
		mali_pp_scheduler_unlock();
		mali_group_unlock(group);
	}
}
+
/**
 * Public wrapper for enabling a PP group: holds a no-power-on PM reference
 * across the enable (so Mali cannot power down while HW is accessed), then
 * kicks the scheduler in case jobs queued up while all groups were disabled.
 */
void mali_pp_scheduler_enable_group(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	_mali_osk_pm_dev_ref_add_no_power_on();
	mali_pp_scheduler_enable_group_internal(group);
	_mali_osk_pm_dev_ref_dec_no_power_on();

	/* Pick up any jobs that might have been queued if all PP groups were disabled. */
	mali_pp_scheduler_schedule();
}
+
/**
 * Disable a PP group.
 *
 * If the group is inside (or joining) the virtual group it is first pulled
 * out under the virtual group lock.  The group is then moved to the
 * disabled list, powered off, and its PM domain reference dropped.
 * No-op if already disabled.
 *
 * Caller must have paused the scheduler (no virtual group may be working)
 * and must hold a no-power-on PM reference - see
 * mali_pp_scheduler_disable_group().
 */
static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
{
	if (mali_pp_scheduler_has_virtual_group()) {
		mali_group_lock(virtual_group);

		MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
		if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
			/* The group was in the process of being added to the virtual group. We
			 * only need to change the state to reverse this. */
			group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
		} else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
			/* Remove group from virtual group. The state of the group will be
			 * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
			mali_group_remove_group(virtual_group, group);

			mali_pp_scheduler_disable_empty_virtual();
		}

		mali_group_unlock(virtual_group);
	}

	mali_group_lock(group);
	mali_pp_scheduler_lock();

	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
			  || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
			  || MALI_GROUP_STATE_DISABLED == group->state);

	if (MALI_GROUP_STATE_DISABLED == group->state) {
		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
	} else {
		MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));

		--enabled_cores;
		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
		group->state = MALI_GROUP_STATE_DISABLED;

		/* Power the group down and release its PM domain reference. */
		mali_group_power_off_group(group, MALI_TRUE);
		mali_group_put_pm_domain_ref(group);
	}

	mali_pp_scheduler_unlock();
	mali_group_unlock(group);
}
+
/**
 * Public wrapper for disabling a PP group: suspends the scheduler so
 * nothing is running, holds a no-power-on PM reference across the disable,
 * then resumes scheduling.
 */
void mali_pp_scheduler_disable_group(struct mali_group *group)
{
	MALI_DEBUG_ASSERT_POINTER(group);

	mali_pp_scheduler_suspend();
	_mali_osk_pm_dev_ref_add_no_power_on();

	mali_pp_scheduler_disable_group_internal(group);

	_mali_osk_pm_dev_ref_dec_no_power_on();
	mali_pp_scheduler_resume();
}
+
/**
 * Notify every user-space session that the number of enabled PP cores
 * changed (Mali-400 only; skipped on Mali-450).
 *
 * Notification objects cannot be allocated while the session lock is held,
 * so this pre-allocates one object per session, takes the lock, and retries
 * the whole procedure if new sessions appeared in between.
 *
 * @param num_cores New number of enabled PP cores to report.
 */
static void mali_pp_scheduler_notify_core_change(u32 num_cores)
{
	mali_bool done = MALI_FALSE;

	if (mali_is_mali450()) {
		return;
	}

	/*
	 * This function gets a bit complicated because we can't hold the session lock while
	 * allocating notification objects.
	 */

	while (!done) {
		u32 i;
		u32 num_sessions_alloc;
		u32 num_sessions_with_lock;
		u32 used_notification_objects = 0;
		_mali_osk_notification_t **notobjs;

		/* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
		num_sessions_alloc = mali_session_get_count();
		if (0 == num_sessions_alloc) {
			/* No sessions to report to */
			return;
		}

		notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
		if (NULL == notobjs) {
			MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
			/* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
			return;
		}

		/* Build one notification per session; individual allocation
		 * failures leave a NULL slot and that session is skipped. */
		for (i = 0; i < num_sessions_alloc; i++) {
			notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
			if (NULL != notobjs[i]) {
				_mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
				data->number_of_enabled_cores = num_cores;
			} else {
				MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
			}
		}

		mali_session_lock();

		/* number of sessions will not change while we hold the lock */
		num_sessions_with_lock = mali_session_get_count();

		if (num_sessions_alloc >= num_sessions_with_lock) {
			/* We have allocated enough notification objects for all the sessions atm */
			struct mali_session_data *session, *tmp;
			MALI_SESSION_FOREACH(session, tmp, link) {
				MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
				if (NULL != notobjs[used_notification_objects]) {
					mali_session_send_notification(session, notobjs[used_notification_objects]);
					notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
				}
				used_notification_objects++;
			}
			done = MALI_TRUE;
		}
		/* else: more sessions appeared since allocation - loop and retry. */

		mali_session_unlock();

		/* Delete any remaining/unused notification objects */
		for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
			if (NULL != notobjs[used_notification_objects]) {
				_mali_osk_notification_delete(notobjs[used_notification_objects]);
			}
		}

		_mali_osk_free(notobjs);
	}
}
+
/**
 * Enable disabled PP groups until @target_core_nr cores are enabled (or the
 * disabled list is exhausted).
 *
 * Takes a no-power-on PM reference for the duration and issues a PM barrier
 * before touching hardware.  The scheduler lock is dropped before each
 * enable because mali_pp_scheduler_enable_group_internal() takes its own
 * locks; the loop re-checks enabled_cores each iteration.
 */
static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
{
	MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));

	_mali_osk_pm_dev_ref_add_no_power_on();
	_mali_osk_pm_dev_barrier();

	while (target_core_nr > enabled_cores) {
		/*
		 * If there are any cores which do not belong to any domain,
		 * then these will always be found at the head of the list and
		 * we'll thus enabled these first.
		 */

		mali_pp_scheduler_lock();

		if (!_mali_osk_list_empty(&group_list_disabled)) {
			struct mali_group *group;

			group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);

			MALI_DEBUG_ASSERT_POINTER(group);
			MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);

			/* Drop the lock before enabling; the helper takes the
			 * group/scheduler locks itself. */
			mali_pp_scheduler_unlock();

			mali_pp_scheduler_enable_group_internal(group);
		} else {
			mali_pp_scheduler_unlock();
			break; /* no more groups on disabled list */
		}
	}

	_mali_osk_pm_dev_ref_dec_no_power_on();

	/* Newly enabled cores may be able to pick up queued work. */
	mali_pp_scheduler_schedule();
}
+
/**
 * Disable PP groups until only @target_core_nr cores remain enabled.
 *
 * Three phases, preferring cores that can actually be powered off:
 *  1. Cores in PMU power domains (iterated from the highest domain index).
 *  2. Physical groups on the scheduler's idle list.
 *  3. Physical groups inside the virtual group.
 *
 * The scheduler is suspended for the whole operation, so no group is
 * working while groups are removed.
 */
static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
{
	MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));

	mali_pp_scheduler_suspend();

	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));

	_mali_osk_pm_dev_ref_add_no_power_on();

	/* Phase 1: disable cores belonging to PMU power domains. */
	if (NULL != mali_pmu_get_global_pmu_core()) {
		int i;

		for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
			if (target_core_nr < enabled_cores) {
				struct mali_pm_domain *domain;

				domain = mali_pm_domain_get_from_index(i);

				/* Domain is valid and has pp cores */
				if ((NULL != domain) && (NULL != domain->group_list)) {
					struct mali_group *group;

					MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
						/* If group is pp core */
						if (NULL != mali_group_get_pp_core(group)) {
							mali_pp_scheduler_disable_group_internal(group);
							if (target_core_nr >= enabled_cores) {
								/* Target reached; stop scanning this domain. */
								break;
							}
						}
					}
				}
			} else {
				break;
			}
		}
	}

	/*
	 * Didn't find enough cores associated with a power domain,
	 * so we need to disable cores which we can't power off with the PMU.
	 * Start with physical groups used by the scheduler,
	 * then remove physical from virtual if even more groups are needed.
	 */

	/* Phase 2: idle physical groups. */
	while (target_core_nr < enabled_cores) {
		mali_pp_scheduler_lock();
		if (!_mali_osk_list_empty(&group_list_idle)) {
			struct mali_group *group;

			group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
			MALI_DEBUG_ASSERT_POINTER(group);

			/* Drop the lock before disabling; the helper takes its
			 * own locks. */
			mali_pp_scheduler_unlock();

			mali_pp_scheduler_disable_group_internal(group);
		} else {
			mali_pp_scheduler_unlock();
			break; /* No more physical groups */
		}
	}

	/* Phase 3: physical groups inside the virtual group. */
	if (mali_pp_scheduler_has_virtual_group()) {
		while (target_core_nr < enabled_cores) {
			mali_group_lock(virtual_group);
			if (!_mali_osk_list_empty(&virtual_group->group_list)) {
				struct mali_group *group;

				group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
				MALI_DEBUG_ASSERT_POINTER(group);

				mali_group_unlock(virtual_group);

				mali_pp_scheduler_disable_group_internal(group);
			} else {
				mali_group_unlock(virtual_group);
				break; /* No more physical groups in virtual group */
			}
		}
	}

	_mali_osk_pm_dev_ref_dec_no_power_on();

	mali_pp_scheduler_resume();
}
+
+int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == enabled_cores) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_cores) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ if (target_core_nr > enabled_cores) {
+ mali_pp_scheduler_core_scale_up(target_core_nr);
+ } else if (target_core_nr < enabled_cores) {
+ mali_pp_scheduler_core_scale_down(target_core_nr);
+ }
+
+ if (target_core_nr != enabled_cores) {
+ MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
+ }
+
+ mali_pp_scheduler_notify_core_change(enabled_cores);
+
+ return 0;
+}
+
/** Allow mali_pp_scheduler_set_perf_level() to scale cores again. */
void mali_pp_scheduler_core_scaling_enable(void)
{
	/* PS: Core scaling is by default enabled */
	core_scaling_enabled = MALI_TRUE;
}
+
/** Block core scaling (unless callers pass the override flag). */
void mali_pp_scheduler_core_scaling_disable(void)
{
	core_scaling_enabled = MALI_FALSE;
}
+
/** Query whether core scaling is currently allowed. */
mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
{
	return core_scaling_enabled;
}
+
/** Utilization bookkeeping hook called when a PP job enters a queue. */
static void mali_pp_scheduler_job_queued(void)
{
	if (mali_utilization_enabled()) {
		/*
		 * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
		 * This will be fine because we only lose the tiny idle gap between jobs, but
		 * we will instead get less utilization work to do (less locks taken)
		 */
		mali_utilization_pp_start();
	}
}
+
/** PM and utilization bookkeeping hook called when a PP job is done with. */
static void mali_pp_scheduler_job_completed(void)
{
	/* Release the per-job PM reference taken in mali_pp_scheduler_submit_job()
	 * (the original comment pointed at mali_pp_scheduler_job_queued(), but
	 * that function does not take a PM reference). */
	_mali_osk_pm_dev_ref_dec();

	if (mali_utilization_enabled()) {
		mali_utilization_pp_end();
	}
}
+
/**
 * Detach an about-to-be-aborted job from the frame-builder lookup list,
 * release the scheduler lock and then release the job's timeline tracker.
 *
 * Called with the scheduler lock held; returns with it released (the
 * tracker release must happen outside the lock).  The job must not be on
 * the queue or session lists.
 */
static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

	/* This job should not be on any lists. */
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));

	_mali_osk_list_delinit(&job->session_fb_lookup_list);

	mali_pp_scheduler_unlock();

	/* Release tracker. */
	mali_timeline_tracker_release(&job->tracker);
}
+
/**
 * Put an activated PP job on the appropriate run queue.
 *
 * Selects the virtual-group or physical queue (high or normal priority
 * depending on the session), updates the queue depth, inserts the job in
 * cache order, and links it into the session's job list.  Jobs from an
 * aborting session are deleted instead of queued.
 *
 * @param job Activated job; must have passed Timeline activation.
 * @return Scheduler mask with MALI_SCHEDULER_MASK_PP set when an idle group
 *         (physical or virtual) could run the job right away, empty
 *         otherwise.
 */
static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
{
	_mali_osk_list_t *queue = NULL;
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
	struct mali_pp_job *iter, *tmp;

	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_POINTER(job->session);

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
	/* Map dma-buf attachments before the job can be picked up by a core. */
	if (mali_pp_job_needs_dma_buf_mapping(job)) {
		mali_dma_buf_map_job(job);
	}
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */

	mali_pp_scheduler_lock();

	if (unlikely(job->session->is_aborting)) {
		/* Before checking if the session is aborting, the scheduler must be locked. */
		MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);

		MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));

		mali_pp_scheduler_abort_job_and_unlock_scheduler(job);

		/* Delete job. */
#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
		mali_pp_scheduler_deferred_job_delete(job);
#else
		mali_pp_job_delete(job);
#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */

		/* Release the PM reference taken for the job in
		 * mali_pp_scheduler_submit_job(). */
		_mali_osk_pm_dev_ref_dec();

		/* Since we are aborting we ignore the scheduler mask. */
		return MALI_SCHEDULER_MASK_EMPTY;
	}

	mali_pp_scheduler_job_queued();

#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
	trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
#endif

	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);

	job->cache_order = mali_scheduler_get_new_cache_order();

	/* Determine which queue the job should be added to. */
	if (mali_pp_job_is_virtual_group_job(job)) {
		if (job->session->use_high_priority_job_queue) {
			queue = &virtual_group_job_queue.high_pri;
		} else {
			queue = &virtual_group_job_queue.normal_pri;
		}

		/* Virtual-group jobs count as a single queue entry. */
		virtual_group_job_queue.depth += 1;

		/* Set schedule bitmask if the virtual group is idle. */
		if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
		}
	} else {
		if (job->session->use_high_priority_job_queue) {
			queue = &job_queue.high_pri;
		} else {
			queue = &job_queue.normal_pri;
		}

		/* Physical jobs count one queue entry per sub job. */
		job_queue.depth += mali_pp_job_get_sub_job_count(job);

		/* Set schedule bitmask if there are physical PP cores available, or if there is an
		 * idle virtual group. */
		if (!_mali_osk_list_empty(&group_list_idle)
		    || (mali_pp_scheduler_has_virtual_group()
			&& (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
			schedule_mask |= MALI_SCHEDULER_MASK_PP;
		}
	}

	/* Find position in queue where job should be added. */
	_MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
		if (mali_pp_job_should_start_after(job, iter)) {
			break;
		}
	}

	/* Add job to queue. */
	_mali_osk_list_add(&job->list, &iter->list);

	/* Add job to session list. */
	_mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
			     mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
			     mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));

	mali_pp_scheduler_unlock();

	return schedule_mask;
}
+
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
+
+ if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
+
+ mali_pp_scheduler_lock();
+ mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
+
+ mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+ mali_pp_scheduler_finalize_job(job);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ /* PP job is ready to run, queue it. */
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_pp_scheduler_deferred_job_queue(job);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
+
+ schedule_mask = mali_pp_scheduler_queue_job(job);
+
+ return schedule_mask;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.h b/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.h
new file mode 100644
index 00000000000000..618152aba687de
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_pp_scheduler.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "linux/mali/mali_utgard.h"
+
+/** Initialize the HW independent parts of the PP scheduler
+ */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+/** Populate the PP scheduler with groups
+ */
+void mali_pp_scheduler_populate(void);
+void mali_pp_scheduler_depopulate(void);
+
+/**
+ * @brief Handle job completion.
+ *
+ * Will attempt to start a new job on the locked group.
+ *
+ * If all sub jobs have completed, the job's tracker will be released and any other resources associated
+ * with the job will be freed. A notification will also be sent to user space.
+ *
+ * Releasing the tracker might activate other jobs, so if appropriate we also schedule them.
+ *
+ * @note Group must be locked when entering this function. Will be unlocked before exiting.
+ *
+ * @param group The group that completed the job.
+ * @param job The job that is done.
+ * @param sub_job Sub job of job.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @param in_upper_half MALI_TRUE if called from upper half, MALI_FALSE if not.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/**
+ * @brief Abort all running and queued PP jobs from session.
+ *
+ * This function aborts all PP jobs from the specified session. Queued jobs are removed from the
+ * queue and jobs currently running on a core will be aborted.
+ *
+ * @param session Session that is aborting.
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the PP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ *
+ * This function is intended for power on reset of all cores.
+ * No locking is done, which can only be safe if the scheduler is paused and
+ * all cores idle. That is always the case on init and power on.
+ */
+void mali_pp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
+
+/**
+ * @brief Get the virtual PP core
+ *
+ * The returned PP core may only be used to prepare DMA command buffers for the
+ * PP core. Other actions must go through the PP scheduler, or the virtual
+ * group.
+ *
+ * @return Pointer to the virtual PP core, NULL if this doesn't exist
+ */
+struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void);
+
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_pp_scheduler_enable_group(struct mali_group *group);
+void mali_pp_scheduler_disable_group(struct mali_group *group);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
+ * call.
+ */
+mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job);
+
+/**
+ * @brief Schedule queued jobs on idle cores.
+ */
+void mali_pp_scheduler_schedule(void);
+
+int mali_pp_scheduler_set_perf_level(u32 cores, mali_bool override);
+
+void mali_pp_scheduler_core_scaling_enable(void);
+void mali_pp_scheduler_core_scaling_disable(void);
+mali_bool mali_pp_scheduler_core_scaling_is_enabled(void);
+
+u32 mali_pp_scheduler_get_num_cores_total(void);
+u32 mali_pp_scheduler_get_num_cores_enabled(void);
+
+/**
+ * @brief Returns the number of Pixel Processors in the system irrespective of the context
+ *
+ * @return number of physical Pixel Processor cores in the system
+ */
+u32 mali_pp_scheduler_get_num_cores_total(void);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_scheduler.c b/drivers/parrot/gpu/mali400/common/mali_scheduler.c
new file mode 100644
index 00000000000000..8940653d0161fa
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_scheduler.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+static _mali_osk_atomic_t mali_job_id_autonumber;
+static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
+static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+
+static void mali_scheduler_wq_schedule_pp(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mali_pp_scheduler_schedule();
+}
+
+static void mali_scheduler_wq_schedule_gp(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mali_gp_scheduler_schedule();
+}
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job cache order counter failed.\n"));
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
+ if (NULL == pp_scheduler_wq_high_pri) {
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
+ if (NULL == gp_scheduler_wq_high_pri) {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+ _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
+u32 mali_scheduler_get_new_id(void)
+{
+ u32 job_id = _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+ return job_id;
+}
+
+u32 mali_scheduler_get_new_cache_order(void)
+{
+ u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+ return job_cache_order;
+}
+
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_GP & mask) {
+ /* GP needs scheduling. */
+ if (deferred_schedule) {
+ /* Schedule GP deferred. */
+ _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
+ } else {
+ /* Schedule GP now. */
+ mali_gp_scheduler_schedule();
+ }
+ }
+
+ if (MALI_SCHEDULER_MASK_PP & mask) {
+ /* PP needs scheduling. */
+ if (deferred_schedule) {
+ /* Schedule PP deferred. */
+ _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+ } else {
+ /* Schedule PP now. */
+ mali_pp_scheduler_schedule();
+ }
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_scheduler.h b/drivers/parrot/gpu/mali400/common/mali_scheduler.h
new file mode 100644
index 00000000000000..73eb53e58731c5
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_scheduler.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+u32 mali_scheduler_get_new_cache_order(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by both the PP and GP schedulers.
+ * This must be called after the Mali HW has been powered on in order to reset
+ * the HW.
+ */
+MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+{
+ mali_gp_scheduler_reset_all_groups();
+ mali_pp_scheduler_reset_all_groups();
+}
+
+/**
+ * @brief Zap TLB on all active groups running \a session
+ *
+ * @param session Pointer to the session to zap
+ */
+MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ mali_gp_scheduler_zap_all_active(session);
+ mali_pp_scheduler_zap_all_active(session);
+}
+
+/**
+ * Check if bit is set in scheduler mask.
+ *
+ * @param mask Scheduler mask to check.
+ * @param bit Bit to check.
+ * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
+{
+ return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
+}
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+/* Enable or disable scheduler hint. */
+extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+
+MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ mali_scheduler_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ mali_scheduler_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
+ return mali_scheduler_hints[hint];
+}
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_scheduler_types.h b/drivers/parrot/gpu/mali400/common/mali_scheduler_types.h
new file mode 100644
index 00000000000000..2d24a25c92ad32
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_scheduler_types.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
+
+typedef enum {
+ MALI_SCHEDULER_HINT_GP_BOUND = 0
+#define MALI_SCHEDULER_HINT_MAX 1
+} mali_scheduler_hint;
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_session.c b/drivers/parrot/gpu/mali400/common/mali_session.c
new file mode 100644
index 00000000000000..6e9c2c143f8bbe
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_session.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+ if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_count++;
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_count--;
+ mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+ return mali_session_count;
+}
+
+/*
+ * Get the max completed window jobs from all active session,
+ * which will be used in the window-render frames-per-second calculation
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 max_window_num = 0;
+ u32 tmp_number = 0;
+
+ mali_session_lock();
+
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+ if (max_window_num < tmp_number) {
+ max_window_num = tmp_number;
+ }
+ }
+
+ mali_session_unlock();
+
+ return max_window_num;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400/common/mali_session.h b/drivers/parrot/gpu/mali400/common/mali_session.h
new file mode 100644
index 00000000000000..d1a1a1b8ca5690
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_session.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+
+struct mali_session_data {
+ _mali_osk_notification_queue_t *ioctl_queue;
+
+ _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+ mali_descriptor_mapping *descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+ _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+
+ _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
+
+ struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+ struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+ mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+ mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+/*
+ * Get the max completed window jobs from all active session,
+ * which will be used in the window-render frames-per-second calculation
+ */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+u32 mali_session_max_window_num(void);
+#endif
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_soft_job.c b/drivers/parrot/gpu/mali400/common/mali_soft_job.c
new file mode 100644
index 00000000000000..f7020f0180d48c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_soft_job.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ _mali_osk_spinlock_irq_lock(system->lock);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+ MALI_DEBUG_ASSERT(0 == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = 0);
+ _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+ u32 i;
+ struct mali_soft_job_system *system;
+ struct mali_soft_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->session = session;
+
+ system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == system->lock) {
+ mali_soft_job_system_destroy(system);
+ return NULL;
+ }
+ system->lock_owner = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
+ _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+ for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
+ job = &(system->jobs[i]);
+ _mali_osk_list_add(&(job->system_list), &(system->jobs_free));
+ job->system = system;
+ job->state = MALI_SOFT_JOB_STATE_FREE;
+ job->id = i;
+ }
+
+ return system;
+}
+
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ /* All jobs should be free at this point. */
+ MALI_DEBUG_CODE({
+ u32 i;
+ struct mali_soft_job *job;
+
+ for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i)
+ {
+ job = &(system->jobs[i]);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
+ }
+ });
+
+ if (NULL != system) {
+ if (NULL != system->lock) {
+ _mali_osk_spinlock_irq_term(system->lock);
+ }
+ _mali_osk_free(system);
+ }
+}
+
+static struct mali_soft_job *mali_soft_job_system_alloc_job(struct mali_soft_job_system *system)
+{
+ struct mali_soft_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+ if (_mali_osk_list_empty(&(system->jobs_free))) {
+ /* No jobs available. */
+ return NULL;
+ }
+
+ /* Grab first job and move it to the used list. */
+ job = _MALI_OSK_LIST_ENTRY(system->jobs_free.next, struct mali_soft_job, system_list);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
+
+ _mali_osk_list_move(&(job->system_list), &(system->jobs_used));
+ job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+ MALI_DEBUG_ASSERT(system == job->system);
+
+ return job;
+}
+
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE != job->state);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+ MALI_DEBUG_ASSERT(system == job->system);
+
+ job->state = MALI_SOFT_JOB_STATE_FREE;
+ _mali_osk_list_move(&(job->system_list), &(system->jobs_free));
+
+ mali_soft_job_system_unlock(job->system);
+}
+
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+ if (job_id < MALI_MAX_NUM_SOFT_JOBS) {
+ return &system->jobs[job_id];
+ }
+
+ return NULL;
+}
+
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+ if (NULL != job) {
+ if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+ _mali_osk_atomic_term(&job->refcount);
+
+ if (NULL != job->activated_notification) {
+ _mali_osk_notification_delete(job->activated_notification);
+ job->activated_notification = NULL;
+ }
+
+ mali_soft_job_system_free_job(job->system, job);
+ }
+}
+
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job)
+{
+ struct mali_soft_job *job;
+ _mali_osk_notification_t *notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_TYPE_USER_SIGNALED >= type);
+
+ if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+ if (unlikely(NULL == notification)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+ return NULL;
+ }
+ }
+
+ mali_soft_job_system_lock(system);
+
+ job = mali_soft_job_system_alloc_job(system);
+ if (NULL == job) {
+ mali_soft_job_system_unlock(system);
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate job"));
+ _mali_osk_notification_delete(notification);
+ return NULL;
+ }
+
+ job->type = type;
+ job->user_job = user_job;
+ job->activated = MALI_FALSE;
+
+ if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
+ job->activated_notification = notification;
+ }
+
+ _mali_osk_atomic_init(&job->refcount, 1);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ MALI_DEBUG_ASSERT(system == job->system);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+ mali_soft_job_system_unlock(system);
+
+ return job;
+}
+
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+ mali_timeline_point point;
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ system = job->system;
+
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+ mali_soft_job_system_lock(system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+ point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+ return point;
+}
+
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+ struct mali_soft_job *job;
+
+ job = (struct mali_soft_job *) data;
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return job->activated;
+}
+
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job;
+ struct mali_timeline_system *timeline_system;
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(system);
+
+ job = mali_soft_job_system_lookup_job(system, job_id);
+
+ if (NULL == job || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+ mali_soft_job_system_unlock(system);
+ MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ /* Since the job now is in signaled state, timeouts from the timeline system will be
+ * ignored, and it is not possible to signal this job again. */
+
+ timeline_system = system->session->timeline_system;
+ MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+ /* Wait until activated. */
+ _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+ if (NULL != job->activated_notification) {
+ _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+ res->user_job = job->user_job;
+ mali_session_send_notification(job->system->session, job->activated_notification);
+ }
+ job->activated_notification = NULL;
+}
+
+void mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+ mali_soft_job_system_unlock(job->system);
+
+ /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_soft_job_destroy(job);
+ return;
+ }
+
+ /* Send activated notification. */
+ mali_soft_job_send_activated_notification(job);
+
+ /* Wake up sleeping signaler. */
+ job->activated = MALI_TRUE;
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ /* The session is aborting. This job will be released and destroyed by @ref
+ * mali_soft_job_system_abort(). */
+ mali_soft_job_system_unlock(job->system);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ /* The job is about to be signaled, ignore timeout. */
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+ mali_soft_job_system_unlock(job->system);
+ return schedule_mask;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+ _mali_osk_atomic_inc(&job->refcount);
+
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+
+ return schedule_mask;
+}
+
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+ u32 i;
+ struct mali_soft_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+ mali_soft_job_system_lock(system);
+
+ for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
+ job = &(system->jobs[i]);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state ||
+ MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+ /* If the job has been activated, we have to release the tracker and destroy
+ * the job. If not, the tracker will be released and the job destroyed when
+ * it is activated. */
+ if (MALI_TRUE == job->activated) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+ /* We need to destroy this soft job. */
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ }
+
+ mali_soft_job_system_unlock(system);
+
+ /* Release and destroy jobs. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ /* Move job back to used list before destroying. */
+ _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+ mali_soft_job_destroy(job);
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_soft_job.h b/drivers/parrot/gpu/mali400/common/mali_soft_job.h
new file mode 100644
index 00000000000000..2afa3b97ca3918
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_soft_job.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
+ * they are signaled by user-space (@ref mali_soft_job_system_signaled_job) or if they are timed out
+ * by the Timeline system.
+ */
+typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * All soft jobs in a soft job system will initially be in state MALI_SOFT_JOB_STATE_FREE. On @ref
+ * mali_soft_job_system_start_job a job will first be allocated. A job in state
+ * MALI_SOFT_JOB_STATE_FREE will be picked and the state changed to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * When a soft job's reference count reaches zero, it will be freed and the state returns to
+ * MALI_SOFT_JOB_STATE_FREE.
+ */
+typedef enum mali_soft_job_state {
+ MALI_SOFT_JOB_STATE_FREE,
+ MALI_SOFT_JOB_STATE_ALLOCATED,
+ MALI_SOFT_JOB_STATE_STARTED,
+ MALI_SOFT_JOB_STATE_SIGNALED,
+ MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/* Maximum number of soft jobs per soft system. */
+#define MALI_MAX_NUM_SOFT_JOBS 20
+
+/**
+ * Soft job struct.
+ *
+ * Soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+ mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
+ u32 user_job; /**< Identifier for soft job in user space. */
+ _mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
+ mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+ _mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+ /* Protected by soft job system lock. */
+ u32 id; /**< Used by user-space to find corresponding soft job in kernel-space. */
+ mali_soft_job_state state; /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+ struct mali_soft_job_system *system; /**< The soft job system this job is in. */
+ _mali_osk_list_t system_list; /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+ struct mali_session_data *session; /**< The session this soft job system belongs to. */
+
+ struct mali_soft_job jobs[MALI_MAX_NUM_SOFT_JOBS]; /**< Array of all soft jobs in this system. */
+ _MALI_OSK_LIST_HEAD(jobs_free); /**< List of all free soft jobs. */
+ _MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
+
+ _mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
+ u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job system must not have any started or activated jobs. Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ */
+void mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.c b/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.c
new file mode 100644
index 00000000000000..de076abfc65b47
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+ struct mali_spinlock_reentrant *spinlock;
+
+ spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+ if (NULL == spinlock) {
+ return NULL;
+ }
+
+ spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+ if (NULL == spinlock->lock) {
+ mali_spinlock_reentrant_term(spinlock);
+ return NULL;
+ }
+
+ return spinlock;
+}
+
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+ if (NULL != spinlock->lock) {
+ _mali_osk_spinlock_irq_term(spinlock->lock);
+ }
+
+ _mali_osk_free(spinlock);
+}
+
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid);
+
+ MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+ if (tid != spinlock->owner) {
+ _mali_osk_spinlock_irq_lock(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+ spinlock->owner = tid;
+ }
+
+ MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+ ++spinlock->counter;
+}
+
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+ --spinlock->counter;
+ if (0 == spinlock->counter) {
+ spinlock->owner = 0;
+ MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+ _mali_osk_spinlock_irq_unlock(spinlock->lock);
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.h b/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.h
new file mode 100644
index 00000000000000..f252ab46a00714
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_spinlock_reentrant.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+ _mali_osk_spinlock_irq_t *lock;
+ u32 owner;
+ u32 counter;
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ return (tid == spinlock->owner && 0 < spinlock->counter);
+}
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline.c b/drivers/parrot/gpu/mali400/common/mali_timeline.c
new file mode 100644
index 00000000000000..c546285a1b99cf
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline.c
@@ -0,0 +1,1376 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC)
+#include <linux/version.h>
+
+/* Callback that is called when a sync fence a tracker is waiting on is signaled. */
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+ int fence_status = sync_fence->status;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+ tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ is_aborting = system->session->is_aborting;
+ if (!is_aborting && (0 > fence_status)) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ }
+
+ waiter = tracker->waiter_sync;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ tracker->sync_fence = NULL;
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ sync_fence_put(sync_fence);
+
+ if (!is_aborting) {
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif /* defined(CONFIG_SYNC) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+ return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
+static void mali_timeline_timer_callback(void *data)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker;
+ struct mali_timeline *timeline;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ timeline = (struct mali_timeline *) data;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ if (!system->timer_enabled) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ tracker = timeline->tracker_tail;
+ timeline->timer_active = MALI_FALSE;
+
+ if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+ /* This is likely the delayed work that was scheduled out before being cancelled. */
+ if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ schedule_mask = mali_timeline_tracker_time_out(tracker);
+ tracker->timer_active = MALI_FALSE;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ system->timer_enabled = MALI_FALSE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+ }
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ if (NULL != timeline) {
+ /* Assert that the timeline object has been properly cleaned up before destroying it. */
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != timeline->system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != timeline->sync_tl) {
+ sync_timeline_destroy(timeline->sync_tl);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+ }
+
+ _mali_osk_free(timeline);
+ }
+}
+
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+ timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+ if (NULL == timeline) {
+ return NULL;
+ }
+
+ /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+ /* Start the timeline a bit before wrapping when debugging. */
+ timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+ timeline->point_next = 1;
+#endif
+ timeline->point_oldest = timeline->point_next;
+
+ /* The tracker and waiter lists will initially be empty. */
+
+ timeline->system = system;
+ timeline->id = id;
+
+ timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+ if (NULL == timeline->delayed_work) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC)
+ {
+ char timeline_name[32];
+
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_PP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_SOFT:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+ if (NULL == timeline->sync_tl) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return timeline;
+}
+
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ if (mali_timeline_is_full(timeline)) {
+ /* Don't add tracker if timeline is full. */
+ tracker->point = MALI_TIMELINE_NO_POINT;
+ return;
+ }
+
+ tracker->timeline = timeline;
+ tracker->point = timeline->point_next;
+
+ /* Find next available point. */
+ timeline->point_next++;
+ if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+ timeline->point_next++;
+ }
+
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+ /* Add tracker as new head on timeline's tracker list. */
+ if (NULL == timeline->tracker_head) {
+ /* Tracker list is empty. */
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+ timeline->tracker_tail = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+ } else {
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+ tracker->timeline_prev = timeline->tracker_head;
+ timeline->tracker_head->timeline_next = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ }
+ timeline->tracker_head = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
+
+/* Inserting the waiter object into the given timeline */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+ struct mali_timeline_waiter *waiter_prev;
+ struct mali_timeline_waiter *waiter_next;
+
+ /* Waiter time must be between timeline head and tail, and there must
+ * be fewer than MALI_TIMELINE_MAX_POINT_SPAN elements between them */
+ MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+ MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+ /* Find out where to put this waiter in the linked waiter list of the given timeline */
+ waiter_prev = timeline->waiter_head; /* Insert new after waiter_prev */
+ waiter_next = NULL; /* Insert new before waiter_next */
+
+ /* Iterating backwards from head (newest) to tail (oldest) until we
+ * find the correct spot to insert the new waiter */
+ while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+ waiter_next = waiter_prev;
+ waiter_prev = waiter_prev->timeline_prev;
+ }
+
+ if (NULL == waiter_prev && NULL == waiter_next) {
+ /* list is empty */
+ timeline->waiter_head = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else if (NULL == waiter_next) {
+ /* insert at head */
+ waiter_new->timeline_prev = timeline->waiter_head;
+ timeline->waiter_head->timeline_next = waiter_new;
+ timeline->waiter_head = waiter_new;
+ } else if (NULL == waiter_prev) {
+ /* insert at tail */
+ waiter_new->timeline_next = timeline->waiter_tail;
+ timeline->waiter_tail->timeline_prev = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else {
+ /* insert between */
+ waiter_new->timeline_next = waiter_next;
+ waiter_new->timeline_prev = waiter_prev;
+ waiter_next->timeline_prev = waiter_new;
+ waiter_prev->timeline_next = waiter_new;
+ }
+}
+
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *oldest_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Timer is disabled, early out. */
+ if (!system->timer_enabled) return;
+
+ oldest_tracker = timeline->tracker_tail;
+ if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+ if (MALI_FALSE == oldest_tracker->timer_active) {
+ if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ }
+ _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+ oldest_tracker->timer_active = MALI_TRUE;
+ timeline->timer_active = MALI_TRUE;
+ }
+ } else if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ MALI_DEBUG_CODE({
+ struct mali_timeline_system *system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ });
+
+ if (NULL != timeline->tracker_tail) {
+ /* Set oldest point to oldest tracker's point */
+ timeline->point_oldest = timeline->tracker_tail->point;
+ } else {
+ /* No trackers, mark point list as empty */
+ timeline->point_oldest = timeline->point_next;
+ }
+
+ /* Release all waiters no longer on the timeline's point list.
+ * Releasing a waiter can trigger this function to be called again, so
+ * we do not store any pointers on stack. */
+ while (NULL != timeline->waiter_tail) {
+ u32 waiter_time_relative;
+ u32 time_head_relative;
+ struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+ time_head_relative = timeline->point_next - timeline->point_oldest;
+ waiter_time_relative = waiter->point - timeline->point_oldest;
+
+ if (waiter_time_relative < time_head_relative) {
+ /* This and all following waiters are on the point list, so we are done. */
+ break;
+ }
+
+ /* Remove waiter from timeline's waiter list. */
+ if (NULL != waiter->timeline_next) {
+ waiter->timeline_next->timeline_prev = NULL;
+ } else {
+ /* This was the last waiter */
+ timeline->waiter_head = NULL;
+ }
+ timeline->waiter_tail = waiter->timeline_next;
+
+ /* Release waiter. This could activate a tracker, if this was
+ * the last waiter for the tracker. */
+ schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+ /* Zero out all tracker members. */
+ _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+ tracker->type = type;
+ tracker->job = job;
+ tracker->trigger_ref_count = 1; /* Prevents any callback from trigging while adding it */
+ tracker->os_tick_create = _mali_osk_time_tickcount();
+ MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+ tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+ /* Copy fence. */
+ if (NULL != fence) {
+ _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+ }
+}
+
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker_next, *tracker_prev;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ /* Upon entry a group lock will be held, but not a scheduler lock. */
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ /* Tracker should have been triggered */
+ MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+ /* All waiters should have been released at this point */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+ timeline = tracker->timeline;
+ if (NULL == timeline) {
+ /* Tracker was not on a timeline, there is nothing to release. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Tracker should still be on timeline */
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
+
+ /* Tracker is no longer valid. */
+ MALI_DEBUG_CODE(tracker->magic = 0);
+
+ tracker_next = tracker->timeline_next;
+ tracker_prev = tracker->timeline_prev;
+ tracker->timeline_next = NULL;
+ tracker->timeline_prev = NULL;
+
+ /* Removing tracker from timeline's tracker list */
+ if (NULL == tracker_next) {
+ /* This tracker was the head */
+ timeline->tracker_head = tracker_prev;
+ } else {
+ tracker_next->timeline_prev = tracker_prev;
+ }
+
+ if (NULL == tracker_prev) {
+ /* This tracker was the tail */
+ timeline->tracker_tail = tracker_next;
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ /* Update the timeline's oldest time and release any waiters */
+ schedule_mask |= mali_timeline_update_oldest_point(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ } else {
+ tracker_prev->timeline_next = tracker_next;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Update delayed work only when it is the soft job timeline */
+ if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+ mali_timeline_update_delayed_work(tracker->timeline);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *tail,
+ struct mali_timeline_waiter *head)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(head);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ head->tracker_next = system->waiter_empty_list;
+ system->waiter_empty_list = tail;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_timeline_system *system;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+ if (NULL != tracker->waiter_head) {
+ mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+ tracker->waiter_head = NULL;
+ tracker->waiter_tail = NULL;
+ }
+
+ switch (tracker->type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_PP:
+ schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SOFT:
+ timeline = tracker->timeline;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+ /* Start a soft timer to make sure the soft job is released within a limited time */
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_update_delayed_work(timeline);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ break;
+ case MALI_TIMELINE_TRACKER_WAIT:
+ mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC)
+ mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+ MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) */
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+ break;
+ }
+
+ return schedule_mask;
+}
+
+/**
+ * Take an additional trigger reference on a tracker.
+ *
+ * @param system  Timeline system the tracker belongs to.
+ * @param tracker Tracker to reference.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+	u32 thread_id = _mali_osk_get_tid();
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	mali_spinlock_reentrant_wait(system->spinlock, thread_id);
+
+	/* The count must already be positive: a zero count means the tracker
+	 * has been (or is being) activated and must not be revived. */
+	MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+	++tracker->trigger_ref_count;
+
+	mali_spinlock_reentrant_signal(system->spinlock, thread_id);
+}
+
+/**
+ * Drop one trigger reference on a tracker, recording any activation error.
+ *
+ * When the reference count reaches zero the tracker is activated.  The
+ * tracker may be freed as a side effect of activation, so it must not be
+ * accessed after this function returns.
+ *
+ * @param system           Timeline system the tracker belongs to.
+ * @param tracker          Tracker to release.
+ * @param activation_error Error bits to OR into the tracker's activation_error.
+ * @return Schedule mask indicating which schedulers need to be run.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+	u32 tid = _mali_osk_get_tid();
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+	tracker->trigger_ref_count--;
+
+	tracker->activation_error |= activation_error;
+
+	if (0 == tracker->trigger_ref_count) {
+		schedule_mask |= mali_timeline_tracker_activate(tracker);
+		/* Activation may free the tracker; NULL the local pointer to
+		 * guard against accidental use below. */
+		tracker = NULL;
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	return schedule_mask;
+}
+
+/**
+ * Copy a user-kernel interface fence into a kernel-side timeline fence.
+ *
+ * @param fence    Destination kernel fence.
+ * @param uk_fence Source fence received over the u/k interface.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+	u32 idx;
+
+	MALI_DEBUG_ASSERT_POINTER(fence);
+	MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+	/* Copy the sync fd and the point for every timeline. */
+	fence->sync_fd = uk_fence->sync_fd;
+
+	for (idx = 0; idx < MALI_TIMELINE_MAX; idx++) {
+		fence->points[idx] = uk_fence->points[idx];
+	}
+}
+
+/**
+ * Allocate and initialize a timeline system for a session.
+ *
+ * Creates one timeline per timeline ID, the reentrant spinlock protecting the
+ * system, the wait queue used while aborting, and (with CONFIG_SYNC) the
+ * "always signaled" sync timeline.  On any partial failure the already
+ * constructed parts are torn down via mali_timeline_system_destroy().
+ *
+ * @param session Session the timeline system belongs to.
+ * @return New timeline system, or NULL on allocation failure.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+	u32 i;
+	struct mali_timeline_system *system;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+	MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+	/* calloc so all pointers start NULL; destroy() relies on this when
+	 * cleaning up a partially initialized system. */
+	system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+	if (NULL == system) {
+		return NULL;
+	}
+
+	system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+	if (NULL == system->spinlock) {
+		mali_timeline_system_destroy(system);
+		return NULL;
+	}
+
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+		if (NULL == system->timelines[i]) {
+			mali_timeline_system_destroy(system);
+			return NULL;
+		}
+	}
+
+#if defined(CONFIG_SYNC)
+	system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+	if (NULL == system->signaled_sync_tl) {
+		mali_timeline_system_destroy(system);
+		return NULL;
+	}
+#endif /* defined(CONFIG_SYNC) */
+
+	system->waiter_empty_list = NULL;
+	system->session = session;
+	system->timer_enabled = MALI_TRUE;
+
+	system->wait_queue = _mali_osk_wait_queue_init();
+	if (NULL == system->wait_queue) {
+		mali_timeline_system_destroy(system);
+		return NULL;
+	}
+
+	return system;
+}
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Check if there are any trackers left on timeline.
+ *
+ * Used as a wait queue conditional.
+ *
+ * @param data Timeline.
+ * @return MALI_TRUE if there are no trackers on timeline, MALI_FALSE if not.
+ */
+/* Wait queue predicate: true when the timeline passed as opaque data holds
+ * no trackers. */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+	struct mali_timeline *timeline;
+
+	timeline = (struct mali_timeline *) data;
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	return mali_timeline_is_empty(timeline);
+}
+
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+	u32 i;
+	u32 tid = _mali_osk_get_tid();
+	struct mali_timeline_tracker *tracker, *tracker_next;
+	_MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+	MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	/* Cancel sync fence waiters. */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		tracker_next = timeline->tracker_tail;
+		while (NULL != tracker_next) {
+			tracker = tracker_next;
+			tracker_next = tracker->timeline_next;
+
+			/* Only trackers blocked on a sync fence need cancelling. */
+			if (NULL == tracker->sync_fence) continue;
+
+			MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+			/* Cancel sync fence waiter. */
+			if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+				/* Callback was not called, move tracker to local list. */
+				_mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+			}
+		}
+	}
+
+	/* Drop the lock before invoking the callbacks: the callback path takes
+	 * the same lock itself. */
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	/* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+	_MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+		mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+	}
+
+	/* Sleep until all sync fence callbacks are done and all timelines are empty. */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+	}
+}
+
+#endif /* defined(CONFIG_SYNC) */
+
+/**
+ * Abort a timeline system as part of tearing down an aborting session.
+ *
+ * With CONFIG_SYNC this cancels every pending sync fence waiter and blocks
+ * until all timelines are drained.  In debug builds it then verifies that no
+ * trackers or waiters remain on any timeline.
+ *
+ * @param system Timeline system to abort; its session must be aborting.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+	MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+	MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+	MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC)
+	mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) */
+
+	/* Should not be any waiters or trackers left at this point. */
+	MALI_DEBUG_CODE({
+		u32 i;
+		mali_spinlock_reentrant_wait(system->spinlock, tid);
+		for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+		{
+			struct mali_timeline *timeline = system->timelines[i];
+			MALI_DEBUG_ASSERT_POINTER(timeline);
+			MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+			MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+			MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+			MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+			MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+		}
+		mali_spinlock_reentrant_signal(system->spinlock, tid);
+	});
+}
+
+/**
+ * Destroy a timeline system and free all resources it owns.
+ *
+ * Safe to call on a partially constructed system (as done from the failure
+ * paths of mali_timeline_system_create()): every member is checked for NULL
+ * before being torn down.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+	u32 i;
+	struct mali_timeline_waiter *waiter, *next;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+
+	MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+	/* NOTE(review): system was already asserted non-NULL above, so this
+	 * check is redundant in debug builds but keeps release builds safe. */
+	if (NULL != system) {
+		/* There should be no waiters left on this queue. */
+		if (NULL != system->wait_queue) {
+			_mali_osk_wait_queue_term(system->wait_queue);
+			system->wait_queue = NULL;
+		}
+
+		/* Free all waiters in empty list */
+		waiter = system->waiter_empty_list;
+		while (NULL != waiter) {
+			next = waiter->tracker_next;
+			_mali_osk_free(waiter);
+			waiter = next;
+		}
+
+#if defined(CONFIG_SYNC)
+		if (NULL != system->signaled_sync_tl) {
+			sync_timeline_destroy(system->signaled_sync_tl);
+		}
+#endif /* defined(CONFIG_SYNC) */
+
+		for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+			if (NULL != system->timelines[i]) {
+				mali_timeline_destroy(system->timelines[i]);
+			}
+		}
+		if (NULL != system->spinlock) {
+			mali_spinlock_reentrant_term(system->spinlock);
+		}
+
+		_mali_osk_free(system);
+	}
+}
+
+/**
+ * Count the number of waiter objects a fence will require.
+ *
+ * One waiter is needed for each timeline on which the fence has a point,
+ * plus one for the sync fence file descriptor if it is set.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+	u32 timeline_index;
+	u32 count = 0;
+
+	MALI_DEBUG_ASSERT_POINTER(fence);
+
+	for (timeline_index = 0; timeline_index < MALI_TIMELINE_MAX; timeline_index++) {
+		if (fence->points[timeline_index] != MALI_TIMELINE_NO_POINT) {
+			count++;
+		}
+	}
+
+#if defined(CONFIG_SYNC)
+	if (fence->sync_fd != -1) {
+		count++;
+	}
+#endif /* defined(CONFIG_SYNC) */
+
+	return count;
+}
+
+/**
+ * Pop a waiter object from the system's free list and zero it.
+ *
+ * @param system Timeline system; its lock must be held.
+ * @return Zeroed waiter, or NULL when the free list is empty.
+ */
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+	struct mali_timeline_waiter *first;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	first = system->waiter_empty_list;
+	if (NULL == first) {
+		/* Free list exhausted; caller must allocate instead. */
+		return NULL;
+	}
+
+	/* Unlink the first entry and clear it for reuse. */
+	system->waiter_empty_list = first->tracker_next;
+	_mali_osk_memset(first, 0, sizeof(*first));
+
+	return first;
+}
+
+/**
+ * Build a singly-linked list of up to @max_num_waiters zeroed waiter objects.
+ *
+ * Waiters are taken from the system's free list while possible; once that is
+ * empty the system lock is dropped and the remainder is heap-allocated, after
+ * which the lock is reacquired.  The resulting list runs from *tail to *head
+ * via tracker_next and may be shorter than requested if allocation fails.
+ *
+ * @param system          Timeline system; lock held on entry and on return.
+ * @param tail            Out: first waiter of the list (NULL if none).
+ * @param head            Out: last waiter of the list (NULL if none).
+ * @param max_num_waiters Number of waiters to allocate.
+ *
+ * @note Because the lock may be released while allocating, timeline state can
+ *       change before this function returns.
+ */
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+		struct mali_timeline_waiter **tail,
+		struct mali_timeline_waiter **head,
+		int max_num_waiters)
+{
+	u32 i, tid = _mali_osk_get_tid();
+	mali_bool do_alloc;
+	struct mali_timeline_waiter *waiter;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(tail);
+	MALI_DEBUG_ASSERT_POINTER(head);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	*head = *tail = NULL;
+	do_alloc = MALI_FALSE;
+	i = 0;
+	while (i < max_num_waiters) {
+		if (MALI_FALSE == do_alloc) {
+			waiter = mali_timeline_system_get_zeroed_waiter(system);
+			if (NULL == waiter) {
+				/* Free list empty: switch to heap allocation and
+				 * drop the spinlock since calloc may sleep. */
+				do_alloc = MALI_TRUE;
+				mali_spinlock_reentrant_signal(system->spinlock, tid);
+				continue;
+			}
+		} else {
+			waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+			if (NULL == waiter) break;
+		}
+		++i;
+		if (NULL == *tail) {
+			*tail = waiter;
+			*head = waiter;
+		} else {
+			(*head)->tracker_next = waiter;
+			*head = waiter;
+		}
+	}
+	/* Reacquire the lock if it was dropped for heap allocation. */
+	if (MALI_TRUE == do_alloc) {
+		mali_spinlock_reentrant_wait(system->spinlock, tid);
+	}
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are release.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail List of pre-allocated waiters.
+ * @param waiter_head List of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+		struct mali_timeline_tracker *tracker,
+		struct mali_timeline_waiter *waiter_tail,
+		struct mali_timeline_waiter *waiter_head)
+{
+	int i;
+	u32 tid = _mali_osk_get_tid();
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC)
+	struct sync_fence *sync_fence = NULL;
+#endif /* defined(CONFIG_SYNC) */
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+	MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+	MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+	/* Creating waiter object for all the timelines the fence is put on. Inserting this waiter
+	 * into the timelines sorted list of waiters */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		mali_timeline_point point;
+		struct mali_timeline *timeline;
+		struct mali_timeline_waiter *waiter;
+
+		/* Get point on current timeline from tracker's fence. */
+		point = tracker->fence.points[i];
+
+		if (likely(MALI_TIMELINE_NO_POINT == point)) {
+			/* Fence contains no point on this timeline so we don't need a waiter. */
+			continue;
+		}
+
+		timeline = system->timelines[i];
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+					  point, timeline->point_oldest, timeline->point_next));
+			continue;
+		}
+
+		if (likely(mali_timeline_is_point_released(timeline, point))) {
+			/* Tracker representing the point has been released so we don't need a
+			 * waiter. */
+			continue;
+		}
+
+		/* The point is on timeline. */
+		MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+		/* Get a new zeroed waiter object. */
+		if (likely(NULL != waiter_tail)) {
+			waiter = waiter_tail;
+			waiter_tail = waiter_tail->tracker_next;
+		} else {
+			/* Pre-allocated list exhausted (allocation failed earlier);
+			 * skip this point rather than block. */
+			MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+			continue;
+		}
+
+		/* Yanking the trigger ref count of the tracker. */
+		tracker->trigger_ref_count++;
+
+		waiter->point = point;
+		waiter->tracker = tracker;
+
+		/* Insert waiter on tracker's singly-linked waiter list. */
+		if (NULL == tracker->waiter_head) {
+			/* list is empty */
+			MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+			tracker->waiter_tail = waiter;
+		} else {
+			tracker->waiter_head->tracker_next = waiter;
+		}
+		tracker->waiter_head = waiter;
+
+		/* Add waiter to timeline. */
+		mali_timeline_insert_waiter(timeline, waiter);
+	}
+#if defined(CONFIG_SYNC)
+	if (-1 != tracker->fence.sync_fd) {
+		int ret;
+		struct mali_timeline_waiter *waiter;
+
+		sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+		if (unlikely(NULL == sync_fence)) {
+			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+			goto exit;
+		}
+
+		/* Check if we have a zeroed waiter object available. */
+		if (unlikely(NULL == waiter_tail)) {
+			MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+			goto exit;
+		}
+
+		/* Start asynchronous wait that will release waiter when the fence is signaled. */
+		sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+		ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+		if (1 == ret) {
+			/* Fence already signaled, no waiter needed. */
+			goto exit;
+		} else if (0 != ret) {
+			MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+			tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+			goto exit;
+		}
+
+		/* Grab new zeroed waiter object. */
+		waiter = waiter_tail;
+		waiter_tail = waiter_tail->tracker_next;
+
+		/* Increase the trigger ref count of the tracker. */
+		tracker->trigger_ref_count++;
+
+		waiter->point = MALI_TIMELINE_NO_POINT;
+		waiter->tracker = tracker;
+
+		/* Insert waiter on tracker's singly-linked waiter list. */
+		if (NULL == tracker->waiter_head) {
+			/* list is empty */
+			MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+			tracker->waiter_tail = waiter;
+		} else {
+			tracker->waiter_head->tracker_next = waiter;
+		}
+		tracker->waiter_head = waiter;
+
+		/* Also store waiter in separate field for easy access by sync callback. */
+		tracker->waiter_sync = waiter;
+
+		/* Store the sync fence in tracker so we can retrieve in abort session, if needed. */
+		tracker->sync_fence = sync_fence;
+
+		/* Ownership transferred to the tracker; do not put the fence below. */
+		sync_fence = NULL;
+	}
+exit:
+#endif /* defined(CONFIG_SYNC) */
+
+	/* Return any unused pre-allocated waiters to the free list. */
+	if (NULL != waiter_tail) {
+		mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+	}
+
+	/* Release the initial trigger ref count. */
+	tracker->trigger_ref_count--;
+
+	/* If there were no waiters added to this tracker we activate immediately. */
+	if (0 == tracker->trigger_ref_count) {
+		schedule_mask |= mali_timeline_tracker_activate(tracker);
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+	/* Drop our fd reference if it was not handed over to the tracker. */
+	if (NULL != sync_fence) {
+		sync_fence_put(sync_fence);
+	}
+#endif /* defined(CONFIG_SYNC) */
+
+	mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+/**
+ * Add a tracker to the timeline system and arm its fence dependencies.
+ *
+ * Allocates the waiters required by the tracker's fence, inserts the tracker
+ * on the requested timeline (allocating its point), and creates the waiters.
+ * The tracker may be activated (and even freed) before this returns.
+ *
+ * @param system      Timeline system for the session.
+ * @param tracker     Tracker to add; must hold an initial trigger reference.
+ * @param timeline_id Timeline to put the tracker on, or MALI_TIMELINE_NONE.
+ * @return Point assigned to the tracker, or MALI_TIMELINE_NO_POINT.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+		struct mali_timeline_tracker *tracker,
+		enum mali_timeline_id timeline_id)
+{
+	int num_waiters = 0;
+	struct mali_timeline_waiter *waiter_tail, *waiter_head;
+	u32 tid = _mali_osk_get_tid();
+	mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+	MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+	MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+	tracker->system = system;
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+	/* Allocate waiters. */
+	mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	/* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If
+	 * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+	 * point will be MALI_TIMELINE_NO_POINT.
+	 *
+	 * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the
+	 * point will be MALI_TIMELINE_NO_POINT. */
+	MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+	if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+		struct mali_timeline *timeline = system->timelines[timeline_id];
+		mali_timeline_insert_tracker(timeline, tracker);
+		MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+	}
+
+	/* Read the point before the unlock below: the tracker may be freed after it. */
+	point = tracker->point;
+
+	/* Create waiters for tracker based on supplied fence. Each waiter will increase the
+	 * trigger ref count. */
+	mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+	tracker = NULL;
+
+	/* At this point the tracker object might have been freed so we should no longer
+	 * access it. */
+
+
+	/* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+	 * returned. */
+	return point;
+}
+
+/**
+ * Release one waiter of a tracker, activating the tracker if it was the last.
+ *
+ * @param system Timeline system; its lock must be held.
+ * @param waiter Waiter to release (already removed from the timeline's list).
+ * @return Schedule mask indicating which schedulers need to be run.
+ */
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+		struct mali_timeline_waiter *waiter)
+{
+	struct mali_timeline_tracker *tracker;
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(waiter);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	tracker = waiter->tracker;
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	/* At this point the waiter has been removed from the timeline's waiter list, but it is
+	 * still on the tracker's waiter list. All of the tracker's waiters will be released when
+	 * the tracker is activated. */
+
+	waiter->point = MALI_TIMELINE_NO_POINT;
+	waiter->tracker = NULL;
+
+	tracker->trigger_ref_count--;
+	if (0 == tracker->trigger_ref_count) {
+		/* This was the last waiter; activate tracker */
+		schedule_mask |= mali_timeline_tracker_activate(tracker);
+		tracker = NULL;
+	}
+
+	return schedule_mask;
+}
+
+/**
+ * Get the most recently allocated point on a timeline.
+ *
+ * @param system      Timeline system to query.
+ * @param timeline_id Timeline to query.
+ * @return Latest point, or MALI_TIMELINE_NO_POINT if the timeline is empty
+ *         or the ID is out of range.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+		enum mali_timeline_id timeline_id)
+{
+	mali_timeline_point point;
+	struct mali_timeline *timeline;
+	u32 tid = _mali_osk_get_tid();
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	if (MALI_TIMELINE_MAX <= timeline_id) {
+		return MALI_TIMELINE_NO_POINT;
+	}
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	timeline = system->timelines[timeline_id];
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	point = MALI_TIMELINE_NO_POINT;
+	if (timeline->point_oldest != timeline->point_next) {
+		/* point_next is the next point to be handed out, so the latest
+		 * allocated point is the one before it. */
+		point = timeline->point_next - 1;
+		/* Step past the reserved NO_POINT value if the subtraction
+		 * wrapped onto it. */
+		if (MALI_TIMELINE_NO_POINT == point) point--;
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	return point;
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/* Debug helper: does the tracker's fence have an unreleased point on the
+ * timeline identified by @id? */
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+	struct mali_timeline_system *system;
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+	MALI_DEBUG_ASSERT_POINTER(tracker->timeline->system);
+
+	system = tracker->timeline->system;
+
+	if (id >= MALI_TIMELINE_MAX) {
+		MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+		return MALI_FALSE;
+	}
+
+	return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+}
+
+/* Debug helper: human-readable name for a timeline ID. */
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+	if (MALI_TIMELINE_GP == id) {
+		return "  GP";
+	}
+	if (MALI_TIMELINE_PP == id) {
+		return "  PP";
+	}
+	if (MALI_TIMELINE_SOFT == id) {
+		return "SOFT";
+	}
+	return "NONE";
+}
+
+/* Debug helper: human-readable name for a tracker type. */
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+	if (MALI_TIMELINE_TRACKER_GP == type) {
+		return "  GP";
+	}
+	if (MALI_TIMELINE_TRACKER_PP == type) {
+		return "  PP";
+	}
+	if (MALI_TIMELINE_TRACKER_SOFT == type) {
+		return "SOFT";
+	}
+	if (MALI_TIMELINE_TRACKER_WAIT == type) {
+		return "WAIT";
+	}
+	if (MALI_TIMELINE_TRACKER_SYNC == type) {
+		return "SYNC";
+	}
+	return "INVALID";
+}
+
+/**
+ * Classify a tracker's current lifecycle state for debug output.
+ *
+ * @param tracker Tracker to classify.
+ * @return WAITING while trigger references remain, ACTIVE while on a
+ *         timeline's tracker list, INIT if on a timeline without a point,
+ *         FINISH otherwise.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+	struct mali_timeline *timeline = NULL;
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	timeline = tracker->timeline;
+
+	if (0 != tracker->trigger_ref_count) {
+		return MALI_TIMELINE_TS_WAITING;
+	}
+
+	/* On the timeline's linked list: either it is the tail or it has a
+	 * predecessor. */
+	if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+		return MALI_TIMELINE_TS_ACTIVE;
+	}
+
+	if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+		return MALI_TIMELINE_TS_INIT;
+	}
+
+	return MALI_TIMELINE_TS_FINISH;
+}
+
+/**
+ * Print a one-line debug summary of a tracker.
+ *
+ * Waiting trackers additionally show their trigger reference count and, per
+ * timeline, whether they are still waiting ("W") on the fence point there.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker)
+{
+	/* One status character per tracker state, indexed by the value of
+	 * mali_timeline_debug_get_tracker_state() — presumably Init, Waiting,
+	 * Active, Finished; verify against the state enum's ordering. */
+	const char *tracker_state = "IWAF";
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	if (0 != tracker->trigger_ref_count) {
+		MALI_PRINTF(("TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u,%d]  (0x%08X)\n",
+			     timeline_tracker_type_to_string(tracker->type), tracker->point,
+			     *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
+			     tracker->trigger_ref_count,
+			     is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+			     is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+			     is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+			     tracker->fence.sync_fd, tracker->job));
+	} else {
+		MALI_PRINTF(("TL:  %s %u %c  (0x%08X)\n",
+			     timeline_tracker_type_to_string(tracker->type), tracker->point,
+			     *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
+			     tracker->job));
+	}
+}
+
+/**
+ * Print a debug summary of every tracker on a timeline.
+ *
+ * Output is capped to avoid flooding the log; a notice is printed when the
+ * cap is hit.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline)
+{
+	struct mali_timeline_tracker *tracker = NULL;
+	int i_max = 30;
+
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	tracker = timeline->tracker_tail;
+	/* NOTE(review): pre-decrement means at most i_max - 1 (29) trackers
+	 * are printed before the cap triggers. */
+	while (NULL != tracker && 0 < --i_max) {
+		mali_timeline_debug_print_tracker(tracker);
+		tracker = tracker->timeline_next;
+	}
+
+	/* i_max only reaches 0 when the loop stopped with trackers remaining. */
+	if (0 == i_max) {
+		MALI_PRINTF(("TL: Too many trackers in list to print\n"));
+	}
+}
+
+/**
+ * Print a debug summary of every non-empty timeline in the system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system)
+{
+	int i;
+	int num_printed = 0;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	/* Print all timelines */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		/* Skip timelines with no trackers. */
+		if (NULL == timeline->tracker_head) continue;
+
+		MALI_PRINTF(("TL: Timeline %s:\n",
+			     timeline_id_to_string((enum mali_timeline_id)i)));
+		mali_timeline_debug_print_timeline(timeline);
+		num_printed++;
+	}
+
+	if (0 == num_printed) {
+		MALI_PRINTF(("TL: All timelines empty\n"));
+	}
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline.h b/drivers/parrot/gpu/mali400/common/mali_timeline.h
new file mode 100644
index 00000000000000..141b9d50c6e5af
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline.h
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+ MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */
+ MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */
+ MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+ MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+ MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */
+ MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */
+ MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+ MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+ MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+ MALI_TIMELINE_TRACKER_MAX = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent that there is no point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline. A timeline will be considered full if the difference
+ * between the oldest and newest points is equal or larger to this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+ mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+ s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */
+};
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+ struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+ /* Single-linked list of unused waiter objects. Uses the tracker_next field in tracker. */
+ struct mali_timeline_waiter *waiter_empty_list;
+
+ struct mali_session_data *session; /**< Session that owns this system. */
+
+ mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+ _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif /* defined(CONFIG_SYNC) */
+};
+
+/**
+ * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+ mali_timeline_point point_next; /**< The next available point. */
+ mali_timeline_point point_oldest; /**< The oldest point not released. */
+
+ /* Double-linked list of trackers. Sorted in ascending order by tracker->time_number with
+ * tail pointing to the tracker with the oldest time. */
+ struct mali_timeline_tracker *tracker_head;
+ struct mali_timeline_tracker *tracker_tail;
+
+ /* Double-linked list of waiters. Sorted in ascending order by waiter->time_number_wait
+ * with tail pointing to the waiter with oldest wait time. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+ struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */
+ enum mali_timeline_id id; /**< Timeline type. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. */
+#endif /* defined(CONFIG_SYNC) */
+
+ /* The following fields are used to time out soft job trackers. */
+ _mali_osk_wq_delayed_work_t *delayed_work;
+ mali_bool timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+ mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */
+ struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */
+
+ struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */
+ struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+ struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+ MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+ mali_timeline_point point; /**< Point on timeline for this tracker */
+
+ struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */
+ struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+ u32 trigger_ref_count; /**< When zero tracker will be activated */
+ mali_timeline_activation_error activation_error; /**< Activation error. */
+ struct mali_timeline_fence fence; /**< Fence used to create this tracker */
+
+ /* Single-linked list of waiters. Sorted in order of insertions with
+ * tail pointing to first waiter. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+#if defined(CONFIG_SYNC)
+ /* These are only used if the tracker is waiting on a sync fence. */
+ struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+ struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+ struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
+ _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+#endif /* defined(CONFIG_SYNC) */
+
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */
+ enum mali_timeline_tracker_type type; /**< Type of tracker. */
+ void *job; /**< Owner of tracker. */
+
+ /* The following fields are used to time out soft job trackers. */
+ u32 os_tick_create;
+ u32 os_tick_activate;
+ mali_bool timer_active;
+};
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is. Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero. Remember that all of these calculations are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in. The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ * [ point is in forbidden zone ]
+ * 64k wide
+ * MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ * [ point is on timeline ) ( point is released ]
+ *
+ * 0--------------------------##############################--------------------2^32 - 1
+ * ^ ^
+ * \ |
+ * oldest point on timeline |
+ * \
+ * next point on timeline
+ */
+
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported and
+ * the result will be correct if the points are less than UINT_MAX/2 apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+ return 0 > ((s32)b) - ((s32)a);
+}
+
+/**
+ * Check if a point is on timeline. A point is on a timeline if it is greater than, or equal to,
+ * the oldest point, and less than the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Check if a point has been released. A point is released if it is older than the oldest point on
+ * the timeline, newer than the next point, and also not in the forbidden zone.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ mali_timeline_point point_normalized;
+ mali_timeline_point next_normalized;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ point_normalized = point - timeline->point_oldest;
+ next_normalized = timeline->point_next - timeline->point_oldest;
+
+ return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
+
+/**
+ * Check if a point is valid. A point is valid if it is on the timeline or has been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest.
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return timeline->point_next == timeline->point_oldest;
+}
+
+/**
+ * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref
+ * MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be
+ * activated before this function returns. Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ * MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job);
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracked is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+#if defined(DEBUG)
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state. Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+ MALI_TIMELINE_TS_INIT = 0,
+ MALI_TIMELINE_TS_WAITING = 1,
+ MALI_TIMELINE_TS_ACTIVE = 2,
+ MALI_TIMELINE_TS_FINISH = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline);
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.c b/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.c
new file mode 100644
index 00000000000000..1eace170e526e7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_fence_wait.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+ return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ _mali_osk_atomic_term(&wait->refcount);
+ _mali_osk_free(wait);
+}
+
+/**
+ * Check if fence wait tracker has been activated. Used as a wait queue condition.
+ *
+ * @param data Fence waiter.
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+
+ wait = (struct mali_timeline_fence_wait_tracker *) data;
+ MALI_DEBUG_ASSERT_POINTER(wait);
+
+ return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC)
+ struct sync_fence *sync_fence = NULL;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ mali_timeline_point point;
+
+ point = fence->points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+ }
+
+ if (!mali_timeline_is_point_released(timeline, point)) {
+ ret = MALI_FALSE;
+ goto exit;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ if (-1 != fence->sync_fd) {
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (likely(NULL != sync_fence)) {
+ if (0 == sync_fence->status) {
+ ret = MALI_FALSE;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+exit:
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != sync_fence) {
+ sync_fence_put(sync_fence);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return ret;
+}
+
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+ mali_timeline_point point;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+ return mali_timeline_fence_wait_check_status(system, fence);
+ }
+
+ wait = mali_timeline_fence_wait_tracker_alloc();
+ if (unlikely(NULL == wait)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+ return MALI_FALSE;
+ }
+
+ wait->activated = MALI_FALSE;
+ wait->system = system;
+
+ /* Initialize refcount to two references. The first reference will be released by this
+ * function after the wait is over. The second reference will be released when the tracker
+ * is activated. */
+ _mali_osk_atomic_init(&wait->refcount, 2);
+
+ /* Add tracker to timeline system, but not to a timeline. */
+ mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+ point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+ MALI_IGNORE(point);
+
+ /* Wait for the tracker to be activated or time out. */
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+ } else {
+ _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+ }
+
+ ret = wait->activated;
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+
+ return ret;
+}
+
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+ wait->activated = MALI_TRUE;
+
+ _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+ MALI_IGNORE(schedule_mask);
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.h b/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.h
new file mode 100644
index 00000000000000..393a71d44e0c34
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline_fence_wait.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency. We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ */
+struct mali_timeline_fence_wait_tracker {
+ mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+ _mali_osk_atomic_t refcount; /**< Reference count. */
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled, or timeout is reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
+
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence waiter tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.c b/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.c
new file mode 100644
index 00000000000000..ebfa569b0552c2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Creates a sync fence tracker and a sync fence. Adds sync fence tracker to Timeline system and
+ * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+ struct sync_fence *sync_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ /* Allocate sync fence tracker. */
+ sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+ if (NULL == sync_fence_tracker) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+ return NULL;
+ }
+
+ /* Create sync flag. */
+ MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+ sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+ if (NULL == sync_fence_tracker->flag) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Create sync fence from sync flag. */
+ sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+ mali_sync_flag_put(sync_fence_tracker->flag);
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Setup fence for tracker. */
+ _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ fence.sync_fd = -1;
+ fence.points[timeline->id] = point;
+
+ /* Finally, add the tracker to Timeline system. */
+ mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+ point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+
+ return sync_fence;
+}
+
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ u32 i;
+ struct sync_fence *sync_fence_acc = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ struct sync_fence *sync_fence;
+
+ if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ /* Merge sync fences. */
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ /* This was the first sync fence created. */
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (-1 != fence->sync_fd) {
+ struct sync_fence *sync_fence;
+
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (NULL == sync_fence_acc) {
+ MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+ /* There was nothing to wait on, so return an already signaled fence. */
+
+ sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+ if (NULL == sync_fence_acc) goto error;
+ }
+
+ /* Return file descriptor for the accumulated sync fence. */
+ return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+ if (NULL != sync_fence_acc) {
+ sync_fence_put(sync_fence_acc);
+ }
+
+ return -1;
+}
+
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+ /* Signal flag and release reference. */
+ mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+ mali_sync_flag_put(sync_fence_tracker->flag);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+
+ _mali_osk_free(sync_fence_tracker);
+}
+
+#endif /* defined(CONFIG_SYNC) */
diff --git a/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.h b/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.h
new file mode 100644
index 00000000000000..54b92d5184b88d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_timeline_sync_fence.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Sync fence tracker.
+ */
+struct mali_timeline_sync_fence_tracker {
+ struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
+
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ *
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_ukk.h b/drivers/parrot/gpu/mali400/common/mali_ukk.h
new file mode 100644
index 00000000000000..65858dc9c1ff63
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_ukk.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementation that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context);
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context);
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args);
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem(_mali_uk_unmap_external_mem_s *args);
+
+#if defined(CONFIG_MALI400_UMP)
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args);
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args);
+#endif /* CONFIG_MALI400_UMP */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependant, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa(_mali_uk_va_to_mali_pa_s *args);
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @brief Return the total memory usage
+ *
+ * @param args see _mali_uk_profiling_memory_usage_get_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_memory_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/parrot/gpu/mali400/common/mali_user_settings_db.c b/drivers/parrot/gpu/mali400/common/mali_user_settings_db.c
new file mode 100644
index 00000000000000..b5d65216ae9096
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_user_settings_db.c
@@ -0,0 +1,147 @@
+/**
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool done = MALI_FALSE;
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+ /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+ sizeof(_mali_uk_settings_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_settings_changed_s *data;
+ data = notobjs[i]->result_buffer;
+
+ data->setting = setting;
+ data->value = value;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool notify = MALI_FALSE;
+
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+		MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", setting));
+ return;
+ }
+
+ if (mali_user_settings[setting] != value) {
+ notify = MALI_TRUE;
+ }
+
+ mali_user_settings[setting] = value;
+
+ if (notify) {
+ mali_user_settings_notify(setting, value);
+ }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ return 0;
+ }
+
+ return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+ _mali_uk_user_setting_t setting;
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ setting = args->setting;
+
+ if (_MALI_UK_USER_SETTING_MAX > setting) {
+ args->value = mali_user_settings[setting];
+ return _MALI_OSK_ERR_OK;
+ } else {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali400/common/mali_user_settings_db.h b/drivers/parrot/gpu/mali400/common/mali_user_settings_db.h
new file mode 100644
index 00000000000000..824e3e1782c530
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/common/mali_user_settings_db.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previous set value running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard.h
new file mode 100644
index 00000000000000..8a0e280979179b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+/* Mali-200 */
+
+#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
+ MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
+ MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
+ MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+ MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+ MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+ MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
+ MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+ { \
+ .name = "Mali_L2", \
+ .flags = IORESOURCE_MEM, \
+ .start = addr, \
+ .end = addr + 0x200, \
+ },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_mmu_addr, \
+ .end = gp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_mmu_irq, \
+ .end = gp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+ { \
+ .name = "Mali_PP" #id, \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_addr, \
+ .end = pp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_mmu_irq, \
+ .end = pp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+ { \
+ .name = "Mali_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = mmu_addr, \
+ .end = mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = mmu_irq, \
+ .end = mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+ { \
+ .name = "Mali_PMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pmu_addr, \
+ .end = pmu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DMA(dma_addr) \
+ { \
+ .name = "Mali_DMA", \
+ .flags = IORESOURCE_MEM, \
+ .start = dma_addr, \
+ .end = dma_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+ { \
+ .name = "Mali_DLBU", \
+ .flags = IORESOURCE_MEM, \
+ .start = dlbu_addr, \
+ .end = dlbu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+ { \
+ .name = "Mali_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = bcast_addr, \
+ .end = bcast_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_Broadcast_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+ { \
+ .name = "Mali_PP_MMU_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_bcast_addr, \
+ .end = pp_mmu_bcast_addr + 0x100, \
+ },
+
+struct mali_gpu_utilization_data {
+ unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+ unsigned int number_of_window_jobs;
+ unsigned int number_of_window_jobs_under_pressure;
+#endif
+};
+
+struct mali_gpu_device_data {
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
+
+ /* Shared GPU memory */
+ unsigned long shared_mem_size;
+
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+
+ /* Max runtime [ms] for jobs */
+ int max_job_runtime;
+
+ /* Report GPU utilization in this interval (specified in ms) */
+ unsigned long utilization_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+ /*
+ * Mali PMU switch delay.
+ * Only needed if the power gates are connected to the PMU in a high fanout
+ * network. This value is the number of Mali clock cycles it takes to
+ * enable the power gates and turn on the power mesh.
+ * This value will have no effect if a daisy chain implementation is used.
+ */
+ u32 pmu_switch_delay;
+
+
+ /* Mali Dynamic power domain configuration in sequence from 0-11
+ * GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7, L2$0 L2$1 L2$2
+ */
+ u16 pmu_domain_config[12];
+
+	/* Function that platform provides as callback for frequency tuning, needed when POWER_PERFORMANCE_POLICY enabled */
+ int (*set_freq_callback)(unsigned int mhz);
+};
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ */
+int mali_pmu_powerdown(void);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ */
+int mali_pmu_powerup(void);
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+void mali_dev_pause(void);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+/** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_counters.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_counters.h
new file mode 100644
index 00000000000000..b3caaa121c00f2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_counters.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_COUNTERS_H_
+#define _MALI_UTGARD_COUNTERS_H_
+
+typedef struct {
+ void *unused;
+} mali_cinstr_counter_info;
+
+typedef enum {
+ MALI_CINSTR_COUNTER_SOURCE_EGL = 0,
+ MALI_CINSTR_COUNTER_SOURCE_OPENGLES = 1000,
+ MALI_CINSTR_COUNTER_SOURCE_OPENVG = 2000,
+ MALI_CINSTR_COUNTER_SOURCE_GP = 3000,
+ MALI_CINSTR_COUNTER_SOURCE_PP = 4000,
+} cinstr_counter_source;
+
+#define MALI_CINSTR_EGL_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_EGL
+#define MALI_CINSTR_EGL_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_EGL + 999)
+
+#define MALI_CINSTR_GLES_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENGLES
+#define MALI_CINSTR_GLES_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 999)
+
+#define MALI_CINSTR_VG_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENVG
+#define MALI_CINSTR_VG_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENVG + 999)
+
+#define MALI_CINSTR_GP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_GP
+#define MALI_CINSTR_GP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_GP + 999)
+
+#define MALI_CINSTR_PP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_PP
+#define MALI_CINSTR_PP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_PP + 999)
+
+
+typedef enum {
+ /* EGL counters */
+
+ MALI_CINSTR_EGL_BLIT_TIME = MALI_CINSTR_COUNTER_SOURCE_EGL + 0,
+
+ /* Last counter in the EGL set */
+ MALI_CINSTR_EGL_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_EGL + 1,
+
+ /* GLES counters */
+
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 0,
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_INDICES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 1,
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 2,
+ MALI_CINSTR_GLES_DRAW_ARRAYS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 3,
+ MALI_CINSTR_GLES_DRAW_ARRAYS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 4,
+ MALI_CINSTR_GLES_DRAW_POINTS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 5,
+ MALI_CINSTR_GLES_DRAW_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 6,
+ MALI_CINSTR_GLES_DRAW_LINE_LOOP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 7,
+ MALI_CINSTR_GLES_DRAW_LINE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 8,
+ MALI_CINSTR_GLES_DRAW_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 9,
+ MALI_CINSTR_GLES_DRAW_TRIANGLE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 10,
+ MALI_CINSTR_GLES_DRAW_TRIANGLE_FAN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 11,
+ MALI_CINSTR_GLES_NON_VBO_DATA_COPY_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 12,
+ MALI_CINSTR_GLES_UNIFORM_BYTES_COPIED_TO_MALI = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 13,
+ MALI_CINSTR_GLES_UPLOAD_TEXTURE_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 14,
+ MALI_CINSTR_GLES_UPLOAD_VBO_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 15,
+ MALI_CINSTR_GLES_NUM_FLUSHES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 16,
+ MALI_CINSTR_GLES_NUM_VSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 17,
+ MALI_CINSTR_GLES_NUM_FSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 18,
+ MALI_CINSTR_GLES_VSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 19,
+ MALI_CINSTR_GLES_FSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 20,
+ MALI_CINSTR_GLES_INPUT_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 21,
+ MALI_CINSTR_GLES_VXCACHE_HIT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 22,
+ MALI_CINSTR_GLES_VXCACHE_MISS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 23,
+ MALI_CINSTR_GLES_VXCACHE_COLLISION = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 24,
+ MALI_CINSTR_GLES_CULLED_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 25,
+ MALI_CINSTR_GLES_CULLED_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 26,
+ MALI_CINSTR_GLES_BACKFACE_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 27,
+ MALI_CINSTR_GLES_GBCLIP_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 28,
+ MALI_CINSTR_GLES_GBCLIP_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 29,
+ MALI_CINSTR_GLES_TRIANGLES_DRAWN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 30,
+ MALI_CINSTR_GLES_DRAWCALL_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 31,
+ MALI_CINSTR_GLES_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 32,
+ MALI_CINSTR_GLES_INDEPENDENT_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 33,
+ MALI_CINSTR_GLES_STRIP_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 34,
+ MALI_CINSTR_GLES_FAN_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 35,
+ MALI_CINSTR_GLES_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 36,
+ MALI_CINSTR_GLES_INDEPENDENT_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 37,
+ MALI_CINSTR_GLES_STRIP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 38,
+ MALI_CINSTR_GLES_LOOP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 39,
+ MALI_CINSTR_GLES_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 40,
+
+ /* Last counter in the GLES set */
+ MALI_CINSTR_GLES_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 41,
+
+ /* OpenVG counters */
+
+ MALI_CINSTR_VG_MASK_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 0,
+ MALI_CINSTR_VG_CLEAR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 1,
+ MALI_CINSTR_VG_APPEND_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 2,
+ MALI_CINSTR_VG_APPEND_PATH_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 3,
+ MALI_CINSTR_VG_MODIFY_PATH_COORDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 4,
+ MALI_CINSTR_VG_TRANSFORM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 5,
+ MALI_CINSTR_VG_INTERPOLATE_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 6,
+ MALI_CINSTR_VG_PATH_LENGTH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 7,
+ MALI_CINSTR_VG_POINT_ALONG_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 8,
+ MALI_CINSTR_VG_PATH_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 9,
+ MALI_CINSTR_VG_PATH_TRANSFORMED_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 10,
+ MALI_CINSTR_VG_DRAW_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 11,
+ MALI_CINSTR_VG_CLEAR_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 12,
+ MALI_CINSTR_VG_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 13,
+ MALI_CINSTR_VG_GET_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 14,
+ MALI_CINSTR_VG_COPY_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 15,
+ MALI_CINSTR_VG_DRAW_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 16,
+ MALI_CINSTR_VG_SET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 17,
+ MALI_CINSTR_VG_WRITE_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 18,
+ MALI_CINSTR_VG_GET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 19,
+ MALI_CINSTR_VG_READ_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 20,
+ MALI_CINSTR_VG_COPY_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 21,
+ MALI_CINSTR_VG_COLOR_MATRIX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 22,
+ MALI_CINSTR_VG_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 23,
+ MALI_CINSTR_VG_SEPARABLE_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 24,
+ MALI_CINSTR_VG_GAUSSIAN_BLUR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 25,
+ MALI_CINSTR_VG_LOOKUP_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 26,
+ MALI_CINSTR_VG_LOOKUP_SINGLE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 27,
+ MALI_CINSTR_VG_CONTEXT_CREATE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 28,
+ MALI_CINSTR_VG_STROKED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 29,
+ MALI_CINSTR_VG_STROKED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 30,
+ MALI_CINSTR_VG_STROKED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 31,
+ MALI_CINSTR_VG_STROKED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 32,
+ MALI_CINSTR_VG_FILLED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 33,
+ MALI_CINSTR_VG_FILLED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 34,
+ MALI_CINSTR_VG_FILLED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 35,
+ MALI_CINSTR_VG_FILLED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 36,
+ MALI_CINSTR_VG_DRAW_PATH_CALLS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 37,
+ MALI_CINSTR_VG_TRIANGLES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 38,
+ MALI_CINSTR_VG_VERTICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 39,
+ MALI_CINSTR_VG_INDICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 40,
+ MALI_CINSTR_VG_FILLED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 41,
+ MALI_CINSTR_VG_STROKED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 42,
+ MALI_CINSTR_VG_FILL_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 43,
+ MALI_CINSTR_VG_DRAW_FILLED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 44,
+ MALI_CINSTR_VG_STROKE_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 45,
+ MALI_CINSTR_VG_DRAW_STROKED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 46,
+ MALI_CINSTR_VG_DRAW_PAINT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 47,
+ MALI_CINSTR_VG_DATA_STRUCTURES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 48,
+ MALI_CINSTR_VG_MEM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 49,
+ MALI_CINSTR_VG_RSW_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 50,
+
+ /* Last counter in the VG set */
+ MALI_CINSTR_VG_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 51,
+
+ /* Mali GP counters */
+
+ MALI_CINSTR_GP_DEPRECATED_0 = MALI_CINSTR_COUNTER_SOURCE_GP + 0,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_GP = MALI_CINSTR_COUNTER_SOURCE_GP + 1,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 2,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_STORER = MALI_CINSTR_COUNTER_SOURCE_GP + 3,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_LOADER = MALI_CINSTR_COUNTER_SOURCE_GP + 4,
+ MALI_CINSTR_GP_CYCLES_VERTEX_LOADER_WAITING_FOR_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 5,
+ MALI_CINSTR_GP_NUMBER_OF_WORDS_READ = MALI_CINSTR_COUNTER_SOURCE_GP + 6,
+ MALI_CINSTR_GP_NUMBER_OF_WORDS_WRITTEN = MALI_CINSTR_COUNTER_SOURCE_GP + 7,
+ MALI_CINSTR_GP_NUMBER_OF_READ_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 8,
+ MALI_CINSTR_GP_NUMBER_OF_WRITE_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 9,
+ MALI_CINSTR_GP_NUMBER_OF_VERTICES_PROCESSED = MALI_CINSTR_COUNTER_SOURCE_GP + 10,
+ MALI_CINSTR_GP_NUMBER_OF_VERTICES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 11,
+ MALI_CINSTR_GP_NUMBER_OF_PRIMITIVES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 12,
+ MALI_CINSTR_GP_RESERVED_13 = MALI_CINSTR_COUNTER_SOURCE_GP + 13,
+ MALI_CINSTR_GP_NUMBER_OF_BACKFACE_CULLINGS_DONE = MALI_CINSTR_COUNTER_SOURCE_GP + 14,
+ MALI_CINSTR_GP_NUMBER_OF_COMMANDS_WRITTEN_TO_TILES = MALI_CINSTR_COUNTER_SOURCE_GP + 15,
+ MALI_CINSTR_GP_NUMBER_OF_MEMORY_BLOCKS_ALLOCATED = MALI_CINSTR_COUNTER_SOURCE_GP + 16,
+ MALI_CINSTR_GP_RESERVED_17 = MALI_CINSTR_COUNTER_SOURCE_GP + 17,
+ MALI_CINSTR_GP_RESERVED_18 = MALI_CINSTR_COUNTER_SOURCE_GP + 18,
+ MALI_CINSTR_GP_NUMBER_OF_VERTEX_LOADER_CACHE_MISSES = MALI_CINSTR_COUNTER_SOURCE_GP + 19,
+ MALI_CINSTR_GP_RESERVED_20 = MALI_CINSTR_COUNTER_SOURCE_GP + 20,
+ MALI_CINSTR_GP_RESERVED_21 = MALI_CINSTR_COUNTER_SOURCE_GP + 21,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 22,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 23,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_LIST_WRITER = MALI_CINSTR_COUNTER_SOURCE_GP + 24,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_THROUGH_THE_PREPARE_LIST_COMMANDS = MALI_CINSTR_COUNTER_SOURCE_GP + 25,
+ MALI_CINSTR_GP_RESERVED_26 = MALI_CINSTR_COUNTER_SOURCE_GP + 26,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PRIMITIVE_ASSEMBLY = MALI_CINSTR_COUNTER_SOURCE_GP + 27,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_VERTEX_FETCHER = MALI_CINSTR_COUNTER_SOURCE_GP + 28,
+ MALI_CINSTR_GP_RESERVED_29 = MALI_CINSTR_COUNTER_SOURCE_GP + 29,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_BOUNDINGBOX_AND_COMMAND_GENERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 30,
+ MALI_CINSTR_GP_RESERVED_31 = MALI_CINSTR_COUNTER_SOURCE_GP + 31,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_SCISSOR_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 32,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 33,
+ MALI_CINSTR_GP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_GP + 900,
+
+ /* Mali PP counters */
+
+ MALI_CINSTR_PP_ACTIVE_CLOCK_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 0,
+ MALI_CINSTR_PP_TOTAL_CLOCK_CYCLES_COUNT_REMOVED = MALI_CINSTR_COUNTER_SOURCE_PP + 1,
+ MALI_CINSTR_PP_TOTAL_BUS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 2,
+ MALI_CINSTR_PP_TOTAL_BUS_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 3,
+ MALI_CINSTR_PP_BUS_READ_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 4,
+ MALI_CINSTR_PP_BUS_WRITE_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 5,
+ MALI_CINSTR_PP_BUS_READ_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 6,
+ MALI_CINSTR_PP_BUS_WRITE_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 7,
+ MALI_CINSTR_PP_RESERVED_08 = MALI_CINSTR_COUNTER_SOURCE_PP + 8,
+ MALI_CINSTR_PP_TILE_WRITEBACK_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 9,
+ MALI_CINSTR_PP_STORE_UNIT_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 10,
+ MALI_CINSTR_PP_RESERVED_11 = MALI_CINSTR_COUNTER_SOURCE_PP + 11,
+ MALI_CINSTR_PP_PALETTE_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 12,
+ MALI_CINSTR_PP_TEXTURE_CACHE_UNCOMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 13,
+ MALI_CINSTR_PP_POLYGON_LIST_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 14,
+ MALI_CINSTR_PP_RSW_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 15,
+ MALI_CINSTR_PP_VERTEX_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 16,
+ MALI_CINSTR_PP_UNIFORM_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 17,
+ MALI_CINSTR_PP_PROGRAM_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 18,
+ MALI_CINSTR_PP_VARYING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+ MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 20,
+ MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 21,
+ MALI_CINSTR_PP_TEXTURE_CACHE_COMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 22,
+ MALI_CINSTR_PP_LOAD_UNIT_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 23,
+ MALI_CINSTR_PP_POLYGON_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 24,
+ MALI_CINSTR_PP_PIXEL_RECTANGLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 25,
+ MALI_CINSTR_PP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 26,
+ MALI_CINSTR_PP_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 27,
+ MALI_CINSTR_PP_STALL_CYCLES_POLYGON_LIST_READER = MALI_CINSTR_COUNTER_SOURCE_PP + 28,
+ MALI_CINSTR_PP_STALL_CYCLES_TRIANGLE_SETUP = MALI_CINSTR_COUNTER_SOURCE_PP + 29,
+ MALI_CINSTR_PP_QUAD_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 30,
+ MALI_CINSTR_PP_FRAGMENT_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 31,
+ MALI_CINSTR_PP_FRAGMENT_REJECTED_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 32,
+ MALI_CINSTR_PP_FRAGMENT_REJECTED_FWD_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 33,
+ MALI_CINSTR_PP_FRAGMENT_PASSED_ZSTENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 34,
+ MALI_CINSTR_PP_PATCHES_REJECTED_EARLY_Z_STENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 35,
+ MALI_CINSTR_PP_PATCHES_EVALUATED = MALI_CINSTR_COUNTER_SOURCE_PP + 36,
+ MALI_CINSTR_PP_INSTRUCTION_COMPLETED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 37,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_RENDEZVOUS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 38,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_VARYING_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 39,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_TEXTURE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 40,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_LOAD_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 41,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_TILE_READ_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 42,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_STORE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 43,
+ MALI_CINSTR_PP_RENDEZVOUS_BREAKAGE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 44,
+ MALI_CINSTR_PP_PIPELINE_BUBBLES_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 45,
+ MALI_CINSTR_PP_TEXTURE_MAPPER_MULTIPASS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 46,
+ MALI_CINSTR_PP_TEXTURE_MAPPER_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 47,
+ MALI_CINSTR_PP_VERTEX_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 48,
+ MALI_CINSTR_PP_VERTEX_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 49,
+ MALI_CINSTR_PP_VARYING_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 50,
+ MALI_CINSTR_PP_VARYING_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 51,
+ MALI_CINSTR_PP_VARYING_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 52,
+ MALI_CINSTR_PP_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 53,
+ MALI_CINSTR_PP_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 54,
+ MALI_CINSTR_PP_TEXTURE_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 55,
+ MALI_CINSTR_PP_PALETTE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 200 only */
+ MALI_CINSTR_PP_PALETTE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 200 only */
+ MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 400 class only */
+ MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 400 class only */
+ MALI_CINSTR_PP_LOAD_STORE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 58,
+ MALI_CINSTR_PP_LOAD_STORE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 59,
+ MALI_CINSTR_PP_PROGRAM_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 60,
+ MALI_CINSTR_PP_PROGRAM_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 61,
+ MALI_CINSTR_PP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 900,
+} cinstr_counters_m200_t;
+
+#endif /*_MALI_UTGARD_COUNTERS_H_*/
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_ioctl.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644
index 00000000000000..a72129e4279e63
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_ioctl.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s *)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s *)
+#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s *)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s *)
+#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s *)
+#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s *)
+
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s *)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+/* Deprecated ioctls */
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, void *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, void *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_events.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644
index 00000000000000..ed3d46cab4b126
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_events.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32 bit value consisting of different fields
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16 bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+ MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+ MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+ MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+ MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE = 5,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE = 6,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK = 7,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK = 8,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS = 13,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55,
+ MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED = 56,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP = 57,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE = 58,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC = 59,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH = 62,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL = 0,
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum {
+ /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,*/
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI = 1,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD = 3,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF = 4,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26, /* used in some build configurations */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE = 33, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK = 36, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK = 37, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP = 38, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING = 40, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING = 41, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE = 42, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF reason.
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_DATA_CORE_GP0 = 1,
+ MALI_PROFILING_EVENT_DATA_CORE_PP0 = 5,
+ MALI_PROFILING_EVENT_DATA_CORE_PP1 = 6,
+ MALI_PROFILING_EVENT_DATA_CORE_PP2 = 7,
+ MALI_PROFILING_EVENT_DATA_CORE_PP3 = 8,
+ MALI_PROFILING_EVENT_DATA_CORE_PP4 = 9,
+ MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
+ MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
+ MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100644
index 00000000000000..20a630ffd0df82
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+/** The list of events supported by the Mali DDK. */
+typedef enum {
+ /* Vertex processor activity */
+ ACTIVITY_VP_0 = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP_0,
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+ ACTIVITY_FP_4,
+ ACTIVITY_FP_5,
+ ACTIVITY_FP_6,
+ ACTIVITY_FP_7,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+ COUNTER_L2_1_C0,
+ COUNTER_L2_1_C1,
+ COUNTER_L2_2_C0,
+ COUNTER_L2_2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0,
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+ COUNTER_FP_4_C0,
+ COUNTER_FP_4_C1,
+ COUNTER_FP_5_C0,
+ COUNTER_FP_5_C1,
+ COUNTER_FP_6_C0,
+ COUNTER_FP_6_C1,
+ COUNTER_FP_7_C0,
+ COUNTER_FP_7_C1,
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters {
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values {
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version {
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+void _mali_profiling_control(u32 action, u32 value);
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_uk_types.h b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644
index 00000000000000..7f818276cdc1af
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/include/linux/mali/mali_utgard_uk_types.h
@@ -0,0 +1,1138 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Iteration functions depend on these values being consecutive. */
+#define MALI_UK_TIMELINE_GP 0
+#define MALI_UK_TIMELINE_PP 1
+#define MALI_UK_TIMELINE_SOFT 2
+#define MALI_UK_TIMELINE_MAX 3
+
+typedef struct {
+ u32 points[MALI_UK_TIMELINE_MAX];
+ s32 sync_fd;
+} _mali_uk_fence_t;
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum {
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum {
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+ _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */
+ _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */
+ _MALI_UK_REQUEST_HIGH_PRIORITY, /**< _mali_ukk_request_high_priority() */
+ _MALI_UK_TIMELINE_GET_LATEST_POINT, /**< _mali_ukk_timeline_get_latest_point() */
+ _MALI_UK_TIMELINE_WAIT, /**< _mali_ukk_timeline_wait() */
+ _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, /**< _mali_ukk_timeline_create_sync_fence() */
+ _MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */
+ _MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_DMA_BUF, /**< _mali_ukk_attach_dma_buf() */
+ _MALI_UK_RELEASE_DMA_BUF, /**< _mali_ukk_release_dma_buf() */
+ _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+ _MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+ _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */
+ _MALI_UK_PP_AND_GP_START_JOB, /**< _mali_ukk_pp_and_gp_start_job() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+ _MALI_UK_PROFILING_GET_CONFIG, /**< __mali_uku_profiling_get_config() */
+ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+ _MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode {
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type {
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage {
+
+ _MALI_PP_READABLE = (1 << 0), /** Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1 << 1), /** Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1 << 2), /** Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1 << 3), /** Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1 << 4), /** Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1 << 5), /** Writeable by the CPU */
+ _MALI_GP_L2_ALLOC = (1 << 6), /** GP allocate mali L2 cache lines*/
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /** Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+typedef enum mali_memory_cache_settings {
+ MALI_CACHE_STANDARD = 0,
+ MALI_CACHE_GP_READ_ALLOCATE = 1,
+} mali_memory_cache_settings ;
+
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info {
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /* mali_memory_cache_settings cache_settings; */
+ struct _mali_mem_info *next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code {
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum {
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum {
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1 << (16 + 0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1 << (16 + 1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1 << (16 + 2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1 << (16 + 3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1 << (16 + 4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1 << (16 + 5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1 << (16 + 6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1 << (16 + 7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1 << (16 + 8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u32 *timeline_point_ptr; /**< [in,out] pointer to location where point on gp timeline for this job will be written */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per tile (aka heatmap) generation with for a job (using the enabled counter sources) */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct {
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+} _mali_uk_gp_job_finished_s;
+
+typedef struct {
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+ u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-8(M450) or 4(M400)) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ u32 flags; /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+ u32 tilesx; /**< [in] number of tiles in the x direction (needed for heatmap generation) */
+ u32 tilesy; /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
+ u32 heatmap_mem; /**< [in] memory address to store counter values per tile (aka heatmap) */
+ u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */
+ u32 *memory_cookies; /**< [in] memory cookies attached to job */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u32 *timeline_point_ptr; /**< [in,out] pointer to location where point on pp timeline for this job will be written */
+} _mali_uk_pp_start_job_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_gp_start_job_s *gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+ _mali_uk_pp_start_job_s *pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+} _mali_uk_pp_and_gp_start_job_s;
+
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct {
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter_src0;
+ u32 perf_counter_src1;
+} _mali_uk_pp_job_finished_s;
+
+typedef struct {
+ u32 number_of_enabled_cores; /**< [out] the new number of enabled cores */
+} _mali_uk_pp_num_cores_changed_s;
+
+
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum {
+ _MALI_UK_PP_JOB_WB0 = 1,
+ _MALI_UK_PP_JOB_WB1 = 2,
+ _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
+ u32 wb0_memory;
+ u32 wb1_memory;
+ u32 wb2_memory;
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+/** @defgroup _mali_uk_soft_job U/K Soft Job
+ * @{ */
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 type; /**< [in] type of soft job */
+ u32 user_job; /**< [in] identifier for the job in user space */
+ u32 *job_id_ptr; /**< [in,out] pointer to location where job id will be written */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u32 point; /**< [out] point on soft timeline for this job */
+} _mali_uk_soft_job_start_s;
+
+typedef struct {
+ u32 user_job; /**< [out] identifier for the job in user space */
+} _mali_uk_soft_job_activated_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 job_id; /**< [in] id for soft job */
+} _mali_uk_soft_job_signal_s;
+
+/** @} */ /* end group _mali_uk_soft_job */
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum {
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+ _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+ _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ *
+ *
+ */
+typedef enum {
+ _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+ _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+ _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+ _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+ { \
+ "sw_events_enable", \
+ "colorbuffer_capture_enable", \
+ "depthbuffer_capture_enable", \
+ "stencilbuffer_capture_enable", \
+ "per_tile_counters_enable", \
+ "buffer_capture_compositor", \
+ "buffer_capture_window", \
+ "buffer_capture_other", \
+ "buffer_capture_n_frames", \
+ "buffer_capture_resize_factor", \
+ "sw_counters_enable", \
+ };
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct {
+ _mali_uk_user_setting_t setting;
+ u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the mali_uk_notification_type in the code.type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+ _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 401
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are given reference to the context pointed to by the ctx pointer.
+ *
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_user_setting_t setting; /**< [in] setting to get */
+ u32 value; /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @brief Arguments for _mali_ukk_request_high_priority() */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_request_high_priority_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_dma_buf_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_dma_buf_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+/**
+ * @brief Arguments for _mali_uk[uk]_mem_write_safe()
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ const void *src; /**< [in] Pointer to source data */
+ void *dest; /**< [in] Destination Mali buffer */
+ u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */
+ u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 memory_usage; /**< [out] total memory usage */
+} _mali_uk_profiling_memory_usage_get_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() directly calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+ mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficiency */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event {
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 *counters; /**< [in] The array of counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_timeline U/K Mali Timeline
+ * @{ */
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 timeline; /**< [in] timeline id */
+ u32 point; /**< [out] latest point on timeline */
+} _mali_uk_timeline_get_latest_point_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] fence */
+ u32 timeout; /**< [in] timeout (0 for no wait, -1 for blocking) */
+ u32 status; /**< [out] status of fence (1 if signaled, 0 if timeout) */
+} _mali_uk_timeline_wait_s;
+
+typedef struct {
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] mali fence to create linux sync fence from */
+ s32 sync_fd; /**< [out] file descriptor for new linux sync fence */
+} _mali_uk_timeline_create_sync_fence_s;
+
+/** @} */ /* end group _mali_uk_timeline */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/license/gpl/mali_kernel_license.h b/drivers/parrot/gpu/mali400/linux/license/gpl/mali_kernel_license.h
new file mode 100755
index 00000000000000..9442bd3b469ca7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_device_pause_resume.c b/drivers/parrot/gpu/mali400/linux/mali_device_pause_resume.c
new file mode 100644
index 00000000000000..5a900d3c52f637
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_device_pause_resume.c
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+void mali_dev_pause(void)
+{
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+ mali_group_power_off(MALI_FALSE);
+ mali_l2_cache_pause_all(MALI_TRUE);
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+void mali_dev_resume(void)
+{
+ mali_l2_cache_pause_all(MALI_FALSE);
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
diff --git a/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.c b/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.c
new file mode 100644
index 00000000000000..5a969947af1805
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.c
@@ -0,0 +1,816 @@
+/**
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters);
+#endif /* CONFIG_TRACEPOINTS */
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+/** the max fps the same as display vsync default 60, can set by module insert parameter */
+extern int mali_max_system_fps;
+module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_system_fps, "Max system fps the same as display VSYNC.");
+
+/** a lower limit on their desired FPS default 58, can set by module insert parameter*/
+extern int mali_desired_fps;
+module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_desired_fps, "A bit lower than max_system_fps which user desired fps");
+#endif
+
+#if MALI_ENABLE_CPU_CYCLES
+#include <linux/cpumask.h>
+#include <linux/timer.h>
+#include <asm/smp.h>
+static struct timer_list mali_init_cpu_clock_timers[8];
+static u32 mali_cpu_clock_last_value[8] = {0,};
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call requires non-cost */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+struct pm_ext_ops mali_dev_ext_pm_ops = {
+ .base =
+ {
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ },
+};
+#else
+static const struct dev_pm_ops mali_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_driver_runtime_suspend,
+ .runtime_resume = mali_driver_runtime_resume,
+ .runtime_idle = mali_driver_runtime_idle,
+#endif
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ .poweroff = mali_driver_suspend_scheduler,
+};
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver = {
+ .probe = mali_probe,
+ .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_ext_pm_ops,
+#endif
+ .driver =
+ {
+ .name = MALI_GPU_NAME_UTGARD,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_pm_ops,
+#endif
+ },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+#if MALI_ENABLE_CPU_CYCLES
+/**
+ * mali_init_cpu_time_counters() - program the ARMv7 PMU on the current CPU.
+ * @reset: non-zero to reset the event and cycle counters before enabling.
+ * @enable_divide_by_64: non-zero to clock the cycle counter at CLK/64.
+ *
+ * Enables the cycle counter and event counters 0-3 and grants user-space
+ * read access via PMUSERENR. CP15 PMU registers are per-CPU, so this must
+ * execute on the CPU being configured — see the per-CPU timer dispatch in
+ * mali_init_cpu_time_counters_on_all_cpus().
+ */
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
+{
+ /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */
+ u32 write_value;
+
+ /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */
+ /* setting p15 c9 c12 1 to 0x8000000f==CPU_CYCLE_ENABLE |EVENT_3_ENABLE|EVENT_2_ENABLE|EVENT_1_ENABLE|EVENT_0_ENABLE */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f));
+
+
+ /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
+ write_value = 1 << 0; /* Bit 0 set. Enable counters */
+ if (reset) {
+ write_value |= 1 << 1; /* Reset event counters */
+ write_value |= 1 << 2; /* Reset cycle counter */
+ }
+ if (enable_divide_by_64) {
+ write_value |= 1 << 3; /* Enable the Clock divider by 64 */
+ }
+ write_value |= 1 << 4; /* Export enable. Not needed */
+ asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value));
+
+ /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
+ asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+
+
+ /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1" */
+ /* User mode access to the Performance Monitors enabled. */
+ /* Lets User space read cpu clock cycles */
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
+}
+
+/** A timer function that configures the cycle clock counter on current CPU.
+ * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this
+ * function to trigger on all Cpus during module load.
+ */
+static void mali_init_cpu_clock_timer_func(unsigned long data)
+{
+ int reset_counters, enable_divide_clock_counter_by_64;
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+ unsigned int sample1;
+
+ /* Timer callback payload is unused; the timer exists only to pin
+ * execution to a specific CPU (armed with add_timer_on()). */
+ MALI_IGNORE(data);
+
+ /* Reset counters on init; run the cycle counter at full rate. */
+ reset_counters = 1;
+ enable_divide_clock_counter_by_64 = 0;
+ mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
+
+ /* Two back-to-back samples, used only for the debug print below to
+ * show the counter is ticking. */
+ sample0 = mali_get_cpu_cyclecount();
+ sample1 = mali_get_cpu_cyclecount();
+
+ MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1));
+}
+
+/** A timer functions for storing current time on all cpus.
+ * Used for checking if the clocks have similar values or if they are drifting.
+ */
+static void mali_print_cpu_clock_timer_func(unsigned long data)
+{
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+
+ MALI_IGNORE(data);
+ sample0 = mali_get_cpu_cyclecount();
+ /* Bounds check guards the 8-entry mali_cpu_clock_last_value[] array;
+ * CPUs with id >= 8 are silently skipped. */
+ if (current_cpu < 8) {
+ mali_cpu_clock_last_value[current_cpu] = sample0;
+ }
+}
+
+/** Init the performance registers on all CPUs to count clock cycles.
+ * For init \a print_only should be 0.
+ * If \a print_only is 1, it will intead print the current clock value of all CPUs.
+ */
+void mali_init_cpu_time_counters_on_all_cpus(int print_only)
+{
+ int i = 0;
+ int cpu_number;
+ int jiffies_trigger;
+ int jiffies_wait;
+
+ /* All timers fire at the same jiffy, ~2 jiffies from now, so the
+ * per-CPU samples are taken as close together as possible. */
+ jiffies_wait = 2;
+ jiffies_trigger = jiffies + jiffies_wait;
+
+ for (i = 0 ; i < 8 ; i++) {
+ init_timer(&mali_init_cpu_clock_timers[i]);
+ if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
+ else mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
+ mali_init_cpu_clock_timers[i].expires = jiffies_trigger ;
+ }
+ /* Arm one timer per online CPU (at most 8); timers beyond the online
+ * CPU count are initialized but never armed. */
+ cpu_number = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < 8 ; i++) {
+ int next_cpu;
+ add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
+ next_cpu = cpumask_next(cpu_number, cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids) break;
+ cpu_number = next_cpu;
+ }
+
+ /* Sleep until the trigger jiffy has passed so all callbacks have run. */
+ while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait);
+
+ for (i = 0 ; i < 8 ; i++) {
+ del_timer_sync(&mali_init_cpu_clock_timers[i]);
+ }
+
+ /* NOTE(review): only entries 0-3 are ever printed, even though up to 8
+ * CPUs may have been sampled above — confirm whether that is intended. */
+ if (print_only) {
+ if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) {
+ /* Diff can be printed if we want to check if the clocks are in sync
+ int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
+ } else {
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3]));
+ }
+ }
+}
+#endif
+
+
+/**
+ * mali_module_init() - module entry point.
+ *
+ * Optionally sets up the per-CPU cycle counters, registers the (possibly
+ * fake) platform device, then the platform driver, and finally starts
+ * internal profiling when configured. Returns 0 on success or a negative
+ * errno from the failed registration step.
+ */
+int mali_module_init(void)
+{
+ int err = 0;
+
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+#if MALI_ENABLE_CPU_CYCLES
+ mali_init_cpu_time_counters_on_all_cpus(0);
+ MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n"));
+ /* Printing the current cpu counters */
+ mali_init_cpu_time_counters_on_all_cpus(1);
+#endif
+
+ /* Initialize module wide settings */
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+ err = mali_platform_device_register();
+ if (0 != err) {
+ return err;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+ err = platform_driver_register(&mali_platform_driver);
+
+ if (0 != err) {
+ MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+ /* Unwind the device registration performed above. */
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ mali_platform_device_unregister();
+#endif
+ mali_platform_device = NULL;
+ return err;
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (0 != err) {
+ /* No biggie if we weren't able to initialize the profiling:
+ * the driver still loads, only profiling is unavailable. */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ MALI_PRINT(("Mali device driver loaded\n"));
+
+ return 0; /* Success */
+}
+
+/**
+ * mali_module_exit() - module exit point.
+ *
+ * Tears down in reverse order of mali_module_init(): internal profiling,
+ * the platform driver, then the fake platform device (when used).
+ */
+void mali_module_exit(void)
+{
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
+
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
+#endif
+
+ platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+ mali_platform_device_unregister();
+#endif
+
+ MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+/**
+ * mali_probe() - platform driver probe callback.
+ * @pdev: the matched Mali platform device.
+ *
+ * Single-device driver: rejects a second probe with -EEXIST. On the happy
+ * path it brings up the OSK work queues, the GPU subsystems, the misc
+ * device (/dev/mali) and the sysfs entries, unwinding in reverse on any
+ * failure. NOTE(review): every failure after the -EEXIST check collapses
+ * to a generic -EFAULT rather than propagating the underlying errno.
+ */
+static int mali_probe(struct platform_device *pdev)
+{
+ int err;
+
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+ if (NULL != mali_platform_device) {
+ /* Already connected to a device, return error */
+ MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+ return -EEXIST;
+ }
+
+ mali_platform_device = pdev;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+ /* Initialize the Mali GPU HW specified by pdev */
+ if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+ /* Register a misc device (so we are accessible from user space) */
+ err = mali_miscdevice_register(pdev);
+ if (0 == err) {
+ /* Setup sysfs entries */
+ err = mali_sysfs_register(mali_dev_name);
+ if (0 == err) {
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+ return 0;
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+ }
+ mali_miscdevice_unregister();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+ }
+ mali_terminate_subsystems();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+ }
+ _mali_osk_wq_term();
+ }
+
+ mali_platform_device = NULL;
+ return -EFAULT;
+}
+
+/**
+ * mali_remove() - platform driver remove callback.
+ * @pdev: the Mali platform device being removed.
+ *
+ * Tears down in strict reverse order of mali_probe(): sysfs, misc device,
+ * GPU subsystems, OSK work queues; then clears the single-device pointer.
+ */
+static int mali_remove(struct platform_device *pdev)
+{
+ MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+ mali_sysfs_unregister();
+ mali_miscdevice_unregister();
+ mali_terminate_subsystems();
+ _mali_osk_wq_term();
+ mali_platform_device = NULL;
+ return 0;
+}
+
+/**
+ * mali_miscdevice_register() - expose /dev/mali as a misc device.
+ * @pdev: platform device to use as the misc device's parent.
+ *
+ * Fills in the global mali_miscdevice with a dynamic minor and the
+ * mali_fops file operations, then registers it. Returns 0 on success or
+ * the misc_register() error. NOTE(review): get_device() takes a reference
+ * on &pdev->dev that mali_miscdevice_unregister() never drops with
+ * put_device() — confirm whether this leak is intended.
+ */
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+ int err;
+
+ mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+ mali_miscdevice.name = mali_dev_name;
+ mali_miscdevice.fops = &mali_fops;
+ mali_miscdevice.parent = get_device(&pdev->dev);
+
+ err = misc_register(&mali_miscdevice);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+ }
+
+ return err;
+}
+
+/* Remove /dev/mali. Counterpart of mali_miscdevice_register(); see the
+ * note there about the parent device reference not being released. */
+static void mali_miscdevice_unregister(void)
+{
+ misc_deregister(&mali_miscdevice);
+}
+
+/* System-sleep PM callback (suspend/freeze/poweroff): delegate to the
+ * common OS-suspend path. Always reports success. */
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+ mali_pm_os_suspend();
+ return 0;
+}
+
+/* System-sleep PM callback (resume/thaw): delegate to the common
+ * OS-resume path. Always reports success. */
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+ mali_pm_os_resume();
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime-PM suspend: delegate to the Mali runtime-PM implementation. */
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+ mali_pm_runtime_suspend();
+ return 0;
+}
+
+/* Runtime-PM resume: delegate to the Mali runtime-PM implementation. */
+static int mali_driver_runtime_resume(struct device *dev)
+{
+ mali_pm_runtime_resume();
+ return 0;
+}
+
+/* Runtime-PM idle: returning 0 lets the PM core proceed to suspend. */
+static int mali_driver_runtime_idle(struct device *dev)
+{
+ /* Nothing to do */
+ return 0;
+}
+#endif
+
+/**
+ * mali_open() - file_operations.open handler for /dev/mali.
+ *
+ * Verifies the inode really is our misc device minor, allocates a
+ * per-open session through _mali_ukk_open() and stashes it in
+ * filp->private_data for use by mali_ioctl()/mali_release().
+ * Returns 0, -ENODEV on minor mismatch, or a mapped OSK error.
+ */
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data *session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void *)session_data;
+
+ return 0;
+}
+
+/**
+ * mali_release() - file_operations.release handler for /dev/mali.
+ *
+ * Counterpart of mali_open(): validates the minor, then closes the
+ * session stored in filp->private_data via _mali_ukk_close() (which also
+ * clears the pointer, as it receives its address).
+ */
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+/**
+ * map_errcode() - translate a Mali OSK error code to a Linux errno.
+ * @err: internal _mali_osk_errcode_t value.
+ *
+ * Returns 0 for _MALI_OSK_ERR_OK, a matching negative errno for the known
+ * codes, and -EFAULT for anything unrecognized.
+ */
+int map_errcode(_mali_osk_errcode_t err)
+{
+ switch (err) {
+ case _MALI_OSK_ERR_OK :
+ return 0;
+ case _MALI_OSK_ERR_FAULT:
+ return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC:
+ return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS:
+ return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM:
+ return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL:
+ return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EFAULT;
+ }
+}
+
+/**
+ * mali_ioctl() - dispatch all /dev/mali ioctls to the uk wrappers.
+ *
+ * Built as an unlocked ioctl when HAVE_UNLOCKED_IOCTL is defined,
+ * otherwise as the legacy (BKL) variant. Requires a session set up by
+ * mali_open() in filp->private_data and a non-NULL user argument;
+ * otherwise returns -ENOTTY. Each recognized command is forwarded to the
+ * corresponding *_wrapper() with the argument cast to its __user struct.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data) {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (NULL == (void *)arg) {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_USER_SETTINGS:
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_REQUEST_HIGH_PRIORITY:
+ err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
+ break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_CONFIG:
+ /* Deprecated: still compatible with get_user_settings */
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+ break;
+
+
+ case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+ err = profiling_memory_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
+ break;
+
+#else
+
+ /* NOTE(review): MALI_IOC_PROFILING_MEMORY_USAGE_GET is missing from
+ * this fallthrough list; it reaches the default branch, which also
+ * returns -ENOTTY but without the "Profiling not supported" message. */
+ case MALI_IOC_PROFILING_START: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_STOP: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_CLEAR: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_CONFIG: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+ err = -ENOTTY;
+ break;
+
+#endif
+
+ case MALI_IOC_MEM_WRITE_SAFE:
+ err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+#if defined(CONFIG_MALI400_UMP)
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case MALI_IOC_MEM_ATTACH_DMA_BUF:
+ err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_DMA_BUF:
+ err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+ err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+ break;
+#else
+
+ case MALI_IOC_MEM_ATTACH_DMA_BUF: /* FALL-THROUGH */
+ case MALI_IOC_MEM_RELEASE_DMA_BUF: /* FALL-THROUGH */
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_AND_GP_START_JOB:
+ err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_DISABLE_WB:
+ err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+ err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_WAIT:
+ err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+ err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_START:
+ err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_SIGNAL:
+ err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_INIT: /* Fallthrough */
+ case MALI_IOC_MEM_TERM: /* Fallthrough */
+ MALI_DEBUG_PRINT(2, ("Deprecated ioctls called\n"));
+ err = -ENOTTY;
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
+ err = -ENOTTY;
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ }; /* NOTE(review): stray ';' after the switch block — harmless, but
+ * non-idiomatic; drop it next time this code is touched. */
+
+ return err;
+}
+
+
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.h b/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.h
new file mode 100644
index 00000000000000..ee7444bad1249e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_kernel_linux.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+
+extern struct platform_device *mali_platform_device;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.c b/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.c
new file mode 100644
index 00000000000000..10d970fad431fc
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.c
@@ -0,0 +1,1390 @@
+/**
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_pp_scheduler.h"
+
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | (sub_job << 8) | src)
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) (a & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF)
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum {
+ _MALI_DEVICE_SUSPEND,
+ _MALI_DEVICE_RESUME,
+ _MALI_DEVICE_DVFS_PAUSE,
+ _MALI_DEVICE_DVFS_RESUME,
+ _MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+static const char *const mali_power_events[_MALI_MAX_EVENTS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+ [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+ [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ r = snprintf(buffer, 64, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ unsigned long val;
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ r = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != r) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_group_enable(group);
+ break;
+ case 0:
+ mali_group_disable(group);
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations group_enabled_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = group_enabled_read,
+ .write = group_enabled_write,
+};
+
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_hw_core *hw_core;
+
+ hw_core = (struct mali_hw_core *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+ r = snprintf(buffer, 64, "0x%08X\n", hw_core->phys_addr);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations hw_core_base_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = hw_core_base_addr_read,
+};
+
+static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+ char buf[64];
+ int r;
+ u32 val;
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Get counter for a particular sub job */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job);
+ } else {
+ val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job);
+ }
+ } else {
+ /* Get default counter for all PP sub jobs */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_global_src0();
+ } else {
+ val = mali_pp_job_get_pp_counter_global_src1();
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ val = mali_gp_job_get_gp_counter_src0();
+ } else {
+ val = mali_gp_job_get_gp_counter_src1();
+ }
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Set counter for a particular sub job */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val);
+ }
+ } else {
+ /* Set default counter for all PP sub jobs */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_global_src0((u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_global_src1((u32)val);
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ mali_gp_job_set_gp_counter_src0((u32)val);
+ } else {
+ mali_gp_job_set_gp_counter_src1((u32)val);
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_counter_src_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = profiling_counter_src_read,
+ .write = profiling_counter_src_write,
+};
+
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ if (0 == src_id) {
+ val = mali_l2_cache_core_get_counter_src0(l2_core);
+ } else {
+ val = mali_l2_cache_core_get_counter_src1(l2_core);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (0 == src_id) {
+ mali_l2_cache_core_set_counter_src0(l2_core, (u32)val);
+ } else {
+ mali_l2_cache_core_set_counter_src1(l2_core, (u32)val);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache) {
+ if (0 == src_id) {
+ mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val);
+ } else {
+ mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val);
+ }
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src0_read,
+ .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src1_read,
+ .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src1_write,
+};
+
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting (not exactly thread safe) */
+ if (1 == val && MALI_FALSE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_TRUE;
+ _mali_osk_pm_dev_ref_add();
+ } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_FALSE;
+ _mali_osk_pm_dev_ref_dec();
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ if (MALI_TRUE == power_always_on_enabled) {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+ } else {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+ }
+}
+
+static const struct file_operations power_always_on_fops = {
+ .owner = THIS_MODULE,
+ .read = power_always_on_read,
+ .write = power_always_on_write,
+};
+
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+
+ if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]))) {
+ mali_pm_os_suspend();
+
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]))) {
+ mali_pm_os_resume();
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]))) {
+ mali_dev_pause();
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]))) {
+ mali_dev_resume();
+ }
+ *ppos += cnt;
+ return cnt;
+}
+
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+ file->f_pos = offset;
+ return 0;
+}
+
+static const struct file_operations power_power_events_fops = {
+ .owner = THIS_MODULE,
+ .write = power_power_events_write,
+ .llseek = power_power_events_seek,
+};
+
+#if MALI_STATE_TRACKING
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+ u32 len = 0;
+ u32 size;
+ char *buf;
+
+ size = seq_get_buf(seq_file, &buf);
+
+ if (!size) {
+ return -ENOMEM;
+ }
+
+ /* Create the internal state dump. */
+ len = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+ len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+ seq_commit(seq_file, len);
+
+ return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_seq_internal_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val != 0) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+ /* check if we are already recording */
+ if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+ MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+ return -EFAULT;
+ }
+
+ /* check if we need to clear out an old recording first */
+ if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+ MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+ return -EFAULT;
+ }
+ }
+
+ /* start recording profiling data */
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+ } else {
+ /* stop recording profiling data */
+ u32 count = 0;
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+ MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+ .owner = THIS_MODULE,
+ .read = profiling_record_read,
+ .write = profiling_record_write,
+};
+
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+ loff_t *spos;
+
+	/* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+ spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (NULL == spos) {
+ return NULL;
+ }
+
+ *spos = *pos;
+ return spos;
+}
+
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ loff_t *spos = v;
+
+	/* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+	/* check if the next entry is actually available */
+ if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+ return NULL;
+ }
+
+ *pos = ++*spos;
+ return spos;
+}
+
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+ kfree(v);
+}
+
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32) * spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+ static u64 start_time = 0;
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32) * spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+ if (0 == index) {
+ start_time = timestamp;
+ }
+
+ seq_printf(seq_file, "[%06u] ", index);
+
+ switch (event_id & 0x0F000000) {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ seq_printf(seq_file, "SINGLE | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ seq_printf(seq_file, "START | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ seq_printf(seq_file, "STOP | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+ seq_printf(seq_file, "SUSPEND | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_RESUME:
+ seq_printf(seq_file, "RESUME | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+ break;
+ }
+
+ switch (event_id & 0x00FF0000) {
+ case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+ seq_printf(seq_file, "SW | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GP0:
+ seq_printf(seq_file, "GP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP0:
+ seq_printf(seq_file, "PP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP1:
+ seq_printf(seq_file, "PP1 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP2:
+ seq_printf(seq_file, "PP2 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP3:
+ seq_printf(seq_file, "PP3 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP4:
+ seq_printf(seq_file, "PP4 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP5:
+ seq_printf(seq_file, "PP5 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP6:
+ seq_printf(seq_file, "PP6 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP7:
+ seq_printf(seq_file, "PP7 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GPU:
+ seq_printf(seq_file, "GPU | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+ break;
+ }
+
+ if (MALI_EVENT_ID_IS_HW(event_id)) {
+ if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+ switch (event_id & 0x0000FFFF) {
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+ seq_printf(seq_file, "PHYSICAL | ");
+ break;
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+ seq_printf(seq_file, "VIRTUAL | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ break;
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+
+ seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_human_readable_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif
+
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 mem = _mali_ukk_report_memory_usage();
+
+ r = snprintf(buf, 64, "%u\n", mem);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .read = memory_used_read,
+};
+
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+
+static const struct file_operations utilization_gp_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_pp_read,
+};
+
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ _mali_uk_user_setting_t setting;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting */
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ mali_set_user_setting(setting, val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 value;
+ _mali_uk_user_setting_t setting;
+
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ value = mali_get_user_setting(setting);
+
+ r = snprintf(buf, 64, "%u\n", value);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = user_settings_read,
+ .write = user_settings_write,
+};
+
+static int mali_sysfs_user_settings_register(void)
+{
+ struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+ if (mali_user_settings_dir != NULL) {
+ int i;
+ for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
+ debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void *)i, &user_settings_fops);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+ struct mali_pmu_core *pmu;
+ _mali_osk_errcode_t err;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ err = mali_pmu_power_down(pmu, val);
+ if (_MALI_OSK_ERR_OK != err) {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+ struct mali_pmu_core *pmu;
+ _mali_osk_errcode_t err;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ err = mali_pmu_power_up(pmu, val);
+ if (_MALI_OSK_ERR_OK != err) {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations pmu_power_down_fops = {
+ .owner = THIS_MODULE,
+ .write = pmu_power_down_write,
+};
+
+static const struct file_operations pmu_power_up_fops = {
+ .owner = THIS_MODULE,
+ .write = pmu_power_up_write,
+};
+
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ ret = mali_pp_scheduler_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+ if (ret) {
+ return ret;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_num_cores_enabled_write,
+ .read = pp_num_cores_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_total());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_total_fops = {
+ .owner = THIS_MODULE,
+ .read = pp_num_cores_total_read,
+};
+
+static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_pp_scheduler_core_scaling_enable();
+ break;
+ case 0:
+ mali_pp_scheduler_core_scaling_disable();
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ return simple_read_from_buffer(buf, count, offp, mali_pp_scheduler_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+}
+static const struct file_operations pp_core_scaling_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_core_scaling_enabled_write,
+ .read = pp_core_scaling_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r = 0;
+ char buffer[64];
+
+ switch (mali_kernel_core_get_product_id()) {
+ case _MALI_PRODUCT_ID_MALI200:
+ r = snprintf(buffer, 64, "Mali-200\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI300:
+ r = snprintf(buffer, 64, "Mali-300\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI400:
+ r = snprintf(buffer, 64, "Mali-400 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI450:
+ r = snprintf(buffer, 64, "Mali-450 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_UNKNOWN:
+ return -EINVAL;
+ break;
+ };
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations version_fops = {
+ .owner = THIS_MODULE,
+ .read = version_read,
+};
+
+/* Build the Mali debugfs tree under <debugfs>/<mali_dev_name>.
+ * Failures to create individual entries are deliberately ignored — the
+ * files are diagnostics only — so this function always returns 0. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+	mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+	if (ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+		/* Debugfs not supported. */
+		mali_debugfs_dir = NULL;
+	} else {
+		if (NULL != mali_debugfs_dir) {
+			/* Debugfs directory created successfully; create files now */
+			struct dentry *mali_pmu_dir;
+			struct dentry *mali_power_dir;
+			struct dentry *mali_gp_dir;
+			struct dentry *mali_pp_dir;
+			struct dentry *mali_l2_dir;
+			struct dentry *mali_profiling_dir;
+
+			debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+			/* PMU: write-only power up/down triggers. */
+			mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
+			if (NULL != mali_pmu_dir) {
+				debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
+				debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
+			}
+
+			/* Power policy controls and event injection. */
+			mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+			if (mali_power_dir != NULL) {
+				debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
+				debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops);
+			}
+
+			/* Geometry processor: at most one GP core, exposed as "gp0". */
+			mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+			if (mali_gp_dir != NULL) {
+				u32 num_groups;
+				/* NOTE(review): 'i' is int while num_groups is u32;
+				 * harmless for realistic core counts, but the types
+				 * could be unified. */
+				int i;
+
+				num_groups = mali_group_get_glob_num_groups();
+				for (i = 0; i < num_groups; i++) {
+					struct mali_group *group = mali_group_get_glob_group(i);
+
+					struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+					if (NULL != gp_core) {
+						struct dentry *mali_gp_gpx_dir;
+						mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+						if (NULL != mali_gp_gpx_dir) {
+							debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+							debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+						}
+						break; /* no need to look for any other GP cores */
+					}
+
+				}
+			}
+
+			/* Pixel processors: one sub-directory per physical PP core. */
+			mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+			if (mali_pp_dir != NULL) {
+				u32 num_groups;
+				int i;
+
+				debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+				debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+				debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops);
+
+				num_groups = mali_group_get_glob_num_groups();
+				for (i = 0; i < num_groups; i++) {
+					struct mali_group *group = mali_group_get_glob_group(i);
+
+					struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+					if (NULL != pp_core) {
+						char buf[16];
+						struct dentry *mali_pp_ppx_dir;
+						_mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+						mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+						if (NULL != mali_pp_ppx_dir) {
+							debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+							/* Virtual groups aggregate physical cores and
+							 * cannot be individually enabled/disabled. */
+							if (!mali_group_is_virtual(group)) {
+								debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+							}
+						}
+					}
+				}
+			}
+
+			/* L2 caches: "all" broadcasts to every cache, "l2N" per cache. */
+			mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+			if (mali_l2_dir != NULL) {
+				struct dentry *mali_l2_all_dir;
+				u32 l2_id;
+				struct mali_l2_cache_core *l2_cache;
+
+				mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+				if (mali_l2_all_dir != NULL) {
+					debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+					debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+				}
+
+				l2_id = 0;
+				l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+				while (NULL != l2_cache) {
+					char buf[16];
+					struct dentry *mali_l2_l2x_dir;
+					_mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+					mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+					if (NULL != mali_l2_l2x_dir) {
+						debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+						debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+						debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+					}
+
+					/* try next L2 */
+					l2_id++;
+					l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+				}
+			}
+
+			debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+			debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+			debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+			debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+			/* HW-counter selection for profiling tools. */
+			mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+			if (mali_profiling_dir != NULL) {
+				u32 max_sub_jobs;
+				int i;
+				struct dentry *mali_profiling_gp_dir;
+				struct dentry *mali_profiling_pp_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+				struct dentry *mali_profiling_proc_dir;
+#endif
+				/*
+				 * Create directory where we can set GP HW counters.
+				 */
+				mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
+				if (mali_profiling_gp_dir != NULL) {
+					debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+					debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+				}
+
+				/*
+				 * Create directory where we can set PP HW counters.
+				 * Possible override with specific HW counters for a particular sub job
+				 * (Disable core scaling before using the override!)
+				 */
+				mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
+				if (mali_profiling_pp_dir != NULL) {
+					debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+					debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+				}
+
+				/* Per-sub-job counter overrides: profiling/pp/<N>/... */
+				max_sub_jobs = mali_pp_scheduler_get_num_cores_total();
+				for (i = 0; i < max_sub_jobs; i++) {
+					char buf[16];
+					struct dentry *mali_profiling_pp_x_dir;
+					_mali_osk_snprintf(buf, sizeof(buf), "%u", i);
+					mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
+					if (NULL != mali_profiling_pp_x_dir) {
+						debugfs_create_file("counter_src0", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), &profiling_counter_src_fops);
+						debugfs_create_file("counter_src1", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), &profiling_counter_src_fops);
+					}
+				}
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+				mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+				if (mali_profiling_proc_dir != NULL) {
+					struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+					if (mali_profiling_proc_default_dir != NULL) {
+						debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+					}
+				}
+				debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+				debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+				debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+#endif
+			}
+
+#if MALI_STATE_TRACKING
+			debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+			if (mali_sysfs_user_settings_register()) {
+				/* Failed to create the debugfs entries for the user settings DB. */
+				MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+			}
+		}
+	}
+
+	/* Success! */
+	return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+	/* Remove the whole debugfs tree created by mali_sysfs_register().
+	 * Nothing to do when the directory was never created. */
+	if (mali_debugfs_dir != NULL)
+		debugfs_remove_recursive(mali_debugfs_dir);
+
+	return 0;
+}
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+/* Non-GPL stub: debugfs entries are not provided.
+ * Signature fixed to match the prototype in mali_kernel_sysfs.h; the
+ * original took (struct mali_dev *, dev_t, const char *), which
+ * conflicts with the header declaration and would not compile. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+	return 0;
+}
+
+/* Non-GPL stub: nothing was registered, so there is nothing to undo. */
+int mali_sysfs_unregister(void)
+{
+	return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.h b/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.h
new file mode 100644
index 00000000000000..2e9c9a5d848518
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_kernel_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+/* Create / remove the Mali debugfs tree. Both always return 0. */
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_linux_trace.h b/drivers/parrot/gpu/mali400/linux/mali_linux_trace.h
new file mode 100644
index 00000000000000..c4186130008cb7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_linux_trace.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+/* Fixed typo: the helper from <linux/stringify.h> is __stringify,
+ * not __stringfy. */
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ * encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+	    TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+		     unsigned int d2, unsigned int d3, unsigned int d4),
+
+	    TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, event_id)
+		    __field(unsigned int, d0)
+		    __field(unsigned int, d1)
+		    __field(unsigned int, d2)
+		    __field(unsigned int, d3)
+		    __field(unsigned int, d4)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->event_id = event_id;
+		    __entry->d0 = d0;
+		    __entry->d1 = d1;
+		    __entry->d2 = d2;
+		    __entry->d3 = d3;
+		    __entry->d4 = d4;
+	    ),
+
+	    /* Only event_id appears in the textual output; d0..d4 are still
+	     * recorded in the binary trace entry for tools that parse it. */
+	    TP_printk("event=%d", __entry->event_id)
+	   );
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+	    TP_PROTO(unsigned int counter_id, unsigned int value),
+
+	    TP_ARGS(counter_id, value),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, counter_id)
+		    __field(unsigned int, value)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->counter_id = counter_id;
+		    /* Bug fix: 'value' was never copied into the trace entry,
+		     * so TP_printk reported uninitialized data. */
+		    __entry->value = value;
+	    ),
+
+	    TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+	   );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+	    TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
+
+	    TP_ARGS(pid, tid, surface_id, counters),
+
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(pid_t, tid)
+		    __field(void *, surface_id)
+		    __field(unsigned int *, counters)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->tid = tid;
+		    __entry->surface_id = surface_id;
+		    /* NOTE(review): stores the raw pointer only; the counter
+		     * values themselves are not copied into the trace entry. */
+		    __entry->counters = counters;
+	    ),
+
+	    TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+	   );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id for which the core activity originates from
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+	    TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+	    TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(unsigned int, active)
+		    __field(unsigned int, core_type)
+		    __field(unsigned int, core_id)
+		    __field(unsigned int, frame_builder_id)
+		    __field(unsigned int, flush_id)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->active = active;
+		    __entry->core_type = core_type;
+		    __entry->core_id = core_id;
+		    __entry->frame_builder_id = frame_builder_id;
+		    __entry->flush_id = flush_id;
+	    ),
+
+	    /* Systrace-style line: S/F (start/finish), pid, core name,
+	     * flush id (hex) and frame builder id. */
+	    TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+	   );
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory.c b/drivers/parrot/gpu/mali400/linux/mali_memory.c
new file mode 100644
index 00000000000000..a9cad3c6d28889
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_descriptor_mapping.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_block_alloc.h"
+
+/* Release the backing storage of an allocation, dispatching on its type.
+ * The descriptor itself is NOT freed here (see mali_mem_descriptor_destroy).
+ * session->memory_lock must be held when calling this function */
+static void mali_mem_release(mali_mem_allocation *descriptor)
+{
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	switch (descriptor->type) {
+	case MALI_MEM_OS:
+		mali_mem_os_release(descriptor);
+		break;
+	case MALI_MEM_DMA_BUF:
+		/* Compiled out when dma-buf support is absent; the case then
+		 * falls through to the break and releases nothing. */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+		mali_mem_dma_buf_release(descriptor);
+#endif
+		break;
+	case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+		mali_mem_ump_release(descriptor);
+#endif
+		break;
+	case MALI_MEM_EXTERNAL:
+		mali_mem_external_release(descriptor);
+		break;
+	case MALI_MEM_BLOCK:
+		mali_mem_block_release(descriptor);
+		break;
+	}
+}
+
+static void mali_mem_vma_open(struct vm_area_struct *vma)
+{
+	mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+
+	MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+	/* Another CPU mapping (fork, mremap, ...) now refers to this
+	 * allocation; balance it in mali_mem_vma_close(). */
+	alloc->cpu_mapping.ref++;
+}
+
+/* VMA close handler: drop one CPU-mapping reference; when the last
+ * reference disappears, remove the descriptor from the session table,
+ * release the backing memory and destroy the descriptor. */
+static void mali_mem_vma_close(struct vm_area_struct *vma)
+{
+	mali_mem_allocation *descriptor;
+	struct mali_session_data *session;
+	mali_mem_virt_cpu_mapping *mapping;
+
+	MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+	descriptor = (mali_mem_allocation *)vma->vm_private_data;
+	BUG_ON(!descriptor);
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	mapping = &descriptor->cpu_mapping;
+	BUG_ON(0 == mapping->ref);
+
+	mapping->ref--;
+	if (0 != mapping->ref) {
+		/* Other VMAs (e.g. after fork) still map this allocation. */
+		MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", mapping->ref));
+		return;
+	}
+
+	session = descriptor->session;
+
+	/* Remove from the per-session descriptor table before freeing. */
+	mali_descriptor_mapping_free(session->descriptor_mapping, descriptor->id);
+
+	/* Release the backing memory under the session memory lock. */
+	_mali_osk_mutex_wait(session->memory_lock);
+	mali_mem_release(descriptor);
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	mali_mem_descriptor_destroy(descriptor);
+}
+
+/* CPU page-fault handler for Mali VMAs. Always returns VM_FAULT_SIGBUS:
+ * all pages are pre-faulted at mmap time, so a CPU fault here means the
+ * address was never mapped to Mali memory. */
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	void __user *address;
+	mali_mem_allocation *descriptor;
+
+	address = vmf->virtual_address;
+	descriptor = (mali_mem_allocation *)vma->vm_private_data;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	/*
+	 * We always fail the call since all memory is pre-faulted when assigned to the process.
+	 * Only the Mali cores can use page faults to extend buffers.
+	 */
+
+	MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+	MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void *)address));
+
+	/* Silence unused-variable warnings in non-debug builds. */
+	MALI_IGNORE(address);
+	MALI_IGNORE(descriptor);
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VM operations for Mali mappings. Const: the table is never modified
+ * at runtime and vma->vm_ops is a pointer-to-const in the kernel. */
+static const struct vm_operations_struct mali_kernel_vm_ops = {
+	.open = mali_mem_vma_open,
+	.close = mali_mem_vma_close,
+	.fault = mali_kernel_memory_cpu_page_fault_handler
+};
+
+/** @brief mmap handler for the Mali device node.
+ *
+ * Allocates Mali memory (block allocator first, OS allocator as fallback)
+ * and maps it for the CPU; registers the allocation in the session's
+ * descriptor table.
+ *
+ * @note munmap handler is done by vma close handler
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mali_session_data *session;
+	mali_mem_allocation *descriptor;
+	u32 size = vma->vm_end - vma->vm_start;
+	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+
+	session = (struct mali_session_data *)filp->private_data;
+	if (NULL == session) {
+		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+		return -EFAULT;
+	}
+
+	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
+			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+	/* Set some bits which indicate that, the memory is IO memory, meaning
+	 * that no paging is to be performed and the memory should not be
+	 * included in crash dumps. And that the memory is reserved, meaning
+	 * that it's present and can never be paged out (see also previous
+	 * entry)
+	 */
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= VM_DONTCOPY;
+	vma->vm_flags |= VM_PFNMAP;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+	vma->vm_flags |= VM_RESERVED;
+#else
+	vma->vm_flags |= VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND;
+#endif
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+	/* Prefer dedicated block memory; fall back to OS pages. */
+	descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
+	if (NULL == descriptor) {
+		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
+		if (NULL == descriptor) {
+			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
+			return -ENOMEM;
+		}
+	}
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	vma->vm_private_data = (void *)descriptor;
+
+	/* Put on descriptor map */
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		/* Bug fix: release through the type-dispatching helper — the
+		 * descriptor may have come from the block allocator, in which
+		 * case the original mali_mem_os_release() call was wrong. */
+		mali_mem_release(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/* Allocate and initialise a memory descriptor of the given type for the
+ * given session. Returns NULL on allocation failure. */
+mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type)
+{
+	mali_mem_allocation *descriptor;
+
+	/* Idiom fix: no cast of the allocator return value in C, and size
+	 * tied to the variable via sizeof *descriptor. */
+	descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+	if (NULL == descriptor) {
+		MALI_DEBUG_PRINT(3, ("mali_ukk_mem_mmap: descriptor was NULL\n"));
+		return NULL;
+	}
+
+	/* Magic is only tracked in debug builds (use-after-free detection). */
+	MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+	descriptor->flags = 0;
+	descriptor->type = type;
+	descriptor->session = session;
+
+	return descriptor;
+}
+
+void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor)
+{
+	/* In debug builds, verify the descriptor is live and then poison its
+	 * magic so a later use-after-free trips the validity assert. */
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+	MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_FREED_MAGIC);
+	kfree(descriptor);
+}
+
+/* Prepare (reference and allocate) the session's Mali page tables for
+ * the range described by the descriptor, including the trailing guard
+ * page when MALI_MEM_FLAG_MALI_GUARD_PAGE is set. */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
+{
+	u32 size = descriptor->size;
+	struct mali_session_data *session = descriptor->session;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	/* Map dma-buf into this session's page tables */
+
+	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+		size += MALI_MMU_PAGE_SIZE;
+	}
+
+	return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_mapping.addr, size);
+}
+
+/* Unmap the descriptor's range from the session's Mali page tables and
+ * zap the MMU TLBs of all active cores in the session. */
+void mali_mem_mali_map_free(mali_mem_allocation *descriptor)
+{
+	u32 size = descriptor->size;
+	struct mali_session_data *session = descriptor->session;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+		size += MALI_MMU_PAGE_SIZE;
+	}
+
+	/* Umap and flush L2. Bug fix: pass 'size' (guard page included when
+	 * present) instead of descriptor->size, which left the guard page
+	 * mapped — mirroring the size computation in map_prepare above. */
+	mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, size);
+
+	mali_scheduler_zap_all_active(session);
+}
+
+u32 _mali_ukk_report_memory_usage(void)
+{
+	/* Total GPU memory in use: dedicated block allocator plus the OS
+	 * page allocator statistics. */
+	return mali_mem_block_allocator_stat() + mali_mem_os_stat();
+}
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64    /* initial table capacity */
+#define MALI_MEM_DESCRIPTORS_MAX 65536  /* hard upper bound per session */
+
+/* Initialise per-session memory state: descriptor mapping table and the
+ * session memory lock. Returns _MALI_OSK_ERR_OK on success. */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
+{
+	MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+	/* Create descriptor mapping table */
+	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+	if (NULL == session_data->descriptor_mapping) {
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+			_MALI_OSK_LOCK_ORDER_MEM_SESSION);
+
+	if (NULL == session_data->memory_lock) {
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		/* NOTE(review): freeing session_data here looks suspect — the
+		 * caller allocated it and may free it again on error; confirm
+		 * ownership of session_data before relying on this path. */
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+	MALI_SUCCESS;
+}
+
+/** @brief Callback function that releases memory
+ *
+ * Invoked for every descriptor still present in a session's mapping
+ * table at session end; releases the backing memory and the descriptor.
+ *
+ * session->memory_lock must be held when calling this function.
+ */
+static void descriptor_table_cleanup_callback(int descriptor_id, void *map_target)
+{
+	mali_mem_allocation *descriptor;
+
+	descriptor = (mali_mem_allocation *)map_target;
+
+	MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
+
+	/* Bug fix: map_target is a pointer — print it with %p, not 0x%x
+	 * (format/argument mismatch, wrong output on 64-bit). */
+	MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to %p in descriptor table\n", descriptor_id, map_target));
+	MALI_DEBUG_ASSERT(descriptor);
+
+	mali_mem_release(descriptor);
+	mali_mem_descriptor_destroy(descriptor);
+}
+
+void mali_memory_session_end(struct mali_session_data *session)
+{
+	MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+	if (NULL == session) {
+		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+		return;
+	}
+
+	/* Lock the session so we can modify the memory list */
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	/* Free all allocations still in the descriptor map, and terminate the map */
+	if (session->descriptor_mapping != NULL) {
+		mali_descriptor_mapping_call_for_each(session->descriptor_mapping,
+						      descriptor_table_cleanup_callback);
+		mali_descriptor_mapping_destroy(session->descriptor_mapping);
+		session->descriptor_mapping = NULL;
+	}
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	/* Free the lock */
+	_mali_osk_mutex_term(session->memory_lock);
+}
+
+/* Initialise the memory subsystem (OS page allocator pools). */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+	return mali_mem_os_init();
+}
+
+/* Tear down the memory subsystem: OS pools first, then the dedicated
+ * block allocator. */
+void mali_memory_terminate(void)
+{
+	mali_mem_os_term();
+	mali_mem_block_allocator_destroy(NULL);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory.h b/drivers/parrot/gpu/mali400/linux/mali_memory.h
new file mode 100644
index 00000000000000..fb0937c77269b8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+
+_mali_osk_errcode_t mali_memory_initialize(void);
+void mali_memory_terminate(void);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space. Thin wrapper around the OS allocator.
+ *
+ * @param table_page GPU (physical) address of the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+	return mali_mem_os_get_table_page(table_page, mapping);
+}
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param phys the GPU (physical) address of the page to release
+ * @param virt the CPU (kernel) mapping of the page to release
+ */
+MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
+{
+	mali_mem_os_release_table_page(phys, virt);
+}
+
+/** @brief mmap function
+ *
+ * mmap syscalls on the Mali device node will end up here.
+ *
+ * This function allocates Mali memory and maps it on CPU and Mali.
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/** @brief Allocate and initialize a Mali memory descriptor
+ *
+ * @param session Pointer to the session allocating the descriptor
+ * @param type Type of memory the descriptor will represent
+ */
+mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type);
+
+/** @brief Destroy a Mali memory descriptor
+ *
+ * This function will only free the descriptor itself, and not the memory it
+ * represents.
+ *
+ * @param descriptor Pointer to the descriptor to destroy
+ */
+void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor);
+
+/** @brief Start a new memory session
+ *
+ * Called when a process opens the Mali device node.
+ *
+ * @param session Pointer to session to initialize
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session);
+
+/** @brief Close a memory session
+ *
+ * Called when a process closes the Mali device node.
+ *
+ * Memory allocated by the session will be freed
+ *
+ * @param session Pointer to the session to terminate
+ */
+void mali_memory_session_end(struct mali_session_data *session);
+
+/** @brief Prepare Mali page tables for mapping
+ *
+ * This function will prepare the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor to the mapping
+ */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
+
+/** @brief Free Mali page tables for mapping
+ *
+ * This function will unmap pages from Mali memory and free the page tables
+ * that are now unused.
+ *
+ * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary.
+ *
+ * @param descriptor Pointer to the memory descriptor to unmap
+ */
+void mali_mem_mali_map_free(mali_mem_allocation *descriptor);
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+
+void mali_mem_ump_release(mali_mem_allocation *descriptor);
+void mali_mem_external_release(mali_mem_allocation *descriptor);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.c b/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.c
new file mode 100644
index 00000000000000..a7698a403733ed
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+/* Node in the allocator's singly-linked free/allocated lists. A block's
+ * physical address is derived from its index in block_allocator::all_blocks
+ * (see get_phys()), so the node carries no address field of its own. */
+struct block_info {
+	struct block_info *next;
+};
+
+typedef struct block_info block_info;
+
+
+/* State for the dedicated-memory block allocator (one global instance). */
+typedef struct block_allocator {
+	struct mutex mutex;      /* protects the free list and counters */
+	block_info *all_blocks;  /* array of num_blocks descriptors */
+	block_info *first_free;  /* head of the free-block list */
+	u32 base;                /* Mali physical base address of the region */
+	u32 cpu_usage_adjust;    /* added to a Mali address to get the CPU address */
+	u32 num_blocks;          /* total MALI_BLOCK_SIZE blocks in the region */
+	u32 free_blocks;         /* blocks currently on the free list */
+} block_allocator;
+
+/* Single process-wide allocator instance, installed by
+ * mali_memory_core_resource_dedicated_memory(). ("gobal" is a historical
+ * typo for "global"; the name is referenced throughout this file.) */
+static block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+/* Translate a block descriptor back to its Mali physical address: blocks
+ * are laid out contiguously from info->base in array order. */
+MALI_STATIC_INLINE u32 get_phys(block_allocator *info, block_info *block)
+{
+	return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+/* Build a block allocator covering [base_address, base_address + size).
+ * The range is trimmed to a whole number of MALI_BLOCK_SIZE blocks, and
+ * every block starts out on the free list. Returns NULL if the region is
+ * smaller than one block or a kernel allocation fails. */
+static mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size)
+{
+	block_allocator *info;
+	u32 usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+	u32 num_blocks = usable_size / MALI_BLOCK_SIZE;
+	u32 i;
+
+	MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+	MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+	MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+	if (usable_size == 0) {
+		MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+		return NULL;
+	}
+
+	info = _mali_osk_malloc(sizeof(block_allocator));
+	if (NULL == info) {
+		return NULL;
+	}
+
+	mutex_init(&info->mutex);
+
+	info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+	if (NULL == info->all_blocks) {
+		_mali_osk_free(info);
+		return NULL;
+	}
+
+	info->first_free = NULL;
+	info->num_blocks = num_blocks;
+	info->free_blocks = num_blocks;
+	info->base = base_address;
+	info->cpu_usage_adjust = cpu_usage_adjust;
+
+	/* Push every block onto the free list (list ends up in reverse
+	 * array order, which is fine — order is irrelevant). */
+	for (i = 0; i < num_blocks; i++) {
+		info->all_blocks[i].next = info->first_free;
+		info->first_free = &info->all_blocks[i];
+	}
+
+	return (mali_mem_allocator *)info;
+}
+
+/* Tear down the block allocator.
+ *
+ * NOTE(review): the @allocator argument is deliberately ignored — the
+ * original code immediately overwrote it with the global instance, and
+ * the rest of this file always operates on mali_mem_block_gobal_allocator.
+ *
+ * Fix: the global pointer is now cleared before freeing, so later calls
+ * to mali_mem_block_allocator_stat() or mali_mem_block_alloc() cannot
+ * dereference freed memory through the stale global. */
+void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator)
+{
+	block_allocator *info = mali_mem_block_gobal_allocator;
+
+	if (NULL == info) return;
+
+	/* Prevent use-after-free through the stale global pointer. */
+	mali_mem_block_gobal_allocator = NULL;
+
+	_mali_osk_free(info->all_blocks);
+	_mali_osk_free(info);
+}
+
+/* Enter a physically contiguous range into the session's Mali page
+ * tables, one MMU page at a time. All callers pass a size that is a
+ * multiple of MALI_MMU_PAGE_SIZE. */
+static void mali_mem_block_mali_map(mali_mem_allocation *descriptor, u32 phys, u32 virt, u32 size)
+{
+	struct mali_page_directory *pagedir = descriptor->session->page_directory;
+	u32 prop = descriptor->mali_mapping.properties;
+	u32 done;
+
+	for (done = 0; 0 != size; size -= MALI_MMU_PAGE_SIZE, done += MALI_MMU_PAGE_SIZE) {
+		mali_mmu_pagedir_update(pagedir, virt + done, phys + done, MALI_MMU_PAGE_SIZE, prop);
+	}
+}
+
+/* Insert the CPU-side PFNs of a contiguous range into @vma, page by
+ * page. Returns 0 on success, 1 if vm_insert_pfn() fails. */
+static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust)
+{
+	u32 virt = vma->vm_start + mapping_offset;
+	u32 cpu_phys = mali_phys + cpu_usage_adjust;
+	u32 done;
+	int err;
+
+	for (done = 0; 0 != size; size -= MALI_MMU_PAGE_SIZE, done += MALI_MMU_PAGE_SIZE) {
+		err = vm_insert_pfn(vma, virt + done, __phys_to_pfn(cpu_phys + done));
+		if (unlikely(err)) {
+			MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n"));
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/* Allocate @size bytes (rounded up to whole blocks) of dedicated memory,
+ * map it into the session's Mali address space at @mali_addr and into the
+ * CPU address space described by @vma.
+ *
+ * Returns the new descriptor, or NULL on failure (no allocator, out of
+ * blocks, out of kernel memory, or CPU mapping failure).
+ *
+ * Fixes versus the original:
+ *  - info->free_blocks is now decremented as soon as a block leaves the
+ *    free list, and re-incremented for every block relinked on the CPU-map
+ *    failure path. The original relinked the blocks but never restored the
+ *    counter, permanently leaking pool capacity on each failure.
+ *  - mali_mem_mali_map_free() is called while the session memory lock is
+ *    still held, matching every other call site of that function. */
+mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+{
+	_mali_osk_errcode_t err;
+	mali_mem_allocation *descriptor;
+	block_allocator *info;
+	u32 left;
+	block_info *last_allocated = NULL;
+	block_allocator_allocation *ret_allocation;
+	u32 offset = 0;
+
+	size = ALIGN(size, MALI_BLOCK_SIZE);
+
+	info = mali_mem_block_gobal_allocator;
+	if (NULL == info) return NULL;
+
+	left = size;
+	MALI_DEBUG_ASSERT(0 != left);
+
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
+	if (NULL == descriptor) {
+		return NULL;
+	}
+
+	descriptor->mali_mapping.addr = mali_addr;
+	descriptor->size = size;
+	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
+	descriptor->cpu_mapping.ref = 1;
+
+	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
+		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+	} else {
+		/* Cached Mali memory mapping */
+		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+		vma->vm_flags |= VM_SHARED;
+	}
+
+	ret_allocation = &descriptor->block_mem.mem;
+	ret_allocation->mapping_length = 0;
+
+	_mali_osk_mutex_wait(session->memory_lock);
+	mutex_lock(&info->mutex);
+
+	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
+		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
+		mutex_unlock(&info->mutex);
+		_mali_osk_mutex_signal(session->memory_lock);
+		mali_mem_descriptor_destroy(descriptor);
+		return NULL;
+	}
+
+	err = mali_mem_mali_map_prepare(descriptor);
+	if (_MALI_OSK_ERR_OK != err) {
+		mutex_unlock(&info->mutex);
+		_mali_osk_mutex_signal(session->memory_lock);
+		mali_mem_descriptor_destroy(descriptor);
+		return NULL;
+	}
+
+	while ((left > 0) && (info->first_free)) {
+		block_info *block;
+		u32 phys_addr;
+		u32 current_mapping_size;
+
+		block = info->first_free;
+		info->first_free = info->first_free->next;
+		block->next = last_allocated;
+		last_allocated = block;
+		/* Account for the block as soon as it leaves the free list so
+		 * the error path below can simply re-count what it relinks. */
+		--info->free_blocks;
+
+		phys_addr = get_phys(info, block);
+
+		if (MALI_BLOCK_SIZE < left) {
+			current_mapping_size = MALI_BLOCK_SIZE;
+		} else {
+			current_mapping_size = left;
+		}
+
+		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
+		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
+			/* CPU mapping failed: hand every grabbed block back to
+			 * the pool and restore the free-block count. */
+			while (last_allocated) {
+				/* This relinks every block we've just allocated back into the free-list */
+				block = last_allocated->next;
+				last_allocated->next = info->first_free;
+				info->first_free = last_allocated;
+				last_allocated = block;
+				++info->free_blocks;
+			}
+
+			mutex_unlock(&info->mutex);
+
+			/* Undo the Mali-side mapping while the session memory
+			 * lock is still held, as all other callers of
+			 * mali_mem_mali_map_free() do. */
+			mali_mem_mali_map_free(descriptor);
+
+			_mali_osk_mutex_signal(session->memory_lock);
+
+			mali_mem_descriptor_destroy(descriptor);
+
+			return NULL;
+		}
+
+		left -= current_mapping_size;
+		offset += current_mapping_size;
+		ret_allocation->mapping_length += current_mapping_size;
+	}
+
+	mutex_unlock(&info->mutex);
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	MALI_DEBUG_ASSERT(0 == left);
+
+	/* Record all the information about this allocation */
+	ret_allocation->last_allocated = last_allocated;
+	ret_allocation->info = info;
+
+	return descriptor;
+}
+
+/* Return all blocks of a MALI_MEM_BLOCK allocation to the pool and tear
+ * down its Mali-side mapping. Caller is expected to hold the session's
+ * memory lock (mali_mem_mali_map_free() touches the session page tables). */
+void mali_mem_block_release(mali_mem_allocation *descriptor)
+{
+	block_allocator *info = descriptor->block_mem.mem.info;
+	block_info *block, *next;
+	block_allocator_allocation *allocation = &descriptor->block_mem.mem;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type);
+
+	block = allocation->last_allocated;
+
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+	/* unmap */
+	mali_mem_mali_map_free(descriptor);
+
+	mutex_lock(&info->mutex);
+
+	while (block) {
+		/* A valid block lives in all_blocks[0 .. num_blocks-1]; the
+		 * original check used '>' and wrongly accepted the
+		 * one-past-the-end pointer. */
+		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks))));
+
+		next = block->next;
+
+		/* relink into free-list */
+		block->next = info->first_free;
+		info->first_free = block;
+
+		/* advance the loop */
+		block = next;
+
+		++info->free_blocks;
+	}
+
+	mutex_unlock(&info->mutex);
+}
+
+/* Bytes of dedicated block memory currently handed out (0 when no
+ * allocator has been installed). */
+u32 mali_mem_block_allocator_stat(void)
+{
+	block_allocator *info = mali_mem_block_gobal_allocator;
+	u32 used_blocks;
+
+	if (NULL == info) {
+		return 0;
+	}
+
+	used_blocks = info->num_blocks - info->free_blocks;
+	return used_blocks * MALI_BLOCK_SIZE;
+}
+
+/* Claim the physical range [start, start + size) for the GPU and install
+ * it as the process-wide dedicated block allocator. Returns
+ * _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise. */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+	mali_mem_allocator *allocator;
+
+	/* Low-level Linux step first: request ownership of the region. */
+	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
+		MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Then wrap the region in a generic block allocator. */
+	allocator = mali_mem_block_allocator_create(start, 0 /* cpu_usage_adjust */, size);
+	if (NULL == allocator) {
+		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+		_mali_osk_mem_unreqregion(start, size);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	mali_mem_block_gobal_allocator = (block_allocator *)allocator;
+
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.h b/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.h
new file mode 100644
index 00000000000000..43b13e42f37e98
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_block_alloc.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+
+#include "mali_memory_types.h"
+
+/* Opaque handle to a block allocator instance. */
+typedef struct mali_mem_allocator mali_mem_allocator;
+
+/* NOTE(review): the .c file defines a *static* mali_mem_block_allocator_create();
+ * this declaration's name matches no external definition — confirm whether
+ * it is stale. */
+mali_mem_allocator *mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size);
+void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator);
+
+/* Allocate / release dedicated GPU block memory for a session. */
+mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
+void mali_mem_block_release(mali_mem_allocation *descriptor);
+
+/* Bytes of dedicated block memory currently in use. */
+u32 mali_mem_block_allocator_stat(void);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.c b/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.c
new file mode 100644
index 00000000000000..0bb2e50e9e44b5
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+
+#include "mali_pp_job.h"
+
+static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem);
+
+/* Book-keeping for one dma-buf attached to a Mali session. */
+struct mali_dma_buf_attachment {
+	struct dma_buf *buf;                   /* the shared buffer itself */
+	struct dma_buf_attachment *attachment; /* our attachment to it */
+	struct sg_table *sgt;                  /* valid while mapped (map_ref > 0) */
+	struct mali_session_data *session;     /* owning session */
+	int map_ref;                           /* nested map count, guarded by map_lock */
+	struct mutex map_lock;                 /* protects map_ref / sgt / is_mapped */
+	mali_bool is_mapped;                   /* MALI_TRUE while sgt is valid */
+	wait_queue_head_t wait_queue;          /* woken on map/unmap transitions */
+};
+
+/* Drop the driver's hold on a dma-buf attachment and free the tracking
+ * struct.
+ *
+ * With CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH the attach-time mapping is
+ * dropped here; otherwise this blocks until any in-flight job that
+ * mapped the buffer has unmapped it again (mali_dma_buf_unmap() wakes
+ * wait_queue once is_mapped goes false). */
+static void mali_dma_buf_release(struct mali_dma_buf_attachment *mem)
+{
+	MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+	MALI_DEBUG_ASSERT_POINTER(mem);
+	MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+	MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+	/* We mapped implicitly on attach, so we need to unmap on release */
+	mali_dma_buf_unmap(mem);
+#endif
+
+	/* Wait for buffer to become unmapped */
+	wait_event(mem->wait_queue, !mem->is_mapped);
+	MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+	dma_buf_detach(mem->buf, mem->attachment);
+	dma_buf_put(mem->buf);
+
+	_mali_osk_free(mem);
+}
+
+/* Release hook for MALI_MEM_DMA_BUF descriptors: forwards to the
+ * attachment-level release. */
+void mali_mem_dma_buf_release(mali_mem_allocation *descriptor)
+{
+	mali_dma_buf_release(descriptor->dma_buf.attachment);
+}
+
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
+ *
+ * Reference counted: only the first mapper actually maps the attachment
+ * and fills in the session's page tables; later callers just take a
+ * reference. Returns 0 on success, -EFAULT if the attachment cannot be
+ * mapped.
+ *
+ * Fix: the dma_buf_map_attachment() failure path now drops the reference
+ * it took and releases map_lock. The original returned with the mutex
+ * still held and map_ref incremented, deadlocking the next map/unmap.
+ */
+static int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 flags)
+{
+	struct mali_page_directory *pagedir;
+	struct scatterlist *sg;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(mem);
+	MALI_DEBUG_ASSERT_POINTER(session);
+	MALI_DEBUG_ASSERT(mem->session == session);
+
+	mutex_lock(&mem->map_lock);
+
+	mem->map_ref++;
+
+	MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+	if (1 == mem->map_ref) {
+		/* First reference taken, so we need to map the dma buf */
+		MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+		pagedir = mali_session_get_page_directory(session);
+		MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+		mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(mem->sgt)) {
+			MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+			/* Undo the reference and release the lock before
+			 * bailing out (see header comment). */
+			mem->map_ref--;
+			mutex_unlock(&mem->map_lock);
+			return -EFAULT;
+		}
+
+		for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
+			u32 size = sg_dma_len(sg);
+			dma_addr_t phys = sg_dma_address(sg);
+
+			/* sg must be page aligned. */
+			MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+
+			mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+			virt += size;
+		}
+
+		if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+			u32 guard_phys;
+			MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+			guard_phys = sg_dma_address(mem->sgt->sgl);
+			mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+		}
+
+		mem->is_mapped = MALI_TRUE;
+		mutex_unlock(&mem->map_lock);
+
+		/* Wake up any thread waiting for buffer to become mapped */
+		wake_up_all(&mem->wait_queue);
+	} else {
+		MALI_DEBUG_ASSERT(mem->is_mapped);
+		mutex_unlock(&mem->map_lock);
+	}
+
+	return 0;
+}
+
+/* Drop one mapping reference; the last reference actually unmaps the
+ * attachment and wakes anyone blocked in mali_dma_buf_release(). */
+static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
+{
+	int remaining;
+
+	MALI_DEBUG_ASSERT_POINTER(mem);
+	MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+	MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+	mutex_lock(&mem->map_lock);
+
+	remaining = --mem->map_ref;
+
+	MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, remaining));
+
+	if (0 == remaining) {
+		dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+		mem->is_mapped = MALI_FALSE;
+	}
+
+	mutex_unlock(&mem->map_lock);
+
+	/* Wake up any thread waiting for buffer to become unmapped */
+	wake_up_all(&mem->wait_queue);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+/* Map every dma-buf referenced by @job's memory cookies into the
+ * session's Mali address space before the job runs.
+ *
+ * Returns 0 on success or -EFAULT if any cookie failed to resolve or
+ * map; it keeps iterating so every mappable buffer is recorded in
+ * job->dma_bufs for mali_dma_buf_unmap_job(). */
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+	mali_mem_allocation *descriptor;
+	struct mali_dma_buf_attachment *mem;
+	_mali_osk_errcode_t err;
+	int i;
+	int ret = 0;
+
+	_mali_osk_mutex_wait(job->session->memory_lock);
+
+	for (i = 0; i < job->num_memory_cookies; i++) {
+		int cookie = job->memory_cookies[i];
+
+		if (0 == cookie) {
+			/* 0 is not a valid cookie */
+			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+			continue;
+		}
+
+		MALI_DEBUG_ASSERT(0 < cookie);
+
+		err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
+						  cookie, (void **)&descriptor);
+
+		if (_MALI_OSK_ERR_OK != err) {
+			MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
+			ret = -EFAULT;
+			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+			continue;
+		}
+
+		if (MALI_MEM_DMA_BUF != descriptor->type) {
+			/* Not a DMA-buf */
+			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+			continue;
+		}
+
+		mem = descriptor->dma_buf.attachment;
+
+		MALI_DEBUG_ASSERT_POINTER(mem);
+		MALI_DEBUG_ASSERT(mem->session == job->session);
+
+		err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
+		if (0 != err) {
+			/* Fixed: the message was terminated with a stray '\b'
+			 * (backspace) instead of '\n'. */
+			MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\n",
+						cookie, descriptor->mali_mapping.addr));
+			ret = -EFAULT;
+			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+			continue;
+		}
+
+		/* Add mem to list of DMA-bufs mapped for this job */
+		job->dma_bufs[i] = mem;
+	}
+
+	_mali_osk_mutex_signal(job->session->memory_lock);
+
+	return ret;
+}
+
+/* Release the per-job dma-buf mappings taken by mali_dma_buf_map_job(). */
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+	int i;
+
+	for (i = 0; i < job->num_dma_bufs; i++) {
+		struct mali_dma_buf_attachment *mem = job->dma_bufs[i];
+
+		if (NULL == mem) {
+			continue;
+		}
+
+		job->dma_bufs[i] = NULL;
+		mali_dma_buf_unmap(mem);
+	}
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+/* Attach the dma-buf identified by args.mem_fd to @session and map it at
+ * args.mali_address in the GPU address space. On success the descriptor
+ * cookie is written to @user_arg->cookie. Returns 0 or a negative errno.
+ *
+ * Fixes versus the original:
+ *  - the guard-page flag is now OR-ed into descriptor->flags; the plain
+ *    assignment silently cleared MALI_MEM_FLAG_DONT_CPU_MAP set just above.
+ *  - the put_user() failure path now destroys the descriptor; it was
+ *    detached from the mapping table but never freed (memory leak). */
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg)
+{
+	struct dma_buf *buf;
+	struct mali_dma_buf_attachment *mem;
+	_mali_uk_attach_dma_buf_s args;
+	mali_mem_allocation *descriptor;
+	int md;
+	int fd;
+
+	/* Get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s))) {
+		return -EFAULT;
+	}
+
+	if (args.mali_address & ~PAGE_MASK) {
+		MALI_DEBUG_PRINT_ERROR(("Requested address (0x%08x) is not page aligned\n", args.mali_address));
+		return -EINVAL;
+	}
+
+	/* Rejects both zero size and address-range wrap-around. */
+	if (args.mali_address >= args.mali_address + args.size) {
+		MALI_DEBUG_PRINT_ERROR(("Requested address and size (0x%08x + 0x%08x) is too big\n", args.mali_address, args.size));
+		return -EINVAL;
+	}
+
+	fd = args.mem_fd;
+
+	buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(buf)) {
+		MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+		return PTR_RET(buf);
+	}
+
+	/* Currently, mapping of the full buffer are supported. */
+	if (args.size != buf->size) {
+		MALI_DEBUG_PRINT_ERROR(("dma-buf size doesn't match mapping size.\n"));
+		dma_buf_put(buf);
+		return -EINVAL;
+	}
+
+	mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+	if (NULL == mem) {
+		MALI_DEBUG_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
+		dma_buf_put(buf);
+		return -ENOMEM;
+	}
+
+	mem->buf = buf;
+	mem->session = session;
+	mem->map_ref = 0;
+	mutex_init(&mem->map_lock);
+	init_waitqueue_head(&mem->wait_queue);
+
+	mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
+	if (NULL == mem->attachment) {
+		MALI_DEBUG_PRINT_ERROR(("Failed to attach to dma-buf %d\n", fd));
+		dma_buf_put(mem->buf);
+		_mali_osk_free(mem);
+		return -EFAULT;
+	}
+
+	/* Set up Mali memory descriptor */
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_DMA_BUF);
+	if (NULL == descriptor) {
+		MALI_DEBUG_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
+		mali_dma_buf_release(mem);
+		return -ENOMEM;
+	}
+
+	descriptor->size = args.size;
+	descriptor->mali_mapping.addr = args.mali_address;
+
+	descriptor->dma_buf.attachment = mem;
+
+	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+	if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+		/* OR, not assign: keep MALI_MEM_FLAG_DONT_CPU_MAP set above. */
+		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+	}
+
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	/* Map dma-buf into this session's page tables */
+	if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
+		_mali_osk_mutex_signal(session->memory_lock);
+		MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
+		mali_mem_descriptor_destroy(descriptor);
+		mali_dma_buf_release(mem);
+		return -ENOMEM;
+	}
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+	/* Map memory into session's Mali virtual address space. */
+
+	if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
+		mali_mem_mali_map_free(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+
+		MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
+		mali_mem_descriptor_destroy(descriptor);
+		mali_dma_buf_release(mem);
+		return -ENOMEM;
+	}
+
+#endif
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	/* Get descriptor mapping for memory. */
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_mali_map_free(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+
+		MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
+		mali_mem_descriptor_destroy(descriptor);
+		mali_dma_buf_release(mem);
+		return -EFAULT;
+	}
+
+	/* Return stuff to user space */
+	if (0 != put_user(md, &user_arg->cookie)) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_mali_map_free(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+
+		MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
+		mali_descriptor_mapping_free(session->descriptor_mapping, md);
+		/* mapping_free() only detaches the descriptor; destroy it too
+		 * so it is not leaked on this path. */
+		mali_mem_descriptor_destroy(descriptor);
+		mali_dma_buf_release(mem);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Detach and unmap the dma-buf identified by the user-supplied cookie.
+ * Returns 0, -EFAULT on a bad user pointer, or -EINVAL for an unknown
+ * cookie. */
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg)
+{
+	_mali_uk_release_dma_buf_s args;
+	mali_mem_allocation *descriptor;
+	int ret = 0;
+
+	/* copy_from_user returns the number of bytes it could NOT copy */
+	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s))) {
+		return -EFAULT;
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
+
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
+	if (NULL == descriptor) {
+		MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+		ret = -EINVAL;
+	} else {
+		MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));
+
+		mali_mem_mali_map_free(descriptor);
+		mali_dma_buf_release(descriptor->dma_buf.attachment);
+		mali_mem_descriptor_destroy(descriptor);
+	}
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	return ret;
+}
+
+/* Report the size of the dma-buf referred to by args.mem_fd back to user
+ * space through @user_arg->size. */
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+	_mali_uk_dma_buf_get_size_s args;
+	struct dma_buf *buf;
+	int err = 0;
+
+	/* copy_from_user returns the number of bytes it could NOT copy */
+	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
+		return -EFAULT;
+	}
+
+	buf = dma_buf_get(args.mem_fd);
+	if (IS_ERR_OR_NULL(buf)) {
+		MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", args.mem_fd));
+		return PTR_RET(buf);
+	}
+
+	if (0 != put_user(buf->size, &user_arg->size)) {
+		err = -EFAULT;
+	}
+
+	dma_buf_put(buf);
+
+	return err;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.h b/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.h
new file mode 100644
index 00000000000000..5018886bd2c0df
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_dma_buf.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_DMA_BUF_H__
+#define __MALI_MEMORY_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+struct mali_pp_job;
+
+/* Opaque per-attachment state, defined in mali_memory_dma_buf.c. */
+struct mali_dma_buf_attachment;
+
+/* ioctl backends: attach/release a dma-buf and query its size. */
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *arg);
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *arg);
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+/* Release hook for MALI_MEM_DMA_BUF descriptors. */
+void mali_mem_dma_buf_release(mali_mem_allocation *descriptor);
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+/* Per-job map/unmap, used when buffers are not mapped at attach time. */
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_DMA_BUF_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_external.c b/drivers/parrot/gpu/mali400/linux/mali_memory_external.c
new file mode 100644
index 00000000000000..dfd14a34a47cf7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_external.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_memory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_uk_types.h"
+
+/* Tear down a MALI_MEM_EXTERNAL mapping. Callers in this file hold the
+ * session memory lock around this call. */
+void mali_mem_external_release(mali_mem_allocation *descriptor)
+{
+	MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == descriptor->type);
+
+	mali_mem_mali_map_free(descriptor);
+}
+
+/* Map a caller-supplied physical range into the session's Mali address
+ * space (MALI_MEM_EXTERNAL). args->cookie receives the descriptor id.
+ *
+ * Fix: the debug print passed u32 values cast to void* while formatting
+ * with %x — a varargs type mismatch; the values are now passed directly. */
+_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
+{
+	struct mali_session_data *session;
+	mali_mem_allocation *descriptor;
+	int md;
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check arguments */
+	/* NULL might be a valid Mali address */
+	if (! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	/* size must be a multiple of the system page size */
+	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	MALI_DEBUG_PRINT(3,
+			 ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+			  args->phys_addr,
+			  args->phys_addr + args->size - 1,
+			  args->mali_address)
+			);
+
+	/* Validate the mali physical range */
+	if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
+	if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+	descriptor->mali_mapping.addr = args->mali_address;
+	descriptor->size = args->size;
+
+	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+		descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
+	}
+
+	_mali_osk_mutex_wait(session->memory_lock);
+	{
+		u32 virt = descriptor->mali_mapping.addr;
+		u32 phys = args->phys_addr;
+		u32 size = args->size;
+
+		err = mali_mem_mali_map_prepare(descriptor);
+		if (_MALI_OSK_ERR_OK != err) {
+			_mali_osk_mutex_signal(session->memory_lock);
+			mali_mem_descriptor_destroy(descriptor);
+			return _MALI_OSK_ERR_NOMEM;
+		}
+
+		mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+		/* Guard page maps back to the first physical page. */
+		if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+			mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+		}
+	}
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_external_release(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+		mali_mem_descriptor_destroy(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	args->cookie = md;
+
+	MALI_SUCCESS;
+}
+
+/* Undo _mali_ukk_map_external_mem() for the descriptor named by
+ * args->cookie.
+ *
+ * NOTE(review): the lookup (mali_descriptor_mapping_get) and removal
+ * (mali_descriptor_mapping_free) are two separate calls with no lock held
+ * in between — confirm the descriptor mapping is safe against a
+ * concurrent unmap of the same cookie. */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem(_mali_uk_unmap_external_mem_s *args)
+{
+	mali_mem_allocation *descriptor;
+	void *old_value;
+	struct mali_session_data *session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
+		MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	old_value = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
+
+	/* Only tear down if we were the ones to detach it from the table. */
+	if (NULL != old_value) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_external_release(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+		mali_mem_descriptor_destroy(descriptor);
+	}
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.c b/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.c
new file mode 100644
index 00000000000000..48ba42b49b513b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+/* One MiB is 256 pages of _MALI_OSK_MALI_PAGE_SIZE (4 KiB). */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+/* NOTE(review): kernel code conventionally uses HZ here rather than
+ * CONFIG_HZ -- confirm CONFIG_HZ is defined for every target config. */
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+
+/* The VM shrinker callback signature changed several times upstream:
+ * pre-2.6.35 it received no shrinker pointer, pre-3.0 it took raw
+ * nr_to_scan/gfp_mask, pre-3.12 a shrink_control, and from 3.12 the
+ * interface is split into count_objects/scan_objects callbacks. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+#endif
+/* Deferred work that trims the page pool back down to its target size. */
+static void mali_mem_os_trim_pool(struct work_struct *work);
+
+/* Global state of the OS page allocator: a pool of already allocated and
+ * still DMA-mapped pages, recycled between allocations to avoid the
+ * alloc_page + dma_map_page cost on every request. */
+static struct mali_mem_os_allocator {
+	/* Protects pool_pages and pool_count. */
+	spinlock_t pool_lock;
+	/* Recycled pages, linked through page->lru. */
+	struct list_head pool_pages;
+	size_t pool_count;
+
+	/* Pages currently handed out to allocations (pool pages excluded). */
+	atomic_t allocated_pages;
+	/* Upper bound, in bytes, on OS memory handed out; set by
+	 * mali_memory_core_resource_os_memory(). */
+	size_t allocation_limit;
+
+	/* Reclaim hooks: synchronous VM shrinker plus a deferred trim job
+	 * running on the dedicated workqueue below. */
+	struct shrinker shrinker;
+	struct delayed_work timed_shrinker;
+	struct workqueue_struct *wq;
+} mali_mem_os_allocator = {
+	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+	.pool_count = 0,
+
+	.allocated_pages = ATOMIC_INIT(0),
+	.allocation_limit = 0,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	.shrinker.shrink = mali_mem_os_shrink,
+#else
+	.shrinker.count_objects = mali_mem_os_shrink_count,
+	.shrinker.scan_objects = mali_mem_os_shrink,
+#endif
+	.shrinker.seeks = DEFAULT_SEEKS,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
+/* Return all pages of an OS allocation to the global page pool.
+ * Pages stay DMA mapped while pooled so they can be recycled cheaply.
+ * NOTE: the partial-allocation error path in mali_mem_os_alloc_pages()
+ * pre-adds os_mem.count to allocated_pages so the atomic_sub below
+ * always balances. */
+static void mali_mem_os_free(mali_mem_allocation *descriptor)
+{
+	LIST_HEAD(pages);
+
+	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+	atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+
+	/* Put pages on pool. */
+	list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);
+
+	spin_lock(&mali_mem_os_allocator.pool_lock);
+
+	list_splice(&pages, &mali_mem_os_allocator.pool_pages);
+	mali_mem_os_allocator.pool_count += descriptor->os_mem.count;
+
+	spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+	/* If the pool grew past its target size, schedule a deferred trim. */
+	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+		MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+	}
+}
+
+/* Populate descriptor->os_mem with enough pages to back 'size' bytes.
+ * Pages are taken from the recycling pool first (already DMA mapped),
+ * and any remainder is freshly allocated and mapped for the device.
+ * Returns 0 on success or -ENOMEM; on failure every page gathered so
+ * far is returned to the pool via mali_mem_os_free(). */
+static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
+{
+	struct page *new_page;
+	LIST_HEAD(pages);
+	size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+	size_t remaining = page_count;
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+	INIT_LIST_HEAD(&descriptor->os_mem.pages);
+	descriptor->os_mem.count = page_count;
+
+	/* Grab pages from pool. */
+	{
+		size_t pool_pages;
+		spin_lock(&mali_mem_os_allocator.pool_lock);
+		pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
+		for (i = pool_pages; i > 0; i--) {
+			BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
+			list_move(mali_mem_os_allocator.pool_pages.next, &pages);
+		}
+		mali_mem_os_allocator.pool_count -= pool_pages;
+		remaining -= pool_pages;
+		spin_unlock(&mali_mem_os_allocator.pool_lock);
+	}
+
+	/* Hand the recycled pages (already DMA mapped) to the allocation in
+	 * one O(1) splice instead of moving them one at a time. */
+	list_splice_tail(&pages, &descriptor->os_mem.pages);
+
+	/* Allocate new pages, if needed. */
+	for (i = 0; i < remaining; i++) {
+		dma_addr_t dma_addr;
+
+		new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+		if (unlikely(NULL == new_page)) {
+			/* Calculate the number of pages actually allocated, and free them. */
+			descriptor->os_mem.count = (page_count - remaining) + i;
+			atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+			mali_mem_os_free(descriptor);
+			return -ENOMEM;
+		}
+
+		/* Ensure page is flushed from CPU caches and get its bus address. */
+		dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+					0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&mali_platform_device->dev, dma_addr))) {
+			/* Mapping failed: drop this (unmapped) page and release
+			 * everything gathered so far, as in the alloc failure above. */
+			__free_page(new_page);
+			descriptor->os_mem.count = (page_count - remaining) + i;
+			atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+			mali_mem_os_free(descriptor);
+			return -ENOMEM;
+		}
+
+		/* Store page phys addr in page->private for later lookup/unmap. */
+		SetPagePrivate(new_page);
+		set_page_private(new_page, dma_addr);
+
+		list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
+	}
+
+	atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
+
+	/* Pool shrank below its target: the pending trim is no longer needed. */
+	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+		MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+	}
+
+	return 0;
+}
+
+/* Insert every page of an OS allocation into the session's Mali MMU,
+ * contiguously from descriptor->mali_mapping.addr.
+ * Returns 0 on success, -ENOMEM if page-table preparation fails. */
+static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
+{
+	struct page *pg;
+	u32 mali_virt;
+	_mali_osk_errcode_t prepare_err;
+
+	MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+
+	prepare_err = mali_mem_mali_map_prepare(descriptor);
+	if (_MALI_OSK_ERR_OK != prepare_err) {
+		return -ENOMEM;
+	}
+
+	mali_virt = descriptor->mali_mapping.addr;
+	list_for_each_entry(pg, &descriptor->os_mem.pages, lru) {
+		/* The page's bus address was stashed in page->private at alloc time. */
+		mali_mmu_pagedir_update(session->page_directory, mali_virt,
+					page_private(pg), MALI_MMU_PAGE_SIZE,
+					descriptor->mali_mapping.properties);
+		mali_virt += MALI_MMU_PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/* Remove the allocation's pages from the Mali MMU. Thin wrapper kept for
+ * symmetry with mali_mem_os_mali_map(); the session is not needed here. */
+static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+	MALI_IGNORE(session);
+	mali_mem_mali_map_free(descriptor);
+}
+
+/* Map every page of the allocation into the user VMA, in list order,
+ * starting at vma->vm_start. Assumes the caller sized the VMA to hold
+ * all pages -- TODO confirm against the mmap handler. */
+static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
+{
+	struct page *page;
+	int ret;
+	unsigned long addr = vma->vm_start;
+
+	list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
+		/* We should use vm_insert_page, but it does a dcache
+		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+		ret = vm_insert_page(vma, addr, page);
+		*/
+		/* NOTE(review): vm_insert_pfn requires VM_PFNMAP/VM_MIXEDMAP on
+		 * the vma -- presumably set by the mmap handler; verify. */
+		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
+
+		if (unlikely(0 != ret)) {
+			/* PTEs already inserted are left for the caller's vma
+			 * teardown to clean up -- TODO confirm. */
+			return -EFAULT;
+		}
+		addr += _MALI_OSK_MALI_PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/** @brief Allocate 'size' bytes of OS memory and map it on Mali and CPU.
+ *
+ * Creates a descriptor, obtains pages (pool first, then the kernel), maps
+ * them into the session's Mali MMU and inserts them into @vma.
+ * Returns the new descriptor, or NULL on failure.
+ */
+mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+{
+	mali_mem_allocation *descriptor;
+	int err;
+
+	/* NOTE(review): on 32-bit, allocated_pages * PAGE_SIZE + size could
+	 * wrap before the comparison -- confirm limits keep this in range. */
+	if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+		MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+				     size,
+				     atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+				     mali_mem_os_allocator.allocation_limit));
+		return NULL;
+	}
+
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
+	if (NULL == descriptor) return NULL;
+
+	descriptor->mali_mapping.addr = mali_addr;
+	descriptor->size = size;
+	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
+	descriptor->cpu_mapping.ref = 1;
+
+	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
+		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+	} else {
+		/* Cached Mali memory mapping */
+		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+		vma->vm_flags |= VM_SHARED;
+	}
+
+	err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
+	if (0 != err) goto alloc_failed;
+
+	/* Take session memory lock */
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
+	if (0 != err) goto mali_map_failed;
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
+	if (0 != err) goto cpu_map_failed;
+
+	return descriptor;
+
+cpu_map_failed:
+	/* Re-take the lock: it was already released after the Mali mapping
+	 * succeeded, and the shared path below releases it exactly once.
+	 * (Falling through without this would signal the mutex twice.) */
+	_mali_osk_mutex_wait(session->memory_lock);
+	mali_mem_os_mali_unmap(session, descriptor);
+mali_map_failed:
+	_mali_osk_mutex_signal(session->memory_lock);
+	mali_mem_os_free(descriptor);
+alloc_failed:
+	mali_mem_descriptor_destroy(descriptor);
+	MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
+	return NULL;
+}
+
+/* Release an OS allocation: tear down its Mali mapping, then recycle its
+ * pages into the pool. The session memory_lock must be held by the caller. */
+void mali_mem_os_release(mali_mem_allocation *descriptor)
+{
+	mali_mem_os_mali_unmap(descriptor->session, descriptor);
+	mali_mem_os_free(descriptor);
+}
+
+
+/* Small fixed-size cache of page-table pages (bus address + kernel mapping
+ * pairs) so MMU table pages can be recycled without a dma_alloc/dma_free
+ * round trip on every request. */
+#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
+static struct {
+	struct {
+		u32 phys;
+		mali_io_address mapping;
+	} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
+	/* Number of valid entries in page[]; 'lock' protects both fields. */
+	u32 count;
+	spinlock_t lock;
+} mali_mem_page_table_page_pool = {
+	.count = 0,
+	.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+};
+
+/* Hand out a page-table page: recycle one from the pool when possible,
+ * otherwise fall back to a fresh write-combined DMA allocation. */
+_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
+{
+	int recycled = 0;
+
+	spin_lock(&mali_mem_page_table_page_pool.lock);
+	if (mali_mem_page_table_page_pool.count > 0) {
+		u32 last = --mali_mem_page_table_page_pool.count;
+
+		*phys = mali_mem_page_table_page_pool.page[last].phys;
+		*mapping = mali_mem_page_table_page_pool.page[last].mapping;
+		recycled = 1;
+	}
+	spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+	if (recycled) {
+		return _MALI_OSK_ERR_OK;
+	}
+
+	/* Slow path: allocate a new page outside the spinlock. */
+	*mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
+	return (NULL != *mapping) ? _MALI_OSK_ERR_OK : _MALI_OSK_ERR_NOMEM;
+}
+
+/* Return a page-table page: cache it in the pool when there is room,
+ * otherwise release the DMA allocation immediately (outside the lock). */
+void mali_mem_os_release_table_page(u32 phys, void *virt)
+{
+	int pooled = 0;
+
+	spin_lock(&mali_mem_page_table_page_pool.lock);
+	if (mali_mem_page_table_page_pool.count < MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE) {
+		u32 slot = mali_mem_page_table_page_pool.count++;
+
+		mali_mem_page_table_page_pool.page[slot].phys = phys;
+		mali_mem_page_table_page_pool.page[slot].mapping = virt;
+		pooled = 1;
+	}
+	spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+	if (!pooled) {
+		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+	}
+}
+
+/* Unmap a pooled page from the device and give it back to the kernel.
+ * The BUG_ON documents the invariant that the pool holds the only
+ * remaining reference to the page. */
+static void mali_mem_os_free_page(struct page *page)
+{
+	BUG_ON(page_count(page) != 1);
+
+	/* The bus address was stored in page->private at allocation time. */
+	dma_unmap_page(&mali_platform_device->dev, page_private(page),
+		       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+	ClearPagePrivate(page);
+
+	__free_page(page);
+}
+
+/* The maximum number of page table pool pages to free in one go. */
+#define MALI_MEM_OS_CHUNK_TO_FREE 64UL
+
+/* Free a certain number of pages from the page table page pool.
+ * The pool lock must be held when calling the function, and the lock will be
+ * released before returning.
+ * The caller guarantees nr_to_free <= pool count (and <= CHUNK_TO_FREE,
+ * which also bounds the stack arrays below).
+ */
+static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
+{
+	u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+	void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+	u32 i;
+
+	MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
+
+	/* Remove nr_to_free pages from the pool and store them locally on stack. */
+	for (i = 0; i < nr_to_free; i++) {
+		u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
+
+		phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
+		virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
+	}
+
+	mali_mem_page_table_page_pool.count -= nr_to_free;
+
+	/* Drop the lock before the (potentially slow) DMA frees below. */
+	spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+	/* After releasing the spinlock: free the pages we removed from the pool. */
+	for (i = 0; i < nr_to_free; i++) {
+		dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
+	}
+}
+
+/* Opportunistically shrink the page-table page pool to a size proportional
+ * to the main page pool. Best effort: skips the trim entirely when the
+ * pool lock is contended. */
+static void mali_mem_os_trim_page_table_page_pool(void)
+{
+	size_t nr_to_free = 0;
+	size_t nr_to_keep;
+
+	/* Keep 2 page table pages for each 1024 pages in the page cache. */
+	nr_to_keep = mali_mem_os_allocator.pool_count / 512;
+	/* And a minimum of eight pages, to accommodate new sessions. */
+	nr_to_keep += 8;
+
+	if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
+
+	if (nr_to_keep < mali_mem_page_table_page_pool.count) {
+		nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
+		/* Free at most one chunk per invocation. */
+		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
+	}
+
+	/* Pool lock will be released by the callee. */
+	mali_mem_os_page_table_pool_free(nr_to_free);
+}
+
+/* Report the number of reclaimable pages (page pool + page-table pool).
+ * Registered as the shrinker's .count_objects callback on >= 3.12 kernels. */
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+}
+
+/* VM shrinker entry point: release pages from both pools under memory
+ * pressure. The signature varies with the kernel version (see the
+ * prototypes at the top of this file). */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+#endif /* Linux < 2.6.35 */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif /* Linux < 3.12.0 */
+#endif /* Linux < 3.0.0 */
+{
+	struct page *page, *tmp;
+	unsigned long flags;
+	struct list_head *le, pages;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+	int nr = nr_to_scan;
+#else
+	int nr = sc->nr_to_scan;
+#endif
+
+	if (0 == nr) {
+		/* Query pass: report the reclaimable page count. Inlined rather
+		 * than calling mali_mem_os_shrink_count(), which would not
+		 * compile on pre-3.0 kernels where this function does not have
+		 * the 'shrinker'/'sc' parameters. */
+		return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+	}
+
+	if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
+		/* Not able to lock: abort this reclaim pass.
+		 * (-1 equals SHRINK_STOP when returned as unsigned long.) */
+		return -1;
+	}
+
+	if (0 == mali_mem_os_allocator.pool_count) {
+		/* No pages available */
+		spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+		return 0;
+	}
+
+	/* Release from general page pool: detach victims under the lock,
+	 * free them after unlocking. */
+	nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
+	mali_mem_os_allocator.pool_count -= nr;
+	list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+		--nr;
+		if (0 == nr) break;
+	}
+	list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+	spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		mali_mem_os_free_page(page);
+	}
+
+	/* Release some pages from page table page pool */
+	mali_mem_os_trim_page_table_page_pool();
+
+	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+		/* Pools are empty, stop timer */
+		MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+		cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+	}
+
+	/* NOTE(review): on >= 3.12, scan_objects is expected to return the
+	 * number of objects freed; returning the remaining count is kept from
+	 * the old combined shrink() contract -- verify against target kernels. */
+	return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+}
+
+/* Deferred work: shrink the page pool back towards its target size.
+ * Runs on the allocator's workqueue and re-arms itself while the pool
+ * remains above MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES. */
+static void mali_mem_os_trim_pool(struct work_struct *data)
+{
+	struct page *page, *tmp;
+	struct list_head *le;
+	LIST_HEAD(pages);
+	size_t nr_to_free;
+
+	MALI_IGNORE(data);
+
+	MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
+
+	/* Release from general page pool */
+	spin_lock(&mali_mem_os_allocator.pool_lock);
+	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+		size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+		const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
+
+		/* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
+		nr_to_free = max(count / 2, min_to_free);
+
+		/* Detach the victims under the lock; free them after unlocking. */
+		mali_mem_os_allocator.pool_count -= nr_to_free;
+		list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+			--nr_to_free;
+			if (0 == nr_to_free) break;
+		}
+		list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+	}
+	spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		mali_mem_os_free_page(page);
+	}
+
+	/* Release some pages from page table page pool */
+	mali_mem_os_trim_page_table_page_pool();
+
+	/* Still above target (pages were returned meanwhile): run again later. */
+	if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+		MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+		queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+	}
+}
+
+/* Bring up the OS allocator: create the pool-trim workqueue and register
+ * the memory shrinker with the VM. */
+_mali_osk_errcode_t mali_mem_os_init(void)
+{
+	struct workqueue_struct *trim_wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
+
+	if (NULL == trim_wq) {
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	mali_mem_os_allocator.wq = trim_wq;
+	register_shrinker(&mali_mem_os_allocator.shrinker);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Driver shutdown: unregister reclaim hooks and drain both pools.
+ * Assumes all sessions are gone, so no allocations remain outstanding. */
+void mali_mem_os_term(void)
+{
+	struct page *page, *tmp;
+
+	unregister_shrinker(&mali_mem_os_allocator.shrinker);
+	cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
+	destroy_workqueue(mali_mem_os_allocator.wq);
+
+	spin_lock(&mali_mem_os_allocator.pool_lock);
+	list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
+		mali_mem_os_free_page(page);
+
+		--mali_mem_os_allocator.pool_count;
+	}
+	BUG_ON(mali_mem_os_allocator.pool_count);
+	spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+	/* Release from page table page pool, one chunk per pass (the callee
+	 * can only free MALI_MEM_OS_CHUNK_TO_FREE pages at a time). */
+	do {
+		u32 nr_to_free;
+
+		spin_lock(&mali_mem_page_table_page_pool.lock);
+
+		nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
+
+		/* Pool lock will be released by the callee. */
+		mali_mem_os_page_table_pool_free(nr_to_free);
+	} while (0 != mali_mem_page_table_page_pool.count);
+}
+
+/* Record the configured OS-memory budget for this device.
+ * @param size maximum number of bytes the OS allocator may hand out */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+	mali_mem_os_allocator.allocation_limit = size;
+
+	MALI_SUCCESS;
+}
+
+/* Total bytes of OS memory currently handed out to Mali allocations. */
+u32 mali_mem_os_stat(void)
+{
+	u32 pages_in_use = atomic_read(&mali_mem_os_allocator.allocated_pages);
+
+	return pages_in_use * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.h b/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.h
new file mode 100644
index 00000000000000..753d5ccdcdc915
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_os_alloc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_OS_ALLOC_H__
+#define __MALI_MEMORY_OS_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include "mali_memory_types.h"
+
+/* OS memory allocator */
+/** @brief Allocate memory from OS
+ *
+ * This function will create a descriptor, allocate pages and map these on the CPU and Mali.
+ *
+ * @param mali_addr Mali virtual address to use for Mali mapping
+ * @param size Size to allocate
+ * @param vma Pointer to vma for CPU mapping
+ * @param session Pointer to session doing the allocation
+ */
+mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
+
+/** @brief Release Mali OS memory
+ *
+ * The session memory_lock must be held when calling this function.
+ *
+ * @param descriptor Pointer to the descriptor to release
+ */
+void mali_mem_os_release(mali_mem_allocation *descriptor);
+
+/** @brief Obtain a page usable as a Mali MMU table page, recycled from a
+ * pool when possible.
+ *
+ * @param phys Receives the physical (bus) address of the page
+ * @param mapping Receives the kernel virtual mapping of the page
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM otherwise
+ */
+_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping);
+
+/** @brief Return a table page to the pool, or free it if the pool is full. */
+void mali_mem_os_release_table_page(u32 phys, void *virt);
+
+/** @brief Initialize the OS allocator (workqueue + VM shrinker). */
+_mali_osk_errcode_t mali_mem_os_init(void);
+/** @brief Tear down the OS allocator and drain its page pools. */
+void mali_mem_os_term(void);
+/** @brief Current OS memory usage, in bytes. */
+u32 mali_mem_os_stat(void);
+
+#endif /* __MALI_MEMORY_OS_ALLOC_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_types.h b/drivers/parrot/gpu/mali400/linux/mali_memory_types.h
new file mode 100644
index 00000000000000..d084ad1b0ebdb5
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_types.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_TYPES_H__
+#define __MALI_MEMORY_TYPES_H__
+
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+/* A Mali (GPU) virtual address. */
+typedef u32 mali_address_t;
+
+/* Backing type of an allocation; selects the active member of the union
+ * inside mali_mem_allocation below. */
+typedef enum mali_mem_type {
+	MALI_MEM_OS,
+	MALI_MEM_EXTERNAL,
+	MALI_MEM_DMA_BUF,
+	MALI_MEM_UMP,
+	MALI_MEM_BLOCK,
+} mali_mem_type;
+
+/* OS-allocated backing: pages linked through page->lru. */
+typedef struct mali_mem_os_mem {
+	struct list_head pages;
+	u32 count;
+} mali_mem_os_mem;
+
+typedef struct mali_mem_dma_buf {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+	struct mali_dma_buf_attachment *attachment;
+#endif
+} mali_mem_dma_buf;
+
+/* Externally owned, physically contiguous memory. */
+typedef struct mali_mem_external {
+	dma_addr_t phys;
+	u32 size;
+} mali_mem_external;
+
+typedef struct mali_mem_ump {
+#if defined(CONFIG_MALI400_UMP)
+	ump_dd_handle handle;
+#endif
+} mali_mem_ump;
+
+typedef struct block_allocator_allocation {
+	/* The list will be released in reverse order */
+	struct block_info *last_allocated;
+	u32 mapping_length;
+	struct block_allocator *info;
+} block_allocator_allocation;
+
+typedef struct mali_mem_block_mem {
+	block_allocator_allocation mem;
+} mali_mem_block_mem;
+
+typedef struct mali_mem_virt_mali_mapping {
+	mali_address_t addr; /* Virtual Mali address */
+	u32 properties; /* MMU Permissions + cache, must match MMU HW */
+} mali_mem_virt_mali_mapping;
+
+typedef struct mali_mem_virt_cpu_mapping {
+	void __user *addr;
+	u32 ref;
+} mali_mem_virt_cpu_mapping;
+
+#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
+#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
+
+typedef struct mali_mem_allocation {
+	MALI_DEBUG_CODE(u32 magic);
+	mali_mem_type type; /**< Type of memory */
+	int id; /**< ID in the descriptor map for this allocation */
+
+	u32 size; /**< Size of the allocation */
+	u32 flags; /**< Flags for this allocation */
+
+	struct mali_session_data *session; /**< Pointer to session that owns the allocation */
+
+	/* Union selected by type. */
+	union {
+		mali_mem_os_mem os_mem; /**< MALI_MEM_OS */
+		mali_mem_external ext_mem; /**< MALI_MEM_EXTERNAL */
+		mali_mem_dma_buf dma_buf; /**< MALI_MEM_DMA_BUF */
+		mali_mem_ump ump_mem; /**< MALI_MEM_UMP */
+		mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+	};
+
+	mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+	mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+} mali_mem_allocation;
+
+/* Map an extra virtual guard page past the end of the Mali mapping. */
+#define MALI_MEM_FLAG_MALI_GUARD_PAGE (1 << 0)
+/* Allocation must not be mapped into user space. */
+#define MALI_MEM_FLAG_DONT_CPU_MAP (1 << 1)
+
+#endif /* __MALI_MEMORY_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_memory_ump.c b/drivers/parrot/gpu/mali400/linux/mali_memory_ump.c
new file mode 100644
index 00000000000000..8e59eacf68dbcd
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_memory_ump.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+
+#include "ump_kernel_interface.h"
+
+/* Map an attached UMP allocation into the session's Mali address space.
+ * Called with the session memory_lock held.
+ * Returns 0 on success or a negative errno value. */
+static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+	ump_dd_handle ump_mem;
+	u32 nr_blocks;
+	u32 i;
+	ump_dd_physical_block *ump_blocks;
+	struct mali_page_directory *pagedir;
+	u32 offset = 0;
+	u32 prop;
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
+
+	ump_mem = descriptor->ump_mem.handle;
+	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+	nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+	if (nr_blocks == 0) {
+		MALI_DEBUG_PRINT(1, ("No block count\n"));
+		return -EINVAL;
+	}
+
+	ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks);
+	if (NULL == ump_blocks) {
+		return -ENOMEM;
+	}
+
+	if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) {
+		_mali_osk_free(ump_blocks);
+		return -EFAULT;
+	}
+
+	pagedir = session->page_directory;
+	prop = descriptor->mali_mapping.properties;
+
+	err = mali_mem_mali_map_prepare(descriptor);
+	if (_MALI_OSK_ERR_OK != err) {
+		MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
+
+		_mali_osk_free(ump_blocks);
+		return -ENOMEM;
+	}
+
+	/* Map each physically contiguous UMP block back to back. */
+	for (i = 0; i < nr_blocks; ++i) {
+		u32 virt = descriptor->mali_mapping.addr + offset;
+
+		MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr, ump_blocks[i].size));
+
+		mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
+					ump_blocks[i].size, prop);
+
+		offset += ump_blocks[i].size;
+	}
+
+	/* Test the descriptor's own flag namespace: the UK-API flag
+	 * _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE was translated into
+	 * MALI_MEM_FLAG_MALI_GUARD_PAGE at attach time (matching the
+	 * external-memory path). */
+	if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+		u32 virt = descriptor->mali_mapping.addr + offset;
+
+		/* Map in an extra virtual guard page at the end of the VMA */
+		MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
+
+		mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, prop);
+
+		offset += _MALI_OSK_MALI_PAGE_SIZE;
+	}
+
+	_mali_osk_free(ump_blocks);
+
+	return 0;
+}
+
+/* Tear down the Mali MMU mapping of a UMP allocation and drop the driver's
+ * reference on the UMP handle. Called with the session memory_lock held. */
+void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+{
+	ump_dd_handle ump_mem = descriptor->ump_mem.handle;
+
+	MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+	mali_mem_mali_map_free(descriptor);
+
+	ump_dd_reference_release(ump_mem);
+
+	/* 'session' is retained in the signature for symmetry with
+	 * mali_ump_map(); it is not needed here. */
+	MALI_IGNORE(session);
+}
+
+/** @brief Attach a UMP buffer to the session and map it on the Mali.
+ *
+ * Validates the request, takes a reference on the UMP handle, maps it into
+ * the session's Mali address space and returns a cookie in args->cookie for
+ * later release via _mali_ukk_release_ump_mem().
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
+{
+	ump_dd_handle ump_mem;
+	struct mali_session_data *session;
+	mali_mem_allocation *descriptor;
+	int md, ret;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check arguments */
+	/* NULL might be a valid Mali address */
+	if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	/* size must be a multiple of the system page size */
+	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+	MALI_DEBUG_PRINT(3,
+			 ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+			  args->secure_id, args->mali_address, args->size));
+
+	ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);
+
+	if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+	descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
+	if (NULL == descriptor) {
+		ump_dd_reference_release(ump_mem);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	descriptor->ump_mem.handle = ump_mem;
+	descriptor->mali_mapping.addr = args->mali_address;
+	descriptor->size = args->size;
+	descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+
+	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+		/* OR in the guard-page flag: a plain assignment here would
+		 * silently drop MALI_MEM_FLAG_DONT_CPU_MAP set just above. */
+		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+	}
+
+	_mali_osk_mutex_wait(session->memory_lock);
+
+	ret = mali_ump_map(session, descriptor);
+	if (0 != ret) {
+		_mali_osk_mutex_signal(session->memory_lock);
+		ump_dd_reference_release(ump_mem);
+		mali_mem_descriptor_destroy(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	_mali_osk_mutex_signal(session->memory_lock);
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
+		/* NOTE(review): the Mali-side mapping created above is not torn
+		 * down on this path -- presumably reclaimed at session
+		 * teardown; verify. */
+		ump_dd_reference_release(ump_mem);
+		mali_mem_descriptor_destroy(descriptor);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	args->cookie = md;
+
+	MALI_DEBUG_PRINT(5, ("Returning from UMP attach\n"));
+
+	MALI_SUCCESS;
+}
+
+/* Release a UMP-backed allocation: unmap it from the Mali and drop the
+ * UMP handle reference. The session memory_lock must be held. */
+void mali_mem_ump_release(mali_mem_allocation *descriptor)
+{
+	MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
+
+	mali_ump_unmap(descriptor->session, descriptor);
+}
+
+/** @brief Release a previously attached UMP allocation identified by
+ * args->cookie, undoing _mali_ukk_attach_ump_mem(). */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args)
+{
+	mali_mem_allocation *descriptor;
+	struct mali_session_data *session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session = (struct mali_session_data *)args->ctx;
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
+		MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* Remove the cookie; only the caller that freed the slot releases. */
+	descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
+
+	if (NULL != descriptor) {
+		_mali_osk_mutex_wait(session->memory_lock);
+		mali_mem_ump_release(descriptor);
+		_mali_osk_mutex_signal(session->memory_lock);
+
+		mali_mem_descriptor_destroy(descriptor);
+	}
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_atomics.c b/drivers/parrot/gpu/mali400/linux/mali_osk_atomics.c
new file mode 100644
index 00000000000000..2f4154f25bc507
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_atomics.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+/** Atomically decrement *atom. */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom)
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+/** Atomically decrement *atom and return the new value. */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment *atom. */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom)
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment *atom and return the new value. */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+/** Initialise *atom to @a val; fails only for a NULL pointer. */
+_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+{
+ MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ atomic_set((atomic_t *)&atom->u.val, val);
+ return _MALI_OSK_ERR_OK;
+}
+
+/** Read the current value of *atom. */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+/** No-op on Linux: atomic_t needs no teardown. */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
+{
+ MALI_IGNORE(atom);
+}
+
+/** Atomically store @a val into *atom and return the previous value. */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val)
+{
+ return atomic_xchg((atomic_t *)&atom->u.val, val);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_irq.c b/drivers/parrot/gpu/mali400/linux/mali_osk_irq.c
new file mode 100644
index 00000000000000..8d24222a9e9c27
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_irq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+typedef struct _mali_osk_irq_t_struct {
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+} mali_osk_irq_object_t;
+
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id); /* , struct pt_regs *regs*/
+
+#if defined(DEBUG)
+
+struct test_interrupt_data {
+ _mali_osk_irq_ack_t ack_func;
+ void *probe_data;
+ mali_bool interrupt_received;
+ wait_queue_head_t wq;
+};
+
+/* Temporary upper-half used only by the interrupt self-test: acknowledges
+ * the interrupt via the core's ack function and, if the core claimed it,
+ * flags reception and wakes the waiting tester. */
+static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) {
+ data->interrupt_received = MALI_TRUE;
+ wake_up(&data->wq);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/** Verify that a core's interrupt line actually fires.
+ *
+ * Installs a temporary handler, asks the core to raise an interrupt via
+ * @a trigger_func and waits up to 100 jiffies for the handler to observe it.
+ *
+ * @return _MALI_OSK_ERR_OK if the interrupt was received,
+ * _MALI_OSK_ERR_FAULT if the handler could not be installed or the
+ * interrupt never arrived.
+ */
+static _mali_osk_errcode_t test_interrupt(u32 irqnum,
+ _mali_osk_irq_trigger_t trigger_func,
+ _mali_osk_irq_ack_t ack_func,
+ void *probe_data,
+ const char *description)
+{
+ unsigned long irq_flags = 0;
+ struct test_interrupt_data data = {
+ .ack_func = ack_func,
+ .probe_data = probe_data,
+ .interrupt_received = MALI_FALSE,
+ };
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ /* The wait queue must be ready before the handler is installed: on a
+ * shared IRQ line test_interrupt_upper_half() may run -- and call
+ * wake_up(&data.wq) -- as soon as request_irq() enables the line. */
+ init_waitqueue_head(&data.wq);
+
+ if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ trigger_func(probe_data);
+ wait_event_timeout(data.wq, data.interrupt_received, 100);
+
+ free_irq(irqnum, &data);
+
+ if (data.interrupt_received) {
+ MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description));
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum));
+ return _MALI_OSK_ERR_FAULT;
+ }
+}
+
+#endif /* defined(DEBUG) */
+
+/** Install an interrupt handler for a Mali core.
+ *
+ * If @a irqnum is -1 and both @a trigger_func and @a ack_func are supplied,
+ * the IRQ number is auto-detected with probe_irq_on()/probe_irq_off() over a
+ * few attempts. In DEBUG builds the configured interrupt is then verified
+ * with test_interrupt() before the real handler is installed.
+ *
+ * @return an opaque handle to pass to _mali_osk_irq_term(), or NULL on
+ * failure (allocation failure, probe failure, failed self-test, or
+ * request_irq() error).
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description)
+{
+ mali_osk_irq_object_t *irq_object;
+ unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) {
+ return NULL;
+ }
+
+ /* NOTE(review): irqnum is u32, so "-1" relies on the usual arithmetic
+ * conversion to 0xFFFFFFFF -- works, but worth keeping in mind. */
+ if (-1 == irqnum) {
+ /* Probe for IRQ */
+ if ((NULL != trigger_func) && (NULL != ack_func)) {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(probe_data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(probe_data);
+ } while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ } else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum) {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->data = int_data;
+
+ if (-1 == irqnum) {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ /* Verify that the configured interrupt settings are working */
+ if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+ MALI_DEBUG_PRINT(2, ("Test of IRQ handler for core '%s' failed\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+#endif
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ return irq_object;
+}
+
+/** Uninstall the IRQ handler installed by _mali_osk_irq_init() and free the
+ * bookkeeping object. */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+ free_irq(irq_object->irqnum, irq_object);
+ kfree(irq_object);
+}
+
+
+/** Upper-half interrupt handler, called directly in interrupt context.
+ *
+ * One instance of this handler is registered per Mali core; when the
+ * (possibly shared) IRQ line fires, it is invoked once per registered core,
+ * and the core to check is identified by \a dev_id. The device-specific
+ * work -- acknowledging/masking the interrupt and scheduling any bottom
+ * half -- is delegated to the core's registered handler,
+ * irq_object->uhandler.
+ *
+ * @return IRQ_HANDLED if the core's handler claimed the interrupt,
+ * IRQ_NONE otherwise (so a shared line can be offered to other handlers).
+ */
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id) /* , struct pt_regs *regs*/
+{
+ irqreturn_t ret = IRQ_NONE;
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_locks.c b/drivers/parrot/gpu/mali400/linux/mali_osk_locks.c
new file mode 100644
index 00000000000000..c37613c46d2d4c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_locks.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+/** Initialise the DEBUG-only tracking fields of a lock.
+ * @a order is only recorded when LOCK_ORDER_CHECKING is enabled. */
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+ checker->orig_flags = flags;
+ checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+ checker->order = order;
+ checker->next = NULL;
+#endif
+}
+
+/** Record the current thread as owner of @a checker; with
+ * LOCK_ORDER_CHECKING, also verify the lock-ordering rules (violations are
+ * logged with a stack dump but the lock is still considered taken). */
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+ checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+ printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+ _mali_osk_get_tid(), checker);
+ dump_stack();
+ }
+ }
+#endif
+}
+
+/** Clear ownership of @a checker; with LOCK_ORDER_CHECKING, also drop it
+ * from the tracking list. */
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ remove_lock_from_log(checker, _mali_osk_get_tid());
+ }
+#endif
+ checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To assure that lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following function will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that a lock that is taken has a higher order than the current highest-order lock a
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, the `add_lock_to_log_and_check' makes sure that taking
+ * the given lock is legal. It will follow the linked list to find the last
+ * lock taken by this thread. If the last lock's order was lower than the
+ * lock that is to be taken, it appends the new lock to the list and returns
+ * true, if not, it return false. This return value is assert()'ed on in
+ * _mali_osk_lock_wait().
+ */
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+/* Dump the global lock-tracking list to the kernel log. Caller holds
+ * lock_tracking_lock; the assert bounds runaway/corrupted lists at 100. */
+static void dump_lock_tracking_list(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 1;
+
+ /* print list for debugging purposes */
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+ l = l->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+ printk(" NULL\n");
+}
+
+/* Count the entries in the tracking list (all threads' locks). Caller
+ * holds lock_tracking_lock. */
+static int tracking_list_length(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 0;
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ l = l->next;
+ n++;
+ MALI_DEBUG_ASSERT(n < 100);
+ }
+ return n;
+}
+
+/** Record that thread @a tid is taking @a lock and check lock ordering.
+ *
+ * Appends @a lock to the global tracking list and verifies that its order is
+ * strictly higher than the highest-order lock the thread already holds.
+ *
+ * @return MALI_TRUE if taking the lock is legal, MALI_FALSE otherwise (the
+ * violation is also dumped to the kernel log).
+ */
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ mali_bool ret = MALI_FALSE;
+ _mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+ struct _mali_osk_lock_debug_s *l;
+ unsigned long local_lock_flag;
+ u32 len;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+
+ l = lock_lookup_list;
+ if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+ lock_lookup_list = lock;
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return MALI_TRUE;
+ } else {
+ /* Traverse the locks taken and find the lock of the highest order.
+ * Since several threads may hold locks, each lock's owner must be
+ * checked so that locks not owned by this thread can be ignored. */
+ for (;;) {
+ MALI_DEBUG_ASSERT_POINTER(l);
+ if (tid == l->owner && l->order >= highest_order_for_tid) {
+ highest_order_for_tid = l->order;
+ }
+
+ if (NULL != l->next) {
+ l = l->next;
+ } else {
+ break;
+ }
+ }
+
+ /* Append the new lock at the tail and terminate the list there.
+ * (Fix: this used to clear l->next instead of lock->next, which
+ * unlinked the lock again and tripped the length check below.) */
+ l->next = lock;
+ lock->next = NULL;
+ }
+
+ /* We have now found the highest order lock currently held by this thread and can see if it is
+ * legal to take the requested lock. */
+ ret = highest_order_for_tid < lock->order;
+
+ if (!ret) {
+ printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+ lock->order, lock_order_to_string(lock->order),
+ highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
+ dump_lock_tracking_list();
+ }
+
+ /* Sanity check: the append above must have grown the list by one. */
+ if (len + 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return ret;
+}
+
+/** Unlink @a lock from the global tracking list when a thread releases it.
+ * Asserts (DEBUG) if the lock is not found or the list shrinks by other
+ * than exactly one entry. @a tid is currently unused here. */
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ struct _mali_osk_lock_debug_s *curr;
+ struct _mali_osk_lock_debug_s *prev = NULL;
+ unsigned long local_lock_flag;
+ u32 len;
+ u32 n = 0;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+ curr = lock_lookup_list;
+
+ if (NULL == curr) {
+ printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+ dump_lock_tracking_list();
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+
+
+ /* Walk to the entry, keeping the predecessor for the unlink below. */
+ while (lock != curr) {
+ prev = curr;
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ curr = curr->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+
+ if (NULL == prev) {
+ lock_lookup_list = curr->next;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ MALI_DEBUG_ASSERT_POINTER(prev);
+ prev->next = curr->next;
+ }
+
+ lock->next = NULL;
+
+ if (len - 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+}
+
+/** Map a lock-order enum value to its symbolic name for diagnostics;
+ * returns the empty string for unknown values. */
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order)
+{
+ switch (order) {
+ case _MALI_OSK_LOCK_ORDER_SESSIONS:
+ return "_MALI_OSK_LOCK_ORDER_SESSIONS";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_SESSION:
+ return "_MALI_OSK_LOCK_ORDER_MEM_SESSION";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_INFO:
+ return "_MALI_OSK_LOCK_ORDER_MEM_INFO";
+ break;
+ case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE:
+ return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE";
+ break;
+ case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
+ return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
+ break;
+ case _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL:
+ return "_MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL";
+ break;
+ case _MALI_OSK_LOCK_ORDER_GROUP:
+ return "_MALI_OSK_LOCK_ORDER_GROUP";
+ break;
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PM_CORE_STATE:
+ return "_MALI_OSK_LOCK_ORDER_PM_CORE_STATE";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PROFILING:
+ return "_MALI_OSK_LOCK_ORDER_PROFILING";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2_COUNTER:
+ return "_MALI_OSK_LOCK_ORDER_L2_COUNTER";
+ break;
+ case _MALI_OSK_LOCK_ORDER_UTILIZATION:
+ return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
+ break;
+ case _MALI_OSK_LOCK_ORDER_PM_EXECUTE:
+ return "_MALI_OSK_LOCK_ORDER_PM_EXECUTE";
+ break;
+ case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
+ return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
+ break;
+ default:
+ return "";
+ }
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_locks.h b/drivers/parrot/gpu/mali400/linux/mali_osk_locks.h
new file mode 100644
index 00000000000000..f09cf170957e02
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_locks.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef _cplusplus
+extern "C" {
+#endif
+
+ /* When DEBUG is enabled, this struct will be used to track owner, mode and order checking */
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s {
+ u32 owner;
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_order_t order;
+ struct _mali_osk_lock_debug_s *next;
+ };
+#endif
+
+ /* Abstraction of spinlock_t */
+ struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ spinlock_t spinlock;
+ };
+
+ /* Abstraction of spinlock_t plus a lock flag which is used to store the register state before locking */
+ struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+
+ spinlock_t spinlock;
+ unsigned long flags;
+ };
+
+ /* Abstraction of rw_semaphore in OS */
+ struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+ _mali_osk_lock_mode_t mode;
+#endif
+
+ struct rw_semaphore rw_sema;
+ };
+
+ /* Mutex and mutex_interruptible functions share the same osk mutex struct */
+ struct _mali_osk_mutex_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ struct mutex mutex;
+ };
+
+#ifdef DEBUG
+ /** @brief _mali_osk_locks_debug_init/add/remove() functions are declared when DEBUG is enabled and
+ * defined in file mali_osk_locks.c. When LOCK_ORDER_CHECKING is enabled, calling these functions when we
+ * init/lock/unlock a lock/mutex, we could track lock order of a given tid. */
+ void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+ void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+ void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+ /** @brief This function can return a given lock's owner when DEBUG is enabled. */
+ static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
+ {
+ return lock->owner;
+ }
+#else
+#define _mali_osk_locks_debug_init(x, y, z) do {} while (0)
+#define _mali_osk_locks_debug_add(x) do {} while (0)
+#define _mali_osk_locks_debug_remove(x) do {} while (0)
+#endif
+
+ /** @brief Allocate and initialise a _mali_osk_spinlock_t. Returns NULL on
+ * allocation failure; free with _mali_osk_spinlock_term(). */
+ static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
+ if (NULL == lock) {
+ return NULL;
+ }
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock a spinlock (no IRQ save; see the _irq variant for that). */
+ static inline void _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ spin_lock(&lock->spinlock);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock a spinlock */
+ static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock(&lock->spinlock);
+ }
+
+ /** @brief Free a memory block which the argument lock pointed to and its type must be
+ * _mali_osk_spinlock_t *. */
+ static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Allocate and initialise a _mali_osk_spinlock_irq_t (spinlock plus
+ * storage for the saved IRQ flags). Returns NULL on allocation failure. */
+ static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_irq_t *lock = NULL;
+ lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ lock->flags = 0;
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock spinlock and save the register's state */
+ static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock)
+ {
+ unsigned long tmp_flags;
+
+ BUG_ON(NULL == lock);
+ spin_lock_irqsave(&lock->spinlock, tmp_flags);
+ /* NOTE(review): the saved IRQ flags live in the lock object itself, so
+ * each acquisition overwrites the previous value -- this relies on the
+ * matching unlock always happening before the next lock. */
+ lock->flags = tmp_flags;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock spinlock with saved register's state */
+ static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+ }
+
+ /** @brief Destroy a given memory block which lock pointed to, and the lock type must be
+ * _mali_osk_spinlock_irq_t *. */
+ static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Allocate and initialise a _mali_osk_mutex_rw_t (rw_semaphore
+ * wrapper). Returns NULL on allocation failure; free with
+ * _mali_osk_mutex_rw_term(). */
+ static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_rw_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ init_rwsem(&lock->rw_sema);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Take lock->rw_sema for reading (_MALI_OSK_LOCKMODE_RO) or
+ * writing (_MALI_OSK_LOCKMODE_RW); any other mode is rejected with
+ * BUG_ON. */
+ static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ down_read(&lock->rw_sema);
+ } else {
+ down_write(&lock->rw_sema);
+ }
+
+#ifdef DEBUG
+ /* Both branches of the original if/else stored the same value, so
+ * simply record the mode unconditionally for the debug tracker. */
+ lock->mode = mode;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+ }
+
+ /** @brief Release lock->rw_sema with up_read()/up_write() according to
+ * the argument mode's value. */
+ static inline void _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+ /* make sure the thread releasing the lock actually was the owner */
+ if (mode == _MALI_OSK_LOCKMODE_RW) {
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ /* This lock now has no owner */
+ lock->checker.owner = 0;
+ }
+#endif
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ up_read(&lock->rw_sema);
+ } else {
+ up_write(&lock->rw_sema);
+ }
+ }
+
+ /** @brief Free a given memory block which lock pointed to and its type must be
+ * _mali_osk_mutex_rw_t *. */
+ static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Allocate and initialise a _mali_osk_mutex_t. Mutex and
+ * mutex_interruptible share this init (and term), since they use the same
+ * osk mutex struct and differ only in which locking function they call. */
+ static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+ mutex_init(&lock->mutex);
+
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock lock->mutex with mutex_lock_interruptible().
+ * @return _MALI_OSK_ERR_OK when the mutex was acquired, or
+ * _MALI_OSK_ERR_RESTARTSYSCALL when the wait was interrupted by a
+ * signal -- in that case the mutex is NOT held by the caller. */
+ static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+ {
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ BUG_ON(NULL == lock);
+
+ if (mutex_lock_interruptible(&lock->mutex)) {
+ printk(KERN_WARNING "Mali: Can not lock mutex\n");
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ } else {
+ /* Record ownership only when the mutex was actually taken;
+ * doing it on the interrupted path would leave bogus owner
+ * state in the DEBUG lock tracker. */
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ return err;
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock_interruptible() function. */
+ static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+ /** @brief Lock the lock->mutex with the plain, non-interruptible
+ * mutex_lock() function. */
+ static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ mutex_lock(&lock->mutex);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */
+ static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+ /** @brief Free the memory block that @a lock points to. */
+ static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+#ifdef _cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_low_level_mem.c b/drivers/parrot/gpu/mali400/linux/mali_osk_low_level_mem.c
new file mode 100644
index 00000000000000..e83b4ccd7d02e8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+/** Full read/write memory barrier. */
+void _mali_osk_mem_barrier(void)
+{
+ mb();
+}
+
+/** Write-only memory barrier. */
+void _mali_osk_write_mem_barrier(void)
+{
+ wmb();
+}
+
+/** Map a physical register range uncached; @a description is unused here. */
+mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description)
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+/** Undo _mali_osk_mem_mapioregion(); @a phys/@a size are unused by iounmap. */
+void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address virt)
+{
+ iounmap((void *)virt);
+}
+
+/* NOTE(review): "type inline" (non-static) relies on GNU89 inline semantics
+ * as used by the kernel -- confirm if the build ever moves to C99 inline. */
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description)
+{
+#if MALI_LICENSE_IS_GPL
+ return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+/** Release a region claimed by _mali_osk_mem_reqregion() (non-GPL only). */
+void inline _mali_osk_mem_unreqregion(u32 phys, u32 size)
+{
+#if !MALI_LICENSE_IS_GPL
+ release_mem_region(phys, size);
+#endif
+}
+
+/* Relaxed register write: raw write with explicit little-endian conversion,
+ * no implied barrier (contrast with iowrite32 below). */
+void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset);
+}
+
+/** Read a 32-bit register at @a addr + @a offset. */
+u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
+{
+ return ioread32(((u8 *)addr) + offset);
+}
+
+/** Ordered 32-bit register write at @a addr + @a offset. */
+void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ iowrite32(val, ((u8 *)addr) + offset);
+}
+
+void _mali_osk_cache_flushall(void)
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+/** For uncached mappings a write barrier is sufficient to "flush". */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size)
+{
+ _mali_osk_write_mem_barrier();
+}
+
+/** Copy @a size bytes from user-space @a src to user-space @a dest via a
+ * kernel bounce buffer, one block (4 KiB) at a time.
+ *
+ * @return the number of bytes actually copied (0 if the bounce buffer
+ * could not be allocated); stops early at the first short copy.
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+ u32 retval = 0;
+ void *temp_buf;
+
+ temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+ if (NULL != temp_buf) {
+ u32 bytes_left_to_copy = size;
+ u32 i;
+ for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ u32 size_to_copy;
+ u32 size_copied;
+ u32 bytes_left;
+
+ if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+ } else {
+ size_to_copy = bytes_left_to_copy;
+ }
+
+ /* copy_from_user/copy_to_user return the number of bytes
+ * NOT copied; use that to track the short-copy case. */
+ bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy);
+ size_copied = size_to_copy - bytes_left;
+
+ bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied);
+ size_copied -= bytes_left;
+
+ bytes_left_to_copy -= size_copied;
+ retval += size_copied;
+
+ if (size_copied != size_to_copy) {
+ break; /* Early out, we were unable to copy this entire block */
+ }
+ }
+
+ kfree(temp_buf);
+ }
+
+ return retval;
+}
+
+/** User-kernel entry point for _mali_osk_mem_write_safe(); rewrites
+ * args->size to the number of bytes actually copied. */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Return number of bytes actually copied */
+ args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_mali.c b/drivers/parrot/gpu/mali400/linux/mali_osk_mali.c
new file mode 100644
index 00000000000000..659b64acefad39
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_mali.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+
+ if (NULL == mali_platform_device) {
+ /* Not connected to a device */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+ mali_platform_device->resource[i].start == addr) {
+ if (NULL != res) {
+ res->base = addr;
+ res->description = mali_platform_device->resource[i].name;
+
+ /* Any (optional) IRQ resource belonging to this resource will follow */
+ if ((i + 1) < mali_platform_device->num_resources &&
+ IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
+ res->irq = mali_platform_device->resource[i + 1].start;
+ } else {
+ res->irq = -1;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+u32 _mali_osk_resource_base_address(void)
+{
+ u32 lowest_addr = 0xFFFFFFFF;
+ u32 ret = 0;
+
+ if (NULL != mali_platform_device) {
+ int i;
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+ mali_platform_device->resource[i].start < lowest_addr) {
+ lowest_addr = mali_platform_device->resource[i].start;
+ ret = lowest_addr;
+ }
+ }
+ }
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(data);
+
+ if (NULL != mali_platform_device) {
+ struct mali_gpu_device_data *os_data = NULL;
+
+ os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+ if (NULL != os_data) {
+ /* Copy data from OS dependant struct to Mali neutral struct (identical!) */
+ BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
+ _mali_osk_memcpy(data, os_data, sizeof(*os_data));
+
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+mali_bool _mali_osk_shared_interrupts(void)
+{
+ u32 irqs[128];
+ u32 i, j, irq, num_irqs_found = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) {
+ irq = mali_platform_device->resource[i].start;
+
+ for (j = 0; j < num_irqs_found; ++j) {
+ if (irq == irqs[j]) {
+ return MALI_TRUE;
+ }
+ }
+
+ irqs[num_irqs_found++] = irq;
+ }
+ }
+
+ return MALI_FALSE;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_math.c b/drivers/parrot/gpu/mali400/linux/mali_osk_math.c
new file mode 100644
index 00000000000000..02741b0fa5324d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_math.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 _mali_osk_clz(u32 input)
+{
+ return 32 - fls(input);
+}
+
/* Find last (most significant) set bit: 1-based position, 0 when input == 0.
 * Thin wrapper over the kernel's fls(). */
u32 _mali_osk_fls(u32 input)
{
    return fls(input);
}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_memory.c b/drivers/parrot/gpu/mali400/linux/mali_osk_memory.c
new file mode 100644
index 00000000000000..27dea0e604d36e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+void inline *_mali_osk_calloc(u32 n, u32 size)
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc(u32 size)
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free(void *ptr)
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_valloc(u32 size)
+{
+ return vmalloc(size);
+}
+
+void inline _mali_osk_vfree(void *ptr)
+{
+ vfree(ptr);
+}
+
+void inline *_mali_osk_memcpy(void *dst, const void *src, u32 len)
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset(void *s, u32 c, u32 n)
+{
+ return memset(s, c, n);
+}
+
/* Ask whether allocating up to 'max_allocated' bytes is acceptable.
 * @return Always MALI_TRUE on Linux (see comment below). */
mali_bool _mali_osk_mem_check_allocated(u32 max_allocated)
{
    /* No need to prevent an out-of-memory dialogue appearing on Linux,
     * so we always return MALI_TRUE.
     */
    return MALI_TRUE;
}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_misc.c b/drivers/parrot/gpu/mali400/linux/mali_osk_misc.c
new file mode 100644
index 00000000000000..6e104ad6c86e6c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_misc.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+#if !defined(CONFIG_MALI_QUIET)
+void _mali_osk_dbgmsg(const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+#endif /* !defined(CONFIG_MALI_QUIET) */
+
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
+{
+ int res;
+ va_list args;
+ va_start(args, fmt);
+
+ res = vscnprintf(buf, (size_t)size, fmt, args);
+
+ va_end(args);
+ return res;
+}
+
/* Deliberately crash the kernel at this point: dump a stack trace, then
 * fault by storing through a NULL pointer. The NULL store is intentional
 * (it produces an oops pinpointing the caller) — do not "fix" it. */
void _mali_osk_abort(void)
{
    /* make a simple fault by dereferencing a NULL pointer */
    dump_stack();
    *(int *)0 = 0;
}

/* Breakpoint hook; implemented as a hard abort on Linux. */
void _mali_osk_break(void)
{
    _mali_osk_abort();
}
+
/* Process identifier of the current context.
 * @return Thread group ID, which is the POSIX process ID on Linux. */
u32 _mali_osk_get_pid(void)
{
    /* Thread group ID is the process ID on Linux */
    return (u32)current->tgid;
}

/* Thread identifier of the current context (never 0). */
u32 _mali_osk_get_tid(void)
{
    /* pid is actually identifying the thread on Linux */
    u32 tid = current->pid;

    /* If the pid is 0 the core was idle. Instead of returning 0 we return a special number
     * identifying which core we are on. The negative value wraps in the u32,
     * yielding a distinct large ID per CPU. */
    if (0 == tid) {
        tid = -(1 + raw_smp_processor_id());
    }

    return tid;
}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_notification.c b/drivers/parrot/gpu/mali400/linux/mali_osk_notification.c
new file mode 100644
index 00000000000000..8a1e786e4d9762
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_notification.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notification pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl
+ * When a new notification is posted a single thread is resumed.
+ */
struct _mali_osk_notification_queue_t_struct {
    spinlock_t mutex; /**< Spinlock protecting the list (named "mutex" for historical reasons) */
    wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
    struct list_head head; /**< List of notifications waiting to be picked up */
};

/* Internal wrapper pairing a list node with the user-visible notification;
 * container_of() recovers the wrapper from the embedded data member. */
typedef struct _mali_osk_notification_wrapper_t_struct {
    struct list_head list; /**< Internal linked list variable */
    _mali_osk_notification_t data; /**< Notification data */
} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void)
+{
+ _mali_osk_notification_queue_t *result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ spin_lock_init(&result->mutex);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+ GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+ if (NULL == notification) {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size) {
+ notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ } else {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete(_mali_osk_notification_t *object)
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER(object);
+
+ notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+ /* Free the container */
+ kfree(notification);
+}
+
/* Tear down a queue: drain and free every pending notification, then release
 * the queue itself. Callers must guarantee no other thread still uses it. */
void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue)
{
    _mali_osk_notification_t *result;
    MALI_DEBUG_ASSERT_POINTER(queue);

    /* Drain everything still queued so nothing leaks. */
    while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
        _mali_osk_notification_delete(result);
    }

    /* not much to do, just free the memory */
    kfree(queue);
}
/* Append a notification to the queue and wake one waiting receiver.
 * With MALI_UPPER_HALF_SCHEDULING the lock is taken with IRQs disabled —
 * presumably because this path can then run from interrupt context
 * (TODO confirm against the callers). */
void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object)
{
#if defined(MALI_UPPER_HALF_SCHEDULING)
    unsigned long irq_flags;
#endif

    _mali_osk_notification_wrapper_t *notification;
    MALI_DEBUG_ASSERT_POINTER(queue);
    MALI_DEBUG_ASSERT_POINTER(object);

    /* Recover the wrapper that embeds this notification. */
    notification = container_of(object, _mali_osk_notification_wrapper_t, data);

#if defined(MALI_UPPER_HALF_SCHEDULING)
    spin_lock_irqsave(&queue->mutex, irq_flags);
#else
    spin_lock(&queue->mutex);
#endif

    list_add_tail(&notification->list, &queue->head);

#if defined(MALI_UPPER_HALF_SCHEDULING)
    spin_unlock_irqrestore(&queue->mutex, irq_flags);
#else
    spin_unlock(&queue->mutex);
#endif

    /* and wake up one possible exclusive waiter */
    wake_up(&queue->receive_queue);
}
+
/* Remove the oldest notification from 'queue' without blocking.
 * @return _MALI_OSK_ERR_OK with *result set, or _MALI_OSK_ERR_ITEM_NOT_FOUND
 *         when the queue is empty (*result is then left untouched). */
_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
{
#if defined(MALI_UPPER_HALF_SCHEDULING)
    unsigned long irq_flags;
#endif

    _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
    _mali_osk_notification_wrapper_t *wrapper_object;

#if defined(MALI_UPPER_HALF_SCHEDULING)
    spin_lock_irqsave(&queue->mutex, irq_flags);
#else
    spin_lock(&queue->mutex);
#endif

    if (!list_empty(&queue->head)) {
        /* FIFO order: take the entry at the head of the list. */
        wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
        *result = &(wrapper_object->data);
        list_del_init(&wrapper_object->list);
        ret = _MALI_OSK_ERR_OK;
    }

#if defined(MALI_UPPER_HALF_SCHEDULING)
    spin_unlock_irqrestore(&queue->mutex, irq_flags);
#else
    spin_unlock(&queue->mutex);
#endif

    return ret;
}
+
/* Block until a notification can be dequeued from 'queue'.
 * @return _MALI_OSK_ERR_OK with *result set, or _MALI_OSK_ERR_RESTARTSYSCALL
 *         when the wait was interrupted by a signal (*result stays NULL). */
_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
{
    /* check input */
    MALI_DEBUG_ASSERT_POINTER(queue);
    MALI_DEBUG_ASSERT_POINTER(result);

    /* default result */
    *result = NULL;

    /* wait_event_interruptible() returns non-zero when woken by a signal. */
    if (wait_event_interruptible(queue->receive_queue,
                                 _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
        return _MALI_OSK_ERR_RESTARTSYSCALL;
    }

    return _MALI_OSK_ERR_OK; /* all ok */
}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_pm.c b/drivers/parrot/gpu/mali400/linux/mali_osk_pm.c
new file mode 100644
index 00000000000000..a65cde12bb913c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_pm.c
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static _mali_osk_atomic_t mali_pm_ref_count;
+
/* Initialise the runtime-PM reference counter (used for debug accounting of
 * outstanding power references). */
void _mali_osk_pm_dev_enable(void)
{
    _mali_osk_atomic_init(&mali_pm_ref_count, 0);
}

/* Release the runtime-PM reference counter. */
void _mali_osk_pm_dev_disable(void)
{
    _mali_osk_atomic_term(&mali_pm_ref_count);
}
+
/* Can NOT run in atomic context: pm_runtime_get_sync() may synchronously
 * resume (power up) the device, which can sleep. */
_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
{
#ifdef CONFIG_PM_RUNTIME
    int err;
    MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
    /* Take a runtime-PM reference; resumes the device if it was suspended. */
    err = pm_runtime_get_sync(&(mali_platform_device->dev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
    pm_runtime_mark_last_busy(&(mali_platform_device->dev));
#endif
    if (0 > err) {
        MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
        return _MALI_OSK_ERR_FAULT;
    }
    /* Debug-only accounting of outstanding power references. */
    _mali_osk_atomic_inc(&mali_pm_ref_count);
    MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
    return _MALI_OSK_ERR_OK;
}
+
/* Can run in atomic context */
/* Drop a reference taken with _mali_osk_pm_dev_ref_add(); on newer kernels
 * this arms the autosuspend timer instead of suspending immediately. */
void _mali_osk_pm_dev_ref_dec(void)
{
#ifdef CONFIG_PM_RUNTIME
    MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
    _mali_osk_atomic_dec(&mali_pm_ref_count);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
    pm_runtime_mark_last_busy(&(mali_platform_device->dev));
    pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
    pm_runtime_put(&(mali_platform_device->dev));
#endif
    MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
+
/* Can run in atomic context */
/* Take a runtime-PM reference WITHOUT resuming the device.
 * @return MALI_TRUE when at least one power reference was already held
 *         (per the mali_pm_ref_count accounting), MALI_FALSE otherwise.
 * NOTE(review): the counter is read after get_noresume, so a concurrent
 * ref_add/ref_dec can race the reported state — confirm callers tolerate
 * that. */
mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
{
#ifdef CONFIG_PM_RUNTIME
    u32 ref;
    MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
    pm_runtime_get_noresume(&(mali_platform_device->dev));
    ref = _mali_osk_atomic_read(&mali_pm_ref_count);
    MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
    return ref > 0 ? MALI_TRUE : MALI_FALSE;
#else
    return MALI_TRUE;
#endif
}

/* Can run in atomic context */
/* Release a reference taken with _mali_osk_pm_dev_ref_add_no_power_on(). */
void _mali_osk_pm_dev_ref_dec_no_power_on(void)
{
#ifdef CONFIG_PM_RUNTIME
    MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
    pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
    pm_runtime_put(&(mali_platform_device->dev));
#endif
    MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
+
/* Wait for all pending runtime-PM requests on the Mali device to complete. */
void _mali_osk_pm_dev_barrier(void)
{
#ifdef CONFIG_PM_RUNTIME
    pm_runtime_barrier(&(mali_platform_device->dev));
#endif
}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_profiling.c b/drivers/parrot/gpu/mali400/linux/mali_osk_profiling.c
new file mode 100644
index 00000000000000..8b9e274ea48e5e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_profiling.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_pp_scheduler.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ if (MALI_TRUE == auto_start) {
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
/* Tear down profiling; nothing is held by this stub implementation. */
void _mali_osk_profiling_term(void)
{
    /* Nothing to do */
}

/* Start profiling; stubbed (no internal event buffer in this build). */
_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit)
{
    /* Nothing to do */
    return _MALI_OSK_ERR_OK;
}

/* Stop profiling; stubbed. */
_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
{
    /* Nothing to do */
    return _MALI_OSK_ERR_OK;
}

/* Number of recorded events; always 0 in this stub. */
u32 _mali_osk_profiling_get_count(void)
{
    return 0;
}

/* Fetch a recorded event; stubbed (output parameters are left untouched). */
_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
{
    /* Nothing to do */
    return _MALI_OSK_ERR_OK;
}

/* Clear recorded events; stubbed. */
_mali_osk_errcode_t _mali_osk_profiling_clear(void)
{
    /* Nothing to do */
    return _MALI_OSK_ERR_OK;
}

/* Recording state; always FALSE in this stub. */
mali_bool _mali_osk_profiling_is_recording(void)
{
    return MALI_FALSE;
}

/* Whether a finished recording exists; always FALSE in this stub. */
mali_bool _mali_osk_profiling_have_recording(void)
{
    return MALI_FALSE;
}

/* Forward user-space SW counters to the gator tracepoint. */
void _mali_osk_profiling_report_sw_counters(u32 *counters)
{
    trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
}

/* Report current GPU memory usage as tracked by the UK layer. */
void _mali_osk_profiling_memory_usage_get(u32 *memory_usage)
{
    *memory_usage = _mali_ukk_report_memory_usage();
}
+
+
/* UK entry point: start profiling. */
_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
{
    return _mali_osk_profiling_start(&args->limit);
}

/* UK entry point: record one user-space event; the kernel-side pid/tid
 * replace the first two user-supplied data elements. */
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
    /* Always add process and thread identificator in the first two data elements for events from user space */
    _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);

    return _MALI_OSK_ERR_OK;
}

/* UK entry point: stop profiling. */
_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
{
    return _mali_osk_profiling_stop(&args->count);
}

/* UK entry point: fetch one recorded event. */
_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
{
    return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
}

/* UK entry point: clear recorded events. */
_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
{
    return _mali_osk_profiling_clear();
}

/* UK entry point: report user-space SW counters. */
_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
{
    _mali_osk_profiling_report_sw_counters(args->counters);
    return _MALI_OSK_ERR_OK;
}

/* UK entry point: query GPU memory usage. */
_mali_osk_errcode_t _mali_ukk_profiling_memory_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
{
    _mali_osk_profiling_memory_usage_get(&args->memory_usage);
    return _MALI_OSK_ERR_OK;
}
+
/**
 * Called by gator.ko to set HW counters
 *
 * @param counter_id The counter ID.
 * @param event_id Event ID that the counter should count (HW counter value from TRM).
 *
 * @return 1 on success, 0 on failure.
 */
int _mali_profiling_set_event(u32 counter_id, s32 event_id)
{
    if (COUNTER_VP_0_C0 == counter_id) {
        mali_gp_job_set_gp_counter_src0(event_id);
    } else if (COUNTER_VP_0_C1 == counter_id) {
        mali_gp_job_set_gp_counter_src1(event_id);
    } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) {
        /*
         * Compatibility notes:
         *
         * 1) Pre-r3p2 DDKs allowed per-core PP counters; r3p2 replaced that
         *    with one global pair shared by all cores. From r4p0, counters
         *    can again differ per PP *sub job* (sub job n is designed to run
         *    on core n, though actual scheduling is not guaranteed). Gator's
         *    per-core #defines are therefore reused here as sub job IDs with
         *    no translation. Core scaling must be disabled for reliable
         *    per-sub-job data.
         *
         * 2) Global/default vs per-sub-job counters: values set for "core 0"
         *    are applied both as the global/default pair AND for sub job 0;
         *    values for "cores" 1-7 only affect the corresponding sub job.
         *    This keeps compatibility with Gator/DS-5/Streamline, whose GUI
         *    still speaks in terms of cores.
         *
         *    Mali-450 note: the DLBU distributes a PP job dynamically across
         *    cores, so a normal Mali-450 job is one virtual job and only the
         *    global/default counter pair applies to it.
         */
        u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1;
        u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1;
        if (0 == counter_src) {
            mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id);
            if (0 == sub_job) {
                mali_pp_job_set_pp_counter_global_src0(event_id);
            }
        } else {
            mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id);
            if (0 == sub_job) {
                mali_pp_job_set_pp_counter_global_src1(event_id);
            }
        }
    } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
        /* Two counters per L2 core: even IDs select source 0, odd source 1. */
        u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
        struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);

        if (NULL != l2_cache_core) {
            u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
            if (0 == counter_src) {
                mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id);
            } else {
                mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id);
            }
        }
    } else {
        return 0; /* Failure, unknown event */
    }

    return 1; /* success */
}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ * the counter sources and values will be output
+ * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores
+ */
u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
{
    struct mali_l2_cache_core *l2_cache;
    u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
    u32 i;
    u32 ret = 0; /* bit i set => L2 core i was unavailable (powered off) */

    MALI_DEBUG_ASSERT(l2_cores_num <= 3);

    for (i = 0; i < l2_cores_num; i++) {
        l2_cache = mali_l2_cache_core_get_glob_l2_core(i);

        if (NULL == l2_cache) {
            continue;
        }

        if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache)) {
            /* It is now safe to access the L2 cache core in order to retrieve the counters */
            mali_l2_cache_core_get_counter_values(l2_cache,
                                                  &values->cores[i].source0,
                                                  &values->cores[i].value0,
                                                  &values->cores[i].source1,
                                                  &values->cores[i].value1);
        } else {
            /* The core was not available, set the right bit in the mask. */
            ret |= (1 << i);
        }
        /* NOTE(review): unlock runs even when the power-state check above
         * returned FALSE; this looks intentional (the lock itself appears to
         * be taken regardless, the return value reporting only power state)
         * — confirm against the mali_l2_cache lock/unlock contract. */
        mali_l2_cache_unlock_power_state(l2_cache);
    }

    return ret;
}
+
+/**
+ * Called by gator to control the production of profiling information at runtime.
+ */
void _mali_profiling_control(u32 action, u32 value)
{
    switch (action) {
    case FBDUMP_CONTROL_ENABLE:
        /* Any non-zero value enables colour-buffer capture. */
        mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
        break;
    case FBDUMP_CONTROL_RATE:
        mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
        break;
    case SW_COUNTER_ENABLE:
        mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
        break;
    case FBDUMP_CONTROL_RESIZE_FACTOR:
        mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
        break;
    default:
        break; /* Ignore unimplemented actions */
    }
}
+
/**
 * Called by gator to get mali api version.
 */
u32 _mali_profiling_get_api_version(void)
{
    return MALI_PROFILING_API_VERSION;
}

/**
 * Called by gator to get the data about Mali instance in use:
 * product id, version, number of cores
 */
void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
{
    values->mali_product_id = (u32)mali_kernel_core_get_product_id();
    values->mali_version_major = mali_kernel_core_get_gpu_major_version();
    values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
    values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
    values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
    values->num_of_vp_cores = 1; /* the driver models a single vertex processor */
}


/* Symbols exported for use by the out-of-tree gator.ko profiling module. */
EXPORT_SYMBOL(_mali_profiling_set_event);
EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
EXPORT_SYMBOL(_mali_profiling_control);
EXPORT_SYMBOL(_mali_profiling_get_api_version);
EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_specific.h b/drivers/parrot/gpu/mali400/linux/mali_osk_specific.h
new file mode 100644
index 00000000000000..ac054be61a7066
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_specific.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool *mali_dma_pool;
+
+
/* Create a coherent DMA pool bound to the Mali platform device. */
MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
{
    return dma_pool_create("mali-dma", &mali_platform_device->dev, size, alignment, boundary);
}

/* Destroy a pool created by mali_dma_pool_create(); all blocks must have
 * been returned to the pool first. */
MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
{
    dma_pool_destroy(pool);
}

/* Allocate one block from the pool; the DMA/bus address is written through
 * phys_addr.
 * NOTE(review): dma_pool_alloc() writes a dma_addr_t through its third
 * argument; passing u32 * assumes a 32-bit dma_addr_t (no LPAE) — confirm. */
MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, u32 *phys_addr)
{
    return dma_pool_alloc(pool, GFP_KERNEL, phys_addr);
}

/* Return a block (CPU address + DMA address pair) to the pool. */
MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, u32 phys_addr)
{
    dma_pool_free(pool, virt_addr, phys_addr);
}
+
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current cpu.
+ It is useful for cost-free (2 cycle) measuring of the time spent
+ in a code path. Sample before and after, the diff number of cycles.
+ When the CPU is idle it will not increase this clock counter.
+ It means that the counter is accurate if only spin-locks are used,
+ but mutexes may lead to too low values since the cpu might "idle"
+ waiting for the mutex to become available.
+ The clock source is configured on the CPU during mali module load,
+ but will not give useful output after a CPU has been power cycled.
+ It is therefore important to configure the system to not turn of
+ the cpu cores when using this functionallity.*/
/* Read the ARM PMU cycle counter (CCNT) of the current CPU via cp15.
 * Only meaningful after the counters have been configured (see
 * mali_init_cpu_time_counters below) and while the core stays powered. */
static inline unsigned int mali_get_cpu_cyclecount(void)
{
    unsigned int value;
    /* Reading the CCNT Register - CPU clock counter */
    asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
    return value;
}
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
+
+
/* Copy 'n' bytes from user space into kernel buffer 'to'.
 * @return Number of bytes that could NOT be copied (0 on full success),
 *         mirroring copy_from_user().
 * NOTE(review): 'from' could be const void * — confirm no other declaration
 * of this helper exists before tightening the type. */
MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
{
    return (u32)copy_from_user(to, from, (unsigned long)n);
}

/* TRUE when executing in atomic context (where sleeping is not allowed),
 * as reported by in_atomic(). */
MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
{
    return in_atomic();
}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_time.c b/drivers/parrot/gpu/mali400/linux/mali_osk_time.c
new file mode 100644
index 00000000000000..e7989ed172e78d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+/* Non-zero when tick 'ticka' is after 'tickb', with jiffies wraparound
+ * handled by time_after().
+ * NOTE(review): ticks are truncated to u32 — on a 64-bit jiffies platform
+ * the wraparound comparison only holds within the 32-bit window; confirm
+ * this is acceptable for the target (32-bit ARM). */
+int _mali_osk_time_after(u32 ticka, u32 tickb)
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+/* Converts milliseconds to scheduler ticks (jiffies). */
+u32 _mali_osk_time_mstoticks(u32 ms)
+{
+ return msecs_to_jiffies(ms);
+}
+
+/* Converts scheduler ticks (jiffies) to milliseconds. */
+u32 _mali_osk_time_tickstoms(u32 ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+/* Current tick count (jiffies), truncated to 32 bits. */
+u32 _mali_osk_time_tickcount(void)
+{
+ return jiffies;
+}
+
+/* Busy-waits for 'usecs' microseconds without sleeping. Intended for short
+ * delays only; udelay() with large arguments can overflow on some arches. */
+void _mali_osk_time_ubusydelay(u32 usecs)
+{
+ udelay(usecs);
+}
+
+/* Wall-clock time in nanoseconds since the epoch.
+ * NOTE(review): getnstimeofday() is not monotonic — wall-clock steps will
+ * show up in deltas; confirm callers only use this for timestamps. */
+u64 _mali_osk_time_get_ns(void)
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_timers.c b/drivers/parrot/gpu/mali400/linux/mali_osk_timers.c
new file mode 100644
index 00000000000000..4b3799b535ca76
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_timers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct {
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+/* Allocates and initializes a timer object; returns NULL on allocation
+ * failure. The caller must set a callback with _mali_osk_timer_setcallback()
+ * before arming the timer, and release it with _mali_osk_timer_term().
+ * (Dropped the unnecessary cast of kmalloc()'s void * return and switched to
+ * the sizeof(*t) idiom so the size stays tied to the variable's type.) */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+/* Arms the timer to fire 'ticks_to_expire' jiffies from now. The timer must
+ * not already be pending (use _mali_osk_timer_mod() for that case). */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = jiffies + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+/* (Re)arms the timer, whether or not it is currently pending. */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+/* Cancels the timer and waits for a running callback to finish. Must not be
+ * called from the timer's own callback (del_timer_sync() would deadlock). */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+/* Cancels the timer without waiting; the callback may still be running on
+ * another CPU when this returns. */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer(&(tim->timer));
+}
+
+/* MALI_TRUE if the timer is armed and has not yet fired. */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ return 1 == timer_pending(&(tim->timer));
+}
+
+/* Sets the callback and its argument. Must be called before arming. */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+/* Frees the timer object. The caller must ensure the timer is no longer
+ * pending (e.g. via _mali_osk_timer_del()) before calling this. */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_wait_queue.c b/drivers/parrot/gpu/mali400/linux/mali_osk_wait_queue.c
new file mode 100644
index 00000000000000..d14b7d5f19c3be
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_wait_queue.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct {
+ wait_queue_head_t wait_queue;
+};
+
+/* Allocates and initializes a wait queue object. Returns NULL when the
+ * allocation fails; otherwise the queue starts out empty. */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
+{
+ _mali_osk_wait_queue_t *queue;
+
+ queue = kmalloc(sizeof(*queue), GFP_KERNEL);
+ if (NULL != queue) {
+ init_waitqueue_head(&queue->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&queue->wait_queue));
+ }
+
+ return queue;
+}
+
+/* Sleeps (uninterruptibly) on the queue until condition(data) is true. */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition(data));
+}
+
+/* Like _mali_osk_wait_queue_wait_event(), but gives up after 'timeout' ms.
+ * NOTE(review): wait_event_timeout()'s return value is discarded, so the
+ * caller cannot tell a timeout from a satisfied condition — confirm callers
+ * re-check the condition themselves. */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+/* Wakes every waiter on the queue. The waitqueue_active() pre-check is a
+ * lock-free optimization; wake_up_all() itself is safe on an empty queue. */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return;
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+/* Frees the queue object; callers must ensure no threads are still waiting. */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_osk_wq.c b/drivers/parrot/gpu/mali400/linux/mali_osk_wq.c
new file mode 100644
index 00000000000000..64af192e48a44b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_osk_wq.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+typedef struct _mali_osk_wq_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ mali_bool high_pri;
+ struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+typedef struct _mali_osk_wq_delayed_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ struct delayed_work work;
+} mali_osk_wq_delayed_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *mali_wq_normal = NULL;
+static struct workqueue_struct *mali_wq_high = NULL;
+#endif
+
+static void _mali_osk_wq_work_func(struct work_struct *work);
+
+/* Creates the driver's normal- and high-priority workqueues (GPL builds
+ * only; non-GPL builds fall back to the shared system workqueue elsewhere).
+ * On kernels >= 2.6.36 unbound queues are used so work is not pinned to the
+ * submitting CPU. Returns _MALI_OSK_ERR_FAULT if either queue cannot be
+ * created, tearing down whichever one succeeded. */
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL == mali_wq_high);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
+ mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0);
+#else
+ mali_wq_normal = create_workqueue("mali");
+ mali_wq_high = create_workqueue("mali_high_pri");
+#endif
+ if (NULL == mali_wq_normal || NULL == mali_wq_high) {
+ MALI_PRINT_ERROR(("Unable to create Mali workqueues\n"));
+
+ if (mali_wq_normal) destroy_workqueue(mali_wq_normal);
+ if (mali_wq_high) destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* MALI_LICENSE_IS_GPL */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Blocks until all work currently queued on both Mali workqueues (or, for
+ * non-GPL builds, the shared system workqueue) has completed. */
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+ flush_workqueue(mali_wq_high);
+ flush_workqueue(mali_wq_normal);
+#else
+ flush_scheduled_work();
+#endif
+}
+
+/* Flushes and destroys both workqueues. Counterpart of _mali_osk_wq_init();
+ * no work may be queued after this returns. */
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL != mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL != mali_wq_high);
+
+ flush_workqueue(mali_wq_normal);
+ destroy_workqueue(mali_wq_normal);
+
+ flush_workqueue(mali_wq_high);
+ destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+#else
+ flush_scheduled_work();
+#endif
+}
+
+/* Allocates a work item that will run 'handler(data)' on the normal-priority
+ * queue. Returns NULL on allocation failure; free with
+ * _mali_osk_wq_delete_work[_nonflush](). */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_FALSE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+/* Same as _mali_osk_wq_create_work(), but the item is marked high-priority
+ * and should be scheduled via _mali_osk_wq_schedule_work_high_pri(). */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_TRUE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+/* Frees a work item after flushing the queues, guaranteeing the handler is
+ * not (and will not start) running. */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ _mali_osk_wq_flush();
+ kfree(work_object);
+}
+
+/* Frees a work item without flushing; caller must guarantee it is not queued
+ * or running. */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ kfree(work_object);
+}
+
+/* Queues the work item on the normal-priority queue (or the shared system
+ * workqueue for non-GPL builds). */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Queues the work item on the high-priority queue; falls back to the shared
+ * system workqueue (no priority boost) for non-GPL builds. */
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_high, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Trampoline invoked by the kernel workqueue: recovers the wrapper object
+ * from the embedded work_struct and calls the user-supplied handler. */
+static void _mali_osk_wq_work_func(struct work_struct *work)
+{
+ mali_osk_wq_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+#if MALI_LICENSE_IS_GPL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+ /* We want highest Dynamic priority of the thread so that the Jobs depending
+ ** on this thread could be scheduled in time. Without this, this thread might
+ ** sometimes need to wait for some threads in user mode to finish its round-robin
+ ** time, causing *bubble* in the Mali pipeline. Thanks to the new implementation
+ ** of high-priority workqueue in new kernel, this only happens in older kernel.
+ */
+ if (MALI_TRUE == work_object->high_pri) {
+ set_user_nice(current, -19);
+ }
+#endif
+#endif /* MALI_LICENSE_IS_GPL */
+
+ work_object->handler(work_object->data);
+}
+
+/* Trampoline for delayed work items; note the container_of goes through the
+ * delayed_work's embedded work_struct ('work.work'). */
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+ work_object->handler(work_object->data);
+}
+
+/* Allocates a delayed work item running 'handler(data)'; NULL on failure.
+ * NOTE(review): returns the internal object type rather than the opaque
+ * _mali_osk_wq_delayed_work_t * used by every other function here — confirm
+ * this matches the prototype in the OSK header. */
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+
+ INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+ return work;
+}
+
+/* Frees a delayed work item without flushing; caller must guarantee it is
+ * neither queued nor running. */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ kfree(work_object);
+}
+
+/* Cancels a pending delayed work item without waiting; the handler may still
+ * be running when this returns. */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work(&work_object->work);
+}
+
+/* Cancels a pending delayed work item and waits for a running handler to
+ * complete before returning. */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work_sync(&work_object->work);
+}
+
+/* Queues the delayed work item to run after 'delay' jiffies on the
+ * normal-priority queue (shared system workqueue for non-GPL builds). */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+ queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+ schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_pmu_power_up_down.c b/drivers/parrot/gpu/mali400/linux/mali_pmu_power_up_down.c
new file mode 100644
index 00000000000000..77ac70ebe2f667
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_pmu_power_up_down.c
@@ -0,0 +1,71 @@
+/**
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_pmu.h"
+#include "mali_pp_scheduler.h"
+#include "linux/mali/mali_utgard.h"
+
+/* Mali PMU power up/down APIs */
+
+/* Powers up every Mali domain via the global PMU core. Exported for use by
+ * platform glue code. Returns 0 on success, -ENXIO when no PMU has been
+ * registered, -EFAULT when the PMU operation fails. */
+int mali_pmu_powerup(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ if (NULL == pmu) {
+ return -ENXIO;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_power_up_all(pmu)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerup);
+
+/* Powers down every Mali domain via the global PMU core. Same return
+ * convention as mali_pmu_powerup(). */
+int mali_pmu_powerdown(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ if (NULL == pmu) {
+ return -ENXIO;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_power_down_all(pmu)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerdown);
+
+/* Limits the number of active PP (pixel processor) cores; forwards to the PP
+ * scheduler without overriding its internal policy flag. */
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+ return mali_pp_scheduler_set_perf_level(num_cores, MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/drivers/parrot/gpu/mali400/linux/mali_profiling_events.h b/drivers/parrot/gpu/mali400/linux/mali_profiling_events.h
new file mode 100644
index 00000000000000..e41323c51e6625
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_profiling_events.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_profiling_gator_api.h b/drivers/parrot/gpu/mali400/linux/mali_profiling_gator_api.h
new file mode 100644
index 00000000000000..bb2a37b80425d3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_profiling_gator_api.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.c b/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.c
new file mode 100644
index 00000000000000..83ace8fb9d62a9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+/* One recorded profiling event: timestamp, event id, and five data words. */
+typedef struct mali_profiling_entry {
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+/* Profiler lifecycle: UNINITIALIZED -> IDLE -> RUNNING -> RETURN (buffer
+ * ready for user retrieval) -> IDLE (after clear). */
+typedef enum mali_profiling_state {
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_mutex_t *lock = NULL; /* serializes state transitions */
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry *profile_entries = NULL; /* power-of-two ring buffer */
+static _mali_osk_atomic_t profile_insert_index; /* monotonically increasing write index */
+static u32 profile_mask = 0; /* ring size minus one (index mask) */
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/* Tracepoint probe registered on the mali_timeline_event trace point while
+ * profiling is running; records each event into the ring buffer. */
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+ int d2, unsigned int d3, unsigned int d4))
+{
+ add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+/* Initializes the internal profiler; optionally starts a capture immediately
+ * with the maximum buffer size when auto_start is set.
+ * NOTE(review): on auto-start failure the mutex allocated above is not
+ * released before returning _MALI_OSK_ERR_FAULT — confirm whether
+ * _mali_internal_profiling_term() is guaranteed to run in that case. */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING);
+ if (NULL == lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Shuts the profiler down: stops any running capture, then frees the event
+ * buffer and the state mutex. Safe to call even if a capture never ran. */
+void _mali_internal_profiling_term(void)
+{
+ u32 count;
+
+ /* Ensure profiling is stopped */
+ _mali_internal_profiling_stop(&count);
+
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock) {
+ _mali_osk_mutex_term(lock);
+ lock = NULL;
+ }
+}
+
+/* Starts a profiling capture. *limit is the requested number of buffer
+ * entries; on return it holds the actual (power-of-two) buffer size used.
+ * Returns _MALI_OSK_ERR_BUSY if already running, _MALI_OSK_ERR_NOMEM on
+ * allocation failure, _MALI_OSK_ERR_INVALID_ARGS when not in the IDLE state.
+ * Fixes over the original: the NOMEM path now releases the state mutex
+ * (it previously returned with the lock held, deadlocking all later calls)
+ * and no longer calls vfree on the NULL pointer; the request is clamped to
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES *before* allocating, so we never
+ * allocate more entries than can be used. */
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit)
+{
+ _mali_osk_errcode_t ret;
+ mali_profiling_entry *new_profile_entries;
+
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING == prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ /* Clamp before allocating so the buffer matches the usable size. */
+ if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
+ *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+ if (NULL == new_profile_entries) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Round the buffer size down to a power of two so an AND mask can be
+ * used for ring-buffer indexing. */
+ profile_mask = 1;
+ while (profile_mask <= *limit) {
+ profile_mask <<= 1;
+ }
+ profile_mask >>= 1;
+
+ *limit = profile_mask;
+
+ profile_mask--; /* turns the power of two into a mask of one less */
+
+ if (MALI_PROFILING_STATE_IDLE != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ profile_entries = new_profile_entries;
+
+ ret = _mali_timestamp_reset();
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ } else {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ /* Hook the tracepoint so events start flowing into the buffer. */
+ register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+ return ret;
+}
+
+/* Records one event into the ring buffer. The atomic post-increment makes
+ * slot claiming safe for concurrent producers; older entries are silently
+ * overwritten once the buffer wraps. */
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ /* If event is "leave API function", add current memory usage to the event
+ * as data point 4. This is used in timeline profiling to indicate how
+ * much memory was used when leaving a function. */
+ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+ profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+ }
+}
+
+/* Stops a running capture and moves to the RETURN state so the user can
+ * retrieve events. *count receives the number of recorded events (capped at
+ * the buffer size). tracepoint_synchronize_unregister() guarantees no probe
+ * is still executing before the count is read. */
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user to retreive events), no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+
+ tracepoint_synchronize_unregister();
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+ if (*count > profile_mask) *count = profile_mask;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Number of retrievable events; 0 unless the profiler is in RETURN state. */
+u32 _mali_internal_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_mutex_wait(lock);
+ if (MALI_PROFILING_STATE_RETURN == prof_state) {
+ retval = _mali_osk_atomic_read(&profile_insert_index);
+ if (retval > profile_mask) retval = profile_mask;
+ }
+ _mali_osk_mutex_signal(lock);
+
+ return retval;
+}
+
+/* Copies event 'index' out of the ring buffer. When the buffer has wrapped
+ * (raw_index exceeds the mask), logical index 0 maps to the oldest surviving
+ * entry. Only valid in RETURN state; out-of-range indices yield ERR_FAULT.
+ * NOTE(review): raw_index is read before the mutex is taken — harmless in
+ * RETURN state (no writers), but confirm no capture can restart in between. */
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
+{
+ u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+ _mali_osk_mutex_wait(lock);
+
+ if (index < profile_mask) {
+ if ((raw_index & ~profile_mask) != 0) {
+ index += raw_index;
+ index &= profile_mask;
+ }
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= raw_index) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+ } else {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Discards the captured buffer and returns the profiler to IDLE so a new
+ * capture can be started. Only valid in RETURN state. */
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RETURN != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Recording is in progress only while the profiler is in the RUNNING state. */
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+ return (MALI_PROFILING_STATE_RUNNING == prof_state) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* A finished recording is available once the profiler reaches RETURN state. */
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+ return (MALI_PROFILING_STATE_RETURN == prof_state) ? MALI_TRUE : MALI_FALSE;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.h b/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.h
new file mode 100644
index 00000000000000..db6380df5c3bf2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_profiling_internal.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+
+/* Internal profiler API (see mali_profiling_internal.c).
+ * NOTE(review): init/stop/start are declared 'int' here but defined as
+ * returning _mali_osk_errcode_t in the .c file — confirm the typedef keeps
+ * these prototypes compatible. */
+int _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+int _mali_internal_profiling_stop(u32 *count);
+int _mali_internal_profiling_start(u32 *limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_sync.c b/drivers/parrot/gpu/mali400/linux/mali_sync.c
new file mode 100644
index 00000000000000..4022fee64d377d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_sync.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_sync.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_timeline.h"
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+struct mali_sync_pt {
+ struct sync_pt sync_pt;
+ struct mali_sync_flag *flag;
+};
+
+/**
+ * The sync flag is used to connect sync fences to the Mali Timeline system. Sync fences can be
+ * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled.
+ */
+struct mali_sync_flag {
+ struct sync_timeline *sync_tl; /**< Sync timeline this flag is connected to. */
+ u32 point; /**< Point on timeline. */
+ int status; /**< 0 if unsignaled, 1 if signaled without error or negative if signaled with error. */
+ struct kref refcount; /**< Reference count. */
+};
+
+/* Recover the Mali wrapper from its embedded struct sync_pt member. */
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+/* sync_timeline_ops.dup: duplicate a sync point on the same timeline.
+ * The copy shares the original's mali_sync_flag, so an extra flag
+ * reference is taken here and dropped later in timeline_free_pt(). */
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt, *new_mpt;
+ struct sync_pt *new_pt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+ if (NULL == new_pt) return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+
+ mali_sync_flag_get(mpt->flag);
+ new_mpt->flag = mpt->flag;
+
+ return new_pt;
+}
+
+/* sync_timeline_ops.has_signaled: report the sync point's state as the
+ * status stored on its backing flag -- 0 while unsignaled, 1 when
+ * signaled without error, negative when signaled with an error. */
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_flag *flag;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ flag = to_mali_sync_pt(pt)->flag;
+ MALI_DEBUG_ASSERT_POINTER(flag);
+
+ return flag->status;
+}
+
+/* sync_timeline_ops.compare: order two sync points by timeline position.
+ * Returns -1, 0 or 1. The unsigned-distance test ((b - a) < (a - b))
+ * picks the direction with the smaller modular distance, so the ordering
+ * remains correct across u32 wrap-around of the point counter. */
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+ struct mali_sync_pt *mpta;
+ struct mali_sync_pt *mptb;
+ u32 a, b;
+
+ MALI_DEBUG_ASSERT_POINTER(pta);
+ MALI_DEBUG_ASSERT_POINTER(ptb);
+ mpta = to_mali_sync_pt(pta);
+ mptb = to_mali_sync_pt(ptb);
+
+ MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+ MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+ a = mpta->flag->point;
+ b = mptb->flag->point;
+
+ if (a == b) return 0;
+
+ return ((b - a) < (a - b) ? -1 : 1);
+}
+
+/* sync_timeline_ops.free_pt: drop the flag reference taken when the sync
+ * point was created (mali_sync_flag_create_pt) or duplicated. */
+static void timeline_free_pt(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ mali_sync_flag_put(mpt->flag);
+}
+
+/* sync_timeline_ops.release_obj: the timeline is being freed -- release
+ * the module reference taken in mali_sync_timeline_create(). */
+static void timeline_release(struct sync_timeline *sync_timeline)
+{
+ module_put(THIS_MODULE);
+}
+
+/* sync_timeline_ops.print_pt: debugfs pretty-printer for a sync point;
+ * prints the timeline point number of the backing flag. */
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(s);
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ mpt = to_mali_sync_pt(sync_pt);
+
+ /* It is possible this sync point is just under construct,
+ * make sure the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ seq_printf(s, "%u", mpt->flag->point);
+ } else {
+ seq_printf(s, "uninitialized");
+ }
+}
+
+/* Callbacks the Linux sync framework invokes on Mali timelines and their
+ * sync points. The address of this table also acts as the identity tag
+ * checked by mali_sync_timeline_is_ours(). */
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .free_pt = timeline_free_pt,
+ .release_obj = timeline_release,
+ .print_pt = timeline_print_pt,
+};
+
+/* Create a sync timeline driven by mali_timeline_ops; the matching
+ * module_put() happens in timeline_release() when the timeline dies.
+ * Returns NULL on allocation failure. */
+struct sync_timeline *mali_sync_timeline_create(const char *name)
+{
+ struct sync_timeline *sync_tl;
+
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct sync_timeline), name);
+ if (NULL == sync_tl) return NULL;
+
+ /* Grab a reference on the module to ensure the callbacks are present
+ * as long some timeline exists. The reference is released when the
+ * timeline is freed.
+ * Since this function is called from a ioctl on an open file we know
+ * we already have a reference, so using __module_get is safe. */
+ __module_get(THIS_MODULE);
+
+ return sync_tl;
+}
+
+/* Decide whether @sync_tl was created by this driver by comparing its
+ * ops table against our own mali_timeline_ops. */
+mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl)
+{
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ if (&mali_timeline_ops == sync_tl->ops) return MALI_TRUE;
+
+ return MALI_FALSE;
+}
+
+/* Allocate a file descriptor for @sync_fence and install the fence on it.
+ * Ownership contract: the fence reference is always consumed -- on
+ * failure it is released here and -1 is returned, so callers never have
+ * to clean up the fence themselves. */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+ s32 fd;
+
+ /* (dropped the dead "-1" initializer: fd is unconditionally
+ * assigned by get_unused_fd() below) */
+ fd = get_unused_fd();
+ if (fd < 0) {
+ sync_fence_put(sync_fence);
+ return -1;
+ }
+ sync_fence_install(sync_fence, fd);
+
+ return fd;
+}
+
+/* Merge two sync fences into a new fence. Both input fences are released
+ * regardless of the outcome; returns the merged fence, or NULL on failure. */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ /* Fixed copy-paste bug: the second assert checked sync_fence1 twice,
+ * leaving sync_fence2 unvalidated. */
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+ sync_fence_put(sync_fence1);
+ sync_fence_put(sync_fence2);
+
+ return sync_fence;
+}
+
+/* Build a fence on @sync_tl that is signaled immediately: create a
+ * temporary flag at point 0, derive a fence from it, signal the flag
+ * with no error and drop our flag reference. Returns NULL if the flag
+ * could not be created; may also return NULL if fence creation failed
+ * (the flag is still signaled and released in that case). */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+ struct mali_sync_flag *flag;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ flag = mali_sync_flag_create(sync_tl, 0);
+ if (NULL == flag) return NULL;
+
+ sync_fence = mali_sync_flag_create_fence(flag);
+
+ mali_sync_flag_signal(flag, 0);
+ mali_sync_flag_put(flag);
+
+ return sync_fence;
+}
+
+/* Allocate a sync flag bound to @sync_tl at timeline position @point.
+ * The flag starts unsignaled (status 0) and the caller holds the single
+ * initial reference. Returns NULL if @sync_tl is NULL or the zeroed
+ * allocation fails. */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+ struct mali_sync_flag *flag = NULL;
+
+ if (NULL != sync_tl) {
+ flag = _mali_osk_calloc(1, sizeof(*flag));
+ }
+ if (NULL == flag) return NULL;
+
+ flag->sync_tl = sync_tl;
+ flag->point = point;
+ flag->status = 0;
+
+ kref_init(&flag->refcount);
+
+ return flag;
+}
+
+/* Take an extra reference on @flag; paired with mali_sync_flag_put(). */
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+/**
+ * Free sync flag.
+ *
+ * kref release callback invoked when the last reference is dropped.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+static void mali_sync_flag_free(struct kref *ref)
+{
+ struct mali_sync_flag *flag;
+
+ MALI_DEBUG_ASSERT_POINTER(ref);
+ flag = container_of(ref, struct mali_sync_flag, refcount);
+
+ _mali_osk_free(flag);
+}
+
+/* Drop one reference on @flag; frees it (mali_sync_flag_free) when the
+ * count reaches zero. */
+void mali_sync_flag_put(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_put(&flag->refcount, mali_sync_flag_free);
+}
+
+/* Signal @flag exactly once: status becomes the negative @error, or 1 on
+ * success. The write barrier orders the status store before the timeline
+ * signal so timeline_has_signaled() observes the final value. */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+
+ MALI_DEBUG_ASSERT(0 == flag->status);
+ flag->status = (0 > error) ? error : 1;
+
+ _mali_osk_write_mem_barrier();
+
+ sync_timeline_signal(flag->sync_tl);
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * Takes a flag reference for the new point; it is dropped by
+ * timeline_free_pt() when the point is destroyed.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+ struct sync_pt *pt;
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == pt) return NULL;
+
+ mali_sync_flag_get(flag);
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->flag = flag;
+
+ return pt;
+}
+
+/* Create a fence containing one sync point tied to @flag; the fence will
+ * signal when the flag does. On fence-creation failure the freshly made
+ * sync point is freed (releasing its flag reference) and NULL returned. */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+ struct sync_pt *sync_pt;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ sync_pt = mali_sync_flag_create_pt(flag);
+ if (NULL == sync_pt) return NULL;
+
+ sync_fence = sync_fence_create("mali_flag_fence", sync_pt);
+ if (NULL == sync_fence) {
+ sync_pt_free(sync_pt);
+ return NULL;
+ }
+
+ return sync_fence;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_sync.h b/drivers/parrot/gpu/mali400/linux/mali_sync.h
new file mode 100644
index 00000000000000..7f92d3b0f5fc01
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_sync.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ * Mali interface for Linux sync objects.
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#if defined(CONFIG_SYNC)
+
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#include <linux/sync.h>
+#else
+#include <sync.h>
+#endif
+
+
+#include "mali_osk.h"
+
+struct mali_sync_flag;
+
+/**
+ * Create a sync timeline.
+ *
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct sync_timeline *mali_sync_timeline_create(const char *name);
+
+/**
+ * Check if sync timeline belongs to Mali.
+ *
+ * @param sync_tl Sync timeline to check.
+ * @return MALI_TRUE if sync timeline belongs to Mali, MALI_FALSE if not.
+ */
+mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl);
+
+/**
+ * Creates a file descriptor representing the sync fence. Will release sync fence if allocation of
+ * file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences. Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl);
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point);
+
+/**
+ * Grab sync flag reference.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_get(struct mali_sync_flag *flag);
+
+/**
+ * Release sync flag reference. If this was the last reference, the sync flag will be freed.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_put(struct mali_sync_flag *flag);
+
+/**
+ * Signal sync flag. All sync fences created from this flag will be signaled.
+ *
+ * @param flag Sync flag to signal.
+ * @param error Negative error code, or 0 if no error.
+ */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_uk_types.h b/drivers/parrot/gpu/mali400/linux/mali_uk_types.h
new file mode 100644
index 00000000000000..c7e2ef58f38162
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_uk_types.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_core.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_core.c
new file mode 100644
index 00000000000000..112cea2ba55003
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_core.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memort allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: negotiate the user/kernel driver API version. Reads the
+ * user's requested version, then writes back the kernel's version and
+ * the compatibility verdict. Returns 0 or a negative errno. */
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: block until a driver notification arrives, then copy it
+ * to user space. On shutdown only the notification type is copied back;
+ * otherwise the whole struct is copied with ctx scrubbed first. */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ } else {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: let user space inject a notification of the given type
+ * into the session's queue. Nothing is copied back on success. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type)) {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: fetch all driver user-space settings and copy the whole
+ * struct back, with the kernel ctx pointer scrubbed first. */
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: ask the scheduler to treat this session as high
+ * priority. No data is copied back; the mapped error code is returned
+ * directly (map_errcode(_MALI_OSK_ERR_OK) == 0 on success). */
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+ _mali_uk_request_high_priority_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_request_high_priority(&kargs);
+
+ kargs.ctx = NULL;
+
+ return map_errcode(err);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_gp.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_gp.c
new file mode 100644
index 00000000000000..573cabdda2891f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_gp.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: submit a GP (vertex processor) job. The raw user
+ * pointer is handed to _mali_ukk_gp_start_job, which does its own
+ * copying. Returns 0 on success or a mapped negative errno. */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+/* ioctl wrapper: report the GP core hardware version to user space. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: deliver user space's answer to a GP job suspension
+ * (e.g. resume with a new heap after out-of-memory); the cookie is
+ * written back so the caller can match the response. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+/* ioctl wrapper: report how many GP cores the hardware exposes. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_mem.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_mem.c
new file mode 100644
index 00000000000000..623e5aaaa2d452
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_mem.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: copy between two user buffers on behalf of the session
+ * after validating access rights and address-arithmetic overflow. The
+ * number of bytes actually written is reported back via uargs->size. */
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs)
+{
+ _mali_uk_mem_write_safe_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ /* Check if we can access the buffers */
+ if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+ || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+ return -EINVAL;
+ }
+
+ /* Check if size wraps */
+ /* NOTE(review): a zero-size request also fails this test
+ * (size + ptr == ptr) and is rejected with -EINVAL -- presumably
+ * intentional; confirm against user-space callers. */
+ if ((kargs.size + kargs.dest) <= kargs.dest
+ || (kargs.size + kargs.src) <= kargs.src) {
+ return -EINVAL;
+ }
+
+ err = _mali_ukk_mem_write_safe(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.size, &uargs->size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: map an external physical memory range into the session.
+ * If writing the resulting cookie back to user space fails, the mapping
+ * is rolled back (unmapped) before returning -EFAULT. */
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user *argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if (0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s))) {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem(&uk_args);
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie)) {
+ if (_MALI_OSK_ERR_OK == err_code) {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem(&uk_args_unmap);
+ if (_MALI_OSK_ERR_OK != err_code) {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+/* ioctl wrapper: unmap a previously mapped external memory range,
+ * identified by the cookie in the user argument struct. */
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user *argument)
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if (0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s))) {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem(&uk_args);
+
+ /* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/* ioctl wrapper: release a UMP memory attachment identified by the
+ * cookie in the user argument struct. */
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user *argument)
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if (0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s))) {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem(&uk_args);
+
+ /* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+/* ioctl wrapper: attach a UMP memory handle to the session. If the
+ * cookie cannot be written back to user space, the attachment is rolled
+ * back (released) before returning -EFAULT. */
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user *argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if (0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s))) {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem(&uk_args);
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie)) {
+ if (_MALI_OSK_ERR_OK == err_code) {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem(&uk_args_unmap);
+ if (_MALI_OSK_ERR_OK != err_code) {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* CONFIG_MALI400_UMP */
+
+/* ioctl wrapper: report the buffer size user space must allocate before
+ * requesting an MMU page table dump (see mem_dump_mmu_page_table_wrapper). */
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: dump the session's MMU page tables into a user buffer.
+ * The dump is produced into a temporary kernel buffer, copied to the
+ * user buffer, and the register_writes/page_table_dump pointers are
+ * rebased from kernel to user addresses before being written back.
+ * Fix: the user buffer address was already fetched into the local
+ * 'buffer' via get_user(); use it instead of dereferencing the __user
+ * struct (uargs->buffer) directly from kernel context. */
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if (0 != get_user(kargs.size, &uargs->size)) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ MALI_CHECK(kargs.size > 0, -ENOMEM);
+ kargs.buffer = _mali_osk_valloc(kargs.size);
+ if (NULL == kargs.buffer) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(buffer, kargs.buffer, kargs.size)) goto err_exit;
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+ return rc;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_pp.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_pp.c
new file mode 100644
index 00000000000000..9ed9acff97135b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_pp.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: submit a PP (fragment processor) job; the raw user
+ * pointer is consumed by _mali_ukk_pp_start_job itself. */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+/* ioctl wrapper: submit a combined PP+GP job pair in one call. */
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the jobs were started successfully, 0 is returned. If there was an error, but the
+ * jobs were started, we return -ENOENT. For anything else returned, the jobs were not
+ * started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_and_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+/* ioctl wrapper: report the number of PP cores; the whole result struct
+ * is copied back with the kernel ctx pointer scrubbed. */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: report the PP core hardware version to user space. */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: disable write-back units for a queued PP job (e.g. when
+ * its render target is no longer needed). Fire-and-forget: the callee
+ * returns no status and nothing is copied back. */
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+ _mali_uk_pp_disable_wb_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_pp_job_disable_wb(&kargs);
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_profiling.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_profiling.c
new file mode 100644
index 00000000000000..e9180322a18026
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_profiling.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/**
+ * profiling_start_wrapper() - UK ioctl wrapper: start event profiling.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct; the (possibly adjusted) event limit is
+ *         written back on success.
+ *
+ * Return: 0 on success, -EINVAL/-EFAULT on argument errors, otherwise the
+ * mapped _mali_osk_errcode_t error.
+ */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+	_mali_uk_profiling_start_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s))) {
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_start(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	/* Only the limit field goes back out; kargs.ctx is never copied to user. */
+	if (0 != put_user(kargs.limit, &uargs->limit)) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_add_event_wrapper() - UK ioctl wrapper: record one profiling event.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space event descriptor; nothing is written back.
+ *
+ * Return: 0 on success, -EINVAL on NULL uargs, -EFAULT on copy-in failure,
+ * otherwise the mapped _mali_osk_errcode_t error.
+ */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+	_mali_uk_profiling_add_event_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_add_event(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_stop_wrapper() - UK ioctl wrapper: stop profiling.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct; the captured event count is written back.
+ *
+ * No copy-in is needed: the only input is the session context.
+ *
+ * Return: 0 on success, -EINVAL/-EFAULT on argument errors, otherwise the
+ * mapped _mali_osk_errcode_t error.
+ */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+	_mali_uk_profiling_stop_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_stop(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.count, &uargs->count)) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_get_event_wrapper() - UK ioctl wrapper: fetch one recorded event.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct; index is read, the event data is
+ *         written back on success.
+ *
+ * Return: 0 on success, -EINVAL/-EFAULT on argument errors, otherwise the
+ * mapped _mali_osk_errcode_t error.
+ */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+	_mali_uk_profiling_get_event_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	/* Only the requested index is read from user space. */
+	if (0 != get_user(kargs.index, &uargs->index)) {
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+
+	err = _mali_ukk_profiling_get_event(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+	if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s))) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_clear_wrapper() - UK ioctl wrapper: clear recorded profiling data.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct; only checked for non-NULL, no fields
+ *         are read or written.
+ *
+ * Return: 0 on success, -EINVAL on NULL uargs, otherwise the mapped
+ * _mali_osk_errcode_t error.
+ */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+	_mali_uk_profiling_clear_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_clear(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_memory_usage_get_wrapper() - UK ioctl wrapper: query GPU memory use.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct, filled in on success.
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments, -EFAULT on copy-out
+ * failure, otherwise the mapped _mali_osk_errcode_t error.
+ */
+int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
+{
+	_mali_osk_errcode_t err;
+	_mali_uk_profiling_memory_usage_get_s kargs;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_memory_usage_get(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+	if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * profiling_report_sw_counters_wrapper() - UK ioctl wrapper: report SW counters.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space argument struct holding num_counters and a user pointer
+ *         to the counter array.
+ *
+ * The user-supplied counter array is copied into a temporary kernel buffer
+ * before being handed to _mali_ukk_sw_counters_report(); the buffer is freed
+ * on every exit path after the copy succeeds.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or an oversized count,
+ * -EFAULT on copy-in failure, -ENOMEM on allocation failure, otherwise the
+ * mapped _mali_osk_errcode_t error.
+ */
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+	_mali_uk_sw_counters_report_s kargs;
+	_mali_osk_errcode_t err;
+	u32 *counter_buffer;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
+		return -EFAULT;
+	}
+
+	/* make sure that kargs.num_counters is [at least somewhat] sane */
+	/* The 10000 cap also keeps sizeof(u32) * num_counters below overflow. */
+	if (kargs.num_counters > 10000) {
+		MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+		return -EINVAL;
+	}
+
+	counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+	if (NULL == counter_buffer) {
+		return -ENOMEM;
+	}
+
+	if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters)) {
+		kfree(counter_buffer);
+		return -EFAULT;
+	}
+
+	/* Swap the user pointer for the kernel copy before calling down. */
+	kargs.ctx = session_data;
+	kargs.counters = counter_buffer;
+
+	err = _mali_ukk_sw_counters_report(&kargs);
+
+	kfree(counter_buffer);
+
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
+
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_soft_job.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_soft_job.c
new file mode 100644
index 00000000000000..c5cb848694bf58
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_soft_job.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_soft_job.h"
+#include "mali_timeline.h"
+
+/**
+ * soft_job_start_wrapper() - UK ioctl wrapper: create and start a soft job.
+ * @session: session owning the soft job system and timeline system.
+ * @uargs: user-space argument struct (type, user_job, job_id_ptr, fence,
+ *         point).
+ *
+ * Return: 0 when the job was started and all copy-backs succeeded; -ENOENT
+ * when the job was started but the timeline point could not be written back;
+ * any other negative value means the job was not started.
+ */
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
+{
+	u32 type, user_job, point;
+	_mali_uk_fence_t uk_fence;
+	struct mali_timeline_fence fence;
+	struct mali_soft_job *job = NULL;
+	/* NOTE(review): get_user() into a pointer-typed local assumes a 32-bit
+	 * user pointer (ARM32 target) — confirm if this is ever built 64-bit. */
+	u32 __user *job_id_ptr = NULL;
+
+	/* If the job was started successfully, 0 is returned.  If there was an error, but the job
+	 * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session, -EINVAL);
+
+	MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+
+	if (0 != get_user(type, &uargs->type)) return -EFAULT;
+	if (0 != get_user(user_job, &uargs->user_job)) return -EFAULT;
+	if (0 != get_user(job_id_ptr, &uargs->job_id_ptr)) return -EFAULT;
+
+	if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+	mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+	/* Validate the job type before allocating anything. */
+	if (MALI_SOFT_JOB_TYPE_USER_SIGNALED < type) {
+		MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
+		return -EINVAL;
+	}
+
+	/* Create soft job. */
+	job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job);
+	if (unlikely(NULL == job)) {
+		return map_errcode(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Write job id back to user space. */
+	if (0 != put_user(job->id, job_id_ptr)) {
+		MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id"));
+		mali_soft_job_destroy(job);
+		return map_errcode(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Start soft job.  From here on the job is live and must not be destroyed
+	 * by this function. */
+	point = mali_soft_job_start(job, &fence);
+
+	if (0 != put_user(point, &uargs->point)) {
+		/* Let user space know that something failed after the job was started. */
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/**
+ * soft_job_signal_wrapper() - UK ioctl wrapper: signal a user-signaled soft job.
+ * @session: session owning the soft job system.
+ * @uargs: user-space argument struct; only job_id is read.
+ *
+ * NOTE(review): unlike the other wrappers, uargs is not NULL-checked here and
+ * session only via a debug assert — presumably the ioctl dispatcher guarantees
+ * both; confirm against the caller.
+ *
+ * Return: 0 on success, -EFAULT on copy-in failure, otherwise the mapped
+ * _mali_osk_errcode_t error from the signal operation.
+ */
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs)
+{
+	u32 job_id;
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT;
+
+	err = mali_soft_job_system_signal_job(session->soft_job_system, job_id);
+
+	return map_errcode(err);
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_timeline.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_timeline.c
new file mode 100644
index 00000000000000..c8abbe4a109203
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_timeline.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_timeline.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+/**
+ * timeline_get_latest_point_wrapper() - UK ioctl wrapper: latest timeline point.
+ * @session: session owning the timeline system.
+ * @uargs: user-space argument struct; timeline id is read, the latest point
+ *         on that timeline is written back.
+ *
+ * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for a timeline
+ * id outside [0, MALI_UK_TIMELINE_MAX).
+ */
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs)
+{
+	u32 val;
+	mali_timeline_id timeline;
+	mali_timeline_point point;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	if (0 != get_user(val, &uargs->timeline)) return -EFAULT;
+
+	/* Reject out-of-range timeline ids before indexing the timeline system. */
+	if (MALI_UK_TIMELINE_MAX <= val) {
+		return -EINVAL;
+	}
+
+	timeline = (mali_timeline_id)val;
+
+	point = mali_timeline_system_get_latest_point(session->timeline_system, timeline);
+
+	if (0 != put_user(point, &uargs->point)) return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * timeline_wait_wrapper() - UK ioctl wrapper: wait on a timeline fence.
+ * @session: session owning the timeline system.
+ * @uargs: user-space argument struct; fence and timeout are read, status
+ *         (1 = signaled, 0 = timeout/failure) is written back.
+ *
+ * Return: 0 on success (wait outcome is reported via uargs->status, not the
+ * return value), -EFAULT on user copy failure.
+ */
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs)
+{
+	u32 timeout, status;
+	mali_bool ret;
+	_mali_uk_fence_t uk_fence;
+	struct mali_timeline_fence fence;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+	if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT;
+
+	mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+	ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
+	/* Collapse mali_bool to a plain 0/1 for the user-visible status field. */
+	status = (MALI_TRUE == ret ? 1 : 0);
+
+	if (0 != put_user(status, &uargs->status)) return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * timeline_create_sync_fence_wrapper() - UK ioctl wrapper: export a sync fence.
+ * @session: session owning the timeline system.
+ * @uargs: user-space argument struct; the timeline fence is read, the created
+ *         sync file descriptor (or -1) is written back.
+ *
+ * Without CONFIG_SYNC the kernel cannot create sync fences, so -1 is
+ * reported to user space instead of an error.
+ *
+ * Return: 0 on success, -EFAULT on user copy failure.
+ */
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs)
+{
+	s32 sync_fd = -1;
+	_mali_uk_fence_t uk_fence;
+	struct mali_timeline_fence fence;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+
+	if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+	mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+#if defined(CONFIG_SYNC)
+	sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
+#else
+	sync_fd = -1;
+#endif /* defined(CONFIG_SYNC) */
+
+	if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT;
+
+	return 0;
+}
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_vsync.c b/drivers/parrot/gpu/mali400/linux/mali_ukk_vsync.c
new file mode 100644
index 00000000000000..bf5f357726128c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_vsync.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+/**
+ * vsync_event_report_wrapper() - UK ioctl wrapper: report a vsync event.
+ * @session_data: kernel-side session the ioctl was issued on.
+ * @uargs: user-space vsync event descriptor; nothing is written back.
+ *
+ * Return: 0 on success, -EINVAL on NULL uargs, -EFAULT on copy-in failure,
+ * otherwise the mapped _mali_osk_errcode_t error.
+ */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+	_mali_uk_vsync_event_report_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) {
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_vsync_event_report(&kargs);
+	if (_MALI_OSK_ERR_OK != err) {
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/parrot/gpu/mali400/linux/mali_ukk_wrappers.h b/drivers/parrot/gpu/mali400/linux/mali_ukk_wrappers.h
new file mode 100644
index 00000000000000..793393c63e282c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/linux/mali_ukk_wrappers.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Core session / notification wrappers. */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+
+/* Memory management wrappers. */
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user *argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user *argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
+
+/* Timeline and soft job wrappers. */
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
+
+/* UMP shared-memory wrappers (only with CONFIG_MALI400_UMP). */
+#if defined(CONFIG_MALI400_UMP)
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user *argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user *argument);
+#endif
+
+/* PP (pixel processor) and GP (geometry processor) job wrappers. */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+/* Profiling wrappers. */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
+
+/* Vsync wrapper. */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+/* Translate a _mali_osk_errcode_t into a negative Linux errno. */
+int map_errcode(_mali_osk_errcode_t err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/mali400/platform/arm/arm.c b/drivers/parrot/gpu/mali400/platform/arm/arm.c
new file mode 100644
index 00000000000000..617ca6eaa772f5
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/platform/arm/arm.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for:
+ * - Realview Versatile platforms with ARM11 Mpcore and virtex 5.
+ * - Versatile Express platforms with ARM Cortex-A9 and virtex 6.
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <asm/io.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+
+#include "arm_core_scaling.h"
+#include "mali_pp_scheduler.h"
+
+static void mali_platform_device_release(struct device *device);
+static u32 mali_read_phys(u32 phys_addr);
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value);
+#endif
+
+static int mali_core_scaling_enable = 1;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+static struct resource mali_gpu_resources_m450_mp8[] = {
+ MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+static struct resource mali_gpu_resources_m300[] = {
+ MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp1[] = {
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp2[] = {
+ MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
+};
+
+#endif
+
+static struct mali_gpu_device_data mali_gpu_data = {
+#if defined(CONFIG_ARCH_VEXPRESS)
+ .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+#elif defined(CONFIG_ARCH_REALVIEW)
+ .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
+ .dedicated_mem_size = 0x10000000, /* 256MB */
+#endif
+ .fb_start = 0xe0000000,
+ .fb_size = 0x01000000,
+ .max_job_runtime = 60000, /* 60 seconds */
+ .utilization_interval = 1000, /* 1000ms */
+ .utilization_callback = mali_gpu_utilization_callback,
+ .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
+ .pmu_domain_config = {0x1, 0x2, 0x4, 0x4, 0x4, 0x8, 0x8, 0x8, 0x8, 0x1, 0x2, 0x8},
+};
+
+static struct platform_device mali_gpu_device = {
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .dev.release = mali_platform_device_release,
+ .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+
+ .dev.platform_data = &mali_gpu_data,
+};
+
+/**
+ * mali_platform_device_register() - probe the GPU and register its device.
+ *
+ * Reads ID registers over physical I/O to detect which Mali variant is
+ * present (Mali-450 MP8 on VExpress; Mali-300 / Mali-400 MP1 / MP2 on
+ * RealView FPGA), attaches the matching resource table to the static
+ * platform device, registers it, enables runtime PM and initializes the
+ * example core scaling policy.
+ *
+ * Return: 0 on success, otherwise the platform_device_register() error.
+ */
+int mali_platform_device_register(void)
+{
+	int err = -1;
+	int num_pp_cores = 0;
+#if defined(CONFIG_ARCH_REALVIEW)
+	u32 m400_gp_version;
+#endif
+
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+	/* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+	if (mali_read_phys(0xFC020000) == 0x00010100) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+		num_pp_cores = 8;
+		mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
+		mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+	}
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+	/* GP version register distinguishes Mali-300 from Mali-400. */
+	m400_gp_version = mali_read_phys(0xC000006C);
+	if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+		num_pp_cores = 1;
+		mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m300);
+		mali_gpu_device.resource = mali_gpu_resources_m300;
+		mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+	} else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+		/* FPGA firmware version tells MP1 r1p0/r1p1 apart from MP2 r1p1. */
+		u32 fpga_fw_version = mali_read_phys(0xC0010000);
+		if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+			/* Mali-400 MP1 r1p0 or r1p1 */
+			MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+			num_pp_cores = 1;
+			mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp1);
+			mali_gpu_device.resource = mali_gpu_resources_m400_mp1;
+			mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+		} else if (fpga_fw_version == 0x130C000F) {
+			/* Mali-400 MP2 r1p1 */
+			MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+			num_pp_cores = 2;
+			mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp2);
+			mali_gpu_device.resource = mali_gpu_resources_m400_mp2;
+			mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+		}
+	}
+
+#endif
+	/* Register the platform device */
+	err = platform_device_register(&mali_gpu_device);
+	if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+		pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+		pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+		pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+		MALI_DEBUG_ASSERT(0 < num_pp_cores);
+		mali_core_scaling_init(num_pp_cores);
+
+		return 0;
+	}
+
+	return err;
+}
+
+/**
+ * mali_platform_device_unregister() - tear down the Mali platform device.
+ *
+ * Stops the core scaling policy, then unregisters the device.
+ * platform_device_unregister() already performs device_del() plus the final
+ * reference drop on the embedded struct device; the previous extra
+ * platform_device_put() call released that reference a second time (and is
+ * wrong for a statically allocated device anyway), so it has been removed.
+ */
+void mali_platform_device_unregister(void)
+{
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+	mali_core_scaling_term();
+	platform_device_unregister(&mali_gpu_device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+	mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+}
+
+/* Release callback for the static platform device: nothing to free, but the
+ * driver core warns if a device has no release method, so provide a stub. */
+static void mali_platform_device_release(struct device *device)
+{
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+/**
+ * mali_read_phys() - read one 32-bit register at a physical address.
+ * @phys_addr: physical address to read.
+ *
+ * Maps the surrounding 8 KiB-aligned page uncached, reads the word and
+ * unmaps again; used only for one-off probing at init time.
+ *
+ * Return: the register value, or 0xDEADBEEF if the mapping failed.
+ */
+static u32 mali_read_phys(u32 phys_addr)
+{
+	u32 phys_addr_page = phys_addr & 0xFFFFE000;
+	u32 phys_offset = phys_addr & 0x00001FFF;
+	u32 map_size = phys_offset + sizeof(u32);
+	u32 ret = 0xDEADBEEF;
+	void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+	if (NULL != mem_mapped) {
+		ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
+		iounmap(mem_mapped);
+	}
+
+	return ret;
+}
+
+#if defined(CONFIG_ARCH_REALVIEW)
+/**
+ * mali_write_phys() - write one 32-bit register at a physical address.
+ * @phys_addr: physical address to write.
+ * @value: value to store.
+ *
+ * Counterpart of mali_read_phys(); a failed mapping is silently ignored,
+ * which is acceptable for this init-time FPGA configuration write.
+ */
+static void mali_write_phys(u32 phys_addr, u32 value)
+{
+	u32 phys_addr_page = phys_addr & 0xFFFFE000;
+	u32 phys_offset = phys_addr & 0x00001FFF;
+	u32 map_size = phys_offset + sizeof(u32);
+	void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+	if (NULL != mem_mapped) {
+		iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
+		iounmap(mem_mapped);
+	}
+}
+#endif
+
+/**
+ * param_set_core_scaling() - module-param setter for mali_core_scaling_enable.
+ * @val: textual value written to the parameter.
+ * @kp: kernel_param descriptor (backed by mali_core_scaling_enable).
+ *
+ * Fix: the return value of param_set_int() was previously ignored, so a
+ * malformed write would still trigger a core-scaling sync against the stale
+ * setting. Parse errors are now propagated and the sync only runs after a
+ * successful parse.
+ *
+ * Return: 0 on success, otherwise the param_set_int() error.
+ */
+static int param_set_core_scaling(const char *val, const struct kernel_param *kp)
+{
+	int ret = param_set_int(val, kp);
+
+	if (0 != ret) {
+		return ret;
+	}
+
+	if (1 == mali_core_scaling_enable) {
+		mali_core_scaling_sync(mali_pp_scheduler_get_num_cores_enabled());
+	}
+	return ret;
+}
+
+static struct kernel_param_ops param_ops_core_scaling = {
+	.set = param_set_core_scaling,
+	.get = param_get_int,
+};
+
+module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
+MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy");
+
+/* Utilization callback registered via mali_gpu_data: feed each sample into
+ * the example core scaling policy when it is enabled. */
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+	if (1 == mali_core_scaling_enable) {
+		mali_core_scaling_update(data);
+	}
+}
diff --git a/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.c b/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.c
new file mode 100644
index 00000000000000..f846163a7604c2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+/* Workqueue handler: apply the current num_cores_enabled to the driver.
+ * Runs in process context because mali_perf_set_num_pp_cores() cannot be
+ * called from the timer-context utilization callback. */
+static void set_num_cores(struct work_struct *work)
+{
+	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+	MALI_DEBUG_ASSERT(0 == err);
+	MALI_IGNORE(err);
+}
+
+/* Enable one more PP core (if any are still disabled) and schedule the
+ * workqueue to apply the change. */
+static void enable_one_core(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		++num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Disable one PP core, but never go below one enabled core, and schedule
+ * the workqueue to apply the change. */
+static void disable_one_core(void)
+{
+	if (1 < num_cores_enabled) {
+		--num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Jump straight to all PP cores enabled (used on high utilization) and
+ * schedule the workqueue to apply the change. */
+static void enable_max_num_cores(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		num_cores_enabled = num_cores_total;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+	}
+
+	MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+/**
+ * mali_core_scaling_init() - initialize the example core scaling policy.
+ * @num_pp_cores: total number of PP cores; all assumed enabled initially.
+ */
+void mali_core_scaling_init(int num_pp_cores)
+{
+	INIT_WORK(&wq_work, set_num_cores);
+
+	num_cores_total = num_pp_cores;
+	num_cores_enabled = num_pp_cores;
+
+	/* NOTE: Mali is not fully initialized at this point. */
+}
+
+/* Resynchronize the policy's view of enabled cores with the driver's actual
+ * count (e.g. after the module parameter toggles core scaling). */
+void mali_core_scaling_sync(int num_cores)
+{
+	num_cores_enabled = num_cores;
+}
+
+/* Terminate the policy: wait for any pending set_num_cores() work to finish
+ * before the module goes away. */
+void mali_core_scaling_term(void)
+{
+	flush_scheduled_work();
+}
+
+/*
+ * Integer percentage of a maximum, rounded to nearest.
+ *
+ * Fix: the original used floating point ((percent)*(max)/100.0 + 0.5), which
+ * is not allowed in kernel code (it only worked because the compiler could
+ * constant-fold every use). This integer form yields identical values for
+ * all the thresholds used below (230, 128, 102, 0 for max == 256).
+ */
+#define PERCENT_OF(percent, max) (((percent) * (max) + 50) / 100)
+
+/**
+ * mali_core_scaling_update() - feed one utilization sample into the policy.
+ * @data: utilization sample (fields are on a 0..256 scale).
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+	/*
+	 * This function implements a very trivial PP core scaling algorithm.
+	 *
+	 * It is _NOT_ of production quality.
+	 * The only intention behind this algorithm is to exercise and test the
+	 * core scaling functionality of the driver.
+	 * It is _NOT_ tuned for neither power saving nor performance!
+	 *
+	 * Other metrics than PP utilization need to be considered as well
+	 * in order to make a good core scaling algorithm.
+	 */
+
+	MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+	/* NOTE: this function is normally called directly from the utilization callback which is in
+	 * timer context. */
+
+	if (PERCENT_OF(90, 256) < data->utilization_pp) {
+		enable_max_num_cores();
+	} else if (PERCENT_OF(50, 256) < data->utilization_pp) {
+		enable_one_core();
+	} else if (PERCENT_OF(40, 256) < data->utilization_pp) {
+		/* do nothing */
+	} else if (PERCENT_OF(0, 256) < data->utilization_pp) {
+		disable_one_core();
+	} else {
+		/* do nothing */
+	}
+}
diff --git a/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.h b/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.h
new file mode 100644
index 00000000000000..9e984b840f7714
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/platform/arm/arm_core_scaling.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+/**
+ * Resynchronize the policy with the actual number of enabled PP cores.
+ *
+ * @param num_cores Number of cores currently enabled by the driver.
+ */
+void mali_core_scaling_sync(int num_cores);
+
+#endif /* __ARM_CORE_SCALING_H__ */
diff --git a/drivers/parrot/gpu/mali400/readme.txt b/drivers/parrot/gpu/mali400/readme.txt
new file mode 100755
index 00000000000000..26095066bb42b4
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/readme.txt
@@ -0,0 +1,24 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+  ump_option: 1 = Enable UMP support (*)
+              0 = Disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
diff --git a/drivers/parrot/gpu/mali400/regs/mali_200_regs.h b/drivers/parrot/gpu/mali400/regs/mali_200_regs.h
new file mode 100644
index 00000000000000..22806db1dd18f4
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/regs/mali_200_regs.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg {
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_ADDR_MGMT_PERFMON_CONTR = 0x10b0,
+ MALI200_REG_ADDR_MGMT_PERFMON_BASE = 0x10b4,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1 << 0),
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1 << 3),
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1 << 5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1 << 6),
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1 << 7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1 << 0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1 << 1),
+ MALI200_REG_VAL_IRQ_HANG = (1 << 2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1 << 3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1 << 4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1 << 5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1 << 6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1 << 7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1 << 8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1 << 10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1 << 11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1 << 12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1 << 0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1 << 4),
+};
+
+enum mali200_render_unit {
+ MALI200_REG_ADDR_FRAME = 0x0000,
+ MALI200_REG_ADDR_RSW = 0x0004,
+ MALI200_REG_ADDR_STACK = 0x0030,
+ MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+ MALI200_REG_ADDR_ORIGIN_OFFSET_X = 0x0040
+};
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+ MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+ MALI200_REG_ADDR_WB_SOURCE_ADDR = 0x0004,
+};
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/parrot/gpu/mali400/regs/mali_gp_regs.h b/drivers/parrot/gpu/mali400/regs/mali_gp_regs.h
new file mode 100644
index 00000000000000..7c9b5c80872fb0
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/regs/mali_gp_regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum {
+ MALIGP2_REG_VAL_CMD_START_VS = (1 << 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1 << 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1 << 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1 << 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1 << 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1 << 9),
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1 << 10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different sources that can be instrumented on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.c b/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 00000000000000..d1c3e996839749
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.h b/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 00000000000000..f0bd599f7c8d28
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__("MCR p15, 0, %0, c15, c12, 0" : : "r"(mask));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__("MRC p15, 0, %0, c15, c12, 1" : "=r"(result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.c b/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.c
new file mode 100644
index 00000000000000..d1c3e996839749
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.h b/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.h
new file mode 100644
index 00000000000000..268ede5f2c7b61
--- /dev/null
+++ b/drivers/parrot/gpu/mali400/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/Kbuild b/drivers/parrot/gpu/mali400_legacy/Kbuild
new file mode 100755
index 00000000000000..060639c9e2912f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/Kbuild
@@ -0,0 +1,199 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 1
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+
+# Set path to driver source
+DRIVER_DIR=$(KBUILD_SRC)/$(src)
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary
+ ifeq ($(CONFIG_MALI400_PROFILING),y)
+ $(error Profiling is incompatible with non-GPL license)
+ endif
+ ifeq ($(CONFIG_PM_RUNTIME),y)
+ $(error Runtime PM is incompatible with non-GPL license)
+ endif
+ ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
+ $(error DMA-BUF is incompatible with non-GPL license)
+ endif
+ $(error Linux Device integration is incompatible with non-GPL license)
+else
+ ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+
+mali-y += \
+ linux/mali_osk_atomics.o \
+ linux/mali_osk_irq.o \
+ linux/mali_osk_wq.o \
+ linux/mali_osk_locks.o \
+ linux/mali_osk_wait_queue.o \
+ linux/mali_osk_low_level_mem.o \
+ linux/mali_osk_math.o \
+ linux/mali_osk_memory.o \
+ linux/mali_osk_misc.o \
+ linux/mali_osk_mali.o \
+ linux/mali_osk_notification.o \
+ linux/mali_osk_time.o \
+ linux/mali_osk_timers.o
+
+mali-y += \
+ linux/mali_ukk_mem.o \
+ linux/mali_ukk_gp.o \
+ linux/mali_ukk_pp.o \
+ linux/mali_ukk_core.o
+
+# Source files which always are included in a build
+mali-y += \
+ common/mali_kernel_core.o \
+ linux/mali_kernel_linux.o \
+ common/mali_kernel_descriptor_mapping.o \
+ common/mali_session.o \
+ linux/mali_device_pause_resume.o \
+ common/mali_kernel_vsync.o \
+ linux/mali_ukk_vsync.o \
+ linux/mali_kernel_sysfs.o \
+ common/mali_mmu.o \
+ common/mali_mmu_page_directory.o \
+ common/mali_memory.o \
+ common/mali_kernel_memory_engine.o \
+ common/mali_block_allocator.o \
+ common/mali_kernel_mem_os.o \
+ common/mali_mem_validation.o \
+ common/mali_hw_core.o \
+ common/mali_gp.o \
+ common/mali_pp.o \
+ common/mali_pp_job.o \
+ common/mali_gp_job.o \
+ common/mali_scheduler.o \
+ common/mali_gp_scheduler.o \
+ common/mali_pp_scheduler.o \
+ common/mali_group.o \
+ common/mali_dlbu.o \
+ common/mali_broadcast.o \
+ common/mali_pm.o \
+ common/mali_pmu.o \
+ common/mali_user_settings_db.o \
+ common/mali_kernel_utilization.o \
+ common/mali_l2_cache.o \
+ common/mali_pm_domain.o \
+ linux/mali_osk_pm.o \
+ linux/mali_pmu_power_up_down.o \
+ __malidrv_build_info.o
+
+ifneq ($(MALI_PLATFORM_FILES),)
+ mali-y += $(MALI_PLATFORM_FILES:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_dma_buf.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o linux/mali_sync_user.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DPROFILING_SKIP_PP_JOBS=$(PROFILING_SKIP_PP_JOBS)
+ccflags-y += -DPROFILING_SKIP_PP_AND_GP_JOBS=$(PROFILING_SKIP_PP_AND_GP_JOBS)
+
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+ ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+ccflags-$(CONFIG_MALI400_UMP) += -I$(DRIVER_DIR)/../ump/include/ump
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(DRIVER_DIR); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(DRIVER_DIR); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(DRIVER_DIR); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(DRIVER_DIR); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(DRIVER_DIR)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(DRIVER_DIR)/__malidrv_build_info.c
diff --git a/drivers/parrot/gpu/mali400_legacy/Kconfig b/drivers/parrot/gpu/mali400_legacy/Kconfig
new file mode 100755
index 00000000000000..a835c6ac61ef37
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/Kconfig
@@ -0,0 +1,57 @@
+config MALI400
+ tristate "Mali-300/400/450 support"
+ depends on ARM
+ select DMA_SHARED_BUFFER
+ ---help---
+ This enables support for the ARM Mali-300, Mali-400, and Mali-450
+ GPUs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mali.
+
+config MALI400_DEBUG
+ bool "Enable debug in Mali driver"
+ depends on MALI400
+ ---help---
+ This enabled extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+ bool "Enable Mali profiling"
+ depends on MALI400
+ select TRACEPOINTS
+ default y
+ ---help---
+ This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+ bool "Enable internal Mali profiling API"
+ depends on MALI400_PROFILING
+ default n
+ ---help---
+ This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+ bool "Enable UMP support"
+ depends on MALI400
+ ---help---
+ This enables support for the UMP memory sharing API in the Mali driver.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+ bool "Map dma-buf attachments on attach"
+ depends on MALI400 && DMA_SHARED_BUFFER
+ default y
+ ---help---
+	  This makes the Mali driver map dma-buf attachments after doing
+	  attach. If this is not set, the dma-buf attachments will be mapped
+	  every time the GPU needs to access the buffer.
+
+ Mapping for each access can cause lower performance.
+
+config MALI_SHARED_INTERRUPTS
+ bool "Support for shared interrupts"
+ depends on MALI400
+ default n
+ ---help---
+ Adds functionality required to properly support shared interrupts. Without this support,
+ the device driver will fail during insmod if it detects shared interrupts. Works even if
+ the GPU is not using shared interrupts, but can cause lower performance.
diff --git a/drivers/parrot/gpu/mali400_legacy/Makefile b/drivers/parrot/gpu/mali400_legacy/Makefile
new file mode 100755
index 00000000000000..08440a9798238f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/Makefile
@@ -0,0 +1,138 @@
+#
+# Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give warning of old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in use. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in use. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifneq ($(BUILD),release)
+export CONFIG_MALI400_DEBUG=y
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/parrot/gpu/mali400_legacy/__malidrv_build_info.c b/drivers/parrot/gpu/mali400_legacy/__malidrv_build_info.c
new file mode 100644
index 00000000000000..0fd88a06a779c6
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/__malidrv_build_info.c
@@ -0,0 +1 @@
+const char *__malidrv_build_info(void) { return "malidrv: API_VERSION=26 REPO_URL=tags/os-linux-3.4.11-5-51-ge0cf911 REVISION=r3p2-01rel4-v2.6.27-193727-ge0cf911 CHANGED_REVISION=v2.6.27-193727-ge0cf911 CHANGE_DATE=2014-02-27 17:42:04 +0100 BUILD_DATE=Sat Mar 15 16:43:11 CET 2014 BUILD=release TARGET_PLATFORM= MALI_PLATFORM= KDIR= OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=16 USING_UMP= USING_PROFILING= USING_INTERNAL_PROFILING= USING_GPU_UTILIZATION=1 MALI_UPPER_HALF_SCHEDULING=1";}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.c b/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.c
new file mode 100755
index 00000000000000..2959874a4e1d47
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+	struct block_info * next; /* intrusive link; a node's index in all_blocks[] encodes its physical address */
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+	/* The list will be released in reverse order */
+	block_info *last_allocated;
+	mali_allocation_engine * engine; /* engine used to map (and later unmap) this allocation */
+	mali_memory_allocation * descriptor; /* descriptor the blocks were mapped into */
+	u32 start_offset; /* offset within the descriptor where this allocation's mapping starts */
+	u32 mapping_length; /* total bytes successfully mapped */
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+	_mali_osk_lock_t *mutex; /* serialises free-list manipulation */
+	block_info * all_blocks; /* array of num_blocks nodes, one per block of the region */
+	block_info * first_free; /* head of the singly-linked free list */
+	u32 base; /* physical base address of the managed region */
+	u32 cpu_usage_adjust; /* adjustment passed to the mapping engine for CPU-side addresses */
+	u32 num_blocks; /* number of MALI_BLOCK_SIZE blocks managed */
+} block_allocator;
+
+/* Allocator operations, wired into mali_physical_memory_allocator in mali_block_allocator_create */
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name) /* carve [base_address, base_address+size) into 256 kB blocks; NULL on failure */
+{
+	mali_physical_memory_allocator * allocator;
+	block_allocator * info;
+	u32 usable_size;
+	u32 num_blocks;
+
+	usable_size = size & ~(MALI_BLOCK_SIZE - 1); /* round down to a whole number of blocks */
+	MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+	MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+	num_blocks = usable_size / MALI_BLOCK_SIZE;
+	MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+	if (usable_size == 0) /* region smaller than one block: nothing to manage */
+	{
+		MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+		return NULL;
+	}
+
+	allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+	if (NULL != allocator)
+	{
+		info = _mali_osk_malloc(sizeof(block_allocator));
+		if (NULL != info)
+		{
+			info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+			if (NULL != info->mutex)
+			{
+				info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+				if (NULL != info->all_blocks)
+				{
+					u32 i;
+					info->first_free = NULL;
+					info->num_blocks = num_blocks;
+
+					info->base = base_address;
+					info->cpu_usage_adjust = cpu_usage_adjust;
+
+					for ( i = 0; i < num_blocks; i++) /* push every block onto the free list */
+					{
+						info->all_blocks[i].next = info->first_free;
+						info->first_free = &info->all_blocks[i];
+					}
+
+					allocator->allocate = block_allocator_allocate;
+					allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+					allocator->destroy = block_allocator_destroy;
+					allocator->stat = block_allocator_stat;
+					allocator->ctx = info;
+					allocator->name = name;
+
+					return allocator;
+				}
+				_mali_osk_lock_term(info->mutex); /* unwind: block array allocation failed */
+			}
+			_mali_osk_free(info);
+		}
+		_mali_osk_free(allocator);
+	}
+
+	return NULL; /* an allocation failed; everything acquired so far was released above */
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator) /* tear down allocator bookkeeping created in mali_block_allocator_create */
+{
+	block_allocator * info;
+	MALI_DEBUG_ASSERT_POINTER(allocator);
+	MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+	info = (block_allocator*)allocator->ctx;
+
+	_mali_osk_free(info->all_blocks);
+	_mali_osk_lock_term(info->mutex);
+	_mali_osk_free(info);
+	_mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+	return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE); /* node's array index scaled to its physical offset from base */
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info) /* back descriptor[*offset..size) with 256 kB blocks; *offset advances past what was mapped */
+{
+	block_allocator * info;
+	u32 left; /* bytes of the descriptor still to be backed */
+	block_info * last_allocated = NULL; /* reverse-ordered list of blocks taken in this call */
+	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+	block_allocator_allocation *ret_allocation;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT_POINTER(offset);
+	MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+	info = (block_allocator*)ctx;
+	left = descriptor->size - *offset;
+	MALI_DEBUG_ASSERT(0 != left);
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+	if ( NULL == ret_allocation )
+	{
+		/* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+		_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+		return result;
+	}
+
+	ret_allocation->start_offset = *offset;
+	ret_allocation->mapping_length = 0;
+
+	while ((left > 0) && (info->first_free)) /* take blocks off the free list until satisfied or exhausted */
+	{
+		block_info * block;
+		u32 phys_addr;
+		u32 padding; /* offset within the block when *offset is not block-aligned */
+		u32 current_mapping_size;
+
+		block = info->first_free;
+		info->first_free = info->first_free->next;
+		block->next = last_allocated; /* record for release (and for rollback on failure) */
+		last_allocated = block;
+
+		phys_addr = get_phys(info, block);
+
+		padding = *offset & (MALI_BLOCK_SIZE-1);
+
+		if (MALI_BLOCK_SIZE - padding < left) /* map at most the remainder of this block */
+		{
+			current_mapping_size = MALI_BLOCK_SIZE - padding;
+		}
+		else
+		{
+			current_mapping_size = left;
+		}
+
+		if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+		{
+			MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+			result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+			mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0); /* undo everything mapped by this call */
+
+			/* release all memory back to the pool */
+			while (last_allocated)
+			{
+				/* This relinks every block we've just allocated back into the free-list */
+				block = last_allocated->next;
+				last_allocated->next = info->first_free;
+				info->first_free = last_allocated;
+				last_allocated = block;
+			}
+
+			break;
+		}
+
+		*offset += current_mapping_size;
+		left -= current_mapping_size;
+		ret_allocation->mapping_length += current_mapping_size;
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	if (last_allocated)
+	{
+		if (left) result = MALI_MEM_ALLOC_PARTIAL; /* pool ran dry; another allocator may supply the rest */
+		else result = MALI_MEM_ALLOC_FINISHED;
+
+		/* Record all the information about this allocation */
+		ret_allocation->last_allocated = last_allocated;
+		ret_allocation->engine = engine;
+		ret_allocation->descriptor = descriptor;
+
+		alloc_info->ctx = info;
+		alloc_info->handle = ret_allocation;
+		alloc_info->release = block_allocator_release;
+	}
+	else
+	{
+		/* Free the allocation information - nothing to be passed back */
+		_mali_osk_free( ret_allocation );
+	}
+
+	return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle) /* unmap an allocation and return all of its blocks to the free list */
+{
+	block_allocator * info;
+	block_info * block, * next;
+	block_allocator_allocation *allocation;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(handle);
+
+	info = (block_allocator*)ctx;
+	allocation = (block_allocator_allocation*)handle;
+	block = allocation->last_allocated; /* head of the reverse-ordered block list built at allocate time */
+
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return; /* blocks stay allocated; better to leak than to corrupt the free list unlocked */
+	}
+
+	/* unmap */
+	mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+	while (block)
+	{
+		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks)))); /* valid nodes are &all_blocks[0..num_blocks-1]; was '>', which let a one-past-the-end pointer slip through */
+
+		next = block->next;
+
+		/* relink into free-list */
+		block->next = info->first_free;
+		info->first_free = block;
+
+		/* advance the loop */
+		block = next;
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	_mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block) /* hand out one whole block, CPU-mapped and zeroed, for MMU page tables */
+{
+	block_allocator * info;
+	mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+	MALI_DEBUG_ASSERT_POINTER(block);
+	info = (block_allocator*)ctx;
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+	if (NULL != info->first_free)
+	{
+		void * virt;
+		u32 phys;
+		u32 size;
+		block_info * alloc;
+		alloc = info->first_free;
+
+		phys = get_phys(info, alloc); /* Does not modify info or alloc */
+		size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+		virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+		/* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+		 * because it's unlikely another allocator will be able to map in. */
+
+		if ( NULL != virt )
+		{
+			block->ctx = info; /* same as incoming ctx */
+			block->handle = alloc;
+			block->phys_base = phys;
+			block->size = size;
+			block->release = block_allocator_release_page_table_block;
+			block->mapping = virt;
+
+			info->first_free = alloc->next; /* only dequeue once the mapping succeeded */
+
+			alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+			_mali_osk_memset(block->mapping, 0, size); /* page tables must start out empty */
+
+			result = MALI_MEM_ALLOC_FINISHED;
+		}
+	}
+	else result = MALI_MEM_ALLOC_NONE; /* pool empty; let another allocator try */
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+	return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block ) /* unmap a page-table block and return its block(s) to the free list */
+{
+	block_allocator * info;
+	block_info * block, * next;
+
+	MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+	info = (block_allocator*)page_table_block->ctx;
+	block = (block_info*)page_table_block->handle;
+
+	MALI_DEBUG_ASSERT_POINTER(info);
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+
+	if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+	{
+		MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+		return; /* better to leak the block than to corrupt the free list unlocked */
+	}
+
+	/* Unmap all the physical memory at once */
+	_mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+	/** @note This loop handles the case where more than one block_info was linked.
+	 * Probably unnecessary for page table block releasing. */
+	while (block)
+	{
+		next = block->next;
+
+		MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block >= (info->all_blocks + info->num_blocks)))); /* valid nodes are &all_blocks[0..num_blocks-1]; was '>', which let a one-past-the-end pointer slip through */
+
+		block->next = info->first_free;
+		info->first_free = block;
+
+		block = next;
+	}
+
+	_mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator) /* bytes currently allocated from this pool */
+{
+	block_allocator * info;
+	block_info *block;
+	u32 free_blocks = 0;
+
+	MALI_DEBUG_ASSERT_POINTER(allocator);
+
+	info = (block_allocator*)allocator->ctx;
+	block = info->first_free;
+
+	while(block) /* NOTE(review): walks the free list without taking info->mutex, so the figure is only approximate under concurrent alloc/release */
+	{
+		free_blocks++;
+		block = block->next;
+	}
+	return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.h b/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.h
new file mode 100755
index 00000000000000..fe57681cc590b8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.c b/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.c
new file mode 100755
index 00000000000000..4d755a9bf15c9d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static const int bcast_unit_reg_size = 0x1000; /* size of the register window mapped for the unit */
+static const int bcast_unit_addr_broadcast_mask = 0x0; /* register offset: which (MMU + PP) pairs receive broadcasts */
+static const int bcast_unit_addr_irq_override_mask = 0x4; /* register offset: IRQ override mask */
+
+struct mali_bcast_unit
+{
+	struct mali_hw_core hw_core; /* common HW-core bookkeeping (register mapping) */
+	u32 current_mask; /* cached mask so it can be restored by mali_bcast_reset */
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource) /* map the broadcast unit's registers and program an empty mask; NULL on failure */
+{
+	struct mali_bcast_unit *bcast_unit = NULL;
+
+	MALI_DEBUG_ASSERT_POINTER(resource);
+	MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+
+	bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+	if (NULL == bcast_unit)
+	{
+		MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+		return NULL;
+	}
+
+	if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size))
+	{
+		bcast_unit->current_mask = 0; /* no groups yet; mali_bcast_add_group populates the mask */
+		mali_bcast_reset(bcast_unit); /* push the (empty) mask to hardware */
+
+		return bcast_unit;
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+	}
+
+	_mali_osk_free(bcast_unit);
+
+	return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit) /* undo mali_bcast_unit_create */
+{
+	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+	mali_hw_core_delete(&bcast_unit->hw_core); /* unmap registers */
+	_mali_osk_free(bcast_unit);
+}
+
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group) /* cached only; takes effect on the next mali_bcast_reset */
+{
+	u32 bcast_id; /* NOTE(review): used as a bit mask (low half PP, high half MMU after <<16) - confirm against mali_pp_core_get_bcast_id */
+	u32 broadcast_mask;
+
+	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+	broadcast_mask = bcast_unit->current_mask;
+
+	broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+	broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+	/* store mask so we can restore on reset */
+	bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group) /* cached only; takes effect on the next mali_bcast_reset */
+{
+	u32 bcast_id; /* bit identifying the group's PP core; <<16 for its MMU (see mali_bcast_add_group) */
+	u32 broadcast_mask;
+
+	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+	broadcast_mask = bcast_unit->current_mask;
+
+	broadcast_mask &= ~((bcast_id << 16) | bcast_id); /* drop both the PP and the MMU bits */
+
+	/* store mask so we can restore on reset */
+	bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit) /* reprogram hardware from the cached mask (used after suspend/power-up) */
+{
+	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+	/* set broadcast mask */
+	mali_hw_core_register_write(&bcast_unit->hw_core,
+			bcast_unit_addr_broadcast_mask,
+			bcast_unit->current_mask);
+
+	/* set IRQ override mask */
+	mali_hw_core_register_write(&bcast_unit->hw_core,
+			bcast_unit_addr_irq_override_mask,
+			bcast_unit->current_mask & 0xFF); /* only the low byte is written here; presumably the PP bits - confirm with HW spec */
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit) /* zero both hardware masks; current_mask is untouched so mali_bcast_enable can restore it */
+{
+	MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+	/* set broadcast mask */
+	mali_hw_core_register_write(&bcast_unit->hw_core,
+			bcast_unit_addr_broadcast_mask,
+			0x0);
+
+	/* set IRQ override mask */
+	mali_hw_core_register_write(&bcast_unit->hw_core,
+			bcast_unit_addr_irq_override_mask,
+			0x0);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.h b/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.h
new file mode 100755
index 00000000000000..95bc9bb4572a72
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_broadcast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ * setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+	mali_bcast_reset(bcast_unit); /* rewriting the cached mask re-enables broadcasts to the previously added cores */
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.c b/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.c
new file mode 100755
index 00000000000000..fe12211628969a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0; /* physical address of the DLBU master tile list page (mapped at MALI_DLBU_VIRT_ADDR, see mali_dlbu.h) */
+static mali_io_address mali_dlbu_cpu_addr = 0; /* CPU-side mapping of the same page */
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - set this bit to 1 enables the AXI bus
+ between PPs and L2s, setting to 0 disables the router and
+ no further transactions are sent to DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum
+{
+	PP0ENABLE = 0,
+	PP1ENABLE,
+	PP2ENABLE,
+	PP3ENABLE,
+	PP4ENABLE,
+	PP5ENABLE,
+	PP6ENABLE,
+	PP7ENABLE
+} mali_dlbu_pp_enable; /* bit positions within MALI_DLBU_REGISTER_PP_ENABLE_MASK */
+
+struct mali_dlbu_core
+{
+	struct mali_hw_core hw_core; /**< Common for all HW cores */
+	u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU
+	                      see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void) /* allocate the MMU table page used as the DLBU master tile list */
+{
+
+	MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+	if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr))
+	{
+		MALI_SUCCESS;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void) /* release the master tile list page acquired in mali_dlbu_initialize */
+{
+	MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+	mali_mmu_release_table_page(mali_dlbu_phys_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource) /* map DLBU registers and reset the unit; NULL on failure */
+{
+	struct mali_dlbu_core *core = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+	core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+	if (NULL != core)
+	{
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE))
+		{
+			core->pp_cores_mask = 0; /* no PP cores participate until mali_dlbu_add_group */
+			if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core))
+			{
+				return core;
+			}
+			MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+			mali_hw_core_delete(&core->hw_core); /* unwind register mapping */
+		}
+
+		_mali_osk_free(core);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+	}
+
+	return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu) /* undo mali_dlbu_create */
+{
+	MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+	mali_dlbu_reset(dlbu); /* leave the unit in its reset state before unmapping */
+	mali_hw_core_delete(&dlbu->hw_core);
+	_mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu) /* write reset-time defaults to all seven DLBU registers; currently always returns _MALI_OSK_ERR_OK */
+{
+	u32 dlbu_registers[7];
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+	MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+	dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+	dlbu_registers[1] = MALI_DLBU_VIRT_ADDR; /* fixed virtual address of the master tile list */
+	dlbu_registers[2] = 0; /* TLLIST_VBASEADDR */
+	dlbu_registers[3] = 0; /* FB_DIM */
+	dlbu_registers[4] = 0; /* TLLIST_CONF */
+	dlbu_registers[5] = 0; /* START_TILE_POS */
+	dlbu_registers[6] = dlbu->pp_cores_mask; /* PP_ENABLE_MASK: restore the participating cores */
+
+	/* write reset values to core registers */
+	mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+	err = _MALI_OSK_ERR_OK;
+
+	return err;
+}
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu) /* push the cached pp_cores_mask to the PP_ENABLE_MASK register */
+{
+	MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+	mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group) /* cache-only; call mali_dlbu_update_mask (or mali_dlbu_reset) to apply */
+{
+	struct mali_pp_core *pp_core;
+	u32 bcast_id; /* NOTE(review): OR'd in as a bit mask, not an index - confirm against mali_pp_core_get_bcast_id */
+
+	MALI_DEBUG_ASSERT_POINTER( dlbu );
+	MALI_DEBUG_ASSERT_POINTER( group );
+
+	pp_core = mali_group_get_pp_core(group);
+	bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+	dlbu->pp_cores_mask |= bcast_id;
+	MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group) /* cache-only; call mali_dlbu_update_mask (or mali_dlbu_reset) to apply */
+{
+	struct mali_pp_core *pp_core;
+	u32 bcast_id; /* bit mask identifying the group's PP core */
+
+	MALI_DEBUG_ASSERT_POINTER( dlbu );
+	MALI_DEBUG_ASSERT_POINTER( group );
+
+	pp_core = mali_group_get_pp_core(group);
+	bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+	dlbu->pp_cores_mask &= ~bcast_id; /* clear the core's bit */
+	MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+	u32 *registers;
+	MALI_DEBUG_ASSERT(job);
+	registers = mali_pp_job_get_dlbu_registers(job); /* job-supplied values for the four tile-list registers */
+	MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+	/* Writing 4 registers:
+	 * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+	mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.h b/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.h
new file mode 100755
index 00000000000000..8fbfc35baf03f2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_dlbu.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp.c b/drivers/parrot/gpu/mali400_legacy/common/mali_gp.c
new file mode 100755
index 00000000000000..2bb9c1a7191cc7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+/* Allocate and initialise the (single) GP core object for the given HW
+ * resource, attach it to @group and install its IRQ handlers.
+ * Returns the new core, or NULL on any failure (all partial state is
+ * unwound in reverse order of construction). */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+{
+	struct mali_gp_core* core = NULL;
+
+	/* Only one GP core is supported; creating a second is a driver bug. */
+	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+	if (NULL != core)
+	{
+		/* No performance counters are armed until a job selects them. */
+		core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+		core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE))
+		{
+			_mali_osk_errcode_t ret;
+
+			/* Soft-reset the core so it starts from a known state. */
+			ret = mali_gp_reset(core);
+
+			if (_MALI_OSK_ERR_OK == ret)
+			{
+				ret = mali_group_add_gp_core(group, core);
+				if (_MALI_OSK_ERR_OK == ret)
+				{
+					/* Setup IRQ handlers (which will do IRQ probing if needed) */
+					core->irq = _mali_osk_irq_init(resource->irq,
+					                               mali_group_upper_half_gp,
+					                               group,
+					                               mali_gp_irq_probe_trigger,
+					                               mali_gp_irq_probe_ack,
+					                               core,
+					                               "mali_gp_irq_handlers");
+					if (NULL != core->irq)
+					{
+						/* Fully constructed: publish as the global GP core. */
+						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+						mali_global_gp_core = core;
+
+						return core;
+					}
+					else
+					{
+						MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+					}
+					/* IRQ setup failed: detach from the group again. */
+					mali_group_remove_gp_core(group);
+				}
+				else
+				{
+					MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+				}
+			}
+			/* Unwind the HW core mapping on any failure after creation. */
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+	}
+
+	return NULL;
+}
+
+/* Tear down a GP core created by mali_gp_create().
+ * Order matters: the IRQ handler must be removed before the register
+ * mapping it uses is deleted. Also clears the global core pointer. */
+void mali_gp_delete(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	_mali_osk_irq_term(core->irq);
+	mali_hw_core_delete(&core->hw_core);
+	mali_global_gp_core = NULL;
+	_mali_osk_free(core);
+}
+
+/* Ask the GP core to stop its bus (asynchronous; does not wait).
+ * Use mali_gp_stop_bus_wait() to block until the bus is actually stopped. */
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/* Stop the GP core's bus and busy-poll the status register until the
+ * hardware confirms the stop.
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the bus did
+ * not report stopped within MALI_REG_POLL_COUNT_FAST polls. */
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+	int attempt;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Issue the stop-bus command, then poll for completion. */
+	mali_gp_stop_bus(core);
+
+	for (attempt = 0; attempt < MALI_REG_POLL_COUNT_FAST; attempt++)
+	{
+		u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+
+		if (status & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+		{
+			return _MALI_OSK_ERR_OK;
+		}
+	}
+
+	/* Poll budget exhausted without the bus reporting stopped. */
+	MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Hard-reset the GP core and wait until the reset has taken effect.
+ * Completion is detected by writing a sentinel value to a scratch-usable
+ * register (WRITE_BOUND_LOW) and polling until the write sticks, which
+ * proves the core is accepting register writes again. */
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+	const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+	const u32 reset_invalid_value = 0xC0FFE000;
+	const u32 reset_check_value = 0xC01A0000;
+	const u32 reset_default_value = 0;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+	/* Park a value the check loop will never match, so a stale read
+	 * cannot be mistaken for reset completion. */
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+	{
+		mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register))
+		{
+			break;
+		}
+	}
+
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+	}
+
+	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+/* Kick off a soft reset of the GP core without waiting for completion.
+ * IRQs are masked and the RESET_COMPLETED bit cleared first so that
+ * mali_gp_reset_wait() can reliably detect the new completion. */
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+/* Wait for a soft reset started by mali_gp_reset_async() to finish.
+ * On success the interrupt state is cleared and the used-IRQ mask is
+ * restored. Returns _MALI_OSK_ERR_FAULT if RESET_COMPLETED never shows
+ * up in RAWSTAT within the poll budget. */
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+	u32 rawstat = 0;
+	int poll;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	for (poll = 0; poll < MALI_REG_POLL_COUNT_FAST; poll++)
+	{
+		rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+		if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED)
+		{
+			/* Reset done: clear all pending IRQs, unmask the used ones. */
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+			return _MALI_OSK_ERR_OK;
+		}
+	}
+
+	MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+	                  core->hw_core.description, rawstat));
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/* Synchronous soft reset: start the reset and block until it completes.
+ * Returns the result of the wait. */
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+	mali_gp_reset_async(core);
+	return mali_gp_reset_wait(core);
+}
+
+/* Program the GP core with @job's frame registers and performance-counter
+ * selection, then issue the start command (VS and/or PLBU, depending on
+ * which sub-jobs the job contains).
+ * Caller must hold the scheduling lock and own the core. */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+	u32 startcmd = 0;
+	u32 *frame_registers;
+
+	/* Validate both pointers before any dereference: the original code
+	 * read core->counter_src*_used and fetched the frame registers
+	 * before the pointer assert ran, and never asserted @job at all. */
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+	frame_registers = mali_gp_job_get_frame_registers(job);
+
+	/* Remember which counters this job armed so they can be read back in
+	 * mali_gp_update_performance_counters(). */
+	core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
+	core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
+
+	if (mali_gp_job_has_vs_job(job))
+	{
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+	}
+
+	if (mali_gp_job_has_plbu_job(job))
+	{
+		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+	}
+
+	/* A job with neither a VS nor a PLBU part should never reach here. */
+	MALI_DEBUG_ASSERT(0 != startcmd);
+
+	/* Program start/end addresses for the VS and PLBU command lists. */
+	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core. */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+
+	/* Barrier to make sure the previous register write is finished */
+	_mali_osk_write_mem_barrier();
+}
+
+/* Resume a GP job that stalled with PLBU_OUT_OF_MEM by handing it a new
+ * heap [start_addr, end_addr). The out-of-mem condition is re-checked in
+ * RAWSTAT first: if it is gone, the core was reset in the meantime and
+ * the resume is silently dropped. */
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+	u32 irq_readout;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+	if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+	{
+		/* Clear the stall condition (and any pending hang), then
+		 * program the fresh heap bounds before resuming. */
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+		MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+		_mali_osk_write_mem_barrier();
+	}
+	/*
+	 * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+	 * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+	 */
+}
+
+/* Read the hardware VERSION register of the GP core. */
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+/* Return the single global GP core, or NULL if none has been created. */
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+	return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/* IRQ-probe trigger callback (used by _mali_osk_irq_init when the IRQ
+ * number must be auto-detected): force a FORCE_HANG interrupt by writing
+ * RAWSTAT directly, with the used IRQs unmasked. */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/* IRQ-probe acknowledge callback: the probe succeeds only when the
+ * FORCE_HANG interrupt raised by mali_gp_irq_probe_trigger() is visible
+ * in the masked status register; on success it is cleared. */
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+	struct mali_gp_core *core = (struct mali_gp_core *)data;
+	u32 stat;
+
+	stat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+	if (0 == (stat & MALIGP2_REG_VAL_IRQ_FORCE_HANG))
+	{
+		/* Our forced interrupt did not arrive on this line. */
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Acknowledge it and make sure the clear has reached the HW. */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+	return _MALI_OSK_ERR_OK;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+/* Append a one-line description of this GP core to @buf (at most @size
+ * bytes) and return the number of characters written. */
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+	return _mali_osk_snprintf(buf, size, "\tGP: %s\n", core->hw_core.description);
+}
+#endif
+
+/* Read back the hardware performance counters that were armed for @job
+ * and store the values on the job (for return to user space); with
+ * CONFIG_MALI400_PROFILING they are also reported to the profiler.
+ * @suspend is currently unused. */
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+{
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+	{
+		u32 counter0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+
+		mali_gp_job_set_perf_counter_value0(job, counter0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, counter0);
+#endif
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+	{
+		u32 counter1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+
+		mali_gp_job_set_perf_counter_value1(job, counter1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, counter1);
+#endif
+	}
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp.h b/drivers/parrot/gpu/mali400_legacy/common/mali_gp.h
new file mode 100755
index 00000000000000..2b3695ecf61a82
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core
+{
+	struct mali_hw_core hw_core; /**< Common for all HW cores */
+	_mali_osk_irq_t *irq; /**< IRQ handler */
+	u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+	u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+/* Subsystem init/teardown. */
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+/* Core object lifetime; the core is attached to @group on creation. */
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+/* Bus stop and reset control; *_async/_wait split allows overlapping the
+ * HW reset with other work. */
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+/* Job control: start a job, or resume one stalled on PLBU out-of-memory. */
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+
+/*** Accessor functions ***/
+MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+{
+	return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+/* Read the masked interrupt status register. */
+MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+}
+
+/* Mask out every GP interrupt source. */
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+/* Read RAWSTAT, filtered down to the interrupt bits the driver uses. */
+MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+}
+
+/* Read the core STATUS register (bus/activity state). */
+MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+{
+	/* Enable all interrupts, except those specified in irq_exceptions */
+	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
+	                            MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+}
+
+/* Read the current PLBU heap allocation start address. */
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.c b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.c
new file mode 100755
index 00000000000000..fd67b25d3199ac
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/* Allocate a GP job object for @session from the user-space job
+ * description @uargs (copied in and validated here), tagged with kernel
+ * job id @id. Both completion notifications are pre-allocated so later
+ * stages never need to allocate. Returns NULL on any failure.
+ * The original had the same cleanup sequence duplicated in three error
+ * branches; this uses a single goto chain (CERT MEM12-C). */
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id)
+{
+	struct mali_gp_job *job;
+	u32 perf_counter_flag;
+
+	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+	if (NULL == job)
+	{
+		return NULL;
+	}
+
+	job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+	if (NULL == job->finished_notification)
+	{
+		goto fail_free_job;
+	}
+
+	job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+	if (NULL == job->oom_notification)
+	{
+		goto fail_delete_finished;
+	}
+
+	/* The job description is untrusted user-space input. */
+	if (0 != copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s)))
+	{
+		goto fail_delete_oom;
+	}
+
+	perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+	/* case when no counters came from user space
+	 * so pass the debugfs / DS-5 provided global ones to the job object */
+	if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+	      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
+	{
+		mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+		mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+	}
+
+	_mali_osk_list_init(&job->list);
+	job->session = session;
+	job->id = id;
+	/* frame_registers[4] carries the PLBU heap start address. */
+	job->heap_current_addr = job->uargs.frame_registers[4];
+	job->perf_counter_value0 = 0;
+	job->perf_counter_value1 = 0;
+	job->pid = _mali_osk_get_pid();
+	job->tid = _mali_osk_get_tid();
+
+	return job;
+
+	/* Error unwinding, in reverse order of construction. */
+fail_delete_oom:
+	_mali_osk_notification_delete(job->oom_notification);
+fail_delete_finished:
+	_mali_osk_notification_delete(job->finished_notification);
+fail_free_job:
+	_mali_osk_free(job);
+	return NULL;
+}
+
+/* Free a GP job and any of its pre-allocated notifications that were not
+ * handed off to user space (handed-off ones are NULLed elsewhere). */
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+
+	/* de-allocate the pre-allocated oom notifications */
+	if (NULL != job->oom_notification)
+	{
+		_mali_osk_notification_delete(job->oom_notification);
+		job->oom_notification = NULL;
+	}
+	if (NULL != job->finished_notification)
+	{
+		_mali_osk_notification_delete(job->finished_notification);
+		job->finished_notification = NULL;
+	}
+
+	_mali_osk_free(job);
+}
+
+/* Get/set the global (debugfs / DS-5 controlled) performance-counter
+ * sources applied to jobs that did not select counters themselves.
+ * The setters always succeed and return MALI_TRUE. */
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+	return gp_counter_src0;
+}
+
+mali_bool mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+	gp_counter_src0 = counter;
+
+	return MALI_TRUE;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+	return gp_counter_src1;
+}
+
+mali_bool mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+	gp_counter_src1 = counter;
+
+	return MALI_TRUE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.h b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.h
new file mode 100755
index 00000000000000..f241812b2be92d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_job.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_gp_job
+{
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ u32 id; /**< identifier for this job in kernel space (sequential numbering) */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+mali_bool mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+mali_bool mali_gp_job_set_gp_counter_src1(u32 counter);
+
+/* Kernel-side job id; 0 for a NULL job (the only NULL-tolerant accessor). */
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+	return (NULL == job) ? 0 : job->id;
+}
+
+/* User-space pointer identifying the job to its submitter. */
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+	return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+	return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+	return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+	return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+	return job->tid;
+}
+
+/* Raw frame registers as supplied by user space (programmed into the HW
+ * by mali_gp_job_start()). */
+MALI_STATIC_INLINE u32* mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+	return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+	return job->session;
+}
+
+/* A VS sub-job exists when the VS command-list start and end addresses
+ * (frame registers 0 and 1) differ. */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+	return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* A PLBU sub-job exists when the PLBU command-list start and end
+ * addresses (frame registers 2 and 3) differ. */
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+	return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+	return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+	job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+	return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+	return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+	return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+	job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+	job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+	job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+	job->perf_counter_value1 = value;
+}
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.c b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.c
new file mode 100755
index 00000000000000..ce405abccf93f7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.c
@@ -0,0 +1,593 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_kernel_utilization.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+enum mali_gp_slot_state
+{
+ MALI_GP_SLOT_STATE_IDLE,
+ MALI_GP_SLOT_STATE_WORKING,
+ MALI_GP_SLOT_STATE_DISABLED,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot
+{
+ struct mali_group *group;
+ /*
+ * We keep track of the state here as well as in the group object
+ * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+ */
+ enum mali_gp_slot_state state;
+ u32 returned_cookie;
+};
+
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static mali_bool mali_gp_scheduler_is_suspended(void);
+static void mali_gp_scheduler_job_queued(void);
+static void mali_gp_scheduler_job_completed(void);
+
+static _mali_osk_lock_t *gp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+ u32 num_groups;
+ u32 i;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+ gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == gp_scheduler_lock)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == gp_scheduler_working_wait_queue)
+ {
+ _mali_osk_lock_term(gp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Find all the available GP cores */
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++)
+ {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core)
+ {
+ if (0 == gp_version)
+ {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+ slot.group = group;
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ break; /* There is only one GP, no point in looking for more */
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_scheduler_terminate(void)
+{
+ MALI_DEBUG_ASSERT( MALI_GP_SLOT_STATE_IDLE == slot.state
+ || MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_ASSERT_POINTER(slot.group);
+ mali_group_delete(slot.group);
+
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(gp_scheduler_lock);
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptible lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+ _mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+}
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
+#endif
+
+static void mali_gp_scheduler_schedule(void)
+{
+ struct mali_gp_job *job;
+
+ mali_gp_scheduler_lock();
+
+ if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+ mali_gp_scheduler_unlock();
+ return; /* Nothing to do, so early out */
+ }
+
+ /* Get (and remove) next job in queue */
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+ _mali_osk_list_del(&job->list);
+
+ /* Mark slot as busy */
+ slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+ mali_gp_scheduler_unlock();
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+ mali_group_lock(slot.group);
+ mali_group_start_gp_job(slot.group, job);
+ mali_group_unlock(slot.group);
+}
+
+static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
+{
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+
+ if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+ {
+ mali_gp_scheduler_unlock();
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(), 0, 0, 0);
+#endif
+ return; /* Nothing to do, so early out */
+ }
+
+ /* Get (and remove) next job in queue */
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+ _mali_osk_list_del(&job->list);
+
+ /* Mark slot as busy */
+ slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+ mali_gp_scheduler_unlock();
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+
+ mali_group_start_gp_job(slot.group, job);
+}
+
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success)
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ }
+ else
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
+ job->finished_notification = NULL;
+
+ mali_gp_job_delete(job);
+}
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+ if (!success)
+ {
+ if (job->session->is_compositor)
+ {
+ mali_pp_scheduler_blocked_on_compositor = MALI_FALSE;
+ mali_pp_scheduler_schedule();
+ }
+ }
+
+ mali_gp_scheduler_return_job_to_user(job, success);
+
+ mali_gp_scheduler_lock();
+
+ /* Mark slot as idle again */
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0)
+ {
+ _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+ }
+
+ mali_gp_scheduler_schedule_on_group(group);
+
+ /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+ mali_gp_scheduler_job_completed();
+}
+
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s * jobres;
+ _mali_osk_notification_t * notification;
+
+ mali_gp_scheduler_lock();
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+ slot.returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = mali_gp_job_get_id(job);
+
+ mali_gp_scheduler_unlock();
+
+ jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
+
+ mali_session_send_notification(mali_gp_job_get_session(job), notification);
+
+ /*
+ * If this function failed, then we could return the job to user space right away,
+ * but there is a job timer anyway that will do that eventually.
+ * This is not exactly a common case anyway.
+ */
+}
+
+void mali_gp_scheduler_suspend(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_gp_scheduler_unlock();
+
+ _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
+}
+
+void mali_gp_scheduler_resume(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ mali_gp_scheduler_unlock();
+ if (0 == pause_count)
+ {
+ mali_gp_scheduler_schedule();
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data*)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if PROFILING_SKIP_PP_AND_GP_JOBS
+#warning GP jobs will not be executed
+ mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
+ return _MALI_OSK_ERR_OK;
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
+#endif
+
+ mali_gp_scheduler_job_queued();
+
+ mali_gp_scheduler_lock();
+
+ /* Put compositor's jobs on front of queue, and block non-compositor PP
+ * jobs from starting. */
+ if(job->session->is_compositor)
+ {
+ mali_pp_scheduler_blocked_on_compositor = MALI_TRUE;
+ _mali_osk_list_add(&job->list, &job_queue);
+ }
+ else
+ {
+ _mali_osk_list_addtail(&job->list, &job_queue);
+ }
+
+ mali_gp_scheduler_unlock();
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued. %s\n",
+ mali_gp_job_get_id(job), job, job->session->is_compositor? "Compositor": "Normal"));
+
+ mali_gp_scheduler_schedule();
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *resumed_job;
+ _mali_osk_notification_t *new_notification = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx)
+ {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ session = (struct mali_session_data*)args->ctx;
+ if (NULL == session)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
+ {
+ new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL == new_notification)
+ {
+ MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
+ mali_group_lock(slot.group);
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ mali_group_lock(slot.group);
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
+ {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+
+ resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+ if (NULL != resumed_job)
+ {
+ resumed_job->oom_notification = new_notification;
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ mali_group_unlock(slot.group);
+ _mali_osk_notification_delete(new_notification);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ mali_group_unlock(slot.group);
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *job, *tmp;
+
+ mali_gp_scheduler_lock();
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+ /* Check queue for jobs and remove */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list)
+ {
+ if (mali_gp_job_get_session(job) == session)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job));
+ _mali_osk_list_del(&(job->list));
+ mali_gp_job_delete(job);
+
+ mali_gp_scheduler_job_completed();
+ }
+ }
+
+ mali_gp_scheduler_unlock();
+
+ mali_group_abort_session(slot.group, session);
+}
+
+static mali_bool mali_gp_scheduler_is_suspended(void)
+{
+ mali_bool ret;
+
+ mali_gp_scheduler_lock();
+ ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE;
+ mali_gp_scheduler_unlock();
+
+ return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+
+ n += mali_group_dump_state(slot.group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+void mali_gp_scheduler_reset_all_groups(void)
+{
+ if (NULL != slot.group)
+ {
+ mali_group_lock(slot.group);
+ mali_group_reset(slot.group);
+ mali_group_unlock(slot.group);
+ }
+}
+
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ if (NULL != slot.group)
+ {
+ mali_group_zap_session(slot.group, session);
+ }
+}
+
+void mali_gp_scheduler_enable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group));
+
+ mali_group_lock(group);
+
+ if (MALI_GROUP_STATE_DISABLED != group->state)
+ {
+ mali_group_unlock(group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group));
+ return;
+ }
+
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ mali_group_power_on_group(group);
+ mali_group_reset(group);
+
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(group);
+
+ /* Pick up any jobs that might have been queued while the GP group was disabled. */
+ mali_gp_scheduler_schedule();
+}
+
+void mali_gp_scheduler_disable_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(slot.group == group);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group));
+
+ mali_gp_scheduler_suspend();
+ mali_group_lock(group);
+ mali_gp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ if (MALI_GROUP_STATE_DISABLED == group->state)
+ {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
+ MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group));
+ }
+ else
+ {
+ MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
+ slot.state = MALI_GP_SLOT_STATE_DISABLED;
+ group->state = MALI_GROUP_STATE_DISABLED;
+
+ mali_group_power_off_group(group);
+ }
+
+ mali_gp_scheduler_unlock();
+ mali_group_unlock(group);
+ mali_gp_scheduler_resume();
+}
+
+static void mali_gp_scheduler_job_queued(void)
+{
+ /* We hold a PM reference for every job we hold queued (and running) */
+ _mali_osk_pm_dev_ref_add();
+
+ if (mali_utilization_enabled())
+ {
+ /*
+ * We cheat a little bit by counting the PP as busy from the time a GP job is queued.
+ * This will be fine because we only loose the tiny idle gap between jobs, but
+ * we will instead get less utilization work to do (less locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+}
+
+static void mali_gp_scheduler_job_completed(void)
+{
+ /* Release the PM reference we got in the mali_gp_scheduler_job_queued() function */
+ _mali_osk_pm_dev_ref_dec();
+
+ if (mali_utilization_enabled())
+ {
+ mali_utilization_gp_end();
+ }
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.h b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.h
new file mode 100755
index 00000000000000..ca477c5fbc4d7c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_gp_scheduler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the GP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ */
+void mali_gp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
+
+void mali_gp_scheduler_enable_group(struct mali_group *group);
+void mali_gp_scheduler_disable_group(struct mali_group *group);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_group.c b/drivers/parrot/gpu/mali400_legacy/common/mali_group.c
new file mode 100755
index 00000000000000..68d0b7df78ba60
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_group.c
@@ -0,0 +1,2046 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_core.h"
+#include "mali_osk_profiling.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+enum mali_group_activate_pd_status
+{
+ MALI_GROUP_ACTIVATE_PD_STATUS_FAILED,
+ MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD,
+ MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD,
+};
+
+/* local helper functions */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_mmu_page_fault(struct mali_group *group);
+
+static void mali_group_post_process_job_pp(struct mali_group *group);
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
+
+void mali_group_lock(struct mali_group *group)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(group->lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptible lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+void mali_group_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+ _mali_osk_lock_signal(group->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+{
+ struct mali_group *group = NULL;
+ _mali_osk_lock_flags_t lock_flags;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS)
+ {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_calloc(1, sizeof(struct mali_group));
+ if (NULL != group)
+ {
+ group->timeout_timer = _mali_osk_timer_init();
+
+ if (NULL != group->timeout_timer)
+ {
+ _mali_osk_lock_order_t order;
+ _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+ if (NULL != dlbu)
+ {
+ order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
+ }
+ else
+ {
+ order = _MALI_OSK_LOCK_ORDER_GROUP;
+ }
+
+ group->lock = _mali_osk_lock_init(lock_flags, 0, order);
+ if (NULL != group->lock)
+ {
+ group->l2_cache_core[0] = core;
+ group->session = NULL;
+ group->page_dir_ref_count = 0;
+ group->power_is_on = MALI_TRUE;
+ group->state = MALI_GROUP_STATE_IDLE;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->pp_scheduler_list);
+ group->parent_group = NULL;
+ group->l2_cache_core_ref_count[0] = 0;
+ group->l2_cache_core_ref_count[1] = 0;
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
+
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core)
+{
+ /* This group object now owns the MMU core object */
+ group->mmu= mmu_core;
+ group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+ if (NULL == group->bottom_half_work_mmu)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+ /* This group object no longer owns the MMU core object */
+ group->mmu = NULL;
+ if (NULL != group->bottom_half_work_mmu)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+ group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+ if (NULL == group->bottom_half_work_gp)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the GP core object */
+ group->gp_core = NULL;
+ if (NULL != group->bottom_half_work_gp)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+ group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+ if (NULL == group->bottom_half_work_pp)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the PP core object */
+ group->pp_core = NULL;
+ if (NULL != group->bottom_half_work_pp)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+}
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+{
+ group->pm_domain = domain;
+}
+
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core)
+ {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core)
+ {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu)
+ {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group))
+ {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core)
+ {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ if (mali_global_groups[i] == group)
+ {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups)
+ {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer)
+ {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp)
+ {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+ _mali_osk_lock_term(group->lock);
+
+ _mali_osk_free(group);
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+ u32 i;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+ i = 0;
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list)
+ {
+ MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+ i++;
+ }
+})
+
+/**
+ * @brief Add child group to virtual group parent
+ *
+ * Before calling this function, child must have it's state set to JOINING_VIRTUAL
+ * to ensure it's not touched during the transition period. When this function returns,
+ * child's state will be IN_VIRTUAL.
+ */
+/* Add physical group @child to virtual group @parent; see doc comment above.
+ * Caller must hold the parent group lock. If @update_hw is false, only
+ * software state is updated (HW registers are left untouched). */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+{
+ mali_bool found;
+ u32 i;
+ struct mali_session_data *child_session;
+
+ MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(NULL == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
+
+ /* Link the child into the virtual group's member list. */
+ _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+ child->state = MALI_GROUP_STATE_IN_VIRTUAL;
+ child->parent_group = parent;
+
+ MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+ MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+ MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+ /* Keep track of the L2 cache cores of child groups */
+ found = MALI_FALSE;
+ for (i = 0; i < 2; i++)
+ {
+ if (parent->l2_cache_core[i] == child->l2_cache_core[0])
+ {
+ MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+ parent->l2_cache_core_ref_count[i]++;
+ found = MALI_TRUE;
+ }
+ }
+
+ if (!found)
+ {
+ /* First time we see this L2 cache, add it to our list */
+ i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+ MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core[i] = child->l2_cache_core[0];
+ parent->l2_cache_core_ref_count[i]++;
+ }
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_add_group(parent->bcast_core, child);
+ mali_dlbu_add_group(parent->dlbu_core, child);
+
+ /* While in the virtual group the parent's session is authoritative;
+ * the child's own session pointer is cleared. The old value is kept
+ * locally to decide how to update the child's MMU below. */
+ child_session = child->session;
+ child->session = NULL;
+
+ /* Above this comment, only software state is updated and the HW is not
+ * touched. Now, check if Mali is powered and skip the rest if it isn't
+ * powered.
+ */
+
+ if (!update_hw)
+ {
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+ return;
+ }
+
+ /* Update MMU */
+ MALI_DEBUG_ASSERT(0 == child->page_dir_ref_count);
+ if (parent->session == child_session)
+ {
+ /* Same session as parent: page directory already correct, just zap TLB. */
+ mali_mmu_zap_tlb(child->mmu);
+ }
+ else
+ {
+ if (NULL == parent->session)
+ {
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ }
+ else
+ {
+
+ mali_bool activate_success = mali_mmu_activate_page_directory(child->mmu,
+ mali_session_get_page_directory(parent->session));
+ MALI_DEBUG_ASSERT(activate_success);
+ MALI_IGNORE(activate_success);
+ }
+ }
+
+ /* Update HW only if power is on */
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+
+ /* Start job on child when parent is active */
+ if (NULL != parent->pp_running_job)
+ {
+ struct mali_pp_job *job = parent->pp_running_job;
+ /* NOTE(review): %x is used with pointer arguments (child, parent)
+ * below; %p would be the correct conversion specifier. */
+ MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+ child, mali_pp_job_get_id(job), parent));
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
+ mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ *
+ * Caller must hold the parent group lock; the child lock is taken internally.
+ * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
+ * to IDLE before it can be used.
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+ u32 i;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(parent == child->parent_group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
+ /* Removing groups while running is not yet supported. */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
+
+ mali_group_lock(child);
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_remove_group(parent->bcast_core, child);
+ mali_dlbu_remove_group(parent->dlbu_core, child);
+
+ /* Update HW only if power is on */
+ if (mali_pm_is_power_on())
+ {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ _mali_osk_list_delinit(&child->group_list);
+
+ /* Child inherits the parent's session on the way out. */
+ child->session = parent->session;
+ child->parent_group = NULL;
+ child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+ /* Keep track of the L2 cache cores of child groups */
+ i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+ /* Drop the refcount taken in mali_group_add_group(); forget the L2 slot
+ * when the last member using it leaves. */
+ parent->l2_cache_core_ref_count[i]--;
+
+ if (parent->l2_cache_core_ref_count[i] == 0)
+ {
+ parent->l2_cache_core[i] = NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+
+ mali_group_unlock(child);
+}
+
+/* Detach and return the last member of virtual group @parent.
+ * Caller must hold the parent group lock; the list must be non-empty.
+ * The returned child is in state LEAVING_VIRTUAL (see mali_group_remove_group). */
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+ struct mali_group *child;
+
+ MALI_ASSERT_GROUP_LOCKED(parent);
+
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+
+ /* Take the tail entry (most recently added member). */
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+
+ mali_group_remove_group(parent, child);
+
+ return child;
+}
+
+/* Reset every core owned by this group (DLBU, broadcast unit, MMU, GP, PP),
+ * skipping cores the group does not have. Caller must hold the group lock
+ * and no job may be running. */
+void mali_group_reset(struct mali_group *group)
+{
+ /*
+ * This function should not be used to abort jobs,
+ * currently only called during insmod and PM resume
+ */
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+ group->session = NULL;
+
+ if (NULL != group->dlbu_core)
+ {
+ mali_dlbu_reset(group->dlbu_core);
+ }
+
+ if (NULL != group->bcast_core)
+ {
+ mali_bcast_reset(group->bcast_core);
+ }
+
+ if (NULL != group->mmu)
+ {
+ mali_group_reset_mmu(group);
+ }
+
+ if (NULL != group->gp_core)
+ {
+ mali_gp_reset(group->gp_core);
+ }
+
+ if (NULL != group->pp_core)
+ {
+ mali_group_reset_pp(group);
+ }
+}
+
+/* Accessor: the group's GP core, or NULL if it has none. */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
+{
+ return group->gp_core;
+}
+
+/* Accessor: the group's PP core, or NULL if it has none. */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
+{
+ return group->pp_core;
+}
+
+/* Start GP job @job on this (idle, locked) group: conditionally invalidate L2,
+ * activate the job session's page directory, then kick the GP core and emit
+ * profiling events. If page-directory activation fails the job is NOT started
+ * and the group stays IDLE. */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ enum mali_group_activate_pd_status activate_status;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_gp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0])
+ {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_id(job));
+ }
+
+ activate_status = mali_group_activate_page_directory(group, session);
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+ {
+ /* if session is NOT kept Zapping is done as part of session switch */
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+ {
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+ mali_gp_job_start(group->gp_core, job);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
+ mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+#endif
+ group->gp_running_job = job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ }
+
+ /* Setup the timeout timer value and save the job id for the job running on the gp core */
+ /* NOTE(review): the timer is armed even when activation failed and no job
+ * was started above — confirm this is intended. */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+/* Start PP sub-job @sub_job of @job on this (idle, locked) group.
+ * Invalidates both L2 caches conditionally, activates the session's page
+ * directory, configures DLBU/stack addresses for virtual groups, starts the
+ * PP core(s), and emits per-core profiling events. If page-directory
+ * activation fails the job is NOT started and the group stays IDLE. */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+ struct mali_session_data *session;
+ enum mali_group_activate_pd_status activate_status;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+
+ session = mali_pp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0])
+ {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_id(job));
+ }
+
+ if (NULL != group->l2_cache_core[1])
+ {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_id(job));
+ }
+
+ activate_status = mali_group_activate_page_directory(group, session);
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+ {
+ /* if session is NOT kept Zapping is done as part of session switch */
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+ {
+ MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group))
+ {
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 core_num = 0;
+
+ /* Configure DLBU for the job */
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ mali_pp_write_addr_stack(child->pp_core, job);
+ core_num++;
+ }
+ }
+
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+
+ /* if the group is virtual, loop through physical groups which belong to this group
+ * and call profiling events for its cores as virtual */
+ if (MALI_TRUE == mali_group_is_virtual(group))
+ {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+ }
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Report L2 counters for every L2 slot that has live members. */
+ if (0 != group->l2_cache_core_ref_count[0])
+ {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1])
+ {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+ else /* group is physical - call profiling events for physical cores */
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+#endif
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ }
+
+ /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ /* NOTE(review): the timer is armed even when activation failed and no job
+ * was started above — confirm this is intended. */
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+}
+
+/* Resume a GP job that stalled on PLBU out-of-memory, giving it a new heap
+ * [start_addr, end_addr). Returns the running job on success, or NULL if the
+ * group is not in the OOM state or @job_id no longer matches (job aborted).
+ * Caller must hold the group lock. */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (group->state != MALI_GROUP_STATE_OOM ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id)
+ {
+ return NULL; /* Illegal request or job has already been aborted */
+ }
+
+ if (NULL != group->l2_cache_core[0])
+ {
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
+ }
+
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+
+ mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+
+ group->state = MALI_GROUP_STATE_WORKING;
+
+ return group->gp_running_job;
+}
+
+/* Reset this group's MMU. For a virtual group whose broadcast reset fails,
+ * fall back to resetting each member's MMU individually. */
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ _mali_osk_errcode_t err;
+
+ if (!mali_group_is_virtual(group))
+ {
+ /* This is a physical group or an idle virtual group -- simply wait for
+ * the reset to complete. */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ }
+ else /* virtual group */
+ {
+ /* Try the broadcast MMU first; success covers all members. */
+ err = mali_mmu_reset(group->mmu);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ return;
+ }
+
+ /* Loop through all members of this virtual group and wait
+ * until they are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ err = mali_mmu_reset(child->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ }
+ }
+}
+
+/* Asynchronously reset the group's PP core(s), then wait for completion.
+ * For a busy virtual group the reset is broadcast, so completion must be
+ * waited for on each member core individually. */
+static void mali_group_reset_pp(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ mali_pp_reset_async(group->pp_core);
+
+ if (!mali_group_is_virtual(group) || NULL == group->pp_running_job)
+ {
+ /* This is a physical group or an idle virtual group -- simply wait for
+ * the reset to complete. */
+ mali_pp_reset_wait(group->pp_core);
+ }
+ else /* virtual group */
+ {
+ /* Loop through all members of this virtual group and wait until they
+ * are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ mali_pp_reset_wait(child->pp_core);
+ }
+ }
+}
+
+/* Finish the currently-running PP job: post-process, reset the PP core
+ * (soft reset for success, full recovery reset for failure or a failed soft
+ * reset), release the page directory and notify the PP scheduler.
+ * Caller must hold the group lock and a PP job must be running. */
+static void mali_group_complete_pp(struct mali_group *group, mali_bool success)
+{
+ struct mali_pp_job *pp_job_to_return;
+ u32 pp_sub_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ mali_group_post_process_job_pp(group);
+
+ if (success)
+ {
+ /* Only do soft reset for successful jobs, a full recovery
+ * reset will be done for failed jobs. */
+ mali_pp_reset_async(group->pp_core);
+ }
+
+ /* Snapshot job info before clearing the group's running state. */
+ pp_job_to_return = group->pp_running_job;
+ pp_sub_job_to_return = group->pp_running_sub_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->pp_running_job = NULL;
+
+ mali_group_deactivate_page_directory(group, group->session);
+
+ /* Do hard reset if the job failed, or if soft reset fails */
+ if (!success || _MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core))
+ {
+ MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset PP, need to reset entire group\n"));
+
+ mali_group_recovery_reset(group);
+ }
+
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success);
+}
+
+/* Finish the currently-running GP job: post-process, reset the GP core
+ * (recovery reset if that fails), release the page directory and notify the
+ * GP scheduler. Caller must hold the group lock and a GP job must be running. */
+static void mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ mali_group_post_process_job_gp(group, MALI_FALSE);
+
+ mali_gp_reset_async(group->gp_core);
+
+ /* Snapshot job info before clearing the group's running state. */
+ gp_job_to_return = group->gp_running_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->gp_running_job = NULL;
+
+ mali_group_deactivate_page_directory(group, group->session);
+
+ if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core))
+ {
+ MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset GP, need to reset entire group\n"));
+
+ mali_group_recovery_reset(group);
+ }
+
+ mali_gp_scheduler_job_done(group, gp_job_to_return, success);
+}
+
+/* Abort GP job @job_id if it is still the one running on this group.
+ * Caller must hold the group lock. No-op when idle or a different job runs. */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* NOTE(review): when state != IDLE but gp_running_job is NULL (e.g. a PP
+ * job is working), mali_gp_job_get_id() is handed a NULL job — confirm the
+ * callee tolerates NULL or that callers guarantee a running GP job. */
+ if (MALI_GROUP_STATE_IDLE == group->state ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id)
+ {
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ mali_group_complete_gp(group, MALI_FALSE);
+}
+
+/* Abort PP job @job_id if it is still the one running on this group.
+ * Caller must hold the group lock. No-op when idle or a different job runs. */
+static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* NOTE(review): same NULL-job concern as mali_group_abort_gp_job —
+ * pp_running_job may be NULL here when a GP job is the one working. */
+ if (MALI_GROUP_STATE_IDLE == group->state ||
+ mali_pp_job_get_id(group->pp_running_job) != job_id)
+ {
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ mali_group_complete_pp(group, MALI_FALSE);
+}
+
+/* Abort any GP and/or PP job belonging to @session on this group, then drop
+ * the session from the group if it is no longer referenced. Groups that are
+ * members of a virtual group are skipped (handled via their parent). */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_pp_job *pp_job;
+ u32 gp_job_id = 0;
+ u32 pp_job_id = 0;
+ mali_bool abort_pp = MALI_FALSE;
+ mali_bool abort_gp = MALI_FALSE;
+
+ mali_group_lock(group);
+
+ if (mali_group_is_in_virtual(group))
+ {
+ /* Group is member of a virtual group, don't touch it! */
+ mali_group_unlock(group);
+ return;
+ }
+
+ gp_job = group->gp_running_job;
+ pp_job = group->pp_running_job;
+
+ /* Record job ids first; the abort helpers re-check them under the lock. */
+ if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session))
+ {
+ MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
+
+ gp_job_id = mali_gp_job_get_id(gp_job);
+ abort_gp = MALI_TRUE;
+ }
+
+ if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
+
+ pp_job_id = mali_pp_job_get_id(pp_job);
+ abort_pp = MALI_TRUE;
+ }
+
+ if (abort_gp)
+ {
+ mali_group_abort_gp_job(group, gp_job_id);
+ }
+ if (abort_pp)
+ {
+ mali_group_abort_pp_job(group, pp_job_id);
+ }
+
+ mali_group_remove_session_if_unused(group, session);
+
+ mali_group_unlock(group);
+}
+
+/* Return the global group at @index, or NULL if the index is out of range. */
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+ if(mali_global_num_groups > index)
+ {
+ return mali_global_groups[index];
+ }
+
+ return NULL;
+}
+
+/* Return the number of registered global groups. */
+u32 mali_group_get_glob_num_groups(void)
+{
+ return mali_global_num_groups;
+}
+
+/* Take a reference on @session's page directory for this group, switching the
+ * MMU if needed. Returns OK_KEPT_PD (same PD kept), OK_SWITCHED_PD (MMU
+ * switched to a new PD) or FAILED (another session already holds references,
+ * or the MMU switch failed). Caller must hold the group lock. */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ enum mali_group_activate_pd_status retval;
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+ MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+
+ if (0 != group->page_dir_ref_count)
+ {
+ /* Another job holds a PD reference: only the same session may join. */
+ if (group->session != session)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activating session FAILED: 0x%08x on group 0x%08X. Existing session: 0x%08x\n", session, group, group->session));
+ return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activating session already activated: 0x%08x on group 0x%08X. New Ref: %d\n", session, group, 1+group->page_dir_ref_count));
+ retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+
+ }
+ }
+ else
+ {
+ /* There might be another session here, but it is ok to overwrite it since group->page_dir_ref_count==0 */
+ if (group->session != session)
+ {
+ mali_bool activate_success;
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X. Ref: %d\n", session, group->session, group, 1+group->page_dir_ref_count));
+
+ activate_success = mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+ MALI_DEBUG_ASSERT(activate_success);
+ if ( MALI_FALSE== activate_success ) return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+ group->session = session;
+ retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X. Ref: %d\n", session->page_directory, group, 1+group->page_dir_ref_count));
+ retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+ }
+ }
+
+ /* Reference is taken on every non-FAILED path. */
+ group->page_dir_ref_count++;
+ return retval;
+}
+
+/* Drop one reference on the group's active page directory. @session must be
+ * the group's current session. Caller must hold the group lock. */
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_ASSERT(0 < group->page_dir_ref_count);
+ MALI_DEBUG_ASSERT(session == group->session);
+
+ group->page_dir_ref_count--;
+
+ /* As an optimization, the MMU still points to the group->session even if (0 == group->page_dir_ref_count),
+ and we do not call mali_mmu_activate_empty_page_directory(group->mmu); */
+ MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+}
+
+/* If @session is the group's current session and no page-directory references
+ * remain, point the MMU at the empty page directory and forget the session.
+ * Caller must hold the group lock. */
+static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (0 == group->page_dir_ref_count)
+ {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
+
+ if (group->session == session)
+ {
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+ }
+}
+
+/* Return whether this group's cores are powered. Caller must hold the lock. */
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ return group->power_is_on;
+}
+
+/* Mark a single (locked) group as powered on. State must not be WORKING/OOM. */
+void mali_group_power_on_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+ || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
+
+ group->power_is_on = MALI_TRUE;
+}
+
+/* Mark a single (locked) group as powered off and detach its session so the
+ * powered-down MMU is never touched. State must not be WORKING/OOM. */
+void mali_group_power_off_group(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_IN_VIRTUAL == group->state
+ || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
+
+ /* It is necessary to set group->session = NULL so that the powered off MMU is not written
+ * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
+ * pending bottom_halves does not access powered off cores. */
+
+ group->session = NULL;
+ group->power_is_on = MALI_FALSE;
+}
+
+/* Mark every non-disabled global group as powered on (PM resume path). */
+void mali_group_power_on(void)
+{
+ /* NOTE(review): loop index is signed int while it is compared against
+ * mali_global_num_groups — confirm the count type; u32 would trigger a
+ * signed/unsigned comparison warning. */
+ int i;
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ struct mali_group *group = mali_global_groups[i];
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state)
+ {
+ MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+ }
+ else
+ {
+ mali_group_power_on_group(group);
+ }
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
+}
+
+/* Mark every non-disabled global group as powered off (PM suspend path). */
+void mali_group_power_off(void)
+{
+ /* NOTE(review): same signed/unsigned loop-index concern as
+ * mali_group_power_on(). */
+ int i;
+
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ struct mali_group *group = mali_global_groups[i];
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state)
+ {
+ MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
+ }
+ else
+ {
+ mali_group_power_off_group(group);
+ }
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
+}
+
+/* Hard-recover a wedged group: stop the core buses, flush the MMU and clear
+ * any pending page fault, hard-reset every core (member-by-member for a
+ * virtual group, with the broadcast unit temporarily disabled), then reset
+ * the MMU and drop the session. Caller must hold the group lock. */
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* Stop cores, bus stop */
+ if (NULL != group->pp_core)
+ {
+ mali_pp_stop_bus(group->pp_core);
+ }
+ else
+ {
+ mali_gp_stop_bus(group->gp_core);
+ }
+
+ /* Flush MMU and clear page fault (if any) */
+ mali_mmu_activate_fault_flush_page_directory(group->mmu);
+ mali_mmu_page_fault_done(group->mmu);
+
+ /* Wait for cores to stop bus, then do a hard reset on them */
+ if (NULL != group->pp_core)
+ {
+ if (mali_group_is_virtual(group))
+ {
+ struct mali_group *child, *temp;
+
+ /* Disable the broadcast unit while we do reset directly on the member cores. */
+ mali_bcast_disable(group->bcast_core);
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ mali_pp_stop_bus_wait(child->pp_core);
+ mali_pp_hard_reset(child->pp_core);
+ }
+
+ mali_bcast_enable(group->bcast_core);
+ }
+ else
+ {
+ mali_pp_stop_bus_wait(group->pp_core);
+ mali_pp_hard_reset(group->pp_core);
+ }
+ }
+ else
+ {
+ mali_gp_stop_bus_wait(group->gp_core);
+ mali_gp_hard_reset(group->gp_core);
+ }
+
+ /* Reset MMU */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ MALI_IGNORE(err);
+
+ group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+/* Append a human-readable dump of this group's state (and its GP/PP cores
+ * and running jobs) to @buf, at most @size bytes. Returns bytes written. */
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
+ if (group->gp_core)
+ {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+ }
+ if (group->pp_core)
+ {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
+ group->pp_running_job, group->pp_running_sub_job);
+ }
+
+ return n;
+}
+#endif
+
+/* Handle an MMU page fault: fail the currently-running job (PP if the group
+ * has a PP core, otherwise GP), do a recovery reset (which also clears the
+ * fault), and report the job to its scheduler as failed.
+ * Caller must hold the group lock. */
+static void mali_group_mmu_page_fault(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (NULL != group->pp_core)
+ {
+ struct mali_pp_job *pp_job_to_return;
+ u32 pp_sub_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+
+ mali_group_post_process_job_pp(group);
+
+ pp_job_to_return = group->pp_running_job;
+ pp_sub_job_to_return = group->pp_running_sub_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->pp_running_job = NULL;
+
+ mali_group_deactivate_page_directory(group, group->session);
+
+ mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE);
+ }
+ else
+ {
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+
+ mali_group_post_process_job_gp(group, MALI_FALSE);
+
+ gp_job_to_return = group->gp_running_job;
+ group->state = MALI_GROUP_STATE_IDLE;
+ group->gp_running_job = NULL;
+
+ mali_group_deactivate_page_directory(group, group->session);
+
+ mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+
+ mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+ }
+}
+
+/* MMU interrupt upper half (IRQ context). Returns _MALI_OSK_ERR_OK if the
+ * interrupt was ours, _MALI_OSK_ERR_FAULT otherwise (shared IRQ line).
+ * On a hit, all MMU interrupts are masked and the bottom half is queued on
+ * this group — or on its virtual parent when the group is a member. */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_mmu_core *mmu = group->mmu;
+ u32 int_stat;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ /* Bail out if the PM domain is powering down; nothing may touch the HW. */
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain))
+ {
+ goto out;
+ }
+#endif
+
+ /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_mmu_get_int_status(mmu);
+ if (0 != int_stat)
+ {
+ struct mali_group *parent = group->parent_group;
+
+ /* page fault or bus error, we thread them both in the same way */
+ mali_mmu_mask_all_interrupts(mmu);
+ if (NULL == parent)
+ {
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+ }
+ else
+ {
+ _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
+ }
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+/* MMU interrupt bottom half (workqueue context). Runs on top-level groups
+ * only (never on virtual-group members). Reads the raw interrupt status and,
+ * on a page fault or read bus error, fails the running job via
+ * mali_group_mmu_page_fault(). Bails out if the group was powered off before
+ * the work item ran. Note: 'status' only exists in debug builds; it is only
+ * consumed inside MALI_DEBUG_PRINT, which compiles out together with it. */
+static void mali_group_bottom_half_mmu(void * data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_mmu_core *mmu = group->mmu;
+ u32 rawstat;
+ MALI_DEBUG_CODE(u32 status);
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ mali_group_lock(group);
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+
+ if ( MALI_FALSE == mali_group_power_is_on(group) )
+ {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
+ mali_group_unlock(group);
+ return;
+ }
+
+ rawstat = mali_mmu_get_rawstat(mmu);
+ MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
+
+ if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ /* An actual page fault has occurred. */
+ u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
+ /* fault_address is a u32; pass it as-is for %x — the previous (void*)
+ * cast was a format/argument mismatch. */
+ MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->hw_core.description));
+ MALI_IGNORE(fault_address);
+
+ mali_group_mmu_page_fault(group);
+ }
+
+ mali_group_unlock(group);
+}
+
+/* GP interrupt upper half (IRQ context). Returns _MALI_OSK_ERR_OK if the GP
+ * core raised the interrupt, _MALI_OSK_ERR_FAULT otherwise (shared IRQ line).
+ * On a hit, all GP interrupts are masked and the bottom half is queued. */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_gp_core *core = group->gp_core;
+ u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ /* Bail out if the PM domain is powering down; nothing may touch the HW. */
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain))
+ {
+ goto out;
+ }
+#endif
+
+ irq_readout = mali_gp_get_int_stat(core);
+
+ if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout)
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_gp_mask_all_interrupts(core);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+/* GP interrupt bottom half (workqueue context). Dispatches on the raw IRQ
+ * status: job completion, unknown-IRQ errors, software timeout, PLBU
+ * out-of-memory, or a partial END_CMD_LST (only one of the two needed
+ * completion interrupts seen), in that priority order. */
+static void mali_group_bottom_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ u32 irq_readout;
+ u32 irq_errors;
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+
+ mali_group_lock(group);
+
+ /* Work item may have been queued before the group was powered off. */
+ if ( MALI_FALSE == mali_group_power_is_on(group) )
+ {
+ MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ irq_readout = mali_gp_read_rawstat(group->gp_core);
+
+ MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+
+ /* Both VS and PLBU list-end interrupts plus an inactive core == job done. */
+ if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
+ {
+ u32 core_status = mali_gp_read_core_status(group->gp_core);
+ if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
+ group->core_timed_out = MALI_FALSE;
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ mali_group_complete_gp(group, MALI_TRUE);
+ mali_group_unlock(group);
+ return;
+ }
+ }
+
+ /*
+ * Now lets look at the possible error cases (IRQ indicating error or timeout)
+ * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
+ */
+ irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+ if (0 != irq_errors)
+ {
+ MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
+ group->core_timed_out = MALI_FALSE;
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ mali_group_complete_gp(group, MALI_FALSE);
+ mali_group_unlock(group);
+ return;
+ }
+ else if (group->core_timed_out) /* SW timeout */
+ {
+ group->core_timed_out = MALI_FALSE;
+ /* Only abort if the timer has not been re-armed in the meantime. */
+ if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job)
+ {
+ MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
+ mali_group_complete_gp(group, MALI_FALSE);
+ mali_group_unlock(group);
+ return;
+ }
+ }
+ else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+ {
+ /* GP wants more memory in order to continue. */
+ MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
+
+ group->state = MALI_GROUP_STATE_OOM;
+ mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+ mali_gp_scheduler_oom(group, group->gp_running_job);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ /*
+ * The only way to get here is if we only got one of two needed END_CMD_LST
+ * interrupts. Enable all but not the complete interrupt that has been
+ * received and continue to run.
+ */
+ mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Finalise accounting for the current GP job (completion or suspend).
+ *
+ * @param group   Group whose GP job is being post-processed; caller is
+ *                presumably holding the group lock — TODO confirm (no
+ *                assert here, unlike the PP variant).
+ * @param suspend MALI_TRUE when the job is only suspended (e.g. OOM),
+ *                MALI_FALSE on final completion.
+ *
+ * Cancels the SW timeout timer, harvests performance counters, emits the
+ * matching profiling event, and records the PLBU heap write pointer so a
+ * suspended job can be resumed with more heap memory.
+ */
+static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+{
+ /* Stop the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL == group->gp_running_job)
+ {
+ /* Nothing to do */
+ return;
+ }
+
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if (suspend)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+ }
+ else
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ /* L2 counters are only reported on real completion, and only when
+ * both counter sources are configured. */
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
+#endif
+
+ /* Remember how far the PLBU got into its heap so a resumed job can
+ * continue from here (see mali_group_resume_gp_with_new_heap). */
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+}
+
+/* IRQ upper half for a PP core.
+ *
+ * @param data struct mali_group * owning the PP core (cast from void*).
+ * @return _MALI_OSK_ERR_OK if the interrupt was ours (or deliberately
+ *         deferred); _MALI_OSK_ERR_FAULT when it was not (lets a shared
+ *         IRQ line be offered to the next handler).
+ *
+ * Masks the core's interrupts and schedules the bottom half. When
+ * MALI_UPPER_HALF_SCHEDULING is enabled, a clean END_OF_FRAME is instead
+ * completed directly here to cut latency, after re-checking RAWSTAT under
+ * the group lock.
+ */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_pp_core *core = group->pp_core;
+ u32 irq_readout;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ /* Pin the PM domain state so the core cannot be powered down while we
+ * probe its registers; bail out if the domain is already off. */
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain))
+ {
+ goto out;
+ }
+#endif
+
+ /*
+ * For Mali-450 there is one particular case we need to watch out for:
+ *
+ * Criteria 1) this function call can be due to a shared interrupt,
+ * and not necessary because this core signaled an interrupt.
+ * Criteria 2) this core is a part of a virtual group, and thus it should
+ * not do any post processing.
+ * Criteria 3) this core has actually indicated that is has completed by
+ * having set raw_stat/int_stat registers to != 0
+ *
+ * If all this criteria is meet, then we could incorrectly start post
+ * processing on the wrong group object (this should only happen on the
+ * parent group)
+ */
+#if !defined(MALI_UPPER_HALF_SCHEDULING)
+ if (mali_group_is_in_virtual(group))
+ {
+ /*
+ * This check is done without the group lock held, which could lead to
+ * a potential race. This is however ok, since we will safely re-check
+ * this with the group lock held at a later stage. This is just an
+ * early out which will strongly benefit shared IRQ systems.
+ */
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+#endif
+
+ irq_readout = mali_pp_get_int_stat(core);
+ if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout)
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_pp_mask_all_interrupts(core);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Currently no support for this interrupt event for the virtual PP core */
+ if (!mali_group_is_virtual(group))
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
+ irq_readout, 0, 0, 0, 0);
+ }
+#endif
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ /* Check if job is complete without errors */
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
+
+ mali_group_lock(group);
+
+ /* Check if job is complete without errors, again, after taking the group lock */
+ irq_readout = mali_pp_read_rawstat(core);
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout)
+ {
+ /* State changed before we got the lock: let the bottom half
+ * sort it out via the normal path. */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+ if (mali_group_is_virtual(group))
+ {
+ /* For a virtual (broadcast) group, wait until every child core
+ * has stopped rendering before completing. */
+ u32 status_readout = mali_pp_read_status(group->pp_core);
+ if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)
+ {
+ MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+ }
+
+ if (mali_group_is_in_virtual(group))
+ {
+ /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ err = _MALI_OSK_ERR_FAULT;
+ goto out;
+ }
+
+ group->core_timed_out = MALI_FALSE;
+ mali_group_complete_pp(group, MALI_TRUE);
+ /* No need to enable interrupts again, since the core will be reset while completing the job */
+
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+#endif
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ err = _MALI_OSK_ERR_OK;
+ goto out;
+ }
+
+out:
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_pm_domain_unlock_state(group->pm_domain);
+#endif
+
+ return err;
+}
+
+/* Deferred (workqueue) handler for PP core interrupts.
+ *
+ * @param data struct mali_group * owning the PP core (scheduled from the
+ *             upper half or from mali_group_timeout).
+ *
+ * Takes the group lock, re-reads RAWSTAT and dispatches on the cause:
+ * virtual-group membership hand-off, power-off race, clean END_OF_FRAME
+ * completion, unexpected error IRQ, SW timeout, or (fallback) re-enable
+ * interrupts. Every exit path pairs the START profiling event above with
+ * a STOP event and releases the group lock.
+ */
+static void mali_group_bottom_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ struct mali_pp_core *core = group->pp_core;
+ u32 irq_readout;
+ u32 irq_errors;
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+
+ mali_group_lock(group);
+
+ if (mali_group_is_in_virtual(group))
+ {
+ /* We're member of a virtual group, so interrupt should be handled by the virtual group */
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ /* The core may have been powered down between the upper half and this
+ * deferred work; its registers must not be touched in that case. */
+ if ( MALI_FALSE == mali_group_power_is_on(group) )
+ {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
+ mali_group_unlock(group);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ irq_readout = mali_pp_read_rawstat(group->pp_core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+
+ /* Check if job is complete without errors */
+ if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout)
+ {
+ if (mali_group_is_virtual(group))
+ {
+ /* For a virtual (broadcast) group, wait until every child core
+ * has stopped rendering before completing. */
+ u32 status_readout = mali_pp_read_status(group->pp_core);
+
+ if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)
+ {
+ MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+ group->core_timed_out = MALI_FALSE;
+ mali_group_complete_pp(group, MALI_TRUE);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ /*
+ * Now lets look at the possible error cases (IRQ indicating error or timeout)
+ * END_OF_FRAME and HANG interrupts are not considered error.
+ */
+ irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
+ if (0 != irq_errors)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
+ irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+ group->core_timed_out = MALI_FALSE;
+ mali_group_complete_pp(group, MALI_FALSE);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+ else if (group->core_timed_out) /* SW timeout */
+ {
+ group->core_timed_out = MALI_FALSE;
+ /* Only abort if the timer really expired (not re-armed) and a job
+ * is still attached. */
+ if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job)
+ {
+ MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
+ mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
+ mali_group_complete_pp(group, MALI_FALSE);
+ mali_group_unlock(group);
+ }
+ else
+ {
+ mali_group_unlock(group);
+ }
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+ return;
+ }
+
+ /*
+ * We should never get here, re-enable interrupts and continue
+ */
+ if (0 == irq_readout)
+ {
+ MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
+ mali_pp_get_hw_core_desc(group->pp_core)));
+ }
+ mali_pp_enable_interrupts(core);
+ mali_group_unlock(group);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), 0, 0, 0);
+}
+
+/* Finalise accounting for the current PP job.
+ *
+ * @param group Group whose PP job is being post-processed; the group lock
+ *              must be held (asserted below).
+ *
+ * Cancels the SW timeout timer, then harvests performance counters and
+ * emits per-core profiling events — for a virtual group, once per child
+ * physical core; for a physical group, once for its own core. L2 cache
+ * counters are reported only when both counter sources are configured.
+ */
+static void mali_group_post_process_job_pp(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* Stop the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->pp_running_job)
+ {
+ if (MALI_TRUE == mali_group_is_virtual(group))
+ {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job) << 8),
+ 0, 0);
+ }
+ /* A virtual group can reference up to two L2 caches; report each
+ * one that is in use and has both counter sources configured. */
+ if (0 != group->l2_cache_core_ref_count[0])
+ {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1])
+ {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ }
+ else
+ {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job) << 8),
+ 0, 0);
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+ }
+}
+
+/* SW watchdog callback fired when a job exceeds its timeout.
+ *
+ * @param data struct mali_group * whose timeout timer expired.
+ *
+ * Marks the group as timed out and defers the actual abort to the GP or
+ * PP bottom half (which re-checks the timer under the group lock); no
+ * group state other than the flag is touched in timer context.
+ */
+static void mali_group_timeout(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ group->core_timed_out = MALI_TRUE;
+
+ if (NULL != group->gp_core)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ }
+}
+
+/* Invalidate the MMU TLB for @session on this group, if it is active.
+ *
+ * @param group   Group to zap; locked internally.
+ * @param session Session whose mappings must be flushed.
+ *
+ * If the TLB zap fails, falls back to the page-fault handling path so
+ * stale translations cannot be used.
+ */
+void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ /* Early out - safe even if mutex is not held */
+ if (group->session != session) return;
+
+ mali_group_lock(group);
+
+ mali_group_remove_session_if_unused(group, session);
+
+ /* Re-check under the lock: the session is only zapped if it is still
+ * bound to this group after the unused-session cleanup above. */
+ if (group->session == session)
+ {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+ if (MALI_TRUE != zap_success)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+ mali_group_mmu_page_fault(group);
+ }
+ }
+
+ mali_group_unlock(group);
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+/* Emit one profiling event with the counter values of L2 cache @core_num.
+ *
+ * @param group    Group through which the L2 cache cores are reachable.
+ * @param core_num L2 cache id (0, 1 or 2); unknown ids are reported on
+ *                 the L20 channel (default case below).
+ *
+ * The group only holds references to up to two L2 cache cores, so for
+ * ids 1 and 2 both slots are checked for a matching cache id. If no
+ * slot matches, zeroed sources/values are reported.
+ */
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+ u32 source0 = 0;
+ u32 value0 = 0;
+ u32 source1 = 0;
+ u32 value1 = 0;
+ u32 profiling_channel = 0;
+
+ /* Map the L2 cache id onto its dedicated profiling channel. */
+ switch(core_num)
+ {
+ case 0: profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ case 1: profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+ break;
+ case 2: profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+ break;
+ default: profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ }
+
+ if (0 == core_num)
+ {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ if (1 == core_num)
+ {
+ if (1 == mali_l2_cache_get_id(group->l2_cache_core[0]))
+ {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1]))
+ {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+ if (2 == core_num)
+ {
+ if (2 == mali_l2_cache_get_id(group->l2_cache_core[0]))
+ {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1]))
+ {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+
+ /* Both counter sources are packed into the second argument. */
+ _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+/* Return MALI_TRUE unless the group is in the DISABLED state.
+ *
+ * @param group Group to query; the state is sampled under the group lock.
+ */
+mali_bool mali_group_is_enabled(struct mali_group *group)
+{
+ mali_bool enabled = MALI_TRUE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_group_lock(group);
+ if (MALI_GROUP_STATE_DISABLED == group->state)
+ {
+ enabled = MALI_FALSE;
+ }
+ mali_group_unlock(group);
+
+ return enabled;
+}
+
+/* Hand the group back to its scheduler (PP scheduler if it has a PP
+ * core, GP scheduler otherwise); the scheduler performs the actual
+ * state transition out of DISABLED.
+ *
+ * @param group Group to enable; must contain at least one core.
+ */
+void mali_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT( NULL != mali_group_get_pp_core(group)
+ || NULL != mali_group_get_gp_core(group));
+
+ if (NULL != mali_group_get_pp_core(group))
+ {
+ mali_pp_scheduler_enable_group(group);
+ }
+ else
+ {
+ mali_gp_scheduler_enable_group(group);
+ }
+}
+
+/* Ask the owning scheduler (PP if the group has a PP core, GP otherwise)
+ * to take the group out of service; the scheduler performs the actual
+ * transition into the DISABLED state.
+ *
+ * @param group Group to disable; must contain at least one core.
+ */
+void mali_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT( NULL != mali_group_get_pp_core(group)
+ || NULL != mali_group_get_gp_core(group));
+
+ if (NULL != mali_group_get_pp_core(group))
+ {
+ mali_pp_scheduler_disable_group(group);
+ }
+ else
+ {
+ mali_gp_scheduler_disable_group(group);
+ }
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_group.h b/drivers/parrot/gpu/mali400_legacy/common/mali_group.h
new file mode 100755
index 00000000000000..23dfd13a6d3b53
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_group.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/* max runtime [ms] for a core job - used by timeout timers */
+#define MAX_RUNTIME 5000
+/** @brief A mali group object represents a MMU and a PP and/or a GP core.
+ *
+ */
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+
+/* Scheduling state of a group's core(s). */
+enum mali_group_core_state
+{
+ MALI_GROUP_STATE_IDLE, /* No job running; available to the scheduler */
+ MALI_GROUP_STATE_WORKING, /* A job is currently executing on the core */
+ MALI_GROUP_STATE_OOM, /* GP job suspended waiting for more heap memory */
+ MALI_GROUP_STATE_IN_VIRTUAL, /* Physical group is a member of a virtual group */
+ MALI_GROUP_STATE_JOINING_VIRTUAL, /* Transitioning into a virtual group */
+ MALI_GROUP_STATE_LEAVING_VIRTUAL, /* Transitioning out of a virtual group */
+ MALI_GROUP_STATE_DISABLED, /* Removed from scheduling; safe to power down */
+};
+
+/* Forward declaration from mali_pm_domain.h */
+struct mali_pm_domain;
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group
+{
+ struct mali_mmu_core *mmu; /* MMU shared by all cores in the group */
+ struct mali_session_data *session; /* Session whose page directory is active, or NULL */
+ int page_dir_ref_count; /* References on the active page directory */
+
+ mali_bool power_is_on; /* Sampled by the IRQ bottom halves before register access */
+ enum mali_group_core_state state;
+
+ struct mali_gp_core *gp_core; /* NULL unless this group owns the GP core */
+ struct mali_gp_job *gp_running_job; /* Current GP job, or NULL */
+
+ struct mali_pp_core *pp_core; /* NULL for GP-only groups */
+ struct mali_pp_job *pp_running_job; /* Current PP job, or NULL */
+ u32 pp_running_sub_job; /* Sub-job index of pp_running_job on this core */
+
+ /* Up to two L2 caches may be reachable from a (virtual) group. */
+ struct mali_l2_cache_core *l2_cache_core[2];
+ u32 l2_cache_core_ref_count[2];
+
+ /* Non-NULL only for the virtual (broadcast) group on Mali-450. */
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+
+ _mali_osk_lock_t *lock; /* Protects group state; see mali_group_lock() */
+
+ _mali_osk_list_t pp_scheduler_list;
+
+ /* List used for virtual groups. For a virtual group, the list represents the
+ * head element. */
+ _mali_osk_list_t group_list;
+
+ struct mali_group *pm_domain_list;
+ struct mali_pm_domain *pm_domain;
+
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
+ /* Deferred IRQ work, one item per core type. */
+ _mali_osk_wq_work_t *bottom_half_work_mmu;
+ _mali_osk_wq_work_t *bottom_half_work_gp;
+ _mali_osk_wq_work_t *bottom_half_work_pp;
+
+ _mali_osk_timer_t *timeout_timer; /* SW watchdog; see mali_group_timeout() */
+ mali_bool core_timed_out; /* Set by the timer, consumed by the bottom halves */
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
+
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+
+/** @brief A group is virtual if and only if it owns a DLBU core. */
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+ return (NULL != group->dlbu_core);
+}
+
+/** @brief Check if a group is considered as part of a virtual group
+ *
+ * @note A group is considered to be "part of" a virtual group also during the
+ * transition into / out of the virtual group (JOINING and LEAVING states).
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+ return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
+ MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+void mali_group_zap_session(struct mali_group* group, struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The expection is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+/** @brief Start fragment of PP job
+ */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocte more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+mali_bool mali_group_power_is_on(struct mali_group *group);
+void mali_group_power_on_group(struct mali_group *group);
+void mali_group_power_off_group(struct mali_group *group);
+void mali_group_power_on(void);
+void mali_group_power_off(void);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+/* MMU-related functions */
+_mali_osk_errcode_t mali_group_upper_half_mmu(void * data);
+
+/* GP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+
+/* PP-related functions */
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+/** @brief Check if group is enabled
+ *
+ * @param group group to check
+ * @return MALI_TRUE if enabled, MALI_FALSE if not
+ */
+mali_bool mali_group_is_enabled(struct mali_group *group);
+
+/** @brief Enable group
+ *
+ * An enabled job is put on the idle scheduler list and can be used to handle jobs. Does nothing if
+ * group is already enabled.
+ *
+ * @param group group to enable
+ */
+void mali_group_enable(struct mali_group *group);
+
+/** @brief Disable group
+ *
+ * A disabled group will no longer be used by the scheduler. If part of a virtual group, the group
+ * will be removed before being disabled. Cores part of a disabled group is safe to power down.
+ *
+ * @param group group to disable
+ */
+void mali_group_disable(struct mali_group *group);
+
+/** @brief Disable a virtual group if it has no member groups left.
+ *
+ * Caller must hold the group lock, and @group must be virtual.
+ *
+ * @return MALI_TRUE if the group was empty and is now DISABLED (its
+ *         session binding is dropped as well), MALI_FALSE otherwise.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+{
+ mali_bool empty = MALI_FALSE;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ if (_mali_osk_list_empty(&group->group_list))
+ {
+ group->state = MALI_GROUP_STATE_DISABLED;
+ group->session = NULL;
+
+ empty = MALI_TRUE;
+ }
+
+ return empty;
+}
+
+/** @brief Re-enable an empty virtual group (inverse of the disable above).
+ *
+ * Caller must hold the group lock, and @group must be virtual. An empty
+ * group is expected to be DISABLED (asserted) and is moved back to IDLE.
+ *
+ * @return MALI_TRUE if the group was empty and is now IDLE, MALI_FALSE
+ *         otherwise.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+{
+ mali_bool empty = MALI_FALSE;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+
+ if (_mali_osk_list_empty(&group->group_list))
+ {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ empty = MALI_TRUE;
+ }
+
+ return empty;
+}
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.c b/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.c
new file mode 100755
index 00000000000000..290d457ed7db53
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Reserve and map the register window of a HW core.
+ *
+ * @param core     Embedded hw-core struct to initialise (storage owned by
+ *                 the caller; nothing is allocated here).
+ * @param resource Platform resource supplying physical base address and a
+ *                 human-readable description.
+ * @param reg_size Size in bytes of the register region.
+ * @return _MALI_OSK_ERR_OK on success; _MALI_OSK_ERR_FAULT if the region
+ *         could not be requested or mapped (any partial reservation is
+ *         released before returning).
+ */
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+ core->phys_addr = resource->base;
+ core->description = resource->description;
+ core->size = reg_size;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description))
+ {
+ core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+ if (NULL != core->mapped_registers)
+ {
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+ /* Mapping failed: give the requested region back before bailing. */
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* Unmap and release a HW core's register window (inverse of
+ * mali_hw_core_create); the mapped pointer is cleared so a stale
+ * mapping cannot be used afterwards. The struct itself is not freed.
+ */
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.h b/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.h
new file mode 100755
index 00000000000000..9c0931c6820110
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_hw_core.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU).
+ * This struct is embedded inside all core specific structs; it is set up by
+ * mali_hw_core_create() and torn down by mali_hw_core_delete().
+ */
+struct mali_hw_core
+{
+ u32 phys_addr; /**< Physical address of the registers */
+ u32 size; /**< Size of the register window, in bytes */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char* description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+/* Read a 32-bit register at @relative_address (byte offset from the core's
+ * mapped base) and trace the access at debug level 6. */
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+ u32 read_val;
+ read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+ MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, read_val));
+ return read_val;
+}
+
+/* Write a 32-bit register without ordering guarantees (relaxed variant of
+ * mali_hw_core_register_write below). */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register (relaxed ordering).
+ * The register is only written when @new_val differs from @old_val. Note
+ * that @old_val is passed by value, so the caller's shadow copy is NOT
+ * updated here; the caller must refresh it after a successful write. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed_conditional for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ if(old_val != new_val)
+ {
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+ }
+}
+
+
+/* Write a 32-bit register with the default (ordered) I/O write primitive. */
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+/* Write @nr_of_regs consecutive 32-bit registers starting at
+ * @relative_address, taking values from @write_array (relaxed ordering). */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i< nr_of_regs; i++)
+ {
+ mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+ }
+}
+
+/* Conditionally write a set of registers (relaxed ordering).
+ * Each register is only written when its new value differs from the
+ * corresponding entry in @old_array. @old_array is read-only here: the
+ * caller owns the shadow copy and must refresh it after the writes. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32* old_array)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i< nr_of_regs; i++)
+ {
+ if(old_array[i] != write_array[i])
+ {
+ mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+ }
+ }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_common.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_common.h
new file mode 100755
index 00000000000000..805b4b144f921c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_common.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* The file include several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_ERROR( (X) ) Prints an errortext, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argumentlist.
+ *
+ * The printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+/* Unconditional print, routed to the OS abstraction layer. Compiled into
+ * release builds too - prefer MALI_DEBUG_PRINT for debug-only output. */
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+/* Print an error message prefixed with source file, function and line. */
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+/* Print an informational message with a "Mali: " prefix. */
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+/* Release-build stubs. NOTE: parameter lists must mirror the DEBUG variants
+ * above exactly, otherwise a call site only compiles in one of the two
+ * configurations (MALI_DEBUG_PRINT_ELSE previously took an extra bogus
+ * "condition" argument here while the DEBUG variant takes two arguments). */
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.c
new file mode 100755
index 00000000000000..41f96cca480b70
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.c
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+unsigned int mali_shared_mem_size = 0;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+#define WATCHDOG_MSECS_DEFAULT 4000 /* 4 s */
+
+/* timer related */
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+/* Cache the GPU's physical base address (from the OS resource layer) in
+ * global_gpu_base_address; all subsequent core lookups are offsets from it.
+ * Returns _MALI_OSK_ERR_ITEM_NOT_FOUND when no base address is registered. */
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+ global_gpu_base_address = _mali_osk_resource_base_address();
+ if (0 == global_gpu_base_address)
+ {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Map a PP core's register offset (relative to the GPU base) to its bit in
+ * the Mali-450 broadcast-unit core mask. PP0-PP3 are reachable at two
+ * aliased register ranges, hence the paired cases. Returns 0 for offsets
+ * that do not correspond to a known PP core. */
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+ switch (resource_pp->base - global_gpu_base_address)
+ {
+ case 0x08000:
+ case 0x20000: /* fall-through for aliased mapping */
+ return 0x01;
+ case 0x0A000:
+ case 0x22000: /* fall-through for aliased mapping */
+ return 0x02;
+ case 0x0C000:
+ case 0x24000: /* fall-through for aliased mapping */
+ return 0x04;
+ case 0x0E000:
+ case 0x26000: /* fall-through for aliased mapping */
+ return 0x08;
+ case 0x28000:
+ return 0x10;
+ case 0x2A000:
+ return 0x20;
+ case 0x2C000:
+ return 0x40;
+ case 0x2E000:
+ return 0x80;
+ default:
+ return 0;
+ }
+}
+
+/* Identify the GPU product and revision by reading the version register of
+ * the first PP core, and record the result in global_product_id /
+ * global_gpu_major_version / global_gpu_minor_version. A throw-away
+ * group+PP object is created just to perform the read, then deleted.
+ * Returns _MALI_OSK_ERR_FAULT on unknown product or missing resources;
+ * aborts outright on Mali-200 (unsupported). */
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ /*
+ * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
+ * Look at the version register for the first PP core in order to determine the GPU HW revision.
+ */
+
+ u32 first_pp_offset;
+ _mali_osk_resource_t first_pp_resource;
+
+ /* Find out where the first PP core is located */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL))
+ {
+ /* Mali-300/400/450 */
+ first_pp_offset = 0x8000;
+ }
+ else
+ {
+ /* Mali-200 */
+ first_pp_offset = 0x0000;
+ }
+
+ /* Find the first PP core resource (again) */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource))
+ {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+ if (NULL != group)
+ {
+ struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+ if (NULL != pp_core)
+ {
+ u32 pp_version = mali_pp_core_get_version(pp_core);
+ /* Deleting the group also deletes the PP core attached to it. */
+ mali_group_delete(group);
+
+ /* Version register layout: [31:16] product id, [15:8] major, [7:0] minor. */
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16)
+ {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+ _mali_osk_abort();
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/* Count the PP cores and L2 cache cores present in the registered resource
+ * list, by probing each register offset (relative to the GPU base) where a
+ * core may live. Replaces eleven copy-pasted if-blocks with table-driven
+ * loops; behavior is identical and new offsets are added in one place. */
+void mali_resource_count(u32 *pp_count, u32 *l2_count)
+{
+ /* Possible PP core register offsets (PP0-PP3, then Mali-450 PP4-PP7). */
+ static const u32 pp_offsets[] = {
+ 0x08000, 0x0A000, 0x0C000, 0x0E000,
+ 0x28000, 0x2A000, 0x2C000, 0x2E000,
+ };
+ /* Possible L2 cache offsets (Mali-400 L2; Mali-450 GP L2 and PP group L2s). */
+ static const u32 l2_offsets[] = { 0x1000, 0x10000, 0x11000 };
+ u32 i;
+
+ *pp_count = 0;
+ *l2_count = 0;
+
+ for (i = 0; i < sizeof(pp_offsets) / sizeof(pp_offsets[0]); i++)
+ {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + pp_offsets[i], NULL))
+ {
+ ++(*pp_count);
+ }
+ }
+
+ for (i = 0; i < sizeof(l2_offsets) / sizeof(l2_offsets[0]); i++)
+ {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + l2_offsets[i], NULL))
+ {
+ ++(*l2_count);
+ }
+ }
+}
+
+/* Delete every group still registered globally. Always removes index 0
+ * because each deletion shrinks the global group list. */
+static void mali_delete_groups(void)
+{
+ while (0 < mali_group_get_glob_num_groups())
+ {
+ mali_group_delete(mali_group_get_glob_group(0));
+ }
+}
+
+/* Delete every L2 cache core still registered globally; index 0 is used
+ * repeatedly because each deletion shrinks the global list. */
+static void mali_delete_l2_cache_cores(void)
+{
+ while (0 < mali_l2_cache_core_get_glob_num_l2_cores())
+ {
+ mali_l2_cache_delete(mali_l2_cache_core_get_glob_l2_core(0));
+ }
+}
+
+/* Create an L2 cache core object from @resource.
+ * Returns the new core, or NULL when mali_l2_cache_create() fails.
+ * NOTE(review): when @resource is NULL this silently returns NULL, yet the
+ * "Created" debug message below is still printed - looks unintended, but all
+ * callers in this file pass a non-NULL resource, so it is harmless here. */
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (NULL != resource)
+ {
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+ l2_cache = mali_l2_cache_create(resource);
+ if (NULL == l2_cache)
+ {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return NULL;
+ }
+ }
+ MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+
+ return l2_cache;
+}
+
+/* Create the L2 cache core objects for the detected product.
+ * Mali-400: a single mandatory L2 at offset 0x1000.
+ * Mali-450: mandatory GP L2 (0x10000) and PP group 0 L2 (0x1000), plus an
+ * optional PP group 1 L2 (0x11000); the PP L2s are attached to their PM
+ * domains. Returns _MALI_OSK_ERR_FAULT when a mandatory L2 is missing. */
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (mali_is_mali400())
+ {
+ _mali_osk_resource_t l2_resource;
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource))
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ l2_cache = mali_create_l2_cache_core(&l2_resource);
+ if (NULL == l2_cache)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ else if (mali_is_mali450())
+ {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t l2_gp_resource;
+ _mali_osk_resource_t l2_pp_grp0_resource;
+ _mali_osk_resource_t l2_pp_grp1_resource;
+
+ /* Make cluster for GP's L2 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource))
+ {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+ if (NULL == l2_cache)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Make cluster for first PP core group */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource))
+ {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+ if (NULL == l2_cache)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(MALI_PMU_M450_DOM1, l2_cache);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource))
+ {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+ if (NULL == l2_cache)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_l2(MALI_PMU_M450_DOM3, l2_cache);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Create one physical group: a group object bound to @cache, containing an
+ * MMU built from @resource_mmu and, when supplied, a GP core
+ * (@resource_gp) and/or a PP core (@resource_pp). The group is reset under
+ * its own lock before being returned. Returns NULL on any failure; cores
+ * already attached to the group are freed via mali_group_delete(). */
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the group object */
+ group = mali_group_create(cache, NULL, NULL);
+ if (NULL == group)
+ {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ return NULL;
+ }
+
+ /* Create the MMU object inside group */
+ mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+ if (NULL == mmu)
+ {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+
+ if (NULL != resource_gp)
+ {
+ /* Create the GP core object inside this group */
+ struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+ if (NULL == gp_core)
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ if (NULL != resource_pp)
+ {
+ struct mali_pp_core *pp_core;
+
+ /* Create the PP core object inside this group */
+ pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+ if (NULL == pp_core)
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ /* Reset group */
+ mali_group_lock(group);
+ mali_group_reset(group);
+ mali_group_unlock(group);
+
+ return group;
+}
+
+/* Create the Mali-450 virtual group: DLBU + broadcast unit + broadcast MMU
+ * + broadcast PP core, assembled in that order so each failure path only
+ * has to free what was created so far (the group takes ownership of the
+ * DLBU/broadcast cores once mali_group_create succeeds, and of the MMU/PP
+ * cores as they are created inside it). Returns _MALI_OSK_ERR_FAULT on any
+ * creation failure. */
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+ _mali_osk_resource_t *resource_pp_bcast,
+ _mali_osk_resource_t *resource_dlbu,
+ _mali_osk_resource_t *resource_bcast)
+{
+ struct mali_mmu_core *mmu_pp_bcast_core;
+ struct mali_pp_core *pp_bcast_core;
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+ /* Create the DLBU core object */
+ dlbu_core = mali_dlbu_create(resource_dlbu);
+ if (NULL == dlbu_core)
+ {
+ MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the Broadcast unit core */
+ bcast_core = mali_bcast_unit_create(resource_bcast);
+ if (NULL == bcast_core)
+ {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+ group = mali_group_create(NULL, dlbu_core, bcast_core);
+ if (NULL == group)
+ {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+ mali_bcast_unit_delete(bcast_core);
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the MMU object inside group */
+ mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+ if (NULL == mmu_pp_bcast_core)
+ {
+ MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the PP core object inside this group */
+ pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+ if (NULL == pp_bcast_core)
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Discover all GP/PP/MMU resources and build the corresponding groups.
+ * Requires at least one GP and one PP (each with its own MMU). PP cores
+ * 0-3 go in core group 0, PP cores 4-7 (Mali-450 only) in core group 1;
+ * each group is attached to its L2 cluster and PM domain. On Mali-450 the
+ * virtual (broadcast) group is created last. Finally the global
+ * mali_max_pp_cores_group_* limits are clamped to what was actually
+ * initialized. Returns _MALI_OSK_ERR_FAULT on any missing mandatory
+ * resource or creation failure. */
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ struct mali_group *group;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+
+ _mali_osk_resource_t resource_gp;
+ _mali_osk_resource_t resource_gp_mmu;
+ _mali_osk_resource_t resource_pp[8];
+ _mali_osk_resource_t resource_pp_mmu[8];
+ _mali_osk_resource_t resource_pp_mmu_bcast;
+ _mali_osk_resource_t resource_pp_bcast;
+ _mali_osk_resource_t resource_dlbu;
+ _mali_osk_resource_t resource_bcast;
+ _mali_osk_errcode_t resource_gp_found;
+ _mali_osk_errcode_t resource_gp_mmu_found;
+ _mali_osk_errcode_t resource_pp_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+ _mali_osk_errcode_t resource_pp_bcast_found;
+ _mali_osk_errcode_t resource_dlbu_found;
+ _mali_osk_errcode_t resource_bcast_found;
+
+ if (!(mali_is_mali400() || mali_is_mali450()))
+ {
+ /* No known HW core */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (mali_is_mali450())
+ {
+ /* Mali-450 have separate L2s for GP, and PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ /* Probe every possible core location; "found" results are checked below. */
+ resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+
+
+ if (mali_is_mali450())
+ {
+ resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+
+ if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_dlbu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_pp_bcast_found)
+ {
+ /* Missing mandatory core(s) for Mali-450 */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != resource_gp_found ||
+ _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_found[0])
+ {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+ if (NULL == group)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+ if (NULL == group)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ if (mali_is_mali450())
+ {
+ mali_pm_domain_add_group(MALI_PMU_M450_DOM1, group);
+ }
+ else
+ {
+ mali_pm_domain_add_group(MALI_PMU_M400_PP0, group);
+ }
+ mali_inited_pp_cores_group_1++;
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) /* First half of the PP cores belong to first core group */
+ {
+ if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1)
+ {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i])
+ {
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ if (mali_is_mali450())
+ {
+ mali_pm_domain_add_group(MALI_PMU_M450_DOM2, group);
+ }
+ else
+ {
+ mali_pm_domain_add_group(MALI_PMU_M400_PP0 + i, group);
+ }
+ mali_inited_pp_cores_group_1++;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < 8; i++) /* Second half of the PP cores belong to second core group */
+ {
+ if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2)
+ {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i])
+ {
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ if (NULL == group)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_pm_domain_add_group(MALI_PMU_M450_DOM3, group);
+ mali_inited_pp_cores_group_2++;
+ }
+ }
+ }
+
+ if(mali_is_mali450())
+ {
+ _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return err;
+ }
+ }
+
+ /* Clamp the configured limits to what was actually brought up. */
+ mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+ mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+ MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Verify shared-interrupt usage against the kernel configuration.
+ *
+ * @return _MALI_OSK_ERR_OK when the configuration is usable,
+ *         _MALI_OSK_ERR_FAULT when shared IRQs are detected but
+ *         CONFIG_MALI_SHARED_INTERRUPTS support is compiled out.
+ */
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	if (MALI_TRUE == _mali_osk_shared_interrupts())
+	{
+		MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+		return _MALI_OSK_ERR_FAULT;
+	}
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+	/* It is OK to compile support for shared interrupts even if Mali is not using it. */
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Create power-management domains matching the detected GPU.
+ *
+ * Mali-450: DOM1/DOM2 for all supported core counts, plus DOM3 for the
+ * 6- and 8-core configurations. Mali-300/400: one domain per PP core,
+ * each using the next bit of the PP0 mask.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM when a
+ *         domain cannot be allocated.
+ */
+static _mali_osk_errcode_t mali_create_pm_domains(void)
+{
+	struct mali_pm_domain *domain;
+	u32 number_of_pp_cores = 0;
+	u32 number_of_l2_caches = 0;
+
+	mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
+
+	if (mali_is_mali450())
+	{
+		MALI_DEBUG_PRINT(2, ("Creating PM domains for Mali-450 MP%d\n", number_of_pp_cores));
+		switch (number_of_pp_cores)
+		{
+			case 8: /* Fall through */
+			case 6: /* Fall through */
+				domain = mali_pm_domain_create(MALI_PMU_M450_DOM3, MALI_PMU_M450_DOM3_MASK);
+				MALI_CHECK(NULL != domain, _MALI_OSK_ERR_NOMEM);
+				/* Fall through: 6/8-core parts also need DOM1 and DOM2 below */
+			case 4: /* Fall through */
+			case 3: /* Fall through */
+			case 2: /* Fall through */
+				domain = mali_pm_domain_create(MALI_PMU_M450_DOM2, MALI_PMU_M450_DOM2_MASK);
+				MALI_CHECK(NULL != domain, _MALI_OSK_ERR_NOMEM);
+				domain = mali_pm_domain_create(MALI_PMU_M450_DOM1, MALI_PMU_M450_DOM1_MASK);
+				MALI_CHECK(NULL != domain, _MALI_OSK_ERR_NOMEM);
+
+				break;
+			default:
+				MALI_PRINT_ERROR(("Unsupported core configuration\n"));
+				MALI_DEBUG_ASSERT(0);
+		}
+	}
+	else
+	{
+		int i;
+		u32 mask = MALI_PMU_M400_PP0_MASK;
+
+		MALI_DEBUG_PRINT(2, ("Creating PM domains for Mali-400 MP%d\n", number_of_pp_cores));
+
+		MALI_DEBUG_ASSERT(mali_is_mali400());
+
+		for (i = 0; i < number_of_pp_cores; i++)
+		{
+			MALI_CHECK(NULL != mali_pm_domain_create(i, mask), _MALI_OSK_ERR_NOMEM);
+
+			/* Shift mask up, for next core */
+			mask = mask << 1;
+		}
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Probe for a PMU at the fixed offset 0x02000 from the GPU base address
+ * and, when present, create the PMU core object.
+ *
+ * @return _MALI_OSK_ERR_OK on success or when no PMU resource exists
+ *         (a PMU is optional), _MALI_OSK_ERR_FAULT when creation fails.
+ */
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+	_mali_osk_resource_t resource_pmu;
+
+	MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu))
+	{
+		struct mali_pmu_core *pmu;
+		u32 number_of_pp_cores = 0;
+		u32 number_of_l2_caches = 0;
+
+		mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
+
+		pmu = mali_pmu_create(&resource_pmu, number_of_pp_cores, number_of_l2_caches);
+		if (NULL == pmu)
+		{
+			MALI_PRINT_ERROR(("Failed to create PMU\n"));
+			return _MALI_OSK_ERR_FAULT;
+		}
+	}
+
+	/* It's ok if the PMU doesn't exist */
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Resolve and register the driver's memory configuration.
+ *
+ * Module parameters take precedence; when all of them are zero the
+ * device-provided settings (_mali_osk_device_data_get) are used instead.
+ * Registers, in order: dedicated memory, shared OS memory, and the frame
+ * buffer validation range.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_INVALID_ARGS when no
+ *         GPU memory is configured at all, or the failing sub-call's error
+ *         (after tearing the memory subsystem back down).
+ */
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+	_mali_osk_errcode_t ret;
+
+	if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size)
+	{
+		/* Memory settings are not overridden by module parameters, so use device settings */
+		struct _mali_osk_device_data data = { 0, };
+
+		if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
+		{
+			/* Use device specific settings (if defined) */
+			mali_dedicated_mem_start = data.dedicated_mem_start;
+			mali_dedicated_mem_size = data.dedicated_mem_size;
+			mali_shared_mem_size = data.shared_mem_size;
+		}
+
+		if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size)
+		{
+			/* No GPU memory specified */
+			return _MALI_OSK_ERR_INVALID_ARGS;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+		                     mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
+		                     mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
+	}
+
+	if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start)
+	{
+		/* Dedicated memory */
+		ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+		if (_MALI_OSK_ERR_OK != ret)
+		{
+			MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	if (0 < mali_shared_mem_size)
+	{
+		/* Shared OS memory */
+		ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+		if (_MALI_OSK_ERR_OK != ret)
+		{
+			MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	if (0 == mali_fb_start && 0 == mali_fb_size)
+	{
+		/* Frame buffer settings are not overridden by module parameters, so use device settings */
+		struct _mali_osk_device_data data = { 0, };
+
+		if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
+		{
+			/* Use device specific settings (if defined) */
+			mali_fb_start = data.fb_start;
+			mali_fb_size = data.fb_size;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+		                     mali_fb_size, mali_fb_start));
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+		                     mali_fb_size, mali_fb_start));
+	}
+
+	if (0 != mali_fb_size)
+	{
+		/* Register frame buffer */
+		ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+		if (_MALI_OSK_ERR_OK != ret)
+		{
+			MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+			mali_memory_terminate();
+			return ret;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Bring up all Mali kernel subsystems in dependency order.
+ *
+ * On any failure the goto-chain at the bottom unwinds, in reverse order,
+ * only the subsystems that were already initialised, then returns the
+ * first error encountered.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code.
+ */
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+	_mali_osk_errcode_t err;
+	struct mali_pmu_core *pmu;
+
+	err = mali_session_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if defined(CONFIG_MALI400_PROFILING)
+	err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		/* No biggie if we weren't able to initialize the profiling */
+		MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+	}
+#endif
+
+	err = mali_memory_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+	/* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+	err = mali_parse_config_memory();
+	if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+	err = mali_set_global_gpu_base_address();
+	if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+
+	err = mali_check_shared_interrupts();
+	if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+
+	err = mali_pp_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+	/* Initialize the power management module */
+	err = mali_pm_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+	/* Initialize the MALI PMU */
+	err = mali_parse_config_pmu();
+	if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+	/* Make sure the power stays on for the rest of this function */
+	err = _mali_osk_pm_dev_ref_add();
+	if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
+
+	/*
+	 * If run-time PM is used, then the mali_pm module has now already been
+	 * notified that the power now is on (through the resume callback functions).
+	 * However, if run-time PM is not used, then there will probably not be any
+	 * calls to the resume callback functions, so we need to explicitly tell it
+	 * that the power is on.
+	 */
+	mali_pm_set_power_is_on();
+
+	/* Reset PMU HW and ensure all Mali power domains are on */
+	pmu = mali_pmu_get_global_pmu_core();
+	if (NULL != pmu)
+	{
+		err = mali_pmu_reset(pmu);
+		if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+	}
+
+	/* Detect which Mali GPU we are dealing with */
+	err = mali_parse_product_info();
+	if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+	/* The global_product_id is now populated with the correct Mali GPU */
+
+	/* Create PM domains only if PMU exists */
+	if (NULL != pmu)
+	{
+		err = mali_create_pm_domains();
+		if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
+	}
+
+	/* Initialize MMU module */
+	err = mali_mmu_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+	if (mali_is_mali450())
+	{
+		err = mali_dlbu_initialize();
+		if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+	}
+
+	/* Start configuring the actual Mali hardware. */
+	err = mali_parse_config_l2_cache();
+	if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+	err = mali_parse_config_groups();
+	if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+	/* Initialize the schedulers */
+	err = mali_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+	err = mali_gp_scheduler_initialize();
+	if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+
+	/* PP scheduler population can't fail */
+	mali_pp_scheduler_populate();
+
+	/* Initialize the GPU utilization tracking */
+	err = mali_utilization_init();
+	if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+
+	MALI_SUCCESS; /* all ok */
+
+	/* Error handling */
+
+utilization_init_failed:
+	mali_pp_scheduler_depopulate();
+	mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+	mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+	mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
+	mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
+dlbu_init_failed:
+	/* NOTE(review): mali_dlbu_terminate() runs on this unwind path even for
+	 * non-Mali-450 parts (where the DLBU was never initialised) and when
+	 * mali_dlbu_initialize() itself failed -- presumably a safe no-op; confirm. */
+	mali_dlbu_terminate();
+mmu_init_failed:
+	mali_pm_domain_terminate();
+pm_domain_failed:
+	/* Nothing to roll back */
+product_info_parsing_failed:
+	/* Nothing to roll back */
+pmu_reset_failed:
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+pm_always_on_failed:
+	pmu = mali_pmu_get_global_pmu_core();
+	if (NULL != pmu)
+	{
+		mali_pmu_delete(pmu);
+	}
+parse_pmu_config_failed:
+	mali_pm_terminate();
+pm_init_failed:
+	mali_pp_scheduler_terminate();
+pp_scheduler_init_failed:
+check_shared_interrupts_failed:
+	global_gpu_base_address = 0;
+set_global_gpu_base_address_failed:
+	/* NOTE(review): duplicate of the assignment above (the previous label
+	 * falls through to here) -- harmless but redundant. */
+	global_gpu_base_address = 0;
+parse_memory_config_failed:
+	/* NOTE(review): some failure paths inside mali_parse_config_memory()
+	 * already call mali_memory_terminate(), so it may run twice here --
+	 * confirm it is safe to call repeatedly. */
+	mali_memory_terminate();
+memory_init_failed:
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_term();
+#endif
+	mali_session_terminate();
+session_init_failed:
+	return err;
+}
+
+/**
+ * Shut down all Mali kernel subsystems, in reverse order of
+ * mali_initialize_subsystems(). Keeps a PM device reference for the
+ * duration so the GPU stays powered while terminating.
+ */
+void mali_terminate_subsystems(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+	MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+	/* shut down subsystems in reverse order from startup */
+
+	/* We need the GPU to be powered up for the terminate sequence */
+	_mali_osk_pm_dev_ref_add();
+
+	mali_utilization_term();
+	mali_pp_scheduler_depopulate();
+	mali_gp_scheduler_terminate();
+	mali_scheduler_terminate();
+	mali_delete_l2_cache_cores();
+	if (mali_is_mali450())
+	{
+		mali_dlbu_terminate();
+	}
+	mali_mmu_terminate();
+	if (NULL != pmu)
+	{
+		mali_pmu_delete(pmu);
+	}
+	mali_pm_terminate();
+	mali_memory_terminate();
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_term();
+#endif
+
+	/* Allowing the system to be turned off */
+	_mali_osk_pm_dev_ref_dec();
+
+	mali_pp_scheduler_terminate();
+	mali_session_terminate();
+}
+
+/** @return The product ID detected during subsystem initialisation. */
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+	return global_product_id;
+}
+
+/** @return The GPU's major hardware version. */
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+	return global_gpu_major_version;
+}
+
+/** @return The GPU's minor hardware version. */
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+	return global_gpu_minor_version;
+}
+
+/**
+ * User/kernel API version handshake.
+ *
+ * Compares the caller-supplied version against _MALI_UK_API_VERSION,
+ * sets args->compatible accordingly, and always writes back the
+ * driver's own version.
+ *
+ * @param args In/out version structure; args->ctx must be non-NULL.
+ * @return _MALI_OSK_ERR_OK regardless of compatibility.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* check compatibility */
+	if ( args->version == _MALI_UK_API_VERSION )
+	{
+		args->compatible = 1;
+	}
+	else
+	{
+		args->compatible = 0;
+	}
+
+	args->version = _MALI_UK_API_VERSION; /* report our version */
+
+	/* success regardless of being compatible or not */
+	MALI_SUCCESS;
+}
+
+/**
+ * Mark the given session as the compositor session and clear the
+ * PP scheduler's blocked-on-compositor flag.
+ *
+ * @param session_ptr Opaque pointer to a struct mali_session_data.
+ */
+void _mali_ukk_compositor_priority(void * session_ptr)
+{
+	struct mali_session_data *session = session_ptr;
+	session->is_compositor = MALI_TRUE;
+	mali_pp_scheduler_blocked_on_compositor = MALI_FALSE;
+	MALI_DEBUG_PRINT(2, ("Setting session: %d as Compositor\n", _mali_osk_get_pid()));
+}
+
+/**
+ * Block until a notification is available on the session's ioctl queue,
+ * then copy it into args->data and free it.
+ *
+ * May sleep in _mali_osk_notification_queue_receive(). When the queue is
+ * already gone (session shutting down), reports
+ * _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS and returns success.
+ *
+ * @param args In/out notification structure; args->ctx is the session.
+ * @return _MALI_OSK_ERR_OK on success, otherwise the receive error.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+	_mali_osk_errcode_t err;
+	_mali_osk_notification_t * notification;
+	_mali_osk_notification_queue_t *queue;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+	/* if the queue does not exist we're currently shutting down */
+	if (NULL == queue)
+	{
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+		MALI_SUCCESS;
+	}
+
+	/* receive a notification, might sleep */
+	err = _mali_osk_notification_queue_receive(queue, &notification);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_ERROR(err); /* errcode returned, pass on to caller */
+	}
+
+	/* copy the buffer to the user */
+	args->type = (_mali_uk_notification_type)notification->notification_type;
+	_mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+	/* finished with the notification */
+	_mali_osk_notification_delete( notification );
+
+	MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * Post an empty notification of the given type onto the session's ioctl
+ * queue (payload size 0). Silently succeeds if the queue is already gone.
+ *
+ * @param args Notification request; args->ctx is the session.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM when the
+ *         notification object cannot be allocated.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+	_mali_osk_notification_t * notification;
+	_mali_osk_notification_queue_t *queue;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+	/* if the queue does not exist we're currently shutting down */
+	if (NULL == queue)
+	{
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		MALI_SUCCESS;
+	}
+
+	notification = _mali_osk_notification_create(args->type, 0);
+	if (NULL == notification)
+	{
+		MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	_mali_osk_notification_queue_send(queue, notification);
+
+	MALI_SUCCESS; /* all ok */
+}
+
+/**
+ * Create a new driver session.
+ *
+ * Allocates the session object, its notification queue, MMU page
+ * directory (with the DLBU page mapped in), memory-session state and,
+ * under CONFIG_SYNC, the pending-jobs list/lock. On any failure the
+ * resources acquired so far are released in reverse order.
+ *
+ * @param context Out: receives the opaque session pointer on success.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+	struct mali_session_data *session;
+
+	/* allocated struct to track this session */
+	session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+	MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+	/* create a response queue for this session */
+	session->ioctl_queue = _mali_osk_notification_queue_init();
+	if (NULL == session->ioctl_queue)
+	{
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	session->page_directory = mali_mmu_pagedir_alloc();
+	if (NULL == session->page_directory)
+	{
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Map one page for the DLBU at its fixed virtual address */
+	if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE))
+	{
+		MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Point the mapping at the DLBU's physical page when one exists */
+	if (0 != mali_dlbu_phys_addr)
+	{
+		mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+		                        _MALI_OSK_MALI_PAGE_SIZE, MALI_CACHE_STANDARD);
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session))
+	{
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+#ifdef CONFIG_SYNC
+	_mali_osk_list_init(&session->pending_jobs);
+	session->pending_jobs_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK,
+	                                                 0, _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS);
+	if (NULL == session->pending_jobs_lock)
+	{
+		MALI_PRINT_ERROR(("Failed to create pending jobs lock\n"));
+		mali_memory_session_end(session);
+		mali_mmu_pagedir_free(session->page_directory);
+		_mali_osk_notification_queue_term(session->ioctl_queue);
+		_mali_osk_free(session);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+#endif
+
+	session->is_compositor = MALI_FALSE;
+
+	*context = (void*)session;
+
+	/* Add session to the list of all sessions. */
+	mali_session_add(session);
+
+	/* Initialize list of jobs on this session */
+	_MALI_OSK_INIT_LIST_HEAD(&session->job_list);
+
+	MALI_DEBUG_PRINT(2, ("Session started\n"));
+	MALI_SUCCESS;
+}
+
+/**
+ * Tear down a driver session.
+ *
+ * Removes the session from the global list, cancels any fence-pending PP
+ * jobs (CONFIG_SYNC), aborts queued/running GP and PP jobs, flushes
+ * bottom-half work, then frees the session's memory, page directory and
+ * notification queue.
+ *
+ * @param context In/out: the session pointer; set to NULL on return.
+ * @return _MALI_OSK_ERR_OK on success,
+ *         _MALI_OSK_ERR_INVALID_ARGS when context is NULL.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+	struct mali_session_data *session;
+	MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+	session = (struct mali_session_data *)*context;
+
+	MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+	/* Remove session from list of all sessions. */
+	mali_session_remove(session);
+
+	/* Abort pending jobs */
+#ifdef CONFIG_SYNC
+	{
+		_mali_osk_list_t tmp_job_list;
+		struct mali_pp_job *job, *tmp;
+		_MALI_OSK_INIT_LIST_HEAD(&tmp_job_list);
+
+		_mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+		/* Abort asynchronous wait on fence. */
+		_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pending_jobs, struct mali_pp_job, list)
+		{
+			MALI_DEBUG_PRINT(2, ("Sync: Aborting wait for session %x job %x\n", session, job));
+			if (sync_fence_cancel_async(job->pre_fence, &job->sync_waiter))
+			{
+				MALI_DEBUG_PRINT(2, ("Sync: Failed to abort job %x\n", job));
+			}
+			/* Move the job to a local list so it can be freed after unlock */
+			_mali_osk_list_add(&job->list, &tmp_job_list);
+		}
+		_mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+		_mali_osk_wq_flush();
+
+		_mali_osk_lock_term(session->pending_jobs_lock);
+
+		/* Delete jobs */
+		_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &tmp_job_list, struct mali_pp_job, list)
+		{
+			mali_pp_job_delete(job);
+		}
+	}
+#endif
+
+	/* Abort queued and running jobs */
+	mali_gp_scheduler_abort_session(session);
+	mali_pp_scheduler_abort_session(session);
+
+	/* A departing compositor must not leave the PP scheduler blocked */
+	if (session->is_compositor)
+	{
+		mali_pp_scheduler_blocked_on_compositor = MALI_FALSE;
+		mali_pp_scheduler_schedule();
+	}
+
+	/* Flush pending work.
+	 * Needed to make sure all bottom half processing related to this
+	 * session has been completed, before we free internal data structures.
+	 */
+	_mali_osk_wq_flush();
+
+	/* Free remaining memory allocated to this session */
+	mali_memory_session_end(session);
+
+	/* Free session data structures */
+	mali_mmu_pagedir_free(session->page_directory);
+	_mali_osk_notification_queue_term(session->ioctl_queue);
+	_mali_osk_free(session);
+
+	*context = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+	MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+/**
+ * Dump GP and PP scheduler state into a text buffer (debug builds only).
+ *
+ * @param buf  Destination buffer.
+ * @param size Capacity of buf in bytes.
+ * @return Number of bytes written to buf.
+ */
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+	int n = 0; /* Number of bytes written to buf */
+
+	n += mali_gp_scheduler_dump_state(buf + n, size - n);
+	n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.h
new file mode 100755
index 00000000000000..2e88392f9df2c2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_core.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+extern int mali_max_job_runtime;
+
+/** Product IDs for the Mali GPU families handled by this driver. */
+typedef enum
+{
+	_MALI_PRODUCT_ID_UNKNOWN,
+	_MALI_PRODUCT_ID_MALI200,
+	_MALI_PRODUCT_ID_MALI300,
+	_MALI_PRODUCT_ID_MALI400,
+	_MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+
+/** @return MALI_TRUE when the detected GPU is a Mali-450. */
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+	return _MALI_PRODUCT_ID_MALI450 == mali_kernel_core_get_product_id();
+}
+
+/** @return MALI_TRUE for Mali-400 and also for Mali-300 (treated alike). */
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+	u32 id = mali_kernel_core_get_product_id();
+	return _MALI_PRODUCT_ID_MALI400 == id || _MALI_PRODUCT_ID_MALI300 == id;
+}
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.c
new file mode 100755
index 00000000000000..8e9dc2c0f6a4d3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+/**
+ * Create a descriptor mapping holding init_entries, growable to max_entries.
+ * Both counts are rounded up to a multiple of BITS_PER_LONG.
+ *
+ * @param init_entries Number of entries to preallocate
+ * @param max_entries  Upper bound on the number of entries
+ * @return New mapping object, or NULL on allocation failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+	mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+	init_entries = MALI_PAD_INT(init_entries);
+	max_entries = MALI_PAD_INT(max_entries);
+
+	if (NULL != map)
+	{
+		map->table = descriptor_table_alloc(init_entries);
+		if (NULL != map->table)
+		{
+			map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+			if (NULL != map->lock)
+			{
+				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+				map->max_nr_mappings_allowed = max_entries;
+				map->current_nr_mappings = init_entries;
+				return map;
+			}
+			/* lock init failed: unwind */
+			descriptor_table_free(map->table);
+		}
+		_mali_osk_free(map);
+	}
+	return NULL;
+}
+
+/** Destroy a descriptor mapping, freeing its table, lock and the map itself. */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+	descriptor_table_free(map->table);
+	_mali_osk_lock_term(map->lock);
+	_mali_osk_free(map);
+}
+
+/**
+ * Allocate a new descriptor ID mapping to 'target'.
+ *
+ * Grows the table by BITS_PER_LONG entries when full, up to
+ * max_nr_mappings_allowed. Holds the map lock in RW mode throughout.
+ *
+ * @param map         The mapping to allocate from
+ * @param target      Value the new descriptor should map to
+ * @param odescriptor Out: receives the allocated descriptor ID
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT when full or
+ *         when growing the table fails
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	int new_descriptor;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (new_descriptor == map->current_nr_mappings)
+	{
+		/* no free descriptor, try to expand the table */
+		mali_descriptor_table * new_table, * old_table;
+		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+		map->current_nr_mappings += BITS_PER_LONG;
+		new_table = descriptor_table_alloc(map->current_nr_mappings);
+		if (NULL == new_table) goto unlock_and_exit;
+
+		/* NOTE(review): current_nr_mappings was already grown above, so both
+		 * memcpy calls below copy the *new* size out of the old, smaller
+		 * table -- i.e. they read BITS_PER_LONG bits / entries past the end
+		 * of old_table's buffers. Confirm against upstream before relying
+		 * on this path. */
+		old_table = map->table;
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+		map->table = new_table;
+		descriptor_table_free(old_table);
+	}
+
+	/* we have found a valid descriptor, set the value and usage bit */
+	_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+	map->table->mappings[new_descriptor] = target;
+	*odescriptor = new_descriptor;
+	err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	MALI_ERROR(err);
+}
+
+/**
+ * Invoke 'callback' for every in-use descriptor, holding the map lock
+ * in read-only mode for the whole traversal.
+ *
+ * @param map      Mapping to iterate
+ * @param callback Called as callback(descriptor, mapped_value)
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(map);
+	MALI_DEBUG_ASSERT_POINTER(callback);
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* id 0 is skipped as it's a reserved ID not mapping to anything */
+	for (i = 1; i < map->current_nr_mappings; ++i)
+	{
+		if (_mali_osk_test_bit(i, map->table->usage))
+		{
+			callback(i, map->table->mappings[i]);
+		}
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+/**
+ * Look up the value stored for a descriptor ID.
+ *
+ * @param map        Mapping to search
+ * @param descriptor Descriptor ID to look up
+ * @param target     Out: mapped value on success, NULL on failure
+ * @return _MALI_OSK_ERR_OK when the descriptor is in range and in use,
+ *         _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+	MALI_DEBUG_ASSERT_POINTER(map);
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		*target = map->table->mappings[descriptor];
+		result = _MALI_OSK_ERR_OK;
+	}
+	else *target = NULL;
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	MALI_ERROR(result);
+}
+
+/**
+ * Replace the value stored for an existing (in-use) descriptor ID.
+ *
+ * NOTE(review): the table is mutated while the lock is held in RO mode
+ * (contrast allocate/free, which take RW) -- concurrent set() calls could
+ * race; confirm this matches upstream intent.
+ *
+ * @param map        Mapping to update
+ * @param descriptor Descriptor ID to update
+ * @param target     New value to store
+ * @return _MALI_OSK_ERR_OK when the descriptor is in range and in use,
+ *         _MALI_OSK_ERR_FAULT otherwise
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+	_mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		map->table->mappings[descriptor] = target;
+		result = _MALI_OSK_ERR_OK;
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	MALI_ERROR(result);
+}
+
+/**
+ * Release a descriptor ID so it can be reused.
+ *
+ * @param map        Mapping to release from
+ * @param descriptor Descriptor ID to free
+ * @return The value previously mapped, or NULL when the descriptor was
+ *         out of range or not in use
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+	void *old_value = NULL;
+
+	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+	{
+		old_value = map->table->mappings[descriptor];
+		map->table->mappings[descriptor] = NULL;
+		_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+	}
+	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return old_value;
+}
+
+/**
+ * Allocate a descriptor table for 'count' mappings.
+ *
+ * A single zeroed allocation holds the header, the usage bitmap and the
+ * mappings array back-to-back; 'usage' and 'mappings' are pointed into it.
+ *
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+	mali_descriptor_table * table;
+
+	table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+	if (NULL != table)
+	{
+		table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+		table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+	}
+
+	return table;
+}
+
+/** Free a descriptor table (single allocation, see descriptor_table_alloc). */
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+	_mali_osk_free(table);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.h
new file mode 100755
index 00000000000000..e0d256de31c133
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+	u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not; bit 0 is reserved so descriptor 0 is never handed out */
+	void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+	_mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object (reader/writer) */
+	int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+	int current_nr_mappings; /**< Current number of possible mappings (multiple of BITS_PER_LONG) */
+	mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Out: receives the allocated descriptor ID on success
+ * @return _MALI_OSK_ERR_OK on success, an error code on failure
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.c
new file mode 100755
index 00000000000000..e866447fcf5fcc
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+/* Book-keeping record for a single allocation of OS pages.
+ * Stored as the opaque handle in mali_physical_memory_allocation and read
+ * back in os_allocator_release() to undo the mapping. */
+typedef struct os_allocation
+{
+ u32 num_pages; /* number of CPU pages committed to this allocation */
+ u32 offset_start; /* offset within the descriptor where mapping began */
+ mali_allocation_engine * engine; /* kept so release can call the engine's unmap */
+ mali_memory_allocation * descriptor; /* kept so release can call the engine's unmap */
+} os_allocation;
+
+/* Per-allocator context shared by all allocations made through it. */
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex; /* serialises updates to num_pages_allocated */
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator);
+
+/* Create an OS-memory physical allocator.
+ * max_allocation is rounded up to a whole number of CPU pages and turned
+ * into a page budget (num_pages_max). Returns NULL if either allocation or
+ * the mutex initialisation fails; partially built state is freed on the
+ * error paths. The returned object is destroyed via os_allocator_destroy. */
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
+ /* Round the byte budget up to a multiple of the CPU page size. */
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+ if (NULL != info->mutex)
+ {
+ /* Wire up the allocator vtable; ctx carries the os_allocator state. */
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->stat = os_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ /* Mutex init failed: unwind the info allocation. */
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+/* Report the number of bytes currently allocated through this allocator.
+ * NOTE(review): num_pages_allocated is counted in CPU-page units elsewhere
+ * in this file but is multiplied by _MALI_OSK_MALI_PAGE_SIZE here; this is
+ * only correct if the two page sizes are equal - confirm against the OSK
+ * definitions. Read without taking info->mutex (best-effort statistic). */
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ info = (os_allocator*)allocator->ctx;
+ return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+/* Destroy the allocator object, its mutex and its context.
+ * The caller must guarantee that no allocations are still outstanding;
+ * nothing here releases pages that are still mapped through this allocator. */
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+/* Commit OS pages to the range [*offset, descriptor->size) one page at a
+ * time, up to this allocator's page budget.
+ *
+ * Returns MALI_MEM_ALLOC_FINISHED when the whole remaining range was
+ * committed, MALI_MEM_ALLOC_PARTIAL when some but not all pages were
+ * committed (caller is expected to continue with the next allocator),
+ * MALI_MEM_ALLOC_NONE when no page could be committed, and
+ * MALI_MEM_ALLOC_INTERNAL_FAILURE on a fatal mapping error (any pages
+ * mapped so far are unmapped before returning).
+ * On success, *offset is advanced past the committed pages and alloc_info
+ * is filled in so the pages can later be released via os_allocator_release. */
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset; /* bytes still to commit */
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ /* Provisional page count for the whole remaining range; overwritten
+ * below with the number actually committed. */
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ /* Commit one page per iteration while bytes remain, the budget is not
+ * exhausted, and the OSK layer says total usage is still acceptable. */
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+ {
+ /* The OS_ALLOCATED_PHYSADDR_MAGIC address asks the engine to let
+ * the OS pick the physical page at commit time. */
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB Configured maximum OS memory usage: %d kB\n",
+ (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+ /* NOTE(review): the first argument is the descriptor pointer, not the
+ * CPU mapping of the committed range - verify this is what the OSK
+ * flush primitive expects. */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+/* Release an allocation previously made by os_allocator_allocate.
+ * Decrements the allocator's page count, unmaps the committed range through
+ * the engine, and frees the tracking record. If the mutex cannot be taken
+ * the release is abandoned (pages would leak in that case). */
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+/* Allocate a physically contiguous block of IO memory for Mali page tables.
+ * Tries a 2^6-page block first, shrinking the order when either the
+ * allocator's page budget or the OS allocation would be exceeded, then
+ * shrinking further if _mali_osk_mem_allocioregion itself fails.
+ * The final order is stashed in block->handle so the release path can
+ * recompute the page count. Returns MALI_MEM_ALLOC_FINISHED on success,
+ * MALI_MEM_ALLOC_NONE on out-of-memory, MALI_MEM_ALLOC_INTERNAL_FAILURE if
+ * the mutex cannot be taken. */
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ int allocation_order = 6; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+ void *virt = NULL;
+ u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /* if the number of pages to be requested lead to exceeding the memory
+ * limit in info->num_pages_max, reduce the size that is to be requested. */
+ while ( (info->num_pages_allocated + (1 << allocation_order) > info->num_pages_max)
+ && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ if ( allocation_order > 0 ) {
+ --allocation_order;
+ } else {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+ }
+
+ /* try to allocate 2^(allocation_order) pages, if that fails, try
+ * allocation_order-1 to allocation_order 0 (inclusive) */
+ while ( allocation_order >= 0 )
+ {
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if (NULL != virt) break;
+
+ --allocation_order;
+ }
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate consistent memory. Is CONSISTENT_DMA_SIZE set too low?\n"));
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("os_allocator_allocate_page_table_block: Allocation of order %i succeeded\n",
+ allocation_order));
+
+ /* we now know the size of the allocation since we know for what
+ * allocation_order the allocation succeeded */
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order; /* order encoded in the handle */
+ block->size = size;
+ /* Convert the CPU physical base to a Mali physical address. */
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+ block->mapping = virt;
+
+ info->num_pages_allocated += (1 << allocation_order);
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release a page-table block allocated by
+ * os_allocator_allocate_page_table_block. Recovers the allocation order
+ * from block->handle, returns the pages to the budget, and frees the IO
+ * region (converting the Mali physical base back to a CPU physical base). */
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.h
new file mode 100755
index 00000000000000..150d6333fa0b1f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.c
new file mode 100755
index 00000000000000..1f001805109fc1
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+/* An allocation engine is just the pair of address managers it delegates to:
+ * one for the Mali (GPU) address space, one for the process (CPU) space. */
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address; /* GPU-side address manager */
+ mali_kernel_mem_address_manager * process_address; /* CPU/userspace address manager */
+} memory_engine;
+
+/* Create an allocation engine from the two address managers.
+ * In debug builds, asserts that each manager implements the callbacks this
+ * file later invokes (the process manager must also provide unmap_physical
+ * for the OS-allocation error path). Returns NULL on allocation failure. */
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+/* Free the engine object. The address managers themselves are owned by the
+ * caller and are not touched here. */
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+/* Allocate memory for a descriptor by walking the chain of physical
+ * allocators until one of them finishes the allocation.
+ *
+ * Flow: reserve a Mali address range, optionally reserve a userspace range
+ * (MAP_INTO_USERSPACE), then ask each allocator in turn to commit physical
+ * pages. FINISHED returns success (and links the descriptor onto
+ * tracking_list when given); NONE moves to the next allocator reusing the
+ * current tracker; PARTIAL chains a fresh tracker and continues with the
+ * next allocator; INTERNAL_FAILURE (or PARTIAL with no next allocator, or a
+ * failed tracker calloc) falls through to the cleanup path, which releases
+ * every partial allocation, frees chained trackers, and releases both
+ * address ranges before returning _MALI_OSK_ERR_FAULT. */
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+/* Full release of a descriptor's memory: unmap the Mali page tables (pt1),
+ * then free the physical memory and the userspace mapping (pt2). */
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ mali_allocation_engine_release_pt1_mali_pagetables_unmap(mem_engine, descriptor);
+ mali_allocation_engine_release_pt2_physical_memory_free(mem_engine, descriptor);
+}
+
+/* Phase 1 of release: drop the descriptor's Mali (GPU) address range. */
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Calling: mali_address_manager_release() */
+ /* This function is allowed to be called several times, and it only does the release on the first call. */
+ engine->mali_address->release(descriptor);
+}
+
+/* Phase 2 of release: unlink the descriptor from its tracking list, call the
+ * release hook of every physical-allocation tracker in the chain, free the
+ * chained tracker objects (the first tracker is embedded in the descriptor
+ * and is not freed), and release the userspace mapping if one was made. */
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ /* Remove this from a tracking list in session_data->memory_head */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+/* Map a physical range into the descriptor at the given offset, in both the
+ * process (CPU) space when MAP_INTO_USERSPACE is set and the Mali space.
+ *
+ * When phys is the OS_ALLOCATED_PHYSADDR_MAGIC sentinel, the process
+ * address manager picks the physical page and writes it back through &phys,
+ * which is then adjusted from CPU to Mali address space. Otherwise phys is
+ * a Mali physical address and a CPU-adjusted copy is handed to the process
+ * manager. If the Mali-side map fails after the process-side map succeeded,
+ * the process-side mapping is undone before returning the error. */
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle process address manager first, because we may need them to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust for cpu physical address to mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(2, ("Map failed: %s %d\n", __FUNCTION__, __LINE__));
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+ MALI_DEBUG_PRINT(2, ("Map mali failed: %s %d\n", __FUNCTION__, __LINE__));
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Undo a mapping made by mali_allocation_engine_map_physical: unmap from the
+ * process space when the descriptor was mapped into userspace, and from the
+ * Mali space when that manager implements unmap_physical (it is optional). */
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Mandatory for process_address manager to have an unmap function */
+ engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+
+ /* Optional for mali_address manager to have an unmap function */
+ if ( NULL != engine->mali_address->unmap_physical )
+ {
+ engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+}
+
+
+/* Allocate a page-table block by trying each physical provider in order.
+ * FINISHED returns success; NONE tries the next provider; PARTIAL is an
+ * invalid return for page-table allocation and is treated (after a warning)
+ * like INTERNAL_FAILURE, which aborts the walk. Returns _MALI_OSK_ERR_FAULT
+ * when no provider could supply the block. */
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* try next */
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+/* Debug helper: print the allocator chain in preference order (the order in
+ * which allocators are consulted). Output only in debug builds. */
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+ while ( NULL != active_allocator )
+ {
+ if ( NULL != active_allocator->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+ }
+ active_allocator = active_allocator->next;
+ }
+
+}
+
+/* Sum the reported usage (in bytes) across the allocator chain.
+ * Allocators without a stat callback contribute nothing. */
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
+{
+ u32 sum = 0;
+ while(NULL != allocator)
+ {
+ /* Only count allocators that have set up a stat function. */
+ if(allocator->stat)
+ sum += allocator->stat(allocator);
+
+ allocator = allocator->next;
+ }
+
+ return sum;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.h
new file mode 100755
index 00000000000000..4329f23eb14159
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_memory_engine.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+/** One physical memory (sub-)allocation handed out by a physical memory
+ * allocator; returned to the allocator by calling release(ctx, handle). */
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+ void * ctx; /**< Allocator-private context passed back to release() */
+ void * handle; /**< Allocator-private handle passed back to release() */
+ struct mali_physical_memory_allocation * next; /**< Next sub-allocation in the chain, or NULL */
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+/** A block of physical memory used to back MMU page tables; returned to its
+ * allocator through the embedded release() callback. */
+typedef struct mali_page_table_block
+{
+ void (*release)(struct mali_page_table_block *page_table_block); /**< Return this block to its allocator */
+ void * ctx; /**< Allocator-private context */
+ void * handle; /**< Allocator-private handle */
+ u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragementation */
+ u32 phys_base; /**< Mali physical address */
+ mali_io_address mapping; /**< Mapped address of the block */
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped at */
+ u32 mali_address; /**< The Mali seen address of the memory allocation */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+ u32 cache_settings; /* type: mali_memory_cache_settings, found in <linux/mali/mali_utgard_uk_types.h> Ump DD breaks if we include it...*/
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** A physical memory allocator. Allocators are chained through 'next' and
+ * tried in order of increasing alloc_order by the allocation engine. */
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info); /**< Allocate physical backing for (part of) a descriptor */
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator); /**< Tear down this allocator instance */
+ u32 (*stat)(struct mali_physical_memory_allocator * allocator); /**< Optional usage statistic hook; may be NULL (see mali_allocation_engine_memory_usage) */
+ void * ctx; /**< Allocator-private context */
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next; /**< Next allocator to try, or NULL */
+} mali_physical_memory_allocator;
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] off Offset from the start of range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+ * is used, this requests the function to allocate the physical page
+ * itself, and return it through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] off Offset from the start of range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+int mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+int mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+int mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.c
new file mode 100755
index 00000000000000..9e01464189ab44
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+static _mali_osk_lock_t *time_data_lock;
+
+static u32 num_running_gp_cores;
+static u32 num_running_pp_cores;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u64 period_start_time = 0;
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 last_utilization_gpu = 0 ;
+static u32 last_utilization_gp = 0 ;
+static u32 last_utilization_pp = 0 ;
+
+static u32 mali_utilization_timeout = 1000;
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+
+/*
+ * Timer callback: computes GPU/GP/PP utilization for the elapsed period as a
+ * fraction in parts of 256, publishes it through last_utilization_* and the
+ * optional mali_utilization_callback, then re-arms the timer - unless no work
+ * was observed, in which case the timer is left stopped until the next job
+ * start (see mali_utilization_gp_start/pp_start).
+ */
+static void calculate_gpu_utilization(void* arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized_gpu;
+ u32 work_normalized_gp;
+ u32 work_normalized_pp;
+ u32 period_normalized;
+ u32 utilization_gpu;
+ u32 utilization_gp;
+ u32 utilization_pp;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0)
+ {
+ /*
+ * No work done for this period
+ * - No need to reschedule timer
+ * - Report zero usage
+ */
+ timer_running = MALI_FALSE;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Callback is invoked outside the (IRQ-safe spin) lock */
+ if (NULL != mali_utilization_callback)
+ {
+ struct mali_gpu_utilization_data data = { 0, };
+ mali_utilization_callback(&data);
+ }
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time_gpu != 0)
+ {
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = time_now;
+
+ /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+ if (work_start_time_gp != 0)
+ {
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = time_now;
+ }
+
+ if (work_start_time_pp != 0)
+ {
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = time_now;
+ }
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid dependencies to a 64-bit divider, we shift down the two values
+ * equally first.
+ * We shift the dividend up and possibly the divisor down, making the result X in 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+ work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+ work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift up the dividend or down the divisor by 8
+ * (we could do a combination, but we just use one for simplicity,
+ * but the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF)
+ {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ }
+ else
+ {
+ /*
+ * The divisor is so small that we can shift up the dividend, without loosing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized_gpu <<= 8;
+ work_normalized_gp <<= 8;
+ work_normalized_pp <<= 8;
+ }
+
+ /* NOTE(review): assumes time_period > 0 so period_normalized != 0 here -
+ * confirm the timer cannot fire with a zero-length period */
+ utilization_gpu = work_normalized_gpu / period_normalized;
+ utilization_gp = work_normalized_gp / period_normalized;
+ utilization_pp = work_normalized_pp / period_normalized;
+
+ last_utilization_gpu = utilization_gpu;
+ last_utilization_gp = utilization_gp;
+ last_utilization_pp = utilization_pp;
+
+ /* starting a new period */
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+ period_start_time = time_now;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Re-arm the timer for the next measurement period */
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+
+ if (NULL != mali_utilization_callback)
+ {
+ struct mali_gpu_utilization_data data = { utilization_gpu, utilization_gp, utilization_pp };
+ mali_utilization_callback(&data);
+ }
+}
+
+/* Initialize utilization tracking: read optional platform overrides for the
+ * reporting interval and callback, create the IRQ-safe spinlock that guards
+ * the time accounting, and create (but do not start) the periodic timer.
+ * Returns _MALI_OSK_ERR_FAULT if lock or timer creation fails, else OK. */
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+ struct _mali_osk_device_data data;
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data))
+ {
+ /* Use device specific settings (if defined) */
+ if (0 != data.utilization_interval)
+ {
+ mali_utilization_timeout = data.utilization_interval;
+ }
+ if (NULL != data.utilization_callback)
+ {
+ mali_utilization_callback = data.utilization_callback;
+ }
+ }
+#endif
+
+ if (NULL != mali_utilization_callback)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+ }
+
+ time_data_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+ if (NULL == time_data_lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ num_running_gp_cores = 0;
+ num_running_pp_cores = 0;
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer)
+ {
+ /* Unwind the lock created above before failing */
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Stop the utilization timer across a system suspend. The running flag is
+ * snapshotted and cleared under the lock; the actual timer removal is done
+ * outside the lock, matching the original lock/timer ordering. */
+void mali_utilization_suspend(void)
+{
+ mali_bool was_running;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ was_running = timer_running;
+ timer_running = MALI_FALSE;
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (MALI_TRUE == was_running)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ }
+}
+
+/* Tear down utilization tracking: delete and destroy the timer (skipped if
+ * it was never created), then destroy the time-accounting lock. */
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+/* Mark one GP core as busy. On the 0->1 transition the GP busy-start time is
+ * recorded (and the GPU busy-start time too, if no PP cores are running), and
+ * a new measurement period/timer is started if one is not already running.
+ * Note the lock is released on every path; on the timer-start path it is
+ * released before _mali_osk_timer_add. */
+void mali_utilization_gp_start(void)
+{
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ ++num_running_gp_cores;
+ if (1 == num_running_gp_cores)
+ {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First GP core started, consider GP busy from now and onwards */
+ work_start_time_gp = time_now;
+
+ if (0 == num_running_pp_cores)
+ {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+ else
+ {
+ /* Nothing to do */
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+/* Mark one PP core as busy. Mirror of mali_utilization_gp_start(): on the
+ * 0->1 transition the PP busy-start time is recorded (and the GPU busy-start
+ * time too, if no GP cores are running), and a new measurement period/timer
+ * is started if one is not already running. */
+void mali_utilization_pp_start(void)
+{
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ ++num_running_pp_cores;
+ if (1 == num_running_pp_cores)
+ {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First PP core started, consider PP busy from now and onwards */
+ work_start_time_pp = time_now;
+
+ if (0 == num_running_gp_cores)
+ {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+ }
+
+ /* Start a new period (and timer) if needed */
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = time_now;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+ else
+ {
+ /* Nothing to do */
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+/* Mark one GP core as idle. On the 1->0 transition the elapsed GP busy time
+ * is folded into the accumulator; if no PP cores are running either, the GPU
+ * accumulator is updated as well and the GPU is considered idle. */
+void mali_utilization_gp_end(void)
+{
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ --num_running_gp_cores;
+ if (0 == num_running_gp_cores)
+ {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last GP core ended, consider GP idle from now and onwards */
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = 0;
+
+ if (0 == num_running_pp_cores)
+ {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Mark one PP core as idle. Mirror of mali_utilization_gp_end(): on the
+ * 1->0 transition the elapsed PP busy time is accumulated; if no GP cores
+ * are running either, the GPU busy time is accumulated as well. */
+void mali_utilization_pp_end(void)
+{
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ --num_running_pp_cores;
+ if (0 == num_running_pp_cores)
+ {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last PP core ended, consider PP idle from now and onwards */
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = 0;
+
+ if (0 == num_running_gp_cores)
+ {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Return the combined GPU utilization from the last finished period, in parts of 256. */
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+ return last_utilization_gpu;
+}
+
+/* Return the GP utilization from the last finished period, in parts of 256. */
+u32 _mali_ukk_utilization_gp(void)
+{
+ return last_utilization_gp;
+}
+
+/* Return the PP utilization from the last finished period, in parts of 256. */
+u32 _mali_ukk_utilization_pp(void)
+{
+ return last_utilization_pp;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.h b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.h
new file mode 100755
index 00000000000000..3aa31483b4f4f3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_utilization.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
+{
+ return (NULL != mali_utilization_callback);
+}
+
+/**
+ * Should be called when a job is about to execute a GP job
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a job has completed executing a GP job
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a job is about to execute a PP job
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a job has completed executing a PP job
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_vsync.c b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_vsync.c
new file mode 100755
index 00000000000000..a56389879de0e6
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_kernel_vsync.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Report a vsync event from user space. When CONFIG_MALI400_PROFILING is
+ * enabled, begin/end-wait events are injected directly into the profiling
+ * stream (pid/tid added here, kernel-side, to spare user space a second
+ * syscall); otherwise the event is only debug-logged. Always succeeds. */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event==_MALI_UK_VSYNC_EVENT_END_WAIT)
+ {
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.c b/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.c
new file mode 100755
index 00000000000000..167c40dd69abbf
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /*unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
+/* Allocate and initialize one L2 cache core object from an OS resource:
+ * map its registers, create the command and counter locks, reset the HW to
+ * a known state and register the core in the global core array.
+ * Returns the new core, or NULL on any failure (all partially-created
+ * resources are unwound in reverse order). */
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *cache = NULL;
+ _mali_osk_lock_flags_t lock_flags;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+ if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES)
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL != cache)
+ {
+ cache->core_id = mali_global_num_l2_cache_cores;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->pm_domain = NULL;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE))
+ {
+ cache->command_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+ if (NULL != cache->command_lock)
+ {
+ cache->counter_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
+ if (NULL != cache->counter_lock)
+ {
+ /* Success path: reset HW and publish the core */
+ mali_l2_cache_reset(cache);
+
+ cache->last_invalidated_id = 0;
+
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+ mali_global_num_l2_cache_cores++;
+
+ return cache;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ /* Error unwind: counter lock failed, release command lock */
+ _mali_osk_lock_term(cache->command_lock);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ /* Error unwind: release register mapping */
+ mali_hw_core_delete(&cache->hw_core);
+ }
+
+ /* Error unwind: free the core object itself */
+ _mali_osk_free(cache);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ }
+
+ return NULL;
+}
+
+/* Destroy an L2 cache core: restore HW register defaults, free both locks
+ * and the register mapping, remove the core from the global array (moving
+ * the last entry into the vacated slot to keep the array dense), then free
+ * the object. */
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+
+ /* reset to defaults */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+ _mali_osk_lock_term(cache->counter_lock);
+ _mali_osk_lock_term(cache->command_lock);
+ mali_hw_core_delete(&cache->hw_core);
+
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++)
+ {
+ if (mali_global_l2_cache_cores[i] == cache)
+ {
+ mali_global_l2_cache_cores[i] = NULL;
+ mali_global_num_l2_cache_cores--;
+
+ if (i != mali_global_num_l2_cache_cores)
+ {
+ /* We removed a l2 cache from the middle of the array -- move the last
+ * l2 cache to the current position to close the gap */
+ mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(cache);
+}
+
+/* Return the sequential core id assigned when the core was created. */
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ return cache->core_id;
+}
+
+/* Select performance counter source 0. The chosen source is cached in the
+ * core object so mali_l2_cache_reset() can restore it after a reset; the HW
+ * register is only written if the core is currently powered on.
+ * MALI_HW_CORE_NO_COUNTER disables the counter (writes 0).
+ * Always returns MALI_TRUE. */
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ /* Holds the power state stable while we touch the HW register */
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ cache->counter_src0 = counter;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter)
+ {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_l2_cache_unlock_power_state(cache);
+
+ return MALI_TRUE;
+}
+
+/* Select performance counter source 1. Mirror of
+ * mali_l2_cache_core_set_counter_src0(), operating on SRC1.
+ * Always returns MALI_TRUE. */
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ cache->counter_src1 = counter;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter)
+ {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_l2_cache_unlock_power_state(cache);
+
+ return MALI_TRUE;
+}
+
+/* Return the cached counter source 0 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src0;
+}
+
+/* Return the cached counter source 1 (MALI_HW_CORE_NO_COUNTER if disabled). */
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src1;
+}
+
+/* Read back the current performance counter sources and values.
+ * Caller must hold the PM lock and guarantee the core is powered on.
+ * *src0 / *src1 are always written; *value0 / *value1 are written only when
+ * the corresponding counter source is enabled. */
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ /* Assert cache as well, consistent with the other counter accessors */
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ /* Caller must hold the PM lock and know that we are powered on */
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+ {
+ *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+ {
+ *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Look up a registered L2 cache core by index; NULL when out of range. */
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (index >= mali_global_num_l2_cache_cores)
+ {
+ return NULL;
+ }
+
+ return mali_global_l2_cache_cores[index];
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+ return mali_global_num_l2_cache_cores;
+}
+
+/*
+ * Bring one L2 cache core into a known state: invalidate it, enable it,
+ * program the max-reads limit and re-program any previously selected
+ * performance counter sources (they are lost on power cycle/reset).
+ * Caller must ensure the core is powered.
+ */
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+ /* Restart any performance counters (if enabled) */
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Reset every registered L2 cache core (see mali_l2_cache_reset). */
+void mali_l2_cache_reset_all(void)
+{
+ int i; /* NOTE(review): signed i compared against u32 num_cores; harmless for the small core counts here */
+ u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ for (i = 0; i < num_cores; i++)
+ {
+ mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
+ }
+}
+
+/*
+ * Unconditionally invalidate the whole L2 cache and record the scheduler id
+ * of this invalidation (used by mali_l2_cache_invalidate_conditional to
+ * skip redundant flushes).
+ */
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache)
+ {
+ cache->last_invalidated_id = mali_scheduler_get_new_id();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+}
+
+/*
+ * Invalidate the L2 cache only if no job with an id >= @id has already done
+ * so.  Returns MALI_FALSE when the invalidate was skipped, MALI_TRUE when
+ * it was issued (or when cache is NULL).
+ */
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL != cache)
+ {
+ /* If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started. */
+ /* Signed comparison so the test still behaves sanely across u32 id wrap-around */
+ if (((s32)id) <= ((s32)cache->last_invalidated_id))
+ {
+ return MALI_FALSE;
+ }
+ else
+ {
+ cache->last_invalidated_id = mali_scheduler_get_new_id();
+ }
+
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+ return MALI_TRUE;
+}
+
+/*
+ * Invalidate every registered L2 cache core that is currently powered.
+ * Cores that are powered down are skipped (their contents are lost on
+ * power-down anyway).
+ */
+void mali_l2_cache_invalidate_all(void)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++)
+ {
+ /*additional check*/
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
+ {
+ _mali_osk_errcode_t ret;
+ mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_id();
+ ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+ }
+ }
+ mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]); /* NOTE(review): called even when lock_power_state returned MALI_FALSE -- confirm mali_pm_domain_unlock_state tolerates an unbalanced call */
+ }
+}
+
+/*
+ * Invalidate a list of individual pages (@pages, @num_pages entries) in
+ * every registered L2 cache core that is currently powered.
+ */
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++)
+ {
+ /*additional check*/
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
+ {
+ u32 j;
+ for (j = 0; j < num_pages; j++)
+ {
+ _mali_osk_errcode_t ret;
+ ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
+ }
+ }
+ }
+ mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]); /* NOTE(review): unconditional, same pattern as mali_l2_cache_invalidate_all above */
+ }
+}
+
+/* Pin the L2's PM domain; returns MALI_TRUE if the core is powered. */
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
+{
+ return mali_pm_domain_lock_state(cache->pm_domain);
+}
+
+/* Release the pin taken by mali_l2_cache_lock_power_state. */
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+ return mali_pm_domain_unlock_state(cache->pm_domain); /* NOTE(review): 'return <void expr>' is a C constraint violation (GCC extension); plain call would be cleaner */
+}
+
+/* -------- local helper functions below -------- */
+
+
+/*
+ * Write @val to L2 command register @reg, first busy-waiting (bounded by
+ * loop_count iterations) for the command interface to go idle.  Commands
+ * are serialized via cache->command_lock because the HW silently ignores
+ * commands while busy.  Returns _MALI_OSK_ERR_FAULT if the interface never
+ * went idle, _MALI_OSK_ERR_OK otherwise.
+ */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000; /* upper bound on the busy-wait; no sleep, so keep it polling-cheap */
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+ _mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.h b/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.h
new file mode 100755
index 00000000000000..c75e497e913120
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_l2_cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+#include "mali_pm_domain.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+struct mali_group;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 core_id; /**< Unique core ID */
+ _mali_osk_lock_t *command_lock; /**< Serialize all L2 cache commands */
+ _mali_osk_lock_t *counter_lock; /**< Synchronize L2 cache counter access */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 last_invalidated_id; /**< Scheduler id of the last job that invalidated this L2 */
+ struct mali_pm_domain *pm_domain; /**< PM domain this L2 belongs to, set via mali_l2_cache_set_pm_domain */
+};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+{
+ cache->pm_domain = domain; /* associate the L2 with its PM domain (no locking here) */
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+void mali_l2_cache_reset_all(void);
+
+struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.c b/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.c
new file mode 100755
index 00000000000000..424e521b15c57d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR }; /* single global range; MALI_INVALID_MEM_ADDR in phys_base means "not configured" */
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size) /* register the single allowed external range; fails if one is already set or alignment is wrong */
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base)
+ {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Check restrictions on page alignment */
+ if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+ (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) /* both base and size must be CPU-page aligned */
+ {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_mem_validator.phys_base = start;
+ mali_mem_validator.size = size;
+ MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+ mali_mem_validator.phys_base, mali_mem_validator.size));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size) /* accept [phys_addr, phys_addr+size) only if page aligned and fully inside the registered range */
+{
+ if (phys_addr < (phys_addr + size)) /* Don't allow overflow (or zero size) */
+ {
+ if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK))))
+ {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) && /* NOTE(review): implied by the two neighbouring checks; kept for safety */
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) )
+ {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.h b/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.h
new file mode 100755
index 00000000000000..a9ab6d4ca7118f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_memory.c b/drivers/parrot/gpu/mali400_legacy/common/mali_memory.c
new file mode 100755
index 00000000000000..8cb87adca212c7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_memory.c
@@ -0,0 +1,1295 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_memory.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_session.h"
+#include "mali_l2_cache.h"
+#include "mali_scheduler.h"
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_list.h"
+#include "mali_osk_bitops.h"
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+typedef struct dedicated_memory_info
+{
+ u32 base; /* physical base of the reserved region */
+ u32 size; /* size in bytes of the reserved region */
+ struct dedicated_memory_info * next; /* singly-linked list head: mem_region_registrations */
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if defined(CONFIG_MALI400_UMP)
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine; /* engine used to map/unmap this allocation */
+ mali_memory_allocation * descriptor; /* owning session descriptor */
+ u32 initial_offset; /* offset where the mapping started, needed for unmap */
+ u32 size_allocated; /* total bytes mapped, including optional guard page */
+ ump_dd_handle ump_mem; /* referenced UMP handle, released in ump_memory_release */
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine; /* engine used to map/unmap this allocation */
+ mali_memory_allocation * descriptor; /* owning session descriptor */
+ u32 initial_offset; /* offset where the mapping started, needed for unmap */
+ u32 size; /* bytes mapped */
+} external_mem_allocation;
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * This function will fail if it is unable to put the MMU in stall mode (which
+ * might be the case if a page fault is also being processed).
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+#if defined(CONFIG_MALI400_UMP)
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* CONFIG_MALI400_UMP */
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list; /* node in page_table_cache.partial or .full */
+ u32 * usage_map; /* usage bitmap for the pages in this allocation */
+ u32 usage_count; /* number of pages currently in use */
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial; /* allocations that still have free pages */
+ _mali_osk_list_t full; /* fully-used allocations */
+ /* we never hold on to an empty allocation */
+} mali_mmu_page_table_allocations;
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
+
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+static void mali_mmu_page_table_cache_destroy(void);
+
+static mali_allocation_engine memory_engine = NULL; /* global allocation engine, created in mali_memory_initialize() */
+static mali_physical_memory_allocator * physical_memory_allocators = NULL; /* ordered list, lowest alloc_order first */
+
+static dedicated_memory_info * mem_region_registrations = NULL; /* regions to unrequest in mali_memory_terminate() */
+
+/* Accessor for the global allocation engine (NULL before mali_memory_initialize). */
+mali_allocation_engine mali_mem_get_memory_engine(void)
+{
+ return memory_engine;
+}
+
+/* called during module init */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_PRINT(2, ("Memory system initializing\n"));
+
+ err = mali_mmu_page_table_cache_create();
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err);
+ }
+
+ memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+ MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+void mali_memory_terminate(void)
+{
+ MALI_DEBUG_PRINT(2, ("Memory system terminating\n"));
+
+ mali_mmu_page_table_cache_destroy();
+
+ while ( NULL != mem_region_registrations)
+ {
+ dedicated_memory_info * m;
+ m = mem_region_registrations;
+ mem_region_registrations = m->next;
+ _mali_osk_mem_unreqregion(m->base, m->size);
+ _mali_osk_free(m);
+ }
+
+ while ( NULL != physical_memory_allocators)
+ {
+ mali_physical_memory_allocator * m;
+ m = physical_memory_allocators;
+ physical_memory_allocators = m->next;
+ m->destroy(m);
+ }
+
+ if (NULL != memory_engine)
+ {
+ mali_allocation_engine_destroy(memory_engine);
+ memory_engine = NULL;
+ }
+}
+
+/*
+ * Set up the per-session memory state: descriptor mapping table, memory
+ * lock and the list of live allocations.
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+{
+ MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ session_data->memory_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+ if (NULL == session_data->memory_lock)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data); /* NOTE(review): frees the caller-owned session struct here -- confirm the caller does not free or use it again on this error path */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Init the session's memory allocation list */
+ _MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+ MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
+/*
+ * Called for each descriptor still present in a session's mapping table at
+ * session end: releases the engine-side memory and frees the descriptor.
+ */
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+/*
+ * Tear down all memory belonging to a session.  Unmapping may transiently
+ * fail with _MALI_OSK_ERR_BUSY (MMU could not be stalled, e.g. during page
+ * fault handling); in that case the session lock is dropped, we sleep
+ * briefly and retry the whole list.
+ */
+void mali_memory_session_end(struct mali_session_data *session_data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_BUSY; /* primed so the retry loop runs at least once */
+
+ MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+ err = _MALI_OSK_ERR_OK;
+
+ /* Free all memory engine allocations */
+ if (!_mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->size, descriptor->mapping) /* NOTE(review): 4 args for 3 format specifiers -- descriptor->size is passed twice */
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->memory_lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ err = _mali_ukk_mem_munmap_internal( &unmap_args );
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+ /*
+ * Reason for this;
+ * We where unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ break; /* Will jump back into: "while (err == _MALI_OSK_ERR_BUSY)" */
+ }
+ }
+ }
+ }
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ /* Anything left in the descriptor table was never munmapped; clean it up now */
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it was freed?
+ * In which case, would need to refcount the session data, and free on VMA close
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->memory_lock );
+
+ return;
+}
+
+/*
+ * Register an OS-memory backed allocator of @size bytes and insert it into
+ * the ordered physical_memory_allocators list (alloc_order 1: tried after
+ * any dedicated-memory allocator).
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = 1; /* OS memory has second priority */
+
+ allocator = mali_os_allocator_create(size, 0 /* cpu_usage_adjust */, "Shared Mali GPU memory");
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into ordered list
+ * resources of the same alloc_order will be Last-in-first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while (NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+/*
+ * Register a dedicated physical memory range [start, start+size) for Mali:
+ * request the region from the OS, wrap it in a block allocator and insert
+ * it into the ordered allocator list (alloc_order 0: highest priority).
+ * The region is recorded in mem_region_registrations so
+ * mali_memory_terminate() can release it.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+ dedicated_memory_info * cleanup_data;
+
+ u32 alloc_order = 0; /* Dedicated range has first priority */
+
+ /* do the low level linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory"))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* create generic block allocator object to handle it */
+ allocator = mali_block_allocator_create(start, 0 /* cpu_usage_adjust */, size, "Dedicated Mali GPU memory");
+
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(start, size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* save low level cleanup info */
+ allocator->alloc_order = alloc_order;
+
+ cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+ if (NULL == cleanup_data)
+ {
+ _mali_osk_mem_unreqregion(start, size);
+ allocator->destroy(allocator);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ cleanup_data->base = start;
+ cleanup_data->size = size;
+
+ cleanup_data->next = mem_region_registrations;
+ mem_region_registrations = cleanup_data;
+
+ /* link in the allocator: insertion into ordered list
+ * resources of the same alloc_order will be Last-in-first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/*
+ * Commit callback used when attaching UMP memory: maps every physical block
+ * of the UMP handle (ctx) into the Mali address space described by
+ * @descriptor, advancing *offset as it goes.  Optionally maps an extra
+ * guard page (re-using block 0's address) at the end.  On success fills in
+ * @alloc_info with an ump_mem_allocation handle whose release callback is
+ * ump_memory_release.  Any partial mapping is rolled back on failure.
+ */
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/*
+ * Release callback matching ump_memory_commit: unmaps the Mali-side
+ * mapping, frees the bookkeeping struct and drops the UMP reference taken
+ * when the memory was attached.
+ */
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+
+ ump_dd_reference_release(ump_mem) ;
+ return;
+}
+
+/* User-kernel call: attach a UMP buffer (identified by secure id) into the
+ * session's Mali virtual address space at args->mali_address.
+ * On success args->cookie holds the descriptor-map id used later by
+ * _mali_ukk_release_ump_mem. Size must be non-zero and page aligned.
+ * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_INVALID_ARGS / _MALI_OSK_ERR_FAULT
+ * / _MALI_OSK_ERR_NOMEM on failure (all intermediate resources released).
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ /* Takes a UMP reference; released on every error path below and by
+ * ump_memory_release on the normal teardown path. */
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->cache_settings = (u32) MALI_CACHE_STANDARD;
+ descriptor->lock = session_data->memory_lock;
+
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Physical backing is provided page-by-page by ump_memory_commit. */
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/* User-kernel call: detach a previously attached UMP buffer.
+ * args->cookie is the descriptor-map id returned by _mali_ukk_attach_ump_mem.
+ * Releasing the memory triggers ump_memory_release, which unmaps the range
+ * and drops the UMP reference.
+ * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT for an unknown cookie.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* Lookup validates the cookie before we destroy the mapping entry. */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ descriptor = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != descriptor)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+
+}
+#endif /* CONFIG_MALI400_UMP */
+
+
+/* Physical-memory commit callback for externally supplied memory.
+ * ctx points at a u32[2] set up by _mali_ukk_map_external_mem:
+ * data[0] = physical base address, data[1] = size in bytes.
+ * Maps that range (plus an optional guard page) into the descriptor's Mali
+ * virtual range starting at *offset, advancing *offset by the mapped size.
+ * Installs external_memory_release as the teardown callback.
+ * Returns MALI_MEM_ALLOC_FINISHED or MALI_MEM_ALLOC_INTERNAL_FAILURE.
+ */
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(5, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA.
+ * The guard page is backed by the first physical page of the range. */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release callback paired with external_memory_commit: unmaps the Mali-side
+ * range recorded in the external_mem_allocation and frees it.
+ * @param ctx    unused (commit sets alloc_info->ctx to NULL)
+ * @param handle the external_mem_allocation built by external_memory_commit
+ */
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+/* User-kernel call: copy args->size bytes from args->src to args->dest using
+ * the fault-tolerant OSK copy helper. On return args->size is updated to the
+ * number of bytes actually copied (may be less than requested).
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx)
+ {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Return number of bytes actually copied */
+ args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+ return _MALI_OSK_ERR_OK;
+}
+
+/* User-kernel call: map an externally owned physical range
+ * [args->phys_addr, args->phys_addr + args->size) into the session's Mali
+ * virtual address space at args->mali_address. The range must pass
+ * mali_mem_validation_check, be non-empty and page aligned.
+ * On success args->cookie holds the descriptor-map id consumed by
+ * _mali_ukk_unmap_external_mem.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* info[] is filled below and read by external_memory_commit via ctx;
+ * it only needs to live until mali_allocation_engine_allocate_memory returns. */
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* NOTE(review): u32 values are cast to void* to match %x here — works on
+ * 32-bit targets but the specifier/argument types do not match strictly. */
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->cache_settings = (u32)MALI_CACHE_STANDARD;
+ descriptor->lock = session_data->memory_lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Register the descriptor last so a cookie is only handed out for a
+ * fully established mapping; on failure the mapping is torn down again. */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/* User-kernel call: unmap an external physical range previously mapped by
+ * _mali_ukk_map_external_mem. args->cookie is the descriptor-map id returned
+ * at map time. Returns _MALI_OSK_ERR_FAULT for an unknown cookie.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ void* old_value;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Free the map slot; only release the memory if the slot actually held
+ * a descriptor (guards against a concurrent double-unmap). */
+ old_value = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != old_value)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+}
+
+/* User-kernel call: report the Mali virtual address space layout to user
+ * space. The values below are constants for this driver build.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, causing this layout: (0-1GB unused)(1GB-3G usage by Mali)(3G-4G unused) */
+ MALI_SUCCESS;
+}
+
+/* User-kernel call: terminate the per-session memory interface.
+ * Intentionally a no-op beyond argument validation; real cleanup happens in
+ * mali_memory_session_end.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+/* Address-manager hook: reserve page tables covering the descriptor's Mali
+ * virtual range (descriptor->size, plus one page when the guard-page flag is
+ * set) in the session's page directory.
+ */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ struct mali_session_data *session_data;
+ u32 actual_size;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+
+ actual_size = descriptor->size;
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ actual_size += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return mali_mmu_pagedir_map(session_data->page_directory, descriptor->mali_address, actual_size);
+}
+
+/* Address-manager hook: unmap the descriptor's Mali virtual range from the
+ * session page directory. Idempotent: the first call poisons
+ * descriptor->mali_address so repeated calls return immediately.
+ */
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ const u32 illegal_mali_address = 0xffffffff;
+ struct mali_session_data *session_data;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* It is allowed to call this function several times on the same descriptor.
+ When memory is released we set the illegal_mali_address so we can early out here. */
+ if ( illegal_mali_address == descriptor->mali_address) return;
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ mali_mmu_pagedir_unmap(session_data->page_directory, descriptor->mali_address, descriptor->size);
+
+ descriptor->mali_address = illegal_mali_address ;
+}
+
+/* Address-manager hook: write page-table entries mapping *phys_addr for
+ * `size` bytes at descriptor->mali_address + offset, using the descriptor's
+ * cache settings. Always succeeds (page tables were reserved by
+ * mali_address_manager_allocate).
+ */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ struct mali_session_data *session_data;
+ u32 mali_address;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(phys_addr);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+
+ MALI_DEBUG_PRINT(7, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", *phys_addr, mali_address, size));
+
+ mali_mmu_pagedir_update(session_data->page_directory, mali_address, *phys_addr, size, descriptor->cache_settings);
+
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_mmap for MMU builds.
+ * Allocates physical memory from the chain of configured allocators and maps
+ * it both into the Mali address space (at args->phys_addr, which here carries
+ * the requested Mali virtual address) and into the calling process via
+ * descriptor->process_addr_mapping_info (args->ukk_private).
+ * On success args->mapping is the CPU mapping and args->cookie the descriptor.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ session_data = (struct mali_session_data *)args->ctx;
+
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->cache_settings = (u32) args->cache_settings ;
+ descriptor->lock = session_data->memory_lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ /* We do not FLUSH nor TLB_ZAP on MMAP, since we do both of those on job start*/
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ /* NOTE(review): the kernel pointer is handed out as a u32 cookie —
+ * assumes a 32-bit kernel address space. */
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation, userspace client the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+/* Core of munmap: unmap the descriptor's range from the Mali page tables,
+ * zap the MMU caches of all active groups in the session, then free the
+ * physical memory and the descriptor. Caller must hold descriptor->lock.
+ * May return _MALI_OSK_ERR_BUSY (propagated from the zap), in which case
+ * _mali_ukk_mem_munmap retries.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Unmapping the memory from the mali virtual address space.
+ It is allowed to call this function several times, which might happen if zapping below fails. */
+ mali_allocation_engine_release_pt1_mali_pagetables_unmap(memory_engine, descriptor);
+
+ mali_scheduler_zap_all_active(session_data);
+
+ /* Removes the descriptor from the session's memory list, releases physical memory, releases descriptor */
+ mali_allocation_engine_release_pt2_physical_memory_free(memory_engine, descriptor);
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Handler for unmapping memory for MMU builds.
+ * Wraps _mali_ukk_mem_munmap_internal with the session memory lock and a
+ * retry loop: if the internal call reports _MALI_OSK_ERR_BUSY (MMU could not
+ * be stalled, e.g. during page-fault handling) the lock is dropped, we sleep
+ * briefly, and try again until it succeeds or fails with another code.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+ _mali_osk_errcode_t err;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ err = _MALI_OSK_ERR_BUSY;
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ err = _mali_ukk_mem_munmap_internal( args );
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ /*
+ * Reason for this;
+ * We where unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ }
+ }
+
+ return err;
+}
+
+/* Report total memory usage (in bytes) across all configured physical
+ * memory allocators. */
+u32 _mali_ukk_report_memory_usage(void)
+{
+ return mali_allocation_engine_memory_usage(physical_memory_allocators);
+}
+
+/* Allocate one MMU page-table page from the page table cache.
+ * Pages come from multi-page allocations tracked on two lists: `partial`
+ * (allocations with free pages, searched first) and `full`. When no free
+ * page exists a new allocation is requested from the physical allocators
+ * and its usage is tracked in a per-allocation bitmap.
+ * @param table_page out: Mali physical address of the page
+ * @param mapping    out: CPU mapping of the page (only set on the
+ *                   new-allocation path and on its failure paths; the
+ *                   cache-hit path writes it unconditionally)
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_NOMEM otherwise
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (!_mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ /* One bit per page: ceil(num_pages / BITS_PER_LONG) longs. The previous
+ * expression applied '/' before '&' due to operator precedence, which
+ * over-allocated the bitmap by roughly a factor of BITS_PER_LONG. */
+ alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long));
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* First page of the new allocation is handed out immediately. */
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ if (alloc->num_pages > 1)
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+ }
+ else
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.full);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated: VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
+
+/* Return a page-table page to the cache.
+ * Locates the owning multi-page allocation (searching the partial list, then
+ * the full list), clears its usage bit, zeroes the page contents, and either
+ * frees the whole allocation when it becomes empty or moves a previously-full
+ * allocation back onto the partial list.
+ * @param pa Mali physical address previously returned by mali_mmu_get_table_page
+ */
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+ MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ /* Scrub the page so the next user starts from a clean table. */
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ /* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ else
+ {
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Initialize the global page-table cache: create its lock and empty the
+ * partial/full lists. Returns _MALI_OSK_ERR_FAULT if the lock cannot be
+ * created.
+ */
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+/* Tear down the global page-table cache: free every allocation on both
+ * lists (warning in debug builds about pages still marked in use) and
+ * destroy the cache lock.
+ */
+static void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, !_mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ _mali_osk_lock_term(page_table_cache.lock);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_memory.h b/drivers/parrot/gpu/mali400_legacy/common/mali_memory.h
new file mode 100755
index 00000000000000..3006a82ae56b54
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_memory.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+/** @brief Initialize Mali memory subsystem
+ *
+ * Allocate and initialize internal data structures. Must be called before
+ * allocating Mali memory.
+ *
+ * @return On success _MALI_OSK_ERR_OK, otherwise some error code describing the error.
+ */
+_mali_osk_errcode_t mali_memory_initialize(void);
+
+/** @brief Terminate Mali memory system
+ *
+ * Clean up and release internal data structures.
+ */
+void mali_memory_terminate(void);
+
+/** @brief Start new Mali memory session
+ *
+ * Allocate and prepare session specific memory allocation data. The
+ * session page directory, lock, and descriptor map is set up.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *mali_session_data);
+
+/** @brief Close a Mali memory session
+ *
+ * Release session specific memory allocation related data.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+void mali_memory_session_end(struct mali_session_data *mali_session_data);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+void mali_mmu_release_table_page(u32 pa);
+
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+mali_allocation_engine mali_mem_get_memory_engine(void);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.c b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.c
new file mode 100755
index 00000000000000..110ec952d7aa2a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+	/* Allocate the global helper pages shared by every MMU instance:
+	 * an all-zero page directory (mapped on idle MMUs) and the
+	 * page-fault flush trio (directory + table + data page). */
+	mali_empty_page_directory = mali_allocate_empty_page();
+	if(0 == mali_empty_page_directory)
+	{
+		mali_empty_page_directory = MALI_INVALID_PAGE;
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+	                            &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page))
+	{
+		/* Unwind the empty-directory allocation on partial failure. */
+		mali_free_empty_page(mali_empty_page_directory);
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+	MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+	/* Free global helper pages */
+	mali_free_empty_page(mali_empty_page_directory);
+
+	/* Free the page fault flush pages */
+	mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+	                            &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page);
+}
+
+/* Create an MMU core object for the given resource, attach it to @group,
+ * and (for physical MMUs) reset it and install its IRQ handlers.
+ * Returns the new core, or NULL on any failure (all partial state is
+ * unwound via the nested cleanup ladder below). */
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+	struct mali_mmu_core* mmu = NULL;
+
+	MALI_DEBUG_ASSERT_POINTER(resource);
+
+	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+	mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+	if (NULL != mmu)
+	{
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
+		{
+			if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu))
+			{
+				if (is_virtual)
+				{
+					/* Skip reset and IRQ setup for virtual MMU */
+					return mmu;
+				}
+
+				if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
+				{
+					/* Setup IRQ handlers (which will do IRQ probing if needed) */
+					mmu->irq = _mali_osk_irq_init(resource->irq,
+					                              mali_group_upper_half_mmu,
+					                              group,
+					                              mali_mmu_probe_trigger,
+					                              mali_mmu_probe_ack,
+					                              mmu,
+					                              "mali_mmu_irq_handlers");
+					if (NULL != mmu->irq)
+					{
+						return mmu;
+					}
+					else
+					{
+						MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+					}
+				}
+				/* Reset or IRQ setup failed: detach from the group again. */
+				mali_group_remove_mmu_core(group);
+			}
+			else
+			{
+				MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+			}
+			mali_hw_core_delete(&mmu->hw_core);
+		}
+
+		_mali_osk_free(mmu);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+	}
+
+	return NULL;
+}
+
+/* Destroy an MMU core: tear down its IRQ handlers (if any), unmap its
+ * registers and free the object. Inverse of mali_mmu_create(). */
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+	if (NULL != mmu->irq)
+	{
+		_mali_osk_irq_term(mmu->irq);
+	}
+
+	mali_hw_core_delete(&mmu->hw_core);
+	_mali_osk_free(mmu);
+}
+
+/* Issue ENABLE_PAGING and busy-poll STATUS until the hardware confirms
+ * paging is on (or the fast poll budget is exhausted, which is logged
+ * as an error). */
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+	int i;
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i)
+	{
+		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
+		{
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+	}
+}
+
+/* Request a hardware stall and wait for it to take effect.
+ * Returns MALI_TRUE when the stall is engaged (or implicitly active
+ * because paging is off); MALI_FALSE when the MMU is in page-fault
+ * state or the request timed out. */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+	int i;
+	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
+	{
+		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+		return MALI_TRUE;
+	}
+
+	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+	{
+		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+		return MALI_FALSE;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+	/* Poll until stalled, a page fault intervenes, or paging goes away. */
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i)
+	{
+		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
+		{
+			break;
+		}
+		if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE)))
+		{
+			break;
+		}
+		if (0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
+		{
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+		return MALI_FALSE;
+	}
+
+	if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+	{
+		MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+		return MALI_FALSE;
+	}
+
+	return MALI_TRUE;
+}
+
+/* Release a hardware stall previously engaged with
+ * mali_mmu_enable_stall() and wait for it to clear. A no-op when
+ * paging is disabled; skipped when a page fault is active (the stall
+ * cannot be released in that state). */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+	int i;
+	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+	{
+		MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+		return;
+	}
+	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
+	{
+		MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+		return;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i)
+	{
+		u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) )
+		{
+			break;
+		}
+		if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+		{
+			break;
+		}
+		/* Test the freshly read status here, not the stale pre-command
+		 * mmu_status snapshot; otherwise paging being switched off while
+		 * we poll would never terminate this loop early. */
+		if ( 0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+		{
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
+/* Acknowledge a serviced page fault so the MMU leaves fault mode. */
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+/* Hard-reset the MMU and poll until DTE_ADDR reads back as zero.
+ * The 0xCAFEBABE write is a sanity probe: only the aligned upper bits
+ * are writable, so the debug assert expects 0xCAFEB000 back. */
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+	int i;
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+	MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i)
+	{
+		if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0)
+		{
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Reset the MMU: stall it if possible, hard-reset, unmask the page
+ * fault / bus error interrupts, install the empty page directory and
+ * re-enable paging. Returns _MALI_OSK_ERR_BUSY only if the reset
+ * itself also fails after an unsuccessful stall. */
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	stall_success = mali_mmu_enable_stall(mmu);
+	if (!stall_success)
+	{
+		err = _MALI_OSK_ERR_BUSY;
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+	if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
+	{
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+		/* no session is active, so just activate the empty page directory */
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+		mali_mmu_enable_paging(mmu);
+		err = _MALI_OSK_ERR_OK;
+	}
+	mali_mmu_disable_stall(mmu);
+
+	return err;
+}
+
+/* Flush the whole TLB under a stall. Returns MALI_FALSE when the MMU
+ * was in page-fault state (the ZAP is still issued, but the stall
+ * cannot be released). */
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+	mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+	if (MALI_FALSE == stall_success)
+	{
+		/* False means that it is in Pagefault state. Not possible to disable_stall then */
+		return MALI_FALSE;
+	}
+
+	mali_mmu_disable_stall(mmu);
+	return MALI_TRUE;
+}
+
+/* Fire-and-forget TLB flush for callers that already hold the MMU in a
+ * safe state (no stall handshake). */
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+/* Invalidate the TLB line covering @mali_address (one PDE's worth of
+ * address space) via the ZAP_ONE_LINE register. */
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+/* Point the MMU at a new page directory and zap the TLB. The caller
+ * must already hold the MMU stalled or in page-fault mode, which is
+ * asserted below — DTE_ADDR writes are only honoured in those states. */
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+	/* The MMU must be in stalled or page fault mode, for this writing to work */
+	MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+	                          & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+/* Switch the MMU to a session's page directory under a stall.
+ * Returns MALI_FALSE (directory not switched) if the stall could not
+ * be engaged. */
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+	stall_success = mali_mmu_enable_stall(mmu);
+
+	if ( MALI_FALSE==stall_success ) return MALI_FALSE;
+	mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+	mali_mmu_disable_stall(mmu);
+	return MALI_TRUE;
+}
+
+/* Park an idle MMU on the global empty (all-invalid) page directory. */
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+{
+	mali_bool stall_success;
+
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+	stall_success = mali_mmu_enable_stall(mmu);
+
+	/* This function can only be called when the core is idle, so it could not fail. */
+	MALI_DEBUG_ASSERT(stall_success);
+	MALI_IGNORE(stall_success);
+
+	mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+	mali_mmu_disable_stall(mmu);
+}
+
+/* Install the page-fault flush directory, typically while the MMU sits
+ * in page-fault mode, so outstanding accesses drain harmlessly. */
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+{
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+	stall_success = mali_mmu_enable_stall(mmu);
+	/* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
+	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+	if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.h b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.h
new file mode 100755
index 00000000000000..a1e767967f1d7e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits
+{
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+ MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
+ */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu);
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+/*** Register reading/writing functions ***/
+/* Masked interrupt status (INT_STATUS register). */
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+/* Raw (unmasked) interrupt status (INT_RAWSTAT register). */
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+/* Disable delivery of all MMU interrupts by clearing INT_MASK. */
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+/* Current MMU state bits (see mali_mmu_status_bits). */
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+/* Logical (GPU virtual) address of the most recent page fault. */
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+	return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.c b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.c
new file mode 100755
index 00000000000000..e834538137af0e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+/* Allocate a zero-filled page table page for use as an empty (all
+ * entries invalid) directory. Returns its GPU address, or 0 on
+ * failure — callers (e.g. mali_mmu_initialize) test for 0. */
+u32 mali_allocate_empty_page(void)
+{
+	_mali_osk_errcode_t err;
+	mali_io_address mapping;
+	u32 address;
+
+	if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping))
+	{
+		/* Allocation failed */
+		return 0;
+	}
+
+	MALI_DEBUG_ASSERT_POINTER( mapping );
+
+	err = fill_page(mapping, 0);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		mali_mmu_release_table_page(address);
+		/* The page was just released: return 0 to signal failure rather
+		 * than handing the caller a dangling GPU address. */
+		address = 0;
+	}
+	return address;
+}
+
+/* Release a page obtained from mali_allocate_empty_page().
+ * Safe to call with MALI_INVALID_PAGE (no-op). */
+void mali_free_empty_page(u32 address)
+{
+	if (MALI_INVALID_PAGE != address)
+	{
+		mali_mmu_release_table_page(address);
+	}
+}
+
+/* Build the three-page hierarchy used to drain accesses after a page
+ * fault: every PDE of the directory points at one page table, every
+ * PTE of that table points (read/write) at a single zeroed data page.
+ * On partial failure the already-allocated pages are released and the
+ * corresponding outputs set to MALI_INVALID_PAGE. */
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+	_mali_osk_errcode_t err;
+	mali_io_address page_directory_mapping;
+	mali_io_address page_table_mapping;
+	mali_io_address data_page_mapping;
+
+	err = mali_mmu_get_table_page(data_page, &data_page_mapping);
+	if (_MALI_OSK_ERR_OK == err)
+	{
+		err = mali_mmu_get_table_page(page_table, &page_table_mapping);
+		if (_MALI_OSK_ERR_OK == err)
+		{
+			err = mali_mmu_get_table_page(page_directory, &page_directory_mapping);
+			if (_MALI_OSK_ERR_OK == err)
+			{
+				fill_page(data_page_mapping, 0);
+				fill_page(page_table_mapping, *data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+				fill_page(page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+				MALI_SUCCESS;
+			}
+			mali_mmu_release_table_page(*page_table);
+			*page_table = MALI_INVALID_PAGE;
+		}
+		mali_mmu_release_table_page(*data_page);
+		*data_page = MALI_INVALID_PAGE;
+	}
+	return err;
+}
+
+/* Release the fault-flush pages created above; each output is reset to
+ * MALI_INVALID_PAGE so a double call is harmless. */
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+	if (MALI_INVALID_PAGE != *page_directory)
+	{
+		mali_mmu_release_table_page(*page_directory);
+		*page_directory = MALI_INVALID_PAGE;
+	}
+
+	if (MALI_INVALID_PAGE != *page_table)
+	{
+		mali_mmu_release_table_page(*page_table);
+		*page_table = MALI_INVALID_PAGE;
+	}
+
+	if (MALI_INVALID_PAGE != *data_page)
+	{
+		mali_mmu_release_table_page(*data_page);
+		*data_page = MALI_INVALID_PAGE;
+	}
+}
+
+/* Fill an entire 4 KiB table page with the 32-bit value @data using
+ * relaxed I/O writes, then issue one memory barrier to publish it. */
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+	int i;
+	MALI_DEBUG_ASSERT_POINTER( mapping );
+
+	for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+	{
+		_mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+	}
+	_mali_osk_mem_barrier();
+	MALI_SUCCESS;
+}
+
+/* Ensure page tables exist for the GPU virtual range
+ * [mali_address, mali_address + size): allocate a table for every PDE
+ * in range that is not yet present, and bump a per-PDE usage count for
+ * ones that are. PTEs themselves are filled later by
+ * mali_mmu_pagedir_update(). */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+	_mali_osk_errcode_t err;
+	mali_io_address pde_mapping;
+	u32 pde_phys;
+	int i;
+
+	for(i = first_pde; i <= last_pde; i++)
+	{
+		if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT))
+		{
+			/* Page table not present */
+			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+			MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+			err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+			if(_MALI_OSK_ERR_OK != err)
+			{
+				/* NOTE(review): tables allocated for earlier PDEs in this
+				 * range are not rolled back here; presumably the caller
+				 * unmaps on failure — confirm against callers. */
+				MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+				return err;
+			}
+			pagedir->page_entries_mapped[i] = pde_mapping;
+
+			/* Update PDE, mark as present */
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
+			                                pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+			MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+			pagedir->page_entries_usage_count[i] = 1;
+		}
+		else
+		{
+			pagedir->page_entries_usage_count[i]++;
+		}
+	}
+	_mali_osk_write_mem_barrier();
+
+	MALI_SUCCESS;
+}
+
+/* Zero the PTEs covering [mali_address, mali_address + size) within a
+ * single page table (the caller guarantees the range does not cross a
+ * 4 MiB boundary). */
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+	int i;
+	const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+	const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+	for (i = first_pte; i <= last_pte; i++)
+	{
+		_mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+	}
+}
+
+/* Undo a mali_mmu_pagedir_map() of [mali_address, mali_address + size):
+ * drop each covered PDE's usage count, freeing a page table when its
+ * count reaches zero (and clearing its PDE), otherwise zeroing just
+ * the PTEs in range. Finishes by invalidating the touched physical
+ * pages in the GPU L2 cache — at most two partially-used tables plus
+ * the directory page itself, hence the size-3 scratch array; anything
+ * beyond that falls back to a full L2 invalidate. */
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+	const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+	const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+	u32 left = size;
+	int i;
+	mali_bool pd_changed = MALI_FALSE;
+	u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+	u32 num_pages_inv = 0;
+	mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+	/* For all page directory entries in range. */
+	for (i = first_pde; i <= last_pde; i++)
+	{
+		u32 size_in_pde, offset;
+
+		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+		MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+		/* Offset into page table, 0 if mali_address is 4MiB aligned */
+		offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+		if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset)
+		{
+			size_in_pde = left;
+		}
+		else
+		{
+			size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+		}
+
+		pagedir->page_entries_usage_count[i]--;
+
+		/* If entire page table is unused, free it */
+		if (0 == pagedir->page_entries_usage_count[i])
+		{
+			u32 page_address;
+			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+			/* last reference removed, no need to zero out each PTE */
+
+			page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+			pagedir->page_entries_mapped[i] = NULL;
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+
+			mali_mmu_release_table_page(page_address);
+			pd_changed = MALI_TRUE;
+		}
+		else
+		{
+			MALI_DEBUG_ASSERT(num_pages_inv < 2);
+			if (num_pages_inv < 2)
+			{
+				pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+				num_pages_inv++;
+			}
+			else
+			{
+				invalidate_all = MALI_TRUE;
+			}
+
+			/* If part of the page table is still in use, zero the relevant PTEs */
+			mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+		}
+
+		left -= size_in_pde;
+		mali_address += size_in_pde;
+	}
+	_mali_osk_write_mem_barrier();
+
+	/* L2 pages invalidation */
+	if (MALI_TRUE == pd_changed)
+	{
+		MALI_DEBUG_ASSERT(num_pages_inv < 3);
+		if (num_pages_inv < 3)
+		{
+			pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+			num_pages_inv++;
+		}
+		else
+		{
+			invalidate_all = MALI_TRUE;
+		}
+	}
+
+	if (invalidate_all)
+	{
+		mali_l2_cache_invalidate_all();
+	}
+	else
+	{
+		mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Allocate a page directory object plus its backing table page, zeroed
+ * so every PDE starts out not-present. Returns NULL on allocation
+ * failure. */
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+	struct mali_page_directory *pagedir;
+
+	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+	if(NULL == pagedir)
+	{
+		return NULL;
+	}
+
+	if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped))
+	{
+		_mali_osk_free(pagedir);
+		return NULL;
+	}
+
+	/* Zero page directory */
+	fill_page(pagedir->page_directory_mapped, 0);
+
+	return pagedir;
+}
+
+/* Free a page directory: walk all 1024 PDEs, releasing every page
+ * table still marked present, then release the directory page itself
+ * and the object. */
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+	const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+	int i;
+
+	/* Free referenced page tables and zero PDEs. */
+	for (i = 0; i < num_page_table_entries; i++)
+	{
+		if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+		{
+			/* Strip the flag bits to recover the table's physical address. */
+			mali_mmu_release_table_page( _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+			_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+		}
+	}
+	_mali_osk_write_mem_barrier();
+
+	/* Free the page directory page. */
+	mali_mmu_release_table_page(pagedir->page_directory);
+
+	_mali_osk_free(pagedir);
+}
+
+
+/* Write PTEs mapping GPU virtual range [mali_address, +size) to the
+ * contiguous physical range starting at @phys_address, one 4 KiB page
+ * at a time. The page tables must already exist (see
+ * mali_mmu_pagedir_map). @cache_settings selects the PTE flag set. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, mali_memory_cache_settings cache_settings)
+{
+	u32 end_address = mali_address + size;
+	u32 permission_bits;
+
+	switch ( cache_settings )
+	{
+	case MALI_CACHE_GP_READ_ALLOCATE:
+		MALI_DEBUG_PRINT(5, ("Map L2 GP_Read_allocate\n"));
+		permission_bits = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+		break;
+
+	case MALI_CACHE_STANDARD:
+		MALI_DEBUG_PRINT(5, ("Map L2 Standard\n"));
+		/*falltrough */
+	default:
+		/* Unknown settings are reported but still mapped as standard R/W. */
+		if ( MALI_CACHE_STANDARD != cache_settings) MALI_PRINT_ERROR(("Wrong cache settings\n"));
+		permission_bits = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
+	}
+
+	/* Map physical pages into MMU page tables */
+	for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE)
+	{
+		MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+		_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+		                                MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+		                                phys_address | permission_bits);
+	}
+	_mali_osk_write_mem_barrier();
+}
+
+/* Physical address stored in PDE @index, with the flag bits masked off. */
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+	return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+/* For instrumented */
+struct dump_info
+{
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+{
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Dump the directory page followed by every mapped page table (all
+ * 1024 PDE slots are scanned; only non-NULL mappings are dumped). */
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+	MALI_DEBUG_ASSERT_POINTER(pagedir);
+	MALI_DEBUG_ASSERT_POINTER(info);
+
+	if (NULL != pagedir->page_directory_mapped)
+	{
+		int i;
+
+		MALI_CHECK_NO_ERROR(
+		    dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+		);
+
+		for (i = 0; i < 1024; i++)
+		{
+			if (NULL != pagedir->page_entries_mapped[i])
+			{
+				MALI_CHECK_NO_ERROR(
+				    dump_page(pagedir->page_entries_mapped[i],
+				              _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+				                                     i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+				);
+			}
+		}
+	}
+
+	MALI_SUCCESS;
+}
+
+/* Emit the register-write sequence needed to replay this directory:
+ * set DTE_ADDR (0x00), then COMMAND (0x08) = zap cache, enable paging. */
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+	MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+	                             "set the page directory address", info));
+	MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+	MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+	MALI_SUCCESS;
+}
+
+/* UKK call: run the dump in sizing mode (NULL buffer) and report the
+ * total byte count the caller must allocate for a full dump. */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+	struct dump_info info = { 0, 0, 0, NULL };
+	struct mali_session_data * session_data;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = (struct mali_session_data *)(args->ctx);
+
+	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+	args->size = info.register_writes_size + info.page_table_dump_size;
+	MALI_SUCCESS;
+}
+
+/* UKK call: dump the session's MMU register replay sequence and page
+ * tables into the caller-supplied buffer (sized via the query call
+ * above). args->register_writes / page_table_dump are set to where
+ * each section begins inside the buffer. */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+	struct dump_info info = { 0, 0, 0, NULL };
+	struct mali_session_data * session_data;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+	MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = (struct mali_session_data *)(args->ctx);
+
+	info.buffer_left = args->size;
+	info.buffer = args->buffer;
+
+	args->register_writes = info.buffer;
+	MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+	args->page_table_dump = info.buffer;
+	MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+	args->register_writes_size = info.register_writes_size;
+	args->page_table_dump_size = info.page_table_dump_size;
+
+	MALI_SUCCESS;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.h b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.h
new file mode 100755
index 00000000000000..ee71f9e5736c9f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_mmu_page_directory.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Bit flags stored in MMU page directory/table entries (PDE/PTE).
+ */
+typedef enum mali_mmu_entry_flags
+{
+	MALI_MMU_FLAGS_PRESENT = 0x01, /**< entry is valid (maps a page/table) */
+	MALI_MMU_FLAGS_READ_PERMISSION = 0x02, /**< reads permitted through this entry */
+	MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04, /**< writes permitted through this entry */
+	MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8, /**< use the cache flags below instead of defaults */
+	MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10, /**< writes are cacheable */
+	MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20, /**< allocate cache line on write */
+	MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40, /**< writes may be buffered */
+	MALI_MMU_FLAGS_READ_CACHEABLE = 0x80, /**< reads are cacheable */
+	MALI_MMU_FLAGS_READ_ALLOCATE = 0x100, /**< allocate cache line on read */
+	MALI_MMU_FLAGS_MASK = 0x1FF, /**< union of all flag bits above */
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+
+struct mali_page_directory
+{
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+	u32   page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
+
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(void);
+void mali_free_empty_page(u32 address);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_osk.h b/drivers/parrot/gpu/mali400_legacy/common/mali_osk.h
new file mode 100755
index 00000000000000..1a1c37191aae7d
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_osk.h
@@ -0,0 +1,1811 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_t_struct _mali_osk_wq_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)( void * arg );
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * Holding more than one lock of the same order at the same time is not
+ * allowed.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCK_ORDER_LAST = 0,
+
+ _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+ _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
+ _MALI_OSK_LOCK_ORDER_PMU,
+
+ _MALI_OSK_LOCK_ORDER_FIRST
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other that RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner(l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock );
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource
+{
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data );
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work );
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work );
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work );
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+
+/** @} */ /* end group _mali_osk_wq */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to a ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSes bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0 the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em absolute time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim );
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+/** @} */ /* end group _mali_osk_math */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) );
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Disable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_disable(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * When function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ */
+void _mali_osk_pm_dev_ref_dec(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device.
+ *
+ * Will leave the cores powered off if they are already powered off.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ *
+ * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
+
+
+/** @brief Release the reference to the power manager system for the Mali device.
+ *
+ * When reference count reach zero, the cores can be off.
+ *
+ * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_osk_bitops.h b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_bitops.h
new file mode 100755
index 00000000000000..84af3132830d6a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @brief Clear bit \a bit (0..31) in the single word pointed to by \a addr. */
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	/* Use an unsigned constant: (1 << 31) is a signed-int left shift into the
+	 * sign bit, which is undefined behaviour in C; (1u << 31) is well defined. */
+	(*addr) &= ~(1u << bit);
+}
+
+/** @brief Set bit \a bit (0..31) in the single word pointed to by \a addr. */
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	/* Unsigned constant avoids the undefined signed shift (1 << 31). */
+	(*addr) |= (1u << bit);
+}
+
+/** @brief Test bit \a bit (0..31) of \a value; non-zero if set, zero if clear. */
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	/* Unsigned constant avoids the undefined signed shift (1 << 31). */
+	return value & (1u << bit);
+}
+
+/** @brief Find the index of the least-significant zero bit of \a value.
+ *
+ * @return bit index 0..31 of the first zero bit, or -1 if \a value is all 1s
+ * (assuming _mali_osk_clz(0) yields 32 — confirm against the OS implementation).
+ */
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+	u32 inverted = ~value;   /* first zero of 'value' becomes the lowest 1 here */
+	u32 lowest_set;
+	u32 leading_zeros;
+
+	/* Isolate the lowest set bit using the two's-complement identity
+	 * x & -x: only the least significant 1 bit survives. That bit sits at
+	 * the position of the first zero in the original value. If 'value'
+	 * was all 1s, 'inverted' is 0 and the result is 0. */
+	lowest_set = inverted & (u32)-inverted;
+
+	/* Convert the isolated bit's position to an index via count-leading-zeros. */
+	leading_zeros = _mali_osk_clz( lowest_set );
+
+	return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+	/* Split the global bit number into a word index and a bit-in-word index. */
+	u32 word_index  = nr >> 5;
+	u32 bit_in_word = nr & 0x1F;
+
+	_mali_internal_clear_bit( bit_in_word, addr + word_index );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+	/* Split the global bit number into a word index and a bit-in-word index. */
+	u32 word_index  = nr >> 5;
+	u32 bit_in_word = nr & 0x1F;
+
+	_mali_internal_set_bit( bit_in_word, addr + word_index );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+	/* Split the global bit number into a word index and a bit-in-word index. */
+	u32 word_index  = nr >> 5;
+	u32 bit_in_word = nr & 0x1F;
+
+	return _mali_internal_test_bit( bit_in_word, addr[word_index] );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+	u32 bit_nr = 0;
+
+	/* Scan one 32-bit word at a time until a zero bit is found or the
+	 * search range is exhausted. */
+	while ( bit_nr < maxbit )
+	{
+		int found = _mali_internal_find_first_zero_bit( *addr );
+
+		/* A non-negative result is the bit's index within this word. */
+		if ( found >= 0 )
+		{
+			bit_nr += (u32)found;
+			break;
+		}
+
+		/* Whole word was 1s: advance to the next word. */
+		bit_nr += 32;
+		++addr;
+	}
+
+	/* Clamp: report maxbit when no zero bit lies inside the range (the hit
+	 * may fall in the last word but beyond maxbit). */
+	return ( bit_nr < maxbit ) ? bit_nr : maxbit;
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_osk_list.h b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_list.h
new file mode 100755
index 00000000000000..531b0acfc41bdf
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_list.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	/* Splice new_entry in between the two (adjacent) elements prev and next. */
+	new_entry->prev = prev;
+	new_entry->next = next;
+	prev->next = new_entry;
+	next->prev = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	/* Link prev and next directly, unlinking whatever lay between them. */
+	prev->next = next;
+	next->prev = prev;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+	/* A self-referencing element is the canonical empty circular list. */
+	list->prev = list;
+	list->next = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+	/* Insert immediately after the head: new_entry becomes the first element. */
+	__mali_osk_list_add( new_entry, list, list->next );
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+	/* Insert immediately before the head: new_entry becomes the last element. */
+	__mali_osk_list_add( new_entry, list->prev, list );
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+	/* Unlink from the neighbours; the element's own pointers are left stale,
+	 * so it must be re-initialized before any further use. */
+	__mali_osk_list_del( list->prev, list->next );
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+	/* Unlink, then reset to a valid empty list so the element can be reused. */
+	__mali_osk_list_del( list->prev, list->next );
+	_mali_osk_list_init( list );
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one whose head element points back to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+	/* The list is empty exactly when the head points back at itself. */
+	return list == list->next;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+	/* Detach from the current list, then splice in right after the new head. */
+	__mali_osk_list_del( move_entry->prev, move_entry->next );
+	__mali_osk_list_add( move_entry, list, list->next );
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list( _mali_osk_list_t *old_list, _mali_osk_list_t *new_list )
+{
+	MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+
+	/* Nothing to transfer: new_list stays empty. */
+	if (_mali_osk_list_empty(old_list))
+	{
+		return;
+	}
+
+	/* Point the new head at the old list's first and last elements, and
+	 * re-point those elements back at the new head. */
+	new_list->next = old_list->next;
+	new_list->prev = old_list->prev;
+	new_list->next->prev = new_list;
+	new_list->prev->next = new_list;
+
+	/* Leave the old head as a valid empty list. */
+	_mali_osk_list_init(old_list);
+}
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_osk_mali.h b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_mali.h
new file mode 100755
index 00000000000000..33acf437b05eed
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_mali.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+struct _mali_osk_device_data
+{
+ /* Dedicated GPU memory range (physical). */
+ u32 dedicated_mem_start;
+ u32 dedicated_mem_size;
+
+ /* Shared GPU memory */
+ u32 shared_mem_size;
+
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ u32 fb_start;
+ u32 fb_size;
+
+ /* Report GPU utilization in this interval (specified in ms) */
+ u32 utilization_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+ /*
+ * Mali PMU switch delay.
+ * Only needed if the power gates are connected to the PMU in a high fanout
+ * network. This value is the number of Mali clock cycles it takes to
+ * enable the power gates and turn on the power mesh.
+ * This value will have no effect if a daisy chain implementation is used.
+ */
+ u32 pmu_switch_delay;
+};
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return 0 if resources are found, otherwise the Mali GPU component with lowest address.
+ */
+u32 _mali_osk_resource_base_address(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+
+/** @brief Copy as much data as possible from src to dest, do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_osk_profiling.h b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_profiling.h
new file mode 100755
index 00000000000000..18eb0d8633a132
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_osk_profiling.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+/* Maximum number of profiling events the capture buffer may hold. */
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+/* Sentinel value meaning "no hardware counter selected".
+ * Fix: the original definition contained a stray '=' so the macro expanded
+ * to "= ((u32)-1)", which is a syntax error at every use site. */
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+
+/**
+ * Add an profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+ /* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pm.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pm.c
new file mode 100755
index 00000000000000..2a7f3ed10f3d90
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pm.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+static mali_bool mali_power_on = MALI_FALSE;
+
+/* Hook the driver into the OS runtime-PM framework. Always succeeds. */
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+	_mali_osk_pm_dev_enable();
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down all PM domains, then detach from the OS runtime-PM framework. */
+void mali_pm_terminate(void)
+{
+	mali_pm_domain_terminate();
+	_mali_osk_pm_dev_disable();
+}
+
+/* Reset GPU after power up: hardware state is undefined after a power loss,
+ * so every L2 cache and every group must be re-initialized before use. */
+static void mali_pm_reset_gpu(void)
+{
+	/* Reset all L2 caches */
+	mali_l2_cache_reset_all();
+
+	/* Reset all groups */
+	mali_scheduler_reset_all_groups();
+}
+
+/* System (OS) suspend handler: quiesce both schedulers and the utilization
+ * tracker, then cut power to all groups and record that power is off. */
+void mali_pm_os_suspend(void)
+{
+	MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+	mali_gp_scheduler_suspend();
+	mali_pp_scheduler_suspend();
+	mali_utilization_suspend();
+	mali_group_power_off();
+	mali_power_on = MALI_FALSE;
+}
+
+/* System (OS) resume handler: reset and power the GPU back on if power was
+ * lost while suspended, then restart both job schedulers.
+ * NOTE(review): nearly identical to mali_pm_runtime_resume() apart from the
+ * scheduler resume calls — candidate for a shared helper. */
+void mali_pm_os_resume(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+	mali_bool do_reset = MALI_FALSE;
+
+	MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+	/* If power was lost, hardware state is stale and must be reset. */
+	if (MALI_TRUE != mali_power_on)
+	{
+		do_reset = MALI_TRUE;
+	}
+
+	if (NULL != pmu)
+	{
+		mali_pmu_reset(pmu);
+	}
+
+	mali_power_on = MALI_TRUE;
+	/* Publish mali_power_on before touching the hardware. */
+	_mali_osk_write_mem_barrier();
+
+	if (do_reset)
+	{
+		mali_pm_reset_gpu();
+		mali_group_power_on();
+	}
+
+	mali_gp_scheduler_resume();
+	mali_pp_scheduler_resume();
+}
+
+/* Runtime-PM suspend callback: power off all groups and record the loss of
+ * power. Unlike OS suspend, the schedulers are not stopped here. */
+void mali_pm_runtime_suspend(void)
+{
+	MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+	mali_group_power_off();
+	mali_power_on = MALI_FALSE;
+}
+
+/* Runtime-PM resume callback: reset the PMU and, if power was actually lost,
+ * reset GPU state and power the groups back on. */
+void mali_pm_runtime_resume(void)
+{
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+	mali_bool do_reset = MALI_FALSE;
+
+	MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+
+	if (MALI_TRUE != mali_power_on)
+	{
+		do_reset = MALI_TRUE;
+	}
+
+	if (NULL != pmu)
+	{
+		mali_pmu_reset(pmu);
+	}
+
+	mali_power_on = MALI_TRUE;
+	/* Publish mali_power_on before touching the hardware. */
+	_mali_osk_write_mem_barrier();
+
+	if (do_reset)
+	{
+		mali_pm_reset_gpu();
+		mali_group_power_on();
+	}
+}
+
+/* Mark the GPU as powered without going through a resume path
+ * (e.g. at driver start-up when power is already applied). */
+void mali_pm_set_power_is_on(void)
+{
+	mali_power_on = MALI_TRUE;
+}
+
+/* @return MALI_TRUE while the GPU is powered, MALI_FALSE after suspend. */
+mali_bool mali_pm_is_power_on(void)
+{
+	return mali_power_on;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pm.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pm.h
new file mode 100755
index 00000000000000..1c72aaf8e63344
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+
+/* Callback functions registered for the runtime PMM system */
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+void mali_pm_set_power_is_on(void);
+mali_bool mali_pm_is_power_on(void);
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.c
new file mode 100755
index 00000000000000..95f98b67850bbb
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_pm.h"
+#include "mali_group.h"
+
+#define MALI_PM_DOMAIN_MAX_DOMAINS 7
+
+static struct mali_pm_domain *mali_pm_domains[MALI_PM_DOMAIN_MAX_DOMAINS] = { NULL, };
+
+/* Acquire the per-domain spinlock protecting state and use_count. */
+static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+{
+	_mali_osk_lock_wait(domain->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Release the per-domain spinlock taken by mali_pm_domain_lock(). */
+static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+{
+	_mali_osk_lock_signal(domain->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Record the domain's logical power state. Caller must hold domain->lock. */
+MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
+{
+	domain->state = state;
+}
+
+/** Allocate and register the PM domain for slot @id.
+ *
+ * The domain starts in the ON state with use_count 0; @pmu_mask is the set
+ * of PMU power-domain bits it controls. Returns the new domain, or NULL on
+ * allocation or lock-initialization failure. */
+struct mali_pm_domain *mali_pm_domain_create(u32 id, u32 pmu_mask)
+{
+	struct mali_pm_domain* domain;
+
+	MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+
+	domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+	if (NULL != domain)
+	{
+		_mali_osk_lock_flags_t flags = _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+		domain->lock = _mali_osk_lock_init(flags, 0, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
+		if (NULL == domain->lock)
+		{
+			_mali_osk_free(domain);
+			return NULL;
+		}
+
+		domain->state = MALI_PM_DOMAIN_ON;
+		domain->pmu_mask = pmu_mask;
+		domain->use_count = 0;
+		domain->group_list = NULL;
+		domain->group_count = 0;
+		domain->l2 = NULL;
+
+		/* NOTE(review): an existing domain in this slot would be leaked;
+		 * callers are expected to use each id at most once. */
+		MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_MAX_DOMAINS > id);
+		mali_pm_domains[id] = domain;
+
+		return domain;
+	}
+	else
+	{
+		MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+	}
+
+	return NULL;
+}
+
+/* Free one PM domain and its lock. NULL-safe (used during bulk teardown). */
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+	if (NULL == domain)
+	{
+		return;
+	}
+	_mali_osk_lock_term(domain->lock);
+
+	_mali_osk_free(domain);
+}
+
+/* Delete every registered PM domain.
+ * NOTE(review): the global slots are not reset to NULL here; safe only
+ * because this runs once at driver shutdown. */
+void mali_pm_domain_terminate(void)
+{
+	int i;
+
+	/* Delete all domains */
+	for (i = 0; i < MALI_PM_DOMAIN_MAX_DOMAINS; i++)
+	{
+		mali_pm_domain_delete(mali_pm_domains[i]);
+	}
+}
+
+/* Attach @group to the domain registered in slot @id by pushing it onto the
+ * domain's singly linked group list (linked via group->pm_domain_list).
+ * Takes an initial domain reference because the group starts out enabled.
+ * No-op when no domain exists for @id. */
+void mali_pm_domain_add_group(u32 id, struct mali_group *group)
+{
+	struct mali_pm_domain *domain = mali_pm_domain_get(id);
+	struct mali_group *next;
+
+	if (NULL == domain) return;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	/* Assume domain is on and group is enabled initially. */
+	mali_pm_domain_ref_get(domain);
+
+	++domain->group_count;
+	next = domain->group_list;
+
+	domain->group_list = group;
+
+	group->pm_domain_list = next;
+
+	mali_group_set_pm_domain(group, domain);
+}
+
+/* Attach an L2 cache to the domain in slot @id. Each domain may own at most
+ * one L2 cache (asserted). No-op when no domain exists for @id. */
+void mali_pm_domain_add_l2(u32 id, struct mali_l2_cache_core *l2)
+{
+	struct mali_pm_domain *domain = mali_pm_domain_get(id);
+
+	if (NULL == domain) return;
+
+	MALI_DEBUG_ASSERT(NULL == domain->l2);
+	MALI_DEBUG_ASSERT(NULL != l2);
+
+	domain->l2 = l2;
+
+	mali_l2_cache_set_pm_domain(l2, domain);
+}
+
+/* Look up the PM domain registered for slot @id; NULL if none was created. */
+struct mali_pm_domain *mali_pm_domain_get(u32 id)
+{
+	MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_MAX_DOMAINS > id);
+
+	return mali_pm_domains[id];
+}
+
+/* Take a reference on @domain, powering its PMU domains up on the 0 -> 1
+ * transition (or whenever the domain is not already ON). A power-up failure
+ * is logged but the state is still marked ON. NULL-safe. */
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+	if (NULL == domain) return;
+
+	mali_pm_domain_lock(domain);
+	++domain->use_count;
+
+	if (MALI_PM_DOMAIN_ON != domain->state)
+	{
+		/* Power on */
+		struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+		MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+
+		if (NULL != pmu)
+		{
+			_mali_osk_errcode_t err;
+
+			err = mali_pmu_power_up(pmu, domain->pmu_mask);
+
+			/* _MALI_OSK_ERR_BUSY means the GPU as a whole is off;
+			 * the PMU will be reprogrammed on resume. */
+			if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err)
+			{
+				MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
+				                  domain->pmu_mask));
+			}
+		}
+		mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
+	}
+	else
+	{
+		MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+	}
+
+	mali_pm_domain_unlock(domain);
+}
+
+/* Drop a reference on @domain, powering its PMU domains down when the count
+ * reaches zero. A power-down failure is logged but the state is still
+ * marked OFF. NULL-safe. */
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+	if (NULL == domain) return;
+
+	mali_pm_domain_lock(domain);
+	--domain->use_count;
+
+	if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state)
+	{
+		/* Power off */
+		struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+		MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+
+		mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+
+		if (NULL != pmu)
+		{
+			_mali_osk_errcode_t err;
+
+			err = mali_pmu_power_down(pmu, domain->pmu_mask);
+
+			/* _MALI_OSK_ERR_BUSY means the GPU as a whole is off. */
+			if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err)
+			{
+				MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
+				                  domain->pmu_mask));
+			}
+		}
+	}
+	mali_pm_domain_unlock(domain);
+}
+
+/* Pin the current power state without forcing a power-on: takes a domain
+ * reference and a runtime-PM reference, neither of which powers anything up.
+ * @return MALI_TRUE only if both the domain and the device are powered.
+ * Must be paired with mali_pm_domain_unlock_state(). */
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+{
+	mali_bool is_powered = MALI_TRUE;
+
+	/* Take a reference without powering on */
+	if (NULL != domain)
+	{
+		mali_pm_domain_lock(domain);
+		++domain->use_count;
+
+		if (MALI_PM_DOMAIN_ON != domain->state)
+		{
+			is_powered = MALI_FALSE;
+		}
+		mali_pm_domain_unlock(domain);
+	}
+
+	if(!_mali_osk_pm_dev_ref_add_no_power_on())
+	{
+		is_powered = MALI_FALSE;
+	}
+
+	return is_powered;
+}
+
+/* Release the references taken by mali_pm_domain_lock_state(); may power the
+ * domain down if this was the last reference. */
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
+{
+	_mali_osk_pm_dev_ref_dec_no_power_on();
+
+	if (NULL != domain)
+	{
+		mali_pm_domain_ref_put(domain);
+	}
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.h
new file mode 100755
index 00000000000000..a8e099efd19e9b
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pm_domain.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+/* Logical power state of a PM domain. */
+typedef enum
+{
+	MALI_PM_DOMAIN_ON,
+	MALI_PM_DOMAIN_OFF,
+} mali_pm_domain_state;
+
+/* One PMU power domain: a reference-counted set of groups (and at most one
+ * L2 cache) that are powered up and down together via a PMU mask. */
+struct mali_pm_domain
+{
+	mali_pm_domain_state state;
+	_mali_osk_lock_t *lock; /* protects state and use_count */
+
+	s32 use_count; /* outstanding references; 0 allows power-off */
+
+	u32 pmu_mask; /* PMU register bits controlled by this domain */
+
+	int group_count;
+	struct mali_group *group_list; /* linked via group->pm_domain_list */
+
+	struct mali_l2_cache_core *l2; /* at most one L2 cache per domain */
+};
+
+struct mali_pm_domain *mali_pm_domain_create(u32 id, u32 pmu_mask);
+void mali_pm_domain_add_group(u32 id, struct mali_group *group);
+void mali_pm_domain_add_l2(u32 id, struct mali_l2_cache_core *l2);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_terminate(void);
+
+/** Get PM domain from domain ID
+ */
+struct mali_pm_domain *mali_pm_domain_get(u32 id);
+
+/* Ref counting */
+void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+/* Accessors below do not NULL-check @domain; callers must. */
+MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+{
+	return domain->l2;
+}
+
+MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+{
+	return domain->state;
+}
+
+mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
+void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+
+/* Iterate over every group attached to @domain (list is push-front order). */
+#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
+	NULL != (group); (group) = (group)->pm_domain_list)
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.c
new file mode 100755
index 00000000000000..c052289c483d07
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
+ */
+struct mali_pmu_core
+{
+	struct mali_hw_core hw_core;
+	_mali_osk_lock_t *lock; /* serializes all PMU register sequences */
+	u32 registered_cores_mask; /* every power domain present in hardware */
+	u32 active_cores_mask; /* domains software currently wants powered */
+	u32 switch_delay; /* value programmed into the SW_DELAY register */
+};
+
+/* At most one PMU instance exists per GPU. */
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+	PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+	PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+	PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+	PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+	PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+	PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+	PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
+	PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+/* Bit set in INT_RAWSTAT when a power-state transition has completed. */
+#define PMU_REG_VAL_IRQ 1
+
+/** Create the (single) PMU object: detect the power-domain mask from the
+ * core configuration, map the register space, and read the platform's
+ * switch-delay setting. Returns NULL on any failure; each failure path
+ * unwinds exactly what was set up before it. */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+	struct mali_pmu_core* pmu;
+
+	MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+	MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+	pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+	if (NULL != pmu)
+	{
+		pmu->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+		                                0, _MALI_OSK_LOCK_ORDER_PMU);
+		if (NULL != pmu->lock)
+		{
+			pmu->registered_cores_mask = mali_pmu_detect_mask(number_of_pp_cores, number_of_l2_caches);
+			/* Everything present starts out as wanted-powered. */
+			pmu->active_cores_mask = pmu->registered_cores_mask;
+
+			if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE))
+			{
+				_mali_osk_errcode_t err;
+				struct _mali_osk_device_data data = { 0, };
+
+				err = _mali_osk_device_data_get(&data);
+				if (_MALI_OSK_ERR_OK == err)
+				{
+					pmu->switch_delay = data.pmu_switch_delay;
+					mali_global_pmu_core = pmu;
+					return pmu;
+				}
+				mali_hw_core_delete(&pmu->hw_core);
+			}
+			_mali_osk_lock_term(pmu->lock);
+		}
+		_mali_osk_free(pmu);
+	}
+
+	return NULL;
+}
+
+/* Destroy the PMU object created by mali_pmu_create() and clear the global
+ * pointer. @pmu must be the registered global instance. */
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+	MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+	_mali_osk_lock_term(pmu->lock);
+	mali_hw_core_delete(&pmu->hw_core);
+	_mali_osk_free(pmu);
+	mali_global_pmu_core = NULL;
+}
+
+/* Acquire the PMU spinlock guarding all register command sequences. */
+static void mali_pmu_lock(struct mali_pmu_core *pmu)
+{
+	_mali_osk_lock_wait(pmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+/* Release the PMU spinlock. */
+static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+{
+	_mali_osk_lock_signal(pmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/** Write @mask into the power up/down register selected by @command, then
+ * busy-poll INT_RAWSTAT until the PMU signals completion and acknowledge it.
+ * Caller must hold pmu->lock and ensure no transition is already pending.
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_TIMEOUT after
+ *         MALI_REG_POLL_COUNT_SLOW polls without completion. */
+static _mali_osk_errcode_t mali_pmu_send_command_internal(struct mali_pmu_core *pmu, const u32 command, const u32 mask)
+{
+	u32 rawstat;
+	u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
+	                        & PMU_REG_VAL_IRQ));
+
+	/* @command is the register offset to write; @mask is the value. */
+	mali_hw_core_register_write(&pmu->hw_core, command, mask);
+
+	/* Wait for the command to complete */
+	do
+	{
+		rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+		--timeout;
+	} while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+	MALI_DEBUG_ASSERT(0 < timeout);
+	if (0 == timeout)
+	{
+		return _MALI_OSK_ERR_TIMEOUT;
+	}
+
+	/* Acknowledge the completion interrupt for the next command. */
+	mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Issue a power up/down command for @mask, skipping the hardware access
+ * when the STATUS register already shows the requested state.
+ *
+ * @param pmu     PMU core object (caller must hold pmu->lock)
+ * @param command PMU_REG_ADDR_MGMT_POWER_UP or PMU_REG_ADDR_MGMT_POWER_DOWN
+ * @param mask    Power-domain bits to act on (0 is a no-op)
+ * @return _MALI_OSK_ERR_OK on success, or the error from the underlying
+ *         command (e.g. _MALI_OSK_ERR_TIMEOUT).
+ *
+ * Fix: the result of mali_pmu_send_command_internal() was previously
+ * discarded, so a PMU timeout was silently reported as success; it is now
+ * propagated to the caller. */
+static _mali_osk_errcode_t mali_pmu_send_command(struct mali_pmu_core *pmu, const u32 command, const u32 mask)
+{
+	u32 stat;
+	_mali_osk_errcode_t err;
+
+	if (0 == mask) return _MALI_OSK_ERR_OK;
+
+	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+	stat &= pmu->registered_cores_mask;
+
+	/* STATUS bits are set for domains that are powered down. */
+	switch (command)
+	{
+	case PMU_REG_ADDR_MGMT_POWER_DOWN:
+		if (mask == stat) return _MALI_OSK_ERR_OK;
+		break;
+	case PMU_REG_ADDR_MGMT_POWER_UP:
+		if (0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+		break;
+	default:
+		MALI_DEBUG_ASSERT(0);
+		break;
+	}
+
+	err = mali_pmu_send_command_internal(pmu, command, mask);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return err;
+	}
+
+#if defined(DEBUG)
+	{
+		/* Get power status of cores */
+		stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+		stat &= pmu->registered_cores_mask;
+
+		switch (command)
+		{
+		case PMU_REG_ADDR_MGMT_POWER_DOWN:
+			MALI_DEBUG_ASSERT(mask == (stat & mask));
+			MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
+			MALI_DEBUG_ASSERT((pmu->registered_cores_mask & ~pmu->active_cores_mask) == stat);
+			break;
+		case PMU_REG_ADDR_MGMT_POWER_UP:
+			MALI_DEBUG_ASSERT(0 == (stat & mask));
+			MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
+			break;
+		default:
+			MALI_DEBUG_ASSERT(0);
+			break;
+		}
+	}
+#endif /* defined(DEBUG) */
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Reprogram the PMU defaults (interrupt mask, switch delay) and bring the
+ * hardware power state in line with active_cores_mask after the GPU regains
+ * power.
+ *
+ * @param pmu PMU core object
+ * @return _MALI_OSK_ERR_OK, or the error from a failed power command.
+ *
+ * Fix: the original early-returned on a command error while still holding
+ * pmu->lock, leaving the PMU lock held forever; every error path now
+ * releases the lock before returning. */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+	_mali_osk_errcode_t err;
+	u32 cores_off_mask, cores_on_mask, stat;
+
+	mali_pmu_lock(pmu);
+
+	/* Setup the desired defaults */
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+	/* Get power status of cores (STATUS bit set == powered down) */
+	stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+
+	cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
+	cores_on_mask  = pmu->registered_cores_mask & (stat & pmu->active_cores_mask);
+
+	if (0 != cores_off_mask)
+	{
+		err = mali_pmu_send_command_internal(pmu, PMU_REG_ADDR_MGMT_POWER_DOWN, cores_off_mask);
+		if (_MALI_OSK_ERR_OK != err)
+		{
+			mali_pmu_unlock(pmu);
+			return err;
+		}
+	}
+
+	if (0 != cores_on_mask)
+	{
+		err = mali_pmu_send_command_internal(pmu, PMU_REG_ADDR_MGMT_POWER_UP, cores_on_mask);
+		if (_MALI_OSK_ERR_OK != err)
+		{
+			mali_pmu_unlock(pmu);
+			return err;
+		}
+	}
+
+#if defined(DEBUG)
+	{
+		stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+		stat &= pmu->registered_cores_mask;
+
+		MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
+	}
+#endif /* defined(DEBUG) */
+
+	mali_pmu_unlock(pmu);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Power down the domains in @mask and clear them from active_cores_mask.
+ *
+ * @param pmu  PMU core object
+ * @param mask Power-domain bits to power down; must be a subset of the
+ *             registered domains
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_INVALID_ARGS for an
+ *         invalid mask, _MALI_OSK_ERR_BUSY if the whole GPU is already off
+ *         (the mask is still recorded for the next mali_pmu_reset()).
+ *
+ * Fix: the validity check was the numeric comparison
+ * (mask > pmu->registered_cores_mask), which accepts masks containing
+ * unregistered bits whenever their numeric value happens to be small
+ * (e.g. mask 0x3 with registered 0x5). Use the same bitwise subset test
+ * as mali_pmu_power_up(). */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
+
+	/* Make sure we have a valid power domain mask */
+	if (mask & ~pmu->registered_cores_mask)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	mali_pmu_lock(pmu);
+
+	MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
+
+	pmu->active_cores_mask &= ~mask;
+
+	_mali_osk_pm_dev_ref_add_no_power_on();
+	if (!mali_pm_is_power_on())
+	{
+		/* Don't touch hardware if all of Mali is powered off. */
+		_mali_osk_pm_dev_ref_dec_no_power_on();
+		mali_pmu_unlock(pmu);
+
+		MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
+
+		return _MALI_OSK_ERR_BUSY;
+	}
+
+	err = mali_pmu_send_command(pmu, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+	_mali_osk_pm_dev_ref_dec_no_power_on();
+	mali_pmu_unlock(pmu);
+
+	return err;
+}
+
+/** Power up the domains in @mask and add them to active_cores_mask.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS if @mask contains unregistered bits,
+ * _MALI_OSK_ERR_BUSY if the whole GPU is off (the mask is still recorded
+ * for the next mali_pmu_reset()). */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
+
+	/* Make sure we have a valid power domain mask */
+	if (mask & ~pmu->registered_cores_mask)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	mali_pmu_lock(pmu);
+
+	MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
+
+	pmu->active_cores_mask |= mask;
+
+	_mali_osk_pm_dev_ref_add_no_power_on();
+	if (!mali_pm_is_power_on())
+	{
+		/* Don't touch hardware if all of Mali is powered off. */
+		_mali_osk_pm_dev_ref_dec_no_power_on();
+		mali_pmu_unlock(pmu);
+
+		MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
+
+		return _MALI_OSK_ERR_BUSY;
+	}
+
+	err = mali_pmu_send_command(pmu, PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+	_mali_osk_pm_dev_ref_dec_no_power_on();
+	mali_pmu_unlock(pmu);
+
+	return err;
+}
+
+/** Power down every registered domain (active_cores_mask is untouched, so a
+ * later mali_pmu_power_up_all() restores the previous configuration). */
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+	mali_pmu_lock(pmu);
+
+	/* Setup the desired defaults in case we were called before mali_pmu_reset() */
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+	err = mali_pmu_send_command(pmu, PMU_REG_ADDR_MGMT_POWER_DOWN, pmu->registered_cores_mask);
+
+	mali_pmu_unlock(pmu);
+
+	return err;
+}
+
+/** Power up every domain recorded in active_cores_mask, restoring the state
+ * last requested via mali_pmu_power_up()/mali_pmu_power_down(). */
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+	mali_pmu_lock(pmu);
+
+	/* Setup the desired defaults in case we were called before mali_pmu_reset() */
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+	mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+
+	err = mali_pmu_send_command(pmu, PMU_REG_ADDR_MGMT_POWER_UP, pmu->active_cores_mask);
+
+	mali_pmu_unlock(pmu);
+	return err;
+}
+
+/* @return the single PMU instance, or NULL if the GPU has no PMU. */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+	return mali_global_pmu_core;
+}
+
+/** Build the PMU power-domain bit mask from the detected core configuration.
+ * Mali-300/400 (one L2): bit 0 = GP, bit 1 = L2, bits 2.. = one per PP core.
+ * Mali-450 (multiple L2s): bit 0 = GP+its L2, bits 1-3 = PP domains
+ * (each domain bundles a group of PP cores with its L2 cache).
+ * Returns 0 when no L2 caches were found. */
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+	u32 mask = 0;
+
+	if (number_of_l2_caches == 1)
+	{
+		/* Mali-300 or Mali-400 */
+		u32 i;
+
+		/* GP */
+		mask = 0x01;
+
+		/* L2 cache */
+		mask |= 0x01<<1;
+
+		/* Set bit for each PP core */
+		for (i = 0; i < number_of_pp_cores; i++)
+		{
+			mask |= 0x01<<(i+2);
+		}
+	}
+	else if (number_of_l2_caches > 1)
+	{
+		/* Mali-450 */
+
+		/* GP (including its L2 cache) */
+		mask = 0x01;
+
+		/* There is always at least one PP (including its L2 cache) */
+		mask |= 0x01<<1;
+
+		/* Additional PP cores in same L2 cache */
+		if (number_of_pp_cores >= 2)
+		{
+			mask |= 0x01<<2;
+		}
+
+		/* Additional PP cores in a third L2 cache */
+		if (number_of_pp_cores >= 5)
+		{
+			mask |= 0x01<<3;
+		}
+	}
+
+	MALI_DEBUG_PRINT(4, ("Mali PMU: Power mask is 0x%08X (%u + %u)\n", mask, number_of_pp_cores, number_of_l2_caches));
+
+	return mask;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.h
new file mode 100755
index 00000000000000..6dd3c38fefc1c4
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pmu.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali-400 in-built PMU hardware
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+
+#define MALI_PMU_M450_DOM1 0
+#define MALI_PMU_M450_DOM1_MASK (1 << 1)
+#define MALI_PMU_M450_DOM2 1
+#define MALI_PMU_M450_DOM2_MASK (1 << 2)
+#define MALI_PMU_M450_DOM3 2
+#define MALI_PMU_M450_DOM3_MASK (1 << 3)
+
+#define MALI_PMU_M400_PP0 0
+#define MALI_PMU_M400_PP0_MASK (1 << 2)
+#define MALI_PMU_M400_PP1 1
+#define MALI_PMU_M400_PP1_MASK (1 << 3)
+#define MALI_PMU_M400_PP2 2
+#define MALI_PMU_M400_PP2_MASK (1 << 4)
+#define MALI_PMU_M400_PP3 3
+#define MALI_PMU_M400_PP3_MASK (1 << 5)
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from entry point of the driver in order to create and initialize the PMU resource
+ *
+ * @param resource it will be a pointer to a PMU resource
+ * @param number_of_pp_cores Number of found PP resources in configuration
+ * @param number_of_l2_caches Number of found L2 cache resources in configuration
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief It deallocates the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_up.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores. The mask will be saved so that \a
+ * mali_pmu_power_up_all will bring the PMU back to the previous state set with
+ * this function or \a mali_pmu_power_down.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pp.c
new file mode 100755
index 00000000000000..005866c7d622c2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+/** @brief Create and register a new PP core object.
+ *
+ * Maps the core's register bank, resets the core (skipped for virtual
+ * cores), attaches it to @a group and installs the IRQ handlers (which
+ * probe the IRQ line if needed). On success the core is appended to the
+ * global PP core array; on any failure all partially acquired resources
+ * are released in reverse order before returning NULL.
+ *
+ * @param resource   OS resource describing base address, IRQ number and description
+ * @param group      Group the new core is added to
+ * @param is_virtual MALI_TRUE for the Mali-450 virtual (broadcast) core
+ * @param bcast_id   Broadcast/DLBU flag value (Mali-450 only)
+ * @return New core object, or NULL on failure
+ */
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+	struct mali_pp_core* core = NULL;
+
+	MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+	MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+	/* Fixed-size global array: refuse to create more cores than it can hold */
+	if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES)
+	{
+		MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+		return NULL;
+	}
+
+	core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+	if (NULL != core)
+	{
+		core->core_id = mali_global_num_pp_cores;
+		core->bcast_id = bcast_id;
+		/* No performance counters selected until a job requests them */
+		core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+		core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+
+		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK))
+		{
+			_mali_osk_errcode_t ret;
+
+			/* The virtual core has no real registers of its own to reset */
+			if (!is_virtual)
+			{
+				ret = mali_pp_reset(core);
+			}
+			else
+			{
+				ret = _MALI_OSK_ERR_OK;
+			}
+
+			if (_MALI_OSK_ERR_OK == ret)
+			{
+				ret = mali_group_add_pp_core(group, core);
+				if (_MALI_OSK_ERR_OK == ret)
+				{
+					/* Setup IRQ handlers (which will do IRQ probing if needed) */
+					MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+					core->irq = _mali_osk_irq_init(resource->irq,
+					                               mali_group_upper_half_pp,
+					                               group,
+					                               mali_pp_irq_probe_trigger,
+					                               mali_pp_irq_probe_ack,
+					                               core,
+					                               "mali_pp_irq_handlers");
+					if (NULL != core->irq)
+					{
+						/* Fully initialized: publish in the global array */
+						mali_global_pp_cores[mali_global_num_pp_cores] = core;
+						mali_global_num_pp_cores++;
+
+						return core;
+					}
+					else
+					{
+						MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+					}
+					/* IRQ setup failed: detach from group before tearing down */
+					mali_group_remove_pp_core(group);
+				}
+				else
+				{
+					MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+				}
+			}
+			mali_hw_core_delete(&core->hw_core);
+		}
+
+		_mali_osk_free(core);
+	}
+	else
+	{
+		MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+	}
+
+	return NULL;
+}
+
+/** @brief Destroy a PP core object created by mali_pp_create().
+ *
+ * Tears down the IRQ handler and register mapping, removes the core from
+ * the global array (compacting the array so it stays contiguous), and
+ * frees the object.
+ *
+ * @param core Core to delete; must not be NULL
+ */
+void mali_pp_delete(struct mali_pp_core *core)
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	_mali_osk_irq_term(core->irq);
+	mali_hw_core_delete(&core->hw_core);
+
+	/* Remove core from global list */
+	for (i = 0; i < mali_global_num_pp_cores; i++)
+	{
+		if (mali_global_pp_cores[i] == core)
+		{
+			mali_global_pp_cores[i] = NULL;
+			mali_global_num_pp_cores--;
+
+			if (i != mali_global_num_pp_cores)
+			{
+				/* We removed a PP core from the middle of the array -- move the last
+				 * PP core to the current position to close the gap */
+				mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+				mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+			}
+
+			break;
+		}
+	}
+
+	_mali_osk_free(core);
+}
+
+/** @brief Request the core to stop its bus (asynchronous).
+ *
+ * Only issues the command; pair with mali_pp_stop_bus_wait() to wait for
+ * the bus to actually stop.
+ */
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	/* Will only send the stop bus command, and not wait for it to complete */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/** @brief Stop the core's bus and wait until it has stopped.
+ *
+ * Issues the stop-bus command and then busy-polls the status register
+ * (up to MALI_REG_POLL_COUNT_FAST register reads, no sleep between
+ * iterations) for the BUS_STOPPED flag.
+ *
+ * @return _MALI_OSK_ERR_OK when stopped, _MALI_OSK_ERR_FAULT on timeout
+ */
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Send the stop bus command. */
+	mali_pp_stop_bus(core);
+
+	/* Wait for bus to be stopped */
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+	{
+		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+			break;
+	}
+
+	/* Loop ran to completion without seeing BUS_STOPPED: give up */
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+		return _MALI_OSK_ERR_FAULT;
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary.
+ * Used by the *_conditional register writers in mali_pp_job_start() to skip
+ * writes whose value already equals the hardware reset value. Index is the
+ * register offset divided by sizeof(u32). */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] =
+{
+	0x0,  /* Renderer List Address Register */
+	0x0,  /* Renderer State Word Base Address Register */
+	0x0,  /* Renderer Vertex Base Register */
+	0x2,  /* Feature Enable Register */
+	0x0,  /* Z Clear Value Register */
+	0x0,  /* Stencil Clear Value Register */
+	0x0,  /* ABGR Clear Value 0 Register */
+	0x0,  /* ABGR Clear Value 1 Register */
+	0x0,  /* ABGR Clear Value 2 Register */
+	0x0,  /* ABGR Clear Value 3 Register */
+	0x0,  /* Bounding Box Left Right Register */
+	0x0,  /* Bounding Box Bottom Register */
+	0x0,  /* FS Stack Address Register */
+	0x0,  /* FS Stack Size and Initial Value Register */
+	0x0,  /* Reserved */
+	0x0,  /* Reserved */
+	0x0,  /* Origin Offset X Register */
+	0x0,  /* Origin Offset Y Register */
+	0x75, /* Subpixel Specifier Register */
+	0x0,  /* Tiebreak mode Register */
+	0x0,  /* Polygon List Format Register */
+	0x0,  /* Scaling Register */
+	0x0   /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values (same layout for write-back units 0, 1 and 2) */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] =
+{
+	0x0, /* WBx Source Select Register */
+	0x0, /* WBx Target Address Register */
+	0x0, /* WBx Target Pixel Format Register */
+	0x0, /* WBx Target AA Format Register */
+	0x0, /* WBx Target Layout */
+	0x0, /* WBx Target Scanline Length */
+	0x0, /* WBx Target Flags Register */
+	0x0, /* WBx MRT Enable Register */
+	0x0, /* WBx MRT Offset Register */
+	0x0, /* WBx Global Test Enable Register */
+	0x0, /* WBx Global Test Reference Value Register */
+	0x0  /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+/** @brief Hard-reset the core and re-enable its interrupts.
+ *
+ * The caller must have stopped the bus first (see mali_pp_stop_bus_wait()).
+ * The WRITE_BOUNDARY_LOW register is used as a scratch register: it is set
+ * to a bogus value before the reset and then repeatedly rewritten with a
+ * check value; when a write sticks, the reset has completed.
+ *
+ * NOTE(review): the function returns _MALI_OSK_ERR_OK even when the poll
+ * loop times out (only an error is printed) — confirm this is intended.
+ *
+ * @return Always _MALI_OSK_ERR_OK
+ */
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+	/* Bus must be stopped before calling this function */
+	const u32 reset_invalid_value = 0xC0FFE000;
+	const u32 reset_check_value = 0xC01A0000;
+	int i;
+
+	MALI_DEBUG_ASSERT_POINTER(core);
+	MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+	/* Set register to a bogus value. The register will be used to detect when reset is complete */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+	/* Force core to reset */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+	/* Wait for reset to be complete */
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
+		{
+			break;
+		}
+	}
+
+	if (MALI_REG_POLL_COUNT_FAST == i)
+	{
+		MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+	}
+
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Start a soft reset of the core (asynchronous).
+ *
+ * Disables and acknowledges all interrupts, then issues the soft-reset
+ * command. Pair with mali_pp_reset_wait() to wait for completion; note
+ * that interrupts stay masked until mali_pp_reset_wait() re-enables them.
+ */
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+/** @brief Wait for a soft reset started by mali_pp_reset_async() to finish.
+ *
+ * Busy-polls until the core reports it is no longer rendering AND the raw
+ * interrupt status equals exactly RESET_COMPLETED, then clears and
+ * re-enables the used interrupts.
+ *
+ * NOTE(review): unlike the sibling functions this one has no
+ * MALI_DEBUG_ASSERT_POINTER(core) — consider adding one for consistency.
+ *
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on timeout
+ */
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+	int i;
+	u32 rawstat = 0;
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++)
+	{
+		if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE))
+		{
+			/* Exact equality: any other pending IRQ bit keeps us polling */
+			rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+			if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
+			{
+				break;
+			}
+		}
+	}
+
+	if (i == MALI_REG_POLL_COUNT_FAST)
+	{
+		MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+		                  core->hw_core.description, rawstat));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Re-enable interrupts */
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** @brief Synchronous soft reset: start the reset and wait for it.
+ *
+ * Convenience wrapper around mali_pp_reset_async() + mali_pp_reset_wait().
+ */
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+	mali_pp_reset_async(core);
+	return mali_pp_reset_wait(core);
+}
+
+/** @brief Program the core's registers for one (sub-)job and start rendering.
+ *
+ * Writes the frame registers, write-back (WBx) registers and, when
+ * selected, the performance counter setup, then issues START_RENDERING.
+ * The *_conditional writers skip any register whose value equals the
+ * hardware reset value (see mali_frame_registers_reset_values), saving
+ * bus traffic after a reset.
+ *
+ * @param core            Core to start the job on
+ * @param job             Job to start
+ * @param sub_job         Which sub job of @a job to run on this core
+ * @param restart_virtual MALI_TRUE to write the stack address even for a
+ *                        virtual job (normally it is not broadcast)
+ */
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+	u32 relative_address;
+	u32 start_index;
+	u32 nr_of_regs;
+	u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+	u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+	u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+	u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+	core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
+	core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
+
+	/* NOTE(review): core is already dereferenced above (counter_src0_used
+	 * assignments), so this assert fires too late to catch a NULL core —
+	 * consider moving it to the top of the function. */
+	MALI_DEBUG_ASSERT_POINTER(core);
+
+	/* Write frame registers */
+
+	/*
+	 * There are two frame registers which are different for each sub job:
+	 * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+	 * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+	 */
+	mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+	/* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+	if (!mali_pp_job_is_virtual(job) || restart_virtual)
+	{
+		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+	}
+
+	/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+	relative_address = MALI200_REG_ADDR_RSW;
+	start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+	nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+	        relative_address, &frame_registers[start_index],
+	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* MALI200_REG_ADDR_STACK_SIZE */
+	relative_address = MALI200_REG_ADDR_STACK_SIZE;
+	start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+	mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+	        relative_address, frame_registers[start_index],
+	        mali_frame_registers_reset_values[start_index]);
+
+	/* Skip 2 reserved registers */
+
+	/* Write remaining registers */
+	relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+	start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+	nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+	        relative_address, &frame_registers[start_index],
+	        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+	/* Write WBx registers -- a zero source-select means the unit is unused */
+	if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+	{
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+	{
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+	{
+		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+	}
+
+	/* Select and enable the hardware performance counters requested by the job */
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+	{
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+	if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+	{
+		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+	/* Adding barrier to make sure all register writes are finished */
+	_mali_osk_write_mem_barrier();
+
+	/* This is the command that starts the core. */
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+	/* Adding barrier to make sure previous register writes are finished */
+	_mali_osk_write_mem_barrier();
+}
+
+/** @brief Read the core's hardware VERSION register. */
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+/** @brief Look up a PP core in the global array by index.
+ *
+ * @param index Zero-based core index
+ * @return The core, or NULL if @a index is out of range
+ */
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index)
+{
+	if (mali_global_num_pp_cores > index)
+	{
+		return mali_global_pp_cores[index];
+	}
+
+	return NULL;
+}
+
+/** @brief Number of PP cores currently registered in the global array. */
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+	return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+/** @brief IRQ-probe trigger callback: force a hang interrupt.
+ *
+ * Called by the OSK IRQ probing machinery; raises FORCE_HANG in the raw
+ * status so the matching ack callback can detect which IRQ line fired.
+ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+}
+
+/** @brief IRQ-probe ack callback: check and clear the forced hang interrupt.
+ *
+ * @return _MALI_OSK_ERR_OK if the FORCE_HANG interrupt raised by
+ *         mali_pp_irq_probe_trigger() is pending (and clears it),
+ *         _MALI_OSK_ERR_FAULT otherwise
+ */
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+	struct mali_pp_core *core = (struct mali_pp_core *)data;
+	u32 irq_readout;
+
+	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+	if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+	{
+		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+		_mali_osk_mem_barrier();
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+/** @brief Copy hardware performance counter values from a core into the job.
+ *
+ * @a parent and @a child are the same core except for virtual jobs on
+ * Mali-450: counters are enabled on the virtual core (parent) but the
+ * values are read from each physical child core. When profiling is built
+ * in, values are also forwarded to the profiling subsystem, indexed by
+ * the child's core id (two counters per core).
+ *
+ * @param parent Core used to check whether each counter was enabled
+ * @param child  Core the counter values are actually read from
+ * @param job    Job object receiving the values
+ * @param subjob Sub-job index the values belong to
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+	u32 val0 = 0;
+	u32 val1 = 0;
+#if defined(CONFIG_MALI400_PROFILING)
+	int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+	if (MALI_HW_CORE_NO_COUNTER != parent->counter_src0_used)
+	{
+		val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+		mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+	}
+
+	if (MALI_HW_CORE_NO_COUNTER != parent->counter_src1_used)
+	{
+		val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+		mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+	}
+}
+
+#if MALI_STATE_TRACKING
+/** @brief Append a one-line description of this core to a state dump buffer.
+ *
+ * @param core Core to describe
+ * @param buf  Destination buffer
+ * @param size Remaining space in @a buf
+ * @return Number of characters written
+ */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+	int n = 0;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+	return n;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pp.h
new file mode 100755
index 00000000000000..844310debae56e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+ u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+ u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+/** @brief Get the core's unique (sequential) kernel-side id. */
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return core->core_id;
+}
+
+/** @brief Get the core's Mali-450 broadcast/DLBU flag value. */
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+	MALI_DEBUG_ASSERT_POINTER(core);
+	return core->bcast_id;
+}
+
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child is always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters was enabled
+ * @param child The core to actually read the values from
+ * @job Job object to update with counter values (if enabled)
+ * @subjob Which subjob the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+/** @brief Get the human-readable description string of the core. */
+MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+{
+	return core->hw_core.description;
+}
+
+/*** Register reading/writing functions ***/
+
+/** @brief Read the masked interrupt status register. */
+MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+}
+
+/** @brief Read the raw interrupt status, filtered to the IRQ bits this driver uses. */
+MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+}
+
+/** @brief Read the core's management status register. */
+MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+{
+	return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+}
+
+/** @brief Mask (disable) all interrupts from this core. */
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+/** @brief Acknowledge (clear) a pending hang interrupt. */
+MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+}
+
+/** @brief Unmask (enable) the interrupts this driver uses. */
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/** @brief Write the FS stack address for this core's sub-job of @a job.
+ *
+ * Uses the core's own id as the sub-job index; used for virtual jobs where
+ * the stack address must be written per physical core, not broadcast.
+ */
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+	u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.c
new file mode 100755
index 00000000000000..e439f4faa5e6b6
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_pp_scheduler.h"
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+
+/** @brief Create a PP job object from user-space job arguments.
+ *
+ * Copies and validates the user arguments, allocates the finished
+ * notification (unless the job opted out), selects performance counter
+ * sources (job-provided, or else the debugfs/DS-5 globals), and copies the
+ * memory-cookie array from user space. On any failure all partial state is
+ * released via mali_pp_job_delete() — safe because the job was calloc'd.
+ *
+ * @param session Session submitting the job
+ * @param uargs   User-space pointer to the start-job arguments
+ * @param id      Sequential kernel-side job identifier
+ * @return New job object, or NULL on validation/allocation/copy failure
+ */
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+{
+	struct mali_pp_job *job;
+	u32 perf_counter_flag;
+
+	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+	if (NULL != job)
+	{
+		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s)))
+		{
+			goto fail;
+		}
+
+		/* Reject jobs claiming more sub jobs than the fixed per-job arrays hold */
+		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS)
+		{
+			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+			goto fail;
+		}
+
+		if (!mali_pp_job_use_no_notification(job))
+		{
+			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+			if (NULL == job->finished_notification) goto fail;
+		}
+
+		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+		/* case when no counters came from user space
+		 * so pass the debugfs / DS-5 provided global ones to the job object */
+		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
+		{
+			mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
+			mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
+		}
+
+		_mali_osk_list_init(&job->list);
+		job->session = session;
+		_mali_osk_list_init(&job->session_list);
+		job->id = id;
+
+		/* num_cores == 0 means a single sub job */
+		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+		job->pid = _mali_osk_get_pid();
+		job->tid = _mali_osk_get_tid();
+
+		job->num_memory_cookies = job->uargs.num_memory_cookies;
+		if (job->num_memory_cookies > 0)
+		{
+			u32 size;
+
+			/* Cookie count cannot exceed the session's live descriptor mappings */
+			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings)
+			{
+				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
+				goto fail;
+			}
+
+			size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+
+			job->memory_cookies = _mali_osk_malloc(size);
+			if (NULL == job->memory_cookies)
+			{
+				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+				goto fail;
+			}
+
+			if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size))
+			{
+				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+				goto fail;
+			}
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+			/* One attachment slot per cookie; filled in when buffers are mapped */
+			job->num_dma_bufs = job->num_memory_cookies;
+			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
+			if (NULL == job->dma_bufs)
+			{
+				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+				goto fail;
+			}
+#endif
+		}
+		else
+		{
+			job->memory_cookies = NULL;
+		}
+
+		return job;
+	}
+
+fail:
+	if (NULL != job)
+	{
+		mali_pp_job_delete(job);
+	}
+
+	return NULL;
+}
+
+/** @brief Destroy a PP job object and release everything it owns.
+ *
+ * Safe to call on a partially constructed job (all pointer members are
+ * zeroed by the calloc in mali_pp_job_create(), and the free helpers
+ * accept NULL).
+ *
+ * @param job Job to delete
+ */
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+#ifdef CONFIG_SYNC
+	/* It is safe to delete the work without flushing. */
+	if (NULL != job->sync_work) _mali_osk_wq_delete_work_nonflush(job->sync_work);
+	if (NULL != job->pre_fence) sync_fence_put(job->pre_fence);
+	if (NULL != job->sync_point) sync_fence_put(job->sync_point->fence);
+#endif
+	if (NULL != job->finished_notification)
+	{
+		_mali_osk_notification_delete(job->finished_notification);
+	}
+
+	_mali_osk_free(job->memory_cookies);
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+	/* Unmap buffers attached to job */
+	if (0 < job->num_dma_bufs)
+	{
+		mali_dma_buf_unmap_job(job);
+	}
+
+	_mali_osk_free(job->dma_bufs);
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+	_mali_osk_free(job);
+}
+
+/** @brief Get the global (debugfs/DS-5 provided) performance counter 0 source. */
+u32 mali_pp_job_get_pp_counter_src0(void)
+{
+	return pp_counter_src0;
+}
+
+/** @brief Set the global performance counter 0 source.
+ *
+ * Applied to new jobs that did not select counters themselves.
+ * @return Always MALI_TRUE
+ */
+mali_bool mali_pp_job_set_pp_counter_src0(u32 counter)
+{
+	pp_counter_src0 = counter;
+
+	return MALI_TRUE;
+}
+
+/** @brief Get the global (debugfs/DS-5 provided) performance counter 1 source. */
+u32 mali_pp_job_get_pp_counter_src1(void)
+{
+	return pp_counter_src1;
+}
+
+/** @brief Set the global performance counter 1 source.
+ *
+ * Applied to new jobs that did not select counters themselves.
+ * @return Always MALI_TRUE
+ */
+mali_bool mali_pp_job_set_pp_counter_src1(u32 counter)
+{
+	pp_counter_src1 = counter;
+
+	return MALI_TRUE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.h
new file mode 100755
index 00000000000000..b59afaa8dc023e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_job.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#ifdef CONFIG_SYNC
+#include <linux/sync.h>
+#endif
+#include "mali_dlbu.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_dma_buf.h"
+#endif
+
+/**
+ * The structure represents a PP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job
+{
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ _mali_osk_list_t session_list; /**< Used to link jobs together in the session job list */
+ _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+ u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ u32 sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ u32 num_memory_cookies; /**< Number of memory cookies attached to job */
+ u32 *memory_cookies; /**< Memory cookies attached to job */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ struct mali_dma_buf_attachment **dma_bufs; /**< Array of DMA-bufs used by job */
+ u32 num_dma_bufs; /**< Number of DMA-bufs used by job */
+#endif
+#ifdef CONFIG_SYNC
+ mali_sync_pt *sync_point; /**< Sync point to signal on completion */
+ struct sync_fence_waiter sync_waiter; /**< Sync waiter for async wait */
+ _mali_osk_wq_work_t *sync_work; /**< Work to schedule in callback */
+ struct sync_fence *pre_fence; /**< Sync fence this job must wait for */
+#endif
+};
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_pp_counter_src0(void);
+mali_bool mali_pp_job_set_pp_counter_src0(u32 counter);
+u32 mali_pp_job_get_pp_counter_src1(void);
+mali_bool mali_pp_job_set_pp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+ return job->uargs.dlbu_registers;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+ return 0 == job->uargs.num_cores;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ if (mali_pp_job_is_virtual(job))
+ {
+ return MALI_DLBU_VIRT_ADDR;
+ }
+ else if (0 == sub_job)
+ {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ }
+ else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+ {
+ return job->uargs.frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ if (0 == sub_job)
+ {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ }
+ else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+ {
+ return job->uargs.frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ return job->uargs.wb2_registers;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has jobs: marks every unstarted
+   sub-job as started, completed and failed, so no new sub-jobs get scheduled. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+ u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+ job->sub_job_errors += jobs_remaining;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+ job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ job->sub_jobs_completed++;
+ if ( MALI_FALSE == success )
+ {
+ job->sub_job_errors++;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ if ( 0 == job->sub_job_errors )
+ {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_active_barrier(struct mali_pp_job *job)
+{
+ return job->uargs.flags & _MALI_PP_JOB_FLAG_BARRIER ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_barrier_enforced(struct mali_pp_job *job)
+{
+ job->uargs.flags &= ~_MALI_PP_JOB_FLAG_BARRIER;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+ return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job)
+{
+ return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job)
+{
+ return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_src0(struct mali_pp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_src1(struct mali_pp_job *job, u32 src)
+{
+ job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value1[sub_job] = value;
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.c b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.c
new file mode 100755
index 00000000000000..6af4ac142bc49f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.c
@@ -0,0 +1,1974 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_kernel_utilization.h"
+#include "mali_session.h"
+#include "mali_pm_domain.h"
+#include "linux/mali/mali_utgard.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_dma_buf.h"
+#endif
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+
+/* With certain configurations, job deletion involves functions which cannot be called from atomic context.
+ * This #if checks for those cases and enables job deletion to be deferred and run in a different context. */
+#if defined(CONFIG_SYNC) || !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
+#endif
+
+/* Maximum of 8 physical PP cores plus 1 virtual group (a group can only have maximum of 1 PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
+
+static mali_bool mali_pp_scheduler_is_suspended(void);
+static void mali_pp_scheduler_do_schedule(void *arg);
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+static void mali_pp_scheduler_do_job_delete(void *arg);
+#endif
+static void mali_pp_scheduler_job_queued(void);
+static void mali_pp_scheduler_job_completed(void);
+
+static u32 pp_version = 0;
+
+/* Physical job queue */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue); /* List of physical jobs with some unscheduled work */
+static u32 job_queue_depth = 0;
+
+/* Physical groups */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); /* List of physical groups with working jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); /* List of physical groups with idle jobs on the pp core */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); /* List of disabled physical groups */
+
+/* Virtual job queue (Mali-450 only) */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(virtual_job_queue); /* List of unstarted jobs for the virtual group */
+static u32 virtual_job_queue_depth = 0;
+
+/* Virtual group (Mali-450 only) */
+static struct mali_group *virtual_group = NULL; /* Virtual group (if any) */
+static enum
+{
+ VIRTUAL_GROUP_IDLE,
+ VIRTUAL_GROUP_WORKING,
+ VIRTUAL_GROUP_DISABLED,
+}
+virtual_group_state = VIRTUAL_GROUP_IDLE; /* Flag which indicates whether the virtual group is working or idle */
+
+mali_bool mali_pp_scheduler_blocked_on_compositor = MALI_FALSE;
+
+/* Number of physical cores */
+static u32 num_cores = 0;
+static u32 enabled_cores = 0;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static _mali_osk_lock_t *pp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+MALI_DEBUG_CODE(static u32 pp_scheduler_lock_owner = 0);
+
+static _mali_osk_wq_work_t *pp_scheduler_wq_schedule = NULL;
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
+static _mali_osk_lock_t *pp_scheduler_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
+#endif
+
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
+{
+ return NULL != virtual_group;
+}
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+ _mali_osk_lock_flags_t lock_flags;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#else
+ lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+#endif
+
+ pp_scheduler_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == pp_scheduler_lock)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == pp_scheduler_working_wait_queue)
+ {
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_scheduler_wq_schedule = _mali_osk_wq_create_work(mali_pp_scheduler_do_schedule, NULL);
+ if (NULL == pp_scheduler_wq_schedule)
+ {
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
+ if (NULL == pp_scheduler_wq_job_delete)
+ {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_scheduler_job_delete_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == pp_scheduler_job_delete_lock)
+ {
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_scheduler_terminate(void)
+{
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+ _mali_osk_lock_term(pp_scheduler_job_delete_lock);
+ _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
+#endif
+
+ _mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+}
+
+void mali_pp_scheduler_populate(void)
+{
+ struct mali_group *group;
+ struct mali_pp_core *pp_core;
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++)
+ {
+ group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group))
+ {
+ MALI_DEBUG_PRINT(3, ("Found virtual group %p\n", group));
+
+ virtual_group = group;
+ break;
+ }
+ }
+
+ /* Find all the available PP cores */
+ for (i = 0; i < num_groups; i++)
+ {
+ group = mali_group_get_glob_group(i);
+ pp_core = mali_group_get_pp_core(group);
+
+ if (NULL != pp_core && !mali_group_is_virtual(group))
+ {
+ if (0 == pp_version)
+ {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ /* Add all physical PP cores to the virtual group */
+ mali_group_lock(virtual_group);
+ group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+ mali_group_add_group(virtual_group, group, MALI_TRUE);
+ mali_group_unlock(virtual_group);
+ }
+ else
+ {
+ _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
+ }
+
+ num_cores++;
+ }
+ }
+ enabled_cores = num_cores;
+}
+
+void mali_pp_scheduler_depopulate(void)
+{
+ struct mali_group *group, *temp;
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+
+ /* Delete all groups owned by scheduler */
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ mali_group_delete(virtual_group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+ {
+ mali_group_delete(group);
+ }
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list)
+ {
+ mali_group_delete(group);
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptable lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n"));
+ MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock\n"));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = 0);
+ _mali_osk_lock_signal(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_disable_if_empty(virtual_group))
+ {
+ MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_DISABLED;
+ }
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
+{
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+
+ if (mali_group_virtual_enable_if_empty(virtual_group))
+ {
+ MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
+
+ MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
+
+ virtual_group_state = VIRTUAL_GROUP_IDLE;
+ }
+}
+
+#ifdef DEBUG
+MALI_STATIC_INLINE void mali_pp_scheduler_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+}
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() mali_pp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED()
+#endif
+
+/**
+ * Returns a physical job if a physical job is ready to run (no barrier present)
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (!_mali_osk_list_empty(&job_queue))
+ {
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT(job_queue_depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+
+ if (mali_pp_scheduler_blocked_on_compositor)
+ {
+ /* Skip scheduling other jobs if there is some composition work going on */
+ if(!job->session->is_compositor)
+ {
+ if (!_mali_osk_list_empty(&group_list_idle))
+ {
+ MALI_DEBUG_PRINT(3, ("PP job start blocked on pending compositor\n"));
+ }
+ return NULL;
+ }
+ }
+
+ if (!mali_pp_job_has_active_barrier(job))
+ {
+ return job;
+ }
+ }
+
+ return NULL;
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(job_queue_depth > 0);
+
+ /* Remove job from queue */
+ if (!mali_pp_job_has_unstarted_sub_jobs(job))
+ {
+ /* All sub jobs have been started: remove job from queue */
+ _mali_osk_list_delinit(&job->list);
+ }
+
+ --job_queue_depth;
+}
+
+/**
+ * Returns a virtual job if a virtual job is ready to run (no barrier present)
+ */
+MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_job(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+
+ if (!_mali_osk_list_empty(&virtual_job_queue))
+ {
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT(virtual_job_queue_depth > 0);
+ job = _MALI_OSK_LIST_ENTRY(virtual_job_queue.next, struct mali_pp_job, list);
+
+ if (mali_pp_scheduler_blocked_on_compositor)
+ {
+ /* Skip scheduling other jobs if there is some composition work going on */
+ if(!job->session->is_compositor)
+ {
+ if (VIRTUAL_GROUP_IDLE==virtual_group_state)
+ {
+ MALI_DEBUG_PRINT(3, ("PP job start blocked on pending compositor\n"));
+ }
+ return NULL;
+ }
+ }
+
+ if (!mali_pp_job_has_active_barrier(job))
+ {
+ return job;
+ }
+ }
+
+ return NULL;
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_virtual_job(struct mali_pp_job *job)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(virtual_job_queue_depth > 0);
+
+ /* Remove job from queue */
+ _mali_osk_list_delinit(&job->list);
+ --virtual_job_queue_depth;
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_compsitor_unblock(struct mali_pp_job *job)
+{
+ if (mali_pp_scheduler_blocked_on_compositor)
+ {
+ /* If starting to schedule the compositor job, then allow other
+ * jobs to be started also. The compositor jobs are first in
+ * the queue. */
+ if(job->session->is_compositor)
+ {
+ mali_pp_scheduler_blocked_on_compositor = MALI_FALSE;
+ }
+ }
+}
+
+/**
+ * Checks if the criteria is met for removing a physical core from virtual group
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+ MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ /*
+ * The criteria for taking out a physical group from a virtual group are the following:
+	 * - The virtual group is idle
+ * - There are currently no physical groups (idle and working)
+ * - There are physical jobs to be scheduled (without a barrier)
+ */
+ return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
+ _mali_osk_list_empty(&group_list_idle) &&
+ _mali_osk_list_empty(&group_list_working) &&
+ (NULL != mali_pp_scheduler_get_physical_job());
+}
+
+MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
+{
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (!_mali_osk_list_empty(&group_list_idle))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list\n"));
+ return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+ }
+ else if (mali_pp_scheduler_has_virtual_group())
+ {
+ MALI_ASSERT_GROUP_LOCKED(virtual_group);
+ if (mali_pp_scheduler_can_move_virtual_to_physical())
+ {
+ struct mali_group *group;
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group\n"));
+ group = mali_group_acquire_group(virtual_group);
+
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ mali_pp_scheduler_disable_empty_virtual();
+ }
+
+ return group;
+ }
+ }
+
+ return NULL;
+}
+
+void mali_pp_scheduler_schedule(void)
+{
+ struct mali_group* physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+ struct mali_pp_job* physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+ u32 physical_subjobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS-1];
+ int num_physical_jobs_to_start = 0;
+ int i;
+
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ /* Need to lock the virtual group because we might need to grab a physical group from it */
+ mali_group_lock(virtual_group);
+ }
+
+ mali_pp_scheduler_lock();
+ if (pause_count > 0)
+ {
+ /* Scheduler is suspended, don't schedule any jobs */
+ mali_pp_scheduler_unlock();
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ mali_group_unlock(virtual_group);
+ }
+ return;
+ }
+
+ /* Find physical job(s) to schedule first */
+ while (1)
+ {
+ struct mali_group *group;
+ struct mali_pp_job *job;
+ u32 subjob;
+
+ job = mali_pp_scheduler_get_physical_job();
+ if (NULL == job)
+ {
+ break; /* No job, early out */
+ }
+
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+ MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
+
+ /* Acquire a physical group, either from the idle list or from the virtual group.
+	 * In case the group was acquired from the virtual group, its state will be
+ * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
+ group = mali_pp_scheduler_acquire_physical_group();
+ if (NULL == group)
+ {
+ /* Could not get a group to run the job on, early out */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
+ break;
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p\n", group));
+
+ mali_pp_scheduler_compsitor_unblock(job);
+
+ /* Mark subjob as started */
+ subjob = mali_pp_job_get_first_unstarted_sub_job(job);
+ mali_pp_job_mark_sub_job_started(job, subjob);
+
+ /* Remove job from queue (if we now got the last subjob) */
+ mali_pp_scheduler_dequeue_physical_job(job);
+
+ /* Move group to working list */
+ _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
+
+ /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding */
+ physical_groups_to_start[num_physical_jobs_to_start] = group;
+ physical_jobs_to_start[num_physical_jobs_to_start] = job;
+ physical_subjobs_to_start[num_physical_jobs_to_start] = subjob;
+ ++num_physical_jobs_to_start;
+
+ MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
+ }
+
+ /* See if we have a virtual job to schedule */
+ if (mali_pp_scheduler_has_virtual_group())
+ {
+ if (VIRTUAL_GROUP_IDLE == virtual_group_state)
+ {
+ struct mali_pp_job *job = mali_pp_scheduler_get_virtual_job();
+ if (NULL != job)
+ {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+ MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+
+ mali_pp_scheduler_compsitor_unblock(job);
+
+ /* Mark the one and only subjob as started */
+ mali_pp_job_mark_sub_job_started(job, 0);
+
+ /* Remove job from queue */
+ mali_pp_scheduler_dequeue_virtual_job(job);
+
+ /* Virtual group is now working */
+ virtual_group_state = VIRTUAL_GROUP_WORKING;
+
+ /*
+ * We no longer need the scheduler lock,
+ * but we still need the virtual lock in order to start the virtual job
+ */
+ mali_pp_scheduler_unlock();
+
+ /* Start job */
+ mali_group_start_pp_job(virtual_group, job, 0);
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from schedule)\n",
+ mali_pp_job_get_id(job), job, 1,
+ mali_pp_job_get_sub_job_count(job)));
+
+ /* And now we are all done with the virtual_group lock as well */
+ mali_group_unlock(virtual_group);
+ }
+ else
+ {
+ /* No virtual job, release the two locks we are holding */
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(virtual_group);
+ }
+ }
+ else
+ {
+ /* Virtual core busy, release the two locks we are holding */
+ mali_pp_scheduler_unlock();
+ mali_group_unlock(virtual_group);
+ }
+
+ }
+ else
+ {
+ /* There is no virtual group, release the only lock we are holding */
+ mali_pp_scheduler_unlock();
+ }
+
+ /*
+	 * Now we have released the scheduler lock, and we are ready to kick off the actual starting of the
+ * physical jobs.
+ * The reason we want to wait until we have released the scheduler lock is that job start actually
+ * may take quite a bit of time (quite many registers needs to be written). This will allow new jobs
+ * from user space to come in, and post processing of other PP jobs to happen at the same time as we
+ * start jobs.
+ */
+ for (i = 0; i < num_physical_jobs_to_start; i++)
+ {
+ struct mali_group *group = physical_groups_to_start[i];
+ struct mali_pp_job *job = physical_jobs_to_start[i];
+ u32 sub_job = physical_subjobs_to_start[i];
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_group_lock(group);
+
+		/* In case this group was acquired from a virtual core, update its state to IDLE */
+ group->state = MALI_GROUP_STATE_IDLE;
+
+ mali_group_start_pp_job(group, job, sub_job);
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule)\n",
+ mali_pp_job_get_id(job), job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job)));
+
+ mali_group_unlock(group);
+
+ /* remove the return value from mali_group_start_xx_job, since we can't fail on Mali-300++ */
+ }
+}
+
+/*
+ * Deliver the job's result notification to its user-space session (unless
+ * the job was submitted with the "no notification" flag) and then dispose
+ * of the job object. Ownership of @job is consumed by this call.
+ *
+ * @param job      Completed PP job to report and delete.
+ * @param deferred MALI_TRUE to hand the job to the deferred-deletion
+ *                 workqueue (only valid when
+ *                 MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE is defined),
+ *                 MALI_FALSE to delete it inline.
+ */
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
+{
+	if (MALI_FALSE == mali_pp_job_use_no_notification(job))
+	{
+		u32 i;
+		u32 num_counters_to_copy;
+		mali_bool success = mali_pp_job_was_success(job);
+
+		_mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
+		_mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+		jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+		if (MALI_TRUE == success)
+		{
+			jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+		}
+		else
+		{
+			jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+		}
+
+		/* A virtual job ran across all physical cores, so report one
+		 * perf-counter pair per physical core; a physical job reports
+		 * one pair per sub job. */
+		if (mali_pp_job_is_virtual(job))
+		{
+			num_counters_to_copy = num_cores; /* Number of physical cores available */
+		}
+		else
+		{
+			num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+		}
+
+		for (i = 0; i < num_counters_to_copy; i++)
+		{
+			jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+			jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+		}
+
+		/* The notification's ownership transfers to the session; clear our
+		 * pointer so job deletion does not free it a second time. */
+		mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
+		job->finished_notification = NULL;
+	}
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+	if (MALI_TRUE == deferred)
+	{
+		/* The deletion of the job object (releasing sync refs etc) must be done in a different context */
+		_mali_osk_lock_wait(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+		MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); /* This job object should not be on any list */
+		_mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
+
+		_mali_osk_lock_signal(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+		_mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
+	}
+	else
+	{
+		mali_pp_job_delete(job);
+	}
+#else
+	MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
+	mali_pp_job_delete(job);
+#endif
+}
+
+/* Workqueue entry point: run one scheduling pass from process context. */
+static void mali_pp_scheduler_do_schedule(void *arg)
+{
+	MALI_IGNORE(arg);
+
+	mali_pp_scheduler_schedule();
+}
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+/*
+ * Workqueue handler that deletes jobs queued on
+ * pp_scheduler_job_deletion_queue by mali_pp_scheduler_return_job_to_user().
+ * Deletion happens here, outside interrupt/atomic context, because it may
+ * release sync references and other resources.
+ */
+static void mali_pp_scheduler_do_job_delete(void *arg)
+{
+	_MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+	struct mali_pp_job *job;
+	struct mali_pp_job *tmp;
+
+	MALI_IGNORE(arg);
+
+	_mali_osk_lock_wait(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+	/*
+	 * Quickly "unhook" the jobs pending to be deleted, so we can release the
+	 * lock before we start deleting the job objects (without any locks held).
+	 */
+	_mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
+
+	_mali_osk_lock_signal(pp_scheduler_job_delete_lock, _MALI_OSK_LOCKMODE_RW);
+
+	_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list)
+	{
+		mali_pp_job_delete(job); /* delete the job object itself */
+	}
+}
+#endif
+
+/*
+ * Completion handler for one (sub) job on a group. Called with the group
+ * lock held (asserted below), and must return with it still held.
+ *
+ * Marks the sub job complete; if the whole job is now done, signals its
+ * sync point (if any), returns the result to user space and resolves any
+ * session barrier. Afterwards the now-idle group is immediately reused:
+ * either the next queued job is started on it, or a physical group rejoins
+ * the virtual group / the idle list.
+ *
+ * @param group   Group the (sub) job ran on; its lock is held by the caller.
+ * @param job     The PP job the sub job belongs to.
+ * @param sub_job Index of the completed sub job.
+ * @param success MALI_TRUE if the sub job completed without error.
+ */
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success)
+{
+	mali_bool job_is_done;
+	mali_bool barrier_enforced = MALI_FALSE;
+
+	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s)\n",
+	                     mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+	                     mali_pp_job_get_id(job),
+	                     job, sub_job + 1,
+	                     mali_pp_job_get_sub_job_count(job),
+	                     success ? "success" : "failure"));
+	MALI_ASSERT_GROUP_LOCKED(group);
+	mali_pp_scheduler_lock();
+
+	mali_pp_job_mark_sub_job_completed(job, success);
+
+	MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job) == mali_group_is_virtual(group));
+
+	job_is_done = mali_pp_job_is_complete(job);
+
+	if (job_is_done)
+	{
+		struct mali_session_data *session = mali_pp_job_get_session(job);
+		struct mali_pp_job *job_head;
+
+		/* Remove job from session list */
+		_mali_osk_list_del(&job->session_list);
+
+		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X)\n",
+		                     mali_pp_job_is_virtual(job) ? "virtual" : "physical",
+		                     mali_pp_job_get_id(job), job));
+#if defined(CONFIG_SYNC)
+		if (job->sync_point)
+		{
+			int error;
+			if (success) error = 0;
+			else error = -EFAULT;
+			MALI_DEBUG_PRINT(4, ("Sync: Signal %spoint for job %d\n",
+			                     success ? "" : "failed ",
+			                     mali_pp_job_get_id(job)));
+			mali_sync_signal_pt(job->sync_point, error);
+		}
+#endif
+
+		/* Send notification back to user space */
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+		/* Deferred deletion: we are in interrupt-ish context here, so the
+		 * job object must be freed on the deletion workqueue. */
+		mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
+#else
+		mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+#endif
+
+		mali_pp_scheduler_job_completed();
+
+		/* Resolve any barriers: if the next job in this session was waiting
+		 * on a barrier, the barrier condition is now met. */
+		if (!_mali_osk_list_empty(&session->job_list))
+		{
+			job_head = _MALI_OSK_LIST_ENTRY(session->job_list.next, struct mali_pp_job, session_list);
+			if (mali_pp_job_has_active_barrier(job_head))
+			{
+				barrier_enforced = MALI_TRUE;
+				mali_pp_job_barrier_enforced(job_head);
+			}
+		}
+	}
+
+	/* If paused, then this was the last job, so wake up sleeping workers */
+	if (pause_count > 0)
+	{
+		/* Wake up sleeping workers. Their wake-up condition is that
+		 * num_slots == num_slots_idle, so unless we are done working, no
+		 * threads will actually be woken up.
+		 */
+		if (mali_group_is_virtual(group))
+		{
+			virtual_group_state = VIRTUAL_GROUP_IDLE;
+		}
+		else
+		{
+			_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+		}
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+		_mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+		mali_pp_scheduler_unlock();
+		return;
+	}
+
+	if (barrier_enforced)
+	{
+		/* A barrier was resolved, so schedule previously blocked jobs */
+		_mali_osk_wq_schedule_work(pp_scheduler_wq_schedule);
+	}
+
+	/* Recycle variables: below, job/sub_job refer to the NEXT job to start
+	 * on this (or an acquired) group, not the job that just finished. */
+	job = NULL;
+	sub_job = 0;
+
+	if (mali_group_is_virtual(group))
+	{
+		/* Virtual group */
+
+		/* Now that the virtual group is idle, check if we should reconfigure */
+		struct mali_pp_job *physical_job = NULL;
+		struct mali_group *physical_group = NULL;
+
+		/* Obey the policy */
+		virtual_group_state = VIRTUAL_GROUP_IDLE;
+
+		if (mali_pp_scheduler_can_move_virtual_to_physical())
+		{
+			/* There is a runnable physical job and we can acquire a physical group */
+			physical_job = mali_pp_scheduler_get_physical_job();
+			MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
+
+			/* NOTE: "compsitor" is a pre-existing typo in this helper's name
+			 * (defined elsewhere); kept as-is to match its declaration. */
+			mali_pp_scheduler_compsitor_unblock(physical_job);
+
+			/* Mark subjob as started */
+			sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
+			mali_pp_job_mark_sub_job_started(physical_job, sub_job);
+
+			/* Remove job from queue (if we now got the last subjob) */
+			mali_pp_scheduler_dequeue_physical_job(physical_job);
+
+			/* Acquire a physical group from the virtual group
+			 * It's state will be LEAVING_VIRTUAL and must be set to IDLE before it can be used */
+			physical_group = mali_group_acquire_group(virtual_group);
+
+			/* Move physical group to the working list, as we will soon start a job on it */
+			_mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
+
+			mali_pp_scheduler_disable_empty_virtual();
+		}
+
+		/* Start the next virtual job */
+		job = mali_pp_scheduler_get_virtual_job();
+		if (NULL != job && VIRTUAL_GROUP_IDLE == virtual_group_state)
+		{
+			/* There is a runnable virtual job */
+			MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+			MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+			MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+
+			mali_pp_scheduler_compsitor_unblock(job);
+
+			mali_pp_job_mark_sub_job_started(job, 0);
+
+			/* Remove job from queue */
+			mali_pp_scheduler_dequeue_virtual_job(job);
+
+			/* Virtual group is now working */
+			virtual_group_state = VIRTUAL_GROUP_WORKING;
+
+			mali_pp_scheduler_unlock();
+
+			/* Start job */
+			mali_group_start_pp_job(group, job, 0);
+			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done)\n",
+			                     mali_pp_job_get_id(job), job, 1,
+			                     mali_pp_job_get_sub_job_count(job)));
+		}
+		else
+		{
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+			trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
+#endif
+			mali_pp_scheduler_unlock();
+		}
+
+		/* Start a physical job (if we acquired a physical group earlier) */
+		if (NULL != physical_job && NULL != physical_group)
+		{
+			mali_group_lock(physical_group);
+
+			/* Set the group state from LEAVING_VIRTUAL to IDLE to complete the transition */
+			physical_group->state = MALI_GROUP_STATE_IDLE;
+
+			/* Start job on core */
+			mali_group_start_pp_job(physical_group, physical_job, sub_job);
+			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done)\n",
+			                     mali_pp_job_get_id(physical_job), physical_job, sub_job + 1,
+			                     mali_pp_job_get_sub_job_count(physical_job)));
+
+			mali_group_unlock(physical_group);
+		}
+	}
+	else
+	{
+		/* Physical group */
+		job = mali_pp_scheduler_get_physical_job();
+		if (NULL != job)
+		{
+			/* There is a runnable physical job */
+			MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
+
+			mali_pp_scheduler_compsitor_unblock(job);
+
+			/* Mark subjob as started */
+			sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+			mali_pp_job_mark_sub_job_started(job, sub_job);
+
+			/* Remove job from queue (if we now got the last subjob) */
+			mali_pp_scheduler_dequeue_physical_job(job);
+
+			mali_pp_scheduler_unlock();
+
+			/* Group is already on the working list, so start the job */
+			mali_group_start_pp_job(group, job, sub_job);
+			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done)\n",
+			                     mali_pp_job_get_id(job), job, sub_job + 1,
+			                     mali_pp_job_get_sub_job_count(job)));
+		}
+		else if (mali_pp_scheduler_has_virtual_group())
+		{
+			/* Rejoin virtual group */
+			/* In the future, a policy check might be useful here */
+
+			/* We're no longer needed on the scheduler list */
+			_mali_osk_list_delinit(&(group->pp_scheduler_list));
+
+			/* Make sure no interrupts are handled for this group during
+			 * the transition from physical to virtual */
+			group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+			/* Lock-order juggling: drop scheduler and group locks before
+			 * taking the virtual group lock, then re-take the group lock
+			 * at the end so the caller's invariant holds. */
+			mali_pp_scheduler_unlock();
+			mali_group_unlock(group);
+
+			mali_group_lock(virtual_group);
+
+			if (mali_pp_scheduler_has_virtual_group())
+			{
+				mali_pp_scheduler_enable_empty_virtual();
+			}
+
+			/* We need to recheck the group state since it is possible that someone has
+			 * modified the group before we locked the virtual group. */
+			if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state)
+			{
+				mali_group_add_group(virtual_group, group, MALI_TRUE);
+			}
+
+			mali_group_unlock(virtual_group);
+
+			if (mali_pp_scheduler_has_virtual_group() && VIRTUAL_GROUP_IDLE == virtual_group_state)
+			{
+				_mali_osk_wq_schedule_work(pp_scheduler_wq_schedule);
+			}
+
+			/* We need to return from this function with the group lock held */
+			mali_group_lock(group);
+		}
+		else
+		{
+			_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+			trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
+#endif
+			mali_pp_scheduler_unlock();
+		}
+	}
+}
+
+/*
+ * Pause the PP scheduler and block until all PP groups are idle.
+ * Each call increments pause_count; pair with mali_pp_scheduler_resume().
+ */
+void mali_pp_scheduler_suspend(void)
+{
+	mali_pp_scheduler_lock();
+	pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+	mali_pp_scheduler_unlock();
+
+	/* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+	 * mali_pp_scheduler_suspended() function will be called. This will return true
+	 * iff state is idle and pause_count > 0, so if the core is active this
+	 * will not do anything.
+	 */
+	_mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended);
+}
+
+/*
+ * Undo one mali_pp_scheduler_suspend(); kicks the scheduler once the last
+ * outstanding suspend is released.
+ *
+ * NOTE(review): pause_count is re-read here AFTER dropping the scheduler
+ * lock, so a concurrent suspend/resume could race this check — confirm
+ * whether callers serialize suspend/resume externally.
+ */
+void mali_pp_scheduler_resume(void)
+{
+	mali_pp_scheduler_lock();
+	pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+	mali_pp_scheduler_unlock();
+	if (0 == pause_count)
+	{
+		mali_pp_scheduler_schedule();
+	}
+}
+
+/*
+ * Enqueue a ready-to-run job on the virtual or physical job queue and on
+ * its session's job list. Compositor-session jobs are inserted ahead of
+ * other sessions' jobs (behind any already-queued jobs from the same
+ * compositor session); all other jobs go to the tail. Does NOT trigger a
+ * scheduling pass — callers do that themselves.
+ *
+ * @param job     Job to queue (must not be NULL).
+ * @param session Session owning the job.
+ */
+MALI_STATIC_INLINE void mali_pp_scheduler_queue_job(struct mali_pp_job *job, struct mali_session_data *session)
+{
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+	trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
+#endif
+
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+	/* Map buffers attached to job */
+	if (0 != job->num_memory_cookies)
+	{
+		mali_dma_buf_map_job(job);
+	}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+	mali_pp_scheduler_job_queued();
+
+	mali_pp_scheduler_lock();
+
+	if (mali_pp_job_is_virtual(job))
+	{
+		/* Virtual job */
+		virtual_job_queue_depth += 1;
+
+		/* Put compositor jobs on front */
+		if(job->session->is_compositor)
+		{
+			struct mali_pp_job *iter, *tmp;
+
+			/* Walk past queued jobs from the same compositor session so
+			 * their relative order is preserved; stop at the first job
+			 * from any other session. */
+			_MALI_OSK_LIST_FOREACHENTRY(iter, tmp, &virtual_job_queue, struct mali_pp_job, list)
+			{
+				if (iter->session != job->session) break;
+			}
+
+			_mali_osk_list_addtail(&job->list, &iter->list);
+		}
+		else
+		{
+			_mali_osk_list_addtail(&job->list, &virtual_job_queue);
+		}
+	}
+	else
+	{
+		job_queue_depth += mali_pp_job_get_sub_job_count(job);
+
+		/* Put compositor jobs on front */
+		if(job->session->is_compositor)
+		{
+			struct mali_pp_job *iter, *tmp;
+
+			_MALI_OSK_LIST_FOREACHENTRY(iter, tmp, &job_queue, struct mali_pp_job, list)
+			{
+				if (iter->session != job->session) break;
+			}
+
+			_mali_osk_list_addtail(&job->list, &iter->list);
+		}
+		else
+		{
+			_mali_osk_list_addtail(&job->list, &job_queue);
+		}
+	}
+
+	if (mali_pp_job_has_active_barrier(job) && _mali_osk_list_empty(&session->job_list))
+	{
+		/* No running jobs on this session, so barrier condition already met */
+		mali_pp_job_barrier_enforced(job);
+	}
+
+	/* Add job to session list */
+	_mali_osk_list_addtail(&job->session_list, &session->job_list);
+
+	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued\n",
+	                     mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
+	                     mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+	mali_pp_scheduler_unlock();
+}
+
+#if defined(CONFIG_SYNC)
+/*
+ * Async fence-wait callback (may run in atomic context): defer the real
+ * work to sync_callback_work() on a workqueue.
+ */
+static void sync_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
+{
+	struct mali_pp_job *job = _MALI_OSK_CONTAINER_OF(waiter, struct mali_pp_job, sync_waiter);
+
+	/* Schedule sync_callback_work */
+	_mali_osk_wq_schedule_work(job->sync_work);
+}
+
+/*
+ * Workqueue half of the pre-fence wait: runs once the job's pre_fence has
+ * signalled. Removes the job from the session's pending list, then either
+ * queues and schedules it (fence OK) or fails it back to user space
+ * (fence error), signalling the job's own sync point with -EFAULT.
+ *
+ * @param arg The struct mali_pp_job * that was waiting on its pre-fence.
+ */
+static void sync_callback_work(void *arg)
+{
+	struct mali_pp_job *job = (struct mali_pp_job *)arg;
+	struct mali_session_data *session;
+	int err;
+
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+	session = job->session;
+
+	/* Remove job from session pending job list */
+	_mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+	_mali_osk_list_delinit(&job->list);
+	_mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+	/* Fence has already signalled, so this wait (timeout 0) just fetches
+	 * its final status. */
+	err = sync_fence_wait(job->pre_fence, 0);
+	if (likely(0 == err))
+	{
+		MALI_DEBUG_PRINT(3, ("Mali sync: Job %d ready to run\n", mali_pp_job_get_id(job)));
+
+		mali_pp_scheduler_queue_job(job, session);
+
+		mali_pp_scheduler_schedule();
+	}
+	else
+	{
+		/* Fence signaled error */
+		MALI_DEBUG_PRINT(3, ("Mali sync: Job %d abort due to sync error\n", mali_pp_job_get_id(job)));
+
+		if (job->sync_point) mali_sync_signal_pt(job->sync_point, -EFAULT);
+
+		mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+		mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+	}
+}
+#endif
+#endif
+
+/*
+ * User-kernel entry point: create and submit a PP job.
+ *
+ * With CONFIG_SYNC the job may additionally:
+ *  - create a post-fence (_MALI_PP_JOB_FLAG_FENCE) returned via *fence, or
+ *  - back an existing "empty" fence (_MALI_PP_JOB_FLAG_EMPTY_FENCE), and
+ *  - wait asynchronously on a pre-fence (uargs->fence > 0) before being
+ *    queued; sync_callback_work() completes the submission in that case.
+ *
+ * Invalid or failed jobs are reported to user space via the normal result
+ * notification, so this call still returns _MALI_OSK_ERR_OK for them; only
+ * job-object allocation failure returns an error code.
+ *
+ * @param ctx   Session pointer (struct mali_session_data *).
+ * @param uargs Job description copied from user space.
+ * @param fence Out: post-fence fd, when one is created/adopted.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs, int *fence)
+{
+	struct mali_session_data *session;
+	struct mali_pp_job *job;
+
+	MALI_DEBUG_ASSERT_POINTER(uargs);
+	MALI_DEBUG_ASSERT_POINTER(ctx);
+
+	session = (struct mali_session_data*)ctx;
+
+	job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+	if (NULL == job)
+	{
+		MALI_PRINT_ERROR(("Failed to create job!\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_pp_job_check(job))
+	{
+		/* Not a valid job, return to user immediately */
+		mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+		mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+		return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+	}
+
+#if PROFILING_SKIP_PP_JOBS || PROFILING_SKIP_PP_AND_GP_JOBS
+#warning PP jobs will not be executed
+	mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
+	return _MALI_OSK_ERR_OK;
+#endif
+
+#if defined(CONFIG_SYNC)
+	if (_MALI_PP_JOB_FLAG_FENCE & job->uargs.flags)
+	{
+		int post_fence = -1;
+
+		job->sync_point = mali_stream_create_point(job->uargs.stream);
+
+		if (unlikely(NULL == job->sync_point))
+		{
+			/* Fence creation failed. */
+			MALI_DEBUG_PRINT(2, ("Failed to create sync point for job %d\n", mali_pp_job_get_id(job)));
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+		}
+
+		post_fence = mali_stream_create_fence(job->sync_point);
+
+		if (unlikely(0 > post_fence))
+		{
+			/* Fence creation failed. */
+			/* mali_stream_create_fence already freed the sync_point */
+			MALI_DEBUG_PRINT(2, ("Failed to create fence for job %d\n", mali_pp_job_get_id(job)));
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+		}
+
+		/* Grab a reference to the fence. It must be around when the
+		 * job is completed, so the point can be signalled. */
+		sync_fence_fdget(post_fence);
+
+		*fence = post_fence;
+
+		MALI_DEBUG_PRINT(3, ("Sync: Created fence %d for job %d\n", post_fence, mali_pp_job_get_id(job)));
+	}
+	else if (_MALI_PP_JOB_FLAG_EMPTY_FENCE & job->uargs.flags)
+	{
+		int empty_fence_fd = job->uargs.stream;
+		struct sync_fence *empty_fence;
+		struct sync_pt *pt;
+		int ret;
+
+		/* Grab and keep a reference to the fence. It must be around
+		 * when the job is completed, so the point can be signalled. */
+		empty_fence = sync_fence_fdget(empty_fence_fd);
+
+		if (unlikely(NULL == empty_fence))
+		{
+			MALI_DEBUG_PRINT_ERROR(("Failed to accept empty fence: %d\n", empty_fence_fd));
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK;
+		}
+
+		if (unlikely(list_empty(&empty_fence->pt_list_head)))
+		{
+			MALI_DEBUG_PRINT_ERROR(("Failed to accept empty fence: %d\n", empty_fence_fd));
+			sync_fence_put(empty_fence);
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK;
+		}
+
+		/* The job adopts the fence's first sync point and will signal it
+		 * on completion. */
+		pt = list_first_entry(&empty_fence->pt_list_head, struct sync_pt, pt_list);
+
+		ret = mali_sync_timed_commit(pt);
+
+		if (unlikely(0 != ret))
+		{
+			MALI_DEBUG_PRINT_ERROR(("Empty fence not valid: %d\n", empty_fence_fd));
+			sync_fence_put(empty_fence);
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK;
+		}
+
+		job->sync_point = pt;
+
+		*fence = empty_fence_fd;
+
+		MALI_DEBUG_PRINT(3, ("Sync: Job %d now backs fence %d\n", mali_pp_job_get_id(job), empty_fence_fd));
+	}
+
+	if (0 < job->uargs.fence)
+	{
+		int pre_fence_fd = job->uargs.fence;
+		int err;
+
+		MALI_DEBUG_PRINT(3, ("Sync: Job %d waiting for fence %d\n", mali_pp_job_get_id(job), pre_fence_fd));
+
+		job->pre_fence = sync_fence_fdget(pre_fence_fd); /* Reference will be released when job is deleted. */
+		if (NULL == job->pre_fence)
+		{
+			MALI_DEBUG_PRINT(2, ("Failed to import fence %d\n", pre_fence_fd));
+			if (job->sync_point) mali_sync_signal_pt(job->sync_point, -EINVAL);
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+		}
+
+		job->sync_work = _mali_osk_wq_create_work(sync_callback_work, (void*)job);
+		if (NULL == job->sync_work)
+		{
+			if (job->sync_point) mali_sync_signal_pt(job->sync_point, -ENOMEM);
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+		}
+
+		/* Add pending job to session pending job list */
+		_mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+		_mali_osk_list_addtail(&job->list, &session->pending_jobs);
+		_mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+
+		/* err: 0 = async wait started, 1 = fence already signalled,
+		 * <0 = error. */
+		sync_fence_waiter_init(&job->sync_waiter, sync_callback);
+		err = sync_fence_wait_async(job->pre_fence, &job->sync_waiter);
+
+		if (0 != err)
+		{
+			/* No async wait started, remove job from session pending job list */
+			_mali_osk_lock_wait(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+			_mali_osk_list_delinit(&job->list);
+			_mali_osk_lock_signal(session->pending_jobs_lock, _MALI_OSK_LOCKMODE_RW);
+		}
+
+		if (1 == err)
+		{
+			/* Fence has already signalled */
+			mali_pp_scheduler_queue_job(job, session);
+			if (!_mali_osk_list_empty(&group_list_idle) || VIRTUAL_GROUP_IDLE == virtual_group_state)
+			{
+				mali_pp_scheduler_schedule();
+			}
+			return _MALI_OSK_ERR_OK;
+		}
+		else if (0 > err)
+		{
+			/* Sync fail */
+			if (job->sync_point) mali_sync_signal_pt(job->sync_point, -EFAULT);
+			mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+			mali_pp_scheduler_return_job_to_user(job, MALI_FALSE); /* This will also delete the job object */
+			return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+		}
+
+	}
+	else
+#endif /* CONFIG_SYNC */
+	{
+		mali_pp_scheduler_queue_job(job, session);
+
+		if (!_mali_osk_list_empty(&group_list_idle) || VIRTUAL_GROUP_IDLE == virtual_group_state)
+		{
+			mali_pp_scheduler_schedule();
+		}
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* User-kernel call: report total and currently enabled PP core counts. */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT_POINTER(args->ctx);
+	args->number_of_total_cores = num_cores;
+	args->number_of_enabled_cores = enabled_cores;
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Total number of PP cores managed by the scheduler (enabled or not). */
+u32 mali_pp_scheduler_get_num_cores_total(void)
+{
+	return num_cores;
+}
+
+/* Number of PP cores currently enabled for scheduling. */
+u32 mali_pp_scheduler_get_num_cores_enabled(void)
+{
+	return enabled_cores;
+}
+
+/* User-kernel call: report the PP core hardware version. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT_POINTER(args->ctx);
+	args->version = pp_version;
+	return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * User-kernel call: disable selected write-back units (WB0/WB1/WB2) on a
+ * still-queued job identified by (session, frame builder id, flush id).
+ * A no-op if the job has already started or cannot be found; only the
+ * physical job_queue is searched.
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+	struct mali_session_data *session;
+	struct mali_pp_job *job;
+	struct mali_pp_job *tmp;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+	session = (struct mali_session_data*)args->ctx;
+
+	/* Check queue for jobs that match */
+	mali_pp_scheduler_lock();
+	_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+	{
+		if (mali_pp_job_get_session(job) == session &&
+		    mali_pp_job_get_frame_builder_id(job) == (u32)args->fb_id &&
+		    mali_pp_job_get_flush_id(job) == (u32)args->flush_id)
+		{
+			if (args->wbx & _MALI_UK_PP_JOB_WB0)
+			{
+				mali_pp_job_disable_wb0(job);
+			}
+			if (args->wbx & _MALI_UK_PP_JOB_WB1)
+			{
+				mali_pp_job_disable_wb1(job);
+			}
+			if (args->wbx & _MALI_UK_PP_JOB_WB2)
+			{
+				mali_pp_job_disable_wb2(job);
+			}
+			break;
+		}
+	}
+	mali_pp_scheduler_unlock();
+}
+
+/*
+ * Abort all PP jobs belonging to a session (called on session teardown).
+ * Queued-but-unstarted jobs are failed and deleted; partially started jobs
+ * are left to complete via job_done. Jobs running on hardware are aborted
+ * via mali_group_abort_session() on every working/idle group and on the
+ * virtual group. Group pointers are snapshotted under the scheduler lock
+ * so the per-group aborts can run without it.
+ *
+ * @param session Session whose jobs are aborted.
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+	struct mali_pp_job *job, *tmp_job;
+	struct mali_group *group, *tmp_group;
+	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+	s32 i = 0;
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+	_MALI_OSK_LIST_HEAD_STATIC_INIT(deferred_deletion_list);
+#endif
+
+	mali_pp_scheduler_lock();
+	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->job_list, struct mali_pp_job, session_list)
+	{
+		/* Remove job from queue (if it's not queued, list_del has no effect) */
+		_mali_osk_list_delinit(&job->list);
+
+		/* Keep the queue-depth accounting in step with the dequeue above:
+		 * only count the sub jobs that had not yet started. */
+		if (mali_pp_job_is_virtual(job))
+		{
+			MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
+			if (0 == mali_pp_job_get_first_unstarted_sub_job(job))
+			{
+				--virtual_job_queue_depth;
+			}
+		}
+		else
+		{
+			job_queue_depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
+		}
+
+		/* Mark all unstarted jobs as failed */
+		mali_pp_job_mark_unstarted_failed(job);
+
+		if (mali_pp_job_is_complete(job))
+		{
+			_mali_osk_list_del(&job->session_list);
+
+			/* It is safe to delete the job, since it won't land in job_done() */
+			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Aborted PP job 0x%08x\n", job));
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+			/* Defer deletion until after the scheduler lock is dropped. */
+			MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+			_mali_osk_list_addtail(&job->list, &deferred_deletion_list);
+#else
+			mali_pp_job_delete(job);
+#endif
+
+			mali_pp_scheduler_job_completed();
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Keeping partially started PP job 0x%08x in session\n", job));
+		}
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list)
+	{
+		groups[i++] = group;
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list)
+	{
+		groups[i++] = group;
+	}
+
+	mali_pp_scheduler_unlock();
+
+#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
+	_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &deferred_deletion_list, struct mali_pp_job, list)
+	{
+		mali_pp_job_delete(job);
+	}
+#endif
+
+	/* Abort running jobs from this session */
+	while (i > 0)
+	{
+		mali_group_abort_session(groups[--i], session);
+	}
+
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		mali_group_abort_session(virtual_group, session);
+	}
+}
+
+/*
+ * Wait-queue predicate for mali_pp_scheduler_suspend(): true when the
+ * scheduler is paused AND no physical group or the virtual group is
+ * still working.
+ */
+static mali_bool mali_pp_scheduler_is_suspended(void)
+{
+	mali_bool ret;
+
+	mali_pp_scheduler_lock();
+
+	ret = pause_count > 0
+	      && _mali_osk_list_empty(&group_list_working)
+	      && VIRTUAL_GROUP_WORKING != virtual_group_state;
+
+	mali_pp_scheduler_unlock();
+
+	return ret;
+}
+
+/* Unlocked snapshot of the physical job queue depth (sub-job count). */
+int mali_pp_scheduler_get_queue_depth(void)
+{
+	return job_queue_depth;
+}
+
+#if MALI_STATE_TRACKING
+/*
+ * Dump scheduler and per-group state into @buf (for debugfs/state
+ * tracking). Iterates the working, idle and disabled group lists plus the
+ * virtual group without taking the scheduler lock (debug-only best effort).
+ *
+ * @param buf  Destination buffer.
+ * @param size Size of @buf in bytes.
+ * @return Number of bytes written.
+ */
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+	int n = 0;
+	struct mali_group *group;
+	struct mali_group *temp;
+
+	n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+	n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+	n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list)
+	{
+		n += mali_group_dump_state(group, buf + n, size - n);
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+	{
+		n += mali_group_dump_state(group, buf + n, size - n);
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list)
+	{
+		n += mali_group_dump_state(group, buf + n, size - n);
+	}
+
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		n += mali_group_dump_state(virtual_group, buf + n, size -n);
+	}
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\n");
+	return n;
+}
+#endif
+
+/* This function is intended for power on reset of all cores.
+ * No locking is done for the list iteration, which can only be safe if the
+ * scheduler is paused and all cores idle. That is always the case on init and
+ * power on. */
+void mali_pp_scheduler_reset_all_groups(void)
+{
+	struct mali_group *group, *temp;
+	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+	s32 i = 0;
+
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		mali_group_lock(virtual_group);
+		mali_group_reset(virtual_group);
+		mali_group_unlock(virtual_group);
+	}
+
+	MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+	MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+	/* Snapshot the idle groups under the scheduler lock, then reset each
+	 * one under its own group lock (lock order: group before scheduler). */
+	mali_pp_scheduler_lock();
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list)
+	{
+		groups[i++] = group;
+	}
+	mali_pp_scheduler_unlock();
+
+	while (i > 0)
+	{
+		group = groups[--i];
+
+		mali_group_lock(group);
+		mali_group_reset(group);
+		mali_group_unlock(group);
+	}
+}
+
+/*
+ * Zap (invalidate) the given session's MMU mappings on every group that is
+ * currently working, plus the virtual group. Working groups are
+ * snapshotted under the scheduler lock; the zap itself runs without it.
+ *
+ * @param session Session whose address space should be zapped.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
+{
+	struct mali_group *group, *temp;
+	struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
+	s32 i = 0;
+
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		mali_group_zap_session(virtual_group, session);
+	}
+
+	mali_pp_scheduler_lock();
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list)
+	{
+		groups[i++] = group;
+	}
+	mali_pp_scheduler_unlock();
+
+	while (i > 0)
+	{
+		mali_group_zap_session(groups[--i], session);
+	}
+}
+
+/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
+ * before calling this function to avoid Mali powering down as HW is accessed.
+ *
+ * Transition a DISABLED group back to service: take a PM-domain reference,
+ * power on and reset the core if power is up, and either add the group to
+ * the virtual group (Mali-450) or put it on the idle list. A no-op if the
+ * group is not in the DISABLED state.
+ */
+static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	mali_group_lock(group);
+
+	if (MALI_GROUP_STATE_DISABLED != group->state)
+	{
+		mali_group_unlock(group);
+		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled\n", group));
+		return;
+	}
+
+	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p\n", group));
+
+	mali_pp_scheduler_lock();
+
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+	++enabled_cores;
+
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		mali_bool update_hw;
+
+		/* Add group to virtual group */
+		_mali_osk_list_delinit(&(group->pp_scheduler_list));
+		group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
+
+		/* Drop both locks before taking the virtual group lock (lock
+		 * ordering), as in the physical->virtual rejoin in job_done. */
+		mali_pp_scheduler_unlock();
+		mali_group_unlock(group);
+
+		mali_group_lock(virtual_group);
+
+		update_hw = mali_pm_is_power_on();
+
+		mali_pm_domain_ref_get(group->pm_domain);
+		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+		                  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+		if (update_hw)
+		{
+			mali_group_lock(group);
+			mali_group_power_on_group(group);
+			mali_group_reset(group);
+			mali_group_unlock(group);
+		}
+
+		mali_pp_scheduler_enable_empty_virtual();
+		mali_group_add_group(virtual_group, group, update_hw);
+		MALI_DEBUG_PRINT(4, ("Done enabling group %p. Added to virtual group.\n", group));
+
+		mali_group_unlock(virtual_group);
+	}
+	else
+	{
+		mali_pm_domain_ref_get(group->pm_domain);
+		MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
+		                  MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
+
+		/* Put group on idle list */
+		if (mali_pm_is_power_on())
+		{
+			mali_group_power_on_group(group);
+			mali_group_reset(group);
+		}
+
+		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
+		group->state = MALI_GROUP_STATE_IDLE;
+
+		MALI_DEBUG_PRINT(4, ("Done enabling group %p. Now on idle list.\n", group));
+		mali_pp_scheduler_unlock();
+		mali_group_unlock(group);
+	}
+}
+
+/*
+ * Public wrapper: enable a PP group, holding a no-power-on PM device
+ * reference around the hardware access, then run a scheduling pass.
+ */
+void mali_pp_scheduler_enable_group(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	_mali_osk_pm_dev_ref_add_no_power_on();
+
+	mali_pp_scheduler_enable_group_internal(group);
+
+	_mali_osk_pm_dev_ref_dec_no_power_on();
+
+	/* Pick up any jobs that might have been queued if all PP groups were disabled. */
+	mali_pp_scheduler_schedule();
+}
+
+/*
+ * Take a group out of service: detach it from the virtual group if needed,
+ * move it to the disabled list, power the core down and drop its PM-domain
+ * reference. Requires the scheduler to be suspended (the caller,
+ * mali_pp_scheduler_disable_group, guarantees this) so the group cannot be
+ * working.
+ */
+static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
+{
+	if (mali_pp_scheduler_has_virtual_group())
+	{
+		mali_group_lock(virtual_group);
+
+		MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
+		if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state)
+		{
+			/* The group was in the process of being added to the virtual group. We
+			 * only need to change the state to reverse this. */
+			group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+		}
+		else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state)
+		{
+			/* Remove group from virtual group. The state of the group will be
+			 * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
+			mali_group_remove_group(virtual_group, group);
+
+			mali_pp_scheduler_disable_empty_virtual();
+		}
+
+		mali_group_unlock(virtual_group);
+	}
+
+	mali_group_lock(group);
+	mali_pp_scheduler_lock();
+
+	MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
+	                   || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+	                   || MALI_GROUP_STATE_DISABLED == group->state);
+
+	if (MALI_GROUP_STATE_DISABLED == group->state)
+	{
+		MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled\n", group));
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p\n", group));
+
+		--enabled_cores;
+		_mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
+		group->state = MALI_GROUP_STATE_DISABLED;
+
+		mali_group_power_off_group(group);
+		mali_pm_domain_ref_put(group->pm_domain);
+	}
+
+	mali_pp_scheduler_unlock();
+	mali_group_unlock(group);
+}
+
+/*
+ * Public wrapper: suspend the scheduler (so the group is guaranteed idle),
+ * disable the group while holding a no-power-on PM device reference, then
+ * resume scheduling.
+ */
+void mali_pp_scheduler_disable_group(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	mali_pp_scheduler_suspend();
+	_mali_osk_pm_dev_ref_add_no_power_on();
+
+	mali_pp_scheduler_disable_group_internal(group);
+
+	_mali_osk_pm_dev_ref_dec_no_power_on();
+	mali_pp_scheduler_resume();
+}
+
+
+/* Notify every user space session that the number of enabled PP cores has
+ * changed. Skipped entirely on Mali-450. */
+static void mali_pp_scheduler_notify_core_change(u32 num_cores)
+{
+ if (!mali_is_mali450())
+ {
+ /* Notify all user space sessions about the change, so number of master tile lists can be adapted */
+ struct mali_session_data *session, *tmp;
+
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link)
+ {
+ _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobj)
+ {
+ _mali_uk_pp_num_cores_changed_s *data = notobj->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ mali_session_send_notification(session, notobj);
+ }
+ else
+ {
+ /* Allocation failure: this session simply misses the notification. */
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change\n"));
+ }
+ }
+ mali_session_unlock();
+ }
+}
+
+/* Mali-400 core scaling: enable groups from the disabled list, or disable
+ * groups from the idle list, one at a time until enabled_cores matches
+ * target_core_nr, then notify user space sessions of the new count.
+ *
+ * NOTE(review): the scheduler lock is dropped before each
+ * enable/disable_group_internal call; the chosen group could in principle
+ * change state in that window — confirm callers serialize perf changes.
+ */
+static void mali_pp_scheduler_set_perf_level_mali400(u32 target_core_nr)
+{
+ struct mali_group *group;
+ MALI_DEBUG_ASSERT(mali_is_mali400());
+
+ if (target_core_nr > enabled_cores)
+ {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+
+ /* Hold a PM reference (without forcing power-on) while enabling. */
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ _mali_osk_pm_dev_barrier();
+
+ while (target_core_nr > enabled_cores)
+ {
+ mali_pp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&group_list_disabled));
+
+ /* Pick the first disabled group to enable. */
+ group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+
+ mali_pp_scheduler_unlock();
+
+ mali_pp_scheduler_enable_group_internal(group);
+ }
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ /* Newly enabled cores may be able to pick up queued work. */
+ mali_pp_scheduler_schedule();
+ }
+ else if (target_core_nr < enabled_cores)
+ {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+
+ /* Suspend so no group is working while cores are removed. */
+ mali_pp_scheduler_suspend();
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ while (target_core_nr < enabled_cores)
+ {
+ mali_pp_scheduler_lock();
+
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&group_list_idle));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ /* Pick the first idle group to disable. */
+ group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT( MALI_GROUP_STATE_IDLE == group->state
+ || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
+ || MALI_GROUP_STATE_DISABLED == group->state);
+
+ mali_pp_scheduler_unlock();
+
+ mali_pp_scheduler_disable_group_internal(group);
+ }
+
+ mali_pp_scheduler_resume();
+ }
+
+ mali_pp_scheduler_notify_core_change(target_core_nr);
+}
+
+/* Mali-450 core scaling: groups are enabled per PM domain, first from
+ * domain 2 and then domain 3; disabling walks the domains in the reverse
+ * order (domain 3 first, then domain 2). No user space notification is
+ * sent here (see mali_pp_scheduler_notify_core_change, which is a no-op
+ * on Mali-450 anyway).
+ */
+static void mali_pp_scheduler_set_perf_level_mali450(u32 target_core_nr)
+{
+ struct mali_group *group;
+ MALI_DEBUG_ASSERT(mali_is_mali450());
+
+ if (target_core_nr > enabled_cores)
+ {
+ /* Enable some cores */
+ struct mali_pm_domain *domain;
+
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+
+ /* Hold a PM reference (without forcing power-on) while enabling. */
+ _mali_osk_pm_dev_ref_add_no_power_on();
+ _mali_osk_pm_dev_barrier();
+
+ domain = mali_pm_domain_get(MALI_PMU_M450_DOM2);
+
+ MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain)
+ {
+ mali_pp_scheduler_enable_group_internal(group);
+ if (target_core_nr == enabled_cores) break;
+ }
+
+ /* Domain 2 exhausted; continue with domain 3 if more cores are needed. */
+ if (target_core_nr > enabled_cores)
+ {
+ domain = mali_pm_domain_get(MALI_PMU_M450_DOM3);
+ MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain)
+ {
+ mali_pp_scheduler_enable_group_internal(group);
+ if (target_core_nr == enabled_cores) break;
+ }
+ }
+
+ MALI_DEBUG_ASSERT(target_core_nr == enabled_cores);
+
+ _mali_osk_pm_dev_ref_dec_no_power_on();
+
+ /* Newly enabled cores may be able to pick up queued work. */
+ mali_pp_scheduler_schedule();
+ }
+ else if (target_core_nr < enabled_cores)
+ {
+ /* Disable some cores */
+ struct mali_pm_domain *domain;
+
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+
+ /* Suspend so no group is working while cores are removed. */
+ mali_pp_scheduler_suspend();
+
+ /* Disable domain 3 groups first (reverse of the enable order). */
+ domain = mali_pm_domain_get(MALI_PMU_M450_DOM3);
+ if (NULL != domain)
+ {
+ MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain)
+ {
+ mali_pp_scheduler_disable_group_internal(group);
+ if (target_core_nr == enabled_cores) break;
+ }
+ }
+
+ if (target_core_nr < enabled_cores)
+ {
+ domain = mali_pm_domain_get(MALI_PMU_M450_DOM2);
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain)
+ {
+ mali_pp_scheduler_disable_group_internal(group);
+ if (target_core_nr == enabled_cores) break;
+ }
+ }
+
+ MALI_DEBUG_ASSERT(target_core_nr == enabled_cores);
+
+ mali_pp_scheduler_resume();
+ }
+}
+
+/* Set the number of enabled PP cores.
+ *
+ * Returns 0 on success (including the no-op case where @cores already
+ * equals enabled_cores), or -EINVAL if @cores is 0 or exceeds the number
+ * of physical cores.
+ *
+ * NOTE(review): the header declares this parameter as u32 while the
+ * definition uses unsigned int — same width here, but keep them in sync.
+ */
+int mali_pp_scheduler_set_perf_level(unsigned int cores)
+{
+ if (cores == enabled_cores) return 0;
+ if (cores > num_cores) return -EINVAL;
+ if (0 == cores) return -EINVAL;
+
+ /* The presence of a virtual group distinguishes Mali-450 from Mali-400. */
+ if (!mali_pp_scheduler_has_virtual_group())
+ {
+ /* Mali-400 */
+ mali_pp_scheduler_set_perf_level_mali400(cores);
+ }
+ else
+ {
+ /* Mali-450 */
+ mali_pp_scheduler_set_perf_level_mali450(cores);
+ }
+
+ return 0;
+}
+
+/* Bookkeeping done whenever a PP job is queued: take a PM reference and
+ * start counting the PP as busy for utilization tracking. */
+static void mali_pp_scheduler_job_queued(void)
+{
+ /* We hold a PM reference for every job we hold queued (and running) */
+ _mali_osk_pm_dev_ref_add();
+
+ if (mali_utilization_enabled())
+ {
+ /*
+ * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
+ * This will be fine because we only lose the tiny idle gap between jobs, but
+ * we will instead get less utilization work to do (less locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+}
+
+/* Counterpart of mali_pp_scheduler_job_queued(): drop the PM reference
+ * and stop the utilization busy accounting for this job. */
+static void mali_pp_scheduler_job_completed(void)
+{
+ /* Release the PM reference we got in the mali_pp_scheduler_job_queued() function */
+ _mali_osk_pm_dev_ref_dec();
+
+ if (mali_utilization_enabled())
+ {
+ mali_utilization_pp_end();
+ }
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.h b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.h
new file mode 100755
index 00000000000000..0391969f72dc89
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_pp_scheduler.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "linux/mali/mali_utgard.h"
+
+/** Initialize the HW independent parts of the PP scheduler
+ */
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+/** Populate the PP scheduler with groups
+ */
+void mali_pp_scheduler_populate(void);
+void mali_pp_scheduler_depopulate(void);
+
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+void mali_pp_scheduler_schedule(void);
+
+extern mali_bool mali_pp_scheduler_blocked_on_compositor;
+
+/** @brief Abort all PP jobs from session running or queued
+ *
+ * This functions aborts all PP jobs from the specified session. Queued jobs are removed from the queue and jobs
+ * currently running on a core will be aborted.
+ *
+ * @param session Pointer to session whose jobs should be aborted
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by the PP scheduler. This must be
+ * called after the Mali HW has been powered on in order to reset the HW.
+ *
+ * This function is intended for power on reset of all cores.
+ * No locking is done, which can only be safe if the scheduler is paused and
+ * all cores idle. That is always the case on init and power on.
+ */
+void mali_pp_scheduler_reset_all_groups(void);
+
+/**
+ * @brief Zap TLB on all groups with \a session active
+ *
+ * The scheduler will zap the session on all groups it owns.
+ */
+void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
+
+int mali_pp_scheduler_get_queue_depth(void);
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_pp_scheduler_enable_group(struct mali_group *group);
+void mali_pp_scheduler_disable_group(struct mali_group *group);
+
+int mali_pp_scheduler_set_perf_level(u32 cores);
+
+u32 mali_pp_scheduler_get_num_cores_total(void);
+u32 mali_pp_scheduler_get_num_cores_enabled(void);
+
+/**
+ * @brief Returns the number of Pixel Processors in the system irrespective of the context
+ *
+ * @return number of physical Pixel Processor cores in the system
+ *
+ * NOTE(review): this declaration duplicates the one earlier in this header;
+ * one of the two copies can be removed.
+ */
+u32 mali_pp_scheduler_get_num_cores_total(void);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.c b/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.c
new file mode 100755
index 00000000000000..f288b32ec433ce
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static _mali_osk_atomic_t mali_job_autonumber;
+
+/* Set up the global atomic counter used to hand out unique job ids.
+ * Returns _MALI_OSK_ERR_FAULT if the atomic cannot be initialized. */
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
+ {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the job id counter created by mali_scheduler_initialize(). */
+void mali_scheduler_terminate(void)
+{
+ _mali_osk_atomic_term(&mali_job_autonumber);
+}
+
+/* Return a fresh job id by atomically incrementing the global counter. */
+u32 mali_scheduler_get_new_id(void)
+{
+ u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
+ return job_id;
+}
+
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.h b/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.h
new file mode 100755
index 00000000000000..d9c0b2c1017b67
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_scheduler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+
+/**
+ * @brief Reset all groups
+ *
+ * This function resets all groups known by both the PP and GP scheduler.
+ * This must be called after the Mali HW has been powered on in order to reset
+ * the HW.
+ */
+MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+{
+ mali_gp_scheduler_reset_all_groups();
+ mali_pp_scheduler_reset_all_groups();
+}
+
+/**
+ * @brief Zap TLB on all active groups running \a session
+ *
+ * @param session Pointer to the session to zap
+ */
+MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+{
+ mali_gp_scheduler_zap_all_active(session);
+ mali_pp_scheduler_zap_all_active(session);
+}
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_session.c b/drivers/parrot/gpu/mali400_legacy/common/mali_session.c
new file mode 100755
index 00000000000000..3118b57cf9a77a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_session.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+
+_mali_osk_lock_t *mali_sessions_lock;
+
+/* Initialize the global session list and the reader/writer lock that
+ * protects it. Returns _MALI_OSK_ERR_NOMEM if the lock cannot be created. */
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ const _mali_osk_lock_flags_t lock_flags = _MALI_OSK_LOCKFLAG_READERWRITER |
+ _MALI_OSK_LOCKFLAG_ORDERED |
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
+
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+ mali_sessions_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+ if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Destroy the session-list lock created by mali_session_initialize(). */
+void mali_session_terminate(void)
+{
+ _mali_osk_lock_term(mali_sessions_lock);
+}
+
+/* Link @session into the global session list (takes the session lock). */
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_unlock();
+}
+
+/* Unlink @session from the global session list and reinitialize its link
+ * so it can be safely re-added or deleted again (takes the session lock). */
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_unlock();
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_session.h b/drivers/parrot/gpu/mali400_legacy/common/mali_session.h
new file mode 100755
index 00000000000000..37f8dbf4e3fe67
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_session.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+/** Per-process driver session state. One instance exists for each open
+ * handle to the Mali device driver. */
+struct mali_session_data
+{
+ _mali_osk_notification_queue_t * ioctl_queue; /**< Queue of notifications delivered to user space (see mali_session_send_notification) */
+
+#ifdef CONFIG_SYNC
+ _mali_osk_list_t pending_jobs; /**< NOTE(review): presumably jobs waiting on sync fences — confirm against users */
+ _mali_osk_lock_t *pending_jobs_lock; /**< Lock protecting pending_jobs */
+#endif
+
+ _mali_osk_lock_t *memory_lock; /**< Lock protecting the vm manipulation */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+
+ _MALI_OSK_LIST_HEAD(job_list); /**< List of all jobs on this session */
+ mali_bool is_compositor; /**< Gives compositor priority to jobs from this session if TRUE */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head defined in mali_session.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_lock_t *mali_sessions_lock;
+
+/** Acquire the global session-list lock in read/write mode. */
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_lock_wait(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/** Release the global session-list lock taken by mali_session_lock(). */
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_lock_signal(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+/** Return the MMU page directory owned by @session. */
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+/** Post @object on the session's ioctl notification queue for user space. */
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_ukk.h b/drivers/parrot/gpu/mali400_legacy/common/mali_ukk.h
new file mode 100755
index 00000000000000..472639c10140c8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_ukk.h
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementation that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * intefaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+
+
+/* Make a U/K call to _mali_ukk_compositor_priority().
+ * Used to set that the current process is doing graphical compositing. Gives its rendering jobs higher priority
+ */
+void _mali_ukk_compositor_priority(void * session_ptr);
+
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependant on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe( _mali_uk_mem_write_safe_s *args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if defined(CONFIG_MALI400_UMP)
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* CONFIG_MALI400_UMP */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependant, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( void *ctx, _mali_uk_pp_start_job_s *uargs, int *fence );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( void *ctx, _mali_uk_gp_start_job_s *uargs );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.c b/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.c
new file mode 100755
index 00000000000000..6aa14bc0e97423
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.c
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+/**
+ * Broadcast a settings-changed notification to every active session.
+ *
+ * Walks the global session list under the session lock and queues a
+ * _MALI_NOTIFICATION_SETTINGS_CHANGED message carrying the new value.
+ *
+ * @param setting the setting that changed
+ * @param value the new value of the setting
+ */
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+	struct mali_session_data *session, *tmp;
+
+	mali_session_lock();
+	MALI_SESSION_FOREACH(session, tmp, link)
+	{
+		_mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED, sizeof(_mali_uk_settings_changed_s));
+		_mali_uk_settings_changed_s *data;
+
+		/* Allocation can fail; skip this session rather than
+		 * dereference a NULL notification object. */
+		if (NULL == notobj)
+		{
+			continue;
+		}
+
+		data = notobj->result_buffer;
+		data->setting = setting;
+		data->value = value;
+
+		mali_session_send_notification(session, notobj);
+	}
+	mali_session_unlock();
+}
+
+/**
+ * Update a user setting in the DB, notifying sessions on change.
+ *
+ * Stores @a value for @a setting; if the value differs from the one
+ * previously stored, all running sessions are notified of the change.
+ *
+ * @param setting the setting to change
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+	mali_bool notify = MALI_FALSE;
+
+	if (setting >= _MALI_UK_USER_SETTING_MAX || setting < 0)
+	{
+		/* Bug fix: the original format string was "%ud" (i.e. "%u"
+		 * followed by a literal 'd') and no argument was supplied for
+		 * the conversion; print the offending value with "%d". */
+		MALI_DEBUG_PRINT_ERROR(("Invalid user setting %d\n", setting));
+		return;
+	}
+
+	if (mali_user_settings[setting] != value)
+	{
+		notify = MALI_TRUE;
+	}
+
+	mali_user_settings[setting] = value;
+
+	if (notify)
+	{
+		mali_user_settings_notify(setting, value);
+	}
+}
+
+/**
+ * Fetch the current value of a user setting from the DB.
+ *
+ * @param setting the setting to read
+ * @return the stored value, or 0 when @a setting is out of range
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+	mali_bool in_range = (setting >= 0) && (setting < _MALI_UK_USER_SETTING_MAX);
+
+	return in_range ? mali_user_settings[setting] : 0;
+}
+
+/**
+ * U/K call: read a single user setting into @a args->value.
+ *
+ * @param args ioctl argument block; args->setting selects the entry
+ * @return _MALI_OSK_ERR_OK on success, or _MALI_OSK_ERR_INVALID_ARGS
+ *         when args->setting is out of range
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+	_mali_uk_user_setting_t setting;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	setting = args->setting;
+
+	/* Guard clause: reject out-of-range indices up front. */
+	if (setting < 0 || setting >= _MALI_UK_USER_SETTING_MAX)
+	{
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+
+	args->value = mali_user_settings[setting];
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * U/K call: copy the entire user settings array into @a args->settings.
+ *
+ * @param args ioctl argument block receiving a snapshot of all settings
+ * @return always _MALI_OSK_ERR_OK
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.h b/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.h
new file mode 100755
index 00000000000000..578b8e3d002ef1
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/common/mali_user_settings_db.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previous set value running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard.h
new file mode 100755
index 00000000000000..3f14d363e00ecf
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+/* Mali-200 */
+
+#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
+ MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
+ MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
+ MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+ { \
+ .name = "Mali_L2", \
+ .flags = IORESOURCE_MEM, \
+ .start = addr, \
+ .end = addr + 0x200, \
+ },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_mmu_addr, \
+ .end = gp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_mmu_irq, \
+ .end = gp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+ { \
+ .name = "Mali_PP" #id, \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_addr, \
+ .end = pp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_mmu_irq, \
+ .end = pp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+ { \
+ .name = "Mali_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = mmu_addr, \
+ .end = mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = mmu_irq, \
+ .end = mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+ { \
+ .name = "Mali_PMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pmu_addr, \
+ .end = pmu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+ { \
+ .name = "Mali_DLBU", \
+ .flags = IORESOURCE_MEM, \
+ .start = dlbu_addr, \
+ .end = dlbu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+ { \
+ .name = "Mali_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = bcast_addr, \
+ .end = bcast_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_Broadcast_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+ { \
+ .name = "Mali_PP_MMU_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_bcast_addr, \
+ .end = pp_mmu_bcast_addr + 0x100, \
+ },
+
+struct mali_gpu_utilization_data
+{
+ unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+};
+
+struct mali_gpu_device_data
+{
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
+
+ /* Shared GPU memory */
+ unsigned long shared_mem_size;
+
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+
+ /* Report GPU utilization in this interval (specified in ms) */
+ unsigned long utilization_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+ /*
+ * Mali PMU switch delay.
+ * Only needed if the power gates are connected to the PMU in a high fanout
+ * network. This value is the number of Mali clock cycles it takes to
+ * enable the power gates and turn on the power mesh.
+ * This value will have no effect if a daisy chain implementation is used.
+ */
+ unsigned long pmu_switch_delay;
+};
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ */
+int mali_pmu_powerdown(void);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ */
+int mali_pmu_powerup(void);
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+void mali_dev_pause(void);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+/** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_counters.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_counters.h
new file mode 100755
index 00000000000000..c8ebba6d631738
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_counters.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_COUNTERS_H_
+#define _MALI_UTGARD_COUNTERS_H_
+
+typedef struct
+{
+ void *unused;
+} mali_cinstr_counter_info;
+
+typedef enum
+{
+ MALI_CINSTR_COUNTER_SOURCE_EGL = 0,
+ MALI_CINSTR_COUNTER_SOURCE_OPENGLES = 1000,
+ MALI_CINSTR_COUNTER_SOURCE_OPENVG = 2000,
+ MALI_CINSTR_COUNTER_SOURCE_GP = 3000,
+ MALI_CINSTR_COUNTER_SOURCE_PP = 4000,
+} cinstr_counter_source;
+
+#define MALI_CINSTR_EGL_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_EGL
+#define MALI_CINSTR_EGL_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_EGL + 999)
+
+#define MALI_CINSTR_GLES_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENGLES
+#define MALI_CINSTR_GLES_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 999)
+
+#define MALI_CINSTR_VG_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_OPENVG
+#define MALI_CINSTR_VG_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_OPENVG + 999)
+
+#define MALI_CINSTR_GP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_GP
+#define MALI_CINSTR_GP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_GP + 999)
+
+#define MALI_CINSTR_PP_FIRST_COUNTER MALI_CINSTR_COUNTER_SOURCE_PP
+#define MALI_CINSTR_PP_LAST_COUNTER (MALI_CINSTR_COUNTER_SOURCE_PP + 999)
+
+
+typedef enum
+{
+ /* EGL counters */
+
+ MALI_CINSTR_EGL_BLIT_TIME = MALI_CINSTR_COUNTER_SOURCE_EGL + 0,
+
+ /* Last counter in the EGL set */
+ MALI_CINSTR_EGL_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_EGL + 1,
+
+ /* GLES counters */
+
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 0,
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_INDICES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 1,
+ MALI_CINSTR_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 2,
+ MALI_CINSTR_GLES_DRAW_ARRAYS_CALLS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 3,
+ MALI_CINSTR_GLES_DRAW_ARRAYS_NUM_TRANSFORMED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 4,
+ MALI_CINSTR_GLES_DRAW_POINTS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 5,
+ MALI_CINSTR_GLES_DRAW_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 6,
+ MALI_CINSTR_GLES_DRAW_LINE_LOOP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 7,
+ MALI_CINSTR_GLES_DRAW_LINE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 8,
+ MALI_CINSTR_GLES_DRAW_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 9,
+ MALI_CINSTR_GLES_DRAW_TRIANGLE_STRIP = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 10,
+ MALI_CINSTR_GLES_DRAW_TRIANGLE_FAN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 11,
+ MALI_CINSTR_GLES_NON_VBO_DATA_COPY_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 12,
+ MALI_CINSTR_GLES_UNIFORM_BYTES_COPIED_TO_MALI = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 13,
+ MALI_CINSTR_GLES_UPLOAD_TEXTURE_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 14,
+ MALI_CINSTR_GLES_UPLOAD_VBO_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 15,
+ MALI_CINSTR_GLES_NUM_FLUSHES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 16,
+ MALI_CINSTR_GLES_NUM_VSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 17,
+ MALI_CINSTR_GLES_NUM_FSHADERS_GENERATED = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 18,
+ MALI_CINSTR_GLES_VSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 19,
+ MALI_CINSTR_GLES_FSHADER_GEN_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 20,
+ MALI_CINSTR_GLES_INPUT_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 21,
+ MALI_CINSTR_GLES_VXCACHE_HIT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 22,
+ MALI_CINSTR_GLES_VXCACHE_MISS = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 23,
+ MALI_CINSTR_GLES_VXCACHE_COLLISION = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 24,
+ MALI_CINSTR_GLES_CULLED_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 25,
+ MALI_CINSTR_GLES_CULLED_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 26,
+ MALI_CINSTR_GLES_BACKFACE_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 27,
+ MALI_CINSTR_GLES_GBCLIP_TRIANGLES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 28,
+ MALI_CINSTR_GLES_GBCLIP_LINES = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 29,
+ MALI_CINSTR_GLES_TRIANGLES_DRAWN = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 30,
+ MALI_CINSTR_GLES_DRAWCALL_TIME = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 31,
+ MALI_CINSTR_GLES_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 32,
+ MALI_CINSTR_GLES_INDEPENDENT_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 33,
+ MALI_CINSTR_GLES_STRIP_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 34,
+ MALI_CINSTR_GLES_FAN_TRIANGLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 35,
+ MALI_CINSTR_GLES_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 36,
+ MALI_CINSTR_GLES_INDEPENDENT_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 37,
+ MALI_CINSTR_GLES_STRIP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 38,
+ MALI_CINSTR_GLES_LOOP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 39,
+ MALI_CINSTR_GLES_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 40,
+
+ /* Last counter in the GLES set */
+ MALI_CINSTR_GLES_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENGLES + 41,
+
+ /* OpenVG counters */
+
+ MALI_CINSTR_VG_MASK_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 0,
+ MALI_CINSTR_VG_CLEAR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 1,
+ MALI_CINSTR_VG_APPEND_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 2,
+ MALI_CINSTR_VG_APPEND_PATH_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 3,
+ MALI_CINSTR_VG_MODIFY_PATH_COORDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 4,
+ MALI_CINSTR_VG_TRANSFORM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 5,
+ MALI_CINSTR_VG_INTERPOLATE_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 6,
+ MALI_CINSTR_VG_PATH_LENGTH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 7,
+ MALI_CINSTR_VG_POINT_ALONG_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 8,
+ MALI_CINSTR_VG_PATH_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 9,
+ MALI_CINSTR_VG_PATH_TRANSFORMED_BOUNDS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 10,
+ MALI_CINSTR_VG_DRAW_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 11,
+ MALI_CINSTR_VG_CLEAR_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 12,
+ MALI_CINSTR_VG_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 13,
+ MALI_CINSTR_VG_GET_IMAGE_SUB_DATA_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 14,
+ MALI_CINSTR_VG_COPY_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 15,
+ MALI_CINSTR_VG_DRAW_IMAGE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 16,
+ MALI_CINSTR_VG_SET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 17,
+ MALI_CINSTR_VG_WRITE_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 18,
+ MALI_CINSTR_VG_GET_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 19,
+ MALI_CINSTR_VG_READ_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 20,
+ MALI_CINSTR_VG_COPY_PIXELS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 21,
+ MALI_CINSTR_VG_COLOR_MATRIX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 22,
+ MALI_CINSTR_VG_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 23,
+ MALI_CINSTR_VG_SEPARABLE_CONVOLVE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 24,
+ MALI_CINSTR_VG_GAUSSIAN_BLUR_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 25,
+ MALI_CINSTR_VG_LOOKUP_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 26,
+ MALI_CINSTR_VG_LOOKUP_SINGLE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 27,
+ MALI_CINSTR_VG_CONTEXT_CREATE_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 28,
+ MALI_CINSTR_VG_STROKED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 29,
+ MALI_CINSTR_VG_STROKED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 30,
+ MALI_CINSTR_VG_STROKED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 31,
+ MALI_CINSTR_VG_STROKED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 32,
+ MALI_CINSTR_VG_FILLED_CUBICS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 33,
+ MALI_CINSTR_VG_FILLED_QUADS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 34,
+ MALI_CINSTR_VG_FILLED_ARCS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 35,
+ MALI_CINSTR_VG_FILLED_LINES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 36,
+ MALI_CINSTR_VG_DRAW_PATH_CALLS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 37,
+ MALI_CINSTR_VG_TRIANGLES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 38,
+ MALI_CINSTR_VG_VERTICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 39,
+ MALI_CINSTR_VG_INDICES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 40,
+ MALI_CINSTR_VG_FILLED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 41,
+ MALI_CINSTR_VG_STROKED_PATHS_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 42,
+ MALI_CINSTR_VG_FILL_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 43,
+ MALI_CINSTR_VG_DRAW_FILLED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 44,
+ MALI_CINSTR_VG_STROKE_EXTRACT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 45,
+ MALI_CINSTR_VG_DRAW_STROKED_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 46,
+ MALI_CINSTR_VG_DRAW_PAINT_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 47,
+ MALI_CINSTR_VG_DATA_STRUCTURES_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 48,
+ MALI_CINSTR_VG_MEM_PATH_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 49,
+ MALI_CINSTR_VG_RSW_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 50,
+
+ /* Last counter in the VG set */
+ MALI_CINSTR_VG_MAX_COUNTER = MALI_CINSTR_COUNTER_SOURCE_OPENVG + 51,
+
+ /* Mali GP counters */
+
+ MALI_CINSTR_GP_DEPRECATED_0 = MALI_CINSTR_COUNTER_SOURCE_GP + 0,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_GP = MALI_CINSTR_COUNTER_SOURCE_GP + 1,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 2,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_STORER = MALI_CINSTR_COUNTER_SOURCE_GP + 3,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_LOADER = MALI_CINSTR_COUNTER_SOURCE_GP + 4,
+ MALI_CINSTR_GP_CYCLES_VERTEX_LOADER_WAITING_FOR_VERTEX_SHADER = MALI_CINSTR_COUNTER_SOURCE_GP + 5,
+ MALI_CINSTR_GP_NUMBER_OF_WORDS_READ = MALI_CINSTR_COUNTER_SOURCE_GP + 6,
+ MALI_CINSTR_GP_NUMBER_OF_WORDS_WRITTEN = MALI_CINSTR_COUNTER_SOURCE_GP + 7,
+ MALI_CINSTR_GP_NUMBER_OF_READ_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 8,
+ MALI_CINSTR_GP_NUMBER_OF_WRITE_BURSTS = MALI_CINSTR_COUNTER_SOURCE_GP + 9,
+ MALI_CINSTR_GP_NUMBER_OF_VERTICES_PROCESSED = MALI_CINSTR_COUNTER_SOURCE_GP + 10,
+ MALI_CINSTR_GP_NUMBER_OF_VERTICES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 11,
+ MALI_CINSTR_GP_NUMBER_OF_PRIMITIVES_FETCHED = MALI_CINSTR_COUNTER_SOURCE_GP + 12,
+ MALI_CINSTR_GP_RESERVED_13 = MALI_CINSTR_COUNTER_SOURCE_GP + 13,
+ MALI_CINSTR_GP_NUMBER_OF_BACKFACE_CULLINGS_DONE = MALI_CINSTR_COUNTER_SOURCE_GP + 14,
+ MALI_CINSTR_GP_NUMBER_OF_COMMANDS_WRITTEN_TO_TILES = MALI_CINSTR_COUNTER_SOURCE_GP + 15,
+ MALI_CINSTR_GP_NUMBER_OF_MEMORY_BLOCKS_ALLOCATED = MALI_CINSTR_COUNTER_SOURCE_GP + 16,
+ MALI_CINSTR_GP_RESERVED_17 = MALI_CINSTR_COUNTER_SOURCE_GP + 17,
+ MALI_CINSTR_GP_RESERVED_18 = MALI_CINSTR_COUNTER_SOURCE_GP + 18,
+ MALI_CINSTR_GP_NUMBER_OF_VERTEX_LOADER_CACHE_MISSES = MALI_CINSTR_COUNTER_SOURCE_GP + 19,
+ MALI_CINSTR_GP_RESERVED_20 = MALI_CINSTR_COUNTER_SOURCE_GP + 20,
+ MALI_CINSTR_GP_RESERVED_21 = MALI_CINSTR_COUNTER_SOURCE_GP + 21,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_VERTEX_SHADER_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 22,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_COMMAND_PROCESSOR = MALI_CINSTR_COUNTER_SOURCE_GP + 23,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_LIST_WRITER = MALI_CINSTR_COUNTER_SOURCE_GP + 24,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_THROUGH_THE_PREPARE_LIST_COMMANDS = MALI_CINSTR_COUNTER_SOURCE_GP + 25,
+ MALI_CINSTR_GP_RESERVED_26 = MALI_CINSTR_COUNTER_SOURCE_GP + 26,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PRIMITIVE_ASSEMBLY = MALI_CINSTR_COUNTER_SOURCE_GP + 27,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_VERTEX_FETCHER = MALI_CINSTR_COUNTER_SOURCE_GP + 28,
+ MALI_CINSTR_GP_RESERVED_29 = MALI_CINSTR_COUNTER_SOURCE_GP + 29,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_BOUNDINGBOX_AND_COMMAND_GENERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 30,
+ MALI_CINSTR_GP_RESERVED_31 = MALI_CINSTR_COUNTER_SOURCE_GP + 31,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_SCISSOR_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 32,
+ MALI_CINSTR_GP_ACTIVE_CYCLES_PLBU_TILE_ITERATOR = MALI_CINSTR_COUNTER_SOURCE_GP + 33,
+ MALI_CINSTR_GP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_GP + 900,
+
+ /* Mali PP counters */
+
+ MALI_CINSTR_PP_ACTIVE_CLOCK_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 0,
+ MALI_CINSTR_PP_TOTAL_CLOCK_CYCLES_COUNT_REMOVED = MALI_CINSTR_COUNTER_SOURCE_PP + 1,
+ MALI_CINSTR_PP_TOTAL_BUS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 2,
+ MALI_CINSTR_PP_TOTAL_BUS_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 3,
+ MALI_CINSTR_PP_BUS_READ_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 4,
+ MALI_CINSTR_PP_BUS_WRITE_REQUEST_CYCLES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 5,
+ MALI_CINSTR_PP_BUS_READ_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 6,
+ MALI_CINSTR_PP_BUS_WRITE_TRANSACTIONS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 7,
+ MALI_CINSTR_PP_RESERVED_08 = MALI_CINSTR_COUNTER_SOURCE_PP + 8,
+ MALI_CINSTR_PP_TILE_WRITEBACK_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 9,
+ MALI_CINSTR_PP_STORE_UNIT_WRITES = MALI_CINSTR_COUNTER_SOURCE_PP + 10,
+ MALI_CINSTR_PP_RESERVED_11 = MALI_CINSTR_COUNTER_SOURCE_PP + 11,
+ MALI_CINSTR_PP_PALETTE_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 12,
+ MALI_CINSTR_PP_TEXTURE_CACHE_UNCOMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 13,
+ MALI_CINSTR_PP_POLYGON_LIST_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 14,
+ MALI_CINSTR_PP_RSW_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 15,
+ MALI_CINSTR_PP_VERTEX_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 16,
+ MALI_CINSTR_PP_UNIFORM_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 17,
+ MALI_CINSTR_PP_PROGRAM_CACHE_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 18,
+ MALI_CINSTR_PP_VARYING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 19,
+ MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 20,
+ MALI_CINSTR_PP_TEXTURE_DESCRIPTORS_REMAPPING_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 21,
+ MALI_CINSTR_PP_TEXTURE_CACHE_COMPRESSED_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 22,
+ MALI_CINSTR_PP_LOAD_UNIT_READS = MALI_CINSTR_COUNTER_SOURCE_PP + 23,
+ MALI_CINSTR_PP_POLYGON_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 24,
+ MALI_CINSTR_PP_PIXEL_RECTANGLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 25,
+ MALI_CINSTR_PP_LINES_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 26,
+ MALI_CINSTR_PP_POINTS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 27,
+ MALI_CINSTR_PP_STALL_CYCLES_POLYGON_LIST_READER = MALI_CINSTR_COUNTER_SOURCE_PP + 28,
+ MALI_CINSTR_PP_STALL_CYCLES_TRIANGLE_SETUP = MALI_CINSTR_COUNTER_SOURCE_PP + 29,
+ MALI_CINSTR_PP_QUAD_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 30,
+ MALI_CINSTR_PP_FRAGMENT_RASTERIZED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 31,
+ MALI_CINSTR_PP_FRAGMENT_REJECTED_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 32,
+ MALI_CINSTR_PP_FRAGMENT_REJECTED_FWD_FRAGMENT_KILL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 33,
+ MALI_CINSTR_PP_FRAGMENT_PASSED_ZSTENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 34,
+ MALI_CINSTR_PP_PATCHES_REJECTED_EARLY_Z_STENCIL_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 35,
+ MALI_CINSTR_PP_PATCHES_EVALUATED = MALI_CINSTR_COUNTER_SOURCE_PP + 36,
+ MALI_CINSTR_PP_INSTRUCTION_COMPLETED_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 37,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_RENDEZVOUS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 38,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_VARYING_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 39,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_TEXTURE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 40,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_LOAD_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 41,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_TILE_READ_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 42,
+ MALI_CINSTR_PP_INSTRUCTION_FAILED_STORE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 43,
+ MALI_CINSTR_PP_RENDEZVOUS_BREAKAGE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 44,
+ MALI_CINSTR_PP_PIPELINE_BUBBLES_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 45,
+ MALI_CINSTR_PP_TEXTURE_MAPPER_MULTIPASS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 46,
+ MALI_CINSTR_PP_TEXTURE_MAPPER_CYCLE_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 47,
+ MALI_CINSTR_PP_VERTEX_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 48,
+ MALI_CINSTR_PP_VERTEX_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 49,
+ MALI_CINSTR_PP_VARYING_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 50,
+ MALI_CINSTR_PP_VARYING_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 51,
+ MALI_CINSTR_PP_VARYING_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 52,
+ MALI_CINSTR_PP_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 53,
+ MALI_CINSTR_PP_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 54,
+ MALI_CINSTR_PP_TEXTURE_CACHE_CONFLICT_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 55,
+ MALI_CINSTR_PP_PALETTE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 200 only */
+ MALI_CINSTR_PP_PALETTE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 200 only */
+ MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 56, /* Mali 400 class only */
+ MALI_CINSTR_PP_COMPRESSED_TEXTURE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 57, /* Mali 400 class only */
+ MALI_CINSTR_PP_LOAD_STORE_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 58,
+ MALI_CINSTR_PP_LOAD_STORE_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 59,
+ MALI_CINSTR_PP_PROGRAM_CACHE_HIT_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 60,
+ MALI_CINSTR_PP_PROGRAM_CACHE_MISS_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 61,
+ MALI_CINSTR_PP_JOB_COUNT = MALI_CINSTR_COUNTER_SOURCE_PP + 900,
+} cinstr_counters_m200_t;
+
+#endif /*_MALI_UTGARD_COUNTERS_H_*/
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_ioctl.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_ioctl.h
new file mode 100755
index 00000000000000..f9d5aa6968e5c7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_ioctl.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_STREAM_CREATE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_STREAM_CREATE, _mali_uk_stream_create_s *)
+#define MALI_IOC_FENCE_CREATE_EMPTY _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_FENCE_CREATE_EMPTY, _mali_uk_fence_create_empty_s *)
+#define MALI_IOC_FENCE_VALIDATE _IOR(MALI_IOC_CORE_BASE, _MALI_UK_FENCE_VALIDATE, _mali_uk_fence_validate_s *)
+#define MALI_IOC_COMPOSITOR_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_COMPOSITOR_PRIORITY, _mali_uk_compositor_priority_s *)
+
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_events.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100755
index 00000000000000..69e52d48040229
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_events.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32 bit value consisting of different fields
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16 bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+ MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+ MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+ MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+ MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE = 5,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE = 6,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK = 7,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK = 8,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS = 13,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55,
+ MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED = 56,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL = 0,
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum
+{
+ /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,*/
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI = 1,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD = 3,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF = 4,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26, /* used in some build configurations */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE= 33, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK = 36, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK = 37, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP = 38, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING =40, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING =41, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum
+{
+ MALI_PROFILING_EVENT_DATA_CORE_GP0 = 1,
+ MALI_PROFILING_EVENT_DATA_CORE_PP0 = 5,
+ MALI_PROFILING_EVENT_DATA_CORE_PP1 = 6,
+ MALI_PROFILING_EVENT_DATA_CORE_PP2 = 7,
+ MALI_PROFILING_EVENT_DATA_CORE_PP3 = 8,
+ MALI_PROFILING_EVENT_DATA_CORE_PP4 = 9,
+ MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
+ MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
+ MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100755
index 00000000000000..3aefffea5e38d9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_profiling_gator_api.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+/** The list of events supported by the Mali DDK. */
+typedef enum
+{
+ /* Vertex processor activity */
+ ACTIVITY_VP_0 = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP_0,
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+ ACTIVITY_FP_4,
+ ACTIVITY_FP_5,
+ ACTIVITY_FP_6,
+ ACTIVITY_FP_7,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+ COUNTER_L2_1_C0,
+ COUNTER_L2_1_C1,
+ COUNTER_L2_2_C0,
+ COUNTER_L2_2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0,
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+ COUNTER_FP_4_C0,
+ COUNTER_FP_4_C1,
+ COUNTER_FP_5_C0,
+ COUNTER_FP_5_C1,
+ COUNTER_FP_6_C0,
+ COUNTER_FP_6_C1,
+ COUNTER_FP_7_C0,
+ COUNTER_FP_7_C1,
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters
+{
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values
+{
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version
+{
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+
+void _mali_profiling_control(u32 action, u32 value);
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_uk_types.h b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_uk_types.h
new file mode 100755
index 00000000000000..9c4987ab2bf342
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/include/linux/mali/mali_utgard_uk_types.h
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+ _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */
+ _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */
+ _MALI_UK_STREAM_CREATE, /**< _mali_ukk_stream_create() */
+ _MALI_UK_FENCE_CREATE_EMPTY, /**< _mali_ukk_fence_create_empty() */
+ _MALI_UK_FENCE_VALIDATE, /**< _mali_ukk_fence_validate() */
+ _MALI_UK_COMPOSITOR_PRIORITY, /**< _mali_ukk_compositor_priority() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_DMA_BUF, /**< _mali_ukk_attach_dma_buf() */
+ _MALI_UK_RELEASE_DMA_BUF, /**< _mali_ukk_release_dma_buf() */
+ _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+ _MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+ _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+ _MALI_UK_PROFILING_GET_CONFIG, /**< __mali_uku_profiling_get_config() */
+ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+ _MALI_PP_READABLE = (1<<0), /**< Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1<<1), /**< Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1<<2), /**< Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1<<3), /**< Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1<<4), /**< Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1<<5), /**< Writeable by the CPU */
+ _MALI_GP_L2_ALLOC = (1<<6), /**< GP allocate mali L2 cache lines */
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /**< Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /**< Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+typedef enum mali_memory_cache_settings
+{
+ MALI_CACHE_STANDARD = 0,
+ MALI_CACHE_GP_READ_ALLOCATE = 1,
+} mali_memory_cache_settings ;
+
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /* mali_memory_cache_settings cache_settings; */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_BARRIER (1<<1)
+#define _MALI_PP_JOB_FLAG_FENCE (1<<2)
+#define _MALI_PP_JOB_FLAG_EMPTY_FENCE (1<<3)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 0 registers */
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 1 registers */
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< [in] Write-back unit 2 registers */
+ u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+ u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-4) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ u32 flags; /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+ s32 fence; /**< [in,out] Fence to wait on / fence that will be signalled on job completion, if _MALI_PP_JOB_FLAG_FENCE is set */
+ s32 stream; /**< [in] Stream identifier if _MALI_PP_JOB_FLAG_FENCE is set, an empty fence to use for this job if _MALI_PP_JOB_FLAG_EMPTY_FENCE is set */
+ u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */
+ u32 *memory_cookies; /**< [in] memory cookies attached to job */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+} _mali_uk_pp_job_finished_s;
+
+typedef struct
+{
+ u32 number_of_enabled_cores; /**< [out] the new number of enabled cores */
+} _mali_uk_pp_num_cores_changed_s;
+
+
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum
+{
+ _MALI_UK_PP_JOB_WB0 = 1,
+ _MALI_UK_PP_JOB_WB1 = 2,
+ _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
+ u32 flush_id; /**< [in] Flush ID of job to disable WB units for */
+ _mali_uk_pp_job_wbx_flag wbx; /**< [in] write-back units to disable */
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+ _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ *
+ *
+ */
+typedef enum
+{
+ _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+ _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+ _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+ _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+{ \
+ "sw_events_enable", \
+ "colorbuffer_capture_enable", \
+ "depthbuffer_capture_enable", \
+ "stencilbuffer_capture_enable", \
+ "per_tile_counters_enable", \
+ "buffer_capture_compositor", \
+ "buffer_capture_window", \
+ "buffer_capture_other", \
+ "buffer_capture_n_frames", \
+ "buffer_capture_resize_factor", \
+ "sw_counters_enable", \
+};
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct
+{
+ _mali_uk_user_setting_t setting;
+ u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the mali_uk_notification_type in the code.type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match
+ *
+ * Fixed to reference the helper macros by their actual names
+ * (_IS_VERSION_ID/_GET_VERSION); the unprefixed IS_VERSION_ID/GET_VERSION
+ * are undefined, so any expansion of this macro failed to compile. */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 26
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings
+ * @{ */
+
+/** @brief struct to keep the matching values of the user space settings within certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are given reference to the context pointed to by the ctx pointer.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_user_setting_t setting; /**< [in] setting to get */
+ u32 value; /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @} */ /* end group _mali_uk_get_user_settings_s */
+
+/** @brief Arguments for _mali_ukk_compositor_priority */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_compositor_priority_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_dma_buf_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_dma_buf_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+/**
+ * @brief Arguments for _mali_uk[uk]_mem_write_safe()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ const void *src; /**< [in] Pointer to source data */
+ void *dest; /**< [in] Destination Mali buffer */
+ u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */
+ u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+ mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficiency */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNCH event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32* counters; /**< [in] The array of counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_stream U/K Mali stream module
+ * @{ */
+
+/** @brief Create stream
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ int fd; /**< [out] file descriptor describing stream */
+} _mali_uk_stream_create_s;
+
+/** @brief Destroy stream
+*/
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ int fd; /**< [in] file descriptor describing stream */
+} _mali_uk_stream_destroy_s;
+
+/** @brief Create empty fence
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ s32 stream; /**< [in] stream to create fence on */
+ s32 fence; /**< [out] file descriptor describing fence */
+} _mali_uk_fence_create_empty_s;
+
+/** @brief Check fence validity
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ int fd; /**< [in] file descriptor describing fence */
+} _mali_uk_fence_validate_s;
+
+/** @} */ /* end group _mali_uk_stream */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/license/gpl/mali_kernel_license.h b/drivers/parrot/gpu/mali400_legacy/linux/license/gpl/mali_kernel_license.h
new file mode 100755
index 00000000000000..3791cfe2d60e01
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_device_pause_resume.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_device_pause_resume.c
new file mode 100755
index 00000000000000..b81d072da947d0
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_device_pause_resume.c
@@ -0,0 +1,35 @@
+/**
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+
+/** Pause the Mali device: suspend the GP and PP schedulers so no new jobs
+ * are started on the hardware. Exported for use by platform power code. */
+void mali_dev_pause(void)
+{
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+/** Resume the Mali device: restart the GP and PP schedulers previously
+ * suspended by mali_dev_pause(). */
+void mali_dev_resume(void)
+{
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.c
new file mode 100755
index 00000000000000..dfbbf8ec2431f8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_kernel_memory_engine.h"
+#include "mali_memory.h"
+#include "mali_dma_buf.h"
+
+
+/* Book-keeping for one dma-buf attached to a Mali session. */
+struct mali_dma_buf_attachment {
+ struct dma_buf *buf; /* attached dma-buf; a reference is held until release */
+ struct dma_buf_attachment *attachment; /* device attachment on buf */
+ struct sg_table *sgt; /* scatter table; valid only while mapped */
+ struct mali_session_data *session; /* owning session */
+ int map_ref; /* number of active map references, guarded by map_lock */
+ struct mutex map_lock; /* protects map_ref, sgt and is_mapped */
+ mali_bool is_mapped; /* MALI_TRUE while mapped into the Mali MMU */
+ wait_queue_head_t wait_queue; /* woken on map/unmap state transitions */
+};
+
+/*
+ * Release callback for a dma-buf backed allocation (installed via
+ * alloc_info->release in mali_dma_buf_commit). Waits until no mapping
+ * remains, then detaches from the dma-buf, drops its reference and frees
+ * the tracking struct. ctx is unused.
+ */
+void mali_dma_buf_release(void *ctx, void *handle)
+{
+ struct mali_dma_buf_attachment *mem;
+
+ mem = (struct mali_dma_buf_attachment *)handle;
+
+ MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* We mapped implicitly on attach, so we need to unmap on release */
+ mali_dma_buf_unmap(mem);
+#endif
+
+ /* Wait for buffer to become unmapped */
+ wait_event(mem->wait_queue, !mem->is_mapped);
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ dma_buf_detach(mem->buf, mem->attachment);
+ dma_buf_put(mem->buf);
+
+ _mali_osk_free(mem);
+}
+
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
+ */
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
+ *
+ * Takes one map reference; the first reference performs the actual
+ * dma_buf_map_attachment() and inserts every sg entry into the session's
+ * MMU page tables, advancing *offset by the bytes mapped. Subsequent
+ * callers only bump the refcount. Returns 0 on success, -EFAULT if the
+ * attachment could not be mapped.
+ */
+int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 *offset, u32 flags)
+{
+ struct mali_page_directory *pagedir;
+ struct scatterlist *sg;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(mem->session == session);
+
+ mutex_lock(&mem->map_lock);
+
+ mem->map_ref++;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (1 == mem->map_ref)
+ {
+ /* First reference taken, so we need to map the dma buf */
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ pagedir = mali_session_get_page_directory(session);
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+ mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(mem->sgt))
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+ /* BUGFIX: roll back the reference and drop the lock before
+ * returning. The original code returned with map_lock held
+ * and map_ref incremented, deadlocking the next map/unmap
+ * and leaving mali_dma_buf_release() stuck in wait_event. */
+ mem->map_ref--;
+ mutex_unlock(&mem->map_lock);
+ return -EFAULT;
+ }
+
+ for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i)
+ {
+ u32 size = sg_dma_len(sg);
+ dma_addr_t phys = sg_dma_address(sg);
+
+ /* sg must be page aligned. */
+ MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_CACHE_STANDARD);
+
+ virt += size;
+ *offset += size;
+ }
+
+ if (flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ u32 guard_phys;
+ MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+ /* The guard page re-maps the first page of the buffer. */
+ guard_phys = sg_dma_address(mem->sgt->sgl);
+ mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_CACHE_STANDARD);
+ }
+
+ mem->is_mapped = MALI_TRUE;
+ mutex_unlock(&mem->map_lock);
+
+ /* Wake up any thread waiting for buffer to become mapped */
+ wake_up_all(&mem->wait_queue);
+ }
+ else
+ {
+ MALI_DEBUG_ASSERT(mem->is_mapped);
+ mutex_unlock(&mem->map_lock);
+ }
+
+ return 0;
+}
+
+/*
+ * Drop one map reference on \a mem. When the last reference goes away the
+ * dma-buf attachment is unmapped and waiters (e.g. mali_dma_buf_release)
+ * are woken.
+ */
+void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
+{
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+
+ mutex_lock(&mem->map_lock);
+
+ mem->map_ref--;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (0 == mem->map_ref)
+ {
+ dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+
+ mem->is_mapped = MALI_FALSE;
+ }
+
+ mutex_unlock(&mem->map_lock);
+
+ /* Wake up any thread waiting for buffer to become unmapped */
+ wake_up_all(&mem->wait_queue);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+/*
+ * Map every dma-buf referenced by \a job's memory cookies into the job's
+ * session, recording each mapped attachment in job->dma_bufs[] for the
+ * matching mali_dma_buf_unmap_job(). Non-dma-buf cookies are skipped.
+ * Returns 0 on success, -EFAULT if any cookie lookup or mapping failed
+ * (remaining cookies are still processed).
+ */
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+ mali_memory_allocation *descriptor;
+ struct mali_dma_buf_attachment *mem;
+ _mali_osk_errcode_t err;
+ int i;
+ u32 offset = 0;
+ int ret = 0;
+
+ _mali_osk_lock_wait( job->session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ for (i = 0; i < job->num_memory_cookies; i++)
+ {
+ int cookie = job->memory_cookies[i];
+
+ if (0 == cookie)
+ {
+ /* 0 is not a valid cookie */
+ MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ continue;
+ }
+
+ MALI_DEBUG_ASSERT(0 < cookie);
+
+ err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
+ cookie, (void**)&descriptor);
+
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
+ ret = -EFAULT;
+ MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ continue;
+ }
+
+ if (mali_dma_buf_release != descriptor->physical_allocation.release)
+ {
+ /* Not a DMA-buf */
+ MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ continue;
+ }
+
+ mem = (struct mali_dma_buf_attachment *)descriptor->physical_allocation.handle;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT(mem->session == job->session);
+
+ err = mali_dma_buf_map(mem, mem->session, descriptor->mali_address, &offset, descriptor->flags);
+ if (0 != err)
+ {
+ /* BUGFIX: error message was terminated with "\b" (backspace)
+ * instead of a newline. */
+ MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\n",
+ cookie, descriptor->mali_address));
+ ret = -EFAULT;
+ MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ continue;
+ }
+
+ /* Add mem to list of DMA-bufs mapped for this job */
+ job->dma_bufs[i] = mem;
+ }
+
+ _mali_osk_lock_signal( job->session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ return ret;
+}
+
+/*
+ * Unmap every dma-buf that mali_dma_buf_map_job() mapped for \a job and
+ * clear the per-job bookkeeping slots.
+ */
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+ int i;
+ for (i = 0; i < job->num_dma_bufs; i++)
+ {
+ if (NULL == job->dma_bufs[i]) continue;
+
+ mali_dma_buf_unmap(job->dma_bufs[i]);
+ job->dma_bufs[i] = NULL;
+ }
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+/* Callback from memory engine which will map into Mali virtual address space.
+ *
+ * With CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH the dma-buf is mapped here and the
+ * mapping must cover the whole descriptor; otherwise mapping is deferred to
+ * job start (mali_dma_buf_map_job) and this only records the handle and
+ * release callback. Note the #if/#else below share the success braces. */
+static mali_physical_memory_allocation_result mali_dma_buf_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ struct mali_session_data *session;
+ struct mali_dma_buf_attachment *mem;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ /* Mapping dma-buf with an offset is not supported. */
+ MALI_DEBUG_ASSERT(0 == *offset);
+
+ session = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ /* ctx is the attachment set up by mali_attach_dma_buf(). */
+ mem = (struct mali_dma_buf_attachment *)ctx;
+
+ MALI_DEBUG_ASSERT(mem->session == session);
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ if (0 == mali_dma_buf_map(mem, session, descriptor->mali_address, offset, descriptor->flags))
+ {
+ MALI_DEBUG_ASSERT(*offset == descriptor->size);
+#else
+ {
+#endif
+ alloc_info->ctx = NULL;
+ alloc_info->handle = mem;
+ alloc_info->next = NULL;
+ alloc_info->release = mali_dma_buf_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+ }
+
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+}
+
+/*
+ * Attach a dma-buf (by fd) to \a session and map it into the session's Mali
+ * virtual address space. Arguments are copied from user space; on success
+ * the descriptor cookie is written back to user_arg->cookie. Only full-size
+ * mappings are supported. Returns 0 or a negative errno; all intermediate
+ * resources are rolled back on failure.
+ */
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg)
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ struct dma_buf *buf;
+ struct mali_dma_buf_attachment *mem;
+ _mali_uk_attach_dma_buf_s args;
+ mali_memory_allocation *descriptor;
+ int md;
+ int fd;
+
+ /* Get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s)))
+ {
+ return -EFAULT;
+ }
+
+ fd = args.mem_fd;
+
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf))
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+ return PTR_RET(buf);
+ }
+
+ /* Currently, mapping of the full buffer are supported. */
+ if (args.size != buf->size)
+ {
+ MALI_DEBUG_PRINT_ERROR(("dma-buf size doesn't match mapping size.\n"));
+ dma_buf_put(buf);
+ return -EINVAL;
+ }
+
+ mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+ if (NULL == mem)
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
+ dma_buf_put(buf);
+ return -ENOMEM;
+ }
+
+ mem->buf = buf;
+ mem->session = session;
+ mem->map_ref = 0;
+ mutex_init(&mem->map_lock);
+ init_waitqueue_head(&mem->wait_queue);
+
+ mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
+ /* BUGFIX: dma_buf_attach() reports failure via ERR_PTR(), not NULL, so a
+ * NULL check let errors through and a later dereference would crash. */
+ if (IS_ERR_OR_NULL(mem->attachment))
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to attach to dma-buf %d\n", fd));
+ dma_buf_put(mem->buf);
+ _mali_osk_free(mem);
+ return -EFAULT;
+ }
+
+ /* Map dma-buf into this session's page tables */
+
+ /* Set up Mali memory descriptor */
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
+ mali_dma_buf_release(NULL, mem);
+ return -ENOMEM;
+ }
+
+ descriptor->size = args.size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args.mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = session->memory_lock;
+
+ if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ /* Get descriptor mapping for memory. */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md))
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
+ _mali_osk_free(descriptor);
+ mali_dma_buf_release(NULL, mem);
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_ASSERT(0 < md);
+
+ external_memory_allocator.allocate = mali_dma_buf_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = mem;
+ external_memory_allocator.name = "DMA-BUF Memory";
+ external_memory_allocator.next = NULL;
+
+ /* Map memory into session's Mali virtual address space. */
+ _mali_osk_lock_wait(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(mali_mem_get_memory_engine(), descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
+ mali_descriptor_mapping_free(session->descriptor_mapping, md);
+ mali_dma_buf_release(NULL, mem);
+ return -ENOMEM;
+ }
+ _mali_osk_lock_signal(session->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Return stuff to user space */
+ if (0 != put_user(md, &user_arg->cookie))
+ {
+ /* Roll back */
+ MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
+ mali_descriptor_mapping_free(session->descriptor_mapping, md);
+ mali_dma_buf_release(NULL, mem);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Release a dma-buf previously attached with mali_attach_dma_buf(), looked
+ * up by the descriptor cookie passed from user space. Frees the descriptor
+ * mapping and triggers mali_dma_buf_release() via the allocation engine.
+ * Returns 0 on success, -EFAULT on copy failure, -EINVAL on a bad cookie.
+ */
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg)
+{
+ int ret = 0;
+ _mali_uk_release_dma_buf_s args;
+ mali_memory_allocation *descriptor;
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s)) )
+ {
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
+
+ _mali_osk_lock_wait( session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
+
+ if (NULL != descriptor)
+ {
+ MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_address));
+
+ /* Will call back to mali_dma_buf_release() which will release the dma-buf attachment. */
+ mali_allocation_engine_release_memory(mali_mem_get_memory_engine(), descriptor);
+
+ _mali_osk_free(descriptor);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+ ret = -EINVAL;
+ }
+
+ _mali_osk_lock_signal( session->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* Return 0 on success, or the error recorded above */
+ return ret;
+}
+
+/*
+ * Query the size of a dma-buf identified by the fd in user_arg->mem_fd and
+ * write it back to user_arg->size. The session argument is unused here.
+ * Returns 0 on success or a negative errno.
+ */
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+ _mali_uk_dma_buf_get_size_s args;
+ int fd;
+ struct dma_buf *buf;
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s)) )
+ {
+ return -EFAULT;
+ }
+
+ /* Do DMA-BUF stuff */
+ fd = args.mem_fd;
+
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf))
+ {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+ return PTR_RET(buf);
+ }
+
+ /* Copy the size out; drop the dma-buf reference on either path. */
+ if (0 != put_user(buf->size, &user_arg->size))
+ {
+ dma_buf_put(buf);
+ return -EFAULT;
+ }
+
+ dma_buf_put(buf);
+
+ return 0;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.h
new file mode 100755
index 00000000000000..6f44d31bbe92be
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_dma_buf.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DMA_BUF_H__
+#define __MALI_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_osk.h"
+#include "common/mali_pp_job.h"
+
+/* Opaque per-attachment state; layout is private to mali_dma_buf.c */
+struct mali_dma_buf_attachment;
+
+/* ioctl entry points: attach/release a dma-buf for a session and query its size */
+int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *arg);
+int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *arg);
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+/* Map/unmap an attached dma-buf into the session's Mali address space */
+int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 *offset, u32 flags);
+void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem);
+void mali_dma_buf_release(void *ctx, void *handle);
+
+/* Per-job map/unmap is only needed when buffers are not mapped at attach time */
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_DMA_BUF_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.c
new file mode 100755
index 00000000000000..f89575e225597f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.c
@@ -0,0 +1,725 @@
+/**
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_dma_buf.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS */
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call requires non-cost */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+/* Kernels before 2.6.29 take PM callbacks through pm_ext_ops on the driver */
+struct pm_ext_ops mali_dev_ext_pm_ops =
+{
+ .base =
+ {
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ },
+};
+#else
+/* 2.6.29+ kernels use dev_pm_ops; the runtime-PM hooks are only compiled in
+ * when CONFIG_PM_RUNTIME is enabled. freeze/thaw reuse the suspend/resume
+ * callbacks since hibernation needs the same scheduler quiesce. */
+static const struct dev_pm_ops mali_dev_pm_ops =
+{
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_driver_runtime_suspend,
+ .runtime_resume = mali_driver_runtime_resume,
+ .runtime_idle = mali_driver_runtime_idle,
+#endif
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+};
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver =
+{
+ .probe = mali_probe,
+ .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+ /* Old kernels: PM ops hang off the platform_driver itself */
+ .pm = &mali_dev_ext_pm_ops,
+#endif
+ .driver =
+ {
+ .name = MALI_GPU_NAME_UTGARD,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ /* New kernels: PM ops hang off the embedded device_driver instead */
+ .pm = &mali_dev_pm_ops,
+#endif
+ },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ /* Legacy ioctl entry point for kernels without unlocked_ioctl */
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+
+
+
+
+/**
+ * Module entry point: initialise module-wide state, register the (optionally
+ * fake) platform device and the platform driver, and start internal
+ * profiling when configured.
+ *
+ * @return 0 on success, negative errno on failure.
+ */
+int mali_module_init(void)
+{
+ int err = 0;
+
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+ /* Initialize module wide settings */
+ mali_osk_low_level_mem_init();
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+ err = mali_platform_device_register();
+ if (0 != err)
+ {
+ /* NOTE(review): returns without calling mali_osk_low_level_mem_term();
+ * confirm the init above needs no teardown on this error path */
+ return err;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+ err = platform_driver_register(&mali_platform_driver);
+
+ if (0 != err)
+ {
+ MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ mali_platform_device_unregister();
+#endif
+ mali_platform_device = NULL;
+ return err;
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (0 != err)
+ {
+ /* No biggie if we wheren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ MALI_PRINT(("Mali device driver loaded\n"));
+
+ return 0; /* Success */
+}
+
+/** Module exit point: tear down in reverse order of mali_module_init(). */
+void mali_module_exit(void)
+{
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
+#endif
+
+ platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+ mali_platform_device_unregister();
+#endif
+
+ mali_osk_low_level_mem_term();
+
+ MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+/**
+ * Platform driver probe: bind the single supported Mali device to the driver.
+ *
+ * Initialisation order: workqueue -> HW subsystems -> misc device -> sysfs.
+ * The nested structure unwinds exactly the steps that succeeded; on any
+ * failure, mali_platform_device is reset to NULL and -EFAULT is returned.
+ * A second probe while already bound is rejected with -EEXIST.
+ */
+static int mali_probe(struct platform_device *pdev)
+{
+ int err;
+
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+ if (NULL != mali_platform_device)
+ {
+ /* Already connected to a device, return error */
+ MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+ return -EEXIST;
+ }
+
+ mali_platform_device = pdev;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_wq_init())
+ {
+ /* Initialize the Mali GPU HW specified by pdev */
+ if (_MALI_OSK_ERR_OK == mali_initialize_subsystems())
+ {
+ /* Register a misc device (so we are accessible from user space) */
+ err = mali_miscdevice_register(pdev);
+ if (0 == err)
+ {
+ /* Setup sysfs entries */
+ err = mali_sysfs_register(mali_dev_name);
+ if (0 == err)
+ {
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+ return 0;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+ }
+ mali_miscdevice_unregister();
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+ }
+ mali_terminate_subsystems();
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+ }
+ _mali_osk_wq_term();
+ }
+
+ mali_platform_device = NULL;
+ return -EFAULT;
+}
+
+/** Platform driver remove: undo mali_probe() in reverse order. */
+static int mali_remove(struct platform_device *pdev)
+{
+ MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+ mali_sysfs_unregister();
+ mali_miscdevice_unregister();
+ mali_terminate_subsystems();
+ _mali_osk_wq_term();
+ mali_platform_device = NULL;
+ return 0;
+}
+
+/**
+ * Expose the driver as /dev/mali with a dynamically allocated minor.
+ * get_device() takes a reference on the parent platform device for the
+ * lifetime of the misc device.
+ *
+ * @return 0 on success, or the error from misc_register().
+ */
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+ int err;
+
+ mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+ mali_miscdevice.name = mali_dev_name;
+ mali_miscdevice.fops = &mali_fops;
+ mali_miscdevice.parent = get_device(&pdev->dev);
+
+ err = misc_register(&mali_miscdevice);
+ if (0 != err)
+ {
+ MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+ }
+
+ return err;
+}
+
+/** Remove the /dev/mali misc device registered by mali_miscdevice_register(). */
+static void mali_miscdevice_unregister(void)
+{
+ misc_deregister(&mali_miscdevice);
+}
+
+/* System suspend: quiesce the Mali scheduler via the common PM layer */
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+ mali_pm_os_suspend();
+ return 0;
+}
+
+/* System resume: restart the Mali scheduler via the common PM layer */
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+ mali_pm_os_resume();
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM suspend: delegate to the common Mali PM layer */
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+ mali_pm_runtime_suspend();
+ return 0;
+}
+
+/* Runtime PM resume: delegate to the common Mali PM layer */
+static int mali_driver_runtime_resume(struct device *dev)
+{
+ mali_pm_runtime_resume();
+ return 0;
+}
+
+/* Runtime idle: always report ready-to-suspend (0) */
+static int mali_driver_runtime_idle(struct device *dev)
+{
+ /* Nothing to do */
+ return 0;
+}
+#endif
+
+/** @note munmap handler is done by vma close handler */
+/**
+ * mmap handler for /dev/mali: re-pack the vma parameters into a
+ * _mali_uk_mem_mmap_s request and hand it to the common mapper.
+ * MAP_SHARED selects standard (uncached-for-GP) memory; MAP_PRIVATE selects
+ * GP-read-allocate cached memory.
+ */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ if ( VM_SHARED== (VM_SHARED & vma->vm_flags))
+ {
+ args.cache_settings = MALI_CACHE_STANDARD ;
+ MALI_DEBUG_PRINT(3,("Allocate - Standard - Size: %d kb\n", args.size/1024));
+ }
+ else
+ {
+ args.cache_settings = MALI_CACHE_GP_READ_ALLOCATE;
+ MALI_DEBUG_PRINT(3,("Allocate - GP Cached - Size: %d kb\n", args.size/1024));
+ }
+ /* Setting it equal to VM_SHARED and not Private, which would have made the later io_remap fail for MALI_CACHE_GP_READ_ALLOCATE */
+ /* NOTE(review): 0x000000fb hard-codes a VM_* bit combination and clobbers
+ * whatever flags the core mm set on this vma — the value depends on the
+ * target kernel's VM_* layout; confirm before porting to another kernel. */
+ vma->vm_flags = 0x000000fb;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
+
+/**
+ * open() handler for /dev/mali: allocate a per-open session via
+ * _mali_ukk_open() and stash it in filp->private_data for later handlers.
+ */
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode))
+ {
+ MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+/**
+ * release() handler for /dev/mali: close the session created by mali_open().
+ * _mali_ukk_close() also clears filp->private_data (it receives its address).
+ */
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode))
+ {
+ MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+/** Translate an internal _mali_osk_errcode_t to a negative Linux errno;
+ * anything unrecognised collapses to -EFAULT. */
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+/**
+ * ioctl dispatcher for /dev/mali: validate the session and argument pointer,
+ * then fan out to the per-command wrapper. Commands for features compiled
+ * out (profiling, UMP, dma-buf, sync) return -ENOTTY.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ /* All supported commands take a userspace struct pointer in arg */
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_USER_SETTINGS:
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_COMPOSITOR_PRIORITY:
+ err = compositor_priority_wrapper(session_data);
+ break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_CONFIG:
+ /* Deprecated: still compatible with get_user_settings */
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_PROFILING_START: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_STOP: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_CLEAR: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_GET_CONFIG: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+ err = -ENOTTY;
+ break;
+
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_WRITE_SAFE:
+ err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+#if defined(CONFIG_MALI400_UMP)
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case MALI_IOC_MEM_ATTACH_DMA_BUF:
+ err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_DMA_BUF:
+ err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+ err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+ break;
+#else
+
+ case MALI_IOC_MEM_ATTACH_DMA_BUF: /* FALL-THROUGH */
+ case MALI_IOC_MEM_RELEASE_DMA_BUF: /* FALL-THROUGH */
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_DISABLE_WB:
+ err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ /* The three sync-object commands below each break inside their #if
+ * block when CONFIG_SYNC is set; without CONFIG_SYNC all three fall
+ * through to the common -ENOTTY under FENCE_VALIDATE's #else. */
+ case MALI_IOC_STREAM_CREATE:
+#if defined(CONFIG_SYNC)
+ err = stream_create_wrapper(session_data, (_mali_uk_stream_create_s __user *)arg);
+ break;
+#endif
+ case MALI_IOC_FENCE_CREATE_EMPTY:
+#if defined(CONFIG_SYNC)
+ err = sync_fence_create_empty_wrapper(session_data, (_mali_uk_fence_create_empty_s __user *)arg);
+ break;
+#endif
+ case MALI_IOC_FENCE_VALIDATE:
+#if defined(CONFIG_SYNC)
+ err = sync_fence_validate_wrapper(session_data, (_mali_uk_fence_validate_s __user *)arg);
+ break;
+#else
+ MALI_DEBUG_PRINT(2, ("Sync objects not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
+ err = -ENOTTY;
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ };
+
+ return err;
+}
+
+
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.h
new file mode 100755
index 00000000000000..9650103283fa93
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_linux.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk.h"
+
+/* The single Mali platform device bound by mali_probe(); NULL when unbound */
+extern struct platform_device *mali_platform_device;
+
+#if MALI_LICENSE_IS_GPL
+/* Defined in mali_osk_irq.h */
+extern struct workqueue_struct * mali_wq;
+#endif
+
+/* Module-wide low-level memory setup/teardown, called from module init/exit */
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.c
new file mode 100755
index 00000000000000..ab8248c915c097
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.c
@@ -0,0 +1,1707 @@
+/**
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_pp_scheduler.h"
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum
+{
+ _MALI_DEVICE_SUSPEND,
+ _MALI_DEVICE_RESUME,
+ _MALI_DEVICE_DVFS_PAUSE,
+ _MALI_DEVICE_DVFS_RESUME,
+ _MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+static const char* const mali_power_events[_MALI_MAX_EVENTS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+ [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+ [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
+/* debugfs open helper: copy the inode's private pointer onto the file so the
+ * read/write handlers can reach the object this node represents. */
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/* debugfs read handler: report "1\n" or "0\n" depending on whether the group
+ * attached to this file is currently enabled. */
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ char text[64];
+ int len;
+ struct mali_group *group = (struct mali_group *)filp->private_data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ len = sprintf(text, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+
+ return simple_read_from_buffer(buf, count, offp, text, len);
+}
+
+/**
+ * debugfs write handler: enable (write "1") or disable (write "0") the group
+ * attached to this file. Any other value, or unparsable input, is -EINVAL.
+ * NOTE(review): oversize input returns -ENOMEM; -EINVAL would be the more
+ * conventional code, but the value is user-visible so it is left unchanged.
+ */
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ unsigned long val;
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ r = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != r)
+ {
+ return -EINVAL;
+ }
+
+ switch (val)
+ {
+ case 1:
+ mali_group_enable(group);
+ break;
+ case 0:
+ mali_group_disable(group);
+ break;
+ default:
+ /* dead `break` after the return removed */
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+/* debugfs node: <group>/enabled — read/write the group's enabled state */
+static const struct file_operations group_enabled_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = group_enabled_read,
+ .write = group_enabled_write,
+};
+
+/* debugfs read handler: report the physical base address of a HW core as
+ * "0xXXXXXXXX\n". */
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_hw_core *hw_core;
+
+ hw_core = (struct mali_hw_core *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+ r = sprintf(buffer, "0x%08X\n", hw_core->phys_addr);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+/* debugfs node: read-only physical base address of a HW core */
+static const struct file_operations hw_core_base_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = hw_core_base_addr_read,
+};
+
+/* Shared read handler for GP counter source debugfs nodes: report the active
+ * counter source (src0 or src1 selected by src_id), or "-1" when disabled. */
+static ssize_t gp_gpx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+
+ if (0 == src_id)
+ {
+ val = mali_gp_job_get_gp_counter_src0();
+ }
+ else
+ {
+ val = mali_gp_job_get_gp_counter_src1();
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val)
+ {
+ r = sprintf(buf, "-1\n");
+ }
+ else
+ {
+ r = sprintf(buf, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, gpos, buf, r);
+}
+
+/* Shared write handler for GP counter source debugfs nodes: parse a decimal
+ * source id and set it as src0 or src1 (selected by src_id). A negative
+ * input disables the counter. Returns 0 (short write) if the setter refuses
+ * the value. */
+static ssize_t gp_gpx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_gp_job_set_gp_counter_src0((u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_gp_job_set_gp_counter_src1((u32)val))
+ {
+ return 0;
+ }
+ }
+
+ *gpos += cnt;
+ return cnt;
+}
+
+/* Write handler for the "all GP cores" counter source node: same parsing as
+ * gp_gpx_counter_srcx_write, applied once per group that has a GP core.
+ * NOTE(review): the setters take no group argument, so each loop iteration
+ * updates the same global source — the per-group loop looks redundant;
+ * confirm intent before relying on per-core behaviour. */
+static ssize_t gp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 num_groups;
+ int i;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++)
+ {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core)
+ {
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_gp_job_set_gp_counter_src0((u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_gp_job_set_gp_counter_src1((u32)val))
+ {
+ return 0;
+ }
+ }
+ }
+ }
+
+ *gpos += cnt;
+ return cnt;
+}
+
+/* Thin per-source wrappers binding the shared srcx handlers above to the
+ * src0/src1 debugfs file_operations tables. */
+static ssize_t gp_gpx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_gpx_counter_srcx_read(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_gpx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_gpx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_gpx_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static ssize_t gp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 0);
+}
+
+static ssize_t gp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *gpos)
+{
+ return gp_all_counter_srcx_write(filp, ubuf, cnt, gpos, 1);
+}
+
+static const struct file_operations gp_gpx_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = gp_gpx_counter_src0_read,
+ .write = gp_gpx_counter_src0_write,
+};
+
+static const struct file_operations gp_gpx_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = gp_gpx_counter_src1_read,
+ .write = gp_gpx_counter_src1_write,
+};
+
+static const struct file_operations gp_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = gp_all_counter_src0_write,
+};
+
+static const struct file_operations gp_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = gp_all_counter_src1_write,
+};
+
+/* debugfs read handler: report the current PP performance counter source
+ * src_id (0 or 1) as decimal text; a disabled counter reads as "-1". */
+static ssize_t pp_ppx_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+
+ if (0 == src_id)
+ {
+ val = mali_pp_job_get_pp_counter_src0();
+ }
+ else
+ {
+ val = mali_pp_job_get_pp_counter_src1();
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val)
+ {
+ r = sprintf(buf, "-1\n");
+ }
+ else
+ {
+ r = sprintf(buf, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* debugfs write handler: parse a decimal number from userspace and install it
+ * as PP performance counter source src_id (0 or 1). Negative input disables
+ * the counter (MALI_HW_CORE_NO_COUNTER). Returns cnt on success, 0 if the
+ * setter rejected the value, or a negative errno on bad input. */
+static ssize_t pp_ppx_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ /* NUL-terminate before parsing */
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_pp_job_set_pp_counter_src0((u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_pp_job_set_pp_counter_src1((u32)val))
+ {
+ return 0;
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* debugfs write handler for pp/all/counter_srcX: same parsing as the per-core
+ * variant, but walks every group and applies the source to each group owning
+ * a PP core. The setters are global, so each hit re-applies the same value.
+ * NOTE(review): `i` is int vs u32 num_groups — signed/unsigned compare. */
+static ssize_t pp_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 num_groups;
+ int i;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ /* NUL-terminate before parsing */
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++)
+ {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core)
+ {
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_pp_job_set_pp_counter_src0((u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_pp_job_set_pp_counter_src1((u32)val))
+ {
+ return 0;
+ }
+ }
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* Thin wrappers binding src_id (0 or 1) for the file_operations tables below. */
+static ssize_t pp_ppx_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_ppx_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_ppx_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_ppx_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_ppx_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t pp_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t pp_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return pp_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+/* Per-core files are read/write; the "all" files are write-only broadcasts. */
+static const struct file_operations pp_ppx_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = pp_ppx_counter_src0_read,
+ .write = pp_ppx_counter_src0_write,
+};
+
+static const struct file_operations pp_ppx_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = pp_ppx_counter_src1_read,
+ .write = pp_ppx_counter_src1_write,
+};
+
+static const struct file_operations pp_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_all_counter_src0_write,
+};
+
+static const struct file_operations pp_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_all_counter_src1_write,
+};
+
+/* debugfs read handler: report counter source src_id (0 or 1) of the L2 cache
+ * core stashed in filp->private_data (set by open_copy_private_data).
+ * A disabled counter reads as "-1". */
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ if (0 == src_id)
+ {
+ val = mali_l2_cache_core_get_counter_src0(l2_core);
+ }
+ else
+ {
+ val = mali_l2_cache_core_get_counter_src1(l2_core);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val)
+ {
+ r = sprintf(buf, "-1\n");
+ }
+ else
+ {
+ r = sprintf(buf, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* debugfs write handler: parse a decimal number from userspace and install it
+ * as counter source src_id (0 or 1) of the L2 cache core held in
+ * filp->private_data. Negative input disables the counter. Returns cnt on
+ * success, 0 if the setter rejected the value, or a negative errno. */
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ /* NUL-terminate before parsing */
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_core, (u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_core, (u32)val))
+ {
+ return 0;
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* debugfs write handler for l2/all/counter_srcX: same parsing as the per-core
+ * variant, but iterates every global L2 cache core (by increasing id until
+ * the lookup returns NULL) and applies the same source to each. */
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ /* NUL-terminate before parsing */
+ buf[cnt] = 0;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val < 0)
+ {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache)
+ {
+ if (0 == src_id)
+ {
+ if (MALI_TRUE != mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val))
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE != mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val))
+ {
+ return 0;
+ }
+ }
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* Thin wrappers binding src_id (0 or 1) for the file_operations tables below. */
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+/* Per-core files are read/write; the "all" files are write-only broadcasts. */
+static const struct file_operations l2_l2x_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src0_read,
+ .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src1_read,
+ .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src1_write,
+};
+
+/* debugfs write handler for power/always_on: "1" takes a PM device reference
+ * so the GPU is kept powered, "0" drops it again. Other values are ignored.
+ * As the comment below notes, the flag update is not thread safe. */
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (0 != ret)
+ {
+ return ret;
+ }
+
+ /* Update setting (not exactly thread safe) */
+ if (1 == val && MALI_FALSE == power_always_on_enabled)
+ {
+ power_always_on_enabled = MALI_TRUE;
+ _mali_osk_pm_dev_ref_add();
+ }
+ else if (0 == val && MALI_TRUE == power_always_on_enabled)
+ {
+ power_always_on_enabled = MALI_FALSE;
+ _mali_osk_pm_dev_ref_dec();
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* debugfs read handler: report the always-on flag as "1\n" or "0\n". */
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ if (MALI_TRUE == power_always_on_enabled)
+ {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+ }
+ else
+ {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+ }
+}
+
+static const struct file_operations power_always_on_fops = {
+ .owner = THIS_MODULE,
+ .read = power_always_on_read,
+ .write = power_always_on_write,
+};
+
+/* debugfs write handler for power/power_events: match the written string
+ * against the mali_power_events[] keywords and trigger the corresponding
+ * suspend/resume or DVFS pause/resume transition. Unrecognized input is
+ * silently ignored; the whole write is always consumed.
+ *
+ * ubuf is a userspace pointer and must be copied in before it can be
+ * compared: the previous version passed it straight to strncmp(), which is
+ * an invalid direct dereference of user memory. */
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ if (!strncmp(buf,mali_power_events[_MALI_DEVICE_SUSPEND],strlen(mali_power_events[_MALI_DEVICE_SUSPEND])))
+ {
+ mali_pm_os_suspend();
+
+ }
+ else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_RESUME],strlen(mali_power_events[_MALI_DEVICE_RESUME])))
+ {
+ mali_pm_os_resume();
+ }
+ else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_DVFS_PAUSE],strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE])))
+ {
+ mali_dev_pause();
+ }
+ else if (!strncmp(buf,mali_power_events[_MALI_DEVICE_DVFS_RESUME],strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME])))
+ {
+ mali_dev_resume();
+ }
+ *ppos += cnt;
+ return cnt;
+}
+
+/* llseek handler for the power_events file: just record the offset.
+ * NOTE(review): returns 0 instead of the new offset — confirm callers of
+ * llseek on this debugfs file do not rely on the return value. */
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+ file->f_pos = offset;
+ return 0;
+}
+
+static const struct file_operations power_power_events_fops = {
+ .owner = THIS_MODULE,
+ .write = power_power_events_write,
+ .llseek = power_power_events_seek,
+};
+
+#if MALI_STATE_TRACKING
+/* seq_file show handler: dump driver version, licence and the full internal
+ * core state into the seq_file buffer in a single pass. */
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+ u32 len = 0;
+ u32 size;
+ char *buf;
+
+ size = seq_get_buf(seq_file, &buf);
+
+ if(!size)
+ {
+ return -ENOMEM;
+ }
+
+ /* Create the internal state dump. */
+ len = snprintf(buf+len, size-len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += snprintf(buf+len, size-len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+ len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+ seq_commit(seq_file, len);
+
+ return 0;
+}
+
+/* open handler wiring the show callback through single_open(). */
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_seq_internal_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+/* debugfs read handler: report "1\n" while internal profiling is recording,
+ * "0\n" otherwise. */
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* debugfs write handler: non-zero starts internal profiling (clearing any
+ * previous recording first), zero stops it. Errors while starting/stopping
+ * are reported as -EFAULT. */
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+
+ /* NUL-terminate before parsing */
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ if (val != 0)
+ {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+ /* check if we are already recording */
+ if (MALI_TRUE == _mali_internal_profiling_is_recording())
+ {
+ MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+ return -EFAULT;
+ }
+
+ /* check if we need to clear out an old recording first */
+ if (MALI_TRUE == _mali_internal_profiling_have_recording())
+ {
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear())
+ {
+ MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+ return -EFAULT;
+ }
+ }
+
+ /* start recording profiling data */
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
+ {
+ MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+ }
+ else
+ {
+ /* stop recording profiling data */
+ u32 count = 0;
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count))
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+ .owner = THIS_MODULE,
+ .read = profiling_record_read,
+ .write = profiling_record_write,
+};
+
+/* seq_file start callback: allocate a private loff_t cursor seeded from *pos.
+ * Returns NULL (end of sequence) if no recording exists or on OOM. */
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+ loff_t *spos;
+
+ /* check if we have data avaiable */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording())
+ {
+ return NULL;
+ }
+
+ spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (NULL == spos)
+ {
+ return NULL;
+ }
+
+ *spos = *pos;
+ return spos;
+}
+
+/* seq_file next callback: advance the cursor, stopping at the end of the
+ * recorded event list. */
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ loff_t *spos = v;
+
+ /* check if we have data avaiable */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording())
+ {
+ return NULL;
+ }
+
+ /* check if the next entry actually is avaiable */
+ if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1))
+ {
+ return NULL;
+ }
+
+ *pos = ++*spos;
+ return spos;
+}
+
+/* seq_file stop callback: release the cursor allocated in start. */
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+ kfree(v);
+}
+
+/* seq_file show callback: print one recorded event as raw numbers
+ * "timestamp event_id data0..data4". Lookup failures are skipped silently. */
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data))
+ {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+ return 0;
+ }
+
+ return 0;
+}
+
+/* seq_file show callback for events_human_readable: print the raw event
+ * followed by a decoded form — event type (bits 27..24), channel
+ * (bits 23..16), reason (bits 15..0) and the timestamp relative to the first
+ * event. The static start_time persists across calls within one dump. */
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+ /* true when the channel field addresses a GP/PP hardware channel */
+ #define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+ static u64 start_time = 0;
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data))
+ {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+ /* first event of the dump anchors the relative timestamps */
+ if (0 == index)
+ {
+ start_time = timestamp;
+ }
+
+ seq_printf(seq_file, "[%06u] ", index);
+
+ /* decode the event type field */
+ switch(event_id & 0x0F000000)
+ {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ seq_printf(seq_file, "SINGLE | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ seq_printf(seq_file, "START | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ seq_printf(seq_file, "STOP | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+ seq_printf(seq_file, "SUSPEND | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_RESUME:
+ seq_printf(seq_file, "RESUME | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+ break;
+ }
+
+ /* decode the channel field */
+ switch(event_id & 0x00FF0000)
+ {
+ case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+ seq_printf(seq_file, "SW | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GP0:
+ seq_printf(seq_file, "GP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP0:
+ seq_printf(seq_file, "PP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP1:
+ seq_printf(seq_file, "PP1 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP2:
+ seq_printf(seq_file, "PP2 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP3:
+ seq_printf(seq_file, "PP3 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP4:
+ seq_printf(seq_file, "PP4 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP5:
+ seq_printf(seq_file, "PP5 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP6:
+ seq_printf(seq_file, "PP6 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP7:
+ seq_printf(seq_file, "PP7 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GPU:
+ seq_printf(seq_file, "GPU | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+ break;
+ }
+
+ /* decode the reason field (only named for HW start/stop events) */
+ if (MALI_EVENT_ID_IS_HW(event_id))
+ {
+ if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP))
+ {
+ switch(event_id & 0x0000FFFF)
+ {
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+ seq_printf(seq_file, "PHYSICAL | ");
+ break;
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+ seq_printf(seq_file, "VIRTUAL | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ break;
+ }
+ }
+ else
+ {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+ }
+ else
+ {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+
+ seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+ return 0;
+ }
+
+ return 0;
+}
+
+/* seq_file plumbing for the raw and human-readable profiling event dumps;
+ * both share the same start/next/stop iterator and differ only in show. */
+static const struct seq_operations profiling_events_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_human_readable_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif
+
+/* debugfs read handler: report current Mali memory usage (as returned by
+ * _mali_ukk_report_memory_usage) as a decimal number. */
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 mem = _mali_ukk_report_memory_usage();
+
+ r = snprintf(buf, 64, "%u\n", mem);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .read = memory_used_read,
+};
+
+/* debugfs read handlers: report combined, GP-only and PP-only utilization
+ * figures from the _mali_ukk_utilization_* accessors as decimal numbers. */
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval= _mali_ukk_utilization_gp_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval= _mali_ukk_utilization_gp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval= _mali_ukk_utilization_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+
+static const struct file_operations utilization_gp_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_pp_read,
+};
+
+/* debugfs write handler for a userspace_settings entry: the setting id is
+ * smuggled through filp->private_data (set at file-creation time, see
+ * mali_sysfs_user_settings_register); the written decimal value is stored
+ * via mali_set_user_setting. */
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ _mali_uk_user_setting_t setting;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt))
+ {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (0 != ret)
+ {
+ return ret;
+ }
+
+ /* Update setting */
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ mali_set_user_setting(setting, val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+/* debugfs read handler: report the current value of the setting identified
+ * by filp->private_data as a decimal number. */
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 value;
+ _mali_uk_user_setting_t setting;
+
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ value = mali_get_user_setting(setting);
+
+ r = snprintf(buf, 64, "%u\n", value);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = user_settings_read,
+ .write = user_settings_write,
+};
+
+/* Create the userspace_settings debugfs directory with one file per user
+ * setting; the setting index is stored in the file's private data via the
+ * (void*)i cast. Always returns 0 (creation failures are ignored). */
+static int mali_sysfs_user_settings_register(void)
+{
+ struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+ if (mali_user_settings_dir != NULL)
+ {
+ int i;
+ for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++)
+ {
+ debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void*)i, &user_settings_fops);
+ }
+ }
+
+ return 0;
+}
+
+/* debugfs write handler for pmu/power_down: parse a decimal mask and ask the
+ * global PMU to power the corresponding domains down.
+ * NOTE(review): returns -ENOMEM for over-long input where the counter-source
+ * writers use -EINVAL — confirm whether this asymmetry is intentional. */
+static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+ struct mali_pmu_core *pmu;
+ _mali_osk_errcode_t err;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ err = mali_pmu_power_down(pmu, val);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+/* debugfs write handler for pmu/power_up: mirror of pmu_power_down_write. */
+static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+ struct mali_pmu_core *pmu;
+ _mali_osk_errcode_t err;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ err = mali_pmu_power_up(pmu, val);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations pmu_power_down_fops = {
+ .owner = THIS_MODULE,
+ .write = pmu_power_down_write,
+};
+
+static const struct file_operations pmu_power_up_fops = {
+ .owner = THIS_MODULE,
+ .write = pmu_power_up_write,
+};
+
+/* debugfs write handler for pp/num_cores_enabled: parse a decimal core count
+ * and hand it to mali_perf_set_num_pp_cores; its error code is propagated. */
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ ret = mali_perf_set_num_pp_cores(val);
+ if (ret)
+ {
+ return ret;
+ }
+
+ *offp += count;
+ return count;
+}
+
+/* debugfs read handler: report how many PP cores are currently enabled. */
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = sprintf(buffer, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_num_cores_enabled_write,
+ .read = pp_num_cores_enabled_read,
+};
+
+/* debugfs read handler: report the total number of PP cores present. */
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = sprintf(buffer, "%u\n", mali_pp_scheduler_get_num_cores_total());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_total_fops = {
+ .owner = THIS_MODULE,
+ .read = pp_num_cores_total_read,
+};
+
+
+/* debugfs read handler for "version": report the detected GPU product as a
+ * human-readable name. Unknown product ids yield -EINVAL.
+ *
+ * The previous version had no default case: a product id outside the listed
+ * enumerators left r == 0 and the file silently read as empty. Folding
+ * _MALI_PRODUCT_ID_UNKNOWN into default makes every unrecognized id an
+ * explicit error. */
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r = 0;
+ char buffer[64];
+
+ switch (mali_kernel_core_get_product_id())
+ {
+ case _MALI_PRODUCT_ID_MALI200:
+ r = sprintf(buffer, "Mali-200\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI300:
+ r = sprintf(buffer, "Mali-300\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI400:
+ r = sprintf(buffer, "Mali-400 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI450:
+ r = sprintf(buffer, "Mali-450 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations version_fops = {
+ .owner = THIS_MODULE,
+ .read = version_read,
+};
+
+/* Build the driver's debugfs tree under <debugfs>/<mali_dev_name>: version,
+ * pmu, power, gp, pp and l2 control files, memory/utilization readouts, and
+ * (when configured) profiling and state-dump entries. Creation failures of
+ * individual directories are tolerated — the affected files are simply
+ * skipped. Always returns 0. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+ if(ERR_PTR(-ENODEV) == mali_debugfs_dir)
+ {
+ /* Debugfs not supported. */
+ mali_debugfs_dir = NULL;
+ }
+ else
+ {
+ if(NULL != mali_debugfs_dir)
+ {
+ /* Debugfs directory created successfully; create files now */
+ struct dentry *mali_pmu_dir;
+ struct dentry *mali_power_dir;
+ struct dentry *mali_gp_dir;
+ struct dentry *mali_pp_dir;
+ struct dentry *mali_l2_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ struct dentry *mali_profiling_dir;
+#endif
+
+ debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+ mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
+ if (NULL != mali_pmu_dir)
+ {
+ debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
+ debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
+ }
+
+ mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+ if (mali_power_dir != NULL)
+ {
+ debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
+ debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops);
+ }
+
+ mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+ if (mali_gp_dir != NULL)
+ {
+ struct dentry *mali_gp_all_dir;
+ u32 num_groups;
+ int i;
+
+ mali_gp_all_dir = debugfs_create_dir("all", mali_gp_dir);
+ if (mali_gp_all_dir != NULL)
+ {
+ debugfs_create_file("counter_src0", 0200, mali_gp_all_dir, NULL, &gp_all_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0200, mali_gp_all_dir, NULL, &gp_all_counter_src1_fops);
+ }
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++)
+ {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core)
+ {
+ /* only one GP core exists, so the directory is hard-named "gp0" */
+ struct dentry *mali_gp_gpx_dir;
+ mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+ if (NULL != mali_gp_gpx_dir)
+ {
+ debugfs_create_file("counter_src0", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0600, mali_gp_gpx_dir, gp_core, &gp_gpx_counter_src1_fops);
+ debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+ debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+ }
+ break; /* no need to look for any other GP cores */
+ }
+
+ }
+ }
+
+ mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+ if (mali_pp_dir != NULL)
+ {
+ struct dentry *mali_pp_all_dir;
+ u32 num_groups;
+ int i;
+
+ debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+ debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+
+ mali_pp_all_dir = debugfs_create_dir("all", mali_pp_dir);
+ if (mali_pp_all_dir != NULL)
+ {
+ debugfs_create_file("counter_src0", 0200, mali_pp_all_dir, NULL, &pp_all_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0200, mali_pp_all_dir, NULL, &pp_all_counter_src1_fops);
+ }
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++)
+ {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core)
+ {
+ /* one "ppN" directory per physical PP core */
+ char buf[16];
+ struct dentry *mali_pp_ppx_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+ mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+ if (NULL != mali_pp_ppx_dir)
+ {
+ debugfs_create_file("counter_src0", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0600, mali_pp_ppx_dir, pp_core, &pp_ppx_counter_src1_fops);
+ debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+ if (!mali_group_is_virtual(group))
+ {
+ debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+ }
+ }
+ }
+ }
+ }
+
+ mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+ if (mali_l2_dir != NULL)
+ {
+ struct dentry *mali_l2_all_dir;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+ if (mali_l2_all_dir != NULL)
+ {
+ debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache)
+ {
+ /* one "l2N" directory per L2 cache instance */
+ char buf[16];
+ struct dentry *mali_l2_l2x_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+ mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+ if (NULL != mali_l2_l2x_dir)
+ {
+ debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+ debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+ }
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+ }
+
+ debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+ debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+ debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+ debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+ if (mali_profiling_dir != NULL)
+ {
+ struct dentry *mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+ if (mali_profiling_proc_dir != NULL)
+ {
+ struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+ if (mali_profiling_proc_default_dir != NULL)
+ {
+ debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void*)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+ }
+ }
+ debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+ debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+ debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+ }
+#endif
+
+#if MALI_STATE_TRACKING
+ debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+ if (mali_sysfs_user_settings_register())
+ {
+ /* Failed to create the debugfs entries for the user settings DB. */
+ MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+ }
+ }
+ }
+
+ /* Success! */
+ return 0;
+}
+
+/* Tear down the whole debugfs tree created by mali_sysfs_register.
+ * Always returns 0. */
+int mali_sysfs_unregister(void)
+{
+ if(NULL != mali_debugfs_dir)
+ {
+ debugfs_remove_recursive(mali_debugfs_dir);
+ }
+ return 0;
+}
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+/* Dummy no-op implementations used when the driver is built without the GPL
+ * licence (debugfs support is GPL-only).
+ *
+ * The signature must match the declaration in mali_kernel_sysfs.h
+ * (int mali_sysfs_register(const char *)); the old three-argument form
+ * conflicted with that prototype and could not compile. */
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+ return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.h
new file mode 100755
index 00000000000000..26ee7e4cd772a3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_kernel_sysfs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_linux_trace.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_linux_trace.h
new file mode 100755
index 00000000000000..1547547e2023d3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_linux_trace.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Tracepoint used to communicate the status of a GPU. Called when a GPU
+ * turns on or turns off.
+ *
+ * @param event_id Bitfield encoding the type of the event.
+ * @param d0..d4   Event payload words. All five are stored in the trace
+ *                 entry, but TP_printk renders only event_id -- the payload
+ *                 is presumably read from the raw entry by trace
+ *                 post-processing tools (confirm with the profiling code).
+ */
+TRACE_EVENT(mali_timeline_event,
+
+	TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+		unsigned int d2, unsigned int d3, unsigned int d4),
+
+	TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, event_id)
+		__field(unsigned int, d0)
+		__field(unsigned int, d1)
+		__field(unsigned int, d2)
+		__field(unsigned int, d3)
+		__field(unsigned int, d4)
+	),
+
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		__entry->d0 = d0;
+		__entry->d1 = d1;
+		__entry->d2 = d2;
+		__entry->d3 = d3;
+		__entry->d4 = d4;
+	),
+
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * Tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The sampled value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+	TP_PROTO(unsigned int counter_id, unsigned int value),
+
+	TP_ARGS(counter_id, value),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, counter_id)
+		__field(unsigned int, value)
+	),
+
+	TP_fast_assign(
+		__entry->counter_id = counter_id;
+		/* Fix: 'value' was never copied into the trace entry, so
+		 * TP_printk formatted an uninitialized field. */
+		__entry->value = value;
+	),
+
+	TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+);
+
+/**
+ * Tracepoint used to send a bundle of software counters.
+ *
+ * @param pid        Process id the counters belong to.
+ * @param tid        Thread id the counters belong to.
+ * @param surface_id Opaque surface identifier (stored as a pointer value).
+ * @param counters   The bundle of counters.
+ *
+ * NOTE(review): only the 'counters' POINTER is recorded in the trace
+ * entry, not the values it points at -- the buffer must still be valid
+ * when the entry is consumed; confirm callers guarantee this.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+	TP_PROTO(pid_t pid, pid_t tid, void * surface_id, unsigned int * counters),
+
+	TP_ARGS(pid, tid, surface_id, counters),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(pid_t, tid)
+		__field(void *, surface_id)
+		__field(unsigned int *, counters)
+	),
+
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->tid = tid;
+		__entry->surface_id = surface_id;
+		__entry->counters = counters;
+	),
+
+	TP_printk("counters were %s", __entry->counters == NULL? "NULL" : "not NULL")
+);
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_atomics.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_atomics.c
new file mode 100755
index 00000000000000..14f8e16908d30f
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+/** Atomically decrement @atom (OSK atomics map straight onto Linux atomic_t). */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+	atomic_dec((atomic_t *)&atom->u.val);
+}
+
+/** Atomically decrement @atom and return the new value. */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom. */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+	atomic_inc((atomic_t *)&atom->u.val);
+}
+
+/** Atomically increment @atom and return the new value. */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+/** Initialize @atom to @val. Returns _MALI_OSK_ERR_INVALID_ARGS when @atom is NULL. */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+	MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+	atomic_set((atomic_t *)&atom->u.val, val);
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Read the current value of @atom. */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+	return atomic_read((atomic_t *)&atom->u.val);
+}
+
+/** No teardown needed for a Linux atomic_t; kept to satisfy the OSK contract. */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+	MALI_IGNORE(atom);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_irq.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_irq.c
new file mode 100755
index 00000000000000..74144e8680a489
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_irq.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "linux/interrupt.h"
+
+/* Per-core IRQ bookkeeping: the Linux IRQ number plus the OSK upper-half
+ * handler and its opaque context pointer, passed as dev_id to request_irq(). */
+typedef struct _mali_osk_irq_t_struct
+{
+	u32 irqnum;
+	void *data;
+	_mali_osk_irq_uhandler_t uhandler;
+} mali_osk_irq_object_t;
+
+/* NOTE(review): irq_handler_func_t is declared but never used in this file. */
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+/**
+ * Allocate an IRQ bookkeeping object and install @uhandler for @irqnum.
+ *
+ * When @irqnum is -1 and both @trigger_func and @ack_func are supplied, the
+ * IRQ line is auto-detected with probe_irq_on()/probe_irq_off(): the core is
+ * made to raise its interrupt, the asserted line is sampled and then acked,
+ * retrying a few times.
+ *
+ * @return the new handle, or NULL on allocation failure, failed probe, or
+ *         failed request_irq().
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description )
+{
+	mali_osk_irq_object_t *irq_object;
+	unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+	irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+	if (NULL == irq_object)
+	{
+		return NULL;
+	}
+
+	if (-1 == irqnum)
+	{
+		/* Probe for IRQ */
+		if ( (NULL != trigger_func) && (NULL != ack_func) )
+		{
+			unsigned long probe_count = 3;
+			_mali_osk_errcode_t err;
+			int irq;
+
+			MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+			do
+			{
+				unsigned long mask;
+
+				mask = probe_irq_on();
+				trigger_func(probe_data);
+
+				/* Give the core time to assert its line before sampling. */
+				_mali_osk_time_ubusydelay(5);
+
+				irq = probe_irq_off(mask);
+				err = ack_func(probe_data);
+			}
+			while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+			if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+			else irqnum = irq;
+		}
+		else irqnum = -1; /* no probe functions, fault */
+
+		if (-1 != irqnum)
+		{
+			/* found an irq */
+			MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+		}
+	}
+
+	irq_object->irqnum = irqnum;
+	irq_object->uhandler = uhandler;
+	irq_object->data = int_data;
+
+	if (-1 == irqnum)
+	{
+		MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+	if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object))
+	{
+		MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+	return irq_object;
+}
+
+/** Uninstall the handler registered by _mali_osk_irq_init() and free the
+ * bookkeeping object. */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+	free_irq(irq_object->irqnum, irq_object);
+	kfree(irq_object);
+}
+
+
+/** Upper-half interrupt handler, registered once per Mali core.
+ *
+ * On a (possibly shared) interrupt line the OS invokes this once for each
+ * registered core; @dev_id selects the core to check. All real work is
+ * delegated to the OSK upper-half handler stored in the irq object, whose
+ * return code tells us whether this particular core had a pending
+ * interrupt (and therefore whether we handled anything).
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+	mali_osk_irq_object_t *obj = (mali_osk_irq_object_t *)dev_id;
+
+	return (_MALI_OSK_ERR_OK == obj->uhandler(obj->data))
+		? IRQ_HANDLED : IRQ_NONE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_locks.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_locks.c
new file mode 100755
index 00000000000000..b03f9ef1568a40
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_locks.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the lock flavours we implement; the flavour is selected in
+ * _mali_osk_lock_init() from the _MALI_OSK_LOCKFLAG_* bits. */
+typedef enum
+{
+	_MALI_OSK_INTERNAL_LOCKTYPE_SPIN,            /* Mutex, implicitly non-interruptable, use spin_lock/spin_unlock */
+	_MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ,        /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX,           /* Interruptable, use mutex_unlock()/down_interruptable() */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT,    /* Non-Interruptable, use mutex_unlock()/down() */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptable, Reader/Writer, use {mutex_unlock,down}{read,write}() */
+
+	/* Linux supports, but we do not support:
+	 * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+	 */
+
+	/* Linux does not support:
+	 * One-locks, of any sort - no optimization for this fact will be made.
+	 */
+
+} _mali_osk_internal_locktype;
+
+/* A lock handle: the selected flavour plus the matching kernel primitive.
+ * 'flags' holds the saved IRQ state and is used by the SPIN_IRQ flavour only. */
+struct _mali_osk_lock_t_struct
+{
+	_mali_osk_internal_locktype type;
+	unsigned long flags;
+	union
+	{
+		spinlock_t spinlock;
+		struct mutex mutex;
+		struct rw_semaphore rw_sema;
+	} obj;
+	MALI_DEBUG_CODE(
+	/** original flags for debug checking */
+	_mali_osk_lock_flags_t orig_flags;
+
+	/* id of the thread currently holding this lock, 0 if no
+	 * threads hold it. */
+	u32 owner;
+	/* what mode the lock was taken in */
+	_mali_osk_lock_mode_t mode;
+	); /* MALI_DEBUG_CODE */
+};
+
+/**
+ * Create a lock whose kernel backing (spinlock, IRQ-safe spinlock, mutex or
+ * rw-semaphore) is chosen from @flags; with no flags an interruptible mutex
+ * is created. @initial must be 0 (reserved for future expansion).
+ * NOTE(review): @order is accepted but never used in this implementation.
+ *
+ * @return the new lock, or NULL if allocation fails.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+	_mali_osk_lock_t *lock = NULL;
+
+	/* Validate parameters: */
+	/* Flags acceptable */
+	MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+	                            | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+	                            | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+	                            | _MALI_OSK_LOCKFLAG_READERWRITER
+	                            | _MALI_OSK_LOCKFLAG_ORDERED
+	                            | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+	/* Spinlocks are always non-interruptable */
+	MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+	                 || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+	/* Parameter initial SBZ - for future expansion */
+	MALI_DEBUG_ASSERT( 0 == initial );
+
+	lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+	if ( NULL == lock )
+	{
+		return lock;
+	}
+
+	/* Determine type of mutex: */
+	/* defaults to interruptable mutex if no flags are specified */
+
+	if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+	{
+		/* Non-interruptable Spinlocks override all others */
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+		spin_lock_init( &lock->obj.spinlock );
+	}
+	else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+	{
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+		lock->flags = 0;
+		spin_lock_init( &lock->obj.spinlock );
+	}
+	else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+	          && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+	{
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+		init_rwsem( &lock->obj.rw_sema );
+	}
+	else
+	{
+		/* Usual mutex types */
+		if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+		{
+			lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+		}
+		else
+		{
+			lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+		}
+
+		/* Initially unlocked */
+		mutex_init(&lock->obj.mutex);
+	}
+
+#ifdef DEBUG
+	/* Debug tracking of flags */
+	lock->orig_flags = flags;
+
+	/* Debug tracking of lock owner */
+	lock->owner = 0;
+#endif /* DEBUG */
+
+	return lock;
+}
+
+#ifdef DEBUG
+/** @return tid of the thread currently holding @lock in RW mode (0 = none). */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock )
+{
+	return lock->owner;
+}
+
+/** @return the mode (_mali_osk_lock_mode_t) @lock was last taken in. */
+u32 _mali_osk_lock_get_mode( _mali_osk_lock_t *lock )
+{
+	return lock->mode;
+}
+#endif /* DEBUG */
+
+/**
+ * Acquire @lock in @mode. RO mode is only legal for locks created with the
+ * READERWRITER flag. For the interruptible-mutex flavour the wait may be
+ * interrupted by a signal, in which case _MALI_OSK_ERR_RESTARTSYSCALL is
+ * returned and the lock is NOT held.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+	/* Parameter validation */
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+	                 || _MALI_OSK_LOCKMODE_RO == mode );
+
+	/* Only allow RO locks when the initial object was a Reader/Writer lock
+	 * Since information is lost on the internal locktype, we use the original
+	 * information, which is only stored when built for DEBUG */
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+	                 || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+	switch ( lock->type )
+	{
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+		spin_lock(&lock->obj.spinlock);
+		break;
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+		{
+			/* The saved IRQ state is stashed in the lock for the matching
+			 * spin_unlock_irqrestore() in _mali_osk_lock_signal(). */
+			unsigned long tmp_flags;
+			spin_lock_irqsave(&lock->obj.spinlock, tmp_flags);
+			lock->flags = tmp_flags;
+		}
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+		if (mutex_lock_interruptible(&lock->obj.mutex))
+		{
+			MALI_PRINT_ERROR(("Can not lock mutex\n"));
+			err = _MALI_OSK_ERR_RESTARTSYSCALL;
+		}
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+		mutex_lock(&lock->obj.mutex);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+		if (mode == _MALI_OSK_LOCKMODE_RO)
+		{
+			down_read(&lock->obj.rw_sema);
+		}
+		else
+		{
+			down_write(&lock->obj.rw_sema);
+		}
+		break;
+
+	default:
+		/* Reaching here indicates a programming error, so you will not get here
+		 * on non-DEBUG builds */
+		MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+		break;
+	}
+
+#ifdef DEBUG
+	/* This thread is now the owner of this lock */
+	if (_MALI_OSK_ERR_OK == err)
+	{
+		if (mode == _MALI_OSK_LOCKMODE_RW)
+		{
+			if (0 != lock->owner)
+			{
+				printk(KERN_ERR "%d: ERROR: Lock %p already has owner %d\n", _mali_osk_get_tid(), lock, lock->owner);
+				dump_stack();
+			}
+			lock->owner = _mali_osk_get_tid();
+			lock->mode = mode;
+		}
+		else /* mode == _MALI_OSK_LOCKMODE_RO */
+		{
+			lock->mode = mode;
+		}
+	}
+#endif
+
+	return err;
+}
+
+/**
+ * Release @lock, previously acquired in @mode via _mali_osk_lock_wait().
+ * In DEBUG builds the caller is verified to be the recorded RW owner.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+	/* Parameter validation */
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+	                 || _MALI_OSK_LOCKMODE_RO == mode );
+
+	/* Only allow RO locks when the initial object was a Reader/Writer lock
+	 * Since information is lost on the internal locktype, we use the original
+	 * information, which is only stored when built for DEBUG */
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+	                 || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+#ifdef DEBUG
+	/* make sure the thread releasing the lock actually was the owner */
+	if (mode == _MALI_OSK_LOCKMODE_RW)
+	{
+		if (_mali_osk_get_tid() != lock->owner)
+		{
+			printk(KERN_ERR "%d: ERROR: Lock %p owner was %d\n", _mali_osk_get_tid(), lock, lock->owner);
+			dump_stack();
+		}
+		/* This lock now has no owner */
+		lock->owner = 0;
+	} /* else if (mode == _MALI_OSK_LOCKMODE_RO) Nothing to check */
+#endif /* DEBUG */
+
+	switch ( lock->type )
+	{
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+		spin_unlock(&lock->obj.spinlock);
+		break;
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+		/* Restore the IRQ state saved by the matching lock_wait(). */
+		spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+		/* FALLTHROUGH */
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+		mutex_unlock(&lock->obj.mutex);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+		if (mode == _MALI_OSK_LOCKMODE_RO)
+		{
+			up_read(&lock->obj.rw_sema);
+		}
+		else
+		{
+			up_write(&lock->obj.rw_sema);
+		}
+		break;
+
+	default:
+		/* Reaching here indicates a programming error, so you will not get here
+		 * on non-DEBUG builds */
+		MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+		break;
+	}
+}
+
+/** Free @lock. Linux spinlocks, mutexes and rw-semaphores need no explicit
+ * teardown, so this is just the kfree(). */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+	/* Parameter validation  */
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	/* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+	kfree(lock);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_low_level_mem.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_low_level_mem.c
new file mode 100755
index 00000000000000..7b89c2b9ecc080
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
+#include <linux/shrinker.h>
+#endif
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+/* Reference count for a user-space mapping (shared across forked VMAs) plus
+ * a cookie holding the mali_memory_allocation descriptor (see vma_close). */
+typedef struct mali_vma_usage_tracker
+{
+	int references;
+	u32 cookie;
+} mali_vma_usage_tracker;
+
+#define INVALID_PAGE 0xffffffff
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping: offset within the mapping and the physical address of one page.
+ */
+struct AllocationList
+{
+	struct AllocationList *next;
+	u32 offset;
+	u32 physaddr;
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init: the backing VMA plus the list of
+ * page allocations inside it (tail kept for O(1) append).
+ */
+struct MappingInfo
+{
+	struct vm_area_struct *vma;
+	struct AllocationList *list;
+	struct AllocationList *tail;
+};
+
+typedef struct MappingInfo MappingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+static DEFINE_SPINLOCK(allocation_list_spinlock);
+/* Free-list of pre-allocated pages, protected by allocation_list_spinlock. */
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current  = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+	static int pre_allocated_memory_size_max      = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+	static int pre_allocated_memory_size_max      = 16 * 1024 * 1024; /* 16 MiB default cap */
+#endif
+
+/* VMA callbacks for Mali user-space mappings; the fault callback name
+ * changed from .nopfn to .fault in kernel 2.6.26. */
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+	.open = mali_kernel_memory_vma_open,
+	.close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	.fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+	.nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+/**
+ * Memory-shrinker callback. With nr_to_scan == 0 it only reports how many
+ * pages the pre-allocated free-list currently holds; otherwise it releases
+ * up to nr_to_scan pages back to the kernel. Returns -1 when the list lock
+ * is contended (shrinker will retry later).
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static int mali_mem_shrink(int nr_to_scan, gfp_t gfp_mask)
+	#else
+static int mali_mem_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+	#endif
+#else
+static int mali_mem_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif
+{
+	unsigned long flags;
+	AllocationList *item;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+	int nr = nr_to_scan;
+#else
+	int nr = sc->nr_to_scan;
+#endif
+
+	if (0 == nr)
+	{
+		return pre_allocated_memory_size_current / PAGE_SIZE;
+	}
+
+	/* Unlocked peek: a stale 0 only means "nothing worth reclaiming now". */
+	if (0 == pre_allocated_memory_size_current)
+	{
+		/* No pages available */
+		return 0;
+	}
+
+	if (0 == spin_trylock_irqsave(&allocation_list_spinlock, flags))
+	{
+		/* Not able to lock. */
+		return -1;
+	}
+
+	while (pre_allocated_memory && nr > 0)
+	{
+		item = pre_allocated_memory;
+		pre_allocated_memory = item->next;
+
+		_kernel_page_release(item->physaddr);
+		_mali_osk_free(item);
+
+		pre_allocated_memory_size_current -= PAGE_SIZE;
+		--nr;
+	}
+	spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+	return pre_allocated_memory_size_current / PAGE_SIZE;
+}
+
+/* Shrinker registered in mali_osk_low_level_mem_init(); lets the kernel
+ * reclaim our pre-allocated page pool under memory pressure. */
+struct shrinker mali_mem_shrinker = {
+	.shrink = mali_mem_shrink,
+	.seeks = DEFAULT_SEEKS,
+};
+
+/** Reset the pre-allocated page pool and register the memory shrinker. */
+void mali_osk_low_level_mem_init(void)
+{
+	pre_allocated_memory = (AllocationList*) NULL ;
+
+	register_shrinker(&mali_mem_shrinker);
+}
+
+/** Unregister the shrinker and give every pooled page back to the kernel. */
+void mali_osk_low_level_mem_term(void)
+{
+	AllocationList *entry;
+
+	unregister_shrinker(&mali_mem_shrinker);
+
+	for (entry = pre_allocated_memory; NULL != entry; entry = pre_allocated_memory)
+	{
+		pre_allocated_memory = entry->next;
+		_kernel_page_release(entry->physaddr);
+		_mali_osk_free(entry);
+	}
+	pre_allocated_memory_size_current = 0;
+}
+
+/**
+ * Allocate one zeroed highmem page and flush it from the CPU caches via a
+ * bidirectional DMA mapping.
+ * @return the DMA/physical address, or INVALID_PAGE on allocation failure.
+ * NOTE(review): the dma_map_page() result is not checked with
+ * dma_mapping_error() -- confirm mapping cannot fail on this platform.
+ */
+static u32 _kernel_page_allocate(void)
+{
+	struct page *new_page;
+	u32 linux_phys_addr;
+
+	new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+	if ( NULL == new_page )
+	{
+		return INVALID_PAGE;
+	}
+
+	/* Ensure page is flushed from CPU caches. */
+	linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	return linux_phys_addr;
+}
+
+/** Unmap and free one page previously obtained from _kernel_page_allocate(). */
+static void _kernel_page_release(u32 physical_address)
+{
+	struct page *page;
+
+	/* Undo the cache-flush DMA mapping set up at allocation time. */
+	dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	page = pfn_to_page( physical_address >> PAGE_SHIFT );
+	MALI_DEBUG_ASSERT_POINTER( page );
+	__free_page( page );
+}
+
+/**
+ * Take one page item from the pre-allocated free-list, or allocate a fresh
+ * item plus backing page when the list is empty.
+ * @return the item, or NULL when out of memory (non-fatal; caller handles).
+ */
+static AllocationList * _allocation_list_item_get(void)
+{
+	AllocationList *item = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&allocation_list_spinlock,flags);
+	if ( pre_allocated_memory )
+	{
+		item = pre_allocated_memory;
+		pre_allocated_memory = pre_allocated_memory->next;
+		pre_allocated_memory_size_current -= PAGE_SIZE;
+
+		spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+		return item;
+	}
+	spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+	/* Slow path: list empty, allocate outside the spinlock. */
+	item = _mali_osk_malloc( sizeof(AllocationList) );
+	if ( NULL == item)
+	{
+		return NULL;
+	}
+
+	item->physaddr = _kernel_page_allocate();
+	if ( INVALID_PAGE == item->physaddr )
+	{
+		/* Non-fatal error condition, out of memory. Upper levels will handle this. */
+		_mali_osk_free( item );
+		return NULL;
+	}
+	return item;
+}
+
+/**
+ * Return @item to the pre-allocated free-list, or release its page back to
+ * the kernel once the pool holds pre_allocated_memory_size_max bytes.
+ */
+static void _allocation_list_item_release(AllocationList * item)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&allocation_list_spinlock,flags);
+	if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+	{
+		item->next = pre_allocated_memory;
+		pre_allocated_memory = item;
+		pre_allocated_memory_size_current += PAGE_SIZE;
+		spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+		return;
+	}
+	spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+	/* Pool full: free the page and item outside the spinlock. */
+	_kernel_page_release(item->physaddr);
+	_mali_osk_free( item );
+}
+
+/**
+ * CPU page-fault handler for Mali mappings. All Mali memory is pre-faulted
+ * when assigned to the process, so any CPU fault here is an access to an
+ * unmapped region and is answered with SIGBUS.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	void __user * address;
+	address = vmf->virtual_address;
+#endif
+	/*
+	 * We always fail the call since all memory is pre-faulted when assigned to the process.
+	 * Only the Mali cores can use page faults to extend buffers.
+	 */
+
+	MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+	MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+	MALI_IGNORE(address);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	return VM_FAULT_SIGBUS;
+#else
+	return NOPFN_SIGBUS;
+#endif
+}
+
+/* VMA open callback: another reference to this mapping now exists (fork,
+ * split, ...), so bump the shared usage tracker's reference count. */
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+	mali_vma_usage_tracker *tracker;
+
+	MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+	tracker = (mali_vma_usage_tracker *)vma->vm_private_data;
+	tracker->references++;
+}
+
+/**
+ * VMA close callback: drop one reference on the mapping; when the last
+ * reference goes away, unmap the Mali memory via _mali_ukk_mem_munmap().
+ */
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+	_mali_uk_mem_munmap_s args = {0, };
+	mali_memory_allocation * descriptor;
+	mali_vma_usage_tracker * vma_usage_tracker;
+	MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+	vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+	BUG_ON(!vma_usage_tracker);
+	BUG_ON(0 == vma_usage_tracker->references);
+
+	vma_usage_tracker->references--;
+
+	if (0 != vma_usage_tracker->references)
+	{
+		MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+		return;
+	}
+
+	/** @note args->context unused, initialized to 0.
+	 * Instead, we use the memory_session from the cookie */
+
+	descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+	args.cookie = (u32)descriptor;
+	args.mapping = descriptor->mapping;
+	args.size = descriptor->size;
+
+	_mali_ukk_mem_munmap( &args );
+
+	/* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+	 * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
+}
+
+/** Full read/write memory barrier (wraps the kernel's mb()). */
+void _mali_osk_mem_barrier( void )
+{
+	mb();
+}
+
+/** Write-only memory barrier (wraps the kernel's wmb()). */
+void _mali_osk_write_mem_barrier( void )
+{
+	wmb();
+}
+
+/** Map @size bytes of register space at physical address @phys, uncached. */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+	return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+/** Undo a mapping created by _mali_osk_mem_mapioregion(). */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+	iounmap((void*)virt);
+}
+
+/**
+ * Allocate @size bytes (page-multiple) of zeroed, write-combined DMA memory
+ * for e.g. MMU page tables. On success *@phys receives the physical address.
+ * @return the kernel virtual address, or 0 on failure.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+	void * virt;
+	MALI_DEBUG_ASSERT_POINTER( phys );
+	MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+	MALI_DEBUG_ASSERT( 0 != size );
+
+	/* dma_alloc_* uses a limited region of address space. On most arch/marchs
+	 * 2 to 14 MiB is available. This should be enough for the page tables, which
+	 * currently is the only user of this function. */
+	virt = dma_alloc_writecombine(NULL, size, phys, GFP_KERNEL | GFP_DMA | __GFP_ZERO);
+
+	if ( NULL == virt )
+	{
+		MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+		return 0;
+	}
+
+	/* Fix: the old trace claimed dma_alloc_coherent and printed the POINTERS
+	 * 'virt'/'phys' with %x (and did so even on failure); log the actual
+	 * call with the returned virtual and physical addresses instead. */
+	MALI_DEBUG_PRINT(3, ("Page table virt: %p = dma_alloc_writecombine(size:%d, phys:0x%x)\n", virt, size, *phys));
+
+	MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	return (mali_io_address)virt;
+}
+
+/** Free memory obtained from _mali_osk_mem_allocioregion(). */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+	MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+	MALI_DEBUG_ASSERT( 0 != size );
+	MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+	dma_free_writecombine(NULL, size, virt, phys);
+}
+
+/** Claim the physical register range for this driver. The GPL build relies
+ * on the platform/resource core having claimed the region already. */
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+#if MALI_LICENSE_IS_GPL
+	return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+	return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+/** Release a region claimed by _mali_osk_mem_reqregion() (no-op on GPL builds). */
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+#if !MALI_LICENSE_IS_GPL
+	release_mem_region(phys, size);
+#endif
+}
+
+/** 32-bit register write without ordering guarantees: the value is stored
+ * little-endian via __raw_writel() at byte offset @offset. */
+void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
+{
+	__raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+
+/** 32-bit register read at byte offset @offset from @addr. */
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+	return ioread32(((u8*)addr) + offset);
+}
+
+/** 32-bit register write with iowrite32()'s ordering guarantees. */
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+	iowrite32(val, ((u8*)addr) + offset);
+}
+
+/** Intentional no-op: cached Mali memory is not supported by this port. */
+void _mali_osk_cache_flushall( void )
+{
+	/** @note Cached memory is not currently supported in this implementation */
+}
+
+/* Make CPU writes to an uncached mapping visible to the GPU. Since cached
+ * memory is not supported here, draining the write buffer is sufficient;
+ * the mapping, offset and size arguments are therefore unused. */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+ _mali_osk_write_mem_barrier();
+}
+
+/* Prepare the user-space VMA (arriving in descriptor->process_addr_mapping_info)
+ * for Mali mappings: allocate a MappingInfo wrapper plus a reference tracker,
+ * mark the VMA IO/non-copy/non-dump, select write-combined page protection and
+ * install mali_kernel_vm_ops. On success, process_addr_mapping_info is
+ * re-pointed at the MappingInfo (the VMA pointer is kept inside it).
+ * Returns _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT on NULL input / allocation
+ * failure (all intermediate allocations are freed on the failure paths). */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct *vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* Re-write the process_addr_mapping_info */
+ mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+ if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+ if (NULL == vma_usage_tracker)
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+ _mali_osk_free( mappingInfo );
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo->vma = vma;
+ descriptor->process_addr_mapping_info = mappingInfo;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+ /* list member is already NULL */
+
+ /*
+ set some bits which indicate that:
+ The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
+ The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_DONTCOPY;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
+ vma->vm_flags |= VM_RESERVED;
+#else
+ /* VM_RESERVED was split up in 3.7; use the replacement flag set */
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP;
+#endif
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+ vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+ vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+ vma->vm_private_data = vma_usage_tracker;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the bookkeeping created by _mali_osk_mem_mapregion_init():
+ * frees the vma_usage_tracker and the MappingInfo wrapper. The actual
+ * user-space mapping is removed by the kernel as part of munmap. Only ever
+ * called after a successful init, so the tracker is assumed present. */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = mappingInfo->vma;
+
+ MALI_DEBUG_ASSERT_POINTER( vma );
+
+ /* ASSERT that there are no allocations on the list. Unmap should've been
+ * called on all OS allocations. */
+ MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ _mali_osk_free(vma_usage_tracker);
+
+ _mali_osk_free( mappingInfo );
+ return;
+}
+
+/* Map physical memory into the user VMA prepared by mapregion_init(), at
+ * byte offset 'offset' within the mapping. Two modes:
+ * - *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC:
+ *   take a page from the allocation free-list, remap it, record it on
+ *   mappingInfo->list (keyed by offset for later unmap) and write the real
+ *   physical address back through *phys_addr.
+ * - otherwise: remap the caller-supplied, page-aligned physical address.
+ * size and offset must be page aligned. Returns _MALI_OSK_ERR_OK on success,
+ * _MALI_OSK_ERR_NOMEM if no free page is available, _MALI_OSK_ERR_FAULT on
+ * bad arguments or remap_pfn_range() failure. */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+ struct vm_area_struct *vma;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+ if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+ if (size > (descriptor->size - offset))
+ {
+ /* Note: format typo fixed ("0x%xr" -> "0x%x") */
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%x\n",
+ *phys_addr, size, descriptor->mapping, offset));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ vma = mappingInfo->vma;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+ {
+ _mali_osk_errcode_t ret;
+ AllocationList *alloc_item;
+ u32 linux_phys_frame_num;
+
+ alloc_item = _allocation_list_item_get();
+ if (NULL == alloc_item)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+ ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ if ( ret != _MALI_OSK_ERR_OK)
+ {
+ /* Return the unused page to the free-list before propagating the error */
+ MALI_PRINT_ERROR(("%s %d could not remap_pfn_range()\n", __FUNCTION__, __LINE__));
+ _allocation_list_item_release(alloc_item);
+ return ret;
+ }
+
+ /* Put our alloc_item into the list of allocations on success */
+ if (NULL == mappingInfo->list)
+ {
+ mappingInfo->list = alloc_item;
+ }
+ else
+ {
+ mappingInfo->tail->next = alloc_item;
+ }
+
+ mappingInfo->tail = alloc_item;
+ alloc_item->next = NULL;
+ alloc_item->offset = offset;
+
+ /* Write out new physical address on success */
+ *phys_addr = alloc_item->physaddr;
+
+ return ret;
+ }
+
+ /* Otherwise, Use the supplied physical address */
+
+ /* ASSERT that supplied phys_addr is page aligned */
+ MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+/* Undo bookkeeping for a range mapped by _mali_osk_mem_mapregion_map().
+ * When the range was OS-allocated (flag set), walk it one CPU page at a
+ * time, find the matching AllocationList entry by offset, unlink it and
+ * return the page to the free-list. The user-space PTEs themselves are torn
+ * down by the kernel during munmap, so nothing is unmapped here directly.
+ * size and offset must be page aligned; out-of-range requests are logged
+ * and ignored. */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ if (NULL == descriptor->mapping) return;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+ size, descriptor->mapping, offset));
+ return;
+ }
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+ {
+ /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+ * so needs to be unmapped
+ */
+ while (size)
+ {
+ /* First find the allocation in the list of allocations */
+ AllocationList *alloc = mappingInfo->list;
+ AllocationList **prev = &(mappingInfo->list);
+
+ while (NULL != alloc && alloc->offset != offset)
+ {
+ prev = &(alloc->next);
+ alloc = alloc->next;
+ }
+ if (alloc == NULL) {
+ /* Tolerate holes: warn and advance to the next page */
+ MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ continue;
+ }
+
+ *prev = alloc->next;
+ _allocation_list_item_release(alloc);
+
+ /* Move onto the next allocation */
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+ }
+
+ /* Linux does the right thing as part of munmap to remove the mapping */
+
+ return;
+}
+
+/* Copy size bytes from user pointer src to user pointer dest through a
+ * 4 KiB kernel bounce buffer (both ends use user-access copies). Returns the
+ * number of bytes actually copied, which may be less than size if either
+ * user access faults or the bounce buffer cannot be allocated. */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+ u32 retval = 0;
+ void *temp_buf;
+
+ temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+ if (NULL != temp_buf)
+ {
+ u32 bytes_left_to_copy = size;
+ u32 i;
+ for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE)
+ {
+ u32 size_to_copy;
+ u32 size_copied;
+ u32 bytes_left;
+
+ if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE)
+ {
+ size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+ }
+ else
+ {
+ size_to_copy = bytes_left_to_copy;
+ }
+
+ /* copy_from_user/copy_to_user return the number of bytes NOT copied */
+ bytes_left = copy_from_user(temp_buf, ((char*)src) + i, size_to_copy);
+ size_copied = size_to_copy - bytes_left;
+
+ bytes_left = copy_to_user(((char*)dest) + i, temp_buf, size_copied);
+ size_copied -= bytes_left;
+
+ bytes_left_to_copy -= size_copied;
+ retval += size_copied;
+
+ if (size_copied != size_to_copy)
+ {
+ break; /* Early out, we were not able to copy this entire block */
+ }
+ }
+
+ kfree(temp_buf);
+ }
+
+ return retval;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_mali.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_mali.c
new file mode 100755
index 00000000000000..b0032d2b75d302
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_mali.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h"
+
+/* Look up the platform-device MEM resource whose start equals addr. On a hit,
+ * and if res is non-NULL, fill in res->base/description and pick up an IRQ
+ * number from the immediately following resource if it is an IRQ resource
+ * (by convention the IRQ follows its MEM resource), else res->irq = -1.
+ * Returns _MALI_OSK_ERR_OK on a match, _MALI_OSK_ERR_ITEM_NOT_FOUND when
+ * there is no match or no platform device is registered. */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+
+ if (NULL == mali_platform_device)
+ {
+ /* Not connected to a device */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ for (i = 0; i < mali_platform_device->num_resources; i++)
+ {
+ if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+ mali_platform_device->resource[i].start == addr)
+ {
+ if (NULL != res)
+ {
+ res->base = addr;
+ res->description = mali_platform_device->resource[i].name;
+
+ /* Any (optional) IRQ resource belonging to this resource will follow */
+ if ((i + 1) < mali_platform_device->num_resources &&
+ IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i+1])))
+ {
+ res->irq = mali_platform_device->resource[i+1].start;
+ }
+ else
+ {
+ res->irq = -1;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+/* Return the lowest start address among the platform device's MEM resources,
+ * or 0 when there is no platform device or no MEM resource at all. */
+u32 _mali_osk_resource_base_address(void)
+{
+ u32 lowest_addr = 0xFFFFFFFF;
+ u32 ret = 0;
+
+ if (NULL != mali_platform_device)
+ {
+ int i;
+ for (i = 0; i < mali_platform_device->num_resources; i++)
+ {
+ if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+ mali_platform_device->resource[i].start < lowest_addr)
+ {
+ lowest_addr = mali_platform_device->resource[i].start;
+ ret = lowest_addr;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Copy the platform device's mali_gpu_device_data into the OS-neutral
+ * _mali_osk_device_data struct (the two layouts are field-for-field
+ * identical). Returns _MALI_OSK_ERR_ITEM_NOT_FOUND when no platform device
+ * or no platform_data is present. */
+_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(data);
+
+ if (NULL != mali_platform_device)
+ {
+ struct mali_gpu_device_data* os_data = NULL;
+
+ os_data = (struct mali_gpu_device_data*)mali_platform_device->dev.platform_data;
+ if (NULL != os_data)
+ {
+ /* Copy data from OS dependant struct to Mali neutral struct (identical!) */
+ data->dedicated_mem_start = os_data->dedicated_mem_start;
+ data->dedicated_mem_size = os_data->dedicated_mem_size;
+ data->shared_mem_size = os_data->shared_mem_size;
+ data->fb_start = os_data->fb_start;
+ data->fb_size = os_data->fb_size;
+ data->utilization_interval = os_data->utilization_interval;
+ data->utilization_callback = os_data->utilization_callback;
+ data->pmu_switch_delay = os_data->pmu_switch_delay;
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+/* Return MALI_TRUE if any IRQ number appears in more than one of the platform
+ * device's IRQ resources (i.e. interrupt lines are shared between cores).
+ * Uses a fixed 128-entry scratch array; the assert guards that bound against
+ * the resource count. */
+mali_bool _mali_osk_shared_interrupts(void)
+{
+ u32 irqs[128];
+ u32 i, j, irq, num_irqs_found = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+ for (i = 0; i < mali_platform_device->num_resources; i++)
+ {
+ if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags)
+ {
+ irq = mali_platform_device->resource[i].start;
+
+ for (j = 0; j < num_irqs_found; ++j)
+ {
+ if (irq == irqs[j])
+ {
+ return MALI_TRUE;
+ }
+ }
+
+ irqs[num_irqs_found++] = irq;
+ }
+ }
+
+ return MALI_FALSE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_math.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_math.c
new file mode 100755
index 00000000000000..869ddcb30efed7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+/* Count leading zeros of a 32-bit value via fls(); returns 32 when input is 0. */
+u32 inline _mali_osk_clz( u32 input )
+{
+ return 32-fls(input);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_memory.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_memory.c
new file mode 100755
index 00000000000000..f236fc31547864
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Zeroed allocation of n elements of 'size' bytes (kcalloc, GFP_KERNEL). */
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+/* Uninitialized allocation (kmalloc, GFP_KERNEL); may return NULL. */
+void inline *_mali_osk_malloc( u32 size )
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+/* Free memory from _mali_osk_malloc/_mali_osk_calloc; NULL is a no-op. */
+void inline _mali_osk_free( void *ptr )
+{
+ kfree(ptr);
+}
+
+/* Virtually-contiguous allocation (vmalloc) for large buffers. */
+void inline *_mali_osk_valloc( u32 size )
+{
+ return vmalloc(size);
+}
+
+/* Free memory from _mali_osk_valloc. */
+void inline _mali_osk_vfree( void *ptr )
+{
+ vfree(ptr);
+}
+
+/* memcpy wrapper; regions must not overlap. Returns dst. */
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+ return memcpy(dst, src, len);
+}
+
+/* memset wrapper. Returns s. */
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+ return memset(s, c, n);
+}
+
+/* Ask whether more memory may be allocated; always MALI_TRUE on Linux
+ * (the max_allocated hint is ignored). */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_misc.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_misc.c
new file mode 100755
index 00000000000000..dbc01bc2569d11
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_misc.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+/* printf-style debug output routed to the kernel log via vprintk. */
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+/* Bounded formatted print into buf. Uses vscnprintf, so the return value is
+ * the number of characters actually written (excluding the NUL), never the
+ * would-have-been length. */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+{
+ int res;
+ va_list args;
+ va_start(args, fmt);
+
+ res = vscnprintf(buf, (size_t)size, fmt, args);
+
+ va_end(args);
+ return res;
+}
+
+/* Fatal stop: dump a stack trace, then deliberately oops the kernel by
+ * writing through a NULL pointer so the failure is loud and debuggable. */
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ dump_stack();
+ *(int *)0 = 0;
+}
+
+/* Debug breakpoint hook; no debugger integration here, so just abort. */
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+/* Return the calling process ID (Linux tgid). */
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+/* Return the calling thread ID (Linux pid identifies the thread). */
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ return (u32)current->pid;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_notification.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_notification.c
new file mode 100755
index 00000000000000..f9af7fcb05f069
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_notification.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type.
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+ spinlock_t mutex; /**< Spinlock protecting the list (name kept for API history) */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+/** Internal wrapper that links one notification (and its trailing payload
+ * buffer) into a queue's list; recovered from 'data' via container_of(). */
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+/* Allocate and initialize an empty notification queue (lock, wait queue,
+ * list head). Returns NULL on allocation failure. */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+ _mali_osk_notification_queue_t * result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ spin_lock_init(&result->mutex);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+/* Allocate a notification of the given type with a 'size'-byte result buffer.
+ * Wrapper and payload are one allocation; the payload sits immediately after
+ * the wrapper (NULL result_buffer when size is 0). Returns a pointer to the
+ * embedded _mali_osk_notification_t, or NULL on allocation failure. */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size,
+ GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+ if (NULL == notification)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size)
+ {
+ notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ }
+ else
+ {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+/* Free a notification created by _mali_osk_notification_create(). Recovers
+ * the enclosing wrapper with container_of, which also releases the payload
+ * (wrapper and payload are one allocation). */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* Free the container */
+ kfree(notification);
+}
+
+/* Destroy a queue: drain and delete any undelivered notifications, then
+ * free the queue itself. Callers must ensure no threads are still waiting. */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+ _mali_osk_notification_t *result;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result))
+ {
+ _mali_osk_notification_delete( result );
+ }
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+/* Append a notification to the queue tail and wake one waiter. With
+ * MALI_UPPER_HALF_SCHEDULING this can run in interrupt context, hence the
+ * irqsave spinlock variant; otherwise a plain spinlock suffices. */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+/* Non-blocking pop of the oldest notification. On success stores it in
+ * *result and returns _MALI_OSK_ERR_OK; returns _MALI_OSK_ERR_ITEM_NOT_FOUND
+ * when the queue is empty (*result is then left untouched). Locking mirrors
+ * queue_send: irqsave only under MALI_UPPER_HALF_SCHEDULING. */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ if (!list_empty(&queue->head))
+ {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ return ret;
+}
+
+/* Blocking receive: sleep (interruptibly) until a notification can be
+ * dequeued into *result. Returns _MALI_OSK_ERR_OK on success, or
+ * _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted by a signal
+ * (*result stays NULL in that case). */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( result );
+
+ /* default result */
+ *result = NULL;
+
+ if (wait_event_interruptible(queue->receive_queue,
+ _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result)))
+ {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_pm.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_pm.c
new file mode 100755
index 00000000000000..d020724b21597a
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_pm.c
@@ -0,0 +1,110 @@
+/**
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static _mali_osk_atomic_t mali_pm_ref_count;
+
+/* Initialize the (debug) PM reference counter to zero. */
+void _mali_osk_pm_dev_enable(void)
+{
+ _mali_osk_atomic_init(&mali_pm_ref_count, 0);
+}
+
+/* Tear down the PM reference counter (counterpart of _mali_osk_pm_dev_enable). */
+void _mali_osk_pm_dev_disable(void)
+{
+ _mali_osk_atomic_term(&mali_pm_ref_count);
+}
+
+/* Can NOT run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ int err;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ err = pm_runtime_get_sync(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+ if (0 > err)
+ {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_atomic_inc(&mali_pm_ref_count);
+ MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+/* Drop a runtime-PM reference taken by _mali_osk_pm_dev_ref_add(). Safe in
+ * atomic context: uses the async autosuspend/put variants rather than a
+ * synchronous suspend. */
+void _mali_osk_pm_dev_ref_dec(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ _mali_osk_atomic_dec(&mali_pm_ref_count);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+ pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+ pm_runtime_put(&(mali_platform_device->dev));
+#endif
+ MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
+
+/* Can run in atomic context */
+/* Take a runtime-PM reference WITHOUT resuming the device (get_noresume).
+ * Safe in atomic context. Returns MALI_TRUE if the device already holds at
+ * least one powered reference, i.e. it is currently usable. */
+mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ u32 ref;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ pm_runtime_get_noresume(&(mali_platform_device->dev));
+ ref = _mali_osk_atomic_read(&mali_pm_ref_count);
+ MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+ return ref > 0 ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_TRUE;
+#endif
+}
+
+/* Can run in atomic context */
+/* Drop a reference taken by _mali_osk_pm_dev_ref_add_no_power_on().
+ * Safe in atomic context. */
+void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+ pm_runtime_put(&(mali_platform_device->dev));
+#endif
+ MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+#endif
+}
+
+/* Wait for any pending runtime-PM transitions on the device to complete. */
+void _mali_osk_pm_dev_barrier(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_barrier(&(mali_platform_device->dev));
+#endif
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_profiling.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_profiling.c
new file mode 100755
index 00000000000000..8ba83a1b46f6d2
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_profiling.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_pp_scheduler.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+
+/* Initialize profiling support; the only side effect here is enabling the
+ * SW-events user setting when auto_start is requested. Never fails. */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ if (MALI_TRUE == auto_start)
+ {
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Profiling teardown — stub, nothing allocated in init. */
+void _mali_osk_profiling_term(void)
+{
+ /* Nothing to do */
+}
+
+/* Start event recording — stub (event capture is handled by gator tracepoints). */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Stop event recording — stub; *count is left untouched. */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Number of recorded events — always 0, no in-driver event buffer exists. */
+u32 _mali_osk_profiling_get_count(void)
+{
+ return 0;
+}
+
+/* Fetch a recorded event — stub; output parameters are not written. */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Clear recorded events — stub, there is no buffer to clear. */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void)
+{
+ /* Nothing to do */
+ return _MALI_OSK_ERR_OK;
+}
+
+/* In-driver recording is never active in this implementation. */
+mali_bool _mali_osk_profiling_is_recording(void)
+{
+ return MALI_FALSE;
+}
+
+/* No stored recording ever exists in this implementation. */
+mali_bool _mali_osk_profiling_have_recording(void)
+{
+ return MALI_FALSE;
+}
+
+/* Forward software counters to the mali_sw_counters tracepoint, tagged with
+ * the calling process/thread IDs. */
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+ trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+
+/* User/kernel ioctl shim for profiling start. */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ return _mali_osk_profiling_start(&args->limit);
+}
+
+/* User/kernel shim for adding a profiling event. data[0]/data[1] from user
+ * space are deliberately overridden with the caller's pid/tid. */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add process and thread identificator in the first two data elements for events from user space */
+ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* User/kernel ioctl shim for profiling stop. */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ return _mali_osk_profiling_stop(&args->count);
+}
+
+/* User/kernel ioctl shim for fetching a profiling event. */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+/* User/kernel ioctl shim for clearing profiling events. */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ return _mali_osk_profiling_clear();
+}
+
+/* User/kernel ioctl shim for reporting software counters. */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+ _mali_osk_profiling_report_sw_counters(args->counters);
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters.
+ *
+ * Routes the request to the matching core: GP src0/src1, PP src0/src1, or
+ * one of up to three L2 caches (two counters each; even IDs select src0,
+ * odd IDs src1).
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+ if (COUNTER_VP_0_C0 == counter_id)
+ {
+ if (MALI_TRUE == mali_gp_job_set_gp_counter_src0(event_id))
+ {
+ return 1;
+ }
+ }
+
+ if (COUNTER_VP_0_C1 == counter_id)
+ {
+ if (MALI_TRUE == mali_gp_job_set_gp_counter_src1(event_id))
+ {
+ return 1;
+ }
+ }
+
+ if (COUNTER_FP_0_C0 == counter_id)
+ {
+ if (MALI_TRUE == mali_pp_job_set_pp_counter_src0(event_id))
+ {
+ return 1;
+ }
+ }
+
+ if (COUNTER_FP_0_C1 == counter_id)
+ {
+ if (MALI_TRUE == mali_pp_job_set_pp_counter_src1(event_id))
+ {
+ return 1;
+ }
+ }
+
+ if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id)
+ {
+ /* Counter IDs are laid out two-per-core: decode core index and source */
+ u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
+ struct mali_l2_cache_core* l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+ if (NULL != l2_cache_core)
+ {
+ u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
+ if (0 == counter_src)
+ {
+ if (MALI_TRUE == mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id))
+ {
+ return 1;
+ }
+ }
+ else
+ {
+ if (MALI_TRUE == mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id))
+ {
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ * the counter sources and values will be output
+ * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores
+ */
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
+{
+ struct mali_l2_cache_core *l2_cache;
+ u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
+ u32 i;
+ u32 ret = 0;
+
+ MALI_DEBUG_ASSERT(l2_cores_num <= 3);
+
+ for (i = 0; i < l2_cores_num; i++)
+ {
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+
+ if (NULL == l2_cache)
+ {
+ continue;
+ }
+
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache))
+ {
+ /* It is now safe to access the L2 cache core in order to retrieve the counters */
+ mali_l2_cache_core_get_counter_values(l2_cache,
+ &values->cores[i].source0,
+ &values->cores[i].value0,
+ &values->cores[i].source1,
+ &values->cores[i].value1);
+ }
+ else
+ {
+ /* The core was not available, set the right bit in the mask. */
+ ret |= (1 << i);
+ }
+ /* NOTE(review): unlock is called even when lock_power_state returned
+ * MALI_FALSE — presumably the lock is taken regardless of the returned
+ * power state; confirm against mali_l2_cache_lock_power_state(). */
+ mali_l2_cache_unlock_power_state(l2_cache);
+ }
+
+ return ret;
+}
+
+/**
+ * Called by gator to control the production of profiling information at
+ * runtime. Each supported action maps onto one user setting; unknown
+ * actions are silently ignored.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+ switch(action)
+ {
+ case FBDUMP_CONTROL_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+ break;
+ case FBDUMP_CONTROL_RATE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+ break;
+ case SW_COUNTER_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+ break;
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+ break;
+ default:
+ break; /* Ignore unimplemented actions */
+ }
+}
+
+/**
+ * Called by gator to get the Mali profiling API version this driver speaks.
+ */
+u32 _mali_profiling_get_api_version(void)
+{
+ return MALI_PROFILING_API_VERSION;
+}
+
+/**
+* Called by gator to get the data about the Mali instance in use:
+* product id, version, number of cores. The vertex-processor count is
+* hard-coded to 1 (Mali-400 has a single GP core).
+*/
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+ values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+ values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+ values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+ values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
+ values->num_of_vp_cores = 1;
+}
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_specific.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_specific.h
new file mode 100755
index 00000000000000..4ba1e1bd274139
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_specific.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+
+#include "mali_sync.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef CONFIG_SYNC
+typedef struct sync_timeline mali_sync_tl;
+typedef struct sync_pt mali_sync_pt;
+#endif /* CONFIG_SYNC */
+
+/* Thin wrapper over the kernel primitive. Note: mirrors copy_from_user()
+ * semantics, i.e. the return value is the number of bytes that could NOT
+ * be copied — 0 means complete success, not an error code. */
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+ return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_time.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_time.c
new file mode 100755
index 00000000000000..4bf2ce153530b5
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+/* Non-zero when ticka is after tickb; time_after() handles jiffies wrap. */
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+/* Convert milliseconds to scheduler ticks (jiffies). */
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+/* Convert scheduler ticks (jiffies) to milliseconds. */
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+/* Current tick counter. NOTE(review): on 64-bit kernels this truncates
+ * jiffies to 32 bits — callers must only do wrap-aware comparisons. */
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+/* Busy-wait (no sleep) for the given number of microseconds. */
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+/* Wall-clock time in nanoseconds. NOTE(review): getnstimeofday() is not
+ * monotonic — it can jump on settimeofday/NTP; confirm callers tolerate it. */
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_timers.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_timers.c
new file mode 100755
index 00000000000000..69e0c022889e59
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_timers.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Opaque timer handle: just wraps a kernel timer_list. */
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+/* Allocate and initialise a timer. The callback is NOT set here: callers
+ * must invoke _mali_osk_timer_setcallback() before the first add/mod. */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+/* Arm the timer to fire ticks_to_expire jiffies from now. */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = jiffies + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+/* Re-arm (or arm) the timer to a new expiry relative to now. */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+/* Cancel and wait for a running handler to finish. Must NOT be called
+ * from the timer callback itself (del_timer_sync would deadlock). */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+/* Cancel without waiting; the handler may still be running on another CPU. */
+void _mali_osk_timer_del_async( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer(&(tim->timer));
+}
+
+mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ return 1 == timer_pending(&(tim->timer));
+}
+
+/* Install the expiry callback and its opaque argument. */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+/* Free the timer. NOTE(review): does not cancel first — callers are
+ * presumably required to del() before term(); confirm at call sites. */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wait_queue.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wait_queue.c
new file mode 100755
index 00000000000000..e81df2c8e5f4a0
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wait_queue.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Opaque wait-queue handle: wraps a kernel wait_queue_head_t. */
+struct _mali_osk_wait_queue_t_struct
+{
+ wait_queue_head_t wait_queue;
+};
+
+/* Allocate and initialise an empty wait queue; NULL on allocation failure. */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
+{
+ _mali_osk_wait_queue_t* ret = NULL;
+
+ ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+ if (NULL == ret)
+ {
+ return ret;
+ }
+
+ init_waitqueue_head(&ret->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+ return ret;
+}
+
+/* Block (uninterruptibly) until condition() evaluates true. condition is
+ * re-evaluated on every wake-up, as required by wait_event(). */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition());
+}
+
+/* Wake every waiter currently blocked on the queue. */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return;
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+/* Free the queue. Callers must guarantee no waiters remain. */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wq.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wq.c
new file mode 100755
index 00000000000000..33d5df1b5a299c
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_osk_wq.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+/* One deferred-work item: user handler + argument, plus the kernel
+ * work_struct that actually gets queued. */
+typedef struct _mali_osk_wq_work_t_struct
+{
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+/* Dedicated Mali workqueue (GPL builds only; non-GPL builds fall back to
+ * the shared system workqueue via schedule_work()). */
+struct workqueue_struct *mali_wq = NULL;
+#endif
+
+static void _mali_osk_wq_work_func ( struct work_struct *work );
+
+/* Create the Mali workqueue. WQ_UNBOUND (>= 2.6.36) lets work run on any
+ * CPU rather than being pinned to the submitting one. */
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL == mali_wq);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+ mali_wq = alloc_workqueue("mali", WQ_UNBOUND, 0);
+#else
+ mali_wq = create_workqueue("mali");
+#endif
+ if(NULL == mali_wq)
+ {
+ MALI_PRINT_ERROR(("Unable to create Mali workqueue\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Block until all currently queued Mali work has executed. */
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+ flush_workqueue(mali_wq);
+#else
+ flush_scheduled_work();
+#endif
+}
+
+/* Drain and destroy the workqueue created by _mali_osk_wq_init(). */
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL != mali_wq);
+
+ flush_workqueue(mali_wq);
+ destroy_workqueue(mali_wq);
+ mali_wq = NULL;
+#else
+ flush_scheduled_work();
+#endif
+}
+
+/* Allocate a work item bound to (handler, data); NULL on allocation failure.
+ * The item is not queued until _mali_osk_wq_schedule_work(). */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data )
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+
+ INIT_WORK( &work->work_handle, _mali_osk_wq_work_func );
+
+ return work;
+}
+
+/* Free a work item, first flushing the whole queue so the item cannot be
+ * in-flight when it is freed. */
+void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work )
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ _mali_osk_wq_flush();
+ kfree(work_object);
+}
+
+/* Free a work item without flushing — caller must guarantee it is idle. */
+void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work )
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ kfree(work_object);
+}
+
+/* Queue the item for asynchronous execution. */
+void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work )
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+/* Trampoline: recover the enclosing work object and invoke the user handler. */
+static void _mali_osk_wq_work_func ( struct work_struct *work )
+{
+ mali_osk_wq_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+ work_object->handler(work_object->data);
+}
+
+
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_pmu_power_up_down.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_pmu_power_up_down.c
new file mode 100755
index 00000000000000..b8faf54b754886
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_pmu_power_up_down.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_pmu.h"
+#include "mali_pp_scheduler.h"
+#include "linux/mali/mali_utgard.h"
+
+/* Mali PMU power up/down APIs */
+
+/* Power up all Mali domains via the PMU. Returns 0 on success, -ENXIO if
+ * no global PMU core exists, -EFAULT on a PMU programming failure. */
+int mali_pmu_powerup(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ if (NULL == pmu)
+ {
+ return -ENXIO;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_power_up_all(pmu))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerup);
+
+/* Power down all Mali domains via the PMU. Same error convention as
+ * mali_pmu_powerup(). */
+int mali_pmu_powerdown(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ if (NULL == pmu)
+ {
+ return -ENXIO;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pmu_power_down_all(pmu))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mali_pmu_powerdown);
+
+/* Limit the number of active PP cores (DVFS-style performance scaling);
+ * return value is forwarded from the PP scheduler. */
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+ return mali_pp_scheduler_set_perf_level(num_cores);
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_events.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_events.h
new file mode 100755
index 00000000000000..10da081af48ad3
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_events.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_gator_api.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_gator_api.h
new file mode 100755
index 00000000000000..e3d836fedef765
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_gator_api.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.c
new file mode 100755
index 00000000000000..b2558008f0a0fa
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+/* One recorded profiling event: timestamp, packed event id, 5 payload words. */
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+/* Recorder lifecycle: UNINITIALIZED -> IDLE -> RUNNING -> RETURN (events
+ * available for readout) -> IDLE (after clear). */
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+/* Ring buffer of events; capacity is profile_mask + 1 (a power of two). */
+static mali_profiling_entry* profile_entries = NULL;
+static _mali_osk_atomic_t profile_insert_index;
+static u32 profile_mask = 0;
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/* Tracepoint probe: forwards mali_timeline_event into the ring buffer.
+ * Registered in _mali_internal_profiling_start(), unregistered in stop(). */
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+ int d2, unsigned int d3, unsigned int d4))
+{
+ add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+/* Initialise the profiling recorder. The event buffer itself is allocated
+ * lazily by _mali_internal_profiling_start(). When auto_start is set,
+ * recording begins immediately with the maximum buffer size. */
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PROFILING);
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start)
+ {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
+ {
+ /* NOTE(review): lock is left allocated here; cleanup is
+ * presumably deferred to _mali_internal_profiling_term(). */
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the recorder: stop any active recording, free the event
+ * buffer and the lock. Safe to call regardless of current state. */
+void _mali_internal_profiling_term(void)
+{
+ u32 count;
+
+ /* Ensure profiling is stopped */
+ _mali_internal_profiling_stop(&count);
+
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock)
+ {
+ _mali_osk_lock_term(lock);
+ lock = NULL;
+ }
+}
+
+/* Start recording. *limit is an in/out parameter: requested capacity on
+ * entry, actual (rounded-down power-of-two, capped) capacity on return. */
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
+{
+ _mali_osk_errcode_t ret;
+ mali_profiling_entry *new_profile_entries;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (MALI_PROFILING_STATE_RUNNING == prof_state)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ /* NOTE(review): buffer is sized from the caller's raw *limit, but the
+ * clamp to MALI_PROFILING_MAX_BUFFER_ENTRIES only happens below — an
+ * oversized request over-allocates here. Consider clamping first. */
+ new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+ if (NULL == new_profile_entries)
+ {
+ /* NOTE(review): BUG — this path returns while still holding
+ * `lock` (missing _mali_osk_lock_signal), and the vfree of a
+ * NULL pointer is a no-op. */
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit)
+ {
+ *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ /* Round capacity down to a power of two so the insert index can be
+ * wrapped with a simple AND mask. */
+ profile_mask = 1;
+ while (profile_mask <= *limit)
+ {
+ profile_mask <<= 1;
+ }
+ profile_mask >>= 1;
+
+ *limit = profile_mask;
+
+ profile_mask--; /* turns the power of two into a mask of one less */
+
+ if (MALI_PROFILING_STATE_IDLE != prof_state)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ profile_entries = new_profile_entries;
+
+ ret = _mali_timestamp_reset();
+
+ if (_MALI_OSK_ERR_OK == ret)
+ {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ }
+ else
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ /* NOTE(review): the tracepoint is registered even when
+ * _mali_timestamp_reset() failed and profile_entries was just freed;
+ * a subsequent event would make add_event() dereference NULL. */
+ register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+/* Append one event to the ring buffer. Lock-free: slot reservation is a
+ * single atomic increment, wrapped into the buffer with profile_mask.
+ * Old entries are silently overwritten once the buffer wraps. */
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ /* If event is "leave API function", add current memory usage to the event
+ * as data point 4. This is used in timeline profiling to indicate how
+ * much memory was used when leaving a function. */
+ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC))
+ {
+ profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+ }
+}
+
+/* Stop recording and report, via *count, how many events are available
+ * for readout (capped at the ring-buffer capacity). Moves the recorder
+ * into the RETURN state so _mali_internal_profiling_get_event() works. */
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (MALI_PROFILING_STATE_RUNNING != prof_state)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user to retrieve events), no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Wait until no CPU can still be inside the probe before readers touch
+ * the buffer. */
+ tracepoint_synchronize_unregister();
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+ if (*count > profile_mask) *count = profile_mask;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Number of events available for readout; 0 unless the recorder is in the
+ * RETURN state. Capped at the ring-buffer capacity (profile_mask). */
+u32 _mali_internal_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (MALI_PROFILING_STATE_RETURN == prof_state)
+ {
+ retval = _mali_osk_atomic_read(&profile_insert_index);
+ if (retval > profile_mask) retval = profile_mask;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ return retval;
+}
+
+/* Copy out event `index` (0-based, oldest first). Only valid in the
+ * RETURN state. If the buffer wrapped, the logical index is rebased onto
+ * the physical ring position. */
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+ /* NOTE(review): raw_index is sampled before the lock is taken; safe in
+ * practice only because the RETURN state stops all writers — confirm. */
+ u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (index < profile_mask)
+ {
+ /* Buffer wrapped: oldest entry sits just after the insert point. */
+ if ((raw_index & ~profile_mask) != 0)
+ {
+ index += raw_index;
+ index &= profile_mask;
+ }
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if(index >= raw_index)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+ }
+ else
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Discard recorded events and return to the IDLE state so a new recording
+ * can be started. Only valid from the RETURN state. */
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (MALI_PROFILING_STATE_RETURN != prof_state)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+/* True while events are actively being captured (RUNNING state). */
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+/* True when a finished recording is waiting to be read out (RETURN state). */
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.h
new file mode 100755
index 00000000000000..43357a4a73ca90
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_profiling_internal.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_osk.h"
+
+/* NOTE(review): init/start/stop are declared here returning int, but the
+ * definitions in mali_profiling_internal.c return _mali_osk_errcode_t —
+ * confirm the types are layout-compatible or align the prototypes. */
+int _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+int _mali_internal_profiling_stop(u32 * count);
+int _mali_internal_profiling_start(u32 * limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.c
new file mode 100755
index 00000000000000..ef49869a16ba5e
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.c
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/sync.h>
+#include <linux/timer.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Mali timeline: `counter` is the order number handed to the most recently
+ * allocated sync point; `signalled` is the highest order signalled so far. */
+struct mali_sync_timeline
+{
+ struct sync_timeline timeline;
+ atomic_t counter;
+ atomic_t signalled;
+};
+
+/* Mali sync point: position on the timeline (`order`), sticky error code,
+ * and an optional timeout timer used only by "timed" points. */
+struct mali_sync_pt
+{
+ struct sync_pt pt;
+ u32 order;
+ s32 error;
+ struct timer_list timer;
+};
+
+static void mali_sync_timed_pt_timeout(unsigned long data);
+
+/* Downcast from the generic framework objects to the Mali wrappers. */
+static inline struct mali_sync_timeline *to_mali_sync_timeline(struct sync_timeline *timeline)
+{
+ return container_of(timeline, struct mali_sync_timeline, timeline);
+}
+
+static inline struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, pt);
+}
+
+/* sync_timeline_ops.dup: clone a sync point onto the same parent.
+ * NOTE(review): only `order` is copied — `error` and the timer are not;
+ * presumably duplicates of timed/errored points are never made. Confirm. */
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_pt *new_mpt;
+ struct sync_pt *new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+
+ if (!new_pt)
+ {
+ return NULL;
+ }
+
+ new_mpt = to_mali_sync_pt(new_pt);
+ new_mpt->order = mpt->order;
+
+ return new_pt;
+
+}
+
+/* sync_timeline_ops.has_signaled: negative sticky error, 1 when the
+ * timeline has advanced to/past this point, 0 otherwise. */
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(pt->parent);
+ long diff;
+
+ if (0 != mpt->error)
+ {
+ return mpt->error;
+ }
+
+ diff = atomic_read(&mtl->signalled) - mpt->order;
+
+ return diff >= 0;
+}
+
+/* sync_timeline_ops.compare: order points by allocation sequence
+ * (-1 / 0 / 1 as a is before / equal to / after b). */
+static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
+ struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
+
+ long diff = ma->order - mb->order;
+
+ if (diff < 0)
+ {
+ return -1;
+ }
+ else if (diff == 0)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+}
+
+/* sync_timeline_ops.free_pt: cancel the timeout timer if this was a timed
+ * point (identified by its timer callback) before the framework frees it. */
+static void timeline_free_pt(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
+ if (mpt->timer.function == mali_sync_timed_pt_timeout)
+ {
+ del_timer_sync(&mpt->timer);
+ }
+}
+
+/* debugfs pretty-printers for the timeline and its points. */
+static void timeline_print_tl(struct seq_file *s, struct sync_timeline *sync_timeline)
+{
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(sync_timeline);
+
+ seq_printf(s, "%u, %u", atomic_read(&mtl->signalled), atomic_read(&mtl->counter));
+}
+
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(sync_pt);
+
+ seq_printf(s, "%u", mpt->order);
+
+}
+
+/* Ops table registered with the Android sync framework for Mali timelines. */
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .free_pt = timeline_free_pt,
+ .print_obj = timeline_print_tl,
+ .print_pt = timeline_print_pt
+};
+
+/* Identify Mali-owned timelines by their ops pointer. */
+int mali_sync_timeline_is_ours(struct sync_timeline *timeline)
+{
+ return (timeline->ops == &mali_timeline_ops);
+}
+
+/* Create a Mali timeline with both counters at zero; NULL on failure. */
+struct sync_timeline *mali_sync_timeline_alloc(const char * name)
+{
+ struct sync_timeline *tl;
+ struct mali_sync_timeline *mtl;
+
+ tl = sync_timeline_create(&mali_timeline_ops,
+ sizeof(struct mali_sync_timeline), name);
+ if (!tl)
+ {
+ return NULL;
+ }
+
+ /* Set the counter in our private struct */
+ mtl = to_mali_sync_timeline(tl);
+ atomic_set(&mtl->counter, 0);
+ atomic_set(&mtl->signalled, 0);
+
+ return tl;
+}
+
+/* Allocate the next sync point on `parent`, assigning it the next order
+ * number; NULL on failure. The point's timer is left uninitialised (only
+ * mali_sync_timed_pt_alloc() sets it up). */
+struct sync_pt *mali_sync_pt_alloc(struct sync_timeline *parent)
+{
+ struct sync_pt *pt = sync_pt_create(parent, sizeof(struct mali_sync_pt));
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(parent);
+ struct mali_sync_pt *mpt;
+
+ if (!pt)
+ {
+ return NULL;
+ }
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->order = atomic_inc_return(&mtl->counter);
+ mpt->error = 0;
+
+ return pt;
+}
+
+/* Timer expiry handler for a timed sync point: signal it with -ETIMEDOUT
+ * so waiters are released rather than hanging forever. */
+static void mali_sync_timed_pt_timeout(unsigned long data)
+{
+ struct sync_pt *pt = (struct sync_pt *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mali_sync_signal_pt(pt, -ETIMEDOUT);
+}
+
+/* Allocate a sync point that auto-signals with -ETIMEDOUT after
+ * MALI_SYNC_TIMED_FENCE_TIMEOUT ms unless committed first via
+ * mali_sync_timed_commit(). */
+struct sync_pt *mali_sync_timed_pt_alloc(struct sync_timeline *parent)
+{
+ struct sync_pt *pt;
+ struct mali_sync_pt *mpt;
+ const u32 timeout = msecs_to_jiffies(MALI_SYNC_TIMED_FENCE_TIMEOUT);
+
+ pt = mali_sync_pt_alloc(parent);
+ if (NULL == pt) return NULL;
+ mpt = to_mali_sync_pt(pt);
+
+ init_timer(&mpt->timer);
+
+ mpt->timer.function = mali_sync_timed_pt_timeout;
+ mpt->timer.data = (unsigned long)pt;
+ mpt->timer.expires = jiffies + timeout;
+
+ add_timer(&mpt->timer);
+
+ return pt;
+}
+
+/*
+ * Returns 0 if sync_pt has been committed and is ready for use, -ETIMEDOUT if
+ * timeout already happened and the fence has been signalled.
+ *
+ * If an error occurs the sync point can not be used.
+ */
+int mali_sync_timed_commit(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ int ret;
+
+ if (!mali_sync_timeline_is_ours(pt->parent))
+ {
+ return -EINVAL;
+ }
+
+ /* Stop timer */
+ ret = del_timer_sync(&mpt->timer);
+
+ /* del_timer_sync() returns 0 when the timer was already inactive,
+  * i.e. the timeout handler has fired and signalled the point. */
+ if (0 == ret)
+ {
+ return -ETIMEDOUT;
+ }
+
+ MALI_DEBUG_ASSERT(0 == timeline_has_signaled(pt));
+
+ return 0;
+}
+
+/* Signal a sync point: record an optional (negative) error on the point,
+ * advance the timeline's `signalled` counter to this point's order via a
+ * CAS loop (tolerating concurrent signallers), then notify the framework. */
+void mali_sync_signal_pt(struct sync_pt *pt, int error)
+{
+ struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+ struct mali_sync_timeline *mtl = to_mali_sync_timeline(pt->parent);
+ int signalled;
+ long diff;
+
+ if (0 != error)
+ {
+ MALI_DEBUG_ASSERT(0 > error);
+ mpt->error = error;
+ }
+
+ do {
+
+ signalled = atomic_read(&mtl->signalled);
+
+ diff = signalled - mpt->order;
+
+ if (diff > 0)
+ {
+ /* The timeline is already at or ahead of this point. This should not happen unless userspace
+ * has been signalling fences out of order, so warn but don't violate the sync_pt API.
+ * The warning is only in debug builds to prevent a malicious user being able to spam dmesg.
+ */
+ MALI_DEBUG_PRINT_ERROR(("Sync points were triggerd in a different order to allocation!\n"));
+ return;
+ }
+ } while (atomic_cmpxchg(&mtl->signalled, signalled, mpt->order) != signalled);
+
+ sync_timeline_signal(pt->parent);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.h
new file mode 100755
index 00000000000000..ccd218a0a97750
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#ifdef CONFIG_SYNC
+
+#include <linux/seq_file.h>
+#include <linux/sync.h>
+
+#define MALI_SYNC_TIMED_FENCE_TIMEOUT 4000 /* 4s */
+
+/*
+ * Create a stream object.
+ * Built on top of timeline object.
+ * Exposed as a file descriptor.
+ * Life-time controlled via the file descriptor:
+ * - dup to add a ref
+ * - close to remove a ref
+ */
+_mali_osk_errcode_t mali_stream_create(const char * name, int * out_fd);
+
+/*
+ * Create a fence in a stream object
+ */
+struct sync_pt *mali_stream_create_point(int tl_fd);
+int mali_stream_create_fence(struct sync_pt *pt);
+int mali_stream_create_empty_fence(int tl_fd);
+
+/**
+ * Commit an empty timed fence
+ *
+ * This stops the timer of the empty fence and returns whether or not the fence
+ * is still suitable for use.
+ *
+ * Returns -ETIMEDOUT if fence is already signalled, in which case it can not be
+ * used, or 0 when the timer was stopped and the fence is OK to use.
+ */
+int mali_sync_timed_commit(struct sync_pt *pt);
+
+/*
+ * Validate a fd to be a valid fence
+ * No reference is taken.
+ *
+ * This function is only usable to catch unintentional user errors early,
+ * it does not stop malicious code changing the fd after this function returns.
+ */
+_mali_osk_errcode_t mali_fence_validate(int fd);
+
+
+/* Returns true if the specified timeline is allocated by Mali */
+int mali_sync_timeline_is_ours(struct sync_timeline *timeline);
+
+/* Allocates a timeline for Mali
+ *
+ * One timeline should be allocated per API context.
+ */
+struct sync_timeline *mali_sync_timeline_alloc(const char *name);
+
+/* Allocates a sync point within the timeline.
+ *
+ * The timeline must be the one allocated by mali_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ */
+struct sync_pt *mali_sync_pt_alloc(struct sync_timeline *parent);
+
+/* Allocates a timed sync point within the timeline.
+ *
+ * The timeline must be the one allocated by mali_sync_timeline_alloc
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ *
+ * Timed sync points should be backed by a proper event before reaching the
+ * timeout. If timeout is reached the fence will be signalled with an error (-ETIMEDOUT).
+ */
+struct sync_pt *mali_sync_timed_pt_alloc(struct sync_timeline *parent);
+
+/* Signals a particular sync point
+ *
+ * Sync points must be triggered in *exactly* the same order as they are allocated.
+ *
+ * If they are signalled in the wrong order then a message will be printed in debug
+ * builds, and attempts to signal out-of-order sync_pts will otherwise be ignored.
+ */
+void mali_sync_signal_pt(struct sync_pt *pt, int error);
+
+#endif /* CONFIG_SYNC */
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_sync_user.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync_user.c
new file mode 100755
index 00000000000000..7ea6dc3ebe1166
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_sync_user.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync_user.c
+ *
+ */
+
+#ifdef CONFIG_SYNC
+
+#include <linux/sched.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/version.h>
+#include <asm/uaccess.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+/* Release hook for the stream fd: tears down the backing timeline. */
+static int mali_stream_close(struct inode *inode, struct file *file)
+{
+    struct sync_timeline *tl = file->private_data;
+
+    BUG_ON(!tl);
+    sync_timeline_destroy(tl);
+    return 0;
+}
+
+/* File operations backing the stream fd; release destroys the timeline.
+ * Declared const: the table is never modified, and both anon_inode_getfd()
+ * and the f_op identity check accept a const pointer. */
+static const struct file_operations stream_fops =
+{
+    .owner = THIS_MODULE,
+    .release = mali_stream_close,
+};
+
+/* Create a stream (timeline) and expose it as an anonymous-inode fd.
+ * The timeline's lifetime follows the fd from here on. */
+_mali_osk_errcode_t mali_stream_create(const char * name, int *out_fd)
+{
+    struct sync_timeline *tl;
+
+    BUG_ON(!out_fd);
+
+    tl = mali_sync_timeline_alloc(name);
+    if (!tl)
+        return _MALI_OSK_ERR_FAULT;
+
+    *out_fd = anon_inode_getfd(name, &stream_fops, tl, O_RDONLY | O_CLOEXEC);
+    if (*out_fd < 0)
+    {
+        /* fd installation failed: drop the timeline again. */
+        sync_timeline_destroy(tl);
+        return _MALI_OSK_ERR_FAULT;
+    }
+
+    return _MALI_OSK_ERR_OK;
+}
+
+/* Create a sync point on the timeline backing @tl_fd.
+ *
+ * Verifies that the fd really is one of our stream objects (by comparing
+ * f_op) before touching its private_data. A timed (timeout-armed) point
+ * is created when @timed is true. Returns NULL on any failure.
+ *
+ * Fix: removed the dead 'if (!pt) { pt = NULL; goto out; }' block - it
+ * assigned NULL to an already-NULL pointer and fell through to the same
+ * label regardless.
+ */
+static mali_sync_pt *mali_stream_create_point_internal(int tl_fd, mali_bool timed)
+{
+    struct sync_timeline *tl;
+    struct sync_pt *pt = NULL;
+    struct file *tl_file;
+
+    tl_file = fget(tl_fd);
+    if (tl_file == NULL)
+        return NULL;
+
+    /* Reject fds that are not our stream objects. */
+    if (tl_file->f_op != &stream_fops)
+        goto out;
+
+    tl = tl_file->private_data;
+
+    if (unlikely(timed))
+    {
+        pt = mali_sync_timed_pt_alloc(tl);
+    }
+    else
+    {
+        pt = mali_sync_pt_alloc(tl);
+    }
+
+out:
+    fput(tl_file);
+
+    return pt;
+}
+
+/* Create an (untimed) sync point on the stream identified by @tl_fd.
+ * Thin wrapper around mali_stream_create_point_internal().
+ * NOTE(review): mali_sync.h declares this returning 'struct sync_pt *';
+ * presumably mali_sync_pt is a typedef of that - confirm the types match. */
+mali_sync_pt *mali_stream_create_point(int tl_fd)
+{
+ return mali_stream_create_point_internal(tl_fd, MALI_FALSE);
+}
+
+/* Wrap sync point @pt in a fence and install it on a new fd.
+ *
+ * Ownership: on success the fence owns @pt and the returned fd owns the
+ * fence; on fence-creation failure @pt is freed here and -EFAULT is
+ * returned; on fd allocation failure the fence (and with it @pt) is
+ * dropped and the negative errno from fd allocation is returned.
+ */
+int mali_stream_create_fence(mali_sync_pt *pt)
+{
+ struct sync_fence *fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
+ struct fdtable * fdt;
+ struct files_struct * files;
+#endif
+ int fd = -1;
+
+ fence = sync_fence_create("mali_fence", pt);
+ if (!fence)
+ {
+ sync_pt_free(pt);
+ fd = -EFAULT;
+ goto out;
+ }
+
+ /* create a fd representing the fence */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ {
+ sync_fence_put(fence);
+ goto out;
+ }
+#else
+ /* Pre-3.6 kernels have no get_unused_fd_flags(); emulate O_CLOEXEC by
+ * setting the close-on-exec bit by hand under the files_struct lock. */
+ fd = get_unused_fd();
+ if (fd < 0)
+ {
+ sync_fence_put(fence);
+ goto out;
+ }
+
+ files = current->files;
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+ __set_close_on_exec(fd, fdt);
+#else
+ FD_SET(fd, fdt->close_on_exec);
+#endif /* Linux >= 3.4 */
+ spin_unlock(&files->file_lock);
+#endif /* Linux >= 3.6 */
+
+ /* bind fence to the new fd */
+ sync_fence_install(fence, fd);
+
+out:
+ return fd;
+}
+
+/* Create a timed ("empty") sync point on stream @tl_fd and wrap it in a
+ * fence fd. Returns the fd, -ENOMEM if the point could not be allocated,
+ * or the negative errno from fence creation. */
+int mali_stream_create_empty_fence(int tl_fd)
+{
+    mali_sync_pt *pt = mali_stream_create_point_internal(tl_fd, MALI_TRUE);
+
+    if (NULL == pt)
+        return -ENOMEM;
+
+    return mali_stream_create_fence(pt);
+}
+
+/* Check that @fd refers to a sync fence. Takes no lasting reference:
+ * the fence is looked up and immediately released again. */
+_mali_osk_errcode_t mali_fence_validate(int fd)
+{
+    struct sync_fence *fence = sync_fence_fdget(fd);
+
+    if (NULL == fence)
+        return _MALI_OSK_ERR_FAULT;
+
+    sync_fence_put(fence);
+    return _MALI_OSK_ERR_OK;
+}
+
+#endif /* CONFIG_SYNC */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_uk_types.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_uk_types.h
new file mode 100755
index 00000000000000..4a8f34bcc75979
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_uk_types.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_core.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_core.c
new file mode 100755
index 00000000000000..a162e295064d85
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_core.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memort allocation functions */
+#include <linux/string.h> /* memset */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_sync.h"
+
+/* Negotiate the user/kernel API version: read the version user space was
+ * built against, let the core decide compatibility, and report both the
+ * negotiated version and the compatibility flag back. */
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+    _mali_uk_get_api_version_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+    if (0 != get_user(kargs.version, &uargs->version))
+    {
+        return -EFAULT;
+    }
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_get_api_version(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    if (0 != put_user(kargs.version, &uargs->version))
+    {
+        return -EFAULT;
+    }
+    if (0 != put_user(kargs.compatible, &uargs->compatible))
+    {
+        return -EFAULT;
+    }
+
+    return 0;
+}
+
+/* Grant the calling session compositor ("super") scheduling priority.
+ * Only takes effect when CONFIG_SYNC is off; with native fences the
+ * priority boost is unnecessary and the call is just logged.
+ * Always returns 0. */
+int compositor_priority_wrapper(struct mali_session_data *session_data)
+{
+#ifndef CONFIG_SYNC
+ /* Compositor super priority is currently only needed and supported in
+ * systems without linux fences */
+ _mali_ukk_compositor_priority(session_data);
+#else
+ MALI_DEBUG_PRINT(2, ("Compositor Pid: %d - Using native fence\n", _mali_osk_get_pid() ));
+#endif
+
+ return 0;
+}
+
+/* Block until the core posts a notification for this session, then copy
+ * it to user space. During core shutdown only the notification type is
+ * copied back (the rest of the payload is not meaningful then). */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ /* Shutdown in progress: report only the type. */
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Post a user-originated notification of the given type to this session's
+ * notification queue. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+    _mali_uk_post_notification_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+    kargs.ctx = session_data;
+
+    /* Only the notification type is taken from user space. */
+    if (0 != get_user(kargs.type, &uargs->type)) return -EFAULT;
+
+    err = _mali_ukk_post_notification(&kargs);
+
+    return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/* Copy the session's user-setting values out to user space.
+ * NOTE(review): the whole kargs struct is copied back; this relies on
+ * _mali_ukk_get_user_settings() filling every field - confirm it does,
+ * otherwise uninitialised stack bytes could leak to user space. */
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
+
+#ifdef CONFIG_SYNC
+/* Create a per-process Mali sync stream and return its fd to user space.
+ * The stream is named "mali-<pid>" for identification in debugfs. */
+int stream_create_wrapper(struct mali_session_data *session_data, _mali_uk_stream_create_s __user *uargs)
+{
+    _mali_uk_stream_create_s kargs;
+    _mali_osk_errcode_t err;
+    char name[32];
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+    /* Zero the whole struct: it is copied back to user space in full below,
+     * so uninitialised stack bytes (padding or unset fields) must not leak. */
+    memset(&kargs, 0, sizeof(kargs));
+
+    snprintf(name, sizeof(name), "mali-%u", _mali_osk_get_pid());
+
+    kargs.ctx = session_data;
+    err = mali_stream_create(name, &kargs.fd);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+    if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_stream_create_s))) return -EFAULT;
+
+    return 0;
+}
+
+/* Create an empty (timed) fence on the stream given by uargs->stream and
+ * return the fence fd in uargs->fence. A negative fence value from
+ * mali_stream_create_empty_fence() is returned directly as the errno. */
+int sync_fence_create_empty_wrapper(struct mali_session_data *session_data, _mali_uk_fence_create_empty_s __user *uargs)
+{
+ _mali_uk_fence_create_empty_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.stream, &uargs->stream)) return -EFAULT;
+
+ kargs.fence = mali_stream_create_empty_fence(kargs.stream);
+ if (0 > kargs.fence)
+ {
+ return kargs.fence;
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_fence_create_empty_s))) return -EFAULT;
+
+ return 0;
+}
+
+/* Check that the fd supplied by user space refers to a sync fence.
+ * Returns 0 if valid, -EINVAL if not, -EFAULT on a bad user pointer. */
+int sync_fence_validate_wrapper(struct mali_session_data *session, _mali_uk_fence_validate_s __user *uargs)
+{
+    int fd;
+
+    if (0 != get_user(fd, &uargs->fd))
+        return -EFAULT;
+
+    return (_MALI_OSK_ERR_OK == mali_fence_validate(fd)) ? 0 : -EINVAL;
+}
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_gp.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_gp.c
new file mode 100755
index 00000000000000..c6339dddc55667
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_gp.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Start a GP (vertex) job. The ukk layer reads the job description from
+ * user space itself, so uargs is passed straight through. */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    err = _mali_ukk_gp_start_job(session_data, uargs);
+
+    return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/* Report the GP core's hardware version to user space. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+    _mali_uk_get_gp_core_version_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_get_gp_core_version(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    /* no known transactions to roll-back */
+    return (0 == put_user(kargs.version, &uargs->version)) ? 0 : -EFAULT;
+}
+
+/* Forward user space's response to a GP job suspend event to the core and
+ * hand the (possibly updated) cookie back. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+/* Report the number of GP cores to user space. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+    _mali_uk_get_gp_number_of_cores_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_get_gp_number_of_cores(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    /* no known transactions to roll-back */
+    return (0 == put_user(kargs.number_of_cores, &uargs->number_of_cores)) ? 0 : -EFAULT;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_mem.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_mem.c
new file mode 100755
index 00000000000000..011ee159efdeb9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_mem.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Initialise the session's Mali memory system and report the Mali address
+ * base and size back to user space. If reporting fails, the initialisation
+ * is rolled back with _mali_ukk_term_mem() so nothing is leaked. */
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+    _mali_uk_init_mem_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_init_mem(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+    if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+    return 0;
+
+mem_init_rollback:
+    {
+        /* Distinct name (was 'kargs', shadowing the outer variable). */
+        _mali_uk_term_mem_s term_args;
+        term_args.ctx = session_data;
+        err = _mali_ukk_term_mem(&term_args);
+        if (_MALI_OSK_ERR_OK != err)
+        {
+            MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem, as a result of failing put_user(), failed\n"));
+        }
+    }
+    return -EFAULT;
+}
+
+/* Tear down the session's Mali memory system. */
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+    _mali_uk_term_mem_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_term_mem(&kargs);
+
+    return (_MALI_OSK_ERR_OK == err) ? 0 : map_errcode(err);
+}
+
+/* Perform a bounds-checked copy between two user buffers on behalf of the
+ * session, reporting the number of bytes actually written back in ->size. */
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs)
+{
+ _mali_uk_mem_write_safe_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ /* Check if we can access the buffers */
+ if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+ || !access_ok(VERIFY_READ, kargs.src, kargs.size))
+ {
+ return -EINVAL;
+ }
+
+ /* Check if size wraps */
+ /* NOTE(review): the wrap check runs after access_ok(); presumably
+ * access_ok() already rejects wrapped ranges on this arch - confirm. */
+ if ((kargs.size + kargs.dest) <= kargs.dest
+ || (kargs.size + kargs.src) <= kargs.src)
+ {
+ return -EINVAL;
+ }
+
+ err = _mali_ukk_mem_write_safe(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ /* Report how many bytes were actually written. */
+ if (0 != put_user(kargs.size, &uargs->size))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Map an external (physically contiguous) memory range into the session's
+ * Mali address space. The cookie identifying the mapping is returned to
+ * user space; if that put_user() fails after a successful map, the mapping
+ * is rolled back so nothing leaks. */
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Map _mali_ukk_map_external_mem()'s result onto an errno. */
+ return map_errcode(err_code);
+}
+
+/* Unmap a previously mapped external memory range, identified by the
+ * cookie supplied from user space. */
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+    _mali_uk_unmap_external_mem_s args;
+    _mali_osk_errcode_t err_code;
+
+    /* session_data was validated by the caller; only check the argument. */
+    MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+    /* copy_from_user() returns the number of bytes NOT copied. */
+    if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)))
+    {
+        return -EFAULT;
+    }
+
+    args.ctx = session_data;
+    err_code = _mali_ukk_unmap_external_mem(&args);
+
+    /* Map the core's result onto an errno. */
+    return map_errcode(err_code);
+}
+
+#if defined(CONFIG_MALI400_UMP)
+/* Release a UMP memory attachment, identified by the cookie supplied from
+ * user space. */
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+    _mali_uk_release_ump_mem_s args;
+    _mali_osk_errcode_t err_code;
+
+    /* session_data was validated by the caller; only check the argument. */
+    MALI_CHECK_NON_NULL(argument, -EINVAL);
+
+    /* copy_from_user() returns the number of bytes NOT copied. */
+    if (0 != copy_from_user(&args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)))
+    {
+        return -EFAULT;
+    }
+
+    args.ctx = session_data;
+    err_code = _mali_ukk_release_ump_mem(&args);
+
+    /* Map the core's result onto an errno. */
+    return map_errcode(err_code);
+}
+
+/* Attach a UMP memory handle to the session's Mali address space. The
+ * cookie identifying the attachment is returned to user space; if that
+ * put_user() fails after a successful attach, the attachment is released
+ * again so nothing leaks. */
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Map _mali_ukk_attach_ump_mem()'s result onto an errno. */
+ return map_errcode(err_code);
+}
+#endif /* CONFIG_MALI400_UMP */
+
+/* Report the buffer size user space must provide for an MMU page table
+ * dump (see mem_dump_mmu_page_table_wrapper). */
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+    _mali_uk_query_mmu_page_table_dump_size_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    kargs.ctx = session_data;
+
+    err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    return (0 == put_user(kargs.size, &uargs->size)) ? 0 : -EFAULT;
+}
+
+/* Dump the session's MMU page tables into a user supplied buffer.
+ *
+ * The dump is generated into a kernel-side valloc'd buffer and then copied
+ * out; the register_writes/page_table_dump pointers returned to user space
+ * are rebased from the kernel buffer onto the user buffer.
+ *
+ * Fix: use the buffer pointer fetched with get_user() instead of
+ * dereferencing uargs->buffer directly - 'uargs' is a __user pointer and
+ * must only be accessed through the uaccess helpers.
+ */
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+    _mali_uk_dump_mmu_page_table_s kargs;
+    _mali_osk_errcode_t err;
+    void *buffer;
+    int rc = -EFAULT;
+
+    /* validate input */
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    /* the session_data pointer was validated by caller */
+
+    kargs.buffer = NULL;
+
+    /* get location of user buffer */
+    if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+    /* get size of mmu page table info buffer from user space */
+    if (0 != get_user(kargs.size, &uargs->size)) goto err_exit;
+    /* verify we can access the whole of the user buffer */
+    if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+    /* allocate temporary buffer (kernel side) to store mmu page table info */
+    MALI_CHECK(kargs.size > 0, -ENOMEM);
+    kargs.buffer = _mali_osk_valloc(kargs.size);
+    if (NULL == kargs.buffer)
+    {
+        rc = -ENOMEM;
+        goto err_exit;
+    }
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_dump_mmu_page_table(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        rc = map_errcode(err);
+        goto err_exit;
+    }
+
+    /* copy mmu page table info back to user space and update pointers */
+    if (0 != copy_to_user(buffer, kargs.buffer, kargs.size)) goto err_exit;
+    if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->register_writes)) goto err_exit;
+    if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)buffer, &uargs->page_table_dump)) goto err_exit;
+    if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+    if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+    rc = 0;
+
+err_exit:
+    if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+    return rc;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_pp.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_pp.c
new file mode 100755
index 00000000000000..e48f3c39298260
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_pp.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* Start a PP (fragment) job. The ukk layer reads the job description from
+ * user space itself; on success the job's sync fence fd (if CONFIG_SYNC)
+ * is written back to uargs->fence. */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+ int fence = -1;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_start_job(session_data, uargs, &fence);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+#if defined(CONFIG_SYNC)
+ /* Empty body is intentional: it consumes put_user()'s result. */
+ if (0 != put_user(fence, &uargs->fence))
+ {
+ /* Since the job has started we can't return an error. */
+ }
+#endif /* CONFIG_SYNC */
+
+ return 0;
+}
+
+/* Report the number of PP cores (and related info) to user space. */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+    _mali_uk_get_pp_number_of_cores_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    kargs.ctx = session_data;
+
+    err = _mali_ukk_get_pp_number_of_cores(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+
+    return (0 == copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) ? 0 : -EFAULT;
+}
+
+/* Report the PP core's hardware version to user space. */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+    _mali_uk_get_pp_core_version_s kargs;
+    _mali_osk_errcode_t err;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    kargs.ctx = session_data;
+    err = _mali_ukk_get_pp_core_version(&kargs);
+    if (_MALI_OSK_ERR_OK != err)
+    {
+        return map_errcode(err);
+    }
+
+    return (0 == put_user(kargs.version, &uargs->version)) ? 0 : -EFAULT;
+}
+
+/* Disable write-back units for a queued PP job, as described by the
+ * user-supplied arguments. The core call cannot fail. */
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+    _mali_uk_pp_disable_wb_s kargs;
+
+    MALI_CHECK_NON_NULL(uargs, -EINVAL);
+    MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+    if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s)))
+    {
+        return -EFAULT;
+    }
+
+    kargs.ctx = session_data;
+    _mali_ukk_pp_job_disable_wb(&kargs);
+
+    return 0;
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_profiling.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_profiling.c
new file mode 100755
index 00000000000000..1a51cc58792738
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_profiling.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: start profiling for this session.
+ *
+ * Copies the request in, invokes _mali_ukk_profiling_start(), then
+ * writes the (possibly kernel-adjusted) event limit back to user space.
+ * Returns 0, a mapped errno, or -EFAULT. */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+ _mali_uk_profiling_start_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_start(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ /* only the limit field goes back out; the ctx pointer stays kernel-side */
+ if (0 != put_user(kargs.limit, &uargs->limit))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: record a single user-supplied profiling event.
+ *
+ * Copy-in only; no result data is returned to user space.
+ * Returns 0, a mapped errno, or -EFAULT. */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: stop profiling and return the recorded event count.
+ *
+ * Note there is deliberately no copy_from_user here: the only input the
+ * kernel needs is the session context, which it sets itself. The count
+ * is written back with put_user. Returns 0, a mapped errno, or -EFAULT. */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+ _mali_uk_profiling_stop_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_stop(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.count, &uargs->count))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: fetch one recorded profiling event by index.
+ *
+ * Only the index field is read from user space; after the lookup the
+ * whole struct (timestamp, event id, data) is copied back with the
+ * kernel ctx pointer cleared. Returns 0, a mapped errno, or -EFAULT. */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+ _mali_uk_profiling_get_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.index, &uargs->index))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_profiling_get_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: discard all recorded profiling events for this session.
+ *
+ * No user data is copied in either direction; the argument struct only
+ * carries the kernel-set session context. Returns 0 or a mapped errno. */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+ _mali_uk_profiling_clear_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_clear(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: report an array of user-space software counters.
+ *
+ * The counter values live behind a user pointer inside the argument
+ * struct, so they are staged through a temporary kernel buffer before
+ * _mali_ukk_sw_counters_report() is called. The buffer is freed on every
+ * path after the call. Returns 0, -EINVAL on an oversized request,
+ * -ENOMEM, -EFAULT, or a mapped errno. */
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+ _mali_uk_sw_counters_report_s kargs;
+ _mali_osk_errcode_t err;
+ u32 *counter_buffer;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ /* make sure that kargs.num_counters is [at least somewhat] sane */
+ /* NOTE(review): 10000 is an arbitrary cap; it also protects the
+ * sizeof(u32) * num_counters computations below from overflowing. */
+ if (kargs.num_counters > 10000) {
+ MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+ return -EINVAL;
+ }
+
+ /* NOTE(review): the (u32*) cast on kmalloc is unnecessary in C */
+ counter_buffer = (u32*)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+ if (NULL == counter_buffer)
+ {
+ return -ENOMEM;
+ }
+
+ if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters))
+ {
+ kfree(counter_buffer);
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ kargs.counters = counter_buffer; /* swap the user pointer for the kernel copy */
+
+ err = _mali_ukk_sw_counters_report(&kargs);
+
+ kfree(counter_buffer);
+
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_vsync.c b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_vsync.c
new file mode 100755
index 00000000000000..b0c4729707f408
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+/* ioctl wrapper: forward a display vsync event notification from user
+ * space into the Mali driver (used by DVFS / profiling logic).
+ *
+ * Copy-in only; nothing is returned besides the status code.
+ * Returns 0, a mapped errno, or -EFAULT. */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_wrappers.h b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_wrappers.h
new file mode 100755
index 00000000000000..370e4da8d607a0
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/linux/mali_ukk_wrappers.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Session / notification wrappers */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int compositor_priority_wrapper(struct mali_session_data *session_data);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+#if defined(CONFIG_SYNC)
+int stream_create_wrapper(struct mali_session_data *session_data, _mali_uk_stream_create_s __user *uargs);
+int sync_fence_create_empty_wrapper(struct mali_session_data *session_data, _mali_uk_fence_create_empty_s __user *uargs);
+int sync_fence_validate_wrapper(struct mali_session_data *session, _mali_uk_fence_validate_s __user *uargs);
+#endif
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+/* Memory subsystem wrappers */
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if defined(CONFIG_MALI400_UMP)
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif
+
+/* PP (pixel processor) and GP (geometry processor) job wrappers */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+/* Profiling wrappers */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+/* Translates a _mali_osk_errcode_t into a negative Linux errno value. */
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/platform/arm/arm.c b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm.c
new file mode 100755
index 00000000000000..3445facb4f3017
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for:
+ * - Realview Versatile platforms with ARM11 Mpcore and virtex 5.
+ * - Versatile Express platforms with ARM Cortex-A9 and virtex 6.
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <asm/io.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include "arm_core_scaling.h"
+
+static void mali_platform_device_release(struct device *device);
+static u32 mali_read_phys(u32 phys_addr);
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value);
+#endif
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+static struct resource mali_gpu_resources_m450_mp8[] =
+{
+ MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+static struct resource mali_gpu_resources_m300[] =
+{
+ MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp1[] =
+{
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp2[] =
+{
+ MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
+};
+
+#endif
+
+/* Statically allocated Mali platform device. Resources and platform data
+ * are attached at runtime by mali_platform_device_register() once the
+ * actual GPU variant has been detected. */
+static struct platform_device mali_gpu_device =
+{
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .dev.release = mali_platform_device_release,
+};
+
+/* Platform data handed to the Mali driver: memory layout, framebuffer
+ * window, utilization-callback period and PMU switch delay. */
+static struct mali_gpu_device_data mali_gpu_data =
+{
+#if defined(CONFIG_ARCH_VEXPRESS)
+ .shared_mem_size =256 * 1024 * 1024, /* 256MB */
+#elif defined(CONFIG_ARCH_REALVIEW)
+ .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
+ .dedicated_mem_size = 0x10000000, /* 256MB */
+#endif
+ .fb_start = 0xe0000000,
+ .fb_size = 0x01000000,
+ .utilization_interval = 1000, /* 1000ms */
+ .utilization_callback = mali_gpu_utilization_callback,
+ .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
+};
+
+/* Probe the FPGA platform for the Mali GPU variant, attach the matching
+ * resource table and platform data to mali_gpu_device, register the
+ * device and start the core-scaling policy.
+ *
+ * Returns 0 on success. NOTE(review): if no known GPU is detected, err
+ * stays at its initial -1 (-EPERM), not a descriptive errno — confirm
+ * whether callers care about the exact value. */
+int mali_platform_device_register(void)
+{
+ int err = -1;
+ int num_pp_cores = 0;
+#if defined(CONFIG_ARCH_REALVIEW)
+ u32 m400_gp_version;
+#endif
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+ /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+ /* Vexpress: a fixed magic value at 0xFC020000 identifies Mali-450 MP8 */
+ if (mali_read_phys(0xFC020000) == 0x00010100)
+ {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+ num_pp_cores = 8;
+ err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources_m450_mp8, sizeof(mali_gpu_resources_m450_mp8) / sizeof(mali_gpu_resources_m450_mp8[0]));
+ }
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+ /* Realview: the GP version register's product id distinguishes Mali-300
+ * (0x0C07) from Mali-400 (0x0B07); the FPGA firmware version then
+ * distinguishes MP1 from MP2 bitstreams. */
+ m400_gp_version = mali_read_phys(0xC000006C);
+ if ((m400_gp_version & 0xFFFF0000) == 0x0C070000)
+ {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+ num_pp_cores = 1;
+ err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources_m300, sizeof(mali_gpu_resources_m300) / sizeof(mali_gpu_resources_m300[0]));
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000)
+ {
+ u32 fpga_fw_version = mali_read_phys(0xC0010000);
+ if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F)
+ {
+ /* Mali-400 MP1 r1p0 or r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+ num_pp_cores = 1;
+ err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources_m400_mp1, sizeof(mali_gpu_resources_m400_mp1) / sizeof(mali_gpu_resources_m400_mp1[0]));
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ else if (fpga_fw_version == 0x130C000F)
+ {
+ /* Mali-400 MP2 r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+ num_pp_cores = 2;
+ err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources_m400_mp2, sizeof(mali_gpu_resources_m400_mp2) / sizeof(mali_gpu_resources_m400_mp2[0]));
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ }
+
+#endif
+
+ if (0 == err)
+ {
+ err = platform_device_add_data(&mali_gpu_device, &mali_gpu_data, sizeof(mali_gpu_data));
+ if (0 == err)
+ {
+ /* Register the platform device */
+ err = platform_device_register(&mali_gpu_device);
+ if (0 == err)
+ {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+ pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+ pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+
+ return 0;
+ }
+ }
+
+ /* NOTE(review): on this failure path the device was never successfully
+ * registered; platform_device_put() is the usual cleanup for a device
+ * that only had resources/data added — confirm against the platform
+ * device API before changing. */
+ platform_device_unregister(&mali_gpu_device);
+ }
+
+ return err;
+}
+
+/* Tear down in reverse order of registration: stop the core-scaling
+ * policy (flushing its pending work) before removing the device, then
+ * restore the FPGA's legacy memory mapping on Realview. */
+void mali_platform_device_unregister(void)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+ mali_core_scaling_term();
+ platform_device_unregister(&mali_gpu_device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+}
+
+/* Device-core release callback. The platform_device is static, so there
+ * is nothing to free — this exists only to satisfy the device model
+ * (a missing release triggers a kernel warning). */
+static void mali_platform_device_release(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+/* Read one 32-bit value from a physical address through a temporary
+ * uncached mapping.
+ *
+ * Returns 0xDEADBEEF when the mapping fails — a sentinel that callers
+ * cannot distinguish from real register data, acceptable here because
+ * it will simply not match any expected GPU id.
+ * The 0xFFFFE000 page mask assumes 8 KiB mapping granularity — confirm
+ * for the target platform. */
+static u32 mali_read_phys(u32 phys_addr)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ u32 ret = 0xDEADBEEF;
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped)
+ {
+ ret = (u32)ioread32(((u8*)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_ARCH_REALVIEW)
+/* Write one 32-bit value to a physical address through a temporary
+ * uncached mapping. A mapping failure is silently ignored (the write is
+ * best-effort FPGA configuration). Counterpart of mali_read_phys(). */
+static void mali_write_phys(u32 phys_addr, u32 value)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped)
+ {
+ iowrite32(value, ((u8*)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+}
+#endif
+
+/* Periodic utilization callback installed via mali_gpu_data; simply
+ * forwards the sample to the example core-scaling policy. */
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+ mali_core_scaling_update(data);
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.c b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.c
new file mode 100755
index 00000000000000..d7930a4d6012ee
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+/* Core-scaling state: total PP cores present and the number currently
+ * requested to be enabled. Updated from the utilization callback
+ * (timer context) and applied from workqueue context. */
+static int num_cores_total;
+static int num_cores_enabled;
+
+/* Work item used to defer the actual core switch to process context. */
+static struct work_struct wq_work;
+
+/* Workqueue handler: apply the current num_cores_enabled to the GPU.
+ * Deferred via wq_work because the scaling decisions are made in timer
+ * context, where mali_perf_set_num_pp_cores() cannot be called. */
+static void set_num_cores(struct work_struct *work)
+{
+ int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+ MALI_DEBUG_ASSERT(0 == err);
+ MALI_IGNORE(err);
+}
+
+/* Request one more PP core, capped at num_cores_total; the actual
+ * switch happens asynchronously in set_num_cores(). */
+static void enable_one_core(void)
+{
+ if (num_cores_enabled < num_cores_total)
+ {
+ ++num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+ }
+
+ /* invariant: 1 <= num_cores_enabled <= num_cores_total */
+ MALI_DEBUG_ASSERT( 1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Request one fewer PP core, never going below one; the actual switch
+ * happens asynchronously in set_num_cores(). */
+static void disable_one_core(void)
+{
+ if (1 < num_cores_enabled)
+ {
+ --num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+ }
+
+ /* invariant: 1 <= num_cores_enabled <= num_cores_total */
+ MALI_DEBUG_ASSERT( 1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Jump straight to all PP cores enabled (used when utilization is very
+ * high); no-op if already at maximum. */
+static void enable_max_num_cores(void)
+{
+ if (num_cores_enabled < num_cores_total)
+ {
+ num_cores_enabled = num_cores_total;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+ }
+
+ MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+/* Initialize the scaling policy: record the core count and assume all
+ * cores start enabled (matching the hardware's post-registration state). */
+void mali_core_scaling_init(int num_pp_cores)
+{
+ INIT_WORK(&wq_work, set_num_cores);
+
+ num_cores_total = num_pp_cores;
+ num_cores_enabled = num_pp_cores;
+
+ /* NOTE: Mali is not fully initialized at this point. */
+}
+
+/* Shut down the policy: wait for any pending set_num_cores() work to
+ * finish so it cannot run after the driver is gone. */
+void mali_core_scaling_term(void)
+{
+ flush_scheduled_work();
+}
+
+/* Integer round-to-nearest "percent% of max".
+ * The original used floating point ((percent)*(max)/100.0 + 0.5), which
+ * is only tolerable in kernel code because every call site passes
+ * compile-time constants that the compiler folds; pure integer math
+ * removes that fragility. Produces identical values for all call sites
+ * below (90/50/40/0 percent of 256 -> 230/128/102/0). */
+#define PERCENT_OF(percent, max) (((percent) * (max) + 50) / 100)
+
+/* Utilization-driven PP core scaling decision.
+ *
+ * @param data Utilization sample; utilization_pp is in 0..256.
+ *
+ * Thresholds: >90% -> all cores, >50% -> one more core, 40..50% ->
+ * hold, 0..40% -> one fewer core, 0% -> hold (GPU idle, leave as-is). */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+ /*
+ * This function implements a very trivial PP core scaling algorithm.
+ *
+ * It is _NOT_ of production quality.
+ * The only intention behind this algorithm is to exercise and test the
+ * core scaling functionality of the driver.
+ * It is _NOT_ tuned for neither power saving nor performance!
+ *
+ * Other metrics than PP utilization need to be considered as well
+ * in order to make a good core scaling algorithm.
+ */
+
+ MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+ /* NOTE: this function is normally called directly from the utilization callback which is in
+ * timer context. */
+
+ if ( PERCENT_OF(90, 256) < data->utilization_pp)
+ {
+ enable_max_num_cores();
+ }
+ else if (PERCENT_OF(50, 256) < data->utilization_pp)
+ {
+ enable_one_core();
+ }
+ else if (PERCENT_OF(40, 256) < data->utilization_pp)
+ {
+ /* do nothing */
+ }
+ else if (PERCENT_OF( 0, 256) < data->utilization_pp)
+ {
+ disable_one_core();
+ }
+ else
+ {
+ /* do nothing */
+ }
+}
diff --git a/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.h b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.h
new file mode 100755
index 00000000000000..5c5def60635919
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/platform/arm/arm_core_scaling.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+#endif /* __ARM_CORE_SCALING_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/readme.txt b/drivers/parrot/gpu/mali400_legacy/readme.txt
new file mode 100755
index 00000000000000..26095066bb42b4
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/readme.txt
@@ -0,0 +1,24 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ ump_option: 1 = Enable UMP support (*)
+ 0 = disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
diff --git a/drivers/parrot/gpu/mali400_legacy/regs/mali_200_regs.h b/drivers/parrot/gpu/mali400_legacy/regs/mali_200_regs.h
new file mode 100755
index 00000000000000..230b3e0e7fea44
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/regs/mali_200_regs.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+ MALI200_REG_VAL_IRQ_HANG = (1<<2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+/* Frame/render register offsets within the PP core's register bank
+ * (byte offsets from the core base — see the Mali-200/400 PP TRM). */
+enum mali200_render_unit
+{
+ MALI200_REG_ADDR_FRAME = 0x0000,
+ MALI200_REG_ADDR_RSW = 0x0004,
+ MALI200_REG_ADDR_STACK = 0x0030,
+ MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+ MALI200_REG_ADDR_ORIGIN_OFFSET_X = 0x0040
+};
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+ MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+};
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/parrot/gpu/mali400_legacy/regs/mali_gp_regs.h b/drivers/parrot/gpu/mali400_legacy/regs/mali_gp_regs.h
new file mode 100755
index 00000000000000..7079d5cbb2a2a9
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/regs/mali_gp_regs.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+ MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} defgroup MALIGP2_IRQ*/
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} defgroup MALIGP2_STATUS*/
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different sources of instrumentation data on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.c b/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.c
new file mode 100755
index 00000000000000..86c8e31fe485f7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.h b/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.h
new file mode 100755
index 00000000000000..79d2d5f968f248
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.c b/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.c
new file mode 100755
index 00000000000000..86c8e31fe485f7
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.h b/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.h
new file mode 100755
index 00000000000000..c3a3e1c975a7a8
--- /dev/null
+++ b/drivers/parrot/gpu/mali400_legacy/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/parrot/gpu/p7gpu-pm.c b/drivers/parrot/gpu/p7gpu-pm.c
new file mode 100644
index 00000000000000..c77b42e3b41edc
--- /dev/null
+++ b/drivers/parrot/gpu/p7gpu-pm.c
@@ -0,0 +1,169 @@
+/*
+ * @file p7gpu-pm.c
+ * @brief Driver for handling Parrot's Mali200/400 GPU power management
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author alvaro.moran@parrot.com
+ * @date 2011-01-25
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ *
+ * This driver covers the power management for the possible GPUs handled in
+ * Parrot7, Mali200 and Mali400. The clocks are different, so we have to check
+ * out the revision.
+ *
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <mach/clkdev.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include "p7gpu-pm.h"
+
+MODULE_AUTHOR( "Alvaro Moran Parrot S.A. <alvaro.moran@parrot.com>" );
+MODULE_DESCRIPTION( "Parrot7 Mali200/400 power management" );
+MODULE_LICENSE( "GPL" );
+
+static int inline p7gpu_pm_activate_clk (struct platform_device *pdev, const char* name)
+{
+ int ret = 0;
+ struct clk* clock;
+
+ clock = clk_get(&pdev->dev, name);
+ if (IS_ERR(clock))
+ {
+ printk(KERN_ERR "Parrot7 GPU error getting %s clock\n",name);
+ return PTR_ERR(clock);
+ }
+ ret = clk_prepare_enable(clock);
+ if (ret)
+ {
+ printk(KERN_ERR "Parrot7 GPU error enabling %s clock\n",name);
+ }
+ return ret;
+}
+
+static void inline p7gpu_pm_disable_clk (struct platform_device *pdev, const char* name)
+{
+ struct clk* clock;
+
+ clock = clk_get(&pdev->dev, name);
+ // if there is an error getting the clock, return silently
+ if (IS_ERR(clock))
+ return;
+ clk_disable_unprepare(clock);
+ /* put ref from clk_get from disable */
+ clk_put(clock);
+ /* put ref from clk_get from activate */
+ clk_put(clock);
+ return;
+}
+
+static int p7gpu_pm_clocks_on(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+ struct p7gpu_pm_plat_data const* const pdata =
+ (struct p7gpu_pm_plat_data const*) dev_get_platdata(&pdev->dev);
+
+ for (i = 0; i < pdata->clocks_number; i++) {
+ ret = p7gpu_pm_activate_clk(pdev, pdata->clocks_names[i]);
+ if (ret != 0)
+ goto fail_probe;
+ }
+
+ printk(KERN_INFO "Parrot7 GPU clocks started\n");
+ return 0;
+
+fail_probe:
+ return ret;
+}
+
+static int p7gpu_pm_clocks_off(struct platform_device *pdev)
+{
+ int i;
+ struct p7gpu_pm_plat_data const* const pdata =
+ (struct p7gpu_pm_plat_data const*) dev_get_platdata(&pdev->dev);
+
+ printk(KERN_INFO "Parrot7 GPU clocks stopping\n");
+	/* Disable clocks in reverse order */
+ for (i = pdata->clocks_number - 1; i >= 0; i--)
+ p7gpu_pm_disable_clk(pdev, pdata->clocks_names[i]);
+
+ return 0;
+}
+
+static int p7gpu_pm_probe(struct platform_device *pdev)
+{
+ return p7gpu_pm_clocks_on(pdev);
+}
+
+static int p7gpu_pm_remove(struct platform_device *pdev)
+{
+ return p7gpu_pm_clocks_off(pdev);
+}
+
+
+#ifdef CONFIG_PM
+static int p7gpu_pm_suspend(struct platform_device* pdev, pm_message_t state)
+{
+ return p7gpu_pm_clocks_off(pdev);
+}
+
+static int p7gpu_pm_resume(struct platform_device* pdev)
+{
+ return p7gpu_pm_clocks_on(pdev);
+}
+#else
+
+#define p7gpu_pm_suspend NULL
+#define p7gpu_pm_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct platform_driver p7gpu_pm_driver = {
+ .probe = p7gpu_pm_probe,
+ .remove = p7gpu_pm_remove,
+ .suspend = p7gpu_pm_suspend,
+ .resume = p7gpu_pm_resume,
+ .driver = {
+ .name = "p7gpu",
+ },
+};
+
+/// Module initialization function
+static int __init p7gpu_pm_init( void )
+{
+ int ret;
+ ret = platform_driver_register(&p7gpu_pm_driver);
+ return ret;
+}
+
+/// Module Cleanup Function
+static void __exit p7gpu_pm_exit( void )
+{
+ platform_driver_unregister(&p7gpu_pm_driver);
+}
+
+module_init( p7gpu_pm_init );
+module_exit( p7gpu_pm_exit );
diff --git a/drivers/parrot/gpu/p7gpu-pm.h b/drivers/parrot/gpu/p7gpu-pm.h
new file mode 100644
index 00000000000000..3f0cae21bb81b5
--- /dev/null
+++ b/drivers/parrot/gpu/p7gpu-pm.h
@@ -0,0 +1,32 @@
+/**
+ * p7gpu-pm.h - Parrot7 GPU PM driver interface
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 2013-05-30
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7_GPU_PM_H
+#define _P7_GPU_PM_H
+
+#if defined(CONFIG_GPU_PARROT7) || defined(CONFIG_GPU_PARROT7_MODULE)
+
+/* Clock names */
+#define M200_BUS_CLK "bus"
+#define M200_CORE_CLK "core"
+#define M400_CLK "gpu"
+
+/*
+ * For now the clock names and number are passed as platform data; we might want
+ * to put it somewhere else as soon as we find a better solution.
+ */
+struct p7gpu_pm_plat_data {
+ char** clocks_names;
+ int clocks_number;
+};
+
+#endif
+#endif
diff --git a/drivers/parrot/gpu/p7ump.c b/drivers/parrot/gpu/p7ump.c
new file mode 100644
index 00000000000000..5da615c59289ce
--- /dev/null
+++ b/drivers/parrot/gpu/p7ump.c
@@ -0,0 +1,279 @@
+/**
+ * linux/driver/parrot/gpu/p7ump.c - Parrot 7 Mali UMP API
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 2013-12-04
+ *
+ * This file is released under the GPL
+ */
+
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/signal.h>
+#include "ump_kernel_interface_ref_drv.h"
+#include "p7ump.h"
+
+#define DRV_NAME "p7ump"
+
+/* This is to keep track of all mapped regions */
+struct p7ump_map {
+ struct list_head list;
+ u32 addr;
+ size_t size;
+ atomic_t cnt;
+};
+
+struct p7ump_record {
+ struct list_head list;
+ u32 id;
+ ump_dd_handle ump_h;
+};
+
+static LIST_HEAD( p7ump_maps );
+static DEFINE_MUTEX(p7ump_lock);
+
+int p7ump_add_map(u32 addr, size_t size)
+{
+ struct list_head *pos, *q;
+ struct p7ump_map* map;
+
+ if ((0 == addr) || (0 == size))
+ return -EINVAL;
+
+ /* Check if address is already present */
+ mutex_lock(&p7ump_lock);
+ list_for_each_safe(pos, q, &p7ump_maps) {
+ map = list_entry(pos, struct p7ump_map, list);
+ if ((map->addr == addr) && (map->size == size)) {
+ atomic_inc(&map->cnt);
+ mutex_unlock(&p7ump_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&p7ump_lock);
+
+ /* Add new map */
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (unlikely(! map)) {
+ return -ENOMEM;
+ }
+ map->addr = addr;
+ map->size = size;
+ atomic_set(&map->cnt, 1);
+ //printk(KERN_DEBUG "P7UMP: Adding map 0x%08x sz %d\n", addr, size);
+ mutex_lock(&p7ump_lock);
+ list_add(&map->list, &p7ump_maps);
+ mutex_unlock(&p7ump_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p7ump_add_map);
+
+int p7ump_remove_map(u32 addr, size_t size)
+{
+ struct p7ump_map* map;
+ struct list_head *pos, *q;
+ int ret = -EFAULT;
+
+ if (addr == 0 || size == 0)
+ return -EINVAL;
+
+ mutex_lock(&p7ump_lock);
+ list_for_each_safe(pos, q, &p7ump_maps) {
+ map = list_entry(pos, struct p7ump_map, list);
+ if ((map->addr == addr) && (map->size == size) &&
+ atomic_dec_and_test(&map->cnt)) {
+ //printk(KERN_DEBUG "P7UMP: Removing map 0x%08x sz %d\n",
+ // addr, size);
+ list_del(pos);
+ kfree(map);
+ ret = 0;
+ goto unlock_exit;
+ }
+ }
+
+unlock_exit:
+ mutex_unlock(&p7ump_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(p7ump_remove_map);
+
+static struct p7ump_token* p7ump_record_get(struct list_head* rlist,
+ struct p7ump_token* token)
+{
+ struct p7ump_map *map;
+ struct p7ump_record* record;
+ ump_dd_physical_block ump_memory_description;
+ ump_dd_handle ump_h;
+ bool valid_map = false;
+ u32 size = PAGE_ALIGN(token->size);
+ u32 addr = token->bus;
+ u32 end = token->bus + size;
+
+ token->id = 0;
+
+ /* Look for a valid registered mapping */
+ mutex_lock(&p7ump_lock);
+ list_for_each_entry(map, &p7ump_maps, list) {
+ if ((addr >= map->addr) && (end <= (map->addr + map->size))) {
+ valid_map = true;
+ break;
+ }
+ }
+ mutex_unlock(&p7ump_lock);
+ if (!valid_map)
+ return token;
+
+ ump_memory_description.addr = token->bus;
+ ump_memory_description.size = size;
+ ump_h =
+ ump_dd_handle_create_from_phys_blocks(&ump_memory_description, 1);
+ if (UMP_DD_HANDLE_INVALID == ump_h)
+ return token;
+ token->id = ump_dd_secure_id_get(ump_h);
+ token->size = size;
+
+ /*
+ * If everything went fine then save token in a new record and
+ * append to the list
+ */
+ record = kmalloc(sizeof(*record), GFP_KERNEL);
+ if (unlikely(! record)) {
+ return token;
+ }
+ record->ump_h = ump_h;
+ record->id = token->id;
+
+ mutex_lock(&p7ump_lock);
+ list_add(&record->list, rlist);
+ mutex_unlock(&p7ump_lock);
+
+ return token;
+}
+
+static void p7ump_record_put(struct list_head* rlist, int secure_id)
+{
+ struct p7ump_record* record;
+ struct list_head *pos, *q;
+ ump_dd_handle ump_h = ump_dd_handle_create_from_secure_id(secure_id);
+ /* now that we have the handle, we can decrease the reference again */
+ ump_dd_reference_release(ump_h);
+
+ mutex_lock(&p7ump_lock);
+ list_for_each_safe(pos, q, rlist) {
+ record = list_entry(pos, struct p7ump_record, list);
+ if (record->ump_h == ump_h)
+ {
+ list_del(pos);
+ ump_dd_reference_release(record->ump_h);
+ kfree(record);
+ break;
+ }
+ }
+ mutex_unlock(&p7ump_lock);
+}
+
+static long p7ump_ioctl(struct file* filep, unsigned int cmd, unsigned long arg)
+{
+ struct list_head* rlist = filep->private_data;
+ struct p7ump_token __user * ptoken;
+ int __user * pid;
+ struct p7ump_token token;
+ int id;
+
+ switch (cmd) {
+ case P7UMP_GET_ID:
+ ptoken = (struct p7ump_token __user *) arg;
+ if (copy_from_user(&token, ptoken, sizeof(token)))
+ return -EFAULT;
+ p7ump_record_get(rlist, &token);
+ if (0 == token.id)
+ return -EINVAL;
+ return copy_to_user( ptoken, &token, sizeof(token));
+ case P7UMP_REL_ID:
+ pid = (int __user *) arg;
+ get_user(id, pid);
+ p7ump_record_put(rlist, id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int p7ump_release(struct inode* inode, struct file* filep)
+{
+ struct list_head* rlist = filep->private_data;
+ struct p7ump_record* record;
+ struct list_head *pos, *q;
+
+ /* cleanup records and list */
+ mutex_lock(&p7ump_lock);
+ list_for_each_safe(pos, q, rlist) {
+ record = list_entry(pos, struct p7ump_record, list);
+ list_del(pos);
+ ump_dd_reference_release(record->ump_h);
+ kfree(record);
+ }
+ mutex_unlock(&p7ump_lock);
+
+ kfree(rlist);
+ return 0;
+}
+
+static int p7ump_open(struct inode* inode, struct file* filep)
+{
+ static struct list_head* rlist;
+
+ rlist = kmalloc(sizeof(*rlist), GFP_KERNEL);
+ if (unlikely(! rlist)) {
+ return -ENOMEM;
+ }
+ /* Init record list head */
+ INIT_LIST_HEAD(rlist);
+
+ filep->private_data = rlist;
+ return 0;
+}
+
+static struct file_operations const p7ump_fops = {
+ .owner = THIS_MODULE,
+ .open = p7ump_open,
+ .release = p7ump_release,
+ .unlocked_ioctl = p7ump_ioctl,
+};
+
+static struct miscdevice p7ump_dev =
+{
+ MISC_DYNAMIC_MINOR,
+ DRV_NAME,
+ &p7ump_fops
+};
+
+static int __init p7ump_init(void)
+{
+ return misc_register(&p7ump_dev);
+}
+
+static void __exit p7ump_exit(void)
+{
+ misc_deregister(&p7ump_dev);
+}
+
+module_init(p7ump_init);
+module_exit(p7ump_exit);
+
+
+MODULE_AUTHOR("Alvaro Moran");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/parrot/gpu/p7ump.h b/drivers/parrot/gpu/p7ump.h
new file mode 100644
index 00000000000000..a3be3fd16adbcc
--- /dev/null
+++ b/drivers/parrot/gpu/p7ump.h
@@ -0,0 +1,44 @@
+/**
+ * linux/driver/parrot/gpu/p7ump.h - Parrot 7 Mali UMP API
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Alvaro Moran <alvaro.moran@parrot.com>
+ * date: 2013-12-04
+ *
+ * This file is released under the GPL
+ */
+
+
+#ifndef _P7UMP_H
+#define _P7UMP_H
+
+#define P7UMP_IO_MAGIC 'k'
+
+struct p7ump_token {
+ int bus;
+ int size;
+ int id;
+};
+
+#define P7UMP_GET_ID _IOWR(P7UMP_IO_MAGIC, 1, struct p7ump_token)
+#define P7UMP_REL_ID _IOWR(P7UMP_IO_MAGIC, 2, int)
+
+
+#if defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE)
+extern int p7ump_add_map(u32 addr, size_t size);
+extern int p7ump_remove_map(u32 addr, size_t size);
+#else /* defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE) */
+static inline int p7ump_add_map(u32 addr, size_t size)
+{
+ return -ENOSYS;
+}
+
+static inline int p7ump_remove_map(u32 addr, size_t size)
+{
+ return -ENOSYS;
+}
+#endif /* defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE) */
+
+
+#endif
diff --git a/drivers/parrot/gpu/ump/Kbuild b/drivers/parrot/gpu/ump/Kbuild
new file mode 100755
index 00000000000000..44f621f71a10a3
--- /dev/null
+++ b/drivers/parrot/gpu/ump/Kbuild
@@ -0,0 +1,102 @@
+#
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# Set default configuration to use, if Makefile didn't provide one.
+# Change this to use a different config.h
+ifneq (,$(filter $(CONFIG_UMP),y m))
+
+DRIVER_DIR=$(KBUILD_SRC)/$(src)
+
+CONFIG ?= os_memory_64m
+
+# Validate selected config
+# ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+# $(warning Current directory is $(src))
+# $(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+# else
+# # Link arch to the selected arch-config directory
+# $(shell [ -L $(src)/arch ] && rm $(src)/arch)
+# $(shell ln -sf arch-$(CONFIG) $(src)/arch)
+# $(shell touch $(src)/arch/config.h)
+# endif
+
+#UDD_FILE_PREFIX = ../mali/
+UDD_FILE_PREFIX = mali_files/
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(DRIVER_DIR); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(DRIVER_DIR); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(DRIVER_DIR); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV=$(SVN_REV)
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/../mali400/common -I$(src)/../mali400/linux -I$(DRIVER_DIR)/../../ump/include/ump
+ccflags-y += -I$(DRIVER_DIR)/include/ump
+ccflags-y += -DMALI_STATE_TRACKING=0
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=0
+ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary -I$(DRIVER_DIR)/../mali400/linux/license/proprietary
+else
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl -I$(DRIVER_DIR)/../mali400/linux/license/gpl
+endif
+
+ump-y = common/ump_kernel_common.o \
+ common/ump_kernel_descriptor_mapping.o \
+ common/ump_kernel_api.o \
+ common/ump_kernel_ref_drv.o \
+ linux/ump_kernel_linux.o \
+ linux/ump_kernel_memory_backend_os.o \
+ linux/ump_kernel_memory_backend_dedicated.o \
+ linux/ump_memory_backend.o \
+ linux/ump_ukk_wrappers.o \
+ linux/ump_ukk_ref_wrappers.o \
+ linux/ump_osk_atomics.o \
+ linux/ump_osk_low_level_mem.o \
+ linux/ump_osk_misc.o \
+ linux/ump_kernel_random_mapping.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.o
+
+obj-$(CONFIG_UMP) := ump.o
+endif
+
diff --git a/drivers/parrot/gpu/ump/Kbuild.original b/drivers/parrot/gpu/ump/Kbuild.original
new file mode 100755
index 00000000000000..4f62b785317787
--- /dev/null
+++ b/drivers/parrot/gpu/ump/Kbuild.original
@@ -0,0 +1,94 @@
+#
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# Set default configuration to use, if Makefile didn't provide one.
+# Change this to use a different config.h
+CONFIG ?= os_memory_64m
+
+# Validate selected config
+ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(src))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(src)/arch ] && rm $(src)/arch)
+$(shell ln -sf arch-$(CONFIG) $(src)/arch)
+$(shell touch $(src)/arch/config.h)
+endif
+
+UDD_FILE_PREFIX = ../mali/
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV=$(SVN_REV)
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+ccflags-y += -I$(src) -I$(src)/common -I$(src)/linux -I$(src)/../mali/common -I$(src)/../mali/linux -I$(src)/../../ump/include/ump
+ccflags-y += -DMALI_STATE_TRACKING=0
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=0
+ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(src)/linux/license/gpl/*),)
+ccflags-y += -I$(src)/linux/license/proprietary -I$(src)/../mali/linux/license/proprietary
+else
+ccflags-y += -I$(src)/linux/license/gpl -I$(src)/../mali/linux/license/gpl
+endif
+
+ump-y = common/ump_kernel_common.o \
+ common/ump_kernel_descriptor_mapping.o \
+ common/ump_kernel_api.o \
+ common/ump_kernel_ref_drv.o \
+ linux/ump_kernel_linux.o \
+ linux/ump_kernel_memory_backend_os.o \
+ linux/ump_kernel_memory_backend_dedicated.o \
+ linux/ump_memory_backend.o \
+ linux/ump_ukk_wrappers.o \
+ linux/ump_ukk_ref_wrappers.o \
+ linux/ump_osk_atomics.o \
+ linux/ump_osk_low_level_mem.o \
+ linux/ump_osk_misc.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.o
+
+obj-$(CONFIG_UMP) := ump.o
+
diff --git a/drivers/parrot/gpu/ump/Kconfig b/drivers/parrot/gpu/ump/Kconfig
new file mode 100755
index 00000000000000..e5825f897b1702
--- /dev/null
+++ b/drivers/parrot/gpu/ump/Kconfig
@@ -0,0 +1,16 @@
+config UMP
+ tristate "UMP support"
+ depends on ARM && !MALI400
+ ---help---
+ This enables support for the UMP memory allocation and sharing API.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ump.
+
+config UMP_DEBUG
+ bool "Enable extra debug in UMP"
+ depends on UMP
+ default y
+ ---help---
+	  This enables extra debug checks and messages in UMP.
+
diff --git a/drivers/parrot/gpu/ump/Makefile b/drivers/parrot/gpu/ump/Makefile
new file mode 100755
index 00000000000000..778032ac260bf8
--- /dev/null
+++ b/drivers/parrot/gpu/ump/Makefile
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+export ARCH ?= arm
+BUILD ?= debug
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# Check that required parameters are supplied.
+ifeq ($(CONFIG),)
+$(error "CONFIG must be specified.")
+endif
+ifeq ($(CPU)$(KDIR),)
+$(error "KDIR or CPU must be specified.")
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+export CONFIG
+
+export CONFIG_UMP := m
+ifeq ($(BUILD),debug)
+export CONFIG_UMP_DEBUG := y
+else
+export CONFIG_UMP_DEBUG := n
+endif
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) -C $(KDIR) M=$(CURDIR)/../mali clean
diff --git a/drivers/parrot/gpu/ump/Makefile.common b/drivers/parrot/gpu/ump/Makefile.common
new file mode 100755
index 00000000000000..9bd2583c518050
--- /dev/null
+++ b/drivers/parrot/gpu/ump/Makefile.common
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
diff --git a/drivers/parrot/gpu/ump/arch-default/config.h b/drivers/parrot/gpu/ump/arch-default/config.h
new file mode 100644
index 00000000000000..54d29dfc9dedb1
--- /dev/null
+++ b/drivers/parrot/gpu/ump/arch-default/config.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2010, 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Use OS memory. */
+#define ARCH_UMP_BACKEND_DEFAULT 1
+
+/* OS memory won't need a base address. */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000
+
+/* 512 MB maximum limit for UMP allocations. */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 512UL * 1024UL * 1024UL
+
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/ump/arch-pb-virtex5/config.h b/drivers/parrot/gpu/ump/arch-pb-virtex5/config.h
new file mode 100644
index 00000000000000..99679cd13fbf5e
--- /dev/null
+++ b/drivers/parrot/gpu/ump/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xE1000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 16UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/ump/arch/config.h b/drivers/parrot/gpu/ump/arch/config.h
new file mode 100644
index 00000000000000..14933af1560d24
--- /dev/null
+++ b/drivers/parrot/gpu/ump/arch/config.h
@@ -0,0 +1,8 @@
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 1
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0 /* This won't be used anyway */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 128UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_api.c b/drivers/parrot/gpu/ump/common/ump_kernel_api.c
new file mode 100644
index 00000000000000..aa7e43f173e908
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_api.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_random_mapping.h"
+
+
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+ return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ ump_dd_mem *mem;
+
+ DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+ mem = ump_random_mapping_get(device.secure_id_map, (int)secure_id);
+ if (NULL == mem) {
+ DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Keep the reference taken in ump_random_mapping_get() */
+
+ return (ump_dd_handle)mem;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ ump_dd_mem *mem = (ump_dd_mem *) memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block *blocks, unsigned long num_blocks)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (blocks == NULL) {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (mem->nr_blocks != num_blocks) {
+ DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+ _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block *block)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (block == NULL) {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (index >= mem->nr_blocks) {
+ DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+ *block = mem->block_array[index];
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+ int new_ref;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+ DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ ump_random_mapping_put(mem);
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args)
+{
+ ump_session_data *session_data;
+
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
+
+ session_data = (ump_session_data *)args->ctx;
+
+	/* check compatibility */
+ if (args->version == UMP_IOCTL_API_VERSION) {
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n",
+ GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ } else {
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
+ GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ args->compatible = 0;
+ args->version = UMP_IOCTL_API_VERSION; /* report our version */
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info)
+{
+ ump_session_memory_list_element *session_memory_element;
+ ump_session_memory_list_element *tmp;
+ ump_session_data *session_data;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+ int secure_id;
+
+ DEBUG_ASSERT_POINTER(release_info);
+ DEBUG_ASSERT_POINTER(release_info->ctx);
+
+	/* Retrieve the session data */
+ session_data = (ump_session_data *)release_info->ctx;
+
+ /* If there are many items in the memory session list we
+ * could be de-referencing this pointer a lot so keep a local copy
+ */
+ secure_id = release_info->secure_id;
+
+ DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+ /* Iterate through the memory list looking for the requested secure ID */
+ _mali_osk_mutex_wait(session_data->lock);
+ _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
+ if (session_memory_element->mem->secure_id == secure_id) {
+ ump_dd_mem *release_mem;
+
+ release_mem = session_memory_element->mem;
+ _mali_osk_list_del(&session_memory_element->list);
+ ump_dd_reference_release(release_mem);
+ _mali_osk_free(session_memory_element);
+
+ ret = _MALI_OSK_ERR_OK;
+ break;
+ }
+ }
+
+ _mali_osk_mutex_signal(session_data->lock);
+ DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+ DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+ return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction)
+{
+ ump_dd_mem *mem;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ DEBUG_ASSERT_POINTER(user_interaction);
+
+ /* We lock the mappings so things don't get removed while we are looking for the memory */
+ mem = ump_random_mapping_get(device.secure_id_map, user_interaction->secure_id);
+ if (NULL != mem) {
+ user_interaction->size = mem->size_bytes;
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
+ (ump_secure_id)user_interaction->secure_id,
+ (unsigned long)user_interaction->size));
+ ump_random_mapping_put(mem);
+ ret = _MALI_OSK_ERR_OK;
+ } else {
+ user_interaction->size = 0;
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
+ (ump_secure_id)user_interaction->secure_id));
+ }
+
+ return ret;
+}
+
+
+
+void _ump_ukk_msync(_ump_uk_msync_s *args)
+{
+ ump_dd_mem *mem = NULL;
+ void *virtual = NULL;
+ u32 size = 0;
+ u32 offset = 0;
+
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
+ if (NULL == mem) {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ /* Returns the cache settings back to Userspace */
+ args->is_cached = mem->is_cached;
+
+ /* If this flag is the only one set, we should not do the actual flush, only the readout */
+ if (_UMP_UK_MSYNC_READOUT_CACHE_ENABLED == args->op) {
+ DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+ goto msync_release_and_return;
+ }
+
+	/* Nothing to do if the memory is not cached */
+ if (0 == mem->is_cached) {
+ DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+ goto msync_release_and_return;
+ }
+ DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
+ (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
+
+ if (args->address) {
+ virtual = (void *)((u32)args->address);
+ offset = (u32)((args->address) - (args->mapping));
+ } else {
+ /* Flush entire mapping when no address is specified. */
+ virtual = args->mapping;
+ }
+ if (args->size) {
+ size = args->size;
+ } else {
+ /* Flush entire mapping when no size is specified. */
+ size = mem->size_bytes - offset;
+ }
+
+ if ((offset + size) > mem->size_bytes) {
+ DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
+ goto msync_release_and_return;
+ }
+
+	/* The actual cache flush - implemented for each OS */
+ _ump_osk_msync(mem, virtual, offset, size, args->op, NULL);
+
+msync_release_and_return:
+ ump_random_mapping_put(mem);
+ return;
+}
+
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args)
+{
+ ump_session_data *session_data;
+ ump_uk_cache_op_control op;
+
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
+
+ op = args->op;
+ session_data = (ump_session_data *)args->ctx;
+
+ _mali_osk_mutex_wait(session_data->lock);
+ if (op == _UMP_UK_CACHE_OP_START) {
+ session_data->cache_operations_ongoing++;
+ DBG_MSG(4, ("Cache ops start\n"));
+ if (session_data->cache_operations_ongoing != 1) {
+ DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing));
+ }
+ } else if (op == _UMP_UK_CACHE_OP_FINISH) {
+ DBG_MSG(4, ("Cache ops finish\n"));
+ session_data->cache_operations_ongoing--;
+#if 0
+ if (session_data->has_pending_level1_cache_flush) {
+ /* This function will set has_pending_level1_cache_flush=0 */
+ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ }
+#endif
+
+ /* to be on the safe side: always flush l1 cache when cache operations are done */
+ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ DBG_MSG(4, ("Cache ops finish end\n"));
+ } else {
+ DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
+ }
+ _mali_osk_mutex_signal(session_data->lock);
+
+}
+
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
+{
+ ump_dd_mem *mem = NULL;
+ ump_uk_user old_user;
+ ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
+ ump_session_data *session_data;
+
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
+
+ session_data = (ump_session_data *)args->ctx;
+
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
+ if (NULL == mem) {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ old_user = mem->hw_device;
+ mem->hw_device = args->new_user;
+
+ DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n",
+ (ump_secure_id)args->secure_id,
+ args->new_user ? "MALI" : "CPU",
+ old_user ? "MALI" : "CPU"));
+
+ if (!mem->is_cached) {
+ DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
+ }
+
+ if (old_user == args->new_user) {
+ DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
+ }
+ if (
+ /* Previous AND new is both different from CPU */
+ (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
+ ) {
+ DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
+ }
+
+ if ((old_user != _UMP_UK_USED_BY_CPU) && (args->new_user == _UMP_UK_USED_BY_CPU)) {
+ cache_op = _UMP_UK_MSYNC_INVALIDATE;
+ DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
+#ifdef UMP_SKIP_INVALIDATION
+#error
+ DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
+ goto out;
+#endif
+ }
+
+ /* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
+ _mali_osk_mutex_wait(session_data->lock);
+ /* Actual cache flush */
+ _ump_osk_msync(mem, NULL, 0, mem->size_bytes, cache_op, session_data);
+ _mali_osk_mutex_signal(session_data->lock);
+
+out:
+ ump_random_mapping_put(mem);
+ DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
+ return;
+}
+
+void _ump_ukk_lock(_ump_uk_lock_s *args)
+{
+ ump_dd_mem *mem = NULL;
+
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
+ if (NULL == mem) {
+ DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage));
+
+ mem->lock_usage = (ump_lock_usage) args->lock_usage;
+
+ ump_random_mapping_put(mem);
+}
+
+void _ump_ukk_unlock(_ump_uk_unlock_s *args)
+{
+ ump_dd_mem *mem = NULL;
+
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
+ if (NULL == mem) {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n",
+ (u32)args->secure_id, (u32) mem->lock_usage));
+
+ mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
+
+ ump_random_mapping_put(mem);
+}
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_common.c b/drivers/parrot/gpu/ump/common/ump_kernel_common.c
new file mode 100644
index 00000000000000..e1d8b3a0275a31
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_common.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128 )
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096 )
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device));
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map = ump_random_mapping_create();
+ if (NULL == device.secure_id_map) {
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend) {
+ MSG_ERR(("Failed to create memory backend\n"));
+ ump_random_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+
+ ump_random_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open(void **context)
+{
+ struct ump_session_data *session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data) {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
+ if (NULL == session_data->lock) {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create(
+ UMP_COOKIES_PER_SESSION_INITIAL,
+ UMP_COOKIES_PER_SESSION_MAXIMUM);
+
+ if (NULL == session_data->cookies_map) {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_mutex_term(session_data->lock);
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+ /* Since initial version of the UMP interface did not use the API_VERSION ioctl we have to assume
+ that it is this version, and not the "latest" one: UMP_IOCTL_API_VERSION
+ Current and later API versions would do an additional call to this IOCTL and update this variable
+ to the correct one.*/
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void *)session_data;
+
+ session_data->cache_operations_ongoing = 0 ;
+ session_data->has_pending_level1_cache_flush = 0;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close(void **context)
+{
+ struct ump_session_data *session_data;
+ ump_session_memory_list_element *item;
+ ump_session_memory_list_element *tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data) {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list)) {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list) {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void *)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem(&unmap_args);
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT(_mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list));
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy(session_data->cookies_map);
+
+ _mali_osk_mutex_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args)
+{
+ struct ump_session_data *session_data;
+ ump_memory_allocation *descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem *mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if (NULL == session_data) {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation *) _mali_osk_calloc(1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor) {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if (UMP_DD_HANDLE_INVALID == handle) {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem *)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size) {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping(session_data->cookies_map, (void *) descriptor);
+
+ if (map_id < 0) {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if (mem->is_cached) {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ } else {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init(&descriptor->list);
+
+ err = _ump_osk_mem_mapregion_init(descriptor);
+ if (_MALI_OSK_ERR_OK != err) {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free(session_data->cookies_map, map_id);
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++) {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size) {
+ size_to_map = mem->block_array[block].size;
+ } else {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *) & (mem->block_array[block].addr), size_to_map)) {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free(session_data->cookies_map, map_id);
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term(descriptor);
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_mutex_wait(session_data->lock);
+ _mali_osk_list_add(&descriptor->list, &session_data->list_head_session_memory_mappings_list);
+ _mali_osk_mutex_signal(session_data->lock);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args)
+{
+ struct ump_session_data *session_data;
+ ump_memory_allocation *descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if (NULL == session_data) {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get(session_data->cookies_map, (int)args->cookie, (void **)&descriptor)) {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if (UMP_DD_HANDLE_INVALID == handle) {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_mutex_wait(session_data->lock);
+ _mali_osk_list_del(&descriptor->list);
+ _mali_osk_mutex_signal(session_data->lock);
+
+ ump_descriptor_mapping_free(session_data->cookies_map, (int)args->cookie);
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term(descriptor);
+ _mali_osk_free(descriptor);
+}
+
+u32 _ump_ukk_report_memory_usage(void)
+{
+ if (device.backend->stat)
+ return device.backend->stat(device.backend);
+ else
+ return 0;
+}
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_common.h b/drivers/parrot/gpu/ump/common/ump_kernel_common.h
new file mode 100644
index 00000000000000..efc6c3f68c6e21
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_common.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_COMMON_H__
+#define __UMP_KERNEL_COMMON_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_random_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+extern int ump_debug_level;
+#define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+#define UMP_DEBUG_CODE(args) args
+#define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+#define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+#define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+#define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+#define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+#define UMP_DEBUG_PRINT(args) do {} while(0)
+#define UMP_DEBUG_CODE(args)
+#define DBG_MSG(level,args) do {} while(0)
+#define DBG_MSG_IF(level,condition,args) do {} while(0)
+#define DBG_MSG_ELSE(level,args) do {} while(0)
+#define DEBUG_ASSERT(condition) do {} while(0)
+#define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
+
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data {
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_mutex_t *lock;
+ ump_descriptor_mapping *cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+ int cache_operations_ongoing;
+ int has_pending_level1_cache_flush;
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themself (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element {
+ struct ump_dd_mem *mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev {
+ ump_random_mapping *secure_id_map;
+ ump_memory_backend *backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode(_mali_osk_errcode_t err);
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_COMMON_H__ */
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.c b/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000000..c89324e608cefa
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table *descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table *table);
+
+/*
+ * Create a descriptor mapping able to hold init_entries descriptors,
+ * growable up to max_entries.  Both counts are rounded up to a multiple of
+ * BITS_PER_LONG so the usage bitmap is made of whole words.
+ * Returns NULL if any allocation fails (partial state is unwound).
+ */
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+	ump_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping));
+
+	init_entries = MALI_PAD_INT(init_entries);
+	max_entries = MALI_PAD_INT(max_entries);
+
+	if (NULL != map) {
+		map->table = descriptor_table_alloc(init_entries);
+		if (NULL != map->table) {
+			map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
+			if (NULL != map->lock) {
+				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+				map->max_nr_mappings_allowed = max_entries;
+				map->current_nr_mappings = init_entries;
+				return map;
+			}
+			/* lock creation failed: unwind the table allocation */
+			descriptor_table_free(map->table);
+		}
+		_mali_osk_free(map);
+	}
+	return NULL;
+}
+
+/*
+ * Destroy a descriptor mapping object: the table, the rw-lock and the
+ * mapping object itself are freed.  The lock is torn down here, so no
+ * other thread may still be using the map.
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map)
+{
+	descriptor_table_free(map->table);
+	_mali_osk_mutex_rw_term(map->lock);
+	_mali_osk_free(map);
+}
+
+/*
+ * Allocate a free descriptor ID and bind it to 'target'.
+ * The usage bitmap is scanned for the first clear bit; if none is free the
+ * table is doubled (bounded by max_nr_mappings_allowed) and the old
+ * contents are copied across.  Descriptor 0 is reserved at creation time,
+ * so a successful return value is always > 0; -1 is returned when the
+ * table is full or growing it fails.
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target)
+{
+	int descriptor = -1;/*-EFAULT;*/
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (descriptor == map->current_nr_mappings) {
+		int nr_mappings_new;
+		/* no free descriptor, try to expand the table */
+		ump_descriptor_table *new_table;
+		ump_descriptor_table *old_table = map->table;
+		nr_mappings_new = map->current_nr_mappings * 2;
+
+		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) {
+			descriptor = -1;
+			goto unlock_and_exit;
+		}
+
+		new_table = descriptor_table_alloc(nr_mappings_new);
+		if (NULL == new_table) {
+			descriptor = -1;
+			goto unlock_and_exit;
+		}
+
+		/* copy the old usage bitmap (current_nr_mappings bits) and the
+		 * old mapping pointers into the freshly allocated table */
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void *));
+		map->table = new_table;
+		map->current_nr_mappings = nr_mappings_new;
+		descriptor_table_free(old_table);
+	}
+
+	/* we have found a valid descriptor, set the value and usage bit */
+	_mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+	map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	return descriptor;
+}
+
+/*
+ * Look up the pointer bound to 'descriptor'.
+ * On success *target receives the stored value and 0 is returned;
+ * otherwise *target is set to NULL and -1 is returned.  Only the read
+ * side of the lock is taken since nothing is modified.
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target)
+{
+	int result = -1;/*-EFAULT;*/
+	DEBUG_ASSERT(map);
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* descriptor 0 is reserved, so valid IDs are 1..current_nr_mappings-1 */
+	if ((descriptor > 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		*target = map->table->mappings[descriptor];
+		result = 0;
+	} else *target = NULL;
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+	return result;
+}
+
+/*
+ * Replace the value stored for an already-allocated descriptor.
+ * Returns 0 on success, -1 if the descriptor is invalid or unallocated.
+ * Fixed: this function writes to the table, so the lock must be taken in
+ * RW mode; the original took it in RO mode, permitting concurrent writers.
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target)
+{
+	int result = -1;/*-EFAULT;*/
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ((descriptor > 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		map->table->mappings[descriptor] = target;
+		result = 0;
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+	return result;
+}
+
+/*
+ * Release 'descriptor' so the ID can be reused; the stored pointer is
+ * cleared.  Invalid or unallocated descriptors are silently ignored.
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor)
+{
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	if ((descriptor > 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
+		map->table->mappings[descriptor] = NULL;
+		_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/*
+ * Allocate a descriptor table for 'count' descriptors as one zeroed block:
+ * the ump_descriptor_table header, then the usage bitmap
+ * ((sizeof(unsigned long)*count)/BITS_PER_LONG bytes, i.e. count bits --
+ * callers pass counts padded to a multiple of BITS_PER_LONG), then the
+ * array of 'count' mapping pointers.  Returns NULL on allocation failure.
+ */
+static ump_descriptor_table *descriptor_table_alloc(int count)
+{
+	ump_descriptor_table *table;
+
+	table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));
+
+	if (NULL != table) {
+		/* carve the bitmap and pointer array out of the single allocation */
+		table->usage = (u32 *)((u8 *)table + sizeof(ump_descriptor_table));
+		table->mappings = (void **)((u8 *)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
+	}
+
+	return table;
+}
+
+/* Free a table from descriptor_table_alloc(); it is one single block, so a
+ * single free covers header, bitmap and pointer array. */
+static void descriptor_table_free(ump_descriptor_table *table)
+{
+	_mali_osk_free(table);
+}
+
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.h b/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000000..160e20ec56a754
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table {
+ u32 *usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void **mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping {
+ _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table *table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor);
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_memory_backend.h b/drivers/parrot/gpu/ump/common/ump_kernel_memory_backend.h
new file mode 100644
index 00000000000000..4fa4bda70c189b
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_memory_backend.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation {
+ void *phys_addr;
+ void *mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void *process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data *ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+/*
+ * A memory backend implements the actual allocation of UMP buffer backing
+ * memory.  One backend instance is active at runtime (device.backend).
+ */
+typedef struct ump_memory_backend {
+	int (*allocate)(void *ctx, ump_dd_mem *descriptor); /* non-zero on success (see _ump_ukk_allocate) */
+	void (*release)(void *ctx, ump_dd_mem *descriptor);
+	void (*shutdown)(struct ump_memory_backend *backend);
+	u32(*stat)(struct ump_memory_backend *backend); /* optional: may be NULL, callers must check */
+	int (*pre_allocate_physical_check)(void *ctx, u32 size);
+	u32(*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+	void *ctx; /* backend-private context handed back to every hook */
+} ump_memory_backend;
+
+ump_memory_backend *ump_memory_backend_create(void);
+void ump_memory_backend_destroy(void);
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_ref_drv.c b/drivers/parrot/gpu/ump/common/ump_kernel_ref_drv.c
new file mode 100644
index 00000000000000..3898218317a138
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_ref_drv.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor);
+
+/*
+ * Create a UMP handle wrapping caller-supplied physical memory blocks.
+ * Every block's address and size must be aligned to UMP_MINIMUM_SIZE
+ * (4096); otherwise UMP_DD_HANDLE_INVALID is returned.  The block array is
+ * copied, the new allocation starts with a reference count of 1, is forced
+ * uncached, and is registered in the global secure-ID map.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block *blocks, unsigned long num_blocks)
+{
+	ump_dd_mem *mem;
+	unsigned long size_total = 0;
+	int ret;
+	u32 i;
+
+	/* Go through the input blocks and verify that they are sane */
+	/* NOTE(review): size_total has no overflow check and num_blocks == 0 is
+	 * accepted -- confirm callers never pass an empty or huge block list */
+	for (i = 0; i < num_blocks; i++) {
+		unsigned long addr = blocks[i].addr;
+		unsigned long size = blocks[i].size;
+
+		DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+		size_total += blocks[i].size;
+
+		if (0 != UMP_ADDR_ALIGN_OFFSET(addr)) {
+			MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+			return UMP_DD_HANDLE_INVALID;
+		}
+
+		if (0 != UMP_ADDR_ALIGN_OFFSET(size)) {
+			MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+			return UMP_DD_HANDLE_INVALID;
+		}
+	}
+
+	/* Allocate the ump_dd_mem struct for this allocation */
+	mem = _mali_osk_malloc(sizeof(*mem));
+	if (NULL == mem) {
+		DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+		return UMP_DD_HANDLE_INVALID;
+	}
+
+	/* Now, make a copy of the block information supplied by the user */
+	mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block) * num_blocks);
+	if (NULL == mem->block_array) {
+		_mali_osk_free(mem);
+		DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+		return UMP_DD_HANDLE_INVALID;
+	}
+
+	_mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+	/* And setup the rest of the ump_dd_mem struct */
+	_mali_osk_atomic_init(&mem->ref_count, 1);
+	mem->size_bytes = size_total;
+	mem->nr_blocks = num_blocks;
+	mem->backend_info = NULL;
+	mem->ctx = NULL;
+	mem->release_func = phys_blocks_release;
+	/* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
+	mem->is_cached = 0;
+	mem->hw_device = _UMP_UK_USED_BY_CPU;
+	mem->lock_usage = UMP_NOT_LOCKED;
+
+	/* Find a secure ID for this allocation */
+	ret = ump_random_mapping_insert(device.secure_id_map, mem);
+	if (unlikely(ret)) {
+		/* unwind both allocations; no reference was ever handed out */
+		_mali_osk_free(mem->block_array);
+		_mali_osk_free(mem);
+		DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+		return UMP_DD_HANDLE_INVALID;
+	}
+
+	DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+	return (ump_dd_handle)mem;
+}
+
+/*
+ * Release callback for handles made by ump_dd_handle_create_from_phys_blocks().
+ * Only the copied block array is freed; the underlying physical memory is
+ * owned by the caller.  'ctx' is unused (these handles set ctx to NULL).
+ */
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor)
+{
+	_mali_osk_free(descriptor->block_array);
+	descriptor->block_array = NULL;
+}
+
+/*
+ * Handle a user-space UMP allocation request.
+ * Allocates the tracking structures, asks the active memory backend for
+ * page-aligned backing memory, links the allocation into the session's
+ * memory list and assigns it a secure ID.  On success the secure ID and
+ * the (rounded-up) size are written back into 'user_interaction'.
+ */
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
+{
+	ump_session_data *session_data = NULL;
+	ump_dd_mem *new_allocation = NULL;
+	ump_session_memory_list_element *session_memory_element = NULL;
+	int ret;
+
+	DEBUG_ASSERT_POINTER(user_interaction);
+	DEBUG_ASSERT_POINTER(user_interaction->ctx);
+
+	session_data = (ump_session_data *) user_interaction->ctx;
+
+	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
+	if (NULL == session_memory_element) {
+		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+
+	new_allocation = _mali_osk_calloc(1, sizeof(ump_dd_mem));
+	if (NULL == new_allocation) {
+		_mali_osk_free(session_memory_element);
+		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	/* Initialize the part of the new_allocation that we know so for */
+	_mali_osk_atomic_init(&new_allocation->ref_count, 1);
+	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
+		new_allocation->is_cached = 0;
+	else new_allocation->is_cached = 1;
+
+	/* Special case a size of 0, we should try to emulate what malloc does
+	 * in this case, which is to return a valid pointer that must be freed,
+	 * but can't be dereferenced */
+	if (0 == user_interaction->size) {
+		/* Emulate by actually allocating the minimum block size */
+		user_interaction->size = 1;
+	}
+
+	/* Page align the size */
+	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size);
+	new_allocation->lock_usage = UMP_NOT_LOCKED;
+
+	/* Now, ask the active memory backend to do the actual memory allocation */
+	if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
+		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
+			    new_allocation->size_bytes,
+			    (unsigned long)user_interaction->size));
+		_mali_osk_free(new_allocation);
+		_mali_osk_free(session_memory_element);
+		return _MALI_OSK_ERR_INVALID_FUNC;
+	}
+	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
+	new_allocation->ctx = device.backend->ctx;
+	new_allocation->release_func = device.backend->release;
+
+	/* Initialize the session_memory_element, and add it to the session object */
+	session_memory_element->mem = new_allocation;
+	_mali_osk_mutex_wait(session_data->lock);
+	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+	_mali_osk_mutex_signal(session_data->lock);
+
+	/* Create a secure ID for this allocation */
+	ret = ump_random_mapping_insert(device.secure_id_map, new_allocation);
+	if (unlikely(ret)) {
+		/* Fixed: the element was linked into the session list above; unlink
+		 * it before freeing, otherwise the list keeps a dangling entry that
+		 * is dereferenced (freed memory) at session teardown. */
+		_mali_osk_mutex_wait(session_data->lock);
+		_mali_osk_list_del(&(session_memory_element->list));
+		_mali_osk_mutex_signal(session_data->lock);
+		new_allocation->release_func(new_allocation->ctx, new_allocation);
+		_mali_osk_free(session_memory_element);
+		_mali_osk_free(new_allocation);
+		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+		return _MALI_OSK_ERR_INVALID_FUNC;
+	}
+
+	user_interaction->secure_id = new_allocation->secure_id;
+	user_interaction->size = new_allocation->size_bytes;
+	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
+		    new_allocation->secure_id,
+		    new_allocation->size_bytes));
+
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/ump/common/ump_kernel_types.h b/drivers/parrot/gpu/ump/common/ump_kernel_types.h
new file mode 100644
index 00000000000000..7718956175ca28
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_kernel_types.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+#include <linux/rbtree.h>
+
+typedef enum {
+ UMP_USED_BY_CPU = 0,
+ UMP_USED_BY_MALI = 1,
+ UMP_USED_BY_UNKNOWN_DEVICE = 100,
+} ump_hw_usage;
+
+typedef enum {
+ UMP_NOT_LOCKED = 0,
+ UMP_READ = 1,
+ UMP_READ_WRITE = 3,
+} ump_lock_usage;
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem {
+ struct rb_node node;
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block *block_array;
+ void (*release_func)(void *ctx, struct ump_dd_mem *descriptor);
+ void *ctx;
+ void *backend_info;
+ int is_cached;
+ ump_hw_usage hw_device;
+ ump_lock_usage lock_usage;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/parrot/gpu/ump/common/ump_osk.h b/drivers/parrot/gpu/ump/common/ump_osk.h
new file mode 100644
index 00000000000000..5759ddb719be4b
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_osk.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include "ump_uk_types.h"
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_mali_osk_errcode_t _ump_osk_init(void);
+
+_mali_osk_errcode_t _ump_osk_term(void);
+
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom);
+
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom);
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor);
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size);
+
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor);
+
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/parrot/gpu/ump/common/ump_uk_types.h b/drivers/parrot/gpu/ump/common/ump_uk_types.h
new file mode 100644
index 00000000000000..48b588f8e2b76a
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_uk_types.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(3)
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+ _UMP_IOC_CACHE_OPERATIONS_CONTROL,
+ _UMP_IOC_SWITCH_HW_USAGE,
+ _UMP_IOC_LOCK,
+ _UMP_IOC_UNLOCK,
+} _ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_INVALIDATE = 2,
+ _UMP_UK_MSYNC_FLUSH_L1 = 3,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+typedef enum
+{
+ _UMP_UK_CACHE_OP_START = 0,
+ _UMP_UK_CACHE_OP_FINISH = 1,
+} ump_uk_cache_op_control;
+
+typedef enum
+{
+ _UMP_UK_READ = 1,
+ _UMP_UK_READ_WRITE = 3,
+} ump_uk_lock_usage;
+
+typedef enum
+{
+ _UMP_UK_USED_BY_CPU = 0,
+ _UMP_UK_USED_BY_MALI = 1,
+ _UMP_UK_USED_BY_UNKNOWN_DEVICE = 100,
+} ump_uk_user;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] contraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void *_ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void *_ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+typedef struct _ump_uk_cache_operations_control_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ ump_uk_cache_op_control op; /**< [in] cache operations start/stop */
+} _ump_uk_cache_operations_control_s;
+
+
+typedef struct _ump_uk_switch_hw_usage_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_user new_user; /**< [in] cookie stored with reference to the kernel mapping internals */
+
+} _ump_uk_switch_hw_usage_s;
+
+typedef struct _ump_uk_lock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_lock_usage lock_usage;
+} _ump_uk_lock_s;
+
+typedef struct _ump_uk_unlock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+} _ump_uk_unlock_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/ump/common/ump_ukk.h b/drivers/parrot/gpu/ump/common/ump_ukk.h
new file mode 100644
index 00000000000000..da7917a893ca48
--- /dev/null
+++ b/drivers/parrot/gpu/ump/common/ump_ukk.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open(void **context);
+
+_mali_osk_errcode_t _ump_ukk_close(void **context);
+
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction);
+
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info);
+
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction);
+
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args);
+
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args);
+
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args);
+
+void _ump_ukk_msync(_ump_uk_msync_s *args);
+
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args);
+
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args);
+
+void _ump_ukk_lock(_ump_uk_lock_s *args);
+
+void _ump_ukk_unlock(_ump_uk_unlock_s *args);
+
+u32 _ump_ukk_report_memory_usage(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface.h b/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface.h
new file mode 100755
index 00000000000000..3f47637606433e
--- /dev/null
+++ b/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void *ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system-wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function.
+ *
+ * @return UMP_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block *blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block *block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
diff --git a/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface_ref_drv.h b/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface_ref_drv.h
new file mode 100755
index 00000000000000..75a78ebc1598ab
--- /dev/null
+++ b/drivers/parrot/gpu/ump/include/ump/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_ref_drv.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block *blocks, unsigned long num_blocks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
diff --git a/drivers/parrot/gpu/ump/include/ump/ump_kernel_platform.h b/drivers/parrot/gpu/ump/include/ump/ump_kernel_platform.h
new file mode 100755
index 00000000000000..eca30c6c0cf3fd
--- /dev/null
+++ b/drivers/parrot/gpu/ump/include/ump/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/license/gpl/ump_kernel_license.h b/drivers/parrot/gpu/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100755
index 00000000000000..567d803b312e42
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/ump_ioctl.h b/drivers/parrot/gpu/ump/linux/ump_ioctl.h
new file mode 100644
index 00000000000000..239e8ab33db5f8
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_ioctl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include <ump_uk_types.h>
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
+
+#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR, _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
+#define UMP_IOC_SWITCH_HW_USAGE _IOW(UMP_IOCTL_NR, _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
+#define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s)
+#define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_linux.c b/drivers/parrot/gpu/ump/linux/ump_kernel_linux.c
new file mode 100644
index 00000000000000..c99caa6dff54bb
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_linux.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+#if UMP_LICENSE_IS_GPL
+static struct dentry *ump_debugfs_dir = NULL;
+#endif
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker {
+ int references;
+ ump_dd_handle handle;
+} ump_vma_usage_tracker;
+
+struct ump_device {
+ struct cdev cdev;
+#if UMP_LICENSE_IS_GPL
+ struct class *ump_class;
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops = {
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver.
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err) {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err);
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver.
+ */
+static void ump_cleanup_module(void)
+{
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+static ssize_t ump_memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 mem = _ump_ukk_report_memory_usage();
+
+ r = snprintf(buf, 64, "%u\n", mem);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ump_memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .read = ump_memory_used_read,
+};
+
+/*
+ * Initialize the UMP device driver.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+#if UMP_LICENSE_IS_GPL
+ ump_debugfs_dir = debugfs_create_dir(ump_dev_name, NULL);
+ if (ERR_PTR(-ENODEV) == ump_debugfs_dir) {
+ ump_debugfs_dir = NULL;
+ } else {
+ debugfs_create_file("memory_usage", 0400, ump_debugfs_dir, NULL, &ump_memory_usage_fops);
+ }
+#endif
+
+ if (0 == ump_major) {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ } else {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err) {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err) {
+
+#if UMP_LICENSE_IS_GPL
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class)) {
+ err = PTR_ERR(ump_device.ump_class);
+ } else {
+ struct device *mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev)) {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+
+#if UMP_LICENSE_IS_GPL
+ if (ump_debugfs_dir)
+ debugfs_remove_recursive(ump_debugfs_dir);
+#endif
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data *session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void **) &session_data);
+ if (_MALI_OSK_ERR_OK != err) {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode(err);
+ }
+
+ filp->private_data = (void *)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void **) &filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user *argument;
+ struct ump_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data) {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd) {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_CACHE_OPERATIONS_CONTROL:
+ err = ump_cache_operations_control_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SWITCH_HW_USAGE:
+ err = ump_switch_hw_usage_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_LOCK:
+ err = ump_lock_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_UNLOCK:
+ err = ump_unlock_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+int map_errcode(_mali_osk_errcode_t err)
+{
+ switch (err) {
+ case _MALI_OSK_ERR_OK :
+ return 0;
+ case _MALI_OSK_ERR_FAULT:
+ return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC:
+ return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS:
+ return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM:
+ return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL:
+ return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EFAULT;
+ }
+}
+
+/*
+ * Handle from OS to map specified virtual memory to specified UMP memory.
+ */
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data *session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data) {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
+ args.secure_id = vma->vm_pgoff;
+ args.is_cached = 0;
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ args.is_cached = 1;
+ vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+ /* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
+ vma->vm_flags |= VM_DONTCOPY;
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem(&args);
+ if (_MALI_OSK_ERR_OK != err) {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+ return map_errcode(err);
+ }
+
+ return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_linux.h b/drivers/parrot/gpu/ump/linux/ump_kernel_linux.h
new file mode 100644
index 00000000000000..c071b77b2cabfe
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_LINUX_H__
+#define __UMP_KERNEL_LINUX_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644
index 00000000000000..32ea8b0b070a36
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
+
+
+
+typedef struct block_info {
+ struct block_info *next;
+} block_info;
+
+
+
+typedef struct block_allocator {
+ struct semaphore mutex;
+ block_info *all_blocks;
+ block_info *first_free;
+ u32 base;
+ u32 num_blocks;
+ u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend *backend);
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem);
+static void block_allocator_release(void *ctx, ump_dd_mem *handle);
+static inline u32 get_phys(block_allocator *allocator, block_info *block);
+static u32 block_allocator_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create dedicated memory backend
+ */
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size)
+{
+	ump_memory_backend *backend;
+	block_allocator *allocator;
+	u32 usable_size;
+	u32 num_blocks;
+
+	/* Round the managed size up to a whole number of UMP blocks.
+	 * NOTE(review): this rounds UP, so for an unaligned size the last
+	 * block extends past base_address + size — confirm callers reserve a
+	 * block-aligned region. */
+	usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+	num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+	if (0 == usable_size) {
+		DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+		return NULL;
+	}
+
+	DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+	DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+	backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+	if (NULL != backend) {
+		allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+		if (NULL != allocator) {
+			/* One block_info per block; linked into the free list below. */
+			allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+			if (NULL != allocator->all_blocks) {
+				int i;
+
+				allocator->first_free = NULL;
+				allocator->num_blocks = num_blocks;
+				allocator->num_free = num_blocks;
+				allocator->base = base_address;
+				sema_init(&allocator->mutex, 1);
+
+				/* Push every block onto the singly linked free list. */
+				for (i = 0; i < num_blocks; i++) {
+					allocator->all_blocks[i].next = allocator->first_free;
+					allocator->first_free = &allocator->all_blocks[i];
+				}
+
+				/* Wire up the backend operation table. */
+				backend->ctx = allocator;
+				backend->allocate = block_allocator_allocate;
+				backend->release = block_allocator_release;
+				backend->shutdown = block_allocator_shutdown;
+				backend->stat = block_allocator_stat;
+				backend->pre_allocate_physical_check = NULL;
+				backend->adjust_to_mali_phys = NULL;
+
+				return backend;
+			}
+			/* Unwind partial allocations on failure. */
+			kfree(allocator);
+		}
+		kfree(backend);
+	}
+
+	return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ */
+static void block_allocator_shutdown(ump_memory_backend *backend)
+{
+	block_allocator *alloc;
+
+	BUG_ON(NULL == backend);
+	BUG_ON(NULL == backend->ctx);
+
+	alloc = (block_allocator *)backend->ctx;
+
+	/* Warn if not every block was returned before teardown. */
+	DBG_MSG_IF(1, alloc->num_free != alloc->num_blocks, ("%u blocks still in use during shutdown\n", alloc->num_blocks - alloc->num_free));
+
+	/* Release the block table, the allocator state and the backend. */
+	kfree(alloc->all_blocks);
+	kfree(alloc);
+	kfree(backend);
+}
+
+
+
+/*
+ * Satisfy a UMP allocation from the dedicated block pool.
+ * Returns 1 on success, 0 on failure; on success mem->block_array and
+ * mem->backend_info (chain of allocated blocks) are filled in.
+ */
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem)
+{
+	block_allocator *allocator;
+	u32 left;
+	block_info *last_allocated = NULL;
+	int i = 0;
+
+	BUG_ON(!ctx);
+	BUG_ON(!mem);
+
+	allocator = (block_allocator *)ctx;
+	left = mem->size_bytes;
+
+	BUG_ON(!left);
+
+	/* Number of blocks needed to cover the request, rounded up. */
+	mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+	mem->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+	if (NULL == mem->block_array) {
+		MSG_ERR(("Failed to allocate block array\n"));
+		return 0;
+	}
+
+	if (down_interruptible(&allocator->mutex)) {
+		MSG_ERR(("Could not get mutex to do block_allocate\n"));
+		/* Fixed: do not leak the block array allocated above. */
+		vfree(mem->block_array);
+		mem->block_array = NULL;
+		return 0;
+	}
+
+	mem->size_bytes = 0;
+
+	/* Pop blocks off the free list until the request is covered. */
+	while ((left > 0) && (allocator->first_free)) {
+		block_info *block;
+
+		block = allocator->first_free;
+		allocator->first_free = allocator->first_free->next;
+		block->next = last_allocated;
+		last_allocated = block;
+		allocator->num_free--;
+
+		mem->block_array[i].addr = get_phys(allocator, block);
+		mem->block_array[i].size = UMP_BLOCK_SIZE;
+		mem->size_bytes += UMP_BLOCK_SIZE;
+
+		i++;
+
+		if (left < UMP_BLOCK_SIZE) left = 0;
+		else left -= UMP_BLOCK_SIZE;
+	}
+
+	if (left) {
+		block_info *block;
+		/* Pool exhausted: release all memory back to the pool. */
+		while (last_allocated) {
+			block = last_allocated->next;
+			last_allocated->next = allocator->first_free;
+			allocator->first_free = last_allocated;
+			last_allocated = block;
+			allocator->num_free++;
+		}
+
+		vfree(mem->block_array);
+		mem->backend_info = NULL;
+		mem->block_array = NULL;
+
+		DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+		up(&allocator->mutex);
+
+		return 0;
+	}
+
+	/* Remember the chain head so release can walk it back to the pool. */
+	mem->backend_info = last_allocated;
+
+	up(&allocator->mutex);
+	mem->is_cached = 0;
+
+	return 1;
+}
+
+
+
+/*
+ * Return every block of a UMP allocation to the dedicated pool and free the
+ * associated block array.
+ */
+static void block_allocator_release(void *ctx, ump_dd_mem *handle)
+{
+	block_allocator *allocator;
+	block_info *block, * next;
+
+	BUG_ON(!ctx);
+	BUG_ON(!handle);
+
+	allocator = (block_allocator *)ctx;
+	block = (block_info *)handle->backend_info;
+	BUG_ON(!block);
+
+	if (down_interruptible(&allocator->mutex)) {
+		MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+		return;
+	}
+
+	/* Walk the allocation's chain and push each block back on the free list. */
+	while (block) {
+		next = block->next;
+
+		/* Fixed: upper-bound check was off by one — the last valid entry
+		 * is all_blocks[num_blocks - 1], so anything at or beyond
+		 * all_blocks + num_blocks is out of range. */
+		BUG_ON((block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));
+
+		block->next = allocator->first_free;
+		allocator->first_free = block;
+		allocator->num_free++;
+
+		block = next;
+	}
+	DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+	up(&allocator->mutex);
+
+	vfree(handle->block_array);
+	handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory block
+ */
+static inline u32 get_phys(block_allocator *allocator, block_info *block)
+{
+	/* Block index within the table determines its offset from the base. */
+	u32 index = block - allocator->all_blocks;
+
+	return allocator->base + index * UMP_BLOCK_SIZE;
+}
+
+static u32 block_allocator_stat(struct ump_memory_backend *backend)
+{
+	block_allocator *alloc;
+
+	BUG_ON(NULL == backend);
+	alloc = (block_allocator *)backend->ctx;
+	BUG_ON(NULL == alloc);
+
+	/* Bytes currently handed out = used block count times block size. */
+	return UMP_BLOCK_SIZE * (alloc->num_blocks - alloc->num_free);
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644
index 00000000000000..5a5a4a315555e8
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.c b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644
index 00000000000000..86a8132e3b231e
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+typedef struct os_allocator {
+ struct semaphore mutex;
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void *ctx, ump_dd_mem *descriptor);
+static int os_allocate(void *ctx, ump_dd_mem *descriptor);
+static void os_memory_backend_destroy(ump_memory_backend *backend);
+static u32 os_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create OS memory backend
+ */
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation)
+{
+	ump_memory_backend *backend;
+	os_allocator *info;
+
+	info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+	if (NULL == info) {
+		return NULL;
+	}
+
+	/* Convert the byte budget into a page-count limit. */
+	info->num_pages_max = max_allocation >> PAGE_SHIFT;
+	info->num_pages_allocated = 0;
+
+	sema_init(&info->mutex, 1);
+
+	backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+	if (NULL == backend) {
+		kfree(info);
+		return NULL;
+	}
+
+	/* Wire up the backend operation table for the OS allocator. */
+	backend->ctx = info;
+	backend->allocate = os_allocate;
+	backend->release = os_free;
+	backend->shutdown = os_memory_backend_destroy;
+	backend->stat = os_stat;
+	backend->pre_allocate_physical_check = NULL;
+	backend->adjust_to_mali_phys = NULL;
+
+	return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend
+ */
+static void os_memory_backend_destroy(ump_memory_backend *backend)
+{
+	os_allocator *allocator = (os_allocator *)backend->ctx;
+
+	/* Warn about pages that were never returned before teardown. */
+	DBG_MSG_IF(1, 0 != allocator->num_pages_allocated, ("%d pages still in use during shutdown\n", allocator->num_pages_allocated));
+
+	kfree(allocator);
+	kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ */
+/*
+ * Allocate UMP memory page by page from the OS.
+ * Returns 1 on success, 0 on failure; on success descriptor->block_array
+ * lists one physical block per page.
+ */
+static int os_allocate(void *ctx, ump_dd_mem *descriptor)
+{
+	u32 left;
+	os_allocator *info;
+	int pages_allocated = 0;
+	int is_cached;
+
+	BUG_ON(!descriptor);
+	BUG_ON(!ctx);
+
+	info = (os_allocator *)ctx;
+	left = descriptor->size_bytes;
+	is_cached = descriptor->is_cached;
+
+	if (down_interruptible(&info->mutex)) {
+		/* Fixed: message previously named the wrong function (os_free). */
+		DBG_MSG(1, ("Failed to get mutex in os_allocate\n"));
+		return 0; /* failure */
+	}
+
+	descriptor->backend_info = NULL;
+	/* One block per page, rounding the requested size up. */
+	descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+	descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+	if (NULL == descriptor->block_array) {
+		up(&info->mutex);
+		DBG_MSG(1, ("Block array could not be allocated\n"));
+		return 0; /* failure */
+	}
+
+	/* Grab pages until the request is covered or the budget is reached. */
+	while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max)) {
+		struct page *new_page;
+
+		if (is_cached) {
+			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+		} else {
+			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+		}
+		if (NULL == new_page) {
+			break;
+		}
+
+		/* Uncached pages are mapped for device DMA up front. */
+		if (is_cached) {
+			descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+		} else {
+			descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+		}
+
+		DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+		if (left < PAGE_SIZE) {
+			left = 0;
+		} else {
+			left -= PAGE_SIZE;
+		}
+
+		pages_allocated++;
+	}
+
+	/* Fixed: format string had three conversions but only two arguments. */
+	DBG_MSG(5, ("Alloc for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated, is_cached));
+
+	if (left) {
+		DBG_MSG(1, ("Failed to allocate needed pages\n"));
+
+		/* Roll back: unmap and free every page obtained so far. */
+		while (pages_allocated) {
+			pages_allocated--;
+			if (!is_cached) {
+				dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+			}
+			__free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT));
+		}
+
+		/* Fixed: the block array was leaked on this failure path. */
+		vfree(descriptor->block_array);
+		descriptor->block_array = NULL;
+
+		up(&info->mutex);
+
+		return 0; /* failure */
+	}
+
+	info->num_pages_allocated += pages_allocated;
+
+	DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+	up(&info->mutex);
+
+	return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ */
+static void os_free(void *ctx, ump_dd_mem *descriptor)
+{
+	os_allocator *info;
+	int i;
+
+	BUG_ON(!ctx);
+	BUG_ON(!descriptor);
+
+	info = (os_allocator *)ctx;
+
+	/* An allocation can never hold more pages than the backend handed out. */
+	BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+	if (down_interruptible(&info->mutex)) {
+		DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+		/* NOTE(review): returning here leaks the pages and the block
+		 * array — confirm this is the intended trade-off. */
+		return;
+	}
+
+	DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+	/* The page counter is updated under the mutex; the pages themselves
+	 * are released after dropping it. */
+	info->num_pages_allocated -= descriptor->nr_blocks;
+
+	up(&info->mutex);
+
+	for (i = 0; i < descriptor->nr_blocks; i++) {
+		DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+		/* Uncached pages were DMA-mapped at allocation time; undo that first. */
+		if (! descriptor->is_cached) {
+			dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+		__free_page(pfn_to_page(descriptor->block_array[i].addr >> PAGE_SHIFT));
+	}
+
+	vfree(descriptor->block_array);
+}
+
+
+static u32 os_stat(struct ump_memory_backend *backend)
+{
+	os_allocator *allocator = (os_allocator *)backend->ctx;
+
+	/* Report the total number of bytes currently allocated from the OS. */
+	return allocator->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.h b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644
index 00000000000000..9ac3cb7a35d879
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.c b/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.c
new file mode 100644
index 00000000000000..b66e04af3acc6d
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_types.h"
+#include "ump_kernel_random_mapping.h"
+
+#include <linux/random.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+
+
+/* Standard rbtree lookup keyed on the secure ID; NULL if not found. */
+static ump_dd_mem *search(struct rb_root *root, int id)
+{
+	struct rb_node *cur = root->rb_node;
+
+	while (NULL != cur) {
+		ump_dd_mem *entry = container_of(cur, ump_dd_mem, node);
+
+		if (id < entry->secure_id) {
+			cur = cur->rb_left;
+		} else if (id > entry->secure_id) {
+			cur = cur->rb_right;
+		} else {
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/* Insert mem into the rbtree under the given ID; MALI_FALSE if ID in use. */
+static mali_bool insert(struct rb_root *root, int id, ump_dd_mem *mem)
+{
+	struct rb_node **link = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	/* Walk down to the insertion point, keyed on the secure ID. */
+	while (NULL != *link) {
+		ump_dd_mem *entry = container_of(*link, ump_dd_mem, node);
+
+		parent = *link;
+		if (id < entry->secure_id) {
+			link = &(*link)->rb_left;
+		} else if (id > entry->secure_id) {
+			link = &(*link)->rb_right;
+		} else {
+			printk(KERN_ERR "UMP: ID already used %x\n", id);
+			return MALI_FALSE;
+		}
+	}
+
+	rb_link_node(&mem->node, parent, link);
+	rb_insert_color(&mem->node, root);
+
+	return MALI_TRUE;
+}
+
+
+/*
+ * Create a random ID -> descriptor mapping object.
+ * Returns the mapping, or NULL on allocation/lock failure.
+ */
+ump_random_mapping *ump_random_mapping_create(void)
+{
+	ump_random_mapping *map = _mali_osk_calloc(1, sizeof(ump_random_mapping));
+
+	if (NULL == map)
+		return NULL;
+
+	map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED,
+					    _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+	if (NULL == map->lock) {
+		/* Fixed: the mapping object was leaked when lock creation failed. */
+		_mali_osk_free(map);
+		return NULL;
+	}
+
+	map->root = RB_ROOT;
+#if UMP_RANDOM_MAP_DELAY
+	map->failed.count = 0;
+	map->failed.timestamp = jiffies;
+#endif
+	return map;
+}
+
+void ump_random_mapping_destroy(ump_random_mapping *map)
+{
+	/* Tear down the lock first, then the mapping object itself. */
+	_mali_osk_mutex_rw_term(map->lock);
+	_mali_osk_free(map);
+}
+
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem)
+{
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	/* Draw random IDs until one is both valid and unused in the tree;
+	 * on success the chosen ID is stored in mem->secure_id. */
+	while (1) {
+		u32 id;
+
+		get_random_bytes(&id, sizeof(id));
+
+		/* Try a new random number if id happened to be the invalid
+		 * secure ID (-1). */
+		if (unlikely(id == UMP_INVALID_SECURE_ID))
+			continue;
+
+		/* Insert into the tree. If the id was already in use, get a
+		 * new random id and try again. */
+		if (insert(&map->root, id, mem)) {
+			mem->secure_id = id;
+			break;
+		}
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return 0;
+}
+
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id)
+{
+	ump_dd_mem *mem = NULL;
+#if UMP_RANDOM_MAP_DELAY
+	int do_delay = 0;
+#endif
+
+	DEBUG_ASSERT(map);
+
+	/* Look the ID up under the read lock and take a reference while still
+	 * holding it, so the entry cannot disappear underneath the caller. */
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	mem = search(&map->root, id);
+
+	if (unlikely(NULL == mem)) {
+#if UMP_RANDOM_MAP_DELAY
+		/* Track failed lookups so an attacker probing random IDs can
+		 * be slowed down below. */
+		map->failed.count++;
+
+		if (time_is_before_jiffies(map->failed.timestamp +
+					   UMP_FAILED_LOOKUP_DELAY * HZ))
+		{
+			/* If it is a long time since last failure, reset
+			 * the counter and skip the delay this time. */
+			map->failed.count = 0;
+		} else if (map->failed.count > UMP_FAILED_LOOKUPS_ALLOWED) {
+			do_delay = 1;
+		}
+
+		map->failed.timestamp = jiffies;
+#endif /* UMP_RANDOM_MAP_DELAY */
+	} else {
+		ump_dd_reference_add(mem);
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+
+#if UMP_RANDOM_MAP_DELAY
+	if (do_delay) {
+		/* Apply delay outside the lock so other lookups can proceed. */
+		schedule_timeout_killable(UMP_FAILED_LOOKUP_DELAY);
+	}
+#endif /* UMP_RANDOM_MAP_DELAY */
+
+	return mem;
+}
+
+/* Caller must hold map->lock for writing. */
+static ump_dd_mem *ump_random_mapping_remove_internal(ump_random_mapping *map, int id)
+{
+	ump_dd_mem *mem = search(&map->root, id);
+
+	/* Unlink the entry from the tree if the ID was found. */
+	if (NULL != mem) {
+		rb_erase(&mem->node, &map->root);
+	}
+
+	return mem;
+}
+
+void ump_random_mapping_put(ump_dd_mem *mem)
+{
+	int new_ref;
+
+	/* Drop a reference under the global map write lock so that the final
+	 * release and the tree removal are atomic with respect to lookups. */
+	_mali_osk_mutex_rw_wait(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+	DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n",
+		    mem->secure_id, new_ref));
+
+	if (0 == new_ref) {
+		DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+		/* Last reference gone: unmap the ID, call the backend release
+		 * hook, then free the descriptor itself. */
+		ump_random_mapping_remove_internal(device.secure_id_map, mem->secure_id);
+
+		mem->release_func(mem->ctx, mem);
+		_mali_osk_free(mem);
+	}
+
+	_mali_osk_mutex_rw_signal(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int descriptor)
+{
+	ump_dd_mem *removed;
+
+	/* Serialize against concurrent tree updates. */
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	removed = ump_random_mapping_remove_internal(map, descriptor);
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return removed;
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.h b/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.h
new file mode 100644
index 00000000000000..7f89ef827b6f18
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_kernel_random_mapping.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_random_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_RANDOM_MAPPING_H__
+#define __UMP_KERNEL_RANDOM_MAPPING_H__
+
+#include "mali_osk.h"
+#include <linux/rbtree.h>
+
+#define UMP_RANDOM_MAP_DELAY 1
+#define UMP_FAILED_LOOKUP_DELAY 10 /* ms */
+#define UMP_FAILED_LOOKUPS_ALLOWED 10 /* number of allowed failed lookups */
+
+/**
+ * The random mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_random_mapping {
+ _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+ struct rb_root root;
+#if UMP_RANDOM_MAP_DELAY
+ struct {
+ unsigned long count;
+ unsigned long timestamp;
+ } failed;
+#endif
+} ump_random_mapping;
+
+/**
+ * Create a random mapping object
+ * Create a random mapping capable of holding 2^20 entries
+ * @return Pointer to a random mapping object, NULL on failure
+ */
+ump_random_mapping *ump_random_mapping_create(void);
+
+/**
+ * Destroy a random mapping object
+ * @param map The map to free
+ */
+void ump_random_mapping_destroy(ump_random_mapping *map);
+
+/**
+ * Allocate a new mapping entry (random ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param mem The memory descriptor to map; its secure_id field receives the new random ID
+ * @return 0 on success, a negative value on error
+ */
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem);
+
+/**
+ * Get the memory descriptor mapped to by a random ID
+ *
+ * If the lookup fails, punish the calling thread by applying a delay.
+ *
+ * @param map The map to look the random ID up in
+ * @param id The ID to look up
+ * @return ump_dd_mem pointer (with its reference count incremented) on
+ *         successful lookup, NULL on error
+ */
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id);
+
+void ump_random_mapping_put(ump_dd_mem *mem);
+
+/**
+ * Free the random ID
+ * For the random to be reused it has to be freed
+ * @param map The map to free the random from
+ * @param id The ID to free
+ */
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int id);
+
+#endif /* __UMP_KERNEL_RANDOM_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/ump_memory_backend.c b/drivers/parrot/gpu/ump/linux/ump_memory_backend.c
new file mode 100644
index 00000000000000..b0915435e51b4f
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_memory_backend.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
+
+ump_memory_backend *ump_memory_backend_create(void)
+{
+	ump_memory_backend *backend = NULL;
+
+	/* Create the dynamic memory allocator backend selected by the
+	 * ump_backend module parameter. */
+	if (0 == ump_backend) {
+		DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+		DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+		/* Ask the OS if we can use the specified physical memory */
+		if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory")) {
+			MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+			return NULL;
+		}
+		backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+	} else if (1 == ump_backend) {
+		DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+		backend = ump_os_memory_backend_create(ump_memory_size);
+	}
+
+	/* NOTE(review): any other ump_backend value silently returns NULL. */
+	return backend;
+}
+
+void ump_memory_backend_destroy(void)
+{
+	/* Only the dedicated backend reserved a physical memory region. */
+	if (0 == ump_backend) {
+		DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+		release_mem_region(ump_memory_address, ump_memory_size);
+	}
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_osk_atomics.c b/drivers/parrot/gpu/ump/linux/ump_osk_atomics.c
new file mode 100644
index 00000000000000..04934234ee73ad
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+/* Atomically decrement the counter and return the new value. */
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom)
+{
+	return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/* Atomically increment the counter and return the new value. */
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom)
+{
+	return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_osk_low_level_mem.c b/drivers/parrot/gpu/ump/linux/ump_osk_low_level_mem.c
new file mode 100644
index 00000000000000..3cd429adb047ab
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/uaccess.h> /* to verify pointers from user space */
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker {
+ atomic_t references;
+ ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct *vma);
+static void ump_vma_close(struct vm_area_struct *vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops = {
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	void __user *address;
+	address = vmf->virtual_address;
+#endif
+	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+	/* Cast so %08lx is valid for both flavours of 'address': a
+	 * void __user * (>= 2.6.26) and an unsigned long (older kernels).
+	 * Previously the pointer was passed unconverted. */
+	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	return VM_FAULT_SIGBUS;
+#else
+	return NOPFN_SIGBUS;
+#endif
+}
+
+/* VMA open callback: another mapping reference appeared (e.g. after
+ * fork()), so take a reference on the usage tracker. */
+static void ump_vma_open(struct vm_area_struct *vma)
+{
+	ump_vma_usage_tracker *tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
+	int refs;
+
+	BUG_ON(NULL == tracker);
+
+	refs = atomic_inc_return(&tracker->references);
+	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, refs));
+}
+
+/* VMA close callback: drop one mapping reference; when the last one goes
+ * away, ask the UKK layer to unmap the UMP memory for this descriptor. */
+static void ump_vma_close(struct vm_area_struct *vma)
+{
+	ump_vma_usage_tracker *tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
+	ump_memory_allocation *descriptor;
+	_ump_uk_unmap_mem_s args;
+	int refs;
+
+	BUG_ON(NULL == tracker);
+
+	refs = atomic_dec_return(&tracker->references);
+	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, refs));
+
+	if (refs != 0)
+		return;
+
+	descriptor = tracker->descriptor;
+
+	args.ctx = descriptor->ump_session;
+	args.cookie = descriptor->cookie;
+	args.mapping = descriptor->mapping;
+	args.size = descriptor->size;
+	args._ukk_private = NULL; /** @note unused */
+
+	DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+	_ump_ukk_unmap_mem(&args);
+
+	/* tracker itself is free()d by _ump_osk_mem_mapregion_term() */
+}
+
+/* Prepare the VMA referenced by @descriptor->process_mapping_info for a UMP
+ * mapping: attach a reference-counted usage tracker, set the VMA flags,
+ * select write-combine page protection for uncached allocations and install
+ * the UMP vm_ops.
+ *
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT on bad input or
+ * allocation failure.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor)
+{
+	ump_vma_usage_tracker *vma_usage_tracker;
+	struct vm_area_struct *vma;
+
+	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+	if (NULL == vma_usage_tracker) {
+		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+		/* Bug fix: was "return -_MALI_OSK_ERR_FAULT;" — error codes are
+		 * never negated anywhere else in this driver. */
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	vma = (struct vm_area_struct *)descriptor->process_mapping_info;
+	if (NULL == vma) {
+		kfree(vma_usage_tracker);
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	vma->vm_private_data = vma_usage_tracker;
+	vma->vm_flags |= VM_IO;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
+	vma->vm_flags |= VM_RESERVED;
+#else
+	/* VM_RESERVED was split up in 3.7: keep the mapping out of core dumps,
+	 * non-expandable, and PFN-based. */
+	vma->vm_flags |= VM_DONTDUMP;
+	vma->vm_flags |= VM_DONTEXPAND;
+	vma->vm_flags |= VM_PFNMAP;
+#endif
+
+	if (0 == descriptor->is_cached) {
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	}
+	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));
+
+	/* Setup the functions which handle further VMA handling */
+	vma->vm_ops = &ump_vm_ops;
+
+	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+	descriptor->mapping = (void __user *)vma->vm_start;
+
+	atomic_set(&vma_usage_tracker->references, 1); /* can later be increased if process is forked, see ump_vma_open() */
+	vma_usage_tracker->descriptor = descriptor;
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Undo _ump_osk_mem_mapregion_init(): Linux removes the mapping itself as
+ * part of munmap, so all that is left is freeing the usage tracker that
+ * init() attached to the VMA. Only called if init() succeeded. */
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor)
+{
+	struct vm_area_struct *vma;
+
+	if (NULL == descriptor) return;
+
+	vma = (struct vm_area_struct *)descriptor->process_mapping_info;
+	kfree(vma->vm_private_data);
+}
+
+/* Map @size bytes of physical memory at *@phys_addr into the process VMA,
+ * @offset bytes past the start of the mapping.
+ *
+ * Returns _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT otherwise.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size)
+{
+	struct vm_area_struct *vma;
+	_mali_osk_errcode_t retval;
+
+	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+	vma = (struct vm_area_struct *)descriptor->process_mapping_info;
+
+	if (NULL == vma) return _MALI_OSK_ERR_FAULT;
+
+	/* remap_pfn_range() returns 0 on success. (Stray double semicolon
+	 * removed from the original statement.) */
+	retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+		    ump_dd_secure_id_get(descriptor->handle),
+		    (unsigned long)vma,
+		    (unsigned long)(vma->vm_start + offset),
+		    (unsigned long)*phys_addr,
+		    size,
+		    (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+	return retval;
+}
+
+/* Flush the complete L1 cache using the ARM kernel cache op.
+ * NOTE(review): __cpuc_flush_kern_all operates on the calling CPU only —
+ * confirm a per-CPU flush is sufficient for the callers in this file. */
+static void level1_cache_flush_all(void)
+{
+	DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
+	__cpuc_flush_kern_all();
+}
+
+/* CPU cache maintenance for (a sub-range of) a UMP allocation.
+ *
+ * L1: flushed by virtual address in one go when the caller supplied a
+ * valid writable user mapping; otherwise falls back to a full L1 flush,
+ * possibly deferred while a batch of cache operations is ongoing
+ * (session_data->cache_operations_ongoing).
+ * L2 (outer cache): maintained by physical address, block by block,
+ * honouring @offset into the allocation and stopping after @size bytes.
+ * @op selects clean, clean+invalidate or invalidate for the outer cache.
+ */
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data)
+{
+	int i;
+
+	/* Flush L1 using virtual address, the entire range in one go.
+	 * Only flush if user space process has a valid write mapping on given address. */
+	if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
+		__cpuc_flush_dcache_area(virt, size);
+		DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size));
+	} else {
+		if (session_data) {
+			if (op == _UMP_UK_MSYNC_FLUSH_L1) {
+				DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
+				session_data->has_pending_level1_cache_flush = 0;
+				level1_cache_flush_all();
+				return;
+			} else {
+				if (session_data->cache_operations_ongoing) {
+					session_data->has_pending_level1_cache_flush++;
+					/* NOTE(review): mem may be NULL on this path (checked
+					 * only below); mem->secure_id in this DBG_MSG would
+					 * dereference it when debug output is enabled — confirm. */
+					DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
+				} else {
+					/* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
+					level1_cache_flush_all();
+				}
+			}
+		} else {
+			DBG_MSG(4, ("Unkown state %s %d\n", __FUNCTION__, __LINE__));
+			level1_cache_flush_all();
+		}
+	}
+
+	/* No physical blocks to maintain: L1-only request. */
+	if (NULL == mem) return;
+
+	if (mem->size_bytes == size) {
+		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
+	} else {
+		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
+			    mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
+	}
+
+
+	/* Flush L2 using physical addresses, block for block. */
+	for (i = 0 ; i < mem->nr_blocks; i++) {
+		u32 start_p, end_p;
+		ump_dd_physical_block *block;
+		block = &mem->block_array[i];
+
+		/* Skip whole blocks that lie entirely before the requested offset. */
+		if (offset >= block->size) {
+			offset -= block->size;
+			continue;
+		}
+
+		if (offset) {
+			start_p = (u32)block->addr + offset;
+			/* We'll zero the offset later, after using it to calculate end_p. */
+		} else {
+			start_p = (u32)block->addr;
+		}
+
+		if (size < block->size - offset) {
+			/* The requested range ends inside this block. */
+			end_p = start_p + size - 1;
+			size = 0;
+		} else {
+			/* The range covers the rest of this block; carry the
+			 * remainder over to the next iteration. */
+			if (offset) {
+				end_p = start_p + (block->size - offset - 1);
+				size -= block->size - offset;
+				offset = 0;
+			} else {
+				end_p = start_p + block->size - 1;
+				size -= block->size;
+			}
+		}
+
+		switch (op) {
+		case _UMP_UK_MSYNC_CLEAN:
+			outer_clean_range(start_p, end_p);
+			break;
+		case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
+			outer_flush_range(start_p, end_p);
+			break;
+		case _UMP_UK_MSYNC_INVALIDATE:
+			outer_inv_range(start_p, end_p);
+			break;
+		default:
+			break;
+		}
+
+		if (0 == size) {
+			/* Nothing left to flush. */
+			break;
+		}
+	}
+
+	return;
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_osk_misc.c b/drivers/parrot/gpu/ump/linux/ump_osk_misc.c
new file mode 100644
index 00000000000000..0f6829dfb67b86
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_osk_misc.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* Called from ump_kernel_constructor in common code: registers the UMP
+ * Linux device. Fails with _MALI_OSK_ERR_FAULT if device setup fails. */
+_mali_osk_errcode_t _ump_osk_init(void)
+{
+	return (0 != ump_kernel_device_initialize()) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+}
+
+/* Counterpart to _ump_osk_init(): unregisters the UMP Linux device.
+ * Always reports success. */
+_mali_osk_errcode_t _ump_osk_term(void)
+{
+	ump_kernel_device_terminate();
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.c b/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644
index 00000000000000..ed14987cb7bb92
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_allocate_s request;
+	_mali_osk_errcode_t err;
+
+	/* Reject obviously invalid calls up front. */
+	if ((NULL == argument) || (NULL == session_data)) {
+		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+		return -ENOTTY;
+	}
+
+	/* Pull the request descriptor into kernel space before touching it. */
+	if (copy_from_user(&request, argument, sizeof(request)) != 0) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+		return -EFAULT;
+	}
+
+	request.ctx = (void *)session_data;
+
+	err = _ump_ukk_allocate(&request);
+	if (_MALI_OSK_ERR_OK != err) {
+		DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+		return map_errcode(err);
+	}
+	request.ctx = NULL;
+
+	if (copy_to_user(argument, &request, sizeof(request)) != 0) {
+		/* Could not hand the result back to user space; undo the
+		 * allocation via the regular release path so it is not leaked. */
+		_ump_uk_release_s release_args;
+
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+		release_args.ctx = (void *)session_data;
+		release_args.secure_id = request.secure_id;
+
+		if (_MALI_OSK_ERR_OK != _ump_ukk_release(&release_args)) {
+			MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+		}
+
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.h b/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644
index 00000000000000..c88b6665abbf00
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.c b/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.c
new file mode 100644
index 00000000000000..49d58d797506c6
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_api_version_s version_info;
+	_mali_osk_errcode_t err;
+
+	/* Reject NULL arguments up front. */
+	if ((NULL == argument) || (NULL == session_data)) {
+		MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+		return -ENOTTY;
+	}
+
+	/* Bring the user's request into kernel space where it can be read safely. */
+	if (copy_from_user(&version_info, argument, sizeof(version_info)) != 0) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	version_info.ctx = (void *)session_data;
+	err = _ump_uku_get_api_version(&version_info);
+	if (_MALI_OSK_ERR_OK != err) {
+		MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+		return map_errcode(err);
+	}
+	version_info.ctx = NULL;
+
+	/* Copy output data back to user space */
+	if (copy_to_user(argument, &version_info, sizeof(version_info)) != 0) {
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_release_s release_args;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters. 'argument' was previously unchecked,
+	 * unlike every other wrapper in this file. */
+	if (NULL == argument || NULL == session_data) {
+		MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&release_args, argument, sizeof(release_args))) {
+		/* Message previously named ump_ioctl_get_api_version() (copy/paste). */
+		MSG_ERR(("copy_from_user() in ump_ioctl_release()\n"));
+		return -EFAULT;
+	}
+
+	release_args.ctx = (void *) session_data;
+	err = _ump_ukk_release(&release_args);
+	if (_MALI_OSK_ERR_OK != err) {
+		MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+		return map_errcode(err);
+	}
+
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_size_get_s user_interaction;
+	_mali_osk_errcode_t err;
+
+	/* Reject NULL arguments up front. */
+	if ((NULL == argument) || (NULL == session_data)) {
+		MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+		return -ENOTTY;
+	}
+
+	/* Bring the user's request into kernel space where it can be read safely. */
+	if (copy_from_user(&user_interaction, argument, sizeof(user_interaction)) != 0) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *)session_data;
+	err = _ump_ukk_size_get(&user_interaction);
+	if (_MALI_OSK_ERR_OK != err) {
+		MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+		return map_errcode(err);
+	}
+	user_interaction.ctx = NULL;
+
+	/* Hand the queried size back to user space. */
+	if (copy_to_user(argument, &user_interaction, sizeof(user_interaction)) != 0) {
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Do cache maintenance on specified UMP memory.
+ */
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_msync_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data) {
+		/* Message previously named ump_ioctl_size_get() (copy/paste). */
+		MSG_ERR(("NULL parameter in ump_ioctl_msync()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_msync(&user_interaction);
+
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction))) {
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+/*
+ * IOCTL operation; Control batching of cache operations for this session.
+ */
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_cache_operations_control_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data) {
+		/* Message previously named ump_ioctl_size_get() (copy/paste). */
+		MSG_ERR(("NULL parameter in ump_ioctl_cache_operations_control()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_cache_operations_control()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	/* Redundant self-cast removed: user_interaction already has this type. */
+	_ump_ukk_cache_operations_control(&user_interaction);
+
+	user_interaction.ctx = NULL;
+
+	/* No data to copy back */
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Switch hardware usage of specified UMP memory.
+ */
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_switch_hw_usage_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data) {
+		/* Message previously named ump_ioctl_size_get() (copy/paste). */
+		MSG_ERR(("NULL parameter in ump_ioctl_switch_hw_usage()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
+		MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_switch_hw_usage(&user_interaction);
+
+	user_interaction.ctx = NULL;
+
+	/* No data to copy back */
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Lock specified UMP memory.
+ */
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_lock_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data) {
+		/* Message previously named ump_ioctl_size_get() (copy/paste). */
+		MSG_ERR(("NULL parameter in ump_ioctl_lock()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
+		/* Message previously named ump_ioctl_switch_hw_usage() (copy/paste). */
+		MSG_ERR(("copy_from_user() in ump_ioctl_lock()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_lock(&user_interaction);
+
+	user_interaction.ctx = NULL;
+
+	/* No data to copy back */
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Unlock specified UMP memory.
+ */
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data *session_data)
+{
+	_ump_uk_unlock_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data) {
+		/* Message previously named ump_ioctl_size_get() (copy/paste). */
+		MSG_ERR(("NULL parameter in ump_ioctl_unlock()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction))) {
+		/* Message previously named ump_ioctl_switch_hw_usage() (copy/paste). */
+		MSG_ERR(("copy_from_user() in ump_ioctl_unlock()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_unlock(&user_interaction);
+
+	user_interaction.ctx = NULL;
+
+	/* No data to copy back */
+	return 0; /* success */
+}
diff --git a/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.h b/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.h
new file mode 100644
index 00000000000000..e9110b7b153e32
--- /dev/null
+++ b/drivers/parrot/gpu/ump/linux/ump_ukk_wrappers.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/ump/mali_files b/drivers/parrot/gpu/ump/mali_files
new file mode 120000
index 00000000000000..8f878e88cc08f7
--- /dev/null
+++ b/drivers/parrot/gpu/ump/mali_files
@@ -0,0 +1 @@
+../mali400/ \ No newline at end of file
diff --git a/drivers/parrot/gpu/ump/readme.txt b/drivers/parrot/gpu/ump/readme.txt
new file mode 100755
index 00000000000000..c238cf0f2b1f0b
--- /dev/null
+++ b/drivers/parrot/gpu/ump/readme.txt
@@ -0,0 +1,28 @@
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+ build_option: debug or release. Debug is default.
+
+The config.h contains following configuration parameters:
+
+ARCH_UMP_BACKEND_DEFAULT
+ 0 specifies the dedicated memory allocator.
+ 1 specifies the OS memory allocator.
+ARCH_UMP_MEMORY_ADDRESS_DEFAULT
+ This is only required for the dedicated memory allocator, and specifies
+ the physical start address of the memory block reserved for UMP.
+ARCH_UMP_MEMORY_SIZE_DEFAULT
+	This specifies the size of the memory block reserved for UMP, or the
+ maximum limit for allocations from the OS.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command. The driver can also be built as a part of the
+kernel itself.
diff --git a/drivers/parrot/gpu/ump_legacy/Kbuild b/drivers/parrot/gpu/ump_legacy/Kbuild
new file mode 100755
index 00000000000000..a9d8fea9736883
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/Kbuild
@@ -0,0 +1,102 @@
+#
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+ifneq (,$(filter $(CONFIG_UMP),y m))
+
+DRIVER_DIR=$(KBUILD_SRC)/$(src)
+$(warning DRIVER_DIR = $(DRIVER_DIR))
+
+CONFIG ?= os_memory_64m
+
+# Validate selected config
+# ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+# $(warning Current directory is $(src))
+# $(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+# else
+# # Link arch to the selected arch-config directory
+# $(shell [ -L $(src)/arch ] && rm $(src)/arch)
+# $(shell ln -sf arch-$(CONFIG) $(src)/arch)
+# $(shell touch $(src)/arch/config.h)
+# endif
+
+UDD_FILE_PREFIX = mali_files/
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV := $(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+ccflags-y += -DSVN_REV="$(SVN_REV)"
+ccflags-y += -DSVN_REV_STRING="\"$(SVN_REV)\""
+
+ifneq ($(CONFIG_MALI400_LEGACY),y)
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/../mali400/common -I$(DRIVER_DIR)/../mali400/linux -I$(DRIVER_DIR)/../../ump/include/ump -I$(DRIVER_DIR)/include/ump
+else
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/../mali400_legacy/common -I$(DRIVER_DIR)/../mali400_legacy/linux -I$(DRIVER_DIR)/include/ump
+endif
+ccflags-y += -DMALI_STATE_TRACKING=0
+ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifneq ($(CONFIG_MALI400_LEGACY),y)
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary -I$(DRIVER_DIR)/../mali400/linux/license/proprietary
+else
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl -I$(DRIVER_DIR)/../mali400/linux/license/gpl
+endif
+else
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary -I$(DRIVER_DIR)/../mali400_legacy/linux/license/proprietary
+else
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl -I$(DRIVER_DIR)/../mali400_legacy/linux/license/gpl
+endif
+endif
+
+ifneq ($(CONFIG_MALI400_LEGACY),y)
+ump-y = common/ump_kernel_common.o \
+ common/ump_kernel_descriptor_mapping.o \
+ common/ump_kernel_api.o \
+ common/ump_kernel_ref_drv.o \
+ linux/ump_kernel_linux.o \
+ linux/ump_kernel_memory_backend_os.o \
+ linux/ump_kernel_memory_backend_dedicated.o \
+ linux/ump_memory_backend.o \
+ linux/ump_ukk_wrappers.o \
+ linux/ump_ukk_ref_wrappers.o \
+ linux/ump_osk_atomics.o \
+ linux/ump_osk_low_level_mem.o \
+ linux/ump_osk_misc.o \
+ ../mali400/linux/mali_osk_atomics.o \
+ ../mali400/linux/mali_osk_locks.o \
+ ../mali400/linux/mali_osk_memory.o \
+ ../mali400/linux/mali_osk_math.o \
+ ../mali400/linux/mali_osk_misc.o
+else
+ump-y = common/ump_kernel_common.o \
+ common/ump_kernel_descriptor_mapping.o \
+ common/ump_kernel_api.o \
+ common/ump_kernel_ref_drv.o \
+ linux/ump_kernel_linux.o \
+ linux/ump_kernel_memory_backend_os.o \
+ linux/ump_kernel_memory_backend_dedicated.o \
+ linux/ump_memory_backend.o \
+ linux/ump_ukk_wrappers.o \
+ linux/ump_ukk_ref_wrappers.o \
+ linux/ump_osk_atomics.o \
+ linux/ump_osk_low_level_mem.o \
+ linux/ump_osk_misc.o \
+ ../mali400_legacy/linux/mali_osk_atomics.o \
+ ../mali400_legacy/linux/mali_osk_locks.o \
+ ../mali400_legacy/linux/mali_osk_memory.o \
+ ../mali400_legacy/linux/mali_osk_math.o \
+ ../mali400_legacy/linux/mali_osk_misc.o
+endif
+obj-$(CONFIG_UMP) := ump.o
+endif
diff --git a/drivers/parrot/gpu/ump_legacy/Kconfig b/drivers/parrot/gpu/ump_legacy/Kconfig
new file mode 100755
index 00000000000000..3ae316c90ca3e8
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/Kconfig
@@ -0,0 +1,16 @@
+config UMP
+ tristate "UMP support"
+ depends on ARM
+ ---help---
+ This enables support for the UMP memory allocation and sharing API.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ump.
+
+config UMP_DEBUG
+ bool "Enable extra debug in UMP"
+ depends on UMP
+ default y
+ ---help---
+	  This enables extra debug checks and messages in UMP.
+
diff --git a/drivers/parrot/gpu/ump_legacy/Makefile b/drivers/parrot/gpu/ump_legacy/Makefile
new file mode 100755
index 00000000000000..dd1b612ff5b0cd
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/Makefile
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+export ARCH ?= arm
+BUILD ?= debug
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# Check that required parameters are supplied.
+ifeq ($(CONFIG),)
+$(error "CONFIG must be specified.")
+endif
+ifeq ($(CPU)$(KDIR),)
+$(error "KDIR or CPU must be specified.")
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# look up KDIR based om CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+export CONFIG
+
+export CONFIG_UMP := m
+ifeq ($(BUILD),debug)
+export CONFIG_UMP_DEBUG := y
+else
+export CONFIG_UMP_DEBUG := n
+endif
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) -C $(KDIR) M=$(CURDIR)/../mali clean
diff --git a/drivers/parrot/gpu/ump_legacy/Makefile.common b/drivers/parrot/gpu/ump_legacy/Makefile.common
new file mode 100755
index 00000000000000..e750ed7d5e3aaf
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/Makefile.common
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
diff --git a/drivers/parrot/gpu/ump_legacy/arch-pb-virtex5/config.h b/drivers/parrot/gpu/ump_legacy/arch-pb-virtex5/config.h
new file mode 100755
index 00000000000000..850e28d5e99f35
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xE1000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 16UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/arch/config.h b/drivers/parrot/gpu/ump_legacy/arch/config.h
new file mode 100644
index 00000000000000..17f2c526bb7628
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/arch/config.h
@@ -0,0 +1,10 @@
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 1
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0 /* This won't be used anyway */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 128UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
+
+
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_api.c b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_api.c
new file mode 100755
index 00000000000000..c662566d9b3ac7
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_api.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+ return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ ump_dd_mem * mem;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+ if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ ump_dd_reference_add(mem);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return (ump_dd_handle)mem;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (blocks == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (mem->nr_blocks != num_blocks)
+ {
+ DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+ _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (block == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (index >= mem->nr_blocks)
+ {
+ DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+ *block = mem->block_array[index];
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+ int new_ref;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+ DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+ int new_ref;
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ /* We must hold this mutex while doing the atomic_dec_and_read, to protect
+ that elements in the ump_descriptor_mapping table is always valid. If they
+ are not, userspace may accidently map in this secure_ids right before its freed
+ giving a mapped backdoor into unallocated memory.*/
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+ DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+ if (0 == new_ref)
+ {
+ DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+ ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ mem->release_func(mem->ctx, mem);
+ _mali_osk_free(mem);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{
+ ump_session_data * session_data;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ session_data = (ump_session_data *)args->ctx;
+
+ /* check compatability */
+ if (args->version == UMP_IOCTL_API_VERSION)
+ {
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else if (args->version == MAKE_VERSION_ID(1))
+ {
+ DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else
+ {
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ args->compatible = 0;
+ args->version = UMP_IOCTL_API_VERSION; /* report our version */
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{
+ ump_session_memory_list_element * session_memory_element;
+ ump_session_memory_list_element * tmp;
+ ump_session_data * session_data;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+ int secure_id;
+
+ DEBUG_ASSERT_POINTER( release_info );
+ DEBUG_ASSERT_POINTER( release_info->ctx );
+
+ /* Retreive the session data */
+ session_data = (ump_session_data*)release_info->ctx;
+
+ /* If there are many items in the memory session list we
+ * could be de-referencing this pointer a lot so keep a local copy
+ */
+ secure_id = release_info->secure_id;
+
+ DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+ /* Iterate through the memory list looking for the requested secure ID */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ if ( session_memory_element->mem->secure_id == secure_id)
+ {
+ ump_dd_mem *release_mem;
+
+ release_mem = session_memory_element->mem;
+ _mali_osk_list_del(&session_memory_element->list);
+ ump_dd_reference_release(release_mem);
+ _mali_osk_free(session_memory_element);
+
+ ret = _MALI_OSK_ERR_OK;
+ break;
+ }
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+ DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+ return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{
+ ump_dd_mem * mem;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+
+ /* We lock the mappings so things don't get removed while we are looking for the memory */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+ {
+ user_interaction->size = mem->size_bytes;
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+ ret = _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ user_interaction->size = 0;
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+
+
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{
+ ump_dd_mem * mem = NULL;
+ void *virtual = NULL;
+ u32 size = 0;
+ u32 offset = 0;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+ if (NULL == mem)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+ /* Ensure the memory doesn't dissapear when we are flushing it. */
+ ump_dd_reference_add(mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Returns the cache settings back to Userspace */
+ args->is_cached=mem->is_cached;
+
+ /* If this flag is the only one set, we should not do the actual flush, only the readout */
+ if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+ goto msync_release_and_return;
+ }
+
+ /* Nothing to do if the memory is not caches */
+ if ( 0==mem->is_cached )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+ goto msync_release_and_return;
+ }
+ DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
+ (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
+
+ if ( args->address )
+ {
+ virtual = (void *)((u32)args->address);
+ offset = (u32)((args->address) - (args->mapping));
+ } else {
+ /* Flush entire mapping when no address is specified. */
+ virtual = args->mapping;
+ }
+ if ( args->size )
+ {
+ size = args->size;
+ } else {
+ /* Flush entire mapping when no size is specified. */
+ size = mem->size_bytes - offset;
+ }
+
+ if ( (offset + size) > mem->size_bytes )
+ {
+ DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
+ goto msync_release_and_return;
+ }
+
+ /* The actual cache flush - Implemented for each OS*/
+ _ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
+
+msync_release_and_return:
+ ump_dd_reference_release(mem);
+ return;
+}
+
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
+{
+ ump_session_data * session_data;
+ ump_uk_cache_op_control op;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ op = args->op;
+ session_data = (ump_session_data *)args->ctx;
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( op== _UMP_UK_CACHE_OP_START )
+ {
+ session_data->cache_operations_ongoing++;
+ DBG_MSG(4, ("Cache ops start\n" ));
+ if ( session_data->cache_operations_ongoing != 1 )
+ {
+ DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
+ }
+ }
+ else if ( op== _UMP_UK_CACHE_OP_FINISH )
+ {
+ DBG_MSG(4, ("Cache ops finish\n"));
+ session_data->cache_operations_ongoing--;
+ #if 0
+ if ( session_data->has_pending_level1_cache_flush)
+ {
+ /* This function will set has_pending_level1_cache_flush=0 */
+ _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ }
+ #endif
+
+ /* to be on the safe side: always flush l1 cache when cache operations are done */
+ _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ DBG_MSG(4, ("Cache ops finish end\n" ));
+ }
+ else
+ {
+ DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
+ }
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
+{
+ ump_dd_mem * mem = NULL;
+ ump_uk_user old_user;
+ ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
+ ump_session_data *session_data;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ session_data = (ump_session_data *)args->ctx;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+ if (NULL == mem)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ old_user = mem->hw_device;
+ mem->hw_device = args->new_user;
+
+ DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));
+
+ if ( ! mem->is_cached )
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ if ( old_user == args->new_user)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+ if (
+ /* Previous AND new is both different from CPU */
+ (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU )
+ )
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) )
+ {
+ cache_op =_UMP_UK_MSYNC_INVALIDATE;
+ DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
+#ifdef UMP_SKIP_INVALIDATION
+#error
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
+ return;
+#endif
+ }
+ /* Ensure the memory doesn't dissapear when we are flushing it. */
+ ump_dd_reference_add(mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ /* Actual cache flush */
+ _ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_dd_reference_release(mem);
+ DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
+ return;
+}
+
+void _ump_ukk_lock(_ump_uk_lock_s *args )
+{
+ ump_dd_mem * mem = NULL;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+ if (NULL == mem)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+ ump_dd_reference_add(mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
+
+ mem->lock_usage = (ump_lock_usage) args->lock_usage;
+
+ ump_dd_reference_release(mem);
+}
+
+void _ump_ukk_unlock(_ump_uk_unlock_s *args )
+{
+ ump_dd_mem * mem = NULL;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+
+ if (NULL == mem)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+ ump_dd_reference_add(mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n", (u32)args->secure_id, (u32) mem->lock_usage ));
+
+ mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
+
+ ump_dd_reference_release(mem);
+}
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.c b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.c
new file mode 100755
index 00000000000000..0f5afc30c3f40b
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128 )
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096 )
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device) );
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+ if (NULL == device.secure_id_map_lock)
+ {
+ MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ if (NULL == device.secure_id_map)
+ {
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend)
+ {
+ MSG_ERR(("Failed to create memory backend\n"));
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+ DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ device.secure_id_map_lock = NULL;
+
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+ struct ump_session_data * session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ if( NULL == session_data->lock )
+ {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+ if ( NULL == session_data->cookies_map )
+ {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_lock_term( session_data->lock );
+ _mali_osk_free( session_data );
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+ /* Since initial version of the UMP interface did not use the API_VERSION ioctl we have to assume
+ that it is this version, and not the "latest" one: UMP_IOCTL_API_VERSION
+ Current and later API versions would do an additional call to this IOCTL and update this variable
+ to the correct one.*/
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void*)session_data;
+
+ session_data->cache_operations_ongoing = 0 ;
+ session_data->has_pending_level1_cache_flush = 0;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+ struct ump_session_data * session_data;
+ ump_session_memory_list_element * item;
+ ump_session_memory_list_element * tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+ {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+ {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void*)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem( &unmap_args );
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+ _mali_osk_lock_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem * mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem*)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+ if (map_id < 0)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if ( mem->is_cached )
+ {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ }
+ else
+ {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init( &descriptor->list );
+
+ err = _ump_osk_mem_mapregion_init( descriptor );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++)
+ {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size)
+ {
+ size_to_map = mem->block_array[block].size;
+ }
+ else
+ {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+ {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+ {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_del( &descriptor->list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+}
+
+u32 _ump_ukk_report_memory_usage( void )
+{
+ if(device.backend->stat)
+ return device.backend->stat(device.backend);
+ else
+ return 0;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.h b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.h
new file mode 100755
index 00000000000000..7c9d27868a821b
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_common.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_COMMON_H__
+#define __UMP_KERNEL_COMMON_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+ extern int ump_debug_level;
+ #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+ #define UMP_DEBUG_CODE(args) args
+ #define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+ #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+ #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+ #define UMP_DEBUG_PRINT(args) do {} while(0)
+ #define UMP_DEBUG_CODE(args)
+ #define DBG_MSG(level,args) do {} while(0)
+ #define DBG_MSG_IF(level,condition,args) do {} while(0)
+ #define DBG_MSG_ELSE(level,args) do {} while(0)
+ #define DEBUG_ASSERT(condition) do {} while(0)
+ #define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
+
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_lock_t * lock;
+ ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+ int cache_operations_ongoing;
+ int has_pending_level1_cache_flush;
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themself (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+ struct ump_dd_mem * mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+ _mali_osk_lock_t * secure_id_map_lock;
+ ump_descriptor_mapping * secure_id_map;
+ ump_memory_backend * backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_COMMON_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.c b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.c
new file mode 100755
index 00000000000000..f34a1242598cba
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+ if ( NULL != map->lock )
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term( map->lock );
+ _mali_osk_free(map);
+}
+
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+ int descriptor = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (descriptor == map->current_nr_mappings)
+ {
+ int nr_mappings_new;
+ /* no free descriptor, try to expand the table */
+ ump_descriptor_table * new_table;
+ ump_descriptor_table * old_table = map->table;
+ nr_mappings_new= map->current_nr_mappings *2;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ new_table = descriptor_table_alloc(nr_mappings_new);
+ if (NULL == new_table)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ map->current_nr_mappings = nr_mappings_new;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+ map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ return descriptor;
+}
+
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+ int result = -1;/*-EFAULT;*/
+ DEBUG_ASSERT(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = 0;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+ int result = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = 0;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+ ump_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
+
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.h b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.h
new file mode 100755
index 00000000000000..e9c4b26b2668a4
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_memory_backend.h b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_memory_backend.h
new file mode 100755
index 00000000000000..b613153aa42193
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_memory_backend.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation
+{
+ void * phys_addr;
+ void * mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void * process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+typedef struct ump_memory_backend
+{
+ int (*allocate)(void* ctx, ump_dd_mem * descriptor);
+ void (*release)(void* ctx, ump_dd_mem * descriptor);
+ void (*shutdown)(struct ump_memory_backend * backend);
+ u32 (*stat)(struct ump_memory_backend *backend);
+ int (*pre_allocate_physical_check)(void *ctx, u32 size);
+ u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void * ctx;
+} ump_memory_backend;
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_ref_drv.c b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_ref_drv.c
new file mode 100755
index 00000000000000..857b6764d9fcc7
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_ref_drv.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem;
+ unsigned long size_total = 0;
+ int map_id;
+ u32 i;
+
+ /* Go through the input blocks and verify that they are sane */
+ for (i=0; i < num_blocks; i++)
+ {
+ unsigned long addr = blocks[i].addr;
+ unsigned long size = blocks[i].size;
+
+ DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+ size_total += blocks[i].size;
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+ {
+ MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+ {
+ MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+ return UMP_DD_HANDLE_INVALID;
+ }
+ }
+
+ /* Allocate the ump_dd_mem struct for this allocation */
+ mem = _mali_osk_malloc(sizeof(*mem));
+ if (NULL == mem)
+ {
+ DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Find a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Now, make a copy of the block information supplied by the user */
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ if (NULL == mem->block_array)
+ {
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+ /* And setup the rest of the ump_dd_mem struct */
+ _mali_osk_atomic_init(&mem->ref_count, 1);
+ mem->secure_id = (ump_secure_id)map_id;
+ mem->size_bytes = size_total;
+ mem->nr_blocks = num_blocks;
+ mem->backend_info = NULL;
+ mem->ctx = NULL;
+ mem->release_func = phys_blocks_release;
+ /* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
+ mem->is_cached = 0;
+ mem->hw_device = _UMP_UK_USED_BY_CPU;
+ mem->lock_usage = UMP_NOT_LOCKED;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return (ump_dd_handle)mem;
+}
+
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+ _mali_osk_free(descriptor->block_array);
+ descriptor->block_array = NULL;
+}
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+	ump_session_data * session_data = NULL;
+	ump_dd_mem *new_allocation = NULL;
+	ump_session_memory_list_element * session_memory_element = NULL;
+	int map_id;
+
+	DEBUG_ASSERT_POINTER( user_interaction );
+	DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+	session_data = (ump_session_data *) user_interaction->ctx;
+
+	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+	if (NULL == session_memory_element)
+	{
+		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+
+	new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+	if (NULL==new_allocation)
+	{
+		_mali_osk_free(session_memory_element);
+		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	/* Create a secure ID for this allocation */
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+	if (map_id < 0)
+	{
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+		_mali_osk_free(session_memory_element);
+		_mali_osk_free(new_allocation);
+		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+		return _MALI_OSK_ERR_INVALID_FUNC; /* error codes are already negative; keep sign consistent with the failure path below */
+	}
+
+	/* Initialize the part of the new_allocation that we know so far */
+	new_allocation->secure_id = (ump_secure_id)map_id;
+	_mali_osk_atomic_init(&new_allocation->ref_count,1);
+	if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+		new_allocation->is_cached = 0;
+	else new_allocation->is_cached = 1;
+
+	/* special case a size of 0, we should try to emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferenced */
+	if (0 == user_interaction->size)
+	{
+		user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+	}
+
+	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+	new_allocation->lock_usage = UMP_NOT_LOCKED;
+
+	/* Now, ask the active memory backend to do the actual memory allocation */
+	if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+	{
+		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+		ump_descriptor_mapping_free(device.secure_id_map, map_id);
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+		_mali_osk_free(new_allocation);
+		_mali_osk_free(session_memory_element);
+		return _MALI_OSK_ERR_INVALID_FUNC;
+	}
+	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
+	new_allocation->ctx = device.backend->ctx;
+	new_allocation->release_func = device.backend->release;
+
+	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	/* Initialize the session_memory_element, and add it to the session object */
+	session_memory_element->mem = new_allocation;
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	user_interaction->secure_id = new_allocation->secure_id;
+	user_interaction->size = new_allocation->size_bytes;
+	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+	return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_kernel_types.h b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_types.h
new file mode 100755
index 00000000000000..ffa3727616eb8c
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_kernel_types.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+
+typedef enum
+{
+ UMP_USED_BY_CPU = 0,
+ UMP_USED_BY_MALI = 1,
+ UMP_USED_BY_UNKNOWN_DEVICE= 100,
+} ump_hw_usage;
+
+typedef enum
+{
+ UMP_NOT_LOCKED = 0,
+ UMP_READ = 1,
+ UMP_READ_WRITE = 3,
+} ump_lock_usage;
+
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block * block_array;
+ void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+ void * ctx;
+ void * backend_info;
+ int is_cached;
+ ump_hw_usage hw_device;
+ ump_lock_usage lock_usage;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_osk.h b/drivers/parrot/gpu/ump_legacy/common/ump_osk.h
new file mode 100755
index 00000000000000..99363a890bae48
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_osk.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include "ump_uk_types.h"
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_uk_types.h b/drivers/parrot/gpu/ump_legacy/common/ump_uk_types.h
new file mode 100755
index 00000000000000..3d65d17f6bcf15
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_uk_types.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+ _UMP_IOC_CACHE_OPERATIONS_CONTROL,
+ _UMP_IOC_SWITCH_HW_USAGE,
+ _UMP_IOC_LOCK,
+ _UMP_IOC_UNLOCK,
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_INVALIDATE = 2,
+ _UMP_UK_MSYNC_FLUSH_L1 = 3,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+typedef enum
+{
+ _UMP_UK_CACHE_OP_START = 0,
+ _UMP_UK_CACHE_OP_FINISH = 1,
+} ump_uk_cache_op_control;
+
+typedef enum
+{
+ _UMP_UK_READ = 1,
+ _UMP_UK_READ_WRITE = 3,
+} ump_uk_lock_usage;
+
+typedef enum
+{
+ _UMP_UK_USED_BY_CPU = 0,
+ _UMP_UK_USED_BY_MALI = 1,
+ _UMP_UK_USED_BY_UNKNOWN_DEVICE= 100,
+} ump_uk_user;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] contraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+typedef struct _ump_uk_cache_operations_control_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ ump_uk_cache_op_control op; /**< [in] cache operations start/stop */
+} _ump_uk_cache_operations_control_s;
+
+
+typedef struct _ump_uk_switch_hw_usage_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_user new_user; /**< [in] cookie stored with reference to the kernel mapping internals */
+
+} _ump_uk_switch_hw_usage_s;
+
+typedef struct _ump_uk_lock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_lock_usage lock_usage;
+} _ump_uk_lock_s;
+
+typedef struct _ump_uk_unlock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+} _ump_uk_unlock_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/common/ump_ukk.h b/drivers/parrot/gpu/ump_legacy/common/ump_ukk.h
new file mode 100755
index 00000000000000..b285cf9d80a436
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/common/ump_ukk.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/** Opens a new UMP session; on success *context receives the session data. */
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+/** Closes a session previously opened with _ump_ukk_open(); releases *context. */
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+/** Allocates UMP memory as described by *user_interaction (see _ump_uk_allocate_s). */
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+/** Releases a reference to the buffer identified in *release_info. */
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+/** Queries the size of the buffer identified in *user_interaction. */
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+/** Maps a UMP buffer into user space; invoked from the driver's mmap handler. */
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+/** Reports the user/kernel API version (note the _uku_ prefix, not _ukk_). */
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+/** Tears down a mapping created by _ump_ukk_map_mem(). */
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+/** Performs a cache maintenance operation (see ump_uk_msync_op) on a mapping. */
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+/** Starts/stops batched cache operations (see ump_uk_cache_op_control). */
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args);
+
+/** Switches the HW user of a buffer -- NOTE(review): exact semantics defined by the implementation; confirm. */
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args );
+
+/** Locks a buffer for the usage given in args->lock_usage. */
+void _ump_ukk_lock(_ump_uk_lock_s *args );
+
+/** Releases a lock taken with _ump_ukk_lock(). */
+void _ump_ukk_unlock(_ump_uk_unlock_s *args );
+
+/** Returns the current UMP memory-usage counter (exposed via the debugfs "memory_usage" file). */
+u32 _ump_ukk_report_memory_usage( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface.h b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface.h
new file mode 100755
index 00000000000000..067b30bf1a4288
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function.
+ *
+ * @return UMP_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface_ref_drv.h b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface_ref_drv.h
new file mode 100755
index 00000000000000..0225130638567e
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_platform.h b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_platform.h
new file mode 100755
index 00000000000000..5322aa278051b6
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/include/ump/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/linux/license/gpl/ump_kernel_license.h b/drivers/parrot/gpu/ump_legacy/linux/license/gpl/ump_kernel_license.h
new file mode 100755
index 00000000000000..0eb758d8d374f2
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_ioctl.h b/drivers/parrot/gpu/ump_legacy/linux/ump_ioctl.h
new file mode 100755
index 00000000000000..b88a341c42cdbd
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_ioctl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include <ump_uk_types.h>
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
+
+#define UMP_IOC_CACHE_OPERATIONS_CONTROL _IOW(UMP_IOCTL_NR, _UMP_IOC_CACHE_OPERATIONS_CONTROL, _ump_uk_cache_operations_control_s)
+#define UMP_IOC_SWITCH_HW_USAGE _IOW(UMP_IOCTL_NR, _UMP_IOC_SWITCH_HW_USAGE, _ump_uk_switch_hw_usage_s)
+#define UMP_IOC_LOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_LOCK, _ump_uk_lock_s)
+#define UMP_IOC_UNLOCK _IOW(UMP_IOCTL_NR, _UMP_IOC_UNLOCK, _ump_uk_unlock_s)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.c b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.c
new file mode 100755
index 00000000000000..7e017aff2ab592
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+
+#include "arch/config.h" /* Configuration for current platform. The symlinc for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+#if UMP_LICENSE_IS_GPL
+static struct dentry *ump_debugfs_dir = NULL;
+#endif
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+ int references; /* number of open handles to this VMA -- reference released when it drops to zero */
+ ump_dd_handle handle; /* the UMP memory this mapping refers to */
+} ump_vma_usage_tracker;
+
+/* Per-driver (not per-session) device state; there is exactly one instance below. */
+struct ump_device
+{
+ struct cdev cdev; /* the character device registered with the kernel */
+#if UMP_LICENSE_IS_GPL
+ struct class * ump_class; /* device class used to create the /dev node */
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* Module init entry point (wired up by module_init() below).
+ * All we do is initialize the UMP device driver core; a _mali_osk_errcode_t
+ * failure is translated to a negative errno via map_errcode().
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* NOTE(review): __DATE__/__TIME__ break reproducible builds and are
+ * rejected by newer kernel build checks -- consider removing them. */
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err);
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0; /* success */
+}
+
+
+
+/*
+ * Module exit entry point (wired up by module_exit() below).
+ * All we do is terminate the UMP device driver core; device teardown is
+ * expected to happen inside ump_kernel_destructor().
+ */
+static void ump_cleanup_module(void)
+{
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/* debugfs read handler for the "memory_usage" file: prints the current UMP
+ * memory-usage counter as a decimal string followed by a newline. */
+static ssize_t ump_memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64]; /* plenty for a u32 plus '\n' */
+ size_t r;
+ u32 mem = _ump_ukk_report_memory_usage();
+
+ /* NOTE(review): prefer sizeof(buf) over the repeated magic 64 */
+ r = snprintf(buf, 64, "%u\n", mem);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ump_memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .read = ump_memory_used_read,
+};
+
+/*
+ * Initialize the UMP device driver: optional debugfs entries, char device
+ * region, cdev registration and (GPL builds) the /dev node. Returns 0 on
+ * success or a negative errno; all partially acquired resources are released
+ * on the failure paths below.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+#if UMP_LICENSE_IS_GPL
+ /* NOTE(review): only the ERR_PTR(-ENODEV) case (debugfs not compiled in)
+ * is handled here; other error pointers would be passed on to
+ * debugfs_create_file() -- confirm this matches the target kernel's
+ * debugfs_create_dir() return convention. */
+ ump_debugfs_dir = debugfs_create_dir(ump_dev_name, NULL);
+ if (ERR_PTR(-ENODEV) == ump_debugfs_dir)
+ {
+ ump_debugfs_dir = NULL;
+ }
+ else
+ {
+ debugfs_create_file("memory_usage", 0400, ump_debugfs_dir, NULL, &ump_memory_usage_fops);
+ }
+#endif
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ /* NOTE(review): on failure dev stays 0, so ump_major becomes 0 again */
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+#if UMP_LICENSE_IS_GPL
+ /* GPL builds also create a class + device node under /dev */
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class))
+ {
+ err = PTR_ERR(ump_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ /* NOTE(review): ump_class is not destroyed on this path --
+ * confirm whether class_destroy() should be called here */
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ /* undo the chrdev region reservation on any failure above */
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver: tear down in reverse order of
+ * ump_kernel_device_initialize() -- device node/class, cdev, chrdev
+ * region, then the debugfs tree.
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+
+#if UMP_LICENSE_IS_GPL
+ if(ump_debugfs_dir)
+ debugfs_remove_recursive(ump_debugfs_dir);
+#endif
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ * Allocates per-session data via the OS-independent layer and stores it in
+ * filp->private_data for the ioctl/mmap/release handlers.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation: the driver registers exactly one minor (0) */
+ if (0 != MINOR(inode->i_rdev))
+ {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void**) &session_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode( err );
+ }
+
+ filp->private_data = (void*)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ * Hands filp->private_data back to the OS-independent layer for cleanup.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void**) &filp->private_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ * Dispatches each UMP_IOC_* command to its wrapper, passing the user-space
+ * argument pointer and the session data attached to the file at open time.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user * argument;
+ struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd)
+ {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_CACHE_OPERATIONS_CONTROL:
+ err = ump_cache_operations_control_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SWITCH_HW_USAGE:
+ err = ump_switch_hw_usage_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_LOCK:
+ err = ump_lock_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_UNLOCK:
+ err = ump_unlock_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ /* NOTE(review): -ENOTTY is the conventional errno for an unknown
+ * ioctl (err's initializer is never used since every path assigns
+ * it) -- consider -ENOTTY here instead of -EFAULT. */
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+/* Translate an OS-independent _mali_osk_errcode_t into a negative Linux
+ * errno; unknown codes fall back to -EFAULT. */
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+/*
+ * Handle from OS to map specified virtual memory to specified UMP memory.
+ * The buffer's secure_id is carried in vma->vm_pgoff (set by the userspace
+ * library via the mmap offset argument).
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data * session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
+ args.secure_id = vma->vm_pgoff; /* secure_id smuggled in via the mmap offset */
+ args.is_cached = 0;
+
+ /* A private (non-shared) mapping is taken to mean "use CPU caching";
+ * the mapping is then forced to be shared anyway. */
+ if (!(vma->vm_flags & VM_SHARED))
+ {
+ args.is_cached = 1;
+ /* NOTE(review): direct vm_flags assignment is only valid on older
+ * kernels; newer kernels require vm_flags_set() -- confirm target. */
+ vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+ /* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
+ vma->vm_flags |= VM_DONTCOPY;
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem( &args );
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.h b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.h
new file mode 100755
index 00000000000000..7c0a17c55c87e4
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_LINUX_H__
+#define __UMP_KERNEL_LINUX_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.c b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100755
index 00000000000000..7ca406ea625bfd
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
+
+
+
+/* Singly-linked free-list node; one per physical UMP block. */
+typedef struct block_info
+{
+ struct block_info * next; /* next node on the free list (or on an allocation's chain) */
+} block_info;
+
+
+
+/* State for the dedicated-memory backend: a fixed pool of equal-size blocks. */
+typedef struct block_allocator
+{
+ struct semaphore mutex; /* serializes allocate/release on the free list */
+ block_info * all_blocks; /* array of num_blocks nodes, indexed to compute physical addresses */
+ block_info * first_free; /* head of the free list */
+ u32 base; /* physical base address of the managed region */
+ u32 num_blocks; /* total blocks in the pool */
+ u32 num_free; /* blocks currently on the free list */
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+static u32 block_allocator_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create dedicated memory backend
+ *
+ * Carves the physical region [base_address, base_address + size) into
+ * UMP_BLOCK_SIZE blocks threaded onto a free list. Returns NULL when the
+ * rounded-up size is zero or any bookkeeping allocation fails.
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+ ump_memory_backend * backend;
+ block_allocator * allocator;
+ u32 usable_size;
+ u32 num_blocks;
+
+ /* Round the region up to a whole number of blocks. */
+ usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+ num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+ if (0 == usable_size)
+ {
+ DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+ return NULL;
+ }
+
+ DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+ DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+ backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL != backend)
+ {
+ allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+ if (NULL != allocator)
+ {
+ /* BUGFIX: the element type is block_info, not block_allocator;
+  * the original sizeof over-allocated the node array. */
+ allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+ if (NULL != allocator->all_blocks)
+ {
+ u32 i; /* unsigned to match num_blocks in the comparison below */
+
+ allocator->first_free = NULL;
+ allocator->num_blocks = num_blocks;
+ allocator->num_free = num_blocks;
+ allocator->base = base_address;
+ sema_init(&allocator->mutex, 1);
+
+ /* Push every block onto the free list. */
+ for (i = 0; i < num_blocks; i++)
+ {
+ allocator->all_blocks[i].next = allocator->first_free;
+ allocator->first_free = &allocator->all_blocks[i];
+ }
+
+ backend->ctx = allocator;
+ backend->allocate = block_allocator_allocate;
+ backend->release = block_allocator_release;
+ backend->shutdown = block_allocator_shutdown;
+ backend->stat = block_allocator_stat;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+ }
+ kfree(allocator);
+ }
+ kfree(backend);
+ }
+
+ return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ *
+ * Frees the node array, the allocator state and the backend object itself.
+ * Debug builds warn if blocks are still allocated at shutdown time.
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+ block_allocator * allocator;
+
+ BUG_ON(!backend);
+ BUG_ON(!backend->ctx);
+
+ allocator = (block_allocator*)backend->ctx;
+
+ /* Leak report: every block should have been released by now. */
+ DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+ kfree(allocator->all_blocks);
+ kfree(allocator);
+ kfree(backend);
+}
+
+
+
+/*
+ * Satisfy an allocation request from the fixed block pool.
+ *
+ * Fills mem->block_array/nr_blocks/size_bytes with whole blocks and chains
+ * the taken blocks through mem->backend_info for later release.
+ * Returns 1 on success, 0 on failure (all partial work undone).
+ */
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+ block_allocator * allocator;
+ u32 left;
+ block_info * last_allocated = NULL;
+ int i = 0;
+
+ BUG_ON(!ctx);
+ BUG_ON(!mem);
+
+ allocator = (block_allocator*)ctx;
+ left = mem->size_bytes;
+
+ BUG_ON(!left);
+ BUG_ON(!&allocator->mutex);
+
+ /* Round the request up to whole blocks; the description array is
+  * allocated before taking the mutex. */
+ mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+ mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ if (NULL == mem->block_array)
+ {
+ MSG_ERR(("Failed to allocate block array\n"));
+ return 0;
+ }
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Could not get mutex to do block_allocate\n"));
+ /* BUGFIX: free the array allocated above instead of leaking it. */
+ vfree(mem->block_array);
+ mem->block_array = NULL;
+ return 0;
+ }
+
+ mem->size_bytes = 0;
+
+ /* Pop blocks off the free list until the request is covered. */
+ while ((left > 0) && (allocator->first_free))
+ {
+ block_info * block;
+
+ block = allocator->first_free;
+ allocator->first_free = allocator->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+ allocator->num_free--;
+
+ mem->block_array[i].addr = get_phys(allocator, block);
+ mem->block_array[i].size = UMP_BLOCK_SIZE;
+ mem->size_bytes += UMP_BLOCK_SIZE;
+
+ i++;
+
+ if (left < UMP_BLOCK_SIZE) left = 0;
+ else left -= UMP_BLOCK_SIZE;
+ }
+
+ if (left)
+ {
+ block_info * block;
+ /* Pool exhausted: release all memory back to the pool */
+ while (last_allocated)
+ {
+ block = last_allocated->next;
+ last_allocated->next = allocator->first_free;
+ allocator->first_free = last_allocated;
+ last_allocated = block;
+ allocator->num_free++;
+ }
+
+ vfree(mem->block_array);
+ mem->backend_info = NULL;
+ mem->block_array = NULL;
+
+ DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+ up(&allocator->mutex);
+
+ return 0;
+ }
+
+ /* Remember the chain of taken blocks for block_allocator_release(). */
+ mem->backend_info = last_allocated;
+
+ up(&allocator->mutex);
+ mem->is_cached=0;
+
+ return 1;
+}
+
+
+
+/*
+ * Return every block chained through handle->backend_info to the free
+ * list and free the block description array.
+ */
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+ block_allocator * allocator;
+ block_info * block, * next;
+
+ BUG_ON(!ctx);
+ BUG_ON(!handle);
+
+ allocator = (block_allocator*)ctx;
+ block = (block_info*)handle->backend_info;
+ BUG_ON(!block);
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+ return;
+ }
+
+ while (block)
+ {
+ next = block->next;
+
+ /* BUGFIX: valid nodes are all_blocks[0 .. num_blocks-1]; use >= so
+  * the one-past-the-end pointer is rejected too (was >). */
+ BUG_ON( (block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));
+
+ block->next = allocator->first_free;
+ allocator->first_free = block;
+ allocator->num_free++;
+
+ block = next;
+ }
+ DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+ up(&allocator->mutex);
+
+ vfree(handle->block_array);
+ handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory block.
+ * The block's index within all_blocks maps linearly onto the physical region.
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+ return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
+}
+
+/* Report the number of bytes currently allocated from the block pool. */
+static u32 block_allocator_stat(struct ump_memory_backend *backend)
+{
+ block_allocator *allocator;
+ BUG_ON(!backend);
+ allocator = (block_allocator*)backend->ctx;
+ BUG_ON(!allocator);
+
+ return (allocator->num_blocks - allocator->num_free)* UMP_BLOCK_SIZE;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.h b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100755
index 00000000000000..84948877532e91
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.c b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.c
new file mode 100755
index 00000000000000..e362facc0533db
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/* State for the OS memory backend: page-granular allocation with a cap. */
+typedef struct os_allocator
+{
+ struct semaphore mutex; /* protects the page counters below */
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+static u32 os_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create OS memory backend
+ *
+ * max_allocation is a byte limit; it is converted to a page count that
+ * caps how much os_allocate() may take from the kernel page allocator.
+ * Returns NULL if either bookkeeping allocation fails.
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ os_allocator * info;
+
+ info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_pages_max = max_allocation >> PAGE_SHIFT;
+ info->num_pages_allocated = 0;
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ backend->ctx = info;
+ backend->allocate = os_allocate;
+ backend->release = os_free;
+ backend->shutdown = os_memory_backend_destroy;
+ backend->stat = os_stat;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend
+ *
+ * Frees only the bookkeeping; debug builds warn if pages are still
+ * outstanding (they would be leaked).
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+ os_allocator * info = (os_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+
+ kfree(info);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ *
+ * Grabs one page at a time from the kernel until descriptor->size_bytes is
+ * covered or the backend's page cap is hit. Uncached pages are DMA-mapped
+ * so block_array holds bus addresses. Returns 1 on success, 0 on failure
+ * (all pages returned).
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+ u32 left;
+ os_allocator * info;
+ int pages_allocated = 0;
+ int is_cached;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size_bytes;
+ is_cached = descriptor->is_cached;
+
+ if (down_interruptible(&info->mutex))
+ {
+ /* BUGFIX: message named the wrong function (said os_free). */
+ DBG_MSG(1, ("Failed to get mutex in os_allocate\n"));
+ return 0; /* failure */
+ }
+
+ descriptor->backend_info = NULL;
+ descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+ descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+ if (NULL == descriptor->block_array)
+ {
+ up(&info->mutex);
+ DBG_MSG(1, ("Block array could not be allocated\n"));
+ return 0; /* failure */
+ }
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+ {
+ struct page * new_page;
+
+ if (is_cached)
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+ } else
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+ }
+ if (NULL == new_page)
+ {
+ break;
+ }
+
+ /* Ensure page caches are flushed. */
+ if ( is_cached )
+ {
+ descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ } else
+ {
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ }
+
+ DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+ if (left < PAGE_SIZE)
+ {
+ left = 0;
+ }
+ else
+ {
+ left -= PAGE_SIZE;
+ }
+
+ pages_allocated++;
+ }
+
+ /* BUGFIX: the format string had three specifiers but only two arguments
+  * were passed (undefined behaviour); also fixed the "Alloce" typo. */
+ DBG_MSG(5, ("Allocation for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated, is_cached));
+
+ if (left)
+ {
+ DBG_MSG(1, ("Failed to allocate needed pages\n"));
+
+ /* Undo: unmap and free everything taken so far. */
+ while(pages_allocated)
+ {
+ pages_allocated--;
+ if ( !is_cached )
+ {
+ dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ }
+
+ up(&info->mutex);
+
+ return 0; /* failure */
+ }
+
+ info->num_pages_allocated += pages_allocated;
+
+ DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ up(&info->mutex);
+
+ return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ *
+ * Unmaps (when uncached) and returns every page recorded in
+ * descriptor->block_array to the kernel, then frees the array.
+ * NOTE(review): pages are recovered via pfn_to_page() from the stored
+ * address; for the uncached case that address came from dma_map_page(),
+ * which assumes bus address == physical address on this platform — confirm.
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+ os_allocator * info;
+ int i;
+
+ BUG_ON(!ctx);
+ BUG_ON(!descriptor);
+
+ info = (os_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+ /* Only the counter is under the mutex; the pages themselves are
+  * owned exclusively by this descriptor. */
+ info->num_pages_allocated -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ for ( i = 0; i < descriptor->nr_blocks; i++)
+ {
+ DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+ if ( ! descriptor->is_cached)
+ {
+ dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ }
+
+ vfree(descriptor->block_array);
+}
+
+
+/* Report the number of bytes currently allocated by the OS backend. */
+static u32 os_stat(struct ump_memory_backend *backend)
+{
+ os_allocator *info;
+ info = (os_allocator*)backend->ctx;
+ return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.h b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.h
new file mode 100755
index 00000000000000..5bb3f1049462b6
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_memory_backend.c b/drivers/parrot/gpu/ump_legacy/linux/ump_memory_backend.c
new file mode 100755
index 00000000000000..d138f119130f27
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_memory_backend.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
+
+/*
+ * Instantiate the backend selected by the ump_backend module parameter:
+ * 0 = dedicated (reserves the physical region first), 1 = OS pages.
+ * Any other value — and any failure — yields NULL.
+ */
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+ ump_memory_backend * backend = NULL;
+
+ /* Create the dynamic memory allocator backend */
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+ DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+ /* Ask the OS if we can use the specified physical memory */
+ if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+ {
+ MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+ return NULL;
+ }
+ backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+ }
+ else if (1 == ump_backend)
+ {
+ DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_os_memory_backend_create(ump_memory_size);
+ }
+
+ return backend;
+}
+
+/*
+ * Release the physical region reserved by ump_memory_backend_create().
+ * Only needed for the dedicated backend; the backend object itself is
+ * torn down via its shutdown() callback elsewhere.
+ */
+void ump_memory_backend_destroy( void )
+{
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+ release_mem_region(ump_memory_address, ump_memory_size);
+ }
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_osk_atomics.c b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_atomics.c
new file mode 100755
index 00000000000000..1e048b1baceb0c
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+/* Atomically decrement the counter and return the new value. */
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/* Atomically increment the counter and return the new value. */
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_osk_low_level_mem.c b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_low_level_mem.c
new file mode 100755
index 00000000000000..2f8aeaabe8abb1
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/uaccess.h> /* to verify pointers from user space */
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+/* Per-mapping refcount: tracks how many VMAs (after forks) share one UMP mapping. */
+typedef struct ump_vma_usage_tracker
+{
+ atomic_t references; /* number of live VMAs referencing the mapping */
+ ump_memory_allocation *descriptor; /* the UMP allocation backing the mapping */
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+/* VMA callbacks: refcounting on open/close, and a fault handler that only
+ * reports errors (the whole range is mapped up front). */
+static struct vm_operations_struct ump_vm_ops =
+{
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ * Signals SIGBUS to the faulting process in either kernel-version variant.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+ /* NOTE(review): on >= 2.6.26 'address' is a pointer printed with %08lx;
+  * strictly this wants a cast to unsigned long — confirm/clean up. */
+ MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+/* Called by the kernel when a VMA is duplicated (e.g. fork): take a reference. */
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_inc_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+}
+
+/*
+ * Called by the kernel when a VMA goes away (munmap/exit): drop a reference
+ * and, when the last one is gone, release the underlying UMP mapping.
+ */
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ _ump_uk_unmap_mem_s args;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+ if (0 == new_val)
+ {
+ ump_memory_allocation * descriptor;
+
+ descriptor = vma_usage_tracker->descriptor;
+
+ /* Marshal the unmap request from the descriptor's bookkeeping. */
+ args.ctx = descriptor->ump_session;
+ args.cookie = descriptor->cookie;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ args._ukk_private = NULL; /** @note unused */
+
+ DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+ _ump_ukk_unmap_mem( & args );
+
+ /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+ }
+}
+
+/*
+ * Prepare a VMA (passed via descriptor->process_mapping_info) for UMP use:
+ * attach a refcount tracker, set the VM flags and page protection, install
+ * the ump_vm_ops callbacks, and record the user-space base address.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+ if (NULL == vma_usage_tracker)
+ {
+ DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+ /* BUGFIX: was "-_MALI_OSK_ERR_FAULT"; the error codes are returned
+  * un-negated on every other path in this function. */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ if (NULL == vma )
+ {
+ kfree(vma_usage_tracker);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ vma->vm_private_data = vma_usage_tracker;
+ vma->vm_flags |= VM_IO;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
+ vma->vm_flags |= VM_RESERVED;
+#else
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP;
+#endif
+
+
+ /* Uncached mappings get write-combine protection. */
+ if (0==descriptor->is_cached)
+ {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+ /* Setup the functions which handle further VMA handling */
+ vma->vm_ops = &ump_vm_ops;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+
+ atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
+ vma_usage_tracker->descriptor = descriptor;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Tear down what _ump_osk_mem_mapregion_init() attached to the VMA.
+ * The mapping itself is removed by the kernel's munmap handling.
+ */
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+
+ if (NULL == descriptor) return;
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ kfree(vma_usage_tracker);
+ return;
+}
+
+/*
+ * Map one physical range into the user VMA at the given byte offset
+ * from the mapping base. Returns _MALI_OSK_ERR_OK on success,
+ * _MALI_OSK_ERR_FAULT on a bad descriptor/VMA or remap failure.
+ */
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+ struct vm_area_struct *vma;
+ _mali_osk_errcode_t retval;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* FIX: dropped a stray duplicated semicolon at the end of this statement. */
+ retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+ return retval;
+}
+
+/* Flush the entire L1 data cache (ARM-specific kernel primitive). */
+static void level1_cache_flush_all(void)
+{
+ DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
+ __cpuc_flush_kern_all();
+}
+
+/*
+ * Cache maintenance for a UMP allocation.
+ *
+ * L1: flushed by virtual address when the caller supplied a valid writable
+ * user range; otherwise either deferred (while a batched cache-operation
+ * sequence is ongoing) or the whole L1 is flushed.
+ * L2 (outer cache): walked block by block using physical addresses, honouring
+ * the byte offset into the allocation and the requested size, applying the
+ * clean/flush/invalidate operation selected by 'op'.
+ */
+void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
+{
+ int i;
+
+ /* Flush L1 using virtual address, the entire range in one go.
+ * Only flush if user space process has a valid write mapping on given address. */
+ if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
+ {
+ __cpuc_flush_dcache_area(virt, size);
+ DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size));
+ }
+ else
+ {
+ if (session_data)
+ {
+ if (op == _UMP_UK_MSYNC_FLUSH_L1 )
+ {
+ DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
+ session_data->has_pending_level1_cache_flush = 0;
+ level1_cache_flush_all();
+ return;
+ }
+ else
+ {
+ if (session_data->cache_operations_ongoing)
+ {
+ /* Batch mode: count the flush and do it once at the end. */
+ session_data->has_pending_level1_cache_flush++;
+ DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
+ }
+ else
+ {
+ /* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
+ level1_cache_flush_all();
+ }
+ }
+ }
+ else
+ {
+ DBG_MSG(4, ("Unkown state %s %d\n", __FUNCTION__, __LINE__));
+ level1_cache_flush_all();
+ }
+ }
+
+ if ( NULL == mem ) return;
+
+ if ( mem->size_bytes==size)
+ {
+ DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id));
+ }
+ else
+ {
+ DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
+ mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
+ }
+
+
+ /* Flush L2 using physical addresses, block for block. */
+ for (i=0 ; i < mem->nr_blocks; i++)
+ {
+ u32 start_p, end_p;
+ ump_dd_physical_block *block;
+ block = &mem->block_array[i];
+
+ /* Skip whole blocks that lie before the requested offset. */
+ if(offset >= block->size)
+ {
+ offset -= block->size;
+ continue;
+ }
+
+ if(offset)
+ {
+ start_p = (u32)block->addr + offset;
+ /* We'll zero the offset later, after using it to calculate end_p. */
+ }
+ else
+ {
+ start_p = (u32)block->addr;
+ }
+
+ /* NOTE(review): end_p is computed inclusive (start + len - 1);
+  * confirm that outer_*_range() expects an inclusive end here. */
+ if(size < block->size - offset)
+ {
+ end_p = start_p + size - 1;
+ size = 0;
+ }
+ else
+ {
+ if(offset)
+ {
+ end_p = start_p + (block->size - offset - 1);
+ size -= block->size - offset;
+ offset = 0;
+ }
+ else
+ {
+ end_p = start_p + block->size - 1;
+ size -= block->size;
+ }
+ }
+
+ switch(op)
+ {
+ case _UMP_UK_MSYNC_CLEAN:
+ outer_clean_range(start_p, end_p);
+ break;
+ case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
+ outer_flush_range(start_p, end_p);
+ break;
+ case _UMP_UK_MSYNC_INVALIDATE:
+ outer_inv_range(start_p, end_p);
+ break;
+ default:
+ break;
+ }
+
+ if(0 == size)
+ {
+ /* Nothing left to flush. */
+ break;
+ }
+ }
+
+ return;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_osk_misc.c b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_misc.c
new file mode 100755
index 00000000000000..775c648ee431f4
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_osk_misc.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+/* Registers the UMP character device; maps any nonzero device-init result
+ * onto the generic _MALI_OSK_ERR_FAULT. */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+ if (0 != ump_kernel_device_initialize())
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Unregisters the UMP character device; always reports success. */
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+ ump_kernel_device_terminate();
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.c b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.c
new file mode 100755
index 00000000000000..aaf112cce0776a
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_allocate_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ err = _ump_ukk_allocate( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+ return map_errcode(err);
+ }
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ /* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+ _ump_uk_release_s release_args;
+
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+ release_args.ctx = (void *) session_data;
+ release_args.secure_id = user_interaction.secure_id;
+
+ err = _ump_ukk_release( &release_args );
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+ }
+
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.h b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.h
new file mode 100755
index 00000000000000..878359a131e647
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.c b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.c
new file mode 100755
index 00000000000000..aec6dd70adc3ad
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_api_version_s version_info;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	version_info.ctx = (void*) session_data;
+	err = _ump_uku_get_api_version( &version_info );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+		return map_errcode(err);
+	}
+
+	version_info.ctx = NULL;
+
+	/* Copy output data back to user space */
+	if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_release_s release_args;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters (argument check matches sibling wrappers) */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_release()\n"));
+		return -EFAULT;
+	}
+
+	release_args.ctx = (void*) session_data;
+	err = _ump_ukk_release( &release_args );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+		return map_errcode(err);
+	}
+
+
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_size_get_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+ err = _ump_ukk_size_get( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+ return map_errcode(err);
+ }
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Do cache maintenance on specified UMP memory.
+ */
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_msync_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_msync()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_msync( &user_interaction );
+
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_cache_operations_control_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_cache_operations_control()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_cache_operations_control()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_cache_operations_control( &user_interaction );
+
+	user_interaction.ctx = NULL;
+
+#if 0 /* No data to copy back */
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_cache_operations_control()\n"));
+		return -EFAULT;
+	}
+#endif
+	return 0; /* success */
+}
+
+int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_switch_hw_usage_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_switch_hw_usage()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_switch_hw_usage()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_switch_hw_usage( &user_interaction );
+
+	user_interaction.ctx = NULL;
+
+#if 0 /* No data to copy back */
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_switch_hw_usage()\n"));
+		return -EFAULT;
+	}
+#endif
+	return 0; /* success */
+}
+
+int ump_lock_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_lock_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_lock()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_lock()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_lock( &user_interaction );
+
+	user_interaction.ctx = NULL;
+
+#if 0 /* No data to copy back */
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_lock()\n"));
+		return -EFAULT;
+	}
+#endif
+
+	return 0; /* success */
+}
+
+int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_unlock_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_unlock()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_unlock()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_unlock( &user_interaction );
+
+	user_interaction.ctx = NULL;
+
+#if 0 /* No data to copy back */
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_unlock()\n"));
+		return -EFAULT;
+	}
+#endif
+
+	return 0; /* success */
+}
diff --git a/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.h b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.h
new file mode 100755
index 00000000000000..6465b710d6dac5
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/linux/ump_ukk_wrappers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_lock_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
diff --git a/drivers/parrot/gpu/ump_legacy/readme.txt b/drivers/parrot/gpu/ump_legacy/readme.txt
new file mode 100755
index 00000000000000..c238cf0f2b1f0b
--- /dev/null
+++ b/drivers/parrot/gpu/ump_legacy/readme.txt
@@ -0,0 +1,28 @@
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+ build_option: debug or release. Debug is default.
+
+The config.h contains following configuration parameters:
+
+ARCH_UMP_BACKEND_DEFAULT
+ 0 specifies the dedicated memory allocator.
+ 1 specifies the OS memory allocator.
+ARCH_UMP_MEMORY_ADDRESS_DEFAULT
+ This is only required for the dedicated memory allocator, and specifies
+ the physical start address of the memory block reserved for UMP.
+ARCH_UMP_MEMORY_SIZE_DEFAULT
+ This specifies the size of the memory block reserved for UMP, or the
+ maximum limit for allocations from the OS.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command. The driver can also be built as a part of the
+kernel itself.
diff --git a/drivers/parrot/i2c/Kconfig b/drivers/parrot/i2c/Kconfig
new file mode 100644
index 00000000000000..b681952cbffbf6
--- /dev/null
+++ b/drivers/parrot/i2c/Kconfig
@@ -0,0 +1,49 @@
+config I2CM_PARROT7
+ tristate "Parrot7 I2C master driver"
+ depends on I2C
+ default n
+ default m if (ARCH_VEXPRESS_P7FPGA)
+ help
+ Select Parrot7 I2C master driver.
+
+config I2CM_PARROT7_BUILTIN
+ bool
+ select I2CM_PARROT7
+
+config I2CM_PARROT7_DEBUG
+ bool "Parrot I2C master driver debug"
+ depends on I2CM_PARROT7
+ default n
+ help
+	  Add debug information, only available on the legacy driver at the moment
+
+config I2CM_PARROT7_DEBUGFS
+ bool "Parrot I2C master debugfs interface"
+ depends on (I2CM_PARROT7 && DEBUG_FS)
+ default n
+ help
+ Creates a debugfs interface for easily changing the I2C bus speed.
+
+config PLDS_I2CS
+ tristate "I2CS PLDS block layer driver"
+ default n
+ help
+ I2CS PLDS block layer driver for DVD player, can be used with
+ /dev/plds peripheral
+
+config TI_UB925_LVDS
+ bool "TI DS90UB925Q LVDS Serializer support"
+ depends on I2C
+ default n
+ help
+ Enable support for TI DS90UB925Q LVDS serialized, including I2C and
+ interrupt pass through.
+
+
+config PARROT_SMSC_USB82514_USB_HUB
+ bool "SMSC USB82514 USB HUB"
+ depends on I2C
+ help
+ Automotive Grade USB 2.0 Hi-Speed 4-Port Hub
+
+source "drivers/parrot/i2c/muxes/Kconfig"
diff --git a/drivers/parrot/i2c/Makefile b/drivers/parrot/i2c/Makefile
new file mode 100644
index 00000000000000..b67c61506259ad
--- /dev/null
+++ b/drivers/parrot/i2c/Makefile
@@ -0,0 +1,9 @@
+
+p7-i2cm-core-objs := p7-i2cm_legacy.o
+obj-$(CONFIG_I2CM_PARROT7) += p7-i2cm-core.o
+obj-$(CONFIG_I2CM_PARROT7_DEBUGFS) += p7-i2cm_debugfs.o
+obj-$(CONFIG_PLDS_I2CS) += plds_i2cs.o
+obj-$(CONFIG_PARROT_SMSC_USB82514_USB_HUB) += smsc-82514-usb-hub.o
+obj-$(CONFIG_TI_UB925_LVDS) += ti_ub925_lvds.o
+
+obj-y += muxes/
diff --git a/drivers/parrot/i2c/muxes/Kconfig b/drivers/parrot/i2c/muxes/Kconfig
new file mode 100644
index 00000000000000..152912e35b23df
--- /dev/null
+++ b/drivers/parrot/i2c/muxes/Kconfig
@@ -0,0 +1,7 @@
+config I2C_MUX_PARROT7
+ tristate "Parrot I2C master multiplexer"
+ depends on I2C_MUX && I2CM_PARROT7
+ help
+ This driver will create I2C adapters to access multiple buses from the
+	  same master through multiplexed pin configurations (e.g. I2C_2_CLKa,
+ I2C_2_CLKb etc...).
diff --git a/drivers/parrot/i2c/muxes/Makefile b/drivers/parrot/i2c/muxes/Makefile
new file mode 100644
index 00000000000000..919c3e5940a2ae
--- /dev/null
+++ b/drivers/parrot/i2c/muxes/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I2C_MUX_PARROT7) += p7-i2cmux.o
diff --git a/drivers/parrot/i2c/muxes/p7-i2cmux.c b/drivers/parrot/i2c/muxes/p7-i2cmux.c
new file mode 100644
index 00000000000000..46775c11be0d6a
--- /dev/null
+++ b/drivers/parrot/i2c/muxes/p7-i2cmux.c
@@ -0,0 +1,211 @@
+/**
+ * linux/drivers/parrot/gpio/mux/p7-i2cmux.c - Parrot7 I2CM bus muxing
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Lionel Flandrin <lionel.flandrin@parrot.com>
+ * date: 19-Sep-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/module.h>
+#include "p7-i2cmux.h"
+
+struct p7i2cmux_channel {
+ /* I2C adapter for this channel */
+ struct i2c_adapter *adap;
+ /* PIN state for this adapter */
+ struct pinctrl_state *pstate;
+};
+
+struct p7i2cmux {
+ struct i2c_adapter *parent;
+ struct pinctrl *pctl;
+ struct pinctrl_state *default_pstate;
+ struct p7i2cmux_channel *channels;
+ unsigned nchannels;
+};
+
+static int p7i2cmux_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct p7i2cmux *mux = data;
+
+ BUG_ON(chan >= mux->nchannels);
+
+ /* Select the channel mux configuration */
+ return pinctrl_select_state(mux->pctl, mux->channels[chan].pstate);
+}
+
+static int p7i2cmux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+ struct p7i2cmux *mux = data;
+
+ BUG_ON(chan >= mux->nchannels);
+
+ /* Switch back to the default channel (i.e. the parent's channel) */
+ return pinctrl_select_state(mux->pctl, mux->default_pstate);
+}
+
+static int __devinit p7i2cmux_probe(struct platform_device *pdev)
+{
+	struct p7i2cmux *mux;
+	const struct p7i2cmux_plat_data *pdata = pdev->dev.platform_data;
+	int i;
+	int ret = 0;
+
+	if (!pdata)
+		return -ENODEV;
+
+	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return -ENOMEM;
+
+	mux->nchannels = pdata->nr_channels;
+
+	if (mux->nchannels == 0) {
+		ret = -ENODEV;
+		goto free_mux;
+	}
+
+	mux->channels = kzalloc(mux->nchannels * sizeof(*(mux->channels)),
+	                        GFP_KERNEL);
+
+	if (!mux->channels) {
+		ret = -ENOMEM;
+		goto free_mux;
+	}
+
+	mux->parent = i2c_get_adapter(pdata->parent);
+	if (!mux->parent) {
+		dev_err(&pdev->dev, "Couldn't find parent I2C adapter %d\n",
+		        pdata->parent);
+		ret = -ENODEV;
+		goto free;
+	}
+
+	mux->pctl = pinctrl_get(&pdev->dev);
+
+	if (IS_ERR(mux->pctl)) {
+		dev_err(&pdev->dev, "Couldn't get pin controller\n");
+		ret = PTR_ERR(mux->pctl);
+		goto put_parent;
+	}
+
+	mux->default_pstate =
+		pinctrl_lookup_state(mux->pctl, PINCTRL_STATE_DEFAULT);
+
+	if (IS_ERR(mux->default_pstate)) {
+		dev_err(&pdev->dev, "Failed to find default pin state\n");
+		ret = PTR_ERR(mux->default_pstate);
+		/* Force the removal of this adapter */
+		goto put_pctl;
+	}
+
+	/* Add muxed adapters */
+	for (i = 0; i < mux->nchannels; i++) {
+		mux->channels[i].adap =
+			i2c_add_mux_adapter(mux->parent, mux,
+			                    pdata->parent * 10 + i,
+			                    i,
+			                    p7i2cmux_select, p7i2cmux_deselect);
+
+		if (mux->channels[i].adap == NULL) {
+			dev_err(&pdev->dev,
+			        "Failed to add mux %s for I2C adapter %s\n",
+			        pdata->channel_names[i], mux->parent->name);
+			ret = -EINVAL;
+			goto remove_mux;
+		}
+
+		mux->channels[i].pstate =
+			pinctrl_lookup_state(mux->pctl,
+			                     pdata->channel_names[i]);
+
+		if (IS_ERR(mux->channels[i].pstate)) {
+			dev_err(&pdev->dev, "Failed to find pin state for %s\n",
+			        pdata->channel_names[i]);
+			ret = PTR_ERR(mux->channels[i].pstate);
+			/* Force the removal of this adapter */
+			i++;
+			goto remove_mux;
+		}
+	}
+
+	/* switch to default bus */
+	ret = pinctrl_select_state(mux->pctl, mux->default_pstate);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to switch to default mux config\n");
+		goto remove_mux;
+	}
+
+	dev_info(&pdev->dev, "Initialized %d mux channels for adapter %s\n",
+	         mux->nchannels, mux->parent->name);
+
+	platform_set_drvdata(pdev, mux);
+
+	return 0;
+
+remove_mux:
+	while (i--) /* post-decrement so adapter 0 is deleted too (was --i: leak) */
+		i2c_del_mux_adapter(mux->channels[i].adap);
+put_pctl:
+	pinctrl_put(mux->pctl);
+put_parent:
+	i2c_put_adapter(mux->parent);
+free:
+	kfree(mux->channels);
+free_mux:
+	kfree(mux);
+
+	return ret;
+}
+
+static int __devexit p7i2cmux_remove(struct platform_device *pdev)
+{
+ struct p7i2cmux *mux = platform_get_drvdata(pdev);
+ int i;
+
+ if (!mux)
+ return -ENODEV;
+
+ for (i = 0; i < mux->nchannels; i++)
+ i2c_del_mux_adapter(mux->channels[i].adap);
+ pinctrl_put(mux->pctl);
+ i2c_put_adapter(mux->parent);
+ kfree(mux->channels);
+ kfree(mux);
+
+ return 0;
+}
+
+static struct platform_driver p7i2cmux_driver = {
+ .probe = p7i2cmux_probe,
+ .remove = __devexit_p(p7i2cmux_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = P7I2CMUX_DRV_NAME,
+ },
+};
+
+#ifdef MODULE
+module_platform_driver(p7i2cmux_driver);
+#else
+static int __init p7_i2cmux_init(void)
+{
+ return platform_driver_register(&p7i2cmux_driver);
+}
+
+postcore_initcall(p7_i2cmux_init);
+#endif /* MODULE */
+
+MODULE_DESCRIPTION("Pin muxing support for multiple I2C buses "
+ "on the same master");
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/i2c/muxes/p7-i2cmux.h b/drivers/parrot/i2c/muxes/p7-i2cmux.h
new file mode 100644
index 00000000000000..60ddabba01bb93
--- /dev/null
+++ b/drivers/parrot/i2c/muxes/p7-i2cmux.h
@@ -0,0 +1,19 @@
+#ifndef _P7_I2CMUX_H_
+#define _P7_I2CMUX_H_
+
+#define P7I2CMUX_DRV_NAME "p7-i2cmux"
+
+struct p7i2cmux_plat_data {
+ /* ID of the parent I2C adapter */
+ int parent;
+ /* base adapter ID we wish to use for the mux channel adapters. 0 to
+ * auto-assign. */
+ unsigned base;
+ /* name of the channels, used to fetch the pin configuration from
+ * pinctrl */
+ const char * const *channel_names;
+ /* number of channels */
+ size_t nr_channels;
+};
+
+#endif /* _P7_I2CMUX_H_ */
diff --git a/drivers/parrot/i2c/p7-i2cm.h b/drivers/parrot/i2c/p7-i2cm.h
new file mode 100644
index 00000000000000..d4f957ebeb0e53
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cm.h
@@ -0,0 +1,41 @@
+#ifndef _P7_I2CM_H
+#define _P7_I2CM_H
+
+#if defined(CONFIG_I2CM_PARROT7) || \
+ defined(CONFIG_I2CM_PARROT7_MODULE)
+
+#define P7I2CM_DRV_NAME "p7-i2cm"
+
+enum p7i2cm_revision {
+ I2CM_REVISION_1 = 1,
+ I2CM_REVISION_2,
+ I2CM_REVISION_3,
+ I2CM_REVISION_NR
+};
+
+struct p7i2cm_plat_data {
+	/* Bus frequency in kHz. Should be:
+	 *
+	 *  <= 100kHz for standard-mode
+	 *  <= 400kHz for fast-mode,
+	 *  <= 1MHz for fast-mode+
+	 */
+	unsigned int bus_freq;
+	/* High speed mode (enabled globally for all devices on the bus) */
+	bool high_speed;
+	/* 3bit master ID, used in high speed mode arbitration. 0 shouldn't be
+	 * used ("reserved for test and diagnostic purposes") */
+	unsigned master_id;
+	/* bus frequency for high speed mode (in kHz). Up to 3.4MHz. */
+	unsigned int bus_high_freq;
+	/* if true the controller is behind a mux and has no direct control on
+	 * its output pins. */
+	unsigned muxed;
+	/* Chip revision */
+	enum p7i2cm_revision revision;
+};
+
+#endif /* defined(CONFIG_I2CM_PARROT7) || \
+ defined(CONFIG_I2CM_PARROT7_MODULE) */
+
+#endif
diff --git a/drivers/parrot/i2c/p7-i2cm_debugfs.c b/drivers/parrot/i2c/p7-i2cm_debugfs.c
new file mode 100644
index 00000000000000..85ef24c09c3b22
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cm_debugfs.c
@@ -0,0 +1,350 @@
+/**
+ * linux/drivers/parrot/i2c/p7-i2cm-debugfs.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 26-Sep-2011
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include "p7-i2cm_debugfs.h"
+#include "p7-i2cm_regs.h"
+
+static struct dentry *p7i2cm_debugfs_root = NULL;
+static atomic_t p7i2cm_debugfs_cnt = ATOMIC_INIT(0);
+
+static int p7i2cm_display_freq(struct seq_file *s,
+ struct p7i2cm_debugfs *d,
+ u32 prescale)
+{
+ seq_printf(s, "%luHz (prescale: 0x%04x)\n",
+ d->in_clk / (12 * (prescale + 1)),
+ prescale);
+
+ return 0;
+}
+
+static ssize_t p7i2cm_read_prescale(struct p7i2cm_debugfs *d,
+                                    const char __user *_buf,
+                                    size_t size,
+                                    loff_t *ppos)
+{
+	unsigned long freq;
+	char *last;
+	char buf[16];
+	unsigned tmp;
+
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(buf, _buf, size))
+		return -EFAULT;
+
+	buf[size] = '\0';
+
+	freq = simple_strtoul(buf, &last, 0);
+
+	switch (*last) {
+	case 'M':
+		freq *= 1000; /* fall through: 'M' multiplies by 1000 twice */
+	case 'k':
+		freq *= 1000; /* fall through */
+	case '\0':
+	case '\n':
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Compute the value of prescale for the desired frequency */
+	tmp = 12 * freq;
+
+	if (tmp == 0)
+		return -ERANGE;
+
+	tmp = (d->in_clk + tmp - 1) / tmp - 1;
+	if (tmp > 0xffff)
+		return 0xffff;
+
+	return tmp;
+}
+
+static int p7i2cm_speed_show(struct seq_file *s, void *unused)
+{
+ struct p7i2cm_debugfs *d = s->private;
+
+ return p7i2cm_display_freq(s, d, __raw_readl(d->base + I2CM_PRESCALE));
+}
+
+static ssize_t p7i2cm_speed_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct p7i2cm_debugfs *d = m->private;
+ ssize_t prescale;
+
+ prescale = p7i2cm_read_prescale(d, buf, size, ppos);
+
+ if (prescale < 0)
+ return prescale;
+
+ __raw_writel(prescale, d->base + I2CM_PRESCALE);
+
+ return size;
+}
+
+static int p7i2cm_speed_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, p7i2cm_speed_show, inode->i_private);
+}
+
+static const struct file_operations p7i2cm_speed_debug_ops = {
+ .open = p7i2cm_speed_debug_open,
+ .read = seq_read,
+ .write = p7i2cm_speed_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int p7i2cm_highspeed_show(struct seq_file *s, void *unused)
+{
+ struct p7i2cm_debugfs *d = s->private;
+
+ return p7i2cm_display_freq(s, d,
+ __raw_readl(d->base + I2CM_HIGH_PRESCALE));
+}
+
+static ssize_t p7i2cm_highspeed_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct p7i2cm_debugfs *d = m->private;
+ ssize_t prescale;
+
+ prescale = p7i2cm_read_prescale(d, buf, size, ppos);
+
+ if (prescale < 0)
+ return prescale;
+
+ __raw_writel(prescale, d->base + I2CM_HIGH_PRESCALE);
+
+ return size;
+}
+
+static int p7i2cm_highspeed_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, p7i2cm_highspeed_show, inode->i_private);
+}
+
+static const struct file_operations p7i2cm_highspeed_debug_ops = {
+ .open = p7i2cm_highspeed_debug_open,
+ .read = seq_read,
+ .write = p7i2cm_highspeed_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int p7i2cm_dbglvl_show(struct seq_file *s, void *unused)
+{
+ struct p7i2cm_debugfs *d = s->private;
+
+ seq_printf(s, "%d\n",d->dbglvl);
+
+ return 0;
+}
+
+static ssize_t p7i2cm_dbglvl_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct p7i2cm_debugfs *d = m->private;
+ unsigned long level;
+ char *last;
+ char _buf[16];
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ level = simple_strtoul(_buf, &last, 0);
+ d->dbglvl = (unsigned int) level;
+
+ return size;
+}
+
+static int p7i2cm_dbglvl_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, p7i2cm_dbglvl_show, inode->i_private);
+}
+static const struct file_operations p7i2cm_dbglvl_debug_ops = {
+ .open = p7i2cm_dbglvl_debug_open,
+ .read = seq_read,
+ .write = p7i2cm_dbglvl_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define VALIDATION_TESTS
+#ifdef VALIDATION_TESTS
+
+static int p7i2cm_test_clkstch_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "Write a value in this file to launch the clock stretching test\n");
+
+ return 0;
+}
+
+static ssize_t p7i2cm_test_clkstch_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct p7i2cm_debugfs *d = s->private;
+
+ /* Read the 4 ID registers from P7MU (8 bytes at @ 0x1A) */
+ __raw_writel(I2CM_WFIFO_START | I2CM_WFIFO_WR | (0x31 << 1),
+ d->base + I2CM_WFIFO);
+ __raw_writel(I2CM_WFIFO_WR | 0x0,
+ d->base + I2CM_WFIFO);
+ __raw_writel(I2CM_WFIFO_WR | 0x1A,
+ d->base + I2CM_WFIFO);
+
+ __raw_writel(I2CM_WFIFO_START | I2CM_WFIFO_WR | (0x31 << 1) | 1,
+ d->base + I2CM_WFIFO);
+ __raw_writel(I2CM_WFIFO_LAST_NACK | I2CM_WFIFO_RD | 8,
+ d->base + I2CM_WFIFO);
+ __raw_writel(I2CM_WFIFO_STOP, d->base + I2CM_WFIFO);
+
+ return size;
+}
+
+static int p7i2cm_test_clkstch_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, p7i2cm_test_clkstch_show, inode->i_private);
+}
+static const struct file_operations p7i2cm_test_clkstch_debug_ops = {
+ .open = p7i2cm_test_clkstch_debug_open,
+ .read = seq_read,
+ .write = p7i2cm_test_clkstch_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* VALIDATION_TESTS */
+
+int p7i2cm_debugfs_init(struct p7i2cm_debugfs *d,
+ void __iomem *base,
+ const char *name,
+ unsigned long in_clk)
+{
+ struct dentry *file;
+ int cnt;
+ int ret = 0;
+
+ /* We have to create the root debugfs directory if we're the first one
+ * using it */
+ if (atomic_inc_return(&p7i2cm_debugfs_cnt) == 1)
+ p7i2cm_debugfs_root = debugfs_create_dir("i2c", NULL);
+
+ if (IS_ERR_OR_NULL(p7i2cm_debugfs_root)) {
+ ret = PTR_ERR(p7i2cm_debugfs_root) ?: -ENOMEM;
+ goto release;
+ }
+
+ d->base = base;
+ d->in_clk = in_clk;
+ d->dbglvl = 0;
+ d->dir = debugfs_create_dir(name, p7i2cm_debugfs_root);
+ if (IS_ERR_OR_NULL(d->dir)) {
+ ret = PTR_ERR(d->dir) ?: -ENOMEM;
+ goto release;
+ }
+
+ file = debugfs_create_file("speed", S_IRUGO, d->dir,
+ d, &p7i2cm_speed_debug_ops);
+ if (IS_ERR_OR_NULL(file)) {
+ ret = PTR_ERR(file) ?: -ENOMEM;
+ goto rm;
+ }
+
+ file = debugfs_create_file("highspeed", S_IRUGO, d->dir,
+ d, &p7i2cm_highspeed_debug_ops);
+ if (IS_ERR_OR_NULL(file)) {
+ ret = PTR_ERR(file) ?: -ENOMEM;
+ goto rm;
+ }
+
+ file = debugfs_create_file("dbglvl", S_IRUGO, d->dir,
+ d, &p7i2cm_dbglvl_debug_ops);
+ if (IS_ERR_OR_NULL(file)) {
+ ret = PTR_ERR(file) ?: -ENOMEM;
+ goto rm;
+ }
+
+#ifdef VALIDATION_TESTS
+ file = debugfs_create_file("test1_clkstrch", S_IRUGO, d->dir,
+ d, &p7i2cm_test_clkstch_debug_ops);
+ if (IS_ERR_OR_NULL(file)) {
+ ret = PTR_ERR(file) ?: -ENOMEM;
+ goto rm;
+ }
+#endif /* VALIDATION_TESTS */
+
+ printk(KERN_INFO "%s debugfs interface registered\n", name);
+
+ return 0;
+
+rm:
+ debugfs_remove_recursive(d->dir);
+release:
+ cnt = atomic_dec_return(&p7i2cm_debugfs_cnt);
+ if (cnt == 0 && !IS_ERR_OR_NULL(p7i2cm_debugfs_root))
+ /* nobody is using the rootdir anymore, destroy it */
+ debugfs_remove_recursive(p7i2cm_debugfs_root);
+
+ d->base = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL(p7i2cm_debugfs_init);
+
+void p7i2cm_debugfs_remove(struct p7i2cm_debugfs *d)
+{
+ int cnt;
+
+ if (d->base) {
+ debugfs_remove_recursive(d->dir);
+
+ cnt = atomic_dec_return(&p7i2cm_debugfs_cnt);
+
+ if (cnt == 0 && !IS_ERR_OR_NULL(p7i2cm_debugfs_root))
+ /* nobody is using the rootdir anymore, destroy it */
+ debugfs_remove_recursive(p7i2cm_debugfs_root);
+ }
+}
+EXPORT_SYMBOL(p7i2cm_debugfs_remove);
+
+MODULE_DESCRIPTION("P7-I2CM debugfs module");
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/i2c/p7-i2cm_debugfs.h b/drivers/parrot/i2c/p7-i2cm_debugfs.h
new file mode 100644
index 00000000000000..7c570c80c3c35f
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cm_debugfs.h
@@ -0,0 +1,19 @@
+#ifndef _P7_I2CM_DEBUGFS_H_
+#define _P7_I2CM_DEBUGFS_H_
+
+#include <linux/debugfs.h>
+
+/* Per-controller debugfs state, embedded in the driver private data. */
+struct p7i2cm_debugfs {
+	struct dentry *dir;     /* this controller's debugfs directory */
+	void __iomem *base;     /* register base; NULL marks "not registered" */
+	unsigned long in_clk;   /* presumably the controller input clock rate
+	                         * used by the speed files — confirm in init */
+	unsigned int dbglvl; /* 0 : no debug message
+	                      * 1 : Essential messages
+	                      * other : All messages */
+};
+
+extern int p7i2cm_debugfs_init(struct p7i2cm_debugfs *, void __iomem *,
+ const char *, unsigned long);
+extern void p7i2cm_debugfs_remove(struct p7i2cm_debugfs *);
+
+#endif /* _P7_I2CM_DEBUGFS_H_ */
diff --git a/drivers/parrot/i2c/p7-i2cm_legacy.c b/drivers/parrot/i2c/p7-i2cm_legacy.c
new file mode 100644
index 00000000000000..ac52ad1d82c742
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cm_legacy.c
@@ -0,0 +1,1409 @@
+/**
+ * @file parrot5_i2cm.c
+ *
+ * @brief Parrot I2C Master driver
+ *
+ * Copyright (C) 2008 Parrot S.A.
+ *
+ * @author david.guilloteau@parrot.com
+ * @author matthieu.castet@parrot.com
+ * @date 2008-08-28
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/io.h>
+#include "p7-i2cm.h"
+#include "p7-i2cm_regs.h"
+
+/* compat value */
+#define I2CM_ITEN_ITDIS 0
+
+#define I2CM_COMMAND_STA I2CM_COMMAND_START
+#define I2CM_COMMAND_STO I2CM_COMMAND_STOP
+
+#include "p7-i2cm_debugfs.h"
+
+#define MULTI_MASTER_SUPPORT 0
+
+#if 0
+#define dprintk(...) dev_dbg(drv_data->dev, __VA_ARGS__)
+#else
+static int trace_addr = -1;
+module_param(trace_addr, int, 0644);
+#define dprintk(...) do { \
+ if ((drv_data->msgs && trace_addr == drv_data->msgs[drv_data->msgs_idx].addr) || trace_addr == 0xffff) \
+ dev_info(drv_data->dev,__VA_ARGS__); \
+ } while (0)
+
+#endif
+
+#define DEFAULT_TIMEOUT 20000 //in us
+#define parrot5_i2cm_wait_transfert(a,b) parrot5_i2cm_wait_transfert_timeout(a,b,DEFAULT_TIMEOUT)
+
+#define PARROT5_I2CM_NAME P7I2CM_DRV_NAME
+
+#define PARROT5_I2CM_TIMEOUT 10000 // in ms
+
+enum {
+ DELAY_DEBUG_IRQ_BACK = 1,
+ DELAY_DEBUG_IRQ_AACK = 2,
+};
+static int test_driver = 0;
+
+static int useirq = 1;
+module_param(useirq, int, 0644);
+
+static int usefifo = 1;
+module_param(usefifo, int, 0644);
+
+enum parrot5_i2cm_pm_mode {
+ RUNNING,
+ SUSPENDING,
+};
+
+/* Per-adapter driver state.  One instance per I2CM controller. */
+struct parrot5_i2cm_data {
+
+	//spinlock_t lock;
+	wait_queue_head_t wait;		/* woken when the transfer completes */
+	int wait_up;			/* set once the transfer is finished/aborted */
+	int stop_sent;			/* a STOP condition has been queued */
+	int irq;			/* irq line, for disable_irq()/enable_irq() */
+
+	struct i2c_msg* msgs;		/* messages of the current transfer */
+	int msgs_num;			/* number of entries in msgs */
+	int msgs_idx;			/* write-side progress: current message */
+	int data_pos;			/* write-side progress: byte within message */
+	int state;			/* state machine (RESTART/STOP/DATA_*/HS_MCODE) */
+	unsigned int status;		/* sticky error: I2CM_STATUS_AL or _RXACK */
+
+	struct i2c_adapter adapter;
+	struct p7i2cm_plat_data *pdata;
+
+	struct clk *clk;
+
+	struct resource *ioarea;
+	void __iomem *base;		/* mapped controller registers */
+
+	int rmsgs_idx;			/* read-side progress when draining RX fifo */
+	int rdata_pos;
+
+	int usefifo;			/* current transfer uses the cmd/data fifos */
+	int hs_mode;			/* high-speed mode enabled at hw_init time */
+
+	struct device *dev;
+
+	struct pinctrl *pctl;
+	enum parrot5_i2cm_pm_mode pm_mode;	/* forces polling while SUSPENDING */
+#ifdef CONFIG_I2CM_PARROT7_DEBUGFS
+	struct p7i2cm_debugfs debug;
+#endif /* CONFIG_I2CM_PARROT7_DEBUGFS */
+};
+
+/* Transfer state machine values for parrot5_i2cm_data.state. */
+enum {
+	RESTART,	/* a (re)start condition + address byte must be sent */
+	STOP,		/* abort path: a stop condition must be sent */
+	DATA_W,		/* sending message payload bytes */
+	DATA_R,		/* receiving message payload bytes */
+	HS_MCODE,	/* sending the high-speed master code (arbitration) */
+};
+
+/**
+ * Interrupt functions
+ */
+/*
+ * True when sleeping is not allowed (irq/atomic context, or while the
+ * driver is suspending): transfers then run in polling mode.
+ */
+static int atomic_context(struct parrot5_i2cm_data *drv_data)
+{
+	return in_interrupt() || in_atomic() ||
+		/*oops_in_progress ||*/ drv_data->pm_mode != RUNNING;
+}
+/*
+ * Whether this transfer may use interrupts: requires the "useirq"
+ * module parameter and a non-atomic context.
+ */
+static int irq_on(struct parrot5_i2cm_data *drv_data)
+{
+	if (atomic_context(drv_data))
+		return 0;
+	return useirq;
+}
+
+/*
+ * Dump the raw bus state decoded from the COMMAND register: who drives
+ * SDA/SCL (Slave or Master) and the sampled line levels.  Also warns
+ * when a slave holds a line low (frozen bus diagnosis).
+ */
+static void parrot5_i2cm_show_debug(struct parrot5_i2cm_data *drv_data)
+{
+	u32 reg = readl(drv_data->base + I2CM_COMMAND);
+	dev_warn(drv_data->dev, "debug cmd : SDA(%sB%d) SCL(%sB%dP7%d) %x\n",
+			reg & (1 << 13) ? "S" : "M", /* 0 : p7 drive the line */
+			!!(reg & (1 << 14)), /* unmasked real value of sda */
+			reg & (1 << 9) ? "S" : "M", /* 0 : p7 drive the line */
+			!!(reg & (1 << 11)), /* unmasked real value of scl */
+			!!(reg & (1 << 10)), /* Internal value of the P7 SCL output */
+			reg);
+	if ((reg & (1 << 13)) && (reg & (1 << 9))) {
+		if (!(reg & (1 << 14)))
+			dev_warn(drv_data->dev, "a slave keep sda to 0!!!\n");
+		if (!(reg & (1 << 11)))
+			dev_warn(drv_data->dev, "a slave keep scl to 0!!!\n");
+	}
+}
+
+/* Acknowledge (clear) the controller interrupt flag. */
+static void parrot5_i2cm_acknowledge_irq(struct parrot5_i2cm_data *drv_data)
+{
+	writel(I2CM_ITACK_ACK, drv_data->base + I2CM_ITACK);
+}
+
+/*
+ * Enable controller interrupts.  When clean_status is set, pending
+ * interrupts left over from a polling-mode transfer are acked first so
+ * they do not fire spuriously.  In fifo mode the DBG_FIFO interrupt is
+ * also enabled so the hardware stops the command fifo on a write NAK.
+ */
+static void parrot5_i2cm_enable_irq(struct parrot5_i2cm_data *drv_data, int clean_status)
+{
+	u32 tmp;
+	/* we need to clear irq before enable otherwise we got
+	   spurious irq, if transfert were done in polling mode
+	 */
+	if (clean_status)
+		parrot5_i2cm_acknowledge_irq(drv_data);
+
+	tmp = I2CM_ITEN_ITEN;
+	/* the I2CM_ITEN_DBG_FIFO irq will make the hw stop cmd fifo
+	   in case of a nak during write
+	 */
+	if (drv_data->usefifo)
+		tmp |= I2CM_ITEN_DBG_FIFO;
+	writel(tmp, drv_data->base + I2CM_ITEN);
+}
+
+/* Mask all controller interrupts at the IP level. */
+static void parrot5_i2cm_disable_irq(struct parrot5_i2cm_data *drv_data)
+{
+	writel(I2CM_ITEN_ITDIS, drv_data->base + I2CM_ITEN);
+}
+
+/* Non-zero when controller interrupts are currently enabled (ITEN). */
+static int parrot5_i2cm_irq_active(struct parrot5_i2cm_data *drv_data)
+{
+	return readl(drv_data->base + I2CM_ITEN);
+}
+
+/*
+ * Build the on-wire address byte: 7-bit slave address shifted left,
+ * LSB set for a read transfer.
+ */
+static int parrot5_i2cm_get_addr(struct i2c_msg *msg)
+{
+	int addr = (msg->addr & 0x7f) << 1;
+
+	if (msg->flags & I2C_M_RD) {
+		addr |= 1;
+	}
+
+	return addr;
+}
+
+/*
+ * Queue a one-byte write command in the command fifo, optionally
+ * followed by a stop condition (stop_sent is recorded accordingly).
+ */
+static void parrot5_i2cm_write_byte_fifo(struct parrot5_i2cm_data *drv_data,
+		char byte, int stop)
+{
+	int ctrl;
+
+	dprintk("write byte fifo %x (stop %d)\n", byte, stop);
+	ctrl = I2CM_WFIFO_WR | byte;
+
+	if (stop) {
+		ctrl |= I2CM_WFIFO_STOP;
+		drv_data->stop_sent = 1;
+	}
+
+	if (drv_data->hs_mode)
+		ctrl |= I2CM_WFIFO_HIGH_SPEED;
+
+	writel(ctrl, drv_data->base + I2CM_WFIFO);
+}
+
+/*
+ * Queue a block-read command in the command fifo, reading up to 255
+ * bytes of the current message (the fifo length field is 8 bits wide).
+ * When the command covers the end of the message the last byte is
+ * NACKed, and a stop is appended if requested.  Advances data_pos by
+ * the number of bytes queued.
+ */
+static void parrot5_i2cm_read_byte_fifo(struct parrot5_i2cm_data *drv_data,
+		struct i2c_msg* msg, int stop)
+{
+	int ctrl;
+	int len;
+	int noack = 0;
+	len = msg->len - drv_data->data_pos;
+	if (len > 255)
+		len = 255;
+
+	ctrl = I2CM_WFIFO_RD;
+
+	if (len > 1)
+		ctrl |= len;
+
+	if (msg->len == len + drv_data->data_pos) {
+		if (len == 1)
+			ctrl |= I2CM_WFIFO_NACK;
+		else
+			ctrl |= I2CM_WFIFO_LAST_NACK;
+		noack = 1;
+	}
+
+	if (stop && noack) {
+		if (len == 1)
+			ctrl |= I2CM_WFIFO_STOP;
+		else
+			ctrl |= I2CM_WFIFO_LAST_STOP;
+		drv_data->stop_sent = 1;
+	}
+
+	if (drv_data->hs_mode)
+		ctrl |= I2CM_WFIFO_HIGH_SPEED;
+
+	drv_data->data_pos+= len;
+
+	dprintk("read byte fifo len %d (noack %d, stop %d)\n", len, noack, stop);
+	writel(ctrl, drv_data->base + I2CM_WFIFO);
+}
+
+/*
+ * Queue a start condition + address byte in the command fifo.  A stop
+ * is appended only for zero-length messages (e.g. bus scan).  The
+ * high-speed master code (state HS_MCODE) is sent with NACK expected
+ * and at normal speed.
+ */
+static void parrot5_i2cm_start_message_fifo(struct parrot5_i2cm_data *drv_data,
+		struct i2c_msg* msg, int stop)
+{
+	int addr, ctrl;
+
+	ctrl = (I2CM_WFIFO_START | I2CM_WFIFO_WR);
+	/* send stop only if msg len is 0 (no data) */
+	if (stop && msg->len == 0) {
+		ctrl |= I2CM_WFIFO_STOP;
+		drv_data->stop_sent = 1;
+	}
+
+	addr = parrot5_i2cm_get_addr(msg);
+
+	ctrl |= addr;
+
+	if (drv_data->state == HS_MCODE)
+		ctrl |= I2CM_WFIFO_NACK;
+	else if (drv_data->hs_mode)
+		ctrl |= I2CM_WFIFO_HIGH_SPEED;
+
+	dprintk("start fifo addr %x (stop %d)\n", addr, stop && msg->len == 0);
+	writel(ctrl, drv_data->base + I2CM_WFIFO);
+}
+
+
+/*
+ * Send one data byte, dispatching to the fifo variant when in fifo
+ * mode.  In register mode the byte goes through TRANSMIT + COMMAND;
+ * with stop set, SDA/SCL are masked afterwards on muxed (shared-pin)
+ * buses.
+ */
+static void parrot5_i2cm_write_byte(struct parrot5_i2cm_data *drv_data,
+		char byte, int stop)
+{
+	int ctrl;
+
+	if (drv_data->usefifo) {
+		parrot5_i2cm_write_byte_fifo(drv_data, byte, stop);
+		return;
+	}
+
+	dprintk("write byte %x (stop %d)\n", byte, stop);
+	ctrl = I2CM_COMMAND_WR;
+
+	if (stop) {
+		ctrl |= I2CM_COMMAND_STO;
+		if (drv_data->pdata->muxed)
+			ctrl |= I2CM_COMMAND_MASK_SDA|I2CM_COMMAND_MASK_SCL;
+		drv_data->stop_sent = 1;
+	}
+
+	if (drv_data->hs_mode)
+		ctrl |= I2CM_COMMAND_HIGH_SPEED;
+
+	writel(byte, drv_data->base + I2CM_TRANSMIT);
+	writel(ctrl, drv_data->base + I2CM_COMMAND);
+}
+
+/*
+ * Issue a single-byte read command (register mode only).  noack NACKs
+ * the byte (last byte of the transfer); stop appends a stop condition,
+ * masking SDA/SCL on muxed buses.
+ */
+static void parrot5_i2cm_read_byte(struct parrot5_i2cm_data *drv_data,
+		struct i2c_msg* msg,
+		int noack, int stop)
+{
+	int ctrl;
+	dprintk("read byte (noack %d, stop %d)\n", noack, stop);
+
+	ctrl = I2CM_COMMAND_RD;
+
+	if (stop) {
+		ctrl |= I2CM_COMMAND_STO;
+		if (drv_data->pdata->muxed)
+			ctrl |= I2CM_COMMAND_MASK_SDA|I2CM_COMMAND_MASK_SCL;
+		drv_data->stop_sent = 1;
+	}
+
+	if (noack) {
+		ctrl |= I2CM_COMMAND_NACK;
+	}
+	else if (!(msg->flags & I2C_M_NO_RD_ACK)) {
+		ctrl |= I2CM_COMMAND_ACK; /* XXX do nothing : bit already set to 0 */
+	}
+
+	if (drv_data->hs_mode)
+		ctrl |= I2CM_COMMAND_HIGH_SPEED;
+
+	writel(ctrl, drv_data->base + I2CM_COMMAND);
+}
+
+/*
+ * Send a start condition + address byte, dispatching to the fifo
+ * variant when in fifo mode.  A stop is appended only for zero-length
+ * messages.  The high-speed master code (state HS_MCODE) is always
+ * sent at normal speed, per the HS-mode arbitration protocol.
+ */
+static void parrot5_i2cm_start_message(struct parrot5_i2cm_data *drv_data,
+		struct i2c_msg* msg, int stop)
+{
+	int addr, ctrl;
+
+	if (drv_data->usefifo) {
+		parrot5_i2cm_start_message_fifo(drv_data, msg, stop);
+		return;
+	}
+
+	ctrl = (I2CM_COMMAND_STA | I2CM_COMMAND_WR);
+	/* send stop only if msg len is 0 (no data) */
+	if (stop && msg->len == 0) {
+		ctrl |= I2CM_COMMAND_STO;
+		if (drv_data->pdata->muxed)
+			ctrl |= I2CM_COMMAND_MASK_SDA|I2CM_COMMAND_MASK_SCL;
+		drv_data->stop_sent = 1;
+	}
+
+	addr = parrot5_i2cm_get_addr(msg);
+
+	if (drv_data->hs_mode && drv_data->state != HS_MCODE)
+		ctrl |= I2CM_COMMAND_HIGH_SPEED;
+
+	dprintk("start addr %x (stop %d)\n", addr, stop && msg->len == 0);
+	writel(addr, drv_data->base + I2CM_TRANSMIT);
+	writel(ctrl, drv_data->base + I2CM_COMMAND);
+}
+
+/*
+ * Send a bare stop condition.  Writing the COMMAND register also
+ * clears the command fifo, so this doubles as the abort path.
+ */
+static void parrot5_i2cm_send_stop(struct parrot5_i2cm_data *drv_data)
+{
+	int ctrl;
+	dprintk("stop\n");
+	drv_data->stop_sent = 1;
+
+	ctrl = I2CM_COMMAND_STO;
+	if (drv_data->pdata->muxed)
+		ctrl |= I2CM_COMMAND_MASK_SDA|I2CM_COMMAND_MASK_SCL;
+
+	if (drv_data->hs_mode)
+		ctrl |= I2CM_COMMAND_HIGH_SPEED;
+
+	/* write to this register also clear the fifo */
+	writel(ctrl, drv_data->base + I2CM_COMMAND);
+}
+
+/*
+ * Poll STATUS until all bits in @flags are cleared.  @wait is the
+ * per-iteration delay in microseconds; the total budget is a fixed
+ * 1e6-unit countdown (jiffies may not advance when irqs are off).
+ * Returns 0 on success, -1 on timeout.
+ */
+static int parrot5_i2cm_wait_transfert_timeout(struct parrot5_i2cm_data *drv_data,
+		u32 flags, unsigned long wait)
+{
+	u32 status;
+	int ret = -1;
+	//in case of no irq, jiffies won't update.
+	long timeout = 1000000;
+
+	/* wait the end of current transfer */
+	do {
+		status = readl(drv_data->base + I2CM_STATUS);
+		if ((status & flags) == 0) {
+			ret = 0;
+			break;
+		}
+		if (atomic_context(drv_data)) {
+			cpu_relax();
+			timeout -= 10;
+		}
+		else {
+			if (wait < 100) // < 100 us
+				udelay(wait); //active wait
+			else if (wait < 10000) // 10 ms
+				usleep_range(wait, wait);
+			else
+				/* NOTE(review): wait is in us but msleep() takes
+				 * ms — this sleeps ~1000x longer than intended;
+				 * confirm whether msleep(wait / 1000) was meant */
+				msleep(wait);
+
+			timeout -= wait;
+		}
+	} while (timeout > 0);
+	return ret;
+}
+
+/*
+ * Sanity-check the STATUS register: returns 1 when a transfer is in
+ * progress, an interrupt is pending, the RX fifo is not drained, the
+ * command fifo is not empty, or (when busy_check) the bus is busy.
+ */
+static int parrot5_i2cm_badstatus(u32 status, int busy_check)
+{
+	if (status & (I2CM_STATUS_TIP|I2CM_STATUS_IF|I2CM_STATUS_FIFO_FILLED))
+		return 1;
+	if (busy_check && (status & I2CM_STATUS_BUSY))
+		return 1;
+	if ((status & I2CM_STATUS_CMD_EMPTY) != I2CM_STATUS_CMD_EMPTY)
+		return 1;
+	return 0;
+}
+
+/* reset controller state :
+ wait end of TIP, [end of busy].
+ clean irq [and busy]
+
+ busy is touched only is clean_busy is true (we should own the bus)
+ */
+static int parrot5_i2cm_reset(struct parrot5_i2cm_data *drv_data, int clean_busy)
+{
+	u32 status, bus_state;
+	int ret;
+	int err = -1;	/* pessimistic: cleared only when recovery succeeds */
+	/* NOTE(review): local irq_on shadows the irq_on() helper above;
+	 * here it snapshots whether IP interrupts were enabled so they
+	 * can be restored on exit */
+	int irq_on = parrot5_i2cm_irq_active(drv_data);
+	/* disable irq */
+	if (irq_on)
+		parrot5_i2cm_disable_irq(drv_data);
+
+	/* wait a stable state */
+	ret = parrot5_i2cm_wait_transfert(drv_data, I2CM_STATUS_TIP);
+	if (ret) {
+		dev_warn(drv_data->dev,
+			"timeout while waiting the end of current transfert\n");
+		goto exit;
+	}
+#if 0
+	status = readl(drv_data->base + I2CM_STATUS);
+	if (clean_busy && drv_data->stop_sent && (status & I2CM_STATUS_BUSY)) {
+		ret = parrot5_i2cm_wait_transfert(drv_data, I2CM_STATUS_BUSY);
+		if (ret) {
+			dev_warn(drv_data->dev, "timeout while waiting busy\n");
+		}
+	}
+#endif
+	bus_state = readl(drv_data->base + I2CM_COMMAND);
+
+	/* P7 don't drive SDA but it is 0 */
+	if ((bus_state & (1 << 13)) && !(bus_state & (1 << 14))) {
+		dev_warn(drv_data->dev, "try bus defreeze\n");
+		/* try to defreeze sda */
+		writel(I2CM_COMMAND_BUS_CLEAR, drv_data->base + I2CM_COMMAND);
+		ret = parrot5_i2cm_wait_transfert_timeout(drv_data, I2CM_STATUS_TIP, 25);
+		if (ret) {
+			dev_warn(drv_data->dev, "timeout while clearing bus\n");
+			goto exit;
+		}
+
+		/* check if it worked */
+		bus_state = readl(drv_data->base + I2CM_COMMAND);
+		if ((bus_state & (1 << 13)) && !(bus_state & (1 << 14))) {
+			dev_warn(drv_data->dev, "try bus defreeze #2\n");
+			/* try to defreeze sda */
+			writel(I2CM_COMMAND_BUS_CLEAR, drv_data->base + I2CM_COMMAND);
+			ret = parrot5_i2cm_wait_transfert_timeout(drv_data, I2CM_STATUS_TIP, 25);
+			if (ret) {
+				dev_warn(drv_data->dev, "timeout while clearing bus #2\n");
+				goto exit;
+			}
+		}
+	}
+
+	status = readl(drv_data->base + I2CM_STATUS);
+
+	/* send stop if needed */
+	if (clean_busy && (status & I2CM_STATUS_BUSY)) {
+		parrot5_i2cm_send_stop(drv_data);
+		ret = parrot5_i2cm_wait_transfert_timeout(drv_data, I2CM_STATUS_TIP, 10);
+		if (ret) {
+			dev_warn(drv_data->dev, "timeout while sending stop bit\n");
+			goto exit;
+		}
+	}
+
+	err = 0;
+exit:
+
+	if (irq_on)
+		parrot5_i2cm_enable_irq(drv_data, 1);
+	return err;
+}
+
+/*
+ * Drain the RX fifo into the read messages, tracked independently of
+ * the write side by rmsgs_idx/rdata_pos (the command fifo may be well
+ * ahead of the received data).  Stops when the fifo is empty, all
+ * messages are consumed, or an overflow is detected.
+ */
+static void parrot5_i2cm_read_fifo(struct parrot5_i2cm_data *drv_data)
+{
+	u32 data;
+	while (1) {
+		struct i2c_msg* msg;
+		dprintk("i2cm_read_fifo msg idx %d, data pos %d\n",
+				drv_data->rmsgs_idx, drv_data->rdata_pos);
+
+		if (drv_data->rmsgs_idx >= drv_data->msgs_num) {
+			/* we will check that fifo is empty at the end
+			   of transaction
+			 */
+			break;
+		}
+
+		msg = &drv_data->msgs[drv_data->rmsgs_idx];
+
+		if (!(msg->flags & I2C_M_RD)) {
+			/* find next read descriptor */
+			drv_data->rmsgs_idx++;
+			continue;
+		}
+		data = readl(drv_data->base + I2CM_RFIFO);
+		if (data & I2CM_RFIFO_EMPTY)
+			break;
+		if (drv_data->rdata_pos >= msg->len) {
+			dev_warn(drv_data->dev, "i2c read fifo overlfow\n");
+			break;
+		}
+		msg->buf[drv_data->rdata_pos] = data & 0xff;
+		dprintk("i2c read 0x%x\n", msg->buf[drv_data->rdata_pos]);
+		drv_data->rdata_pos++;
+
+		if (drv_data->rdata_pos >= msg->len) {
+			/* go to next descriptor */
+			drv_data->rmsgs_idx++;
+			drv_data->rdata_pos = 0;
+		}
+	}
+}
+
+/*
+ * Core transfer state machine, shared by the hard irq handler, the
+ * polling loop and the initial fifo fill (see callers).  @status1
+ * carries the error bits captured by the caller before the irq ack
+ * (they are cleared by the ack); @mode only tags the debug traces.
+ * Consumes received bytes, advances through drv_data->msgs, submits
+ * the next command(s) — looping while the command fifo accepts more —
+ * and wakes the waiter once the transfer completed or aborted.
+ */
+static void parrot5_i2cm_irq_(struct parrot5_i2cm_data *drv_data, unsigned int status1, const char *mode)
+{
+	struct i2c_msg* msg = &drv_data->msgs[drv_data->msgs_idx];
+	int noack;
+	int last_msg;
+	/* NOTE : we need to read status after ack to not miss event,
+	   but some even are clear by ack (BAD_x) ... */
+	unsigned int status = readl(drv_data->base + I2CM_STATUS) | status1;
+
+	dprintk("%s status %x state %d\n", mode,
+			status, drv_data->state);
+
+	/* BAD_STOP is buggy in case we send start stop in the same command */
+	if (status1 & ~(I2CM_STATUS_BAD_ACK|I2CM_STATUS_AL|I2CM_STATUS_BAD_STOP)) {
+		dev_warn(drv_data->dev, "protocol error : %s%s%s%s%s%s %x\n",
+				status1 & I2CM_STATUS_BAD_CMD ? "CMD":"",
+				status1 & I2CM_STATUS_BAD_WR ? "WR":"",
+				status1 & I2CM_STATUS_BAD_RD ? "RD":"",
+				status1 & I2CM_STATUS_BAD_STOP ? "STOP":"",
+				status1 & I2CM_STATUS_BAD_START ? "START":"",
+				status1 & I2CM_STATUS_BAD_SCL ? "SCL":"",
+				status1);
+		parrot5_i2cm_show_debug(drv_data);
+	}
+	if (drv_data->wait_up) {
+		/* If we are here we got a spurious irq. This should not
+		   happen anymore : we are ack again irq when setting wait_up
+		 */
+		dev_warn(drv_data->dev, "irq after transfert end, status %x\n", status);
+		return;
+	}
+
+	if (!drv_data->usefifo && (status & I2CM_STATUS_TIP)) {
+		dev_warn(drv_data->dev, "TIP in irq ??? %x\n", status);
+		return;
+	}
+
+	if (status & I2CM_STATUS_AL) {
+		dev_warn(drv_data->dev, "ab lost detected \n");
+		parrot5_i2cm_show_debug(drv_data);
+		/* abitration lost */
+		drv_data->status = I2CM_STATUS_AL;
+		/* abort transfert without generating stop */
+		drv_data->wait_up = 1;
+	}
+	else if (drv_data->usefifo) {
+		/* I2C_M_IGNORE_NAK is not supported in this mode */
+		if (status & I2CM_STATUS_BAD_ACK) {
+			/* TODO in hs mode, we should detect master code failure */
+			drv_data->status = I2CM_STATUS_RXACK;
+			/* do we need to sent a stop ?
+			   we don't know...
+			   if the stop was after the aborted command it won't be sent
+			   if the stop command was in the aborted command it will be sent (case of scaning with a command that do start write stop)
+
+			   we assume that if we have tip, we are sending the stop command ...
+			 */
+			/* NOTE(review): "1 ||" makes the rest of this condition
+			 * dead code — apparently deliberate per the comment
+			 * above, but confirm */
+			if (1 || (status & I2CM_STATUS_BUSY) && !(status & I2CM_STATUS_TIP)) {
+				drv_data->stop_sent = 0;
+				drv_data->state = DATA_W;
+			}
+			status &= ~I2CM_STATUS_BAD_ACK;
+		}
+		if (status & I2CM_STATUS_FIFO_FILLED)
+			parrot5_i2cm_read_fifo(drv_data);
+	}
+	else {
+		if (drv_data->state == DATA_R) {
+			WARN_ON(!(msg->flags & I2C_M_RD));
+			/* read received data */
+			msg->buf[drv_data->data_pos] = readl(drv_data->base + I2CM_RECEIVE);
+			dprintk("i2c read 0x%x\n", msg->buf[drv_data->data_pos]);
+			drv_data->data_pos++;
+		}
+		else if (drv_data->state == HS_MCODE) {
+			/* the master code must be NACKed; an ACK is an error */
+			if (!(status & I2CM_STATUS_RXACK)) {
+				drv_data->status = I2CM_STATUS_RXACK;
+			}
+		}
+		else if ((status & I2CM_STATUS_RXACK) && !(msg->flags & I2C_M_IGNORE_NAK)) {
+			/* nak : a stop we be sent to finish the transfert */
+			drv_data->status = I2CM_STATUS_RXACK;
+		}
+	}
+
+	/* if it was last cmd and transfert is finished, return */
+	if (drv_data->stop_sent || drv_data->wait_up) {
+		if (!(status & I2CM_STATUS_TIP) /* not in transfert and no cmd in fifo */
+				&& (!(drv_data->usefifo)
+					|| (drv_data->usefifo
+						&& (status & I2CM_STATUS_CMD_EMPTY)))) {
+
+			drv_data->wait_up = 1;
+			/* TODO if we do multimaster, we need to mask FIFOCMD_ERROR
+			   when transfert is finished, otherwise we will get
+			   BAD_SCL each time others master use the clock
+			 */
+			/* NOTE re ack to clear spurious irq */
+			parrot5_i2cm_acknowledge_irq(drv_data);
+			wake_up(&drv_data->wait);
+		}
+		goto exit;
+	}
+
+resubmit:
+	if (drv_data->usefifo && ((status & I2CM_STATUS_CMD_FULL) ||
+				(status & I2CM_STATUS_BAD_ACK)))
+		goto exit;
+
+	BUG_ON(drv_data->state == STOP);
+
+	/* after here we will sumbit a new command */
+
+	if (drv_data->status) {
+		/* nak, abort transfert : we didn't submit stop if we are here */
+		drv_data->state = STOP;
+	}
+	else if (drv_data->state == HS_MCODE) {
+		drv_data->state = RESTART;
+	}
+	else if (drv_data->state == RESTART && msg->len) {
+		if (msg->flags & I2C_M_RD)
+			drv_data->state = DATA_R;
+		else
+			drv_data->state = DATA_W;
+	}
+	else {
+		if (drv_data->data_pos >= msg->len) {
+			/* we are at the end of the data buffer,
+			   check next operation */
+			if (drv_data->msgs_idx + 1 >= drv_data->msgs_num) {
+				/* no more operation */
+				drv_data->state = STOP;
+				WARN_ON(1);
+			}
+			else {
+				drv_data->data_pos = 0;
+				drv_data->msgs_idx++;
+				msg = &drv_data->msgs[drv_data->msgs_idx];
+#if 0
+				/* XXX we could avoid restart between some transaction
+				   I2C_M_NOSTART but we don't support it ATM
+				 */
+				if ((msg->flags & I2C_M_NOSTART) && (
+						((msg->flags & I2C_M_RD) && drv_data->state == DATA_R) ||
+						(!(msg->flags & I2C_M_RD) && drv_data->state == DATA_W)
+						))
+					dprintk("restart skip\n");
+				else
+#endif
+				/* restart */
+				if (!((msg->flags & I2C_M_NOSTART) && msg->len))
+					drv_data->state = RESTART;
+			}
+		}
+	}
+	dprintk("data pos %d, len %d\n", drv_data->data_pos, msg->len);
+	dprintk("msg idx %d, num %d\n", drv_data->msgs_idx, drv_data->msgs_num);
+
+	/* for read nack the last read */
+	noack = (drv_data->data_pos + 1 == msg->len);
+	/* send stop when last data (noack) and last cmd */
+	last_msg = (drv_data->msgs_idx + 1 == drv_data->msgs_num);
+
+	switch (drv_data->state) {
+	case DATA_W:
+		parrot5_i2cm_write_byte(drv_data,
+				msg->buf[drv_data->data_pos], noack && last_msg);
+		drv_data->data_pos++;
+		break;
+	case DATA_R:
+		if (drv_data->usefifo)
+			parrot5_i2cm_read_byte_fifo(drv_data, msg, last_msg);
+		else
+			parrot5_i2cm_read_byte(drv_data, msg, noack, noack && last_msg);
+		break;
+	case RESTART:
+		parrot5_i2cm_start_message(drv_data, msg, last_msg);
+		break;
+	case STOP:
+		parrot5_i2cm_send_stop(drv_data);
+		break;
+	}
+
+	if (drv_data->usefifo && !drv_data->stop_sent) {
+		status = readl(drv_data->base + I2CM_STATUS);
+		dprintk("resubmit status %x state %d\n", status, drv_data->state);
+		goto resubmit;
+	}
+
+exit:
+	return;
+}
+
+/*
+ * Hard irq handler (IRQF_SHARED).  Captures the error/arbitration bits
+ * before acking — the ack clears them — then runs the state machine.
+ * The test_driver delays are fault-injection hooks for validation.
+ */
+static irqreturn_t parrot5_i2cm_irq(int irq, void *dev_id)
+{
+	struct parrot5_i2cm_data *drv_data = dev_id;
+	unsigned int status = readl(drv_data->base + I2CM_STATUS);
+
+	if (!useirq)
+		dev_warn(drv_data->dev, "i2c irq handler %x in poll mode\n", status);
+
+	if ((status & I2CM_STATUS_IF) == 0)
+		return IRQ_NONE;
+
+	if (test_driver & DELAY_DEBUG_IRQ_BACK)
+		udelay(100);
+	/* acknowledge interrupt */
+	parrot5_i2cm_acknowledge_irq(drv_data);
+	if (test_driver & DELAY_DEBUG_IRQ_AACK)
+		udelay(100);
+
+	parrot5_i2cm_irq_(drv_data, status&(I2CM_STATUS_ERROR_MASK|I2CM_STATUS_AL), "irq");
+	return IRQ_HANDLED;
+}
+
+/*
+ * Calculate the correct value of the prescale registers.
+ * the formula is: i2c_clk = in_clk / (12 * (PRESCALE + 1))
+ *
+ * The result is rounded *up* since prescale is a divider and we don't want the
+ * output frequency to be superior to the specified speed.
+ *
+ * i2c_clk is in kHz
+ */
+static inline unsigned p7_i2cm_get_prescale(unsigned long in_clk,
+		unsigned long i2c_clk)
+{
+	unsigned tmp;
+
+	/* target divider: 12 * i2c_clk, with i2c_clk given in kHz */
+	tmp = 12 * i2c_clk * 1000;
+
+	/* divide rounding up, then -1 per the PRESCALE formula */
+	tmp = (in_clk + tmp - 1) / tmp - 1;
+
+	/* PRESCALE == 0 resets the IP (see hw_init), so clamp to 1 */
+	if (tmp == 0)
+		tmp = 1;
+	return tmp;
+}
+/**
+ * Hardware init
+ */
+/*
+ * Clock and controller initialisation: acquire/enable the "i2cm"
+ * clock, reset the IP via PRESCALE=0, program the bus-frequency
+ * prescaler (and the high-speed one when enabled), then enable
+ * interrupts.  Returns 0 or a negative errno.
+ */
+static int parrot5_i2cm_hw_init(struct parrot5_i2cm_data *drv_data, struct platform_device *pdev)
+{
+	int prescale = 0;
+	int ret = 0;
+	const char *const clk_name = "i2cm";
+
+	/* enable I2C master clock */
+	drv_data->clk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(drv_data->clk)) {
+		/* NOTE(review): PTR_ERR(drv_data->clk) is discarded;
+		 * callers only ever see -EINVAL */
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = clk_prepare_enable(drv_data->clk);
+	if (ret) {
+		clk_put(drv_data->clk);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Workaround for HW bug: RX FIFO might be left in an undefined state
+	 * post RESET. Should only affect MPW1. */
+	if (drv_data->pdata->revision == I2CM_REVISION_1) {
+		writel(1, drv_data->base + I2CM_PRESCALE);
+		clk_disable(drv_data->clk);
+		clk_enable(drv_data->clk);
+	}
+
+	/* If the PRESCALE register is programmed with 0, the I2C master
+	   is reset as if the chip was just powered on
+	 */
+	writel(0, drv_data->base + I2CM_PRESCALE);
+
+	/* program prescale register */
+	prescale = p7_i2cm_get_prescale(clk_get_rate(drv_data->clk),
+			drv_data->pdata->bus_freq);
+	/* Warning: we have to write a non-0 value to the I2CM_PRESCALE register
+	   before we configure any other register */
+	writel(prescale, drv_data->base + I2CM_PRESCALE);
+	/* read-back check: the register silently truncates too-large values */
+	if (readl(drv_data->base + I2CM_PRESCALE) != prescale) {
+		writel(0xffffffff, drv_data->base + I2CM_PRESCALE);
+
+		WARN(1, KERN_ERR"i2cm prescaler is too big %d (max %d)\n",
+				prescale, readl(drv_data->base + I2CM_PRESCALE));
+	}
+
+	if (drv_data->pdata->high_speed) {
+		unsigned int high_prescale;
+		high_prescale = p7_i2cm_get_prescale(clk_get_rate(drv_data->clk),
+				drv_data->pdata->bus_high_freq);
+
+		/* ignore if hs speed is low than normal speed */
+		if (high_prescale >= prescale) {
+			drv_data->hs_mode = 0;
+		}
+		else {
+			writel(high_prescale, drv_data->base + I2CM_HIGH_PRESCALE);
+			drv_data->hs_mode = 1;
+		}
+	}
+
+	/* interrupt mode */
+	parrot5_i2cm_enable_irq(drv_data, 1);
+
+exit:
+	return ret;
+}
+
+/*
+ * Run one complete transfer attempt: initialise the per-transfer
+ * bookkeeping, check the bus is idle (irqs disabled around the
+ * busy-check + start to narrow the multi-master race window), send the
+ * HS master code when needed, then wait for completion either on the
+ * waitqueue (irq mode) or by polling.  Returns num on success, -EBUSY
+ * on busy bus / lost arbitration, -ETIMEDOUT on timeout.
+ */
+static int parrot5_i2cm_master_start(struct parrot5_i2cm_data *drv_data,
+		struct i2c_msg *msgs,
+		int num)
+{
+	unsigned long flags;
+	int time_left;
+	u32 status;
+
+	drv_data->msgs = msgs;
+	drv_data->msgs_num = num;
+	drv_data->msgs_idx = 0;
+	drv_data->data_pos = 0;
+	drv_data->rmsgs_idx = 0;
+	drv_data->rdata_pos = 0;
+	drv_data->state = RESTART;
+	drv_data->status = 0;
+	drv_data->stop_sent = 0;
+	drv_data->wait_up = 0;
+	drv_data->usefifo = usefifo;
+
+	if (drv_data->usefifo && irq_on(drv_data) == 0) {
+		/* we need error status that are available only with irq ! In theory
+		   we could disable irq at interrupt controller level like it is done
+		   in fifo case */
+		dev_err(drv_data->dev, "i2c fifo mode without irq is not supported\n");
+		drv_data->usefifo = 0;
+	}
+
+	if (irq_on(drv_data) == 0) {
+		/* irq off, no fifo */
+		parrot5_i2cm_disable_irq(drv_data);
+	}
+	else {
+		/* irq on */
+		parrot5_i2cm_enable_irq(drv_data, 0);
+	}
+
+	/* if another master take the bus before we issue the start condition we
+	   have a race. We should make the busy check + sending start in less
+	   than 4 µs @ 100Khz. That with we disable irq.
+
+	   an disassembling show the lock is taken for 30 instructions.
+	 */
+	local_irq_save(flags);
+	status = readl(drv_data->base + I2CM_STATUS);
+	if (status & I2CM_STATUS_BUSY) {
+		local_irq_restore(flags);
+		dev_info(drv_data->dev, "i2c bus is busy. Waiting...\n");
+		return -EBUSY;
+	}
+
+	if (irq_on(drv_data) && drv_data->usefifo)
+		/* we need error status that are available only with irq enable in IP
+		   so don't disable irq on IP level but on interrupt controller level */
+		disable_irq(drv_data->irq);
+
+	if (drv_data->hs_mode) {
+		/* In high speed mode we have to send a master code for
+		 * arbitration. This transfer is done in fast-mode (or slower). */
+		struct i2c_msg master_code = {
+			.addr = (0x8 | (drv_data->pdata->master_id & 0x7)) >> 1,
+			.flags = (drv_data->pdata->master_id & 1) ? I2C_M_RD : 0,
+			.len = 0,
+			.buf = NULL,
+		};
+		drv_data->state = HS_MCODE;
+		parrot5_i2cm_start_message(drv_data, &master_code, 0);
+	}
+	else {
+		parrot5_i2cm_start_message(drv_data, &msgs[0], num == 1);
+	}
+	/* we sent the start, we can release the irq lock */
+	local_irq_restore(flags);
+	if (drv_data->usefifo) {
+		/* fill the fifo with irq off */
+		parrot5_i2cm_irq_(drv_data, 0, "fifo fill");
+		/* fifo is full or fill all our msgs, enable irq */
+		if (irq_on(drv_data))
+			enable_irq(drv_data->irq);
+	}
+
+	if (irq_on(drv_data)) {
+		int timeout;
+		/* NOTE(review): adapter.timeout is conventionally in
+		 * jiffies for the i2c core; here it is treated as ms —
+		 * consistent with PARROT5_I2CM_TIMEOUT but confirm */
+		timeout = msecs_to_jiffies(drv_data->adapter.timeout);
+		time_left = wait_event_timeout(drv_data->wait,
+				drv_data->wait_up,
+				timeout);
+	}
+	else {
+		/* polling mode no fifo */
+		do {
+			time_left = parrot5_i2cm_wait_transfert(drv_data, I2CM_STATUS_TIP);
+			if (time_left) {
+				dev_warn(drv_data->dev, "timeout while waiting the end of current transfert\n");
+				time_left = 0;
+				break;
+			}
+			time_left = 1;
+
+			parrot5_i2cm_irq_(drv_data, 0, "poll");
+		} while (!drv_data->wait_up);
+
+		parrot5_i2cm_enable_irq(drv_data, 1);
+	}
+
+	if (time_left <= 0) {
+		dev_warn(drv_data->dev, "xfer timeout %d, wake %d,%d\n", time_left,
+				drv_data->stop_sent, drv_data->wait_up);
+		/* clear fifo */
+		writel(0, drv_data->base + I2CM_TRANSMIT);
+		return -ETIMEDOUT;
+	}
+
+	if (drv_data->status == I2CM_STATUS_AL) {
+		/* clear fifo */
+		writel(0, drv_data->base + I2CM_TRANSMIT);
+		dev_info(drv_data->dev, "i2c arbitration lost. Waiting...\n");
+		return -EBUSY;
+	}
+	return num;
+}
+
+/**
+ * I2C transfer main function
+ */
+/*
+ * i2c_algorithm.master_xfer entry point: sanity-check the controller,
+ * run the transfer, recover the bus on bad end state, and retry up to
+ * 3 times after a lost arbitration.  Returns num on success or a
+ * negative errno (errors are collapsed to -EREMOTEIO).
+ */
+static int parrot5_i2cm_master_xfer(struct i2c_adapter *i2c_adap,
+		struct i2c_msg *msgs,
+		int num)
+{
+	struct parrot5_i2cm_data *drv_data = i2c_adap->algo_data;
+	int ret;
+	u32 status;
+	int retry = 0;
+
+retry_transfert:
+	/* first check if i2c state look sane */
+	status = readl(drv_data->base + I2CM_STATUS);
+
+	/* i2c busy is checked later */
+	if (parrot5_i2cm_badstatus(status, 0)) {
+		dev_warn(drv_data->dev, "bad status before transfert %x, aborting\n", status);
+		return -EREMOTEIO;
+	}
+
+	ret = parrot5_i2cm_master_start(drv_data, msgs, num);
+	if (ret == -EINVAL)
+		return -EINVAL;
+
+	if (ret == -EBUSY)
+		goto arbitration_error;
+
+	/* check i2c state after transfert */
+	status = readl(drv_data->base + I2CM_STATUS);
+
+	if (parrot5_i2cm_badstatus(status, 1)) {
+		dev_warn(drv_data->dev, "xfer end hw status %x, status %d\n",
+				status,
+				drv_data->status);
+
+		if (ret >= 0) {
+			/* NOTE(review): split string lacks a separating
+			 * space — prints "...did not reportany error" */
+			dev_warn(drv_data->dev, "i2c bus is bad but transfert did not report"
+					"any error");
+		}
+		/* we own the bus and need to release it */
+		parrot5_i2cm_reset(drv_data, 1);
+
+		/* recheck i2c state after clean */
+		status = readl(drv_data->base + I2CM_STATUS);
+
+		if (parrot5_i2cm_badstatus(status, 1)) {
+			dev_warn(drv_data->dev, "bad status after reset %x\n",
+					status);
+			parrot5_i2cm_show_debug(drv_data);
+		}
+	}
+
+	if (ret == -ETIMEDOUT) {
+		dev_warn(drv_data->dev, "i2c timeout\n");
+		ret = -EREMOTEIO;
+	}
+	else if (drv_data->status) {
+		dprintk("bad status : drv_data->status=%x\n", drv_data->status);
+		ret = -EREMOTEIO;
+	}
+
+	return ret;
+
+arbitration_error:
+	/* an other master is driving the bus, wait for the stop condition */
+	if (!MULTI_MASTER_SUPPORT || parrot5_i2cm_wait_transfert(drv_data, I2CM_STATUS_BUSY)) {
+		dev_info(drv_data->dev, "i2c : ab timeout while waiting busy\n");
+
+		/* we suppose we own the bus */
+		parrot5_i2cm_reset(drv_data, 1);
+		/* recheck i2c state after clean */
+		status = readl(drv_data->base + I2CM_STATUS);
+
+		if (parrot5_i2cm_badstatus(status, 1)) {
+			dev_warn(drv_data->dev, "bad status after reset %x\n", status);
+			parrot5_i2cm_show_debug(drv_data);
+		}
+		goto xfer_error;
+	}
+	if (retry >= 3) {
+		dev_info(drv_data->dev, "i2c too much retry. Aborting.\n");
+		goto xfer_error;
+	}
+	retry++;
+
+	goto retry_transfert;
+
+xfer_error:
+	return -EREMOTEIO;
+}
+
+/* Adapter capability mask: plain I2C, SMBus emulation, flag mangling. */
+static u32 parrot5_i2cm_funct(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+/* i2c-core algorithm hooks for this adapter. */
+static const struct i2c_algorithm parrot5_i2cm_algorithm = {
+	.master_xfer = parrot5_i2cm_master_xfer,
+	.functionality = parrot5_i2cm_funct,
+};
+
+/* debug stuff */
+/*
+ * sysfs "tests" store handler: validation/debug commands.  Accepts
+ * hs_emu (fake HS mode at 2x speed), hs_off, bus_stop (force a stop if
+ * busy) and the [no]delay_{bef,aft}_ackno fault-injection toggles.
+ * NOTE(review): the delay_* branches assign test_driver instead of
+ * OR-ing, clearing the other flag — confirm intended.
+ */
+static ssize_t set_tests(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct platform_device *pdev;
+	struct parrot5_i2cm_data *drv_data;
+
+	pdev = container_of(dev, struct platform_device, dev);
+	drv_data = platform_get_drvdata(pdev);
+
+	if (!strncmp(buf, "hs_emu", count)) {
+		/* set hs mode with speed = 2 * normal speed */
+		u32 tmp = readl(drv_data->base + I2CM_PRESCALE);
+		writel(tmp/2, drv_data->base + I2CM_HIGH_PRESCALE);
+		drv_data->hs_mode = 1;
+	}
+	else if (!strncmp(buf, "hs_off", count)) {
+		drv_data->hs_mode = 0;
+		writel(0, drv_data->base + I2CM_HIGH_PRESCALE);
+	}
+	else if (!strncmp(buf, "bus_stop", count)) {
+		int ret;
+		u32 status = readl(drv_data->base + I2CM_STATUS);
+
+		/* send stop if needed */
+		if (status & I2CM_STATUS_BUSY) {
+			parrot5_i2cm_send_stop(drv_data);
+			ret = parrot5_i2cm_wait_transfert(drv_data, I2CM_STATUS_BUSY);
+			if (ret) {
+				dev_warn(drv_data->dev, "timeout while sending stop bit\n");
+			}
+		}
+	}
+	else if (!strncmp(buf, "delay_bef_ackno", count)) {
+		test_driver = DELAY_DEBUG_IRQ_BACK;
+	}
+	else if (!strncmp(buf, "delay_aft_ackno", count)) {
+		test_driver = DELAY_DEBUG_IRQ_AACK;
+	}
+	else if (!strncmp(buf, "nodelay_bef_ackno", count)) {
+		test_driver &= ~DELAY_DEBUG_IRQ_BACK;
+	}
+	else if (!strncmp(buf, "nodelay_aft_ackno", count)) {
+		test_driver &= ~DELAY_DEBUG_IRQ_AACK;
+	}
+	else {
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+/* sysfs "tests" show handler: list the accepted test commands. */
+static ssize_t get_tests(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	/* TODO */
+	return scnprintf(buf, PAGE_SIZE, "hs_emu hs_off bus_stop delay_bef_ackno delay_bef_ackno nodelay_bef_ackno nodelay_bef_ackno\n");
+}
+
+static DEVICE_ATTR(tests, 0644, get_tests, set_tests);
+/**
+ * parrot5_i2cm_probe() - bind the Parrot I2C master controller.
+ *
+ * Maps the controller registers, claims the default pinctrl state for
+ * dedicated buses (muxed/shared buses skip pinctrl and force the
+ * non-FIFO path), initialises the hardware, installs the shared IRQ
+ * handler and registers a numbered I2C adapter so board code can keep
+ * using i2c_register_board_info().
+ *
+ * Fixes: resource_size() instead of open-coded size maths; bounded,
+ * NUL-terminated adapter name copy (memcpy left the name unterminated);
+ * no iounmap() on the ioremap failure path; pinctrl released on later
+ * failure paths; stray clk_put() on the pinctrl error path removed (the
+ * clock is only acquired later by parrot5_i2cm_hw_init()); the
+ * i2c_add_numbered_adapter() return code is checked.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int parrot5_i2cm_probe(struct platform_device *pdev)
+{
+ struct parrot5_i2cm_data *drv_data;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto no_mem;
+ }
+
+ drv_data->dev = &pdev->dev;
+ drv_data->pdata = pdev->dev.platform_data;
+ if (!drv_data->pdata) {
+ dev_err(&pdev->dev, "no driver data\n");
+ ret = -ENODEV;
+ goto no_pdata;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "get memory region resource failed\n");
+ ret = -ENOENT;
+ goto no_res;
+ }
+
+ drv_data->ioarea = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!drv_data->ioarea) {
+ dev_err(&pdev->dev, "request memory region failed\n");
+ ret = -ENOENT;
+ goto no_res;
+ }
+
+ drv_data->base = ioremap(res->start, resource_size(res));
+ if (!drv_data->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto no_map;
+ }
+
+ if (!drv_data->pdata->muxed) {
+ drv_data->pctl = pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(drv_data->pctl)) {
+ /* the clock is not owned yet: nothing else to undo */
+ ret = PTR_ERR(drv_data->pctl);
+ goto no_pinctrl;
+ }
+ } else {
+ /* masking scl and sda does not work in fifo mode. To
+ * activate it we would need to program the COMMAND register
+ * in parallel of the fifo, and we need to do an operation...
+ * Programming COMMAND with MASK_SDA|MASK_SCL and writing to
+ * I2CM_TRANSMIT do not work.
+ * We assume a shared i2c bus does not need the fifo. */
+ usefifo = 0;
+ }
+
+ ret = parrot5_i2cm_hw_init(drv_data, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Hardware init failed (%d)\n", ret);
+ goto err_hw;
+ }
+
+ init_waitqueue_head(&drv_data->wait);
+
+ platform_set_drvdata(pdev, drv_data);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ ret = irq;
+ goto err_hw;
+ }
+
+ ret = request_irq(irq, parrot5_i2cm_irq, IRQF_SHARED,
+ pdev->name, drv_data);
+ if (ret) {
+ dev_err(&pdev->dev, "request irq failed (%d)\n", ret);
+ goto err_hw;
+ }
+ drv_data->irq = irq;
+
+ /* setup info for the i2c core: bounded, NUL-terminated copy */
+ strlcpy(drv_data->adapter.name, pdev->name,
+ sizeof(drv_data->adapter.name));
+ drv_data->adapter.algo = &parrot5_i2cm_algorithm;
+ drv_data->adapter.algo_data = drv_data;
+ drv_data->adapter.dev.parent = &pdev->dev;
+ drv_data->adapter.owner = THIS_MODULE;
+ drv_data->adapter.timeout = PARROT5_I2CM_TIMEOUT;
+
+ /* we don't use i2c_add_adapter to allow
+ board config to use i2c_register_board_info
+ */
+ drv_data->adapter.nr = pdev->id;
+ ret = i2c_add_numbered_adapter(&drv_data->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add adapter (%d)\n", ret);
+ goto no_adapter;
+ }
+
+ dev_info(&pdev->dev, "controller probe successfully\n");
+
+#ifdef CONFIG_I2CM_PARROT7_DEBUGFS
+ p7i2cm_debugfs_init(&drv_data->debug, drv_data->base,
+ dev_name(&pdev->dev),
+ clk_get_rate(drv_data->clk));
+#endif /* CONFIG_I2CM_PARROT7_DEBUGFS */
+ device_create_file(&pdev->dev, &dev_attr_tests);
+
+ drv_data->pm_mode = RUNNING;
+
+ /* revision 1 controllers cannot use the fifo path */
+ if (drv_data->pdata->revision == I2CM_REVISION_1)
+ usefifo = 0;
+
+ return 0;
+
+/* NOTE(review): parrot5_i2cm_hw_init() presumably grabs the clock; an
+ * extra clk unwind may be needed on the paths below — confirm against
+ * hw_init (not visible here), which did not get one before either. */
+no_adapter:
+ free_irq(irq, drv_data);
+err_hw:
+ if (!drv_data->pdata->muxed)
+ pinctrl_put(drv_data->pctl);
+no_pinctrl:
+ iounmap(drv_data->base);
+no_map:
+ release_resource(drv_data->ioarea);
+no_res:
+no_pdata:
+ kfree(drv_data);
+no_mem:
+ return ret;
+}
+
+/**
+ * parrot5_i2cm_remove() - unbind the controller and release everything
+ * acquired by parrot5_i2cm_probe(): sysfs file, debugfs entries, the I2C
+ * adapter, IRQ, clock, pinctrl handle, register mapping, memory region
+ * and the driver data itself.
+ */
+int parrot5_i2cm_remove(struct platform_device *pdev)
+{
+ struct parrot5_i2cm_data *drv_data = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_tests);
+#ifdef CONFIG_I2CM_PARROT7_DEBUGFS
+ p7i2cm_debugfs_remove(&drv_data->debug);
+#endif /* CONFIG_I2CM_PARROT7_DEBUGFS */
+
+ i2c_del_adapter(&drv_data->adapter);
+ parrot5_i2cm_disable_irq(drv_data);
+ /* NOTE(review): the clock is gated before free_irq(); the (shared)
+ * handler must not touch the registers after this point — confirm
+ * against parrot5_i2cm_irq. */
+ clk_disable_unprepare(drv_data->clk);
+ clk_put(drv_data->clk);
+ free_irq(platform_get_irq(pdev, 0), drv_data);
+
+ /* pctl is NULL (kzalloc) when the bus is muxed and pinctrl skipped */
+ if (drv_data->pctl)
+ pinctrl_put(drv_data->pctl);
+
+ iounmap(drv_data->base);
+
+ release_resource(drv_data->ioarea);
+
+ kfree(drv_data);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*
+ * Shutdown hook: flag the controller as suspending (presumably checked
+ * by the transfer path to refuse new transactions — confirm against the
+ * xfer code, not visible here). Hardware state is left untouched.
+ */
+void parrot5_i2cm_shutdown(struct platform_device *pdev)
+{
+ struct parrot5_i2cm_data *drv_data = platform_get_drvdata(pdev);
+ dev_info(&pdev->dev, "shutdown\n");
+ drv_data->pm_mode = SUSPENDING;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System suspend (noirq phase): mark the driver as suspending, then
+ * gate the controller clock.
+ */
+int parrot5_i2cm_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct parrot5_i2cm_data *drv_data = platform_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "suspend\n");
+ drv_data->pm_mode = SUSPENDING;
+ clk_disable_unprepare(drv_data->clk);
+
+ return 0;
+}
+
+/*
+ * System resume (noirq phase): re-run the full hardware init.
+ * NOTE(review): the clock gated in suspend is presumably re-enabled
+ * inside parrot5_i2cm_hw_init() — confirm, it is not visible here.
+ */
+int parrot5_i2cm_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct parrot5_i2cm_data *drv_data = platform_get_drvdata(pdev);
+ int ret;
+
+ drv_data->pm_mode = RUNNING;
+ dev_info(&pdev->dev, "resuming\n");
+
+ ret = parrot5_i2cm_hw_init(drv_data, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Hardware init failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct dev_pm_ops parrot5_i2cm_dev_pm_ops = {
+ .suspend_noirq = parrot5_i2cm_suspend_noirq,
+ .resume_noirq = parrot5_i2cm_resume_noirq,
+};
+#endif
+
+/* NOTE: this registration block is actually compiled in ("#if 1" below);
+ * p7-i2cm_probe selects the correct implementation at run time. */
+#if 1
+/**
+ * Platform driver glue: probe/remove/shutdown hooks, plus the noirq
+ * suspend/resume ops when CONFIG_PM_SLEEP is set.
+ */
+static struct platform_driver parrot5_i2cm_driver = {
+ .probe = parrot5_i2cm_probe,
+ .remove = parrot5_i2cm_remove,
+ .shutdown = parrot5_i2cm_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = P7I2CM_DRV_NAME,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &parrot5_i2cm_dev_pm_ops,
+#endif
+ },
+};
+
+/**
+ * Module init: plain module_init when built as a module; registered at
+ * postcore time when built in, so the bus is available early for board
+ * code.
+ */
+static int __init parrot5_i2cm_init(void)
+{
+ return platform_driver_register(&parrot5_i2cm_driver);
+}
+#ifdef MODULE
+module_init(parrot5_i2cm_init);
+#else
+postcore_initcall(parrot5_i2cm_init);
+#endif
+
+/**
+ * Module exit: unregister the platform driver.
+ */
+static void __exit parrot5_i2cm_exit(void)
+{
+ platform_driver_unregister(&parrot5_i2cm_driver);
+}
+module_exit(parrot5_i2cm_exit);
+
+MODULE_AUTHOR("Parrot SA by David Guilloteau, Matthieu CASTET");
+MODULE_DESCRIPTION("Parrot I2C Master driver");
+MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/parrot/i2c/p7-i2cm_regs.h b/drivers/parrot/i2c/p7-i2cm_regs.h
new file mode 100644
index 00000000000000..48b044043d0f72
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cm_regs.h
@@ -0,0 +1,123 @@
+/**
+ * @file drivers/parrot/i2c/p7-i2cm_regs.h
+ *
+ * @brief Parrot I2C registers
+ *
+ * Copyright (C) 2008 Parrot S.A.
+ *
+ * @author david.guilloteau@parrot.com
+ * @date 2008-08-28
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARCH_PARROT_REGS_I2C_P5_H
+#define __ASM_ARCH_PARROT_REGS_I2C_P5_H
+
+/* Parrot I2C registers */
+
+#define I2CM_TRANSMIT 0x00 /* Transmit register */
+#define I2CM_RECEIVE 0x04 /* Receive register */
+#define I2CM_ITEN 0x08 /* Interrupt Enable register */
+#define I2CM_ITACK 0x0C /* Interrupt Ack register */
+#define I2CM_STATUS 0x10 /* Status register */
+#define I2CM_COMMAND 0x14 /* Command register */
+#define I2CM_PRESCALE 0x18 /* Prescale register */
+#define I2CM_HIGH_PRESCALE 0x1c /* High speed prescale register */
+#define I2CM_WFIFO 0x20 /* Command and write FIFO register */
+#define I2CM_RFIFO 0x24 /* Read FIFO register */
+
+#ifdef CONFIG_ARCH_VEXPRESS
+#define I2CM_VEXPRESS_SELECT 0x28 /* Vexpress I2CM selection */
+#endif
+
+/* FIFO depths */
+#define I2CM_WFIFO_DEPTH 8
+#define I2CM_RFIFO_DEPTH 128
+
+/* Registers bitwise definitions */
+
+/* I2CM Status Register */
+#define I2CM_STATUS_IF (1 << 0) /* Interrupt Flag bit */
+#define I2CM_STATUS_TIP (1 << 1) /* Transfer In Progress bit */
+#define I2CM_STATUS_AL (1 << 2) /* Arbitration Lost bit */
+#define I2CM_STATUS_BUSY (1 << 3) /* I2C Busy bit */
+#define I2CM_STATUS_RXACK (1 << 4) /* Receive Acknowledge from slave */
+#define I2CM_STATUS_BAD_CMD (1 << 8) /* Bad command */
+#define I2CM_STATUS_BAD_WR (1 << 9) /* Bad write command */
+#define I2CM_STATUS_BAD_RD (1 << 10) /* Bad read command */
+#define I2CM_STATUS_BAD_STOP (1 << 11) /* Bad stop command */
+#define I2CM_STATUS_BAD_START (1 << 12) /* Bad start command */
+#define I2CM_STATUS_BAD_SCL (1 << 13) /* ? */
+#define I2CM_STATUS_BAD_ACK (1 << 14) /* Received ACK is not the expected one */
+#define I2CM_STATUS_CMD_EMPTY (1 << 16) /* Command FIFO empty interrupt flag */
+#define I2CM_STATUS_CMD_FULL (1 << 17) /* Command FIFO full flag */
+#define I2CM_STATUS_FIFO_FULL (1 << 18) /* Read FIFO full interrupt flag */
+#define I2CM_STATUS_FIFO_FILLED (1 << 19) /* Read fifo not empty flag */
+
+#define I2CM_STATUS_ERROR_MASK 0xff00
+
+/* I2CM Command Register */
+#define I2CM_COMMAND_ACK (0<< 0) // Acknowledge bit
+#define I2CM_COMMAND_NACK (1<< 0) // Acknowledge bit
+#define I2CM_COMMAND_WR (1<< 1) // Write bit command
+#define I2CM_COMMAND_RD (1<< 2) // Read bit command
+#define I2CM_COMMAND_STOP (1<< 3) // Stop bit command
+#define I2CM_COMMAND_START (1<< 4) // Start bit command
+#define I2CM_COMMAND_HIGH_SPEED (1<< 5) // Transfer data at high speed
+#define I2CM_COMMAND_MASK_SCL (1<< 6) // Mask the SCL input
+#define I2CM_COMMAND_MASK_SDA (1<< 7) // Mask the SDA input
+#define I2CM_COMMAND_BUS_CLEAR (1<< 8) // Try to clear the I2C bus
+
+/* I2CM Interrupt Enable register */
+
+/* The normal interrupt is set at the end of each individual Command, at the end
+ * of the last FIFO Command, at the end of a repeated Read Command, or when the
+ * Read FIFO is full. */
+#define I2CM_ITEN_ITEN (1 << 0) /* Interrupt enabled */
+#define I2CM_ITEN_FIFO_ITEN (1 << 1) /* Interrupt generated when repeat read
+ * has been completed */
+#define I2CM_ITEN_DEBUG_EN (1 << 2) /* Debug interrupt enabled for individual
+ * command */
+#define I2CM_ITEN_DBG_FIFO (1 << 3) /* Debug interrupt enabled for FIFO
+ * command, the corresponding interrupt
+ * clears the command FIFO */
+
+/* I2CM Interrupt Acknowledge register */
+#define I2CM_ITACK_ACK (1 << 0) /* clear pending interrupt not caused by
+ * FIFO full read*/
+
+/* I2CM Transmit register */
+#define I2CM_TRANSMIT_WRITE (0 << 0) /* for addresses: RW bit : writing to
+ * slave */
+#define I2CM_TRANSMIT_READ (1 << 0) /* for addresses: RW bit : reading from
+ * slave */
+
+// I2CM Command and Write FIFO
+#define I2CM_WFIFO_ACK (0<< 8) // Acknowledge bit: 0: ACK - 1: NACK
+#define I2CM_WFIFO_NACK (1<< 8) // Acknowledge bit: 0: ACK - 1: NACK
+#define I2CM_WFIFO_WR (1<< 9) // Write bit command
+#define I2CM_WFIFO_RD (1<<10) // Read bit command
+#define I2CM_WFIFO_STOP (1<<11) // Stop bit command
+#define I2CM_WFIFO_START (1<<12) // Start bit command
+#define I2CM_WFIFO_HIGH_SPEED (1<<13) // Transfer data at high speed
+#define I2CM_WFIFO_LAST_NACK (1<<14) // Last acknowledge bit
+#define I2CM_WFIFO_LAST_STOP (1<<15) // Stop command at the last read
+#define I2CM_WFIFO_RX_CLEAR (1<<16) // Clear the Read FIFO before executing the rest of the command
+
+// I2CM Read FIFO
+#define I2CM_RFIFO_EMPTY (1<< 8) // Empty bit: 0: RX_BYTE is valid - 1: RX_BYTE is not valid (FIFO is empty)
+
+#endif /* __ASM_ARCH_PARROT_REGS_I2C_P5_H */
diff --git a/drivers/parrot/i2c/p7-i2cs_regs.h b/drivers/parrot/i2c/p7-i2cs_regs.h
new file mode 100644
index 00000000000000..b4eb8badeae35d
--- /dev/null
+++ b/drivers/parrot/i2c/p7-i2cs_regs.h
@@ -0,0 +1,43 @@
+#ifndef _P7_I2CS_REGS_H_
+#define _P7_I2CS_REGS_H_
+
+#define I2CS_ADDRESS 0x00
+#define I2CS_ITEN 0x04
+#define I2CS_ITACK 0x08
+#define I2CS_TRANSMIT 0x0C
+#define I2CS_FIFO_FLUSH 0x10
+#define I2CS_STATUS 0x14
+#define I2CS_TIMING 0x18
+#define I2CS_CONTROL 0x1C
+#define I2CS_RECEIVE 0x20 /* 8 contiguous registers starting
+ * at this address */
+
+#define I2CS_ITEN_TX BIT(0)
+#define I2CS_ITEN_RX BIT(1)
+#define I2CS_ITEN_TRANSFER_DONE BIT(2)
+
+#define I2CS_ITACK_TX BIT(0)
+#define I2CS_ITACK_RX BIT(1)
+#define I2CS_ITACK_TRANSFER_DONE BIT(2)
+#define I2CS_ITACK_CORE_SLEEP BIT(26)
+
+#define I2CS_FIFO_FLUSH_TX BIT(0)
+#define I2CS_FIFO_FLUSH_RX BIT(1)
+
+#define I2CS_STATUS_IT_TX BIT(0)
+#define I2CS_STATUS_IT_RX BIT(1)
+#define I2CS_STATUS_IT_TRANSFER_DONE BIT(2)
+#define I2CS_STATUS_TX_EMPTY BIT(8)
+#define I2CS_STATUS_TX_FULL BIT(9)
+#define I2CS_STATUS_RX_EMPTY BIT(16)
+#define I2CS_STATUS_RX_FULL BIT(17)
+#define I2CS_STATUS_BUS_BUSY BIT(24)
+#define I2CS_STATUS_CORE_BUSY BIT(25)
+#define I2CS_STATUS_CORE_SLEEP BIT(26)
+
+#define I2CS_CONTROL_W_ACK_POLLING_MODE BIT(16)
+
+#define I2CS_RECEIVE_INVAL BIT(8)
+#define I2CS_RECEIVE_STOP BIT(12)
+
+#endif /* _P7_I2CS_REGS_H_ */
diff --git a/drivers/parrot/i2c/plds_i2cs.c b/drivers/parrot/i2c/plds_i2cs.c
new file mode 100644
index 00000000000000..74a0117fe02c77
--- /dev/null
+++ b/drivers/parrot/i2c/plds_i2cs.c
@@ -0,0 +1,1222 @@
+/**
+ * linux/driver/parrot/i2c/plds_i2cs.c - Parrot7 I2C slave driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Victor Lambret <victor.lambret.ext@parrot.com>
+ * date: 18-Sep-2013
+ *
+ * I2C Slave driver dedicated to PLDS protocol.
+ *
+ * Limitation :
+ * - This driver can only run on P7R3
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include "plds_i2cs.h"
+#include "p7-i2cs_regs.h"
+
+/* This driver implements the block protocol layer of the PLDS DVD player.
+ * It allows the transmission of commands.
+ *
+ * The application reads or writes a complete command at a time using the
+ * read or write syscalls.
+ *
+ * PLDS commands are encapsulated in block-layer frames like this :
+ *
+ * LEN command CRC
+ *
+ * LEN : the number of bytes of command + 1 for CRC
+ * CRC : sum of command bytes on 8 bits
+ *
+ * Example : application command : 0x10 0x11 0x12
+ * block layer frame : 0x4 0x10 0x11 0x12 0x33
+ *
+ * TODO : - Avoid pulling CRQ down during a transfer; wait for the
+ * transfer to STOP before pulling it down.
+ * - Add a mechanism to report problems to the application through
+ * the read syscall : RX buffer overflow or premature STOP while
+ * transmitting a message.
+ */
+
+/***************************
+ * PARAMETERS
+ ***************************/
+
+#define I2CS_RX_THRESHOLD 0x7
+#define I2CS_ADDR 0x70
+#define I2CS_MAX_RETRIES 5
+
+int plds_msg_size = 128;
+int plds_msg_nb = 32;
+int plds_i2cs_dbg = 0;
+
+module_param(plds_msg_size, int, 0);
+module_param(plds_msg_nb, int, 0);
+module_param(plds_i2cs_dbg, int, 0);
+
+static DECLARE_WAIT_QUEUE_HEAD(plds_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(plds_write_wait);
+
+/***************************
+ * GENERIC HELPERS
+ ***************************/
+
+#define LOG(fun, fmt, arg...) fun(&i2cs.pdev->dev, fmt "\n", ##arg)
+#define LOG_INFO(fmt, arg...) LOG(dev_info, fmt, ##arg)
+#define LOG_WARN(fmt, arg...) LOG(dev_warn, fmt, ##arg)
+#define LOG_ERROR(fmt, arg...) LOG(dev_err, fmt, ##arg)
+#define LOG_DEBUG(fmt, arg...) \
+do { \
+ if (plds_i2cs_dbg) \
+ LOG(dev_info, fmt, ##arg);\
+} while(0)
+
+/***************************
+ * Messages management
+ ***************************/
+
+struct plds_msg {
+ __u16 size; /* buffer size */
+ __u16 len; /* Number of significant bytes */
+ __u8 *buf;
+ struct list_head list;
+};
+
+struct msg_pool {
+ spinlock_t lock;
+ struct plds_msg *msgs; /* messages descriptors */
+ void *mem; /* memory allocated for all messages */
+ struct list_head free;
+ struct list_head filled;
+};
+
+// TODO: merge struct plds and struct plds_i2cs
+struct plds {
+ struct cdev cdev;
+ unsigned int major;
+ struct class *class;
+ unsigned int open_cnt;
+
+ struct msg_pool rx_pool;
+ struct msg_pool tx_pool;
+} plds;
+
+struct plds_i2cs {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ const struct plds_i2cs_pdata *pdata;
+ struct resource *iomem;
+ void __iomem *base;
+ struct clk *clk;
+ struct pinctrl *pctl;
+ int irq;
+
+ /* Current RX and TX messages */
+ /* RX */
+ int retry;
+ int flush;
+ struct plds_msg *rx_msg;
+
+ /* TX */
+ int send;
+ struct plds_msg *tx_msg;
+} i2cs;
+
+/* Allocate the backing memory and the descriptors of @pool and chain
+ * every descriptor on the free list. Returns 0 or -ENOMEM.
+ * NOTE(review): plds_msg_size/plds_msg_nb are unchecked module params;
+ * their product could overflow the kzalloc size — confirm sane bounds
+ * are enforced at module init. */
+static int plds_init_pool(struct msg_pool *pool)
+{
+ int i, rt = -ENOMEM;
+
+ pool->mem = kzalloc(plds_msg_size * plds_msg_nb, GFP_KERNEL);
+ if (!pool->mem )
+ return rt;
+
+ pool->msgs = kzalloc(plds_msg_nb * sizeof(struct plds_msg), GFP_KERNEL);
+ if (!pool->msgs)
+ goto free_mem;
+
+ INIT_LIST_HEAD(&pool->free);
+ INIT_LIST_HEAD(&pool->filled);
+ spin_lock_init(&pool->lock);
+ for (i = 0; i < plds_msg_nb; i++) {
+ pool->msgs[i].size = plds_msg_size;
+ pool->msgs[i].buf = pool->mem + i * plds_msg_size;
+ list_add_tail(&pool->msgs[i].list, &pool->free);
+ };
+
+ return 0;
+free_mem:
+ kfree(pool->mem);
+ return rt;
+}
+
+/* Free the descriptors and backing memory of @pool. The pool must not
+ * be used afterwards: the lists are re-initialised (emptied) and then
+ * the memory they pointed into is freed under the pool lock. */
+static void plds_release_pool(struct msg_pool *pool)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ INIT_LIST_HEAD(&pool->free);
+ INIT_LIST_HEAD(&pool->filled);
+
+
+ kfree(pool->msgs);
+ kfree(pool->mem);
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+/***************************
+ * I2C Slave
+ ***************************/
+
+static int plds_i2cs_enable(void)
+{
+ writel(I2CS_RX_THRESHOLD, i2cs.base + I2CS_CONTROL);
+ writel(I2CS_FIFO_FLUSH_TX | I2CS_FIFO_FLUSH_RX,
+ i2cs.base + I2CS_FIFO_FLUSH);
+ writel(I2CS_ITEN_TX | I2CS_ITEN_RX | I2CS_ITEN_TRANSFER_DONE,
+ i2cs.base + I2CS_ITEN);
+ writel(I2CS_ADDR ,i2cs.base + I2CS_ADDRESS);
+
+ LOG_DEBUG("Enabled");
+
+ return 0;
+}
+
+static int plds_i2cs_disable(void)
+{
+ LOG_DEBUG("Disabled");
+
+ writel(0,i2cs.base + I2CS_ADDRESS);
+ writel(0, i2cs.base + I2CS_ITEN);
+
+ return 0;
+}
+
+/**************************************
+ * PLDS API
+ **************************************/
+
+/* 8-bit additive checksum over the first @n bytes of @buf: the "CRC"
+ * byte of a PLDS block-layer frame. */
+static u8 plds_crc(const char *buf, int n)
+{
+ u8 sum = 0;
+
+ while (n-- > 0)
+ sum += *buf++;
+
+ return sum;
+}
+
+static inline void plds_put_request_down(void)
+{
+ LOG_DEBUG("Put CRQ down");
+ gpio_set_value(i2cs.pdata->gpio_request, 0);
+}
+
+static inline void plds_put_request_up(void)
+{
+ LOG_DEBUG("Put CRQ up");
+ gpio_set_value(i2cs.pdata->gpio_request, 1);
+}
+
+/* Reset the DVD player and restart the slave controller from scratch:
+ * hold the reset GPIO low, disable the I2CS block, recycle every queued
+ * or in-flight TX/RX message back to its free list, then re-enable the
+ * block and release the reset line.
+ *
+ * Fix: the filled lists are now drained with list_for_each_entry_safe()
+ * + list_move_tail(); the previous list_add_tail() on nodes still
+ * linked in "filled" corrupted both lists.
+ */
+static inline void plds_reset(void)
+{
+ unsigned long i2cs_flags, rx_pool_flags,
+ tx_pool_flags;
+ struct plds_msg *p_msg, *tmp;
+
+ LOG_INFO("Reset plds");
+
+ if (!gpio_is_valid(i2cs.pdata->gpio_reset)) {
+ LOG_ERROR("Unable to perform reset.");
+ return;
+ }
+
+ spin_lock_irqsave(&i2cs.lock, i2cs_flags);
+
+ /* hold the player in reset, release the command request line */
+ gpio_set_value(i2cs.pdata->gpio_reset, 0);
+ plds_put_request_up();
+
+ /* disable the slave controller */
+ plds_i2cs_disable();
+
+ /* recycle the in-flight and queued TX messages */
+ spin_lock_irqsave(&plds.tx_pool.lock, tx_pool_flags);
+ if (i2cs.tx_msg) {
+ i2cs.send = 0;
+ i2cs.retry = 0;
+ i2cs.flush = 0;
+ i2cs.tx_msg->len = 0;
+ list_add_tail(&i2cs.tx_msg->list, &plds.tx_pool.free);
+ i2cs.tx_msg = NULL;
+ }
+
+ list_for_each_entry_safe(p_msg, tmp, &plds.tx_pool.filled, list) {
+ p_msg->len = 0;
+ list_move_tail(&p_msg->list, &plds.tx_pool.free);
+ }
+ spin_unlock_irqrestore(&plds.tx_pool.lock, tx_pool_flags);
+
+ /* same for RX */
+ spin_lock_irqsave(&plds.rx_pool.lock, rx_pool_flags);
+ if (i2cs.rx_msg) {
+ i2cs.rx_msg->len = 0;
+ list_add_tail(&i2cs.rx_msg->list, &plds.rx_pool.free);
+ i2cs.rx_msg = NULL;
+ }
+
+ list_for_each_entry_safe(p_msg, tmp, &plds.rx_pool.filled, list) {
+ p_msg->len = 0;
+ list_move_tail(&p_msg->list, &plds.rx_pool.free);
+ }
+ spin_unlock_irqrestore(&plds.rx_pool.lock, rx_pool_flags);
+
+ plds_i2cs_enable();
+ gpio_set_value(i2cs.pdata->gpio_reset, 1);
+ /* NOTE(review): 50 ms busy-wait with IRQs off (inherited from the
+ * original) — confirm the player really needs it before the unlock */
+ mdelay(50);
+
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+
+ /* not mandatory: kick any sleeping reader/writer */
+ wake_up_interruptible(&plds_read_wait);
+ wake_up_interruptible(&plds_write_wait);
+}
+
+/* Condition helper for plds_read()'s wait_event_interruptible():
+ * nonzero when a filled RX message is queued.
+ * Fix: the previous version returned the opposite (nonzero on empty),
+ * so the reader woke immediately with nothing to read. */
+static int plds_wait_rx_filled_msg(void)
+{
+ unsigned long flags;
+ int rt;
+
+ spin_lock_irqsave(&plds.rx_pool.lock, flags);
+ rt = !list_empty(&plds.rx_pool.filled);
+ spin_unlock_irqrestore(&plds.rx_pool.lock, flags);
+
+ return rt;
+}
+
+/* Condition helper for plds_write()'s wait_event_interruptible():
+ * nonzero when a free TX descriptor is available.
+ * Fix: the previous version returned the opposite (nonzero on empty),
+ * so the writer woke immediately with no free buffer. */
+static int plds_wait_tx_free_msg(void)
+{
+ unsigned long flags;
+ int rt;
+
+ spin_lock_irqsave(&plds.tx_pool.lock, flags);
+ rt = !list_empty(&plds.tx_pool.free);
+ spin_unlock_irqrestore(&plds.tx_pool.lock, flags);
+
+ return rt;
+}
+
+/* Count the entries of @list while holding @plock (IRQ-safe).
+ * Debug helper for plds_debug(). */
+static int plds_count_list_entries(struct list_head *list,
+ spinlock_t *plock)
+{
+ struct list_head *msg;
+ unsigned long flags;
+ int count = 0;
+
+ spin_lock_irqsave(plock, flags);
+ list_for_each(msg, list)
+ count++;
+ spin_unlock_irqrestore(plock, flags);
+
+ return count;
+}
+
+/* Dump the driver state (GPIO, current RX/TX messages, pool occupancy,
+ * queued frames) and the controller registers to the kernel log.
+ *
+ * Fixes: the RX branch hex-dumped i2cs.tx_msg (wrong buffer, and a NULL
+ * dereference when no TX message was in flight) — it now dumps the RX
+ * message, and the "last sent" TX dump no longer depends on an RX
+ * message existing; the TX filled-list counter is now incremented.
+ */
+static void plds_debug(void)
+{
+ u32 addr, iten, status, count;
+ struct plds_msg *p_msg;
+ unsigned long flags;
+
+ addr = readl(i2cs.base + I2CS_ADDRESS);
+ iten = readl(i2cs.base + I2CS_ITEN);
+ status = readl(i2cs.base + I2CS_STATUS);
+
+ spin_lock_irqsave(&i2cs.lock, flags);
+ LOG_INFO("== GPIOS ===\n"
+ " request=%d",
+ gpio_get_value(i2cs.pdata->gpio_request));
+
+ if (i2cs.rx_msg) {
+ LOG_INFO("[RX] received [%d/%d] size=%d flush=%d",
+ i2cs.rx_msg->len,
+ i2cs.rx_msg->buf[0],
+ i2cs.rx_msg->size,
+ i2cs.flush);
+
+ print_hex_dump(KERN_INFO, "[RX]>",
+ DUMP_PREFIX_NONE,
+ 16, 1, i2cs.rx_msg->buf,
+ i2cs.rx_msg->len, 1);
+ } else {
+ LOG_INFO("[RX] none");
+ }
+
+ /* NOTE(review): the pool lists below are walked under i2cs.lock
+ * only, not the pool locks — debug-only racy read, confirm. */
+ if (!list_empty(&plds.tx_pool.free)) {
+ /* Last message sent */
+ p_msg = list_entry((plds.tx_pool.free).prev,
+ struct plds_msg, list);
+
+ print_hex_dump(KERN_INFO, "[TX] (last sent)>",
+ DUMP_PREFIX_NONE,
+ 16, 1, p_msg->buf,
+ p_msg->len, 1);
+ }
+
+ if (i2cs.tx_msg) {
+ LOG_INFO("[TX] send [%d/%d] size=%d",
+ i2cs.send,
+ i2cs.tx_msg->len,
+ i2cs.tx_msg->size);
+ } else {
+ LOG_INFO("[TX] none");
+ }
+
+ LOG_INFO("== PLDS Messages lists ===\n"
+ " TX free=%d, filled=%d\n"
+ " RX free=%d, filled=%d",
+ plds_count_list_entries(&plds.tx_pool.free,
+ &plds.tx_pool.lock),
+ plds_count_list_entries(&plds.tx_pool.filled,
+ &plds.tx_pool.lock),
+ plds_count_list_entries(&plds.rx_pool.free,
+ &plds.rx_pool.lock),
+ plds_count_list_entries(&plds.rx_pool.filled,
+ &plds.rx_pool.lock));
+
+ count = 0;
+
+ list_for_each_entry(p_msg, &plds.rx_pool.filled, list) {
+ count++;
+
+ LOG_INFO("[RX][%d] (%d bytes)>\n",
+ count, p_msg->len);
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_NONE,
+ 16, 1, p_msg->buf,
+ p_msg->len, 1);
+ }
+
+ count = 0;
+
+ list_for_each_entry(p_msg, &plds.tx_pool.filled, list) {
+ count++;
+
+ LOG_INFO("[TX][%d] (%d bytes)>\n",
+ count, p_msg->len);
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_NONE,
+ 16, 1, p_msg->buf,
+ p_msg->len, 1);
+ }
+
+ LOG_INFO("== I2CS %d DUMP REG ===\n"
+ " address = 0x%08x\n"
+ " iten = 0x%08x\n"
+ " status = 0x%08x",
+ i2cs.pdata->i2c_bus, addr, iten, status);
+
+ spin_unlock_irqrestore(&i2cs.lock, flags);
+}
+
+/**************************************
+ * IRQ I2CS HELPERS
+ **************************************/
+
+/* Pop the next filled TX message (if any) into i2cs.tx_msg and raise
+ * CRQ. Must be called with i2cs.lock held, no message in flight and
+ * i2cs.send == 0 (BUG otherwise).
+ * NOTE(review): plds.tx_pool.filled is walked without tx_pool.lock —
+ * relies on the callers' locking, confirm. */
+static void plds_i2cs_next_txbuffer(void)
+{
+ BUG_ON(i2cs.tx_msg);
+ BUG_ON(i2cs.send != 0);
+
+ if (list_empty(&plds.tx_pool.filled)) {
+ LOG_DEBUG("No TX buffer to transmit");
+ } else {
+ plds_put_request_up();
+ i2cs.tx_msg = list_first_entry(&plds.tx_pool.filled,
+ struct plds_msg,
+ list);
+ list_del(&i2cs.tx_msg->list);
+ LOG_DEBUG("New TX buffer: %p",i2cs.tx_msg);
+ }
+}
+
+/* Drain the I2CS RX FIFO (IRQ context).
+ *
+ * Words are read from I2CS_RECEIVE until the INVAL bit signals an empty
+ * FIFO. A word carrying I2CS_RECEIVE_STOP closes the current RX message
+ * and queues it on rx_pool.filled (waking readers); data bytes are
+ * appended to the current message, taken from rx_pool.free on demand.
+ * When no free buffer exists (or the current one is full) the rest of
+ * the frame is discarded via "flush" mode until the STOP arrives. */
+static void plds_receive_bytes(void)
+{
+ u32 rx, spurious = 0;
+ unsigned long i2cs_flags, rx_pool_flags;
+
+ rx = readl(i2cs.base + I2CS_RECEIVE);
+ while (!(rx & I2CS_RECEIVE_INVAL)) {
+ LOG_DEBUG("rx=%x",rx);
+
+ /* RX STOP */
+ if (rx & I2CS_RECEIVE_STOP) {
+ spin_lock_irqsave(&i2cs.lock, i2cs_flags);
+
+ i2cs.flush = 0;
+ if (!i2cs.rx_msg) {
+
+ /* a STOP with no message but free buffers left
+ * means the frame carried no data byte */
+ if (!list_empty(&plds.rx_pool.free))
+ spurious = 1;
+
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+
+ if (spurious) {
+ LOG_INFO("Spurious STOP %x",rx);
+ } else {
+ LOG_ERROR("RX message missed %x",rx);
+ }
+ } else {
+ LOG_DEBUG("STOP RX, %d bytes received",
+ i2cs.rx_msg->len);
+
+ spin_lock_irqsave(&plds.rx_pool.lock,
+ rx_pool_flags);
+ list_add_tail(&i2cs.rx_msg->list,
+ &plds.rx_pool.filled);
+ spin_unlock_irqrestore(&plds.rx_pool.lock,
+ rx_pool_flags);
+
+ i2cs.rx_msg = NULL;
+
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+ wake_up_interruptible(&plds_read_wait);
+ }
+ goto next_byte;
+ }
+
+ /* Try to get next free buffer if needed */
+ spin_lock_irqsave(&i2cs.lock, i2cs_flags);
+
+ spin_lock_irqsave(&plds.rx_pool.lock, rx_pool_flags);
+ if (!i2cs.flush
+ && (!i2cs.rx_msg)
+ && !list_empty(&plds.rx_pool.free)) {
+ i2cs.rx_msg = list_first_entry(&plds.rx_pool.free,
+ struct plds_msg,
+ list);
+ list_del(&i2cs.rx_msg->list);
+ i2cs.rx_msg->len = 0;
+ }
+ spin_unlock_irqrestore(&plds.rx_pool.lock, rx_pool_flags);
+
+ /* XXX : No RX buffer we flush at the moment. A better way to
+ * handle it would be to wait for a free RX buffer */
+ if ((!i2cs.rx_msg) || (i2cs.rx_msg->len >= i2cs.rx_msg->size)) {
+ i2cs.flush = 1;
+ /* set length to 0 */
+ /* i2cs.rx_msg->len = 0; */
+ /* inform users */
+ }
+
+ if (!i2cs.flush) {
+ LOG_DEBUG("buf[%d] = %x",i2cs.rx_msg->len, rx);
+ i2cs.rx_msg->buf[i2cs.rx_msg->len] = (__u8) (rx & 0xFF);
+ i2cs.rx_msg->len++;
+ }
+
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+next_byte:
+ rx = readl(i2cs.base + I2CS_RECEIVE);
+ }
+}
+
+/* Refill the I2CS TX FIFO from the current TX message (IRQ context).
+ *
+ * Picks the next filled message when none is in flight (which raises
+ * CRQ via plds_i2cs_next_txbuffer), re-raises CRQ on a retransmission,
+ * then pushes bytes until the FIFO reports full or the message is
+ * exhausted. The error paths only log for now. */
+static void plds_transmit_bytes(void)
+{
+ u32 status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2cs.lock, flags);
+ if (!i2cs.tx_msg) {
+ plds_i2cs_next_txbuffer();
+ if (!i2cs.tx_msg) {
+ LOG_ERROR("No message to transmit");
+ goto terminate_tx;
+ }
+ LOG_DEBUG("New buffer %p",i2cs.tx_msg);
+ } else /* retransmission of a message */
+ plds_put_request_up();
+
+ if (i2cs.send >= i2cs.tx_msg->len) {
+ LOG_ERROR("TX underflow (send=%d, len=%d)",
+ i2cs.send,i2cs.tx_msg->len);
+ goto terminate_tx;
+ }
+
+ /* Transmit bytes until TX FIFO is full or end of message */
+ status = readl(i2cs.base + I2CS_STATUS);
+ while ((~status & I2CS_STATUS_TX_FULL)
+ && (i2cs.send < i2cs.tx_msg->len)) {
+ LOG_DEBUG("TX [%d/%d] = %x",
+ i2cs.send,i2cs.tx_msg->len,
+ i2cs.tx_msg->buf[i2cs.send]);
+ writel(i2cs.tx_msg->buf[i2cs.send],
+ i2cs.base + I2CS_TRANSMIT);
+ status = readl(i2cs.base + I2CS_STATUS);
+ i2cs.send++;
+ // XXX : Temporary ACK : transmit one byte at a time
+ //return;
+ }
+ spin_unlock_irqrestore(&i2cs.lock, flags);
+
+ return;
+
+terminate_tx:
+ spin_unlock_irqrestore(&i2cs.lock, flags);
+ // XXX : To be implemented here
+ LOG_ERROR("Terminate TX");
+}
+
+/* I2CS interrupt handler: drains the RX FIFO, refills the TX FIFO and
+ * handles TRANSFER_DONE (I2C STOP) bookkeeping, including retry of a
+ * prematurely stopped TX message up to I2CS_MAX_RETRIES.
+ * NOTE(review): "(~status & I2CS_STATUS_TX_EMPTY)" below is u32 but is
+ * printed with %lu — the specifier should be %u.
+ * NOTE(review): this handler takes i2cs.lock then tx_pool.lock; any
+ * other path must use the same order to avoid an ABBA deadlock. */
+static irqreturn_t plds_i2cs_irq(int irq, void *dev_id)
+{
+ unsigned long flags, i2cs_flags;
+ u32 status = readl(i2cs.base + I2CS_STATUS);
+ LOG_DEBUG("IRQ status=%x",status);
+
+#if 1
+ /* IT RX, we empty RX FIFO */
+ if (status & I2CS_STATUS_IT_RX) {
+ plds_receive_bytes();
+ writel(I2CS_ITACK_RX, i2cs.base + I2CS_ITACK);
+ }
+
+ if( (status & I2CS_STATUS_IT_TRANSFER_DONE)
+ && (~status & I2CS_STATUS_RX_EMPTY)
+ && (~status & I2CS_STATUS_IT_RX)
+ && (~status & I2CS_STATUS_IT_TX) ){
+ /* Manage I2C RX STOP when the received message
+ is multiple of the RX FIFO in size.
+
+ I2CS_STATUS_IT_TRANSFER_DONE is set, RX FIFO is not empty
+ and IT_RX or IT_TX are not set.
+
+ IT_RX has been set in previous interrupt to empty the RX FIFO
+ but there was no place left for the I2C RX STOP.
+ Then the acknowledge is made in 2nd interrupt */
+
+ plds_receive_bytes();
+ }
+
+ /* IT TX */
+ if (status & I2CS_STATUS_IT_TX) {
+ plds_transmit_bytes();
+ writel(I2CS_ITACK_TX, i2cs.base + I2CS_ITACK);
+ }
+
+ /* RX STOP */
+ spin_lock_irqsave(&i2cs.lock, i2cs_flags);
+ if (((!i2cs.tx_msg)
+ || (i2cs.tx_msg && !i2cs.send)) /* avoid retransmission case */
+ && (status & I2CS_STATUS_IT_TRANSFER_DONE)) {
+ writel(I2CS_ITACK_TRANSFER_DONE,
+ i2cs.base + I2CS_ITACK);
+ }
+
+ /* TX STOP */
+ if (i2cs.tx_msg && (status & I2CS_STATUS_IT_TRANSFER_DONE)) {
+ /* When transfer is not complete it's a premature stop */
+ if (((~status & I2CS_STATUS_TX_EMPTY)
+ || (i2cs.send < i2cs.tx_msg->len))
+ && (i2cs.retry < I2CS_MAX_RETRIES)) {
+
+ LOG_ERROR("Premature TX STOP: transfer [%d/%d] FIFO TX:%lu (%04x)",
+ i2cs.send,
+ i2cs.tx_msg->len,
+ (~status & I2CS_STATUS_TX_EMPTY),
+ status);
+
+ /* Previous transfer was a failure, try again */
+ /* TODO : Add a try counter here ? */
+ writel(I2CS_FIFO_FLUSH_TX,
+ i2cs.base + I2CS_FIFO_FLUSH);
+ i2cs.send = 0;
+ i2cs.retry++;
+
+ print_hex_dump(KERN_ERR, "TX>", DUMP_PREFIX_NONE,
+ 16, 1, i2cs.tx_msg->buf,
+ i2cs.tx_msg->len, 0);
+ } else {
+ /* Message correctly transmitted
+ or message dropped */
+ if (i2cs.retry >= I2CS_MAX_RETRIES)
+ LOG_ERROR("Message dropped\n");
+
+ i2cs.send = 0;
+ i2cs.retry = 0;
+ spin_lock_irqsave(&plds.tx_pool.lock, flags);
+ list_add_tail(&i2cs.tx_msg->list, &plds.tx_pool.free);
+ spin_unlock_irqrestore(&plds.tx_pool.lock, flags);
+
+ i2cs.tx_msg = NULL;
+ wake_up_interruptible(&plds_write_wait);
+ }
+ writel(I2CS_ITACK_TRANSFER_DONE,
+ i2cs.base + I2CS_ITACK);
+
+ /* Request again if there is still messages to transmit */
+ spin_lock_irqsave(&plds.tx_pool.lock, flags);
+ if (!list_empty(&plds.tx_pool.filled) ||
+ /* retransmission of a message */
+ (list_empty(&plds.tx_pool.filled)
+ && i2cs.tx_msg
+ && !i2cs.send) )
+ plds_put_request_down();
+ spin_unlock_irqrestore(&plds.tx_pool.lock, flags);
+ }
+
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+
+#endif
+ return IRQ_HANDLED;
+}
+
+/*****************************************
+ * DEV INTERFACE
+ ****************************************/
+
+/* read : Receive one complete application command.
+ *
+ * Returns the command size, or a negative errno:
+ * -EAGAIN no command available (O_NONBLOCK)
+ * -ENOBUFS command larger than the user buffer
+ * -EMSGSIZE bad frame (length error)
+ * -EBADMSG bad frame (CRC error)
+ * -EFAULT copy_to_user() failed
+ * -ERESTARTSYS call interrupted by signal
+ *
+ * Fixes: error codes are negative (EMSGSIZE was returned positive and
+ * EBADMSG was never used); the wait_event_interruptible() return value
+ * is honoured and emptiness is rechecked after every wakeup (the list
+ * could be consumed, or never filled, before); copy_to_user() — which
+ * may fault and sleep — now runs outside the IRQ-disabled spinlock; a
+ * zero-length message no longer underflows the ENOBUFS test.
+ */
+static ssize_t plds_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
+{
+ unsigned long flags;
+ struct plds_msg *msg;
+ int rt;
+ u8 len, crc, crc_msg;
+
+ LOG_DEBUG("%s",__func__);
+
+ spin_lock_irqsave(&plds.rx_pool.lock,flags);
+
+ /* Wait for an incoming frame to be enqueued */
+ while (list_empty(&plds.rx_pool.filled)) {
+ if (file->f_flags & O_NONBLOCK) {
+ rt = -EAGAIN;
+ goto unlock;
+ }
+ spin_unlock_irqrestore(&plds.rx_pool.lock,flags);
+ if (wait_event_interruptible(plds_read_wait,
+ plds_wait_rx_filled_msg()))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&plds.rx_pool.lock,flags);
+ }
+
+ msg = list_first_entry(&plds.rx_pool.filled,
+ struct plds_msg,
+ list);
+
+ /* message left queued so a bigger read can still fetch it */
+ if (msg->len > 0 && (size_t)(msg->len - 1) > count) {
+ rt = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_del(&msg->list);
+ spin_unlock_irqrestore(&plds.rx_pool.lock,flags);
+
+ len = msg->buf[0];
+ if (msg->len == 0 || len != msg->len - 1 || len == 1) {
+ LOG_DEBUG("read: message len error (len=%d, buf_len=%d)",
+ len, msg->len);
+ rt = -EMSGSIZE;
+ goto requeue_buf;
+ }
+
+ crc_msg = msg->buf[len];
+ crc = plds_crc(msg->buf + 1, len-1);
+ if (crc != crc_msg) {
+ LOG_ERROR("read: message crc error (crc=%x, crc_msg=%x)",
+ crc, crc_msg);
+ rt = -EBADMSG;
+ goto requeue_buf;
+ }
+
+ /* may fault: must not run under the pool spinlock */
+ rt = copy_to_user(buf, msg->buf+1, len-1);
+ if (rt) {
+ /* TODO : Message is lost, requeue it in filled queue? */
+ LOG_DEBUG("read: copy to user error: %d",rt);
+ rt = -EFAULT;
+ goto requeue_buf;
+ }
+ rt = len - 1;
+
+requeue_buf:
+ spin_lock_irqsave(&plds.rx_pool.lock,flags);
+ list_add_tail(&msg->list, &plds.rx_pool.free);
+unlock:
+ spin_unlock_irqrestore(&plds.rx_pool.lock,flags);
+ return rt;
+}
+
+/* write : Send a complete command to the dvd player.
+ *
+ * Returns the command size, or a negative errno:
+ * -EAGAIN no free TX buffer (O_NONBLOCK)
+ * -EMSGSIZE command too large for the internal buffers
+ * -EFAULT copy_from_user() failed
+ * -ERESTARTSYS call interrupted by signal
+ *
+ * Fixes: the CRC is computed over the kernel copy of the data (it was
+ * computed by dereferencing the raw __user pointer); copy_from_user()
+ * runs outside the IRQ-disabled spinlock and its failure yields -EFAULT
+ * (its nonzero remainder used to leak out as a positive return); the
+ * wait_event_interruptible() result is honoured with an emptiness
+ * recheck; locks are taken i2cs.lock then tx_pool.lock, matching the
+ * IRQ handler (the previous nesting was the ABBA inverse).
+ */
+static ssize_t plds_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ int rt;
+ unsigned long flags, i2cs_flags;
+ struct plds_msg *msg;
+
+ LOG_DEBUG("write %zu",count);
+
+ /* frame = LEN byte + data + CRC byte */
+ if (plds_msg_size < 2 || count > (size_t)plds_msg_size - 2)
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&plds.tx_pool.lock,flags);
+ while (list_empty(&plds.tx_pool.free)) {
+ if (file->f_flags & O_NONBLOCK) {
+ rt = -EAGAIN;
+ goto unlock;
+ }
+ spin_unlock_irqrestore(&plds.tx_pool.lock,flags);
+ if (wait_event_interruptible(plds_write_wait,
+ plds_wait_tx_free_msg()))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&plds.tx_pool.lock,flags);
+ }
+
+ msg = list_first_entry(&plds.tx_pool.free,
+ struct plds_msg,
+ list);
+ list_del(&msg->list);
+ spin_unlock_irqrestore(&plds.tx_pool.lock,flags);
+
+ /* Construct the PLDS command LEN + DATA + CRC in the message;
+ * the copy may fault, so no spinlock is held here */
+ if (copy_from_user(msg->buf + 1, buf, count)) {
+ spin_lock_irqsave(&plds.tx_pool.lock,flags);
+ list_add_tail(&msg->list, &plds.tx_pool.free);
+ spin_unlock_irqrestore(&plds.tx_pool.lock,flags);
+ return -EFAULT;
+ }
+ msg->buf[0] = count + 1;
+ msg->buf[count + 1] = plds_crc(msg->buf + 1, count);
+ msg->len = count + 2;
+
+ /* Put CRQ down when no transfer is in progress and nothing is
+ * queued; lock order matches plds_i2cs_irq() */
+ spin_lock_irqsave(&i2cs.lock, i2cs_flags);
+ spin_lock(&plds.tx_pool.lock);
+ if (list_empty(&plds.tx_pool.filled) && (!i2cs.tx_msg))
+ plds_put_request_down();
+ list_add_tail(&msg->list, &plds.tx_pool.filled);
+ spin_unlock(&plds.tx_pool.lock);
+ spin_unlock_irqrestore(&i2cs.lock, i2cs_flags);
+
+ return count;
+unlock:
+ spin_unlock_irqrestore(&plds.tx_pool.lock,flags);
+
+ return rt;
+}
+
+/* ioctl
+ *
+ * PLDS_SET_WBUF: Set the write buffer size. returns 0 if success or
+ * ENOMEM for errors.
+ * PLDS_SET_RBUF: Set the read buffer size. returns 0 if success or
+ * ENOMEM for errors.
+ */
+static long plds_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+        LOG_DEBUG("%s:cmd=%d",__func__,cmd);
+
+        /* Command codes are defined in plds_i2cs.h; 'arg' is unused. */
+        switch (cmd) {
+        case PLDS_DUMP_DEBUG:
+                plds_debug();
+                break;
+        case PLDS_RESET:
+                plds_reset();
+                break;
+        case PLDS_FLUSH_ALL:
+                /* FIFO flush is not implemented yet: behave like an
+                 * unknown ioctl. */
+                LOG_DEBUG("flush FIFOS... not implemented !");
+                return -ENOTTY;
+        default:
+                return -ENOTTY;
+        }
+
+        return 0;
+}
+
+/* open: first opener allocates both message pools and enables the I2C
+ * slave block; subsequent openers only bump the reference count.
+ * Returns 0 on success or a negative errno from pool/hardware init.
+ * NOTE(review): open_cnt is read/modified without a lock — presumably
+ * serialized by the caller or by usage; verify for concurrent opens. */
+static int plds_open(struct inode *inode, struct file *file)
+{
+        int ret;
+
+        if (plds.open_cnt > 0) {
+                plds.open_cnt++;
+                LOG_DEBUG("Already Opened");
+                return 0;
+        }
+
+        ret= plds_init_pool(&plds.rx_pool);
+        if (ret)
+                return ret;
+
+        ret = plds_init_pool(&plds.tx_pool);
+        if (ret)
+                goto release_rx_pool;
+
+        ret = plds_i2cs_enable();
+        if (ret)
+                goto release_tx_pool;
+
+        LOG_DEBUG("Opened");
+
+        plds.open_cnt++;
+
+        return 0;
+
+        /* Unwind in reverse order of acquisition. */
+release_tx_pool:
+        plds_release_pool(&plds.tx_pool);
+release_rx_pool:
+        plds_release_pool(&plds.rx_pool);
+
+        LOG_DEBUG("Cant open, error %d", ret);
+        return ret;
+}
+
+/* release: drop one reference; the last closer disables the I2C slave,
+ * raises the CRQ line back up, requeues any message still owned by the
+ * interrupt context (so it is not leaked) and frees both pools. */
+static int plds_release(struct inode *inode, struct file *file)
+{
+        unsigned long flags;
+
+        plds.open_cnt--;
+        if (plds.open_cnt > 0) {
+                LOG_DEBUG("Keep opened");
+                return 0;
+        }
+
+        plds_i2cs_disable();
+        plds_put_request_up();
+
+        /* Reset the transfer state machine under the i2cs lock. */
+        spin_lock_irqsave(&i2cs.lock, flags);
+        i2cs.send = 0;
+        i2cs.retry = 0;
+        i2cs.flush = 0;
+
+        /* avoid memory leak */
+        if (i2cs.tx_msg) {
+                list_add_tail(&i2cs.tx_msg->list, &plds.tx_pool.free);
+                i2cs.tx_msg = NULL;
+        }
+
+        if (i2cs.rx_msg) {
+                list_add_tail(&i2cs.rx_msg->list, &plds.rx_pool.free);
+                i2cs.rx_msg = NULL;
+        }
+        spin_unlock_irqrestore(&i2cs.lock, flags);
+
+        plds_release_pool(&plds.tx_pool);
+        plds_release_pool(&plds.rx_pool);
+
+        LOG_DEBUG("Release");
+        return 0;
+}
+
+/* poll: readable when a filled RX message is queued, writable when a
+ * free TX message is available.  Both pool locks are taken briefly so
+ * the emptiness test is consistent with the read/write paths. */
+static unsigned int plds_poll(struct file *file, poll_table * wait)
+{
+        unsigned long flags;
+        unsigned int mask;
+
+        poll_wait(file, &plds_read_wait, wait);
+        poll_wait(file, &plds_write_wait, wait);
+        mask = 0;
+
+        spin_lock_irqsave(&plds.rx_pool.lock,flags);
+        if (!list_empty(&plds.rx_pool.filled))
+                mask |= POLLIN | POLLRDNORM;
+        spin_unlock_irqrestore(&plds.rx_pool.lock,flags);
+
+        spin_lock_irqsave(&plds.tx_pool.lock,flags);
+        if (!list_empty(&plds.tx_pool.free))
+                mask |= POLLOUT | POLLWRNORM;
+        spin_unlock_irqrestore(&plds.tx_pool.lock,flags);
+
+        return mask;
+}
+
+/* Character device operations for the /dev/plds node. */
+static const struct file_operations plds_fops = {
+        .owner          = THIS_MODULE,
+        .llseek         = no_llseek,
+        .read           = plds_read,
+        .write          = plds_write,
+        .unlocked_ioctl = plds_ioctl,
+        .open           = plds_open,
+        .release        = plds_release,
+        .poll           = plds_poll,
+};
+
+/* Register the char device: allocate a major, create the class, add the
+ * cdev and finally create the /dev node.  On any failure the steps done
+ * so far are unwound in reverse order. */
+static int plds_create_dev(void)
+{
+        int rt;
+        dev_t devt;
+        struct device *device;
+
+        rt = alloc_chrdev_region(&devt, 0, 1, PLDS_I2CS_NAME);
+        if (rt) {
+                LOG_ERROR("cant request region");
+                return rt;
+        }
+
+        plds.major = MAJOR(devt);
+
+        plds.class = class_create(THIS_MODULE, PLDS_I2CS_NAME);
+        if (IS_ERR(plds.class)) {
+                LOG_ERROR("cant create class");
+                rt = PTR_ERR(plds.class);
+                goto unregister;
+        }
+
+        cdev_init(&plds.cdev, &plds_fops);
+        plds.cdev.owner = THIS_MODULE;
+
+        rt = cdev_add(&plds.cdev, devt,1);
+        if (rt) {
+                LOG_ERROR("cant add char device");
+                goto destroy_class;
+        }
+
+        device = device_create(plds.class, NULL, /* no parent device */
+                        devt, NULL, /* no additional data */
+                        PLDS_I2CS_NAME);
+
+        if (IS_ERR(device)) {
+                LOG_ERROR("cant create device");
+                rt = PTR_ERR(device);
+                goto del_cdev;
+        }
+
+        plds.open_cnt = 0;
+
+        LOG_INFO("PLDS driver registred");
+
+        return 0;
+
+del_cdev:
+        cdev_del(&plds.cdev);
+destroy_class:
+        class_destroy(plds.class);
+unregister:
+        unregister_chrdev_region(devt,1);
+
+        return rt;
+}
+
+/* Tear down everything plds_create_dev() set up, in reverse order. */
+static int plds_destroy_dev(void)
+{
+        dev_t devt = MKDEV(plds.major, 0);
+
+        device_destroy(plds.class, devt);
+        cdev_del(&plds.cdev);
+        class_destroy(plds.class);
+        unregister_chrdev_region(devt,1);
+
+        pr_info("plds freed\n");
+
+        return 0;
+}
+
+/*****************************************
+ * MODULE INTERFACE
+ ****************************************/
+
+/* Probe: acquire clock, registers, IRQ, pinmux and the CRQ GPIO for the
+ * I2C slave block, then register the char device.  Resources are freed
+ * in reverse order on any failure (goto unwind chain at the bottom). */
+static int __devinit plds_i2cs_probe(struct platform_device *pdev)
+{
+        struct resource *res;
+        int ret;
+        char clk_name[16];
+
+        BUG_ON(!pdev->dev.platform_data);
+
+        i2cs.retry = 0;
+        i2cs.pdev = pdev;
+        i2cs.pdata = pdev->dev.platform_data;
+        spin_lock_init(&i2cs.lock);
+
+        /* P7R1 I2C Slave is too limited to be used */
+        if (i2cs.pdata->revision != I2CS_REVISION_3) {
+                LOG_ERROR("PLDS I2C Slave cant work on P7R1 or P7R2");
+                return -EINVAL;
+        }
+
+        sprintf(clk_name, "p7-i2cs.%d", pdev->id );
+        i2cs.clk = clk_get_sys(clk_name, NULL);
+        if (IS_ERR(i2cs.clk)) {
+                LOG_ERROR("cant get clock %s", clk_name);
+                return PTR_ERR(i2cs.clk);
+        }
+
+        ret = clk_prepare_enable(i2cs.clk);
+        if (ret) {
+                LOG_ERROR("cant prepare clock");
+                goto put_clk;
+        }
+
+        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        if (!res) {
+                LOG_ERROR("cant get iomem resource");
+                ret = -EINVAL;
+                goto clk_disable;
+        }
+
+        i2cs.iomem = request_mem_region(res->start,
+                        resource_size(res),
+                        pdev->name);
+        if (!i2cs.iomem) {
+                LOG_ERROR("cant request mem region");
+                ret = -EBUSY;
+                goto clk_disable;
+        }
+
+        i2cs.base = ioremap(res->start, resource_size(res));
+        if (!i2cs.base) {
+                LOG_ERROR("cant ioremap");
+                ret = -ENOMEM;
+                goto release;
+        }
+
+        i2cs.irq = platform_get_irq(pdev, 0);
+
+        if (i2cs.irq < 0) {
+                LOG_ERROR("cant get irq");
+                ret = -EINVAL;
+                goto unmap;
+        }
+
+        i2cs.pctl = pinctrl_get_select_default(&pdev->dev);
+        if (IS_ERR(i2cs.pctl)) {
+                LOG_ERROR("cant get pins");
+                ret = PTR_ERR(i2cs.pctl);
+                goto unmap;
+        }
+
+        ret = request_irq(i2cs.irq, plds_i2cs_irq, 0, pdev->name, &i2cs);
+        if (ret) {
+                LOG_ERROR("cant request irq");
+                goto put_pin;
+        }
+
+        /* CRQ line starts high (no request pending). */
+        ret = gpio_request_one(i2cs.pdata->gpio_request,
+                        GPIOF_OUT_INIT_HIGH,
+                        PLDS_I2CS_NAME);
+        if (ret) {
+                LOG_ERROR("cant request gpio %d for CRQREQ",
+                                i2cs.pdata->gpio_request);
+                goto free_irq;
+        }
+
+        platform_set_drvdata(pdev, &i2cs);
+
+        /* XXX : Adjust here plds_msg_size for message excessing 256 bytes */
+        ret = plds_create_dev();
+        if (ret) {
+                goto free_request;
+        }
+
+        LOG_INFO("I2CS driver initialized [id=%d]", i2cs.pdata->i2c_bus);
+
+        return 0;
+
+free_request:
+        gpio_free(i2cs.pdata->gpio_request);
+free_irq:
+        free_irq(i2cs.irq, &i2cs);
+put_pin:
+        pinctrl_put(i2cs.pctl);
+unmap:
+        iounmap(i2cs.base);
+release:
+        release_mem_region(i2cs.iomem->start, resource_size(i2cs.iomem));
+clk_disable:
+        clk_disable_unprepare(i2cs.clk);
+put_clk:
+        clk_put(i2cs.clk);
+        return ret;
+}
+
+/* Remove: release everything acquired by probe, in reverse order. */
+static int __devexit plds_i2cs_remove(struct platform_device *pdev)
+{
+        /* Turn PLDS DVD player off before */
+
+        plds_destroy_dev();
+        gpio_free(i2cs.pdata->gpio_request);
+        free_irq(i2cs.irq, &i2cs);
+        pinctrl_put(i2cs.pctl);
+        iounmap(i2cs.base);
+        release_mem_region(i2cs.iomem->start, resource_size(i2cs.iomem));
+        clk_disable_unprepare(i2cs.clk);
+        clk_put(i2cs.clk);
+
+        return 0;
+}
+
+/* Suspend: just gate the block clock; register state is lost. */
+static int plds_i2cs_suspend(struct platform_device *pdev, pm_message_t state)
+{
+        clk_disable_unprepare(i2cs.clk);
+
+        return 0;
+}
+
+/* Resume: re-enable the block clock.  The hardware is not reprogrammed
+ * here, so userspace must close and reopen the device to restore a
+ * working state.  Propagate a clock failure instead of ignoring it. */
+static int plds_i2cs_resume(struct platform_device *pdev)
+{
+        int ret;
+
+        /* need reopen from userspace ATM */
+        ret = clk_prepare_enable(i2cs.clk);
+        if (ret)
+                LOG_ERROR("cant re-enable clock on resume");
+
+        return ret;
+}
+
+/**
+ * platform driver structure
+ */
+static struct platform_driver plds_i2cs_driver = {
+        .probe          = plds_i2cs_probe,
+        .remove         = plds_i2cs_remove,
+        .suspend        = plds_i2cs_suspend,
+        .resume         = plds_i2cs_resume,
+        .driver         = {
+                .owner  = THIS_MODULE,
+                .name   = PLDS_I2CS_NAME,   /* matches the platform device */
+        },
+};
+module_platform_driver(plds_i2cs_driver);
+
+MODULE_AUTHOR("Victor Lambret <victor.lambret.ext@parrot.com>");
+MODULE_DESCRIPTION("Parrot7 PLDS block protococal I2C Slave based driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/i2c/plds_i2cs.h b/drivers/parrot/i2c/plds_i2cs.h
new file mode 100644
index 00000000000000..a0cec1fb5fc6f4
--- /dev/null
+++ b/drivers/parrot/i2c/plds_i2cs.h
@@ -0,0 +1,25 @@
+#ifndef _P7_I2CS_H
+#define _P7_I2CS_H
+
+#define PLDS_I2CS_NAME "plds"
+
+/* IOCTLS */
+#define PLDS_FLUSH_ALL _IO(0x08,1)
+#define PLDS_DUMP_DEBUG _IO(0x08,2)
+#define PLDS_RESET _IO(0x08,3)
+
+/* I2C slave IP revisions; the driver only accepts I2CS_REVISION_3
+ * (earlier revisions are rejected at probe time). */
+enum p7i2cs_revision {
+        I2CS_REVISION_1 = 1,
+        I2CS_REVISION_2,
+        I2CS_REVISION_3,
+        I2CS_REVISION_NR
+};
+
+/* Board-level platform data for the PLDS I2C slave driver. */
+struct plds_i2cs_pdata {
+        int gpio_request;       /* GPIO driving the CRQ request line */
+        int gpio_reset;         /* player reset GPIO (not used in this file) */
+        int i2c_bus;            /* I2C bus number (informational/logging) */
+        enum p7i2cs_revision revision;
+};
+
+#endif /* _P7_I2CS_H */
diff --git a/drivers/parrot/i2c/smsc-82514-usb-hub.c b/drivers/parrot/i2c/smsc-82514-usb-hub.c
new file mode 100644
index 00000000000000..4d76bfb5e182f8
--- /dev/null
+++ b/drivers/parrot/i2c/smsc-82514-usb-hub.c
@@ -0,0 +1,321 @@
+/**
+********************************************************************************
+* @file smsc-82514-usb-hub.c
+* @brief SMSC USB82514 Automotive Grade USB 2.0 Hi-Speed 4-Port Hub
+*
+* Copyright (C) 2012 Parrot S.A.
+*
+* @author Christian ROSALIE <christian.rosalie@parrot.com>
+* @date 2012-08-13
+********************************************************************************
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <mach/gpio.h>
+
+#include "smsc-82514-usb-hub.h"
+
+
+#define SMSC_USB82514_DRV_NAME "smsc82514"
+#define SMSC_USB82514_DRV_VERSION 0x1
+
+/* Registers */
+#define SMSC82514_VENDOR_ID_LSB 0x0 /* Vendor ID (LSB) */
+#define SMSC82514_VENDOR_ID_MSB 0x1 /* Vendor ID (MSB) */
+#define SMSC82514_PRODUCT_ID_LSB 0x2 /* Product ID (LSB) */
+#define SMSC82514_PRODUCT_ID_MSB 0x3 /* Product ID (MSB) */
+
+#define SMSC82514_DEVICE_ID_LSB 0x4 /* Device ID (LSB) */
+#define SMSC82514_DEVICE_ID_MSB 0x5 /* Device ID (MSB) */
+
+#define SMSC82514_CFG_DATA_BYTE1 0x6 /* Configuration Data Byte 1 */
+#define SMSC82514_CFG_DATA_BYTE2 0x7 /* Configuration Data Byte 2 */
+#define SMSC82514_CFG_DATA_BYTE3 0x8 /* Configuration Data Byte 3 */
+
+#define SMSC82514_N_REMOV_DEV 0x9 /* Non-Removable Device */
+#define SMSC82514_PORT_DIS_SELF_PWDED 0xa /* Port Disable for Self-Powered Operation */
+#define SMSC82514_PORT_DIS_BUS_PWDED 0xb /* Port Disable for Bus-Powered Operation */
+
+#define SMSC82514_MAX_PWR_SELF_PWDED 0x0c /* Max Power for Self-Powered Operation */
+#define SMSC82514_MAX_PWR_BUS_PWDED 0x0d /* Max Power for Bus-Powered Operation */
+#define SMSC82514_HUB_CTRL_MAX_CUR_SELF_PWDED 0x0E /* Hub Controller Max Current for Self-Powered Operation */
+#define SMSC82514_HUB_CTRL_MAX_CUR_BUS_PWDED 0x0f /* Hub Controller Max Current for Bus-Powered Operation */
+#define SMSC82514_POWER_ON_TIME 0x10 /* Power-on Time 32h */
+
+#define SMSC82514_LANGUAGE_ID_HIGH 0x11 /* Language ID High */
+#define SMSC82514_LANGUAGE_ID_LOW 0x12 /* Language ID Low */
+#define SMSC82514_MANUFACTURER_STR_LENGTH 0x13
+#define SMSC82514_PRODUCT_STR_LENGTH 0x14
+#define SMSC82514_SERIAL_STR_LENGTH 0x15
+
+#define SMSC82514_MANUFACTURER_STR 0x16 /* 16h-53h Manufacturer String 00h */
+#define SMSC82514_PRODUCT_STR 0x54 /* 54h-91h Product String 00h */
+#define SMSC82514_SERIAL_STR 0x92 /* 92h-Cfh Serial String 00h */
+
+#define SMSC82514_BOOST_UP 0xf6
+#define SMSC82514_BOOST_4_0 0xf8
+
+/* Reserved addresses
+ 0xf7 Reserved
+ 0xd0-0xf5 Reserved
+ 0xf9 Reserved
+ 0xfd Reserved
+ */
+
+#define SMSC82514_RESERVED1 0xf9
+#define SMSC82514_RESERVED2 0xfd
+
+#define SMSC82514_PORTSWAP 0xfa
+#define SMSC82514_PORTMAP_12 0xfb
+#define SMSC82514_PORTMAP_34 0xfc
+#define SMSC82514_STATUS_COMMAND 0xff
+
+/* Per-client private state: a copy of the board's drive-strength
+ * platform data. */
+struct smsc82514_data {
+        struct smsc82514_pdata ds_data;
+};
+
+/* rom default */
+/* {register, value} pairs replayed after reset, since a reset leaves
+ * all configuration registers at 0. */
+static u8 smsc82514_init_seq[][2] = {
+        {SMSC82514_VENDOR_ID_LSB, 0x24},
+        {SMSC82514_VENDOR_ID_MSB, 0x04},
+        {SMSC82514_PRODUCT_ID_LSB, 0x14},/* depends of chip ... */
+        {SMSC82514_PRODUCT_ID_MSB, 0x25},
+        {SMSC82514_DEVICE_ID_LSB, 0xA0},
+        {SMSC82514_DEVICE_ID_MSB, 0x80},
+        {SMSC82514_CFG_DATA_BYTE1, 0x9B},
+        {SMSC82514_CFG_DATA_BYTE2, 0x20},
+        {SMSC82514_CFG_DATA_BYTE3, 0x02},
+        {SMSC82514_N_REMOV_DEV, 0x00},
+        {SMSC82514_PORT_DIS_SELF_PWDED, 0x00},
+        {SMSC82514_PORT_DIS_BUS_PWDED, 0x00},
+        {SMSC82514_MAX_PWR_SELF_PWDED, 0x01},
+        {SMSC82514_MAX_PWR_BUS_PWDED, 0x32},
+        {SMSC82514_HUB_CTRL_MAX_CUR_SELF_PWDED, 0x01},
+        {SMSC82514_HUB_CTRL_MAX_CUR_BUS_PWDED, 0x32},
+        {SMSC82514_POWER_ON_TIME, 0x32},
+
+};
+
+#define SMSC82514_INIT_SEQ_SIZE ARRAY_SIZE(smsc82514_init_seq)
+
+/* Reset the hub, replay the ROM-default register set, apply optional
+ * per-port drive-strength boosts, then start the hub.  Once the start
+ * bit is written (STATUS_COMMAND = 0x01) registers are locked until the
+ * next reset.  Returns 0 on success, -EIO on any i2c failure. */
+static int smsc82514_init_client(struct i2c_client *client)
+{
+        struct smsc82514_data *data = i2c_get_clientdata(client);
+        u8 value;
+        int ret;
+        int i;
+
+        //reset hub
+        if( data->ds_data.reset_pin ){
+                gpio_set_value(data->ds_data.reset_pin, 0);
+
+                /* check if hub reply to i2c */
+                ret = i2c_smbus_write_block_data(client, smsc82514_init_seq[0][0],
+                                1, &smsc82514_init_seq[0][1]);
+
+                /* While held in reset the hub must NOT ack this write; a
+                 * success here means the reset line is not really driven. */
+                if (ret == 0) {
+                        dev_err(&client->dev, " Reset failed: "
+                                        "the reset i/o is not correctly driven\n");
+                        return -EIO;
+                }
+
+                mdelay(20);
+
+                gpio_set_value(data->ds_data.reset_pin, 1);
+        }
+        else {
+                /* No reset GPIO: use the chip's soft-reset command bit. */
+                value = 0x02; /* hub reset */
+
+                ret = i2c_smbus_write_block_data(client, SMSC82514_STATUS_COMMAND,
+                                1, &value);
+
+                if (ret < 0) {
+                        dev_err(&client->dev, " i2c transfer failed\n\
+                                        Unable to reset usb hub\n");
+                        return -EIO;
+                }
+        }
+
+        mdelay(10);
+
+        /* set rom default value (otherwise everything is 0 ...) */
+        for( i = 0 ; i < SMSC82514_INIT_SEQ_SIZE ; i++ ){
+
+                ret = i2c_smbus_write_block_data(client, smsc82514_init_seq[i][0],
+                                1, &smsc82514_init_seq[i][1]);
+
+                if (ret < 0) {
+                        dev_err(&client->dev, "%s: i2c transfer failed\n\
+                                        Unable to write (0x%x) register\n",
+                                        __func__, smsc82514_init_seq[i][0] );
+                        return -EIO;
+                }
+        }
+
+        // Apply USB husb boost
+        // Boost upstream usb hub port if necessary
+        value = data->ds_data.us_port & 0x3;
+
+        if (value) {
+                ret = i2c_smbus_write_block_data(client, SMSC82514_BOOST_UP,
+                                1, &value);
+
+                if (ret < 0) {
+                        dev_err(&client->dev, " i2c transfer failed\n\
+                                        Unable apply hub boost (value == 0x%x)\n",
+                                        value);
+                        return -EIO;
+                }
+        }
+
+
+        //Boost downstream usb hub port if necessary
+        /* BOOST_4_0 packs the four 2-bit boost levels, port 1 in the
+         * low bits up to port 4 in the high bits. */
+        value = data->ds_data.ds_port_1
+                | (data->ds_data.ds_port_2 << 2)
+                | (data->ds_data.ds_port_3 << 4)
+                | (data->ds_data.ds_port_4 << 6);
+
+        if (value) {
+                ret = i2c_smbus_write_block_data(client, SMSC82514_BOOST_4_0,
+                                1, &value);
+
+                if (ret < 0) {
+                        dev_err(&client->dev, " i2c transfer failed\n\
+                                        Unable apply hub boost (value == 0x%x)\n",
+                                        value);
+                        return -EIO;
+                }
+        }
+
+        //Start hub
+        //Once started, registers are protected
+        //The hub must be reset to update configuration
+        value = 0x01;
+
+        ret = i2c_smbus_write_block_data(client, SMSC82514_STATUS_COMMAND,
+                        1, &value);
+
+        if (ret < 0) {
+                dev_err(&client->dev, " i2c transfer failed\n\
+                                Unable to start usb hub\n");
+                return -EIO;
+        }
+
+        return 0;
+}
+
+/* Probe: allocate private data, copy the optional board drive-strength
+ * settings and program the hub.  On init failure the private data is
+ * freed here, because remove() is never called for a failed probe. */
+static int smsc82514_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+        struct smsc82514_data *data = NULL;
+        struct smsc82514_pdata *pds_data;
+        int ret = 0;
+
+        pr_info(SMSC_USB82514_DRV_NAME ": version %d\n", SMSC_USB82514_DRV_VERSION );
+
+        data = kzalloc(sizeof(struct smsc82514_data), GFP_KERNEL);
+        if (!data) {
+                pr_err(SMSC_USB82514_DRV_NAME ": Memory allocation failed\n" );
+                ret = -ENOMEM;
+                goto exit;
+        }
+
+        /* Copy the per-board drive strength settings when provided;
+         * kzalloc() already zeroed ds_data, which is the default config. */
+        pds_data = (struct smsc82514_pdata *) client->dev.platform_data;
+        if (pds_data)
+                memcpy(&data->ds_data, pds_data, sizeof(struct smsc82514_pdata));
+
+        i2c_set_clientdata(client, data);
+
+        /* Initialize chip */
+        ret = smsc82514_init_client(client);
+        if (ret) {
+                /* Failed probe: free here to avoid leaking 'data'. */
+                i2c_set_clientdata(client, NULL);
+                kfree(data);
+        }
+
+exit:
+        return ret;
+}
+
+/* Remove: free the private data allocated at probe time. */
+static int __devexit smsc82514_remove(struct i2c_client *client)
+{
+        struct smsc82514_data *data = i2c_get_clientdata(client);
+
+        kfree(data);
+        return 0;
+}
+
+/* Suspend: nothing to save, the full config is replayed on resume. */
+static int smsc82514_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+        return 0;
+}
+/* Resume: the hub may have lost power, so rerun the whole init
+ * sequence (reset + configuration + start). */
+static int smsc82514_resume(struct i2c_client *client)
+{
+        int ret;
+
+        ret = smsc82514_init_client(client);
+        if (ret < 0)
+                pr_err(SMSC_USB82514_DRV_NAME ": resume failure %d\n", ret);
+
+        return ret;
+}
+
+
+/* Both the 4-port 82514 and 2-port 82512 share the same register map
+ * as far as this driver is concerned. */
+static const struct i2c_device_id smsc82514_i2c_table[] = {
+        {"smsc82514", 0}, /* 4 ports */
+        {"smsc82512", 0}, /* 2 ports */
+        {}
+};
+
+MODULE_DEVICE_TABLE(i2c, smsc82514_i2c_table);
+
+static struct i2c_driver smsc82514_i2c_driver = {
+        .driver = {
+                .name   = SMSC_USB82514_DRV_NAME,
+                .owner  = THIS_MODULE,
+        },
+        .probe          = smsc82514_probe,
+        .remove         = __devexit_p(smsc82514_remove),
+        .suspend        = smsc82514_suspend,
+        .resume         = smsc82514_resume,
+        .id_table       = smsc82514_i2c_table,
+};
+
+
+/* Module init: register the i2c driver (hooked on subsys_initcall so
+ * the hub is configured before USB device enumeration). */
+static int __init smsc82514_modinit(void)
+{
+        int ret = 0;
+
+        ret = i2c_add_driver(&smsc82514_i2c_driver);
+        if (ret != 0) {
+                pr_err("Failed to register SMSC USB82514 driver: %d\n", ret);
+        }
+
+        return ret;
+}
+
+static void __exit smsc82514_modexit(void)
+{
+        i2c_del_driver(&smsc82514_i2c_driver);
+}
+
+subsys_initcall(smsc82514_modinit);
+module_exit(smsc82514_modexit);
+
+
+MODULE_AUTHOR("PARROT SA by Christian ROSALIE <christian.rosalie@parrot.com>");
+MODULE_DESCRIPTION("SMSC USB82514 USB 2.0 Hub");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/i2c/smsc-82514-usb-hub.h b/drivers/parrot/i2c/smsc-82514-usb-hub.h
new file mode 100644
index 00000000000000..ab62e7d68a39de
--- /dev/null
+++ b/drivers/parrot/i2c/smsc-82514-usb-hub.h
@@ -0,0 +1,39 @@
+/*
+********************************************************************************
+* @file smsc-82514-usb-hub.h
+* @brief SMSC USB82514 Automotive Grade USB 2.0 Hi-Speed 4-Port Hub
+* header file
+*
+* Copyright (C) 2012 Parrot S.A.
+*
+* @author Christian ROSALIE <christian.rosalie@parrot.com>
+* @date 2012-09-25
+********************************************************************************
+*/
+
+#ifndef __SMSC82514_H
+#define __SMSC82514_H
+
+/* By default only port4 and port 3 are set to DS_HIGH */
+
+enum drive_strength_boost {
+ DS_NOBOOST = 0, // Normal electrical drive strength = No boost
+ DS_LOW, // Elevated electrical drive strength = Low (approximately 4% boost)
+ DS_MEDIUM, // Elevated electrical drive strength = Medium (approximately 8% boost)
+ DS_HIGH, // Elevated electrical drive strength = High (approximately 12% boost)
+};
+
+/* Drive strength settings by port */
+struct smsc82514_pdata {
+ //upstream port
+ u8 us_port;
+
+ //downstream port
+ u8 ds_port_4;
+ u8 ds_port_3;
+ u8 ds_port_2;
+ u8 ds_port_1;
+ u8 reset_pin;
+};
+
+#endif /* __SMSC82514_H */
diff --git a/drivers/parrot/i2c/ti_ub925_lvds.c b/drivers/parrot/i2c/ti_ub925_lvds.c
new file mode 100644
index 00000000000000..5bd8fe87f3daeb
--- /dev/null
+++ b/drivers/parrot/i2c/ti_ub925_lvds.c
@@ -0,0 +1,412 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include "ti_ub925_lvds.h"
+
+#define LVDS_MAXIRQ 2
+/* Per-client state for the UB925 serializer. */
+struct ti_lvds_data {
+        struct i2c_client *client;
+        const struct ti_lvds_platform_data *pdata;
+};
+
+
+/* data */
+/* Table of secondary handlers demultiplexed from the serializer IRQ
+ * (registered via lvds_request_irq()); protected by ldvs_lock. */
+static struct {
+        irq_handler_t handler;
+        void *dev_id;
+} lvds_irq [LVDS_MAXIRQ];
+/* IRQ number of the serializer, saved for lvds_{en,dis}able_irq(). */
+static int ti_lvds_irq;
+
+static DEFINE_SPINLOCK(ldvs_lock);
+
+/* func */
+/* func */
+/* Read one 8-bit register at i2c address 'addr' (write reg index, then
+ * read one byte back).  Returns the register value or -EIO. */
+static int _ti_lvds_read_reg(struct i2c_client *client, int addr,
+                u8 reg)
+{
+        struct i2c_msg xfer[2];
+        u8 buf[1];
+
+        buf[0] = reg & 0xff;
+
+        /* Write register */
+        xfer[0].addr = addr;
+        xfer[0].flags = 0;
+        xfer[0].len = 1;
+        xfer[0].buf = buf;
+
+        /* Read data */
+        xfer[1].addr = addr;
+        xfer[1].flags = I2C_M_RD;
+        xfer[1].len = 1;
+        xfer[1].buf = buf;
+
+        if (i2c_transfer(client->adapter, xfer, 2) != 2) {
+                dev_err(&client->dev, "%s: i2c transfer failed for (%x,%x)\n",
+                                __func__, addr, reg);
+                return -EIO;
+        }
+
+        return buf[0];
+}
+/* Read a register of the local serializer itself. */
+static int ti_lvds_read_reg(struct i2c_client *client,
+                u8 reg)
+{
+        return _ti_lvds_read_reg(client, client->addr, reg);
+}
+
+/* Write 'val' to register 'reg' of the local serializer.
+ * NOTE(review): despite the name, this is a pure write, no read. */
+static int ti_lvds_read_write(struct i2c_client *client,
+                u8 reg, u8 val)
+{
+        char buff[2] = {reg, val};
+        if (i2c_master_send(client, buff, 2) != 2) {
+                dev_err(&client->dev, "%s: i2c transfer failed for (%x,%x)\n",
+                                __func__, client->addr, reg);
+                return -EIO;
+        }
+        return 0;
+}
+
+/* Write 'val' to register 'reg' of a remote device at i2c address
+ * 'addr' (e.g. the deserializer) through the client's adapter. */
+static int _ti_lvds_read_write(struct i2c_client *client, int addr,
+                u8 reg, u8 val)
+{
+        char buff[2] = {reg, val};
+        struct i2c_msg msg;
+        int ret;
+
+        msg.addr = addr;
+        msg.flags = client->flags & I2C_M_TEN;
+        msg.len = 2;
+        msg.buf = (char *)buff;
+
+        ret = i2c_transfer(client->adapter, &msg, 1);
+
+        if (ret != 1) {
+                dev_err(&client->dev, "%s: i2c transfer failed\n", __func__);
+                return -EIO;
+        }
+        return 0;
+}
+
+/* sysfs 'i2c_pass_all' read: expose bit 7 of register 0x17 (i2c
+ * pass-all), so the value read is 0 or 128, not 0/1. */
+static ssize_t i2c_pass_all_show(struct device *device,
+                struct device_attribute *attr,
+                char *buf)
+{
+        struct i2c_client *client = to_i2c_client(device);
+        return snprintf(buf, PAGE_SIZE, "%d\n",
+                        ti_lvds_read_reg(client, 0x17) & 0x80);
+}
+
+/* sysfs 'i2c_pass_all' write: any non-zero value sets the pass-all bit.
+ * NOTE(review): the parse result and the write status are unchecked. */
+static ssize_t i2c_pass_all_store(struct device *device,
+                struct device_attribute *attr,
+                const char *buf, size_t count)
+{
+        struct i2c_client *client = to_i2c_client(device);
+        unsigned long val = simple_strtoull(buf, NULL, 10);
+
+        ti_lvds_read_write(client, 0x17, 0x5e | (!!val)<<7 );
+
+        return count;
+}
+
+
+static struct device_attribute ti_lvds_sysfs_attrs =
+        __ATTR(i2c_pass_all, S_IRUGO | S_IWUSR, i2c_pass_all_show, i2c_pass_all_store);
+
+/* Identify the UB925 (registers 0xF0..0xF5 spell "_UB925"), find the
+ * remote deserializer address, then program i2c pass-through, interrupt
+ * enables, slave-address remapping and the board's extra deserializer
+ * commands.  Returns 0 on success, -ENODEV when the chip or its remote
+ * peer is absent. */
+static int ti_lvds_initialize(struct ti_lvds_data *data)
+{
+        struct i2c_client *client = data->client;
+        const struct ti_lvds_platform_data *settings = NULL;
+        int remote;
+
+        if (ti_lvds_read_reg(client, 0xF0) != '_')
+                return -ENODEV;
+        if (ti_lvds_read_reg(client, 0xF1) != 'U')
+                return -ENODEV;
+        if (ti_lvds_read_reg(client, 0xF2) != 'B')
+                return -ENODEV;
+        if (ti_lvds_read_reg(client, 0xF3) != '9')
+                return -ENODEV;
+        if (ti_lvds_read_reg(client, 0xF4) != '2')
+                return -ENODEV;
+        if (ti_lvds_read_reg(client, 0xF5) != '5')
+                return -ENODEV;
+
+        /* Register 0x6 holds the remote deserializer address, shifted. */
+        remote = ti_lvds_read_reg(client, 0x6);
+        remote >>= 1;
+        if (!remote) {
+                dev_err(&client->dev,"lvds there is no remote");
+                return -ENODEV;
+        }
+        dev_info(&client->dev, "TI UB925 LVDS deser addr %x\n", remote);
+        /* The board callback may swap in an alternate configuration
+         * depending on which deserializer was detected. */
+        if (data->pdata->deser_callback)
+                data->pdata->deser_callback(remote, &settings);
+
+        if(!settings){
+                settings = data->pdata;
+        }
+
+        /* i2c pass thru : access to remote deser and other slave on local i2c bus */
+        ti_lvds_read_write(client, 0x3, 0xda |
+                        (0x1 & settings->clock_rising));
+
+        if (settings->nb_i2c_slave != 1)
+                /* i2c pass all : all i2c transfers forwarded to remote i2c bus (default is
+                   remote + slave_alias) */
+                ti_lvds_read_write(client, 0x17, 0xde);
+
+        /* irq ena */
+        ti_lvds_read_write(client, 0xc6, 0x21);
+
+        /* i2c address to remap */
+        if(settings->premap.slave_id && settings->premap.slave_alias){
+                ti_lvds_read_write(client, 0x7, settings->premap.slave_id << 1); /* i2c addr of slave */
+                ti_lvds_read_write(client, 0x8, settings->premap.slave_alias << 1); /* i2c addr seen by the master */
+        }
+
+        /* increase remote i2c speed */
+        _ti_lvds_read_write(client, remote, 0x26, 0x25);
+        _ti_lvds_read_write(client, remote, 0x27, 0x25);
+
+        /* deser : Pixel Clock Edge Select rising
+           (compliant with DS9OUB926Q & DS9OUB928Q) */
+        if(settings->clock_rising)
+                _ti_lvds_read_write(client, remote, 0x3, 0xf1);
+
+        /* Replay board-specific deserializer commands; the list is
+         * terminated by a {0, 0} entry. */
+        if(settings->cmd){
+                int i = 0;
+
+                while(settings->cmd[i].reg && settings->cmd[i].data){
+                        _ti_lvds_read_write(client, remote,
+                                        settings->cmd[i].reg,
+                                        settings->cmd[i].data);
+
+                        i++;
+                }
+        }
+        return 0;
+}
+
+#if 0
+{
+ static struct irq_chip twl4030_irq_chip;
+ int irq_base = TWL_IRQ_END;
+ int irq_end = irq_base + 4;
+ int i;
+
+ /* install an irq handler for each of the SIH modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+ twl4030_irq_chip = dummy_irq_chip;
+ twl4030_irq_chip.name = "lvds";
+
+ for (i = irq_base; i < irq_end; i++) {
+ set_irq_chip_and_handler(i, &twl4030_irq_chip,
+ handle_simple_irq);
+ activate_irq(i);
+ }
+
+}
+#endif
+
+/* Threaded IRQ handler: ack the serializer (reading 0xc7 clears the
+ * interrupt), then loop over the registered sub-handlers until none of
+ * them claims the interrupt, re-acking between rounds.  A counter warns
+ * about a possible interrupt storm after 10 iterations. */
+static irqreturn_t lvds_interrupt(int irq, void *dev_id)
+{
+        struct ti_lvds_data *data = dev_id;
+        struct i2c_client *client = data->client;
+        int count = 0;
+
+        /* ack: reading the status register clears the pending irq */
+        ti_lvds_read_reg(client, 0xc7);
+#if 0
+        if (!status)
+                printk(KERN_DEBUG "%s spurious irq ???\n", __func__);
+#endif
+        do {
+                int r = 0;
+                int i;
+
+                for (i = 0; i < LVDS_MAXIRQ; i++) {
+                        if (!lvds_irq[i].handler)
+                                continue;
+                        r = lvds_irq[i].handler(irq, lvds_irq[i].dev_id);
+                        if (r)
+                                break;
+                }
+                /* no irq handle for all handler */
+                if (!r)
+                        break;
+
+                if (count++ == 10)
+                        printk(KERN_DEBUG "ti ldvs irq storm. May be driver don't return IRQ_NONE !\n");
+
+                /* ack lvds */
+                ti_lvds_read_reg(client, 0xc7);
+        } while (1);
+
+        return IRQ_HANDLED;
+}
+
+/* Probe: allocate state, identify and configure the serializer, then
+ * install the threaded IRQ handler and the sysfs attribute.
+ * Fixes: the kzalloc() result was used unchecked (NULL dereference on
+ * OOM) and the IRQ leaked when device_create_file() failed. */
+static int __devinit ti_lvds_probe(struct i2c_client *client,
+                const struct i2c_device_id *id)
+{
+        const struct ti_lvds_platform_data *pdata = client->dev.platform_data;
+        struct ti_lvds_data *data;
+        int error;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        data->client = client;
+
+
+        i2c_set_clientdata(client, data);
+
+        data->pdata = pdata;
+
+        error = ti_lvds_initialize(data);
+        if (error)
+                goto err_free_object;
+
+        error = request_threaded_irq(client->irq, NULL, lvds_interrupt,
+                        IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+        if (error) {
+                dev_err(&client->dev, "Failed to register interrupt\n");
+                goto err_free_object;
+        }
+        ti_lvds_irq = client->irq;
+        /* ack any interrupt latched before the handler was installed */
+        ti_lvds_read_reg(client, 0xc7);
+
+        error = device_create_file(&client->dev, &ti_lvds_sysfs_attrs);
+        if (error) {
+                goto err_free_irq;
+        }
+
+        dev_info(&client->dev, "TI UB925 LVDS serializer initialized.\n");
+
+        return 0;
+
+err_free_irq:
+        free_irq(client->irq, data);
+        ti_lvds_irq = 0;
+err_free_object:
+        kfree(data);
+
+        return error;
+}
+
+/* Remove: release the IRQ, the sysfs attribute and the private data. */
+static int __devexit ti_lvds_remove(struct i2c_client *client)
+{
+        struct ti_lvds_data *data = i2c_get_clientdata(client);
+
+        free_irq(client->irq, data);
+        ti_lvds_irq = 0;
+        device_remove_file(&client->dev, &ti_lvds_sysfs_attrs);
+        kfree(data);
+
+        return 0;
+}
+
+/* Suspend: nothing to do, the chip is fully reprogrammed on resume. */
+static int ti_lvds_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+        return 0;
+}
+/* Resume: wait for the chip to come back on the bus, then rerun the
+ * full initialization and re-ack pending interrupts.  Always returns 0
+ * so a transiently-absent chip does not fail the system resume. */
+static int ti_lvds_resume(struct i2c_client *client)
+{
+        struct ti_lvds_data *data = i2c_get_clientdata(client);
+        int error;
+        /* on workbench, after resume the chip comes out of reset (gpio
+           expander reinit) and we need to wait to address it on i2c
+         */
+        int timeout = 10;
+        while (timeout-- && ti_lvds_read_reg(client, 0xF0) < 0);
+        error = ti_lvds_initialize(data);
+        if (!error)
+                ti_lvds_read_reg(client, 0xc7);
+        return 0;
+}
+
+static const struct i2c_device_id ti_lvds_id[] = {
+        { "lvds", 0 },
+        { }
+};
+MODULE_DEVICE_TABLE(i2c, ti_lvds_id);
+
+static struct i2c_driver ti_lvds_driver = {
+        .driver = {
+                .name   = "lvds",
+                .owner  = THIS_MODULE,
+        },
+        .probe          = ti_lvds_probe,
+        .remove         = __devexit_p(ti_lvds_remove),
+        .suspend        = ti_lvds_suspend,
+        .resume         = ti_lvds_resume,
+        .id_table       = ti_lvds_id,
+};
+
+/* Module init/exit: plain i2c driver registration (hooked on
+ * subsys_initcall below so the serializer is up before the devices
+ * sitting behind its i2c pass-through are probed). */
+static int __init ti_lvds_init(void)
+{
+        return i2c_add_driver(&ti_lvds_driver);
+}
+
+static void __exit ti_lvds_exit(void)
+{
+        i2c_del_driver(&ti_lvds_driver);
+}
+
+/* need to be started before other i2c device to init bus */
+subsys_initcall(ti_lvds_init);
+module_exit(ti_lvds_exit);
+
+
+/* Register a sub-handler on the shared serializer interrupt.  The irq,
+ * flags1 and name arguments are kept for request_irq() signature
+ * compatibility but are unused.  Returns 0 or -EINVAL when all
+ * LVDS_MAXIRQ slots are taken. */
+int
+lvds_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags1,
+                const char *name, void *dev)
+{
+        unsigned long flags;
+        int i;
+        spin_lock_irqsave(&ldvs_lock, flags);
+        for (i = 0; i < LVDS_MAXIRQ; i++) {
+                if (!lvds_irq[i].handler) {
+                        lvds_irq[i].dev_id = dev;
+                        lvds_irq[i].handler = handler;
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&ldvs_lock, flags);
+        if (i == LVDS_MAXIRQ)
+                return -EINVAL;
+
+        /* Call one time handler, to prevent from unprocessed irq blocking */
+        handler(irq, dev);
+
+        return 0;
+}
+
+/* Unregister the sub-handler matching 'dev_id'; no-op if not found. */
+void lvds_free_irq(unsigned int irq, void *dev_id)
+{
+        unsigned long flags;
+        int i;
+        spin_lock_irqsave(&ldvs_lock, flags);
+        for (i = 0; i < LVDS_MAXIRQ; i++) {
+                if (lvds_irq[i].dev_id == dev_id ) {
+                        lvds_irq[i].handler = NULL;
+                        lvds_irq[i].dev_id = NULL;
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&ldvs_lock, flags);
+}
+
+/* Enable/disable the underlying serializer interrupt line; the 'irq'
+ * argument is ignored, ti_lvds_irq (saved at probe) is used instead. */
+void lvds_enable_irq(unsigned int irq)
+{
+        enable_irq(ti_lvds_irq);
+}
+
+void lvds_disable_irq(unsigned int irq)
+{
+        disable_irq(ti_lvds_irq);
+}
+
+EXPORT_SYMBOL(lvds_request_irq);
+EXPORT_SYMBOL(lvds_free_irq);
+EXPORT_SYMBOL(lvds_enable_irq);
+EXPORT_SYMBOL(lvds_disable_irq);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/i2c/ti_ub925_lvds.h b/drivers/parrot/i2c/ti_ub925_lvds.h
new file mode 100644
index 00000000000000..c8496139a0f585
--- /dev/null
+++ b/drivers/parrot/i2c/ti_ub925_lvds.h
@@ -0,0 +1,56 @@
+/* One slave-address remapping entry: real address and the alias the
+ * local master uses to reach it through the serializer. */
+struct remap_i2c {
+        unsigned int slave_id;
+        unsigned int slave_alias;
+};
+
+/* Deserializer extra command */
+struct dsr_i2c_cmd {
+        u8 reg;
+        u8 data;
+};
+
+/* Board-level configuration for the UB925 serializer driver. */
+struct ti_lvds_platform_data{
+        /* nb of i2c slave of deser
+           allows to optimise speed if = 1 and premap is configured
+         */
+        int nb_i2c_slave;
+        /* i2c address to remap */
+        struct remap_i2c premap;
+
+        /* Deserializer command list, terminated by a {0, 0} entry */
+        struct dsr_i2c_cmd *cmd;
+
+        /* Pixel Clock Edge Select */
+        int clock_rising;
+
+        /* Callback when deserializer detected; may return an alternate
+           configuration through *altconf */
+        int (*deser_callback)(unsigned deser_addrgpio, const struct ti_lvds_platform_data **altconf);
+};
+
+#if 1
+int
+lvds_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags1,
+ const char *name, void *dev);
+void lvds_free_irq(unsigned int irq, void *dev_id);
+void lvds_enable_irq(unsigned int irq);
+void lvds_disable_irq(unsigned int irq);
+#else
+static inline int lvds_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags1, const char *name,
+ void *dev)
+{
+ return -EINVAL;
+}
+
+static inline void lvds_free_irq(unsigned int irq, void *dev_id)
+{
+}
+
+static inline void lvds_enable_irq(unsigned int irq)
+{
+}
+
+static inline void lvds_disable_irq(unsigned int irq)
+{
+}
+#endif
diff --git a/drivers/parrot/iio/Kconfig b/drivers/parrot/iio/Kconfig
new file mode 100644
index 00000000000000..b3b6217b07d27e
--- /dev/null
+++ b/drivers/parrot/iio/Kconfig
@@ -0,0 +1,15 @@
+
+menu "IIO Parrot drivers"
+ depends on IIO
+
+source "drivers/parrot/iio/adc/Kconfig"
+source "drivers/parrot/iio/imu/Kconfig"
+source "drivers/parrot/iio/bldc/Kconfig"
+source "drivers/parrot/iio/magnetometer/Kconfig"
+source "drivers/parrot/iio/pressure/Kconfig"
+if IIO_TRIGGER
+source "drivers/parrot/iio/trigger/Kconfig"
+endif # IIO_TRIGGER
+
+endmenu
+
diff --git a/drivers/parrot/iio/Makefile b/drivers/parrot/iio/Makefile
new file mode 100644
index 00000000000000..a15c6cf25da4b2
--- /dev/null
+++ b/drivers/parrot/iio/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the industrial I/O core for PARROT
+#
+
+obj-y += adc/
+obj-y += imu/
+obj-y += bldc/
+obj-y += magnetometer/
+obj-y += pressure/
+obj-y += trigger/
diff --git a/drivers/parrot/iio/adc/Kconfig b/drivers/parrot/iio/adc/Kconfig
new file mode 100644
index 00000000000000..3dd35f6fe4220b
--- /dev/null
+++ b/drivers/parrot/iio/adc/Kconfig
@@ -0,0 +1,53 @@
+#
+# ADC drivers
+#
+menu "Analog to Digital converters"
+
+config P7MU_ADC
+ tristate "P7MU ADC driver"
+ depends on SPI
+ depends on SPI_SLAVE_PARROT7
+ depends on MFD_P7MU
+ select P7_ADC
+ select P7MU_ADC_RING_BUFFER
+ help
+ Say yes here to build support for the P7MU
+ SPI/I2C analog to digital converter (ADC).
+
+config P7MU_ADC_RING_BUFFER
+	bool "Parrot P7MU ADC: use buffer"
+ depends on P7MU_ADC
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to include ring buffer support in the P7MUADC driver.
+
+config P7MU_ADC_DEBUG
+ bool "P7 ADC debug"
+ depends on P7MU_ADC
+ help
+ Say yes here if you have trouble with the P7 ADC driver and want to
+ debug it.
+
+config P7_TEMPERATURE
+ tristate "P7 temperature driver"
+ select P7MU_ADC
+ help
+ Say yes here to build support for the thermal sensor.
+
+config P7_TEMPERATURE_FC7100
+ tristate "P7 FC7100 temperature driver"
+ select P7_ADC
+ help
+ Say yes here to build support for the thermal sensor.
+
+config P7_ADC
+ bool "P7 ADC driver"
+ depends on (P7_TEMPERATURE || P7_TEMPERATURE_FC7100 || P7MU_ADC)
+ help
+ Say yes here to build support for the P7
+ analog to digital converter (ADC).
+
+
+endmenu
+
diff --git a/drivers/parrot/iio/adc/Makefile b/drivers/parrot/iio/adc/Makefile
new file mode 100644
index 00000000000000..3a5d8f64bf5f35
--- /dev/null
+++ b/drivers/parrot/iio/adc/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for industrial I/O ADC drivers for PARROT
+#
+
+ccflags-y += -I$(srctree)/drivers/parrot
+#ccflags-y += -I$(srctree)/drivers/staging
+
+obj-$(CONFIG_P7_ADC) += p7adc.o
+obj-$(CONFIG_P7MU_ADC) += p7muadc.o
+obj-$(CONFIG_P7_TEMPERATURE) += p7temp.o
+obj-$(CONFIG_P7_TEMPERATURE_FC7100) += p7temp_hw08.o
+
+p7adc-y := p7-adc_core.o
+
+p7muadc-y := p7mu-adc_core.o
+p7muadc-$(CONFIG_P7MU_ADC_RING_BUFFER) += p7mu-adc_ring.o
+
diff --git a/drivers/parrot/iio/adc/p7-adc_core.c b/drivers/parrot/iio/adc/p7-adc_core.c
new file mode 100644
index 00000000000000..a03e4ddf922cd7
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7-adc_core.c
@@ -0,0 +1,128 @@
+/**
+ ************************************************
+ * @file p7-adc_core.c
+ * @brief P7 Analog to Digital Converter driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-19
+ ************************************************
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+#include "p7-adc_core.h"
+
+static LIST_HEAD(iiodev_list);
+static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
+
+/* Find the registered chip descriptor matching @type, or NULL if none.
+ * Walks iiodev_list under list_lock; the returned descriptor itself is
+ * not reference-counted here (see p7_adc_get_iio_chip() for that). */
+static struct p7_adc_iio_chip_info *p7_adc_iio_get_chip_info(unsigned int type)
+{
+	struct p7_adc_iio_chip_info *chip = NULL, *cix;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(cix, &iiodev_list, list) {
+		if (cix->type == type) {
+			chip = cix;
+			break;
+		}
+	}
+	mutex_unlock(&list_lock);
+
+	return chip;
+}
+
+/* Register an ADC backend descriptor. All four channel callbacks are
+ * mandatory, and only one descriptor per converter type may exist.
+ * Returns 0, -EINVAL on a missing callback, or -EBUSY on duplicate type. */
+int p7_adc_register_iio_chip(struct p7_adc_iio_chip_info *ch_info)
+{
+	struct p7_adc_iio_chip_info *cix;
+
+	if (!ch_info->ops->enable ||
+	    !ch_info->ops->disable ||
+	    !ch_info->ops->config ||
+	    !ch_info->ops->read)
+		return -EINVAL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(cix, &iiodev_list, list) {
+		if (cix->type == ch_info->type) {
+			mutex_unlock(&list_lock);
+			return -EBUSY;
+		}
+	}
+
+	list_add_tail(&ch_info->list, &iiodev_list);
+	mutex_unlock(&list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_register_iio_chip);
+
+/* Remove a previously registered backend descriptor from the global list.
+ * Always succeeds; callers must ensure no channel is still using it. */
+int p7_adc_unregister_iio_chip(struct p7_adc_iio_chip_info *ch_info)
+{
+	mutex_lock(&list_lock);
+	list_del(&ch_info->list);
+	mutex_unlock(&list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_unregister_iio_chip);
+
+/* Hand back a pointer to the iio_chan_spec template for converter @type.
+ * The template is owned by the backend; callers copy it per channel.
+ * Returns -EPROTONOSUPPORT when no such backend is registered. */
+int p7_adc_iio_chip_get_channel_template(unsigned type, struct iio_chan_spec **ch_templ)
+{
+	struct p7_adc_iio_chip_info *ci;
+
+	ci = p7_adc_iio_get_chip_info(type);
+	if (!ci)
+		return -EPROTONOSUPPORT;
+
+	*ch_templ = &ci->channel_template;
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_iio_chip_get_channel_template);
+
+/* Hand back the channel operation table for converter @type.
+ * Returns -EPROTONOSUPPORT when no such backend is registered. */
+int p7_adc_iio_chip_get_channel_ops(unsigned type, struct
+		p7_adc_iio_chip_channel_ops **ops)
+{
+	struct p7_adc_iio_chip_info *ci;
+
+	ci = p7_adc_iio_get_chip_info(type);
+	if (!ci)
+		return -EPROTONOSUPPORT;
+
+	*ops = ci->ops;
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_iio_chip_get_channel_ops);
+
+/* Pin the module implementing converter @type so it cannot be unloaded
+ * while in use. Returns -EPROTONOSUPPORT for an unknown type and -ENODEV
+ * when the owner module is already going away. Pair with
+ * p7_adc_put_iio_chip(). */
+int p7_adc_get_iio_chip(unsigned int type)
+{
+	struct p7_adc_iio_chip_info *ci;
+
+	ci = p7_adc_iio_get_chip_info(type);
+	if (!ci)
+		return -EPROTONOSUPPORT;
+
+	if (!try_module_get(ci->owner))
+		return -ENODEV;
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_get_iio_chip);
+
+/* Drop the module reference taken by p7_adc_get_iio_chip().
+ * Returns -EPROTONOSUPPORT when @type was never registered. */
+int p7_adc_put_iio_chip(unsigned int type)
+{
+	struct p7_adc_iio_chip_info *ci;
+
+	ci = p7_adc_iio_get_chip_info(type);
+	if (!ci)
+		return -EPROTONOSUPPORT;
+
+	module_put(ci->owner);
+
+	return 0;
+}
+EXPORT_SYMBOL(p7_adc_put_iio_chip);
+
diff --git a/drivers/parrot/iio/adc/p7-adc_core.h b/drivers/parrot/iio/adc/p7-adc_core.h
new file mode 100644
index 00000000000000..f4a69eab2fa0a5
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7-adc_core.h
@@ -0,0 +1,48 @@
+/**
+ *************************************************
+ * @file p7-adc_core.h
+ * @brief P7 Analog to Digital Converter driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-1
+ *************************************************
+ */
+#ifndef ADC_P7_ADC_CORE_H_
+#define ADC_P7_ADC_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+#include <mach/p7-adc.h>
+
+/* Per-capture-mode channel operations implemented by an ADC backend.
+ * Every callback returns 0 on success or a negative errno. */
+struct p7_adc_iio_chip_channel_ops
+{
+	int (*enable)(unsigned int ch);
+	int (*disable)(unsigned int ch);
+	int (*config)(unsigned int ch,
+			unsigned int freq);
+	int (*read)(unsigned int ch,
+			unsigned short *val);
+};
+
+/* Descriptor registered by an ADC backend for one converter type. */
+struct p7_adc_iio_chip_info
+{
+	struct list_head list;			/* link in the global iiodev_list */
+	unsigned int type;			/* converter type identifier */
+	struct iio_chan_spec channel_template;	/* copied for each new channel */
+	struct p7_adc_iio_chip_channel_ops *ops;
+	struct module *owner;			/* for try_module_get() pinning */
+};
+
+/* Prototype of global function */
+int p7_adc_iio_chip_get_channel_ops (unsigned type,
+ struct p7_adc_iio_chip_channel_ops **ops);
+int p7_adc_iio_chip_get_channel_template (unsigned type,
+ struct iio_chan_spec **ch_templ);
+int p7_adc_register_iio_chip (struct p7_adc_iio_chip_info *ch_info);
+int p7_adc_unregister_iio_chip (struct p7_adc_iio_chip_info *ch_info);
+int p7_adc_get_iio_chip (unsigned int type);
+int p7_adc_put_iio_chip (unsigned int type);
+
+#endif /* ADC_P7_ADC_CORE_H_ */
diff --git a/drivers/parrot/iio/adc/p7-adc_regs.h b/drivers/parrot/iio/adc/p7-adc_regs.h
new file mode 100644
index 00000000000000..0cb7c869ff2e2b
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7-adc_regs.h
@@ -0,0 +1,36 @@
+#ifndef __P7MU_ADC_REGS_H
+#define __P7MU_ADC_REGS_H
+
+/*********************************/
+/* P7MU SPI register and options */
+/* Register address */
+#define P7MU_SPI_CTRL_REG 0x00
+
+/* Options */
+#define P7MU_SPI_EN (1 << 4)
+#define P7MU_SPI_RESET (1 << 5)
+#define P7MU_SPI_DISABLE (0 << 4)
+#define P7MU_SPI_CPOL_CPHA_MASK 0x3
+#define P7MU_SPI_MODE_0 0
+#define P7MU_SPI_CPHA 0x1
+#define P7MU_SPI_CPOL 0x2
+#define P7MU_SPI_16MHZ 16000000
+#define P7MU_SPI_8MHZ 8000000
+#define P7MU_SPI_4MHZ 4000000
+#define P7MU_SPI_2MHZ 2000000
+#define P7MU_SPI_24MHZ 24000000
+#define P7MU_SPI_12MHZ 12000000
+#define P7MU_SPI_6MHZ 6000000
+#define P7MU_SPI_3MHZ 3000000
+#define P7MU_SPI_CLK_SEL_16MHZ (0 << 2)
+#define P7MU_SPI_CLK_SEL_8MHZ (1 << 2)
+#define P7MU_SPI_CLK_SEL_4MHZ (2 << 2)
+#define P7MU_SPI_CLK_SEL_2MHZ (3 << 2)
+#define P7MU_SPI_CLK_SEL_24MHZ (0 << 2)
+#define P7MU_SPI_CLK_SEL_12MHZ (1 << 2)
+#define P7MU_SPI_CLK_SEL_6MHZ (2 << 2)
+#define P7MU_SPI_CLK_SEL_3MHZ (3 << 2)
+
+
+#endif /* __P7MU_ADC_REGS_H */
+
diff --git a/drivers/parrot/iio/adc/p7mu-adc.h b/drivers/parrot/iio/adc/p7mu-adc.h
new file mode 100644
index 00000000000000..a6c06c93cd9573
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7mu-adc.h
@@ -0,0 +1,109 @@
+/**
+ ***************************************************
+ * @file p7mu-adc.h
+ * @brief P7MU Analog to Digital Converter driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-19
+ ***************************************************
+ */
+#ifndef ADC_P7MU_ADC_H_
+#define ADC_P7MU_ADC_H_
+
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+/* ADC frequency in kHz */
+#define P7MU_ADC_FREQ 8000000
+
+/**********************************/
+/* P7MU ADC registers and options */
+/* Registers address */
+#define P7MU_ADC_CTRL_REG 0x00
+#define P7MU_ADC_CH_MOD_0_REG(_ch) (_ch*2 + 0x02)
+#define P7MU_ADC_CH_MOD_1_REG(_ch) (_ch*2 + 0x03)
+#define P7MU_ADC_CH0_TRIG_0_REG 0x12
+#define P7MU_ADC_CH_EN_REG 0x22
+#define P7MU_ADC_CH_DATA_REG(_ch) (_ch + 0x23)
+#define P7MU_ADC_CH_IF_SEL_REG 0x2B
+#define P7MU_ADC_PRIORITY_SEL_0_REG 0x2C
+#define P7MU_ADC_PRIORITY_SEL_1_REG 0x2D
+#define P7MU_ADC_MEASUREMENT_STATUS_REG 0x32
+#define P7MU_ADC_MEASUREMENT_ERROR_REG 0x33
+
+/* Options */
+#define P7MU_ADC_EN (1 << 4)
+#define P7MU_ADC_ONE_POINT_TEMP_CORR (1 << 3)
+#define P7MU_ADC_OM_ON (u16)(0x3) /* Operating Mode On */
+#define P7MU_ADC_CH_SAMPLING_EN(_ch) (u16)(1 << _ch)
+#define P7MU_ADC_CH_ONE_SAMPLE_EN(_ch) (u16)(1 << (8+_ch))
+#define P7MU_ADC_CH_I2C_IF_SEL(_ch) (u16)(1 << (8+_ch))
+#define P7MU_ADC_CH_SPI_IF_SEL(_ch) (u16)(1 << _ch)
+#define P7MU_ADC_CH_MOD_0_MASK 0xffff
+#define P7MU_ADC_CH_MOD_1_MASK 0x7f
+#define P7MU_ADC_CH_PRIO_SEL(_ch, _prio) (_ch << (3*_prio))
+#define P7MU_ADC_PRIO_MASK 0x0FFF
+
+/* Buffer max size */
+#define P7MU_ADC_BUF_MAX 20000
+
+/*
+ * P7MUADC value masks
+ */
+#define P7MU_ADC_SPI_CHANNEL_MASK 0x7
+#define P7MU_ADC_I2C_VALID_MASK 0x8000
+
+/* State */
+#define P7MU_ADC_STATE_RUNNING BIT(0)
+#define P7MU_ADC_STATE_FAULT_SPI BIT(1)
+#define P7MU_ADC_STATE_MARKER BIT(2)
+
+/* one channel per iio_dev */
+struct p7mu_adc_iio_dev {
+	struct iio_dev *indio_dev;	/* the per-channel IIO device */
+	unsigned int channel;		/* hardware ADC channel number */
+	struct list_head l;		/* link in chip_info->iio_dev_list */
+};
+
+/* State of the single shared P7MU ADC instance. */
+struct p7mu_adc_chip_info
+{
+	struct spi_device *spi; /* spi_device reference */
+	u32 spi_freq;		/* effective (rounded) SPI clock in Hz */
+	u32 msg_pending;	/* SPI messages currently in flight */
+	u32 msg_full;
+	u32 state_mask;		/* P7MU_ADC_STATE_* flags */
+	u16 chan_en_reg_val;	/* shadow of the channel-enable register */
+	struct resource const* p7mu_adc_res;	/* claimed ADC I/O region */
+	struct resource const* p7mu_spi_res;	/* claimed SPI I/O region (optional) */
+	int p7mu_adc_init;	/* non-zero once platform probe completed */
+
+	struct spi_message msg; /* spi message for the spi API */
+	struct spi_transfer xfer; /* only one transfer per message */
+
+	struct list_head iio_dev_list;	/* all allocated p7mu_adc_iio_dev */
+
+	int ch_masked;		/* channel waiting for a marker, -1 if none */
+	int ch_marker;		/* dedicated marker channel, -1 if none */
+	struct completion send_marker_done;
+	struct mutex send_mutex; /* Serialize send marker access */
+	struct p7_adc_iio_chip_channel_ops *marker_ops;
+	struct mutex mutex; /* Serialize chip info struct access */
+
+	unsigned char rx[P7MU_ADC_BUF_MAX] ____cacheline_aligned;
+};
+
+/* Per-iio_dev private data: back-pointer to the chip plus its ops. */
+struct p7mu_adc_state
+{
+	struct p7mu_adc_chip_info *chip_info;
+	struct p7_adc_iio_chip_channel_ops *ops;
+};
+
+int p7mu_adc_register_ring_funcs_and_init(struct iio_dev *indio_dev);
+void p7mu_adc_ring_cleanup(struct iio_dev *indio_dev);
+int p7mu_adc_configure_marker(struct p7mu_adc_chip_info *ci,
+ unsigned int ch_marker);
+int p7mu_adc_configure_ring(struct p7mu_adc_chip_info *ci);
+
+#endif /* ADC_P7MU_ADC_H_ */
diff --git a/drivers/parrot/iio/adc/p7mu-adc_core.c b/drivers/parrot/iio/adc/p7mu-adc_core.c
new file mode 100644
index 00000000000000..b506daf12e6980
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7mu-adc_core.c
@@ -0,0 +1,686 @@
+/**
+ ************************************************
+ * @file p7mu-adc_core.c
+ * @brief P7 Analog to Digital Converter driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-19
+ *************************************************
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <mfd/p7mu.h>
+#include <mfd/p7mu-clk.h>
+#include <spi/p7-spis.h> /* Necessary to flush SPIS fifo. */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#include "p7-adc_regs.h"
+#include "p7-adc_core.h"
+#include "p7mu-adc.h"
+
+#define P7MU_ADC_DRV_NAME "p7mu-adc"
+
+typedef enum {
+ CAPTURE_SAMPLING_MODE = 0,
+ CAPTURE_ONE_SAMPLE_MODE,
+} capture_mode_t;
+
+/* These modules parameters allow to override platform data.
+ * Could be used for testing */
+static unsigned int spi_freq = 0;
+module_param(spi_freq, uint, 0644);
+
+static struct p7mu_adc_chip_info p7mu_adc;
+
+/* Convert a requested sampling rate (Hz) into the channel modulator
+ * divisor: mod = (P7MU_ADC_FREQ / freq) - 1. Callers must guarantee
+ * freq > 0 (division by zero otherwise); see p7mu_adc_config_channel(). */
+static inline u32 p7mu_get_mod(u32 freq)
+{
+	u32 mod;
+	mod = ((P7MU_ADC_FREQ / freq) - 1);
+	return mod;
+}
+
+/* Re-apply the shadowed channel-enable mask and switch the ADC on. */
+static void p7mu_adc_start(struct p7mu_adc_chip_info* st)
+{
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_CH_EN_REG,
+			st->chan_en_reg_val);
+
+	/* Enable ADC */
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_CTRL_REG,
+			P7MU_ADC_EN|P7MU_ADC_OM_ON);
+}
+
+/* Disable every channel, then the converter itself. The enable shadow
+ * (chan_en_reg_val) is kept, so p7mu_adc_start() can resume. */
+static void p7mu_adc_stop(struct p7mu_adc_chip_info* st)
+{
+	/* Disable all channels */
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_CH_EN_REG, 0);
+	/* Disable ADC */
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_CTRL_REG, 0);
+}
+
+/* Program channel priorities -- the marker channel (if configured) gets
+ * priority 0, remaining channels are assigned increasing priorities in
+ * channel-number order -- then start the ADC. */
+static void p7mu_adc_config(struct p7mu_adc_chip_info* st)
+{
+	u32 ch = 0, prio = 0;
+	u32 ch_priority_hi = 0, ch_priority_lo = 0;
+
+	/* Set marker channel with the highest priority
+	   PRIORITY_SEL_0 Register
+	   [15:12] not implemented.
+	   [11:9] PRI3_CH_SEL – Channel with priority 3.
+	   [8:6] PRI2_CH_SEL – Channel with priority 2.
+	   [5:3] PRI1_CH_SEL – Channel with priority 1.
+	   [2:0] PRI0_CH_SEL – Channel with priority 0 (the highest priority).
+
+	   PRIORITY_SEL_1 Register
+	   [15:12] not implemented.
+	   [11:9] PRI7_CH_SEL – Channel with priority 7 (the lowest priority).
+	   [8:6] PRI6_CH_SEL – Channel with priority 6.
+	   [5:3] PRI5_CH_SEL – Channel with priority 5.
+	   [2:0] PRI4_CH_SEL – Channel with priority 4.
+	 */
+	if (st->ch_marker != -1) {
+		ch_priority_hi |= st->ch_marker;
+		prio++;
+	}
+
+	/* Priorities 1..3 go into SEL_0; the marker channel is skipped. */
+	for (; prio < 4; ++ch)
+		if (ch != st->ch_marker)
+			ch_priority_hi |= P7MU_ADC_CH_PRIO_SEL(ch, prio++);
+
+	/* Priorities 4..7 go into SEL_1 (fields 0..3 of that register). */
+	for (prio = 0; prio < 4; ++ch)
+		if (ch != st->ch_marker)
+			ch_priority_lo |= P7MU_ADC_CH_PRIO_SEL(ch, prio++);
+
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_PRIORITY_SEL_0_REG,
+			ch_priority_hi & P7MU_ADC_PRIO_MASK);
+	p7mu_write16(st->p7mu_adc_res->start + P7MU_ADC_PRIORITY_SEL_1_REG,
+			ch_priority_lo & P7MU_ADC_PRIO_MASK);
+
+	p7mu_adc_start(st);
+}
+
+/* Route @ch's samples to the SPI interface (continuous sampling mode) or
+ * to the I2C-readable data register (one-shot mode), via read-modify-write
+ * of the interface-select register under the chip mutex.
+ * NOTE(review): the select bits are only ever OR-ed in and never cleared,
+ * so a channel moved between modes keeps its previous selection bit set as
+ * well -- confirm this is what the hardware expects. */
+static int p7mu_adc_channel_if_select(unsigned int ch, int capture_mode)
+{
+	u16 val;
+
+	mutex_lock(&p7mu_adc.mutex);
+	if (capture_mode == CAPTURE_SAMPLING_MODE) {
+		p7mu_read16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_IF_SEL_REG,
+				&val);
+		p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_IF_SEL_REG,
+				val | P7MU_ADC_CH_SPI_IF_SEL(ch));
+	} else {
+		p7mu_read16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_IF_SEL_REG,
+				&val);
+		p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_IF_SEL_REG,
+				val | P7MU_ADC_CH_I2C_IF_SEL(ch));
+	}
+	mutex_unlock(&p7mu_adc.mutex);
+
+	return 0;
+}
+
+/* Set the enable bit for @ch in the shadowed channel-enable mask
+ * (sampling bits live in [7:0], one-shot bits in [15:8]) and push the
+ * mask to the hardware. Always returns 0. */
+static int p7mu_adc_channel_enable(unsigned int ch, int capture_mode)
+{
+	mutex_lock(&p7mu_adc.mutex);
+	if (capture_mode == CAPTURE_SAMPLING_MODE)
+		p7mu_adc.chan_en_reg_val |= P7MU_ADC_CH_SAMPLING_EN(ch);
+	else
+		p7mu_adc.chan_en_reg_val |= P7MU_ADC_CH_ONE_SAMPLE_EN(ch);
+
+	p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_EN_REG,
+			p7mu_adc.chan_en_reg_val);
+	mutex_unlock(&p7mu_adc.mutex);
+
+	return 0;
+}
+
+/* Thin per-mode wrappers around p7mu_adc_channel_enable(); marked inline
+ * to be consistent with their _disable/_config counterparts in this file. */
+static inline int p7mu_adc_sampling_channel_enable(unsigned int ch)
+{
+	return p7mu_adc_channel_enable(ch, CAPTURE_SAMPLING_MODE);
+}
+
+static inline int p7mu_adc_one_sample_channel_enable(unsigned int ch)
+{
+	return p7mu_adc_channel_enable(ch, CAPTURE_ONE_SAMPLE_MODE);
+}
+
+/* Clear @ch's enable bit in the shadowed channel-enable mask and push the
+ * mask to the hardware; the mirror of p7mu_adc_channel_enable(). */
+static int p7mu_adc_channel_disable(unsigned int ch, int capture_mode)
+{
+	mutex_lock(&p7mu_adc.mutex);
+	if (capture_mode == CAPTURE_SAMPLING_MODE)
+		p7mu_adc.chan_en_reg_val &= ~P7MU_ADC_CH_SAMPLING_EN(ch);
+	else
+		p7mu_adc.chan_en_reg_val &= ~P7MU_ADC_CH_ONE_SAMPLE_EN(ch);
+
+	p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_EN_REG,
+			p7mu_adc.chan_en_reg_val);
+	mutex_unlock(&p7mu_adc.mutex);
+
+	return 0;
+}
+
+/* Thin per-mode wrappers around p7mu_adc_channel_disable(). */
+static inline int p7mu_adc_sampling_channel_disable(unsigned int ch)
+{
+	return p7mu_adc_channel_disable(ch, CAPTURE_SAMPLING_MODE);
+}
+
+static inline int p7mu_adc_one_sample_channel_disable(unsigned int ch)
+{
+	return p7mu_adc_channel_disable(ch, CAPTURE_ONE_SAMPLE_MODE);
+}
+
+/* Program @ch's sample-rate divisor (only when freq > 0, which also
+ * guards the division in p7mu_get_mod(); freq == 0 leaves the current
+ * rate untouched) and route the channel to the interface matching
+ * @capture_mode. Always returns 0. */
+static int p7mu_adc_config_channel(unsigned int ch, int capture_mode, u32 freq)
+{
+	u32 mod;
+
+	if (freq > 0)
+	{
+		mod = p7mu_get_mod(freq);
+		p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_MOD_0_REG(ch),
+				(u16)(mod & P7MU_ADC_CH_MOD_0_MASK));
+		p7mu_write16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_MOD_1_REG(ch),
+				(u16)((mod >> 16) & P7MU_ADC_CH_MOD_1_MASK));
+	}
+
+	p7mu_adc_channel_if_select(ch, capture_mode);
+
+	return 0;
+}
+
+/* Thin per-mode wrappers around p7mu_adc_config_channel(). */
+static inline int p7mu_adc_sampling_channel_config(unsigned int ch, u32 freq)
+{
+	return p7mu_adc_config_channel(ch, CAPTURE_SAMPLING_MODE, freq);
+}
+
+static inline int p7mu_adc_one_sample_channel_config(unsigned int ch, u32 freq)
+{
+	return p7mu_adc_config_channel(ch, CAPTURE_ONE_SAMPLE_MODE, freq);
+}
+
+/* Trigger a one-shot conversion on @ch and read the result register over
+ * I2C. Returns -EINVAL when the valid bit is not set in the sampled word.
+ * NOTE(review): the channel is enabled here but never disabled again, and
+ * there is no explicit wait between enable and read -- presumably the
+ * hardware latches a valid sample fast enough; confirm against the P7MU
+ * datasheet. */
+static int p7mu_adc_read_one_sample_channel(unsigned int ch, unsigned short *val)
+{
+	p7mu_adc_channel_enable(ch,
+			CAPTURE_ONE_SAMPLE_MODE);
+
+	p7mu_read16(p7mu_adc.p7mu_adc_res->start + P7MU_ADC_CH_DATA_REG(ch),
+			val);
+
+	if ((*val & P7MU_ADC_I2C_VALID_MASK) == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Continuous-sampling channels deliver data through the SPI ring buffer,
+ * so a direct register read is deliberately unsupported here. */
+static int p7mu_adc_read_sampling_channel(unsigned int ch, unsigned short *val)
+{
+	/* Not supported */
+	return -EINVAL;
+}
+
+/* Stub .read_raw callback: sysfs raw reads report nothing (always 0);
+ * actual data is delivered through the ring buffer (sampling mode) or the
+ * backend read op (one-shot mode). */
+static int p7mu_adc_read_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan,
+		int *val,
+		int *val2,
+		long m)
+{
+	return 0;
+}
+
+static const struct iio_info p7mu_adc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &p7mu_adc_read_raw,
+};
+
+/*
+ * Allocate, configure and register one IIO device for a single ADC
+ * channel described by @chan_info. On success the new p7mu_adc_iio_dev is
+ * linked into @chip_info->iio_dev_list and returned; on failure everything
+ * acquired so far is released and NULL is returned.
+ *
+ * Error-path fix vs. the original: labels now unwind strictly in reverse
+ * order of acquisition. The original fell from free_device (which frees
+ * indio_dev) into free_chan (which dereferences indio_dev -- use after
+ * free), leaked the devm device on the register_ring failure path, never
+ * checked kasprintf(), and did not unregister the IIO device when the
+ * final kzalloc() failed.
+ */
+static struct p7mu_adc_iio_dev *p7mu_adc_alloc_channel(struct device *dev,
+		struct p7mu_adc_chip_info *chip_info,
+		const struct p7_adc_chan *chan_info)
+{
+	struct p7mu_adc_state *st;
+	struct p7mu_adc_iio_dev *new;
+	struct iio_chan_spec *chan;
+	struct iio_chan_spec *ch_templ;
+	struct iio_dev *indio_dev;
+	int err;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+	if (indio_dev == NULL)
+		return NULL;
+
+	st = iio_priv(indio_dev);
+	st->chip_info = chip_info;
+	p7_adc_iio_chip_get_channel_ops(chan_info->type,
+			&st->ops);
+
+	chan = kzalloc(sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (chan == NULL)
+		goto free_device;
+
+	p7_adc_iio_chip_get_channel_template(chan_info->type,
+			&ch_templ);
+
+	/* Instantiate the per-type template for this channel number. */
+	*chan = *ch_templ;
+	chan->channel = chan_info->channel;
+	chan->address = chan_info->channel;
+	chan->scan_index = chan_info->channel;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &p7mu_adc_info;
+	indio_dev->channels = chan;
+	indio_dev->num_channels = 1;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = kasprintf(GFP_KERNEL, "%s_%d", P7MU_ADC_DRV_NAME,
+			chan->channel);
+	if (indio_dev->name == NULL)
+		goto free_chan;
+
+	err = p7mu_adc_register_ring_funcs_and_init(indio_dev);
+	if (err)
+		goto free_name;
+
+	err = iio_buffer_register(indio_dev,
+			indio_dev->channels,
+			indio_dev->num_channels);
+	if (err)
+		goto clear_ring;
+
+	/* associate channel and buffer */
+	iio_scan_mask_set(indio_dev, indio_dev->buffer, chan->channel);
+
+	/* Set default buffer length from platform data */
+	indio_dev->buffer->access->set_length(indio_dev->buffer,
+			chan_info->samples);
+
+	err = devm_iio_device_register(dev, indio_dev);
+	if (err)
+		goto free_buf;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new) {
+		devm_iio_device_unregister(dev, indio_dev);
+		goto free_buf;
+	}
+
+	new->channel = chan_info->channel;
+	new->indio_dev = indio_dev;
+	list_add(&new->l, &chip_info->iio_dev_list);
+
+	return new;
+
+	/* Error unwinding, strictly in reverse order of acquisition. */
+free_buf:
+	iio_buffer_unregister(indio_dev);
+clear_ring:
+	p7mu_adc_ring_cleanup(indio_dev);
+free_name:
+	kfree(indio_dev->name);
+free_chan:
+	kfree(chan);
+free_device:
+	devm_iio_device_free(dev, indio_dev);
+
+	return NULL;
+}
+
+/* Unregister and free one per-channel IIO device previously created by
+ * p7mu_adc_alloc_channel(), unlinking it from the chip list first. */
+static void p7mu_adc_iiodev_delete(struct p7mu_adc_iio_dev *d)
+{
+	struct iio_dev *indio_dev = d->indio_dev;
+
+	list_del(&d->l);
+	devm_iio_device_unregister(&indio_dev->dev, indio_dev);
+	iio_buffer_unregister(indio_dev);
+	p7mu_adc_ring_cleanup(indio_dev);
+	kfree(indio_dev->name);
+	kfree(indio_dev->channels);
+	devm_iio_device_free(indio_dev->dev.parent, indio_dev);
+	kfree(d);
+}
+
+/* Tear down every per-channel IIO device, then stop the converter.
+ * Always returns 0. */
+int p7mu_adc_remove(struct p7mu_adc_chip_info *ch_info)
+{
+	struct p7mu_adc_iio_dev *d, *n;
+
+	list_for_each_entry_safe(d, n, &ch_info->iio_dev_list, l)
+		p7mu_adc_iiodev_delete(d);
+
+	p7mu_adc_stop(ch_info);
+
+	return 0;
+}
+
+#define P7_ADC_CHAN(_type, _shift, _mask) { \
+ .type = _type, \
+ .indexed = 1, \
+ .info_mask_separate = _mask, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = _shift, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+static struct p7_adc_iio_chip_channel_ops sampling_chann_ops = {
+ .enable = p7mu_adc_sampling_channel_enable,
+ .disable = p7mu_adc_sampling_channel_disable,
+ .config = p7mu_adc_sampling_channel_config,
+ .read = p7mu_adc_read_sampling_channel,
+};
+
+static struct p7_adc_iio_chip_channel_ops one_sample_chann_ops = {
+ .enable = p7mu_adc_one_sample_channel_enable,
+ .disable = p7mu_adc_one_sample_channel_disable,
+ .config = p7mu_adc_one_sample_channel_config,
+ .read = p7mu_adc_read_one_sample_channel,
+};
+
+static struct p7_adc_iio_chip_info tci = {
+ .type = P7MUADC_IIO_TEMP,
+ .channel_template = P7_ADC_CHAN(IIO_TEMP, 0, BIT(IIO_CHAN_INFO_RAW)),
+ .owner = THIS_MODULE,
+ .ops = &one_sample_chann_ops,
+};
+
+static struct p7_adc_iio_chip_info rbci = {
+ .type = P7MUADC_IIO_RING_BUFFER,
+ .channel_template = P7_ADC_CHAN(IIO_VOLTAGE, 4, BIT(IIO_CHAN_INFO_RAW)),
+ .ops = &sampling_chann_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct p7_adc_iio_chip_info mci = {
+ .type = P7MUADC_IIO_MARKER,
+ .channel_template = P7_ADC_CHAN(IIO_VOLTAGE, 4, BIT(IIO_CHAN_INFO_RAW)),
+ .ops = &sampling_chann_ops,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Platform probe: claim the P7MU ADC (and optional SPI) I/O regions,
+ * register the supported channel types, create and configure every channel
+ * described in platform data, then start the converter. The SPI region is
+ * optional -- without it only one-shot I2C captures are available.
+ */
+static int __devinit p7mu_adc_plat_probe(struct platform_device* pdev)
+{
+	int i, ret;
+	struct p7mu_plat_data const* const pdata = p7mu_pdata();
+
+	/* Refuse double-probing of the single shared ADC instance. */
+	if (p7mu_adc.p7mu_adc_init)
+		return -EBUSY;
+
+	p7mu_adc.p7mu_adc_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!p7mu_adc.p7mu_adc_res) {
+		dev_err(&pdev->dev, "failed to find P7MU ADC I/O region address\n");
+		return -ENXIO;
+	}
+	p7mu_adc.p7mu_adc_res = p7mu_request_region(pdev, p7mu_adc.p7mu_adc_res);
+	if (IS_ERR(p7mu_adc.p7mu_adc_res)) {
+		dev_err(&pdev->dev, "failed to request P7MU ADC I/O region\n");
+		/* Propagate the real error: the original computed PTR_ERR()
+		 * into ret but then returned a hard-coded -ENXIO. */
+		ret = PTR_ERR(p7mu_adc.p7mu_adc_res);
+		return ret;
+	}
+
+	p7mu_adc.p7mu_spi_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+	if (!p7mu_adc.p7mu_spi_res) {
+		dev_err(&pdev->dev, "failed to find P7MU SPI I/O region address\n");
+		p7mu_adc.state_mask |= P7MU_ADC_STATE_FAULT_SPI;
+		goto spi_skip;
+	}
+	p7mu_adc.p7mu_spi_res = p7mu_request_region(pdev, p7mu_adc.p7mu_spi_res);
+	if (IS_ERR(p7mu_adc.p7mu_spi_res)) {
+		dev_err(&pdev->dev, "failed to request P7MU SPI I/O region\n");
+		p7mu_adc.state_mask |= P7MU_ADC_STATE_FAULT_SPI;
+	}
+
+spi_skip:
+	/* Register the three converter-type backends this chip implements. */
+	p7_adc_register_iio_chip(&tci);
+	p7_adc_register_iio_chip(&rbci);
+	p7_adc_register_iio_chip(&mci);
+
+	mutex_init(&p7mu_adc.mutex);
+
+	INIT_LIST_HEAD(&p7mu_adc.iio_dev_list);
+	p7mu_adc.ch_masked = -1;
+	p7mu_adc.ch_marker = -1;
+
+	if (pdata && pdata->chan_data) {
+		for (i = 0; i < pdata->chan_data->num_channels; i++) {
+			const struct p7_adc_chan *chan_pdata =
+				&pdata->chan_data->channels[i];
+
+			if (chan_pdata->type == P7MUADC_IIO_MARKER) {
+				p7mu_adc_configure_marker(&p7mu_adc,
+						chan_pdata->channel);
+			} else {
+				/* Skip rate/interface setup when the IIO
+				 * device could not be created. */
+				if (!p7mu_adc_alloc_channel(&pdev->dev,
+							&p7mu_adc,
+							chan_pdata))
+					continue;
+			}
+
+			p7mu_adc_config_channel(chan_pdata->channel,
+					CAPTURE_SAMPLING_MODE,
+					chan_pdata->freq);
+		}
+	} else {
+		dev_warn(&pdev->dev, "No ADC channel pdata, disabling SPI\n");
+		p7mu_adc.state_mask |= P7MU_ADC_STATE_FAULT_SPI;
+	}
+
+	if (!(p7mu_adc.state_mask & P7MU_ADC_STATE_FAULT_SPI))
+		p7mu_adc_configure_ring(&p7mu_adc);
+
+	/* Setup and start ADC */
+	p7mu_adc.state_mask |= P7MU_ADC_STATE_RUNNING;
+	p7mu_adc_config(&p7mu_adc);
+
+	p7mu_adc.p7mu_adc_init = 1;
+	dev_info(&pdev->dev, "P7MU ADC - ready\n");
+
+	return 0;
+}
+
+/* Platform remove: mirror of probe -- delete all channels, unregister the
+ * chip-type descriptors and release whichever I/O regions were claimed
+ * (the SPI region may be an ERR_PTR when probe failed to request it). */
+static int __devexit p7mu_adc_plat_remove(struct platform_device* pdev)
+{
+	p7mu_adc_remove(&p7mu_adc);
+
+	p7_adc_unregister_iio_chip(&tci);
+	p7_adc_unregister_iio_chip(&rbci);
+	p7_adc_unregister_iio_chip(&mci);
+
+	if (!IS_ERR_OR_NULL(p7mu_adc.p7mu_spi_res))
+		p7mu_release_region(p7mu_adc.p7mu_spi_res);
+
+	if (!IS_ERR_OR_NULL(p7mu_adc.p7mu_adc_res))
+		p7mu_release_region(p7mu_adc.p7mu_adc_res);
+
+	p7mu_adc.p7mu_adc_init = 0;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: quiesce every enabled buffer through its postdisable
+ * hook, then switch the converter off (the enable shadow is kept so
+ * resume can restore it). */
+static int p7mu_adc_suspend(struct device *dev)
+{
+	struct p7mu_adc_iio_dev *d;
+
+	list_for_each_entry(d, &p7mu_adc.iio_dev_list, l)
+		if (iio_buffer_enabled(d->indio_dev))
+			d->indio_dev->setup_ops->postdisable(d->indio_dev);
+
+	p7mu_adc_stop(&p7mu_adc);
+
+	return 0;
+}
+
+/* System resume: restart the converter, then re-arm every buffer that was
+ * enabled before suspend through its postenable hook. */
+static int p7mu_adc_resume(struct device *dev)
+{
+	struct p7mu_adc_iio_dev *d;
+
+	p7mu_adc_start(&p7mu_adc);
+
+	list_for_each_entry(d, &p7mu_adc.iio_dev_list, l)
+		if (iio_buffer_enabled(d->indio_dev))
+			d->indio_dev->setup_ops->postenable(d->indio_dev);
+
+	return 0;
+}
+#else
+#define p7mu_adc_suspend NULL
+#define p7mu_adc_resume NULL
+#endif
+
+static const struct dev_pm_ops p7mu_adc_dev_pm_ops = {
+ .suspend = p7mu_adc_suspend,
+ .resume = p7mu_adc_resume,
+};
+
+/* Driver structure for platform driver, used to get the i2c resources */
+static struct platform_driver p7mu_adc_plat_driver = {
+ .driver = {
+ .name = P7MU_ADC_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &p7mu_adc_dev_pm_ops,
+ },
+ .probe = p7mu_adc_plat_probe,
+ .remove = __devexit_p(&p7mu_adc_plat_remove),
+};
+
+/* SPI */
+
+/* Configure the P7MU SPI sample interface: round the requested frequency
+ * (module parameter spi_freq overrides the slave's max_speed_hz) to one of
+ * the rates the current base clock can divide to, then enable SPI with the
+ * slave's CPOL/CPHA mode. Always returns 0. */
+static int p7mu_adc_spi_setup(struct p7mu_adc_chip_info* st)
+{
+	u16 const spi_mode = (u16)(st->spi->mode & P7MU_SPI_CPOL_CPHA_MASK);
+	u32 freq = spi_freq? spi_freq : st->spi->max_speed_hz;
+	u32 clk_spi_sel;
+
+	/* Round SPI frequency to possible configurations :
+	   p7mu spi base clk could be P7MU_SPI_HZ_MIN or P7MU_SPI_HZ_MAX
+	   generate correct divisor
+	 */
+	if (p7mu_get_spiclk_rate() == P7MU_SPI_HZ_MIN) {
+		/* 16 MHz base clock: divide down to 16/8/4/2 MHz. */
+		if (freq > P7MU_SPI_8MHZ) {
+			st->spi_freq = P7MU_SPI_16MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_16MHZ;
+		} else if (freq > P7MU_SPI_4MHZ) {
+			st->spi_freq = P7MU_SPI_8MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_8MHZ;
+		} else if (freq > P7MU_SPI_2MHZ) {
+			st->spi_freq = P7MU_SPI_4MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_4MHZ;
+		} else {
+			st->spi_freq = P7MU_SPI_2MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_2MHZ;
+		}
+	} else {
+		/* 24 MHz base clock: divide down to 24/12/6/3 MHz. */
+		if (freq > P7MU_SPI_12MHZ) {
+			st->spi_freq = P7MU_SPI_24MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_24MHZ;
+		} else if (freq > P7MU_SPI_6MHZ) {
+			st->spi_freq = P7MU_SPI_12MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_12MHZ;
+		} else if (freq > P7MU_SPI_3MHZ) {
+			st->spi_freq = P7MU_SPI_6MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_6MHZ;
+		} else {
+			st->spi_freq = P7MU_SPI_3MHZ;
+			clk_spi_sel = P7MU_SPI_CLK_SEL_3MHZ;
+		}
+	}
+	if (st->spi_freq != freq)
+		pr_info( "P7MU SPI frequency corrected to: %d Hz (wanted %d Hz)\n",
+				st->spi_freq, freq);
+
+	/* Enable spi with given options */
+	p7mu_write16(st->p7mu_spi_res->start + P7MU_SPI_CTRL_REG,
+			P7MU_SPI_EN|clk_spi_sel|spi_mode);
+
+	return 0;
+}
+
+/* SPI probe: only meaningful once the platform device has initialized the
+ * ADC and successfully claimed the SPI I/O region. Sets the clock up and
+ * pauses the slave controller until a buffer is actually enabled. */
+static int p7mu_adc_spi_probe(struct spi_device *spi)
+{
+	if (!p7mu_adc.p7mu_adc_init)
+		return -ENODEV;
+
+	if (p7mu_adc.state_mask & P7MU_ADC_STATE_FAULT_SPI)
+		return -ENOTSUPP;
+
+	p7mu_adc.spi = spi;
+	p7mu_adc_spi_setup(&p7mu_adc);
+	p7spis_pause(spi->master);
+
+	return 0;
+}
+
+/* SPI remove: stop producing data, pulse the marker channel (when one is
+ * configured) to push any in-flight messages out, and poll msg_pending for
+ * up to ~5 s before disabling the P7MU SPI block and flushing the slave
+ * controller FIFO.
+ * NOTE(review): msg_pending is a plain u32 polled without locking --
+ * presumably updated from the SPI completion path; confirm visibility. */
+static int p7mu_adc_spi_remove(struct spi_device *spi)
+{
+	unsigned limit = 500;
+
+	p7mu_adc.state_mask &= ~P7MU_ADC_STATE_RUNNING;
+	if (p7mu_adc.ch_marker != -1)
+		p7mu_adc.marker_ops->enable(p7mu_adc.ch_marker);
+
+	while (p7mu_adc.msg_pending && limit--)
+		msleep(10);
+
+	if (p7mu_adc.ch_marker != -1)
+		p7mu_adc.marker_ops->disable(p7mu_adc.ch_marker);
+
+	/* Disable SPI */
+	p7mu_write16(p7mu_adc.p7mu_spi_res->start + P7MU_SPI_CTRL_REG, 0);
+	p7spis_flush(spi->master);
+
+	return 0;
+}
+
+static struct spi_device_id const p7mu_adc_spi_id[] = {
+ { P7MU_ADC_DRV_NAME, 0 },
+ { }
+};
+
+/* driver structure for the spi interface. */
+static struct spi_driver p7mu_adc_spi_driver = {
+ .probe = p7mu_adc_spi_probe,
+ .remove = __devexit_p(p7mu_adc_spi_remove),
+ .driver = {
+ .name = P7MU_ADC_DRV_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .id_table = p7mu_adc_spi_id,
+};
+
+/*
+ * Module init: register the platform driver (which owns the ADC resources)
+ * and then the SPI driver. Fix vs. the original: when SPI registration
+ * fails, the platform driver is unregistered again instead of being left
+ * behind half-initialized.
+ */
+static int __init p7mu_adc_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&p7mu_adc_plat_driver);
+	if (err)
+		return err;
+
+	err = spi_register_driver(&p7mu_adc_spi_driver);
+	if (err)
+		platform_driver_unregister(&p7mu_adc_plat_driver);
+
+	return err;
+}
+module_init(p7mu_adc_init);
+
+/* Module unload: unregister drivers in reverse registration order. */
+static void __exit p7mu_adc_exit(void)
+{
+	spi_unregister_driver(&p7mu_adc_spi_driver);
+	platform_driver_unregister(&p7mu_adc_plat_driver);
+}
+module_exit(p7mu_adc_exit);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("Parrot Power Management Unit ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/adc/p7mu-adc_ring.c b/drivers/parrot/iio/adc/p7mu-adc_ring.c
new file mode 100644
index 00000000000000..063fb7a8cd9fa4
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7mu-adc_ring.c
@@ -0,0 +1,292 @@
+/**
+ *************************************************
+ * @file p7mu-adc_ring.c
+ * @brief P7 Analogic to Digital Converter driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2013-09-19
+ *************************************************
+ */
+#define DEBUG
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#include <spi/p7-spis.h> /* Necessary to flush SPIS fifo. */
+#include "p7-adc_core.h"
+#include "p7mu-adc.h"
+
+/* Timeout waiting for the marker to be received */
+#define SEND_MARKER_TIMEOUT 5 /* ms */
+/*
+ * Inject a marker sample into the SPI stream and wait until it is seen.
+ * ch_masked is set so that samples from @ch are dropped until the
+ * marker shows up; send_marker_done is completed by
+ * p7mu_adc_push_to_buffer() when the marker channel is decoded.
+ * Retries up to 10 times; returns 0 on success, -ETIMEDOUT if the
+ * marker was never observed.  Serialised by send_mutex.
+ */
+static int p7mu_adc_send_marker(struct iio_dev *indio_dev, unsigned int ch)
+{
+ struct p7mu_adc_state *st = iio_priv(indio_dev);
+ struct p7mu_adc_chip_info *chip_info = st->chip_info;
+ int err = 0;
+ int retry = 10;
+
+ mutex_lock(&chip_info->send_mutex);
+
+ do {
+ chip_info->ch_masked = ch;
+ chip_info->marker_ops->enable(chip_info->ch_marker);
+ usleep_range(2000, 2000);
+ chip_info->marker_ops->disable(chip_info->ch_marker);
+
+ /* wait for marker to receive */
+ err = wait_for_completion_timeout(&chip_info->send_marker_done,
+ msecs_to_jiffies(SEND_MARKER_TIMEOUT));
+ if (!err) {
+ dev_dbg(&indio_dev->dev, "send marker timeout\n");
+ err = -ETIMEDOUT;
+ continue;
+ } else {
+ err = 0;
+ break;
+ }
+ } while (retry--);
+
+ mutex_unlock(&chip_info->send_mutex);
+
+ return err;
+}
+
+/*
+ * Buffer post-enable hook: start streaming the first (and only
+ * supported) active scan channel.
+ * In marker mode a marker is injected so stale samples can be skipped;
+ * in single mode the SPI transfer length is sized from the kfifo length
+ * (capped by P7MU_ADC_BUF_MAX, falling back to 1024 bytes).  The async
+ * SPI message is (re)started only when no channel is enabled yet
+ * (chan_en_reg_val low nibble == 0), then the ADC channel is enabled.
+ */
+static int iio_setup_buffer_postenable(struct iio_dev *indio_dev)
+{
+ unsigned int channel;
+ struct p7mu_adc_state *st = iio_priv(indio_dev);
+
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ return -EINVAL;
+
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ if (st->chip_info->state_mask & P7MU_ADC_STATE_MARKER)
+ p7mu_adc_send_marker(indio_dev, channel);
+ else {
+ int size;
+ if (st->chip_info->chan_en_reg_val & 0xf) {
+ /* we support only one spi channel in single mode */
+ printk(KERN_WARNING "p7mu adc : only one spi channel supported\n");
+ return -EINVAL;
+ }
+ /* libHAL really expects a complete read (not a partial read) */
+ size = indio_dev->buffer->access->get_length(indio_dev->buffer);
+ if (size <= P7MU_ADC_BUF_MAX)
+ st->chip_info->xfer.len = size;
+ else
+ st->chip_info->xfer.len = 1024;
+ }
+ if ((st->chip_info->chan_en_reg_val & 0xf) == 0) {
+ /* start transfer */
+ int ret;
+ p7spis_resume(st->chip_info->spi->master);
+ st->chip_info->msg_pending = 1;
+ st->chip_info->msg_full = 0;
+ ret = spi_async(st->chip_info->spi, &st->chip_info->msg);
+ if (ret) {
+ dev_err(&st->chip_info->spi->dev, "spi_async --> %d\n", ret);
+ return ret;
+ }
+ }
+
+ st->ops->enable(channel);
+
+ return 0;
+}
+
+/*
+ * Buffer post-disable hook: stop the active channel and pause the SPI
+ * slave when no channel is left streaming.  The set_bytes_per_datum(0)
+ * then restore-and-request_update sequence appears intended to force
+ * the kfifo to drop buffered data and rebuild — confirm against the
+ * kfifo buffer implementation.
+ */
+static int iio_setup_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ unsigned int val, channel;
+ struct p7mu_adc_state *st = iio_priv(indio_dev);
+
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ return -EINVAL;
+
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ st->ops->disable(channel);
+
+ /* invalidate dma and buffer */
+ if ((st->chip_info->chan_en_reg_val & 0xf) == 0)
+ p7spis_pause(st->chip_info->spi->master);
+
+ val = indio_dev->buffer->access->get_bytes_per_datum(indio_dev->buffer);
+ indio_dev->buffer->access->set_bytes_per_datum(indio_dev->buffer, 0);
+ indio_dev->buffer->access->set_bytes_per_datum(indio_dev->buffer, val);
+ indio_dev->buffer->access->request_update(indio_dev->buffer);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops p7mu_adc_ring_setup_ops = {
+ .postenable = &iio_setup_buffer_postenable,
+ .postdisable = &iio_setup_buffer_postdisable,
+};
+
+/*
+ * Attach a kfifo buffer to @indio_dev and install the start/stop
+ * capture hooks (p7mu_adc_ring_setup_ops).  Returns 0 or -ENOMEM.
+ */
+int p7mu_adc_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+
+ buffer = iio_kfifo_allocate(indio_dev);
+
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Ring buffer functions - here start/stop new capture related */
+ indio_dev->setup_ops = &p7mu_adc_ring_setup_ops;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ indio_dev->modes = INDIO_BUFFER_HARDWARE;
+
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+/*
+ * Look up the per-channel IIO device registered for ADC channel @ch.
+ * Returns NULL when no device on the chip's list matches.
+ */
+static struct p7mu_adc_iio_dev *p7mu_adc_find_dev_from_ch(struct p7mu_adc_chip_info *chip_info, int ch)
+{
+	struct p7mu_adc_iio_dev *cursor;
+
+	list_for_each_entry(cursor, &chip_info->iio_dev_list, l) {
+		if (cursor->channel != ch)
+			continue;
+		return cursor;
+	}
+
+	return NULL;
+}
+
+/*
+ * Demultiplex the SPI rx buffer into the per-channel IIO buffers.
+ * Each 16-bit little-endian sample carries its source channel in the
+ * low bits (P7MU_ADC_SPI_CHANNEL_MASK).
+ * Marker mode: samples are pushed one by one; a marker-channel sample
+ * completes send_marker_done and samples from ch_masked are dropped.
+ * Single mode: the whole rx buffer is bulk-stored to the one device
+ * owning the first sample's channel.
+ * Returns 0, a negative errno, or a store_to() error OR-combination.
+ */
+static int p7mu_adc_push_to_buffer(struct p7mu_adc_chip_info* chip_info)
+{
+ int i, ch;
+ u16 data;
+ struct p7mu_adc_iio_dev *dev;
+ int ret = 0;
+
+ if (chip_info->state_mask & P7MU_ADC_STATE_MARKER) {
+ /* XXX sending sample by sample is very slow ... */
+ for (i = 0; i < chip_info->xfer.len / 2; i++)
+ {
+ data = le16_to_cpup((const __le16 *)&chip_info->rx[i*2]);
+ ch = data & P7MU_ADC_SPI_CHANNEL_MASK;
+ data &= ~P7MU_ADC_SPI_CHANNEL_MASK;
+
+ if (ch == chip_info->ch_marker) {
+
+ if (chip_info->ch_masked != -1) {
+ chip_info->ch_masked = -1;
+ complete(&chip_info->send_marker_done);
+ }
+ continue;
+ }
+
+ if (chip_info->ch_masked != ch) {
+ dev = p7mu_adc_find_dev_from_ch(chip_info, ch);
+ if (dev != NULL)
+ if (iio_buffer_enabled(dev->indio_dev))
+ ret |= dev->indio_dev->buffer->access->store_to(dev->indio_dev->buffer,
+ (void *)&data);
+ }
+ }
+ }
+ else {
+ int n;
+ data = le16_to_cpup((const __le16 *)&chip_info->rx[0]);
+ ch = data & P7MU_ADC_SPI_CHANNEL_MASK;
+ dev = p7mu_adc_find_dev_from_ch(chip_info, ch);
+ if (!dev)
+ return -ENODEV;
+ if (!dev->indio_dev->buffer->access->store_to_buffer)
+ return -EINVAL;
+ n = dev->indio_dev->buffer->access->store_to_buffer(dev->indio_dev->buffer,
+ (void *)chip_info->rx, chip_info->xfer.len / 2);
+ if (n != chip_info->xfer.len / 2)
+ ret = -EBUSY;
+ }
+
+ /* Warn only once per capture when the consumer falls behind. */
+ if (ret && !chip_info->msg_full) {
+ pr_warn("p7mu_adc_push_to_buffer : fifo is full\n");
+ chip_info->msg_full = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * SPI message completion callback: pushes the received samples and
+ * requeues the transfer for as long as the driver is RUNNING and the
+ * message completed without error.  May run from a context where it
+ * can't sleep (SPI completion).
+ * NOTE(review): the original comment said this "must [be protected]
+ * with a spinlock", but no lock is taken here — confirm that
+ * msg_pending/state_mask accesses are race-free.
+ */
+static void p7mu_read_ring_data(void *data)
+{
+ struct p7mu_adc_chip_info *chip_info = (struct p7mu_adc_chip_info *) data;
+ struct spi_message* mesg = &chip_info->msg;
+
+
+ if (!(chip_info->state_mask & P7MU_ADC_STATE_RUNNING)) {
+ chip_info->msg_pending = 0;
+ return;
+ }
+ if (mesg->status) {
+ chip_info->msg_pending = 0;
+ return;
+ }
+
+ p7mu_adc_push_to_buffer(chip_info);
+ spi_async(chip_info->spi, mesg);
+}
+
+/* Tear down the kfifo buffer attached by
+ * p7mu_adc_register_ring_funcs_and_init(). */
+void p7mu_adc_ring_cleanup(struct iio_dev *indio_dev)
+{
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ if (iio_buffer_enabled(indio_dev)) {
+ /* Force update to free ring buffer */
+ indio_dev->buffer->access->set_bytes_per_datum(indio_dev->buffer, 0);
+ indio_dev->buffer->access->request_update(indio_dev->buffer);
+ }
+ iio_kfifo_free(indio_dev->buffer);
+}
+
+/*
+ * Enable marker mode on @ci using ADC channel @ch_marker: fetch the
+ * marker channel ops, initialise the completion/mutex used by
+ * p7mu_adc_send_marker(), and set P7MU_ADC_STATE_MARKER.
+ * Returns 0 or the ops-lookup error.
+ */
+int p7mu_adc_configure_marker(struct p7mu_adc_chip_info *ci,
+ unsigned int ch_marker)
+{
+ int ret;
+
+ ci->ch_marker = ch_marker;
+ ret = p7_adc_iio_chip_get_channel_ops(P7MUADC_IIO_MARKER,
+ &ci->marker_ops);
+ if (ret < 0)
+ return ret;
+ init_completion(&ci->send_marker_done);
+ mutex_init(&ci->send_mutex);
+ ci->state_mask |= P7MU_ADC_STATE_MARKER;
+ return 0;
+}
+
+/*
+ * Prepare the reusable async SPI message: one rx-only transfer into
+ * ci->rx (default length 1024 bytes, possibly shrunk at buffer enable
+ * time), completed by p7mu_read_ring_data() which requeues it.
+ */
+int p7mu_adc_configure_ring(struct p7mu_adc_chip_info *ci)
+{
+ spi_message_init(&ci->msg);
+ ci->msg.complete = p7mu_read_ring_data;
+ ci->msg.context = ci;
+
+ ci->xfer.rx_buf = ci->rx;
+ ci->xfer.len = 1024;
+ ci->xfer.cs_change = 1;
+ spi_message_add_tail(&ci->xfer, &ci->msg);
+
+ return 0;
+}
diff --git a/drivers/parrot/iio/adc/p7temp.c b/drivers/parrot/iio/adc/p7temp.c
new file mode 100644
index 00000000000000..b13f6a8aafddf3
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7temp.c
@@ -0,0 +1,484 @@
+/**
+ ***********************************************
+ * @file p7-temperature.c
+ * @brief P7 Temperature driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ * Check https://smet/issues/10348 for more information
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @author François MULLER <francois.muller@parrot.com>
+ *
+ * @date 2013-09-19
+ ***********************************************
+ */
+// #define DEBUG
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <linux/sort.h>
+#include <asm/io.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#include <mfd/p7mu.h>
+#include "p7-adc_regs.h"
+#include "p7-adc_core.h"
+
+#define P7_TEMP_DRV_NAME "p7-temperature"
+
+#define P7_ADC_CHANNEL 0
+#define P7MU_ADC_CHANNEL 7
+
+#define FIXED_POINT_DIV_FACTOR 8
+#define ALPHA_DIV_FACTOR 0x100
+#define TEMP_NUM_ROUND (4*4) // Need to be a multiple of 4
+#define HALF_TEMP_NUM_ROUND (TEMP_NUM_ROUND/2)
+#define GPIO_V1 49
+#define GPIO_V2 50
+#define PINCTRL_STATE_VOLTAGE1 "v1"
+#define PINCTRL_STATE_VOLTAGE2 "v2"
+#define GPIO_TIME (8000) /* value in us: there is a big capacitor that takes at least 8 ms to charge or discharge */
+
+#define P7MU_OTP_PAGE ((u16) 0x0e00)
+#define P7MU_OTP_TS_POINT_0 (P7MU_OTP_PAGE + 0xc)
+#define P7MU_OTP_TS_POINT_1 (P7MU_OTP_PAGE + 0xd)
+
+#define P7MU_OTP_INTERRUPT_EN_1 (P7MU_OTP_PAGE + 0x12) // P7MU_ADC_OFFSET_0V5_TEMP_0 (val >> 8)
+#define P7MU_OTP_TEMP_ERR_TIME (P7MU_OTP_PAGE + 0x13) // P7MU_ADC_OFFSET_2V7_TEMP_0 (val >> 8)
+#define P7MU_OTP_IC_SETUP (P7MU_OTP_PAGE + 0x18) // P7MU_ADC_OFFSET_0V5_TEMP_1 (val >> 8)
+#define P7MU_OTP_CLK_GEN_BCKP_CFG (P7MU_OTP_PAGE + 0x1A) // P7MU_ADC_OFFSET_2V7_TEMP_1 (val >> 8)
+
+struct p7_temp_state
+{
+ unsigned int type;
+ int num_channels;
+ int gpio_time;
+ struct timer_list poll_timer;
+ struct p7_adc_iio_chip_channel_ops *ops;
+ // p7mu temperature calibration
+ u16 ts_temp_0;
+ int ts_adc_read_temp_0;
+ u16 ts_temp_1;
+ int ts_adc_read_temp_1;
+ // p7 temperature calibration
+ s8 offset_0v5_0;
+ s8 offset_2v7_0;
+ s8 offset_0v5_1;
+ s8 offset_2v7_1;
+ s8 calibration_valid;
+};
+
+/*
+ * Ascending comparator for sort() over int samples.
+ * Fixes: made static (a global symbol named "compare" pollutes the
+ * kernel namespace) and replaced the subtraction idiom, which can
+ * overflow for large operands, with an overflow-safe comparison.
+ */
+static int compare(const void *a, const void *b)
+{
+	const int x = *(const int *)a;
+	const int y = *(const int *)b;
+
+	return (x > y) - (x < y);
+}
+
+/*
+ * Read a temperature channel (IIO_CHAN_INFO_RAW on IIO_TEMP only).
+ * P7MU channel: average the middle half of TEMP_NUM_ROUND sorted ADC
+ * reads, then either interpolate between the two OTP calibration
+ * points (ts_temp_0/1) or fall back to an uncalibrated linear formula.
+ * P7 channel: falls through from the P7MU path to reuse alpha, then
+ * measures the external divider twice (GPIO_V1 then GPIO_V2 driving)
+ * and converts the trimmed-mean difference with OTP offset correction.
+ * Returns IIO_VAL_INT or a negative errno.
+ */
+static int p7_temperature_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ int v1, v2, round, sum = 0, alpha = 0;
+ int adc_offset_0v5, adc_offset_2v7;
+ int values[TEMP_NUM_ROUND];
+ u16 data;
+ struct p7_temp_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+
+ if(m != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP) {
+ ret = -EINVAL;
+ goto out;
+ }/* else Reading the current channel */
+
+ switch (chan->channel) {
+
+ case P7_ADC_CHANNEL:
+ case P7MU_ADC_CHANNEL:
+/***************** P7MU_ADC_CHANNEL *************************/
+ for (round = 0; round < TEMP_NUM_ROUND; round++) {
+ ret = st->ops->read(P7MU_ADC_CHANNEL, &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "p7mu ADC read not valid. Got 0x%x\n", data);
+ ret = -EINVAL;
+ goto out;
+ }
+ values[round] = data >> chan->scan_type.shift;
+ values[round] &= (1 << chan->scan_type.realbits) - 1;
+ }
+ // Sort values so that we remove first quarter and last quarter results
+ sort (values, TEMP_NUM_ROUND, sizeof(int), compare, NULL);
+ // With that we keep only "correct" values and we remove "spike" values from mean calculation
+ for(round = TEMP_NUM_ROUND/4; round < (3*TEMP_NUM_ROUND)/4; round++)
+ sum += values[round];
+
+ // Check if OTP value are ok
+ if(st->calibration_valid) {
+ alpha = ALPHA_DIV_FACTOR * (sum - st->ts_adc_read_temp_0 * HALF_TEMP_NUM_ROUND) /
+ ((st->ts_adc_read_temp_1 - st->ts_adc_read_temp_0) * HALF_TEMP_NUM_ROUND);
+ }
+
+ dev_dbg(&indio_dev->dev, "alpha = %i\n", alpha);
+
+ if(chan->channel == P7MU_ADC_CHANNEL) {
+ if(st->calibration_valid)
+ *val = st->ts_temp_0 + ((st->ts_temp_1 - st->ts_temp_0)*alpha + (int)(0.5*ALPHA_DIV_FACTOR)) / ALPHA_DIV_FACTOR;
+ else
+ *val = (sum * 185 / (4095 * HALF_TEMP_NUM_ROUND)) - 50;
+
+ ret = IIO_VAL_INT;
+ goto out;
+ }
+ //no break;
+/***************** P7_ADC_CHANNEL *************************/
+ adc_offset_0v5 = st->offset_0v5_0*FIXED_POINT_DIV_FACTOR +
+ alpha * (st->offset_0v5_1 - st->offset_0v5_0) /
+ (ALPHA_DIV_FACTOR/FIXED_POINT_DIV_FACTOR);
+ adc_offset_2v7 = st->offset_2v7_0*FIXED_POINT_DIV_FACTOR +
+ alpha * (st->offset_2v7_1 - st->offset_2v7_0) /
+ (ALPHA_DIV_FACTOR/FIXED_POINT_DIV_FACTOR);
+
+ dev_dbg(&indio_dev->dev, "adc_offset_0v5=%i adc_offset_2v7%i\n", adc_offset_0v5, adc_offset_2v7);
+
+ sum = 0;
+
+ // Drive V1, float V2, and wait for the external capacitor to settle
+ gpio_direction_output(GPIO_V1, 1);
+ gpio_direction_input(GPIO_V2);
+ usleep_range(st->gpio_time, st->gpio_time);
+
+ for (round = 0; round < TEMP_NUM_ROUND; round++) {
+ ret = st->ops->read(chan->address, &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v1 ADC read not valid. Got 0x%x\n", data);
+ goto out;
+ }
+ v1 = data >> chan->scan_type.shift;
+ v1 &= (1 << chan->scan_type.realbits) - 1;
+
+ values[round] = v1;
+ }
+ // Swap the driven GPIO and measure again
+ gpio_direction_output(GPIO_V2, 1);
+ gpio_direction_input(GPIO_V1);
+ usleep_range(st->gpio_time, st->gpio_time);
+
+ for (round = 0; round < TEMP_NUM_ROUND; round++) {
+ ret = st->ops->read(chan->address, &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v2 ADC read not valid. Got 0x%x\n", data);
+ ret = -EINVAL;
+ goto out;
+ }
+ v2 = data >> chan->scan_type.shift;
+ v2 &= (1 << chan->scan_type.realbits) - 1;
+
+ // It's better to "randomize" the subtract part than to sort each v1 v2 and subtract at the end
+ values[round] = values[round] - v2;
+ }
+ // Sort values so that we remove first quarter and last quarter results
+ sort (values, TEMP_NUM_ROUND, sizeof(int), compare, NULL);
+ // With that we keep only "correct" values and we remove "spike" values from mean calculation
+ for(round = TEMP_NUM_ROUND/4; round < (3*TEMP_NUM_ROUND)/4; round++)
+ sum += values[round];
+
+ dev_dbg(&indio_dev->dev, "%i %i %i\n", sum,
+ (sum * (int)(4313.72 * FIXED_POINT_DIV_FACTOR)),
+ ((sum * (int)(4313.72 * FIXED_POINT_DIV_FACTOR) /
+ (adc_offset_2v7 - adc_offset_0v5 + 3003*FIXED_POINT_DIV_FACTOR))));
+
+ // sum is a multiple of HALF_TEMP_NUM_ROUND
+ *val = (((sum * (int)((4313.72) * FIXED_POINT_DIV_FACTOR) /
+ (adc_offset_2v7 - adc_offset_0v5 + 3003*FIXED_POINT_DIV_FACTOR)) -
+ (int)(304.7*HALF_TEMP_NUM_ROUND)) + (int)(0.5*HALF_TEMP_NUM_ROUND))
+ / (HALF_TEMP_NUM_ROUND);
+
+ ret = IIO_VAL_INT;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+/* sysfs "t_alarm_high" read: placeholder, always returns empty. */
+static ssize_t p7_temp_show_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_high" write: placeholder, input is ignored. */
+static inline ssize_t p7_temp_set_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_low" read: placeholder, always returns empty. */
+static ssize_t p7_temp_show_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_low" write: placeholder, input is ignored. */
+static inline ssize_t p7_temp_set_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return 0;
+}
+
+static IIO_DEVICE_ATTR(t_alarm_high,
+ S_IRUGO | S_IWUSR,
+ p7_temp_show_t_alarm_high, p7_temp_set_t_alarm_high, 0);
+static IIO_DEVICE_ATTR(t_alarm_low,
+ S_IRUGO | S_IWUSR,
+ p7_temp_show_t_alarm_low, p7_temp_set_t_alarm_low, 0);
+
+static struct attribute *p7_temp_event_int_attributes[] = {
+ &iio_dev_attr_t_alarm_high.dev_attr.attr,
+ &iio_dev_attr_t_alarm_low.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group p7_temp_event_attribute_group = {
+ .attrs = p7_temp_event_int_attributes,
+ .name = "events",
+};
+
+static const struct iio_info p7_temperature_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &p7_temperature_read_raw,
+ .event_attrs = &p7_temp_event_attribute_group,
+};
+
+/*
+ * 10 s periodic timer: unconditionally pushes a rising-threshold IIO
+ * event and re-arms itself.  NOTE(review): no threshold comparison is
+ * visible here — confirm consumers expect a periodic wakeup rather
+ * than a real alarm.
+ */
+static void poll_temperature(unsigned long data)
+{
+ struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct p7_temp_state *st = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns();
+
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ mod_timer(&st->poll_timer, jiffies + (10*HZ));
+}
+
+/*
+ * Build indio_dev->channels from the platform channel descriptions and
+ * configure each ADC channel's sampling frequency.
+ * Fix: @err was declared unsigned int, so a negative errno returned by
+ * p7_adc_iio_chip_get_channel_template() was propagated as a large
+ * positive value; it is now a signed int.
+ * Returns 0, the template-lookup errno, or -ENOMEM.
+ */
+static int p7temp_alloc_channels(struct iio_dev *indio_dev,
+		struct p7_temp_chan *channels_info)
+{
+	struct p7_temp_state *st = iio_priv(indio_dev);
+	struct iio_chan_spec *channels;
+	struct iio_chan_spec *ch_templ;
+	unsigned int i;
+	int err;
+
+	err = p7_adc_iio_chip_get_channel_template(st->type,
+			&ch_templ);
+	if (err)
+		return err;
+
+	channels = kcalloc(st->num_channels,
+			sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	for (i = 0; i < st->num_channels; ++i) {
+		channels[i] = *ch_templ;
+		channels[i].channel = channels_info[i].channel;
+		channels[i].address = channels_info[i].channel;
+		channels[i].scan_index = channels_info[i].channel;
+		channels[i].extend_name = (char *)channels_info[i].name;
+
+		st->ops->config(channels_info[i].channel,
+				channels_info[i].freq);
+	}
+
+	indio_dev->channels = channels;
+	indio_dev->num_channels = st->num_channels;
+
+	return 0;
+}
+
+/*
+ * Probe: read the P7MU OTP calibration words, claim the two divider
+ * GPIOs, build the IIO channels, arm the 10 s poll timer and register
+ * the IIO device.
+ * Fixes: the devm_iio_device_alloc() failure path used to jump to
+ * put_chip with ret still 0, reporting success; and on registration
+ * failure the armed poll timer was left pointing at the freed device
+ * — it is now disarmed synchronously.
+ */
+static int __devinit p7_temp_probe(struct platform_device* pdev)
+{
+	int ret;
+	u16 tmp;
+	struct p7_temp_state *st;
+	struct iio_dev *indio_dev;
+	const struct p7_temp_chan_data *pdata = dev_get_platdata(&pdev->dev);
+
+	if (!pdata)
+		return -ENODEV;
+	if (pdata->num_channels != 2)
+		return -ENODEV;
+
+	ret = p7_adc_get_iio_chip(P7MUADC_IIO_TEMP);
+	if (ret) {
+		dev_err(&pdev->dev, "%s():p7_adc_get_iio_chip returns %d\n", __func__, ret );
+		return ret;
+	}
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		ret = -ENOMEM;	/* fix: was returning 0 on alloc failure */
+		goto put_chip;
+	}
+
+	st = iio_priv(indio_dev);
+	dev_set_drvdata(&pdev->dev, indio_dev);
+	p7_adc_iio_chip_get_channel_ops(P7MUADC_IIO_TEMP, &st->ops);
+	st->type = P7MUADC_IIO_TEMP;
+	st->gpio_time = GPIO_TIME;
+	st->num_channels = pdata->num_channels;
+
+/*************** Read P7MU OTP values *****************/
+	/* P7MU temperature calibration: two (temperature, ADC reading)
+	 * points packed into OTP words. */
+	p7mu_read16(P7MU_OTP_TS_POINT_0, &tmp);
+	st->ts_temp_0 = ((u8)(tmp & 0x7F) * 185 / 127) - 50;
+	st->ts_adc_read_temp_0 = (((s16)(tmp & 0xFF10) >> 7) * 8);
+	st->ts_adc_read_temp_0 += (u8)(tmp & 0x7F) * 4095 / 127;
+	dev_dbg(&indio_dev->dev, "ts0 0x%x temp_0=%i adc_read_temp0=%i\n", tmp, st->ts_temp_0, st->ts_adc_read_temp_0);
+
+	p7mu_read16(P7MU_OTP_TS_POINT_1, &tmp);
+	st->ts_temp_1 = ((u8)(tmp & 0x7F) * 185 / 127) - 50;
+	st->ts_adc_read_temp_1 = (((s16)(tmp & 0xFF10) >> 7) * 8);
+	st->ts_adc_read_temp_1 += (u8)(tmp & 0x7F) * 4095 / 127;
+	dev_dbg(&indio_dev->dev, "ts1 0x%x temp_1=%i adc_read_temp1=%i\n", tmp, st->ts_temp_1, st->ts_adc_read_temp_1);
+
+	/* P7 temperature calibration: signed offsets in the high byte of
+	 * each OTP word. */
+	p7mu_read16(P7MU_OTP_INTERRUPT_EN_1, &tmp);
+	st->offset_0v5_0 = (s8)(tmp >> 8);
+	dev_dbg(&indio_dev->dev, "offset_0v5_0=%i\n", st->offset_0v5_0);
+
+	p7mu_read16(P7MU_OTP_TEMP_ERR_TIME, &tmp);
+	st->offset_2v7_0 = (s8)(tmp >> 8);
+	dev_dbg(&indio_dev->dev, "offset_2v7_0=%i\n", st->offset_2v7_0);
+
+	p7mu_read16(P7MU_OTP_IC_SETUP, &tmp);
+	st->offset_0v5_1 = (s8)(tmp >> 8);
+	dev_dbg(&indio_dev->dev, "offset_0v5_1=%i\n", st->offset_0v5_1);
+
+	p7mu_read16(P7MU_OTP_CLK_GEN_BCKP_CFG, &tmp);
+	st->offset_2v7_1 = (s8)(tmp >> 8);
+	dev_dbg(&indio_dev->dev, "offset_2v7_1=%i\n", st->offset_2v7_1);
+
+	/* Identical calibration points would divide by zero later. */
+	if(st->ts_adc_read_temp_1 - st->ts_adc_read_temp_0 == 0)
+	{
+		st->calibration_valid = 0;
+		dev_err(&pdev->dev, "P7 temperature is not calibrated\n");
+	}
+	else
+		st->calibration_valid = 1;
+
+	ret = gpio_request_one(GPIO_V1,
+			GPIOF_OUT_INIT_LOW,
+			PINCTRL_STATE_VOLTAGE1);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request GPIO-%d %s\n",
+			GPIO_V1, PINCTRL_STATE_VOLTAGE1);
+		goto free_device;
+	}
+
+	ret = gpio_request_one(GPIO_V2,
+			GPIOF_OUT_INIT_LOW,
+			PINCTRL_STATE_VOLTAGE2);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request GPIO-%d %s\n",
+			GPIO_V2, PINCTRL_STATE_VOLTAGE2);
+		goto free_gpio_v1;
+	}
+
+	ret = p7temp_alloc_channels(indio_dev,
+			pdata->channels);
+	if (ret)
+		goto free_gpio_v2;
+
+	/* Schedule this module to run every 10 seconds */
+	init_timer(&st->poll_timer);
+	st->poll_timer.expires = jiffies + (10*HZ);
+	st->poll_timer.function = poll_temperature;
+	st->poll_timer.data = (u_long)indio_dev;
+	add_timer(&st->poll_timer);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &p7_temperature_info;
+	indio_dev->name = P7_TEMP_DRV_NAME;
+
+	ret = devm_iio_device_register(&pdev->dev, indio_dev);
+	if (ret)
+		goto free_channels;
+
+	dev_info(&pdev->dev, "temperature sensor and ADC registered.\n");
+
+	return 0;
+
+free_channels:
+	/* fix: disarm the timer armed above before freeing the device */
+	del_timer_sync(&st->poll_timer);
+	kfree(indio_dev->channels);
+free_gpio_v2:
+	gpio_free(GPIO_V2);
+free_gpio_v1:
+	gpio_free(GPIO_V1);
+free_device:
+	devm_iio_device_free(&pdev->dev, indio_dev);
+put_chip:
+	p7_adc_put_iio_chip(P7MUADC_IIO_TEMP);
+
+	return ret;
+}
+
+/*
+ * Remove: tear down everything acquired by p7_temp_probe().
+ * Fix: the poll timer is now stopped first and synchronously
+ * (del_timer_sync) — the original called del_timer() only after the
+ * IIO device had been unregistered, so a concurrently running handler
+ * could still push events to (and re-arm against) a device being
+ * destroyed.
+ */
+static int __devexit p7_temp_remove(struct platform_device* pdev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
+	struct p7_temp_state *st = iio_priv(indio_dev);
+
+	del_timer_sync(&st->poll_timer);
+
+	p7_adc_put_iio_chip(st->type);
+	devm_iio_device_unregister(&pdev->dev, indio_dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	gpio_free(GPIO_V1);
+	gpio_free(GPIO_V2);
+	kfree(indio_dev->channels);
+	devm_iio_device_free(&pdev->dev, indio_dev);
+
+	return 0;
+}
+
+static struct platform_driver p7_temp_driver = {
+ .driver = {
+ .name = P7_TEMP_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = p7_temp_probe,
+ .remove = __devexit_p(p7_temp_remove),
+};
+
+module_platform_driver(p7_temp_driver);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("P7 Temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/adc/p7temp_hw08.c b/drivers/parrot/iio/adc/p7temp_hw08.c
new file mode 100644
index 00000000000000..78025f9006a3ef
--- /dev/null
+++ b/drivers/parrot/iio/adc/p7temp_hw08.c
@@ -0,0 +1,557 @@
+/**
+********************************************************************************
+* @file p7tempgen.c
+* @brief P7 Temperature driver
+*
+* Copyright (C) 2013 Parrot S.A.
+*
+* @author Karl Leplat <karl.leplat@parrot.com>
+* @date 2013-09-19
+********************************************************************************
+*/
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/io.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#include "p7-adc_regs.h"
+#include "p7-adc_core.h"
+
+#define TEMP_DRV_NAME "fc7100-temperature"
+
+struct temp_gen_state
+{
+ unsigned int type;
+ int num_channels;
+ int gpio_time;
+ struct timer_list poll_timer;
+ struct p7_adc_iio_chip_channel_ops *ops;
+};
+
+/********* HW04 ************/
+#define TEMP_NUM_ROUND 16
+#define TEMP_NUM_ROUND_SHIFT 4
+
+/*
+ * parameters of the cubic equation a.x^3 + b.x^2 + c.x + d
+ */
+
+/* a = 3.65938.10^-9 ~> 4024 * 2^-40 */
+#define P7MU_TEMP_A 4024
+#define P7MU_TEMP_A_SHIFT 40
+
+/* b = -2.19291.10^-5 ~> -5887 * 2^-28 */
+#define P7MU_TEMP_B -5887
+#define P7MU_TEMP_B_SHIFT 28
+
+/* c = 6.544126.10^-2 ~> 4289 * 2^-16 */
+#define P7MU_TEMP_C 4289
+#define P7MU_TEMP_C_SHIFT 16
+
+/* d = -45.27694 ~> -11591 * 2^-8 */
+#define P7MU_TEMP_D -11591
+#define P7MU_TEMP_D_SHIFT 8
+
+/* Since the value of the adc has to be cubed we downscale it a bit
+ before the multiplications */
+#define P7MU_TEMP_ADC_SHIFT 3
+
+/* We keep 8 bit of fractional precision before the final sum */
+#define P7MU_TEMP_SUM_SHIFT 8
+
+/*
+ * Convert an averaged ADC reading to a temperature using the cubic
+ * a.x^3 + b.x^2 + c.x + d evaluated in fixed point (coefficients and
+ * shift constants defined above).  Each term is pre-shifted so the
+ * intermediate multiplications fit in an int, then summed at
+ * P7MU_TEMP_SUM_SHIFT bits of fractional precision.
+ */
+static inline int p7_get_temp_hw04(int adc_val)
+{
+ int x = adc_val >> P7MU_TEMP_ADC_SHIFT;
+ int x_2 = x * x;
+ int x_3 = x * x_2;
+
+ /* We shift x_3 a bit so that the multiplication with a doesn't overflow */
+ int a = ((x_3 >> 10) * P7MU_TEMP_A)
+ >> (P7MU_TEMP_A_SHIFT - P7MU_TEMP_ADC_SHIFT * 3
+ - P7MU_TEMP_SUM_SHIFT - 10);
+
+ int b = ((x_2 >> 7) * P7MU_TEMP_B)
+ >> (P7MU_TEMP_B_SHIFT - P7MU_TEMP_ADC_SHIFT * 2
+ - P7MU_TEMP_SUM_SHIFT - 7);
+
+ int c = (x * P7MU_TEMP_C)
+ >> (P7MU_TEMP_C_SHIFT - P7MU_TEMP_ADC_SHIFT * 1
+ - P7MU_TEMP_SUM_SHIFT);
+
+ int d = P7MU_TEMP_D >> (P7MU_TEMP_D_SHIFT - P7MU_TEMP_SUM_SHIFT);
+
+
+ return (a + b + c + d) >> P7MU_TEMP_SUM_SHIFT;
+}
+
+
+/*
+ * HW04 raw read: average TEMP_NUM_ROUND ADC samples from the channel
+ * and convert through the cubic polynomial (p7_get_temp_hw04).
+ * Returns IIO_VAL_INT, a read errno, or -EINVAL for unsupported
+ * info/type requests.
+ */
+static int p7_temperature_read_raw_hw04(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ int v1, round, sum;
+ u16 data;
+ struct temp_gen_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW: /* Reading the current channel */
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ sum = 0;
+ for (round = 0; round < TEMP_NUM_ROUND; round++) {
+ ret = st->ops->read(chan->address,
+ &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v1 ADC read not valid. Got 0x%x\n", data);
+ goto out;
+ }
+ v1 = data >> chan->scan_type.shift;
+ v1 &= (1 << chan->scan_type.realbits) - 1;
+
+ sum += v1;
+ }
+
+ *val = p7_get_temp_hw04(sum >> TEMP_NUM_ROUND_SHIFT);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+
+ }
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+/********** HW08 *************/
+typedef struct ntc_characteristics_t {
+ int temp;
+ int r_ctn;
+} ntc_characteristics_t;
+
+# define RCTN_KOHM(val) (val * 1000)
+static const ntc_characteristics_t ntc[] = {
+ { -40, RCTN_KOHM(1227.2628) },
+ { -35, RCTN_KOHM(874.4491) },
+ { -30, RCTN_KOHM(630.8514) },
+ { -25, RCTN_KOHM(460.4568) },
+ { -20, RCTN_KOHM(339.7972) },
+ { -15, RCTN_KOHM(253.3626) },
+ { -10, RCTN_KOHM(190.7661) },
+ { -5, RCTN_KOHM(144.9635) },
+ { 0, RCTN_KOHM(111.0867) },
+ { 5, RCTN_KOHM(85.8417) },
+ { 10, RCTN_KOHM(66.8613) },
+ { 15, RCTN_KOHM(52.4701) },
+ { 20, RCTN_KOHM(41.4709) },
+ { 25, RCTN_KOHM(33) },
+ { 30, RCTN_KOHM(26.4303) },
+ { 35, RCTN_KOHM(21.2983) },
+ { 40, RCTN_KOHM(17.2658) },
+ { 45, RCTN_KOHM(14.0761) },
+ { 50, RCTN_KOHM(11.5377) },
+ { 55, RCTN_KOHM(9.5058) },
+ { 60, RCTN_KOHM(7.8702) },
+ { 65, RCTN_KOHM(6.5494) },
+ { 70, RCTN_KOHM(5.4751) },
+ { 75, RCTN_KOHM(4.595) },
+ { 80, RCTN_KOHM(3.8742) },
+ { 85, RCTN_KOHM(3.2815) },
+ { 90, RCTN_KOHM(2.7887) },
+ { 95, RCTN_KOHM(2.3787) },
+ { 100, RCTN_KOHM(2.0375) },
+ { 105, RCTN_KOHM(1.7513) },
+ { 110, RCTN_KOHM(1.5093) },
+ { 115, RCTN_KOHM(1.3058) },
+ { 120, RCTN_KOHM(1.1341) },
+ { 125, RCTN_KOHM(0.9872) },
+};
+
+/*
+ * Scan the ntc[] table from its last entry backwards and return the
+ * index of the first entry whose resistance is >= @rctn; returns 0
+ * when none matches.
+ */
+static inline int get_index_min(int rctn)
+{
+	int idx = ARRAY_SIZE(ntc);
+
+	while (idx-- > 0) {
+		if (ntc[idx].r_ctn >= rctn)
+			return idx;
+	}
+
+	return 0;
+}
+
+#define R1 2400
+/*
+ * Convert the two ADC readings of the NTC divider into a temperature
+ * by linear interpolation in the ntc[] table.
+ * Fix: the original initialised r_ctn with ((R1 * 2 * v2) / v1) - R1
+ * BEFORE the !v1 guard, dividing by zero when v1 == 0; the division
+ * is now performed only after v1 has been clamped.
+ */
+static inline int p7_get_temp_hw08(int v1, int v2)
+{
+	int r_ctn;
+	int min, max;
+
+	if (!v1)
+		v1 = 1;	/* avoid division by zero */
+
+	r_ctn = ((R1 * 2 * v2) / v1) - R1;
+
+	min = get_index_min(r_ctn);
+	if (min == ARRAY_SIZE(ntc) - 1)
+		return ntc[min].temp;
+
+	max = min + 1;
+	/* linear interpolation with data from table */
+	return ntc[min].temp +
+		((ntc[max].temp - ntc[min].temp) * (ntc[min].r_ctn - r_ctn)) /
+		(ntc[min].r_ctn - ntc[max].r_ctn);
+}
+
+/*
+ * HW08 raw read: read the thermistor channel (chan->address) and the
+ * reference channel (chan->channel2), then interpolate the temperature
+ * via p7_get_temp_hw08().  When the requested channel IS the reference
+ * (address == channel2), the raw ADC value is returned directly.
+ * Returns IIO_VAL_INT, a read errno, or -EINVAL.
+ */
+static int p7_temperature_read_raw_hw08(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ int v1, v2;
+ u16 data;
+ struct temp_gen_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW: /* Reading the current channel */
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = st->ops->read(chan->address,
+ &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v1 ADC read not valid. Got 0x%x\n", data);
+ goto out;
+ }
+ v1 = data >> chan->scan_type.shift;
+ v1 &= (1 << chan->scan_type.realbits) - 1;
+
+ /* ref is requested, return direct value from ADC */
+ if (chan->address == chan->channel2) {
+ *val = v1;
+ ret = IIO_VAL_INT;
+ break;
+ }
+
+ ret = st->ops->read(chan->channel2,
+ &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v2 ADC read not valid. Got 0x%x\n", data);
+ goto out;
+ }
+ v2 = data >> chan->scan_type.shift;
+ v2 &= (1 << chan->scan_type.realbits) - 1;
+
+ *val = p7_get_temp_hw08(v1, v2);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+
+ }
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+/*
+ * Sicilia raw read: single ADC reading converted through
+ * p7_get_temp_hw08() with the reference fixed at full scale (4096)
+ * since the 3.3V rail has no ADC channel on this board.
+ * Returns IIO_VAL_INT, a read errno, or -EINVAL.
+ */
+static int p7_temperature_read_raw_sicilia(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ int v1;
+ u16 data;
+ struct temp_gen_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW: /* Reading the current channel */
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = st->ops->read(chan->address,
+ &data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "v1 ADC read not valid. Got 0x%x\n", data);
+ goto out;
+ }
+ v1 = data >> chan->scan_type.shift;
+ v1 &= (1 << chan->scan_type.realbits) - 1;
+
+ /* No ADC on the 3.3V reference, use a
+ * hardcoded value of 4096 */
+ *val = p7_get_temp_hw08(v1, 4096);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+
+ }
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+/* sysfs "t_alarm_high" read: placeholder, always returns empty. */
+static ssize_t p7mu_adc_show_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_high" write: placeholder, input is ignored. */
+static inline ssize_t p7mu_adc_set_t_alarm_high(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_low" read: placeholder, always returns empty. */
+static ssize_t p7mu_adc_show_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+/* sysfs "t_alarm_low" write: placeholder, input is ignored. */
+static inline ssize_t p7mu_adc_set_t_alarm_low(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ return 0;
+}
+
+static IIO_DEVICE_ATTR(t_alarm_high,
+ S_IRUGO | S_IWUSR,
+ p7mu_adc_show_t_alarm_high, p7mu_adc_set_t_alarm_high, 0);
+static IIO_DEVICE_ATTR(t_alarm_low,
+ S_IRUGO | S_IWUSR,
+ p7mu_adc_show_t_alarm_low, p7mu_adc_set_t_alarm_low, 0);
+
+static struct attribute *p7mu_adc_event_int_attributes[] = {
+ &iio_dev_attr_t_alarm_high.dev_attr.attr,
+ &iio_dev_attr_t_alarm_low.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group p7mu_adc_event_attribute_group = {
+ .attrs = p7mu_adc_event_int_attributes,
+ .name = "events",
+};
+
+static struct iio_info p7_temperature_info = {
+ .driver_module = THIS_MODULE,
+ .event_attrs = &p7mu_adc_event_attribute_group,
+};
+
+/*
+ * 10 s periodic timer: unconditionally pushes a rising-threshold IIO
+ * event and re-arms itself.  NOTE(review): no threshold comparison is
+ * visible here — confirm consumers expect a periodic wakeup rather
+ * than a real alarm.
+ */
+static void poll_temperature(unsigned long data)
+{
+ struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct temp_gen_state *st = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns();
+
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ mod_timer(&st->poll_timer, jiffies + (10*HZ));
+}
+
+/*
+ * Build indio_dev->channels from platform data; channel2 is the
+ * reference-channel id stored in every spec (used by the hw08 read
+ * path).  Each channel's sampling frequency is also configured.
+ * Fix: @err was declared unsigned int, so a negative errno from
+ * p7_adc_iio_chip_get_channel_template() was propagated as a large
+ * positive value; it is now a signed int.
+ * Returns 0, the template-lookup errno, or -ENOMEM.
+ */
+static int p7tempgen_alloc_channels(struct iio_dev *indio_dev,
+		struct p7_temp_chan *channels_info, int channel2)
+{
+	struct temp_gen_state *st = iio_priv(indio_dev);
+	struct iio_chan_spec *channels;
+	struct iio_chan_spec *ch_templ;
+	unsigned int i;
+	int err;
+
+	err = p7_adc_iio_chip_get_channel_template(st->type, &ch_templ);
+	if (err)
+		return err;
+
+	channels = kcalloc(st->num_channels,
+			sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	for (i = 0; i < st->num_channels; ++i) {
+		channels[i] = *ch_templ;
+		channels[i].channel = channels_info[i].channel;
+		channels[i].channel2 = channel2;
+		channels[i].address = channels_info[i].channel;
+		channels[i].scan_index = channels_info[i].channel;
+		channels[i].extend_name = (char *)channels_info[i].name;
+
+		st->ops->config(channels_info[i].channel,
+				channels_info[i].freq);
+	}
+
+	indio_dev->channels = channels;
+
+	return 0;
+}
+
+/*
+ * Probe: select the read_raw implementation from pdata->temp_mode,
+ * build the channels, arm the 10 s poll timer and register the IIO
+ * device.
+ * Fixes: the devm_iio_device_alloc() failure path and the unknown
+ * temp_mode path both fell through with ret == 0 and reported
+ * success; they now return -ENOMEM and -EINVAL respectively.  The
+ * poll timer armed before registration is also disarmed when
+ * registration fails.
+ */
+static int __devinit temp_gen_probe(struct platform_device* pdev)
+{
+	int ret;
+	struct temp_gen_state *st;
+	struct iio_dev *indio_dev;
+	struct p7_temp_chan *channels_info;
+	int channel2;
+	struct p7_temp_chan_data *pdata
+		= dev_get_platdata(&pdev->dev);
+
+	if (!pdata)
+		return -ENODEV;
+
+	ret = p7_adc_get_iio_chip(P7MUADC_IIO_TEMP);
+	if (ret)
+		return ret;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		ret = -ENOMEM;	/* fix: was returning 0 on alloc failure */
+		goto put_chip;
+	}
+
+	st = iio_priv(indio_dev);
+	dev_set_drvdata(&pdev->dev, indio_dev);
+	p7_adc_iio_chip_get_channel_ops(P7MUADC_IIO_TEMP,
+			&st->ops);
+	st->type = P7MUADC_IIO_TEMP;
+
+	channels_info = pdata->channels;
+	if (pdata->temp_mode == P7_TEMP_FC7100_HW08) {
+		p7_temperature_info.read_raw = p7_temperature_read_raw_hw08;
+		/* first channel doubles as the divider reference */
+		channel2 = channels_info[0].channel;
+		st->ops->config(channels_info[0].channel,
+				channels_info[0].freq);
+
+		st->num_channels = pdata->num_channels;
+	} else if (pdata->temp_mode == P7_TEMP_FC7100_HW04) {
+		p7_temperature_info.read_raw = p7_temperature_read_raw_hw04;
+		channel2 = 0;
+		st->num_channels = pdata->num_channels;
+	} else if (pdata->temp_mode == P7_TEMP_SICILIA) {
+		p7_temperature_info.read_raw = p7_temperature_read_raw_sicilia;
+		channel2 = 0;
+		st->num_channels = pdata->num_channels;
+	} else {
+		ret = -EINVAL;	/* fix: was returning 0 on unknown mode */
+		goto free_device;
+	}
+
+	ret = p7tempgen_alloc_channels(indio_dev,
+			channels_info, channel2);
+	if (ret)
+		goto free_device;
+
+	/* Schedule this module to run every 10 seconds */
+	init_timer(&st->poll_timer);
+	st->poll_timer.expires = jiffies + (10*HZ);
+	st->poll_timer.function = poll_temperature;
+	st->poll_timer.data = (u_long)indio_dev;
+	add_timer(&st->poll_timer);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->num_channels = st->num_channels;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &p7_temperature_info;
+	indio_dev->name = TEMP_DRV_NAME;
+
+	ret = devm_iio_device_register(&pdev->dev, indio_dev);
+	if (ret)
+		goto free_channels;
+
+	dev_info(&pdev->dev, "%s temperature sensor and ADC registered.\n",
+		 indio_dev->name);
+
+
+	return 0;
+
+free_channels:
+	del_timer_sync(&st->poll_timer);
+	kfree(indio_dev->channels);
+free_device:
+	devm_iio_device_free(&pdev->dev, indio_dev);
+put_chip:
+	p7_adc_put_iio_chip(P7MUADC_IIO_TEMP);
+
+	return ret;
+}
+
+/*
+ * Remove: tear down everything acquired by temp_gen_probe().
+ * Fix: the poll timer is now stopped first and synchronously
+ * (del_timer_sync) — the original called del_timer() only after the
+ * IIO device had been unregistered, so a concurrently running handler
+ * could still push events to (and re-arm against) a device being
+ * destroyed.
+ */
+static int __devexit temp_gen_remove(struct platform_device* pdev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
+	struct temp_gen_state *st = iio_priv(indio_dev);
+
+	del_timer_sync(&st->poll_timer);
+
+	p7_adc_put_iio_chip(st->type);
+	devm_iio_device_unregister(&pdev->dev, indio_dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	kfree(indio_dev->channels);
+	devm_iio_device_free(&pdev->dev, indio_dev);
+
+	return 0;
+}
+
+static struct platform_driver temp_gen_driver = {
+ .driver = {
+ .name = TEMP_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = temp_gen_probe,
+ .remove = __devexit_p(temp_gen_remove),
+};
+
+module_platform_driver(temp_gen_driver);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("Temperature generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/bldc/Kconfig b/drivers/parrot/iio/bldc/Kconfig
new file mode 100644
index 00000000000000..f6c81e01d06c8f
--- /dev/null
+++ b/drivers/parrot/iio/bldc/Kconfig
@@ -0,0 +1,20 @@
+#
+# BLDC drivers
+#
+
+config IIO_PARROT_BLDC_CYPRESS
+ tristate "PARROT BLDC CYPRESS IIO driver"
+ depends on I2C && SYSFS
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the BLDC cypress driver.
+
+config IIO_PARROT_BLDC_CYPRESS_I2C
+ tristate "PARROT BLDC CYPRESS IIO i2c driver"
+ depends on I2C && SYSFS
+ select IIO_PARROT_BLDC_CYPRESS
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the BLDC cypress i2c driver.
diff --git a/drivers/parrot/iio/bldc/Makefile b/drivers/parrot/iio/bldc/Makefile
new file mode 100644
index 00000000000000..0e99331f79dfc6
--- /dev/null
+++ b/drivers/parrot/iio/bldc/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for BLDC cypress IIO driver.
+#
+ccflags-y += -I$(srctree)/drivers/parrot
+
+obj-$(CONFIG_IIO_PARROT_BLDC_CYPRESS) += bldc-cypress.o
+bldc-cypress-objs := bldc_cypress_core.o bldc_i2c.o bldc_cypress_ring.o
+
+obj-$(CONFIG_IIO_PARROT_BLDC_CYPRESS_I2C) += bldc_cypress_i2c.o
diff --git a/drivers/parrot/iio/bldc/bldc_cypress_core.c b/drivers/parrot/iio/bldc/bldc_cypress_core.c
new file mode 100644
index 00000000000000..ee082557141368
--- /dev/null
+++ b/drivers/parrot/iio/bldc/bldc_cypress_core.c
@@ -0,0 +1,434 @@
+/**
+ ************************************************
+ * @file bldc_cypress_core.c
+ * @brief BLDC cypress IIO driver
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/bldc/parrot_bldc_cypress_iio.h>
+#include <linux/iio/bldc/parrot_bldc_iio.h>
+#include <iio/platform_data/bldc_cypress.h>
+
+/* Default IIO channel table.
+ * The four motor-speed channels MUST stay at the head of the table:
+ * probe remaps their scan_index through the platform motor LUT.
+ * Status/errno/fault bytes are exposed as IIO_CURRENT channels with
+ * extended names; the last entry is the software timestamp. */
+static const struct iio_chan_spec bldc_cypress_channels[] = {
+	/* keep 4th first motors channels at first table place */
+	PARROT_BLDC_OBS_CHAN	(SPEED_MOTOR1, 0, IIO_ANGL_VEL, 16, 16),
+	PARROT_BLDC_OBS_CHAN	(SPEED_MOTOR2, 1, IIO_ANGL_VEL, 16, 16),
+	PARROT_BLDC_OBS_CHAN	(SPEED_MOTOR3, 2, IIO_ANGL_VEL, 16, 16),
+	PARROT_BLDC_OBS_CHAN	(SPEED_MOTOR4, 3, IIO_ANGL_VEL, 16, 16),
+	PARROT_BLDC_OBS_CHAN	(BATT_VOLTAGE, 0, IIO_VOLTAGE, 16, 16),
+	PARROT_BLDC_OBS_CHAN_EXT_NAME(STATUS, 0, IIO_CURRENT, 8, 8),
+	PARROT_BLDC_OBS_CHAN_EXT_NAME(ERRNO, 1, IIO_CURRENT, 8, 8),
+	PARROT_BLDC_OBS_CHAN_EXT_NAME(FAULT_MOTOR, 2, IIO_CURRENT, 4, 8),
+	PARROT_BLDC_OBS_CHAN	(TEMP, 0, IIO_TEMP, 8, 8),
+	IIO_CHAN_SOFT_TIMESTAMP(PARROT_BLDC_SCAN_TIMESTAMP),
+};
+
+/* sysfs 'clear_error' store: send a zero-length CLEAR_ERROR command so
+ * the controller drops its latched error state. */
+static ssize_t bldc_cypress_store_clear_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct bldc_state *st = iio_priv(dev_to_iio_dev(dev));
+	int err;
+
+	err = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_CLEAR_ERROR, 0, NULL);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/* sysfs 'led' store: parse a decimal value and program the two LED
+ * control bits.  Bit 2 (the P7 reboot request) is always masked off
+ * here; it is only raised via the 'reboot' attribute. */
+static ssize_t bldc_cypress_store_led(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct bldc_state *st = iio_priv(dev_to_iio_dev(dev));
+	u8 val;
+	int err;
+
+	if (kstrtou8(buf, 10, &val))
+		return -EINVAL;
+
+	/* Set P7 reboot gpio bit to zero */
+	val &= 0x3;
+	err = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_LED, 1, &val);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/*
+ * sysfs 'motors_speed' store: parse four space-separated speeds,
+ * reorder them through the motor LUT and send them to the controller.
+ *
+ * Frame layout (10 bytes): 4 x __be16 speeds, one "enable security"
+ * byte forced to 0, and a final XOR checksum seeded with the register
+ * address.
+ *
+ * Fix: the write cursor was declared 'unsigned short *' but
+ * initialized from a '__be16 *' cast — declare it with the correct
+ * sparse-clean __be16 type (no behavior change).
+ */
+static ssize_t bldc_cypress_store_motors_speed(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct bldc_state *st = iio_priv(indio_dev);
+	int ret, i;
+	u8 data[10];
+	u8 crc;
+	unsigned short m[4];
+	__be16 *pm = (__be16 *)&data[0];
+
+	ret = sscanf(buf, "%hu %hu %hu %hu",
+		     &m[0],
+		     &m[1],
+		     &m[2],
+		     &m[3]);
+	if (ret != 4)
+		return -EINVAL;
+
+	/* reorder logical speeds into physical motor order */
+	for (i = 0; i < 4; i++)
+		*pm++ = cpu_to_be16(m[st->pdata.lut[i]]);
+
+	data[8] = 0; /* force enable security to 0 */
+
+	/* XOR checksum over register address and the 9 payload bytes */
+	crc = PARROT_BLDC_REG_REF_SPEED;
+	for (i = 0; i < 9; i++)
+		crc ^= data[i];
+	data[9] = crc;
+
+	ret = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_REF_SPEED, 10, data);
+
+	return ret == 0 ? count : ret;
+}
+
+/* sysfs 'reboot' store: raise bit 2 of the LED register, which the
+ * controller interprets as a P7 reboot request (input value ignored). */
+static ssize_t bldc_cypress_store_reboot(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct bldc_state *st = iio_priv(dev_to_iio_dev(dev));
+	u8 reboot_bit = 4;
+	int err;
+
+	err = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_LED, 1, &reboot_bit);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/* Send the START command with a spin-direction bitmap.
+ * @spin_dir is given in logical motor order; the platform LUT is
+ * applied so the controller receives it in physical motor order.
+ * Returns 0 on success or the negative transfer error. */
+static ssize_t bldc_cypress_start_with_dir(struct bldc_state *st, u8 spin_dir)
+{
+	int ret, i;
+	u8 data;
+
+	/* apply lut on spin direction */
+	data = 0;
+	for (i = 0; i < 4; i++) {
+		if (spin_dir & (1 << st->pdata.lut[i]))
+			data |= 1 << i;
+	}
+
+	ret = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_START_PROP, 1, &data);
+
+	return ret;
+}
+
+/* sysfs 'start' store: start the motors using the spin direction mask
+ * configured in platform data (input value ignored). */
+static ssize_t bldc_cypress_store_start(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct bldc_state *st = iio_priv(indio_dev);
+	int err;
+
+	err = bldc_cypress_start_with_dir(st, st->pdata.spin_dir);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/* sysfs 'start_with_dir' store: start the motors with a caller-supplied
+ * spin direction bitmap (decimal, logical motor order). */
+static ssize_t bldc_cypress_store_start_with_dir(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct bldc_state *st = iio_priv(indio_dev);
+	u8 dir;
+	int err;
+
+	if (kstrtou8(buf, 10, &dir))
+		return -EINVAL;
+
+	err = bldc_cypress_start_with_dir(st, dir);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/* sysfs 'stop' store: send the zero-length STOP command. */
+static ssize_t bldc_cypress_store_stop(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct bldc_state *st = iio_priv(dev_to_iio_dev(dev));
+	int err;
+
+	err = st->tf->write_multiple_byte(st->dev,
+			PARROT_BLDC_REG_STOP_PROP, 0, NULL);
+	if (err)
+		return err;
+
+	return count;
+}
+
+/*
+ * sysfs show callback shared by the informational attributes.
+ *
+ * Reads the full PARROT_BLDC_REG_INFO block and formats the field
+ * selected by the attribute address into @buf.
+ *
+ * Fixes: (1) the kzalloc() result was used unchecked (NULL deref on
+ * allocation failure); (2) the function returned the number of bytes
+ * read over I2C instead of the number of characters written to @buf,
+ * which violates the sysfs show convention and exposed stale bytes.
+ */
+static ssize_t bldc_cypress_show_info(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct bldc_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int result;
+	__u8 *ibuf;
+	unsigned short val16;
+	unsigned int val32;
+
+	ibuf = kzalloc(PARROT_BLDC_GET_INFO_LENGTH, GFP_KERNEL);
+	if (!ibuf)
+		return -ENOMEM;
+
+	result = st->tf->read_multiple_byte(st->dev,
+			PARROT_BLDC_REG_INFO,
+			PARROT_BLDC_GET_INFO_LENGTH, ibuf);
+	if (result != PARROT_BLDC_GET_INFO_LENGTH) {
+		result = -EINVAL;
+		goto free_buf;
+	}
+
+	switch ((u32)this_attr->address) {
+	case ATTR_INFO_SOFT_VERSION:
+		result = sprintf(buf, "%d %d %c %d\n", ibuf[0],
+				 ibuf[1],
+				 ibuf[2],
+				 ibuf[3]);
+		break;
+	case ATTR_INFO_FT_NB_FLIGHTS:
+		val16 = ibuf[4] << 8 | ibuf[5];
+		result = sprintf(buf, "%hu\n", val16);
+		break;
+	case ATTR_INFO_FT_PREVIOUS_TIME:
+		val16 = ibuf[6] << 8 | ibuf[7];
+		result = sprintf(buf, "%hu\n", val16);
+		break;
+	case ATTR_INFO_FT_TOTAL_TIME:
+		val32 = ibuf[8] << 24 | ibuf[9] << 16 | ibuf[10] << 8 | ibuf[11];
+		result = sprintf(buf, "%u\n", val32);
+		break;
+	case ATTR_INFO_FT_LAST_ERROR:
+		result = sprintf(buf, "%d\n", ibuf[12]);
+		break;
+	default:
+		result = -ENODEV;
+	}
+
+free_buf:
+	kfree(ibuf);
+
+	return result;
+}
+
+/* sysfs 'spin_dir' show: report the configured spin direction mask. */
+static ssize_t bldc_cypress_show_spin_dir(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct bldc_state *st = iio_priv(dev_to_iio_dev(dev));
+
+	if (st == NULL)
+		return -ENOENT;
+
+	return sprintf(buf, "%d\n", st->pdata.spin_dir);
+}
+
+/* sysfs attributes: write-only command attributes (start/stop/led/...)
+ * and read-only informational attributes, all collected in
+ * inv_attribute_group which is referenced from bldc_cypress_info. */
+static IIO_DEVICE_ATTR(start,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_start,
+		       ATTR_START_MOTORS);
+
+static IIO_DEVICE_ATTR(start_with_dir,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_start_with_dir,
+		       ATTR_START_MOTORS_WITH_DIR);
+
+static IIO_DEVICE_ATTR(stop,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_stop,
+		       ATTR_STOP_MOTORS);
+
+static IIO_DEVICE_ATTR(led,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_led,
+		       ATTR_LED);
+
+static IIO_DEVICE_ATTR(reboot,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_reboot,
+		       ATTR_REBOOT);
+
+static IIO_DEVICE_ATTR(clear_error,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_clear_error,
+		       ATTR_CLEAR_ERROR);
+
+static IIO_DEVICE_ATTR(motors_speed,
+		       S_IWUSR,
+		       NULL,
+		       bldc_cypress_store_motors_speed,
+		       ATTR_MOTORS_SPEED);
+
+static IIO_DEVICE_ATTR(soft_version,
+		       S_IRUGO,
+		       bldc_cypress_show_info,
+		       NULL,
+		       ATTR_INFO_SOFT_VERSION);
+
+static IIO_DEVICE_ATTR(nb_flights,
+		       S_IRUGO,
+		       bldc_cypress_show_info,
+		       NULL,
+		       ATTR_INFO_FT_NB_FLIGHTS);
+
+static IIO_DEVICE_ATTR(previous_flight_time,
+		       S_IRUGO,
+		       bldc_cypress_show_info,
+		       NULL,
+		       ATTR_INFO_FT_PREVIOUS_TIME);
+
+static IIO_DEVICE_ATTR(total_flight_time,
+		       S_IRUGO,
+		       bldc_cypress_show_info,
+		       NULL,
+		       ATTR_INFO_FT_TOTAL_TIME);
+
+static IIO_DEVICE_ATTR(last_flight_error,
+		       S_IRUGO,
+		       bldc_cypress_show_info,
+		       NULL,
+		       ATTR_INFO_FT_LAST_ERROR);
+
+static IIO_DEVICE_ATTR(spin_dir,
+		       S_IRUGO,
+		       bldc_cypress_show_spin_dir,
+		       NULL,
+		       ATTR_INFO_SPIN_DIR);
+
+static struct attribute *inv_attributes[] = {
+	&iio_dev_attr_start.dev_attr.attr,
+	&iio_dev_attr_start_with_dir.dev_attr.attr,
+	&iio_dev_attr_stop.dev_attr.attr,
+	&iio_dev_attr_led.dev_attr.attr,
+	&iio_dev_attr_reboot.dev_attr.attr,
+	&iio_dev_attr_clear_error.dev_attr.attr,
+	&iio_dev_attr_motors_speed.dev_attr.attr,
+	&iio_dev_attr_soft_version.dev_attr.attr,
+	&iio_dev_attr_nb_flights.dev_attr.attr,
+	&iio_dev_attr_previous_flight_time.dev_attr.attr,
+	&iio_dev_attr_total_flight_time.dev_attr.attr,
+	&iio_dev_attr_last_flight_error.dev_attr.attr,
+	&iio_dev_attr_spin_dir.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group inv_attribute_group = {
+	.attrs = inv_attributes
+};
+
+/*
+ * bldc_update_scan_mode() - (re)allocate the trigger handler buffer.
+ *
+ * The buffer holds indio_dev->scan_bytes bytes (pushed to the IIO core)
+ * followed by the raw PARROT_BLDC_OBS_DATA_LENGTH observation frame
+ * read from the controller; see bldc_cypress_read_fifo().
+ */
+static int bldc_update_scan_mode(struct iio_dev *indio_dev,
+				 const unsigned long *scan_mask)
+{
+	struct bldc_state *st = iio_priv(indio_dev);
+
+	/* free buffer sized for the previous scan mask (kfree(NULL) ok) */
+	kfree(st->buffer);
+
+	st->buffer = kmalloc(indio_dev->scan_bytes +
+			     PARROT_BLDC_OBS_DATA_LENGTH,
+			     GFP_KERNEL);
+	if (st->buffer == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* IIO callbacks and sysfs attributes for the BLDC device. */
+static const struct iio_info bldc_cypress_info = {
+	.update_scan_mode	= bldc_update_scan_mode,
+	.driver_module		= THIS_MODULE,
+	.attrs			= &inv_attribute_group,
+};
+
+/*
+ * bldc_cypress_probe() - common core probe, bus-agnostic.
+ *
+ * Expects st->dev, st->tf and st->pdata to be filled by the bus glue
+ * (e.g. bldc_i2c_configure()).  Installs the channel table (remapping
+ * the first four scan indexes through the motor LUT), sets up the
+ * triggered buffer and registers the IIO device.
+ *
+ * Fix: the error path after a failed devm_iio_device_register() now
+ * calls iio_triggered_buffer_cleanup(); previously the triggered
+ * buffer setup was leaked.
+ */
+int bldc_cypress_probe(struct iio_dev *indio_dev)
+{
+	struct bldc_state *st;
+	int result, i;
+
+	st = iio_priv(indio_dev);
+
+	mutex_init(&st->mutex);
+
+	/* copy default channels */
+	memcpy(st->channels, bldc_cypress_channels, sizeof(st->channels));
+
+	/* update first 4th channels scan_index given motor lut */
+	for (i = 0; i < 4; i++)
+		st->channels[i].scan_index = st->pdata.lut[i];
+
+	indio_dev->channels = st->channels;
+	indio_dev->num_channels = ARRAY_SIZE(st->channels);
+	indio_dev->info = &bldc_cypress_info;
+	indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+
+	result = iio_triggered_buffer_setup(indio_dev,
+					    NULL,
+					    bldc_cypress_read_fifo,
+					    NULL);
+	if (result) {
+		dev_err(st->dev, "configure buffer fail %d\n",
+			result);
+		return result;
+	}
+
+	result = devm_iio_device_register(st->dev, indio_dev);
+	if (result) {
+		dev_err(st->dev, "IIO register fail %d\n", result);
+		goto out_remove_trigger;
+	}
+
+	dev_info(st->dev,
+		 "PARROT BLDC (%s) registered\n",
+		 indio_dev->name);
+
+	return 0;
+
+out_remove_trigger:
+	/* undo iio_triggered_buffer_setup(); was leaked before */
+	iio_triggered_buffer_cleanup(indio_dev);
+	return result;
+}
+EXPORT_SYMBOL(bldc_cypress_probe);
+
+/* Common core teardown, mirror of bldc_cypress_probe(): unregister the
+ * IIO device, tear down the triggered buffer, and free the scan buffer
+ * allocated by bldc_update_scan_mode() (kfree(NULL) is a no-op). */
+void bldc_cypress_remove(struct iio_dev *indio_dev)
+{
+	struct bldc_state *st = iio_priv(indio_dev);
+
+	devm_iio_device_unregister(st->dev, indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	kfree(st->buffer);
+}
+EXPORT_SYMBOL(bldc_cypress_remove);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("Parrot BLDC cypress IIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/bldc/bldc_cypress_i2c.c b/drivers/parrot/iio/bldc/bldc_cypress_i2c.c
new file mode 100644
index 00000000000000..73d24509e1b40a
--- /dev/null
+++ b/drivers/parrot/iio/bldc/bldc_cypress_i2c.c
@@ -0,0 +1,100 @@
+/**
+ ************************************************
+ * @file bldc_cypress_i2c.c
+ * @brief BLDC cypress IIO driver
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/bldc/parrot_bldc_cypress_iio.h>
+#include <linux/iio/bldc/parrot_bldc_iio.h>
+
+#define BLDC_CYPRESS_DRV_NAME "bldc-cypress-i2c"
+
+/* Fallback platform data when the board provides none:
+ * identity motor LUT and alternating spin directions. */
+static const struct bldc_cypress_platform_data default_pdata = {
+	.lut = {0, 1, 2, 3},
+	.spin_dir = 0x5, /* 0b0101, which is CW/CCW/CW/CCW */
+};
+
+/* I2C bus probe: allocate the IIO device (devm-managed), install the
+ * board platform data or the built-in default, wire the I2C transfer
+ * operations, then delegate to the bus-agnostic core probe. */
+static int bldc_cypress_i2c_probe(struct i2c_client *client,
+				  const struct i2c_device_id *id)
+{
+	struct bldc_state *st;
+	const struct bldc_cypress_platform_data *pdata;
+
+	struct iio_dev *indio_dev;
+
+	/* SMBus I2C block transfers are required by the bldc_i2c accessors */
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -ENOSYS;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	pdata = dev_get_platdata(&client->dev);
+	if (!pdata)
+		pdata = &default_pdata;
+
+	memcpy(&st->pdata, pdata, sizeof(st->pdata));
+	dev_info(&client->dev, "lut={%d, %d, %d, %d} spin_dir=0x%X\n",
+		 st->pdata.lut[0], st->pdata.lut[1], st->pdata.lut[2],
+		 st->pdata.lut[3], st->pdata.spin_dir);
+
+	bldc_i2c_configure(indio_dev, client, st);
+
+	return bldc_cypress_probe(indio_dev);
+}
+
+/* I2C bus remove: core teardown, then release the devm IIO device. */
+static int bldc_cypress_i2c_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	bldc_cypress_remove(indio_dev);
+	devm_iio_device_free(&client->dev, indio_dev);
+	return 0;
+}
+
+/*
+ * device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct i2c_device_id bldc_cypress_id[] = {
+	{BLDC_CYPRESS_DRV_NAME, 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, bldc_cypress_id);
+
+/* I2C driver glue: binds on "bldc-cypress-i2c" clients. */
+static struct i2c_driver bldc_cypress_driver = {
+	.probe		=	bldc_cypress_i2c_probe,
+	.remove		=	bldc_cypress_i2c_remove,
+	.id_table	=	bldc_cypress_id,
+	.driver = {
+		.owner	=	THIS_MODULE,
+		.name	=	BLDC_CYPRESS_DRV_NAME,
+	},
+};
+
+module_i2c_driver(bldc_cypress_driver);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("Parrot BLDC cypress IIO i2c driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/bldc/bldc_cypress_ring.c b/drivers/parrot/iio/bldc/bldc_cypress_ring.c
new file mode 100644
index 00000000000000..2107de6545e5ea
--- /dev/null
+++ b/drivers/parrot/iio/bldc/bldc_cypress_ring.c
@@ -0,0 +1,115 @@
+/**
+ ************************************************
+ * @file bldc_cypress_ring.c
+ * @brief BLDC cypress IIO driver
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+
+#include <linux/iio/bldc/parrot_bldc_cypress_iio.h>
+#include <linux/iio/bldc/parrot_bldc_iio.h>
+
+/**
+ * bldc_cypress_read_fifo() - Transfer data from hardware FIFO to KFIFO.
+ *
+ * Triggered-buffer pollfunc: reads one observation frame from the
+ * controller, verifies its XOR checksum, compacts the enabled channels
+ * into the scan buffer and pushes it with a timestamp.  st->buffer is
+ * laid out as [scan_bytes for the IIO push][raw observation frame];
+ * see bldc_update_scan_mode().
+ */
+irqreturn_t bldc_cypress_read_fifo(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	const struct iio_chan_spec *chan;
+	struct bldc_state *st = iio_priv(indio_dev);
+	int result, i, crc;
+	u8 *tx, *rx;
+
+	mutex_lock(&st->mutex);
+
+	/* tx walks the scan area, rx walks the raw frame after it */
+	tx = st->buffer;
+	rx = tx + indio_dev->scan_bytes;
+
+	result = st->tf->read_multiple_byte(st->dev,
+			PARROT_BLDC_REG_OBS_DATA,
+			PARROT_BLDC_OBS_DATA_LENGTH, rx);
+
+	if (result != PARROT_BLDC_OBS_DATA_LENGTH) {
+		mutex_unlock(&st->mutex);
+		goto err_read;
+	}
+
+	/* XOR of all payload bytes must match the trailing checksum byte */
+	crc = 0;
+	for (i = 0; i < PARROT_BLDC_OBS_DATA_LENGTH - 1; ++i)
+		crc ^= *rx++;
+
+	if (crc != *rx) {
+		mutex_unlock(&st->mutex);
+		goto err_read;
+	}
+
+	/* rewind rx and compact only the channels enabled in scan_mask */
+	rx = tx + indio_dev->scan_bytes;
+	chan = indio_dev->channels;
+	for (i = 0; i < indio_dev->num_channels; i++, chan++) {
+		if (!test_bit(chan->scan_index, indio_dev->buffer->scan_mask)) {
+			/* skip 1 or 2 raw bytes for a disabled channel */
+			if (chan->scan_type.storagebits == 16)
+				rx++;
+			rx++;
+			continue;
+		}
+
+		if (chan->scan_type.storagebits == 16) {
+			/* NOTE(review): swaps wire big-endian byte order;
+			 * must match the channel spec endianness — confirm */
+			__be16 data = *((__be16 *)rx);
+			*tx++ = data & 0xff;
+			*tx++ = (data >> 8) & 0xff;
+			rx += 2;
+		} else {
+			if (chan->scan_index == PARROT_BLDC_SCAN_OBS_DATA_FAULT_MOTOR) {
+				/* apply lut for fault motors */
+				uint8_t j, new_fmot, fmot = *rx++;
+				for (new_fmot = 0, j = 0; j < 4; j++, fmot >>= 1)
+					new_fmot |= (fmot & 0x01) << st->pdata.lut[j];
+				*tx++ = new_fmot;
+			} else {
+				*tx++ = *rx++;
+			}
+		}
+	}
+
+	mutex_unlock(&st->mutex);
+
+	result = iio_push_to_buffers_with_timestamp(indio_dev,
+						    st->buffer,
+						    iio_get_time_ns());
+	if (result) {
+		/* only warn once in overflow (no iio reader) */
+		if (result != -EBUSY || !st->is_overflow)
+			dev_err(st->dev, "IIO push data failed(%d)\n", result);
+
+		/* update overflow flag */
+		if (result == -EBUSY && !st->is_overflow)
+			st->is_overflow = 1;
+
+	} else if (st->is_overflow) {
+		st->is_overflow = 0;
+	}
+
+err_read:
+	/* Flush HW and SW FIFOs. */
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
diff --git a/drivers/parrot/iio/bldc/bldc_i2c.c b/drivers/parrot/iio/bldc/bldc_i2c.c
new file mode 100644
index 00000000000000..413e7d1a24a07a
--- /dev/null
+++ b/drivers/parrot/iio/bldc/bldc_i2c.c
@@ -0,0 +1,55 @@
+/**
+ ************************************************
+ * @file bldc_i2c.c
+ * @brief BLDC IIO driver
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/bldc/parrot_bldc_iio.h>
+
+/* Read @len bytes starting at @reg_addr via an SMBus I2C block read;
+ * returns the number of bytes read or a negative error. */
+static int bldc_i2c_read_multiple_byte(struct device *dev,
+		u8 reg_addr, int len, u8 *data)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	return i2c_smbus_read_i2c_block_data(client, reg_addr, len, data);
+}
+
+/* Write @len bytes at @reg_addr via an SMBus I2C block write;
+ * returns 0 on success or a negative error. */
+static int bldc_i2c_write_multiple_byte(struct device *dev,
+		u8 reg_addr, int len, u8 *data)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	return i2c_smbus_write_i2c_block_data(client, reg_addr, len, data);
+}
+
+/* I2C implementation of the generic BLDC transfer operations. */
+static const struct bldc_transfer_function bldc_tf_i2c = {
+	.write_multiple_byte = bldc_i2c_write_multiple_byte,
+	.read_multiple_byte = bldc_i2c_read_multiple_byte,
+};
+
+/* Bind an IIO device to its i2c client: store the IIO device as client
+ * data, inherit the client name and install the i2c transfer ops in
+ * @st for use by the bus-agnostic core. */
+void bldc_i2c_configure(struct iio_dev *indio_dev,
+		struct i2c_client *client, struct bldc_state *st)
+{
+	i2c_set_clientdata(client, indio_dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = client->name;
+
+	st->dev = &client->dev;
+	st->tf = &bldc_tf_i2c;
+}
+EXPORT_SYMBOL(bldc_i2c_configure);
+
+MODULE_AUTHOR("Karl Leplat <karl.leplat@parrot.com>");
+MODULE_DESCRIPTION("Parrot BLDC IIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/imu/Kconfig b/drivers/parrot/iio/imu/Kconfig
new file mode 100644
index 00000000000000..66229e9ec52389
--- /dev/null
+++ b/drivers/parrot/iio/imu/Kconfig
@@ -0,0 +1,10 @@
+#
+# IIO imu drivers configuration
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Inertial measurement units"
+
+source "drivers/parrot/iio/imu/inv_mpu6050/Kconfig"
+
+endmenu
diff --git a/drivers/parrot/iio/imu/Makefile b/drivers/parrot/iio/imu/Makefile
new file mode 100644
index 00000000000000..27a22483de6f17
--- /dev/null
+++ b/drivers/parrot/iio/imu/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Inertial Measurement Units
+#
+
+# When adding new entries keep the list in alphabetical order
+ccflags-y += -I$(srctree)/drivers/parrot
+
+obj-y += inv_mpu6050/
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/Kconfig b/drivers/parrot/iio/imu/inv_mpu6050/Kconfig
new file mode 100644
index 00000000000000..361006e26069f7
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/Kconfig
@@ -0,0 +1,16 @@
+#
+# inv-mpu6050 drivers for Invensense MPU devices and combos
+#
+
+config PARROT_IIO_INV_MPU6050
+ tristate "Invensense MPU6050 devices"
+ depends on I2C && SYSFS
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ This driver supports the Invensense MPU6050 devices.
+ This driver can also support MPU6500 in MPU6050 compatibility mode
+ and also in MPU6500 mode with some limitations.
+ It is a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050.
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/Makefile b/drivers/parrot/iio/imu/inv_mpu6050/Makefile
new file mode 100644
index 00000000000000..0e7782074ab859
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Invensense MPU6050 device.
+#
+ccflags-y += -I$(srctree)/drivers/parrot
+
+obj-$(CONFIG_PARROT_IIO_INV_MPU6050) += inv-mpu6050.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_core.c
new file mode 100644
index 00000000000000..a2f7a83cc51184
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -0,0 +1,1290 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+//#define DEBUG
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * this is the gyro scale translated from dynamic range plus/minus
+ * {250, 500, 1000, 2000} to rad/s
+ *
+ * NOTE: the floating-point expressions below are evaluated at compile
+ * time (static const initializers), so no FP code runs in the kernel;
+ * the resulting integers are in nano rad/s per LSB.
+ */
+/* pi */
+# define M_PI 3.14159265358979323846
+static const int gyro_scale_6050[NUM_MPU6050_FSR] = {
+	[INV_MPU6050_FSR_250DPS]  = (M_PI*1000000000LL)/(180*131.0), //133090,
+	[INV_MPU6050_FSR_500DPS]  = (M_PI*1000000000LL)/(180*65.5),  //266181,
+	[INV_MPU6050_FSR_1000DPS] = (M_PI*1000000000LL)/(180*32.8),  //532362,
+	[INV_MPU6050_FSR_2000DPS] = (M_PI*1000000000LL)/(180*16.4),  //1064724
+};
+
+/*
+ * this is the accel scale translated from dynamic range plus/minus
+ * {2, 4, 8, 16} to m/s^2 (nano m/s^2 per LSB, compile-time constants)
+ */
+/* standard acceleration of gravity (gee, free-fall on Earth) */
+#define G_TO_MS2	9.80665
+static const int accel_scale[NUM_ACCL_FSR] = {
+	[INV_MPU6050_FS_02G] = (G_TO_MS2*1000000000LL)/16384, //598,
+	[INV_MPU6050_FS_04G] = (G_TO_MS2*1000000000LL)/8192,  //1196,
+	[INV_MPU6050_FS_08G] = (G_TO_MS2*1000000000LL)/4096,  //2392,
+	[INV_MPU6050_FS_16G] = (G_TO_MS2*1000000000LL)/2048,  //4785
+};
+
+/* temperature scale per chip variant: {integer, micro} pair */
+static const unsigned long temp_scale[INV_NUM_PARTS][2] = {
+	[INV_MPU6050] = {0, INV_MPU6050_TEMP_SCALE},
+	[INV_MPU6500] = {0, INV_MPU6500_TEMP_SCALE}
+};
+
+/* temperature offset per chip variant: {integer, micro} pair */
+static const unsigned long temp_offset[INV_NUM_PARTS][2] = {
+	[INV_MPU6050] = {INV_MPU6050_TEMP_OFFSET, 0},
+	[INV_MPU6500] = {INV_MPU6500_TEMP_OFFSET, 0}
+};
+
+/* MPU6050 register map; int_pin_cfg_set=0 leaves INT_PIN_CFG at its
+ * reset defaults (see the bit description below). */
+static const struct inv_mpu6050_reg_map reg_set_6050 = {
+	.sample_rate_div	= INV_MPU6050_REG_SAMPLE_RATE_DIV,
+	.config			= INV_MPU6050_REG_CONFIG,
+	.user_ctrl		= INV_MPU6050_REG_USER_CTRL,
+	.fifo_en		= INV_MPU6050_REG_FIFO_EN,
+	.gyro_config		= INV_MPU6050_REG_GYRO_CONFIG,
+	.accl_config		= INV_MPU6050_REG_ACCEL_CONFIG,
+	.fifo_count_h		= INV_MPU6050_REG_FIFO_COUNT_H,
+	.fifo_r_w		= INV_MPU6050_REG_FIFO_R_W,
+	.raw_gyro		= INV_MPU6050_REG_RAW_GYRO,
+	.raw_accl		= INV_MPU6050_REG_RAW_ACCEL,
+	.temperature		= INV_MPU6050_REG_TEMPERATURE,
+	.int_pin_cfg		= INV_MPU6050_REG_INT_PIN_CFG,
+	.int_pin_cfg_set	= 0,
+	/* INT_LEVEL=0: active high,
+	 * INT_OPEN=0: push-pull
+	 * LATCH_INT_EN=0: 50 us long pulse
+	 * INT_RD_CLEAR=1: interrupt status bits are cleared on any read operation
+	 * FSYNC_INT_LEVEL=0: FSYNC active high
+	 * FSYNC_INT_EN=0: disables FSYNC interrupt
+	 * I2C_BYPASS_EN=0: no access the auxiliary i2c bus
+	 */
+	.int_enable		= INV_MPU6050_REG_INT_ENABLE,
+	.pwr_mgmt_1		= INV_MPU6050_REG_PWR_MGMT_1,
+	.pwr_mgmt_2		= INV_MPU6050_REG_PWR_MGMT_2,
+};
+
+/* MPU6500 register map; differs from the 6050 map only by
+ * int_pin_cfg_set, which sets INT_RD_CLEAR. */
+static const struct inv_mpu6050_reg_map reg_set_6500 = {
+	.sample_rate_div	= INV_MPU6050_REG_SAMPLE_RATE_DIV,
+	.config			= INV_MPU6050_REG_CONFIG,
+	.user_ctrl		= INV_MPU6050_REG_USER_CTRL,
+	.fifo_en		= INV_MPU6050_REG_FIFO_EN,
+	.gyro_config		= INV_MPU6050_REG_GYRO_CONFIG,
+	.accl_config		= INV_MPU6050_REG_ACCEL_CONFIG,
+	.fifo_count_h		= INV_MPU6050_REG_FIFO_COUNT_H,
+	.fifo_r_w		= INV_MPU6050_REG_FIFO_R_W,
+	.raw_gyro		= INV_MPU6050_REG_RAW_GYRO,
+	.raw_accl		= INV_MPU6050_REG_RAW_ACCEL,
+	.temperature		= INV_MPU6050_REG_TEMPERATURE,
+	.int_pin_cfg		= INV_MPU6050_REG_INT_PIN_CFG,
+	.int_pin_cfg_set	= INV_MPU6050_BIT_INT_RD_CLEAR,
+	/* INT_LEVEL=0: active high,
+	 * INT_OPEN=0: push-pull
+	 * LATCH_INT_EN=0: 50 us long pulse
+	 * INT_RD_CLEAR=1: interrupt status bits are cleared on any read operation
+	 * FSYNC_INT_LEVEL=0: FSYNC active high
+	 * FSYNC_INT_EN=0: disables FSYNC interrupt
+	 * I2C_BYPASS_EN=0: no access the auxiliary i2c bus
+	 */
+	.int_enable		= INV_MPU6050_REG_INT_ENABLE,
+	.pwr_mgmt_1		= INV_MPU6050_REG_PWR_MGMT_1,
+	.pwr_mgmt_2		= INV_MPU6050_REG_PWR_MGMT_2,
+};
+
+/* MPU6050 power-on configuration defaults. */
+static const struct inv_mpu6050_chip_config chip_config_6050 = {
+	.fsync = INV_MPU6050_FSYNC_ACCEL_ZOUT,
+	.clksel = INV_CLK_EXTERNAL_32_KHZ,
+	.fsr = INV_MPU6050_FSR_2000DPS,
+	.lpf = INV_MPU6050_FILTER_256HZ_NOLPF2,
+	.fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+	.fifo_period_ns = 1000000000LL/INV_MPU6050_INIT_FIFO_RATE,
+	.gyro_fifo_enable = false,
+	.accl_fifo_enable = false,
+	.accl_fs = INV_MPU6050_FS_04G,
+};
+
+/* MPU6500 power-on configuration defaults (PLL clock, 20 Hz LPF). */
+static const struct inv_mpu6050_chip_config chip_config_6500 = {
+	.fsync = INV_MPU6050_FSYNC_DISABLED,
+	.clksel = INV_CLK_PLL,
+	.fsr = INV_MPU6050_FSR_2000DPS,
+	.lpf = INV_MPU6050_FILTER_20HZ,
+	.fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+	.fifo_period_ns = 1000000000LL/INV_MPU6050_INIT_FIFO_RATE,
+	.gyro_fifo_enable = false,
+	.accl_fifo_enable = false,
+	.accl_fs = INV_MPU6050_FS_02G,
+};
+
+/* Per-part descriptors, indexed by chip_type. */
+static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+	[INV_MPU6050] = {
+		.num_reg = 117,
+		.name = "MPU6050",
+		.reg = &reg_set_6050,
+		.config = &chip_config_6050,
+	},
+	[INV_MPU6500] = {
+		.num_reg = 117,
+		.name = "MPU6500",
+		.reg = &reg_set_6500,
+		.config = &chip_config_6500,
+	},
+};
+
+/* Write a single byte @d to register @reg over an SMBus block write;
+ * returns 0 on success or a negative error. */
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
+{
+	u8 val = d;
+
+	return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &val);
+}
+
+/* Turn a sensor engine on or off via PWR_MGMT_2.
+ * @en:   true to take the engine out of standby, false to stop it.
+ * @mask: PWR_MGMT_2 standby bit(s), e.g. INV_MPU6050_BIT_PWR_GYRO_STBY.
+ * The clock source switching around gyro on/off is order-sensitive:
+ * the chip clock may only come from the gyro PLL while the gyro runs. */
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
+{
+	u8 d, mgmt_1;
+	int result;
+
+	/* switch clock needs to be careful. Only when gyro is on, can
+	   clock source be switched to gyro. Otherwise, it must be set to
+	   internal clock */
+	if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+		result = i2c_smbus_read_i2c_block_data(st->client,
+				       st->reg->pwr_mgmt_1, 1, &mgmt_1);
+		if (result != 1)
+			return result;
+
+		mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
+	}
+
+	if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
+		/* turning off gyro requires switch to internal clock first.
+		   Then turn off gyro engine */
+		mgmt_1 |= INV_CLK_INTERNAL;
+		result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+		if (result)
+			return result;
+	}
+
+	/* read-modify-write the standby bits in PWR_MGMT_2 */
+	result = i2c_smbus_read_i2c_block_data(st->client,
+				       st->reg->pwr_mgmt_2, 1, &d);
+	if (result != 1)
+		return result;
+	if (en)
+		d &= ~mask;
+	else
+		d |= mask;
+	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+	if (result)
+		return result;
+
+	if (en) {
+		/* Wait for output stabilize */
+		msleep(INV_MPU6050_TEMP_UP_TIME);
+		if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+			/* switch internal clock to PLL */
+			mgmt_1 |= st->chip_config.clksel;
+			result = inv_mpu6050_write_reg(st,
+					st->reg->pwr_mgmt_1, mgmt_1);
+			if (result)
+				return result;
+		}
+	}
+
+	return 0;
+}
+
+/* Put the chip in or out of sleep via PWR_MGMT_1; when powering on,
+ * wait INV_MPU6050_REG_UP_TIME for the part to come up. */
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
+{
+	u8 val = power_on ? 0 : INV_MPU6050_BIT_SLEEP;
+	int result;
+
+	result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, val);
+	if (result)
+		return result;
+
+	if (power_on)
+		msleep(INV_MPU6050_REG_UP_TIME);
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
+ *
+ * Configuration actually programmed below:
+ *   gyro FSR: +/- 2000 DPS
+ *   DLPF: 256Hz (no LPF2), FSYNC sampled on accel Z
+ *   sample rate divider derived from INV_MPU6050_INIT_FIFO_RATE
+ *   accel full scale: +/- 4G
+ * The chip is powered up for configuration and put back to sleep at
+ * the end; chip_config is reset to the per-part defaults.
+ */
+static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
+{
+	int result;
+	u8 d;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		return result;
+	d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+	result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+	if (result)
+		return result;
+
+	d = INV_MPU6050_FILTER_256HZ_NOLPF2 << INV_MPU6050_BIT_DLPF_CFG_SHIFT;
+	d |= INV_MPU6050_FSYNC_ACCEL_ZOUT << INV_MPU6050_BIT_EXT_SYNC_SET_SHIFT;
+	result = inv_mpu6050_write_reg(st, st->reg->config, d);
+	if (result)
+		return result;
+
+	/* When DLPF config is INV_MPU6050_FILTER_256HZ_NOLPF2, the sample
+	 * rate is 8000Hz */
+	d = (8 * INV_MPU6050_ONE_K_HZ) / INV_MPU6050_INIT_FIFO_RATE - 1;
+	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	if (result)
+		return result;
+
+	d = (INV_MPU6050_FS_04G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+	result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+	if (result)
+		return result;
+
+	memcpy(&st->chip_config, hw_info[st->chip_type].config,
+		sizeof(struct inv_mpu6050_chip_config));
+	result = inv_mpu6050_set_power_itg(st, false);
+
+	return result;
+}
+
+/* Read one big-endian 16-bit sensor word for @axis (IIO_MOD_X/Y/Z,
+ * two registers per axis starting at @reg) and sign-extend it into
+ * *val.  Returns IIO_VAL_INT on success, -EINVAL on a short read. */
+static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
+				int axis, int *val)
+{
+	int ind, result;
+	__be16 d;
+
+	/* each axis occupies 2 consecutive registers */
+	ind = (axis - IIO_MOD_X) * 2;
+	result = i2c_smbus_read_i2c_block_data(st->client, reg + ind,  2,
+						(u8 *)&d);
+	if (result != 2)
+		return -EINVAL;
+	*val = (short)be16_to_cpup(&d);
+
+	return IIO_VAL_INT;
+}
+
+/* IIO read_raw callback.
+ * For IIO_CHAN_INFO_RAW the relevant engine (gyro/accel) is powered up
+ * on demand when neither buffering nor its FIFO is enabled, sampled,
+ * then powered back down; transfer errors are OR-accumulated in
+ * 'result' and take precedence over 'ret'.  SCALE and OFFSET are served
+ * from the constant tables above without touching the hardware. */
+static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val,
+			      int *val2,
+			      long mask) {
+	struct inv_mpu6050_state  *st = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+	{
+		int ret, result;
+
+		ret = IIO_VAL_INT;
+		result = 0;
+		mutex_lock(&indio_dev->mlock);
+		if (!st->chip_config.enable) {
+			result = inv_mpu6050_set_power_itg(st, true);
+			if (result)
+				goto error_read_raw;
+		}
+		/* when enable is on, power is already on */
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			/* power the gyro engine only for this read */
+			if (!st->chip_config.gyro_fifo_enable ||
+				    !st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, true,
+						INV_MPU6050_BIT_PWR_GYRO_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			ret =  inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+						chan->channel2, val);
+			if (!st->chip_config.gyro_fifo_enable ||
+				    !st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, false,
+						INV_MPU6050_BIT_PWR_GYRO_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			break;
+		case IIO_ACCEL:
+			/* power the accel engine only for this read */
+			if (!st->chip_config.accl_fifo_enable ||
+				    !st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, true,
+						INV_MPU6050_BIT_PWR_ACCL_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
+						chan->channel2, val);
+			if (!st->chip_config.accl_fifo_enable ||
+				    !st->chip_config.enable) {
+				result = inv_mpu6050_switch_engine(st, false,
+						INV_MPU6050_BIT_PWR_ACCL_STBY);
+				if (result)
+					goto error_read_raw;
+			}
+			break;
+		case IIO_TEMP:
+			/* wait for stablization */
+			msleep(INV_MPU6050_SENSOR_UP_TIME);
+			inv_mpu6050_sensor_show(st, st->reg->temperature,
+							IIO_MOD_X, val);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+error_read_raw:
+		/* restore sleep state if buffering was not active */
+		if (!st->chip_config.enable)
+			result |= inv_mpu6050_set_power_itg(st, false);
+		mutex_unlock(&indio_dev->mlock);
+		if (result)
+			return result;
+
+		return ret;
+	}
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = 0;
+			*val2 = gyro_scale_6050[st->chip_config.fsr];
+
+			return IIO_VAL_INT_PLUS_NANO;
+		case IIO_ACCEL:
+			*val = 0;
+			*val2 = accel_scale[st->chip_config.accl_fs];
+
+			return IIO_VAL_INT_PLUS_NANO;
+		case IIO_TEMP:
+			*val = temp_scale[st->chip_type][0];
+			*val2 = temp_scale[st->chip_type][1];
+
+			return IIO_VAL_INT_PLUS_MICRO;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_OFFSET:
+		switch (chan->type) {
+		case IIO_TEMP:
+			*val = temp_offset[st->chip_type][0];
+			*val2 = temp_offset[st->chip_type][1];
+
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * inv_mpu6050_write_gyro_scale() - program a new gyro full-scale range.
+ *
+ * @val/@val2 are the IIO INT_PLUS_NANO pair; the integer part must be
+ * zero since every supported scale is a pure fraction.  On a match the
+ * FSR index is written to the gyro config register and cached in
+ * chip_config.fsr.  Returns 0 on success, -EINVAL for an unknown scale,
+ * or the register-write error.
+ */
+static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val, int val2)
+{
+	int i;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); i++) {
+		int err;
+		u8 cfg;
+
+		if (gyro_scale_6050[i] != val2)
+			continue;
+
+		cfg = i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT;
+		err = inv_mpu6050_write_reg(st, st->reg->gyro_config, cfg);
+		if (err)
+			return err;
+		st->chip_config.fsr = i;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * inv_mpu6050_write_accel_scale() - program a new accel full-scale range.
+ *
+ * Mirror of inv_mpu6050_write_gyro_scale() for the accelerometer:
+ * the INT_PLUS_NANO integer part must be zero, and @val2 must match one
+ * of the entries in accel_scale[].  Caches the new index in
+ * chip_config.accl_fs on success.
+ */
+static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val, int val2)
+{
+	int i;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(accel_scale); i++) {
+		int err;
+		u8 cfg;
+
+		if (accel_scale[i] != val2)
+			continue;
+
+		cfg = i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT;
+		err = inv_mpu6050_write_reg(st, st->reg->accl_config, cfg);
+		if (err)
+			return err;
+		st->chip_config.accl_fs = i;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * inv_mpu6050_write_raw_get_fmt() - report the fixed-point format that
+ * write_raw expects for a writable attribute.
+ *
+ * Only the SCALE attribute of the gyro and accel channels is writable;
+ * both use the INT_PLUS_NANO representation.  Everything else is
+ * rejected with -EINVAL.  (The original had unreachable break statements
+ * after each return and a dead trailing return; behavior is unchanged.)
+ */
+static int inv_mpu6050_write_raw_get_fmt(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				long mask)
+{
+	if (mask != IIO_CHAN_INFO_SCALE)
+		return -EINVAL;
+
+	switch (chan->type) {
+	case IIO_ANGL_VEL:
+	case IIO_ACCEL:
+		return IIO_VAL_INT_PLUS_NANO;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * inv_mpu6050_write_raw() - IIO write_raw callback.
+ *
+ * Scale may only be changed while the chip is stopped, so an enabled
+ * chip yields -EBUSY without touching the power state (the original
+ * jumped to a cleanup path that powered down a chip it never powered
+ * up, and OR-combined negative error codes into a meaningless value).
+ * Otherwise the chip is powered up, the scale written, and the chip
+ * powered back down; the first error encountered is preserved.
+ */
+static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int val,
+				int val2,
+				long mask)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int result, poff;
+
+	mutex_lock(&indio_dev->mlock);
+	/* we should only update scale when the chip is disabled, i.e.,
+	   not running */
+	if (st->chip_config.enable) {
+		mutex_unlock(&indio_dev->mlock);
+		return -EBUSY;
+	}
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result) {
+		mutex_unlock(&indio_dev->mlock);
+		return result;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			result = inv_mpu6050_write_gyro_scale(st, val, val2);
+			break;
+		case IIO_ACCEL:
+			result = inv_mpu6050_write_accel_scale(st, val, val2);
+			break;
+		default:
+			result = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		result = -EINVAL;
+		break;
+	}
+
+	/* always power back down; keep the first error, if any */
+	poff = inv_mpu6050_set_power_itg(st, false);
+	if (!result)
+		result = poff;
+	mutex_unlock(&indio_dev->mlock);
+
+	return result;
+}
+
+/**
+ * inv_mpu6050_set_lpf() - set low pass filter based on fifo rate.
+ *
+ * Based on the Nyquist principle, the sampling rate must exceed twice
+ * the bandwidth of the signal, or there would be aliasing.  This
+ * function searches for the widest low-pass bandwidth that stays below
+ * half the fifo (sampling) rate, then read-modify-writes the DLPF_CFG
+ * field of the CONFIG register, leaving the EXT_SYNC_SET bits intact.
+ */
+static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
+{
+	static const int hz[] = {188, 98, 42, 20, 10, 5};
+	static const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+			INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+			INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+	int i, h, result;
+	u8 data;
+
+	result = i2c_smbus_read_i2c_block_data(st->client,
+					st->reg->config, 1, &data);
+	/* a short read (0 bytes) must not be reported as success */
+	if (result != 1)
+		return result < 0 ? result : -EIO;
+
+	data &= ~(INV_MPU6050_BIT_DLPF_CFG_MASK << INV_MPU6050_BIT_DLPF_CFG_SHIFT);
+	h = rate >> 1;
+	i = 0;
+	while ((i < ARRAY_SIZE(d) - 1) && (h < hz[i]))
+		i++;
+
+	data |= (d[i] & INV_MPU6050_BIT_DLPF_CFG_MASK) << INV_MPU6050_BIT_DLPF_CFG_SHIFT;
+
+	result = inv_mpu6050_write_reg(st, st->reg->config, data);
+	if (result)
+		return result;
+	/* NOTE(review): lpf is a 3-bit field, so only the DLPF bits of the
+	 * full config byte survive this cache — same as the original. */
+	st->chip_config.lpf = data;
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_set_fsync() - Set external Frame Synchronization (FSYNC).
+ *
+ * Read-modify-write of the EXT_SYNC_SET field in the CONFIG register
+ * from the cached chip_config.fsync selection, leaving the DLPF bits
+ * untouched.  Returns 0 on success or a negative error code.
+ */
+static int inv_mpu6050_set_fsync(struct inv_mpu6050_state *st)
+{
+	int result;
+	u8 data;
+
+	result = i2c_smbus_read_i2c_block_data(st->client,
+					st->reg->config, 1, &data);
+	/* a short read (0 bytes) must not be reported as success */
+	if (result != 1)
+		return result < 0 ? result : -EIO;
+
+	data &= ~(INV_MPU6050_BIT_EXT_SYNC_SET_MASK << INV_MPU6050_BIT_EXT_SYNC_SET_SHIFT);
+	data |= (st->chip_config.fsync & INV_MPU6050_BIT_EXT_SYNC_SET_MASK)
+			<< INV_MPU6050_BIT_EXT_SYNC_SET_SHIFT;
+
+	return inv_mpu6050_write_reg(st, st->reg->config, data);
+}
+
+/**
+ * inv_mpu6050_fifo_rate_store() - Set fifo rate.
+ *
+ * Validates the requested rate, programs the sample-rate divider, then
+ * updates the cached rate/period values and re-programs the DLPF and
+ * FSYNC configuration to match.
+ */
+static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	s32 fifo_rate;
+	u8 d;
+	int result;
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	if (kstrtoint(buf, 10, &fifo_rate))
+		return -EINVAL;
+	if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
+				fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+		return -EINVAL;
+	if (fifo_rate == st->chip_config.fifo_rate)
+		return count;
+
+	mutex_lock(&indio_dev->mlock);
+	if (st->chip_config.enable) {
+		result = -EBUSY;
+		goto fifo_rate_fail;
+	}
+	result = inv_mpu6050_set_power_itg(st, true);
+	if (result)
+		goto fifo_rate_fail;
+
+	/*
+	 * inv_mpu6050_set_lpf() below always programs a non-zero DLPF, so
+	 * the internal sample rate is 1kHz (8kHz applies only to the
+	 * INV_MPU6050_FILTER_256HZ_NOLPF2 setting).  The divider must be
+	 * derived from the *requested* rate: the original computed it from
+	 * the constant INV_MPU6050_INIT_FIFO_RATE, so writes to this sysfs
+	 * attribute never changed the hardware divider.
+	 */
+	d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
+	result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+	if (result)
+		goto fifo_rate_fail;
+	st->chip_config.fifo_rate = fifo_rate;
+
+	/* convert FIFO rate in FIFO period (nanoseconds) */
+	st->chip_config.fifo_period_ns = div_u64(1000000000LL, fifo_rate);
+	/* filter period (nanoseconds)
+	 * from inv_mpu6050_platform_data.filter_rate
+	 */
+	st->chip_config.filter_period_ns =
+		st->plat_data.filter_rate * st->chip_config.fifo_period_ns;
+
+	result = inv_mpu6050_set_lpf(st, fifo_rate);
+	if (result)
+		goto fifo_rate_fail;
+
+	result = inv_mpu6050_set_fsync(st);
+	if (result)
+		goto fifo_rate_fail;
+
+fifo_rate_fail:
+	result |= inv_mpu6050_set_power_itg(st, false);
+	mutex_unlock(&indio_dev->mlock);
+	if (result)
+		return result;
+
+	return count;
+}
+
+/**
+ * inv_fifo_rate_show() - Get the current sampling rate.
+ *
+ * Prints the cached FIFO rate (Hz) followed by a newline.
+ */
+static ssize_t inv_fifo_rate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	unsigned int rate = st->chip_config.fifo_rate;
+
+	return sprintf(buf, "%d\n", rate);
+}
+
+/* sysfs names for the clock-source selector values */
+#define STR_INV_CLK_INTERNAL "internal"
+#define STR_INV_CLK_PLL_X "pll_gyro_x"
+#define STR_INV_CLK_PLL_Y "pll_gyro_y"
+#define STR_INV_CLK_PLL_Z "pll_gyro_z"
+#define STR_INV_CLK_EXTERNAL_32_KHZ "external_32k"
+#define STR_INV_CLK_EXTERNAL_19_MHZ "external_19m"
+#define STR_INV_CLK_PLL "pll" /* for MPU6500 */
+
+/* per-chip name table, indexed by enum inv_mpu6050_clock_sel_e */
+static const char *inv_mpu6050_clock_sel_e_str[INV_NUM_PARTS][NUM_CLK] = {
+	[INV_MPU6050] = {
+		[INV_CLK_INTERNAL] = STR_INV_CLK_INTERNAL,
+		[INV_CLK_PLL_X] = STR_INV_CLK_PLL_X,
+		[INV_CLK_PLL_Y] = STR_INV_CLK_PLL_Y,
+		[INV_CLK_PLL_Z] = STR_INV_CLK_PLL_Z,
+		[INV_CLK_EXTERNAL_32_KHZ] = STR_INV_CLK_EXTERNAL_32_KHZ,
+		[INV_CLK_EXTERNAL_19_MHZ] = STR_INV_CLK_EXTERNAL_19_MHZ
+	},
+	[INV_MPU6500] = {
+		[INV_CLK_INTERNAL] = STR_INV_CLK_INTERNAL,
+		[INV_CLK_PLL] = STR_INV_CLK_PLL
+	}
+};
+
+/* number of valid selector values per chip */
+static const int inv_mpu6050_clock_sel_NR[INV_NUM_PARTS] = {
+	[INV_MPU6050] = NUM_CLK_MPU6050,
+	[INV_MPU6500] = NUM_CLK_MPU6500
+};
+
+/*
+ * Inverted availability table: false = always available, true = only
+ * available when platform data reports a CLKIN pin (see the helper
+ * below).
+ */
+static const bool inv_mpu6050_clock_sel_available[INV_NUM_PARTS][NUM_CLK] = {
+	/* For MPU6050,
+	 * availability of clksel selector depending on clkin in inv_mpu6050_platform_data
+	 */
+	[INV_MPU6050] = {
+		[INV_CLK_INTERNAL] = false, /* always available */
+		[INV_CLK_PLL_X] = false,
+		[INV_CLK_PLL_Y] = false,
+		[INV_CLK_PLL_Z] = false,
+		[INV_CLK_EXTERNAL_32_KHZ] = true, /* depending on clkin */
+		[INV_CLK_EXTERNAL_19_MHZ] = true
+	},
+	/* For MPU6500,
+	 * clksel selector should be INV_CLK_INTERNAL or INV_CLK_PLL only.
+	 */
+	[INV_MPU6500] = {
+		[INV_CLK_INTERNAL] = false, /* always available */
+		[INV_CLK_PLL_X] = false,
+		[INV_CLK_PLL_Y] = true, /* always unavailable: clkin forced off for MPU6500 */
+		[INV_CLK_PLL_Z] = true,
+		[INV_CLK_EXTERNAL_32_KHZ] = true,
+		[INV_CLK_EXTERNAL_19_MHZ] = true
+	}
+};
+
+/*
+ * A selector is usable when the table marks it always-available, or when
+ * the board provides a CLKIN signal (plat_data.clkin).
+ */
+static inline bool inv_mpu_clock_sel_available(struct inv_mpu6050_state *st,
+					enum inv_mpu6050_clock_sel_e sel)
+{
+	return !inv_mpu6050_clock_sel_available[st->chip_type][sel] ||
+		st->plat_data.clkin;
+}
+
+/**
+ * inv_mpu6050_clksel_store() - Set clock source.
+ *
+ * Matches @buf against the per-chip selector name table, checks the
+ * selection is available on this board, and caches it.  The cached
+ * configuration (clksel, enable) is now read and updated under the
+ * device mutex — the original checked the enable flag before taking
+ * the lock, racing with buffer enable/disable.
+ */
+static ssize_t inv_mpu6050_clksel_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	enum inv_mpu6050_clock_sel_e i;
+	bool found = false;
+	ssize_t ret = count;
+
+	for (i = 0; i < inv_mpu6050_clock_sel_NR[st->chip_type]; i++) {
+		if (sysfs_streq(inv_mpu6050_clock_sel_e_str[st->chip_type][i], buf)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	if (!inv_mpu_clock_sel_available(st, i))
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	if (st->chip_config.clksel != i) {
+		/* selection may only change while the chip is stopped */
+		if (st->chip_config.enable)
+			ret = -EBUSY;
+		else
+			st->chip_config.clksel = i;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/* sysfs names for the FSYNC (external frame sync) selector values */
+#define STR_INV_MPU6050_FSYNC_DISABLED "disabled"
+#define STR_INV_MPU6050_FSYNC_TEMP_OUT "temp_out"
+#define STR_INV_MPU6050_FSYNC_GYRO_XOUT "gyro_x_out"
+#define STR_INV_MPU6050_FSYNC_GYRO_YOUT "gyro_y_out"
+#define STR_INV_MPU6050_FSYNC_GYRO_ZOUT "gyro_z_out"
+#define STR_INV_MPU6050_FSYNC_ACCEL_XOUT "accel_x_out"
+#define STR_INV_MPU6050_FSYNC_ACCEL_YOUT "accel_y_out"
+#define STR_INV_MPU6050_FSYNC_ACCEL_ZOUT "accel_z_out"
+
+/* name table, indexed by enum inv_mpu6050_ext_sync_set_e */
+static const char *inv_mpu6050_ext_sync_set_e_str[INV_MPU6050_FSYNC_NR] = {
+	[INV_MPU6050_FSYNC_DISABLED] = STR_INV_MPU6050_FSYNC_DISABLED,
+	[INV_MPU6050_FSYNC_TEMP_OUT] = STR_INV_MPU6050_FSYNC_TEMP_OUT,
+	[INV_MPU6050_FSYNC_GYRO_XOUT] = STR_INV_MPU6050_FSYNC_GYRO_XOUT,
+	[INV_MPU6050_FSYNC_GYRO_YOUT] = STR_INV_MPU6050_FSYNC_GYRO_YOUT,
+	[INV_MPU6050_FSYNC_GYRO_ZOUT] = STR_INV_MPU6050_FSYNC_GYRO_ZOUT,
+	[INV_MPU6050_FSYNC_ACCEL_XOUT] = STR_INV_MPU6050_FSYNC_ACCEL_XOUT,
+	[INV_MPU6050_FSYNC_ACCEL_YOUT] = STR_INV_MPU6050_FSYNC_ACCEL_YOUT,
+	[INV_MPU6050_FSYNC_ACCEL_ZOUT] = STR_INV_MPU6050_FSYNC_ACCEL_ZOUT
+};
+
+/* For MPU6050, MPU6500,
+ * availability of fsync selector depending on fsync in inv_mpu6050_platform_data
+ * (inverted table: false = always available, true = needs plat_data.fsync)
+ */
+static const bool inv_mpu6050_fsync_available[INV_MPU6050_FSYNC_NR] = {
+	[INV_MPU6050_FSYNC_DISABLED] = false, /* always available */
+	[INV_MPU6050_FSYNC_TEMP_OUT] = true, /* depending on fsync */
+	[INV_MPU6050_FSYNC_GYRO_XOUT] = true,
+	[INV_MPU6050_FSYNC_GYRO_YOUT] = true,
+	[INV_MPU6050_FSYNC_GYRO_ZOUT] = true,
+	[INV_MPU6050_FSYNC_ACCEL_XOUT] = true,
+	[INV_MPU6050_FSYNC_ACCEL_YOUT] = true,
+	[INV_MPU6050_FSYNC_ACCEL_ZOUT] = true
+};
+
+/* usable when always-available, or when the board wires an FSYNC signal */
+static inline bool inv_mpu_fsync_available(struct inv_mpu6050_state *st,
+					enum inv_mpu6050_ext_sync_set_e sel)
+{
+	return !inv_mpu6050_fsync_available[sel] || st->plat_data.fsync;
+}
+
+/**
+ * inv_mpu6050_fsync_store() - Set external Frame Synchronization (FSYNC).
+ *
+ * Matches @buf against the FSYNC name table, checks availability on
+ * this board, and caches the selection.  The cached configuration
+ * (fsync, enable) is read and updated under the device mutex — the
+ * original checked the enable flag before taking the lock, racing with
+ * buffer enable/disable.
+ */
+static ssize_t inv_mpu6050_fsync_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	enum inv_mpu6050_ext_sync_set_e i;
+	bool found = false;
+	ssize_t ret = count;
+
+	for (i = 0; i < INV_MPU6050_FSYNC_NR; i++) {
+		if (sysfs_streq(inv_mpu6050_ext_sync_set_e_str[i], buf)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	if (!inv_mpu_fsync_available(st, i))
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	if (st->chip_config.fsync != i) {
+		/* selection may only change while the chip is stopped */
+		if (st->chip_config.enable)
+			ret = -EBUSY;
+		else
+			st->chip_config.fsync = i;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+/**
+ * inv_attr_show() - calling this function will show current
+ * parameters.
+ *
+ * Single read handler for the custom device attributes; the attribute
+ * address selects which value is formatted.  The *_AVAILABLE cases emit
+ * a space-separated list and trim the trailing space (c--) before the
+ * newline.
+ */
+static ssize_t inv_attr_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s8 *m;
+
+	switch (this_attr->address) {
+	/* In MPU6050, the two matrix are the same because gyro and accel
+	   are integrated in one chip */
+	case ATTR_GYRO_MATRIX:
+	case ATTR_ACCL_MATRIX:
+		m = (s8 *)st->plat_data.orientation;
+
+		return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+			m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
+		break;
+	/* Get the current clock source. */
+	case ATTR_CLOCK_SOURCE:
+		return sprintf(buf, "%s\n",
+			inv_mpu6050_clock_sel_e_str[st->chip_type][st->chip_config.clksel]);
+		break;
+	case ATTR_FRAME_SYNCHRONISATION:
+		return sprintf(buf, "%s\n",
+			inv_mpu6050_ext_sync_set_e_str[st->chip_config.fsync]);
+		break;
+
+	case ATTR_CLOCK_SOURCE_AVAILABLE:
+	{
+		enum inv_mpu6050_clock_sel_e i;
+		bool av;
+		int c = 0;
+		for (i = 0; i < NUM_CLK ; i++) {
+			av = inv_mpu_clock_sel_available(st, i);
+			if (av)
+				c += sprintf(&buf[c], "%s ",
+					inv_mpu6050_clock_sel_e_str[st->chip_type][i]);
+		}
+		/* drop the trailing space before terminating the line */
+		if (c>0) c--;
+		c += sprintf(&buf[c], "\n");
+		return c;
+	}
+		break;
+
+	case ATTR_FRAME_SYNCHRONISATION_AVAILABLE:
+	{
+		enum inv_mpu6050_ext_sync_set_e i;
+		bool av;
+		int c = 0;
+		for (i = 0; i < INV_MPU6050_FSYNC_NR ; i++) {
+			av = inv_mpu_fsync_available(st, i);
+			if (av)
+				c += sprintf(&buf[c], "%s ", inv_mpu6050_ext_sync_set_e_str[i]);
+		}
+		if (c>0) c--;
+		c += sprintf(&buf[c], "\n");
+		return c;
+	}
+		break;
+	case ATTR_GYRO_SCALE_AVAILABLE:
+	{
+		int c = 0, i;
+		/* scales are pure fractions: print nano part after "0." */
+		for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+			c += sprintf(&buf[c], "0.%09d ", gyro_scale_6050[i]);
+		}
+		if (c>0) c--;
+		c += sprintf(&buf[c], "\n");
+		return c;
+	}
+		break;
+	case ATTR_ACCEL_SCALE_AVAILABLE:
+	{
+		int c = 0, i;
+		for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+			c += sprintf(&buf[c], "0.%09d ", accel_scale[i]);
+		}
+		if (c>0) c--;
+		c += sprintf(&buf[c], "\n");
+		return c;
+	}
+		break;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * inv_mpu6050_validate_trigger() - validate_trigger callback for invensense
+ * MPU6050 device.
+ * @indio_dev: The IIO device
+ * @trig: The new trigger
+ *
+ * Only the trigger this driver registered for itself is accepted.
+ *
+ * Returns: 0 if @trig matches the device's own trigger, -EINVAL otherwise.
+ */
+static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
+					struct iio_trigger *trig)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	return (trig == st->trig) ? 0 : -EINVAL;
+}
+
+/*
+ * Helper to declare one gyro/accel axis channel: raw value per channel,
+ * scale shared per type, 16-bit signed big-endian scan element.
+ */
+#define INV_MPU6050_CHAN(_type, _channel2, _index) \
+	{ \
+		.type = _type, \
+		.modified = 1, \
+		.channel2 = _channel2, \
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+		.scan_index = _index, \
+		.scan_type = { \
+			.sign = 's', \
+			.realbits = 16, \
+			.storagebits = 16, \
+			.shift = 0 , \
+			.endianness = IIO_BE, \
+		}, \
+	}
+
+/* channel table: timestamp, polled-only temperature, 3x gyro, 3x accel */
+static const struct iio_chan_spec inv_mpu_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+	/*
+	 * Note that temperature should only be via polled reading only,
+	 * not the final scan elements output.
+	 */
+	{
+		.type = IIO_TEMP,
+		.indexed = 1,
+		.channel = 0,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+				| BIT(IIO_CHAN_INFO_OFFSET)
+				| BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = -1,
+	},
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+	INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+	INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+};
+
+
+/* get/set sampling frequency */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("5 10 20 50 100 200 500 1000");
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
+			inv_fifo_rate_show,
+			inv_mpu6050_fifo_rate_store);
+
+/* get/set clock source; reads are dispatched through inv_attr_show() */
+static IIO_DEVICE_ATTR(clock_source_available,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_CLOCK_SOURCE_AVAILABLE);
+static IIO_DEVICE_ATTR(clock_source,
+			S_IRUGO | S_IWUSR,
+			inv_attr_show,
+			inv_mpu6050_clksel_store,
+			ATTR_CLOCK_SOURCE);
+
+/* get/set frame synchronisation */
+static IIO_DEVICE_ATTR(frame_synchronisation_available,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_FRAME_SYNCHRONISATION_AVAILABLE);
+static IIO_DEVICE_ATTR(frame_synchronisation,
+			S_IRUGO | S_IWUSR,
+			inv_attr_show,
+			inv_mpu6050_fsync_store,
+			ATTR_FRAME_SYNCHRONISATION);
+
+/* show gyro(anglvel) and accel available scales */
+static IIO_DEVICE_ATTR(in_anglvel_scale_available,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_GYRO_SCALE_AVAILABLE);
+static IIO_DEVICE_ATTR(in_accel_scale_available,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_ACCEL_SCALE_AVAILABLE);
+
+/* show gyro(anglvel) and accel orientation matrixes */
+static IIO_DEVICE_ATTR(in_anglvel_matrix,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(in_accel_matrix,
+			S_IRUGO,
+			inv_attr_show,
+			NULL,
+			ATTR_ACCL_MATRIX);
+
+/* attributes exposed through the IIO device sysfs directory */
+static struct attribute *inv_attributes[] = {
+	&iio_dev_attr_in_anglvel_matrix.dev_attr.attr,
+	&iio_dev_attr_in_accel_matrix.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_clock_source.dev_attr.attr,
+	&iio_dev_attr_clock_source_available.dev_attr.attr,
+	&iio_dev_attr_frame_synchronisation.dev_attr.attr,
+	&iio_dev_attr_frame_synchronisation_available.dev_attr.attr,
+	&iio_dev_attr_in_anglvel_scale_available.dev_attr.attr,
+	&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group inv_attribute_group = {
+	.attrs = inv_attributes
+};
+
+/* IIO core callbacks for this device */
+static const struct iio_info mpu_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &inv_mpu6050_read_raw,
+	.write_raw = &inv_mpu6050_write_raw,
+	.write_raw_get_fmt = &inv_mpu6050_write_raw_get_fmt,
+	.attrs = &inv_attribute_group,
+	.validate_trigger = inv_mpu6050_validate_trigger,
+};
+
+/**
+ * inv_check_and_setup_chip() - identify the chip and bring it to a
+ * known power state.
+ *
+ * Resolves the register map from the i2c device id, performs a full
+ * hardware reset, toggles the power state so hardware and software
+ * agree, and leaves both sensor engines out of standby.  The chip is
+ * left powered on when this returns 0.
+ */
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
+		const struct i2c_device_id *id)
+{
+	int err;
+
+	st->chip_type = (enum inv_devices)(id->driver_data);
+	st->hw = &hw_info[st->chip_type];
+	st->reg = hw_info[st->chip_type].reg;
+
+	/* MPU6500 has no CLKIN pin */
+	if (st->chip_type == INV_MPU6500)
+		st->plat_data.clkin = false;
+
+	/* full reset so no previous configuration survives */
+	err = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+				INV_MPU6050_BIT_H_RESET);
+	if (err)
+		return err;
+	msleep(INV_MPU6050_POWER_UP_TIME);
+
+	/*
+	 * After reset the sleep bit may be either state depending on the
+	 * OTP settings; toggling power off then on puts the chip in a
+	 * definite state and aligns hardware with the software state.
+	 */
+	err = inv_mpu6050_set_power_itg(st, false);
+	if (err)
+		return err;
+	err = inv_mpu6050_set_power_itg(st, true);
+	if (err)
+		return err;
+
+	err = inv_mpu6050_switch_engine(st, false,
+				INV_MPU6050_BIT_PWR_ACCL_STBY);
+	if (err)
+		return err;
+
+	return inv_mpu6050_switch_engine(st, false,
+				INV_MPU6050_BIT_PWR_GYRO_STBY);
+}
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Identifies and initializes the chip, builds the device name from the
+ * i2c id (or device name under ACPI) plus the platform instance id,
+ * then registers the triggered buffer, the trigger, and the IIO device.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct inv_mpu6050_state *st;
+	struct iio_dev *indio_dev;
+	struct inv_mpu6050_platform_data *pdata;
+	char *name;
+	int result;
+
+	if (!i2c_check_functionality(client->adapter,
+		I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -ENOSYS;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	st->client = client;
+	pdata = dev_get_platdata(&client->dev);
+	if (pdata)
+		st->plat_data = *pdata;
+	/* power is turned on inside check chip type*/
+	result = inv_check_and_setup_chip(st, id);
+	if (result)
+		return result;
+
+	result = inv_mpu6050_init_config(indio_dev);
+	if (result) {
+		dev_err(&client->dev,
+			"Could not initialize device.\n");
+		return result;
+	}
+
+	/* id will be NULL when enumerated via ACPI */
+	if (id)
+		name = (char *)id->name;
+	else
+		name = (char *)dev_name(&client->dev);
+	/*
+	 * snprintf always NUL-terminates within the buffer.  The original
+	 * then wrote st->name[result] = '\0', but snprintf's return value
+	 * is the *would-be* length, so a long name indexed past the end of
+	 * the 32-byte array — an out-of-bounds write.  The extra
+	 * termination (and the preceding memset) are unnecessary.
+	 */
+	snprintf(st->name, sizeof(st->name), "%s.%d",
+		 name, st->plat_data.id);
+
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->channels = inv_mpu_channels;
+	indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+	indio_dev->name = st->name;
+	indio_dev->info = &mpu_info;
+	indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+
+	result = iio_triggered_buffer_setup(indio_dev,
+					inv_mpu6050_irq_handler,
+					inv_mpu6050_read_fifo,
+					NULL);
+	if (result) {
+		dev_err(&st->client->dev, "configure buffer fail %d\n",
+				result);
+		return result;
+	}
+	result = inv_mpu6050_probe_trigger(indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+		goto out_unreg_ring;
+	}
+
+	INIT_KFIFO(st->timestamps);
+	spin_lock_init(&st->time_stamp_lock);
+	result = devm_iio_device_register(&st->client->dev, indio_dev);
+	if (result) {
+		dev_err(&st->client->dev, "IIO register fail %d\n", result);
+		goto out_remove_trigger;
+	}
+
+	dev_info(&st->client->dev,
+		 "Invensense MPU6050 6-axis gyroscope/accelometer (%s)%s%s\n",
+		 indio_dev->name,
+		 st->plat_data.fsync?" FSYNC":"",
+		 st->plat_data.clkin?" CLKIN":"");
+
+	return 0;
+
+out_remove_trigger:
+	inv_mpu6050_remove_trigger(st);
+out_unreg_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+	return result;
+}
+
+/*
+ * inv_mpu_remove() - i2c remove callback; tear down in reverse probe
+ * order.  The IIO device is unregistered and the trigger/buffer torn
+ * down *before* the rx buffers are freed — the original freed them
+ * first, leaving a window where an in-flight capture could touch freed
+ * memory.
+ */
+static int inv_mpu_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	devm_iio_device_unregister(&client->dev, indio_dev);
+	inv_mpu6050_remove_trigger(st);
+	iio_triggered_buffer_cleanup(indio_dev);
+	kfree(st->rx_buffer);
+	kfree(st->rx_buffer_ts);
+
+	return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+
+/* resume: wake the chip (clear the sleep bit) */
+static int inv_mpu_resume(struct device *dev)
+{
+	return inv_mpu6050_set_power_itg(
+		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+}
+
+/* suspend: put the chip to sleep */
+static int inv_mpu_suspend(struct device *dev)
+{
+	return inv_mpu6050_set_power_itg(
+		iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+}
+static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+
+#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
+#else
+#define INV_MPU6050_PMOPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * device id table is used to identify what device can be
+ * supported by this driver; driver_data carries the enum inv_devices
+ * value consumed by inv_check_and_setup_chip().
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+	{"mpu6050", INV_MPU6050},
+	{"mpu6500", INV_MPU6500},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+	.probe = inv_mpu_probe,
+	.remove = inv_mpu_remove,
+	.id_table = inv_mpu_id,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "inv-mpu6050",
+		.pm = INV_MPU6050_PMOPS,
+	},
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_iio.h
new file mode 100644
index 00000000000000..6e0a370e6f5109
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -0,0 +1,345 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <iio/platform_data/invensense_mpu6050.h>
+
+/**
+ * struct inv_mpu6050_reg_map - Notable register addresses, filled per
+ * chip type from hw_info[].
+ * @sample_rate_div: Divider applied to gyro output rate.
+ * @config: Configures internal low pass filter/external frame synchronisation.
+ * @user_ctrl: Enables/resets the FIFO.
+ * @fifo_en: Determines which data will appear in FIFO.
+ * @gyro_config: gyro config register.
+ * @accl_config: accel config register.
+ * @fifo_count_h: Upper byte of FIFO count.
+ * @fifo_r_w: FIFO register.
+ * @raw_gyro: Address of first gyro register.
+ * @raw_accl: Address of first accel register.
+ * @temperature: temperature register.
+ * @int_pin_cfg: INT Pin / Bypass Enable Configuration
+ * @int_pin_cfg_set: Setting of INT Pin / Bypass Enable Configuration
+ * @int_enable: Interrupt enable register.
+ * @pwr_mgmt_1: Controls chip's power state and clock source.
+ * @pwr_mgmt_2: Controls power state of individual sensors.
+ */
+struct inv_mpu6050_reg_map {
+	u8 sample_rate_div;
+	u8 config;
+	u8 user_ctrl;
+	u8 fifo_en;
+	u8 gyro_config;
+	u8 accl_config;
+	u8 fifo_count_h;
+	u8 fifo_r_w;
+	u8 raw_gyro;
+	u8 raw_accl;
+	u8 temperature;
+	u8 int_pin_cfg;
+	u8 int_pin_cfg_set;
+	u8 int_enable;
+	u8 pwr_mgmt_1;
+	u8 pwr_mgmt_2;
+};
+
+/* Supported chip models; also index hw_info[] and the per-chip tables.
+ * INV_NUM_PARTS is a count sentinel, not a device. */
+enum inv_devices {
+	INV_MPU6050,
+	INV_MPU6500,
+	INV_NUM_PARTS
+};
+
+/**
+ * struct inv_mpu6050_chip_config - Cached chip configuration data
+ * (bitfields mirror the widths of the corresponding register fields).
+ * @clksel: Clock source select
+ * @fsync: external Frame Synchronization select
+ * @fsr: Full scale range.
+ * @lpf: Digital low pass filter frequency.
+ * @accl_fs: accel full scale range.
+ * @enable: master enable state.
+ * @accl_fifo_enable: enable accel data output
+ * @gyro_fifo_enable: enable gyro data output
+ * @fifo_rate: FIFO update rate.
+ * @fifo_period_ns: FIFO update period (inverse in nanoseconds of fifo_rate)
+ * @filter_period_ns: interrupt filter: platform_data.filter_rate * fifo_period_ns
+ */
+struct inv_mpu6050_chip_config {
+	unsigned int clksel:3;
+	unsigned int fsync:3;
+	unsigned int fsr:2;
+	unsigned int lpf:3;
+	unsigned int accl_fs:2;
+	unsigned int enable:1;
+	unsigned int accl_fifo_enable:1;
+	unsigned int gyro_fifo_enable:1;
+	u16 fifo_rate;
+	s64 fifo_period_ns;
+	s64 filter_period_ns;
+};
+
+/**
+ * struct inv_mpu6050_hw - Per-chip-type constant hardware information.
+ * @num_reg: Number of registers on device.
+ * @name: name of the chip.
+ * @reg: register map of the chip.
+ * @config: default configuration of the chip.
+ */
+struct inv_mpu6050_hw {
+	u8 num_reg;
+	u8 *name;
+	const struct inv_mpu6050_reg_map *reg;
+	const struct inv_mpu6050_chip_config *config;
+};
+
+/*
+ * struct inv_mpu6050_state - Driver state variables.
+ * @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
+ * @trig: IIO trigger.
+ * @chip_config: Cached attribute information.
+ * @reg: Map of important registers.
+ * @hw: Other hardware-specific information.
+ * @chip_type: chip type.
+ * @time_stamp_lock: spin lock to time stamp.
+ * @client: i2c client handle.
+ * @plat_data: platform data.
+ * @name: device name ("<id>.<instance>") built in probe.
+ * @timestamps: kfifo queue to store time stamp.
+ * @rx_buffer: buffer used to store received samples from I2C
+ * @rx_buffer_size: size of rx_buffer
+ * @rx_buffer_ts: buffer used to store received samples from I2C,
+ * with timestamps
+ * @rx_buffer_ts_size: size of rx_buffer_ts
+ */
+struct inv_mpu6050_state {
+#define TIMESTAMP_FIFO_SIZE 16
+	struct iio_trigger *trig;
+	struct inv_mpu6050_chip_config chip_config;
+	const struct inv_mpu6050_reg_map *reg;
+	const struct inv_mpu6050_hw *hw;
+	enum inv_devices chip_type;
+	spinlock_t time_stamp_lock;
+	struct i2c_client *client;
+	struct inv_mpu6050_platform_data plat_data;
+	char name[32];
+	DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+	u8 *rx_buffer;
+	size_t rx_buffer_size;
+	u8 *rx_buffer_ts;
+	size_t rx_buffer_ts_size;
+};
+
+/* Register addresses and associated bit definitions. */
+#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
+#define INV_MPU6050_REG_CONFIG 0x1A
+#define INV_MPU6050_BIT_DLPF_CFG_MASK 0x07
+#define INV_MPU6050_BIT_DLPF_CFG_SHIFT 0
+#define INV_MPU6050_BIT_EXT_SYNC_SET_MASK 0x07
+#define INV_MPU6050_BIT_EXT_SYNC_SET_SHIFT 3
+
+#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+
+#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
+
+#define INV_MPU6050_REG_INT_PIN_CFG 0x37
+#define INV_MPU6050_BIT_INT_LEVEL (1<<7)
+#define INV_MPU6050_BIT_INT_OPEN (1<<6)
+#define INV_MPU6050_BIT_LATCH_INT_EN (1<<5)
+#define INV_MPU6050_BIT_INT_RD_CLEAR (1<<4)
+#define INV_MPU6050_BIT_FSYNC_INT_LEVEL (1<<3)
+#define INV_MPU6050_BIT_FSYNC_INT_EN (1<<2)
+#define INV_MPU6050_BIT_I2C_BYPASS_EN (1<<1)
+
+
+#define INV_MPU6050_REG_INT_ENABLE 0x38
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+
+#define INV_MPU6050_REG_RAW_ACCEL 0x3B
+#define INV_MPU6050_REG_TEMPERATURE 0x41
+#define INV_MPU6050_REG_RAW_GYRO 0x43
+
+#define INV_MPU6050_REG_USER_CTRL 0x6A
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
+
+#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CYCLE 0x20
+#define INV_MPU6050_BIT_TEMP_DIS 0x08
+#define INV_MPU6050_BIT_CLK_MASK 0x07
+
+#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+
+#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
+#define INV_MPU6050_REG_FIFO_R_W 0x74
+
+#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
+#define INV_MPU6050_FIFO_COUNT_BYTE 2
+#define INV_MPU6050_FIFO_THRESHOLD 500
+/* delays in milliseconds */
+#define INV_MPU6050_POWER_UP_TIME 100
+#define INV_MPU6050_TEMP_UP_TIME 100
+#define INV_MPU6050_SENSOR_UP_TIME 30
+#define INV_MPU6050_REG_UP_TIME 5
+
+/* MPU6050 temperature conversion: LSB/degC sensitivity and offsets */
+#define INV_MPU6050_TEMP_SENSITIVITY 340L
+#define INV_MPU6050_TEMP_OFFSET_C 35
+#define INV_MPU6050_TEMP_OFFSET_LSB -521
+
+#define INV_MPU6050_TEMP_OFFSET ((INV_MPU6050_TEMP_OFFSET_C \
+ *INV_MPU6050_TEMP_SENSITIVITY) \
+ -INV_MPU6050_TEMP_OFFSET_LSB)
+
+/* decimal part of IIO_VAL_INT_PLUS_MICRO */
+#define INV_MPU6050_TEMP_SCALE (1000000L/INV_MPU6050_TEMP_SENSITIVITY)
+
+#define INV_MPU6500_TEMP_SENSITIVITY 333.87
+#define INV_MPU6500_TEMP_OFFSET_C 21
+#define INV_MPU6500_TEMP_OFFSET_LSB 0
+
+#define INV_MPU6500_TEMP_OFFSET ((INV_MPU6500_TEMP_OFFSET_C \
+ *INV_MPU6500_TEMP_SENSITIVITY) \
+ -INV_MPU6500_TEMP_OFFSET_LSB)
+
+/* decimal part of IIO_VAL_INT_PLUS_MICRO */
+#define INV_MPU6500_TEMP_SCALE (1000000L/INV_MPU6500_TEMP_SENSITIVITY)
+
+#define INV_MPU6050_MAX_GYRO_FS_PARAM 3
+#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
+#define INV_MPU6050_THREE_AXIS 3
+#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
+#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
+
+/* 6 + 6 round up and plus 8 */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+
+/* init parameters */
+#define INV_MPU6050_INIT_FIFO_RATE 1000
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
+
+/* scan element definition */
+enum inv_mpu6050_scan {
+ INV_MPU6050_SCAN_ACCL_X,
+ INV_MPU6050_SCAN_ACCL_Y,
+ INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_GYRO_X,
+ INV_MPU6050_SCAN_GYRO_Y,
+ INV_MPU6050_SCAN_GYRO_Z,
+ INV_MPU6050_SCAN_TIMESTAMP,
+};
+
+enum inv_mpu6050_filter_e {
+ INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
+ INV_MPU6050_FILTER_188HZ,
+ INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ,
+ INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ,
+ INV_MPU6050_FILTER_5HZ,
+ INV_MPU6050_FILTER_2100HZ_NOLPF,
+ NUM_MPU6050_FILTER
+};
+
+/*
+ * An external signal connected to the FSYNC pin can be sampled by configuring EXT_SYNC_SET.
+ * Signal changes to the FSYNC pin are latched so that short strobes may be captured. The latched
+ * FSYNC signal will be sampled at the Sampling Rate, as defined in register 25. After sampling, the
+ * latch will reset to the current FSYNC signal state.
+ * The sampled value will be reported in place of the least significant bit in a sensor data register
+ * determined by the value of EXT_SYNC_SET according to enum inv_mpu6050_ext_sync_set_e.
+ */
+enum inv_mpu6050_ext_sync_set_e {
+ INV_MPU6050_FSYNC_DISABLED=0,
+ INV_MPU6050_FSYNC_TEMP_OUT,
+ INV_MPU6050_FSYNC_GYRO_XOUT,
+ INV_MPU6050_FSYNC_GYRO_YOUT,
+ INV_MPU6050_FSYNC_GYRO_ZOUT,
+ INV_MPU6050_FSYNC_ACCEL_XOUT,
+ INV_MPU6050_FSYNC_ACCEL_YOUT,
+ INV_MPU6050_FSYNC_ACCEL_ZOUT,
+ INV_MPU6050_FSYNC_NR
+};
+
+/* IIO attribute address */
+enum INV_MPU6050_IIO_ATTR_ADDR {
+ ATTR_GYRO_MATRIX,
+ ATTR_ACCL_MATRIX,
+ ATTR_GYRO_SCALE_AVAILABLE,
+ ATTR_ACCEL_SCALE_AVAILABLE,
+ ATTR_CLOCK_SOURCE_AVAILABLE,
+ ATTR_CLOCK_SOURCE,
+ ATTR_FRAME_SYNCHRONISATION_AVAILABLE,
+ ATTR_FRAME_SYNCHRONISATION
+};
+
+enum INV_MPU6050_IIO_TRIGGER_ATTR_ADDR {
+ TRIGGER_ATTR_ENABLE,
+};
+
+enum inv_mpu6050_accl_fs_e {
+ INV_MPU6050_FS_02G = 0,
+ INV_MPU6050_FS_04G,
+ INV_MPU6050_FS_08G,
+ INV_MPU6050_FS_16G,
+ NUM_ACCL_FSR
+};
+
+enum inv_mpu6050_fsr_e {
+ INV_MPU6050_FSR_250DPS = 0,
+ INV_MPU6050_FSR_500DPS,
+ INV_MPU6050_FSR_1000DPS,
+ INV_MPU6050_FSR_2000DPS,
+ NUM_MPU6050_FSR
+};
+
+enum inv_mpu6050_clock_sel_e {
+ INV_CLK_INTERNAL = 0,
+ INV_CLK_PLL_X,
+ INV_CLK_PLL_Y,
+ INV_CLK_PLL_Z,
+ INV_CLK_EXTERNAL_32_KHZ,
+ INV_CLK_EXTERNAL_19_MHZ,
+ NUM_CLK,
+
+ NUM_CLK_MPU6050 = NUM_CLK,
+ NUM_CLK_MPU6500 = 2,
+ INV_CLK_RESET = 7, /* Stops the clock and keeps timing generator in reset */
+ INV_CLK_PLL = INV_CLK_PLL_X /* for MPU6500 */
+};
+
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st);
+int inv_reset_fifo(struct iio_dev *indio_dev);
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_ring.c
new file mode 100644
index 00000000000000..957175365dd4ad
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -0,0 +1,309 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * inv_clear_kfifo() - Drop all pending entries from the timestamp kfifo.
+ *
+ * Holds the timestamp spinlock with IRQs disabled so the data-ready
+ * interrupt handler cannot push a new timestamp mid-reset.
+ */
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+	unsigned long flags;
+
+	/* take the spin lock to avoid the interrupt kicking in */
+	spin_lock_irqsave(&st->time_stamp_lock, flags);
+	kfifo_reset(&st->timestamps);
+	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
+/*
+ * inv_reset_fifo() - Restore the hardware FIFO to a clean, running state.
+ *
+ * Sequence: mask the interrupt, stop sensor output to the FIFO, disable
+ * FIFO reading, reset the hardware FIFO and the software timestamp fifo,
+ * then re-enable everything according to the current scan configuration.
+ * On failure past the first write, the data-ready interrupt is re-armed
+ * (best effort) before returning so the device is not left deaf.
+ *
+ * Returns 0 on success, a negative register-write error otherwise.
+ */
+int inv_reset_fifo(struct iio_dev *indio_dev)
+{
+	int result;
+	u8 d;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	/* disable interrupt */
+	result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+	if (result) {
+		dev_err(&st->client->dev, "int_enable failed %d\n", result);
+		return result;
+	}
+	/* disable the sensor output to FIFO */
+	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+	if (result)
+		goto reset_fifo_fail;
+	/* disable fifo reading */
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+	if (result)
+		goto reset_fifo_fail;
+
+	/* reset FIFO */
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+				       INV_MPU6050_BIT_FIFO_RST);
+	if (result)
+		goto reset_fifo_fail;
+
+	/* clear timestamps fifo so it stays in sync with the HW FIFO */
+	inv_clear_kfifo(st);
+
+	/* enable FIFO reading and I2C master interface */
+	result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+				       INV_MPU6050_BIT_FIFO_EN);
+	if (result)
+		goto reset_fifo_fail;
+	/* enable sensor output to FIFO, per currently enabled scan engines */
+	d = 0;
+	if (st->chip_config.gyro_fifo_enable)
+		d |= INV_MPU6050_BITS_GYRO_OUT;
+	if (st->chip_config.accl_fifo_enable)
+		d |= INV_MPU6050_BIT_ACCEL_OUT;
+	result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+	if (result)
+		goto reset_fifo_fail;
+
+	/* enable interrupt */
+	if (st->chip_config.accl_fifo_enable ||
+	    st->chip_config.gyro_fifo_enable) {
+		result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+					       INV_MPU6050_BIT_DATA_RDY_EN);
+		if (result)
+			return result;
+	}
+
+	return 0;
+
+reset_fifo_fail:
+	dev_err(&st->client->dev, "reset fifo failed %d\n", result);
+	/* best effort: re-arm the data-ready interrupt before bailing out */
+	result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+				       INV_MPU6050_BIT_DATA_RDY_EN);
+
+	return result;
+}
+
+/**
+ * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+ *
+ * Hard-IRQ half of the handler: records the interrupt time (compensated by
+ * the configured filter delay) into the timestamp kfifo, then defers the
+ * actual FIFO drain to the threaded half (inv_mpu6050_read_fifo()).
+ */
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	s64 timestamp;
+
+	timestamp = iio_get_time_ns();
+	/* if interrupts are filtered, compensate the delay */
+	timestamp -= st->chip_config.filter_period_ns;
+	kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
+			    &st->time_stamp_lock);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * inv_mpu6050_read_samples() - Transfer data from hardware FIFO to a buffer.
+ * @st:		device state
+ * @fifo_count:	number of bytes to drain from the hardware FIFO
+ *
+ * Samples are stored in st->rx_buffer, which is grown on demand.
+ *
+ * Returns @fifo_count on success, a negative error code otherwise.
+ */
+static int inv_mpu6050_read_samples(struct inv_mpu6050_state *st,
+				    u16 fifo_count)
+{
+	u16 remaining_size;
+	u8 *dest;
+	int result;
+
+	/* Grow buffer if required */
+	if (st->rx_buffer_size < fifo_count) {
+		/*
+		 * Do not assign krealloc()'s result straight to
+		 * st->rx_buffer: on failure krealloc() returns NULL but
+		 * leaves the original allocation alive, so the direct
+		 * assignment leaked the old buffer.
+		 */
+		u8 *new_buffer = krealloc(st->rx_buffer, fifo_count,
+					  GFP_KERNEL);
+		if (!new_buffer)
+			return -ENOMEM;
+
+		st->rx_buffer = new_buffer;
+		st->rx_buffer_size = fifo_count;
+	}
+
+	dest = st->rx_buffer;
+	remaining_size = fifo_count;
+	while (remaining_size > 0) {
+		/* SMBus block reads are length-capped: drain in chunks */
+		result = i2c_smbus_read_i2c_block_data(st->client,
+						       st->reg->fifo_r_w,
+						       remaining_size, dest);
+		if (result < 0)
+			return result;
+		if (result == 0)
+			/* no forward progress: avoid an infinite loop */
+			return -EIO;
+
+		dest += result;
+		remaining_size -= result;
+	}
+
+	return fifo_count;
+}
+
+/**
+ * inv_mpu6050_create_timestamped_buffer() - interleave samples and timestamps.
+ * @st:			device state
+ * @bytes_per_datum:	size in bytes of one sample (enabled channels only)
+ * @samples:		number of samples sitting in st->rx_buffer
+ *
+ * Builds, in st->rx_buffer_ts, @samples records laid out as
+ * [sample bytes][padding][s64 timestamp], timestamp 8-byte aligned.
+ * The first timestamp is popped from the software kfifo; subsequent ones
+ * are extrapolated by adding the fifo period (the case when interrupts
+ * are filtered at the source).
+ *
+ * Returns 0 on success, -ENOMEM if the destination buffer cannot grow.
+ */
+static int inv_mpu6050_create_timestamped_buffer(struct inv_mpu6050_state *st,
+						 size_t bytes_per_datum,
+						 u16 samples)
+{
+	s64 timestamp = 0;
+	u8 *src;
+	u8 *dest;
+	size_t ts_padding;
+	size_t buff_size;
+	size_t i;
+	int result;
+
+	/* Compute required buffer size */
+	ts_padding = sizeof(s64) - (bytes_per_datum % sizeof(s64));
+	buff_size = (bytes_per_datum + ts_padding + sizeof(s64)) * samples;
+
+	/* Resize buffer if required */
+	if (buff_size > st->rx_buffer_ts_size) {
+		/*
+		 * Use a temporary: assigning krealloc()'s result directly
+		 * would leak the old buffer when the allocation fails.
+		 */
+		u8 *new_buffer = krealloc(st->rx_buffer_ts, buff_size,
+					  GFP_KERNEL);
+		if (!new_buffer)
+			return -ENOMEM;
+
+		st->rx_buffer_ts = new_buffer;
+		/*
+		 * Track the size of rx_buffer_ts, not rx_buffer: the
+		 * original code updated st->rx_buffer_size here, which
+		 * both corrupted the rx_buffer bookkeeping and, since
+		 * rx_buffer_ts_size never changed, forced a krealloc()
+		 * on every call.
+		 */
+		st->rx_buffer_ts_size = buff_size;
+	}
+
+	dest = st->rx_buffer_ts;
+	src = st->rx_buffer;
+
+	for (i = 0; i < samples; i++) {
+		if (i == 0) {
+			result = kfifo_out(&st->timestamps, &timestamp, 1);
+			if (result == 0)
+				dev_err(&st->client->dev, "no timestamp\n");
+		} else {
+			/*
+			 * Estimate the timestamp by adding the fifo
+			 * period; this is the case when interrupts are
+			 * filtered at the source.
+			 */
+			timestamp += st->chip_config.fifo_period_ns;
+		}
+
+		/* Store sample as-is, no padding required */
+		memcpy(dest, src, bytes_per_datum);
+		src += bytes_per_datum;
+		dest += bytes_per_datum;
+
+		/* Store timestamp after the alignment padding */
+		memcpy(dest + ts_padding, &timestamp, sizeof(s64));
+		dest += ts_padding + sizeof(s64);
+	}
+
+	return 0;
+}
+
+/**
+ * inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
+ *
+ * Threaded half of the data-ready IRQ: drains the hardware FIFO,
+ * optionally interleaves timestamps, and pushes the result to the IIO
+ * buffer.  Any inconsistency (odd byte count, FIFO past the overflow
+ * threshold, timestamp/sample mismatch, read error) is resolved by
+ * flushing both the hardware and software FIFOs.
+ */
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	size_t bytes_per_datum;
+	size_t samples;
+	int result;
+	u16 raw_fifo_count;
+	u16 fifo_count;
+
+	mutex_lock(&indio_dev->mlock);
+	if (!(st->chip_config.accl_fifo_enable |
+	      st->chip_config.gyro_fifo_enable))
+		goto end_session;
+
+	/* Calculate IMU sample size (depends on enabled channels) */
+	bytes_per_datum = 0;
+	if (st->chip_config.accl_fifo_enable)
+		bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+	if (st->chip_config.gyro_fifo_enable)
+		bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+	/* Fetch FIFO size (16-bit big-endian register pair) */
+	result = i2c_smbus_read_i2c_block_data(st->client,
+					       st->reg->fifo_count_h,
+					       INV_MPU6050_FIFO_COUNT_BYTE,
+					       (u8 *) &raw_fifo_count);
+	if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+		goto end_session;
+
+	fifo_count = be16_to_cpup((__be16 *)(&raw_fifo_count));
+	if (fifo_count < bytes_per_datum)
+		goto end_session;
+
+	samples = fifo_count / bytes_per_datum;
+
+	/* fifo count can't be an odd number; if it is odd, reset the fifo */
+	if (fifo_count & 1)
+		goto flush_fifo;
+
+	/* FIFO is overflowing: restart from scratch */
+	if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
+		goto flush_fifo;
+
+	/* Timestamp mismatch: far more timestamps queued than samples. */
+	if (kfifo_len(&st->timestamps) > samples + INV_MPU6050_TIME_STAMP_TOR)
+		goto flush_fifo;
+
+	/* Read samples from I2C */
+	result = inv_mpu6050_read_samples(st, fifo_count);
+	if (result < 0)
+		goto flush_fifo;
+
+	if (indio_dev->scan_timestamp) {
+		result = inv_mpu6050_create_timestamped_buffer(
+				st, bytes_per_datum, samples);
+		if (result)
+			goto flush_fifo;
+
+		result = iio_push_to_buffers_n(indio_dev, st->rx_buffer_ts,
+					       samples);
+		if (result)
+			goto flush_fifo;
+	} else {
+		result = iio_push_to_buffers_n(indio_dev, st->rx_buffer,
+					       samples);
+		if (result)
+			goto flush_fifo;
+	}
+
+end_session:
+	mutex_unlock(&indio_dev->mlock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+
+flush_fifo:
+	/* Flush HW and SW FIFOs. */
+	inv_reset_fifo(indio_dev);
+	mutex_unlock(&indio_dev->mlock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_trigger.c
new file mode 100644
index 00000000000000..9225d9cd0fb3ca
--- /dev/null
+++ b/drivers/parrot/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -0,0 +1,231 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include "inv_mpu_iio.h"
+
+/*
+ * inv_scan_query() - Derive the gyro/accel FIFO enables from the scan mask.
+ *
+ * When no scan mask is active (buffer not started) both engines are
+ * turned off and an error is logged; otherwise each engine is enabled
+ * iff at least one of its three axes is selected.
+ */
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	const unsigned long *mask = indio_dev->active_scan_mask;
+
+	if (!mask) {
+		dev_err(&st->client->dev,
+			"mpu6050 should be started "
+			"before using trig-mpu6050\n");
+		st->chip_config.gyro_fifo_enable = false;
+		st->chip_config.accl_fifo_enable = false;
+		return;
+	}
+
+	st->chip_config.gyro_fifo_enable =
+		test_bit(INV_MPU6050_SCAN_GYRO_X, mask) ||
+		test_bit(INV_MPU6050_SCAN_GYRO_Y, mask) ||
+		test_bit(INV_MPU6050_SCAN_GYRO_Z, mask);
+
+	st->chip_config.accl_fifo_enable =
+		test_bit(INV_MPU6050_SCAN_ACCL_X, mask) ||
+		test_bit(INV_MPU6050_SCAN_ACCL_Y, mask) ||
+		test_bit(INV_MPU6050_SCAN_ACCL_Z, mask);
+}
+
+/**
+ * inv_mpu6050_get_enable() - report whether the chip is currently enabled.
+ * @indio_dev:	Device driver instance.
+ *
+ * Returns the cached enable state set by inv_mpu6050_set_enable().
+ */
+static unsigned int inv_mpu6050_get_enable(struct iio_dev *indio_dev)
+{
+	const struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	return st->chip_config.enable;
+}
+
+/**
+ * inv_mpu6050_set_enable() - enable chip functions.
+ * @indio_dev:	Device driver instance.
+ * @enable:	enable/disable
+ *
+ * Enable path: power the chip up, derive engine enables from the active
+ * scan mask, spin up the gyro/accel engines as needed, program the INT
+ * pin, then reset and arm the FIFO.  Disable path tears down in the
+ * reverse order (FIFO, interrupt, user_ctrl, engines, power).
+ * The cached st->chip_config.enable is only updated on full success.
+ *
+ * Returns 0 on success or the first register-write error encountered.
+ */
+static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int result;
+
+	if (enable) {
+		result = inv_mpu6050_set_power_itg(st, true);
+		if (result)
+			return result;
+		inv_scan_query(indio_dev);
+		if (st->chip_config.gyro_fifo_enable) {
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+			if (result)
+				return result;
+		}
+		if (st->chip_config.accl_fifo_enable) {
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+			if (result)
+				return result;
+		}
+		/* apply the board-specific INT pin configuration */
+		result = inv_mpu6050_write_reg(st,
+					       st->reg->int_pin_cfg,
+					       st->reg->int_pin_cfg_set);
+		if (result)
+			return result;
+
+		result = inv_reset_fifo(indio_dev);
+		if (result)
+			return result;
+	} else {
+		result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+		if (result)
+			return result;
+
+		result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+		if (result)
+			return result;
+		result = inv_mpu6050_set_power_itg(st, false);
+		if (result)
+			return result;
+	}
+	st->chip_config.enable = enable;
+
+	return 0;
+}
+
+/**
+ * inv_mpu_data_rdy_trigger_set_state() - set data ready interrupt state
+ * @trig:	Trigger instance
+ * @state:	Desired trigger state
+ *
+ * Intentionally a no-op: the chip is started/stopped through the
+ * trigger's sysfs "enable" attribute instead.
+ */
+static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					      bool state)
+{
+	return 0;
+}
+
+static const struct iio_trigger_ops inv_mpu_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
+};
+
+/* Device trigger attributes */
+
+/*
+ * "enable" sysfs attribute on the trigger device: reading returns the
+ * cached chip enable state; writing a boolean starts or stops the chip
+ * via inv_mpu6050_set_enable().
+ */
+static ssize_t inv_mpu6050_trigger_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct iio_trigger *trig = to_iio_trigger(dev);
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	unsigned int enabled;
+
+	enabled = inv_mpu6050_get_enable(indio_dev);
+	return sprintf(buf, "%u\n", enabled);
+}
+
+static ssize_t inv_mpu6050_trigger_enable_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf,
+						size_t len)
+{
+	struct iio_trigger *trig = to_iio_trigger(dev);
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	bool requested_state;
+	int ret;
+
+	ret = strtobool(buf, &requested_state);
+	if (ret < 0)
+		return ret;
+
+	ret = inv_mpu6050_set_enable(indio_dev, requested_state);
+	return (ret < 0) ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(enable,
+		       S_IRUGO | S_IWUSR,
+		       inv_mpu6050_trigger_enable_show,
+		       inv_mpu6050_trigger_enable_store,
+		       TRIGGER_ATTR_ENABLE);
+
+static struct attribute *inv_trigger_attributes[] = {
+	&iio_dev_attr_enable.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group inv_trigger_attribute_group = {
+	.attrs = inv_trigger_attributes
+};
+
+/* attached to the trigger device via trig->dev.groups in probe */
+static const struct attribute_group *inv_trigger_attribute_groups[] = {
+	&inv_trigger_attribute_group,
+	NULL
+};
+
+/**
+ * inv_mpu6050_probe_trigger() - allocate and register the data-ready trigger.
+ * @indio_dev:	Device driver instance.
+ *
+ * Allocates a trigger named "trig-<name>", attaches the sysfs "enable"
+ * attribute group, requests the data-ready IRQ and registers the trigger.
+ * Both the trigger and the IRQ are devm-managed, so no explicit teardown
+ * is needed on the error paths.
+ *
+ * NOTE(review): the IRQ is requested before iio_trigger_register(); if
+ * the line fires in that window, iio_trigger_generic_data_rdy_poll()
+ * runs on a not-yet-registered trigger -- confirm this is harmless.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+	st->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+					  "trig-%s",
+					  st->name);
+	if (!st->trig) {
+		dev_err(&st->client->dev,
+			"devm_iio_trigger_alloc(%s) failed ENOMEM\n", st->name);
+		return -ENOMEM;
+	}
+
+	st->trig->dev.groups = inv_trigger_attribute_groups;
+
+	ret = devm_request_irq(&st->client->dev,
+			       st->client->irq,
+			       &iio_trigger_generic_data_rdy_poll,
+			       IRQF_TRIGGER_RISING,
+			       st->name,
+			       st->trig);
+	if (ret)
+		return ret;
+
+	dev_info(&st->client->dev, "requested IRQ %d\n", st->client->irq);
+
+	st->trig->dev.parent = &st->client->dev;
+	st->trig->ops = &inv_mpu_trigger_ops;
+	iio_trigger_set_drvdata(st->trig, indio_dev);
+	ret = iio_trigger_register(st->trig);
+	if (ret)
+		return ret;
+	indio_dev->trig = iio_trigger_get(st->trig);
+
+	return 0;
+}
+
+/* Unregister the trigger; devm frees the trigger and IRQ on detach. */
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
+{
+	iio_trigger_unregister(st->trig);
+}
diff --git a/drivers/parrot/iio/magnetometer/Kconfig b/drivers/parrot/iio/magnetometer/Kconfig
new file mode 100644
index 00000000000000..aab3ad4cee382a
--- /dev/null
+++ b/drivers/parrot/iio/magnetometer/Kconfig
@@ -0,0 +1,27 @@
+#
+# Magnetometer sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Magnetometers"
+
+config PARROT_IIO_AK8975
+ tristate "Asahi Kasei AK8975 3-Axis Magnetometer"
+ depends on I2C && SYSFS && GPIOLIB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Asahi Kasei AK8975 3-Axis
+ Magnetometer. This driver can also support AK8963, if i2c
+ device name is identified as ak8963.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ak8975.
+
+
+config PARROT_IIO_AK8975_TIMESTAMPS
+ bool "AK8975: allow display of timestamps"
+ depends on PARROT_IIO_AK8975
+ default n
+
+endmenu
diff --git a/drivers/parrot/iio/magnetometer/Makefile b/drivers/parrot/iio/magnetometer/Makefile
new file mode 100644
index 00000000000000..89ad3eaf0a9b3a
--- /dev/null
+++ b/drivers/parrot/iio/magnetometer/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for industrial I/O Magnetometer sensors for PARROT
+#
+
+ccflags-y += -I$(srctree)/drivers/parrot
+#ccflags-y += -I$(srctree)/drivers/staging
+
+obj-$(CONFIG_PARROT_IIO_AK8975) += ak8975.o
diff --git a/drivers/parrot/iio/magnetometer/ak8975.c b/drivers/parrot/iio/magnetometer/ak8975.c
new file mode 100644
index 00000000000000..706bb35463e226
--- /dev/null
+++ b/drivers/parrot/iio/magnetometer/ak8975.c
@@ -0,0 +1,874 @@
+/*
+ * A sensor driver for the magnetometer AK8975/AK8963.
+ *
+ * Magnetic compass sensor driver for monitoring magnetic flux information.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+//#define DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <iio/platform_data/ak8975.h>
+#include "ak8975_regs.h"
+
+/* module parameters */
+static unsigned mode = 1;
+module_param(mode, int, S_IRUGO);
+
+static unsigned precision = 16;
+module_param(precision, int, S_IRUGO);
+
+/*
+ * Points along a measurement cycle at which a latency timestamp is
+ * recorded into ak8975_data.timestamps[].
+ */
+enum ak8975_timestamps_list {
+	TS_trigger,
+	TS_request,	/* single-measurement request written to the chip */
+	TS_data_ready,
+	TS_push,
+	TS_NR
+};
+
+/*
+ * Per-instance context data for the device.
+ */
+struct ak8975_data {
+	struct i2c_client *client;
+	struct ak8975_platform_data *pdata;
+	struct attribute_group attrs;
+	struct mutex lock;	/* serializes mode changes (see store_mode) */
+	u8 asa[3];		/* per-axis sensitivity adjustment from fuse ROM */
+	long long raw_to_gauss[3];	/* cached per-axis scale, see ak8975_setup() */
+	int clamp[2];		/* [min, max] valid raw value for the precision */
+	bool mode;		/* 0 = off, 1 = on (see show_mode/store_mode) */
+	u8 reg_cache[AK8975_MAX_REGS];	/* shadow of registers written via ak8975_write_data() */
+	int drdy_gpio;/* DRDY pin */
+	int drdy_irq; /* DRDY interrupt */
+	int trg_gpio; /* TRG pin on AK8963 */
+	wait_queue_head_t data_ready_queue;	/* woken by the DRDY irq handler */
+	unsigned long flags;	/* bit 0: data-ready latch */
+	char name[32];
+	enum asahi_compass_chipset chipset;
+	s64 timestamps[TS_NR];	/* indexed by enum ak8975_timestamps_list */
+};
+
+/* Map axis index (X/Y/Z) to the low byte of its data register. */
+static const int ak8975_index_to_reg[] = {
+	AK8975_REG_HXL, AK8975_REG_HYL, AK8975_REG_HZL,
+};
+
+/*
+ * Helper function to write to the I2C device's registers.
+ *
+ * Performs a read-modify-write against the software register cache
+ * (data->reg_cache), never re-reading the hardware: the bits selected
+ * by @mask are replaced with @val shifted by @shift, and the cache is
+ * only updated once the I2C write succeeded.
+ */
+static int ak8975_write_data(struct i2c_client *client,
+			     u8 reg, u8 val, u8 mask, u8 shift)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
+	u8 regval;
+	int ret;
+
+	regval = (data->reg_cache[reg] & ~mask) | (val << shift);
+	ret = i2c_smbus_write_byte_data(client, reg, regval);
+	if (ret < 0) {
+		dev_err(&client->dev, "Write to device fails status %x\n", ret);
+		return ret;
+	}
+	data->reg_cache[reg] = regval;
+
+	return 0;
+}
+
+/*
+ * Handle data ready irq: latch the completion flag and wake any waiter
+ * sleeping in wait_conversion_complete_interrupt().
+ */
+static irqreturn_t ak8975_irq_handler(int irq, void *data)
+{
+	struct ak8975_data *ak8975 = data;
+
+	/* bit 0 of ->flags is the "data ready" latch */
+	set_bit(0, &ak8975->flags);
+	wake_up(&ak8975->data_ready_queue);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Install data ready interrupt handler.
+ *
+ * Prefers the IRQ provided by the I2C client; otherwise derives one
+ * from the DRDY GPIO.  On success data->drdy_irq holds the line used.
+ *
+ * NOTE(review): the "client->irq >= 0" test assumes an unset IRQ is
+ * negative; on kernels where an unset i2c_client irq is 0 this would
+ * wrongly request IRQ 0 -- confirm against the platform setup.
+ */
+static int ak8975_setup_irq(struct ak8975_data *data)
+{
+	struct i2c_client *client = data->client;
+	int rc;
+	int irq;
+
+	if (client->irq >= 0)
+		irq = client->irq;
+	else
+		irq = gpio_to_irq(data->drdy_gpio);
+
+	rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
+			      IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+			      data->name, data);
+	if (rc < 0) {
+		dev_err(&client->dev,
+			"irq %d request failed: %d\n",
+			irq, rc);
+		return rc;
+	}
+	dev_info(&client->dev, "requested IRQ %d\n", irq);
+
+	init_waitqueue_head(&data->data_ready_queue);
+	clear_bit(0, &data->flags);
+	data->drdy_irq = irq;
+
+	return rc;
+}
+
+/*
+ * Read the WIA (device ID) register and verify the chip answers with
+ * the expected AK8975 identifier.  Returns 0 on success, a negative
+ * I2C error, or -ENODEV on ID mismatch.
+ */
+static int ak8975_check_id(struct i2c_client *client)
+{
+	int ret = i2c_smbus_read_byte_data(client, AK8975_REG_WIA);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "Error reading WIA\n");
+		return ret;
+	}
+
+	if ((u8)ret != AK8975_DEVICE_ID) {
+		dev_err(&client->dev, "Device ak8975 not found\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+/*
+ * Perform some start-of-day setup, including reading the asa calibration
+ * values and caching them.
+ *
+ * Steps: verify the chip ID, read the fuse-ROM sensitivity adjustment
+ * (ASA) values, return to power-down mode, hook up the DRDY GPIO/IRQ
+ * and (AK8963) TRG GPIO when available, then precompute the per-axis
+ * raw-to-milligauss scale factors and clamp limits for the selected
+ * precision (module parameter "precision": 16 or 14 bits on AK8963,
+ * fixed 13 bits on AK8975).
+ */
+static int ak8975_setup(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
+	bool output_setting;
+	int ret;
+
+	if ((ret=ak8975_check_id(client))<0)
+		return ret;
+
+	/* Write the fused rom access mode. */
+	ret = ak8975_write_data(client,
+				AK8975_REG_CNTL,
+				AK8975_REG_CNTL_MODE_FUSE_ROM,
+				AK8975_REG_CNTL_MODE_MASK,
+				AK8975_REG_CNTL_MODE_SHIFT);
+	if (ret < 0) {
+		dev_err(&client->dev, "Error in setting fuse access mode\n");
+		return ret;
+	}
+
+	/* Get asa data and store in the device data. */
+	ret = i2c_smbus_read_i2c_block_data(client, AK8975_REG_ASAX,
+					    3, data->asa);
+	if (ret < 0) {
+		dev_err(&client->dev, "Not able to read ASA data\n");
+		return ret;
+	}
+
+	/* After reading fuse ROM data set power-down mode */
+	ret = ak8975_write_data(client,
+				AK8975_REG_CNTL,
+				AK8975_REG_CNTL_MODE_POWER_DOWN,
+				AK8975_REG_CNTL_MODE_MASK,
+				AK8975_REG_CNTL_MODE_SHIFT);
+
+	if (ret < 0) {
+		dev_err(&client->dev, "Error in setting power-down mode\n");
+		return ret;
+	}
+
+	/* We may not have a GPIO based IRQ to scan, that is fine, we will
+	 * poll if so */
+	if (gpio_is_valid(data->drdy_gpio)) {
+		ret = gpio_request_one(data->drdy_gpio, GPIOF_IN, "ak8975_drdy");
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"failed to request DRDY GPIO %d, error %d\n",
+				data->drdy_gpio, ret);
+			return ret;
+		}
+		dev_info(&client->dev, "requested DRDY GPIO %d\n", data->drdy_gpio);
+	}
+
+	if (data->drdy_gpio >= 0 || client->irq >= 0) {
+		ret = ak8975_setup_irq(data);
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"Error setting data ready interrupt\n");
+			return ret;
+		}
+	}
+
+	/* TRG pin is an AK8963-only, board-provided optional resource */
+	if (data->pdata) {
+		if (data->chipset == AK8963) {
+			if (gpio_is_valid(data->pdata->trg_gpio)) {
+				int err = gpio_request_one(data->pdata->trg_gpio, GPIOF_IN, "ak8963_trg");
+				if (err < 0) {
+					dev_err(&client->dev,
+						"failed to request TRG GPIO %d, error %d\n",
+						data->pdata->trg_gpio, err);
+				}
+				else {
+					data->trg_gpio = data->pdata->trg_gpio;
+					dev_info(&client->dev,
+						 "requested TRG GPIO %d\n",
+						 data->trg_gpio);
+				}
+			}
+		}
+	}
+/*
+ * Precalculate scale factor (in Gauss units) for each axis and
+ * store in the device data.
+ *
+ * This scale factor is axis-dependent, and is derived from 3 calibration
+ * factors ASA(x), ASA(y), and ASA(z).
+ *
+ * These ASA values are read from the sensor device at start of day, and
+ * cached in the device context struct.
+ *
+ * Adjusting the flux value with the sensitivity adjustment value should be
+ * done via the following formula:
+ *
+ * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 )
+ *
+ * where H is the raw value, ASA is the sensitivity adjustment, and Hadj
+ * is the resultant adjusted value.
+ *
+ * We reduce the formula to:
+ *
+ * Hadj = H * (ASA + 128) / 256
+ *
+ * Chip   | precision | H range         | Flux range (uT) | Scale
+ * AK8975 | 13        | -4096 to 4095   | -1229 to 1229   | 1229/4095  0.300122100122 |
+ * AK8963 | 14        | -8190 to 8190   | -4912 to 4912   | 4912/8190  0.599755799756 |
+ * AK8963 | 16        | -32760 to 32760 | -4912 to 4912   | 4912/32760 0.149938949939 |
+ *
+ *
+ * To go from the raw value to uT is:
+ *
+ * HuT = H * scale.
+ *
+ * Since 1uT = 0.01 gauss, and to have better precision with mG,
+ * our final scale factor becomes:
+ * *10000: 1 T = 10000 G
+ * *1000: for mG
+ * *1000: to 10e-9 precision
+ * Hadj = H * ((ASA + 128) / 256) * scale * 10000*1000
+ *
+ * Since ASA doesn't change, we cache the resultant scale factor into the
+ * device context in ak8975_setup().
+ */
+#define RAW_TO_mGAUSS_8975(asa) ((((asa) + 128) * (1229*10000000000LL/4095)) / 256)
+#define RAW_TO_mGAUSS_8963_14BITS(asa) ((((asa) + 128) * (4912*10000000000LL/8190)) / 256)
+#define RAW_TO_mGAUSS_8963_16BITS(asa) ((((asa) + 128) * (4912*10000000000LL/32760)) / 256)
+	if (data->chipset == AK8963) {
+		if ( precision == 16 ) {
+			output_setting = 1;
+			data->raw_to_gauss[0] = RAW_TO_mGAUSS_8963_16BITS(data->asa[0]);
+			data->raw_to_gauss[1] = RAW_TO_mGAUSS_8963_16BITS(data->asa[1]);
+			data->raw_to_gauss[2] = RAW_TO_mGAUSS_8963_16BITS(data->asa[2]);
+			data->clamp[0] = -32760;
+			data->clamp[1] = +32760;
+			precision = 16;
+			dev_info(&client->dev,
+				 "AK8963 16 bits precision\n");
+		}
+		else {
+			/* any value other than 16 falls back to 14 bits */
+			output_setting = 0;
+			data->raw_to_gauss[0] = RAW_TO_mGAUSS_8963_14BITS(data->asa[0]);
+			data->raw_to_gauss[1] = RAW_TO_mGAUSS_8963_14BITS(data->asa[1]);
+			data->raw_to_gauss[2] = RAW_TO_mGAUSS_8963_14BITS(data->asa[2]);
+			data->clamp[0] = -8190;
+			data->clamp[1] = +8190;
+			precision = 14;
+			dev_info(&client->dev,
+				 "AK8963 14 bits precision\n");
+		}
+		/* Set up precision on ak8963. */
+		ret = ak8975_write_data(client,
+					AK8975_REG_CNTL,
+					output_setting,
+					AK8963_REG_CNTL_BIT_MASK,
+					AK8963_REG_CNTL_BIT_SHIFT);
+		if (ret < 0) {
+			dev_err(&client->dev, "Error in setting AK8963 precision mode\n");
+			return ret;
+		}
+	} else {
+		data->raw_to_gauss[0] = RAW_TO_mGAUSS_8975(data->asa[0]);
+		data->raw_to_gauss[1] = RAW_TO_mGAUSS_8975(data->asa[1]);
+		data->raw_to_gauss[2] = RAW_TO_mGAUSS_8975(data->asa[2]);
+		data->clamp[0] = -4096;
+		data->clamp[1] = +4095;
+		precision = 13;
+		dev_info(&client->dev,
+			 "AK8975 13 bits precision\n");
+	}
+	dev_info(&client->dev,
+		 "Scale X %Ld, Y %Ld, Z %Ld\n", data->raw_to_gauss[0], data->raw_to_gauss[1], data->raw_to_gauss[2]);
+
+	return 0;
+}
+
+/*
+ * Shows the device's mode. 0 = off, 1 = on.
+ */
+static ssize_t show_mode(struct device *dev, struct device_attribute *devattr,
+			 char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ak8975_data *data = iio_priv(indio_dev);
+
+	/* cached value; the hardware register is not re-read */
+	return sprintf(buf, "%u\n", data->mode);
+}
+
+/*
+ * Sets the device's mode. 0 = off, 1 = on. The device's mode must be on
+ * for the magn raw attributes to be available.
+ */
+static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
+			  const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ak8975_data *data = iio_priv(indio_dev);
+	struct i2c_client *client = data->client;
+	bool value;
+	int ret;
+
+	/* Convert mode string and do some basic sanity checking on it.
+	   only 0 or 1 are valid. */
+	ret = strtobool(buf, &value);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&data->lock);
+
+	/* Write the mode to the device only when it actually changes. */
+	if (data->mode != value) {
+		ret = ak8975_write_data(client,
+					AK8975_REG_CNTL,
+					(u8)value,
+					AK8975_REG_CNTL_MODE_MASK,
+					AK8975_REG_CNTL_MODE_SHIFT);
+
+		if (ret < 0) {
+			dev_err(&client->dev, "Error in setting mode\n");
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+		data->mode = value;
+	}
+
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+/*
+ * Shows the device's orientation matrix (3x3, from platform data),
+ * rows separated by semicolons.
+ */
+static ssize_t show_in_magn_matrix(struct device *dev, struct device_attribute *devattr,
+				   char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ak8975_data *data = iio_priv(indio_dev);
+
+/* local shorthand for the platform orientation matrix */
+#define M data->pdata->orientation
+
+	return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+		       M[0][0], M[0][1], M[0][2],
+		       M[1][0], M[1][1], M[1][2],
+		       M[2][0], M[2][1], M[2][2]);
+#undef M
+}
+
+#ifdef CONFIG_PARROT_IIO_AK8975_TIMESTAMPS
+/*
+ * Shows the device's latency timestamps (ns), space separated, in the
+ * order of enum ak8975_timestamps_list.
+ *
+ * Fix: Kconfig symbols are exposed to C with a CONFIG_ prefix, so the
+ * original "#ifdef PARROT_IIO_AK8975_TIMESTAMPS" could never be true
+ * and this attribute was unconditionally compiled out.
+ */
+static ssize_t show_timestamps(struct device *dev, struct device_attribute *devattr,
+			       char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ak8975_data *data = iio_priv(indio_dev);
+	int i, c=0;
+
+	for (i=TS_trigger; i< TS_NR; i++) {
+		c+=sprintf(&buf[c],"%Ld ", data->timestamps[i]);
+	}
+	return c;
+}
+#endif /* CONFIG_PARROT_IIO_AK8975_TIMESTAMPS */
+
+/*
+ * Poll the DRDY GPIO until it goes high or the timeout elapses, then
+ * read and return ST1 (status register). Returns a negative error on
+ * timeout or I2C failure.
+ */
+static int wait_conversion_complete_gpio(struct ak8975_data *data)
+{
+	struct i2c_client *client = data->client;
+	u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT;
+	int ret;
+
+	/* Wait for the conversion to complete. */
+	dev_dbg(&client->dev, "%s on GPIO %d\n", __func__, data->drdy_gpio);
+	while (timeout_ms) {
+		msleep(AK8975_CONVERSION_DONE_POLL_TIME);
+		if (gpio_get_value(data->drdy_gpio))
+			break;
+		timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
+	}
+	if (!timeout_ms) {
+		dev_err(&client->dev, "Conversion timeout happened\n");
+		return -EINVAL;
+	}
+
+	/* reading ST1 is required before reading the measurement data */
+	ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+	if (ret < 0)
+		dev_err(&client->dev, "Error in reading ST1\n");
+
+	return ret;
+}
+
+static int wait_conversion_complete_polled(struct ak8975_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 read_status;
+ u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT;
+ int ret;
+
+ /* Wait for the conversion to complete. */
+ dev_dbg(&client->dev, "%s\n", __func__);
+ while (timeout_ms) {
+ msleep(AK8975_CONVERSION_DONE_POLL_TIME);
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in reading ST1\n");
+ return ret;
+ }
+ read_status = ret;
+ if (read_status & AK8975_REG_ST1_DRDY_MASK)
+ break;
+ timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME;
+ }
+ if (!timeout_ms) {
+ dev_err(&client->dev, "Conversion timeout happened\n");
+ return -EINVAL;
+ }
+ return read_status;
+}
+
+/* Returns 0 if the end of conversion interrupt occured or -ETIME otherwise */
+static int wait_conversion_complete_interrupt(struct ak8975_data *data)
+{
+ int ret;
+
+ dev_dbg(&data->client->dev, "%s on IRQ %d\n", __func__, data->drdy_irq);
+
+ ret = wait_event_timeout(data->data_ready_queue,
+ test_bit(0, &data->flags),
+ AK8975_DATA_READY_TIMEOUT);
+ clear_bit(0, &data->flags);
+
+ return ret > 0 ? 0 : -ETIME;
+}
+
+/* Trigger one single-shot measurement, wait for completion, and validate ST1/ST2 status. */
+static int ak8975_request_single(struct ak8975_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+
+ data->timestamps[TS_request] = iio_get_time_ns();
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ /* Set up the device for taking a sample (CNTL.MODE = single measurement). */
+ ret = ak8975_write_data(client,
+ AK8975_REG_CNTL,
+ AK8975_REG_CNTL_MODE_ONCE,
+ AK8975_REG_CNTL_MODE_MASK,
+ AK8975_REG_CNTL_MODE_SHIFT);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: Error in setting operating mode\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(&client->dev, "%s: Single measurement mode\n", __func__);
+
+
+ /* Wait for the conversion to complete; strategy depends on available wiring. */
+ if (data->drdy_irq >=0 ) { /* IRQ path: wait on event, then read ST1 so ret holds the status byte */
+ ret = wait_conversion_complete_interrupt(data);
+ if (ret == 0) {
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST1);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: Error in reading ST1\n", __func__);
+ return ret;
+ }
+ }
+ }
+ else if (gpio_is_valid(data->drdy_gpio)) /* GPIO path: poll DRDY pin, helper returns ST1 */
+ ret = wait_conversion_complete_gpio(data);
+ else
+ ret = wait_conversion_complete_polled(data); /* fallback: poll ST1 over I2C */
+ if (ret < 0)
+ return ret;
+ data->timestamps[TS_data_ready] = iio_get_time_ns();
+
+ dev_dbg(&client->dev, "%s: ST1: %02X\n", __func__, ret); /* ret now carries ST1 contents */
+
+ if (ret & AK8975_REG_ST1_DRDY_MASK) { /* data ready: check ST2 for error/overflow flags */
+ ret = i2c_smbus_read_byte_data(client, AK8975_REG_ST2);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: Error in reading ST2\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(&client->dev, "%s: ST2: %02X\n", __func__, ret);
+
+ if (ret & (AK8975_REG_ST2_DERR_MASK |
+ AK8975_REG_ST2_HOFL_MASK)) { /* data error or magnetic sensor overflow */
+ dev_err(&client->dev, "%s: ST2 status error 0x%x\n", __func__, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Emits the raw flux value for the x, y, or z axis (IIO_CHAN_INFO_RAW path).
+ */
+static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ data->timestamps[TS_trigger] = iio_get_time_ns(); /* timestamped before taking the lock */
+ mutex_lock(&data->lock);
+
+ if (data->mode == 0) { /* the sysfs 'mode' attribute must be on (1) for raw reads */
+ dev_err(&client->dev, "Operating mode is in power down mode\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ ret = ak8975_request_single(data); /* single-shot conversion + data-ready wait */
+ if (ret < 0)
+ goto exit;
+
+ /* Read the flux value from the appropriate register
+ * (the register is specified in the iio device attributes). */
+ ret = i2c_smbus_read_word_data(client, ak8975_index_to_reg[index]);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: Read axis data fails\n", __func__);
+ goto exit;
+ }
+
+ mutex_unlock(&data->lock);
+
+ /* Clamp to valid range. */
+ *val = clamp_t(s16, ret, data->clamp[0], data->clamp[1]);
+ data->timestamps[TS_push] = iio_get_time_ns();
+ return IIO_VAL_INT;
+
+exit:
+ mutex_unlock(&data->lock); /* error path: release lock and propagate ret */
+ return ret;
+}
+
+static int ak8975_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val2 = 0;
+ return ak8975_read_axis(indio_dev, chan->address, val);
+ case IIO_CHAN_INFO_SCALE:
+ tmp = data->raw_to_gauss[chan->address];
+ *val = tmp / 1000000000L;
+ *val2 = tmp % 1000000000L;
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ return -EINVAL;
+}
+
+static int ak8975_read(struct ak8975_data *data, u16 buf[3])
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = ak8975_request_single(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ AK8975_REG_HXL, 3 * sizeof(u16), (u8 *) buf);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t ak8975_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ak8975_data *data = iio_priv(indio_dev);
+ u8 buffer[16]; /* 3 16-bit channels + padding + ts */
+ int ret;
+
+ data->timestamps[TS_trigger] = iio_get_time_ns();
+
+ ret = ak8975_read(data, (u16 *) buffer);
+ if (ret < 0)
+ goto done;
+
+ data->timestamps[TS_push] = iio_get_time_ns();
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ data->timestamps[TS_push]);
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+enum ak8975_channel_index {
+ CHAN_MAGN_X=0,
+ CHAN_MAGN_Y,
+ CHAN_MAGN_Z,
+ CHAN_MAGN_TIMESTAMP,
+ CHAN_MAGN_NR
+};
+
+#define AK8975_CHANNEL(axis) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = CHAN_MAGN_##axis, \
+ .scan_index = CHAN_MAGN_##axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .storagebits = 16, \
+ .realbits = 16, \
+ .shift = 0, \
+ .endianness = IIO_LE, \
+ } \
+ }
+
+static const struct iio_chan_spec ak8975_channels[] = {
+ AK8975_CHANNEL(X),
+ AK8975_CHANNEL(Y),
+ AK8975_CHANNEL(Z),
+ /*
+ * Convenience macro for timestamps.
+ * CHAN_MAGN_TIMESTAMP is the index in the buffer.
+ */
+ IIO_CHAN_SOFT_TIMESTAMP(CHAN_MAGN_TIMESTAMP)
+};
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode, 0);
+static IIO_DEVICE_ATTR(in_magn_matrix, S_IRUGO, show_in_magn_matrix, NULL, 1);
+
+#ifdef PARROT_IIO_AK8975_TIMESTAMPS
+static IIO_DEVICE_ATTR(timestamps, S_IRUGO, show_timestamps, NULL, 2);
+#endif /* PARROT_IIO_AK8975_TIMESTAMPS */
+
+static struct attribute *ak8975_attr[] = {
+ &iio_dev_attr_mode.dev_attr.attr,
+ &iio_dev_attr_in_magn_matrix.dev_attr.attr,
+#ifdef PARROT_IIO_AK8975_TIMESTAMPS
+ &iio_dev_attr_timestamps.dev_attr.attr,
+#endif /* PARROT_IIO_AK8975_TIMESTAMPS */
+ NULL
+};
+
+static struct attribute_group ak8975_attr_group = {
+ .attrs = ak8975_attr,
+};
+
+static const struct iio_info ak8975_info = {
+ .attrs = &ak8975_attr_group,
+ .read_raw = &ak8975_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long ak8975_scan_masks[] = {0x7, 0}; /* 3 channels X Y Z */
+
+static int ak8975_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ak8975_data *data = NULL;
+ struct ak8975_platform_data *pdata = NULL;
+ struct iio_dev *indio_dev = NULL;
+ int drdy_gpio = -1; /* -1 = no DRDY GPIO wired; polling/IRQ fallback applies */
+ int err;
+ const char *name = NULL;
+
+ if ((err=ak8975_check_id(client))<0) /* verify WIA device id before touching anything */
+ return err;
+
+ /* Grab and set up the supplied GPIO for DRDY. */
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata)
+ drdy_gpio = pdata->drdy_gpio;
+
+ if (drdy_gpio == -EPROBE_DEFER) /* GPIO provider not ready yet: retry probe later */
+ return -EPROBE_DEFER;
+
+ /* Register with IIO */
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->pdata = pdata;
+ data->drdy_gpio = drdy_gpio;
+ data->drdy_irq = -1; /* no IRQ requested in this function */
+ data->trg_gpio = -1;
+ data->mode = mode; /* 'mode' is a module parameter — defined outside this excerpt */
+
+ if (id) {
+ data->chipset = (enum asahi_compass_chipset)(id->driver_data);
+ name = id->name;
+ } else {
+ return -ENOSYS; /* no id table match: cannot tell AK8975 from AK8963 */
+ }
+ dev_info(&client->dev, "Asahi Kasei Compass chip %s\n", name);
+
+ snprintf(data->name,
+ sizeof(data->name),
+ "%s_%s",
+ name,
+ dev_name(&client->dev));
+
+ /* Perform some basic start-of-day setup of the device. */
+ err = ak8975_setup(client);
+ if (err < 0) {
+ dev_err(&client->dev, "AK8975 initialization fails\n");
+ goto exit_gpio;
+ }
+
+ mutex_init(&data->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ak8975_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
+ indio_dev->info = &ak8975_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = ak8975_scan_masks; /* X+Y+Z captured together or not at all */
+ indio_dev->name = name;
+
+ err = iio_triggered_buffer_setup(indio_dev, NULL,
+ ak8975_trigger_handler, NULL);
+ if (err < 0)
+ goto exit_gpio;
+
+ err = devm_iio_device_register(&client->dev, indio_dev);
+ if (err < 0)
+ goto buffer_cleanup;
+
+ return 0;
+
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+exit_gpio:
+ /* NOTE(review): no gpio_request()/request_irq() is visible in this function;
+ * confirm these resources are acquired in ak8975_setup() before freeing here. */
+ if (data->drdy_irq >= 0)
+ devm_free_irq(&client->dev, data->drdy_irq, data);
+ if (gpio_is_valid(data->drdy_gpio))
+ gpio_free(data->drdy_gpio);
+ if (gpio_is_valid(data->trg_gpio))
+ gpio_free(data->trg_gpio);
+ return err;
+}
+
+static int ak8975_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+
+ devm_iio_device_unregister(&client->dev, indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (data->drdy_irq >= 0)
+ devm_free_irq(&client->dev, data->drdy_irq, data);
+ if (gpio_is_valid(data->drdy_gpio))
+ gpio_free(data->drdy_gpio);
+ if (gpio_is_valid(data->trg_gpio))
+ gpio_free(data->trg_gpio);
+
+ return 0;
+}
+
+static const struct i2c_device_id ak8975_id[] = {
+ {IIO_MAGNETOMETER_AK8975, AK8975},
+ {IIO_MAGNETOMETER_AK8963, AK8963},
+ {"AK8963", AK8963},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ak8975_id);
+
+static struct i2c_driver ak8975_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = IIO_MAGNETOMETER_AK8975,
+ },
+ .probe = ak8975_probe,
+ .remove = ak8975_remove, /* __devexit_p() was removed in 3.8; ak8975_remove is not __devexit */
+ .id_table = ak8975_id,
+};
+module_i2c_driver(ak8975_driver);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("AK8975/AK8963 magnetometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/magnetometer/ak8975_regs.h b/drivers/parrot/iio/magnetometer/ak8975_regs.h
new file mode 100644
index 00000000000000..2499803743c397
--- /dev/null
+++ b/drivers/parrot/iio/magnetometer/ak8975_regs.h
@@ -0,0 +1,130 @@
+#ifndef _AK8975_REGS_H_
+#define _AK8975_REGS_H_
+/*
+ * A sensor driver for the magnetometer AK8975/AK8963.
+ *
+ * Magnetic compass sensor driver for monitoring magnetic flux information.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+/*
+ * Register definitions, as well as various shifts and masks to get at the
+ * individual fields of the registers.
+ */
+/* Register WIA: Device ID */
+#define AK8975_REG_WIA 0x00
+#define AK8975_DEVICE_ID 0x48
+
+/* Register INFO: Information */
+#define AK8975_REG_INFO 0x01
+
+/* Register ST1: Status 1 */
+#define AK8975_REG_ST1 0x02
+
+/* DRDY: Data Ready for AK8975/AK8963 */
+#define AK8975_REG_ST1_DRDY_SHIFT 0
+#define AK8975_REG_ST1_DRDY_MASK (1 << AK8975_REG_ST1_DRDY_SHIFT)
+
+/* DRDY: Data Ready for AK8963 */
+#define AK8963_REG_ST1_DOR_SHIFT 1
+#define AK8963_REG_ST1_DOR_MASK (1 << AK8975_REG_ST1_DOR_SHIFT)
+
+/* Register HXL to HZH: Measurement Data */
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+
+/* Register ST2: Status 2 */
+#define AK8975_REG_ST2 0x09
+
+/* DERR: Data Error for AK8975 */
+#define AK8975_REG_ST2_DERR_SHIFT 2
+#define AK8975_REG_ST2_DERR_MASK (1 << AK8975_REG_ST2_DERR_SHIFT)
+
+/* HOFL: Magnetic sensor overflow for AK8975/AK8963 */
+#define AK8975_REG_ST2_HOFL_SHIFT 3
+#define AK8975_REG_ST2_HOFL_MASK (1 << AK8975_REG_ST2_HOFL_SHIFT)
+
+/* BITM: Output bit setting (mirror) for AK8963 */
+#define AK8963_REG_ST2_BITM_SHIFT 4
+#define AK8963_REG_ST2_BITM_MASK (1 << AK8975_REG_ST2_BITM_SHIFT)
+
+/* Register CNTL: Control for AK8975 */
+/* Register CNTL1: Control 1 for AK8963 */
+#define AK8975_REG_CNTL 0x0A
+/* MODE[3:0]: Operation mode setting for AK8975/AK8963 */
+#define AK8975_REG_CNTL_MODE_SHIFT 0
+#define AK8975_REG_CNTL_MODE_MASK (0xF << AK8975_REG_CNTL_MODE_SHIFT)
+#define AK8975_REG_CNTL_MODE_POWER_DOWN 0
+#define AK8975_REG_CNTL_MODE_ONCE 1
+#define AK8975_REG_CNTL_MODE_SELF_TEST 8
+#define AK8975_REG_CNTL_MODE_FUSE_ROM 0xF
+/* MODE[3:0]: Operation mode setting for AK8963 */
+#define AK8963_REG_CNTL_MODE_CONTINUOUS_1 2
+#define AK8963_REG_CNTL_MODE_CONTINUOUS_2 6
+#define AK8963_REG_CNTL_MODE_EXTERNAL_TRIGGER 4
+
+/* BIT: Output bit setting for AK8963 */
+#define AK8963_REG_CNTL_BIT_SHIFT 4
+#define AK8963_REG_CNTL_BIT_MASK (1 << AK8963_REG_CNTL_BIT_SHIFT)
+#define AK8963_REG_CNTL_BIT_14BITS 0
+#define AK8963_REG_CNTL_BIT_16BITS 1
+
+/* Register CNTL2: Control 2 for AK8963 */
+#define AK8963_REG_CNTL2 0x0B
+/* SRST: Soft reset */
+#define AK8963_REG_CNTL2_SRST_SHIFT 0
+#define AK8963_REG_CNTL2_SRST_MASK (0x1 << AK8963_REG_CNTL2_SRST_SHIFT)
+#define AK8963_REG_CNTL2_SRST_NORMAL 0
+#define AK8963_REG_CNTL2_SRST_RESET 1
+
+/* Register RSV: Reserved for AK8975 */
+#define AK8975_REG_RSVC 0x0B
+
+/* Register ASTC: Self Test Control for AK8975/AK8963 */
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_ASTC_SELF_SHIFT 0
+#define AK8975_REG_ASTC_SELF_MASK (0x1 << AK8975_REG_ASTC_SELF_SHIFT)
+#define AK8975_REG_ASTC_SELF_NORMAL 0
+#define AK8975_REG_ASTC_SELF_GENERATE 1
+
+/* Registers TS1, TS2: Test 1, 2 for AK8975/AK8963 */
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+
+/* Register I2CDIS: I2C Disable for AK8975/AK8963 */
+#define AK8975_REG_I2CDIS 0x0F
+
+/* Registers ASAX, ASAY, ASAZ: Sensitivity Adjustment values for AK8975/AK8963 */
+#define AK8975_REG_ASAX 0x10
+#define AK8975_REG_ASAY 0x11
+#define AK8975_REG_ASAZ 0x12
+
+#define AK8975_MAX_REGS AK8975_REG_ASAZ
+
+/*
+ * Miscellaneous values.
+ */
+#define AK8975_MAX_CONVERSION_TIMEOUT 500
+#define AK8975_CONVERSION_DONE_POLL_TIME 10
+#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+
+#endif /* _AK8975_REGS_H_ */
+
diff --git a/drivers/parrot/iio/platform_data/ak8975.h b/drivers/parrot/iio/platform_data/ak8975.h
new file mode 100644
index 00000000000000..e0e188cf4d4b19
--- /dev/null
+++ b/drivers/parrot/iio/platform_data/ak8975.h
@@ -0,0 +1,54 @@
+#ifndef _AK8975_H_
+#define _AK8975_H_
+/**
+ * @file ak8975.h
+ * Parrot IIO driver for Magnetometer AK8975/AK8963
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2015-02-05
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/**
+ * Magnetometer AK8975/AK8963 Device Platform Data
+ *
+ * struct ak8975_platform_data - Platform data for the mpu driver
+ * @orientation: Orientation matrix of the chip
+ *
+ * Contains platform specific information on how to configure the AKM8975 to
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ */
+struct ak8975_platform_data {
+ s8 orientation[3][3]; /* Orientation matrix of the chip */
+ int drdy_gpio; /* GPIO connected to DRDY pin exclusive with I2C irq */
+ int trg_gpio; /* GPIO connected to TRG pin (AK8963) */
+};
+
+/* Compatible Asahi Kasei Compass parts */
+enum asahi_compass_chipset {
+ AK8975,
+ AK8963,
+};
+
+#define IIO_MAGNETOMETER_AK8975 "ak8975"
+#define IIO_MAGNETOMETER_AK8963 "ak8963"
+
+#endif /* _AK8975_H_ */
diff --git a/drivers/parrot/iio/platform_data/bldc_cypress.h b/drivers/parrot/iio/platform_data/bldc_cypress.h
new file mode 100644
index 00000000000000..4d6f58f20455f9
--- /dev/null
+++ b/drivers/parrot/iio/platform_data/bldc_cypress.h
@@ -0,0 +1,18 @@
+
+#ifndef _BLDC_CYPRESS_PLATFORM_H_
+#define _BLDC_CYPRESS_PLATFORM_H_
+
+
+/**
+ * struct bldc_cypress_platform_data - Platform data for the bldc
+ * cypress driver
+ *
+ * @lut: Motors look up table ex: {0, 1, 2, 3}
+ * @spin_dir: Motors spin direction ex: 0b1010, which is CCW/CW/CCW/CW
+ */
+struct bldc_cypress_platform_data {
+ u8 lut[4];
+ u8 spin_dir;
+};
+
+#endif
diff --git a/drivers/parrot/iio/platform_data/invensense_mpu6050.h b/drivers/parrot/iio/platform_data/invensense_mpu6050.h
new file mode 100644
index 00000000000000..9fadfe56f629e7
--- /dev/null
+++ b/drivers/parrot/iio/platform_data/invensense_mpu6050.h
@@ -0,0 +1,41 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __INV_MPU6050_PLATFORM_H_
+#define __INV_MPU6050_PLATFORM_H_
+
+
+/**
+ * struct inv_mpu6050_platform_data - Platform data for the mpu driver
+ * @id: Device identifier
+ * @orientation: Orientation matrix of the chip
+ *
+ * Contains platform specific information on how to configure the MPU6050 to
+ * work on this platform. The orientation matricies are 3x3 rotation matricies
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ *
+ * @fsync: external frame synchronisation (FSYNC) pin sampling. pin connected or not
+ * @clkin: external frame external reference clock input. pin connected or not
+ * @filter_rate: frequency of filter on interrupt
+ */
+struct inv_mpu6050_platform_data {
+ u32 id;
+ s8 orientation[3][3];
+ bool fsync;
+ bool clkin;
+ u32 filter_rate;
+};
+
+#endif
diff --git a/drivers/parrot/iio/pressure/Kconfig b/drivers/parrot/iio/pressure/Kconfig
new file mode 100644
index 00000000000000..39d2fbefb7a634
--- /dev/null
+++ b/drivers/parrot/iio/pressure/Kconfig
@@ -0,0 +1,25 @@
+#
+# Pressure drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Pressure sensors"
+
+config PARROT_IIO_MS5607
+ tristate "Measurement Specialties MS5607-02BA03 pressure sensor driver"
+ depends on I2C && SYSFS
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the Measurement Specialties MS5607
+ pressure sensor connected via I2C.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ms5607.
+
+config PARROT_IIO_MS5607_TIMESTAMPS
+ bool "MS5607: allow display of timestamps"
+ depends on PARROT_IIO_MS5607
+ default n
+
+endmenu
diff --git a/drivers/parrot/iio/pressure/Makefile b/drivers/parrot/iio/pressure/Makefile
new file mode 100644
index 00000000000000..d7d5809bd91808
--- /dev/null
+++ b/drivers/parrot/iio/pressure/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O pressure drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_PARROT_IIO_MS5607) += ms5607.o
diff --git a/drivers/parrot/iio/pressure/ms5607.c b/drivers/parrot/iio/pressure/ms5607.c
new file mode 100644
index 00000000000000..acda81b4ed3f83
--- /dev/null
+++ b/drivers/parrot/iio/pressure/ms5607.c
@@ -0,0 +1,905 @@
+/*
+ * ms5607.c - Support for Measurement Specialties MS5607-02BA03 pressure/temperature sensor
+ *
+ * Copyright (c) 2015 Parrot <didier.leymarie.ext@parrot.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x76 or 0x77)
+ *
+ */
+//#define DEBUG
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+#include <linux/math64.h>
+
+#define MS5607_CMD_RESET 0x1E /* ADC reset command */
+#define MS5607_CMD_ADC_READ 0x00 /* ADC read command */
+#define MS5607_CMD_ADC_CONV 0x40 /* ADC conversion command */
+#define MS5607_CMD_ADC_D1 0x00 /* ADC D1 conversion */
+#define MS5607_CMD_ADC_D2 0x10 /* ADC D2 conversion */
+#define MS5607_CMD_ADC_OSR_256 0x00 /* ADC conversion OSR=256 */
+#define MS5607_CMD_ADC_OSR_512 0x02 /* ADC conversion OSR=512 */
+#define MS5607_CMD_ADC_OSR_1024 0x04 /* ADC conversion OSR=1024 */
+#define MS5607_CMD_ADC_OSR_2048 0x06 /* ADC conversion OSR=2048 */
+#define MS5607_CMD_ADC_OSR_4096 0x08 /* ADC conversion OSR=4096 */
+#define MS5607_CMD_PROM_RD 0xA0 /* PROM read command */
+
+#define MS5607_PROM_SZ 8 /* size in word of calibration PROM */
+enum ms5607_oversampling_ratio {
+ OSR_256=0,
+ OSR_512,
+ OSR_1024,
+ OSR_2048,
+ OSR_4096,
+ OSR_NR
+};
+
+struct ms5607_factory_calibration {
+ uint32_t C1; /* Pressure sensitivity | SENS T1 */
+ uint32_t C2; /* Pressure offset | OFF T1 */
+ uint32_t C3; /* Temperature coefficient of pressure sensitivity | TCS */
+ uint32_t C4; /* Temperature coefficient of pressure offset | TCO */
+ uint32_t C5; /* Reference temperature | T REF */
+ uint32_t C6; /* Temperature coefficient of the temperature | TEMPSENS */
+};
+
+enum ms5607_attributs {
+ ATTR_PRESSURE,
+ ATTR_TEMPERATURE
+};
+
+enum ms5607_channel_index {
+ CHAN_PRESSURE=0,
+ CHAN_TEMPERATURE,
+ CHAN_TIMESTAMP,
+ CHAN_NR,
+ CHAN_DATA_NR=2
+};
+
+enum ms5607_timestamps_list {
+ TS_trigger,
+
+ TS_request_temperature,
+ TS_ready_temperature,
+ TS_read_temperature,
+ TS_compute_temperature,
+ TS_push_temperature,
+
+ TS_request_pressure,
+ TS_ready_pressure,
+ TS_read_pressure,
+ TS_compute_pressure,
+ TS_push_pressure,
+
+ TS_NR
+};
+
+struct ms5607_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ enum ms5607_oversampling_ratio pressure_osr;
+ enum ms5607_oversampling_ratio temperature_osr;
+ struct ms5607_factory_calibration factory_calibration;
+ s64 timestamps[TS_NR];
+ struct {
+ int32_t data[CHAN_DATA_NR];
+ s64 timestamp;
+ } processed_buffer;
+ struct {
+ uint32_t temperature;
+ uint32_t pressure;
+ } raw_buffer;
+};
+
+static const int ms5607_oversampling_ratio_values[OSR_NR] = {
+ [OSR_256] = 256,
+ [OSR_512] = 512,
+ [OSR_1024] = 1024,
+ [OSR_2048] = 2048,
+ [OSR_4096] = 4096
+};
+#define OSR_AVAILABLE "256 512 1024 2048 4096"
+
+#define PRESSURE_OSR_DEFAULT OSR_1024
+#define TEMPERATURE_OSR_DEFAULT OSR_1024
+
+/* module parameters */
+static unsigned pressure_osr = 1<<(PRESSURE_OSR_DEFAULT+8);
+module_param(pressure_osr, int, S_IRUGO);
+
+static unsigned temperature_osr = 1<<(TEMPERATURE_OSR_DEFAULT+8);
+module_param(temperature_osr, int, S_IRUGO);
+
+static void setup_oversampling_ratios(struct ms5607_data *data)
+{
+ enum ms5607_oversampling_ratio new_osr, i;
+
+ new_osr = PRESSURE_OSR_DEFAULT;
+ for (i = OSR_256; i < OSR_NR; i++) {
+ if (pressure_osr == ms5607_oversampling_ratio_values[i])
+ {
+ new_osr = i;
+ break;
+ }
+ }
+ data->pressure_osr = new_osr;
+
+ new_osr = TEMPERATURE_OSR_DEFAULT;
+ for (i = OSR_256; i < OSR_NR; i++) {
+ if (temperature_osr == ms5607_oversampling_ratio_values[i])
+ {
+ new_osr = i;
+ break;
+ }
+ }
+ data->temperature_osr = new_osr;
+}
+
+/*
+ * Shows the oversampling ratio
+ */
+static ssize_t show_oversampling_ratio(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
+ struct ms5607_data *data = iio_priv(indio_dev);
+ int value;
+
+ switch (this_attr->address) {
+ case ATTR_PRESSURE:
+ value = ms5607_oversampling_ratio_values[data->pressure_osr];
+ break;
+ case ATTR_TEMPERATURE:
+ value = ms5607_oversampling_ratio_values[data->temperature_osr];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%d\n", value);
+}
+
+/*
+ * Sets the oversampling ratio
+ */
+static ssize_t store_oversampling_ratio(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ms5607_data *data = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
+ int value;
+ int ret, f;
+ enum ms5607_oversampling_ratio new_osr, prev_osr, i;
+
+ switch (this_attr->address) {
+ case ATTR_PRESSURE:
+ prev_osr = data->pressure_osr;
+ break;
+ case ATTR_TEMPERATURE:
+ prev_osr = data->temperature_osr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = sscanf(buf, "%d", &value);
+ if (ret != 1)
+ return -EINVAL;
+
+ f = 0;
+ for (i = OSR_256; i < OSR_NR; i++) {
+ if (value == ms5607_oversampling_ratio_values[i])
+ {
+ new_osr = i;
+ f = 1;
+ break;
+ }
+ }
+ if (!f)
+ return -EINVAL;
+
+ if (new_osr == prev_osr)
+ return count;
+
+ dev_dbg(&data->client->dev, "%s: Attribut %Lu, OSR %d -> %d\n", __func__,this_attr->address,
+ ms5607_oversampling_ratio_values[prev_osr],
+ ms5607_oversampling_ratio_values[new_osr]);
+
+ mutex_lock(&data->lock);
+
+ switch (this_attr->address) {
+ case ATTR_PRESSURE:
+ data->pressure_osr = new_osr;
+ break;
+ case ATTR_TEMPERATURE:
+ data->temperature_osr = new_osr;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+#ifdef CONFIG_PARROT_IIO_MS5607_TIMESTAMPS
+/*
+ * Shows the device's timestamps.
+ */
+static ssize_t show_timestamps(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ms5607_data *data = iio_priv(indio_dev);
+ int i, c=0;
+
+ for (i=TS_trigger; i< TS_NR; i++) {
+ c+=sprintf(&buf[c],"%Ld ", data->timestamps[i]);
+ }
+ return c;
+}
+#endif /* CONFIG_PARROT_IIO_MS5607_TIMESTAMPS */
+
+#if 0
+/*
+ * Shows calibration values
+ */
+static ssize_t show_calibration(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ms5607_data *data = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
+ uint32_t value;
+
+ switch (this_attr->address) {
+ case 0:
+ value = data->factory_calibration.C1;
+ break;
+ case 1:
+ value = data->factory_calibration.C2;
+ break;
+ case 2:
+ value = data->factory_calibration.C3;
+ break;
+ case 3:
+ value = data->factory_calibration.C4;
+ break;
+ case 4:
+ value = data->factory_calibration.C5;
+ break;
+ case 5:
+ value = data->factory_calibration.C6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+#endif
+
+static int ms5607_read_calibration(struct ms5607_data *data)
+{
+ __be16 prom[MS5607_PROM_SZ]; /* 8 PROM words: word0 factory data, words1-6 C1..C6, word7 serial + CRC4 */
+ int i, ret;
+ unsigned int crc_read, n_rem;
+ unsigned char n_bit;
+
+ for (i = 0; i < MS5607_PROM_SZ; i++) {
+ ret = i2c_smbus_write_byte(data->client, MS5607_CMD_PROM_RD+2*i); /* PROM word i lives at 0xA0 + 2*i */
+ if (ret < 0) {
+ dev_err(&data->client->dev, "%s: Command calibration fails\n", __func__);
+ return ret;
+ }
+ ret = i2c_master_recv(data->client, (char *)&prom[i], sizeof(s16));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "%s: Read calibration fails\n", __func__);
+ return ret;
+ }
+ prom[i] = be16_to_cpu(prom[i]); /* device is big-endian; convert in place */
+ }
+
+ /* check CRC4: 4-bit CRC over all 16 PROM bytes with the CRC nibble zeroed out */
+ n_rem = 0;
+ crc_read = prom[7];
+ prom[7] = prom[7] & 0xFF00; /* mask out the stored CRC nibble before computing */
+ for ( i = 0; i < sizeof(prom); i++ ) { /* iterate byte-wise: prom[i>>1] high then low byte */
+ if ( i % 2 ==1)
+ n_rem ^= prom[i>>1] & 0xFF;
+ else
+ n_rem ^= prom[i>>1] >> 8;
+ for ( n_bit = 8; n_bit > 0; n_bit-- ) {
+ if (n_rem & 0x8000)
+ n_rem = (n_rem << 1) ^ 0x3000; /* polynomial used by the vendor CRC scheme */
+ else
+ n_rem = (n_rem << 1);
+ }
+ }
+ n_rem = (0x000F & (n_rem >> 12)); /* final CRC is the top nibble of the remainder */
+ prom[7] = crc_read; /* restore the original word */
+ n_rem = n_rem ^ 0x0;
+
+ if ( n_rem != (crc_read & 0x000F)) {
+ dev_err(&data->client->dev,
+ "Invalid CRC4. Expected 0x%01X, Compute 0x%01X\n",
+ crc_read & 0x000F, n_rem);
+ return -EINVAL;
+ }
+
+ /* retrieve calibration data (word 0 is skipped: factory/reserved data) */
+ i=1;
+ data->factory_calibration.C1 = prom[i++];
+ dev_info(&data->client->dev, "C1: %u\n", data->factory_calibration.C1);
+ data->factory_calibration.C2 = prom[i++];
+ dev_info(&data->client->dev, "C2: %u\n", data->factory_calibration.C2);
+ data->factory_calibration.C3 = prom[i++];
+ dev_info(&data->client->dev, "C3: %u\n", data->factory_calibration.C3);
+ data->factory_calibration.C4 = prom[i++];
+ dev_info(&data->client->dev, "C4: %u\n", data->factory_calibration.C4);
+ data->factory_calibration.C5 = prom[i++];
+ dev_info(&data->client->dev, "C5: %u\n", data->factory_calibration.C5);
+ data->factory_calibration.C6 = prom[i++];
+ dev_info(&data->client->dev, "C6: %u\n", data->factory_calibration.C6);
+
+ return 0;
+}
+
+static const u8 ms5607_cmd_adc_conv[OSR_NR] = {
+ [OSR_256] = MS5607_CMD_ADC_CONV|MS5607_CMD_ADC_OSR_256,
+ [OSR_512] = MS5607_CMD_ADC_CONV|MS5607_CMD_ADC_OSR_512,
+ [OSR_1024] = MS5607_CMD_ADC_CONV|MS5607_CMD_ADC_OSR_1024,
+ [OSR_2048] = MS5607_CMD_ADC_CONV|MS5607_CMD_ADC_OSR_2048,
+ [OSR_4096] = MS5607_CMD_ADC_CONV|MS5607_CMD_ADC_OSR_4096
+};
+
+/* for usleep_range {min, max} */
+static const unsigned long ms5607_conversion_time[OSR_NR][2] = {
+ [OSR_256] = { 620, 650}, /* | 0.48 | 0.54 | 0.60 | ms */
+ [OSR_512] = { 1200, 1300}, /* | 0.95 | 1.06 | 1.17 | ms */
+ [OSR_1024] = { 2300, 2300}, /* | 1.88 | 2.08 | 2.28 | ms */
+ [OSR_2048] = { 4600, 4600}, /* | 3.72 | 4.13 | 4.54 | ms */
+ [OSR_4096] = { 9100, 9100}, /* | 7.40 | 8.22 | 9.04 | ms */
+
+};
+
+/* plausibility limits for the compensated values (Pa and degC) */
+#define MS5607_PRESSURE_MAX_VALUE 120000L
+#define MS5607_PRESSURE_MIN_VALUE 30000L
+
+#define MS5607_TEMPERATURE_MIN_VALUE -19L
+#define MS5607_TEMPERATURE_MAX_VALUE 100L
+
+#define C_TEMPERATURE_SCALER (100L) /**< x 100 scaler */
+#define C_20_DEG_CELSIUS (20L * C_TEMPERATURE_SCALER) /**< +20 degC */
+#define C_15_DEG_CELSIUS (15L * C_TEMPERATURE_SCALER) /**< +15 degC */
+
+#define C_PRESSURE_SCALER (1000LL) /**< x 1000 scaler */
+
+/*
+ * Powers of two used by the datasheet first/second-order equations.
+ *
+ * They are deliberately SIGNED (int64_t): dT and the second-order terms
+ * are negative below the +20 degC reference, and dividing an int64_t by
+ * an unsigned 64-bit constant would convert the dividend to unsigned and
+ * produce garbage.  Signed constants keep the whole computation signed.
+ */
+#define POW_2_4 ((int64_t)1 << 4)
+#define POW_2_6 ((int64_t)1 << 6)
+#define POW_2_7 ((int64_t)1 << 7)
+#define POW_2_8 ((int64_t)1 << 8)
+#define POW_2_15 ((int64_t)1 << 15)
+#define POW_2_16 ((int64_t)1 << 16)
+#define POW_2_17 ((int64_t)1 << 17)
+#define POW_2_21 ((int64_t)1 << 21)
+#define POW_2_23 ((int64_t)1 << 23)
+#define POW_2_31 ((int64_t)1 << 31)
+
+/**
+ * ms5607_temperature_computation() - datasheet temperature equation
+ * @data: driver state holding the PROM factory calibration (C5, C6)
+ * @D2:   raw 24-bit ADC temperature reading
+ *
+ * Implements the MS5607-02BA03 computation:
+ *   dT   = D2 - C5 * 2^8
+ *   TEMP = 2000 + dT * C6 / 2^23              (centi-degC)
+ *   T2   = dT^2 / 2^31 when TEMP < 20 degC    (second-order correction)
+ *
+ * All intermediates are forced to signed 64-bit: dT is negative below the
+ * reference temperature and the previous unsigned arithmetic wrapped,
+ * yielding a bogus temperature for D2 < C5 * 256.
+ *
+ * Return: temperature scaled by 100 (1/100 degC).
+ */
+static
+int32_t ms5607_temperature_computation(struct ms5607_data *data, const uint32_t D2)
+{
+	int64_t T2 = 0;
+	int64_t dT = (int64_t)D2 -
+		     (int64_t)data->factory_calibration.C5 * (int64_t)POW_2_8;
+	int32_t T = C_20_DEG_CELSIUS +
+		    (dT * data->factory_calibration.C6) / (int64_t)POW_2_23;
+
+	if (T < C_20_DEG_CELSIUS) {
+		/* dT * dT is always >= 0, so the division stays exact */
+		T2 = dT * dT / (int64_t)POW_2_31;
+	}
+
+	T = T - T2;
+	return T; /* temperature in 1/100 degC */
+}
+
+/*
+ * div_u64() only accepts an unsigned dividend; fold the sign back in
+ * manually so negative numerators (temperatures below +20 degC) divide
+ * correctly on 32-bit ARM, where a plain 64-bit '/' is unavailable.
+ */
+static int64_t ms5607_div64(int64_t dividend, uint32_t divisor)
+{
+	if (dividend < 0)
+		return -(int64_t)div_u64(-dividend, divisor);
+	return div_u64(dividend, divisor);
+}
+
+/**
+ * ms5607_pressure_computation() - datasheet pressure equation
+ * @data:        driver state holding the PROM calibration (C1..C4, C6)
+ * @temperature: compensated temperature in 1/100 degC
+ * @D1:          raw 24-bit ADC pressure reading
+ *
+ * See the data-sheet to understand this computation.
+ * temp = (T - C_20_DEG_CELSIUS);
+ * dT   = (temp * POW_2_23) / C6;
+ * OFF  = C2 * POW_2_17 + (dT * C4) / POW_2_6;
+ * SENS = C1 * POW_2_16 + (dT * C3) / POW_2_7;
+ *
+ * All divisions are performed signed; the previous code fed possibly
+ * negative numerators to div_u64() and unsigned POW_2_* divisors, which
+ * corrupted the result below the reference temperature.
+ * NOTE(review): the x1000 scaled numerators approach INT64_MAX for large
+ * C3/C4 values -- confirm against real PROM contents.
+ *
+ * Return: pressure scaled by C_PRESSURE_SCALER.
+ */
+static
+int32_t ms5607_pressure_computation(struct ms5607_data *data,
+				    const int32_t temperature,
+				    const uint32_t D1)
+{
+#define K1 61LL
+#define K2 2LL
+#define K3 15LL
+#define K4 8LL
+	int64_t OFF2 = 0;
+	int64_t SENS2 = 0;
+	const int64_t T = temperature;
+	int64_t temp = T - C_20_DEG_CELSIUS;
+	int64_t OFF, SENS;
+	int32_t P;
+
+	OFF = (C_PRESSURE_SCALER * data->factory_calibration.C2 * (int64_t)POW_2_17)
+		+ ms5607_div64(C_PRESSURE_SCALER * temp * (int64_t)POW_2_17 *
+			       data->factory_calibration.C4,
+			       data->factory_calibration.C6);
+
+	SENS = (C_PRESSURE_SCALER * data->factory_calibration.C1 * (int64_t)POW_2_16)
+		+ ms5607_div64(C_PRESSURE_SCALER * temp * (int64_t)POW_2_16 *
+			       data->factory_calibration.C3,
+			       data->factory_calibration.C6);
+
+	if (T < C_20_DEG_CELSIUS)
+	{
+		/* second-order correction below +20 degC */
+		temp = temp * temp;
+		OFF2 = (C_PRESSURE_SCALER * K1 * temp) / (int64_t)POW_2_4;
+		SENS2 = (C_PRESSURE_SCALER * K2 * temp);
+
+		if (T < -C_15_DEG_CELSIUS)
+		{
+			/* additional correction below -15 degC */
+			temp = (T + C_15_DEG_CELSIUS);
+			temp = temp * temp;
+			OFF2 = OFF2 + (C_PRESSURE_SCALER * K3 * temp);
+			SENS2 = SENS2 + (C_PRESSURE_SCALER * K4 * temp);
+		}
+	}
+	SENS = SENS - SENS2;
+	OFF = OFF - OFF2;
+	/* P = (D1 * SENS / 2^21 - OFF) / 2^15, everything signed */
+	P = (((int64_t)D1 * SENS) / (int64_t)POW_2_21 - OFF) / (int64_t)POW_2_15;
+
+	return P;
+}
+
+#define _RAW_PRESSURE data->raw_buffer.pressure
+/*
+ * Start a D1 (pressure) conversion, wait out the conversion time for the
+ * configured oversampling ratio, then read the 24-bit big-endian ADC
+ * result into data->raw_buffer.pressure.
+ * Returns IIO_VAL_INT on success or a negative errno.
+ */
+static int ms5607_read_raw_pressure(struct ms5607_data *data)
+{
+	u8 raw[3];
+	int err;
+
+	data->timestamps[TS_request_pressure] = iio_get_time_ns();
+
+	/* kick off the conversion at the configured OSR */
+	err = i2c_smbus_write_byte(data->client,
+			MS5607_CMD_ADC_D1 | ms5607_cmd_adc_conv[data->pressure_osr]);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_smbus_write_byte(MS5607_CMD_ADC_D1)\n",
+			__func__, err);
+		return err;
+	}
+
+	usleep_range(ms5607_conversion_time[data->pressure_osr][0],
+		     ms5607_conversion_time[data->pressure_osr][1]);
+
+	data->timestamps[TS_ready_pressure] = iio_get_time_ns();
+
+	err = i2c_smbus_write_byte(data->client, MS5607_CMD_ADC_READ);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_smbus_write_byte(MS5607_CMD_ADC_READ)\n",
+			__func__, err);
+		return err;
+	}
+
+	err = i2c_master_recv(data->client, raw, 3);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_master_recv\n", __func__, err);
+		return err;
+	}
+
+	/* assemble the big-endian 24-bit sample */
+	_RAW_PRESSURE = ((u32)raw[0] << 16) | ((u32)raw[1] << 8) | raw[2];
+
+	data->timestamps[TS_read_pressure] = iio_get_time_ns();
+
+	dev_dbg(&data->client->dev, "%s: raw_pressure %u\n", __func__, _RAW_PRESSURE);
+
+	return IIO_VAL_INT;
+}
+
+#define _RAW_TEMPERATURE data->raw_buffer.temperature
+/*
+ * Start a D2 (temperature) conversion, wait out the conversion time for
+ * the configured oversampling ratio, then read the 24-bit big-endian ADC
+ * result into data->raw_buffer.temperature.
+ * Returns IIO_VAL_INT on success or a negative errno.
+ */
+static int ms5607_read_raw_temperature(struct ms5607_data *data)
+{
+	u8 raw[3];
+	int err;
+
+	data->timestamps[TS_request_temperature] = iio_get_time_ns();
+
+	/* kick off the conversion at the configured OSR */
+	err = i2c_smbus_write_byte(data->client,
+			MS5607_CMD_ADC_D2 |
+			ms5607_cmd_adc_conv[data->temperature_osr]);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_smbus_write_byte(MS5607_CMD_ADC_D2)\n",
+			__func__, err);
+		return err;
+	}
+
+	usleep_range(ms5607_conversion_time[data->temperature_osr][0],
+		     ms5607_conversion_time[data->temperature_osr][1]);
+
+	data->timestamps[TS_ready_temperature] = iio_get_time_ns();
+
+	err = i2c_smbus_write_byte(data->client, MS5607_CMD_ADC_READ);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_smbus_write_byte(MS5607_CMD_ADC_READ)\n",
+			__func__, err);
+		return err;
+	}
+
+	err = i2c_master_recv(data->client, raw, 3);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on i2c_master_recv\n", __func__, err);
+		return err;
+	}
+
+	/* assemble the big-endian 24-bit sample */
+	_RAW_TEMPERATURE = ((u32)raw[0] << 16) | ((u32)raw[1] << 8) | raw[2];
+
+	data->timestamps[TS_read_temperature] = iio_get_time_ns();
+
+	dev_dbg(&data->client->dev,
+		"%s: raw_temperature %u\n", __func__,
+		_RAW_TEMPERATURE);
+
+	return IIO_VAL_INT;
+}
+
+#define _TEMPERATURE data->processed_buffer.data[CHAN_TEMPERATURE]
+#define _PRESSURE data->processed_buffer.data[CHAN_PRESSURE]
+/*
+ * Acquire a raw temperature sample, convert it to 1/100 degC and
+ * range-check the result.  Returns IIO_VAL_INT or a negative errno.
+ */
+static int ms5607_read_processed_temperature(struct ms5607_data *data)
+{
+	int err = ms5607_read_raw_temperature(data);
+
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on ms5607_read_raw_temperature\n", __func__, err);
+		return err;
+	}
+
+	_TEMPERATURE = ms5607_temperature_computation(data, _RAW_TEMPERATURE);
+
+	dev_dbg(&data->client->dev, "%s: temperature %d\n",
+		__func__, _TEMPERATURE);
+
+	/* reject readings outside the sensor's specified range */
+	if (_TEMPERATURE < (MS5607_TEMPERATURE_MIN_VALUE * C_TEMPERATURE_SCALER) ||
+	    _TEMPERATURE > (MS5607_TEMPERATURE_MAX_VALUE * C_TEMPERATURE_SCALER)) {
+		dev_err(&data->client->dev,
+			"%s: out of range temperature: %d\n", __func__, _TEMPERATURE);
+		return -EINVAL;
+	}
+
+	data->timestamps[TS_compute_temperature] = iio_get_time_ns();
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * Refresh the compensated pressure.  The pressure equation needs the
+ * current temperature, so the temperature channel is refreshed first.
+ * Returns IIO_VAL_INT or a negative errno.
+ */
+static int ms5607_read_processed_pressure(struct ms5607_data *data)
+{
+	int err = ms5607_read_processed_temperature(data);
+
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on ms5607_read_processed_temperature\n", __func__, err);
+		return err;
+	}
+
+	err = ms5607_read_raw_pressure(data);
+	if (err < 0) {
+		dev_err(&data->client->dev,
+			"%s: error %d on ms5607_read_raw_pressure\n", __func__, err);
+		return err;
+	}
+
+	_PRESSURE = ms5607_pressure_computation(data, _TEMPERATURE, _RAW_PRESSURE);
+
+	dev_dbg(&data->client->dev, "%s: pressure %d\n", __func__, _PRESSURE);
+
+	/* reject readings outside the sensor's specified range */
+	if (_PRESSURE < (MS5607_PRESSURE_MIN_VALUE * C_PRESSURE_SCALER) ||
+	    _PRESSURE > (MS5607_PRESSURE_MAX_VALUE * C_PRESSURE_SCALER)) {
+		dev_err(&data->client->dev,
+			"%s: out of range pressure: %d\n", __func__, _PRESSURE);
+		return -EINVAL;
+	}
+
+	data->timestamps[TS_compute_pressure] = iio_get_time_ns();
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * IIO read_raw callback for sysfs reads of both channels.
+ * NOTE(review): despite the INFO_RAW mask, the values returned are the
+ * processed (compensated) ones, matching the triggered-buffer path.
+ */
+static int ms5607_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val, int *val2, long mask)
+{
+	struct ms5607_data *data = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (chan->address == ATTR_PRESSURE) {
+			mutex_lock(&data->lock);
+			data->timestamps[TS_trigger] = iio_get_time_ns();
+			ret = ms5607_read_processed_pressure(data);
+			*val = _PRESSURE;
+			*val2 = 0;
+			data->timestamps[TS_push_pressure] = iio_get_time_ns();
+			mutex_unlock(&data->lock);
+		} else if (chan->address == ATTR_TEMPERATURE) {
+			mutex_lock(&data->lock);
+			data->timestamps[TS_trigger] = iio_get_time_ns();
+			ret = ms5607_read_processed_temperature(data);
+			*val = _TEMPERATURE;
+			*val2 = 0;
+			data->timestamps[TS_push_temperature] = iio_get_time_ns();
+			mutex_unlock(&data->lock);
+		}
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		/* both channels report a pure fractional scale */
+		*val2 = 0;
+		if (chan->address == ATTR_PRESSURE) {
+			*val = 0;
+			*val2 = 1000000 / C_PRESSURE_SCALER;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		} else if (chan->address == ATTR_TEMPERATURE) {
+			*val = 0;
+			*val2 = 1000000 / C_TEMPERATURE_SCALER;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Refresh both processed channels under the device lock.  Reading the
+ * pressure implicitly reads the temperature first, so one call suffices.
+ */
+static int ms5607_read(struct ms5607_data *data)
+{
+	int err;
+
+	mutex_lock(&data->lock);
+	err = ms5607_read_processed_pressure(data);
+	mutex_unlock(&data->lock);
+
+	return err;
+}
+
+/*
+ * Triggered-buffer bottom half: sample both channels and push them into
+ * the IIO buffer with a timestamp.  Always reports IRQ_HANDLED.
+ */
+static irqreturn_t ms5607_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ms5607_data *data = iio_priv(indio_dev);
+
+	data->timestamps[TS_trigger] = iio_get_time_ns();
+
+	if (ms5607_read(data) >= 0) {
+		/* both channels were refreshed by the single read above */
+		data->timestamps[TS_push_pressure] = iio_get_time_ns();
+		data->timestamps[TS_push_temperature] = data->timestamps[TS_push_pressure];
+		iio_push_to_buffers_with_timestamp(indio_dev,
+						   &data->processed_buffer,
+						   data->timestamps[TS_push_pressure]);
+	}
+
+	iio_trigger_notify_done(indio_dev->trig);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Channel map: compensated pressure and temperature as signed 32-bit
+ * values plus a software timestamp.  The scan indexes must match the
+ * layout of data->processed_buffer.data[] (CHAN_PRESSURE /
+ * CHAN_TEMPERATURE) used by the trigger handler.
+ */
+static const struct iio_chan_spec ms5607_channels[] = {
+	{
+		.type = IIO_PRESSURE,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)|BIT(IIO_CHAN_INFO_SCALE),
+		.address = ATTR_PRESSURE,
+		.channel = CHAN_PRESSURE,
+		.scan_index = CHAN_PRESSURE,
+		.scan_type = {
+			.sign = 's',
+			.storagebits = 32,
+			.realbits = 32,
+			.shift = 0,
+			.endianness = IIO_CPU,
+		},
+		.indexed = 1
+	},
+
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)|BIT(IIO_CHAN_INFO_SCALE),
+		.address = ATTR_TEMPERATURE,
+		.channel = CHAN_TEMPERATURE,
+		.scan_index = CHAN_TEMPERATURE,
+		.scan_type = {
+			.sign = 's',
+			.storagebits = 32,
+			.realbits = 32,
+			.shift = 0,
+			.endianness = IIO_CPU,
+		},
+		.indexed = 1
+	},
+
+	IIO_CHAN_SOFT_TIMESTAMP(CHAN_TIMESTAMP)
+};
+
+#if 0
+/* calibration values (compiled out; kept for reference) */
+static IIO_DEVICE_ATTR(in_calib_C1, S_IRUGO, show_calibration, NULL, 0);
+static IIO_DEVICE_ATTR(in_calib_C2, S_IRUGO, show_calibration, NULL, 1);
+static IIO_DEVICE_ATTR(in_calib_C3, S_IRUGO, show_calibration, NULL, 2);
+static IIO_DEVICE_ATTR(in_calib_C4, S_IRUGO, show_calibration, NULL, 3);
+static IIO_DEVICE_ATTR(in_calib_C5, S_IRUGO, show_calibration, NULL, 4);
+static IIO_DEVICE_ATTR(in_calib_C6, S_IRUGO, show_calibration, NULL, 5);
+#endif
+
+/* constant IIO attribute */
+static IIO_CONST_ATTR(oversampling_ratio_available, OSR_AVAILABLE);
+
+/* oversample_ratios: one rw attribute per channel */
+static IIO_DEVICE_ATTR(oversampling_ratio_pressure,
+		       S_IRUGO | S_IWUSR,
+		       show_oversampling_ratio,
+		       store_oversampling_ratio,
+		       ATTR_PRESSURE);
+static IIO_DEVICE_ATTR(oversampling_ratio_temperature,
+		       S_IRUGO | S_IWUSR,
+		       show_oversampling_ratio,
+		       store_oversampling_ratio,
+		       ATTR_TEMPERATURE);
+
+#ifdef CONFIG_PARROT_IIO_MS5607_TIMESTAMPS
+/* debug attribute exposing the internal timestamp trace */
+static IIO_DEVICE_ATTR(timestamps, S_IRUGO, show_timestamps, NULL, 2);
+#endif /* CONFIG_PARROT_IIO_MS5607_TIMESTAMPS */
+
+/* sysfs attributes published under the IIO device directory */
+static struct attribute *ms5607_attr[] = {
+#if 0
+	&iio_dev_attr_in_calib_C1.dev_attr.attr,
+	&iio_dev_attr_in_calib_C2.dev_attr.attr,
+	&iio_dev_attr_in_calib_C3.dev_attr.attr,
+	&iio_dev_attr_in_calib_C4.dev_attr.attr,
+	&iio_dev_attr_in_calib_C5.dev_attr.attr,
+	&iio_dev_attr_in_calib_C6.dev_attr.attr,
+#endif
+	&iio_dev_attr_oversampling_ratio_pressure.dev_attr.attr,
+	&iio_dev_attr_oversampling_ratio_temperature.dev_attr.attr,
+	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+#ifdef CONFIG_PARROT_IIO_MS5607_TIMESTAMPS
+	&iio_dev_attr_timestamps.dev_attr.attr,
+#endif /* CONFIG_PARROT_IIO_MS5607_TIMESTAMPS */
+	NULL
+};
+
+static struct attribute_group ms5607_attr_group = {
+	.attrs = ms5607_attr,
+};
+
+static const struct iio_info ms5607_info = {
+	.attrs = &ms5607_attr_group,
+	.read_raw = &ms5607_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static const unsigned long ms5607_scan_masks[] = {0x3, 0}; /* 2 channels Pressure & Temperature */
+
+/**
+ * ms5607_probe() - bind an MS5607 I2C client and register the IIO device
+ *
+ * Allocates a devm-managed IIO device, reads and CRC-checks the factory
+ * calibration PROM, sets up the triggered buffer and registers the
+ * device.  Return: 0 on success, negative errno otherwise.
+ */
+static int ms5607_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct ms5607_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	mutex_init(&data->lock);
+
+	i2c_set_clientdata(client, indio_dev);
+	indio_dev->info = &ms5607_info;
+	indio_dev->name = id->name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ms5607_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ms5607_channels);
+	indio_dev->available_scan_masks = ms5607_scan_masks;
+
+	/* default oversampling ratios; adjustable later through sysfs */
+	setup_oversampling_ratios(data);
+
+	/* fail the probe when the PROM CRC4 check rejects the calibration */
+	ret = ms5607_read_calibration(data);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 ms5607_trigger_handler, NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_iio_device_register(&client->dev, indio_dev);
+	if (ret < 0)
+		goto buffer_cleanup;
+
+	dev_info(&client->dev,
+		 "Measurement Specialties MS5607-02BA03 pressure/temperature sensor (%s)\n",
+		 indio_dev->name);
+
+	return 0;
+
+buffer_cleanup:
+	/* the triggered buffer is not devm-managed: undo it by hand */
+	iio_triggered_buffer_cleanup(indio_dev);
+	return ret;
+}
+
+/*
+ * Unbind: unregister the IIO device first (stops userspace access), then
+ * tear down the triggered buffer, which is not devm-managed.
+ */
+static int ms5607_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	devm_iio_device_unregister(&client->dev, indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
+}
+
+/* I2C id table: matches clients declared with the name "ms5607". */
+static const struct i2c_device_id ms5607_id[] = {
+	{ "ms5607", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ms5607_id);
+
+/* Plain I2C driver; no power-management callbacks are provided. */
+static struct i2c_driver ms5607_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "ms5607",
+	},
+	.probe = ms5607_probe,
+	.remove = ms5607_remove,
+	.id_table = ms5607_id,
+};
+module_i2c_driver(ms5607_driver);
+
+MODULE_AUTHOR("Didier Leymarie <didier.leymarie.ext@parrot.com>");
+MODULE_DESCRIPTION("Measurement Specialties MS5607-02BA03 pressure/temperature sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/iio/trigger/Kconfig b/drivers/parrot/iio/trigger/Kconfig
new file mode 100644
index 00000000000000..e2d989d5f7c4ae
--- /dev/null
+++ b/drivers/parrot/iio/trigger/Kconfig
@@ -0,0 +1,19 @@
+#
+# Industrial I/O standalone triggers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Triggers - standalone"
+
+config PARROT_IIO_HRTIMER_TRIGGER
+ tristate "Generic hrtimer trigger"
+ depends on SYSFS
+ select HIGH_RES_TIMERS
+ help
+ Provides support for using a high resolution timer as an IIO
+ trigger.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-hrtimer.
+
+endmenu
diff --git a/drivers/parrot/iio/trigger/Makefile b/drivers/parrot/iio/trigger/Makefile
new file mode 100644
index 00000000000000..8f02b21c815dda
--- /dev/null
+++ b/drivers/parrot/iio/trigger/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for triggers not associated with iio-devices
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_PARROT_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
diff --git a/drivers/parrot/iio/trigger/iio-trig-hrtimer.c b/drivers/parrot/iio/trigger/iio-trig-hrtimer.c
new file mode 100644
index 00000000000000..d2e484b47b043f
--- /dev/null
+++ b/drivers/parrot/iio/trigger/iio-trig-hrtimer.c
@@ -0,0 +1,453 @@
+/*
+ * iio-trig-hrtimer.c - Standalone IIO trigger based on hrtimer
+ *
+ * Copyright (c) 2015 Parrot <didier.leymarie.ext@parrot.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ */
+// #define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/hrtimer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+/*
+ * Per-trigger state for one sysfs-created hrtimer trigger.
+ * @trig:		IIO trigger published to user space
+ * @timer:		backing high-resolution timer
+ * @lock:		serializes sysfs writes to period/frequency/enable
+ * @id:			numeric id chosen by the user via add_trigger
+ * @period_ns:		timer period in nanoseconds
+ * @frequency:		same setting expressed in Hz (kept in sync)
+ * @ktime_period_ns:	period as ktime_t, computed when the timer starts
+ * @enable:		true while the timer is armed
+ * @l:			link in iio_hrtimer_trig_list
+ */
+struct iio_hrtimer_trig {
+	struct iio_trigger *trig;
+	struct hrtimer timer;
+	struct mutex lock;
+	int id;
+	unsigned long period_ns;
+	unsigned long frequency;
+	ktime_t ktime_period_ns;
+	bool enable;
+	struct list_head l;
+};
+
+/* All triggers created so far; protected by iio_hrtimer_trig_list_mut. */
+static LIST_HEAD(iio_hrtimer_trig_list);
+static DEFINE_MUTEX(iio_hrtimer_trig_list_mut);
+
+static int iio_hrtimer_trigger_probe(const int id);
+/*
+ * sysfs "add_trigger": create a new hrtimer trigger with the written
+ * numeric id (echo N > add_trigger).
+ */
+static ssize_t iio_hrtimer_trig_add(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t len)
+{
+	int ret;
+	unsigned long input;
+
+	ret = kstrtoul(buf, 10, &input);
+	if (ret)
+		return ret;
+	/* the id is stored in an int: reject values that would truncate */
+	if (input > INT_MAX)
+		return -EINVAL;
+	ret = iio_hrtimer_trigger_probe(input);
+	if (ret)
+		return ret;
+	return len;
+}
+static DEVICE_ATTR(add_trigger, S_IWUSR, NULL, &iio_hrtimer_trig_add);
+
+static int iio_hrtimer_trigger_remove(const int id);
+/*
+ * sysfs "remove_trigger": destroy the hrtimer trigger with the written
+ * numeric id (echo N > remove_trigger).
+ */
+static ssize_t iio_hrtimer_trig_remove(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t len)
+{
+	int ret;
+	unsigned long input;
+
+	ret = kstrtoul(buf, 10, &input);
+	if (ret)
+		return ret;
+	/* the id is stored in an int: reject values that would truncate */
+	if (input > INT_MAX)
+		return -EINVAL;
+	ret = iio_hrtimer_trigger_remove(input);
+	if (ret)
+		return ret;
+	return len;
+}
+
+static DEVICE_ATTR(remove_trigger, S_IWUSR, NULL, &iio_hrtimer_trig_remove);
+
+/* attributes of the factory device: add_trigger / remove_trigger */
+static struct attribute *iio_hrtimer_trig_attrs[] = {
+	&dev_attr_add_trigger.attr,
+	&dev_attr_remove_trigger.attr,
+	NULL,
+};
+
+static const struct attribute_group iio_hrtimer_trig_group = {
+	.attrs = iio_hrtimer_trig_attrs,
+};
+
+static const struct attribute_group *iio_hrtimer_trig_groups[] = {
+	&iio_hrtimer_trig_group,
+	NULL
+};
+
+/* Nothing to actually do upon release */
+static void iio_trigger_hrtimer_release(struct device *dev)
+{
+}
+
+/* Statically allocated parent device for all hrtimer triggers. */
+static struct device iio_hrtimer_trig_dev = {
+	.bus = &iio_bus_type,
+	.groups = iio_hrtimer_trig_groups,
+	.release = &iio_trigger_hrtimer_release,
+};
+
+/* Timer callback: called at the end of period. */
+static enum hrtimer_restart iio_hrtimer_periodic_callback(struct hrtimer *timer)
+{
+	struct iio_hrtimer_trig *data;
+	ktime_t kt_now;
+	int overrun;
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+
+	data = container_of(timer, struct iio_hrtimer_trig, timer);
+
+	/* re-arm relative to the callback time before polling consumers */
+	kt_now = hrtimer_cb_get_time(&data->timer);
+
+	/*
+	 * hrtimer_forward() returns the number of missed periods.
+	 * NOTE(review): the overrun count is computed but never acted on --
+	 * confirm dropped periods are acceptable for all consumers.
+	 */
+	overrun = hrtimer_forward(&data->timer,
+				  kt_now,
+				  data->ktime_period_ns);
+
+	iio_trigger_poll(data->trig);
+
+	dev_dbg(&data->trig->dev, "hrtimer #%d restarted\n", data->id);
+
+	return ret;
+}
+
+/*
+ * Arm the trigger's timer for one period unless it is already running.
+ * Always returns 1, mirroring iio_hrtimer_stop()'s idle return value.
+ */
+static int iio_hrtimer_start(struct hrtimer *timer)
+{
+	struct iio_hrtimer_trig *t =
+		container_of(timer, struct iio_hrtimer_trig, timer);
+
+	if (hrtimer_active(&t->timer))
+		return 1;
+
+	t->ktime_period_ns = ktime_set(0, t->period_ns);
+	t->timer.function = &iio_hrtimer_periodic_callback;
+	hrtimer_start(&t->timer, t->ktime_period_ns, HRTIMER_MODE_REL);
+
+	return 1;
+}
+
+/*
+ * Cancel the trigger's hrtimer.  hrtimer_cancel() waits for a running
+ * callback to complete, so no further polls occur after this returns.
+ * Returns the last hrtimer_cancel() result, or 1 when the timer was idle.
+ * NOTE(review): the queued-state branch looks redundant after a
+ * successful cancel of an active timer -- kept as-is, to be confirmed.
+ */
+static int iio_hrtimer_stop(struct hrtimer *timer)
+{
+	struct iio_hrtimer_trig *hrt_trig;
+	int ret = 1;
+
+	hrt_trig = container_of(timer, struct iio_hrtimer_trig, timer);
+
+	if (hrtimer_callback_running(timer)) {
+		dev_dbg(&hrt_trig->trig->dev,
+			"hrtimer #%d callback is running\n",
+			hrt_trig->id);
+	}
+
+	if (hrtimer_active(timer) != 0) {
+		ret = hrtimer_cancel(timer);
+		dev_dbg(&hrt_trig->trig->dev,
+			"active hrtimer #%d cancelled: %d\n",
+			hrt_trig->id, ret);
+	}
+
+	if (hrtimer_is_queued(timer) != 0) {
+		ret = hrtimer_cancel(timer);
+		dev_dbg(&hrt_trig->trig->dev,
+			"queued hrtimer #%d cancelled: %d\n",
+			hrt_trig->id, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs "enable": start or stop the trigger's timer.
+ * NOTE(review): the early-exit comparison runs outside the lock; a
+ * concurrent writer could race it -- confirm whether that matters here.
+ */
+static ssize_t iio_hrtimer_enable_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct iio_trigger *trig = to_iio_trigger(dev);
+	struct iio_hrtimer_trig *t = iio_trigger_get_drvdata(trig);
+	bool enable;
+	int err;
+
+	err = strtobool(buf, &enable);
+	if (err < 0)
+		return err;
+
+	if (enable == t->enable)
+		return count;
+
+	mutex_lock(&t->lock);
+	if (enable) {
+		err = iio_hrtimer_start(&t->timer);
+		t->enable = true;
+		dev_info(&t->trig->dev,
+			 "Trigger HR Timer '%s' #%d started\n",
+			 t->trig->name,
+			 t->id);
+	} else {
+		err = iio_hrtimer_stop(&t->timer);
+
+		dev_info(&t->trig->dev,
+			 "Trigger HR Timer '%s' #%d stopped\n",
+			 t->trig->name,
+			 t->id);
+
+		t->enable = false;
+	}
+	mutex_unlock(&t->lock);
+	return count;
+}
+
+/* sysfs "enable" read: report whether the timer is currently armed. */
+static ssize_t iio_hrtimer_enable_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct iio_hrtimer_trig *t =
+		iio_trigger_get_drvdata(to_iio_trigger(dev));
+
+	return sprintf(buf, "%d\n", t->enable);
+}
+
+/* period_ns <==> frequency Hz (conversions use truncating division) */
+#define FREQUENCY_1_GHz (1000000000L)
+
+/* period minimum 10 ns ==> frequency max 100 MHz */
+#define PERIOD_NS_MIN (10L)
+#define FREQUENCY_MAX (FREQUENCY_1_GHz/PERIOD_NS_MIN)
+
+
+/*
+ * sysfs "period_ns": set the timer period.  Rejected while the timer is
+ * running and below PERIOD_NS_MIN; keeps "frequency" in sync.
+ */
+static ssize_t iio_hrtimer_period_ns_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct iio_hrtimer_trig *t =
+		iio_trigger_get_drvdata(to_iio_trigger(dev));
+	unsigned long period;
+	int err;
+
+	err = kstrtoul(buf, 10, &period);
+	if (err < 0)
+		return err;
+
+	if (period == t->period_ns)
+		return count;
+
+	if (period < PERIOD_NS_MIN)
+		return -EINVAL;
+
+	if (hrtimer_active(&t->timer))
+		return -EBUSY;
+
+	mutex_lock(&t->lock);
+	t->period_ns = period;
+	t->frequency = FREQUENCY_1_GHz / period;
+	mutex_unlock(&t->lock);
+
+	return count;
+}
+
+/* sysfs "period_ns" read: report the configured period in nanoseconds. */
+static ssize_t iio_hrtimer_period_ns_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct iio_hrtimer_trig *t =
+		iio_trigger_get_drvdata(to_iio_trigger(dev));
+
+	return sprintf(buf, "%lu\n", t->period_ns);
+}
+
+/*
+ * sysfs "frequency": set the trigger rate in Hz.  Rejected while the
+ * timer is running and outside (0, FREQUENCY_MAX]; keeps "period_ns"
+ * in sync.
+ */
+static ssize_t iio_hrtimer_frequency_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct iio_hrtimer_trig *t =
+		iio_trigger_get_drvdata(to_iio_trigger(dev));
+	unsigned long freq;
+	int err;
+
+	err = kstrtoul(buf, 10, &freq);
+	if (err < 0)
+		return err;
+
+	if (freq == t->frequency)
+		return count;
+
+	if (freq == 0 || freq > FREQUENCY_MAX)
+		return -EINVAL;
+
+	if (hrtimer_active(&t->timer))
+		return -EBUSY;
+
+	mutex_lock(&t->lock);
+	t->frequency = freq;
+	t->period_ns = FREQUENCY_1_GHz / freq;
+	mutex_unlock(&t->lock);
+
+	return count;
+}
+
+/* sysfs "frequency" read: report the configured rate in Hz. */
+static ssize_t iio_hrtimer_frequency_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct iio_hrtimer_trig *t =
+		iio_trigger_get_drvdata(to_iio_trigger(dev));
+
+	return sprintf(buf, "%lu\n", t->frequency);
+}
+
+/* per-trigger sysfs attributes: enable / period_ns / frequency */
+static DEVICE_ATTR(enable, S_IRUGO|S_IWUSR, iio_hrtimer_enable_show, iio_hrtimer_enable_store);
+static DEVICE_ATTR(period_ns, S_IRUGO|S_IWUSR, iio_hrtimer_period_ns_show, iio_hrtimer_period_ns_store);
+static DEVICE_ATTR(frequency, S_IRUGO|S_IWUSR, iio_hrtimer_frequency_show, iio_hrtimer_frequency_store);
+
+static struct attribute *iio_hrtimer_trigger_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_period_ns.attr,
+	&dev_attr_frequency.attr,
+	NULL,
+};
+
+static const struct attribute_group iio_hrtimer_trigger_attr_group = {
+	.attrs = iio_hrtimer_trigger_attrs,
+};
+
+static const struct attribute_group *iio_hrtimer_trigger_attr_groups[] = {
+	&iio_hrtimer_trigger_attr_group,
+	NULL
+};
+
+/* no set_trigger_state callback: arming is done via the enable attribute */
+static const struct iio_trigger_ops iio_hrtimer_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Create and register hrtimer trigger #id.  Fails with -EINVAL when a
+ * trigger with the same id already exists.
+ * Called under user control from the add_trigger sysfs attribute.
+ */
+static int iio_hrtimer_trigger_probe(const int id)
+{
+	struct iio_hrtimer_trig *t;
+	int ret;
+	bool foundit = false;
+
+	mutex_lock(&iio_hrtimer_trig_list_mut);
+	list_for_each_entry(t, &iio_hrtimer_trig_list, l)
+		if (id == t->id) {
+			foundit = true;
+			break;
+		}
+	if (foundit) {
+		ret = -EINVAL;
+		goto out1;
+	}
+	/* zero-initialize so every field has a defined value */
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (t == NULL) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+	t->id = id;
+	t->trig = devm_iio_trigger_alloc(&iio_hrtimer_trig_dev,
+					 "hrtimertrig%d",
+					 t->id);
+	if (!t->trig) {
+		ret = -ENOMEM;
+		dev_err(&iio_hrtimer_trig_dev,
+			"devm_iio_trigger_alloc(hrtimertrig%d) failed ENOMEM\n",
+			t->id);
+		goto free_t;
+	}
+
+	t->trig->dev.groups = iio_hrtimer_trigger_attr_groups;
+	t->trig->ops = &iio_hrtimer_trigger_ops;
+	t->trig->dev.parent = &iio_hrtimer_trig_dev;
+	iio_trigger_set_drvdata(t->trig, t);
+
+	mutex_init(&t->lock);
+	hrtimer_init(&t->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	/* default to one tick per jiffy until configured via sysfs */
+	t->period_ns = 1000000000LL/HZ;
+	t->frequency = HZ;
+	t->enable = false;
+
+	ret = iio_trigger_register(t->trig);
+	if (ret)
+		goto out2;
+	list_add(&t->l, &iio_hrtimer_trig_list);
+	__module_get(THIS_MODULE);
+	mutex_unlock(&iio_hrtimer_trig_list_mut);
+	dev_info(&t->trig->dev, "Trigger HR Timer '%s' #%d created\n",
+		 t->trig->name,
+		 t->id);
+	return 0;
+
+out2:
+	/*
+	 * The trigger came from devm_iio_trigger_alloc(): release it through
+	 * the devres API (as iio_hrtimer_trigger_remove() does).  Calling
+	 * iio_trigger_put() here would drop the reference a second time when
+	 * the parent device is released.
+	 */
+	devm_iio_trigger_free(&iio_hrtimer_trig_dev, t->trig);
+free_t:
+	kfree(t);
+out1:
+	mutex_unlock(&iio_hrtimer_trig_list_mut);
+	return ret;
+}
+
+/*
+ * Destroy hrtimer trigger #id: stop its timer, unregister it from IIO
+ * and free all resources.  Returns -EINVAL when no such trigger exists.
+ */
+static int iio_hrtimer_trigger_remove(const int id)
+{
+	bool foundit = false;
+	struct iio_hrtimer_trig *t;
+	mutex_lock(&iio_hrtimer_trig_list_mut);
+	list_for_each_entry(t, &iio_hrtimer_trig_list, l)
+		if (id == t->id) {
+			foundit = true;
+			break;
+		}
+	if (!foundit) {
+		mutex_unlock(&iio_hrtimer_trig_list_mut);
+		return -EINVAL;
+	}
+
+	/* stop the timer before tearing the trigger down */
+	iio_hrtimer_stop(&t->timer);
+	iio_trigger_unregister(t->trig);
+	devm_iio_trigger_free(&iio_hrtimer_trig_dev, t->trig);
+
+	list_del(&t->l);
+	kfree(t);
+	module_put(THIS_MODULE);
+	mutex_unlock(&iio_hrtimer_trig_list_mut);
+	return 0;
+}
+
+
+/* Register the factory device that exposes add_trigger/remove_trigger. */
+static int __init iio_hrtimer_trig_init(void)
+{
+	device_initialize(&iio_hrtimer_trig_dev);
+	dev_set_name(&iio_hrtimer_trig_dev, "iio_hrtimer_trigger");
+	return device_add(&iio_hrtimer_trig_dev);
+}
+module_init(iio_hrtimer_trig_init);
+
+/* Remove the factory device; module refcounting keeps us loaded while
+ * any trigger created through it still exists. */
+static void __exit iio_hrtimer_trig_exit(void)
+{
+	device_unregister(&iio_hrtimer_trig_dev);
+}
+module_exit(iio_hrtimer_trig_exit);
+
+MODULE_AUTHOR("Didier Leymarie <didier.leymarie.ext@parrot.com>");
+MODULE_DESCRIPTION("HRtimer trigger for the IIO subsystem");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iio-trig-hrtimer");
diff --git a/drivers/parrot/input/Kconfig b/drivers/parrot/input/Kconfig
new file mode 100644
index 00000000000000..b318245f76ad7b
--- /dev/null
+++ b/drivers/parrot/input/Kconfig
@@ -0,0 +1,3 @@
+config VIRTUAL_WAKEUP_BUTTON
+ tristate "Virtual wakeup button"
+ default n
diff --git a/drivers/parrot/input/Makefile b/drivers/parrot/input/Makefile
new file mode 100644
index 00000000000000..64d77c58f70981
--- /dev/null
+++ b/drivers/parrot/input/Makefile
@@ -0,0 +1,2 @@
+obj-y += touchscreen/
+obj-$(CONFIG_VIRTUAL_WAKEUP_BUTTON) += virtual_wakeup_button.o
diff --git a/drivers/parrot/input/touchscreen/Kconfig b/drivers/parrot/input/touchscreen/Kconfig
new file mode 100644
index 00000000000000..959b57ad111ff7
--- /dev/null
+++ b/drivers/parrot/input/touchscreen/Kconfig
@@ -0,0 +1,21 @@
+config TOUCHSCREEN_ATMEL_MXT
+ tristate "Atmel mXT I2C Touchscreen"
+ depends on I2C && INPUT_TOUCHSCREEN && TI_UB925_LVDS
+ help
+ Say Y here if you have Atmel mXT series I2C touchscreen,
+ such as AT42QT602240/ATMXT224, connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_mxt_ts.
+
+config TOUCHSCREEN_ANTEC
+ tristate "Antec Touchscreen"
+ depends on I2C && INPUT_TOUCHSCREEN && MACH_PARROT_ANTEC
+ default m
+ help
+ Say Y here if you have Antec touchscreen
+
+ To compile this driver as a module, choose M here: the
+ module will be called antec_ts.
diff --git a/drivers/parrot/input/touchscreen/Makefile b/drivers/parrot/input/touchscreen/Makefile
new file mode 100644
index 00000000000000..8946cfa1f4d6ec
--- /dev/null
+++ b/drivers/parrot/input/touchscreen/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ANTEC) += antec_ts.o
diff --git a/drivers/parrot/input/touchscreen/antec_ts.c b/drivers/parrot/input/touchscreen/antec_ts.c
new file mode 100644
index 00000000000000..74853c0bf58b8e
--- /dev/null
+++ b/drivers/parrot/input/touchscreen/antec_ts.c
@@ -0,0 +1,693 @@
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+
+#include <linux/gpio.h>
+#define I2C_DEVICE_ADDRESS 0x0a
+#define OMAP3_DEVKIT_TS_GPIO 130
+#define I2C_DEVICE_NAME "uc6511"
+#define I2C_ADAPTER_NUMBER 2
+
+#define PRINT_XY 0
+#define MAX_TRACK_POINT 5
+#define MAX_12BIT ((1<<12)-1)
+
+typedef struct i2c_client bus_device;
+
+static u16 fingers_x[MAX_TRACK_POINT];
+static u16 fingers_y[MAX_TRACK_POINT];
+
+//################################################################################################
+// read and check parameter file, begin
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <asm/uaccess.h>
+
+#define Calibrate_Name "/system/etc/com.TPCalibration/CalibrateData.txt"
+static u16 Calibrate_data[10]; //x0,y0,x1,y1,x2,y2,x3,y4,host_width,host_height
+static bool Calibrate_run = false;
+static bool init_read= false;
+static u16 TOUCH_MAX_X = 0;
+static u16 TOUCH_MAX_Y = 0;
+static u16 TOUCH_MIN_X = 0;
+static u16 TOUCH_MIN_Y = 0;
+static u16 MUL_X_PARAMETER = 0;
+static u16 DIV_X_PARAMETER = 0;
+static u16 MUL_Y_PARAMETER = 0;
+static u16 DIV_Y_PARAMETER = 0;
+
+//*****added for proc used 20110929
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#define STRINGLEN 1024
+char Touch_Version_buffer[STRINGLEN];
+struct proc_dir_entry *touch_file;
+static char APK_CHECK;
+
+//*****added for proc used 20110929
+
+/* Saved address-space limit; set by initKernelEnv(), restored by callers. */
+mm_segment_t oldfs;
+/* Open the calibration file. Returns the struct file, or NULL on error.
+ * Side effect: marks init_read = true once a file was successfully opened,
+ * so readfile() is not retried on every touch event. */
+static struct file *openFile(char *path,int flag,int mode)
+{
+ struct file *fp = NULL;
+ fp=filp_open(path, flag, 0);
+
+ //printk(KERN_ALERT "fp===== %x",fp);
+ if(IS_ERR(fp))
+ {
+ //printk(KERN_ALERT "NO FILE !");
+ return NULL;
+ }
+ init_read = true;
+ printk(KERN_ALERT " HAVE FILE");
+ return fp;
+}
+
+/* Read up to readlen bytes via the file's read op. Returns bytes read,
+ * or -1 when the file has no read operation. */
+static int readFile(struct file *fp,char *buf,int readlen)
+{
+ if (fp->f_op && fp->f_op->read)
+ return fp->f_op->read(fp,buf,readlen, &fp->f_pos);
+ else
+ return -1;
+}
+
+/* Close a file previously opened with openFile(). Always reports success. */
+static int closeFile(struct file *filp)
+{
+	filp_close(filp, NULL);
+
+	return 0;
+}
+/* Raise the address-space limit to KERNEL_DS so readFile() may pass a
+ * kernel buffer to the VFS read op. The previous limit is kept in the
+ * global oldfs; callers must restore it with set_fs(oldfs). */
+static void initKernelEnv(void)
+{
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+}
+
+/* Read the calibration file and derive the scaling parameters used by
+ * uc6511_work(). On any failure (missing file, out-of-range or degenerate
+ * values) calibration is disabled via Calibrate_run = true and raw
+ * coordinates pass through untouched. */
+static void readfile(void)
+{
+	char buf[1024];
+	struct file *fp;
+	int ret = 0;
+	int i = 0;
+	int k = 0;
+	int last_number = 0;
+	int file_data_number = 0;
+	bool data_error;
+	initKernelEnv();
+
+	fp = openFile(Calibrate_Name, O_RDONLY, 0);
+
+	if (fp != NULL)
+	{
+		memset(buf, 0, 1024);
+		/* Read at most 1023 bytes so buf is always NUL-terminated
+		 * before being printed with %s (reading the full 1024 could
+		 * fill the buffer and leave it unterminated). */
+		if ((ret = readFile(fp, buf, 1023)) > 0) /* get file data */
+			printk(KERN_ALERT"buf:%s\n", buf);
+		else
+			printk(KERN_ALERT"read file error %d\n", ret);
+		closeFile(fp);
+		set_fs(oldfs);
+
+		/* Parse comma-separated decimal values into Calibrate_data[]:
+		 * x0,y0,x1,y1,x2,y2,x3,y3,host_width,host_height.
+		 * Bound file_data_number so a malformed file with extra commas
+		 * cannot write past the 10-entry array. */
+		for (i = 0; i < 1024 && file_data_number < 10; i++)
+		{
+			if (buf[i] == ',')
+			{
+				for (k = last_number; k < i; k++)
+				{
+					if (k == last_number)
+						Calibrate_data[file_data_number] = buf[k] - 48;
+					else
+						Calibrate_data[file_data_number] = Calibrate_data[file_data_number] * 10 + buf[k] - 48;
+				}
+				last_number = i + 1;
+				file_data_number++;
+			}
+		}
+		data_error = false;
+
+		/* Sanity check: no sample point may exceed the reported host
+		 * resolution (even index = X vs width, odd index = Y vs height). */
+		for (i = 0; i < 10; i++)
+		{
+			if ((i % 2) == 0)
+			{
+				if (Calibrate_data[i] > Calibrate_data[8])
+					data_error = true;
+			}
+			else if ((i % 2) == 1)
+			{
+				if (Calibrate_data[i] > Calibrate_data[9])
+					data_error = true;
+			}
+			printk(KERN_INFO "%d\n", Calibrate_data[i]);
+		}
+		/* A zero width/height would divide by zero below. */
+		if (Calibrate_data[8] == 0 || Calibrate_data[9] == 0)
+			data_error = true;
+
+		if (data_error == false) /* values are sane: compute scaling */
+		{
+			TOUCH_MAX_X = Calibrate_data[4] * (MAX_12BIT + 1) / Calibrate_data[8];
+			TOUCH_MAX_Y = Calibrate_data[7] * (MAX_12BIT + 1) / Calibrate_data[9];
+			TOUCH_MIN_X = Calibrate_data[2] * (MAX_12BIT + 1) / Calibrate_data[8];
+			TOUCH_MIN_Y = Calibrate_data[1] * (MAX_12BIT + 1) / Calibrate_data[9];
+			/* Guard the span too: MAX == MIN would divide by zero. */
+			if (TOUCH_MAX_X > TOUCH_MIN_X && TOUCH_MAX_Y > TOUCH_MIN_Y)
+			{
+				MUL_X_PARAMETER = MAX_12BIT * 1000 / (TOUCH_MAX_X - TOUCH_MIN_X);
+				DIV_X_PARAMETER = 1000;
+				MUL_Y_PARAMETER = MAX_12BIT * 1000 / (TOUCH_MAX_Y - TOUCH_MIN_Y);
+				DIV_Y_PARAMETER = 1000;
+				Calibrate_run = false;
+			}
+			else
+				Calibrate_run = true; /* degenerate window: skip calibration */
+		}
+		else
+			Calibrate_run = true;
+	}
+	else
+	{
+		/* No calibration file: fall back to raw coordinates.
+		 * If parrotVTK recalibration is not used, run
+		 * TPCalibration_mode_0.apk to create
+		 * /data/data/com.TPCalibration/CalibrateData.txt */
+		set_fs(oldfs); /* fix: restore the address limit on this path too */
+		Calibrate_run = true;
+	}
+}
+// read and check parameter file, end
+//#################################################################################################
+
+/* Raw 6-byte touch record as delivered by the controller over I2C.
+ * count == 0x80 appears to mark an "all fingers released" record
+ * (see uc6511_work) -- TODO confirm against controller datasheet. */
+struct uc6511_point {
+ char count;
+ char number;
+ u16 x;
+ u16 y;
+};
+
+/* Per-device driver state. */
+struct uc6511 {
+ bus_device *bus;
+ struct input_dev *input;
+ struct work_struct work;
+ struct timer_list timer;
+
+ struct mutex mutex;
+ unsigned disabled:1; /* P: mutex */
+
+ char phys[32];
+};
+
+/* Workqueue handler: read one 6-byte touch record from the controller,
+ * apply the optional file-based calibration, and report multitouch
+ * events once the last finger of a frame has arrived. */
+static void uc6511_work(struct work_struct *work)
+{
+	struct uc6511_point pt;
+	char point_count;
+	char point_no;
+	int x, y;
+	int ret;
+
+	struct uc6511 *ts = container_of(work, struct uc6511, work);
+	struct input_dev *input_dev = ts->input;
+
+	ret = i2c_master_recv(ts->bus, (char *)&pt, 6);
+	if (ret < 6) {
+		printk(KERN_WARNING "uc6511 antec touchscreen can't read i2c values\n");
+		return;
+	}
+	point_count = pt.count;
+	point_no = pt.number;
+
+	/* 0x80 in the count field is the dedicated "all fingers up" record;
+	 * every other record must carry a finger index in 1..MAX_TRACK_POINT,
+	 * otherwise fingers_x[point_no - 1] below would write out of bounds
+	 * with a device-supplied index. */
+	if (point_count != (char)0x80 &&
+	    (point_no < 1 || point_no > MAX_TRACK_POINT)) {
+		printk(KERN_WARNING "uc6511: invalid finger index %d\n", point_no);
+		return;
+	}
+
+	/* Byte-swap the big-endian samples and mirror both axes. */
+	x = MAX_12BIT - swab16(pt.x);
+	y = MAX_12BIT - swab16(pt.y);
+
+	/* Lazily (re)load the calibration file on the first event. */
+	if (init_read == false)
+	{
+		readfile();
+	}
+
+	/* Control commands written through /proc/TouchData (20110929):
+	 * 0x51 disables calibration, 0x52 re-enables it and re-reads the
+	 * calibration file. */
+	if (APK_CHECK == 0x51)
+	{
+		Calibrate_run = true;
+		APK_CHECK = 0x00;
+	}
+	else if (APK_CHECK == 0x52)
+	{
+		Calibrate_run = false;
+		APK_CHECK = 0x00;
+		readfile();
+	}
+
+	if (Calibrate_run == false)
+	{
+		int TmpX = 0, TmpY = 0, TmpX2 = 0, TmpY2 = 0;
+		int px = 0, py = 0;
+
+		px = x;
+		py = y;
+		/* Clamp the sample into the calibrated window... */
+		TmpX2 = (px >= TOUCH_MAX_X) ? (TOUCH_MAX_X) : px;
+		TmpY2 = (py >= TOUCH_MAX_Y) ? (TOUCH_MAX_Y) : py;
+
+		TmpX = (TmpX2 < TOUCH_MIN_X) ? (TOUCH_MIN_X) : TmpX2;
+		TmpY = (TmpY2 < TOUCH_MIN_Y) ? (TOUCH_MIN_Y) : TmpY2;
+
+		TmpX -= TOUCH_MIN_X;
+		TmpY -= TOUCH_MIN_Y;
+
+		TmpX = (TmpX) ? TmpX : 0;
+		TmpY = (TmpY) ? TmpY : 0;
+		/* ...then rescale it to the full 12-bit range. */
+		px = TmpX * MUL_X_PARAMETER / DIV_X_PARAMETER;
+		py = TmpY * MUL_Y_PARAMETER / DIV_Y_PARAMETER;
+
+		/* Scaled value can overshoot 4095; clamp (added 2011/1/24). */
+		if (px > 4095) px = 4095;
+		if (py > 4095) py = 4095;
+
+		x = px; /* For TopTouch PDC2 TP */
+		y = py; /* For TopTouch PDC2 TP */
+	}
+
+	if (point_count != (char)0x80) {
+		fingers_x[point_no - 1] = x;
+		fingers_y[point_no - 1] = y;
+	}
+
+	if (point_count == point_no) {
+		/* Last finger of the frame: flush the whole frame. */
+		for (point_no = 0; point_no < point_count; point_no++) {
+			input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 128);
+			input_report_abs(input_dev, ABS_MT_POSITION_X, fingers_x[(unsigned int)point_no]);
+			input_report_abs(input_dev, ABS_MT_POSITION_Y, fingers_y[(unsigned int)point_no]);
+			input_mt_sync(input_dev);
+		}
+		input_sync(input_dev);
+	}
+	else if (point_count == (char)0x80) {
+		/* Release record: no fingers left on the panel. The (char)
+		 * cast keeps the compare correct whether plain char is
+		 * signed or unsigned on the target. */
+		input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 0);
+		input_mt_sync(input_dev);
+		input_sync(input_dev);
+	}
+}
+
+/* Interrupt handler: defer the I2C read to process context.
+ *
+ * The repeated conversion sequencer controlled by TMR can kick off faster
+ * than the worker drains it; if a work item is already pending we drop
+ * this kick and let the queued worker process the sample sequence
+ * currently in the queue -- it can't be older than 9.4ms. */
+static irqreturn_t uc6511_irq(int irq, void *dev_id)
+{
+	struct uc6511 *ts = dev_id;
+
+	if (!work_pending(&ts->work))
+		schedule_work(&ts->work);
+
+	return IRQ_HANDLED;
+}
+
+/* Stop event reporting: mask the interrupt and flush any queued work.
+ * Protected by ts->mutex; idempotent (no-op if already disabled). */
+static void uc6511_disable(struct uc6511 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (!ts->disabled) {
+
+ ts->disabled = 1;
+ disable_irq(ts->bus->irq);
+
+ /* cancel after disable_irq so no new work can be queued */
+ cancel_work_sync(&ts->work);
+ }
+
+ mutex_unlock(&ts->mutex);
+}
+
+/* Re-enable event reporting; counterpart of uc6511_disable(). */
+static void uc6511_enable(struct uc6511 *ts)
+{
+ mutex_lock(&ts->mutex);
+
+ if (ts->disabled) {
+ ts->disabled = 0;
+ enable_irq(ts->bus->irq);
+ }
+
+ mutex_unlock(&ts->mutex);
+}
+
+/* sysfs "pen_down" read: placeholder, always reports an empty value. */
+static ssize_t uc6511_pen_down_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR(pen_down, 0644, uc6511_pen_down_show, NULL);
+
+/* sysfs "disable" read: report whether event reporting is stopped. */
+static ssize_t uc6511_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uc6511 *ts = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ts->disabled);
+}
+
+/* sysfs "disable" write: any non-zero value disables reporting,
+ * zero re-enables it. Returns count, or a negative parse error. */
+static ssize_t uc6511_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uc6511 *ts = dev_get_drvdata(dev);
+ unsigned long val;
+ int error;
+
+ error = strict_strtoul(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val)
+ uc6511_disable(ts);
+ else
+ uc6511_enable(ts);
+
+ return count;
+}
+
+static DEVICE_ATTR(disable, 0664, uc6511_disable_show, uc6511_disable_store);
+
+/* sysfs attributes exposed under the i2c client device. */
+static struct attribute *uc6511_attributes[] = {
+ &dev_attr_disable.attr,
+ &dev_attr_pen_down.attr,
+ NULL
+};
+
+static const struct attribute_group uc6511_attr_group = {
+ .attrs = uc6511_attributes,
+};
+
+/* Allocate and register the input device, hook the falling-edge IRQ and
+ * create the sysfs attribute group. Returns 0 on success or a negative
+ * errno; on failure everything acquired so far is torn down in reverse
+ * order via the goto cleanup chain. */
+static int __devinit uc6511_construct(bus_device *bus, struct uc6511 *ts)
+{
+ struct input_dev *input_dev;
+//- struct uc6511_platform_data *pdata = bus->dev.platform_data;
+ int err;
+ u16 revid=0;
+ /* NOTE(review): revid is never read from the hardware; the dev_info
+ * below always prints revision 0 -- confirm whether a revision
+ * register read was intended here. */
+ printk(KERN_ERR "uc6511_construct\n");
+ printk(KERN_INFO "%s_construct +\n",I2C_DEVICE_NAME);
+ if (!bus->irq) {
+ dev_err(&bus->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+//-- if (!pdata) {
+//--- dev_err(&bus->dev, "no platform data?\n");
+//---- return -ENODEV;
+//----- }
+
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ return -ENOMEM;
+
+ ts->input = input_dev;
+
+ INIT_WORK(&ts->work, uc6511_work);
+ mutex_init(&ts->mutex);
+
+ snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev));
+
+ input_dev->name = "UC6511 Touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->dev.parent = &bus->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+/* __set_bit(ABS_X, input_dev->absbit);
+ __set_bit(ABS_Y, input_dev->absbit);
+ __set_bit(ABS_PRESSURE, input_dev->absbit);
+ __set_bit(ABS_MT_POSITION_X, input_dev->absbit);
+ __set_bit(ABS_MT_POSITION_Y, input_dev->absbit);
+ __set_bit(ABS_MT_TOUCH_MAJOR, input_dev->absbit);*/
+
+ /* 12-bit coordinate range, as produced by uc6511_work(). */
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ /*err = uc6511_write(bus, AD7879_REG_CTRL2, AD7879_RESET);
+ if (err < 0) {
+ dev_err(&bus->dev, "Failed to write %s\n", input_dev->name);
+ goto err_free_mem;
+ }*/
+
+ err = request_irq(bus->irq, uc6511_irq,
+ IRQF_TRIGGER_FALLING, bus->dev.driver->name, ts);
+
+ if (err) {
+ dev_err(&bus->dev, "irq %d busy?\n", bus->irq);
+ goto err_free_mem;
+ }
+
+ err = sysfs_create_group(&bus->dev.kobj, &uc6511_attr_group);
+ if (err)
+ goto err_free_irq;
+
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_remove_attr;
+
+ dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n",
+ revid >> 8, bus->irq);
+
+ printk(KERN_INFO "%s_construct -\n",I2C_DEVICE_NAME);
+ return 0;
+
+err_remove_attr:
+ sysfs_remove_group(&bus->dev.kobj, &uc6511_attr_group);
+err_free_irq:
+ free_irq(bus->irq, ts);
+err_free_mem:
+ input_free_device(input_dev);
+
+ printk(KERN_ERR "%s_construct error\n",I2C_DEVICE_NAME);
+ return err;
+}
+
+/* Tear down everything uc6511_construct() set up, in reverse order:
+ * stop reporting, remove sysfs group, release the IRQ, unregister input. */
+static int __devexit uc6511_destroy(bus_device *bus, struct uc6511 *ts)
+{
+ uc6511_disable(ts);
+ sysfs_remove_group(&ts->bus->dev.kobj, &uc6511_attr_group);
+ free_irq(ts->bus->irq, ts);
+ input_unregister_device(ts->input);
+ dev_dbg(&bus->dev, "unregistered touchscreen\n");
+
+ return 0;
+}
+
+
+/* PM suspend: write register 0x07 = 0x00 (presumably a sleep command --
+ * TODO confirm against controller datasheet), then stop reporting. */
+static int uc6511_suspend(bus_device *bus, pm_message_t message)
+{
+ struct uc6511 *ts = dev_get_drvdata(&bus->dev);
+ unsigned char buf[2];
+ printk("i2cTP:write-----------------suspend \n");
+ buf[0]=0x07;
+ buf[1]=0x00;
+
+ i2c_master_send(ts->bus, buf, 2);
+ uc6511_disable(ts);
+ return 0;
+}
+
+/* PM resume: re-enable reporting and write register 0x07 = 0x01.
+ * NOTE(review): the command is sent twice -- presumably the first send
+ * only wakes the controller; confirm before removing the duplicate. */
+static int uc6511_resume(bus_device *bus)
+{
+ struct uc6511 *ts = dev_get_drvdata(&bus->dev);
+ unsigned char buf[2];
+ uc6511_enable(ts);
+ printk("i2cTP:write------------------resume \n");
+ buf[0]=0x07;
+ buf[1]=0x01;
+
+ i2c_master_send(ts->bus, buf, 2);
+ i2c_master_send(ts->bus, buf, 2);
+
+ return 0;
+}
+
+//#######################################################
+//*****added for proc used 20110929
+struct i2c_board_info info;
+struct i2c_adapter *adapter;
+struct i2c_client *client2;
+
+/* /proc/TouchData read handler: report the driver version string.
+ * Returns the number of bytes written into @page. */
+int proc_read_touch(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	char z[]="ULTRACHHIP I2C Version is 3.0 ";
+	int len;
+
+	printk("i2cTP:write------------------ \n");
+	printk("\n==%s==222\n",z);
+	strcpy(Touch_Version_buffer,z);
+
+	/* Never pass a data buffer as the format string: if it ever
+	 * contained '%' conversions, sprintf would read garbage off the
+	 * stack. Use an explicit "%s" format instead. */
+	len = sprintf(page, "%s", Touch_Version_buffer); /* hand the version string to the reader */
+	return len;
+}
+#include <asm/uaccess.h>
+
+/* /proc/TouchData write handler: "AA" disables calibration, "BB"
+ * re-enables it (APK_CHECK is consumed by uc6511_work()); anything
+ * else clears the pending command. Returns bytes consumed or -EFAULT. */
+int proc_write_touch(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+	int len;
+	char global_buffer[1024];
+	if (count >= STRINGLEN)
+		len = STRINGLEN-1;
+	else
+		len = count;
+	/* copy_from_user returns the number of bytes NOT copied; the
+	 * original ignored this and could parse a partially-filled
+	 * buffer. Fail the write instead. */
+	if (copy_from_user(global_buffer, buffer, len))
+		return -EFAULT;
+	global_buffer[len] = '\0';
+
+	if (global_buffer[0]=='A' && global_buffer[1]=='A') {
+		APK_CHECK = 0x51;
+	}
+	else if (global_buffer[0]=='B' && global_buffer[1]=='B') {
+		APK_CHECK = 0x52;
+	}
+	else {
+		APK_CHECK = 0x00;
+	}
+	printk("write count : %d",(int)count);
+
+	return len;
+}
+//*****added for proc used 20110929
+//#######################################################
+
+/* All registers are word-sized.
+ * AD7879 uses a high-byte first convention.
+ */
+
+/* I2C probe: allocate driver state, build the input device and create
+ * the /proc/TouchData control entry. Returns 0 or a negative errno. */
+static int __devinit uc6511_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct uc6511 *ts;
+	int error;
+
+	ts = kzalloc(sizeof(struct uc6511), GFP_KERNEL);
+	if (!ts)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, ts);
+	ts->bus = client;
+
+	error = uc6511_construct(client, ts);
+	if (error) {
+		i2c_set_clientdata(client, NULL);
+		kfree(ts);
+		/* fix: don't create the proc entry for a dead device */
+		return error;
+	}
+
+	/* /proc/TouchData: version query + calibration control (20110929).
+	 * create_proc_entry can fail; check before dereferencing. */
+	touch_file = create_proc_entry("TouchData", 0666, NULL);
+	if (touch_file) {
+		touch_file->read_proc = proc_read_touch;
+		touch_file->write_proc = proc_write_touch;
+	}
+
+	return 0;
+}
+
+/* I2C remove: undo uc6511_probe(). Also removes the /proc/TouchData
+ * entry, which the original leaked across module unload cycles. */
+static int __devexit uc6511_remove(struct i2c_client *client)
+{
+	struct uc6511 *ts = dev_get_drvdata(&client->dev);
+
+	if (touch_file) {
+		remove_proc_entry("TouchData", NULL);
+		touch_file = NULL;
+	}
+
+	uc6511_destroy(client, ts);
+	i2c_set_clientdata(client, NULL);
+	kfree(ts);
+	return 0;
+}
+
+/* I2C device match table ("uc6511"). */
+static const struct i2c_device_id uc6511_id[] = {
+ { I2C_DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, uc6511_id);
+
+/* Driver registration: probe/remove plus legacy suspend/resume hooks. */
+static struct i2c_driver uc6511_driver = {
+ .driver = {
+ .name = I2C_DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = uc6511_id,
+ .probe = uc6511_probe,
+ .remove = __devexit_p(uc6511_remove),
+ .suspend = uc6511_suspend,
+ .resume = uc6511_resume,
+};
+
+/* Module init: try loading the calibration file once, then register the
+ * I2C driver. The commented-out code below manually instantiated the
+ * client on a fixed adapter/address; instantiation is now left to the
+ * platform/board code. */
+static int __init uc6511_init(void)
+{
+ printk(KERN_INFO "%s_init +\n",I2C_DEVICE_NAME);
+//#######################################################
+//Check Parameter file if exist or not, begin
+ readfile();
+//Check Parameter file if exist or not, end
+//#######################################################
+/*
+ struct i2c_board_info info;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ printk(KERN_ERR "initialize struct\n");
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ info.addr = I2C_DEVICE_ADDRESS;
+ info.irq = gpio_to_irq(OMAP3_DEVKIT_TS_GPIO);
+ printk(KERN_ERR "irq = %d, copy type\n", info.irq);
+ strlcpy(info.type, I2C_DEVICE_NAME, I2C_NAME_SIZE);
+
+ printk(KERN_ERR "get adapter %d\n", I2C_ADAPTER_NUMBER);
+ adapter = i2c_get_adapter(I2C_ADAPTER_NUMBER);
+ if (!adapter) {
+ printk("*******get_adapter error!\n");
+ }
+ client = i2c_new_device(adapter, &info);
+*/
+//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ printk(KERN_ERR "add driver\n");
+
+ return i2c_add_driver(&uc6511_driver);
+}
+module_init(uc6511_init);
+
+/* Module exit: unregister the I2C driver and log the teardown. */
+static void __exit uc6511_exit(void)
+{
+	i2c_del_driver(&uc6511_driver);
+
+	printk(KERN_INFO "%s_exit +\n", I2C_DEVICE_NAME);
+}
+module_exit(uc6511_exit);
+
+MODULE_AUTHOR("Larry Yang <larry.yang@ultrachip.com>");
+MODULE_DESCRIPTION("uc6511(-1) touchscreen Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:uc6511");
diff --git a/drivers/parrot/input/touchscreen/atmel_mxt_ts.c b/drivers/parrot/input/touchscreen/atmel_mxt_ts.c
new file mode 100644
index 00000000000000..0b6cadda508a5a
--- /dev/null
+++ b/drivers/parrot/input/touchscreen/atmel_mxt_ts.c
@@ -0,0 +1,3373 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Copyright (C) 2011-2012 Atmel Corporation
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include "atmel_mxt_ts.h"
+#include <linux/input/mt.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include "../../i2c/ti_ub925_lvds.h"
+#include "../../../media/dvb/dvb-usb/dib0700_irq.h"
+
+/* Configuration file */
+#define MXT_CFG_MAGIC "OBP_RAW V1"
+
+/* Registers */
+#define MXT_OBJECT_START 0x07
+#define MXT_OBJECT_SIZE 6
+#define MXT_INFO_CHECKSUM_SIZE 3
+#define MXT_MAX_BLOCK_WRITE 256
+
+/* Object types */
+#define MXT_DEBUG_DIAGNOSTIC_T37 37
+#define MXT_GEN_MESSAGE_T5 5
+#define MXT_GEN_COMMAND_T6 6
+#define MXT_GEN_POWER_T7 7
+#define MXT_GEN_ACQUIRE_T8 8
+#define MXT_GEN_DATASOURCE_T53 53
+#define MXT_TOUCH_MULTI_T9 9
+#define MXT_TOUCH_KEYARRAY_T15 15
+#define MXT_TOUCH_PROXIMITY_T23 23
+#define MXT_TOUCH_PROXKEY_T52 52
+#define MXT_PROCI_GRIPFACE_T20 20
+#define MXT_PROCG_NOISE_T22 22
+#define MXT_PROCI_ONETOUCH_T24 24
+#define MXT_PROCI_TWOTOUCH_T27 27
+#define MXT_PROCI_GRIP_T40 40
+#define MXT_PROCI_PALM_T41 41
+#define MXT_PROCI_TOUCHSUPPRESSION_T42 42
+#define MXT_PROCI_STYLUS_T47 47
+#define MXT_PROCG_NOISESUPPRESSION_T48 48
+#define MXT_SPT_COMMSCONFIG_T18 18
+#define MXT_SPT_GPIOPWM_T19 19
+#define MXT_SPT_SELFTEST_T25 25
+#define MXT_SPT_CTECONFIG_T28 28
+#define MXT_SPT_USERDATA_T38 38
+#define MXT_SPT_DIGITIZER_T43 43
+#define MXT_SPT_MESSAGECOUNT_T44 44
+#define MXT_SPT_CTECONFIG_T46 46
+#define MXT_PROCI_ACTIVE_STYLUS_T63 63
+#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100
+
+/* MXT_GEN_MESSAGE_T5 object */
+#define MXT_RPTID_NOMSG 0xff
+
+/* MXT_GEN_COMMAND_T6 field */
+#define MXT_COMMAND_RESET 0
+#define MXT_COMMAND_BACKUPNV 1
+#define MXT_COMMAND_CALIBRATE 2
+#define MXT_COMMAND_REPORTALL 3
+#define MXT_COMMAND_DIAGNOSTIC 5
+
+/* Define for T6 status byte */
+#define MXT_T6_STATUS_RESET (1 << 7)
+#define MXT_T6_STATUS_OFL (1 << 6)
+#define MXT_T6_STATUS_SIGERR (1 << 5)
+#define MXT_T6_STATUS_CAL (1 << 4)
+#define MXT_T6_STATUS_CFGERR (1 << 3)
+#define MXT_T6_STATUS_COMSERR (1 << 2)
+
+/* MXT_GEN_POWER_T7 field */
+struct t7_config {
+ u8 idle;
+ u8 active;
+} __packed;
+
+#define MXT_POWER_CFG_RUN 0
+#define MXT_POWER_CFG_DEEPSLEEP 1
+
+/* MXT_TOUCH_MULTI_T9 field */
+#define MXT_T9_ORIENT 9
+#define MXT_T9_RANGE 18
+
+/* MXT_TOUCH_MULTI_T9 status */
+#define MXT_T9_UNGRIP (1 << 0)
+#define MXT_T9_SUPPRESS (1 << 1)
+#define MXT_T9_AMP (1 << 2)
+#define MXT_T9_VECTOR (1 << 3)
+#define MXT_T9_MOVE (1 << 4)
+#define MXT_T9_RELEASE (1 << 5)
+#define MXT_T9_PRESS (1 << 6)
+#define MXT_T9_DETECT (1 << 7)
+
+struct t9_range {
+ u16 x;
+ u16 y;
+} __packed;
+
+/* MXT_TOUCH_MULTI_T9 orient */
+#define MXT_T9_ORIENT_SWITCH (1 << 0)
+
+/* MXT_SPT_COMMSCONFIG_T18 */
+#define MXT_COMMS_CTRL 0
+#define MXT_COMMS_CMD 1
+#define MXT_COMMS_RETRIGEN (1 << 6)
+
+/* Define for MXT_GEN_COMMAND_T6 */
+#define MXT_BOOT_VALUE 0xa5
+#define MXT_RESET_VALUE 0x01
+#define MXT_BACKUP_VALUE 0x55
+
+/* Define for MXT_PROCI_TOUCHSUPPRESSION_T42 */
+#define MXT_T42_MSG_TCHSUP (1 << 0)
+
+/* T47 Stylus */
+#define MXT_TOUCH_MAJOR_T47_STYLUS 1
+
+/* T63 Stylus */
+#define MXT_T63_STYLUS_PRESS (1 << 0)
+#define MXT_T63_STYLUS_RELEASE (1 << 1)
+#define MXT_T63_STYLUS_MOVE (1 << 2)
+#define MXT_T63_STYLUS_SUPPRESS (1 << 3)
+
+#define MXT_T63_STYLUS_DETECT (1 << 4)
+#define MXT_T63_STYLUS_TIP (1 << 5)
+#define MXT_T63_STYLUS_ERASER (1 << 6)
+#define MXT_T63_STYLUS_BARREL (1 << 7)
+
+#define MXT_T63_STYLUS_PRESSURE_MASK 0x3F
+
+/* T100 Multiple Touch Touchscreen */
+#define MXT_T100_CTRL 0
+#define MXT_T100_CFG1 1
+#define MXT_T100_TCHAUX 3
+#define MXT_T100_XRANGE 13
+#define MXT_T100_YRANGE 24
+
+#define MXT_T100_CFG_SWITCHXY (1 << 5)
+
+#define MXT_T100_TCHAUX_VECT (1 << 0)
+#define MXT_T100_TCHAUX_AMPL (1 << 1)
+#define MXT_T100_TCHAUX_AREA (1 << 2)
+
+#define MXT_T100_DETECT (1 << 7)
+#define MXT_T100_TYPE_MASK 0x70
+#define MXT_T100_TYPE_STYLUS 0x20
+
+/* Delay times */
+#define MXT_BACKUP_TIME 50 /* msec */
+#define MXT_RESET_TIME 200 /* msec */
+#define MXT_RESET_TIMEOUT 3000 /* msec */
+#define MXT_CRC_TIMEOUT 1000 /* msec */
+#define MXT_FW_RESET_TIME 3000 /* msec */
+#define MXT_FW_CHG_TIMEOUT 300 /* msec */
+#define MXT_WAKEUP_TIME 25 /* msec */
+#define MXT_REGULATOR_DELAY 150 /* msec */
+#define MXT_POWERON_DELAY 150 /* msec */
+
+/* Command to unlock bootloader */
+#define MXT_UNLOCK_CMD_MSB 0xaa
+#define MXT_UNLOCK_CMD_LSB 0xdc
+
+/* Bootloader mode status */
+#define MXT_WAITING_BOOTLOAD_CMD 0xc0 /* valid 7 6 bit only */
+#define MXT_WAITING_FRAME_DATA 0x80 /* valid 7 6 bit only */
+#define MXT_FRAME_CRC_CHECK 0x02
+#define MXT_FRAME_CRC_FAIL 0x03
+#define MXT_FRAME_CRC_PASS 0x04
+#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
+#define MXT_BOOT_STATUS_MASK 0x3f
+#define MXT_BOOT_EXTENDED_ID (1 << 5)
+#define MXT_BOOT_ID_MASK 0x1f
+
+/* Touchscreen absolute values */
+#define MXT_MAX_AREA 0xff
+
+#define MXT_PIXELS_PER_MM 20
+
+#define DEBUG_MSG_MAX 200
+
+static int mxt_force_gingerbread_mode = 0;
+module_param(mxt_force_gingerbread_mode, int, 0604);
+MODULE_PARM_DESC(mxt_force_gingerbread_mode, "Workaround for Android 2.3.7");
+
+struct mxt_info {
+ u8 family_id;
+ u8 variant_id;
+ u8 version;
+ u8 build;
+ u8 matrix_xsize;
+ u8 matrix_ysize;
+ u8 object_num;
+};
+
+struct mxt_object {
+ u8 type;
+ u16 start_address;
+ u8 size_minus_one;
+ u8 instances_minus_one;
+ u8 num_report_ids;
+} __packed;
+
+/* Each client has this additional data */
+struct mxt_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ char phys[64]; /* device physical location */
+ struct mxt_platform_data *pdata;
+ struct mxt_object *object_table;
+ struct mxt_info *info;
+ void *raw_info_block;
+ unsigned int irq;
+ unsigned int max_x;
+ unsigned int max_y;
+ bool in_bootloader;
+ u16 mem_size;
+ u8 t100_aux_ampl;
+ u8 t100_aux_area;
+ u8 t100_aux_vect;
+ struct bin_attribute mem_access_attr;
+ bool debug_enabled;
+ bool debug_v2_enabled;
+ u8 *debug_msg_data;
+ u16 debug_msg_count;
+ struct bin_attribute debug_msg_attr;
+ struct mutex debug_msg_lock;
+ u8 max_reportid;
+ u32 config_crc;
+ u32 info_crc;
+ u8 bootloader_addr;
+ struct t7_config t7_cfg;
+ u8 *msg_buf;
+ u8 t6_status;
+ bool update_input;
+ u8 last_message_count;
+ u8 num_touchids;
+ u8 num_stylusids;
+ unsigned long t15_keystatus;
+ bool use_retrigen_workaround;
+ bool use_regulator;
+ struct regulator *reg_vdd;
+ struct regulator *reg_avdd;
+ char *fw_name;
+ char *cfg_name;
+
+ /* Cached parameters from object table */
+ u16 T5_address;
+ u8 T5_msg_size;
+ u8 T6_reportid;
+ u16 T6_address;
+ u16 T7_address;
+ u8 T9_reportid_min;
+ u8 T9_reportid_max;
+ u8 T15_reportid_min;
+ u8 T15_reportid_max;
+ u16 T18_address;
+ u8 T19_reportid;
+ u8 T42_reportid_min;
+ u8 T42_reportid_max;
+ u16 T44_address;
+ u8 T48_reportid;
+ u8 T63_reportid_min;
+ u8 T63_reportid_max;
+ u8 T100_reportid_min;
+ u8 T100_reportid_max;
+
+ /* for fw update in bootloader */
+ struct completion bl_completion;
+
+ /* for reset handling */
+ struct completion reset_completion;
+
+ /* for reset handling */
+ struct completion crc_completion;
+
+ /* Enable reporting of input events */
+ bool enable_reporting;
+
+ /* Indicates whether device is in suspend */
+ bool suspended;
+};
+
+/* Size in bytes of one instance of @obj (the chip stores size - 1). */
+static inline size_t mxt_obj_size(const struct mxt_object *obj)
+{
+	return (size_t)obj->size_minus_one + 1;
+}
+
+/* Number of instances of @obj present (the chip stores instances - 1). */
+static inline size_t mxt_obj_instances(const struct mxt_object *obj)
+{
+	return (size_t)obj->instances_minus_one + 1;
+}
+
+/* Whitelist of object types whose registers may be dumped/read back. */
+static bool mxt_object_readable(unsigned int type)
+{
+ switch (type) {
+ case MXT_GEN_COMMAND_T6:
+ case MXT_GEN_POWER_T7:
+ case MXT_GEN_ACQUIRE_T8:
+ case MXT_GEN_DATASOURCE_T53:
+ case MXT_TOUCH_MULTI_T9:
+ case MXT_TOUCH_KEYARRAY_T15:
+ case MXT_TOUCH_PROXIMITY_T23:
+ case MXT_TOUCH_PROXKEY_T52:
+ case MXT_PROCI_GRIPFACE_T20:
+ case MXT_PROCG_NOISE_T22:
+ case MXT_PROCI_ONETOUCH_T24:
+ case MXT_PROCI_TWOTOUCH_T27:
+ case MXT_PROCI_GRIP_T40:
+ case MXT_PROCI_PALM_T41:
+ case MXT_PROCI_TOUCHSUPPRESSION_T42:
+ case MXT_PROCI_STYLUS_T47:
+ case MXT_PROCG_NOISESUPPRESSION_T48:
+ case MXT_SPT_COMMSCONFIG_T18:
+ case MXT_SPT_GPIOPWM_T19:
+ case MXT_SPT_SELFTEST_T25:
+ case MXT_SPT_CTECONFIG_T28:
+ case MXT_SPT_USERDATA_T38:
+ case MXT_SPT_DIGITIZER_T43:
+ case MXT_SPT_CTECONFIG_T46:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Hex-dump one T5 message at KERN_DEBUG level. */
+static void mxt_dump_message(struct mxt_data *data, u8 *message)
+{
+ print_hex_dump(KERN_DEBUG, "MXT MSG:", DUMP_PREFIX_NONE, 16, 1,
+ message, data->T5_msg_size, false);
+}
+
+/* Allocate the ring buffer used by the debug_msg interface and switch
+ * v2 debug output on. Safe to call when already enabled. */
+static void mxt_debug_msg_enable(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+
+	if (data->debug_v2_enabled)
+		return;
+
+	mutex_lock(&data->debug_msg_lock);
+
+	data->debug_msg_data = kcalloc(DEBUG_MSG_MAX,
+				data->T5_msg_size, GFP_KERNEL);
+	if (!data->debug_msg_data) {
+		dev_err(&data->client->dev, "Failed to allocate buffer\n");
+		/* fix: the original returned here with debug_msg_lock
+		 * still held, deadlocking every later debug operation. */
+		mutex_unlock(&data->debug_msg_lock);
+		return;
+	}
+
+	data->debug_v2_enabled = true;
+	mutex_unlock(&data->debug_msg_lock);
+
+	dev_info(dev, "Enabled message output\n");
+}
+
+/* Switch v2 debug output off and free its ring buffer. The enabled
+ * flag is cleared before taking the lock so concurrent producers stop
+ * queueing new messages while we tear the buffer down. */
+static void mxt_debug_msg_disable(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+
+ if (!data->debug_v2_enabled)
+ return;
+
+ dev_info(dev, "disabling message output\n");
+ data->debug_v2_enabled = false;
+
+ mutex_lock(&data->debug_msg_lock);
+ kfree(data->debug_msg_data);
+ data->debug_msg_data = NULL;
+ data->debug_msg_count = 0;
+ mutex_unlock(&data->debug_msg_lock);
+ dev_info(dev, "Disabled message output\n");
+}
+
+/* Append one T5 message to the debug ring buffer and notify readers.
+ * When the buffer fills, the whole batch is discarded and counting
+ * restarts. */
+static void mxt_debug_msg_add(struct mxt_data *data, u8 *msg)
+{
+	struct device *dev = &data->client->dev;
+
+	mutex_lock(&data->debug_msg_lock);
+
+	if (!data->debug_msg_data) {
+		dev_err(dev, "No buffer!\n");
+		/* fix: the original returned with debug_msg_lock held,
+		 * permanently deadlocking the debug interface. */
+		mutex_unlock(&data->debug_msg_lock);
+		return;
+	}
+
+	if (data->debug_msg_count < DEBUG_MSG_MAX) {
+		memcpy(data->debug_msg_data + data->debug_msg_count * data->T5_msg_size,
+		       msg,
+		       data->T5_msg_size);
+		data->debug_msg_count++;
+	} else {
+		dev_dbg(dev, "Discarding %u messages\n", data->debug_msg_count);
+		data->debug_msg_count = 0;
+	}
+
+	mutex_unlock(&data->debug_msg_lock);
+
+	sysfs_notify(&data->client->dev.kobj, NULL, "debug_notify");
+}
+
+/* The debug_msg sysfs bin file is read-only; reject writes. */
+static ssize_t mxt_debug_msg_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
+{
+ return -EIO;
+}
+
+/* Drain up to @bytes worth of whole T5 messages from the debug ring
+ * buffer into @buf and reset the count. Returns bytes copied (a
+ * multiple of T5_msg_size), or 0 when the buffer is not allocated. */
+static ssize_t mxt_debug_msg_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t bytes)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int count;
+ size_t bytes_read;
+
+ if (!data->debug_msg_data) {
+ dev_err(dev, "No buffer!\n");
+ return 0;
+ }
+
+ count = bytes / data->T5_msg_size;
+
+ if (count > DEBUG_MSG_MAX)
+ count = DEBUG_MSG_MAX;
+
+ mutex_lock(&data->debug_msg_lock);
+
+ if (count > data->debug_msg_count)
+ count = data->debug_msg_count;
+
+ bytes_read = count * data->T5_msg_size;
+
+ memcpy(buf, data->debug_msg_data, bytes_read);
+ data->debug_msg_count = 0;
+
+ mutex_unlock(&data->debug_msg_lock);
+
+ return bytes_read;
+}
+
+/* Create the "debug_msg" sysfs bin file sized for DEBUG_MSG_MAX
+ * messages. Returns 0 or -EINVAL on creation failure. */
+static int mxt_debug_msg_init(struct mxt_data *data)
+{
+ sysfs_bin_attr_init(&data->debug_msg_attr);
+ data->debug_msg_attr.attr.name = "debug_msg";
+ data->debug_msg_attr.attr.mode = 0660;
+ data->debug_msg_attr.read = mxt_debug_msg_read;
+ data->debug_msg_attr.write = mxt_debug_msg_write;
+ data->debug_msg_attr.size = data->T5_msg_size * DEBUG_MSG_MAX;
+
+ if (sysfs_create_bin_file(&data->client->dev.kobj,
+ &data->debug_msg_attr) < 0) {
+ dev_err(&data->client->dev, "Failed to create %s\n",
+ data->debug_msg_attr.attr.name)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Remove the "debug_msg" bin file if it was successfully created
+ * (attr.name stays NULL when init never ran). */
+static void mxt_debug_msg_remove(struct mxt_data *data)
+{
+ if (data->debug_msg_attr.attr.name)
+ sysfs_remove_bin_file(&data->client->dev.kobj,
+ &data->debug_msg_attr);
+}
+
+/* Interruptibly wait up to @timeout_ms for @comp. Returns 0 on
+ * completion, -EINTR if interrupted, -ETIMEDOUT on timeout. */
+static int mxt_wait_for_completion(struct mxt_data *data,
+ struct completion *comp, unsigned int timeout_ms)
+{
+ struct device *dev = &data->client->dev;
+ unsigned long timeout = msecs_to_jiffies(timeout_ms);
+ long ret;
+
+ ret = wait_for_completion_interruptible_timeout(comp, timeout);
+ if (ret < 0) {
+ dev_err(dev, "Wait for completion interrupted.\n");
+ return -EINTR;
+ } else if (ret == 0) {
+ dev_err(dev, "Wait for completion timed out.\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/* Read @count bytes from the chip's bootloader I2C address (which
+ * differs from the application-mode address). Returns 0 or -errno. */
+static int mxt_bootloader_read(struct mxt_data *data,
+ u8 *val, unsigned int count)
+{
+ int ret;
+ struct i2c_msg msg;
+
+ msg.addr = data->bootloader_addr;
+ msg.flags = data->client->flags & I2C_M_TEN; /* keep 10-bit addressing flag */
+ msg.flags |= I2C_M_RD;
+ msg.len = count;
+ msg.buf = val;
+
+ ret = i2c_transfer(data->client->adapter, &msg, 1);
+
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ ret = (ret < 0) ? ret : -EIO;
+ dev_err(&data->client->dev, "%s: i2c recv failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+/* Write @count bytes to the chip's bootloader I2C address.
+ * Returns 0 or -errno. */
+static int mxt_bootloader_write(struct mxt_data *data,
+ const u8 * const val, unsigned int count)
+{
+ int ret;
+ struct i2c_msg msg;
+
+ msg.addr = data->bootloader_addr;
+ msg.flags = data->client->flags & I2C_M_TEN;
+ msg.len = count;
+ msg.buf = (u8 *)val;
+
+ ret = i2c_transfer(data->client->adapter, &msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ ret = (ret < 0) ? ret : -EIO;
+ dev_err(&data->client->dev, "%s: i2c send failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Derive the bootloader-mode i2c address from the application-mode
+ * address. Newer families (family_id >= 0xa2, or on retry) use a -0x24
+ * offset from the application address; the others use -0x26.
+ * Returns 0 and stores the result in data->bootloader_addr, or -EINVAL
+ * for an unrecognised application-mode address.
+ */
+static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
+{
+ u8 appmode = data->client->addr;
+ u8 bootloader;
+ u8 family_id = 0;
+
+ /* info may not be populated yet when probing in bootloader mode */
+ if (data->info)
+ family_id = data->info->family_id;
+
+ switch (appmode) {
+ case 0x4a:
+ case 0x4b:
+ /* Chips after 1664S use different scheme */
+ if (retry || family_id >= 0xa2) {
+ bootloader = appmode - 0x24;
+ break;
+ }
+ /* Fall through for normal case */
+ case 0x4c:
+ case 0x4d:
+ case 0x5a:
+ case 0x5b:
+ bootloader = appmode - 0x26;
+ break;
+ default:
+ dev_err(&data->client->dev,
+ "Appmode i2c address 0x%02x not found\n",
+ appmode);
+ return -EINVAL;
+ }
+
+ data->bootloader_addr = bootloader;
+ return 0;
+}
+
+/*
+ * Probe the chip at its bootloader address and log its status byte,
+ * flagging the application-CRC-failure state if reported.
+ */
+static int mxt_probe_bootloader(struct mxt_data *data, bool retry)
+{
+	struct device *dev = &data->client->dev;
+	bool crc_failure;
+	u8 status;
+	int ret;
+
+	ret = mxt_lookup_bootloader_address(data, retry);
+	if (ret)
+		return ret;
+
+	ret = mxt_bootloader_read(data, &status, 1);
+	if (ret)
+		return ret;
+
+	/* Check app crc fail mode */
+	crc_failure = (status & ~MXT_BOOT_STATUS_MASK) == MXT_APP_CRC_FAIL;
+
+	dev_err(dev, "Detected bootloader, status:%02X%s\n",
+		status, crc_failure ? ", APP_CRC_FAIL" : "");
+
+	return 0;
+}
+
+/*
+ * Read and log the bootloader ID/version. If the status byte has the
+ * extended-ID bit set, three more bytes are fetched and the first of
+ * them (the real status) is returned; otherwise @val is returned as-is.
+ *
+ * NOTE(review): the return type is u8, so "return -EIO" below is
+ * truncated to 0xEA and is indistinguishable from a valid status byte
+ * for the caller - consider reporting the error out-of-band.
+ */
+static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val)
+{
+ struct device *dev = &data->client->dev;
+ u8 buf[3];
+
+ if (val & MXT_BOOT_EXTENDED_ID) {
+ if (mxt_bootloader_read(data, &buf[0], 3) != 0) {
+ dev_err(dev, "%s: i2c failure\n", __func__);
+ return -EIO;
+ }
+
+ dev_info(dev, "Bootloader ID:%d Version:%d\n", buf[1], buf[2]);
+
+ return buf[0];
+ } else {
+ dev_info(dev, "Bootloader ID:%d\n", val & MXT_BOOT_ID_MASK);
+
+ return val;
+ }
+}
+
+/*
+ * Verify the bootloader has reached the expected @state.
+ *
+ * When @wait is set, first block on bl_completion (signalled from the
+ * interrupt handler) before reading the status byte. For the
+ * MXT_FRAME_CRC_PASS state, re-read while the chip still reports
+ * MXT_FRAME_CRC_CHECK (CRC calculation in progress).
+ * Returns 0 when the state matches, negative error otherwise.
+ */
+static int mxt_check_bootloader(struct mxt_data *data, unsigned int state,
+ bool wait)
+{
+ struct device *dev = &data->client->dev;
+ u8 val;
+ int ret;
+
+recheck:
+ if (wait) {
+ /*
+ * In application update mode, the interrupt
+ * line signals state transitions. We must wait for the
+ * CHG assertion before reading the status byte.
+ * Once the status byte has been read, the line is deasserted.
+ */
+ ret = mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_FW_CHG_TIMEOUT);
+ if (ret) {
+ /*
+ * TODO: handle -EINTR better by terminating fw update
+ * process before returning to userspace by writing
+ * length 0x000 to device (iff we are in
+ * WAITING_FRAME_DATA state).
+ */
+ dev_err(dev, "Update wait error %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = mxt_bootloader_read(data, &val, 1);
+ if (ret)
+ return ret;
+
+ /* In the initial state the byte also carries the version info */
+ if (state == MXT_WAITING_BOOTLOAD_CMD)
+ val = mxt_get_bootloader_version(data, val);
+
+ switch (state) {
+ case MXT_WAITING_BOOTLOAD_CMD:
+ case MXT_WAITING_FRAME_DATA:
+ case MXT_APP_CRC_FAIL:
+ /* Only the state bits are compared for these states */
+ val &= ~MXT_BOOT_STATUS_MASK;
+ break;
+ case MXT_FRAME_CRC_PASS:
+ if (val == MXT_FRAME_CRC_CHECK) {
+ /* CRC still being computed; poll again */
+ goto recheck;
+ } else if (val == MXT_FRAME_CRC_FAIL) {
+ dev_err(dev, "Bootloader CRC fail\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val != state) {
+ dev_err(dev, "Invalid bootloader state %02X != %02X\n",
+ val, state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Send a two-byte command to the bootloader: the unlock sequence when
+ * @unlock is set, otherwise the 0x01 0x01 sequence.
+ */
+static int mxt_send_bootloader_cmd(struct mxt_data *data, bool unlock)
+{
+	u8 cmd[2] = { 0x01, 0x01 };
+
+	if (unlock) {
+		cmd[0] = MXT_UNLOCK_CMD_LSB;
+		cmd[1] = MXT_UNLOCK_CMD_MSB;
+	}
+
+	return mxt_bootloader_write(data, cmd, sizeof(cmd));
+}
+
+/*
+ * Read @len bytes from 16-bit register @reg (little-endian address
+ * phase followed by a repeated-start read). A failed transfer is
+ * retried once after MXT_WAKEUP_TIME, since the chip may be asleep.
+ */
+static int __mxt_read_reg(struct i2c_client *client,
+			   u16 reg, u16 len, void *val)
+{
+	u8 addr[2] = { reg & 0xff, (reg >> 8) & 0xff };
+	struct i2c_msg xfer[2] = {
+		{	/* Write register address */
+			.addr	= client->addr,
+			.flags	= 0,
+			.len	= sizeof(addr),
+			.buf	= addr,
+		},
+		{	/* Read data */
+			.addr	= client->addr,
+			.flags	= I2C_M_RD,
+			.len	= len,
+			.buf	= val,
+		},
+	};
+	int attempt;
+	int ret;
+
+	for (attempt = 0; attempt < 2; attempt++) {
+		ret = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+		if (ret == ARRAY_SIZE(xfer))
+			return 0;
+
+		if (attempt == 0) {
+			dev_dbg(&client->dev, "%s: i2c retry\n", __func__);
+			msleep(MXT_WAKEUP_TIME);
+		}
+	}
+
+	dev_err(&client->dev, "%s: i2c transfer failed (%d)\n",
+		__func__, ret);
+	return -EIO;
+}
+
+/*
+ * Write @len bytes to 16-bit register @reg: a bounce buffer carries the
+ * little-endian register address followed by the payload. A failed
+ * send is retried once after MXT_WAKEUP_TIME.
+ */
+static int __mxt_write_reg(struct i2c_client *client, u16 reg, u16 len,
+			   const void *val)
+{
+	size_t count = len + 2;
+	bool retried = false;
+	u8 *buf;
+	int ret;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = reg & 0xff;
+	buf[1] = (reg >> 8) & 0xff;
+	memcpy(&buf[2], val, len);
+
+	for (;;) {
+		ret = i2c_master_send(client, buf, count);
+		if (ret == count) {
+			ret = 0;
+			break;
+		}
+
+		if (retried) {
+			dev_err(&client->dev, "%s: i2c send failed (%d)\n",
+				__func__, ret);
+			ret = -EIO;
+			break;
+		}
+
+		dev_dbg(&client->dev, "%s: i2c retry\n", __func__);
+		msleep(MXT_WAKEUP_TIME);
+		retried = true;
+	}
+
+	kfree(buf);
+	return ret;
+}
+
+/* Write a single byte to register @reg (wrapper around __mxt_write_reg). */
+static int mxt_write_reg(struct i2c_client *client, u16 reg, u8 val)
+{
+ return __mxt_write_reg(client, reg, 1, &val);
+}
+
+/*
+ * Look up the object-table entry for object type @type.
+ * Returns NULL (with a warning) if the chip does not have that object.
+ */
+static struct mxt_object *
+mxt_get_object(struct mxt_data *data, u8 type)
+{
+	struct mxt_object *obj = data->object_table;
+	struct mxt_object *end = obj + data->info->object_num;
+
+	for (; obj < end; obj++) {
+		if (obj->type == type)
+			return obj;
+	}
+
+	dev_warn(&data->client->dev, "Invalid object type T%u\n", type);
+	return NULL;
+}
+
+/*
+ * Handle a T6 command-processor message: latch the config checksum
+ * (waking anyone blocked on crc_completion), detect the transition out
+ * of reset (waking reset_completion), and trace status-bit changes.
+ */
+static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
+{
+ struct device *dev = &data->client->dev;
+ u8 status = msg[1];
+ /* 24-bit little-endian config checksum in msg[2..4] */
+ u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16);
+
+ if (crc != data->config_crc) {
+ data->config_crc = crc;
+ dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc);
+ complete(&data->crc_completion);
+ }
+
+ /* Detect transition out of reset */
+ if ((data->t6_status & MXT_T6_STATUS_RESET) &&
+ !(status & MXT_T6_STATUS_RESET))
+ complete(&data->reset_completion);
+
+ /* Output debug if status has changed */
+ if (status != data->t6_status)
+ dev_dbg(dev, "T6 Status 0x%02X%s%s%s%s%s%s%s\n",
+ status,
+ (status == 0) ? " OK" : "",
+ (status & MXT_T6_STATUS_RESET) ? " RESET" : "",
+ (status & MXT_T6_STATUS_OFL) ? " OFL" : "",
+ (status & MXT_T6_STATUS_SIGERR) ? " SIGERR" : "",
+ (status & MXT_T6_STATUS_CAL) ? " CAL" : "",
+ (status & MXT_T6_STATUS_CFGERR) ? " CFGERR" : "",
+ (status & MXT_T6_STATUS_COMSERR) ? " COMSERR" : "");
+
+ /* Save current status */
+ data->t6_status = status;
+}
+
+/*
+ * Report T19 GPIO button states to the input layer. The GPIO lines are
+ * active low; entries mapped to KEY_RESERVED are skipped.
+ */
+static void mxt_input_button(struct mxt_data *data, u8 *message)
+{
+	const struct mxt_platform_data *pdata = data->pdata;
+	int i;
+
+	/* do not report events if input device not yet registered */
+	if (!data->enable_reporting)
+		return;
+
+	/* Active-low switch */
+	for (i = 0; i < pdata->t19_num_keys; i++) {
+		unsigned int keycode = pdata->t19_keymap[i];
+
+		if (keycode == KEY_RESERVED)
+			continue;
+
+		input_report_key(data->input_dev, keycode,
+				 !(message[1] & (1 << i)));
+	}
+}
+
+/* Flush pending touch events: refresh single-touch pointer emulation
+ * (without BTN_TOOL tracking) and emit EV_SYN. */
+static void mxt_input_sync(struct input_dev *input_dev)
+{
+ input_mt_report_pointer_emulation(input_dev, false);
+ input_sync(input_dev);
+}
+
+/*
+ * Handle one T9 multi-touch message and forward it to the input layer
+ * through the MT slot derived from the report id. Zero-area contacts
+ * are treated as a stylus (linked T47 object). When
+ * mxt_force_gingerbread_mode is set, the 10/12-bit coordinate rescaling
+ * is skipped and legacy input_mt_sync() events are emitted.
+ */
+static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input_dev = data->input_dev;
+ int id;
+ u8 status;
+ int x;
+ int y;
+ int area;
+ int amplitude;
+ u8 vector;
+ int tool;
+
+ /* do not report events if input device not yet registered */
+ if (!data->enable_reporting)
+ return;
+
+ id = message[0] - data->T9_reportid_min;
+ status = message[1];
+ /* X: high 8 bits in msg[2], low 4 bits in the top nibble of msg[4];
+ * Y: high 8 bits in msg[3], low 4 bits in the bottom nibble */
+ x = (message[2] << 4) | ((message[4] >> 4) & 0xf);
+ y = (message[3] << 4) | ((message[4] & 0xf));
+
+ /* Handle 10/12 bit switching */
+ /* not defined. Allways 1023 default value */
+ if (!mxt_force_gingerbread_mode) {
+ if (data->max_x < 1024)
+ x >>= 2;
+ if (data->max_y < 1024)
+ y >>= 2;
+ }
+
+ area = message[5];
+
+ amplitude = message[6];
+ vector = message[7];
+
+ dev_dbg(dev,
+ "[%u] %c%c%c%c%c%c%c%c x: %5u y: %5u area: %3u amp: %3u vector: %02X\n",
+ id,
+ (status & MXT_T9_DETECT) ? 'D' : '.',
+ (status & MXT_T9_PRESS) ? 'P' : '.',
+ (status & MXT_T9_RELEASE) ? 'R' : '.',
+ (status & MXT_T9_MOVE) ? 'M' : '.',
+ (status & MXT_T9_VECTOR) ? 'V' : '.',
+ (status & MXT_T9_AMP) ? 'A' : '.',
+ (status & MXT_T9_SUPPRESS) ? 'S' : '.',
+ (status & MXT_T9_UNGRIP) ? 'U' : '.',
+ x, y, area, amplitude, vector);
+
+ input_mt_slot(input_dev, id);
+
+ if (status & MXT_T9_DETECT) {
+ /* Multiple bits may be set if the host is slow to read the
+ * status messages, indicating all the events that have
+ * happened */
+ if (status & MXT_T9_RELEASE) {
+ /* release then re-press within one message: close the
+ * slot and sync before reporting the new contact */
+ input_mt_report_slot_state(input_dev,
+ MT_TOOL_FINGER, 0);
+ mxt_input_sync(input_dev);
+ }
+
+ /* A reported size of zero indicates that the reported touch
+ * is a stylus from a linked Stylus T47 object. */
+ if (area == 0) {
+ area = MXT_TOUCH_MAJOR_T47_STYLUS;
+ tool = MT_TOOL_PEN;
+ } else {
+ tool = MT_TOOL_FINGER;
+ }
+
+ /* Touch active */
+ input_mt_report_slot_state(input_dev, tool, 1);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, amplitude);
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
+ input_report_abs(input_dev, ABS_MT_ORIENTATION, vector);
+ } else {
+ /* Touch no longer active, close out slot */
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+ if (mxt_force_gingerbread_mode)
+ mxt_input_sync(input_dev);
+ }
+ if (mxt_force_gingerbread_mode)
+ input_mt_sync(input_dev);
+ data->update_input = true;
+}
+
+/*
+ * Handle one T100 multi-touch message. The first two report ids of the
+ * T100 range carry screen status (SCRSTATUS) and are ignored - the -2
+ * offset makes their id negative. The remaining ids map one-to-one
+ * onto MT slots. The auxiliary area/amplitude/vector bytes are only
+ * present when the corresponding t100_aux_* offset is non-zero.
+ */
+static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input_dev = data->input_dev;
+ int id;
+ u8 status;
+ int x;
+ int y;
+ int tool;
+
+ /* do not report events if input device not yet registered */
+ if (!data->enable_reporting)
+ return;
+
+ id = message[0] - data->T100_reportid_min - 2;
+
+ /* ignore SCRSTATUS events */
+ if (id < 0)
+ return;
+
+ status = message[1];
+ /* 16-bit little-endian coordinates */
+ x = (message[3] << 8) | message[2];
+ y = (message[5] << 8) | message[4];
+
+ dev_dbg(dev,
+ "[%u] status:%02X x:%u y:%u area:%02X amp:%02X vec:%02X\n",
+ id,
+ status,
+ x, y,
+ (data->t100_aux_area) ? message[data->t100_aux_area] : 0,
+ (data->t100_aux_ampl) ? message[data->t100_aux_ampl] : 0,
+ (data->t100_aux_vect) ? message[data->t100_aux_vect] : 0);
+
+ input_mt_slot(input_dev, id);
+
+ if (status & MXT_T100_DETECT) {
+ /* A reported size of zero indicates that the reported touch
+ * is a stylus from a linked Stylus T47 object. */
+ if ((status & MXT_T100_TYPE_MASK) == MXT_T100_TYPE_STYLUS)
+ tool = MT_TOOL_PEN;
+ else
+ tool = MT_TOOL_FINGER;
+
+ /* Touch active */
+ input_mt_report_slot_state(input_dev, tool, 1);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+
+ if (data->t100_aux_ampl)
+ input_report_abs(input_dev, ABS_MT_PRESSURE,
+ message[data->t100_aux_ampl]);
+
+ if (data->t100_aux_area) {
+ if (tool == MT_TOOL_PEN)
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ MXT_TOUCH_MAJOR_T47_STYLUS);
+ else
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
+ message[data->t100_aux_area]);
+ }
+
+ if (data->t100_aux_vect)
+ input_report_abs(input_dev, ABS_MT_ORIENTATION,
+ message[data->t100_aux_vect]);
+ } else {
+ /* Touch no longer active, close out slot */
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+ }
+
+ data->update_input = true;
+}
+
+
+/*
+ * Handle a T15 key-array message: compare the reported key bitmap with
+ * the cached one and emit press/release events for every key that
+ * changed, followed by a single EV_SYN if anything did.
+ */
+static void mxt_proc_t15_messages(struct mxt_data *data, u8 *msg)
+{
+	struct input_dev *input_dev = data->input_dev;
+	struct device *dev = &data->client->dev;
+	int num_keys = data->pdata->t15_num_keys;
+	unsigned long keystates = 0;
+	bool curr_state, new_state;
+	bool sync = false;
+	int key;
+	int i;
+
+	/* do not report events if input device not yet registered */
+	if (!data->enable_reporting)
+		return;
+
+	/*
+	 * Key states start at msg[2], little-endian, one byte per eight
+	 * keys. The old code applied le32_to_cpu() to the single byte
+	 * msg[2], which only ever covered the first 8 keys (and produced
+	 * byte-swapped garbage on big-endian). Assemble exactly as many
+	 * bytes as the configured key count requires, so we never read
+	 * past the end of a short T5 message.
+	 */
+	for (i = 0; i < DIV_ROUND_UP(num_keys, 8); i++)
+		keystates |= (unsigned long)msg[2 + i] << (8 * i);
+
+	for (key = 0; key < num_keys; key++) {
+		curr_state = test_bit(key, &data->t15_keystatus);
+		new_state = test_bit(key, &keystates);
+
+		if (!curr_state && new_state) {
+			dev_dbg(dev, "T15 key press: %u\n", key);
+			__set_bit(key, &data->t15_keystatus);
+			input_event(input_dev, EV_KEY,
+				    data->pdata->t15_keymap[key], 1);
+			sync = true;
+		} else if (curr_state && !new_state) {
+			dev_dbg(dev, "T15 key release: %u\n", key);
+			__clear_bit(key, &data->t15_keystatus);
+			input_event(input_dev, EV_KEY,
+				    data->pdata->t15_keymap[key], 0);
+			sync = true;
+		}
+	}
+
+	if (sync)
+		input_sync(input_dev);
+}
+
+/* Log T42 touch-suppression state changes. */
+static void mxt_proc_t42_messages(struct mxt_data *data, u8 *msg)
+{
+	bool suppressed = msg[1] & MXT_T42_MSG_TCHSUP;
+
+	dev_info(&data->client->dev, "T42 %s\n",
+		 suppressed ? "suppress" : "normal");
+}
+
+/* Trace T48 noise-suppression state/status messages. Always returns 0. */
+static int mxt_proc_t48_messages(struct mxt_data *data, u8 *msg)
+{
+	u8 status = msg[1];
+	u8 state = msg[4];
+
+	dev_dbg(&data->client->dev,
+		"T48 state %d status %02X %s%s%s%s%s\n",
+		state, status,
+		(status & 0x01) ? "FREQCHG " : "",
+		(status & 0x02) ? "APXCHG " : "",
+		(status & 0x04) ? "ALGOERR " : "",
+		(status & 0x10) ? "STATCHG " : "",
+		(status & 0x20) ? "NLVLCHG " : "");
+
+	return 0;
+}
+
+/*
+ * Handle a T63 active-stylus message. Stylus contacts use MT slots
+ * placed after the touch slots; barrel/eraser buttons are reported as
+ * BTN_STYLUS/BTN_STYLUS2.
+ */
+static void mxt_proc_t63_messages(struct mxt_data *data, u8 *msg)
+{
+	struct device *dev = &data->client->dev;
+	struct input_dev *input_dev = data->input_dev;
+	int id;		/* int, not u8: the range check needs signed math */
+	u16 x, y;
+	u8 pressure;
+
+	/* do not report events if input device not yet registered */
+	if (!data->enable_reporting)
+		return;
+
+	/* stylus slots come after touch slots */
+	id = data->num_touchids + (msg[0] - data->T63_reportid_min);
+
+	/*
+	 * Valid slots are 0 .. num_touchids + num_stylusids - 1. The old
+	 * check declared id as u8 (so "id < 0" was dead code) and used
+	 * ">", which let id equal one past the last slot through.
+	 */
+	if (id < 0 || id >= (data->num_touchids + data->num_stylusids)) {
+		dev_err(dev, "invalid stylus id %d, max slot is %d\n",
+			id, data->num_stylusids);
+		return;
+	}
+
+	x = msg[3] | (msg[4] << 8);
+	y = msg[5] | (msg[6] << 8);
+	pressure = msg[7] & MXT_T63_STYLUS_PRESSURE_MASK;
+
+	dev_dbg(dev,
+		"[%d] %c%c%c%c x: %d y: %d pressure: %d stylus:%c%c%c%c\n",
+		id,
+		(msg[1] & MXT_T63_STYLUS_SUPPRESS) ? 'S' : '.',
+		(msg[1] & MXT_T63_STYLUS_MOVE)     ? 'M' : '.',
+		(msg[1] & MXT_T63_STYLUS_RELEASE)  ? 'R' : '.',
+		(msg[1] & MXT_T63_STYLUS_PRESS)    ? 'P' : '.',
+		x, y, pressure,
+		(msg[2] & MXT_T63_STYLUS_BARREL) ? 'B' : '.',
+		(msg[2] & MXT_T63_STYLUS_ERASER) ? 'E' : '.',
+		(msg[2] & MXT_T63_STYLUS_TIP)    ? 'T' : '.',
+		(msg[2] & MXT_T63_STYLUS_DETECT) ? 'D' : '.');
+
+	input_mt_slot(input_dev, id);
+
+	if (msg[2] & MXT_T63_STYLUS_DETECT) {
+		input_mt_report_slot_state(input_dev, MT_TOOL_PEN, 1);
+		input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+		input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+		input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
+	} else {
+		/* Stylus lifted: close out the slot */
+		input_mt_report_slot_state(input_dev, MT_TOOL_PEN, 0);
+	}
+
+	input_report_key(input_dev, BTN_STYLUS,
+			 (msg[2] & MXT_T63_STYLUS_ERASER));
+	input_report_key(input_dev, BTN_STYLUS2,
+			 (msg[2] & MXT_T63_STYLUS_BARREL));
+
+	mxt_input_sync(input_dev);
+}
+
+/*
+ * Dispatch one T5 message to the handler for its report id range.
+ * Returns 0 for an empty slot (MXT_RPTID_NOMSG) and 1 for a processed
+ * message. Messages with no matching handler are dumped regardless of
+ * the debug setting; all messages are also fed to the v2 debug
+ * interface when it is enabled.
+ */
+static int mxt_proc_message(struct mxt_data *data, u8 *message)
+{
+ u8 report_id = message[0];
+ bool dump = data->debug_enabled;
+
+ if (report_id == MXT_RPTID_NOMSG)
+ return 0;
+
+ if (report_id == data->T6_reportid) {
+ mxt_proc_t6_messages(data, message);
+ } else if (report_id >= data->T9_reportid_min
+ && report_id <= data->T9_reportid_max) {
+ mxt_proc_t9_message(data, message);
+ } else if (report_id >= data->T100_reportid_min
+ && report_id <= data->T100_reportid_max) {
+ mxt_proc_t100_message(data, message);
+ } else if (report_id == data->T19_reportid) {
+ mxt_input_button(data, message);
+ data->update_input = true;
+ } else if (report_id >= data->T63_reportid_min
+ && report_id <= data->T63_reportid_max) {
+ mxt_proc_t63_messages(data, message);
+ } else if (report_id >= data->T42_reportid_min
+ && report_id <= data->T42_reportid_max) {
+ mxt_proc_t42_messages(data, message);
+ } else if (report_id == data->T48_reportid) {
+ mxt_proc_t48_messages(data, message);
+ } else if (report_id >= data->T15_reportid_min
+ && report_id <= data->T15_reportid_max) {
+ mxt_proc_t15_messages(data, message);
+ } else {
+ /* Unknown report id: always dump for diagnosis */
+ dump = true;
+ }
+
+ if (dump)
+ mxt_dump_message(data, message);
+
+ if (data->debug_v2_enabled)
+ mxt_debug_msg_add(data, message);
+
+ return 1;
+}
+
+/*
+ * Read up to @count messages from the T5 FIFO in a single i2c transfer
+ * and dispatch each one. Returns the number of valid (non-empty)
+ * messages processed, or a negative error code on i2c failure.
+ */
+static int mxt_read_and_process_messages(struct mxt_data *data, u8 count)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ int i;
+ u8 num_valid = 0;
+
+ /* Safety check for msg_buf */
+ if (count > data->max_reportid)
+ return -EINVAL;
+
+ /* Process remaining messages if necessary */
+ ret = __mxt_read_reg(data->client, data->T5_address,
+ data->T5_msg_size * count, data->msg_buf);
+ if (ret) {
+ dev_err(dev, "Failed to read %u messages (%d)\n", count, ret);
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = mxt_proc_message(data,
+ data->msg_buf + data->T5_msg_size * i);
+
+ if (ret == 1)
+ num_valid++;
+ }
+
+ /* return number of messages read */
+ return num_valid;
+}
+
+/*
+ * IRQ service path for chips with a T44 message-count object: fetch
+ * the T44 count byte and the first T5 message in one transfer (they
+ * are contiguous on the chip), then read any remaining messages in a
+ * second transfer. Pending input events are flushed at the end.
+ */
+static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ u8 count, num_left;
+
+ /* Read T44 and T5 together */
+ ret = __mxt_read_reg(data->client, data->T44_address,
+ data->T5_msg_size + 1, data->msg_buf);
+ if (ret) {
+ dev_err(dev, "Failed to read T44 and T5 (%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ count = data->msg_buf[0];
+
+ if (count == 0) {
+ dev_dbg(dev, "Interrupt triggered but zero messages\n");
+ return IRQ_NONE;
+ } else if (count > data->max_reportid) {
+ dev_err(dev, "T44 count %d exceeded max report id\n", count);
+ count = data->max_reportid;
+ }
+
+ /* Process first message */
+ ret = mxt_proc_message(data, data->msg_buf + 1);
+ /* NOTE(review): mxt_proc_message() returns 0 or 1, never negative,
+ * so this branch looks unreachable - confirm the intended check */
+ if (ret < 0) {
+ dev_warn(dev, "Unexpected invalid message\n");
+ return IRQ_NONE;
+ }
+
+ num_left = count - 1;
+
+ /* Process remaining messages if necessary */
+ if (num_left) {
+ ret = mxt_read_and_process_messages(data, num_left);
+ if (ret < 0)
+ goto end;
+ else if (ret != num_left)
+ dev_warn(dev, "Unexpected invalid message\n");
+ }
+
+end:
+ /* Flush whatever input events were gathered, even after an error */
+ if (data->update_input) {
+ mxt_input_sync(data->input_dev);
+ data->update_input = false;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Drain the message FIFO until a read comes back short (i.e. an empty
+ * slot was seen), giving up after two full-size passes. Used by the
+ * RETRIGEN workaround to make sure the CHG line is deasserted.
+ * Returns 0 once drained, -EBUSY if the line would not clear.
+ */
+static int mxt_process_messages_until_invalid(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int count = data->max_reportid;
+	int attempt;
+
+	for (attempt = 0; attempt < 2; attempt++) {
+		int read = mxt_read_and_process_messages(data, count);
+
+		if (read < count)
+			return 0;
+	}
+
+	if (data->update_input) {
+		mxt_input_sync(data->input_dev);
+		data->update_input = false;
+	}
+
+	dev_err(dev, "CHG pin isn't cleared\n");
+	return -EBUSY;
+}
+
+/*
+ * IRQ service path for chips without a T44 count register. Read the
+ * number of messages seen last time plus one extra (which should come
+ * back invalid once the FIFO is drained); if every slot was valid,
+ * keep reading two at a time until an invalid message appears or the
+ * touch-id limit is reached.
+ */
+static irqreturn_t mxt_process_messages(struct mxt_data *data)
+{
+ int total_handled, num_handled;
+ u8 count = data->last_message_count;
+
+ /* Clamp the starting guess to a sane range */
+ if (count < 1 || count > data->max_reportid)
+ count = 1;
+
+ /* include final invalid message */
+ total_handled = mxt_read_and_process_messages(data, count + 1);
+ if (total_handled < 0)
+ return IRQ_NONE;
+ /* if there were invalid messages, then we are done */
+ else if (total_handled <= count)
+ goto update_count;
+
+ /* read two at a time until an invalid message or else we reach
+ * reportid limit */
+ do {
+ num_handled = mxt_read_and_process_messages(data, 2);
+ if (num_handled < 0)
+ return IRQ_NONE;
+
+ total_handled += num_handled;
+
+ if (num_handled < 2)
+ break;
+ } while (total_handled < data->num_touchids);
+
+update_count:
+ /* Remember how many we saw, to size the first read next time */
+ data->last_message_count = total_handled;
+
+ if (data->enable_reporting && data->update_input) {
+ mxt_input_sync(data->input_dev);
+ data->update_input = false;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler. During firmware flashing the CHG line
+ * only signals bootloader state changes; otherwise messages are read
+ * via the T44 path when available, or the count-less fallback path.
+ */
+static irqreturn_t mxt_interrupt(int irq, void *dev_id)
+{
+	struct mxt_data *data = dev_id;
+
+	if (!data) {
+		printk(KERN_ERR "mxt interrupt called with null data\n");
+		return IRQ_HANDLED;
+	}
+
+	if (data->in_bootloader) {
+		/* bootloader state transition completion */
+		complete(&data->bl_completion);
+		return IRQ_HANDLED;
+	}
+
+	/* Object table not parsed yet: nothing we can do with messages */
+	if (!data->object_table)
+		return IRQ_NONE;
+
+	if (data->T44_address)
+		return mxt_process_messages_t44(data);
+
+	return mxt_process_messages(data);
+}
+
+/*
+ * Exported wrapper around mxt_interrupt() so code outside this file
+ * (e.g. board-level shared-IRQ plumbing) can chain into the handler.
+ */
+irqreturn_t mxt224e_interrupt(int irq, void *dev_id)
+{
+ return mxt_interrupt(irq, dev_id);
+}
+
+EXPORT_SYMBOL(mxt224e_interrupt);
+
+/*
+ * Write @value to the T6 command processor register at @cmd_offset.
+ * When @wait is set, poll every 20 ms until the chip clears the
+ * register (command accepted), for at most ~100 polls.
+ * Returns 0 on success, -EIO on timeout, or an i2c error code.
+ */
+static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
+			  u8 value, bool wait)
+{
+	u16 reg = data->T6_address + cmd_offset;
+	u8 command_register;
+	int timeout_counter = 0;
+	int ret;
+
+	ret = mxt_write_reg(data->client, reg, value);
+	if (ret)
+		return ret;
+
+	if (!wait)
+		return 0;
+
+	/*
+	 * Success is decided on the register value itself. The previous
+	 * code re-derived it from the loop counter afterwards, which
+	 * reported a spurious -EIO when the command completed on the
+	 * very last poll (counter already past the limit).
+	 */
+	do {
+		msleep(20);
+		ret = __mxt_read_reg(data->client, reg, 1, &command_register);
+		if (ret)
+			return ret;
+		if (command_register == 0)
+			return 0;
+	} while (timeout_counter++ <= 100);
+
+	dev_err(&data->client->dev, "Command failed!\n");
+	return -EIO;
+}
+
+/*
+ * Issue a T6 soft reset and block until the T6 message handler signals
+ * that the chip has come out of reset (or the wait times out).
+ */
+static int mxt_soft_reset(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int ret;
+
+	dev_info(dev, "Resetting chip\n");
+
+	/* Completion is signalled from mxt_proc_t6_messages() */
+	INIT_COMPLETION(data->reset_completion);
+
+	ret = mxt_t6_command(data, MXT_COMMAND_RESET, MXT_RESET_VALUE, false);
+	if (ret)
+		return ret;
+
+	return mxt_wait_for_completion(data, &data->reset_completion,
+				       MXT_RESET_TIMEOUT);
+}
+
+/*
+ * Issue a T6 command (e.g. REPORTALL or BACKUPNV) and wait for the
+ * resulting T6 CRC message to refresh data->config_crc. Deliberately
+ * best-effort: errors are ignored and config_crc stays 0, which forces
+ * a config download later.
+ */
+static void mxt_update_crc(struct mxt_data *data, u8 cmd, u8 value)
+{
+ /* on failure, CRC is set to 0 and config will always be downloaded */
+ data->config_crc = 0;
+ INIT_COMPLETION(data->crc_completion);
+
+ mxt_t6_command(data, cmd, value, true);
+
+ /* Wait for crc message. On failure, CRC is set to 0 and config will
+ * always be downloaded */
+ mxt_wait_for_completion(data, &data->crc_completion, MXT_CRC_TIMEOUT);
+}
+
+/* Fold one 16-bit little-endian word into the running 24-bit CRC. */
+static void mxt_calc_crc24(u32 *crc, u8 firstbyte, u8 secondbyte)
+{
+	static const unsigned int crcpoly = 0x80001B;
+	u32 word = (secondbyte << 8) | firstbyte;
+	u32 next = (*crc << 1) ^ word;
+
+	/* Reduce when the shifted-out bit 24 is set */
+	if (next & 0x1000000)
+		next ^= crcpoly;
+
+	*crc = next;
+}
+
+/*
+ * Compute the 24-bit maXTouch CRC over base[start_off, end_off), two
+ * bytes per step, zero-padding when the length is odd.
+ *
+ * NOTE(review): if end_off < start_off this returns -EINVAL cast to
+ * u32 (0xFFFFFFEA, not masked to 24 bits); callers compare the result
+ * as a CRC value - confirm this sentinel is intended.
+ */
+static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
+{
+ u32 crc = 0;
+ u8 *ptr = base + start_off;
+ u8 *last_val = base + end_off - 1;
+
+ if (end_off < start_off)
+ return -EINVAL;
+
+ while (ptr < last_val) {
+ mxt_calc_crc24(&crc, *ptr, *(ptr + 1));
+ ptr += 2;
+ }
+
+ /* if len is odd, fill the last byte with 0 */
+ if (ptr == last_val)
+ mxt_calc_crc24(&crc, *ptr, 0);
+
+ /* Mask to 24-bit */
+ crc &= 0x00FFFFFF;
+
+ return crc;
+}
+
+/*
+ * Decide whether the RETRIGEN workaround is needed. A level-triggered
+ * IRQ never needs it; otherwise check the T18 COMMS_CTRL RETRIGEN bit
+ * and enable the workaround when it is not set (or T18 is absent).
+ */
+static int mxt_check_retrigen(struct mxt_data *data)
+{
+	struct i2c_client *client = data->client;
+	int error;
+	u8 val;		/* was int: only one byte is read below, so the
+			 * RETRIGEN test examined uninitialized bytes */
+
+	if (data->pdata->irqflags & IRQF_TRIGGER_LOW)
+		return 0;
+
+	if (data->T18_address) {
+		error = __mxt_read_reg(client,
+				       data->T18_address + MXT_COMMS_CTRL,
+				       1, &val);
+		if (error)
+			return error;
+
+		if (val & MXT_COMMS_RETRIGEN)
+			return 0;
+	}
+
+	dev_warn(&client->dev, "Enabling RETRIGEN workaround\n");
+	data->use_retrigen_workaround = true;
+	return 0;
+}
+
+static int mxt_init_t7_power_cfg(struct mxt_data *data);
+
+/*
+ * mxt_check_reg_init - download configuration to chip
+ *
+ * Atmel Raw Config File Format
+ *
+ * The first four lines of the raw config file contain:
+ * 1) Version
+ * 2) Chip ID Information (first 7 bytes of device memory)
+ * 3) Chip Information Block 24-bit CRC Checksum
+ * 4) Chip Configuration 24-bit CRC Checksum
+ *
+ * The rest of the file consists of one line per object instance:
+ * <TYPE> <INSTANCE> <SIZE> <CONTENTS>
+ *
+ * <TYPE> - 2-byte object type as hex
+ * <INSTANCE> - 2-byte object instance number as hex
+ * <SIZE> - 2-byte object size as hex
+ * <CONTENTS> - array of <SIZE> 1-byte hex values
+ */
+static int mxt_check_reg_init(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	struct mxt_info cfg_info;
+	struct mxt_object *object;
+	const struct firmware *cfg = NULL;
+	int ret;
+	int offset;
+	int data_pos;
+	int byte_offset;
+	int i;
+	int cfg_start_ofs;
+	u32 info_crc, config_crc, calculated_crc;
+	u8 *config_mem;
+	size_t config_mem_size;
+	unsigned int type, instance, size;
+	u8 val;
+	u16 reg;
+
+	if (!data->cfg_name) {
+		dev_dbg(dev, "Skipping cfg download\n");
+		return 0;
+	}
+
+	/* A missing config file is not fatal: keep the on-chip config */
+	ret = request_firmware(&cfg, data->cfg_name, dev);
+	if (ret < 0) {
+		dev_err(dev, "Failure to request config file %s\n",
+			data->cfg_name);
+		return 0;
+	}
+
+	/* Ask the chip to report its current config checksum */
+	mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
+
+	if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
+		dev_err(dev, "Unrecognised config file\n");
+		ret = -EINVAL;
+		goto release;
+	}
+
+	data_pos = strlen(MXT_CFG_MAGIC);
+
+	/* Load information block and check it matches this chip */
+	for (i = 0; i < sizeof(struct mxt_info); i++) {
+		ret = sscanf(cfg->data + data_pos, "%hhx%n",
+			     (unsigned char *)&cfg_info + i,
+			     &offset);
+		if (ret != 1) {
+			dev_err(dev, "Bad format\n");
+			ret = -EINVAL;
+			goto release;
+		}
+
+		data_pos += offset;
+	}
+
+	if (cfg_info.family_id != data->info->family_id) {
+		dev_err(dev, "Family ID mismatch!\n");
+		ret = -EINVAL;
+		goto release;
+	}
+
+	if (cfg_info.variant_id != data->info->variant_id) {
+		dev_err(dev, "Variant ID mismatch!\n");
+		ret = -EINVAL;
+		goto release;
+	}
+
+	/* Read CRCs */
+	ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset);
+	if (ret != 1) {
+		dev_err(dev, "Bad format: failed to parse Info CRC\n");
+		ret = -EINVAL;
+		goto release;
+	}
+	data_pos += offset;
+
+	ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset);
+	if (ret != 1) {
+		dev_err(dev, "Bad format: failed to parse Config CRC\n");
+		ret = -EINVAL;
+		goto release;
+	}
+	data_pos += offset;
+
+	/* The Info Block CRC is calculated over mxt_info and the object table
+	 * If it does not match then we are trying to load the configuration
+	 * from a different chip or firmware version, so the configuration CRC
+	 * is invalid anyway. */
+	if (info_crc == data->info_crc) {
+		if (config_crc == 0 || data->config_crc == 0) {
+			dev_info(dev, "CRC zero, attempting to apply config\n");
+		} else if (config_crc == data->config_crc) {
+			dev_info(dev, "Config CRC 0x%06X: OK\n",
+				 data->config_crc);
+			ret = 0;
+			goto release;
+		} else {
+			dev_info(dev, "Config CRC 0x%06X: does not match file 0x%06X\n",
+				 data->config_crc, config_crc);
+		}
+	} else {
+		dev_warn(dev,
+			 "Warning: Info CRC error - device=0x%06X file=0x%06X\n",
+			 data->info_crc, info_crc);
+	}
+
+	/* Malloc memory to store configuration */
+	cfg_start_ofs = MXT_OBJECT_START
+		+ data->info->object_num * sizeof(struct mxt_object)
+		+ MXT_INFO_CHECKSUM_SIZE;
+	config_mem_size = data->mem_size - cfg_start_ofs;
+	config_mem = kzalloc(config_mem_size, GFP_KERNEL);
+	if (!config_mem) {
+		dev_err(dev, "Failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	while (data_pos < cfg->size) {
+		/* Read type, instance, length */
+		ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
+			     &type, &instance, &size, &offset);
+		if (ret == 0) {
+			/* EOF */
+			break;
+		} else if (ret != 3) {
+			dev_err(dev, "Bad format: failed to parse object\n");
+			ret = -EINVAL;
+			goto release_mem;
+		}
+		data_pos += offset;
+
+		object = mxt_get_object(data, type);
+		if (!object) {
+			/* Skip an object this chip does not have, but still
+			 * validate each token: an unchecked sscanf failure
+			 * here would leave offset stale and advance data_pos
+			 * by a bogus amount. */
+			for (i = 0; i < size; i++) {
+				ret = sscanf(cfg->data + data_pos, "%hhx%n",
+					     &val,
+					     &offset);
+				if (ret != 1) {
+					dev_err(dev, "Bad format in T%d\n",
+						type);
+					ret = -EINVAL;
+					goto release_mem;
+				}
+				data_pos += offset;
+			}
+			continue;
+		}
+
+		if (size > mxt_obj_size(object)) {
+			/* Either we are in fallback mode due to wrong
+			 * config or config from a later fw version,
+			 * or the file is corrupt or hand-edited */
+			dev_warn(dev, "Discarding %u byte(s) in T%u\n",
+				 size - mxt_obj_size(object), type);
+		} else if (mxt_obj_size(object) > size) {
+			/* If firmware is upgraded, new bytes may be added to
+			 * end of objects. It is generally forward compatible
+			 * to zero these bytes - previous behaviour will be
+			 * retained. However this does invalidate the CRC and
+			 * will force fallback mode until the configuration is
+			 * updated. We warn here but do nothing else - the
+			 * malloc has zeroed the entire configuration. */
+			dev_warn(dev, "Zeroing %u byte(s) in T%d\n",
+				 mxt_obj_size(object) - size, type);
+		}
+
+		if (instance >= mxt_obj_instances(object)) {
+			dev_err(dev, "Object instances exceeded!\n");
+			ret = -EINVAL;
+			goto release_mem;
+		}
+
+		reg = object->start_address + mxt_obj_size(object) * instance;
+
+		for (i = 0; i < size; i++) {
+			ret = sscanf(cfg->data + data_pos, "%hhx%n",
+				     &val,
+				     &offset);
+			if (ret != 1) {
+				dev_err(dev, "Bad format in T%d\n", type);
+				ret = -EINVAL;
+				goto release_mem;
+			}
+			data_pos += offset;
+
+			/* Valid object bytes are 0 .. size-1; the old ">"
+			 * comparison stored byte index == size into the
+			 * next object's region */
+			if (i >= mxt_obj_size(object))
+				continue;
+
+			byte_offset = reg + i - cfg_start_ofs;
+
+			/* Upper bound must be exclusive: the old "<="
+			 * allowed a write one byte past config_mem */
+			if ((byte_offset >= 0)
+			    && (byte_offset < config_mem_size)) {
+				*(config_mem + byte_offset) = val;
+			} else {
+				dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
+					reg, object->type, byte_offset);
+				ret = -EINVAL;
+				goto release_mem;
+			}
+		}
+	}
+
+	/* calculate crc of the received configs (not the raw config file) */
+	if (data->T7_address < cfg_start_ofs) {
+		dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n",
+			data->T7_address, cfg_start_ofs);
+		ret = 0;
+		goto release_mem;
+	}
+
+	calculated_crc = mxt_calculate_crc(config_mem,
+					   data->T7_address - cfg_start_ofs,
+					   config_mem_size);
+
+	if (config_crc > 0 && (config_crc != calculated_crc))
+		dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n",
+			 calculated_crc, config_crc);
+
+	/* Write configuration as blocks */
+	byte_offset = 0;
+	while (byte_offset < config_mem_size) {
+		size = config_mem_size - byte_offset;
+
+		if (size > MXT_MAX_BLOCK_WRITE)
+			size = MXT_MAX_BLOCK_WRITE;
+
+		ret = __mxt_write_reg(data->client,
+				      cfg_start_ofs + byte_offset,
+				      size, config_mem + byte_offset);
+		if (ret != 0) {
+			dev_err(dev, "Config write error, ret=%d\n", ret);
+			goto release_mem;
+		}
+
+		byte_offset += size;
+	}
+
+	mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
+
+	ret = mxt_check_retrigen(data);
+	if (ret)
+		goto release_mem;
+
+	mxt_soft_reset(data);
+
+	dev_info(dev, "Config written\n");
+
+	/* T7 config may have changed */
+	mxt_init_t7_power_cfg(data);
+
+release_mem:
+	kfree(config_mem);
+release:
+	release_firmware(cfg);
+	return ret;
+}
+
+/*
+ * Program the T7 power configuration: zeroed active/idle intervals for
+ * deep sleep, or the cached run-mode values otherwise.
+ */
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+	struct device *dev = &data->client->dev;
+	struct t7_config deepsleep = { .active = 0, .idle = 0 };
+	struct t7_config *cfg;
+	int error;
+
+	cfg = (sleep == MXT_POWER_CFG_DEEPSLEEP) ? &deepsleep
+						 : &data->t7_cfg;
+
+	error = __mxt_write_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), cfg);
+	if (error)
+		return error;
+
+	dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n", cfg->active, cfg->idle);
+
+	return 0;
+}
+
+/*
+ * Cache the chip's T7 power configuration. All-zero values mean the
+ * chip is in deep sleep; try one soft reset to recover, and if that
+ * does not help, program sane defaults (ACTV 20, IDLE 100).
+ */
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	int attempt;
+
+	for (attempt = 0; ; attempt++) {
+		error = __mxt_read_reg(data->client, data->T7_address,
+				       sizeof(data->t7_cfg), &data->t7_cfg);
+		if (error)
+			return error;
+
+		if (data->t7_cfg.active != 0 && data->t7_cfg.idle != 0) {
+			dev_info(dev, "Initialised power cfg: ACTV %d, IDLE %d\n",
+				 data->t7_cfg.active, data->t7_cfg.idle);
+			return 0;
+		}
+
+		if (attempt > 0)
+			break;
+
+		dev_info(dev, "T7 cfg zero, resetting\n");
+		mxt_soft_reset(data);
+	}
+
+	dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+	data->t7_cfg.active = 20;
+	data->t7_cfg.idle = 100;
+	return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+}
+
+/*
+ * (Re-)enable the touch interrupt and, when the RETRIGEN workaround is
+ * active, drain any messages that accrued while the IRQ was disabled
+ * so an edge-triggered line is not left stuck asserted.
+ *
+ * Negative data->irq values are board-specific pseudo-IRQs: -1 routes
+ * through lvds_enable_irq(), -2 through dib_disable_irq().
+ * NOTE(review): calling a *disable* helper in an acquire/enable path
+ * looks suspicious - confirm dib_disable_irq() is intended here.
+ */
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+ int error;
+
+ if (data->irq >= 0)
+ enable_irq(data->irq);
+ else if (data->irq == -1)
+ lvds_enable_irq(data->irq);
+ else if (data->irq == -2)
+ dib_disable_irq(data->irq);
+
+ if (data->use_retrigen_workaround) {
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/* Unregister and forget the input device, if one was registered. */
+static void mxt_free_input_device(struct mxt_data *data)
+{
+	if (!data->input_dev)
+		return;
+
+	input_unregister_device(data->input_dev);
+	data->input_dev = NULL;
+}
+
+static void mxt_free_object_table(struct mxt_data *data)
+{
+ mxt_debug_msg_remove(data);
+
+ kfree(data->raw_info_block);
+ data->object_table = NULL;
+ data->info = NULL;
+ data->raw_info_block = NULL;
+ kfree(data->msg_buf);
+ data->msg_buf = NULL;
+
+ mxt_free_input_device(data);
+
+ data->enable_reporting = false;
+ data->T5_address = 0;
+ data->T5_msg_size = 0;
+ data->T6_reportid = 0;
+ data->T7_address = 0;
+ data->T9_reportid_min = 0;
+ data->T9_reportid_max = 0;
+ data->T15_reportid_min = 0;
+ data->T15_reportid_max = 0;
+ data->T18_address = 0;
+ data->T19_reportid = 0;
+ data->T42_reportid_min = 0;
+ data->T42_reportid_max = 0;
+ data->T44_address = 0;
+ data->T48_reportid = 0;
+ data->T63_reportid_min = 0;
+ data->T63_reportid_max = 0;
+ data->T100_reportid_min = 0;
+ data->T100_reportid_max = 0;
+ data->max_reportid = 0;
+}
+
+static int mxt_parse_object_table(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int i;
+ u8 reportid;
+ u16 end_address;
+
+ /* Valid Report IDs start counting from 1 */
+ reportid = 1;
+ data->mem_size = 0;
+ for (i = 0; i < data->info->object_num; i++) {
+ struct mxt_object *object = data->object_table + i;
+ u8 min_id, max_id;
+
+ le16_to_cpus(&object->start_address);
+
+ if (object->num_report_ids) {
+ min_id = reportid;
+ reportid += object->num_report_ids *
+ mxt_obj_instances(object);
+ max_id = reportid - 1;
+ } else {
+ min_id = 0;
+ max_id = 0;
+ }
+
+ dev_dbg(&data->client->dev,
+ "T%u Start:%u Size:%u Instances:%u Report IDs:%u-%u\n",
+ object->type, object->start_address,
+ mxt_obj_size(object), mxt_obj_instances(object),
+ min_id, max_id);
+
+ switch (object->type) {
+ case MXT_GEN_MESSAGE_T5:
+ if (data->info->family_id == 0x80) {
+ /* On mXT224 read and discard unused CRC byte
+ * otherwise DMA reads are misaligned */
+ data->T5_msg_size = mxt_obj_size(object);
+ } else {
+ /* CRC not enabled, so skip last byte */
+ data->T5_msg_size = mxt_obj_size(object) - 1;
+ }
+ data->T5_address = object->start_address;
+ case MXT_GEN_COMMAND_T6:
+ data->T6_reportid = min_id;
+ data->T6_address = object->start_address;
+ break;
+ case MXT_GEN_POWER_T7:
+ data->T7_address = object->start_address;
+ break;
+ case MXT_TOUCH_MULTI_T9:
+ /* Only handle messages from first T9 instance */
+ data->T9_reportid_min = min_id;
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
+ break;
+ case MXT_TOUCH_KEYARRAY_T15:
+ data->T15_reportid_min = min_id;
+ data->T15_reportid_max = max_id;
+ break;
+ case MXT_SPT_COMMSCONFIG_T18:
+ data->T18_address = object->start_address;
+ break;
+ case MXT_PROCI_TOUCHSUPPRESSION_T42:
+ data->T42_reportid_min = min_id;
+ data->T42_reportid_max = max_id;
+ break;
+ case MXT_SPT_MESSAGECOUNT_T44:
+ data->T44_address = object->start_address;
+ break;
+ case MXT_SPT_GPIOPWM_T19:
+ data->T19_reportid = min_id;
+ break;
+ case MXT_PROCG_NOISESUPPRESSION_T48:
+ data->T48_reportid = min_id;
+ break;
+ case MXT_PROCI_ACTIVE_STYLUS_T63:
+ /* Only handle messages from first T63 instance */
+ data->T63_reportid_min = min_id;
+ data->T63_reportid_max = min_id;
+ data->num_stylusids = 1;
+ break;
+ case MXT_TOUCH_MULTITOUCHSCREEN_T100:
+ data->T100_reportid_min = min_id;
+ data->T100_reportid_max = max_id;
+ /* first two report IDs reserved */
+ data->num_touchids = object->num_report_ids - 2;
+ break;
+ }
+
+ end_address = object->start_address
+ + mxt_obj_size(object) * mxt_obj_instances(object) - 1;
+
+ if (end_address >= data->mem_size)
+ data->mem_size = end_address + 1;
+ }
+
+ /* Store maximum reportid */
+ data->max_reportid = reportid;
+
+ /* If T44 exists, T5 position has to be directly after */
+ if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
+ dev_err(&client->dev, "Invalid T44 position\n");
+ return -EINVAL;
+ }
+
+ data->msg_buf = kcalloc(data->max_reportid,
+ data->T5_msg_size, GFP_KERNEL);
+ if (!data->msg_buf) {
+ dev_err(&client->dev, "Failed to allocate message buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mxt_read_info_block(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ size_t size;
+ void *buf;
+ uint8_t num_objects;
+ u32 calculated_crc;
+ u8 *crc_ptr;
+
+ /* If info block already allocated, free it */
+ if (data->raw_info_block != NULL)
+ mxt_free_object_table(data);
+
+ /* Read 7-byte ID information block starting at address 0 */
+ size = sizeof(struct mxt_info);
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ error = __mxt_read_reg(client, 0, size, buf);
+ if (error)
+ goto err_free_mem;
+
+ /* Resize buffer to give space for rest of info block */
+ num_objects = ((struct mxt_info *)buf)->object_num;
+ size += (num_objects * sizeof(struct mxt_object))
+ + MXT_INFO_CHECKSUM_SIZE;
+
+ buf = krealloc(buf, size, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ /* Read rest of info block */
+ error = __mxt_read_reg(client, MXT_OBJECT_START,
+ size - MXT_OBJECT_START,
+ buf + MXT_OBJECT_START);
+ if (error)
+ goto err_free_mem;
+
+ /* Extract & calculate checksum */
+ crc_ptr = buf + size - MXT_INFO_CHECKSUM_SIZE;
+ data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
+
+ calculated_crc = mxt_calculate_crc(buf, 0,
+ size - MXT_INFO_CHECKSUM_SIZE);
+
+ /* CRC mismatch can be caused by data corruption due to I2C comms
+ * issue or else device is not using Object Based Protocol */
+ if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
+ dev_err(&client->dev,
+ "Info Block CRC error calculated=0x%06X read=0x%06X\n",
+ data->info_crc, calculated_crc);
+ return -EIO;
+ }
+
+ /* Save pointers in device data structure */
+ data->raw_info_block = buf;
+ data->info = (struct mxt_info *)buf;
+ data->object_table = (struct mxt_object *)(buf + MXT_OBJECT_START);
+
+ dev_info(&client->dev,
+ "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+ data->info->family_id, data->info->variant_id,
+ data->info->version >> 4, data->info->version & 0xf,
+ data->info->build, data->info->object_num);
+
+ /* Parse object table information */
+ error = mxt_parse_object_table(data);
+ if (error) {
+ dev_err(&client->dev, "Error %d reading object table\n", error);
+ mxt_free_object_table(data);
+ return error;
+ }
+
+ return 0;
+
+err_free_mem:
+ kfree(buf);
+ return error;
+}
+
+static int mxt_read_t9_resolution(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ struct t9_range range;
+ unsigned char orient;
+ struct mxt_object *object;
+
+ object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
+ if (!object)
+ return -EINVAL;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_RANGE,
+ sizeof(range), &range);
+ if (error)
+ return error;
+
+ le16_to_cpus(range.x);
+ le16_to_cpus(range.y);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_ORIENT,
+ 1, &orient);
+ if (error)
+ return error;
+
+ /* Handle default values */
+ if (range.x == 0)
+ range.x = 1023;
+
+ if (range.y == 0)
+ range.y = 1023;
+
+ if (orient & MXT_T9_ORIENT_SWITCH) {
+ data->max_x = range.y;
+ data->max_y = range.x;
+ } else {
+ data->max_x = range.x;
+ data->max_y = range.y;
+ }
+
+ dev_info(&client->dev,
+ "Touchscreen size X%uY%u\n", data->max_x, data->max_y);
+
+ return 0;
+}
+
/* Power the controller up through its regulators.
 * Per the maXTouch power sequencing requirement (see mxt_probe_regulators),
 * RESET is held low while the supplies ramp, then released; the chip's
 * readiness is awaited via bl_completion.
 * NOTE(review): regulator_enable() return values are ignored here —
 * confirm that a failed enable is acceptable in this vendor tree. */
static void mxt_regulator_enable(struct mxt_data *data)
{
	/* Hold reset low while the supplies come up */
	gpio_set_value(data->pdata->gpio_reset, 0);

	regulator_enable(data->reg_vdd);
	regulator_enable(data->reg_avdd);
	msleep(MXT_REGULATOR_DELAY);

	/* Re-arm the completion before releasing reset so the signal
	 * from the IRQ handler is not missed */
	INIT_COMPLETION(data->bl_completion);
	gpio_set_value(data->pdata->gpio_reset, 1);
	mxt_wait_for_completion(data, &data->bl_completion, MXT_POWERON_DELAY);
}
+
/* Cut power to the controller by disabling both supplies. */
static void mxt_regulator_disable(struct mxt_data *data)
{
	regulator_disable(data->reg_vdd);
	regulator_disable(data->reg_avdd);
}
+
+static void mxt_probe_regulators(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+
+ /* According to maXTouch power sequencing specification, RESET line
+ * must be kept low until some time after regulators come up to
+ * voltage */
+ if (!data->pdata->gpio_reset) {
+ dev_warn(dev, "Must have reset GPIO to use regulator support\n");
+ goto fail;
+ }
+
+ data->reg_vdd = regulator_get(dev, "vdd");
+ if (IS_ERR(data->reg_vdd)) {
+ error = PTR_ERR(data->reg_vdd);
+ dev_err(dev, "Error %d getting vdd regulator\n", error);
+ goto fail;
+ }
+
+ data->reg_avdd = regulator_get(dev, "avdd");
+ if (IS_ERR(data->reg_vdd)) {
+ error = PTR_ERR(data->reg_vdd);
+ dev_err(dev, "Error %d getting avdd regulator\n", error);
+ goto fail_release;
+ }
+
+ data->use_regulator = true;
+ mxt_regulator_enable(data);
+
+ dev_dbg(dev, "Initialised regulators\n");
+ return;
+
+fail_release:
+ regulator_put(data->reg_vdd);
+fail:
+ data->reg_vdd = NULL;
+ data->reg_avdd = NULL;
+ data->use_regulator = false;
+}
+
+static int mxt_read_t100_config(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ struct mxt_object *object;
+ u16 range_x, range_y;
+ u8 cfg, tchaux;
+ u8 aux;
+
+ object = mxt_get_object(data, MXT_TOUCH_MULTITOUCHSCREEN_T100);
+ if (!object)
+ return -EINVAL;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_XRANGE,
+ sizeof(range_x), &range_x);
+ if (error)
+ return error;
+
+ le16_to_cpus(range_x);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_YRANGE,
+ sizeof(range_y), &range_y);
+ if (error)
+ return error;
+
+ le16_to_cpus(range_y);
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_CFG1,
+ 1, &cfg);
+ if (error)
+ return error;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_TCHAUX,
+ 1, &tchaux);
+ if (error)
+ return error;
+
+ /* Handle default values */
+ if (range_x == 0)
+ range_x = 1023;
+
+ /* Handle default values */
+ if (range_x == 0)
+ range_x = 1023;
+
+ if (range_y == 0)
+ range_y = 1023;
+
+ if (cfg & MXT_T100_CFG_SWITCHXY) {
+ data->max_x = range_y;
+ data->max_y = range_x;
+ } else {
+ data->max_x = range_x;
+ data->max_y = range_y;
+ }
+
+ /* allocate aux bytes */
+ aux = 6;
+
+ if (tchaux & MXT_T100_TCHAUX_VECT)
+ data->t100_aux_vect = aux++;
+
+ if (tchaux & MXT_T100_TCHAUX_AMPL)
+ data->t100_aux_ampl = aux++;
+
+ if (tchaux & MXT_T100_TCHAUX_AREA)
+ data->t100_aux_area = aux++;
+
+ dev_info(&client->dev,
+ "T100 Touchscreen size X%uY%u\n", data->max_x, data->max_y);
+
+ return 0;
+}
+
+static int mxt_input_open(struct input_dev *dev);
+static void mxt_input_close(struct input_dev *dev);
+
+static int mxt_initialize_t100_input_device(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct input_dev *input_dev;
+ int error;
+
+ error = mxt_read_t100_config(data);
+ if (error)
+ dev_warn(dev, "Failed to initialize T9 resolution\n");
+
+ input_dev = input_allocate_device();
+ if (!data || !input_dev) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev->name = "atmel_mxt_ts T100 touchscreen";
+
+ input_dev->phys = data->phys;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &data->client->dev;
+ input_dev->open = mxt_input_open;
+ input_dev->close = mxt_input_close;
+
+ set_bit(EV_ABS, input_dev->evbit);
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X,
+ 0, data->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, data->max_y, 0, 0);
+
+ if (data->t100_aux_ampl)
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, 255, 0, 0);
+
+ /* For multi touch */
+ error = input_mt_init_slots(input_dev, data->num_touchids);
+ if (error) {
+ dev_err(dev, "Error %d initialising slots\n", error);
+ goto err_free_mem;
+ }
+
+ input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, data->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, data->max_y, 0, 0);
+
+ if (data->t100_aux_area)
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, MXT_MAX_AREA, 0, 0);
+
+ if (data->t100_aux_ampl)
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE,
+ 0, 255, 0, 0);
+
+ if (data->t100_aux_vect)
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
+ 0, 255, 0, 0);
+
+ input_set_drvdata(input_dev, data);
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(dev, "Error %d registering input device\n", error);
+ goto err_free_mem;
+ }
+
+ data->input_dev = input_dev;
+
+ return 0;
+
+err_free_mem:
+ input_free_device(input_dev);
+ return error;
+}
+
+static int mxt_initialize_t9_input_device(struct mxt_data *data);
+static int mxt_configure_objects(struct mxt_data *data);
+
/* Bring the device up: read the info block; if that fails, probe for
 * the bootloader (at the normal then the alternate address) and try
 * once to kick the chip back into app mode. Then apply the retrigger
 * check, enable the IRQ and configure the objects.
 * Returns 0 on success (including "stuck in bootloader", which is left
 * recoverable for reflashing) or a negative errno. */
static int mxt_initialize(struct mxt_data *data)
{
	struct i2c_client *client = data->client;
	int error;
	bool alt_bootloader_addr = false;
	bool retry = false;

retry_info:
	error = mxt_read_info_block(data);
	if (error) {
retry_bootloader:
		error = mxt_probe_bootloader(data, alt_bootloader_addr);
		if (error) {
			if (alt_bootloader_addr) {
				/* Chip is not in appmode or bootloader mode */
				return error;
			}

			dev_info(&client->dev, "Trying alternate bootloader address\n");
			alt_bootloader_addr = true;
			goto retry_bootloader;
		} else {
			if (retry) {
				dev_err(&client->dev,
					"Could not recover device from "
					"bootloader mode\n");
				/* this is not an error state, we can reflash
				 * from here */
				data->in_bootloader = true;
				return 0;
			}

			/* Attempt to exit bootloader into app mode */
			mxt_send_bootloader_cmd(data, false);
			msleep(MXT_FW_RESET_TIME);
			retry = true;
			goto retry_info;
		}
	}

	/* Determine whether the CHG retrigger workaround is needed */
	error = mxt_check_retrigen(data);
	if (error)
		return error;

	error = mxt_acquire_irq(data);
	if (error)
		return error;

	error = mxt_configure_objects(data);
	if (error)
		return error;

	return 0;
}
+
+static int mxt_configure_objects(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+
+ error = mxt_debug_msg_init(data);
+ if (error)
+ return error;
+
+ error = mxt_init_t7_power_cfg(data);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize power cfg\n");
+ return error;
+ }
+
+ /* Check register init values */
+ error = mxt_check_reg_init(data);
+ if (error) {
+ dev_err(&client->dev, "Error %d initialising configuration\n",
+ error);
+ return error;
+ }
+
+ if (data->T9_reportid_min) {
+ error = mxt_initialize_t9_input_device(data);
+ if (error)
+ return error;
+ } else if (data->T100_reportid_min) {
+ error = mxt_initialize_t100_input_device(data);
+ if (error)
+ return error;
+ } else {
+ dev_warn(&client->dev, "No touch object detected\n");
+ }
+
+ data->enable_reporting = true;
+ return 0;
+}
+
+/* Firmware Version is returned as Major.Minor.Build */
+static ssize_t mxt_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
+ data->info->version >> 4, data->info->version & 0xf,
+ data->info->build);
+}
+
+/* Hardware Version is returned as FamilyID.VariantID */
+static ssize_t mxt_hw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
+ data->info->family_id, data->info->variant_id);
+}
+
+static ssize_t mxt_show_instance(char *buf, int count,
+ struct mxt_object *object, int instance,
+ const u8 *val)
+{
+ int i;
+
+ if (mxt_obj_instances(object) > 1)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Instance %u\n", instance);
+
+ for (i = 0; i < mxt_obj_size(object); i++)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "\t[%2u]: %02x (%d)\n", i, val[i], val[i]);
+ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+
+ return count;
+}
+
/* sysfs "object" read: dump the raw register contents of every readable
 * object instance on the chip, one "T<type>:" section per object. */
static ssize_t mxt_object_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct mxt_data *data = dev_get_drvdata(dev);
	struct mxt_object *object;
	int count = 0;
	int i, j;
	int error;
	u8 *obuf;

	/* Pre-allocate buffer large enough to hold max sized object. */
	/* NOTE(review): assumes no readable object instance exceeds 256
	 * bytes; a larger object would overflow obuf — verify against
	 * the chip's object table. */
	obuf = kmalloc(256, GFP_KERNEL);
	if (!obuf)
		return -ENOMEM;

	error = 0;
	for (i = 0; i < data->info->object_num; i++) {
		object = data->object_table + i;

		if (!mxt_object_readable(object->type))
			continue;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "T%u:\n", object->type);

		/* Read each instance separately: they are laid out
		 * contiguously, "size" bytes apart */
		for (j = 0; j < mxt_obj_instances(object); j++) {
			u16 size = mxt_obj_size(object);
			u16 addr = object->start_address + j * size;

			error = __mxt_read_reg(data->client, addr, size, obuf);
			if (error)
				goto done;

			count = mxt_show_instance(buf, count, object, j, obuf);
		}
	}

done:
	kfree(obuf);
	/* Return the first read error, or the number of bytes written */
	return error ?: count;
}
+
+static int mxt_check_firmware_format(struct device *dev,
+ const struct firmware *fw)
+{
+ unsigned int pos = 0;
+ char c;
+
+ while (pos < fw->size) {
+ c = *(fw->data + pos);
+
+ if (c < '0' || (c > '9' && c < 'A') || c > 'F')
+ return 0;
+
+ pos++;
+ }
+
+ /* To convert file try
+ * xxd -r -p mXTXXX__APP_VX-X-XX.enc > maxtouch.fw */
+ dev_err(dev, "Aborting: firmware file must be in binary format\n");
+
+ return -1;
+}
+
+static int mxt_load_fw(struct device *dev)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ const struct firmware *fw = NULL;
+ unsigned int frame_size;
+ unsigned int pos = 0;
+ unsigned int retry = 0;
+ unsigned int frame = 0;
+ int ret;
+
+ ret = request_firmware(&fw, data->fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Unable to open firmware %s\n", data->fw_name);
+ return ret;
+ }
+
+ /* Check for incorrect enc file */
+ ret = mxt_check_firmware_format(dev, fw);
+ if (ret)
+ goto release_firmware;
+
+ if (data->suspended) {
+ if (data->use_regulator)
+ mxt_regulator_enable(data);
+
+ if (data->irq >= 0)
+ enable_irq(data->irq);
+ else if (data->irq == -1)
+ lvds_enable_irq(data->irq);
+ else if (data->irq == -2)
+ dib_disable_irq(data->irq);
+
+ data->suspended = false;
+ }
+
+ if (!data->in_bootloader) {
+ /* Change to the bootloader mode */
+ data->in_bootloader = true;
+
+ ret = mxt_t6_command(data, MXT_COMMAND_RESET,
+ MXT_BOOT_VALUE, false);
+ if (ret)
+ goto release_firmware;
+
+ msleep(MXT_RESET_TIME);
+
+ /* At this stage, do not need to scan since we know
+ * family ID */
+ ret = mxt_lookup_bootloader_address(data, 0);
+ if (ret)
+ goto release_firmware;
+ } else {
+ if (data->irq >= 0)
+ enable_irq(data->irq);
+ else if (data->irq == -1)
+ lvds_enable_irq(data->irq);
+ else if (data->irq == -2)
+ dib_disable_irq(data->irq);
+ }
+
+ mxt_free_object_table(data);
+ INIT_COMPLETION(data->bl_completion);
+
+ ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
+ if (ret) {
+ /* Bootloader may still be unlocked from previous update
+ * attempt */
+ ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, false);
+ if (ret)
+ goto disable_irq;
+ } else {
+ dev_info(dev, "Unlocking bootloader\n");
+
+ /* Unlock bootloader */
+ ret = mxt_send_bootloader_cmd(data, true);
+ if (ret)
+ goto disable_irq;
+ }
+
+ while (pos < fw->size) {
+ ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, true);
+ if (ret)
+ goto disable_irq;
+
+ frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1));
+
+ /* Take account of CRC bytes */
+ frame_size += 2;
+
+ /* Write one frame to device */
+ ret = mxt_bootloader_write(data, fw->data + pos, frame_size);
+ if (ret)
+ goto disable_irq;
+
+ ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS, true);
+ if (ret) {
+ retry++;
+
+ /* Back off by 20ms per retry */
+ msleep(retry * 20);
+
+ if (retry > 20) {
+ dev_err(dev, "Retry count exceeded\n");
+ goto disable_irq;
+ }
+ } else {
+ retry = 0;
+ pos += frame_size;
+ frame++;
+ }
+
+ if (frame % 50 == 0)
+ dev_info(dev, "Sent %d frames, %d/%zd bytes\n",
+ frame, pos, fw->size);
+ }
+
+ /* Wait for flash. */
+ ret = mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_FW_RESET_TIME);
+ if (ret)
+ goto disable_irq;
+
+ dev_info(dev, "Sent %d frames, %zd bytes\n", frame, pos);
+
+ /* Wait for device to reset. Some bootloader versions do not assert
+ * the CHG line after bootloading has finished, so ignore error */
+ mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_FW_RESET_TIME);
+
+ data->in_bootloader = false;
+
+disable_irq:
+ if (data->irq >= 0)
+ disable_irq(data->irq);
+ else if (data->irq == -1)
+ lvds_disable_irq(data->irq);
+ else if (data->irq == -2)
+ dib_disable_irq(data->irq);
+release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
+static int mxt_update_file_name(struct device *dev, char **file_name,
+ const char *buf, size_t count)
+{
+ char *file_name_tmp;
+
+ /* Simple sanity check */
+ if (count > 64) {
+ dev_warn(dev, "File name too long\n");
+ return -EINVAL;
+ }
+
+ file_name_tmp = krealloc(*file_name, count + 1, GFP_KERNEL);
+ if (!file_name_tmp) {
+ dev_warn(dev, "no memory\n");
+ return -ENOMEM;
+ }
+
+ *file_name = file_name_tmp;
+ memcpy(*file_name, buf, count);
+
+ /* Echo into the sysfs entry may append newline at the end of buf */
+ if (buf[count - 1] == '\n')
+ (*file_name)[count - 1] = '\0';
+ else
+ (*file_name)[count] = '\0';
+
+ return 0;
+}
+
+static ssize_t mxt_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int error;
+
+ error = mxt_update_file_name(dev, &data->fw_name, buf, count);
+ if (error)
+ return error;
+
+ error = mxt_load_fw(dev);
+ if (error) {
+ dev_err(dev, "The firmware update failed(%d)\n", error);
+ count = error;
+ } else {
+ dev_info(dev, "The firmware update succeeded\n");
+
+ data->suspended = false;
+
+ error = mxt_initialize(data);
+ if (error)
+ return error;
+ }
+
+ return count;
+}
+
+static ssize_t mxt_update_cfg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (data->in_bootloader) {
+ dev_err(dev, "Not in appmode\n");
+ return -EINVAL;
+ }
+
+ ret = mxt_update_file_name(dev, &data->cfg_name, buf, count);
+ if (ret)
+ return ret;
+
+ data->enable_reporting = false;
+ mxt_free_input_device(data);
+
+ if (data->suspended) {
+ if (data->use_regulator)
+ mxt_regulator_enable(data);
+ else
+ mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+ mxt_acquire_irq(data);
+
+ data->suspended = false;
+ }
+
+ ret = mxt_configure_objects(data);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ return ret;
+}
+
+static ssize_t mxt_debug_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ char c;
+
+ c = data->debug_enabled ? '1' : '0';
+ return scnprintf(buf, PAGE_SIZE, "%c\n", c);
+}
+
+static ssize_t mxt_debug_notify_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t mxt_debug_v2_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int i;
+
+ if (sscanf(buf, "%u", &i) == 1 && i < 2) {
+ if (i == 1)
+ mxt_debug_msg_enable(data);
+ else
+ mxt_debug_msg_disable(data);
+
+ return count;
+ } else {
+ dev_dbg(dev, "debug_enabled write error\n");
+ return -EINVAL;
+ }
+}
+
+static ssize_t mxt_debug_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int i;
+
+ if (sscanf(buf, "%u", &i) == 1 && i < 2) {
+ data->debug_enabled = (i == 1);
+
+ dev_dbg(dev, "%s\n", i ? "debug enabled" : "debug disabled");
+ return count;
+ } else {
+ dev_dbg(dev, "debug_enabled write error\n");
+ return -EINVAL;
+ }
+}
+
+static int mxt_check_mem_access_params(struct mxt_data *data, loff_t off,
+ size_t *count)
+{
+ if (off >= data->mem_size)
+ return -EIO;
+
+ if (off + *count > data->mem_size)
+ *count = data->mem_size - off;
+
+ if (*count > MXT_MAX_BLOCK_WRITE)
+ *count = MXT_MAX_BLOCK_WRITE;
+
+ return 0;
+}
+
+static ssize_t mxt_mem_access_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = mxt_check_mem_access_params(data, off, &count);
+ if (ret < 0)
+ return ret;
+
+ if (count > 0)
+ ret = __mxt_read_reg(data->client, off, count, buf);
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t mxt_mem_access_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct mxt_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = mxt_check_mem_access_params(data, off, &count);
+ if (ret < 0)
+ return ret;
+
+ if (count > 0)
+ ret = __mxt_write_reg(data->client, off, count, buf);
+
+ return ret == 0 ? count : 0;
+}
+
/* sysfs attributes exposed on the I2C client device directory */
static DEVICE_ATTR(fw_version, S_IRUGO, mxt_fw_version_show, NULL);
static DEVICE_ATTR(hw_version, S_IRUGO, mxt_hw_version_show, NULL);
static DEVICE_ATTR(object, S_IRUGO, mxt_object_show, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, mxt_update_fw_store);
static DEVICE_ATTR(update_cfg, S_IWUSR, NULL, mxt_update_cfg_store);
static DEVICE_ATTR(debug_v2_enable, S_IWUSR | S_IRUSR, NULL, mxt_debug_v2_enable_store);
static DEVICE_ATTR(debug_notify, S_IRUGO, mxt_debug_notify_show, NULL);
static DEVICE_ATTR(debug_enable, S_IWUSR | S_IRUSR, mxt_debug_enable_show,
		   mxt_debug_enable_store);

/* NULL-terminated list backing the attribute group below */
static struct attribute *mxt_attrs[] = {
	&dev_attr_fw_version.attr,
	&dev_attr_hw_version.attr,
	&dev_attr_object.attr,
	&dev_attr_update_fw.attr,
	&dev_attr_update_cfg.attr,
	&dev_attr_debug_enable.attr,
	&dev_attr_debug_v2_enable.attr,
	&dev_attr_debug_notify.attr,
	NULL
};

/* Registered with sysfs_create_group() during probe */
static const struct attribute_group mxt_attr_group = {
	.attrs = mxt_attrs,
};
+
+static void mxt_reset_slots(struct mxt_data *data)
+{
+ struct input_dev *input_dev = data->input_dev;
+ unsigned int num_mt_slots;
+ int id;
+
+ num_mt_slots = data->num_touchids + data->num_stylusids;
+
+ for (id = 0; id < num_mt_slots; id++) {
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+ }
+
+ mxt_input_sync(input_dev);
+}
+
/* Bring the device out of suspend: either power the regulators back on,
 * or wake it from T7 deep sleep, then re-enable the interrupt and
 * resume reporting. No-op if not suspended or in bootloader mode. */
static void mxt_start(struct mxt_data *data)
{
	if (!data->suspended || data->in_bootloader)
		return;

	if (data->use_regulator) {
		mxt_regulator_enable(data);
	} else {
		/* Discard any messages still in message buffer from before
		 * chip went to sleep */
		mxt_process_messages_until_invalid(data);

		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);

		/* Recalibrate since chip has been in deep sleep */
		mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
	}

	mxt_acquire_irq(data);
	data->enable_reporting = true;
	data->suspended = false;
}
+
/* Suspend the device: stop reporting, disable whichever interrupt
 * source is in use (>= 0 regular IRQ, -1 LVDS, -2 DIB), power down via
 * regulators or T7 deep sleep, and release all touch slots.
 * No-op if already suspended or in bootloader mode. */
static void mxt_stop(struct mxt_data *data)
{
	if (data->suspended || data->in_bootloader)
		return;

	data->enable_reporting = false;
	if (data->irq >= 0)
		disable_irq(data->irq);
	else if (data->irq == -1)
		lvds_disable_irq(data->irq);
	else if (data->irq == -2)
		dib_disable_irq(data->irq);

	if (data->use_regulator)
		mxt_regulator_disable(data);
	else
		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);

	/* Report all contacts as lifted before going quiet */
	mxt_reset_slots(data);
	data->suspended = true;
}
+
/* input core open callback: resume the device when userspace opens it. */
static int mxt_input_open(struct input_dev *dev)
{
	mxt_start(input_get_drvdata(dev));

	return 0;
}
+
/* input core close callback: suspend the device on last close. */
static void mxt_input_close(struct input_dev *dev)
{
	mxt_stop(input_get_drvdata(dev));
}
+
+static int mxt_handle_pdata(struct mxt_data *data)
+{
+ data->pdata = dev_get_platdata(&data->client->dev);
+
+ /* Use provided platform data if present */
+ if (data->pdata) {
+ if (data->pdata->cfg_name)
+ mxt_update_file_name(&data->client->dev,
+ &data->cfg_name,
+ data->pdata->cfg_name,
+ strlen(data->pdata->cfg_name));
+
+ if (data->pdata->fw_name)
+ mxt_update_file_name(&data->client->dev,
+ &data->fw_name,
+ data->pdata->fw_name,
+ strlen(data->pdata->fw_name));
+
+ return 0;
+ }
+
+ data->pdata = kzalloc(sizeof(*data->pdata), GFP_KERNEL);
+ if (!data->pdata) {
+ dev_err(&data->client->dev, "Failed to allocate pdata\n");
+ return -ENOMEM;
+ }
+
+ /* Set default parameters */
+ data->pdata->irqflags = IRQF_TRIGGER_FALLING;
+
+ return 0;
+}
+
/* Allocate, configure and register the input device for a chip exposing
 * the T9 multitouch object, including optional T19 GPIO keys (touchpad
 * mode), T63 active stylus and T15 key array support.
 * Returns 0 or a negative errno; frees the input device on failure. */
static int mxt_initialize_t9_input_device(struct mxt_data *data)
{
	struct device *dev = &data->client->dev;
	const struct mxt_platform_data *pdata = data->pdata;
	struct input_dev *input_dev;
	int error;
	unsigned int num_mt_slots;
	int i;

	error = mxt_read_t9_resolution(data);
	if (error)
		dev_warn(dev, "Failed to initialize T9 resolution\n");

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(dev, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	input_dev->name = "Atmel maXTouch Touchscreen";
	input_dev->phys = data->phys;
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = dev;
	input_dev->open = mxt_input_open;
	input_dev->close = mxt_input_close;

	__set_bit(EV_ABS, input_dev->evbit);
	input_set_capability(input_dev, EV_KEY, BTN_TOUCH);

	/* T19 GPIO keys present: register as a button-pad touchpad
	 * with physical resolution, under a different device name */
	if (pdata->t19_num_keys) {
		__set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);

		for (i = 0; i < pdata->t19_num_keys; i++)
			if (pdata->t19_keymap[i] != KEY_RESERVED)
				input_set_capability(input_dev, EV_KEY,
						     pdata->t19_keymap[i]);

		__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
		__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
		__set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
		__set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);

		input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
		input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
		input_abs_set_res(input_dev, ABS_MT_POSITION_X,
				  MXT_PIXELS_PER_MM);
		input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
				  MXT_PIXELS_PER_MM);

		input_dev->name = "Atmel maXTouch Touchpad";
	}

	/* For single touch */
	input_set_abs_params(input_dev, ABS_X,
			     0, data->max_x, 0, 0);
	input_set_abs_params(input_dev, ABS_Y,
			     0, data->max_y, 0, 0);
	input_set_abs_params(input_dev, ABS_PRESSURE,
			     0, 255, 0, 0);

	/* For multi touch */
	num_mt_slots = data->num_touchids + data->num_stylusids;
	error = input_mt_init_slots(input_dev, num_mt_slots);
	if (error) {
		dev_err(dev, "Error %d initialising slots\n", error);
		goto err_free_mem;
	}

	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
			     0, MXT_MAX_AREA, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_POSITION_X,
			     0, data->max_x, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
			     0, data->max_y, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_PRESSURE,
			     0, 255, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
			     0, 255, 0, 0);

	/* For T63 active stylus */
	if (data->T63_reportid_min) {
		input_set_capability(input_dev, EV_KEY, BTN_STYLUS);
		input_set_capability(input_dev, EV_KEY, BTN_STYLUS2);
		input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
				     0, MT_TOOL_MAX, 0, 0);
	}

	/* For T15 key array */
	if (data->T15_reportid_min) {
		data->t15_keystatus = 0;

		for (i = 0; i < data->pdata->t15_num_keys; i++)
			input_set_capability(input_dev, EV_KEY,
					     data->pdata->t15_keymap[i]);
	}

	input_set_drvdata(input_dev, data);

	error = input_register_device(input_dev);
	if (error) {
		dev_err(dev, "Error %d registering input device\n", error);
		goto err_free_mem;
	}

	data->input_dev = input_dev;

	return 0;

err_free_mem:
	input_free_device(input_dev);
	return error;
}
+
+/*
+ * Bind an mxt controller on the I2C bus: allocate per-device state,
+ * hook up the interrupt source, grab the supply regulators, read the
+ * chip's object table and create the sysfs control files.
+ */
+static int __devinit mxt_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct mxt_data *data;
+	int error;
+
+	data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
+	if (!data) {
+		dev_err(&client->dev, "Failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
+		 client->adapter->nr, client->addr);
+
+	data->client = client;
+	data->irq = client->irq;
+	i2c_set_clientdata(client, data);
+
+	/* Builds a pdata copy when the device carries no platform data. */
+	error = mxt_handle_pdata(data);
+	if (error)
+		goto err_free_mem;
+
+	init_completion(&data->bl_completion);
+	init_completion(&data->reset_completion);
+	init_completion(&data->crc_completion);
+	mutex_init(&data->debug_msg_lock);
+
+	/*
+	 * The irq number doubles as a source selector: >= 0 is a regular
+	 * interrupt line, -1 routes through the LVDS helper and -2 through
+	 * the DIB helper (board-specific demuxers, presumably -- confirm
+	 * with the platform code).  Anything else means "no interrupt".
+	 */
+	if (client->irq >= 0) {
+		error = request_threaded_irq(data->irq, NULL, mxt_interrupt,
+				data->pdata->irqflags | IRQF_ONESHOT,
+				client->name, data);
+	}
+	else if (client->irq == -1) {
+		error = lvds_request_irq(client->irq, mxt_interrupt,
+				data->pdata->irqflags, client->dev.driver->name, data);
+	}
+	else if (client->irq == -2) {
+		error = dib_request_irq(client->irq, mxt_interrupt,
+				data->pdata->irqflags, client->dev.driver->name, data);
+	}
+	else
+		error = 0;
+	if (error) {
+		dev_err(&client->dev, "Failed to register interrupt\n");
+		goto err_free_pdata;
+	}
+
+	/*
+	 * NOTE(review): the regulators acquired here are only released in
+	 * mxt_remove(); none of the error paths below put them back.
+	 */
+	mxt_probe_regulators(data);
+
+	/* Keep the interrupt masked until mxt_initialize() has finished. */
+	if (data->irq >= 0)
+		disable_irq(data->irq);
+	else if (data->irq == -1)
+		lvds_disable_irq(data->irq);
+	else if (data->irq == -2)
+		dib_disable_irq(data->irq);
+
+	error = mxt_initialize(data);
+	if (error)
+		goto err_free_irq;
+
+	error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
+	if (error) {
+		dev_err(&client->dev, "Failure %d creating sysfs group\n",
+			error);
+		goto err_free_object;
+	}
+
+	/* Raw register window for debug/config tools. */
+	sysfs_bin_attr_init(&data->mem_access_attr);
+	data->mem_access_attr.attr.name = "mem_access";
+	data->mem_access_attr.attr.mode = S_IRUGO | S_IWUSR | S_IWGRP;
+	data->mem_access_attr.read = mxt_mem_access_read;
+	data->mem_access_attr.write = mxt_mem_access_write;
+	data->mem_access_attr.size = data->mem_size;
+
+	/*
+	 * Capture the return value: the original discarded it, so a
+	 * failure here fell through to the cleanup labels with error
+	 * still 0 -- probe freed 'data' yet reported success, leaving
+	 * dangling clientdata behind.
+	 */
+	error = sysfs_create_bin_file(&client->dev.kobj,
+				      &data->mem_access_attr);
+	if (error < 0) {
+		dev_err(&client->dev, "Failed to create %s\n",
+			data->mem_access_attr.attr.name);
+		goto err_remove_sysfs_group;
+	}
+
+	return 0;
+
+err_remove_sysfs_group:
+	sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
+err_free_object:
+	mxt_free_object_table(data);
+err_free_irq:
+	/* NOTE(review): when client->irq < -2 nothing was requested above,
+	 * yet free_irq() is still reached with that bogus number. */
+	if (data->irq == -1)
+		lvds_free_irq(data->irq, data);
+	else if (data->irq == -2)
+		dib_free_irq(data->irq, data);
+	else
+		free_irq(data->irq, data);
+err_free_pdata:
+	/* pdata was kmalloc'ed by mxt_handle_pdata() in the no-platform-data
+	 * case only. */
+	if (!dev_get_platdata(&data->client->dev))
+		kfree(data->pdata);
+err_free_mem:
+	kfree(data);
+	return error;
+}
+
+/* Tear down everything mxt_probe() set up, in reverse order. */
+static int __devexit mxt_remove(struct i2c_client *client)
+{
+	struct mxt_data *data = i2c_get_clientdata(client);
+
+	/* The bin file only exists if probe reached sysfs_create_bin_file()
+	 * (attr.name is assigned immediately before creating it). */
+	if (data->mem_access_attr.attr.name)
+		sysfs_remove_bin_file(&client->dev.kobj,
+				      &data->mem_access_attr);
+
+	sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
+	/* -1/-2 select the board-specific LVDS/DIB interrupt helpers. */
+	if (data->irq == -1)
+		lvds_free_irq(data->irq, data);
+	else if (data->irq == -2)
+		dib_free_irq(data->irq, data);
+	else
+		free_irq(data->irq, data);
+	regulator_put(data->reg_avdd);
+	regulator_put(data->reg_vdd);
+	mxt_free_object_table(data);
+	/* pdata was kmalloc'ed by mxt_handle_pdata() only when the device
+	 * came without platform data. */
+	if (!dev_get_platdata(&data->client->dev))
+		kfree(data->pdata);
+	kfree(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* PM sleep hook: quiesce the controller while the input device is open. */
+static int mxt_suspend(struct device *dev)
+{
+	struct mxt_data *data = i2c_get_clientdata(to_i2c_client(dev));
+	struct input_dev *input = data->input_dev;
+
+	mutex_lock(&input->mutex);
+	if (input->users)
+		mxt_stop(data);
+	mutex_unlock(&input->mutex);
+
+	return 0;
+}
+
+/* PM resume hook: restart scanning if anyone holds the input device open. */
+static int mxt_resume(struct device *dev)
+{
+	struct mxt_data *data = i2c_get_clientdata(to_i2c_client(dev));
+	struct input_dev *input = data->input_dev;
+
+	mutex_lock(&input->mutex);
+	if (input->users)
+		mxt_start(data);
+	mutex_unlock(&input->mutex);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
+
+/* Mask the interrupt on system shutdown; which helper to use depends on
+ * the irq encoding (>= 0 regular line, -1 LVDS helper, -2 DIB helper). */
+static void mxt_shutdown(struct i2c_client *client)
+{
+	struct mxt_data *data = i2c_get_clientdata(client);
+	int irq = data->irq;
+
+	switch (irq) {
+	case -1:
+		lvds_disable_irq(irq);
+		break;
+	case -2:
+		dib_disable_irq(irq);
+		break;
+	default:
+		if (irq >= 0)
+			disable_irq(irq);
+		break;
+	}
+}
+
+/* I2C device names this driver binds to (legacy and current spellings). */
+static const struct i2c_device_id mxt_id[] = {
+	{ "qt602240_ts", 0 },
+	{ "atmel_mxt_ts", 0 },
+	{ "atmel_mxt_tp", 0 },
+	{ "mXT224", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mxt_id);
+
+static struct i2c_driver mxt_driver = {
+	.driver = {
+		.name	= "atmel_mxt_ts",
+		.owner	= THIS_MODULE,
+		.pm	= &mxt_pm_ops,
+	},
+	.probe		= mxt_probe,
+	.remove		= __devexit_p(mxt_remove),
+	.shutdown	= mxt_shutdown,
+	.id_table	= mxt_id,
+};
+
+static int __init mxt_init(void)
+{
+ return i2c_add_driver(&mxt_driver);
+}
+
+static void __exit mxt_exit(void)
+{
+ i2c_del_driver(&mxt_driver);
+}
+
+module_init(mxt_init);
+module_exit(mxt_exit);
+
+/* Module information */
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Atmel maXTouch Touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/input/touchscreen/atmel_mxt_ts.h b/drivers/parrot/input/touchscreen/atmel_mxt_ts.h
new file mode 100644
index 00000000000000..e065abe4a832ec
--- /dev/null
+++ b/drivers/parrot/input/touchscreen/atmel_mxt_ts.h
@@ -0,0 +1,30 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_ATMEL_MXT_TS_H
+#define __LINUX_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+	unsigned long irqflags;		/* trigger flags passed to request_irq() */
+	u8 t19_num_keys;		/* number of T19 GPIO-mapped buttons */
+	const unsigned int *t19_keymap;	/* keycodes for the T19 buttons */
+	int t15_num_keys;		/* number of T15 key-array entries */
+	const unsigned int *t15_keymap;	/* keycodes for the T15 key array */
+	unsigned long gpio_reset;	/* GPIO wired to the chip reset line --
+					 * presumably; confirm with board code */
+	const char *cfg_name;		/* config blob name (likely for
+					 * request_firmware -- confirm) */
+	const char *fw_name;		/* firmware image name */
+};
+
+#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/drivers/parrot/input/virtual_wakeup_button.c b/drivers/parrot/input/virtual_wakeup_button.c
new file mode 100644
index 00000000000000..00a7819af3385d
--- /dev/null
+++ b/drivers/parrot/input/virtual_wakeup_button.c
@@ -0,0 +1,105 @@
+/*
+ * virtual_wakeup_button.c - Power Button which is pushed on resume from standby.
+ *
+ * Copyright (c) 2011 Stefan Seidel
+ * Copyright (c) 2014 Aurelien Lefebvre
+ *
+ * This file is released under the GPLv2 or later.
+ */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/suspend.h>
+#include <linux/pm_runtime.h>
+
+#define DRIVER_NAME "virtual_wakeup_button"
+
+/*
+ * Create and register the virtual input device that will emit KEY_POWER
+ * on resume.  Fixes over the original: input_allocate_device() was not
+ * checked for NULL, and a registration failure freed the device yet
+ * still returned 0 with stale drvdata left on the platform device.
+ */
+static int __devinit virtual_wakeup_button_probe(struct platform_device* pdev)
+{
+	struct input_dev *input;
+	int error;
+
+	dev_info(&pdev->dev, "probe\n");
+
+	input = input_allocate_device();
+	if (!input)
+		return -ENOMEM;
+
+	input->name = DRIVER_NAME;
+	input->evbit[0] = BIT_MASK(EV_KEY);
+	set_bit(KEY_POWER, input->keybit);
+
+	error = input_register_device(input);
+	if (error) {
+		input_free_device(input);
+		return error;
+	}
+
+	/* Publish drvdata only once the device is fully registered. */
+	platform_set_drvdata(pdev, input);
+
+	return 0;
+}
+
+/*
+ * Unbind: a successfully registered input device must be torn down with
+ * input_unregister_device() (which also drops the last reference), not
+ * input_free_device() as the original did -- freeing a live registered
+ * device corrupts the input core's bookkeeping.
+ */
+static int __devexit virtual_wakeup_button_remove(struct platform_device* pdev)
+{
+	struct input_dev *input = platform_get_drvdata(pdev);
+
+	if (input)
+		input_unregister_device(input);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* PM resume hook: synthesize a KEY_POWER press/release pair. */
+static int virtual_wakeup_button_resume(struct device *dev)
+{
+	struct input_dev *input =
+		platform_get_drvdata(to_platform_device(dev));
+
+	if (!input)
+		return 0;
+
+	dev_info(dev, "resuming... sending KEY_POWER event\n");
+
+	input_report_key(input, KEY_POWER, 1);
+	input_sync(input);
+	input_report_key(input, KEY_POWER, 0);
+	input_sync(input);
+
+	return 0;
+}
+
+/* Only .resume is implemented; const because the pm core never writes it. */
+static const struct dev_pm_ops virtual_wakeup_button_dev_pm_ops = {
+	.resume = virtual_wakeup_button_resume,
+};
+
+static struct platform_driver virtual_wakeup_button_driver = {
+	.driver = {
+		.name  = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm    = &virtual_wakeup_button_dev_pm_ops,
+	},
+	.probe  = virtual_wakeup_button_probe,
+	.remove = __devexit_p(virtual_wakeup_button_remove),
+};
+
+/* Register the platform driver; the matching platform device is
+ * expected to be declared by the board code. */
+static int __init virtual_wakeup_button_init(void)
+{
+	return platform_driver_register(&virtual_wakeup_button_driver);
+}
+module_init(virtual_wakeup_button_init);
+
+static void __exit virtual_wakeup_button_exit(void)
+{
+	platform_driver_unregister(&virtual_wakeup_button_driver);
+}
+module_exit(virtual_wakeup_button_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Seidel <android@stefanseidel.info>");
+MODULE_AUTHOR("Aurelien Lefebvre <aurelien.lefebvre@parrot.com>");
+MODULE_DESCRIPTION("Sets up a virtual input device and sends a power key event during resume.");
+
+
diff --git a/drivers/parrot/lttng/Kconfig b/drivers/parrot/lttng/Kconfig
new file mode 100644
index 00000000000000..a68c37f841b9eb
--- /dev/null
+++ b/drivers/parrot/lttng/Kconfig
@@ -0,0 +1,4 @@
+config LTTNG_DEBUGFS
+ tristate "LTTng debugfs"
+ help
+ LTTng debugfs
diff --git a/drivers/parrot/lttng/Makefile b/drivers/parrot/lttng/Makefile
new file mode 100644
index 00000000000000..4c249fd21438fc
--- /dev/null
+++ b/drivers/parrot/lttng/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LTTNG_DEBUGFS) += lttng_debugfs.o
diff --git a/drivers/parrot/lttng/lttng_debugfs.c b/drivers/parrot/lttng/lttng_debugfs.c
new file mode 100644
index 00000000000000..4aba3151ae079a
--- /dev/null
+++ b/drivers/parrot/lttng/lttng_debugfs.c
@@ -0,0 +1,141 @@
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/parrot_trace.h>
+
+/* Module-wide state: the debugfs directory all control files live in. */
+struct lttng_debugfs {
+	struct dentry *dir;
+};
+
+static struct lttng_debugfs lttng_debugfs;
+
+/* Common helper: parse a base-10 unsigned long written from userspace
+ * into *event; returns the byte count consumed or a negative errno. */
+static ssize_t lttng_event_write(struct file *filp,
+				 const char __user *ubuf,
+				 size_t cnt,
+				 loff_t *ppos,
+				 unsigned long *event)
+{
+	char kbuf[32];
+	int err;
+
+	if (cnt > sizeof(kbuf) - 1)
+		cnt = sizeof(kbuf) - 1;
+	if (copy_from_user(kbuf, ubuf, cnt))
+		return -EFAULT;
+	kbuf[cnt] = '\0';
+
+	err = kstrtoul(kbuf, 10, event);
+	if (err != 0)
+		return err;
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* debugfs "event_start": echo an event id to fire the start tracepoint. */
+static ssize_t lttng_event_start_write(struct file *filp,
+				       const char __user *ubuf,
+				       size_t cnt,
+				       loff_t *ppos)
+{
+	unsigned long event = 0;
+	int ret = lttng_event_write(filp, ubuf, cnt, ppos, &event);
+
+	/* Only trace on successful parse; the original traced a spurious
+	 * event 0 even when lttng_event_write() returned an error. */
+	if (ret >= 0)
+		trace_user_kevent_start(event);
+	return ret;
+}
+
+/* debugfs "event_stop": echo an event id to fire the stop tracepoint. */
+static ssize_t lttng_event_stop_write(struct file *filp,
+				      const char __user *ubuf,
+				      size_t cnt,
+				      loff_t *ppos)
+{
+	unsigned long event = 0;
+	int ret = lttng_event_write(filp, ubuf, cnt, ppos, &event);
+
+	/* Only trace on successful parse; the original traced a spurious
+	 * event 0 even when lttng_event_write() returned an error. */
+	if (ret >= 0)
+		trace_user_kevent_stop(event);
+	return ret;
+}
+
+/* debugfs "message": trace an arbitrary user string (truncated to 127
+ * bytes) through the user_kmessage tracepoint. */
+static ssize_t lttng_event_msg_write(struct file *filp,
+				     const char __user *ubuf,
+				     size_t cnt,
+				     loff_t *ppos)
+{
+	char msg[128];
+
+	if (cnt > sizeof(msg) - 1)
+		cnt = sizeof(msg) - 1;
+	if (copy_from_user(msg, ubuf, cnt))
+		return -EFAULT;
+	msg[cnt] = '\0';
+
+	trace_user_kmessage(msg);
+
+	*ppos += cnt;
+	return cnt;
+}
+
+/* Write-only control files: each write fires the matching tracepoint. */
+static const struct file_operations lttng_event_start_fops = {
+	.owner = THIS_MODULE,
+	.write = lttng_event_start_write,
+};
+
+static const struct file_operations lttng_event_stop_fops = {
+	.owner = THIS_MODULE,
+	.write = lttng_event_stop_write,
+};
+
+static const struct file_operations lttng_event_msg_fops = {
+	.owner = THIS_MODULE,
+	.write = lttng_event_msg_write,
+};
+
+/*
+ * Create /sys/kernel/debug/lttng with its three control files.
+ * Fix over the original: a failure creating any file returned an error
+ * but left the directory and earlier files behind; now the whole tree
+ * is removed on any partial failure.
+ */
+static int __init lttng_debugfs_init(void)
+{
+	struct lttng_debugfs *debugfs = &lttng_debugfs;
+	struct dentry *d;
+
+	debugfs->dir = debugfs_create_dir("lttng", NULL);
+	if (!debugfs->dir)
+		return -ENOMEM;
+
+	d = debugfs_create_file("event_start", S_IWUGO, debugfs->dir,
+				NULL, &lttng_event_start_fops);
+	if (!d)
+		goto err_cleanup;
+
+	d = debugfs_create_file("event_stop", S_IWUGO, debugfs->dir,
+				NULL, &lttng_event_stop_fops);
+	if (!d)
+		goto err_cleanup;
+
+	d = debugfs_create_file("message", S_IWUGO, debugfs->dir,
+				NULL, &lttng_event_msg_fops);
+	if (!d)
+		goto err_cleanup;
+
+	return 0;
+
+err_cleanup:
+	debugfs_remove_recursive(debugfs->dir);
+	debugfs->dir = NULL;
+	return -ENOMEM;
+}
+
+/* Remove the whole /sys/kernel/debug/lttng tree on unload. */
+static void __exit lttng_debugfs_exit(void)
+{
+	debugfs_remove_recursive(lttng_debugfs.dir);
+}
+
+module_init(lttng_debugfs_init);
+module_exit(lttng_debugfs_exit);
+
+MODULE_DESCRIPTION("LTTng simple debugfs interface");
+MODULE_AUTHOR("Adrien Charruel, <adrien.charruel@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/Kconfig b/drivers/parrot/media/Kconfig
new file mode 100644
index 00000000000000..f809be49ede818
--- /dev/null
+++ b/drivers/parrot/media/Kconfig
@@ -0,0 +1,14 @@
+source "drivers/parrot/media/video/Kconfig"
+
+config MPEGTS_PARROT7
+ tristate "MPEG_TS driver"
+ depends on SPI_MASTER && SPI_PARROT7
+ default m
+ help
+ Select Parrot7 MPEG_TS driver. Requires the SPI master driver to be
+ selected to handle common registers.
+
+config MPEGTS_PARROT7_DEBUG
+ bool "debug mode"
+ depends on MPEGTS_PARROT7
+ default n
diff --git a/drivers/parrot/media/Makefile b/drivers/parrot/media/Makefile
new file mode 100644
index 00000000000000..b416c2ebe4899e
--- /dev/null
+++ b/drivers/parrot/media/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -I$(srctree)/drivers/parrot
+ccflags-$(CONFIG_MPEGTS_PARROT7_DEBUG) += -DDEBUG
+
+obj-y += video/
+
+obj-$(CONFIG_MPEGTS_PARROT7) += p7-mpegts.o
diff --git a/drivers/parrot/media/hx280enc.c b/drivers/parrot/media/hx280enc.c
new file mode 100644
index 00000000000000..9e70d6959fb026
--- /dev/null
+++ b/drivers/parrot/media/hx280enc.c
@@ -0,0 +1,528 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include<linux/sched.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+//#include<asm/signal.h>
+//#include<asm-generic/siginfo.h>
+
+/* On2 own stuff */
+#include "hx280enc.h"
+#include "memalloc.h"
+
+/* module description */
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Hantro Products Oy");
+MODULE_DESCRIPTION ("Hantro 6280/7280/8270 Encoder driver");
+
+//static int COUNTER=0;
+static int SLEEP = 0;
+static DECLARE_WAIT_QUEUE_HEAD (wq);
+
+#define ENC_IO_SIZE (96 * 4) /* bytes */
+#define ENC_HW_ID 0x72800000
+#define MEMALLOC_SIZE (96 << 20)
+
+/* here's all the must remember stuff */
+
+
+
+/* Per-device state; the driver supports a single static instance. */
+struct hx280_venc_t
+{
+  struct device *dev;		/* platform device, used by the memalloc helpers */
+  char *buffer;
+  unsigned int buffsize;
+  int hx280_venc_major;		/* chardev major (dynamically assigned) */
+  unsigned long iobaseaddr;	/* physical base of the register window */
+  unsigned int iosize;		/* register window size in bytes */
+  volatile u8 *hwregs;		/* ioremap'ed register window */
+  unsigned int irq;		/* NOTE(review): compared against -1 elsewhere
+				 * although declared unsigned -- confirm */
+  struct clk *hx280clk;		/* gate clock, enabled for the device's lifetime */
+  hlinc chunks;			/* memalloc allocation bookkeeping */
+  struct fasync_struct *async_queue;	/* SIGIO subscribers */
+};
+
+static struct hx280_venc_t *hxvenc;
+/* SPIN_LOCK_UNLOCKED is deprecated (and removed in later kernels);
+ * DEFINE_SPINLOCK is the supported static initializer. */
+static DEFINE_SPINLOCK(mem_lock);
+
+static int hx280_venc_cleanup (struct platform_device *);
+static int ReserveIO (void);
+static void ReleaseIO (void);
+static void ResetAsic (struct hx280_venc_t *);
+static void dump_regs (unsigned long);
+static int hx280_venc_probe (struct platform_device *);
+int memalloc_register (struct device *, hlinc *, dma_addr_t, unsigned int);
+int memalloc_unregister (struct device *);
+int memalloc_AllocMemory (struct device *, hlinc *, unsigned *, unsigned int);
+void memalloc_FreeMemory (struct device *, hlinc*, unsigned long);
+void memalloc_ResetMems (struct device *, hlinc *);
+
+/* IRQ handler */
+static irqreturn_t hx280_venc_isr (int, void *);
+/*clock init */
+static int hx280_venc_init_clock (struct hx280_venc_t *,
+ struct platform_device *);
+
+
+/*------------*/
+
+/* No pages are ever faulted in: any access to the mapping gets SIGBUS. */
+static int
+hx280_venc_vm_fault (struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+  PDEBUG ("hx280enc_vm_fault: problem with mem access\n");
+  return VM_FAULT_SIGBUS;
+}
+
+/* VMA open hook: trace only, no state to duplicate. */
+static void
+hx280_venc_vm_open (struct vm_area_struct *vma)
+{
+  PDEBUG ("hx280enc_vm_open:\n");
+}
+
+/* VMA close hook: trace only, no state to release. */
+static void
+hx280_venc_vm_close (struct vm_area_struct *vma)
+{
+  PDEBUG ("hx280enc_vm_close:\n");
+}
+
+/* Replaced the obsolete GCC "label:" initializer syntax with standard
+ * C99 designated initializers. */
+static struct vm_operations_struct hx280enc_vm_ops = {
+  .open  = hx280_venc_vm_open,
+  .close = hx280_venc_vm_close,
+  .fault = hx280_venc_vm_fault,
+};
+
+/*
+ * mmap() is effectively disabled: the vm_ops are installed but the call
+ * always returns -EINVAL, so userspace must reach buffers some other
+ * way (via the memalloc bus addresses, presumably -- confirm).
+ */
+static int
+hx280_venc_mmap (struct file *filp, struct vm_area_struct *vma)
+{
+  int result = -EINVAL;
+  vma->vm_ops = &hx280enc_vm_ops;
+  return result;
+}
+
+/*
+ * Unlocked ioctl entry point: register window queries, IRQ wait and the
+ * memalloc buffer get/free/reset operations.
+ */
+static long
+hx280_venc_ioctl (struct file *filp, unsigned int cmd, unsigned long arg)
+{
+  int err = 0;
+  PDEBUG (KERN_INFO"ioctl cmd %u\n", cmd);
+
+  /* Reject foreign ioctls before touching user memory. */
+  if (_IOC_TYPE (cmd) != HX280ENC_IOC_MAGIC)
+    return -ENOTTY;
+  if (_IOC_NR (cmd) > HX280ENC_IOC_MAXNR)
+    return -ENOTTY;
+
+  /* access_ok() direction is inverted w.r.t. _IOC_DIR: an ioctl that
+   * "reads" hands data back to userspace, i.e. the kernel writes. */
+  if (_IOC_DIR (cmd) & _IOC_READ)
+    err = !access_ok (VERIFY_WRITE, (void *) arg, _IOC_SIZE (cmd));
+  else if (_IOC_DIR (cmd) & _IOC_WRITE)
+    err = !access_ok (VERIFY_READ, (void *) arg, _IOC_SIZE (cmd));
+  if (err)
+    return -EFAULT;
+
+  switch (cmd)
+    {
+    case HX280ENC_IOCGHWOFFSET:
+      __put_user (hxvenc->iobaseaddr, (unsigned long *) arg);
+      break;
+
+    case HX280ENC_IOCGHWIOSIZE:
+      __put_user (hxvenc->iosize, (unsigned int *) arg);
+      break;
+
+    case HX280ENC_IOCWAIT:
+      /* Block (at most 100ms) until the ISR flags completion. */
+      err = wait_event_interruptible_timeout (wq, SLEEP, HZ / 10);
+      SLEEP = 0;
+      if (!err)
+        return -ETIMEDOUT;
+      break;
+
+    case MEMALLOC_IOCHARDRESET:
+      PDEBUG ("HARDRESET\n");
+      memalloc_ResetMems (hxvenc->dev, &hxvenc->chunks);
+      break;
+
+    case MEMALLOC_IOCXGETBUFFER:
+      {
+        int result;
+        MemallocParams memparams;
+        PDEBUG (KERN_INFO"GETBUFFER\n");
+        /* NOTE(review): __copy_from_user/__copy_to_user can fault and
+         * sleep while mem_lock is held -- confirm this cannot deadlock. */
+        spin_lock (&mem_lock);
+        if (__copy_from_user
+            (&memparams, (const void *) arg, sizeof (memparams)))
+          result = -EFAULT;
+        else
+          {
+            PDEBUG(KERN_INFO"%u\n",memparams.busAddress);
+            result =
+              memalloc_AllocMemory (hxvenc->dev, &hxvenc->chunks, &memparams.busAddress, memparams.size);
+            PDEBUG(KERN_INFO"%u\n",hxvenc->chunks.bus_address);
+            if (__copy_to_user ((void *) arg, &memparams, sizeof (memparams)))
+              {
+                result = -EFAULT;
+              }
+          }
+        spin_unlock (&mem_lock);
+        return result;
+      }
+
+    case MEMALLOC_IOCSFREEBUFFER:
+      {
+        unsigned long busaddr;
+        PDEBUG ("FREEBUFFER\n");
+        spin_lock (&mem_lock);
+        __get_user (busaddr, (unsigned long *) arg);
+        spin_unlock (&mem_lock);
+        memalloc_FreeMemory (hxvenc->dev, &hxvenc->chunks, busaddr);
+        break;
+      }
+
+    default:
+      printk(KERN_ERR"IOCTL cmd not recognized\n");
+      /* Was "return -1" (seen as -EPERM by userspace); -ENOTTY is the
+       * conventional answer for an unknown ioctl. */
+      return -ENOTTY;
+    }
+
+  return 0;
+}
+
+/* Stash the (single) device state on the file handle. */
+static int
+hx280_venc_open (struct inode *inode, struct file *filp)
+{
+  filp->private_data = (void *) hxvenc;
+  PDEBUG ("dev opened\n");
+  return 0;
+}
+
+/* Subscribe/unsubscribe the caller for SIGIO delivery. */
+static int
+hx280_venc_fasync (int fd, struct file *filp, int mode)
+{
+  struct hx280_venc_t *venc = (struct hx280_venc_t *) filp->private_data;
+
+  PDEBUG ("fasync called\n");
+  return fasync_helper (fd, filp, mode, &venc->async_queue);
+}
+
+static int
+hx280_venc_release (struct inode *inode, struct file *filp)
+{
+  struct hx280_venc_t *venc = (struct hx280_venc_t *) filp->private_data;
+
+  dump_regs ((unsigned long) venc);	/* debug dump of the register file */
+  hx280_venc_fasync (-1, filp, 0);	/* drop any SIGIO registration */
+  PDEBUG ("dev closed\n");
+  return 0;
+}
+
+/* VFS methods */
+/* VFS methods.  Converted the obsolete GCC "label:" initializers to C99
+ * designated initializers and added .owner so the module cannot be
+ * unloaded while the device node is open. */
+static struct file_operations hx280_venc_fops = {
+  .owner          = THIS_MODULE,
+  .mmap           = hx280_venc_mmap,
+  .open           = hx280_venc_open,
+  .release        = hx280_venc_release,
+  .unlocked_ioctl = hx280_venc_ioctl,
+  .fasync         = hx280_venc_fasync,
+};
+
+
+/*
+ * Platform probe: register the chardev, enable the clock, map the
+ * encoder registers, hook the IRQ and hand the reserved memory range
+ * to the memalloc helper.
+ */
+static int
+hx280_venc_probe (struct platform_device *pdev)
+{
+  struct resource *resm, *resi, *resmem;
+  int result;
+
+
+  /* dynamic allocation of the device structure */
+  hxvenc = kzalloc (sizeof (struct hx280_venc_t), GFP_KERNEL);
+  if (!hxvenc)
+    {
+      printk (KERN_ERR "HX280enc: Unable to allocate device structure\n");
+      result = -EINVAL;
+      goto err;
+    }
+  /* save the device in the struct */
+  hxvenc->dev=&pdev->dev;
+
+  /* register driver; major 0 requests dynamic assignment */
+  hxvenc->hx280_venc_major = 0;
+  result =
+    register_chrdev (hxvenc->hx280_venc_major, "hx280-venc",
+		     &hx280_venc_fops);
+  if (result < 0)
+    {
+      printk (KERN_INFO "HX280enc: unable to get major <%d>\n",
+	      hxvenc->hx280_venc_major);
+      goto err_alloc;
+    }
+  else if (result != 0) // this is for dynamic major
+    {
+      hxvenc->hx280_venc_major = result;
+    }
+  /* get the register window from the platform device */
+  resm = platform_get_resource (pdev, IORESOURCE_MEM, 0);
+  if (!resm)
+    {
+      printk ("HX280enc: Unable to get ressource\n");
+      result = -ENXIO;
+      goto err_reg;
+    }
+  hxvenc->iobaseaddr = resm->start;
+  hxvenc->iosize = resm->end - resm->start + 1;
+  /* enable the gate clock before touching the hardware */
+  result = hx280_venc_init_clock (hxvenc, pdev);
+  if (result)
+    {
+      printk (KERN_ERR "clock enable failed");
+      goto err_reg;
+    }
+  /* map the register window into kernel space */
+  result = ReserveIO ();
+  if (result < 0)
+    goto err_clk;
+
+  ResetAsic (hxvenc);		/* reset hardware */
+  /* get the interrupt line from the platform device */
+  resi = platform_get_resource (pdev, IORESOURCE_IRQ, 0);
+  if (!resi)
+    {
+      /* NOTE(review): this path leaves the IO region mapped and the
+       * chardev registered -- it should unwind through ReleaseIO(). */
+      printk ("HX280enc: Unable to get ressource\n");
+      result = -ENXIO;
+      goto err_clk;
+    }
+  hxvenc->irq = resi->start;
+  printk (KERN_INFO "HX280enc: irq=%i\n", hxvenc->irq);
+  hxvenc->async_queue = NULL;
+
+  /* get the IRQ line */
+  if (hxvenc->irq != -1)
+    {
+      result = request_irq (hxvenc->irq, hx280_venc_isr,
+			    IRQF_DISABLED | IRQF_SHARED,
+			    "hx280enc", (void *) hxvenc);
+      if (result == -EINVAL)
+	{
+	  /* NOTE(review): "goto err" here skips unregister_chrdev, the
+	   * clock disable/put and kfree(hxvenc) -- these two failure
+	   * branches leak everything acquired so far. */
+	  printk (KERN_ERR "HX280enc: Bad irq number or handler\n");
+	  ReleaseIO ();
+	  goto err;
+	}
+      else if (result == -EBUSY)
+	{
+	  printk (KERN_ERR "HX280enc: IRQ <%d> busy, change your config\n", hxvenc->irq);
+	  ReleaseIO ();
+	  goto err;
+	}
+    }
+  else
+    {
+      printk (KERN_INFO "HX280enc: IRQ not in use!\n");
+    }
+
+  /* get the memalloc (DMA pool) range from the platform device */
+  resmem = platform_get_resource (pdev, IORESOURCE_MEM, 1);
+  if (!resmem)
+    {
+      /* NOTE(review): the IRQ requested above and the IO mapping are
+       * not released on this path. */
+      printk ("HX280enc: Unable to get memalloc ressource\n");
+      result = -ENXIO;
+      goto err_clk;
+    }
+  /* hand the range to the coherent-memory allocator;
+   * NOTE(review): the return value is ignored -- confirm failure here
+   * is survivable. */
+  memalloc_register (&pdev->dev, &hxvenc->chunks ,resmem->start,
+		     (resmem->end - resmem->start + 1));
+
+  printk (KERN_INFO "HX280enc: module inserted. Major <%d>\n",
+	  hxvenc->hx280_venc_major);
+  return 0;
+
+err_clk:
+  clk_disable (hxvenc->hx280clk);
+  clk_put (hxvenc->hx280clk);
+err_reg:
+  unregister_chrdev (hxvenc->hx280_venc_major, "hx280-venc");
+err_alloc:
+  kfree (hxvenc);
+err:
+  printk (KERN_INFO "HX280enc: module not inserted\n");
+  return result;
+}
+
+
+/*
+ * Platform remove: quiesce the hardware and undo what probe set up.
+ * NOTE(review): clk_put() is never called here although probe's error
+ * path pairs clk_disable with clk_put -- a clock reference appears to
+ * be leaked on unbind; confirm.
+ */
+static int
+hx280_venc_cleanup (struct platform_device *pdev)
+{
+  PDEBUG (KERN_INFO "HX280enc: remove module\n");
+  writel (0, hxvenc->hwregs + 0x04); /* clear enc IRQ */
+  PDEBUG (KERN_INFO "HX280enc: reset IRQ\n");
+  /* free the encoder IRQ (only requested when irq != -1 in probe) */
+  if (hxvenc->irq != -1)
+    {
+      free_irq (hxvenc->irq, (void *) hxvenc);
+    }
+  PDEBUG (KERN_INFO "HX280enc: free IRQ\n");
+  ReleaseIO ();
+  PDEBUG (KERN_INFO "HX280enc: Release IO\n");
+  memalloc_unregister (&pdev->dev);
+  unregister_chrdev (hxvenc->hx280_venc_major, "hx280-venc");
+  clk_disable (hxvenc->hx280clk);
+  printk (KERN_INFO "HX280enc: module removed\n");
+  kfree (hxvenc);
+  return 0;
+}
+
+/*
+ * Reserve and map the encoder register window, then sanity-check the
+ * hardware ID register (register 0): the top 16 bits must match
+ * ENC_HW_ID.  Returns 0 on success, -EBUSY on any failure.
+ */
+static int
+ReserveIO (void)
+{
+  long int hwid;
+  if (!request_mem_region (hxvenc->iobaseaddr, hxvenc->iosize, "hx280-venc"))
+    {
+      printk (KERN_INFO "HX280enc: failed to reserve HW regs\n");
+      return -EBUSY;
+    }
+  hxvenc->hwregs =
+    (volatile u8 *) ioremap (hxvenc->iobaseaddr, hxvenc->iosize);
+  if (hxvenc->hwregs == NULL)
+    {
+      printk (KERN_INFO "HX280enc: failed to ioremap HW regs\n");
+      /* ReleaseIO() copes with hwregs == NULL and still releases the
+       * mem region reserved above. */
+      ReleaseIO ();
+      return -EBUSY;
+    }
+  hwid = readl (hxvenc->hwregs);
+  /* check for encoder HW ID */
+  if ((((hwid >> 16) & 0xFFFF) != ((ENC_HW_ID >> 16) & 0xFFFF)))
+    {
+      printk (KERN_INFO "HX280enc: HW not found at 0x%08lx\n",
+	      hxvenc->iobaseaddr);
+      dump_regs ((unsigned long) hxvenc);
+      ReleaseIO ();
+      return -EBUSY;
+    }
+  PDEBUG (KERN_INFO
+	  "HX280enc: Hardware at bus @ <0x%08lx> with ID <0x%08lx>\n",
+	  hxvenc->iobaseaddr, hwid);
+  return 0;
+}
+
+/* Undo ReserveIO(): unmap the registers (if mapped) and release the
+ * reserved memory region. */
+static void
+ReleaseIO (void)
+{
+  if (hxvenc->hwregs != NULL)
+    iounmap ((void *) hxvenc->hwregs);
+  release_mem_region (hxvenc->iobaseaddr, hxvenc->iosize);
+}
+
+/*
+ * Encoder interrupt: if the IRQ bit (bit 0 of register 0x04) is set,
+ * acknowledge it, flag completion for HX280ENC_IOCWAIT and wake the
+ * waiter; otherwise report the (shared) interrupt as not ours.
+ */
+irqreturn_t
+hx280_venc_isr (int irq, void *dev_id)
+{
+  struct hx280_venc_t *dev = (struct hx280_venc_t *) dev_id;
+  u32 irq_status;
+  irq_status = readl (dev->hwregs + 0x04);
+  if (irq_status & 0x01)
+    {
+      writel (irq_status & (~0x01), dev->hwregs + 0x04); /* clear enc IRQ */
+/* if (dev->async_queue)
+ kill_fasync (&dev->async_queue, SIGIO, POLL_IN);
+ else
+ {
+ printk (KERN_WARNING
+ "hx280enc: IRQ received w/o anybody waiting for it!\n");
+ }
+*/ SLEEP = 1;
+      wake_up_interruptible (&wq);
+
+      PDEBUG ("IRQ handled!\n");
+      return IRQ_HANDLED;
+    }
+  else
+    {
+      PDEBUG ("IRQ received, but NOT handled!\n");
+      return IRQ_NONE;
+    }
+}
+
+/* Look up the device's gate clock, enable it and remember it in
+ * host->hx280clk.  Returns 0 or a negative errno. */
+static int
+hx280_venc_init_clock (struct hx280_venc_t *host,
+		       struct platform_device *pdev)
+{
+  struct clk *clk;
+  int ret;
+
+  clk = clk_get (&pdev->dev, NULL);
+  if (IS_ERR (clk))
+    return PTR_ERR (clk);
+
+  ret = clk_enable (clk);
+  if (ret)
+    {
+      clk_put (clk);
+      return ret;
+    }
+
+  host->hx280clk = clk;
+  return 0;
+}
+
+
+/* Put the encoder in a known state: clear register 0x38 first, then
+ * zero registers 0x04 through the end of the window. */
+void
+ResetAsic (struct hx280_venc_t *dev)
+{
+  int off;
+
+  writel (0, dev->hwregs + 0x38);
+  for (off = 4; off < ENC_IO_SIZE; off += 4)
+    writel (0, dev->hwregs + off);
+}
+
+/*
+ * Debug helper: dump the whole register window via PDEBUG.
+ * Fix: the cast of 'data' to the device pointer was commented out while
+ * the loop body still referenced 'dev'; it only compiled because
+ * PDEBUG() expands to nothing unless HX280ENC_DEBUG is defined.
+ */
+void
+dump_regs (unsigned long data)
+{
+  struct hx280_venc_t *dev = (struct hx280_venc_t *) data;
+  int i;
+
+  (void) dev;			/* unused when PDEBUG is compiled out */
+  PDEBUG ("Reg Dump Start\n");
+  for (i = 0; i < ENC_IO_SIZE; i += 4)
+    {
+      PDEBUG ("\toffset %02X = %08X\n", i, readl (dev->hwregs + i));
+    }
+  PDEBUG ("Reg Dump End\n");
+}
+
+static struct platform_driver hx280_venc_driver = {
+  .probe  = hx280_venc_probe,
+  .remove = hx280_venc_cleanup,
+  .driver = {
+    .name  = "hx280-venc",
+    .owner = THIS_MODULE,
+  },
+};
+
+/* Module entry/exit: plain platform-driver registration. */
+static int __init
+hx280_venc_init (void)
+{
+  return platform_driver_register (&hx280_venc_driver);
+}
+
+module_init (hx280_venc_init);
+
+static void __exit
+hx280_venc_exit (void)
+{
+  platform_driver_unregister (&hx280_venc_driver);
+}
+
+module_exit (hx280_venc_exit);
diff --git a/drivers/parrot/media/hx280enc.h b/drivers/parrot/media/hx280enc.h
new file mode 100644
index 00000000000000..995f2c53f7955d
--- /dev/null
+++ b/drivers/parrot/media/hx280enc.h
@@ -0,0 +1,87 @@
+/*------------------------------------------------------------------------------
+-- --
+-- This software is confidential and proprietary and may be used --
+-- only as expressly authorized by a licensing agreement from --
+-- --
+-- Hantro Products Oy. --
+-- --
+-- (C) COPYRIGHT 2006 HANTRO PRODUCTS OY --
+-- ALL RIGHTS RESERVED --
+-- --
+-- The entire notice above must be reproduced --
+-- on all copies and should not be removed. --
+-- --
+--------------------------------------------------------------------------------
+--
+-- Abstract : 6280/7280/8270 Encoder device driver (kernel module)
+--
+--------------------------------------------------------------------------------
+--
+-- Version control information, please leave untouched.
+--
+-- $RCSfile: hx280enc.h,v $
+-- $Date: 2008/03/27 11:26:23 $
+-- $Revision: 1.1 $
+--
+------------------------------------------------------------------------------*/
+
+#ifndef _HX280ENC_H_
+#define _HX280ENC_H_
+#include <linux/ioctl.h> /* needed for the _IOW etc stuff used later */
+/*
+ * Macros to help debugging
+ */
+
+#undef PDEBUG /* undef it, just in case */
+#ifdef HX280ENC_DEBUG
+# ifdef __KERNEL__
+ /* This one if debugging is on, and kernel space */
+# define PDEBUG(fmt, args...) printk( KERN_INFO "hmp4e: " fmt, ## args)
+# else
+ /* This one for user space */
+# define PDEBUG(fmt, args...) printf(__FILE__ ":%d: " fmt, __LINE__ , ## args)
+# endif
+#else
+# define PDEBUG(fmt, args...) /* not debugging: nothing */
+#endif
+
+/*
+ * Ioctl definitions
+ */
+
+/* Use 'k' as magic number */
+#define HX280ENC_IOC_MAGIC 'k'
+/*
+ * S means "Set" through a ptr,
+ * T means "Tell" directly with the argument value
+ * G means "Get": reply by setting through a pointer
+ * Q means "Query": response is on the return value
+ * X means "eXchange": G and S atomically
+ * H means "sHift": T and Q atomically
+ */
+ /*
+ * #define HX280ENC_IOCGBUFBUSADDRESS _IOR(HX280ENC_IOC_MAGIC, 1, unsigned long *)
+ * #define HX280ENC_IOCGBUFSIZE _IOR(HX280ENC_IOC_MAGIC, 2, unsigned int *)
+ */
+#define HX280ENC_IOCGHWOFFSET _IOR(HX280ENC_IOC_MAGIC, 3, unsigned long *)
+#define HX280ENC_IOCGHWIOSIZE _IOR(HX280ENC_IOC_MAGIC, 4, unsigned int *)
+#define HX280ENC_IOC_CLI _IO(HX280ENC_IOC_MAGIC, 5)
+#define HX280ENC_IOC_STI _IO(HX280ENC_IOC_MAGIC, 6)
+#define HX280ENC_IOCXVIRT2BUS _IOWR(HX280ENC_IOC_MAGIC, 7, unsigned long *)
+
+#define HX280ENC_IOCHARDRESET _IO(HX280ENC_IOC_MAGIC, 8) /* debugging tool */
+#define HX280ENC_IOCWAIT _IO(HX280ENC_IOC_MAGIC, 9) /* wait for irq */
+#define MEMALLOC_IOCHARDRESET _IO(HX280ENC_IOC_MAGIC, 10) /* reset memalloc */
+#define MEMALLOC_IOCXGETBUFFER _IOWR(HX280ENC_IOC_MAGIC, 11, unsigned long) /* get memory memalloc */
+#define MEMALLOC_IOCSFREEBUFFER _IOW(HX280ENC_IOC_MAGIC, 12, unsigned long) /* free memory memalloc */
+
+#define HX280ENC_IOC_MAXNR 12
+
+
+typedef struct
+{
+ unsigned busAddress;
+ unsigned size;
+} MemallocParams;
+
+#endif /* !_HX280ENC_H_ */
diff --git a/drivers/parrot/media/memalloc.c b/drivers/parrot/media/memalloc.c
new file mode 100644
index 00000000000000..e58e33b32c6686
--- /dev/null
+++ b/drivers/parrot/media/memalloc.c
@@ -0,0 +1,180 @@
+/* Allocate memory blocks
+ *
+ * Copyright (C) 2009 On2 Technologies Finland Oy.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+--------------------------------------------------------------------------------
+--
+-- Version control information, please leave untouched.
+--
+-- $RCSfile: memalloc.c,v $
+-- $Date: 2011/03/10 14:03:45 $
+-- $Revision: 1.1 $
+--
+------------------------------------------------------------------------------*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+/* needed for __init,__exit directives */
+#include <linux/init.h>
+/* needed for remap_page_range */
+#include <linux/mm.h>
+/* obviously, for kmalloc */
+#include <linux/slab.h>
+/* for struct file_operations, register_chrdev() */
+#include <linux/fs.h>
+/* standard error codes */
+#include <linux/errno.h>
+/* this header files wraps some common module-space operations ...
+ here we use mem_map_reserve() macro */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+/* for current pid */
+#include <linux/sched.h>
+/* Our header */
+#include "hx280enc.h"
+#include "memalloc.h"
+/*platform device*/
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+/* module description */
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Hantro Products Oy");
+MODULE_DESCRIPTION ("RAM allocation");
+
+#define MAXDEV 2 //ENCODE and DECODER
+
+#define MEMALLOC_SW_MINOR 6
+#define MEMALLOC_SW_MAJOR 0
+#define MEMALLOC_SW_BUILD ((MEMALLOC_SW_MAJOR * 1000) + MEMALLOC_SW_MINOR)
+
+
+
+/* Module exit hook: no global state to tear down, just log the removal. */
+static void __exit memalloc_cleanup (void)
+{
+  printk (KERN_INFO "memalloc: module removed\n");
+}
+
+/*We must keep a list of the allocated chunk*/
+/*
+ * Allocate a DMA-coherent chunk of at least @size bytes on behalf of
+ * @dev and link a tracking node onto @chunks so the chunk can later be
+ * released by bus address (see memalloc_FreeMemory()).
+ *
+ * On success, *busaddr holds the chunk's bus address and 0 is returned.
+ * On failure a negative errno is returned and nothing is allocated.
+ *
+ * Fixes over the previous version:
+ *  - the dma_alloc_coherent() failure path returned an uninitialized
+ *    'err' (undefined behavior);
+ *  - the kmalloc() of the tracking node was unchecked, leading to a
+ *    NULL dereference (and a leaked DMA chunk) on allocation failure.
+ */
+int
+memalloc_AllocMemory (struct device *dev, hlinc *chunks, unsigned *busaddr, unsigned int size)
+{
+  unsigned int asked_page;
+  void *rslt;
+  hlinc *elem;
+
+  if (!size)
+    return -EINVAL;
+
+  /* The DMA API hands out whole pages anyway; round up front. */
+  asked_page = PAGE_ALIGN (size);
+  rslt = dma_alloc_coherent (dev, asked_page, (dma_addr_t *) busaddr,
+			     GFP_DMA);
+  if (!rslt)
+    {
+      PDEBUG ("MEMALLOC FAILED: size = %d\n", size);
+      return -ENOMEM;
+    }
+  PDEBUG (KERN_WARNING
+	  "MEMALLOC OK: size: %d, size reserved: %d at @ 0x%x\n", size,
+	  asked_page, *busaddr);
+
+  elem = kmalloc (sizeof *chunks, GFP_KERNEL);
+  if (!elem)
+    {
+      /* Do not leak the coherent chunk if we cannot track it. */
+      dma_free_coherent (dev, asked_page, rslt, *busaddr);
+      return -ENOMEM;
+    }
+  PDEBUG (KERN_WARNING "alloc %d\n", asked_page);
+  elem->bus_address = *busaddr;
+  elem->size = asked_page;
+  elem->ba = rslt;
+  list_add (&(elem->list), &(chunks->list));
+  return 0;
+}
+EXPORT_SYMBOL (memalloc_AllocMemory);
+
+/*
+ * Free the chunk whose bus address matches @busaddr: return the pages
+ * to the DMA API, unlink the tracking node from @chunks and release it.
+ * Silently does nothing if @busaddr is not on the list.
+ */
+void
+memalloc_FreeMemory (struct device *dev, hlinc *chunks,unsigned long busaddr)
+{
+  hlinc *res, *resn;
+  /* _safe variant: we delete the matching node while iterating. */
+  list_for_each_entry_safe (res, resn, &(chunks->list), list)
+  {
+    PDEBUG (KERN_INFO "elem 0x%x, curr 0x%x\n", res->bus_address,
+	    (unsigned int) busaddr);
+    if (res->bus_address == busaddr)
+      {
+	PDEBUG (KERN_WARNING "alloc - 1 %d\n", res->size);
+	dma_free_coherent (dev, res->size, (void *) res->ba,
+			   res->bus_address);
+	list_del (&res->list);
+	kfree (res);
+	break;
+      }
+  }
+}
+EXPORT_SYMBOL (memalloc_FreeMemory);
+
+/*
+ * Release every chunk still linked on @chunks: give the coherent
+ * memory back to the DMA API and free each tracking node.
+ */
+void
+memalloc_ResetMems (struct device *dev, hlinc *chunks)
+{
+  hlinc *cur, *tmp;
+
+  list_for_each_entry_safe (cur, tmp, &(chunks->list), list)
+    {
+      dma_free_coherent (dev, cur->size, (void *) cur->ba, cur->bus_address);
+      list_del (&cur->list);
+      kfree (cur);
+    }
+}
+
+EXPORT_SYMBOL (memalloc_ResetMems);
+
+/*
+ * Declare [bus_addr, bus_addr + size) as the coherent memory pool for
+ * @dev and initialize the client's chunk list head.
+ *
+ * NOTE(review): this targets the legacy dma_declare_coherent_memory()
+ * API where a zero return means failure (it returned a DMA_MEMORY_*
+ * bitmask); on newer kernels the return convention changed — confirm
+ * against the kernel version this driver is built for.
+ */
+int
+memalloc_register(struct device *dev, hlinc *chunks, dma_addr_t bus_addr, unsigned int size)
+{
+  int err;
+  INIT_LIST_HEAD (&(chunks->list));
+  err =
+    dma_declare_coherent_memory (dev, bus_addr, bus_addr,
+				 size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
+  if (!err)
+    {
+      return -ENOMEM;
+    }
+  return 0;
+}
+
+EXPORT_SYMBOL (memalloc_register);
+
+/*
+ * Undo memalloc_register(): drop the coherent memory region previously
+ * declared for @dev.  Always succeeds.
+ */
+int
+memalloc_unregister (struct device *dev)
+{
+  dma_release_declared_memory (dev);
+  return 0;
+}
+
+EXPORT_SYMBOL (memalloc_unregister);
+
+/*
+ * Module init hook: nothing to set up at load time — chunk lists are
+ * created per client via memalloc_register().
+ */
+int __init
+memalloc_init (void)
+{
+  return 0;
+}
+
+module_init (memalloc_init);
+module_exit (memalloc_cleanup);
diff --git a/drivers/parrot/media/memalloc.h b/drivers/parrot/media/memalloc.h
new file mode 100644
index 00000000000000..746f62c502b7ed
--- /dev/null
+++ b/drivers/parrot/media/memalloc.h
@@ -0,0 +1,9 @@
+typedef struct
+{
+ unsigned int bus_address;
+ unsigned int used;
+ unsigned int size;
+ void *ba;
+ struct list_head list;
+} hlinc;
+
diff --git a/drivers/parrot/media/p7-mpegts.c b/drivers/parrot/media/p7-mpegts.c
new file mode 100644
index 00000000000000..e3b1fc916ea889
--- /dev/null
+++ b/drivers/parrot/media/p7-mpegts.c
@@ -0,0 +1,937 @@
+/*
+ * linux/drivers/parrot/media/p7-mpegts.c - Parrot7 MPEG-TS driver
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * @author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/lcm.h>
+#include <linux/uaccess.h>
+
+#include <spi/p7-swb.h>
+
+#include <spi/p7-swb.h>
+
+#include "p7-mpegts.h"
+#include "p7-mpegts_priv.h"
+
+#define MAX_BLK_NB 9
+#define MIN_BLK_NB 3
+
+/*
+ * Registers preserved across suspend/resume: the core's programmable
+ * state, excluding transient registers (STAT, FLUSH, RXCNT, FIFO level).
+ */
+static u32 p7mpg_save_regs[] = {
+	P7MPG_CTRL,
+	P7MPG_SYNC_VAL,
+	P7MPG_SYNC_NUM,
+	P7MPG_PKT_SIZE,
+	P7MPG_DATA_PIPE,
+	P7MPG_ITEN,
+	P7MPG_ITACK,
+	P7MPG_RXFIFO_TH,
+	P7MPG_RXCNT_TH,
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(poll_wait_queue);
+
+/*
+ * dmaengine channel filter: accept only the channel whose id matches
+ * the platform DMA resource passed as @param (see p7mpg_init_core()).
+ */
+static bool p7mpg_filter(struct dma_chan* chan, void* param)
+{
+	return chan->chan_id == (unsigned int) param;
+}
+
+/*
+ * Reserve switchbox pad @pad for this MPEG-TS core with direction @dir
+ * and function @func.  Forwards to the shared SPI switchbox driver;
+ * returns 0 or a negative errno.
+ */
+int p7mpg_request_pad(struct p7mpg_core const * core, unsigned int pad,
+                      enum p7swb_direction dir, enum p7swb_function func)
+{
+	return p7swb_request_pad(core->id, P7SPI_CORE_MPEG,
+	                         (void const * const) core, pad, dir, func);
+}
+
+/* Release a switchbox pad previously acquired with p7mpg_request_pad(). */
+void p7mpg_release_pad(struct p7mpg_core const * core, unsigned int pad,
+                       enum p7swb_direction dir)
+{
+	p7swb_release_pad((void const * const) core, pad, dir);
+}
+
+/*
+ * Map file->private_data (set to &core->miscdev by the misc core on
+ * open) back to the owning p7mpg_core.
+ */
+static struct p7mpg_core* p7mpg_to_core(void *data)
+{
+	return container_of(data, struct p7mpg_core, miscdev);
+}
+
+/* Read back the currently programmed packet size (bytes) from hardware. */
+static unsigned int p7mpg_get_pkt_sz(struct p7mpg_core *core)
+{
+	return __raw_readl(core->vaddr + P7MPG_PKT_SIZE);
+}
+
+/*
+ * Smallest packet count per block such that the block size is a
+ * multiple of @dma_min (the DMA alignment quantum): lcm of both,
+ * expressed in packets.
+ */
+static inline unsigned int p7mpg_get_min_pkt_by_blk(unsigned int pkt_size,
+                                                    size_t dma_min)
+{
+	return lcm(pkt_size, dma_min) / pkt_size;
+}
+
+/*
+ * Convert a requested block size (bytes) into a packet count rounded
+ * up to the DMA-alignment quantum computed by p7mpg_get_min_pkt_by_blk().
+ */
+static inline unsigned int p7mpg_set_pkt_by_blk(struct p7mpg_core *core,
+                                                unsigned int blk_size,
+                                                unsigned int pkt_size)
+{
+	/* Blocks and DMA must be aligned! */
+	unsigned int min_pkt_by_blk = p7mpg_get_min_pkt_by_blk(pkt_size,
+	                                                       core->dma_min);
+	return roundup(blk_size / pkt_size, min_pkt_by_blk);
+}
+
+/*
+ * Number of blocks of @blk_size bytes that fit in the reserved DMA
+ * region, capped to MAX_BLK_NB.  Returns 0 when fewer than MIN_BLK_NB
+ * blocks fit (the ring buffer would be unusable).
+ */
+static inline unsigned int p7mpg_get_max_blk_nb(struct p7mpg_core *core,
+                                                unsigned int blk_size)
+{
+	int blk_nb = rounddown(core->dma_region_sz, blk_size) / blk_size;
+
+	if (blk_nb < MIN_BLK_NB)
+		return 0;
+	return min(blk_nb, MAX_BLK_NB);
+}
+
+/*
+ * (Re)initialize the metadata page shared with userspace via mmap:
+ * current geometry plus a cleared per-block state bitmap (2 bits per
+ * block, see enum p7mpg_block_state).
+ */
+void p7mpg_setup_metadata(struct p7mpg_core* core)
+{
+	core->metadata->pkt_size = p7mpg_get_pkt_sz(core);
+	core->metadata->blk_size = core->metadata->pkt_size * core->pkt_by_blk;
+	core->metadata->blk_by_rb = core->blk_by_rb;
+	core->metadata->blks_states = 0;
+}
+
+/*
+ * Size the core's FIFO, wakeup threshold and DMA burst parameters from
+ * the platform data.  Returns 0, -EPROBE_DEFER when the shared FIFO
+ * pool is not ready yet, or a negative errno.
+ */
+static int p7mpg_setup_core(struct p7mpg_core *core,
+                            struct p7mpg_plat_data const * pdata)
+{
+	ssize_t fifo_sz;
+
+	/* FIFO words come from a pool shared with the SPI cores. */
+	fifo_sz = p7spi_alloc_fifo(p7mpg_core_id(core), P7SPI_CORE_MPEG,
+	                           pdata->fifo_wcnt);
+	if (fifo_sz == -EAGAIN)
+		return -EPROBE_DEFER;
+	else if (fifo_sz < 0)
+		return fifo_sz;
+	core->fifo_sz = (size_t)fifo_sz * sizeof(u32);
+
+	/* Compute FIFO wakeup threshold in number of bytes. */
+	core->thres_sz = pdata->thres_wcnt * sizeof(u32);
+	if (core->thres_sz > (core->fifo_sz - sizeof(u32))) {
+		/* Threshold must leave at least one free word; fall back
+		 * to half the FIFO. */
+		size_t const thres = core->fifo_sz / 2;
+
+		dev_warn(core->device,
+		         "correcting invalid specified threshold (%u -> %u)",
+		         core->thres_sz, thres);
+		core->thres_sz = thres;
+	}
+
+	/* Compute DMA burst length and threshold. */
+	core->dma_cfg.src_maxburst = min_t(u32,
+	                                   core->thres_sz /
+	                                   core->dma_cfg.src_addr_width,
+	                                   16);
+	core->dma_cfg.dst_maxburst = core->dma_cfg.src_maxburst;
+	core->dma_thres = core->dma_cfg.src_maxburst *
+	                  core->dma_cfg.src_addr_width;
+
+	if (core->thres_sz != core->dma_thres)
+		dev_info(core->device,
+		         "aligning DMA burst threshold (%u -> %u)",
+		         core->thres_sz, core->dma_thres);
+
+	/* Smallest block granularity that satisfies both the DMA burst
+	 * size and the CPU cache line alignment. */
+	core->dma_min = lcm(core->dma_thres, dma_get_cache_alignment());
+
+	return 0;
+}
+
+/*
+ * Cyclic-DMA period completion callback (dmaengine context).
+ * Marks the just-filled block VALID in the shared metadata bitmap,
+ * advances to the block now being written, reserves the one after it,
+ * then wakes any poll() sleeper.
+ */
+void p7mpg_complete_dma(void* ctxt)
+{
+	struct p7mpg_core *core = (struct p7mpg_core*) ctxt;
+	unsigned int next_period;
+	unsigned int shift = 2 * core->dma_block;
+
+	/*
+	 * when entering this function, dma_block is the block that triggered
+	 * the interrupt. Update metadata in the ring buffer and mark next
+	 * periods as being reserved for DMA xfer.
+	 */
+	atomic_set_mask(3 << shift, &core->metadata->blks_states);
+
+	core->dma_block = (core->dma_block + 1) % core->blk_by_rb;
+
+	/*
+	 * okay now dma_block is the block that the DMA is currently writing
+	 * in, so we reserve the next period.
+	 */
+	next_period = (core->dma_block + 1) % core->blk_by_rb;
+	shift = 2 * next_period;
+	atomic_clear_mask(3 << shift, &core->metadata->blks_states);
+
+	wake_up(&poll_wait_queue);
+
+	dev_dbg(core->device, "DMA completion called\n");
+}
+
+/*
+ * Validate the ring-buffer geometry against the DMA constraints and
+ * push the slave configuration to the channel.  Returns 0 or a
+ * negative errno.
+ */
+static int p7mpg_config_dma(struct p7mpg_core* core)
+{
+	int err;
+	size_t pkt_size = p7mpg_get_pkt_sz(core);
+	size_t blk_size = pkt_size * core->pkt_by_blk;
+	size_t map_len = blk_size * core->blk_by_rb;
+	size_t max_mappable = core->dma_region_sz;
+
+	/*
+	 * Not much choice here: DMA is only used to receive
+	 * data, so set direction to DEV_TO_MEM.
+	 */
+	core->dma_cfg.direction = DMA_DEV_TO_MEM;
+	core->dma_cfg.src_addr = core->fifo_paddr;
+
+	/* Each DMA period must be a whole number of alignment quanta. */
+	if (blk_size % core->dma_min) {
+		dev_err(core->device,
+		        "block size must be a multiple of %d (%d is not)\n",
+		        core->dma_min, blk_size);
+		return -EINVAL;
+	}
+
+	/* The whole ring must fit in the coherent region. */
+	if (map_len > max_mappable) {
+		dev_err(core->device,
+		        "can't map %dB bytes (max %dB)\n",
+		        map_len, max_mappable);
+		return -EINVAL;
+	}
+
+	err = dmaengine_slave_config(core->dma_chan, &core->dma_cfg);
+	return err;
+}
+
+/*
+ * Prepare and submit the cyclic DMA transfer over the whole ring
+ * buffer: one period per block, @complete invoked at each period end.
+ * Returns 0 or a negative errno.
+ */
+static int p7mpg_run_dma(struct p7mpg_core* core,
+                         void (*complete)(void*))
+{
+	size_t pkt_size = p7mpg_get_pkt_sz(core);
+	int err;
+	size_t period_len = core->pkt_by_blk * pkt_size;
+	size_t map_len = period_len * core->blk_by_rb;
+	struct dma_chan *chan = core->dma_chan;
+	dma_addr_t buff_addr = (dma_addr_t) core->dma_bus;
+	struct dma_async_tx_descriptor* const txd =
+	                chan->device->device_prep_dma_cyclic(chan,
+	                                                     buff_addr,
+	                                                     map_len,
+	                                                     period_len,
+	                                                     core->dma_cfg.direction,
+	                                                     NULL);
+
+	if (! txd)
+		return -ENOMEM;
+
+	/* Called from the dmaengine tasklet at each period boundary. */
+	txd->callback = complete;
+	txd->callback_param = core;
+
+	core->dma_cook = dmaengine_submit(txd);
+	err = dma_submit_error(core->dma_cook);
+	if (err)
+		return err;
+
+	dma_async_issue_pending(core->dma_chan);
+	dev_dbg(core->device, "Starting cylic DMA\n");
+
+	return 0;
+}
+
+/* Configure the slave channel, then kick the cyclic transfer. */
+static int p7mpg_prepare_dma(struct p7mpg_core* core)
+{
+	int err = p7mpg_config_dma(core);
+
+	if (err)
+		return err;
+
+	return p7mpg_run_dma(core, p7mpg_complete_dma);
+}
+
+/*
+ * Reserve the coherent ring-buffer memory; its size comes from the
+ * platform data (dma_sz).  Returns 0 or -ENOMEM.
+ */
+static int p7mpg_init_dmamem(struct p7mpg_core* core,
+                             struct platform_device* pdev)
+{
+	struct p7mpg_plat_data *pdata = pdev->dev.platform_data;
+
+	core->dma_region_sz = pdata->dma_sz;
+	core->dma_virt = dma_alloc_coherent(core->device,
+	                                    core->dma_region_sz,
+	                                    &core->dma_bus,
+	                                    GFP_KERNEL);
+	if (! core->dma_virt) {
+		dev_err(core->device, "failed to reserve DMA'ble memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Give back the ring buffer allocated by p7mpg_init_dmamem(). */
+static void p7mpg_release_dmamem(struct p7mpg_core *core)
+{
+	dma_free_coherent(core->device, core->dma_region_sz,
+	                  core->dma_virt, core->dma_bus);
+}
+
+/* VMA close hook: tracing only, no resource owned by the mapping. */
+static void p7mpg_close_vmdma(struct vm_area_struct* vma)
+{
+	struct p7mpg_core* const core = p7mpg_to_core(vma->vm_file->private_data);
+
+	dev_dbg(core->device, "VMA of device p7mpg %d closed.\n",
+	        p7mpg_core_id(core));
+}
+
+/*
+ * Fault handler: the whole region is pre-mapped by remap_pfn_range()
+ * in p7mpg_mmap(), so any fault means an out-of-range access.
+ */
+static int p7mpg_fault_vmdma(struct vm_area_struct* vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct const p7mpg_vm_ops = {
+	.close = p7mpg_close_vmdma,
+	.fault = p7mpg_fault_vmdma,
+};
+
+/*
+ * mmap entry point.  The page offset selects what is mapped:
+ *   pgoff 0 -> the shared metadata page (kmalloc'd, page-aligned);
+ *   pgoff 1 -> the DMA ring buffer (mapped non-cacheable).
+ * Any other offset is rejected.
+ */
+static int p7mpg_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int err;
+	unsigned long start_addr;
+	struct p7mpg_core* core = p7mpg_to_core(filp->private_data);
+	unsigned long const pageno = vma->vm_pgoff;
+	size_t const sz = vma->vm_end - vma->vm_start;
+
+	/* Mapping must be shared: the kernel writes block states here. */
+	if (! (vma->vm_flags & VM_MAYSHARE)) {
+		dev_err(core->device,
+		        "invalid vma flags (0x%lx)\n",
+		        vma->vm_flags);
+		return -EINVAL;
+	}
+
+	if (vma->vm_end <= vma->vm_start) {
+		dev_err(core->device,
+		        "invalid vma region: 0x%08lx-0x%08lx\n",
+		        vma->vm_start, vma->vm_end);
+		return -EINVAL;
+	}
+
+	switch (pageno) {
+	case 0:
+	{
+		/* Metadata mapping.
+		 * NOTE(review): sz is not bounded here — a caller asking
+		 * for more than the metadata page would map adjacent
+		 * kernel memory; confirm and bound if needed. */
+		start_addr = PFN_DOWN(virt_to_phys((void *)core->metadata));
+		break;
+	}
+	case 1:
+		/* DMA memory mapping */
+		if (sz > core->dma_region_sz) {
+			dev_err(core->device,
+			        "invalid vma size %u (limited to %u)\n",
+			        (unsigned int) sz, (unsigned int) core->dma_region_sz);
+			return -EINVAL;
+		}
+		start_addr = PFN_DOWN(core->dma_bus);
+		/* Coherent memory: strip the cacheable attributes. */
+		vma->vm_page_prot = pgprot_dmacoherent(vm_get_page_prot(vma->vm_flags));
+		break;
+	default:
+		dev_dbg(core->device,
+		        "invalid vma offset page %lu.\n",
+		        pageno);
+		return -EINVAL;
+	}
+
+	/* Never executable, never inherited across fork. */
+	vma->vm_flags &= ~(VM_MAYEXEC | VM_EXEC);
+	vma->vm_flags |= VM_SPECIAL | VM_DONTCOPY;
+
+	err = remap_pfn_range(vma,
+	                      vma->vm_start,
+	                      start_addr,
+	                      sz,
+	                      vma->vm_page_prot);
+	if (err)
+		return err;
+
+	vma->vm_ops = &p7mpg_vm_ops;
+
+	dev_dbg(core->device, "device p7mpegts %d mmapped.\n", p7mpg_core_id(core));
+	return 0;
+}
+
+/*
+ * Apply @config to the core: recompute ring-buffer geometry (packet
+ * count per block, block count, clamped to what the DMA region can
+ * hold), then program the hardware registers.  A zero field in @config
+ * generally means "keep the current value".  Fails with -EBUSY while
+ * the IP is running, -EINVAL when the geometry cannot fit.
+ */
+static int p7mpg_set_config(struct p7mpg_core *core, struct p7mpg_settings *config)
+{
+	int ctrl = 0;
+
+	/* The IP must be disabled when configured. */
+	if (__raw_readl(core->vaddr + P7MPG_CTRL) & P7MPG_CTRL_ENABLE)
+		return -EBUSY;
+
+	if (config->pkt_by_blk || config->blk_by_rb) {
+		unsigned int pkt_size, pkt_by_blk, blk_size, max_blk_by_rb;
+
+		/* Use the new packet size if given, else the programmed one. */
+		if (config->pkt_size)
+			pkt_size = config->pkt_size;
+		else
+			pkt_size = __raw_readl(core->vaddr + P7MPG_PKT_SIZE);
+
+		/* Round the requested block size up to the DMA quantum. */
+		if (config->pkt_by_blk)
+			pkt_by_blk = p7mpg_set_pkt_by_blk (core,
+			                                   pkt_size * config->pkt_by_blk,
+			                                   pkt_size);
+		else
+			pkt_by_blk = core->pkt_by_blk;
+
+		blk_size = pkt_size * pkt_by_blk;
+		max_blk_by_rb = p7mpg_get_max_blk_nb(core, blk_size);
+
+		/* Clamp the block count to what the DMA region allows. */
+		if (!max_blk_by_rb)
+			return -EINVAL;
+		else if (config->blk_by_rb) {
+			if (config->blk_by_rb > max_blk_by_rb)
+				core->blk_by_rb = max_blk_by_rb;
+			else
+				core->blk_by_rb = config->blk_by_rb;
+		} else if (core->blk_by_rb > max_blk_by_rb) {
+			core->blk_by_rb = max_blk_by_rb;
+		}
+
+		core->pkt_by_blk = pkt_by_blk;
+	}
+
+	/* Set IP parameters */
+	__raw_writel(config->sync_val, core->vaddr + P7MPG_SYNC_VAL);
+	if (config->sync_num > 1)
+		__raw_writel(config->sync_num, core->vaddr + P7MPG_SYNC_NUM);
+	if (config->pkt_size)
+		__raw_writel(config->pkt_size, core->vaddr + P7MPG_PKT_SIZE);
+	__raw_writel(config->timing_dl, core->vaddr + P7MPG_DATA_PIPE);
+
+	/* Control bits; VALID/SYNC only if the pad is actually wired. */
+	if (config->fall_sampling)
+		ctrl |= P7MPG_CTRL_FALL_SAMPLING;
+	if (config->lsb)
+		ctrl |= P7MPG_CTRL_LSB_FIRST;
+	if (config->valid_en && core->valid_pin)
+		ctrl |= P7MPG_CTRL_VALID;
+	if (config->sync_en && core->sync_pin)
+		ctrl |= P7MPG_CTRL_SYNC;
+
+	__raw_writel(ctrl, core->vaddr + P7MPG_CTRL);
+
+	return 0;
+}
+
+/*
+ * Mirror the current hardware setup and driver ring-buffer geometry
+ * back into @config.  Always returns 0.
+ */
+static int p7mpg_get_config(struct p7mpg_core *core, struct p7mpg_settings *config)
+{
+	int const ctrl = __raw_readl(core->vaddr + P7MPG_CTRL);
+
+	config->fall_sampling = !!(ctrl & P7MPG_CTRL_FALL_SAMPLING);
+	config->lsb           = !!(ctrl & P7MPG_CTRL_LSB_FIRST);
+	config->valid_en      = !!(ctrl & P7MPG_CTRL_VALID);
+	config->sync_en       = !!(ctrl & P7MPG_CTRL_SYNC);
+
+	config->sync_val  = __raw_readl(core->vaddr + P7MPG_SYNC_VAL);
+	config->sync_num  = __raw_readl(core->vaddr + P7MPG_SYNC_NUM);
+	config->pkt_size  = __raw_readl(core->vaddr + P7MPG_PKT_SIZE);
+	config->timing_dl = __raw_readl(core->vaddr + P7MPG_DATA_PIPE);
+
+	config->pkt_by_blk = core->pkt_by_blk;
+	config->blk_by_rb  = core->blk_by_rb;
+
+	return 0;
+}
+
+/*
+ * Arm the receive path: reset block accounting, flush the FIFO,
+ * program the DMA wakeup threshold, start the cyclic DMA transfer,
+ * then enable the IP together with its DMA interface.
+ *
+ * (The previous dead initializer of 'err' to -ENOMEM, immediately
+ * overwritten by p7mpg_prepare_dma(), is removed.)
+ */
+static int p7mpg_start(struct p7mpg_core *core)
+{
+	int ctrl;
+	int err;
+
+	core->dma_block = 0;
+	p7mpg_setup_metadata(core);
+
+	/* flush FIFO */
+	__raw_writel(1, core->vaddr + P7MPG_FLUSH);
+	__raw_writel(core->dma_thres / sizeof(u32), core->vaddr + P7MPG_RXFIFO_TH);
+
+	err = p7mpg_prepare_dma(core);
+	if (err)
+		return err;
+
+	ctrl = __raw_readl(core->vaddr + P7MPG_CTRL);
+	ctrl |= P7MPG_CTRL_DMAC_ENABLE;
+	ctrl |= P7MPG_CTRL_ENABLE;
+
+	__raw_writel(ctrl, core->vaddr + P7MPG_CTRL);
+
+	return 0;
+}
+
+/* Disable the IP, then abort the in-flight cyclic DMA transfer. */
+static int p7mpg_stop(struct p7mpg_core *core)
+{
+	int const ctrl = __raw_readl(core->vaddr + P7MPG_CTRL);
+
+	__raw_writel(ctrl & ~(P7MPG_CTRL_ENABLE), core->vaddr + P7MPG_CTRL);
+	dmaengine_terminate_all(core->dma_chan);
+
+	return 0;
+}
+
+/*
+ * Last-close hook: make sure capture is stopped so the DMA does not
+ * keep writing into the (possibly unmapped) ring buffer.
+ */
+static int p7mpg_release(struct inode *inode, struct file *filp)
+{
+	struct p7mpg_core* core = p7mpg_to_core(filp->private_data);
+
+	/* If the IP is enabled, disable it. */
+	if (__raw_readl(core->vaddr + P7MPG_CTRL) & P7MPG_CTRL_ENABLE)
+		p7mpg_stop(core);
+
+	dev_dbg(core->device, "device p7mpegts %d closed.\n", core->id);
+	return 0;
+}
+
+/* Open hook: nothing to set up per-open, trace only. */
+static int p7mpg_open(struct inode *inode, struct file* filp)
+{
+	struct p7mpg_core* const core = p7mpg_to_core(filp->private_data);
+
+	dev_dbg(core->device, "device p7mpegts %d opened.\n", core->id);
+	return 0;
+}
+
+/*
+ * Character-device ioctl entry point.
+ *
+ * Configuration structures are now exchanged with userspace through
+ * copy_from_user()/copy_to_user(): the previous code cast 'arg' and
+ * dereferenced the raw userspace pointer in kernel context, which
+ * bypasses access checking (a security hole and an oops source on a
+ * bad pointer).
+ */
+static long p7mpg_ioctl(struct file *filp,
+                        unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct p7mpg_settings settings;
+	struct p7mpg_core* core = p7mpg_to_core(filp->private_data);
+
+	if (_IOC_TYPE(cmd) != P7MPG_IOC_MAGIC) return -ENOTTY;
+	if (_IOC_NR(cmd) > P7MPG_IOC_MAXNR) return -ENOTTY;
+
+	switch (cmd) {
+	case P7MPG_IOC_SET_CONFIG:
+		if (copy_from_user(&settings, (void __user *)arg,
+		                   sizeof(settings)))
+			return -EFAULT;
+		ret = p7mpg_set_config(core, &settings);
+		dev_dbg(core->device, "device p7mpegts %d : P7MPG_IOC_SET_CONFIG ioctl.\n", core->id);
+		break;
+	case P7MPG_IOC_GET_CONFIG:
+		ret = p7mpg_get_config(core, &settings);
+		if (!ret &&
+		    copy_to_user((void __user *)arg, &settings,
+		                 sizeof(settings)))
+			ret = -EFAULT;
+		dev_dbg(core->device, "device p7mpegts %d : P7MPG_IOC_GET_CONFIG ioctl.\n", core->id);
+		break;
+	case P7MPG_IOC_START:
+		ret = p7mpg_start(core);
+		dev_dbg(core->device, "device p7mpegts %d : P7MPG_IOC_START ioctl.\n", core->id);
+		break;
+	case P7MPG_IOC_STOP:
+		ret = p7mpg_stop(core);
+		dev_dbg(core->device, "device p7mpegts %d : P7MPG_IOC_STOP ioctl.\n", core->id);
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * poll()/select() support: readable as soon as at least one ring
+ * buffer block is marked VALID in the shared state bitmap (2 bits per
+ * block).  Woken by p7mpg_complete_dma().
+ */
+static unsigned int p7mpg_poll(struct file *filep, struct poll_table_struct *wait)
+{
+	struct p7mpg_core* core = p7mpg_to_core(filep->private_data);
+	int i;
+
+	poll_wait(filep, &poll_wait_queue, wait);
+
+	/* If there is one at least one block valid, return POLLIN. */
+	for(i = 0; i < core->blk_by_rb; i++) {
+		if (((core->metadata->blks_states >> 2 * i) & 3) == P7MPG_BLOCK_VALID)
+			return POLLIN | POLLRDNORM;
+	}
+
+	return 0;
+}
+
+/* Character device operations exposed through the misc device. */
+static struct file_operations const p7mpg_fops = {
+	.owner          = THIS_MODULE,
+	.open           = p7mpg_open,
+	.release        = p7mpg_release,
+	.mmap           = p7mpg_mmap,
+	.unlocked_ioctl = p7mpg_ioctl,
+	.poll           = p7mpg_poll,
+};
+
+/*
+ * Acquire every hardware resource the core needs: register window,
+ * DMA channel, pin mux, metadata page and (when PM is enabled) the
+ * register save area.  Resources are released in reverse order on
+ * failure.
+ *
+ * Fix: the error path freed core->metadata, which is the PFN-aligned
+ * pointer *inside* the kmalloc'd buffer — kfree() must be given the
+ * original kmalloc pointer (core->metadata_mem).
+ */
+static int p7mpg_init_core(struct p7mpg_core *core,
+                           struct platform_device* pdev)
+{
+	int err;
+	struct resource const* res;
+	dma_cap_mask_t mask;
+
+#ifdef DEBUG
+	BUG_ON(! core);
+	BUG_ON(! pdev);
+#endif
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (! res) {
+		dev_err(core->device, "failed to get core memory region\n");
+		return -ENXIO;
+	}
+
+	core->regs = request_mem_region(res->start,
+	                                resource_size(res),
+	                                dev_name(core->device));
+	if (! core->regs) {
+		dev_err(core->device,
+		        "failed to request core memory region [%08x:%08x]\n",
+		        res->start,
+		        res->end);
+		return -EBUSY;
+	}
+
+	core->vaddr = (unsigned long) ioremap(res->start, resource_size(res));
+	if (! core->vaddr) {
+		dev_err(core->device,
+		        "failed to remap core memory region [%08x:%08x]\n",
+		        res->start,
+		        res->end);
+		err = -ENXIO;
+		goto release;
+	}
+	/* Physical address of the data FIFO, used as the DMA source. */
+	core->fifo_paddr = res->start + P7MPG_DATA;
+
+	core->dma_chan = NULL;
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (! res) {
+		dev_err(core->device, "failed to get DMA resource\n");
+		err = -ENXIO;
+		goto unmap;
+	}
+
+	/* Grab the exact channel named by the platform DMA resource. */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+	core->dma_chan = dma_request_channel(mask, p7mpg_filter, (void*) res->start);
+	if (! core->dma_chan) {
+		dev_err(core->device,
+		        "failed to request DMA channel %d\n", res->start);
+		err = -EINVAL;
+		goto unmap;
+	}
+
+	/*
+	 * Setup DMA configuration fields remaining constant over controller
+	 * life-cycle.
+	 */
+	core->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+	core->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+	core->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(core->pctl)) {
+		err = PTR_ERR(core->pctl);
+		dev_err(&pdev->dev, "failed to select I/O pins (%d)\n", err);
+		goto release_channel;
+	}
+
+	/* Two pages so a page-aligned metadata page always fits inside. */
+	core->metadata_mem = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+	if (!core->metadata_mem) {
+		err = -ENOMEM;
+		goto put_pins;
+	}
+	core->metadata = (struct p7mpg_metadata *)PFN_ALIGN(core->metadata_mem);
+
+#ifdef CONFIG_PM_SLEEP
+	core->saved_regs = kcalloc(ARRAY_SIZE(p7mpg_save_regs),
+	                           sizeof(u32), GFP_KERNEL);
+	if (!core->saved_regs) {
+		err = -ENOMEM;
+		goto free_metadata;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_PM_SLEEP
+free_metadata:
+	/* Free the kmalloc'd buffer, not the aligned pointer into it. */
+	kfree(core->metadata_mem);
+#endif
+put_pins:
+	pinctrl_put(core->pctl);
+release_channel:
+	dma_release_channel(core->dma_chan);
+unmap:
+	iounmap((void*) core->vaddr);
+release:
+	release_mem_region(core->regs->start, resource_size(core->regs));
+	return err;
+}
+
+/*
+ * Reserve every switchbox pad listed in the platform data table
+ * (terminated by pad == -1).  On failure, pads acquired so far are
+ * released before returning the error.
+ */
+static int p7mpg_request_pads(struct p7mpg_core* core,
+                              struct platform_device *pdev)
+{
+	int err = 0;
+	struct p7mpg_plat_data *pdata = pdev->dev.platform_data;
+	struct p7spi_swb const* swb = pdata->swb;
+
+	while (swb->pad != -1) {
+		err = p7mpg_request_pad(core, swb->pad, swb->direction,
+		                        swb->function);
+		if (err) {
+			/* Roll back the pads requested before this one. */
+			struct p7spi_swb const* swb_rel = pdata->swb;
+			while (swb_rel != swb) {
+				p7mpg_release_pad(core, swb_rel->pad,
+				                  swb_rel->direction);
+				swb_rel++;
+			}
+
+			goto finish;
+		}
+		swb++;
+	}
+
+finish:
+	return err;
+}
+
+/*
+ * Program a sane default configuration: 188-byte MPEG-TS packets,
+ * sync byte 0x47, ~32KiB blocks shrunk until at least 4 blocks fit in
+ * the DMA region.
+ */
+static void p7mpg_init_config(struct p7mpg_core* core)
+{
+	struct p7mpg_settings config;
+
+	config.fall_sampling = 0;
+	config.lsb = 0;
+	config.valid_en = 0;
+	config.sync_en = 0;
+	config.sync_val = 0x47;	/* standard MPEG-TS sync byte */
+	config.sync_num = 2;
+	config.pkt_size = 188;	/* standard MPEG-TS packet length */
+	config.timing_dl = 0;
+	config.pkt_by_blk = p7mpg_set_pkt_by_blk(core, SZ_32K,
+	                                         config.pkt_size);
+	config.blk_by_rb = p7mpg_get_max_blk_nb(core,
+	                                        config.pkt_size *
+	                                        config.pkt_by_blk);
+
+	/* Shrink blocks until at least 4 fit.
+	 * NOTE(review): if the DMA region is very small, pkt_by_blk can
+	 * underflow to 0 and this never converges — confirm dma_sz is
+	 * always large enough in the platform data. */
+	while (config.blk_by_rb < 4) {
+		config.pkt_by_blk -= p7mpg_get_min_pkt_by_blk(config.pkt_size,
+		                                              core->dma_min);
+		config.blk_by_rb = p7mpg_get_max_blk_nb(core,
+		                                        config.pkt_size *
+		                                        config.pkt_by_blk);
+	}
+
+	p7mpg_set_config(core, &config);
+}
+
+/*
+ * Register the misc character device "<drvname>.<id>" for this core.
+ *
+ * The name buffer is sized by kasprintf() instead of the previous
+ * fixed "dot + one digit" budget, which silently truncated the name
+ * for ids >= 10 and for the auto-assigned id -1 (three characters:
+ * ".-1").  The name is freed on registration failure and by
+ * p7mpg_remove_device() otherwise.
+ */
+static int p7mpg_add_device(struct p7mpg_core* core,
+                            struct platform_device *pdev)
+{
+	int err;
+	char *miscdev_name;
+
+	miscdev_name = kasprintf(GFP_KERNEL, "%s.%d", pdev->name, pdev->id);
+	if (! miscdev_name)
+		return -ENOMEM;
+
+	core->miscdev.minor = MISC_DYNAMIC_MINOR;
+	core->miscdev.name = miscdev_name;
+	core->miscdev.fops = &p7mpg_fops;
+
+	err = misc_register(&core->miscdev);
+	if (err)
+		kfree(miscdev_name);
+
+	return err;
+}
+
+/*
+ * Tear down everything acquired by p7mpg_init_core() and
+ * p7mpg_setup_core(), in reverse acquisition order, after disabling
+ * the IP.
+ */
+static void p7mpg_exit_core(struct p7mpg_core *core)
+{
+#ifdef DEBUG
+	BUG_ON(! core);
+#endif
+
+	/* Make sure the IP is fully disabled before releasing anything. */
+	__raw_writel(0, core->vaddr + P7MPG_CTRL);
+
+	if (core->fifo_sz)
+		p7spi_free_fifo(p7mpg_core_id(core), P7SPI_CORE_MPEG);
+#ifdef CONFIG_PM_SLEEP
+	kfree(core->saved_regs);
+#endif
+	kfree(core->metadata_mem);
+	pinctrl_put(core->pctl);
+	dma_release_channel(core->dma_chan);
+	iounmap((void*) core->vaddr);
+	release_mem_region(core->regs->start, resource_size(core->regs));
+}
+
+/* Undo p7mpg_request_pads(): release every pad in the platform table. */
+static void p7mpg_release_pads(struct p7mpg_core* core,
+                               struct platform_device *pdev)
+{
+	struct p7mpg_plat_data *pdata = pdev->dev.platform_data;
+	struct p7spi_swb const* entry;
+
+	for (entry = pdata->swb; entry->pad != -1; entry++)
+		p7mpg_release_pad(core, entry->pad, entry->direction);
+}
+
+/* Unregister the misc device and free its dynamically allocated name. */
+static void p7mpg_remove_device(struct p7mpg_core* core)
+{
+	misc_deregister(&core->miscdev);
+	kfree(core->miscdev.name);
+}
+
+/*
+ * Guard changed from CONFIG_PM to CONFIG_PM_SLEEP: core->saved_regs
+ * is only allocated under CONFIG_PM_SLEEP (see p7mpg_init_core()), so
+ * a CONFIG_PM && !CONFIG_PM_SLEEP build would have dereferenced a
+ * NULL saved_regs here.
+ */
+#ifdef CONFIG_PM_SLEEP
+/* Save the programmable state and stop capture before sleeping. */
+static int p7mpg_suspend(struct platform_device* pdev, pm_message_t state)
+{
+	int i;
+	struct p7mpg_core* core = platform_get_drvdata(pdev);
+
+	/* Remember whether capture was running so resume can restart it. */
+	core->started = __raw_readl(core->vaddr + P7MPG_CTRL) &
+	                P7MPG_CTRL_ENABLE;
+
+	if (core->started)
+		p7mpg_stop(core);
+
+	for(i = 0; i < ARRAY_SIZE(p7mpg_save_regs); i++) {
+		core->saved_regs[i] = __raw_readl(core->vaddr +
+		                                  p7mpg_save_regs[i]);
+	}
+
+	return 0;
+}
+
+/* Restore the saved registers and restart capture if it was running. */
+static int p7mpg_resume(struct platform_device* pdev)
+{
+	int i;
+	struct p7mpg_core* core = platform_get_drvdata(pdev);
+
+	/* Restore registers */
+	for (i = 0; i < ARRAY_SIZE(p7mpg_save_regs); i++) {
+		__raw_writel(core->saved_regs[i],
+		             core->vaddr + p7mpg_save_regs[i]);
+	}
+
+	if (core->started)
+		p7mpg_start(core);
+
+	return 0;
+}
+#else
+
+#define p7mpg_suspend NULL
+#define p7mpg_resume NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * Platform probe: allocate the core descriptor, bring up the hardware
+ * resources, reserve the DMA ring, grab the switchbox pads, apply the
+ * default configuration and expose the misc device.
+ *
+ * Fix: when p7mpg_init_core() failed, 'core' was returned without
+ * being freed, leaking the kzalloc'd descriptor.
+ */
+static int __devinit p7mpg_probe(struct platform_device* pdev)
+{
+	int err;
+	int dev_id = pdev->id;
+	struct p7mpg_core* core = kzalloc(sizeof(*core), GFP_KERNEL);
+
+	if (!core)
+		return -ENOMEM;
+
+	/*
+	 * early set of core->id because it's going to be used
+	 * in almost all init functions
+	 */
+	core->id = dev_id;
+	core->device = &pdev->dev;
+
+	err = p7mpg_init_core(core, pdev);
+	if (err)
+		goto free_core;
+
+	err = p7mpg_setup_core(core, pdev->dev.platform_data);
+	if (err)
+		goto exit_core;
+
+	err = p7mpg_init_dmamem(core, pdev);
+	if (err)
+		goto exit_core;
+
+	err = p7mpg_request_pads(core, pdev);
+	if (err)
+		goto release_dmamem;
+
+	p7mpg_init_config(core);
+
+	err = p7mpg_add_device(core, pdev);
+	if (! err) {
+		platform_set_drvdata(pdev, core);
+		return 0;
+	}
+
+	p7mpg_release_pads(core, pdev);
+release_dmamem:
+	p7mpg_release_dmamem(core);
+exit_core:
+	p7mpg_exit_core(core);
+free_core:
+	kfree(core);
+	return err;
+}
+
+/*
+ * Platform remove: tear everything down in reverse probe order.
+ */
+static int p7mpg_remove(struct platform_device *pdev)
+{
+	struct p7mpg_core* core = platform_get_drvdata(pdev);
+	if (!core)
+		return -EINVAL;
+
+	p7mpg_remove_device(core);
+	p7mpg_release_pads(core, pdev);
+	p7mpg_release_dmamem(core);
+	p7mpg_exit_core(core);
+	kfree(core);
+	return 0;
+}
+
+/* Platform driver glue; legacy (non-dev_pm_ops) suspend/resume hooks. */
+static struct platform_driver p7mpg_driver = {
+	.driver = {
+		.name   = P7MPG_DRV_NAME,
+		.owner  = THIS_MODULE,
+	},
+	.probe  = p7mpg_probe,
+	.remove = __devexit_p(p7mpg_remove),
+	.suspend = p7mpg_suspend,
+	.resume = p7mpg_resume,
+};
+
+/* Module entry point: register the platform driver. */
+static int __init p7mpg_init(void)
+{
+	return platform_driver_register(&p7mpg_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit p7mpg_exit(void)
+{
+	platform_driver_unregister(&p7mpg_driver);
+}
+
+module_init(p7mpg_init);
+module_exit(p7mpg_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/p7-mpegts.h b/drivers/parrot/media/p7-mpegts.h
new file mode 100644
index 00000000000000..a647fdc8c9f9ae
--- /dev/null
+++ b/drivers/parrot/media/p7-mpegts.h
@@ -0,0 +1,36 @@
+/*
+ * linux/drivers/parrot/media/p7-mpegts.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * @author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef P7_MPEGTS_H_
+#define P7_MPEGTS_H_
+
+#include "p7-mpegts_ioctl.h"
+#include "p7-mpegts_regs.h"
+
+#define P7MPG_DRV_NAME "p7-mpegts"
+#define P7MPG_CORE_NR 2
+
+/**
+ * struct p7mpg_plat_data - Parrot7 MPEGTS per device platform settings
+ * @swb:        switchbox settings table, terminated by pad == -1
+ *              (see &struct p7spi_swb)
+ * @fifo_wcnt:  FIFO depth to reserve from the shared pool, in 32-bit words
+ * @thres_wcnt: FIFO wakeup threshold in number of 32 bits words
+ * @dma_sz:     size in bytes of the coherent DMA ring-buffer region
+ */
+struct p7mpg_plat_data {
+	struct p7spi_swb const* swb;
+	size_t                  fifo_wcnt;
+	size_t                  thres_wcnt;
+	size_t                  dma_sz;
+};
+
+#define P7MPG_MEM_WCNT(_core, _cnt) \
+ (P7SPI_MEM_ ## _cnt ## W << ((4 + _core) * P7SPI_MEM_BITLEN))
+
+#endif
diff --git a/drivers/parrot/media/p7-mpegts_ioctl.h b/drivers/parrot/media/p7-mpegts_ioctl.h
new file mode 100644
index 00000000000000..1393037da9800a
--- /dev/null
+++ b/drivers/parrot/media/p7-mpegts_ioctl.h
@@ -0,0 +1,53 @@
+/*
+ * linux/drivers/parrot/media/p7-mpegts_ioctl.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * @author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef P7_MPEGTS_IOCTL_H_
+#define P7_MPEGTS_IOCTL_H_
+
+#include <linux/ioctl.h>
+
+/*
+ * Configuration exchanged through P7MPG_IOC_SET/GET_CONFIG.
+ * In set_config(), a zero pkt_size/pkt_by_blk/blk_by_rb means "keep
+ * the current value".
+ */
+struct p7mpg_settings {
+	unsigned char   fall_sampling:1;  /* sample on falling clock edge */
+	unsigned char   lsb:1;            /* LSB-first bit order */
+	unsigned char   valid_en:1;       /* use the VALID input signal */
+	unsigned char   sync_en:1;        /* use the SYNC input signal */
+	unsigned char   sync_val:8;       /* sync byte value (0x47 for TS) */
+	unsigned int    sync_num:10;      /* sync bytes required to lock */
+	unsigned int    pkt_size:16;      /* packet size in bytes */
+	unsigned char   timing_dl:3;      /* data pipeline delay */
+	unsigned int    pkt_by_blk;       /* packets per ring-buffer block */
+	unsigned char   blk_by_rb;        /* blocks per ring buffer */
+};
+
+/* Per-block state, packed 2 bits per block in metadata.blks_states. */
+enum p7mpg_block_state {
+	P7MPG_BLOCK_INVALID     = 0,
+	P7MPG_BLOCK_BUSY        = 1,
+	P7MPG_BLOCK_FREE        = 2,
+	P7MPG_BLOCK_VALID       = 3,
+};
+
+/*
+ * Ring-buffer bookkeeping shared with userspace via mmap (page
+ * offset 0): geometry plus the packed per-block state bitmap.
+ */
+struct p7mpg_metadata {
+	size_t          blk_size;     /* block size in bytes */
+	size_t          pkt_size;     /* packet size in bytes */
+	unsigned char   blk_by_rb;    /* number of blocks in the ring */
+	unsigned long   blks_states;  /* 2 bits per block, see block_state */
+};
+
+#define P7MPG_IOC_MAGIC 0xBB
+
+#define P7MPG_IOC_SET_CONFIG _IOW(P7MPG_IOC_MAGIC, 1, struct p7mpg_settings *)
+#define P7MPG_IOC_GET_CONFIG _IOR(P7MPG_IOC_MAGIC, 2, struct p7mpg_settings *)
+#define P7MPG_IOC_START _IO(P7MPG_IOC_MAGIC, 3)
+#define P7MPG_IOC_STOP _IO(P7MPG_IOC_MAGIC, 4)
+
+#define P7MPG_IOC_MAXNR 4
+
+#endif /* P7_MPEGTS_IOCTL_H_ */
diff --git a/drivers/parrot/media/p7-mpegts_priv.h b/drivers/parrot/media/p7-mpegts_priv.h
new file mode 100644
index 00000000000000..b7ef92d1d4da51
--- /dev/null
+++ b/drivers/parrot/media/p7-mpegts_priv.h
@@ -0,0 +1,87 @@
+/*
+ * linux/drivers/parrot/media/p7-mpegts_priv.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * @author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef P7_MPEGTS_PRIV_H_
+#define P7_MPEGTS_PRIV_H_
+
+#if defined(CONFIG_MPEGTS_PARROT7) || \
+ defined(CONFIG_MPEGTS_PARROT7_MODULE)
+
+#include "p7-mpegts_ioctl.h"
+#include "p7-mpegts_regs.h"
+
+#include <linux/miscdevice.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#include <spi/p7-spi.h>
+#include <spi/p7-spi_priv.h>
+
+
+#define P7MPG_DRV_NAME "p7-mpegts"
+
+struct p7mpg_block_desc;
+
+/* Per-instance state of one MPEG-TS core. */
+struct p7mpg_core {
+	int                         id;        /* core index (platform id) */
+	struct device*              device;
+	struct miscdevice           miscdev;   /* userspace entry point */
+
+	struct resource const *     regs;      /* claimed register window */
+	unsigned long               vaddr;     /* ioremap'd register base */
+
+	/* info about the region reserved for DMA */
+	size_t                      dma_region_sz;
+	void*                       dma_virt;
+	dma_addr_t                  dma_bus;
+
+	struct dma_chan*            dma_chan;
+	bool                        map_dma;
+	struct dma_slave_config     dma_cfg;
+	dma_cookie_t                dma_cook;
+	dma_addr_t                  dma_addr;
+	size_t                      dma_sz;
+	size_t                      dma_thres; /* burst size in bytes */
+	size_t                      dma_min;   /* block alignment quantum */
+
+	unsigned long               fifo_paddr; /* DMA source: data FIFO */
+	size_t                      fifo_sz;
+	/* Transfer bytes threshold */
+	size_t                      thres_sz;
+
+	/* Driver params */
+	unsigned int                pkt_by_blk;
+	unsigned char               blk_by_rb;
+
+	void*                       metadata_mem; /* kmalloc'd backing buffer */
+	struct p7mpg_metadata*      metadata;     /* page-aligned view of it */
+	unsigned int                dma_block;    /* block the DMA writes now */
+
+	struct pinctrl*             pctl;
+
+	/* Pins valid or sync availability */
+	int                         valid_pin;
+	int                         sync_pin;
+
+	/* Array to save registers during suspend/resume procedure */
+	u32                         *saved_regs;
+	bool                        started;
+};
+
+/* Accessor kept for symmetry with the SPI core helpers. */
+static inline int p7mpg_core_id(struct p7mpg_core const * core)
+{
+	return core->id;
+}
+
+#endif /* defined(CONFIG_MPEGTS_PARROT7) || \
+ defined(CONFIG_MPEGTS_PARROT7_MODULE) */
+
+#endif
diff --git a/drivers/parrot/media/p7-mpegts_regs.h b/drivers/parrot/media/p7-mpegts_regs.h
new file mode 100644
index 00000000000000..165bb6cf57a0ec
--- /dev/null
+++ b/drivers/parrot/media/p7-mpegts_regs.h
@@ -0,0 +1,45 @@
+#ifndef __P7_MPEGTS_REGS_H
+#define __P7_MPEGTS_REGS_H
+
+#define P7MPG_CORE_OFFSET(mpegts_id) (0x4000+(mpegts_id)*0x8000)
+
+#define P7MPG_CTRL 0x000 // MPEGTS Control Register
+#define P7MPG_SYNC_VAL 0x004 // MPEGTS SYNC byte value Register
+#define P7MPG_SYNC_NUM 0x008 // MPEGTS Number of SYNC waited Register
+#define P7MPG_PKT_SIZE 0x00C // MPEGTS Packet size Register
+#define P7MPG_DATA_PIPE 0x010 // MPEGTS Timing clock delay Register
+#define P7MPG_STAT 0x014 // MPEGTS Status Register
+#define P7MPG_ITEN 0x018 // MPEGTS Interrupt enable Register
+#define P7MPG_FLUSH 0x01C // MPEGTS FIFO & DMA flush Register
+#define P7MPG_ITACK 0x020 // MPEGTS Interrupt Acknowledge Register
+#define P7MPG_RXFIFO_TH 0x024 // MPEGTS RX FIFO threshold Register
+#define P7MPG_RXCNT_TH 0x028 // MPEGTS RX Counter threshold Register
+#define P7MPG_RXCNT 0x02C // MPEGTS RX Counter Value Register
+#define P7MPG_FIFO_RXLVL 0x030 // MPEGTS RX FIFO Level Register
+#define P7MPG_DATA 0x800 // MPEGTS Data Registers
+
+// Control register
+#define P7MPG_CTRL_ENABLE (1 << 0) // MPEGTS Enabled
+#define P7MPG_CTRL_SYNC (1 << 1) // MPEGTS Synchronization signal enabled
+#define P7MPG_CTRL_VALID (1 << 2) // MPEGTS Validation signal enabled
+#define P7MPG_CTRL_LSB_FIRST (1 << 3) // MPEGTS LSB first
+#define P7MPG_CTRL_FALL_SAMPLING (1 << 4) // MPEGTS Clock polarity selection
+#define P7MPG_CTRL_DMAC_ENABLE (1 << 5) // MPEGTS DMA interface enabled
+
+// Status register
+#define P7MPG_STATUS_RX_EMPTY (1 << 0) // MPEGTS Rx FIFO is empty
+#define P7MPG_STATUS_RX_FULL (1 << 1) // MPEGTS Rx FIFO is full
+#define P7MPG_STATUS_RXCNT_TH_REACHED (1 << 2) // MPEGTS RX byte counter is higher than threshold
+#define P7MPG_STATUS_RXFIFO_TH_REACHED (1 << 3) // MPEGTS RX FIFO level reached the high threshold
+#define P7MPG_STATUS_RXACCESS_ERR (1 << 4) // MPEGTS Rx access error
+
+/*
+ * Input pad registers
+ * _core: mpegts core id (0 - 1)
+ */
+#define P7MPG_SWB_IPAD_CLK(_core) (0x260UL + (16UL * (_core)))
+#define P7MPG_SWB_IPAD_DATA(_core) (0x264UL + (16UL * (_core)))
+#define P7MPG_SWB_IPAD_VALID(_core) (0x268UL + (16UL * (_core)))
+#define P7MPG_SWB_IPAD_SYNC(_core) (0x26CUL + (16UL * (_core)))
+
+#endif
diff --git a/drivers/parrot/media/video/Kconfig b/drivers/parrot/media/video/Kconfig
new file mode 100644
index 00000000000000..998ea99ea09305
--- /dev/null
+++ b/drivers/parrot/media/video/Kconfig
@@ -0,0 +1,150 @@
+config AVI_V4L2
+ tristate "AVI V4L2 interface"
+ depends on AVI
+ depends on VIDEO_V4L2
+ default n
+ help
+ Build the AVI V4L2 interface layer.
+
+config AVI_V4L2_DEV
+ tristate "AVI V4L2 device interface"
+ depends on AVI_V4L2
+ default n
+ help
+ Build the AVI V4L2 device interface
+
+config AVI_V4L2_ISP
+ tristate "AVI V4L2 ISP interface"
+ depends on AVI_V4L2
+ default n
+ help
+ Build the AVI V4L2 ISP interface
+
+config AVI_V4L2_GST_COLORSPACE_HACK
+ bool "Hack for Gstreamer colorspace bad handling"
+ default y
+ depends on AVI_V4L2
+ help
+ gstreamer v4l2sink and src do not handle the colorspace
+ correctly, they set bad values in the pix structure
+ (actually, they don't set anything, they use whatever they
+ get in g_fmt). Since we cannot trust the value we try
+ to guess the colorspace
+
+config CAM_AVI
+ tristate "Parrot Advanced Video Interface camif support"
+ depends on AVI_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ select CAM_AVI_DUMMY
+ help
+ Support for the camera interface in the Parrot AVI IP
+
+config AVICAM_TIMINGS_AUTO
+ bool "Automatic timings detection on avicam input"
+ depends on CAM_AVI
+ default n
+ help
+ This option enables the automatic detection of timings on
+ avicam input instead of getting format from v4l2-subdev
+
+config AVICAM_INSTANT_STREAMOFF
+ bool "Force avicam streamoff to be instantaneous"
+ depends on CAM_AVI
+ default n
+ help
+ This option will not wait for remaining frames during streamoff
+ and instead will force the AVI to shutdown instantaneously.
+ It is useful for low-framerate modes.
+
+config AVICAM_USE_ISP
+ bool "Enable ISP V4L2 controls in avicam interface"
+ depends on CAM_AVI && AVI_V4L2_ISP
+ default n
+ help
+	  This option adds many controls to the V4L2 interface in order to
+	  control the internal ISP chain of the Parrot7.
+
+config CAM_AVI_DUMMY
+ tristate "Parrot AVI dummy cam driver"
+ depends on AVI && CAM_AVI
+ help
+ Support for the dummy avi cam driver
+
+config AVI_CAPTURE_TEST
+ tristate "Parrot Advanced Video Interface capture test driver"
+ depends on CAM_AVI
+ default n
+ help
+ Support for capture
+
+config AVI_MULTICAPTURE
+ tristate "Parrot Advanced Video Interface Multiple capture"
+ depends on CAM_AVI
+ default n
+ help
+ Support for multiple captures
+
+config VIDOUT_AVI
+ tristate "Parrot Advanced Video Interface video output overlays support"
+ depends on AVI_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ Enable support for AVI video output overlays.
+
+ You can turn debug traces on/off with module option debug=0
+ or debug=1.
+ You can turn off dual channel usage for semi-planar with
+ monochannel=1 (test purpose)
+
+config R2R_AVI
+ tristate "Parrot Advanced Video Interface RAM to RAM support"
+ depends on AVI
+ help
+ Enable support for AVI RAM to RAM.
+
+config M2M_AVI
+ tristate "V4L2 MEM2MEM interface"
+ depends on AVI_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ V4L2 MEM2MEM interface
+
+config M2M_AVI_USE_ISP
+ bool "Enable ISP V4L2 controls in MEM2MEM interface"
+ depends on M2M_AVI && AVI_V4L2_ISP
+ default n
+ help
+	  This option adds many controls to the V4L2 interface in order to
+	  control the internal ISP chain of the Parrot7.
+
+config R2R_AVI_TEST_MULTI
+ tristate "Test module for AVI RAM to RAM with multiple clients support"
+ depends on R2R_AVI && AVI_DEBUG && DEBUG_FS
+ default n
+ help
+ Test module for AVI RAM to RAM with multiple clients support.
+
+config R2R_AVI_TEST_MULTI_NUMBER
+ int "Number of clients"
+ depends on R2R_AVI_TEST_MULTI
+ default 3
+ help
+ Number of clients: minimum 1, maximum 6, default 3
+
+config R2R_AVI_TEST_MULTI_RESMEM
+ int "Size of reserved memory per client (MegaBytes)"
+ depends on R2R_AVI_TEST_MULTI
+ default 64
+ help
+ Size of reserved memory per client in MegaBytes, default 64 MB.
+
+config CAM_LEPTON
+ tristate "Lepton camera support"
+ depends on SPI_MASTER && I2C
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ This enables the Lepton camera driver
+
diff --git a/drivers/parrot/media/video/Makefile b/drivers/parrot/media/video/Makefile
new file mode 100644
index 00000000000000..3b3b3d26e07b53
--- /dev/null
+++ b/drivers/parrot/media/video/Makefile
@@ -0,0 +1,33 @@
+ccflags-y += -I$(srctree)/drivers/parrot
+ccflags-$(CONFIG_AVI_DEBUG_LOG) += -DDEBUG
+
+obj-$(CONFIG_AVI_V4L2) += avi_v4l2.o
+
+obj-$(CONFIG_AVI_V4L2_ISP) += avi_v4l2_isp.o
+
+avicam-objs := avicam_v4l2.o avicam_stats.o avi_capture.o
+
+obj-$(CONFIG_CAM_AVI_DUMMY) += avicam_dummy_dev.o
+obj-$(CONFIG_CAM_AVI) += avicam.o
+obj-$(CONFIG_AVICAM_SUBDEV) += avicam_subdev.o
+
+avivoc-objs := avi_voc_ctrl.o avi_voc.o
+obj-$(CONFIG_VIDOUT_AVI) += avivoc.o
+
+avi_r2r-objs := avi_r2r_core.o
+ifeq ($(CONFIG_AVI_DEBUG),y)
+avi_r2r-objs += avi_r2r_debugfs.o
+endif
+obj-$(CONFIG_R2R_AVI) += avi_r2r.o
+
+obj-$(CONFIG_R2R_AVI_TEST_MULTI) += avi_r2r_test_multi.o
+
+obj-$(CONFIG_M2M_AVI) += avi_m2m.o avi_stats.o
+
+obj-$(CONFIG_AVI_CAPTURE_TEST) += avi_capture_test.o
+
+obj-$(CONFIG_CAM_LEPTON) += lepton.o lepton_dev.o
+
+obj-$(CONFIG_AVI_MULTICAPTURE) += avi_multicapture.o
+
+obj-$(CONFIG_AVI_V4L2_DEV) += avi_v4l2_dev.o
diff --git a/drivers/parrot/media/video/avi_capture.c b/drivers/parrot/media/video/avi_capture.c
new file mode 100644
index 00000000000000..4e6e87327a7c52
--- /dev/null
+++ b/drivers/parrot/media/video/avi_capture.c
@@ -0,0 +1,1032 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include "video/avi_pixfmt.h"
+#include "avi_capture.h"
+
+#ifdef DEBUG
+#define dprintk(ctx, format_, args...) \
+ dev_dbg((ctx)->dev, "%s: " format_, __func__, ##args)
+
+#else /* DEBUG */
+#define dprintk(ctx_, format_, args...) (void)ctx_
+#endif /* DEBUG */
+
+/*
+ * Initialize a capture context: build the camera segment, the DMA-out
+ * segment and, when @enable_stats is set, the bayer statistics segment.
+ *
+ * Returns 0 on success or a negative errno; on failure every segment
+ * built so far is torn down before returning.
+ */
+int avi_capture_init(struct avi_capture_context *ctx,
+                     struct device *dev,
+                     int major,
+                     unsigned long caps,
+                     int enable_stats)
+{
+	unsigned long cam_caps;
+	int ret = 0;
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	/* Extract the camera number from the capabilities */
+	cam_caps = caps & AVI_CAPS_CAM_ALL;
+	caps &= ~cam_caps;
+
+	ctx->dev = dev;
+
+	if (!cam_caps) {
+		/* We need a camera */
+		dprintk(ctx, "missing camera capability\n");
+		ret = -EINVAL;
+		goto no_cam;
+	}
+
+	/* Build the camera segment */
+	ctx->cam_segment = avi_segment_build(&cam_caps, "cam", major, -1, dev);
+	if (IS_ERR(ctx->cam_segment)) {
+		dprintk(ctx, "cam_segment build failed\n");
+		ret = PTR_ERR(ctx->cam_segment);
+		goto no_cam_segment;
+	}
+
+	ctx->cam_node = avi_segment_get_node(ctx->cam_segment,
+	                                     AVI_CAPS_CAM_ALL);
+	BUG_ON(ctx->cam_node == NULL);
+
+	/* Build the DMA out segment. */
+	caps |= AVI_CAP_DMA_OUT;
+
+	ctx->dma_segment = avi_segment_build(&caps, "cam_dma", major, -1, dev);
+	if (IS_ERR(ctx->dma_segment)) {
+		dprintk(ctx, "dma_segment build failed\n");
+		ret = PTR_ERR(ctx->dma_segment);
+		goto no_dma_segment;
+	}
+
+	if (enable_stats) {
+		caps = avi_isp_bayer_get_stats_cap(ctx->dma_segment);
+
+		if (!caps) {
+			dprintk(ctx, "can't find stats cap\n");
+			ret = -ENODEV;
+			goto no_stats_cap;
+		}
+
+		caps |= AVI_CAP_DMA_OUT;
+
+		ctx->stats_segment = avi_segment_build(&caps,
+		                                       "stats",
+		                                       major,
+		                                       -1,
+		                                       dev);
+		if (IS_ERR(ctx->stats_segment)) {
+			dprintk(ctx, "stats_segment build failed\n");
+			ret = PTR_ERR(ctx->stats_segment);
+			ctx->stats_segment = NULL;
+			goto no_stats_segment;
+		}
+
+		ctx->stats_fifo = avi_segment_get_node(ctx->stats_segment,
+		                                       AVI_CAP_DMA_OUT);
+		BUG_ON(!ctx->stats_fifo);
+	}
+
+	ctx->cam_segment->priv = ctx;
+	ctx->dma_segment->priv = ctx;
+
+	init_waitqueue_head(&ctx->waitq);
+	atomic_set(&ctx->streaming, 0);
+
+	avi_segment_enable_irq(ctx->dma_segment);
+
+	init_timer(&ctx->timer);
+	spin_lock_init(&ctx->lock);
+
+	return 0;
+
+ no_stats_segment:
+ no_stats_cap:
+	avi_segment_teardown(ctx->dma_segment);
+ no_dma_segment:
+	avi_segment_teardown(ctx->cam_segment);
+ no_cam_segment:
+ no_cam:
+	return ret;
+}
+EXPORT_SYMBOL(avi_capture_init);
+
+/* Tear down every segment built by avi_capture_init. */
+void avi_capture_destroy(struct avi_capture_context *ctx)
+{
+	if (ctx->stats_segment)
+		avi_segment_teardown(ctx->stats_segment);
+
+	avi_segment_teardown(ctx->dma_segment);
+	avi_segment_teardown(ctx->cam_segment);
+}
+EXPORT_SYMBOL(avi_capture_destroy);
+
+/* Return the field (top/bottom) currently reported by the camera
+ * status register, or AVI_NONE_FIELD for progressive input. */
+static enum avi_field avi_capture_get_field(struct avi_capture_context *ctx)
+{
+	union avi_cam_status status;
+
+	if (!ctx->interlaced)
+		return AVI_NONE_FIELD;
+
+	/* XXX interlaced with external synchros not yet supported (software
+	 * limitation) */
+	BUG_ON(!ctx->interface.itu656);
+
+	status = avi_cam_get_status(ctx->cam_node);
+
+	return status.field ? AVI_BOT_FIELD : AVI_TOP_FIELD;
+}
+
+/*
+ * Return the timings matching field @f: top or bottom timings for
+ * interlaced input (BUG on any other field value), always the top
+ * timings for progressive input.
+ *
+ * The original version read the current field into a local
+ * `cur_field` that was never used (dead store); the unused variable
+ * and its initializing call are removed.
+ */
+static struct avi_capture_timings *
+avi_capture_field_timings(struct avi_capture_context *ctx, enum avi_field f)
+{
+	if (ctx->interlaced) {
+		switch(f) {
+		case AVI_TOP_FIELD:
+			return &ctx->timings_top;
+		case AVI_BOT_FIELD:
+			return &ctx->timings_bot;
+		default:
+			BUG();
+		}
+	}
+
+	return &ctx->timings_top;
+}
+
+static unsigned avi_capture_timings_init(struct avi_segment *,
+                                         enum avi_field *);
+
+/* Default measure-to-timings function, should work fine for standard-compliant
+ * 656 timings. Assumes ivs and ihs are set in cam interface. */
+static void avi_capture_656_to_timings(struct avi_capture_timings *t,
+                                       struct avi_cam_measure_regs *m)
+{
+	/* We always invert the polarity of the synchronisation signals,
+	 * therefore the active video is measured by the cam module as a sync
+	 * pulse.
+	 *
+	 * We do this in order to be able to capture stable video even if the
+	 * input stream has variable blanking durations. */
+	t->ht.hactive_on = m->hactive_on;
+	t->ht.hactive_off = m->hsync_off;
+	t->vt.vactive_on = m->vactive_on;
+	t->vt.vactive_off = m->vsync_voff;
+}
+
+/* Default measure-to-timings function, should work fine for standard-compliant
+ * non-656 timings. Assumes ivs and ihs are set in cam interface. */
+static void avi_capture_to_timings(struct avi_capture_timings *t,
+                                   struct avi_cam_measure_regs *m)
+{
+	t->ht.hactive_on = m->hactive_on;
+	t->ht.hactive_off = m->hsync_off;
+	t->vt.vactive_on = m->vactive_on+1;
+	t->vt.vactive_off = m->vsync_voff+1;
+}
+
+/* Step 3: We compare the timings retrieved in step 2 with the current measured
+ * values. If they're the same we're done and wake up the calling
+ * task. Otherwise we go back to step 1 and try again. */
+static unsigned avi_capture_timings_validate(struct avi_segment *s,
+                                             enum avi_field *field)
+{
+	struct avi_capture_context *ctx = s->priv;
+	struct avi_capture_timings *expected;
+	enum avi_field f = avi_capture_get_field(ctx);
+	struct avi_cam_measure_regs m;
+	struct avi_capture_timings t;
+
+	expected = avi_capture_field_timings(ctx, f);
+
+	avi_cam_get_measure(ctx->cam_node, &m);
+
+	ctx->measure_to_timings(&t, &m);
+
+	if (memcmp(expected, &t, sizeof(t)) == 0) {
+		/* XXX for interlaced mode I should maybe check the consistency
+		 * of both timings ? */
+		atomic_set(&ctx->timings_valid, 1);
+		avi_segment_register_irq(ctx->cam_segment, NULL);
+		wake_up_interruptible(&ctx->waitq);
+		return 0;
+	}
+
+	/* Got inconsistent timings */
+	if (printk_ratelimit()) {
+		dev_warn(ctx->dev,
+		         "got inconsistent timings during bootstrap\n");
+
+#define CHECK_TIMING(_timing) do { \
+		if (expected->_timing != t._timing) \
+			dev_warn(ctx->dev, \
+			         #_timing ": expected %u, got %u\n", \
+			         expected->_timing, t._timing); \
+	} while(0);
+
+		/* Display the inconsistencies */
+		CHECK_TIMING(ht.hactive_on);
+		CHECK_TIMING(ht.hactive_off);
+		CHECK_TIMING(vt.vactive_on);
+		CHECK_TIMING(vt.vactive_off);
+#undef CHECK_TIMING
+	}
+
+	/* Restart the whole bootstrapping procedure to try and get clean
+	 * timings */
+	return avi_capture_timings_init(s, field);
+}
+
+/* Step 2: We measure the timings and store them in the context. Wait for two frames
+ * if we're interlaced in order to get the timings for both fields. */
+static unsigned avi_capture_timings_measure(struct avi_segment *s,
+                                            enum avi_field *field)
+{
+	struct avi_capture_context *ctx = s->priv;
+	enum avi_field f = avi_capture_get_field(ctx);
+	struct avi_capture_timings *t;
+	struct avi_cam_measure_regs m;
+
+	avi_cam_get_measure(ctx->cam_node, &m);
+
+	t = avi_capture_field_timings(ctx, f);
+
+	ctx->measure_to_timings(t, &m);
+
+	/* vactive_off is used as a "timings valid" marker: it is zeroed in
+	 * step 1 and non-zero once a field has been measured. */
+	if (ctx->timings_top.vt.vactive_off &&
+	    (!ctx->interlaced || ctx->timings_bot.vt.vactive_off))
+		/* Switch to validation mode */
+		avi_segment_register_irq(ctx->cam_segment,
+		                         &avi_capture_timings_validate);
+
+	return 0;
+}
+
+/* Step 1: At this point the measurements are probably garbage since it's likely
+ * we've been enabled in the middle of a frame. We'll start measuring starting
+ * with the next frame. */
+static unsigned avi_capture_timings_init(struct avi_segment *s,
+                                         enum avi_field *field)
+{
+	struct avi_capture_context *ctx = s->priv;
+
+	/* Mark the timings as invalid */
+	ctx->timings_top.vt.vactive_off = 0;
+	ctx->timings_bot.vt.vactive_off = 0;
+
+	avi_segment_register_irq(ctx->cam_segment,
+	                         &avi_capture_timings_measure);
+
+	return 0;
+}
+
+/*
+ * Compute the capture timings from a known input resolution instead of
+ * measuring them (synthesizes the measure registers a compliant stream
+ * would produce). Also resets the crop window to the full frame and
+ * programs the camera interface.
+ */
+int avi_capture_prepare_timings(struct avi_capture_context *ctx,
+                                unsigned width, unsigned height)
+{
+	struct avi_segment_format sensor_fmt = {
+		.interlaced = ctx->interlaced,
+		.colorspace = ctx->capture_cspace,
+		.width = width,
+		.height = height
+	};
+	struct avi_cam_measure_regs m = {
+		.hactive_on = 0,
+		.hsync_off = width,
+		.vactive_on = 0,
+		.vsync_voff = height,
+	};
+
+	if (ctx->interface.itu656) {
+		/* 656 streams carry SAV/EAV codes, offset the active area */
+		m.hactive_on = 2;
+		m.hsync_off += 2;
+		m.vactive_on = 1;
+		m.vsync_voff += 1;
+	}
+
+	avi_capture_configure_interface(ctx);
+
+	ctx->crop.x = 0;
+	ctx->crop.y = 0;
+	ctx->crop.width = width;
+	ctx->crop.height = height;
+
+	if(ctx->interlaced)
+		/* The top field gets the extra line when height is odd */
+		m.vsync_voff = height/2 + height%2
+			+ (ctx->interface.itu656 ? 1 : 0);
+
+	ctx->measure_to_timings(&ctx->timings_top, &m);
+
+	if(ctx->interlaced) {
+		m.vsync_voff -= height % 2;
+		ctx->measure_to_timings(&ctx->timings_bot, &m);
+	}
+
+	return avi_segment_set_format(ctx->cam_segment, &sensor_fmt, &sensor_fmt);
+}
+EXPORT_SYMBOL(avi_capture_prepare_timings);
+
+/* Step 0: We ignore the first interrupt because the measures are not yet valid
+ * (we always get the measures of the preceding frame).*/
+static unsigned avi_capture_timings_sync(struct avi_segment *s,
+                                         enum avi_field *field)
+{
+	struct avi_capture_context *ctx = s->priv;
+
+	avi_segment_register_irq(ctx->cam_segment,
+	                         &avi_capture_timings_init);
+
+	return 0;
+}
+
+/*
+ * Program the camera node registers from ctx->interface. If no custom
+ * measure-to-timings conversion was supplied, pick the builtin one
+ * matching the bus type (656 or parallel) and set the sync polarities
+ * it expects.
+ */
+void avi_capture_configure_interface(struct avi_capture_context *ctx)
+{
+	struct avi_cam_registers regs = {{{ 0 }}};
+
+	regs.itsource.done_en = 1;
+
+	regs.interface = ctx->interface;
+
+	if (ctx->measure_to_timings == NULL) {
+		/* 656-to-timings needs inverted syncs */
+		if (ctx->interface.itu656) {
+			ctx->measure_to_timings = &avi_capture_656_to_timings;
+			regs.interface.ivs = 1;
+			regs.interface.ihs = 1;
+		} else {
+			ctx->measure_to_timings = &avi_capture_to_timings;
+			regs.interface.ivs = !ctx->interface.ivs;
+			regs.interface.ihs = !ctx->interface.ihs;
+		}
+	}
+
+	avi_cam_set_registers(ctx->cam_node, &regs);
+}
+EXPORT_SYMBOL(avi_capture_configure_interface);
+
+/*
+ * Measure the input timings by running the bootstrap state machine
+ * (sync -> init -> measure -> validate) and sleeping until the timings
+ * are validated or the (timeout_jiffies * 4) delay expires.
+ *
+ * Returns 0 on success, -ETIMEDOUT on timeout, or a negative errno if
+ * the wait was interrupted or the format could not be applied.
+ */
+int avi_capture_prepare(struct avi_capture_context *ctx)
+{
+	struct avi_segment_format sensor_fmt = {
+		.interlaced = ctx->interlaced,
+		.colorspace = ctx->capture_cspace,
+	};
+	unsigned long timeout;
+	int ret;
+
+	avi_capture_configure_interface(ctx);
+
+	atomic_set(&ctx->timings_valid, 0);
+
+	avi_segment_register_irq(ctx->cam_segment, &avi_capture_timings_sync);
+
+	avi_segment_enable_irq(ctx->cam_segment);
+
+	avi_segment_activate(ctx->cam_segment);
+
+	/* Force the activation of the camera alone */
+	avi_apply_node(ctx->cam_node);
+
+	timeout = ctx->timeout_jiffies;
+	if (timeout == 0)
+		/* default to 1 second */
+		timeout = HZ;
+
+	/* Give us some headroom since we'll have to capture a few frames to get
+	 * the timings right. I chose this value completely arbitrarily, but it
+	 * should be at least 3 since we'll need at least 3 frames before the
+	 * timings are validated. The rest is in case */
+	timeout *= 4;
+
+	/* Wait for the notification by the interrupt handler that the timings
+	 * are valid. */
+	ret = wait_event_interruptible_timeout(ctx->waitq,
+	                                       atomic_read(&ctx->timings_valid),
+	                                       timeout);
+
+	/* Cleanup everything related to the sensor's interrupt. If we start a
+	 * capture after that we'll only care about the DMA interrupt */
+	avi_segment_deactivate(ctx->cam_segment);
+	/* Force the deactivation of the camera alone */
+	avi_apply_node(ctx->cam_node);
+
+	avi_segment_disable_irq(ctx->cam_segment);
+	avi_segment_register_irq(ctx->cam_segment, NULL);
+
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	if (ret < 0)
+		return ret;
+
+	avi_capture_resolution(ctx, &sensor_fmt.width, &sensor_fmt.height);
+	/* Default crop is the full frame */
+	ctx->crop.x = 0;
+	ctx->crop.y = 0;
+	ctx->crop.width = sensor_fmt.width;
+	ctx->crop.height = sensor_fmt.height;
+
+	return avi_segment_set_format(ctx->cam_segment, &sensor_fmt, &sensor_fmt);
+}
+EXPORT_SYMBOL(avi_capture_prepare);
+
+/*
+ * Compute the active resolution from the measured timings. Either
+ * output pointer may be NULL. For interlaced input the height is the
+ * sum of both fields.
+ */
+void avi_capture_resolution(const struct avi_capture_context *ctx,
+                            unsigned *width,
+                            unsigned *height)
+{
+	if (width)
+		*width = ctx->timings_top.ht.hactive_off -
+			ctx->timings_top.ht.hactive_on;
+
+	if (height) {
+		*height = ctx->timings_top.vt.vactive_off -
+			ctx->timings_top.vt.vactive_on;
+
+		if (ctx->interlaced)
+			*height += ctx->timings_bot.vt.vactive_off -
+				ctx->timings_bot.vt.vactive_on;
+	}
+}
+EXPORT_SYMBOL(avi_capture_resolution);
+
+/*
+ * Set the crop window. Returns -ERANGE if the window exceeds the active
+ * video, -EINVAL for an interlaced-incompatible window, and -EBUSY when
+ * a dimension change is requested while streaming (only the window
+ * position may change mid-stream).
+ */
+int avi_capture_set_crop(struct avi_capture_context *ctx,
+                         struct avi_capture_crop *crop)
+{
+	unsigned max_x = crop->x + crop->width;
+	unsigned max_y = crop->y + crop->height;
+	unsigned width;
+	unsigned height;
+	struct avi_segment_format sensor_fmt;
+	int ret;
+
+	avi_capture_resolution(ctx, &width, &height);
+
+	if (max_x > width || max_y > height)
+		/* The crop window is outside of the active video */
+		return -ERANGE;
+
+	/* Force the cropping to keep the field ordering and have identical top
+	 * and bottom heights */
+	if (ctx->interlaced && (crop->y % 2 || crop->height % 2))
+		return -EINVAL;
+
+	if (crop->width == ctx->crop.width &&
+	    crop->height == ctx->crop.height) {
+		/* We only need to update the crop coordinates, the segment
+		 * formats is unchanged */
+		ctx->crop = *crop;
+
+		return 0;
+	}
+
+	/* If we reach this point it means we change the crop window
+	 * dimensions */
+
+	if (atomic_read(&ctx->streaming))
+		/* We can't change the capture dimensions while streaming */
+		return -EBUSY;
+
+	avi_segment_get_input_format(ctx->cam_segment, &sensor_fmt);
+
+	sensor_fmt.width = crop->width;
+	sensor_fmt.height = crop->height;
+
+	ret = avi_segment_set_format(ctx->cam_segment,
+	                             &sensor_fmt,
+	                             &sensor_fmt);
+	if (ret)
+		return ret;
+
+	ctx->crop = *crop;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_capture_set_crop);
+
+/* Check whether the DMA segment can convert from the current sensor
+ * format to @fmt, without applying anything. */
+int avi_capture_try_format(const struct avi_capture_context *ctx,
+                           struct avi_segment_format *fmt)
+{
+	struct avi_segment_format sensor_fmt;
+
+	avi_segment_get_input_format(ctx->cam_segment, &sensor_fmt);
+
+	return avi_segment_try_format(ctx->dma_segment,
+	                              &sensor_fmt,
+	                              fmt,
+	                              &ctx->dma_segment->layout);
+}
+EXPORT_SYMBOL(avi_capture_try_format);
+
+/* Apply @fmt as the DMA segment output format, using the current sensor
+ * format as input. */
+int avi_capture_set_format(struct avi_capture_context *ctx,
+                           struct avi_segment_format *fmt)
+{
+	struct avi_segment_format sensor_fmt;
+
+	avi_segment_get_input_format(ctx->cam_segment, &sensor_fmt);
+
+	return avi_segment_set_format(ctx->dma_segment,
+	                              &sensor_fmt,
+	                              fmt);
+}
+EXPORT_SYMBOL(avi_capture_set_format);
+
+/* Program the camera window registers from timings @t offset by the
+ * crop window (field-halved vertical values for interlaced input). */
+static void avi_capture_set_timings(struct avi_capture_context *ctx,
+                                    struct avi_capture_timings *t)
+{
+	union avi_cam_h_timing ht;
+	union avi_cam_v_timing vt;
+	const struct avi_capture_crop *crop = &ctx->crop;
+
+	ht.hactive_on = t->ht.hactive_on + crop->x;
+	ht.hactive_off = ht.hactive_on + crop->width;
+
+	if (ctx->interlaced) {
+		vt.vactive_on = t->vt.vactive_on + crop->y / 2;
+		vt.vactive_off = vt.vactive_on + crop->height / 2;
+	} else {
+		vt.vactive_on = t->vt.vactive_on + crop->y;
+		vt.vactive_off = vt.vactive_on + crop->height;
+	}
+
+	avi_cam_set_timings(ctx->cam_node, ht, vt);
+}
+
+/* Placeholder buffer used while the client has not provided one; a null
+ * plane0 address marks it (see avi_capture_is_null_buffer). */
+static const struct avi_dma_buffer avi_capture_null_buffer = {
+	.plane0.dma_addr = 0,
+	.plane1.dma_addr = 0,
+	.status = AVI_BUFFER_READY,
+	.priv = NULL,
+};
+
+static inline int avi_capture_is_null_buffer(struct avi_dma_buffer *buf)
+{
+	return !buf->plane0.dma_addr;
+}
+
+/* Non-zero when the context was initialized with stats enabled */
+static inline int avi_capture_stats_enabled(struct avi_capture_context *ctx)
+{
+	return ctx->stats_segment != NULL;
+}
+
+/*
+ * Update *period_us with the delay since *last_frame using an
+ * exponential moving average, then record *cur_date as the new
+ * last-frame date. The very first call only initializes *last_frame.
+ */
+static void avi_capture_update_period(long *period_us,
+                                      struct timeval *cur_date,
+                                      struct timeval *last_frame)
+{
+	long cur_period_us;
+
+	if (last_frame->tv_sec == 0 && last_frame->tv_usec == 0) {
+		/* Our first frame, can't compute the period yet */
+		*last_frame = *cur_date;
+		return;
+	}
+
+	cur_period_us = (cur_date->tv_sec - last_frame->tv_sec) * USEC_PER_SEC
+		+ cur_date->tv_usec - last_frame->tv_usec;
+
+	if (*period_us) {
+		long delta = cur_period_us - *period_us;
+
+		/* Use an exponential moving average for the framerate. The
+		 * shift value says how "fast" the mean changes when new samples
+		 * come in, so basically the bigger it is the smoother will be
+		 * the variation, */
+		*period_us += delta >> 4;
+	} else {
+		/* We start the mean at the first value */
+		*period_us = cur_period_us;
+	}
+
+	*last_frame = *cur_date;
+}
+
+/* Read the monotonic clock into a struct timeval */
+static inline void avi_capture_get_time(struct timeval *date)
+{
+	struct timespec ts;
+
+	ktime_get_ts(&ts);
+	date->tv_sec = ts.tv_sec;
+	date->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+
+/*
+ * Per-frame bookkeeping (takes ctx->lock): updates the period
+ * statistics, completes the captured buffer and stats buffer through
+ * the client callbacks, swaps the configured/capture buffer pairs and
+ * re-arms the watchdog timer. @s is the completion status to report
+ * (AVI_BUFFER_DONE or AVI_BUFFER_TIMEOUT); *f receives the current
+ * field.
+ */
+static void avi_capture_handle_new_frame(struct avi_capture_context *ctx,
+                                         enum avi_field *f,
+                                         enum avi_dma_buffer_status s)
+{
+	struct avi_dma_buffer *buf = ctx->capture_buffer;
+	struct avi_dma_buffer *stats = ctx->capture_stats;
+	unsigned long flags;
+	union avi_cam_status status;
+	struct timeval cur_date;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	avi_capture_get_time(&cur_date);
+
+	avi_capture_update_period(&ctx->dma_segment->stream_period_us,
+	                          &cur_date,
+	                          &ctx->last_frame);
+
+	if (!avi_capture_is_null_buffer(buf))
+		avi_capture_update_period(&ctx->dma_segment->period_us,
+		                          &cur_date,
+		                          &ctx->last_captured_frame);
+	else
+		ctx->dma_segment->frame_dropped++;
+
+	*f = avi_capture_get_field(ctx);
+
+	if (ctx->done && !avi_capture_is_null_buffer(buf)) {
+		if (s == AVI_BUFFER_TIMEOUT) {
+			buf->status = s;
+		} else {
+			status = avi_cam_get_status(ctx->cam_node);
+
+			if (status.fof)
+				/* An overflow occurred during the capture */
+				buf->status = AVI_BUFFER_ERROR;
+			else
+				buf->status = AVI_BUFFER_DONE;
+
+			/* Make sure all flags are reset */
+			avi_ack_node_irq(ctx->cam_node);
+		}
+
+		ctx->done(ctx, buf, *f);
+	}
+
+	/* Reset the buffer */
+	*buf = avi_capture_null_buffer;
+
+	/* The configured_buffer is not receiving the captured frame, so we swap
+	 * them around */
+	ctx->capture_buffer = ctx->configured_buffer;
+	ctx->configured_buffer = buf;
+
+	/* Stats handling. We never actually enable the stats_fifo interrupt at
+	 * the CFG level, we just use the itflg flag to see if they were
+	 * correctly written in memory. This way we avoid having two interrupts
+	 * per frame at the CPU level since we synchronize ourselves on the
+	 * video output. */
+	if (avi_capture_stats_enabled(ctx)) {
+		avi_capture_update_period(&ctx->stats_segment->stream_period_us,
+		                          &cur_date,
+		                          &ctx->last_stats_frame);
+
+		if (!avi_capture_is_null_buffer(stats))
+			avi_capture_update_period(
+				&ctx->stats_segment->period_us,
+				&cur_date,
+				&ctx->last_captured_stats_frame);
+		else
+			ctx->stats_segment->frame_dropped++;
+
+		if (ctx->stats_done && !avi_capture_is_null_buffer(stats)) {
+
+			if (avi_node_get_irq_flag(ctx->stats_fifo))
+				stats->status = AVI_BUFFER_DONE;
+			else
+				/* Looks like the stats are not ready... */
+				stats->status = AVI_BUFFER_ERROR;
+
+			ctx->stats_done(ctx, stats, *f);
+		}
+
+		/* Make sure we never leave the IT flag raised */
+		avi_ack_node_irq(ctx->stats_fifo);
+
+		/* Same shuffling as above, this time for the stat buffers */
+		*stats = avi_capture_null_buffer;
+		ctx->capture_stats = ctx->configured_stats;
+		ctx->configured_stats = stats;
+	}
+
+	if (ctx->timeout_jiffies)
+		mod_timer(&ctx->timer, jiffies + ctx->timeout_jiffies);
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+}
+
+#ifndef CONFIG_AVICAM_INSTANT_STREAMOFF
+/* Final interrupt when the last frame is done capturing */
+static unsigned avi_capture_stopped(struct avi_segment *s, enum avi_field *f)
+{
+	struct avi_capture_context *ctx = s->priv;
+
+	/* Last frame */
+	avi_capture_handle_new_frame(ctx, f, AVI_BUFFER_DONE);
+
+	/* Make sure we apply everything on stats FIFO */
+	if (avi_capture_stats_enabled(ctx)) {
+		struct avi_node *dma_fifo;
+
+		dma_fifo = avi_segment_get_node(ctx->stats_segment,
+		                                AVI_CAP_DMA_OUT);
+		BUG_ON(dma_fifo == NULL);
+		avi_apply_node(dma_fifo);
+	}
+
+	/* Wake up the task sleeping in avi_capture_streamoff */
+	atomic_set(&ctx->streaming, 0);
+	wake_up(&ctx->waitq);
+
+	return 0;
+}
+
+/* Interrupt callback for disabling the stream */
+static unsigned avi_capture_stop(struct avi_segment *s, enum avi_field *f)
+{
+	struct avi_capture_context *ctx = s->priv;
+
+	avi_capture_handle_new_frame(ctx, f, AVI_BUFFER_DONE);
+
+	avi_segment_deactivate(ctx->dma_segment);
+	avi_segment_deactivate(ctx->cam_segment);
+
+	if (avi_capture_stats_enabled(ctx))
+		avi_segment_deactivate(ctx->stats_segment);
+
+	/* We should receive a final interrupt when the last frame is
+	 * captured. At that moment the deactivation will take effect. */
+	avi_segment_register_irq(ctx->dma_segment, &avi_capture_stopped);
+
+	return 0;
+}
+#endif
+
+/* Interrupt callback while streaming: completes the current frame then
+ * asks the client for the next buffer(s) and re-arms the capture. */
+static unsigned avi_capture_interrupt(struct avi_segment *s, enum avi_field *f)
+{
+	struct avi_capture_context *ctx = s->priv;
+
+	avi_capture_handle_new_frame(ctx, f, AVI_BUFFER_DONE);
+
+	if (ctx->interlaced) {
+		/* We're preparing for the next frame to be captured (not the
+		 * one being captured right now) so it's the same field as the
+		 * previous frame's field. Yes, it's complicated. */
+		avi_capture_set_timings(ctx, avi_capture_field_timings(ctx, *f));
+	}
+
+	/* Ask the client to provide us with a new buffer */
+	if (ctx->next)
+		ctx->next(ctx, ctx->configured_buffer, *f);
+
+	avi_segment_set_output_buffer(ctx->dma_segment, ctx->configured_buffer);
+
+	if (avi_capture_stats_enabled(ctx)) {
+		if (ctx->stats_next)
+			ctx->stats_next(ctx, ctx->configured_stats, *f);
+
+		avi_segment_set_output_buffer(ctx->stats_segment,
+		                              ctx->configured_stats);
+	}
+
+	avi_cam_run(ctx->cam_node);
+
+	return 0;
+}
+
+/* Watchdog timer callback: the expected frame interrupt never came, so
+ * force-restart the camera and flush the FIFOs, then report the pending
+ * buffer as timed out. */
+static void avi_capture_timeout(unsigned long data)
+{
+	struct avi_capture_context *ctx;
+	enum avi_field dummy_field;
+	struct avi_node *dma_fifo;
+
+	ctx = (struct avi_capture_context *)data;
+	if (!atomic_read(&ctx->streaming))
+		/* We're probably in the process of shutting down, ignore this
+		 * timeout */
+		return;
+
+	dev_warn(ctx->dev, "timeout, dropping frame...\n");
+
+	/* If the AVI locked itself because of a corrupted frame for instance,
+	 * attempt to restart the process by forcing the camera run */
+	avi_cam_freerun(ctx->cam_node);
+
+	/* And then we clean the whole chain by forcing a clear */
+	if (avi_capture_stats_enabled(ctx)) {
+		dma_fifo = avi_segment_get_node(ctx->stats_segment,
+		                                AVI_CAP_DMA_OUT);
+		BUG_ON(dma_fifo == NULL);
+		avi_fifo_force_clear(dma_fifo);
+	}
+
+	dma_fifo = avi_segment_get_node(ctx->dma_segment, AVI_CAP_DMA_OUT);
+	BUG_ON(dma_fifo == NULL);
+	avi_fifo_force_clear(dma_fifo);
+
+	avi_capture_handle_new_frame(ctx, &dummy_field, AVI_BUFFER_TIMEOUT);
+}
+
+/* Configure and activate the statistics segment (no-op when stats are
+ * disabled). Returns 0 or the avi_segment_set_format error. */
+static int avi_capture_stats_streamon(struct avi_capture_context *ctx)
+{
+	/* Hardcoded stats format. */
+	struct avi_segment_format vl_fmt = {
+		.width = AVI_CAPTURE_STATS_WIDTH,
+		.height = AVI_CAPTURE_STATS_HEIGHT,
+		.interlaced = 0,
+		.colorspace = 0,
+	};
+	struct avi_segment_format dma_fmt = vl_fmt;
+	struct avi_node *chain_bayer;
+	int ret;
+
+	if (!avi_capture_stats_enabled(ctx))
+		/* Nothing to do */
+		return 0;
+
+	chain_bayer = avi_segment_get_node(ctx->dma_segment,
+	                                   AVI_CAP_ISP_CHAIN_BAYER);
+
+	BUG_ON(!chain_bayer);
+	avi_statistics_bayer_configure(chain_bayer,
+	                               ctx->crop.width,
+	                               ctx->crop.height,
+	                               AVI_CAPTURE_THUMB_STATS_WIDTH,
+	                               AVI_CAPTURE_THUMB_STATS_HEIGHT);
+
+	dma_fmt = vl_fmt;
+	dma_fmt.pix_fmt = AVI_PIXFMT_BGRA8888;
+	dma_fmt.plane0.line_size = dma_fmt.width * 4;
+
+	ret = avi_segment_set_format(ctx->stats_segment, &vl_fmt, &dma_fmt);
+	if (ret)
+		return ret;
+
+	/* Initialize buffer rotation */
+	ctx->configured_stats = &ctx->stats[0];
+	ctx->capture_stats = &ctx->stats[1];
+
+	*ctx->configured_stats = avi_capture_null_buffer;
+	*ctx->capture_stats = avi_capture_null_buffer;
+
+	avi_segment_set_output_buffer(ctx->stats_segment,
+	                              ctx->configured_stats);
+
+	avi_segment_activate(ctx->stats_segment);
+
+	return 0;
+}
+
+/*
+ * Start streaming: connect the camera segment to the DMA segment,
+ * program the timings, initialize the buffer rotation and activate
+ * everything. Returns 0 or a negative errno (-EBUSY if already
+ * streaming); on error the segments are deactivated/disconnected.
+ */
+int avi_capture_streamon(struct avi_capture_context *ctx)
+{
+	enum avi_field dummy;
+	int ret;
+
+	if (atomic_read(&ctx->streaming))
+		return -EBUSY;
+
+	ret = avi_segment_connect(ctx->cam_segment, ctx->dma_segment, -1);
+	if (ret)
+		goto connect_failed;
+
+	/* We can configure the CAM module once and for all for progressive
+	 * mode. We arbitrarily select the top config for interlaced input. */
+	avi_capture_set_timings(ctx, &ctx->timings_top);
+
+	/* Initialize buffer rotation */
+	ctx->configured_buffer = &ctx->buffers[0];
+	ctx->capture_buffer = &ctx->buffers[1];
+
+	*ctx->configured_buffer = avi_capture_null_buffer;
+	*ctx->capture_buffer = avi_capture_null_buffer;
+
+	avi_segment_set_output_buffer(ctx->dma_segment, ctx->capture_buffer);
+
+	avi_cam_run(ctx->cam_node);
+
+	atomic_set(&ctx->streaming, 1);
+
+	avi_segment_register_irq(ctx->dma_segment, &avi_capture_interrupt);
+
+	ret = avi_capture_stats_streamon(ctx);
+	if (ret)
+		goto stats_streamon_failed;
+
+	ret = avi_segment_activate(ctx->cam_segment);
+	if (ret)
+		goto cam_activate_failed;
+
+	ret = avi_segment_activate(ctx->dma_segment);
+	if (ret)
+		goto dma_activate_failed;
+
+	setup_timer(&ctx->timer, avi_capture_timeout, (long unsigned)ctx);
+
+	/* The capture has started, we manually call the interrupt routine to
+	 * let it configure the first frame to capture. */
+	avi_capture_interrupt(ctx->dma_segment, &dummy);
+
+	return 0;
+
+ dma_activate_failed:
+	avi_segment_deactivate(ctx->cam_segment);
+ cam_activate_failed:
+	avi_segment_disconnect(ctx->cam_segment, ctx->dma_segment);
+ connect_failed:
+ stats_streamon_failed:
+	atomic_set(&ctx->streaming, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_capture_streamon);
+
+/*
+ * Stop streaming. In the default build this waits (up to the configured
+ * timeout) for the interrupt handler to shut the chain down after the
+ * last frame; otherwise (or on wait failure) the deactivation is forced
+ * from here. Returns -EINVAL when not streaming.
+ */
+int avi_capture_streamoff(struct avi_capture_context *ctx)
+{
+	unsigned long timeout;
+	/* NOTE(review): with CONFIG_AVICAM_INSTANT_STREAMOFF set, ret keeps
+	 * this -1 initializer, so the forced-deactivation path below always
+	 * runs and the function returns -1 even on a successful instant
+	 * stop — confirm callers expect a non-zero return in that config. */
+	int ret = -1;
+
+	if (atomic_read(&ctx->streaming) == 0)
+		return -EINVAL;
+
+	/* Disarm the watchdog for the duration of the shutdown */
+	timeout = ctx->timeout_jiffies;
+	ctx->timeout_jiffies = 0;
+	del_timer(&ctx->timer);
+
+#ifndef CONFIG_AVICAM_INSTANT_STREAMOFF
+	avi_segment_register_irq(ctx->dma_segment, &avi_capture_stop);
+
+	if (timeout == 0)
+		/* Default to one second */
+		timeout = HZ;
+
+	ret = wait_event_timeout(ctx->waitq,
+	                         !atomic_read(&ctx->streaming),
+	                         timeout);
+#endif
+
+	if (ret > 0) {
+		/* The interrupt handler deactivated everything and woke us
+		 * up */
+		ret = 0;
+	} else {
+		struct avi_node *dma_fifo;
+
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+
+		/* The IT handler wasn't called so force the deactivation from
+		 * here. This can happen if the input stream stopped before
+		 * us. */
+
+		atomic_set(&ctx->streaming, 0);
+
+		avi_segment_deactivate(ctx->dma_segment);
+		avi_segment_deactivate(ctx->cam_segment);
+		if (avi_capture_stats_enabled(ctx)) {
+			avi_segment_deactivate(ctx->stats_segment);
+
+			dma_fifo = avi_segment_get_node(ctx->stats_segment,
+			                                AVI_CAP_DMA_OUT);
+			BUG_ON(dma_fifo == NULL);
+			avi_apply_node(dma_fifo);
+		}
+
+		/* We apply the new configuration to make sure the fifo will not
+		 * send random garbage if a stream comes up */
+		dma_fifo = avi_segment_get_node(ctx->dma_segment, AVI_CAP_DMA_OUT);
+		BUG_ON(dma_fifo == NULL);
+
+		/* Because there is no input stream, no clear will be sent
+		 * 'naturally' so we need to force it */
+		avi_fifo_force_clear(dma_fifo);
+
+		avi_apply_node(dma_fifo);
+	}
+
+	avi_segment_disconnect(ctx->cam_segment, ctx->dma_segment);
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_capture_streamoff);
+
+/* Restore the hardware state after a suspend/resume cycle and restart
+ * the capture with the buffers already held in the context. */
+int avi_capture_resume(struct avi_capture_context *ctx)
+{
+	avi_capture_configure_interface(ctx);
+
+	if (avi_capture_stats_enabled(ctx)) {
+		avi_segment_set_output_buffer(ctx->stats_segment,
+		                              ctx->configured_stats);
+		avi_segment_activate(ctx->stats_segment);
+	}
+
+	avi_segment_set_output_buffer(ctx->dma_segment, ctx->capture_buffer);
+	avi_capture_set_timings(ctx, &ctx->timings_top);
+	avi_cam_run(ctx->cam_node);
+	avi_segment_enable_irq(ctx->dma_segment);
+
+	avi_segment_activate(ctx->cam_segment);
+	avi_segment_activate(ctx->dma_segment);
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_capture_resume);
diff --git a/drivers/parrot/media/video/avi_capture.h b/drivers/parrot/media/video/avi_capture.h
new file mode 100644
index 00000000000000..feffb79e1a7746
--- /dev/null
+++ b/drivers/parrot/media/video/avi_capture.h
@@ -0,0 +1,151 @@
+#ifndef _AVI_CAPTURE_H_
+#define _AVI_CAPTURE_H_
+
+#include "video/avi_segment.h"
+#include "avi_capture_timings.h"
+
+/* Hardcoded stats output config */
+#define AVI_CAPTURE_THUMB_STATS_WIDTH 64
+#define AVI_CAPTURE_THUMB_STATS_HEIGHT 48
+#define AVI_CAPTURE_STATS_WIDTH (AVI_CAPTURE_THUMB_STATS_WIDTH * 6)
+#define AVI_CAPTURE_STATS_HEIGHT (AVI_CAPTURE_THUMB_STATS_HEIGHT + 1)
+/* Size of a stat buffer (in bytes) */
+#define AVI_CAPTURE_STATS_FRAMESIZE (AVI_CAPTURE_STATS_WIDTH * \
+ AVI_CAPTURE_STATS_HEIGHT * \
+ 4)
+
+/* Cropping window, in pixels, relative to the captured frame */
+struct avi_capture_crop {
+	unsigned x; /* horizontal offset */
+	unsigned y; /* vertical offset */
+	unsigned width;
+	unsigned height;
+};
+
+struct avi_capture_context;
+
+/* Frame callback: invoked with the buffer being completed (done) or to be
+ * configured (next) and the field it belongs to. */
+typedef void (*avi_capture_frame_cback)(struct avi_capture_context *ctx,
+					struct avi_dma_buffer *frame,
+					enum avi_field field);
+
+struct avi_capture_context {
+	/* Callback when a frame is done capturing */
+	avi_capture_frame_cback done;
+	/* Callback for the configuration of the next frame */
+	avi_capture_frame_cback next;
+	/* Ditto for the stats frame. Those functions are *always* called after
+	 * the equivalent video callbacks above. This means that these stats
+	 * always match the last frame passed to done() or next(). */
+	avi_capture_frame_cback stats_done;
+	avi_capture_frame_cback stats_next;
+
+	/* Private client data. */
+	void *priv;
+
+	/* These members need to be correctly initialized before the first call
+	 * to avi_capture_get_measures */
+	union avi_cam_interface interface;
+	int interlaced;
+	enum avi_colorspace capture_cspace;
+	/* If set to NULL we use a builtin measure-to-timings conversion routine
+	 * that should work for standard-compliant 656 streams at
+	 * least. Unfortunately many sensors use non-standard timings and need
+	 * adjustment, for those we need custom code (usually in the BSP). */
+	avi_capture_get_timings measure_to_timings;
+
+	/* Owning device, used for logging and segment building */
+	struct device *dev;
+
+	/* Timings computed for each field (bot only used when interlaced) */
+	struct avi_capture_timings timings_top;
+	struct avi_capture_timings timings_bot;
+
+	/* Current cropping window */
+	struct avi_capture_crop crop;
+
+	/* Set once the measured timings are usable; waiters sleep on waitq */
+	atomic_t timings_valid;
+	wait_queue_head_t waitq;
+
+	/* We have two segments: one containing the camera and one containing
+	 * the DMA out. We need that in order to use either IRQ
+	 * independently. */
+	struct avi_segment *cam_segment;
+	struct avi_segment *dma_segment;
+	struct avi_segment *stats_segment;
+
+	/* DMA-out FIFO of the stats segment */
+	struct avi_node *stats_fifo;
+
+	/* Camera interface node */
+	struct avi_node *cam_node;
+
+	/* We need a rotation of two buffers: at any given moment while
+	 * streaming one will be receiving the frame while the next one will be
+	 * configured for receiving the next frame. */
+	struct avi_dma_buffer buffers[2];
+	struct avi_dma_buffer *configured_buffer;
+	struct avi_dma_buffer *capture_buffer;
+
+	/* Ditto for stats */
+	struct avi_dma_buffer stats[2];
+	struct avi_dma_buffer *configured_stats;
+	struct avi_dma_buffer *capture_stats;
+
+	/* Non-zero while a capture is running */
+	atomic_t streaming;
+
+	/* Give up while waiting for a frame for more than timeout_jiffies
+	 * jiffies. The frame is returned in the done() callback with status set
+	 * to AVI_BUFFER_TIMEOUT. If 0 the timeout is ignored. */
+	struct timer_list timer;
+	unsigned long timeout_jiffies;
+	/* Timestamps of the last seen/captured video and stats frames */
+	struct timeval last_frame;
+	struct timeval last_captured_frame;
+	struct timeval last_stats_frame;
+	struct timeval last_captured_stats_frame;
+
+	/* Protects the context against concurrent IRQ/client access */
+	spinlock_t lock;
+};
+
+/* Allocate the resources needed for the capture. If stats is true and caps
+ * contains an ISP we also allocate the resources for capturing the stat
+ * buffers. */
+extern int avi_capture_init(struct avi_capture_context *ctx,
+ struct device *dev,
+ int major,
+ unsigned long caps,
+ int enable_stats);
+
+extern void avi_capture_destroy(struct avi_capture_context *ctx);
+
+/* Configure the capture interface and get the video timings. This has to be
+ * called after any change to the input stream timings or the capture
+ * configuration in ctx.
+ *
+ * This call will block for a few frames' worth of time. */
+extern int avi_capture_prepare(struct avi_capture_context *ctx);
+
+/* After a call to avi_capture_get_measures this function can be used to get the
+ * resolution of the video. */
+extern void avi_capture_resolution(const struct avi_capture_context *ctx,
+ unsigned *width,
+ unsigned *height);
+
+extern int avi_capture_set_crop(struct avi_capture_context *ctx,
+ struct avi_capture_crop *crop);
+
+extern int avi_capture_try_format(const struct avi_capture_context *ctx,
+ struct avi_segment_format *fmt);
+
+extern int avi_capture_set_format(struct avi_capture_context *ctx,
+ struct avi_segment_format *fmt);
+
+extern int avi_capture_streamon (struct avi_capture_context *ctx);
+extern int avi_capture_streamoff(struct avi_capture_context *ctx);
+
+/* Resume capture after suspend to ram */
+extern int avi_capture_resume(struct avi_capture_context *ctx);
+
+extern void avi_capture_configure_interface(struct avi_capture_context *ctx);
+
+extern int avi_capture_prepare_timings(struct avi_capture_context *ctx,
+ unsigned width, unsigned height);
+
+/* Pairs a capture context with a framebuffer DMA offset; presumably used by
+ * multi-capture clients — confirm against callers. */
+struct avi_multicapt {
+	struct avi_capture_context *ctx;
+	dma_addr_t fb_offset_addr;
+};
+#endif /* _AVI_CAPTURE_H_ */
diff --git a/drivers/parrot/media/video/avi_capture_test.c b/drivers/parrot/media/video/avi_capture_test.c
new file mode 100644
index 00000000000000..1e1b0d16d41bc0
--- /dev/null
+++ b/drivers/parrot/media/video/avi_capture_test.c
@@ -0,0 +1,145 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <video/avi_pixfmt.h>
+#include "avi_capture.h"
+
+static struct avi_capture_context test_cap_ctx;
+
+/* 'next' callback: point every frame at a fixed framebuffer address. Bottom
+ * fields are offset by one line (1920 pixels of 2 bytes) so both fields end
+ * up interleaved in the same buffer. */
+static void avitest_next(struct avi_capture_context *ctx,
+			 struct avi_dma_buffer *frame,
+			 enum avi_field f)
+{
+	/* hardcoded fb address */
+	dma_addr_t addr = 0xbf000000;
+
+	if (f == AVI_BOT_FIELD)
+		addr += 1920 * 2;
+
+	frame->plane0.dma_addr = addr;
+}
+
+/* Module init for the test driver: configure a hard-coded ITU-R BT.656
+ * capture (UYVY 2x8 input on CAM0, RGB565 output, cropped to 1280x720) and
+ * start streaming to a fixed framebuffer address.
+ *
+ * Returns 0 on success, a negative error code otherwise (all resources are
+ * released on failure). */
+static int __init avitest_init(void)
+{
+	struct avi_capture_context *ctx = &test_cap_ctx;
+	struct avi_capture_crop crop;
+	int ret;
+	struct avi_segment_format fmt;
+
+	printk(KERN_INFO "probe\n");
+
+	/* CAM0 plus a chroma converter, no bayer stats */
+	ret = avi_capture_init(ctx, NULL, 0,
+			       AVI_CAP_CAM_0 | AVI_CAP_CONV,
+			       0);
+	if (ret) {
+		printk(KERN_ERR "init failed\n");
+		goto init_failed;
+	}
+
+	ctx->interface.itu656 = 1;
+	ctx->interface.format_control = AVI_FORMAT_CONTROL_UYVY_2X8;
+	ctx->interface.ipc = 0;
+	ctx->interface.pad_select = 1;
+
+	ctx->capture_cspace = AVI_BT709_CSPACE;
+
+	ctx->interlaced = 0;
+
+	/* Set frame timeout to 1 second */
+	ctx->timeout_jiffies = HZ;
+
+	/* Measure the input signal and compute the capture timings */
+	ret = avi_capture_prepare(ctx);
+	if (ret) {
+		printk(KERN_ERR "prepare failed\n");
+		goto prepare_failed;
+	}
+
+	avi_capture_resolution(ctx, &fmt.width, &fmt.height);
+
+#if 1
+	crop.x = 0;
+	crop.y = 0;
+
+	crop.width = 1280;
+	crop.height = 720;
+
+	ret = avi_capture_set_crop(ctx, &crop);
+	if (ret) {
+		printk(KERN_ERR "crop failed\n");
+		goto crop_failed;
+	}
+
+	fmt.width = crop.width;
+	fmt.height = crop.height;
+#endif
+
+	fmt.colorspace = AVI_RGB_CSPACE;
+	fmt.interlaced = ctx->interlaced;
+	fmt.pix_fmt = AVI_PIXFMT_RGB565;
+	fmt.plane0.line_size = fmt.width * avi_pixel_size0(fmt.pix_fmt);
+	fmt.plane1.line_size = fmt.width * avi_pixel_size1(fmt.pix_fmt);
+
+	/* Interlaced fields are stored interleaved: one field line spans two
+	 * memory lines */
+	if (fmt.interlaced) {
+		fmt.plane0.line_size *= 2;
+		fmt.plane1.line_size *= 2;
+	}
+
+	ret = avi_capture_set_format(ctx, &fmt);
+	if (ret) {
+		printk(KERN_ERR "set_format failed\n");
+		goto set_format_failed;
+	}
+
+	ctx->next = &avitest_next;
+
+	ret = avi_capture_streamon(ctx);
+	if (ret) {
+		printk(KERN_ERR "streamon failed\n");
+		goto streamon_failed;
+	}
+
+	printk(KERN_INFO "probe ok. Resolution: %ux%u\n", fmt.width, fmt.height);
+
+#if 0
+	for (;;) {
+		msleep(10);
+
+		crop.x += 2;
+		crop.y += 2;
+
+		if ((ret = avi_capture_set_crop(ctx, &crop))) {
+			printk(KERN_ERR "crop failed (%u, %u) %d\n", crop.x, crop.y, ret);
+			crop.x = crop.y = 0;
+		}
+	}
+#endif
+
+	return 0;
+
+ streamon_failed:
+ set_format_failed:
+ crop_failed:
+ prepare_failed:
+	avi_capture_destroy(ctx);
+ init_failed:
+	return ret;
+}
+module_init(avitest_init);
+
+static void __exit avitest_exit(void)
+{
+ struct avi_capture_context *ctx = &test_cap_ctx;
+ int ret;
+
+ ret = avi_capture_streamoff(ctx);
+ if (ret)
+ printk("streamoff failed [%d]\n", ret);
+
+ avi_capture_destroy(&test_cap_ctx);
+}
+module_exit(avitest_exit);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Test driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_capture_timings.h b/drivers/parrot/media/video/avi_capture_timings.h
new file mode 100644
index 00000000000000..43cd870f3c9fce
--- /dev/null
+++ b/drivers/parrot/media/video/avi_capture_timings.h
@@ -0,0 +1,14 @@
+#ifndef _AVI_CAPTURE_TIMINGS_H_
+#define _AVI_CAPTURE_TIMINGS_H_
+
+#include "video/avi.h"
+
+/* Raw horizontal/vertical timing register values for the camera interface */
+struct avi_capture_timings {
+	union avi_cam_h_timing ht;
+	union avi_cam_v_timing vt;
+};
+
+/* Converts the measured signal characteristics into capture timings */
+typedef void (*avi_capture_get_timings)(struct avi_capture_timings *t,
+					struct avi_cam_measure_regs *m);
+#endif /* _AVI_CAPTURE_TIMINGS_H_ */
diff --git a/drivers/parrot/media/video/avi_m2m.c b/drivers/parrot/media/video/avi_m2m.c
new file mode 100644
index 00000000000000..a5f79103aa4658
--- /dev/null
+++ b/drivers/parrot/media/video/avi_m2m.c
@@ -0,0 +1,2202 @@
+/*
+ * linux/drivers/parrot/video/media/avi_m2m.c
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Alexandre Dilly <alexandre.dilly@parrot.com>
+ * @date 03-Aug-2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "gpu/p7ump.h"
+
+#include "avi_v4l2.h"
+#include "avi_stats.h"
+#include "avi_m2m.h"
+
+#ifdef CONFIG_M2M_AVI_USE_ISP
+#include "avi_v4l2_isp.h"
+#endif
+
+#define DRIVER_NAME "avi_m2m"
+#define DRIVER_VERSION KERNEL_VERSION(0, 2, 0)
+
+/* Description of one side (source or destination) of a transfer: the V4L2
+ * pixel format, its crop rectangle/bounds, the matching AVI segment format
+ * and the per-plane sizes used when programming the DMA buffers. */
+struct avi_m2m_format {
+	struct v4l2_pix_format v4l2_fmt;
+	struct v4l2_rect bounds;
+	struct v4l2_rect rect;
+	struct avi_segment_format avi_fmt;
+	unsigned plane0_size;
+	unsigned plane1_size;
+};
+
+/* Per-instance state of one AVI memory-to-memory device */
+struct avi_m2m_dev {
+	struct device *dev;
+	int id;
+	/* 'mutex' serializes client calls; 'lock' protects the segments
+	 * against the IRQ/timeout handlers */
+	struct mutex mutex;
+	spinlock_t lock;
+	atomic_t num_inst;
+	/* Non-zero while a one-shot transfer is in flight */
+	atomic_t activated;
+	/* Bitmask of streaming v4l2_buf_types (see avi_m2m_set_streaming) */
+	unsigned long streaming;
+	unsigned long sequence;
+	/* V4L2 device */
+	struct v4l2_device *v4l2_dev;
+	struct video_device *vdev;
+	/* Videobuf2 */
+	struct vb2_alloc_ctx *alloc_ctx;
+	/* MEM2MEM device */
+	struct v4l2_m2m_dev *m2m_dev;
+	struct v4l2_m2m_ctx *m2m_ctx;
+	/* Input/output formats */
+	struct avi_m2m_format src_format;
+	struct avi_m2m_format dst_format;
+	unsigned long configuration_changed;
+	/* Composition */
+	struct v4l2_rect compose_rect;
+	int compose_alpha;
+	bool use_compose;
+	/* Control handler and ISP chain controls */
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct avi_v4l2_isp *ctrl_isp;
+#endif
+	/* Stats */
+	struct avi_stats stats;
+	bool stats_enabled;
+	bool use_stats;
+	/* Caps */
+	unsigned long caps;
+	unsigned long current_caps;
+	/* Segments */
+	struct avi_segment *in_seg;
+	struct avi_segment *out_seg;
+	struct avi_segment *stats_seg;
+	/* Timer (transfer timeout, see avi_m2m_timeout) */
+	struct timer_list timer;
+	/* Nodes */
+	struct avi_node *in_fifo_node;
+	struct avi_node *in_fifo_planar_node;
+	struct avi_node *out_fifo_node;
+	struct avi_node *stats_fifo_node;
+	struct avi_node *isp_node;
+	/* Buffers */
+	struct avi_dma_buffer src_buf;
+	struct avi_dma_buffer dst_buf;
+	struct avi_dma_buffer stats_buf;
+};
+
+/* Default values */
+#define AVI_M2M_DEFAULT_PIX_FMT V4L2_PIX_FMT_NV12
+#define AVI_M2M_DEFAULT_COLORSPACE V4L2_COLORSPACE_REC709
+#define AVI_M2M_DEFAULT_WIDTH 2048
+#define AVI_M2M_DEFAULT_HEIGHT 2048
+#define AVI_M2M_DEFAULT_ALPHA 0xFF
+
+/* Default format: 2048x2048 NV12 (REC709), crop and bounds covering the
+ * whole image */
+static struct avi_m2m_format avi_m2m_default_format = {
+	.v4l2_fmt = {
+		.width = AVI_M2M_DEFAULT_WIDTH,
+		.height = AVI_M2M_DEFAULT_HEIGHT,
+		.pixelformat = AVI_M2M_DEFAULT_PIX_FMT,
+		.field = V4L2_FIELD_NONE,
+		.bytesperline = 0,
+		.sizeimage = 0,
+		.colorspace = AVI_M2M_DEFAULT_COLORSPACE,
+		.priv = 0,
+	},
+	.rect = {
+		.top = 0,
+		.left = 0,
+		.width = AVI_M2M_DEFAULT_WIDTH,
+		.height = AVI_M2M_DEFAULT_HEIGHT,
+	},
+	.bounds = {
+		.top = 0,
+		.left = 0,
+		.width = AVI_M2M_DEFAULT_WIDTH,
+		.height = AVI_M2M_DEFAULT_HEIGHT,
+	},
+};
+
+/* Old R2R driver compatibility */
+static int z_order = 1;
+module_param(z_order, int, S_IRUGO);
+MODULE_PARM_DESC(z_order, "Z order of plane managed by this driver: default 1");
+
+/* Helpers */
+#define AVI_M2M_CHECK_TYPE(_type) (_type == V4L2_BUF_TYPE_VIDEO_OUTPUT\
+ || _type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+
+/* Return whether the given queue type is currently streaming. One bit per
+ * v4l2_buf_type in the 'streaming' bitmask. */
+static inline bool avi_m2m_is_streaming(struct avi_m2m_dev *avi_m2m,
+					enum v4l2_buf_type type)
+{
+	return test_bit(type, &avi_m2m->streaming) != 0;
+}
+
+/* Record the streaming state of the given queue type in the 'streaming'
+ * bitmask. */
+static inline void avi_m2m_set_streaming(struct avi_m2m_dev *avi_m2m,
+					 enum v4l2_buf_type type,
+					 bool stream_on)
+{
+	if (!stream_on) {
+		clear_bit(type, &avi_m2m->streaming);
+		return;
+	}
+
+	set_bit(type, &avi_m2m->streaming);
+}
+
+/*
+ * AVI configuration and helpers
+ */
+/* IRQ handler registered on the output segment. All the real work is done in
+ * the input segment handler (avi_m2m_interrupt); this one does nothing. */
+static unsigned avi_m2m_aux_interrupt(struct avi_segment *segment,
+				      enum avi_field *field)
+{
+	/* To remove? */
+	return 0;
+}
+
+/* IRQ handler of the input segment: called when a one-shot transfer
+ * completes. Acknowledges the FIFO interrupts, returns the source,
+ * destination and (optional) stats buffers to V4L2 and finishes the MEM2MEM
+ * job. Runs with avi_m2m->lock held in IRQ context. */
+static unsigned avi_m2m_interrupt(struct avi_segment *segment,
+				  enum avi_field *field)
+{
+	struct avi_m2m_dev *avi_m2m = segment->priv;
+	struct vb2_buffer *src_vb, *dst_vb;
+	unsigned long flags;
+
+	/* Lock segment access */
+	spin_lock_irqsave(&avi_m2m->lock, flags);
+
+	/* Check if segment is activated (the timeout handler may have won the
+	 * race and already cleaned up) */
+	if (!atomic_read(&avi_m2m->activated))
+		goto unlock;
+	atomic_set(&avi_m2m->activated, 0);
+
+	/* Reset timer */
+	del_timer(&avi_m2m->timer);
+
+	/* Update output FIFO status */
+	avi_fifo_get_status(avi_m2m->out_fifo_node);
+
+	/* Ack input FIFO node IRQ */
+	avi_fifo_get_status(avi_m2m->in_fifo_node);
+	if (avi_node_get_irq_flag(avi_m2m->in_fifo_node))
+		avi_ack_node_irq(avi_m2m->in_fifo_node);
+
+	/* Ack node IRQ for multi planar mode */
+	if (avi_m2m->in_fifo_planar_node) {
+		avi_fifo_get_status(avi_m2m->in_fifo_planar_node);
+		if (avi_node_get_irq_flag(avi_m2m->in_fifo_planar_node))
+			avi_ack_node_irq(avi_m2m->in_fifo_planar_node);
+	}
+
+	/* Check stats buffer output */
+	if (avi_m2m->stats_seg) {
+		if (avi_node_get_irq_flag(avi_m2m->stats_fifo_node))
+			avi_m2m->stats_buf.status = AVI_BUFFER_DONE;
+		else
+			/* Looks like the stats are not ready... */
+			avi_m2m->stats_buf.status = AVI_BUFFER_ERROR;
+
+		/* Deactivate segment since IRQ for this segment is disabled and
+		 * it is activated in oneshot mode.
+		 */
+		avi_segment_deactivate(avi_m2m->stats_seg);
+
+		/* Make sure we never leave the IT flag raised */
+		avi_ack_node_irq(avi_m2m->stats_fifo_node);
+	}
+
+	/* Mark buffers as done */
+	avi_m2m->src_buf.status = AVI_BUFFER_DONE;
+	avi_m2m->dst_buf.status = AVI_BUFFER_DONE;
+
+	/* Get current buffers from MEM2MEM context */
+	src_vb = v4l2_m2m_src_buf_remove(avi_m2m->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(avi_m2m->m2m_ctx);
+
+	/* Frame is too late and it has already been processed by timeout
+	 * interrupt. In this case, the timeout should be increased.
+	 */
+	if (!src_vb || !dst_vb) {
+		dev_warn(avi_m2m->dev, "Frame is too late...\n");
+		goto unlock;
+	}
+
+	/* Release buffers */
+	v4l2_m2m_buf_done(src_vb, avi_m2m->src_buf.status == AVI_BUFFER_DONE ?
+								VB2_BUF_STATE_DONE :
+								VB2_BUF_STATE_ERROR);
+	v4l2_m2m_buf_done(dst_vb, avi_m2m->dst_buf.status == AVI_BUFFER_DONE ?
+								VB2_BUF_STATE_DONE :
+								VB2_BUF_STATE_ERROR);
+
+	/* Return stats buffer to V4L2
+	 * When buffer status is AVI_BUFFER_ERROR, it means that bayer
+	 * statistics segment has not raised an interrupt which can be due to
+	 * that module is not used or interrupt is raised later. In this case,
+	 * we requeue buffer internally instead of returning to videobuf2 (see
+	 * avi_stats code).
+	 */
+	if (avi_m2m->stats_enabled && avi_m2m->stats_buf.plane0.dma_addr)
+		avi_stats_done(&avi_m2m->stats, &avi_m2m->stats_buf,
+			       src_vb->v4l2_buf.sequence, 1);
+
+	/* Update V4L2 buffer fields */
+	if (avi_m2m->src_buf.status == AVI_BUFFER_DONE &&
+	    avi_m2m->dst_buf.status == AVI_BUFFER_DONE) {
+		/* Update timestamp */
+		v4l2_get_timestamp(&dst_vb->v4l2_buf.timestamp);
+
+		/* Copy timecode when provided */
+		if (src_vb->v4l2_buf.flags & V4L2_BUF_FLAG_TIMECODE) {
+			dst_vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
+			dst_vb->v4l2_buf.timecode = src_vb->v4l2_buf.timecode;
+		}
+
+		/* Copy sequence number */
+		dst_vb->v4l2_buf.sequence = src_vb->v4l2_buf.sequence;
+	}
+
+	/* Job is finished */
+	v4l2_m2m_job_finish(avi_m2m->m2m_dev, avi_m2m->m2m_ctx);
+
+unlock:
+	/* Unlock segment access */
+	spin_unlock_irqrestore(&avi_m2m->lock, flags);
+
+	return 0;
+}
+
+/* Timer callback fired when a one-shot transfer takes longer than 1s: the
+ * FIFOs are forcibly cleared, the segments deactivated and the current
+ * buffers returned to V4L2 in error state. Runs in timer (softirq)
+ * context. */
+static void avi_m2m_timeout(unsigned long data)
+{
+	struct avi_m2m_dev *avi_m2m = (struct avi_m2m_dev *) data;
+	struct vb2_buffer *src_vb, *dst_vb;
+	unsigned long flags;
+
+	/* Check if AVI is streaming */
+	if (!avi_m2m_is_streaming(avi_m2m, V4L2_BUF_TYPE_VIDEO_CAPTURE))
+		return;
+
+	/* Lock segment access */
+	spin_lock_irqsave(&avi_m2m->lock, flags);
+
+	/* Check if segment is activated (the completion IRQ may have won the
+	 * race and already finished the job) */
+	if (!atomic_read(&avi_m2m->activated))
+		goto unlock;
+	atomic_set(&avi_m2m->activated, 0);
+
+	/* Warn a frame has been dropped */
+	dev_warn(avi_m2m->dev, "Timeout, dropping frame...\n");
+
+	/* Clean the stats chain and deactivate its segment */
+	if (avi_m2m->stats_seg) {
+		/*avi_fifo_force_clear*/
+		avi_segment_deactivate(avi_m2m->stats_seg);
+		avi_fifo_force_clear(avi_m2m->stats_fifo_node);
+		avi_apply_node(avi_m2m->stats_fifo_node);
+	}
+
+	/* Deactivate segments */
+	avi_segment_deactivate(avi_m2m->out_seg);
+	avi_segment_deactivate(avi_m2m->in_seg);
+
+	/* Clean the whole chain */
+	avi_fifo_force_clear(avi_m2m->in_fifo_node);
+	avi_fifo_force_clear(avi_m2m->out_fifo_node);
+	avi_apply_node(avi_m2m->out_fifo_node);
+	avi_apply_node(avi_m2m->in_fifo_node);
+
+	/* Get current buffers from MEM2MEM context */
+	src_vb = v4l2_m2m_src_buf_remove(avi_m2m->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(avi_m2m->m2m_ctx);
+
+	/* Check buffers */
+	if (!src_vb || !dst_vb) {
+		dev_err(avi_m2m->dev, "No V4L2 buffer available!\n");
+		goto unlock;
+	}
+
+	/* Release buffers with error status */
+	v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+	v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+
+	/* Release stats buffer with error status */
+	if (avi_m2m->stats_enabled && avi_m2m->stats_buf.plane0.dma_addr) {
+		avi_m2m->stats_buf.status = AVI_BUFFER_ERROR;
+		avi_stats_done(&avi_m2m->stats, &avi_m2m->stats_buf,
+			       src_vb->v4l2_buf.sequence, 1);
+	}
+
+	/* Job is finished */
+	v4l2_m2m_job_finish(avi_m2m->m2m_dev, avi_m2m->m2m_ctx);
+
+unlock:
+	/* Unlock segment access */
+	spin_unlock_irqrestore(&avi_m2m->lock, flags);
+}
+
+/* Build and connect the AVI segments needed for the current source format:
+ * an input segment (DMA-in plus scaler/converter or ISP chain), an output
+ * segment (DMA-out) and, when the ISP is used with stats enabled, a bayer
+ * stats segment with its own DMA-out.
+ *
+ * Returns 0 on success or a negative error code. On failure every resource
+ * acquired so far is released (the original code returned 0 and/or leaked
+ * segments on several error paths; this is fixed below). */
+static int avi_m2m_build_segments(struct avi_m2m_dev *avi_m2m)
+{
+	struct avi_dma_pixfmt src_pixfmt;
+	enum avi_colorspace src_cspace;
+	enum avi_colorspace dst_cspace;
+	unsigned long requirements;
+	unsigned long caps;
+	int ret = 0;
+
+	/* Convert V4L2 formats to AVI formats */
+	src_pixfmt = avi_v4l2_to_avi_pixfmt(
+				avi_m2m->src_format.v4l2_fmt.pixelformat);
+	src_cspace = avi_v4l2_to_avi_cspace(
+				avi_m2m->src_format.v4l2_fmt.colorspace);
+	dst_cspace = avi_v4l2_to_avi_cspace(
+				avi_m2m->dst_format.v4l2_fmt.colorspace);
+
+	/* Determine needed caps */
+	if (src_pixfmt.raw) {
+		/* We need an ISP */
+		requirements = AVI_CAPS_ISP;
+
+		/* Stats can be used */
+		avi_m2m->use_stats = 1;
+	} else {
+		/* We need a scaler */
+		requirements = AVI_CAP_SCAL;
+
+		/* We need a colorspace converter */
+		if (src_cspace != dst_cspace)
+			requirements |= AVI_CAP_CONV;
+
+		/* We use planar */
+		if (avi_pixfmt_is_planar(src_pixfmt))
+			requirements |= AVI_CAP_PLANAR;
+
+		/* Stats are not available */
+		avi_m2m->use_stats = 0;
+	}
+
+	/* Check caps availability */
+	if ((avi_m2m->caps & requirements) != requirements) {
+		dev_err(avi_m2m->dev, "Capabilities are not available!\n");
+		return -EINVAL;
+	}
+
+	/* Build output segment (needs only DMA_OUT) */
+	caps = AVI_CAP_DMA_OUT;
+	avi_m2m->out_seg = avi_segment_build(&caps, "m2m", avi_m2m->id, -1,
+					     avi_m2m->dev);
+	if (IS_ERR(avi_m2m->out_seg)) {
+		ret = PTR_ERR(avi_m2m->out_seg);
+		avi_m2m->out_seg = NULL;
+		goto out_seg_failed;
+	}
+
+	/* Build input segment (all with DMA_IN) */
+	caps = requirements | AVI_CAP_DMA_IN;
+	avi_m2m->in_seg = avi_segment_build(&caps, "m2m-in", avi_m2m->id, -1,
+					    avi_m2m->dev);
+	if (IS_ERR(avi_m2m->in_seg)) {
+		ret = PTR_ERR(avi_m2m->in_seg);
+		avi_m2m->in_seg = NULL;
+		goto in_seg_failed;
+	}
+
+	/* Set current caps used */
+	avi_m2m->current_caps = requirements | AVI_CAP_DMA_IN | AVI_CAP_DMA_OUT;
+
+	/* Build a segment for stats if ISP is used */
+	if (avi_m2m->stats_enabled &&
+	    (requirements & AVI_CAP_ISP_CHAIN_BAYER)) {
+		/* Check if bayer stats are available in input segment */
+		caps = avi_isp_bayer_get_stats_cap(avi_m2m->in_seg);
+		if (!caps) {
+			dev_err(avi_m2m->dev, "can't find stats cap");
+			/* Make sure the caller sees the failure */
+			ret = -ENODEV;
+			goto bayer_caps_failed;
+		}
+
+		/* Build stats segment */
+		caps |= AVI_CAP_DMA_OUT;
+		avi_m2m->stats_seg = avi_segment_build(&caps, "m2m-stats",
+						       avi_m2m->id, -1,
+						       avi_m2m->dev);
+		if (IS_ERR(avi_m2m->stats_seg)) {
+			ret = PTR_ERR(avi_m2m->stats_seg);
+			/* Don't let the error pointer reach the cleanup
+			 * code */
+			avi_m2m->stats_seg = NULL;
+			goto stats_seg_failed;
+		}
+
+		/* Get FIFO node of stats segment */
+		avi_m2m->stats_fifo_node = avi_segment_get_node(
+							avi_m2m->stats_seg,
+							AVI_CAP_DMA_OUT);
+		if (!avi_m2m->stats_fifo_node) {
+			dev_err(avi_m2m->dev,
+				"Failed to get stats FIFO node!\n");
+			ret = -ENODEV;
+			goto stats_node_failed;
+		}
+
+		/* Get BAYER ISP node */
+		avi_m2m->isp_node = avi_segment_get_node(avi_m2m->in_seg,
+						AVI_CAP_ISP_CHAIN_BAYER);
+		if (!avi_m2m->isp_node) {
+			dev_err(avi_m2m->dev, "Failed to get bayer node!\n");
+			ret = -ENODEV;
+			goto isp_node_failed;
+		}
+
+	} else {
+		/* No stats needed */
+		avi_m2m->stats_seg = NULL;
+	}
+
+	/* Connect segments */
+	ret = avi_segment_connect(avi_m2m->in_seg, avi_m2m->out_seg, z_order);
+	if (ret) {
+		dev_err(avi_m2m->dev, "Cannot connect segments %s to %s",
+			avi_m2m->in_seg->id, avi_m2m->out_seg->id);
+		goto connect_failed;
+	}
+
+	/* Get FIFO node of input segment */
+	avi_m2m->in_fifo_node = avi_segment_get_node(avi_m2m->in_seg,
+						     AVI_CAP_DMA_IN);
+	if (!avi_m2m->in_fifo_node) {
+		dev_err(avi_m2m->dev, "Failed to get input FIFO node!\n");
+		ret = -ENODEV;
+		goto in_node_failed;
+	}
+
+	/* Get FIFO node of output segment */
+	avi_m2m->out_fifo_node = avi_segment_get_node(avi_m2m->out_seg,
+						      AVI_CAP_DMA_OUT);
+	if (!avi_m2m->out_fifo_node) {
+		dev_err(avi_m2m->dev, "Failed to get output FIFO node!\n");
+		ret = -ENODEV;
+		goto out_node_failed;
+	}
+
+	/* Get FIFO node of planar source */
+	if (requirements & AVI_CAP_PLANAR) {
+		avi_m2m->in_fifo_planar_node = avi_segment_get_node(
+							avi_m2m->in_seg,
+							AVI_CAP_PLANAR);
+		if (!avi_m2m->in_fifo_planar_node) {
+			dev_err(avi_m2m->dev,
+				"Failed to get planar FIFO node!\n");
+			ret = -ENODEV;
+			goto planar_node_failed;
+		}
+	}
+
+	/* Set private in segments for IRQ */
+	avi_m2m->in_seg->priv = avi_m2m;
+	avi_m2m->out_seg->priv = avi_m2m;
+
+	/* Request segments' IRQ */
+	avi_segment_register_irq(avi_m2m->in_seg, &avi_m2m_interrupt);
+	avi_segment_register_irq(avi_m2m->out_seg, &avi_m2m_aux_interrupt);
+
+	/* Create timer for timeout handling */
+	init_timer(&avi_m2m->timer);
+	setup_timer(&avi_m2m->timer, avi_m2m_timeout, (long unsigned) avi_m2m);
+
+	/* Enable segment IRQ (DMA_OUT) */
+	avi_segment_enable_irq(avi_m2m->out_seg);
+
+	return 0;
+
+planar_node_failed:
+out_node_failed:
+in_node_failed:
+	avi_segment_disconnect(avi_m2m->in_seg, avi_m2m->out_seg);
+connect_failed:
+isp_node_failed:
+stats_node_failed:
+	if (avi_m2m->stats_seg) {
+		avi_segment_teardown(avi_m2m->stats_seg);
+		avi_m2m->stats_seg = NULL;
+	}
+stats_seg_failed:
+bayer_caps_failed:
+	/* The input segment was already built when the stats setup failed:
+	 * tear it down too */
+	avi_segment_teardown(avi_m2m->in_seg);
+	avi_m2m->in_seg = NULL;
+in_seg_failed:
+	avi_segment_teardown(avi_m2m->out_seg);
+	avi_m2m->out_seg = NULL;
+out_seg_failed:
+	return ret;
+}
+
+/* Program the source/destination (and optional stats) buffers and fire a
+ * single transfer: every segment is activated in one-shot mode and a 1s
+ * timeout timer is armed. Returns 0 on success or a negative error code
+ * (all segments deactivated on failure). */
+static int avi_m2m_activate_oneshot(struct avi_m2m_dev *avi_m2m)
+{
+	int ret;
+
+	/* Setup buffers */
+	avi_segment_set_input_buffer(avi_m2m->in_seg, &avi_m2m->src_buf);
+	avi_segment_set_output_buffer(avi_m2m->out_seg, &avi_m2m->dst_buf);
+
+	/* Setup stats buffer and activate its segment */
+	if (avi_m2m->stats_seg && avi_m2m->stats_buf.plane0.dma_addr) {
+		/* Setup stats buffer */
+		avi_segment_set_output_buffer(avi_m2m->stats_seg,
+					      &avi_m2m->stats_buf);
+
+		/* Activate stats segment */
+		ret = avi_segment_activate_oneshot(avi_m2m->stats_seg);
+		if (ret) {
+			dev_err(avi_m2m->dev,
+				"Failed to activate stats segment!\n");
+			goto activate_stats_failed;
+		}
+	}
+
+	/* Activate input segment */
+	ret = avi_segment_activate_oneshot(avi_m2m->in_seg);
+	if (ret) {
+		dev_err(avi_m2m->dev, "Failed to activate input segment!\n");
+		goto activate_input_failed;
+	}
+
+	/* Activate output segment */
+	ret = avi_segment_activate_oneshot(avi_m2m->out_seg);
+	if (ret) {
+		dev_err(avi_m2m->dev, "Failed to activate output segment!\n");
+		goto activate_output_failed;
+	}
+
+	/* Segments are activated */
+	atomic_set(&avi_m2m->activated, 1);
+
+	/* Setup timer for transfer timeout (1s) */
+	mod_timer(&avi_m2m->timer, jiffies + HZ);
+
+	return 0;
+
+activate_output_failed:
+	avi_segment_deactivate(avi_m2m->in_seg);
+activate_input_failed:
+	if (avi_m2m->stats_seg && avi_m2m->stats_buf.plane0.dma_addr)
+		avi_segment_deactivate(avi_m2m->stats_seg);
+activate_stats_failed:
+	return ret;
+}
+
+/* Deactivate all segments and force-clear the FIFOs so the hardware is left
+ * in a clean state. Takes avi_m2m->lock to serialize with the IRQ/timeout
+ * handlers. Always returns 0. */
+static int avi_m2m_deactivate_segments(struct avi_m2m_dev *avi_m2m)
+{
+	/* Lock segment access */
+	spin_lock(&avi_m2m->lock);
+
+	/* Segments are deactivated */
+	atomic_set(&avi_m2m->activated, 0);
+
+	/* Deactivate stats */
+	if (avi_m2m->stats_seg) {
+		/* Deactivate and teardown */
+		avi_segment_deactivate(avi_m2m->stats_seg);
+
+		/* Force fifo clear FIFO node of stats segment */
+		avi_fifo_force_clear(avi_m2m->stats_fifo_node);
+		avi_apply_node(avi_m2m->stats_fifo_node);
+	}
+
+	/* Deactivate segments */
+	avi_segment_deactivate(avi_m2m->in_seg);
+	avi_segment_deactivate(avi_m2m->out_seg);
+
+	/* Recover in case of freeze */
+	avi_fifo_force_clear(avi_m2m->out_fifo_node);
+
+	/* Unlock segment access */
+	spin_unlock(&avi_m2m->lock);
+
+	return 0;
+}
+
+/* Release everything acquired by avi_m2m_build_segments: IRQ, timer, stats
+ * segment, segment connection and the segments themselves. Always returns
+ * 0. */
+static int avi_m2m_destroy_segments(struct avi_m2m_dev *avi_m2m)
+{
+	/* Disable segment IRQ (DMA_OUT) */
+	avi_segment_disable_irq(avi_m2m->out_seg);
+
+	/* Remove timer */
+	del_timer(&avi_m2m->timer);
+
+	/* Destroy stats */
+	if (avi_m2m->stats_seg)
+		avi_segment_teardown(avi_m2m->stats_seg);
+
+	/* Disconnect segments */
+	avi_segment_disconnect(avi_m2m->in_seg, avi_m2m->out_seg);
+
+	/* Destroy segments */
+	avi_segment_teardown(avi_m2m->in_seg);
+	avi_segment_teardown(avi_m2m->out_seg);
+
+	/* Reset segments */
+	avi_m2m->in_seg = NULL;
+	avi_m2m->out_seg = NULL;
+	avi_m2m->stats_seg = NULL;
+
+	return 0;
+}
+
+/* Validate a DMA buffer before queuing it to the hardware: it must be in the
+ * READY state and carry a DMA address for every plane the format uses.
+ * Returns 0 if usable, -EINVAL otherwise. */
+static int avi_m2m_check_buffer(struct avi_m2m_dev *avi_m2m,
+				struct avi_m2m_format *fmt,
+				struct avi_dma_buffer *buf)
+{
+	if (buf->status != AVI_BUFFER_READY) {
+		dev_err(avi_m2m->dev, "Buffer not ready!\n");
+		return -EINVAL;
+	}
+
+	if (!buf->plane0.dma_addr) {
+		dev_err(avi_m2m->dev, "Buffer plane #0 address is zero!\n");
+		return -EINVAL;
+	}
+
+	/* A second plane is only required in multi-planar mode */
+	if (fmt && fmt->avi_fmt.plane1.line_size != 0 &&
+	    !buf->plane1.dma_addr) {
+		dev_err(avi_m2m->dev, "Buffer plane #1 address is zero!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Adjust rectangle into image: clamp the strip's width/height (and the
+ * matching left/top offsets) to the limits the pixel format imposes for the
+ * given image dimensions. */
+static void avi_m2m_adjust_strip_into_image(struct v4l2_rect *rect,
+					    struct v4l2_pix_format *fmt)
+{
+	struct avi_dma_pixfmt pix_fmt =
+				avi_v4l2_to_avi_pixfmt(fmt->pixelformat);
+
+	avi_limit_adjust_w_strip(pix_fmt, fmt->width,
+				 &rect->width, &rect->left);
+	avi_limit_adjust_h_strip(pix_fmt, fmt->height,
+				 &rect->height, &rect->top);
+}
+/* Adjust rectangle into bounds: rectangle is truncated into bounds. The
+ * left/top edges are pushed inside the bounds and the width/height are
+ * shrunk so the rectangle's far edges don't overshoot them. */
+static void avi_m2m_adjust_crop_into_bounds(struct v4l2_rect *rect,
+					    const struct v4l2_rect *bounds)
+{
+	u32 val, limit;
+
+	/* Left side */
+	if (rect->left < bounds->left)
+		rect->left = bounds->left;
+
+	/* Top side */
+	if (rect->top < bounds->top)
+		rect->top = bounds->top;
+
+	/* Right side: shrink the width by the overshoot amount (the original
+	 * code set the width *to* the overshoot, returning a bogus crop) */
+	limit = bounds->left + bounds->width - 1;
+	val = rect->left + rect->width - 1;
+	if (val > limit)
+		rect->width -= val - limit;
+
+	/* Bottom side: likewise for the height */
+	limit = bounds->top + bounds->height - 1;
+	val = rect->top + rect->height - 1;
+	if (val > limit)
+		rect->height -= val - limit;
+}
+
+/* Fill an AVI DMA buffer descriptor from a videobuf2 buffer: the plane
+ * addresses are derived from the vb2 contiguous DMA address plus the
+ * format's crop rectangle and per-plane sizes. */
+static void avi_m2m_setup_avi_dma_buffer(struct vb2_buffer *vb,
+					 struct avi_m2m_format *fmt,
+					 struct avi_dma_buffer *buf)
+{
+	dma_addr_t dma_addr;
+
+	/* Get DMA address from vb2 buffer */
+	dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+	/* Setup AVI DMA buffer */
+	avi_v4l2_setup_dmabuf(&fmt->rect, &fmt->avi_fmt, fmt->plane0_size,
+			      fmt->plane1_size, dma_addr, vb, buf);
+}
+
+/* Convert the V4L2 side of an avi_m2m_format into its AVI segment format,
+ * computing the per-plane sizes on the way. Interlaced transfer is not
+ * supported so the segment format is forced progressive. */
+static inline void avi_m2m_to_avi_segment_format(struct avi_m2m_format *fmt)
+{
+	avi_v4l2_to_segment_fmt(&fmt->v4l2_fmt, &fmt->rect, &fmt->avi_fmt,
+				&fmt->plane0_size, &fmt->plane1_size);
+
+	/* We support only progressive mode */
+	fmt->avi_fmt.interlaced = 0;
+}
+
+/* Sanity-check an AVI segment format: non-zero resolution, even width/height
+ * where the chroma subsampling of the packing requires it, and line sizes
+ * large enough for each plane. Returns 0 if the format is usable, -EINVAL
+ * otherwise. */
+static int avi_m2m_check_format(struct avi_m2m_dev *avi_m2m,
+				struct avi_segment_format *fmt)
+{
+	enum avi_pixel_packing packing;
+	int w;
+
+	/* Check resolution */
+	if (fmt->width == 0 || fmt->height == 0) {
+		dev_err(avi_m2m->dev, "Bad resolution!\n");
+		return -EINVAL;
+	}
+
+	/* Get packing */
+	packing = avi_pixfmt_get_packing(fmt->pix_fmt);
+
+	/* Width must be even to match the subsampling */
+	if ((packing == AVI_SEMIPLANAR_YUV_422_PACKING ||
+	     packing == AVI_SEMIPLANAR_YUV_420_PACKING ||
+	     packing == AVI_INTERLEAVED_YUV_422_PACKING) &&
+	    fmt->width % 2) {
+		dev_err(avi_m2m->dev, "Width is not even (%d)!\n", fmt->width);
+		return -EINVAL;
+	}
+
+	/* Height must be even to match the subsampling */
+	if (packing == AVI_SEMIPLANAR_YUV_420_PACKING &&
+	    fmt->height % 2) {
+		dev_err(avi_m2m->dev, "Height is not even (%d)!\n",
+			fmt->height);
+		return -EINVAL;
+	}
+
+	/* Check line size */
+	w = fmt->width * avi_pixel_size0(fmt->pix_fmt);
+	if (fmt->plane0.line_size == 0 || w > fmt->plane0.line_size) {
+		dev_err(avi_m2m->dev, "Bad line size for plane #0 (%d / %d)!\n",
+			fmt->plane0.line_size, w);
+		return -EINVAL;
+	}
+
+	/* Check line size in multi planar */
+	if (avi_pixel_size1(fmt->pix_fmt)) {
+		w = fmt->width * avi_pixel_size1(fmt->pix_fmt);
+		if (fmt->plane1.line_size == 0 || w > fmt->plane1.line_size) {
+			dev_err(avi_m2m->dev,
+				"Bad line size for plane #1 (%d / %d)!\n",
+				fmt->plane1.line_size, w);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Program the AVI input/output (and optional stats) segments for the
+ * current source/destination formats. Formats are validated first, then
+ * segment formats are only rewritten when they differ from the ones the
+ * segments already carry (memcmp via CHECK_FMTS/CHECK_LAYS below).
+ * Returns 0 on success or a negative errno. Called from the device_run
+ * path, so the m2m job context serializes access. */
+static int avi_m2m_apply_format(struct avi_m2m_dev *avi_m2m)
+{
+	struct avi_segment_format *src_fmt = &avi_m2m->src_format.avi_fmt;
+	struct avi_segment_format *dst_fmt = &avi_m2m->dst_format.avi_fmt;
+	struct avi_segment_format left_fmt, right_fmt;
+	struct avi_segment_layout compose_layout;
+	struct avi_segment_format compose_fmt;
+	int ret;
+
+	/* Prepare compose format: when composing, the intermediate format
+	 * takes the compose rectangle geometry and the destination
+	 * colorspace; otherwise it mirrors the destination format with a
+	 * neutral (opaque, origin) layout. */
+	if (avi_m2m->use_compose) {
+		compose_fmt.width = avi_m2m->compose_rect.width;
+		compose_fmt.height = avi_m2m->compose_rect.height;
+		compose_fmt.colorspace = dst_fmt->colorspace;
+		compose_layout.x = avi_m2m->compose_rect.left;
+		compose_layout.y = avi_m2m->compose_rect.top;
+		compose_layout.alpha = avi_m2m->compose_alpha;
+		compose_layout.hidden = 0;
+	} else {
+		compose_fmt = *dst_fmt;
+		compose_layout.x = 0;
+		compose_layout.y = 0;
+		compose_layout.alpha = 0xFF;
+		compose_layout.hidden = 0;
+	}
+
+	/* Force non-DMA and progressive mode for compose */
+	compose_fmt.pix_fmt = AVI_PIXFMT_INVALID;
+	compose_fmt.plane0.line_size = 0;
+	compose_fmt.plane1.line_size = 0;
+	compose_fmt.interlaced = 0;
+
+	/* Check formats */
+	if (avi_m2m_check_format(avi_m2m, src_fmt) ||
+	    avi_m2m_check_format(avi_m2m, dst_fmt))
+		return -EINVAL;
+
+	/* Any resolution change between source and destination requires the
+	 * scaler capability */
+	if ((src_fmt->width != dst_fmt->width ||
+	     src_fmt->height != dst_fmt->height) &&
+	    !(avi_m2m->current_caps & AVI_CAP_SCAL))
+		return -EINVAL;
+
+	/* Determine formats between input and output segments
+	 * Composing: left = compose, right = target
+	 * +--------+              +------+-------+               +--------+
+	 * | source |-- INPUT -->  | left | right |--> OUTPUT --> | target |
+	 * +--------+              +------+-------+               +--------+
+	 * Not composing: left = target, right = target
+	 */
+	right_fmt = *dst_fmt;
+	left_fmt = avi_m2m->use_compose ? compose_fmt : right_fmt;
+
+	/* Force non-DMA mode for the intermediate formats (DMA only happens
+	 * at the pipeline ends) */
+	right_fmt.pix_fmt = AVI_PIXFMT_INVALID;
+	left_fmt.pix_fmt = AVI_PIXFMT_INVALID;
+	right_fmt.plane0.line_size = 0;
+	left_fmt.plane0.line_size = 0;
+	right_fmt.plane1.line_size = 0;
+	left_fmt.plane1.line_size = 0;
+
+#define CHECK_FMTS(a, b) memcmp(a, b, sizeof(struct avi_segment_format))
+#define CHECK_LAYS(a, b) memcmp(a, b, sizeof(struct avi_segment_layout))
+
+	/* Update output segment format (only when it changed) */
+	if ((CHECK_FMTS(&avi_m2m->out_seg->input_format, &right_fmt) != 0) ||
+	    (CHECK_FMTS(&avi_m2m->out_seg->output_format, dst_fmt) != 0) ) {
+		ret = avi_segment_set_format(avi_m2m->out_seg, &right_fmt,
+					     dst_fmt);
+		if (ret) {
+			dev_err(avi_m2m->dev, "Failed to set output format!\n");
+			return ret;
+		}
+	}
+
+	/* Update input segment (only when format or layout changed) */
+	if ((CHECK_FMTS(&avi_m2m->in_seg->input_format, src_fmt) != 0) ||
+	    (CHECK_FMTS(&avi_m2m->in_seg->output_format, &left_fmt) != 0) ||
+	    (CHECK_LAYS(&avi_m2m->in_seg->layout, &compose_layout) != 0)) {
+		ret = avi_segment_set_format_and_layout(avi_m2m->in_seg,
+							src_fmt, &left_fmt,
+							&compose_layout);
+		if (ret) {
+			dev_err(avi_m2m->dev, "Failed to set input format!\n");
+			return ret;
+		}
+
+		/* Input format changed: force a stats reconfiguration
+		 * (note the "force" flag set to 1 here) */
+		if (avi_m2m->stats_seg && avi_m2m->use_stats) {
+			ret = avi_stats_apply_format(&avi_m2m->stats,
+						     avi_m2m->isp_node,
+						     avi_m2m->stats_seg,
+						     src_fmt, 1);
+			if (ret) {
+				dev_err(avi_m2m->dev,
+					"Failed to set stats format!\n");
+				return ret;
+			}
+		}
+	}
+
+#undef CHECK_LAYS
+#undef CHECK_FMTS
+
+	/* Update stats format (non-forced refresh, runs on every call) */
+	if (avi_m2m->stats_seg && avi_m2m->use_stats) {
+		ret = avi_stats_apply_format(&avi_m2m->stats, avi_m2m->isp_node,
+					     avi_m2m->stats_seg, src_fmt, 0);
+		if (ret) {
+			dev_err(avi_m2m->dev, "Failed to set stats format!\n");
+			return ret;
+		}
+	}
+
+	/* Set background when composing */
+	if (avi_m2m->use_compose) {
+		avi_segment_set_background(avi_m2m->out_seg, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * MEM2MEM callbacks
+ */
+/* MEM2MEM "device_run" callback: process one source buffer into one
+ * destination buffer. Picks the next queued src/dst (and optionally
+ * stats) buffers, applies the formats and fires the segments in one-shot
+ * mode. On any failure both buffers are completed with an error state and
+ * the job is finished immediately. */
+static void avi_m2m_device_run(void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = priv;
+	struct vb2_buffer *src_vb, *dst_vb;
+	int ret;
+
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	/* Do blanking ISP task */
+	avi_v4l2_isp_blanking(avi_m2m->ctrl_isp);
+#endif
+
+	/* Update format and AVI configuration when an S_FMT/S_SELECTION
+	 * marked the configuration as dirty */
+	if (avi_m2m->configuration_changed) {
+		/* Update AVI format from V4L2 format */
+		avi_m2m_to_avi_segment_format(&avi_m2m->src_format);
+		avi_m2m_to_avi_segment_format(&avi_m2m->dst_format);
+		avi_m2m->configuration_changed = 0;
+	}
+
+	/* Get next input buffer */
+	src_vb = v4l2_m2m_next_src_buf(avi_m2m->m2m_ctx);
+	avi_m2m_setup_avi_dma_buffer(src_vb, &avi_m2m->src_format,
+				     &avi_m2m->src_buf);
+
+	/* Get next output buffer */
+	dst_vb = v4l2_m2m_next_dst_buf(avi_m2m->m2m_ctx);
+	avi_m2m_setup_avi_dma_buffer(dst_vb, &avi_m2m->dst_format,
+				     &avi_m2m->dst_buf);
+
+	/* Get next stats buffer (dma_addr stays 0 when stats are off, which
+	 * is also how the error path knows a stats buffer was taken) */
+	avi_m2m->stats_buf.plane0.dma_addr = 0;
+	if (avi_m2m->stats_enabled && avi_m2m->use_stats)
+		avi_stats_next(&avi_m2m->stats, &avi_m2m->stats_buf);
+
+	/* Check DMA buffers */
+	if (avi_m2m_check_buffer(avi_m2m, &avi_m2m->src_format,
+				 &avi_m2m->src_buf) ||
+	    avi_m2m_check_buffer(avi_m2m, &avi_m2m->dst_format,
+				 &avi_m2m->dst_buf))
+		goto error;
+
+	/* Check stats DMA buffer */
+	if (avi_m2m->stats_buf.plane0.dma_addr != 0 &&
+	    avi_m2m_check_buffer(avi_m2m, NULL, &avi_m2m->stats_buf)) {
+		dev_err(avi_m2m->dev, "Bad stats buffer!\n");
+		goto error;
+	}
+
+	/* Update V4L2 buffer fields */
+	avi_m2m->sequence++;
+	src_vb->v4l2_buf.sequence = avi_m2m->sequence;
+	v4l2_get_timestamp(&src_vb->v4l2_buf.timestamp);
+
+	/* Apply format */
+	ret = avi_m2m_apply_format(avi_m2m);
+	if (ret) {
+		dev_err(avi_m2m->dev, "Failed to apply format!\n");
+		goto error;
+	}
+
+	/* Activate segments (One shot mode); completion is reported
+	 * asynchronously, hence the plain return here */
+	ret = avi_m2m_activate_oneshot(avi_m2m);
+	if (ret) {
+		dev_err(avi_m2m->dev, "Failed to activate segments!\n");
+		goto error;
+	}
+
+	return;
+
+error:
+	/* Take the buffers out of the MEM2MEM context so they can be
+	 * completed with an error status */
+	src_vb = v4l2_m2m_src_buf_remove(avi_m2m->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(avi_m2m->m2m_ctx);
+
+	/* Failed to get buffer */
+	v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+	v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+
+	/* Complete the stats buffer in error too, if one was taken */
+	avi_m2m->stats_buf.status = AVI_BUFFER_ERROR;
+	if (avi_m2m->stats_enabled && avi_m2m->stats_buf.plane0.dma_addr)
+		avi_stats_done(&avi_m2m->stats, &avi_m2m->stats_buf,
+			       src_vb->v4l2_buf.sequence, 1);
+	/* Job is finished */
+	v4l2_m2m_job_finish(avi_m2m->m2m_dev, avi_m2m->m2m_ctx);
+}
+
+/* MEM2MEM "job_ready" callback: a job can only run once both a source
+ * and a destination buffer have been queued. */
+static int avi_m2m_job_ready(void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = priv;
+
+	return v4l2_m2m_num_src_bufs_ready(avi_m2m->m2m_ctx) &&
+	       v4l2_m2m_num_dst_bufs_ready(avi_m2m->m2m_ctx);
+}
+
+/* MEM2MEM "job_abort" callback: nothing to cancel in hardware, simply
+ * report the current job as finished. */
+static void avi_m2m_job_abort(void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = priv;
+
+	v4l2_m2m_job_finish(avi_m2m->m2m_dev, avi_m2m->m2m_ctx);
+}
+
+/* MEM2MEM "lock" callback: serialize framework access on the device
+ * mutex. */
+static void avi_m2m_lock(void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = priv;
+
+	mutex_lock(&avi_m2m->mutex);
+}
+
+/* MEM2MEM "unlock" callback: counterpart of avi_m2m_lock(). */
+static void avi_m2m_unlock(void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = priv;
+
+	mutex_unlock(&avi_m2m->mutex);
+}
+
+/* Callbacks handed to the V4L2 MEM2MEM framework */
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= avi_m2m_device_run,
+	.job_ready	= avi_m2m_job_ready,
+	.job_abort	= avi_m2m_job_abort,
+	.lock		= avi_m2m_lock,
+	.unlock		= avi_m2m_unlock,
+};
+
+/*
+ * video ioctls
+ */
+/* VIDIOC_QUERYCAP handler: identify the driver and report the
+ * memory-to-memory + streaming capabilities. */
+static int avi_m2m_querycap(struct file *file, void *priv,
+			    struct v4l2_capability *cap)
+{
+	/* Set string infos. Use strlcpy consistently for all three
+	 * strings; the previous snprintf(..., sizeof(cap->card) - 1, ...)
+	 * needlessly gave up one byte of the card buffer. */
+	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+	strlcpy(cap->bus_info, AVI_V4L2_BUS_VERSION, sizeof(cap->bus_info));
+	strlcpy(cap->card, "avi-m2m", sizeof(cap->card));
+
+	/* Set capabilities */
+	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	/* Reserved fields must be zeroed by the driver */
+	memset(cap->reserved, 0, sizeof(cap->reserved));
+
+	return 0;
+}
+
+/* Formats ops */
+/* Format enumeration, shared between the OUTPUT and CAPTURE queues since
+ * the AVI supports the same pixel formats on both sides. */
+static int avi_m2m_enum_fmt_vid(struct file *file, void *priv,
+				struct v4l2_fmtdesc *f)
+{
+	/* Reject anything but the two supported buffer types */
+	if (!AVI_M2M_CHECK_TYPE(f->type))
+		return -EINVAL;
+
+	/* Delegate to the common AVI V4L2 helper */
+	return avi_v4l2_enum_fmt(f);
+}
+
+/* VIDIOC_TRY_FMT handler: validate a pixel format against the AVI,
+ * falling back to the driver default when it is not supported. */
+static int avi_m2m_try_fmt_vid(struct file *file, void *priv,
+			       struct v4l2_format *f)
+{
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	int err;
+
+	/* The MEM2MEM pipeline only processes progressive frames */
+	pix->field = V4L2_FIELD_NONE;
+
+	/* First attempt: the format exactly as requested */
+	err = avi_v4l2_try_fmt(f->type, pix);
+	if (err < 0) {
+		/* Unsupported: retry with the default pixel format and
+		 * colorspace instead of failing outright */
+		pix->pixelformat = AVI_M2M_DEFAULT_PIX_FMT;
+		pix->colorspace = AVI_M2M_DEFAULT_COLORSPACE;
+		err = avi_v4l2_try_fmt(f->type, pix);
+	}
+
+	return err;
+}
+
+/* VIDIOC_G_FMT handler: return the currently configured format for the
+ * requested queue (OUTPUT = source, CAPTURE = destination). */
+static int avi_m2m_g_fmt_vid(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+
+	if (!AVI_M2M_CHECK_TYPE(f->type))
+		return -EINVAL;
+
+	/* The formats are protected by the device mutex */
+	mutex_lock(&avi_m2m->mutex);
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		f->fmt.pix = avi_m2m->src_format.v4l2_fmt;
+	else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		f->fmt.pix = avi_m2m->dst_format.v4l2_fmt;
+	else
+		BUG();
+
+	mutex_unlock(&avi_m2m->mutex);
+
+	return 0;
+}
+
+/* Set the format for one queue. Caller must hold avi_m2m->mutex (see
+ * avi_m2m_s_fmt_vid()). Also resets the crop/bounds rectangles to the
+ * full frame and marks the configuration dirty so the next job reloads
+ * the AVI segment formats. */
+static int avi_m2m_s_fmt_vid_unlocked(struct file *file, void *priv,
+				      struct v4l2_format *f)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	struct avi_m2m_format *format;
+	int ret = 0;
+
+	/* Do not change format during streaming */
+	if (avi_m2m_is_streaming(avi_m2m, f->type))
+		return -EBUSY;
+
+	/**
+	 * The V4L2 requirement says "[colorspace] must be set
+	 * by the driver for capture streams and by the application
+	 * for output streams". Thus for our m2m driver, we will
+	 * force the CAPTURE colorspace same as OUTPUT.
+	 *
+	 * <<<Warning>>> weird things can occur if the pixel format differs
+	 * between OUTPUT and CAPTURE: i.e. OUTPUT=YUV and CAPTURE=RGB,
+	 * colorspace is different...
+	 */
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    f->fmt.pix.colorspace == AVI_NULL_CSPACE)
+		f->fmt.pix.colorspace = avi_m2m->src_format.v4l2_fmt.colorspace;
+
+	/* Try format before setting */
+	ret = avi_m2m_try_fmt_vid(file, priv, f);
+	if (ret < 0)
+		return ret;
+
+	/* Get format to set from type */
+	switch (f->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		format = &avi_m2m->src_format;
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		format = &avi_m2m->dst_format;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Update format: the crop rectangle and its bounds are reset to
+	 * the full frame */
+	format->v4l2_fmt = f->fmt.pix;
+	format->rect.left = 0;
+	format->rect.top = 0;
+	format->rect.width = f->fmt.pix.width;
+	format->rect.height = f->fmt.pix.height;
+	format->bounds = format->rect;
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		avi_m2m->compose_rect = format->rect;
+
+	/* Get AVI segment format */
+	avi_m2m_to_avi_segment_format(format);
+
+	/* Format has changed */
+	set_bit(f->type, &avi_m2m->configuration_changed);
+
+	return 0;
+}
+
+/* VIDIOC_S_FMT handler: locked wrapper around
+ * avi_m2m_s_fmt_vid_unlocked(). */
+static int avi_m2m_s_fmt_vid(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(f->type))
+		return -EINVAL;
+
+	/* Perform the actual work under the device mutex */
+	mutex_lock(&avi_m2m->mutex);
+	err = avi_m2m_s_fmt_vid_unlocked(file, priv, f);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* Buffer ops */
+/* VIDIOC_CREATE_BUFS handler: resolve the vb2 queue for the requested
+ * type and create the buffers under the device mutex. */
+static int avi_m2m_create_bufs(struct file *file, void *priv,
+			       struct v4l2_create_buffers *create)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	struct vb2_queue *q;
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(create->format.type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	q = v4l2_m2m_get_vq(avi_m2m->m2m_ctx, create->format.type);
+	err = vb2_create_bufs(q, create);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_PREPARE_BUF handler: hand the buffer to vb2 for preparation
+ * under the device mutex. */
+static int avi_m2m_prepare_buf(struct file *file, void *priv,
+			       struct v4l2_buffer *buf)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	struct vb2_queue *q;
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(buf->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	q = v4l2_m2m_get_vq(avi_m2m->m2m_ctx, buf->type);
+	err = vb2_prepare_buf(q, buf);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_REQBUFS handler: locked pass-through to the MEM2MEM helper. */
+static int avi_m2m_reqbufs(struct file *file, void *priv,
+			   struct v4l2_requestbuffers *reqbufs)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(reqbufs->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_reqbufs(file, avi_m2m->m2m_ctx, reqbufs);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_QUERYBUF handler: locked pass-through to the MEM2MEM helper. */
+static int avi_m2m_querybuf(struct file *file, void *priv,
+			    struct v4l2_buffer *buf)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(buf->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_querybuf(file, avi_m2m->m2m_ctx, buf);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_QBUF handler: locked pass-through to the MEM2MEM helper. */
+static int avi_m2m_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(buf->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_qbuf(file, avi_m2m->m2m_ctx, buf);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_DQBUF handler: locked pass-through to the MEM2MEM helper. */
+static int avi_m2m_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(buf->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_dqbuf(file, avi_m2m->m2m_ctx, buf);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* VIDIOC_EXPBUF handler: export a buffer as a dmabuf fd, under the
+ * device mutex. */
+static int avi_m2m_expbuf(struct file *file, void *priv,
+			  struct v4l2_exportbuffer *ebuf)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	if (!AVI_M2M_CHECK_TYPE(ebuf->type))
+		return -EINVAL;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_expbuf(file, avi_m2m->m2m_ctx, ebuf);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* Streaming ops */
+/* VIDIOC_STREAMON handler. The AVI segments (and optional ISP controls)
+ * are only built when the CAPTURE side starts streaming; the OUTPUT side
+ * merely turns its queue on.
+ *
+ * Fix: the error path after a v4l2_m2m_streamon() failure used to
+ * unconditionally deactivate the ISP and destroy the segments, even when
+ * the failing call was for the OUTPUT type — which would tear down the
+ * state of a possibly active CAPTURE stream. The unwind is now guarded
+ * by the buffer type so it only undoes what this call actually set up. */
+static int avi_m2m_streamon(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	struct avi_isp_offsets offsets;
+#endif
+	int ret = 0;
+
+	/* Check type */
+	if (!AVI_M2M_CHECK_TYPE(type))
+		return -EINVAL;
+
+	/* Lock MEM2MEM context access */
+	mutex_lock(&avi_m2m->mutex);
+
+	/* Already streaming */
+	if (avi_m2m_is_streaming(avi_m2m, type)) {
+		mutex_unlock(&avi_m2m->mutex);
+		return -EBUSY;
+	}
+
+	/* Start capturing */
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		/* Build segments */
+		ret = avi_m2m_build_segments(avi_m2m);
+		if (ret)
+			goto build_failed;
+
+		/* Reload configuration for first frame */
+		set_bit(type, &avi_m2m->configuration_changed);
+
+#ifdef CONFIG_M2M_AVI_USE_ISP
+		/* Add ISP controls */
+		if (avi_m2m->current_caps & AVI_CAPS_ISP) {
+			/* Get ISP offsets */
+			ret = avi_isp_get_offsets(avi_m2m->in_seg, &offsets);
+			if (ret)
+				goto isp_failed;
+
+			/* Activate ISP controls */
+			avi_v4l2_isp_activate(avi_m2m->ctrl_isp, &offsets);
+			avi_v4l2_isp_blanking(avi_m2m->ctrl_isp);
+		}
+#endif
+	}
+
+	/* Stream on */
+	ret = v4l2_m2m_streamon(file, avi_m2m->m2m_ctx, type);
+	if (ret)
+		goto streamon_failed;
+
+	/* Set streaming bit */
+	avi_m2m_set_streaming(avi_m2m, type, 1);
+
+	/* Unlock MEM2MEM context access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return 0;
+
+streamon_failed:
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	/* Only the CAPTURE path activated the ISP above */
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    (avi_m2m->current_caps & AVI_CAPS_ISP))
+		avi_v4l2_isp_deactivate(avi_m2m->ctrl_isp);
+isp_failed:
+#endif
+	/* Only the CAPTURE path built the segments above */
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		avi_m2m_destroy_segments(avi_m2m);
+build_failed:
+	/* Unlock MEM2MEM context access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return ret;
+}
+
+/* VIDIOC_STREAMOFF handler: stop the requested queue. Stopping the
+ * CAPTURE side additionally deactivates the ISP controls (if any) and
+ * destroys the AVI segments built by streamon. A no-op when the queue
+ * was not streaming. */
+static int avi_m2m_streamoff(struct file *file, void *priv,
+			     enum v4l2_buf_type type)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int ret = 0;
+
+	/* Check type */
+	if (!AVI_M2M_CHECK_TYPE(type))
+		return -EINVAL;
+
+	/* Lock MEM2MEM context access */
+	mutex_lock(&avi_m2m->mutex);
+
+	/* Stream off */
+	if (avi_m2m_is_streaming(avi_m2m, type)) {
+		/* Stream off MEM2MEM context */
+		v4l2_m2m_streamoff(file, avi_m2m->m2m_ctx, type);
+
+		/* Stop capturing */
+		if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+#ifdef CONFIG_M2M_AVI_USE_ISP
+			/* Deactivate ISP chains controls */
+			if (avi_m2m->current_caps & AVI_CAPS_ISP) {
+				avi_v4l2_isp_deactivate(avi_m2m->ctrl_isp);
+			}
+#endif
+
+			/* Force deactivation of segments */
+			avi_m2m_deactivate_segments(avi_m2m);
+
+			/* Destroy segments */
+			avi_m2m_destroy_segments(avi_m2m);
+		}
+
+		/* Clear streaming bit */
+		avi_m2m_set_streaming(avi_m2m, type, 0);
+	}
+
+	/* Unlock MEM2MEM context access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return ret;
+}
+
+/* Selection ops */
+/* VIDIOC_G_SELECTION handler: report crop/compose rectangles. On the
+ * OUTPUT (source) side crop and compose are the same rectangle; on the
+ * CAPTURE (destination) side the compose rectangle is tracked separately
+ * in avi_m2m->compose_rect. */
+static int avi_m2m_g_selection(struct file *file, void *priv,
+			       struct v4l2_selection *s)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int ret = 0;
+
+	/* Check type */
+	if (!AVI_M2M_CHECK_TYPE(s->type))
+		return -EINVAL;
+
+	/* Lock format access */
+	mutex_lock(&avi_m2m->mutex);
+
+	/* Get selection */
+	switch (s->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		switch(s->target) {
+		case V4L2_SEL_TGT_CROP:
+		case V4L2_SEL_TGT_COMPOSE:
+		case V4L2_SEL_TGT_COMPOSE_PADDED:
+			s->r = avi_m2m->src_format.rect;
+			break;
+		case V4L2_SEL_TGT_CROP_BOUNDS:
+		case V4L2_SEL_TGT_CROP_DEFAULT:
+		case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+			s->r = avi_m2m->src_format.bounds;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		switch(s->target) {
+		case V4L2_SEL_TGT_CROP:
+			s->r = avi_m2m->dst_format.rect;
+			break;
+		case V4L2_SEL_TGT_COMPOSE:
+		case V4L2_SEL_TGT_COMPOSE_PADDED:
+			s->r = avi_m2m->compose_rect;
+			break;
+		case V4L2_SEL_TGT_CROP_BOUNDS:
+		case V4L2_SEL_TGT_CROP_DEFAULT:
+		case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+			s->r = avi_m2m->dst_format.bounds;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		BUG();
+	}
+
+	/* Unlock format access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return ret;
+}
+
+/* VIDIOC_S_SELECTION handler: set crop (both queues) or compose (CAPTURE
+ * only) rectangles. Rectangles are clamped against the current bounds,
+ * and composition is enabled whenever the compose rectangle is strictly
+ * inside the crop rectangle. Marks the configuration dirty so the next
+ * job reprograms the segments. */
+static int avi_m2m_s_selection(struct file *file, void *priv,
+			       struct v4l2_selection *s)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	struct avi_m2m_format *fmt;
+	int ret = 0;
+
+	/* Check type */
+	if (!AVI_M2M_CHECK_TYPE(s->type))
+		return -EINVAL;
+
+	/* Only CROP and COMPOSE are supported */
+	if (s->target != V4L2_SEL_TGT_CROP && s->target != V4L2_SEL_TGT_COMPOSE)
+		return -EINVAL;
+
+	/* Lock format access */
+	mutex_lock(&avi_m2m->mutex);
+
+	/* Set selection */
+	switch (s->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		fmt = &avi_m2m->src_format;
+		/* Adjust crop rectangle into bounds rectangle */
+		avi_m2m_adjust_crop_into_bounds(&s->r, &fmt->bounds);
+
+		/* Adjust strip into image */
+		avi_m2m_adjust_strip_into_image(&s->r, &fmt->v4l2_fmt);
+
+		/* Update crop */
+		fmt->rect = s->r;
+		set_bit(s->type, &avi_m2m->configuration_changed);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		fmt = &avi_m2m->dst_format;
+		switch (s->target) {
+		case V4L2_SEL_TGT_CROP:
+			/* Adjust crop rectangle into bounds rectangle */
+			avi_m2m_adjust_crop_into_bounds(&s->r, &fmt->bounds);
+
+			/* Adjust strip into image */
+			avi_m2m_adjust_strip_into_image(&s->r, &fmt->v4l2_fmt);
+
+			/* Update crop and compose rect (setting the crop
+			 * resets the compose rectangle to match it) */
+			fmt->rect = s->r;
+			avi_m2m->compose_rect = s->r;
+			break;
+		case V4L2_SEL_TGT_COMPOSE:
+			/* Adjust compose rectangle into crop rectangle */
+			avi_m2m_adjust_crop_into_bounds(&s->r, &fmt->rect);
+
+			/* Update compose rect */
+			avi_m2m->compose_rect = s->r;
+			break;
+		default:
+			ret = -EINVAL;
+			goto error;
+		}
+
+		/* Use compose whenever the compose rect is strictly smaller
+		 * than / offset inside the crop rect */
+		avi_m2m->use_compose =
+			((avi_m2m->compose_rect.left > fmt->rect.left) ||
+			(avi_m2m->compose_rect.top > fmt->rect.top) ||
+			(avi_m2m->compose_rect.left + avi_m2m->compose_rect.width) <
+						(fmt->rect.left + fmt->rect.width) ||
+			(avi_m2m->compose_rect.top + avi_m2m->compose_rect.height) <
+						(fmt->rect.top + fmt->rect.height));
+
+		/* Configuration has changed */
+		set_bit(s->type, &avi_m2m->configuration_changed);
+		break;
+	default:
+		BUG();
+	}
+
+error:
+	/* Unlock format access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return ret;
+}
+
+/* Deprecated ops */
+/* Private ioctl handler (deprecated entry point, kept for legacy
+ * userspace): currently only AVI_ISP_IOGET_OFFSETS is supported, and
+ * only while the CAPTURE side is streaming on an ISP-capable chain.
+ * Returns -ENOIOCTLCMD for unknown commands. */
+static long avi_m2m_default(struct file *file, void *priv, bool valid_prio,
+			    int cmd, void *arg)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int ret = -ENOIOCTLCMD;
+
+	/* Lock access */
+	mutex_lock(&avi_m2m->mutex);
+
+	/* Check command */
+	switch (cmd) {
+	case AVI_ISP_IOGET_OFFSETS:
+		/* ISP is not available in caps */
+		if ((avi_m2m->caps & AVI_CAPS_ISP) != AVI_CAPS_ISP) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		/* Offsets only exist once the segments are built, i.e. when
+		 * CAPTURE is streaming */
+		if (!avi_m2m->in_seg ||
+		    !avi_m2m_is_streaming(avi_m2m,
+					  V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		/* Get ISP offsets from AVI segment */
+		ret = avi_isp_get_offsets(avi_m2m->in_seg,
+					  (struct avi_isp_offsets *) arg);
+		break;
+	default:
+		v4l2_err(avi_m2m->v4l2_dev, "Not supported command!\n");
+	}
+
+unlock:
+	/* Unlock access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	return ret;
+}
+
+/* VIDIOC_LOG_STATUS handler: deprecated, only logs a warning. */
+static int avi_m2m_log_status(struct file *file, void *priv)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+
+	/* Deprecated IOCTL */
+	dev_info(avi_m2m->dev, "/!\\ DEPRECATED IOCTL!\n");
+
+	return 0;
+}
+
+/* V4L2 ioctl dispatch table. Note: the create_bufs/prepare_buf entries
+ * now use plain function designators like every other entry (the stray
+ * '&' was harmless but inconsistent). */
+static const struct v4l2_ioctl_ops avi_m2m_ioctl_ops = {
+	/* Caps */
+	.vidioc_querycap	 = avi_m2m_querycap,
+
+	/* Source format (input) */
+	.vidioc_enum_fmt_vid_out = avi_m2m_enum_fmt_vid,
+	.vidioc_g_fmt_vid_out	 = avi_m2m_g_fmt_vid,
+	.vidioc_try_fmt_vid_out	 = avi_m2m_try_fmt_vid,
+	.vidioc_s_fmt_vid_out	 = avi_m2m_s_fmt_vid,
+
+	/* Target format (output) */
+	.vidioc_enum_fmt_vid_cap = avi_m2m_enum_fmt_vid,
+	.vidioc_g_fmt_vid_cap	 = avi_m2m_g_fmt_vid,
+	.vidioc_try_fmt_vid_cap	 = avi_m2m_try_fmt_vid,
+	.vidioc_s_fmt_vid_cap	 = avi_m2m_s_fmt_vid,
+
+	/* Buffers */
+	.vidioc_create_bufs	 = avi_m2m_create_bufs,
+	.vidioc_prepare_buf	 = avi_m2m_prepare_buf,
+	.vidioc_reqbufs		 = avi_m2m_reqbufs,
+	.vidioc_querybuf	 = avi_m2m_querybuf,
+	.vidioc_qbuf		 = avi_m2m_qbuf,
+	.vidioc_dqbuf		 = avi_m2m_dqbuf,
+	.vidioc_expbuf		 = avi_m2m_expbuf,
+
+	/* Streaming */
+	.vidioc_streamon	 = avi_m2m_streamon,
+	.vidioc_streamoff	 = avi_m2m_streamoff,
+
+	/* Selection (+ crop) */
+	.vidioc_g_selection	 = avi_m2m_g_selection,
+	.vidioc_s_selection	 = avi_m2m_s_selection,
+
+	/* Used for getting ISP offset
+	 * /!\ Deprecated: use ISP V4L2 controls
+	 */
+	.vidioc_default		 = avi_m2m_default,
+
+	/* [DEPRECATED] Log status */
+	.vidioc_log_status	 = avi_m2m_log_status,
+};
+
+/*
+ * Queue operations
+ */
+/* vb2 "queue_setup" callback: all supported formats use a single
+ * contiguous plane, sized from the relevant queue's image size. */
+static int avi_m2m_vb2_queue_setup(struct vb2_queue *vq,
+				   const struct v4l2_format *fmt,
+				   unsigned int *nbuffers,
+				   unsigned int *nplanes, unsigned int sizes[],
+				   void *alloc_ctxs[])
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vq);
+
+	*nplanes = 1;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		sizes[0] = avi_m2m->src_format.v4l2_fmt.sizeimage;
+	else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		sizes[0] = avi_m2m->dst_format.v4l2_fmt.sizeimage;
+	else
+		BUG();
+
+	/* Both queues allocate from the same DMA-contig context */
+	alloc_ctxs[0] = avi_m2m->alloc_ctx;
+
+	return 0;
+}
+
+/* vb2 "buf_init" callback: register the buffer with UMP so the GPU can
+ * access it.
+ *
+ * Fix: the OUTPUT case was missing its 'break' and fell through into the
+ * CAPTURE case, registering a second mapping sized with the destination
+ * format for every source buffer (compare with the symmetric breaks in
+ * avi_m2m_vb2_buffer_cleanup()). */
+static int avi_m2m_vb2_buffer_init(struct vb2_buffer *vb)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vb->vb2_queue);
+
+	/* Allow GPU to access this memory through UMP */
+	switch (vb->v4l2_buf.type)
+	{
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+		p7ump_add_map(vb2_dma_contig_plane_dma_addr(vb, 0),
+			PAGE_ALIGN(avi_m2m->src_format.v4l2_fmt.sizeimage));
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+		p7ump_add_map(vb2_dma_contig_plane_dma_addr(vb, 0),
+			PAGE_ALIGN(avi_m2m->dst_format.v4l2_fmt.sizeimage));
+		break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+/* vb2 "buf_prepare" callback: set the plane payload to the image size of
+ * the buffer's queue. */
+static int avi_m2m_vb2_buffer_prepare(struct vb2_buffer *vb)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long payload;
+
+	if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		payload = avi_m2m->src_format.v4l2_fmt.sizeimage;
+	else if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		payload = avi_m2m->dst_format.v4l2_fmt.sizeimage;
+	else
+		BUG();
+
+	vb2_set_plane_payload(vb, 0, payload);
+
+	return 0;
+}
+
+/* vb2 "buf_queue" callback: hand the buffer to the MEM2MEM framework. */
+static void avi_m2m_vb2_buffer_queue(struct vb2_buffer *vb)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(avi_m2m->m2m_ctx, vb);
+}
+
+/* vb2 "buf_cleanup" callback: drop the UMP mapping registered in
+ * avi_m2m_vb2_buffer_init(). */
+static void avi_m2m_vb2_buffer_cleanup(struct vb2_buffer *vb)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long mapped;
+
+	/* Nothing was mapped for this buffer */
+	if (!vb->planes[0].mem_priv)
+		return;
+
+	if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		mapped = PAGE_ALIGN(avi_m2m->src_format.v4l2_fmt.sizeimage);
+	else if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		mapped = PAGE_ALIGN(avi_m2m->dst_format.v4l2_fmt.sizeimage);
+	else
+		BUG();
+
+	/* Remove GPU access to this memory through UMP */
+	p7ump_remove_map(vb2_dma_contig_plane_dma_addr(vb, 0), mapped);
+}
+
+/* vb2 "wait_prepare" callback: release the device mutex while vb2 sleeps
+ * waiting for a buffer. */
+static void avi_m2m_vb2_wait_prepare(struct vb2_queue *vq)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vq);
+
+	mutex_unlock(&avi_m2m->mutex);
+}
+
+/* vb2 "wait_finish" callback: re-take the device mutex after the wait. */
+static void avi_m2m_vb2_wait_finish(struct vb2_queue *vq)
+{
+	struct avi_m2m_dev *avi_m2m = vb2_get_drv_priv(vq);
+
+	mutex_lock(&avi_m2m->mutex);
+}
+
+/* videobuf2 queue operations, shared by the OUTPUT and CAPTURE queues */
+static struct vb2_ops avi_m2m_video_qops = {
+	.queue_setup	 = avi_m2m_vb2_queue_setup,
+	.buf_init	 = avi_m2m_vb2_buffer_init,
+	.buf_prepare	 = avi_m2m_vb2_buffer_prepare,
+	.buf_queue	 = avi_m2m_vb2_buffer_queue,
+	.buf_cleanup	 = avi_m2m_vb2_buffer_cleanup,
+	.wait_prepare	 = avi_m2m_vb2_wait_prepare,
+	.wait_finish	 = avi_m2m_vb2_wait_finish,
+};
+
+/*
+ * Queue init operations
+ */
+/* Initialize one vb2 queue of the given type with the common driver
+ * settings (DMA-contig memory, MMAP/USERPTR/DMABUF io modes). */
+static inline int avi_m2m_vb2_queue_init(struct vb2_queue *vq,
+					 enum v4l2_buf_type type,
+					 void *priv)
+{
+	/* Start from a clean queue description */
+	memset(vq, 0, sizeof(*vq));
+
+	vq->type            = type;
+	vq->io_modes        = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	vq->drv_priv        = priv;
+	vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	vq->ops             = &avi_m2m_video_qops;
+	vq->mem_ops         = &vb2_dma_contig_memops;
+
+	return vb2_queue_init(vq);
+}
+
+/* MEM2MEM queue-init callback: set up the source (OUTPUT) and
+ * destination (CAPTURE) vb2 queues. */
+static int avi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+			      struct vb2_queue *dst_vq)
+{
+	int err;
+
+	err = avi_m2m_vb2_queue_init(src_vq, V4L2_BUF_TYPE_VIDEO_OUTPUT, priv);
+	if (err)
+		return err;
+
+	return avi_m2m_vb2_queue_init(dst_vq, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+				      priv);
+}
+
+/*
+ * File operations
+ */
+/* File "open" handler. The MEM2MEM context and the default formats are
+ * shared by all instances and lazily created on the first open; later
+ * opens only bump the instance counter. */
+static int avi_m2m_open(struct file *file)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+
+	/* Lock MEM2MEM context access */
+	mutex_lock(&avi_m2m->mutex);
+
+	if (!avi_m2m->m2m_ctx) {
+		/* Init MEM2MEM context (shared by all instances) */
+		avi_m2m->m2m_ctx = v4l2_m2m_ctx_init(avi_m2m->m2m_dev, avi_m2m,
+						     &avi_m2m_queue_init);
+		if (IS_ERR(avi_m2m->m2m_ctx)) {
+			/* Unlock and exit */
+			mutex_unlock(&avi_m2m->mutex);
+			return PTR_ERR(avi_m2m->m2m_ctx);
+		}
+
+		/* Init formats with default values */
+		avi_m2m->src_format = avi_m2m_default_format;
+		avi_m2m->dst_format = avi_m2m_default_format;
+
+		/* Init composition to default values */
+		avi_m2m->compose_rect = avi_m2m_default_format.rect;
+		avi_m2m->compose_alpha = AVI_M2M_DEFAULT_ALPHA;
+		avi_m2m->use_compose = 0;
+
+		/* Get AVI segment formats */
+		avi_m2m_to_avi_segment_format(&avi_m2m->src_format);
+		avi_m2m_to_avi_segment_format(&avi_m2m->dst_format);
+
+		/* Set bytes per line from the widest plane */
+		avi_m2m->src_format.v4l2_fmt.bytesperline = max(
+				avi_m2m->src_format.avi_fmt.plane0.line_size,
+				avi_m2m->src_format.avi_fmt.plane1.line_size);
+		avi_m2m->dst_format.v4l2_fmt.bytesperline = max(
+				avi_m2m->dst_format.avi_fmt.plane0.line_size,
+				avi_m2m->dst_format.avi_fmt.plane1.line_size);
+
+		/* Set size image (sum of both planes) */
+		avi_m2m->src_format.v4l2_fmt.sizeimage =
+					avi_m2m->src_format.plane0_size +
+					avi_m2m->src_format.plane1_size;
+		avi_m2m->dst_format.v4l2_fmt.sizeimage =
+					avi_m2m->dst_format.plane0_size +
+					avi_m2m->dst_format.plane1_size;
+
+		/* Force configuration for first time */
+		set_bit(V4L2_BUF_TYPE_VIDEO_OUTPUT,
+			&avi_m2m->configuration_changed);
+		set_bit(V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			&avi_m2m->configuration_changed);
+	}
+
+	/* Unlock MEM2MEM context access */
+	mutex_unlock(&avi_m2m->mutex);
+
+	/* Increment instance number */
+	atomic_inc(&avi_m2m->num_inst);
+
+	return 0;
+}
+
+/* File "release" handler: when the last instance closes, force both
+ * queues off and tear down the shared MEM2MEM context created by
+ * avi_m2m_open(). The streamoff calls take the mutex themselves, hence
+ * they run before the explicit lock below. */
+static int avi_m2m_release(struct file *file)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+
+	/* Decrement instance number and check instance number */
+	if (atomic_dec_and_test(&avi_m2m->num_inst)) {
+
+		/* Force streamoff */
+		avi_m2m_streamoff(file, avi_m2m, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		avi_m2m_streamoff(file, avi_m2m, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+		/* Lock MEM2MEM context access */
+		mutex_lock(&avi_m2m->mutex);
+
+		/* Release MEM2MEM context */
+		if (avi_m2m->m2m_ctx)
+			v4l2_m2m_ctx_release(avi_m2m->m2m_ctx);
+		avi_m2m->m2m_ctx = NULL;
+
+		/* Unlock MEM2MEM context access */
+		mutex_unlock(&avi_m2m->mutex);
+	}
+
+	return 0;
+}
+
+/* File "poll" handler: locked pass-through to the MEM2MEM helper. */
+static unsigned int avi_m2m_poll(struct file *file,
+				 struct poll_table_struct *wait)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int events;
+
+	mutex_lock(&avi_m2m->mutex);
+	events = v4l2_m2m_poll(file, avi_m2m->m2m_ctx, wait);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return events;
+}
+
+/* File "mmap" handler: locked pass-through to the MEM2MEM helper. */
+static int avi_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct avi_m2m_dev *avi_m2m = video_drvdata(file);
+	int err;
+
+	mutex_lock(&avi_m2m->mutex);
+	err = v4l2_m2m_mmap(file, avi_m2m->m2m_ctx, vma);
+	mutex_unlock(&avi_m2m->mutex);
+
+	return err;
+}
+
+/* V4L2 file operations (locking is handled inside each handler, see
+ * vdev->lock = NULL in avi_m2m_v4l2_init()) */
+static const struct v4l2_file_operations avi_m2m_fops = {
+	.owner		= THIS_MODULE,
+	.open		= avi_m2m_open,
+	.release	= avi_m2m_release,
+	.poll		= avi_m2m_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= avi_m2m_mmap,
+};
+
+/* Register the video device, ISP controls (optional), MEM2MEM device and
+ * stats device. Returns 0 or a negative errno.
+ *
+ * Fix: the error unwinding used to call video_unregister_device() twice
+ * on the m2m-init failure path (once at the v4l2_m2m_init_failed label
+ * and again after the ISP cleanup), and called it on a device that was
+ * never registered when registration itself failed. The unwind now
+ * unregisters once (which also releases the device through
+ * vdev->release) and only calls video_device_release() for a device that
+ * was allocated but never registered. */
+static int __devinit avi_m2m_v4l2_init(struct avi_m2m_dev *avi_m2m)
+{
+	struct video_device *vdev;
+	int ret;
+
+	/* Get shared V4L2 device instance */
+	avi_m2m->v4l2_dev = avi_v4l2_get_device();
+	if (!avi_m2m->v4l2_dev)
+		return -ENODEV;
+
+	/* Allocate new video device */
+	vdev = video_device_alloc();
+	if (!vdev) {
+		v4l2_err(avi_m2m->v4l2_dev,
+			 "Failed to allocate video device\n");
+		ret = -ENODEV;
+		goto vdev_alloc_failed;
+	}
+	avi_m2m->vdev = vdev;
+
+	/* Init video device */
+	strlcpy(vdev->name, dev_name(avi_m2m->dev), sizeof(vdev->name) - 1);
+
+	vdev->parent	   = avi_m2m->dev;
+	vdev->v4l2_dev	   = avi_m2m->v4l2_dev;
+	vdev->current_norm = V4L2_STD_UNKNOWN;
+	vdev->fops	   = &avi_m2m_fops;
+	vdev->ioctl_ops	   = &avi_m2m_ioctl_ops;
+	vdev->release	   = &video_device_release;
+	vdev->vfl_type	   = VFL_TYPE_GRABBER;
+	vdev->tvnorms	   = V4L2_STD_UNKNOWN;
+	/* We handle the locking ourselves */
+	vdev->lock	   = NULL;
+
+	video_set_drvdata(vdev, avi_m2m);
+
+	/* Add V4L2 controls for ISP configuration */
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	if (avi_m2m->caps & AVI_CAPS_ISP) {
+		/* Init the control handler */
+		ret = v4l2_ctrl_handler_init(&avi_m2m->ctrl_handler,
+					     AVI_V4L2_ISP_CONTROL_HINT);
+		if (ret)
+			goto init_ctrl_handler_failed;
+
+		/* Add ISP chain controls */
+		ret = avi_v4l2_isp_init(&avi_m2m->ctrl_isp,
+					&avi_m2m->ctrl_handler);
+		if (ret)
+			goto init_isp_failed;
+
+		/* Add control handler to video device */
+		vdev->ctrl_handler = &avi_m2m->ctrl_handler;
+	}
+#endif
+
+	/* Register video device */
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0)
+		goto video_register_failed;
+
+	/* Init MEM2MEM device */
+	avi_m2m->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(avi_m2m->m2m_dev)) {
+		v4l2_err(avi_m2m->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(avi_m2m->m2m_dev);
+		goto v4l2_m2m_init_failed;
+	}
+
+	/* Init stats video device */
+	if (avi_m2m->stats_enabled && (avi_m2m->caps & AVI_CAPS_ISP)) {
+		ret = avi_stats_init(&avi_m2m->stats, vdev, DRIVER_VERSION);
+		if (ret)
+			goto init_stats_failed;
+	}
+
+	return 0;
+
+init_stats_failed:
+	v4l2_m2m_release(avi_m2m->m2m_dev);
+v4l2_m2m_init_failed:
+	/* Unregistering triggers vdev->release, so don't release again */
+	video_unregister_device(vdev);
+	vdev = NULL;
+video_register_failed:
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	if (avi_m2m->caps & AVI_CAPS_ISP)
+		avi_v4l2_isp_free(avi_m2m->ctrl_isp);
+init_isp_failed:
+	if (avi_m2m->caps & AVI_CAPS_ISP)
+		v4l2_ctrl_handler_free(&avi_m2m->ctrl_handler);
+init_ctrl_handler_failed:
+#endif
+	/* Release the device only if it was never registered */
+	if (vdev)
+		video_device_release(vdev);
+vdev_alloc_failed:
+	avi_v4l2_put_device(avi_m2m->v4l2_dev);
+	return ret;
+}
+
+/* Undo avi_m2m_v4l2_init(): release, in reverse order, the stats device,
+ * the MEM2MEM device, the ISP controls and the video device. */
+static void __devexit avi_m2m_v4l2_destroy(struct avi_m2m_dev *avi_m2m)
+{
+	/* Remove stats video device */
+	if (avi_m2m->stats_enabled && (avi_m2m->caps & AVI_CAPS_ISP))
+		avi_stats_destroy(&avi_m2m->stats);
+
+	/* Release V4L2 MEM2MEM device */
+	v4l2_m2m_release(avi_m2m->m2m_dev);
+
+	/* Remove V4L2 controls */
+#ifdef CONFIG_M2M_AVI_USE_ISP
+	if (avi_m2m->caps & AVI_CAPS_ISP) {
+		avi_v4l2_isp_free(avi_m2m->ctrl_isp);
+		v4l2_ctrl_handler_free(&avi_m2m->ctrl_handler);
+	}
+#endif
+
+	/* Unregister video device */
+	video_unregister_device(avi_m2m->vdev);
+	avi_v4l2_put_device(avi_m2m->v4l2_dev);
+}
+
+/* Platform-driver probe: allocate the device structure, register the
+ * V4L2 side and set up the vb2 DMA-contig allocator context.
+ *
+ * Fix: when vb2_dma_contig_init_ctx() failed, the old shared error label
+ * only freed the device structure, leaking the registered video/V4L2
+ * devices and leaving stale drvdata pointers behind. The unwind is now
+ * split so each failure point releases exactly what was set up. */
+static int __devinit avi_m2m_probe(struct platform_device *pdev)
+{
+	struct avi_m2m_platform_data *pdata;
+	struct avi_m2m_dev *avi_m2m;
+	int ret;
+
+	/* Get platform data */
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Failed to find platform data!\n");
+		return -EINVAL;
+	}
+
+	/* Allocate private device structure */
+	avi_m2m = kzalloc(sizeof *avi_m2m, GFP_KERNEL);
+	if (!avi_m2m) {
+		dev_err(&pdev->dev, "Failed to allocate!\n");
+		return -ENOMEM;
+	}
+
+	/* Save platform device */
+	avi_m2m->dev = &pdev->dev;
+	avi_m2m->id = pdev->id;
+	avi_m2m->caps = pdata->caps;
+	avi_m2m->stats_enabled = pdata->enable_stats;
+
+	/* Init spin lock and mutex */
+	mutex_init(&avi_m2m->mutex);
+	spin_lock_init(&avi_m2m->lock);
+	atomic_set(&avi_m2m->num_inst, 0);
+
+	/* Init V4L2 device */
+	ret = avi_m2m_v4l2_init(avi_m2m);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to init V4L2 device!\n");
+		goto free_dev;
+	}
+
+	/* Register driver data */
+	platform_set_drvdata(pdev, avi_m2m);
+	dev_set_drvdata(&pdev->dev, avi_m2m);
+
+	/* Video device is registered */
+	dev_info(avi_m2m->dev,
+		 "video device successfuly registered as %s\n",
+		 video_device_node_name(avi_m2m->vdev));
+
+	/* Init videobuf2 DMA contig context */
+	avi_m2m->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(avi_m2m->alloc_ctx)) {
+		dev_err(&pdev->dev, "Failed to init Videobuf2 DMA contig!\n");
+		ret = PTR_ERR(avi_m2m->alloc_ctx);
+		goto destroy_v4l2;
+	}
+
+	return 0;
+
+destroy_v4l2:
+	/* Clear the drvdata pointers set above before tearing down */
+	platform_set_drvdata(pdev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
+	avi_m2m_v4l2_destroy(avi_m2m);
+free_dev:
+	kfree(avi_m2m);
+	return ret;
+}
+
+/* Platform-driver remove: tear down the V4L2 layer, the vb2 allocator
+ * context and free the device structure. */
+static int __devexit avi_m2m_remove(struct platform_device *pdev)
+{
+	struct avi_m2m_dev *avi_m2m = dev_get_drvdata(&pdev->dev);
+
+	avi_m2m_v4l2_destroy(avi_m2m);
+	vb2_dma_contig_cleanup_ctx(avi_m2m->alloc_ctx);
+
+	/* Drop the driver data references before freeing */
+	dev_set_drvdata(&pdev->dev, NULL);
+	platform_set_drvdata(pdev, NULL);
+	kfree(avi_m2m);
+
+	return 0;
+}
+
+/* Platform driver glue */
+static struct platform_driver avi_m2m_driver = {
+	.probe          = avi_m2m_probe,
+	.remove         = __devexit_p(avi_m2m_remove),
+	.driver         = {
+		.name   = DRIVER_NAME,
+		.owner  = THIS_MODULE,
+	},
+};
+
+/* Module entry point: register the platform driver. */
+static int __init avi_m2m_init(void)
+{
+	return platform_driver_register(&avi_m2m_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit avi_m2m_exit(void)
+{
+	platform_driver_unregister(&avi_m2m_driver);
+}
+
+module_init(avi_m2m_init);
+module_exit(avi_m2m_exit);
+
+MODULE_AUTHOR("Alexandre Dilly <alexandre.dilly@parrot.com>");
+MODULE_DESCRIPTION("V4L2 MEM2MEM driver for P7 Advanced Video Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_m2m.h b/drivers/parrot/media/video/avi_m2m.h
new file mode 100644
index 00000000000000..b653510b30be01
--- /dev/null
+++ b/drivers/parrot/media/video/avi_m2m.h
@@ -0,0 +1,34 @@
+/*
+ * linux/drivers/parrot/media/video/avi_m2m.h
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Alexandre Dilly <alexandre.dilly@parrot.com>
+ * @date 03-Aug-2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _AVI_M2M_H_
+#define _AVI_M2M_H_
+
+/**
+ * struct avi_m2m_platform_data - AVI M2M specific data
+ */
+/**
+ * struct avi_m2m_platform_data - AVI M2M specific data
+ * @caps:         AVI capability bitmask for this M2M channel
+ *                (presumably AVI_CAP_* flags -- TODO confirm against gpu/avi
+ *                segment code)
+ * @enable_stats: non-zero to enable the statistics path
+ *                (NOTE(review): exact semantics not visible here; verify
+ *                against avi_m2m.c users)
+ */
+struct avi_m2m_platform_data {
+	unsigned long caps;
+	int enable_stats;
+};
+
+#endif /* _AVI_M2M_H_ */
diff --git a/drivers/parrot/media/video/avi_multicapture.c b/drivers/parrot/media/video/avi_multicapture.c
new file mode 100644
index 00000000000000..96f25f9bb4e512
--- /dev/null
+++ b/drivers/parrot/media/video/avi_multicapture.c
@@ -0,0 +1,947 @@
+/**
+ * AVI Multicapture driver
+ *
+ * Its purpose is to create one big video (mosaic) made of multiple camera streams coming
+ * into the P7 AVI.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <parrot/avicam_dummy_dev.h>
+#include "avicam_v4l2.h"
+#include "avi_multicapture.h"
+
+#include <video/avi_pixfmt.h>
+#include "avi_capture.h"
+
+/**
+ * XXX: don't hardcode this.
+ * Multicapture driver should be able to handle different cameras with different
+ * byte/pixel values
+ */
+#define CAM_BYTES_PER_PIXEL 2
+
+/**
+ * Videobuf2 parts
+ *
+ * One mosaic buffer.  A buffer is complete when @done_current reaches
+ * @done_needed (see avimulti_ready()).
+ */
+struct avimulti_buffer {
+	struct vb2_buffer vb;	/* embedded vb2 buffer (container_of anchor) */
+	struct list_head list;	/* link on bufqueue / bufqueue_active */
+	/* number of camera frames this buffer waits for; incremented in
+	 * avimulti_next() for each enabled camera targeting it */
+	unsigned done_needed;
+	/* frames already landed; incremented in avimulti_done() */
+	unsigned done_current;
+};
+
+/*
+ * One instance per capturing camera
+ */
+struct avi_multicapt_context {
+	/* Index of this camera in multicapt_contexts[] */
+	unsigned i;
+	struct avi_capture_context capture_ctx;
+	struct avicam_platform_data* avicam_pdata;
+
+	/**
+	 * V4L2 subdevice associated with this camera
+	 */
+	struct v4l2_subdev *subdev;
+
+	/**
+	 * Whether the probe for this camera module was successful or not
+	 */
+	unsigned valid;
+
+	/*
+	 * Offset bytes in the final buffer where the frame for this camera will be stored
+	 */
+	unsigned offset_bytes;
+	/* Back-pointer to the owning struct avi_multicapt_top */
+	void *priv_top;
+
+	/*
+	 * Whether the camera is in full framerate (and thus needs 2 frames per mosaic)
+	 */
+	unsigned alternate;
+
+	/*
+	 * Toggles 0/1 on every frame of an "alternate" camera to select
+	 * which of its two bands in the mosaic receives the next frame
+	 */
+	unsigned cur_alt;
+
+	/*
+	 * Whether this camera is active or not
+	 * Unlike "streaming", this can become 0 during the multicapture
+	 * if the camera goes offline or gets corrupted
+	 */
+	unsigned enabled;
+
+	/*
+	 * Whether we started an avi capture on this camera
+	 */
+	unsigned streaming;
+
+	/* Last mosaic buffer this camera was directed to (avimulti_next) */
+	struct avimulti_buffer *cur_buf;
+};
+
+/* Driver-level state shared by all cameras of one mosaic device. */
+struct avi_multicapt_top {
+	struct device *dev;
+	struct avimulti_platform_data *pdata;
+	/* protects bufqueue / bufqueue_active and per-buffer counters */
+	spinlock_t vbq_lock;
+	struct v4l2_device v4l2_dev;
+	struct video_device *vdev;
+	/* buffers queued by userspace, not yet targeted by any camera */
+	struct list_head bufqueue;
+	/* buffers currently being filled by the cameras */
+	struct list_head bufqueue_active;
+
+	/* serialization lock */
+	struct mutex lock;
+	struct media_pad pad;
+	/* number of userspace opens; vb2 queue is (re)built on first open */
+	int use_count;
+	int streaming;
+
+	struct avi_multicapt_context multicapt_contexts[6];
+	unsigned int frame_count;
+
+	struct pinctrl *pctl;
+
+	/**
+	 * Videobuf2 parts
+	 */
+	void *alloc_ctx;
+	struct vb2_queue vb_vidq;
+};
+
+/* Map an embedded vb2 buffer back onto its avimulti wrapper. */
+static struct avimulti_buffer* to_avimulti_buffer(struct vb2_buffer* vb2_buf)
+{
+	return container_of(vb2_buf, struct avimulti_buffer, vb);
+}
+
+/* Platform device id of this multicapture instance. */
+static int avimulticapture_instance(struct avi_multicapt_top *multicapt_top)
+{
+	return to_platform_device(multicapt_top->dev)->id;
+}
+
+/*
+ * Platform device id of this multicapture instance.
+ *
+ * This was a byte-for-byte duplicate of avimulticapture_instance();
+ * keep the symbol for existing callers but delegate instead of
+ * re-implementing the container_of() dance.
+ */
+static inline int avimulti_instance(struct avi_multicapt_top *top)
+{
+	return avimulticapture_instance(top);
+}
+
+/**
+ * Height in lines of the mosaic image: one band per camera, plus one
+ * extra band for each full-framerate camera (those contribute 2 frames).
+ */
+static int avimulti_image_height(struct avi_multicapt_top *top)
+{
+	unsigned bands = top->pdata->nb_cameras +
+			 top->pdata->nb_full_framerate;
+
+	return top->pdata->height * bands;
+}
+
+/**
+ * Total mosaic image size in bytes (metadata lines excluded).
+ */
+static int avimulti_image_size(struct avi_multicapt_top *top)
+{
+	int line_bytes = top->pdata->width * CAM_BYTES_PER_PIXEL;
+
+	return line_bytes * avimulti_image_height(top);
+}
+
+/**
+ * Number of metadata lines necessary (if enabled)
+ *
+ * The metadata block (one struct avimulti_metadata per mosaic band) is
+ * rounded up to whole image lines.  NOTE(review): the "/ ... + 1"
+ * always adds one line even when the division is exact -- presumably a
+ * deliberate safety margin; confirm before tightening, since userspace
+ * sees this in the reported image height.
+ */
+static int avimulti_metadata_height(struct avi_multicapt_top *top) {
+	struct avimulti_platform_data* pdata = top->pdata;
+
+	if(!pdata->enable_metadata)
+		return 0;
+
+	return (sizeof(struct avimulti_metadata) *
+		(pdata->nb_cameras + pdata->nb_full_framerate)) /
+		(pdata->width * CAM_BYTES_PER_PIXEL) + 1;
+}
+
+/**
+ * Size in bytes of the metadata lines appended to the mosaic
+ * (0 when metadata is disabled).
+ */
+static int avimulti_metadata_size(struct avi_multicapt_top *top)
+{
+	return top->pdata->width * CAM_BYTES_PER_PIXEL *
+	       avimulti_metadata_height(top);
+}
+
+/**
+ * vb2 callback: one plane per buffer, sized for the whole mosaic plus
+ * its optional metadata tail.
+ */
+static int avimulti_queue_setup(struct vb2_queue *vq,
+				const struct v4l2_format *fmt,
+				unsigned int *count, unsigned int *num_planes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct avi_multicapt_top *top = vb2_get_drv_priv(vq);
+
+	*num_planes   = 1;
+	sizes[0]      = avimulti_image_size(top) + avimulti_metadata_size(top);
+	alloc_ctxs[0] = top->alloc_ctx;
+
+	/* Default to 4 buffers when the application does not ask for more */
+	if (*count == 0)
+		*count = 4;
+
+	return 0;
+}
+
+/* vb2 callback: set the full mosaic payload on every prepared buffer. */
+static int avimulti_vbuf_prepare(struct vb2_buffer* vb)
+{
+	struct avi_multicapt_top *top = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned long payload = avimulti_image_size(top) +
+				avimulti_metadata_size(top);
+
+	vb2_set_plane_payload(vb, 0, payload);
+	vb->v4l2_buf.field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+/* vb2 callback: append a newly queued buffer to the pending list. */
+static void avimulti_vbuf_queue(struct vb2_buffer* vb)
+{
+	struct avi_multicapt_top *top = vb2_get_drv_priv(vb->vb2_queue);
+	struct avimulti_buffer *buf = to_avimulti_buffer(vb);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&top->vbq_lock, flags);
+	list_add_tail(&buf->list, &top->bufqueue);
+	spin_unlock_irqrestore(&top->vbq_lock, flags);
+}
+
+/*
+ * The mosaic exposes a single fixed format (width x bands, YUYV), so
+ * g_fmt / try_fmt / s_fmt all simply report it and ignore user input.
+ */
+static int avimulti_fill_fmt(struct file *file,
+			     void *priv,
+			     struct v4l2_format *f)
+{
+	struct avi_multicapt_top *top = video_drvdata(file);
+	struct v4l2_pix_format *pix = &f->fmt.pix;
+	unsigned height = avimulti_image_height(top) +
+			  avimulti_metadata_height(top);
+
+	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	pix->pixelformat  = V4L2_PIX_FMT_YUYV;
+	pix->colorspace   = V4L2_COLORSPACE_SMPTE170M;
+	pix->width        = top->pdata->width;
+	pix->height       = height;
+	pix->bytesperline = top->pdata->width * CAM_BYTES_PER_PIXEL;
+	pix->sizeimage    = pix->bytesperline * height;
+	pix->field        = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+/**
+ * Tells whether the mosaic is ready (i.e. every awaited camera frame
+ * has landed) for the given buffer.
+ */
+static unsigned avimulti_ready(struct avimulti_buffer *buf)
+{
+	return buf->done_current >= buf->done_needed;
+}
+
+/* Re-arm the per-buffer completion accounting. */
+static void avimulti_reset_counter_done(struct avimulti_buffer *buf)
+{
+	buf->done_needed = buf->done_current = 0;
+}
+
+/**
+ * This is called by the avi_capture layer when one of the camera finished
+ * capturing a frame.
+ *
+ * Updates the camera's enabled state from the frame status, stamps the
+ * optional metadata entry for this band, then bumps the completion
+ * counter of the target buffer (frame->priv, set by avimulti_next())
+ * and completes every active buffer up to and including it once full.
+ */
+static void avimulti_done(struct avi_capture_context *ctx,
+			  struct avi_dma_buffer *frame,
+			  enum avi_field f)
+{
+	struct avimulti_buffer *vbuf = frame->priv;
+	struct avi_multicapt_context *multicapt = ctx->priv;
+	struct avi_multicapt_top *multicapt_top = multicapt->priv_top;
+	struct avimulti_platform_data *pdata = multicapt_top->pdata;
+	unsigned long flags = 0;
+	/* State before this frame: a camera that was already disabled was
+	 * not counted in done_needed by avimulti_next() */
+	u32 was_enabled = multicapt->enabled;
+
+	if (!multicapt_top->streaming)
+		return;
+
+	if (frame->status == AVI_BUFFER_ERROR ||
+	    frame->status == AVI_BUFFER_TIMEOUT)
+		/* Disable the camera in case it goes corrupt */
+		multicapt->enabled = 0;
+	else
+		multicapt->enabled = 1;
+
+	++multicapt_top->frame_count;
+
+	/* Write meta-data at the end of the mosaic */
+	if (multicapt_top->pdata->enable_metadata) {
+		struct avimulti_metadata metadata;
+		/* Band index of this camera inside the mosaic, recovered
+		 * from its byte offset */
+		unsigned int index =
+			multicapt->offset_bytes / (pdata->width * pdata->height * CAM_BYTES_PER_PIXEL);
+		struct avimulti_metadata *metadata_addr =
+			vb2_plane_vaddr(&vbuf->vb, 0) + avimulti_image_size(multicapt_top);
+
+		metadata.timestamp = ktime_get().tv64;
+		metadata.enabled = multicapt->enabled;
+		memcpy(&metadata_addr[index], &metadata, sizeof(metadata));
+	}
+
+	spin_lock_irqsave(&multicapt_top->vbq_lock, flags);
+
+	/* This camera is corrupt and thus we shouldn't update the buffers */
+	if (vbuf->done_needed == 0) {
+		spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+		return;
+	}
+
+	/* If the camera was corrupt before, we aren't waiting for it. So don't
+	 * increment the counter */
+	if (was_enabled)
+		vbuf->done_current++;
+
+	/* If this was the last frame needed for the full mosaic, mark the buffer as done
+	   and wake up any existing process waiting on it.  Older buffers
+	   still on the active list are flushed as well, up to vbuf. */
+	if (avimulti_ready(vbuf)) {
+		while (!list_empty(&multicapt_top->bufqueue_active)) {
+			struct avimulti_buffer *buf;
+			buf = list_entry(multicapt_top->bufqueue_active.next, struct avimulti_buffer, list);
+			list_del(&buf->list);
+
+			buf->vb.v4l2_buf.sequence = multicapt_top->frame_count;
+			v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+
+			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+			avimulti_reset_counter_done(buf);
+
+			if (buf == vbuf)
+				break;
+		}
+	}
+
+	spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+}
+
+/*
+ * Zero the metadata area appended to a mosaic buffer (no-op when
+ * metadata is disabled).
+ *
+ * Bug fix: the area holds one entry per mosaic band, i.e.
+ * nb_cameras + nb_full_framerate entries (see avimulti_metadata_height()
+ * and the band index computed in avimulti_done()), but only the first
+ * nb_cameras entries were cleared, leaving stale metadata in the
+ * full-framerate bands.
+ */
+static void avimulti_clean_metadata(struct avi_multicapt_top *multicapt_top,
+				    struct avimulti_buffer *vbuf)
+{
+	struct avimulti_metadata *metadata_addr;
+
+	if (!multicapt_top->pdata->enable_metadata)
+		return;
+
+	metadata_addr = vb2_plane_vaddr(&vbuf->vb, 0) +
+		avimulti_image_size(multicapt_top);
+
+	memset(metadata_addr, 0,
+	       sizeof(struct avimulti_metadata) *
+	       (multicapt_top->pdata->nb_cameras +
+		multicapt_top->pdata->nb_full_framerate));
+}
+
+/**
+ * This is called by the avi_capture layer to configure
+ * the next frame's buffer for a camera.
+ *
+ * Picks (or keeps) the mosaic buffer this camera's next frame must land
+ * in, moves fresh buffers from bufqueue to bufqueue_active as needed,
+ * and programs the DMA address of the camera's band inside the buffer.
+ */
+static void avimulti_next(struct avi_capture_context *ctx,
+			  struct avi_dma_buffer *frame,
+			  enum avi_field f)
+{
+	struct avi_multicapt_context *multicapt = ctx->priv;
+	struct avi_multicapt_top *multicapt_top = multicapt->priv_top;
+	struct avimulti_buffer *vbuf = NULL;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&multicapt_top->vbq_lock, flags);
+
+	if (!multicapt_top->streaming) {
+		spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+		return;
+	}
+
+	/* Get the latest buffer that was required */
+	if (!list_empty(&multicapt_top->bufqueue_active))
+		vbuf = list_entry(multicapt_top->bufqueue_active.prev,
+				  struct avimulti_buffer,
+				  list);
+
+	/* Get ahead with the next buffer if all the next() have been called for every
+	   camera (i.e. this camera already targets the newest active one). */
+	if (!vbuf || multicapt->cur_buf == vbuf) {
+		if(!list_empty(&multicapt_top->bufqueue)) {
+			vbuf = list_first_entry(&multicapt_top->bufqueue,
+						struct avimulti_buffer,
+						list);
+			list_del(&vbuf->list);
+
+			vbuf->vb.state = VB2_BUF_STATE_ACTIVE;
+
+			list_add_tail(&vbuf->list,
+				      &multicapt_top->bufqueue_active);
+
+			avimulti_clean_metadata(multicapt_top, vbuf);
+		} else {
+			/* No free buffer available: skip this frame */
+			spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+			return;
+		}
+	}
+
+	multicapt->cur_buf = vbuf;
+
+	/* We don't want corrupt cameras to be waited for */
+	if (multicapt->enabled)
+		vbuf->done_needed++;
+
+	spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+
+	/* Find out the correct offset for this frame in the current mosaic buffer
+	   It depends on whether the current camera is in full framerate or not
+	   (a full-framerate camera owns two consecutive bands and alternates
+	   between them). */
+	if (multicapt->alternate && multicapt->cur_alt) {
+		frame->plane0.dma_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb, 0);
+		frame->plane0.dma_addr += multicapt->offset_bytes;
+		frame->plane0.dma_addr += (multicapt_top->pdata->width * multicapt_top->pdata->height * CAM_BYTES_PER_PIXEL);
+	} else {
+		frame->plane0.dma_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb, 0);
+		frame->plane0.dma_addr += multicapt->offset_bytes;
+	}
+
+	multicapt->cur_alt = !multicapt->cur_alt;
+	/* avimulti_done() retrieves the target buffer through frame->priv */
+	frame->priv = vbuf;
+}
+
+/* VIDIOC_QUERYCAP: report the driver identity and capture capabilities. */
+static int avimulti_querycap(struct file *file,
+			     void *fh,
+			     struct v4l2_capability *cap)
+{
+	struct avi_multicapt_top *top = video_drvdata(file);
+
+	mutex_lock(&top->lock);
+
+	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+	snprintf(cap->card, sizeof(cap->card), "avi-cam.%d",
+		 avimulticapture_instance(top));
+
+	cap->version      = DRIVER_VERSION;
+	cap->device_caps  = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	mutex_unlock(&top->lock);
+
+	return 0;
+}
+
+/* VIDIOC_ENUM_FMT: delegate to the shared AVI V4L2 format enumerator. */
+static int avimulti_enum_fmt_vid_cap(struct file *file,
+				     void *priv,
+				     struct v4l2_fmtdesc *f)
+{
+	return avi_v4l2_enum_fmt(f);
+}
+
+/*
+ * vb2 start_streaming: bring up every valid camera one by one.
+ *
+ * A failure on an individual camera is NOT fatal: the in-loop goto
+ * ladder unwinds only that camera (the labels live inside the loop)
+ * and leaves it disabled, so the mosaic runs with the remaining ones.
+ * The function therefore returns 0 even if some cameras failed.
+ */
+static int avimulti_streamon(struct vb2_queue* vq, unsigned int count)
+{
+	struct avi_multicapt_top *multicapt_top = vb2_get_drv_priv(vq);
+	struct avimulti_platform_data *pdata = multicapt_top->pdata;
+	int ret = 0;
+	unsigned i;
+
+	/* DMA format of one camera band inside the mosaic buffer */
+	struct avi_segment_format dma_fmt = {
+		.width = pdata->width,
+		.height = pdata->height,
+		.interlaced = 0,
+		.colorspace = AVI_BT601_CSPACE,
+		.pix_fmt = AVI_PIXFMT_YUYV,
+		.plane0 = {
+			.line_size = pdata->width * CAM_BYTES_PER_PIXEL,
+		}
+	};
+
+	if (multicapt_top->streaming) {
+		return -EBUSY;
+	}
+
+	multicapt_top->frame_count = 0;
+
+	for (i = 0; i < pdata->nb_cameras; i++) {
+		struct avi_multicapt_context *ctx =
+			&multicapt_top->multicapt_contexts[i];
+
+		/* Camera whose subdev probe failed: skip it entirely */
+		if(!ctx->valid) {
+			goto capture_init_failed;
+		}
+
+		/* Full-framerate cameras before index i each add one extra
+		 * band, shifting this camera's byte offset accordingly */
+		ctx->offset_bytes = (i+min(pdata->nb_full_framerate, i))
+			* (pdata->width * pdata->height * CAM_BYTES_PER_PIXEL);
+
+		ret = avi_capture_init(&ctx->capture_ctx,
+				       multicapt_top->dev,
+				       avimulti_instance(multicapt_top) + i,
+				       ctx->avicam_pdata->cam_cap,
+				       0);
+		if (ret)
+			goto capture_init_failed;
+
+		/* The first nb_full_framerate cameras deliver 2 frames per
+		 * mosaic */
+		if(i < pdata->nb_full_framerate)
+			ctx->alternate = 1;
+		else
+			ctx->alternate = 0;
+
+		ctx->i = i;
+		ctx->cur_alt = 0;
+		ctx->priv_top = multicapt_top;
+		ctx->cur_buf = NULL;
+		ctx->capture_ctx.priv = ctx;
+		ctx->capture_ctx.interface = ctx->avicam_pdata->interface;
+		ctx->capture_ctx.capture_cspace = AVI_BT601_CSPACE;
+		ctx->capture_ctx.interlaced = 0;
+		ctx->capture_ctx.measure_to_timings = ctx->avicam_pdata->measure_to_timings;
+
+		ctx->capture_ctx.next = &avimulti_next;
+		ctx->capture_ctx.done = &avimulti_done;
+
+		ctx->capture_ctx.timeout_jiffies = pdata->timeout;
+
+		/* enable streaming on the V4L2 subdev driver */
+		ret = v4l2_subdev_call(ctx->subdev, video, s_stream, 1);
+		if (ret) {
+			dev_err(multicapt_top->dev, "Failed to call s_stream for %d\n", i);
+			goto s_stream_failed;
+		}
+#ifdef CONFIG_AVICAM_TIMINGS_AUTO
+		{
+			unsigned cam_width;
+			unsigned cam_height;
+
+			/* Setup the avi_capture layer */
+			ret = avi_capture_prepare(&ctx->capture_ctx);
+			if (ret) {
+				dev_err(multicapt_top->dev, "failed to get sensor timings for %d\n", i);
+				goto prepare_failed;
+			}
+
+			avi_capture_resolution(&ctx->capture_ctx, &cam_width, &cam_height);
+
+			if (cam_width != pdata->width || cam_height != pdata->height) {
+				dev_err(multicapt_top->dev,
+					"Invalid sensor resolution for %d, "
+					"expected %ux%u but measured %ux%u\n",
+					i, pdata->width, pdata->height, cam_width, cam_height);
+				ret = -EINVAL;
+				goto bad_resolution;
+			}
+		}
+#else
+		ret = avi_capture_prepare_timings(&ctx->capture_ctx, pdata->width, pdata->height);
+		if (ret)
+			goto prepare_failed;
+
+#endif
+		ret = avi_capture_set_format(&ctx->capture_ctx, &dma_fmt);
+		if (ret)
+			goto set_format_failed;
+
+		ctx->enabled = 1;
+
+		ret = avi_capture_streamon(&ctx->capture_ctx);
+		if (ret)
+			goto capture_streamon_failed;
+
+		/* Success */
+		ctx->streaming = 1;
+		ret = 0;
+		continue;
+
+		/* Per-camera unwind: note capture_init_failed sits after
+		 * avi_capture_destroy() since the context was never
+		 * initialized on that path */
+	capture_streamon_failed:
+	set_format_failed:
+#ifdef CONFIG_AVICAM_TIMINGS_AUTO
+	bad_resolution:
+#endif
+	prepare_failed:
+		v4l2_subdev_call(ctx->subdev, video, s_stream, 0);
+	s_stream_failed:
+		avi_capture_destroy(&ctx->capture_ctx);
+	capture_init_failed:
+		ctx->enabled = 0;
+		ctx->streaming = 0;
+		dev_err(multicapt_top->dev, "Couldn't initialize camera %d", i);
+	}
+
+	multicapt_top->streaming = 1;
+	return 0;
+}
+
+/*
+ * vb2 stop_streaming: flag the device stopped, flush both buffer lists
+ * under the queue lock (callbacks check "streaming" and bail out), then
+ * tear down each camera's capture context outside the spinlock.
+ */
+static int avimulti_streamoff(struct vb2_queue* vq)
+{
+	struct avi_multicapt_top *multicapt_top = vb2_get_drv_priv(vq);
+	int i;
+	unsigned long flags = 0;
+
+	if (!multicapt_top->streaming) {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&multicapt_top->vbq_lock, flags);
+	multicapt_top->streaming = 0;
+
+	/* Return every pending buffer to userspace with an error state */
+	while (!list_empty(&multicapt_top->bufqueue)) {
+		struct avimulti_buffer *buf;
+		buf = list_entry(multicapt_top->bufqueue.next, struct avimulti_buffer, list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	while (!list_empty(&multicapt_top->bufqueue_active)) {
+		struct avimulti_buffer *buf;
+		buf = list_entry(multicapt_top->bufqueue_active.next, struct avimulti_buffer, list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	}
+
+	spin_unlock_irqrestore(&multicapt_top->vbq_lock, flags);
+
+	/* Stop only the cameras that actually made it to streaming state */
+	for (i = 0; i < multicapt_top->pdata->nb_cameras; i++) {
+		struct avi_multicapt_context *ctx = &multicapt_top->multicapt_contexts[i];
+
+		if (!ctx->streaming)
+			continue;
+
+		v4l2_subdev_call(ctx->subdev, video, s_stream, 0);
+		avi_capture_streamoff(&ctx->capture_ctx);
+		avi_capture_destroy (&ctx->capture_ctx);
+	}
+
+	return 0;
+}
+
+/* videobuf2 queue operations: stock wait helpers plus our own queue and
+ * streaming management */
+static struct vb2_ops avimulti_qops = {
+	.queue_setup	 = avimulti_queue_setup,
+	.buf_prepare	 = avimulti_vbuf_prepare,
+	.buf_queue	 = avimulti_vbuf_queue,
+	.start_streaming = avimulti_streamon,
+	.stop_streaming	 = avimulti_streamoff,
+	.wait_prepare	 = vb2_ops_wait_prepare,
+	.wait_finish	 = vb2_ops_wait_finish,
+};
+
+/*
+ * Device open: the first opener (re)initializes the vb2 queue, later
+ * openers share it.  use_count is protected by the serialization mutex.
+ *
+ * Bug fix: the return value of vb2_queue_init() was ignored; a failed
+ * queue initialization now fails the open instead of leaving a
+ * half-initialized queue behind.
+ */
+static int avimulti_open(struct file *file)
+{
+	struct avi_multicapt_top *multicapt_top = video_drvdata(file);
+	int ret = 0;
+
+	mutex_lock(&multicapt_top->lock);
+
+	if (multicapt_top->use_count == 0) {
+		struct vb2_queue* q = &multicapt_top->vb_vidq;
+
+		memset(q, 0, sizeof(struct vb2_queue));
+		q->type		   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		q->io_modes	   = VB2_MMAP | VB2_USERPTR;
+		q->mem_ops	   = &vb2_dma_contig_memops;
+		q->lock		   = &multicapt_top->lock;
+		q->ops		   = &avimulti_qops;
+		q->drv_priv	   = multicapt_top;
+		q->buf_struct_size = sizeof(struct avimulti_buffer);
+		q->cache_flags	   = multicapt_top->pdata->vb2_cache_flags;
+
+		ret = vb2_queue_init(q);
+		if (ret)
+			goto unlock;
+	}
+	multicapt_top->use_count++;
+
+unlock:
+	mutex_unlock(&multicapt_top->lock);
+
+	return ret;
+}
+
+/* Device close: only the last close releases the vb2 resources. */
+static int avimulti_release(struct file *file)
+{
+	struct avi_multicapt_top *top = video_drvdata(file);
+	int ret = 0;
+
+	mutex_lock(&top->lock);
+
+	if (--top->use_count == 0)
+		ret = vb2_fop_release(file);
+
+	mutex_unlock(&top->lock);
+
+	return ret;
+}
+
+/* File operations: vb2 helpers handle mmap/poll/read, open/release do
+ * the use counting */
+static struct v4l2_file_operations avimulti_fops = {
+	.owner		= THIS_MODULE,
+	.open		= avimulti_open,
+	.release	= avimulti_release,
+	.mmap		= vb2_fop_mmap,
+	.poll		= vb2_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.read		= vb2_fop_read,
+};
+
+
+/* ioctl dispatch: fixed-format g/try/s_fmt all map to avimulti_fill_fmt;
+ * buffer ioctls delegate to the vb2 helpers */
+static const struct v4l2_ioctl_ops avimulti_ioctl_ops = {
+	.vidioc_querycap	 = avimulti_querycap,
+	.vidioc_enum_fmt_vid_cap = avimulti_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap	 = avimulti_fill_fmt,
+	.vidioc_try_fmt_vid_cap	 = avimulti_fill_fmt,
+	.vidioc_s_fmt_vid_cap	 = avimulti_fill_fmt,
+	.vidioc_reqbufs		 = vb2_ioctl_reqbufs,
+	.vidioc_querybuf	 = vb2_ioctl_querybuf,
+	.vidioc_qbuf		 = vb2_ioctl_qbuf,
+	.vidioc_dqbuf		 = vb2_ioctl_dqbuf,
+	.vidioc_streamon	 = vb2_ioctl_streamon,
+	.vidioc_streamoff	 = vb2_ioctl_streamoff,
+};
+
+/**
+ * Register one V4L2 subdev controlling one of the mosaic's cameras.
+ *
+ * Returns the new subdev, or NULL when the camera has no board info,
+ * its I2C adapter is missing, or the subdev probe failed.
+ *
+ * Bug fix: i2c_get_adapter() takes a reference on the adapter; it was
+ * leaked when v4l2_i2c_new_subdev_board() failed.  Drop it with
+ * i2c_put_adapter() on that path (on success the subdev keeps it).
+ */
+static struct v4l2_subdev* __devinit avimulti_register_subdev(
+	struct avicam_platform_data* camera,
+	struct v4l2_device *v4l2_dev)
+{
+	struct i2c_adapter *adapter;
+	struct v4l2_subdev *subdev;
+	struct avicam_subdevs *devs = camera->subdevs;
+
+	if (devs == NULL || devs->board_info == NULL) {
+		return NULL;
+	}
+
+	adapter = i2c_get_adapter(devs->i2c_adapter_id);
+	if (adapter == NULL) {
+		return NULL;
+	}
+
+	subdev = v4l2_i2c_new_subdev_board(v4l2_dev,
+					   adapter,
+					   devs->board_info,
+					   NULL);
+	if (subdev == NULL)
+		i2c_put_adapter(adapter);
+
+	return subdev;
+}
+
+/**
+ * Register all the V4L2 subdev modules controlling the mosaic's cameras.
+ *
+ * Cameras whose subdev fails to probe are simply marked invalid; we
+ * only give up when none of them registered.
+ */
+static int __devinit avimulti_register_cameras(struct avi_multicapt_top *top,
+					       struct v4l2_device *v4l2_dev)
+{
+	struct avicam_platform_data *cameras = top->pdata->cam_interfaces;
+	int nr_valid = 0;
+	int i;
+
+	for (i = 0; i < top->pdata->nb_cameras; ++i) {
+		struct avi_multicapt_context *ctx =
+			&top->multicapt_contexts[i];
+
+		ctx->subdev = avimulti_register_subdev(&cameras[i], v4l2_dev);
+		ctx->valid  = (ctx->subdev != NULL);
+
+		if (ctx->valid) {
+			ctx->avicam_pdata = &cameras[i];
+			nr_valid++;
+		}
+	}
+
+	if (nr_valid == 0)
+		return -ENODEV;
+
+	return v4l2_device_register_subdev_nodes(v4l2_dev);
+}
+
+/*
+ * Create and register the V4L2/video device for the mosaic.
+ *
+ * Bug fixes in the error paths:
+ *  - vdev was leaked when v4l2_device_register() failed (the old code
+ *    jumped straight to return without video_device_release());
+ *  - video_unregister_device() was called on a never-registered vdev
+ *    (a no-op that leaked it) when media_entity_init() failed;
+ *  - the v4l2_device was never unregistered on failure.
+ */
+static int __devinit avimulti_v4l2_init(struct avi_multicapt_top *multicapt_top)
+{
+	struct video_device *vdev;
+	struct v4l2_device *v4l2_dev;
+	int ret;
+
+	vdev = video_device_alloc();
+	if (vdev == NULL)
+		return -ENODEV;
+
+	v4l2_dev = &multicapt_top->v4l2_dev;
+	ret = v4l2_device_register(multicapt_top->dev, v4l2_dev);
+	if (ret)
+		goto vdev_release;
+
+	multicapt_top->vdev = vdev;
+
+	multicapt_top->pad.flags = MEDIA_PAD_FL_SINK;
+	ret = media_entity_init(&vdev->entity, 1, &multicapt_top->pad, 0);
+	if (ret < 0) {
+		dprintk(multicapt_top, "Error initializing media entity\n");
+		goto v4l2_unregister;
+	}
+
+	mutex_init(&multicapt_top->lock);
+
+	strlcpy(vdev->name, dev_name(multicapt_top->dev), sizeof(vdev->name));
+
+	vdev->parent	   = multicapt_top->dev;
+	vdev->current_norm = V4L2_STD_UNKNOWN;
+	vdev->fops	   = &avimulti_fops;
+	vdev->ioctl_ops	   = &avimulti_ioctl_ops;
+	vdev->release	   = &video_device_release;
+	vdev->vfl_type	   = VFL_TYPE_GRABBER;
+	vdev->tvnorms	   = V4L2_STD_UNKNOWN;
+	vdev->queue	   = &multicapt_top->vb_vidq;
+	/* We handle the locking ourselves */
+	vdev->lock	   = NULL;
+
+	video_set_drvdata(vdev, multicapt_top);
+
+	vdev->v4l2_dev = v4l2_dev;
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret < 0)
+		goto media_cleanup;
+
+	ret = avimulti_register_cameras(multicapt_top, v4l2_dev);
+	if (ret < 0) {
+		v4l2_err(vdev, "Couldn't register at least one camera subdev\n");
+		goto video_unregister;
+	}
+
+	return 0;
+
+video_unregister:
+	media_entity_cleanup(&vdev->entity);
+	/* unregister calls video_device_release() through vdev->release */
+	video_unregister_device(vdev);
+	v4l2_device_unregister(v4l2_dev);
+	return ret;
+
+media_cleanup:
+	media_entity_cleanup(&vdev->entity);
+v4l2_unregister:
+	v4l2_device_unregister(v4l2_dev);
+vdev_release:
+	video_device_release(vdev);
+	return ret;
+}
+
+/*
+ * PM resume hook.
+ *
+ * Bug fix: the drvdata of this device is a struct avi_multicapt_top *
+ * (set in avimulti_probe()), but it was read as a struct avicam * and
+ * dereferenced through the wrong layout -- type confusion copied from
+ * the single-camera avicam driver.  Resume every camera context that
+ * was streaming instead.
+ */
+static int avimulti_resume(struct device *dev)
+{
+	struct avi_multicapt_top *top = dev_get_drvdata(dev);
+	int i;
+
+	if (!top)
+		return -ENODEV;
+
+	if (!top->streaming)
+		return 0;
+
+	for (i = 0; i < top->pdata->nb_cameras; i++) {
+		struct avi_multicapt_context *ctx =
+			&top->multicapt_contexts[i];
+		int ret;
+
+		if (!ctx->streaming)
+			continue;
+
+		ret = avi_capture_resume(&ctx->capture_ctx);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Power management hooks.  NOTE(review): only .resume is provided, no
+ * .suspend -- confirm suspend is handled elsewhere or not needed. */
+static struct dev_pm_ops avimulti_dev_pm_ops = {
+	.resume = &avimulti_resume,
+};
+
+/*
+ * Platform probe: allocate the top-level state, select the default
+ * pinctrl state, register the V4L2 device and set up the vb2
+ * DMA-contig allocator.
+ *
+ * Bug fixes: vb2_dma_contig_init_ctx() returns an ERR_PTR which was
+ * dereferenced without a check; on failure the V4L2 registration is now
+ * properly undone.  Also fixes the "successfuly" typo in the log line.
+ */
+static int __devinit avimulti_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct avi_multicapt_top *multicapt_top;
+	struct vb2_dc_conf *alloc_ctx;
+
+	multicapt_top = kzalloc(sizeof(*multicapt_top), GFP_KERNEL);
+	if (multicapt_top == NULL) {
+		ret = -ENOMEM;
+		goto alloc_failed;
+	}
+
+	multicapt_top->dev = &pdev->dev;
+
+	multicapt_top->pdata = dev_get_platdata(&pdev->dev);
+	if (!multicapt_top->pdata) {
+		ret = -EINVAL;
+		goto no_pdata;
+	}
+
+	multicapt_top->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(multicapt_top->pctl)) {
+		ret = PTR_ERR(multicapt_top->pctl);
+		goto pinctrl_failed;
+	}
+
+	spin_lock_init(&multicapt_top->vbq_lock);
+	INIT_LIST_HEAD(&multicapt_top->bufqueue);
+	INIT_LIST_HEAD(&multicapt_top->bufqueue_active);
+
+	ret = avimulti_v4l2_init(multicapt_top);
+	if (ret)
+		goto v4l2_init_failed;
+
+	platform_set_drvdata(pdev, multicapt_top);
+	dev_set_drvdata(&pdev->dev, multicapt_top);
+
+	dev_info(multicapt_top->dev,
+		 "video device successfully registered as %s\n",
+		 video_device_node_name(multicapt_top->vdev));
+
+	alloc_ctx = (struct vb2_dc_conf *)vb2_dma_contig_init_ctx(&pdev->dev);
+	if (IS_ERR(alloc_ctx)) {
+		ret = PTR_ERR(alloc_ctx);
+		goto vb2_init_failed;
+	}
+	alloc_ctx->cache_flags = multicapt_top->pdata->vb2_cache_flags;
+
+	multicapt_top->alloc_ctx = alloc_ctx;
+	return 0;
+
+ vb2_init_failed:
+	platform_set_drvdata(pdev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
+	video_unregister_device(multicapt_top->vdev);
+	avi_v4l2_put_device(&multicapt_top->v4l2_dev);
+ v4l2_init_failed:
+	pinctrl_put(multicapt_top->pctl);
+ pinctrl_failed:
+ no_pdata:
+	kfree(multicapt_top);
+ alloc_failed:
+
+	return ret;
+}
+
+/* Unregister the video device and drop the shared V4L2 device reference.
+ * NOTE(review): name says "avicam" but this belongs to the multicapture
+ * driver -- consider renaming together with its caller in
+ * avimulti_remove(). */
+static void __devexit avicam_v4l2_destroy(struct avi_multicapt_top *multicapt_top)
+{
+	video_unregister_device(multicapt_top->vdev);
+	avi_v4l2_put_device (&multicapt_top->v4l2_dev);
+}
+
+/* Platform removal: undo the probe work in reverse order. */
+static int __devexit avimulti_remove(struct platform_device *pdev)
+{
+	struct avi_multicapt_top *top = platform_get_drvdata(pdev);
+
+	if (!top)
+		return -ENODEV;
+
+	avicam_v4l2_destroy(top);
+	pinctrl_put(top->pctl);
+	vb2_dma_contig_cleanup_ctx(top->alloc_ctx);
+	kfree(top);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Platform driver glue; PM ops currently provide resume only */
+static struct platform_driver avimulticapt_driver = {
+	.driver		= {
+		.name	= "avi_multicapture",
+		.owner	= THIS_MODULE,
+		.pm	= &avimulti_dev_pm_ops,
+	},
+	.probe		= avimulti_probe,
+	.remove		= __devexit_p(avimulti_remove),
+};
+/*
+ * Module init/exit boilerplate: register/unregister the platform driver.
+ * module_platform_driver() expands to exactly the open-coded
+ * init/exit + module_init/module_exit pair that was here before.
+ */
+module_platform_driver(avimulticapt_driver);
+
+MODULE_AUTHOR("Misha Nesaratnam <misha.nesaratnam@parrot.com>");
+MODULE_DESCRIPTION("P7 AVI multicapture mosaic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_multicapture.h b/drivers/parrot/media/video/avi_multicapture.h
new file mode 100644
index 00000000000000..c024896cb9809c
--- /dev/null
+++ b/drivers/parrot/media/video/avi_multicapture.h
@@ -0,0 +1,72 @@
+/**
+ * Avi multicapture configuration
+ */
+#ifndef _AVI_MULTICAPTURE_H__
+#define _AVI_MULTICAPTURE_H__
+
+/* Board-level configuration of one multicapture (mosaic) device. */
+struct avimulti_platform_data {
+	/*
+	 * The number of cameras that will be set in full framerate
+	 * (2 frames per mosaic instead of 1).
+	 *
+	 * For example, with 3 cameras @30fps and this parameter set to 1,
+	 * you will get the following mosaic stream running @15fps:
+	 *
+	 *   CAM_0 (1st frame)
+	 *   CAM_0 (2nd frame)
+	 *   CAM_1
+	 *   CAM_2
+	 *
+	 * Leave at 0 if the system can handle all the cameras at full
+	 * framerate.
+	 */
+	unsigned int nb_full_framerate;
+
+	/*
+	 * All the cameras that will be merged into the multicapture driver.
+	 * The order is important if nb_full_framerate is non-zero: the
+	 * first nb_full_framerate entries get two mosaic bands each.
+	 */
+	struct avicam_platform_data *cam_interfaces;
+
+	/*
+	 * The number of cameras in cam_interfaces
+	 */
+	unsigned int nb_cameras;
+
+	/**
+	 * The width/height of the cameras.
+	 * This implies that all the cameras must have the same dimensions.
+	 * XXX: probe the cameras' V4L2 drivers and get rid of those values
+	 */
+	unsigned int width;
+	unsigned int height;
+
+	/**
+	 * When set, mosaics carry one struct avimulti_metadata per frame.
+	 * These structs are stored at the end of the buffer, adding as
+	 * many image "lines" as necessary.
+	 */
+	unsigned int enable_metadata;
+
+	/**
+	 * Capture timeout, in jiffies, after which a camera is considered
+	 * unresponsive.  Used to disable individual cameras on the fly so
+	 * an offline camera does not freeze the whole mosaic.
+	 *
+	 * "HZ" = 1s
+	 */
+	unsigned int timeout;
+
+	/**
+	 * Videobuf2 cpu caching flags
+	 */
+	enum vb2_cache_flags vb2_cache_flags;
+};
+
+/**
+ * Contains metadata about one frame (band) in the mosaic.
+ * @enabled:   non-zero if the frame's camera was enabled when it was
+ *             captured
+ * @timestamp: capture time, taken from ktime_get().tv64 (monotonic ns)
+ *
+ * Packed: this layout is shared with userspace through the mapped
+ * buffer, so no padding is allowed.
+ */
+struct avimulti_metadata {
+	int enabled;
+	s64 timestamp;
+} __attribute__((packed));
+
+#endif
diff --git a/drivers/parrot/media/video/avi_r2r.h b/drivers/parrot/media/video/avi_r2r.h
new file mode 100644
index 00000000000000..4271b6259d0394
--- /dev/null
+++ b/drivers/parrot/media/video/avi_r2r.h
@@ -0,0 +1,256 @@
+#ifndef _AVI_R2R_H_
+#define _AVI_R2R_H_
+/**
+ * @file avi_r2r.h
+ * Parrot AVI RAM to RAM driver.
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2013-10-23
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "video/avi_segment.h"
+
+#define MAX_CHANNELS_AVI_R2R 6
+#define AVI_R2R_DRIVER_NAME "avi_r2r"
+
+struct avi_r2r_platform_data {
+ struct {
+ unsigned long caps;
+ } channels[MAX_CHANNELS_AVI_R2R];
+};
+
+/**
+ * Inter Driver communication section
+ */
+
+/**
+ * Maximum number of elements per channel
+ * 2x2 FIFOS, 1 SCALER, 1 ROTATOR, 4 CONV, 2 GAMMA, 1 ISP_BAYER, 1 ISP_YUV
+ */
+#define AVI_R2R_MAX_ELEMENTS_PER_CHANNEL 16
+
+struct avi_r2r_context;
+
+/* Structure defining operations (callbacks) */
+struct avi_r2r_ops
+{
+ /* Callback to notify the client after processing frame */
+ void (*done)(struct avi_r2r_context *, struct avi_dma_buffer *, struct avi_dma_buffer *, struct avi_dma_buffer *, void *);
+ /* Callback to notify the client after processing frame and that
+ * internal spinlock has been released
+ */
+ void (*finish)(struct avi_r2r_context *, void *);
+ /* Callback to setup the client before processing frame */
+ void (*configure)(struct avi_r2r_context *, struct avi_dma_buffer *, struct avi_dma_buffer *, void *);
+};
+
+/* Structure containing the ram2ram context */
+struct avi_r2r_context
+{
+ /* operations */
+ struct avi_r2r_ops *ops;
+ /* device */
+ struct device *dev;
+
+ /* capabilities required */
+ u32 caps;
+
+ /* number of stacked jobs not processed yet */
+ atomic_t stacked_nr;
+
+ /* use has job(s) in progress (bool) */
+ atomic_t running;
+
+ /* private part managed by driver (driver privacy) */
+ void *avi_r2r_private;
+
+ /* private data for user,
+ * driver uses it as 4th parameter for callbacks
+ */
+ void *private;
+};
+
+
+/**
+ * Exported functions
+ */
+
+/** check if ram2ram driver is available and if requirements are supported.
+ *
+ * @requirements is a bitmask of the capabilities requested.
+ * Returns 0 on success, negative errno on error.
+ * -ENODEV if driver is not available
+ * -ENOENT if no suitable channel was found
+ *
+ *
+ */
+extern
+int avi_r2r_is_available(u32 requirements);
+
+/** Initiate the session with the ram2ram driver.
+ *
+ * @ctx is the context
+ * @dev is the device requesting the channel
+ * @requirements is a bitmask of the capabilities requested.
+ * @ops are operations
+ *
+ * /!\ Prior to this call
+ *
+ * Returns 0 on success, negative errno on error.
+ * -ENODEV if driver is not available
+ * -ENOENT if no suitable channel was found
+ * -ENOMEM if new user could not be allocated
+ *
+ * By default the channel is initialized to "sane" default values: the
+ * converter/gamma are bypassed, no scaling or rotation etc...
+ *
+ * It's then up to the client driver to initialize the elements it needs.
+ *
+ * The "DMA_IN" and "DMA_OUT" capabilities don't have to be specified since
+ * they're mandatory in ram2ram operation and are therefore always implied.
+ *
+ * /!\ the same client (same "dev") is allowed to make several calls to this
+ * function to request several ram2ram contexts (one for scaling, one for
+ * rotation etc...).
+ *
+ */
+extern
+int avi_r2r_request(struct avi_r2r_context *ctx,
+ struct device *dev,
+ u32 requirements,
+ struct avi_r2r_ops *ops);
+
+/** Destroys a context initialized by a successful avi_r2r_request
+ *
+ * @ctx is the context
+ *
+ * Returns 0 on success, negative errno on error.
+ * -ENODEV if driver is not available
+ * -ENOENT if user context is not found
+ * -EBUSY if some jobs have not been processed yet
+ */
+extern
+int avi_r2r_destroy(struct avi_r2r_context *ctx);
+
+/** return status of user's jobs.
+ *
+ * /!\ This function can be called at any moment on a valid context.
+ *
+ * @ctx is the context
+ * @stacked_nr will provide number of jobs on stack
+ * @running will provide a boolean indicating if a job is in progress
+ * Returns 0 on success, negative errno on error.
+ * -ENODEV if driver is not available
+ *
+ */
+extern
+int avi_r2r_get_status(struct avi_r2r_context *ctx,
+ int *stacked_nr,
+ int *running);
+
+/* The various setup functions used to configure the ram2ram operation
+ *
+ * /!\ These functions can be called at any moment on a valid context.
+ *
+ * @ctx is the context
+ *
+ * Returns 0 on success, negative errno on error.
+ * -ENODEV if driver is not available
+ * -ENOENT if user context is not found
+ * -EINVAL if parameters can not be processed with capabilities requested
+ * -EBUSY if requests have not been processed yet
+ *
+ * The operation is defined by the following parameters; input format, output
+ * format, internal transformations.
+ *
+ * The internal transformations supported are those requested by the driver in
+ * the "cap" parameter of avi_r2r_request. For now those are: gamma correction
+ * and colorspace conversion.
+ */
+
+/**
+ * Gamma conversion
+ */
+extern
+int avi_r2r_setup_gam(struct avi_r2r_context *ctx,
+ bool bypass,
+ bool palette,
+ bool comp_width,
+ struct avi_cmap *cmap);
+
+/**
+ * Rotator
+ */
+extern
+int avi_r2r_setup_rotator(struct avi_r2r_context *ctx,
+ enum avi_rotation angle, bool flip);
+
+/** Input and output format configuration.
+ *
+ * /!\ This function can be called at any moment on a valid context.
+ */
+extern
+int avi_r2r_set_format(struct avi_r2r_context *ctx,
+ struct avi_segment_format *infmt,
+ struct avi_segment_format *outfmt,
+ struct avi_segment_format *composefmt,
+ struct avi_segment_layout *layout);
+
+
+/** Enqueue buffers for processing.
+ *
+ * @ctx is the context
+ * @in_dma is input buffer
+ * @out_dma is output buffer
+ *
+ * Returns
+ * 0 on success,
+ * 1 if user already streaming
+ * negative errno on error.
+ * -ENODEV if driver is not available
+ * -ENOENT if user context is not found
+ * -EINVAL if parameters can not be processed with capabilities requested
+ *
+ */
+
+extern
+int avi_r2r_buf_queue(struct avi_r2r_context *ctx,
+ struct avi_dma_buffer *in_dma,
+ struct avi_dma_buffer *out_dma,
+ struct avi_dma_buffer *stats_dma);
+
+extern
+struct avi_segment * avi_r2r_get_segment(struct avi_r2r_context *ctx,
+ int in_out,
+ int *index);
+
+static inline struct avi_segment * avi_r2r_get_dma_in_segment(
+ struct avi_r2r_context *ctx,
+ int *index)
+{
+ return avi_r2r_get_segment(ctx, 0, index);
+}
+
+static inline struct avi_segment * avi_r2r_get_dma_out_segment(
+ struct avi_r2r_context *ctx,
+ int *index)
+{
+ return avi_r2r_get_segment(ctx, 1, index);
+}
+#endif /* _AVI_R2R_H_ */
diff --git a/drivers/parrot/media/video/avi_r2r_core.c b/drivers/parrot/media/video/avi_r2r_core.c
new file mode 100644
index 00000000000000..c852a706b2aec0
--- /dev/null
+++ b/drivers/parrot/media/video/avi_r2r_core.c
@@ -0,0 +1,2351 @@
+/**
+ * @file linux/drivers/parrot/media/video/avi_r2r_core.c
+ * Parrot AVI RAM to RAM intra-kernel driver.
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2014-02-06
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "avi_r2r_core.h"
+
+#include "avi_stats.h"
+
+/**
+ * command line parameters
+ */
+
+#ifdef CONFIG_AVI_DEBUG
+/* size of timings saved */
+#define _NR_TIMINGS_SAVED_DEFAULT (PAGE_SIZE / sizeof(struct timings))
+static unsigned int nr_timings_saved = _NR_TIMINGS_SAVED_DEFAULT;
+module_param(nr_timings_saved, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_timings_saved, "number of timings saved");
+#endif /* CONFIG_AVI_DEBUG */
+
+static int z_order = 1;
+module_param(z_order, int, S_IRUGO);
+MODULE_PARM_DESC(z_order, "Z order of plane managed by this driver : default 1");
+
+#define _WORKS_POOL_DEFAULT (2 * PAGE_SIZE / sizeof(struct avi_r2r_work))
+static unsigned int works_pool = _WORKS_POOL_DEFAULT;
+module_param(works_pool, uint, S_IRUGO);
+MODULE_PARM_DESC(works_pool, "size of works pool");
+
+#define AVI_PIXFMT_PLANAR_DEFAULT AVI_PIXFMT_NV12
+#define PLANAR_PIXEL_SIZE0 1
+#define PLANAR_PIXEL_SIZE1 1
+#define AVI_CSPACE_PLANAR_DEFAULT AVI_BT709_CSPACE
+
+#define AVI_PIXFMT_RGB_DEFAULT AVI_PIXFMT_RGB565
+#define RGB_PIXEL_SIZE0 2
+#define RGB_PIXEL_SIZE1 0
+#define AVI_CSPACE_RGB_DEFAULT AVI_RGB_CSPACE
+
+#define AVI_PIXFMT_ISP_DEFAULT AVI_PIXFMT_BAYER_1X10_16
+#define ISP_PIXEL_SIZE0 2
+#define ISP_PIXEL_SIZE1 0
+#define AVI_CSPACE_ISP_DEFAULT AVI_NULL_CSPACE
+
+#define WIDTH_DEFAULT 2048
+#define HEIGHT_DEFAULT 2048
+#define ALPHA_DEFAULT 0xFF
+#define BACKGROUND_RED_DEFAULT 0x00
+#define BACKGROUND_GREEN_DEFAULT 0x00
+#define BACKGROUND_BLUE_DEFAULT 0x00
+#define BACKGROUND_DEFAULT (BACKGROUND_RED_DEFAULT<<16 | \
+ BACKGROUND_GREEN_DEFAULT<<8 | \
+ BACKGROUND_BLUE_DEFAULT) /* RGB */
+
+static struct avi_r2r_user_format const planar_default_format = {
+ .source = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_PLANAR_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_PLANAR_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE1
+ },
+ },
+ .target = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_PLANAR_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_PLANAR_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE1
+ },
+ },
+ .compose= {
+ .width = 0,
+ .height = 0,
+ .colorspace = 0,
+ .pix_fmt = AVI_PIXFMT_INVALID,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = 0
+ },
+ .plane1 = {
+ .line_size = 0
+ },
+ },
+ .use_compose = 0,
+ .layout = {
+ .x =0,
+ .y =0,
+ .hidden = 0,
+ .alpha = ALPHA_DEFAULT,
+ },
+ .background = BACKGROUND_DEFAULT
+};
+
+static struct avi_r2r_user_format const rgb_default_format = {
+ .source = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_RGB_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_RGB_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*RGB_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*RGB_PIXEL_SIZE1
+ },
+ },
+ .target = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_RGB_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_RGB_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*RGB_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*RGB_PIXEL_SIZE1
+ },
+ },
+ .compose= {
+ .width = 0,
+ .height = 0,
+ .colorspace = 0,
+ .pix_fmt = AVI_PIXFMT_INVALID,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = 0
+ },
+ .plane1 = {
+ .line_size = 0
+ },
+ },
+ .use_compose = 0,
+ .layout = {
+ .x =0,
+ .y =0,
+ .hidden = 0,
+ .alpha = ALPHA_DEFAULT,
+ },
+ .background = BACKGROUND_DEFAULT
+};
+
+static struct avi_r2r_user_format const isp_default_format = {
+ .source = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_ISP_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_ISP_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*ISP_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*ISP_PIXEL_SIZE1
+ },
+ },
+ .target = {
+ .width = WIDTH_DEFAULT,
+ .height = HEIGHT_DEFAULT,
+ .colorspace = AVI_CSPACE_PLANAR_DEFAULT,
+ .pix_fmt = AVI_PIXFMT_PLANAR_DEFAULT,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE0
+ },
+ .plane1 = {
+ .line_size = WIDTH_DEFAULT*PLANAR_PIXEL_SIZE1
+ },
+ },
+ .compose= {
+ .width = 0,
+ .height = 0,
+ .colorspace = 0,
+ .pix_fmt = AVI_PIXFMT_INVALID,
+ .interlaced = 0,
+ .plane0 = {
+ .line_size = 0
+ },
+ .plane1 = {
+ .line_size = 0
+ },
+ },
+ .use_compose = 0,
+ .layout = {
+ .x =0,
+ .y =0,
+ .hidden = 0,
+ .alpha = ALPHA_DEFAULT,
+ },
+ .background = BACKGROUND_DEFAULT
+};
+
+static struct avi_r2r avi_r2r_data;
+
+/**
+ * User Context management
+ */
+
+static inline void avi_r2r__user_context_free(
+ struct avi_r2r_user_context *user_context)
+{
+#ifdef CONFIG_AVI_DEBUG
+ avi_r2r__user_destroy_debugfs(user_context);
+#endif /* CONFIG_AVI_DEBUG */
+ user_context->magic = 0;
+ user_context->magic_end = 0;
+ kfree(user_context);
+}
+
+static struct avi_r2r_user_context * avi_r2r__user_context_alloc(struct avi_r2r *data)
+{
+ struct avi_r2r_user_context *new_ctx;
+ new_ctx=kzalloc(sizeof(struct avi_r2r_user_context), GFP_KERNEL);
+ if (new_ctx) {
+ new_ctx->magic = _AVI_R2R_CTX_ID_MAGIC;
+ new_ctx->magic_end = _AVI_R2R_CTX_ID_MAGIC;
+ }
+ return new_ctx;
+}
+
+/**
+ * Work pool management
+ * a linked list of free work
+ */
+static int avi_r2r__work_pool_init(struct avi_r2r_channel_data *chan,
+ unsigned int pool_size)
+{
+ struct avi_r2r_work *tmp;
+ unsigned int i;
+
+ BUG_ON(!pool_size);
+
+ chan->work_pool = kzalloc(sizeof(struct avi_r2r_work) * pool_size,
+ GFP_KERNEL);
+ if (!chan->work_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&(chan->work_pool->list));
+ chan->work_pool_size = pool_size;
+ chan->work_pool_free = pool_size;
+
+ for (i=0; i < pool_size; i++)
+ {
+ tmp = &chan->work_pool[i];
+ list_add_tail(&(tmp->list),&(chan->work_pool->list));
+ }
+
+ return 0;
+}
+
+static struct avi_r2r_work *avi_r2r__work_pool_pull_slot(
+ struct avi_r2r_channel_data *chan)
+{
+ struct avi_r2r_work *tmp = NULL;
+ struct list_head *pos;
+
+ if (list_empty(&(chan->work_pool->list)))
+ return NULL;
+
+ pos = chan->work_pool->list.next; /* first item in list */
+ chan->work_pool_free --;
+
+ list_del(pos);
+
+ tmp = (struct avi_r2r_work *)pos;
+
+ return tmp;
+}
+
+static void avi_r2r__work_pool_push_slot(struct avi_r2r_channel_data *chan,
+ struct avi_r2r_work *work)
+{
+ list_add_tail(&(work->list),&(chan->work_pool->list));
+ chan->work_pool_free ++;
+}
+/**
+ * Work queue management
+ * a linked list is used as a FIFO
+ */
+static inline void avi_r2r__work_init(struct avi_r2r_work *work)
+{
+ work->magic = _AVI_R2R_WORK_MAGIC;
+ ktime_get_ts(&work->ts_push);
+}
+
+#define BUG_ON_BAD_WORK(_work) BUG_ON((_work)->magic != _AVI_R2R_WORK_MAGIC || \
+ !(_work)->channel || \
+ !(_work)->user);
+
+static inline void avi_r2r__work_cpy(struct avi_r2r_work *dst,
+ struct avi_r2r_work *src)
+{
+ dst->magic = src->magic;
+ dst->channel = src->channel;
+ dst->user = src->user;
+ dst->source_buffer = src->source_buffer;
+ dst->target_buffer = src->target_buffer;
+ dst->stats_buffer = src->stats_buffer;
+ dst->ts_push = src->ts_push;
+}
+
+static inline int avi_r2r__work_queue_empty(struct avi_r2r_work_queue *wq)
+{
+ return list_empty(&(wq->works.list));
+}
+
+static void avi_r2r__work_queue_init(struct avi_r2r_work_queue *wq)
+{
+ INIT_LIST_HEAD(&(wq->works.list));
+ wq->works_nr = 0;
+}
+
+static void avi_r2r__work_queue_delete(struct avi_r2r_channel_data *chan)
+{
+ struct avi_r2r_work_queue *wq = &chan->work_queue;
+ struct avi_r2r_work *tmp;
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &(wq->works.list)) {
+ tmp = list_entry(pos, struct avi_r2r_work, list);
+ list_del(pos);
+ avi_r2r__work_pool_push_slot(chan, tmp);
+ }
+}
+
+
+static void avi_r2r__work_queue_push(struct avi_r2r_channel_data *chan,
+ struct avi_r2r_work *work)
+{
+ struct avi_r2r_work_queue *wq = &chan->work_queue;
+ struct avi_r2r_work *tmp;
+
+ tmp = avi_r2r__work_pool_pull_slot(chan);
+ BUG_ON(!tmp);
+
+ avi_r2r__work_init(work);
+
+ avi_r2r__work_cpy(tmp, work);
+
+ BUG_ON_BAD_WORK(tmp);
+
+ list_add_tail(&(tmp->list),&(wq->works.list));
+ wq->works_nr++;
+}
+
+static int avi_r2r__work_queue_pull(struct avi_r2r_channel_data *chan,
+ struct avi_r2r_work *work)
+{
+ struct avi_r2r_work_queue *wq = &chan->work_queue;
+ struct avi_r2r_work *tmp;
+ struct list_head *pos;
+ int ret = 0;
+
+ if (!avi_r2r__work_queue_empty(wq)) {
+
+ pos=wq->works.list.next;/* first item in list */
+ list_del(pos);
+ wq->works_nr--;
+
+ tmp = (struct avi_r2r_work *)pos;
+ avi_r2r__work_cpy(work, tmp);
+
+ avi_r2r__work_pool_push_slot(chan, tmp);
+
+ BUG_ON_BAD_WORK(work);
+
+ ret=1;
+ }
+
+ return ret;
+}
+
+/**
+ * Gamma correction configuration management
+ */
+static inline void avi_r2r__gamma_set_registers(
+ struct avi_r2r_gamma_correction_config *in_config,
+ struct avi_r2r_gamma_correction_config *registers)
+{
+ *registers = *in_config;
+}
+
+static void avi_r2r__gamma_init_configuration(
+ struct avi_r2r_gamma_correction_config *in_config,
+ struct avi_r2r_gamma_correction_config *registers)
+{
+ memset(in_config, 0, sizeof(struct avi_r2r_gamma_correction_config));
+ in_config->palette = 0;
+ in_config->bypass = 1;
+ in_config->comp_width = 0;
+ avi_r2r__gamma_set_registers(in_config, registers);
+}
+
+static void avi_r2r__write_gamma_registers(
+ struct avi_node const* node,
+ struct avi_r2r_gamma_correction_config *user_registers,
+ struct avi_r2r_gamma_correction_config *chan_registers)
+{
+ int do_write=0;
+ if (0 != memcmp(chan_registers,
+ user_registers,
+ sizeof(struct avi_r2r_gamma_correction_config)))
+ do_write= 1;
+ if (do_write) {
+ *chan_registers = *user_registers;
+ avi_gam_setup( node,
+ chan_registers->bypass,
+ chan_registers->palette,
+ chan_registers->comp_width);
+ if (!chan_registers->bypass) {
+ avi_gam_set_cmap(node, &chan_registers->cmap);
+ }
+ }
+}
+
+static inline enum avi_r2r_channel_status avi_r2r__get_channel_status(struct avi_r2r_channel_data *channel)
+{
+ enum avi_r2r_channel_status ret;
+
+ ret = atomic_read(&channel->status);
+ return ret;
+}
+
+static inline bool avi_r2r__get_channel_is_running(struct avi_r2r_channel_data *channel)
+{
+ return (avi_r2r__get_channel_status(channel) == AVI_R2R_CHANNEL_RUNNING);
+}
+
+static inline bool avi_r2r__get_channel_is_active(struct avi_r2r_channel_data *channel)
+{
+ bool ret = 0;
+
+ ret = (channel->output_segment->active == AVI_SEGMENT_ACTIVE_ONESHOT);
+ return ret;
+}
+
+static inline bool avi_r2r__channel_is_ok(struct avi_r2r_channel_data *channel)
+{
+ if (channel)
+ return
+ ((channel->magic & _AVI_R2R_CHANNEL_MAGIC_MASK) ==
+ _AVI_R2R_CHANNEL_MAGIC) &&
+ ((channel->magic_end & _AVI_R2R_CHANNEL_MAGIC_MASK) ==
+ _AVI_R2R_CHANNEL_MAGIC) &&
+ (channel->magic_end == channel->magic);
+ return 0;
+}
+
+static inline int avi_r2r__get_user_status(struct avi_r2r_user_context *user)
+{
+ struct avi_r2r *avi_r2r = user->avi_r2r;
+
+ int ret = atomic_read(&user->context->stacked_nr);
+
+ AVIR2R_LOG_DEBUG(avi_r2r,
+ "%s() user %p running=%d stacked=%d",
+ __func__,
+ user,
+ atomic_read(&user->context->running), ret);
+
+ if (atomic_read(&user->context->running))
+ ret++;
+ return ret;
+}
+
+static void avi_r2r__requirements_mask(u32 requirements,
+ union avi_r2r_configuration_mask *mask)
+{
+ mask->_mask=0;
+ /* mandatory */
+ mask->buffers=1;
+ mask->formats=1;
+ if (requirements & AVI_CAP_CONV) mask->converter=1;
+ if (requirements & AVI_CAP_GAM) mask->gamma=1;
+ if (requirements & AVI_CAP_SCAL) mask->scaler=1;
+ if (requirements & AVI_CAP_ROT) mask->rotator=1;
+}
+
+/**
+ * Initialize user data
+ */
+static void avi_r2r__user_initialize(
+ struct avi_r2r_user_context *user,
+ u32 requirements,
+ const u32 channels_compliant,
+ const int channels_compliant_nr)
+{
+ user->requirements= requirements;
+ user->config_done._mask=0;
+
+ avi_r2r__requirements_mask(
+ user->requirements,
+ &user->requirements_mask);
+
+ user->channels_compliant = channels_compliant;
+ user->channels_compliant_nr = channels_compliant_nr;
+
+ avi_r2r__gamma_init_configuration(
+ &user->gamma_user_config,
+ &user->registers.gam);
+
+#ifdef CONFIG_AVI_DEBUG
+ avi_r2r__user_setup_debugfs(user);
+#endif /* CONFIG_AVI_DEBUG */
+
+ return;
+}
+#define CHECK_ITEM(_test, _msg, arg...) \
+ if (_test) { \
+ AVIR2R_LOG_DEBUG_EXT(&avi_r2r_data, _msg, ##arg); \
+ return false; \
+ }
+static bool avi_r2r__check_format(struct avi_segment_format *fmt, bool use_dma)
+{
+ int w;
+ enum avi_pixel_packing packing;
+
+ CHECK_ITEM(fmt->width == 0,"width is zero");
+ CHECK_ITEM(fmt->height == 0,"height is zero");
+
+ if (use_dma) {
+ CHECK_ITEM(fmt->plane0.line_size == 0, "line size #0 is zero");
+
+ packing = avi_pixfmt_get_packing(fmt->pix_fmt);
+ if (packing == AVI_SEMIPLANAR_YUV_422_PACKING ||
+ packing == AVI_SEMIPLANAR_YUV_420_PACKING ||
+ packing == AVI_INTERLEAVED_YUV_422_PACKING)
+ /* width must be even to match the subsampling */
+ CHECK_ITEM(fmt->width % 2,
+ "width not even (%d)",
+ fmt->width);
+
+ if (packing == AVI_SEMIPLANAR_YUV_420_PACKING)
+ /* height must be even to match the subsampling */
+ CHECK_ITEM(fmt->height % 2,
+ "height not even (%d)",
+ fmt->height);
+
+ w = fmt->width * avi_pixel_size0(fmt->pix_fmt);
+ CHECK_ITEM(w > fmt->plane0.line_size,
+ "line size #0 too large (%d > %d)",
+ w, fmt->plane0.line_size);
+
+ if (avi_pixel_size1(fmt->pix_fmt)) {
+
+ CHECK_ITEM(fmt->plane1.line_size == 0,
+ "line size #1 is zero");
+
+ w = fmt->width * avi_pixel_size1(fmt->pix_fmt);
+
+ CHECK_ITEM(w > fmt->plane1.line_size,
+ "line size #1 too large (%d > %d)",
+ w, fmt->plane1.line_size);
+ }
+ }
+ return true;
+}
+
+static bool avi_r2r__check_buffer(struct avi_segment_format *fmt,
+ struct avi_dma_buffer *buf)
+{
+ CHECK_ITEM(buf->status != AVI_BUFFER_READY,
+ "buffer not ready");
+
+ CHECK_ITEM(buf->plane0.dma_addr == 0,
+ "buffer plane #0 address is zero");
+
+ if (fmt->plane1.line_size != 0)
+ CHECK_ITEM(buf->plane1.dma_addr == 0,
+ "buffer plane #1 address is zero");
+
+ return true;
+}
+
+static bool avi_r2r__check_formats_and_requirements(struct avi_r2r_context *ctx,
+ struct avi_segment_format *infmt,
+ struct avi_segment_format *outfmt,
+ struct avi_segment_format *composefmt,
+ struct avi_segment_layout *layout)
+{
+ u32 l_req=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT);
+ bool ret = false;
+
+ if (avi_r2r__check_format(infmt, 1) &&
+ avi_r2r__check_format(outfmt, 1)) {
+ if (infmt->width != outfmt->width ||
+ infmt->height != outfmt->height)
+ l_req |= AVI_CAP_SCAL;
+
+ if ((ctx->caps & l_req) == l_req)
+ ret = true;
+ }
+
+ if (composefmt && layout) {
+ if (avi_r2r__check_format(composefmt, 0)) {
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+#undef CHECK_ITEM
+
+/**
+ * Segment formats management
+ * Composing: left = compose, right = target
+ * +--------+ +------+-------+ +--------+
+ * | source |-- INPUT --> | left | right |--> OUTPUT --> | target |
+ * +--------+ +------+-------+ +--------+
+ *
+ * Not composing: left = target, right = target
+ */
+static int avi_r2r_apply_format(struct avi_r2r_channel_data *channel,
+ struct avi_r2r_user_format *format)
+{
+ struct avi_segment_format left_fmt, right_fmt;
+ struct avi_r2r *avi_r2r;
+ int ret = 0;
+
+ avi_r2r = channel->avi_r2r;
+
+ /* setup formats on segments */
+ right_fmt = format->target;
+ /* force pixel format and line sizes for non-DMA format */
+ right_fmt.pix_fmt = AVI_PIXFMT_INVALID;
+ right_fmt.plane0.line_size = 0;
+ right_fmt.plane1.line_size = 0;
+
+ if (format->use_compose)
+ left_fmt = format->compose;
+ else
+ left_fmt = right_fmt;
+
+ /* force non-DMA */
+ left_fmt.pix_fmt = AVI_PIXFMT_INVALID;
+ left_fmt.plane0.line_size = 0;
+ left_fmt.plane1.line_size = 0;
+
+ if ((memcmp(&channel->output_segment->input_format,
+ &right_fmt,
+ sizeof(struct avi_segment_format))!=0) ||
+ (memcmp(&channel->output_segment->output_format,
+ &format->target,
+ sizeof(struct avi_segment_format))!=0) ) {
+ ret = avi_segment_set_format(channel->output_segment,
+ &right_fmt,
+ &format->target);
+
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r, "%s() Channel %s "
+ "avi_segment_set_format output(%d)",
+ __func__,
+ channel->output_segment->id,
+ ret);
+ return ret;
+ }
+ }
+
+ if ((memcmp(&channel->input_segment->input_format,
+ &format->source,
+ sizeof(struct avi_segment_format))!=0) ||
+ (memcmp(&channel->input_segment->output_format,
+ &left_fmt,
+ sizeof(struct avi_segment_format))!=0) ||
+ (memcmp(&channel->input_segment->layout,
+ &format->layout,
+ sizeof(struct avi_segment_layout))!=0)) {
+ ret = avi_segment_set_format_and_layout(channel->input_segment,
+ &format->source,
+ &left_fmt,
+ &format->layout);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r,"%s() Channel %s "
+ "avi_segment_set_format_and_layout input(%d)",
+ __func__,
+ channel->input_segment->id,
+ ret);
+ return ret;
+ }
+ }
+
+ if (format->use_compose) {
+ avi_segment_set_background(channel->output_segment,
+ format->background);
+ }
+
+ return 0;
+}
+
+static int avi_r2r_apply_stats_format(struct avi_r2r_channel_data *channel,
+ struct avi_r2r_user_format *format)
+{
+ struct avi_segment_format stats_fmt = {
+ .width = AVI_STATS_WIDTH,
+ .height = AVI_STATS_HEIGHT,
+ .interlaced = 0,
+ .colorspace = 0,
+ };
+ struct avi_segment_format dstats_fmt;
+ struct avi_r2r *avi_r2r = channel->avi_r2r;
+ int ret = 0;
+
+ /* Configure bayer stats */
+ avi_statistics_bayer_configure(
+ (struct avi_node *) channel->avi_nodes.bayer_node,
+ format->source.width,
+ format->source.height,
+ 64, 48);
+
+ /* Configure segment format */
+ dstats_fmt = stats_fmt;
+ dstats_fmt.pix_fmt = AVI_PIXFMT_BGRA8888;
+ dstats_fmt.plane0.line_size = dstats_fmt.width * 4;
+
+ /* Set format */
+ ret = avi_segment_set_format(channel->stats_segment, &stats_fmt,
+ &dstats_fmt);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r, "%s() Channel %s "
+ "avi_segment_set_stats_format stats(%d)",
+ __func__,
+ channel->stats_segment->id,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int avi_r2r_set_default_format(struct avi_r2r_channel_data *channel)
+{
+ struct avi_r2r_user_format *format;
+ int ret;
+
+ if ((channel->input_segment->caps & AVI_CAPS_ISP) == AVI_CAPS_ISP)
+ format = (struct avi_r2r_user_format *)&isp_default_format;
+ else
+ if (channel->input_segment->caps & AVI_CAP_PLANAR)
+ format = (struct avi_r2r_user_format *)&planar_default_format;
+ else
+ format = (struct avi_r2r_user_format *)&rgb_default_format;
+
+ ret = avi_r2r_apply_format(channel, format);
+
+ if (channel->stats_segment)
+ ret |= avi_r2r_apply_stats_format(channel, format);
+
+ return ret;
+}
+
+static int avi_r2r__activate_channel(struct avi_r2r_work *work)
+{
+ struct avi_r2r *avi_r2r;
+ int ret = 0;
+
+ BUG_ON_BAD_WORK(work);
+ BUG_ON(!avi_r2r__channel_is_ok(work->channel));
+ BUG_ON(!avi_r2r__user_context_is_ok(work->user));
+
+ BUG_ON(avi_r2r__get_channel_is_running(work->channel));
+ BUG_ON(avi_r2r__get_channel_is_active(work->channel));
+
+ avi_r2r = work->channel->avi_r2r;
+
+ AVIR2R_LOG_DEBUG_EXT(avi_r2r, ">>> %s() Channel %s user %p",
+ __func__,
+ work->channel->output_segment->id,
+ work->user);
+
+ /* callback configure; user lock will locked/unlocked */
+ BUG_ON(!work->user->context->ops->configure);
+ work->user->context->ops->configure(work->user->context,
+ &work->source_buffer,
+ &work->target_buffer,
+ work->user->context->private);
+
+ BUG_ON(!avi_r2r__check_buffer(&work->user->format.source,
+ &work->source_buffer));
+ BUG_ON(!avi_r2r__check_buffer(&work->user->format.target,
+ &work->target_buffer));
+
+ ret = avi_r2r_apply_format(work->channel, &work->user->format);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r, "%s() Channel %s "
+ "avi_r2r_apply_format output(%d)",
+ __func__,
+ work->channel->output_segment->id,
+ ret);
+ goto activate_error;
+ }
+
+ /* Apply format for stats output */
+ if (work->channel->stats_segment &&
+ work->stats_buffer.plane0.dma_addr &&
+ avi_r2r_apply_stats_format(work->channel, &work->user->format)) {
+ AVIR2R_LOG_ERROR(avi_r2r, "%s() Channel %s "
+ "avi_r2r_apply_stats_format output(%d)",
+ __func__,
+ work->channel->stats_segment->id,
+ ret);
+ goto activate_error;
+ }
+
+ /* setup nodes not managed by segment API */
+ if (work->channel->avi_nodes.gam_node && work->user->config_done.gamma)
+ avi_r2r__write_gamma_registers(
+ work->channel->avi_nodes.gam_node,
+ &work->user->registers.gam,
+ &work->channel->registers.gam);
+
+ /* set up buffers */
+ avi_segment_set_input_buffer(work->channel->input_segment,
+ &work->source_buffer);
+
+ avi_segment_set_output_buffer(work->channel->output_segment,
+ &work->target_buffer);
+
+ if (work->channel->stats_segment &&
+ work->stats_buffer.plane0.dma_addr)
+ avi_segment_set_output_buffer(work->channel->stats_segment,
+ &work->stats_buffer);
+
+ atomic_set(&work->channel->status, AVI_R2R_CHANNEL_RUNNING);
+ atomic_set(&work->user->context->running, 1);
+ work->user->config_done.buffers = 1;
+
+ avi_r2r__work_cpy(&work->channel->current_work, work);
+
+ BUG_ON_BAD_WORK(&work->channel->current_work);
+ BUG_ON(work->channel->current_work.user != work->user);
+ BUG_ON(work->channel->current_work.channel != work->channel);
+
+ work->channel->ts_push = work->channel->current_work.ts_push;
+
+ /* Activate stats segment */
+ if (work->channel->stats_segment && work->stats_buffer.plane0.dma_addr)
+ {
+ ret = avi_segment_activate_oneshot(
+ work->channel->stats_segment);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r,"%s() Channel %s "
+ "avi_segment_activate stats(%d) %d %s %p",
+ __func__,
+ work->channel->stats_segment->id,
+ ret,
+ work->channel->stats_segment->active,
+ dev_name(work->user->context->dev),
+ work->user);
+ goto activate_error;
+ }
+ }
+
+ /* activate segments:
+ * - enable all nodes
+ * - set FIFOs in single mode
+ * - apply dma_out */
+ ret = avi_segment_activate_oneshot(work->channel->input_segment);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r,"%s() Channel %s "
+ "avi_segment_activate input(%d) %d %s %p",
+ __func__,
+ work->channel->input_segment->id,
+ ret,
+ work->channel->input_segment->active,
+ dev_name(work->user->context->dev),
+ work->user);
+ if (work->channel->stats_segment)
+ avi_segment_deactivate(work->channel->stats_segment);
+ goto activate_error;
+ }
+
+ ret = avi_segment_activate_oneshot(work->channel->output_segment);
+ if (ret) {
+ AVIR2R_LOG_ERROR(avi_r2r,"%s() Channel %s "
+ "avi_segment_activate output(%d) %d %s %p",
+ __func__,
+ work->channel->output_segment->id,
+ ret,
+ work->channel->output_segment->active,
+ dev_name(work->user->context->dev),
+ work->user);
+ if (work->channel->stats_segment)
+ avi_segment_deactivate(work->channel->stats_segment);
+ avi_segment_deactivate(work->channel->input_segment);
+ goto activate_error;
+ }
+
+ ktime_get_ts(&work->channel->ts_start);
+ work->channel->apply_counter++;
+
+ AVIR2R_LOG_DEBUG_EXT(avi_r2r,"<<< %s() Channel %s",
+ __func__,
+ work->channel->output_segment->id);
+ return 0;
+
+activate_error:
+ /* done processing call back */
+ AVIR2R_LOG_DEBUG(avi_r2r,
+ "%s() channel %s error: calling callback done()",
+ __func__,
+ work->channel->output_segment->id);
+ work->source_buffer.status = AVI_BUFFER_ERROR;
+ work->target_buffer.status = AVI_BUFFER_ERROR;
+ work->stats_buffer.status = AVI_BUFFER_ERROR;
+ BUG_ON(!work->user->context->ops->done);
+ work->user->context->ops->done(work->user->context,
+ &work->source_buffer,
+ &work->target_buffer,
+ &work->stats_buffer,
+ work->user->context->private);
+ AVIR2R_LOG_DEBUG(avi_r2r,"%s() Channel %s FAIL (%d)",
+ __func__,
+ work->channel->output_segment->id,
+ ret);
+ return ret;
+}
+
+/**
+ * Interrupt function
+ * Called at the end of the Source FIFO DMA transfer.
+ *
+ * Only bumps per-segment interrupt counters under the channel lock;
+ * the real work completion is performed by avi_r2r__interrupt() when
+ * the Target FIFO finishes.  Runs in interrupt context.  Always
+ * returns 0.
+ */
+static unsigned avi_r2r__aux_interrupt(struct avi_segment *segment,
+				       enum avi_field *field)
+{
+	struct avi_r2r_channel_data *channel = segment->priv;
+	struct avi_r2r *avi_r2r;
+	BUG_ON(!avi_r2r__channel_is_ok(channel));
+
+	spin_lock(&channel->lock);
+
+	/* A FIFO interrupt implies a work is in flight on this channel. */
+	BUG_ON(!avi_r2r__get_channel_is_running(channel));
+
+	/* output segment (=active channel) has been deactivated
+	 * by segment interrupt handler before calling of this handler */
+	BUG_ON(avi_r2r__get_channel_is_active(channel));
+
+	avi_r2r = channel->avi_r2r;
+
+	/* NOTE(review): a segment presumably cannot be both the input and
+	 * the output of the same channel, so at most one counter below is
+	 * incremented per call — confirm against segment setup. */
+	if (segment == channel->input_segment) {
+		channel->input_segment_interrupts_counter++;
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() segment %s",
+				__func__,
+				channel->input_segment->id);
+	}
+	if (segment == channel->output_segment) {
+		channel->output_segment_interrupts_counter++;
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() segment %s",
+				__func__,
+				channel->output_segment->id);
+	}
+
+	spin_unlock(&channel->lock);
+
+	return 0;
+}
+/**
+ * Interrupt function
+ * Called at the end of the Target FIFO DMA transfer, i.e. when the
+ * current work has been fully written out to RAM.
+ *
+ * Completes the channel's current work: updates counters/timings,
+ * resolves the stats buffer status, invokes the user's done()
+ * callback, marks the channel inactive and, unless a suspend is
+ * pending, pulls the next queued work and activates it.
+ *
+ * Runs in interrupt context; the channel lock is held around
+ * everything except the trailing finish() callback.  Always returns 0.
+ */
+static unsigned avi_r2r__interrupt(struct avi_segment *segment,
+				   enum avi_field *field)
+{
+	struct avi_r2r_channel_data *channel = segment->priv;
+	struct avi_r2r *avi_r2r;
+	struct avi_r2r_user_context *current_user;
+	struct avi_r2r_context *ctx;
+	struct avi_r2r_work work;
+	int ret;
+
+	BUG_ON(!avi_r2r__channel_is_ok(channel));
+
+	spin_lock(&channel->lock);
+
+	BUG_ON(!avi_r2r__get_channel_is_running(channel));
+
+	/* output segment (=active channel) has been deactivated
+	 * by segment interrupt handler before calling of this handler */
+	BUG_ON(avi_r2r__get_channel_is_active(channel));
+
+	BUG_ON_BAD_WORK(&channel->current_work);
+
+	BUG_ON(!avi_r2r__channel_is_ok(channel->current_work.channel));
+	BUG_ON(channel->magic != channel->current_work.channel->magic);
+
+	/* (these two assertions used to be duplicated verbatim) */
+	BUG_ON(!avi_r2r__user_context_is_ok(channel->current_work.user));
+	BUG_ON(!channel->current_work.user->context);
+
+	avi_r2r = channel->avi_r2r;
+
+	current_user = channel->current_work.user;
+	ctx = current_user->context;
+
+	AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+			">>> %s() channel %s user %p",
+			__func__,
+			channel->output_segment->id,
+			channel->current_work.user);
+
+	ktime_get_ts(&channel->ts_stop);
+
+	channel->interrupts_counter++;
+	if (segment == channel->input_segment) {
+		channel->input_segment_interrupts_counter++;
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() segment %s",
+				__func__,
+				channel->input_segment->id);
+	}
+	if (segment == channel->output_segment) {
+		channel->output_segment_interrupts_counter++;
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() segment %s",
+				__func__,
+				channel->output_segment->id);
+	}
+	if (avi_node_get_irq_flag(channel->avi_nodes.target_fifo_node)) {
+		/* This interrupt has been acknowledged normally
+		 * by avi_segment_irq_handler, so a still-raised flag
+		 * means another, unexpected IRQ is pending. */
+		channel->spurious_interrupts_counter++;
+	}
+
+#ifdef CONFIG_AVI_DEBUG
+	avi_r2r__channel_add_timings(channel);
+#endif /* CONFIG_AVI_DEBUG */
+
+	avi_fifo_get_status(channel->avi_nodes.target_fifo_node);
+
+	/* The source FIFO IRQ flag is polled and acknowledged here rather
+	 * than in its own handler; count it when it did not fire. */
+	avi_fifo_get_status(channel->avi_nodes.source_fifo_node);
+	if (avi_node_get_irq_flag(channel->avi_nodes.source_fifo_node))
+		avi_ack_node_irq(channel->avi_nodes.source_fifo_node);
+	else
+		channel->missing_source_interrupts_counter++;
+
+	/* Same treatment for the second plane's FIFO when the source
+	 * format is planar. */
+	if (channel->avi_nodes.source_fifo_planar_node &&
+	    avi_pixfmt_is_planar(current_user->format.source.pix_fmt)) {
+		avi_fifo_get_status(channel->avi_nodes.source_fifo_planar_node);
+		if (avi_node_get_irq_flag(channel->avi_nodes.source_fifo_planar_node))
+			avi_ack_node_irq(channel->avi_nodes.source_fifo_planar_node);
+		else
+			channel->missing_planar_interrupts_counter++;
+	}
+
+	/* Do stats checking */
+	if (channel->stats_segment &&
+	    channel->current_work.stats_buffer.plane0.dma_addr) {
+		if (avi_node_get_irq_flag(channel->avi_nodes.stats_fifo_node))
+			channel->current_work.stats_buffer.status =
+				AVI_BUFFER_DONE;
+		else
+			/* Looks like the stats are not ready... */
+			channel->current_work.stats_buffer.status =
+				AVI_BUFFER_ERROR;
+
+		/* Deactivate segment since IRQ for this segment is disabled and
+		 * it is activated in oneshot mode.
+		 */
+		avi_segment_deactivate(channel->stats_segment);
+
+		/* Make sure we never leave the IT flag raised */
+		avi_ack_node_irq(channel->avi_nodes.stats_fifo_node);
+	}
+
+	/* if all works have been unstacked, the user is no longer running */
+	if (atomic_read(&ctx->stacked_nr) == 0)
+		atomic_set(&ctx->running, 0);
+
+	/* done processing call back */
+	AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+			"%s() channel %s calling callback done()",
+			__func__,
+			channel->output_segment->id);
+	channel->current_work.source_buffer.status = AVI_BUFFER_DONE;
+	channel->current_work.target_buffer.status = AVI_BUFFER_DONE;
+	BUG_ON(!ctx->ops->done);
+	ctx->ops->done(ctx,
+		       &channel->current_work.source_buffer,
+		       &channel->current_work.target_buffer,
+		       &channel->current_work.stats_buffer,
+		       ctx->private);
+
+	/* save current user (future features?) */
+	channel->previous_user = current_user;
+	/* deactivate channel */
+	atomic_set(&channel->status, AVI_R2R_CHANNEL_INACTIVE);
+	/* clear finished current work */
+	memset(&channel->current_work, 0, sizeof(channel->current_work));
+
+	if (avi_r2r->suspended) {
+		/* if suspend requested
+		 * do not start further processing
+		 */
+		AVIR2R_LOG_DEBUG(avi_r2r,"%s() suspended", __func__);
+		goto int_unlock;
+	}
+
+	/* Retry until a queued work activates successfully or the queue
+	 * drains (ret is reset each pass, so the loop always terminates). */
+	do {
+		ret = 0;
+		/* check queue: if jobs, pull one, activate it */
+		if (!avi_r2r__work_queue_empty(&channel->work_queue) &&
+		    avi_r2r__work_queue_pull(channel, &work)) {
+			BUG_ON_BAD_WORK(&work);
+			AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+					"%s() channel %s user %p %d "
+					"activate new work",
+					__func__,
+					channel->output_segment->id,
+					work.user,
+					atomic_read(&work.user->context->stacked_nr));
+			BUG_ON(atomic_dec_return(&work.user->context->stacked_nr) < 0);
+			ret = avi_r2r__activate_channel(&work);
+		}
+	} while (ret != 0);
+
+int_unlock:
+	AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+			"<<< %s() channel %s",
+			__func__,
+			channel->output_segment->id);
+	spin_unlock(&channel->lock);
+
+	/* Call finish callback since we are outside from spinlock */
+	BUG_ON(!ctx->ops->finish);
+	ctx->ops->finish(ctx, ctx->private);
+
+	return 0;
+}
+
+/* TODO: complete support for suspend / resume. */
+static int avi_r2r_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+	struct avi_r2r *r2r = dev_get_drvdata(&pdev->dev);
+
+	BUG_ON(!r2r);
+
+	AVIR2R_LOG_NOTICE(r2r, ">>> %s()", __func__);
+
+	/* Raising this flag stops the interrupt handler from starting
+	 * any further queued work. */
+	r2r->suspended = 1;
+
+	AVIR2R_LOG_NOTICE(r2r, "<<< %s()", __func__);
+	return 0;
+}
+
+/**
+ * avi_r2r_resume() - PM resume: restart queued work on every channel.
+ *
+ * Bug fix: the original do/while never reset @ret inside the loop, so
+ * once an activation failed and the queue then drained, ret stayed
+ * non-zero and the loop spun forever while holding the channel
+ * spinlock with IRQs off.  ret is now cleared on every pass and the
+ * last failure is remembered separately so the finish() callback can
+ * still be issued (the original "if (ret)" check was unreachable).
+ */
+static int avi_r2r_resume(struct platform_device *pdev)
+{
+	struct avi_r2r *avi_r2r = dev_get_drvdata(&pdev->dev);
+	struct avi_r2r_channel_data *channel;
+	struct avi_r2r_work work;
+	struct avi_r2r_work failed_work;
+	int i, ret = 0, err = 0;
+	unsigned long flags;
+
+	BUG_ON(!avi_r2r);
+
+	AVIR2R_LOG_NOTICE(avi_r2r, ">>> %s()", __func__);
+
+	avi_r2r->suspended = 0;
+	for (i = 0; i < avi_r2r->channels_nr; i++) {
+		channel = avi_r2r->channels[i];
+		BUG_ON(!channel);
+		spin_lock_irqsave(&channel->lock, flags);
+		do {
+			/* reset on each pass so the loop terminates when
+			 * the queue drains after a failed activation */
+			ret = 0;
+			/* check queue: if jobs, pull one, activate it */
+			if (!avi_r2r__work_queue_empty(&channel->work_queue) &&
+			    avi_r2r__work_queue_pull(channel, &work)) {
+				BUG_ON_BAD_WORK(&work);
+				AVIR2R_LOG_DEBUG(avi_r2r,
+						"%s() channel %s user %p %d "
+						"activate new work",
+						__func__,
+						channel->output_segment->id,
+						work.user,
+						atomic_read(&work.user->context->stacked_nr));
+				BUG_ON(atomic_dec_return(&work.user->context->stacked_nr) < 0);
+				ret = avi_r2r__activate_channel(&work);
+				if (ret) {
+					/* remember the failing work so its
+					 * finish() callback can be called
+					 * outside the spinlock */
+					err = ret;
+					failed_work = work;
+				}
+			}
+		} while (ret != 0);
+		spin_unlock_irqrestore(&channel->lock, flags);
+	}
+
+	/* An error has occured in channel activation and done callback has
+	 * been called, so we must call finish callback
+	 */
+	if (err) {
+		BUG_ON(!failed_work.user->context->ops->finish);
+		failed_work.user->context->ops->finish(failed_work.user->context,
+				failed_work.user->context->private);
+	}
+
+	AVIR2R_LOG_NOTICE(avi_r2r, "<<< %s()", __func__);
+	return 0;
+}
+
+/**
+ * avi_r2r__check_requirements() - count the channels whose capability
+ * mask covers @requirements.
+ *
+ * On return *@channels_compliant is a bitmask of matching channel
+ * indexes.  Returns the number of matching channels, or -1 when none
+ * match (callers must therefore test for <= 0, not == 0).
+ */
+static int avi_r2r__check_requirements(struct avi_r2r *data,
+				       u32 requirements,
+				       u32 *channels_compliant)
+{
+	int idx, count = 0;
+
+	*channels_compliant = 0;
+
+	for (idx = 0; idx < data->channels_nr; idx++) {
+		if ((data->channels[idx]->available & requirements)
+		    != requirements)
+			continue;
+
+		*channels_compliant |= 1 << idx;
+		count++;
+	}
+
+	return count ? count : -1;
+}
+
+/**
+ * Exported functions
+ */
+
+
+/** check if ram2ram driver is available and if requirements are supported.
+ *
+ * Returns 0 when at least one channel covers @requirements (DMA in/out
+ * are always implied), -ENODEV when the driver is not probed, -ENOENT
+ * when no channel matches.
+ */
+int avi_r2r_is_available(u32 requirements)
+{
+	u32 channels_compliant = 0;
+	int n;
+
+	if (!avi_r2r_data.dev)
+		return -ENODEV;
+
+	/* every channel must at least read from and write to RAM */
+	requirements |= (AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT);
+
+	/* avi_r2r__check_requirements() returns -1 (never 0) when no
+	 * channel matches, so the test must be "<= 0"; the previous
+	 * "== 0" check made this error path unreachable. */
+	if ((n = avi_r2r__check_requirements(&avi_r2r_data,
+					     requirements,
+					     &channels_compliant)) <= 0) {
+		AVIR2R_LOG_NOTICE(&avi_r2r_data,
+				"%s() can not find channel suitable "
+				"for requirements 0x%08X",
+				__func__, requirements);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_is_available);
+
+/** Initiate the session with the ram2ram driver.
+ *
+ * @ctx:          caller-owned context, filled in on success
+ * @dev:          client device, used for identification/logging
+ * @requirements: AVI_CAP_* mask; DMA in/out are always added
+ * @ops:          mandatory callbacks (configure/done/finish)
+ *
+ * Returns 0 on success, -ENODEV when not probed, -ENOENT when no
+ * channel covers the requirements, -ENOMEM on allocation failure.
+ */
+int avi_r2r_request(struct avi_r2r_context *ctx,
+		    struct device *dev,
+		    u32 requirements,
+		    struct avi_r2r_ops *ops)
+{
+#ifdef DEBUG
+	char buffer[64];
+	int c, i;
+#endif /* DEBUG */
+	u32 channels_compliant = 0;
+	struct avi_r2r_user_context *user_context;
+	int n;
+
+	BUG_ON(!ops);
+	BUG_ON(!dev);
+	BUG_ON(!ctx);
+
+	BUG_ON(!ops->configure || !ops->done || !ops->finish);
+
+	if (!avi_r2r_data.dev)
+		return -ENODEV;
+
+	requirements |= (AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT);
+
+	/* avi_r2r__check_requirements() returns -1 (never 0) when no
+	 * channel matches, so the test must be "<= 0"; the previous
+	 * "== 0" check made this error path unreachable. */
+	if ((n = avi_r2r__check_requirements(&avi_r2r_data,
+					     requirements,
+					     &channels_compliant)) <= 0) {
+		AVIR2R_LOG_NOTICE(&avi_r2r_data,
+				"%s() can not find channel suitable "
+				"for requirements 0x%08X",
+				__func__, requirements);
+		return -ENOENT;
+	}
+
+#ifdef DEBUG
+	/* format the compliant channel list, e.g. "[ 0 2 ]".
+	 * NOTE(review): buffer[64] is assumed large enough for
+	 * MAX_CHANNELS_AVI_R2R entries — confirm. */
+	c = 0;
+	c += sprintf(&buffer[c], "[");
+	for (i = 0; i < avi_r2r_data.channels_nr; i++)
+		if (channels_compliant & (1<<i))
+			c += sprintf(&buffer[c], " %d", i);
+	c += sprintf(&buffer[c], " ]");
+
+	AVIR2R_LOG_DEBUG(&avi_r2r_data,
+			"%s() found %d channels %s suitable"
+			" for requirements 0x%08X of %s",
+			__func__,
+			n, buffer, requirements,
+			dev_name(dev));
+#endif /* DEBUG */
+
+	user_context = avi_r2r__user_context_alloc(&avi_r2r_data);
+	if (!user_context) {
+		AVIR2R_LOG_ERROR(&avi_r2r_data,
+				"%s() could not create a new user",
+				__func__);
+		return -ENOMEM;
+	}
+
+	user_context->context = ctx;
+	user_context->avi_r2r = &avi_r2r_data;
+	avi_r2r__user_initialize(user_context,
+				 requirements,
+				 channels_compliant,
+				 n);
+	ctx->avi_r2r_private = user_context;
+	ctx->caps = requirements;
+	ctx->dev = dev;
+	ctx->ops = ops;
+	atomic_set(&ctx->stacked_nr, 0);
+	atomic_set(&ctx->running, 0);
+
+	AVIR2R_LOG_DEBUG(&avi_r2r_data,
+			"%s() created user %p (%s)",
+			__func__,
+			user_context,
+			dev_name(ctx->dev));
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_request);
+
+/** Destroys a context initialized by a successful avi_r2r_request().
+ *
+ * Fails with -EBUSY while the user still has queued or running work.
+ */
+int avi_r2r_destroy(struct avi_r2r_context *ctx)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *r2r;
+	int status;
+
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	r2r = user->avi_r2r;
+	if (!r2r->dev)
+		return -ENODEV;
+
+	status = avi_r2r__get_user_status(user);
+	if (status != 0) {
+		AVIR2R_LOG_WARN(r2r,
+				"%s() user %p (%s): work queue not empty (%d)",
+				__func__,
+				user,
+				dev_name(ctx->dev),
+				status);
+		return -EBUSY;
+	}
+
+	AVIR2R_LOG_DEBUG(r2r,
+			"%s() user %p (%s)",
+			__func__,
+			user,
+			dev_name(ctx->dev));
+
+	avi_r2r__user_context_free(user);
+	ctx->avi_r2r_private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_destroy);
+
+/* Snapshot the user's queue depth and running flag. */
+int avi_r2r_get_status(struct avi_r2r_context *ctx,
+		       int *stacked_nr,
+		       int *running)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *r2r;
+
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	r2r = user->avi_r2r;
+	if (!r2r->dev)
+		return -ENODEV;
+
+	*stacked_nr = atomic_read(&user->context->stacked_nr);
+	*running    = atomic_read(&user->context->running);
+
+	AVIR2R_LOG_DEBUG(r2r,
+			"%s() user %p (%s) running=%d stacked=%d",
+			__func__,
+			user,
+			dev_name(ctx->dev),
+			*running,
+			*stacked_nr);
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_get_status);
+
+/**
+ * Gamma conversion setup.
+ *
+ * Records the user's gamma configuration and pre-computes the matching
+ * register values; they are applied when a work is activated.
+ */
+int avi_r2r_setup_gam(struct avi_r2r_context *ctx,
+		      bool bypass,
+		      bool palette,
+		      bool comp_width,
+		      struct avi_cmap *cmap)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *r2r;
+
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	r2r = user->avi_r2r;
+	if (!r2r->dev)
+		return -ENODEV;
+
+	/* reject users that did not request the gamma capability */
+	if (!user->requirements_mask.gamma)
+		return -EINVAL;
+
+	AVIR2R_LOG_DEBUG(r2r,
+			"%s() user %p (%s)",
+			__func__,
+			user,
+			dev_name(ctx->dev));
+
+	user->gamma_user_config.bypass     = bypass;
+	user->gamma_user_config.palette    = palette;
+	user->gamma_user_config.comp_width = comp_width;
+	user->gamma_user_config.cmap       = *cmap;
+
+	avi_r2r__gamma_set_registers(&user->gamma_user_config,
+				     &user->registers.gam);
+
+	user->config_done.gamma = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_setup_gam);
+
+/* rotator: record the requested rotation angle and flip for the
+ * user's future works */
+int avi_r2r_setup_rotator(struct avi_r2r_context *ctx,
+			  enum avi_rotation angle, bool flip)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *r2r;
+
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	r2r = user->avi_r2r;
+	if (!r2r->dev)
+		return -ENODEV;
+
+	/* reject users that did not request the rotator capability */
+	if (!user->requirements_mask.rotator)
+		return -EINVAL;
+
+	AVIR2R_LOG_DEBUG(r2r,
+			"%s() user %p (%s)",
+			__func__,
+			user,
+			dev_name(ctx->dev));
+
+	user->rotator_user_config.angle = angle;
+	user->rotator_user_config.flip  = flip;
+
+	/* NOTE(review): no register pre-computation happens here (unlike
+	 * the gamma setup), hence this warning — confirm intent. */
+	AVIR2R_LOG_WARN(r2r,
+			"Rotator setup required");
+
+	user->config_done.rotator = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_setup_rotator);
+
+/** Input and output format configuration.
+ *
+ * Validates the requested source/target (and optional compose) formats
+ * against the user's capabilities, then records them.  Interlaced
+ * modes are not supported: both formats are forced progressive.
+ *
+ * Returns 0 on success, -ENODEV when the driver is gone, -EINVAL when
+ * the formats are rejected.
+ */
+int avi_r2r_set_format(struct avi_r2r_context *ctx,
+		       struct avi_segment_format *infmt,
+		       struct avi_segment_format *outfmt,
+		       struct avi_segment_format *composefmt,
+		       struct avi_segment_layout *layout)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *avi_r2r;
+
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	avi_r2r = user->avi_r2r;
+	if (!avi_r2r->dev)
+		return -ENODEV;
+
+	AVIR2R_LOG_DEBUG(avi_r2r,
+			"%s() user %p (%s)",
+			__func__,
+			user,
+			dev_name(ctx->dev));
+
+	if (!avi_r2r__check_formats_and_requirements(ctx,
+						     infmt,
+						     outfmt,
+						     composefmt,
+						     layout))
+		return -EINVAL;
+
+	user->format.source = *infmt;
+	user->format.target = *outfmt;
+
+	/* we are working only in progressive mode */
+	user->format.source.interlaced = 0;
+	user->format.target.interlaced = 0;
+
+	/* composing is enabled only when BOTH pieces are provided */
+	if (composefmt && layout) {
+		/* composing is used */
+		user->format.use_compose = 1;
+		user->format.layout = *layout;
+		user->format.compose = *composefmt;
+	}
+	else {
+		/* if composing is not used,
+		 * force layout to neutral
+		 * force compose format to target format
+		 */
+		user->format.use_compose = 0;
+		user->format.layout.x = 0;
+		user->format.layout.y = 0;
+		user->format.layout.hidden = 0;
+		user->format.layout.alpha = 0xFF; /* fully opaque */
+		user->format.compose = user->format.target;
+	}
+
+	/* force compose format to non-DMA and progressive */
+	user->format.compose.interlaced = 0;
+	user->format.compose.plane0.line_size = 0;
+	user->format.compose.plane1.line_size = 0;
+	user->format.compose.pix_fmt = AVI_PIXFMT_INVALID;
+
+	user->config_done.formats = 1;
+
+	/* scaler/converter need no setup call beyond the formats */
+	if (user->requirements_mask.scaler)
+		user->config_done.scaler = 1;
+
+	if (user->requirements_mask.converter)
+		user->config_done.converter = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_r2r_set_format);
+
+/** Put DMA buffers into queue for processing.
+ *
+ * Picks the compliant channel with the lowest load (queue level, plus
+ * one if it is currently running), then either activates the work
+ * immediately (idle channel with an empty queue) or pushes it onto
+ * that channel's queue.
+ *
+ * Returns 0 on success, -ENODEV when the driver is gone or no channel
+ * is compliant, -EINVAL on missing format setup or invalid buffers, or
+ * the error of a failed immediate activation.
+ *
+ * NOTE(review): stats_dma is dereferenced unconditionally below, so
+ * callers must always pass a valid pointer — confirm the contract.
+ */
+int avi_r2r_buf_queue(struct avi_r2r_context *ctx,
+		      struct avi_dma_buffer *in_dma,
+		      struct avi_dma_buffer *out_dma,
+		      struct avi_dma_buffer *stats_dma)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *avi_r2r;
+	struct avi_r2r_channel_data *channel;
+	struct avi_r2r_work work;
+	unsigned long chan_flags[MAX_CHANNELS_AVI_R2R];
+	int found, c, ret = 0;
+	int i, lvl, min_lvl, idx_min;
+
+	/* is a valid user ? */
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+
+	avi_r2r = user->avi_r2r;
+	if (!avi_r2r->dev)
+		return -ENODEV;
+
+	AVIR2R_LOG_DEBUG_EXT(avi_r2r, ">>> %s()", __func__);
+
+	/* check formats set */
+	if (!user->config_done.formats) {
+		AVIR2R_LOG_WARN(avi_r2r,
+				"%s() user %p (%s) formats not set",
+				__func__,
+				user,
+				dev_name(ctx->dev));
+		ret = -EINVAL;
+		goto on_error;
+	}
+
+	/* check buffers */
+	if (!avi_r2r__check_buffer(&user->format.source, in_dma) ||
+	    !avi_r2r__check_buffer(&user->format.target, out_dma) ) {
+		AVIR2R_LOG_WARN(avi_r2r,
+				"%s() user %p (%s) invalid buffers",
+				__func__,
+				user,
+				dev_name(ctx->dev));
+		ret = -EINVAL;
+		goto on_error;
+	}
+
+	/* lock all channels so that load measurement and channel
+	 * selection are atomic with respect to the IRQ handlers */
+	for (c=0; c < avi_r2r->channels_nr; c++) {
+		spin_lock_irqsave(&avi_r2r->channels[c]->lock, chan_flags[c]);
+	}
+	/* look for a channel compliant with the least pending load */
+	idx_min = -1;
+	min_lvl = INT_MAX;
+	for (i = avi_r2r->channels_nr - 1; i >= 0 ; i-- ) {
+		if (user->channels_compliant & (1<<i)) {
+			channel = avi_r2r->channels[i];
+			lvl = avi_r2r__work_queue_level(&channel->work_queue);
+			if (avi_r2r__get_channel_is_running(channel))
+				lvl += 1;
+			if (lvl < min_lvl) {
+				min_lvl = lvl;
+				idx_min = i;
+			}
+		}
+	}
+	found = idx_min;
+
+	if (found < 0) {
+		/* unlock all channels */
+		for (c=0; c < avi_r2r->channels_nr; c++) {
+			spin_unlock_irqrestore(&avi_r2r->channels[c]->lock,
+					       chan_flags[c]);
+		}
+		ret = -ENODEV;
+		goto on_error;
+	}
+
+	/* unlock unused channels; keep the selected one locked */
+	for (c=0; c < avi_r2r->channels_nr; c++) {
+		if (c != found)
+			spin_unlock_irqrestore(&avi_r2r->channels[c]->lock,
+					       chan_flags[c]);
+	}
+
+	channel = avi_r2r->channels[found];
+	work.user = user;
+	work.channel = channel;
+	work.source_buffer = *in_dma;
+	work.target_buffer = *out_dma;
+	work.stats_buffer = *stats_dma;
+
+	ret = 0;
+	if (avi_r2r__work_queue_empty(&channel->work_queue) &&
+	    !avi_r2r__get_channel_is_running(channel) &&
+	    !avi_r2r__get_channel_is_active(channel)) {
+		/* channel compliant
+		 * and inactive
+		 * and queue empty
+		 * --> activate it
+		 */
+		avi_r2r__work_init(&work);
+
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() user %p (%s) "
+				"Activate channel #%d",
+				__func__,
+				user,
+				dev_name(ctx->dev),
+				found);
+
+		ret = avi_r2r__activate_channel(&work);
+	}
+	else {
+		/* channel compliant
+		 * or running
+		 * or queue not empty
+		 * --> push work on it
+		 */
+
+		AVIR2R_LOG_DEBUG_EXT(avi_r2r,
+				"%s() user %p (%s) "
+				"Push on channel #%d",
+				__func__,
+				user,
+				dev_name(ctx->dev),
+				found);
+
+		avi_r2r__work_queue_push(channel, &work);
+		atomic_inc(&ctx->stacked_nr);
+	}
+
+	/* unlock compliant channel */
+	spin_unlock_irqrestore(&channel->lock,
+			       chan_flags[found]);
+
+	/* An error has occured in channel activation and done callback has been
+	 * called, so we must call finish callback
+	 */
+	if (ret) {
+		BUG_ON(!ctx->ops->finish);
+		ctx->ops->finish(ctx, ctx->private);
+	}
+
+on_error:
+	AVIR2R_LOG_DEBUG_EXT(avi_r2r, "<<< %s() ret=%d", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_r2r_buf_queue);
+
+/* Return the input (in_out == 0) or output (in_out != 0) segment of
+ * the first channel compliant with the user; its index is stored in
+ * *ret_index (-1 and NULL when no compliant channel exists). */
+struct avi_segment * avi_r2r_get_segment(struct avi_r2r_context *ctx,
+					 int in_out,
+					 int *ret_index)
+{
+	struct avi_r2r_user_context *user = ctx->avi_r2r_private;
+	struct avi_r2r *r2r;
+	struct avi_r2r_channel_data *chan;
+	struct avi_segment *segment = NULL;
+	int i;
+
+	/* is a valid user ? */
+	BUG_ON(!avi_r2r__user_context_is_ok(user));
+	*ret_index = -1;
+
+	r2r = user->avi_r2r;
+	if (!r2r->dev)
+		return NULL;
+
+	for (i = 0; i < r2r->channels_nr; i++) {
+		if (!(user->channels_compliant & (1 << i)))
+			continue;
+
+		chan = r2r->channels[i];
+		segment = in_out ? chan->output_segment
+				 : chan->input_segment;
+		*ret_index = i;
+		break;
+	}
+
+	if (!segment)
+		*ret_index = -1;
+	return segment;
+}
+EXPORT_SYMBOL(avi_r2r_get_segment);
+
+/**
+ * Init/Exit module functions
+ */
+/* Tear down every channel: drain pending works, quiesce the hardware
+ * (IRQs, FIFOs, segments), release the segments, then free debugfs
+ * entries, work pools and the channel allocations.  NULL entries in
+ * the channel array are skipped, so this is safe to call on a
+ * partially-initialized driver (probe error path). */
+static void avi_r2r_destroy_channels(struct avi_r2r *avi_r2r)
+{
+	struct avi_r2r_channel_data *chan_data;
+	int c;
+
+	for (c = 0; c < MAX_CHANNELS_AVI_R2R; c++)
+	{
+		chan_data = avi_r2r->channels[c];
+		if (chan_data) {
+			/* only channels that completed setup (status >=
+			 * INACTIVE) own segments to quiesce and release */
+			if (atomic_read(&chan_data->status) >=
+			    AVI_R2R_CHANNEL_INACTIVE) {
+				/* Clean remaining works */
+				avi_r2r__work_queue_delete(chan_data);
+
+				/* Disable segment IRQ (DMA_OUT) */
+				avi_segment_disable_irq(chan_data->output_segment);
+
+				/* to recover in case of freeze r2r */
+				if (chan_data->stats_segment)
+					avi_apply_node(chan_data->avi_nodes.stats_fifo_node);
+				avi_fifo_force_clear(chan_data->avi_nodes.target_fifo_node);
+
+				/* Deactivate stats */
+				if (chan_data->stats_segment)
+					avi_segment_deactivate(chan_data->stats_segment);
+
+				avi_segment_deactivate(chan_data->input_segment);
+
+				avi_segment_deactivate(chan_data->output_segment);
+
+				/* Release segments */
+				avi_segment_disconnect(chan_data->input_segment,
+						       chan_data->output_segment);
+
+				/* Destroy stats */
+				if (chan_data->stats_segment)
+					avi_segment_teardown(chan_data->stats_segment);
+
+				avi_segment_teardown(chan_data->input_segment);
+				avi_segment_teardown(chan_data->output_segment);
+
+				atomic_set(&chan_data->status, AVI_R2R_CHANNEL_UNUSED);
+			}
+
+#ifdef CONFIG_AVI_DEBUG
+			avi_r2r__channel_destroy_debugfs(chan_data);
+#endif /* CONFIG_AVI_DEBUG */
+			if (chan_data->work_pool)
+				kfree(chan_data->work_pool);
+
+			kfree(chan_data);
+		}
+	}
+}
+
+/**
+ * avi_r2r__check_channel_nodes() - resolve and record the AVI nodes
+ * backing a channel, according to the requested capabilities.
+ *
+ * The source and target FIFOs (and the stats FIFO when a stats segment
+ * exists) are mandatory.  Each optional capability bit in @caps must
+ * have a matching node in the input segment, except AVI_CAP_PLANAR
+ * whose absence only logs a warning.  Every node found is OR'ed into
+ * chan_data->available and counted in chan_data->avi_nodes.nodes_nr.
+ *
+ * Returns 0 on success, -ENODEV when a required node is missing.
+ */
+static int __devinit avi_r2r__check_channel_nodes(
+		struct avi_r2r_channel_data *chan_data,
+		const unsigned long caps)
+{
+	struct avi_r2r *avi_r2r = chan_data->avi_r2r;
+	struct avi_node *the_node = NULL, *scaler_node = NULL;
+
+	chan_data->avi_nodes.nodes_nr = 0;
+	/* check nodes */
+	chan_data->avi_nodes.source_fifo_node =
+		avi_segment_get_node(chan_data->input_segment,
+				     AVI_CAP_DMA_IN);
+	if (!chan_data->avi_nodes.source_fifo_node) {
+		AVIR2R_LOG_ERROR(avi_r2r,
+				 "Source FIFO is missing");
+		return -ENODEV;
+	}
+	else {
+		AVIR2R_LOG_DEBUG(avi_r2r,
+				 "-- %s (Source)",
+				 chan_data->avi_nodes.source_fifo_node->name);
+		chan_data->available |= AVI_CAP_DMA_IN;
+		chan_data->avi_nodes.nodes_nr++;
+	}
+
+	chan_data->avi_nodes.target_fifo_node =
+		avi_segment_get_node(chan_data->output_segment,
+				     AVI_CAP_DMA_OUT);
+	if (!chan_data->avi_nodes.target_fifo_node) {
+		AVIR2R_LOG_ERROR(avi_r2r,
+				 "Target FIFO is missing");
+		return -ENODEV;
+	}
+	else {
+		AVIR2R_LOG_DEBUG(avi_r2r,
+				 "-- %s (Target)",
+				 chan_data->avi_nodes.target_fifo_node->name);
+		chan_data->available |= AVI_CAP_DMA_OUT;
+		chan_data->avi_nodes.nodes_nr++;
+	}
+
+	/* Check stats fifo node */
+	if (chan_data->stats_segment) {
+		chan_data->avi_nodes.stats_fifo_node = avi_segment_get_node(
+						chan_data->stats_segment,
+						AVI_CAP_DMA_OUT);
+		if (!chan_data->avi_nodes.stats_fifo_node) {
+			AVIR2R_LOG_ERROR(avi_r2r, "Stats FIFO is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r, "-- %s (Stats)",
+				chan_data->avi_nodes.stats_fifo_node->name);
+			/* note: the stats FIFO is counted but does not add
+			 * a capability bit to chan_data->available */
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	if (caps & AVI_CAP_ISP_CHAIN_BAYER) {
+		chan_data->avi_nodes.bayer_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_ISP_CHAIN_BAYER);
+		if (!chan_data->avi_nodes.bayer_node) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "ISP Bayer Chain is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 chan_data->avi_nodes.bayer_node->name);
+			chan_data->available |= AVI_CAP_ISP_CHAIN_BAYER;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	/* the YUV chain, YUV stats and converter nodes are only checked
+	 * for presence: their node pointers are not kept */
+	if (caps & AVI_CAP_ISP_CHAIN_YUV) {
+		the_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_ISP_CHAIN_YUV);
+		if (!the_node) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "ISP YUV Chain is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 the_node->name);
+			chan_data->available |= AVI_CAP_ISP_CHAIN_YUV;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	if (caps & AVI_CAP_STATS_YUV) {
+		the_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_STATS_YUV);
+		if (!the_node) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "Statistics YUV is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 the_node->name);
+			chan_data->available |= AVI_CAP_STATS_YUV;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	if (caps & AVI_CAP_CONV) {
+		the_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_CONV);
+		if (!the_node) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "Color Space Converter is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 the_node->name);
+			chan_data->available |= AVI_CAP_CONV;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	if (caps & AVI_CAP_GAM) {
+		chan_data->avi_nodes.gam_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_GAM);
+		if (!chan_data->avi_nodes.gam_node) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "Gamma Correction is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 chan_data->avi_nodes.gam_node->name);
+			chan_data->available |= AVI_CAP_GAM;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	if (caps & AVI_CAP_SCAL) {
+		scaler_node =
+			avi_segment_get_node(chan_data->input_segment, AVI_CAP_SCAL);
+		if (!scaler_node)
+		{
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "Scaler is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 scaler_node->name);
+			chan_data->available |= AVI_CAP_SCAL;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+
+	/* planar input needs the scaler; a missing planar FIFO is only a
+	 * warning (the capability simply stays unavailable) */
+	if ( (caps & AVI_CAP_PLANAR) && scaler_node) {
+		chan_data->avi_nodes.source_fifo_planar_node =
+			avi_segment_get_node(chan_data->input_segment,
+					     AVI_CAP_PLANAR);
+		if (chan_data->avi_nodes.source_fifo_planar_node)
+		{
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s (Source Plane 1)",
+					 chan_data->avi_nodes.source_fifo_planar_node->name);
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 avi_get_node(scaler_node->node_id+1)->name);
+			chan_data->available |= AVI_CAP_PLANAR;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+		else
+		{
+			AVIR2R_LOG_WARN(avi_r2r,
+					"Planar is missing");
+		}
+	}
+
+	if (caps & AVI_CAP_ROT) {
+		chan_data->avi_nodes.rotator_node =
+			avi_segment_get_node(chan_data->input_segment, AVI_CAP_ROT);
+		if (!chan_data->avi_nodes.rotator_node)
+		{
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "Rotator is missing");
+			return -ENODEV;
+		}
+		else {
+			AVIR2R_LOG_DEBUG(avi_r2r,
+					 "-- %s",
+					 chan_data->avi_nodes.rotator_node->name);
+			chan_data->available |= AVI_CAP_ROT;
+			chan_data->avi_nodes.nodes_nr++;
+		}
+	}
+	return 0;
+}
+
+#define DMA_OUT_CAPS (AVI_CAP_DMA_OUT)
+/**
+ * avi_r2r_setup_channels() - build and register one ram2ram channel
+ * per platform data entry.
+ *
+ * For each entry: builds the output (DMA_OUT) and input segments,
+ * connects them, optionally builds a stats segment when the ISP bayer
+ * chain is requested, then allocates and initializes the channel data
+ * (nodes, IRQ handlers, work pool, debugfs).
+ *
+ * Returns 0 on success or a negative errno; on error, the resources
+ * acquired during the failing iteration are released.
+ */
+static int __devinit avi_r2r_setup_channels(struct avi_r2r *avi_r2r,
+					    struct avi_r2r_platform_data *pdata)
+{
+	struct avi_segment *out_seg = NULL, *in_seg = NULL;
+	struct avi_segment *stats_seg = NULL;
+	struct avi_r2r_channel_data *chan_data = NULL;
+	unsigned long required_caps, missing_caps;
+	int ret = 0, c;
+
+	avi_r2r->channels_nr = 0;
+	for (c = 0; pdata->channels[c].caps; c++) {
+		BUG_ON(c >= MAX_CHANNELS_AVI_R2R);
+
+		/* reset per-iteration state so that seg_error never frees
+		 * objects registered by a previous, successful iteration */
+		out_seg = in_seg = stats_seg = NULL;
+		chan_data = NULL;
+
+		if (avi_r2r->channels_nr >= MAX_CHANNELS_AVI_R2R) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+				"Maximum number of channels reached");
+			return -EINVAL;
+		}
+
+		/* force capabilities: every channel reads/writes RAM */
+		pdata->channels[c].caps |= AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT;
+
+		/* build output segment */
+		required_caps = missing_caps = pdata->channels[c].caps & DMA_OUT_CAPS;
+		out_seg = avi_segment_build(&missing_caps,
+					    "r2r",
+					    c,
+					    -1,
+					    avi_r2r->dev);
+		if (IS_ERR(out_seg)) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "couldn't build output segment %d,"
+					 " caps=%08lX, missing caps=%08lX",
+					 c,
+					 required_caps,
+					 missing_caps);
+			ret = PTR_ERR(out_seg);
+			/* don't let seg_error tear down an ERR_PTR */
+			out_seg = NULL;
+			goto seg_error;
+		}
+
+		/* build input segment */
+		required_caps = missing_caps = pdata->channels[c].caps & ~DMA_OUT_CAPS;
+		in_seg = avi_segment_build(&missing_caps,
+					   "r2r-in",
+					   c,
+					   -1,
+					   avi_r2r->dev);
+		if (IS_ERR(in_seg)) {
+			/* used to log the never-initialized gen_id buffer;
+			 * report the channel index instead */
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "couldn't build input segment %d,"
+					 " caps=%08lX, missing caps=%08lX",
+					 c,
+					 required_caps,
+					 missing_caps);
+			ret = PTR_ERR(in_seg);
+			in_seg = NULL;
+			goto seg_error;
+		}
+
+		/* connect segments */
+		ret = avi_segment_connect(in_seg, out_seg, z_order);
+		if (ret < 0) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+					 "couldn't connect segments %s to %s",
+					 in_seg->id,
+					 out_seg->id);
+			goto seg_error;
+		}
+
+		/* Add a segment for stats if ISP is used */
+		if (required_caps & AVI_CAP_ISP_CHAIN_BAYER) {
+			/* Find bayer stats */
+			required_caps = avi_isp_bayer_get_stats_cap(in_seg);
+			if (!required_caps) {
+				AVIR2R_LOG_ERROR(avi_r2r,
+						 "can't find stats cap");
+				/* ret was still 0 here before, making the
+				 * whole probe succeed despite the error */
+				ret = -ENODEV;
+				goto seg_error;
+			}
+
+			/* Build stats segment */
+			required_caps |= AVI_CAP_DMA_OUT;
+			stats_seg = avi_segment_build(&required_caps,
+						      "r2r-stats",
+						      c,
+						      -1,
+						      avi_r2r->dev);
+			if (IS_ERR(stats_seg)) {
+				AVIR2R_LOG_ERROR(avi_r2r,
+						 "couldn't build stats segment %d",
+						 c);
+				ret = PTR_ERR(stats_seg);
+				stats_seg = NULL;
+				goto seg_error;
+			}
+		}
+		else
+			stats_seg = NULL;
+
+		/* allocate data for channel */
+		chan_data = kzalloc(sizeof(struct avi_r2r_channel_data),
+				    GFP_KERNEL);
+		if (!chan_data) {
+			AVIR2R_LOG_ERROR(avi_r2r,
+				"couldn't allocate struct avi_r2r_channel_data");
+			ret = -ENOMEM;
+			goto seg_error;
+		}
+
+		/* initialize channel data */
+		chan_data->avi_r2r = avi_r2r;
+
+		out_seg->priv = chan_data;
+		in_seg->priv = chan_data;
+		chan_data->output_segment = out_seg;
+		chan_data->input_segment = in_seg;
+		chan_data->stats_segment = stats_seg;
+		atomic_set(&chan_data->status, AVI_R2R_CHANNEL_UNUSED);
+
+		AVIR2R_LOG_DEBUG(avi_r2r,
+				 ". Channel #%d", avi_r2r->channels_nr);
+		ret = avi_r2r__check_channel_nodes(chan_data,
+						   pdata->channels[c].caps);
+		if (ret < 0)
+			goto seg_error;
+
+		ret = avi_r2r_set_default_format(chan_data);
+		if (ret < 0)
+			goto seg_error;
+
+		/* Request segments' IRQ */
+		avi_segment_register_irq(chan_data->input_segment,
+					 &avi_r2r__interrupt);
+
+		avi_segment_register_irq(chan_data->output_segment,
+					 &avi_r2r__aux_interrupt);
+
+		/* Enable segment IRQ (DMA_OUT) */
+		avi_segment_enable_irq(chan_data->output_segment);
+
+		spin_lock_init(&chan_data->lock);
+		ret = avi_r2r__work_pool_init(chan_data, works_pool);
+		if (ret)
+			goto seg_error;
+		avi_r2r__work_queue_init(&chan_data->work_queue);
+		atomic_set(&chan_data->status, AVI_R2R_CHANNEL_INACTIVE);
+		/* magic values guard against stale/corrupted pointers */
+		chan_data->magic = _AVI_R2R_CHANNEL_MAGIC + c;
+		chan_data->magic_end = _AVI_R2R_CHANNEL_MAGIC + c;
+
+#ifdef CONFIG_AVI_DEBUG
+		chan_data->nr_timings_saved = nr_timings_saved;
+		ret = avi_r2r__channel_setup_debugfs(chan_data);
+		if (ret) {
+			kfree(chan_data->work_pool);
+			chan_data->work_pool = NULL;
+			goto seg_error;
+		}
+#endif /* CONFIG_AVI_DEBUG */
+
+		AVIR2R_LOG_DEBUG(avi_r2r,
+				 "- Channel #%d: %d nodes,"
+				 " IRQ Node %s, IRQ n° %d,"
+				 " Pool size %d",
+				 c,
+				 chan_data->avi_nodes.nodes_nr,
+				 chan_data->output_segment->irq_node->name,
+				 avi_node_irq(chan_data->output_segment->irq_node),
+				 works_pool);
+
+		avi_r2r->channels[c] = chan_data;
+		avi_r2r->channels_nr++;
+	}
+
+	{
+		int i;
+		AVIR2R_LOG_INFO(avi_r2r,
+				"Found %d channels", avi_r2r->channels_nr);
+		for (i = 0; i < avi_r2r->channels_nr; i++)
+			AVIR2R_LOG_INFO(avi_r2r,
+					"Channel #%d: %s", i,
+					avi_r2r->channels[i]->output_segment->id);
+	}
+	return 0;
+
+seg_error:
+	/* release whatever the failing iteration acquired (chan_data was
+	 * previously leaked on every error after its allocation) */
+	kfree(chan_data);
+	if (out_seg)
+		avi_segment_teardown(out_seg);
+	if (in_seg)
+		avi_segment_teardown(in_seg);
+	if (stats_seg)
+		avi_segment_teardown(stats_seg);
+	return ret;
+}
+
+/**
+ * avi_r2r_probe() - platform probe: publish the global avi_r2r_data,
+ * create the debugfs root (when enabled) and set up the channels.
+ *
+ * Bug fix: a failed probe used to leave avi_r2r_data.dev and the
+ * drvdata pointing at the dead device, so the exported API
+ * (avi_r2r_is_available()/avi_r2r_request()) believed the driver was
+ * operational; both are now cleared on every error path.
+ */
+static int __devinit avi_r2r_probe(struct platform_device *pdev)
+{
+	struct avi_r2r_platform_data *platform_data;
+	int ret;
+
+	dev_dbg(&pdev->dev, "Probing...\n");
+
+#define ERROR_OUT(lbl, msg, errno) \
+	do { ret = errno; \
+	     dev_err(&pdev->dev, "Probing failed: %s [%d]\n", msg, ret); \
+	     goto lbl; } \
+	while (0)
+
+	platform_data = dev_get_platdata(&pdev->dev);
+
+	if (!platform_data)
+		ERROR_OUT(exit, "failed to find platform data", -EINVAL);
+
+	dev_set_drvdata(&pdev->dev, &avi_r2r_data);
+
+	avi_r2r_data.dev = &pdev->dev;
+
+#ifdef CONFIG_AVI_DEBUG
+	if (nr_timings_saved < _NR_TIMINGS_SAVED_DEFAULT)
+		nr_timings_saved = _NR_TIMINGS_SAVED_DEFAULT;
+
+	avi_r2r_data.debugfs_root =
+		debugfs_create_dir(dev_name(&pdev->dev), NULL);
+	if (IS_ERR_OR_NULL(avi_r2r_data.debugfs_root))
+		ERROR_OUT(exit,
+			  "debugfs_create_dir failed",
+			  avi_r2r_data.debugfs_root ?
+			  PTR_ERR(avi_r2r_data.debugfs_root) : -ENOMEM);
+
+	dev_info(&pdev->dev, "DebugFS enabled\n");
+#endif /* CONFIG_AVI_DEBUG */
+
+	ret = avi_r2r_setup_channels(&avi_r2r_data, platform_data);
+	if (ret)
+		ERROR_OUT(avi_unregister,
+			  "setup channels failed", ret);
+
+#ifdef CONFIG_AVI_DEBUG
+	dev_dbg(&pdev->dev, "Saved timings: %d\n", nr_timings_saved);
+#endif /* CONFIG_AVI_DEBUG */
+
+	dev_dbg(&pdev->dev, "Probed\n");
+
+	return 0;
+
+avi_unregister:
+	avi_r2r_destroy_channels(&avi_r2r_data);
+exit:
+#ifdef CONFIG_AVI_DEBUG
+	/* debugfs_root may hold an ERR_PTR when its creation failed:
+	 * only a real dentry must be removed */
+	if (!IS_ERR_OR_NULL(avi_r2r_data.debugfs_root))
+		debugfs_remove_recursive(avi_r2r_data.debugfs_root);
+	avi_r2r_data.debugfs_root = NULL;
+#endif /* CONFIG_AVI_DEBUG */
+	/* a failed probe must not look operational to the exported API */
+	dev_set_drvdata(&pdev->dev, NULL);
+	avi_r2r_data.dev = NULL;
+	return ret;
+}
+
+/* Platform remove: tear down every channel, drop the debugfs tree and
+ * mark the driver unavailable for the exported API. */
+static int __devexit avi_r2r_remove(struct platform_device *pdev)
+{
+	struct avi_r2r *r2r = dev_get_drvdata(&pdev->dev);
+
+	dev_dbg(&pdev->dev, "Removing...\n");
+
+	BUG_ON(!r2r);
+
+	avi_r2r_destroy_channels(r2r);
+#ifdef CONFIG_AVI_DEBUG
+	if (r2r->debugfs_root)
+		debugfs_remove_recursive(r2r->debugfs_root);
+	r2r->debugfs_root = NULL;
+#endif /* CONFIG_AVI_DEBUG */
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	r2r->dev = NULL;
+
+	dev_dbg(&pdev->dev, "Removed\n");
+
+	return 0;
+}
+
+/* Platform driver glue: probe/remove plus the (still partial, see the
+ * TODO above avi_r2r_suspend) power-management callbacks. */
+static struct platform_driver avi_r2r_driver = {
+	.driver         = {
+		.name   = AVI_R2R_DRIVER_NAME,
+		.owner  = THIS_MODULE,
+	},
+	.probe          = &avi_r2r_probe,
+	.remove         = __devexit_p(avi_r2r_remove),
+	.suspend        = &avi_r2r_suspend,
+	.resume         = &avi_r2r_resume
+};
+
+/* Module load: register the platform driver. */
+static int __init avi_r2r_init(void)
+{
+	int ret;
+
+	pr_debug(AVI_R2R_DRIVER_NAME " Initializing...\n");
+
+	ret = platform_driver_register(&avi_r2r_driver);
+	if (ret)
+		pr_err(AVI_R2R_DRIVER_NAME " init fail (%d)\n", ret);
+	else
+		pr_info(AVI_R2R_DRIVER_NAME " Initialized\n");
+
+	return ret;
+}
+
+module_init(avi_r2r_init);
+
+/* Module unload: unregister the platform driver (the driver core then
+ * invokes avi_r2r_remove() for every bound device). */
+static void __exit avi_r2r_exit(void)
+{
+	pr_debug(AVI_R2R_DRIVER_NAME " Exiting...\n");
+	platform_driver_unregister(&avi_r2r_driver);
+	pr_debug(AVI_R2R_DRIVER_NAME " Exited\n");
+}
+
+module_exit(avi_r2r_exit);
+
+MODULE_AUTHOR("Didier Leymarie <didier.leymarie.ext@parrot.com>");
+MODULE_DESCRIPTION("RAM to RAM driver for Parrot7 Advanced Video Interface");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/parrot/media/video/avi_r2r_core.h b/drivers/parrot/media/video/avi_r2r_core.h
new file mode 100644
index 00000000000000..f78e06a4488a70
--- /dev/null
+++ b/drivers/parrot/media/video/avi_r2r_core.h
@@ -0,0 +1,317 @@
+#ifndef _AVI_R2R_CORE_H_
+#define _AVI_R2R_CORE_H_
+/**
+ * @file avi_r2r_core.h
+ * Parrot AVI RAM to RAM driver.
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2014-02-06
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/atomic.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include "avi_r2r.h"
+#include "video/avi_pixfmt.h"
+#include "video/avi_dma.h"
+#include "video/avi_debug.h"
+
+#define AVI_R2R_VERSION KERNEL_VERSION(0, 1, 0)
+
+/* Per-device logging helpers: route through dev_* with the driver device
+ * and append a trailing newline. DEBUG_EXT is currently identical to
+ * DEBUG (both expand to dev_dbg). */
+#define AVIR2R_LOG_INFO(data, fmt, arg...) \
+ dev_info((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_WARN(data, fmt, arg...) \
+ dev_warn((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_NOTICE(data, fmt, arg...) \
+ dev_notice((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_ERROR(data, fmt, arg...) \
+ dev_err((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_DEBUG(data, fmt, arg...) \
+ dev_dbg((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_DEBUG_EXT(data, fmt, arg...) \
+ dev_dbg((data)->dev, fmt "\n", ##arg)
+
+/* Lifecycle states of a ram2ram channel (kept in channel->status, an
+ * atomic_t); _NR is the number of states, used to size the debugfs
+ * status-name table. */
+enum avi_r2r_channel_status {
+ AVI_R2R_CHANNEL_UNUSED,
+ AVI_R2R_CHANNEL_INACTIVE,
+ AVI_R2R_CHANNEL_RUNNING,
+ AVI_R2R_CHANNEL_STATUS_NR
+};
+
+/**
+ * Data structures definition
+ */
+
+/**
+ * Gamma correction
+ */
+struct avi_r2r_gamma_correction_config {
+ struct avi_cmap cmap;
+ struct
+ {
+ /* single-bit configuration flags of the gamma block */
+ unsigned bypass :1;
+ unsigned palette :1;
+ unsigned comp_width :1;
+ };
+};
+
+struct avi_r2r_rotator_config {
+ /* rotation angle */
+ enum avi_rotation angle;
+ /* horizontal mirror (flip) */
+ bool flip;
+};
+
+/* Register-level configuration cache (currently only the gamma block) */
+struct avi_r2r_registers {
+
+ struct avi_r2r_gamma_correction_config gam;
+};
+
+/* One flag per configurable element; _mask overlays the bitfield so the
+ * whole set can be tested/assigned in one go. */
+union avi_r2r_configuration_mask {
+ struct {
+ unsigned buffers:1;
+ unsigned formats:1;
+ unsigned gamma:1;
+ unsigned converter:1;
+ unsigned scaler:1;
+ unsigned rotator:1;
+ unsigned isp_bayer:1;
+ unsigned isp_yuv:1;
+ };
+ u32 _mask;
+};
+
+struct avi_r2r;
+struct avi_r2r_channel_data;
+struct avi_r2r_user_context;
+
+/**
+ * Operation Queue item
+ */
+/* Guard word stored in avi_r2r_work.magic (ASCII "R2D2") */
+#define _AVI_R2R_WORK_MAGIC 0x52324432
+struct avi_r2r_work {
+ struct list_head list;
+ unsigned int magic;
+ struct avi_r2r_user_context *user;
+ struct avi_r2r_channel_data *channel;
+ /* timestamp recorded when the work is pushed (see timings debugfs) */
+ struct timespec ts_push;
+ struct avi_dma_buffer source_buffer;
+ struct avi_dma_buffer target_buffer;
+ struct avi_dma_buffer stats_buffer;
+};
+
+/* Pending-work queue: 'works' is the list head, works_nr its length */
+struct avi_r2r_work_queue {
+ struct avi_r2r_work works;
+ int works_nr;
+};
+
+/* Number of works currently queued */
+static inline int avi_r2r__work_queue_level(struct avi_r2r_work_queue *wq)
+{
+ return wq->works_nr;
+}
+
+/* Segment formats requested by a user: source/target DMA formats plus an
+ * optional compositing format/layout (active when use_compose is set). */
+struct avi_r2r_user_format {
+ struct avi_segment_format source;
+ struct avi_segment_format target;
+ struct avi_segment_format compose;
+ struct avi_segment_layout layout;
+ u32 background;
+ bool use_compose;
+};
+/**
+ * User context definition
+ */
+#define _AVI_R2R_CTX_ID_MAGIC 0x52325255
+#define _AVI_R2R_CTX_UUID_MASK 0x00FFFFFF
+#define _AVI_R2R_CTX_UUID_MAGIC 0x55000000
+
+struct avi_r2r_user_context {
+ /* guard words: magic/magic_end bracket the struct to detect corruption */
+ unsigned int magic;
+#ifdef CONFIG_AVI_DEBUG
+ struct dentry *debugfs_root;
+#endif /* CONFIG_AVI_DEBUG */
+
+ /* capability bits required by this user, and the set/count of channels
+ * that can satisfy them */
+ u32 requirements;
+ u32 channels_compliant;
+ int channels_compliant_nr;
+
+ struct avi_r2r *avi_r2r;
+
+ /* Configuration of elements (user) */
+ struct avi_r2r_context *context;
+ struct avi_r2r_user_format format;
+
+ union avi_r2r_configuration_mask requirements_mask;
+ union avi_r2r_configuration_mask config_done;
+
+ struct avi_r2r_gamma_correction_config gamma_user_config;
+ struct avi_r2r_rotator_config rotator_user_config;
+
+ struct avi_r2r_registers registers;
+ unsigned int magic_end;
+};
+
+/* Sanity check on a user context pointer: non-NULL and both guard words
+ * intact. NOTE(review): magic_end is compared against the same
+ * _AVI_R2R_CTX_ID_MAGIC as magic while _AVI_R2R_CTX_UUID_MAGIC above is
+ * unused here — confirm that is intentional. */
+static inline bool avi_r2r__user_context_is_ok(
+ const struct avi_r2r_user_context *user_context)
+{
+ if (user_context)
+ return (user_context->magic == _AVI_R2R_CTX_ID_MAGIC &&
+ user_context->magic_end == _AVI_R2R_CTX_ID_MAGIC );
+ return 0;
+}
+
+/**
+ * Channel definition
+ */
+/* AVI pipeline nodes attached to one ram2ram channel; optional nodes are
+ * NULL when the channel does not provide the capability. */
+struct avi_r2r_channel_nodes {
+ /* AVI Nodes */
+ int nodes_nr;
+ /* Source FIFO */
+ const struct avi_node *source_fifo_node;
+
+ const struct avi_node *source_fifo_planar_node;
+
+ /* Target FIFO */
+ const struct avi_node *target_fifo_node;
+
+ /* Stats FIFO */
+ const struct avi_node *stats_fifo_node;
+
+ /* optional rotator for rotation */
+ const struct avi_node *rotator_node;
+
+ /* optional gamma correction module */
+ const struct avi_node *gam_node;
+
+ /* optional ISP bayer chain */
+ const struct avi_node *bayer_node;
+};
+
+/* Channel guard word; the low nibble (outside _MAGIC_MASK) carries the
+ * channel index, as used by the debugfs code. */
+#define _AVI_R2R_CHANNEL_MAGIC 0x52324330
+#define _AVI_R2R_CHANNEL_MAGIC_MASK 0xFFFFFFF0
+
+/* One timing sample: push/start/stop timestamps of a processed work */
+struct timings {
+ struct timespec ts_push;
+ struct timespec ts_start;
+ struct timespec ts_stop;
+ /* user context pointer recorded as a 32-bit id */
+ unsigned int uuid;
+ int index;
+};
+
+/* Per-channel state: AVI segments, current/queued works, statistics
+ * counters and the debugfs timing ring buffer. */
+struct avi_r2r_channel_data {
+ unsigned int magic;
+ struct avi_segment *output_segment;
+ struct avi_segment *input_segment;
+ struct avi_r2r *avi_r2r;
+ struct avi_r2r_user_context *previous_user;
+ struct avi_r2r_work current_work;
+ spinlock_t lock;
+
+ atomic_t status;
+ /* enum avi_r2r_channel_status */
+
+ struct timespec ts_start;
+ struct timespec ts_stop;
+ struct timespec ts_push;
+#ifdef CONFIG_AVI_DEBUG
+ /* ring buffer of the last nr_timings_saved timing samples */
+ struct timings *timings;
+ int timings_index,
+ timings_nr,
+ timings_counter,
+ nr_timings_saved;
+ struct dentry *debugfs_root;
+#endif /* CONFIG_AVI_DEBUG */
+ /* interrupt/apply statistics exposed through debugfs */
+ unsigned int interrupts_counter;
+ unsigned int spurious_interrupts_counter;
+ unsigned int missing_source_interrupts_counter;
+ unsigned int missing_planar_interrupts_counter;
+ unsigned int input_segment_interrupts_counter;
+ unsigned int output_segment_interrupts_counter;
+ unsigned int apply_counter;
+
+ struct avi_r2r_work_queue work_queue;
+ struct avi_r2r_work *work_pool;
+ unsigned int work_pool_size;
+ unsigned int work_pool_free;
+
+ struct avi_r2r_channel_nodes avi_nodes;
+ u32 available;
+
+ /* Configuration of elements (cache) */
+ struct avi_r2r_registers registers;
+
+ unsigned int magic_end;
+
+ /* Bayer stats output */
+ struct avi_segment *stats_segment;
+
+};
+
+/**
+ * AVI RAM2RAM data: global driver data
+ */
+struct avi_r2r {
+ struct device *dev;
+
+#ifdef CONFIG_AVI_DEBUG
+ /* The root of our debugfs filesystem. */
+ struct dentry *debugfs_root;
+#endif /* CONFIG_AVI_DEBUG */
+
+ /* Channels */
+ struct avi_r2r_channel_data *channels[MAX_CHANNELS_AVI_R2R];
+ int channels_nr;
+
+ int suspended;
+};
+
+#ifdef CONFIG_AVI_DEBUG
+/*
+ * debugfs support
+ */
+/* create/remove the per-user debugfs directory (best effort, no error
+ * reporting) */
+extern void avi_r2r__user_setup_debugfs
+ (struct avi_r2r_user_context *user);
+extern void avi_r2r__user_destroy_debugfs
+ (struct avi_r2r_user_context *user);
+
+/* create/remove the per-channel debugfs directory and timing buffer */
+extern int __devinit avi_r2r__channel_setup_debugfs
+ (struct avi_r2r_channel_data *channel);
+extern void avi_r2r__channel_destroy_debugfs
+ (struct avi_r2r_channel_data *channel);
+
+/* record the current work's timestamps into the timing ring buffer */
+extern void avi_r2r__channel_add_timings(struct avi_r2r_channel_data *data);
+
+#endif /* CONFIG_AVI_DEBUG */
+
+#endif /* _AVI_R2R_CORE_H_ */
diff --git a/drivers/parrot/media/video/avi_r2r_debugfs.c b/drivers/parrot/media/video/avi_r2r_debugfs.c
new file mode 100644
index 00000000000000..191375ece15753
--- /dev/null
+++ b/drivers/parrot/media/video/avi_r2r_debugfs.c
@@ -0,0 +1,691 @@
+/**
+ * @file avi_r2r_debugfs.c
+ * Parrot AVI RAM to RAM driver.
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2014-02-06
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "avi_r2r_core.h"
+
+/* debugfs user */
+/*--------------*/
+/* debugfs user "status": running flag and number of stacked works of the
+ * user context. */
+static int user_status_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+
+ seq_printf(s,
+ "running=%d, stacked=%d\n",
+ atomic_read(&user->context->running),
+ atomic_read(&user->context->stacked_nr));
+
+ return 0;
+}
+
+static int user_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_status_show ,inode->i_private);
+}
+
+static const struct file_operations user_status_debugfs_fops = {
+ .open = user_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "device": name of the client device owning the context. */
+static int user_device_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+
+ seq_printf(s, "%s\n", dev_name(user->context->dev));
+
+ return 0;
+}
+
+static int user_device_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_device_show ,inode->i_private);
+}
+
+static const struct file_operations user_device_debugfs_fops = {
+ .open = user_device_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "requirements": the user's capability bits rendered as
+ * "CAP | CAP | ..." (avi_debug_caps_to_str consumes bits from 'caps'). */
+static int user_requirements_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+ unsigned long caps = user->requirements;
+ int n = 0;
+ const char *str;
+
+ while ((str = avi_debug_caps_to_str(&caps))) {
+ if (n++)
+ seq_printf(s, " | ");
+
+ seq_printf(s, "%s", str);
+ }
+
+ if (n == 0)
+ seq_printf(s, "<none>");
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int user_requirements_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_requirements_show ,inode->i_private);
+}
+
+static const struct file_operations user_requirements_debugfs_fops = {
+ .open = user_requirements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "channels_compliant": indices of the channels this user may
+ * run on, or "<none>".
+ *
+ * Rewritten to print straight into the seq_file: the original formatted
+ * into a fixed 64-byte stack buffer, which could overflow for large
+ * channels_nr and was printed uninitialized when channels_compliant was
+ * non-zero but no bit below channels_nr was set. Output is unchanged for
+ * well-formed masks. */
+static int user_channels_compliant_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+ struct avi_r2r *avi_r2r = user->avi_r2r;
+ int i;
+
+ if (user->channels_compliant) {
+ for (i = 0; i < avi_r2r->channels_nr; i++)
+ if (user->channels_compliant & (1 << i))
+ seq_printf(s, "%d ", i);
+ } else {
+ seq_printf(s, "<none>");
+ }
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int user_channels_compliant_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_channels_compliant_show ,inode->i_private);
+}
+
+static const struct file_operations user_channels_compliant_debugfs_fops = {
+ .open = user_channels_compliant_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "src_format": geometry, colorspace, pixel format and plane
+ * line sizes of the source segment format. */
+static int user_src_format_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+
+ seq_printf(s, "%dx%d, %s, %s[%d], %d bytes/line@p0, %d bytes/line@p1\n",
+ user->format.source.width,
+ user->format.source.height,
+ avi_debug_colorspace_to_str(user->format.source.colorspace),
+ avi_debug_pixfmt_to_str(user->format.source.pix_fmt),
+ user->format.source.pix_fmt.id,
+ user->format.source.plane0.line_size,
+ user->format.source.plane1.line_size);
+
+ return 0;
+}
+
+static int user_src_format_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_src_format_show ,inode->i_private);
+}
+
+static const struct file_operations user_src_format_debugfs_fops = {
+ .open = user_src_format_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "tgt_format": geometry, colorspace, pixel format and plane
+ * line sizes of the target segment format.
+ *
+ * Fix: report the *target* colorspace — the original printed
+ * user->format.source.colorspace, a copy-paste slip from src_format_show. */
+static int user_tgt_format_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+
+ seq_printf(s, "%dx%d, %s, %s[%d], %d bytes/line@p0, %d bytes/line@p1\n",
+ user->format.target.width,
+ user->format.target.height,
+ avi_debug_colorspace_to_str(user->format.target.colorspace),
+ avi_debug_pixfmt_to_str(user->format.target.pix_fmt),
+ user->format.target.pix_fmt.id,
+ user->format.target.plane0.line_size,
+ user->format.target.plane1.line_size);
+
+ return 0;
+}
+
+static int user_tgt_format_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_tgt_format_show ,inode->i_private);
+}
+
+static const struct file_operations user_tgt_format_debugfs_fops = {
+ .open = user_tgt_format_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*--------------*/
+/* debugfs user "compose_format": compositing geometry/colorspace, layout
+ * position and alpha — or "no compositing" when use_compose is unset. */
+static int user_compose_format_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_user_context *user = s->private;
+
+ if (user->format.use_compose)
+ seq_printf(s, "%dx%d, %s, (%u, %u) alpha=0x%02X\n",
+ user->format.compose.width,
+ user->format.compose.height,
+ avi_debug_colorspace_to_str(user->format.compose.colorspace),
+ user->format.layout.x,
+ user->format.layout.y,
+ user->format.layout.alpha);
+ else
+ seq_printf(s, "no compositing\n");
+ return 0;
+}
+
+static int user_compose_format_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, user_compose_format_show ,inode->i_private);
+}
+
+static const struct file_operations user_compose_format_debugfs_fops = {
+ .open = user_compose_format_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Create the per-user debugfs directory ("user.<ptr>") and its entries.
+ * Best effort: on any failure the whole directory is removed and
+ * user->debugfs_root is left NULL; no error is returned. */
+void avi_r2r__user_setup_debugfs(struct avi_r2r_user_context *user)
+{
+ char buffer[64];
+ struct dentry *file;
+ struct avi_r2r *avi_r2r=user->avi_r2r;
+
+ sprintf(buffer, "user.%p", user);
+
+/* registers entry "<_name>" backed by user_<_name>_debugfs_fops */
+#define MY_DEBUGFS_REGISTER(_name) \
+ file = debugfs_create_file(#_name, \
+ S_IRWXUGO, \
+ user->debugfs_root, \
+ user, \
+ &user_ ## _name ## _debugfs_fops); \
+ if (IS_ERR_OR_NULL(file)) { \
+ goto rm; \
+ }
+
+ user->debugfs_root = debugfs_create_dir(buffer,
+ avi_r2r->debugfs_root);
+ if (IS_ERR_OR_NULL(user->debugfs_root)) {
+ AVIR2R_LOG_WARN(avi_r2r,
+ "cannot create debugfs directory "
+ "for user %p",
+ user);
+ user->debugfs_root = NULL;
+ return;
+ }
+
+ MY_DEBUGFS_REGISTER(status);
+ MY_DEBUGFS_REGISTER(device);
+ MY_DEBUGFS_REGISTER(requirements);
+ MY_DEBUGFS_REGISTER(channels_compliant);
+ MY_DEBUGFS_REGISTER(src_format);
+ MY_DEBUGFS_REGISTER(tgt_format);
+ MY_DEBUGFS_REGISTER(compose_format);
+#undef MY_DEBUGFS_REGISTER
+ return;
+
+rm:
+ if (user->debugfs_root)
+ debugfs_remove_recursive(user->debugfs_root);
+ user->debugfs_root = NULL;
+
+ return;
+}
+
+/* Remove the per-user debugfs directory; safe to call repeatedly. */
+void avi_r2r__user_destroy_debugfs(struct avi_r2r_user_context *user)
+{
+ if (user->debugfs_root)
+ debugfs_remove_recursive(user->debugfs_root);
+ user->debugfs_root = NULL;
+}
+
+/* debugfs channel */
+/*-----------*/
+/* debugfs channel */
+/*-----------*/
+/* Printable names for enum avi_r2r_channel_status */
+static const char *avi_r2r_channel_status_str[AVI_R2R_CHANNEL_STATUS_NR]=
+{
+#define AVI_DEBUGFS_CHAN_STATUS_TO_STR(_p) [AVI_R2R_CHANNEL_ ## _p] = # _p
+ AVI_DEBUGFS_CHAN_STATUS_TO_STR(UNUSED),
+ AVI_DEBUGFS_CHAN_STATUS_TO_STR(INACTIVE),
+ AVI_DEBUGFS_CHAN_STATUS_TO_STR(RUNNING)
+#undef AVI_DEBUGFS_CHAN_STATUS_TO_STR
+};
+
+/* debugfs channel "status": channel state plus input/output segment
+ * activation states. */
+static int channel_status_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+
+ seq_printf(s, "%s segment in=%s out=%s\n",
+ avi_r2r_channel_status_str[atomic_read(&channel->status)],
+ avi_debug_activation_to_str(channel->input_segment->active),
+ avi_debug_activation_to_str(channel->output_segment->active));
+
+ return 0;
+}
+
+static int channel_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_status_show, inode->i_private);
+}
+
+static const struct file_operations channel_status_debugfs_fops = {
+ .open = channel_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "work_queue_level": number of queued works. */
+static int channel_work_queue_level_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+
+ seq_printf(s, "%d\n", avi_r2r__work_queue_level(&channel->work_queue));
+
+ return 0;
+}
+
+static int channel_work_queue_level_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_work_queue_level_show, inode->i_private);
+}
+
+static const struct file_operations channel_work_queue_level_debugfs_fops = {
+ .open = channel_work_queue_level_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "timespec": push/start/stop timestamps of the last work
+ * in microseconds, plus the stop-start duration. */
+static int channel_timespec_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+
+ seq_printf(s, "%20lld, %20lld, %20lld : %10lld microseconds\n",
+ ktime_to_us(timespec_to_ktime(channel->ts_push)),
+ ktime_to_us(timespec_to_ktime(channel->ts_start)),
+ ktime_to_us(timespec_to_ktime(channel->ts_stop)),
+ ktime_us_delta(timespec_to_ktime(channel->ts_stop),
+ timespec_to_ktime(channel->ts_start))
+ );
+
+ return 0;
+}
+
+static int channel_timespec_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_timespec_show, inode->i_private);
+}
+
+static const struct file_operations channel_timespec_debugfs_fops = {
+ .open = channel_timespec_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "timings": dump the ring buffer of the last timings_nr
+ * samples, oldest first, with per-sample durations and deltas against the
+ * previous sample. The ring-walk arithmetic is order-sensitive; code left
+ * untouched. */
+static int channel_timings_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+ struct timings prev;
+ int i, n, c, cn;
+
+ /* cn = channel index carried in the magic's low nibble */
+ cn=channel->magic & ~_AVI_R2R_CHANNEL_MAGIC_MASK;
+
+ n = channel->timings_nr;
+ seq_printf(s, "%d/%d\n", n, channel->timings_counter);
+ if (n>0) {
+ /* start at the oldest sample still in the ring */
+ i = channel->timings_index-n;
+ if (i < 0) i += channel->nr_timings_saved;
+ prev = channel->timings[i];
+ for (c = 0; c < n; c++) {
+ seq_printf(s,
+ "%08X_%01d: %18lld, %18lld, %18lld :"
+ " %10lld microseconds, "
+ "%10lld, %10lld, %10lld, %10d\n",
+ channel->timings[i].uuid, cn,
+ ktime_to_us(timespec_to_ktime(channel->timings[i].ts_start)),
+ ktime_to_us(timespec_to_ktime(channel->timings[i].ts_stop)),
+ ktime_to_us(timespec_to_ktime(channel->timings[i].ts_push)),
+ ktime_us_delta(timespec_to_ktime(channel->timings[i].ts_stop),
+ timespec_to_ktime(channel->timings[i].ts_start)),
+ ktime_us_delta(timespec_to_ktime(channel->timings[i].ts_start),
+ timespec_to_ktime(prev.ts_stop)),
+ ktime_us_delta(timespec_to_ktime(channel->timings[i].ts_start),
+ timespec_to_ktime(prev.ts_start)),
+ ktime_us_delta(timespec_to_ktime(channel->timings[i].ts_start),
+ timespec_to_ktime(channel->timings[i].ts_push)),
+ channel->timings[i].index);
+ prev = channel->timings[i];
+ i++;
+ if (i >= channel->nr_timings_saved ) i = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int channel_timings_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_timings_show, inode->i_private);
+}
+
+static const struct file_operations channel_timings_debugfs_fops = {
+ .open = channel_timings_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "available": the channel's capability bits rendered as
+ * "CAP | CAP | ..." (same rendering as user "requirements"). */
+static int channel_available_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+ unsigned long caps = channel->available;
+ int n = 0;
+ const char *str;
+
+ while ((str = avi_debug_caps_to_str(&caps))) {
+ if (n++)
+ seq_printf(s, " | ");
+
+ seq_printf(s, "%s", str);
+ }
+
+ if (n == 0)
+ seq_printf(s, "<none>");
+
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int channel_available_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_available_show, inode->i_private);
+}
+
+static const struct file_operations channel_available_debugfs_fops = {
+ .open = channel_available_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "current_user": pointers of the current and previous
+ * user contexts; a trailing '!' marks a pointer that failed the guard-word
+ * check. */
+static int channel_current_user_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+ void *uuid = 0, *prev_uuid = 0;
+ bool cur_ok = 0, prev_ok = 0;
+
+ if (channel->current_work.user)
+ if (avi_r2r__user_context_is_ok(channel->current_work.user)) {
+ uuid = channel->current_work.user;
+ cur_ok = 1;
+ }
+
+ prev_uuid = channel->previous_user;
+ if (channel->previous_user)
+ if (avi_r2r__user_context_is_ok(channel->previous_user)) {
+ prev_ok = 1;
+ }
+
+ seq_printf(s,
+ "%p%c %p%c\n",
+ uuid, cur_ok?' ':'!',
+ prev_uuid, prev_ok?' ':'!');
+
+ return 0;
+}
+
+static int channel_current_user_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_current_user_show, inode->i_private);
+}
+
+static const struct file_operations channel_current_user_debugfs_fops = {
+ .open = channel_current_user_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "interrupts": total/spurious/missing/segment interrupt
+ * counters of the channel. */
+static int channel_interrupts_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+
+ seq_printf(s,
+ "%u, "
+ "spurious %u, "
+ "missing source p0=%u p1=%u, "
+ "segment in=%u out=%u"
+ "\n",
+ channel->interrupts_counter,
+ channel->spurious_interrupts_counter,
+ channel->missing_source_interrupts_counter,
+ channel->missing_planar_interrupts_counter,
+ channel->input_segment_interrupts_counter,
+ channel->output_segment_interrupts_counter);
+
+ return 0;
+}
+
+static int channel_interrupts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_interrupts_show, inode->i_private);
+}
+
+static const struct file_operations channel_interrupts_debugfs_fops = {
+ .open = channel_interrupts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "apply": number of configuration applies. */
+static int channel_apply_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+
+ seq_printf(s, "%u\n", channel->apply_counter);
+
+ return 0;
+}
+
+static int channel_apply_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_apply_show , inode->i_private);
+}
+
+static const struct file_operations channel_apply_debugfs_fops = {
+ .open = channel_apply_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs channel "nodes": the AVI node chains of the input and output
+ * segments ("a -> b -> c"), including the extra planar branch when the
+ * input segment has both a planar DMA input and a scaler. */
+static int channel_nodes_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_channel_data *channel = s->private;
+ int i;
+
+ seq_printf(s, "(%s) ", channel->input_segment->id);
+ for (i=0; i < channel->input_segment->nodes_nr; i++) {
+ seq_printf(s, "%s", channel->input_segment->nodes[i]->name);
+ if (i<(channel->input_segment->nodes_nr-1))
+ seq_printf(s, " -> ");
+ }
+ {
+ struct avi_node *dma_in_planar =
+ avi_segment_get_node(channel->input_segment,
+ AVI_CAP_PLANAR);
+ struct avi_node *scaler_planar =
+ avi_segment_get_node(channel->input_segment,
+ AVI_CAP_SCAL);
+ if (dma_in_planar && scaler_planar)
+ {
+ /* the planar scaler input is the node right after
+ * the scaler — assumes consecutive node ids;
+ * TODO confirm against the AVI node layout */
+ scaler_planar = avi_get_node(scaler_planar->node_id+1);
+ seq_printf(s, ", %s -> %s",
+ dma_in_planar->name,
+ scaler_planar->name);
+ }
+ }
+
+ seq_printf(s, " >> OUT(%s) ", channel->output_segment->id);
+ for (i=0; i < channel->output_segment->nodes_nr; i++)
+ {
+ seq_printf(s, "%s", channel->output_segment->nodes[i]->name);
+ if (i<(channel->output_segment->nodes_nr-1))
+ seq_printf(s, " -> ");
+ }
+
+ seq_printf(s, "\n");
+ return 0;
+}
+
+static int channel_nodes_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, channel_nodes_show ,inode->i_private);
+}
+
+static const struct file_operations channel_nodes_debugfs_fops = {
+ .open = channel_nodes_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Create the per-channel debugfs directory ("channel.<n>") and its entries,
+ * and allocate the timings ring buffer used by channel_timings_show().
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes over the original:
+ * - a failure of debugfs_create_dir() no longer leaks channel->timings
+ * (it now goes through the common error path);
+ * - the error path resets channel->timings to NULL, so a later call to
+ * avi_r2r__channel_destroy_debugfs() cannot double-free it. */
+int __devinit avi_r2r__channel_setup_debugfs
+ (struct avi_r2r_channel_data *channel)
+{
+/* registers entry "<_name>" backed by channel_<_name>_debugfs_fops */
+#define MY_DEBUGFS_REGISTER(_name) \
+ file = debugfs_create_file(#_name, \
+ S_IRWXUGO, \
+ debugfs_chan_root, \
+ channel, \
+ &channel_ ## _name ## _debugfs_fops); \
+ if (IS_ERR_OR_NULL(file)) { \
+ ret = PTR_ERR(file) ?: -ENOMEM; \
+ goto rm; \
+ }
+
+ char buffer[64];
+ struct avi_r2r *avi_r2r = channel->avi_r2r;
+ struct dentry *debugfs_chan_root = NULL;
+ struct dentry *file;
+ int ret = 0;
+
+ channel->debugfs_root = NULL;
+
+ /* allocate timings data for channel */
+ channel->timings = kzalloc(
+ sizeof(struct timings)*channel->nr_timings_saved,
+ GFP_KERNEL);
+ if (!channel->timings) {
+ AVIR2R_LOG_ERROR(avi_r2r,
+ "couldn't allocate struct timings");
+ ret = -ENOMEM;
+ goto rm;
+ }
+
+ sprintf(buffer,"channel.%d",channel->magic & ~_AVI_R2R_CHANNEL_MAGIC_MASK);
+ debugfs_chan_root = debugfs_create_dir(buffer, avi_r2r->debugfs_root);
+ if (IS_ERR_OR_NULL(debugfs_chan_root)) {
+ ret = debugfs_chan_root ? PTR_ERR(debugfs_chan_root) : -ENOMEM;
+ debugfs_chan_root = NULL;
+ goto rm;
+ }
+
+ MY_DEBUGFS_REGISTER(status);
+ MY_DEBUGFS_REGISTER(work_queue_level);
+ MY_DEBUGFS_REGISTER(timespec);
+ MY_DEBUGFS_REGISTER(timings);
+ MY_DEBUGFS_REGISTER(available);
+ MY_DEBUGFS_REGISTER(current_user);
+ MY_DEBUGFS_REGISTER(interrupts);
+ MY_DEBUGFS_REGISTER(apply);
+ MY_DEBUGFS_REGISTER(nodes);
+
+ channel->debugfs_root = debugfs_chan_root;
+#undef MY_DEBUGFS_REGISTER
+ return 0;
+
+rm:
+ kfree(channel->timings);
+ channel->timings = NULL;
+ if (debugfs_chan_root)
+ debugfs_remove_recursive(debugfs_chan_root);
+
+ return ret;
+}
+
+/* Free the timings ring buffer and remove the channel's debugfs tree.
+ * Pointers are reset to NULL so the teardown is safe to repeat (the
+ * original left channel->timings dangling after kfree; kfree(NULL) is a
+ * no-op so the redundant guard is dropped). */
+void avi_r2r__channel_destroy_debugfs(struct avi_r2r_channel_data *channel)
+{
+ kfree(channel->timings);
+ channel->timings = NULL;
+ if (channel->debugfs_root)
+ debugfs_remove_recursive(channel->debugfs_root);
+ channel->debugfs_root = NULL;
+}
+
+/* Record the current work's push/start/stop timestamps into the channel's
+ * timing ring buffer. The user context pointer is stored as a 32-bit id
+ * (uuid is unsigned int — truncates on 64-bit; fine on 32-bit ARM). */
+void avi_r2r__channel_add_timings(struct avi_r2r_channel_data *data)
+{
+ data->timings[data->timings_index].ts_start = data->ts_start;
+ data->timings[data->timings_index].ts_stop = data->ts_stop;
+ data->timings[data->timings_index].ts_push = data->ts_push;
+ data->timings[data->timings_index].uuid = (unsigned long)data->current_work.user;
+ data->timings[data->timings_index].index = data->timings_counter;
+
+ /* advance the ring; timings_nr saturates at nr_timings_saved */
+ data->timings_counter++;
+ data->timings_index++;
+ if (data->timings_index >= data->nr_timings_saved)
+ data->timings_index = 0;
+
+ if (data->timings_nr < data->nr_timings_saved)
+ data->timings_nr++;
+}
diff --git a/drivers/parrot/media/video/avi_r2r_test_multi.c b/drivers/parrot/media/video/avi_r2r_test_multi.c
new file mode 100644
index 00000000000000..b98e05305cf1bb
--- /dev/null
+++ b/drivers/parrot/media/video/avi_r2r_test_multi.c
@@ -0,0 +1,3191 @@
+/**
+ * @file avi_r2r_test_multi.c
+ * Parrot AVI ram2ram driver (test module with multiple clients support).
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author didier.leymarie.ext@parrot.com
+ * @date 2013-11-07
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/hrtimer.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+
+#include "video/avi_segment.h"
+#include "video/avi_dma.h"
+#include "video/avi_debug.h"
+#include "video/avi_pixfmt.h"
+#include "avi_r2r.h"
+
+
+#define DRIVER_NAME "avi_r2r_test_multi"
+#define DRIVER_VERSION KERNEL_VERSION(0, 0, 0)
+
+/*
+ * Logging helpers: forward to the dev_*() family on (data)->dev and
+ * append the trailing newline so call sites can omit it.
+ */
+#define AVIR2R_LOG_INFO(data, fmt, arg...) \
+	dev_info((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_WARN(data, fmt, arg...) \
+	dev_warn((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_NOTICE(data, fmt, arg...) \
+	dev_notice((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_ERROR(data, fmt, arg...) \
+	dev_err((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_DEBUG(data, fmt, arg...) \
+	dev_dbg((data)->dev, fmt "\n", ##arg)
+#define AVIR2R_LOG_DEBUG_EXT(data, fmt, arg...) \
+	dev_dbg((data)->dev, fmt "\n", ##arg)
+
+/* command line parameters */
+
+/* source */
+static unsigned int src_pixfmt = 1;
+module_param(src_pixfmt, uint, S_IRUGO);
+
+static unsigned int src_frame_width = 1280;
+module_param(src_frame_width, uint, S_IRUGO);
+
+static unsigned int src_frame_height = 720;
+module_param(src_frame_height, uint, S_IRUGO);
+
+static unsigned int src_top = 0;
+module_param(src_top, uint, S_IRUGO);
+
+static unsigned int src_left = 0;
+module_param(src_left, uint, S_IRUGO);
+
+static unsigned int src_width = 1280;
+module_param(src_width, uint, S_IRUGO);
+
+static unsigned int src_height = 720;
+module_param(src_height, uint, S_IRUGO);
+
+/*
+ * The crop-rectangle deltas may be negative and all of the variables
+ * below are registered with module_param(..., int, ...): declare them as
+ * plain int so the declaration matches the registered parameter type
+ * (the moduleparam type check otherwise trips on unsigned int vs int).
+ */
+static int src_delta_left = 0;
+module_param(src_delta_left, int, S_IRUGO);
+
+static int src_delta_top = 0;
+module_param(src_delta_top, int, S_IRUGO);
+
+static unsigned int src_sa0 = 0;
+module_param(src_sa0, uint, S_IRUGO);
+
+static unsigned int src_sa1 = 0;
+module_param(src_sa1, uint, S_IRUGO);
+
+static unsigned int src_cspace = AVI_RGB_CSPACE;
+module_param(src_cspace, uint, S_IRUGO);
+
+static unsigned int src_nbuffers = 1;
+module_param(src_nbuffers, uint, S_IRUGO);
+
+/* target */
+static unsigned int tgt_pixfmt = 2;
+module_param(tgt_pixfmt, uint, S_IRUGO);
+
+static unsigned int tgt_frame_width = 1280;
+module_param(tgt_frame_width, uint, S_IRUGO);
+
+static unsigned int tgt_frame_height = 720;
+module_param(tgt_frame_height, uint, S_IRUGO);
+
+static unsigned int tgt_top = 0;
+module_param(tgt_top, uint, S_IRUGO);
+
+static unsigned int tgt_left = 0;
+module_param(tgt_left, uint, S_IRUGO);
+
+static unsigned int tgt_width = 1280;
+module_param(tgt_width, uint, S_IRUGO);
+
+static unsigned int tgt_height = 720;
+module_param(tgt_height, uint, S_IRUGO);
+
+static int tgt_delta_left = 0;
+module_param(tgt_delta_left, int, S_IRUGO);
+
+static int tgt_delta_top = 0;
+module_param(tgt_delta_top, int, S_IRUGO);
+
+static unsigned int tgt_sa0 = 0;
+module_param(tgt_sa0, uint, S_IRUGO);
+
+static unsigned int tgt_sa1 = 0;
+module_param(tgt_sa1, uint, S_IRUGO);
+
+static unsigned int tgt_cspace = AVI_RGB_CSPACE;
+module_param(tgt_cspace, uint, S_IRUGO);
+
+static unsigned int tgt_nbuffers = 1;
+module_param(tgt_nbuffers, uint, S_IRUGO);
+
+static int gam_bypass = 1;
+module_param(gam_bypass, int, S_IRUGO);
+
+static int gam_palette = 0;
+module_param(gam_palette, int, S_IRUGO);
+
+static int config_run_time = 0;
+module_param(config_run_time, int, S_IRUGO);
+
+static int tag_src = 0;
+module_param(tag_src, int, S_IRUGO);
+
+/* number of operations */
+static unsigned int delta_count = 1;
+module_param(delta_count, uint, S_IRUGO);
+
+/* time out (ms) for done */
+#ifndef CONFIG_ARCH_VEXPRESS
+#define DONE_TMOUT 100 /* 100 ms: 1/10th of a sec */
+#else
+#define DONE_TMOUT 2500 /* 2500 ms: VExpress vs real P7 : x25 slower */
+#endif
+static unsigned int time_out = DONE_TMOUT;
+module_param(time_out, uint, S_IRUGO);
+
+/* period (microseconds) */
+static unsigned int period = 0;
+module_param(period, uint, S_IRUGO);
+
+/* start up delay (milliseconds) */
+static unsigned int startup_delay = 0;
+module_param(startup_delay, uint, S_IRUGO);
+
+/* verbose display */
+static unsigned int verbose = 1;
+module_param(verbose, uint, S_IRUGO);
+
+/* size of timings saved */
+#define _NR_TIMINGS_SAVED 20
+static unsigned int nr_timings_saved = _NR_TIMINGS_SAVED;
+module_param(nr_timings_saved, uint, S_IRUGO);
+
+/**
+ * struct avi_frame - AVI whole frame descriptor .
+ * @width: frame width in pixels
+ * @height: frame height in pixels
+ */
+struct avi_frame {
+	u16 width;
+	u16 height;
+};
+/**
+ * Image configuration
+ */
+struct avi_r2r_image_configuration {
+	/* whole frame */
+	struct avi_frame frame;
+	/* rectangle within frame */
+	struct avi_rect rect;
+	/* pixel format AVI_xxxx_FMT in avi.h */
+	struct avi_dma_pixfmt pixel_format;
+
+	enum avi_colorspace colorspace;
+	/* per-plane byte sizes, filled in by setup_formats() */
+	struct {
+		unsigned int line_size;
+		unsigned int frame_size;
+	}plane0, plane1;
+};
+/**
+ * Source/Target Image configuration
+ */
+struct avi_r2r_images_config {
+	/* definition of source picture */
+	struct avi_r2r_image_configuration source;
+	/* definition of target picture */
+	struct avi_r2r_image_configuration target;
+} ;
+/**
+ * Buffers definition
+ */
+/**
+ * Parameters for submit: pair of buffer addresses (Plane 0, Plane 1)
+ */
+struct avi_r2r_frame_physical_addresses {
+	dma_addr_t plane_0;
+	dma_addr_t plane_1;
+};
+
+struct avi_r2r_buffers_config {
+	struct avi_r2r_frame_physical_addresses source;
+	struct avi_r2r_frame_physical_addresses target;
+};
+
+/**
+ * Gamma correction
+ */
+struct avi_r2r_gamma_correction_config {
+	struct avi_cmap cmap;
+	struct
+	{
+		unsigned bypass :1;
+		unsigned palette :1;
+		unsigned comp_width :1;
+	};
+};
+
+/**
+ * AVI RAM to RAM data: global test driver data
+ */
+/* one sample of the periodic-timer log (see add_kt_period()) */
+struct kt_period {
+	ktime_t begin;
+	ktime_t end;
+	int overrun;
+};
+#define NR_REQUIREMENTS 5
+struct avi_r2r_test {
+	struct device *dev;
+	struct platform_device *platform_device;
+
+	/* test buffer memory window (virtual and physical base) */
+	void *mem_start;
+	void *phys_mem_start;
+	size_t mem_size;
+	struct resource *vmem;
+	struct {
+		struct avi_segment_format source;
+		struct avi_segment_format target;
+	} formats;
+	struct {
+		struct avi_dma_buffer source;
+		struct avi_dma_buffer target;
+	} buffers;
+	struct avi_r2r_images_config images_config;
+	struct avi_r2r_buffers_config buffers_config;
+	/* working copy of buffers_config, stepped through the buffer rings */
+	struct avi_r2r_buffers_config l_buffers_config;
+	unsigned int src_nbuffers;
+	unsigned int tgt_nbuffers;
+	unsigned int buffer_in_idx;
+	unsigned int buffer_out_idx;
+
+	struct avi_r2r_gamma_correction_config gamma_config;
+	u32 requirements[NR_REQUIREMENTS];
+	struct avi_r2r_context context_id;
+	struct avi_r2r_ops ops;
+	/* crop-rectangle increments applied after each queued operation */
+	struct {
+		unsigned number;
+		struct {
+			int left;
+			int top;
+		}source,target;
+	} deltas;
+	unsigned long period; /* microseconds*/
+	ktime_t ktime_period_ns;
+	unsigned time_out;/* milliseconds */
+	unsigned startup_delay;/* milliseconds */
+
+	int run;
+	int running;
+	int config_run_time;
+	int tag_src;
+	int verbose;
+
+	/* progress counters and frame-index mismatch statistics */
+	unsigned counter_buf_queue;
+	unsigned counter_done;
+	unsigned int frame_index_src, frame_index_tgt;
+	unsigned int mismatch_frame_index_src,
+			mismatch_frame_index_tgt,
+			mismatch_frame_index;
+	bool finished;
+	bool aborted;
+	bool done_periodic;
+	/* circular log of hrtimer periods, written by add_kt_period() */
+	struct kt_period *kt_period;
+	int kt_period_index,kt_period_nr,kt_period_counter;
+	wait_queue_head_t wait_done;
+	atomic_t callback_done_ran;
+	atomic_t callback_configure_ran;
+	atomic_t done_counter;
+	atomic_t all_done;
+	struct hrtimer timer;
+	spinlock_t lock;
+	/* The root of our debugfs filesystem. */
+	struct dentry *debugfs_root;
+};
+
+/*
+ * Fill one avi_segment_format from an image configuration and record the
+ * resulting per-plane line/frame sizes back into the configuration.
+ * Factored out because source and target went through exactly the same
+ * (duplicated) computation.
+ */
+static void setup_one_format(struct avi_segment_format *fmt,
+			     struct avi_r2r_image_configuration *img)
+{
+	fmt->pix_fmt    = img->pixel_format;
+	fmt->width      = img->rect.width;
+	fmt->height     = img->rect.height;
+	fmt->interlaced = 0;
+	fmt->colorspace = img->colorspace;
+	/* line sizes are derived from the full frame width, not the crop */
+	avi_dma_setup_line_size(fmt, img->frame.width, 0);
+
+	img->plane0.line_size  = fmt->plane0.line_size;
+	img->plane1.line_size  = fmt->plane1.line_size;
+	img->plane0.frame_size = img->plane0.line_size * img->frame.height;
+	img->plane1.frame_size = img->plane1.line_size * img->frame.height;
+}
+
+/* Derive the DMA segment formats for both source and target images. */
+static void setup_formats(struct avi_r2r_test *data)
+{
+	setup_one_format(&data->formats.source, &data->images_config.source);
+	setup_one_format(&data->formats.target, &data->images_config.target);
+}
+
+/*
+ * Program the plane DMA addresses of @buf for a crop rectangle.
+ *
+ * Starting from the base addresses in @physaddrs, plane 0 (and plane 1
+ * for formats that use one) is offset to the pixel at (@left, @top) of
+ * the frame described by @fmt.  A missing base address marks the buffer
+ * AVI_BUFFER_INVAL and returns early; otherwise the buffer ends up
+ * AVI_BUFFER_READY.
+ */
+static void setup_dma(struct avi_dma_buffer *buf,
+		      struct avi_segment_format *fmt,
+		      struct avi_r2r_frame_physical_addresses *physaddrs,
+		      unsigned left,
+		      unsigned top)
+{
+	unsigned pixsz,l_top;
+
+	buf->plane0.dma_addr = physaddrs->plane_0;
+	if (buf->plane0.dma_addr==0) {
+		buf->status=AVI_BUFFER_INVAL;
+		return;
+	}
+	pixsz = avi_pixel_size0(fmt->pix_fmt);
+	buf->plane0.dma_addr += fmt->plane0.line_size * top;
+	buf->plane0.dma_addr += left * pixsz;
+
+	buf->plane1.dma_addr = 0;
+
+	pixsz = avi_pixel_size1(fmt->pix_fmt);
+
+	/* a non-zero plane-1 pixel size means the format is semi-planar */
+	if (pixsz > 0) {
+		buf->plane1.dma_addr = physaddrs->plane_1;
+		if (buf->plane1.dma_addr == 0) {
+			buf->status = AVI_BUFFER_INVAL;
+			return;
+		}
+		l_top = top;
+
+		/* NV12/NV21 chroma is vertically subsampled by two */
+		if (fmt->pix_fmt.id == AVI_NV12_FMTID ||
+		    fmt->pix_fmt.id == AVI_NV21_FMTID)
+			l_top = l_top/2;
+
+		buf->plane1.dma_addr += fmt->plane1.line_size * l_top;
+		buf->plane1.dma_addr += left * pixsz;
+	}
+	buf->status=AVI_BUFFER_READY;
+}
+
+/* Flatten a struct timeval into a 32-bit microsecond count (wraps after
+ * roughly 71 minutes of tv_sec). */
+static inline uint32_t timeval_to_usec(const struct timeval *tv)
+{
+	uint32_t usec = (uint32_t)tv->tv_usec;
+
+	usec += (uint32_t)tv->tv_sec * 1000000;
+	return usec;
+}
+
+/* Log the end-of-run status and, in verbose mode, the raw counters. */
+static void report_status(struct avi_r2r_test *data)
+{
+	if (data->aborted)
+		AVIR2R_LOG_INFO(data,"Aborted");
+	if (data->finished)
+		AVIR2R_LOG_INFO(data,"Finished");
+
+	if (data->verbose) {
+		AVIR2R_LOG_INFO(data,"Callback done:\t%d",
+				atomic_read(&data->callback_done_ran));
+		AVIR2R_LOG_INFO(data,"Callback configure:\t%d",
+				atomic_read(&data->callback_configure_ran));
+		AVIR2R_LOG_INFO(data,"Counter Buf queue:\t%d",
+				data->counter_buf_queue);
+		AVIR2R_LOG_INFO(data,"Counter Done:\t%d",
+				data->counter_done);
+	}
+}
+
+/*
+ * Convert milliseconds to jiffies.
+ *
+ * The original open-coded (ms / (1000 / HZ)) divides by zero whenever
+ * HZ > 1000 and truncates any value below one tick to a zero-jiffy
+ * timeout.  msecs_to_jiffies() handles every HZ configuration and
+ * rounds up instead of down, which is the safe direction for timeouts.
+ */
+static inline unsigned ms_to_jiffies(unsigned ms)
+{
+	return msecs_to_jiffies(ms);
+}
+/*
+ * Sleep until the done callback signals completion (done_counter and
+ * all_done differ) or the timeout expires.  The timeout is scaled to
+ * deltas.number operations times time_out, and doubled when a periodic
+ * run is configured (period != 0).  Returns 0 on completion, -ETIMEDOUT
+ * on timeout, or the negative value reported by
+ * wait_event_interruptible_timeout() when interrupted by a signal.
+ */
+static int wait_for_done(struct avi_r2r_test *data)
+{
+	int ret;
+
+	ret = wait_event_interruptible_timeout(data->wait_done,
+					atomic_read(&data->done_counter)
+						!= atomic_read(&data->all_done),
+					data->deltas.number*ms_to_jiffies(data->time_out)
+						*(data->period==0?1:2));
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data,"%s() ret=%d!", __FUNCTION__, ret);
+	if (ret == 0)
+		return -ETIMEDOUT;
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Queue one source/target buffer pair for the batch (non-periodic) run.
+ *
+ * Computes the DMA addresses for the current crop rectangles, tags both
+ * buffers' priv fields with a marker byte (0x42 source / 0x62 target)
+ * plus the sequence number, queues them, then advances the rectangles by
+ * the configured deltas and steps through the source/target buffer
+ * rings.  Sets data->aborted on queue failure.
+ *
+ * NOTE(review): the ring advance resets buffer_in_idx only when it
+ * exceeds src_nbuffers (">" rather than ">="), so the cycle walks
+ * nbuffers+1 slots -- looks like an off-by-one; confirm the intended
+ * ring size.
+ */
+static void batch_queue(struct avi_r2r_test *data)
+{
+	struct avi_dma_buffer stats_buf = {
+		.plane0.dma_addr = 0,
+	};
+	int ret;
+
+	/* restart from the base addresses when a ring wraps back to 0 */
+	if (0 == data->buffer_in_idx)
+		data->l_buffers_config.source = data->buffers_config.source;
+	if (0 == data->buffer_out_idx)
+		data->l_buffers_config.target = data->buffers_config.target;
+
+	setup_dma(&data->buffers.source,
+		  &data->formats.source,
+		  &data->l_buffers_config.source,
+		  data->images_config.source.rect.left,
+		  data->images_config.source.rect.top);
+
+	setup_dma(&data->buffers.target,
+		  &data->formats.target,
+		  &data->l_buffers_config.target,
+		  data->images_config.target.rect.left,
+		  data->images_config.target.rect.top);
+
+	data->buffers.source.priv = (void *)(0x42000000|data->counter_buf_queue);
+	data->buffers.target.priv = (void *)(0x62000000|data->counter_buf_queue);
+
+	ret=avi_r2r_buf_queue(&data->context_id,
+			      &data->buffers.source,
+			      &data->buffers.target,
+			      &stats_buf);
+	if (ret>=0) {
+		if (data->verbose)
+			AVIR2R_LOG_INFO(data,
+					"%s() #%d/%d Buf Queue successful",
+					__FUNCTION__,
+					data->counter_buf_queue,
+					data->deltas.number);
+	}
+	else {
+		AVIR2R_LOG_INFO(data,"%s() #%d/%d "
+				"Buf Queue failed (%d)",
+				__FUNCTION__,
+				data->counter_buf_queue,
+				data->deltas.number,
+				ret);
+		data->aborted=1;
+		return;
+	}
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s() #%d/%d",__FUNCTION__,
+				data->counter_buf_queue,
+				data->deltas.number);
+
+	data->counter_buf_queue++;
+	data->images_config.source.rect.left += data->deltas.source.left;
+	data->images_config.source.rect.top += data->deltas.source.top;
+	data->images_config.target.rect.left += data->deltas.target.left;
+	data->images_config.target.rect.top += data->deltas.target.top;
+
+	data->buffer_in_idx++;
+	if (data->buffer_in_idx > data->src_nbuffers)
+		data->buffer_in_idx = 0;
+	else {
+		data->l_buffers_config.source.plane_0 += data->images_config.source.plane0.frame_size;
+		data->l_buffers_config.source.plane_1 += data->images_config.source.plane1.frame_size;
+	}
+	data->buffer_out_idx++;
+	if (data->buffer_out_idx > data->tgt_nbuffers)
+		data->buffer_out_idx = 0;
+	else {
+		data->l_buffers_config.target.plane_0 += data->images_config.target.plane0.frame_size;
+		data->l_buffers_config.target.plane_1 += data->images_config.target.plane1.frame_size;
+	}
+}
+
+/*
+ * "done" callback for the batch run, invoked by the r2r core after each
+ * completed operation.  Checks the sequence numbers carried in the low
+ * 24 bits of the buffers' priv tags against each other and against the
+ * previous frame index (+1), counting the mismatches, and wakes
+ * wait_for_done() once deltas.number operations have completed or the
+ * run was aborted.  The whole body runs under data->lock.
+ */
+static void my_callback_done(struct avi_r2r_context *ctx,
+			     struct avi_dma_buffer *in_buf,
+			     struct avi_dma_buffer *out_buf,
+			     struct avi_dma_buffer *stats_buf,
+			     void *priv)
+{
+	struct avi_r2r_test *data=ctx->private;
+	unsigned int index_src, index_tgt;
+
+	spin_lock(&data->lock);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) IN",__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+
+	atomic_inc(&data->callback_done_ran);
+
+	/* the sequence number is the low 24 bits of the priv tag */
+	index_src=(unsigned int)in_buf->priv & 0x00FFFFFF;
+	index_tgt=(unsigned int)out_buf->priv & 0x00FFFFFF;
+
+	if (index_src != index_tgt) {
+		data->mismatch_frame_index++;
+		AVIR2R_LOG_INFO(data, "%s(%d) index mismatch src=%u tgt=%u",
+				__FUNCTION__, data->counter_done,
+				index_src, index_tgt);
+	}
+
+	if (index_src != (data->frame_index_src+1)) {
+		data->mismatch_frame_index_src++;
+		AVIR2R_LOG_INFO(data, "%s(%d) SRC index mismatch read=%u expected=%u",
+				__FUNCTION__, data->counter_done,
+				index_src, data->frame_index_src+1);
+	}
+
+	if (index_tgt != (data->frame_index_tgt+1)) {
+		data->mismatch_frame_index_tgt++;
+		AVIR2R_LOG_INFO(data, "%s(%d) TGT index mismatch read=%u expected=%u",
+				__FUNCTION__, data->counter_done,
+				index_tgt, data->frame_index_tgt+1);
+	}
+
+	data->frame_index_src=index_src;
+	data->frame_index_tgt=index_tgt;
+
+	data->counter_done++;
+
+	if (data->counter_done>=data->deltas.number) {
+		data->finished=1;
+		AVIR2R_LOG_INFO(data, "%s(%d) finished",
+				__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+		atomic_inc(&data->all_done);
+		/* Wake up any task waiting for done */
+		wake_up_interruptible(&data->wait_done);
+	}
+	else {
+		if (data->verbose)
+			AVIR2R_LOG_INFO(data, "%s(%d) #%d/%d",
+					__FUNCTION__,
+					atomic_read(&data->callback_done_ran),
+					data->counter_done,
+					data->deltas.number);
+	}
+
+	if (data->aborted) {
+		AVIR2R_LOG_INFO(data, "%s() aborted",__FUNCTION__);
+		atomic_inc(&data->all_done);
+		/* Wake up any task waiting for done */
+		wake_up_interruptible(&data->wait_done);
+	}
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) OUT",__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+
+	spin_unlock(&data->lock);
+}
+
+/* Translate a physical/DMA address inside the test buffer window into
+ * its kernel-virtual counterpart using the base pair recorded in @data. */
+static inline void * physical_to_virtual(struct avi_r2r_test *data, dma_addr_t phys)
+{
+	u32 base = (u32)data->phys_mem_start;
+
+	return (void *)((u32)data->mem_start + ((u32)phys - base));
+}
+
+/*
+ * "configure" callback for the batch run, invoked by the r2r core before
+ * each operation.  Depending on config_run_time it re-applies a segment
+ * format (1: BT601->BT709 colorspace conversion, 3: half-size output,
+ * 4: half-size output for even-tagged frames only) or the gamma setup
+ * (2).  When tag_src is set it also stamps the source buffer: the priv
+ * tag is written into the first 32-bit word of every line and into the
+ * last word of the frame.  The whole body runs under data->lock.
+ */
+static void my_callback_configure(struct avi_r2r_context *ctx,
+				  struct avi_dma_buffer *in_buf,
+				  struct avi_dma_buffer *out_buf,
+				  void *priv)
+{
+	struct avi_r2r_test *data = ctx->private;
+	struct avi_segment_format out_fmt, in_fmt;
+	int ret;
+	unsigned int l, o;
+	u32 *buffer;
+
+	spin_lock(&data->lock);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) IN",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran));
+
+	atomic_inc(&data->callback_configure_ran);
+	in_fmt = data->formats.source ;
+	out_fmt = data->formats.target ;
+
+	switch (data->config_run_time) {
+	case 1:
+		in_fmt.colorspace = AVI_BT601_CSPACE;
+		out_fmt.colorspace = AVI_BT709_CSPACE;
+		if ((ret=avi_r2r_set_format(&data->context_id,
+					    &in_fmt,
+					    &out_fmt,
+					    0,
+					    0)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_set_format(conv) fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+
+		break;
+	case 2:
+		if ((ret=avi_r2r_setup_gam(&data->context_id,
+					   data->gamma_config.bypass,
+					   data->gamma_config.palette,
+					   data->gamma_config.comp_width,
+					   &data->gamma_config.cmap)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_setup_gam fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+		break;
+	case 3:
+		out_fmt.height /= 2;
+		out_fmt.width /= 2;
+		if ((ret=avi_r2r_set_format(&data->context_id,
+					    &in_fmt,
+					    &out_fmt,
+					    0,
+					    0)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_set_format(out_size) fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+		break;
+	case 4:
+		/* low bit of the priv tag is the frame sequence parity */
+		if (((unsigned)in_buf->priv & 1) == 0) {
+			out_fmt.height /= 2;
+			out_fmt.width /= 2;
+			if ((ret=avi_r2r_set_format(&data->context_id,
+						    &in_fmt,
+						    &out_fmt,
+						    0,
+						    0)) < 0)
+				AVIR2R_LOG_ERROR(data,
+					"%s(%d) avi_r2r_set_format fail (%d)",
+					__FUNCTION__,
+					atomic_read(&data->callback_configure_ran),
+					ret);
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (data->tag_src) {
+		buffer = (u32 *)physical_to_virtual(data, in_buf->plane0.dma_addr);
+		for (l = 0; l < in_fmt.height; l++) {
+			o = l * in_fmt.plane0.line_size / 4;
+			buffer[o] = (u32)in_buf->priv;
+		}
+		o = in_fmt.height * in_fmt.plane0.line_size / 4;
+		buffer[o-1] = (u32)in_buf->priv;
+	}
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) %08X %08X",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				(unsigned)in_buf->priv, (unsigned)out_buf->priv);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) OUT",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran));
+
+	spin_unlock(&data->lock);
+}
+
+/*
+ * Queue the whole batch in one go: install the batch callbacks, reset
+ * the run counters and indices, then call batch_queue() once per
+ * requested operation.  Always returns 0.
+ */
+static int run_batch(struct avi_r2r_test *data)
+{
+	int i;
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s() IN",__FUNCTION__);
+
+	data->ops.done = &my_callback_done;
+	data->ops.configure = &my_callback_configure;
+
+	data->counter_buf_queue = 0;
+	data->counter_done = 0;
+	data->frame_index_src = -1;
+	data->frame_index_tgt = -1;
+	data->buffer_in_idx = 0;
+	data->buffer_out_idx = 0;
+
+	for (i = 0; i < data->deltas.number; i++)
+		batch_queue(data);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s() OUT",__FUNCTION__);
+
+	return 0;
+}
+
+/* Append one (begin, end, overrun) sample to the circular period log. */
+static void add_kt_period(struct avi_r2r_test *data,
+			  ktime_t kt_begin,
+			  ktime_t kt_end,
+			  int over)
+{
+	int idx = data->kt_period_index;
+
+	data->kt_period[idx].begin   = kt_begin;
+	data->kt_period[idx].end     = kt_end;
+	data->kt_period[idx].overrun = over;
+
+	data->kt_period_counter++;
+
+	/* advance the write index, wrapping at the configured capacity */
+	if (++idx >= nr_timings_saved)
+		idx = 0;
+	data->kt_period_index = idx;
+
+	/* track the fill level until the ring is full */
+	if (data->kt_period_nr < nr_timings_saved)
+		data->kt_period_nr++;
+}
+
+/* Timer callback: called at the end of period. */
+/*
+ * Queues the next source/target pair and re-arms the timer until
+ * deltas.number operations have been queued or the run has finished or
+ * aborted.
+ *
+ * Fix: the original stored the int return of avi_r2r_buf_queue() in the
+ * enum hrtimer_restart return variable; on a queue failure the negative
+ * error code was handed back to the hrtimer core, which treats any value
+ * different from HRTIMER_NORESTART as a restart request -- so a failed
+ * queue kept the timer firing even though the run was aborted.  The
+ * queue status now lives in its own variable and this function only ever
+ * returns HRTIMER_RESTART or HRTIMER_NORESTART.
+ */
+static enum hrtimer_restart periodic_timer(struct hrtimer *timer)
+{
+	struct avi_dma_buffer stats = {
+		.plane0.dma_addr = 0,
+	};
+	struct avi_dma_buffer source;
+	struct avi_dma_buffer target;
+	struct avi_r2r_test *data;
+	unsigned long tjnow;
+	ktime_t kt_now, kt_now1;
+	int err;
+	int qret;
+	enum hrtimer_restart ret = HRTIMER_NORESTART;
+
+	data = container_of(timer, struct avi_r2r_test, timer);
+
+	if (data->counter_buf_queue<data->deltas.number
+	    && !data->finished
+	    && !data->aborted) {
+		tjnow = jiffies;
+		kt_now = hrtimer_cb_get_time(&data->timer);
+		/* re-arm relative to now; err holds the overrun count */
+		err = hrtimer_forward(&data->timer,
+				      kt_now,
+				      data->ktime_period_ns);
+		if (data->verbose)
+			AVIR2R_LOG_INFO(data,
+					"%s() "
+					"jiffies %lu ;"
+					" ret: %d ;"
+					" ktnsec: %lld ;"
+					" count %d",
+					__FUNCTION__,
+					tjnow,
+					err,
+					ktime_to_ns(kt_now),
+					data->counter_buf_queue);
+
+		/* restart from base addresses when a ring wraps to 0 */
+		if (0 == data->buffer_in_idx)
+			data->l_buffers_config.source = data->buffers_config.source;
+		if (0 == data->buffer_out_idx)
+			data->l_buffers_config.target = data->buffers_config.target;
+
+		setup_dma(&source,
+			  &data->formats.source,
+			  &data->l_buffers_config.source,
+			  data->images_config.source.rect.left,
+			  data->images_config.source.rect.top);
+
+		setup_dma(&target,
+			  &data->formats.target,
+			  &data->l_buffers_config.target,
+			  data->images_config.target.rect.left,
+			  data->images_config.target.rect.top);
+
+		/* marker byte (0x50/0x70) plus the sequence number */
+		source.priv = (void *)(0x50000000|data->counter_buf_queue);
+		target.priv = (void *)(0x70000000|data->counter_buf_queue);
+
+		qret = avi_r2r_buf_queue(&data->context_id,
+					 &source,
+					 &target,
+					 &stats);
+		if (qret >= 0) {
+			if (data->verbose)
+				AVIR2R_LOG_INFO(data, "%s() #%d/%d"
+						" Buf Queue successful",
+						__FUNCTION__,
+						data->counter_buf_queue,
+						data->deltas.number);
+
+			data->counter_buf_queue++;
+			data->images_config.source.rect.left +=
+					data->deltas.source.left;
+			data->images_config.source.rect.top +=
+					data->deltas.source.top;
+			data->images_config.target.rect.left +=
+					data->deltas.target.left;
+			data->images_config.target.rect.top +=
+					data->deltas.target.top;
+
+			data->buffer_in_idx++;
+			if (data->buffer_in_idx > data->src_nbuffers)
+				data->buffer_in_idx = 0;
+			else {
+				data->l_buffers_config.source.plane_0 += data->images_config.source.plane0.frame_size;
+				data->l_buffers_config.source.plane_1 += data->images_config.source.plane1.frame_size;
+			}
+			data->buffer_out_idx++;
+			if (data->buffer_out_idx > data->tgt_nbuffers)
+				data->buffer_out_idx = 0;
+			else {
+				data->l_buffers_config.target.plane_0 += data->images_config.target.plane0.frame_size;
+				data->l_buffers_config.target.plane_1 += data->images_config.target.plane1.frame_size;
+			}
+
+			ret = HRTIMER_RESTART;
+		}
+		else {
+			AVIR2R_LOG_INFO(data,"%s() #%d/%d "
+					"Buf Queue failed (%d). Abort !!",
+					__FUNCTION__,
+					data->counter_buf_queue,
+					data->deltas.number,
+					qret);
+			data->aborted=1;
+		}
+
+		kt_now1 = hrtimer_cb_get_time(&data->timer);
+		add_kt_period(data,kt_now,kt_now1,err);
+	}
+	else
+		AVIR2R_LOG_INFO(data,
+				"%s() NORESTART #%d/%d end:%d abort:%d",
+				__FUNCTION__,
+				data->counter_buf_queue,
+				data->deltas.number,
+				data->finished, data->aborted);
+
+	if (data->aborted) {
+		AVIR2R_LOG_INFO(data, "%s() aborted",__FUNCTION__);
+		atomic_inc(&data->all_done);
+		/* Wake up any task waiting for done */
+		wake_up_interruptible(&data->wait_done);
+	}
+
+	return ret;
+}
+
+/*
+ * "done" callback for the periodic run.  Same bookkeeping as
+ * my_callback_done(): cross-checks the sequence numbers in the low 24
+ * bits of the buffers' priv tags, counts mismatches and wakes
+ * wait_for_done() once all deltas.number operations are done.  (Unlike
+ * the batch variant it has no abort wake-up here; periodic_timer()
+ * handles the abort path.)  The whole body runs under data->lock.
+ */
+static void my_periodic_callback_done(struct avi_r2r_context *ctx,
+				      struct avi_dma_buffer *in_buf,
+				      struct avi_dma_buffer *out_buf,
+				      struct avi_dma_buffer *stats_buf,
+				      void *priv)
+{
+	struct avi_r2r_test *data=ctx->private;
+	unsigned int index_src, index_tgt;
+
+	spin_lock(&data->lock);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) IN",__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+
+	atomic_inc(&data->callback_done_ran);
+
+	/* the sequence number is the low 24 bits of the priv tag */
+	index_src=(unsigned int)in_buf->priv & 0x00FFFFFF;
+	index_tgt=(unsigned int)out_buf->priv & 0x00FFFFFF;
+
+	if (index_src != index_tgt) {
+		data->mismatch_frame_index++;
+		AVIR2R_LOG_INFO(data, "%s(%d) index mismatch src=%u tgt=%u",
+				__FUNCTION__, data->counter_done,
+				index_src, index_tgt);
+	}
+
+	if (index_src != (data->frame_index_src+1)) {
+		data->mismatch_frame_index_src++;
+		AVIR2R_LOG_INFO(data, "%s(%d) SRC index mismatch read=%u expected=%u",
+				__FUNCTION__, data->counter_done,
+				index_src, data->frame_index_src+1);
+	}
+
+	if (index_tgt != (data->frame_index_tgt+1)) {
+		data->mismatch_frame_index_tgt++;
+		AVIR2R_LOG_INFO(data, "%s(%d) TGT index mismatch read=%u expected=%u",
+				__FUNCTION__, data->counter_done,
+				index_tgt, data->frame_index_tgt+1);
+	}
+
+	data->frame_index_src=index_src;
+	data->frame_index_tgt=index_tgt;
+
+	data->counter_done++;
+
+	if (data->counter_done>=data->deltas.number) {
+		data->finished=1;
+		AVIR2R_LOG_INFO(data, "%s(%d) finished",
+				__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+		atomic_inc(&data->all_done);
+		/* Wake up any task waiting for done */
+		wake_up_interruptible(&data->wait_done);
+	}
+	else
+		if (data->verbose)
+			AVIR2R_LOG_INFO(data, "%s(%d) #%d/%d",
+					__FUNCTION__,
+					atomic_read(&data->callback_done_ran),
+					data->counter_done,
+					data->deltas.number);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) OUT",__FUNCTION__,
+				atomic_read(&data->callback_done_ran));
+
+	spin_unlock(&data->lock);
+}
+
+/*
+ * "configure" callback for the periodic run.  Same behaviour as
+ * my_callback_configure(): re-applies a format or the gamma setup
+ * depending on config_run_time (1: colorspace conversion, 2: gamma,
+ * 3: half-size output, 4: half-size output on even-tagged frames), and
+ * optionally stamps the source buffer with its priv tag when tag_src is
+ * set.  The whole body runs under data->lock.
+ */
+static void my_periodic_callback_configure(struct avi_r2r_context *ctx,
+					   struct avi_dma_buffer *in_buf,
+					   struct avi_dma_buffer *out_buf,
+					   void *priv)
+{
+	struct avi_r2r_test *data = ctx->private;
+	struct avi_segment_format out_fmt, in_fmt;
+	u32 *buffer;
+	unsigned int l, o;
+	int ret;
+
+	spin_lock(&data->lock);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) IN",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran));
+
+	atomic_inc(&data->callback_configure_ran);
+
+	in_fmt = data->formats.source ;
+	out_fmt = data->formats.target ;
+	switch (data->config_run_time) {
+	case 1:
+		in_fmt.colorspace = AVI_BT601_CSPACE;
+		out_fmt.colorspace = AVI_BT709_CSPACE;
+		if ((ret=avi_r2r_set_format(&data->context_id,
+					    &in_fmt,
+					    &out_fmt,
+					    0,
+					    0)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_set_format(conv) fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+
+		break;
+	case 2:
+		if ((ret=avi_r2r_setup_gam(&data->context_id,
+					   data->gamma_config.bypass,
+					   data->gamma_config.palette,
+					   data->gamma_config.comp_width,
+					   &data->gamma_config.cmap)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_setup_gam fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+		break;
+	case 3:
+		out_fmt.height /= 2;
+		out_fmt.width /= 2;
+		if ((ret=avi_r2r_set_format(&data->context_id,
+					    &in_fmt,
+					    &out_fmt,
+					    0,
+					    0)) < 0)
+			AVIR2R_LOG_ERROR(data,
+				"%s(%d) avi_r2r_set_format fail (%d)",
+				__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				ret);
+		break;
+	case 4:
+		/* low bit of the priv tag is the frame sequence parity */
+		if (((unsigned)in_buf->priv & 1) == 0) {
+			out_fmt.height /= 2;
+			out_fmt.width /= 2;
+			if ((ret=avi_r2r_set_format(&data->context_id,
+						    &in_fmt,
+						    &out_fmt,
+						    0,
+						    0)) < 0)
+				AVIR2R_LOG_ERROR(data,
+					"%s(%d) avi_r2r_set_format fail (%d)",
+					__FUNCTION__,
+					atomic_read(&data->callback_configure_ran),
+					ret);
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (data->tag_src) {
+		buffer = (u32 *)physical_to_virtual(data, in_buf->plane0.dma_addr);
+		for (l = 0; l < in_fmt.height; l++) {
+			o = l * in_fmt.plane0.line_size / 4;
+			buffer[o] = (u32)in_buf->priv;
+		}
+		o = in_fmt.height * in_fmt.plane0.line_size / 4;
+		buffer[o-1] = (u32)in_buf->priv;
+	}
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) %08X %08X",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran),
+				(unsigned)in_buf->priv, (unsigned)out_buf->priv);
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s(%d) OUT",__FUNCTION__,
+				atomic_read(&data->callback_configure_ran));
+
+	spin_unlock(&data->lock);
+}
+
+/*
+ * Start the periodic run: install the periodic callbacks, reset the run
+ * counters and arm the hrtimer with a period of data->period
+ * microseconds.  The actual queueing happens in periodic_timer().
+ * Always returns 0.
+ */
+static int run_periodic(struct avi_r2r_test *data)
+{
+	int ret=0;
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s() IN",__FUNCTION__);
+
+	data->ops.done = &my_periodic_callback_done;
+	data->ops.configure = &my_periodic_callback_configure;
+
+	data->ktime_period_ns = ktime_set(0, data->period*1000);
+	data->timer.function = &periodic_timer;
+	data->done_periodic = 0;
+	data->counter_buf_queue = 0;
+	data->counter_done = 0;
+	data->frame_index_src = -1;
+	data->frame_index_tgt = -1;
+	data->buffer_in_idx = 0;
+	data->buffer_out_idx = 0;
+
+	if (!hrtimer_active(&data->timer)) {
+		unsigned long tjnow=jiffies;
+		struct timespec ts,ts1;
+		ktime_get_ts(&ts);
+		hrtimer_start(&data->timer,
+			      data->ktime_period_ns,
+			      HRTIMER_MODE_REL);
+		ktime_get_ts(&ts1);
+		/* log the arming cost itself with overrun == -1 marker */
+		add_kt_period(data,
+			      timespec_to_ktime(ts),
+			      timespec_to_ktime(ts1),
+			      -1);
+
+		if (data->verbose)
+			AVIR2R_LOG_INFO(data,
+					"%s() hrtimer_start %lu %lld",
+					__FUNCTION__,
+					tjnow,
+					ktime_to_ns(timespec_to_ktime(ts)));
+	}
+
+	if (data->verbose)
+		AVIR2R_LOG_INFO(data, "%s() OUT",__FUNCTION__);
+
+	return ret;
+}
+
+/*
+ * Copy the module parameters into the runtime test configuration:
+ * source/target image geometry and buffer base addresses, deltas,
+ * timings, an identity gamma ramp, the five capability requirement
+ * masks, and the run bookkeeping defaults.
+ */
+static void get_test_config(struct avi_r2r_test *data)
+{
+	int i,avi_cmap_sz;
+
+	data->images_config.source.pixel_format = avi_pixfmt_by_id(src_pixfmt);
+	data->images_config.source.frame.width=src_frame_width;
+	data->images_config.source.frame.height=src_frame_height;
+	data->images_config.source.rect.width=src_width;
+	data->images_config.source.rect.height=src_height;
+	data->images_config.source.rect.left=src_left;
+	data->images_config.source.rect.top=src_top;
+	data->src_nbuffers=src_nbuffers;
+
+	data->images_config.target.pixel_format= avi_pixfmt_by_id(tgt_pixfmt);
+	data->images_config.target.frame.width=tgt_frame_width;
+	data->images_config.target.frame.height=tgt_frame_height;
+	data->images_config.target.rect.width=tgt_width;
+	data->images_config.target.rect.height=tgt_height;
+	data->images_config.target.rect.left=tgt_left;
+	data->images_config.target.rect.top=tgt_top;
+	data->tgt_nbuffers=tgt_nbuffers;
+
+	data->buffers_config.source.plane_0=src_sa0;
+	data->buffers_config.source.plane_1=src_sa1;
+
+	data->buffers_config.target.plane_0=tgt_sa0;
+	data->buffers_config.target.plane_1=tgt_sa1;
+
+	data->deltas.source.left=src_delta_left;
+	data->deltas.source.top=src_delta_top;
+	data->deltas.target.left=tgt_delta_left;
+	data->deltas.target.top=tgt_delta_top;
+	data->deltas.number=delta_count;
+
+	data->time_out=time_out;
+	data->period=period;
+	data->startup_delay=startup_delay;
+
+	data->images_config.source.colorspace=src_cspace;
+	data->images_config.target.colorspace=tgt_cspace;
+
+	/* build a linear (identity) colour map for the gamma block */
+	memset(&data->gamma_config.cmap,0,sizeof(struct avi_cmap));
+	avi_cmap_sz=AVI_CMAP_SZ;
+#ifdef AVI_BACKWARD_COMPAT
+	/* pre-rev-3 AVI hardware exposes a quarter-size colour map */
+	if (avi_get_revision() < AVI_REVISION_3)
+		avi_cmap_sz=AVI_CMAP_SZ/4;
+#endif /* AVI_BACKWARD_COMPAT */
+
+	for (i=0;i<avi_cmap_sz;i++) {
+		data->gamma_config.cmap.red[i]=i;
+		data->gamma_config.cmap.green[i]=i;
+		data->gamma_config.cmap.blue[i]=i;
+	}
+	data->gamma_config.bypass=gam_bypass;
+	data->gamma_config.palette=gam_palette;
+	data->gamma_config.comp_width=0;
+
+	data->config_run_time = config_run_time;
+	data->tag_src = tag_src;
+
+
+	/* capability sets the test can request from the r2r core */
+	data->requirements[0]=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT);
+	data->requirements[1]=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT|AVI_CAP_CONV|AVI_CAP_GAM);
+	data->requirements[2]=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT|AVI_CAP_CONV|AVI_CAP_SCAL);
+	data->requirements[3]=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT|AVI_CAP_CONV|AVI_CAP_SCAL|AVI_CAP_PLANAR);
+	data->requirements[4]=(AVI_CAP_DMA_IN|AVI_CAP_DMA_OUT|AVI_CAP_CONV);
+
+	data->run=0;
+	data->running=0;
+	data->verbose=verbose;
+	data->kt_period_index=0;
+	data->kt_period_counter=0;
+	data->kt_period_nr=0;
+}
+
/*
 * Log the current test configuration in one multi-line message:
 * source and target pixel format, frame size, crop position/size and
 * DMA plane addresses, followed by the per-frame position deltas.
 */
static void print_test_config(struct avi_r2r_test *data)
{
	AVIR2R_LOG_INFO(data,
		"\n"
		" - Source: %s[%d] %dx%d (%d,%d) %dx%d 0x%p|0x%p\n"
		" - Target: %s[%d] %dx%d (%d,%d) %dx%d 0x%p|0x%p\n"
		" - Delta: Source (%d,%d), Target (%d,%d), Number %d",
		avi_debug_pixfmt_to_str(data->images_config.source.pixel_format),
		data->images_config.source.pixel_format.id,
		data->images_config.source.frame.width,
		data->images_config.source.frame.height,
		data->images_config.source.rect.left,
		data->images_config.source.rect.top,
		data->images_config.source.rect.width,
		data->images_config.source.rect.height,
		(void *)data->buffers_config.source.plane_0,
		(void *)data->buffers_config.source.plane_1,

		avi_debug_pixfmt_to_str(data->images_config.target.pixel_format),
		data->images_config.target.pixel_format.id,
		data->images_config.target.frame.width,
		data->images_config.target.frame.height,
		data->images_config.target.rect.left,
		data->images_config.target.rect.top,
		data->images_config.target.rect.width,
		data->images_config.target.rect.height,
		(void *)data->buffers_config.target.plane_0,
		(void *)data->buffers_config.target.plane_1,

		data->deltas.source.left,
		data->deltas.source.top,
		data->deltas.target.left,
		data->deltas.target.top,
		data->deltas.number);

}
+
/*
 * Stop the periodic-mode hrtimer after a run. The three hrtimer state
 * queries are diagnostic: they log which state the timer was caught in
 * (callback running, armed, or queued) together with the buffer-queue
 * and completion counters, before hrtimer_cancel() actually stops it.
 */
static void cancel_timer(struct avi_r2r_test *data)
{
	int ret_cancel = 0;

	if (hrtimer_callback_running(&data->timer)) {
		AVIR2R_LOG_WARN(data,
			"hrtimer callback is running");
	}
	if (hrtimer_active(&data->timer) != 0) {
		ret_cancel = hrtimer_cancel(&data->timer);
		AVIR2R_LOG_INFO(data,
			"active hrtimer cancelled: %d (%d, %d)",
			ret_cancel,
			data->counter_buf_queue,
			data->counter_done);
	}
	/* NOTE(review): if the first cancel ran, the timer should no longer
	 * be queued; this second check is belt-and-braces. */
	if (hrtimer_is_queued(&data->timer) != 0) {
		ret_cancel = hrtimer_cancel(&data->timer);
		AVIR2R_LOG_INFO(data,
			"queued hrtimer cancelled: %d (%d, %d)",
			ret_cancel,
			data->counter_buf_queue,
			data->counter_done);
	}
}
+
/*
 * Execute test #new_run (clamped to NR_REQUIREMENTS):
 *  1. request an AVI ram2ram context matching that test's capability set,
 *  2. walk a fixed setup sequence (formats, optional gamma, optional
 *     startup delay),
 *  3. start the transfers (batch or hrtimer-periodic depending on
 *     data->period), wait for completion and report,
 *  4. destroy the context and clear the run state.
 * Any failing step aborts the remaining steps (the for/switch below only
 * advances while ret == 0).
 */
static void run_test(struct avi_r2r_test *data, int new_run)
{
	int ret, seq;
//	int count;

	if (new_run == 0) {
		AVIR2R_LOG_INFO(data, "Run is zero!!");
		return;
	}

	/* Clamp the requested test number to the available requirement sets */
	if (new_run > NR_REQUIREMENTS)
		new_run = NR_REQUIREMENTS;
	data->running = data->run = new_run;

	AVIR2R_LOG_INFO(data, "Running test #%d", data->running);
	print_test_config(data);

	data->context_id.private = data;

	/* Reset period-measurement bookkeeping for this run */
	data->kt_period_counter = 0;
	data->kt_period_index = 0;
	data->kt_period_nr = 0;
	memset(data->kt_period, 0, sizeof(data->kt_period));

	/* period == 0 selects batch callbacks, otherwise periodic ones */
	if (data->period == 0) {
		data->ops.done = &my_callback_done;
		data->ops.configure = &my_callback_configure;
	}
	else {
		data->ops.done = &my_periodic_callback_done;
		data->ops.configure = &my_periodic_callback_configure;
		AVIR2R_LOG_INFO(data, "Period %lu microseconds", data->period);
	}
	if ((ret = avi_r2r_request(&data->context_id,
				   data->dev,
				   data->requirements[data->running - 1],
				   &data->ops)) == 0) {
		if (data->verbose)
			AVIR2R_LOG_INFO(data, "Request successful");

		/* Setup/run sequencer; unused steps are placeholders */
		for (seq = 0; seq < 10 && ret == 0; seq++)
			switch (seq) {
			case 0:
				/* Program source and target formats */
				setup_formats(data);
				ret = avi_r2r_set_format(&data->context_id,
							 &data->formats.source,
							 &data->formats.target,
							 0,
							 0);
				if (ret == 0)
				{
					if (data->verbose)
						AVIR2R_LOG_INFO(data, "Set formats successful");
				}
				else
					AVIR2R_LOG_INFO(data, "Set formats failed (%d)", ret);
				break;

			case 1:
				break;

			case 2:
				/* Gamma setup only when the test uses AVI_CAP_GAM */
				if (data->requirements[data->running - 1] & AVI_CAP_GAM) {
					ret = avi_r2r_setup_gam(&data->context_id,
								data->gamma_config.bypass,
								data->gamma_config.palette,
								data->gamma_config.comp_width,
								&data->gamma_config.cmap);
					if (ret == 0) {
						if (data->verbose)
							AVIR2R_LOG_INFO(data, "Setup Gamma successful");
					}
					else
						AVIR2R_LOG_INFO(data, "Setup Gamma failed (%d)", ret);
				}
				break;
			case 3:
			case 4:
			case 5:
			case 6:
			case 7:
			default:
				break;

			case 8:
				/* Optional delay before starting the transfers */
				if (data->startup_delay)
					msleep(data->startup_delay);
				break;

			case 9:
				/* Arm completion tracking, then start the run */
				data->finished = 0;
				data->aborted = 0;
				atomic_set(&data->done_counter,
					   atomic_read(&data->all_done));

				if (data->period == 0)
					ret = run_batch(data);
				else
					ret = run_periodic(data);

				if (ret >= 0) {
					if (data->verbose)
						AVIR2R_LOG_INFO(data, "Run successful");
					ret = wait_for_done(data);
					if (ret != 0) {
						data->aborted = 1;
						AVIR2R_LOG_INFO(data, "Done failed (%d)", ret);
						break;
					}
					else
						report_status(data);
				}
				else
					AVIR2R_LOG_INFO(data, "Run failed (%d)", ret);
				break;
			}

		/* Always release the context, even after a failed step */
//		count = 0;
//		do {
			if ((ret = avi_r2r_destroy(&data->context_id)) == 0) {
				if (data->verbose)
					AVIR2R_LOG_INFO(data,
							"Destroy successful");
			}
			else {
				AVIR2R_LOG_INFO(data, "Destroy failed (%d)", ret);
//				if (count < 20)
//				{
//					msleep_interruptible(10);
//					count++;
//				}
//				else
//				{
//					AVIR2R_LOG_INFO(data,
//						"cannot destroy");
//					break;
//				}
			}
//		} while(ret!=0);
	}
	else
		AVIR2R_LOG_INFO(data, "Request failed (%d)", ret);

	if (data->period != 0)
		cancel_timer(data);

	data->running = 0;
	data->run = 0;
}
+
+/****************************************************************/
+static int src_frame_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%ux%u\n",
+ data->images_config.source.frame.width,
+ data->images_config.source.frame.height);
+
+ return 0;
+}
+
+static ssize_t src_frame_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 width,height;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ width = simple_strtoul(_buf, &last, 0);
+ if (*last == 'x')
+ {
+ last++;
+ height = simple_strtoul(last, &last, 0);
+
+ data->images_config.source.frame.width=width;
+ data->images_config.source.frame.height=height;
+ }
+ return size;
+}
+
+static int src_frame_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_frame_show ,inode->i_private);
+}
+
+static const struct file_operations src_frame_debugfs_fops = {
+ .open = src_frame_open,
+ .read = seq_read,
+ .write = src_frame_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_frame_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%ux%u\n",
+ data->images_config.target.frame.width,
+ data->images_config.target.frame.height);
+
+ return 0;
+}
+
+static ssize_t tgt_frame_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 width,height;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ width = simple_strtoul(_buf, &last, 0);
+ if (*last == 'x') {
+ last++;
+ height = simple_strtoul(last, &last, 0);
+
+ data->images_config.target.frame.width=width;
+ data->images_config.target.frame.height=height;
+ }
+ return size;
+}
+
+static int tgt_frame_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_frame_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_frame_debugfs_fops = {
+ .open = tgt_frame_open,
+ .read = seq_read,
+ .write = tgt_frame_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_rect_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%ux%u\n",
+ data->images_config.source.rect.width,
+ data->images_config.source.rect.height);
+
+ return 0;
+}
+
+static ssize_t src_rect_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 width,height;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ width = simple_strtoul(_buf, &last, 0);
+ if (*last == 'x') {
+ last++;
+ height = simple_strtoul(last, &last, 0);
+
+ data->images_config.source.rect.width=width;
+ data->images_config.source.rect.height=height;
+ }
+ return size;
+}
+
+static int src_rect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_rect_show ,inode->i_private);
+}
+
+static const struct file_operations src_rect_debugfs_fops = {
+ .open = src_rect_open,
+ .read = seq_read,
+ .write = src_rect_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_rect_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%ux%u\n",
+ data->images_config.target.rect.width,
+ data->images_config.target.rect.height);
+
+ return 0;
+}
+
+static ssize_t tgt_rect_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 width,height;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ width = simple_strtoul(_buf, &last, 0);
+ if (*last == 'x') {
+ last++;
+ height = simple_strtoul(last, &last, 0);
+
+ data->images_config.target.rect.width=width;
+ data->images_config.target.rect.height=height;
+ }
+ return size;
+}
+
+static int tgt_rect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_rect_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_rect_debugfs_fops = {
+ .open = tgt_rect_open,
+ .read = seq_read,
+ .write = tgt_rect_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_pos_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%u,%u\n",
+ data->images_config.source.rect.left,
+ data->images_config.source.rect.top);
+
+ return 0;
+}
+
+static ssize_t src_pos_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 left,top;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ left = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ top = simple_strtoul(last, &last, 0);
+
+ data->images_config.source.rect.left=left;
+ data->images_config.source.rect.top=top;
+ }
+ return size;
+}
+
+static int src_pos_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_pos_show ,inode->i_private);
+}
+
+static const struct file_operations src_pos_debugfs_fops = {
+ .open = src_pos_open,
+ .read = seq_read,
+ .write = src_pos_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_pos_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%u,%u\n",
+ data->images_config.target.rect.left,
+ data->images_config.target.rect.top);
+
+ return 0;
+}
+
+static ssize_t tgt_pos_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 left,top;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ left = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ top = simple_strtoul(last, &last, 0);
+
+ data->images_config.target.rect.left=left;
+ data->images_config.target.rect.top=top;
+ }
+ return size;
+}
+
+static int tgt_pos_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_pos_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_pos_debugfs_fops = {
+ .open = tgt_pos_open,
+ .read = seq_read,
+ .write = tgt_pos_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_addr_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "0x%08X,0x%08X\n",
+ data->buffers_config.source.plane_0,
+ data->buffers_config.source.plane_1);
+
+ return 0;
+}
+
+static ssize_t src_addr_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 addr[2];
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ addr[0] = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ addr[1] = simple_strtoul(last, &last, 0);
+
+ data->buffers_config.source.plane_0=addr[0];
+ data->buffers_config.source.plane_1=addr[1];
+ }
+ return size;
+}
+
+static int src_addr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_addr_show ,inode->i_private);
+}
+
+static const struct file_operations src_addr_debugfs_fops = {
+ .open = src_addr_open,
+ .read = seq_read,
+ .write = src_addr_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_addr_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "0x%08X,0x%08X\n",
+ data->buffers_config.target.plane_0,
+ data->buffers_config.target.plane_1);
+
+ return 0;
+}
+
+static ssize_t tgt_addr_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 addr[2];
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ addr[0] = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ addr[1] = simple_strtoul(last, &last, 0);
+
+ data->buffers_config.target.plane_0=addr[0];
+ data->buffers_config.target.plane_1=addr[1];
+ }
+ return size;
+}
+
+static int tgt_addr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_addr_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_addr_debugfs_fops = {
+ .open = tgt_addr_open,
+ .read = seq_read,
+ .write = tgt_addr_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_pixfmt_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d (%s)\n",
+ data->images_config.source.pixel_format.id,
+ avi_debug_pixfmt_to_str(data->images_config.source.pixel_format));
+
+ return 0;
+}
+
+static ssize_t src_pixfmt_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+
+ data->images_config.source.pixel_format = avi_pixfmt_by_id(fmt);
+ return size;
+}
+
+static int src_pixfmt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_pixfmt_show ,inode->i_private);
+}
+
+static const struct file_operations src_pixfmt_debugfs_fops = {
+ .open = src_pixfmt_open,
+ .read = seq_read,
+ .write = src_pixfmt_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_pixfmt_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d (%s)\n",
+ data->images_config.target.pixel_format.id,
+ avi_debug_pixfmt_to_str(data->images_config.target.pixel_format));
+
+ return 0;
+}
+
+static ssize_t tgt_pixfmt_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->images_config.target.pixel_format = avi_pixfmt_by_id(fmt);
+ return size;
+}
+
+static int tgt_pixfmt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_pixfmt_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_pixfmt_debugfs_fops = {
+ .open = tgt_pixfmt_open,
+ .read = seq_read,
+ .write = tgt_pixfmt_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_delta_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%u,%u\n",
+ data->deltas.source.left,
+ data->deltas.source.top);
+
+ return 0;
+}
+
+static ssize_t src_delta_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 left,top;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ left = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ top = simple_strtoul(last, &last, 0);
+
+ data->deltas.source.left=left;
+ data->deltas.source.top=top;
+ }
+ return size;
+}
+
+static int src_delta_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_delta_show ,inode->i_private);
+}
+
+static const struct file_operations src_delta_debugfs_fops = {
+ .open = src_delta_open,
+ .read = seq_read,
+ .write = src_delta_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_delta_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%u,%u\n",
+ data->deltas.target.left,
+ data->deltas.target.top);
+
+ return 0;
+}
+
+static ssize_t tgt_delta_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 left,top;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ left = simple_strtoul(_buf, &last, 0);
+ if (*last == ',') {
+ last++;
+ top = simple_strtoul(last, &last, 0);
+
+ data->deltas.target.left=left;
+ data->deltas.target.top=top;
+ }
+ return size;
+}
+
+static int tgt_delta_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_delta_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_delta_debugfs_fops = {
+ .open = tgt_delta_open,
+ .read = seq_read,
+ .write = tgt_delta_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int delta_count_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->deltas.number);
+
+ return 0;
+}
+
+static ssize_t delta_count_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->deltas.number=fmt;
+ return size;
+}
+
+static int delta_count_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, delta_count_show ,inode->i_private);
+}
+
+static const struct file_operations delta_count_debugfs_fops = {
+ .open = delta_count_open,
+ .read = seq_read,
+ .write = delta_count_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_cspace_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d (%s)\n", data->images_config.source.colorspace,
+ avi_debug_colorspace_to_str(data->images_config.source.colorspace));
+
+ return 0;
+}
+
+static ssize_t src_cspace_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->images_config.source.colorspace=fmt;
+ return size;
+}
+
+static int src_cspace_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_cspace_show ,inode->i_private);
+}
+
+static const struct file_operations src_cspace_debugfs_fops = {
+ .open = src_cspace_open,
+ .read = seq_read,
+ .write = src_cspace_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_cspace_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d (%s)\n", data->images_config.target.colorspace,
+ avi_debug_colorspace_to_str(data->images_config.target.colorspace));
+
+ return 0;
+}
+
+static ssize_t tgt_cspace_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->images_config.target.colorspace=fmt;
+ return size;
+}
+
+static int tgt_cspace_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_cspace_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_cspace_debugfs_fops = {
+ .open = tgt_cspace_open,
+ .read = seq_read,
+ .write = tgt_cspace_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int src_nbuffers_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->src_nbuffers);
+
+ return 0;
+}
+
+static ssize_t src_nbuffers_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->src_nbuffers=fmt;
+ return size;
+}
+
+static int src_nbuffers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, src_nbuffers_show ,inode->i_private);
+}
+
+static const struct file_operations src_nbuffers_debugfs_fops = {
+ .open = src_nbuffers_open,
+ .read = seq_read,
+ .write = src_nbuffers_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int tgt_nbuffers_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->tgt_nbuffers);
+
+ return 0;
+}
+
+static ssize_t tgt_nbuffers_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->tgt_nbuffers=fmt;
+ return size;
+}
+
+static int tgt_nbuffers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tgt_nbuffers_show ,inode->i_private);
+}
+
+static const struct file_operations tgt_nbuffers_debugfs_fops = {
+ .open = tgt_nbuffers_open,
+ .read = seq_read,
+ .write = tgt_nbuffers_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int run_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->run);
+
+ return 0;
+}
+
+static ssize_t run_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (data->running)
+ {
+ AVIR2R_LOG_INFO(data,"Already running test #%d", data->running);
+ return size;
+ }
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+
+ run_test(data, fmt);
+
+ return size;
+}
+
+static int run_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, run_show ,inode->i_private);
+}
+
+static const struct file_operations run_debugfs_fops = {
+ .open = run_open,
+ .read = seq_read,
+ .write = run_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int running_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->running);
+
+ return 0;
+}
+
+static int running_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, running_show ,inode->i_private);
+}
+
+static const struct file_operations running_debugfs_fops = {
+ .open = running_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int gam_bypass_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->gamma_config.bypass);
+
+ return 0;
+}
+
+static ssize_t gam_bypass_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->gamma_config.bypass=!!fmt;
+ return size;
+}
+
+static int gam_bypass_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gam_bypass_show ,inode->i_private);
+}
+
+static const struct file_operations gam_bypass_debugfs_fops = {
+ .open = gam_bypass_open,
+ .read = seq_read,
+ .write = gam_bypass_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int gam_palette_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->gamma_config.palette);
+
+ return 0;
+}
+
+static ssize_t gam_palette_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+ data->gamma_config.palette=!!fmt;
+ return size;
+}
+
+static int gam_palette_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gam_palette_show ,inode->i_private);
+}
+
+static const struct file_operations gam_palette_debugfs_fops = {
+ .open = gam_palette_open,
+ .read = seq_read,
+ .write = gam_palette_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int mismatchs_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "SRC=%u, TGT=%u, %u\n",
+ data->mismatch_frame_index_src,
+ data->mismatch_frame_index_tgt,
+ data->mismatch_frame_index);
+
+ return 0;
+}
+
+static int mismatchs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mismatchs_show ,inode->i_private);
+}
+
+static const struct file_operations mismatchs_debugfs_fops = {
+ .open = mismatchs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int counters_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d, %d\n",
+ data->counter_buf_queue,
+ data->counter_done);
+
+ return 0;
+}
+
+static int counters_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, counters_show ,inode->i_private);
+}
+
+static const struct file_operations counters_debugfs_fops = {
+ .open = counters_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+static int verbose_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->verbose);
+
+ return 0;
+}
+
+static ssize_t verbose_store(struct file *file,
+ const char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct avi_r2r_test *data = m->private;
+ char *last;
+ char _buf[32];
+ u32 fmt;
+
+ if (*ppos > 0)
+ return -EOPNOTSUPP;
+
+ if (size > ARRAY_SIZE(_buf) - 1)
+ return -ERANGE;
+
+ if (copy_from_user(_buf, buf, size))
+ return -EFAULT;
+
+ _buf[size] = '\0';
+
+ fmt = simple_strtoul(_buf, &last, 0);
+
+ data->verbose=fmt;
+
+ return size;
+}
+
+static int verbose_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, verbose_show ,inode->i_private);
+}
+
+static const struct file_operations verbose_debugfs_fops = {
+ .open = verbose_open,
+ .read = seq_read,
+ .write = verbose_store,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+/*-----------*/
+/* debugfs "time_out" (read): show the current timeout value. */
+static int time_out_show(struct seq_file *s, void *unused)
+{
+	struct avi_r2r_test *data = s->private;
+
+	seq_printf(s, "%d\n", data->time_out);
+
+	return 0;
+}
+
+/*
+ * debugfs "time_out" (write): parse an unsigned integer (base 0) and
+ * store it as the new timeout.  Malformed input is rejected instead of
+ * being silently parsed as 0 (simple_strtoul() cannot report errors).
+ */
+static ssize_t time_out_store(struct file *file,
+			      const char __user *buf,
+			      size_t size,
+			      loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct avi_r2r_test *data = m->private;
+	char _buf[32];
+	u32 val;
+	int err;
+
+	/* Offset writes are not supported. */
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(_buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(_buf, buf, size))
+		return -EFAULT;
+
+	_buf[size] = '\0';
+
+	err = kstrtou32(_buf, 0, &val);
+	if (err)
+		return err;
+
+	data->time_out = val;
+
+	return size;
+}
+
+static int time_out_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, time_out_show, inode->i_private);
+}
+
+static const struct file_operations time_out_debugfs_fops = {
+	.open		= time_out_open,
+	.read		= seq_read,
+	.write		= time_out_store,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*-----------*/
+/* debugfs "period" (read): show the timer period (printed with %lu,
+ * so the field is an unsigned long). */
+static int period_show(struct seq_file *s, void *unused)
+{
+	struct avi_r2r_test *data = s->private;
+
+	seq_printf(s, "%lu\n", data->period);
+
+	return 0;
+}
+
+/*
+ * debugfs "period" (write): parse an unsigned long (base 0) and store
+ * it as the new period.  The original went through a u32 temporary via
+ * simple_strtoul() with no error reporting; kstrtoul() parses directly
+ * into the field's type and rejects malformed input.
+ */
+static ssize_t period_store(struct file *file,
+			    const char __user *buf,
+			    size_t size,
+			    loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct avi_r2r_test *data = m->private;
+	char _buf[32];
+	unsigned long val;
+	int err;
+
+	/* Offset writes are not supported. */
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(_buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(_buf, buf, size))
+		return -EFAULT;
+
+	_buf[size] = '\0';
+
+	err = kstrtoul(_buf, 0, &val);
+	if (err)
+		return err;
+
+	data->period = val;
+
+	return size;
+}
+
+static int period_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, period_show, inode->i_private);
+}
+
+static const struct file_operations period_debugfs_fops = {
+	.open		= period_open,
+	.read		= seq_read,
+	.write		= period_store,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*-----------*/
+/* debugfs "config_run_time" (read): show the current flag/value. */
+static int config_run_time_show(struct seq_file *s, void *unused)
+{
+	struct avi_r2r_test *data = s->private;
+
+	seq_printf(s, "%d\n", data->config_run_time);
+
+	return 0;
+}
+
+/*
+ * debugfs "config_run_time" (write): parse an unsigned integer (base 0)
+ * and store it.  Malformed input is rejected with an error instead of
+ * being silently accepted (simple_strtoul() cannot report errors).
+ */
+static ssize_t config_run_time_store(struct file *file,
+				     const char __user *buf,
+				     size_t size,
+				     loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct avi_r2r_test *data = m->private;
+	char _buf[32];
+	u32 val;
+	int err;
+
+	/* Offset writes are not supported. */
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(_buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(_buf, buf, size))
+		return -EFAULT;
+
+	_buf[size] = '\0';
+
+	err = kstrtou32(_buf, 0, &val);
+	if (err)
+		return err;
+
+	data->config_run_time = val;
+
+	return size;
+}
+
+static int config_run_time_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, config_run_time_show, inode->i_private);
+}
+
+static const struct file_operations config_run_time_debugfs_fops = {
+	.open		= config_run_time_open,
+	.read		= seq_read,
+	.write		= config_run_time_store,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*-----------*/
+/* debugfs "tag_src" (read): show the current value. */
+static int tag_src_show(struct seq_file *s, void *unused)
+{
+	struct avi_r2r_test *data = s->private;
+
+	seq_printf(s, "%d\n", data->tag_src);
+
+	return 0;
+}
+
+/*
+ * debugfs "tag_src" (write): parse an unsigned integer (base 0) and
+ * store it.  Malformed input is rejected with an error instead of
+ * being silently accepted (simple_strtoul() cannot report errors).
+ */
+static ssize_t tag_src_store(struct file *file,
+			     const char __user *buf,
+			     size_t size,
+			     loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct avi_r2r_test *data = m->private;
+	char _buf[32];
+	u32 val;
+	int err;
+
+	/* Offset writes are not supported. */
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(_buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(_buf, buf, size))
+		return -EFAULT;
+
+	_buf[size] = '\0';
+
+	err = kstrtou32(_buf, 0, &val);
+	if (err)
+		return err;
+
+	data->tag_src = val;
+
+	return size;
+}
+
+static int tag_src_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tag_src_show, inode->i_private);
+}
+
+static const struct file_operations tag_src_debugfs_fops = {
+	.open		= tag_src_open,
+	.read		= seq_read,
+	.write		= tag_src_store,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*-----------*/
+/* debugfs "startup_delay" (read): show the current delay value. */
+static int startup_delay_show(struct seq_file *s, void *unused)
+{
+	struct avi_r2r_test *data = s->private;
+
+	seq_printf(s, "%d\n", data->startup_delay);
+
+	return 0;
+}
+
+/*
+ * debugfs "startup_delay" (write): parse an unsigned integer (base 0)
+ * and store it.  Malformed input is rejected with an error instead of
+ * being silently accepted (simple_strtoul() cannot report errors).
+ */
+static ssize_t startup_delay_store(struct file *file,
+				   const char __user *buf,
+				   size_t size,
+				   loff_t *ppos)
+{
+	struct seq_file *m = file->private_data;
+	struct avi_r2r_test *data = m->private;
+	char _buf[32];
+	u32 val;
+	int err;
+
+	/* Offset writes are not supported. */
+	if (*ppos > 0)
+		return -EOPNOTSUPP;
+
+	if (size > ARRAY_SIZE(_buf) - 1)
+		return -ERANGE;
+
+	if (copy_from_user(_buf, buf, size))
+		return -EFAULT;
+
+	_buf[size] = '\0';
+
+	err = kstrtou32(_buf, 0, &val);
+	if (err)
+		return err;
+
+	data->startup_delay = val;
+
+	return size;
+}
+
+static int startup_delay_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, startup_delay_show, inode->i_private);
+}
+
+static const struct file_operations startup_delay_debugfs_fops = {
+	.open		= startup_delay_open,
+	.read		= seq_read,
+	.write		= startup_delay_store,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/*-----------*/
+/* debugfs "timings" (read-only): dump the last n recorded timer periods
+ * from the kt_period[] ring buffer, oldest first.  For each entry it
+ * prints begin/end timestamps (us), the hrtimer overrun count, the delta
+ * to the previous period's begin and its deviation from the nominal
+ * period. */
+static int timings_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+ struct kt_period kt_prev;
+ s64 delta;
+
+ int i,n,c;
+
+ n=data->kt_period_nr;
+ seq_printf(s, "%d/%d (periodic)\n", n, data->kt_period_counter);
+ if (n>0) {
+ /* kt_period_index points one past the newest entry; walk back n
+ * slots (modulo nr_timings_saved) to start from the oldest. */
+ i = data->kt_period_index-n;
+ if (i<0) i += nr_timings_saved;
+ kt_prev = data->kt_period[i];
+ for (c=0;c<n;c++) {
+ /* First iteration yields delta == 0 (kt_prev == current). */
+ delta=ktime_us_delta(data->kt_period[i].begin,
+ kt_prev.begin);
+ seq_printf(s,
+ "client-%d: %18lld, %18lld, %2d,"
+ " %10lld microseconds, %+6lld\n",
+ data->platform_device->id,
+ ktime_to_us(data->kt_period[i].begin),
+ ktime_to_us(data->kt_period[i].end),
+ data->kt_period[i].overrun,
+ delta,
+ delta-data->period);
+ kt_prev=data->kt_period[i];
+ i++;
+ if (i>=nr_timings_saved) i=0;
+ }
+ }
+ return 0;
+}
+
+
+static int timings_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, timings_show ,inode->i_private);
+}
+
+static const struct file_operations timings_debugfs_fops = {
+ .open = timings_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs "mem_start" (read-only): physical start address of the
+ * device's memory resource, printed in hex. */
+static int mem_start_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "0x%08X\n",
+ (unsigned int)data->platform_device->resource->start);
+
+ return 0;
+}
+
+static int mem_start_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mem_start_show ,inode->i_private);
+}
+
+static const struct file_operations mem_start_debugfs_fops = {
+ .open = mem_start_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*-----------*/
+/* debugfs "mem_size" (read-only): size in bytes of the mapped memory
+ * resource (set in avi_r2r_test_data_init()). */
+static int mem_size_show(struct seq_file *s, void *unused)
+{
+ struct avi_r2r_test *data = s->private;
+
+ seq_printf(s, "%d\n", data->mem_size);
+
+ return 0;
+}
+
+static int mem_size_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mem_size_show ,inode->i_private);
+}
+
+static const struct file_operations mem_size_debugfs_fops = {
+ .open = mem_size_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/****************************************************************/
+
+/*
+ * Release everything acquired by avi_r2r_test_data_init().  Safe to
+ * call on a partially initialised (zeroed) structure and safe to call
+ * twice: every released resource pointer is cleared afterwards.
+ */
+static void avi_r2r_test_data_delete(struct avi_r2r_test *data)
+{
+	/* Stop the periodic timer before tearing down what its callback
+	 * might still touch. */
+	cancel_timer(data);
+
+	if (!IS_ERR_OR_NULL(data->mem_start))
+		iounmap(data->mem_start);
+	data->mem_start = NULL;
+
+	/* The original dereferenced data->vmem->start, which oopses when
+	 * request_mem_region() was never reached (data->vmem == NULL). */
+	if (data->vmem) {
+		release_mem_region(data->platform_device->resource->start,
+				   resource_size(data->platform_device->resource));
+		data->vmem = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(data->debugfs_root))
+		debugfs_remove_recursive(data->debugfs_root);
+	data->debugfs_root = NULL;
+}
+
+/* Create one debugfs file named after its fops prefix; jumps to the
+ * enclosing function's "rm" label on failure (expects locals "file",
+ * "ret" and "data" in scope).  Mode is rw for everyone but NOT
+ * executable: the original S_IRWXUGO wrongly set the x bits. */
+#define MY_DEBUGFS_REGISTER(_name) \
+	file = debugfs_create_file(#_name, \
+				   S_IRUGO | S_IWUGO, \
+				   data->debugfs_root, \
+				   data, \
+				   &_name ## _debugfs_fops); \
+	if (IS_ERR_OR_NULL(file)) { \
+		ret = PTR_ERR(file) ?: -ENOMEM; \
+		goto rm; \
+	} \
+	/*AVIR2R_LOG_INFO(data, "debugfs : %s",#_name)*/
+
+
+/*
+ * Set up one test client: wait queue, spinlock, hrtimer, the device's
+ * memory region (requested + ioremapped) and the per-client debugfs
+ * tree.  On any failure every label falls through to
+ * avi_r2r_test_data_delete(), which releases whatever was acquired.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static int __devinit avi_r2r_test_data_init(struct avi_r2r_test *data)
+{
+ struct dentry *file;
+ int ret = 0;
+
+ AVIR2R_LOG_INFO(data, "Starting...");
+
+ init_waitqueue_head(&data->wait_done);
+ atomic_set(&data->callback_done_ran,0);
+ atomic_set(&data->callback_configure_ran,0);
+ atomic_set(&data->all_done,0);
+ spin_lock_init(&data->lock);
+
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ /* Map the DMA memory. */
+ data->vmem = request_mem_region(data->platform_device->resource->start,
+ resource_size(data->platform_device->resource),
+ dev_name(data->dev));
+ if (!data->vmem) {
+ ret = -EBUSY;
+ AVIR2R_LOG_ERROR(data,
+ "request_mem_region %pR failed",
+ data->platform_device->resource);
+ goto mem_request_failed;
+ }
+ data->mem_size = resource_size(data->platform_device->resource);
+
+ data->mem_start = ioremap(data->platform_device->resource->start,
+ resource_size(data->platform_device->resource));
+
+ if (!data->mem_start) {
+ ret = -ENOMEM;
+ AVIR2R_LOG_ERROR(data,
+ "ioremap %pR failed",
+ data->platform_device->resource);
+ goto ioremap_failed;
+ }
+
+ data->phys_mem_start = (void *)data->platform_device->resource->start;
+
+ AVIR2R_LOG_INFO(data,
+ "Workspace %pR",data->platform_device->resource);
+
+ /* One debugfs directory per client, named after the device.  Each
+ * MY_DEBUGFS_REGISTER() below jumps to "rm" on failure. */
+ data->debugfs_root = debugfs_create_dir(dev_name(data->dev), NULL);
+ if (IS_ERR_OR_NULL(data->debugfs_root)) {
+ ret=data->debugfs_root ? PTR_ERR(data->debugfs_root) : -ENOMEM;
+ goto rm;
+ }
+ MY_DEBUGFS_REGISTER(src_frame);
+ MY_DEBUGFS_REGISTER(tgt_frame);
+ MY_DEBUGFS_REGISTER(src_rect);
+ MY_DEBUGFS_REGISTER(tgt_rect);
+ MY_DEBUGFS_REGISTER(src_pos);
+ MY_DEBUGFS_REGISTER(tgt_pos);
+ MY_DEBUGFS_REGISTER(src_addr);
+ MY_DEBUGFS_REGISTER(tgt_addr);
+ MY_DEBUGFS_REGISTER(src_nbuffers);
+ MY_DEBUGFS_REGISTER(tgt_nbuffers);
+ MY_DEBUGFS_REGISTER(src_pixfmt);
+ MY_DEBUGFS_REGISTER(tgt_pixfmt);
+ MY_DEBUGFS_REGISTER(src_delta);
+ MY_DEBUGFS_REGISTER(tgt_delta);
+ MY_DEBUGFS_REGISTER(delta_count);
+ MY_DEBUGFS_REGISTER(src_cspace);
+ MY_DEBUGFS_REGISTER(tgt_cspace);
+ MY_DEBUGFS_REGISTER(run);
+ MY_DEBUGFS_REGISTER(running);
+ MY_DEBUGFS_REGISTER(counters);
+ MY_DEBUGFS_REGISTER(mismatchs);
+ MY_DEBUGFS_REGISTER(verbose);
+ MY_DEBUGFS_REGISTER(time_out);
+ MY_DEBUGFS_REGISTER(startup_delay);
+ MY_DEBUGFS_REGISTER(period);
+ MY_DEBUGFS_REGISTER(config_run_time);
+ MY_DEBUGFS_REGISTER(tag_src);
+ MY_DEBUGFS_REGISTER(gam_bypass);
+ MY_DEBUGFS_REGISTER(gam_palette);
+ MY_DEBUGFS_REGISTER(mem_start);
+ MY_DEBUGFS_REGISTER(mem_size);
+ MY_DEBUGFS_REGISTER(timings);
+
+ get_test_config(data);
+
+ AVIR2R_LOG_INFO(data, "Ready");
+ return 0;
+/* All error paths share one cleanup: data_delete() checks each resource
+ * individually, so it is safe however far init got. */
+rm:
+ioremap_failed:
+mem_request_failed:
+ avi_r2r_test_data_delete(data);
+ return ret;
+}
+/*
+ * Platform probe: allocate the per-client state plus the timing ring
+ * buffer, then run avi_r2r_test_data_init().
+ *
+ * Note: avi_r2r_test_data_init() already releases its own resources on
+ * failure, so the cleanup path here must NOT call
+ * avi_r2r_test_data_delete() again (the original did, causing a double
+ * iounmap()/release_mem_region()).
+ */
+static int __devinit avi_r2r_test_probe(struct platform_device *pdev)
+{
+	struct avi_r2r_test *avi_r2r_test;
+	int ret;
+
+	dev_info(&pdev->dev, "Probing...\n");
+#define ERROR_OUT(lbl, msg, errno) \
+	do { ret = errno; \
+	dev_err(&pdev->dev, "Probing failed: %s [%d]\n", msg, ret); \
+	goto lbl; } \
+	while (0)
+
+	avi_r2r_test = kzalloc(sizeof(*avi_r2r_test), GFP_KERNEL);
+	if (!avi_r2r_test)
+		ERROR_OUT(exit, "couldn't allocate avi_r2r_test data", -ENOMEM);
+
+	avi_r2r_test->kt_period = kzalloc(sizeof(struct kt_period) *
+					  nr_timings_saved, GFP_KERNEL);
+	if (!avi_r2r_test->kt_period)
+		ERROR_OUT(clean, "couldn't allocate kt_period data", -ENOMEM);
+
+	avi_r2r_test->dev = &pdev->dev;
+	avi_r2r_test->platform_device = pdev;
+
+	dev_set_drvdata(&pdev->dev, avi_r2r_test);
+
+	if ((ret = avi_r2r_test_data_init(avi_r2r_test)) != 0)
+		ERROR_OUT(clean, "couldn't initialise avi_r2r_test data", ret);
+
+	dev_info(&pdev->dev, "Probed\n");
+
+	return 0;
+clean:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(avi_r2r_test->kt_period);	/* kfree(NULL) is a no-op */
+	kfree(avi_r2r_test);
+exit:
+	return ret;
+}
+
+
+/*
+ * Platform remove: tear down the client state.  Also frees the
+ * kt_period ring buffer, which the original leaked, and drops the
+ * stray line-continuation backslash after the first dev_info().
+ */
+static int __devexit avi_r2r_test_remove(struct platform_device *pdev)
+{
+	struct avi_r2r_test *avi_r2r_test = dev_get_drvdata(&pdev->dev);
+
+	dev_info(&pdev->dev, "Removing...\n");
+	dev_set_drvdata(&pdev->dev, NULL);
+	avi_r2r_test_data_delete(avi_r2r_test);
+	kfree(avi_r2r_test->kt_period);
+	kfree(avi_r2r_test);
+	dev_info(&pdev->dev, "Removed\n");
+	return 0;
+}
+
+/* Platform driver glue: matched by name against the test platform
+ * device registered elsewhere. */
+static struct platform_driver avi_r2r_test_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = avi_r2r_test_probe,
+ .remove = __devexit_p(avi_r2r_test_remove),
+};
+/**
+ * Module entry point: register the platform driver and log the outcome.
+ */
+static int __init avi_r2r_test_init(void)
+{
+	int ret;
+
+	pr_info(DRIVER_NAME " Initializing...\n");
+
+	ret = platform_driver_register(&avi_r2r_test_driver);
+	if (ret) {
+		pr_err(DRIVER_NAME " init fail (%d)\n", ret);
+		return ret;
+	}
+
+	pr_info(DRIVER_NAME " Initialized\n");
+	return 0;
+}
+
+/**
+ * Module exit point: unregister the platform driver.
+ */
+static void __exit avi_r2r_test_exit(void)
+{
+	pr_info(DRIVER_NAME " Exiting...\n");
+	platform_driver_unregister(&avi_r2r_test_driver);
+	pr_info(DRIVER_NAME " Exited\n");
+}
+
+module_init(avi_r2r_test_init);
+module_exit(avi_r2r_test_exit);
+
+MODULE_AUTHOR("Didier Leymarie <didier.leymarie.ext@parrot.com>");
+MODULE_DESCRIPTION("RAM to RAM driver for Parrot7 Advanced Video Interface (test module multiple clients");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/parrot/media/video/avi_stats.c b/drivers/parrot/media/video/avi_stats.c
new file mode 100644
index 00000000000000..65f529fd14732e
--- /dev/null
+++ b/drivers/parrot/media/video/avi_stats.c
@@ -0,0 +1,618 @@
+#include <linux/module.h>
+#include <media/v4l2-ioctl.h>
+
+#include "avi_stats.h"
+
+#define AVI_STATS_DEFAULT_THUMB_WIDTH 64
+#define AVI_STATS_DEFAULT_THUMB_HEIGHT 48
+#define AVI_STATS_DEFAULT_WIDTH (AVI_STATS_DEFAULT_THUMB_WIDTH * 6)
+#define AVI_STATS_DEFAULT_HEIGHT (AVI_STATS_DEFAULT_THUMB_HEIGHT + 1)
+
+/* Complete one stats buffer towards videobuf2.
+ * @requeue: when non-zero and the AVI reports an error, put the buffer
+ * back on the internal queue instead of returning it to user space.
+ * NOTE(review): frame->priv is assumed to be the struct avi_vbuf placed
+ * there by avi_stats_next() -- confirm with callers. */
+void avi_stats_done(struct avi_stats *stats, struct avi_dma_buffer *frame,
+ u32 sequence, int requeue)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ struct avi_vbuf *vbuf = frame->priv;
+ unsigned long flags;
+
+ /* Nothing to do for an empty frame or once streaming stopped. */
+ if (!frame->plane0.dma_addr || !stats->streaming)
+ return;
+
+ /* Bad buffer */
+ if (frame->status != AVI_BUFFER_DONE) {
+ /* Requeue buffer instead of returning to V4L2 */
+ if (requeue) {
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+ list_add_tail(&vbuf->list, &stats->bufqueue);
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+ return;
+ }
+ dev_err(stats->vdev->parent, "couldn't capture stats\n");
+ }
+
+ v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp);
+ vbuf->vb.v4l2_buf.sequence = sequence;
+
+ /* Failed frames are still completed, but flagged as errors. */
+ if (frame->status != AVI_BUFFER_DONE)
+ state = VB2_BUF_STATE_ERROR;
+
+ vb2_buffer_done(&vbuf->vb, state);
+}
+EXPORT_SYMBOL(avi_stats_done);
+
+/* Pop the next queued V4L2 buffer (if any) and describe it in @frame
+ * for the AVI DMA engine.  When the queue is empty @frame is left
+ * untouched -- callers detect this via a zeroed plane0.dma_addr. */
+void avi_stats_next(struct avi_stats *stats, struct avi_dma_buffer *frame)
+{
+ struct avi_vbuf *vbuf = NULL;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+
+ if (!stats->streaming)
+ return;
+
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+
+ /* streaming re-checked under the lock: streamoff may race with us. */
+ if (stats->streaming && !list_empty(&stats->bufqueue)) {
+ vbuf = list_first_entry(&stats->bufqueue, struct avi_vbuf,
+ list);
+
+ list_del(&vbuf->list);
+
+ vbuf->vb.state = VB2_BUF_STATE_ACTIVE;
+ }
+
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+
+ if (!vbuf)
+ /* exhausted buffer queue */
+ return;
+
+ /* Get DMA address from vb2 buffer */
+ dma_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb, 0);
+
+ /* Setup AVI DMA buffer with crop */
+ avi_v4l2_setup_dmabuf(&stats->crop, &stats->avi_fmt, stats->plane0_size,
+ stats->plane1_size, dma_addr, vbuf, frame);
+}
+EXPORT_SYMBOL(avi_stats_next);
+
+/* Push the current V4L2 stats format down to the bayer statistics node
+ * and the stats AVI segment.  A no-op unless the format changed since
+ * the last call, or @force is set.  Returns 0 or a negative errno. */
+int avi_stats_apply_format(struct avi_stats *stats,
+ struct avi_node *bayer_node,
+ struct avi_segment *stats_seg,
+ const struct avi_segment_format *src_fmt, int force)
+{
+ struct avi_segment_format stats_fmt = {
+ .width = stats->avi_fmt.width,
+ .height = stats->avi_fmt.height,
+ .interlaced = 0,
+ .colorspace = 0,
+ };
+ struct avi_segment_format dstats_fmt;
+ unsigned width, height;
+ int ret = 0;
+
+ /* Configuration has not changed */
+ if (!force && !stats->updated_fmt)
+ return 0;
+
+ /* Apply new configuration */
+ stats->updated_fmt = 0;
+
+ /* Calculate thumbnail
+ * In order to get histogram in statistics, we need to store 256 bytes
+ * in last lines of captured frame, so thumbnail height is
+ * frame height - ((256 + line_width - 1) / line width).
+ */
+ /* NOTE(review): assumes stats_fmt.width != 0 here; this relies on
+ * avi_stats_adjust()/limit checks having run -- confirm. */
+ width = stats_fmt.width / 6;
+ height = stats_fmt.height - ((255 + stats_fmt.width) / stats_fmt.width);
+
+ /* Configure bayer stats */
+ avi_statistics_bayer_configure(bayer_node, src_fmt->width,
+ src_fmt->height, width, height);
+
+ /* Configure segment format */
+ dstats_fmt = stats_fmt;
+ dstats_fmt.pix_fmt = AVI_PIXFMT_BGRA8888;
+ dstats_fmt.plane0.line_size = stats->v4l2_fmt.width * 4;
+
+ /* Set format */
+ ret = avi_segment_set_format(stats_seg, &stats_fmt,
+ &dstats_fmt);
+ if (ret) {
+ dev_err(stats->vdev->parent, "Failed to set stats format!\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_stats_apply_format);
+
+/* vb2 .queue_setup: single-plane buffers sized for the current format;
+ * defaults to 4 buffers when the application did not ask for any. */
+static int avi_stats_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct avi_stats *stats = vb2_get_drv_priv(vq);
+
+ sizes[0] = stats->v4l2_fmt.sizeimage;
+ alloc_ctxs[0] = stats->alloc_ctx;
+ *num_planes = 1;
+
+ if (*count == 0)
+ /* Default to 4 buffers */
+ *count = 4;
+
+ return 0;
+}
+
+/* vb2 .buf_prepare: stats frames are always progressive; payload is the
+ * full image size of the current format. */
+static int avi_stats_vbuf_prepare(struct vb2_buffer* vb)
+{
+ struct avi_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+
+ vb2_set_plane_payload(vb, 0, stats->v4l2_fmt.sizeimage);
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* vb2 .buf_queue: append the buffer to our internal queue, consumed by
+ * avi_stats_next().  (The original comment claimed vbq_lock is already
+ * held by the caller, yet the function takes it itself -- the lock here
+ * is what actually protects bufqueue.) */
+static void avi_stats_vbuf_queue(struct vb2_buffer *vb)
+{
+ struct avi_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+ struct avi_vbuf *vbuf = to_avi_vbuf(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+
+ list_add_tail(&vbuf->list, &stats->bufqueue);
+
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+}
+
+/* VIDIOC_QUERYCAP: report driver name, a per-parent card name and the
+ * capture+streaming capabilities. */
+static int avi_stats_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct avi_stats *stats = video_drvdata(file);
+
+ mutex_lock(&stats->lock);
+
+ strlcpy(cap->driver, stats->vdev->name, sizeof(cap->driver));
+
+ snprintf(cap->card, sizeof(cap->card), "avi-stats.%d",
+ stats->vdev->parent->id);
+
+ cap->version = stats->version;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ mutex_unlock(&stats->lock);
+
+ return 0;
+}
+
+/* VIDIOC_ENUM_FMT: a single hardcoded format (BGR32) is exposed. */
+static int avi_stats_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ /* We only have one hardcoded format for the stats */
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->flags = 0;
+ f->pixelformat = V4L2_PIX_FMT_BGR32;
+
+ strlcpy(f->description, "bayer statistics", sizeof(f->description));
+
+ return 0;
+}
+
+/* VIDIOC_G_FMT: copy out the current pixel format under the lock. */
+static int avi_stats_g_fmt_vid(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct avi_stats *stats = video_drvdata(file);
+
+ /* Check type */
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* Lock format access */
+ mutex_lock(&stats->lock);
+
+ /* Get V4L2 format */
+ f->fmt.pix = stats->v4l2_fmt;
+
+ /* Unlock format access */
+ mutex_unlock(&stats->lock);
+
+ return 0;
+}
+
+/* Validate/adjust a requested pixel format (caller holds stats->lock).
+ * Only RGB32/BGR32 are accepted; width/height are clamped to the AVI
+ * limits for BGRA8888 and the derived fields are recomputed. */
+static int avi_stats_try_fmt_vid_unlocked(struct avi_stats *stats,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ /* Only V4L2_PIX_FMT_BGR32 is supported */
+ if (pix->pixelformat != V4L2_PIX_FMT_RGB32 &&
+ pix->pixelformat != V4L2_PIX_FMT_BGR32)
+ return -EINVAL;
+
+ /* Check resolution */
+ avi_limit_adjust_height(AVI_PIXFMT_BGRA8888, &pix->height);
+ avi_limit_adjust_width(AVI_PIXFMT_BGRA8888, &pix->width);
+
+ /* Update values */
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = pix->width * 4;	/* 4 bytes per BGRA pixel */
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return 0;
+}
+
+/* VIDIOC_TRY_FMT: locked wrapper around the unlocked helper.
+ * BUGFIX: the original returned 0 unconditionally, discarding the
+ * helper's -EINVAL for unsupported pixel formats. */
+static int avi_stats_try_fmt_vid(struct file *file, void *priv,
+				 struct v4l2_format *f)
+{
+	struct avi_stats *stats = video_drvdata(file);
+	int ret;
+
+	mutex_lock(&stats->lock);
+	ret = avi_stats_try_fmt_vid_unlocked(stats, f);
+	mutex_unlock(&stats->lock);
+
+	return ret;
+}
+
+/* Constrain a crop rectangle to what the bayer statistics unit can
+ * produce: width a multiple of 6 (6 thumbnails side by side), at most
+ * 384 pixels wide, inside the current output frame, and the height
+ * capped so the histogram lines still fit. */
+static void avi_stats_adjust(struct avi_stats *stats, struct v4l2_rect *crop)
+{
+	struct v4l2_rect bounds = {
+		.left = 0,
+		.top = 0,
+		.width = stats->v4l2_fmt.width,
+		.height = stats->v4l2_fmt.height,
+	};
+	int32_t max_height;
+
+	/* Align width to a multiple of 6 */
+	crop->width -= crop->width % 6;
+
+	/* Max resolution width is 64 * 6 pixels */
+	if (crop->width > 384)
+		crop->width = 384;
+
+	/* Adjust crop rectangle into output frame */
+	avi_v4l2_crop_adjust(crop, &bounds);
+
+	/* Adjust height size:
+	 * max resolution is 48 + ceil(256 / width) histogram lines.
+	 * BUGFIX: guard the division -- a requested width < 6 aligns down
+	 * to 0 and the original divided by it unconditionally.
+	 */
+	if (crop->width) {
+		max_height = 48 + ((255 + crop->width) / crop->width);
+		if (crop->height > max_height)
+			crop->height = max_height;
+	}
+}
+
+/* Validate then commit a new pixel format (caller holds stats->lock):
+ * resets the crop to the full frame, re-adjusts it to hardware limits
+ * and recomputes the AVI segment format and plane sizes. */
+static int avi_stats_s_fmt_vid_unlocked(struct avi_stats *stats,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ /* Try format before setting */
+ ret = avi_stats_try_fmt_vid_unlocked(stats, f);
+ if (ret < 0)
+ return ret;
+
+ /* Update format */
+ stats->v4l2_fmt = f->fmt.pix;
+ stats->crop.left = 0;
+ stats->crop.top = 0;
+ stats->crop.width = f->fmt.pix.width;
+ stats->crop.height = f->fmt.pix.height;
+
+ /* Adjust crop to best value */
+ avi_stats_adjust(stats, &stats->crop);
+
+ /* Update AVI segment format */
+ avi_v4l2_to_segment_fmt(&stats->v4l2_fmt, &stats->crop, &stats->avi_fmt,
+ &stats->plane0_size, &stats->plane1_size);
+
+ return 0;
+}
+
+/* VIDIOC_S_FMT: commit a new format unless streaming.
+ * Fixes over the original: the streaming check now happens under the
+ * lock (it raced with streamon), and updated_fmt is only raised when
+ * the format was actually accepted. */
+static int avi_stats_s_fmt_vid(struct file *file, void *priv,
+			       struct v4l2_format *f)
+{
+	struct avi_stats *stats = video_drvdata(file);
+	int ret;
+
+	/* Check type */
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	mutex_lock(&stats->lock);
+
+	/* Format cannot change while streaming */
+	if (stats->streaming) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	ret = avi_stats_s_fmt_vid_unlocked(stats, f);
+	if (!ret)
+		stats->updated_fmt = 1;
+
+unlock:
+	mutex_unlock(&stats->lock);
+
+	return ret;
+}
+
+/* VIDIOC_G_SELECTION: CROP returns the active crop rectangle;
+ * CROP_BOUNDS/CROP_DEFAULT both return the full output frame. */
+static int avi_stats_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct avi_stats *stats = video_drvdata(file);
+ int ret = 0;
+
+ /* Check type */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* Lock format access */
+ mutex_lock(&stats->lock);
+
+ /* Get selection */
+ switch(s->target) {
+ case V4L2_SEL_TGT_CROP:
+ s->r = stats->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = stats->v4l2_fmt.width;
+ s->r.height = stats->v4l2_fmt.height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* Unlock format access */
+ mutex_unlock(&stats->lock);
+
+ return ret;
+}
+
+/* VIDIOC_S_SELECTION: set a new crop rectangle (CROP target only).
+ * The rectangle is adjusted to hardware constraints, never rejected.
+ * NOTE(review): unlike s_fmt there is no streaming check here -- the
+ * crop can change mid-stream; confirm this is intended. */
+static int avi_stats_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct avi_stats *stats = video_drvdata(file);
+ int ret = 0;
+
+ /* Check type */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* Only CROP is supported */
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* Lock format access */
+ mutex_lock(&stats->lock);
+
+ /* Adjust crop */
+ avi_stats_adjust(stats, &s->r);
+
+ /* Update crop and compose rect */
+ stats->crop = s->r;
+
+ /* Update AVI segment format */
+ avi_v4l2_to_segment_fmt(&stats->v4l2_fmt, &stats->crop, &stats->avi_fmt,
+ &stats->plane0_size, &stats->plane1_size);
+
+ /* Selection has been updated */
+ stats->updated_fmt = 1;
+
+ /* Unlock format access */
+ mutex_unlock(&stats->lock);
+
+ return ret;
+}
+
+/* vb2 .start_streaming: just flip the flags; the actual pipeline setup
+ * is done by the main capture device's streamon. */
+static int avi_stats_streamon(struct vb2_queue* vq, unsigned int count)
+{
+ struct avi_stats *stats = vb2_get_drv_priv(vq);
+
+ /* Already streaming */
+ if (stats->streaming)
+ return -EBUSY;
+
+ /* Nothing else to do here, the complicated stuff is done in streamon
+ * of main capture device.
+ */
+ stats->updated_fmt = 1;
+ stats->streaming = 1;
+
+ return 0;
+}
+
+/* vb2 .stop_streaming: fail every buffer still queued (vb2 requires all
+ * buffers to be returned) and clear the streaming flag. */
+static int avi_stats_streamoff(struct vb2_queue* vq)
+{
+ struct avi_stats *stats = vb2_get_drv_priv(vq);
+ struct avi_vbuf *node;
+ struct avi_vbuf *buf;
+ unsigned long flags;
+
+ /* Already stopped */
+ if (!stats->streaming)
+ return -EINVAL;
+
+ /* Mark every buffer as DONE */
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+ list_for_each_entry_safe(buf, node, &stats->bufqueue, list) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+
+ /* Stopped */
+ stats->streaming = 0;
+
+ return 0;
+}
+
+/* videobuf2 callbacks for the stats capture queue. */
+static struct vb2_ops avi_stats_vqueue_ops = {
+ .queue_setup = avi_stats_queue_setup,
+ .buf_prepare = avi_stats_vbuf_prepare,
+ .buf_queue = avi_stats_vbuf_queue,
+ .start_streaming = avi_stats_streamon,
+ .stop_streaming = avi_stats_streamoff,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* V4L2 ioctl table: format/selection handled locally, buffer handling
+ * delegated to the vb2 ioctl helpers. */
+static const struct v4l2_ioctl_ops avi_stats_ioctl_ops = {
+ .vidioc_querycap = avi_stats_querycap,
+ .vidioc_enum_fmt_vid_cap = avi_stats_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = avi_stats_g_fmt_vid,
+ .vidioc_try_fmt_vid_cap = avi_stats_try_fmt_vid,
+ .vidioc_s_fmt_vid_cap = avi_stats_s_fmt_vid,
+ .vidioc_g_selection = avi_stats_g_selection,
+ .vidioc_s_selection = avi_stats_s_selection,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* Device open: on first open (use_count == 0) (re)initialise the vb2
+ * queue.  BUGFIX: vb2_queue_init() can fail; the original ignored its
+ * return value and reported success over a half-initialised queue. */
+static int avi_stats_open(struct file *file)
+{
+	struct avi_stats *stats = video_drvdata(file);
+	struct vb2_queue *q = &stats->vb_vidq;
+	int ret = 0;
+
+	mutex_lock(&stats->lock);
+
+	if (stats->use_count != 0)
+		goto done;
+
+	stats->updated_fmt = 1;
+
+	memset(q, 0, sizeof(*q));
+
+	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes		= VB2_MMAP | VB2_USERPTR;
+	q->mem_ops		= &vb2_dma_contig_memops;
+	q->lock			= &stats->lock;
+	q->ops			= &avi_stats_vqueue_ops;
+	q->drv_priv		= stats;
+	q->buf_struct_size	= sizeof(struct avi_vbuf);
+
+	ret = vb2_queue_init(q);
+	if (ret)
+		goto unlock;
+
+ done:
+	stats->use_count++;
+ unlock:
+	mutex_unlock(&stats->lock);
+
+	return ret;
+}
+
+/* Device close: release the vb2 resources on last close only. */
+static int avi_stats_release(struct file *file)
+{
+ struct avi_stats *stats= video_drvdata(file);
+ int ret = 0;
+
+ mutex_lock(&stats->lock);
+
+ stats->use_count--;
+ if (!stats->use_count)
+ ret = vb2_fop_release(file);
+
+ mutex_unlock(&stats->lock);
+
+ return ret;
+}
+
+/* File operations: open/release handled locally (use counting), the
+ * rest delegated to the vb2 file-operation helpers. */
+static struct v4l2_file_operations avi_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = avi_stats_open,
+ .release = avi_stats_release,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+};
+
+/*
+ * Create and register the V4L2 capture device dedicated to bayer
+ * statistics.  Fixes over the original version:
+ *  - a failed video_register_device() now releases the never-registered
+ *    device instead of calling video_unregister_device() on it;
+ *  - vb2_dma_contig_init_ctx() errors are detected (it returns an
+ *    ERR_PTR, not NULL) instead of being stored blindly;
+ *  - video_device_alloc() failure reports -ENOMEM, not -ENODEV;
+ *  - stray double semicolon removed.
+ */
+int __devinit avi_stats_init(struct avi_stats *stats,
+			     struct video_device *pvdev, u32 version)
+{
+	struct v4l2_format default_fmt = {
+		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.fmt.pix = {
+			.width = AVI_STATS_DEFAULT_WIDTH,
+			.height = AVI_STATS_DEFAULT_HEIGHT,
+			.pixelformat = V4L2_PIX_FMT_BGR32,
+		},
+	};
+	struct video_device *vdev;
+	int ret;
+
+	spin_lock_init(&stats->vbq_lock);
+	INIT_LIST_HEAD(&stats->bufqueue);
+
+	vdev = video_device_alloc();
+	if (vdev == NULL) {
+		ret = -ENOMEM;
+		goto vdev_alloc_failed;
+	}
+
+	stats->vdev = vdev;
+	stats->version = version;
+
+	mutex_init(&stats->lock);
+
+	snprintf(vdev->name, sizeof(vdev->name), "%s-stats", pvdev->name);
+
+	vdev->parent = pvdev->parent;
+	vdev->current_norm = V4L2_STD_UNKNOWN;
+	vdev->fops = &avi_stats_fops;
+	vdev->ioctl_ops = &avi_stats_ioctl_ops;
+	vdev->release = &video_device_release;
+	vdev->vfl_type = VFL_TYPE_GRABBER;
+	vdev->tvnorms = V4L2_STD_UNKNOWN;
+	vdev->queue = &stats->vb_vidq;
+	/* We handle the locking ourselves */
+	vdev->lock = NULL;
+
+	video_set_drvdata(vdev, stats);
+
+	vdev->v4l2_dev = pvdev->v4l2_dev;
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto video_register_failed;
+
+	/* Set default format to 64x48 thumbnail */
+	avi_stats_s_fmt_vid_unlocked(stats, &default_fmt);
+	stats->updated_fmt = 1;
+
+	stats->alloc_ctx = vb2_dma_contig_init_ctx(&vdev->dev);
+	if (IS_ERR(stats->alloc_ctx)) {
+		ret = PTR_ERR(stats->alloc_ctx);
+		goto ctx_init_failed;
+	}
+
+	dev_info(vdev->parent,
+		 "stats video device successfuly registered as %s\n",
+		 video_device_node_name(stats->vdev));
+	return 0;
+
+ ctx_init_failed:
+	/* unregistering drops the last reference via vdev->release */
+	video_unregister_device(stats->vdev);
+	return ret;
+ video_register_failed:
+	/* never registered: release, do not unregister */
+	video_device_release(vdev);
+ vdev_alloc_failed:
+	return ret;
+}
+EXPORT_SYMBOL(avi_stats_init);
+
+/* Tear down what avi_stats_init() created: the DMA allocation context
+ * and the video device (unregistering drops the last reference). */
+void __devexit avi_stats_destroy(struct avi_stats *stats)
+{
+ vb2_dma_contig_cleanup_ctx(stats->alloc_ctx);
+ video_unregister_device(stats->vdev);
+}
+EXPORT_SYMBOL(avi_stats_destroy);
+
+MODULE_AUTHOR("Alexandre Dilly <alexandre.dilly@parrot.com>");
+MODULE_DESCRIPTION("V4L2 stats capture interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_stats.h b/drivers/parrot/media/video/avi_stats.h
new file mode 100644
index 00000000000000..1a3ed5db90f7e6
--- /dev/null
+++ b/drivers/parrot/media/video/avi_stats.h
@@ -0,0 +1,106 @@
+#ifndef _AVI_STATS_H_
+#define _AVI_STATS_H_
+
+#include <media/v4l2-common.h>
+#include <media/videobuf2-core.h>
+
+#include "avi_v4l2.h"
+
+#define AVI_STATS_WIDTH 384
+#define AVI_STATS_HEIGHT 49
+
+/* Per-buffer wrapper: embeds the vb2 buffer (must stay first so that
+ * container_of()/casts from struct vb2_buffer work) plus our queue
+ * linkage. */
+struct avi_vbuf {
+ /* *must* be first */
+ struct vb2_buffer vb;
+ struct list_head list;
+ /* For interlaced streams we capture two fields in a single frame */
+ int last_capture;
+};
+
+/* State of one bayer-statistics V4L2 capture device. */
+struct avi_stats {
+ /* Buffer queue (vbq_lock protects bufqueue) */
+ spinlock_t vbq_lock;
+ struct list_head bufqueue;
+ /* Video device */
+ struct video_device *vdev;
+ struct vb2_queue vb_vidq;
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Capture format (v4l2_fmt/crop/avi_fmt kept in sync by s_fmt and
+ * s_selection; updated_fmt flags a pending hardware reconfiguration
+ * consumed by avi_stats_apply_format()) */
+ struct v4l2_pix_format v4l2_fmt;
+ struct v4l2_rect crop;
+ struct avi_segment_format avi_fmt;
+ unsigned plane0_size;
+ unsigned plane1_size;
+ int updated_fmt;
+ /* Serialization lock */
+ struct mutex lock;
+ int streaming;
+ int use_count;
+ u32 version;
+};
+
+/* Map an embedded vb2 buffer back to its struct avi_vbuf wrapper. */
+static inline struct avi_vbuf* to_avi_vbuf(struct vb2_buffer* vb2)
+{
+ return container_of(vb2, struct avi_vbuf, vb);
+}
+
+/* avi_stats_done() - Return a filled buffer to V4L2 capture interface.
+ * @stats: The stats V4L2 capture interface handler.
+ * @frame: The filled buffer (can be empty with AVI_BUFFER_ERROR flag).
+ * @sequence: Sequence number of V4L2 frame.
+ * @requeue: If AVI_BUFFER_ERROR flag is set and requeue is not equal to zero
+ * the buffer is not returned to V4L2 interface and just requeued
+ * internally.
+ *
+ * Return a processed buffer to V4L2 capture interface dedicated to bayer
+ * statistics output.
+ */
+extern void avi_stats_done(struct avi_stats *stats,
+ struct avi_dma_buffer *frame, u32 sequence,
+ int requeue);
+
+/* avi_stats_next() - Get a new buffer from V4L2 capture interface.
+ * @stats: The stats V4L2 capture interface handler.
+ * @frame: The buffer from V4L2.
+ *
+ * If no buffers are available from the V4L2 interface, the avi_dma_buffer structure
+ * is not modified. In order to detect if a buffer is available, initialize the
+ * avi_dma_buffer passed with zeros and check if plane0.dma_addr is set.
+ */
+extern void avi_stats_next(struct avi_stats *stats,
+ struct avi_dma_buffer *frame);
+
+/* avi_stats_apply_format() - Apply format on stats segment output.
+ * @stats: The stats V4L2 capture interface handler.
+ * @bayer_node: The ISP bayer node of ISP segment on which stats are connected.
+ * @stats_seg: The stats segment.
+ * @src_fmt: The input format in ISP.
+ * @force: Force format segment update.
+ *
+ * Apply V4L2 format to the AVI segment format.
+ */
+int avi_stats_apply_format(struct avi_stats *stats,
+ struct avi_node *bayer_node,
+ struct avi_segment *stats_seg,
+ const struct avi_segment_format *src_fmt, int force);
+
+/* avi_stats_init() - Initialize V4L2 capture interface for bayer statistics.
+ * @stats: The stats V4L2 capture interface handler.
+ * @pvdev: The video_device used in parallel of this V4L2 interface (some
+ * information is extracted from it, such as the driver name and parent
+ * device).
+ * @version: Driver version.
+ *
+ * Initialize and create a new V4L2 video device for bayer statistics output.
+ */
+extern int __devinit avi_stats_init(struct avi_stats *stats,
+ struct video_device *pvdev, u32 version);
+
+/* avi_stats_destroy() - Destroy V4L2 capture interface for bayer statistics.
+ * @stats: The stats V4L2 capture interface handler.
+ *
+ * Destroy the V4L2 video device dedicated for bayer statistics output.
+ */
+extern void __devexit avi_stats_destroy(struct avi_stats *stats);
+
+#endif /* _AVI_STATS_H_ */
diff --git a/drivers/parrot/media/video/avi_v4l2.c b/drivers/parrot/media/video/avi_v4l2.c
new file mode 100644
index 00000000000000..c29add085a88a5
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2.c
@@ -0,0 +1,584 @@
+/*
+ * linux/drivers/parrot/media/video/avi_v4l2.c
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 04-Feb-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include "avi_v4l2.h"
+
+#ifdef DEBUG
+#define dprintk(format_, args...) \
+ pr_debug("%s: " format_, __func__, ##args)
+
+#else /* DEBUG */
+#define dprintk(format_, args...)
+#endif /* DEBUG */
+
+/* One entry of the AVI <-> V4L2 pixel format translation table.
+ * @avi_fmt:     AVI-side DMA pixel format descriptor.
+ * @v4l2_fmt:    Matching V4L2 fourcc code.
+ * @description: Human-readable name reported through VIDIOC_ENUM_FMT. */
+struct avi_v4l2_pixfmt {
+ struct avi_dma_pixfmt avi_fmt;
+ u32 v4l2_fmt;
+ char description[32];
+};
+
+static const struct avi_v4l2_pixfmt avi_v4l2_formats[] = {
+ {
+ .avi_fmt = AVI_PIXFMT_YVYU,
+ .v4l2_fmt = V4L2_PIX_FMT_YVYU,
+ .description = "16bpp, packed, 4:2:2 YVYU"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_AYUV,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV32,
+ .description = "32bpp, 4:4:4 AYUV"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_YUYV,
+ .v4l2_fmt = V4L2_PIX_FMT_YUYV,
+ .description = "16bpp, packed, 4:2:2 YUYV"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_YYUV,
+ .v4l2_fmt = V4L2_PIX_FMT_YYUV,
+ .description = "16bpp, packed, 4:2:2 YYUV"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_UYVY,
+ .v4l2_fmt = V4L2_PIX_FMT_UYVY,
+ .description = "16bpp, packed, 4:2:2 UYVY"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_VYUY,
+ .v4l2_fmt = V4L2_PIX_FMT_VYUY,
+ .description = "16bpp, packed, 4:2:2 VYUY"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_GREY,
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ .description = "8bpp greyscale"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_Y10,
+ .v4l2_fmt = V4L2_PIX_FMT_Y10,
+ .description = "10bpp greyscale"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_RGBA8888,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB32,
+ .description = "32bpp, RGBA"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BGRA8888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR32,
+ .description = "32bpp, BGRA"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_RGB888,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ .description = "24bpp, RGB"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BGR888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR24,
+ .description = "24bpp, BGR"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_RGB565,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB565,
+ .description = "16bpp, RGB565"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_RGBA5551,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB555,
+ .description = "16bpp, RGBA5551"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_NV16,
+ .v4l2_fmt = V4L2_PIX_FMT_NV16,
+ .description = "16bpp, semi-planar, 4:2:2 Y/UV"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_NV61,
+ .v4l2_fmt = V4L2_PIX_FMT_NV61,
+ .description = "16bpp, semi-planar, 4:2:2 Y/VU"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_NV12,
+ .v4l2_fmt = V4L2_PIX_FMT_NV12,
+ .description = "12bpp, semi-planar, 4:2:0 Y/UV"
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_NV21,
+ .v4l2_fmt = V4L2_PIX_FMT_NV21,
+ .description = "12bpp, semi-planar, 4:2:0 Y/VU"
+ },
+
+ /* Bayer formats, only available when we have a raw bayer input. */
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X10_16,
+ /* I'm not following the spec here because in this format the
+ * data should be packed on the LSB, the 6 remaining MSB padded
+ * with 0s. However the AVI does not support this format, so I
+ * make do. We could lie and pretend we store 16bits but our
+ * current v4l2 version does not seem to support 16bit raw
+ * pixelcodes. */
+ .v4l2_fmt = V4L2_PIX_FMT_SBGGR10,
+ .description = "16bpp, 10bit raw bayer [BG/GR]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X10_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SGBRG10,
+ .description = "16bpp, 10bit raw bayer [GB/RG]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X10_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SGRBG10,
+ .description = "16bpp, 10bit raw bayer [GR/BG]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X10_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SRGGB10,
+ .description = "16bpp, 10bit raw bayer [RG/GB]",
+ },
+ /* 12bit bayer formats */
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X12_16,
+ /* This format is completely nonstandard, the bits are shifted
+ * around. */
+ .v4l2_fmt = V4L2_PIX_FMT_SBGGR12,
+ .description = "16bpp, 12bit raw bayer [BG/GR]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X12_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SGBRG12,
+ .description = "16bpp, 12bit raw bayer [GB/RG]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X12_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SGRBG12,
+ .description = "16bpp, 12bit raw bayer [GR/BG]",
+ },
+ {
+ .avi_fmt = AVI_PIXFMT_BAYER_1X12_16,
+ .v4l2_fmt = V4L2_PIX_FMT_SRGGB12,
+ .description = "16bpp, 12bit raw bayer [RG/GB]",
+ },
+};
+
+/* Fill in a v4l2_fmtdesc for VIDIOC_ENUM_FMT from the static format
+ * table. Returns -EINVAL once f->index runs past the end of the table. */
+int avi_v4l2_enum_fmt(struct v4l2_fmtdesc *f)
+{
+	const struct avi_v4l2_pixfmt *entry;
+
+	if (f->index >= ARRAY_SIZE(avi_v4l2_formats))
+		return -EINVAL;
+
+	entry = &avi_v4l2_formats[f->index];
+
+	f->pixelformat = entry->v4l2_fmt;
+	f->flags       = 0;
+	strlcpy(f->description, entry->description, sizeof(f->description));
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_v4l2_enum_fmt);
+
+/* Classify a V4L2 fourcc into one of the broad AVI pixel families
+ * (RGB, YUV, greyscale or raw bayer). Returns AVI_PIXTYPE_UNKNOWN for
+ * any fourcc we do not handle. */
+enum avi_v4l2_pixtype avi_v4l2_get_pixfmt_type(u32 fourcc)
+{
+	switch (fourcc) {
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_BGR32:
+	case V4L2_PIX_FMT_RGB24:
+	case V4L2_PIX_FMT_BGR24:
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_RGB555:
+		return AVI_PIXTYPE_RGB;
+
+	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YYUV:
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		return AVI_PIXTYPE_YUV;
+
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_Y10:
+		return AVI_PIXTYPE_GREY;
+
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		return AVI_PIXTYPE_BAYER;
+
+	default:
+		dprintk("unknown format type 0x%08x '" FOURCC_FORMAT "'\n",
+			fourcc, FOURCC_SHOW(fourcc));
+		return AVI_PIXTYPE_UNKNOWN;
+	}
+}
+EXPORT_SYMBOL(avi_v4l2_get_pixfmt_type);
+
+/* Translate a V4L2 fourcc into the matching AVI DMA pixel format.
+ * Returns AVI_PIXFMT_INVALID when the fourcc is not in the table. */
+struct avi_dma_pixfmt avi_v4l2_to_avi_pixfmt(u32 v4l2_fmt)
+{
+	const struct avi_v4l2_pixfmt *entry;
+	const struct avi_v4l2_pixfmt *end =
+		avi_v4l2_formats + ARRAY_SIZE(avi_v4l2_formats);
+
+	for (entry = avi_v4l2_formats; entry < end; entry++)
+		if (entry->v4l2_fmt == v4l2_fmt)
+			return entry->avi_fmt;
+
+	return AVI_PIXFMT_INVALID;
+}
+EXPORT_SYMBOL(avi_v4l2_to_avi_pixfmt);
+
+/* Compute the effective line size in bytes for a frame @frm_width pixels
+ * wide in format @pixfmt, honouring @requested_lnsize when the caller
+ * asks for extra padding. Returns the larger of the two plane line sizes
+ * so the result is valid for both planes of semi-planar formats.
+ *
+ * NOTE(review): @fmt is only partially initialized here; this assumes
+ * avi_dma_setup_line_size() reads nothing but pix_fmt — confirm. */
+static u32 avi_v4l2_lnsize(struct avi_dma_pixfmt pixfmt,
+ u32 frm_width,
+ u32 requested_lnsize)
+{
+ struct avi_segment_format fmt;
+
+ fmt.pix_fmt = pixfmt;
+ avi_dma_setup_line_size(&fmt, frm_width, requested_lnsize);
+
+ return max(fmt.plane0.line_size, fmt.plane1.line_size);
+}
+
+/* Total image size in bytes for @frm_height lines of @line_size bytes,
+ * taking the chroma plane of semi-planar formats into account. */
+static u32 avi_v4l2_frmsize(struct avi_dma_pixfmt pixfmt,
+			    unsigned int line_size,
+			    u32 frm_height)
+{
+	u32 lines;
+
+	switch (avi_pixfmt_get_packing(pixfmt)) {
+	case AVI_INTERLEAVED_444_PACKING:
+	case AVI_INTERLEAVED_YUV_422_PACKING:
+		/* Single interleaved plane */
+		lines = frm_height;
+		break;
+	case AVI_SEMIPLANAR_YUV_420_PACKING:
+		/* Chroma plane is half the height of the luma plane */
+		lines = frm_height + frm_height / 2;
+		break;
+	case AVI_SEMIPLANAR_YUV_422_PACKING:
+		/* Chroma plane has the same height as the luma plane */
+		lines = frm_height * 2;
+		break;
+	default:
+		BUG();
+		return 0;
+	}
+
+	return lines * line_size;
+}
+
+
+/* Convert a V4L2 pixel format (plus an optional crop rectangle) into an
+ * AVI segment format.
+ * @pix:         V4L2 description of the whole frame.
+ * @crop:        Optional crop rectangle; when NULL the full frame is used.
+ * @avi_fmt:     Filled with the resulting AVI segment format.
+ * @plane0_size: Filled with the size in bytes of plane 0.
+ * @plane1_size: Filled with the size in bytes of plane 1 (0 if unused).
+ *
+ * Returns 0 on success, -EINVAL on an unsupported pixel format or
+ * colorspace.
+ */
+int avi_v4l2_to_segment_fmt(const struct v4l2_pix_format *pix,
+			    const struct v4l2_rect *crop,
+			    struct avi_segment_format *avi_fmt,
+			    unsigned *plane0_size,
+			    unsigned *plane1_size)
+{
+	static const struct avi_segment_format avi_zero_fmt = { 0 };
+
+	*avi_fmt = avi_zero_fmt;
+
+	avi_fmt->pix_fmt = avi_v4l2_to_avi_pixfmt(pix->pixelformat);
+	if (avi_fmt->pix_fmt.id == AVI_INVALID_FMTID) {
+		dprintk("bad pixel format %d\n", pix->pixelformat);
+		return -EINVAL;
+	}
+
+	if (crop) {
+		avi_fmt->width = crop->width;
+		avi_fmt->height = crop->height;
+	} else {
+		/* Use the full frame */
+		avi_fmt->width = pix->width;
+		avi_fmt->height = pix->height;
+	}
+
+	avi_fmt->interlaced = (pix->field != V4L2_FIELD_NONE);
+
+	/* There is no V4L2 colorspace for 'GREY' so we specify it manually */
+	if (avi_v4l2_get_pixfmt_type(pix->pixelformat) == AVI_PIXTYPE_GREY) {
+		if (avi_fmt->pix_fmt.bytes_per_pixel0 == 0)
+			avi_fmt->colorspace = AVI_GREY_CSPACE;
+		else
+			avi_fmt->colorspace = AVI_Y10_CSPACE;
+	} else {
+		avi_fmt->colorspace = avi_v4l2_to_avi_cspace(pix->colorspace);
+	}
+
+	if (!avi_fmt->pix_fmt.raw && avi_fmt->colorspace == AVI_NULL_CSPACE) {
+		dprintk("bad colorspace %d\n", pix->colorspace);
+		return -EINVAL;
+	}
+
+	/* pix_fmt was validated above, so the line and plane sizes can
+	 * always be computed at this point (the previous invalid-id
+	 * fallback here was dead code). */
+	avi_dma_setup_line_size(avi_fmt, pix->width, pix->bytesperline);
+	avi_dma_setup_plane_size(avi_fmt,
+				 pix->height,
+				 plane0_size,
+				 plane1_size);
+
+	if (pix->field == V4L2_FIELD_INTERLACED) {
+		/* We will store one field at a time, which means that we want
+		 * to jump one line after each line to skip the other field */
+		avi_fmt->plane0.line_size *= 2;
+		avi_fmt->plane1.line_size *= 2;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_v4l2_to_segment_fmt);
+
+/* Fill @dma_buf with the plane start addresses for the (possibly
+ * cropped) frame stored at physical address @addr. Plane 1 is assumed to
+ * immediately follow plane 0 in memory, @plane0_size bytes later.
+ *
+ * NOTE(review): @plane1_size is currently unused by this function — it
+ * is kept for symmetry with avi_v4l2_to_segment_fmt(); confirm whether
+ * it should bound-check the plane 1 offsets. */
+void avi_v4l2_setup_dmabuf(const struct v4l2_rect *crop,
+ const struct avi_segment_format *fmt,
+ unsigned plane0_size,
+ unsigned plane1_size,
+ dma_addr_t addr,
+ void *priv,
+ struct avi_dma_buffer *dma_buf)
+{
+ dma_addr_t start0 = addr;
+ dma_addr_t start1 = 0;
+ unsigned top;
+ unsigned left;
+
+ if (crop) {
+ top = crop->top;
+ left = crop->left;
+ } else {
+ top = 0;
+ left = 0;
+ }
+
+ /* start address of transfered data in plane 0 */
+ start0 += top * fmt->plane0.line_size
+ + left * avi_pixel_size0(fmt->pix_fmt);
+
+ /* start address in plane 1 is valid only for semi-planar format */
+ if (avi_pixfmt_is_planar(fmt->pix_fmt)) {
+ /* Plane 1 follows Plane 0 */
+ start1 = addr + plane0_size;
+ /* in case of NV12 (4:2:0) height of chroma plane (plane 1)
+ * is divided by 2
+ */
+ /* start address of transfered data in plane 1: start of line */
+ if (avi_pixfmt_get_packing(fmt->pix_fmt) ==
+ AVI_SEMIPLANAR_YUV_420_PACKING)
+ start1 += top / 2 * fmt->plane1.line_size;
+ else
+ start1 += top * fmt->plane1.line_size;
+ /* add offset in line */
+ start1 += left * avi_pixel_size1(fmt->pix_fmt);
+ }
+
+ dma_buf->plane0.dma_addr = start0;
+ dma_buf->plane1.dma_addr = start1;
+ dma_buf->priv = priv;
+ dma_buf->status = AVI_BUFFER_READY;
+}
+
+/* Validate and adjust a V4L2 pixel format so that the AVI can handle it:
+ * unknown fourccs are replaced by the first table entry, the colorspace
+ * is guessed when missing, and width/height/bytesperline/sizeimage are
+ * clamped to hardware limits.
+ *
+ * NOTE(review): @type is currently unused. Returns 0 or -EINVAL. */
+int avi_v4l2_try_fmt(__u32 type, struct v4l2_pix_format *pix)
+{
+ struct avi_dma_pixfmt pixfmt = AVI_PIXFMT_INVALID;
+ enum avi_colorspace cspace = AVI_NULL_CSPACE;
+
+ /* check if pixelformat can deal with AVI */
+ pixfmt = avi_v4l2_to_avi_pixfmt(pix->pixelformat);
+ if (pixfmt.id == AVI_INVALID_FMTID) {
+ dprintk("unknown pixel format " FOURCC_FORMAT
+ ", replaced by default\n",
+ FOURCC_SHOW(pix->pixelformat));
+ /* Format is unknown, we replace it by a default format */
+ pixfmt = avi_v4l2_formats[0].avi_fmt;
+ pix->pixelformat = avi_v4l2_formats[0].v4l2_fmt;
+ }
+
+ if (pixfmt.raw) {
+ /* No meaningful colorspace for raw formats */
+ cspace = AVI_NULL_CSPACE;
+ pix->colorspace = 0;
+
+ } else {
+
+#ifdef CONFIG_AVI_V4L2_GST_COLORSPACE_HACK
+ /* gstreamer v4l2sink and src do not handle the colorspace
+ * correctly, they set bad values in the pix structure
+ * (actually, they don't set anything, they use whatever they
+ * get in g_fmt). Since we cannot trust the value we force the
+ * heuristic below to guess the colorspace */
+ cspace = AVI_NULL_CSPACE;
+#else
+ cspace = avi_v4l2_to_avi_cspace(pix->colorspace);
+#endif
+
+ if (cspace == AVI_NULL_CSPACE) {
+ /* Unknown or unsupported colorspace. Let's try to guess
+ * a sane value. */
+ switch (avi_v4l2_get_pixfmt_type(pix->pixelformat)) {
+ case AVI_PIXTYPE_RGB:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ case AVI_PIXTYPE_YUV:
+ pix->colorspace = (pix->height <= 576) ?
+ /* Use SMPTE170M for SD output */
+ V4L2_COLORSPACE_SMPTE170M :
+ V4L2_COLORSPACE_REC709;
+ break;
+ case AVI_PIXTYPE_GREY:
+ /* NOTE(review): unlike the RGB/YUV cases this
+ * only updates the local 'cspace', which is
+ * never read after this switch (dead store);
+ * pix->colorspace is left as the caller set
+ * it — confirm this is intended. */
+ if (pixfmt.bytes_per_pixel0 == 0)
+ cspace = AVI_GREY_CSPACE;
+ else
+ cspace = AVI_Y10_CSPACE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Adjust image size */
+ avi_limit_adjust_height(pixfmt, &pix->height);
+ avi_limit_adjust_width(pixfmt, &pix->width);
+
+ pix->priv = 0;
+
+ pix->bytesperline = avi_v4l2_lnsize(pixfmt,
+ pix->width,
+ pix->bytesperline);
+ pix->sizeimage = avi_v4l2_frmsize(pixfmt,
+ pix->bytesperline,
+ pix->height);
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_v4l2_try_fmt);
+
+/* Clamp a 1-D position/size pair so the interval fits inside
+ * [posmin, bound]. A non-positive size is replaced by the full bound. */
+static inline void avi_v4l2_crop_fit(int *pos, int *size, int posmin, int bound)
+{
+	int p = *pos;
+	int s = *size;
+
+	if (p < posmin)
+		p = posmin;
+
+	if (s <= 0 || s > bound)
+		s = bound;
+
+	if (p + s > bound)
+		p = bound - s;
+
+	*pos = p;
+	*size = s;
+}
+
+/* Adjust @crop so that it lies entirely within @bounds, clamping both
+ * the horizontal and the vertical extents. */
+void avi_v4l2_crop_adjust(struct v4l2_rect *crop,
+			  const struct v4l2_rect *bounds)
+{
+	avi_v4l2_crop_fit(&crop->left, &crop->width,
+			  bounds->left, bounds->width);
+	avi_v4l2_crop_fit(&crop->top, &crop->height,
+			  bounds->top, bounds->height);
+}
+
+/* Singleton backing the shared v4l2_device/media_device pair handed out
+ * by avi_v4l2_get_device(). Protected by @lock, lifetime ruled by
+ * @refcount. Made static: it is only referenced from this file and must
+ * not pollute the kernel's global namespace. */
+static struct avi_v4l2_device
+{
+	struct mutex		lock;
+	unsigned		refcount;
+	struct v4l2_device	dev;
+	struct media_device	media;
+} avi_v4l2_device;
+
+/* Return the shared v4l2_device, registering the media and V4L2 devices
+ * on first use. Returns NULL if either registration fails.
+ *
+ * NOTE(review): the refcount is tested before the lock is taken, and the
+ * mutex itself is (re-)initialized inside that unlocked window — two
+ * first-time callers can race here, and a refcount dropping back to 0
+ * followed by a new get() will memset() a mutex another thread may be
+ * about to lock. Consider a statically-initialized DEFINE_MUTEX guarding
+ * the whole function. */
+struct v4l2_device *avi_v4l2_get_device(void)
+{
+ struct v4l2_device *dev = &avi_v4l2_device.dev;
+ struct media_device *media = &avi_v4l2_device.media;
+ int ret;
+
+ if (avi_v4l2_device.refcount == 0) {
+ memset(&avi_v4l2_device, 0, sizeof(avi_v4l2_device));
+ strlcpy(avi_v4l2_device.media.bus_info, "AVI", sizeof(avi_v4l2_device.media.bus_info));
+ strlcpy(avi_v4l2_device.media.model, "AVI", sizeof(avi_v4l2_device.media.model));
+
+ mutex_init(&avi_v4l2_device.lock);
+ mutex_lock(&avi_v4l2_device.lock);
+ media->hw_revision = avi_get_revision();
+ media->dev = avi_ctrl.dev;
+
+ ret = media_device_register(media);
+ if (ret) {
+ dev = NULL;
+ goto unlock;
+ }
+
+ dev->mdev = media;
+
+ ret = v4l2_device_register(avi_ctrl.dev, dev);
+ if (ret) {
+ dev = NULL;
+ media_device_unregister(media);
+ goto unlock;
+ }
+ }
+ else
+ mutex_lock(&avi_v4l2_device.lock);
+
+ /* Only reached on success: failure paths jump straight to unlock,
+  * leaving the refcount at zero. */
+ avi_v4l2_device.refcount++;
+
+ unlock:
+ mutex_unlock(&avi_v4l2_device.lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(avi_v4l2_get_device);
+
+/* Drop one reference on the shared v4l2_device; the last put unregisters
+ * both the V4L2 and the media devices. @dev must be the pointer returned
+ * by avi_v4l2_get_device().
+ *
+ * NOTE(review): the mutex is deliberately left initialized so a later
+ * get() can reuse it — but see the race noted on avi_v4l2_get_device(). */
+void avi_v4l2_put_device(struct v4l2_device *dev)
+{
+ struct media_device *media = &avi_v4l2_device.media;
+
+ BUG_ON(dev != &avi_v4l2_device.dev);
+
+ mutex_lock(&avi_v4l2_device.lock);
+
+ avi_v4l2_device.refcount--;
+
+ if (avi_v4l2_device.refcount == 0) {
+ v4l2_device_unregister(dev);
+ dev->mdev = NULL;
+ media_device_unregister(media);
+ }
+
+ mutex_unlock(&avi_v4l2_device.lock);
+}
+EXPORT_SYMBOL(avi_v4l2_put_device);
+
+MODULE_AUTHOR("Victor Lambret <victor.lambret.ext@parrot.com>");
+MODULE_DESCRIPTION("Parrot AVI V4L2 layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_v4l2.h b/drivers/parrot/media/video/avi_v4l2.h
new file mode 100644
index 00000000000000..d6b4a3a1481fe9
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2.h
@@ -0,0 +1,143 @@
+/*
+ * linux/drivers/parrot/media/video/avi_v4l2.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 04-Feb-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_V4L2_H_
+#define _AVI_V4L2_H_
+
+#include <linux/videodev2.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+#include "video/avi_segment.h"
+#include "video/avi_pixfmt.h"
+#include "video/avi_limit.h"
+
+#define AVI_V4L2_BUS_VERSION "platform:parrot7:avi"
+
+/****************************************
+ * Set of macros to pretty print FourCCs
+ ****************************************/
+#define FOURCC_FORMAT "%c%c%c%c"
+#define FOURCC_SHOW(_cc) \
+ ((_cc) & 0xff), \
+ (((_cc) >> 8) & 0xff), \
+ (((_cc) >> 16) & 0xff), \
+ (((_cc) >> 24) & 0xff)
+
+enum avi_v4l2_pixtype {
+ AVI_PIXTYPE_UNKNOWN,
+ AVI_PIXTYPE_RGB,
+ AVI_PIXTYPE_YUV,
+ AVI_PIXTYPE_BAYER,
+ AVI_PIXTYPE_GREY,
+};
+
+extern enum avi_v4l2_pixtype avi_v4l2_get_pixfmt_type(u32);
+
+/** convert a V4L2 pixel format (fourcc) to a AVI pixel format
+ *
+ * @v4l2_fmt: fourcc pixel format
+ *
+ * Return a struct avi_dma_pixfmt
+ */
+extern struct avi_dma_pixfmt avi_v4l2_to_avi_pixfmt(u32 v4l2_fmt);
+
+/** Convert V4L2 configuration into AVI segment format
+ *
+ * Input parameters:
+ * @pix: struct v4l2_pix_format defining the whole frame
+ *
+ * @rect: defining the cropped area into whole frame
+ *
+ * Output parameters:
+ * @avi_fmt: AVI segment format filled by this function
+ *
+ * @plane0_size: size in bytes of plane 0, filled by this function
+ * @plane1_size: size in bytes of plane 1, filled by this function
+ * they are useful for computing addresses in buffer
+ * and size of image
+ */
+extern int
+avi_v4l2_to_segment_fmt(const struct v4l2_pix_format *pix,
+ const struct v4l2_rect *rect,
+ struct avi_segment_format *avi_fmt,
+ unsigned *plane0_size,
+ unsigned *plane1_size);
+
+/** Setup a avi_dma_buffer
+ *
+ * Input parameters:
+ * @rect: defining the cropped area into whole frame
+ * @fmt: AVI segment format set by avi_v4l2_to_segment_fmt()
+ * @plane0_size: size in bytes of plane 0, set by avi_v4l2_to_segment_fmt()
+ * @plane1_size: size in bytes of plane 1, set by avi_v4l2_to_segment_fmt()
+ * @addr: physical starting address of buffer for the whole frame
+ * @priv: value to set in priv field of dma_buf
+ *
+ * Output parameters:
+ * @dma_buf: AVI DMA buffer, filled by this function, suitable to be used
+ * by avi_segment_set_input_buffer() or
+ * avi_segment_set_output_buffer()
+ */
+extern void avi_v4l2_setup_dmabuf(const struct v4l2_rect *rect,
+ const struct avi_segment_format *fmt,
+ unsigned plane0_size,
+ unsigned plane1_size,
+ dma_addr_t addr,
+ void *priv,
+ struct avi_dma_buffer *dma_buf);
+
+/* enumerate V4L2 formats */
+extern int avi_v4l2_enum_fmt(struct v4l2_fmtdesc* fmt);
+
+/** try a V4L2 format
+ * used by IOCTL functions vidioc_try_fmt_xxxx
+ *
+ * Return -EINVAL if pixel format or colorspace are not available on AVI
+ */
+extern int avi_v4l2_try_fmt(__u32 type, struct v4l2_pix_format *pix);
+
+extern void avi_v4l2_crop_adjust(struct v4l2_rect *crop,
+ const struct v4l2_rect *bounds);
+
+/* Map a V4L2 colorspace onto the corresponding AVI colorspace.
+ * Unknown or unsupported values map to AVI_NULL_CSPACE. */
+static inline enum avi_colorspace avi_v4l2_to_avi_cspace(enum v4l2_colorspace c)
+{
+	if (c == V4L2_COLORSPACE_SMPTE170M)
+		return AVI_BT601_CSPACE;
+	if (c == V4L2_COLORSPACE_REC709)
+		return AVI_BT709_CSPACE;
+	if (c == V4L2_COLORSPACE_JPEG)
+		return AVI_JFIF_CSPACE;
+	if (c == V4L2_COLORSPACE_SRGB)
+		return AVI_RGB_CSPACE;
+
+	/* Unknown colorspace */
+	return AVI_NULL_CSPACE;
+}
+
+extern struct v4l2_device *avi_v4l2_get_device(void);
+extern void avi_v4l2_put_device(struct v4l2_device *);
+
+#endif /* _AVI_V4L2_H_ */
diff --git a/drivers/parrot/media/video/avi_v4l2_dev.c b/drivers/parrot/media/video/avi_v4l2_dev.c
new file mode 100644
index 00000000000000..a503ac5126a0d1
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2_dev.c
@@ -0,0 +1,192 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+
+#include "avi_v4l2.h"
+#include "avi_v4l2_dev.h"
+
+#define DRIVER_NAME "avi_v4l2_dev"
+#define DEVICE_NAME "avi"
+
+/* Global state of the "avi" character device.
+ * @dev:          Device created by device_create().
+ * @lock:         Serializes segment creation through the ioctl.
+ * @class:        Sysfs class owning the device node.
+ * @devno:        Allocated chardev region (one minor).
+ * @cdev:         The character device itself.
+ * @segment_devs: List of segments built on behalf of userland.
+ * @id:           Monotonic counter used to name new segments. */
+struct avi_v4l2_dev
+{
+ struct device *dev;
+ struct mutex lock;
+ struct class *class;
+ dev_t devno;
+ struct cdev cdev;
+ struct list_head segment_devs;
+ int id;
+};
+
+/* One AVI segment built via AVI_BUILD_DEV, linked into
+ * avi_v4l2_dev.segment_devs and torn down at module exit. */
+struct segment_dev
+{
+ struct list_head list;
+ struct avi_segment *segment;
+};
+
+/* There should be only one instance of this driver so I put the data statically
+ * here. I still pass pointers everywhere though, in order to make all functions
+ * reentrant if we ever need to refactor the driver or add multiple instances
+ * for some reason. */
+static struct avi_v4l2_dev avi_v4l2_dev;
+
+/* Called with avi_dev->lock held */
+/* Build a new AVI segment from the userland-provided capabilities and
+ * queue it on @avi_dev->segment_devs; on success @d->id is filled with
+ * the generated segment id. Must be called with @avi_dev->lock held.
+ * Returns 0 or a negative error code. Made static: only used by the
+ * ioctl handler in this file. */
+static int avi_v4l2_dev_build(struct avi_v4l2_dev *avi_dev,
+			      struct avi_dev_segment *d)
+{
+	struct segment_dev *sd;
+
+	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+	if (!sd)
+		return -ENOMEM;
+
+	sd->segment = avi_segment_build(&d->caps,
+					"v4l2",
+					avi_dev->id,
+					-1,
+					avi_dev->dev);
+	if (IS_ERR(sd->segment)) {
+		int ret = PTR_ERR(sd->segment);
+
+		kfree(sd);
+
+		return ret;
+	}
+
+	avi_dev->id++;
+	memcpy(d->id, sd->segment->id, sizeof(d->id));
+
+	list_add_tail(&sd->list, &avi_dev->segment_devs);
+
+	return 0;
+}
+
+/* Dispatch userland ioctls. AVI_BUILD_DEV copies the request in, builds
+ * the segment under the device lock, then copies the resulting id back
+ * (even on failure, so userland gets a coherent view). */
+static long avi_v4l2_dev_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct avi_v4l2_dev *avi_dev = &avi_v4l2_dev;
+	struct avi_dev_segment d;
+	int ret;
+
+	switch (cmd) {
+	case AVI_BUILD_DEV:
+		if (copy_from_user(&d, argp, sizeof(d)))
+			return -EFAULT;
+
+		mutex_lock(&avi_dev->lock);
+		ret = avi_v4l2_dev_build(avi_dev, &d);
+		mutex_unlock(&avi_dev->lock);
+
+		if (copy_to_user(argp, &d, sizeof(d)))
+			return -EFAULT;
+
+		/* the unreachable 'break' that followed this return has
+		 * been dropped */
+		return ret;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/* open(): no per-file state to set up. Made static: only referenced by
+ * the fops table in this file. */
+static int avi_v4l2_dev_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* release(): nothing to tear down; built segments persist until module
+ * exit. Made static: only referenced by the fops table in this file. */
+static int avi_v4l2_dev_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* File operations of the "avi" chardev; all the real work happens in the
+ * unlocked_ioctl handler. */
+static struct file_operations const avi_v4l2_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = avi_v4l2_dev_open,
+ .release = avi_v4l2_dev_release,
+ .unlocked_ioctl = avi_v4l2_dev_ioctl,
+};
+
+/* Module init: register the character device used to build AVI segments
+ * from userland. Returns 0 or a negative error code, unwinding every
+ * registration on failure. */
+static int __init avi_v4l2_dev_init(void)
+{
+	struct avi_v4l2_dev *avi_dev = &avi_v4l2_dev;
+	int ret;
+
+	if (!avi_probed()) {
+		ret = -ENODEV;
+		goto no_avi;
+	}
+
+	memset(avi_dev, 0, sizeof(*avi_dev));
+
+	mutex_init(&avi_dev->lock);
+	INIT_LIST_HEAD(&avi_dev->segment_devs);
+
+	ret = alloc_chrdev_region(&avi_dev->devno, 0, 1, DRIVER_NAME);
+	if (ret)
+		goto alloc_chrdev_failed;
+
+	cdev_init(&avi_dev->cdev, &avi_v4l2_dev_fops);
+	avi_dev->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&avi_dev->cdev, avi_dev->devno, 1);
+	if (ret)
+		goto cdev_add_failed;
+
+	avi_dev->class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(avi_dev->class)) {
+		ret = PTR_ERR(avi_dev->class);
+		goto class_create_failed;
+	}
+
+	/* No parent device. Store avi_dev itself as drvdata: the previous
+	 * code passed &avi_dev, the address of this *local* pointer
+	 * variable, which dangles as soon as init returns. */
+	avi_dev->dev = device_create(avi_dev->class,
+				     NULL,
+				     avi_dev->devno,
+				     avi_dev,
+				     DEVICE_NAME);
+	if (IS_ERR(avi_dev->dev)) {
+		ret = PTR_ERR(avi_dev->dev);
+		goto device_create_failed;
+	}
+
+	dev_info(avi_dev->dev, "successfully probed\n");
+
+	return 0;
+
+ device_create_failed:
+	class_destroy(avi_dev->class);
+ class_create_failed:
+	cdev_del(&avi_dev->cdev);
+ cdev_add_failed:
+	unregister_chrdev_region(avi_dev->devno, 1);
+ alloc_chrdev_failed:
+	mutex_destroy(&avi_dev->lock);
+ no_avi:
+	memset(avi_dev, 0, sizeof(*avi_dev));
+
+	return ret;
+}
+module_init(avi_v4l2_dev_init);
+
+/* Module exit: tear down every segment built through the ioctl interface
+ * then unregister the character device. */
+static void __exit avi_v4l2_dev_exit(void)
+{
+	struct segment_dev *sd, *next;
+	struct avi_v4l2_dev *avi_dev = &avi_v4l2_dev;
+
+	list_for_each_entry_safe(sd, next, &avi_dev->segment_devs, list) {
+		avi_segment_teardown(sd->segment);
+		kfree(sd);
+	}
+
+	/* The original ended this call with a comma operator (','), not a
+	 * semicolon — harmless here but clearly a typo. */
+	device_destroy(avi_dev->class, avi_dev->devno);
+	class_destroy(avi_dev->class);
+	cdev_del(&avi_dev->cdev);
+	unregister_chrdev_region(avi_dev->devno, 1);
+	mutex_destroy(&avi_dev->lock);
+
+	memset(&avi_v4l2_dev, 0, sizeof(avi_v4l2_dev));
+}
+module_exit(avi_v4l2_dev_exit);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("V4L2 device interface for the AVI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_v4l2_dev.h b/drivers/parrot/media/video/avi_v4l2_dev.h
new file mode 100644
index 00000000000000..1b33e6161cb789
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2_dev.h
@@ -0,0 +1,19 @@
+#ifndef _AVI_V4L2_DEV_H_
+#define _AVI_V4L2_DEV_H_
+
+#include <asm/ioctl.h>
+
+#include "video/avi_cap.h"
+
+/*
+ * Userland interface
+ */
+
+/* Request passed to AVI_BUILD_DEV: userland fills @caps with the AVI
+ * capabilities required; the kernel returns the generated segment @id. */
+struct avi_dev_segment {
+	unsigned long	caps;
+	char		id[16];
+};
+
+/* The handler both reads and writes the struct (copy_from_user +
+ * copy_to_user), and the ioctl number must encode the struct itself,
+ * not a pointer to it — hence _IOWR on the bare type. */
+#define AVI_BUILD_DEV _IOWR('A', 0x0, struct avi_dev_segment)
+#endif /* _AVI_V4L2_DEV_H_ */
diff --git a/drivers/parrot/media/video/avi_v4l2_isp.c b/drivers/parrot/media/video/avi_v4l2_isp.c
new file mode 100644
index 00000000000000..9140402b30d7d4
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2_isp.c
@@ -0,0 +1,1286 @@
+/*
+ * linux/drivers/parrot/video/media/avi_v4l2_isp.c
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Alexandre Dilly <alexandre.dilly@parrot.com>
+ * @date 01-Jan-2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../../video/avi_compat.h"
+#include "../../video/reg_avi.h"
+
+#include "avi_v4l2_isp_ctrls.h"
+#include "avi_v4l2_isp.h"
+
+/** Boolean control types
+ * @id: Control ID
+ * @name: Name of control (and description)
+ * @addr: Offset of associated register
+ * @len: Length of register field (in bits)
+ * @shift: Position of field in register
+ * @read: Read register before writing
+ * @flags: Flags (RO, WO, ...)
+ */
+#define _BOOLEAN_CTRL(_id, _name, _addr, _len, _shift, _read, _flags) \
+{ \
+ .cfg = { \
+ .ops = &avi_v4l2_isp_ops, \
+ .type = V4L2_CTRL_TYPE_BOOLEAN, \
+ .id = _id, \
+ .name = _name, \
+ .def = 0, \
+ .min = 0, \
+ .max = 1, \
+ .step = 1, \
+ .flags = _flags, \
+ }, \
+ .reg = { \
+ .addr = _addr, \
+ .count = 1, \
+ .mask = ((1L << _len)-1) << _shift, \
+ .shift = _shift, \
+ .read = _read, \
+ }, \
+},
+#define BOOLEAN_CTRL(_id, _name, _addr, _len, _shift, _read) \
+ _BOOLEAN_CTRL(_id, _name, _addr, _len, _shift, _read, 0)
+
+/** Array control types
+ * @id: Control ID
+ * @name: Name of control (and description)
+ * @min: Minimal value for control
+ * @max: Maximal value for control
+ * @def: Default value for control
+ * @size: Size of array
+ * @addr: Offset of associated register
+ * @len: Length of register field (in bits)
+ * @shift: Position of field in register
+ * @read: Read register before writing
+ * @flags: Flags (RO, WO, ...)
+ */
+#define _ARRAY_CTRL(_type, _id, _name, _min, _max, _def, _size, _addr, _len, \
+ _shift, _read, _flags) \
+{ \
+ .cfg = { \
+ .ops = &avi_v4l2_isp_ops, \
+ .type = _type, \
+ .id = _id, \
+ .name = _name, \
+ .def = _def, \
+ .min = _min, \
+ .max = _max, \
+ .step = 1, \
+ .flags = _flags, \
+ .dims = { _size } \
+ }, \
+ .reg = { \
+ .addr = _addr, \
+ .count = _size, \
+ .mask = ((1L << _len)-1) << _shift, \
+ .shift = _shift, \
+ .read = _read, \
+ }, \
+},
+#define _ARRAY8_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, _shift, \
+ _read, _flags) \
+ _ARRAY_CTRL(V4L2_CTRL_TYPE_U8, _id, _name, _min, _max, _def, _size, \
+ _addr, _len, _shift, _read, _flags)
+#define ARRAY8_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, _shift, \
+ _read) \
+ _ARRAY8_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, _shift, \
+ _read, 0)
+
+#define _ARRAY16_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, \
+ _shift, _read, _flags) \
+ _ARRAY_CTRL(V4L2_CTRL_TYPE_U16, _id, _name, _min, _max, _def, _size, \
+ _addr, _len, _shift, _read, _flags)
+#define ARRAY16_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, _shift, \
+ _read) \
+ _ARRAY16_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, \
+ _shift, _read, 0)
+
+#define _ARRAY32_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, \
+ _shift, _read, _flags) \
+ _ARRAY_CTRL(V4L2_CTRL_TYPE_INTEGER, _id, _name, _min, _max, _def, \
+ _size, _addr, _len, _shift, _read, _flags)
+#define ARRAY32_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, _shift, \
+ _read) \
+ _ARRAY32_CTRL(_id, _name, _min, _max, _def, _size, _addr, _len, \
+ _shift, _read, 0)
+
+/** Basic control types
+ * @id: Control ID
+ * @name: Name of control (and description)
+ * @min: Minimal value for control
+ * @max: Maximal value for control
+ * @def: Default value for control
+ * @addr: Offset of associated register
+ * @len: Length of register field (in bits)
+ * @shift: Position of field in register
+ * @read: Read register before writing
+ * @flags: Flags (RO, WO, ...)
+ */
+#define _U32_CTRL(_id, _name, _min, _max, _def, _addr, _len, _shift, _read, \
+ _flags) \
+{ \
+ .cfg = { \
+ .ops = &avi_v4l2_isp_ops, \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .id = _id, \
+ .name = _name, \
+ .def = _def, \
+ .min = _min, \
+ .max = _max, \
+ .step = 1, \
+ .flags = _flags, \
+ }, \
+ .reg = { \
+ .addr = _addr, \
+ .count = 1, \
+ .mask = ((1L << _len)-1) << _shift, \
+ .shift = _shift, \
+ .read = _read, \
+ }, \
+},
+#define U32_CTRL(_id, _name, _min, _max, _def, _addr, _len, _shift, _read) \
+ _U32_CTRL(_id, _name, _min, _max, _def, _addr, _len, _shift, \
+ _read, 0)
+#define _U16_CTRL _U32_CTRL
+#define U16_CTRL U32_CTRL
+#define _U8_CTRL _U32_CTRL
+#define U8_CTRL U32_CTRL
+
+static int avi_v4l2_isp_g_volatile_ctrl(struct v4l2_ctrl *ctrl);
+static int avi_v4l2_isp_s_ctrl(struct v4l2_ctrl *ctrl);
+
+struct v4l2_ctrl_ops avi_v4l2_isp_ops = {
+ .g_volatile_ctrl = avi_v4l2_isp_g_volatile_ctrl,
+ .s_ctrl = avi_v4l2_isp_s_ctrl,
+};
+
+struct avi_v4l2_isp_ctrl_config {
+ struct v4l2_ctrl_config cfg;
+ struct avi_v4l2_isp_reg {
+ unsigned long addr;
+ unsigned long count;
+ u32 mask;
+ u8 shift;
+ u8 read;
+ } reg;
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_bayer_ctrls[] = {
+ /* ISP_BYPASS */
+ BOOLEAN_CTRL(P7_CID_BYPASS_PEDESTAL, "Bypass Pedestal ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 0, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_GRIM, "Bypass GRIM ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 1, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_RIP, "Bypass RIP ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 2, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_DENOISE, "Bypass Denoise ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 3, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_LSC, "Bypass LSC ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 4, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_CHROMA_ABER,
+ "Bypass Chroma Aberation ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 5, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_BAYER, "Bypass Bayer ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 6, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_COLOR_MATRIX,
+ "Bypass Color Matrix ISP Module",
+ AVI_ISP_CHAIN_BAYER_INTER, 1, 7, 1)
+ /* ISP_VLF_32TO40 */
+ U8_CTRL(P7_CID_ISP_VLF_32_TO_40, "VL Format 32 to 40", 0, 4, 0,
+ AVI_ISP_VL_FORMAT_32_TO_40 + AVI_ISP_VLFORMAT_32TO40_FORMAT,
+ 3, 0, 0)
+ /* ISP_PEDESTAL */
+ U16_CTRL(P7_CID_ISP_PEDESTAL_SUB_R,
+ "Value subtracted from red bayer pixels",
+ 0, 1023, 0,
+ AVI_ISP_PEDESTAL + AVI_ISP_PEDESTAL_SUB_R, 10, 0, 0)
+ U16_CTRL(P7_CID_ISP_PEDESTAL_SUB_GB,
+ "Value subtracted from green bayer pixels on blue row",
+ 0, 1023, 0,
+ AVI_ISP_PEDESTAL + AVI_ISP_PEDESTAL_SUB_GB, 10, 0, 0)
+ U16_CTRL(P7_CID_ISP_PEDESTAL_SUB_GR,
+ "Value subtracted from green bayer pixels on red row",
+ 0, 1023, 0,
+ AVI_ISP_PEDESTAL + AVI_ISP_PEDESTAL_SUB_GR, 10, 0, 0)
+ U16_CTRL(P7_CID_ISP_PEDESTAL_SUB_B,
+ "Value subtracted from blue bayer pixels",
+ 0, 1023, 0,
+ AVI_ISP_PEDESTAL + AVI_ISP_PEDESTAL_SUB_B, 10, 0, 0)
+ U8_CTRL(P7_CID_ISP_PEDESTAL_CFA,
+ "Color Filter Array of Pedestal",
+ 0, 3, 0,
+ AVI_ISP_PEDESTAL + AVI_ISP_PEDESTAL_CFA, 2, 0, 0)
+ /* ISP_GRIM */
+ U16_CTRL(P7_CID_ISP_GRIM_OFFSET_X,
+ "Offset of top left pixel from top left vertice of cell",
+ 0, 511, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_OFFSET_X_Y, 9, 0, 1)
+ U16_CTRL(P7_CID_ISP_GRIM_OFFSET_Y,
+ "Offset of top left pixel from top left vertice of cell",
+ 0, 1023, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_OFFSET_X_Y, 10, 16, 1)
+ U8_CTRL(P7_CID_ISP_GRIM_CELL_ID_X,
+ "Horizontal cell index to which top left pixel belongs",
+ 0, 15, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_ID_X_Y, 4, 0, 1)
+ U8_CTRL(P7_CID_ISP_GRIM_CELL_ID_Y,
+ "Vertical cell index to which top left pixel belongs",
+ 0, 15, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_ID_X_Y, 4, 16, 1)
+ U16_CTRL(P7_CID_ISP_GRIM_CELL_W,
+ "Mesh cells width in pixel", 0, 511, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_W, 9, 0, 0)
+ U16_CTRL(P7_CID_ISP_GRIM_CELL_H,
+ "Mesh cells height in pixel", 0, 1023, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_H, 10, 0, 0)
+ U32_CTRL(P7_CID_ISP_GRIM_CELL_W_INV,
+ "Mesh cells inverse width", 0, 131071, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_W_INV, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_GRIM_CELL_H_INV,
+ "Mesh cells inverse height", 0, 131071, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_CELL_H_INV, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_GRIM_ALPHA,
+ "Normalised horizontal offset of top left pixel from top left "
+ "vertice of cell",
+ 0, 131071, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_ALPHA, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_GRIM_BETA,
+ "Normalised vertical offset of top left pixel from top left "
+ "vertice of cell",
+ 0, 131071, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_BETA, 17, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_GRIM_GR_COEFF,
+ "Red coefficients: Correction mesh for R channel",
+ 0, 255, 0, 221,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_GREEN_RED_COEFF_MEM, 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_GRIM_GB_COEFF,
+ "Blue coefficients: Correction mesh for B channel",
+ 0, 255, 0, 221,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_GREEN_BLUE_COEFF_MEM, 8, 0, 0)
+ U8_CTRL(P7_CID_ISP_GRIM_CFA,
+ "Color Filter Array of Green Imbalance",
+ 0, 3, 0,
+ AVI_ISP_GREEN_IMBALANCE +
+ AVI_ISP_GREEN_IMBALANCE_BAYER_CFA, 2, 0, 0)
+ /* ISP_RIP */
+ ARRAY16_CTRL(P7_CID_ISP_RIP_LIST_MEM_X, "Abscissa of known dead pixel",
+ 0, 2047, 0, 256,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_LIST_MEM, 11, 3, 1)
+ ARRAY16_CTRL(P7_CID_ISP_RIP_LIST_MEM_Y, "Ordinate of known dead pixel",
+ 0, 4095, 0, 256,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_LIST_MEM, 12, 14, 1)
+ ARRAY8_CTRL(P7_CID_ISP_RIP_LIST_MEM_OP,
+ "List of suitable corrections for each corresponding dead "
+ "pixel position",
+ 0, 7, 0, 256,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_LIST_MEM, 3, 0, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_RIP_BYPASS_LIST, "Deactivates list mode",
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_BYPASS, 1, 0, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_RIP_BYPASS_AUTO_DETECTION,
+ "Deactivates the live mode",
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_BYPASS, 1, 1, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_RIP_BYPASS_RGRIM,
+ "Deactivates the RGRIM filter list mode, which in turn "
+ "takes precedence over the RGRIM filter",
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_BYPASS, 1, 2, 1)
+ U16_CTRL(P7_CID_ISP_RIP_THRESHOLD,
+ "Used in Live mode to compare one pixel to its neighbours",
+ 0, 1023, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_THRESHOLD, 10, 0, 0)
+ U8_CTRL(P7_CID_ISP_RIP_RGRIM_CONF_TIM_1, "Imbalance 1", 0, 255, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_CONF, 8, 0, 1)
+ U8_CTRL(P7_CID_ISP_RIP_RGRIM_CONF_TIM_2, "Imbalance 2", 0, 255, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_CONF, 8, 8, 1)
+ U8_CTRL(P7_CID_ISP_RIP_RGRIM_CONF_TSAT, "Saturation", 0, 255, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_CONF, 8, 16, 1)
+ U8_CTRL(P7_CID_ISP_RIP_RGRIM_CONF_TCON, "Contrast", 0, 255, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_CONF, 8, 24, 1)
+ U8_CTRL(P7_CID_ISP_RIP_RGRIM_GAIN,
+ "The gain obeys the formula : actual gain = 20 * gain / 4096",
+ 0, 127, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_GAIN, 7, 0, 0)
+ U8_CTRL(P7_CID_ISP_RIP_CFA,
+ "Color Filter Array of Dead Pixel Correction",
+ 0, 3, 0,
+ AVI_ISP_DEAD_PIXEL_CORRECTION +
+ AVI_ISP_DEAD_PIXEL_CORRECTION_CFA, 2, 0, 0)
+ /* ISP_DENOISE */
+ ARRAY8_CTRL(P7_CID_ISP_DENOISE_LUMOCOEFF_RED,
+ "Ordinates of lumo coeffs Red", 0, 255, 0, 14,
+ AVI_ISP_DENOISING + AVI_ISP_DENOISING_LUMOCOEFF_R_03_00,
+ 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_DENOISE_LUMOCOEFF_GREEN,
+ "Ordinates of lumo coeffs Green", 0, 255, 0, 14,
+ AVI_ISP_DENOISING + AVI_ISP_DENOISING_LUMOCOEFF_G_03_00,
+ 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_DENOISE_LUMOCOEFF_BLUE,
+ "Ordinates of lumo coeffs Blue", 0, 255, 0, 14,
+ AVI_ISP_DENOISING + AVI_ISP_DENOISING_LUMOCOEFF_B_03_00,
+ 8, 0, 0)
+ U8_CTRL(P7_CID_ISP_DENOISE_CFA,
+ "Color Filter Array of Denoising",
+ 0, 3, 0,
+ AVI_ISP_DENOISING + AVI_ISP_DENOISING_CFA, 2, 0, 0)
+ /* ISP_BAYER_STAT */
+ _BOOLEAN_CTRL(P7_CID_ISP_BSTAT_MEASURE_REQ_CLEAR,
+ "Force clear the histogram, usually done the first time "
+ "before enabling the stats bayer module",
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_MEASURE_REQ, 1, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_X_WIDTH,
+ "Width of a thumbnail pixel in sensor pixels (how many "
+ "columns are averaged into one)",
+ 0, 2047, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_WINDOW_X, 11, 16, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_X_OFFSET,
+ "Origin abcissa of the statistics window in the captured "
+ "image",
+ 0, 8191, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_WINDOW_X, 13, 0, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_Y_HEIGHT,
+ "Height of a thumbnail pixel in sensor pixels (how many rows "
+ "are averaged into one).",
+ 0, 2047, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_WINDOW_Y, 11, 16, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_Y_OFFSET,
+ "Origin abcissa of the statistics window in the captured "
+ "image",
+ 0, 8191, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_WINDOW_Y, 13, 0, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_CIRCLE_POS_X_CENTER,
+ "Center abcissa of the image circle in sensor pixels",
+ 0, 16383, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_X_CENTER, 14, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_BSTAT_CIRCLE_POS_X_SQUARED,
+ "Center abcissa of the image circle in sensor pixels value "
+ "squared",
+ 0, 67108863, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_X_SQUARED, 26, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_CIRCLE_POS_Y_CENTER,
+ "Center ordinate of the image circle in sensor pixels",
+ 0, 16383, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_Y_CENTER, 14, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_BSTAT_CIRCLE_POS_Y_SQUARED,
+ "Center ordinate of the image circle in sensor pixels value "
+ "squared",
+ 0, 67108863, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_Y_SQUARED, 26, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_BSTAT_CIRCLE_RADIUS_SQUARED,
+ "Squared radius of the image circle", 0, 536870911, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_CIRCLE_RADIUS_SQUARED, 29, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U8_CTRL(P7_CID_ISP_BSTAT_INC_X_LOG2_INC,
+ "Vertical skipping/binning parameter", 0, 7, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_INCREMENTS_LOG2, 3, 0, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U8_CTRL(P7_CID_ISP_BSTAT_INC_Y_LOG2_INC,
+ "Horizontal skipping/binning parameter", 0, 7, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_INCREMENTS_LOG2, 3, 16, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U16_CTRL(P7_CID_ISP_BSTAT_SAT_THRESHOLD,
+ "Below this threshold a pixel is considered unsaturated, a "
+ "Bayer block is saturated if any of its 4 pixels is saturated",
+ 0, 1023, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_SAT_THRESHOLD, 10, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U8_CTRL(P7_CID_ISP_BSTAT_MAX_X_WINDOW_COUNT,
+ "Width of thumbnail", 0, 127, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_MAX_NB_WINDOWS, 7, 0, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U8_CTRL(P7_CID_ISP_BSTAT_MAX_Y_WINDOW_COUNT,
+ "Height of thumbnail", 0, 127, 0,
+ AVI_ISP_STATS_BAYER +
+ AVI_ISP_STATISTICS_BAYER_MAX_NB_WINDOWS, 7, 16, 1,
+ V4L2_CTRL_FLAG_VOLATILE)
+ _U8_CTRL(P7_CID_ISP_BSTAT_CFA,
+ "Color Filter Array of Statistics bayer",
+ 0, 3, 0,
+ AVI_ISP_STATS_BAYER + AVI_ISP_STATISTICS_BAYER_CFA, 2, 0, 0,
+ V4L2_CTRL_FLAG_VOLATILE)
+ /* ISP_LSC */
+ U16_CTRL(P7_CID_ISP_LSC_OFFSET_X,
+ "Offset of top left pixel from top left vertice of cell",
+ 0, 511, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_OFFSET_X_Y, 9, 0, 1)
+ U16_CTRL(P7_CID_ISP_LSC_OFFSET_Y,
+ "Offset of top left pixel from top left vertice of cell",
+ 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_OFFSET_X_Y, 10, 16, 1)
+ U8_CTRL(P7_CID_ISP_LSC_CELL_ID_X,
+ "Horizontal cell index to which top left pixel belongs",
+ 0, 15, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_ID_X_Y, 4, 0, 1)
+ U8_CTRL(P7_CID_ISP_LSC_CELL_ID_Y,
+ "Vertical cell index to which top left pixel belongs",
+ 0, 15, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_ID_X_Y, 4, 16, 1)
+ U16_CTRL(P7_CID_ISP_LSC_CELL_W,
+ "Mesh cells width in pixel", 0, 511, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_W, 9, 0, 0)
+ U16_CTRL(P7_CID_ISP_LSC_CELL_H,
+ "Mesh cells height in pixel", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_H, 10, 0, 0)
+ U32_CTRL(P7_CID_ISP_LSC_CELL_W_INV,
+ "Mesh cells inverse width", 0, 131071, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_W_INV, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_LSC_CELL_H_INV,
+ "Mesh cells inverse height", 0, 131071, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_CELL_H_INV, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_LSC_ALPHA,
+ "Normalised horizontal offset of top left pixel from top left "
+ "vertice of cell",
+ 0, 131071, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_ALPHA, 17, 0, 0)
+ U32_CTRL(P7_CID_ISP_LSC_BETA,
+ "Normalised vertical offset of top left pixel from top left "
+ "vertice of cell",
+ 0, 131071, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_BETA, 17, 0, 0)
+ U16_CTRL(P7_CID_ISP_LSC_THRESHOLD_B,
+ "Threshold to avoid declipping", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_THRESHOLD, 10, 20, 1)
+ U16_CTRL(P7_CID_ISP_LSC_THRESHOLD_G,
+ "Threshold to avoid declipping", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_THRESHOLD, 10, 10, 1)
+ U16_CTRL(P7_CID_ISP_LSC_THRESHOLD_R,
+ "Threshold to avoid declipping", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_THRESHOLD, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_LSC_GAIN_B,
+ "White balance gain Blue", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_GAIN, 10, 20, 1)
+ U16_CTRL(P7_CID_ISP_LSC_GAIN_G,
+ "White balance gain Green", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_GAIN, 10, 10, 1)
+ U16_CTRL(P7_CID_ISP_LSC_GAIN_R,
+ "White balance gain Red", 0, 1023, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_GAIN, 10, 0, 1)
+ ARRAY8_CTRL(P7_CID_ISP_LSC_RED_COEFF_MEM,
+ "Correction mesh for Red channel (array 15x13)",
+ 0, 255, 0, 221,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_RED_COEFF_MEM, 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_LSC_GREEN_COEFF_MEM,
+ "Correction mesh for Green channel (array 15x13)",
+ 0, 255, 0, 221,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_GREEN_COEFF_MEM, 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_LSC_BLUE_COEFF_MEM,
+ "Correction mesh for Blue channel (array 15x13)",
+ 0, 255, 0, 221,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_BLUE_COEFF_MEM, 8, 0, 0)
+ U8_CTRL(P7_CID_ISP_LSC_CFA,
+ "Color Filter Array of Lens Shading correction",
+ 0, 3, 0,
+ AVI_ISP_LENS_SHADING_CORRECTION +
+ AVI_ISP_LENS_SHADING_CORRECTION_BAYER_CFA, 2, 0, 0)
+ /* ISP_CHROMA_ABER */
+ ARRAY32_CTRL(P7_CID_ISP_CA_RADIUS_SQUARED,
+ "Abscissa of the Look Up Table", 0, 16777215, 0, 20,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_RADIUS_SQUARED, 24, 0, 0)
+ ARRAY16_CTRL(P7_CID_ISP_CA_DISPLACEMENT_BLUE_COEFF,
+ "Displacement for the BLUE channel", 0, 65535, 0, 20,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_DISPLACEMENT_COEFFS, 16, 0, 1)
+ ARRAY16_CTRL(P7_CID_ISP_CA_DISPLACEMENT_RED_COEFF,
+ "Displacement for the RED channel", 0, 65535, 0, 20,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_DISPLACEMENT_COEFFS,
+ 16, 16, 1)
+ U16_CTRL(P7_CID_ISP_CA_CIRCLE_POS_X_CENTER,
+ "Center abcissa of the image circle in sensor pixels",
+ 0, 16383, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_X_CENTER, 14, 0, 0)
+ U32_CTRL(P7_CID_ISP_CA_CIRCLE_POS_X_SQUARED,
+ "Center abcissa of the image circle in sensor pixels value "
+ "squared.",
+ 0, 67108863, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_X_SQUARED, 26, 0, 0)
+ U16_CTRL(P7_CID_ISP_CA_CIRCLE_POS_Y_CENTER,
+ "Center ordinate of the image circle in sensor pixels",
+ 0, 16383, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_Y_CENTER, 14, 0, 0)
+ U32_CTRL(P7_CID_ISP_CA_CIRCLE_POS_Y_SQUARED,
+ "Center ordinate of the image circle in sensor pixels value "
+ "squared",
+ 0, 67108863, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_Y_SQUARED, 26, 0, 0)
+ BOOLEAN_CTRL(P7_CID_ISP_CA_GREEN_VARIATION,
+ "Green variation is taken into account for the correction",
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_GREEN_VARIATION, 1, 0, 0)
+ U8_CTRL(P7_CID_ISP_CA_X_LOG2_INC,
+ "Horizontal skipping/binning parameter", 0, 7, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_INCREMENTS_LOG2, 3, 0, 1)
+ U8_CTRL(P7_CID_ISP_CA_Y_LOG2_INC,
+ "Vertical skipping/binning parameter", 0, 7, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_INCREMENTS_LOG2, 3, 16, 1)
+ U8_CTRL(P7_CID_ISP_CA_CFA,
+ "Color Filter Array of Chromatic Aberration",
+ 0, 3, 0,
+ AVI_ISP_CHROMATIC_ABERRATION +
+ AVI_ISP_CHROMATIC_ABERRATION_CFA, 2, 0, 0)
+ /* ISP_BAYER */
+ U16_CTRL(P7_CID_ISP_BAYER_THRESHOLD_1, "Low threshold value",
+ 0, 8191, 0,
+ AVI_ISP_BAYER + AVI_ISP_BAYER_THRESHOLD_1, 13, 0, 0)
+ U16_CTRL(P7_CID_ISP_BAYER_THRESHOLD_2, "High threshold value",
+ 0, 8191, 0,
+ AVI_ISP_BAYER + AVI_ISP_BAYER_THRESHOLD_2, 13, 0, 0)
+ U8_CTRL(P7_CID_ISP_BAYER_CFA,
+ "Color Filter Array of Bayer",
+ 0, 3, 0,
+ AVI_ISP_BAYER + AVI_ISP_BAYER_CFA, 2, 0, 0)
+ /* ISP_CONV */
+ U16_CTRL(P7_CID_ISP_CC_COEFF_00, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_01_00, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_01, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_01_00, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_02, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_10_02, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_10, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_10_02, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_11, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_12_11, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_12, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_12_11, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_20, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_21_20, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_21, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION +
+ AVI_ISP_COLOR_CORRECTION_COEFF_21_20, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_COEFF_22, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_COEFF_22,
+ 14, 0, 0)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_RY_IN,
+ "Red or Y component offset: before matrix multiplication, the "
+ "pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_RY,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_RY_OUT,
+ "Red or Y component offsets: after the matrix multiplication, "
+ "the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_RY,
+ 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_RY_MIN,
+ "Red or Y component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_RY,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_RY_MAX,
+ "Red or Y component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_RY,
+ 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_GU_IN,
+ "Green or U component offset: before matrix multiplication, "
+ "the pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_GU,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_GU_OUT,
+ "Green or U component offsets: after the matrix "
+ "multiplication, the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_GU,
+ 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_GU_MIN,
+ "Green or U component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_GU,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_GU_MAX,
+ "Green or U component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_GU,
+ 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_BV_IN,
+ "Blue or V component offset: before matrix multiplication, "
+ "the pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_BV,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_OFFSET_BV_OUT,
+ "Blue or V component offsets: after the matrix "
+ "multiplication, the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_OFFSET_BV,
+ 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_BV_MIN,
+ "Blue or V component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_BV,
+ 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CC_CLIP_BV_MAX,
+ "Blue or V component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0,
+ AVI_ISP_COLOR_CORRECTION + AVI_ISP_COLOR_CORRECTION_CLIP_BV,
+ 10, 16, 1)
+ /* ISP_VLF_40TO32 */
+ U8_CTRL(P7_CID_ISP_VLF_40_TO_32, "VL Format 40 to 32", 0, 4, 0,
+ AVI_ISP_VL_FORMAT_40_T0_32 + AVI_ISP_VLFORMAT_40TO32_FORMAT,
+ 3, 0, 0)
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_gamma_ctrls[] = {
+ /* ISP_GAMMA_CORRECTION */
+ BOOLEAN_CTRL(P7_CID_ISP_GAMMA_BYPASS, "Bypass Gamma correction",
+ AVI_ISP_GAMMA_CORRECTOR_CONF, 1, 0, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_GAMMA_PALETTE,
+ "Palette mode (0 = non-linear, 1 = palette)",
+ AVI_ISP_GAMMA_CORRECTOR_CONF, 1, 1, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_GAMMA_COMP_WIDTH,
+ "Component width (0 = 8bits, 1 = 10bits)",
+ AVI_ISP_GAMMA_CORRECTOR_CONF, 1, 2, 1)
+ ARRAY8_CTRL(P7_CID_ISP_GAMMA_RY_LUT, "Red or Y curve",
+ 0, 255, 0, 1024, AVI_ISP_GAMMA_CORRECTOR_RY_LUT, 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_GAMMA_GU_LUT, "Green or U curve",
+ 0, 255, 0, 1024, AVI_ISP_GAMMA_CORRECTOR_GU_LUT, 8, 0, 0)
+ ARRAY8_CTRL(P7_CID_ISP_GAMMA_BV_LUT, "Blue or V curve",
+ 0, 255, 0, 1024, AVI_ISP_GAMMA_CORRECTOR_BV_LUT, 8, 0, 0)
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_conv_ctrls[] = {
+ /* ISP_YUV_CONV */
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_00, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_01_00, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_01, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_01_00, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_02, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_10_02, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_10, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_10_02, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_11, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_12_11, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_12, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_12_11, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_20, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_21_20, 14, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_21, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_21_20, 14, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_COEFF_22, "Coded in signed fixed-point Q2.11",
+ 0, 16383, 0, AVI_ISP_CHROMA_COEFF_22, 14, 0, 0)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_RY_IN,
+ "Red or Y component offset: before matrix multiplication, the "
+ "pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_RY, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_RY_OUT,
+ "Red or Y component offsets: after the matrix multiplication, "
+ "the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_RY, 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_RY_MIN,
+ "Red or Y component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_RY, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_RY_MAX,
+ "Red or Y component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_RY, 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_GU_IN,
+ "Green or U component offset: before matrix multiplication, "
+ "the pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_GU, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_GU_OUT,
+ "Green or U component offsets: after the matrix "
+ "multiplication, the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_GU, 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_GU_MIN,
+ "Green or U component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_GU, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_GU_MAX,
+ "Green or U component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_GU, 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_BV_IN,
+ "Blue or V component offset: before matrix multiplication, "
+ "the pixel vector is subtracted by ’OFFSET_IN’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_BV, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_OFFSET_BV_OUT,
+ "Blue or V component offsets: after the matrix "
+ "multiplication, the product is added with ’OFFSET_OUT’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_OFFSET_BV, 10, 16, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_BV_MIN,
+ "Blue or V component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_BV, 10, 0, 1)
+ U16_CTRL(P7_CID_ISP_CONV_CLIP_BV_MAX,
+ "Blue or V component clips: the output result is clipped "
+ "between ’CLIP_MIN’ and ’CLIP_MAX’ value",
+ 0, 1023, 0, AVI_ISP_CHROMA_CLIP_BV, 10, 16, 1)
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_stats_ctrls[] = {
+ _BOOLEAN_CTRL(P7_CID_ISP_YSTAT_MEASURE_REQ,
+ "Will measure at the beginning of the next frame",
+ AVI_ISP_STATISTICS_YUV_MEASURE_REQ,
+ 1, 0, 1, V4L2_CTRL_FLAG_VOLATILE)
+ _BOOLEAN_CTRL(P7_CID_ISP_YSTAT_CLEAR, "Clearing the histogram",
+ AVI_ISP_STATISTICS_YUV_MEASURE_REQ,
+ 1, 1, 1, V4L2_CTRL_FLAG_VOLATILE)
+ _BOOLEAN_CTRL(P7_CID_ISP_YSTAT_DONE,
+ "Asserted at the end of each measured frame",
+ AVI_ISP_STATISTICS_YUV_MEASURE_STATUS,
+ 1, 0, 1, V4L2_CTRL_FLAG_VOLATILE)
+ _BOOLEAN_CTRL(P7_CID_ISP_YSTAT_ERROR,
+ "Asserted if there is valid input stream during the "
+ "histogram clearing process",
+ AVI_ISP_STATISTICS_YUV_MEASURE_STATUS,
+ 1, 1, 1, V4L2_CTRL_FLAG_VOLATILE)
+ U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_X_START,
+ "Beginning of the measured window X",
+ 0, 8191, 0,
+ AVI_ISP_STATISTICS_YUV_WINDOW_POS_X,
+ 13, 0, 1)
+ U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_X_END, "End of the measured window X",
+ 0, 8191, 0,
+ AVI_ISP_STATISTICS_YUV_WINDOW_POS_X,
+ 13, 16, 1)
+ U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_Y_START,
+ "Beginning of the measured window Y",
+ 0, 8191, 0,
+ AVI_ISP_STATISTICS_YUV_WINDOW_POS_Y,
+ 13, 0, 1)
+ U16_CTRL(P7_CID_ISP_BSTAT_WINDOW_Y_END, "End of the measured window Y",
+ 0, 8191, 0,
+ AVI_ISP_STATISTICS_YUV_WINDOW_POS_Y,
+ 13, 16, 1)
+ U16_CTRL(P7_CID_ISP_YSTAT_CIRCLE_POS_X_CENTER, "Circle center X",
+ 0, 16383, 0,
+ AVI_ISP_STATISTICS_YUV_CIRCLE_POS_X_CENTER,
+ 14, 0, 0)
+ U32_CTRL(P7_CID_ISP_YSTAT_CIRCLE_POS_X_SQUARED, "X center squared",
+ 0, 67108863, 0,
+ AVI_ISP_STATISTICS_YUV_CIRCLE_POS_X_SQUARED,
+ 26, 0, 0)
+ U16_CTRL(P7_CID_ISP_YSTAT_CIRCLE_POS_Y_CENTER, "Circle center Y",
+ 0, 16383, 0,
+ AVI_ISP_STATISTICS_YUV_CIRCLE_POS_Y_CENTER,
+ 14, 0, 0)
+ U32_CTRL(P7_CID_ISP_YSTAT_CIRCLE_POS_Y_SQUARED, "Y center squared",
+ 0, 67108863, 0,
+ AVI_ISP_STATISTICS_YUV_CIRCLE_POS_Y_SQUARED,
+ 26, 0, 0)
+ U32_CTRL(P7_CID_ISP_YSTAT_CIRCLE_RADIUS_SQUARED,
+ "Circle radius squared",
+ 0, 536870911, 0,
+ AVI_ISP_STATISTICS_YUV_CIRCLE_RADIUS_SQUARED,
+ 29, 0, 0)
+ U8_CTRL(P7_CID_ISP_YSTAT_INC_X_LOG2_INC,
+ "Horizontal skipping/binning parameter",
+ 0, 7, 0,
+ AVI_ISP_STATISTICS_YUV_INCREMENTS_LOG2,
+ 3, 0, 1)
+ U8_CTRL(P7_CID_ISP_YSTAT_INC_Y_LOG2_INC,
+ "Vertical skipping/binning parameter",
+ 0, 7, 0,
+ AVI_ISP_STATISTICS_YUV_INCREMENTS_LOG2,
+ 3, 16, 1)
+ _U32_CTRL(P7_CID_ISP_YSTAT_AE_NB_VALID_Y, "Number of valid luma pixels",
+ 0, 4194303, 0,
+ AVI_ISP_STATISTICS_YUV_AE_NB_VALID_Y,
+ 22 , 0, 0, V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+ U8_CTRL(P7_CID_ISP_YSTAT_AWB_THRESHOLD,
+ "Auto white balancing threshold",
+ 0, 255, 0,
+ AVI_ISP_STATISTICS_YUV_AWB_THRESHOLD,
+ 8, 0, 0)
+ _U32_CTRL(P7_CID_ISP_YSTAT_AWB_SUM_Y, "White balancing sum of Y",
+ 0, 1073741823, 0,
+ AVI_ISP_STATISTICS_YUV_AWB_SUM_Y,
+ 30, 0, 0, V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_YSTAT_AWB_SUM_U, "White balancing sum of U",
+ 0, 1073741823, 0,
+ AVI_ISP_STATISTICS_YUV_AWB_SUM_U,
+ 30, 0, 0, V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_YSTAT_AWB_SUM_V, "White balancing sum of V",
+ 0, 1073741823, 0,
+ AVI_ISP_STATISTICS_YUV_AWB_SUM_V,
+ 30, 0, 0, V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+ _U32_CTRL(P7_CID_ISP_YSTAT_AWB_NB_GREY_PIXELS,
+ "Number of valid grey pixels",
+ 0, 4194303, 0,
+ AVI_ISP_STATISTICS_YUV_AWB_NB_GREY_PIXELS,
+ 22, 0, 0, V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+ _ARRAY32_CTRL(P7_CID_ISP_YSTAT_AE_HISTOGRAM_Y, "Value of each bin",
+ 0, 4194303, 0, 256,
+ AVI_ISP_STATISTICS_YUV_AE_HISTOGRAM_Y,
+ 22, 0, 0,
+ V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE)
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_yuv_ctrls[] = {
+ /* ISP EE_CRF_I3D_LUT_DROP */
+ BOOLEAN_CTRL(P7_CID_BYPASS_EE_CRF, "Bypass EE CRF ISP Module",
+ AVI_ISP_CHAIN_YUV_INTER +
+ AVI_ISP_CHAIN_YUV_INTER_MODULE_BYPASS, 1, 0, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_I3D_LUT, "Bypass I3D LUT ISP Module",
+ AVI_ISP_CHAIN_YUV_INTER +
+ AVI_ISP_CHAIN_YUV_INTER_MODULE_BYPASS, 1, 1, 1)
+ BOOLEAN_CTRL(P7_CID_BYPASS_DROP, "Bypass Drop ISP Module",
+ AVI_ISP_CHAIN_YUV_INTER +
+ AVI_ISP_CHAIN_YUV_INTER_MODULE_BYPASS, 1, 2, 1)
+ /* ISP_EE_CRF */
+ ARRAY8_CTRL(P7_CID_ISP_EE_CRF_EE_LUT, "EE LUT table", 0, 63, 0, 256,
+ AVI_ISP_EDGE_ENHANCEMENT +
+ AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_EE_LUT,
+ 6, 0, 0)
+ ARRAY16_CTRL(P7_CID_ISP_EE_CRF_EE_KERNEL_COEFF, "EE kernel",
+ 0, 2047, 0, 6,
+ AVI_ISP_EDGE_ENHANCEMENT +
+ AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_EE_KERNEL_COEFF,
+ 11, 0, 0)
+ ARRAY16_CTRL(P7_CID_ISP_EE_CRF_CRF_KERNEL_COEFF, "CRF kernel",
+ 0, 2047, 0, 6,
+ AVI_ISP_EDGE_ENHANCEMENT +
+ AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_CRF_KERNEL_COEFF,
+ 11, 0, 0)
+ U8_CTRL(P7_CID_ISP_EE_CRF_M_COEFF, "Sobel threshold",
+ 0, 255, 0,
+ AVI_ISP_EDGE_ENHANCEMENT +
+ AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_M_COEFF,
+ 8, 0, 0)
+ U16_CTRL(P7_CID_ISP_EE_CRF_D_COEFF, "Normalization factor", 0, 2047, 0,
+ AVI_ISP_EDGE_ENHANCEMENT +
+ AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_D_COEFF,
+ 11, 0, 0)
+ /* ISP_DROP */
+ U16_CTRL(P7_CID_ISP_DROP_OFFSET_X, "Horizontal offset", 0, 65535, 0,
+ AVI_ISP_DROP + AVI_ISP_DROP_OFFSET_X, 16, 0, 0)
+ U16_CTRL(P7_CID_ISP_DROP_OFFSET_Y, "Vertical offset", 0, 65535, 0,
+ AVI_ISP_DROP + AVI_ISP_DROP_OFFSET_Y, 16, 0, 0)
+};
+
+static const struct avi_v4l2_isp_ctrl_config isp_i3d_ctrls[] = {
+ /* ISP_I3D_LUT */
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_OUTSIDE_RY, "Red or Y component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_OUTSIDE, 8, 16, 1)
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_OUTSIDE_GU, "Green or U component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_OUTSIDE, 8, 8, 1)
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_OUTSIDE_BV, "Blue or V component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_OUTSIDE, 8, 0, 1)
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_INSIDE_RY, "Red or Y component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_INSIDE, 8, 16, 1)
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_INSIDE_GU, "Green or U component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_INSIDE, 8, 8, 1)
+ ARRAY8_CTRL(P7_CID_ISP_I3D_LUT_INSIDE_BV, "Blue or V component value",
+ 0, 255, 0, 125,
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_LUT_INSIDE, 8, 0, 1)
+ BOOLEAN_CTRL(P7_CID_ISP_I3D_LUT_CLIP_MODE, "YUV Clip",
+ AVI_ISP_I3D_LUT + AVI_ISP_I3D_LUT_CLIP_MODE, 1, 0, 0)
+};
+
+struct avi_v4l2_isp_node {
+ int is_activate;
+ u32 base_node;
+};
+
+struct avi_v4l2_isp_priv {
+ const struct avi_v4l2_isp_reg *reg;
+ struct avi_v4l2_isp_node *node;
+ int blanking_update;
+};
+
+struct avi_v4l2_isp {
+ /* ISP chain bayer */
+ struct v4l2_ctrl *bayer_ctrls[ARRAY_SIZE(isp_bayer_ctrls)];
+ struct avi_v4l2_isp_priv bayer_priv[ARRAY_SIZE(isp_bayer_ctrls)];
+ struct avi_v4l2_isp_node bayer_node;
+ /* ISP Gamma correction */
+ struct v4l2_ctrl *gamma_ctrls[ARRAY_SIZE(isp_gamma_ctrls)];
+ struct avi_v4l2_isp_priv gamma_priv[ARRAY_SIZE(isp_gamma_ctrls)];
+ struct avi_v4l2_isp_node gamma_node;
+ /* ISP Converter */
+ struct v4l2_ctrl *conv_ctrls[ARRAY_SIZE(isp_conv_ctrls)];
+ struct avi_v4l2_isp_priv conv_priv[ARRAY_SIZE(isp_conv_ctrls)];
+ struct avi_v4l2_isp_node conv_node;
+ /* ISP statistics YUV */
+ struct v4l2_ctrl *stats_ctrls[ARRAY_SIZE(isp_stats_ctrls)];
+ struct avi_v4l2_isp_priv stats_priv[ARRAY_SIZE(isp_stats_ctrls)];
+ struct avi_v4l2_isp_node stats_node;
+ /* ISP chain YUV */
+ struct v4l2_ctrl *yuv_ctrls[ARRAY_SIZE(isp_yuv_ctrls)];
+ struct avi_v4l2_isp_priv yuv_priv[ARRAY_SIZE(isp_yuv_ctrls)];
+ struct avi_v4l2_isp_node yuv_node;
+ /* ISP chain YUV: I3D LUT */
+ struct v4l2_ctrl *i3d_ctrls[ARRAY_SIZE(isp_i3d_ctrls)];
+ struct avi_v4l2_isp_priv i3d_priv[ARRAY_SIZE(isp_i3d_ctrls)];
+ struct avi_v4l2_isp_node i3d_node;
+};
+
+static int avi_v4l2_isp_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct avi_v4l2_isp_priv *priv = ctrl->priv;
+ const struct avi_v4l2_isp_reg *isp_reg = priv->reg;
+ u32 base_node = priv->node->base_node;
+ u32 value, i;
+
+ /* Do not change ISP configuration when controls are not activated */
+ if (!priv->node->is_activate)
+ return 0;
+
+ /* Read values from registers */
+ for (i = 0; i < isp_reg->count; i++) {
+ /* Get register value */
+ value = (AVI_READ(base_node + isp_reg->addr + (i * sizeof(u32)))
+ & isp_reg->mask) >> isp_reg->shift;
+
+ /* Get new value */
+ if (ctrl->is_array)
+ ctrl->p_new.p_s32[i] = value;
+ else
+ ctrl->val = value;
+ }
+
+ return 0;
+}
+
+static int avi_v4l2_isp_set(struct v4l2_ctrl *ctrl,
+ const struct avi_v4l2_isp_reg *isp_reg,
+ u32 base_node)
+{
+ u32 reg, value, i;
+
+ /* Special case for Denoising registers */
+ if (ctrl->id == P7_CID_ISP_DENOISE_LUMOCOEFF_RED ||
+ ctrl->id == P7_CID_ISP_DENOISE_LUMOCOEFF_GREEN ||
+ ctrl->id == P7_CID_ISP_DENOISE_LUMOCOEFF_BLUE) {
+ for (i = 0; i < isp_reg->count; i += 4) {
+ /* Prepare value */
+ reg = (ctrl->p_new.p_u8[i+1] << 8) |
+ ctrl->p_new.p_u8[i];
+ if (i < 12)
+ reg |= (ctrl->p_new.p_u8[i+3] << 24) |
+ (ctrl->p_new.p_u8[i+2] << 16);
+
+ /* Write to register */
+ AVI_WRITE(reg, base_node + isp_reg->addr + i);
+ }
+ return 0;
+ }
+
+	/* Special case for chromatic aberration registers */
+ if (ctrl->id == P7_CID_ISP_CA_RADIUS_SQUARED ||
+ ctrl->id == P7_CID_ISP_CA_DISPLACEMENT_BLUE_COEFF ||
+ ctrl->id == P7_CID_ISP_CA_DISPLACEMENT_RED_COEFF) {
+ for (i = 0; i < isp_reg->count; i++) {
+ /* Prepare new value */
+ if (ctrl->id != P7_CID_ISP_CA_RADIUS_SQUARED) {
+ reg = AVI_READ(base_node + isp_reg->addr +
+ (2 * i * sizeof(u32)));
+ reg = ((ctrl->p_new.p_u16[i] << isp_reg->shift)
+ & isp_reg->mask) | (reg & ~isp_reg->mask);
+ } else {
+ reg = ctrl->p_new.p_s32[i] & 0xFFFFFF;
+ }
+
+ /* Write value to registers */
+ AVI_WRITE(reg, base_node + isp_reg->addr +
+ (2 * i * sizeof(u32)));
+ }
+ return 0;
+ }
+
+ /* Write values to registers */
+ for (i = 0; i < isp_reg->count; i++) {
+ /* Read register before */
+ if (isp_reg->read)
+ reg = AVI_READ(base_node + isp_reg->addr +
+ (i * sizeof(u32)));
+ else
+ reg = 0;
+
+ /* Get new value */
+ if (ctrl->is_array) {
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_U16:
+ value = ctrl->p_new.p_u16[i];
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ value = ctrl->p_new.p_u8[i];
+ break;
+ default:
+ value = 0;
+ }
+ } else
+ value = ctrl->val;
+
+ /* Prepare value for write */
+ reg = ((value << isp_reg->shift) & isp_reg->mask) |
+ (reg & ~isp_reg->mask);
+
+ /* Write to register */
+ AVI_WRITE(reg, base_node + isp_reg->addr + (i * sizeof(u32)));
+ }
+
+ return 0;
+}
+
+static int avi_v4l2_isp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct avi_v4l2_isp_priv *priv = ctrl->priv;
+ const struct avi_v4l2_isp_reg *isp_reg = priv->reg;
+ u32 base_node = priv->node->base_node;
+
+ /* Do not change ISP configuration when controls are not activated */
+ if (!priv->node->is_activate)
+ return 0;
+
+ /* These registers are not written */
+ if (ctrl->id == P7_CID_ISP_YSTAT_AE_NB_VALID_Y ||
+ ctrl->id == P7_CID_ISP_YSTAT_AWB_SUM_Y ||
+ ctrl->id == P7_CID_ISP_YSTAT_AWB_SUM_U ||
+ ctrl->id == P7_CID_ISP_YSTAT_AWB_SUM_V ||
+ ctrl->id == P7_CID_ISP_YSTAT_AWB_NB_GREY_PIXELS ||
+ ctrl->id == P7_CID_ISP_YSTAT_AE_HISTOGRAM_Y)
+ return -EINVAL;
+
+ /* Workaround: I3D LUT must be set during blanking period */
+ if (ctrl->id == P7_CID_ISP_I3D_LUT_OUTSIDE_RY ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_OUTSIDE_GU ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_OUTSIDE_BV ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_INSIDE_RY ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_INSIDE_GU ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_INSIDE_BV ||
+ ctrl->id == P7_CID_ISP_I3D_LUT_CLIP_MODE) {
+ /* Update configuration at next blanking call */
+ priv->blanking_update = 1;
+ return 0;
+ }
+
+ /* Set control */
+ return avi_v4l2_isp_set(ctrl, isp_reg, base_node);
+}
+
+/* A hardware bug in the I3D LUT block implies its configuration must be done
+ * during the blanking period or when the stream is off.
+ */
+int avi_v4l2_isp_blanking(struct avi_v4l2_isp *isp)
+{
+ int i;
+
+	/* Return immediately when ISP is not activated */
+ if (!isp->i3d_node.is_activate)
+ return 0;
+
+ /* Set I3D LUT configuration */
+ for (i = 0; i < ARRAY_SIZE(isp_i3d_ctrls); i++) {
+ if (isp->i3d_priv[i].blanking_update) {
+ avi_v4l2_isp_set(isp->i3d_ctrls[i],
+ isp->i3d_priv[i].reg,
+ isp->i3d_node.base_node);
+ isp->i3d_priv[i].blanking_update = 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_v4l2_isp_blanking);
+
+int avi_v4l2_isp_activate(struct avi_v4l2_isp *isp,
+ struct avi_isp_offsets *offsets)
+{
+ int ret = 0, i;
+
+#define RESTORE_CONTROLS(_off, _name) \
+for (i = 0; i < ARRAY_SIZE(isp_ ## _name ## _ctrls); i++) { \
+ isp->_name ## _node.is_activate = 1; \
+ isp->_name ## _node.base_node = avi_ctrl.base + offsets->_off; \
+ if (isp->_name ## _ctrls[i] != NULL) \
+ ret |= avi_v4l2_isp_s_ctrl(isp->_name ## _ctrls[i]); \
+}
+
+	/* Set ISP offset and restore all control values */
+ RESTORE_CONTROLS(chain_bayer, bayer);
+ RESTORE_CONTROLS(gamma_corrector, gamma);
+ RESTORE_CONTROLS(chroma, conv);
+ RESTORE_CONTROLS(statistics_yuv, stats);
+ RESTORE_CONTROLS(chain_yuv, yuv);
+ RESTORE_CONTROLS(chain_yuv, i3d);
+
+#undef RESTORE_CONTROLS
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_v4l2_isp_activate);
+
+void avi_v4l2_isp_deactivate(struct avi_v4l2_isp *isp)
+{
+ /* Deactivate ISP chain */
+ isp->bayer_node.is_activate = 0;
+ isp->gamma_node.is_activate = 0;
+ isp->conv_node.is_activate = 0;
+ isp->stats_node.is_activate = 0;
+ isp->yuv_node.is_activate = 0;
+ isp->i3d_node.is_activate = 0;
+}
+EXPORT_SYMBOL(avi_v4l2_isp_deactivate);
+
+int avi_v4l2_isp_init(struct avi_v4l2_isp **_isp,
+ struct v4l2_ctrl_handler *ctrl_handler)
+{
+ struct avi_v4l2_isp *isp;
+ int i;
+
+ BUG_ON(!ctrl_handler);
+
+ /* Allocate a new ISP context */
+ *_isp = kzalloc(sizeof(*isp), GFP_KERNEL);
+ if (*_isp == NULL)
+ return -ENOMEM;
+ isp = *_isp;
+
+#define ADD_CONTROLS(_name) \
+for (i = 0; i < ARRAY_SIZE(isp_ ## _name ## _ctrls); i++) { \
+ isp->_name ## _priv[i].node = &isp->_name ## _node; \
+ isp->_name ## _priv[i].reg = &isp_ ## _name ## _ctrls[i].reg; \
+ isp->_name ## _ctrls[i] = v4l2_ctrl_new_custom(ctrl_handler, \
+ &isp_ ## _name ## _ctrls[i].cfg, \
+ &isp->_name ## _priv[i]); \
+}
+
+ /* Add ISP controls to control handler */
+ ADD_CONTROLS(bayer);
+ ADD_CONTROLS(gamma);
+ ADD_CONTROLS(conv);
+ ADD_CONTROLS(stats);
+ ADD_CONTROLS(yuv);
+ ADD_CONTROLS(i3d);
+
+#undef ADD_CONTROLS
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_v4l2_isp_init);
+
+void avi_v4l2_isp_free(struct avi_v4l2_isp *isp)
+{
+ if (!isp)
+ return;
+ kfree(isp);
+}
+EXPORT_SYMBOL(avi_v4l2_isp_free);
+
+MODULE_AUTHOR("Alexandre Dilly <alexandre.dilly@parrot.com>");
+MODULE_DESCRIPTION("V4L2 controls for ISP chains of Advanced Video Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_v4l2_isp.h b/drivers/parrot/media/video/avi_v4l2_isp.h
new file mode 100644
index 00000000000000..eb586ce45e2e9c
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2_isp.h
@@ -0,0 +1,80 @@
+/*
+ * linux/drivers/parrot/video/media/avi_v4l2_isp.h
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Alexandre Dilly <alexandre.dilly@parrot.com>
+ * @date 01-Jan-2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_V4L2_ISP_H_
+#define _AVI_V4L2_ISP_H_
+
+#include <media/v4l2-ctrls.h>
+
+#include "../../video/avi_isp.h"
+
+#define AVI_V4L2_ISP_CONTROL_HINT 182
+
+struct avi_v4l2_isp;
+
+/** avi_v4l2_isp_blanking() - Blanking ISP task.
+ * @isp: The ISP handler.
+ *
+ * Some ISP configuration tasks must be done during the blanking period, like
+ * I3D LUT. It is recommended to call this function just after getting a new frame.
+ */
+extern int avi_v4l2_isp_blanking(struct avi_v4l2_isp *isp);
+
+/** avi_v4l2_isp_activate() - Activate ISP chain.
+ * @isp: The ISP handler.
+ * @offsets: The ISP chain offsets.
+ *
+ * This activates the ISP chain and restores all context from controls (all
+ * registers of the selected ISP chain).
+ *
+ * This function should be called in a streamon().
+ */
+extern int avi_v4l2_isp_activate(struct avi_v4l2_isp *isp,
+ struct avi_isp_offsets *offsets);
+
+/** avi_v4l2_isp_deactivate() - Deactivate ISP chain.
+ * @isp: The ISP handler.
+ *
+ * This deactivates the ISP chain, which can then be used by another module. The
+ * context is saved in controls and will be restored at next call of
+ * avi_v4l2_isp_activate().
+ *
+ * This function should be called in a streamoff().
+ */
+extern void avi_v4l2_isp_deactivate(struct avi_v4l2_isp *isp);
+
+/** avi_v4l2_isp_init() - Init ISP control handler.
+ * @isp: The ISP handler.
+ * @ctrl_handler: The control handler.
+ *
+ * This allocates and initializes the ISP handler and adds all controls to the control handler.
+ */
+extern int avi_v4l2_isp_init(struct avi_v4l2_isp **isp,
+ struct v4l2_ctrl_handler *ctrl_handler);
+
+/** avi_v4l2_isp_free() - Free ISP control handler.
+ * @isp: The ISP handler.
+ */
+extern void avi_v4l2_isp_free(struct avi_v4l2_isp *isp);
+
+#endif /* _AVI_V4L2_ISP_H_ */
diff --git a/drivers/parrot/media/video/avi_v4l2_isp_ctrls.h b/drivers/parrot/media/video/avi_v4l2_isp_ctrls.h
new file mode 100644
index 00000000000000..5fc65f78d3c975
--- /dev/null
+++ b/drivers/parrot/media/video/avi_v4l2_isp_ctrls.h
@@ -0,0 +1,649 @@
+/*
+ * linux/drivers/parrot/video/media/avi_v4l2_isp_ctrls.h
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Alexandre Dilly <alexandre.dilly@parrot.com>
+ * @date 01-Jan-2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_V4L2_ISP_CTRLS_H_
+#define _AVI_V4L2_ISP_CTRLS_H_
+
+#include <linux/videodev2.h>
+
+/* Definition of controls
+ * All V4L2 controls are defined in this header with the following syntax:
+ * NAME: (TYPE) Description
+ * Where NAME is the name of control to pass as ID in v4l2_ext_control
+ * structure, TYPE is one of type provided by V4L2 control API:
+ * - bool for a boolean (V4L2_CTRL_TYPE_BOOLEAN)
+ * - int for an integer (V4L2_CTRL_TYPE_INTEGER)
+ * The syntax format is (int Xb [MIN:MAX]) where X is number of bytes and
+ * MIN/MAX are respectively the minimum and maximum value allowed by the
+ * control. If the value passed in v4l2_ext_control is out of range, all
+ * set of controls in v4l2_ext_controls fail.
+ * - ary[Y] for an array (multi types (U8 to S32) with 1 dimension: matrix
+ * of 1xY)
+ * The syntax is the same as integer.
+ * To use with an v4l2_ext_control, the local array is passed through its
+ * pointer with one of the following field:
+ * - p_u8 for an U8. Size is equal to Y (count of value in the
+ * array),
+ * - p_u16 for an U16. Size is equal to Y*2,
+ * - ptr for an S/U32. Size is equal to Y*4.
+ *
+ * For more information about the ext controls, see videodev2.h and
+ * v4l2_ext_control and v4l2_ext_controls structures.
+ */
+
+/* Color Filter Array */
+enum p7_isp_cfa {
+ P7_ISP_CFA_BGGR = 0,
+ P7_ISP_CFA_RGGB = 1,
+ P7_ISP_CFA_GBRG = 2,
+ P7_ISP_CFA_GRBG = 3,
+};
+
+#define P7_CID_ISP_BASE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 10)
+
+/* Bypass ISP module
+ * BYPASS_PEDESTAL: (bool) Bypass Pedestal block
+ * BYPASS_GRIM: (bool) Bypass Green Imbalance block
+ * BYPASS_RIP: (bool) Bypass Dead Pixel Correction block
+ * BYPASS_DENOISE: (bool) Bypass Denoising block
+ * BYPASS_LSC: (bool) Bypass Lens Shading Correction block
+ * BYPASS_CHROMA_ABER: (bool) Bypass Chromatic Aberration block
+ * BYPASS_BAYER: (bool) Bypass Bayer block
+ * BYPASS_COLOR_MATRIX: (bool) Bypass Color Correction block
+ */
+#define P7_CID_BYPASS_BASE (P7_CID_ISP_BASE + 1)
+#define P7_CID_BYPASS_PEDESTAL (P7_CID_BYPASS_BASE + 1)
+#define P7_CID_BYPASS_GRIM (P7_CID_BYPASS_BASE + 2)
+#define P7_CID_BYPASS_RIP (P7_CID_BYPASS_BASE + 3)
+#define P7_CID_BYPASS_DENOISE (P7_CID_BYPASS_BASE + 4)
+#define P7_CID_BYPASS_LSC (P7_CID_BYPASS_BASE + 5)
+#define P7_CID_BYPASS_CHROMA_ABER (P7_CID_BYPASS_BASE + 6)
+#define P7_CID_BYPASS_BAYER (P7_CID_BYPASS_BASE + 7)
+#define P7_CID_BYPASS_COLOR_MATRIX (P7_CID_BYPASS_BASE + 8)
+
+/* Bypass YUV ISP module
+ * BYPASS_EE_CRF: (bool) Bypass Edge Enhancement block
+ * BYPASS_I3D_LUT: (bool) Bypass I3D block
+ * BYPASS_DROP: (bool) Bypass Drop block
+ */
+#define P7_CID_BYPASS_YUV_BASE (P7_CID_BYPASS_BASE + 10)
+#define P7_CID_BYPASS_EE_CRF (P7_CID_BYPASS_YUV_BASE + 1)
+#define P7_CID_BYPASS_I3D_LUT (P7_CID_BYPASS_YUV_BASE + 2)
+#define P7_CID_BYPASS_DROP (P7_CID_BYPASS_YUV_BASE + 3)
+
+/* VL Format 32 to 40 ISP module
+ * VLF_32_TO_40: (int 3b [0:4]) Convert format from 32 to 40
+ * see enum p7_vlf_32to40 for types.
+ */
+#define P7_CID_ISP_VLF_32_TO_40 (P7_CID_BYPASS_YUV_BASE + 5)
+enum p7_vlf_32to40 {
+ P7_VLF_32_RAW10_TO_RAW10,
+ P7_VLF_32_RAW8_TO_RAW10,
+ P7_VLF_32_RAW3x10_TO_RAW10,
+ P7_VLF_32_RGB3x10_TO_RGB3x10,
+ P7_VLF_32_ARGB_TO_RGB3x10,
+};
+
+/* Pedestal ISP module
+ * PEDESTAL_SUB_R: (int 10b [0:1023]) Subtract X from Red pixel
+ * PEDESTAL_SUB_GB: (int 10b [0:1023]) Subtract X from Green red pixel
+ * PEDESTAL_SUB_GR: (int 10b [0:1023]) Subtract X from Green blue pixel
+ * PEDESTAL_SUB_B: (int 10b [0:1023]) Subtract X from Blue pixel
+ * PEDESTAL_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_PEDESTAL_BASE (P7_CID_BYPASS_YUV_BASE + 10)
+#define P7_CID_ISP_PEDESTAL_SUB_R (P7_CID_ISP_PEDESTAL_BASE + 1)
+#define P7_CID_ISP_PEDESTAL_SUB_GB (P7_CID_ISP_PEDESTAL_BASE + 2)
+#define P7_CID_ISP_PEDESTAL_SUB_GR (P7_CID_ISP_PEDESTAL_BASE + 3)
+#define P7_CID_ISP_PEDESTAL_SUB_B (P7_CID_ISP_PEDESTAL_BASE + 4)
+#define P7_CID_ISP_PEDESTAL_CFA (P7_CID_ISP_PEDESTAL_BASE + 5)
+
+/* Green Imbalance ISP module
+ * GRIM_OFFSET_X: (int 9b [0:511]) Offset of top left pixel from
+ * top left vertice of cell
+ * GRIM_OFFSET_Y: (int 10b [0:1023]) Offset of top left pixel from
+ * top left vertice of cell
+ * GRIM_CELL_ID_X: (int 4b [0:15]) Horizontal cell index to which
+ * top left pixel belongs
+ * GRIM_CELL_ID_Y: (int 4b [0:15]) Vertical cell index to which top
+ * left pixel belongs
+ * GRIM_CELL_W: (int 9b [0:511]) Mesh cells width in pixel
+ * GRIM_CELL_H: (int 10b [0:1023]) Mesh cells height in pixel
+ * GRIM_CELL_W_INV: (int 17b [0:131071]) Mesh cells inverse width
+ * GRIM_CELL_H_INV: (int 17b [0:131071]) Mesh cells inverse height
+ * GRIM_ALPHA: (int 17b [0:131071]) Normalised horizontal offset of
+ * top left pixel from top left
+ * vertice of cell
+ * GRIM_BETA: (int 17b [0:131071]) Normalised vertical offset of
+ * top left pixel from top left
+ * vertice of cell
+ * GRIM_GR_COEFF: (ary[221] 8b [0:255]) Red coefficients: Correction
+ * mesh for R channel
+ * GRIM_GB_COEFF: (ary[221] 8b [0:255]) Blue coefficients: Correction
+ * mesh for B channel
+ * GRIM_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_GRIM_BASE (P7_CID_ISP_PEDESTAL_BASE + 10)
+#define P7_CID_ISP_GRIM_OFFSET_X (P7_CID_ISP_GRIM_BASE + 1)
+#define P7_CID_ISP_GRIM_OFFSET_Y (P7_CID_ISP_GRIM_BASE + 2)
+#define P7_CID_ISP_GRIM_CELL_ID_X (P7_CID_ISP_GRIM_BASE + 3)
+#define P7_CID_ISP_GRIM_CELL_ID_Y (P7_CID_ISP_GRIM_BASE + 4)
+#define P7_CID_ISP_GRIM_CELL_W (P7_CID_ISP_GRIM_BASE + 5)
+#define P7_CID_ISP_GRIM_CELL_H (P7_CID_ISP_GRIM_BASE + 6)
+#define P7_CID_ISP_GRIM_CELL_W_INV (P7_CID_ISP_GRIM_BASE + 7)
+#define P7_CID_ISP_GRIM_CELL_H_INV (P7_CID_ISP_GRIM_BASE + 8)
+#define P7_CID_ISP_GRIM_ALPHA (P7_CID_ISP_GRIM_BASE + 9)
+#define P7_CID_ISP_GRIM_BETA (P7_CID_ISP_GRIM_BASE + 10)
+#define P7_CID_ISP_GRIM_GR_COEFF (P7_CID_ISP_GRIM_BASE + 11)
+#define P7_CID_ISP_GRIM_GB_COEFF (P7_CID_ISP_GRIM_BASE + 12)
+#define P7_CID_ISP_GRIM_CFA (P7_CID_ISP_GRIM_BASE + 13)
+
+/* Reconstruction Incorrect Pixel ISP module
+ * LIST_MEM_X: (ary[256] 11b [0:2047]) Abscissa of known dead
+ * pixel
+ * LIST_MEM_Y: (ary[256] 12b [0:4095]) Ordinate of known dead
+ * pixel
+ * LIST_MEM_OP: (ary[256] 3bits [0:7]) List of suitable
+ * corrections for each
+ * corresponding dead pixel
+ * position
+ * BYPASS_RGRIM: (bool) Deactivates list mode
+ * BYPASS_AUTO_DETECTION: (bool) Deactivates live mode
+ * BYPASS_LIST: (bool) Deactivates the RGRIM
+ * filter list mode, which
+ * in turn takes precedence
+ * over the RGRIM filter
+ * THRESHOLD: (int 10b [0:1023]) Used in Live mode to
+ * compare one pixel to its
+ * neighbours
+ * RGRIM_CONF_TCON: (int 8b [0:255]) Contrast
+ * RGRIM_CONF_TSAT: (int 8b [0:255]) Saturation
+ * RGRIM_CONF_TIM_2: (int 8b [0:255]) Imbalance 2
+ * RGRIM_CONF_TIM_1: (int 8b [0:255]) Imbalance 1
+ * RGRIM_GAIN: (int 7b [0:127]) The gain obeys the
+ * formula : actual gain =
+ * 20 * gain / 4096
+ * RIP_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_RIP_BASE (P7_CID_ISP_GRIM_BASE + 20)
+#define P7_CID_ISP_RIP_LIST_MEM_X (P7_CID_ISP_RIP_BASE + 1)
+#define P7_CID_ISP_RIP_LIST_MEM_Y (P7_CID_ISP_RIP_BASE + 2)
+#define P7_CID_ISP_RIP_LIST_MEM_OP (P7_CID_ISP_RIP_BASE + 3)
+#define P7_CID_ISP_RIP_BYPASS_RGRIM (P7_CID_ISP_RIP_BASE + 4)
+#define P7_CID_ISP_RIP_BYPASS_AUTO_DETECTION (P7_CID_ISP_RIP_BASE + 5)
+#define P7_CID_ISP_RIP_BYPASS_LIST (P7_CID_ISP_RIP_BASE + 6)
+#define P7_CID_ISP_RIP_THRESHOLD (P7_CID_ISP_RIP_BASE + 7)
+#define P7_CID_ISP_RIP_RGRIM_CONF_TCON (P7_CID_ISP_RIP_BASE + 8)
+#define P7_CID_ISP_RIP_RGRIM_CONF_TSAT (P7_CID_ISP_RIP_BASE + 9)
+#define P7_CID_ISP_RIP_RGRIM_CONF_TIM_2 (P7_CID_ISP_RIP_BASE + 10)
+#define P7_CID_ISP_RIP_RGRIM_CONF_TIM_1 (P7_CID_ISP_RIP_BASE + 11)
+#define P7_CID_ISP_RIP_RGRIM_GAIN (P7_CID_ISP_RIP_BASE + 12)
+#define P7_CID_ISP_RIP_CFA (P7_CID_ISP_RIP_BASE + 13)
+
+/* Denoising ISP module
+ * DENOISE_LUMOCOEFF_RED: (ary[14] 8b [0:255]) Ordinates of lumo coeffs
+ * Red
+ * DENOISE_LUMOCOEFF_BLUE: (ary[14] 8b [0:255]) Ordinates of lumo coeffs
+ * Blue
+ * DENOISE_LUMOCOEFF_GREEN: (ary[14] 8b [0:255]) Ordinates of lumo coeffs
+ * Green
+ * DENOISE_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_DENOISE_BASE (P7_CID_ISP_RIP_BASE + 15)
+#define P7_CID_ISP_DENOISE_LUMOCOEFF_RED (P7_CID_ISP_DENOISE_BASE + 1)
+#define P7_CID_ISP_DENOISE_LUMOCOEFF_BLUE (P7_CID_ISP_DENOISE_BASE + 2)
+#define P7_CID_ISP_DENOISE_LUMOCOEFF_GREEN (P7_CID_ISP_DENOISE_BASE + 3)
+#define P7_CID_ISP_DENOISE_CFA (P7_CID_ISP_DENOISE_BASE + 4)
+
+/* Bayer Statistics ISP module
+ * BSTAT_MEASURE_REQ_CLEAR: (bool) Force clear the
+ * histogram, usually done
+ * the first time before
+ * enabling the stats bayer
+ * module
+ * BSTAT_WINDOW_X_WIDTH: (int 11b [0:2047]) Width of a thumbnail
+ * pixel in sensor pixels
+ * (how many columns are
+ * averaged into one)
+ * BSTAT_WINDOW_X_OFFSET: (int 13b [0:8191]) Origin abscissa of the
+ * statistics window in the
+ * captured image
+ * BSTAT_WINDOW_Y_HEIGHT: (int 11b [0:2047]) Height of a thumbnail
+ * pixel in sensor pixels
+ * (how many rows are
+ * averaged into one)
+ * BSTAT_WINDOW_Y_OFFSET: (int 13b [0:8191]) Origin ordinate of the
+ * statistics window in the
+ * captured image
+ * BSTAT_CIRCLE_POS_X_CENTER: (int 14b [0:16383]) Center abscissa of the
+ * image circle in sensor
+ * pixels
+ * BSTAT_CIRCLE_POS_X_SQUARED: (int 26b [0:67108863]) Center abscissa of the
+ * image circle in sensor
+ * pixels value squared
+ * BSTAT_CIRCLE_POS_Y_CENTER: (int 14b [0:16383]) Center ordinate of the
+ * image circle in sensor
+ * pixels
+ * BSTAT_CIRCLE_POS_Y_SQUARED: (int 26b [0:67108863]) Center ordinate of the
+ * image circle in sensor
+ * pixels value squared
+ * BSTAT_CIRCLE_RADIUS_SQUARED: (int 29b [0:536870911])Squared radius of the
+ * image circle
+ * BSTAT_INC_X_LOG2_INC: (int 3b [0:7]) Horizontal
+ * skipping/binning
+ * parameter
+ * BSTAT_INC_Y_LOG2_INC: (int 3b [0:7]) Vertical
+ * skipping/binning
+ * parameter
+ * BSTAT_SAT_THRESHOLD: (int 10b [0:1023]) Below this threshold a
+ * pixel is considered
+ * unsaturated, a Bayer
+ * block is saturated if
+ * any of its 4 pixels is
+ * saturated
+ * BSTAT_MAX_X_WINDOW_COUNT: (int 7b [0:127]) Width of thumbnail
+ * BSTAT_MAX_Y_WINDOW_COUNT: (int 7b [0:127]) Height of thumbnail
+ * BSTAT_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_BSTAT_BASE (P7_CID_ISP_DENOISE_BASE + 5)
+#define P7_CID_ISP_BSTAT_MEASURE_REQ_CLEAR (P7_CID_ISP_BSTAT_BASE + 1)
+#define P7_CID_ISP_BSTAT_WINDOW_X_WIDTH (P7_CID_ISP_BSTAT_BASE + 2)
+#define P7_CID_ISP_BSTAT_WINDOW_X_OFFSET (P7_CID_ISP_BSTAT_BASE + 3)
+#define P7_CID_ISP_BSTAT_WINDOW_Y_HEIGHT (P7_CID_ISP_BSTAT_BASE + 4)
+#define P7_CID_ISP_BSTAT_WINDOW_Y_OFFSET (P7_CID_ISP_BSTAT_BASE + 5)
+#define P7_CID_ISP_BSTAT_CIRCLE_POS_X_CENTER (P7_CID_ISP_BSTAT_BASE + 6)
+#define P7_CID_ISP_BSTAT_CIRCLE_POS_X_SQUARED (P7_CID_ISP_BSTAT_BASE + 7)
+#define P7_CID_ISP_BSTAT_CIRCLE_POS_Y_CENTER (P7_CID_ISP_BSTAT_BASE + 8)
+#define P7_CID_ISP_BSTAT_CIRCLE_POS_Y_SQUARED (P7_CID_ISP_BSTAT_BASE + 9)
+#define P7_CID_ISP_BSTAT_CIRCLE_RADIUS_SQUARED (P7_CID_ISP_BSTAT_BASE + 10)
+#define P7_CID_ISP_BSTAT_INC_X_LOG2_INC (P7_CID_ISP_BSTAT_BASE + 11)
+#define P7_CID_ISP_BSTAT_INC_Y_LOG2_INC (P7_CID_ISP_BSTAT_BASE + 12)
+#define P7_CID_ISP_BSTAT_SAT_THRESHOLD (P7_CID_ISP_BSTAT_BASE + 13)
+#define P7_CID_ISP_BSTAT_MAX_X_WINDOW_COUNT (P7_CID_ISP_BSTAT_BASE + 14)
+#define P7_CID_ISP_BSTAT_MAX_Y_WINDOW_COUNT (P7_CID_ISP_BSTAT_BASE + 15)
+#define P7_CID_ISP_BSTAT_CFA (P7_CID_ISP_BSTAT_BASE + 16)
+
+/* Lens Shading Correction ISP module
+ * LSC_OFFSET_X: (int 9b [0:511]) Offset of top left pixel from
+ * top left vertice of cell
+ * LSC_OFFSET_Y: (int 10b [0:1023]) Offset of top left pixel from
+ * top left vertice of cell
+ * LSC_CELL_ID_X: (int 4b [0:15]) Horizontal cell index to which
+ * top left pixel belongs
+ * LSC_CELL_ID_Y: (int 4b [0:15]) Vertical cell index to which top
+ * left pixel belongs
+ * LSC_CELL_W: (int 9b [0:511]) Mesh cells width in pixel
+ * LSC_CELL_H: (int 10b [0:1023]) Mesh cells height in pixel
+ * LSC_CELL_W_INV: (int 17b [0:131071]) Mesh cells inverse width
+ * LSC_CELL_H_INV: (int 17b [0:131071]) Mesh cells inverse height
+ * LSC_ALPHA: (int 17b [0:131071]) Normalised horizontal offset of
+ * top left pixel from top left
+ * vertice of cell
+ * LSC_BETA: (int 17b [0:131071]) Normalised vertical offset of
+ * top left pixel from top left
+ * vertice of cell
+ * LSC_THRESHOLD_B: (int 10b [0:1023]) Threshold to avoid declipping on
+ * Blue
+ * LSC_THRESHOLD_G: (int 10b [0:1023]) Threshold to avoid declipping on
+ * Green
+ * LSC_THRESHOLD_R: (int 10b [0:1023]) Threshold to avoid declipping on
+ * Red
+ * LSC_GAIN_B: (int 10b [0:1023]) White balance gain Blue
+ * LSC_GAIN_G: (int 10b [0:1023]) White balance gain Green
+ * LSC_GAIN_R: (int 10b [0:1023]) White balance gain Red
+ * LSC_RED_COEFF_MEM: (ary[221] 8b [0:255]) Correction mesh for Red channel
+ * (array 15x13)
+ * LSC_GREEN_COEFF_MEM: (ary[221] 8b [0:255]) Correction mesh for Green
+ * channel (array 15x13)
+ * LSC_BLUE_COEFF_MEM: (ary[221] 8b [0:255]) Correction mesh for Blue channel
+ * (array 15x13)
+ * LSC_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_LSC_BASE (P7_CID_ISP_BSTAT_BASE + 20)
+#define P7_CID_ISP_LSC_OFFSET_X (P7_CID_ISP_LSC_BASE + 1)
+#define P7_CID_ISP_LSC_OFFSET_Y (P7_CID_ISP_LSC_BASE + 2)
+#define P7_CID_ISP_LSC_CELL_ID_X (P7_CID_ISP_LSC_BASE + 3)
+#define P7_CID_ISP_LSC_CELL_ID_Y (P7_CID_ISP_LSC_BASE + 4)
+#define P7_CID_ISP_LSC_CELL_W (P7_CID_ISP_LSC_BASE + 5)
+#define P7_CID_ISP_LSC_CELL_H (P7_CID_ISP_LSC_BASE + 6)
+#define P7_CID_ISP_LSC_CELL_W_INV (P7_CID_ISP_LSC_BASE + 7)
+#define P7_CID_ISP_LSC_CELL_H_INV (P7_CID_ISP_LSC_BASE + 8)
+#define P7_CID_ISP_LSC_ALPHA (P7_CID_ISP_LSC_BASE + 9)
+#define P7_CID_ISP_LSC_BETA (P7_CID_ISP_LSC_BASE + 10)
+#define P7_CID_ISP_LSC_THRESHOLD_B (P7_CID_ISP_LSC_BASE + 11)
+#define P7_CID_ISP_LSC_THRESHOLD_G (P7_CID_ISP_LSC_BASE + 12)
+#define P7_CID_ISP_LSC_THRESHOLD_R (P7_CID_ISP_LSC_BASE + 13)
+#define P7_CID_ISP_LSC_GAIN_B (P7_CID_ISP_LSC_BASE + 14)
+#define P7_CID_ISP_LSC_GAIN_G (P7_CID_ISP_LSC_BASE + 15)
+#define P7_CID_ISP_LSC_GAIN_R (P7_CID_ISP_LSC_BASE + 16)
+#define P7_CID_ISP_LSC_RED_COEFF_MEM (P7_CID_ISP_LSC_BASE + 17)
+#define P7_CID_ISP_LSC_GREEN_COEFF_MEM (P7_CID_ISP_LSC_BASE + 18)
+#define P7_CID_ISP_LSC_BLUE_COEFF_MEM (P7_CID_ISP_LSC_BASE + 19)
+#define P7_CID_ISP_LSC_CFA (P7_CID_ISP_LSC_BASE + 20)
+
+/* Chromatic Aberration ISP module
+ * CA_RADIUS_SQUARED: (ary[20] 24b [0:16777215])Abscissa of the Look
+ * Up Table
+ * CA_DISPLACEMENT_RED_COEFF:   (ary[20] 16b [0:65535]) Displacement for the
+ *                                                      RED channel
+ * CA_DISPLACEMENT_BLUE_COEFF:  (ary[20] 16b [0:65535]) Displacement for the
+ *                                                      BLUE channel
+ * CA_CIRCLE_POS_X_CENTER:      (int 14b [0:16383])     Center abscissa of the
+ * image circle in sensor
+ * pixels
+ * CA_CIRCLE_POS_X_SQUARED:     (int 26b [0:67108863])  Center abscissa of the
+ * image circle in sensor
+ * pixels value squared
+ * CA_CIRCLE_POS_Y_CENTER: (int 14b [0:16383]) Center ordinate of the
+ * image circle in sensor
+ * pixels
+ * CA_CIRCLE_POS_Y_SQUARED: (int 26b [0:67108863]) Center ordinate of the
+ * image circle in sensor
+ * pixels value squared
+ * CA_GREEN_VARIATION: (bool) Green variation is
+ * taken into account for
+ * the correction
+ * CA_X_LOG2_INC: (int 3b [0:7]) Horizontal
+ * skipping/binning
+ * parameter
+ * CA_Y_LOG2_INC: (int 3b [0:7]) Vertical
+ * skipping/binning
+ * parameter
+ * CA_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_CA_BASE (P7_CID_ISP_LSC_BASE + 25)
+#define P7_CID_ISP_CA_RADIUS_SQUARED (P7_CID_ISP_CA_BASE + 1)
+#define P7_CID_ISP_CA_DISPLACEMENT_RED_COEFF (P7_CID_ISP_CA_BASE + 2)
+#define P7_CID_ISP_CA_DISPLACEMENT_BLUE_COEFF (P7_CID_ISP_CA_BASE + 3)
+#define P7_CID_ISP_CA_CIRCLE_POS_X_CENTER (P7_CID_ISP_CA_BASE + 4)
+#define P7_CID_ISP_CA_CIRCLE_POS_X_SQUARED (P7_CID_ISP_CA_BASE + 5)
+#define P7_CID_ISP_CA_CIRCLE_POS_Y_CENTER (P7_CID_ISP_CA_BASE + 6)
+#define P7_CID_ISP_CA_CIRCLE_POS_Y_SQUARED (P7_CID_ISP_CA_BASE + 7)
+#define P7_CID_ISP_CA_GREEN_VARIATION (P7_CID_ISP_CA_BASE + 8)
+#define P7_CID_ISP_CA_X_LOG2_INC (P7_CID_ISP_CA_BASE + 9)
+#define P7_CID_ISP_CA_Y_LOG2_INC (P7_CID_ISP_CA_BASE + 10)
+#define P7_CID_ISP_CA_CFA (P7_CID_ISP_CA_BASE + 11)
+
+/* Bayer ISP module
+ * BAYER_THRESHOLD_1 (int 13b [0:8191]) Low threshold value
+ * BAYER_THRESHOLD_2 (int 13b [0:8191]) High threshold value
+ * BAYER_CFA: (int 2b [0:3]) Color Filter Array
+ * see enum p7_isp_cfa for types.
+ */
+#define P7_CID_ISP_BAYER_BASE (P7_CID_ISP_CA_BASE + 15)
+#define P7_CID_ISP_BAYER_THRESHOLD_1 (P7_CID_ISP_BAYER_BASE + 1)
+#define P7_CID_ISP_BAYER_THRESHOLD_2 (P7_CID_ISP_BAYER_BASE + 2)
+#define P7_CID_ISP_BAYER_CFA (P7_CID_ISP_BAYER_BASE + 3)
+
+/* Color correction ISP module
+ * CC_COEFF_00: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_01: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_02: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_10: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_11: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_12: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_20: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_21: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_COEFF_22: (int 14b [0:16383]) Coded in signed fixed-pointQ2.11
+ * CC_OFFSET_RY_IN: (int 10b [0:1023]) Red or Y component offset:
+ * before matrix multiplication,
+ * the pixel vector is subtracted
+ * by 'OFFSET_IN' value
+ * CC_OFFSET_RY_OUT: (int 10b [0:1023]) Red or Y component offsets:
+ * after the matrix multiplication,
+ * the product is added with
+ * 'OFFSET_OUT' value
+ * CC_CLIP_RY_MIN: (int 10b [0:1023]) Red or Y component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ * CC_CLIP_RY_MAX: (int 10b [0:1023]) Red or Y component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ * CC_OFFSET_GU_IN: (int 10b [0:1023]) Green or U component offset:
+ * before matrix multiplication,
+ * the pixel vector is subtracted
+ * by 'OFFSET_IN' value
+ * CC_OFFSET_GU_OUT: (int 10b [0:1023]) Green or U component offsets:
+ * after the matrix multiplication,
+ * the product is added with
+ * 'OFFSET_OUT' value
+ * CC_CLIP_GU_MIN: (int 10b [0:1023]) Green or U component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ * CC_CLIP_GU_MAX: (int 10b [0:1023]) Green or U component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ * CC_OFFSET_BV_IN: (int 10b [0:1023]) Blue or V component offset:
+ * before matrix multiplication,
+ * the pixel vector is subtracted
+ * by 'OFFSET_IN' value
+ * CC_OFFSET_BV_OUT: (int 10b [0:1023]) Blue or V component offsets:
+ * after the matrix multiplication,
+ * the product is added with
+ * 'OFFSET_OUT' value
+ * CC_CLIP_BV_MIN: (int 10b [0:1023]) Blue or V component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ * CC_CLIP_BV_MAX: (int 10b [0:1023]) Blue or V component clips: the
+ * output result is clipped between
+ * 'CLIP_MIN' and 'CLIP_MAX' value
+ */
+#define P7_CID_ISP_CC_BASE (P7_CID_ISP_BAYER_BASE + 5)
+#define P7_CID_ISP_CC_COEFF_00 (P7_CID_ISP_CC_BASE + 1)
+#define P7_CID_ISP_CC_COEFF_01 (P7_CID_ISP_CC_BASE + 2)
+#define P7_CID_ISP_CC_COEFF_02 (P7_CID_ISP_CC_BASE + 3)
+#define P7_CID_ISP_CC_COEFF_10 (P7_CID_ISP_CC_BASE + 4)
+#define P7_CID_ISP_CC_COEFF_11 (P7_CID_ISP_CC_BASE + 5)
+#define P7_CID_ISP_CC_COEFF_12 (P7_CID_ISP_CC_BASE + 6)
+#define P7_CID_ISP_CC_COEFF_20 (P7_CID_ISP_CC_BASE + 7)
+#define P7_CID_ISP_CC_COEFF_21 (P7_CID_ISP_CC_BASE + 8)
+#define P7_CID_ISP_CC_COEFF_22 (P7_CID_ISP_CC_BASE + 9)
+#define P7_CID_ISP_CC_OFFSET_RY_IN (P7_CID_ISP_CC_BASE + 10)
+#define P7_CID_ISP_CC_OFFSET_RY_OUT (P7_CID_ISP_CC_BASE + 11)
+#define P7_CID_ISP_CC_CLIP_RY_MIN (P7_CID_ISP_CC_BASE + 12)
+#define P7_CID_ISP_CC_CLIP_RY_MAX (P7_CID_ISP_CC_BASE + 13)
+#define P7_CID_ISP_CC_OFFSET_GU_IN (P7_CID_ISP_CC_BASE + 14)
+#define P7_CID_ISP_CC_OFFSET_GU_OUT (P7_CID_ISP_CC_BASE + 15)
+#define P7_CID_ISP_CC_CLIP_GU_MIN (P7_CID_ISP_CC_BASE + 16)
+#define P7_CID_ISP_CC_CLIP_GU_MAX (P7_CID_ISP_CC_BASE + 17)
+#define P7_CID_ISP_CC_OFFSET_BV_IN (P7_CID_ISP_CC_BASE + 18)
+#define P7_CID_ISP_CC_OFFSET_BV_OUT (P7_CID_ISP_CC_BASE + 19)
+#define P7_CID_ISP_CC_CLIP_BV_MIN (P7_CID_ISP_CC_BASE + 20)
+#define P7_CID_ISP_CC_CLIP_BV_MAX (P7_CID_ISP_CC_BASE + 21)
+
+/* VL Format 40 to 32 ISP module
+ * VLF_40_TO_32: (int 3b [0:4]) Convert format from 40 to 32
+ * see enum p7_vlf_40to32 for types.
+ */
+#define P7_CID_ISP_VLF_40_TO_32 (P7_CID_ISP_CC_BASE + 25)
+/* Conversion modes for the 40-to-32 bit video link formatter (selected
+ * through the P7_CID_ISP_VLF_40_TO_32 control above). */
+enum p7_vlf_40to32 {
+	P7_VLF_40_RAW10_TO_RAW8,
+	P7_VLF_40_RAW10_TO_RAW10,
+	P7_VLF_40_RAW10_TO_RAW3x10,
+	P7_VLF_40_RGB3x10_TO_RGB3x10,
+	P7_VLF_40_RGB3x10_TO_ARGB,
+};
+
+/* Gamma Correction YUV ISP module
+ * GAMMA_BYPASS: (bool) Bypass Gamma correction
+ * GAMMA_PALETTE: (bool) Palette mode
+ * (0 = non-linear, 1 = palette)
+ * GAMMA_COMP_WIDTH: (bool) Component width
+ * (0 = 8bits, 1 = 10bits)
+ * GAMMA_RY_LUT: (ary[1024] 8b [0:255]) Red or Y curve
+ * GAMMA_GU_LUT: (ary[1024] 8b [0:255]) Green or U curve
+ * GAMMA_BV_LUT: (ary[1024] 8b [0:255]) Blue or V curve
+ */
+#define P7_CID_ISP_GAMMA_BASE (P7_CID_ISP_CC_BASE + 30)
+#define P7_CID_ISP_GAMMA_BYPASS (P7_CID_ISP_GAMMA_BASE + 1)
+#define P7_CID_ISP_GAMMA_PALETTE (P7_CID_ISP_GAMMA_BASE + 2)
+#define P7_CID_ISP_GAMMA_COMP_WIDTH (P7_CID_ISP_GAMMA_BASE + 3)
+#define P7_CID_ISP_GAMMA_RY_LUT (P7_CID_ISP_GAMMA_BASE + 4)
+#define P7_CID_ISP_GAMMA_GU_LUT (P7_CID_ISP_GAMMA_BASE + 5)
+#define P7_CID_ISP_GAMMA_BV_LUT (P7_CID_ISP_GAMMA_BASE + 6)
+
+/* Chroma converter YUV ISP module
+ * See definition of Color Correction ISP module (P7_CID_ISP_CC_*)
+ */
+#define P7_CID_ISP_CONV_BASE (P7_CID_ISP_GAMMA_BASE + 10)
+#define P7_CID_ISP_CONV_COEFF_00 (P7_CID_ISP_CONV_BASE + 1)
+#define P7_CID_ISP_CONV_COEFF_01 (P7_CID_ISP_CONV_BASE + 2)
+#define P7_CID_ISP_CONV_COEFF_02 (P7_CID_ISP_CONV_BASE + 3)
+#define P7_CID_ISP_CONV_COEFF_10 (P7_CID_ISP_CONV_BASE + 4)
+#define P7_CID_ISP_CONV_COEFF_11 (P7_CID_ISP_CONV_BASE + 5)
+#define P7_CID_ISP_CONV_COEFF_12 (P7_CID_ISP_CONV_BASE + 6)
+#define P7_CID_ISP_CONV_COEFF_20 (P7_CID_ISP_CONV_BASE + 7)
+#define P7_CID_ISP_CONV_COEFF_21 (P7_CID_ISP_CONV_BASE + 8)
+#define P7_CID_ISP_CONV_COEFF_22 (P7_CID_ISP_CONV_BASE + 9)
+#define P7_CID_ISP_CONV_OFFSET_RY_IN (P7_CID_ISP_CONV_BASE + 10)
+#define P7_CID_ISP_CONV_OFFSET_RY_OUT (P7_CID_ISP_CONV_BASE + 11)
+#define P7_CID_ISP_CONV_CLIP_RY_MIN (P7_CID_ISP_CONV_BASE + 12)
+#define P7_CID_ISP_CONV_CLIP_RY_MAX (P7_CID_ISP_CONV_BASE + 13)
+#define P7_CID_ISP_CONV_OFFSET_GU_IN (P7_CID_ISP_CONV_BASE + 14)
+#define P7_CID_ISP_CONV_OFFSET_GU_OUT (P7_CID_ISP_CONV_BASE + 15)
+#define P7_CID_ISP_CONV_CLIP_GU_MIN (P7_CID_ISP_CONV_BASE + 16)
+#define P7_CID_ISP_CONV_CLIP_GU_MAX (P7_CID_ISP_CONV_BASE + 17)
+#define P7_CID_ISP_CONV_OFFSET_BV_IN (P7_CID_ISP_CONV_BASE + 18)
+#define P7_CID_ISP_CONV_OFFSET_BV_OUT (P7_CID_ISP_CONV_BASE + 19)
+#define P7_CID_ISP_CONV_CLIP_BV_MIN (P7_CID_ISP_CONV_BASE + 20)
+#define P7_CID_ISP_CONV_CLIP_BV_MAX (P7_CID_ISP_CONV_BASE + 21)
+
+/* Statistics YUV ISP module
+ * YSTAT_MEASURE_REQ: (bool) Will measure at the
+ * beginning of the next
+ * frame
+ * YSTAT_CLEAR: (bool) Clearing the histogram
+ * YSTAT_DONE: (bool) Asserted at the end of
+ * each measured frame
+ * YSTAT_ERROR: (bool) Asserted if there is
+ * valid input stream
+ * during the histogram
+ * clearing process
+ * BSTAT_WINDOW_X_START: (int 13b [0:8191]) Beginning of the
+ * measured window X
+ * BSTAT_WINDOW_X_END: (int 13b [0:8191]) End of the measured
+ * window X
+ * BSTAT_WINDOW_Y_START: (int 13b [0:8191]) Beginning of the
+ * measured window Y
+ * BSTAT_WINDOW_Y_END: (int 13b [0:8191]) End of the measured
+ * window Y
+ * YSTAT_CIRCLE_POS_X_CENTER: (int 14b [0:16383]) Circle center X
+ * YSTAT_CIRCLE_POS_X_SQUARED: (int 26b [0:67108863]) X center squared
+ * YSTAT_CIRCLE_POS_Y_CENTER: (int 14b [0:16383]) Circle center Y
+ * YSTAT_CIRCLE_POS_Y_SQUARED: (int 26b [0:67108863]) Y center squared
+ * YSTAT_CIRCLE_RADIUS_SQUARED: (int 29b [0:536870911])Circle radius squared
+ * YSTAT_INC_X_LOG2_INC: (int 3b [0:7]) Horizontal
+ * skipping/binning
+ * parameter
+ * YSTAT_INC_Y_LOG2_INC: (int 3b [0:7]) Vertical
+ * skipping/binning
+ * parameter
+ * YSTAT_AWB_THRESHOLD: (int 8b [0:255]) Auto white balancing
+ * threshold
+ *
+ * Read Only controls:
+ * YSTAT_AE_NB_VALID_Y: (int 22b [0:4194303]) Number of valid
+ * luma pixels
+ * YSTAT_AWB_SUM_Y: (int 30b [0:1073741823]) White balancing
+ * sum of Y
+ * YSTAT_AWB_SUM_U: (int 30b [0:1073741823]) White balancing
+ * sum of U
+ * YSTAT_AWB_SUM_V: (int 30b [0:1073741823]) White balancing
+ * sum of V
+ * YSTAT_AWB_NB_GREY_PIXELS: (int 22b [0:4194303]) Number of valid
+ * grey pixels
+ * YSTAT_AE_HISTOGRAM_Y: (ary[256] 22b [0:4194303]) Value of each
+ * bin
+ */
+#define P7_CID_ISP_YSTAT_BASE (P7_CID_ISP_CONV_BASE + 25)
+#define P7_CID_ISP_YSTAT_MEASURE_REQ (P7_CID_ISP_YSTAT_BASE + 1)
+#define P7_CID_ISP_YSTAT_CLEAR (P7_CID_ISP_YSTAT_BASE + 2)
+#define P7_CID_ISP_YSTAT_DONE (P7_CID_ISP_YSTAT_BASE + 3)
+#define P7_CID_ISP_YSTAT_ERROR (P7_CID_ISP_YSTAT_BASE + 4)
+#define P7_CID_ISP_BSTAT_WINDOW_X_START (P7_CID_ISP_YSTAT_BASE + 5)
+#define P7_CID_ISP_BSTAT_WINDOW_X_END (P7_CID_ISP_YSTAT_BASE + 6)
+#define P7_CID_ISP_BSTAT_WINDOW_Y_START (P7_CID_ISP_YSTAT_BASE + 7)
+#define P7_CID_ISP_BSTAT_WINDOW_Y_END (P7_CID_ISP_YSTAT_BASE + 8)
+#define P7_CID_ISP_YSTAT_CIRCLE_POS_X_CENTER (P7_CID_ISP_YSTAT_BASE + 9)
+#define P7_CID_ISP_YSTAT_CIRCLE_POS_X_SQUARED (P7_CID_ISP_YSTAT_BASE + 10)
+#define P7_CID_ISP_YSTAT_CIRCLE_POS_Y_CENTER (P7_CID_ISP_YSTAT_BASE + 11)
+#define P7_CID_ISP_YSTAT_CIRCLE_POS_Y_SQUARED (P7_CID_ISP_YSTAT_BASE + 12)
+#define P7_CID_ISP_YSTAT_CIRCLE_RADIUS_SQUARED (P7_CID_ISP_YSTAT_BASE + 13)
+#define P7_CID_ISP_YSTAT_INC_X_LOG2_INC (P7_CID_ISP_YSTAT_BASE + 14)
+#define P7_CID_ISP_YSTAT_INC_Y_LOG2_INC (P7_CID_ISP_YSTAT_BASE + 15)
+#define P7_CID_ISP_YSTAT_AE_NB_VALID_Y (P7_CID_ISP_YSTAT_BASE + 16)
+#define P7_CID_ISP_YSTAT_AWB_THRESHOLD (P7_CID_ISP_YSTAT_BASE + 17)
+#define P7_CID_ISP_YSTAT_AWB_SUM_Y (P7_CID_ISP_YSTAT_BASE + 18)
+#define P7_CID_ISP_YSTAT_AWB_SUM_U (P7_CID_ISP_YSTAT_BASE + 19)
+#define P7_CID_ISP_YSTAT_AWB_SUM_V (P7_CID_ISP_YSTAT_BASE + 20)
+#define P7_CID_ISP_YSTAT_AWB_NB_GREY_PIXELS (P7_CID_ISP_YSTAT_BASE + 21)
+#define P7_CID_ISP_YSTAT_AE_HISTOGRAM_Y (P7_CID_ISP_YSTAT_BASE + 22)
+
+/* Edge enhancement color reduction filter ISP module
+ * EE_CRF_EE_LUT: (ary[256] 6b [0:63]) EE LUT table
+ * EE_CRF_EE_KERNEL_COEFF: (ary[6] 11b [0:2047]) EE kernel
+ * EE_CRF_CRF_KERNEL_COEFF: (ary[6] 11b [0:2047]) CRF kernel
+ * EE_CRF_M_COEFF: (int 8b [0:255]) Sobel threshold
+ * EE_CRF_D_COEFF: (int 11b [0:2047]) Normalization factor
+ */
+#define P7_CID_ISP_EE_CRF_BASE (P7_CID_ISP_YSTAT_BASE + 25)
+#define P7_CID_ISP_EE_CRF_EE_LUT (P7_CID_ISP_EE_CRF_BASE + 1)
+#define P7_CID_ISP_EE_CRF_EE_KERNEL_COEFF (P7_CID_ISP_EE_CRF_BASE + 2)
+#define P7_CID_ISP_EE_CRF_CRF_KERNEL_COEFF (P7_CID_ISP_EE_CRF_BASE + 3)
+#define P7_CID_ISP_EE_CRF_M_COEFF (P7_CID_ISP_EE_CRF_BASE + 4)
+#define P7_CID_ISP_EE_CRF_D_COEFF (P7_CID_ISP_EE_CRF_BASE + 5)
+
+/* I3D Lut ISP module
+ * I3D_LUT_OUTSIDE_RY: (ary[125] 8b [0:255]) Red or Y component value
+ * I3D_LUT_OUTSIDE_GU: (ary[125] 8b [0:255]) Green or U component value
+ * I3D_LUT_OUTSIDE_BV: (ary[125] 8b [0:255]) Blue or V component value
+ * I3D_LUT_INSIDE_RY: (ary[125] 8b [0:255]) Red or Y component value
+ * I3D_LUT_INSIDE_GU: (ary[125] 8b [0:255]) Green or U component value
+ * I3D_LUT_INSIDE_BV: (ary[125] 8b [0:255]) Blue or V component value
+ * I3D_LUT_CLIP_MODE: (bool) YUV Clip
+ */
+#define P7_CID_ISP_I3D_LUT_BASE (P7_CID_ISP_EE_CRF_BASE + 10)
+#define P7_CID_ISP_I3D_LUT_OUTSIDE_RY (P7_CID_ISP_I3D_LUT_BASE + 1)
+#define P7_CID_ISP_I3D_LUT_OUTSIDE_GU (P7_CID_ISP_I3D_LUT_BASE + 2)
+#define P7_CID_ISP_I3D_LUT_OUTSIDE_BV (P7_CID_ISP_I3D_LUT_BASE + 3)
+#define P7_CID_ISP_I3D_LUT_INSIDE_RY (P7_CID_ISP_I3D_LUT_BASE + 4)
+#define P7_CID_ISP_I3D_LUT_INSIDE_GU (P7_CID_ISP_I3D_LUT_BASE + 5)
+#define P7_CID_ISP_I3D_LUT_INSIDE_BV (P7_CID_ISP_I3D_LUT_BASE + 6)
+#define P7_CID_ISP_I3D_LUT_CLIP_MODE (P7_CID_ISP_I3D_LUT_BASE + 7)
+
+/* Drop ISP module
+ * DROP_OFFSET_X: (int 16b [0:65535]) Horizontal offset
+ * DROP_OFFSET_Y: (int 16b [0:65535]) Vertical offset
+ */
+#define P7_CID_ISP_DROP_BASE (P7_CID_ISP_I3D_LUT_BASE + 10)
+#define P7_CID_ISP_DROP_OFFSET_X (P7_CID_ISP_DROP_BASE + 1)
+#define P7_CID_ISP_DROP_OFFSET_Y (P7_CID_ISP_DROP_BASE + 2)
+
+#endif /* _AVI_V4L2_ISP_CTRLS_H_ */
diff --git a/drivers/parrot/media/video/avi_voc.c b/drivers/parrot/media/video/avi_voc.c
new file mode 100644
index 00000000000000..222425035b328e
--- /dev/null
+++ b/drivers/parrot/media/video/avi_voc.c
@@ -0,0 +1,1582 @@
+/*
+ * linux/drivers/parrot/video/avi_voc.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 03-Mar-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gcd.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-ctrls.h>
+
+#include "avi_v4l2.h"
+#include "video/avi_limit.h"
+#include "avi_voc.h"
+#include "avi_voc_core.h"
+#include "avi_voc_ctrl.h"
+
+/*******************************************************************************
+ * HELPERS
+ ******************************************************************************/
+
+/* Return the platform device id identifying this VOC instance. */
+static int avi_voc_instance(struct avi_voc *voc)
+{
+	struct platform_device *const pdev =
+		container_of(voc->dev, struct platform_device, dev);
+
+	return pdev->id;
+}
+
+/* Map an embedded struct v4l2_fh back to its enclosing avi_voc_fh. */
+static inline struct avi_voc_fh *avi_voc_vfh_get_fh(struct v4l2_fh *vfh)
+{
+	return container_of(vfh, struct avi_voc_fh, vfh);
+}
+
+/* Dump the current VOC geometry (source format, overlay window, crop
+ * rectangle and, when an output segment is connected, the display input
+ * size) to the kernel log. Debugging helper only. */
+static void __maybe_unused avi_voc_print_debug(const char *func, struct avi_voc *voc)
+{
+	printk("=== VOC %d (%s)===\n", avi_voc_instance(voc), func);
+	printk("    FMT={w=%4u, h=%4u}\n",
+	       voc->pix.width, voc->pix.height);
+	printk("    OVL={w=%4u, h=%4u, t=%4d, l=%4d }\n",
+	       voc->win.w.width, voc->win.w.height,
+	       voc->win.w.top, voc->win.w.left);
+	printk("    CRP={w=%4u, h=%4u, t=%4d, l=%4d }\n",
+	       voc->crop.c.width, voc->crop.c.height,
+	       voc->crop.c.top, voc->crop.c.left);
+	if (voc->display) {
+		struct avi_segment_format display_fmt;
+		avi_segment_get_input_format(voc->display, &display_fmt);
+		printk("    OUT={w=%4u, h=%4u}\n",
+		       display_fmt.width, display_fmt.height);
+	} else {
+		printk("    OUTPUT NOT CONNECTED\n");
+	}
+}
+
+/* Program the segment's input DMA to show buffer @b.
+ *
+ * Converts the vb2 buffer (plane 0 physical address plus the configured
+ * plane sizes) into an avi_dma_buffer matching the segment's current input
+ * format, hands it to the segment, and unhides the segment unless the user
+ * asked for it to stay hidden — this covers the stream startup case where
+ * the segment is not visible yet.
+ *
+ * Must be called with vbuf_lock held */
+static void avi_voc_display(struct avi_voc *voc, struct avi_voc_vbuf *b)
+{
+	struct avi_segment_format in_fmt;
+	dma_addr_t dma_addr;
+	struct avi_dma_buffer frame;
+
+	dma_addr = vb2_dma_contig_plane_dma_addr(&b->vb, 0);
+
+	avi_segment_get_input_format(voc->segment, &in_fmt);
+
+	avi_v4l2_setup_dmabuf(&voc->win.w,
+	                      &in_fmt,
+	                      voc->plane0_size,
+	                      voc->plane1_size,
+	                      dma_addr,
+	                      b,
+	                      &frame);
+	avi_segment_set_input_buffer(voc->segment, &frame);
+
+	/* In case we're just starting and aren't visible yet */
+	voc->hw_streaming = 1;
+	if (!voc->hide)
+		avi_segment_unhide(voc->segment);
+}
+
+/* Configure the segment after a crop/win change or on streamon
+ *
+ * Derives the segment DMA input format from the user pixel format and
+ * overlay window, the output format/position from the crop rectangle, and
+ * picks per-pixel (OSD) alpha when the pixel format carries one, a global
+ * alpha otherwise. Returns 0 on success or a negative errno.
+ *
+ * Must be called with voc->vbuf_lock held
+ */
+static int avi_voc_update_segment_layout(struct avi_voc *voc)
+{
+	struct avi_segment_format dma_fmt;
+	struct avi_segment_format out_fmt;
+	struct avi_segment_layout layout;
+	int ret;
+
+	ret = avi_v4l2_to_segment_fmt(&voc->pix,
+	                              &voc->win.w,
+	                              &dma_fmt,
+	                              &voc->plane0_size,
+	                              &voc->plane1_size);
+	if (ret)
+		return ret;
+
+	/* Since we have a scaler we can always have a progressive input (the
+	 * scaler will do the rest)
+	 * XXX: with s_ctrl scaler can now be disabled */
+	dma_fmt.interlaced = 0;
+
+	avi_segment_get_input_format(voc->display, &out_fmt);
+
+	out_fmt.width  = voc->crop.c.width;
+	out_fmt.height = voc->crop.c.height;
+
+	layout.x = voc->crop.c.left;
+	layout.y = voc->crop.c.top;
+
+	if (avi_pixfmt_have_alpha(dma_fmt.pix_fmt))
+		layout.alpha = AVI_ALPHA_OSD;
+	else
+		layout.alpha = voc->win.global_alpha;
+
+	layout.hidden = 0;
+
+	ret = avi_segment_set_format_and_layout(voc->segment,
+	                                        &dma_fmt,
+	                                        &out_fmt,
+	                                        &layout);
+	if (ret) {
+		dev_err(voc->dev, "couldn't set segment format\n");
+		return ret;
+	}
+
+	/* The config changed, we need to reconfigure the displayed frame */
+	if (voc->displayed_frame)
+		avi_voc_display(voc, voc->displayed_frame);
+
+	return 0;
+}
+
+/* VIDIOC_QUERYCAP: report driver name, per-instance card name, bus info
+ * and the streaming video-output (+ overlay) capabilities. */
+static int avi_voc_querycap(struct file *file,
+                            void *vfh,
+                            struct v4l2_capability *cap)
+{
+	struct avi_voc *voc = video_drvdata(file);
+
+	strcpy(cap->driver, DRIVER_NAME);
+
+	snprintf(cap->card, sizeof(cap->card), "avi-voc.%d",
+	         avi_voc_instance(voc));
+	/* Fix: bound the copy by the destination (bus_info), not by card */
+	snprintf(cap->bus_info, sizeof(cap->bus_info), AVI_V4L2_BUS_VERSION);
+
+	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+		V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	return 0;
+}
+
+/* Rebuild the table of candidate output segments.
+ *
+ * Probes "lcd.N" then "r2r.N" segment names (N in [0, AVI_VOC_ENUM_NB))
+ * and records each existing segment's name in voc->outputs, updating
+ * voc->out_nb accordingly.
+ *
+ * Fixes vs. original: the name buffer is char (snprintf/strlcpy and
+ * avi_segment_find take char *, __u8 triggered pointer-sign mismatches)
+ * and the two identical probe loops are merged over a prefix table. */
+static void avi_voc_fill_outputs(struct avi_voc *voc)
+{
+	static const char *const prefixes[] = { "lcd", "r2r" };
+	char name[32];
+	int p;
+	int i;
+
+	voc->out_nb = 0;
+
+	for (p = 0; p < ARRAY_SIZE(prefixes); p++) {
+		for (i = 0; i < AVI_VOC_ENUM_NB; i++) {
+			snprintf(name, sizeof(name), "%s.%d", prefixes[p], i);
+			if (avi_segment_find(name)) {
+				strlcpy(voc->outputs[voc->out_nb], name,
+				        sizeof(name));
+				voc->out_nb++;
+			}
+		}
+	}
+}
+
+/* VIDIOC_ENUMOUTPUT: enumerate the display segments this VOC can feed. */
+static int avi_voc_enum_output(struct file *file,
+                               void *vfh,
+                               struct v4l2_output *output)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	__u32 index = output->index;
+
+	/* In our case possible outputs are segments. While segments can be
+	 * dynamically created we have a race condition between the enumeration
+	 * and s_output due to a segment change shifting the indexes.
+	 * Our solution to fix it is to create a table associating an index
+	 * with a segment.
+	 * The output list has to be recreated sometimes in order to be up to
+	 * date. We update this list at each new enumeration */
+	if (index == 0) {
+		int i;
+		avi_voc_fill_outputs(voc);
+
+		/* Find index value of the current output */
+		voc->out_id = -1;
+		for (i = 0; i < voc->out_nb; i++)
+			if (0 == strncmp(voc->outputs[i], voc->output,
+			                 sizeof(voc->output))) {
+				voc->out_id = i;
+				break;
+			}
+	}
+
+
+	if (index >= voc->out_nb)
+		return -EINVAL;
+
+
+	memset(output, 0, sizeof(*output));
+
+	output->index = index;
+	strlcpy(output->name, voc->outputs[index], sizeof(output->name));
+	/* XXX Analog? where does that come from? */
+	output->type = V4L2_OUTPUT_TYPE_ANALOG;
+	/* No support for video standards.*/
+	output->audioset = 0;
+	output->modulator = 0;
+	output->std = 0;
+	output->capabilities = 0;
+
+	return 0;
+}
+
+/* VIDIOC_S_OUTPUT: select an output segment by enumeration index.
+ *
+ * Fix: look the segment up before committing anything. The segment may
+ * have been destroyed since the last enumeration; the original stored the
+ * (possibly NULL) avi_segment_find() result in voc->display, which is
+ * later dereferenced unconditionally. */
+static int avi_voc_s_output(struct file *file, void *priv, unsigned int index)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct avi_segment *seg;
+
+	if (index >= voc->out_nb)
+		return -EINVAL;
+
+	/* At the moment we dont change output while streaming */
+	if (vb2_is_streaming(&voc->vb2_vidq))
+		return -EBUSY;
+
+	seg = avi_segment_find(voc->outputs[index]);
+	if (!seg)
+		return -EINVAL;
+
+	strlcpy(voc->output, voc->outputs[index], sizeof(voc->output));
+	voc->out_id  = index;
+	voc->display = seg;
+
+	return 0;
+}
+
+/* VIDIOC_G_OUTPUT: report the index of the currently selected output. */
+static int avi_voc_g_output(struct file *file, void *priv, unsigned int *index)
+{
+	struct avi_voc *const voc = video_drvdata(file);
+	int id = voc->out_id;
+
+	/* An id of -1 means the current output segment was not found during
+	 * the last enumeration, i.e. it no longer exists */
+	if (id < 0)
+		return -EINVAL;
+
+	*index = id;
+
+	return 0;
+}
+
+/* VIDIOC_G_PARM: report the output frame period.
+ *
+ * voc->period_us holds the period in microseconds; it is exposed as the
+ * fraction period_us/USEC_PER_SEC, reduced by their gcd. Only the
+ * VIDEO_OUTPUT buffer type is supported. */
+static int avi_voc_g_parm(struct file *file,
+                          void *vfh,
+                          struct v4l2_streamparm *parms)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct v4l2_outputparm *p = &parms->parm.output;
+	unsigned g;
+
+	if (parms->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	p->capability = V4L2_CAP_TIMEPERFRAME;
+	p->timeperframe.numerator = atomic_read(&voc->period_us);
+	p->timeperframe.denominator = USEC_PER_SEC;
+
+	/* I could return there but I prefer to reduce the fraction first */
+	g = gcd(p->timeperframe.numerator, p->timeperframe.denominator);
+
+	p->timeperframe.numerator   /= g;
+	p->timeperframe.denominator /= g;
+
+	return 0;
+}
+
+
+/* VIDIOC_S_PARM: set the output frame period.
+ *
+ * Sanitizes the requested timeperframe fraction (no division by zero, no
+ * multiplication overflow), stores the period in microseconds in
+ * voc->period_us (rounded to nearest), then reports the possibly rounded
+ * value back through avi_voc_g_parm. */
+static int avi_voc_s_parm(struct file *file,
+                          void *vfh,
+                          struct v4l2_streamparm *parms)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct v4l2_outputparm *p = &parms->parm.output;
+
+	if (parms->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	/* Protect against division by 0. */
+	if (p->timeperframe.denominator == 0)
+		p->timeperframe.denominator = 1;
+
+	if (p->timeperframe.numerator == 0)
+		p->timeperframe.numerator = 25;
+
+	/* Would overflow */
+	if (p->timeperframe.numerator > (INT_MAX / USEC_PER_SEC))
+		p->timeperframe.numerator = (INT_MAX / USEC_PER_SEC);
+
+	/* Compute the value of the fraction in us, rounded to the nearest */
+	atomic_set(&voc->period_us,
+	           ((USEC_PER_SEC * p->timeperframe.numerator) +
+	            (p->timeperframe.denominator / 2)) /
+	           p->timeperframe.denominator);
+
+	/* Update parms in case of rounding */
+	return avi_voc_g_parm(file, vfh, parms);
+}
+
+/* VIDIOC_ENUM_FMT: delegate to the shared AVI V4L2 format enumeration. */
+static int avi_voc_enum_fmt_vid_out(struct file *file,
+                                    void *vfh,
+                                    struct v4l2_fmtdesc *fmt)
+{
+	return avi_v4l2_enum_fmt(fmt);
+}
+
+/* VIDIOC_TRY_FMT(VIDEO_OUTPUT): validate a pixel format without applying
+ * it. Delegates to the shared AVI helper, then restricts the field order
+ * to progressive or interlaced (the only modes supported so far). */
+static int avi_voc_try_fmt_vid_out(struct file *file,
+                                   void *vfh,
+                                   struct v4l2_format *f)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	int ret;
+
+	ret = avi_v4l2_try_fmt(f->type, &f->fmt.pix);
+	if (ret) {
+		dprintk(voc, "avi_v4l2_try_fmt failed\n");
+		return ret;
+	}
+
+	if (f->fmt.pix.field != V4L2_FIELD_NONE &&
+	    f->fmt.pix.field != V4L2_FIELD_INTERLACED) {
+		/* We don't support the other interlacing formats yet */
+		f->fmt.pix.field = V4L2_FIELD_NONE;
+	}
+
+	return 0;
+}
+
+/* Log a struct v4l2_format in human-readable form, keyed on its buffer
+ * type. Kept as a macro (not a function) so the dprintk calls report the
+ * caller's context. Evaluates _voc and _f multiple times. */
+#define AVI_VOC_DISPLAY_V4L2_FORMAT(_voc, _f) do {                      \
+	switch ((_f)->type) {                                           \
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE:                               \
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT:                                \
+		dprintk((_voc),                                         \
+		        "VIDEO_CAPTURE/OUTPUT: width: %d, height: %d, " \
+		        "pixelformat: " FOURCC_FORMAT ", field: %d, "   \
+		        "bytesperline: %d, sizeimage: %d, "             \
+		        "colorspace: %d, "                              \
+		        "priv: %p\n",                                   \
+		        (_f)->fmt.pix.width,                            \
+		        (_f)->fmt.pix.height,                           \
+		        FOURCC_SHOW((_f)->fmt.pix.pixelformat),         \
+		        (_f)->fmt.pix.field,                            \
+		        (_f)->fmt.pix.bytesperline,                     \
+		        (_f)->fmt.pix.sizeimage,                        \
+		        (_f)->fmt.pix.colorspace,                       \
+		        (void*)(_f)->fmt.pix.priv);                     \
+		break;                                                  \
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:                        \
+		dprintk((_voc), "VIDEO_CAPTURE_MPLANE\n");              \
+		break;                                                  \
+	case V4L2_BUF_TYPE_VIDEO_OVERLAY:                               \
+		dprintk((_voc),                                         \
+		        "VIDEO_OVERLAY: left: %d, top: %d, "            \
+		        "width: %d, height: %d, field: %d, "            \
+		        "chromakey: %08x, clips: %pK, clipcount: %u "   \
+		        "bitmap: %p, global_alpha: %d\n",               \
+		        (_f)->fmt.win.w.left,                           \
+		        (_f)->fmt.win.w.top,                            \
+		        (_f)->fmt.win.w.width,                          \
+		        (_f)->fmt.win.w.height,                         \
+		        (_f)->fmt.win.field,                            \
+		        (_f)->fmt.win.chromakey,                        \
+		        (_f)->fmt.win.clips,                            \
+		        (_f)->fmt.win.clipcount,                        \
+		        (_f)->fmt.win.bitmap,                           \
+		        (_f)->fmt.win.global_alpha);                    \
+		break;                                                  \
+	case V4L2_BUF_TYPE_VBI_CAPTURE:                                 \
+		dprintk((_voc), "VBI_CAPTURE\n");                       \
+		break;                                                  \
+	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:                          \
+		dprintk((_voc), "SLICED_VBI_CAPTURE\n");                \
+		break;                                                  \
+	default:                                                        \
+		dprintk((_voc), "invalid format type: %d\n",            \
+		        (_f)->type);                                    \
+	};                                                              \
+} while (0)
+
+/* VIDIOC_S_FMT(VIDEO_OUTPUT) body; caller must hold voc->lock.
+ *
+ * Validates the format through try_fmt, stores it, then resets the overlay
+ * window and crop rectangle to defaults derived from the new format and
+ * the connected display size. Fails with -EBUSY while streaming. */
+static int avi_voc_s_fmt_vid_out_unlocked(struct file *file,
+                                          void *vfh,
+                                          struct v4l2_format *f)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct avi_segment_format display_fmt;
+	int ret;
+
+	AVI_VOC_DISPLAY_V4L2_FORMAT(voc, f);
+
+	if (vb2_is_streaming(&voc->vb2_vidq))
+		/* Can't change format while streaming */
+		return -EBUSY;
+
+	ret = avi_voc_try_fmt_vid_out(file, vfh, f);
+	if (ret)
+		return ret;
+
+	voc->pix = f->fmt.pix;
+
+	avi_segment_get_input_format(voc->display, &display_fmt);
+
+	/* Reset overlay */
+	voc->win.w.left       = 0;
+	voc->win.w.top        = 0;
+	voc->win.global_alpha = 0xff;
+	voc->win.field        = V4L2_FIELD_NONE;
+
+	voc->crop.type   = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	voc->crop.c.left = 0;
+	voc->crop.c.top  = 0;
+
+	if (voc->no_scaler) {
+		/* Without scaler we cant display more than the output size */
+		voc->win.w.width  = min(voc->pix.width,  display_fmt.width);
+		voc->win.w.height = min(voc->pix.height, display_fmt.height);
+		/* crop and overlay have to be equals */
+		voc->crop.c.width  = voc->win.w.width;
+		voc->crop.c.height = voc->win.w.height;
+	} else {
+		voc->crop.c.width  = display_fmt.width;
+		voc->crop.c.height = display_fmt.height;
+		voc->win.w.width   = voc->pix.width;
+		voc->win.w.height  = voc->pix.height;
+	}
+
+	return ret;
+}
+
+/* VIDIOC_S_FMT(VIDEO_OUTPUT): locked wrapper around the _unlocked body.
+ * (Fix: dropped the stray semicolon after the function's closing brace.) */
+static int avi_voc_s_fmt_vid_out(struct file *file,
+                                 void *vfh,
+                                 struct v4l2_format *f)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	int ret;
+
+	mutex_lock(&voc->lock);
+
+	ret = avi_voc_s_fmt_vid_out_unlocked(file, vfh, f);
+
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* VIDIOC_G_FMT(VIDEO_OUTPUT): report the current pixel format. */
+static int avi_voc_g_fmt_vid_out(struct file *file,
+                                 void *vfh,
+                                 struct v4l2_format *f)
+{
+	struct avi_voc *const voc = video_drvdata(file);
+
+	/* voc->pix is protected by the driver mutex */
+	mutex_lock(&voc->lock);
+
+	f->fmt.pix = voc->pix;
+
+	mutex_unlock(&voc->lock);
+
+	return 0;
+}
+
+/* Adjust an overlay window against the hardware striding limits of the
+ * currently configured pixel format; forces a progressive field order.
+ * Returns -EINVAL if the current pixel format is unknown to the AVI.
+ * Caller must hold voc->lock. */
+static int avi_voc_try_fmt_vid_overlay_unlocked(struct avi_voc *voc,
+                                                struct v4l2_format *f)
+{
+	struct v4l2_window *win = &f->fmt.win;
+	struct avi_dma_pixfmt pixfmt;
+
+	pixfmt = avi_v4l2_to_avi_pixfmt(voc->pix.pixelformat);
+	if (pixfmt.id == AVI_INVALID_FMTID)
+		return -EINVAL;
+
+	/* Clamp the window to a valid horizontal/vertical strip of the
+	 * source frame */
+	avi_limit_adjust_w_strip(pixfmt, voc->pix.width,
+	                         &win->w.width, &win->w.left);
+	avi_limit_adjust_h_strip(pixfmt, voc->pix.height,
+	                         &win->w.height, &win->w.top);
+
+	win->field = V4L2_FIELD_NONE;
+
+	return 0;
+}
+
+/* VIDIOC_TRY_FMT(VIDEO_OVERLAY): locked wrapper around the _unlocked
+ * window adjustment. */
+static int avi_voc_try_fmt_vid_overlay(struct file *file,
+                                       void *vfh,
+                                       struct v4l2_format *f)
+{
+	struct avi_voc *const voc = video_drvdata(file);
+	int rc;
+
+	mutex_lock(&voc->lock);
+	rc = avi_voc_try_fmt_vid_overlay_unlocked(voc, f);
+	mutex_unlock(&voc->lock);
+
+	return rc;
+}
+
+
+/* VIDIOC_S_FMT(VIDEO_OVERLAY): set the overlay window.
+ *
+ * Validates the window, applies it under vbuf_lock, and — when already
+ * streaming — pushes the new layout to the segment, rolling the window
+ * (and, without a scaler, the crop) back on failure. */
+static int avi_voc_s_fmt_vid_overlay(struct file *file,
+                                     void *vfh,
+                                     struct v4l2_format *f)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct v4l2_window old_win;
+	/* NOTE(review): old_crop is only written and restored on the
+	 * no_scaler path — intentional, not an uninitialized use */
+	struct v4l2_crop old_crop;
+	int ret;
+
+	mutex_lock(&voc->lock);
+
+	AVI_VOC_DISPLAY_V4L2_FORMAT(voc, f);
+
+	ret = avi_voc_try_fmt_vid_overlay_unlocked(voc, f);
+	if (ret)
+		goto unlock;
+
+	spin_lock_irq(&voc->vbuf_lock);
+
+	old_win  = voc->win;
+	voc->win = f->fmt.win;
+
+	/* Without scaler we have to keep overlay and crop sizes equals */
+	if (voc->no_scaler) {
+		struct avi_segment_format display_fmt;
+		avi_segment_get_input_format(voc->display, &display_fmt);
+
+		voc->win.w.width  = min((unsigned) voc->win.w.width,
+		                        display_fmt.width);
+		voc->win.w.height = min((unsigned) voc->win.w.height,
+		                        display_fmt.height);
+
+		old_crop = voc->crop;
+		voc->crop.c.width  = voc->win.w.width;
+		voc->crop.c.height = voc->win.w.height;
+	}
+
+	if (vb2_is_streaming(&voc->vb2_vidq)) {
+		ret = avi_voc_update_segment_layout(voc);
+		if (ret) {
+			/* Roll back to the previous geometry */
+			voc->win = old_win;
+			if (voc->no_scaler)
+				voc->crop = old_crop;
+			goto unlock_spin;
+		}
+	}
+
+	ret = 0;
+
+unlock_spin:
+	spin_unlock_irq(&voc->vbuf_lock);
+unlock:
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* VIDIOC_G_FMT(VIDEO_OVERLAY): report the current overlay window. */
+static int avi_voc_g_fmt_vid_overlay(struct file *file,
+                                     void *vfh,
+                                     struct v4l2_format *f)
+{
+	struct avi_voc *const voc = video_drvdata(file);
+
+	/* voc->win is protected by the driver mutex */
+	mutex_lock(&voc->lock);
+	f->fmt.win = voc->win;
+	mutex_unlock(&voc->lock);
+
+	return 0;
+}
+
+/* VIDIOC_CROPCAP body: the crop bounds are the connected display
+ * segment's input size, the default rectangle is the full bounds and the
+ * pixel aspect is square. Caller must hold voc->lock. */
+static int avi_voc_cropcap_unlocked(struct avi_voc *voc,
+                                    struct v4l2_cropcap *cap)
+{
+	struct avi_segment_format display_fmt;
+
+	avi_segment_get_input_format(voc->display, &display_fmt);
+
+	cap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+	cap->bounds.top    = 0;
+	cap->bounds.left   = 0;
+	cap->bounds.width  = display_fmt.width;
+	cap->bounds.height = display_fmt.height;
+
+	cap->defrect = cap->bounds;
+
+	cap->pixelaspect.numerator   = 1;
+	cap->pixelaspect.denominator = 1;
+
+	return 0;
+}
+
+/* VIDIOC_CROPCAP: locked wrapper around avi_voc_cropcap_unlocked(). */
+static int avi_voc_cropcap(struct file *file,
+			   void *vfh,
+			   struct v4l2_cropcap *cap)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	int ret;
+
+	mutex_lock(&voc->lock);
+
+	ret = avi_voc_cropcap_unlocked(voc, cap);
+
+	mutex_unlock(&voc->lock);
+
+	/* Propagate the helper's status instead of unconditionally
+	 * returning success. */
+	return ret;
+}
+
+/* VIDIOC_G_CROP: report the current crop rectangle on the display. */
+static int avi_voc_g_crop(struct file *file,
+			  void *vfh,
+			  struct v4l2_crop *crop)
+{
+	struct avi_voc *voc = video_drvdata(file);
+
+	mutex_lock(&voc->lock);
+	*crop = voc->crop;
+	mutex_unlock(&voc->lock);
+
+	return 0;
+}
+
+/* VIDIOC_S_CROP: set the position/size of the overlay on the display.
+ *
+ * In no_scaler mode the crop size is slaved to the overlay window size and
+ * only the position is honoured (clamped so the image stays on screen).
+ * Otherwise the rectangle is adjusted to the cropcap bounds and, when
+ * streaming, applied immediately (rolled back on failure). */
+static int avi_voc_s_crop(struct file *file,
+			  void *vfh,
+			  struct v4l2_crop *crop)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct v4l2_cropcap ccap;
+	struct v4l2_crop old_crop;
+	int ret;
+
+	mutex_lock(&voc->lock);
+
+	dprintk(voc,
+		"type: %d, left: %d, top: %d, width: %d, height: %d\n",
+		crop->type,
+		crop->c.left,
+		crop->c.top,
+		crop->c.width,
+		crop->c.height);
+
+	if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* When no scaler is used we cant crop. Instead of returning an error
+	 * we adjust configuration with no cropping. */
+	if (voc->no_scaler) {
+		struct avi_segment_format display_fmt;
+		avi_segment_get_input_format(voc->display, &display_fmt);
+
+		voc->crop = *crop;
+		/* S_CROP is an write only so we dont modify parameter values */
+		voc->crop.c.width = voc->win.w.width;
+		voc->crop.c.height = voc->win.w.height;
+		/* Left and top are adjusted to avoid part of the image to be out of display.
+		 * Such configuration can be achieved by setting overlay window first.
+		 * NOTE(review): the unsigned cast makes a negative left/top
+		 * compare huge, so min() picks the right edge; the < 0 tests
+		 * below then catch the case where the window is larger than
+		 * the display (unsigned subtraction wrapped). Confirm this is
+		 * the intended clamping. */
+		voc->crop.c.left = min((unsigned) voc->crop.c.left,
+				       display_fmt.width - voc->crop.c.width);
+		voc->crop.c.top = min((unsigned)voc->crop.c.top,
+				      display_fmt.height - voc->crop.c.height);
+		if (voc->crop.c.left < 0)
+			voc->crop.c.left = 0;
+		if (voc->crop.c.top < 0)
+			voc->crop.c.top = 0;
+
+		ret = 0;
+		goto unlock;
+	}
+
+	ret = avi_voc_cropcap_unlocked(voc, &ccap);
+	if (ret)
+		goto unlock;
+
+	/* Shrink/move the rectangle so it fits the display bounds. */
+	avi_v4l2_crop_adjust(&crop->c, &ccap.bounds);
+
+	if (crop->c.width == 0 || crop->c.height == 0) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	spin_lock_irq(&voc->vbuf_lock);
+
+	old_crop = voc->crop;
+	voc->crop = *crop;
+
+	if (vb2_is_streaming(&voc->vb2_vidq)) {
+		ret = avi_voc_update_segment_layout(voc);
+		if (ret) {
+			/* Apply failed: restore the previous rectangle. */
+			voc->crop = old_crop;
+			goto unlock_spin;
+		}
+	}
+
+	ret = 0;
+
+unlock_spin:
+	spin_unlock_irq(&voc->vbuf_lock);
+unlock:
+	mutex_unlock(&voc->lock);
+	return ret;
+}
+
+/* Add @usecs microseconds to @to, normalizing tv_usec into
+ * [0, USEC_PER_SEC). The original single conditional only carried one
+ * second; a loop makes the helper correct for any usecs value. */
+static inline void timeval_add_usec(struct timeval* to, unsigned int usecs)
+{
+	to->tv_usec += usecs;
+	while (to->tv_usec >= USEC_PER_SEC) {
+		to->tv_usec -= USEC_PER_SEC;
+		to->tv_sec++;
+	}
+}
+
+/* True iff @tv holds the epoch (both fields zero); a zero timestamp on a
+ * queued buffer means "display as soon as possible". */
+static inline int timeval_isnull(const struct timeval *tv)
+{
+	return !(tv->tv_sec || tv->tv_usec);
+}
+
+/* Must be called with voc->vbuf_lock held */
+static inline void avi_voc_handle_expired_frame(struct avi_voc *voc)
+{
+// XXX : is it usefull with videobuf2 ?
+#if 1
+	/* Return the frame that just left the screen to userspace. */
+	if (voc->expired_frame) {
+		/* This frame is no longer displayed, we can safely send it back
+		 * to the user. */
+		vb2_buffer_done(&voc->expired_frame->vb, VB2_BUF_STATE_DONE);
+		voc->expired_frame = NULL;
+	}
+#endif
+}
+
+/* Main interrupt while streaming.
+ *
+ * Runs once per display frame. Picks the most up-to-date queued buffer
+ * whose timestamp is due (dropping older ones if we are late), programs it
+ * for display and retires the previously displayed frame at the following
+ * interrupt. Buffers are assumed to be queued in timestamp order. */
+static unsigned avi_voc_interrupt(struct avi_segment *s, enum avi_field* f)
+{
+	struct avi_voc *voc = s->priv;
+	struct avi_voc_vbuf *candidate;
+	struct avi_voc_vbuf *next = NULL;
+	struct timeval display_date;
+	struct timeval target_date;
+
+	v4l2_get_timestamp(&display_date);
+
+	/* The frame will be displayed at the next interrupt, not right now */
+	timeval_add_usec(&display_date, voc->display->period_us);
+
+	target_date = display_date;
+
+	/* We put ourselves at the centre of the frame period, we want to
+	 * display the frame the closest in time (in the past or future). */
+	timeval_add_usec(&target_date, atomic_read(&voc->period_us) / 2);
+
+	spin_lock(&voc->vbuf_lock);
+
+	avi_voc_handle_expired_frame(voc);
+
+	/* Look for a new frame to display. The algorithm is to iterate through
+	 * the bufferqueue until we reach the end or a frame that's not supposed
+	 * to be displayed yet. We then take the one before that (if any). This
+	 * takes care of dropping frames if we're late (we take the most
+	 * up-to-date frame). Of course this assumes that the buffers are queued
+	 * in order. */
+	while (!list_empty(&voc->bufqueue)) {
+
+		candidate = list_first_entry(&voc->bufqueue,
+					     struct avi_voc_vbuf,
+					     list);
+
+		/* A zero timestamp means "display ASAP". */
+		if (!timeval_isnull(&candidate->vb.v4l2_buf.timestamp) &&
+		    timeval_compare(&candidate->vb.v4l2_buf.timestamp,
+				    &target_date) > 0)
+			/* Too early to display this frame. */
+			break;
+
+		/* This frame is ready to be displayed */
+		if (next) {
+			/* we have to drop the previous candidate, we're running
+			 * late! */
+			vb2_buffer_done(&next->vb, VB2_BUF_STATE_DONE);
+		}
+
+		next = candidate;
+
+		list_del(&next->list);
+		next->in_queue = 0;
+		voc->frame_count++;
+
+		next->vb.v4l2_buf.sequence = voc->frame_count;
+		/* I wonder if I should set this when we skip the frame? */
+		next->vb.v4l2_buf.timestamp = display_date;
+	}
+
+	if (next) {
+		/* The currently displayed frame (if any) will be released at
+		 * the next interrupt */
+		voc->expired_frame = voc->displayed_frame;
+		voc->displayed_frame = next;
+	}
+
+	if (next)
+		avi_voc_display(voc, next);
+
+	spin_unlock(&voc->vbuf_lock);
+
+	return 0;
+}
+
+/* Final IRQ of the streamoff sequence: the hardware no longer references
+ * any buffer, so release the last expired frame, detach ourselves from the
+ * segment interrupt and wake up the streamoff waiter. */
+static unsigned avi_voc_stopped(struct avi_segment *s, enum avi_field* f)
+{
+	struct avi_voc *voc = s->priv;
+
+	spin_lock(&voc->vbuf_lock);
+	avi_voc_handle_expired_frame(voc);
+	spin_unlock(&voc->vbuf_lock);
+
+	avi_segment_register_irq(voc->segment, NULL);
+
+	/* Streamoff sequence is complete */
+	wake_up_interruptible(&voc->waitq);
+
+	return 0;
+}
+
+/* IRQ handler called when we want to shutdown the stream: retires the
+ * expired frame, hides and deactivates the segment, and schedules
+ * avi_voc_stopped() to confirm completion at the next interrupt. */
+static unsigned avi_voc_finish(struct avi_segment *s, enum avi_field* f)
+{
+	struct avi_voc *voc = s->priv;
+
+	spin_lock(&voc->vbuf_lock);
+
+	avi_voc_handle_expired_frame(voc);
+
+	/* Update the frame pointers while still holding vbuf_lock: they are
+	 * protected by it and avi_voc_cleanup_leftovers() may manipulate
+	 * them concurrently from user context. The original code did this
+	 * after dropping the lock. */
+	voc->expired_frame = voc->displayed_frame;
+	voc->displayed_frame = NULL;
+
+	spin_unlock(&voc->vbuf_lock);
+
+	avi_segment_hide(voc->segment);
+	avi_segment_deactivate(voc->segment);
+
+	/* At the next interrupt we're sure we're no longer in use */
+	avi_segment_register_irq(voc->segment, &avi_voc_stopped);
+
+	return 0;
+}
+
+/* Release any frame still referenced after streamoff: first the expired
+ * frame, then the displayed frame (retired the same way). Safe to call
+ * even if the streamoff IRQ sequence did not run to completion. */
+static void avi_voc_cleanup_leftovers(struct avi_voc *voc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&voc->vbuf_lock, flags);
+
+	avi_voc_handle_expired_frame(voc);
+
+	voc->expired_frame   = voc->displayed_frame;
+	voc->displayed_frame = NULL;
+	avi_voc_handle_expired_frame(voc);
+
+	spin_unlock_irqrestore(&voc->vbuf_lock, flags);
+}
+
+/* V4L2 ioctl dispatch table; buffer handling is delegated to the vb2
+ * helpers. */
+static struct v4l2_ioctl_ops const avi_voc_ioctl_ops = {
+	.vidioc_querycap            = avi_voc_querycap,
+	.vidioc_enum_output         = avi_voc_enum_output,
+	.vidioc_s_output            = avi_voc_s_output,
+	.vidioc_g_output            = avi_voc_g_output,
+	.vidioc_g_parm              = avi_voc_g_parm,
+	.vidioc_s_parm              = avi_voc_s_parm,
+	.vidioc_enum_fmt_vid_out    = avi_voc_enum_fmt_vid_out,
+	.vidioc_try_fmt_vid_out     = avi_voc_try_fmt_vid_out,
+	.vidioc_s_fmt_vid_out       = avi_voc_s_fmt_vid_out,
+	.vidioc_g_fmt_vid_out       = avi_voc_g_fmt_vid_out,
+	.vidioc_try_fmt_vid_overlay = avi_voc_try_fmt_vid_overlay,
+	.vidioc_s_fmt_vid_overlay   = avi_voc_s_fmt_vid_overlay,
+	.vidioc_g_fmt_vid_overlay   = avi_voc_g_fmt_vid_overlay,
+	.vidioc_cropcap             = avi_voc_cropcap,
+	.vidioc_g_crop              = avi_voc_g_crop,
+	.vidioc_s_crop              = avi_voc_s_crop,
+	.vidioc_reqbufs             = vb2_ioctl_reqbufs,
+	.vidioc_querybuf            = vb2_ioctl_querybuf,
+	.vidioc_qbuf                = vb2_ioctl_qbuf,
+	.vidioc_dqbuf               = vb2_ioctl_dqbuf,
+	.vidioc_expbuf              = vb2_ioctl_expbuf,
+	.vidioc_streamon            = vb2_ioctl_streamon,
+	.vidioc_streamoff           = vb2_ioctl_streamoff,
+};
+
+/*******************************************************************************
+ * VIDEOBUF2 OPERATIONS
+ ******************************************************************************/
+
+/* vb2 .queue_setup: compute the buffer size from the current pixel format
+ * and source window, enforce the minimum buffer count and set up the
+ * dma-contig allocation context. */
+static int avi_voc_queue_setup(struct vb2_queue *vq,
+			       const struct v4l2_format *fmt,
+			       unsigned int *nbuffers, unsigned int *nplanes,
+			       unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct avi_voc *voc = vb2_get_drv_priv(vq);
+	struct avi_segment_format dma_fmt;
+	unsigned plane0_size;
+	unsigned plane1_size;
+	void *ctx;
+	int ret;
+
+	ret = avi_v4l2_to_segment_fmt(&voc->pix,
+				      &voc->win.w,
+				      &dma_fmt,
+				      &plane0_size,
+				      &plane1_size);
+	if (ret)
+		return ret;
+
+	/* Both planes live in a single contiguous buffer. */
+	sizes[0] = plane0_size + plane1_size;
+
+	if (*nbuffers < AVI_VOC_MIN_BUFFERS)
+		*nbuffers = AVI_VOC_MIN_BUFFERS;
+
+	*nplanes = 1;
+
+	/* vb2_dma_contig_init_ctx() can fail: report the error instead of
+	 * handing an ERR_PTR to videobuf2.
+	 * NOTE(review): the context is never vb2_dma_contig_cleanup_ctx()'d,
+	 * so it leaks on every queue setup -- it should be cached (e.g. in
+	 * struct avi_voc) and released on teardown. */
+	ctx = vb2_dma_contig_init_ctx(voc->dev);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+	alloc_ctxs[0] = ctx;
+
+	INIT_LIST_HEAD(&voc->bufqueue);
+
+	return 0;
+}
+
+/* vb2 .buf_queue: append the buffer to our pending display queue.
+ * Called with the vb2 queue lock held (no need to protect ourselves when
+ * we play with the queue itself, only vbuf_lock for the list shared with
+ * the interrupt handler). */
+static void avi_voc_buffer_queue(struct vb2_buffer *vb)
+{
+	int ret;
+	struct avi_voc *voc = vb2_get_drv_priv(vb->vb2_queue);
+	struct avi_voc_vbuf *buf = container_of(vb, struct avi_voc_vbuf, vb);
+
+	unsigned long flags;
+
+	/* Android sync framework: wait (bounded to 100ms) until the producer
+	 * signals the buffer before letting it reach the display. On timeout
+	 * we queue it anyway and only log. */
+	if (vb->acquire_fence) {
+		ret = sync_fence_wait(vb->acquire_fence, 100);
+		if(ret < 0){
+			dprintk(voc,"Acquire fence timed out (100ms)");
+		}
+		sync_fence_put(vb->acquire_fence);
+		vb->acquire_fence = NULL;
+	}
+
+	spin_lock_irqsave(&voc->vbuf_lock, flags);
+
+	list_add_tail(&buf->list, &voc->bufqueue);
+	buf->in_queue = 1;
+	spin_unlock_irqrestore(&voc->vbuf_lock, flags);
+}
+
+/* vb2 .buf_cleanup: make sure the buffer is unlinked from our pending
+ * queue before videobuf2 frees it. */
+static void avi_voc_buffer_cleanup(struct vb2_buffer *vb)
+{
+	struct avi_voc      *voc = vb2_get_drv_priv(vb->vb2_queue);
+	struct avi_voc_vbuf *buf = container_of(vb, struct avi_voc_vbuf, vb);
+	unsigned long        flags;
+
+	spin_lock_irqsave(&voc->vbuf_lock, flags);
+
+	if (buf->in_queue) {
+		list_del(&buf->list);
+		buf->in_queue = 0;
+	}
+
+	spin_unlock_irqrestore(&voc->vbuf_lock, flags);
+}
+
+/* vb2 .buf_init: reset our per-buffer bookkeeping. */
+static int avi_voc_buffer_init(struct vb2_buffer *vb)
+{
+	struct avi_voc      *voc = vb2_get_drv_priv(vb->vb2_queue);
+	struct avi_voc_vbuf *buf = container_of(vb, struct avi_voc_vbuf, vb);
+	unsigned long        flags;
+
+	spin_lock_irqsave(&voc->vbuf_lock, flags);
+	INIT_LIST_HEAD(&buf->list);
+	buf->in_queue = 0;
+	spin_unlock_irqrestore(&voc->vbuf_lock, flags);
+
+	return 0;
+}
+/* vb2 .start_streaming: build the AVI segment chain (DMA in, optional
+ * scaler/converter/planar support), connect it to the display and arm the
+ * per-frame interrupt.
+ * Made static: it is only referenced through avi_voc_vqueue_ops, like
+ * every other vb2 callback in this file. */
+static int avi_voc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct avi_voc *voc = vb2_get_drv_priv(q);
+	struct avi_segment_format display_fmt;
+	enum avi_colorspace in_cspace;
+	struct avi_dma_pixfmt in_pixfmt;
+	unsigned long caps = AVI_CAP_DMA_IN;
+	int ret;
+
+	mutex_lock(&voc->lock);
+
+	voc->frame_count = 0;
+
+	/* The scaler also brings the buffering needed for resizing. */
+	if (!voc->no_scaler)
+		caps |= AVI_CAP_SCAL | AVI_CAP_BUFFER;
+
+	avi_segment_get_input_format(voc->display, &display_fmt);
+
+	in_cspace = avi_v4l2_to_avi_cspace(voc->pix.colorspace);
+
+	/* Colorspace mismatch with the display requires a converter. */
+	if (in_cspace != display_fmt.colorspace)
+		caps |= AVI_CAP_CONV;
+
+	in_pixfmt = avi_v4l2_to_avi_pixfmt(voc->pix.pixelformat);
+	if (avi_pixfmt_is_planar(in_pixfmt))
+		caps |= AVI_CAP_PLANAR;
+
+	voc->segment = avi_segment_build(&caps,
+	                                 "voc",
+	                                 avi_voc_instance(voc),
+	                                 -1,
+	                                 voc->dev);
+	if (IS_ERR(voc->segment)) {
+		ret = PTR_ERR(voc->segment);
+		dev_err(voc->dev, "failed to build overlay segment caps=%lx\n",
+		        caps);
+		goto no_segment;
+	}
+
+	voc->segment->priv = voc;
+
+	/* Configure the segment */
+	ret = avi_voc_update_segment_layout(voc);
+	if (ret)
+		goto bad_segment_fmt;
+
+	/* Wait until a frame is configured to actually display the voc */
+	avi_segment_hide(voc->segment);
+
+	ret = avi_segment_activate(voc->segment);
+	if (ret)
+		goto segment_activate_failed;
+
+	ret = avi_segment_connect(voc->segment, voc->display, voc->zorder);
+	if (ret) {
+		dev_err(voc->dev,
+		        "couldn't connect voc and output segments (%s)\n",
+		        voc->display->id);
+		goto connect_failed;
+	}
+
+	avi_segment_register_irq(voc->segment, &avi_voc_interrupt);
+
+	goto unlock;
+
+ connect_failed:
+	avi_segment_deactivate(voc->segment);
+ segment_activate_failed:
+ bad_segment_fmt:
+	avi_segment_teardown(voc->segment);
+ no_segment:
+	voc->segment = NULL;
+	voc->plane0_size = 0;
+	voc->plane1_size = 0;
+unlock:
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* The streamoff sequence is over once no buffer remains displayed or
+ * pending expiry (see avi_voc_stopped()). */
+static int avi_voc_is_streaming_ended(struct avi_voc *voc)
+{
+	return !voc->expired_frame && !voc->displayed_frame;
+}
+
+/* vb2 .stop_streaming: run the two-interrupt shutdown handshake
+ * (avi_voc_finish -> avi_voc_stopped), then tear the segment down. */
+static int avi_voc_stop_streaming(struct vb2_queue *q)
+{
+	struct avi_voc *voc = vb2_get_drv_priv(q);
+
+	mutex_lock(&voc->lock);
+
+	avi_segment_register_irq(voc->segment, &avi_voc_finish);
+
+	/* I use an arbitrary timeout value of 1 second. I don't check for
+	 * errors, if for some reason the sequence does not finish the worst
+	 * that can happen is one or two gargabe frames being displayed (and if
+	 * the sequence does not finish in all likelyhood it means that the
+	 * display stopped) */
+	wait_event_interruptible_timeout(voc->waitq,
+	                                 avi_voc_is_streaming_ended(voc),
+	                                 HZ);
+
+	avi_segment_register_irq(voc->segment, NULL);
+	voc->hw_streaming = 0;
+	avi_segment_deactivate(voc->segment);
+
+	/* In case the streamoff sequence didn't complete properly, make sure
+	 * no frame was left lying around */
+	avi_voc_cleanup_leftovers(voc);
+
+	avi_segment_disconnect(voc->segment, voc->display);
+	avi_segment_teardown(voc->segment);
+	voc->segment = NULL;
+
+	voc->plane0_size = 0;
+	voc->plane1_size = 0;
+
+	mutex_unlock(&voc->lock);
+
+	return 0;
+}
+
+/* vb2 .wait_finish callback: re-take the queue lock after a blocking
+ * wait. */
+static void avi_voc_lock(struct vb2_queue *q)
+{
+	struct avi_voc *voc = vb2_get_drv_priv(q);
+	mutex_lock(&voc->vb2_lock);
+}
+
+/* vb2 .wait_prepare callback: drop the queue lock around blocking waits so
+ * other file operations can make progress. */
+static void avi_voc_unlock(struct vb2_queue *q)
+{
+	struct avi_voc *voc = vb2_get_drv_priv(q);
+	mutex_unlock(&voc->vb2_lock);
+}
+
+/* videobuf2 callback table for the output queue. */
+static struct vb2_ops const avi_voc_vqueue_ops = {
+	.queue_setup     = avi_voc_queue_setup,
+	.buf_init        = avi_voc_buffer_init,
+	.buf_queue       = avi_voc_buffer_queue,
+	.buf_cleanup     = avi_voc_buffer_cleanup,
+	.start_streaming = avi_voc_start_streaming,
+	.stop_streaming  = avi_voc_stop_streaming,
+	.wait_prepare    = avi_voc_unlock,
+	.wait_finish     = avi_voc_lock,
+};
+
+/*******************************************************************************
+ * V4L2 FILE OPERATIONS
+ ******************************************************************************/
+
+/* open(): register a v4l2 file handle; the first opener also binds the VOC
+ * to its display segment and programs sane format defaults. */
+static int avi_voc_open(struct file *file)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	struct avi_voc_fh *fh;
+	struct avi_segment_format display_fmt;
+	struct v4l2_format v4l2_fmt = {0};
+	int ret;
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+
+	v4l2_fh_init(&fh->vfh, voc->vdev);
+	v4l2_fh_add(&fh->vfh);
+	file->private_data = &fh->vfh;
+
+	/* user_count and the first-open initialization must be serialized:
+	 * the original code incremented the counter and tested it outside of
+	 * voc->lock, racing concurrent open()/release(). */
+	mutex_lock(&voc->lock);
+
+	voc->user_count++;
+
+	/* With multi open we dont need to go further if VOC is already
+	 * initialized */
+	if (voc->user_count > 1) {
+		ret = 0;
+		goto unlock;
+	}
+
+	/* Fresh open, let's reinitialize everything to sane values */
+
+	init_waitqueue_head(&voc->waitq);
+
+	voc->display = avi_segment_find(voc->output);
+	if (!voc->display) {
+		dev_err(voc->dev,
+			"Couldn't get display segment \"%s\"",
+			voc->output);
+		ret = -ENODEV;
+		goto no_display;
+	}
+
+	/* Use sane value for default format */
+	avi_segment_get_input_format(voc->display, &display_fmt);
+
+	v4l2_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	v4l2_fmt.fmt.pix.width       = display_fmt.width;
+	v4l2_fmt.fmt.pix.height      = display_fmt.height;
+	v4l2_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
+	v4l2_fmt.fmt.pix.field       = V4L2_FIELD_NONE;
+	v4l2_fmt.fmt.pix.colorspace  = V4L2_COLORSPACE_SRGB;
+
+	ret = avi_voc_s_fmt_vid_out_unlocked(file, &fh->vfh, &v4l2_fmt);
+	if (ret)
+		goto bad_fmt;
+
+	/* Set the default framerate to that of the display. I'm not sure if
+	 * that's the right thing to do but it seems that the v4l2 compliance
+	 * tool complains if I set the framerate (G_PARM) to 0 */
+	atomic_set(&voc->period_us, voc->display->period_us);
+
+	voc->hw_streaming = 0;
+	voc->hide = 0;
+	avi_voc_ctrl_unhide(voc);
+
+	ret = 0;
+	goto unlock;
+
+ bad_fmt:
+	/* We are the first (and only) opener here, drop the display. */
+	voc->display = NULL;
+ no_display:
+	v4l2_fh_del(&fh->vfh);
+	file->private_data = NULL;
+	kfree(fh);
+	voc->user_count--;
+ unlock:
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* release(): let vb2 tear the queue down, then drop our user reference.
+ * The last closer unbinds the VOC from its display segment. */
+static int avi_voc_release(struct file *file)
+{
+	struct avi_voc *voc = video_drvdata(file);
+	int ret;
+
+	ret = vb2_fop_release(file);
+
+	/* user_count and ->display are shared with avi_voc_open(): update
+	 * them under voc->lock (the original code did not). */
+	mutex_lock(&voc->lock);
+	if (--voc->user_count == 0)
+		voc->display = NULL;
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* File operations; mmap/poll are the stock vb2 helpers. */
+static struct v4l2_file_operations const avi_voc_fops = {
+	.owner          = THIS_MODULE,
+	.open           = avi_voc_open,
+	.release        = avi_voc_release,
+	.mmap           = vb2_fop_mmap,
+	.poll           = vb2_fop_poll,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+/*******************************************************************************
+ * SYSFS INTERFACE
+ ******************************************************************************/
+
+/* sysfs read: current blender z-order. */
+static ssize_t avi_voc_show_zorder(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct avi_voc *voc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", voc->zorder);
+}
+
+/* sysfs write: set the blender z-order. Refused while the segment is
+ * connected since the blender input is already assigned. */
+static ssize_t avi_voc_store_zorder(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct avi_voc *voc = dev_get_drvdata(dev);
+	long zorder;
+	ssize_t ret = count;
+
+	if (strict_strtol(buf, 0, &zorder) < 0)
+		return -EINVAL;
+
+	/* voc->segment is created/destroyed under voc->lock by the streaming
+	 * paths: take it here too (avi_voc_store_no_scaler already does). */
+	mutex_lock(&voc->lock);
+
+	/* We dont change zorder of an already connected segment */
+	if (voc->segment && avi_segment_connected(voc->segment))
+		ret = -EBUSY;
+	else
+		voc->zorder = zorder;
+
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* sysfs read: number of frames pushed to the display since streamon. */
+static ssize_t avi_voc_show_frame_count(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	const struct avi_voc *voc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", voc->frame_count);
+}
+
+/* sysfs read: whether the scaler is excluded from the segment. */
+static ssize_t avi_voc_show_no_scaler(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	const struct avi_voc *voc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", voc->no_scaler);
+}
+
+/* sysfs write: toggle scaler-less mode. Refused while a segment exists
+ * since the capability set is fixed at start_streaming time. */
+static ssize_t avi_voc_store_no_scaler(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct avi_voc *voc = dev_get_drvdata(dev);
+	long no_scaler;
+	/* ssize_t, not int: avoids truncating large write counts. */
+	ssize_t ret;
+
+	if (strict_strtol(buf, 0, &no_scaler) < 0)
+		return -EINVAL;
+
+	mutex_lock(&voc->lock);
+	/* We cant remove the scaler if the segment is already built */
+	if (voc->segment) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	ret = count;
+	voc->no_scaler = no_scaler;
+unlock:
+	mutex_unlock(&voc->lock);
+
+	return ret;
+}
+
+/* sysfs attributes exported on the video device. zorder and no_scaler are
+ * also reachable as (volatile) V4L2 controls, see avi_voc_ctrl.c. */
+static struct device_attribute avi_voc_sysfs_attrs[] = {
+	__ATTR(zorder, S_IRUGO | S_IWUSR,
+	       avi_voc_show_zorder, avi_voc_store_zorder),
+	__ATTR(frame_count, S_IRUGO,
+	       avi_voc_show_frame_count, NULL),
+	__ATTR(no_scaler, S_IRUGO | S_IWUSR,
+	       avi_voc_show_no_scaler, avi_voc_store_no_scaler),
+};
+
+/*******************************************************************************
+ * INIT / REMOVE
+ ******************************************************************************/
+
+/* Register the VOC with V4L2: vb2 queue, video device, controls and sysfs
+ * attributes. Undone by avi_voc_v4l2_destroy().
+ *
+ * Fixes over the previous revision:
+ *  - ret was left at 0 when avi_voc_ctrl_create() failed;
+ *  - the goto in the sysfs unwind sat inside the while loop, so only one
+ *    attribute was removed before bailing out;
+ *  - after successful registration the device must be torn down with
+ *    video_unregister_device() only: vdev->release (video_device_release)
+ *    frees it, so releasing explicitly was a double free. */
+static int avi_voc_v4l2_init(struct avi_voc *voc)
+{
+	struct video_device *vdev;
+	int i;
+	int ret;
+
+	spin_lock_init(&voc->vbuf_lock);
+	mutex_init(&voc->lock);
+
+	voc->v4l2_dev = avi_v4l2_get_device();
+	if (!voc->v4l2_dev) {
+		ret = -ENODEV;
+		goto no_v4l2_dev;
+	}
+
+	memset(&voc->vb2_vidq, 0, sizeof(voc->vb2_vidq));
+	voc->vb2_vidq.type            = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	voc->vb2_vidq.io_modes        = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	voc->vb2_vidq.drv_priv        = voc;
+	voc->vb2_vidq.buf_struct_size = sizeof(struct avi_voc_vbuf);
+	voc->vb2_vidq.ops             = &avi_voc_vqueue_ops;
+	voc->vb2_vidq.mem_ops         = &vb2_dma_contig_memops;
+	ret = vb2_queue_init(&voc->vb2_vidq);
+	if (ret)
+		goto vb2_queue_init_failed;
+
+	vdev = video_device_alloc();
+	if (!vdev) {
+		ret = -ENOMEM;
+		goto vdev_alloc_failed;
+	}
+
+	voc->vdev = vdev;
+	mutex_init(&voc->vb2_lock);
+
+	strlcpy(vdev->name, dev_name(voc->dev), sizeof(vdev->name));
+
+	vdev->parent       = voc->dev;
+	vdev->current_norm = V4L2_STD_UNKNOWN;
+	vdev->fops         = &avi_voc_fops;
+	vdev->ioctl_ops    = &avi_voc_ioctl_ops;
+	vdev->release      = &video_device_release;
+	vdev->vfl_type     = VFL_TYPE_GRABBER;
+	vdev->tvnorms      = V4L2_STD_UNKNOWN;
+	/* We handle the locking ourselves */
+	vdev->lock         = &voc->vb2_lock;
+	vdev->queue        = &voc->vb2_vidq;
+
+	video_set_drvdata(vdev, voc);
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+	if (ret)
+		goto video_register_failed;
+
+	ret = avi_voc_ctrl_create(voc);
+	if (ret)
+		goto ctrl_failed;
+	v4l2_ctrl_handler_setup(&voc->ctrl_handler);
+
+	for (i = 0; i < ARRAY_SIZE(avi_voc_sysfs_attrs); i++) {
+		ret = device_create_file(&voc->vdev->dev,
+		                         &avi_voc_sysfs_attrs[i]);
+		if (ret) {
+			/* Remove everything created so far, then unwind. */
+			while (i--)
+				device_remove_file(&voc->vdev->dev,
+				                   &avi_voc_sysfs_attrs[i]);
+			goto sysfs_failed;
+		}
+	}
+
+	return 0;
+
+ sysfs_failed:
+ ctrl_failed:
+	avi_voc_ctrl_free(voc);
+	/* Unregistering drops the last reference and invokes vdev->release
+	 * (video_device_release): do not release explicitly here. */
+	video_unregister_device(voc->vdev);
+	goto put_device;
+ video_register_failed:
+	video_device_release(voc->vdev);
+ vdev_alloc_failed:
+ vb2_queue_init_failed:
+ put_device:
+	avi_v4l2_put_device(voc->v4l2_dev);
+ no_v4l2_dev:
+	return ret;
+}
+
+/* Undo avi_voc_v4l2_init(): remove sysfs files and controls, then
+ * unregister the video device. */
+static void __devexit avi_voc_v4l2_destroy(struct avi_voc *voc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(avi_voc_sysfs_attrs); i++)
+		device_remove_file(&voc->vdev->dev, &avi_voc_sysfs_attrs[i]);
+	avi_voc_ctrl_free(voc);
+	/* video_unregister_device() drops the last reference and invokes
+	 * vdev->release (video_device_release): calling
+	 * video_device_release() again, as the previous revision did, was a
+	 * double free. */
+	video_unregister_device(voc->vdev);
+	avi_v4l2_put_device(voc->v4l2_dev);
+}
+
+/* Platform probe: allocate the driver state, record the target display
+ * segment name from platform data and register with V4L2. */
+static int __devinit avi_voc_probe(struct platform_device* pdev)
+{
+	const struct avi_voc_plat_data *pdata;
+	struct avi_voc                 *voc;
+	int                             ret;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "cant get platform data\n");
+		return -EINVAL;
+	}
+
+	voc = kzalloc(sizeof(*voc), GFP_KERNEL);
+	if (voc == NULL) {
+		dev_err(&pdev->dev, "failed to allocate voc struct\n");
+		return -ENOMEM;
+	}
+
+	voc->dev = &pdev->dev;
+	strlcpy(voc->output, pdata->display, sizeof(voc->output));
+
+	ret = avi_voc_v4l2_init(voc);
+	if (ret) {
+		kfree(voc);
+		return ret;
+	}
+
+	dev_info(&pdev->dev,
+	         "V4L2 device successfuly registered as %s\n",
+	         video_device_node_name(voc->vdev));
+
+	dev_set_drvdata(&pdev->dev, voc);
+	platform_set_drvdata(pdev, voc);
+
+	return 0;
+}
+
+/* Platform remove: tear down the V4L2 registration and free the state. */
+static int __devexit avi_voc_remove(struct platform_device* pdev)
+{
+	struct avi_voc *voc = platform_get_drvdata(pdev);
+
+	if (voc == NULL)
+		return -ENODEV;
+
+	avi_voc_v4l2_destroy(voc);
+	kfree(voc);
+
+	platform_set_drvdata(pdev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+/* PM suspend hook.
+ * NOTE(review): nothing is deactivated here although avi_voc_resume()
+ * re-activates voc->segment -- presumably the AVI core powers the segment
+ * down on its own; confirm. */
+static int avi_voc_suspend(struct device *dev)
+{
+	dev_info(dev, "voc shutting down\n");
+
+	return 0;
+}
+
+/* PM resume hook: re-program the frame that was on screen (if any) and
+ * re-activate the segment so the overlay reappears after wake-up. */
+static int avi_voc_resume(struct device *dev)
+{
+	struct avi_voc *voc = dev_get_drvdata(dev);
+
+	dev_info(dev, "voc resuming\n");
+
+	if (voc->segment) {
+		if (voc->displayed_frame)
+			avi_voc_display(voc, voc->displayed_frame);
+		avi_segment_activate(voc->segment);
+	}
+
+	return 0;
+}
+
+/* Power-management callbacks; const since the table is never modified and
+ * device_driver.pm expects a const pointer. */
+static const struct dev_pm_ops avi_voc_dev_pm_ops = {
+	.suspend = &avi_voc_suspend,
+	.resume  = &avi_voc_resume,
+};
+
+/* Platform driver glue. */
+static struct platform_driver avi_voc_driver = {
+	.driver = {
+		.name  = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm    = &avi_voc_dev_pm_ops,
+	},
+	.probe  = avi_voc_probe,
+	.remove = __devexit_p(avi_voc_remove),
+};
+
+/* Module entry point: register the platform driver. */
+static int __init avi_init_voc(void)
+{
+	return platform_driver_register(&avi_voc_driver);
+}
+module_init(avi_init_voc);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit avi_exit_voc(void)
+{
+	platform_driver_unregister(&avi_voc_driver);
+}
+module_exit(avi_exit_voc);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_AUTHOR("Victor Lambret <victor.lambret.ext@parrot.com>");
+MODULE_DESCRIPTION("Parrot Advanced Video Interface video output overlays");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avi_voc.h b/drivers/parrot/media/video/avi_voc.h
new file mode 100644
index 00000000000000..f4e044783fbe49
--- /dev/null
+++ b/drivers/parrot/media/video/avi_voc.h
@@ -0,0 +1,19 @@
+#ifndef _AVI_VOC_H
+#define _AVI_VOC_H
+
+#include <linux/videodev2.h>
+#include <video/avi_cap.h>
+
+#define V4L2_CID_VOC_BASE (V4L2_CID_USER_BASE | 0x1000)
+#define V4L2_CID_VOC_NO_SCALER (V4L2_CID_VOC_BASE + 0)
+#define V4L2_CID_VOC_ZORDER (V4L2_CID_VOC_BASE + 1)
+#define V4L2_CID_VOC_HIDE (V4L2_CID_VOC_BASE + 2)
+
+/**
+ * struct avi_voc_plat_data - AVI voc specific data
+ * @display: name of the display segment this VOC outputs to; resolved with
+ *           avi_segment_find() on first open
+ */
+struct avi_voc_plat_data {
+	char *display;
+};
+
+#endif /* _AVI_VOC_H */
diff --git a/drivers/parrot/media/video/avi_voc_core.h b/drivers/parrot/media/video/avi_voc_core.h
new file mode 100644
index 00000000000000..5684a79fc817f5
--- /dev/null
+++ b/drivers/parrot/media/video/avi_voc_core.h
@@ -0,0 +1,145 @@
+/*
+ * linux/drivers/parrot/video/avi_voc_core.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 03-Mar-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_VOC_CORE_H_
+#define _AVI_VOC_CORE_H_
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gcd.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-ctrls.h>
+
+#include "avi_v4l2.h"
+#include "video/avi_limit.h"
+#include "avi_voc.h"
+
+#define DRIVER_NAME "avi-voc"
+
+#define AVI_VOC_ENUM_NB 3
+#define AVI_VOC_MIN_BUFFERS 3
+
+#ifdef DEBUG
+/* Debug printout prefixed with the calling function's name. */
+#define dprintk(voc, format_, args...) \
+	dev_dbg((voc)->dev, "%s: " format_, __func__, ##args)
+
+#else /* DEBUG */
+/* Non-DEBUG build: the message and its arguments are discarded entirely,
+ * so dprintk() arguments must never carry side effects. */
+#define dprintk(voc_, format_, args...) (void)voc_
+#endif /* DEBUG */
+
+
+/* Per-buffer driver state wrapped around the vb2 buffer. */
+struct avi_voc_vbuf {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer vb;
+	/* link in avi_voc->bufqueue while waiting to be displayed */
+	struct list_head  list;
+	/* non-zero while linked in the bufqueue (guarded by vbuf_lock) */
+	int               in_queue;
+};
+
+struct avi_voc {
+	struct device             *dev;
+	/* V4L2 video device - keep first for cast purpose ! */
+	struct video_device       *vdev;
+
+	struct v4l2_device        *v4l2_dev;
+
+	/* Display segment this VOC feeds (set on first open). */
+	struct avi_segment        *display;
+
+	/* This lock protects the bufqueue, the displayed and expired frame
+	 * pointers as well as the pix, win and crop configs */
+	spinlock_t                 vbuf_lock;
+
+
+	/* The overlay format selection is a bit tricky. A picture being worth a
+	 * thousand words here's how it works:
+	 *
+	 *  Input frame                           Display
+	 *  ----------------------------------    ---------------------------
+	 * |.pix.(fmt_vid_out)..............|    |~Display~segment~format~~|
+	 * |................................|    |~~~~~~~~~~~~~~~~~~~~~~~~~|
+	 * |................................|    |~~~~~~~~~~~~~~~~~~~~~~~~~|
+	 * |....________________________....|    |~~_______________~~~~~~~~|
+	 * |...| win (fmt_vid_overlay)  |...|    |~| crop (crop)   |~~~~~~~|
+	 * |...|                        |...| voc |~|               |~~~~~~~|
+	 * |...|                        |============>|               |~~~~~~~|
+	 * |...|________________________|...|    |~|               |~~~~~~~|
+	 * |................................|    |~|_______________|~~~~~~~|
+	 *  ----------------------------------    |~~~~~~~~~~~~~~~~~~~~~~~~~|
+	 *                                        |~~~~~~~~~~~~~~~~~~~~~~~~~|
+	 *                                         ---------------------------
+	 */
+	/* format of the input frame */
+	struct v4l2_pix_format     pix;
+	/* window within the input frame that we want to display */
+	struct v4l2_window         win;
+	/* position and size within the output window */
+	struct v4l2_crop           crop;
+
+	struct vb2_queue           vb2_vidq;
+	/* buffers queued by userspace, waiting for their display date */
+	struct list_head           bufqueue;
+	/* frame currently on screen */
+	struct avi_voc_vbuf       *displayed_frame;
+	/* frame that just left the screen, returned to userspace at next IRQ */
+	struct avi_voc_vbuf       *expired_frame;
+	/* serialization lock */
+	struct mutex               lock;
+	/* VB2 callback lock.
+	 * XXX: should be merged with another lock: not in vivi */
+	struct mutex               vb2_lock;
+
+	/* Used when waiting for streamoff */
+	wait_queue_head_t          waitq;
+	/* frame period in microsecond. Updated in user context, read in
+	 * interrupt context. To save us some locking I made this variable
+	 * atomic */
+	atomic_t                   period_us;
+	/* frames pushed to the display since streamon (sysfs: frame_count) */
+	unsigned                   frame_count;
+
+	/* AVI segment chain built at start_streaming, NULL otherwise */
+	struct avi_segment        *segment;
+	unsigned                   plane0_size;
+	unsigned                   plane1_size;
+	/* number of concurrent opens (guarded by ->lock) */
+	int                        user_count;
+
+	/* Used for outputs enumerations */
+	int                        out_nb;
+	int                        out_id;
+	/* name of the current display segment */
+	u8                         output[32];
+	u8                         outputs[2*AVI_VOC_ENUM_NB][32];
+
+	/* Controls */
+	struct v4l2_ctrl_handler   ctrl_handler;
+	int                        no_scaler;
+	int                        zorder;
+	struct v4l2_ctrl          *hide_ctrl;
+	int                        hide;
+	int                        hw_streaming;
+};
+
+/* Per-open file handle; currently only wraps the core v4l2 fh. */
+struct avi_voc_fh {
+	struct v4l2_fh vfh;
+};
+
+#endif
diff --git a/drivers/parrot/media/video/avi_voc_ctrl.c b/drivers/parrot/media/video/avi_voc_ctrl.c
new file mode 100644
index 00000000000000..21eeeef781d5a7
--- /dev/null
+++ b/drivers/parrot/media/video/avi_voc_ctrl.c
@@ -0,0 +1,191 @@
+/*
+ * linux/drivers/parrot/video/avi_voc_ctrl.c
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 20-Oct-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "avi_voc_ctrl.h"
+
+/* v4l2_ctrl .s_ctrl: apply a control change. NO_SCALER and ZORDER can only
+ * be changed while no segment is built (i.e. not streaming); HIDE takes
+ * effect immediately when streaming. */
+static int avi_voc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct avi_voc *voc = container_of(ctrl->handler,
+	                                   struct avi_voc,
+	                                   ctrl_handler);
+	int rt = 0;
+
+	mutex_lock(&voc->lock);
+
+	dprintk(voc, "id=%x, value=%d\n", ctrl->id, ctrl->val);
+
+	switch (ctrl->id) {
+	case V4L2_CID_VOC_NO_SCALER:
+		if (voc->segment)
+			rt = -EBUSY;
+		else
+			voc->no_scaler = ctrl->val;
+		break;
+	case V4L2_CID_VOC_ZORDER:
+		if (voc->segment)
+			rt = -EBUSY;
+		else
+			voc->zorder = ctrl->val;
+		break;
+	case V4L2_CID_VOC_HIDE:
+		/* vbuf_lock keeps hide/hw_streaming coherent with the IRQ
+		 * handler while we toggle segment visibility. */
+		spin_lock_irq(&voc->vbuf_lock);
+		dprintk(voc, "voc->hide=%d, voc->hw_streaming=%d\n",
+		        voc->hide, voc->hw_streaming);
+		voc->hide = ctrl->val;
+		if (voc->hide && voc->hw_streaming)
+			avi_segment_hide(voc->segment);
+		else if (!voc->hide && voc->hw_streaming)
+			avi_segment_unhide(voc->segment);
+		spin_unlock_irq(&voc->vbuf_lock);
+		break;
+	default:
+		dev_err(voc->dev, "Invalid set of control=%08x\n", ctrl->id);
+		rt = -EACCES;
+		break;
+	}
+	mutex_unlock(&voc->lock);
+	return rt;
+}
+
+/* As we can also modify those value with sysfs we have to bypass v4l2 control
+ * cached value by declaring those control volatiles. */
+/* v4l2_ctrl .g_volatile_ctrl: these controls can also be modified through
+ * sysfs, so bypass the v4l2 cached value and report the driver state. */
+static int avi_voc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct avi_voc *voc = container_of(ctrl->handler,
+	                                   struct avi_voc,
+	                                   ctrl_handler);
+	int rt = 0;
+
+	mutex_lock(&voc->lock);
+
+	switch (ctrl->id) {
+	case V4L2_CID_VOC_NO_SCALER:
+		ctrl->val = voc->no_scaler;
+		break;
+	case V4L2_CID_VOC_ZORDER:
+		ctrl->val = voc->zorder;
+		break;
+	case V4L2_CID_VOC_HIDE:
+		ctrl->val = voc->hide;
+		break;
+	default:
+		/* Fixed copy/paste from s_ctrl: this is the *get* path. */
+		dev_err(voc->dev, "Invalid get of control=%08x\n", ctrl->id);
+		rt = -EACCES;
+		break;
+	}
+
+	dprintk(voc, "id=%x, value=%d\n", ctrl->id, ctrl->val);
+	mutex_unlock(&voc->lock);
+	return rt;
+}
+
+/* Ops for controls whose value lives only in the v4l2 cache. */
+static const struct v4l2_ctrl_ops avi_voc_ctrl_ops = {
+	.s_ctrl = avi_voc_s_ctrl,
+};
+
+/* Ops for controls that are also writable via sysfs: reads must bypass the
+ * v4l2 cache (see avi_voc_g_volatile_ctrl). */
+static const struct v4l2_ctrl_ops avi_voc_ctrl_ops_volatile = {
+	.g_volatile_ctrl = avi_voc_g_volatile_ctrl,
+	.s_ctrl          = avi_voc_s_ctrl,
+};
+
+/* Custom controls registered by avi_voc_ctrl_create(). */
+static const struct v4l2_ctrl_config avi_voc_ctrls[] = {
+	{
+		.ops  = &avi_voc_ctrl_ops_volatile,
+		.id   = V4L2_CID_VOC_NO_SCALER,
+		.name = "Turn off scaling possibility",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.min  = 0,
+		.max  = 1,
+		.step = 1,
+		.def  = 0,
+	},
+	{
+		.ops  = &avi_voc_ctrl_ops_volatile,
+		.id   = V4L2_CID_VOC_ZORDER,
+		.name = "blender zorder",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		/* -1 lets the AVI core pick the blender input. */
+		.min  = -1,
+		.max  = 7,
+		.step = 1,
+		.def  = -1,
+	},
+	{
+		.ops  = &avi_voc_ctrl_ops,
+		.id   = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Minimum number of output buffer",
+		.min  = AVI_VOC_MIN_BUFFERS,
+		.max  = AVI_VOC_MIN_BUFFERS,
+		.step = 1,
+		.def  = AVI_VOC_MIN_BUFFERS,
+	},
+};
+
+/* Registered separately so avi_voc keeps a pointer to it (hide_ctrl), used
+ * by avi_voc_ctrl_unhide(). */
+static const struct v4l2_ctrl_config avi_voc_ctrl_hide = {
+	.ops  = &avi_voc_ctrl_ops,
+	.id   = V4L2_CID_VOC_HIDE,
+	.name = "Hide VOC",
+	.type = V4L2_CTRL_TYPE_BOOLEAN,
+	.min  = 0,
+	.max  = 1,
+	.step = 1,
+	.def  = 0,
+};
+
+/* Force the "hide" control back to 0 (visible) on a fresh open.
+ * Pokes ctrl->val/cur.val directly under the handler lock instead of going
+ * through v4l2_ctrl_s_ctrl() so the s_ctrl callback does not fire -- only
+ * the cached state is reset, no hardware access happens. */
+void avi_voc_ctrl_unhide(struct avi_voc *voc)
+{
+	mutex_lock(voc->ctrl_handler.lock);
+	voc->hide_ctrl->val = 0;
+	voc->hide_ctrl->cur.val = 0;
+	mutex_unlock(voc->ctrl_handler.lock);
+}
+
+/* Register all VOC controls on the video device.
+ * Returns 0 or a negative errno; on failure the caller is expected to call
+ * avi_voc_ctrl_free(). */
+int avi_voc_ctrl_create(struct avi_voc *voc)
+{
+	int rt;
+	int i;
+	struct v4l2_ctrl *ctrl;
+
+	rt = v4l2_ctrl_handler_init(&voc->ctrl_handler,
+	                            ARRAY_SIZE(avi_voc_ctrls) + 1);
+	if (rt)
+		return rt;
+
+	for (i = 0; i < ARRAY_SIZE(avi_voc_ctrls); ++i) {
+		ctrl = v4l2_ctrl_new_custom(&voc->ctrl_handler,
+		                            &avi_voc_ctrls[i], NULL);
+		if (ctrl != NULL && avi_voc_ctrls[i].ops->g_volatile_ctrl)
+			ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	}
+
+	voc->hide_ctrl = v4l2_ctrl_new_custom(&voc->ctrl_handler,
+	                                      &avi_voc_ctrl_hide, NULL);
+
+	/* v4l2_ctrl_new_custom() records failures in handler->error: report
+	 * them instead of silently registering a partial control set (the
+	 * previous revision always returned 0). */
+	if (voc->ctrl_handler.error)
+		return voc->ctrl_handler.error;
+
+	voc->vdev->ctrl_handler = &voc->ctrl_handler;
+
+	return 0;
+}
+
+/* Release every control registered by avi_voc_ctrl_create(). */
+void avi_voc_ctrl_free(struct avi_voc *voc)
+{
+	v4l2_ctrl_handler_free(&voc->ctrl_handler);
+}
diff --git a/drivers/parrot/media/video/avi_voc_ctrl.h b/drivers/parrot/media/video/avi_voc_ctrl.h
new file mode 100644
index 00000000000000..4fcaf8e2881e90
--- /dev/null
+++ b/drivers/parrot/media/video/avi_voc_ctrl.h
@@ -0,0 +1,35 @@
+/*
+ * linux/drivers/parrot/video/avi_voc_ctrl.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 20-Oct-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_VOC_CTRLS_H_
+#define _AVI_VOC_CTRLS_H_
+
+#include "avi_voc_core.h"
+
+/* Register the VOC controls on voc and attach them to its video
+ * device. Returns 0 or a negative errno. */
+int avi_voc_ctrl_create(struct avi_voc *voc);
+
+/* Free the controls created by avi_voc_ctrl_create(). */
+void avi_voc_ctrl_free(struct avi_voc *voc);
+
+/* Reset the "hide" control to visible. */
+void avi_voc_ctrl_unhide(struct avi_voc *voc);
+
+#endif
diff --git a/drivers/parrot/media/video/avicam.h b/drivers/parrot/media/video/avicam.h
new file mode 100644
index 00000000000000..e1470092dc185a
--- /dev/null
+++ b/drivers/parrot/media/video/avicam.h
@@ -0,0 +1,44 @@
+#ifndef _AVICAM_H_
+#define _AVICAM_H_
+
+#include <linux/i2c.h>
+#include <media/videobuf2-core.h>
+
+#include "avi_v4l2.h"
+#include "avi_capture_timings.h"
+
+/* Width of the parallel bus between the sensor and the AVI. */
+enum avicam_bus_width {
+ AVICAM_BUS_WIDTH_8,
+ AVICAM_BUS_WIDTH_16,
+};
+
+/* this name should correspond to the module file name,
+ not to the module.driver.name as declared in the driver code */
+#define AVICAM_DUMMY_NAME "avicam_dummy_dev"
+
+/* Describes one I2C subdevice to instantiate, possibly with its own
+ * chain of upstream subdevices. */
+struct avicam_subdevs {
+ struct i2c_board_info *board_info;
+ int i2c_adapter_id;
+ struct avicam_subdevs *subdevs;/*subdevices connected to a video input of the current subdev */
+};
+
+/* Per-board configuration passed through platform data. */
+struct avicam_platform_data {
+ /* We need at least one camera */
+ unsigned long cam_cap;
+
+ /* True if we want to create a video device for stat capture (only
+ * available for raw format capture)*/
+ int enable_stats;
+
+ /* Custom measure-to-timings conversion function, if necessary */
+ avi_capture_get_timings measure_to_timings;
+
+ union avi_cam_interface interface;
+ unsigned int bus_config;
+ /* bus width in bits; compared against per-format bpp/cpp in
+ * avicam_pixfmt_to_mbus() */
+ char bus_width;
+ struct avicam_subdevs *subdevs;
+ struct avicam_dummy_info *dummy_driver_info;
+ enum vb2_cache_flags vb2_cache_flags;
+};
+
+#endif /* _AVICAM_H_ */
diff --git a/drivers/parrot/media/video/avicam_dummy_dev.c b/drivers/parrot/media/video/avicam_dummy_dev.c
new file mode 100644
index 00000000000000..79e17cb0fd0c09
--- /dev/null
+++ b/drivers/parrot/media/video/avicam_dummy_dev.c
@@ -0,0 +1,210 @@
+
+/*
+ * Dummy subdev driver for avicam
+ *
+ * Based(copied) on soc_camera_platform
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <parrot/avicam_dummy_dev.h>
+
+/* Per-instance state: just the embedded subdev. The platform data
+ * (struct avicam_dummy_info) is stored as subdev private data. */
+struct avicam_dummy_priv {
+ struct v4l2_subdev subdev;
+};
+
+/* Recover our private structure from the platform device. */
+static struct avicam_dummy_priv *get_priv(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ return container_of(subdev, struct avicam_dummy_priv, subdev);
+}
+
+/* Nothing to start/stop: the dummy has no hardware. */
+static int avicam_dummy_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/* Report the frame interval stored in the platform data; fails if it
+ * was never set (either term zero). */
+static int avicam_dummy_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct avicam_dummy_info *p = v4l2_get_subdevdata(sd);
+
+ if (!fi ||
+ (p->fi.interval.numerator == 0) ||
+ (p->fi.interval.denominator == 0))
+ return -EINVAL;
+
+ fi->interval.numerator = p->fi.interval.numerator;
+ fi->interval.denominator = p->fi.interval.denominator;
+ return 0;
+}
+
+/* Store the requested frame interval; no validation since there is no
+ * hardware behind it. */
+static int avicam_dummy_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct avicam_dummy_info *p = v4l2_get_subdevdata(sd);
+
+ if (!fi)
+ return -EINVAL;
+
+ p->fi.interval.numerator = fi->interval.numerator;
+ p->fi.interval.denominator = fi->interval.denominator;
+ return 0;
+}
+
+/* Select the TRY (per-filehandle) or ACTIVE (stored in platform data)
+ * format depending on 'which'. */
+static struct v4l2_mbus_framefmt *
+__avicam_dummy_get_fmt(struct avicam_dummy_info *dummy, struct v4l2_subdev_fh *fh,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(fh, pad);
+ else
+ return &dummy->format;
+}
+
+/* pad op: read back the TRY or ACTIVE format. */
+static int avicam_dummy_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct avicam_dummy_info *p = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __avicam_dummy_get_fmt(p, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ fmt->format = *format;
+ return 0;
+}
+
+/* pad op: accept any format as-is (dummy device, no constraints). */
+static int avicam_dummy_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct avicam_dummy_info *p = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __avicam_dummy_get_fmt(p, fh, fmt->pad, fmt->which);
+ if (format == NULL)
+ return -EINVAL;
+
+ *format = fmt->format;
+ return 0;
+}
+
+/* pad op: exactly one mbus code is enumerated, the current one. */
+static int avicam_dummy_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct avicam_dummy_info *p = v4l2_get_subdevdata(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = p->format.code;
+ return 0;
+}
+
+/* No core ops needed; kept so avicam_dummy_subdev_ops.core is valid. */
+static struct v4l2_subdev_core_ops avicam_dummy_subdev_core_ops;
+
+static struct v4l2_subdev_video_ops avicam_dummy_subdev_video_ops = {
+ .s_stream = avicam_dummy_s_stream,
+ .g_frame_interval = avicam_dummy_g_frame_interval,
+ .s_frame_interval = avicam_dummy_s_frame_interval,
+};
+
+static struct v4l2_subdev_pad_ops avicam_dummy_subdev_pad_ops = {
+ .get_fmt = avicam_dummy_get_fmt,
+ .set_fmt = avicam_dummy_set_fmt,
+ .enum_mbus_code = avicam_dummy_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops avicam_dummy_subdev_ops = {
+ .core = &avicam_dummy_subdev_core_ops,
+ .video = &avicam_dummy_subdev_video_ops,
+ .pad = &avicam_dummy_subdev_pad_ops,
+};
+
+/* Probe: allocate the dummy subdev, register it with the parent
+ * v4l2_device provided through platform data and initialize its
+ * single source media pad. Returns 0 or a negative errno. */
+static int avicam_dummy_probe(struct platform_device *pdev)
+{
+ struct avicam_dummy_priv *priv;
+ struct avicam_dummy_info *p = pdev->dev.platform_data;
+ int ret;
+ struct v4l2_subdev *sd;
+
+ if (!p)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sd = &priv->subdev;
+
+ platform_set_drvdata(pdev, sd);
+
+ v4l2_subdev_init(sd, &avicam_dummy_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_set_subdevdata(sd, p);
+ strlcpy(sd->name, dev_name(&pdev->dev),
+ V4L2_SUBDEV_NAME_SIZE);
+
+ ret = v4l2_device_register_subdev(p->v4l2_dev, sd);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register subdev\n");
+ goto free_priv;
+ }
+
+ p->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, 1, &p->pad, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't init media entity\n");
+ /* The subdev was registered above: unregister it before
+ * freeing priv, otherwise the v4l2_device keeps a pointer
+ * to freed memory. */
+ goto unregister_subdev;
+ }
+
+ dev_info(&pdev->dev, "entity intialized, dummy\n");
+
+ return 0;
+
+unregister_subdev:
+ v4l2_device_unregister_subdev(sd);
+free_priv:
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ return ret;
+}
+
+/* Remove: undo everything done in probe. */
+static int avicam_dummy_remove(struct platform_device *pdev)
+{
+ struct avicam_dummy_priv *priv = get_priv(pdev);
+
+ v4l2_device_unregister_subdev(&priv->subdev);
+ media_entity_cleanup(&priv->subdev.entity);
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver avicam_dummy_driver = {
+ .driver = {
+ .name = AVICAM_DUMMY_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = avicam_dummy_probe,
+ .remove = avicam_dummy_remove,
+};
+
+module_platform_driver(avicam_dummy_driver);
+
+MODULE_DESCRIPTION("avicam dummy dev");
+MODULE_AUTHOR("Julien BERAUD");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:avicam_dummy");
diff --git a/drivers/parrot/media/video/avicam_stats.c b/drivers/parrot/media/video/avicam_stats.c
new file mode 100644
index 00000000000000..01f14c3f1a0c8f
--- /dev/null
+++ b/drivers/parrot/media/video/avicam_stats.c
@@ -0,0 +1,348 @@
+#include <linux/module.h>
+#include <media/v4l2-ioctl.h>
+
+#include "avicam_v4l2.h"
+
+/* Capture-done callback: timestamp the buffer and hand it back to
+ * videobuf2, flagging an error state if the capture failed. */
+void avicam_stats_done(struct avi_capture_context *ctx,
+ struct avi_dma_buffer *frame,
+ enum avi_field f)
+{
+ struct avicam *avicam = ctx->priv;
+ struct avicam_vbuf *vbuf = frame->priv;
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+
+ if (frame->status != AVI_BUFFER_DONE)
+ dev_err(avicam->dev, "couldn't capture stats\n");
+
+ v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp);
+ /* frame_count counts fields; sequence counts frames */
+ vbuf->vb.v4l2_buf.sequence = avicam->frame_count >> 1;
+
+ if (frame->status != AVI_BUFFER_DONE)
+ state = VB2_BUF_STATE_ERROR;
+
+ vb2_buffer_done(&vbuf->vb, state);
+}
+
+/* Next-buffer callback: pop the next queued buffer (if any) and give
+ * its DMA address to the capture context. If the queue is empty the
+ * frame is simply left unset (dropped). */
+void avicam_stats_next(struct avi_capture_context *ctx,
+ struct avi_dma_buffer *frame,
+ enum avi_field f)
+{
+ struct avicam *avicam = ctx->priv;
+ struct avicam_stats *stats = &avicam->stats;
+ struct avicam_vbuf *vbuf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+
+ if (stats->streaming && !list_empty(&stats->bufqueue)) {
+ vbuf = list_first_entry(&stats->bufqueue,
+ struct avicam_vbuf,
+ list);
+
+ list_del(&vbuf->list);
+
+ vbuf->vb.state = VB2_BUF_STATE_ACTIVE;
+ }
+
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+
+ if (!vbuf)
+ /* exhausted buffer queue */
+ return;
+
+ /* XXX add YUV offset */
+ frame->plane0.dma_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb, 0);
+ frame->priv = vbuf;
+}
+
+/* vb2 queue_setup: single plane of fixed stats frame size. */
+static int avicam_stats_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+
+ sizes[0] = AVI_CAPTURE_STATS_FRAMESIZE;
+ alloc_ctxs[0] = avicam->stats.alloc_ctx;
+ *num_planes = 1;
+
+ if (*count == 0)
+ /* Default to 4 buffers */
+ *count = 4;
+
+ return 0;
+}
+
+/* vb2 buf_prepare: fixed payload, progressive only. */
+static int avicam_stats_vbuf_prepare(struct vb2_buffer* vb)
+{
+ vb2_set_plane_payload(vb, 0, AVI_CAPTURE_STATS_FRAMESIZE);
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+/* vb2 buf_queue: append the buffer to our pending queue. We take
+ * vbq_lock ourselves since the interrupt-context callbacks above also
+ * walk the list. */
+static void avicam_stats_vbuf_queue(struct vb2_buffer *vb)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vb->vb2_queue);
+ struct avicam_vbuf *buf = to_avicam_vbuf(vb);
+ struct avicam_stats *stats = &avicam->stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+
+ list_add_tail(&buf->list, &stats->bufqueue);
+
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+}
+
+/* VIDIOC_QUERYCAP for the stats node. */
+static int avicam_stats_querycap(struct file *file,
+ void *fh,
+ struct v4l2_capability *cap)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct avicam_stats *stats = &avicam->stats;
+
+ mutex_lock(&stats->lock);
+
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+
+ snprintf(cap->card, sizeof(cap->card), "avi-stats.%d",
+ avicam_instance(avicam));
+
+ cap->version = DRIVER_VERSION;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ mutex_unlock(&stats->lock);
+
+ return 0;
+}
+
+/* VIDIOC_ENUM_FMT: a single fixed format. */
+static int avicam_stats_enum_fmt_vid_cap(struct file *file,
+ void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ /* We only have one hardcoded format for the stats */
+ if (f->index != 0)
+ return -EINVAL;
+
+ f->flags = 0;
+ f->pixelformat = V4L2_PIX_FMT_BGR32;
+
+ strlcpy(f->description, "bayer statistics", sizeof(f->description));
+
+ return 0;
+}
+
+/* Shared handler for G/S/TRY_FMT: the format is fixed, so all three
+ * just fill in the same hardcoded description. */
+static int avicam_stats_fill_fmt(struct file *file,
+ void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ pix->pixelformat = V4L2_PIX_FMT_BGR32;
+ pix->colorspace = 0;
+
+ pix->width = AVI_CAPTURE_STATS_WIDTH;
+ pix->height = AVI_CAPTURE_STATS_HEIGHT;
+ pix->bytesperline = pix->width * 4;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* vb2 start_streaming: just mark ourselves as streaming. */
+static int avicam_stats_streamon(struct vb2_queue* vq, unsigned int count)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+ struct avicam_stats *stats = &avicam->stats;
+
+ if (stats->streaming)
+ return -EBUSY;
+
+ /* Nothing else to do here, the complicated stuff is done in avicam_streamon */
+ stats->streaming = 1;
+
+ return 0;
+}
+
+/* Return all pending buffers to vb2 (errored) and stop streaming. */
+static int avicam_stats_do_streamoff(struct avicam *avicam)
+{
+ struct avicam_stats *stats = &avicam->stats;
+ struct avicam_vbuf *buf;
+ struct avicam_vbuf *node;
+ unsigned long flags;
+
+ if (!stats->streaming)
+ return -EINVAL;
+
+ spin_lock_irqsave(&stats->vbq_lock, flags);
+ list_for_each_entry_safe(buf, node, &stats->bufqueue, list) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&stats->vbq_lock, flags);
+
+ stats->streaming = 0;
+
+ return 0;
+}
+
+
+/* vb2 stop_streaming wrapper. */
+static int avicam_stats_streamoff(struct vb2_queue* vq)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+
+ return avicam_stats_do_streamoff(avicam);
+}
+
+static struct vb2_ops avicam_stats_vqueue_ops = {
+ .queue_setup = avicam_stats_queue_setup,
+ .buf_prepare = avicam_stats_vbuf_prepare,
+ .buf_queue = avicam_stats_vbuf_queue,
+ .start_streaming = avicam_stats_streamon,
+ .stop_streaming = avicam_stats_streamoff,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct v4l2_ioctl_ops avicam_stats_ioctl_ops = {
+ .vidioc_querycap = avicam_stats_querycap,
+ .vidioc_enum_fmt_vid_cap = avicam_stats_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = avicam_stats_fill_fmt,
+ .vidioc_try_fmt_vid_cap = avicam_stats_fill_fmt,
+ .vidioc_s_fmt_vid_cap = avicam_stats_fill_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* Device open: the first opener initializes the vb2 queue, later
+ * openers only bump the use count. Returns 0 or a negative errno. */
+static int avicam_stats_open(struct file *file)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct avicam_stats *stats = &avicam->stats;
+ struct vb2_queue *q = &stats->vb_vidq;
+ int ret = 0;
+
+ mutex_lock(&stats->lock);
+
+ if (stats->use_count != 0)
+ goto done;
+
+ memset(q, 0, sizeof(*q));
+
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->lock = &stats->lock;
+ q->ops = &avicam_stats_vqueue_ops;
+ q->drv_priv = avicam;
+ q->buf_struct_size = sizeof(struct avicam_vbuf);
+
+ /* vb2_queue_init() can fail: don't account an open that has no
+ * usable queue behind it. */
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unlock;
+
+ done:
+ stats->use_count++;
+ unlock:
+ mutex_unlock(&stats->lock);
+
+ return ret;
+}
+
+/* Device release: the last closer tears down the vb2 state via
+ * vb2_fop_release(). */
+static int avicam_stats_release(struct file *file)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct avicam_stats *stats = &avicam->stats;
+ int ret = 0;
+
+ mutex_lock(&stats->lock);
+
+ stats->use_count--;
+ if (!stats->use_count)
+ ret = vb2_fop_release(file);
+
+ mutex_unlock(&stats->lock);
+ return ret;
+}
+
+static struct v4l2_file_operations avicam_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = avicam_stats_open,
+ .release = avicam_stats_release,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+};
+
+/* Create and register the video device used to capture the bayer
+ * statistics. Only done when the platform data enables stats.
+ * Returns 0 or a negative errno. */
+int __devinit avicam_stats_init(struct avicam *avicam)
+{
+ struct avicam_stats *stats = &avicam->stats;
+ struct video_device *vdev;
+ int ret;
+
+ if (!avicam->pdata->enable_stats)
+ /* Nothing to do */
+ return 0;
+
+ spin_lock_init(&stats->vbq_lock);
+ INIT_LIST_HEAD(&stats->bufqueue);
+
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENODEV;
+ goto vdev_alloc_failed;
+ }
+
+ stats->vdev = vdev;
+
+ mutex_init(&stats->lock);
+
+ strlcpy(vdev->name, DRIVER_NAME "-stats", sizeof(vdev->name));
+
+ vdev->parent = avicam->dev;
+ vdev->current_norm = V4L2_STD_UNKNOWN;
+ vdev->fops = &avicam_stats_fops;
+ vdev->ioctl_ops = &avicam_stats_ioctl_ops;
+ vdev->release = &video_device_release;
+ vdev->vfl_type = VFL_TYPE_GRABBER;
+ vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->queue = &stats->vb_vidq;
+ /* We handle the locking ourselves */
+ vdev->lock = NULL;
+
+ video_set_drvdata(vdev, avicam);
+
+ vdev->v4l2_dev = avicam->v4l2_dev;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto video_register_failed;
+
+ /* vb2_dma_contig_init_ctx() returns an ERR_PTR on failure; using
+ * it unchecked would crash at the first buffer allocation. */
+ stats->alloc_ctx = vb2_dma_contig_init_ctx(&vdev->dev);
+ if (IS_ERR(stats->alloc_ctx)) {
+ ret = PTR_ERR(stats->alloc_ctx);
+ goto alloc_ctx_failed;
+ }
+
+ dev_info(avicam->dev,
+ "stats video device successfuly registered as %s\n",
+ video_device_node_name(stats->vdev));
+ return 0;
+
+ alloc_ctx_failed:
+ /* Unregistering also releases vdev through the release callback */
+ video_unregister_device(stats->vdev);
+ return ret;
+ video_register_failed:
+ /* The device was never registered: release it directly instead of
+ * calling video_unregister_device() on an unregistered device */
+ video_device_release(vdev);
+ vdev_alloc_failed:
+ return ret;
+}
+
+/* Tear down everything created by avicam_stats_init(). */
+void __devexit avicam_stats_destroy(struct avicam *avicam)
+{
+ if (!avicam->pdata->enable_stats)
+ /* Nothing to do */
+ return;
+
+ vb2_dma_contig_cleanup_ctx(avicam->stats.alloc_ctx);
+ video_unregister_device(avicam->stats.vdev);
+}
diff --git a/drivers/parrot/media/video/avicam_v4l2.c b/drivers/parrot/media/video/avicam_v4l2.c
new file mode 100644
index 00000000000000..0eea79613fc733
--- /dev/null
+++ b/drivers/parrot/media/video/avicam_v4l2.c
@@ -0,0 +1,2023 @@
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <parrot/avicam_dummy_dev.h>
+
+#include "gpu/p7ump.h"
+
+#include "avicam_v4l2.h"
+
+/* Return the subdev connected to our sink pad (and optionally its pad
+ * index), or NULL if nothing suitable is linked. */
+static struct v4l2_subdev *
+avicam_remote_subdev(struct avicam *avicam, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_source(&avicam->pad);
+
+ if (remote == NULL ||
+ media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/* vb2 queue_setup: single plane sized from the current pixel format. */
+static int avicam_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+
+ sizes[0] = avicam->pix.sizeimage;
+ alloc_ctxs[0] = avicam->alloc_ctx;
+ *num_planes = 1;
+
+ if (*count == 0)
+ /* Default to 4 buffers */
+ *count = 4;
+
+ return 0;
+}
+
+/* vb2 buf_init: register the buffer with UMP so the GPU can map it. */
+static int avicam_vbuf_init(struct vb2_buffer* vb)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* Allow GPU to access this memory through UMP */
+ p7ump_add_map(vb2_dma_contig_plane_dma_addr(vb, 0),
+ PAGE_ALIGN(avicam->pix.sizeimage));
+
+ return 0;
+}
+
+/* vb2 buf_prepare: set the payload from the negotiated format. */
+static int avicam_vbuf_prepare(struct vb2_buffer* vb)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vb->vb2_queue);
+
+ vb2_set_plane_payload(vb, 0, avicam->pix.sizeimage);
+ vb->v4l2_buf.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/* vb2 buf_queue: append the buffer to the pending queue. We take
+ * vbq_lock ourselves since interrupt-context capture callbacks also
+ * walk the list. */
+static void avicam_vbuf_queue(struct vb2_buffer *vb)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vb->vb2_queue);
+ struct avicam_vbuf *buf = to_avicam_vbuf(vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&avicam->vbq_lock, flags);
+ list_add_tail(&buf->list, &avicam->bufqueue);
+ spin_unlock_irqrestore(&avicam->vbq_lock, flags);
+}
+
+/* vb2 buf_cleanup counterpart of avicam_vbuf_init(). */
+static void avicam_vbuf_release(struct vb2_buffer *vb)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vb->vb2_queue);
+
+ p7ump_remove_map(vb2_dma_contig_plane_dma_addr(vb, 0),
+ PAGE_ALIGN(avicam->pix.sizeimage));
+}
+
+/* Describes how one media-bus pixel code maps onto the AVI input
+ * stage: pixel class, AVI format control value and bus cost. */
+struct avicam_mbus_desc {
+ enum v4l2_mbus_pixelcode code;
+ enum avi_v4l2_pixtype type;
+ unsigned format_control;
+ /* bits per pixel */
+ char bpp;
+ /* clock per pixel (less is better) */
+ char cpp;
+};
+
+/* Table of every mbus code the avicam can accept, used both to match
+ * against subdev formats and to pick the cheapest configuration (see
+ * avicam_pixfmt_to_mbus()). */
+static const struct avicam_mbus_desc avicam_mbus_descs[] =
+{
+ /* YUV mbus formats */
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_1X16,
+ .type = AVI_PIXTYPE_YUV,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_1X16,
+ .bpp = 16,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .type = AVI_PIXTYPE_YUV,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8,
+ .type = AVI_PIXTYPE_YUV,
+ .format_control = AVI_FORMAT_CONTROL_YVYU_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_UYVY8_2X8,
+ .type = AVI_PIXTYPE_YUV,
+ .format_control = AVI_FORMAT_CONTROL_UYVY_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_VYUY8_2X8,
+ .type = AVI_PIXTYPE_YUV,
+ .format_control = AVI_FORMAT_CONTROL_VYUY_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_Y8_1X8,
+ .type = AVI_PIXTYPE_GREY,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 8,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_Y10_1X10,
+ .type = AVI_PIXTYPE_GREY,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 10,
+ .cpp = 1,
+ },
+ /* RGB formats */
+ {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = AVI_PIXTYPE_RGB,
+ .format_control = AVI_FORMAT_CONTROL_RGB555_2X8,
+ .bpp = 15,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = AVI_PIXTYPE_RGB,
+ .format_control = AVI_FORMAT_CONTROL_BGR555_2X8,
+ .bpp = 15,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .type = AVI_PIXTYPE_RGB,
+ .format_control = AVI_FORMAT_CONTROL_RGB565_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+ {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .type = AVI_PIXTYPE_RGB,
+ .format_control = AVI_FORMAT_CONTROL_BGR565_2X8,
+ .bpp = 16,
+ .cpp = 2,
+ },
+
+ /* Bayer formats: 10bits */
+ {
+ .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 10,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 10,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SGBRG10_1X10,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 10,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SRGGB10_1X10,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUV888_1X24,
+ .bpp = 10,
+ .cpp = 1,
+ },
+
+ /* Bayer formats: 12bits. */
+ {
+ .code = V4L2_MBUS_FMT_SBGGR12_1X12,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_1X16,
+ .bpp = 12,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_1X16,
+ .bpp = 12,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SGBRG12_1X12,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_1X16,
+ .bpp = 12,
+ .cpp = 1,
+ },
+ {
+ .code = V4L2_MBUS_FMT_SRGGB12_1X12,
+ .type = AVI_PIXTYPE_BAYER,
+ .format_control = AVI_FORMAT_CONTROL_YUYV_1X16,
+ .bpp = 12,
+ .cpp = 1,
+ },
+};
+
+/* Return the descriptor matching mbus code 'c', or NULL when the code
+ * is unknown/unsupported. */
+static const struct avicam_mbus_desc *
+avicam_get_desc(enum v4l2_mbus_pixelcode c)
+{
+ const struct avicam_mbus_desc *desc = avicam_mbus_descs;
+ const struct avicam_mbus_desc *end =
+ avicam_mbus_descs + ARRAY_SIZE(avicam_mbus_descs);
+
+ for (; desc < end; desc++)
+ if (desc->code == c)
+ return desc;
+
+ /* Not found */
+ return NULL;
+}
+
+/* This function attempts to find a bus configuration between the subdevice and
+ * the avicam. In order do to that we iterate over the subdevice's formats and
+ * we try to find the most suitable one (least transformations, best picture
+ * quality etc...)
+ *
+ * @fmt: target V4L2 pixel format
+ * @c: on success, receives the selected mbus code (may be NULL)
+ *
+ * Returns 0 on success or a negative errno. */
+static int avicam_pixfmt_to_mbus(struct avicam *avicam,
+ u32 fmt,
+ enum v4l2_mbus_pixelcode *c)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+ enum avi_v4l2_pixtype dst_type;
+ const struct avicam_mbus_desc *best = NULL;
+ struct v4l2_subdev_mbus_code_enum code;
+
+ dst_type = avi_v4l2_get_pixfmt_type(fmt);
+ if (dst_type == AVI_PIXTYPE_UNKNOWN)
+ /* Unsupported or unknown pixel format */
+ return -EINVAL;
+
+ sd = avicam_remote_subdev(avicam, &code.pad);
+ if(!sd)
+ return -ENODEV;
+
+ for (code.index = 0;; code.index++) {
+ const struct avicam_mbus_desc *desc;
+
+ ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
+ if (ret)
+ /* We're done enumerating the formats */
+ break;
+
+ desc = avicam_get_desc(code.code);
+ if (desc == NULL)
+ /* Unsupported or unknown bus format */
+ continue;
+
+ /* Check if the bus is wide enough to support this format. I
+ * just divide the bits per pixel with the number of clock ticks
+ * per pixel, it seems to work with all the formats I've
+ * considered but if it breaks somehow with more exotic
+ * subdevices we should add a "bus_width" member to the
+ * avicam_mbus_desc structure. */
+ if (desc->bpp > avicam->pdata->bus_width * desc->cpp)
+ /* We can't handle this bus width */
+ continue;
+
+ if (best == NULL) {
+ /* Anything is better than nothing... */
+ best = desc;
+ continue;
+ }
+
+ /* Always favour similar types to avoid useless conversions */
+ if (best->type != desc->type) {
+ if (desc->type == dst_type)
+ best = desc;
+ continue;
+ }
+
+ if (best->bpp != desc->bpp) {
+ if (best->bpp < desc->bpp)
+ best = desc;
+ continue;
+ }
+
+ /* Favour the format with the least clock ticks per pixel in an
+ * effort to reduce the clock frequency. */
+ if (best->cpp != desc->cpp) {
+ if (best->cpp > desc->cpp)
+ best = desc;
+ continue;
+ }
+ }
+
+ if (best == NULL) {
+ dev_err(avicam->dev,
+ "Could not find any valid subdev bus configuration");
+ return -EREMOTEIO;
+ }
+
+ if (dst_type == AVI_PIXTYPE_BAYER && best->type != dst_type)
+ /* We can't substitute a YUV/RGB format for bayer output, it
+ * doesn't make any sense to convert from these formats into
+ * bayer. Bail out here: the previous code cleared 'best' and
+ * then dereferenced it below (NULL pointer dereference). */
+ return -EINVAL;
+
+ if (c)
+ *c = best->code;
+
+ return 0;
+}
+
+/* VIDIOC_QUERYCAP for the main capture node. */
+static int avicam_querycap(struct file *file,
+ void *fh,
+ struct v4l2_capability *cap)
+{
+ struct avicam *avicam = video_drvdata(file);
+
+ mutex_lock(&avicam->lock);
+
+ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->card, sizeof(cap->card), "avicam.%d",
+ avicam_instance(avicam));
+
+ cap->version = DRIVER_VERSION;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ mutex_unlock(&avicam->lock);
+
+ return 0;
+}
+
+/* VIDIOC_ENUM_FMT: delegate to the generic AVI format enumeration. */
+static int avicam_enum_fmt_vid_cap(struct file *file,
+ void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return avi_v4l2_enum_fmt(f);
+}
+
+/* VIDIOC_G_FMT: return the currently configured pixel format. */
+static int avicam_g_fmt_vid_cap(struct file *file,
+ void *priv,
+ struct v4l2_format *f)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ f->fmt.pix = avicam->pix;
+
+ ret = 0;
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+/* Validate/adjust a requested capture format against both the AVI
+ * constraints and the current subdev format. Caller holds
+ * avicam->lock. Returns 0 or a negative errno. */
+static int avicam_try_fmt_vid_cap_unlocked(struct avicam *avicam,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev *sd;
+ const struct avicam_mbus_desc *mbus_desc;
+ int bayer_output;
+ struct v4l2_subdev_format format;
+ int ret;
+
+ sd = avicam_remote_subdev(avicam, &format.pad);
+ if(!sd) {
+ dprintk(avicam, "no subdev found\n");
+ return -ENODEV;
+ }
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dprintk(avicam, "bad format type %d\n", f->type);
+ return -EINVAL;
+ }
+
+ /* force driver to set colorspace as specified in V4L2:
+ * Colorspace information supplements the pixelformat
+ * and must be set by the driver for capture streams
+ */
+ pix->colorspace = 0;
+
+ ret = avi_v4l2_try_fmt(f->type, pix);
+ if (ret) {
+ dprintk(avicam, "avi_v4l2_try_fmt failed\n");
+ return ret;
+ }
+
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ bayer_output = (avi_v4l2_get_pixfmt_type(pix->pixelformat) ==
+ AVI_PIXTYPE_BAYER);
+
+ /* Try to get a good default format */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ if (ret)
+ return ret;
+
+ mbus_desc = avicam_get_desc(format.format.code);
+ if (mbus_desc == NULL) {
+ dprintk(avicam, "invalid mbus format\n");
+ return -EINVAL;
+ }
+
+ /* Raw (bayer) capture only makes sense from a raw sensor */
+ if (bayer_output && mbus_desc->type != AVI_PIXTYPE_BAYER) {
+ dprintk(avicam, "can't capture raw when sensor is not raw(%d)\n",
+ format.format.code);
+ return -EINVAL;
+ }
+
+ if (format.format.field == V4L2_FIELD_NONE ||
+ format.format.field == V4L2_FIELD_ANY) {
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ } else {
+ /* Input is interlaced, therefore so is the output. We only
+ * support ALTERNATE and INTERLACED so far */
+ if (f->fmt.pix.field != V4L2_FIELD_ALTERNATE &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ }
+
+ return 0;
+}
+
+/* VIDIOC_TRY_FMT: locked wrapper. */
+static int avicam_try_fmt_vid_cap(struct file *file,
+ void *priv,
+ struct v4l2_format *f)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ ret = avicam_try_fmt_vid_cap_unlocked(avicam, f);
+
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+/* Apply a new capture format. Caller holds avicam->lock. Rejected
+ * while streaming. */
+static int avicam_s_fmt_vid_cap_unlocked(struct avicam *avicam,
+ struct v4l2_format *f)
+{
+ const struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 code;
+ int ret;
+
+ if (avicam->streaming)
+ return -EBUSY;
+
+ ret = avicam_try_fmt_vid_cap_unlocked(avicam, f);
+ if (ret)
+ return ret;
+
+ /* Make sure a compatible bus configuration exists */
+ ret = avicam_pixfmt_to_mbus(avicam, pix->pixelformat, &code);
+ if (ret) {
+ dprintk(avicam, "unsupported pixfmt\n");
+ return ret;
+ }
+
+ avicam->pix = *pix;
+ /* Reset compose selection to take the whole output buffer */
+ avicam->compose.top = 0;
+ avicam->compose.left = 0;
+ avicam->compose.width = pix->width;
+ avicam->compose.height = pix->height;
+
+ return 0;
+}
+
+/* VIDIOC_S_FMT: locked wrapper. */
+static int avicam_s_fmt_vid_cap(struct file *file,
+ void *priv,
+ struct v4l2_format *f)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ ret = avicam_s_fmt_vid_cap_unlocked(avicam, f);
+
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+/* Return the cropping boundary for the source video. */
+static int avicam_get_bounds(struct avicam *avicam,
+ struct v4l2_rect *r)
+{
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev_format format;
+ int ret;
+
+ sd = avicam_remote_subdev(avicam, &format.pad);
+ if(!sd)
+ return -ENODEV;
+
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ /* Try to get a good default format */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ if (ret)
+ /* Looks like there's nothing we can do */
+ return -ENOIOCTLCMD;
+
+ r->top = 0;
+ r->left = 0;
+ r->width = format.format.width;
+ r->height = format.format.height;
+
+ return 0;
+}
+
+/* Return the cropping boundary for the destination buffer. For now it's simply
+ * the size of the buffer */
+static int avicam_get_compose_bounds(struct avicam *avicam,
+ struct v4l2_rect *r)
+{
+ r->top = 0;
+ r->left = 0;
+ r->width = avicam->pix.width;
+ r->height = avicam->pix.height;
+
+ return 0;
+}
+
+/* VIDIOC_G_SELECTION: report crop/compose rectangles and bounds. */
+static int avicam_g_selection(struct file *file,
+ void *priv,
+ struct v4l2_selection *s)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&avicam->lock);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ ret = avicam_get_bounds(avicam, &s->r);
+ break;
+ case V4L2_SEL_TGT_CROP_ACTIVE:
+ s->r = avicam->rect;
+ ret = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ ret = avicam_get_compose_bounds(avicam, &s->r);
+ break;
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ s->r = avicam->compose;
+ ret = 0;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+/* VIDIOC_S_SELECTION: set crop/compose rectangles. While streaming
+ * only offset changes are allowed (the scaler can't be reconfigured
+ * on the fly). */
+static int avicam_s_selection(struct file *file,
+ void *priv,
+ struct v4l2_selection *s)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_rect bounds;
+ int ret;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&avicam->lock);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_ACTIVE:
+ avicam_get_bounds(avicam, &bounds);
+
+ avi_v4l2_crop_adjust(&s->r, &bounds);
+
+ if (s->r.width == 0 || s->r.height == 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* We don't yet allow changing the source resolution while
+ * streaming because we can't add/remove the scaler on the
+ * fly. */
+ if (avicam->streaming &&
+ (s->r.width != avicam->rect.width ||
+ s->r.height != avicam->rect.height)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ avicam->rect = s->r;
+
+ ret = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ avicam_get_compose_bounds(avicam, &bounds);
+
+ avi_v4l2_crop_adjust(&s->r, &bounds);
+
+ if (s->r.width == 0 || s->r.height == 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* We don't yet allow changing the source resolution while
+ * streaming because we can't add/remove the scaler on the
+ * fly. */
+ if (avicam->streaming &&
+ (s->r.width != avicam->compose.width ||
+ s->r.height != avicam->compose.height)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ avicam->compose = s->r;
+ ret = 0;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
/* VIDIOC_ENUM_INPUT handler. There is a single input (index 0) wired to
 * the remote subdevice.
 *
 * NOTE(review): the capability probing below invokes the *set* ops with a
 * NULL/0 argument merely to detect whether the op is implemented; it
 * relies on the subdev drivers tolerating those arguments — confirm with
 * the subdevs actually in use. */
static int avicam_enum_input(struct file *file,
			     void *priv,
			     struct v4l2_input *inp)
{
	struct avicam *avicam = video_drvdata(file);
	struct v4l2_subdev *sd;
	int ret;

	mutex_lock(&avicam->lock);

	if (inp->index != 0) {
		ret = -EINVAL;
		goto unlock;
	}

	sd = avicam_remote_subdev(avicam, NULL);
	if(!sd) {
		dev_err(avicam->dev, "no subdev found\n");
		ret = -ENODEV;
		goto unlock;
	}

	/* Ugly hack for dv: drop the CUSTOM_TIMINGS capability when the
	 * subdev doesn't implement the DV timings op */
	ret = v4l2_subdev_call(sd, video, s_dv_timings, NULL);
	if(ret == -ENODEV || ret == -ENOIOCTLCMD)
		inp->capabilities &= ~V4L2_IN_CAP_CUSTOM_TIMINGS;

	/* Same trick for analog standard support */
	ret = v4l2_subdev_call(sd, core, s_std, 0);
	if(ret == -ENODEV || ret == -ENOIOCTLCMD)
		inp->capabilities &= ~V4L2_IN_CAP_STD;

	/* default is camera */
	inp->type = V4L2_INPUT_TYPE_CAMERA;
	inp->std  = V4L2_STD_UNKNOWN;
	strlcpy(inp->name, "Camera", sizeof(inp->name));

	ret = 0;

 unlock:
	mutex_unlock(&avicam->lock);

	return ret;
}
+
+static int avicam_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int avicam_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct avicam *avicam = video_drvdata(file);
+
+ if (avicam->streaming)
+ return -EBUSY;
+
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int avicam_s_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ if (avicam->streaming) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, core, s_std, *a);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, core, g_std, a);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_g_chip_ident(struct file *file,
+ void *fh,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, core, g_chip_ident, id);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
/* Capture-done callback invoked by the AVI capture layer when a DMA
 * transfer into "frame" completes (once per field for interlaced input).
 *
 * When capturing both fields of an interlaced frame into a single buffer
 * (last_capture == 0), the top-field completion is swallowed and the
 * buffer is only returned to vb2 after the bottom field lands. */
static void avicam_done(struct avi_capture_context *ctx,
			struct avi_dma_buffer *frame,
			enum avi_field f)
{
	struct avicam *avicam = ctx->priv;
	struct avicam_vbuf *vbuf = frame->priv;
	enum vb2_buffer_state state = VB2_BUF_STATE_DONE;

	if (vbuf->last_capture == 0) {
		/* We wait until we get the bottom field in the
		 * buffer */
		vbuf->last_capture = 1;
		return;
	}

	/* frame_count actually counts *fields*, hence the +2 for
	 * whole-frame captures; the sequence number below divides it
	 * back into frames. */
	if (avicam->pix.field == V4L2_FIELD_ALTERNATE)
		avicam->frame_count++;
	else
		avicam->frame_count += 2;

	v4l2_get_timestamp(&vbuf->vb.v4l2_buf.timestamp);
	vbuf->vb.v4l2_buf.sequence = avicam->frame_count / 2;

	/* Surface DMA errors/timeouts to userland via the buffer state */
	if (frame->status == AVI_BUFFER_ERROR ||
	    frame->status == AVI_BUFFER_TIMEOUT)
		state = VB2_BUF_STATE_ERROR;

	/* In ALTERNATE mode each buffer holds a single field: tag which one */
	if (avicam->pix.field == V4L2_FIELD_ALTERNATE)
		vbuf->vb.v4l2_buf.field = (f == AVI_TOP_FIELD) ?
			V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;

	vb2_buffer_done(&vbuf->vb, state);
}
+
/* "Next buffer" callback invoked by the AVI capture layer to obtain the
 * DMA target for the upcoming field/frame. Runs in interrupt context
 * (hence the irqsave locking on the buffer queue).
 *
 * For V4L2_FIELD_INTERLACED both fields of a frame go to the same buffer:
 * the bottom field reuses bottom_vbuf at an offset of one line. If no
 * buffer is available, "frame" is left untouched and the hardware drops
 * the field. */
static void avicam_next(struct avi_capture_context *ctx,
			struct avi_dma_buffer *frame,
			enum avi_field f)
{
	struct avicam *avicam = ctx->priv;
	struct v4l2_subdev *sd;
	unsigned bottom_offset = 0;
	struct avicam_vbuf *vbuf = NULL;
	dma_addr_t dma_addr;
	unsigned long flags;
	bool handled = false;

#ifdef CONFIG_AVICAM_USE_ISP
	/* Do blanking ISP task */
	avi_v4l2_isp_blanking(avicam->ctrl_isp);
#endif

	if (avicam->pix.field == V4L2_FIELD_INTERLACED) {
		vbuf = avicam->bottom_vbuf;

		if (vbuf) {
			/* We should be capturing the bottom field */
			bottom_offset = avicam->pix.bytesperline;
			avicam->bottom_vbuf = NULL;
		} else if (f == AVI_BOT_FIELD) {
			/* We must wait until we get a top field to start
			 * capturing a new buffer */
			return;
		}
	}

	/* Propagate interrupt to subdev */
	sd = avicam_remote_subdev(avicam, NULL);
	v4l2_subdev_call(sd, core, interrupt_service_routine, 0, &handled);

	if (vbuf == NULL) {
		/* Pop the next pending buffer, if any */
		spin_lock_irqsave(&avicam->vbq_lock, flags);

		if (!list_empty(&avicam->bufqueue)) {
			vbuf = list_entry(avicam->bufqueue.next,
					  struct avicam_vbuf,
					  list);

			list_del(&vbuf->list);

			vbuf->vb.state = VB2_BUF_STATE_ACTIVE;
		}

		spin_unlock_irqrestore(&avicam->vbq_lock, flags);

		if (vbuf == NULL) {
			dprintk_l(avicam, "Exhausted buffer queue\n");
			return;
		}

		/* We'll have to capture two fields in a row if the input is
		 * interlaced */
		if (avicam->pix.field == V4L2_FIELD_INTERLACED) {
			avicam->bottom_vbuf = vbuf;
			vbuf->last_capture = 0;
		} else {
			vbuf->last_capture = 1;
		}
	}

	dma_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb, 0) + bottom_offset;

	avi_v4l2_setup_dmabuf(&avicam->compose,
			      &avicam->segment_format,
			      avicam->plane0_size,
			      avicam->plane1_size,
			      dma_addr,
			      vbuf,
			      frame);
}
+
+static int avicam_clean_buffers(struct avicam *avicam, int state) {
+ unsigned long flags;
+ struct avicam_vbuf *buf, *node;
+
+ spin_lock_irqsave(&avicam->vbq_lock, flags);
+
+ list_for_each_entry_safe(buf, node, &avicam->bufqueue, list) {
+ vb2_buffer_done(&buf->vb, state);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&avicam->vbq_lock, flags);
+
+ return 0;
+}
+
+static unsigned long avicam_get_timeout(struct v4l2_subdev *sd) {
+ struct v4l2_subdev_frame_interval fi;
+ int ret;
+ u32 min_timeout = HZ / 2; /* Minimum 500ms timeout */
+ u32 timeout;
+
+ ret = v4l2_subdev_call(sd, video, g_frame_interval, &fi);
+
+ if (ret)
+ return 3 * HZ; /* Default 3s timeout */
+
+ /* Set timeout to the duration of 3 frames */
+ timeout = div_u64((u64)fi.interval.numerator * 3 * HZ,
+ fi.interval.denominator);
+
+ if (timeout < min_timeout)
+ timeout = min_timeout;
+
+ return timeout;
+}
+
+static int avicam_streamon(struct vb2_queue* vq, unsigned int count)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format *pix = &avicam->pix;
+ const struct v4l2_rect *compose = &avicam->compose;
+ const struct v4l2_rect *rect = &avicam->rect;
+ struct avi_capture_context *ctx = &avicam->capture_ctx;
+ struct avi_segment_format *fmt = &avicam->segment_format;
+ int collect_stats;
+ struct v4l2_subdev *sd;
+ const struct avicam_mbus_desc *mbus_desc;
+ unsigned long caps;
+ int bayer_input;
+ int bayer_output;
+ struct v4l2_subdev_format format;
+ struct avi_capture_crop crop;
+ int ret;
+ unsigned sensor_width;
+ unsigned sensor_height;
+#ifdef CONFIG_AVICAM_USE_ISP
+ struct avi_isp_offsets isp_offsets;
+#endif
+
+ if (avicam->streaming) {
+ ret = -EBUSY;
+ goto busy;
+ }
+
+ avicam->frame_count = 0;
+
+ sd = avicam_remote_subdev(avicam, &format.pad);
+ if (!sd) {
+ ret = -ENODEV;
+ goto no_subdev;
+ }
+
+ media_entity_pipeline_start(&avicam->vdev->entity, &avicam->pipe);
+
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ /* Try to get a good default format */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ if (ret)
+ goto get_fmt_failed;
+
+ mbus_desc = avicam_get_desc(format.format.code);
+ if (mbus_desc == NULL)
+ goto bad_mbus;
+
+ crop.x = rect->left;
+ crop.y = rect->top;
+
+ if (rect->width && rect->height) {
+ crop.width = rect->width;
+ crop.height = rect->height;
+ } else {
+ /* Use the whole sensor's resolution when no crop window is
+ * set */
+ crop.width = format.format.width - crop.x;
+ crop.height = format.format.height - crop.y;
+ }
+
+ bayer_input = (mbus_desc->type == AVI_PIXTYPE_BAYER);
+ bayer_output = (avi_v4l2_get_pixfmt_type(pix->pixelformat) ==
+ AVI_PIXTYPE_BAYER);
+
+ caps = avicam->pdata->cam_cap;
+
+ if (crop.width != compose->width ||
+ crop.height != compose->height)
+ caps |= AVI_CAP_SCAL;
+
+ if (bayer_output) {
+ if (caps & AVI_CAP_SCAL) {
+ dev_err(avicam->dev,
+ "bad format: can't rescale a raw image "
+ "[%ux%u -> %ux%u]\n",
+ crop.width, crop.height,
+ compose->width, compose->height);
+ ret = -EINVAL;
+ goto bad_bayer;
+ }
+
+ if (!bayer_input) {
+ /* Can't output raw from non-raw input */
+ ret = -EINVAL;
+ goto bad_bayer;
+ }
+ } else {
+ /* Output is not raw. */
+ if (bayer_input)
+ /* We need to convert the RAW image */
+ caps |= AVI_CAPS_ISP;
+ else if (format.format.colorspace != pix->colorspace)
+ /* We need to do chroma conversion */
+ caps |= AVI_CAP_CONV;
+ }
+
+ /* We collect the stats when we have an ISP */
+ collect_stats = avicam->pdata->enable_stats &&
+ (caps & AVI_CAP_ISP_CHAIN_BAYER);
+
+ ret = avi_capture_init(ctx,
+ avicam->dev,
+ avicam_instance(avicam),
+ caps,
+ collect_stats);
+ if (ret)
+ goto capture_init_failed;
+
+#ifdef CONFIG_AVICAM_USE_ISP
+ /* Activate ISP chain controls */
+ if ((caps & AVI_CAPS_ISP) &&
+ avi_isp_get_offsets(ctx->dma_segment, &isp_offsets) == 0) {
+ avi_v4l2_isp_activate(avicam->ctrl_isp, &isp_offsets);
+ avi_v4l2_isp_blanking(avicam->ctrl_isp);
+ }
+#endif
+
+ ctx->priv = avicam;
+
+ ctx->measure_to_timings = avicam->pdata->measure_to_timings;
+ ctx->interface = avicam->pdata->interface;
+ ctx->interface.format_control = mbus_desc->format_control;
+
+ if (mbus_desc->type == AVI_PIXTYPE_BAYER && mbus_desc->bpp == 10)
+ ctx->interface.raw10 = AVI_CAP_RAW10_1X10;
+ else if (mbus_desc->type == AVI_PIXTYPE_GREY && mbus_desc->bpp == 8)
+ ctx->interface.unpacker = AVI_CAP_RAW8_1X8;
+ else if (mbus_desc->type == AVI_PIXTYPE_GREY && mbus_desc->bpp == 10)
+ ctx->interface.raw10 = AVI_CAP_RAW10_1X10;
+
+ ctx->interlaced = (pix->field != V4L2_FIELD_NONE);
+
+ /* There is no V4L2 colorspace for 'GREY' so we specify it manually */
+ if (mbus_desc->type == AVI_PIXTYPE_GREY)
+ ctx->capture_cspace = (mbus_desc->bpp == 8) ? AVI_GREY_CSPACE :
+ AVI_Y10_CSPACE;
+ else
+ ctx->capture_cspace =
+ avi_v4l2_to_avi_cspace(format.format.colorspace);
+
+ ctx->next = &avicam_next;
+ ctx->done = &avicam_done;
+ ctx->stats_next = &avicam_stats_next;
+ ctx->stats_done = &avicam_stats_done;
+
+ /* Make sure we don't have any stale vbuf laying around */
+ avicam->bottom_vbuf = NULL;
+
+ ctx->timeout_jiffies = avicam_get_timeout(sd);
+
+ /* Start the subdevice */
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret)
+ goto subdev_streamon_failed;
+#ifdef CONFIG_AVICAM_TIMINGS_AUTO
+ ret = avi_capture_prepare(ctx);
+ if (ret) {
+ dev_err(avicam->dev, "failed to get sensor timings\n");
+ goto subdev_streamon_failed;
+ }
+ avi_capture_resolution(ctx, &sensor_width, &sensor_height);
+#else
+ ret = avi_capture_prepare_timings(ctx, format.format.width, format.format.height);
+ if (ret)
+ goto prepare_failed;
+ sensor_width = format.format.width;
+ sensor_height = format.format.height;
+#endif
+ dprintk(avicam,
+ "sensor resolution is %dx%d%c\n",
+ sensor_width, sensor_height,
+ ctx->interlaced ? 'i' : 'p');
+
+ ret = avi_capture_set_crop(ctx, &crop);
+ if (ret) {
+ dev_err(avicam->dev,
+ "cropping failed. Resolution: %dx%d. "
+ "Crop window: %dx%d @%dx%d\n",
+ sensor_width, sensor_height,
+ crop.width, crop.height,
+ crop.x, crop.y);
+ goto crop_failed;
+ }
+
+ if ((caps & AVI_CAPS_ISP) &&
+ (crop.width > 2048 || crop.height > 4096)) {
+ dev_err(avicam->dev,
+ "AVI ISP doesn't handle resolutions above 2048x4096\n");
+ ret = -ERANGE;
+ goto bad_resolution;
+ }
+
+ /* XXX Handle output crop ? */
+ ret = avi_v4l2_to_segment_fmt(pix,
+ compose,
+ fmt,
+ &avicam->plane0_size,
+ &avicam->plane1_size);
+ if (ret)
+ goto format_conversion_failed;
+
+ ret = avi_capture_set_format(ctx, fmt);
+ if (ret) {
+ dev_err(avicam->dev, "failed to set capture format\n");
+ goto set_format_failed;
+ }
+
+ avicam->streaming = 1;
+
+ ret = avi_capture_streamon(ctx);
+ if (ret)
+ goto capture_streamon_failed;
+
+ /* Success */
+ return 0;
+
+ capture_streamon_failed:
+ set_format_failed:
+ format_conversion_failed:
+ crop_failed:
+ bad_resolution:
+ prepare_failed:
+ subdev_streamon_failed:
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ avi_capture_destroy(ctx);
+ capture_init_failed:
+ bad_bayer:
+ bad_mbus:
+ get_fmt_failed:
+ no_subdev:
+ avicam->streaming = 0;
+ avicam_clean_buffers(avicam, VB2_BUF_STATE_QUEUED);
+ busy:
+ media_entity_pipeline_stop(&avicam->vdev->entity);
+ return ret;
+}
+
/* Common streamoff path: stop the capture context, stop the sensor,
 * release all pending buffers and stop the media pipeline. Teardown
 * order matters: the AVI capture side is quiesced before the sensor is
 * told to stop streaming.
 *
 * Returns -EINVAL if we were not streaming, -ENODEV if the remote
 * subdev has disappeared, 0 otherwise. */
static int avicam_do_streamoff(struct avicam *avicam)
{
	struct v4l2_subdev *sd;

#ifdef CONFIG_AVICAM_USE_ISP
	/* Deactivate ISP chains controls */
	avi_v4l2_isp_deactivate(avicam->ctrl_isp);
#endif

	if (!avicam->streaming)
		return -EINVAL;

	avicam->streaming = 0;

	sd = avicam_remote_subdev(avicam, NULL);
	if (!sd)
		return -ENODEV;

	avi_capture_streamoff(&avicam->capture_ctx);

	avi_capture_destroy(&avicam->capture_ctx);

	v4l2_subdev_call(sd, video, s_stream, 0);

	/* Buffers still queued will never complete: flag them as errored */
	avicam_clean_buffers(avicam, VB2_BUF_STATE_ERROR);

	media_entity_pipeline_stop(&avicam->vdev->entity);

	return 0;
}
+
+static int avicam_streamoff(struct vb2_queue* vq)
+{
+ struct avicam *avicam = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *sd;
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ return avicam_do_streamoff(avicam);
+}
+
/* videobuf2 queue operations: buffer lifecycle plus stream start/stop.
 * wait_prepare/wait_finish use the stock vb2 helpers, which drop and
 * re-take the queue lock around blocking waits. */
static struct vb2_ops avicam_vqueue_ops = {
	.queue_setup     = avicam_queue_setup,
	.buf_init        = avicam_vbuf_init,
	.buf_prepare     = avicam_vbuf_prepare,
	.buf_queue       = avicam_vbuf_queue,
	.buf_cleanup     = avicam_vbuf_release,
	.start_streaming = avicam_streamon,
	.stop_streaming  = avicam_streamoff,
	.wait_prepare    = vb2_ops_wait_prepare,
	.wait_finish     = vb2_ops_wait_finish,
};
+
+static int avicam_s_dv_timings(struct file *file,
+ void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_g_dv_timings(struct file *file,
+ void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, video, g_dv_timings, timings);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_query_dv_timings(struct file *file,
+ void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, video, query_dv_timings, timings);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_enum_dv_timings(struct file *file,
+ void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, video, enum_dv_timings, timings);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static int avicam_dv_timings_cap(struct file *file,
+ void *fh,
+ struct v4l2_dv_timings_cap *cap)
+
+{
+ struct avicam *avicam = video_drvdata(file);
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&avicam->lock);
+
+ sd = avicam_remote_subdev(avicam, NULL);
+ if(!sd) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = v4l2_subdev_call(sd, video, dv_timings_cap, cap);
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
+static long avicam_default(struct file *file,
+ void *p,
+ bool valid_prio,
+ int cmd,
+ void *arg)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret = -ENOIOCTLCMD;
+
+ mutex_lock(&avicam->lock);
+
+ if (cmd == AVI_ISP_IOGET_OFFSETS) {
+
+ if (!avicam->streaming) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ BUG_ON(!avicam->capture_ctx.dma_segment);
+
+ /* The v4l2 layer allocates "arg" in kernel space based on
+ * _IOC_SIZE instead of directly passing the user pointer. It
+ * means we don't have to copy_to_user or check for overflows
+ * here. See the code of video_usercopy in v4l2-ioctl.c. */
+ ret = avi_isp_get_offsets(avicam->capture_ctx.dma_segment,
+ (struct avi_isp_offsets *)arg);
+ }
+
+ unlock:
+ mutex_unlock(&avicam->lock);
+
+ return ret;
+}
+
/* V4L2 ioctl dispatch table. Buffer management goes straight to the
 * stock vb2 helpers; format, selection and input handling are
 * implemented above. */
static const struct v4l2_ioctl_ops avicam_ioctl_ops = {
	.vidioc_querycap	     = avicam_querycap,
	.vidioc_enum_fmt_vid_cap     = avicam_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	     = avicam_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap      = avicam_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	     = avicam_s_fmt_vid_cap,

	.vidioc_g_selection	     = avicam_g_selection,
	.vidioc_s_selection	     = avicam_s_selection,

	.vidioc_enum_input	     = avicam_enum_input,
	.vidioc_g_input		     = avicam_g_input,
	.vidioc_s_input		     = avicam_s_input,
	.vidioc_s_std		     = avicam_s_std,
	.vidioc_g_std		     = avicam_g_std,
	.vidioc_reqbufs		     = vb2_ioctl_reqbufs,
	.vidioc_querybuf	     = vb2_ioctl_querybuf,
	.vidioc_qbuf		     = vb2_ioctl_qbuf,
	.vidioc_dqbuf		     = vb2_ioctl_dqbuf,
	.vidioc_expbuf		     = vb2_ioctl_expbuf,
	.vidioc_g_chip_ident	     = avicam_g_chip_ident,
	.vidioc_streamon	     = vb2_ioctl_streamon,
	.vidioc_streamoff	     = vb2_ioctl_streamoff,
	.vidioc_default		     = avicam_default,

	/* These functions just call the subdev methods of the same name. They
	 * are only used by the adv7611 userland tool for debugging
	 * purposes. Once they are not needed anymore we should remove them from
	 * here (or re-implement them if they make sense in the context of this
	 * driver)
	 */
	.vidioc_dv_timings_cap	     = avicam_dv_timings_cap,
	.vidioc_s_dv_timings	     = avicam_s_dv_timings,
	.vidioc_g_dv_timings	     = avicam_g_dv_timings,
	.vidioc_query_dv_timings     = avicam_query_dv_timings,
	.vidioc_enum_dv_timings      = avicam_enum_dv_timings,
};
+
/* Device-node open handler. The first opener performs the heavy
 * initialization: pick a sane default capture format from the subdev's
 * active format, power the sensor up and initialize the vb2 queue.
 * Subsequent opens only bump the use count.
 *
 * NOTE(review): the vb2_queue_init() return value is not checked —
 * confirm it cannot fail with this configuration. */
static int avicam_open(struct file *file)
{
	struct avicam *avicam = video_drvdata(file);
	struct v4l2_format default_fmt = { 0 };
	struct v4l2_subdev *sd;
	struct v4l2_subdev_format format;
	const struct avicam_mbus_desc *desc;
	int ret;
	struct vb2_queue* q = &avicam->vb_vidq;

	mutex_lock(&avicam->lock);

	sd = avicam_remote_subdev(avicam, &format.pad);
	if (!sd) {
		dev_err(avicam->dev, "no subdev found\n");
		ret = -ENODEV;
		goto unlock;
	}

	/* Not the first opener: everything is already set up */
	if (avicam->use_count > 0) {
		ret = 0;
		goto unlock;
	}

	memset(&avicam->pix,  0, sizeof(avicam->pix));
	memset(&avicam->rect, 0, sizeof(avicam->rect));

	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	/* Try to get a good default format */
	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
	if (ret) {
		dev_err(avicam->dev, "can't get sensor bus format\n");
		goto unlock;
	}

	default_fmt.type		 = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	default_fmt.fmt.pix.width	 = format.format.width;
	default_fmt.fmt.pix.height	 = format.format.height;
	default_fmt.fmt.pix.colorspace	 = format.format.colorspace;
	default_fmt.fmt.pix.field	 = format.format.field;

	/* Select a sane default pixelformat matching the bus pixel type */
	default_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	desc = avicam_get_desc(format.format.code);
	if (desc)
		switch (desc->type) {
		case AVI_PIXTYPE_BAYER:
			default_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR10;
			break;
		case AVI_PIXTYPE_RGB:
			default_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
			break;
		case AVI_PIXTYPE_YUV:
		default:
			default_fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
			break;
		}

	ret = avicam_s_fmt_vid_cap_unlocked(avicam, &default_fmt);
	if (ret) {
		dev_err(avicam->dev, "couldn't set the default format\n");
		goto unlock;
	}

	/* Power the sensor up */
	v4l2_subdev_call(sd, core, s_power, 1);

	memset(q, 0, sizeof(struct vb2_queue));

	q->type		   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes	   = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->mem_ops	   = &vb2_dma_contig_memops;
	q->lock		   = &avicam->lock;
	q->ops		   = &avicam_vqueue_ops;
	q->drv_priv	   = avicam;
	q->buf_struct_size = sizeof(struct avicam_vbuf);
	q->cache_flags	   = avicam->pdata->vb2_cache_flags;
	vb2_queue_init(q);

	ret = 0;

 unlock:
	/* Count this opener on all success paths (including reopen) */
	if (ret == 0)
		avicam->use_count++;

	mutex_unlock(&avicam->lock);

	dprintk(avicam, "camera device opened\n");

	return ret;
}
+
+static int avicam_release(struct file *file)
+{
+ struct avicam *avicam = video_drvdata(file);
+ int ret = 0;
+
+ mutex_lock(&avicam->lock);
+
+ avicam->use_count--;
+ if (!avicam->use_count)
+ ret = vb2_fop_release(file);
+
+ mutex_unlock(&avicam->lock);
+ return ret;
+}
+
/* File operations: open/release are custom (use counting and first-open
 * setup); everything else defers to the vb2 file-operation helpers. */
static struct v4l2_file_operations avicam_fops = {
	.owner		= THIS_MODULE,
	.open		= avicam_open,
	.release	= avicam_release,
	.mmap		= vb2_fop_mmap,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.read           = vb2_fop_read,
};
+
+static int __devinit avicam_register_subdevs(struct avicam *avicam,
+ struct avicam_subdevs *devs,
+ struct media_entity *entity,
+ u32 sink_pad)
+{
+ struct v4l2_subdev *subdev = NULL;
+ struct i2c_adapter *adapter;
+ struct avicam_subdevs *dev;
+ int ret = 0;
+
+ if (devs == NULL || devs->board_info == NULL) {
+ dev_err(avicam->dev, "no board info found\n");
+ return -ENODEV;
+ }
+
+ for (dev = devs; dev->board_info; dev++) {
+ unsigned int flags = MEDIA_LNK_FL_ENABLED;
+ unsigned int pad = 0;
+ int i;
+
+ dprintk(avicam, "found one i2c board info\n");
+
+ adapter = i2c_get_adapter(dev->i2c_adapter_id);
+ if (adapter == NULL) {
+ dev_err(avicam->dev,
+ "Unable to get I2C adapter %d for device %s\n",
+ dev->i2c_adapter_id,
+ dev->board_info->type);
+ continue;
+ }
+
+ subdev = v4l2_i2c_new_subdev_board(avicam->v4l2_dev,
+ adapter,
+ dev->board_info,
+ NULL);
+ if (subdev == NULL) {
+ dprintk(avicam,
+ "Unable to register subdev %s\n",
+ dev->board_info->type);
+ continue;
+ }
+
+ /* This just returns 0 if either of the two args is NULL */
+ v4l2_ctrl_add_handler(avicam->vdev->ctrl_handler,
+ subdev->ctrl_handler);
+
+ if(dev->subdevs) {
+ pad = 0;
+
+ for (i = 0; i < subdev->entity.num_pads; i++)
+ if(subdev->entity.pads[i].flags & MEDIA_PAD_FL_SINK) {
+ pad = i;
+ break;
+ }
+
+ if (i == subdev->entity.num_pads) {
+ dprintk(avicam, "No sink pad found on subdev\n");
+ break;
+ }
+
+ ret = avicam_register_subdevs(avicam, dev->subdevs,
+ &subdev->entity, pad);
+ if(ret < 0) {
+ dprintk(avicam, "Error registering subdevices\n");
+ /* XXX We should cleanup whe previous registered
+ subdevices before returning here */
+ return ret;
+ }
+ }
+
+ pad = 0;
+ for (i = 0; i < subdev->entity.num_pads; i++)
+ if (subdev->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) {
+ pad = i;
+ break;
+ }
+
+ if (i == subdev->entity.num_pads) {
+ dprintk(avicam, "No source pad found on subdev\n");
+ break;
+ }
+
+ ret = media_entity_create_link(&subdev->entity, pad, entity, sink_pad,
+ flags);
+ if (ret < 0) {
+ dprintk(avicam, "Failed to create media link(sd)\n");
+ break;;
+ }
+ }
+
+ if (!subdev)
+ /* Couldn't initialise any device succesfully. XXX We should
+ cleanup whe previous registered subdevices before returning
+ here */
+ return -EINVAL;
+
+ return ret;
+}
+
+
+static int __devinit avicam_init_subdevs(struct avicam *avicam)
+{
+ struct device *dev = avicam->dev;
+ int ret;
+
+ /* register i2c devices
+ *
+ * if board_info isn't present, register dummy driver currently, only
+ * one device per i2c bus is supported but this is intended to change to
+ * be able to support several the media devices should be configured
+ * here */
+ if(!avicam->pdata->subdevs && !avicam->pdata->dummy_driver_info) {
+ dev_err(dev, "no dev config found\n");
+ return -EINVAL;
+ }
+
+ if (avicam->pdata->subdevs) {
+ ret = avicam_register_subdevs(avicam, avicam->pdata->subdevs,
+ &avicam->vdev->entity, 0);
+ if (ret)
+ dprintk(avicam, "Error registering subdevs\n");
+
+ }
+
+ if (avicam->pdata->dummy_driver_info) {
+ unsigned int flags = MEDIA_LNK_FL_ENABLED;
+ /* only one pad on avicam right now */
+ unsigned int pad = 0;
+ struct v4l2_subdev *subdev;
+ ret = request_module(AVICAM_DUMMY_NAME);
+
+ ret = avicam_dummy_add(avicam->v4l2_dev,
+ &avicam->dummy_pdev,
+ avicam->pdata->dummy_driver_info);
+ if (ret) {
+ dev_err(dev, "Unable to register dummy dev\n");
+ return ret;
+ }
+
+ if(!avicam->dummy_pdev->dev.driver ||
+ !try_module_get(avicam->dummy_pdev->dev.driver->owner)) {
+ dev_err(dev, "cannot get dummy module\n");
+ return ret;
+ }
+ subdev = platform_get_drvdata(avicam->dummy_pdev);
+ if(!subdev) {
+ dprintk(avicam, "Cannot get a dummy subdev\n");
+ return ret;
+ }
+ ret = media_entity_create_link(&subdev->entity, 0, &avicam->vdev->entity, pad,
+ flags);
+ if(ret < 0) {
+ dprintk(avicam, "Failed to create media link(dummy)\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* Undo avicam_init_subdevs: remove the dummy platform device (if any)
 * and unregister every I2C subdevice client hanging off our v4l2_dev. */
static void avicam_destroy_subdevs(struct avicam *avicam)
{
	struct v4l2_subdev *sd, *next;

	if (avicam->dummy_pdev) {
		/* Balance the try_module_get done at init time */
		if(avicam->dummy_pdev->dev.driver)
			module_put(avicam->dummy_pdev->dev.driver->owner);
		avicam_dummy_del(avicam->dummy_pdev);
	}

	list_for_each_entry_safe(sd, next, &avicam->v4l2_dev->subdevs, list) {
		if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
			struct i2c_client *client = v4l2_get_subdevdata(sd);

			/* We need to unregister the i2c client explicitly.
			   We cannot rely on i2c_del_adapter to always
			   unregister clients for us, since if the i2c bus
			   is a platform bus, then it is never deleted. */
			if (client)
				i2c_unregister_device(client);
		}
	}
}
+
/* Build the V4L2 side of the driver: allocate and register the video
 * device, initialize the media entity, the control handler (with ISP
 * controls when enabled) and finally the subdevices and stats node.
 *
 * NOTE(review): the media_init_failed path falls into
 * video_unregister_device() although the vdev was never registered at
 * that point — verify this doesn't leak the vdev (or oops) on this
 * kernel; video_device_release() may be the right call there. */
static int __devinit avicam_v4l2_init(struct avicam *avicam)
{
	struct video_device *vdev;
	int ret;

	avicam->v4l2_dev = avi_v4l2_get_device();
	if (!avicam->v4l2_dev) {
		ret = -ENODEV;
		goto no_v4l2_dev;
	}

	vdev = video_device_alloc();
	if (vdev == NULL) {
		ret = -ENODEV;
		goto vdev_alloc_failed;
	}

	avicam->vdev = vdev;

	/* The camera node is a pure sink in the media graph */
	avicam->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_init(&avicam->vdev->entity, 1, &avicam->pad, 0);
	if(ret < 0) {
		dprintk(avicam, "Error initializing media entity\n");
		goto media_init_failed;
	}

	mutex_init(&avicam->lock);

	strlcpy(vdev->name, dev_name(avicam->dev), sizeof(vdev->name));

	vdev->parent	   = avicam->dev;
	vdev->current_norm = V4L2_STD_UNKNOWN;
	vdev->fops	   = &avicam_fops;
	vdev->ioctl_ops	   = &avicam_ioctl_ops;
	vdev->release	   = &video_device_release;
	vdev->vfl_type	   = VFL_TYPE_GRABBER;
	vdev->tvnorms	   = V4L2_STD_UNKNOWN;
	vdev->queue	   = &avicam->vb_vidq;
	/* We handle the locking ourselves */
	vdev->lock	   = NULL;

	video_set_drvdata(vdev, avicam);

#ifndef AVI_V4L2_ISP_CONTROL_HINT
#define AVI_V4L2_ISP_CONTROL_HINT 0
#endif
	/* Register ctrl_handler for inheriting controls from sub-devices */
	ret = v4l2_ctrl_handler_init(&avicam->ctrl_handler,
				     16 + AVI_V4L2_ISP_CONTROL_HINT);
	if (ret < 0)
		goto init_ctrl_handler_failed;

#ifdef CONFIG_AVICAM_USE_ISP
	/* Add ISP chain controls to V4L2 device */
	ret = avi_v4l2_isp_init(&avicam->ctrl_isp, &avicam->ctrl_handler);
	if (ret)
		goto init_isp_failed;
#endif

	avicam->vdev->ctrl_handler = &avicam->ctrl_handler;

	vdev->v4l2_dev = avicam->v4l2_dev;
	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		goto video_register_failed;

	ret = avicam_init_subdevs(avicam);
	if (ret)
		goto subdev_register_failed;

	ret = avicam_stats_init(avicam);
	if (ret)
		goto init_stats_failed;

	return 0;

 init_stats_failed:
	avicam_destroy_subdevs(avicam);
 subdev_register_failed:
 video_register_failed:
#ifdef CONFIG_AVICAM_USE_ISP
	avi_v4l2_isp_free(avicam->ctrl_isp);
 init_isp_failed:
#endif
	v4l2_ctrl_handler_free(&avicam->ctrl_handler);
 init_ctrl_handler_failed:
	media_entity_cleanup(&avicam->vdev->entity);
 media_init_failed:
	/* unregister calls video_device_release */
	video_unregister_device(avicam->vdev);
 vdev_alloc_failed:
	avi_v4l2_put_device(avicam->v4l2_dev);
 no_v4l2_dev:
	return ret;
}
+
+static int avicam_register_v4l2_subdevs(void)
+{
+ struct v4l2_device *v4l2_dev;
+ int ret;
+
+ /* Make sure the AVI has been succesfully probed */
+ if (!avi_probed())
+ return -ENODEV;
+
+ pr_info("registering avicam V4L2 subdevice nodes.\n");
+
+ v4l2_dev = avi_v4l2_get_device();
+
+ if (!v4l2_dev) {
+ pr_err("no v4l2 device available\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(v4l2_dev);
+ if (ret)
+ pr_err("v4l2_device_register_subdev_nodes failed [%d]\n", ret);
+
+ avi_v4l2_put_device(v4l2_dev);
+ return ret;
+}
+
/* Tear down everything avicam_v4l2_init built, in reverse order of
 * construction (stats node first, v4l2 device reference last). */
static void __devexit avicam_v4l2_destroy(struct avicam *avicam)
{
	avicam_stats_destroy(avicam);
	avicam_destroy_subdevs(avicam);
#ifdef CONFIG_AVICAM_USE_ISP
	avi_v4l2_isp_free(avicam->ctrl_isp);
#endif
	v4l2_ctrl_handler_free(&avicam->ctrl_handler);
	video_unregister_device(avicam->vdev);
	avi_v4l2_put_device(avicam->v4l2_dev);
}
+
+static int __devinit avicam_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct avicam *avicam;
+ struct vb2_dc_conf *alloc_ctx;
+
+ avicam = kzalloc(sizeof(*avicam), GFP_KERNEL);
+ if (avicam == NULL) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ avicam->dev = &pdev->dev;
+
+ avicam->pdata = dev_get_platdata(&pdev->dev);
+ if (!avicam->pdata) {
+ ret = -EINVAL;
+ goto no_pdata;
+ }
+
+ avicam->pctl = pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(avicam->pctl)) {
+ ret = PTR_ERR(avicam->pctl);
+ goto pinctrl_failed;
+ }
+
+ spin_lock_init(&avicam->vbq_lock);
+ INIT_LIST_HEAD(&avicam->bufqueue);
+
+ ret = avicam_v4l2_init(avicam);
+ if(ret)
+ goto v4l2_init_failed;
+
+ platform_set_drvdata(pdev, avicam);
+ dev_set_drvdata(&pdev->dev, avicam);
+
+ dev_info(avicam->dev,
+ "video device successfuly registered as %s\n",
+ video_device_node_name(avicam->vdev));
+
+ alloc_ctx = (struct vb2_dc_conf *)vb2_dma_contig_init_ctx(&pdev->dev);
+ alloc_ctx->cache_flags = avicam->pdata->vb2_cache_flags;
+
+ avicam->alloc_ctx = alloc_ctx;
+ return 0;
+
+ v4l2_init_failed:
+ pinctrl_put(avicam->pctl);
+ pinctrl_failed:
+ no_pdata:
+ kfree(avicam);
+ alloc_failed:
+
+ return ret;
+}
+
+static int __devexit avicam_remove(struct platform_device *pdev)
+{
+ struct avicam *avicam = platform_get_drvdata(pdev);
+
+ if (!avicam)
+ return -ENODEV;
+
+ avicam_v4l2_destroy(avicam);
+ pinctrl_put(avicam->pctl);
+ vb2_dma_contig_cleanup_ctx(avicam->alloc_ctx);
+ kfree(avicam);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int avicam_resume(struct device *dev)
+{
+ struct avicam *avicam = dev_get_drvdata(dev);
+
+ if (!avicam)
+ return -ENODEV;
+
+ if (!avicam->streaming)
+ /* Nothing to do */
+ return 0;
+
+ return avi_capture_resume(&avicam->capture_ctx);
+}
+
/* Power-management hooks: only resume needs handling (restores the
 * capture context when it was streaming). */
static struct dev_pm_ops avicam_dev_pm_ops = {
	.resume = &avicam_resume,
};
+
/* Platform-driver glue: one avicam instance per matching platform
 * device. */
static struct platform_driver avicam_driver = {
	.driver         = {
		.name   = DRIVER_NAME,
		.owner  = THIS_MODULE,
		.pm     = &avicam_dev_pm_ops,
	},
	.probe          = avicam_probe,
	.remove         = __devexit_p(avicam_remove),
};
+
+static int __init avicam_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&avicam_driver);
+
+ if (ret)
+ return ret;
+
+ return avicam_register_v4l2_subdevs();
+}
+
/* Module exit point: unregister the platform driver (which in turn
 * removes every bound avicam instance). */
static void __exit avicam_exit(void)
{
	platform_driver_unregister(&avicam_driver);
}
+
/* Module registration and metadata */
module_init(avicam_init);
module_exit(avicam_exit);

MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
MODULE_DESCRIPTION("Host driver for P7 Advanced Video Interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/media/video/avicam_v4l2.h b/drivers/parrot/media/video/avicam_v4l2.h
new file mode 100644
index 00000000000000..e3af761380378b
--- /dev/null
+++ b/drivers/parrot/media/video/avicam_v4l2.h
@@ -0,0 +1,143 @@
+#ifndef _AVICAM_V4L2_H_
+#define _AVICAM_V4L2_H_
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+
+#include "avi_v4l2.h"
+#include "avi_capture.h"
+#include "avicam.h"
+
+#ifdef CONFIG_AVICAM_USE_ISP
+#include "avi_v4l2_isp.h"
+#endif
+
+#ifdef DEBUG
+/* Per-device debug trace prefixed with the calling function's name. */
+#define dprintk(avicam, format_, args...) \
+ dev_dbg((avicam)->dev, "%s: " format_, __func__, ##args)
+
+/* Rate-limited variant for per-frame / interrupt-context paths. */
+#define dprintk_l(avicam, format_, args...) do { \
+ if (printk_ratelimit()) \
+ dprintk(avicam, format_, ##args); \
+ } while (0)
+
+#else /* DEBUG */
+/* Expand to a use of the argument so callers avoid "unused" warnings. */
+#define dprintk(avicam_, format_, args...) (void)avicam_
+#define dprintk_l(avicam_, format_, args...) (void)avicam_
+#endif /* DEBUG */
+
+#define DRIVER_NAME "avicam"
+#define DRIVER_VERSION KERNEL_VERSION(0, 2, 0)
+
+/* Driver wrapper around a vb2 buffer, linked into the capture queue. */
+struct avicam_vbuf
+{
+ /* *must* be first */
+ struct vb2_buffer vb;
+ struct list_head list;
+ /* For interlaced streams we capture two fields in a single frame */
+ int last_capture;
+};
+
+/* Context used when capturing bayer stats */
+struct avicam_stats
+{
+ spinlock_t vbq_lock;
+ struct list_head bufqueue;
+
+ struct video_device *vdev;
+ struct vb2_queue vb_vidq;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ /* serialization lock */
+ struct mutex lock;
+ int streaming;
+
+ int use_count;
+};
+
+/* Per-device state for one AVI camera instance. */
+struct avicam
+{
+ struct device *dev;
+ struct avi_capture_context capture_ctx;
+ struct avicam_platform_data *pdata;
+
+ /* Protects bufqueue */
+ spinlock_t vbq_lock;
+ struct list_head bufqueue;
+
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct video_device *vdev;
+ struct vb2_queue vb_vidq;
+ void *alloc_ctx;
+ /* serialization lock */
+ struct mutex lock;
+
+ struct media_pad pad;
+ struct media_pipeline pipe;
+
+ int use_count;
+
+ int streaming;
+
+ unsigned frame_count;
+
+ /* Output format */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct v4l2_rect compose;
+
+ struct avi_segment_format segment_format;
+ unsigned plane0_size;
+ unsigned plane1_size;
+
+ /* When storing interlaced video we have to store both fields in the
+ * same buffer */
+ struct avicam_vbuf *bottom_vbuf;
+
+ struct avicam_stats stats;
+
+ struct platform_device *dummy_pdev;
+
+ struct pinctrl *pctl;
+
+ /* Integrated V4L2 controls for ISP chain */
+#ifdef CONFIG_AVICAM_USE_ISP
+ struct avi_v4l2_isp *ctrl_isp;
+#endif
+};
+
+/* Instance number of this camera: the id of its platform device. */
+static inline int avicam_instance(struct avicam *avicam)
+{
+ struct platform_device *pdev;
+
+ pdev = container_of(avicam->dev, struct platform_device, dev);
+
+ return pdev->id;
+}
+
+/* Recover the driver wrapper from the embedded vb2 buffer (vb is the
+ * first member, see struct avicam_vbuf). */
+static inline struct avicam_vbuf* to_avicam_vbuf(struct vb2_buffer* vb2)
+{
+ return container_of(vb2, struct avicam_vbuf, vb);
+}
+
+/* Bayer-statistics capture hooks implemented by the stats companion
+ * code: buffer completion, next-buffer selection and init/teardown. */
+extern void avicam_stats_done(struct avi_capture_context *ctx,
+ struct avi_dma_buffer *frame,
+ enum avi_field f);
+
+extern void avicam_stats_next(struct avi_capture_context *ctx,
+ struct avi_dma_buffer *frame,
+ enum avi_field f);
+
+
+
+extern int __devinit avicam_stats_init(struct avicam *avicam);
+extern void __devexit avicam_stats_destroy(struct avicam *avicam);
+
+#endif /* _AVICAM_V4L2_H_ */
diff --git a/drivers/parrot/media/video/lepton.c b/drivers/parrot/media/video/lepton.c
new file mode 100644
index 00000000000000..c26bf3aa1f4784
--- /dev/null
+++ b/drivers/parrot/media/video/lepton.c
@@ -0,0 +1,1076 @@
+/*
+ * Lepton camera driver
+ *
+ * Copyright 2014 Parrot S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "lepton.h"
+
+/* Hint to receive sensor packets straight into vb2 buffers when the
+ * selected pixel format allows it (see lepton_format.zero_copy). */
+static int zero_copy = 0;
+module_param(zero_copy, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+MODULE_PARM_DESC(zero_copy, "hint to use zero copy mode if possible");
+
+/* When set, verify the CRC of every packet of a completed frame. */
+static int check_crc = 1;
+module_param(check_crc, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+MODULE_PARM_DESC(check_crc, "compute crc on every packet");
+
+MODULE_DESCRIPTION("Lepton capture driver");
+MODULE_AUTHOR("Damien Riegel <damien.riegel.ext@parrot.com>");
+MODULE_LICENSE("GPL");
+
+#define LEPTON_DRV_NAME "lepton"
+
+#define LEPTON_SPI_XFER_SPEED (20 * 1000 * 1000)
+/* Each packet starts with a 4-byte header (ID + CRC). */
+#define LEPTON_HEADER_LENGTH (4)
+/* Packet numbers >= this value mark discard packets. */
+#define LEPTON_DISCARD_PKT (0x0FF0)
+#define LEPTON_CRC_POLY ((u32) 0x11021)
+
+/* Per-device state for one Lepton SPI camera. */
+struct lepton {
+ struct v4l2_device v4l2_dev;
+
+ struct video_device *vdev;
+ struct v4l2_subdev *subdev;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ void *alloc_ctx;
+
+/* Message 0 fetches the first packet, message 1 the rest of a frame. */
+#define LEPTON_NR_SPI_MESSAGES (2)
+ struct spi_device *spi;
+ struct spi_message spi_msg[LEPTON_NR_SPI_MESSAGES];
+ struct spi_transfer *spi_xfers;
+ int n_xfers;
+
+ /* Bounce buffer used when zero-copy is not possible */
+ void *virt_addr;
+ dma_addr_t dma_addr;
+ size_t buf_size;
+
+ struct vb2_queue vb2_queue;
+ const struct lepton_format *fmt;
+ struct v4l2_pix_format pix_fmt;
+
+ /* Buffers queued by userspace, protected by slock */
+ struct list_head vb_queued;
+#define LEPTON_SEND_NEXT_QUEUED_VB (0)
+#define LEPTON_STOP_TRANSFER (1)
+ unsigned long flags;
+
+ struct mutex mutex;
+ spinlock_t slock;
+};
+
+/* vb2 buffer wrapper; vb must stay first for container_of() casts. */
+struct lepton_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ /* consecutive discard/bad-CRC first packets for this buffer */
+ unsigned int cnt_fail;
+};
+
+/*
+ * format description
+ * zero_copy: this format can be used in zero copy mode. To compute this flag
+ * use this formula.
+ * line_per_pkt == 1 && (LEPTON_HEADER_LENGTH % (fmt->depth >> 3)) == 0
+ *
+ * Basically, it means that each line can be prefixed by its header and that
+ * header can be consider as valid pixel(s). If any of these criterion is not
+ * met, it means that we can't set correct values in the
+ * v4l2_pix_format.bytesperline and/or width fields.
+ */
+/* Pixel format description; see the comment above for the zero_copy
+ * flag derivation. */
+struct lepton_format {
+ char desc[32];
+ u32 v4l2_fmt;
+ u32 mbus_fmt;
+ /* bits per pixel */
+ int depth;
+ int line_per_pkt;
+ bool zero_copy;
+};
+
+/* Supported formats; get_format() falls back to entry 0 by default. */
+static struct lepton_format formats[] = {
+ {
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ .mbus_fmt = V4L2_MBUS_FMT_Y8_1X8,
+ .desc = "8bit Greyscale",
+ .depth = 8,
+ .line_per_pkt = 2,
+ .zero_copy = false,
+ },
+ {
+ .v4l2_fmt = V4L2_PIX_FMT_Y16,
+ .mbus_fmt = V4L2_MBUS_FMT_Y14_2X8_PADHI_LE,
+ .desc = "14bit Greyscale",
+ .depth = 16,
+ .line_per_pkt = 1,
+ .zero_copy = true,
+ },
+ {
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ .mbus_fmt = V4L2_MBUS_FMT_RGB888_1X24,
+ .desc = "24bit RGB",
+ .depth = 24,
+ .line_per_pkt = 1,
+ .zero_copy = false,
+ },
+};
+
+/* NOTE(review): debug is force-enabled here; drop LEPTON_DEBUG for
+ * production builds if this is not intended. */
+#define LEPTON_DEBUG
+#if defined(LEPTON_DEBUG)
+#define dprintk(_dev, fmt_, args...) \
+ dev_dbg((_dev)->v4l2_dev.dev, "%s: " fmt_, __func__, ##args)
+#else
+#define dprintk(_dev, fmt_, args...)
+#endif
+
+/*************************
+ * Video buffer operations
+ *************************/
+/*
+ * CRC-16 with the CCITT polynomial (0x1021), initial value 0, computed
+ * over big-endian 16-bit words.
+ * NOTE(review): bytes are consumed in pairs, so an odd len would read
+ * one byte past the end of buf -- confirm callers always pass an even
+ * length.
+ */
+static u16 calc_crc16(u8 *buf, size_t len)
+{
+ int i, j;
+ u16 word, crc = 0;
+
+ for(i = 0; i < len; i += 2) {
+ word = buf[i] << 8 | buf[i + 1];
+
+ crc ^= word;
+ for(j = sizeof(crc) * 8; j; j--) {
+ if (crc & 0x8000)
+ crc = (crc << 1) ^ LEPTON_CRC_POLY;
+ else
+ crc = (crc << 1);
+ }
+ }
+
+ return crc;
+}
+
+/*
+ * Validate the CRC carried in bytes 2-3 of a packet header. The header
+ * is temporarily rewritten (T-number masked, CRC zeroed) because the
+ * sensor computes the CRC with those fields cleared, then restored so
+ * the caller sees the packet unmodified.
+ */
+static bool lepton_check_crc(u8 *buf, size_t len)
+{
+ u16 expected_crc, calc_crc;
+ u8 save[LEPTON_HEADER_LENGTH];
+
+ /* this is very unlikely but let's prevent buffer overflow */
+ if (len <= LEPTON_HEADER_LENGTH)
+ return false;
+
+ expected_crc = buf[2] << 8 | buf[3];
+
+ /*
+ * CRC is computed on entire packet, including ID and CRC fields...
+ * For this computation, the T-number (some bits of the frame number)
+ * and the CRC are considered to be 0;
+ */
+ memcpy(save, buf, LEPTON_HEADER_LENGTH);
+ buf[0] &= 0x0F;
+ buf[2] = buf[3] = 0;
+
+ calc_crc = calc_crc16(buf, len);
+
+ memcpy(buf, save, LEPTON_HEADER_LENGTH);
+
+ return calc_crc == expected_crc;
+}
+
+/* Zero-copy needs both the module parameter hint and format support. */
+static inline bool format_can_zero_copy(const struct lepton_format *lep_fmt)
+{
+ return zero_copy && lep_fmt->zero_copy;
+}
+
+/* Byte length of one SPI packet for the current format; the header is
+ * added explicitly only when it is not already counted as pixel data
+ * (i.e. when zero-copy is not in effect). */
+static inline size_t spi_get_packet_length(struct lepton *lep)
+{
+ size_t len;
+
+ /*
+ * If the format can be used in zero copy, then the
+ * header length is already included in the width field
+ * because it is considered to be valid data
+ */
+ len = lep->pix_fmt.width * (lep->fmt->depth >> 3) *
+ lep->fmt->line_per_pkt;
+ if (!format_can_zero_copy(lep->fmt))
+ len += LEPTON_HEADER_LENGTH;
+
+ return len;
+}
+
+static inline size_t spi_align(size_t len)
+{
+ /*
+ * For SPI transfers, we must fetch one packet without pauses in
+ * the SPI clk. To follow this constraint, the P7 SPI master driver
+ * imposes that transfer starts aligned dma_get_cache_alignment().
+ * Wrap this constraint in this helper
+ */
+ return roundup(len, dma_get_cache_alignment());
+}
+
+/*
+ * Point every transfer of both SPI messages at consecutive packet-sized
+ * slices of the given buffer, each slice starting at a cache-aligned
+ * offset (see spi_align()). Both the CPU and DMA addresses advance in
+ * lockstep because the messages are marked is_dma_mapped.
+ */
+static void spi_fill_xfers(struct lepton *lep, void *virt, dma_addr_t dma)
+{
+ int pkt_len = spi_get_packet_length(lep);
+ int align_len = spi_align(pkt_len);
+ struct spi_message *msg;
+ struct spi_transfer *xfer;
+ int i;
+
+ for(i = 0; i < LEPTON_NR_SPI_MESSAGES; i++) {
+ msg = &lep->spi_msg[i];
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->len = pkt_len;
+ xfer->rx_buf = virt;
+ xfer->rx_dma = dma;
+
+ virt += align_len;
+ dma += align_len;
+ }
+ }
+}
+
+/* Queue the one-packet "first line" message; buf rides in msg->context. */
+static int spi_async_first_line(struct lepton *lep, struct lepton_buffer *b)
+{
+ struct spi_message *msg = &lep->spi_msg[0];
+
+ dprintk(lep, "buffer [%p] async first\n", b);
+ msg->context = b;
+ return spi_async(lep->spi, msg);
+}
+
+/* Queue the message that fetches the remaining packets of the frame. */
+static int spi_async_remaining_lines(struct lepton *lep, struct lepton_buffer *b)
+{
+ struct spi_message *msg = &lep->spi_msg[1];
+
+ dprintk(lep, "buffer [%p] async others\n", b);
+ msg->context = b;
+ return spi_async(lep->spi, msg);
+}
+
+/*
+ * Completion callback of the "first line" message. Re-arms itself while
+ * the sensor returns discard packets or packets failing CRC; once a
+ * valid first packet arrives, queues the message fetching the rest of
+ * the frame. On stop request or queueing failure the buffer is
+ * returned to vb2 in the error state.
+ * NOTE(review): the return value of the retrying spi_async_first_line()
+ * call is ignored, and hitting 1000 consecutive failures only logs --
+ * no sensor reset is actually performed here.
+ */
+static void spi_first_line_complete(void *context)
+{
+ struct lepton_buffer *buf = context;
+ struct lepton *lep = vb2_get_drv_priv(buf->vb.vb2_queue);
+ struct spi_transfer *xfer = lep->spi_xfers;
+ u8 *pkt = (u8*)xfer->rx_buf;
+ u16 pkt_nr = (pkt[0] & 0xF) << 8 | pkt[1];
+ int err;
+
+ if (test_bit(LEPTON_STOP_TRANSFER, &lep->flags)) {
+ goto mark_buffer_error;
+ }
+
+ if (pkt_nr >= LEPTON_DISCARD_PKT ||
+ !lepton_check_crc(pkt, xfer->len)) {
+ buf->cnt_fail++;
+ if (buf->cnt_fail > 1000) {
+ printk(KERN_ERR "TOO MUCH FAIL, RESET SENSOR\n");
+ buf->cnt_fail = 0;
+ }
+
+ spi_async_first_line(lep, buf);
+ return;
+ }
+
+ err = spi_async_remaining_lines(lep, buf);
+ if (!err)
+ return;
+
+mark_buffer_error:
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(lep, "buffer [%p] done\n", buf);
+}
+
+/*
+ * In bounce-buffer mode, copy the payload of every received packet into
+ * the vb2 buffer, stripping the per-packet header. A no-op in zero-copy
+ * mode where data already landed in the vb2 buffer.
+ */
+static void copy_to_vb_buffer(struct lepton *lep, struct lepton_buffer *buf)
+{
+ if (!format_can_zero_copy(lep->fmt)) {
+ void *dst = vb2_plane_vaddr(&buf->vb, 0);
+ void *src = lep->virt_addr;
+ int pkt_len = spi_get_packet_length(lep);
+ int align_len = spi_align(pkt_len);
+ int copy_len = pkt_len - LEPTON_HEADER_LENGTH;
+ int i;
+
+ for (i = 0; i < lep->n_xfers; i++) {
+ memcpy(dst, src + LEPTON_HEADER_LENGTH, copy_len);
+ src += align_len;
+ dst += copy_len;
+ }
+ }
+
+ return;
+}
+
+/* Start capturing one frame into buf (re-pointing the transfers at the
+ * vb2 buffer first when zero-copy is in effect). */
+static int capture_frame(struct lepton *lep, struct lepton_buffer *buf)
+{
+ if (format_can_zero_copy(lep->fmt)) {
+ struct vb2_buffer *vb_buf = &buf->vb;
+
+ /*
+ * If we support zero copy, receive sensor data directly in
+ * vb2_buffers. Prepare spi_transfers with the buffer addresses
+ * (dma and cpu).
+ */
+ spi_fill_xfers(lep,
+ vb2_plane_vaddr(vb_buf, 0),
+ vb2_dma_contig_plane_dma_addr(vb_buf, 0));
+ }
+
+ return spi_async_first_line(lep, buf);
+}
+
+/*
+ * Completion callback of the "rest of the frame" message: pick the next
+ * queued buffer (or flag buf_queue() to start the next one), optionally
+ * CRC-check all remaining packets, copy/complete the finished buffer,
+ * then kick the next capture. Runs in SPI completion context, hence the
+ * plain spin_lock.
+ */
+static void spi_frame_complete(void *context)
+{
+ struct lepton_buffer *next = NULL, *buf = context;
+ struct lepton *lep = vb2_get_drv_priv(buf->vb.vb2_queue);
+ int status = VB2_BUF_STATE_DONE;
+
+ spin_lock(&lep->slock);
+ /*
+ * Try to get the next buffer to be sent. If there are no pending
+ * buffers, set a flag to send the next queued one (in buf_queue).
+ * Don't set this flag if we must stop capturing.
+ */
+ if (!list_empty(&lep->vb_queued)) {
+ next = list_first_entry(&lep->vb_queued,
+ struct lepton_buffer, list);
+ list_del(&next->list);
+ }
+ else if (!test_bit(LEPTON_STOP_TRANSFER, &lep->flags)) {
+ set_bit(LEPTON_SEND_NEXT_QUEUED_VB, &lep->flags);
+ dprintk(lep, "setting send next queued vb flag\n");
+ }
+ spin_unlock(&lep->slock);
+
+ if (check_crc) {
+ struct spi_transfer *xfer;
+ bool crc;
+ int i;
+
+ /* xfer 0 was already checked in spi_first_line_complete() */
+ for(i = 1; i < lep->n_xfers; i++) {
+ xfer = lep->spi_xfers + i;
+
+ crc = lepton_check_crc(xfer->rx_buf, xfer->len);
+ if (!crc) {
+ status = VB2_BUF_STATE_ERROR;
+ break;
+ }
+ }
+ }
+
+ copy_to_vb_buffer(lep, buf);
+ vb2_buffer_done(&buf->vb, status);
+
+ if (next)
+ capture_frame(lep, next);
+}
+
+/*
+ * Allocate one spi_transfer per packet of a frame and split them over
+ * the two messages: msg[0] carries only the first packet (so its CRC
+ * can be validated before committing to the whole frame), msg[1] the
+ * remaining packets with a small inter-packet delay. Idempotent: a
+ * second call while spi_xfers is allocated is a no-op.
+ */
+static int spi_init_messages(struct lepton *lep)
+{
+ struct spi_message *msg;
+ struct spi_transfer *xfers;
+ int n_xfers = lep->pix_fmt.height / lep->fmt->line_per_pkt;
+
+ if (lep->spi_xfers)
+ return 0;
+
+ xfers = kcalloc(n_xfers, sizeof(struct spi_transfer), GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
+
+ lep->spi_xfers = xfers;
+ lep->n_xfers = n_xfers;
+
+ msg = &lep->spi_msg[0];
+ spi_message_init(msg);
+ msg->complete = spi_first_line_complete;
+ msg->is_dma_mapped = true;
+ xfers->speed_hz = LEPTON_SPI_XFER_SPEED;
+ spi_message_add_tail(xfers, msg);
+
+ xfers++;
+
+ msg = &lep->spi_msg[1];
+ spi_message_init(msg);
+ msg->complete = spi_frame_complete;
+ msg->is_dma_mapped = true;
+ for(; xfers != lep->spi_xfers + n_xfers; xfers++) {
+ xfers->delay_usecs = 3;
+ xfers->speed_hz = LEPTON_SPI_XFER_SPEED;
+ spi_message_add_tail(xfers, msg);
+ }
+
+ return 0;
+}
+
+/* Release the transfer array; NULLing spi_xfers re-arms init above. */
+static void spi_free_messages(struct lepton *lep)
+{
+ kfree(lep->spi_xfers);
+ lep->spi_xfers = NULL;
+}
+
+/* vb2 queue_setup: single plane sized from the negotiated format; at
+ * least two buffers by default. */
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct lepton *lep = vb2_get_drv_priv(q);
+ unsigned int size;
+
+ size = lep->pix_fmt.sizeimage;
+
+ if (*num_buffers == 0)
+ *num_buffers = 2;
+ *num_planes = 1;
+
+ sizes[0] = size;
+ alloc_ctxs[0] = lep->alloc_ctx;
+
+ dprintk(lep, "count=%d size=%u\n",
+ *num_buffers, size);
+
+ return 0;
+}
+
+/* Release the serialization mutex while vb2 sleeps for a buffer. */
+static void wait_prepare(struct vb2_queue *q)
+{
+ struct lepton *lep = vb2_get_drv_priv(q);
+ mutex_unlock(&lep->mutex);
+}
+
+static void wait_finish(struct vb2_queue *q)
+{
+ struct lepton *lep = vb2_get_drv_priv(q);
+ mutex_lock(&lep->mutex);
+}
+
+/* Trace-only hook; no per-buffer init work needed. */
+static int buf_init(struct vb2_buffer *vb)
+{
+ struct lepton *lep = vb2_get_drv_priv(vb->vb2_queue);
+ dprintk(lep, "buf init\n");
+ return 0;
+}
+
+/* Verify the plane can hold a full image and reset the failure count. */
+static int buf_prepare(struct vb2_buffer *vb)
+{
+ struct lepton *lep = vb2_get_drv_priv(vb->vb2_queue);
+ struct lepton_buffer *buf = container_of(vb, struct lepton_buffer, vb);
+ unsigned int size = lep->pix_fmt.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(lep, "data will not fit into plane (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ buf->cnt_fail = 0;
+
+ dprintk(lep, "buffer [%p] prepared\n", buf);
+
+ return 0;
+}
+
+/* Trace-only hook. */
+static int buf_finish(struct vb2_buffer *vb)
+{
+ struct lepton *lep = vb2_get_drv_priv(vb->vb2_queue);
+ dprintk(lep, "buf finish\n");
+ return 0;
+}
+
+/* Trace-only hook. */
+static void buf_cleanup(struct vb2_buffer *vb)
+{
+ struct lepton *lep = vb2_get_drv_priv(vb->vb2_queue);
+ dprintk(lep, "buf cleanup\n");
+}
+
+/*
+ * vb2 start_streaming: build the SPI messages, allocate the bounce
+ * buffer when the format cannot do zero-copy, then either kick the
+ * first queued buffer or flag buf_queue() to start on the next one.
+ */
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct lepton *lep = vb2_get_drv_priv(q);
+ int err;
+
+ err = spi_init_messages(lep);
+ if (err)
+ return err;
+
+ if (!format_can_zero_copy(lep->fmt)) {
+ /*
+ * If the format is not compatible with zero copy,
+ * we must use an intermediate buffer to receive data and
+ * then copy them in the vb2_buffer
+ */
+ void *cpu;
+ dma_addr_t dma;
+ size_t size = spi_get_packet_length(lep);
+
+ size = spi_align(size);
+ size *= lep->n_xfers;
+
+ /* NOTE(review): allocation is not tied to any device
+  * (NULL dev) -- confirm this is safe on this platform. */
+ cpu = dma_alloc_coherent(NULL, size, &dma, GFP_DMA);
+ if (!cpu) {
+ spi_free_messages(lep);
+ return -ENOMEM;
+ }
+
+ lep->virt_addr = cpu;
+ lep->dma_addr = dma;
+ lep->buf_size = size;
+
+ spi_fill_xfers(lep, cpu, dma);
+ }
+
+ spin_lock(&lep->slock);
+ lep->flags = 0;
+ if (count == 0) {
+ set_bit(LEPTON_SEND_NEXT_QUEUED_VB, &lep->flags);
+ dprintk(lep, "setting next queue vb flag, f 0x%.8lX\n", lep->flags);
+ }
+ else {
+ struct lepton_buffer *buf;
+ buf = list_first_entry(&lep->vb_queued,
+ struct lepton_buffer, list);
+
+ list_del(&buf->list);
+ capture_frame(lep, buf);
+ dprintk(lep, "start capture frame\n");
+ }
+ spin_unlock(&lep->slock);
+
+ dprintk(lep, "streaming started\n");
+
+ return 0;
+}
+
+/*
+ * vb2 stop_streaming: flag the completion callbacks to stop re-arming,
+ * fail every still-queued buffer (lock dropped around vb2_buffer_done
+ * since it may invoke callbacks), wait for the in-flight buffer, then
+ * free the bounce buffer and SPI messages.
+ */
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct lepton *lep = vb2_get_drv_priv(q);
+
+ set_bit(LEPTON_STOP_TRANSFER, &lep->flags);
+
+ spin_lock(&lep->slock);
+ while (!list_empty(&lep->vb_queued)) {
+ struct lepton_buffer *buf;
+
+ buf = list_first_entry(&lep->vb_queued,
+ struct lepton_buffer, list);
+ list_del(&buf->list);
+
+ spin_unlock(&lep->slock);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(lep, "buffer [%p] done\n", buf);
+ spin_lock(&lep->slock);
+ }
+ spin_unlock(&lep->slock);
+
+ /*
+ * wait for the buffer being filled
+ * by the SPI to be returned
+ */
+ vb2_wait_for_all_buffers(q);
+ if (!format_can_zero_copy(lep->fmt)) {
+ dma_free_coherent(NULL, lep->buf_size,
+ lep->virt_addr, lep->dma_addr);
+ }
+ lep->flags = 0;
+ spi_free_messages(lep);
+ dprintk(lep, "streaming stopped\n");
+
+ return 0;
+}
+
+/*
+ * vb2 buf_queue: if the capture pipeline is idle and waiting for a
+ * buffer (flag set by spi_frame_complete/start_streaming), start the
+ * capture immediately; otherwise append to the pending list.
+ */
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct lepton *lep = vb2_get_drv_priv(vb->vb2_queue);
+ struct lepton_buffer *buf = container_of(vb, struct lepton_buffer, vb);
+
+ dprintk(lep, "queue buf [%p]\n", buf);
+
+ spin_lock(&lep->slock);
+ if (test_bit(LEPTON_SEND_NEXT_QUEUED_VB, &lep->flags)) {
+ clear_bit(LEPTON_SEND_NEXT_QUEUED_VB, &lep->flags);
+ capture_frame(lep, buf);
+ dprintk(lep, "start capture frame, f 0x%.8lX\n", lep->flags);
+ }
+ else {
+ list_add_tail(&buf->list, &lep->vb_queued);
+ }
+ spin_unlock(&lep->slock);
+}
+
+static struct vb2_ops lepton_videobuf_ops = {
+ .queue_setup = queue_setup,
+ .wait_prepare = wait_prepare,
+ .wait_finish = wait_finish,
+ .buf_init = buf_init,
+ .buf_prepare = buf_prepare,
+ .buf_finish = buf_finish,
+ .buf_cleanup = buf_cleanup,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .buf_queue = buf_queue,
+};
+
+/***********************
+ * lepton file operations
+ ***********************/
+static const struct v4l2_file_operations lepton_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*************
+ * V4L2 IOCTLs
+ *************/
+/* VIDIOC_QUERYCAP: static capability description for this device. */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct lepton *lep = video_drvdata(file);
+
+ strcpy(cap->driver, LEPTON_DRV_NAME);
+ strcpy(cap->card, LEPTON_DRV_NAME);
+ strlcpy(cap->bus_info, lep->v4l2_dev.name, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/* VIDIOC_ENUM_FMT: enumerate the static formats[] table. */
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ const struct lepton_format *fmt;
+
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+
+ f->flags = 0;
+ f->pixelformat = fmt->v4l2_fmt;
+ strlcpy(f->description, fmt->desc, sizeof(f->description));
+
+ return 0;
+}
+
+/* Driver format for the requested pixelformat; falls back to the first
+ * table entry when the pixelformat is unknown. */
+static const struct lepton_format* get_format(struct v4l2_format *f)
+{
+ struct lepton_format *fmt;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->v4l2_fmt == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ i = 0; /* default to first format */
+
+ return &formats[i];
+}
+
+/* Driver format for a media-bus code; NULL when unsupported. */
+static const struct lepton_format* get_format_from_mbus(u32 mbus)
+{
+ struct lepton_format *fmt;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->mbus_fmt == mbus)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ return NULL;
+
+ return &formats[i];
+}
+
+/*
+ * Derive the v4l2 pixel format from a media-bus format. In zero-copy
+ * mode the packet header is folded into the visible width (it lands in
+ * the buffer) and bytesperline is cache-aligned to match the SPI
+ * transfer layout.
+ */
+static void lepton_fill_pix_format(const struct lepton_format *lepfmt,
+ struct v4l2_pix_format *pixfmt,
+ const struct v4l2_mbus_framefmt *mbusfmt)
+{
+ v4l2_fill_pix_format(pixfmt, mbusfmt);
+ pixfmt->pixelformat = lepfmt->v4l2_fmt;
+
+ if (format_can_zero_copy(lepfmt)) {
+ /*
+ * In zero copy mode, we consider the header as being part of
+ * the data because there is no way to cut it out without
+ * memcpy. The V4L2 layer doesn't support left padding.
+ */
+ pixfmt->width += LEPTON_HEADER_LENGTH / (lepfmt->depth >> 3);
+ pixfmt->bytesperline =
+ spi_align(pixfmt->width * (lepfmt->depth >> 3));
+ }
+ else {
+ pixfmt->bytesperline =
+ pixfmt->width * (lepfmt->depth >> 3);
+ }
+
+ pixfmt->sizeimage =
+ pixfmt->height * pixfmt->bytesperline;
+}
+
+/* Query the subdevice's current mbus format and translate it into a
+ * v4l2 pixel format; -EINVAL when the mbus code is unsupported. */
+static int get_pixfmt_from_sd(struct lepton *lep, struct v4l2_pix_format *pixfmt)
+{
+ struct v4l2_mbus_framefmt sub_fmt;
+ const struct lepton_format *fmt;
+ int err;
+
+ err = v4l2_subdev_call(lep->subdev, video, g_mbus_fmt, &sub_fmt);
+ if (err)
+ return err;
+
+ fmt = get_format_from_mbus(sub_fmt.code);
+ if (!fmt)
+ return -EINVAL;
+
+ lepton_fill_pix_format(fmt, pixfmt, &sub_fmt);
+
+ return 0;
+}
+
+/* VIDIOC_G_FMT: reflect the subdevice's current format (also cached in
+ * lep->pix_fmt as a side effect). */
+static int vidioc_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct lepton *lep = video_drvdata(file);
+ struct v4l2_pix_format pixfmt = {0};
+ int err;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ err = get_pixfmt_from_sd(lep, &pixfmt);
+ if (err)
+ return err;
+
+ f->fmt.pix = lep->pix_fmt = pixfmt;
+ return 0;
+}
+
+/* VIDIOC_TRY_FMT: negotiate the format with the subdevice without
+ * committing; the subdevice may substitute a different mbus code. */
+static int vidioc_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct lepton *lep = video_drvdata(file);
+ const struct lepton_format *fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_mbus_framefmt sub_fmt;
+ int err;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ fmt = get_format(f);
+
+ v4l2_fill_mbus_format(&sub_fmt, pixfmt, fmt->mbus_fmt);
+ err = v4l2_subdev_call(lep->subdev, video, try_mbus_fmt, &sub_fmt);
+ if (err)
+ return err;
+
+ if (sub_fmt.code != fmt->mbus_fmt) {
+ /* subdevice doesn't want this format, damn... */
+ fmt = get_format_from_mbus(sub_fmt.code);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ lepton_fill_pix_format(fmt, pixfmt, &sub_fmt);
+
+ return 0;
+}
+
+/* VIDIOC_S_FMT: try, then commit the format to the subdevice and cache
+ * the negotiated result. Rejected while the queue is streaming. */
+static int vidioc_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct lepton *lep = video_drvdata(file);
+ struct vb2_queue *q = &lep->vb2_queue;
+ struct v4l2_mbus_framefmt sub_fmt = {0};
+ const struct lepton_format *lep_fmt;
+ int err;
+
+ if (vb2_is_streaming(q))
+ return -EBUSY;
+
+ err = vidioc_try_fmt_vid_cap(file, fh, f);
+ if (err < 0)
+ return err;
+
+ lep_fmt = get_format(f);
+ v4l2_fill_mbus_format(&sub_fmt, &f->fmt.pix, lep_fmt->mbus_fmt);
+ err = v4l2_subdev_call(lep->subdev, video, s_mbus_fmt, &sub_fmt);
+ if (err)
+ return err;
+
+ if (sub_fmt.code != lep_fmt->mbus_fmt) {
+ lep_fmt = get_format_from_mbus(sub_fmt.code);
+ if (!lep_fmt)
+ return -EINVAL;
+ }
+
+ lepton_fill_pix_format(lep_fmt, &f->fmt.pix, &sub_fmt);
+
+ lep->fmt = lep_fmt;
+ lep->pix_fmt = f->fmt.pix;
+ return 0;
+}
+
+/* Single fixed camera input (index 0). */
+static int vidioc_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_UNKNOWN;
+ strlcpy(inp->name, "Camera", sizeof(inp->name));
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * VIDIOC_STREAMON handler: start the subdevice stream first (which
+ * locks the telemetry control), verify that the subdevice format still
+ * matches the format the queue was configured with, then start the vb2
+ * queue. Any failure rolls the subdevice stream back.
+ *
+ * Marked static like every other ioctl handler in this file: these are
+ * only referenced through lepton_ioctl_ops and must not leak into the
+ * kernel's global symbol namespace.
+ */
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct lepton *lep = video_drvdata(file);
+ struct v4l2_pix_format fmt;
+ int err;
+
+ /*
+ * starting streaming on the subdev locks the telemetry
+ * control, preventing any change from happening. We can
+ * then check that subdev and device formats are the same
+ */
+ err = v4l2_subdev_call(lep->subdev, video, s_stream, 1);
+ if (err)
+ goto error;
+
+ err = get_pixfmt_from_sd(lep, &fmt);
+ if (err)
+ goto stop_stream;
+
+ if (fmt.width != lep->pix_fmt.width ||
+ fmt.height != lep->pix_fmt.height ||
+ fmt.pixelformat != lep->pix_fmt.pixelformat ||
+ fmt.field != lep->pix_fmt.field ||
+ fmt.bytesperline != lep->pix_fmt.bytesperline ||
+ fmt.sizeimage != lep->pix_fmt.sizeimage ||
+ fmt.colorspace != lep->pix_fmt.colorspace) {
+ err = -EPIPE;
+ goto stop_stream;
+ }
+
+ err = vb2_ioctl_streamon(file, priv, i);
+ if (err)
+ goto stop_stream;
+
+ return 0;
+
+stop_stream:
+ v4l2_subdev_call(lep->subdev, video, s_stream, 0);
+error:
+ return err;
+}
+
+/* VIDIOC_STREAMOFF: stop the subdevice stream, then the vb2 queue. */
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ struct lepton *lep = video_drvdata(file);
+
+ v4l2_subdev_call(lep->subdev, video, s_stream, 0);
+ return vb2_ioctl_streamoff(file, fh, i);
+}
+
+static const struct v4l2_ioctl_ops lepton_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ //.vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+/**************************
+ * module setup and cleanup
+ **************************/
+/* Empty control handler; installed so subdevice controls get inherited
+ * by the v4l2 device. */
+static int lepton_init_ctrl(struct lepton *lep)
+{
+ lep->v4l2_dev.ctrl_handler = &lep->ctrl_handler;
+ return v4l2_ctrl_handler_init(&lep->ctrl_handler, 0);
+}
+
+static void lepton_free_ctrl(struct lepton *lep)
+{
+ v4l2_ctrl_handler_free(&lep->ctrl_handler);
+}
+
+/*
+ * SPI probe: allocate device state, create the vb2 DMA-contig context,
+ * register the v4l2 device, attach the I2C control subdevice from
+ * platform data, initialise the vb2 queue and register the video node.
+ * Resources are unwound in reverse order on failure.
+ */
+static int lepton_probe(struct spi_device *spi)
+{
+ struct lepton *lep;
+ struct video_device *vdev;
+ struct v4l2_subdev *subdev;
+ struct vb2_queue *vb2_q;
+ struct lepton_platform_data *data;
+ struct i2c_adapter *adapter;
+ struct vb2_dc_conf *alloc_ctx;
+ int err;
+
+ data = spi->dev.platform_data;
+ if (!data)
+ return -EINVAL;
+
+ lep = kzalloc(sizeof(struct lepton), GFP_KERNEL);
+ if (!lep)
+ return -ENOMEM;
+
+ mutex_init(&lep->mutex);
+ spin_lock_init(&lep->slock);
+ INIT_LIST_HEAD(&lep->vb_queued);
+
+ err = -ENOMEM;
+ alloc_ctx = (struct vb2_dc_conf *)vb2_dma_contig_init_ctx(&spi->dev);
+ if (!alloc_ctx)
+ goto free_lep;
+
+ alloc_ctx->cache_flags = data->vb2_cache_flags;
+
+ lep->alloc_ctx = alloc_ctx;
+
+ err = lepton_init_ctrl(lep);
+ if (err)
+ goto free_alloc_ctx;
+
+ spi_set_drvdata(spi, lep);
+ err = v4l2_device_register(&spi->dev, &lep->v4l2_dev);
+ if (err)
+ goto free_ctrl;
+
+ err = -ENODEV;
+ adapter = i2c_get_adapter(data->i2c_adapter_nr);
+ if (!adapter)
+ goto unregister_v4l2_dev;
+ /* NOTE(review): the adapter reference is not stored anywhere, so
+  * lepton_remove() has no way to i2c_put_adapter() it -- confirm
+  * whether this reference is leaked on the success path. */
+ subdev = v4l2_i2c_new_subdev_board(&lep->v4l2_dev, adapter,
+ &data->board_info, NULL);
+ if (!subdev)
+ goto put_i2c_adapter;
+ lep->subdev = subdev;
+
+ err = v4l2_device_register_subdev_nodes(&lep->v4l2_dev);
+ if (err)
+ goto put_i2c_adapter;
+
+ vb2_q = &lep->vb2_queue;
+ memset(vb2_q, 0, sizeof(*vb2_q));
+ vb2_q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vb2_q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ; /* XXX: change that ? */
+ vb2_q->drv_priv = lep;
+ vb2_q->buf_struct_size = sizeof(struct lepton_buffer);
+ vb2_q->ops = &lepton_videobuf_ops;
+ vb2_q->mem_ops = &vb2_dma_contig_memops;
+ vb2_q->lock = &lep->mutex;
+ /* NOTE(review): vb2_queue_init() return value is not checked. */
+ vb2_queue_init(vb2_q);
+
+ err = -ENOMEM;
+ vdev = video_device_alloc();
+ if (!vdev)
+ goto put_i2c_adapter;
+
+ strlcpy(vdev->name, "lepton", sizeof(vdev->name)); /* XXX: change that ? */
+ vdev->v4l2_dev = &lep->v4l2_dev;
+ vdev->release = video_device_release;
+ vdev->fops = &lepton_fops;
+ vdev->ioctl_ops = &lepton_ioctl_ops;
+ vdev->current_norm = V4L2_STD_UNKNOWN;
+ vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->lock = &lep->mutex;
+ vdev->queue = vb2_q;
+
+ video_set_drvdata(vdev, lep);
+
+ err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (err < 0)
+ goto release_vdev;
+
+ /* Best effort: errors only leave pix_fmt unset until S_FMT. */
+ get_pixfmt_from_sd(lep, &lep->pix_fmt);
+
+ v4l2_info(&lep->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(vdev));
+
+ lep->vdev = vdev;
+ lep->spi = spi;
+ return 0;
+
+release_vdev:
+ video_device_release(vdev);
+put_i2c_adapter:
+ i2c_put_adapter(adapter);
+unregister_v4l2_dev:
+ v4l2_device_unregister(&lep->v4l2_dev);
+free_ctrl:
+ lepton_free_ctrl(lep);
+free_alloc_ctx:
+ vb2_dma_contig_cleanup_ctx(lep->alloc_ctx);
+free_lep:
+ kfree(lep);
+ return err;
+}
+
+/*
+ * SPI remove: unwind probe in reverse order.
+ * NOTE(review): the i2c adapter reference taken in probe is never
+ * released here -- confirm whether an i2c_put_adapter() is missing.
+ */
+static int lepton_remove(struct spi_device *spi)
+{
+ struct lepton *lep;
+
+ lep = spi_get_drvdata(spi);
+
+ video_unregister_device(lep->vdev);
+ v4l2_device_unregister(&lep->v4l2_dev);
+ lepton_free_ctrl(lep);
+ vb2_dma_contig_cleanup_ctx(lep->alloc_ctx);
+ kfree(lep);
+ return 0;
+}
+
+static struct spi_driver lepton_driver = {
+ .driver = {
+ .name = LEPTON_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = lepton_probe,
+ .remove = __devexit_p(lepton_remove),
+};
+
+module_spi_driver(lepton_driver);
+
diff --git a/drivers/parrot/media/video/lepton.h b/drivers/parrot/media/video/lepton.h
new file mode 100644
index 00000000000000..5c8d7d7d48762a
--- /dev/null
+++ b/drivers/parrot/media/video/lepton.h
@@ -0,0 +1,18 @@
+
+#ifndef _LEPTON_H_
+#define _LEPTON_H_
+
+#include <linux/i2c.h>
+#include <media/videobuf2-core.h>
+
+/* Board wiring for the Lepton SPI video device: the companion I2C
+ * control interface (adapter number + board info) and vb2 cache hints. */
+struct lepton_platform_data {
+ struct i2c_board_info board_info;
+ int i2c_adapter_nr;
+ enum vb2_cache_flags vb2_cache_flags;
+};
+
+/* Board wiring for the Lepton I2C control driver: power/reset GPIOs. */
+struct lepton_i2c_platform_data {
+ int gpio_pwr;
+ int gpio_rst;
+};
+#endif /* _LEPTON_H_ */
diff --git a/drivers/parrot/media/video/lepton_dev.c b/drivers/parrot/media/video/lepton_dev.c
new file mode 100644
index 00000000000000..c85904cf1e40cb
--- /dev/null
+++ b/drivers/parrot/media/video/lepton_dev.c
@@ -0,0 +1,886 @@
+/*
+ * Lepton I2C driver
+ *
+ * Copyright 2014 Parrot S.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "lepton.h"
+
+MODULE_AUTHOR("Damien Riegel <damien.riegel.ext@parrot.com>");
+MODULE_DESCRIPTION("Lepton camera I2C controller");
+MODULE_LICENSE("GPL");
+
+#define LEPTON_STATUS_REG ((u16)0x0002)
+#define LEPTON_CMD_REG ((u16)0x0004)
+#define LEPTON_DATA_LENGTH_REG ((u16)0x0006)
+#define LEPTON_DATA0_REG ((u16)0x0008)
+
+#define LEPTON_CMD_MOD_AGC ((u16) (0x100))
+#define LEPTON_CMD_MOD_SYS ((u16) (0x200))
+#define LEPTON_CMD_MOD_VID ((u16) (0x300))
+#define LEPTON_CMD_MOD_OEM ((u16) (0x800 | (1 << 14)))
+#define LEPTON_CMD_MOD_RAD ((u16) (0xE00 | (1 << 14)))
+
+#define LEPTON_CMD_TYP_GET ((u16) 0x0)
+#define LEPTON_CMD_TYP_SET ((u16) 0x1)
+#define LEPTON_CMD_TYP_RUN ((u16) 0x2)
+
+#define LEPTON_CMD_OEM_SEL_FMT (LEPTON_CMD_MOD_OEM | 0x28)
+#define LEPTON_CMD_OEM_SEL_SRC (LEPTON_CMD_MOD_OEM | 0x2C)
+#define LEPTON_CMD_AGC_ENABLE (LEPTON_CMD_MOD_AGC | 0x00)
+#define LEPTON_CMD_AGC_POLICY (LEPTON_CMD_MOD_AGC | 0x04)
+#define LEPTON_CMD_SYS_TELE_ENABLE (LEPTON_CMD_MOD_SYS | 0x18)
+#define LEPTON_CMD_SYS_TELE_LOC (LEPTON_CMD_MOD_SYS | 0x1C)
+#define LEPTON_CMD_RAD_ENABLE (LEPTON_CMD_MOD_RAD | 0x10)
+#define LEPTON_CMD_RAD_SHUTTER_MODE (LEPTON_CMD_MOD_RAD | 0x24)
+#define LEPTON_CMD_RAD_SHUTTER_TEMP (LEPTON_CMD_MOD_RAD | 0x28)
+#define LEPTON_CMD_RAD_FFC (LEPTON_CMD_MOD_RAD | 0x2C)
+
+#define LEPTON_CMD_ID_SHIFT 2
+
+enum lepton_telemetry {
+ LEPTON_TELEMETRY_DISABLED,
+ LEPTON_TELEMETRY_FIRST,
+ LEPTON_TELEMETRY_LAST,
+};
+
/* Per-device state of the Lepton i2c control subdevice. */
struct lepton_subdev {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler ctrl_handler;

	struct i2c_client *client;
	/* currently negotiated media-bus format */
	struct v4l2_mbus_framefmt format;

	enum lepton_telemetry tele;
	/* telemetry menu control; grabbed while streaming */
	struct v4l2_ctrl *tele_ctrl;

	/* GPIO numbers, -1 when not wired */
	int gpio_pwr;
	int gpio_rst;
	int gpio_clken;	/* NOTE(review): never used in this file -- confirm */
};
+
+#define to_lepton(_sd) container_of(_sd, struct lepton_subdev, sd)
+
+struct lepton_subdev_format {
+ enum v4l2_mbus_pixelcode code;
+ /* conf is the value to be given to the sensor to switch
+ * to this format */
+ u32 conf;
+};
+
+#define LEPTON_IMAGE_WIDTH (80)
+#define LEPTON_IMAGE_HEIGHT (60)
+#define LEPTON_TELEMETRY_LINES (3)
+
+struct lepton_subdev_format formats[] = {
+ /* format in first position is the default format */
+ {
+ .code = V4L2_MBUS_FMT_Y14_2X8_PADHI_LE, /* RAW14 */
+ .conf = 0x7,
+ },
+ {
+ .code = V4L2_MBUS_FMT_Y8_1X8, /* RAW8 */
+ .conf = 0x0,
+ },
+ {
+ .code = V4L2_MBUS_FMT_RGB888_1X24, /* RGB24 */
+ .conf = 0x3,
+ },
+};
+
+u32 lepton_init_regs[] = {
+ LEPTON_CMD_OEM_SEL_FMT, 7, /* set format to RAW14 */
+ LEPTON_CMD_OEM_SEL_SRC, 1, /* set output source to COOKED */
+ LEPTON_CMD_SYS_TELE_ENABLE, 1, /* enable telemetry */
+ LEPTON_CMD_SYS_TELE_LOC, 1, /* set telemetry location to footer */
+};
+
+static u32 get_image_height(u32 mbus_code, bool telemetry)
+{
+ u32 h = LEPTON_IMAGE_HEIGHT;
+
+ if (telemetry) {
+ h += LEPTON_TELEMETRY_LINES;
+ if (mbus_code == V4L2_MBUS_FMT_Y8_1X8)
+ h += LEPTON_TELEMETRY_LINES;
+ }
+
+ return h;
+}
+
+/*************
+ * I2C helpers
+ *************/
+static int lepton_transfer(struct lepton_subdev *dev, u16 reg_addr,
+ u8 *data, size_t cnt, bool read)
+{
+ struct i2c_client *cl = dev->client;
+ struct i2c_msg msg[2];
+ u8 reg[] = { (reg_addr >> 8) & 0xFF, reg_addr & 0xFF };
+ int err;
+
+ msg[0].addr = cl->addr;
+ msg[0].flags = 0;
+ msg[0].len = (u16) sizeof(reg_addr);
+ msg[0].buf = (u8*) &reg;
+
+ msg[1].addr = cl->addr;
+ msg[1].flags = read ? I2C_M_RD : I2C_M_NOSTART;
+ msg[1].len = cnt;
+ msg[1].buf = data;
+
+ err = i2c_transfer(cl->adapter, msg, ARRAY_SIZE(msg));
+ if (err == ARRAY_SIZE(msg))
+ return 0;
+ else if (err < 0) {
+ return err;
+ }
+ else
+ return -EIO;
+}
+
/*
 * 16-bit word transfer helper: converts between host order and the
 * sensor's big-endian wire order around lepton_transfer().
 *
 * NOTE(review): `buff` is a VLA sized by the caller's @cnt; all current
 * callers pass small fixed counts, but a large count would risk kernel
 * stack overflow -- confirm no caller passes user-controlled sizes.
 */
static int lepton_transfer16(struct lepton_subdev *dev, u16 reg_addr,
			     u16 *data, size_t cnt, bool read)
{
	u8 buff[cnt * sizeof(*data)];
	int err, i;

	if (!read) {
		/* host order -> big-endian bytes */
		for (i = 0; i < cnt; i++) {
			buff[2 * i] = (data[i] >> 8) & 0xFF;
			buff[2 * i + 1] = data[i] & 0xFF;
		}
	}

	err = lepton_transfer(dev, reg_addr, buff, cnt * sizeof(*data), read);
	if (err)
		return err;

	if (read) {
		/* big-endian bytes -> host order */
		for (i = 0; i < cnt; i++) {
			data[i] = buff[2 * i] << 8 | buff[2 * i + 1];
		}
	}

	return err;
}
+
/*
 * Poll the status register until the sensor's busy bit clears or a
 * 2 second timeout expires.
 *
 * On success (return 0) and when @error_code is non-NULL, it receives
 * the signed response code taken from the high byte of the status
 * register.  Returns -ETIMEDOUT on timeout or a transfer error.
 *
 * NOTE(review): this is a tight busy-poll over i2c with no sleep
 * between iterations -- confirm that is acceptable on this bus.
 */
static int lepton_poll_busy(struct lepton_subdev *dev, s8 *error_code)
{
	unsigned long timeout = jiffies + 2 * HZ;
	u16 status;
	int err;

	do {
		if (time_is_before_jiffies(timeout)) {
			err = -ETIMEDOUT;
			break;
		}

		err = lepton_transfer16(dev, LEPTON_STATUS_REG, &status, 1, true);
		if (err)
			break;


		/* bit 0 set: command still executing, keep polling */
		if (status & 0x1)
			err = -EBUSY;
		else if (error_code)
			*error_code = status >> 8;

	} while (err);

	return err;
}
+
/*
 * Execute a GET command: program the expected length, issue the command
 * word, wait for completion, then read @cnt 16-bit words from the data
 * registers into @data.
 *
 * Returns 0 on success, a negative errno on transfer failure/timeout,
 * or the sensor's non-zero response code (an s8 widened to int).
 */
static int lepton_get(struct lepton_subdev *dev, u16 cmd, u16 *data, size_t cnt)
{
	int err;
	s8 error_code;
	u16 length = (u16) cnt;

	/* make sure no previous command is still running */
	err = lepton_poll_busy(dev, NULL);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_DATA_LENGTH_REG, &length, 1, false);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_CMD_REG, &cmd, 1, false);
	if (err)
		return err;

	err = lepton_poll_busy(dev, &error_code);
	if (err)
		return err;
	else if (error_code)
		return error_code;

	err = lepton_transfer16(dev, LEPTON_DATA0_REG, data, cnt, true);
	return err;
}
+
/*
 * GET command returning 32-bit values: the sensor stores each 32-bit
 * quantity as two 16-bit words, low word first.
 *
 * NOTE(review): `buff` is a VLA sized by the caller's @cnt (see the
 * note on lepton_transfer16()).
 */
static int lepton_get32(struct lepton_subdev *dev, u16 cmd, u32 *data, size_t cnt)
{
	u16 buff[2 * cnt];
	int err, i;

	err = lepton_get(dev, cmd, buff, 2 * cnt);
	if (err)
		return err;

	/* reassemble: low word first, high word second */
	for (i = 0; i < cnt; i++) {
		data[i] = buff[2 * i + 1] << 16 | buff[2 * i];
	}

	return 0;
}
+
/*
 * Execute a SET/RUN command: write @cnt 16-bit payload words to the
 * data registers, program the length, issue the command word, then
 * wait for completion.
 *
 * Returns 0 on success, a negative errno on transfer failure/timeout,
 * or the sensor's non-zero response code (an s8 widened to int).
 */
static int lepton_set(struct lepton_subdev *dev, u16 cmd, u16 *data, size_t cnt)
{
	int err;
	s8 error_code;
	u16 length = (u16) cnt;

	/* make sure no previous command is still running */
	err = lepton_poll_busy(dev, NULL);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_DATA0_REG, data, cnt, false);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_DATA_LENGTH_REG, &length, 1, false);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_CMD_REG, &cmd, 1, false);
	if (err)
		return err;

	err = lepton_poll_busy(dev, &error_code);
	if (err)
		return err;
	else
		return error_code;
}
+
+static int lepton_set32(struct lepton_subdev *dev, u16 cmd, u32 *data, size_t cnt)
+{
+ u16 buff[2 * cnt];
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ buff[2 * i] = data[i] & 0xFFFF;
+ buff[2 * i + 1] = (data[i] >> 16) & 0xFFFF;
+ }
+
+ return lepton_set(dev, cmd, buff, 2 * cnt);
+}
+
#if 0
/*
 * Dead code (compiled out): issue a command word with no payload and
 * no length programming.  Kept for reference; consider removing.
 */
static int lepton_send_cmd(struct lepton_subdev *dev, u16 cmd)
{
	int err;
	s8 error_code;

	err = lepton_poll_busy(dev, NULL);
	if (err)
		return err;

	err = lepton_transfer16(dev, LEPTON_CMD_REG, &cmd, 1, false);
	if (err)
		return err;

	err = lepton_poll_busy(dev, &error_code);
	if (err)
		return err;
	else
		return error_code;
}
#endif
+
+/*****************
+ * V4L2 operations
+ *****************/
+static void lepton_power_on(struct lepton_subdev *lepton)
+{
+ if (lepton->gpio_pwr != -1) {
+ gpio_set_value(lepton->gpio_pwr, 1);
+ /* must wait 5000 clock cycles after power up,
+ * round up to 1ms */
+ msleep(1);
+ }
+}
+
+static void lepton_power_off(struct lepton_subdev *lepton)
+{
+ if (lepton->gpio_pwr != -1)
+ gpio_set_value(lepton->gpio_pwr, 0);
+}
+
+static int lepton_hard_reset(struct v4l2_subdev *sd, u32 val)
+{
+ struct lepton_subdev *lepton = to_lepton(sd);
+
+ if (lepton->gpio_rst != -1) {
+ // Reset
+ gpio_set_value(lepton->gpio_rst, 0);
+ msleep(1);
+ // Release reset
+ gpio_set_value(lepton->gpio_rst, 1);
+ msleep(1200);
+ }
+
+ return 0;
+}
+
+static int lepton_set_sensor_format(struct lepton_subdev *lepton)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].code == lepton->format.code) {
+ u32 buff = formats[i].conf;
+ u16 cmd;
+
+ cmd = LEPTON_CMD_OEM_SEL_FMT;
+ cmd |= LEPTON_CMD_TYP_SET;
+ return lepton_set32(lepton, cmd, &buff, 1);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct lepton_subdev_format* get_format(enum v4l2_mbus_pixelcode code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].code == code)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static struct lepton_subdev_format* get_format_by_conf(u32 conf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].conf == conf)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
/*
 * Program the sensor's default configuration (lepton_init_regs pairs of
 * command/value), verifying each write by reading it back, then sync
 * lepton->format.code with the format the sensor actually reports.
 *
 * Returns 0 on success or a negative errno.
 */
static int lepton_init(struct lepton_subdev *lepton)
{
	int i, err;
	u16 cmd;
	u32 buff, read_buff;

	/* lepton_init_regs is a flat list of {command, value} pairs */
	for (i = 0; i < ARRAY_SIZE(lepton_init_regs); i += 2) {
		cmd = lepton_init_regs[i];
		buff = lepton_init_regs[i + 1];

		cmd |= LEPTON_CMD_TYP_SET;
		err = lepton_set32(lepton, cmd, &buff, 1);
		if (err) {
			v4l2_err(&lepton->sd,
				 "failed to set lepton reg 0x%X\n", cmd);
			return err;
		}

		/* read back to confirm the write took effect */
		cmd = lepton_init_regs[i];
		cmd |= LEPTON_CMD_TYP_GET;
		err = lepton_get32(lepton, cmd, &read_buff, 1);
		if (err) {
			v4l2_err(&lepton->sd,
				 "failed to read lepton reg 0x%X\n", cmd);
			return err;
		}

		if (read_buff != buff) {
			v4l2_err(&lepton->sd, "failed to set lepton register "
				 "value [0x%X] is 0x%X instead of 0x%X\n",
				 cmd, read_buff, buff);
			return -EINVAL;
		}
	}

	/* query the output format the sensor is actually using */
	cmd = LEPTON_CMD_OEM_SEL_FMT | LEPTON_CMD_TYP_GET;
	err = lepton_get32(lepton, cmd, &buff, 1);
	if (err) {
		v4l2_err(&lepton->sd, "failed to get format (%d)\n", err);
		return err;
	}
	else {
		struct v4l2_mbus_framefmt *fmt = &lepton->format;
		struct lepton_subdev_format *sub_fmt;

		sub_fmt = get_format_by_conf(buff);
		if (!sub_fmt) {
			/* unknown value: force the driver default */
			v4l2_warn(&lepton->sd, "unknown format found %d\n",
				  buff);
			fmt->code = formats[0].code;
			err = lepton_set_sensor_format(lepton);
			if (err) {
				v4l2_err(&lepton->sd,
					 "can't set a valid sensor format\n");
				return -EINVAL;
			}

		}
		else {
			fmt->code = sub_fmt->code;
		}
	}

	return 0;
}
+
+static int lepton_reset(struct v4l2_subdev *sd, u32 val)
+{
+ struct lepton_subdev *lepton = to_lepton(sd);
+
+ lepton_hard_reset(sd, val);
+ lepton_init(lepton);
+
+ return 0;
+}
+
+static int lepton_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct lepton_subdev *lepton = to_lepton(sd);
+ int err = 0;
+
+ if (enable) {
+ err = lepton_set_sensor_format(lepton);
+ }
+ v4l2_ctrl_grab(lepton->tele_ctrl, enable);
+
+ return err;
+}
+
+static int lepton_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ *code = formats[index].code;
+ return 0;
+}
+
/*
 * Report the current media-bus format, refreshing the height first
 * since toggling the telemetry control changes the line count.
 */
static int lepton_g_mbus_fmt(struct v4l2_subdev *sd,
			     struct v4l2_mbus_framefmt *fmt)
{
	struct lepton_subdev *lepton = to_lepton(sd);

	if (fmt == NULL)
		return -EINVAL;

	/* update image height, that's one thing that can
	 * have changed if telemetry has been toggled */
	lepton->format.height = get_image_height(lepton->format.code,
						 lepton->tele_ctrl->cur.val);
	*fmt = lepton->format;
	return 0;
}
+
+static int lepton_try_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct lepton_subdev *lepton = to_lepton(sd);
+ struct lepton_subdev_format *lep_fmt;
+
+ fmt->width = LEPTON_IMAGE_WIDTH;
+ fmt->height = get_image_height(lepton->format.code,
+ lepton->tele_ctrl->cur.val);
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ lep_fmt = get_format(fmt->code);
+ if (!lep_fmt)
+ lep_fmt = &formats[0];
+
+ fmt->code = lep_fmt->code;
+ return 0;
+}
+
+static int lepton_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct lepton_subdev *lepton = to_lepton(sd);
+ int err;
+
+ err = lepton_try_mbus_fmt(sd, fmt);
+ if (err)
+ return err;
+
+ lepton->format = *fmt;
+
+ return 0;
+}
+
+/***************
+ * V4L2 controls
+ ***************/
/* Enable/disable the sensor's automatic gain control (AGC). */
static int enable_agc(struct lepton_subdev *lepton, s32 enable)
{
	u16 cmd = LEPTON_CMD_AGC_ENABLE | LEPTON_CMD_TYP_SET;
	u32 value = enable ? 1 : 0;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Select the AGC policy (see agc_policy_opts: 0=linear, 1=histogram). */
static int set_agc(struct lepton_subdev *lepton, s32 val)
{
	u16 cmd = LEPTON_CMD_AGC_POLICY | LEPTON_CMD_TYP_SET;
	u32 value = val;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Enable/disable the telemetry lines appended to each frame. */
static int enable_telemetry(struct lepton_subdev *lepton, s32 enable)
{
	u16 cmd = LEPTON_CMD_SYS_TELE_ENABLE | LEPTON_CMD_TYP_SET;
	u32 value = enable ? 1 : 0;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Set the telemetry location (caller passes ctrl->val - 1: 0=header/first,
 * 1=footer/last -- TODO confirm the mapping against the datasheet). */
static int set_telemetry(struct lepton_subdev *lepton, s32 val)
{
	u16 cmd = LEPTON_CMD_SYS_TELE_LOC | LEPTON_CMD_TYP_SET;
	u32 value = val;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Enable/disable radiometry; required before FFC calibration. */
static int enable_rad(struct lepton_subdev *lepton, s32 enable)
{
	u16 cmd = LEPTON_CMD_RAD_ENABLE | LEPTON_CMD_TYP_SET;
	u32 value = enable ? 1 : 0;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Trigger a flat-field correction (RUN command, no payload).
 * NOTE(review): cnt == 0 makes lepton_set32() send an empty data phase
 * -- confirm the sensor accepts a zero-length payload for RUN. */
static int do_ffc(struct lepton_subdev *lepton)
{
	u16 cmd = LEPTON_CMD_RAD_FFC | LEPTON_CMD_TYP_RUN;
	u32 value = 0;

	return lepton_set32(lepton, cmd, &value, 0);
}
+
/* Select the radiometry shutter mode (0 = user mode per lepton_s_ctrl). */
static int set_shutter_mode(struct lepton_subdev *lepton, s32 mode)
{
	u16 cmd = LEPTON_CMD_RAD_SHUTTER_MODE | LEPTON_CMD_TYP_SET;
	u32 value = (u32)mode;

	return lepton_set32(lepton, cmd, &value, 1);
}
+
/* Program the shutter temperature used for FFC (16-bit value, units per
 * the Lepton radiometry spec -- TODO confirm units). */
static int set_shutter_temp(struct lepton_subdev *lepton, s32 temperature)
{
	u16 cmd = LEPTON_CMD_RAD_SHUTTER_TEMP | LEPTON_CMD_TYP_SET;
	u16 value = (u16) temperature;

	return lepton_set(lepton, cmd, &value, 1);
}
+
+#define V4L2_CID_LEPTON_BASE (V4L2_CID_USER_BASE | 0xf000)
+#define V4L2_CID_LEPTON_AGC_POLICY (V4L2_CID_LEPTON_BASE + 1)
+#define V4L2_CID_LEPTON_TELEMETRY (V4L2_CID_LEPTON_BASE + 2)
+#define V4L2_CID_LEPTON_FFC (V4L2_CID_LEPTON_BASE + 3)
+
/*
 * s_ctrl handler: map V4L2 control writes onto sensor commands.
 * Returns 0 on success, a negative errno, or a positive sensor error
 * code propagated from lepton_set()/lepton_set32().
 */
static int lepton_s_ctrl(struct v4l2_ctrl *ctrl)
{
	int err = 0;
	struct lepton_subdev *lepton =
		container_of(ctrl->handler, struct lepton_subdev, ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_AUTOGAIN:
		err = enable_agc(lepton, ctrl->val);
		break;
	case V4L2_CID_LEPTON_AGC_POLICY:
		err = set_agc(lepton, ctrl->val);
		break;
	case V4L2_CID_LEPTON_TELEMETRY:
		/* menu: 0=disabled, 1=first, 2=last; the location command
		 * takes val - 1 and is only sent when telemetry is on */
		err = enable_telemetry(lepton, ctrl->val);
		if (err)
			break;
		if (ctrl->val)
			err = set_telemetry(lepton, ctrl->val - 1);
		break;
	case V4L2_CID_LEPTON_FFC:
		err = enable_rad(lepton, 1); // Enable radiometry
		if(err)
			break;
		err = set_shutter_mode(lepton, 0); // Set shutter to user mode
		if(err)
			break;
		err = set_shutter_temp(lepton, ctrl->val); // Set calib shutter temp
		if(err)
			break;
		err = do_ffc(lepton); // Trigger FFC calibration

		// Reset control value so that the handler is called next time
		ctrl->cur.val = ctrl->val = 0;
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
+
+const struct v4l2_ctrl_ops lepton_ctrl_ops = {
+ .s_ctrl = lepton_s_ctrl,
+};
+
+static const char* agc_policy_opts[] = {
+ "linear",
+ "histogram equalization",
+ NULL,
+};
+
+static struct v4l2_ctrl_config lepton_agc_policy = {
+ .ops = &lepton_ctrl_ops,
+ .id = V4L2_CID_LEPTON_AGC_POLICY,
+ .name = "AGC Policy",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .qmenu = agc_policy_opts,
+};
+
+static const char* telemetry_opts[] = {
+ "disable",
+ "first",
+ "last",
+ NULL,
+};
+
+static struct v4l2_ctrl_config lepton_telemetry_cfg = {
+ .ops = &lepton_ctrl_ops,
+ .id = V4L2_CID_LEPTON_TELEMETRY,
+ .name = "Telemetry",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 2,
+ .def = 2,
+ .qmenu = telemetry_opts,
+};
+
+static struct v4l2_ctrl_config lepton_ffc_cfg = {
+ .ops = &lepton_ctrl_ops,
+ .id = V4L2_CID_LEPTON_FFC,
+ .name = "FFC",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 65535,
+ .step = 1,
+};
+
/*
 * Register the control handler and create the four controls (autogain,
 * FFC, AGC policy, telemetry).  Creation errors are accumulated in
 * handler.error and checked once at the end, per the v4l2-ctrls idiom.
 *
 * NOTE(review): on failure the handler is freed but sd.ctrl_handler
 * still points at it; callers free the subdev right after, so this
 * looks harmless -- verify no ctrl access can happen in between.
 */
static int lepton_init_ctrl(struct lepton_subdev *lepton)
{
	int err;

	lepton->sd.ctrl_handler = &lepton->ctrl_handler;
	err = v4l2_ctrl_handler_init(&lepton->ctrl_handler, 5);
	if (err)
		return err;
	v4l2_ctrl_new_std(&lepton->ctrl_handler, &lepton_ctrl_ops,
			  V4L2_CID_AUTOGAIN, 0, 1, 1, 0);
	v4l2_ctrl_new_custom(&lepton->ctrl_handler,
			     &lepton_ffc_cfg, NULL);
	v4l2_ctrl_new_custom(&lepton->ctrl_handler,
			     &lepton_agc_policy, NULL);
	lepton->tele_ctrl = v4l2_ctrl_new_custom(&lepton->ctrl_handler,
						 &lepton_telemetry_cfg, NULL);

	err = lepton->ctrl_handler.error;
	if (err)
		v4l2_ctrl_handler_free(&lepton->ctrl_handler);

	return err;
}
+
/* Release all controls created by lepton_init_ctrl(). */
static void lepton_free_ctrl(struct lepton_subdev *lepton)
{
	v4l2_ctrl_handler_free(&lepton->ctrl_handler);
}
+
+static const struct v4l2_subdev_core_ops lepton_core_ops = {
+ .reset = lepton_reset,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+};
+
+static const struct v4l2_subdev_video_ops lepton_video_ops = {
+ .s_stream = lepton_s_stream,
+ .enum_mbus_fmt = lepton_enum_mbus_fmt,
+ .try_mbus_fmt = lepton_try_mbus_fmt,
+ .g_mbus_fmt = lepton_g_mbus_fmt,
+ .s_mbus_fmt = lepton_s_mbus_fmt,
+};
+
+static const struct v4l2_subdev_ops lepton_ops = {
+ .core = &lepton_core_ops,
+ .video = &lepton_video_ops,
+};
+
+static int lepton_init_gpio(struct lepton_subdev *lepton, int pwr, int rst)
+{
+ int err = 0;
+
+ if (pwr != -1) {
+ err = gpio_request_one(pwr, GPIOF_OUT_INIT_LOW,
+ "lepton-ctrl_pwr");
+ if (err)
+ goto err;
+ }
+ lepton->gpio_pwr = pwr;
+
+ if (rst != -1) {
+ err = gpio_request_one(rst, GPIOF_OUT_INIT_LOW,
+ "lepton-ctrl_rst");
+ if (err)
+ goto free_pwr_gpio;
+ }
+ lepton->gpio_rst = rst;
+
+free_pwr_gpio:
+ if (err && pwr)
+ gpio_free(pwr);
+err:
+ return err;
+}
+
+static void lepton_free_gpio(struct lepton_subdev *lepton)
+{
+ if (lepton->gpio_pwr)
+ gpio_free(lepton->gpio_pwr);
+
+ if (lepton->gpio_rst)
+ gpio_free(lepton->gpio_rst);
+}
+
+static int lepton_dev_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lepton_i2c_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct lepton_subdev *lep_sub;
+ struct v4l2_subdev *sd;
+ struct v4l2_mbus_framefmt *fmt;
+ int err = -EIO;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+ goto err;
+
+ err = -ENOMEM;
+ lep_sub = kzalloc(sizeof(struct lepton_subdev), GFP_KERNEL);
+ if (!lep_sub)
+ goto err;
+
+ lep_sub->client = client;
+
+ sd = &lep_sub->sd;
+ v4l2_i2c_subdev_init(sd, client, &lepton_ops);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ fmt = &lep_sub->format;
+ fmt->width = LEPTON_IMAGE_WIDTH;
+ fmt->height = LEPTON_IMAGE_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ err = lepton_init_gpio(lep_sub, pdata ? pdata->gpio_pwr : -1,
+ pdata ? pdata->gpio_rst : -1);
+
+ if (err) {
+ v4l2_err(sd, "failed to request gpio(s) (%d)\n", err);
+ goto free_subdev;
+ }
+
+ lepton_power_on(lep_sub);
+ lepton_hard_reset(sd, 0);
+
+ err = lepton_init(lep_sub);
+ if (err) {
+ v4l2_err(sd, "init failed (%d)\n", err);
+ goto free_gpio;
+ }
+
+ err = lepton_init_ctrl(lep_sub);
+ if (err)
+ goto free_gpio;
+
+ return 0;
+
+free_gpio:
+ lepton_free_gpio(lep_sub);
+free_subdev:
+ kfree(lep_sub);
+err:
+ return err;
+}
+
/*
 * i2c remove: unregister the subdev, power the sensor down and release
 * controls, GPIOs and the device state.
 */
static int lepton_dev_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sub = i2c_get_clientdata(client);
	struct lepton_subdev *dev = to_lepton(sub);

	v4l2_device_unregister_subdev(sub);
	lepton_power_off(dev);
	lepton_free_ctrl(dev);
	lepton_free_gpio(dev);
	kfree(dev);
	return 0;
}
+
+static const struct i2c_device_id lepton_dev_id[] = {
+ { "lepton-ctrl", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lepton_dev_id);
+
+static struct i2c_driver lepton_dev_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lepton-ctrl",
+ },
+ .probe = lepton_dev_probe,
+ .remove = lepton_dev_remove,
+ .id_table = lepton_dev_id,
+};
+
+module_i2c_driver(lepton_dev_driver);
+
diff --git a/drivers/parrot/mfd/Kconfig b/drivers/parrot/mfd/Kconfig
new file mode 100644
index 00000000000000..43c29fcda9482f
--- /dev/null
+++ b/drivers/parrot/mfd/Kconfig
@@ -0,0 +1,33 @@
+config MFD_HAVE_P7MU
+ bool
+ depends on MFD_P7MU && I2C
+ select MFD_CORE
+ select IRQ_DOMAIN
+ default y
+ help
+ Support for the Parrot Power Management Unit companion chip.
	  This driver provides common support for accessing the device;
	  additional drivers must be enabled in order to use the
	  functionality of the device.
+
+config MFD_P7MU
+ bool "Parrot Power Management Unit (PMU) companion chip core support"
+
+config MFD_P7MU_DEBUG
+ bool "Debug Parrot PMU"
+ depends on MFD_P7MU
	default n
+ help
+ Build Parrot PMU support with debug enabled
+
+config SICILIA_IRRADIANCE
+ tristate "Sicilia irradiance module"
+ depends on MFD_DIB0700 && LEDS_PCA9633 && TSL2591
	default n
+ help
+ Support for the Parrot Sicilia irradiance module
+
+config MFD_DIB0700
+ tristate "DiB700 I2C/UART/GPIO"
+ help
+ Driver for multi-function devices based on DiBcom DiB0700 - USB bridge
diff --git a/drivers/parrot/mfd/Makefile b/drivers/parrot/mfd/Makefile
new file mode 100644
index 00000000000000..c742d5dc679a8a
--- /dev/null
+++ b/drivers/parrot/mfd/Makefile
@@ -0,0 +1,4 @@
+ccflags-$(CONFIG_MFD_P7MU_DEBUG) += -DDEBUG
+obj-$(CONFIG_MFD_P7MU) += p7mu-core.o p7mu-clk.o p7mu-pin.o
+obj-$(CONFIG_MFD_DIB0700) += dib0700_mfd.o
+obj-$(CONFIG_SICILIA_IRRADIANCE) += sicilia-irradiance.o
diff --git a/drivers/parrot/mfd/dib0700_mfd.c b/drivers/parrot/mfd/dib0700_mfd.c
new file mode 100644
index 00000000000000..4c230033b6683e
--- /dev/null
+++ b/drivers/parrot/mfd/dib0700_mfd.c
@@ -0,0 +1,2101 @@
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+
+#include <parrot/mfd/dib0700_mfd.h>
+
+#define UART_DEV_PENDING
+#define MAX_NBR_UARTS 20 //127 /* Maximum number of uart that can be managed by the driver */
+
+#define DRIVER_NAME "dib0700_mfd"
+/* 1 pin for SCL can be common all i2c adapter,
+ then 10 sda available + the default i2c controller (no bit bangint) */
+#define ADAPTER_MAX 10+1
+
+/* DIB0700 internal state machine */
+#define STATE_DISCONNECT 0
+#define STATE_I2C_REGISTER 1
+#define STATE_TTY_REGISTER 2
+#define STATE_TTY_INSTALL 3
+#define STATE_TTY_ENABLE 4
+#define STATE_TTY_TX_START 5
+
+#define kref_to_dib0700_state(d) container_of(d, struct dib0700_state, kref)
+
+static char *i2c_param = NULL;
+module_param(i2c_param, charp, 0000);
+MODULE_PARM_DESC(i2c_param, "i2c adapter parameter");
+
+static char *firmware = "dvb-usb-dib0700-1.20.fw";
+module_param(firmware, charp, 0000);
+MODULE_PARM_DESC(firmware, "firmware parameter");
+
+
+int dvb_usb_dib0700_debug;
+module_param_named(debug,dvb_usb_dib0700_debug, int, 0644);
+
+int serial_debug = 0;
+module_param_named(serial, serial_debug, int, 0644);
+
+static struct platform_device static_hook = {
+ .name = "static_hook",
+ .id = -1,
+ .dev = {
+ .platform_data = NULL,
+ },
+};
+
+struct i2c_adapter_properties {
+ /* if master=1, master i2c is used
+ there is not need of sda and scl pins */
+ int sda;
+ int scl;
+ int master;
+ int speed;
+
+ struct i2c_adapter i2c_adap;
+ int num_adapter;
+};
+
+#define USB_ENDP_CTRL_BUSY 0
+struct dib0700_state {
+ /* USB */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+
+ /* Misc */
+ u32 fw_version;
+ u32 hwversion;
+ u32 romversion;
+ u32 fwtype;
+ spinlock_t lock;
+ struct kref kref;
+ unsigned long istate; /* internal state */
+ wait_queue_head_t wq_endp0;
+
+
+ /* i2c */
+ struct i2c_adapter_properties props[ADAPTER_MAX];
+ int num_adapters;
+ struct rt_mutex bus_lock; /* clk is shared */
+
+#ifdef UART_DEV_PENDING
+ /* serial */
+ struct tty_port port;
+ struct tty_struct *tty;
+ struct work_struct work;
+ unsigned int index;
+ struct async_icount icount;
+ struct device *ttydev;
+ struct timer_list rx_timer;
+ unsigned long rx_expires;
+ struct timer_list tx_timer;
+ unsigned long tx_expires;
+
+#define NBR_MAX_RD_URBS 2
+/* The hook cannot manage request greater than 1408 bytes */
+#define RD_URB_SIZE 128 //1408
+ /* bytes for the size
+ of FIFO (in the dib0700 hook) */
+ /* 8 * 1000 * total urb size / baudrate */
+#define RX_TIMER_CONST (8000 * RD_URB_SIZE)
+
+ unsigned char *uart_in_buffers[NBR_MAX_RD_URBS];
+ struct urb *read_urbs[NBR_MAX_RD_URBS];
+ struct usb_ctrlrequest rd_ctrlrq;
+ unsigned long read_urbs_free;
+
+#define SRL_WRITE_OFFSET 1
+#define NBR_MAX_WR_URBS 3
+#define WR_URB_SIZE (64 - SRL_WRITE_OFFSET) //512
+#define TX_TIMER_CONST (8000 * WR_URB_SIZE * (NBR_MAX_WR_URBS-1))
+ unsigned char *uart_out_buffers[NBR_MAX_WR_URBS];
+ struct urb *write_urbs[NBR_MAX_WR_URBS];
+ struct usb_ctrlrequest wrt_ctrlrq;
+ unsigned long write_urbs_free;
+ struct kfifo write_fifo;
+ int tx_bytes;
+ int tx_urb_cnt;
+
+#define TX_MAX_RETRIES 10
+ int tx_retries;
+ unsigned long tx_retry;
+ unsigned long flags;
+#endif //UART_DEV_PENDING
+
+ /* gpio */
+ struct gpio_chip gpio_chip;
+};
+
+static struct dib0700_state *chip_state;
+#ifdef UART_DEV_PENDING
+/* serial */
+static struct dib0700_state *dib0700_uart_table[MAX_NBR_UARTS];
+static DEFINE_SPINLOCK(dib0700_uart_table_lock);
+static struct tty_driver *dib0700_uart_tty_driver;
+
+static void dib0700_write_start(struct dib0700_state *st);
+#endif //UART_DEV_PENDING
+
+static void __dib0700_mfd_put(struct dib0700_state *st);
+#ifdef DEBUG
+#define dib0700_mfd_put(st) \
+ do { pr_err("[%s][%d]\n",__func__, __LINE__); \
+ __dib0700_mfd_put(st); }while(0);
+#else
+#define dib0700_mfd_put(st) \
+ do { __dib0700_mfd_put(st); }while(0);
+#endif
+
+/* commonly used firmware download types and function */
+struct hexline {
+ u8 len;
+ u16 /*u32*/ addr;
+ u8 type;
+ u8 data[255];
+ u8 chk;
+};
+
+
+static enum dib07x0_gpios gpio_to_reg[] ={
+ GPIO0,
+ GPIO1,
+ GPIO2,
+ GPIO3,
+ GPIO4,
+ GPIO5,
+ GPIO6,
+ GPIO7,
+ GPIO8,
+ GPIO9,
+ GPIO10,
+};
+
+#define debug_dump_usb_xfer(st,text,data,size) \
+ do { if ((dvb_usb_dib0700_debug & 0x08)){ \
+ print_hex_dump(KERN_INFO, text, \
+ DUMP_PREFIX_NONE, \
+ 16, 1, data, \
+ size, 0); } } while (0)
+
+#ifdef DEBUG
+
+#define FUNC_IN do{ pr_err("[%s] in\n", __func__); }while(0);
+#define FUNC_OUT do{ pr_err("[%s][%d] out\n", __func__, __LINE__); }while(0);
+
+#else
+#define FUNC_IN
+#define FUNC_OUT
+#endif
+
+static int dib0700_usb_control_msg(struct dib0700_state *st, unsigned int pipe_num, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, int timeout)
+{
+ int ret;
+ unsigned int pipe;
+
+ if (test_bit(STATE_DISCONNECT, &st->istate)) {
+ return -ENODEV;
+ }
+
+ kref_get(&st->kref);
+
+ if (test_and_set_bit_lock(USB_ENDP_CTRL_BUSY, &st->flags)) {
+ wait_event_interruptible(st->wq_endp0,
+ !test_and_set_bit_lock(USB_ENDP_CTRL_BUSY, &st->flags));
+ }
+
+ if (requesttype & USB_DIR_IN)
+ pipe = usb_rcvctrlpipe(st->udev, pipe_num);
+ else
+ pipe = usb_sndctrlpipe(st->udev, pipe_num);
+ ret = usb_control_msg(st->udev, pipe, request, requesttype, value, index, data, size, timeout);
+
+ clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+ wake_up_interruptible(&st->wq_endp0);
+
+ dib0700_mfd_put(st);
+
+ return ret;
+}
+
+#ifdef UART_DEV_PENDING
+/***********************
+ UART
+************************/
+
+/* Misc UART */
/* Defer the tty wakeup to process context via the work queue. */
static void dib0700_serial_port_softint(struct dib0700_state *st)
{
	schedule_work(&st->work);
}
+
+static void dib0700_port_work(struct work_struct *work)
+{
+ struct dib0700_state *st =
+ container_of(work, struct dib0700_state, work);
+ struct tty_struct *tty;
+
+ dev_dbg(&st->intf->dev,
+ "%s - port %d", __func__, st->index);
+
+ tty = tty_port_tty_get(&st->port);
+ if (!tty)
+ return;
+
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+}
+
+static void kill_urbs(struct dib0700_state *st)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(st->read_urbs); index++)
+ usb_kill_urb(st->read_urbs[index]);
+
+ for (index = 0; index < ARRAY_SIZE(st->write_urbs); index++)
+ usb_kill_urb(st->write_urbs[index]);
+}
+
+static void free_urbs(struct dib0700_state *st)
+{
+ int index, count = 0;
+
+ for (index = 0; index < NBR_MAX_RD_URBS; index++) {
+ if (st->read_urbs[index]) {
+ usb_free_urb(st->read_urbs[index]);
+ st->read_urbs[index] = NULL;
+ count++;
+ }
+
+ if (st->uart_in_buffers[index]) {
+ kfree(st->uart_in_buffers[index]);
+ st->uart_in_buffers[index] = NULL;
+ count++;
+ }
+ }
+
+ count = 0;
+
+ for (index = 0; index < NBR_MAX_WR_URBS; index++) {
+ if (st->write_urbs[index]) {
+ usb_free_urb(st->write_urbs[index]);
+ st->write_urbs[index] = NULL;
+ count++;
+ }
+
+ if (st->uart_out_buffers[index]) {
+ kfree(st->uart_out_buffers[index]);
+ st->uart_out_buffers[index] = NULL;
+ count++;
+ }
+ }
+}
+
/*
 * Submit the next uart read URB on the shared control endpoint.
 *
 * If the endpoint is busy, or no free read URB is available, the
 * attempt is rescheduled via rx_timer instead of blocking.
 *
 * NOTE(review): idx is picked from read_urbs_free under st->lock, but
 * the clear_bit() that claims it happens after the lock is dropped --
 * confirm two submitters cannot race to the same index (the
 * USB_ENDP_CTRL_BUSY bit lock appears to provide that exclusion).
 */
static void dib0700_read_start(struct dib0700_state *st)
{

	int result, idx;
	struct urb *urb;
	//struct usb_ctrlrequest *setup = NULL;
	unsigned long flags;

	if (!st)
		return;

	if (test_bit(STATE_DISCONNECT, &st->istate))
		return;

	/* endpoint 0 busy: retry shortly */
	if (test_and_set_bit_lock(USB_ENDP_CTRL_BUSY, &st->flags)) {
		mod_timer(&st->rx_timer, jiffies + msecs_to_jiffies(10));
		return;
	}

	spin_lock_irqsave(&st->lock, flags);
	if (!test_bit(STATE_TTY_ENABLE, &st->istate)
	    || (!st->read_urbs_free)) {
		/* nothing to do: release the endpoint and maybe retry */
		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
		spin_unlock_irqrestore(&st->lock, flags);
		wake_up_interruptible(&st->wq_endp0);

		if (!st->read_urbs_free) {
			mod_timer(&st->rx_timer,
				  jiffies + st->rx_expires);
		}
		return;
	}

	idx = (int)find_first_bit(&st->read_urbs_free,
				  ARRAY_SIZE(st->read_urbs));
	spin_unlock_irqrestore(&st->lock, flags);

	/* claim the URB slot */
	clear_bit(idx, &st->read_urbs_free);

	urb = st->read_urbs[idx];

	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		pr_err("[%s] - error submitting urb: "
		       "%d (urb = %p, idx = %d)\n",
		       __func__, result, urb, idx);

		/* give the slot back and release the endpoint */
		set_bit(idx, &st->read_urbs_free);
		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
		wake_up_interruptible(&st->wq_endp0);

		mod_timer(&st->rx_timer,
			  jiffies + st->rx_expires);
		return;
	}

	//Debug
	/*if (dvb_usb_dib0700_debug >= 0x06) {
		setup = (struct usb_ctrlrequest*) urb->setup_packet;
		dev_err(&st->intf->dev, "USB control read: %x %x %x %x %x %x %d = %d\n",
			0, setup->bRequest, setup->bRequestType,
			setup->wValue, setup->wIndex,
			setup->wLength, 0, result);
	}*/

	/*if (test_bit(STATE_TTY_ENABLE, &st->istate)) {
		mod_timer(&st->rx_timer,
			  jiffies + st->rx_expires);
		dev_dbg(&st->intf->dev,
			"%s - timer restart", __func__);
	}*/
}
+
+/*
+ * Completion handler for UART read (control-IN) URBs.
+ *
+ * Releases the shared control endpoint, pushes received bytes into the
+ * tty flip buffer, then either chains the next read immediately (data
+ * was flowing) or re-arms the RX polling timer.
+ */
+static void dib0700_read_ctrl_callback(struct urb *urb)
+{
+	struct dib0700_state *st = urb->context;
+	unsigned char *data = urb->transfer_buffer;
+	int status = urb->status, idx, count = 0;
+	struct tty_struct *tty;
+	unsigned long flags;
+	char tty_flag = TTY_NORMAL;
+
+	/* Identify which read URB completed. */
+	for (idx = 0; idx < ARRAY_SIZE(st->read_urbs); idx++)
+		if (urb == st->read_urbs[idx])
+			break;
+
+	/* Endpoint 0 is shared: release it and wake any waiter. */
+	clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+	wake_up_interruptible(&st->wq_endp0);
+
+	/* Should never occur */
+	if (idx >= ARRAY_SIZE(st->read_urbs) ) {
+		pr_err("[%s] Bad urb [%p]",
+		       __func__, urb);
+		return;
+	}
+
+	if (status) {
+		//TODO manage retry
+		/* Free URB */
+		set_bit(idx, &st->read_urbs_free);
+
+		pr_err("[%s] - non-zero urb status: %d\n",
+		       __func__, status);
+
+		mod_timer(&st->rx_timer,
+			  jiffies + st->rx_expires);
+		return;
+	}
+
+	/*
+	 * Fix: the port may have been closed or hung up while the URB was
+	 * in flight, in which case tty_port_tty_get() returns NULL and
+	 * there is nothing to deliver to.
+	 */
+	tty = tty_port_tty_get(&st->port);
+	if (tty) {
+		count = tty_insert_flip_string_fixed_flag(tty,
+							  data, tty_flag,
+							  urb->actual_length);
+
+		if (count < urb->actual_length) {
+			/* data are lost, the tty buffer has not been read */
+			pr_debug("[%s] - unable to copy all data received (%d<%d)\n",
+				 __func__, count,
+				 urb->actual_length);
+		}
+
+		if (count)
+			tty_flip_buffer_push(tty);
+
+		tty_kref_put(tty);
+	}
+
+	set_bit(idx, &st->read_urbs_free);
+	spin_lock_irqsave(&st->lock, flags);
+	st->icount.rx += count;
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	if (urb->actual_length) {
+		/* Data is flowing: chain the next read right away. */
+		dib0700_read_start(st);
+	} else {
+		/* Idle line: fall back to timer-driven polling. */
+		mod_timer(&st->rx_timer,
+			  jiffies + st->rx_expires);
+	}
+}
+
+/*
+ * Kick (or resume) UART transmission over shared control endpoint 0.
+ *
+ * Two modes:
+ *  - retry path: re-submit a URB whose previous submission failed
+ *    (tracked in the tx_retry bitmap), dropping it after TX_MAX_RETRIES;
+ *  - normal path: pull the next chunk of the write kfifo into a free
+ *    write URB and submit it.
+ * Called from the tty write path, the TX timer and the write completion
+ * callback.
+ */
+static void dib0700_write_start(struct dib0700_state *st)
+{
+	struct urb *urb = NULL;
+	int count = 0, result;
+	unsigned long flags;
+	unsigned char *data;
+	struct usb_ctrlrequest *setup = NULL;
+	int idx;
+
+	if (test_bit(STATE_DISCONNECT, &st->istate))
+		return;
+
+	/* Endpoint 0 is shared: retry a bit later if someone owns it. */
+	if (test_and_set_bit_lock(USB_ENDP_CTRL_BUSY, &st->flags)) {
+		mod_timer(&st->tx_timer, jiffies +
+			  msecs_to_jiffies(20));
+		return;
+	}
+
+	if (st->tx_urb_cnt > NBR_MAX_WR_URBS) {
+		/* Sent enough buffer and let the bus free */
+		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+		wake_up_interruptible(&st->wq_endp0);
+
+		mod_timer(&st->tx_timer,
+			  jiffies + st->tx_expires);
+		return;
+	}
+
+	spin_lock_irqsave(&st->lock, flags);
+	if (st->tx_retries && st->tx_retry) {
+
+		/* Re-submit a previously failed packet. */
+		idx = (int)find_first_bit(&st->tx_retry,
+					  ARRAY_SIZE(st->write_urbs));
+
+		urb = st->write_urbs[idx];
+
+		/*
+		 * Fix: payload size carried by this URB.  It used to stay
+		 * at 0 on the retry path, so dropped messages were never
+		 * subtracted from tx_bytes/icount.tx below.
+		 */
+		count = urb->transfer_buffer_length - SRL_WRITE_OFFSET;
+
+		if (st->tx_retries >= TX_MAX_RETRIES) {
+			pr_err("[%s] message dropped after "
+			       "%d retries\n",
+			       __func__, TX_MAX_RETRIES);
+
+#if 1 //Debug
+			print_hex_dump(KERN_INFO,"UART TX (dropped)>",
+				       DUMP_PREFIX_NONE,
+				       16, 1,
+				       urb->transfer_buffer,
+				       urb->transfer_buffer_length,
+				       1);
+#endif //Debug
+
+			st->tx_retries = 0;
+			clear_bit(idx, &st->tx_retry);
+			st->tx_bytes -= count;
+			st->icount.tx -= count;
+			set_bit(idx, &st->write_urbs_free);
+
+			spin_unlock_irqrestore(&st->lock, flags);
+			clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+			wake_up_interruptible(&st->wq_endp0);
+			return;
+		}
+		spin_unlock_irqrestore(&st->lock, flags);
+	} else {
+
+		if (!st->write_urbs_free || !kfifo_len(&st->write_fifo)) {
+
+			/* Nothing to send now: either no free URB or no data. */
+			if (kfifo_len(&st->write_fifo)) {
+				mod_timer(&st->tx_timer,
+					  jiffies + st->tx_expires);
+			} else {
+				//kfifo is empty
+				clear_bit(STATE_TTY_TX_START, &st->istate);
+			}
+
+			spin_unlock_irqrestore(&st->lock, flags);
+			clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+			wake_up_interruptible(&st->wq_endp0);
+			return;
+		}
+
+		idx = (int)find_first_bit(&st->write_urbs_free,
+					  ARRAY_SIZE(st->write_urbs));
+
+		spin_unlock_irqrestore(&st->lock, flags);
+
+		/* NOTE(review): the bit is cleared after dropping the lock;
+		 * confirm no concurrent caller can pick the same idx. */
+		clear_bit(idx, &st->write_urbs_free);
+		urb = st->write_urbs[idx];
+
+		data = (unsigned char *) urb->transfer_buffer;
+		data[0] = REQUEST_UART_WRITE;
+
+		count = kfifo_out_locked(&st->write_fifo,
+					 data + SRL_WRITE_OFFSET,
+					 (WR_URB_SIZE - SRL_WRITE_OFFSET),
+					 &st->lock);
+
+		if (!count) {
+			/* should not happen, kfifo already checked */
+			set_bit(idx, &st->write_urbs_free);
+			clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+			wake_up_interruptible(&st->wq_endp0);
+			return;
+		}
+
+		urb->transfer_buffer_length = count + SRL_WRITE_OFFSET;
+		setup = (struct usb_ctrlrequest*) urb->setup_packet;
+		setup->wLength = cpu_to_le16(count + SRL_WRITE_OFFSET);
+
+		spin_lock_irqsave(&st->lock, flags);
+		st->icount.tx += count;
+		st->tx_bytes += count;
+		spin_unlock_irqrestore(&st->lock, flags);
+	}
+
+	result = usb_submit_urb(urb, GFP_ATOMIC);
+
+	if (result) {
+
+		/*
+		   ENOMEM: Out of memory
+		   ENODEV: Unplugged device
+		   EPIPE: Stalled endpoint
+		*/
+
+		spin_lock_irqsave(&st->lock, flags);
+
+		if (st->tx_retries < TX_MAX_RETRIES) {
+			/* Keep the URB aside for a later retry. */
+			st->tx_retries++;
+			set_bit(idx, &st->tx_retry);
+		} else {
+			/* Give up: roll back accounting, free the URB. */
+			st->tx_retries = 0;
+			clear_bit(idx, &st->tx_retry);
+			st->tx_bytes -= count;
+			st->icount.tx -= count;
+			set_bit(idx, &st->write_urbs_free);
+
+#if 1 //Debug
+			print_hex_dump(KERN_INFO,"UART TX (dropped)>",
+				       DUMP_PREFIX_NONE,
+				       16, 1,
+				       urb->transfer_buffer,
+				       urb->transfer_buffer_length,
+				       1);
+#endif //Debug
+		}
+		spin_unlock_irqrestore(&st->lock, flags);
+
+		if (st->tx_retries) {
+			mod_timer(&st->tx_timer,
+				  jiffies + st->tx_expires);
+		} else {
+			pr_err("[%s] message dropped after "
+			       "%d retries\n",
+			       __func__, TX_MAX_RETRIES);
+		}
+
+		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+		wake_up_interruptible(&st->wq_endp0);
+		return;
+	}
+
+	//Debug
+	debug_dump_usb_xfer(st,"UART TX>",
+			    urb->transfer_buffer,
+			    urb->actual_length);
+}
+
+/* similar to usb_serial_generic_write_bulk_callback */
+/*
+ * Completion handler for UART write (control-OUT) URBs.
+ *
+ * On error the URB is kept aside for re-submission (up to TX_MAX_RETRIES,
+ * tracked in the tx_retry bitmap); on success the accounting is updated
+ * and the next fifo chunk is pushed immediately.  In both cases the tty
+ * write wakeup work is scheduled at the end.
+ */
+static void dib0700_write_ctrl_callback(struct urb *urb)
+{
+	struct dib0700_state *st = urb->context;
+	int status = urb->status, idx;
+	unsigned long flags;
+
+	/* Identify which write URB completed. */
+	for (idx = 0; idx < ARRAY_SIZE(st->write_urbs); idx++)
+		if (urb == st->write_urbs[idx])
+			break;
+
+	//Should never occurs
+	if (idx >= ARRAY_SIZE(st->write_urbs) ) {
+		pr_err("[%s] Bad urb [%p]", __func__, urb);
+		return;
+	}
+
+	if (status) {
+		/* Device rejected the transfer: schedule a retry, or drop
+		 * the message and roll back the byte accounting once
+		 * TX_MAX_RETRIES is reached. */
+		spin_lock_irqsave(&st->lock, flags);
+		if (st->tx_retries < TX_MAX_RETRIES) {
+			st->tx_retries++;
+			set_bit(idx, &st->tx_retry);
+		} else {
+			st->tx_retries = 0;
+			clear_bit(idx, &st->tx_retry);
+			set_bit(idx, &st->write_urbs_free);
+			st->tx_bytes -= (urb->transfer_buffer_length
+					 - SRL_WRITE_OFFSET);
+			st->icount.tx -= (urb->transfer_buffer_length
+					  - SRL_WRITE_OFFSET);
+
+			pr_err("[%s] message dropped after "
+			       "%d retries\n",
+			       __func__, TX_MAX_RETRIES);
+
+#if 1 //Debug
+			print_hex_dump(KERN_INFO,"UART TX (dropped)>",
+				       DUMP_PREFIX_NONE,
+				       16, 1,
+				       urb->transfer_buffer,
+				       urb->transfer_buffer_length,
+				       1);
+#endif //Debug
+
+		}
+		spin_unlock_irqrestore(&st->lock, flags);
+		/* Release the shared control endpoint. */
+		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+		wake_up_interruptible(&st->wq_endp0);
+
+		if (st->tx_retries) {
+			mod_timer(&st->tx_timer, jiffies + st->tx_expires);
+		}
+
+		/* falls through to schedule_work */
+	} else {
+
+#if 0 //Debug
+		print_hex_dump(KERN_INFO,"UART TX (cllbck)>",
+			       DUMP_PREFIX_NONE,
+			       16, 1,
+			       urb->transfer_buffer,
+			       urb->transfer_buffer_length,
+			       1);
+#endif //Debug
+
+		//Debug
+		debug_dump_usb_xfer(st,"UART TX (clbck)>",
+				    urb->transfer_buffer,
+				    urb->actual_length);
+
+		/* Successful transfer: free the URB and account the bytes
+		 * actually handed to the firmware. */
+		spin_lock_irqsave(&st->lock, flags);
+		st->tx_urb_cnt++;
+		st->tx_retries = 0;
+		clear_bit(idx, &st->tx_retry);
+		set_bit(idx, &st->write_urbs_free);
+		st->tx_bytes -= (urb->transfer_buffer_length
+				 - SRL_WRITE_OFFSET);
+		spin_unlock_irqrestore(&st->lock, flags);
+		clear_bit_unlock(USB_ENDP_CTRL_BUSY, &st->flags);
+		wake_up_interruptible(&st->wq_endp0);
+
+		if (test_bit(STATE_DISCONNECT, &st->istate))
+			goto schedule_work;
+
+		/* Chain the next fifo chunk right away. */
+		dib0700_write_start(st);
+	}
+
+schedule_work:
+	dib0700_serial_port_softint(st);
+}
+
+/* RX polling timer callback: trigger the next UART read. */
+static void rx_timer_routine(unsigned long data)
+{
+	struct dib0700_state *st = (struct dib0700_state *)data;
+
+	dib0700_read_start(st);
+}
+
+/* NOTE(review): this constant is not referenced anywhere in this file —
+ * presumably a leftover; confirm before removing. */
+#define TX_FIFO_REQUEST_BUSY 2
+/* TX timer callback: reset the per-burst URB budget and restart TX. */
+static void tx_timer_routine(unsigned long data)
+{
+	struct dib0700_state *st = (struct dib0700_state *) data;
+	st->tx_urb_cnt = 0;
+	dib0700_write_start(st);
+}
+
+/*
+ * Look up the state registered at 'index' in the uart table and take a
+ * reference on it.  Returns NULL if the index is out of range, the slot
+ * is empty, or the device is disconnecting.
+ */
+static struct dib0700_state *dib0700_uart_port_get(unsigned index)
+{
+	struct dib0700_state *st = NULL;
+
+	if (index >= MAX_NBR_UARTS)
+		return NULL;
+
+	spin_lock(&dib0700_uart_table_lock);
+	st = dib0700_uart_table[index];
+
+	/* Fix: the slot may be empty — check before dereferencing. */
+	if (st && test_bit(STATE_DISCONNECT, &st->istate))
+		st = NULL;
+
+	if (st)
+		kref_get(&st->kref);
+	spin_unlock(&dib0700_uart_table_lock);
+
+	return st;
+}
+
+
+/*
+ * Detach a port from the uart table, unregister its tty device and hang
+ * up any tty still attached.  Called on disconnect / failed init.
+ */
+static void dib0700_uart_port_remove(struct dib0700_state *st)
+{
+	struct tty_struct *tty;
+
+	if (!st)
+		return;
+
+	FUNC_IN;
+	/* NOTE(review): this table read happens outside
+	 * dib0700_uart_table_lock — confirm callers serialize removal. */
+	BUG_ON(dib0700_uart_table[st->index] != st);
+
+	spin_lock(&dib0700_uart_table_lock);
+	dib0700_uart_table[st->index] = NULL;
+	spin_unlock(&dib0700_uart_table_lock);
+	tty_unregister_device(dib0700_uart_tty_driver,
+			      st->index);
+
+	mutex_lock(&st->port.mutex);
+	tty = tty_port_tty_get(&st->port);
+
+	if (tty) {
+		/* Force remaining users off the port. */
+		tty_hangup(tty);
+		tty_kref_put(tty);
+	}
+
+	mutex_unlock(&st->port.mutex);
+	FUNC_OUT;
+}
+
+/* Port operations */
+static int dib0700_uart_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ struct dib0700_state *st = tport->tty->driver_data;
+ struct ktermios dummy = {0};
+
+ if (!st)
+ return -ENODEV;
+
+ FUNC_IN;
+
+ if (test_bit(STATE_DISCONNECT, &st->istate))
+ return -ENODEV;
+
+ /* Activate uart on hook */
+ set_bit(STATE_TTY_ENABLE, &st->istate);
+
+ st->tx_bytes = 0;
+ st->tx_urb_cnt = 0;
+
+ /* Set the termios */
+ if (tport->tty->ops->set_termios)
+ tport->tty->ops->set_termios(tport->tty, &dummy);
+
+ /* Activate the timer to trigger the next reading */
+ mod_timer(&st->rx_timer, jiffies + st->rx_expires);
+
+ FUNC_OUT;
+ return 0;
+}
+
+/*
+ * tty_port .shutdown: stop the firmware UART when the last user closes
+ * the port — kill timers and URBs, then tell the firmware to disable.
+ */
+static void dib0700_uart_shutdown(struct tty_port *tport)
+{
+	struct dib0700_state *st = tport->tty->driver_data;
+	struct ktermios dummy = {0};
+
+	if (!st)
+		return;
+
+	FUNC_IN;
+
+	/* Deactivate uart on hook */
+	clear_bit(STATE_TTY_ENABLE, &st->istate);
+
+	/* Deactivate the timer */
+	del_timer_sync(&st->rx_timer);
+	del_timer_sync(&st->tx_timer);
+
+	/* kill urbs */
+	kill_urbs(st);
+
+	clear_bit(STATE_TTY_TX_START, &st->istate);
+
+	/* Send the (now disabled) settings to the firmware — pointless if
+	 * the device is already unplugged. */
+	if (!test_bit(STATE_DISCONNECT, &st->istate))
+		if (tport->tty->ops->set_termios)
+			tport->tty->ops->set_termios(tport->tty, &dummy);
+
+	FUNC_OUT;
+}
+
+/* tty_port callbacks: activate on first open, shutdown on last close. */
+static const struct tty_port_operations dib0700_uart_port_ops = {
+	.activate = dib0700_uart_activate,
+	.shutdown = dib0700_uart_shutdown,
+};
+
+
+/* TTY operations */
+/* tty->ops->open: open the tty port unless the device is gone. */
+static int dib0700_uart_open(struct tty_struct *tty, struct file *filp)
+{
+	struct dib0700_state *st = tty->driver_data;
+	int err;
+
+	if (!st)
+		return -ENODEV;
+
+	FUNC_IN;
+
+	if (test_bit(STATE_DISCONNECT, &st->istate)) {
+		FUNC_OUT;
+		return -ENODEV;
+	}
+
+	/* tty_port_open() invokes .activate, which arms the RX timer. */
+	err = tty_port_open(&st->port, tty, filp);
+	FUNC_OUT;
+
+	return err;
+}
+
+/* tty->ops->close: delegate to the tty_port helper. */
+static void dib0700_uart_close(struct tty_struct * tty, struct file * filp)
+{
+	struct dib0700_state *st = tty->driver_data;
+
+	if(!st)
+		return;
+
+	FUNC_IN;
+	tty_port_close(&st->port, tty, filp);
+
+	/* workaround to force call to dib0700_uart_cleanup
+	   when the device is unplug while a user-application
+	   still hold a file descriptor */
+	if (test_bit(STATE_DISCONNECT, &st->istate))
+		tty_kref_put(tty);
+
+	FUNC_OUT;
+}
+
+/*
+ * tty->ops->write: queue data into the software write fifo and kick the
+ * transmitter.  Returns the number of bytes accepted (may be less than
+ * 'count' if the fifo is nearly full).
+ */
+static int dib0700_uart_write (struct tty_struct * tty,
+			       const unsigned char *buf, int count)
+{
+	struct dib0700_state *st = tty->driver_data;
+
+	if (!st)
+		return -ENODEV;
+
+	FUNC_IN;
+
+	if (test_bit(STATE_DISCONNECT, &st->istate)) {
+		return -ENODEV;
+	}
+
+	if (!count)
+		return 0;
+
+	count = kfifo_in_locked(&st->write_fifo, buf, count, &st->lock);
+
+	if (!test_and_set_bit(STATE_TTY_TX_START, &st->istate)) {
+		/* First write of a burst: start the TX machinery; further
+		 * chunks are chained by the completion callback/timer. */
+		dib0700_write_start(st);
+	}
+
+	FUNC_OUT;
+	return count;
+}
+
+/* tty->ops->write_room: free space left in the software write fifo. */
+static int dib0700_uart_write_room(struct tty_struct *tty)
+{
+	struct dib0700_state *st = tty->driver_data;
+	unsigned long flags;
+	int room;
+
+	if (!st) /* TODO check */
+		return -ENODEV;
+
+	/* NOTE(review): the tty core expects an amount of space from
+	 * write_room; confirm returning 0 instead of a negative errno
+	 * would not be more appropriate on disconnect. */
+	if (test_bit(STATE_DISCONNECT, &st->istate))
+		return -ENODEV;
+
+	spin_lock_irqsave(&st->lock, flags);
+	room = kfifo_avail(&st->write_fifo);
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	dev_dbg(&st->intf->dev,
+		"%s - returns %d\n", __func__, room);
+
+	return room;
+}
+
+/*
+ * tty->ops->chars_in_buffer: bytes not yet handed to the firmware —
+ * what is still in the fifo plus what is in-flight in write URBs.
+ */
+static int dib0700_uart_chars_in_buffer(struct tty_struct *tty)
+{
+	struct dib0700_state *st = tty->driver_data;
+	unsigned long flags;
+	int chars;
+
+	if (!st) /* TODO check */
+		return -ENODEV;
+
+	if (test_bit(STATE_DISCONNECT, &st->istate))
+		return -ENODEV;
+
+	spin_lock_irqsave(&st->lock, flags);
+	chars = kfifo_len(&st->write_fifo) + st->tx_bytes;
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	dev_dbg(&st->intf->dev,
+		"%s - returns (%d)\n", __func__, chars);
+
+	/* A negative total means the tx_bytes accounting went wrong. */
+	if (chars<0)
+		pr_err("[%s] fifo [%d] tx_bytes [%d]\n",
+		       __func__, kfifo_len(&st->write_fifo),
+		       st->tx_bytes);
+	return chars;
+}
+
+/*
+ * tty->ops->install: bind the tty to our per-device state, taking a
+ * reference that is dropped again in dib0700_uart_cleanup().
+ */
+static int dib0700_uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int idx = tty->index;
+	struct dib0700_state *st = dib0700_uart_port_get(idx);
+	int ret = -ENODEV;
+
+	if(!st)
+		goto no_dev;
+
+	FUNC_IN;
+
+	ret = tty_standard_install(driver, tty);
+
+	if (ret == 0) {
+		/* Only mark installed on success, so cleanup() knows it
+		 * owns a reference. */
+		set_bit(STATE_TTY_INSTALL, &st->istate);
+		tty->driver_data = st;
+	}
+	else {
+		dev_err(&st->intf->dev, "%s: failed\n", __func__);
+		/* Fix: cleanup() never runs for this tty, so drop the
+		 * reference taken by dib0700_uart_port_get() here or it
+		 * leaks. */
+		dib0700_mfd_put(st);
+	}
+no_dev:
+	FUNC_OUT;
+	return ret;
+}
+
+/* tty->ops->cleanup: last tty reference gone — drop our device ref. */
+static void dib0700_uart_cleanup(struct tty_struct *tty)
+{
+	struct dib0700_state *st = tty->driver_data;
+
+	FUNC_IN;
+	tty->driver_data = NULL;
+	/* Put the reference only once, even if cleanup is re-entered. */
+	if (test_and_clear_bit(STATE_TTY_INSTALL, &st->istate))
+		dib0700_mfd_put(st);
+	FUNC_OUT;
+}
+
+/*
+ * tty->ops->set_termios: push baud rate, parity and the enable flag to
+ * the firmware, and rescale the RX/TX polling timers with the new rate.
+ */
+static void dib0700_uart_set_termios(struct tty_struct *tty,
+				     struct ktermios *old_termios)
+{
+	// Request payload layout (as sent in buf[]):
+	// 31             24        16          12         8             0
+	// ...................................................................
+	// request type   | parity  | tty enable | baud rate (MSB)
+	// (8 bits)         (4 bits)  (4 bits)     (16 bits)
+	//
+	// 31                                                            0
+	// ..................................................................
+	// baud rate (LSB) |
+	// (16 bits)
+
+	struct dib0700_state *st = tty->driver_data;
+	unsigned long flags;
+	unsigned int cflag = tty->termios->c_cflag, expires_ms = 0;
+	int result = 0;
+	speed_t baud = 0;
+	u8 buf[6] = {0};
+
+	FUNC_IN;
+
+	if (!st) /* probably disconnected */
+		return;
+
+	if (!tty_termios_hw_change(old_termios,
+				   tty->termios))
+		return; /* no change */
+
+	spin_lock_irqsave(&st->lock, flags);
+
+	if (test_bit(STATE_TTY_ENABLE, &st->istate)) {
+		baud = tty_termios_baud_rate(tty->termios);
+		if (baud == 0)
+			baud = 9600;
+
+		/* Scale the polling periods with the line rate. */
+		expires_ms = RX_TIMER_CONST/baud;
+		st->rx_expires = msecs_to_jiffies(expires_ms);
+
+		expires_ms = TX_TIMER_CONST/baud;
+		st->tx_expires = msecs_to_jiffies(expires_ms);
+	}
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	buf[0] = REQUEST_UART_SET_PARAM;
+	/*
+	 * Fix: PARENB/PARODD are high cflag bits (>= 0x100); shifting the
+	 * raw masks further left truncated them out of the u8, so parity
+	 * was always sent as "none".  Normalize to 0/1 first.
+	 */
+	buf[1] = (!!(cflag & PARENB)) << 5
+		| (!!(cflag & PARODD)) << 4; /* parity */
+
+	if (serial_debug) {
+		/* Activate serial loopback in the firmware */
+		buf[1] |= 0x80;
+	}
+
+	if (test_bit(STATE_TTY_ENABLE, &st->istate))
+		buf[1] |= 0x7; /* UART enabling */
+
+	/* NOTE(review): raw memcpy sends the baud rate in host byte order;
+	 * confirm the firmware expects little-endian only. */
+	memcpy(&buf[2], (void*) &baud, 4); /* baud rate */
+
+	result = dib0700_usb_control_msg(st,
+					 0,
+					 REQUEST_UART_SET_PARAM,
+					 USB_TYPE_VENDOR | USB_DIR_OUT,
+					 0, 0, buf, ARRAY_SIZE(buf),
+					 USB_CTRL_GET_TIMEOUT);
+
+	if (result < 0)
+		dev_err(&st->intf->dev, "UART settings failed (status = %d)\n", result);
+
+	//Debug
+	if (dvb_usb_dib0700_debug >= 0x06)
+		dev_err(&st->intf->dev, "USB control write: %x %x %x %x %x %x %d = %d\n",
+			0, REQUEST_UART_SET_PARAM,
+			USB_TYPE_VENDOR | USB_DIR_OUT,
+			0, 0,
+			6, USB_CTRL_GET_TIMEOUT, result);
+
+	FUNC_OUT;
+}
+
+
+/* tty driver callbacks for the firmware UART. */
+static const struct tty_operations dib0700_uart_ops = {
+	.open = dib0700_uart_open,
+	.close = dib0700_uart_close,
+	.write = dib0700_uart_write,
+	.write_room = dib0700_uart_write_room,
+	.chars_in_buffer = dib0700_uart_chars_in_buffer,
+	.install = dib0700_uart_install,
+	.cleanup = dib0700_uart_cleanup,
+	.set_termios = dib0700_uart_set_termios,
+};
+#endif //UART_DEV_PENDING
+
+/***********************
+ Misc
+************************/
+/*
+ Parse module argument
+ example : modprobe dib0700_mfd i2c_param="(3:5),(7:5),(2:5)"
+*/
+
+/*
+ * kref release callback: free the per-device resources once the last
+ * reference is dropped (UART URBs and fifo, USB device reference).
+ */
+static void dib0700_destroy_ressources(struct kref *kref)
+{
+	struct dib0700_state *st;
+
+	st = kref_to_dib0700_state(kref);
+
+#ifdef UART_DEV_PENDING
+	/* Free URBs */
+	free_urbs(st);
+
+	kfifo_free(&st->write_fifo);
+#endif //UART_DEV_PENDING
+
+	/* release ref to usb device */
+	usb_put_dev(st->udev);
+	st->udev = NULL;
+
+	pr_debug("[%s][%d]\n", __func__, __LINE__);
+}
+
+
+/* Drop one reference on the device state; frees it on the last put. */
+static void __dib0700_mfd_put(struct dib0700_state *st)
+{
+	kref_put(&st->kref, dib0700_destroy_ressources);
+}
+
+/*
+ * Parse the "i2c_param" module argument, e.g. "(3:5),(7:5),(2:5)".
+ * Each "(sda:scl)" pair describes one bit-banged i2c adapter; the GPIO
+ * numbers are translated through gpio_to_reg[] into st->props[].
+ * Returns 0 on success (or when no parameter was given), -EINVAL on a
+ * syntax error.
+ */
+static int dib0700_parse_i2c_param(struct dib0700_state *st)
+{
+	struct usb_device *udev = st->udev;
+	char *end, *s1, *s2 = i2c_param;
+	int val;
+
+	while (s2 != NULL) {
+
+		st->props[st->num_adapters].master = 0;
+
+		s1 = strchr(s2, '(');
+		if (s1) {
+			val = simple_strtoul(++s1, &end, 0);
+			if (*end != ':')
+				return -EINVAL;
+
+			/* NOTE(review): out-of-range GPIO numbers are
+			 * silently skipped, which can leave the sda/scl
+			 * pair inconsistent — confirm intended. */
+			if (val < ARRAY_SIZE(gpio_to_reg)) {
+				st->props[st->num_adapters].sda =
+					gpio_to_reg[val];
+			}
+
+			s1 = ++end;
+
+			val = simple_strtoul(s1, &end, 0);
+			if (*end != ')')
+				return -EINVAL;
+
+			if (val < ARRAY_SIZE(gpio_to_reg)) {
+				st->props[st->num_adapters++].scl =
+					gpio_to_reg[val];
+			}
+
+			s2 = ++end;
+
+		} else {
+			return -EINVAL;
+		}
+
+		if (!strchr(s2, ',')) {
+			/* No further "(sda:scl)" pair. */
+			return 0;
+		}
+	}
+
+	/* Only reached when i2c_param was NULL from the start. */
+	dev_err(&udev->dev, "There is no parameter\n");
+	return 0;
+}
+
+/*
+ * Synchronous vendor-request write on endpoint 0: tx[0] holds the
+ * request id and the whole tx buffer (txlen bytes) is the payload.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int dib0700_ctrl_wr(struct dib0700_state *st, u8 *tx, u8 txlen)
+{
+	int status;
+	status = dib0700_usb_control_msg(st, 0,
+		tx[0], USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, tx, txlen,
+		USB_CTRL_GET_TIMEOUT);
+
+	if (status != txlen)
+		deb_data("ep 0 write error (status = %d, len: %d)\n",status,txlen);
+
+	return status < 0 ? status : 0;
+}
+
+/*
+ * Program one firmware GPIO: direction (non-zero = output) and level,
+ * packed into a single REQUEST_SET_GPIO vendor request.
+ */
+static int dib0700_set_gpio(struct dib0700_state *st, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val)
+{
+	u8 cmd[3];
+
+	cmd[0] = REQUEST_SET_GPIO;
+	cmd[1] = gpio;
+	cmd[2] = ((!!gpio_dir) << 7) | ((!!gpio_val) << 6);
+
+	return dib0700_ctrl_wr(st, cmd, ARRAY_SIZE(cmd));
+}
+
+/* Gpiolib interface */
+static void dib0700_gpio_set(struct gpio_chip *c, unsigned gpio, int v)
+{
+ struct dib0700_state *st = container_of(c,
+ struct dib0700_state,
+ gpio_chip);
+
+ if (gpio >= ARRAY_SIZE(gpio_to_reg)) {
+ dev_err(&st->intf->dev, "Error: GPIO %u out of range", gpio);
+ return;
+ }
+
+ /* We should split value and direction in different functions */
+ (void)dib0700_set_gpio(st, gpio_to_reg[gpio], 1, v);
+}
+
+/*
+ * gpio_chip .direction_output: set a firmware GPIO to output at level v.
+ * Returns 0 on success or a negative error code.
+ */
+static int dib0700_gpio_direction_output(struct gpio_chip *c, unsigned gpio, int v)
+{
+	struct dib0700_state *st = container_of(c,
+						struct dib0700_state,
+						gpio_chip);
+
+	if (gpio >= ARRAY_SIZE(gpio_to_reg)) {
+		dev_err(&st->intf->dev, "Error: GPIO %u out of range", gpio);
+		/* Fix: gpiolib expects a -errno, not a bare -1 (-EPERM);
+		 * also dropped the unused 'ret' local. */
+		return -EINVAL;
+	}
+
+	/* The firmware request sets direction and value in one go. */
+	return dib0700_set_gpio(st, gpio_to_reg[gpio], 1, v);
+}
+
+/*
+ * Register the device's firmware GPIOs with gpiolib.
+ * Returns 0 on success or the gpiochip_add() error code.
+ */
+int dib0700_gpio_init(struct dib0700_state *st)
+{
+	int ret;
+
+	st->gpio_chip.set = dib0700_gpio_set;
+	st->gpio_chip.direction_output = dib0700_gpio_direction_output;
+	/* Accessors do USB I/O and may sleep. */
+	st->gpio_chip.can_sleep = true;
+	// Let gpiolib find a base for us
+	st->gpio_chip.base = -1;
+	st->gpio_chip.label = DRIVER_NAME;
+	st->gpio_chip.ngpio = ARRAY_SIZE(gpio_to_reg);
+	st->gpio_chip.dev = &static_hook.dev;
+
+	ret = gpiochip_add(&st->gpio_chip);
+
+	if (ret) {
+		dev_err(&st->intf->dev,
+			"Couldn't register GPIO chip: %d\n",
+			ret);
+		return ret;
+	}
+
+	dev_info(&st->intf->dev, "GPIO chip registered\n");
+
+	return 0;
+}
+
+/*
+ * Unregister the GPIO chip, forcibly freeing every GPIO first so
+ * gpiochip_remove() cannot fail with lines still requested.
+ */
+static void dib0700_gpio_release(struct dib0700_state *st)
+{
+	int i;
+	int ret;
+	dev_info(&st->intf->dev, "GPIO chip is being removed\n");
+
+	/* XXX this is ugly but we don't have choice : we can't keep gpio
+	   driver open while the device is removed because
+	*/
+	for (i = st->gpio_chip.base; i < st->gpio_chip.base + st->gpio_chip.ngpio;
+	     i++) {
+		gpio_free(i);
+	}
+
+	ret = gpiochip_remove(&st->gpio_chip);
+	if (ret < 0) {
+		dev_info(&st->intf->dev, "GPIO cannot be removed %d\n", ret);
+	}
+}
+
+/***********************
+ I2C stuff
+************************/
+#define I2C_WR_OFFSET 6
+
+/*
+ * i2c algorithm .master_xfer: run a transaction using the firmware's
+ * "new" i2c messages (reliable, support reads not preceded by a write).
+ * Returns the number of messages transferred or a negative error code.
+ */
+static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+			    int num)
+{
+	/* The new i2c firmware messages are more reliable and in particular
+	   properly support i2c read calls not preceded by a write */
+
+	struct dib0700_state *st = i2c_get_adapdata(adap);
+	struct i2c_adapter_properties *prop
+		= container_of(adap, struct i2c_adapter_properties, i2c_adap);
+
+	uint8_t bus_mode = !prop->master; /* 0=eeprom bus, 1=frontend bus */
+	uint8_t gen_mode = !prop->master; /* 0=master i2c, 1=gpio i2c */
+	uint8_t en_start = 0;
+	uint8_t en_stop = 0;
+	int result, i;
+	uint8_t buf[256 + I2C_WR_OFFSET] = {0};
+
+	if (test_bit(STATE_DISCONNECT, &st->istate))
+		return -ENODEV;
+
+	rt_mutex_lock(&st->bus_lock);
+	/* Acces to i2c is protected by a mutex in i2c-core.c */
+
+	for (i = 0; i < num; i++) {
+		if (i == 0) {
+			/* First message in the transaction */
+			en_start = 1;
+		} else if (!(msg[i].flags & I2C_M_NOSTART)) {
+			/* Device supports repeated-start */
+			en_start = 1;
+		} else {
+			/* Not the first packet and device doesn't support
+			   repeated start */
+			en_start = 0;
+		}
+		if (i == (num - 1)) {
+			/* Last message in the transaction */
+			en_stop = 1;
+		}
+
+		if (msg[i].flags & I2C_M_RD) {
+
+			/* Read request */
+			u16 index, value;
+			uint8_t i2c_dest;
+
+			i2c_dest = (msg[i].addr << 1);
+			value = ((en_start << 7) | (en_stop << 6)
+				 | ((uint8_t)prop->sda)) << 8
+				| i2c_dest;
+
+			/* I2C ctrl + FE bus; */
+			index = ((gen_mode << 6) & 0xC0) |
+				((bus_mode << 4) & 0x30) |
+				(uint8_t)prop->scl;
+
+
+			result = dib0700_usb_control_msg(st,
+							 0,
+							 REQUEST_NEW_I2C_READ,
+							 USB_TYPE_VENDOR | USB_DIR_IN,
+							 value, index, msg[i].buf,
+							 msg[i].len,
+							 USB_CTRL_GET_TIMEOUT);
+
+			if (dvb_usb_dib0700_debug >= 0x06) {
+				dev_err(&adap->dev, "USB control read: %x %x %x %x %x %x %d = %d\n",
+					0,
+					REQUEST_NEW_I2C_READ,
+					USB_TYPE_VENDOR | USB_DIR_IN,
+					value, index,
+					msg[i].len,
+					USB_CTRL_GET_TIMEOUT,
+					result);
+			}
+
+			if (result != msg[i].len) {
+				deb_info("i2c read error (status = %d)\n", result);
+				result = -EREMOTEIO;
+				goto exit_i2c_xfer;
+			}
+
+			debug_dump_usb_xfer(st, "I2C read",
+					    msg[i].buf, msg[i].len);
+
+		} else {
+			buf[0] = REQUEST_NEW_I2C_WRITE;
+			buf[1] = msg[i].addr << 1;
+			buf[2] = (en_start << 7) | (en_stop << 6) |
+				(msg[i].len & 0x3F);
+			/* I2C ctrl + FE bus; */
+			buf[3] = ((gen_mode << 6) & 0xC0) |
+				((bus_mode << 4) & 0x30);
+
+			buf[4] = ((uint8_t)prop->sda) << 4
+				| (uint8_t)prop->scl;
+
+			if ( msg[i].len + I2C_WR_OFFSET >
+			     ARRAY_SIZE(buf)) {
+				dev_err(&adap->dev,
+					"msg[i].len = %d\n", msg[i].len);
+				/*
+				 * Fix: bail out through the common exit so
+				 * bus_lock is released (it used to leak
+				 * here, deadlocking the next transfer),
+				 * and do not drop a device reference this
+				 * function never took.
+				 */
+				result = -EINVAL;
+				goto exit_i2c_xfer;
+			}
+
+			/* The Actual i2c payload */
+			memcpy(&buf[I2C_WR_OFFSET], msg[i].buf, msg[i].len);
+
+			result = dib0700_usb_control_msg(st,
+							 0,
+							 REQUEST_NEW_I2C_WRITE,
+							 USB_TYPE_VENDOR | USB_DIR_OUT,
+							 0, 0, buf, msg[i].len + I2C_WR_OFFSET,
+							 USB_CTRL_GET_TIMEOUT);
+
+			if (dvb_usb_dib0700_debug >= 0x06) {
+				dev_err(&adap->dev, "USB control write: %x %x %x %x %x %x %d = %d\n",
+					0,
+					REQUEST_NEW_I2C_WRITE,
+					USB_TYPE_VENDOR | USB_DIR_OUT,
+					0, 0, msg[i].len + I2C_WR_OFFSET,
+					USB_CTRL_GET_TIMEOUT, result);
+			}
+
+			debug_dump_usb_xfer(st, "I2C write ",
+					    buf, msg[i].len + I2C_WR_OFFSET);
+
+			if (result != msg[i].len + I2C_WR_OFFSET) {
+				deb_info("i2c write error (status = %d)\n", result);
+				result = -EREMOTEIO;
+				goto exit_i2c_xfer;
+			}
+		}
+	}
+
+	/* All messages transferred. */
+	result = i;
+
+exit_i2c_xfer:
+	rt_mutex_unlock(&st->bus_lock);
+	return result;
+}
+
+/* i2c algorithm .functionality: plain I2C plus SMBus emulation. */
+static u32 dib0700_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* i2c algorithm: all transfers are tunnelled over USB vendor requests. */
+static struct i2c_algorithm dib0700_i2c_algo = {
+	.master_xfer = dib0700_i2c_xfer,
+	.functionality = dib0700_i2c_func,
+};
+
+/*
+ * Program the firmware i2c clock dividers for the requested SCL rate
+ * (dividers derived from the 30 MHz and 72 MHz reference clocks).
+ * Returns the dib0700_ctrl_wr() status (0 on success), -EINVAL for a
+ * zero rate.
+ */
+static int dib0700_set_i2c_speed(struct dib0700_state *st, u16 scl_kHz)
+{
+	u16 divider;
+	int ret;
+	u8 buf[8];
+
+	if (scl_kHz == 0)
+		return -EINVAL;
+
+	buf[0] = REQUEST_SET_I2C_PARAM;
+	divider = (u16) (30000 / scl_kHz);
+	buf[1] = 0;
+	buf[2] = (u8) (divider >> 8);
+	buf[3] = (u8) (divider & 0xff);
+	divider = (u16) (72000 / scl_kHz);
+	buf[4] = (u8) (divider >> 8);
+	buf[5] = (u8) (divider & 0xff);
+	divider = (u16) (72000 / scl_kHz); /* clock: 72MHz */
+	buf[6] = (u8) (divider >> 8);
+	buf[7] = (u8) (divider & 0xff);
+
+	ret = dib0700_ctrl_wr(st, buf, ARRAY_SIZE(buf));
+
+	deb_info("setting I2C speed: %04x %04x %04x (%d kHz).",
+		 (buf[2] << 8) | (buf[3]), (buf[4] << 8) |
+		 buf[5], (buf[6] << 8) | buf[7], scl_kHz);
+
+	return ret;
+}
+
+/***********************
+ Firmware stuff
+************************/
+/*
+ * Extract one Intel-HEX style record from the firmware image at *pos,
+ * advancing *pos past the record.  Returns the new offset (> 0) while
+ * records remain, 0 at end of image, -EINVAL for a truncated record.
+ */
+static int usb_get_hexline(const struct firmware *fw, struct hexline *hx,
+			   int *pos)
+{
+	u8 *b = (u8 *) &fw->data[*pos];
+	int data_offs = 4;
+	if (*pos >= fw->size)
+		return 0;
+
+	memset(hx,0,sizeof(struct hexline));
+
+	/* Record: len, addr_lo, addr_hi, type, data[len], checksum. */
+	hx->len = b[0];
+
+	if ((*pos + hx->len + 4) >= fw->size)
+		return -EINVAL;
+
+	hx->addr = b[1] | (b[2] << 8);
+	hx->type = b[3];
+
+	if (hx->type == 0x04) {
+		/* b[4] and b[5] are the Extended linear address record data field */
+		hx->addr |= (b[4] << 24) | (b[5] << 16);
+/*		hx->len -= 2;
+		data_offs += 2; */
+	}
+	memcpy(hx->data,&b[data_offs],hx->len);
+	hx->chk = b[hx->len + data_offs];
+
+	*pos += hx->len + 5;
+
+	return *pos;
+}
+
+
+/*
+ * Tell the device to jump to the downloaded firmware at 'address'
+ * (REQUEST_JUMPRAM over bulk endpoint 1).  Returns 0 on success.
+ */
+static int dib0700_jumpram(struct usb_device *udev, u32 address)
+{
+	int ret = 0, actlen;
+	u8 *buf;
+
+	/* Heap buffer: usb_bulk_msg() needs DMA-able memory (no stack). */
+	buf = kmalloc(8, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	buf[0] = REQUEST_JUMPRAM;
+	buf[1] = 0;
+	buf[2] = 0;
+	buf[3] = 0;
+	/* Target address, big-endian. */
+	buf[4] = (address >> 24) & 0xff;
+	buf[5] = (address >> 16) & 0xff;
+	buf[6] = (address >> 8) & 0xff;
+	buf[7] = address & 0xff;
+
+	if ((ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01),
+				buf,8,&actlen,USB_CTRL_TMOUT)) < 0) {
+		dev_err(&udev->dev, "jumpram to 0x%x failed\n",address);
+		goto out;
+	}
+	if (actlen != 8) {
+		/* Short transfer: command did not go through entirely. */
+		dev_err(&udev->dev, "jumpram to 0x%x failed\n",address);
+		ret = -EIO;
+		goto out;
+	}
+out:
+	kfree(buf);
+	return ret;
+}
+
+
+/*
+ * Push the firmware image record by record over bulk endpoint 1, then
+ * jump to the fixed entry point.  Returns 0 on success, a negative
+ * error code on parse or transfer failure.
+ */
+static int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw)
+{
+	struct hexline hx;
+	int pos = 0, ret, act_len;
+	u8 *buf;
+
+	/* Heap buffer: usb_bulk_msg() needs DMA-able memory. */
+	buf = kmalloc(260, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	while ((ret = usb_get_hexline(fw, &hx, &pos)) > 0) {
+		deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
+			   hx.addr, hx.len, hx.chk);
+
+		/* Re-serialize the record for the device. */
+		buf[0] = hx.len;
+		buf[1] = (hx.addr >> 8) & 0xff;
+		buf[2] = hx.addr & 0xff;
+		buf[3] = hx.type;
+		memcpy(&buf[4],hx.data,hx.len);
+		buf[4+hx.len] = hx.chk;
+
+		ret = usb_bulk_msg(udev,
+				   usb_sndbulkpipe(udev, 0x01),
+				   buf,
+				   hx.len + 5,
+				   &act_len,
+				   USB_CTRL_TMOUT);
+
+		if (ret < 0) {
+			dev_err(&udev->dev, "firmware download failed at %d with %d",pos,ret);
+			goto out;
+		}
+
+	}
+
+	if (ret == 0) {
+		/* start the firmware */
+		if ((ret = dib0700_jumpram(udev, 0x70000000)) == 0) {
+			/* Fix: success is informational (was dev_err) and
+			 * "Firware" was misspelled. */
+			dev_info(&udev->dev, "Firmware started successfully\n");
+			msleep(200);
+		}
+	} else
+		/* usb_get_hexline() reported a malformed record. */
+		ret = -EIO;
+
+out:
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * Fetch hardware / ROM / firmware version words from the device and
+ * cache them in the state.  Returns the usb_control_msg() status.
+ * NOTE(review): a failed transfer is not checked before parsing; buf is
+ * zero-initialized so the cached versions then all read as 0 — confirm
+ * callers check the return value.
+ */
+static int dib0700_get_version(struct dib0700_state *st)
+{
+	int ret;
+	unsigned long flags;
+	u8 buf[16] = {0};
+
+	ret = usb_control_msg(st->udev, usb_rcvctrlpipe(st->udev, 0),
+			      REQUEST_GET_VERSION,
+			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+			      buf, 16, USB_CTRL_GET_TIMEOUT);
+
+	spin_lock_irqsave(&st->lock, flags);
+
+	/* Four big-endian 32-bit words: hw, rom, fw version, fw type. */
+	st->hwversion = (buf[0] << 24) | (buf[1] << 16) |
+		(buf[2] << 8) | buf[3];
+
+	st->romversion = (buf[4] << 24) | (buf[5] << 16) |
+		(buf[6] << 8) | buf[7];
+
+	st->fw_version = (buf[8] << 24) | (buf[9] << 16) |
+		(buf[10] << 8) | buf[11];
+
+	st->fwtype = (buf[12] << 24) | (buf[13] << 16) |
+		(buf[14] << 8) | buf[15];
+
+	spin_unlock_irqrestore(&st->lock, flags);
+	return ret;
+}
+
+/***********************************************/
+#ifdef UART_DEV_PENDING
+
+/*
+ * Tear down the UART side of the device: stop the timers, detach the
+ * port from the uart table, kill in-flight URBs and pending work.
+ */
+static void dib0700_serial_release(struct dib0700_state *st)
+{
+	int index;
+
+	FUNC_IN;
+
+	/* Deactivate the timer */
+	del_timer_sync(&st->rx_timer);
+	del_timer_sync(&st->tx_timer);
+
+	/*
+	 * Fix: compare the slot at the loop index.  The old code tested
+	 * dib0700_uart_table[index] but compared
+	 * dib0700_uart_table[st->index], mixing the two indices.
+	 */
+	for (index = 0; index < MAX_NBR_UARTS; index++) {
+		if (dib0700_uart_table[index] == st) {
+			dib0700_uart_port_remove(st);
+		}
+	}
+
+	/* Kill URBs */
+	kill_urbs(st);
+	cancel_work_sync(&st->work);
+
+	FUNC_OUT;
+}
+
+/*
+ * Register a tty for this device and allocate its UART plumbing: port,
+ * RX/TX timers, read/write control URBs with their buffers, and the TX
+ * kfifo.  Returns 0 on success, -ENOMEM (or the tty registration error)
+ * on failure.
+ * NOTE(review): on a mid-way failure the URBs/buffers allocated so far
+ * are not freed here — presumably free_urbs() in the destroy path
+ * covers this; confirm the caller always takes that path.
+ */
+static int dib0700_serial_init(struct dib0700_state *st)
+{
+	int err = -ENOMEM, index;
+	struct usb_host_interface *intf_desc;
+
+	/* Look for available space for tty registration */
+	spin_lock(&dib0700_uart_table_lock);
+	for (index = 0; index < MAX_NBR_UARTS; index++) {
+		if (!dib0700_uart_table[index]) {
+			st->index = index;
+			dib0700_uart_table[index] = st;
+			break;
+		}
+	}
+	spin_unlock(&dib0700_uart_table_lock);
+
+	intf_desc = st->intf->cur_altsetting;
+	tty_port_init(&st->port);
+	st->port.ops = &dib0700_uart_port_ops;
+	spin_lock_init(&st->lock);
+	INIT_WORK(&st->work, dib0700_port_work);
+
+	/* Only proceed when a free table slot was found above. */
+	if (index<MAX_NBR_UARTS) {
+		st->ttydev = tty_register_device(dib0700_uart_tty_driver,
+						 st->index, &st->intf->dev);
+		if (IS_ERR(st->ttydev)) {
+			dib0700_uart_port_remove(st);
+			dev_info(&st->intf->dev, "TTY registration failed\n");
+			err = PTR_ERR(st->ttydev);
+			goto serial_init_failed;
+		} else
+			dev_info(&st->intf->dev, "TTY registration succeed\n");
+
+		set_bit(STATE_TTY_REGISTER, &st->istate);
+
+		/* Setup timer */
+		setup_timer(&st->rx_timer, rx_timer_routine, (unsigned long) st);
+		setup_timer(&st->tx_timer, tx_timer_routine, (unsigned long) st);
+
+		/* Pre-built setup packets shared by all read/write URBs. */
+		st->rd_ctrlrq.bRequest = REQUEST_UART_READ;
+		st->rd_ctrlrq.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
+		st->rd_ctrlrq.wValue = 0;
+		st->rd_ctrlrq.wIndex = 0;
+		st->rd_ctrlrq.wLength = cpu_to_le16(RD_URB_SIZE);
+
+		st->wrt_ctrlrq.bRequest = REQUEST_UART_WRITE;
+		st->wrt_ctrlrq.bRequestType = USB_TYPE_VENDOR
+			| USB_DIR_OUT;
+		st->wrt_ctrlrq.wValue = 0;
+		st->wrt_ctrlrq.wIndex = 0;
+		st->wrt_ctrlrq.wLength
+			= cpu_to_le16(WR_URB_SIZE + SRL_WRITE_OFFSET);
+
+		/* Read URBs: all start out free. */
+		for (index = 0; index < NBR_MAX_RD_URBS; index++) {
+			/* similar to usb-serial */
+			set_bit(index, &st->read_urbs_free);
+
+			st->read_urbs[index] = usb_alloc_urb(0, GFP_KERNEL);
+			if (!st->read_urbs[index]) {
+				dev_err(&st->intf->dev,
+					"No free urbs available\n");
+				goto serial_init_failed;
+			}
+			st->uart_in_buffers[index] = kmalloc(RD_URB_SIZE,
+							     GFP_KERNEL);
+			if (!st->uart_in_buffers[index]) {
+				dev_err(&st->intf->dev,
+					"Couldn't allocate buffer for URBs\n");
+				goto serial_init_failed;
+			}
+
+			st->uart_in_buffers[index][0] = REQUEST_UART_READ;
+
+			usb_fill_control_urb(st->read_urbs[index], st->udev,
+					     usb_rcvctrlpipe(st->udev, 0),
+					     (unsigned char *)&st->rd_ctrlrq,
+					     st->uart_in_buffers[index],
+					     RD_URB_SIZE,
+					     dib0700_read_ctrl_callback,
+					     st);
+		}
+
+		/* Write URBs: all start out free. */
+		for (index = 0; index < NBR_MAX_WR_URBS; index++) {
+			set_bit(index, &st->write_urbs_free);
+
+			st->write_urbs[index] = usb_alloc_urb(0, GFP_KERNEL);
+			if (!st->write_urbs[index]) {
+				dev_err(&st->intf->dev,
+					"No free urbs available\n");
+				goto serial_init_failed;
+			}
+			st->uart_out_buffers[index] = kmalloc(WR_URB_SIZE,
+							      GFP_KERNEL);
+			if (!st->uart_out_buffers[index]) {
+				dev_err(&st->intf->dev,
+					"Couldn't allocate buffer for URBs\n");
+				goto serial_init_failed;
+			}
+
+			usb_fill_control_urb(st->write_urbs[index], st->udev,
+					     usb_sndctrlpipe(st->udev, 0),
+					     (unsigned char *)&st->wrt_ctrlrq,
+					     st->uart_out_buffers[index],
+					     WR_URB_SIZE + 6,
+					     dib0700_write_ctrl_callback,
+					     st);
+		}
+
+		/* init write FIFO */
+		if (kfifo_alloc(&st->write_fifo, PAGE_SIZE, GFP_KERNEL)) {
+			dev_err(&st->intf->dev,
+				"FIFO can't be allocated\n");
+			goto serial_init_failed;
+		} else
+			err = 0;
+	}
+
+serial_init_failed:
+	return err;
+}
+#endif //UART_DEV_PENDING
+
+
+/*
+ * Create and register one i2c adapter per entry of st->props[].
+ * Bit-banged adapters sharing a pin with an earlier adapter are
+ * skipped, and the pins are switched to output first.
+ */
+static void dib0700_i2c_init(struct dib0700_state *st)
+{
+	int idx, check = 0, loop;
+	int sda = 0, scl = 0;
+	struct usb_device *udev = st->udev;
+
+	rt_mutex_init(&st->bus_lock);
+	for (idx = 0; idx < st->num_adapters; idx++) {
+
+		/* NOTE(review): an all-zero entry aborts the whole loop —
+		 * confirm 'continue' was not intended here instead. */
+		if( (!st->props[idx].sda)
+		    && (!st->props[idx].scl)
+		    && (!st->props[idx].master) )
+			return;
+
+		st->props[idx].num_adapter = idx;
+		st->props[idx].i2c_adap.owner = THIS_MODULE;
+		st->props[idx].i2c_adap.algo = &dib0700_i2c_algo;
+		st->props[idx].i2c_adap.algo_data = NULL;
+		//st->props[idx].i2c_adap.dev.parent = &st->udev->dev;
+		st->props[idx].i2c_adap.dev.parent = &static_hook.dev;
+
+		if (!st->props[idx].master) {
+			/* Reject a bit-banged adapter reusing a pin
+			 * already claimed by an earlier one. */
+			for (check = 0; check < idx; check++) {
+
+				if ( (!st->props[check].master)
+				     && ((st->props[idx].sda == st->props[check].sda)
+					 || (st->props[idx].sda == st->props[check].scl)
+					 || (st->props[idx].scl == st->props[check].sda))) {
+					dev_err(&udev->dev,
+						"Pin used\n[idx=%d] : (%d,%d)\t [check=%d] : (%d,%d)\n",
+						idx, st->props[idx].sda, st->props[idx].scl,
+						check, st->props[check].sda, st->props[check].scl);
+
+					break;
+				}
+			}
+			if (check!=idx)
+				continue;
+		}
+
+		/* Set pins as output */
+		if (dib0700_set_gpio(st, st->props[idx].sda, 0, 1)
+		    || dib0700_set_gpio(st, st->props[idx].scl, 0, 1)) {
+			dev_err(&udev->dev, "can't set i2c pin as output\n");
+			continue;
+		}
+
+		/* Map the register values back to GPIO numbers so the
+		 * adapter can be named after its pins. */
+		for (loop = 0;
+		     loop < ARRAY_SIZE(gpio_to_reg);
+		     loop++) {
+
+			if (st->props[idx].sda
+			    == gpio_to_reg[loop]) {
+				sda = loop;
+			} else if (st->props[idx].scl
+				   == gpio_to_reg[loop]) {
+				scl = loop;
+			}
+		}
+
+		snprintf(st->props[idx].i2c_adap.name,
+			 sizeof(st->props[idx].i2c_adap.name),
+			 DRIVER_NAME ".%d.%d", sda, scl);
+
+		dev_info(&udev->dev, "adapter \"%s\" (%s sda:%d,scl:%d)\n",
+			 st->props[idx].i2c_adap.name,
+			 (st->props[idx].master)?
+			 "master":"bit banging",
+			 sda, scl);
+
+		i2c_set_adapdata(&st->props[idx].i2c_adap, st);
+
+		if (i2c_add_adapter(&st->props[idx].i2c_adap) < 0) {
+			dev_err(&udev->dev, "Failed to add adapter\n");
+		}
+
+		set_bit(STATE_I2C_REGISTER, &st->istate);
+	}
+}
+
+
+/*
+ * dib0700_hook_init() - download firmware to the device and read back its
+ * version information.
+ *
+ * Requests the firmware image named by the file-scope 'firmware' symbol
+ * (defined elsewhere in this file), downloads it with
+ * dib0700_download_firmware(), then fills the version fields via
+ * dib0700_get_version().  The firmware blob is released on every path.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ *
+ * NOTE(review): the log strings below spell "Firware"/"firware" —
+ * runtime strings, left untouched here; worth fixing in a code change.
+ */
+static int dib0700_hook_init(struct dib0700_state *st)
+{
+ int err = -1;
+ const struct firmware *fw = NULL;
+ struct usb_device *udev = st->udev;
+ //char *firmware = "dvb-usb-dib0700-1.20.fw";
+
+ /* Get firmware revision */
+ //if ((err = dib0700_get_version(st)) < 0) {
+ /* Download firmware */
+ dev_info(&st->intf->dev, "Request firware \"%s\"\n", firmware);
+ if ((err = request_firmware(&fw, firmware, &udev->dev)) != 0) {
+ dev_err(&st->intf->dev, "Firware \"%s\" not found\n", firmware);
+ goto init_failed;
+ }
+
+ /* TODO find a better solution for
+ struct hexline (see dvb-usb.h) and
+ dvb_usb_get_hexline/usb_get_hexline ( exported from dvb-usb-firmware.c )*/
+ if ((err = dib0700_download_firmware(st->udev, fw)) !=0) {
+ dev_err(&st->intf->dev, "Firware download failed\n");
+ goto init_failed;
+ }
+
+ dev_info(&st->intf->dev, "Firmware downloaded\n");
+
+ /* Get firmware revision */
+ if ((err = dib0700_get_version(st)) < 0) {
+ dev_err(&st->intf->dev, "Unable to get firmware version\n");
+ goto init_failed;
+ }
+
+ /* Force USB descriptor update (from ROM settings, to firmware) */
+ /*usb_control_msg(st->udev, usb_rcvctrlpipe(st->udev, 0),
+ REQUEST_DISCONNECT,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ NULL, 0, USB_CTRL_GET_TIMEOUT);
+ } else {
+ dev_info(&st->intf->dev, "Device in warm state\n");
+ }*/
+
+ err = 0;
+
+init_failed:
+ if (fw)
+ release_firmware(fw);
+ return err;
+}
+
+/*
+ * dib0700_probe() - bind the driver to a dib0700 USB interface.
+ *
+ * Reuses the global 'chip_state' if the device was probed before
+ * (firmware download resets the USB device, causing a re-probe);
+ * otherwise allocates a fresh state.  Downloads firmware, parses the
+ * i2c bit-banging parameters, and — only on the first probe — registers
+ * the I2C adapters and the GPIO chip.  On success the GPIO chip base is
+ * returned through *gpio_base.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dib0700_probe(struct usb_interface *intf, int *gpio_base)
+{
+ int err = -ENXIO;
+ struct dib0700_state *st = NULL;
+ if (chip_state) {
+ st = chip_state;
+ }
+ else {
+ st = kzalloc(sizeof(struct dib0700_state), GFP_KERNEL);
+ if (st == NULL) {
+ dev_err(&intf->dev, "Out of memory\n");
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ st->intf = intf;
+ /* get a ref to usb device */
+ st->udev = usb_get_dev(interface_to_usbdev(intf));
+
+ kref_init(&st->kref);
+ spin_lock_init(&st->lock);
+ init_waitqueue_head(&st->wq_endp0);
+
+ usb_set_intfdata(intf, st);
+
+ if ((err = dib0700_hook_init(st)) !=0) {
+ dev_err(&intf->dev, "Init failed\n");
+ goto free_mem;
+ }
+ dev_info(&intf->dev, "Firmware version: %x, %d, 0x%x, %d\n",
+ st->hwversion, st->romversion,
+ st->fw_version, st->fwtype);
+
+
+ /* Define default i2c adapter */
+ st->num_adapters = 0;
+ st->props[0].master = 1;
+ st->num_adapters++;
+
+ /* Define i2c adapter that use bit banging */
+ /* NOTE(review): a parse failure is only logged, probing continues. */
+ if (dib0700_parse_i2c_param(st)){
+ dev_err(&intf->dev,
+ "Bad parameter (bad i2c_param)\n");
+ }
+
+#ifdef UART_DEV_PENDING
+ /* serial device */
+ err = dib0700_serial_init(st);
+ if (err<0) {
+ dev_err(&intf->dev,
+ "Serial init failed\n");
+ goto free_mem;
+ }
+#endif //UART_DEV_PENDING
+
+ /* i2c/gpio are registered once only; they survive re-probes. */
+ if (!chip_state) {
+ dib0700_i2c_init(st);
+
+ err = dib0700_gpio_init(st);
+ if (err < 0) {
+ dev_err(&intf->dev,
+ "GPIO init failed\n");
+ goto release_uart;
+ }
+ }
+ *gpio_base = st->gpio_chip.base;
+
+ clear_bit(STATE_DISCONNECT, &st->istate);
+
+ chip_state = st;
+ return 0;
+
+release_uart:
+#ifdef UART_DEV_PENDING
+ if (test_and_clear_bit(STATE_TTY_REGISTER, &st->istate)) {
+ /* Remove serial */
+ dib0700_serial_release(st);
+ }
+#endif //UART_DEV_PENDING
+free_mem:
+ /* NOTE(review): when st == chip_state (re-probe), this drops a ref
+ * on the still-globally-referenced state — verify kref balance. */
+ dib0700_mfd_put(st);
+error:
+ return err;
+}
+EXPORT_SYMBOL(dib0700_probe);
+
+/*
+ * dib0700_device_exit() - USB disconnect handler.
+ *
+ * Marks the state as disconnected, tears down the serial device if it
+ * was registered, and drops the interface's reference on the shared
+ * state.  The I2C adapters and GPIO chip are deliberately left
+ * registered (see dib0700_exit()).
+ */
+void dib0700_device_exit(struct usb_interface *intf)
+{
+ struct dib0700_state *st = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+ set_bit(STATE_DISCONNECT, &st->istate);
+
+ /* TODO: protect retrieval by mutex */
+
+#ifdef UART_DEV_PENDING
+ if (test_and_clear_bit(STATE_TTY_REGISTER, &st->istate)) {
+ /* Remove serial */
+ dib0700_serial_release(st);
+ }
+#endif //UART_DEV_PENDING
+
+ dib0700_mfd_put(st);
+ dev_info(&intf->dev, "driver exit\n");
+}
+EXPORT_SYMBOL(dib0700_device_exit);
+
+/*
+ * dib0700_init() - module init: register the hook platform device and
+ * the "ttyDIB" TTY driver (dynamic major, raw serial, 9600 8N1 default).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ *
+ * NOTE(review): platform_device_register() return value is ignored, and
+ * the device is not unregistered on the failure paths below — leak on
+ * error; confirm and fix in a code change.
+ */
+static int __init dib0700_init(void)
+{
+ int ret;
+ struct tty_driver *tty_drv;
+
+ platform_device_register(&static_hook);
+
+ dib0700_uart_tty_driver = tty_drv = alloc_tty_driver(MAX_NBR_UARTS);
+ if (!tty_drv)
+ return -ENOMEM;
+
+ tty_drv->driver_name = DRIVER_NAME;
+ tty_drv->name = "ttyDIB";
+ tty_drv->major = 0;
+ tty_drv->minor_start = 0;
+ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_drv->subtype = SERIAL_TYPE_NORMAL;
+ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_drv->init_termios = tty_std_termios;
+ tty_drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ tty_drv->init_termios.c_ispeed = 9600;
+ tty_drv->init_termios.c_ospeed = 9600;
+ tty_set_operations(tty_drv, &dib0700_uart_ops);
+
+ /* register the TTY driver */
+ ret = tty_register_driver(tty_drv);
+ if (ret < 0) {
+ printk(KERN_ERR DRIVER_NAME "%s - tty_register_driver failed\n",
+ __func__);
+ goto tty_register_failed;
+ }
+
+ return 0;
+
+tty_register_failed:
+ put_tty_driver(tty_drv);
+ return ret;
+}
+
+/*
+ * dib0700_exit() - module exit: unregister the TTY driver and, if a
+ * device was ever probed, delete its I2C adapters, release the GPIO
+ * resources and free the global state.
+ */
+static void __exit dib0700_exit(void)
+{
+ tty_unregister_driver(dib0700_uart_tty_driver);
+ //TODO check memory leak if unload without being used
+ put_tty_driver(dib0700_uart_tty_driver);
+
+ /* i2c and gpio subsystem are not robust to removal while in
+ use, don't remove them */
+ if (chip_state) {
+ if (test_and_clear_bit(STATE_I2C_REGISTER, &chip_state->istate)) {
+ int index;
+ for (index = 0; index < chip_state->num_adapters; index++)
+ i2c_del_adapter(&chip_state->props[index].i2c_adap);
+ }
+
+ /* this is safe to call here if we don't
+ delete st->gpio_chip */
+ dib0700_gpio_release(chip_state);
+
+ kfree(chip_state);
+ chip_state = NULL;
+ }
+}
+
+module_init(dib0700_init);
+module_exit(dib0700_exit);
+
+
+MODULE_AUTHOR("Christian ROSALIE <christian.rosalie@parrot.com>");
+MODULE_DESCRIPTION("Driver for multi-function devices based on DiBcom DiB0700 - USB bridge");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/mfd/dib07x0.h b/drivers/parrot/mfd/dib07x0.h
new file mode 100644
index 00000000000000..770de4791af8e3
--- /dev/null
+++ b/drivers/parrot/mfd/dib07x0.h
@@ -0,0 +1,65 @@
+#ifndef _DIB07X0_H_
+#define _DIB07X0_H_
+
+enum dib07x0_gpios {
+ GPIO0 = 0,
+ GPIO1 = 2,
+ GPIO2 = 3,
+ GPIO3 = 4,
+ GPIO4 = 5,
+ GPIO5 = 6,
+ GPIO6 = 8,
+ GPIO7 = 10,
+ GPIO8 = 11,
+ GPIO9 = 14,
+ GPIO10 = 15,
+};
+
+#define GPIO_IN 0
+#define GPIO_OUT 1
+
+extern int dvb_usb_dib0700_debug;
+#define dprintk(var,level,args...) \
+ do { if ((var & level)) { printk(args); } } while (0)
+
+#define debug_dump(b,l,func) {\
+ int loop_; \
+ for (loop_ = 0; loop_ < l; loop_++) func("%02x ", b[loop_]); \
+ func("\n");\
+}
+
+
+#define deb_info(args...) dprintk(dvb_usb_dib0700_debug,0x01,args)
+#define deb_fw(args...) dprintk(dvb_usb_dib0700_debug,0x02,args)
+#define deb_fwdata(args...) dprintk(dvb_usb_dib0700_debug,0x04,args)
+#define deb_data(args...) dprintk(dvb_usb_dib0700_debug,0x08,args)
+
+#define REQUEST_SET_USB_XFER_LEN 0x0 /* valid only for firmware version */
+ /* higher than 1.21 */
+#define REQUEST_I2C_READ 0x2
+#define REQUEST_I2C_WRITE 0x3
+#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */
+#define REQUEST_DISCONNECT 0x5 /* force Linux to renews USB descriptors */
+#define REQUEST_JUMPRAM 0x8
+#define REQUEST_SET_CLOCK 0xB
+#define REQUEST_SET_GPIO 0xC
+#define REQUEST_ENABLE_VIDEO 0xF
+ // 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog)
+ // 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1)
+ // 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " )
+#define REQUEST_SET_I2C_PARAM 0x10
+#define REQUEST_SET_RC 0x11
+#define REQUEST_NEW_I2C_READ 0x12
+#define REQUEST_NEW_I2C_WRITE 0x13
+#define REQUEST_GET_VERSION 0x15
+#define REQUEST_UART_SET_PARAM (REQUEST_GET_VERSION + 0x6)
+#define REQUEST_UART_WRITE (REQUEST_GET_VERSION + 0x7)
+#define REQUEST_UART_READ (REQUEST_GET_VERSION + 0x8)
+#define REQUEST_UART_TX_FIFO_LVL (REQUEST_GET_VERSION + 0x9)
+
+#define USB_VID_DIBCOM 0x10b8
+#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0066
+
+#define USB_CTRL_TMOUT 1000
+
+#endif
diff --git a/drivers/parrot/mfd/p7mu-clk.c b/drivers/parrot/mfd/p7mu-clk.c
new file mode 100644
index 00000000000000..cbdf8df4ec73ed
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu-clk.c
@@ -0,0 +1,439 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu-clk.c - Parrot7 power management unit internal
+ * clocks implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 21-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <mach/pwm.h>
+#include "p7mu.h"
+#include "p7mu-clk.h"
+
+/*
+ * Internal oscillator calibration registers (configuration block)
+ */
+#define P7MU_CFG_OSC_CAL_REG (P7MU_CFG_PAGE + 0x0c)
+
+#define P7MU_OSC_CAL_EN ((u16) 1U << 0)
+#define P7MU_OSC_CAL_SEL ((u16) 1U << 1)
+#define P7MU_OSC_SSM_ENABLE ((u16) 1U << 2)
+#define P7MU_OSC_CAL_OUT_SFT 9
+#define P7MU_OSC_CAL_OUT_MSK ((u16) 0x3fU << P7MU_OSC_CAL_OUT_SFT)
+#define P7MU_OSC_CAL_RDY ((u16) 1U << 15)
+
+/*
+ * Clock generator block register
+ */
+#define P7MU_CLK_PAGE ((u16) 0x0c00)
+#define P7MU_CLK_CFG_REG (P7MU_CLK_PAGE + 0x0)
+
+#define P7MU_CLK_EXT_EN ((u16) (1U << 0))
+#define P7MU_CLK_FAST_CLK4PWM_1 ((u16) (1U << 1))
+#define P7MU_CLK_EXT_USED ((u16) (1U << 2))
+
+/*
+ * Clock generator backup block register
+ */
+#define P7MU_CLKBKUP_PAGE ((u16) 0x0d00)
+#define P7MU_CLKBKUP_CFG_REG (P7MU_CLKBKUP_PAGE + 0x0)
+
+#define P7MU_CLKBKUP_EXT32K_EN ((u16) (1U << 0))
+#define P7MU_CLKBKUP_FAST_CLK4PWM_0 ((u16) (1U << 1))
+#define P7MU_CLKBKUP_EXT32K_USED ((u16) (1U << 2))
+#define P7MU_CLKBKUP_LDO_OCD_MASK_PGO0 ((u16) (1U << 7))
+
+/****************************************************************************
+ * SPI clock
+ ****************************************************************************/
+#if defined(CONFIG_P7MU_ADC) || defined(CONFIG_P7MU_ADC_MODULE)
+/* Cached SPI clock rate; 0 means "not probed yet". */
+static unsigned long p7mu_spiclk_rate;
+/*
+ * p7mu_get_spiclk_rate() - return the P7MU SPI clock rate in Hz.
+ *
+ * Lazily reads P7MU_CLK_CFG_REG once: if the external clock is in use
+ * the rate is P7MU_SPI_HZ_MAX, otherwise P7MU_SPI_HZ_MIN.  The result
+ * is cached for later calls.
+ *
+ * Return: rate in Hz, or a negative error code from p7mu_read16().
+ */
+long p7mu_get_spiclk_rate(void)
+{
+ int err;
+ u16 reg, msk, val;
+
+ if (p7mu_spiclk_rate)
+ goto out;
+
+ reg = P7MU_CLK_CFG_REG;
+ msk = P7MU_CLK_EXT_USED;
+ err = p7mu_read16(reg, &val);
+ if (err)
+ return (long) err;
+
+ if (val & msk)
+ p7mu_spiclk_rate = P7MU_SPI_HZ_MAX;
+ else
+ p7mu_spiclk_rate = P7MU_SPI_HZ_MIN;
+
+out:
+ return (long) p7mu_spiclk_rate;
+}
+EXPORT_SYMBOL(p7mu_get_spiclk_rate);
+#endif
+
+
+/****************************************************************************
+ * PWM clocks
+ *
+ * Function prototypes are kept compliant with common clock framework (if we
+ * ever need to switch to it).
+ ****************************************************************************/
+
+#if defined(CONFIG_PWM_P7MU) || defined(CONFIG_PWM_P7MU_MODULE)
+
+/* Per-PWM cached clock rates; 0 means "not probed yet". */
+static unsigned long p7mu_pwmclk_rates[P7MU_PWMS_MAX];
+
+/*
+ * p7mu_get_pwmclk_rate() - return the clock rate of PWM block @pwm_id.
+ *
+ * PWM 0 is clocked from the backup block (P7MU_CLKBKUP_CFG_REG), PWM 1
+ * from the main clock block (P7MU_CLK_CFG_REG).  The "fast clock" bit
+ * selects P7MU_PWM_HZ_MAX, otherwise P7MU_PWM_HZ_MIN.  Cached after the
+ * first successful read.
+ *
+ * Return: rate in Hz, or a negative error code from p7mu_read16().
+ */
+long p7mu_get_pwmclk_rate(unsigned int pwm_id)
+{
+ int err;
+ u16 reg, msk, val;
+
+#ifdef DEBUG
+ BUG_ON(pwm_id >= P7MU_PWMS_MAX);
+#endif
+
+ if (p7mu_pwmclk_rates[pwm_id])
+ goto out;
+
+ if (! pwm_id) {
+ reg = P7MU_CLKBKUP_CFG_REG;
+ msk = P7MU_CLKBKUP_FAST_CLK4PWM_0;
+ }
+ else {
+ reg = P7MU_CLK_CFG_REG;
+ msk = P7MU_CLK_FAST_CLK4PWM_1;
+ }
+
+ err = p7mu_read16(reg, &val);
+ if (err)
+ return (long) err;
+
+ if (val & msk)
+ p7mu_pwmclk_rates[pwm_id] = P7MU_PWM_HZ_MAX;
+ else
+ p7mu_pwmclk_rates[pwm_id] = P7MU_PWM_HZ_MIN;
+
+out:
+ return (long) p7mu_pwmclk_rates[pwm_id];
+}
+EXPORT_SYMBOL(p7mu_get_pwmclk_rate);
+
+/*
+ * p7mu_set_pwmclk_rate() - select the clock rate of PWM block @pwm_id.
+ *
+ * @hz must be either P7MU_PWM_HZ_MIN or P7MU_PWM_HZ_MAX (enforced by
+ * BUG_ON under DEBUG only).  No-op when the cached rate already matches.
+ * Updates the "fast clock" bit in the relevant clock config register
+ * and refreshes the cache on success.
+ *
+ * Return: 0 on success, negative error code from p7mu_mod16().
+ */
+int p7mu_set_pwmclk_rate(unsigned int pwm_id, unsigned long hz)
+{
+ int err;
+ u16 reg, msk, val;
+
+#ifdef DEBUG
+ BUG_ON(pwm_id >= P7MU_PWMS_MAX);
+ BUG_ON(hz != P7MU_PWM_HZ_MIN && hz != P7MU_PWM_HZ_MAX);
+#endif
+
+ if (hz == p7mu_pwmclk_rates[pwm_id])
+ return 0;
+
+ if (! pwm_id) {
+ reg = P7MU_CLKBKUP_CFG_REG;
+ msk = P7MU_CLKBKUP_FAST_CLK4PWM_0;
+ }
+ else {
+ reg = P7MU_CLK_CFG_REG;
+ msk = P7MU_CLK_FAST_CLK4PWM_1;
+ }
+
+ if (hz == P7MU_PWM_HZ_MIN)
+ val = 0;
+ else
+ val = msk;
+
+ err = p7mu_mod16(reg, val, msk);
+ if (err)
+ /* NOTE(review): stray (long) cast in an int-returning function;
+ * harmless but inconsistent. */
+ return (long) err;
+
+ p7mu_pwmclk_rates[pwm_id] = hz;
+ return 0;
+}
+EXPORT_SYMBOL(p7mu_set_pwmclk_rate);
+
+#endif /* defined(CONFIG_PWM_P7MU) || defined(CONFIG_PWM_P7MU_MODULE) */
+
+/********************************************
+ * Clock tree initialization implementation.
+ ********************************************/
+
+#define P7MU_CALIB_JIFFIES (HZ * 60 * 15) /* 15 mn calibration period */
+
+struct p7mu_clock_work {
+ struct delayed_work wk;
+ bool int_32k;
+ bool int_32m;
+};
+
+static struct p7mu_clock_work p7mu_clk_wk;
+
+static void p7mu_start_32m_calib(struct work_struct* work);
+
+/*
+ * p7mu_32m_calib_done() - delayed-work handler checking the result of a
+ * 32 MHz oscillator calibration started by p7mu_start_32m_calib().
+ *
+ * On failure (read error or CAL_RDY not set) the calibration is
+ * cancelled and default coefficients restored.  Either way, the work is
+ * re-armed to start a new calibration in P7MU_CALIB_JIFFIES.
+ *
+ * The delayed_work* casts rely on 'wk' being the first member of
+ * struct p7mu_clock_work.
+ */
+static void p7mu_32m_calib_done(struct work_struct* work)
+{
+ u16 cfg;
+ int err = p7mu_read16(P7MU_CFG_OSC_CAL_REG, &cfg);
+
+ if (! err)
+ err = (cfg & P7MU_OSC_CAL_RDY) ? 0 : -ETIME;
+
+ if (err) {
+ /* Cancel calibration and restore default coefficients. */
+ p7mu_write16(P7MU_CFG_OSC_CAL_REG, P7MU_OSC_CAL_SEL);
+
+ dev_warn(&p7mu_i2c->dev,
+ "internal 32mHz calibration failed, "
+ "retrying in %d mn...\n",
+ P7MU_CALIB_JIFFIES / (60 * HZ));
+ }
+ else
+ dev_dbg(&p7mu_i2c->dev,
+ "internal 32mHz oscillator calibration completed "
+ "(coeff = %02x).\n",
+ (cfg & P7MU_OSC_CAL_OUT_MSK) >> P7MU_OSC_CAL_OUT_SFT);
+
+ /* Next run will start a new calibration process. */
+ PREPARE_DELAYED_WORK((struct delayed_work*) &p7mu_clk_wk,
+ &p7mu_start_32m_calib);
+ schedule_delayed_work((struct delayed_work*) &p7mu_clk_wk,
+ P7MU_CALIB_JIFFIES);
+}
+
+/*
+ * p7mu_start_32m_calib() - kick off internal 32 MHz oscillator
+ * calibration against the external 32 kHz reference.
+ *
+ * On write failure, retries the whole start in P7MU_CALIB_JIFFIES.
+ * On success, arms p7mu_32m_calib_done() to check completion at the
+ * next jiffie (calibration is ~215 us long).
+ * Also called directly with a NULL @work from p7mu_init_clk() and
+ * p7mu_x32k_switch_done(); @work is unused.
+ */
+static void p7mu_start_32m_calib(struct work_struct* work)
+{
+ int const err = p7mu_write16(P7MU_CFG_OSC_CAL_REG, P7MU_OSC_CAL_EN);
+
+ dev_dbg(&p7mu_i2c->dev,
+ "starting internal 32mHz oscillator calibration...\n");
+
+ if (err) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to start internal 32mHz calibration, "
+ "retrying in %d mn...\n",
+ P7MU_CALIB_JIFFIES / (60 * HZ));
+ schedule_delayed_work((struct delayed_work*) &p7mu_clk_wk,
+ P7MU_CALIB_JIFFIES);
+ return;
+ }
+
+ /*
+ * Calibration process is about 215 usecs long: schedule check at
+ * next jiffie to allow other queued works to run.
+ */
+ PREPARE_DELAYED_WORK((struct delayed_work*) &p7mu_clk_wk,
+ &p7mu_32m_calib_done);
+ schedule_delayed_work((struct delayed_work*) &p7mu_clk_wk, 1);
+}
+
+/*
+ * p7mu_x32k_switch_done() - delayed-work handler checking whether the
+ * switch to the external 32 kHz xtal (requested by p7mu_init_clk())
+ * has completed.  Retries up to 5 times, one second apart; on success
+ * starts 32 MHz calibration if the internal 32 MHz oscillator is used.
+ *
+ * NOTE(review): two defects to confirm —
+ * 1) when p7mu_read16() fails, cfg_32k is read UNINITIALIZED in the
+ *    revert write below (undefined value written to hardware);
+ * 2) 'retries' is static and never reset, so after exhaustion the
+ *    switch can never be retried (e.g. across suspend/resume).
+ */
+static void __devinit p7mu_x32k_switch_done(struct work_struct* work)
+{
+ /*
+ * For whatever reason, we need to perform 3 retries most of the time to
+ * succeed in switching to external xtal.
+ *
+ */
+ u16 cfg_32k;
+ static int retries = 5;
+ int err = p7mu_read16(P7MU_CLKBKUP_CFG_REG, &cfg_32k);
+
+ if (err) {
+ /*
+ * Internal clock still in use: failure.
+ * Revert back to internal.
+ */
+ cfg_32k &= ~(P7MU_CLKBKUP_EXT32K_EN |
+ P7MU_CLKBKUP_EXT32K_USED);
+ p7mu_write16(P7MU_CLKBKUP_CFG_REG, cfg_32k);
+
+ goto err;
+ }
+
+ if (cfg_32k & P7MU_CLKBKUP_EXT32K_USED) {
+ /* External clock ready to be used. */
+ if (! p7mu_clk_wk.int_32k && p7mu_clk_wk.int_32m)
+ p7mu_start_32m_calib(NULL);
+
+ return;
+ }
+
+ if (retries) {
+ /* Retry in one more second. */
+ retries--;
+ schedule_delayed_work((struct delayed_work*) &p7mu_clk_wk, HZ);
+ return;
+ }
+ else
+ err = -EIO;
+
+err:
+ dev_warn(&p7mu_i2c->dev,
+ "failed to switch to external 32kHz xtal (%d)\n",
+ err);
+ p7mu_clk_wk.int_32k = true;
+}
+
+/*
+ * p7mu_init_clk() - configure the P7MU clock tree from platform data.
+ *
+ * Reads the 32 MHz/48 MHz and 32 kHz clock config registers.  When
+ * pdata->overide_otp is clear, the OTP settings are trusted and only
+ * mismatches against pdata are logged.  Otherwise, switches each clock
+ * between internal oscillator and external source as requested,
+ * reverting (and logging) on failure.  The external-32 kHz switch is
+ * asynchronous: completion is checked by p7mu_x32k_switch_done() after
+ * ~1 s.  When the internal 32 MHz oscillator runs against an external
+ * 32 kHz reference, periodic calibration is started.
+ */
+void p7mu_init_clk(void)
+{
+ struct p7mu_plat_data const* const pdata = dev_get_platdata(&p7mu_i2c->dev);
+ int err;
+ u16 cfg_32k;
+ u16 cfg_32m;
+
+ p7mu_clk_wk.int_32k = pdata->int_32k;
+ p7mu_clk_wk.int_32m = pdata->int_32m;
+
+ /*** start 32mHz / 48 mHz oscillator cfg ***/
+ err = p7mu_read16(P7MU_CLK_CFG_REG, &cfg_32m);
+ if (err) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to setup 32mHz / 48 mHz oscillator (%d)\n",
+ err);
+ return;
+ }
+
+ err = p7mu_read16(P7MU_CLKBKUP_CFG_REG, &cfg_32k);
+ if (err) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to setup 32kHz xtal (%d)\n",
+ err);
+ return;
+ }
+
+ if (!pdata->overide_otp) {
+ /* we should trust otp by default. only check if config
+ look correct */
+ if (p7mu_clk_wk.int_32m != !(cfg_32m & P7MU_CLK_EXT_USED))
+ dev_err(&p7mu_i2c->dev, "32 Mhz clock do not match wanted %s got %s\n",
+ p7mu_clk_wk.int_32m?"int":"ext", cfg_32m & P7MU_CLK_EXT_USED ? "ext":"int");
+
+ /* NOTE(review): the 32 kHz check below tests P7MU_CLK_EXT_USED
+ * against cfg_32k; P7MU_CLKBKUP_EXT32K_USED was probably meant.
+ * Both happen to be bit 2, so behaviour is the same. */
+ if (p7mu_clk_wk.int_32k != !(cfg_32k & P7MU_CLK_EXT_USED))
+ dev_warn(&p7mu_i2c->dev, "32 Khz clock do not match wanted %s got %s\n",
+ p7mu_clk_wk.int_32k?"int":"ext", cfg_32k & P7MU_CLK_EXT_USED ? "ext":"int");
+
+ return;
+ }
+
+ if (! p7mu_clk_wk.int_32m &&
+ ! (cfg_32m & P7MU_CLK_EXT_USED)) {
+ /* pdata want external but it is not enabled */
+ dev_dbg(&p7mu_i2c->dev, "switching to external 48mHz oscillator...\n");
+
+ p7mu_write16(P7MU_CLK_CFG_REG,
+ cfg_32m | P7MU_CLK_EXT_EN);
+
+ err = p7mu_read16(P7MU_CLK_CFG_REG, &cfg_32m);
+ if (! err && ! (cfg_32m & P7MU_CLK_EXT_USED)) {
+ /*
+ * Internal clock still in use: failure.
+ * Revert back to internal.
+ */
+ cfg_32m &= ~(P7MU_CLK_EXT_EN | P7MU_CLK_EXT_USED);
+ p7mu_write16(P7MU_CLK_CFG_REG, cfg_32m);
+ err = -EIO;
+ }
+
+ if (err) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to switch to external 48mHz oscillator (%d)\n",
+ err);
+
+ p7mu_clk_wk.int_32m = true;
+ }
+ }
+
+ if (p7mu_clk_wk.int_32m && (cfg_32m & P7MU_CLK_EXT_USED)) {
+ /* pdata want internal but it is external enabled */
+ dev_dbg(&p7mu_i2c->dev, "switching to internal 32mHz oscillator...\n");
+
+ err = p7mu_write16(P7MU_CLK_CFG_REG,
+ cfg_32m & ~P7MU_CLK_EXT_EN);
+ if (err) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to switch to internal 32mHz oscillator (%d)\n",
+ err);
+
+ p7mu_clk_wk.int_32m = false;
+ }
+ }
+
+ /*** Setup 32kHz xtal. ***/
+
+ INIT_DELAYED_WORK_DEFERRABLE(&p7mu_clk_wk.wk, NULL);
+
+ if (! p7mu_clk_wk.int_32k &&
+ ! (cfg_32k & P7MU_CLKBKUP_EXT32K_USED)) {
+ /* pdata want external but it is not enabled */
+ dev_dbg(&p7mu_i2c->dev, "switching to external 32kHz xtal...\n");
+
+ err = p7mu_write16(P7MU_CLKBKUP_CFG_REG,
+ cfg_32k | P7MU_CLKBKUP_EXT32K_EN);
+ if (! err) {
+ /* Switching to external clock may long up to 1 second. */
+ PREPARE_DELAYED_WORK((struct delayed_work*) &p7mu_clk_wk,
+ &p7mu_x32k_switch_done);
+ schedule_delayed_work((struct delayed_work*) &p7mu_clk_wk, HZ + 1);
+
+ return;
+ }
+
+ dev_warn(&p7mu_i2c->dev,
+ "failed to switch to external 32kHz xtal (%d)\n",
+ err);
+
+ p7mu_clk_wk.int_32k = true;
+ }
+
+ if (p7mu_clk_wk.int_32k &&
+ (cfg_32k & P7MU_CLKBKUP_EXT32K_USED)) {
+ /* pdata want internal but it is external enabled */
+ dev_dbg(&p7mu_i2c->dev, "switching to internal 32kHz xtal...\n");
+
+ err = p7mu_write16(P7MU_CLKBKUP_CFG_REG,
+ cfg_32k & ~P7MU_CLKBKUP_EXT32K_EN);
+ if (err)
+ dev_warn(&p7mu_i2c->dev,
+ "failed to switch to internal 32kHz xtal (%d)\n",
+ err);
+
+ p7mu_clk_wk.int_32k = false;
+ }
+
+ /*
+ * When external 32kHz xtal is used, it serves as a reference clock for
+ * internal 32mHz oscillator and needs periodic calibration.
+ */
+ if (! p7mu_clk_wk.int_32k && p7mu_clk_wk.int_32m)
+ p7mu_start_32m_calib(NULL);
+}
+
+/* p7mu_exit_clk() - stop any pending calibration / switch delayed work. */
+void p7mu_exit_clk(void)
+{
+ cancel_delayed_work_sync((struct delayed_work*) &p7mu_clk_wk);
+}
+
+/*
+ * p7mu_resume_clk() - restore clock setup after resume.
+ *
+ * When OTP settings are trusted (!overide_otp), re-reads the OTP shadow
+ * at register 0xe19 and writes it back to P7MU_CLK_CFG_REG.
+ * NOTE(review): the p7mu_read16() result is unchecked here, and 0xe19
+ * is a magic address — presumably the OTP copy of CLK_CFG; confirm.
+ * Otherwise re-runs the full p7mu_init_clk() sequence.
+ */
+void p7mu_resume_clk(void)
+{
+ struct p7mu_plat_data const* const pdata = dev_get_platdata(&p7mu_i2c->dev);
+ if (!pdata->overide_otp) {
+ u16 otp;
+ /* restore 32Mhz */
+ p7mu_read16(0xe19, &otp);
+ p7mu_write16(P7MU_CLK_CFG_REG,
+ otp);
+ return;
+ }
+ p7mu_init_clk();
+}
diff --git a/drivers/parrot/mfd/p7mu-clk.h b/drivers/parrot/mfd/p7mu-clk.h
new file mode 100644
index 00000000000000..018323b7d6a39e
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu-clk.h
@@ -0,0 +1,50 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu-clk.h - Parrot7 power management unit internal
+ * clocks interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 21-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7MU_CLK_H
+#define _P7MU_CLK_H
+
+#include <linux/init.h>
+
+#if defined(CONFIG_P7MU_ADC) || defined(CONFIG_P7MU_ADC_MODULE)
+
+#define P7MU_SPI_HZ_MIN (32000000UL)
+#define P7MU_SPI_HZ_MAX (48000000UL)
+
+extern long p7mu_get_spiclk_rate(void);
+
+#endif
+
+#if defined(CONFIG_PWM_P7MU) || defined(CONFIG_PWM_P7MU_MODULE)
+
+#define P7MU_PWM_HZ_MIN (32768UL)
+#define P7MU_PWM_HZ_MAX (2000000UL)
+
+/*
+ * p7mu_round_pwmclk_rate() - round a requested PWM clock rate to one of
+ * the two rates the hardware supports.
+ *
+ * Return: -ERANGE above P7MU_PWM_HZ_MAX; P7MU_PWM_HZ_MIN for requests
+ * below P7MU_PWM_HZ_MIN/256 (the slow clock still offers fine PWM
+ * granularity there); P7MU_PWM_HZ_MAX otherwise.
+ */
+static inline long p7mu_round_pwmclk_rate(unsigned long hz)
+{
+ if (hz > P7MU_PWM_HZ_MAX)
+ return -ERANGE;
+
+ /* Divide by 256 to allow finer granularity when using 32kHz clock. */
+ return (hz < (P7MU_PWM_HZ_MIN / 256)) ? P7MU_PWM_HZ_MIN : P7MU_PWM_HZ_MAX;
+}
+
+extern long p7mu_get_pwmclk_rate(unsigned int);
+extern int p7mu_set_pwmclk_rate(unsigned int, unsigned long);
+
+#endif
+
+extern void p7mu_init_clk(void);
+extern void p7mu_exit_clk(void);
+extern void p7mu_resume_clk(void);
+
+#endif
diff --git a/drivers/parrot/mfd/p7mu-core.c b/drivers/parrot/mfd/p7mu-core.c
new file mode 100644
index 00000000000000..3a6a971101ba23
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu-core.c
@@ -0,0 +1,1174 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu.c - Parrot7 power management unit core driver
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 10-May-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ratelimit.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/mfd/core.h>
+#include <linux/fs.h>
+#include <asm/system_misc.h>
+#include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
+#include "p7mu.h"
+#include "p7mu-pin.h"
+#include "p7mu-clk.h"
+
+/*
+ * Configuration block registers
+ */
+#define P7MU_CFG_DCDC0_CFG_0_REG (P7MU_CFG_PAGE + 0x00)
+#define P7MU_CFG_DCDC0_CFG_1_REG (P7MU_CFG_PAGE + 0x01)
+#define P7MU_CFG_DCDC1_CFG_0_REG (P7MU_CFG_PAGE + 0x02)
+#define P7MU_CFG_DCDC1_CFG_1_REG (P7MU_CFG_PAGE + 0x03)
+#define P7MU_CFG_DCDC2_CFG_0_REG (P7MU_CFG_PAGE + 0x04)
+#define P7MU_CFG_DCDC2_CFG_1_REG (P7MU_CFG_PAGE + 0x05)
+#define P7MU_CFG_DCDC3_CFG_0_REG (P7MU_CFG_PAGE + 0x06)
+#define P7MU_CFG_DCDC3_CFG_1_REG (P7MU_CFG_PAGE + 0x07)
+#define P7MU_CFG_DCDC_PGATE_DEL_REG (P7MU_CFG_PAGE + 0x08)
+#define P7MU_CFG_DCDC_NGATE_DEL_REG (P7MU_CFG_PAGE + 0x09)
+#define P7MU_CFG_LDOS_CFG_REG (P7MU_CFG_PAGE + 0x0a)
+#define P7MU_CFG_TS_CAL_REG (P7MU_CFG_PAGE + 0x0d)
+#define P7MU_CFG_TS_POINT_0_REG (P7MU_CFG_PAGE + 0x0e)
+#define P7MU_CFG_TS_POINT_1_REG (P7MU_CFG_PAGE + 0x0f)
+#define P7MU_CFG_CORE_CFG_REG (P7MU_CFG_PAGE + 0x10)
+#define P7MU_CFG_TEST_MUX0_REG (P7MU_CFG_PAGE + 0x11)
+#define P7MU_CFG_TEST_MUX1_REG (P7MU_CFG_PAGE + 0x12)
+#define P7MU_CFG_TEST_MUX2_REG (P7MU_CFG_PAGE + 0x13)
+#define P7MU_CFG_TEST_MUX3_REG (P7MU_CFG_PAGE + 0x14)
+#define P7MU_CFG_TEST_MUX4_REG (P7MU_CFG_PAGE + 0x15)
+#define P7MU_CFG_TEST_MUX5_REG (P7MU_CFG_PAGE + 0x16)
+#define P7MU_CFG_TEST_MUX6_REG (P7MU_CFG_PAGE + 0x17)
+#define P7MU_CFG_TEST_MUX7_REG (P7MU_CFG_PAGE + 0x18)
+#define P7MU_CFG_TEST_DDR_CFG_REG (P7MU_CFG_PAGE + 0x19)
+#define P7MU_CFG_ID_0_REG (P7MU_CFG_PAGE + 0x1a)
+#define P7MU_CFG_ID_1_REG (P7MU_CFG_PAGE + 0x1b)
+#define P7MU_CFG_ID_2_REG (P7MU_CFG_PAGE + 0x1c)
+#define P7MU_CFG_ID_3_REG (P7MU_CFG_PAGE + 0x1d)
+
+#define P7MU_CFG_ID P7MU_CFG_ID_0_REG
+
+/*
+ * Power Management Control block registers
+ */
+#define P7MU_PMC_PAGE ((u16) 0x0100)
+#define P7MU_PMC_POWER_SUPPLY_CTRL_REG ((u16) P7MU_PMC_PAGE + 0x00)
+#define P7MU_PMC_GPIO_EVENT_SEL_REG ((u16) P7MU_PMC_PAGE + 0x01)
+#define P7MU_PMC_GPIO_REG ((u16) P7MU_PMC_PAGE + 0x02)
+#define P7MU_PMC_INTERRUPT_0_REG ((u16) P7MU_PMC_PAGE + 0x03)
+#define P7MU_PMC_INTERRUPT_1_REG ((u16) P7MU_PMC_PAGE + 0x04)
+#define P7MU_PMC_INTERRUPT_EN_0_REG ((u16) P7MU_PMC_PAGE + 0x05)
+#define P7MU_PMC_INTERRUPT_EN_1_REG ((u16) P7MU_PMC_PAGE + 0x06)
+#define P7MU_PMC_CMD_REG ((u16) P7MU_PMC_PAGE + 0x07)
+#define P7MU_PMC_DEBUG_REG ((u16) P7MU_PMC_PAGE + 0x08)
+#define P7MU_PMC_STATUS_REG ((u16) P7MU_PMC_PAGE + 0x09)
+#define P7MU_PMC_TEMP_ERR_CNT_REG ((u16) P7MU_PMC_PAGE + 0x0a)
+#define P7MU_PMC_TEMP_ERR_CTRL_REG ((u16) P7MU_PMC_PAGE + 0x0b)
+#define P7MU_PMC_TEMP_ERR_TIME_REG ((u16) P7MU_PMC_PAGE + 0x0c)
+#define P7MU_PMC_POWER_SUPPLY_MASK_REG ((u16) P7MU_PMC_PAGE + 0x0d)
+#define P7MU_PMC_POWER_SUPPLY_FILTER_REG ((u16) P7MU_PMC_PAGE + 0x0e)
+#define P7MU_PMC_POWER_SUPPLY_STATUS_REG ((u16) P7MU_PMC_PAGE + 0x0f)
+#define P7MU_PMC_POWER_SUPPLY_CTRL_IDLE_REG ((u16) P7MU_PMC_PAGE + 0x10)
+
+#define P7MU_CMD_SHUTDOWN ((u16) (1U << 0))
+#define P7MU_CMD_STANDBY ((u16) (1U << 1))
+#define P7MU_CMD_REBOOT ((u16) (1U << 2))
+#define P7MU_CMD_RESET ((u16) (1U << 3))
+#define P7MU_CMD_RUN ((u16) (1U << 4))
+
+#define P7MU_INT_PWREN ((u16) (1U << 0))
+#define P7MU_INT_TEMP_WARN ((u16) (1U << 1))
+#define P7MU_INT_TEMP_ERR ((u16) (1U << 2))
+#define P7MU_INT_GPIO_1 ((u16) (1U << 3))
+#define P7MU_INT_GPIO_2 ((u16) (1U << 4))
+#define P7MU_INT_GPIO_3 ((u16) (1U << 5))
+#define P7MU_INT_RTC ((u16) (1U << 6))
+#define P7MU_INT_MEASUREMENT_ERROR ((u16) (1U << 7))
+#define P7MU_INT_FIFO_OVERFLOW ((u16) (1U << 8))
+#define P7MU_INT_DET0_DOWN ((u16) (1U << 9))
+#define P7MU_INT_DET0_UP ((u16) (1U << 10))
+#define P7MU_INT_DET1_DOWN ((u16) (1U << 11))
+#define P7MU_INT_DET1_UP ((u16) (1U << 12))
+#define P7MU_INT_POWER ((u16) (1U << 13))
+#define P7MU_INT_REBOOT ((u16) (1U << 14))
+#define P7MU_INT_EN ((u16) (1U << 15))
+
+#define P7MU_INT_GPIO_4 ((u16) (1U << 0))
+#define P7MU_INT_GPIO_5 ((u16) (1U << 1))
+#define P7MU_INT_DET2_DOWN ((u16) (1U << 2))
+#define P7MU_INT_DET2_UP ((u16) (1U << 3))
+#define P7MU_INT_DET3_DOWN ((u16) (1U << 4))
+#define P7MU_INT_DET3_UP ((u16) (1U << 5))
+
+/*
+ * RTC block registers
+ */
+#define P7MU_RTC_PAGE ((u16) 0x0600)
+#define P7MU_RTC_SZ 7
+
+/*
+ * PWM block registers
+ */
+#define P7MU_PWM0_PAGE ((u16) 0x0700)
+#define P7MU_PWM1_PAGE ((u16) 0x0800)
+#define P7MU_PWM_SZ 3
+
+/*
+ * ADC block registers
+ */
+#define P7MU_ADC_PAGE ((u16) 0x0500)
+#define P7MU_ADC_SZ 0x34
+
+/*
+ * SPI block register
+ */
+#define P7MU_SPI_PAGE ((u16) 0x0400)
+#define P7MU_SPI_SZ 2
+
+/*
+ * Application register
+ */
+#define P7MU_APPL_PAGE ((u16) 0x900)
+#define P7MU_APPL_SZ 16
+
+#define P7MU_APPL_DATA00_REG (P7MU_APPL_PAGE + 0x00)
+#define P7MU_APPL_DATA01_REG (P7MU_APPL_PAGE + 0x01)
+#define P7MU_APPL_DATA02_REG (P7MU_APPL_PAGE + 0x02)
+#define P7MU_APPL_DATA03_REG (P7MU_APPL_PAGE + 0x03)
+#define P7MU_APPL_DATA04_REG (P7MU_APPL_PAGE + 0x04)
+#define P7MU_APPL_DATA05_REG (P7MU_APPL_PAGE + 0x05)
+#define P7MU_APPL_DATA06_REG (P7MU_APPL_PAGE + 0x06)
+#define P7MU_APPL_DATA07_REG (P7MU_APPL_PAGE + 0x07)
+#define P7MU_APPL_DATA08_REG (P7MU_APPL_PAGE + 0x08)
+#define P7MU_APPL_DATA09_REG (P7MU_APPL_PAGE + 0x09)
+#define P7MU_APPL_DATA10_REG (P7MU_APPL_PAGE + 0x0a)
+#define P7MU_APPL_DATA11_REG (P7MU_APPL_PAGE + 0x0b)
+#define P7MU_APPL_DATA12_REG (P7MU_APPL_PAGE + 0x0c)
+#define P7MU_APPL_DATA13_REG (P7MU_APPL_PAGE + 0x0d)
+#define P7MU_APPL_DATA14_REG (P7MU_APPL_PAGE + 0x0e)
+#define P7MU_APPL_DATA15_REG (P7MU_APPL_PAGE + 0x0f)
+
+
+#define P7MU_REG_MAX ((u16) 0x0f0c)
+
+/***********************
+ * Global declarations.
+ ***********************/
+
+#define P7MU_PWREN_IRQ 0
+#define P7MU_TEMP_WARN_IRQ 1
+#define P7MU_TEMP_ERR_IRQ 2
+#define P7MU_GPIO_1_IRQ 3
+#define P7MU_GPIO_2_IRQ 4
+#define P7MU_GPIO_3_IRQ 5
+#define P7MU_RTC_IRQ 6
+#define P7MU_MEASUREMENT_ERROR_IRQ 7
+#define P7MU_FIFO_OVERFLOW_IRQ 8
+#define P7MU_DET0_DOWN_IRQ 9
+#define P7MU_DET0_UP_IRQ 10
+#define P7MU_DET1_DOWN_IRQ 11
+#define P7MU_DET1_UP_IRQ 12
+#define P7MU_POWER_IRQ 13
+#define P7MU_REBOOT_IRQ 14
+#define P7MU_GPIO_4_IRQ 16
+#define P7MU_GPIO_5_IRQ 17
+#define P7MU_DET2_DOWN_IRQ 18
+#define P7MU_DET2_UP_IRQ 19
+#define P7MU_DET3_DOWN_IRQ 20
+#define P7MU_DET3_UP_IRQ 21
+
+#define P7MU_IRQ_NR 22
+#define DEFAULT_IRQ (P7MU_INT_EN|P7MU_INT_TEMP_WARN|P7MU_INT_TEMP_ERR|P7MU_INT_POWER|P7MU_INT_PWREN|P7MU_INT_REBOOT|P7MU_INT_POWER)
+
+static struct resource p7mu_iores = {
+ .name = P7MU_DRV_NAME " io",
+ .start = 0,
+ .end = P7MU_REG_MAX,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static struct resource p7mu_irqres = {
+ .name = P7MU_DRV_NAME " irq",
+ .start = 0,
+ .end = P7MU_IRQ_NR - 1,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY
+};
+
+static struct resource p7mu_rtc_res[] __devinitdata = {
+ {
+ .start = P7MU_RTC_IRQ,
+ .end = P7MU_RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ .parent = &p7mu_irqres
+ },
+ {
+ .start = P7MU_RTC_PAGE,
+ .end = P7MU_RTC_PAGE + P7MU_RTC_SZ - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ }
+};
+
+static struct resource p7mu_gpio_res[] __devinitdata = {
+ {
+ .start = P7MU_PMC_GPIO_REG,
+ .end = P7MU_PMC_GPIO_REG + sizeof(u16) - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ }
+};
+
+static struct resource p7mu_adc_res[] __devinitdata = {
+ { /* ADC */
+ .start = P7MU_ADC_PAGE,
+ .end = P7MU_ADC_PAGE + P7MU_ADC_SZ - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ },
+ { /* SPI */
+ .start = P7MU_SPI_PAGE,
+ .end = P7MU_SPI_PAGE + sizeof(u16) - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ }
+};
+
+static struct resource p7mu_pwm_res[] __devinitdata = {
+ { /* PWM0 */
+ .start = P7MU_PWM0_PAGE,
+ .end = P7MU_PWM0_PAGE + P7MU_PWM_SZ - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ },
+ { /* PWM1 */
+ .start = P7MU_PWM1_PAGE,
+ .end = P7MU_PWM1_PAGE + P7MU_PWM_SZ - 1,
+ .flags = IORESOURCE_IO,
+ .parent = &p7mu_iores
+ }
+};
+
+static struct mfd_cell p7mu_cells[] __devinitdata = {
+ { /* RTC */
+ .name = P7MU_DRV_NAME "-rtc",
+ .id = -1,
+ .resources = p7mu_rtc_res,
+ .num_resources = ARRAY_SIZE(p7mu_rtc_res)
+ },
+ { /* GPIO */
+ .name = P7MU_DRV_NAME "-gpio",
+ .id = -1,
+ .resources = p7mu_gpio_res,
+ .num_resources = ARRAY_SIZE(p7mu_gpio_res)
+ },
+ { /* ADC */
+ .name = P7MU_DRV_NAME "-adc",
+ .id = -1,
+ .resources = p7mu_adc_res,
+ .num_resources = ARRAY_SIZE(p7mu_adc_res)
+ },
+ { /* PWM */
+ .name = P7MU_DRV_NAME "-pwm",
+ .id = -1,
+ .resources = p7mu_pwm_res,
+ .num_resources = ARRAY_SIZE(p7mu_pwm_res)
+ }
+};
+
+struct p7mu_irq {
+ u16 const msk;
+ unsigned const reg:1;
+ const char *name;
+};
+
+#define P7MU_INIT_IRQ(_name, _reg) \
+ [P7MU_ ## _name ## _IRQ] = { .msk = P7MU_INT_ ## _name, .reg = _reg, .name = #_name }
+
+/* Table of all P7MU interrupt sources, indexed by virtual IRQ offset. */
+static struct p7mu_irq p7mu_irqs[] = {
+ /* Interrupt register 0 */
+ P7MU_INIT_IRQ(PWREN, 0),
+ P7MU_INIT_IRQ(TEMP_WARN, 0),
+ P7MU_INIT_IRQ(TEMP_ERR, 0),
+ P7MU_INIT_IRQ(GPIO_1, 0),
+ P7MU_INIT_IRQ(GPIO_2, 0),
+ P7MU_INIT_IRQ(GPIO_3, 0),
+ P7MU_INIT_IRQ(RTC, 0),
+ P7MU_INIT_IRQ(MEASUREMENT_ERROR, 0),
+ P7MU_INIT_IRQ(FIFO_OVERFLOW, 0),
+ P7MU_INIT_IRQ(DET0_DOWN, 0),
+ P7MU_INIT_IRQ(DET0_UP, 0),
+ P7MU_INIT_IRQ(DET1_DOWN, 0),
+ P7MU_INIT_IRQ(DET1_UP, 0),
+ P7MU_INIT_IRQ(POWER, 0),
+ P7MU_INIT_IRQ(REBOOT, 0),
+ /* Interrupt register 1 */
+ P7MU_INIT_IRQ(GPIO_4, 1),
+ P7MU_INIT_IRQ(GPIO_5, 1),
+ P7MU_INIT_IRQ(DET2_DOWN, 1),
+ P7MU_INIT_IRQ(DET2_UP, 1),
+ P7MU_INIT_IRQ(DET3_DOWN, 1),
+ P7MU_INIT_IRQ(DET3_UP, 1)
+};
+
+/* I2C client bound in p7mu_probe_i2c(); exported so sub-drivers can reach the chip. */
+struct i2c_client* p7mu_i2c;
+EXPORT_SYMBOL(p7mu_i2c);
+/* Pin control handle taken at probe time, released at remove. */
+static struct pinctrl* p7mu_pctl;
+
+/*
+ * Driver-side interrupt controller state. irq_mask is the wanted enable
+ * mask, irq_en mirrors what was last written to the chip; both are synced
+ * in p7mu_irq_bus_sync_unlock(). gpio_ev/gpio_ev_update play the same
+ * shadow/update roles for the GPIO event configuration register.
+ */
+struct p7mu_chip {
+ struct mutex irq_lock;
+ uint32_t irq_mask;
+ uint32_t irq_en;
+ int irq_base;
+ u16 gpio_ev_update;
+ u16 gpio_ev;
+} p7mu_chip;
+
+/* Boot reason codes persisted in P7MU_APPL_DATA02_REG across reboots;
+ * indices into boot_reasons_str[] below. */
+#define P7MU_BOOT_REASON_FIRST_BOOT 0
+#define P7MU_BOOT_REASON_SOFT_REBOOT 1
+#define P7MU_BOOT_REASON_HARD_REBOOT 2
+#define P7MU_BOOT_REASON_P7MU_PWREN 3
+#define P7MU_BOOT_REASON_P7MU_REBOOT 4
+#define P7MU_BOOT_REASON_P7MU_POWER 5
+#define P7MU_BOOT_REASON_OTHER 6
+/* Snapshot values exposed through the sysfs attributes defined below. */
+static u16 boot_reason;
+static u16 irq_before_reboot = 0;
+static u16 pending_irq = 0;
+static u16 power_supply_status = 0;
+static u16 status_register = 0;
+static u16 temperature_error_count = 0;
+
+/* Human-readable boot reason names, indexed by P7MU_BOOT_REASON_*. */
+static char *boot_reasons_str[] = {
+ "first boot",
+ "soft reboot",
+ "hard reboot",
+ "pwr_en boot",
+ "p7mu reboot",
+ "power reboot",
+ "other",
+};
+
+/*
+ * Core register access primitive: transfer "cnt" 16-bit words to/from chip
+ * register "offset" (caller passes it already big-endian, see p7mu_read16).
+ * The offset write and the data phase are issued as a single two-message
+ * I2C transaction; writes use I2C_M_NOSTART to append data to the offset
+ * message. Returns 0 on success or a negative errno.
+ */
+int p7mu_transfer(u16 offset, u16* buff, size_t cnt, bool read)
+{
+ struct i2c_msg msg[2];
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! p7mu_i2c);
+ BUG_ON((cnt * sizeof(*buff)) > (size_t) ((u16) (~0U)));
+#endif
+
+ /* Message 0: register offset. */
+ msg[0].addr = p7mu_i2c->addr;
+ msg[0].flags = 0;
+ msg[0].len = (u16) sizeof(offset);
+ msg[0].buf = (u8*) &offset;
+
+ /* Message 1: data, read back or appended without a repeated start. */
+ msg[1].addr = p7mu_i2c->addr;
+ msg[1].flags = read ? I2C_M_RD : I2C_M_NOSTART;
+ msg[1].len = (u16) (cnt * sizeof(*buff));
+ msg[1].buf = (u8*) buff;
+
+ err = i2c_transfer(p7mu_i2c->adapter, msg, ARRAY_SIZE(msg));
+ if (err == ARRAY_SIZE(msg))
+ return 0;
+ else if (err < 0)
+ return err;
+ else
+ /* Partial transfer: report as generic I/O failure. */
+ return -EIO;
+}
+EXPORT_SYMBOL(p7mu_transfer);
+
+/*
+ * Read-modify-write of the 16-bit register at "off": replace the bits
+ * selected by "msk" with "val". Returns 0 on success or a negative errno.
+ */
+int p7mu_mod16(u16 off, u16 val, u16 msk)
+{
+	u16 cur;
+	int ret;
+
+	ret = p7mu_read16(off, &cur);
+	if (ret)
+		return ret;
+
+	/* Skip the I2C write when the masked field already holds val. */
+	if ((cur & msk) == val)
+		return 0;
+
+	return p7mu_write16(off, (cur & ~msk) | val);
+}
+EXPORT_SYMBOL(p7mu_mod16);
+
+/********************************
+ * Interrupt controller handling
+ ********************************/
+
+/* Mark one interrupt disabled; the hardware write happens at bus unlock. */
+static void p7mu_irq_mask(struct irq_data *d)
+{
+	struct p7mu_chip *chip = irq_data_get_irq_chip_data(d);
+	unsigned int bit = d->irq - chip->irq_base;
+
+	chip->irq_mask &= ~(1U << bit);
+}
+
+/* Mark one interrupt enabled; the hardware write happens at bus unlock. */
+static void p7mu_irq_unmask(struct irq_data *d)
+{
+	struct p7mu_chip *chip = irq_data_get_irq_chip_data(d);
+	unsigned int bit = d->irq - chip->irq_base;
+
+	chip->irq_mask |= 1U << bit;
+}
+
+/* Serialize irq_chip state updates; paired with p7mu_irq_bus_sync_unlock(). */
+static void p7mu_irq_bus_lock(struct irq_data *d)
+{
+ struct p7mu_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+/*
+ * Flush any pending mask / GPIO event configuration changes to the chip
+ * over I2C, then release the lock taken in p7mu_irq_bus_lock(). Doing the
+ * slow bus accesses here (instead of in mask/unmask) is the standard
+ * pattern for irq chips living behind a sleeping bus.
+ */
+static void p7mu_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct p7mu_chip *chip = irq_data_get_irq_chip_data(d);
+
+ if (chip->irq_en != chip->irq_mask) {
+ p7mu_write16(P7MU_PMC_INTERRUPT_EN_0_REG, chip->irq_mask & 0xffff);
+ p7mu_write16(P7MU_PMC_INTERRUPT_EN_1_REG, chip->irq_mask >> 16);
+
+ chip->irq_en = chip->irq_mask;
+ }
+
+ /* 0x101: GPIO event configuration register — presumably one of the
+ * P7MU_PMC_GPIO* registers; TODO confirm against the datasheet. */
+ if (chip->gpio_ev_update != chip->gpio_ev) {
+ p7mu_write16(0x101, chip->gpio_ev_update);
+ chip->gpio_ev = chip->gpio_ev_update;
+ }
+
+ mutex_unlock(&chip->irq_lock);
+}
+
+/*
+ * Configure the trigger type of a P7MU GPIO interrupt. Only the five GPIO
+ * sources (virtual IRQ offsets 3, 4, 5, 16 and 17) are configurable; other
+ * sources are silently accepted. The new 2-bit event mode is staged in
+ * chip->gpio_ev_update and committed to hardware by
+ * p7mu_irq_bus_sync_unlock().
+ *
+ * Fix: every switch case was missing its break, so cases 3/4/5/16 all fell
+ * through and "level" always ended up 4 — every GPIO was configured in the
+ * last event slot.
+ */
+static int p7mu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct p7mu_chip *chip = irq_data_get_irq_chip_data(d);
+	int num = d->irq - chip->irq_base;
+	uint16_t level;
+	u16 p7mu_gpio_ev;
+
+	/* Map the IRQ offset to its 2-bit event slot (GPIO 1..5). */
+	switch (num) {
+	case 3:
+		level = 0;
+		break;
+	case 4:
+		level = 1;
+		break;
+	case 5:
+		level = 2;
+		break;
+	case 16:
+		level = 3;
+		break;
+	case 17:
+		level = 4;
+		break;
+	default:
+		/* Not a GPIO interrupt: nothing to configure. */
+		return 0;
+	}
+
+	/* Clear the slot, then encode the requested trigger mode. */
+	p7mu_gpio_ev = chip->gpio_ev_update;
+	p7mu_gpio_ev &= ~(3 << (level * 2));
+
+	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+		p7mu_gpio_ev |= 2 << (level * 2);
+	else if (type & IRQ_TYPE_EDGE_FALLING)
+		p7mu_gpio_ev |= 3 << (level * 2);
+	else if (type & IRQ_TYPE_EDGE_RISING)
+		/* Rising-edge-only triggering is not supported. */
+		return -EINVAL;
+	else if (type & IRQ_TYPE_LEVEL_HIGH)
+		p7mu_gpio_ev |= 0 << (level * 2);
+	else if (type & IRQ_TYPE_LEVEL_LOW)
+		p7mu_gpio_ev |= 1 << (level * 2);
+
+	chip->gpio_ev_update = p7mu_gpio_ev;
+
+	return 0;
+}
+
+/* irq_chip driving the P7MU interrupt controller over the (sleeping) I2C bus. */
+static struct irq_chip p7mu_irq_chip = {
+ .name = "p7mu",
+ .irq_enable = p7mu_irq_unmask,
+ .irq_disable = p7mu_irq_mask,
+ .irq_bus_lock = p7mu_irq_bus_lock,
+ .irq_bus_sync_unlock = p7mu_irq_bus_sync_unlock,
+ .irq_set_type = p7mu_gpio_irq_set_type,
+};
+
+/*
+ * Allocate a contiguous range of virtual IRQ descriptors for the P7MU
+ * interrupt sources, wire them to p7mu_irq_chip as nested-threaded
+ * interrupts, and load the current GPIO event configuration shadow.
+ * Returns 0 on success, -ENODEV if descriptor allocation fails.
+ */
+static int p7mu_setup_irq(struct p7mu_chip *chip)
+{
+ int lvl;
+ const int irq_base = P7_IRQS;
+ const int irq_number = P7MU_GPIO_IRQS;
+
+ mutex_init(&chip->irq_lock);
+
+ chip->irq_base = irq_alloc_descs(-1, irq_base, irq_number, -1);
+ if (chip->irq_base < 0)
+ goto out_failed;
+
+ for (lvl = 0; lvl < irq_number; lvl++) {
+ int irq = lvl + chip->irq_base;
+
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
+ irq_set_chip_data(irq, chip);
+ irq_set_chip(irq, &p7mu_irq_chip);
+ irq_set_nested_thread(irq, true);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ }
+
+ /* Seed the GPIO event shadow from register 0x101 (see bus_sync_unlock). */
+ p7mu_read16(0x101, &chip->gpio_ev);
+ chip->gpio_ev_update = chip->gpio_ev;
+
+
+ /* Translate the RTC IRQ resource from chip-relative to a virtual IRQ
+ * number. NOTE(review): the "start = end += base" form only works
+ * because start == end (single IRQ resource) — confirm. */
+ p7mu_rtc_res[0].start = p7mu_rtc_res[0].end += chip->irq_base;
+ dev_info(&p7mu_i2c->dev, "p7mu irq %d %d\n", chip->irq_base, chip->irq_base+irq_number);
+ return 0;
+
+out_failed:
+ return -ENODEV;
+}
+
+/*
+ * Threaded handler for the P7MU interrupt GPIO line. Reads both interrupt
+ * status registers, dispatches enabled sources either to the internal
+ * default handling (DEFAULT_IRQ sources: snapshot status registers, record
+ * boot reason, notify sysfs) or to nested client handlers, then
+ * acknowledges everything in one write per register.
+ */
+static irqreturn_t p7mu_handle_irq(int irq, void* dev_id)
+{
+ struct p7mu_chip *chip = dev_id;
+ u16 stat_raw[2];
+ u16 stat[2];
+ u16 status;
+ irqreturn_t ret = IRQ_HANDLED;
+ int tmp, tmp1;
+
+ tmp = p7mu_read16(P7MU_PMC_INTERRUPT_0_REG, &stat_raw[0]);
+ tmp1 = p7mu_read16(P7MU_PMC_INTERRUPT_1_REG, &stat_raw[1]);
+ if (tmp || tmp1) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to get interrupts status (%d %d)\n",
+ tmp, tmp1);
+ ret = IRQ_NONE;
+ }
+
+ if (stat_raw[0] == 0 && stat_raw[1] == 0) {
+ dev_warn(&p7mu_i2c->dev, "spurious interrupt ?\n");
+
+ ret = IRQ_NONE;
+ }
+
+ if (ret == IRQ_NONE)
+ return IRQ_NONE;
+
+ /* Re-prove below that at least one enabled source actually fired. */
+ ret = IRQ_NONE;
+
+ /* XXX we could have race here. irq is triggered on p7mu, but while we are
+ handling p7mu irq (i2c sleep), we disable irq.
+ */
+ /* Serve enabled interrutps only. */
+ stat[0] = chip->irq_en & stat_raw[0];
+ stat[1] = (chip->irq_en >> 16) & stat_raw[1];
+
+ pending_irq = 0;
+ for (tmp = 0; tmp < ARRAY_SIZE(p7mu_irqs); tmp++) {
+ struct p7mu_irq const* const i = &p7mu_irqs[tmp];
+
+ if (!(stat_raw[i->reg] & i->msk))
+ continue;
+
+ if (stat[i->reg] & i->msk) {
+ /* DEFAULT_IRQ sources are handled inline: snapshot chip
+ * state and persist the boot reason for post-mortem. */
+ if (i->reg == 0 && (i->msk & DEFAULT_IRQ)) {
+ p7mu_read16(P7MU_PMC_POWER_SUPPLY_STATUS_REG, &power_supply_status);
+ p7mu_read16(P7MU_PMC_STATUS_REG, &status_register);
+ p7mu_read16(P7MU_PMC_TEMP_ERR_CNT_REG, &temperature_error_count);
+
+ if (i->msk == P7MU_INT_PWREN)
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_P7MU_PWREN);
+ else if (i->msk == P7MU_INT_REBOOT)
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_P7MU_REBOOT);
+ else if (i->msk == P7MU_INT_POWER) {
+ p7mu_write16(P7MU_APPL_DATA06_REG, power_supply_status);
+ p7mu_read16(P7MU_PMC_POWER_SUPPLY_FILTER_REG, &status);
+ p7mu_write16(P7MU_APPL_DATA07_REG, status);
+ p7mu_read16(P7MU_PMC_POWER_SUPPLY_MASK_REG, &status);
+ p7mu_write16(P7MU_APPL_DATA08_REG, status);
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_P7MU_POWER);
+ }
+
+ dev_warn(&p7mu_i2c->dev, "irq : %s\n", i->name);
+ pending_irq |= i->msk;
+ }
+ else
+ /* Client-owned source: hand off to its nested handler. */
+ handle_nested_irq(chip->irq_base + tmp);
+
+ ret = IRQ_HANDLED;
+ }
+ else
+ dev_warn(&p7mu_i2c->dev, "spurious irq : %s\n", i->name);
+ }
+
+ /* Acknowledge interrupts all at once. */
+ tmp = p7mu_write16(P7MU_PMC_INTERRUPT_0_REG, ~stat_raw[0]);
+ tmp1 = p7mu_write16(P7MU_PMC_INTERRUPT_1_REG, ~stat_raw[1]);
+ if (tmp || tmp1) {
+ dev_warn(&p7mu_i2c->dev,
+ "failed to acknowledge interrupts (%d %d)\n",
+ tmp, tmp1);
+ }
+
+ /* Wake userspace pollers of the "irq" sysfs attribute. */
+ if (pending_irq)
+ sysfs_notify(&p7mu_i2c->dev.kobj, NULL, "irq");
+
+ return ret;
+}
+
+/*
+ * Bring up interrupt handling: map the platform GPIO to an interrupt line,
+ * quiesce and acknowledge the chip's interrupt registers, install the
+ * threaded handler, allocate the virtual IRQ range and finally enable the
+ * default interrupt sources. Returns 0 on success or a negative errno,
+ * undoing everything done so far on failure.
+ *
+ * Fixes: free_irq() was called with a NULL dev_id although the handler was
+ * registered with "chip" — free_irq matches on dev_id, so the line was
+ * never actually released. Also, "failed to enable interrupts" was printed
+ * even when p7mu_setup_irq() was what failed, and the IRQ descriptors
+ * leaked when enabling failed after a successful setup.
+ */
+static int __devinit p7mu_init_irqs(void)
+{
+	struct p7mu_plat_data const* const pdata = p7mu_pdata();
+	int err;
+	struct p7mu_chip *chip = &p7mu_chip; //XXX
+
+	BUG_ON(! pdata);
+
+	/* Retrieve interrupt line the GPIO will act as. */
+	p7mu_i2c->irq = gpio_to_irq(pdata->gpio);
+	if (p7mu_i2c->irq < 0) {
+		/*
+		 * GPIO platform data probably did not provide a
+		 * suitable GPIO <-> IRQ mapping.
+		 */
+		dev_err(&p7mu_i2c->dev, "failed to find a valid GPIO interrupt\n");
+		return p7mu_i2c->irq;
+	}
+
+	/* Disable all interrupts before enabling GPIO pin as interrupt source. */
+	err = p7mu_write16(P7MU_PMC_INTERRUPT_EN_0_REG, 0);
+	if (err) {
+		dev_err(&p7mu_i2c->dev, "failed to disable interrupts\n");
+		return err;
+	}
+	err = p7mu_write16(P7MU_PMC_INTERRUPT_EN_1_REG, 0);
+	if (err) {
+		dev_err(&p7mu_i2c->dev, "failed to disable interrupts\n");
+		return err;
+	}
+
+	/* Setup GPIO for interrupt usage. */
+	err = gpio_request_one(pdata->gpio, GPIOF_IN, P7MU_DRV_NAME "_irq");
+	if (err) {
+		dev_err(&p7mu_i2c->dev, "failed to request GPIO interrupt\n");
+		return err;
+	}
+
+	/* Ack all interrupts if any: might be caused by calibration ?? */
+	p7mu_write16(P7MU_PMC_INTERRUPT_0_REG, 0);
+	p7mu_write16(P7MU_PMC_INTERRUPT_1_REG, 0);
+
+	/* Register base interrupt handler in threaded context. */
+	err = request_threaded_irq(p7mu_i2c->irq,
+	                           NULL,
+	                           &p7mu_handle_irq,
+	                           IRQF_TRIGGER_HIGH|IRQF_ONESHOT,
+	                           P7MU_DRV_NAME,
+	                           chip);
+	if (err) {
+		dev_err(&p7mu_i2c->dev, "failed to request base irq %d\n", p7mu_i2c->irq);
+		goto free_gpio;
+	}
+
+	err = p7mu_setup_irq(chip);
+	if (err)
+		goto free_irq;
+
+	/* Enable hardware interrupts handling. */
+	chip->irq_mask = chip->irq_en = DEFAULT_IRQ;
+	err = p7mu_write16(P7MU_PMC_INTERRUPT_EN_0_REG, chip->irq_mask);
+	if (! err)
+		err = p7mu_write16(P7MU_PMC_INTERRUPT_EN_1_REG, chip->irq_mask >> 16);
+	if (! err)
+		return 0;
+
+	dev_err(&p7mu_i2c->dev, "failed to enable interrupts\n");
+	/* Release the descriptors allocated by p7mu_setup_irq(). */
+	irq_free_descs(chip->irq_base, P7MU_GPIO_IRQS);
+
+free_irq:
+	/* dev_id must match the one passed to request_threaded_irq() above. */
+	free_irq(p7mu_i2c->irq, chip);
+
+free_gpio:
+	gpio_free(pdata->gpio);
+	return err;
+}
+
+/*
+ * Tear down interrupt handling: disable all chip interrupt sources, free
+ * the virtual IRQ descriptors, release the interrupt line and its GPIO.
+ *
+ * Fix: free_irq() was called with a NULL dev_id while the handler was
+ * registered with &p7mu_chip — free_irq matches on dev_id, so the line was
+ * never actually freed.
+ */
+static void p7mu_exit_irqs(void)
+{
+	struct p7mu_plat_data const* const pdata = p7mu_pdata();
+
+	/* For sake of security: irqs should have been freed using p7mu_free_irq. */
+	p7mu_write16(P7MU_PMC_INTERRUPT_EN_0_REG, 0);
+	p7mu_write16(P7MU_PMC_INTERRUPT_EN_1_REG, 0);
+
+	irq_free_descs(p7mu_chip.irq_base, P7MU_GPIO_IRQS);
+	/* dev_id must match the one passed to request_threaded_irq(). */
+	free_irq(p7mu_i2c->irq, &p7mu_chip);
+	gpio_free(pdata->gpio);
+}
+
+/*
+ * Return the virtual IRQ number serving P7MU GPIO "gpio" (0..4), or
+ * -EINVAL when the GPIO number is out of range.
+ *
+ * Fix: the lookup table was a mutable array rebuilt on the stack at every
+ * call and its name shadowed the kernel-wide gpio_to_irq() helper; make it
+ * static const and rename it.
+ */
+int p7mu_gpio_to_irq(unsigned int gpio)
+{
+	struct p7mu_chip *chip = &p7mu_chip; //XXX
+	/* P7MU IRQ offset handling each GPIO line, indexed by GPIO number. */
+	static const int gpio_irq_map[] = {3, 4, 5, 16, 17};
+
+	if (gpio >= ARRAY_SIZE(gpio_irq_map))
+		return -EINVAL;
+	return gpio_irq_map[gpio] + chip->irq_base;
+}
+EXPORT_SYMBOL_GPL(p7mu_gpio_to_irq);
+/***************************
+ * MFD sub-devices handling
+ ***************************/
+
+/*
+ * Exclusively reserve a P7MU register region on behalf of sub-device
+ * "pdev". Returns the reserved resource, or ERR_PTR(-EBUSY) when the
+ * region is already claimed.
+ *
+ * Fix: "start" is a resource_size_t (64-bit on LPAE configs) and was
+ * printed with %x — a printf format/argument mismatch (undefined
+ * behavior); cast it explicitly.
+ */
+struct resource const* p7mu_request_region(struct platform_device* pdev,
+                                           struct resource const* res)
+{
+	resource_size_t const start = res->start;
+
+	res = __request_region(&p7mu_iores,
+	                       start,
+	                       resource_size(res),
+	                       dev_name(&pdev->dev),
+	                       IORESOURCE_EXCLUSIVE);
+	if (res)
+		return res;
+
+	dev_warn(&pdev->dev, "failed to reserve I/O region 0x%04llx\n",
+	         (unsigned long long) start);
+	return ERR_PTR(-EBUSY);
+}
+EXPORT_SYMBOL_GPL(p7mu_request_region);
+
+/* Give back a register region previously obtained via p7mu_request_region(). */
+void p7mu_release_region(struct resource const* res)
+{
+	resource_size_t const sz = resource_size(res);
+
+	__release_region(&p7mu_iores, res->start, sz);
+}
+EXPORT_SYMBOL_GPL(p7mu_release_region);
+
+/* Previous arm_pm_restart handler, chained to for the hard-reset fallback. */
+static void (*p7mu_restart)(char, char const*);
+
+/*
+ * arm_pm_restart replacement: record the boot reason, attempt a P7MU soft
+ * reboot for mode 's', then fall back to the original restart handler.
+ * NOTE(review): registers 0xa00/0xb00 are written with magic value 6 here
+ * and in shutdown/suspend — meaning unknown from this file, confirm
+ * against the datasheet.
+ */
+static void p7mu_reboot(char mode, const char *cmd)
+{
+ p7mu_write16(0xa00, 6);
+ p7mu_write16(0xb00, 6);
+ if (mode == 's') {
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_SOFT_REBOOT);
+ /* Give the chip time to reset us; falling through means it failed. */
+ if (! p7mu_write16(P7MU_PMC_CMD_REG, P7MU_CMD_REBOOT))
+ mdelay(100);
+ dev_warn(&p7mu_i2c->dev, "soft reset failed, trying hard reset...\n");
+ }
+
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_HARD_REBOOT);
+ p7mu_restart(mode, cmd);
+}
+
+/* Previous pm_power_off handler, restored in p7mu_remove_i2c(). */
+static void (*p7mu_pwroff)(void);
+
+/*
+ * pm_power_off replacement: ask the P7MU to cut power, wait for it to take
+ * effect, and idle forever if it never does (there is nothing else to do
+ * at this point).
+ */
+static void p7mu_shutdown(void)
+{
+ int ret;
+ p7mu_write16(0xa00, 6);
+ p7mu_write16(0xb00, 6);
+ dev_err(&p7mu_i2c->dev, "p7mu power off\n");
+ ret = p7mu_write16(P7MU_PMC_CMD_REG, P7MU_CMD_SHUTDOWN);
+
+ /* wait power off : p7mu take 5s before going off ... */
+ if (ret == 0)
+ mdelay(6000);
+
+ dev_err(&p7mu_i2c->dev, "power off failed %d.\n", ret);
+ while (1)
+ cpu_do_idle();
+}
+
+/* We will store the jump address in the first 2 application regs */
+/*
+ * Persist the physical resume address "jump_addr" in P7MU application
+ * registers DATA00 (high half) / DATA01 (low half) and read both back to
+ * verify the write. Returns 0 on success, -EIO on any write/verify
+ * failure.
+ */
+int p7mu_save_cpu_jump(void *jump_addr)
+{
+ u32 addr = virt_to_phys(jump_addr);
+ u16 addr_hi = (u16)(addr >> 16);
+ u16 addr_lo = (u16)(addr & 0xffff);
+ u16 val;
+
+ printk("p7mu_save_cpu_jump storing 0x%x\n", addr);
+
+ if ((0 != p7mu_write16(P7MU_APPL_DATA00_REG, addr_hi))
+ || (0 != p7mu_write16(P7MU_APPL_DATA01_REG, addr_lo))) {
+ dev_err(&p7mu_i2c->dev, "cpu jump set fail.\n");
+ return -EIO;
+ }
+ if (p7mu_read16(P7MU_APPL_DATA00_REG, &val) || addr_hi != val) {
+ dev_err(&p7mu_i2c->dev, "DATA00_REG is wrong 0x%x\n", val);
+ return -EIO;
+ }
+ if (p7mu_read16(P7MU_APPL_DATA01_REG, &val) || addr_lo != val) {
+ dev_err(&p7mu_i2c->dev, "DATA01_REG is wrong 0x%x\n", val);
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(p7mu_save_cpu_jump);
+
+/* Program the board-specific GPIO wakeup event selection from platform data. */
+static void p7mu_wakeup_setup(void)
+{
+ struct p7mu_plat_data const* const pdata = p7mu_pdata();
+
+ p7mu_write16(P7MU_PMC_GPIO_EVENT_SEL_REG, pdata->gpio_event_sel);
+}
+
+/* Hex-dump an (unexpected) chip identification string to the kernel log. */
+static void p7mu_print_idstr(char const* id, size_t size)
+{
+	print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_NONE, 16, 1, id, size,
+	               false);
+}
+
+/*
+ * Read "cnt" raw 16-bit registers starting at "offset" into "buff".
+ * Unlike p7mu_read16(), the payload is NOT byte-swapped — used for the
+ * ASCII identification string where byte order must be preserved.
+ */
+static inline int p7mu_read_regs(u16 offset,
+ u16* buff,
+ size_t cnt)
+{
+ /* there is no endian conversion on data ! */
+ return p7mu_transfer(cpu_to_be16(offset), buff, cnt, true);
+}
+
+/* sysfs "boot_reason": human-readable reason recorded at the last (re)boot. */
+static ssize_t show_boot_reason(struct device *d, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf,PAGE_SIZE, "%s\n", boot_reasons_str[boot_reason]);
+}
+DEVICE_ATTR(boot_reason,0444,show_boot_reason,NULL);
+
+/*
+ * Append to "buf" the name of every register-0 default interrupt whose bit
+ * is set in "mask", one per line; returns the number of bytes written.
+ */
+static ssize_t show_irq_mask(int mask, char *buf)
+{
+	ssize_t len = 0;
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(p7mu_irqs); idx++) {
+		struct p7mu_irq const* const irq = &p7mu_irqs[idx];
+
+		/* Only register-0 sources belonging to DEFAULT_IRQ are listed. */
+		if (irq->reg != 0 || !(irq->msk & DEFAULT_IRQ))
+			continue;
+		if (!(mask & irq->msk))
+			continue;
+
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s\n", irq->name);
+	}
+
+	return len;
+}
+
+/* sysfs "irq": names of interrupts raised since the last read (read clears). */
+static ssize_t show_irq(struct device *d, struct device_attribute *attr, char *buf)
+{
+ int ret;
+
+ ret = show_irq_mask(pending_irq, buf);
+ /* Reading consumes the pending set. */
+ pending_irq = 0;
+
+ return ret;
+}
+DEVICE_ATTR(irq,0444,show_irq,NULL);
+
+/* sysfs "irq_before_reboot": interrupts that were latched before the last reboot. */
+static ssize_t show_irq_before_reboot(struct device *d, struct device_attribute *attr, char *buf)
+{
+	return show_irq_mask(irq_before_reboot, buf);
+}
+DEVICE_ATTR(irq_before_reboot,0444,show_irq_before_reboot,NULL);
+
+/* sysfs "power_supply_status": last value snapshotted by p7mu_handle_irq(). */
+static ssize_t show_power_supply_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf,PAGE_SIZE, "%d\n", power_supply_status);
+}
+DEVICE_ATTR(power_supply_status,0444,show_power_supply_status,NULL);
+
+/* sysfs "status_register": last PMC status snapshotted by p7mu_handle_irq(). */
+static ssize_t show_status_register(struct device *d, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf,PAGE_SIZE, "%d\n", status_register);
+}
+DEVICE_ATTR(status_register,0444,show_status_register,NULL);
+
+/* sysfs "temperature_error_count": last error count snapshotted at interrupt time. */
+static ssize_t show_temperature_error_count(struct device *d, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf,PAGE_SIZE, "%d\n", temperature_error_count);
+}
+DEVICE_ATTR(temperature_error_count,0444,show_temperature_error_count,NULL);
+
+#ifdef CONFIG_SUSPEND
+/* Incremented elsewhere on every successful system resume. */
+extern int p7_resume_cnt;
+/* Monotonic timestamp (seconds) of the most recent resume. */
+static long p7_resume_time;
+
+/* sysfs "resume_cnt": number of system resumes seen so far. */
+static ssize_t show_resume_cnt(struct device *d, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf,PAGE_SIZE, "%d", p7_resume_cnt);
+}
+/* Fix: mode was 0644 although there is no store method — read-only. */
+DEVICE_ATTR(resume_cnt,0444,show_resume_cnt,NULL);
+
+/* sysfs "resume_time": monotonic time of the most recent resume. */
+static ssize_t show_resume_time(struct device *d, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf,PAGE_SIZE, "%ld", p7_resume_time);
+}
+/* Fix: mode was 0644 although there is no store method — read-only. */
+DEVICE_ATTR(resume_time,0444,show_resume_time,NULL);
+#endif
+
+/* All driver sysfs attributes, registered as one group at probe time. */
+static struct attribute *p7mu_attrs[] = {
+ &dev_attr_boot_reason.attr,
+ &dev_attr_irq.attr,
+ &dev_attr_irq_before_reboot.attr,
+ &dev_attr_power_supply_status.attr,
+ &dev_attr_status_register.attr,
+ &dev_attr_temperature_error_count.attr,
+#ifdef CONFIG_SUSPEND
+ &dev_attr_resume_cnt.attr,
+ &dev_attr_resume_time.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group p7mu_attr_group = {
+ .attrs = p7mu_attrs,
+};
+
+/* Expected prefix of the identification string read from P7MU_CFG_ID. */
+static char const p7mu_idstr[] __devinitconst = { '\0', 'P', '7', 'M', 'U', '_', 'R' };
+
+/*
+ * I2C probe: verify the chip identification string and version, create the
+ * sysfs group, decode and persist the boot reason, bring up clocks,
+ * interrupts, pinctrl and the MFD sub-devices, and finally take over the
+ * machine restart / power-off hooks. All steps are unwound in reverse
+ * order on failure.
+ */
+static int __devinit p7mu_probe_i2c(struct i2c_client* client,
+ struct i2c_device_id const* dev_id)
+{
+ struct i2c_adapter* const adapt = client->adapter;
+ char id[4 * sizeof(u16)];
+ int vers;
+ int err;
+ u16 val;
+ u16 last_power_status[4];
+
+ /*
+ * For instance, depend on I2C driver supporting I2C_M_NOSTART so that
+ * we can aggregate multiple writes in a single transfer. We would
+ * otherwise need to allocate temporary buffers and perform an additional
+ * mem copy to serve client requests.
+ */
+ if (! i2c_check_functionality(adapt, I2C_FUNC_PROTOCOL_MANGLING))
+ return -ENOTSUPP;
+
+ p7mu_i2c = client;
+
+ err = p7mu_read_regs(P7MU_CFG_ID, (u16*) id, 4);
+ if (err) {
+ dev_err(&client->dev, "failed to get id string\n");
+ goto err;
+ }
+ if (memcmp(p7mu_idstr, id, sizeof(p7mu_idstr))) {
+ /* XXX on some board the first read doesn't work, retry */
+ dev_err(&client->dev, "found invalid chip with wrong id string : retrying\n");
+ p7mu_print_idstr(id, ARRAY_SIZE(id));
+ p7mu_read_regs(P7MU_CFG_ID, (u16*) id, 4);
+ }
+
+ err = -ENODEV;
+ if (memcmp(p7mu_idstr, id, sizeof(p7mu_idstr))) {
+ dev_err(&client->dev, "found invalid chip with wrong id string\n");
+ p7mu_print_idstr(id, ARRAY_SIZE(id));
+ goto err;
+ }
+
+ /* Chip revision is the ASCII digit following the "P7MU_R" prefix. */
+ vers = (int) id[7] - (int) '0';
+ if (vers < 0 || vers > 0xf) {
+ dev_err(&client->dev, "found invalid chip with wrong version string\n");
+ p7mu_print_idstr(id, ARRAY_SIZE(id));
+ goto err;
+ }
+
+ err = sysfs_create_group(&client->dev.kobj, &p7mu_attr_group);
+ if (err)
+ goto err;
+
+ p7mu_init_clk();
+
+ /* Decode the boot reason persisted in DATA02 by the previous run. */
+ err = p7mu_read16(P7MU_APPL_DATA02_REG, &val);
+ if (err || val < P7MU_BOOT_REASON_FIRST_BOOT || val > P7MU_BOOT_REASON_OTHER)
+ {
+ dev_err(&client->dev, "failed to read boot reason\n");
+ boot_reason = P7MU_BOOT_REASON_FIRST_BOOT;
+ }
+ else
+ {
+ boot_reason = val;
+ dev_info(&client->dev, "boot reason: %s\n", boot_reasons_str[boot_reason]);
+ if (boot_reason == P7MU_BOOT_REASON_P7MU_POWER) {
+ p7mu_read16(P7MU_APPL_DATA06_REG, &last_power_status[0]);
+ p7mu_read16(P7MU_APPL_DATA07_REG, &last_power_status[1]);
+ p7mu_read16(P7MU_APPL_DATA08_REG, &last_power_status[2]);
+ dev_info(&client->dev, "satus before reboot %d, filter %04X, mask %04X\n",
+ last_power_status[0], last_power_status[1], last_power_status[2]);
+ }
+ }
+ /* 0x0f05: latched interrupt state from before the reboot —
+ * NOTE(review): register meaning inferred from the message below. */
+ err = p7mu_read16(0xf05, &val);
+ if (!err) {
+ dev_info(&client->dev, "p7mu event before reboot : %s%s%s%s%s\n",
+ (val & P7MU_INT_REBOOT) ? "pwren pulse,":"",
+ (val & P7MU_INT_POWER) ? "regulation failure,":"",
+ (val & P7MU_INT_TEMP_ERR) ? "temp error,":"",
+ (val & P7MU_INT_TEMP_WARN) ? "temp warn,":"",
+ (val & P7MU_INT_PWREN) ? "pwren failling,":""
+ );
+ irq_before_reboot = val;
+ }
+
+ // Set boot reason to other by default
+ p7mu_write16(P7MU_APPL_DATA02_REG, P7MU_BOOT_REASON_OTHER);
+
+ err = p7mu_init_irqs();
+ if (err) {
+ dev_err(&client->dev, "failed to initialize interrupts (%d)\n", err);
+ goto clk;
+ }
+
+ p7mu_pctl = pinctrl_get_select_default(&client->dev);
+ if (IS_ERR(p7mu_pctl)) {
+ dev_err(&client->dev, "failed to initialize pins (%ld)\n",
+ PTR_ERR(p7mu_pctl));
+ goto irq;
+ }
+
+ /* Now register cells (future platform_device) to MFD layer. */
+ err = mfd_add_devices(&client->dev,
+ 0,
+ p7mu_cells,
+ ARRAY_SIZE(p7mu_cells),
+ NULL,
+ 0);
+ if (err) {
+ dev_err(&client->dev, "failed to register sub-devices (%d)\n", err);
+ goto pin;
+ }
+
+
+ /* Take over restart/power-off, keeping originals for fallback/restore. */
+ BUG_ON(! arm_pm_restart);
+ p7mu_restart = arm_pm_restart;
+ arm_pm_restart = p7mu_reboot;
+ p7mu_pwroff = pm_power_off;
+ pm_power_off = p7mu_shutdown;
+ p7mu_wakeup_setup();
+
+ dev_info(&client->dev, "attached device v%d to I2C bus %s.%d\n",
+ vers,
+ adapt->name,
+ adapt->nr);
+ return 0;
+
+pin:
+ pinctrl_put(p7mu_pctl);
+irq:
+ p7mu_exit_irqs();
+clk:
+ p7mu_exit_clk();
+ sysfs_remove_group(&client->dev.kobj, &p7mu_attr_group);
+err:
+ return err;
+}
+
+/*
+ * I2C remove: undo everything probe did — restore the machine restart and
+ * power-off hooks, remove MFD children, release pinctrl, interrupts and
+ * clocks.
+ */
+static int __devexit p7mu_remove_i2c(struct i2c_client* client)
+{
+ struct i2c_adapter const* const adapt = client->adapter;
+
+ sysfs_remove_group(&client->dev.kobj, &p7mu_attr_group);
+
+ BUG_ON(! p7mu_restart);
+ arm_pm_restart = p7mu_restart;
+ BUG_ON(! pm_power_off);
+ pm_power_off = p7mu_pwroff;
+
+ mfd_remove_devices(&client->dev);
+
+ pinctrl_put(p7mu_pctl);
+ p7mu_exit_irqs();
+ p7mu_exit_clk();
+
+ dev_info(&adapt->dev, "detached from I2C bus %s.%d\n",
+ adapt->name,
+ adapt->nr);
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+/* Legacy I2C suspend hook.
+ * NOTE(review): registers 0xa00/0xb00 written with 6/4 here and in
+ * reboot/shutdown — meaning unknown from this file, confirm against the
+ * datasheet. */
+static int p7mu_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ p7mu_write16(0xa00, 6);
+ p7mu_write16(0xb00, 6);
+
+ return 0;
+}
+/* Legacy I2C resume hook: restore clocks and record the resume timestamp
+ * exposed via the "resume_time" sysfs attribute. */
+static int p7mu_resume(struct i2c_client *client)
+{
+ static int resume_cnt;
+ //struct p7mu_data *data = i2c_get_clientdata(client);
+ p7mu_resume_clk();
+ p7mu_write16(0xa00, 4);
+ p7mu_write16(0xb00, 4);
+
+ /* Only update the timestamp once per actual system resume. */
+ if (resume_cnt != p7_resume_cnt) {
+ struct timespec tv;
+ do_posix_clock_monotonic_gettime(&tv);
+
+ p7_resume_time = tv.tv_sec;
+ resume_cnt = p7_resume_cnt;
+ }
+ return 0;
+}
+#else
+#define p7mu_suspend NULL
+#define p7mu_resume NULL
+#endif
+
+/* I2C device ID table: matches boards declaring a "p7mu" client. */
+static struct i2c_device_id const p7mu_i2c_id[] = {
+ { P7MU_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, p7mu_i2c_id);
+
+/* I2C driver glue; suspend/resume compile to NULL without CONFIG_SUSPEND. */
+static struct i2c_driver p7mu_i2c_driver = {
+ .driver = {
+ .name = P7MU_DRV_NAME,
+ },
+ .probe = &p7mu_probe_i2c,
+ .remove = __devexit_p(&p7mu_remove_i2c),
+ .suspend = p7mu_suspend,
+ .resume = p7mu_resume,
+ .id_table = p7mu_i2c_id
+};
+
+/* Register the I2C driver early (postcore) so sub-drivers can depend on it. */
+static int __init p7mu_init_core(void)
+{
+ return i2c_add_driver(&p7mu_i2c_driver);
+}
+postcore_initcall(p7mu_init_core);
+
+/* Module unload: unregister the I2C driver. */
+static void __exit p7mu_exit_core(void)
+{
+ i2c_del_driver(&p7mu_i2c_driver);
+}
+module_exit(p7mu_exit_core);
+
+MODULE_DESCRIPTION("Parrot Power Management Unit core driver");
+/* Fix: author name was mis-encoded ("Grİgor"); file headers spell it "Gregor Boirie". */
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/mfd/p7mu-pin.c b/drivers/parrot/mfd/p7mu-pin.c
new file mode 100644
index 00000000000000..ccaf56ffbb2510
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu-pin.c
@@ -0,0 +1,239 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu-pin.c - Parrot7 power management unit I/O pins
+ * internal implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 18-Sep-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <asm/errno.h>
+#include "p7mu-pin.h"
+
+/*
+ * Multiplexing function register configuration values
+ */
+/* Hardware register encoding of each multiplexing function (2-bit field). */
+static u16 const p7mu_mux_cfg[] = {
+ [P7MU_INGPIO_MUX] = 0,
+ [P7MU_PWM_MUX] = 2,
+ [P7MU_OUTGPIO_MUX] = 3
+};
+
+/*
+ * Build a bit mask of available multiplexing function for a pin.
+ */
+#define P7MU_MUX_MSK(_mux) \
+ ((u16) (1U << P7MU_ ## _mux ## _MUX))
+
+/*
+ * Available multiplexing functions per pin
+ */
+/* Available multiplexing functions per pin: all 5 pins support GPIO in/out
+ * and PWM. */
+static u16 const p7mu_avail_mux[P7MU_PIN_NR] = {
+ [0] = P7MU_MUX_MSK(INGPIO) | P7MU_MUX_MSK(OUTGPIO) |
+ P7MU_MUX_MSK(PWM),
+ [1] = P7MU_MUX_MSK(INGPIO) | P7MU_MUX_MSK(OUTGPIO) |
+ P7MU_MUX_MSK(PWM),
+ [2] = P7MU_MUX_MSK(INGPIO) | P7MU_MUX_MSK(OUTGPIO) |
+ P7MU_MUX_MSK(PWM),
+ [3] = P7MU_MUX_MSK(INGPIO) | P7MU_MUX_MSK(OUTGPIO) |
+ P7MU_MUX_MSK(PWM),
+ [4] = P7MU_MUX_MSK(INGPIO) | P7MU_MUX_MSK(OUTGPIO) |
+ P7MU_MUX_MSK(PWM)
+};
+
+/*
+ * GPIO configuration register definitions: each pin occupies a 2-bit field
+ * within the P7MU_PIN_CFG register.
+ */
+#define P7MU_PIN_SFT 2
+#define P7MU_PIN_MSK ((u16) ((1U << P7MU_PIN_SFT) - 1))
+
+/* Per-pin bookkeeping, guarded by p7mu_pin_lck. */
+static struct p7mu_pin p7mu_pins[P7MU_PIN_NR];
+static DEFINE_MUTEX( p7mu_pin_lck);
+
+#ifdef DEBUG
+
+#define CHAR_BIT 8
+
+/*
+ * Sanity checks:
+ * - there must exist one available multiplexing function mask per pin ;
+ * - we cannot handle more pins (configuration) than a full 16 bits register
+ * can hold (which is the width of P7MU_PIN_CFG register).
+ */
+#define P7MU_ASSERT_MUX(_mux) \
+ BUG_ON(_mux < 0); \
+ BUG_ON(_mux >= ARRAY_SIZE(p7mu_mux_cfg)); \
+ BUG_ON(ARRAY_SIZE(p7mu_avail_mux) != ARRAY_SIZE(p7mu_pins)); \
+ BUG_ON(ARRAY_SIZE(p7mu_avail_mux) > \
+ ((sizeof(u16) * CHAR_BIT) / P7MU_PIN_SFT)); \
+ BUG_ON(p7mu_mux_cfg[_mux] & ~P7MU_PIN_MSK)
+#else /* DEBUG */
+
+/* No-op in non-debug builds. */
+#define P7MU_ASSERT_MUX(_mux)
+
+#endif /* DEBUG */
+
+/*
+ * Return computed register configuration word to assign "mux" multiplexing
+ * function to "pin" pin.
+ */
+/*
+ * Compute the P7MU_PIN_CFG register word assigning multiplexing function
+ * "mux" to pin "pin". Returns the (non-negative) configuration word,
+ * -EINVAL for an out-of-range pin, or -EPERM when the pin does not support
+ * the requested function.
+ */
+static int p7mu_pin_cfg(unsigned int pin, enum p7mu_mux mux)
+{
+	u16 const wanted = (u16) (1U << mux);
+
+	P7MU_ASSERT_MUX(mux);
+
+	if (pin >= ARRAY_SIZE(p7mu_pins))
+		return -EINVAL;
+
+	if (! (wanted & p7mu_avail_mux[pin]))
+		return -EPERM;
+
+	return (int) (p7mu_mux_cfg[mux] << (pin * P7MU_PIN_SFT));
+}
+
+/*
+ * Reserve "pin" pin for usage with multiplexing function "mux".
+ */
+/*
+ * Reserve "pin" pin for usage with multiplexing function "mux".
+ * Returns 0 on success, -EBUSY if already booked, or the error from
+ * p7mu_pin_cfg()/p7mu_mod16().
+ */
+int p7mu_request_pin(unsigned int pin, enum p7mu_mux mux)
+{
+ struct p7mu_pin* const p = &p7mu_pins[pin];
+ int cfg;
+ int err = 0;
+
+ cfg = p7mu_pin_cfg(pin, mux);
+ if (cfg < 0)
+ return cfg;
+
+ mutex_lock(&p7mu_pin_lck);
+
+ if (p->booked) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (p->mux != mux)
+ /* If I/O is not already setup as requested perform I2C access. */
+ err = p7mu_mod16(P7MU_PIN_CFG,
+ (u16) cfg,
+ P7MU_PIN_MSK << (pin * P7MU_PIN_SFT));
+ if (! err) {
+ p->booked = true;
+ p->mux = mux;
+ }
+
+unlock:
+ mutex_unlock(&p7mu_pin_lck);
+ return err;
+}
+EXPORT_SYMBOL(p7mu_request_pin);
+
+/*
+ * Release "pin" pin previously reserved by p7mu_request_pin.
+ * This will reset pin to a side-effects free default state.
+ */
+/*
+ * Release "pin" pin previously reserved by p7mu_request_pin.
+ * This will reset pin to a side-effects free default state (input GPIO).
+ * Failures are logged but not reported to the caller.
+ */
+void p7mu_free_pin(unsigned int pin)
+{
+ struct p7mu_pin* const p = &p7mu_pins[pin];
+ int cfg;
+ int err = 0;
+
+ cfg = p7mu_pin_cfg(pin, P7MU_INGPIO_MUX);
+#ifdef DEBUG
+ /*
+ * Input GPIO must always be supported since this is the default state
+ * mentionned above.
+ */
+ BUG_ON(cfg < 0);
+#endif
+
+ mutex_lock(&p7mu_pin_lck);
+
+ if (! p->booked) {
+ /* Freeing a pin that was never requested. */
+ err = -EBADFD;
+ goto unlock;
+ }
+
+ if (p->mux != P7MU_INGPIO_MUX) {
+ /* Put the pin back to the input GPIO default over I2C. */
+ err = p7mu_mod16(P7MU_PIN_CFG,
+ (u16) cfg,
+ P7MU_PIN_MSK << (pin * P7MU_PIN_SFT));
+ }
+
+ if (! err) {
+ p->booked = false;
+ p->mux = P7MU_INGPIO_MUX;
+ }
+
+unlock:
+ mutex_unlock(&p7mu_pin_lck);
+
+ if (err)
+ dev_warn(&p7mu_i2c->dev,
+ "failed to release pin %d (%d)\n",
+ pin,
+ err);
+}
+EXPORT_SYMBOL(p7mu_free_pin);
+
+/*
+ * Switch an already-booked "pin" pin to multiplexing function "mux".
+ * Returns 0 on success, -EBADFD when the pin was not requested first, or
+ * the error from p7mu_pin_cfg()/p7mu_mod16().
+ */
+int p7mu_setup_pin(unsigned int pin, enum p7mu_mux mux)
+{
+ struct p7mu_pin* const p = &p7mu_pins[pin];
+ int cfg;
+ int err = 0;
+
+ cfg = p7mu_pin_cfg(pin, mux);
+ if (cfg < 0)
+ return cfg;
+
+ mutex_lock(&p7mu_pin_lck);
+
+ if (! p->booked) {
+ err = -EBADFD;
+ goto unlock;
+ }
+
+ if (p->mux != mux)
+ /* If I/O is not already setup as requested perform I2C access. */
+ err = p7mu_mod16(P7MU_PIN_CFG,
+ (u16) cfg,
+ P7MU_PIN_MSK << (pin * P7MU_PIN_SFT));
+ if (! err)
+ p->mux = mux;
+
+unlock:
+ mutex_unlock(&p7mu_pin_lck);
+ return err;
+}
+EXPORT_SYMBOL(p7mu_setup_pin);
+
+/*
+ * Return the multiplexing function currently assigned to a booked "pin"
+ * pin (an enum p7mu_mux value), -EINVAL for an out-of-range pin, or
+ * -EBADFD when the pin was not requested first.
+ */
+int p7mu_pin_mux(unsigned int pin)
+{
+ struct p7mu_pin const* p;
+ int err;
+
+ if (pin >= ARRAY_SIZE(p7mu_pins))
+ return -EINVAL;
+
+ p = &p7mu_pins[pin];
+
+ mutex_lock(&p7mu_pin_lck);
+
+ if (! p->booked) {
+ err = -EBADFD;
+ goto unlock;
+ }
+
+ err = p->mux;
+ P7MU_ASSERT_MUX(err);
+
+unlock:
+ mutex_unlock(&p7mu_pin_lck);
+ return err;
+}
+EXPORT_SYMBOL(p7mu_pin_mux);
diff --git a/drivers/parrot/mfd/p7mu-pin.h b/drivers/parrot/mfd/p7mu-pin.h
new file mode 100644
index 00000000000000..dfb64a1b36a669
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu-pin.h
@@ -0,0 +1,45 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu-pin.h.c - Parrot7 power management unit I/O pins
+ * internal interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 18-Sep-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7MU_PIN_H
+#define _P7MU_PIN_H
+
+#include "p7mu.h"
+
+/* I/O pins configuration register */
+#define P7MU_PIN_CFG (P7MU_CFG_PAGE + 0x0b)
+
+/*
+ * Multiplexing functions which may be assigned to I/O pins
+ */
+enum p7mu_mux {
+ P7MU_INGPIO_MUX, /* Input GPIO */
+ P7MU_OUTGPIO_MUX, /* Output GPIO */
+ P7MU_PWM_MUX /* PWM output */
+};
+
+/*
+ * Pin descriptor
+ */
+struct p7mu_pin {
+ /* Pin usage marker */
+ bool booked;
+ /* Multiplexing function currently assigned to pin */
+ enum p7mu_mux mux;
+};
+
+/* Pin reservation / multiplexing API — see p7mu-pin.c for semantics.
+ * Parameter names added: prototypes should document their arguments. */
+extern int p7mu_request_pin(unsigned int pin, enum p7mu_mux mux);
+extern void p7mu_free_pin(unsigned int pin);
+extern int p7mu_setup_pin(unsigned int pin, enum p7mu_mux mux);
+extern int p7mu_pin_mux(unsigned int pin);
+
+#endif /* _P7MU_PIN_H */
diff --git a/drivers/parrot/mfd/p7mu.h b/drivers/parrot/mfd/p7mu.h
new file mode 100644
index 00000000000000..4a3fa38df57dc2
--- /dev/null
+++ b/drivers/parrot/mfd/p7mu.h
@@ -0,0 +1,111 @@
+/**
+ * linux/drivers/parrot/mfd/p7mu.h - Parrot7 power management unit core driver
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 16-May-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7MU_H
+#define _P7MU_H
+
+#if defined(CONFIG_MFD_P7MU) || \
+ defined(CONFIG_MFD_P7MU_MODULE)
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/i2c.h>
+#include <mach/pwm.h>
+#include <mach/p7-adc.h>
+
+/* Driver and MFD cell name prefix. */
+#define P7MU_DRV_NAME "p7mu"
+
+/* Number of controllable I/O pins */
+#define P7MU_PIN_NR 5
+
+/* Base of the chip configuration register page. */
+#define P7MU_CFG_PAGE ((u16) 0x0000)
+
+/* I2C client bound by the core driver; valid once probe has succeeded. */
+extern struct i2c_client* p7mu_i2c;
+
+/* Board-specific configuration passed through i2c_board_info.platform_data. */
+struct p7mu_plat_data {
+ /* GPIO line used as PxMU interrupt source */
+ int gpio;
+ /* P7MU pin assigned to each PWM output. */
+ unsigned int pwm_pins[P7MU_PWMS_MAX];
+ /* Internal 32 kHz / 32 MHz oscillator usage flags. */
+ bool int_32k;
+ bool int_32m;
+ bool overide_otp;
+ /* Value programmed into P7MU_PMC_GPIO_EVENT_SEL_REG at probe. */
+ int gpio_event_sel;
+
+ /* p7mu adc platform data */
+ struct p7_adc_chan_data *chan_data;
+};
+
+/* GPIO event selection values: enable + rising-edge mode per GPIO line. */
+#define P7MU_PMC_GPIO1_EDGE_RISING ((u16) (1U << 10)|(2U << 0))
+#define P7MU_PMC_GPIO2_EDGE_RISING ((u16) (1U << 11)|(2U << 2))
+#define P7MU_PMC_GPIO3_EDGE_RISING ((u16) (1U << 12)|(2U << 4))
+#define P7MU_PMC_GPIO4_EDGE_RISING ((u16) (1U << 13)|(2U << 6))
+#define P7MU_PMC_GPIO5_EDGE_RISING ((u16) (1U << 14)|(2U << 8))
+
+/* Shortcut to the board platform data attached to the bound I2C client. */
+static inline struct p7mu_plat_data const* p7mu_pdata(void)
+{
+ return dev_get_platdata(&p7mu_i2c->dev);
+}
+
+struct platform_device;
+
+/* Exclusive reservation of P7MU register regions for sub-devices. */
+extern struct resource const* p7mu_request_region(struct platform_device*,
+ struct resource const* res);
+extern void p7mu_release_region(struct resource const*);
+
+/* Map a P7MU GPIO number (0..4) to its virtual IRQ, or -EINVAL. */
+int p7mu_gpio_to_irq(unsigned int gpio);
+
+/////////////////
+
+/* Raw register transfer primitive; offset must already be big-endian. */
+extern int p7mu_transfer(u16, u16*, size_t, bool);
+
+/* Read one 16-bit register, converting from chip (big-endian) byte order. */
+static inline int p7mu_read16(u16 offset, u16* val)
+{
+ int const err = p7mu_transfer(cpu_to_be16(offset), (u16*)val, 1, true);
+
+ if (err)
+ return err;
+
+ be16_to_cpus(val);
+ return 0;
+}
+
+/* Write one 16-bit register, converting to chip (big-endian) byte order. */
+static inline int p7mu_write16(u16 offset, u16 val)
+{
+ val = cpu_to_be16(val);
+ return p7mu_transfer(cpu_to_be16(offset), &val, 1, false);
+}
+
+/* Read-modify-write of masked bits within one 16-bit register. */
+extern int p7mu_mod16(u16 offset, u16 val, u16 mask);
+
+/* Read a 32-bit value spanning two consecutive 16-bit registers. */
+static inline int p7mu_read32(u16 offset, u32* buff)
+{
+ int const err = p7mu_transfer(cpu_to_be16(offset), (u16*)buff, 2, true);
+
+ if (err)
+ return err;
+
+ be32_to_cpus(buff);
+ return 0;
+}
+
+/* Write a 32-bit value spanning two consecutive 16-bit registers. */
+static inline int p7mu_write32(u16 offset, u32 val)
+{
+ val = cpu_to_be32(val);
+ return p7mu_transfer(cpu_to_be16(offset),(u16*) &val, 2, false);
+}
+
+/* Persist a physical resume address in the application registers. */
+int p7mu_save_cpu_jump(void *jump_addr);
+
+#endif /* defined(CONFIG_MFD_P7MU) || \
+ defined(CONFIG_MFD_P7MU_MODULE) */
+
+#endif
diff --git a/drivers/parrot/mfd/sicilia-irradiance.c b/drivers/parrot/mfd/sicilia-irradiance.c
new file mode 100644
index 00000000000000..2f62445d89292a
--- /dev/null
+++ b/drivers/parrot/mfd/sicilia-irradiance.c
@@ -0,0 +1,132 @@
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+
+#include <parrot/mfd/dib0700_mfd.h>
+
+#define DRIVER_NAME "sicilia-irradiance"
+
+static struct led_info pca9633_leds[] = {
+ {
+ .name = "irradiance:red",
+ .default_trigger = "none",
+ },
+ {
+ .name = "irradiance:green",
+ .default_trigger = "none",
+ },
+ {
+ .name = "irradiance:blue",
+ .default_trigger = "none",
+ },
+};
+
+static struct led_platform_data pca9633_data = {
+ .num_leds = ARRAY_SIZE(pca9633_leds),
+ .leds = pca9633_leds,
+};
+
+static struct i2c_board_info pca9633_board_info = {
+ I2C_BOARD_INFO("pca9633", 0x62),
+ .platform_data = &pca9633_data,
+};
+
+#define TSL2591_SENSORS_NB 4
+#define GPIO_GNSS_POWER_EN 1
+#define GPIO_THERMAL_PWM 6
+#define GPIO_FLASH 8
+
+static struct i2c_board_info tsl2591_board_info = {
+ I2C_BOARD_INFO("tsl2591", 0x29),
+};
+
+static struct i2c_client * pca9633_client;
+static struct i2c_client * tsl2591_client[TSL2591_SENSORS_NB];
+
+/*
+ * USB probe: bring up the dib0700 bridge, then instantiate the pca9633 led
+ * controller and the four tsl2591 illuminance sensors on the I2C adapters
+ * the bridge exposes, and finally power up the GNSS receiver.
+ *
+ * Fixes vs. original: i2c_get_adapter() may return NULL (adapter not yet
+ * registered) and its reference was never dropped, leaking the adapter;
+ * the gpio_request_one() return value was silently ignored.
+ */
+static int irradiance_probe(struct usb_interface *intf,
+			    const struct usb_device_id *id)
+{
+	int ret = 0;
+	struct i2c_adapter * adapter = NULL;
+	int dib0700_gpio_base;
+	int i, j;
+
+	ret = dib0700_probe(intf, &dib0700_gpio_base);
+	if (ret) {
+		goto dib0700_probe_failed;
+	}
+
+	/* pca9633 leds i2c driver */
+	adapter = i2c_get_adapter(3);
+	if (adapter == NULL) {
+		ret = -ENODEV;
+		goto pca966_probe_failed;
+	}
+	pca9633_client = i2c_new_device(adapter, &pca9633_board_info);
+	/* The new client keeps the adapter alive: drop the reference taken
+	 * by i2c_get_adapter() to avoid leaking it. */
+	i2c_put_adapter(adapter);
+	if (pca9633_client == NULL) {
+		ret = -ENXIO;
+		goto pca966_probe_failed;
+	}
+
+	/* ts2591 illuminance i2c driver */
+	for (i = 0; i < TSL2591_SENSORS_NB; i++) {
+		adapter = i2c_get_adapter(3 + i);
+		if (adapter == NULL) {
+			ret = -ENODEV;
+			goto tsl2591_probe_failed;
+		}
+		tsl2591_client[i] = i2c_new_device(adapter, &tsl2591_board_info);
+		i2c_put_adapter(adapter);
+		if (tsl2591_client[i] == NULL) {
+			ret = -ENXIO;
+			goto tsl2591_probe_failed;
+		}
+	}
+
+	/* Power up gps (best effort: GNSS is optional, only warn). */
+	ret = gpio_request_one(dib0700_gpio_base + GPIO_GNSS_POWER_EN,
+			       GPIOF_OUT_INIT_HIGH,
+			       "gps_power_en");
+	if (ret)
+		dev_warn(&intf->dev,
+			 "failed to request gps power gpio (%d)\n", ret);
+
+	return 0;
+
+tsl2591_probe_failed:
+	for (j = 0; j < i; j++) {
+		i2c_unregister_device(tsl2591_client[j]);
+	}
+	i2c_unregister_device(pca9633_client);
+pca966_probe_failed:
+	dib0700_device_exit(intf);
+dib0700_probe_failed:
+	return ret;
+}
+
+/*
+ * USB disconnect: tear down everything irradiance_probe() created —
+ * all tsl2591 sensor clients, the pca9633 led client, then the dib0700
+ * bridge itself.
+ */
+static void irradiance_device_exit(struct usb_interface *intf)
+{
+	int i;
+
+	for (i = 0; i < TSL2591_SENSORS_NB; i++) {
+		i2c_unregister_device(tsl2591_client[i]);
+	}
+
+	i2c_unregister_device(pca9633_client);
+	dib0700_device_exit(intf);
+}
+
+#define USB_VID_PARROT 0x19cf
+#define USB_PID_PARROT_HOOK_DEFAULT 0x5100
+
+static struct usb_device_id irradiance_usb_id_table[] = {
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_HOOK_DEFAULT) },
+ { USB_DEVICE(USB_VID_PARROT, USB_PID_PARROT_HOOK_DEFAULT) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(usb, irradiance_usb_id_table);
+
+static struct usb_driver irradiance_driver = {
+ .name = DRIVER_NAME,
+ .probe = irradiance_probe,
+ .disconnect = irradiance_device_exit,
+ .id_table = irradiance_usb_id_table,
+};
+
+module_usb_driver(irradiance_driver);
+
+MODULE_AUTHOR("Ronan CHAUVIN <ronan.chauvin@parrot.com>");
+MODULE_DESCRIPTION("Sicilia irradiance module driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/mmc/Kconfig b/drivers/parrot/mmc/Kconfig
new file mode 100644
index 00000000000000..873151bd5825e1
--- /dev/null
+++ b/drivers/parrot/mmc/Kconfig
@@ -0,0 +1,13 @@
+config MMC_SDHCI_ACS3
+ tristate "Arasan SD3.0 / SDIO3.0 / eMMC4.41 driver"
+ depends on MMC && MMC_SDHCI
+ select MMC_SDHCI_IO_ACCESSORS
+ default MMC_SDHCI if ARCH_PARROT7
+ help
+ Support Arasan Chip Systems SD3.0 / SDIO3.0 / eMMC4.41 AHB Host Controller driver.
+
+config MMC_SDHCI_ACS3_DEBUGFS
+ bool "ACS3 TDLs & max clk debugfs"
+ depends on MMC_SDHCI_ACS3
+ default n
+
diff --git a/drivers/parrot/mmc/Makefile b/drivers/parrot/mmc/Makefile
new file mode 100644
index 00000000000000..cf47224ad43c9f
--- /dev/null
+++ b/drivers/parrot/mmc/Makefile
@@ -0,0 +1,2 @@
+ccflags-$(CONFIG_MMC_DEBUG) += -DDEBUG
+obj-$(CONFIG_MMC_SDHCI_ACS3) += acs3-sdhci.o
diff --git a/drivers/parrot/mmc/acs3-sdhci.c b/drivers/parrot/mmc/acs3-sdhci.c
new file mode 100644
index 00000000000000..422167ebcf88f4
--- /dev/null
+++ b/drivers/parrot/mmc/acs3-sdhci.c
@@ -0,0 +1,920 @@
+/**
+ * linux/driver/parrot/mmc/acs3-sdhci.h - Arasan Chip Systems eMMC / SD / SDIO
+ * host driver implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Samir Ammenouche <samir.ammenouche@parrot.com>
+ * Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 20-Jan-2012
+ *
+ * This file is released under the GPL.
+ *
+ * Head to https://smet/projects/p7/wiki/SDIO !!
+ *
+ * TODO:
+ * - add support for SD bus clock / data edges alignment
+ * - add support for SD bus I/Os power cycling
+ * - add support for driver type selection
+ * - add support for voltage regulator if needed (see vmms field of
+ * sdhci_host structure);
+ * - review me !!
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include "acs3-sdhci.h"
+#include "../../mmc/host/sdhci.h"
+#include "../clk/p7clk-provider.h"
+
+#define ACS3_NAME_MAX (32U)
+
+#define ACS3_CUR_RETUNING_MODE ACS3_RETUNING_MODE_1
+
+#define ACS3_MAX_CURRENT 500
+
+/*
+ * Per host private data holder
+ */
+struct acs3_priv {
+ /* Platform specific data */
+ struct acs3_plat_data const* pdata;
+ /* Current mode */
+ int cur_mode;
+ /* Current mode config */
+ const struct acs3_mode_config* cur_config;
+ /* SD bus clock descriptor */
+ struct clk* sd;
+ /* XIN bus clock descriptor */
+ struct clk* xin;
+ /* Optional activity led GPIO line name */
+ char led_name[ACS3_NAME_MAX];
+ /* Optional write protect GPIO line name */
+ char wp_name[ACS3_NAME_MAX];
+ /* Optional card detect GPIO line name */
+ char cd_name[ACS3_NAME_MAX];
+ /* Optional eMMC hardware reset GPIO line name */
+ char rst_name[ACS3_NAME_MAX];
+ /* Card detect interrupt if any */
+ int cd_irq;
+ /* Host controller clock */
+ struct clk* clk;
+ /* Host controller pins */
+ struct pinctrl* pctl;
+ /* Host controller register space */
+ struct resource* res;
+ /* Array to save GBC registers during suspend/resume procedure */
+ u32 gbc_regs[4];
+};
+
+static void acs3_set_current_config(struct sdhci_host* host, u32 mode);
+
+/*************************
+ * SDHCI layer operations
+ *************************/
+
+/*
+ * Capabilities & present state registers interposer.
+ *
+ * Some capabilities MUST be set by host driver. Moreover we need to implement
+ * GPIO based card detection.
+ *
+ * May be called from interrupt context.
+ */
+/* 32-bit register read interposer (see block comment above). */
+static u32 acs3_readl(struct sdhci_host* host, int reg)
+{
+	struct acs3_priv * const priv = sdhci_priv(host);
+	u32 val = __raw_readl(host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+		if (! (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+			/* Use card detect gpio if not polling card status. */
+			if (gpio_get_value_cansleep(priv->pdata->cd_gpio))
+				/* gpio line tells us there is no card. */
+				val &= ~SDHCI_CARD_PRESENT;
+		}
+		/* When no card is present, the configuration is reset.
+		 * This is necessary when a ACS3_DEFAULT_SPEED card is inserted
+		 * after an ACS3_HIGH_SPEED or UHS card.
+		 */
+		if (!(val & SDHCI_CARD_PRESENT))
+			acs3_set_current_config(host, ACS3_DEFAULT_SPEED);
+	}
+	else if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
+		/* Timer count cannot be 0. */
+		val = (val & ~SDHCI_RETUNING_TIMER_COUNT_MASK) + (1 << SDHCI_RETUNING_TIMER_COUNT_SHIFT);
+		/* Disable DDR50 support on P7R2 */
+		if (priv->pdata->disable_ddr50)
+			val &= ~(SDHCI_SUPPORT_DDR50);
+	}
+	else if (unlikely(reg == SDHCI_MAX_CURRENT)) {
+		u32 const max_curr = ACS3_MAX_CURRENT / SDHCI_MAX_CURRENT_MULTIPLIER;
+
+		/*
+		 * Properly advertise board specific maximum current the
+		 * host may deliver.
+		 */
+		val |= (max_curr << SDHCI_MAX_CURRENT_330_SHIFT) &
+			SDHCI_MAX_CURRENT_330_MASK;
+		val |= (max_curr << SDHCI_MAX_CURRENT_300_SHIFT) &
+			SDHCI_MAX_CURRENT_300_MASK;
+
+		/* Set maximum current for 1.8V capable combinations. */
+		val |= (max_curr << SDHCI_MAX_CURRENT_180_SHIFT) &
+			SDHCI_MAX_CURRENT_180_MASK;
+	}
+
+	return val;
+}
+
+/*
+ * Interrupt related registers interposer.
+ *
+ * When using card detect GPIO interrupt (i.e., not polling card status), ensure
+ * we don't enable card presence notifications at host controller level (might
+ * also generate unexpected interrupts otherwise ??).
+ */
+/* 32-bit register write interposer (see block comment above). */
+static void acs3_writel(struct sdhci_host* host, u32 val, int reg)
+{
+	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+		if (! (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+			/*
+			 * These interrupts won't work with a custom
+			 * card detect gpio. Do not use them.
+			 */
+			val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+		}
+	}
+
+	__raw_writel(val, host->ioaddr + reg);
+}
+
+/*
+ * Host control 2 register interposer.
+ *
+ * Prevents the enabling of preset values.
+ *
+ * As 1.8V switch signals (sig_en_18v & ush1_18v_vreg_fail) are not connected,
+ * we need to wrap write accesses to properly drive external voltage switching
+ * device (managed using GPIO line).
+ *
+ * Set specific configuration for UHS modes.
+ */
+/* 16-bit register write interposer (see block comment above). */
+static void acs3_writew(struct sdhci_host* host, u16 val, int reg)
+{
+	/* Preset values are never used: strip the enable bit. */
+	if (unlikely(reg == SDHCI_HOST_CONTROL2) && (val & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+		val &= ~(SDHCI_CTRL_PRESET_VAL_ENABLE);
+	}
+
+	__raw_writew(val, host->ioaddr + reg);
+
+	/* After a 1.8V (UHS) mode switch, select the matching board
+	 * configuration for the requested UHS timing. */
+	if (unlikely(reg == SDHCI_HOST_CONTROL2) && host->vqmmc &&
+	    regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000) &&
+	    (val & SDHCI_CTRL_VDD_180)) {
+		if ((val & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR12)
+			acs3_set_current_config(host, ACS3_UHS_SDR12);
+		else if ((val & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR25)
+			acs3_set_current_config(host, ACS3_UHS_SDR25);
+		else if ((val & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50)
+			acs3_set_current_config(host, ACS3_UHS_SDR50);
+		else if ((val & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104)
+			acs3_set_current_config(host, ACS3_UHS_SDR104);
+		else if ((val & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_DDR50)
+			acs3_set_current_config(host, ACS3_UHS_DDR50);
+	}
+}
+
+/* 16-bit register read interposer.
+ * NOTE(review): a 32-bit __raw_readl is issued and truncated to 16 bits on
+ * return — presumably the controller tolerates word reads at these offsets;
+ * confirm against the Arasan AHB host user guide. */
+static u16 acs3_readw(struct sdhci_host* host, int reg)
+{
+	u32 val = __raw_readl(host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
+		/* Report the 1.8V signalling state from the external
+		 * regulator instead of the (unconnected) host signal. */
+		val &= ~SDHCI_CTRL_VDD_180;
+
+		if (host->vqmmc && (regulator_get_voltage(host->vqmmc) == 1800000))
+			/* Request voltage switching to 1.8V. */
+			val |= SDHCI_CTRL_VDD_180;
+	}
+	/* Empty the buffer register during the tuning process. */
+	else if (unlikely(reg == SDHCI_COMMAND)) {
+
+		if (SDHCI_GET_CMD(val) == MMC_SEND_TUNING_BLOCK ||
+		    SDHCI_GET_CMD(val) == MMC_SEND_TUNING_BLOCK_HS200) {
+			int i;
+			/* 16 words = one 64-byte tuning block. */
+			for (i = 0; i < 16; i++) {
+				__raw_readl(host->ioaddr + SDHCI_BUFFER);
+			}
+		}
+	}
+
+	return val;
+}
+
+/*
+ * Host control register interposer.
+ *
+ * As activity led signal is not connected, we need to wrap write accesses to
+ * drive activity led GPIO line accordingly.
+ *
+ * Activate ACS3_HIGH_SPEED configuration in High-Speed mode.
+ *
+ * May be called from atomic context.
+ */
+/* 8-bit register write interposer (see block comment above). */
+static void acs3_writeb(struct sdhci_host* host, u8 val, int reg)
+{
+	if (unlikely(reg == SDHCI_HOST_CONTROL)) {
+		struct acs3_priv const* const priv = sdhci_priv(host);
+
+		if (priv->led_name[0])
+			/* Blink led. */
+			gpio_set_value(priv->pdata->led_gpio, val & SDHCI_CTRL_LED);
+	}
+
+	__raw_writeb(val, host->ioaddr + reg);
+
+	/* Sometimes in UHS mode the bit SDHCI_CTRL_HISPD is high, so this case has
+	 * to be checked before activating ACS3_HIGH_SPEED configuration.
+	 */
+	if (unlikely(reg == SDHCI_HOST_CONTROL)){
+		u32 val2 = __raw_readl(host->ioaddr + SDHCI_HOST_CONTROL2);
+		if ((val & SDHCI_CTRL_HISPD) && !(val2 & SDHCI_CTRL_VDD_180)) {
+			acs3_set_current_config(host, ACS3_HIGH_SPEED);
+		}
+	}
+}
+
+/*
+ * Maximum reachable SD clock frequency is 130 MHz.
+ */
+static unsigned int acs3_get_maxclk(struct sdhci_host* host)
+{
+	struct acs3_priv const* const priv = sdhci_priv(host);
+
+	/* Use the board-provided per-mode cap when available, otherwise
+	 * fall back to the controller's hard limit (130 MHz). */
+	if ((priv->cur_config) && (priv->cur_config->max_Hz))
+		return priv->cur_config->max_Hz;
+	else
+		return 130000000;
+}
+
+/*
+ * Compute minimum reachable SD clock frequency.
+ */
+static unsigned int acs3_get_minclk(struct sdhci_host* host)
+{
+	/* Minimum SD clock = lowest XIN rate / largest SDHCI 3.0 divisor. */
+	return (unsigned int)
+	       clk_round_rate(((struct acs3_priv*) sdhci_priv(host))->xin, 0) /
+	       SDHCI_MAX_DIV_SPEC_300;
+}
+
+/*
+ * Set XIN clock in order to optimize SD clock rate.
+ */
+static void acs3_set_clk(struct sdhci_host* host, unsigned int hz)
+{
+	if (hz) {
+		struct acs3_priv const* const priv = sdhci_priv(host);
+		int err;
+
+		/* Set the SD bus clock; the clock framework propagates the
+		 * request to the parent XIN clock. */
+		err = clk_set_rate(priv->sd, hz);
+		if (err) {
+			dev_warn(mmc_dev(host->mmc),
+				 "failed to set sd clock rate to %u (%d)\n",
+				 hz,
+				 err);
+			return;
+		}
+
+		/* Keep the SDHCI layer's notion of the base clock in sync
+		 * with the effective XIN rate. */
+		host->max_clk = clk_get_rate(priv->xin);
+	}
+}
+
+/*
+ * Card lock / read only handling.
+ *
+ * As card write protect pin is not connected, we get read only status through
+ * an optional GPIO line usage.
+ * May be called from atomic context.
+ */
+static unsigned int acs3_get_ro(struct sdhci_host* host)
+{
+	struct acs3_priv const* const priv = sdhci_priv(host);
+
+	/* No WP gpio was successfully requested: no RO information. */
+	if (! priv->wp_name[0])
+		/*
+		 * Warning: this won't work if SDHCI_QUIRK_INVERTED_WRITE_PROTECT quirk
+		 * is used (see sdhci_check_ro).
+		 * If needed, implement mmc_host_ops.get_ro directly instead of using
+		 * the one installed be sdhci layer (see sdhci_get_ro). Or use a
+		 * platform specific flag to indicate RO GPIO line meaning is inverted.
+		 */
+		return -ENOSYS;
+
+	return gpio_get_value_cansleep(priv->pdata->wp_gpio);
+}
+
+/*
+ * eMMC4 hardware reset handling (extracted from sdhci-pci.c)
+ *
+ * As eMMC reset pin is unconnected, hardware reset is carried out using a GPIO
+ * line. Some board may choose not to provide one.
+ */
+static void acs3_reset(struct sdhci_host* host)
+{
+	struct acs3_priv const* const priv = sdhci_priv(host);
+
+	if (priv->rst_name[0]) {
+		/* Perform hardware reset only when GPIO line is available. */
+		int const gpio = priv->pdata->rst_gpio;
+
+		/* Pulse the active-low reset line. */
+		gpio_set_value_cansleep(gpio, 0);
+		/* For eMMC, minimum is 1us but give it 10us for good measure */
+		udelay(10);
+
+		gpio_set_value_cansleep(gpio, 1);
+		/* For eMMC, minimum is 200us but give it 300us for good measure */
+		usleep_range(300, 1000);
+	}
+}
+
+static struct sdhci_ops acs3_sdhci_ops = {
+ .read_l = acs3_readl,
+ .read_w = acs3_readw,
+ .write_l = acs3_writel,
+ .write_w = acs3_writew,
+ .write_b = acs3_writeb,
+ .set_clock = acs3_set_clk,
+ .get_max_clock = acs3_get_maxclk,
+ .get_min_clock = acs3_get_minclk,
+ .get_ro = acs3_get_ro,
+ .hw_reset = acs3_reset
+};
+
+/*
+ * TDL (Tap Delay Logic) setting
+ *
+ * TDL1 : In some cases the board layout may not be optimal and the Data may
+ * have hold time violations on the card. TDL1 can be used to delay the data so
+ * that the Hold time violations are fixed.
+ *
+ * TDL2 : During read operation the host controller acts as a receiver and the
+ * data may not be exactly aligned with respect to the clock. The TDL2 is used
+ * to phase shift the clock so that it is aligned with the received data.
+ */
+/* Program the TDL registers for the given @mode (see block comment above).
+ * No-op when the board provides no GBC registers or no mode config has been
+ * selected yet. */
+static inline void acs3_set_tdl(struct acs3_priv * priv, u32 mode)
+{
+	if ((!priv->pdata->regs) || (!priv->cur_config))
+		return;
+
+	/* TDL1 is used in all HS and UHS modes */
+	if (priv->pdata->regs->tdl1) {
+		if (priv->cur_config->tdl1)
+			__raw_writel(priv->cur_config->tdl1, priv->pdata->regs->tdl1);
+		else
+			__raw_writel(0x100, priv->pdata->regs->tdl1); /* 0x100 is the reset value of TDL1 register */
+
+	}
+
+	/* In DDR50 mode, the TDL2 is manually set. */
+	if ((mode & ACS3_UHS_DDR50) && (priv->pdata->regs->tdl2)) {
+		if (priv->cur_config->tdl2)
+			__raw_writel(priv->cur_config->tdl2, priv->pdata->regs->tdl2);
+		else
+			__raw_writel(0x100, priv->pdata->regs->tdl2); /* 0x100 is the reset value of TDL2 register */
+	}
+	/* In SDR50 and SDR104 modes, the TDL2 is calculated by the tuning. */
+	else if (mode & (ACS3_UHS_SDR50 | ACS3_UHS_SDR104)) {
+		/* When tuning is active, TDL2 must be enabled. */
+		if (priv->pdata->regs->tdl2) {
+			u32 tdl2_reg = __raw_readl(priv->pdata->regs->tdl2);
+			tdl2_reg |= 1; /* enable TDL2 */
+			__raw_writel(tdl2_reg, priv->pdata->regs->tdl2);
+		}
+		/* Tuning success count cannot be 0. */
+		if (priv->pdata->regs->ctrl) {
+			/* Get the CTRL register's current value and clear
+			 * the tuning success count. */
+			u32 val = __raw_readl(priv->pdata->regs->ctrl) & ~ACS3_CTRL_SUCCESS_CNT_MASK;
+
+			/* Add the tuning success count (clamped to [1, 0xf]). */
+			if (!priv->cur_config->tuning_success_count)
+				val += 1 << ACS3_CTRL_SUCCESS_CNT_SHIFT;
+			else if (priv->cur_config->tuning_success_count > 0xf)
+				val += 0xf << ACS3_CTRL_SUCCESS_CNT_SHIFT;
+			else
+				val += priv->cur_config->tuning_success_count << ACS3_CTRL_SUCCESS_CNT_SHIFT;
+
+			__raw_writel(val, priv->pdata->regs->ctrl);
+		}
+	}
+}
+
+/*
+ * Set max clock according to current mode configuration
+ */
+/* Propagate the current mode's clock limits to the MMC core. */
+static inline void acs3_set_mode_clock(struct sdhci_host* host) {
+	struct mmc_host *mmc = host->mmc;
+
+	/* Those parameters are used to set the host clock */
+	if (mmc) {
+		mmc->f_max = acs3_get_maxclk(host);
+		mmc->f_min = acs3_get_minclk(host);
+	}
+}
+
+/*
+ * Run through the mode_configs array to find a matching configuration
+ *
+ * Return 0 : if no matching configuration.
+ * Return 1 : if the matching configuration is different than the previous one.
+ * Return 2 : if the matching configuration is the same than the previous one.
+ */
+/* Look up @mode in @mode_configs and update priv->cur_config on a match
+ * (return codes documented in the block comment above). */
+static inline int acs3_get_new_config(struct acs3_priv* priv, u32 mode,
+				      const struct acs3_mode_config* mode_configs,
+				      int mode_configs_size) {
+	int i;
+
+	for (i = 0; i < mode_configs_size; i++) {
+		/* mode entries are bitfields: any overlap is a match. */
+		if (mode_configs[i].mode & mode) {
+			if (priv->cur_config != &(mode_configs[i])) {
+				/* Update the config */
+				priv->cur_config = &(mode_configs[i]);
+				return 1;
+			}
+			else
+				/* In this case, the config in the array is
+				 * found and is the same than the previous one.
+				 */
+				return 2;
+		}
+	}
+	/* The mode is not supported in the mode_configs array */
+	return 0;
+}
+
+/*
+ * When the host changes mode, the configuration must be updated.
+ *
+ * The following parameters must be updated:
+ * - max clock
+ * - TDLs
+ * - retuning parameters
+ */
+/*
+ * Switch the active board configuration when the host changes @mode:
+ * update max clock, TDLs and retuning parameters (see block comment above).
+ *
+ * Fixes vs. original: default_mode_config was dereferenced without a NULL
+ * check, and cur_config may legitimately stay NULL when neither table has
+ * a matching entry — guard before reading retune_timer.
+ */
+static void acs3_set_current_config(struct sdhci_host* host, u32 mode) {
+	struct acs3_priv* priv = sdhci_priv(host);
+	int mode_changed = (priv->cur_mode == mode) ? 0 : 1;
+
+	if (mode_changed) {
+		int config_found = 0;
+		/* Get a new configuration in private data if needed. */
+		if (priv->pdata->mode_config)
+			config_found = acs3_get_new_config(priv, mode,
+					priv->pdata->mode_config,
+					priv->pdata->nb_config);
+
+		/* If no configuration for the current mode is defined in the
+		 * private data, fall back to the default configuration for
+		 * this mode. */
+		if (!config_found && priv->pdata->default_mode_config)
+			acs3_get_new_config(priv, mode,
+					priv->pdata->default_mode_config, 4);
+
+		/* If the mode has changed, apply the configuration. */
+		priv->cur_mode = mode;
+		acs3_set_tdl(priv, mode);
+		acs3_set_mode_clock(host);
+		/* cur_config is NULL until a table entry matches. */
+		if (priv->cur_config && priv->cur_config->retune_timer)
+			host->tuning_count = priv->cur_config->retune_timer;
+	}
+}
+
+/********************
+ * GPIO line helpers
+ ********************/
+
+/* Request an optional GPIO line and record its label in @name.
+ * @name is left empty ("") when the gpio is invalid or busy, which is how
+ * the rest of the driver tests whether the line is usable.
+ * Returns 0 on success, -EINVAL for an invalid gpio, -EBUSY on request
+ * failure. */
+static int _acs3_init_gpio(struct sdhci_host* host,
+			   int gpio,
+			   unsigned long flags,
+			   char* name,
+			   char const* suffix,
+			   size_t size)
+{
+	struct device const* const dev = mmc_dev(host->mmc);
+	char const* const dname = dev_name(dev);
+
+	name[0] = '\0';
+	if (! gpio_is_valid(gpio))
+		return -EINVAL;
+
+	/* Label is "<device name> <suffix>", clamped to ACS3_NAME_MAX. */
+	snprintf(name,
+		 min(strlen(dname) + 1 + size, ACS3_NAME_MAX),
+		 "%s %s",
+		 dname,
+		 suffix);
+	if (! gpio_request_one(gpio, flags, name))
+		return 0;
+
+	name[0] = '\0';
+	dev_warn(dev,
+		 "failed to request %s gpio %d (" __stringify(-EBUSY) ")\n",
+		 suffix,
+		 gpio);
+
+	return -EBUSY;
+}
+
+#define acs3_init_gpio(_host, _gpio, _flags, _name, _suffix) \
+ _acs3_init_gpio(_host, _gpio, _flags, _name, _suffix, sizeof(_suffix))
+
+/* Release a GPIO line previously obtained by _acs3_init_gpio(); a non-empty
+ * @name marks the line as actually requested. */
+static void acs3_exit_gpio(int gpio, char const* name)
+{
+	if (name[0])
+		gpio_free(gpio);
+}
+
+/*
+ * Card detect GPIO interrupt handler: defer the actual card status change
+ * handling to the SDHCI card-detect work item passed as @dev_id.
+ *
+ * Fix vs. original: dropped the stray ';' after the function body.
+ */
+static irqreturn_t acs3_handle_cd(int irq, void* dev_id)
+{
+	struct work_struct *w = dev_id;
+
+	/* Since the request_irq has to be called before the add_host where the
+	 * workqueue is initialized, it is mandatory to test the initialization of
+	 * the workqueue before schedule it. */
+	if (w->func)
+		schedule_work(w);
+	return IRQ_HANDLED;
+}
+
+/***********************************
+ * Host controller setup & teardown
+ ***********************************/
+
+/* Release all resources acquired by acs3_init_host(): eMMC reset, card
+ * detect (irq + gpio, unless polling was in use), write protect and
+ * activity led lines. */
+static void acs3_exit_host(struct sdhci_host* host,
+			   struct acs3_plat_data const* pdata,
+			   struct acs3_priv const* priv)
+{
+	acs3_exit_gpio(pdata->rst_gpio, priv->rst_name);
+	if (! (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+		free_irq(priv->cd_irq, &host->card_detect_work);
+		gpio_free(pdata->cd_gpio);
+	}
+	acs3_exit_gpio(pdata->wp_gpio, priv->wp_name);
+	acs3_exit_gpio(pdata->led_gpio, priv->led_name);
+}
+
+/* Configure the SDHCI host (quirks, GPIO lines, card detect irq, OCR,
+ * capabilities) and register it with the SDHCI layer.
+ * Returns 0 on success; on failure everything acquired here is released. */
+static int __devinit acs3_init_host(struct sdhci_host* host,
+				    struct acs3_plat_data const* pdata,
+				    struct acs3_priv* priv)
+{
+	int err;
+
+	priv->pdata = pdata;
+	host->hw_name = ACS3_DRV_NAME;
+	host->ops = &acs3_sdhci_ops;
+	acs3_set_current_config(host, ACS3_DEFAULT_SPEED);
+
+	/*
+	 * Controller does not support unaligned buffer start address and size for
+	 * ADMA mode (see section 2.9 of Arasan SD3.0 / SDIO3.0 / eMMC4.41 AHB Host
+	 * Controller user guide).
+	 */
+	host->quirks = SDHCI_QUIRK_32BIT_ADMA_SIZE |
+		       SDHCI_QUIRK_32BIT_DMA_ADDR;
+
+	/* Timeout clock (TMCLK) is the same as sd clock.
+	 * Note: sometimes a device may specify timeouts that are larger than the
+	 * highest possible timeout in the host controller. In this case Linux
+	 * generates a warning you may inhibit using the
+	 * SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (see sdhci_calc_timeout).
+	 */
+	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+
+	/* Setup activity led blinking (optional: failure is not fatal). */
+	acs3_init_gpio(host,
+		       pdata->led_gpio,
+		       GPIOF_OUT_INIT_LOW,
+		       priv->led_name,
+		       "led");
+
+	/* Setup card lock / read only status (optional). */
+	acs3_init_gpio(host,
+		       pdata->wp_gpio,
+		       GPIOF_IN,
+		       priv->wp_name,
+		       "wp");
+
+	/* Setup card detect */
+	err = acs3_init_gpio(host,
+			     pdata->cd_gpio,
+			     GPIOF_IN,
+			     priv->cd_name,
+			     "cd");
+	if (! err) {
+		priv->cd_irq = gpio_to_irq(pdata->cd_gpio);
+#ifdef DEBUG
+		BUG_ON(priv->cd_irq < 0);
+#endif
+
+		/* If the GPIO access can sleep we have to use a threaded IRQ
+		 * handler. */
+		if (gpio_cansleep(pdata->cd_gpio))
+			err = request_threaded_irq(priv->cd_irq,
+						   NULL,
+						   acs3_handle_cd,
+						   IRQF_TRIGGER_FALLING |
+						   IRQF_TRIGGER_RISING,
+						   dev_name(mmc_dev(host->mmc)),
+						   &host->card_detect_work);
+		else
+			err = request_irq(priv->cd_irq,
+					  acs3_handle_cd,
+					  IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+					  dev_name(mmc_dev(host->mmc)),
+					  &host->card_detect_work);
+
+		if (err) {
+			gpio_free(pdata->cd_gpio);
+			dev_warn(mmc_dev(host->mmc),
+				 "failed to request cd irq %d (%d)\n",
+				 priv->cd_irq,
+				 err);
+		}
+	}
+	if (err)
+		/* Enable card detection in polling mode. */
+		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+	/* Setup eMMC hardware reset */
+	if (! acs3_init_gpio(host,
+			     pdata->rst_gpio,
+			     GPIOF_OUT_INIT_LOW,
+			     priv->rst_name,
+			     "rst"))
+		host->mmc->caps |= MMC_CAP_HW_RESET;
+
+	/* Setup card available voltages. */
+#ifdef DEBUG
+	BUG_ON(! pdata->brd_ocr);
+#endif
+	host->ocr_avail_mmc = pdata->brd_ocr;
+	host->ocr_avail_sd = host->ocr_avail_mmc;
+	host->ocr_avail_sdio = host->ocr_avail_mmc;
+
+	/* Enable DDR mode for eMMC chips */
+	host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+	/* Some specific capabilities can be set according to plat_data.
+	 * It has to be done before the sdhci_add_host, so the
+	 * necessary caps are already set when the host is initialized. */
+	if (pdata->mmc_caps)
+		host->mmc->caps |= pdata->mmc_caps;
+	if (pdata->mmc_caps2)
+		host->mmc->caps2 |= pdata->mmc_caps2;
+
+	/* Register host to SDHCI layer */
+	err = sdhci_add_host(host);
+	if (! err)
+		return 0;
+
+	acs3_exit_host(host, pdata, priv);
+	dev_err(mmc_dev(host->mmc), "failed to register host (%d)\n", err);
+	return err;
+}
+
+/*************************
+ * Power management hooks
+ *************************/
+
+#ifdef CONFIG_PM
+
+/*
+ * Suspend: let the SDHCI layer quiesce the host, snapshot the GBC registers
+ * (lost across suspend) and gate the clocks.
+ *
+ * Fix vs. original: pdata->regs is optional (probe guards it with a NULL
+ * check) but was dereferenced unconditionally here.
+ */
+static int acs3_suspend(struct platform_device* pdev, pm_message_t state)
+{
+	struct sdhci_host* const host = platform_get_drvdata(pdev);
+	struct acs3_priv * const priv = sdhci_priv(host);
+	int const err = sdhci_suspend_host(host);
+
+	if (err)
+		return err;
+
+	/* Save SDIO GBC registers (boards without GBC registers skip this) */
+	if (priv->pdata->regs) {
+		priv->gbc_regs[0] = __raw_readl(priv->pdata->regs->tdl1);
+		priv->gbc_regs[1] = __raw_readl(priv->pdata->regs->tdl2);
+		priv->gbc_regs[2] = __raw_readl(priv->pdata->regs->ctrl);
+		priv->gbc_regs[3] = __raw_readl(priv->pdata->regs->retune);
+	}
+
+	clk_disable_unprepare(priv->xin);
+	clk_disable_unprepare(priv->clk);
+	return 0;
+}
+
+/*
+ * Resume: re-enable the clocks, restore the GBC registers saved at suspend
+ * time, then let the SDHCI layer resume the host. On SDHCI resume failure
+ * the clocks are gated back.
+ *
+ * Fix vs. original: pdata->regs is optional (probe guards it with a NULL
+ * check) but was dereferenced unconditionally here.
+ */
+static int acs3_resume(struct platform_device* pdev)
+{
+	struct sdhci_host* const host = platform_get_drvdata(pdev);
+	struct acs3_priv const* const priv = sdhci_priv(host);
+	int err;
+
+	clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->xin);
+
+	/* Restore SDIO GBC registers (boards without GBC registers skip this) */
+	if (priv->pdata->regs) {
+		__raw_writel(priv->gbc_regs[0], priv->pdata->regs->tdl1);
+		__raw_writel(priv->gbc_regs[1], priv->pdata->regs->tdl2);
+		__raw_writel(priv->gbc_regs[2], priv->pdata->regs->ctrl);
+		__raw_writel(priv->gbc_regs[3], priv->pdata->regs->retune);
+	}
+
+	err = sdhci_resume_host(host);
+	if (! err)
+		return 0;
+
+	clk_disable_unprepare(priv->xin);
+	clk_disable_unprepare(priv->clk);
+	return err;
+}
+
+#else /* CONFIG_PM */
+
+#define acs3_suspend NULL
+#define acs3_resume NULL
+
+#endif /* CONFIG_PM */
+
+/************************
+ * Platform driver glue
+ ************************/
+
+/* Platform probe: allocate the SDHCI host, map registers, acquire and
+ * enable clocks and pins, then hand over to acs3_init_host(). Resources are
+ * unwound in reverse order on any failure. */
+static int __devinit acs3_probe(struct platform_device* pdev)
+{
+	int err;
+	struct resource* res;
+	struct acs3_priv* priv;
+	struct acs3_plat_data* const pdata = dev_get_platdata(&pdev->dev);
+	struct sdhci_host* const host = sdhci_alloc_host(&pdev->dev,
+							 sizeof(*priv));
+
+	if (IS_ERR(host)) {
+		dev_err(&pdev->dev, "failed to allocate SDHCI host\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (! res) {
+		err = -ENXIO;
+		dev_err(&pdev->dev, "failed to retrieve register space\n");
+		goto free;
+	}
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get interrupt\n");
+		goto free;
+	}
+	host->irq = err;
+
+	priv = sdhci_priv(host);
+	priv->res = request_mem_region(res->start,
+				       resource_size(res),
+				       dev_name(&pdev->dev));
+	if (! priv->res) {
+		err = -EBUSY;
+		dev_err(&pdev->dev, "failed to request register space\n");
+		goto free;
+	}
+
+	/* Set the retuning mode. This has to be done before the controller is unresetted */
+	if (pdata->regs && pdata->regs->ctrl) {
+		u32 val = __raw_readl(pdata->regs->ctrl) & ~ACS3_CTRL_RETUNING_MODE_MASK;
+		val += ACS3_CUR_RETUNING_MODE << ACS3_CTRL_RETUNING_MODE_SHIFT;
+		__raw_writel(val, pdata->regs->ctrl);
+	}
+
+	host->ioaddr = ioremap(res->start, resource_size(res));
+	if (! host->ioaddr) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "failed to remap register space\n");
+		goto release;
+	}
+
+	priv->clk = clk_get(&pdev->dev, "host");
+	if (IS_ERR(priv->clk)) {
+		err = PTR_ERR(priv->clk);
+		dev_err(&pdev->dev, "failed to get host clock (%d)\n", err);
+		goto unmap;
+	}
+
+	priv->xin = clk_get(&pdev->dev, "xin");
+	if (IS_ERR(priv->xin)) {
+		err = PTR_ERR(priv->xin);
+		dev_err(&pdev->dev, "failed to get xin clock (%d)\n", err);
+		goto put_clk;
+	}
+
+	/* Not sure if this is really necessary... */
+	err = clk_set_rate(priv->xin, 25000000);
+	if (err) {
+		dev_err(&pdev->dev, "failed to set initial xin clock rate (%d)\n", err);
+		goto put_xin;
+	}
+
+	priv->sd = clk_get(&pdev->dev, "sd");
+	if (IS_ERR(priv->sd)) {
+		err = PTR_ERR(priv->sd);
+		dev_err(&pdev->dev, "failed to get sd clock (%d)\n", err);
+		goto put_xin;
+	}
+
+	priv->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(priv->pctl)) {
+		err = PTR_ERR(priv->pctl);
+		dev_err(&pdev->dev, "failed to select I/O pins (%d)\n", err);
+		goto put_sd;
+	}
+
+	err = clk_prepare_enable(priv->clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable host clock (%d)\n", err);
+		goto put_pins;
+	}
+
+	err = clk_prepare_enable(priv->xin);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable xin clock (%d)\n", err);
+		goto disable_clk;
+	}
+
+	err = acs3_init_host(host, pdata, priv);
+	if (! err) {
+		platform_set_drvdata(pdev, host);
+		return 0;
+	}
+
+	clk_disable_unprepare(priv->xin);
+
+disable_clk:
+	clk_disable_unprepare(priv->clk);
+put_pins:
+	pinctrl_put(priv->pctl);
+put_sd:
+	clk_put(priv->sd);
+put_xin:
+	clk_put(priv->xin);
+put_clk:
+	clk_put(priv->clk);
+unmap:
+	iounmap(host->ioaddr);
+release:
+	release_mem_region(res->start, resource_size(res));
+free:
+	sdhci_free_host(host);
+	return err;
+}
+
+/* Platform remove: unregister from the SDHCI layer then release every
+ * resource acquired in acs3_probe(), in reverse order. */
+static int __devexit acs3_remove(struct platform_device* pdev)
+{
+	struct sdhci_host* const host = platform_get_drvdata(pdev);
+	struct acs3_priv* const priv = sdhci_priv(host);
+	int err = 0;
+
+	/* An all-ones interrupt status reads back when the device is already
+	 * gone ("dead" removal path). */
+	sdhci_remove_host(host,
+			  __raw_readl(host->ioaddr +
+				      SDHCI_INT_STATUS) == ~(0U));
+
+	acs3_exit_host(host, priv->pdata, priv);
+
+	clk_disable_unprepare(priv->xin);
+	clk_disable_unprepare(priv->clk);
+	clk_put(priv->sd);
+	clk_put(priv->xin);
+	clk_put(priv->clk);
+
+	pinctrl_put(priv->pctl);
+
+	iounmap(host->ioaddr);
+	release_mem_region(priv->res->start, resource_size(priv->res));
+
+	sdhci_free_host(host);
+	return err;
+}
+
+static struct platform_driver acs3_driver = {
+ .probe = acs3_probe,
+ .remove = __devexit_p(acs3_remove),
+ .suspend = acs3_suspend,
+ .resume = acs3_resume,
+ .driver = {
+ .name = ACS3_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(acs3_driver);
+
+MODULE_AUTHOR("Samir Ammenouche <samir.ammenouche@parrot.com>");
+MODULE_DESCRIPTION("Arasan Chip Systems SDHCI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/mmc/acs3-sdhci.h b/drivers/parrot/mmc/acs3-sdhci.h
new file mode 100644
index 00000000000000..ec518b9a418861
--- /dev/null
+++ b/drivers/parrot/mmc/acs3-sdhci.h
@@ -0,0 +1,113 @@
+/**
+ * linux/driver/parrot/mmc/acs3-sdhci.h - Arasan Chip Systems eMMC / SD / SDIO
+ * host driver interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 10-Sep-2012
+ *
+ * This file is released under the GPL
+ *
+ * See linux/drivers/parrot/mmc/acs3-sdhci.c file header
+ */
+
+#ifndef _ACS3_SDHCI_H
+#define _ACS3_SDHCI_H
+
+#include "../pinctrl/p7-pinctrl.h"
+
+#define ACS3_DRV_NAME "acs3-sdhci"
+
+/**
+ * struct acs3_regs - acs3 host registers addresses
+ * tdl1: TDL1 config register
+ * tdl2: TDL2 config register
+ * ctrl: Control register
+ * retune: Retune request register (for retuning mode 2)
+ */
+struct acs3_regs {
+ unsigned long tdl1;
+ unsigned long tdl2;
+ unsigned long ctrl;
+ unsigned long retune;
+};
+
+#define ACS3_CTRL_CMD_CONFLICT_DIS_MASK 0x1
+#define ACS3_CTRL_SUCCESS_CNT_MASK 0x1e
+#define ACS3_CTRL_SUCCESS_CNT_SHIFT 1
+#define ACS3_CTRL_RETUNING_MODE_MASK 0x60
+#define ACS3_CTRL_RETUNING_MODE_SHIFT 5
+
+#define ACS3_RETUNING_MODE_1 0
+#define ACS3_RETUNING_MODE_2 1
+#define ACS3_RETUNING_MODE_3 2
+
+/**
+ * struct acs3_plat_data - platform / board specific data
+ * @led_gpio: GPIO line used to drive activity led
+ * @wp_gpio: card lock / read only status GPIO line
+ * @cd_gpio: card detect GPIO line
+ * @rst_gpio: eMMC4 hardware reset GPIO line
+ * @brd_ocr: bit field of card Vdd voltages board supports
+ * @mmc_caps: host specific capabilities
+ * @mmc_caps2: more host specific capabilities
+ * @regs: addresses of the host's GBC registers
+ * @default_mode_config: array of default modes configurations (limited to 4
+ * different configurations)
+ * @mode_config: array of modes configurations
+ * @nb_config: size of @mode_config
+ * @disable_ddr50: do not advertise DDR50 support
+ */
+struct acs3_plat_data {
+	int led_gpio;
+	int wp_gpio;
+	int cd_gpio;
+	int rst_gpio;
+	u32 brd_ocr;
+	u32 mmc_caps;
+	u32 mmc_caps2;
+	const struct acs3_regs* regs;
+	const struct acs3_mode_config* default_mode_config;
+	const struct acs3_mode_config* mode_config;
+	unsigned int nb_config;
+	bool disable_ddr50;
+};
+
+/**
+ * The preset value registers support 8 modes (Initialization, default speed,
+ * high speed, SDR12, SDR25, SDR50, SDR104 and DDR50).
+ */
+#define ACS3_INIT (1U)
+#define ACS3_DEFAULT_SPEED (1U << 1)
+#define ACS3_HIGH_SPEED (1U << 2)
+#define ACS3_UHS_SDR12 (1U << 3)
+#define ACS3_UHS_SDR25 (1U << 4)
+#define ACS3_UHS_SDR50 (1U << 5)
+#define ACS3_UHS_SDR104 (1U << 6)
+#define ACS3_UHS_DDR50 (1U << 7)
+#define ACS3_UHS_SDR (ACS3_UHS_SDR12 | ACS3_UHS_SDR25 | \
+ ACS3_UHS_SDR50 | ACS3_UHS_SDR104)
+
+/**
+ * struct acs3_mode_config - mode specific configuration
+ * @mode: Modes supported by this configuration. This parameter
+ * is a bitfield.
+ * @max_Hz: Max clock supported in this mode
+ * @tdl1: TDL1 for this mode (supported in HS & UHS)
+ * @tdl2: TDL2 for this mode (supported only in DDR50 mode)
+ * @tuning_success_count: Number of success tuning pattern (supported only in
+ * SDR50 & SDR104 mode)
+ * @retune_timer: Timer count for re-tuning (supported only in SDR50 &
+ * SDR104 mode)
+ */
+struct acs3_mode_config {
+ u32 mode;
+ u32 max_Hz;
+ u32 tdl1;
+ u32 tdl2;
+ u32 tuning_success_count;
+ u32 retune_timer;
+};
+
+#endif
diff --git a/drivers/parrot/nand/Kconfig b/drivers/parrot/nand/Kconfig
new file mode 100644
index 00000000000000..adc82c2653f112
--- /dev/null
+++ b/drivers/parrot/nand/Kconfig
@@ -0,0 +1,6 @@
+config MTD_NAND_CAST
+ tristate "Support for CAST NAND Controller"
+ depends on MTD_NAND
+ default MTD_NAND if ARCH_PARROT7
+ help
+ Enables support for CAST NAND Controller with hardware ECC module.
diff --git a/drivers/parrot/nand/Makefile b/drivers/parrot/nand/Makefile
new file mode 100644
index 00000000000000..4999e4af2213f1
--- /dev/null
+++ b/drivers/parrot/nand/Makefile
@@ -0,0 +1,5 @@
+#
+# linux/drivers/nand/Makefile
+#
+obj-$(CONFIG_MTD_NAND_CAST) += cast-nand.o
+cast-nand-objs := cast-base.o cast-timings.o
diff --git a/drivers/parrot/nand/cast-base.c b/drivers/parrot/nand/cast-base.c
new file mode 100644
index 00000000000000..ed8c81ac08843f
--- /dev/null
+++ b/drivers/parrot/nand/cast-base.c
@@ -0,0 +1,2173 @@
+/**
+ * CAST Nand Controller Driver
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: G.Tian <guangye.tian@parrot.com>
+ * date: 2012-10-24
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/jiffies.h>
+#include <linux/pinctrl/consumer.h>
+#include "cast-regs.h"
+#include "cast-nand.h"
+#include "cast-timings.h"
+
+#define NF_TRANSFER_WRITE 0
+#define NF_TRANSFER_READ 1
+
+/* bad-block marker bytes */
+#define BBM_BYTES 2
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int synmode = 0;
+module_param(synmode, int, 0644);
+
+static int eccbits = 0;
+module_param(eccbits, int, 0644);
+
+static int cast_tim_asyn_wr;
+static int cast_tim_asyn_rd;
+
+static int cast_wait_ready(struct mtd_info *mtd, int irq);
+
+struct cast_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *regs;
+
+ /* First 5 bytes of read id 0x00 command */
+ u8 id[8];
+
+ /* page per block, complementing mtd_info */
+ u32 ppb;
+ int ecc_bits;
+
+ /*
+ * data is read word by word from cast
+ * unused bytes should be saved.
+ */
+ u8 data_buf[4];
+ u32 data_flag; /* 1: data_buf contains data 0: no */
+ u32 byte_idx;
+
+ /* DMA operations */
+ dma_addr_t dmaaddr;
+ unsigned char *dmabuf;
+
+ u32 cmd_precd;
+ struct completion complete;
+
+ int mode_curr;
+ struct onfi_par_page par_page;
+
+ int chip_nr;
+ struct clk* clk_ctrl;
+ struct clk* clk_phy;
+ struct pinctrl *pctl;
+ struct resource *area;
+
+ /*
+ * programmed-page marker size in spare area
+ * ppm follows bad block marker (offset 2)
+ */
+ uint32_t ppm;
+
+ uint32_t ecc_threshold;
+
+ int benand;
+};
+
+#define cast_readl(_host, _reg) readl(_host->regs + _reg)
+#define cast_writel(_val, _host, _reg) writel(_val, _host->regs + _reg)
+
+static struct nand_ecclayout cast_ecc_oob_layout;
+
+/*
+ * Common helper for the rom_* sysfs attributes: dump the current value of
+ * controller register @reg as "0x%08x\n" into @buf.
+ */
+static ssize_t cast_read_ctrl(struct device *dev, char *buf, int reg)
+{
+	struct platform_device *pdev;
+	struct cast_nand_host *host;
+	u32 reg_val;
+
+	/* recover the host handle from the embedded struct device */
+	pdev = container_of(dev, struct platform_device, dev);
+	host = platform_get_drvdata(pdev);
+	reg_val = cast_readl(host, reg);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+/*
+ * sysfs show: CONTROL register value suitable for the boot ROM (presumably
+ * consumed at boot -- see the rom_* attribute names): the current value
+ * with the spare/interrupt/custom-size/16-bit/sync bits cleared and ECC
+ * forced on.
+ */
+static ssize_t get_rom_ctrl_reg(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+	struct platform_device *pdev;
+	struct cast_nand_host *host;
+	u32 reg_val;
+
+	pdev = container_of(dev, struct platform_device, dev);
+	host = platform_get_drvdata(pdev);
+
+	reg_val = cast_readl(host, CAST_CONTROL);
+	reg_val &= ~(CAST_CTRL_SPARE_EN |
+	             CAST_CTRL_INT_EN |
+	             CAST_CTRL_CUSTOM_SIZE_EN |
+	             CAST_CTRL_IO_WIDTH_16 |
+	             CAST_CTRL_SYNC_MODE);
+	reg_val |= CAST_CTRL_ECC_EN;
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+static ssize_t get_rom_eccctrl_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return cast_read_ctrl(dev, buf, CAST_ECC_CTRL);
+}
+
+static ssize_t get_rom_eccoff_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return cast_read_ctrl(dev, buf, CAST_ECC_OFFSET);
+}
+
+static ssize_t get_rom_gen_seq_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /* customized nand read/write sequence for rom */
+ u32 reg_val = 0x30053;
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+static ssize_t get_rom_time_seq0_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 reg_val = 0x090d0d1f;
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+static ssize_t get_rom_time_seq1_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 reg_val = 0x0000081f;
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+static ssize_t get_rom_time_asyn_reg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 reg_val = 0x00000084;
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", reg_val);
+}
+
+/*
+ * All of these attributes are read-only (their store callback is NULL),
+ * so they must be exposed with mode 0444: 0644 would advertise a write
+ * permission sysfs cannot honour and trips the kernel's attribute
+ * permission sanity checks.
+ */
+static DEVICE_ATTR(rom_ctrl, 0444, get_rom_ctrl_reg, NULL);
+static DEVICE_ATTR(rom_ecc_ctrl, 0444, get_rom_eccctrl_reg, NULL);
+static DEVICE_ATTR(rom_ecc_offset, 0444, get_rom_eccoff_reg, NULL);
+static DEVICE_ATTR(rom_gen_seq_ctrl, 0444, get_rom_gen_seq_reg, NULL);
+static DEVICE_ATTR(rom_time_seq_0, 0444, get_rom_time_seq0_reg, NULL);
+static DEVICE_ATTR(rom_time_seq_1, 0444, get_rom_time_seq1_reg, NULL);
+static DEVICE_ATTR(rom_time_asyn, 0444, get_rom_time_asyn_reg, NULL);
+
+/*
+ * Interrupt handler.  A zero status means this controller did not raise
+ * the interrupt (shared line), so report IRQ_NONE.  Otherwise clear the
+ * status register and wake the waiter blocked on host->complete.
+ */
+static irqreturn_t cast_irq(int irq, void *dev_id)
+{
+	struct cast_nand_host *host = dev_id;
+	u32 status = cast_readl(host, CAST_INT_STATUS);
+	if (status == 0)
+		return IRQ_NONE;
+	/* clear irq */
+	cast_writel(0, host, CAST_INT_STATUS);
+
+	complete(&host->complete);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * Wait until a register flag reaches a target value.
+ *
+ * @param host cast_nand_host structure pointer
+ * @param reg Register to poll
+ * @param mask Mask to apply to register value
+ * @param target Set to 0 or 1 to indicate target value (0 or mask)
+ * @param timeo Timeout value in jiffies
+ *
+ * @return 0 if successful, -1 upon timeout
+ */
+static int wait_for_flag(struct cast_nand_host *host,
+                         uint32_t reg, uint32_t mask,
+                         uint32_t target, unsigned long timeo)
+{
+	uint32_t busy = 0;
+	/* absolute deadline in jiffies */
+	timeo += jiffies;
+
+	/* normalize target to the raw masked value we compare against */
+	target = target ? mask : 0;
+
+	/* busy-poll; the flag is re-read once more after the deadline check */
+	busy = ((cast_readl(host, reg) & mask) != target);
+	while (busy) {
+		busy = ((cast_readl(host, reg) & mask) != target);
+		if (time_is_before_jiffies(timeo)) {
+			break;
+		}
+		cpu_relax();
+	}
+
+	return busy ? -1 : 0;
+}
+
+/**
+ * Wait target ready: first for the controller to go idle, then for the
+ * currently selected device's ready bit.
+ *
+ * @param host cast_nand_host structure pointer
+ * @param timeo timeout in jiffies (applied to each of the two waits)
+ *
+ * @return 0 if successful; -1 upon timeout
+ */
+static int wait_until_target_rdy(struct cast_nand_host *host,
+                                 unsigned long timeo)
+{
+	uint32_t busy;
+
+	/* wait for controller */
+	busy = wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT,
+	                     0, timeo);
+
+	/*
+	 * wait for device -- the result must be captured, otherwise a
+	 * device-ready timeout would be silently reported as success
+	 */
+	if (!busy)
+		busy = wait_for_flag(host, CAST_STATUS,
+		                     CAST_STAT_MEMX_ST(host->chip_nr), 1, timeo);
+
+	return busy;
+}
+
+/*
+ * Wait for the nand flash RNB line to get high.
+ * @irq: non-zero = sleep on the controller interrupt (cast_irq() signals
+ *       host->complete); zero = busy-poll the status register.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int cast_wait_ready(struct mtd_info *mtd, int irq)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	unsigned long timeo;
+	int res;
+	int err = 0;
+
+	/* erase is the slowest operation: give it a larger budget */
+	if (nand_chip->state == FL_ERASING) {
+		timeo = msecs_to_jiffies(4000);
+	} else {
+		timeo = msecs_to_jiffies(2000);
+	}
+
+	if (irq) {
+		res = wait_for_completion_timeout(&host->complete, timeo);
+		if (!res) {
+			dev_err(mtd->dev.parent, "Timeout in irq\n");
+			err = -1;
+		}
+		/* Disable all interrupts */
+		cast_writel(0, host, CAST_INT_MASK);
+	} else {
+		err = wait_until_target_rdy(host, timeo);
+	}
+	return err;
+}
+
+/* RESET opcode used while the source-synchronous interface is active
+ * (the regular 0xff NAND_CMD_RESET is used in asynchronous mode). */
+#define NAND_CMD_RESET_SYN 0xfc
+/* Issue a flash RESET and wait (polling) for the device to come back. */
+static void nand_reset(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u32 reg;
+
+	/* pick the reset opcode matching the current data interface */
+	if (cast_readl(host, CAST_CONTROL) & CAST_CTRL_SYNC_MODE)
+		reg = (NAND_CMD_RESET_SYN << SHIFT_CMD_0) | CAST_CMD_SEQ_0;
+	else
+		reg = (NAND_CMD_RESET << SHIFT_CMD_0) | CAST_CMD_SEQ_0;
+	cast_writel(reg, host, CAST_COMMAND);
+	cast_wait_ready(mtd, 0);
+}
+
+/*
+ * Issue READ ID (column 0x00, or 0x20 for the ONFI signature) and leave
+ * the returned bytes in the controller FIFO for cast_read_byte().
+ */
+static void nand_read_id(struct mtd_info *mtd, int column)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u32 reg;
+
+	wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	/* readid has only two probable addresses */
+	if (column != 0x20)
+		column = 0x00;
+
+	/* Transfer customized size of data */
+	reg = cast_readl(host, CAST_CONTROL);
+	reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+	/* ECC module can be turned on only when CUSTOM_SIZE_EN = 0 */
+	reg &= ~CAST_CTRL_ECC_EN;
+	cast_writel(reg, host, CAST_CONTROL);
+
+	/* 5 bytes should be enough for id or onfi signature */
+	cast_writel(5, host, CAST_DATA_SIZE);
+
+	/* 1 cycle address: 0x00 */
+	cast_writel(column, host, CAST_ADDR0_0);
+	cast_writel(0x00, host, CAST_ADDR0_1);
+
+	/* Read ID command */
+	reg = (NAND_CMD_READID << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_INPUT_SEL_SIU |
+	      CAST_CMD_SEQ_1;
+	cast_writel(reg, host, CAST_COMMAND);
+
+	cast_wait_ready(mtd, 0);
+
+	/* clean internal buffer */
+	host->data_flag = 0;
+	host->byte_idx = 0;
+}
+
+/*
+ * Issue READ STATUS.  The status byte is latched into the CAST_READ_STATUS
+ * register (not the data FIFO); cast_read_byte() fetches it from there
+ * when host->cmd_precd == NAND_CMD_STATUS.
+ */
+static void nand_read_status(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u32 reg;
+
+	/* wait for controller */
+	wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT, 0,
+	              msecs_to_jiffies(200));
+
+	/* READ STATUS command */
+	reg = (NAND_CMD_STATUS << SHIFT_CMD_0) | CAST_CMD_SEQ_4;
+	cast_writel(reg, host, CAST_COMMAND);
+
+	/* Wait for read_status register ready */
+	wait_for_flag(host, CAST_STATUS,
+	              CAST_STAT_MEMX_ST(host->chip_nr), 1,
+	              msecs_to_jiffies(200));
+}
+
+/* @column is the offset within the oob area */
+static int cast_read_oob(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct cast_nand_host *host = nand_chip->priv;
+ u32 reg;
+ u32 block, page_shift, page_addr;
+ int i;
+
+ wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+ /* Reset fifo */
+ cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+ reg = cast_readl(host, CAST_CONTROL);
+ reg &= ~CAST_CTRL_ECC_EN;
+ reg &= ~CAST_CTRL_SPARE_EN;
+ reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+ cast_writel(reg, host, CAST_CONTROL);
+
+ cast_writel(mtd->oobsize, host, CAST_DATA_SIZE);
+
+ block = page / host->ppb;
+ page -= block * host->ppb;
+ page_shift = fls(host->ppb - 1);
+ page_addr = (block << page_shift) | page;
+
+ cast_writel((mtd->writesize & 0xFFFF) | (page_addr << 16),
+ host, CAST_ADDR0_0);
+ cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+ reg = (NAND_CMD_READSTART << SHIFT_CMD_1) |
+ (NAND_CMD_READ0 << SHIFT_CMD_0) |
+ CAST_CMD_ADDR_SEL0 |
+ CAST_CMD_INPUT_SEL_SIU |
+ CAST_CMD_SEQ_10;
+
+ cast_writel(reg, host, CAST_COMMAND);
+
+ /* wait for controller */
+ wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT, 0,
+ msecs_to_jiffies(200));
+
+ /*
+ * Spare size read can only be started from
+ * the begining of the spare area.
+ * Remove surplus data from the fifo.
+ */
+ for (i = 0; i < column; i += 4) {
+ wait_for_flag(host, CAST_FIFO_STATE, CAST_FIFO_STATE_STAT, 1,
+ msecs_to_jiffies(200));
+ *(uint32_t *)(host->data_buf) = cast_readl(host,
+ CAST_FIFO_DATA);
+ }
+ if (i == column) {
+ host->data_flag = 0;
+ host->byte_idx = 0;
+ } else {
+ host->data_flag = 1;
+ host->byte_idx = column % 4;
+ }
+ return 0;
+}
+
+static int cast_onfi_param(struct mtd_info *mtd, int column, int size)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct cast_nand_host *host = nand_chip->priv;
+ u32 reg;
+
+ wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+ /* Reset fifo */
+ cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+ /* Transfer customized size of data */
+ reg = cast_readl(host, CAST_CONTROL);
+ reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+ reg &= ~CAST_CTRL_ECC_EN;
+ cast_writel(reg, host, CAST_CONTROL);
+
+ cast_writel(0, host, CAST_DATA_SIZE);
+
+ /* 1 cycle address: 0x00 */
+ cast_writel(0x00, host, CAST_ADDR0_0);
+ cast_writel(0x00, host, CAST_ADDR0_1);
+
+ /* read parameter page command */
+ reg = (NAND_CMD_PARAM << SHIFT_CMD_0) |
+ CAST_CMD_ADDR_SEL0 |
+ CAST_CMD_INPUT_SEL_SIU |
+ CAST_CMD_SEQ_2;
+ cast_writel(reg, host, CAST_COMMAND);
+ cast_wait_ready(mtd, 0);
+
+ cast_writel(size, host, CAST_DATA_SIZE);
+ cast_writel(column, host, CAST_ADDR0_0);
+
+ reg = (0x05 << SHIFT_CMD_0) |
+ (0xe0 << SHIFT_CMD_1) |
+ CAST_CMD_ADDR_SEL0 |
+ CAST_CMD_SEQ_6;
+ cast_writel(reg, host, CAST_COMMAND);
+ cast_wait_ready(mtd, 0);
+
+ /* clean internal buffer */
+ host->data_flag = 0;
+ host->byte_idx = 0;
+
+ return 0;
+}
+
+/*
+ * nand_chip->cmdfunc hook: dispatch generic MTD commands to the
+ * controller-specific sequences above.
+ */
+static void cast_cmdfunc(struct mtd_info *mtd, unsigned command,
+                         int column, int page_addr)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	switch (command) {
+	case NAND_CMD_RESET:
+		nand_reset(mtd);
+		break;
+	case NAND_CMD_READID:
+		nand_read_id(mtd, column);
+		break;
+	case NAND_CMD_STATUS:
+		nand_read_status(mtd);
+		/* tell cast_read_byte() to fetch from CAST_READ_STATUS */
+		host->cmd_precd = NAND_CMD_STATUS;
+		break;
+	case NAND_CMD_READOOB:
+		command = NAND_CMD_READ0;
+		cast_read_oob(mtd, column, page_addr);
+		break;
+	case NAND_CMD_PARAM:
+		/* read 3 redundant copies of the ONFI parameter page */
+		cast_onfi_param(mtd, 0,
+		                sizeof(struct nand_onfi_params)*3);
+		break;
+	case NAND_CMD_READ0:
+		/* XXX: deliberately a no-op here -- presumably page reads go
+		 * through the ecc read-page path; confirm */
+		break;
+	default:
+		dev_err(mtd->dev.parent, "unsupported cmd %d\n", command);
+		dump_stack();
+		break;
+	}
+}
+
+/* Access to ready/busy line: 1 = selected chip ready, 0 = busy */
+static int cast_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	int stat = (cast_readl(host, CAST_STATUS)
+	            & CAST_STAT_MEMX_ST(host->chip_nr)) == 0 ? 0 : 1;
+	return stat;
+}
+
+/* nand_chip->select_chip hook: route chip-enable to die @chip (0..7). */
+static void cast_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u32 reg;
+
+	if (chip == -1) {
+		/* Just ignore if deselecting chip */
+		return;
+	} else if ((chip >= 8) || (chip < 0)) {
+		dev_err(mtd->dev.parent, "Chip %d exceeds maximum"
+		        " supported flash\n", chip);
+		return;
+	} else {
+		host->chip_nr = chip;
+		/* program the CE field of MEM_CTRL with the die number */
+		reg = cast_readl(host, CAST_MEM_CTRL);
+		reg &= ~CAST_MEM_CTRL_CE_MASK;
+		reg |= chip;
+		cast_writel(reg, host, CAST_MEM_CTRL);
+	}
+}
+
+/*
+ * nand_chip->read_byte hook.  The data FIFO is 32 bits wide, so a whole
+ * word is popped at a time; unconsumed bytes are parked in host->data_buf
+ * and tracked by host->data_flag / host->byte_idx.
+ */
+static u8 cast_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u8 one_byte;
+
+	/*
+	 * Byte returned from read status cmd is stored in READ_STATUS reg
+	 * not in the FIFO_DATA reg.
+	 */
+	if (host->cmd_precd == NAND_CMD_STATUS) {
+		*(u32 *)(host->data_buf) = cast_readl(host, CAST_READ_STATUS);
+		host->cmd_precd = -1;
+		return host->data_buf[0];
+	}
+
+	if (host->data_flag) {
+		/* drain bytes buffered from the previously popped word */
+		one_byte = host->data_buf[host->byte_idx];
+		host->byte_idx++;
+		if (host->byte_idx == 4) {
+			host->byte_idx = 0;
+			host->data_flag = 0;
+		}
+	} else {
+		/* pop a fresh 32-bit word and hand out its first byte */
+		wait_for_flag(host, CAST_FIFO_STATE, CAST_FIFO_STATE_STAT,
+		              1, (HZ * 200) / 1000);
+		*(u32 *)(host->data_buf) = cast_readl(host, CAST_FIFO_DATA);
+		one_byte = host->data_buf[0];
+		host->data_flag = 1;
+		host->byte_idx = 1;
+	}
+	return one_byte;
+}
+
+/* nand_chip->read_buf hook: byte-wise drain via cast_read_byte(). */
+static void cast_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	while (len--) {
+		*buf++ = cast_read_byte(mtd);
+	}
+}
+
+/**
+ * Read @size bytes directly from NAND controller
+ *
+ * @param host cast nand host
+ * @param buf data buffer
+ * @param size bytes to be read
+ *
+ * @return 0 - success, -1 - timeout
+ */
+static int nand_read_buf(struct cast_nand_host *host,
+ uint8_t *buf, unsigned int size)
+{
+ int err = 0;
+ unsigned int i;
+ uint32_t *data;
+ struct mtd_info *mtd = &(host->mtd);
+
+ WARN_ON((unsigned long)buf & 3);
+ WARN_ON(size & 3);
+ /* transfer 32-bit words */
+ data = (uint32_t *)((unsigned long)buf & ~3UL);
+ size >>= 2;
+
+ for (i = 0; i < size; i++) {
+ err = wait_for_flag(host, CAST_FIFO_STATE,
+ CAST_FIFO_STATE_STAT, 1,
+ msecs_to_jiffies(200));
+ if (err) {
+ dev_dbg(mtd->dev.parent,
+ "%s: transfer timeout\n", __func__);
+ break;
+ }
+ *data++ = cast_readl(host, CAST_FIFO_DATA);
+ }
+ return err;
+}
+
+/**
+ * Write @size bytes to NAND controller
+ *
+ * @param host cast nand host
+ * @param buf data buffer
+ * @param size bytes to write
+ *
+ * @return 0 - success, -1 - timeout
+ */
+static int nand_write_buf(struct cast_nand_host *host,
+                          const u8 *buf, unsigned int size)
+{
+	int err = 0;
+	unsigned int i;
+	u32 *data;
+	struct mtd_info *mtd = &(host->mtd);
+
+	/* transfer 32-bit words */
+	/*
+	 * NOTE(review): unlike nand_read_buf() there is no WARN_ON for a
+	 * misaligned @buf or @size here; the mask below silently rounds the
+	 * pointer down -- confirm callers always pass 4-byte aligned buffers
+	 * and word-multiple sizes.
+	 */
+	data = (u32 *)((unsigned long)buf & ~3UL);
+	size >>= 2;
+
+	err = wait_for_flag(host, CAST_FIFO_STATE,
+	                    CAST_FIFO_STATE_STAT, 1,
+	                    msecs_to_jiffies(200));
+	if (!err) {
+		for (i = 0; i < size; i++)
+			cast_writel(*data++, host, CAST_FIFO_DATA);
+	} else {
+		dev_dbg(mtd->dev.parent, "%s: transfer timeout\n", __func__);
+	}
+	return err;
+}
+
+/*
+ * Setting CAST controller timing registers to
+ * desired speed mode and data interface
+ */
+static void cast_init_timings(struct mtd_info *mtd, int mode, int syn, int tccs)
+{
+ u32 period;
+ u32 pll;
+ int interface;
+ struct nand_chip *nand_chip = mtd->priv;
+ struct cast_nand_host *host = nand_chip->priv;
+ struct cast_plat_data const* pdata;
+ int trhw;
+ int tref;
+
+ int i;
+ int temp;
+
+ union {
+ struct cast_timings timings;
+ u16 values[sizeof(struct cast_timings)/sizeof(u16)];
+ } ucycles;
+
+ struct cast_timings * const cycles = &ucycles.timings;
+
+ /* get device cast-nand.0 platform data */
+ pdata = dev_get_platdata(mtd->dev.parent);
+
+ /* Interruption cleaning up */
+ cast_writel(0, host, CAST_INT_STATUS);
+ cast_writel(0, host, CAST_INT_MASK);
+
+ interface = syn ? T_SYN : T_ASYN;
+
+ if (nand_chip->onfi_version && (nand_chip->onfi_params.t_ccs)
+ && !tccs) {
+ tccs = nand_chip->onfi_params.t_ccs;
+ } else if (!tccs) {
+ /* no onfi or not yet detected */
+ tccs = 500;
+ }
+
+ dev_dbg(mtd->dev.parent, "chip_rev: %d\n", pdata->chip_rev);
+ if (syn)
+ pll = cast_modes[T_SYN][mode].pll;
+ else if (pdata->chip_rev == 0) {
+ /*
+ * This is for correcting mpw1 limit:
+ * Trhw is fixed at 6 clock cycles instead of
+ * being reconfigurable by register TIME_SEQ_0.
+ */
+ trhw = cast_modes[interface][mode].trhw;
+ pll = (1000000000/DIV_ROUND_UP(trhw, 6)) << 2;
+ } else {
+ /*
+ * The tccs/tadl/trhw/twhr fields of cast controller
+ * contains the biggest value which set the lower limit
+ * for clock period.
+ */
+ tref = max(max(tccs, cast_modes[interface][mode].trhw),
+ max(cast_modes[interface][mode].twhr,
+ cast_modes[interface][mode].tadl));
+ pll = ((1000000000/tref)<<5) << 2;
+
+ if (mode == 5 && pll > 400000000)
+ pll = 400000000;
+ else if (mode == 4 && pll > 360000000)
+ pll = 360000000;
+ else if (pll > 400000000)
+ pll = 400000000;
+ }
+
+ clk_disable(host->clk_phy);
+ clk_set_rate(host->clk_phy, pll);
+ clk_enable(host->clk_phy);
+
+ temp = pll;
+ pll = clk_get_rate(host->clk_phy);
+ dev_info(mtd->dev.parent, "pll_nand: %dHz (wanted=%dHz mode=%d)\n",
+ pll, temp, mode);
+
+ period = (4000000000UL / (pll / 1000));
+ dev_dbg(mtd->dev.parent, "period %d\n", period);
+
+ /* convert timings from ns to cycles */
+ memcpy(cycles, &cast_modes[interface][mode], sizeof(*cycles));
+ cycles->tccs = tccs;
+ for (i = 0; i < ARRAY_SIZE(ucycles.values); i++)
+ ucycles.values[i] = DIV_ROUND_UP(ucycles.values[i]*1000, period);
+
+ if (cycles->tccs > 32)
+ cycles->tccs = 32;
+
+ temp = cycles->twc - cycles->twh - cycles->twp;
+ if (temp > 0)
+ cycles->twp += temp;
+
+ /* we increase here trp because it should be > tREA + board_REA */
+ temp = cycles->trc - cycles->treh - cycles->trp;
+ if (temp > 0)
+ cycles->trp += temp;
+
+ /* real cycle is (reg_value + 1) */
+ for (i = 0; i < ARRAY_SIZE(ucycles.values); i++)
+ if (ucycles.values[i] >= 1)
+ ucycles.values[i] -= 1;
+
+ WARN_ON(cycles->treh < cycles->twh);
+ WARN_ON(cycles->trp < cycles->twp);
+ /*
+ * TIMINGS_ASYN
+ * TRWH[7:4], TRWP[3:0]
+ */
+ cast_tim_asyn_rd = ((cycles->treh & 0xf) << 4)| (cycles->trp & 0xf);
+ cast_tim_asyn_wr = ((cycles->twh & 0xf) << 4)| (cycles->twp & 0xf);
+
+ cast_writel(cast_tim_asyn_rd, host, CAST_TIMINGS_ASYN);
+ dev_info(mtd->dev.parent, "CAST_TIMINGS_ASYN rd=0x%x wr=0x%x\n",
+ cast_tim_asyn_rd, cast_tim_asyn_wr);
+
+ /*
+ * TIMINGS_SYN
+ * TCAD[3:0]
+ */
+ cast_writel(cycles->tcad & 0xf, host, CAST_TIMINGS_SYN);
+
+ /*
+ * TIME_SEQ_0
+ * TWHR[28:24], TRHW[20:16], TADL[12:8], TCCS[4:0]
+ */
+ cast_writel(((cycles->twhr & 0x1f) << 24) |
+ ((cycles->trhw & 0x1f) << 16) |
+ ((cycles->tadl & 0x1f) << 8) |
+ (cycles->tccs & 0x1f), host, CAST_TIME_SEQ_0);
+ dev_dbg(mtd->dev.parent, "CAST_TIME_SEQ_0 0x%x\n", ((cycles->twhr & 0x1f) << 24) |
+ ((cycles->trhw & 0x1f) << 16) |
+ ((cycles->tadl & 0x1f) << 8) |
+ (cycles->tccs & 0x1f));
+
+ /*
+ * TIME_SEQ_1
+ * TRR[12:9], TWB[4:0]
+ */
+ cast_writel(((cycles->trr & 0xf) << 9) |
+ (cycles->twb & 0x1f), host, CAST_TIME_SEQ_1);
+ dev_dbg(mtd->dev.parent, "CAST_TIME_SEQ_1 0x%x\n", ((cycles->trr & 0xf) << 9) |
+ (cycles->twb & 0x1f));
+
+}
+
+/* structure of equivalent onfi speed for non-onfi nand flash */
+struct nonfi_speed_t {
+ u16 id; /* maf_id and dev_id */
+ u16 onfi_speed;
+ u32 tccs;
+} nonfi_speed[] = {
+ /* TC58NVG3S0FBAID (FC7100) */
+ {
+ .id = 0x98 | (0xd3 << 8),
+ .onfi_speed = 4,
+ .tccs = 60,
+ },
+ /* TC58NVG3S0FBAID (FC7100) */
+ {
+ .id = 0x98 | (0xa3 << 8),
+ .onfi_speed = 4,
+ .tccs = 60,
+ },
+ /* TC58NVG3S0FBAID (FC7100) 4Gb */
+ {
+ .id = 0x98 | (0xac << 8),
+ .onfi_speed = 4,
+ .tccs = 60,
+ },
+ /* TC58BYG0S3HBAI6 (MYKONOS3) */
+ {
+ .id = 0x98 | (0xa1 << 8),
+ .onfi_speed = 4,
+ .tccs = 60,
+ },
+};
+
+static void onfi_compatible(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct cast_nand_host *host = nand_chip->priv;
+ u32 reg;
+ int i;
+ int err = 0;
+ const int buf_size = 20;
+ int tccs;
+ u8 buf[buf_size];
+ u16 nand_id = host->id[0] | (host->id[1] << 8);
+
+ host->par_page.syn_mode = -1;
+ host->par_page.asyn_mode = 0;
+
+ if (nand_chip->onfi_version) {
+ for (i = 0; i < 6; i++)
+ if (nand_chip->onfi_params.async_timing_mode
+ & (1 << i))
+ host->par_page.asyn_mode = i;
+
+ for (i = 0; i < 6; i++)
+ if (nand_chip->onfi_params.src_sync_timing_mode
+ & (1 << i))
+ host->par_page.syn_mode = i;
+ } else { /* use equivalent onfi speed mode for non-onfi nand */
+ tccs = 0;
+ for (i = 0; i < ARRAY_SIZE(nonfi_speed); i++) {
+ if (nonfi_speed[i].id == nand_id) {
+ host->par_page.asyn_mode
+ = nonfi_speed[i].onfi_speed;
+ tccs = nonfi_speed[i].tccs;
+ break;
+ }
+ }
+ cast_init_timings(mtd, host->par_page.asyn_mode, 0, tccs);
+
+ goto finish;
+ }
+
+ /* switch to supported timing mode */
+
+ /* Reset fifo */
+ cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+ cast_writel(4, host, CAST_DATA_SIZE);
+ cast_writel(0x01, host, CAST_ADDR0_0);
+ cast_writel(0, host, CAST_ADDR0_1);
+ buf[1] = 0;
+ buf[2] = 0;
+ buf[3] = 0;
+ reg = (0xef << SHIFT_CMD_0) |
+ CAST_CMD_ADDR_SEL0 |
+ CAST_CMD_INPUT_SEL_SIU|
+ CAST_CMD_SEQ_3;
+
+ if (synmode && (host->par_page.syn_mode != -1)) {
+ cast_writel(reg, host, CAST_COMMAND);
+ buf[0] = (0x1 << 4) | (host->par_page.syn_mode & 0xf);
+ nand_write_buf(host, buf, 4);
+ err = cast_wait_ready(mtd, 0);
+ if (err)
+ goto finish;
+ cast_init_timings(mtd, host->par_page.syn_mode, 1, 0);
+ reg = cast_readl(host, CAST_CONTROL);
+ reg |= CAST_CTRL_SYNC_MODE | CAST_CTRL_IO_WIDTH_16;
+ cast_writel(reg, host, CAST_CONTROL);
+ } else {
+ cast_writel(reg, host, CAST_COMMAND);
+ buf[0] = host->par_page.asyn_mode & 0xf;
+
+ nand_write_buf(host, buf, 4);
+ err = cast_wait_ready(mtd, 0);
+ if (err)
+ goto finish;
+ cast_init_timings(mtd, host->par_page.asyn_mode, 0, 0);
+ }
+
+ dev_dbg(mtd->dev.parent, "onfi: %d\n", nand_chip->onfi_version);
+ dev_dbg(mtd->dev.parent, "syn_mode: %d\n", host->par_page.syn_mode);
+ dev_dbg(mtd->dev.parent, "asyn_mode: %d\n", host->par_page.asyn_mode);
+ dev_dbg(mtd->dev.parent, "tccs: %d\n", nand_chip->onfi_params.t_ccs);
+finish:
+ return;
+}
+
+/* CAST controller register configuration called from probe function */
+static void cast_config_controller(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct cast_nand_host *host = nand_chip->priv;
+ u32 reg;
+
+ switch (mtd->writesize) {
+ case 256:
+ reg = CAST_CTRL_PAGE_256;
+ break;
+ case 512:
+ reg = CAST_CTRL_PAGE_512;
+ break;
+ case 1024:
+ reg = CAST_CTRL_PAGE_1K;
+ break;
+ case 2048:
+ reg = CAST_CTRL_PAGE_2K;
+ break;
+ case 4096:
+ reg = CAST_CTRL_PAGE_4K;
+ break;
+ case 8192:
+ reg = CAST_CTRL_PAGE_8K;
+ break;
+ case 16384:
+ reg = CAST_CTRL_PAGE_16K;
+ break;
+ default:
+ reg = CAST_CTRL_PAGE_0;
+ }
+
+ switch(mtd->erasesize >> (ffs(mtd->writesize) - 1)) {
+ case 32:
+ reg |= CAST_CTRL_BLOCK_32;
+ break;
+ case 64:
+ reg |= CAST_CTRL_BLOCK_64;
+ break;
+ case 128:
+ reg |= CAST_CTRL_BLOCK_128;
+ break;
+ case 256:
+ reg |= CAST_CTRL_BLOCK_256;
+ break;
+ default:
+ dev_err(mtd->dev.parent, "Non supported erase size for "
+ "%d\n", mtd->erasesize);
+
+ BUG();
+ }
+
+ /* nb of cycles is read cycle */
+ reg = reg | CAST_CTRL_ADDR_CYCLE0(5) | CAST_CTRL_ADDR_CYCLE1(5);
+
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ reg |= CAST_CTRL_IO_WIDTH_16;
+ } else {
+ reg |= CAST_CTRL_IO_WIDTH_8;
+ }
+
+ /* Disable all interrupts */
+ cast_writel(0, host, CAST_INT_MASK);
+ /* Clear interrupt status */
+ cast_writel(0, host, CAST_INT_STATUS);
+
+ /* Global interrupt enable */
+ reg |= CAST_CTRL_INT_EN;
+ cast_writel(reg, host, CAST_CONTROL);
+
+ switch (nand_chip->ecc.size) {
+ case 256:
+ reg = CAST_ECC_CTRL_BLOCK_256;
+ break;
+ case 512:
+ reg = CAST_ECC_CTRL_BLOCK_512;
+ break;
+ case 1024:
+ reg = CAST_ECC_CTRL_BLOCK_1K;
+ break;
+ default:
+ dev_err(mtd->dev.parent, "Non supported ecc size for "
+ "%d\n", nand_chip->ecc.size);
+ BUG();
+ }
+ reg |= CAST_ECC_CTRL_CAP(host->ecc_bits);
+
+ reg |= (host->ecc_threshold - 1) << CAST_ECC_CTRL_ERR_THRLD;
+
+ cast_writel(reg, host, CAST_ECC_CTRL);
+
+ cast_writel(mtd->writesize + nand_chip->ecc.layout->eccpos[0],
+ host, CAST_ECC_OFFSET);
+}
+
+static int cast_init_clock(struct cast_nand_host *host,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct clk *cast_clk;
+ struct clk *phy_clk;
+
+ cast_clk = clk_get(&pdev->dev, "ctrl");
+ if (IS_ERR(cast_clk)) {
+ ret = PTR_ERR(cast_clk);
+ goto out;
+ }
+ ret = clk_prepare_enable(cast_clk);
+ if (unlikely(ret)) {
+ goto put_ctrl_clk;
+ }
+ host->clk_ctrl = cast_clk;
+
+ phy_clk = clk_get(&pdev->dev, "phy");
+ if (IS_ERR(phy_clk)) {
+ ret = PTR_ERR(phy_clk);
+ goto out_phy_clk;
+ }
+ ret = clk_prepare_enable(phy_clk);
+ if (unlikely(ret)) {
+ goto put_phy_clk;
+ }
+ host->clk_phy = phy_clk;
+
+ return 0;
+
+put_phy_clk:
+ clk_put(phy_clk);
+out_phy_clk:
+ clk_disable_unprepare(cast_clk);
+put_ctrl_clk:
+ clk_put(cast_clk);
+out:
+ return ret;
+}
+
+/**
+ * Prepare CAST controller for DMA transfer (before command sent)
+ *
+ * @host cast_nand_host structure
+ * @size nb of bytes to be transfered
+ * @direction operation direction (write/read)
+ *
+ * @return 0 - success; -1 - upon failure
+ */
+static int cast_dma_prepare_transfer(struct cast_nand_host *host,
+ int size, int direction,
+ dma_addr_t dmaaddr)
+{
+ u32 reg;
+
+ cast_writel(dmaaddr, host, CAST_DMA_ADDR);
+
+ /* DMA SFR mode */
+ cast_writel(0, host, CAST_DMA_ADDR_OFFSET);
+ cast_writel(size, host, CAST_DMA_CNT);
+ reg = CAST_DMA_CTRL_DMA_MODE_SFR;
+
+ if (direction == NF_TRANSFER_READ) {
+ reg |= CAST_DMA_CTRL_DMA_DIR_READ;
+ } else {
+ reg |= CAST_DMA_CTRL_DMA_DIR_WRITE;
+ }
+
+ reg |= DMA_BURST_UNSPEC_LENGTH;
+
+ reg |= CAST_DMA_CTRL_DMA_START;
+ cast_writel(reg, host, CAST_DMA_CTRL);
+
+ return 0;
+}
+
+/**
+ * Wait for CAST controller finish DMA transfer (after command sent)
+ *
+ * @host cast_nand_host structure
+ * @irq choose irq or polling
+ *
+ * @return 0 - success; -1 - upon failure
+ */
+static int cast_wait_dma_finish_transfer(struct cast_nand_host *host, int irq)
+{
+ int err = 0;
+ int res;
+ u32 reg;
+ unsigned long timeo = msecs_to_jiffies(2000);
+ struct mtd_info *mtd = &(host->mtd);
+
+ if (irq) {
+ res = wait_for_completion_timeout(&host->complete, timeo);
+ if (!res) {
+ dev_dbg(mtd->dev.parent,
+ "%s: Timeout in irq\n", __func__);
+ err = -1;
+ }
+ /* Disable all interrupts */
+ cast_writel(0, host, CAST_INT_MASK);
+ } else {
+ err = wait_for_flag(host, CAST_DMA_CTRL,
+ CAST_DMA_CTRL_DMA_READY, 1, timeo);
+ }
+
+ if (err == -1) {
+ dev_err(mtd->dev.parent,
+ "%s: Wait for DMA transfer timeout\n", __func__);
+ goto fail;
+ }
+
+ reg = cast_readl(host, CAST_DMA_CTRL);
+
+ if (reg & CAST_DMA_CTRL_DMA_ERR_FLAG) {
+ dev_dbg(mtd->dev.parent, "%s: DMA transfer\n", __func__);
+ err = -1;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int cast_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ struct cast_nand_host *host = chip->priv;
+ u8 *buf = chip->oob_poi;
+ u32 reg;
+ u32 block, page_shift, page_addr;
+
+ wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+ if (sndcmd) {
+ int err;
+
+ /* Reset fifo */
+ cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+ reg = cast_readl(host, CAST_CONTROL);
+ reg &= ~CAST_CTRL_ECC_EN;
+ reg &= ~CAST_CTRL_SPARE_EN;
+ reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+ cast_writel(reg, host, CAST_CONTROL);
+
+ cast_writel(mtd->oobsize, host, CAST_DATA_SIZE);
+
+ block = page / host->ppb;
+ page -= block * host->ppb;
+ page_shift = fls(host->ppb - 1);
+ page_addr = (block << page_shift) | page;
+
+ cast_writel((mtd->writesize & 0xFFFF) | (page_addr << 16),
+ host, CAST_ADDR0_0);
+ cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+ reg = (NAND_CMD_READSTART << SHIFT_CMD_1) |
+ (NAND_CMD_READ0 << SHIFT_CMD_0) |
+ CAST_CMD_ADDR_SEL0 |
+ CAST_CMD_SEQ_10;
+
+ if (usedma) {
+ cast_dma_prepare_transfer(host, mtd->oobsize,
+ NF_TRANSFER_READ,
+ host->dmaaddr);
+ reg |= CAST_CMD_INPUT_SEL_DMA;
+
+ /* enable interrupt for dma transfer */
+ cast_writel(CAST_INT_MASK_DMA_READY_EN, host, CAST_INT_MASK);
+ } else {
+ reg |= CAST_CMD_INPUT_SEL_SIU;
+ }
+
+ cast_writel(reg, host, CAST_COMMAND);
+
+ if (usedma)
+ err = cast_wait_dma_finish_transfer(host, 1);
+ else
+ err = wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT, 0,
+ msecs_to_jiffies(200));
+
+ if (err)
+ return err;
+
+ sndcmd = 0;
+ }
+
+ if (usedma)
+ memcpy(buf, host->dmabuf, mtd->oobsize);
+ else
+ nand_read_buf(host, buf, mtd->oobsize);
+
+ return sndcmd;
+}
+
+/*
+ * Read the main data area of @page into @buf, optionally followed by the
+ * first @spare_user_size user bytes of the spare area into @spare_buf.
+ *
+ * @raw: non-zero bypasses the controller's ECC engine.
+ *
+ * Returns 0 on success, or a negative error code from the DMA completion
+ * or controller-ready wait.
+ */
+static int cast_read_page_data(struct mtd_info *mtd, int page,
+			       u8 *buf, int raw,
+			       int spare_user_size, u8 *spare_buf)
+{
+	struct nand_chip *nand_chip = mtd->priv;
+	struct cast_nand_host *host = nand_chip->priv;
+	u32 reg;
+	u32 block, page_shift, page_addr;
+	int err = 0;
+	int transfer_size;
+
+	wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+	/* Configure spare-area and ECC handling for this transfer. */
+	reg = cast_readl(host, CAST_CONTROL);
+	reg &= ~CAST_CTRL_CUSTOM_SIZE_EN;
+	if (spare_user_size && spare_buf)
+		reg |= CAST_CTRL_SPARE_EN;
+	else
+		reg &= ~CAST_CTRL_SPARE_EN;
+	if (raw) {
+		reg &= ~CAST_CTRL_ECC_EN;
+	} else {
+		reg |= CAST_CTRL_ECC_EN;
+	}
+	cast_writel(reg, host, CAST_CONTROL);
+
+	cast_writel(spare_user_size, host, CAST_SPARE_SIZE);
+
+	transfer_size = mtd->writesize
+		+ ((spare_user_size && spare_buf) ? spare_user_size : 0);
+
+	/*
+	 * Split the linear page number into block / page-in-block and
+	 * repack it in the row-address layout the ADDR0 registers expect.
+	 */
+	block = page / host->ppb;
+	page -= block * host->ppb;
+	page_shift = fls(host->ppb - 1);
+	page_addr = (block << page_shift) | page;
+	cast_writel((page_addr << 16), host, CAST_ADDR0_0);
+	cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+	/* READ0 / READSTART command pair, controller sequence 10 */
+	reg = (NAND_CMD_READSTART << SHIFT_CMD_1) |
+	      (NAND_CMD_READ0 << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_SEQ_10;
+
+	if (usedma) {
+		cast_dma_prepare_transfer(host,
+					  transfer_size,
+					  NF_TRANSFER_READ,
+					  host->dmaaddr);
+
+		reg |= CAST_CMD_INPUT_SEL_DMA;
+		/* enable interrupt for dma transfer */
+		cast_writel(CAST_INT_MASK_DMA_READY_EN, host, CAST_INT_MASK);
+	} else {
+		reg |= CAST_CMD_INPUT_SEL_SIU;
+	}
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	cast_writel(reg, host, CAST_COMMAND);
+
+	if (usedma) {
+		err = cast_wait_dma_finish_transfer(host, 1);
+		if (!err) {
+			/* copy data then trailing spare bytes out of the bounce buffer */
+			memcpy(buf, (u8 *)host->dmabuf, mtd->writesize);
+			if (spare_user_size && spare_buf)
+				memcpy(spare_buf,
+				       (u8 *)host->dmabuf + mtd->writesize,
+				       spare_user_size);
+		}
+	} else {
+		/* wait for controller */
+		err = wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT, 0,
+				    msecs_to_jiffies(200));
+		if (!err) {
+			err = nand_read_buf(host, buf, mtd->writesize);
+		}
+
+		if (!err && spare_user_size && spare_buf)
+			err = nand_read_buf(host, spare_buf, spare_user_size);
+	}
+
+	return err;
+}
+
+/*
+ * Decide whether a page carries the "programmed" marker.
+ *
+ * A programmed page has most of its marker bits cleared, so its hamming
+ * weight falls below half of the available marker bits
+ * (marker_len * 8 / 2 == marker_len * 4).
+ *
+ * Returns 1 when the page is considered programmed, 0 otherwise.
+ */
+int page_is_programmed(u8 *marker_buf, int marker_len)
+{
+	int weight = 0;
+	int idx;
+
+	for (idx = 0; idx < marker_len; idx++)
+		weight += hweight8(marker_buf[idx]);
+
+	/* programmed <=> weight strictly below the half-way threshold */
+	if (weight < marker_len * 4)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Test whether a non-marked page (expected to be all 0xff with possible
+ * bit-flips) is correctable, scrubbing it back to 0xff in place.
+ *
+ * @param buf      nand page buffer (rewritten to all 0xff as a side effect)
+ * @param length   page size
+ * @param ecc_size ecc block size (512/1024)
+ * @param ecc_bits ecc correction capability per ecc block
+ *
+ * @return n (>=0): total number of flipped bits found
+ *         -1: some ecc block had more flips than @ecc_bits (uncorrectable)
+ */
+int correctable_empty_page(u8 *buf, int length, int ecc_size, int ecc_bits)
+{
+	int offset;
+	int total = 0;
+
+	for (offset = 0; offset < length; offset += ecc_size) {
+		u32 *word = (u32 *)(buf + offset);
+		int flips = 0;
+		int w;
+
+		for (w = 0; w < (ecc_size >> 2); w++) {
+			/* any zero bit in an erased page is a flip */
+			flips += hweight32(~word[w]);
+			word[w] = 0xffffffff; /* correct the page */
+		}
+
+		total += flips;
+
+		if (flips > ecc_bits)
+			return -1;
+	}
+
+	return total;
+}
+
+/*
+ * chip->ecc.read_page callback: read @page with the controller ECC engine
+ * enabled and account corrections / failures in mtd->ecc_stats.
+ *
+ * Returns 0 on success (uncorrectable pages are only reported through
+ * ecc_stats), or a negative error code on transfer failure.
+ */
+static int cast_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
+			      u8 *buf, int page)
+{
+	int ret;
+	struct cast_nand_host *host = chip->priv;
+	u32 reg_ecc_ctrl;
+	u8 status;
+	/* NOTE(review): large on-stack buffer (NAND_MAX_OOBSIZE bytes);
+	 * consider moving it into struct cast_nand_host. */
+	u8 spare_buf[NAND_MAX_OOBSIZE];
+
+	/* ECC read of data + programmed-page marker and BBM spare bytes */
+	ret = cast_read_page_data(mtd, page, buf, 0,
+				  host->ppm + BBM_BYTES, spare_buf);
+	if (ret)
+		return ret;
+
+	/* latch the ECC outcome before the raw OOB read below clobbers it */
+	reg_ecc_ctrl = cast_readl(host, CAST_ECC_CTRL);
+
+	chip->ecc.read_oob(mtd, chip, page, 1);
+
+	cast_cmdfunc(mtd, NAND_CMD_STATUS, 0, 0);
+	status = cast_read_byte(mtd);
+
+	if (!(status & NAND_STATUS_READY))
+		return -EIO;
+
+	if (host->benand) {
+		/* on-die ECC (Toshiba BENAND): trust the chip's own status */
+		if (status & NAND_STATUS_FAIL) {
+			mtd->ecc_stats.failed++;
+			dev_info(mtd->dev.parent, "benand fail 0x%x page %d\n", status, page);
+			return 0;
+		}
+		else if (status & 8) {
+			/* nand with on die ecc :
+			   toshiba benand
+			   (bit 3 presumably means "corrected / rewrite
+			   recommended" — confirm against the datasheet)
+			 */
+			mtd->ecc_stats.corrected += host->ecc_threshold;
+			dev_info(mtd->dev.parent, "benand corrected 0x%x page %d\n", status, page);
+		}
+
+		/* we should have no ecc correction (corrected by flash) */
+		if (reg_ecc_ctrl & CAST_ECC_CTRL_ERR_UNCORRECT) {
+			if (page_is_programmed(spare_buf + BBM_BYTES,
+					       host->ppm))
+				dev_err(mtd->dev.parent, "cast ecc uncorr on benand status %x %x\n",
+					status, reg_ecc_ctrl);
+		}
+		else if (reg_ecc_ctrl & CAST_ECC_CTRL_ERR_CORRECT) {
+			dev_err(mtd->dev.parent, "cast ecc correction on benand status %x %x\n",
+				status, reg_ecc_ctrl);
+		}
+		return 0;
+	}
+	if (reg_ecc_ctrl & CAST_ECC_CTRL_ERR_UNCORRECT) {
+		if (page_is_programmed(spare_buf + BBM_BYTES,
+				       host->ppm)) {
+			/* genuinely programmed page: report as failed */
+			mtd->ecc_stats.failed++;
+		} else {
+			//XXX this look wrong as we don't check bitflip
+			//in spare aera.
+			//Also this should take lot's of time to count
+			//bitflip on the whole page
+			ret = correctable_empty_page(buf, mtd->writesize,
+						     chip->ecc.size,
+						     host->ecc_bits);
+
+			/* report as corrected page if within our capacity */
+			if (ret >= 0)
+				mtd->ecc_stats.corrected += ret;
+			else
+				mtd->ecc_stats.failed++;
+		}
+	} else if ((reg_ecc_ctrl & CAST_ECC_CTRL_ERR_CORRECT) &&
+		   (reg_ecc_ctrl & CAST_ECC_CTRL_ERR_OVER)) {
+		/* corrected, but above the programmed error threshold */
+		mtd->ecc_stats.corrected += host->ecc_threshold;
+	}
+
+	return 0;
+}
+
+/*
+ * chip->ecc.read_page_raw callback: read the whole page plus the full OOB
+ * area into @buf / chip->oob_poi with the ECC engine disabled.
+ *
+ * Returns 0 on success or a negative error code from the transfer wait.
+ */
+static int cast_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+			      u8 *buf, int page)
+{
+	struct cast_nand_host *host = chip->priv;
+	u32 reg;
+	u32 block, page_shift, page_addr;
+	int err = 0;
+
+	wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	/* raw transfer: no ECC, no spare handling, explicit custom size */
+	reg = cast_readl(host, CAST_CONTROL);
+	reg &= ~CAST_CTRL_ECC_EN;
+	reg &= ~CAST_CTRL_SPARE_EN;
+	reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+	cast_writel(reg, host, CAST_CONTROL);
+
+	cast_writel(mtd->writesize + mtd->oobsize, host, CAST_DATA_SIZE);
+
+	/* row address = (block << page_shift) | page-in-block */
+	block = page / host->ppb;
+	page -= block * host->ppb;
+	page_shift = fls(host->ppb - 1);
+	page_addr = (block << page_shift) | page;
+	cast_writel((page_addr << 16), host, CAST_ADDR0_0);
+	cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+	/* READ0 / READSTART command pair, controller sequence 10 */
+	reg = (NAND_CMD_READSTART << SHIFT_CMD_1) |
+	      (NAND_CMD_READ0 << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_SEQ_10;
+
+	if (usedma) {
+		cast_dma_prepare_transfer(host,
+					  mtd->writesize + mtd->oobsize,
+					  NF_TRANSFER_READ,
+					  host->dmaaddr);
+		reg |= CAST_CMD_INPUT_SEL_DMA;
+
+		/* enable interrupt for dma transfer */
+		cast_writel(CAST_INT_MASK_DMA_READY_EN, host, CAST_INT_MASK);
+	} else {
+		reg |= CAST_CMD_INPUT_SEL_SIU;
+	}
+
+	cast_writel(reg, host, CAST_COMMAND);
+
+	if (usedma) {
+		err = cast_wait_dma_finish_transfer(host, 1);
+		if (!err) {
+			/* split the bounce buffer into data and OOB */
+			memcpy(buf, (u8 *)host->dmabuf, mtd->writesize);
+			memcpy(chip->oob_poi,
+			       (u8 *)host->dmabuf + mtd->writesize,
+			       mtd->oobsize);
+		}
+	} else {
+		/* wait for controller */
+		err = wait_for_flag(host, CAST_STATUS, CAST_STAT_CTRL_STAT, 0,
+				    msecs_to_jiffies(200));
+
+		if (!err) {
+			nand_read_buf(host, buf, mtd->writesize);
+			nand_read_buf(host, chip->oob_poi, mtd->oobsize);
+		}
+	}
+	return err;
+}
+
+/*
+ * chip->ecc.write_oob callback: program only the spare area of @page from
+ * chip->oob_poi, raw (no ECC).  The write starts at column mtd->writesize,
+ * packed into the low 16 bits of ADDR0_0.
+ *
+ * Returns 0 on success, -EIO on chip program failure, or a negative error
+ * code on transfer failure.
+ */
+static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+			      int page)
+{
+	u8 status;
+	const u8 *buf = chip->oob_poi;
+	struct cast_nand_host *host = chip->priv;
+	u32 reg;
+	u32 block, page_shift, page_addr;
+	int err = 0;
+
+	wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+	/* raw custom-size transfer of exactly mtd->oobsize bytes */
+	reg = cast_readl(host, CAST_CONTROL);
+	reg |= CAST_CTRL_CUSTOM_SIZE_EN;
+	reg &= ~CAST_CTRL_ECC_EN;
+	reg &= ~CAST_CTRL_SPARE_EN;
+	cast_writel(reg, host, CAST_CONTROL);
+
+	cast_writel(0, host, CAST_SPARE_SIZE);
+	cast_writel(mtd->oobsize, host, CAST_DATA_SIZE);
+
+	/* row address in the high half, column (start of OOB) in the low half */
+	block = page / host->ppb;
+	page -= block * host->ppb;
+	page_shift = fls(host->ppb - 1);
+	page_addr = (block << page_shift) | page;
+	cast_writel((page_addr << 16) | (mtd->writesize & 0xFFFF),
+		    host, CAST_ADDR0_0);
+	cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+	/* SEQIN / PAGEPROG command pair, controller sequence 12 */
+	reg = (NAND_CMD_PAGEPROG << SHIFT_CMD_1) |
+	      (NAND_CMD_SEQIN << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_SEQ_12;
+
+	if (usedma) {
+		memcpy(host->dmabuf, buf, mtd->oobsize);
+		cast_dma_prepare_transfer(host, mtd->oobsize,
+					  NF_TRANSFER_WRITE,
+					  host->dmaaddr);
+		reg |= CAST_CMD_INPUT_SEL_DMA;
+
+	} else {
+		reg |= CAST_CMD_INPUT_SEL_SIU;
+	}
+
+	/* switch to write timings for the program operation */
+	cast_writel(cast_tim_asyn_wr, host, CAST_TIMINGS_ASYN);
+	/* enable interrupt */
+	cast_writel(CAST_INT_MASK_MEMx_RDY_INT_EN(host->chip_nr),
+		    host, CAST_INT_MASK);
+	cast_writel(reg, host, CAST_COMMAND);
+
+	if (!usedma) {
+		err = nand_write_buf(host, buf, mtd->oobsize);
+	}
+	if (err)
+		goto fail;
+
+	err = cast_wait_ready(mtd, 1);
+	if (err)
+		goto fail;
+
+	/* Check dma error if using dma */
+	if (usedma) {
+		reg = cast_readl(host, CAST_DMA_CTRL);
+		if (reg & CAST_DMA_CTRL_DMA_ERR_FLAG) {
+			dev_dbg(mtd->dev.parent, "%s: DMA Transfer error\n"
+				, __func__);
+			err = -1;
+			goto fail;
+		}
+	}
+
+	/* restore read timings before polling chip status */
+	cast_writel(cast_tim_asyn_rd, host, CAST_TIMINGS_ASYN);
+
+	cast_cmdfunc(mtd, NAND_CMD_STATUS, 0, 0);
+	status = cast_read_byte(mtd);
+
+	WARN_ON(!(status & NAND_STATUS_READY));
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+fail:
+	/* always leave the controller with read timings restored */
+	cast_writel(cast_tim_asyn_rd, host, CAST_TIMINGS_ASYN);
+	return err;
+}
+
+/*
+ * Placeholder for chip->ecc.write_page: page programming goes through
+ * cast_write_page() (chip->write_page), so reaching this callback means
+ * the driver callbacks were wired incorrectly.
+ */
+static void dummy_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				 const uint8_t *buf)
+{
+	dev_err(mtd->dev.parent, "Should not enter dummy function\n");
+	BUG();
+}
+
+/*
+ * chip->write_page callback: program the data area and the full spare
+ * area of @page, stamping the BBM + programmed-page marker bytes at the
+ * start of the spare area.
+ *
+ * @raw: non-zero disables the controller ECC engine.
+ *
+ * Returns 0 on success, -EIO on chip program failure, or a negative error
+ * code on transfer failure.
+ */
+static int cast_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			   const u8 *buf, int page, int cached, int raw)
+{
+	u8 status;
+	struct cast_nand_host *host = chip->priv;
+	u32 reg;
+	u32 block, page_shift, page_addr;
+	int err = 0;
+	/* marker: BBM bytes kept 0xff, remaining ppm bytes zeroed */
+	u8 spare_buf[NAND_MAX_OOBSIZE] = { 0xff, 0xff, 0 };
+
+	wait_until_target_rdy(host, msecs_to_jiffies(200));
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	reg = cast_readl(host, CAST_CONTROL);
+	reg &= ~CAST_CTRL_CUSTOM_SIZE_EN;
+	reg |= CAST_CTRL_SPARE_EN;
+	if (raw) {
+		reg &= ~CAST_CTRL_ECC_EN;
+	} else {
+		reg |= CAST_CTRL_ECC_EN;
+	}
+	cast_writel(reg, host, CAST_CONTROL);
+
+	/* spare size */
+	cast_writel(mtd->oobsize, host, CAST_SPARE_SIZE);
+
+	/* row address = (block << page_shift) | page-in-block */
+	block = page / host->ppb;
+	page -= block * host->ppb;
+	page_shift = fls(host->ppb - 1);
+	page_addr = (block << page_shift) | page;
+	cast_writel((page_addr << 16), host, CAST_ADDR0_0);
+	cast_writel(page_addr >> 16, host, CAST_ADDR0_1);
+
+	/* SEQIN / PAGEPROG command pair, controller sequence 12 */
+	reg = (NAND_CMD_PAGEPROG << SHIFT_CMD_1) |
+	      (NAND_CMD_SEQIN << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_SEQ_12;
+
+	if (usedma) {
+		memcpy(host->dmabuf, buf, mtd->writesize);
+		memcpy(host->dmabuf + mtd->writesize, chip->oob_poi,
+		       mtd->oobsize);
+		/* spare area marker: overwrites the first BBM + ppm bytes
+		 * of the caller's OOB data on purpose */
+		memcpy(host->dmabuf + mtd->writesize, spare_buf,
+		       host->ppm + BBM_BYTES);
+		cast_dma_prepare_transfer(
+			host,
+			mtd->writesize + mtd->oobsize,
+			NF_TRANSFER_WRITE,
+			host->dmaaddr);
+		reg |= CAST_CMD_INPUT_SEL_DMA;
+	} else {
+		reg |= CAST_CMD_INPUT_SEL_SIU;
+	}
+
+	/* switch to write timings for the program operation */
+	cast_writel(cast_tim_asyn_wr, host, CAST_TIMINGS_ASYN);
+	/* enable interrupt */
+	cast_writel(CAST_INT_MASK_MEMx_RDY_INT_EN(host->chip_nr),
+		    host, CAST_INT_MASK);
+	cast_writel(reg, host, CAST_COMMAND);
+
+	if (!usedma) {
+		/* NOTE(review): errors are merged with |= — works only as a
+		 * zero/non-zero test, the final code may be meaningless */
+		err = nand_write_buf(host, buf, mtd->writesize);
+		/* transfer spare area marker in fifo mode */
+		err |= nand_write_buf(host, spare_buf, host->ppm + BBM_BYTES);
+		err |= nand_write_buf(host, chip->oob_poi + host->ppm + BBM_BYTES,
+				      mtd->oobsize - (host->ppm + BBM_BYTES));
+	}
+	if (err)
+		goto fail;
+
+	err = cast_wait_ready(mtd, 1);
+	if (err)
+		goto fail;
+
+	/* Check dma error if using dma */
+	if (usedma) {
+		reg = cast_readl(host, CAST_DMA_CTRL);
+		if (reg & CAST_DMA_CTRL_DMA_ERR_FLAG) {
+			dev_dbg(mtd->dev.parent, "%s: DMA Transfer error\n"
+				, __func__);
+			err = -1;
+			goto fail;
+		}
+	}
+
+	/* restore read timings before polling chip status */
+	cast_writel(cast_tim_asyn_rd, host, CAST_TIMINGS_ASYN);
+
+	cast_cmdfunc(mtd, NAND_CMD_STATUS, 0, 0);
+	status = cast_read_byte(mtd);
+
+	WARN_ON(!(status & NAND_STATUS_READY));
+
+	// XXX
+	if ((status & NAND_STATUS_FAIL) && (chip->errstat)) {
+		status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+	}
+
+	if (status & NAND_STATUS_FAIL)
+		return -EIO;
+
+	return 0;
+fail:
+	/* always leave the controller with read timings restored */
+	cast_writel(cast_tim_asyn_rd, host, CAST_TIMINGS_ASYN);
+	return err;
+}
+
+/*
+ * chip->erase_cmd callback: issue the ERASE1/ERASE2 pair (controller
+ * sequence 14) for the block containing @page and wait for the device
+ * to come back ready.
+ */
+static void nand_erase_cmd(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct cast_nand_host *host = chip->priv;
+	u32 blk, shift, row;
+	u32 val;
+
+	/* row address = (block << shift) | page-in-block */
+	blk = page / host->ppb;
+	shift = fls(host->ppb - 1);
+	row = (blk << shift) | (page - blk * host->ppb);
+
+	/*
+	 * clear eventual set of custom size bit
+	 * of cast ctrl register by other functions
+	 */
+	val = cast_readl(host, CAST_CONTROL);
+	val &= ~CAST_CTRL_CUSTOM_SIZE_EN;
+	cast_writel(val, host, CAST_CONTROL);
+
+	cast_writel(row << 16, host, CAST_ADDR0_0);
+	cast_writel(row >> 16, host, CAST_ADDR0_1);
+
+	val = (NAND_CMD_ERASE2 << SHIFT_CMD_1) |
+	      (NAND_CMD_ERASE1 << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_SEQ_14;
+
+	/* enable interrupt */
+	cast_writel(CAST_INT_MASK_MEMx_RDY_INT_EN(host->chip_nr),
+		    host, CAST_INT_MASK);
+
+	cast_writel(val, host, CAST_COMMAND);
+
+	cast_wait_ready(mtd, 1);
+}
+
+/*
+ * Read and log the 4-byte GET FEATURES (0xEE) payload for feature
+ * @address, using a raw 4-byte SIU transfer.
+ */
+static void print_feature(struct mtd_info *mtd, u32 address)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct cast_nand_host *host = chip->priv;
+	u8 feat[4];
+	u32 cfg;
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	/* raw custom-size transfer: no ECC, no spare */
+	cfg = cast_readl(host, CAST_CONTROL);
+	cfg &= ~(CAST_CTRL_ECC_EN | CAST_CTRL_SPARE_EN);
+	cfg |= CAST_CTRL_CUSTOM_SIZE_EN;
+	cast_writel(cfg, host, CAST_CONTROL);
+
+	cast_writel(4, host, CAST_DATA_SIZE);
+	cast_writel(address, host, CAST_ADDR0_0);
+	cast_writel(0, host, CAST_ADDR0_1);
+
+	cfg = (0xee << SHIFT_CMD_0) |
+	      CAST_CMD_ADDR_SEL0 |
+	      CAST_CMD_INPUT_SEL_SIU |
+	      CAST_CMD_SEQ_2;
+
+	/* enable interrupt */
+	cast_writel(CAST_INT_MASK_MEMx_RDY_INT_EN(host->chip_nr),
+		    host, CAST_INT_MASK);
+	cast_writel(cfg, host, CAST_COMMAND);
+
+	if (cast_wait_ready(mtd, 1)) {
+		dev_err(mtd->dev.parent, "Get feature error\n");
+		return;
+	}
+
+	nand_read_buf(host, feat, 4);
+	dev_info(mtd->dev.parent, "Features: %x %x %x %x\n",
+		 feat[0], feat[1], feat[2], feat[3]);
+}
+
+/*
+ * Probe for the NAND device.
+ *
+ * Maps the controller, sets up clocks and default timings, identifies the
+ * chip, derives the BCH ECC geometry from the available spare area, and
+ * registers the MTD device plus its sysfs attributes.
+ *
+ * Fix vs. previous revision: the error returned by request_irq() is now
+ * propagated instead of being masked by a hard-coded -ENOENT.
+ */
+static int __devinit cast_nand_probe(struct platform_device *pdev)
+{
+	struct cast_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	struct resource *res;
+	/*struct cast_plat_data const* const pdata = dev_get_platdata(&pdev->dev);*/
+	int err = 0;
+	int i;
+	int ecc_steps, ecc_size, ecc_bytes;
+	int spare_user_size;
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = kzalloc(sizeof(struct cast_nand_host), GFP_KERNEL);
+	if (!host) {
+		dev_err(&pdev->dev, "failed to allocate device\n");
+		return -ENOMEM;
+	}
+	host->mode_curr = T_ASYN;
+
+	if (usedma) {
+		/* single bounce buffer large enough for page + OOB */
+		host->dmabuf = dma_alloc_coherent(&pdev->dev,
+				NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE,
+				&host->dmaaddr, GFP_KERNEL);
+		if (!host->dmabuf) {
+			dev_err(&pdev->dev, "failed to allocate dma buffer\n");
+			err = -ENOMEM;
+			goto no_dma;
+		}
+
+		BUG_ON(! (IS_ALIGNED(host->dmaaddr, sizeof(u32))));
+		dev_info(&pdev->dev, "DMA mode enabled\n");
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_info(&pdev->dev, "invalid I/O resource\n");
+		err = -ENXIO;
+		goto no_res;
+	}
+
+	host->area = request_mem_region(res->start,
+					resource_size(res),
+					dev_name(&pdev->dev));
+
+	if (host->area == NULL) {
+		err = -EBUSY;
+		goto no_res;
+	}
+
+	host->regs = ioremap(res->start, resource_size(res));
+	if (host->regs == NULL) {
+		err = -EIO;
+		goto no_ioremap;
+	}
+
+	/* Init cast_nand_host fields */
+	host->cmd_precd = -1;
+	host->data_flag = 0;
+	host->byte_idx = 0;
+
+	mtd = &host->mtd;
+	nand_chip = &host->nand_chip;
+
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+	mtd->name = "nand0";
+	mtd->priv = &host->nand_chip;
+	nand_chip->priv = host;
+
+	/* data path goes through the controller FIFO, not MMIO windows */
+	nand_chip->IO_ADDR_R = nand_chip->IO_ADDR_W = 0;
+	nand_chip->cmdfunc = cast_cmdfunc;
+	nand_chip->dev_ready = cast_dev_ready;
+	nand_chip->select_chip = cast_select_chip;
+	nand_chip->read_byte = cast_read_byte;
+	nand_chip->read_buf = cast_read_buf;
+
+	if (cast_init_clock(host, pdev)) {
+		err = -EIO;
+		goto no_clk_init;
+	}
+
+	/* initialize cast with speed mode 0 and asynchronous */
+	cast_init_timings(mtd, 0, 0, 0);
+
+	/* No write protect */
+	cast_writel(0xFF00, host, CAST_MEM_CTRL);
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	init_completion(&host->complete);
+
+	host->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(host->pctl)) {
+		err = PTR_ERR(host->pctl);
+		goto no_pin;
+	}
+
+	err = request_irq(P7_NAND_IRQ, &cast_irq, 0,
+			  dev_name(&pdev->dev), host);
+	if (err)
+		/* propagate the real error instead of masking it as -ENOENT */
+		goto no_irq;
+
+	/*
+	 * get the device id information for timing matching
+	 */
+	nand_read_id(mtd, 0x00);
+	nand_read_buf(host, (uint8_t *)host->id, 8);
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	dev_dbg(&pdev->dev, "nand id %x %x %x %x %x %x %x %x\n",
+		host->id[0], host->id[1], host->id[2], host->id[3],
+		host->id[4], host->id[5], host->id[6], host->id[7]);
+
+	/* Toshiba (0x98) with the on-die-ECC bit set in id[4] */
+	if (host->id[0] == 0x98 && (host->id[4] & 0x80)) {
+		host->benand = 1;
+		dev_info(&pdev->dev, "nand is benand\n");
+	}
+
+	/* first part of the scan to get chip information */
+	if (nand_scan_ident(mtd, FLASH_COUNT, NULL)) {
+		err = -ENXIO;
+		goto no_flash;
+	}
+
+	/* Test bus width support conflict */
+	if ((!SUPPORT_BUSWIDTH_16)&&(nand_chip->options&NAND_BUSWIDTH_16)) {
+		err = -ENXIO;
+		goto no_flash;
+	}
+
+	host->ppb = mtd->erasesize / mtd->writesize;
+
+	/* ECC Module related settings */
+	nand_chip->ecc.mode = NAND_ECC_HW;
+
+	/*
+	 * Compared to 1024, 512 deals better with
+	 * unbalanced bit-flip distribution
+	 */
+	nand_chip->ecc.size = 512;
+
+	/*
+	 * cast: user data size in spare area should be 8 bytes aligned,
+	 * therefore at least 8 bytes need to be reserved (2 bbm + 6 ppm).
+	 */
+	host->ppm = 6;
+	spare_user_size = BBM_BYTES + host->ppm;
+
+	/*
+	 * For each bit of correction capability, we need
+	 * 14 bits (BCH ecc) + 1 bit (programming marker)
+	 */
+	ecc_size = mtd->oobsize - spare_user_size;
+	ecc_steps = mtd->writesize/nand_chip->ecc.size;
+	host->ecc_bits = ((ecc_size / ecc_steps) * 8) / (14 + 1);
+	host->ecc_bits &= ~1U;	/* controller supports even capacities only */
+	nand_chip->ecc.bytes = (14 * host->ecc_bits + 7) >> 3;
+	ecc_bytes = nand_chip->ecc.bytes * ecc_steps;
+
+	/* use as many marker bits as host->ecc_bits for each ecc block */
+	host->ppm = (host->ecc_bits * ecc_steps + 7) >> 3;
+	/* programmed page marker should be at least 6 bytes */
+	if (host->ppm < 6)
+		host->ppm = 6;
+
+	spare_user_size = host->ppm + BBM_BYTES;
+	/* adjust number of marker bytes after rounding */
+	if ((spare_user_size + ecc_bytes) > mtd->oobsize)
+		spare_user_size = mtd->oobsize - ecc_bytes;
+
+	/* spare_user_size should be 8-byte aligned */
+	spare_user_size &= ~7U;
+	host->ppm = spare_user_size - BBM_BYTES;
+
+	/* construct from template layout structure */
+	cast_ecc_oob_layout.eccbytes = ecc_bytes;
+
+	for (i = 0; i < cast_ecc_oob_layout.eccbytes; i++)
+		cast_ecc_oob_layout.eccpos[i] = spare_user_size + i;
+
+	cast_ecc_oob_layout.oobfree[0].offset =
+		spare_user_size + cast_ecc_oob_layout.eccbytes;
+	cast_ecc_oob_layout.oobfree[0].length =
+		mtd->oobsize - cast_ecc_oob_layout.oobfree[0].offset;
+	nand_chip->ecc.layout = &cast_ecc_oob_layout;
+
+	nand_chip->ecc.total = (nand_chip->ecc.layout)->eccbytes;
+
+	/*
+	 * Threshold is set half of the correction capability.
+	 * The cast controller often corrects more bits than required by
+	 * the manufacturer. It is safer to report the error earlier
+	 * to avoid a sudden degradation.
+	 */
+	host->ecc_threshold = (host->ecc_bits >> 1) & 0x3f;
+	if (!host->ecc_threshold)
+		host->ecc_threshold = 1;
+
+	cast_config_controller(mtd);
+
+	nand_chip->ecc.read_page = cast_read_page_ecc;
+	nand_chip->ecc.read_page_raw = cast_read_page_raw;
+	nand_chip->write_page = cast_write_page;
+	nand_chip->ecc.write_page = dummy_ecc_write_page;
+	nand_chip->ecc.write_oob = nand_write_oob_std;
+	nand_chip->erase_cmd = nand_erase_cmd;
+	nand_chip->ecc.read_oob = cast_read_oob_std;
+
+	/*
+	 * CAST controller supports ecc only when the whole page is read.
+	 */
+	nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+	err = nand_scan_tail(mtd);
+	if (err)
+		goto no_flash;
+
+	/* Do not use default partitions anymore,
+	 * only take into account mtdparts= from cmdline */
+	err = mtd_device_parse_register(mtd,
+					NULL,
+					NULL,
+					/*pdata ? pdata->parts : */NULL,
+					/*pdata ? pdata->parts_nr : */0);
+	if (err)
+		goto no_part;
+	onfi_compatible(mtd);
+	if (nand_chip->onfi_version)
+		print_feature(mtd, 0x01);
+
+	dev_dbg(&pdev->dev, "reg_ctrl: 0x%x\n", cast_readl(host, CAST_CONTROL));
+	dev_dbg(&pdev->dev, "ecc_ctrl: 0x%x\n", cast_readl(host, CAST_ECC_CTRL));
+	dev_dbg(&pdev->dev, "ecc_off: 0x%x\n", cast_readl(host, CAST_ECC_OFFSET));
+
+	platform_set_drvdata(pdev, host);
+
+	/* Register sysfs files */
+	device_create_file(&pdev->dev, &dev_attr_rom_ctrl);
+	device_create_file(&pdev->dev, &dev_attr_rom_ecc_ctrl);
+	device_create_file(&pdev->dev, &dev_attr_rom_ecc_offset);
+	device_create_file(&pdev->dev, &dev_attr_rom_gen_seq_ctrl);
+	device_create_file(&pdev->dev, &dev_attr_rom_time_seq_0);
+	device_create_file(&pdev->dev, &dev_attr_rom_time_seq_1);
+	device_create_file(&pdev->dev, &dev_attr_rom_time_asyn);
+
+	return 0;
+
+no_part:
+	nand_release(mtd);
+no_flash:
+	free_irq(P7_NAND_IRQ, host);
+no_irq:
+	pinctrl_put(host->pctl);
+no_pin:
+	clk_disable_unprepare(host->clk_ctrl);
+	clk_put(host->clk_ctrl);
+	clk_disable_unprepare(host->clk_phy);
+	clk_put(host->clk_phy);
+no_clk_init:
+	iounmap(host->regs);
+no_ioremap:
+	release_mem_region(host->area->start, resource_size(host->area));
+no_res:
+	if (host->dmabuf) {
+		dma_free_coherent(&pdev->dev,
+				  NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE,
+				  host->dmabuf, host->dmaaddr);
+	}
+no_dma:
+	kfree(host);
+	return err;
+}
+
+/*
+ * Remove a NAND device.
+ *
+ * Tears down everything set up in cast_nand_probe(), in reverse order.
+ *
+ * Fix vs. previous revision: free the IRQ that probe actually requested
+ * (P7_NAND_IRQ) instead of platform_get_irq(pdev, 0) — if the platform
+ * resource ever differed, the wrong IRQ would have been freed.
+ */
+static int __devexit cast_nand_remove(struct platform_device *pdev)
+{
+	struct cast_nand_host *host = platform_get_drvdata(pdev);
+	platform_set_drvdata(pdev, NULL);
+
+	/* Remove sysfs files */
+	device_remove_file(&pdev->dev, &dev_attr_rom_ctrl);
+	device_remove_file(&pdev->dev, &dev_attr_rom_ecc_ctrl);
+	device_remove_file(&pdev->dev, &dev_attr_rom_ecc_offset);
+	device_remove_file(&pdev->dev, &dev_attr_rom_gen_seq_ctrl);
+	device_remove_file(&pdev->dev, &dev_attr_rom_time_seq_0);
+	device_remove_file(&pdev->dev, &dev_attr_rom_time_seq_1);
+	device_remove_file(&pdev->dev, &dev_attr_rom_time_asyn);
+
+	nand_release(&host->mtd);
+	free_irq(P7_NAND_IRQ, host);
+	pinctrl_put(host->pctl);
+	iounmap(host->regs);
+	clk_disable_unprepare(host->clk_ctrl);
+	clk_put(host->clk_ctrl);
+	clk_disable_unprepare(host->clk_phy);
+	clk_put(host->clk_phy);
+	release_mem_region(host->area->start, resource_size(host->area));
+	if (host->dmabuf) {
+		dma_free_coherent(&pdev->dev,
+				  NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE,
+				  host->dmabuf, host->dmaaddr);
+	}
+	kfree(host);
+	return 0;
+}
+
+/*
+ * Suspend: release the controller and phy clocks.  The matching clk_get
+ * is redone by cast_init_clock() in cast_resume().
+ */
+static int cast_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct cast_nand_host *host = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "suspend\n");
+	clk_disable_unprepare(host->clk_ctrl);
+	clk_put(host->clk_ctrl);
+	clk_disable_unprepare(host->clk_phy);
+	clk_put(host->clk_phy);
+
+	return 0;
+}
+
+/*
+ * Resume: re-acquire the clocks, reprogram default timings, reset the
+ * chip and restore the controller configuration chosen at probe time.
+ */
+static int cast_resume(struct platform_device *pdev)
+{
+	struct cast_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = &host->mtd;
+	int nand_id;
+
+	dev_info(&pdev->dev, "resume\n");
+
+	cast_init_clock(host, pdev);
+
+	/* initialize cast with speed mode 0 and asynchronous */
+	cast_init_timings(mtd, 0, 0, 0);
+
+	/* No write protect */
+	cast_writel(0xFF00, host, CAST_MEM_CTRL);
+
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	/* wake the chip up and re-read its ID (debug only) */
+	nand_reset(mtd);
+	nand_read_id(mtd, 0x00);
+	nand_read_buf(host, (uint8_t *)&nand_id, 4);
+	/* Reset fifo */
+	cast_writel(CAST_FIFO_INIT_FLASH, host, CAST_FIFO_INIT);
+
+	dev_dbg(&pdev->dev, "nand_id: 0x%x\n", nand_id);
+
+	cast_config_controller(mtd);
+
+	onfi_compatible(mtd);
+
+	dev_dbg(&pdev->dev, "reg_ctrl: 0x%x\n", cast_readl(host, CAST_CONTROL));
+	dev_dbg(&pdev->dev, "ecc_ctrl: 0x%x\n", cast_readl(host, CAST_ECC_CTRL));
+	dev_dbg(&pdev->dev, "ecc_off: 0x%x\n", cast_readl(host, CAST_ECC_OFFSET));
+
+	return 0;
+}
+
+/* Platform driver glue for the CAST NAND controller. */
+static struct platform_driver cast_nand_driver = {
+	.driver = {
+		.name = CAST_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = cast_nand_probe,
+	.suspend = cast_suspend,
+	.resume = cast_resume,
+	.remove = __devexit_p(cast_nand_remove)
+};
+
+module_platform_driver(cast_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Guangye Tian guangye.tian@parrot.com");
+MODULE_DESCRIPTION("NAND driver for CAST Controller");
+MODULE_ALIAS("platform:cast_nand");
diff --git a/drivers/parrot/nand/cast-nand.h b/drivers/parrot/nand/cast-nand.h
new file mode 100644
index 00000000000000..629c15ef06cd3b
--- /dev/null
+++ b/drivers/parrot/nand/cast-nand.h
@@ -0,0 +1,36 @@
+/**
+ * drivers/parrot/nand/cast-nand.h - Evatronix NANDFLASH-CTRL IP definitions &
+ * NAND FLASH definitions
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: guangye.tian@parrot.com
+ * date: 2010-09-22
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#ifndef __CAST_NAND_H
+#define __CAST_NAND_H
+
+#if defined(CONFIG_MTD_NAND_CAST) || \
+    defined(CONFIG_MTD_NAND_CAST_MODULE)
+
+#define CAST_DRV_NAME "cast-nand"
+
+struct mtd_partition;
+
+/* Board-level platform data passed to the cast-nand platform device. */
+struct cast_plat_data {
+	struct mtd_partition const* const parts;	/* static partition table */
+	size_t parts_nr;	/* number of entries in parts */
+	unsigned int chip_rev;	/* chip revision (presumably the P7 SoC
+				 * revision — confirm with board code) */
+};
+
+#define FLASH_COUNT 1 /* Nb of Nand Flash Devices */
+#define SUPPORT_BUSWIDTH_16 0
+
+#endif /* defined(CONFIG_MTD_NAND_CAST) || \
+	  defined(CONFIG_MTD_NAND_CAST_MODULE) */
+
+#endif /* __CAST_NAND_H */
diff --git a/drivers/parrot/nand/cast-regs.h b/drivers/parrot/nand/cast-regs.h
new file mode 100644
index 00000000000000..21e40995f1d36c
--- /dev/null
+++ b/drivers/parrot/nand/cast-regs.h
@@ -0,0 +1,219 @@
+/**
+ * CAST Nand Controller Driver — register map and bit definitions.
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: G.Tian <guangye.tian@parrot.com>
+ * date: 2012-10-24
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#ifndef __CAST_REGS_H
+#define __CAST_REGS_H
+
+/* Special Function Registers of NANDFLASH-CTRL IP (byte offsets) */
+#define CAST_COMMAND		0x00 /* Controller commands register */
+#define CAST_CONTROL		0x04 /* The main configurations register */
+#define CAST_STATUS		0x08 /* The controller status register */
+#define CAST_INT_MASK		0x0C /* Interrupts mask register */
+#define CAST_INT_STATUS		0x10 /* Interrupts status register */
+#define CAST_ECC_CTRL		0x14 /* ECC module status register */
+#define CAST_ECC_OFFSET		0x18 /* ECC offset in the spare area */
+#define CAST_ADDR0_1		0x24 /* The most significant part
+				      * of address register 0
+				      */
+#define CAST_ADDR0_0		0x1C /*
+				      * The least significant part
+				      * of address register 0
+				      */
+#define CAST_ADDR1_1		0x28
+#define CAST_ADDR1_0		0x20
+#define CAST_SPARE_SIZE		0x30 /*
+				      * Actual value of the user
+				      * data size in spare area
+				      */
+#define CAST_PROTECT		0x38 /* write/erase protect */
+#define CAST_LOOKUP_EN		0x40 /*
+				      * Enables Look-up registers during
+				      * NAND Flash memory addressing
+				      */
+#define CAST_LOOKUP0		0x44 /* LUTs are used for address remapping
+				      * Essential for Bad-Block management
+				      */
+#define CAST_LOOKUP1		0x48
+#define CAST_LOOKUP2		0x4C
+#define CAST_LOOKUP3		0x50
+#define CAST_LOOKUP4		0x54
+#define CAST_LOOKUP5		0x58
+#define CAST_LOOKUP6		0x5C
+#define CAST_LOOKUP7		0x60
+#define CAST_DMA_ADDR		0x64 /* DMA module base address */
+#define CAST_DMA_CNT		0x68 /* DMA module counter initial value */
+#define CAST_DMA_CTRL		0x6C /* DMA module control register */
+#define CAST_MEM_CTRL		0x80 /* Memory device control register */
+#define CAST_DATA_SIZE		0x84 /* Custom data size value */
+#define CAST_READ_STATUS	0x88 /* READ_STATUS cmd output value */
+#define CAST_TIME_SEQ_0		0x8C /* Timing configuration */
+#define CAST_TIME_SEQ_1		0xBC
+#define CAST_TIMINGS_ASYN	0x90
+#define CAST_TIMINGS_SYN	0x94
+#define CAST_FIFO_DATA		0x98 /* FIFO module interface */
+#define CAST_DMA_ADDR_OFFSET	0xA0 /* DMA module address offset */
+#define CAST_FIFO_INIT		0xB0 /* FIFO module control register */
+#define CAST_GENERIC_SEQ_CTRL	0xB4 /* generic sequence register */
+#define CAST_FIFO_STATE		0xB8 /* FIFO module status */
+#define CAST_MLUN		0xC0 /* MLUN register */
+
+/* Control register bit coding */
+/* Address cycle_0, CTRL_REG[2:0] */
+#define CAST_CTRL_ADDR_CYCLE0(_n)	(_n)
+#define CAST_CTRL_ADDR_CYCLE0_MASK	7
+/* Address cycle_1, CTRL_REG[20:18] */
+#define CAST_CTRL_ADDR_CYCLE1(_n)	((_n) << 18)
+#define CAST_CTRL_ADDR_CYCLE1_MASK	(7 << 18)
+#define CAST_CTRL_SPARE_EN		(1 << 3)
+#define CAST_CTRL_INT_EN		(1 << 4)
+#define CAST_CTRL_ECC_EN		(1 << 5)
+#define CAST_CTRL_BLOCK_32		(0 << 6) /* Pages/Block, CTRL_REG[7:6] */
+#define CAST_CTRL_BLOCK_64		(1 << 6)
+#define CAST_CTRL_BLOCK_128		(2 << 6)
+#define CAST_CTRL_BLOCK_256		(3 << 6)
+#define CAST_CTRL_PAGE_256		(0 << 8) /* Bytes/Page, CTRL_REG[10:8] */
+#define CAST_CTRL_PAGE_512		(1 << 8)
+#define CAST_CTRL_PAGE_1K		(2 << 8)
+#define CAST_CTRL_PAGE_2K		(3 << 8)
+#define CAST_CTRL_PAGE_4K		(4 << 8)
+#define CAST_CTRL_PAGE_8K		(5 << 8)
+#define CAST_CTRL_PAGE_16K		(6 << 8)
+#define CAST_CTRL_PAGE_0		(7 << 8)
+#define CAST_CTRL_CUSTOM_SIZE_EN	(1 << 11)
+#define CAST_CTRL_IO_WIDTH_8		(0 << 12)
+#define CAST_CTRL_IO_WIDTH_16		(1 << 12)
+#define CAST_CTRL_SYNC_MODE		(1 << 15)
+
+/* Command register bit coding */
+#define SHIFT_CMD_2			24
+#define SHIFT_CMD_1			16
+#define SHIFT_CMD_0			8
+#define CAST_CMD_ADDR_SEL0		(0 << 7) /* ADDR0 */
+#define CAST_CMD_ADDR_SEL1		(1 << 7) /* ADDR1 */
+#define CAST_CMD_INPUT_SEL_SIU		(0 << 6)
+#define CAST_CMD_INPUT_SEL_DMA		(1 << 6)
+#define CAST_CMD_SEQ_0			0b000000
+#define CAST_CMD_SEQ_1			0b100001
+#define CAST_CMD_SEQ_2			0b100010
+#define CAST_CMD_SEQ_3			0b000011
+#define CAST_CMD_SEQ_4			0b100100
+#define CAST_CMD_SEQ_5			0b100101
+#define CAST_CMD_SEQ_6			0b100110
+#define CAST_CMD_SEQ_7			0b100111
+#define CAST_CMD_SEQ_8			0b001000
+#define CAST_CMD_SEQ_9			0b101001
+#define CAST_CMD_SEQ_10			0b101010
+#define CAST_CMD_SEQ_11			0b101011
+#define CAST_CMD_SEQ_12			0b001100
+#define CAST_CMD_SEQ_13			0b001101
+#define CAST_CMD_SEQ_14			0b001110
+#define CAST_CMD_SEQ_15			0b101111
+#define CAST_CMD_SEQ_16			0b110000
+#define CAST_CMD_SEQ_17			0b010101
+#define CAST_CMD_SEQ_18			0b110010
+#define CAST_CMD_SEQ_19			0b010011
+
+/* ECC_CTRL register bit coding */
+/* BCH32 ECC */
+#define CAST_ECC_CTRL_ERR_THRLD	(8) /* ecc error threshold ECC_CTRL[13:8] */
+#define CAST_ECC_CTRL_CAP_2	(0 << 4) /* ECC capacity ECC_CTRL[7:4] */
+#define CAST_ECC_CTRL_CAP_4	(1 << 4)
+#define CAST_ECC_CTRL_CAP_6	(2 << 4)
+#define CAST_ECC_CTRL_CAP_8	(3 << 4)
+#define CAST_ECC_CTRL_CAP_10	(4 << 4)
+#define CAST_ECC_CTRL_CAP_12	(5 << 4)
+#define CAST_ECC_CTRL_CAP_14	(6 << 4)
+#define CAST_ECC_CTRL_CAP_16	(7 << 4)
+#define CAST_ECC_CTRL_CAP_18	(8 << 4)
+#define CAST_ECC_CTRL_CAP_20	(9 << 4)
+#define CAST_ECC_CTRL_CAP_22	(10 << 4)
+#define CAST_ECC_CTRL_CAP_24	(11 << 4)
+#define CAST_ECC_CTRL_CAP_26	(12 << 4)
+#define CAST_ECC_CTRL_CAP_28	(13 << 4)
+#define CAST_ECC_CTRL_CAP_30	(14 << 4)
+#define CAST_ECC_CTRL_CAP_32	(15 << 4)
+/* generic form of the CAP_x values above; _n must be even, 2..32 */
+#define CAST_ECC_CTRL_CAP(_n)	((((_n)>>1)-1)<<4)
+
+#define CAST_ECC_CTRL_BLOCK_256	(0 << 14)
+#define CAST_ECC_CTRL_BLOCK_512	(1 << 14)
+#define CAST_ECC_CTRL_BLOCK_1K	(2 << 14)
+
+/* Set when err_bits>ERR_THRESHOLD */
+#define CAST_ECC_CTRL_ERR_OVER		(1 << 2)
+/* Set when uncorrectable err occurs */
+#define CAST_ECC_CTRL_ERR_UNCORRECT	(1 << 1)
+/* Set when correctable err occurs */
+#define CAST_ECC_CTRL_ERR_CORRECT	(1 << 0)
+
+/* MEM_CTRL Register bit coding */
+/* MEM_CE[2:0], selected mem */
+#define CAST_MEM_CTRL_CE_MASK	7
+/* device(_n) WP line state */
+#define CAST_MEM_CTRL_WP(_n)	(8 << (_n))
+
+/* STATUS Register bit coding */
+/* Controller 0: ready; 1: busy */
+#define CAST_STAT_CTRL_STAT	(1 << 8)
+/* Device 0: busy; 1: ready */
+#define CAST_STAT_MEMX_ST(_n)	(1 << (_n))
+
+/* READ STATUS Register bit coding */
+#define CAST_READ_STAT_MASK	0xFF /* Status bit in last 8 bits */
+
+/* FIFO INIT Register bit coding */
+/* Setting this bit flushes the FIFO */
+#define CAST_FIFO_INIT_FLASH	(1 << 0)
+
+/* FIFO STATE Register bit coding */
+#define CAST_FIFO_STATE_STAT	(1 << 0)
+
+/* Interrupt registers */
+#define CAST_INT_MASK_DMA_READY_EN		(1 << 13)
+#define CAST_INT_MASK_FIFO_ERROR_EN		(1 << 12)
+#define CAST_INT_MASK_MEMx_RDY_INT_EN(_n)	(16 << (_n))
+#define CAST_INT_MASK_ECC_TRSH_ERR_EN		(1 << 3)
+#define CAST_INT_MASK_ECC_FATAL_ERR_EN		(1 << 2)
+#define CAST_INT_MASK_CMD_END_INT_EN		(1 << 1)
+#define CAST_INT_MASK_PROT_INT_EN		(1 << 0)
+
+#define CAST_INT_STATUS_DMA_READY_FL		(1 << 13)
+#define CAST_INT_STATUS_FIFO_ERROR_FL		(1 << 12)
+#define CAST_INT_STATUS_MEMx_RDY_INT_FL(_n)	(16 << (_n))
+#define CAST_INT_STATUS_ECC_TRSH_ERR_FL		(1 << 3)
+#define CAST_INT_STATUS_ECC_FATAL_ERR_FL	(1 << 2)
+#define CAST_INT_STATUS_CMD_END_INT_FL		(1 << 1)
+#define CAST_INT_STATUS_PROT_INT_FL		(1 << 0)
+
+/* DMA CTRL register mapping */
+#define CAST_DMA_CTRL_DMA_START		(1 << 7)
+#define CAST_DMA_CTRL_DMA_DIR_READ	(1 << 6) /* CNTL's view */
+#define CAST_DMA_CTRL_DMA_DIR_WRITE	(0 << 6) /* CNTL's view */
+#define CAST_DMA_CTRL_DMA_MODE_SFR	(0 << 5)
+#define CAST_DMA_CTRL_DMA_MODE_S_G	(1 << 5)
+#define CAST_DMA_CTRL_DMA_BURST_MASK	(7 << 2)
+#define CAST_DMA_CTRL_DMA_ERR_FLAG	(1 << 1)
+#define CAST_DMA_CTRL_DMA_READY		(1 << 0)
+
+/* incremental burst (burst length is four) */
+#define DMA_BURST_INC_BURST_LENGTH_4	(0 << 2)
+/* Stream burst (address const) */
+#define DMA_BURST_STREAM_BURST		(1 << 2)
+/* single transfer (address increment) */
+#define DMA_BURST_SINGLE_TRANSFER	(2 << 2)
+/* burst of unspecified length (address increment) */
+#define DMA_BURST_UNSPEC_LENGTH		(3 << 2)
+/* incremental burst (burst length eight) */
+#define DMA_BURST_INC_BURST_LENGTH_8	(4 << 2)
+/* incremental burst (burst length sixteen) */
+#define DMA_BURST_INC_BURST_LENGTH_16	(5 << 2)
+
+#endif /* __CAST_REGS_H */
diff --git a/drivers/parrot/nand/cast-timings.c b/drivers/parrot/nand/cast-timings.c
new file mode 100644
index 00000000000000..99efbe3dd6d6c0
--- /dev/null
+++ b/drivers/parrot/nand/cast-timings.c
@@ -0,0 +1,105 @@
+/*
+ * @file cast-timings.c
+ * @brief CAST timing parameters for mode[0 - 5]
+ *
+ * @Author G.Tian (guangye.tian@parrot.com)
+ * @Date 2011-11-18
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ */
+
+#include <linux/types.h>
+#include "cast-timings.h"
+
+const struct cast_timings cast_modes[2][6] = {
+ /* asynchronous data interface */
+ [T_ASYN] = {
+ /* mode 0 */
+ [0] = {
+ .twhr = 120, .trhw = 200, .tadl = 200,
+ .tccs = 500, .trr = 40, .twb = 200,
+ .twp = 50, .twh = 30, .twc = 100, .tcad = 45,
+ .trp = 53, .treh = 30, .trc = 100, .pll = 0,
+ },
+ /* mode 1 */
+ [1] = {
+ .twhr = 80, .trhw = 100, .tadl = 100,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 25, .twh = 15, .twc = 45, .tcad = 45,
+ .trp = 43, .treh = 15, .trc = 58, .pll = 0,
+ },
+ /* mode 2 */
+ [2] = {
+ .twhr = 80, .trhw = 100, .tadl = 100,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 17, .twh = 15, .twc = 35, .tcad = 45,
+ .trp = 38, .treh = 15, .trc = 53, .pll = 0,
+ },
+ /* mode 3 */
+ [3] = {
+ .twhr = 60, .trhw = 100, .tadl = 100,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 15, .twh = 10, .twc = 30, .tcad = 45,
+ .trp = 33, .treh = 10, .trc = 43, .pll = 0,
+ },
+ /* mode 4 (edo is not supported by CAST) */
+ [4] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 12, .twh = 10, .twc = 25, .tcad = 45,
+ .trp = 33, .treh = 10, .trc = 43, .pll = 0,
+ },
+ /* mode 5 (edo is not supported by CAST) */
+ [5] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 10, .twh = 7, .twc = 20, .tcad = 45,
+ .trp = 29, .treh = 7, .trc = 36, .pll = 0,
+ },
+ },
+ /* synchronous data interface */
+ [T_SYN] = {
+ /* mode 0 */
+ [0] = {
+ .twhr = 80, .trhw = 100, .tadl = 100,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 50, .twh = 30, .twc = 100, .tcad = 45,
+ .trp = 53, .treh = 30, .trc = 100, .pll = (20000000*4),
+ },
+ /* mode 1 */
+ [1] = {
+ .twhr = 60, .trhw = 100, .tadl = 100,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 25, .twh = 15, .twc = 45, .tcad = 45,
+ .trp = 43, .treh = 15, .trc = 58, .pll = (33000000*4),
+ },
+ /* mode 2 */
+ [2] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 17, .twh = 15, .twc = 35, .tcad = 45,
+ .trp = 38, .treh = 15, .trc = 53, .pll = (50000000*4),
+ },
+ /* mode 3 */
+ [3] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 15, .twh = 10, .twc = 30, .tcad = 45,
+ .trp = 33, .treh = 10, .trc = 43, .pll = (66000000*4),
+ },
+ /* mode 4 */
+ [4] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 12, .twh = 10, .twc = 25, .tcad = 45,
+ .trp = 33, .treh = 10, .trc = 43, .pll = (83000000*4),
+ },
+ /* mode 5 */
+ [5] = {
+ .twhr = 60, .trhw = 100, .tadl = 70,
+ .tccs = 500, .trr = 20, .twb = 100,
+ .twp = 10, .twh = 7, .twc = 20, .tcad = 45,
+ .trp = 29, .treh = 7, .trc = 36, .pll = (100000000*4),
+ },
+ },
+};
diff --git a/drivers/parrot/nand/cast-timings.h b/drivers/parrot/nand/cast-timings.h
new file mode 100644
index 00000000000000..8bf6f41ca0fe88
--- /dev/null
+++ b/drivers/parrot/nand/cast-timings.h
@@ -0,0 +1,67 @@
+/**
+ * CAST Nand Controller Driver
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: G.Tian <guangye.tian@parrot.com>
+ * date: 2012-10-24
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#ifndef __CAST_TIMINGS_H
+#define __CAST_TIMINGS_H
+
+/* Timing parameters corresponding to cast registers (ns) */
+struct cast_timings {
+ /*
+ * (ASYN) WE# high to RE# low time
+ * (SYN) Cmd to data output time
+ */
+ u16 twhr;
+ /*
+ * (ASYN) RE# high to WE# low time
+ * (SYN) Data output to command, addr, data input time
+ */
+ u16 trhw;
+ /*
+ * (ASYN) ALE to data start time
+ * (SYN) ALE to data loading time
+ */
+ u16 tadl;
+ /*
+ * Change column setup
+ * To be determined with the parameter page for non-zero timing mode
+ */
+ u16 tccs;
+ /* Read high to read low */
+ u16 trr;
+ /* Busy time for interface change (asyn<->syn)*/
+ u16 twb;
+ /* (ASYN) */
+ u16 twp;
+ u16 twh;
+ u16 twc;
+ /* (SYN) Command, address and data delay */
+ u16 tcad;
+ /* (ASYN) */
+ u16 trp;
+ u16 treh;
+ u16 trc;
+
+ /* predefined pll value */
+ u32 pll;
+};
+
+#define T_SYN 0
+#define T_ASYN 1
+
+extern const struct cast_timings cast_modes[2][6];
+
+struct onfi_par_page {
+ int syn_mode;
+ int asyn_mode;
+};
+
+#endif /* __CAST_TIMINGS_H */
diff --git a/drivers/parrot/net/Kconfig b/drivers/parrot/net/Kconfig
new file mode 100644
index 00000000000000..a0ce33e1abfde1
--- /dev/null
+++ b/drivers/parrot/net/Kconfig
@@ -0,0 +1,10 @@
+config KSZ8851SNL
+ tristate "KSZ8851SNL Ethernet support"
+ depends on ARM && SPI && ETHERNET
+ help
+ If you have a KSZ8851SNL Ethernet chip on your board say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ksz8851snl.
+
diff --git a/drivers/parrot/net/Makefile b/drivers/parrot/net/Makefile
new file mode 100644
index 00000000000000..8653f73cde36ad
--- /dev/null
+++ b/drivers/parrot/net/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_KSZ8851SNL) += ksz8851snl.o
diff --git a/drivers/parrot/net/ksz8851_hw.h b/drivers/parrot/net/ksz8851_hw.h
new file mode 100644
index 00000000000000..63197663295c61
--- /dev/null
+++ b/drivers/parrot/net/ksz8851_hw.h
@@ -0,0 +1,487 @@
+/*
+ * ks8851Reg.h
+ *
+ * Micrel KS8851 Registers definitions.
+ *
+ * Copyright (C) 2007-2008 Micrel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Support and updates available at
+ * ftp://www.micrel.com/ethernet/ks8851
+ ------------------------------------------------------------------------------
+
+ Author Date Version Description
+ PCD 07/26/07 Register definition base on KS8851 datasheet.
+ FB 01/10/08 Imported from micrel src with some small modifications
+ ------------------------------------------------------------------------------
+ */
+#ifndef __KS8851REG_h
+#define __KS8851REG_h
+
+
+/*
+ * MAC Registers
+ * (Offset 0x00 - 0x25)
+ */
+#define REG_BUS_ERROR_STATUS 0x06 /* BESR */
+#define BUS_ERROR_IBEC 0x8000
+#define BUS_ERROR_IBECV_MASK 0x7800 /* Default IPSec clock at 166Mhz */
+
+#define REG_CHIP_CFG_STATUS 0x08 /* CCFG */
+#define LITTLE_ENDIAN_BUS_MODE 0x0400 /* Bus in little endian mode */
+#define EEPROM_PRESENCE 0x0200 /* External EEPROM is used */
+#define SPI_BUS_MODE 0x0100 /* In SPI bus mode */
+#define DATA_BUS_8BIT 0x0080 /* In 8-bit bus mode operation */
+#define DATA_BUS_16BIT 0x0040 /* In 16-bit bus mode operation */
+#define DATA_BUS_32BIT 0x0020 /* In 32-bit bus mode operation */
+#define MULTIPLEX_MODE 0x0010 /* Data and address bus are shared */
+#define CHIP_PACKAGE_128PIN 0x0008 /* 128-pin package */
+#define CHIP_PACKAGE_80PIN 0x0004 /* 80-pin package */
+#define CHIP_PACKAGE_48PIN 0x0002 /* 48-pin package */
+#define CHIP_PACKAGE_32PIN 0x0001 /* 32-pin package for SPI host interface only */
+
+#define REG_MAC_ADDR_0 0x10 /* MARL */
+#define REG_MAC_ADDR_1 0x11 /* MARL */
+#define REG_MAC_ADDR_2 0x12 /* MARM */
+#define REG_MAC_ADDR_3 0x13 /* MARM */
+#define REG_MAC_ADDR_4 0x14 /* MARH */
+#define REG_MAC_ADDR_5 0x15 /* MARH */
+
+#define REG_BUS_CLOCK_CTRL 0x20 /* OBCR */
+//David Choi
+#define BUS_CLOCK_16MA 0x0040 /* 16mA output */
+
+#define BUS_CLOCK_DIVIDEDBY_3 0x0002 /* Bus clock divided by 3 */
+#define BUS_CLOCK_DIVIDEDBY_2 0x0001 /* Bus clock divided by 2 */
+#define BUS_CLOCK_DIVIDEDBY_1 0x0000 /* Bus clock divided by 1 */
+#define BUS_CLOCK_DIVIDED_MASK 0x0003 /* Bus clock divider mask */
+
+#define BUS_SPEED_125_MHZ 0x0000 /* Set bus speed to 125 MHz */
+#define BUS_SPEED_62_5_MHZ 0x0001 /* Set bus speed to 62.5 MHz (125/2) */
+#define BUS_SPEED_41_7_MHZ 0x0002 /* Set bus speed to 41.67 MHz (125/3) */
+
+
+#define REG_EEPROM_CTRL 0x22 /* EEPCR */
+#define EEPROM_ACCESS_ENABLE 0x0010 /* Enable software to access EEPROM through bit 3 to bit 0 */
+#define EEPROM_DATA_IN 0x0008 /* Data receive from EEPROM (EEDI pin) */
+#define EEPROM_DATA_OUT 0x0004 /* Data transmit to EEPROM (EEDO pin) */
+#define EEPROM_SERIAL_CLOCK 0x0002 /* Serial clock (EESK pin) */
+#define EEPROM_CHIP_SELECT 0x0001 /* EEPROM chip select (EECS pin) */
+
+#define REG_MEM_BIST_INFO 0x24 /* MBIR */
+#define TX_MEM_TEST_FINISHED 0x1000 /* TX memory BIST test finish */
+#define TX_MEM_TEST_FAILED 0x0800 /* TX memory BIST test fail */
+#define TX_MEM_TEST_FAILED_COUNT 0x0700 /* TX memory BIST test fail count */
+#define RX_MEM_TEST_FINISHED 0x0010 /* RX memory BIST test finish */
+#define RX_MEM_TEST_FAILED 0x0008 /* RX memory BIST test fail */
+#define RX_MEM_TEST_FAILED_COUNT 0x0003 /* RX memory BIST test fail count */
+
+#define REG_RESET_CTRL 0x26 /* GRR */
+#define QMU_SOFTWARE_RESET 0x0002 /* QMU soft reset (clear TxQ, RxQ) */
+#define GLOBAL_SOFTWARE_RESET 0x0001 /* Global soft reset (PHY, MAC, QMU) */
+
+
+/*
+ * Wake On Lan Control Registers
+ * (Offset 0x2A - 0x6B)
+ */
+#define REG_WOL_CTRL_ETH 0x2A /* WFCR */
+#define WOL_MAGIC_ENABLE_ETH 0x0080 /* Enable the magic packet pattern detection */
+#define WOL_FRAME3_ENABLE_ETH 0x0008 /* Enable the wake up frame 3 pattern detection */
+#define WOL_FRAME2_ENABLE_ETH 0x0004 /* Enable the wake up frame 2 pattern detection */
+#define WOL_FRAME1_ENABLE_ETH 0x0002 /* Enable the wake up frame 1 pattern detection */
+#define WOL_FRAME0_ENABLE_ETH 0x0001 /* Enable the wake up frame 0 pattern detection */
+
+#define REG_WOL_FRAME0_CRC0 0x30 /* WF0CRC0 */
+#define REG_WOL_FRAME0_CRC1 0x32 /* WF0CRC1 */
+#define REG_WOL_FRAME0_BYTE_MASK0 0x34 /* WF0BM0 */
+#define REG_WOL_FRAME0_BYTE_MASK1 0x36 /* WF0BM1 */
+#define REG_WOL_FRAME0_BYTE_MASK2 0x38 /* WF0BM2 */
+#define REG_WOL_FRAME0_BYTE_MASK3 0x3A /* WF0BM3 */
+
+#define REG_WOL_FRAME1_CRC0 0x40 /* WF1CRC0 */
+#define REG_WOL_FRAME1_CRC1 0x42 /* WF1CRC1 */
+#define REG_WOL_FRAME1_BYTE_MASK0 0x44 /* WF1BM0 */
+#define REG_WOL_FRAME1_BYTE_MASK1 0x46 /* WF1BM1 */
+#define REG_WOL_FRAME1_BYTE_MASK2 0x48 /* WF1BM2 */
+#define REG_WOL_FRAME1_BYTE_MASK3 0x4A /* WF1BM3 */
+
+#define REG_WOL_FRAME2_CRC0 0x50 /* WF2CRC0 */
+#define REG_WOL_FRAME2_CRC1 0x52 /* WF2CRC1 */
+#define REG_WOL_FRAME2_BYTE_MASK0 0x54 /* WF2BM0 */
+#define REG_WOL_FRAME2_BYTE_MASK1 0x56 /* WF2BM1 */
+#define REG_WOL_FRAME2_BYTE_MASK2 0x58 /* WF2BM2 */
+#define REG_WOL_FRAME2_BYTE_MASK3 0x5A /* WF2BM3 */
+
+#define REG_WOL_FRAME3_CRC0 0x60 /* WF3CRC0 */
+#define REG_WOL_FRAME3_CRC1 0x62 /* WF3CRC1 */
+#define REG_WOL_FRAME3_BYTE_MASK0 0x64 /* WF3BM0 */
+#define REG_WOL_FRAME3_BYTE_MASK1 0x66 /* WF3BM1 */
+#define REG_WOL_FRAME3_BYTE_MASK2 0x68 /* WF3BM2 */
+#define REG_WOL_FRAME3_BYTE_MASK3 0x6A /* WF3BM3 */
+
+
+/*
+ * Transmit/Receive Control Registers
+ * (Offset 0x70 - 0x9F)
+ */
+
+/* Transmit Frame Header */
+#define REG_QDR_DUMMY 0x00 /* Dummy address to access QMU RxQ, TxQ */
+#define TX_CTRL_INTERRUPT_ON 0x8000 /* Transmit Interrupt on Completion */
+
+#define REG_TX_CTRL 0x70 /* TXCR */
+#define TX_CTRL_ICMP_CHECKSUM 0x0100 /* Enable ICMP frame checksum generation */
+#define TX_CTRL_UDP_CHECKSUM 0x0080 /* Enable UDP frame checksum generation */
+#define TX_CTRL_TCP_CHECKSUM 0x0040 /* Enable TCP frame checksum generation */
+#define TX_CTRL_IP_CHECKSUM 0x0020 /* Enable IP frame checksum generation */
+#define TX_CTRL_FLUSH_QUEUE 0x0010 /* Clear transmit queue, reset tx frame pointer */
+#define TX_CTRL_FLOW_ENABLE 0x0008 /* Enable transmit flow control */
+#define TX_CTRL_PAD_ENABLE 0x0004 /* Enable adding a padding to a packet shorter than 64 bytes */
+#define TX_CTRL_CRC_ENABLE 0x0002 /* Enable adding a CRC to the end of transmit frame */
+#define TX_CTRL_ENABLE 0x0001 /* Enable transmit */
+
+#define REG_TX_STATUS 0x72 /* TXSR */
+#define TX_STAT_LATE_COL 0x2000 /* Transmit late collision occurs */
+#define TX_STAT_MAX_COL 0x1000 /* Transmit maximum collision is reached */
+#define TX_FRAME_ID_MASK 0x003F /* Transmit frame ID mask */
+#define TX_STAT_ERRORS ( TX_STAT_MAX_COL | TX_STAT_LATE_COL )
+
+#define REG_RX_CTRL1 0x74 /* RXCR1 */
+#define RX_CTRL_FLUSH_QUEUE 0x8000 /* Clear receive queue, reset rx frame pointer */
+#define RX_CTRL_UDP_CHECKSUM 0x4000 /* Enable UDP frame checksum verification */
+#define RX_CTRL_TCP_CHECKSUM 0x2000 /* Enable TCP frame checksum verification */
+#define RX_CTRL_IP_CHECKSUM 0x1000 /* Enable IP frame checksum verification */
+#define RX_CTRL_MAC_FILTER 0x0800 /* Receive with address that pass MAC address filtering */
+#define RX_CTRL_FLOW_ENABLE 0x0400 /* Enable receive flow control */
+#define RX_CTRL_BAD_PACKET 0x0200 /* Enable receive CRC error frames */
+#define RX_CTRL_HASH_FILTER 0x0100 /* Receive multicast frames that pass the CRC hash filtering */
+#define RX_CTRL_BROADCAST 0x0080 /* Receive all the broadcast frames */
+#define RX_CTRL_ALL_MULTICAST 0x0040 /* Receive all the multicast frames (including broadcast frames) */
+#define RX_CTRL_UNICAST 0x0020 /* Receive unicast frames that match the device MAC address */
+#define RX_CTRL_PROMISCUOUS 0x0010 /* Receive all incoming frames, regardless of frame's DA */
+#define RX_CTRL_INVERSE_FILTER 0x0002 /* Receive with address check in inverse filtering mode */
+#define RX_CTRL_ENABLE 0x0001 /* Enable receive */
+
+/* #5 hash perfect (from micrel datasheet)
+ *
+ * All Rx frames are passed with Physical address (DA) matching the MAC address
+ * and with Multicast address matching the MAC address hash table
+ */
+#define RX_CTRL_MODE_HASH (RX_CTRL_UDP_CHECKSUM | RX_CTRL_TCP_CHECKSUM | \
+ RX_CTRL_IP_CHECKSUM | RX_CTRL_FLOW_ENABLE | \
+ RX_CTRL_ENABLE | RX_CTRL_UNICAST | \
+ RX_CTRL_ALL_MULTICAST |RX_CTRL_BROADCAST | \
+ RX_CTRL_PROMISCUOUS |RX_CTRL_INVERSE_FILTER)
+
+#define RX_CTRL_MODE_NORMAL (RX_CTRL_MODE_HASH & ~RX_CTRL_ALL_MULTICAST)
+
+
+/* #7 promiscuous (from micrel datasheet)
+ *
+ * All Rx frames are passed without any condition
+ */
+#define RX_CTRL_MODE_PROMISC (RX_CTRL_UDP_CHECKSUM | RX_CTRL_TCP_CHECKSUM | \
+ RX_CTRL_IP_CHECKSUM | RX_CTRL_FLOW_ENABLE | \
+ RX_CTRL_ENABLE | RX_CTRL_UNICAST | \
+ RX_CTRL_ALL_MULTICAST |RX_CTRL_BROADCAST | \
+ RX_CTRL_PROMISCUOUS |RX_CTRL_INVERSE_FILTER)
+
+
+/* #9 perfect with multicast address passed (from micrel datasheet)
+ *
+ * All Rx frames are passed with Physical Address (DA) matching the MAC address
+ * and with Multicast address without any condition
+ */
+#define RX_CTRL_MODE_PMAP (RX_CTRL_UDP_CHECKSUM | RX_CTRL_TCP_CHECKSUM | \
+ RX_CTRL_IP_CHECKSUM | RX_CTRL_FLOW_ENABLE | \
+ RX_CTRL_ENABLE | RX_CTRL_UNICAST | \
+ RX_CTRL_ALL_MULTICAST |RX_CTRL_BROADCAST | \
+ RX_CTRL_HASH_FILTER | RX_CTRL_MAC_FILTER | \
+ RX_CTRL_PROMISCUOUS)
+
+#define REG_RX_CTRL2 0x76 /* RXCR2 */
+#define RX_CTRL_IPV6_UDP_FRAG_PASS 0x0010 /* No checksum generation and verification if IPv6 UDP is fragment */
+#define RX_CTRL_IPV6_UDP_ZERO_PASS 0x0008 /* Receive pass IPv6 UDP frame with UDP checksum is zero */
+#define RX_CTRL_UDP_LITE_CHECKSUM 0x0004 /* Enable UDP Lite frame checksum generation and verification */
+#define RX_CTRL_ICMP_CHECKSUM 0x0002 /* Enable ICMP frame checksum verification */
+#define RX_CTRL_BLOCK_MAC 0x0001 /* Receive drop frame if the SA is same as device MAC address */
+#define RX_CTRL_SINGLE_FRAME_BURST (0x4<<5) /* Single frame data burst with DMA */
+
+#define REG_TX_MEM_INFO 0x78 /* TXMIR */
+#define TX_MEM_AVAILABLE_MASK 0x1FFF /* The amount of memory available in TXQ */
+
+#define REG_RX_FHR_STATUS 0x7C /* RXFHSR */
+#define RX_VALID 0x8000 /* Frame in the receive packet memory is valid */
+#define RX_ICMP_ERROR 0x2000 /* ICMP checksum field doesn't match */
+#define RX_IP_ERROR 0x1000 /* IP checksum field doesn't match */
+#define RX_TCP_ERROR 0x0800 /* TCP checksum field doesn't match */
+#define RX_UDP_ERROR 0x0400 /* UDP checksum field doesn't match */
+#define RX_BROADCAST 0x0080 /* Received frame is a broadcast frame */
+#define RX_MULTICAST 0x0040 /* Received frame is a multicast frame */
+#define RX_UNICAST 0x0020 /* Received frame is a unicast frame */
+#define RX_PHY_ERROR 0x0010 /* Received frame has runt error */
+#define RX_FRAME_ETHER 0x0008 /* Received frame is an Ethernet-type frame */
+#define RX_TOO_LONG 0x0004 /* Received frame length exceeds max size of 2048 bytes */
+#define RX_RUNT_ERROR 0x0002 /* Received frame was damaged by a collision */
+#define RX_BAD_CRC 0x0001 /* Received frame has a CRC error */
+#define RX_CRC_ERROR ( RX_BAD_CRC | RX_ICMP_ERROR | RX_IP_ERROR | RX_TCP_ERROR |\
+ RX_UDP_ERROR )
+#define RX_ERRORS ( RX_CRC_ERROR | RX_TOO_LONG | RX_RUNT_ERROR | RX_PHY_ERROR )
+
+#define REG_RX_FHR_BYTE_CNT 0x7E /* RXFHBCR */
+#define RX_BYTE_CNT_MASK 0x0FFF /* Received frame byte size mask */
+
+#define REG_TXQ_CMD 0x80 /* TXQCR */
+#define TXQ_AUTO_ENQUEUE 0x0004 /* Enable enqueue tx frames from tx buffer automatically */
+#define TXQ_MEM_AVAILABLE_INT 0x0002 /* Enable generate interrupt when tx memory is available */
+#define TXQ_ENQUEUE 0x0001 /* Enable enqueue tx frames one frame at a time */
+
+#define REG_RXQ_CMD 0x82 /* RXQCR */
+#define RXQ_STAT_TIME_INT 0x1000 /* RX interrupt occurred due to timer duration */
+#define RXQ_STAT_BYTE_CNT_INT 0x0800 /* RX interrupt occurred due to byte count threshold */
+#define RXQ_STAT_FRAME_CNT_INT 0x0400 /* RX interrupt occurred due to frame count threshold */
+#define RXQ_TWOBYTE_OFFSET 0x0200 /* Enable adding 2-byte before frame header for IP aligned with DWORD */
+#define RXQ_TIME_INT 0x0080 /* Enable RX interrupt by timer duration */
+#define RXQ_BYTE_CNT_INT 0x0040 /* Enable RX interrupt by byte count threshold */
+#define RXQ_FRAME_CNT_INT 0x0020 /* Enable RX interrupt by frame count threshold */
+#define RXQ_AUTO_DEQUEUE 0x0010 /* Enable release rx frames from rx buffer automatically */
+#define RXQ_START 0x0008 /* Start QMU transfer operation */
+#define RXQ_CMD_FREE_PACKET 0x0001 /* Manual dequeue (release the current frame from RxQ) */
+#define RXQ_CMD_CNTL (RXQ_FRAME_CNT_INT|RXQ_AUTO_DEQUEUE)
+
+#define REG_TX_ADDR_PTR 0x84 /* TXFDPR */
+#define REG_RX_ADDR_PTR 0x86 /* RXFDPR */
+#define ADDR_PTR_AUTO_INC 0x4000 /* Enable Frame data pointer increments automatically */
+
+#define REG_RX_TIME_THRES 0x8C /* RXDTTR */
+#define RX_TIME_THRESHOLD_MASK 0xFFFF /* Set receive timer duration threshold */
+#define RX_TIME_THRESHOLD_MAX 0xCFFF /* Max value */
+
+#define REG_RX_BYTE_CNT_THRES 0x8E /* RXDBCTR */
+#define RX_BYTE_THRESHOLD_MASK 0xFFFF /* Set receive byte count threshold */
+
+#define REG_INT_MASK 0x90 /* IER */
+#define INT_PHY 0x8000 /* Enable link change interrupt */
+#define INT_TX 0x4000 /* Enable transmit done interrupt */
+#define INT_RX 0x2000 /* Enable receive interrupt */
+#define INT_RX_OVERRUN 0x0800 /* Enable receive overrun interrupt */
+#define INT_TX_STOPPED 0x0200 /* Enable transmit process stopped interrupt */
+#define INT_RX_STOPPED 0x0100 /* Enable receive process stopped interrupt */
+#define INT_RX_ERROR 0x0080 /* Enable receive error frame interrupt */
+#define INT_TX_SPACE 0x0040 /* Enable transmit space available interrupt */
+#define INT_RX_WOL_FRAME 0x0020 /* Enable WOL on receive wake-up frame detect interrupt */
+#define INT_RX_WOL_MAGIC 0x0010 /* Enable WOL on receive magic packet detect interrupt */
+#define INT_RX_WOL_LINKUP 0x0008 /* Enable WOL on link up detect interrupt */
+#define INT_RX_WOL_ENERGY 0x0004 /* Enable WOL on energy detect interrupt */
+#define INT_RX_SPI_ERROR 0x0002 /* Enable receive SPI bus error interrupt */
+#define INT_RX_WOL_DELAY_ENERGY 0x0001 /* Enable WOL on delay energy detect interrupt */
+
+#define INT_SETUP_MASK (INT_PHY | INT_RX)
+
+
+#define REG_INT_STATUS 0x92 /* ISR */
+
+#define REG_RX_FRAME_CNT_THRES 0x9C /* RXFCTFC */
+#define RX_FRAME_CNT_MASK 0xFF00 /* Received frame count mask */
+#define RX_FRAME_THRESHOLD_MASK 0x00FF /* Set receive frame count threshold mask */
+
+#define REG_TX_TOTAL_FRAME_SIZE 0x9E /* TXNTFSR */
+#define TX_TOTAL_FRAME_SIZE_MASK 0xFFFF /* Set next total tx frame size mask */
+
+
+/*
+ * MAC Address Hash Table Control Registers
+ * (Offset 0xA0 - 0xA7)
+ */
+#define REG_MAC_HASH_0 0xA0 /* MAHTR0 */
+#define REG_MAC_HASH_1 0xA1
+
+#define REG_MAC_HASH_2 0xA2 /* MAHTR1 */
+#define REG_MAC_HASH_3 0xA3
+
+#define REG_MAC_HASH_4 0xA4 /* MAHTR2 */
+#define REG_MAC_HASH_5 0xA5
+
+#define REG_MAC_HASH_6 0xA6 /* MAHTR3 */
+#define REG_MAC_HASH_7 0xA7
+
+
+/*
+ * QMU Receive Queue Watermark Control Registers
+ * (Offset 0xB0 - 0xB5)
+ */
+#define REG_RX_LOW_WATERMARK 0xB0 /* FCLWR */
+#define RX_LOW_WATERMARK_MASK 0x0FFF /* Set QMU RxQ low watermark mask */
+
+#define REG_RX_HIGH_WATERMARK 0xB2 /* FCHWR */
+#define RX_HIGH_WATERMARK_MASK 0x0FFF /* Set QMU RxQ high watermark mask */
+
+#define REG_RX_OVERRUN_WATERMARK 0xB4 /* FCOWR */
+#define RX_OVERRUN_WATERMARK_MASK 0x0FFF /* Set QMU RxQ overrun watermark mask */
+
+
+/*
+ * Global Control Registers
+ * (Offset 0xC0 - 0xD3)
+ */
+#define REG_CHIP_ID 0xC0 /* CIDER */
+#define CHIP_ID_MASK 0xFFF0 /* Family ID and chip ID mask */
+#define REVISION_MASK 0x000E /* Chip revision mask */
+#define CHIP_ID_SHIFT 4
+#define REVISION_SHIFT 1
+#define CHIP_ID_8851_16 0x8870 /* KS8851-16/32MQL chip ID */
+
+#define REG_LED_CTRL 0xC6 /* CGCR */
+#define LED_CTRL_SEL1 0x8000 /* Select LED3/LED2/LED1/LED0 indication */
+#define LED_CTRL_SEL0 0x0200 /* Select LED3/LED2/LED1/LED0 indication */
+
+#define REG_IND_IACR 0xC8 /* IACR */
+#define TABLE_READ 0x1000 /* Indirect read */
+#define TABLE_MIB_ETH 0x0C00 /* Select MIB counter table */
+#define TABLE_ENTRY_MASK 0x001F /* Set table entry to access */
+
+#define REG_IND_DATA_LOW 0xD0 /* IADLR */
+#define REG_IND_DATA_HIGH 0xD2 /* IADHR */
+
+
+/*
+ * Power Management Control Registers
+ * (Offset 0xD4 - 0xD7)
+ */
+#define REG_POWER_CNTL 0xD4 /* PMECR */
+#define PME_DELAY_ENABLE 0x4000 /* Enable the PME output pin assertion delay */
+#define PME_ACTIVE_HIGHT 0x1000 /* PME output pin is active high */
+#define PME_FROM_WKFRAME 0x0800 /* PME asserted when wake-up frame is detected */
+#define PME_FROM_MAGIC 0x0400 /* PME asserted when magic packet is detected */
+#define PME_FROM_LINKUP 0x0200 /* PME asserted when link up is detected */
+#define PME_FROM_ENERGY 0x0100 /* PME asserted when energy is detected */
+#define PME_EVENT_MASK 0x0F00 /* PME asserted event mask */
+#define WAKEUP_AUTO_ENABLE 0x0080 /* Enable auto wake-up in energy mode */
+#define WAKEUP_NORMAL_AUTO_ENABLE 0x0040 /* Enable auto goto normal mode from energy detection mode */
+#define WAKEUP_FROM_WKFRAME 0x0020 /* Wake-up from wake-up frame event detected */
+#define WAKEUP_FROM_MAGIC 0x0010 /* Wake-up from magic packet event detected */
+#define WAKEUP_FROM_LINKUP 0x0008 /* Wake-up from link up event detected */
+#define WAKEUP_FROM_ENERGY 0x0004 /* Wake-up from energy event detected */
+#define WAKEUP_EVENT_MASK 0x003C /* Wake-up event mask */
+#define POWER_STATE_D1 0x0003 /* Power saving mode */
+#define POWER_STATE_D3 0x0002 /* Power down mode */
+#define POWER_STATE_D2 0x0001 /* Power detection mode */
+#define POWER_STATE_D0 0x0000 /* Normal operation mode (default) */
+#define POWER_STATE_MASK 0x0003 /* Power management mode mask */
+
+
+#define REG_WAKEUP_TIME 0xD6 /* GSWUTR */
+#define WAKEUP_TIME 0xFF00 /* Min time (sec) wake-up after detected energy */
+#define GOSLEEP_TIME 0x00FF /* Min time (sec) before goto sleep when in energy mode */
+
+
+/*
+ * PHY Control Registers
+ * (Offset 0xD8 - 0xF9)
+ */
+#define REG_PHY_RESET 0xD8 /* PHYRR */
+#define PHY_RESET 0x0001 /* Reset PHY */
+
+#define REG_PHY_CNTL 0xE4 /* P1MBCR */
+//SOI,David Choi
+#define PHY_LOCAL_LOOPBACK 0x4000 /* Enable PHY local loopback */
+#define PHY_SPEED_100MBIT 0x2000 /* Force PHY 100Mbps */
+#define PHY_AUTO_NEG_ENABLE 0x1000 /* Enable PHY auto-negotiation */
+#define PHY_POWER_DOWN 0x0800 /* Set PHY power-down */
+#define PHY_AUTO_NEG_RESTART 0x0200 /* Restart PHY auto-negotiation */
+#define PHY_FULL_DUPLEX 0x0100 /* Force PHY in full duplex mode */
+#define PHY_HP_MDIX 0x0020 /* Set PHY in HP auto MDI-X mode */
+#define PHY_FORCE_MDIX 0x0010 /* Force MDI-X */
+#define PHY_AUTO_MDIX_DISABLE 0x0008 /* Disable auto MDI-X */
+#define PHY_TRANSMIT_DISABLE 0x0002 /* Disable PHY transmit */
+#define PHY_LED_DISABLE 0x0001 /* Disable PHY LED */
+
+#define REG_PHY_STATUS 0xE6 /* P1MBSR */
+#define PHY_100BT4_CAPABLE 0x8000 /* 100 BASE-T4 capable */
+#define PHY_100BTX_FD_CAPABLE 0x4000 /* 100BASE-TX full duplex capable */
+#define PHY_100BTX_CAPABLE 0x2000 /* 100BASE-TX half duplex capable */
+#define PHY_10BT_FD_CAPABLE 0x1000 /* 10BASE-TX full duplex capable */
+#define PHY_10BT_CAPABLE 0x0800 /* 10BASE-TX half duplex capable */
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020 /* Auto-negotiation complete */
+#define PHY_AUTO_NEG_CAPABLE 0x0008 /* Auto-negotiation capable */
+#define PHY_LINK_UP 0x0004 /* PHY link is up */
+#define PHY_EXTENDED_CAPABILITY 0x0001 /* PHY extended register capable */
+
+#define REG_PHY_ID_LOW 0xE8 /* PHY1ILR */
+#define REG_PHY_ID_HIGH 0xEA /* PHY1IHR */
+
+#define REG_PHY_AUTO_NEGOTIATION 0xEC /* P1ANAR */
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400 /* Advertise pause capability */
+#define PHY_AUTO_NEG_100BTX_FD 0x0100 /* Advertise 100 full-duplex capability */
+#define PHY_AUTO_NEG_100BTX 0x0080 /* Advertise 100 half-duplex capability */
+#define PHY_AUTO_NEG_10BT_FD 0x0040 /* Advertise 10 full-duplex capability */
+#define PHY_AUTO_NEG_10BT 0x0020 /* Advertise 10 half-duplex capability */
+#define PHY_AUTO_NEG_SELECTOR 0x001F /* Selector field mask */
+#define PHY_AUTO_NEG_802_3 0x0001 /* 802.3 */
+
+#define REG_PHY_REMOTE_CAPABILITY 0xEE /* P1ANLPR */
+#define PHY_REMOTE_SYM_PAUSE 0x0400 /* Link partner pause capability */
+#define PHY_REMOTE_100BTX_FD 0x0100 /* Link partner 100 full-duplex capability */
+#define PHY_REMOTE_100BTX 0x0080 /* Link partner 100 half-duplex capability */
+#define PHY_REMOTE_10BT_FD 0x0040 /* Link partner 10 full-duplex capability */
+#define PHY_REMOTE_10BT 0x0020 /* Link partner 10 half-duplex capability */
+
+#define REG_PORT_LINK_MD 0xF4 /* P1SCLMD */
+#define PORT_CABLE_10M_SHORT 0x8000 /* Cable length is less than 10m */
+#define PORT_CABLE_STAT_FAILED 0x6000 /* Cable diagnostic test fail */
+#define PORT_CABLE_STAT_SHORT 0x4000 /* Short condition detected in the cable */
+#define PORT_CABLE_STAT_OPEN 0x2000 /* Open condition detected in the cable */
+#define PORT_CABLE_STAT_NORMAL 0x0000 /* Normal condition */
+#define PORT_CABLE_DIAG_RESULT 0x6000 /* Cable diagnostic test result mask */
+#define PORT_START_CABLE_DIAG 0x1000 /* Enable cable diagnostic test */
+#define PORT_FORCE_LINK 0x0800 /* Enable force link pass */
+#define PORT_POWER_SAVING 0x0400 /* Disable power saving */
+#define PORT_REMOTE_LOOPBACK 0x0200 /* Enable remote loopback at PHY */
+#define PORT_CABLE_FAULT_COUNTER 0x01FF /* Cable length distance to the fault */
+
+#define REG_PORT_CTRL 0xF6 /* P1CR */
+#define PORT_LED_OFF 0x8000 /* Turn off all the port LEDs (LED3/LED2/LED1/LED0) */
+#define PORT_TX_DISABLE 0x4000 /* Disable port transmit */
+#define PORT_AUTO_NEG_RESTART 0x2000 /* Restart auto-negotiation */
+#define PORT_POWER_DOWN 0x0800 /* Set port power-down */
+#define PORT_AUTO_MDIX_DISABLE 0x0400 /* Disable auto MDI-X */
+#define PORT_FORCE_MDIX 0x0200 /* Force MDI-X */
+#define PORT_AUTO_NEG_ENABLE 0x0080 /* Enable auto-negotiation */
+#define PORT_FORCE_100_MBIT 0x0040 /* Force PHY 100Mbps */
+#define PORT_FORCE_FULL_DUPLEX 0x0020 /* Force PHY in full duplex mode */
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010 /* Advertise pause capability */
+#define PORT_AUTO_NEG_100BTX_FD 0x0008 /* Advertise 100 full-duplex capability */
+#define PORT_AUTO_NEG_100BTX 0x0004 /* Advertise 100 half-duplex capability */
+#define PORT_AUTO_NEG_10BT_FD 0x0002 /* Advertise 10 full-duplex capability */
+#define PORT_AUTO_NEG_10BT 0x0001 /* Advertise 10 half-duplex capability */
+
+#define REG_PORT_STATUS 0xF8 /* P1SR */
+#define PORT_HP_MDIX 0x8000 /* Set PHY in HP auto MDI-X mode */
+#define PORT_REVERSED_POLARITY 0x2000 /* Polarity is reversed */
+#define PORT_RX_FLOW_CTRL 0x1000 /* Receive flow control feature is active */
+#define PORT_TX_FLOW_CTRL 0x0800 /* Transmit flow control feature is active */
+#define PORT_STAT_SPEED_100MBIT 0x0400 /* Link is 100Mbps */
+#define PORT_STAT_FULL_DUPLEX 0x0200 /* Link is full duplex mode */
+#define PORT_MDIX_STATUS 0x0080 /* Is MDI */
+#define PORT_AUTO_NEG_COMPLETE 0x0040 /* Auto-negotiation complete */
+#define PORT_STATUS_LINK_GOOD 0x0020 /* PHY link is up */
+#define PORT_REMOTE_SYM_PAUSE 0x0010 /* Link partner pause capability */
+#define PORT_REMOTE_100BTX_FD 0x0008 /* Link partner 100 full-duplex capability */
+#define PORT_REMOTE_100BTX 0x0004 /* Link partner 100 half-duplex capability */
+#define PORT_REMOTE_10BT_FD 0x0002 /* Link partner 10 full-duplex capability */
+#define PORT_REMOTE_10BT 0x0001 /* Link partner 10 half-duplex capability */
+
+#endif
+
+/* END */
diff --git a/drivers/parrot/net/ksz8851snl.c b/drivers/parrot/net/ksz8851snl.c
new file mode 100644
index 00000000000000..17477430282600
--- /dev/null
+++ b/drivers/parrot/net/ksz8851snl.c
@@ -0,0 +1,1165 @@
+/*
+ * KSZ8851SNL ethernet driver
+ *
+ * Copyright (C) 2008 Parrot SA
+ * Author: Florent Bayendrian <florent.bayendrian@parrot.com>
+ * Inspired by enc28j60.c and the Micrel GPL driver for s3c2412
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: ksz8851snl.c,v 1.12 2009-08-26 09:11:44 fbayendrian Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/time.h>
+#include <linux/crc32.h>
+
+#include "ksz8851_hw.h"
+
+#define DRV_NAME	"ksz8851snl"
+#define DRV_VERSION	"$Revision: 1.12 $"
+
+#define SPI_OPLEN	2	/* command phase length for register I/O */
+#define SPI_BUFOPLEN	1	/* command phase length for buffer I/O */
+#define SPI_DUMMY_READBUF 8 /* 4 real dummy bytes + RXFHSR + RXFHBCR */
+
+#define SPI_OPCODE_IOREAD	(0x0<<6)
+#define SPI_OPCODE_IOWRITE	(0x1<<6)
+#define SPI_OPCODE_BREAD	(0x2<<6)
+#define SPI_OPCODE_BWRITE	(0x3<<6)
+
+#define KSZ8851SNL_MSG_DEFAULT \
+	(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
+
+#define MAX_FRAMELEN	1518
+/* Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame): opcode byte + dummy/header bytes + frame + word-align pad.
+ * The original expression summed SPI_OPCODE_BREAD (the opcode VALUE,
+ * 0x80) instead of the 1-byte opcode LENGTH, harmlessly over-allocating
+ * 127 bytes. */
+#define SPI_TRANSFER_BUF_LEN (SPI_BUFOPLEN + \
+			      SPI_DUMMY_READBUF + \
+			      MAX_FRAMELEN + 3) // word align access
+
+#define TX_TIMEOUT	(4 * HZ)
+
+/* RX flow-control watermarks. NOTE(review): "high" (0x300) is below
+ * "low" (0x500) here -- values kept as-is, verify the register
+ * semantics against the KSZ8851SNL datasheet. */
+#define RX_HIGH_WATERMARK	0x300
+#define RX_LOW_WATERMARK	0x500
+#define RX_OVERRUN_WATERMARK	0x40
+
+// for the first silicon bug
+#define KSZ8851SNL_LOWSPEED	31250000
+
+#define FILTER_LIMIT	32
+
+static int ksz8851snl_setlink(struct net_device *dev);
+
+/*
+ * delay rx interrupt from ksz8851 to the host. max value is 0xCFFF
+ */
+static unsigned int rx_delay_us = 0;
+module_param(rx_delay_us, uint, 0444);
+
+
+/* Driver local data, stored in the net_device's private area. */
+struct ksz8851snl_net {
+	struct net_device *netdev;	/* backpointer to the net device */
+	struct spi_device *spi;		/* SPI slave used to reach the chip */
+	struct mutex lock;		/* serializes all chip register access */
+	struct sk_buff *tx_skb;		/* single frame waiting for tx_work */
+	struct work_struct tx_work;	/* deferred transmit (spi_sync() sleeps) */
+	struct work_struct irq_work;	/* deferred interrupt handling */
+	struct work_struct setrx_work;	/* deferred rx-mode update */
+	bool hw_enable;			/* rx/tx paths currently enabled */
+	u8 rev_id;			/* chip revision from the CIDER register */
+	u32 speed_hz;			/* per-transfer SPI clock cap (0 = bus default) */
+	u32 autoneg;			/* requested link settings (ethtool) */
+	u32 speed;
+	u32 duplex;
+	u32 msg_enable;			/* netif debug message bitmask */
+	u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN];	/* scratch for buffer ops */
+};
+
+/*
+ * Read one 8-bit register over SPI.
+ * The 2-byte command phase packs the I/O-read opcode, a byte-enable
+ * derived from the low address bits and the register address
+ * (NOTE(review): assumed to match the KSZ8851SNL command-phase layout
+ * -- confirm against the datasheet). Callers serialize via priv->lock.
+ */
+static int
+spi_read_byte(struct ksz8851snl_net *priv, u8 addr, u8* data)
+{
+	struct spi_transfer t[2];
+	struct spi_message msg;
+	u8 cmd[SPI_OPLEN];
+	int ret;
+
+	/* byte enable: one of four lanes selected by addr[1:0] */
+	cmd[0] = SPI_OPCODE_IOREAD |
+		((1 << (addr & 0x3)) << 2) |
+		(addr & 0xC0)>>6;
+	cmd[1] = (addr & ~(0x3)) << 2;
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = &cmd;
+	t[0].len = SPI_OPLEN;
+	t[0].speed_hz = priv->speed_hz;	/* non-zero only on rev 0 silicon */
+	t[1].rx_buf = data;
+	t[1].len = sizeof(u8);
+	t[1].speed_hz = priv->speed_hz;
+	spi_message_init(&msg);
+	spi_message_add_tail(&t[0], &msg);
+	spi_message_add_tail(&t[1], &msg);
+	ret = spi_sync(priv->spi, &msg);
+	if (ret == 0) {
+		/* spi_sync() succeeded: report the message's own status */
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/*
+ * Write one 8-bit register over SPI.
+ * Command framing mirrors spi_read_byte(); the data byte follows the
+ * two command bytes in a single transfer.
+ */
+static int
+spi_write_byte(struct ksz8851snl_net *priv, u8 addr, u8 data)
+{
+	struct spi_transfer t;
+	struct spi_message msg;
+	u8 buf[SPI_OPLEN + sizeof(u8)];
+	int ret;
+
+	/* opcode + byte-enable for the addressed lane + address bits */
+	buf[0] = SPI_OPCODE_IOWRITE |
+		((1 << (addr & 0x3)) << 2) |
+		(addr & 0xC0)>>6;
+	buf[1] = (addr & ~(0x3)) << 2;
+	buf[2] = data;
+	memset(&t, 0, sizeof(t));
+	t.tx_buf = buf;
+	t.len = SPI_OPLEN + sizeof(u8);
+	t.speed_hz = priv->speed_hz;	/* non-zero only on rev 0 silicon */
+	spi_message_init(&msg);
+	spi_message_add_tail(&t, &msg);
+	ret = spi_sync(priv->spi, &msg);
+	if (ret == 0) {
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/*
+ * Read one 16-bit register over SPI. The address is rounded down to an
+ * even offset and the byte-enable selects the low or high word lane.
+ * NOTE(review): the two raw bytes land directly in *data, which assumes
+ * a little-endian host -- confirm if this ever runs big-endian.
+ */
+static int
+spi_read_hword(struct ksz8851snl_net *priv, u8 addr, u16 *data)
+{
+	struct spi_transfer t[2];
+	struct spi_message msg;
+	u8 cmd[SPI_OPLEN];
+	int ret;
+
+	addr &= ~0x1; // make addr even
+	cmd[0] = (addr & 0x2)? 0xC<<2 : 0x3<<2;	/* 2-byte lane enable */
+	cmd[0] |= SPI_OPCODE_IOREAD |
+		(addr & 0xC0)>>6;
+	cmd[1] = (addr & ~(0x3)) << 2;
+
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = &cmd;
+	t[0].len = SPI_OPLEN;
+	t[0].speed_hz = priv->speed_hz;	/* non-zero only on rev 0 silicon */
+	t[1].rx_buf = data;
+	t[1].len = sizeof(u16);
+	t[1].speed_hz = priv->speed_hz;
+	spi_message_init(&msg);
+	spi_message_add_tail(&t[0], &msg);
+	spi_message_add_tail(&t[1], &msg);
+	ret = spi_sync(priv->spi, &msg);
+	if (ret == 0) {
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/*
+ * Write one 16-bit register over SPI. The address is rounded down to an
+ * even offset; the value is serialized explicitly low byte first, so
+ * this is host-endianness independent.
+ */
+static int
+spi_write_hword(struct ksz8851snl_net *priv, u8 addr, u16 data)
+{
+	struct spi_transfer t;
+	struct spi_message msg;
+	u8 buf[SPI_OPLEN + sizeof(u16)];
+	int ret;
+
+	addr &= ~0x1; // make addr even
+	buf[0] = (addr & 0x2)? 0xC<<2 : 0x3<<2;	/* 2-byte lane enable */
+	buf[0] |= SPI_OPCODE_IOWRITE |
+		(addr & 0xC0)>>6;
+	buf[1] = (addr & ~(0x3)) << 2;
+	buf[2] = data & 0xFF;	/* low byte first */
+	buf[3] = data >> 8;
+
+	memset(&t, 0, sizeof(t));
+	t.tx_buf = buf;
+	t.len = SPI_OPLEN + sizeof(u16);
+	t.speed_hz = priv->speed_hz;	/* non-zero only on rev 0 silicon */
+	spi_message_init(&msg);
+	spi_message_add_tail(&t, &msg);
+	ret = spi_sync(priv->spi, &msg);
+	if (ret == 0) {
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/*
+ * Open the RXQ read window: enable auto-increment of the RX address
+ * pointer, then set the RXQ start bit via a read-modify-write of RXQCR.
+ * Must be paired with spi_read_buf_disable().
+ */
+static void
+spi_read_buf_enable(struct ksz8851snl_net *priv)
+{
+	u16 reg;
+
+	spi_write_hword(priv, REG_RX_ADDR_PTR, ADDR_PTR_AUTO_INC);
+	spi_read_hword(priv, REG_RXQ_CMD, &reg);
+	reg |= RXQ_CMD_CNTL | RXQ_START;
+	spi_write_hword(priv, REG_RXQ_CMD, reg);
+}
+
+/* Close the RXQ read window: clear the start bit, keep command control. */
+static void spi_read_buf_disable(struct ksz8851snl_net *priv)
+{
+	spi_write_hword(priv, REG_RXQ_CMD, RXQ_CMD_CNTL);
+}
+
+
+/*
+ * SPI read buffer: pull one received frame out of the chip's RXQ.
+ * Transfer layout: 1 opcode byte out, then 8 discarded bytes (4 dummy +
+ * the already-read RXFHSR/RXFHBCR), the frame itself, and 0-3 pad
+ * bytes to keep the RXQ word aligned (discarded into the scratch buf).
+ */
+static int
+spi_read_buf(struct ksz8851snl_net *priv, unsigned int len, u8 *data)
+{
+	struct spi_transfer t[4];
+	struct spi_message msg;
+	u8 cmd[SPI_BUFOPLEN];
+	u8 dummy[SPI_DUMMY_READBUF];
+	int ret;
+
+	cmd[0] = SPI_OPCODE_BREAD;
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = cmd;
+	t[0].len = SPI_BUFOPLEN;
+	t[0].speed_hz = priv->speed_hz;
+	t[1].rx_buf = dummy;
+	t[1].len = SPI_DUMMY_READBUF;
+	t[1].speed_hz = priv->speed_hz;
+	// packet rx
+	t[2].rx_buf = data;
+	t[2].len = len;
+	t[2].speed_hz = priv->speed_hz;
+	// padding (discarded)
+	t[3].rx_buf = priv->spi_transfer_buf;
+	t[3].len = ((len + 3) & ~0x3) - len;
+	/* was "t[2].speed_hz" again: the padding transfer ran at the bus
+	 * default speed, defeating the rev 0 low-speed workaround */
+	t[3].speed_hz = priv->speed_hz;
+	spi_message_init(&msg);
+	spi_message_add_tail(&t[0], &msg);
+	spi_message_add_tail(&t[1], &msg);
+	spi_message_add_tail(&t[2], &msg);
+	if (t[3].len)
+		spi_message_add_tail(&t[3], &msg);
+
+	spi_read_buf_enable(priv);
+	ret = spi_sync(priv->spi, &msg);
+	spi_read_buf_disable(priv);
+
+	if (ret == 0) {
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/*
+ * Open the TXQ write window: enable auto-increment of the TX address
+ * pointer and set the queue start bit. Paired with
+ * spi_write_buf_disable().
+ */
+static void
+spi_write_buf_enable(struct ksz8851snl_net *priv)
+{
+	spi_write_hword(priv, REG_TX_ADDR_PTR, ADDR_PTR_AUTO_INC);
+	spi_write_hword(priv, REG_RXQ_CMD, RXQ_CMD_CNTL | RXQ_START);
+}
+
+/*
+ * Close the TXQ write window and tell the MAC to enqueue the frame
+ * that was just written for transmission.
+ */
+static void
+spi_write_buf_disable(struct ksz8851snl_net *priv)
+{
+	spi_write_hword(priv, REG_RXQ_CMD, RXQ_CMD_CNTL);
+	spi_write_hword(priv, REG_TXQ_CMD, TXQ_ENQUEUE);
+}
+
+/*
+ * SPI write buffer: push one frame into the chip's TX queue.
+ * Transfer layout: 1 opcode byte, 4-byte frame header (control word +
+ * length), the frame, then 0-3 zero bytes to keep the TXQ word aligned.
+ */
+static int spi_write_buf(struct ksz8851snl_net *priv, unsigned int len,
+		const u8 *data)
+{
+	struct spi_transfer t[3];
+	struct spi_message msg;
+	u8 buf[5];
+	int ret;
+
+	buf[0] = SPI_OPCODE_BWRITE;
+	buf[1] = TX_CTRL_INTERRUPT_ON & 0xff;
+	buf[2] = (TX_CTRL_INTERRUPT_ON >> 8) & 0xff;
+	buf[3] = len & 0xff;
+	buf[4] = (len >> 8) & 0xff;
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = buf;
+	t[0].len = 5;
+	t[0].speed_hz = priv->speed_hz;
+	// packet data, sent straight from the caller's buffer
+	t[1].tx_buf = data;
+	t[1].len = len;
+	t[1].speed_hz = priv->speed_hz;
+	/* word-align padding. The old code memcpy'd the whole frame into
+	 * spi_transfer_buf and zeroed [len, len+3), but transmitted the
+	 * pad from offset 0 -- leaking the frame's first bytes as
+	 * padding. Send explicit zeros instead (and skip the copy). */
+	t[2].len = ((len + 3) & ~0x3) - len;
+	if (t[2].len)
+		memset(priv->spi_transfer_buf, 0, t[2].len);
+	t[2].tx_buf = priv->spi_transfer_buf;
+	t[2].speed_hz = priv->speed_hz;
+	spi_message_init(&msg);
+	spi_message_add_tail(&t[0], &msg);
+	spi_message_add_tail(&t[1], &msg);
+	if (t[2].len)
+		spi_message_add_tail(&t[2], &msg);
+	spi_write_buf_enable(priv);
+	ret = spi_sync(priv->spi, &msg);
+	spi_write_buf_disable(priv);
+	if (ret == 0) {
+		ret = msg.status;
+	}
+
+	if (ret && netif_msg_drv(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+			__FUNCTION__, ret);
+
+	return ret;
+}
+
+/* Pulse the global software-reset bit, wait, then release it. */
+static void ksz8851snl_soft_reset(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	if (netif_msg_hw(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+
+	spi_write_hword(priv, REG_RESET_CTRL, GLOBAL_SOFTWARE_RESET);
+	msleep(10); // wait for device to reset
+	spi_write_hword(priv, REG_RESET_CTRL, 0);
+#if 0
+	/* disabled vendor workaround, kept for reference */
+	{
+		u16 reg;
+		/* from Micrel driver:
+		 * "workaround link setup issue for KSZ8851"
+		 */
+		spi_read_hword(priv, 0xe0, &reg);
+		spi_write_hword(priv, 0xe0, reg | 0x8000);
+		spi_read_hword(priv, 0xe0, &reg);
+		mdelay(4000); // "wait until finishing AN"
+	}
+#endif
+}
+
+/*
+ * Read the MAC address back from the chip into dev->dev_addr.
+ * The MAC address registers hold the address low byte first, so
+ * register i maps to dev_addr[ETH_ALEN-1-i] -- the exact mirror of
+ * ksz8851snl_set_hw_macaddr().
+ */
+static int ksz8851snl_read_hw_macaddr(struct net_device *dev)
+{
+	int ret = 0;
+	unsigned int i;
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	mutex_lock(&priv->lock);
+
+	for (i = 0; i < ETH_ALEN && !ret; i++) {
+		/* was &dev->dev_addr[ETH_ALEN-1+i]: wrote past the end
+		 * of dev_addr for every byte but the first */
+		ret = spi_read_byte(priv, REG_MAC_ADDR_0 + i,
+				&dev->dev_addr[ETH_ALEN-1-i]);
+	}
+
+	mutex_unlock(&priv->lock);
+
+	return ret;
+}
+
+/*
+ * Program the hardware MAC address registers from dev->dev_addr.
+ * Refused (-EBUSY) while rx/tx are enabled; the registers store the
+ * address low byte first, hence the reversed indexing.
+ * Caller holds priv->lock.
+ */
+static int ksz8851snl_set_hw_macaddr(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	int ret = 0;
+	int i;
+
+	if (priv->hw_enable) {
+		if (netif_msg_drv(priv))
+			printk(KERN_DEBUG DRV_NAME
+			       ": %s() Hardware must be disabled to set "
+			       "MAC address\n", __FUNCTION__);
+		return -EBUSY;
+	}
+
+	if (netif_msg_drv(priv)) {
+		printk(KERN_INFO DRV_NAME
+		       ": %s: Setting MAC address\n",
+		       dev->name);
+	}
+
+	for (i = 0; i < ETH_ALEN && !ret; i++)
+		ret = spi_write_byte(priv, REG_MAC_ADDR_0 + i,
+				     dev->dev_addr[ETH_ALEN-1-i]);
+
+	return ret;
+}
+
+/*
+ * Store the new hardware address in dev->dev_addr, and update the MAC.
+ * ksz8851snl_set_hw_macaddr() programs the chip from dev->dev_addr, so
+ * the new address must be copied in BEFORE programming (the old code
+ * wrote the stale address to the hardware). On failure the previous
+ * address is restored.
+ */
+static int ksz8851snl_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *address = addr;
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	u8 old_addr[ETH_ALEN];
+	int ret;
+
+	if (netif_running(dev))
+		return -EBUSY;
+	if (!is_valid_ether_addr(address->sa_data))
+		return -EADDRNOTAVAIL;
+
+	mutex_lock(&priv->lock);
+	memcpy(old_addr, dev->dev_addr, ETH_ALEN);
+	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+	ret = ksz8851snl_set_hw_macaddr(dev);
+	if (ret)
+		memcpy(dev->dev_addr, old_addr, ETH_ALEN);
+	mutex_unlock(&priv->lock);
+
+	return ret;
+}
+
+/*
+ * Access the PHY to determine link status, propagate it to the netif
+ * carrier state and log speed/duplex when the link comes up.
+ */
+static void ksz8851snl_check_link_status(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	unsigned int old_carrier, new_carrier;
+	u16 link_status;
+
+	old_carrier = netif_carrier_ok(dev) ? 1 : 0;
+	spi_read_hword(priv, REG_PORT_STATUS, &link_status);
+	new_carrier = (link_status & PORT_STATUS_LINK_GOOD)? 1: 0;
+
+	/* only act (and log) on a carrier transition */
+	if (new_carrier != old_carrier) {
+		if (new_carrier) {
+			netif_carrier_on(dev);
+		} else {
+			netif_carrier_off(dev);
+		}
+		if (netif_msg_link(priv)) {
+			/* line built from several printk()s, terminated below */
+			printk(KERN_INFO "%s: link %s", dev->name,
+				new_carrier ? "up": "down");
+			if (new_carrier) {
+				if (link_status & PORT_STAT_SPEED_100MBIT) {
+					printk(", 100Mbps ");
+				} else {
+					printk(", 10Mbps ");
+				}
+				if (link_status & PORT_STAT_FULL_DUPLEX) {
+					printk("full-duplex");
+				} else {
+					printk("half-duplex");
+				}
+			}
+			printk("\n");
+
+		}
+	}
+}
+
+/*
+ * Probe the chip ID register to make sure the device is really there;
+ * also latches the silicon revision into priv->rev_id.
+ * Returns 0 when the expected KSZ8851-16 ID is found, -EINVAL otherwise.
+ */
+static int ksz8851snl_hw_check(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	u16 cider;
+
+	spi_read_hword(priv, REG_CHIP_ID, &cider);
+	priv->rev_id = (cider & REVISION_MASK) >> REVISION_SHIFT;
+
+	if ((cider & CHIP_ID_MASK) == CHIP_ID_8851_16)
+		return 0;
+
+	dev_err(&priv->spi->dev, "Invalid chip ID 0x%4.4x\n", cider);
+	return -EINVAL;
+}
+
+/*
+ * Bring the MAC up: program DMA pointers, watermarks, interrupt
+ * thresholds, then enable the transmitter and receiver and start the
+ * queue. Register order matters; caller holds priv->lock.
+ */
+static void ksz8851snl_hw_enable(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	/* mask every interrupt source while reconfiguring */
+	spi_write_hword(priv, REG_INT_MASK, 0);
+	spi_write_hword(priv, REG_TX_ADDR_PTR, ADDR_PTR_AUTO_INC);
+	spi_write_hword(priv, REG_RX_ADDR_PTR, ADDR_PTR_AUTO_INC);
+	/* interrupt after every single received frame */
+	spi_write_hword(priv, REG_RX_FRAME_CNT_THRES,
+			1 & RX_FRAME_THRESHOLD_MASK);
+	/* Setup Receive High Water Mark to avoid loss packets under flow control */
+	spi_write_hword(priv, REG_RX_HIGH_WATERMARK, RX_HIGH_WATERMARK);
+	spi_write_hword(priv, REG_RX_LOW_WATERMARK, RX_LOW_WATERMARK);
+	spi_write_hword(priv, REG_RX_OVERRUN_WATERMARK, RX_OVERRUN_WATERMARK);
+	if (rx_delay_us)
+		spi_write_hword(priv, REG_RXQ_CMD, RXQ_CMD_CNTL|RXQ_TIME_INT);
+	else
+		spi_write_hword(priv, REG_RXQ_CMD, RXQ_CMD_CNTL);
+
+	// interrupt setup
+	if (rx_delay_us > RX_TIME_THRESHOLD_MAX)
+		rx_delay_us = RX_TIME_THRESHOLD_MAX;	/* clamp module param */
+	if (rx_delay_us)
+		spi_write_hword(priv, REG_RX_TIME_THRES, rx_delay_us);
+	spi_write_hword(priv, REG_INT_MASK, INT_SETUP_MASK | INT_RX_OVERRUN);
+
+	// enable tx (with hardware checksum insertion, padding and CRC)
+	spi_write_hword(priv, REG_TX_CTRL,
+			TX_CTRL_ICMP_CHECKSUM | TX_CTRL_UDP_CHECKSUM |
+			TX_CTRL_TCP_CHECKSUM | TX_CTRL_IP_CHECKSUM |
+			TX_CTRL_FLOW_ENABLE | TX_CTRL_PAD_ENABLE |
+			TX_CTRL_CRC_ENABLE | TX_CTRL_ENABLE);
+
+	// enable rx
+	spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_MODE_PMAP);
+	spi_write_hword(priv, REG_RX_CTRL2, RX_CTRL_SINGLE_FRAME_BURST |
+			RX_CTRL_UDP_LITE_CHECKSUM |
+			RX_CTRL_ICMP_CHECKSUM);
+	/* ack any stale "rx stopped" status before declaring ourselves up */
+	spi_write_hword(priv, REG_INT_STATUS, INT_RX_STOPPED);
+	priv->hw_enable = true;
+
+	netif_start_queue(dev);
+}
+
+/*
+ * Shut the MAC down: mask all interrupts, then flush the RX and TX
+ * queues. Caller holds priv->lock.
+ */
+static void ksz8851snl_hw_disable(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	/* disable interrupt and packet reception */
+	spi_write_hword(priv, REG_INT_MASK, 0);
+	spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_FLUSH_QUEUE);
+	/* the TX flush bit lives in the TX control register; the old
+	 * code wrote TX_CTRL_FLUSH_QUEUE to REG_RX_CTRL1 by mistake */
+	spi_write_hword(priv, REG_TX_CTRL, TX_CTRL_FLUSH_QUEUE);
+	priv->hw_enable = false;
+}
+
+/* Tell the MAC to release the current frame from its RX queue memory. */
+static void ksz8851snl_free_rx_packet(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	spi_write_hword(priv, REG_RXQ_CMD,
+			RXQ_CMD_CNTL | RXQ_CMD_FREE_PACKET);
+}
+
+/*
+ * Drain all pending rx frames from the chip and update the stats.
+ * Only frames actually delivered to the stack count as rx_packets /
+ * rx_bytes; errored or dropped frames go to the error counters.
+ */
+static void ksz8851snl_rx_handler(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	u16 frame_count;
+
+	/* the pending frame count is the high byte of RXFCTR */
+	spi_read_hword(priv, REG_RX_FRAME_CNT_THRES, &frame_count);
+	frame_count >>= 8;
+
+	while (frame_count--) {
+		u16 rx_status, rx_length;
+
+		spi_read_hword(priv, REG_RX_FHR_STATUS, &rx_status);
+		spi_read_hword(priv, REG_RX_FHR_BYTE_CNT, &rx_length);
+		rx_length &= RX_BYTE_CNT_MASK;
+
+		if (rx_status & RX_MULTICAST)
+			dev->stats.multicast++;
+
+		if ((rx_status & RX_VALID) && !(rx_status & RX_ERRORS)) {
+			struct sk_buff *skb;
+
+			skb = dev_alloc_skb(rx_length + NET_IP_ALIGN);
+			if (skb) {
+				skb_reserve(skb, NET_IP_ALIGN);
+				spi_read_buf(priv, rx_length,
+						skb_put(skb, rx_length));
+				skb->dev = dev;
+				skb->protocol = eth_type_trans(skb, dev);
+				/* the MAC validated the frame but we never
+				 * fill skb->csum, so CHECKSUM_COMPLETE was
+				 * incorrect here */
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += rx_length;
+				dev->last_rx = jiffies;
+				netif_rx_ni(skb);
+			} else {
+				dev->stats.rx_dropped++;
+				ksz8851snl_free_rx_packet(dev);
+				printk(KERN_NOTICE "%s: Low memory, "
+						"packet dropped\n",
+						dev->name);
+			}
+		} else {
+			dev->stats.rx_errors++;
+			if (rx_status & RX_BAD_CRC)
+				dev->stats.rx_crc_errors++;
+			if (rx_status & RX_TOO_LONG)
+				dev->stats.rx_length_errors++;
+			if (rx_status & RX_RUNT_ERROR)
+				dev->stats.collisions++;
+			ksz8851snl_free_rx_packet(dev);
+		}
+	}
+}
+
+/*
+ * Deferred interrupt handler: acknowledges and dispatches every pending
+ * interrupt source while the hardware interrupt mask is temporarily
+ * cleared, then restores the mask.
+ */
+static void ksz8851snl_irq_work_handler(struct work_struct *work)
+{
+	struct ksz8851snl_net *priv =
+		container_of(work, struct ksz8851snl_net, irq_work);
+	struct net_device *dev = priv->netdev;
+	u16 intflags, intmask;
+
+	if (netif_msg_intr(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+
+	mutex_lock(&priv->lock);
+
+	// temporary disable interrupt
+	spi_read_hword(priv, REG_INT_MASK, &intmask);
+	spi_write_hword(priv, REG_INT_MASK, 0);
+
+	// ack everything that is currently pending
+	spi_read_hword(priv, REG_INT_STATUS, &intflags);
+	spi_write_hword(priv, REG_INT_STATUS, intflags);
+
+	/* link change handler */
+	if (intflags & INT_PHY) {
+		u16 pmecr;
+
+		if (netif_msg_intr(priv))
+			printk(KERN_DEBUG "%s: link changed\n", dev->name);
+		ksz8851snl_check_link_status(dev);
+		// ack the link change (write PMECR back to itself)
+		spi_read_hword(priv, REG_POWER_CNTL, &pmecr);
+		spi_write_hword(priv, REG_POWER_CNTL, pmecr);
+	}
+
+	if (intflags & INT_RX) {
+		ksz8851snl_rx_handler(dev);
+	}
+
+	if (intflags & INT_RX_SPI_ERROR) {
+		dev->stats.rx_errors++;
+		// protocol or HW error
+		printk(KERN_ERR "%s: SPI error\n", dev->name);
+	}
+
+	if (intflags & INT_RX_OVERRUN) {
+		dev->stats.rx_errors++;
+		dev->stats.rx_over_errors++;
+		printk(KERN_ERR "%s: RX FIFO overrun\n", dev->name);
+	}
+
+	// re-enable interrupt
+	spi_write_hword(priv, REG_INT_MASK, intmask);
+
+	mutex_unlock(&priv->lock);
+
+	if (netif_msg_intr(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__);
+}
+
+/*
+ * Transmit entry point. The frame cannot be pushed from here because
+ * SPI access sleeps, so park the skb and defer the actual transfer to
+ * the tx work. Only one skb may be outstanding: the queue stays
+ * stopped until the worker wakes it again.
+ */
+static int ksz8851snl_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	if (netif_msg_tx_queued(priv))
+		printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__);
+
+	netif_stop_queue(dev);
+
+	dev->trans_start = jiffies;	/* save the timestamp */
+	priv->tx_skb = skb;		/* remember the skb for deferred processing */
+	schedule_work(&priv->tx_work);
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Deferred transmit: wait until the chip's TX queue has room, push the
+ * pending skb over SPI, update the stats and wake the netif queue.
+ */
+static void ksz8851snl_tx_work_handler(struct work_struct *work)
+{
+	struct ksz8851snl_net *priv =
+		container_of(work, struct ksz8851snl_net, tx_work);
+	struct net_device *dev = priv->netdev;
+	unsigned int write_size;
+	u16 mem_avail;
+
+	if (netif_msg_tx_queued(priv))
+		printk(KERN_DEBUG DRV_NAME
+			": Tx Packet Len:%d\n", priv->tx_skb->len);
+
+	/* frame rounded up to a word boundary + 5 opcode/header bytes */
+	write_size = ((priv->tx_skb->len + 3) & ~0x3) + 5;
+
+	mutex_lock(&priv->lock);
+
+	spi_read_hword(priv, REG_TX_MEM_INFO, &mem_avail);
+	while (mem_avail < write_size) {
+		cpu_relax();
+		/* was REG_PHY_CNTL: the loop polled the wrong register
+		 * and never tracked the real TXQ memory level */
+		spi_read_hword(priv, REG_TX_MEM_INFO, &mem_avail);
+	}
+
+	spi_write_buf(priv, priv->tx_skb->len, priv->tx_skb->data);
+
+	// update transmit statistics
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += priv->tx_skb->len;
+	// free tx resources
+	dev_kfree_skb(priv->tx_skb);
+	priv->tx_skb = NULL;
+	netif_wake_queue(priv->netdev);
+
+	mutex_unlock(&priv->lock);
+}
+
+/* Hard IRQ handler: register access sleeps, so only schedule the work. */
+static irqreturn_t ksz8851snl_irq(int irq, void *dev_id)
+{
+	struct ksz8851snl_net *priv = dev_id;
+
+	/*
+	 * Can't do anything in interrupt context because we need to
+	 * block (spi_sync() is blocking) so fire off the interrupt
+	 * handling workqueue.
+	 * Remember that we access ksz8851snl registers through SPI bus
+	 * via spi_sync() call.
+	 */
+	schedule_work(&priv->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is non-reboot way to recover if something goes wrong.
+ */
+/*
+ * Open/initialize the board (ifconfig up): full chip reset and
+ * re-configuration so a broken state is always recoverable without a
+ * reboot.
+ */
+static int ksz8851snl_net_open(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	if (netif_msg_drv(priv))
+		/* args were swapped (function name printed as the device
+		 * name); order now matches ksz8851snl_net_stop() */
+		printk(KERN_DEBUG "%s: %s() enter\n", dev->name,
+			__FUNCTION__);
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		if (netif_msg_ifup(priv)) {
+			printk(KERN_ERR "%s: invalid MAC address\n",
+				dev->name);
+		}
+		return -EADDRNOTAVAIL;
+	}
+	/* Reset the hardware here */
+	ksz8851snl_soft_reset(dev);
+	if (ksz8851snl_hw_check(dev)) {
+		if (netif_msg_ifup(priv))
+			printk("hw_check() failed\n");
+		return -EINVAL;
+	}
+	/* Update the MAC address (in case user has changed it) */
+	mutex_lock(&priv->lock);
+	ksz8851snl_set_hw_macaddr(dev);
+	ksz8851snl_setlink(dev);
+	netif_carrier_off(dev);
+	/* ksz8851snl_hw_enable() also starts the tx queue, so the old
+	 * second netif_start_queue() call here was redundant */
+	ksz8851snl_hw_enable(dev);
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+/*
+ * Close the interface (ifconfig down): stop the queue, drop the
+ * carrier, disable the hardware and discard any frame still waiting
+ * for the tx worker.
+ */
+static int ksz8851snl_net_stop(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	if (netif_msg_drv(priv))
+		printk(KERN_DEBUG "%s: %s() enter\n", dev->name,
+			__FUNCTION__);
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+
+	mutex_lock(&priv->lock);
+	ksz8851snl_hw_disable(dev);
+	if (priv->tx_skb != NULL) {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+		dev_kfree_skb(priv->tx_skb);
+		priv->tx_skb = NULL;
+	}
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ * Called by the stack in atomic context; the SPI register writes sleep,
+ * so the real work is deferred to setrx_work.
+ */
+static void ksz8851snl_set_rx_mode(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	schedule_work(&priv->setrx_work);
+}
+
+/*
+ * Deferred rx-mode update: translate dev->flags and the multicast list
+ * into the chip's RX control mode and multicast hash-table registers.
+ */
+static void ksz8851snl_setrx_work_handler(struct work_struct *work)
+{
+	struct ksz8851snl_net *priv =
+		container_of(work, struct ksz8851snl_net, setrx_work);
+	struct net_device *dev = priv->netdev;
+
+	if (dev->flags & IFF_PROMISC) {
+		if (netif_msg_drv(priv))
+			printk(KERN_DEBUG "%s: promiscuous mode\n",
+				dev->name);
+		spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_MODE_PROMISC);
+	} else if (dev->flags & IFF_ALLMULTI) {
+		if (netif_msg_drv(priv))
+			printk(KERN_DEBUG "%s: multicast mode\n", dev->name);
+		spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_MODE_PMAP);
+	} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
+		struct netdev_hw_addr *ha;
+		u16 mc_filter[4];
+		int i;
+
+		if (netif_msg_drv(priv))
+			printk(KERN_DEBUG "%s: normal mode with hash table\n",
+				dev->name);
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+			/* 6-bit hash: upper 2 bits select the 16-bit table
+			 * entry, lower 4 bits select the bit within it
+			 * (was "& 31", which overflows a u16 entry) */
+			mc_filter[bit_nr >> 4] |= 1 << (bit_nr & 15);
+		}
+		for (i = 0; i < 4; i++)
+			spi_write_hword(priv, REG_MAC_HASH_0 + 2*i,
+					mc_filter[i]);
+		// now enable the hash filter mode
+		spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_MODE_HASH);
+	} else {
+		// just accept broadcast / unicast
+		spi_write_hword(priv, REG_RX_CTRL1, RX_CTRL_MODE_NORMAL);
+	}
+}
+
+/* ......................... ETHTOOL SUPPORT ........................... */
+
+/* ethtool -i: report driver name and version. */
+static void
+ksz8851snl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof info->driver);
+	strlcpy(info->version, DRV_VERSION, sizeof info->version);
+}
+
+/*
+ * ethtool get_settings: report supported modes, current speed/duplex
+ * (when the link is up) and the autonegotiation state.
+ */
+static int
+ksz8851snl_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	u16 mii_ctrl, link_status;
+
+	spi_read_hword(priv, REG_PHY_CNTL, &mii_ctrl);
+	spi_read_hword(priv, REG_PORT_STATUS, &link_status);
+
+
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+			SUPPORTED_100baseT_Half| SUPPORTED_100baseT_Full|
+			SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_TP;
+
+	if (link_status & PORT_STATUS_LINK_GOOD) {
+		cmd->speed = (link_status & PORT_STAT_SPEED_100MBIT) ?
+				SPEED_100:
+				SPEED_10;
+		cmd->duplex = (link_status & PORT_STAT_FULL_DUPLEX) ?
+				DUPLEX_FULL:
+				DUPLEX_HALF;
+	} else {
+		/* no link: speed/duplex unknown */
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+	cmd->port = PORT_TP;
+	/* both arms of this ternary used to be AUTONEG_DISABLE, so
+	 * autoneg was always reported as off */
+	cmd->autoneg = (mii_ctrl & PHY_AUTO_NEG_ENABLE) ?
+			AUTONEG_ENABLE:
+			AUTONEG_DISABLE;
+
+	return 0;
+}
+
+/*
+ * Push the requested link settings (priv->autoneg/speed/duplex) into
+ * the PHY control register and restart autonegotiation. No-op when
+ * autoneg is requested and already enabled.
+ */
+static int ksz8851snl_setlink(struct net_device *dev)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	u16 mii_ctrl;
+
+	spi_read_hword(priv, REG_PHY_CNTL, &mii_ctrl);
+
+	if ((mii_ctrl & PHY_AUTO_NEG_ENABLE) &&
+	    priv->autoneg == AUTONEG_ENABLE)
+		return 0;
+
+	if (netif_msg_link(priv))
+		printk("%s: setting autoneg = %d, speed=%d, duplex=%s\n",
+			dev->name,
+			(priv->autoneg == AUTONEG_ENABLE)? 1: 0,
+			(priv->speed == SPEED_100)? 100: 10,
+			(priv->duplex == DUPLEX_FULL)? "full": "half");
+
+	/* rebuild the three mode bits from the requested settings */
+	mii_ctrl &= ~(PHY_AUTO_NEG_ENABLE | PHY_SPEED_100MBIT |
+		      PHY_FULL_DUPLEX);
+	if (priv->autoneg == AUTONEG_ENABLE)
+		mii_ctrl |= PHY_AUTO_NEG_ENABLE;
+	if (priv->speed == SPEED_100)
+		mii_ctrl |= PHY_SPEED_100MBIT;
+	if (priv->duplex == DUPLEX_FULL)
+		mii_ctrl |= PHY_FULL_DUPLEX;
+	mii_ctrl |= PHY_AUTO_NEG_RESTART;
+
+	return spi_write_hword(priv, REG_PHY_CNTL, mii_ctrl);
+}
+
+/*
+ * ethtool set_settings: accept new link parameters only while the
+ * hardware is disabled, then apply them via ksz8851snl_setlink().
+ */
+static int
+ksz8851snl_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+	int ret = 0;
+
+	mutex_lock(&priv->lock);
+
+	if (!priv->hw_enable) {
+		if ((cmd->speed == SPEED_100 || cmd->speed == SPEED_10) &&
+		    (cmd->duplex == DUPLEX_FULL || cmd->duplex == DUPLEX_HALF)) {
+			priv->autoneg = cmd->autoneg;
+			priv->speed = cmd->speed;
+			priv->duplex = cmd->duplex;
+			ksz8851snl_setlink(dev);
+		} else {
+			if (netif_msg_link(priv))
+				printk("%s: unsupported link setting\n",
+					dev->name);
+			ret = -EOPNOTSUPP;
+		}
+	} else {
+		if (netif_msg_link(priv))
+			/* the old message had no ": " separator and
+			 * printed as "ksz8851snlhw must be disabled..." */
+			printk(KERN_DEBUG DRV_NAME ": hw must be disabled "
+					"to set link mode\n");
+		ret = -EBUSY;
+	}
+
+	mutex_unlock(&priv->lock);
+
+	return ret;
+}
+
+/* ethtool: report the current netif debug message bitmask. */
+static u32 ksz8851snl_get_msglevel(struct net_device *dev)
+{
+	const struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	return priv->msg_enable;
+}
+
+/* ethtool: update the netif debug message bitmask. */
+static void ksz8851snl_set_msglevel(struct net_device *dev, u32 val)
+{
+	struct ksz8851snl_net *priv = netdev_priv(dev);
+
+	priv->msg_enable = val;
+}
+
+/* ethtool entry points: link settings, driver info, debug level */
+static const struct ethtool_ops ksz8851snl_ethtool_ops = {
+	.get_settings	= ksz8851snl_get_settings,
+	.set_settings	= ksz8851snl_set_settings,
+	.get_drvinfo	= ksz8851snl_get_drvinfo,
+	.get_msglevel	= ksz8851snl_get_msglevel,
+	.set_msglevel	= ksz8851snl_set_msglevel,
+};
+
+/* net_device entry points; MTU/address validation use the generic helpers */
+static const struct net_device_ops ks8851snl_netdev_ops = {
+	.ndo_open		= ksz8851snl_net_open,
+	.ndo_stop		= ksz8851snl_net_stop,
+	.ndo_start_xmit		= ksz8851snl_start_xmit,
+	.ndo_set_mac_address	= ksz8851snl_set_mac_address,
+	.ndo_set_rx_mode	= ksz8851snl_set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/*
+ * Bind to the SPI device: allocate the net_device, verify the chip is
+ * present, pick a safe SPI clock for rev 0 silicon, install the IRQ
+ * handler and register with the network stack.
+ */
+static int __devinit ksz8851snl_probe(struct spi_device *spi)
+{
+	struct net_device *dev;
+	struct ksz8851snl_net *priv;
+	int ret = 0;
+	/* optional board hook muxing the shared JTAG/SPI pins */
+	void (*jtag_spi_mode) (int enable) = spi->dev.platform_data;
+
+	dev_info(&spi->dev, "ethernet driver loaded\n");
+
+	dev = alloc_etherdev(sizeof(struct ksz8851snl_net));
+	if (!dev) {
+		dev_err(&spi->dev, "unable to alloc new etherdev\n");
+		ret = -ENOMEM;
+		goto error_alloc;
+	}
+	priv = netdev_priv(dev);
+
+	priv->netdev = dev;	/* priv to netdev reference */
+	priv->spi = spi;	/* priv to spi reference */
+	priv->msg_enable = KSZ8851SNL_MSG_DEFAULT;
+	priv->hw_enable = false;
+	priv->autoneg = AUTONEG_ENABLE;
+	priv->speed = SPEED_100;
+	priv->duplex = DUPLEX_FULL;
+	/* assume rev 0 silicon (capped SPI clock) until proven otherwise */
+	priv->speed_hz = KSZ8851SNL_LOWSPEED;
+	mutex_init(&priv->lock);
+	INIT_WORK(&priv->tx_work, ksz8851snl_tx_work_handler);
+	INIT_WORK(&priv->setrx_work, ksz8851snl_setrx_work_handler);
+	INIT_WORK(&priv->irq_work, ksz8851snl_irq_work_handler);
+	dev_set_drvdata(&spi->dev, priv);	/* spi to priv reference */
+	SET_NETDEV_DEV(dev, &spi->dev);
+
+	if (jtag_spi_mode)
+		jtag_spi_mode(1);
+	/* reset the chip */
+	ksz8851snl_soft_reset(dev);
+
+	if (ksz8851snl_hw_check(dev)) {
+		ret = -EINVAL;
+		goto error_chip;
+	}
+
+	dev_info(&spi->dev, "revision %u\n", priv->rev_id);
+	if (priv->rev_id >= 1) {
+		/* rev 1+: 0 lets the SPI core use the bus default speed */
+		priv->speed_hz = 0;
+	}
+
+	random_ether_addr(dev->dev_addr);
+
+	ksz8851snl_set_hw_macaddr(dev);
+	ksz8851snl_read_hw_macaddr(dev);
+
+	ret = request_irq(spi->irq, ksz8851snl_irq, IRQF_TRIGGER_NONE,
+			DRV_NAME, priv);
+	if (ret < 0) {
+		dev_err(&spi->dev, "request irq %d failed "
+			"(ret = %d)\n", spi->irq, ret);
+		goto error_irq;
+	}
+
+	dev->irq = spi->irq;
+	dev->netdev_ops = &ks8851snl_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+	dev->features = NETIF_F_HW_CSUM;
+	SET_ETHTOOL_OPS(dev, &ksz8851snl_ethtool_ops);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		dev_err(&spi->dev, "register netdev "
+			"failed (ret = %d)\n", ret);
+		goto error_register;
+	}
+	dev_info(&spi->dev, "driver registered\n");
+
+	return 0;
+
+	/* unwind in reverse order of acquisition */
+error_register:
+	free_irq(spi->irq, priv);
+error_irq:
+error_chip:
+	free_netdev(dev);
+error_alloc:
+	if (jtag_spi_mode)
+		jtag_spi_mode(0);
+	dev_set_drvdata(&spi->dev, NULL);
+	dev_info(&spi->dev, "probe failed\n");
+	return ret;
+}
+
+/*
+ * Unbind from the SPI device: unregister the net device, release the
+ * IRQ and, if the board shares the pins with JTAG, give them back.
+ */
+static int __devexit ksz8851snl_remove(struct spi_device *spi)
+{
+	struct ksz8851snl_net *priv = dev_get_drvdata(&spi->dev);
+	void (*jtag_spi_mode) (int enable) = spi->dev.platform_data;
+
+	if (priv != NULL) {
+		if (netif_msg_drv(priv))
+			printk(KERN_DEBUG DRV_NAME ": remove\n");
+		unregister_netdev(priv->netdev);
+		free_irq(spi->irq, priv);
+		free_netdev(priv->netdev);
+	}
+
+	if (jtag_spi_mode)
+		jtag_spi_mode(0);
+
+	return 0;
+}
+
+/* SPI driver glue; the name must match the board's spi_board_info */
+static struct spi_driver ksz8851snl_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = ksz8851snl_probe,
+	.remove = __devexit_p(ksz8851snl_remove),
+};
+
+/* Module load: print a banner and register the SPI driver.
+ * NOTE(review): the banner says "Parrot6" though this tree is parrot7
+ * -- confirm the intended platform name. */
+static int __init ksz8851snl_init(void)
+{
+	printk(KERN_INFO "Parrot6 " DRV_NAME " driver " DRV_VERSION "\n");
+	return spi_register_driver(&ksz8851snl_driver);
+}
+
+/* Module unload: unregister the SPI driver. */
+static void __exit ksz8851snl_exit(void)
+{
+	spi_unregister_driver(&ksz8851snl_driver);
+}
+
+/* standard module entry/exit points and metadata */
+module_init(ksz8851snl_init);
+module_exit(ksz8851snl_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Florent Bayendrian <florent.bayendrian@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/pinctrl/Kconfig b/drivers/parrot/pinctrl/Kconfig
new file mode 100644
index 00000000000000..8f2f88a2a3259e
--- /dev/null
+++ b/drivers/parrot/pinctrl/Kconfig
@@ -0,0 +1,7 @@
+# Parrot7 pin controller: hidden option, enabled by default on the
+# platform; pulls in the generic pinctrl/pinmux/pinconf framework.
+config PINCTRL_PARROT7
+	bool
+	depends on ARCH_PARROT7
+	select PINCTRL
+	select PINMUX
+	select PINCONF
+	default y
diff --git a/drivers/parrot/pinctrl/Makefile b/drivers/parrot/pinctrl/Makefile
new file mode 100644
index 00000000000000..cb13636c8a636d
--- /dev/null
+++ b/drivers/parrot/pinctrl/Makefile
@@ -0,0 +1,2 @@
+# define DEBUG in this directory when pinctrl debugging is enabled
+ccflags-$(CONFIG_DEBUG_PINCTRL)	+= -DDEBUG
+obj-$(CONFIG_PINCTRL_PARROT7)	+= p7-pinctrl.o
diff --git a/drivers/parrot/pinctrl/p7-pinctrl.c b/drivers/parrot/pinctrl/p7-pinctrl.c
new file mode 100644
index 00000000000000..e083128dd3734f
--- /dev/null
+++ b/drivers/parrot/pinctrl/p7-pinctrl.c
@@ -0,0 +1,763 @@
+/**
+ * linux/drivers/parrot/pinctrl/p7-pinctrl.c - Parrot7 pin controller driver
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 01-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include "p7-pinctrl.h"
+#include "../../pinctrl/core.h"
+
+/*****************************************************************************
+ * Parrot7 chips pinctrl driver: implements I/O pin space handling.
+ *
+ * Pin / group mapping is particular here since our hardware allows
+ * us to multiplex every controllable pins using up to 4 mutually exclusive
+ * functions. There is no notion of per group multiplexing at hardware level.
+ * However, pinctrl subsystem requires driver to provide groups of pins to
+ * support I/O multiplexing.
+ * Hence, an identity mapping between hardware pin and corresponding fake
+ * software / logical group is carried out through the use of pinctrl_ops
+ * operation pointers.
+ * Hardware pin multiplexing is managed by assigning up to 4 pinmux functions
+ * for each logical group (i.e., per physical pin thanks to identity map).
+ *
+ * Althought I could have grouped pins in an arbitrary manner to reduce number
+ * of multiplexing entities, this would not fit current needs since we rely on
+ * a flexible way to support future boards (we have not heard of yet) without
+ * breaking existing platform dependent pinctrl code which instantiates
+ * pins / groups / functions.
+ * Moreover, I wanted to avoid misconfigurations between drivers and platform
+ * pinctrl code for IPs which have the ability to multiplex their own outputs
+ * to chip pins (like AAI, SPI...). It seems to me the simplest way to achieve
+ * this, is to enforce a direct group / pin mapping at pinctrl level and
+ * perform complex multiplexing inside the IP / driver (since it is needed any
+ * way).
+ ****************************************************************************/
+
+/* I/O remapped address of register space */
+static u32 volatile __iomem* p7ctl_vaddr;
+/* Physical address of register space */
+static phys_addr_t p7ctl_paddr;
+/* Size of register space */
+static resource_size_t p7ctl_sz;
+/* Pinctrl platform device */
+static struct platform_device const* p7ctl_pdev;
+/* Pinctrl device */
+static struct pinctrl_dev* p7ctl_dev;
+/* Platform specific data given at loading time */
+static struct p7ctl_plat_data const* p7ctl_pdata;
+#ifdef CONFIG_PM_SLEEP
+/* Memory area to store registers */
+static u32* p7ctl_saved_regs;
+#endif
+
+#ifdef DEBUG
+#define P7CTL_ASSERT_PIN(_pin_id) \
+ BUG_ON(! p7ctl_pdata->pins); \
+ BUG_ON(! p7ctl_pdata->pins_nr); \
+ BUG_ON(_pin_id >= p7ctl_pdata->pins_nr); \
+ BUG_ON(p7ctl_pdata->pins[_pin_id].number != _pin_id); \
+ BUG_ON(! p7ctl_pdata->pins[_pin_id].name)
+#else
+#define P7CTL_ASSERT_PIN(_pin_id)
+#endif
+
+/************************
+ * "Dummy" functions handling
+ ************************/
+
+/* Not all functions exist on all chip revisions, to simplify things we just
+ * fill the blanks with "dummy" pins */
+/* Not all functions exist on all chip revisions; missing slots keep a NULL
+ * name ("dummy" entries).  Returns 1 when func_id names a real multiplexing
+ * function, 0 for a dummy slot.
+ * The former BUG_ON(! ...name) inside the non-NULL branch was dead code and
+ * has been dropped. */
+static inline int p7ctl_func_available(int func_id)
+{
+	BUG_ON(func_id >= p7ctl_pdata->funcs_nr);
+
+	if (p7ctl_pdata->funcs[func_id].name != NULL) {
+#ifdef DEBUG
+		/* mux ids are 2 bits wide and mux 1 is reserved for GPIO. */
+		BUG_ON(p7ctl_pdata->funcs[func_id].mux_id >= 4);
+		BUG_ON(p7ctl_pdata->funcs[func_id].mux_id == 1);
+		P7CTL_ASSERT_PIN(p7ctl_pdata->funcs[func_id].pin_id);
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+/******************
+ * Fake pin groups
+ ******************/
+
+/* Does fake group exists (i.e., physical pin) ? */
+/* A logical group exists iff the identically numbered physical pin does. */
+static int p7ctl_list_grp(struct pinctrl_dev* pctl, unsigned int grp_id)
+{
+	if (grp_id < p7ctl_pdata->pins_nr) {
+		P7CTL_ASSERT_PIN(grp_id);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Returns fake group name, i.e., physical pin name provided by platform. */
+static char const* p7ctl_grp_name(struct pinctrl_dev* pctl, unsigned int grp_id)
+{
+ if (grp_id >= p7ctl_pdata->pins_nr)
+ return NULL;
+
+ P7CTL_ASSERT_PIN(grp_id);
+ return p7ctl_pdata->pins[grp_id].name;
+}
+
+/* Return the single physical pin attached to fake group. */
+static int p7ctl_grp_pins(struct pinctrl_dev* pctl,
+ unsigned int grp_id,
+ unsigned int const** pins,
+ unsigned int* pins_nr)
+{
+ if (grp_id >= p7ctl_pdata->pins_nr)
+ return -EINVAL;
+
+ P7CTL_ASSERT_PIN(grp_id);
+
+ *pins = &p7ctl_pdata->pins[grp_id].number;
+ *pins_nr = 1;
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs per-pin line: just tag pins as owned by this driver. */
+static void p7ctl_show_pin(struct pinctrl_dev* pctl,
+                           struct seq_file* file,
+                           unsigned pin_id)
+{
+	seq_printf(file, " " P7CTL_DRV_NAME);
+}
+
+#else
+#define p7ctl_show_pin NULL
+#endif
+
+/* Group-level pinctrl operations: identity pin <-> group mapping. */
+static struct pinctrl_ops p7ctl_grp_ops = {
+	.list_groups = p7ctl_list_grp,
+	.get_group_name = p7ctl_grp_name,
+	.get_group_pins = p7ctl_grp_pins,
+	.pin_dbg_show = p7ctl_show_pin
+};
+
+/********************
+ * Pin configuration
+ ********************/
+
+/* Convert a pinconf "config" argument into a hardware pin setting.
+ * Uses a compound literal instead of the previous pointer cast, which
+ * accessed an unsigned long through a union lvalue and violated the
+ * strict aliasing rules (C99 6.5p7). */
+static union p7ctl_setting p7ctl_cfg2set(unsigned long config)
+{
+	return (union p7ctl_setting) { .word = (u32) config };
+}
+
+/* Check pin setting consistency. */
+static int p7ctl_check_set(union p7ctl_setting settings)
+{
+ /* Check pull up / down consistency. */
+ switch (settings.fields.pud) {
+ case P7CTL_PUD_HIGHZ:
+ case P7CTL_PUD_UP:
+ case P7CTL_PUD_DOWN:
+ case P7CTL_PUD_KEEP:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Check slewrate consistency. */
+ if (settings.fields.slr > P7CTL_SLR_3)
+ return -EINVAL;
+
+ /* Check drive strength consistency. */
+ switch (settings.fields.drv) {
+ case P7CTL_DRV_TRI:
+ case P7CTL_DRV_0:
+ case P7CTL_DRV_1:
+ case P7CTL_DRV_2:
+ case P7CTL_DRV_3:
+ case P7CTL_DRV_4:
+ case P7CTL_DRV_5:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* pinconf "get" hook: the raw pad register content is the config word. */
+static int p7ctl_get_cfg(struct pinctrl_dev* pctl,
+                         unsigned int pin_id,
+                         unsigned long* config)
+{
+	P7CTL_ASSERT_PIN(pin_id);
+
+	*config = (unsigned long) __raw_readl(p7ctl_vaddr + pin_id);
+	return 0;
+}
+
+/* pinconf "set" hook: read-modify-write of the pad register.  Only the
+ * parameters whose set_* request bit is set in config are touched; the
+ * other fields keep their current hardware value. */
+static int p7ctl_set_cfg(struct pinctrl_dev* pctl,
+                         unsigned int pin_id,
+                         unsigned long config)
+{
+	u32 volatile __iomem* const addr = p7ctl_vaddr + pin_id;
+	union p7ctl_setting cmd = { .word = __raw_readl(addr) };
+	union p7ctl_setting const set = p7ctl_cfg2set(config);
+	int const err = p7ctl_check_set(set);
+
+	P7CTL_ASSERT_PIN(pin_id);
+	if (err)
+		return err;
+
+	/* Setup Schmitt trigger. */
+	if (set.fields.set_smt)
+		cmd.fields.smt = set.fields.smt;
+
+	/* Setup pull up/down. */
+	if (set.fields.set_pud)
+		cmd.fields.pud = set.fields.pud;
+
+	/* Setup slew rate. */
+	if (set.fields.set_slr)
+		cmd.fields.slr = set.fields.slr;
+
+	/* Setup drive strength. */
+	if (set.fields.set_drv)
+		cmd.fields.drv = set.fields.drv;
+
+	__raw_writel(cmd.word, addr);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs dump of one pad register, decoded field by field.  The three
+ * pud bits are one-hot: up(1), down(2), keeper(4). */
+static void p7ctl_show_pincfg(struct pinctrl_dev* pctl,
+                              struct seq_file* file,
+                              unsigned int pin_id)
+{
+	union p7ctl_setting const set = { .word = __raw_readl(p7ctl_vaddr +
+	                                                      pin_id) };
+
+	P7CTL_ASSERT_PIN(pin_id);
+
+#define ON_OFF(_b) ((_b) ? "on" : "off")
+
+	seq_printf(file, " schmitt(%s) pull_up(%s) pull_down(%s) bus_keeper(%s)",
+	           ON_OFF(set.fields.smt),
+	           ON_OFF(set.fields.pud & 1),
+	           ON_OFF(set.fields.pud & 2),
+	           ON_OFF(set.fields.pud & 4));
+
+	seq_printf(file, " slew_rate(%u)", set.fields.slr);
+	seq_printf(file, " drive_strength(%u)", set.fields.drv);
+}
+
+/*
+ * Callback for writes to the pincfg debugfs.
+ *
+ * buf is a null terminated string
+ *
+ * Format samples:
+ *
+ * echo '170:drive_strength(31)' > pinconf-pins
+ * echo '170:drive_strength:31' > pinconf-pins
+ * echo '170:pull_up:on' > pinconf-pins
+ */
+static int p7ctl_set_pincfg(struct pinctrl_dev* pctl, char *buf)
+{
+	/* Fix: start from an all-zero setting so the set_* request bits of
+	 * parameters NOT parsed below are guaranteed clear.  The union used
+	 * to be left uninitialized, letting p7ctl_set_cfg() apply stack
+	 * garbage to unrelated pad parameters. */
+	union p7ctl_setting set = { .word = 0 };
+	char *last;
+	char *function;
+	unsigned pin;
+	unsigned val;
+
+	pin = simple_strtoul(buf, &last, 0);
+	if (*last++ != ':')
+		return -EINVAL;
+
+	function = last;
+
+	/* Scan to the value delimiter ('(' or ':') and terminate the
+	 * function name there. */
+	for (;;) {
+		if (*last == '\0')
+			/* Unexpected end of string */
+			return -EINVAL;
+
+		if (*last == '(' || *last == ':') {
+			*last = '\0';
+			break;
+		}
+
+		last++;
+	}
+
+	last++;
+
+	/* Parse the value: either a number or "on"/"off". */
+	switch (*last) {
+	case '0':
+	case '1':
+	case '2':
+	case '3':
+	case '4':
+	case '5':
+	case '6':
+	case '7':
+	case '8':
+	case '9':
+		val = simple_strtoul(last, &last, 0);
+		break;
+	case 'o':
+		/* Discriminate between "on" and "off" */
+		if (last[1] == 'n')
+			val = 1;
+		else if (last[1] == 'f')
+			val = 0;
+		else
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (pin >= p7ctl_pdata->pins_nr)
+		return -EINVAL;
+
+	if (strcmp(function, "drive_strength") == 0) {
+		set.fields.set_drv = 1;
+
+		/* Round up to the nearest legal hardware encoding. */
+		if (val >= P7CTL_DRV_5)
+			val = P7CTL_DRV_5;
+		else if (val >= P7CTL_DRV_4)
+			val = P7CTL_DRV_4;
+		else if (val >= P7CTL_DRV_3)
+			val = P7CTL_DRV_3;
+		else if (val >= P7CTL_DRV_2)
+			val = P7CTL_DRV_2;
+		else if (val >= P7CTL_DRV_1)
+			val = P7CTL_DRV_1;
+		else if (val >= P7CTL_DRV_0)
+			val = P7CTL_DRV_0;
+
+		set.fields.drv = val;
+	} else if (strcmp(function, "schmitt") == 0) {
+		set.fields.set_smt = 1;
+		set.fields.smt = val;
+	} else if (strcmp(function, "pull_up") == 0) {
+		set.fields.set_pud = 1;
+		set.fields.pud = val ? P7CTL_PUD_UP : 0;
+	} else if (strcmp(function, "pull_down") == 0) {
+		set.fields.set_pud = 1;
+		set.fields.pud = val ? P7CTL_PUD_DOWN : 0;
+	} else if (strcmp(function, "bus_keeper") == 0) {
+		set.fields.set_pud = 1;
+		set.fields.pud = val ? P7CTL_PUD_KEEP : 0;
+	} else if (strcmp(function, "slew_rate") == 0) {
+		set.fields.set_slr = 1;
+
+		/* Clamp to the highest legal slew rate. */
+		if (val >= P7CTL_SLR_3)
+			val = P7CTL_SLR_3;
+		else if (val >= P7CTL_SLR_2)
+			val = P7CTL_SLR_2;
+		else if (val >= P7CTL_SLR_1)
+			val = P7CTL_SLR_1;
+		else if (val >= P7CTL_SLR_0)
+			val = P7CTL_SLR_0;
+
+		set.fields.slr = val;
+	} else
+		return -EINVAL;
+
+	return p7ctl_set_cfg(pctl, pin, set.word);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* Per-pin configuration operations (get/set plus debugfs hooks). */
+static struct pinconf_ops p7ctl_cfg_ops = {
+	.pin_config_get = p7ctl_get_cfg,
+	.pin_config_set = p7ctl_set_cfg,
+
+#ifdef CONFIG_DEBUG_FS
+	.pin_config_dbg_show = p7ctl_show_pincfg,
+	.pin_config_dbg_set = p7ctl_set_pincfg,
+	.pin_config_group_dbg_show = p7ctl_show_pincfg
+#endif /* CONFIG_DEBUG_FS */
+};
+
+/************************
+ * Multiplexing function
+ ************************/
+
+/* Does multiplexing function exists ? */
+/* A multiplexing function id is valid when within the platform table. */
+static int p7ctl_list_func(struct pinctrl_dev* pctl,
+			   unsigned int func_id)
+{
+	return (func_id < p7ctl_pdata->funcs_nr) ? 0 : -EINVAL;
+}
+
+/* Go and get function name provided by platform. */
+static char const* p7ctl_func_name(struct pinctrl_dev* pctl,
+ unsigned int func_id)
+{
+ if (func_id >= p7ctl_pdata->funcs_nr)
+ return NULL;
+
+ if (p7ctl_func_available(func_id))
+ return p7ctl_pdata->funcs[func_id].name;
+
+ return "NOT_AVAILABLE";
+}
+
+/* Go and get list of groups (i.e., physical pins) attached to function. */
+static int p7ctl_func_grp(struct pinctrl_dev* pctl,
+ unsigned func_id,
+ char const* const** groups,
+ unsigned int* group_nr)
+{
+ if (func_id >= p7ctl_pdata->funcs_nr)
+ return -EINVAL;
+
+ if (!p7ctl_func_available(func_id))
+ return -EINVAL;
+
+ *groups = &p7ctl_pdata->pins[p7ctl_pdata->funcs[func_id].pin_id].name;
+ *group_nr = 1;
+ return 0;
+}
+
+/* Enable physical pin and activate multiplexing function specified for it. */
+static void p7ctl_enable_pin(unsigned int pin_id,
+ unsigned int mux_id)
+{
+ u32 volatile __iomem* const addr = p7ctl_vaddr + pin_id;
+ union p7ctl_setting cmd = { .word = __raw_readl(addr) };
+
+ if (cmd.fields.ie) {
+ if (cmd.fields.func == mux_id)
+ return;
+
+ /* Disable pin at first. */
+ cmd.fields.ie = 0;
+ __raw_writel(cmd.word, addr);
+ }
+
+ /* Apply settings. */
+ cmd.fields.func = mux_id;
+ writel(cmd.word, addr);
+
+ /* Re-enable pin. */
+ cmd.fields.ie = 1;
+ writel(cmd.word, addr);
+}
+
+/* Enable physical pin and activate multiplexing function specified for it. */
+static int p7ctl_enable_func(struct pinctrl_dev* pctl,
+ unsigned int func_id,
+ unsigned int grp_id)
+{
+ struct p7ctl_function const* const func = &p7ctl_pdata->funcs[func_id];
+
+ if (!p7ctl_func_available(func_id))
+ return -EINVAL;
+
+#ifdef DEBUG
+ BUG_ON(grp_id != func->pin_id);
+#endif
+
+ p7ctl_enable_pin(grp_id, func->mux_id);
+
+ dev_dbg(&p7ctl_pdev->dev,
+ "enabled %s pin as %s\n",
+ p7ctl_pdata->pins[grp_id].name,
+ func->name);
+ return 0;
+}
+
+/* Disable physical pin. */
+static void p7ctl_disable_pin(unsigned int pin_id)
+{
+ u32 volatile __iomem* const addr = p7ctl_vaddr + pin_id;
+ union p7ctl_setting cmd;
+
+ cmd.word = __raw_readl(addr);
+ cmd.fields.ie = 0;
+ /* Setting ie to 0 just forces the input value reported by the pad to
+ * low. It still allows for the IP selected by the function to drive the
+ * PAD in output. There's no way to really "disable" the PAD. As a
+ * workaround, I switch back the PAD to function 1. It's either a GPIO
+ * (and unless it's buggy the GPIO controller should have unused PADs
+ * configured as inputs) or nothing. */
+ cmd.fields.func = 1;
+
+ __raw_writel(cmd.word, addr);
+
+ dev_dbg(&p7ctl_pdev->dev,
+ "disabled %s pin\n",
+ p7ctl_pdata->pins[pin_id].name);
+}
+
+/* Disable multiplexing function and turn off associated physical pin. */
+static void p7ctl_disable_func(struct pinctrl_dev* pctl,
+ unsigned int func_id,
+ unsigned int grp_id)
+{
+#ifdef DEBUG
+ struct p7ctl_function const* const func = &p7ctl_pdata->funcs[func_id];
+
+ if (!p7ctl_func_available(func_id))
+ return;
+
+ BUG_ON(grp_id != func->pin_id);
+#endif
+
+ p7ctl_disable_pin(grp_id);
+}
+
+/* pinctrl core "request" hook: refuse a pin already owned by the GPIO
+ * layer.  Also guard against pin_desc_get() returning NULL for a pin the
+ * core never registered (the descriptor used to be dereferenced blindly). */
+static int p7ctl_request(struct pinctrl_dev* pctl, unsigned int pin)
+{
+	struct pin_desc *desc;
+
+	desc = pin_desc_get(pctl, pin);
+	if (! desc)
+		return -EINVAL;
+
+	if (desc->gpio_owner)
+		return -EBUSY;
+
+	return 0;
+}
+
+#define CHAR_BIT 8
+
+/* pinmux GPIO request hook: switch pin `gpio` to mux function 1 (GPIO)
+ * after checking it is not muxed elsewhere and is flagged GPIO-capable in
+ * the platform gpios[] bitmap (one bit per pin, packed in unsigned longs). */
+static int p7ctl_enable_gpio(struct pinctrl_dev* pctl,
+                             struct pinctrl_gpio_range* range,
+                             unsigned int gpio)
+{
+	struct pin_desc *desc;
+	/* Index of the bitmap word holding this pin's capability bit. */
+	unsigned int const word = gpio / (sizeof(p7ctl_pdata->gpios[0]) *
+	                                  CHAR_BIT);
+
+#ifdef DEBUG
+	P7CTL_ASSERT_PIN(gpio);
+	BUG_ON(word >= p7ctl_pdata->gpios_nr);
+#endif
+
+	desc = pin_desc_get(pctl, gpio);
+	if (desc->mux_owner)
+		return -EBUSY;
+
+	if (! (p7ctl_pdata->gpios[word] &
+	       (1UL << (gpio % (sizeof(p7ctl_pdata->gpios[0]) * CHAR_BIT)))))
+		return -EINVAL;
+
+	p7ctl_enable_pin(gpio, 1);
+
+	dev_dbg(&p7ctl_pdev->dev,
+	        "enabled %s pin as gpio %d\n",
+	        p7ctl_pdata->pins[gpio].name,
+	        gpio);
+	return 0;
+}
+
+/* pinmux GPIO free hook: park the pin again; warns (and bails) when asked
+ * to release a pin the platform never declared GPIO-capable. */
+static void p7ctl_disable_gpio(struct pinctrl_dev* pctl,
+                               struct pinctrl_gpio_range* range,
+                               unsigned int gpio)
+{
+	unsigned int const word = gpio / (sizeof(p7ctl_pdata->gpios[0]) *
+	                                  CHAR_BIT);
+
+	P7CTL_ASSERT_PIN(gpio);
+	BUG_ON(word >= p7ctl_pdata->gpios_nr);
+
+	if (! (p7ctl_pdata->gpios[word] &
+	       (1UL << (gpio % (sizeof(p7ctl_pdata->gpios[0]) * CHAR_BIT))))) {
+		dev_warn(&p7ctl_pdev->dev,
+		         "trying to disable non gpio %d pin %s\n",
+		         gpio,
+		         p7ctl_pdata->pins[gpio].name);
+		return;
+	}
+
+	p7ctl_disable_pin(gpio);
+}
+
+/* Multiplexing operations handed to the pinctrl core. */
+static struct pinmux_ops p7ctl_mux_ops = {
+	.request = p7ctl_request,
+	.list_functions = p7ctl_list_func,
+	.get_function_name = p7ctl_func_name,
+	.get_function_groups = p7ctl_func_grp,
+	.enable = p7ctl_enable_func,
+	.disable = p7ctl_disable_func,
+	.gpio_request_enable = p7ctl_enable_gpio,
+	.gpio_disable_free = p7ctl_disable_gpio
+};
+
+/*****************
+ * Pin controller
+ *****************/
+
+/* Single GPIO range: gpio numbers map 1:1 onto pin numbers from 0.
+ * .name and .npins are filled in at probe time from platform data. */
+static struct pinctrl_gpio_range p7ctl_gpio_range = {
+	.id = 0,
+	.base = 0,
+	.pin_base = 0
+};
+
+/* Controller descriptor; .pins / .npins are filled in at probe time. */
+static struct pinctrl_desc p7ctl_desc = {
+	.name = P7CTL_DRV_NAME,
+	.pctlops = &p7ctl_grp_ops,
+	.pmxops = &p7ctl_mux_ops,
+	.confops = &p7ctl_cfg_ops,
+	.owner = THIS_MODULE
+};
+
+/* Platform device probe: map the pad controller registers and register the
+ * pin controller with the pinctrl core.
+ * Fixes over the previous version: the CONFIG_PM_SLEEP save-area is now
+ * freed on every error path (it used to leak), and the comma operator on
+ * the .pins assignment is now a plain statement. */
+static int __devinit p7ctl_probe(struct platform_device* pdev)
+{
+	int err;
+	char const* msg;
+	struct p7ctl_plat_data const* const pdata = dev_get_platdata(&pdev->dev);
+	struct resource const* const res = platform_get_resource(pdev,
+	                                                         IORESOURCE_MEM,
+	                                                         0);
+#ifdef DEBUG
+	BUG_ON(! pdata);
+	BUG_ON(! pdata->funcs);
+	BUG_ON(! pdata->funcs_nr);
+	BUG_ON(! pdata->pins);
+	BUG_ON(! pdata->pins_nr);
+	BUG_ON(! pdata->gpios);
+	BUG_ON(! pdata->gpios_nr);
+	BUG_ON(pdata->gpios_nr !=
+	       ((pdata->pins_nr + (sizeof(p7ctl_pdata->gpios[0]) * CHAR_BIT) - 1) /
+	        (sizeof(p7ctl_pdata->gpios[0]) * CHAR_BIT)));
+	BUG_ON(! pdata->gpio_drv);
+	BUG_ON(! res);
+	dev_dbg(&pdev->dev, "probing...\n");
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+	/* One 32 bit configuration register per pin is saved across sleep. */
+	p7ctl_saved_regs = kmalloc(pdata->pins_nr * sizeof(u32), GFP_KERNEL);
+	if (! p7ctl_saved_regs) {
+		dev_err(&pdev->dev, "Failed to allocate memory to save registers\n");
+		return -ENOMEM;
+	}
+#endif
+
+	p7ctl_paddr = res->start;
+	p7ctl_sz = resource_size(res);
+	if (! request_mem_region(p7ctl_paddr,
+	                         p7ctl_sz,
+	                         dev_name(&pdev->dev))) {
+		msg = "failed to request memory";
+		err = -EBUSY;
+		goto err;
+	}
+
+	p7ctl_vaddr = ioremap(p7ctl_paddr, p7ctl_sz);
+	if (! p7ctl_vaddr) {
+		msg = "failed to remap memory";
+		err = -ENOMEM;
+		goto release;
+	}
+
+	p7ctl_pdev = pdev;
+	p7ctl_pdata = pdata;
+	p7ctl_desc.pins = pdata->pins;
+	p7ctl_desc.npins = pdata->pins_nr;
+	p7ctl_dev = pinctrl_register(&p7ctl_desc, &pdev->dev, NULL);
+	if (p7ctl_dev) {
+		p7ctl_gpio_range.npins = pdata->pins_nr;
+		p7ctl_gpio_range.name = pdata->gpio_drv;
+		pinctrl_add_gpio_range(p7ctl_dev, &p7ctl_gpio_range);
+		dev_info(&pdev->dev, "loaded\n");
+		return 0;
+	}
+
+	iounmap(p7ctl_vaddr);
+
+	err = -EINVAL;
+	msg = "failed to register pins controller";
+
+release:
+	release_mem_region(p7ctl_paddr, p7ctl_sz);
+err:
+#ifdef CONFIG_PM_SLEEP
+	/* The save area used to leak on every error path. */
+	kfree(p7ctl_saved_regs);
+	p7ctl_saved_regs = NULL;
+#endif
+	dev_err(&pdev->dev, "%s (%d)\n", msg, err);
+	return err;
+}
+
+/* Unwind p7ctl_probe(): unregister from the pinctrl core and release the
+ * register mapping; module-global pointers hold the state set at probe. */
+static int __devexit p7ctl_remove(struct platform_device* pdev)
+{
+	pinctrl_unregister(p7ctl_dev);
+	iounmap(p7ctl_vaddr);
+	release_mem_region(p7ctl_paddr, p7ctl_sz);
+#ifdef CONFIG_PM_SLEEP
+	kfree(p7ctl_saved_regs);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Snapshot every pad configuration register before the chip loses power. */
+static int p7ctl_suspend_noirq(struct device *dev)
+{
+	memcpy_fromio(p7ctl_saved_regs, p7ctl_vaddr,
+	              p7ctl_pdata->pins_nr * sizeof(u32));
+	return 0;
+}
+
+/* Restore the pad configuration snapshot taken at suspend time. */
+static int p7ctl_resume_noirq(struct device *dev)
+{
+	memcpy_toio(p7ctl_vaddr, p7ctl_saved_regs,
+	            p7ctl_pdata->pins_nr * sizeof(u32));
+	return 0;
+}
+
+#else
+#define p7ctl_suspend_noirq NULL
+#define p7ctl_resume_noirq NULL
+#endif
+
+static struct dev_pm_ops p7ctl_dev_pm_ops = {
+ .suspend_noirq = p7ctl_suspend_noirq,
+ .resume_noirq = p7ctl_resume_noirq,
+};
+
+/* Platform driver glue.  The '&' on .probe and inside __devexit_p() is
+ * redundant (function designators decay to pointers) but harmless. */
+static struct platform_driver p7ctl_driver = {
+	.driver = {
+		.name = P7CTL_DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &p7ctl_dev_pm_ops,
+	},
+	.probe = &p7ctl_probe,
+	.remove = __devexit_p(&p7ctl_remove),
+};
+
+/* Registered at postcore level so pin muxing is available before ordinary
+ * device drivers probe. */
+static int __init p7ctl_init(void)
+{
+	return platform_driver_register(&p7ctl_driver);
+}
+postcore_initcall(p7ctl_init);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit p7ctl_exit(void)
+{
+	platform_driver_unregister(&p7ctl_driver);
+}
+module_exit(p7ctl_exit);
+
+MODULE_DESCRIPTION("Parrot7 I/O pins control driver");
+/* Fix mis-encoded author name ("Grİgor" -> "Gregor"), matching the header
+ * comment at the top of this file. */
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/pinctrl/p7-pinctrl.h b/drivers/parrot/pinctrl/p7-pinctrl.h
new file mode 100644
index 00000000000000..5123c502dfcd18
--- /dev/null
+++ b/drivers/parrot/pinctrl/p7-pinctrl.h
@@ -0,0 +1,204 @@
+/**
+ * linux/drivers/parrot/pinctrl/p7-pinctrl.h - Parrot7 pin controller driver
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 04-Jun-2012
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef _P7_PINCTRL_H
+#define _P7_PINCTRL_H
+
+#ifdef CONFIG_PINCTRL_PARROT7
+
+#include <linux/stringify.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#define P7CTL_DRV_NAME "p7-pinctrl"
+
+/**
+ * union p7ctl_setting - Parrot7 pin controller pin setting
+ *
+ * @func: multiplexing FUNCtion
+ * @set_smt: this setting contains a SchMitt Trigger configuration
+ * @set_pud: this setting contains a Pull Up / Down configuration
+ * @set_slr: this setting contains a SLew Rate configuration
+ * @set_drv: this setting contains a DRiVe strength configuration
+ * @smt: SchMitt Trigger configuration
+ * @pud: Pull Up / Down configuration
+ * @ie: Input Enable. If 1 the input takes the pad value, otherwise 0.
+ * @slr: SLew Rate configuration
+ * @drv: DRiVe strength configuration
+ *
+ * Holds physical pin configuration. The union is designed in a such way that
+ * configuration values are directly mappable to hardware registers of pin
+ * controller.
+ *
+ * The set_* members are mapped to unused bits in the register. We use them to
+ * tell the pinctrl interface what we want to modify.
+ */
+union p7ctl_setting {
+ u32 word;
+ struct {
+ unsigned func:2;
+ unsigned :2;
+ unsigned set_smt:1;
+ unsigned set_pud:1;
+ unsigned set_slr:1;
+ unsigned set_drv:1;
+ unsigned smt:1;
+ unsigned pud:3;
+ unsigned ie:1;
+ unsigned :3;
+ unsigned slr:2;
+ unsigned drv:6;
+ } fields;
+};
+
+/*
+ * Pad configuration bit field shifts
+ */
+#define P7CTL_FUNC_SFT 0 /* IO Mux control */
+#define P7CTL_SMT_SFT 8 /* schmitt trigger enable */
+#define P7CTL_PUD_SFT 9 /* Pull Up enable */
+#define P7CTL_SLR_SFT 16 /* Slew Rate configuration */
+#define P7CTL_DRV_SFT 18 /* Drive Strength */
+
+#define P7CTL_SET_SMT_SFT 4 /* Enable function setting */
+#define P7CTL_SET_PUD_SFT 5 /* Enable pull up / down setting */
+#define P7CTL_SET_SLR_SFT 6 /* Enable slew rate setting */
+#define P7CTL_SET_DRV_SFT 7 /* Enable drive strength setting */
+
+/***************************************************
+ * Helper macros for generating a pad configuration
+ ***************************************************/
+
+/* Enable/disable Schmitt trigger */
+#define P7CTL_SMT_ON (1U)
+#define P7CTL_SMT_OFF (0U)
+
+/**
+ * P7CTL_SMT_CFG() - Define a Parrot7 pin Schmitt trigger setting
+ *
+ * @_on: %OFF, %ON
+ */
+#define P7CTL_SMT_CFG(_on) \
+ ((P7CTL_SMT_ ## _on << P7CTL_SMT_SFT) | \
+ (1U << P7CTL_SET_SMT_SFT))
+
+/*
+ * Pull up / pull down /
+ * bus keeper / high impedence settings
+ */
+#define P7CTL_PUD_HIGHZ (0U) /* High impedence */
+#define P7CTL_PUD_UP (1U) /* Pull up */
+#define P7CTL_PUD_DOWN (2U) /* Pull down */
+#define P7CTL_PUD_KEEP (4U) /* Bus keeper */
+
+/**
+ * P7CTL_PUD_CFG() - Define a Parrot7 pin Pull UP / Down setting
+ *
+ * @_pud: %HIGHZ, %UP, %DOWN, %KEEP
+ */
+#define P7CTL_PUD_CFG(_pud) \
+ ((P7CTL_PUD_ ## _pud << P7CTL_PUD_SFT) | \
+ (1U << P7CTL_SET_PUD_SFT))
+
+/* Slew rate settings */
+#define P7CTL_SLR_0 (0U) /* Slowest */
+#define P7CTL_SLR_1 (1U)
+#define P7CTL_SLR_2 (2U)
+#define P7CTL_SLR_3 (3U) /* Fastest */
+
+/**
+ * P7CTL_SLR_CFG() - Define a Parrot7 pin slew rate setting
+ *
+ * @_rate: %0, %1, %2, %3
+ */
+#define P7CTL_SLR_CFG(_slr) \
+ ((P7CTL_SLR_ ## _slr << P7CTL_SLR_SFT) | \
+ (1U << P7CTL_SET_SLR_SFT))
+
+#define P7CTL_DRV_TRI (0U) /* Tri-state */
+#define P7CTL_DRV_0 (1U) /* Weakest */
+#define P7CTL_DRV_1 (3U)
+#define P7CTL_DRV_2 (7U)
+#define P7CTL_DRV_3 (0xfU)
+#define P7CTL_DRV_4 (0x1fU)
+#define P7CTL_DRV_5 (0x3fU) /* Strongest */
+
+/**
+ * P7CTL_DRV_CFG() - Define a Parrot7 pin drive strength setting
+ * (the previous comment said "slew rate" — copy-paste error)
+ *
+ * @_drv: %TRI, %0, %1, %2, %3, %4, %5
+ */
+#define P7CTL_DRV_CFG(_drv) \
+	((P7CTL_DRV_ ## _drv << P7CTL_DRV_SFT) | \
+	 (1U << P7CTL_SET_DRV_SFT))
+
+/**
+ * P7CTL_INIT_PIN() - Define a Parrot7 physical pin
+ *
+ * @_pid: pin identifier (same as GPIO number)
+ * @_name: pin name
+ */
+#define P7CTL_INIT_PIN(_pid, _name) PINCTRL_PIN(_pid, _name)
+
+/**
+ * struct p7ctl_function - Parrot7 pin multiplexing function
+ *
+ * @name: function name
+ * @pin_id: physical pin identifier (same as GPIO number)
+ * @mux_id: hardware multiplexing function assigned to pin
+ */
+struct p7ctl_function {
+ char const* name;
+ unsigned short pin_id;
+ unsigned short mux_id;
+};
+
+/**
+ * P7CTL_INIT_FUNC() - Define a Parrot7 multiplexed pin function
+ *
+ * @_name: function name
+ * @pin_id: physical pin identifier (same as GPIO number)
+ * @mux_id: hardware multiplexing function to assign to physical pin
+ */
+#define P7CTL_INIT_FUNC(_func_id, _pin_id, _mux_id) \
+ { \
+ .name = __stringify(_func_id), \
+ .pin_id = _pin_id, \
+ .mux_id = _mux_id \
+ }
+
+/**
+ * struct p7ctl_pdata - Parrot7 pins controller platform specific data
+ *
+ * @funcs: table of multiplexing functions
+ * @funcs_nr: number of multiplexing functions
+ * @pins: table of hardware pins
+ * @pins_nr: number of hardware pins
+ * @gpios: table of hardware pins usable as GPIOs
+ * @gpios_nr: number of entries of @gpios array
+ * @gpio_drv: GPIO driver name
+ *
+ * Holds platform specific pinctrl descriptors to allow Parrot7 chip
+ * variants to customize their own I/O pins space.
+ */
+struct p7ctl_plat_data {
+ struct p7ctl_function const* funcs;
+ size_t funcs_nr;
+ struct pinctrl_pin_desc const* pins;
+ size_t pins_nr;
+ unsigned long const* gpios;
+ size_t gpios_nr;
+ char const* gpio_drv;
+};
+
+#endif /* CONFIG_PINCTRL_PARROT7 */
+
+#endif /* _P7_PINCTRL_H */
diff --git a/drivers/parrot/power/Kconfig b/drivers/parrot/power/Kconfig
new file mode 100644
index 00000000000000..8e36f8e90b832f
--- /dev/null
+++ b/drivers/parrot/power/Kconfig
@@ -0,0 +1,8 @@
+
+config FAKE_BATTERY
+ bool "Fake Android Battery Driver"
+ depends on POWER_SUPPLY
+ help
+ Fake battery driver for Android battery service
+
+
diff --git a/drivers/parrot/power/Makefile b/drivers/parrot/power/Makefile
new file mode 100644
index 00000000000000..ec77f58d55c890
--- /dev/null
+++ b/drivers/parrot/power/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FAKE_BATTERY) += fake_battery.o
diff --git a/drivers/parrot/power/fake_battery.c b/drivers/parrot/power/fake_battery.c
new file mode 100644
index 00000000000000..d21fb06d4ea07d
--- /dev/null
+++ b/drivers/parrot/power/fake_battery.c
@@ -0,0 +1,284 @@
+/*
+ * Fake Power supply driver for android.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+static struct platform_device *fake_battery_pdev;
+
+static int ac_online = 1;
+static int battery_status = POWER_SUPPLY_STATUS_CHARGING;
+static int battery_capacity = 100;
+
+/* Report the fake battery properties; status and capacity are driven by
+ * module parameters, everything else is a fixed value. */
+static int fake_battery_bat_get_property(struct power_supply *psy,
+					 enum power_supply_property psp,
+					 union power_supply_propval *val)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = battery_status;
+		return 0;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = POWER_SUPPLY_HEALTH_GOOD;
+		return 0;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = 1;
+		return 0;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		return 0;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = battery_capacity;
+		return 0;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = 20;
+		return 0;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = 5;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Shared getter for the AC and USB supplies: only the online state. */
+static int fake_battery_ac_get_property(struct power_supply *psy,
+					enum power_supply_property psp,
+					union power_supply_propval *val)
+{
+	if (psp != POWER_SUPPLY_PROP_ONLINE)
+		return -EINVAL;
+
+	val->intval = ac_online;
+	return 0;
+}
+
+/* Properties exposed by the fake battery supply. */
+static enum power_supply_property fake_battery_battery_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+/* Properties exposed by the fake AC and USB supplies. */
+static enum power_supply_property fake_battery_ac_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+/* The three supplies registered with the power_supply class. */
+static struct power_supply fake_battery_bat = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = fake_battery_battery_props,
+	.num_properties = ARRAY_SIZE(fake_battery_battery_props),
+	.get_property = fake_battery_bat_get_property,
+	.use_for_apm = 1,
+};
+
+static struct power_supply fake_battery_ac = {
+	.name = "ac",
+	.type = POWER_SUPPLY_TYPE_MAINS,
+	.properties = fake_battery_ac_props,
+	.num_properties = ARRAY_SIZE(fake_battery_ac_props),
+	.get_property = fake_battery_ac_get_property,
+};
+
+/* NOTE(review): the "usb" supply is typed POWER_SUPPLY_TYPE_MAINS;
+ * POWER_SUPPLY_TYPE_USB would seem more natural — confirm whether the
+ * Android battery service relies on the current value. */
+static struct power_supply fake_battery_usb = {
+	.name = "usb",
+	.type = POWER_SUPPLY_TYPE_MAINS,
+	.properties = fake_battery_ac_props,
+	.num_properties = ARRAY_SIZE(fake_battery_ac_props),
+	.get_property = fake_battery_ac_get_property,
+};
+
+/* Maximum accepted length for a module parameter keyword (incl. NUL). */
+#define MAX_KEYLENGTH 25
+/* Keyword <-> integer value pairs; tables end with a NULL key. */
+struct battery_property_map {
+	int value;
+	char const *key;
+};
+
+static struct battery_property_map map_online[] = {
+	{ 0, "off" },
+	{ 1, "on" },
+	{ -1, NULL },
+};
+
+static struct battery_property_map map_status[] = {
+	{ POWER_SUPPLY_STATUS_CHARGING, "charging" },
+	{ POWER_SUPPLY_STATUS_DISCHARGING, "discharging" },
+	{ POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
+	{ POWER_SUPPLY_STATUS_FULL, "full" },
+	{ -1, NULL },
+};
+
+/* Translate a (possibly newline-terminated) keyword into its mapped value;
+ * returns def_val when the keyword is unknown.
+ * Fix: the previous trailing-newline strip computed strnlen() - 1 without
+ * checking for an empty key, reading/writing buf[-1] for "" input. */
+static int map_get_value(struct battery_property_map *map, const char *key,
+			 int def_val)
+{
+	char buf[MAX_KEYLENGTH];
+	int len;
+
+	strncpy(buf, key, MAX_KEYLENGTH);
+	buf[MAX_KEYLENGTH - 1] = '\0';
+
+	/* Strip one trailing newline (sysfs writes usually carry one). */
+	len = strnlen(buf, MAX_KEYLENGTH);
+	if (len > 0 && buf[len - 1] == '\n')
+		buf[len - 1] = '\0';
+
+	while (map->key) {
+		if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
+			return map->value;
+		map++;
+	}
+
+	return def_val;
+}
+
+/* Reverse lookup: value -> keyword, or def_key when not found. */
+static const char *map_get_key(struct battery_property_map *map, int value,
+			       const char *def_key)
+{
+	struct battery_property_map *entry;
+
+	for (entry = map; entry->key != NULL; entry++)
+		if (entry->value == value)
+			return entry->key;
+
+	return def_key;
+}
+
+
+/* Module parameter accessors for ac_online: writes accept "on"/"off" and
+ * notify user space through a power_supply uevent. */
+static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+{
+	ac_online = map_get_value(map_online, key, ac_online);
+	power_supply_changed(&fake_battery_ac);
+	return 0;
+}
+
+static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+{
+	strcpy(buffer, map_get_key(map_online, ac_online, "unknown"));
+	return strlen(buffer);
+}
+
+static struct kernel_param_ops param_ops_ac_online = {
+	.set = param_set_ac_online,
+	.get = param_get_ac_online,
+};
+
+/* Module parameter accessors for battery_status: writes accept the status
+ * keywords from map_status[] and notify user space via uevent. */
+static int param_set_battery_status(const char *key,
+				    const struct kernel_param *kp)
+{
+	battery_status = map_get_value(map_status, key, battery_status);
+	power_supply_changed(&fake_battery_bat);
+	return 0;
+}
+
+static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+{
+	strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
+	return strlen(buffer);
+}
+
+static struct kernel_param_ops param_ops_battery_status = {
+	.set = param_set_battery_status,
+	.get = param_get_battery_status,
+};
+
+/* Module parameter setter for battery_capacity.  The parameter is a
+ * percentage (see MODULE_PARM_DESC), so out-of-range writes are now
+ * rejected instead of silently accepted. */
+static int param_set_battery_capacity(const char *key,
+				      const struct kernel_param *kp)
+{
+	int tmp;
+
+	if (1 != sscanf(key, "%d", &tmp))
+		return -EINVAL;
+
+	if (tmp < 0 || tmp > 100)
+		return -EINVAL;
+
+	battery_capacity = tmp;
+	power_supply_changed(&fake_battery_bat);
+	return 0;
+}
+
+#define param_get_battery_capacity param_get_int
+
+static struct kernel_param_ops param_ops_battery_capacity = {
+ .set = param_set_battery_capacity,
+ .get = param_get_battery_capacity,
+};
+
+/* module_param(name, type, perm) pastes the "type" argument into
+ * param_check_<type>/param_ops_<type>; defining those names per parameter
+ * lets each parameter use its own custom get/set ops above. */
+#define param_check_ac_online(name, p) __param_check(name, p, void);
+#define param_check_battery_status(name, p) __param_check(name, p, void);
+#define param_check_battery_capacity(name, p) __param_check(name, p, void);
+
+module_param(ac_online, ac_online, 0644);
+MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
+
+module_param(battery_status, battery_status, 0644);
+MODULE_PARM_DESC(battery_status,
+	"battery status <charging|discharging|not-charging|full>");
+
+module_param(battery_capacity, battery_capacity, 0644);
+MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
+
+/* Register the platform device and the three supplies.
+ * Fix: the error-path labels were inverted — a failing registration used
+ * to unregister the very supply that had just failed (and every later one
+ * that was never registered), while leaving earlier registrations and the
+ * platform device behind.  Each label now unwinds only what succeeded. */
+static int __init fake_battery_init(void)
+{
+	int ret;
+
+	fake_battery_pdev = platform_device_register_simple("battery", 0, NULL, 0);
+	if (IS_ERR(fake_battery_pdev))
+		return PTR_ERR(fake_battery_pdev);
+
+	ret = power_supply_register(&fake_battery_pdev->dev, &fake_battery_bat);
+	if (ret)
+		goto bat_failed;
+
+	ret = power_supply_register(&fake_battery_pdev->dev, &fake_battery_ac);
+	if (ret)
+		goto ac_failed;
+
+	ret = power_supply_register(&fake_battery_pdev->dev, &fake_battery_usb);
+	if (ret)
+		goto usb_failed;
+
+	return 0;
+
+usb_failed:
+	power_supply_unregister(&fake_battery_ac);
+ac_failed:
+	power_supply_unregister(&fake_battery_bat);
+bat_failed:
+	platform_device_unregister(fake_battery_pdev);
+	return ret;
+}
+
+/* Module exit: unregister the three supplies, then the platform device. */
+static void __exit fake_battery_exit(void)
+{
+	power_supply_unregister(&fake_battery_bat);
+	power_supply_unregister(&fake_battery_ac);
+	power_supply_unregister(&fake_battery_usb);
+	platform_device_unregister(fake_battery_pdev);
+}
+
+module_init(fake_battery_init);
+module_exit(fake_battery_exit);
+MODULE_AUTHOR("jean-baptiste.dubois@parrot.com");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fake Battery driver for Android");
+
diff --git a/drivers/parrot/pwm/Kconfig b/drivers/parrot/pwm/Kconfig
new file mode 100644
index 00000000000000..4e7297a6959145
--- /dev/null
+++ b/drivers/parrot/pwm/Kconfig
@@ -0,0 +1,29 @@
+config PWM_P7MU
+ tristate "Parrot P7MU PWM"
+ depends on PWM && MFD_P7MU
+ select HAVE_PWM
+ default m
+ help
+ Builds the Parrot7 power Management Unit PWM blocks driver.
+
+config PWM_P7MU_DEBUG
+ bool "Debug P7MU PWM"
+ depends on PWM_P7MU
+	default n
+ help
+ Build Parrot7 power Management Unit PWM blocks driver with debug enabled
+
+config PWM_PARROT7
+ tristate "Parrot7 PWM driver (Pulse Width Modulation)"
+ depends on PWM
+ select HAVE_PWM
+ default m
+ help
+ Enables support for Parrot7 PWM driver
+
+config PWM_PARROT7_DEBUG
+ bool "Debug PARROT7 PWM"
+ depends on PWM_PARROT7
+	default n
+ help
+ Build Parrot7 PWM block driver with debug enabled
diff --git a/drivers/parrot/pwm/Makefile b/drivers/parrot/pwm/Makefile
new file mode 100644
index 00000000000000..79369b0842b885
--- /dev/null
+++ b/drivers/parrot/pwm/Makefile
@@ -0,0 +1,9 @@
+ifeq ($(CONFIG_ARCH_PARROT7),y)
+ obj-$(CONFIG_PWM_PARROT7) += p7_pwm.o
+endif
+
+ifeq ($(CONFIG_PWM_P7MU_DEBUG),y)
+ CFLAGS_p7mu-pwm.o += -DDEBUG
+endif
+CFLAGS_p7mu-pwm.o += -I$(srctree)/drivers/parrot
+obj-$(CONFIG_PWM_P7MU) += p7mu-pwm.o
diff --git a/drivers/parrot/pwm/p7_pwm.c b/drivers/parrot/pwm/p7_pwm.c
new file mode 100644
index 00000000000000..448ce2c7a57129
--- /dev/null
+++ b/drivers/parrot/pwm/p7_pwm.c
@@ -0,0 +1,690 @@
+/**
+ * linux/drivers/parrot/pwm/p7-pwm.c - Parrot7 PWM driver implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Victor Lambret <victor.lambret.ext@parrot.com>
+ * date: 28-Nov-2012
+ *
+ * This file is released under the GPL
+ */
+
+#if defined(CONFIG_PWM_PARROT7_DEBUG)
+#define DEBUG
+#define P7PWM_ASSERT(_chip, _pwm) \
+ BUG_ON(! _chip); \
+ BUG_ON(! _pwm);
+
+#else /*Empty debug macro if debug is OFF*/
+#define P7PWM_ASSERT(_chip, _pwm)
+
+#endif /*CONFIG_PWM_PARROT7_DEBUG*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <mach/hardware.h>
+#include <linux/pinctrl/consumer.h>
+#include "p7_pwm.h"
+
+
+/******************************************************************************
+ * DRIVER DATA
+******************************************************************************/
+
+
+/* Comment on the usage of the mutex :
+ * The linux PWM framework already deal with concurrency problems so the
+ * drivers dont need to use mutex. At the moment, this mutex is useless, but
+ * we may have to write another pwm driver to use all P7 PWM capabilities not
+ * managed by linux PWM framework like servomotors. The usage of the mutex is a
+ * guaranty that the access to PWM registers will be shared without conflicts
+ * between those drivers.
+ *
+ * This is not true for enable/disable/configure where no lock is taken by
+ * the framework.
+ */
+
+/* Serializes accesses to the shared START/MODE registers and the
+ * per-channel register pairs (see rationale in the comment above). */
+static DEFINE_SPINLOCK(p7pwm_spinlock);
+
+/* Per-channel cached state mirroring the last values programmed into
+ * the hardware, plus the user-requested timings. */
+struct p7pwm_config {
+	int min_ratio;    /* minimal ratio exponent derived from duty_precision */
+	int speed_regval; /* last SPEED register value, -1 when unconfigured */
+	int ratio_regval; /* last RATIO register value, -1 when unconfigured */
+	int duty_ns;      /* duty cycle requested by the caller (ns) */
+	int period_ns;    /* period requested by the caller (ns) */
+	struct pinctrl *p;
+};
+
+static struct p7pwm_pdata * pdata;
+static unsigned long clk_freq;
+static struct clk * clk;
+static void __iomem * mmio_base;
+static struct p7pwm_config config[P7PWM_NUMBER];
+static struct pinctrl * pins;
+
+/* Register map: one SPEED/RATIO pair per channel (stride 0x10), plus the
+ * global START and MODE registers holding one bit per channel. */
+#define P7PWM_SPEED(i) (0x0+0x10*(i))
+#define P7PWM_RATIO(i) (0x4+0x10*(i))
+#define P7PWM_START (0xFF00)
+#define P7PWM_MODE (0xFF04)
+
+/******************************************************************************
+ * DRIVER HELPERS
+******************************************************************************/
+
+/* Return non-zero when channel 'pwm' is configured by platform data in
+ * 'mode' (P7PWM_MODE_NORMAL / P7PWM_MODE_CLOCK); 0 when no configuration
+ * exists for that channel. */
+static int is_mode(int pwm, int mode)
+{
+	if (NULL == pdata->conf[pwm])
+		return 0;
+	else
+		return (mode ==pdata->conf[pwm]->mode);
+}
+
+/* True when platform data flags hardware channel _pwm as usable. */
+#define is_used(_pwm) (pdata->used & (1 << (_pwm)))
+
+/* 64-bit round-to-nearest division built on do_div().
+ * NOTE(review): 'divisor' is expanded twice — harmless for the constant or
+ * simple expressions used in this file, but beware of side effects. */
+#define DIV64_ROUND_CLOSEST(number, divisor)( \
+{ \
+	typeof(number) __number = (number) + ((divisor) / 2); \
+	typeof(divisor) __div = (divisor); \
+	do_div((__number),(__div)); \
+	__number; \
+} \
+)
+
+/* Program the SPEED/RATIO registers of channel 'pwm', skipping a write
+ * when the cached value in 'config' already matches; pass config == NULL
+ * to force both writes unconditionally. */
+static void write_config(uint32_t pwm, uint32_t speed, uint32_t ratio, struct p7pwm_config *config)
+{
+	if (!config || config->speed_regval != speed)
+		__raw_writel(speed, mmio_base + P7PWM_SPEED(pwm));
+	if (!config || config->ratio_regval != ratio)
+		__raw_writel(ratio, mmio_base + P7PWM_RATIO(pwm));
+}
+
+/* Output frequency (Hz) produced by register value 'speed_reg' and ratio
+ * exponent 'ratio': clk_freq / (2*(speed+1)), further divided by 2^ratio
+ * in normal mode only (the ratio is ignored in clock mode). */
+static unsigned long freq(uint32_t speed_reg, uint32_t ratio, unsigned long mode)
+{
+	unsigned long res;
+	res = DIV64_ROUND_CLOSEST(clk_freq, 2*(speed_reg +1));
+	if (P7PWM_MODE_NORMAL == mode)
+		res = res >> ratio;
+	return res;
+}
+
+/* Duty cycle (percent) encoded by RATIO register value 'ratio_reg'.
+ * Clock mode is a fixed 50% square wave; in normal mode the duty is
+ * hratio / 2^ratio (register fields [15:0] / [20:16]).
+ * Fix vs. original: use the P7PWM_MODE_CLOCK constant instead of the
+ * magic literal 1, consistent with the rest of the file. */
+static unsigned long duty(uint32_t ratio_reg, unsigned long mode)
+{
+	unsigned long rt;
+	if (P7PWM_MODE_CLOCK == mode)
+		rt = 50;
+	else {
+		uint32_t ratio, hratio;
+		ratio = (ratio_reg >> 16 );
+		hratio = ratio_reg & 0xFFFF;
+		rt = DIV64_ROUND_CLOSEST(100*hratio, (1 << ratio));
+	}
+	return rt;
+}
+
+/* Map a requested duty-cycle precision (in percent) to the minimal ratio
+ * exponent guaranteeing that one hratio step stays within that precision
+ * (precision 0 means "best possible" and yields the maximum of 16). */
+static int to_min_ratio(uint16_t precision)
+{
+	int r = 0;
+	if (precision < 1)
+		r = 16;
+	else if (precision < 2)
+		r = 7;
+	else if (precision < 4)
+		r = 6;
+	else if (precision < 7)
+		r = 5;
+	else if (precision < 13)
+		r = 4;
+	else if (precision < 25)
+		r = 3;
+	else if (precision < 50)
+		r = 2;
+	else
+		r = 1;
+
+	return r;
+}
+
+/******************************************************************************
+ * PWM FRAMEWORK INTERFACE
+******************************************************************************/
+
+/* PWM framework .request hook: accept only channels flagged as used by
+ * platform data. Resets the cached register state (-1 == unconfigured)
+ * and derives the minimal ratio from the requested duty precision.
+ * Returns -ENOENT for channels not declared by the platform. */
+static int p7pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	int rc = -ENOENT;
+
+	P7PWM_ASSERT(chip, pwm);
+
+	dev_dbg(chip->dev, "%s:requesting pwm %d\n", __func__,pwm->hwpwm);
+
+	if (is_used(pwm->hwpwm)) {
+		config[pwm->hwpwm].speed_regval = -1;
+		config[pwm->hwpwm].ratio_regval = -1;
+		config[pwm->hwpwm].min_ratio = to_min_ratio(pdata->conf[pwm->hwpwm]->duty_precision);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/* PWM framework .free hook: nothing to release per-channel, only traces. */
+static void p7pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	P7PWM_ASSERT(chip, pwm);
+
+	dev_dbg(chip->dev,"p7pwm_free: P7 pwm %d\n",pwm->hwpwm);
+}
+
+
+
+/* Calculation :
+ *
+ * Refer to the Pulse with Modulation section of the P7 User Manual for more
+ * the original specification.
+ *
+ * The goal of the calculus is to compute the best speed and ratio register
+ * values to reach demanded period and duty cycle.
+ *
+ * Basicly we have to compute three values noted :
+ *
+ * speed : The speed value
+ * ratio : The bits [20:16] of the ratio register.
+ * hratio : The bits [15:0] of the ratio register.
+ *
+ * The period formula is :
+ * (period_ns / 10^9) = (2*(Speed+1)*2^ratio) / clk_freq
+ *
+ * With a little transformation it is equivalent to :
+ * (Speed+1)*2^ratio = (period_ns * clk_freq) / (2*10^9)
+ *
+ * We call ideal_div = (period_ns * clk_freq) / (2*10^9) the ideal divider
+ *
+ * many couples (Speed,ratio) are solution of this equation.
+ *
+ *
+ * Frequency computation Steps
+ * ===========================
+ *
+ * 1) Compute the ideal divider
+ * 2) The initial ratio is the binary logarithm of the 2 most significant
+ * bytes.
+ * 3) With the ratio fixed, initial speed can be computed with :
+ * Speed = (ideal_div / (2^ratio)) -1
+ *
+ * This couple is the couple maximizing frequency. As Frequency and duty cycle
+ * accuracy are linked, it may not be sufficient to get the demanded accuracy
+ * on duty cycle. If a minimal ratio is specified with platform-data we try to
+ * increase ratio and keep the frequency close to the initial one.
+ *
+ * If initial ratio r1 is inferior to minimal ratio r2 then we have :
+ * r2 = r1 + n
+ * If we set ratio to r2, we have a new speed Speed2 computed with :
+ *
+ * Speed2 = ((Speed1 + 1) / 2^n) -1
+ *
+ * 4) If needed set Speed2 as new speed and minimal ratio as ratio.
+ *
+ *
+ * Duty cycle computations steps
+ * =============================
+ *
+ * The duty cycle is given by the formula :
+ *
+ * (duty_ns / period_ns) = hratio / 2^ratio
+ *
+ * so it means that hratio = (duty_ns * 2^ratio) / period
+ *
+ * 5) Compute hratio
+ * 6) We keep extreme values only if they are really the extremes :
+ * - If hratio is 0 and duty_ns > 0 then set hratio to 1.
+ * - If hratio is max and duty-ns < period_ns then substract one to
+ * hratio.
+ */
+
+/* PWM framework .config hook: translate (duty_ns, period_ns) into the
+ * SPEED/RATIO register values following the 6-step calculation described
+ * above, verify the achieved period stays within the platform-specified
+ * precision, then program the hardware and refresh the cached state.
+ * Returns -EINVAL on bad arguments or insufficient achievable precision.
+ *
+ * Fixes vs. original (log strings only, logic untouched):
+ *  - the entry trace labelled duty_ns as "ratio";
+ *  - "insuffisant" typo in the precision error message. */
+static int p7pwm_configure(struct pwm_chip *chip,
+		struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+
+	uint64_t divider;
+	uint32_t speed, ratio, hratio = 0;
+	int rc = 0;
+	unsigned long f, res ;
+
+	P7PWM_ASSERT(chip, pwm);
+	dev_dbg(chip->dev,"%s: PWM_%d, modeclock=%d, period=%d, duty=%d\n",
+	        __func__,
+	        pwm->hwpwm,
+	        pdata->conf[pwm->hwpwm]->mode,
+	        period_ns,
+	        duty_ns);
+
+	/* Check invalid parameters values */
+	if ((period_ns <= 0) || ((duty_ns < 0) && is_mode(pwm->hwpwm,P7PWM_MODE_NORMAL))) {
+		dev_warn(chip->dev,"%s:Invalid argument\n",__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (duty_ns > 0 && is_mode(pwm->hwpwm,P7PWM_MODE_CLOCK))
+		dev_warn(chip->dev,"%s:non zero duty with PWM %d configured in clock mode !\n",
+		         __func__,
+		         pwm->hwpwm);
+
+	/* Step 1 : Compute ideal divider */
+	divider = ((uint64_t)period_ns) * clk_freq;
+	dev_dbg(chip->dev,"STEP1 : divider1=%llu\n",divider);
+	divider = DIV64_ROUND_CLOSEST(divider, 2*1000000000);
+	dev_dbg(chip->dev,"STEP1 : divider2=%llu\n",divider);
+	/* A divider of zero cause problems, force divider to be 1 at least */
+	if (0 == divider)
+		divider = 1;
+
+	/* Step 2 : Compute initial ratio */
+	if (is_mode(pwm->hwpwm,P7PWM_MODE_NORMAL)) {
+		ratio = ilog2((divider >> 16 ) & 0xFFFF) + 1;
+	} else {
+		ratio = 0;
+	}
+	dev_dbg(chip->dev,"STEP2 : ratio=0x%x\n",ratio);
+
+	/* Step 3 : Compute initial speed */
+	speed = (divider >> ratio) - 1;
+	dev_dbg(chip->dev,"STEP3 : speed=0x%x\n",speed);
+
+	if (is_mode(pwm->hwpwm,P7PWM_MODE_NORMAL)) {
+		int min_ratio;
+		min_ratio = config[pwm->hwpwm].min_ratio;
+		/* Step 4 : If needed, increase ratio and recompute speed with new ratio */
+		if (ratio < min_ratio) {
+			unsigned int delta_div= 1 << (min_ratio - ratio);
+			dev_dbg(chip->dev,"%s: Adjusting for a minimal ratio of %d\n",
+			        __func__,
+			        min_ratio);
+			speed = DIV64_ROUND_CLOSEST((uint64_t)speed+1, delta_div) - 1;
+			ratio = min_ratio;
+		}
+
+		/* Step 5 : Compute hratio */
+		hratio = 0xFFFF & DIV64_ROUND_CLOSEST((uint64_t)(1<<ratio)*duty_ns, period_ns);
+
+		/* Step 6 : Deal with hratio extreme values */
+		if ((0 == hratio) && (duty_ns > 0))
+		{
+			dev_dbg(chip->dev,"%s: set hratio to 1 because duty cycle is positive\n",
+			        __func__);
+			hratio = 1;
+		}
+		else if (((1 << ratio) <= hratio) && (duty_ns < period_ns))
+		{
+			dev_dbg(chip->dev,"%s: reduce hratio by 1 because duty cycle is not 100\n",
+			        __func__);
+			hratio--;
+		}
+	}
+	/* SPEED is a 16-bit register: clamp the computed value. */
+	speed = min(speed, 0xFFFFU);
+
+	/* Test if frequency precision is high enough */
+	dev_dbg(chip->dev,"PRECISION0: speed=%d,ratio=%d,mode=%d\n",speed,ratio,pdata->conf[pwm->hwpwm]->mode);
+	f = freq(speed,ratio,pdata->conf[pwm->hwpwm]->mode);
+	dev_dbg(chip->dev,"PRECISION1: f=%lu\n",f);
+	if (f == 0)
+		f = 1;
+	res = DIV64_ROUND_CLOSEST(1000000000,f);
+	dev_dbg(chip->dev,"PRECISION1: res1=%lu\n",res);
+	if (res > period_ns) {
+		res = DIV64_ROUND_CLOSEST(100*(res - period_ns), period_ns);
+		dev_dbg(chip->dev,"PRECISION2: res2=%lu\n",res);
+	}
+	else {
+		dev_dbg(chip->dev,"PRECISION2: res3=%lu\n",res);
+		res = DIV64_ROUND_CLOSEST(100*(period_ns - res), period_ns);
+	}
+	dev_dbg(chip->dev,"%s: period precision computed :%lu/100 (needed : %d/100)\n",
+	        __func__,
+	        res,
+	        pdata->conf[pwm->hwpwm]->period_precision);
+	if (res > pdata->conf[pwm->hwpwm]->period_precision) {
+		dev_err(chip->dev,"%s: ERROR : insufficient precision of %lu/100 (needed : %d/100)\n",
+		        __func__,
+		        res,
+		        pdata->conf[pwm->hwpwm]->period_precision);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Write the values in registers */
+	speed = speed & 0xFFFF;
+	ratio = ((ratio & 0x1F) << 16) | (hratio & 0xFFFF);
+
+	dev_dbg(chip->dev,"%s: computed values: period=0x%x, ratio=0x%x\n",
+	        __func__,
+	        speed,
+	        ratio);
+
+	write_config(pwm->hwpwm, speed, ratio, &config[pwm->hwpwm]);
+
+	config[pwm->hwpwm].speed_regval = speed;
+	config[pwm->hwpwm].ratio_regval = ratio;
+	config[pwm->hwpwm].duty_ns = duty_ns;
+	config[pwm->hwpwm].period_ns = period_ns;
+
+
+error:
+	return rc;
+}
+
+/* PWM framework .enable hook. Because p7pwm_disable() overwrites the
+ * channel registers (MPW1 workaround), the cached configuration is
+ * restored first — with the counter stopped — before switching on clock
+ * mode (if configured) and setting the channel's START bit. All register
+ * accesses are serialized with p7pwm_spinlock. */
+static int p7pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	uint32_t start;
+	int rc = 0;
+
+	P7PWM_ASSERT(chip, pwm) ;
+	dev_dbg(chip->dev,"%s: PWM_%d\n",__func__,
+	        pwm->hwpwm);
+	spin_lock(&p7pwm_spinlock);
+	/* start of MPW1 workaround */
+	/* restore configure overwritten in disable */
+	if (config[pwm->hwpwm].speed_regval != -1 && config[pwm->hwpwm].ratio_regval != -1) {
+		/* disable counter */
+		start = __raw_readl(mmio_base + P7PWM_START);
+		start = start & ~(0x1 << pwm->hwpwm);
+		__raw_writel(start, mmio_base + P7PWM_START);
+
+		write_config(pwm->hwpwm, config[pwm->hwpwm].speed_regval, config[pwm->hwpwm].ratio_regval, NULL);
+	}
+	/* end of MPW1 workaround */
+
+
+	if (is_mode(pwm->hwpwm,P7PWM_MODE_CLOCK)) {
+		uint32_t mode ;
+		mode = __raw_readl(mmio_base + P7PWM_MODE);
+		mode = mode | (0x1 << pwm->hwpwm);
+		__raw_writel(mode, mmio_base + P7PWM_MODE);
+	}
+
+	/* Set the pwm start bit */
+	start = __raw_readl(mmio_base + P7PWM_START);
+	start = start | (0x1 << pwm->hwpwm);
+	__raw_writel(start, mmio_base + P7PWM_START);
+	spin_unlock(&p7pwm_spinlock);
+
+	return rc ;
+}
+
+
+/* PWM framework .disable hook. On MPW1 silicon clearing the START bit
+ * does not actually stop the counter, so after clearing it (and leaving
+ * clock mode if needed) the channel is reprogrammed to emit a constant
+ * low level and restarted. p7pwm_enable() later restores the cached
+ * configuration. */
+static void p7pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	uint32_t start;
+
+	P7PWM_ASSERT(chip, pwm);
+	dev_dbg(chip->dev,"p7pwm_disable: P7 pwm %d\n",pwm->hwpwm);
+
+	spin_lock(&p7pwm_spinlock);
+
+	start = __raw_readl(mmio_base + P7PWM_START);
+	dev_dbg(chip->dev,"%s: start init = 0x%x\n",__func__,start);
+
+	/* Disable counters */
+	start = start & ~(0x1 << pwm->hwpwm);
+	dev_dbg(chip->dev,"%s: start modified = 0x%x\n",__func__,start);
+	__raw_writel(start, mmio_base + P7PWM_START);
+
+	/* If clock mode, switch on normal mode */
+	if (is_mode(pwm->hwpwm,P7PWM_MODE_CLOCK)) {
+		uint32_t mode;
+		mode = __raw_readl(mmio_base + P7PWM_MODE);
+		mode = mode & ~(0x1 << pwm->hwpwm);
+		__raw_writel(mode, mmio_base + P7PWM_MODE);
+	}
+
+	/* It's not possible to disable the pwm using the START bit in MPW1, so it just
+	 * configure with a duty cycle of 0 and a at speed MAX */
+
+	/* Configured speed at maximum and ratio to have a low state signal */
+	__raw_writel(0, mmio_base + P7PWM_SPEED(pwm->hwpwm));
+	__raw_writel(0x10000, mmio_base + P7PWM_RATIO(pwm->hwpwm));
+
+	/* Restart counters */
+	start = start | (0x1 << pwm->hwpwm);
+	__raw_writel(start, mmio_base + P7PWM_START);
+	/* end of MPW1 workaround */
+
+
+	spin_unlock(&p7pwm_spinlock);
+}
+
+/******************************************************************************
+ * DEBUGFS INTERFACE
+******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs helper: derive frequency (Hz), duty (percent) and duty (ns)
+ * for channel 'pwm' from the cached register values; reports dummy values
+ * (1 Hz / 0%) when the channel was never configured. */
+static void p7pwm_info(struct pwm_device const* pwm,
+		unsigned long* frequency,
+		unsigned long* dutycycle,
+		unsigned long* dutyns,
+		unsigned long mode)
+{
+	int n= pwm->hwpwm;
+	if (config[n].speed_regval == -1 || config[n].ratio_regval == -1) {
+		*frequency = 1;
+		*dutycycle = 0;
+		*dutyns = 0;
+	}
+	else {
+		/* Pre-ratio tick frequency (clock-mode formula ignores ratio). */
+		unsigned long freqdiv = freq(config[n].speed_regval, 0, P7PWM_MODE_CLOCK);
+
+		*frequency = freq( config[n].speed_regval,
+		                   (config[n].ratio_regval >> 16),
+		                   mode);
+		*dutycycle = duty( config[n].ratio_regval,
+		                   mode);
+		*dutyns = (config[n].ratio_regval & 0xffff) * (1000000000 / freqdiv);
+	}
+}
+
+/* debugfs .dbg_show callback: dump request/enable state, computed timing,
+ * precision targets and raw register values for every channel.
+ * NOTE(review): the error computations divide by freq and period_ns —
+ * p7pwm_info() guarantees freq >= 1, but period_ns may be 0 for a
+ * requested-yet-unconfigured channel; confirm this path cannot divide
+ * by zero. */
+static void p7_show_pwm(struct pwm_chip* chip, struct seq_file* s)
+{
+	unsigned int p;
+
+	for (p = 0; p < chip->npwm; p++) {
+		struct pwm_device const* const pwm = &chip->pwms[p];
+		bool const req = test_bit(PWMF_REQUESTED, &pwm->flags);
+		bool const en = test_bit(PWMF_ENABLED, &pwm->flags);
+		unsigned long freq, duty, duty_ns ;
+		int clock ;
+
+		P7PWM_ASSERT(chip, pwm);
+
+
+		/* Get CLOCK MODE infos*/
+		if (is_mode(p,P7PWM_MODE_CLOCK))
+			clock=1 ;
+		else
+			clock=0 ;
+
+		seq_printf(s, " pwm-%-3d (%-20.20s)", p, pwm->label);
+		if (req || en) {
+			p7pwm_info(pwm, &freq, &duty, &duty_ns, clock);
+
+			seq_printf(s, "requested=%d enabled=%d clock_mode=%d freq=%luHz duty=%lu/100(%luns)\n",
+			           req,
+			           en,
+			           clock,
+			           freq,
+			           duty,
+			           duty_ns);
+			seq_printf(s,"	period_precision=%d, duty_precision=%d (min_ratio=%d)\n",
+			           pdata->conf[pwm->hwpwm]->period_precision,
+			           pdata->conf[pwm->hwpwm]->duty_precision,
+			           config[pwm->hwpwm].min_ratio);
+			seq_printf(s,"	period error=%d/1000, duty error=%d/1000\n",
+			           (int)(config[pwm->hwpwm].period_ns-1000000000/freq)*1000/config[pwm->hwpwm].period_ns,
+			           config[pwm->hwpwm].duty_ns ? (int)(config[pwm->hwpwm].duty_ns-duty_ns)*1000/config[pwm->hwpwm].duty_ns : 0);
+			seq_printf(s,"	REGS={speed=0x%x, ratio=0x%x}\n",
+			           config[pwm->hwpwm].speed_regval,
+			           config[pwm->hwpwm].ratio_regval);
+			seq_printf(s,"	user={period=%dns, duty=%dns}\n",
+			           config[pwm->hwpwm].period_ns,
+			           config[pwm->hwpwm].duty_ns);
+		} else
+			seq_printf(s, "\n");
+	}
+}
+
+#define P7_SHOW_PWM p7_show_pwm
+
+#else /* CONFIG_DEBUG_FS */
+
+#define P7_SHOW_PWM NULL
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* Hook table handed to the generic PWM framework. */
+static struct pwm_ops p7pwm_ops = {
+	.request = p7pwm_request,
+	.free = p7pwm_free,
+	.enable = p7pwm_enable,
+	.disable = p7pwm_disable,
+	.config = p7pwm_configure,
+#ifdef CONFIG_DEBUG_FS
+	.dbg_show = P7_SHOW_PWM,
+#endif
+	.owner = THIS_MODULE,
+};
+
+
+/* Single chip exposing all P7 PWM channels to the framework. */
+static struct pwm_chip p7pwm_chip = {
+	.ops = &p7pwm_ops,
+	.base = P7_FIRST_PWM,
+	.npwm = P7PWM_NUMBER
+};
+
+/* Probe: fetch platform data, enable the functional clock, map the
+ * registers, select the default pin state and register the PWM chip.
+ *
+ * Fixes vs. original:
+ *  - a pwmchip_add() failure no longer leaks the pinctrl handle and
+ *    leaves the clock running;
+ *  - clk_prepare_enable() is now balanced by clk_disable_unprepare()
+ *    on the error paths (the original only called clk_disable()). */
+static int __devinit p7pwm_probe(struct platform_device *pdev)
+{
+	struct resource * r ;
+	int rc;
+
+	dev_dbg(&pdev->dev,"p7pwm_probe:\n") ;
+
+	/* get the platform_data */
+	pdata = (struct p7pwm_pdata *) dev_get_platdata(&pdev->dev) ;
+	if (NULL == pdata) {
+		dev_warn(&pdev->dev,"p7pwm_probe: Cant get minimal ratio : no platform data\n") ;
+		return -EINVAL;
+	}
+
+	/* Enable clock */
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_warn(&pdev->dev,"p7pwm_probe:Failed : Cant get clock\n") ;
+		return PTR_ERR(clk);
+	}
+	/* Get clock rate */
+	clk_freq = clk_get_rate(clk);
+	dev_dbg(&pdev->dev,"p7pwm_probe: clk rate is %ld\n",clk_freq) ;
+
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		dev_warn(&pdev->dev,"p7pwm_probe:Failed :Cant start clock\n") ;
+		goto err_putclk;
+	}
+
+	/* Get base address */
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		dev_warn(&pdev->dev,"p7pwm_probe:Failed : No memory resource defined\n");
+		rc = -ENODEV;
+		goto err_disclk;
+	}
+
+	mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+	dev_dbg(&pdev->dev,"%s: mmio_base = 0x%x\n",__func__,(unsigned int) mmio_base) ;
+	if (mmio_base == NULL) {
+		dev_warn(&pdev->dev,"p7pwm_probe:Failed:Cant get memory base\n");
+		rc = -EADDRNOTAVAIL;
+		goto err_disclk;
+	}
+
+	/* Get pins */
+	pins = pinctrl_get_select_default(&pdev->dev) ;
+	if (IS_ERR(pins)) {
+		dev_err(&pdev->dev, "%s:failed to request pins\n", __func__);
+		rc = PTR_ERR(pins);
+		goto err_disclk;
+	}
+
+	/* Fill pwm_chip and register it */
+	p7pwm_chip.dev = &pdev->dev ;
+	rc = pwmchip_add(&p7pwm_chip);
+	if (rc < 0) {
+		dev_warn(&pdev->dev,"p7pwm_probe:Failed :Cant add chip\n");
+		goto err_putpins;
+	}
+
+	return 0;
+
+err_putpins:
+	pinctrl_put(pins);
+err_disclk:
+	clk_disable_unprepare(clk);
+err_putclk:
+	clk_put(clk) ;
+	return rc;
+}
+
+
+/* Remove: unregister the chip, then release pins and clock.
+ *
+ * Fixes vs. original:
+ *  - the chip is removed *before* releasing pins/clock, and resources
+ *    are kept alive when pwmchip_remove() reports a PWM still in use;
+ *  - clk_prepare_enable() done at probe time is balanced with
+ *    clk_disable_unprepare() (the original only called clk_disable()). */
+static int __devexit p7pwm_remove(struct platform_device *pdev)
+{
+	int rc ;
+
+	dev_dbg(&pdev->dev,"p7pwm_remove:\n") ;
+
+	rc = pwmchip_remove(&p7pwm_chip) ;
+	if (rc) {
+		dev_warn(&pdev->dev,"p7pwm_remove:Removing Driver with a PWM still in use\n") ;
+		return rc;
+	}
+
+	pinctrl_put(pins);
+	clk_disable_unprepare(clk) ;
+	clk_put(clk) ;
+
+	return 0;
+}
+
+
+/* XXX there is no suspend/resume support */
+static struct platform_driver p7pwm_driver = {
+ .driver = {
+ .name = P7PWM_DRV_NAME,
+ },
+ .probe = p7pwm_probe,
+ .remove = __devexit_p(p7pwm_remove),
+};
+
+
+/* Registered at postcore level so that consumers probed later in the
+ * boot sequence can already find the PWM chip. */
+static int __init p7pwm_init(void)
+{
+	return platform_driver_register(&p7pwm_driver);
+}
+postcore_initcall(p7pwm_init);
+
+
+static void __exit p7pwm_exit(void)
+{
+	platform_driver_unregister(&p7pwm_driver);
+}
+module_exit(p7pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Victor Lambret <victor.lambret.ext@parrot.com>");
diff --git a/drivers/parrot/pwm/p7_pwm.h b/drivers/parrot/pwm/p7_pwm.h
new file mode 100644
index 00000000000000..499900a861cdb7
--- /dev/null
+++ b/drivers/parrot/pwm/p7_pwm.h
@@ -0,0 +1,44 @@
+/**
+ * linux/drivers/parrot/pwm/p7-pwm.h - Parrot7 PWM driver interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Victor Lambret <victor.lambret.ext@parrot.com>
+ * date: 28-Nov-2012
+ *
+ * This file is released under the GPL
+ *
+ */
+
+#ifndef P7PWM_H
+#define P7PWM_H
+
+#include <mach/pwm.h>
+
+/* Number of hardware PWM channels exposed by the chip. */
+#define P7PWM_NUMBER (P7_PWMS_MAX)
+
+#define P7PWM_DRV_NAME "p7_pwm"
+
+/* Per-channel operating modes (see mode field below). */
+#define P7PWM_MODE_NORMAL 0
+#define P7PWM_MODE_CLOCK 1
+
+/* period_precision : a per cent value for the max error rate accepted on
+ * frequency
+ * duty_precision : a per cent value for the max error rate accepted on
+ * duty cycle (XXX this not really a percent)
+ * mode : P7PWM_MODE_NORMAL for normal PWM mode
+ * P7PWM_MODE_CLOCK for clock mode
+ * */
+
+struct p7pwm_conf {
+	int16_t period_precision ;
+	int16_t duty_precision ;
+	uint8_t mode ;
+} ;
+
+/* Platform data: bitmask of usable channels plus one optional
+ * configuration entry per channel (NULL = channel unconfigured). */
+struct p7pwm_pdata {
+	size_t used ;
+	struct p7pwm_conf * conf[P7PWM_NUMBER] ;
+} ;
+
+#endif /*P7PWM_H*/
diff --git a/drivers/parrot/pwm/p7mu-pwm.c b/drivers/parrot/pwm/p7mu-pwm.c
new file mode 100644
index 00000000000000..de0f8b077b7c2a
--- /dev/null
+++ b/drivers/parrot/pwm/p7mu-pwm.c
@@ -0,0 +1,441 @@
+/**
+ * linux/drivers/parrot/pwm/p7mu-pwm.c - Parrot7 power management unit PWM
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 16-Nov-2012
+ *
+ * This file is released under the GPL
+ *
+ * TODO:
+ * - review
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/pwm.h>
+#include <mfd/p7mu-pin.h>
+#include <mfd/p7mu-clk.h>
+
+/* Per-block register offsets within the I/O region reserved from the
+ * P7MU MFD core (16-bit registers accessed over I2C). */
+#define P7MU_PWM_RATIO ((u16) 0)
+#define P7MU_PWM_DIV ((u16) 1)
+#define P7MU_PWM_CTRL ((u16) 2)
+/* CTRL register bits. */
+#define P7MU_PWM_EN ((u16) (1U << 0))
+#define P7MU_PWM_LOAD ((u16) (1U << 1))
+
+/* Heavy sanity checks, compiled in only for debug builds. */
+#ifdef DEBUG
+#define P7MU_ASSERT_PWM(_chip, _pwm) \
+	BUG_ON(! _chip); \
+	BUG_ON(! _pwm); \
+	BUG_ON(_chip != &p7mu_pwm_chip); \
+	BUG_ON(_chip != _pwm->chip); \
+	BUG_ON(_chip->npwm != P7MU_PWMS_MAX); \
+	BUG_ON(! _chip->pwms); \
+	BUG_ON(_pwm->hwpwm >= chip->npwm); \
+	BUG_ON(! p7mu_pwm_data[_pwm->hwpwm].res); \
+	BUG_ON(IS_ERR(p7mu_pwm_data[_pwm->hwpwm].res))
+#else
+#define P7MU_ASSERT_PWM(_chip, _pwm)
+#endif
+
+/* Per-block state: last divisor / ratio written (to skip redundant I2C
+ * writes) and the reserved I/O region. */
+struct p7mu_pwm {
+	u16 div;
+	u16 ratio;
+	struct resource const* res;
+};
+
+static struct pwm_chip p7mu_pwm_chip;
+static struct p7mu_pwm p7mu_pwm_data[P7MU_PWMS_MAX];
+
+/* Multiplex requested PWM block to pin setup by platform. */
+static int p7mu_request_pwm(struct pwm_chip* chip, struct pwm_device* pwm)
+{
+ int err;
+
+ P7MU_ASSERT_PWM(chip, pwm);
+
+ /* Forward call to P7MU pin management layer. */
+ err = p7mu_request_pin((unsigned int) p7mu_pdata()->pwm_pins[pwm->hwpwm],
+ P7MU_PWM_MUX);
+ if (! err) {
+ dev_dbg(chip->dev, "requested %d[%d]\n", pwm->pwm, pwm->hwpwm);
+ return 0;
+ }
+
+ dev_err(chip->dev,
+ "failed to request %d[%d] (%d)\n",
+ pwm->pwm,
+ pwm->hwpwm,
+ err);
+ return err;
+}
+
+/* PWM framework .free hook: force a zero ratio, latch it, stop the block,
+ * then hand the pin back to the P7MU pin management layer.
+ *
+ * Fix vs. original: the success path used to fall through into the 'err'
+ * label, logging a spurious "failed to release" error (with err == 0)
+ * after a perfectly successful release. An explicit return now separates
+ * the two paths. */
+static void p7mu_free_pwm(struct pwm_chip* chip, struct pwm_device* pwm)
+{
+	struct p7mu_pwm* const data = &p7mu_pwm_data[pwm->hwpwm];
+	int err;
+
+	P7MU_ASSERT_PWM(chip, pwm);
+
+	err = p7mu_write16(data->res->start + P7MU_PWM_RATIO, 0);
+	if (err) {
+		dev_dbg(chip->dev, "failed to clear ratio (%d)\n", err);
+		goto err;
+	}
+
+	err = p7mu_mod16(data->res->start + P7MU_PWM_CTRL,
+	                 P7MU_PWM_LOAD,
+	                 P7MU_PWM_LOAD);
+	if (err) {
+		dev_dbg(chip->dev, "failed to reload ratio (%d)\n", err);
+		goto err;
+	}
+	data->ratio = 0;
+
+	err = p7mu_write16(data->res->start + P7MU_PWM_CTRL, 0);
+	if (err) {
+		dev_dbg(chip->dev, "failed to stop (%d)\n", err);
+		goto err;
+	}
+
+	/* Forward call to P7MU pin management layer. */
+	p7mu_free_pin((unsigned int) p7mu_pdata()->pwm_pins[pwm->hwpwm]);
+	dev_dbg(chip->dev, "released %d[%d]\n", pwm->pwm, pwm->hwpwm);
+	return;
+
+err:
+	dev_err(chip->dev,
+	        "failed to release %d[%d] (%d)\n",
+	        pwm->pwm,
+	        pwm->hwpwm,
+	        err);
+}
+
+/* Period in usec for divisor register value 'div' with parent clock
+ * 'hz'; factored through hundredths to avoid 32-bit overflow. */
+static unsigned long p7mu_pwm_period(uint16_t div, unsigned long hz)
+{
+	return ((256 * ((unsigned long) div + 1)) *
+	        ((unsigned long) USEC_PER_SEC / 100UL)) /
+	       (hz / 100UL);
+}
+
+/* Duty time for 'period' given 'ratio': duty = period * ratio / 256. */
+static unsigned long p7mu_pwm_duty(unsigned long period, uint16_t ratio)
+{
+	return (period * (unsigned long) ratio) / 256;
+}
+
+/*
+ * PWM framework .config hook.
+ * PWM layer guarantees duty_ns <= period_ns & period_ns > 0.
+ * Compute using usecs to avoid overflows. Periods range:
+ * 128000 nsec -> 0xffffffff nsec (~ 4.3 sec)
+ * 7128 Hz -> ~ 0.23 Hz
+ *
+ * Chooses the parent clock rate, divisor and 9-bit ratio matching the
+ * requested timings, programs them over I2C (skipping writes when the
+ * cached values already match), then latches them with the LOAD bit.
+ *
+ * Fix vs. original: "ratop" typo in the load-failure log message.
+ */
+static int p7mu_config_pwm(struct pwm_chip* chip,
+                           struct pwm_device* pwm,
+                           int duty_ns,
+                           int period_ns)
+{
+	struct p7mu_pwm* const data = &p7mu_pwm_data[pwm->hwpwm];
+	unsigned long const us = (unsigned long) period_ns / NSEC_PER_USEC;
+	uint16_t div, ratio;
+	unsigned long phz;
+	int err;
+
+	P7MU_ASSERT_PWM(chip, pwm);
+
+	if (! us) {
+		dev_dbg(chip->dev, "invalid period requested\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * Get parent clock frequency in Hz. It must be (at least) 256 times the target
+	 * frequency since this is the minimum divisor value.
+	 */
+	phz = p7mu_round_pwmclk_rate((256UL * (unsigned long) USEC_PER_SEC) / us);
+	if ((int) phz < 0) {
+		dev_dbg(chip->dev, "out of range period requested\n");
+		err = -ERANGE;
+		goto err;
+	}
+
+	/*
+	 * Compute in msecs since this migth otherwise overflow numerator. This
+	 * implies we may very rarely compute a frequency a bit higher than the
+	 * requested one because numerator is rounded down when dividing (in
+	 * addition to loosing precision).
+	 * Consider this as acceptable for now...
+	 */
+	div = (uint16_t) DIV_ROUND_UP((us / 10UL) * (phz / 100UL),
+	                              256UL * (unsigned long) MSEC_PER_SEC) - 1;
+	ratio = (uint16_t) DIV_ROUND_CLOSEST((unsigned long) duty_ns,
+	                                     (unsigned long) period_ns / 256UL);
+#ifdef DEBUG
+	BUG_ON(ratio & ~((u16) 0x1ff));
+#endif
+
+	if (div != data->div || ratio != data->ratio) {
+		/* Avoid useless I2C accesses. */
+		err = p7mu_write16(data->res->start + P7MU_PWM_RATIO, ratio);
+		if (err) {
+			dev_dbg(chip->dev, "failed to set ratio\n");
+			goto err;
+		}
+
+		err = p7mu_write16(data->res->start + P7MU_PWM_DIV, div);
+		if (err) {
+			dev_dbg(chip->dev, "failed to set period\n");
+			goto err;
+		}
+
+		err = p7mu_set_pwmclk_rate(pwm->hwpwm, phz);
+		if (err) {
+			dev_dbg(chip->dev, "failed to set parent clock frequency\n");
+			goto err;
+		}
+
+		err = p7mu_mod16(data->res->start + P7MU_PWM_CTRL,
+		                 P7MU_PWM_LOAD,
+		                 P7MU_PWM_LOAD);
+		if (err) {
+			dev_dbg(chip->dev, "failed to load ratio / period\n");
+			goto err;
+		}
+
+#ifdef DEBUG
+		{
+			unsigned long const t = p7mu_pwm_period(div, phz);
+
+			dev_dbg(chip->dev, "configured %d[%d] period=%luusec, duty=%luusec\n",
+			        pwm->pwm,
+			        pwm->hwpwm,
+			        t,
+			        p7mu_pwm_duty(t, ratio));
+		}
+#endif
+	}
+
+	data->div = div;
+	data->ratio = ratio;
+	return 0;
+
+err:
+	dev_err(chip->dev,
+	        "failed to setup %d[%d] (%d)\n",
+	        pwm->pwm,
+	        pwm->hwpwm,
+	        err);
+	return err;
+}
+
+/* PWM framework .enable hook: set the EN bit in the block's CTRL
+ * register via the P7MU I2C register layer. */
+static int p7mu_enable_pwm(struct pwm_chip* chip, struct pwm_device* pwm)
+{
+	int err;
+	struct p7mu_pwm const* const data = &p7mu_pwm_data[pwm->hwpwm];
+
+	P7MU_ASSERT_PWM(chip, pwm);
+
+	err = p7mu_mod16(data->res->start + P7MU_PWM_CTRL,
+	                 P7MU_PWM_EN,
+	                 P7MU_PWM_EN);
+	if (err)
+		dev_err(chip->dev,
+		        "failed to enable %d[%d] (%d)\n",
+		        pwm->pwm,
+		        pwm->hwpwm,
+		        err);
+	else
+		dev_dbg(chip->dev,
+		        "enabled %d[%d]\n",
+		        pwm->pwm,
+		        pwm->hwpwm);
+	return err;
+}
+
+/* PWM framework .disable hook: clear the EN bit in the block's CTRL
+ * register. Failures can only be logged (the hook returns void). */
+static void p7mu_disable_pwm(struct pwm_chip* chip, struct pwm_device* pwm)
+{
+	int err;
+	struct p7mu_pwm const* const data = &p7mu_pwm_data[pwm->hwpwm];
+
+	P7MU_ASSERT_PWM(chip, pwm);
+
+	err = p7mu_mod16(data->res->start + P7MU_PWM_CTRL,
+	                 0,
+	                 P7MU_PWM_EN);
+
+	if (err)
+		dev_err(chip->dev,
+		        "failed to disable %d[%d] (%d)\n",
+		        pwm->pwm,
+		        pwm->hwpwm,
+		        err);
+	else
+		dev_dbg(chip->dev,
+		        "disabled %d[%d]\n",
+		        pwm->pwm,
+		        pwm->hwpwm);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs helper: compute period and duty (usec) for 'pwm' from the
+ * cached divisor/ratio and the current parent clock rate. Returns a
+ * negative errno when the clock rate is unavailable or the block has
+ * never been configured (ratio still at its 0xffff sentinel). */
+static int p7mu_pwm_info(struct pwm_device const* pwm,
+                         unsigned long* period,
+                         unsigned long* duty)
+{
+	struct p7mu_pwm const* const data = &p7mu_pwm_data[pwm->hwpwm];
+	unsigned long const phz = p7mu_get_pwmclk_rate(pwm->hwpwm);
+
+	if ((int) phz < 0) {
+		dev_warn(pwm->chip->dev,
+		         "failed to get parent clock rate for %d[%d] (%d)\n",
+		         pwm->pwm,
+		         pwm->hwpwm,
+		         (int) phz);
+		return (int) phz;
+	}
+
+	if (data->ratio == (u16) ~(0U))
+		/* Not configured yet. */
+		return -EBADFD;
+
+	*period = p7mu_pwm_period(data->div, phz);
+	*duty = p7mu_pwm_duty(*period, data->ratio);
+
+	return 0;
+}
+
+/* debugfs .dbg_show callback: one line per block with request/enable
+ * state and, when configured, the current duty/period in usec. */
+static void p7mu_show_pwm(struct pwm_chip* chip, struct seq_file* s)
+{
+	unsigned int p;
+
+	for (p = 0; p < chip->npwm; p++) {
+		struct pwm_device const* const pwm = &chip->pwms[p];
+		bool const req = test_bit(PWMF_REQUESTED, &pwm->flags);
+		bool const en = test_bit(PWMF_ENABLED, &pwm->flags);
+		unsigned long period, duty;
+
+		P7MU_ASSERT_PWM(chip, pwm);
+
+		seq_printf(s, " pwm-%-3d (%-20.20s)", p, pwm->label);
+		if (req || en) {
+			int const err = p7mu_pwm_info(pwm, &period, &duty);
+
+			if (! err) {
+				if (req)
+					seq_printf(s, ": requested @ %lu / %lu usec", duty, period);
+				else if (en)
+					seq_printf(s, ": enabled @ %lu / %lu usec", duty, period);
+			}
+		}
+		seq_printf(s, "\n");
+	}
+}
+
+#define P7MU_SHOW_PWM p7mu_show_pwm
+
+#else /* CONFIG_DEBUG_FS */
+
+#define P7MU_SHOW_PWM NULL
+
+#endif /* CONFIG_DEBUG_FS */
+
+/* Hook table handed to the generic PWM framework. */
+static struct pwm_ops const p7mu_pwm_ops = {
+	.request = p7mu_request_pwm,
+	.free = p7mu_free_pwm,
+	.config = p7mu_config_pwm,
+	.enable = p7mu_enable_pwm,
+	.disable = p7mu_disable_pwm,
+#ifdef CONFIG_DEBUG_FS
+	.dbg_show = P7MU_SHOW_PWM,
+#endif
+	.owner = THIS_MODULE
+};
+
+/* Single chip exposing all P7MU PWM blocks to the framework. */
+static struct pwm_chip p7mu_pwm_chip = {
+	.ops = &p7mu_pwm_ops,
+	.base = P7MU_FIRST_PWM,
+	.npwm = P7MU_PWMS_MAX
+};
+
+/* Probe: reserve one P7MU I/O region per PWM block from the platform
+ * resources, mark each block unconfigured (ratio sentinel 0xffff) and
+ * register the chip. All successfully reserved regions are released if
+ * anything fails. */
+static int __devinit p7mu_probe_pwm(struct platform_device* pdev)
+{
+	unsigned int r;
+	unsigned int nr;
+	int err;
+
+	for (r = 0, nr = 0; r < pdev->num_resources; r++) {
+		struct resource const* const res = &pdev->resource[r];
+
+		if (resource_type(res) != IORESOURCE_IO)
+			continue;
+
+		p7mu_pwm_data[nr].ratio = (u16) ~(0U);
+		p7mu_pwm_data[nr].res = p7mu_request_region(pdev, res);
+		if (! IS_ERR(p7mu_pwm_data[nr].res))
+			nr++;
+	}
+
+	if (nr != P7MU_PWMS_MAX) {
+		dev_err(&pdev->dev, "failed to find I/O regions\n");
+		err = -ENXIO;
+		goto err;
+	}
+
+	p7mu_pwm_chip.dev = &pdev->dev;
+	err = pwmchip_add(&p7mu_pwm_chip);
+	if (! err)
+		return 0;
+
+err:
+	/* Release only the regions actually reserved (indices 0..nr-1). */
+	for (r = 0; r < nr; r++)
+		p7mu_release_region(p7mu_pwm_data[r].res);
+
+	dev_err(&pdev->dev,
+	        "failed to register chip (%d)\n",
+	        err);
+	return err;
+}
+
+/* Remove: unregister the chip first (bails out if a PWM is still in
+ * use), then release every reserved I/O region. */
+static int __devexit p7mu_remove_pwm(struct platform_device* pdev)
+{
+	unsigned int r;
+	int const err = pwmchip_remove(&p7mu_pwm_chip);
+
+	if (err) {
+		dev_err(&pdev->dev,
+		        "failed to unregister chip (%d)\n",
+		        err);
+		return err;
+	}
+
+	for (r = 0; r < P7MU_PWMS_MAX; r++) {
+#ifdef DEBUG
+		BUG_ON(! p7mu_pwm_data[r].res);
+		BUG_ON(IS_ERR(p7mu_pwm_data[r].res));
+#endif
+		p7mu_release_region(p7mu_pwm_data[r].res);
+	}
+
+	return 0;
+}
+
+#define P7MU_PWM_DRV_NAME "p7mu-pwm"
+
+/* Platform driver glue; no suspend/resume support. */
+static struct platform_driver p7mu_pwm_driver = {
+	.driver = {
+		.name = P7MU_PWM_DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = p7mu_probe_pwm,
+	.remove = __devexit_p(&p7mu_remove_pwm),
+};
+
+module_platform_driver(p7mu_pwm_driver);
+
+MODULE_DESCRIPTION("Parrot Power Management Unit PWM driver");
+/* Fix vs. original: author name was mojibake ("Grİgor"); the file header
+ * and e-mail address establish "Gregor Boirie". */
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/regulator/Kconfig b/drivers/parrot/regulator/Kconfig
new file mode 100644
index 00000000000000..051cde8b9cec8e
--- /dev/null
+++ b/drivers/parrot/regulator/Kconfig
@@ -0,0 +1,7 @@
+config REGULATOR_SWITCH_VOLTAGE
+ bool "Switch voltage regulator"
+ depends on REGULATOR
+ default MMC_SDHCI_ACS3 if ARCH_PARROT7
+ help
+ Switch voltage regulator based on GPIO regulator.
+
diff --git a/drivers/parrot/regulator/Makefile b/drivers/parrot/regulator/Makefile
new file mode 100644
index 00000000000000..c0a3f3c738b7eb
--- /dev/null
+++ b/drivers/parrot/regulator/Makefile
@@ -0,0 +1,2 @@
+ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
+obj-$(CONFIG_REGULATOR_SWITCH_VOLTAGE) += switch-voltage-regulator.o
diff --git a/drivers/parrot/regulator/switch-voltage-regulator.c b/drivers/parrot/regulator/switch-voltage-regulator.c
new file mode 100644
index 00000000000000..f31c0514e74d1d
--- /dev/null
+++ b/drivers/parrot/regulator/switch-voltage-regulator.c
@@ -0,0 +1,288 @@
+/**
+ * linux/driver/parrot/mmc/switch_voltage_regulator.c - Voltage switch
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ * date: 06-Jun-2013
+ *
+ * based on gpio-regulator.c
+ *
+ * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include "switch-voltage-regulator.h"
+
+struct switch_voltage_data {
+ struct regulator_desc desc;
+ struct regulator_dev *dev;
+
+ bool is_enabled;
+ struct switch_voltage_config *config;
+};
+
+/* Report the cached enable state (mirrors the last enable/disable call). */
+static int switch_voltage_is_enabled(struct regulator_dev *dev)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	return data->is_enabled;
+}
+
+/*
+ * Drive the enable GPIO to its active level. Without a valid enable
+ * GPIO the regulator is treated as always-on, so this is a no-op and
+ * is_enabled keeps the value set at probe time.
+ */
+static int switch_voltage_enable(struct regulator_dev *dev)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	if (gpio_is_valid(data->config->enable_gpio)) {
+		gpio_set_value_cansleep(data->config->enable_gpio,
+					data->config->enable_high);
+		data->is_enabled = true;
+	}
+
+	return 0;
+}
+
+/* Drive the enable GPIO to its inactive level (no-op without a GPIO). */
+static int switch_voltage_disable(struct regulator_dev *dev)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	if (gpio_is_valid(data->config->enable_gpio)) {
+		gpio_set_value_cansleep(data->config->enable_gpio,
+					!data->config->enable_high);
+		data->is_enabled = false;
+	}
+
+	return 0;
+}
+
+/* Start-up delay for the regulator core, from platform data
+ * (microseconds per the switch_voltage_config documentation). */
+static int switch_voltage_enable_time(struct regulator_dev *dev)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	return data->config->startup_delay;
+}
+
+/*
+ * Return the current output voltage in microvolts by reading back the
+ * switch GPIO level and matching it against the two configured states.
+ * Without a switch GPIO the regulator is fixed at states[0].
+ */
+static int switch_voltage_get_voltage(struct regulator_dev *dev)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	if (gpio_is_valid(data->config->switch_gpio)) {
+		int gpio_value = gpio_get_value_cansleep(data->config->switch_gpio);
+		int ptr;
+		for (ptr = 0; ptr < 2; ptr++)
+			if (data->config->states[ptr].gpio == gpio_value)
+				return data->config->states[ptr].value;
+	} else
+		return data->config->states[0].value;
+
+	/* GPIO level matched neither configured state. */
+	return -EINVAL;
+}
+
+/*
+ * Select the configured state whose voltage falls within [min_uV,
+ * max_uV] and drive the switch GPIO accordingly, optionally disabling
+ * the supply around the transition (config->switch_disabled).
+ * Returns -EINVAL when no state matches; 0 otherwise (including the
+ * fixed, no-switch-GPIO case).
+ */
+static int switch_voltage_set_voltage(struct regulator_dev *dev,
+					int min_uV, int max_uV,
+					unsigned *selector)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	if (gpio_is_valid(data->config->switch_gpio)) {
+		unsigned int ptr;
+		int target;
+		bool was_enabled = data->is_enabled;
+
+		/*
+		 * target must be signed: it was previously declared
+		 * unsigned, which made the "no match" check below always
+		 * false and let an out-of-range request drive the GPIO
+		 * with (unsigned)-1.
+		 */
+		target = -1;
+		for (ptr = 0; ptr < 2; ptr++)
+			if (data->config->states[ptr].value >= min_uV &&
+			    data->config->states[ptr].value <= max_uV)
+				target = data->config->states[ptr].gpio;
+
+		if (target < 0)
+			return -EINVAL;
+
+		/* Optionally cut the supply while switching levels. */
+		if (was_enabled && data->config->switch_disabled)
+			switch_voltage_disable(dev);
+
+		gpio_set_value_cansleep(data->config->switch_gpio, target);
+
+		if (data->config->switch_delay)
+			msleep(data->config->switch_delay);
+
+		if (was_enabled && data->config->switch_disabled)
+			switch_voltage_enable(dev);
+	}
+
+	return 0;
+}
+
+/*
+ * Report the voltage (in microvolts) for a given selector. Only two
+ * selectors exist; a regulator without a switch GPIO always reports its
+ * single fixed state.
+ */
+static int switch_voltage_list_voltage(struct regulator_dev *dev,
+				       unsigned selector)
+{
+	struct switch_voltage_data *data = rdev_get_drvdata(dev);
+
+	if (selector >= 2)
+		return -EINVAL;
+
+	if (!gpio_is_valid(data->config->switch_gpio))
+		return data->config->states[0].value;
+
+	return data->config->states[selector].value;
+}
+
+static struct regulator_ops switch_voltage_ops = {
+ .is_enabled = switch_voltage_is_enabled,
+ .enable = switch_voltage_enable,
+ .disable = switch_voltage_disable,
+ .enable_time = switch_voltage_enable_time,
+ .get_voltage = switch_voltage_get_voltage,
+ .set_voltage = switch_voltage_set_voltage,
+ .list_voltage = switch_voltage_list_voltage,
+};
+
+/*
+ * Probe: allocate per-device state, claim the optional enable and
+ * switch GPIOs described in platform data, then register the regulator.
+ * Error paths unwind through the goto cleanup chain at the bottom.
+ */
+static int __devinit switch_voltage_probe(struct platform_device *pdev)
+{
+	struct switch_voltage_config *config = pdev->dev.platform_data;
+	struct switch_voltage_data *drvdata;
+	int ret;
+
+	drvdata = kzalloc(sizeof(struct switch_voltage_data), GFP_KERNEL);
+	if (drvdata == NULL) {
+		dev_err(&pdev->dev, "Failed to allocate device data\n");
+		return -ENOMEM;
+	}
+
+	/* NOTE(review): desc.name aliases platform data (no kstrdup), so
+	 * it must never be kfree()d by this driver. */
+	drvdata->desc.name = config->supply_name;
+
+	drvdata->desc.owner = THIS_MODULE;
+	drvdata->desc.type = REGULATOR_VOLTAGE;
+	drvdata->desc.ops = &switch_voltage_ops;
+
+	drvdata->config = config;
+
+	if (gpio_is_valid(config->enable_gpio)) {
+		ret = gpio_request(config->enable_gpio, config->supply_name);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Could not obtain regulator enable GPIO %d: %d\n",
+				config->enable_gpio, ret);
+			goto err;
+		}
+
+		/* set output direction without changing state
+		 * to prevent glitch
+		 */
+		if (config->enabled_at_boot) {
+			drvdata->is_enabled = true;
+			ret = gpio_direction_output(config->enable_gpio,
+						config->enable_high);
+		} else {
+			drvdata->is_enabled = false;
+			ret = gpio_direction_output(config->enable_gpio,
+						!config->enable_high);
+		}
+
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Could not configure regulator enable GPIO %d direction: %d\n",
+				config->enable_gpio, ret);
+			goto err_enablegpio;
+		}
+	} else {
+		/* Regulator without GPIO control is considered
+		 * always enabled
+		 */
+		drvdata->is_enabled = true;
+	}
+
+	if (gpio_is_valid(config->switch_gpio)) {
+		ret = gpio_request(config->switch_gpio, config->supply_name);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Could not obtain regulator switch GPIO %d: %d\n",
+				config->switch_gpio, ret);
+			goto err_enablegpio;
+		}
+
+		/* Default to the first (boot) state. */
+		ret = gpio_direction_output(config->switch_gpio,
+						config->states[0].gpio);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Could not configure regulator switch GPIO %d direction: %d\n",
+				config->switch_gpio, ret);
+			goto err_switchgpio;
+		}
+
+		drvdata->desc.n_voltages = 2;
+	} else
+		drvdata->desc.n_voltages = 1;
+
+	drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
+					  config->init_data, drvdata, NULL);
+	if (IS_ERR(drvdata->dev)) {
+		ret = PTR_ERR(drvdata->dev);
+		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+		goto err_switchgpio;
+	}
+
+	platform_set_drvdata(pdev, drvdata);
+
+	return 0;
+
+err_switchgpio:
+	if (gpio_is_valid(config->switch_gpio))
+		gpio_free(config->switch_gpio);
+err_enablegpio:
+	if (gpio_is_valid(config->enable_gpio))
+		gpio_free(config->enable_gpio);
+err:
+	kfree(drvdata);
+	return ret;
+}
+
+/*
+ * Remove: unregister the regulator and release both GPIOs, then free
+ * the driver state.
+ */
+static int __devexit switch_voltage_remove(struct platform_device *pdev)
+{
+	struct switch_voltage_data *drvdata = platform_get_drvdata(pdev);
+
+	regulator_unregister(drvdata->dev);
+
+	if (gpio_is_valid(drvdata->config->switch_gpio))
+		gpio_free(drvdata->config->switch_gpio);
+	if (gpio_is_valid(drvdata->config->enable_gpio))
+		gpio_free(drvdata->config->enable_gpio);
+
+	/*
+	 * Do NOT kfree(drvdata->desc.name): it aliases
+	 * config->supply_name, which is owned by the platform data and
+	 * was never duplicated by this driver (probe does no kstrdup()),
+	 * so freeing it here would free memory we do not own.
+	 */
+	kfree(drvdata);
+
+	return 0;
+}
+
+static struct platform_driver switch_voltage_driver = {
+ .probe = switch_voltage_probe,
+ .remove = __devexit_p(switch_voltage_remove),
+ .driver = {
+ .name = "switch-voltage",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Registered at subsys_initcall time so the supply exists before the
+ * consumers (e.g. MMC hosts) probe. */
+static int __init switch_voltage_init(void)
+{
+	return platform_driver_register(&switch_voltage_driver);
+}
+subsys_initcall(switch_voltage_init);
+
+static void __exit switch_voltage_exit(void)
+{
+	platform_driver_unregister(&switch_voltage_driver);
+}
+module_exit(switch_voltage_exit);
+
+MODULE_AUTHOR("Jeremie Samuel<jeremie.samuel.ext@parrot.com>");
+MODULE_DESCRIPTION("switch voltage regulator");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:switch-voltage");
diff --git a/drivers/parrot/regulator/switch-voltage-regulator.h b/drivers/parrot/regulator/switch-voltage-regulator.h
new file mode 100644
index 00000000000000..2e6e243b43cfba
--- /dev/null
+++ b/drivers/parrot/regulator/switch-voltage-regulator.h
@@ -0,0 +1,75 @@
+/**
+ * linux/driver/parrot/mmc/switch_voltage_regulator.h - Voltage switch
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Jeremie Samuel <jeremie.samuel.ext@parrot.com>
+ * date: 06-Jun-2013
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef __SWITCH_VOLTAGE_H
+#define __SWITCH_VOLTAGE_H
+
+#include <linux/regulator/driver.h>
+
+struct regulator_init_data;
+
+enum regulator_type;
+
+/**
+ * struct switch_voltage_state - state description
+ * @value: microvolts or microamps
+ * @gpios: gpio target-state for the value
+ *
+ * This structure describes a supported setting of the regulator
+ * and the necessary gpio-state to achieve it.
+ */
+struct switch_voltage_state {
+ int value;
+ int gpio;
+};
+
+/**
+ * struct switch_voltage_config - config structure
+ * @supply_name: Name of the regulator supply
+ * @enable_gpio: GPIO to use for enable control
+ * set to -EINVAL if not used
+ * @enable_high: Polarity of enable GPIO
+ * 1 = Active high, 0 = Active low
+ * @enabled_at_boot: Whether regulator has been enabled at
+ * boot or not. 1 = Yes, 0 = No
+ * This is used to keep the regulator at
+ * the default state
+ * @startup_delay: Start-up time in microseconds
+ * @switch_gpio: Switch GPIO
+ * @switch_disabled: Need to disable the regulator during
+ * the switch
+ * @switch_delay: Switch time in microseconds
+ * @states: Array of gpio_regulator_state entries describing
+ * the gpio state for specific voltages
+ * The first state is the default state.
+ * @init_data: regulator_init_data
+ *
+ * This structure contains gpio-voltage regulator configuration
+ * information that must be passed by platform code to the
+ * gpio-voltage regulator driver.
+ */
+struct switch_voltage_config {
+ const char *supply_name;
+
+ int enable_gpio;
+ unsigned enable_high:1;
+ unsigned enabled_at_boot:1;
+ unsigned startup_delay;
+
+ int switch_gpio;
+ unsigned switch_disabled:1;
+ unsigned switch_delay;
+ struct switch_voltage_state *states;
+
+ struct regulator_init_data *init_data;
+};
+
+#endif
diff --git a/drivers/parrot/rtc/Kconfig b/drivers/parrot/rtc/Kconfig
new file mode 100644
index 00000000000000..1fa0c23a2be64b
--- /dev/null
+++ b/drivers/parrot/rtc/Kconfig
@@ -0,0 +1,10 @@
+config RTC_P7MU
+ tristate "Parrot P7MU RTC"
+ depends on RTC_CLASS && MFD_P7MU
+ default MFD_P7MU
+ help
+ If you say yes here you get support for RTC function Parrot
+ Power Management Unit companion chip.
+
+ This driver can also be built as a module. If so, the module
+	  will be called rtc-p7mu.
diff --git a/drivers/parrot/rtc/Makefile b/drivers/parrot/rtc/Makefile
new file mode 100644
index 00000000000000..4095aca326f8cd
--- /dev/null
+++ b/drivers/parrot/rtc/Makefile
@@ -0,0 +1,3 @@
+ccflags-$(CONFIG_RTC_DEBUG) += -DDEBUG
+ccflags-y += -I$(srctree)/drivers/parrot
+obj-$(CONFIG_RTC_P7MU) += rtc-p7mu.o
diff --git a/drivers/parrot/rtc/rtc-p7mu.c b/drivers/parrot/rtc/rtc-p7mu.c
new file mode 100644
index 00000000000000..cf39767cb34e7d
--- /dev/null
+++ b/drivers/parrot/rtc/rtc-p7mu.c
@@ -0,0 +1,281 @@
+/**
+ * linux/drivers/parrot/rtc/rtc-p7mu.c - Parrot7 power management unit RTC
+ * block implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 16-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <mfd/p7mu.h>
+
+#define P7MU_RTC_NAME "p7mu-rtc"
+
+/*
+ * RTC register offset from page address
+ */
+#define P7MU_RTC_TIME_H ((u16) 0)
+#define P7MU_RTC_TIME_L ((u16) 1)
+#define P7MU_RTC_MATCH_H ((u16) 2)
+#define P7MU_RTC_MATCH_L ((u16) 3)
+#define P7MU_RTC_LOAD_H ((u16) 4)
+#define P7MU_RTC_LOAD_L ((u16) 5)
+#define P7MU_RTC_CTRL ((u16) 6)
+
+#define P7MU_RTC_TIME P7MU_RTC_TIME_H
+#define P7MU_RTC_MATCH P7MU_RTC_MATCH_H
+#define P7MU_RTC_LOAD P7MU_RTC_LOAD_H
+
+#define P7MU_RTC_EN ((u16) (1U << 0))
+#define P7MU_RTC_LOAD_EN ((u16) (1U << 1))
+#define P7MU_RTC_ALRM_EN ((u16) (1U << 2))
+
+static struct resource const* p7mu_rtc_res;
+static int p7mu_rtc_virq;
+static struct rtc_device* p7mu_rtc_dev;
+
+/* Read the current time: a read of RTC_TIME_H latches the whole 32-bit
+ * counter into the two 16-bit APB registers (see comment below). */
+static int p7mu_read_rtc_time(struct device* dev, struct rtc_time* time)
+{
+	u32 sec;
+	/* there is no race condition here :
+	   32bit value from "rtc_cnt" counter is copied into two 16bit
+	   "APB registers" RTC_TIME - 32bit
+	   RTC_TIME_H and RTC_TIME_L when read access to RTC_TIME_H occurred.
+	 */
+	int const err = p7mu_read32(p7mu_rtc_res->start + P7MU_RTC_TIME, &sec);
+
+	if (err)
+		return err;
+
+	rtc_time_to_tm(sec, time);
+	return 0;
+}
+
+/*
+ * Set the current time: write seconds into RTC_LOAD, then set LOAD_EN
+ * (keeping EN) so the counter is reloaded from the LOAD registers.
+ */
+static int p7mu_set_rtc_time(struct device* dev, struct rtc_time* time)
+{
+	unsigned long now;
+	int err;
+
+	rtc_tm_to_time(time, &now);
+
+	err = p7mu_write32(p7mu_rtc_res->start + P7MU_RTC_LOAD, now);
+	if (err)
+		return err;
+
+	return p7mu_mod16(p7mu_rtc_res->start + P7MU_RTC_CTRL,
+			  P7MU_RTC_LOAD_EN | P7MU_RTC_EN,
+			  P7MU_RTC_LOAD_EN | P7MU_RTC_EN);
+}
+
+/*
+ * Read back the programmed alarm: MATCH registers give the alarm time,
+ * the ALRM_EN control bit gives its armed state.
+ */
+static int p7mu_read_rtc_alrm(struct device* dev, struct rtc_wkalrm* alarm)
+{
+	u32 sec;
+	int err;
+	u16 reg;
+
+	dev_dbg(dev, "getting RTC alarm state...\n");
+
+	/*
+	 * Read the 32-bit match value into an actual u32: the previous
+	 * (u32 *)&sec cast onto an unsigned long left the upper half
+	 * uninitialized on LP64 builds.
+	 */
+	err = p7mu_read32(p7mu_rtc_res->start + P7MU_RTC_MATCH, &sec);
+	if (err)
+		return err;
+
+	err = p7mu_read16(p7mu_rtc_res->start + P7MU_RTC_CTRL, &reg);
+	if (err < 0)
+		return err;
+
+	/* Alarm is armed when the ALRM_EN control bit is set. */
+	alarm->enabled = !!(reg & P7MU_RTC_ALRM_EN);
+
+	rtc_time_to_tm(sec, &alarm->time);
+
+	return 0;
+}
+
+/*
+ * Set/clear the alarm-enable bit while always keeping the RTC counting
+ * (P7MU_RTC_EN stays set in the written value).
+ */
+static int __p7mu_enable_rtc_alrm(int enabled)
+{
+	int mask = P7MU_RTC_EN;
+
+	if (enabled)
+		mask |= P7MU_RTC_ALRM_EN;
+	return p7mu_mod16(p7mu_rtc_res->start + P7MU_RTC_CTRL,
+			  mask,
+			  P7MU_RTC_ALRM_EN | P7MU_RTC_EN);
+}
+
+/*
+ * Program the alarm match value. The alarm irq is masked while the
+ * MATCH registers are written so a stale compare cannot fire mid-update.
+ */
+static int p7mu_set_rtc_alrm(struct device* dev, struct rtc_wkalrm* alarm)
+{
+	int err;
+	unsigned long sec;
+
+	dev_dbg(dev, "setting up RTC alarm...\n");
+
+	rtc_tm_to_time(&alarm->time, &sec);
+
+	disable_irq(p7mu_rtc_virq);
+
+	/* 32bit "internal" register is update by values from RTC_MATCH_H and
+	   RTC_MATCH_L "APB registers" when RTC_MATCH_L register is updated
+	 */
+	err = p7mu_write32(p7mu_rtc_res->start + P7MU_RTC_MATCH, sec);
+	if (! err)
+		__p7mu_enable_rtc_alrm(alarm->enabled);
+
+	enable_irq(p7mu_rtc_virq);
+	return err;
+}
+
+/* rtc_class_ops alarm_irq_enable hook. */
+static int p7mu_enable_rtc_alrm(struct device* dev, unsigned int enabled)
+{
+	dev_dbg(dev, "%sabling RTC alarm irq...\n", enabled ? "en" : "dis");
+	return __p7mu_enable_rtc_alrm(enabled);
+}
+
+static struct rtc_class_ops const p7mu_rtc_ops = {
+ .read_time = &p7mu_read_rtc_time,
+ .set_time = &p7mu_set_rtc_time,
+ .read_alarm = &p7mu_read_rtc_alrm,
+ .set_alarm = &p7mu_set_rtc_alrm,
+ .alarm_irq_enable = &p7mu_enable_rtc_alrm
+};
+
+/* Threaded irq handler: report the (one-shot) alarm to the RTC core. */
+static irqreturn_t p7mu_handle_rtc_irq(int irq, void* dev_id)
+{
+	/* Alarm interrupts are one-shot. */
+	rtc_update_irq(p7mu_rtc_dev, 1, RTC_IRQF | RTC_AF);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe: reserve the P7MU register region, enable wakeup, register with
+ * the RTC framework, hook the alarm interrupt and start the counter.
+ * Error paths unwind in reverse order through the labels below.
+ */
+static int __devinit p7mu_probe_rtc(struct platform_device* pdev)
+{
+	int err;
+	struct resource *resirq;
+
+	dev_dbg(&pdev->dev, "probing...\n");
+
+	/* Reserve region from P7MU I2C registers space. */
+	p7mu_rtc_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (! p7mu_rtc_res) {
+		dev_err(&pdev->dev, "failed to find I/O region address\n");
+		err = -ENXIO;
+		goto err;
+	}
+
+	p7mu_rtc_res = p7mu_request_region(pdev, p7mu_rtc_res);
+	if (IS_ERR(p7mu_rtc_res)) {
+		err = PTR_ERR(p7mu_rtc_res);
+		goto err;
+	}
+
+	/* Allow RTC to wake up system */
+	err = device_init_wakeup(&pdev->dev, true);
+	if (err)
+		goto region;
+
+	/* Register device to RTC framework. */
+	p7mu_rtc_dev = rtc_device_register(P7MU_RTC_NAME,
+					   &pdev->dev,
+					   &p7mu_rtc_ops,
+					   THIS_MODULE);
+	if (IS_ERR(p7mu_rtc_dev)) {
+		dev_err(&pdev->dev, "failed to register device\n");
+		err = PTR_ERR(p7mu_rtc_dev);
+		goto wakeup;
+	}
+
+	/* Reserve interrupt for alarm usage. */
+	resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!resirq) {
+		dev_err(&pdev->dev, "failed to find hardware interrupt number\n");
+		/* Was "err = ENODEV": a positive value is not an error
+		 * code, so probe would wrongly report success. */
+		err = -ENODEV;
+		goto dev;
+	}
+
+	p7mu_rtc_virq = resirq->start;
+	err = request_threaded_irq(p7mu_rtc_virq, NULL, &p7mu_handle_rtc_irq,
+				   IRQF_TRIGGER_RISING, "p7mu rtc", NULL);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request irq %d (%d)\n",
+			p7mu_rtc_virq, err);
+		goto dev;
+	}
+
+	/* Enable counting. */
+	err = p7mu_mod16(p7mu_rtc_res->start + P7MU_RTC_CTRL,
+			 P7MU_RTC_EN,
+			 P7MU_RTC_EN);
+	if (err) {
+		dev_err(&pdev->dev, "failed to start counting\n");
+		goto irq;
+	}
+
+	return 0;
+
+irq:
+	free_irq(p7mu_rtc_virq, NULL);
+dev:
+	rtc_device_unregister(p7mu_rtc_dev);
+wakeup:
+	device_init_wakeup(&pdev->dev, false);
+region:
+	p7mu_release_region(p7mu_rtc_res);
+err:
+	dev_err(&pdev->dev, "failed to probe (%d)\n", err);
+	return err;
+}
+
+/*
+ * Remove: stop the RTC counter, release the irq, unregister the device
+ * and release the register region. Always returns 0.
+ */
+static int __devexit p7mu_remove_rtc(struct platform_device* pdev)
+{
+	int err;
+
+	dev_dbg(&pdev->dev, "removing...\n");
+
+	/* Stop the RTC; the write result was previously assigned and
+	 * silently dropped — warn but keep tearing down. */
+	err = p7mu_write16(p7mu_rtc_res->start + P7MU_RTC_CTRL, 0);
+	if (err)
+		dev_warn(&pdev->dev, "failed to stop RTC (%d)\n", err);
+
+	free_irq(p7mu_rtc_virq, NULL);
+	rtc_device_unregister(p7mu_rtc_dev);
+	device_init_wakeup(&pdev->dev, false);
+	p7mu_release_region(p7mu_rtc_res);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* The P7MU RTC keeps counting across suspend; there is no state to
+ * save or restore, so both callbacks are no-ops. */
+static int p7mu_suspend_rtc(struct device* dev)
+{
+	return 0;
+}
+
+static int p7mu_resume_rtc(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(p7mu_rtc_pm_ops,
+			 p7mu_suspend_rtc,
+			 p7mu_resume_rtc);
+
+static struct platform_driver p7mu_rtc_driver = {
+ .driver = {
+ .name = P7MU_RTC_NAME,
+ .owner = THIS_MODULE,
+ .pm = &p7mu_rtc_pm_ops,
+ },
+ .probe = &p7mu_probe_rtc,
+ .remove = __devexit_p(&p7mu_remove_rtc),
+};
+
+module_platform_driver(p7mu_rtc_driver);
+
+MODULE_DESCRIPTION("Parrot Power Management Unit RTC driver");
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/serial/Kconfig b/drivers/parrot/serial/Kconfig
new file mode 100644
index 00000000000000..4bf85ae88e5c05
--- /dev/null
+++ b/drivers/parrot/serial/Kconfig
@@ -0,0 +1,25 @@
+config SERIAL_PARROTX
+ tristate "Parrot5/5+/6/6i/7 serial port support"
+ depends on ARCH_PARROT7
+ select SERIAL_CORE
+ help
+ If you have a board based on Parrot5/5+/6/6i/7 SoC you
+ can enable its onboard serial port by enabling this option.
+
+ To compile this driver as a module, choose M here: the
+ module will be called px-serial.
+
+config SERIAL_PARROTX_CONSOLE
+ bool "Console on Parrot5/5+ serial port"
+ depends on SERIAL_PARROTX
+ select SERIAL_CORE_CONSOLE
+ help
+ Select this option if you want to use one of the Parrot5/5+/6/6i/7 serial ports
+ as console.
+
+config SERIAL_ZPRINT
+ bool "ZPrint transactor support"
+ depends on MACH_PARROT_ZEBU
+ default y
+ help
+ Enable support for ZPrint transactor output.
diff --git a/drivers/parrot/serial/Makefile b/drivers/parrot/serial/Makefile
new file mode 100644
index 00000000000000..edc15a2071e808
--- /dev/null
+++ b/drivers/parrot/serial/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SERIAL_PARROTX) += px-uart.o
+obj-$(CONFIG_SERIAL_ZPRINT) += zprint.o
diff --git a/drivers/parrot/serial/px-uart.c b/drivers/parrot/serial/px-uart.c
new file mode 100644
index 00000000000000..df51483f613fba
--- /dev/null
+++ b/drivers/parrot/serial/px-uart.c
@@ -0,0 +1,848 @@
+/*
+ * @file linux/drivers/char/parrot5.c
+ * @brief Driver for Parrot5/5+/6 UARTs
+ *
+ * Copyright (C) 2007 Parrot S.A.
+ *
+ * @author ivan.djelic@parrot.com
+ * @date 2007-07-11
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#warning Implement device removal (WARNING: [...] does not have a release() function, it is broken and must be fixed)
+
+#if defined(CONFIG_SERIAL_PARROTX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <linux/io.h>
+#include <asm/sizes.h>
+
+#include <mach/regs-uart.h>
+#include "px-uart.h"
+
+#define PARROT5_SERIAL_NAME "ttyPA"
+#define PARROT5_SERIAL_MAJOR 204
+#define PARROT5_SERIAL_MINOR 64
+
+#define UART_TRX_DUMMY_RX (1 << 16)
+#define UART_NR 8
+
+/* module parameter to enable/disable low latency */
+static bool low_latency = 0;
+module_param(low_latency, bool, S_IRUGO);
+MODULE_PARM_DESC(low_latency, "UART low latency enable/disable");
+
+/*
+ * We wrap our port structure around the generic uart_port.
+ */
+struct uart_parrot5_port {
+ struct uart_port port;
+ u32 spd;
+ u32 lcr;
+ u32 lcr_fifo;
+ unsigned int tx_fifosize;
+ unsigned int tx_threshold;
+ struct clk *uart_clk;
+ struct pinctrl *pctl;
+};
+
+/* port locked, interrupts locally disabled */
+static void parrot5_serial_start_tx(struct uart_port *port)
+{
+ __raw_writel(UART_ITX_TIM, port->membase+_UART_ITX);
+}
+
+/* port locked, interrupts locally disabled */
+static void parrot5_serial_stop_tx(struct uart_port *port)
+{
+ /* writing 0 to RSTBRK disables TX interrupts */
+ __raw_writel(0, port->membase+_UART_RSTBRK);
+}
+
+/* port locked, interrupts locally disabled */
+static void parrot5_serial_stop_rx(struct uart_port *port)
+{
+ __raw_writel(0, port->membase+_UART_IRX);
+}
+
+/* port locked, interrupts locally disabled */
+static void parrot5_serial_enable_ms(struct uart_port *port)
+{
+ /* nothing to do */
+}
+
+/*
+ * Drain the RX fifo into the tty layer. Called from the irq handler
+ * with port->lock held; the lock is dropped around
+ * tty_flip_buffer_push() (see note at the bottom).
+ */
+static void parrot5_serial_rx_chars(struct uart_port *port, u32 status)
+{
+	struct tty_struct *tty = port->state->port.tty;
+	unsigned int c, flag;
+
+	while (status & UART_STATUS_RXFILLED) {
+
+		/* UART_TRX_DUMMY_RX marks the read as a real character so
+		 * masking below cannot reduce it to 0. */
+		c = __raw_readl(port->membase+_UART_TRX) | UART_TRX_DUMMY_RX;
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (unlikely(c & UART_TRX_ANY_ERROR)) {
+
+			if (c & UART_TRX_RXBREAK) {
+				port->icount.brk++;
+				if (uart_handle_break(port)) {
+					goto ignore_char;
+				}
+			}
+			if (c & UART_TRX_PARITY_ERROR) {
+				port->icount.parity++;
+			}
+			if (c & UART_TRX_FRAME_ERROR) {
+				port->icount.frame++;
+			}
+			if (c & UART_TRX_OVERRUN) {
+				port->icount.overrun++;
+			}
+
+			/* Keep only the error bits the user asked to see. */
+			c &= port->read_status_mask;
+
+			if (c & UART_TRX_RXBREAK) {
+				flag = TTY_BREAK;
+			}
+			else if (c & UART_TRX_PARITY_ERROR) {
+				flag = TTY_PARITY;
+			}
+			else if (c & UART_TRX_FRAME_ERROR) {
+				flag = TTY_FRAME;
+			}
+		}
+
+		if (uart_handle_sysrq_char(port, c & 0xff)) {
+			goto ignore_char;
+		}
+		uart_insert_char(port, c, UART_TRX_OVERRUN, c, flag);
+
+	ignore_char:
+		status = __raw_readl(port->membase+_UART_STATUS);
+	}
+
+	/* tty_flip_buffer_push doc say we can't call this from interrupt
+	 * context, but other drivers do it
+	 */
+	WARN_ON_ONCE(tty->low_latency == 1);
+
+	/* Drop the port lock while pushing to the line discipline so the
+	 * tty layer cannot recurse into it. */
+	spin_unlock(&port->lock);
+	tty_flip_buffer_push(tty);
+	spin_lock(&port->lock);
+}
+
+/*
+ * Refill the TX fifo from the xmit circular buffer. The x_char (flow
+ * control byte) takes priority; TX interrupts are masked again once the
+ * buffer empties or transmission is stopped.
+ */
+static void parrot5_serial_tx_chars(struct uart_port *port, u32 status)
+{
+	int count;
+	struct circ_buf *xmit = &port->state->xmit;
+	struct uart_parrot5_port *up = (struct uart_parrot5_port *)port;
+
+	if (port->x_char) {
+		__raw_writel(port->x_char, port->membase+_UART_TRX);
+		port->icount.tx++;
+		port->x_char = 0;
+		goto out;
+	}
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+		parrot5_serial_stop_tx(port);
+		goto out;
+	}
+
+	count = up->tx_fifosize;
+
+	/* on Parrot5+ chips TX fifo has a configurable IRQ threshold */
+	if (up->tx_threshold && (status & UART_STATUS_TXFILLED)) {
+		count -= up->tx_threshold;
+	}
+
+	while (!uart_circ_empty(xmit) && (count-- > 0)) {
+
+		__raw_writel(xmit->buf[xmit->tail], port->membase+_UART_TRX);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+	}
+
+	/* Wake up writers once the buffer has drained enough. */
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
+		uart_write_wakeup(port);
+	}
+	if (uart_circ_empty(xmit)) {
+		parrot5_serial_stop_tx(port);
+	}
+
+out:
+	return;
+}
+
+/*
+ * Interrupt handler: service RX then TX repeatedly until the status
+ * register reports no pending interrupt cause.
+ */
+static irqreturn_t parrot5_serial_int(int irq, void *dev_id)
+{
+	u32 status;
+	struct uart_port *port = dev_id;
+
+	spin_lock(&port->lock);
+
+	status = __raw_readl(port->membase+_UART_STATUS);
+
+	do {
+		if (status & UART_STATUS_ITRX) {
+			parrot5_serial_rx_chars(port, status);
+		}
+		if (status & UART_STATUS_ITTX) {
+			parrot5_serial_tx_chars(port, status);
+		}
+		status = __raw_readl(port->membase+_UART_STATUS);
+
+	} while (status & (UART_STATUS_ITRX|UART_STATUS_ITTX));
+
+	spin_unlock(&port->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* port unlocked, interrupt status is caller dependent */
+/* Report TIOCSER_TEMT once the TX fifo has fully drained. */
+static unsigned int parrot5_serial_tx_empty(struct uart_port *port)
+{
+	u32 st = __raw_readl(port->membase+_UART_STATUS);
+
+	if (st & UART_STATUS_TXFILLED)
+		return 0;
+	return TIOCSER_TEMT;
+}
+
+/* port locked, interrupts locally disabled */
+/* Report modem lines: carrier, DSR and (always, see below) CTS. */
+static unsigned int parrot5_serial_get_mctrl(struct uart_port *port)
+{
+	unsigned int result;
+	u32 status;
+
+	status = __raw_readl(port->membase+_UART_STATUS);
+	result = TIOCM_CAR | TIOCM_DSR;
+
+	/* pretend CTS is always on, otherwise we would have to dynamically
+	   report CTS changes, which is useless anyway since everything is done
+	   in HW */
+	if (1 /* !(status & UART_STATUS_CTSN) */) {
+		result |= TIOCM_CTS;
+	}
+	return result;
+}
+
+/* port locked, interrupts locally disabled */
+/* Only nRTS is controllable; RTS_CTRL set forces the pin inactive. */
+static void parrot5_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	u32 lcr = __raw_readl(port->membase+_UART_LCR);
+
+	/* allow manual control of nRTS pin */
+	if (mctrl & TIOCM_RTS) {
+		/* restore automatic nRTS control */
+		lcr &= ~UART_LCR_RTS_CTRL;
+	}
+	else {
+		/* force nRTS to 1, i.e. throttle */
+		lcr |= UART_LCR_RTS_CTRL;
+	}
+	__raw_writel(lcr, port->membase+_UART_LCR);
+}
+
+/* port unlocked, interrupt status is caller dependent */
+/* Assert/deassert a line break via the SETBRK/RSTBRK strobe registers. */
+static void parrot5_serial_break_ctl(struct uart_port *port, int break_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	if (break_state) {
+		/* special write: any value will do */
+		__raw_writel(0, port->membase+_UART_SETBRK);
+	}
+	else {
+		/* special write: any value will do */
+		__raw_writel(0, port->membase+_UART_RSTBRK);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* port_sem locked, interrupts globally disabled */
+/* Startup: claim the irq, program speed/config, unmask RX/TX irqs. */
+static int parrot5_serial_startup(struct uart_port *port)
+{
+	int ret;
+	struct uart_parrot5_port *up = (struct uart_parrot5_port *)port;
+
+	/* allocate IRQ */
+	ret = request_irq(port->irq,
+			  parrot5_serial_int,
+			  0,
+			  PXUART_DRV_NAME,
+			  port);
+	if (ret)
+		return ret;
+
+	/* stop UART */
+	__raw_writel(0, port->membase+_UART_SPD);
+
+	/* write configuration */
+	__raw_writel(up->lcr, port->membase+_UART_LCR);
+
+	spin_lock_irq(&port->lock);
+
+	/* start UART */
+	__raw_writel(up->spd, port->membase+_UART_SPD);
+
+	/* enable interrupts */
+	__raw_writel(UART_IRX_RIM, port->membase+_UART_IRX);
+	__raw_writel(UART_ITX_TIM, port->membase+_UART_ITX);
+
+	spin_unlock_irq(&port->lock);
+
+	return 0;
+}
+
+/* port_sem locked, interrupts status is caller dependent */
+/* Shutdown: mask irqs, stop the port, release the interrupt line. */
+static void parrot5_serial_shutdown(struct uart_port *port)
+{
+	/* disable all interrupts and reset break */
+	spin_lock_irq(&port->lock);
+	__raw_writel(0, port->membase+_UART_IRX);
+	__raw_writel(0, port->membase+_UART_RSTBRK);
+	spin_unlock_irq(&port->lock);
+
+	/* disable the port */
+	__raw_writel(0, port->membase+_UART_SPD);
+
+	/* free the interrupt */
+	free_irq(port->irq, port);
+}
+
+/*
+ * Apply termios settings: compute the baud divisor, build the LCR value
+ * (frame format, parity, flow control, fifo thresholds) and the status
+ * masks, then program the hardware under the port lock.
+ */
+static void parrot5_serial_set_termios(struct uart_port *port,
+				       struct ktermios *termios,
+				       struct ktermios *old)
+{
+	u32 lcr;
+	unsigned long flags;
+	unsigned int baud, csize;
+	struct uart_parrot5_port *up = (struct uart_parrot5_port *)port;
+
+	/*
+	 * We don't support modem control lines.
+	 */
+	termios->c_cflag &= ~(HUPCL | CMSPAR);
+	termios->c_cflag |= CLOCAL;
+
+	/*
+	 * Ask the core to calculate the baudrate for us.
+	 */
+	baud = uart_get_baud_rate(port, termios, old,
+				  port->uartclk >> 20,
+				  port->uartclk >> 4);
+
+	if (baud) {
+		/* Divisor rounded to nearest. */
+		up->spd = (port->uartclk+(baud/2))/baud;
+	}
+
+	/* we only support CS7 and CS8 */
+	csize = termios->c_cflag & CSIZE;
+
+	if ((csize != CS7) && (csize != CS8)) {
+		/* default to CS8 */
+		csize = (old && ((old->c_cflag & CSIZE) == CS7))? CS7 : CS8;
+		termios->c_cflag &= ~CSIZE;
+		termios->c_cflag |= csize;
+	}
+	lcr = (csize == CS8)? UART_LCR_EIGHTBITS : 0;
+
+	if (termios->c_cflag & CSTOPB) {
+		lcr |= UART_LCR_TWOSTOP;
+	}
+	if (termios->c_cflag & PARENB) {
+		lcr |= UART_LCR_PARITY;
+		if (!(termios->c_cflag & PARODD)) {
+			lcr |= UART_LCR_EVEN;
+		}
+	}
+	if (termios->c_cflag & CRTSCTS) {
+		lcr |= UART_LCR_CTSMODE;
+	}
+	if (port->fifosize > 1) {
+		lcr |= up->lcr_fifo;
+	}
+
+	/* change port with interrupts disabled */
+	spin_lock_irqsave(&port->lock, flags);
+
+	/*
+	 * Update the per-port timeout.
+	 */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	port->read_status_mask = UART_TRX_OVERRUN | 0xff;
+
+	if (termios->c_iflag & INPCK) {
+		port->read_status_mask |=
+			UART_TRX_FRAME_ERROR | UART_TRX_PARITY_ERROR;
+	}
+	if (termios->c_iflag & (BRKINT | PARMRK)) {
+		port->read_status_mask |= UART_TRX_RXBREAK;
+	}
+
+	/*
+	 * Characters to ignore
+	 */
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR) {
+		port->ignore_status_mask |=
+			UART_TRX_FRAME_ERROR | UART_TRX_PARITY_ERROR;
+	}
+	if (termios->c_iflag & IGNBRK) {
+		port->ignore_status_mask |= UART_TRX_RXBREAK;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR) {
+			port->ignore_status_mask |= UART_TRX_OVERRUN;
+		}
+	}
+
+	/*
+	 * Ignore all characters if CREAD is not set.
+	 */
+	if ((termios->c_cflag & CREAD) == 0) {
+		port->ignore_status_mask |= UART_TRX_DUMMY_RX;
+	}
+
+	up->lcr = lcr;
+
+	/* set baud rate */
+	__raw_writel(up->spd, port->membase+_UART_SPD);
+
+	/* configure port */
+	__raw_writel(up->lcr, port->membase+_UART_LCR);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * uart_ops.pm hook: gate the UART functional clock. state == 0 means
+ * the port is being powered on; any other state powers it down.
+ */
+static void parrot5_pm(struct uart_port *port,
+		       unsigned int state,
+		       unsigned int oldstate)
+{
+	struct uart_parrot5_port *up = (struct uart_parrot5_port *)port;
+
+	if (! state)
+		clk_prepare_enable(up->uart_clk);
+	else
+		clk_disable_unprepare(up->uart_clk);
+}
+
+/* uart_ops.type hook: port name for /proc/tty, NULL when unconfigured. */
+static const char *parrot5_serial_type(struct uart_port *port)
+{
+	if (port->type == PORT_PARROT5)
+		return "PARROT5";
+	return NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'
+ */
+static void parrot5_serial_release_port(struct uart_port *port)
+{
+	release_mem_region(port->mapbase, SZ_4K);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'
+ */
+static int parrot5_serial_request_port(struct uart_port *port)
+{
+	struct resource *res;
+
+	res = request_mem_region(port->mapbase, SZ_4K, PXUART_DRV_NAME);
+
+	return (res != NULL)? 0 : -EBUSY;
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void parrot5_serial_config_port(struct uart_port *port, int flags)
+{
+	/* Claim the register window and mark the port type. */
+	if ((flags & UART_CONFIG_TYPE) &&
+	    (parrot5_serial_request_port(port) == 0)) {
+		port->type = PORT_PARROT5;
+	}
+}
+
+/*
+ * verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int parrot5_serial_verify_port(struct uart_port *port,
+				      struct serial_struct *ser)
+{
+	int ret = 0;
+
+	/* Only the Parrot5 port type (or "unknown") is acceptable. */
+	if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_PARROT5)) {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/* kgdb/kdb polling read: non-blocking fetch of one RX byte. */
+static int parrot5_serial_poll_get_char(struct uart_port *port)
+{
+	u32 status = __raw_readl(port->membase + _UART_STATUS);
+
+	if (status & UART_STATUS_RXFILLED)
+		return __raw_readl(port->membase + _UART_TRX) & 0xff;
+
+	return NO_POLL_CHAR;
+}
+
+/* kgdb/kdb polling write: spin until the TX fifo has room, then push. */
+static void parrot5_serial_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+	for (;;) {
+		if (!(__raw_readl(port->membase + _UART_STATUS) &
+		      UART_STATUS_TXFILLED))
+			break;
+		cpu_relax();
+	}
+
+	__raw_writel(ch & 0xff, port->membase + _UART_TRX);
+}
+#endif
+
+/*
+ * Serial-core operations for the Parrot UART.  The table is never
+ * modified at run time and uart_port.ops is a const pointer, so declare
+ * it const and let it live in .rodata.
+ */
+static const struct uart_ops parrot5_serial_pops = {
+	.tx_empty = parrot5_serial_tx_empty,
+	.set_mctrl = parrot5_serial_set_mctrl,
+	.get_mctrl = parrot5_serial_get_mctrl,
+	.stop_tx = parrot5_serial_stop_tx,
+	.start_tx = parrot5_serial_start_tx,
+	.stop_rx = parrot5_serial_stop_rx,
+	.enable_ms = parrot5_serial_enable_ms,
+	.break_ctl = parrot5_serial_break_ctl,
+	.startup = parrot5_serial_startup,
+	.shutdown = parrot5_serial_shutdown,
+	.set_termios = parrot5_serial_set_termios,
+	.pm = parrot5_pm,
+	.type = parrot5_serial_type,
+	.release_port = parrot5_serial_release_port,
+	.request_port = parrot5_serial_request_port,
+	.config_port = parrot5_serial_config_port,
+	.verify_port = parrot5_serial_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = parrot5_serial_poll_get_char,
+	.poll_put_char = parrot5_serial_poll_put_char,
+#endif
+};
+
+static struct uart_parrot5_port p5_ports[UART_NR];
+
+/*
+ * Initialize a single Parrot5/5+/6 serial port.
+ *
+ * @up:    per-port driver state embedding the uart_port
+ * @index: uart line number exposed to the serial core
+ */
+static void __init parrot5_serial_init_port(struct uart_parrot5_port *up,
+					    int index)
+{
+	/* Parrot5+ / Parrot6 / Parrot7 */
+	up->tx_fifosize = UART_TX_FIFO_SIZE_P5P;
+	/*
+	 * set default tx threshold to 4 bytes, i.e. a tx
+	 * interrupt will be raised when 4 bytes are left in tx fifo
+	 */
+	up->tx_threshold = 4;
+	/*
+	 * set rx fifo threshold to 48 bytes, i.e. an rx interrupt
+	 * will be raised when 16 empty slots are left in rx fifo
+	 */
+	up->lcr_fifo =
+		UART_LCR_RXFIFO|
+		UART_LCR_TXFIFO|
+		UART_LCR_FIFO_RX_THRESHOLD_48|
+		UART_LCR_FIFO_TX_THRESHOLD_4;
+
+	/* spd/lcr caches are programmed later by set_termios */
+	up->spd = 0;
+	up->lcr = 0;
+	up->port.iotype = UPIO_MEM;
+	/* usable fifo depth as seen by the serial core */
+	up->port.fifosize = up->tx_fifosize-up->tx_threshold;
+	up->port.ops = &parrot5_serial_pops;
+	/* low_latency is a module parameter defined elsewhere in this file */
+	up->port.flags = UPF_BOOT_AUTOCONF|(low_latency? UPF_LOW_LATENCY : 0);
+	up->port.line = index;
+}
+
+#ifdef CONFIG_SERIAL_PARROTX_CONSOLE
+
+/* Busy-wait until there is room in the TX fifo, then emit one char. */
+static void parrot5_serial_console_putchar(struct uart_port *port, int ch)
+{
+	while (__raw_readl(port->membase + _UART_STATUS) &
+	       UART_STATUS_TXFILLED)
+		barrier();
+
+	__raw_writel(ch, port->membase + _UART_TRX);
+}
+
+/* Console output path: hand the buffer to the generic console writer. */
+static void
+parrot5_serial_console_write(struct console *co, const char *s,
+			     unsigned int count)
+{
+	struct uart_port *port = &p5_ports[co->index].port;
+
+	uart_console_write(port, s, count, parrot5_serial_console_putchar);
+}
+
+/*
+ * Recover baud/parity/bits from registers already programmed by the
+ * bootloader so the console keeps its current settings.  Leaves the
+ * caller's defaults untouched when the port was never set up (spd == 0).
+ */
+static void __init
+parrot5_serial_console_get_options(struct uart_port *port, int *baud,
+				   int *parity, int *bits)
+{
+	struct uart_parrot5_port *up = (struct uart_parrot5_port *)port;
+	u32 spd = __raw_readl(port->membase + _UART_SPD);
+	u32 lcr;
+
+	if (!spd)
+		return;
+
+	*baud = clk_get_rate(up->uart_clk) / spd;
+
+	lcr = __raw_readl(port->membase + _UART_LCR);
+
+	if (!(lcr & UART_LCR_PARITY))
+		*parity = 'n';
+	else if (lcr & UART_LCR_EVEN)
+		*parity = 'e';
+	else
+		*parity = 'o';
+
+	*bits = (lcr & UART_LCR_EIGHTBITS) ? 8 : 7;
+}
+
+/*
+ * Console setup: pick the port, then either parse the user-supplied
+ * options or probe the hardware for the bootloader's settings.
+ */
+static int __init parrot5_serial_console_setup(struct console *co,
+					       char *options)
+{
+	int baud = 115200, bits = 8, parity = 'n', flow = 'n';
+	struct uart_parrot5_port *up;
+
+	/*
+	 * Check whether an invalid uart number has been specified, and
+	 * if so, search for the first available port that does have
+	 * console support.
+	 */
+	if (co->index < 0 || co->index >= UART_NR)
+		co->index = 0;
+
+	up = &p5_ports[co->index];
+	if (!up->port.membase)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	else
+		parrot5_serial_console_get_options(&up->port, &baud, &parity,
+						   &bits);
+
+	return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+/* Map a console back to its tty driver via the uart_driver in .data. */
+static struct tty_driver *pxuart_console_device(struct console *co, int *index)
+{
+	struct uart_driver *drv = co->data;
+
+	*index = co->index;
+	return drv->tty_driver;
+}
+
+/* Forward declaration: the console's .data points at the uart driver. */
+static struct uart_driver parrot5_reg;
+/* Kernel console attached to the Parrot UART (console=<name><n>). */
+static struct console parrot5_serial_console = {
+	.name = PARROT5_SERIAL_NAME,
+	.write = parrot5_serial_console_write,
+	.device = pxuart_console_device,
+	.setup = parrot5_serial_console_setup,
+	.flags = CON_PRINTBUFFER,       /* replay the log buffer on enable */
+	.index = -1,                    /* -1: bound by console= parsing */
+	.data = &parrot5_reg,
+};
+
+/* XXX TODO support earlycon stuff ... */
+#define PARROT5_CONSOLE (&parrot5_serial_console)
+#else
+#define PARROT5_CONSOLE NULL
+#endif
+
+/* Serial-core driver descriptor shared by all Parrot UART ports. */
+static struct uart_driver parrot5_reg = {
+	.owner = THIS_MODULE,
+	.driver_name = "Parrot serial",
+	.nr = PXUART_NR,                  /* max number of ports */
+	.dev_name = PARROT5_SERIAL_NAME,  /* /dev node base name */
+	.major = PARROT5_SERIAL_MAJOR,
+	.minor = PARROT5_SERIAL_MINOR,
+	.cons = PARROT5_CONSOLE,          /* NULL when console support is off */
+};
+
+/* System suspend hook: quiesce the port through the serial core. */
+static int parrot5_serial_suspend(struct device *dev)
+{
+	struct uart_parrot5_port *up = dev_get_drvdata(dev);
+
+	if (!up)
+		return 0;
+
+	uart_suspend_port(&parrot5_reg, &up->port);
+	return 0;
+}
+
+/* System resume hook: bring the port back through the serial core. */
+static int parrot5_serial_resume(struct device *dev)
+{
+	struct uart_parrot5_port *up = dev_get_drvdata(dev);
+
+	if (!up)
+		return 0;
+
+	uart_resume_port(&parrot5_reg, &up->port);
+	return 0;
+}
+
+/* System sleep callbacks wired into the platform driver below. */
+static const struct dev_pm_ops parrot5_pm_ops = {
+	.suspend = parrot5_serial_suspend,
+	.resume = parrot5_serial_resume,
+};
+
+/*
+ * Probe one UART platform device: map its registers, fetch its irq and
+ * clock, select the default pinctrl state and register the port with
+ * the serial core.  Resources are released in reverse order on failure.
+ */
+static int __devinit parrot5_serial_probe(struct platform_device *dev)
+{
+	int ret;
+	int irq;
+	struct resource *res;
+	struct uart_parrot5_port *up;
+
+	if ((dev->id < 0) || (dev->id >= UART_NR)) {
+		printk(KERN_ERR"invalid dev id %x\n", dev->id);
+		ret = -ENOENT;
+		goto noregs;
+	}
+	up = &p5_ports[dev->id];
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		ret = -ENOENT;
+		goto noregs;
+	}
+
+	up->port.dev = &dev->dev;
+	up->port.mapbase = res->start;
+	up->port.membase = ioremap(res->start, resource_size(res));
+
+	if (!up->port.membase) {
+		ret = -ENOMEM;
+		goto nomap;
+	}
+
+	/*
+	 * uart_port.irq is unsigned, so "up->port.irq < 0" could never
+	 * detect a failure: check platform_get_irq()'s return value in a
+	 * signed variable before storing it.
+	 */
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto noirq;
+	}
+	up->port.irq = irq;
+
+	up->uart_clk = clk_get(&dev->dev, NULL);
+	if (IS_ERR(up->uart_clk)) {
+		ret = PTR_ERR(up->uart_clk);
+		goto noadd;
+	}
+	up->port.uartclk = clk_get_rate(up->uart_clk);
+
+	up->pctl = pinctrl_get_select_default(&dev->dev);
+	if (IS_ERR(up->pctl)) {
+		ret = PTR_ERR(up->pctl);
+		goto noclk;
+	}
+
+	parrot5_serial_init_port(up, dev->id);
+
+	ret = uart_add_one_port(&parrot5_reg, &up->port);
+	if (ret) {
+		goto put_pin;
+	}
+
+	platform_set_drvdata(dev, up);
+
+	return 0;
+
+put_pin:
+	pinctrl_put(up->pctl);
+noclk:
+	clk_put(up->uart_clk);
+noadd:
+noirq:
+	iounmap(up->port.membase);
+nomap:
+noregs:
+	return ret;
+}
+
+/* Tear a port down in reverse order of parrot5_serial_probe(). */
+static int __devexit parrot5_serial_remove(struct platform_device *dev)
+{
+	struct uart_parrot5_port *up = platform_get_drvdata(dev);
+
+	platform_set_drvdata(dev, NULL);
+
+	if (!up)
+		return 0;
+
+	uart_remove_one_port(&parrot5_reg, &up->port);
+	pinctrl_put(up->pctl);
+	clk_put(up->uart_clk);
+	iounmap(up->port.membase);
+
+	return 0;
+}
+
+/* Platform driver glue; matched against the board's px-uart devices. */
+static struct platform_driver parrot5_serial_driver = {
+	.probe = parrot5_serial_probe,
+	.remove = __devexit_p(parrot5_serial_remove),
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = PXUART_DRV_NAME,
+		.pm = &parrot5_pm_ops,   /* system suspend/resume */
+	},
+};
+
+/* Module entry: register the uart driver, then the platform driver. */
+static int __init parrot5_serial_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "Serial: Parrot5/5+ UART driver $Revision: 1.19 $\n");
+
+	ret = uart_register_driver(&parrot5_reg);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&parrot5_serial_driver);
+	if (ret)
+		uart_unregister_driver(&parrot5_reg);
+
+	return ret;
+}
+
+/* Module exit: unregister in reverse order of parrot5_serial_init(). */
+static void __exit parrot5_serial_exit(void)
+{
+	platform_driver_unregister(&parrot5_serial_driver);
+	uart_unregister_driver(&parrot5_reg);
+}
+
+module_init(parrot5_serial_init);
+module_exit(parrot5_serial_exit);
+
+MODULE_AUTHOR("Parrot S.A. <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("Parrot5/5+/6 serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/serial/px-uart.h b/drivers/parrot/serial/px-uart.h
new file mode 100644
index 00000000000000..290fa47b5ed474
--- /dev/null
+++ b/drivers/parrot/serial/px-uart.h
@@ -0,0 +1,12 @@
+#ifndef _PX_UART_H
+#define _PX_UART_H
+
+#if defined(CONFIG_SERIAL_PARROTX) || \
+ defined(CONFIG_SERIAL_PARROTX_MODULE)
+
+#define PXUART_DRV_NAME "px-uart"
+
+#endif /* defined(CONFIG_SERIAL_PARROTX) || \
+ defined(CONFIG_SERIAL_PARROTX_MODULE) */
+
+#endif
diff --git a/drivers/parrot/serial/zprint.c b/drivers/parrot/serial/zprint.c
new file mode 100644
index 00000000000000..eeabfbe37843fe
--- /dev/null
+++ b/drivers/parrot/serial/zprint.c
@@ -0,0 +1,185 @@
+/**
+ * linux/drivers/parrot/zprint.c - Parrot7 ZPrint Zebu transactor
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 24-Nov-2011
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <mach/p7.h>
+#include "zprint.h"
+
+/* Driver-global state: one char device exposing the transactor. */
+struct zprint {
+	struct class *class;     /* sysfs class for the device node */
+	dev_t devno;             /* dynamically allocated chardev number */
+	struct cdev cdev;        /* chardev backing /dev/zprint */
+	struct device *device;   /* device created under 'class' */
+	spinlock_t lock;         /* initialised in probe; not taken in code visible here */
+};
+
+/* The one instance of the zebu driver */
+static struct zprint zprint = { 0 };
+
+/* Emit one character to the Zebu zprint transactor register. */
+static inline void zprint_putc(unsigned c)
+{
+	writel(c, __MMIO_P2V(P7_ZPRINT));
+}
+
+/*
+ * write(2) handler: copy each user byte to the transactor.
+ *
+ * Follows standard write semantics: if a fault occurs after some bytes
+ * were already emitted, report the partial count instead of an error
+ * (the original returned -EFAULT even for a partial write).
+ */
+static ssize_t zprint_write(struct file *filp,
+			    const char __user *buf,
+			    size_t count,
+			    loff_t *f_pos)
+{
+	size_t cnt;
+
+	for (cnt = 0; cnt < count; cnt++) {
+		char c;
+
+		if (get_user(c, &buf[cnt]))
+			return cnt ? (ssize_t)cnt : -EFAULT;
+
+		zprint_putc((unsigned) c);
+	}
+
+	return count;
+}
+
+/* Only write() is implemented: the transactor is output-only. */
+static struct file_operations const zprint_fops = {
+	.owner = THIS_MODULE,
+	.write = &zprint_write,
+};
+
+/* Saved .write of the console hijacked in zprint_setup_con(). */
+static void (*zprint_write_oldcon)(struct console*, char const*, unsigned);
+
+/*
+ * Replacement console .write: route index-0 output to the transactor
+ * (with LF -> CRLF expansion) and forward everything else to the
+ * original implementation.
+ */
+static void zprint_write_con(struct console* con,
+			     char const* buff,
+			     unsigned int count)
+{
+	if (con->index) {
+		/*
+		 * The original did "return (*fn)(...);" — returning a
+		 * void expression is not valid ISO C; call then return.
+		 */
+		(*zprint_write_oldcon)(con, buff, count);
+		return;
+	}
+
+	while (count--) {
+		if (*buff == '\n')
+			zprint_putc('\r');
+		zprint_putc((unsigned) *buff++);
+	}
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static struct tty_operations zprint_old_ttyops;
+
+/* kgdb polling output: line 0 goes to the transactor, the rest to the
+ * saved tty operations (when they implement poll_put_char). */
+static void zprint_poll_putc(struct tty_driver* driver, int line, char c)
+{
+	if (!line) {
+		zprint_putc((unsigned) c);
+		return;
+	}
+
+	if (zprint_old_ttyops.poll_put_char)
+		zprint_old_ttyops.poll_put_char(driver, line, c);
+}
+#endif
+
+/*
+ * Hijack the active primary console so that everything printed on its
+ * index-0 device also goes through the transactor.
+ *
+ * NOTE(review): walks console_drivers without taking the console lock;
+ * presumably safe at __init/probe time — confirm.
+ */
+static void __init zprint_setup_con(void)
+{
+	struct console* con;
+
+	for (con = console_drivers; con != NULL; con = con->next) {
+		if (con->device) {
+			int idx = -1;
+			struct tty_driver* const drv = con->device(con, &idx);
+
+			/* only grab the enabled boot console on line 0 */
+			if (! idx &&
+			    drv &&
+			    ((con->flags & (CON_ENABLED | CON_CONSDEV)) ==
+			     (CON_ENABLED | CON_CONSDEV)) && con->write) {
+				zprint_write_oldcon = con->write;
+				con->write = &zprint_write_con;
+#ifdef CONFIG_CONSOLE_POLL
+				/* shadow the tty ops to catch kgdb polling output too */
+				zprint_old_ttyops = *drv->ops;
+				zprint_old_ttyops.poll_put_char = &zprint_poll_putc;
+				drv->ops = &zprint_old_ttyops;
+#endif
+				pr_info("Zprint: %s0 console output redirected.\n", con->name);
+				return;
+			}
+		}
+	}
+}
+
+/*
+ * Probe: create the /dev/zprint character device, then redirect the
+ * current console through the transactor.  Unwinds with gotos on error.
+ */
+static int __devinit zprint_probe(struct platform_device* pdev)
+{
+	int err;
+
+	spin_lock_init(&zprint.lock);
+
+	err = alloc_chrdev_region(&zprint.devno, 0, 1, ZPRINT_DRV_NAME);
+	if (err < 0)
+		return err;
+
+	cdev_init(&zprint.cdev, &zprint_fops);
+	zprint.cdev.owner = THIS_MODULE;
+
+	err = cdev_add(&zprint.cdev, zprint.devno, 1);
+	if (err < 0)
+		goto dealloc_chrdev;
+
+	zprint.class = class_create(THIS_MODULE, ZPRINT_DRV_NAME);
+	if (IS_ERR(zprint.class)) {
+		err = PTR_ERR(zprint.class);
+		goto del_cdev;
+	}
+
+	zprint.device = device_create(zprint.class, NULL, zprint.devno, NULL,
+				      ZPRINT_DRV_NAME);
+	if (IS_ERR(zprint.device)) {
+		err = PTR_ERR(zprint.device);
+		goto class_destroy;
+	}
+
+	zprint_setup_con();
+	dev_info(&pdev->dev, "ready\n");
+	return 0;
+
+class_destroy:
+	class_destroy(zprint.class);
+del_cdev:
+	cdev_del(&zprint.cdev);
+dealloc_chrdev:
+	unregister_chrdev_region(zprint.devno, 1);
+	return err;
+}
+
+/* Platform driver glue.  No .remove: the redirection is never undone
+ * in the code visible here — TODO confirm this is intentional. */
+static struct platform_driver zprint_driver = {
+	.probe = zprint_probe,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = ZPRINT_DRV_NAME
+	},
+};
+
+/*
+ * NOTE(review): registered at late_initcall time, presumably so the
+ * regular console exists before zprint_setup_con() hijacks it — confirm.
+ */
+static int __init zprint_init(void)
+{
+	return platform_driver_register(&zprint_driver);
+}
+late_initcall(zprint_init);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Zebu zprint interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/serial/zprint.h b/drivers/parrot/serial/zprint.h
new file mode 100644
index 00000000000000..7dcf608f6cb4b1
--- /dev/null
+++ b/drivers/parrot/serial/zprint.h
@@ -0,0 +1,24 @@
+/**
+ * linux/drivers/parrot/zprint.h - Parrot7 ZPrint Zebu transactor
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 20-Aug-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _ZPRINT_H
+#define _ZPRINT_H
+
+#if defined(CONFIG_SERIAL_ZPRINT) || \
+ defined(CONFIG_SERIAL_ZPRINT_MODULE)
+
+#define ZPRINT_DRV_NAME "zprint"
+
+#endif /* defined(CONFIG_SERIAL_ZPRINT) || \
+ defined(CONFIG_SERIAL_ZPRINT_MODULE) */
+
+#endif
diff --git a/drivers/parrot/sound/p7_aai/Kconfig b/drivers/parrot/sound/p7_aai/Kconfig
new file mode 100644
index 00000000000000..ce20a95007fdb7
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/Kconfig
@@ -0,0 +1,19 @@
+config SND_AAI
+ tristate "Parrot7 AAI"
+ depends on SND
+ depends on ARCH_PARROT7 || ARCH_VEXPRESS
+ select SND_PCM
+ select SND_HWDEP
+ select SND_DYNAMIC_MINORS
+ help
+ Parrot7 AAI driver.
+ This is specific to Parrot7 architecture.
+
+config SND_AAI_DEBUG
+ bool "AAI Debug Mode"
+	default n
+ depends on SND_AAI
+ help
+ Activate AAI debug level messages.
+ Trace AAI registers read/write operations.
+
diff --git a/drivers/parrot/sound/p7_aai/Makefile b/drivers/parrot/sound/p7_aai/Makefile
new file mode 100644
index 00000000000000..4f1a3caf102b52
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/Makefile
@@ -0,0 +1,17 @@
+obj-$(CONFIG_SND_AAI) += aai.o
+
+aai-objs := \
+ aai_alsa.o \
+ aai_group.o \
+ aai_hw.o \
+ aai_ioctl.o \
+ aai_irq.o \
+ aai_module.o \
+ aai_ops.o \
+ aai_clk.o \
+ aai_regs.o \
+ aai_rules.o
+
+ifeq ($(CONFIG_SND_AAI_DEBUG), y)
+ccflags-y += -DDEBUG
+endif
diff --git a/drivers/parrot/sound/p7_aai/aai.h b/drivers/parrot/sound/p7_aai/aai.h
new file mode 100644
index 00000000000000..e980b8b303a7b8
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai.h
@@ -0,0 +1,236 @@
+
+#ifndef _AAI_H_
+#define _AAI_H_
+
+#ifndef CONFIG_ARCH_VEXPRESS
+# include <mach/p7.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/sizes.h>
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/moduleparam.h>
+#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/rawmidi.h>
+#include <sound/initval.h>
+
+#include <sound/hwdep.h>
+
+#include "aai_platform_data.h"
+
+#define NB_RULES 32
+#define NB_CLOCK 5
+#define NB_GROUPS 15
+#define DEV_NAME_LEN 32
+#define CHAN_NAME_LEN 16
+
+#define P7_CHIPREV_R1 0
+#define P7_CHIPREV_R2 1
+#define P7_CHIPREV_R3 2
+
+struct aai_device_t;
+struct card_data_t;
+
+enum aai_dir_t {
+ AAI_RX = 1,
+ AAI_TX = 2
+};
+
+enum aai_format_t {
+ AAI_FORMAT_MONO = 1,
+ AAI_FORMAT_STEREO = 2,
+ AAI_FORMAT_QUAD = 4,
+ AAI_FORMAT_OCTO = 8
+};
+
+enum aai_fifo_mode_t {
+ FIFO_MODE_16 = 1,
+ FIFO_MODE_32 = 2
+};
+
+enum aai_clk {
+ ctrl_clk = 0,
+ i2s_clk = 1,
+ pcm1_clk = 2,
+ pcm2_clk = 3,
+ spdif_clk = 4
+};
+
+struct aai_audio_chan_t {
+ int id;
+ char name[CHAN_NAME_LEN];
+ uint32_t fifo_sel_reg;
+ int used;
+};
+
+struct aai_hw_ops {
+ int (*open)(struct card_data_t *aai,
+ struct aai_device_t *chan);
+ int (*hw_params)(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+ int (*prepare)(struct card_data_t *aai,
+ struct aai_device_t *chan);
+ int (*close)(struct card_data_t *aai,
+ struct aai_device_t *chan);
+ int (*free)(struct card_data_t *aai,
+ struct aai_device_t *chan);
+};
+
+struct aai_hw_channel_t {
+ int fifo_id; /*!< fifo id for this channel */
+ char *pstart; /*!< Ptr to start of transfer buffer */
+ char *pcurrent; /*!< Ptr to next chunk to transfer */
+ char *pfollow; /*!< Ptr to following chunk to transfer */
+ char *pend; /*!< Ptr to end of transfer buffer */
+ uint32_t fifo_conf; /*!< HW fifo config register */
+ uint32_t dmaca; /*!< DMA current address */
+ uint32_t dmana; /*!< DMA next address */
+ uint32_t dmacv; /*!< DMA current valid ONLY FOR R2,R3 */
+ uint32_t dmanv; /*!< DMA next valid ONLY FOR R2,R3 */
+ uint32_t memoffset; /*!< offset in AAI internal memory */
+ enum aai_format_t chan_nb; /*!< number of mono channels for this device */
+ struct aai_audio_chan_t *chan_list[8]; /*!< list of aai internal audio channels */
+ enum aai_fifo_mode_t mode; /*!< fifo mode 16-bits or 32-bits */
+};
+
+struct aai_device_t {
+ /* Misc informations */
+ void *driver_data; /*!< Ptr to driver data */
+ char name[DEV_NAME_LEN]; /*!< Device name ("spk", ... etc) */
+ int ipcm; /*!< pcm identifier number */
+ enum aai_dir_t direction; /*!< RX/TX */
+ int fifo_size; /*!< Size of FIFO tranfer */
+
+ /* Alsa informations */
+ struct snd_pcm_ops *ops; /*!< pcm callbacks operators */
+ const struct snd_pcm_hardware *hw; /*!< alsa hardware params pointer */
+ uint32_t period_frames; /*!< alsa period size in frame */
+ uint32_t period_bytes; /*!< alsa period size in bytes */
+ uint32_t bytes_count; /*!< amount of bytes read or written */
+ uint32_t rate; /*!< Rate value */
+
+ /* Hardware informations */
+ struct aai_hw_channel_t fifo; /*!< HW fifo informations */
+
+ /* DMA informations */
+ uint32_t dma_burst_nb; /*!< DMA transfer number per burst */
+ uint32_t dma_xfer_size; /*!< size of a single DMA transfer */
+ char *dma_area; /*!< pointer on dma transfer buffer */
+
+ /* Buffer informations */
+ uint32_t bufsize; /*!< Buffer size (for each fifo) */
+ uint32_t nbirq; /*!< Number IRQ (Debug only) */
+ uint32_t nbperiod; /*!< Number period elapsed (Debug only) */
+
+ /* rules */
+ int rules[NB_RULES];
+ struct aai_hw_ops aai_hw_ops;
+
+ /* channel synchronization group */
+ int group;
+
+};
+
+struct card_data_t {
+ /* aai resources */
+ struct aai_device_t *chans;
+ int chans_nb;
+ struct aai_pad_t *pad;
+ int irq;
+
+ /* alsa structures */
+ struct snd_card *card;
+ struct snd_pcm **pcms;
+
+ /* memory resources */
+ void __iomem *iobase;
+ struct resource *ioarea;
+ struct device *dev;
+
+ /* hardware dep */
+ struct snd_hwdep *hwdep;
+ spinlock_t mixer_lock;
+ spinlock_t hwlock;
+
+ /* IRQ handle */
+ irq_handler_t irq_handler;
+
+ /* clock */
+ struct clk *clk[NB_CLOCK];
+ struct pinctrl *pin;
+ struct aai_platform_data *pdata;
+
+
+ int fifo_address;
+ int fifo_tx;
+ int fifo_rx;
+
+ /* groups */
+ int groups_ref_count[NB_GROUPS]; /*!< ref count started groups */
+ uint32_t groups_started; /*!< started groups reg cache */
+};
+
+/**
+ * @brief Get pointer to substream's aai-pcm data
+ *
+ * private_data is set to the aai_device_t by aai_pcms_probe().
+ *
+ * @param substream
+ * @return pointer to aai-pcm data
+ */
+static inline struct aai_device_t *substream2chan(struct snd_pcm_substream *substream)
+{
+	return (struct aai_device_t *)(substream->pcm->private_data);
+}
+
+/**
+ * Get pointer to playback substream
+ * We consider that there is only one substream per pcm
+ *
+ * @param aai private driver data
+ * @param ipcm PCM index
+ * @return pointer to playback substream
+ */
+static inline struct snd_pcm_substream *chan2pbacksubstream(struct card_data_t *aai, int ipcm)
+{
+	/*
+	 * streams[SNDRV_PCM_STREAM_PLAYBACK] is index 0.  The casts the
+	 * original carried were no-ops, and its "@return capture" doc
+	 * was a copy/paste error.
+	 */
+	return aai->pcms[ipcm]->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+}
+
+/**
+ * Get pointer to capture substream
+ * We consider that there is only one substream per pcm
+ *
+ * @param aai private driver data
+ * @param ipcm PCM index
+ * @return pointer to capture substream
+ */
+static inline struct snd_pcm_substream *chan2captsubstream(struct card_data_t *aai, int ipcm)
+{
+	/* streams[SNDRV_PCM_STREAM_CAPTURE] is index 1; casts removed. */
+	return aai->pcms[ipcm]->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+}
+
+#define chan2dev(_chan_) (((struct card_data_t *)_chan_->driver_data)->dev)
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_alsa.c b/drivers/parrot/sound/p7_aai/aai_alsa.c
new file mode 100644
index 00000000000000..d326c1bbeddea3
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_alsa.c
@@ -0,0 +1,163 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+
+/*
+ * Create one ALSA pcm per AAI channel and preallocate its DMA buffers.
+ * Returns 0 or a negative errno; on failure the pcms already created
+ * are torn down with the card by the caller.
+ */
+int aai_pcms_probe(struct card_data_t *aai)
+{
+	int err = 0;
+	int ipcm;
+	struct aai_device_t *chan;
+
+	/* alloc pcms table (kcalloc: zeroed, overflow-checked multiply) */
+	aai->pcms = kcalloc(aai->chans_nb, sizeof(struct snd_pcm *),
+			    GFP_KERNEL);
+	if (!aai->pcms) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		chan->driver_data = aai;
+
+		err = snd_pcm_new(aai->card,
+				  chan->name,
+				  ipcm,
+				  ((chan->direction & AAI_TX) == AAI_TX),
+				  ((chan->direction & AAI_RX) == AAI_RX),
+				  &(aai->pcms[ipcm]));
+		if (err < 0) {
+			dev_err(aai->dev, "%s: %s (%d) init failed\n",
+				__func__, chan->name, ipcm);
+			goto exit;
+		}
+
+		aai->pcms[ipcm]->private_data = chan;
+		aai->pcms[ipcm]->info_flags = 0;
+		/* strlcpy guarantees NUL termination (strncpy did not) */
+		strlcpy(aai->pcms[ipcm]->name, chan->name, DEV_NAME_LEN);
+
+		/* Set channel operators */
+		if (chan->direction & AAI_TX) {
+			snd_pcm_set_ops(aai->pcms[ipcm],
+					SNDRV_PCM_STREAM_PLAYBACK, chan->ops);
+		} else if (chan->direction & AAI_RX) {
+			snd_pcm_set_ops(aai->pcms[ipcm],
+					SNDRV_PCM_STREAM_CAPTURE, chan->ops);
+		} else {
+			dev_err(aai->dev, "%s init failed\n", chan->name);
+			err = -EINVAL;
+			goto exit;
+		}
+
+		/* Buffer memory preallocation */
+		err = snd_pcm_lib_preallocate_pages_for_all(aai->pcms[ipcm],
+							    SNDRV_DMA_TYPE_DEV,
+							    NULL,
+							    64*1024,
+							    64*1024);
+		if (err) {
+			dev_err(aai->dev, "%s: alsa memory preallocation "
+				"failed\n", __func__);
+			err = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	return err;
+}
+
+/* Reset the fifo allocation bookkeeping and init every channel's hw. */
+int aai_pcms_init(struct card_data_t *aai)
+{
+	struct aai_device_t *chan;
+	int ipcm;
+	int err;
+
+	aai->fifo_address = 0;
+	aai->fifo_tx = 0;
+	aai->fifo_rx = 0;
+
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		chan->ipcm = ipcm;
+		err = aai_hw_pcm_init(aai, chan);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Build and register the ALSA card for one AAI platform device.
+ * Returns the card on success, NULL on failure (card freed internally).
+ */
+struct snd_card *aai_alsa_probe(struct platform_device *pdev)
+{
+	struct snd_card *card = NULL;
+	struct card_data_t *aai;
+	int dev = pdev->id;
+	int err;
+
+	/* Create ALSA soundcard */
+	err = snd_card_create(index[dev], id[dev], THIS_MODULE,
+			      sizeof(struct card_data_t), &card);
+	if (err < 0) {
+		dev_err(&pdev->dev, "card creation failed\n");
+		return NULL;
+	}
+
+	aai = card->private_data;
+	aai->card = card;
+	aai->dev = &pdev->dev;
+	aai->pdata = dev_get_platdata(&pdev->dev);
+
+	/* Call hardware specific card init */
+	err = aai_hw_init_card(aai, dev);
+	if (err) {
+		dev_err(&pdev->dev, "hw init failed with error %d\n", err);
+		goto free_card;
+	}
+
+	err = aai_pcms_probe(aai);
+	if (err) {
+		dev_err(&pdev->dev, "pcm probe failed with error %d\n", err);
+		goto free_card;
+	}
+
+	/* Set device & register card to ALSA system */
+	snd_card_set_dev(card, &pdev->dev);
+	err = snd_card_register(card);
+	if (err != 0) {
+		dev_err(&pdev->dev, "card registration failed with error %d\n",
+			err);
+		goto free_card;
+	}
+
+	return card;
+
+free_card:
+	snd_card_free(card);
+	return NULL;
+}
+
+/* Undo aai_alsa_probe(): free the preallocated buffers and the card. */
+int aai_alsa_remove(struct snd_card *card)
+{
+	struct card_data_t *aai;
+	int ipcm;
+
+	if (card) {
+		aai = card->private_data;
+
+		for (ipcm = 0; ipcm < aai->chans_nb; ipcm++)
+			snd_pcm_lib_preallocate_free_for_all(aai->pcms[ipcm]);
+
+		snd_card_free(card);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_alsa.h b/drivers/parrot/sound/p7_aai/aai_alsa.h
new file mode 100644
index 00000000000000..fc25454442f6aa
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_alsa.h
@@ -0,0 +1,9 @@
+
+#ifndef _AAI_ALSA_H_
+#define _AAI_ALSA_H_
+
+struct snd_card * __devinit aai_alsa_probe(struct platform_device *pdev);
+int __devexit aai_alsa_remove(struct snd_card *card);
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_clk.c b/drivers/parrot/sound/p7_aai/aai_clk.c
new file mode 100644
index 00000000000000..a913c4865efb2b
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_clk.c
@@ -0,0 +1,113 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_clk.h"
+
+/* One entry per supported sample rate: the parent clock frequency and
+ * the MCLK/MFRAME divisors to program into the AAI global block.
+ * (freq is compared against clk_get_rate(), so presumably Hz — confirm.) */
+struct aai_rate_cfg_t {
+	int rate;        /* sample rate */
+	int freq;        /* required parent clock rate */
+	int mclk_div;    /* AAI_GBC_MCLK divisor */
+	int mframe_div;  /* AAI_GBC_MFRAME divisor */
+};
+
+struct aai_rate_cfg_t aai_rate_tab[] = {
+	{
+		.rate		= 44100,
+		.freq		= 858000000,
+		.mclk_div	= 0x24,
+		.mframe_div	= 0xff,
+	},
+	{
+		.rate		= 48000,
+		.freq		= 786500000,
+		.mclk_div	= 0x3e,
+		.mframe_div	= 0x7f,
+	},
+	{
+		.rate		= 88200,
+		.freq		= 858000000,
+		.mclk_div	= 0x11,
+		.mframe_div	= 0xff,
+	},
+	{
+		.rate		= 96000,
+		.freq		= 786500000,
+		.mclk_div	= 0xe,
+		.mframe_div	= 0xff,
+	},
+};
+
+#define AAI_RATE_NB (sizeof(aai_rate_tab) / sizeof(struct aai_rate_cfg_t))
+
+/*
+ * Reverse lookup of the current sample rate: match the clock frequency
+ * and divisor registers against the rate table.  Returns -1 when the
+ * clock is missing or the hardware state matches no known rate.
+ */
+int aai_get_clk(struct card_data_t *aai, enum aai_clk num)
+{
+	struct aai_rate_cfg_t *cfg;
+	int freq;
+	int mclk_div;
+	int mframe_div;
+	int i;
+
+	if (!aai->clk[num])
+		return -1;
+
+	freq = clk_get_rate(aai->clk[num]);
+	mclk_div = aai_readreg(aai, AAI_GBC_MCLK);
+	mframe_div = aai_readreg(aai, AAI_GBC_MFRAME);
+
+	for (i = 0; i < AAI_RATE_NB; i++) {
+		cfg = &aai_rate_tab[i];
+		if (cfg->freq == freq &&
+		    cfg->mclk_div == mclk_div &&
+		    cfg->mframe_div == mframe_div)
+			return cfg->rate;
+	}
+
+	return -1;
+}
+
+/*
+ * Program clock 'num' for sample rate 'rate' using the lookup table.
+ * Unsupported rates and clock failures are logged and ignored (void
+ * return, as before).
+ */
+void aai_set_clk(struct card_data_t *aai, enum aai_clk num, int rate)
+{
+	struct aai_rate_cfg_t *rate_cfg = NULL;
+	int i;
+
+	dev_dbg(aai->dev, "%s: %d\n", __func__, rate);
+
+	/*
+	 * Check the actual clock handle: the original tested 'aai->clk',
+	 * which is an array member and therefore never NULL.
+	 */
+	if (!aai->clk[num])
+		goto exit;
+
+	for (i = 0; i < AAI_RATE_NB; i++) {
+		rate_cfg = &aai_rate_tab[i];
+		if (rate_cfg->rate == rate)
+			break;
+	}
+
+	if (i == AAI_RATE_NB) {
+		dev_err(aai->dev, "%s: unsupported rate %d\n",
+			__func__, rate);
+		goto exit;
+	}
+
+	clk_set_rate(aai->clk[num],
+		     clk_round_rate(aai->clk[num], rate_cfg->freq));
+	/* don't touch the divisors if the clock could not be enabled */
+	if (clk_prepare_enable(aai->clk[num])) {
+		dev_err(aai->dev, "%s: failed to enable clock\n", __func__);
+		goto exit;
+	}
+
+	aai_writereg(aai, rate_cfg->mclk_div, AAI_GBC_MCLK, 0);
+	aai_writereg(aai, rate_cfg->mframe_div, AAI_GBC_MFRAME, 0);
+
+exit:
+	return;
+}
+
+/*
+ * Balance aai_set_clk(): clk_prepare_enable() must be undone with
+ * clk_disable_unprepare() — the original's bare clk_disable() leaked
+ * the prepare count.
+ */
+void aai_release_clk(struct card_data_t *aai, enum aai_clk num)
+{
+	clk_disable_unprepare(aai->clk[num]);
+	clk_put(aai->clk[num]);
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_clk.h b/drivers/parrot/sound/p7_aai/aai_clk.h
new file mode 100644
index 00000000000000..54dca5675260f5
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_clk.h
@@ -0,0 +1,10 @@
+
+#ifndef _AAI_CLK_H_
+#define _AAI_CLK_H_
+
+int aai_get_clk(struct card_data_t *aai, enum aai_clk);
+void aai_set_clk(struct card_data_t *aai, enum aai_clk, int rate);
+void aai_release_clk(struct card_data_t *aai, enum aai_clk);
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_group.c b/drivers/parrot/sound/p7_aai/aai_group.c
new file mode 100644
index 00000000000000..8679ad66a304f3
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_group.c
@@ -0,0 +1,182 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_group.h"
+
+/* True when synchronization group 'group_nb' is currently running. */
+static int aai_group_is_started(struct card_data_t *aai, int group_nb)
+{
+	uint32_t mask = 1 << group_nb;
+
+	return (aai->groups_started & mask) ? 1 : 0;
+}
+
+/*
+ * Attach channel 'name' to synchronization group 'group_nb' by writing
+ * the group number into the per-fifo nibble of the relevant
+ * AAI_MAIN_IT_GROUPS_{TX,RX} register.
+ *
+ * NOTE(review): groups_ref_count is decremented in aai_group_unset()
+ * and checked in aai_group_start() but never incremented here —
+ * presumably the caller does it; confirm.
+ */
+int aai_group_set(struct card_data_t *aai, char *name, int group_nb)
+{
+	int err = 0;
+	int reg_nb;
+	int reg_shift;
+	uint32_t cfg;
+	struct aai_device_t *chan = NULL;
+	int i;
+
+	/* 0xf is the hardware "no group" reset value, hence the bound */
+	if (group_nb < 0 || group_nb >= 0xf) {
+		dev_warn(aai->dev, "%s: invalid group number (%d)\n",
+			 __func__, group_nb);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* channel lookup by name */
+	for (i = 0; i < aai->chans_nb; i++) {
+		chan = &aai->chans[i];
+		if (!strcmp(chan->name, name))
+			break;
+	}
+	if (i == aai->chans_nb) {
+		dev_warn(aai->dev, "%s: invalid channel: '%s'\n",
+			 __func__, name);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (aai_group_is_started(aai, group_nb)) {
+		dev_warn(aai->dev, "%s: can't add '%s' to already started group %d\n",
+			 __func__, name, group_nb);
+		err = -EBUSY;
+		goto exit;
+	}
+
+	/* was a stray pr_err() debug trace: keep it, at debug level */
+	dev_dbg(aai->dev, "%s: '%s' %d\n", __func__, name, group_nb);
+
+	/*
+	 * Mark channel as belonging to group_nb
+	 */
+	if (chan->group != -1) {
+		dev_warn(aai->dev, "%s: %s already synchronized to group %d\n",
+			 __func__, chan->name, chan->group);
+		err = -EINVAL;
+		goto exit;
+	}
+	chan->group = group_nb;
+
+	/*
+	 * Write relevant register: one 4-bit slot per fifo, 8 per register
+	 */
+	reg_nb = chan->fifo.fifo_id >> 3;
+	reg_shift = (chan->fifo.fifo_id % 8) * 4;
+	if (chan->direction & AAI_TX) {
+		cfg = aai_readreg(aai, AAI_MAIN_IT_GROUPS_TX(reg_nb));
+		cfg &= ~(0xf << reg_shift);
+		cfg |= group_nb << reg_shift;
+		aai_writereg(aai, cfg, AAI_MAIN_IT_GROUPS_TX(reg_nb), 0);
+	} else if (chan->direction & AAI_RX) {
+		cfg = aai_readreg(aai, AAI_MAIN_IT_GROUPS_RX(reg_nb));
+		cfg &= ~(0xf << reg_shift);
+		cfg |= group_nb << reg_shift;
+		aai_writereg(aai, cfg, AAI_MAIN_IT_GROUPS_RX(reg_nb), 0);
+	} else {
+		dev_err(aai->dev, "wrong channel direction\n");
+		err = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	return err;
+}
+
+/*
+ * Detach 'chan' from its synchronization group: restore the hardware
+ * "no group" value (0xf) in its fifo nibble, drop the group's ref
+ * count and stop the group when it reaches zero.
+ * Returns 0 (also when the channel was not in a group) or -EINVAL.
+ */
+int aai_group_unset(struct card_data_t *aai, struct aai_device_t *chan)
+{
+	int err = 0;
+	int reg_nb;
+	int reg_shift;
+	uint32_t cfg;
+	int group_nb = chan->group;
+
+	if (chan->group == -1) {
+		/* nothing to do */
+		err = 0;
+		goto exit;
+	}
+
+	/* one 4-bit slot per fifo, 8 slots per register */
+	reg_nb = chan->fifo.fifo_id >> 3;
+	reg_shift = (chan->fifo.fifo_id % 8) * 4;
+	if (chan->direction & AAI_TX) {
+		cfg = aai_readreg(aai, AAI_MAIN_IT_GROUPS_TX(reg_nb));
+		cfg |= 0xf << reg_shift; /* reset value */
+		aai_writereg(aai, cfg, AAI_MAIN_IT_GROUPS_TX(reg_nb), 0);
+	} else if (chan->direction & AAI_RX) {
+		cfg = aai_readreg(aai, AAI_MAIN_IT_GROUPS_RX(reg_nb));
+		cfg |= 0xf << reg_shift; /* reset value */
+		aai_writereg(aai, cfg, AAI_MAIN_IT_GROUPS_RX(reg_nb), 0);
+	} else {
+		dev_err(aai->dev, "wrong channel direction\n");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	chan->group = -1;
+
+	/* update global ref count (clamped at zero) */
+	aai->groups_ref_count[group_nb]--;
+	if (aai->groups_ref_count[group_nb] < 0)
+		aai->groups_ref_count[group_nb] = 0;
+
+	/* last user gone: stop the whole group */
+	if (!aai->groups_ref_count[group_nb])
+		aai_group_stop(aai, group_nb);
+
+exit:
+	return err;
+}
+
+/*
+ * Start synchronization group 'group_nb' and enable its interrupt.
+ * Fails when the group is empty or already running.
+ */
+int aai_group_start(struct card_data_t *aai, int group_nb)
+{
+	int err = 0;
+
+	/*
+	 * groups_ref_count has NB_GROUPS entries, so NB_GROUPS itself is
+	 * out of range: the original '>' test allowed an out-of-bounds
+	 * array read and a bogus 1 << NB_GROUPS start bit.
+	 */
+	if (group_nb < 0 || group_nb >= NB_GROUPS) {
+		dev_warn(aai->dev, "%s: invalid group number (%d)\n",
+			 __func__, group_nb);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (!aai->groups_ref_count[group_nb]) {
+		dev_warn(aai->dev, "%s: no channel in this group, abort\n",
+			 __func__);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (aai->groups_started & 1 << group_nb) {
+		dev_warn(aai->dev, "%s: group %d already started\n",
+			 __func__, group_nb);
+		err = -EBUSY;
+		goto exit;
+	}
+
+	aai->groups_started |= 1 << group_nb;
+	aai_writereg(aai, aai->groups_started, AAI_MAIN_IT_GROUPS_START, 0);
+
+	aai_group_it_en(aai, group_nb);
+
+exit:
+	return err;
+}
+
+/* Stop synchronization group 'group_nb' and disable its interrupt. */
+int aai_group_stop(struct card_data_t *aai, int group_nb)
+{
+	if (group_nb < 0 || group_nb >= 0xf) {
+		dev_warn(aai->dev, "%s: invalid group number (%d)\n",
+			 __func__, group_nb);
+		return -EINVAL;
+	}
+
+	/* clear the group bit in the cache and latch it into hardware */
+	aai->groups_started &= ~(1 << group_nb);
+	aai_writereg(aai, aai->groups_started, AAI_MAIN_IT_GROUPS_STOP, 0);
+
+	aai_group_it_dis(aai, group_nb);
+
+	return 0;
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_group.h b/drivers/parrot/sound/p7_aai/aai_group.h
new file mode 100644
index 00000000000000..806d63a9c14eff
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_group.h
@@ -0,0 +1,11 @@
+
+#ifndef _AAI_GROUP_H_
+#define _AAI_GROUP_H_
+
+int aai_group_set(struct card_data_t *aai, char *name, int group_nb);
+int aai_group_unset(struct card_data_t *aai, struct aai_device_t *chan);
+int aai_group_start(struct card_data_t *aai, int group_nb);
+int aai_group_stop(struct card_data_t *aai, int group_nb);
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_hw.c b/drivers/parrot/sound/p7_aai/aai_hw.c
new file mode 100644
index 00000000000000..a18212237b7553
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_hw.c
@@ -0,0 +1,626 @@
+
+#include <linux/dma-mapping.h>
+#include <asm/mach-types.h>
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_group.h"
+
+#include "aai_clk.h"
+
+#include "aai_hw_chan.h"
+
+static int aai_hw_set_conf(struct card_data_t *aai)
+{
+ int reg, err = 0;
+ struct aai_conf_set *conf = aai->pdata->aai_conf;
+
+ if (!conf) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ while (conf->addr != -1) {
+ reg = aai_readreg(aai, conf->addr);
+ reg &= ~(conf->mask << conf->shift);
+ reg |= (conf->value & conf->mask) << conf->shift;
+ aai_writereg(aai, reg, conf->addr, 0);
+ conf++;
+ }
+exit:
+ return err;
+}
+
+static int aai_hw_assign_pad(struct card_data_t *aai, enum aai_signal_t signal,
+ int padindex, int direction)
+{
+ int err = 0;
+ if (aai->pdata->chiprev < P7_CHIPREV_R3) {
+ aai_writereg(aai, padindex, AAI_GBC_PADIN(signal), 0);
+ if (direction & PAD_OUT)
+ aai_writereg(aai, signal, AAI_GBC_PADOUT(padindex), 0);
+ } else {
+ aai_writereg(aai, padindex, AAI_LBC_PADIN(signal), 0);
+ if (direction & PAD_OUT)
+ aai_writereg(aai, signal, AAI_LBC_PADOUT(padindex), 0);
+ }
+
+ return err;
+}
+
+static int aai_hw_init_pads(struct card_data_t *aai)
+{
+ struct aai_pad_t *pad = aai->pdata->pad;
+ if (!pad)
+ return 0;
+
+ while (pad->index != -1) {
+ aai_hw_assign_pad(aai, pad->signal, pad->index, pad->direction);
+ pad++;
+ }
+
+ return 0;
+}
+
+static int aai_hw_init_cfg(struct card_data_t *aai)
+{
+ uint32_t cfg;
+ int err = 0;
+ int freq;
+ int pcmdiv;
+
+ /*
+ * Configure music channels
+ * - 32 bit clock cycles per frame
+ * - MSB *not* synchronous with frame edge
+ * - left data transmitted when sync = 0
+ * - MSB sent first
+ * - left justification
+ * - music data 24 bits wide (bit 11, otherwise: 16 bits wide)
+ * - synchonous to DAC
+ * - I2S mode
+ * - biphase & binary mode unused, spdif mode only
+ */
+ cfg = 32 | (1 << 11);
+ aai_writereg(aai, cfg, AAI_AAICFG_MUS(0), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_MUS(1), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_MUS(2), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_MUS(3), 0);
+
+ /*
+ * Configure TDM channels
+ * - 32 bit clock cycles per frame
+ * - left data transmitted when sync = 0
+ * - TDM mode disable
+ */
+ cfg = 32;
+ aai_writereg(aai, cfg, AAI_AAICFG_TDM_IN, 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_TDM_OUT, 0);
+
+ /*
+ * ADC & DAC configuration
+ * - set cycles to 32
+ */
+ cfg = 32;
+ aai_writereg(aai, cfg, AAI_AAICFG_ADC, 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_DAC, 0);
+
+ /*
+ * AUX configuration
+ * - 32 bit clock cycles per frame
+ * - MSB *not* synchonous with frame edge
+ * - left data transmitted when sync = 0
+ * - half rate *not* configured
+ */
+ cfg = 32;
+ aai_writereg(aai, cfg, AAI_AAICFG_AUX, 0);
+
+ /*
+ * MIC muxing configuration
+ * - do not use loop-back mode by default
+ * - high res mode
+ */
+ cfg = 0;
+ aai_writereg(aai, cfg, AAI_AAICFG_MIC_MUX, 0);
+
+ /*
+ * Voice muxing for DAC 0, 1, 2 & 3
+ * - use standard output channels without left/right inversion
+ * (0x9: non-inverted ; 0x6: inverted)
+ */
+ cfg = 0x9 | (0x9 << 8) | (0x9 << 16) | (0x9 << 24);
+ aai_writereg(aai, cfg, AAI_AAICFG_VOI_MUX, 0);
+
+ /*
+ * PCM configuration
+ * - no second PCM frame
+ * - 32 clock cycles in 16kHz mode
+ * - 32 clock cycles in 8kHz mode
+ */
+ cfg = (32 << 8) | (32 << 16) | (1 << 25);
+ aai_writereg(aai, cfg, AAI_AAICFG_PCM(0), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_PCM(1), 0);
+
+ /*
+ * PCM CLK configuration
+ * - output PCM clk 2Mhz
+ * - frame high is 1-1 =0
+ * - frame low is 124-1 in 16kHz or 249-1 in 8kHz
+ */
+ freq = clk_get_rate(aai->clk[i2s_clk]);
+ pcmdiv = freq / 256000 - 2;
+ if (aai->pdata->chiprev < P7_CHIPREV_R3) {
+ aai_writereg(aai, pcmdiv, AAI_GBC_P1CLK, 0);
+ aai_writereg(aai, 0x1e, AAI_GBC_P1FRAME, 0);
+ aai_writereg(aai, pcmdiv, AAI_GBC_P2CLK, 0);
+ aai_writereg(aai, 0x1e, AAI_GBC_P2FRAME, 0);
+ } else {
+ aai_writereg(aai, pcmdiv, AAI_LBC_P1CLK, 0);
+ aai_writereg(aai, 0x1e, AAI_LBC_P1FRAME, 0);
+ aai_writereg(aai, pcmdiv, AAI_LBC_P2CLK, 0);
+ aai_writereg(aai, 0x1e, AAI_LBC_P2FRAME, 0);
+ }
+
+ /*
+ * SPDIF RX
+ * - place status on LSB part
+ * - accept all data
+ */
+ cfg = 0;
+ aai_writereg(aai, cfg, AAI_AAICFG_SPDIFRX, 0);
+
+ /*
+ * SPDIF TX
+ * - parity taken from data[31]
+ * - start mode: wait two subframes, to ensure synchro
+ * - disable transfer from fifo
+ */
+ cfg = 6 << 1;
+ aai_writereg(aai, cfg, AAI_AAICFG_SPDIFTX, 0);
+
+ /* XXX spdif clk div */
+ cfg = 0xff;
+ if (aai->pdata->chiprev < P7_CHIPREV_R3)
+ aai_writereg(aai, cfg, AAI_GBC_SPDIFCLK, 0);
+ else
+ aai_writereg(aai, cfg, AAI_GBC_SPDIFCLK_R3, 0);
+
+ /*
+ * Voice configuration
+ * - volume is 0dB
+ * - channel enable
+ * - no usage of down-sample channel
+ * - no down-sampling factor set
+ * - same configuration for left and right part of channel
+ */
+ cfg = 0x10 | (0x10 << 8);
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE(0), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE(1), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE(2), 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE(3), 0);
+
+ /*
+ * Voice speed
+ * - 16kHz (and 8kHz for half speed mode)
+ * - 16kHz: 0x00810e35 ; 22.05kHz: 0x00b1dac7
+ * - see documentation to get the formula
+ */
+ cfg = AAI_VOICE_SPEED_16_8;
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE_SPEED, 0);
+
+ /*
+ * Voice 8kHz
+ * - Speaker working @ 8kHz
+ * - PCM working @ 8 kHz
+ */
+ cfg = 3;
+ aai_writereg(aai, cfg, AAI_AAICFG_VOICE_8K, 0);
+
+ /*
+ * Music filter5
+ * - high freq
+ */
+ cfg = 0;
+ aai_writereg(aai, cfg, AAI_AAICFG_MUS_FILTER5, 0);
+
+ /*
+ * RAM2RAM SRC filter5
+ * - high freq for both filters
+ */
+ cfg = 0;
+ aai_writereg(aai, cfg, AAI_AAICFG_SRC_FILTER5, 0);
+
+ /*
+ * RAM2RAM SRC mode
+ * - disable for now
+ * - work on stereo streams
+ * - independant RAM2RAM, no octo configuration
+ */
+ cfg = 0;
+ aai_writereg(aai, cfg, AAI_AAICFG_SRC_MODE, 0);
+
+ /*
+ * RAM2RAM SRC speed
+ * - input = output for now
+ */
+ cfg = 1 << 28;
+ aai_writereg(aai, cfg, AAI_AAICFG_SRC_SPEED0, 0);
+ aai_writereg(aai, cfg, AAI_AAICFG_SRC_SPEED1, 0);
+
+ return err;
+}
+
+int aai_hw_pcm_init(struct card_data_t *aai, struct aai_device_t *chan)
+{
+ int err = 0;
+ int i;
+ struct aai_hw_channel_t *fifo = &chan->fifo;
+ uint32_t cfg;
+ struct aai_audio_chan_t *internal_chan;
+ uint32_t reg_fifo16;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&aai->hwlock, flags);
+
+ if (aai->fifo_address + chan->fifo_size >= 4096) {
+ dev_err(aai->dev, "not enough size in aai internal "
+ "memory for fifo allocation\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Set fifo id and registers */
+ if (chan->direction & AAI_TX) {
+ fifo->fifo_id = aai->fifo_tx++;
+ if (fifo->fifo_id > 10) {
+ dev_err(aai->dev, "not enough hardware fifo TX "
+ "available for configuration\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ fifo->fifo_conf = AAI_FIFOCFG_CFG_TX(fifo->fifo_id);
+ fifo->dmaca = AAI_DMACFG_CA_TX(fifo->fifo_id);
+ fifo->dmana = AAI_DMACFG_NA_TX(fifo->fifo_id);
+ if (aai->pdata->chiprev > P7_CHIPREV_R1) {
+ fifo->dmacv = AAI_DMACFG_CV_TX(fifo->fifo_id);
+ fifo->dmanv = AAI_DMACFG_NV_TX(fifo->fifo_id);
+ }
+ reg_fifo16 = AAI_FIFOCFG_TX_16BIT;
+ } else if (chan->direction & AAI_RX) {
+ fifo->fifo_id = aai->fifo_rx++;
+ if (fifo->fifo_id > 17) {
+ dev_err(aai->dev, "not enough hardware fifo RX "
+ "available for configuration\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ fifo->fifo_conf = AAI_FIFOCFG_CFG_RX(fifo->fifo_id);
+ fifo->dmaca = AAI_DMACFG_CA_RX(fifo->fifo_id);
+ fifo->dmana = AAI_DMACFG_NA_RX(fifo->fifo_id);
+ if (aai->pdata->chiprev > P7_CHIPREV_R1) {
+ fifo->dmacv = AAI_DMACFG_CV_RX(fifo->fifo_id);
+ fifo->dmanv = AAI_DMACFG_NV_RX(fifo->fifo_id);
+ }
+ reg_fifo16 = AAI_FIFOCFG_RX_16BIT;
+ } else {
+ dev_err(aai->dev, "wrong channel direction\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /*
+ * Configure related fifo register
+ * calculate FIFO_DEPTH based on fifo_size
+ */
+ if ((chan->fifo_size >> 2) > 256) {
+ dev_err(aai->dev, "max FIFO depth is 256 words\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ cfg = aai->fifo_address |
+ ((chan->fifo_size >> 2) << 16) |
+ ((fifo->chan_nb - 1) << 24);
+ aai_writereg(aai, cfg, fifo->fifo_conf, 0);
+
+ /* Configure 16bits or 32bits mode */
+ if (aai->pdata->chiprev > P7_CHIPREV_R1 && fifo->mode == FIFO_MODE_16)
+ dev_warn(aai->dev,
+ "%s: fifo mode 16 not supported in this revision\n",
+ __func__);
+
+ cfg = aai_readreg(aai, reg_fifo16);
+ if (fifo->mode == FIFO_MODE_16) {
+ cfg |= (1 << fifo->fifo_id);
+ } else if (fifo->mode == FIFO_MODE_32) {
+ cfg &= ~(1 << fifo->fifo_id);
+ } else {
+ dev_err(aai->dev, "wrong fifo mode\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ aai_writereg(aai, cfg, reg_fifo16, 0);
+
+ /* increment fifo address for next internal memory allocation */
+ aai->fifo_address += chan->fifo_size;
+
+ /* Configure associated aai internal channels */
+ for (i = 0; i < fifo->chan_nb; i++) {
+ internal_chan = fifo->chan_list[i];
+ if (internal_chan->used) {
+ dev_err(aai->dev,
+ "%s: audio channel \"%s\" already used\n",
+ __func__, internal_chan->name);
+ err = -EINVAL;
+ goto exit;
+ }
+ cfg = fifo->fifo_id | (i << 8);
+ aai_writereg(aai, cfg, internal_chan->fifo_sel_reg, 0);
+ internal_chan->used = 1;
+ }
+
+ /* Set group to -1 (no group) */
+ chan->group = -1;
+
+exit:
+ spin_unlock_irqrestore(&aai->hwlock, flags);
+ return err;
+}
+
+int aai_hw_init(struct card_data_t *aai)
+{
+ uint32_t cfg;
+ int err = 0;
+
+ /* Enable AAI clocks */
+ if (aai->pdata->chiprev < P7_CHIPREV_R3) {
+ cfg = aai_readreg(aai, AAI_GBC_CLKEN);
+ cfg |= (1 << GBC_AAI0_MFRAME);
+ cfg |= (1 << GBC_AAI0_HFRAME);
+ aai_writereg(aai, cfg, AAI_GBC_CLKEN, 0);
+ } else {
+ cfg = aai_readreg(aai, AAI_LBC_CLKEN);
+ cfg |= (1 << GBC_AAI0_MFRAME);
+ cfg |= (1 << GBC_AAI0_HFRAME);
+ cfg |= (1 << GBC_AAI0_MCLK);
+ aai_writereg(aai, cfg, AAI_LBC_CLKEN, 0);
+ }
+
+ aai_set_clk(aai, i2s_clk, AAI_NOMINAL_RATE);
+
+ /* Reset all uninitialized AAI registers */
+ aai_reset_regs(aai);
+
+ /* Set default AAI CFG regs */
+ err = aai_hw_init_cfg(aai);
+ if (err)
+ goto exit;
+
+ /* Init pads */
+ err = aai_hw_init_pads(aai);
+ if (err)
+ goto exit;
+
+ err = aai_hw_set_conf(aai);
+ if (err)
+ goto exit;
+
+exit:
+ return err;
+}
+
+static struct aai_device_t *aai_find_pcm(const char *aai_device)
+{
+ int itr;
+ struct aai_device_t *ret = NULL;
+ for (itr = 0; itr < ARRAY_SIZE(aai_pcm_devices); itr++) {
+ if (!strcmp(aai_device, aai_pcm_devices[itr].name)) {
+ ret = &aai_pcm_devices[itr];
+ break;
+ }
+ }
+ return ret;
+}
+
+int aai_hw_init_card(struct card_data_t *aai, int dev_id)
+{
+	int err = 0, itr = 0;
+	struct snd_card *card = aai->card;
+
+	/* Init lock */
+	spin_lock_init(&aai->hwlock);
+	if (!aai->pdata->device_list)
+		return -ENODEV;
+
+	while (aai->pdata->device_list[itr] != NULL)
+		itr++;
+	aai->chans_nb = itr;
+
+	aai->chans = kmalloc(itr * sizeof(struct aai_device_t), GFP_KERNEL);
+	if (!aai->chans)
+		return -ENOMEM;
+
+	for (itr = 0; itr < aai->chans_nb; itr++) {
+		const char *name = aai->pdata->device_list[itr];
+		struct aai_device_t *ret = aai_find_pcm(name);
+		if (!ret) {
+			dev_err(aai->dev, "channel %s does not exist\n", name);
+			kfree(aai->chans);
+			return -ENODEV;
+		}
+		/* XXX need to do a copy? */
+		aai->chans[itr] = *ret;
+	}
+
+	/* Set irq handler */
+	aai->irq_handler = aai_irq_p7;
+
+	/* Register ioctl */
+	aai_ioctl_hwdep_new(aai);
+
+	/* Set Sound Card datas (bounded copies into fixed-size snd_card fields) */
+	snprintf(card->driver, sizeof(card->driver), "AAI P7");
+	snprintf(card->shortname, sizeof(card->shortname), "AAI");
+	snprintf(card->longname, sizeof(card->longname), "P7 Advanced Audio Interface %i", dev_id + 1);
+
+	return err;
+}
+
+int aai_hw_remove(struct card_data_t *aai)
+{
+
+ /* TODO: reset all registers */
+
+ /* Release clock */
+ aai_release_clk(aai, ctrl_clk);
+ kfree(aai->chans);
+
+ return 0;
+}
+
+int aai_hw_start_channel(struct aai_device_t *chan)
+{
+ int err = 0;
+ struct card_data_t *aai = chan->driver_data;
+ unsigned long flags = 0;
+
+ if (chan->ipcm >= aai->chans_nb) {
+ dev_warn(chan2dev(chan), "wrong channel number %d\n",
+ chan->ipcm);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_irqsave(&aai->hwlock, flags);
+
+ /* update group ref count */
+ if (chan->group != -1)
+ aai->groups_ref_count[chan->group]++;
+
+ /* start channel individually */
+ chan->bytes_count = 0;
+ chan->nbirq = 0;
+ chan->nbperiod = 0;
+ aai_it_en(aai, chan);
+
+ spin_unlock_irqrestore(&aai->hwlock, flags);
+
+exit:
+ return err;
+}
+
+int aai_hw_stop_channel(struct aai_device_t *chan)
+{
+ int err = 0;
+ struct card_data_t *aai = chan->driver_data;
+ unsigned long flags = 0;
+
+ if (chan->ipcm >= aai->chans_nb) {
+ dev_warn(chan2dev(chan), "wrong channel number %d\n",
+ chan->ipcm);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_irqsave(&aai->hwlock, flags);
+ if (aai->pdata->chiprev > P7_CHIPREV_R1)
+ aai_writereg(aai, 0, (uint32_t)chan->fifo.dmanv, 0);
+ aai_it_dis(aai, chan);
+
+ /* unset group if channel was synched */
+ aai_group_unset(aai, chan);
+ spin_unlock_irqrestore(&aai->hwlock, flags);
+
+exit:
+ return err;
+}
+
+void aai_hw_dma_free(struct snd_pcm_substream *substream)
+{
+ int err;
+ struct aai_device_t *chan = substream2chan(substream);
+
+ err = snd_pcm_lib_free_pages(substream);
+ if (err)
+ dev_warn(chan2dev(chan), "%s (%d) - unable to free dma area\n",
+ chan->name, chan->ipcm);
+
+ return;
+}
+
+int aai_hw_dma_alloc(struct snd_pcm_substream *substream, size_t size)
+{
+	int err = 0;
+	struct aai_device_t *chan = substream2chan(substream);
+
+	err = snd_pcm_lib_malloc_pages(substream, size);
+	if (err < 0)
+		dev_warn(chan2dev(chan),
+			 "%s(%d) - failed to allocate %zu bytes\n",
+			 chan->name, chan->ipcm, size);
+
+	return err;
+}
+
+int aai_hw_dma_prepare(struct aai_device_t *chan)
+{
+ int err = 0;
+ struct card_data_t *aai = chan->driver_data;
+ struct aai_hw_channel_t *fifo = &chan->fifo;
+ unsigned long flags = 0;
+ uint32_t reg;
+
+ spin_lock_irqsave(&aai->hwlock, flags);
+
+ if (chan->direction & AAI_TX) {
+ reg = aai_readreg(aai, AAI_MAIN_DMA_EN_TX);
+ reg |= 1 << chan->fifo.fifo_id;
+ aai_writereg(aai, reg, AAI_MAIN_DMA_EN_TX, 0);
+ } else {
+ reg = aai_readreg(aai, AAI_MAIN_DMA_EN_RX);
+ reg |= 1 << chan->fifo.fifo_id;
+ aai_writereg(aai, reg, AAI_MAIN_DMA_EN_RX, 0);
+ }
+
+ /* Set DMA buffer registers */
+ chan->dma_burst_nb = chan->period_bytes / (chan->fifo_size >> 1);
+ if (chan->dma_burst_nb > 0xff) {
+ dev_dbg(aai->dev,
+ "%s: '%s' period_bytes is larger than 256 fifo_size\n",
+ __func__, chan->name);
+ while (chan->dma_burst_nb > 0xff)
+ chan->dma_burst_nb >>= 1;
+ }
+ chan->dma_xfer_size = chan->dma_burst_nb * (chan->fifo_size >> 1);
+
+ /* Set DMA counter size according to dma transfer size */
+ if (chan->direction & AAI_TX) {
+ aai_writereg(aai, chan->dma_burst_nb - 1,
+ AAI_DMACFG_NC_TX(fifo->fifo_id), 0);
+ aai_writereg(aai, chan->dma_burst_nb - 1,
+ AAI_DMACFG_CC_TX(fifo->fifo_id), 0);
+ } else if (chan->direction & AAI_RX) {
+ aai_writereg(aai, chan->dma_burst_nb - 1,
+ AAI_DMACFG_CC_RX(fifo->fifo_id), 0);
+ aai_writereg(aai, chan->dma_burst_nb - 1,
+ AAI_DMACFG_NC_RX(fifo->fifo_id), 0);
+ } else {
+ dev_err(aai->dev, "%s: wrong channel direction\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ chan->fifo.pfollow = chan->fifo.pcurrent + chan->dma_xfer_size;
+ aai_writereg(aai, (uint32_t)chan->fifo.pcurrent,
+ (uint32_t)chan->fifo.dmaca, 0);
+ aai_writereg(aai, (uint32_t)chan->fifo.pfollow,
+ (uint32_t)chan->fifo.dmana, 0);
+ if (aai->pdata->chiprev > P7_CHIPREV_R1) {
+ aai_writereg(aai, 1, (uint32_t)chan->fifo.dmacv, 0);
+ aai_writereg(aai, 1, (uint32_t)chan->fifo.dmanv, 0);
+ }
+
+exit:
+ spin_unlock_irqrestore(&aai->hwlock, flags);
+ return err;
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_hw.h b/drivers/parrot/sound/p7_aai/aai_hw.h
new file mode 100644
index 00000000000000..bf7d1f4c013428
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_hw.h
@@ -0,0 +1,273 @@
+
+#ifndef _AAI_HW_H_
+#define _AAI_HW_H_
+
+#include "reg_aai_gbc.h"
+#include "reg_aai.h"
+#include "aai_regs.h"
+
+/* Nominal Rate */
+/* XXX per board? */
+#define AAI_NOMINAL_RATE 48000
+
+int aai_hw_init(struct card_data_t *aai);
+int aai_hw_init_card(struct card_data_t *aai, int dev_id);
+int aai_hw_remove(struct card_data_t *aai);
+int aai_hw_pcm_init(struct card_data_t *aai, struct aai_device_t *chan);
+int aai_hw_start_channel(struct aai_device_t *chan);
+int aai_hw_stop_channel(struct aai_device_t *chan);
+int aai_hw_dma_prepare(struct aai_device_t *chan);
+void aai_hw_dma_free(struct snd_pcm_substream *substream);
+int aai_hw_dma_alloc(struct snd_pcm_substream *substream, size_t size);
+
+irqreturn_t aai_irq_p7(int irq, void *dev);
+
+int aai_pcms_probe(struct card_data_t *aai);
+int aai_pcms_init(struct card_data_t *aai);
+
+int aai_ioctl_hwdep_new(struct card_data_t *aai);
+
+/* AAI pcm ops declaration */
+extern struct snd_pcm_ops aai_pcm_ops;
+
+/**
+ * @brief Read register wrapper.
+ * Read register value and test offset value
+ *
+ * @param aai private driver data
+ * @param offset register offset
+ *
+ * @return register value
+ */
+static inline unsigned int aai_readreg(struct card_data_t *aai, uint32_t offset)
+{
+	uint32_t val = 0;
+
+	/* TODO: check input value */
+
+	if (!aai_reg_canread(offset)) {
+		dev_warn(aai->dev, "%s (0x%x) is write only register\n",
+			 aai_reg_name(offset), aai_reg_addr(offset));
+		return 0;
+	}
+
+	val = readl(aai->iobase+offset);
+
+	return val;
+}
+
+/**
+ * @brief Write register wrapper.
+ * Write register value and print debug info
+ *
+ * @param aai private driver data
+ * @param val value to write
+ * @param offset register offset
+ *
+ * @return none
+ */
+static inline void aai_writereg(struct card_data_t *aai,
+				uint32_t val,
+				uint32_t offset,
+				int disp)
+{
+	uint32_t reg;
+
+	if (!aai_reg_canwrite(offset)) {
+		dev_warn(aai->dev, "%s (0x%x) is read only register\n",
+			 aai_reg_name(offset), aai_reg_addr(offset));
+		return;
+	}
+
+	if (disp) {
+		dev_warn(aai->dev, "writing %s @ 0x%x: 0x%x\n",
+			 aai_reg_name(offset), aai_reg_addr(offset), val);
+	}
+
+	writel(val, aai->iobase + offset);
+
+	/* read back */
+	if (aai_reg_canreadback(offset)) {
+		reg = aai_readreg(aai, offset);
+		if (reg != val)
+			dev_warn(aai->dev, "failed writing 0x%x to reg %s "
+				 "(0x%x), contains: 0x%x\n",
+				 val, aai_reg_name(offset),
+				 aai_reg_addr(offset), reg);
+	}
+}
+
+/**
+ * @brief Set several consecutive bits.
+ *
+ * @param aai private driver data
+ * @param offset register offset
+ * @param mask bits mask
+ * @param shift first bit shift
+ * @param val value
+ *
+ * @return none
+ */
+static inline void aai_set_nbit(struct card_data_t *aai,
+ unsigned int offset,
+ unsigned int mask,
+ unsigned int shift,
+ unsigned int val)
+{
+ uint32_t reg;
+
+ reg = aai_readreg(aai, offset);
+ reg &= ~mask; /* set all concerned bits to 0 */
+ reg |= (val << shift); /* set needed bits to 1 */
+ aai_writereg(aai, reg, offset, 0);
+}
+
+static inline int aai_get_nbit(struct card_data_t *aai,
+ unsigned int offset,
+ unsigned int mask,
+ unsigned int shift)
+{
+ uint32_t reg = aai_readreg(aai, offset);
+ reg &= mask;
+ return reg >> shift; /* set needed bits to 1 */
+}
+
+/**
+ * @brief Set bit value.
+ *
+ * @param aai private driver data
+ * @param offset register offset
+ * @param mask bits mask
+ * @param val value
+ *
+ * @return none
+ */
+static inline void aai_setbit(struct card_data_t *aai,
+ unsigned int offset,
+ unsigned int mask,
+ unsigned int val)
+{
+ uint32_t reg;
+
+ reg = aai_readreg(aai, offset);
+ /*
+ * if the bit has to be modified
+ */
+ if (!!(reg & mask) != (!!val)) {
+ reg &= ~(mask); /* reset concerned bit to 0 */
+ if (val)
+ reg |= mask; /* set it if needed */
+ aai_writereg(aai, reg, offset, 0);
+ }
+}
+
+/**
+ * @brief Get bit value
+ *
+ * @param aai AAI card instance
+ * @param offset register offset
+ * @param mask bits mask
+ *
+ * @return requested bit value
+ */
+static inline unsigned int aai_getbit(struct card_data_t *aai,
+ unsigned int offset,
+ unsigned int mask)
+{
+ uint32_t reg = aai_readreg(aai, offset);
+ return !!(reg & mask);
+}
+
+static inline void aai_it_en(struct card_data_t *aai,
+ struct aai_device_t *device)
+{
+ uint32_t reg;
+
+ /*
+ * Activate RX/TX interrupt in main interrupt register
+ */
+ reg = aai_readreg(aai, AAI_MAIN_ITEN);
+ if (device->direction & AAI_RX) {
+ reg |= 0x1;
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+ } else if (device->direction & AAI_TX) {
+ reg |= 0x100;
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+ }
+
+ /*
+ * Activate specific fifo interrupt, in DMA mode only
+ */
+ if (device->direction & AAI_RX) {
+ if (device->group == -1) {
+ reg = aai_readreg(aai, AAI_MAIN_FIFO_EN_RX);
+ reg |= 1 << device->fifo.fifo_id;
+ aai_writereg(aai, reg, AAI_MAIN_FIFO_EN_RX, 0);
+ }
+ } else if (device->direction & AAI_TX) {
+ if (device->group == -1) {
+ reg = aai_readreg(aai, AAI_MAIN_FIFO_EN_TX);
+ reg |= 1 << device->fifo.fifo_id;
+ aai_writereg(aai, reg, AAI_MAIN_FIFO_EN_TX, 0);
+ }
+ } else {
+ dev_err(aai->dev, "wrong channel direction\n");
+ }
+}
+
+static inline void aai_it_dis(struct card_data_t *aai,
+ struct aai_device_t *device)
+{
+ uint32_t reg;
+
+ /*
+ * Deactivate specific fifo interrupt, in DMA mode only
+ */
+ if (device->direction & AAI_RX) {
+ reg = aai_readreg(aai, AAI_MAIN_FIFO_EN_RX);
+ reg &= ~(1 << device->fifo.fifo_id);
+ aai_writereg(aai, reg, AAI_MAIN_FIFO_EN_RX, 0);
+ } else if (device->direction & AAI_TX) {
+ reg = aai_readreg(aai, AAI_MAIN_FIFO_EN_TX);
+ reg &= ~(1 << device->fifo.fifo_id);
+ aai_writereg(aai, reg, AAI_MAIN_FIFO_EN_TX, 0);
+ } else {
+ dev_err(aai->dev, "wrong channel direction\n");
+ }
+
+ /*
+ * When all corresponding FIFO are disabled,
+ * deactivate RX/TX interrupt in main interrupt register.
+ */
+ reg = aai_readreg(aai, AAI_MAIN_ITEN);
+ if ((device->direction & AAI_RX) &&
+ !aai_readreg(aai, AAI_MAIN_FIFO_EN_RX)) {
+ reg &= ~(0x7);
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+ } else if ((device->direction & AAI_TX) &&
+ !aai_readreg(aai, AAI_MAIN_FIFO_EN_TX)) {
+ reg &= ~(0x700);
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+ }
+}
+
+static inline void aai_group_it_en(struct card_data_t *aai, int group)
+{
+ uint32_t reg;
+
+ reg = aai_readreg(aai, AAI_MAIN_ITEN);
+ reg |= 1 << (16 + group);
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+}
+
+static inline void aai_group_it_dis(struct card_data_t *aai, int group)
+{
+ uint32_t reg;
+
+ reg = aai_readreg(aai, AAI_MAIN_ITEN);
+ reg &= ~(1 << (16+group));
+ aai_writereg(aai, reg, AAI_MAIN_ITEN, 0);
+}
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_hw_chan.h b/drivers/parrot/sound/p7_aai/aai_hw_chan.h
new file mode 100644
index 00000000000000..41482a40a9f2f8
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_hw_chan.h
@@ -0,0 +1,559 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_hw_struct.h"
+#include "aai_rules.h"
+
+/*
+ * Static list of audio devices
+ * - 11 fifo tx
+ * - 18 fifo rx
+ * - total internal memory size 4096 * 32bits
+ */
+struct aai_device_t aai_pcm_devices[] = {
+ /* music output channels */
+ {
+ .name = "music-out-stereo0",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_tx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[12],
+ &aai_internal_audio_chans_tx[13]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-out-stereo1",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_tx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[14],
+ &aai_internal_audio_chans_tx[15]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-out-stereo2",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_tx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[16],
+ &aai_internal_audio_chans_tx[17]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-out-stereo3",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_tx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[18],
+ &aai_internal_audio_chans_tx[19]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ /* voice output channels */
+ {
+ .name = "voice-out-stereo",
+ .direction = AAI_TX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[20],
+ &aai_internal_audio_chans_tx[21]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .aai_hw_ops = {
+ .hw_params = aai_hw_params_voice,
+ .close = aai_close_voice,
+ },
+ },
+
+ /* pcm output channels */
+ {
+ .name = "pcm0-out",
+ .direction = AAI_TX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_pcm,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[8],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ .aai_hw_ops = {
+ .hw_params = aai_hw_params_pcm0,
+ .close = aai_close_pcm,
+ },
+ },
+
+ {
+ .name = "pcm1-out",
+ .direction = AAI_TX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_pcm,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[9],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ .aai_hw_ops = {
+ .hw_params = aai_hw_params_pcm1,
+ .close = aai_close_pcm,
+ },
+ },
+
+ /* music input channels */
+ {
+ .name = "music-in-stereo0",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_rx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[12],
+ &aai_internal_audio_chans_rx[13]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-in-stereo1",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_rx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[14],
+ &aai_internal_audio_chans_rx[15]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-in-stereo2",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_rx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[16],
+ &aai_internal_audio_chans_rx[17]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "music-in-stereo3",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_rx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[18],
+ &aai_internal_audio_chans_rx[19]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ /* voice input channels */
+ {
+ .name = "mic0-8k",
+ .direction = AAI_RX,
+ .fifo_size = 0x20,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[28],
+ &aai_internal_audio_chans_rx[29]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_VOICEL0_INPUT_8K,
+ RULE_VOICER0_INPUT_8K,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "mic1-8k",
+ .direction = AAI_RX,
+ .fifo_size = 0x20,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[30],
+ &aai_internal_audio_chans_rx[31]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_VOICEL1_INPUT_8K,
+ RULE_VOICER1_INPUT_8K,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "mic0-16k",
+ .direction = AAI_RX,
+ .fifo_size = 0x20,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[20],
+ &aai_internal_audio_chans_rx[21]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ {
+ .name = "mic1-16k",
+ .direction = AAI_RX,
+ .fifo_size = 0x20,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[22],
+ &aai_internal_audio_chans_rx[23]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ /* pcm input channels */
+ {
+ .name = "pcm0-in",
+ .direction = AAI_RX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_pcm,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[8],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ .aai_hw_ops = {
+ .hw_params = aai_hw_params_pcm0,
+ .close = aai_close_pcm,
+ },
+ },
+
+ {
+ .name = "pcm1-in",
+ .direction = AAI_RX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_pcm,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[9],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ .aai_hw_ops = {
+ .hw_params = aai_hw_params_pcm1,
+ .close = aai_close_pcm,
+ },
+ },
+
+ /* spdif channels */
+ {
+ .name = "spdif-out",
+ .direction = AAI_TX,
+ .fifo_size = 0x200,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_iec958,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[22],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_SPDIF_OUT, RULE_END},
+ },
+
+ {
+ .name = "spdif-in",
+ .direction = AAI_RX,
+ .fifo_size = 0x200,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_iec958,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_MONO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[36],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {RULE_END},
+ },
+
+ /* music complex output channels */
+ {
+ .name = "music-out-octo",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_OCTO,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[12],
+ &aai_internal_audio_chans_tx[13],
+ &aai_internal_audio_chans_tx[14],
+ &aai_internal_audio_chans_tx[15],
+ &aai_internal_audio_chans_tx[16],
+ &aai_internal_audio_chans_tx[17],
+ &aai_internal_audio_chans_tx[18],
+ &aai_internal_audio_chans_tx[19]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_OUT_OCTO,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "music-out-quad0",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_QUAD,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[12],
+ &aai_internal_audio_chans_tx[13],
+ &aai_internal_audio_chans_tx[14],
+ &aai_internal_audio_chans_tx[15],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_OUT_QUAD,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "music-out-quad1",
+ .direction = AAI_TX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_QUAD,
+ .chan_list = {
+ &aai_internal_audio_chans_tx[16],
+ &aai_internal_audio_chans_tx[17],
+ &aai_internal_audio_chans_tx[18],
+ &aai_internal_audio_chans_tx[19],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_OUT_QUAD,
+ RULE_END
+ },
+ },
+
+ /* music complex input channels */
+ {
+ .name = "music-in-octo",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_OCTO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[12],
+ &aai_internal_audio_chans_rx[13],
+ &aai_internal_audio_chans_rx[14],
+ &aai_internal_audio_chans_rx[15],
+ &aai_internal_audio_chans_rx[16],
+ &aai_internal_audio_chans_rx[17],
+ &aai_internal_audio_chans_rx[18],
+ &aai_internal_audio_chans_rx[19]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_IN_OCTO,
+ RULE_TDM_IN0,
+ RULE_TDM_IN1,
+ RULE_TDM_IN2,
+ RULE_TDM_IN3,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "music-in-quad0",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_QUAD,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[12],
+ &aai_internal_audio_chans_rx[13],
+ &aai_internal_audio_chans_rx[14],
+ &aai_internal_audio_chans_rx[15],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_IN_QUAD,
+ RULE_TDM_IN0,
+ RULE_TDM_IN1,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "music-in-quad1",
+ .direction = AAI_RX,
+ .fifo_size = 0x100,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_music_stereo_rx,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_QUAD,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[16],
+ &aai_internal_audio_chans_rx[17],
+ &aai_internal_audio_chans_rx[18],
+ &aai_internal_audio_chans_rx[19],
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_TDM_IN_QUAD,
+ RULE_TDM_IN0,
+ RULE_TDM_IN1,
+ RULE_END
+ },
+ },
+
+ /* loopbacks channels */
+ {
+ .name = "loopback-8k",
+ .direction = AAI_RX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[32],
+ &aai_internal_audio_chans_rx[33]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_LOOPBACK_2,
+ RULE_LOOPBACK2_I2S0,
+ RULE_VOICEL2_INPUT_8K,
+ RULE_VOICER2_INPUT_8K,
+ RULE_END
+ },
+ },
+
+ {
+ .name = "loopback-16k",
+ .direction = AAI_RX,
+ .fifo_size = 0x40,
+ .ops = &aai_pcm_ops,
+ .hw = &aai_pcm_hw_voice,
+ .fifo = {
+ .chan_nb = AAI_FORMAT_STEREO,
+ .chan_list = {
+ &aai_internal_audio_chans_rx[24],
+ &aai_internal_audio_chans_rx[25]
+ },
+ .mode = FIFO_MODE_32,
+ },
+ .rules = {
+ RULE_LOOPBACK_2,
+ RULE_LOOPBACK2_I2S0,
+ RULE_END
+ },
+ },
+};
diff --git a/drivers/parrot/sound/p7_aai/aai_hw_struct.h b/drivers/parrot/sound/p7_aai/aai_hw_struct.h
new file mode 100644
index 00000000000000..f1f07ba0de21e2
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_hw_struct.h
@@ -0,0 +1,256 @@
+
+#ifndef _AAI_HW_STRUCT_H_
+#define _AAI_HW_STRUCT_H_
+
+/* Default ALSA buffer/period limits shared by all substream descriptors. */
+#define DFLT_BUFFER_BYTES_MAX (128*1024)
+#define DFLT_PERIOD_BYTES_MAX (32*1024)
+#define DFLT_PERIOD_MIN (2)
+#define DFLT_PERIOD_MAX (PAGE_SIZE / 16)
+
+/* Stereo 48 kHz music capture: 24-bit little-endian samples. */
+static const struct snd_pcm_hardware aai_pcm_hw_music_stereo_rx = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rates = SNDRV_PCM_RATE_48000,
+	.rate_max = 48000,
+	.rate_min = 48000,
+	.channels_min = 2,
+	.channels_max = 2,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 64,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* Stereo 48 kHz music playback: S32_LE (RX variant above uses S24_LE). */
+static const struct snd_pcm_hardware aai_pcm_hw_music_stereo_tx = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S32_LE,
+	.rates = SNDRV_PCM_RATE_48000,
+	.rate_max = 48000,
+	.rate_min = 48000,
+	.channels_min = 2,
+	.channels_max = 2,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 64,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* Generic music stream: 1..8 channels, S32_LE, fixed 48 kHz. */
+static const struct snd_pcm_hardware aai_pcm_hw_music = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S32_LE,
+	.rates = SNDRV_PCM_RATE_48000,
+	.rate_max = 48000,
+	.rate_min = 48000,
+	.channels_min = 1,
+	.channels_max = 8,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 64,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* High-rate music stream: 1..8 channels, S24_LE, fixed 96 kHz. */
+static const struct snd_pcm_hardware aai_pcm_hw_music_96k = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rates = SNDRV_PCM_RATE_96000,
+	.rate_max = 96000,
+	.rate_min = 96000,
+	.channels_min = 1,
+	.channels_max = 8,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 64,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* Voice stream: stereo S24_LE at 8 or 16 kHz, smaller minimum period. */
+static const struct snd_pcm_hardware aai_pcm_hw_voice = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_max = 16000,
+	.rate_min = 8000,
+	.channels_min = 2,
+	.channels_max = 2,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 8,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* Mono PCM (telephony) stream at 8/16 kHz. */
+static const struct snd_pcm_hardware aai_pcm_hw_pcm = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	/*
+	 * for pcm the real format is 16 bits in 32 bits due to hardware
+	 * bug (16 bits dma not working).
+	 */
+	.formats = SNDRV_PCM_FMTBIT_S32_LE,
+	.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+	.rate_max = 16000,
+	.rate_min = 8000,
+	.channels_min = 1,
+	.channels_max = 1,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 8,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/*
+ * RAM-to-RAM (sample rate converter) stream.
+ * NOTE(review): .rates advertises 192000|16000 but rate_min is 44100, so
+ * the 16000 bit can never be selected — confirm whether .rates should be a
+ * continuous range flag or rate_min should be 16000.
+ */
+static const struct snd_pcm_hardware aai_pcm_hw_ram2ram = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rates = SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_16000,
+	.rate_max = 192000,
+	.rate_min = 44100,
+	.channels_min = 1,
+	.channels_max = 8,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 8,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* IEC958 (S/PDIF) stream: subframe or S32_LE format, 44.1k..96k. */
+static const struct snd_pcm_hardware aai_pcm_hw_iec958 = {
+	.info = SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID |
+		SNDRV_PCM_INFO_INTERLEAVED |
+		SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_RESUME,
+
+	.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME |
+		SNDRV_PCM_FMTBIT_S32_LE,
+	.rates = SNDRV_PCM_RATE_44100 |
+		SNDRV_PCM_RATE_48000 |
+		SNDRV_PCM_RATE_88200 |
+		SNDRV_PCM_RATE_96000,
+	.rate_min = 44100,
+	.rate_max = 96000,
+	.channels_min = 1,
+	.channels_max = 8,
+	.buffer_bytes_max = DFLT_BUFFER_BYTES_MAX,
+	.period_bytes_min = 512,
+	.period_bytes_max = DFLT_PERIOD_BYTES_MAX,
+	.periods_min = DFLT_PERIOD_MIN,
+	.periods_max = DFLT_PERIOD_MAX,
+};
+
+/* List of AAI internal audio channels RX */
+/*
+ * List of AAI internal audio channels RX.
+ * Array index matches .id and the FIFO selection register index; device
+ * tables elsewhere reference entries by index (e.g. [12..19] = mus-in).
+ */
+static struct aai_audio_chan_t aai_internal_audio_chans_rx[] = {
+	{.id = 0, .name = "2ram-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(0)},
+	{.id = 1, .name = "2ram-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(1)},
+	{.id = 2, .name = "2ram-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(2)},
+	{.id = 3, .name = "2ram-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(3)},
+	{.id = 4, .name = "2ram-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(4)},
+	{.id = 5, .name = "2ram-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(5)},
+	{.id = 6, .name = "2ram-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(6)},
+	{.id = 7, .name = "2ram-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(7)},
+
+	{.id = 8, .name = "pcm-in-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(8)},
+	{.id = 9, .name = "pcm-in-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(9)},
+	{.id = 10, .name = "pcm-in-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(10)},
+	{.id = 11, .name = "pcm-in-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(11)},
+
+	{.id = 12, .name = "mus-in-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(12)},
+	{.id = 13, .name = "mus-in-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(13)},
+	{.id = 14, .name = "mus-in-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(14)},
+	{.id = 15, .name = "mus-in-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(15)},
+	{.id = 16, .name = "mus-in-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(16)},
+	{.id = 17, .name = "mus-in-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(17)},
+	{.id = 18, .name = "mus-in-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(18)},
+	{.id = 19, .name = "mus-in-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(19)},
+
+	{.id = 20, .name = "micfs-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(20)},
+	{.id = 21, .name = "micfs-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(21)},
+	{.id = 22, .name = "micfs-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(22)},
+	{.id = 23, .name = "micfs-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(23)},
+	{.id = 24, .name = "micfs-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(24)},
+	{.id = 25, .name = "micfs-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(25)},
+	{.id = 26, .name = "micfs-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(26)},
+	{.id = 27, .name = "micfs-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(27)},
+
+	{.id = 28, .name = "michs-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(28)},
+	{.id = 29, .name = "michs-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(29)},
+	{.id = 30, .name = "michs-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(30)},
+	{.id = 31, .name = "michs-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(31)},
+	{.id = 32, .name = "michs-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(32)},
+	{.id = 33, .name = "michs-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(33)},
+	{.id = 34, .name = "michs-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(34)},
+	{.id = 35, .name = "michs-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(35)},
+
+	{.id = 36, .name = "binary-in", .fifo_sel_reg = AAI_FIFOCFG_SEL_RX(36)}
+};
+
+/* List of AAI internal audio channels TX */
+/*
+ * List of AAI internal audio channels TX.
+ * Array index matches .id and the FIFO selection register index.
+ */
+static struct aai_audio_chan_t aai_internal_audio_chans_tx[] = {
+	{.id = 0, .name = "2ram-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(0)},
+	{.id = 1, .name = "2ram-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(1)},
+	{.id = 2, .name = "2ram-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(2)},
+	{.id = 3, .name = "2ram-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(3)},
+	{.id = 4, .name = "2ram-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(4)},
+	{.id = 5, .name = "2ram-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(5)},
+	{.id = 6, .name = "2ram-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(6)},
+	{.id = 7, .name = "2ram-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(7)},
+
+	{.id = 8, .name = "pcm-out-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(8)},
+	{.id = 9, .name = "pcm-out-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(9)},
+	{.id = 10, .name = "pcm-out-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(10)},
+	{.id = 11, .name = "pcm-out-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(11)},
+
+	{.id = 12, .name = "spk-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(12)},
+	{.id = 13, .name = "spk-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(13)},
+	{.id = 14, .name = "spk-2", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(14)},
+	{.id = 15, .name = "spk-3", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(15)},
+	{.id = 16, .name = "spk-4", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(16)},
+	{.id = 17, .name = "spk-5", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(17)},
+	{.id = 18, .name = "spk-6", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(18)},
+	{.id = 19, .name = "spk-7", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(19)},
+
+	{.id = 20, .name = "voice-0", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(20)},
+	{.id = 21, .name = "voice-1", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(21)},
+
+	{.id = 22, .name = "binary-out", .fifo_sel_reg = AAI_FIFOCFG_SEL_TX(22)}
+};
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_ioctl.c b/drivers/parrot/sound/p7_aai/aai_ioctl.c
new file mode 100644
index 00000000000000..5d4e94247e23ec
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_ioctl.c
@@ -0,0 +1,549 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_ioctl.h"
+#include "aai_group.h"
+
+/**
+ * Get a data from user space
+ *
+ * @param src ptr to userspace data
+ * @param dst ptr to kernelspace data
+ * @param size of data
+ * @return error code
+ */
+/* Copy @size bytes from userspace @src into kernel @dst; 0 or -EFAULT. */
+static int get_data(void __user *src, void *dst, int size)
+{
+	int err = 0;
+	if (copy_from_user(dst, src, size))
+		err = -EFAULT;
+
+	return err;
+}
+
+/**
+ * Put data to userspace
+ *
+ * @param src ptr to userspace data
+ * @param dst ptr to kernelspace data
+ * @param size of data
+ *
+ * @return error code
+ */
+/*
+ * NOTE(review): dead code kept under #if 0 — no ioctl currently returns
+ * data to userspace. Consider removing until a reader ioctl needs it.
+ */
+#if 0
+static int put_data(void *src, void __user *dst, int size)
+{
+	int err = 0;
+	if (copy_to_user(dst, src, size))
+		err = -EFAULT;
+
+	return err;
+}
+#endif
+
+/*
+ * Apply a userspace-supplied music interface configuration to MUS(musx).
+ * cycles_num is silently ignored when outside [0,127]; the remaining
+ * single-bit fields are always written.
+ */
+static int aai_hwdep_set_music_conf(struct card_data_t *aai,
+				    unsigned long arg,
+				    int musx)
+{
+	int err = 0;
+	struct aai_ioctl_music_conf_t music_conf;
+
+	err = get_data((void __user *)arg, &music_conf,
+		       sizeof(struct aai_ioctl_music_conf_t));
+	if (!err) {
+		if (0 <= music_conf.cycles_num && music_conf.cycles_num <= 127)
+			aai_set_nbit(aai, AAI_AAICFG_MUS(musx),
+				     AAI_MUSx_CYCLES_NUM_MSK,
+				     AAI_MUSx_CYCLES_NUM_SHIFT,
+				     music_conf.cycles_num);
+
+		aai_setbit(aai, AAI_AAICFG_MUS(musx),
+			   AAI_MUSx_MATSUSHITA_MSK, music_conf.matsushita);
+		aai_setbit(aai, AAI_AAICFG_MUS(musx),
+			   AAI_MUSx_LEFT_FRAME_MSK, music_conf.left_frame);
+		aai_setbit(aai, AAI_AAICFG_MUS(musx),
+			   AAI_MUSx_LSB_FIRST_MSK, music_conf.lsb_first);
+		aai_setbit(aai, AAI_AAICFG_MUS(musx),
+			   AAI_MUSx_RIGHT_JUST_MSK, music_conf.right_just);
+		aai_setbit(aai, AAI_AAICFG_MUS(musx),
+			   AAI_MUSx_24BITS_MSK, music_conf.bits24);
+	}
+
+	return err;
+}
+
+/* TDM transfer direction selector for aai_hwdep_set_tdm_conf(). */
+enum tdm_dir_t {
+	TDM_IN,
+	TDM_OUT
+};
+
+/*
+ * Apply a userspace-supplied TDM configuration to the IN or OUT block.
+ * cycles_num is only written when within [8,127]; left_frame is always
+ * written on success of the copy.
+ */
+static int aai_hwdep_set_tdm_conf(struct card_data_t *aai,
+				  unsigned long arg,
+				  enum tdm_dir_t dir)
+{
+	int err = 0;
+	struct aai_ioctl_tdm_conf_t tdm_conf;
+	uint32_t offset;
+	uint32_t cycle_mask;
+	uint32_t cycle_shift;
+	uint32_t lf_mask;
+
+	/* Select the register block for the requested direction. */
+	if (dir == TDM_IN) {
+		offset = AAI_AAICFG_TDM_IN;
+		cycle_mask = AAI_TDM_IN_CYCLES_NUM_MSK;
+		cycle_shift = AAI_TDM_IN_CYCLES_NUM_SHIFT;
+		lf_mask = AAI_TDM_IN_LEFT_FRAME_MSK;
+	} else {
+		offset = AAI_AAICFG_TDM_OUT;
+		cycle_mask = AAI_TDM_OUT_CYCLES_NUM_MSK;
+		cycle_shift = AAI_TDM_OUT_CYCLES_NUM_SHIFT;
+		lf_mask = AAI_TDM_OUT_LEFT_FRAME_MSK;
+	}
+
+	err = get_data((void __user *)arg, &tdm_conf,
+		       sizeof(struct aai_ioctl_tdm_conf_t));
+	if (!err) {
+		if (8 <= tdm_conf.cycles_num && tdm_conf.cycles_num <= 127)
+			aai_set_nbit(aai, offset, cycle_mask,
+				     cycle_shift, tdm_conf.cycles_num);
+
+		aai_setbit(aai, offset, lf_mask, tdm_conf.left_frame);
+	}
+
+	return err;
+}
+
+/* Converter selector for aai_hwdep_set_adc_conf(). */
+enum adc_type_t {
+	AAI_ADC,
+	AAI_DAC,
+	AAI_AUX
+};
+
+/*
+ * Apply a userspace-supplied converter configuration to the ADC, DAC or
+ * AUX block. The three branches are structurally identical apart from the
+ * target register/mask macros; AUX uses half_frame instead of
+ * matsushita/left_frame. cycles_num is only written when within [0,127].
+ */
+static int aai_hwdep_set_adc_conf(struct card_data_t *aai,
+				  unsigned long arg,
+				  enum adc_type_t type)
+{
+	int err = 0;
+	struct aai_ioctl_adc_conf_t adc_conf;
+
+	switch (type) {
+	case AAI_ADC:
+		err = get_data((void __user *)arg, &adc_conf,
+			       sizeof(struct aai_ioctl_adc_conf_t));
+		if (!err) {
+			if (0 <= adc_conf.cycles_num &&
+			    adc_conf.cycles_num <= 127)
+				aai_set_nbit(aai, AAI_AAICFG_ADC,
+					     AAI_ADC_CYCLES_NUM_MSK,
+					     AAI_ADC_CYCLES_NUM_SHIFT,
+					     adc_conf.cycles_num);
+
+			aai_setbit(aai, AAI_AAICFG_ADC,
+				   AAI_ADC_MATSUSHITA_MSK, adc_conf.matsushita);
+			aai_setbit(aai, AAI_AAICFG_ADC,
+				   AAI_ADC_LEFT_FRAME_MSK, adc_conf.left_frame);
+		}
+		break;
+
+	case AAI_DAC:
+		err = get_data((void __user *)arg, &adc_conf,
+			       sizeof(struct aai_ioctl_adc_conf_t));
+		if (!err) {
+			if (0 <= adc_conf.cycles_num &&
+			    adc_conf.cycles_num <= 127)
+				aai_set_nbit(aai, AAI_AAICFG_DAC,
+					     AAI_DAC_CYCLES_NUM_MSK,
+					     AAI_DAC_CYCLES_NUM_SHIFT,
+					     adc_conf.cycles_num);
+
+			aai_setbit(aai, AAI_AAICFG_DAC,
+				   AAI_DAC_MATSUSHITA_MSK, adc_conf.matsushita);
+			aai_setbit(aai, AAI_AAICFG_DAC,
+				   AAI_DAC_LEFT_FRAME_MSK, adc_conf.left_frame);
+		}
+		break;
+
+	case AAI_AUX:
+		err = get_data((void __user *)arg, &adc_conf,
+			       sizeof(struct aai_ioctl_adc_conf_t));
+		if (!err) {
+			if (0 <= adc_conf.cycles_num &&
+			    adc_conf.cycles_num <= 127)
+				aai_set_nbit(aai, AAI_AAICFG_AUX,
+					     AAI_AUX_CYCLES_NUM_MSK,
+					     AAI_AUX_CYCLES_NUM_SHIFT,
+					     adc_conf.cycles_num);
+
+			aai_setbit(aai, AAI_AAICFG_AUX,
+				   AAI_AUX_HALF_FRAME_MSK, adc_conf.half_frame);
+		}
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * Apply a userspace-supplied PCM interface configuration to PCM(nb).
+ * Each multi-bit field is written only when inside its valid range;
+ * oki and run_dual single-bit flags are always written.
+ */
+static int aai_hwdep_set_pcm_conf(struct card_data_t *aai,
+				  unsigned long arg,
+				  int nb)
+{
+	int err = 0;
+	struct aai_ioctl_pcm_conf_t pcm_conf;
+
+	err = get_data((void __user *)arg, &pcm_conf,
+		       sizeof(struct aai_ioctl_pcm_conf_t));
+	if (!err) {
+		if (0 <= pcm_conf.start_two && pcm_conf.start_two <= 0xff)
+			aai_set_nbit(aai, AAI_AAICFG_PCM(nb),
+				     AAI_PCM_START_TWO_MSK,
+				     AAI_PCM_START_TWO_SHIFT,
+				     pcm_conf.start_two);
+
+		if (0 <= pcm_conf.cycles_pcm_high &&
+		    pcm_conf.cycles_pcm_high <= 0xff)
+			aai_set_nbit(aai, AAI_AAICFG_PCM(nb),
+				     AAI_PCM_CYCLES_HIGH_MSK,
+				     AAI_PCM_CYCLES_HIGH_SHIFT,
+				     pcm_conf.cycles_pcm_high);
+
+		if (0 <= pcm_conf.cycles_pcm_low &&
+		    pcm_conf.cycles_pcm_low <= 0x1ff)
+			aai_set_nbit(aai, AAI_AAICFG_PCM(nb),
+				     AAI_PCM_CYCLES_LOW_MSK,
+				     AAI_PCM_CYCLES_LOW_SHIFT,
+				     pcm_conf.cycles_pcm_low);
+
+		aai_setbit(aai, AAI_AAICFG_PCM(nb),
+			   AAI_PCM_OKI_MSK, pcm_conf.oki);
+		aai_setbit(aai, AAI_AAICFG_PCM(nb),
+			   AAI_PCM_RUN_DUAL_MSK, pcm_conf.run_dual);
+	}
+
+	return err;
+}
+
+/* Read a group number from userspace and start that synchro group. */
+static int aai_hwdep_group_start(struct card_data_t *aai, unsigned long arg)
+{
+	int err;
+	int group_nb;
+
+	err = get_data((void __user *)arg, &group_nb, sizeof(int));
+	if (!err)
+		err = aai_group_start(aai, group_nb);
+
+	return err;
+}
+
+/*
+ * Assign the channel named by the user to a synchro group.
+ * NOTE(review): copying the struct copies group.name as a raw *userspace*
+ * pointer; aai_group_set() will then read the string from a __user address
+ * in kernel context. This should use strncpy_from_user() (or a fixed-size
+ * name field in the ioctl struct) — confirm and fix.
+ */
+static int aai_hwdep_group_sync(struct card_data_t *aai, unsigned long arg)
+{
+	int err;
+	struct aai_ioctl_group_t group;
+
+	err = get_data((void __user *)arg, &group,
+		       sizeof(struct aai_ioctl_group_t));
+	if (!err)
+		err = aai_group_set(aai, group.name, group.group);
+
+	return err;
+}
+
+/*
+ * Configure the asymmetric MFRAME clock from a userspace structure.
+ *
+ * Fix: the original ignored the get_data() result and wrote the three
+ * registers even when copy_from_user() failed, i.e. from an uninitialized
+ * stack structure. Bail out before touching the hardware.
+ */
+static int aai_hwdep_set_mframe_asym_conf(struct card_data_t *aai,
+					  unsigned long arg)
+{
+	int err;
+	struct aai_ioctl_asym_conf_t conf;
+
+	err = get_data((void __user *)arg, &conf,
+		       sizeof(struct aai_ioctl_asym_conf_t));
+	if (err)
+		return err;
+
+	aai_setbit(aai, AAI_LBC_CLKEN, AAI_LBC_CKEN_MFRAME_MSK, conf.enable);
+	aai_setbit(aai, AAI_LBC_MFRAME,
+		   AAI_LBC_MFRAME_MFRAME_DIV_LOW_MSK, conf.div_low);
+	aai_setbit(aai, AAI_LBC_MFRAME,
+		   AAI_LBC_MFRAME_MFRAME_DIV_HIGH_MSK, conf.div_high);
+
+	return 0;
+}
+
+/*
+ * Configure the asymmetric HFRAME clock from a userspace structure.
+ *
+ * Fix: as in the MFRAME variant, the original wrote the registers even
+ * when copy_from_user() failed, using an uninitialized stack structure.
+ * Return the error before any hardware access.
+ */
+static int aai_hwdep_set_hframe_asym_conf(struct card_data_t *aai,
+					  unsigned long arg)
+{
+	int err;
+	struct aai_ioctl_asym_conf_t conf;
+
+	err = get_data((void __user *)arg, &conf,
+		       sizeof(struct aai_ioctl_asym_conf_t));
+	if (err)
+		return err;
+
+	aai_setbit(aai, AAI_LBC_CLKEN, AAI_LBC_CKEN_HFRAME_MSK, conf.enable);
+	aai_setbit(aai, AAI_LBC_HFRAME,
+		   AAI_LBC_HFRAME_HFRAME_DIV_LOW_MSK, conf.div_low);
+	aai_setbit(aai, AAI_LBC_HFRAME,
+		   AAI_LBC_HFRAME_HFRAME_DIV_HIGH_MSK, conf.div_high);
+
+	return 0;
+}
+
+/**
+ * AAI Driver IO control function
+ *
+ * Dispatches hwdep ioctls to the per-feature helpers above, serialized
+ * against the interrupt handler by aai->hwlock.
+ *
+ * NOTE(review): every helper calls copy_from_user() while hwlock is held
+ * with IRQs disabled (spin_lock_irqsave below). copy_from_user() may fault
+ * and sleep, which is illegal in that context — consider copying the
+ * userspace data *before* taking the lock. Confirm against lockdep.
+ */
+static int aai_hwdep_ioctl(struct snd_hwdep *hwdep,
+			   struct file *file,
+			   unsigned int cmd,
+			   unsigned long arg)
+{
+	int err = -EINVAL;
+	uint32_t val = 0;
+	struct card_data_t *aai = hwdep->private_data;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&aai->hwlock, flags);
+
+	/**
+	 * <ul>
+	 */
+	switch (cmd) {
+	/**
+	 * <li><b>AAI_MUS0_CONF:</b><br>
+	 * Set AAI music 0 configuration register<br>
+	 * Param : AAI music configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_MUS0_CONF:
+		err = aai_hwdep_set_music_conf(aai, arg, 0);
+		break;
+
+	/**
+	 * <li><b>AAI_MUS1_CONF:</b><br>
+	 * Set AAI music 1 configuration register<br>
+	 * Param : AAI music configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_MUS1_CONF:
+		err = aai_hwdep_set_music_conf(aai, arg, 1);
+		break;
+
+	/**
+	 * <li><b>AAI_MUS2_CONF:</b><br>
+	 * Set AAI music 2 configuration register<br>
+	 * Param : AAI music configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_MUS2_CONF:
+		err = aai_hwdep_set_music_conf(aai, arg, 2);
+		break;
+
+	/**
+	 * <li><b>AAI_MUS3_CONF:</b><br>
+	 * Set AAI music 3 configuration register<br>
+	 * Param : AAI music configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_MUS3_CONF:
+		err = aai_hwdep_set_music_conf(aai, arg, 3);
+		break;
+
+	/**
+	 * <li><b>AAI_TDMIN_CONF:</b><br>
+	 * Set AAI TDM input configuration register<br>
+	 * Param : AAI TDM configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_TDMIN_CONF:
+		err = aai_hwdep_set_tdm_conf(aai, arg, TDM_IN);
+		break;
+
+	/**
+	 * <li><b>AAI_TDMOUT_CONF:</b><br>
+	 * Set AAI TDM output configuration register<br>
+	 * Param : AAI TDM configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_TDMOUT_CONF:
+		err = aai_hwdep_set_tdm_conf(aai, arg, TDM_OUT);
+		break;
+
+	/**
+	 * <li><b>AAI_ADC_CONF:</b><br>
+	 * Set AAI ADC configuration register<br>
+	 * Param : AAI ADC configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_ADC_CONF:
+		err = aai_hwdep_set_adc_conf(aai, arg, AAI_ADC);
+		break;
+
+	/**
+	 * <li><b>AAI_DAC_CONF:</b><br>
+	 * Set AAI DAC configuration register<br>
+	 * Param : AAI ADC configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_DAC_CONF:
+		err = aai_hwdep_set_adc_conf(aai, arg, AAI_DAC);
+		break;
+
+	/**
+	 * <li><b>AAI_AUX_CONF:</b><br>
+	 * Set AAI AUX configuration register<br>
+	 * Param : AAI ADC configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_AUX_CONF:
+		err = aai_hwdep_set_adc_conf(aai, arg, AAI_AUX);
+		break;
+
+	/**
+	 * <li><b>AAI_PCM1_CONF:</b><br>
+	 * Set AAI PCM 1 configuration register<br>
+	 * Param : AAI PCM configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_PCM1_CONF:
+		err = aai_hwdep_set_pcm_conf(aai, arg, 0);
+		break;
+
+	/**
+	 * <li><b>AAI_PCM2_CONF:</b><br>
+	 * Set AAI PCM 2 configuration register<br>
+	 * Param : AAI PCM configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_PCM2_CONF:
+		err = aai_hwdep_set_pcm_conf(aai, arg, 1);
+		break;
+
+	/**
+	 * <li><b>AAI_VOICE_SPEED:</b><br>
+	 * Set AAI VOICE_SPEED register<br>
+	 * Param : requested voice speed, recommended: 0x810E35 for 16k/8k;
+	 0xB1DAC7 for 22.05k/11k<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_VOICE_SPEED:
+		err = get_data((void __user *)arg, &val, sizeof(uint32_t));
+		if (!err) {
+			/* val is unsigned: "0 <= val" is trivially true,
+			 * only the upper bound actually filters. */
+			if (0 <= val && val <= 0xffffff)
+				aai_set_nbit(aai, AAI_AAICFG_VOICE_SPEED,
+					     0xffffff, 0, val);
+		}
+		break;
+
+	/**
+	 * <li><b>AAI_VOICE_8K_SPEAKER:</b><br>
+	 * Set AAI VOICE_8K register for speaker<br>
+	 * Param : Set 0 for 16k speaker, 1 for 8k speaker<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_VOICE_8K_SPEAKER:
+		err = get_data((void __user *)arg, &val, sizeof(uint32_t));
+		if (!err)
+			aai_setbit(aai, AAI_AAICFG_VOICE_8K,
+				   AAI_VOICE_8K_SPEAKER_MSK, !!val);
+		break;
+
+	/**
+	 * <li><b>AAI_VOICE_8K_PHONE:</b><br>
+	 * Set AAI VOICE_8K register for phone<br>
+	 * Param : Set 0 for 16k phone, 1 for 8k phone<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_VOICE_8K_PHONE:
+		err = get_data((void __user *)arg, &val, sizeof(uint32_t));
+		if (!err)
+			aai_setbit(aai, AAI_AAICFG_VOICE_8K,
+				   AAI_VOICE_8K_PHONE_MSK, !!val);
+		break;
+
+	/**
+	 * <li><b>AAI_GROUP_START:</b><br>
+	 * Start audio group specified in argument<br>
+	 * Param : group to be started<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_GROUP_START:
+		err = aai_hwdep_group_start(aai, arg);
+		break;
+
+	/**
+	 * <li><b>AAI_GROUP_SYNC:</b><br>
+	 * Assign audio channel to a group<br>
+	 * Param : structure containing channel name and group number<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_GROUP_SYNC:
+		err = aai_hwdep_group_sync(aai, arg);
+		break;
+
+	/**
+	 * <li><b>AAI_MFRAME_ASYMMETRIC_CONF:</b><br>
+	 * Set AAI MFRAME asymmetric clock configuration<br>
+	 * Param : AAI asymmetric clock configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_MFRAME_ASYMMETRIC_CONF:
+		err = aai_hwdep_set_mframe_asym_conf(aai, arg);
+		break;
+
+	/**
+	 * <li><b>AAI_HFRAME_ASYMMETRIC_CONF:</b><br>
+	 * Set AAI HFRAME asymmetric clock configuration<br>
+	 * Param : AAI asymmetric clock configuration structure<br>
+	 * Return : error code
+	 * <hr>
+	 */
+	case AAI_HFRAME_ASYMMETRIC_CONF:
+		err = aai_hwdep_set_hframe_asym_conf(aai, arg);
+		break;
+
+	default:
+		/* unknown command: err stays -EINVAL */
+		break;
+	}
+	/**
+	 * </ul>
+	 */
+
+	spin_unlock_irqrestore(&aai->hwlock, flags);
+	return err;
+}
+
+/* No-op open/release callback required by the snd_hwdep ops table. */
+static int aai_hwdep_dummy(struct snd_hwdep *hw, struct file *file)
+{
+	return 0;
+}
+
+/*
+ * Create the "AAIhwdep" hwdep device on the card and wire up the ioctl
+ * entry point. The device is registered later by snd_card_register().
+ * Returns 0 on success or the snd_hwdep_new() error code.
+ */
+int aai_ioctl_hwdep_new(struct card_data_t *aai)
+{
+	struct snd_hwdep *rhwdep;
+	int err = 0;
+
+	err = snd_hwdep_new(aai->card, "AAIhwdep", 0, &rhwdep);
+	if (err < 0)
+		return err;
+
+	rhwdep->private_data = aai;
+	rhwdep->ops.open = aai_hwdep_dummy;
+	rhwdep->ops.ioctl = aai_hwdep_ioctl;
+	rhwdep->ops.release = aai_hwdep_dummy;
+
+	return 0;
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_ioctl.h b/drivers/parrot/sound/p7_aai/aai_ioctl.h
new file mode 100644
index 00000000000000..ec58f3fe90542c
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_ioctl.h
@@ -0,0 +1,89 @@
+
+#ifndef _AAI_IOCTL_H_
+#define _AAI_IOCTL_H_
+
+#include <linux/ioctl.h>
+
+/*
+ * Music-in configuration
+ */
+struct aai_ioctl_music_conf_t {
+ int cycles_num;
+ int matsushita;
+ int left_frame;
+ int lsb_first;
+ int right_just;
+ int bits24;
+};
+#define AAI_MUS0_CONF _IOW('p', 1, int)
+#define AAI_MUS1_CONF _IOW('p', 2, int)
+#define AAI_MUS2_CONF _IOW('p', 3, int)
+#define AAI_MUS3_CONF _IOW('p', 4, int)
+
+/*
+ * TDM configuration
+ */
+struct aai_ioctl_tdm_conf_t {
+ int cycles_num;
+ int left_frame;
+};
+#define AAI_TDMIN_CONF _IOW('p', 5, int)
+#define AAI_TDMOUT_CONF _IOW('p', 6, int)
+
+/*
+ * ADC, DAC & AUX configuration
+ */
+struct aai_ioctl_adc_conf_t {
+ int cycles_num;
+ int matsushita;
+ int left_frame;
+ int half_frame;
+};
+#define AAI_ADC_CONF _IOW('p', 7, int)
+#define AAI_DAC_CONF _IOW('p', 8, int)
+#define AAI_AUX_CONF _IOW('p', 9, int)
+
+/*
+ * PCM configuration
+ */
+struct aai_ioctl_pcm_conf_t {
+ int start_two;
+ int cycles_pcm_high;
+ int cycles_pcm_low;
+ int low;
+ int oki;
+ int run_dual;
+};
+#define AAI_PCM1_CONF _IOW('p', 10, int)
+#define AAI_PCM2_CONF _IOW('p', 11, int)
+
+/*
+ * Voice configuration
+ */
+#define AAI_VOICE_SPEED _IOW('p', 12, int)
+#define AAI_VOICE8K_SPEAKER _IOW('p', 13, int)
+#define AAI_VOICE8K_PHONE _IOW('p', 14, int)
+
+/*
+ * Sync group controls
+ */
+struct aai_ioctl_group_t {
+ char *name;
+ int group;
+};
+#define AAI_GROUP_START _IOW('p', 15, int)
+#define AAI_GROUP_SYNC _IOW('p', 16, int)
+
+/*
+ * ASYNC configuration
+ */
+struct aai_ioctl_asym_conf_t {
+ int enable;
+ short div_high;
+ short div_low;
+};
+#define AAI_MFRAME_ASYMMETRIC_CONF _IOW('p', 19, int)
+#define AAI_HFRAME_ASYMMETRIC_CONF _IOW('p', 20, int)
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_irq.c b/drivers/parrot/sound/p7_aai/aai_irq.c
new file mode 100644
index 00000000000000..745b17b101ef59
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_irq.c
@@ -0,0 +1,199 @@
+
+#include <linux/ratelimit.h>
+
+#include "aai.h"
+#include "aai_hw.h"
+
+/*
+ * Period elapsed: advance the channel's DMA ring pointers, re-arm the
+ * follower address register, and notify ALSA when a full period of data
+ * has been transferred.
+ */
+static inline void aai_period_elapsed(struct card_data_t *aai,
+				      struct aai_device_t *chan)
+{
+	struct aai_hw_channel_t *fifo = &chan->fifo;
+	chan->bytes_count += chan->dma_xfer_size;
+
+	/* Update pointers to DMA area, wrapping at the buffer end */
+	fifo->pcurrent = fifo->pfollow;
+	fifo->pfollow += chan->dma_xfer_size;
+	if (fifo->pfollow >= fifo->pend)
+		fifo->pfollow = fifo->pstart;
+
+	aai_writereg(aai, (uint32_t)chan->fifo.pfollow,
+		     (uint32_t)chan->fifo.dmana, 0);
+	/* chip revisions after R1 need an explicit "address valid" strobe */
+	if (aai->pdata->chiprev > P7_CHIPREV_R1)
+		aai_writereg(aai, 1, (uint32_t)chan->fifo.dmanv, 0);
+
+	/* Call snd_period_elapsed once a full period has accumulated */
+	if (chan->bytes_count >= chan->period_bytes) {
+		if (chan->direction == AAI_RX)
+			snd_pcm_period_elapsed(chan2captsubstream(aai,
+								  chan->ipcm));
+		else
+			snd_pcm_period_elapsed(chan2pbacksubstream(aai,
+								   chan->ipcm));
+		chan->nbperiod++;
+		chan->bytes_count -= chan->period_bytes;
+	}
+}
+
+/* Second stage IRQ functions */
+/*
+ * Second stage IRQ: report FIFO error (xrun) interrupts for the given
+ * direction, then acknowledge them all.
+ * NOTE(review): dmaen is assigned but never used here, unlike aai_irq_ok()
+ * which masks itsrc with the DMA-enable register — confirm whether the
+ * same masking was intended.
+ */
+static inline void aai_irq_fifo_err(struct card_data_t *aai,
+				    enum aai_dir_t direction)
+{
+	uint32_t itreg;
+	uint32_t ackreg;
+	uint32_t itsrc;
+	uint32_t dmaen;
+	struct aai_device_t *chan;
+	int ipcm;
+
+	if (direction == AAI_RX) {
+		itreg = AAI_MAIN_RX_ERR_IT;
+		ackreg = AAI_MAIN_RX_ERR_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_RX;
+	} else {
+		itreg = AAI_MAIN_TX_ERR_IT;
+		ackreg = AAI_MAIN_TX_ERR_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_TX;
+	}
+
+	itsrc = aai_readreg(aai, itreg);
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		if ((chan->direction & direction) &&
+		    ((1 << chan->fifo.fifo_id) & itsrc)) {
+			pr_err_ratelimited("%s (%d) - %s: fifo %d xrun\n",
+				chan->name, chan->ipcm, __func__, chan->fifo.fifo_id);
+		}
+	}
+
+	/* Acknowledge all interrupts */
+	aai_writereg(aai, itsrc, ackreg, 0);
+}
+
+/*
+ * Second stage IRQ: report DMA error interrupts for the given direction,
+ * then acknowledge them all.
+ * NOTE(review): dmaen is assigned but never used (same as fifo_err above).
+ */
+static inline void aai_irq_dma_err(struct card_data_t *aai,
+				   enum aai_dir_t direction)
+{
+	uint32_t itreg;
+	uint32_t ackreg;
+	uint32_t itsrc;
+	uint32_t dmaen;
+	struct aai_device_t *chan;
+	int ipcm;
+
+	if (direction == AAI_RX) {
+		itreg = AAI_MAIN_RX_DMA_IT;
+		ackreg = AAI_MAIN_RX_DMA_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_RX;
+	} else {
+		itreg = AAI_MAIN_TX_DMA_IT;
+		ackreg = AAI_MAIN_TX_DMA_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_TX;
+	}
+
+	itsrc = aai_readreg(aai, itreg);
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		if ((chan->direction & direction)
+				&& ((1 << chan->fifo.fifo_id) & itsrc)) {
+			pr_err_ratelimited("%s (%d) - %s: dma error\n",
+				chan->name, chan->ipcm, __func__);
+		}
+	}
+
+	/* Acknowledge all interrupts */
+	aai_writereg(aai, itsrc, ackreg, 0);
+}
+
+/*
+ * Second stage IRQ: handle successful DMA-transfer interrupts for the
+ * given direction. Pending bits are masked with the DMA-enable register
+ * so interrupts from channels no longer enabled are ignored. Each handled
+ * channel advances its ring via aai_period_elapsed() and is acknowledged
+ * individually.
+ */
+static inline void aai_irq_ok(struct card_data_t *aai,
+			      enum aai_dir_t direction)
+{
+	uint32_t itsrc;
+	uint32_t reg;
+	uint32_t itreg;
+	uint32_t ackreg;
+	uint32_t dmaen;
+	struct aai_device_t *chan;
+	int ipcm;
+
+	if (direction == AAI_RX) {
+		itreg = AAI_MAIN_RX_OK_IT;
+		ackreg = AAI_MAIN_RX_OK_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_RX;
+	} else {
+		itreg = AAI_MAIN_TX_OK_IT;
+		ackreg = AAI_MAIN_TX_OK_ITACK;
+		dmaen = AAI_MAIN_DMA_EN_TX;
+	}
+
+	itsrc = aai_readreg(aai, itreg);
+	itsrc &= aai_readreg(aai, dmaen);
+
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		if ((chan->direction & direction)
+				&& ((1 << chan->fifo.fifo_id) & itsrc)) {
+			chan->nbirq++;
+			aai_period_elapsed(aai, chan);
+
+			/* Acknowledge interrupt */
+			reg = 1 << chan->fifo.fifo_id;
+			aai_writereg(aai, reg, ackreg, 0);
+		}
+	}
+}
+
+/**
+ * @brief Main P7 AAI IRQ handler
+ *
+ * First stage: demultiplex the summary bits of AAI_MAIN_IT into the
+ * error/ok second-stage handlers; error bits are handled unmasked,
+ * "ok" bits only when enabled in AAI_MAIN_ITEN.
+ *
+ * @param irq number of irq
+ * @param dev device targeted
+ *
+ * @return IRQ_HANDLED
+ **/
+irqreturn_t aai_irq_p7(int irq, void *dev)
+{
+	const uint32_t itgroup_msk = 0x7fff0000;
+	struct card_data_t *aai = dev;
+	uint32_t itsrc;
+	uint32_t itsrc_masked;
+
+	itsrc = aai_readreg(aai, AAI_MAIN_IT);
+
+	/*
+	 * mask itsrc with expected interrupts;
+	 * might be some remnants of previous channels
+	 */
+	itsrc_masked = itsrc & aai_readreg(aai, AAI_MAIN_ITEN);
+
+	/* First stage of aai irq */
+	if (unlikely(itsrc & (1 << 1)))
+		aai_irq_fifo_err(aai, AAI_RX);
+
+	if (unlikely(itsrc & (1 << 2)))
+		aai_irq_dma_err(aai, AAI_RX);
+
+	if (unlikely(itsrc & (1 << 9)))
+		aai_irq_fifo_err(aai, AAI_TX);
+
+	if (unlikely(itsrc & (1 << 10)))
+		aai_irq_dma_err(aai, AAI_TX);
+
+	if (itsrc_masked & (1 << 0))
+		aai_irq_ok(aai, AAI_RX);
+
+	if (itsrc_masked & (1 << 8))
+		aai_irq_ok(aai, AAI_TX);
+
+	/*
+	 * Acknowledge group interrupt if necessary.
+	 * Individual interrupts have been handled in stage 2 functions.
+	 * NOTE(review): the unconditional ACK of the full itsrc below makes
+	 * the group-ACK write above redundant (its bits are a subset of
+	 * itsrc) — confirm whether both writes are required by the IP.
+	 */
+	if (itgroup_msk & itsrc_masked)
+		aai_writereg(aai, itgroup_msk & itsrc_masked,
+			     AAI_MAIN_ITACK, 0);
+
+	aai_writereg(aai, itsrc, AAI_MAIN_ITACK, 0);
+
+	return IRQ_HANDLED;
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_module.c b/drivers/parrot/sound/p7_aai/aai_module.c
new file mode 100644
index 00000000000000..52dd45ed1af30a
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_module.c
@@ -0,0 +1,332 @@
+/**
+ * @file aai_module.c
+ * @brief P7 AAI kernel module layer
+ *
+ * @author adrien.charruel@parrot.com
+ * @date 2011-05-17
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Parrot Advanced Audio Interface Driver
+ *
+ */
+
+#include "aai.h"
+#include "aai_alsa.h"
+#include "aai_hw.h"
+
+
+/**
+ * @brief Bring the AAI controller up: clock, hardware init, PCM devices.
+ *
+ * Shared between probe and resume.  On failure the controller clock is
+ * disabled and unprepared again, so the caller never inherits a
+ * half-started controller (the original left the clock running when
+ * aai_hw_init() or aai_pcms_init() failed).
+ *
+ * @param aai_data driver private data
+ * @param pdev platform device being started
+ *
+ * @return 0 on success, negative error code otherwise
+ */
+static int aai_start(struct card_data_t *aai_data, struct platform_device *pdev)
+{
+	int err;
+
+	/* prepare and enable controller clock */
+	err = clk_prepare_enable(aai_data->clk[ctrl_clk]);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable ctrl clock.\n");
+		return err;
+	}
+
+	/* AAI hardware init */
+	err = aai_hw_init(aai_data);
+	if (err) {
+		dev_err(&pdev->dev, "AAI hardware init failed\n");
+		err = -EINVAL;
+		goto disable_clk;
+	}
+
+	/* Register devices */
+	err = aai_pcms_init(aai_data);
+	if (err) {
+		dev_err(&pdev->dev, "aai pcms Init failed\n");
+		goto disable_clk;
+	}
+
+	return 0;
+
+disable_clk:
+	/* don't leave the controller clock running on a failed start */
+	clk_disable_unprepare(aai_data->clk[ctrl_clk]);
+	return err;
+}
+
+/**
+ * @brief Probe the P7 AAI platform device.
+ *
+ * Maps the register bank, grabs the clocks and IRQ line, starts the
+ * controller and installs the interrupt handler.  Each error path
+ * releases exactly what was acquired before it.  Fixes vs. original:
+ * the pinctrl failure path now sets a real error code (it used to
+ * return 0), request_irq() failure now unwinds instead of leaking
+ * everything, and platform_get_irq() failure no longer disables a
+ * clock that was never enabled.
+ *
+ * NOTE(review): the ALSA card created by aai_alsa_probe() is never
+ * freed on these failure paths (same as original) -- confirm whether
+ * snd_card_free() is needed here.
+ *
+ * @param pdev platform device to probe
+ *
+ * @return 0 on success, negative error code otherwise
+ */
+static int __devinit aai_module_probe(struct platform_device *pdev)
+{
+	int err;
+	struct resource *res;
+	struct snd_card *card;
+	struct card_data_t *aai_data;
+	struct clk *clk;
+
+	/* Get register bank description */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Can't get IORESOURCE_MEM\n");
+		return -ENOENT;
+	}
+
+	/* Init driver structures */
+	card = aai_alsa_probe(pdev);
+	if (!card) {
+		dev_err(&pdev->dev, "Can't probe AAI alsa card\n");
+		return -ENXIO;
+	}
+	aai_data = card->private_data;
+	platform_set_drvdata(pdev, card);
+
+	aai_data->pdata = dev_get_platdata(&pdev->dev);
+	if (!aai_data->pdata->pad) {
+		dev_err(&pdev->dev, "no pad configuration\n");
+		err = -EINVAL;
+		goto no_iores;
+	}
+
+	if (!aai_data->pdata->device_list) {
+		dev_err(&pdev->dev, "no device list defined\n");
+		err = -EINVAL;
+		goto no_iores;
+	}
+
+	/* Reserve and map register bank */
+	aai_data->ioarea = request_mem_region(res->start,
+					      resource_size(res),
+					      pdev->name);
+	if (!aai_data->ioarea) {
+		dev_err(&pdev->dev, "Can't reserve IO region\n");
+		err = -ENXIO;
+		goto no_iores;
+	}
+
+	aai_data->iobase = ioremap(res->start, resource_size(res));
+	if (!aai_data->iobase) {
+		dev_err(&pdev->dev, "Can't map IO\n");
+		err = -ENXIO;
+		goto no_iomap;
+	}
+
+	/* Get controller clock */
+	clk = clk_get(&pdev->dev, "ctrl_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get aai ctrl clk\n");
+		err = -EINVAL;
+		goto no_ctrlclk;
+	}
+	aai_data->clk[ctrl_clk] = clk;
+
+	clk = clk_get(&pdev->dev, "i2s_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get aai i2s clk\n");
+		err = -EINVAL;
+		goto no_i2sclk;
+	}
+	aai_data->clk[i2s_clk] = clk;
+
+	clk = clk_get(&pdev->dev, "pcm1_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get aai pcm1 clk\n");
+		err = -EINVAL;
+		goto no_pcm1clk;
+	}
+	aai_data->clk[pcm1_clk] = clk;
+
+	clk = clk_get(&pdev->dev, "pcm2_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get aai pcm2 clk\n");
+		err = -EINVAL;
+		goto no_pcm2clk;
+	}
+	aai_data->clk[pcm2_clk] = clk;
+
+	clk = clk_get(&pdev->dev, "spdif_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get aai spdif clk\n");
+		err = -EINVAL;
+		goto no_spdifclk;
+	}
+	aai_data->clk[spdif_clk] = clk;
+
+	/* Get IRQ resource (clock not enabled yet: plain put on failure) */
+	aai_data->irq = platform_get_irq(pdev, 0);
+	if (aai_data->irq < 0) {
+		dev_err(&pdev->dev, "Can't retrieve IRQ number\n");
+		err = -ENOENT;
+		goto no_irq;
+	}
+
+	/*
+	 * Reset registers to their default values, enable main clk and
+	 * register the PCM devices.  aai_start() is also used when
+	 * resuming after suspend.
+	 */
+	err = aai_start(aai_data, pdev);
+	if (err)
+		goto no_irq;
+
+	/* Install IRQ handler */
+	err = request_irq(aai_data->irq, aai_data->irq_handler,
+			  IRQF_DISABLED, pdev->name, aai_data);
+	if (err) {
+		dev_err(&pdev->dev, "failed attaching IRQ %d\n", aai_data->irq);
+		goto no_request_irq;
+	}
+
+	aai_data->pin = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(aai_data->pin)) {
+		dev_err(&pdev->dev, "Can't get pin ctrl\n");
+		err = PTR_ERR(aai_data->pin);
+		goto no_pinctrl;
+	}
+
+	dev_info(&pdev->dev, "P7 AAI module initialized\n");
+	return 0;
+
+no_pinctrl:
+	free_irq(aai_data->irq, aai_data);
+no_request_irq:
+	clk_disable_unprepare(aai_data->clk[ctrl_clk]);
+no_irq:
+	clk_put(aai_data->clk[spdif_clk]);
+no_spdifclk:
+	clk_put(aai_data->clk[pcm2_clk]);
+no_pcm2clk:
+	clk_put(aai_data->clk[pcm1_clk]);
+no_pcm1clk:
+	clk_put(aai_data->clk[i2s_clk]);
+no_i2sclk:
+	clk_put(aai_data->clk[ctrl_clk]);
+no_ctrlclk:
+	iounmap(aai_data->iobase);
+no_iomap:
+	release_resource(aai_data->ioarea);
+	kfree(aai_data->ioarea);
+no_iores:
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+/**
+ * @brief Remove the P7 AAI platform device.
+ *
+ * aai_alsa_remove() frees @card and, with it, the embedded private
+ * data @aai_data.  Everything still needed afterwards (iomem pointer,
+ * IO region, pinctrl handle, IRQ number) is therefore saved in locals
+ * BEFORE the call -- the original dereferenced aai_data after it had
+ * been freed.  Teardown also proceeds even if the hardware reset
+ * fails, so no resource is leaked on that path either.
+ *
+ * @param pdev platform device being removed
+ *
+ * @return 0 on success, -EINVAL if the hardware reset failed
+ */
+static int __devexit aai_module_remove(struct platform_device *pdev)
+{
+	int err;
+	struct snd_card *card = platform_get_drvdata(pdev);
+	struct card_data_t *aai_data = card->private_data;
+	void __iomem *iobase = aai_data->iobase;
+	struct resource *ioarea = aai_data->ioarea;
+	struct pinctrl *pin = aai_data->pin;
+	int irq = aai_data->irq;
+
+	/* reset hardware layer; report failure but keep tearing down */
+	err = aai_hw_remove(aai_data);
+	if (err) {
+		dev_err(&pdev->dev, "AAI hardware remove failed\n");
+		err = -EINVAL;
+	}
+
+	free_irq(irq, aai_data);
+
+	/*
+	 * beware, this call frees drv_data which is included
+	 * in the card structure: aai_data is dead from here on.
+	 */
+	aai_alsa_remove(card);
+
+	iounmap(iobase);
+	release_resource(ioarea);
+	kfree(ioarea);
+
+	platform_set_drvdata(pdev, NULL);
+	pinctrl_put(pin);
+
+	return err;
+}
+
+/**
+ * @brief Suspend callback.
+ *
+ * Marks every internal audio channel of every device as unused,
+ * suspends all PCM streams, releases the IRQ handler (reinstalled by
+ * aai_module_resume) and gates the i2s and controller clocks before
+ * declaring the card D3cold.
+ *
+ * @param pdev platform device being suspended
+ * @param msg power management message (unused)
+ *
+ * @return 0 (always succeeds)
+ */
+static int aai_module_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+	struct snd_card *card = platform_get_drvdata(pdev);
+	struct card_data_t *aai = card->private_data;
+	struct aai_device_t *chan;
+	struct aai_hw_channel_t *fifo;
+	struct aai_audio_chan_t *internal_chan;
+	int ipcm, i;
+
+	for (ipcm = 0; ipcm < aai->chans_nb; ipcm++) {
+		chan = &aai->chans[ipcm];
+		fifo = &chan->fifo;
+
+		/* release every internal channel backing this device */
+		for (i = 0; i < fifo->chan_nb; i++) {
+			internal_chan = fifo->chan_list[i];
+			internal_chan->used = 0;
+		}
+		snd_pcm_suspend_all(aai->pcms[ipcm]);
+	}
+
+	free_irq(aai->irq, aai);
+
+	/*
+	 * NOTE(review): clocks are gated with clk_disable() here while
+	 * the resume path re-enables via clk_prepare_enable() in
+	 * aai_start(); the prepare counts look unbalanced -- confirm.
+	 */
+	clk_disable(aai->clk[i2s_clk]);
+	clk_disable(aai->clk[ctrl_clk]);
+
+	snd_power_change_state(card, SNDRV_CTL_POWER_D3cold);
+
+	return 0;
+}
+
+/**
+ * @brief Resume callback.
+ *
+ * Restores power state, restarts clocks/hardware/PCMs through
+ * aai_start(), then reinstalls the interrupt handler that was
+ * released by the suspend callback.
+ *
+ * @param pdev platform device being resumed
+ *
+ * @return 0 on success, negative error code otherwise
+ */
+static int aai_module_resume(struct platform_device *pdev)
+{
+	struct snd_card *card = platform_get_drvdata(pdev);
+	struct card_data_t *aai = card->private_data;
+	int ret;
+
+	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+	ret = aai_start(aai, pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to resume %d\n", ret);
+		return ret;
+	}
+
+	/* Install IRQ handler (released in aai_module_suspend) */
+	ret = request_irq(aai->irq, aai->irq_handler,
+			  IRQF_DISABLED, pdev->name, aai);
+	if (ret)
+		dev_err(&pdev->dev, "failed attaching IRQ %d\n", aai->irq);
+
+	return ret;
+}
+
+/*
+ * Platform driver glue for the "aai" device.
+ * NOTE(review): .remove points to a __devexit function; kernels of
+ * this era usually wrap it as __devexit_p(aai_module_remove) so the
+ * symbol is dropped when hotplug is compiled out -- confirm.
+ */
+static struct platform_driver aai_module_driver = {
+	.probe  = aai_module_probe,
+	.remove = aai_module_remove,
+	.suspend = aai_module_suspend,
+	.resume = aai_module_resume,
+	.driver = {
+		.name  = "aai",
+		.owner = THIS_MODULE,
+	},
+};
+
+/*
+ * Module init/exit boilerplate: register/unregister the platform
+ * driver.  module_platform_driver() expands to exactly the
+ * hand-written aai_module_init()/aai_module_exit() pair it replaces.
+ */
+module_platform_driver(aai_module_driver);
+
+/* Module metadata */
+MODULE_DESCRIPTION("Parrot7 Advanced Audio Interface driver");
+MODULE_AUTHOR("Adrien Charruel, <adrien.charruel@parrot.com>");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Parrot, P7AAI}}");
+
diff --git a/drivers/parrot/sound/p7_aai/aai_ops.c b/drivers/parrot/sound/p7_aai/aai_ops.c
new file mode 100644
index 00000000000000..76eedbe9ec68c0
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_ops.c
@@ -0,0 +1,322 @@
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_rules.h"
+
+/**
+ * @brief Map the substream's DMA-coherent buffer into user space.
+ *
+ * The @dev argument is now forwarded to dma_mmap_coherent() instead of
+ * being ignored in favor of a hard-coded NULL; the only current caller
+ * (aai_ops_mmap) passes NULL, so behavior is unchanged, but a future
+ * caller with a real device gets correct per-device DMA ops.
+ *
+ * @param dev device owning the DMA buffer (may be NULL)
+ * @param substream substream whose runtime buffer is mapped
+ * @param vma user virtual memory area to populate
+ *
+ * @return 0 on success, negative error code otherwise
+ */
+static int devdma_mmap(struct device *dev, struct snd_pcm_substream *substream,
+		       struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	return dma_mmap_coherent(dev, vma, runtime->dma_area,
+				 runtime->dma_addr, runtime->dma_bytes);
+}
+
+/**
+ * @brief ALSA mmap operator: map the DMA buffer into user space.
+ *
+ * @param substream substream whose buffer is mapped
+ * @param vma user virtual memory area
+ *
+ * @return error number (0 on success)
+ */
+static int aai_ops_mmap(struct snd_pcm_substream *substream,
+			struct vm_area_struct *vma)
+{
+	/* no per-substream struct device is tracked here, hence NULL */
+	return devdma_mmap(NULL, substream, vma);
+}
+
+/**
+ * @brief Open operator
+ * Copies the channel's static hardware descriptor (chan->hw) field by
+ * field into the substream's runtime, then calls the optional
+ * hardware-layer open hook.
+ *
+ * @param substream to open
+ *
+ * @return error number (0, or the hw open hook's result)
+ */
+static int aai_ops_open(struct snd_pcm_substream *substream)
+{
+	int err = 0;
+	struct aai_device_t *chan = substream2chan(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct aai_hw_ops *hw_ops = &chan->aai_hw_ops;
+
+	/* copy parameters to runtime structure */
+	runtime->hw.info = chan->hw->info;
+	runtime->hw.formats = chan->hw->formats;
+	runtime->hw.channels_min = chan->hw->channels_min;
+	runtime->hw.channels_max = chan->hw->channels_max;
+	runtime->hw.rates = chan->hw->rates;
+	runtime->hw.rate_min = chan->hw->rate_min;
+	runtime->hw.rate_max = chan->hw->rate_max;
+	runtime->hw.buffer_bytes_max = chan->hw->buffer_bytes_max;
+	runtime->hw.period_bytes_min = chan->hw->period_bytes_min;
+	runtime->hw.period_bytes_max = chan->hw->period_bytes_max;
+	runtime->hw.periods_min = chan->hw->periods_min;
+	runtime->hw.periods_max = chan->hw->periods_max;
+	runtime->hw.fifo_size = chan->hw->fifo_size;
+
+	/*
+	 * NOTE(review): hw_ops is the address of an embedded member and
+	 * can never be NULL; the "hw_ops &&" guard is redundant.
+	 */
+	if (hw_ops && hw_ops->open)
+		err = hw_ops->open(chan->driver_data, chan);
+
+	return err;
+}
+
+/**
+ * @brief Close operator: give the hardware layer a chance to disable
+ *        the device.
+ *
+ * @param substream to close
+ *
+ * @return error number (0 when no close hook is installed)
+ */
+static int aai_ops_close(struct snd_pcm_substream *substream)
+{
+	struct aai_device_t *chan = substream2chan(substream);
+	struct aai_hw_ops *ops = &chan->aai_hw_ops;
+
+	if (ops && ops->close)
+		return ops->close(chan->driver_data, chan);
+
+	return 0;
+}
+
+/**
+ * @brief Params operator
+ * This is called when the hardware parameter (hw_params) is set up by
+ * the application, that is, once when the buffer size, the period size,
+ * the format, etc. are defined for the pcm substream.
+ * Allocates the DMA buffer and points the channel FIFO at it; only
+ * interleaved access is supported.
+ * Parameters are retrieved through the params_xxx() macros.
+ *
+ * @param substream targeted substream
+ * @param hw_params parameters to set
+ *
+ * @return error number
+ */
+static int aai_ops_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *hw_params)
+{
+	int err = 0;
+	struct aai_device_t *chan = substream2chan(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct aai_hw_ops *hw_ops = &chan->aai_hw_ops;
+	int access;
+
+	/* cache negotiated buffer size and rate on the channel */
+	chan->bufsize = params_buffer_bytes(hw_params);
+	chan->rate = params_rate(hw_params);
+	access = params_access(hw_params);
+
+	/* Check access mode. Only interleaved is supported. */
+	if ((access != SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) &&
+	    (access != SNDRV_PCM_ACCESS_RW_INTERLEAVED)) {
+		dev_warn(chan2dev(chan),
+			 "%s (%d) - unsupported access type 0x%x\n",
+			 chan->name, chan->ipcm, access);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* audio stream buffer allocation */
+	err = aai_hw_dma_alloc(substream, chan->bufsize);
+	if (err < 0)
+		goto exit;
+
+	/* Set fifo pointers to DMA area, for interleaved devices only */
+	chan->fifo.pstart = (void *)runtime->dma_addr;
+	chan->fifo.pend = chan->fifo.pstart + chan->bufsize;
+	chan->fifo.pcurrent = chan->fifo.pstart;
+
+	/* optional per-device hook supplied by the hardware layer */
+	if (hw_ops && hw_ops->hw_params)
+		err = hw_ops->hw_params(chan->driver_data, chan, hw_params);
+exit:
+	return err;
+}
+
+/**
+ * @brief Prepare operator.
+ * This callback is called when the pcm is "prepared".
+ * The difference from hw_params is that the prepare callback is called
+ * each time snd_pcm_prepare() is called, i.e. when recovering after
+ * underruns, etc.  It is also invoked directly by the TRIGGER_RESUME
+ * path in aai_ops_trigger().
+ * Refreshes period bookkeeping, re-prepares the DMA area, applies the
+ * channel's rule list and rewinds the FIFO pointer.
+ *
+ * @param substream to be prepared
+ *
+ * @return error number
+ */
+static int aai_ops_prepare(struct snd_pcm_substream *substream)
+{
+	int err = 0;
+	int i;
+	struct aai_device_t *chan = substream2chan(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct aai_hw_ops *hw_ops = &chan->aai_hw_ops;
+
+	/* Init channel infos */
+	chan->period_frames = runtime->period_size;
+	chan->period_bytes = frames_to_bytes(runtime, runtime->period_size);
+	chan->dma_area = runtime->dma_area;
+
+	/* Prepare DMA area */
+	err = aai_hw_dma_prepare(chan);
+	if (err)
+		goto exit;
+
+	/* Apply rules (RULE_END terminates the list) */
+	for (i = 0; i < NB_RULES; i++) {
+		if (chan->rules[i] == RULE_END)
+			break;
+		err = aai_rule_apply(chan->driver_data, chan->rules[i]);
+		if (err)
+			goto exit;
+	}
+
+	/* restart hardware progress tracking from the buffer start */
+	chan->fifo.pcurrent = chan->fifo.pstart;
+
+	if (hw_ops && hw_ops->prepare)
+		err = hw_ops->prepare(chan->driver_data, chan);
+exit:
+	return err;
+}
+
+/**
+ * @brief Free operator: release the DMA buffer and undo the channel's
+ *        rule list.
+ *
+ * @param substream to be released
+ *
+ * @return error number
+ */
+static int aai_ops_free(struct snd_pcm_substream *substream)
+{
+	int err = 0;
+	int i;
+	struct aai_device_t *chan = substream2chan(substream);
+	struct aai_hw_ops *hw_ops = &chan->aai_hw_ops;
+
+	aai_hw_dma_free(substream);
+
+	/*
+	 * Remove rules (RULE_END terminates the list).
+	 * NOTE(review): a failing aai_rule_remove() skips the remaining
+	 * rules AND the hw free hook -- confirm this is intended.
+	 */
+	for (i = 0; i < NB_RULES; i++) {
+		if (chan->rules[i] == RULE_END)
+			break;
+		err = aai_rule_remove(chan->driver_data, chan->rules[i]);
+		if (err)
+			goto exit;
+	}
+
+	if (hw_ops && hw_ops->free)
+		err = hw_ops->free(chan->driver_data, chan);
+exit:
+	return err;
+}
+
+/**
+ * @brief Pointer operator.
+ *
+ * Hardware progress is tracked by the channel FIFO's current pointer;
+ * its distance from the buffer start, converted to frames, is the
+ * position ALSA asks for.
+ *
+ * @param substream targeted substream
+ *
+ * @return amount of frames used within the audio buffer
+ */
+static snd_pcm_uframes_t aai_ops_pointer(struct snd_pcm_substream *substream)
+{
+	struct aai_device_t *chan = substream2chan(substream);
+	ssize_t consumed = chan->fifo.pcurrent - chan->fifo.pstart;
+
+	return bytes_to_frames(substream->runtime, consumed);
+}
+
+/**
+ * @brief Trigger operator
+ *
+ * Dispatches ALSA trigger commands to the hardware start/stop helpers.
+ * The channel pointer is validated BEFORE any dereference: the
+ * original read chan->driver_data and called chan2dev(chan) while
+ * chan could still be NULL.
+ *
+ * @param substream targeted substream
+ * @param cmd command to execute
+ *
+ * @return error number
+ */
+static int aai_ops_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct aai_device_t *chan = substream2chan(substream);
+	struct card_data_t *aai;
+	int err = 0;
+
+	if (chan == NULL) {
+		/* chan2dev(chan) cannot be used here: chan is NULL */
+		pr_warn("aai: device pointer is NULL substream @ %p",
+			substream);
+		return -EINVAL;
+	}
+
+	aai = chan->driver_data;
+	if (chan->ipcm >= aai->chans_nb) {
+		dev_warn(chan2dev(chan),
+			 "substream @ %p - device identifier %d is out of range",
+			 substream, chan->ipcm);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		dev_dbg(chan2dev(chan), "%s (%d) - TRIGGER_START\n",
+			chan->name, chan->ipcm);
+		err = aai_hw_start_channel(chan);
+		break;
+
+	case SNDRV_PCM_TRIGGER_RESUME:
+		dev_dbg(chan2dev(chan), "%s(%d) - TRIGGER_RESUME\n",
+			chan->name, chan->ipcm);
+		/* re-prepare DMA and rules before restarting the channel */
+		err = aai_ops_prepare(substream);
+		if (!err)
+			err = aai_hw_start_channel(chan);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+		dev_dbg(chan2dev(chan), "%s(%d) - TRIGGER_STOP it:%d %d\n",
+			chan->name, chan->ipcm, chan->nbirq, chan->nbperiod);
+		err = aai_hw_stop_channel(chan);
+		break;
+
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		dev_dbg(chan2dev(chan), "%s(%d) - TRIGGER_SUSPEND\n",
+			chan->name, chan->ipcm);
+		err = aai_hw_stop_channel(chan);
+		break;
+
+	default:
+		dev_dbg(chan2dev(chan), "%s(%d) - TRIGGER UNKNOWN\n",
+			chan->name, chan->ipcm);
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * @brief IOCTL operator
+ * No driver-specific ioctls: everything is delegated to the generic
+ * ALSA PCM library helper.
+ *
+ * @param substream targeted substream
+ * @param cmd command to execute
+ * @param arg argument to the command
+ *
+ * @return error number
+ */
+static int aai_ops_ioctl(struct snd_pcm_substream *substream,
+			 unsigned int cmd,
+			 void *arg)
+{
+	return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+/* ALSA PCM operator table shared by all AAI playback/capture devices */
+struct snd_pcm_ops aai_pcm_ops = {
+	.open      = aai_ops_open,
+	.close     = aai_ops_close,
+	.ioctl     = aai_ops_ioctl,
+	.hw_params = aai_ops_params,
+	.hw_free   = aai_ops_free,
+	.prepare   = aai_ops_prepare,
+	.trigger   = aai_ops_trigger,
+	.pointer   = aai_ops_pointer,
+	.mmap      = aai_ops_mmap,
+};
+
diff --git a/drivers/parrot/sound/p7_aai/aai_platform_data.h b/drivers/parrot/sound/p7_aai/aai_platform_data.h
new file mode 100644
index 00000000000000..41f0e414f72ff7
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_platform_data.h
@@ -0,0 +1,69 @@
+/*
+ * Platform data definitions for the P7 AAI driver: routable internal
+ * signals, pad descriptors and board-level configuration.
+ *
+ * The include guard is renamed: the original "_AAI_SIGNALS_H_" both
+ * used a reserved identifier (leading underscore + uppercase) and did
+ * not match the file name (aai_platform_data.h).
+ */
+#ifndef AAI_PLATFORM_DATA_H
+#define AAI_PLATFORM_DATA_H
+
+/* Internal AAI signals that can be routed to a pad */
+enum aai_signal_t {
+	AAI_SIG_MCLK = 0,
+	AAI_SIG_MAIN_I2S_FRAME,
+	AAI_SIG_HALF_RATE_FRAME,
+	AAI_SIG_ADC_BIT_CLOCK,
+	AAI_SIG_DAC_BIT_CLOCK,
+	AAI_SIG_AUX_BIT_CLOCK,
+	AAI_SIG_TDM_OUT_BIT_CLOCK,
+	AAI_SIG_OUT_DAC0,
+	AAI_SIG_OUT_DAC1,
+	AAI_SIG_OUT_DAC2,
+	AAI_SIG_OUT_DAC3,
+	AAI_SIG_IN_MIC0,
+	AAI_SIG_IN_MIC1,
+	AAI_SIG_IN_MIC2,
+	AAI_SIG_IN_MIC3,
+	AAI_SIG_I2S0_IN,
+	AAI_SIG_I2S0_FRAME,
+	AAI_SIG_I2S1_IN,
+	AAI_SIG_I2S1_FRAME,
+	AAI_SIG_I2S2_IN,
+	AAI_SIG_I2S2_FRAME,
+	AAI_SIG_I2S3_IN,
+	AAI_SIG_I2S3_FRAME,
+	AAI_SIG_TDM_IN,
+	AAI_SIG_TDM_OUT,
+	AAI_SIG_PCM1_CLK,
+	AAI_SIG_PCM1_FRAME,
+	AAI_SIG_PCM1_IN,
+	AAI_SIG_PCM1_OUT,
+	AAI_SIG_PCM2_CLK,
+	AAI_SIG_PCM2_FRAME,
+	AAI_SIG_PCM2_IN,
+	AAI_SIG_PCM2_OUT,
+	AAI_SIG_SPDIF_RX,
+	AAI_SIG_SPDIF_TX,
+	AAI_SIG_TDM_IN_BIT_CLOCK
+};
+
+/* Direction flags for a routed pad (bitmask: a pad may be both) */
+enum pad_direction {
+	PAD_IN = 1 << 0,
+	PAD_OUT = 1 << 1
+};
+
+/* One pad routing entry: which signal, on which pad, which way */
+struct aai_pad_t {
+	int signal;	/* enum aai_signal_t value */
+	int index;	/* pad index */
+	int direction;	/* enum pad_direction bitmask */
+};
+
+/*
+ * One board-specific register setting (address, bit shift, mask,
+ * value) -- presumably applied by the hw layer at init; verify
+ * against the aai_hw code.
+ */
+struct aai_conf_set {
+	int addr;
+	int shift;
+	int mask;
+	int value;
+};
+
+/* Board-supplied configuration handed over through platform data */
+struct aai_platform_data {
+	struct aai_pad_t *pad;		/* pad routing table */
+	int chiprev;			/* chip revision */
+	struct aai_conf_set *aai_conf;	/* extra register settings */
+	char **device_list;		/* names of devices to expose */
+};
+
+#endif /* AAI_PLATFORM_DATA_H */
diff --git a/drivers/parrot/sound/p7_aai/aai_regs.c b/drivers/parrot/sound/p7_aai/aai_regs.c
new file mode 100644
index 00000000000000..74386c3f327d0e
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_regs.c
@@ -0,0 +1,827 @@
+#include "aai.h"
+#include "aai_hw.h"
+
+#ifdef CONFIG_SND_AAI_DEBUG
+
+enum attr_t {				/* access attributes for entries of the debug register map */
+ AAI_READ = 1 << 0,			/* register may be read */
+ AAI_WRITE = 1 << 1			/* register may be written */
+};
+
+/**
+ * Virtual register descriptor, used only to display debug information
+ * (built under CONFIG_SND_AAI_DEBUG).
+ */
+struct aai_virtual_reg {
+ uint32_t block;			/* register block base (e.g. AAI_MAIN_OFFSET) */
+ uint32_t addr;			/* register offset; also its index in aai_vregs[] */
+ char name[64];			/* register name, captured by stringifying the macro arg */
+ uint32_t attr;			/* access attributes: AAI_READ and/or AAI_WRITE */
+};
+
+/*
+ * Builds one aai_vregs[] entry: designated-initialized at the register's
+ * own offset (O(1) lookup by offset), name captured via stringification.
+ */
+#define VIRTUAL_REG(_blk_, _reg_, _a_) \
+ [(_reg_)] = { \
+ .block = _blk_, \
+ .addr = _reg_, \
+ .name = #_reg_, \
+ .attr = _a_ \
+ }
+
+static struct aai_virtual_reg aai_vregs[] = {
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_ITEN, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_OK_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_ERR_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_DMA_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_OK_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_ERR_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_RX_DMA_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_OK_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_ERR_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_DMA_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_OK_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_ERR_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TX_DMA_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_START, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_IT_GROUPS_STOP, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_DMA_EN_RX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_DMA_EN_TX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_FIFO_EN_RX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_FIFO_EN_TX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_DMA_BUSY_RX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_DMA_BUSY_TX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_IT, (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_ITACK, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_EN, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_MODE, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SCALE, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_LOAD, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_RESET, (AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SELECT(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_SETVAL(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(0), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(1), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(2), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(3), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(4), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(5), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(6), (AAI_READ)),
+ VIRTUAL_REG(AAI_MAIN_OFFSET, AAI_MAIN_TIMER_GETVAL(7), (AAI_READ)),
+
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MUS(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MUS(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MUS(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MUS(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_TDM_IN, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_TDM_OUT, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_ADC, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_DAC, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_AUX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MIC_MUX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOI_MUX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_PCM(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_PCM(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SPDIFRX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SPDIFTX, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE_SPEED, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_VOICE_8K, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_MUS_FILTER5, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SRC_FILTER5, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SRC_MODE, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SRC_SPEED0, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_AAICFG_OFFSET, AAI_AAICFG_SRC_SPEED1, (AAI_READ|AAI_WRITE)),
+
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CA_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NA_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CC_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NC_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_CV_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(0), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(1), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(2), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(3), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(4), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(5), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(6), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(7), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(8), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(9), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(10), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(11), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(12), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(13), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(14), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(15), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(16), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_RX(17), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(0), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(1), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(2), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(3), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(4), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(5), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(6), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(7), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(8), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(9), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_DMACFG_OFFSET, AAI_DMACFG_NV_TX(10), (AAI_WRITE)),
+
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_CFG_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(23), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(24), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(25), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(26), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(27), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(28), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(29), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(30), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(31), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(32), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(33), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(34), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(35), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_RX(36), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_SEL_TX(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_RX_16BIT, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_TX_16BIT, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_RX_ENDIAN, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOCFG_OFFSET, AAI_FIFOCFG_TX_ENDIAN, (AAI_READ|AAI_WRITE)),
+
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(0), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(1), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(2), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(3), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(4), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(5), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(6), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(7), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(8), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(9), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(10), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(11), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(12), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(13), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(14), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(15), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(16), (AAI_READ)),
+ VIRTUAL_REG(AAI_FIFORX_OFFSET, AAI_FIFORX(17), (AAI_READ)),
+
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(0), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(1), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(2), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(3), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(4), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(5), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(6), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(7), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(8), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(9), (AAI_WRITE)),
+ VIRTUAL_REG(AAI_FIFOTX_OFFSET, AAI_FIFOTX(10), (AAI_WRITE)),
+
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_CLKEN, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_MCLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_MFRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_P1CLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_P2CLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_P1FRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_P2FRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_SPDIFCLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_SPDIFCLK_R3, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(23), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(24), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(25), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(26), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(27), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(28), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(29), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(30), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(31), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(32), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(33), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADIN(34), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(23), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(24), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(25), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(26), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(27), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(28), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(29), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_GBC_PADOUT(30), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_MCLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_MFRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_P1CLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_P2CLK, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_P1FRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_P2FRAME, (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(23), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(24), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(25), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(26), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(27), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(28), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(29), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(30), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(31), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(32), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(33), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADIN(34), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(0), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(1), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(2), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(3), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(4), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(5), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(6), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(7), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(8), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(9), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(10), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(11), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(12), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(13), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(14), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(15), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(16), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(17), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(18), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(19), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(20), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(21), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(22), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(23), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(24), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(25), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(26), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(27), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(28), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(29), (AAI_READ|AAI_WRITE)),
+ VIRTUAL_REG(AAI_GBC_OFFSET, AAI_LBC_PADOUT(30), (AAI_READ|AAI_WRITE)),
+};
+
+/*
+ * aai_reg_addr - register offset of virtual register @index.
+ *
+ * Returns 0 (an invalid offset) when @index is outside aai_vregs[],
+ * instead of reading out of bounds.
+ */
+uint32_t aai_reg_addr(int index)
+{
+	if (index < 0 ||
+	    index >= (int)(sizeof(aai_vregs) / sizeof(aai_vregs[0])))
+		return 0;
+	return aai_vregs[index].addr;
+}
+
+/*
+ * aai_reg_name - human readable name of virtual register @index.
+ *
+ * Returns a fixed "invalid" string when @index is outside aai_vregs[],
+ * instead of reading out of bounds.
+ */
+const char *aai_reg_name(int index)
+{
+	if (index < 0 ||
+	    index >= (int)(sizeof(aai_vregs) / sizeof(aai_vregs[0])))
+		return "invalid";
+	return aai_vregs[index].name;
+}
+
+/*
+ * aai_reg_canread - non-zero if virtual register @index is readable.
+ *
+ * Normalized to 0/1 for consistency with the non-debug stubs in
+ * aai_regs.h (which return plain 1); out-of-range indices read as 0.
+ */
+int aai_reg_canread(int index)
+{
+	if (index < 0 ||
+	    index >= (int)(sizeof(aai_vregs) / sizeof(aai_vregs[0])))
+		return 0;
+	return !!(aai_vregs[index].attr & AAI_READ);
+}
+
+/*
+ * aai_reg_canwrite - non-zero if virtual register @index is writable.
+ *
+ * Normalized to 0/1 for consistency with the non-debug stubs in
+ * aai_regs.h; out-of-range indices read as 0.
+ */
+int aai_reg_canwrite(int index)
+{
+	if (index < 0 ||
+	    index >= (int)(sizeof(aai_vregs) / sizeof(aai_vregs[0])))
+		return 0;
+	return !!(aai_vregs[index].attr & AAI_WRITE);
+}
+
+/*
+ * aai_dump_regs - dev_dbg() every readable virtual register of @block.
+ * @aai:   card instance providing the device and MMIO accessors
+ * @block: block identifier matched against each table entry
+ *
+ * Entries with a zero address (unmapped) or belonging to another block
+ * are skipped.
+ */
+void aai_dump_regs(struct card_data_t *aai, uint32_t block)
+{
+	int i;
+	struct aai_virtual_reg *vreg;
+
+	for (i = 0;
+	     i < sizeof(aai_vregs) / sizeof(struct aai_virtual_reg);
+	     i++) {
+		vreg = &aai_vregs[i];
+		if (!vreg->addr || block != vreg->block)
+			continue;
+
+		/* kernel log messages must be newline terminated */
+		if (aai_reg_canread(i))
+			dev_dbg(aai->dev, "%s: @ 0x%x = 0x%x\n",
+				vreg->name, vreg->addr,
+				aai_readreg(aai, vreg->addr));
+	}
+}
+
+#endif
+
+/* Pair of a register offset and the value it must hold after reset. */
+struct aai_reset_reg_t {
+	uint32_t addr;	/* register offset */
+	uint32_t reset;	/* documented hardware reset value */
+};
+
+/*
+ * Table of every AAI register that aai_reset_regs() restores to its
+ * hardware reset value. File-local: only aai_reset_regs() walks it.
+ */
+static struct aai_reset_reg_t reset_regs[] = {
+	/* time mode reset to "free-running" */
+	{AAI_MAIN_TIMER_MODE, 0x0},
+
+	/* time scale reset to "no scaling" */
+	{AAI_MAIN_TIMER_SCALE, 0x0},
+
+	/* time source selection reset to 0 */
+	{AAI_MAIN_TIMER_SELECT(0), 0x0},
+	{AAI_MAIN_TIMER_SELECT(1), 0x0},
+	{AAI_MAIN_TIMER_SELECT(2), 0x0},
+	{AAI_MAIN_TIMER_SELECT(3), 0x0},
+	{AAI_MAIN_TIMER_SELECT(4), 0x0},
+	{AAI_MAIN_TIMER_SELECT(5), 0x0},	/* was missing from the list */
+	{AAI_MAIN_TIMER_SELECT(6), 0x0},
+	{AAI_MAIN_TIMER_SELECT(7), 0x0},
+
+	/* time value reset to 0 */
+	{AAI_MAIN_TIMER_SETVAL(0), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(1), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(2), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(3), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(4), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(5), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(6), 0x0},
+	{AAI_MAIN_TIMER_SETVAL(7), 0x0},
+
+	/* DMA addresses reset to 0, invalid but noticeable */
+	{AAI_DMACFG_CA_RX(0), 0x0},
+	{AAI_DMACFG_CA_RX(1), 0x0},
+	{AAI_DMACFG_CA_RX(2), 0x0},
+	{AAI_DMACFG_CA_RX(3), 0x0},
+	{AAI_DMACFG_CA_RX(4), 0x0},
+	{AAI_DMACFG_CA_RX(5), 0x0},
+	{AAI_DMACFG_CA_RX(6), 0x0},
+	{AAI_DMACFG_CA_RX(7), 0x0},
+	{AAI_DMACFG_CA_RX(8), 0x0},
+	{AAI_DMACFG_CA_RX(9), 0x0},
+	{AAI_DMACFG_CA_RX(10), 0x0},
+	{AAI_DMACFG_CA_RX(11), 0x0},
+	{AAI_DMACFG_CA_RX(12), 0x0},
+	{AAI_DMACFG_CA_RX(13), 0x0},
+	{AAI_DMACFG_CA_RX(14), 0x0},
+	{AAI_DMACFG_CA_RX(15), 0x0},
+	{AAI_DMACFG_CA_RX(16), 0x0},
+	{AAI_DMACFG_CA_RX(17), 0x0},
+
+	{AAI_DMACFG_CA_TX(0), 0x0},
+	{AAI_DMACFG_CA_TX(1), 0x0},
+	{AAI_DMACFG_CA_TX(2), 0x0},
+	{AAI_DMACFG_CA_TX(3), 0x0},
+	{AAI_DMACFG_CA_TX(4), 0x0},
+	{AAI_DMACFG_CA_TX(5), 0x0},
+	{AAI_DMACFG_CA_TX(6), 0x0},
+	{AAI_DMACFG_CA_TX(7), 0x0},
+	{AAI_DMACFG_CA_TX(8), 0x0},
+	{AAI_DMACFG_CA_TX(9), 0x0},
+	{AAI_DMACFG_CA_TX(10), 0x0},
+
+	{AAI_DMACFG_NA_RX(0), 0x0},
+	{AAI_DMACFG_NA_RX(1), 0x0},
+	{AAI_DMACFG_NA_RX(2), 0x0},
+	{AAI_DMACFG_NA_RX(3), 0x0},
+	{AAI_DMACFG_NA_RX(4), 0x0},
+	{AAI_DMACFG_NA_RX(5), 0x0},
+	{AAI_DMACFG_NA_RX(6), 0x0},
+	{AAI_DMACFG_NA_RX(7), 0x0},
+	{AAI_DMACFG_NA_RX(8), 0x0},
+	{AAI_DMACFG_NA_RX(9), 0x0},
+	{AAI_DMACFG_NA_RX(10), 0x0},
+	{AAI_DMACFG_NA_RX(11), 0x0},
+	{AAI_DMACFG_NA_RX(12), 0x0},
+	{AAI_DMACFG_NA_RX(13), 0x0},
+	{AAI_DMACFG_NA_RX(14), 0x0},
+	{AAI_DMACFG_NA_RX(15), 0x0},
+	{AAI_DMACFG_NA_RX(16), 0x0},
+	{AAI_DMACFG_NA_RX(17), 0x0},
+
+	{AAI_DMACFG_NA_TX(0), 0x0},
+	{AAI_DMACFG_NA_TX(1), 0x0},
+	{AAI_DMACFG_NA_TX(2), 0x0},
+	{AAI_DMACFG_NA_TX(3), 0x0},
+	{AAI_DMACFG_NA_TX(4), 0x0},
+	{AAI_DMACFG_NA_TX(5), 0x0},
+	{AAI_DMACFG_NA_TX(6), 0x0},
+	{AAI_DMACFG_NA_TX(7), 0x0},
+	{AAI_DMACFG_NA_TX(8), 0x0},
+	{AAI_DMACFG_NA_TX(9), 0x0},
+	{AAI_DMACFG_NA_TX(10), 0x0},
+
+	/* DMA counters reset to 0 */
+	{AAI_DMACFG_CC_RX(0), 0x0},
+	{AAI_DMACFG_CC_RX(1), 0x0},
+	{AAI_DMACFG_CC_RX(2), 0x0},
+	{AAI_DMACFG_CC_RX(3), 0x0},
+	{AAI_DMACFG_CC_RX(4), 0x0},
+	{AAI_DMACFG_CC_RX(5), 0x0},
+	{AAI_DMACFG_CC_RX(6), 0x0},
+	{AAI_DMACFG_CC_RX(7), 0x0},
+	{AAI_DMACFG_CC_RX(8), 0x0},
+	{AAI_DMACFG_CC_RX(9), 0x0},
+	{AAI_DMACFG_CC_RX(10), 0x0},
+	{AAI_DMACFG_CC_RX(11), 0x0},
+	{AAI_DMACFG_CC_RX(12), 0x0},
+	{AAI_DMACFG_CC_RX(13), 0x0},
+	{AAI_DMACFG_CC_RX(14), 0x0},
+	{AAI_DMACFG_CC_RX(15), 0x0},
+	{AAI_DMACFG_CC_RX(16), 0x0},
+	{AAI_DMACFG_CC_RX(17), 0x0},
+
+	{AAI_DMACFG_CC_TX(0), 0x0},
+	{AAI_DMACFG_CC_TX(1), 0x0},
+	{AAI_DMACFG_CC_TX(2), 0x0},
+	{AAI_DMACFG_CC_TX(3), 0x0},
+	{AAI_DMACFG_CC_TX(4), 0x0},
+	{AAI_DMACFG_CC_TX(5), 0x0},
+	{AAI_DMACFG_CC_TX(6), 0x0},
+	{AAI_DMACFG_CC_TX(7), 0x0},
+	{AAI_DMACFG_CC_TX(8), 0x0},
+	{AAI_DMACFG_CC_TX(9), 0x0},
+	{AAI_DMACFG_CC_TX(10), 0x0},
+
+	{AAI_DMACFG_NC_RX(0), 0x0},
+	{AAI_DMACFG_NC_RX(1), 0x0},
+	{AAI_DMACFG_NC_RX(2), 0x0},
+	{AAI_DMACFG_NC_RX(3), 0x0},
+	{AAI_DMACFG_NC_RX(4), 0x0},
+	{AAI_DMACFG_NC_RX(5), 0x0},
+	{AAI_DMACFG_NC_RX(6), 0x0},
+	{AAI_DMACFG_NC_RX(7), 0x0},
+	{AAI_DMACFG_NC_RX(8), 0x0},	/* was a duplicated NC_RX(7) entry */
+	{AAI_DMACFG_NC_RX(9), 0x0},
+	{AAI_DMACFG_NC_RX(10), 0x0},
+	{AAI_DMACFG_NC_RX(11), 0x0},
+	{AAI_DMACFG_NC_RX(12), 0x0},
+	{AAI_DMACFG_NC_RX(13), 0x0},
+	{AAI_DMACFG_NC_RX(14), 0x0},
+	{AAI_DMACFG_NC_RX(15), 0x0},
+	{AAI_DMACFG_NC_RX(16), 0x0},
+	{AAI_DMACFG_NC_RX(17), 0x0},
+
+	{AAI_DMACFG_NC_TX(0), 0x0},
+	{AAI_DMACFG_NC_TX(1), 0x0},
+	{AAI_DMACFG_NC_TX(2), 0x0},
+	{AAI_DMACFG_NC_TX(3), 0x0},
+	{AAI_DMACFG_NC_TX(4), 0x0},
+	{AAI_DMACFG_NC_TX(5), 0x0},
+	{AAI_DMACFG_NC_TX(6), 0x0},
+	{AAI_DMACFG_NC_TX(7), 0x0},
+	{AAI_DMACFG_NC_TX(8), 0x0},
+	{AAI_DMACFG_NC_TX(9), 0x0},
+	{AAI_DMACFG_NC_TX(10), 0x0},
+
+	/*
+	 * FIFO configuration at reset:
+	 * - chan_num = -1
+	 * - fifo_depth = 0
+	 * - fifo_start = 0
+	 */
+	{AAI_FIFOCFG_CFG_RX(0), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(1), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(2), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(3), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(4), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(5), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(6), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(7), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(8), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(9), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(10), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(11), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(12), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(13), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(14), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(15), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(16), 0x3f000000},
+	{AAI_FIFOCFG_CFG_RX(17), 0x3f000000},
+
+	{AAI_FIFOCFG_CFG_TX(0), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(1), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(2), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(3), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(4), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(5), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(6), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(7), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(8), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(9), 0x1f000000},
+	{AAI_FIFOCFG_CFG_TX(10), 0x1f000000},
+
+	/* Default FIFO mode is 32 */
+	{AAI_FIFOCFG_RX_16BIT, 0x0},
+	{AAI_FIFOCFG_TX_16BIT, 0x0},
+
+	/* Default FIFO16 endianness is LE */
+	{AAI_FIFOCFG_RX_ENDIAN, 0x0},
+	{AAI_FIFOCFG_TX_ENDIAN, 0x0},
+};
+
+/* Write every entry of reset_regs[] back to its hardware reset value. */
+void aai_reset_regs(struct card_data_t *aai)
+{
+	unsigned int idx;
+	const unsigned int count =
+		sizeof(reset_regs) / sizeof(reset_regs[0]);
+
+	for (idx = 0; idx < count; idx++)
+		aai_writereg(aai, reset_regs[idx].reset,
+			     reset_regs[idx].addr, 0);
+}
+
diff --git a/drivers/parrot/sound/p7_aai/aai_regs.h b/drivers/parrot/sound/p7_aai/aai_regs.h
new file mode 100644
index 00000000000000..75f0ff9e8baa9f
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_regs.h
@@ -0,0 +1,33 @@
+
+#ifndef _AAI_REGS_H_
+#define _AAI_REGS_H_
+
+/*
+ * Virtual register helpers. Real table-backed implementations are only
+ * built when CONFIG_SND_AAI_DEBUG is set; otherwise cheap inline stubs
+ * keep the callers compiling with no register table at all.
+ */
+#ifdef CONFIG_SND_AAI_DEBUG
+uint32_t aai_reg_addr(int index);
+const char *aai_reg_name(int index);
+void aai_dump_regs(struct card_data_t *aai, uint32_t blocks);
+int aai_reg_canread(int index);
+int aai_reg_canwrite(int index);
+/* In debug builds a register can be read back iff it can be read. */
+static inline int aai_reg_canreadback(int index)
+{
+	return aai_reg_canread(index);
+}
+
+#else
+
+/* Non-debug stub: with no table, the index doubles as the address. */
+static inline uint32_t aai_reg_addr(int index) { return index; }
+static inline const char *aai_reg_name(int index)
+{
+	return "noname";
+}
+
+static inline void aai_dump_regs(struct card_data_t *aai, uint32_t blocks) {}
+static inline int aai_reg_canread(int index) { return 1; }
+/* NOTE(review): stub returns 0 while the debug build returns canread();
+ * confirm readback is meant to be disabled in non-debug builds. */
+static inline int aai_reg_canreadback(int index) { return 0; }
+static inline int aai_reg_canwrite(int index) { return 1; }
+#endif
+
+/* Restore all AAI registers to their hardware reset values. */
+void aai_reset_regs(struct card_data_t *aai);
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/aai_rules.c b/drivers/parrot/sound/p7_aai/aai_rules.c
new file mode 100644
index 00000000000000..85f1e252fbfccd
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_rules.c
@@ -0,0 +1,958 @@
+
+#include "aai.h"
+#include "aai_hw.h"
+#include "aai_rules.h"
+#include "aai_clk.h"
+
+/*
+ * A configuration rule: one masked register field update, optional
+ * enable/disable callbacks, and a list of mutually exclusive rules
+ * that block this one while any of them is active.
+ */
+struct aai_rule_t {
+	int id;			/* index of this rule in aai_rules[] */
+	uint32_t addr;		/* target register (0 = callbacks only) */
+	uint32_t shift;		/* bit offset of the field */
+	uint32_t mask;		/* field mask, applied before shifting */
+	uint32_t value;		/* field value to program */
+	int excl[NB_EXCL_RULES];	/* RULE_END-terminated exclusions */
+	int active;		/* non-zero once the rule is applied */
+	/* callback to start clock */
+	int (*apply)(struct card_data_t *aai);
+	/* callback to stop clock */
+	int (*remove)(struct card_data_t *aai);
+};
+
+/*
+ * Callbacks to enable/ disable clocks
+ */
+/*
+ * enable_pcm1 - prepare and enable the PCM1 bit clock.
+ *
+ * Returns 0 on success or the negative errno from clk_prepare_enable()
+ * (err must be a signed int to carry it).
+ */
+static int enable_pcm1(struct card_data_t *aai)
+{
+	int err;
+
+	err = clk_prepare_enable(aai->clk[pcm1_clk]);
+	if (err)
+		dev_err(aai->dev, "failed to enable clock.\n");
+
+	return err;
+}
+
+/* Release the PCM1 clock taken by enable_pcm1(). Always succeeds. */
+static int disable_pcm1(struct card_data_t *aai)
+{
+	aai_release_clk(aai, pcm1_clk);
+	return 0;
+}
+
+/*
+ * enable_pcm2 - prepare and enable the PCM2 bit clock.
+ *
+ * Returns 0 on success or the negative errno from clk_prepare_enable()
+ * (err must be a signed int to carry it).
+ */
+static int enable_pcm2(struct card_data_t *aai)
+{
+	int err;
+
+	err = clk_prepare_enable(aai->clk[pcm2_clk]);
+	if (err)
+		dev_err(aai->dev, "failed to enable clock.\n");
+
+	return err;
+}
+
+/* Release the PCM2 clock taken by enable_pcm2(). Always succeeds. */
+static int disable_pcm2(struct card_data_t *aai)
+{
+	aai_release_clk(aai, pcm2_clk);
+	return 0;
+}
+
+/*
+ * enable_i2s - currently a no-op: the I2S clock setup call is kept
+ * below for reference but disabled.
+ */
+static int enable_i2s(struct card_data_t *aai)
+{
+	/* aai_set_clk(aai, i2s_clk, AAI_NOMINAL_RATE); */
+	return 0;
+}
+
+/*static int disable_i2s(struct card_data_t *aai)
+{
+ aai_release_clk(aai, i2s_clk);
+ return 0;
+}*/
+
+/*
+ * enable_spdif - prepare and enable the S/PDIF clock.
+ *
+ * Returns 0 on success; on failure, propagates the actual errno from
+ * clk_prepare_enable() instead of collapsing it to -EINVAL.
+ */
+static int enable_spdif(struct card_data_t *aai)
+{
+	int err;
+
+	err = clk_prepare_enable(aai->clk[spdif_clk]);
+	if (err) {
+		dev_err(aai->dev, "failed to enable spdif clock.\n");
+		return err;
+	}
+
+	/* XXX release reset */
+	/*aai_writereg(aai, 0, AAI_GBC_RESET, 0);*/
+
+	return 0;
+}
+
+/* Release the S/PDIF clock taken by enable_spdif(). Always succeeds. */
+static int disable_spdif(struct card_data_t *aai)
+{
+	aai_release_clk(aai, spdif_clk);
+	return 0;
+}
+
+/*
+ * enable_tdm - set the TDM enable bit on both directions.
+ *
+ * By default tdm is quad; the rule decides quad vs octo, so this
+ * generic function is applicable to both cases. TDM in and TDM out
+ * get the same configuration.
+ */
+static int enable_tdm(struct card_data_t *aai)
+{
+	uint32_t cfg;
+
+	cfg = aai_readreg(aai, AAI_AAICFG_TDM_IN) & 0x17f;
+	/* 1U: left-shifting a signed 1 by 31 is undefined behavior */
+	cfg |= 1U << 31;
+
+	aai_writereg(aai, cfg, AAI_AAICFG_TDM_IN, 0);
+	cfg |= (aai_readreg(aai, AAI_AAICFG_TDM_OUT) & 0xfffffe80);
+	aai_writereg(aai, cfg, AAI_AAICFG_TDM_OUT, 0);
+	return 0;
+}
+
+/*
+ * disable_tdm - clear the TDM enable bits on both directions.
+ *
+ * NOTE(review): enable_tdm() sets bit 31, but this mask (0xeffffff,
+ * i.e. 0x0EFFFFFF) clears bits 31..28 and bit 24 -- it looks like
+ * 0x7fffffff (clear bit 31 only) may have been intended; confirm
+ * against the TDM register layout.
+ */
+static int disable_tdm(struct card_data_t *aai)
+{
+	uint32_t cfg;
+	cfg = 0xeffffff;
+	cfg &= aai_readreg(aai, AAI_AAICFG_TDM_IN);
+	aai_writereg(aai, cfg, AAI_AAICFG_TDM_IN, 0);
+
+	cfg = 0xeffffff;
+	cfg &= aai_readreg(aai, AAI_AAICFG_TDM_OUT);
+	aai_writereg(aai, cfg, AAI_AAICFG_TDM_OUT, 0);
+	return 0;
+}
+
+/*
+ * Rule table, indexed by enum aai_rules_t. Each entry's .id must equal
+ * its own index: aai_rule_apply() scans excl[] lists and stops on an
+ * entry whose .id is RULE_END, so a wrong .id corrupts both exclusion
+ * handling and active-rule identity.
+ */
+static struct aai_rule_t aai_rules[] = {
+	[RULE_VOICE_SPEAKER_8K] = {
+		.id    = RULE_VOICE_SPEAKER_8K,
+		.addr  = AAI_AAICFG_VOICE_8K,
+		.shift = 1,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICE_SPEAKER_16K, RULE_END},
+	},
+
+	[RULE_VOICE_SPEAKER_16K] = {
+		.id    = RULE_VOICE_SPEAKER_16K,
+		.addr  = AAI_AAICFG_VOICE_8K,
+		.shift = 1,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICE_SPEAKER_8K, RULE_END},
+	},
+
+	[RULE_VOICE_PHONE_8K] = {
+		.id    = RULE_VOICE_PHONE_8K,
+		.addr  = AAI_AAICFG_VOICE_8K,
+		.shift = 0,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICE_PHONE_16K, RULE_END},
+	},
+
+	[RULE_VOICE_PHONE_16K] = {
+		.id    = RULE_VOICE_PHONE_16K,
+		.addr  = AAI_AAICFG_VOICE_8K,
+		.shift = 0,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICE_PHONE_8K, RULE_END},
+	},
+
+	[RULE_LOOPBACK_0] = {
+		.id    = RULE_LOOPBACK_0,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 2,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK_1] = {
+		.id    = RULE_LOOPBACK_1,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 10,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK_2] = {
+		.id    = RULE_LOOPBACK_2,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 18,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK_3] = {
+		.id    = RULE_LOOPBACK_3,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 26,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK0_I2S0] = {
+		.id    = RULE_LOOPBACK0_I2S0,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 0,
+		.mask  = 3,
+		.value = 0,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK0_I2S1] = {
+		.id    = RULE_LOOPBACK0_I2S1,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 0,
+		.mask  = 3,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK0_I2S2] = {
+		.id    = RULE_LOOPBACK0_I2S2,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 0,
+		.mask  = 3,
+		.value = 2,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK0_I2S3] = {
+		.id    = RULE_LOOPBACK0_I2S3,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 0,
+		.mask  = 3,
+		.value = 3,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK1_I2S0] = {
+		.id    = RULE_LOOPBACK1_I2S0,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 8,
+		.mask  = 3,
+		.value = 0,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK1_I2S1] = {
+		.id    = RULE_LOOPBACK1_I2S1,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 8,
+		.mask  = 3,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK1_I2S2] = {
+		.id    = RULE_LOOPBACK1_I2S2,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 8,
+		.mask  = 3,
+		.value = 2,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK1_I2S3] = {
+		.id    = RULE_LOOPBACK1_I2S3,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 8,
+		.mask  = 3,
+		.value = 3,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK2_I2S0] = {
+		.id    = RULE_LOOPBACK2_I2S0,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 16,
+		.mask  = 3,
+		.value = 0,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK2_I2S1] = {
+		.id    = RULE_LOOPBACK2_I2S1,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 16,
+		.mask  = 3,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK2_I2S2] = {
+		.id    = RULE_LOOPBACK2_I2S2,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 16,
+		.mask  = 3,
+		.value = 2,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK2_I2S3] = {
+		.id    = RULE_LOOPBACK2_I2S3,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 16,
+		.mask  = 3,
+		.value = 3,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK3_I2S0] = {
+		.id    = RULE_LOOPBACK3_I2S0,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 24,
+		.mask  = 3,
+		.value = 0,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK3_I2S1] = {
+		.id    = RULE_LOOPBACK3_I2S1,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 24,
+		.mask  = 3,
+		.value = 1,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK3_I2S2] = {
+		.id    = RULE_LOOPBACK3_I2S2,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 24,
+		.mask  = 3,
+		.value = 2,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_LOOPBACK3_I2S3] = {
+		.id    = RULE_LOOPBACK3_I2S3,
+		.addr  = AAI_AAICFG_MIC_MUX,
+		.shift = 24,
+		.mask  = 3,
+		.value = 3,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_VOICEL0_INPUT_8K] = {
+		.id    = RULE_VOICEL0_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(0),
+		.shift = 5,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICEL0_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICER0_INPUT_8K] = {
+		.id    = RULE_VOICER0_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(0),
+		.shift = 13,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICER0_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICEL0_INPUT_16K] = {
+		.id    = RULE_VOICEL0_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(0),
+		.shift = 5,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICEL0_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICER0_INPUT_16K] = {
+		.id    = RULE_VOICER0_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(0),
+		.shift = 13,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICER0_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICEL1_INPUT_8K] = {
+		.id    = RULE_VOICEL1_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(1),
+		.shift = 5,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICEL1_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICER1_INPUT_8K] = {
+		.id    = RULE_VOICER1_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(1),
+		.shift = 13,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICER1_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICEL1_INPUT_16K] = {
+		.id    = RULE_VOICEL1_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(1),
+		.shift = 5,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICEL1_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICER1_INPUT_16K] = {
+		.id    = RULE_VOICER1_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(1),
+		.shift = 13,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICER1_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICEL2_INPUT_8K] = {
+		.id    = RULE_VOICEL2_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(2),
+		.shift = 5,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICEL2_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICER2_INPUT_8K] = {
+		.id    = RULE_VOICER2_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(2),
+		.shift = 13,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICER2_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICEL2_INPUT_16K] = {
+		.id    = RULE_VOICEL2_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(2),
+		.shift = 5,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICEL2_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICER2_INPUT_16K] = {
+		.id    = RULE_VOICER2_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(2),
+		.shift = 13,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICER2_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICEL3_INPUT_8K] = {
+		.id    = RULE_VOICEL3_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(3),
+		.shift = 5,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICEL3_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICER3_INPUT_8K] = {
+		.id    = RULE_VOICER3_INPUT_8K,
+		.addr  = AAI_AAICFG_VOICE(3),
+		.shift = 13,
+		.mask  = 1,
+		.value = 1,
+		.excl  = {RULE_VOICER3_INPUT_16K, RULE_END},
+	},
+
+	[RULE_VOICEL3_INPUT_16K] = {
+		.id    = RULE_VOICEL3_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(3),
+		.shift = 5,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICEL3_INPUT_8K, RULE_END},
+	},
+
+	[RULE_VOICER3_INPUT_16K] = {
+		.id    = RULE_VOICER3_INPUT_16K,
+		.addr  = AAI_AAICFG_VOICE(3),
+		.shift = 13,
+		.mask  = 1,
+		.value = 0,
+		.excl  = {RULE_VOICER3_INPUT_8K, RULE_END},
+	},
+
+	[RULE_PCM0_MASTER] = {
+		.id     = RULE_PCM0_MASTER,
+		.apply  = &enable_pcm1,
+		.remove = &disable_pcm1,
+	},
+
+	[RULE_PCM1_MASTER] = {
+		.id     = RULE_PCM1_MASTER,
+		.apply  = &enable_pcm2,
+		.remove = &disable_pcm2,
+	},
+
+	[RULE_PCM0_8K] = {
+		.id    = RULE_PCM0_8K,
+		.addr  = AAI_GBC_P1FRAME,
+		.shift = 0,
+		.mask  = 0xff,
+		.value = 0x1e,
+		.excl  = {RULE_PCM0_16K, RULE_END},
+	},
+
+	[RULE_PCM0_16K] = {
+		.id    = RULE_PCM0_16K,
+		.addr  = AAI_GBC_P1FRAME,
+		.shift = 0,
+		.mask  = 0xff,
+		.value = 0x0e,
+		.excl  = {RULE_PCM0_8K, RULE_END},
+	},
+
+	[RULE_PCM1_8K] = {
+		.id     = RULE_PCM1_8K,
+		.addr   = AAI_GBC_P2FRAME,
+		.shift  = 0,
+		.mask   = 0xff,
+		.value  = 0x1e,
+		.apply  = &enable_pcm2,
+		.remove = &disable_pcm2,
+		.excl   = {RULE_PCM1_16K, RULE_END},
+	},
+
+	[RULE_PCM1_16K] = {
+		.id     = RULE_PCM1_16K,
+		.addr   = AAI_GBC_P2FRAME,
+		.shift  = 0,
+		/* was 0x1e: use the full frame-divisor field like every
+		 * other PCM/I2S rate rule */
+		.mask   = 0xff,
+		.value  = 0x0e,
+		.apply  = &enable_pcm2,
+		.remove = &disable_pcm2,
+		.excl   = {RULE_PCM1_8K, RULE_END},
+	},
+
+	[RULE_I2S_96K] = {
+		.id    = RULE_I2S_96K,
+		.addr  = AAI_GBC_MCLK,
+		.shift = 0,
+		.mask  = 0xff,
+		.value = 0x0e,
+		.apply = &enable_i2s,
+		.excl  = {RULE_I2S_48K, RULE_END},
+	},
+
+	[RULE_I2S_48K] = {
+		.id    = RULE_I2S_48K,
+		.addr  = AAI_GBC_MCLK,
+		.shift = 0,
+		.mask  = 0xff,
+		.value = 0x1e,
+		.apply = &enable_i2s,
+		.excl  = {RULE_I2S_96K, RULE_END},
+	},
+
+	/*
+	 * NOTE(review): the TDM "OUT" rules below write AAI_AAICFG_TDM_IN;
+	 * enable_tdm() mirrors the IN config to TDM_OUT, so this may be
+	 * intentional -- confirm against the TDM register layout.
+	 */
+	[RULE_TDM_OUT_OCTO] = {
+		.id     = RULE_TDM_OUT_OCTO,
+		.addr   = AAI_AAICFG_TDM_IN,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x40,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_IN_OCTO] = {
+		.id     = RULE_TDM_IN_OCTO,
+		.addr   = AAI_AAICFG_TDM_IN,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x40,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_OUT_QUAD] = {
+		.id     = RULE_TDM_OUT_QUAD,
+		.addr   = AAI_AAICFG_TDM_OUT,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x20,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_IN_QUAD] = {
+		.id     = RULE_TDM_IN_QUAD,
+		.addr   = AAI_AAICFG_TDM_IN,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x20,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_OUT_ST] = {
+		/* was RULE_TDM_OUT_QUAD: copy-paste error */
+		.id     = RULE_TDM_OUT_ST,
+		.addr   = AAI_AAICFG_TDM_OUT,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x10,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_IN_ST] = {
+		/* was RULE_TDM_IN_QUAD: copy-paste error */
+		.id     = RULE_TDM_IN_ST,
+		.addr   = AAI_AAICFG_TDM_IN,
+		.shift  = 0,
+		.mask   = 0x7f,
+		.value  = 0x10,
+		.apply  = &enable_tdm,
+		.remove = &disable_tdm,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_TDM_IN0] = {
+		.id    = RULE_TDM_IN0,
+		.addr  = AAI_AAICFG_MUS(0),
+		.shift = 12,
+		.mask  = 0x7,
+		.value = 0x4,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_TDM_IN1] = {
+		.id    = RULE_TDM_IN1,
+		.addr  = AAI_AAICFG_MUS(1),
+		.shift = 12,
+		.mask  = 0x7,
+		.value = 0x5,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_TDM_IN2] = {
+		.id    = RULE_TDM_IN2,
+		.addr  = AAI_AAICFG_MUS(2),
+		.shift = 12,
+		.mask  = 0x7,
+		.value = 0x6,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_TDM_IN3] = {
+		.id    = RULE_TDM_IN3,
+		.addr  = AAI_AAICFG_MUS(3),
+		.shift = 12,
+		.mask  = 0x7,
+		.value = 0x7,
+		.excl  = {RULE_END},
+	},
+
+	[RULE_SPDIF_OUT] = {
+		.id     = RULE_SPDIF_OUT,
+		.addr   = AAI_AAICFG_SPDIFTX,
+		.shift  = 4,
+		.mask   = 1,
+		.value  = 1,
+		.apply  = &enable_spdif,
+		.remove = &disable_spdif,
+		.excl   = {RULE_END},
+	},
+
+	[RULE_SPDIF_IN0] = {
+		.id     = RULE_SPDIF_IN0,
+		.addr   = AAI_AAICFG_MUS(0),
+		.shift  = 16,
+		.mask   = 0x7,
+		.value  = 0x7,
+		.apply  = &enable_spdif,
+		.remove = &disable_spdif,
+		.excl   = {RULE_I2S_IN0, RULE_END},
+	},
+
+	[RULE_SPDIF_IN1] = {
+		.id     = RULE_SPDIF_IN1,
+		.addr   = AAI_AAICFG_MUS(1),
+		.shift  = 16,
+		.mask   = 0x7,
+		.value  = 0x7,
+		.apply  = &enable_spdif,
+		.remove = &disable_spdif,
+		.excl   = {RULE_I2S_IN1, RULE_END},
+	},
+
+	[RULE_SPDIF_IN2] = {
+		.id     = RULE_SPDIF_IN2,
+		.addr   = AAI_AAICFG_MUS(2),
+		.shift  = 16,
+		.mask   = 0x7,
+		.value  = 0x7,
+		.apply  = &enable_spdif,
+		.remove = &disable_spdif,
+		.excl   = {RULE_I2S_IN2, RULE_END},
+	},
+
+	[RULE_SPDIF_IN3] = {
+		.id     = RULE_SPDIF_IN3,
+		.addr   = AAI_AAICFG_MUS(3),
+		.shift  = 16,
+		.mask   = 0x7,
+		.value  = 0x7,
+		.apply  = &enable_spdif,
+		.remove = &disable_spdif,
+		.excl   = {RULE_I2S_IN3, RULE_END},
+	},
+
+	[RULE_I2S_IN0] = {
+		.id    = RULE_I2S_IN0,
+		.addr  = AAI_AAICFG_MUS(0),
+		.shift = 16,
+		.mask  = 0x7,
+		.value = 0,
+		.excl  = {RULE_SPDIF_IN0, RULE_END},
+	},
+
+	[RULE_I2S_IN1] = {
+		.id    = RULE_I2S_IN1,
+		.addr  = AAI_AAICFG_MUS(1),
+		.shift = 16,
+		.mask  = 0x7,
+		.value = 0,
+		.excl  = {RULE_SPDIF_IN1, RULE_END},
+	},
+
+	[RULE_I2S_IN2] = {
+		.id    = RULE_I2S_IN2,
+		.addr  = AAI_AAICFG_MUS(2),
+		.shift = 16,
+		.mask  = 0x7,
+		.value = 0,
+		.excl  = {RULE_SPDIF_IN2, RULE_END},
+	},
+
+	[RULE_I2S_IN3] = {
+		.id    = RULE_I2S_IN3,
+		.addr  = AAI_AAICFG_MUS(3),
+		.shift = 16,
+		.mask  = 0x7,
+		.value = 0,
+		.excl  = {RULE_SPDIF_IN3, RULE_END},
+	},
+
+	/* sentinel used to terminate excl[] scans */
+	[RULE_END] = {
+		.id = RULE_END,
+	}
+};
+
+/* Non-zero when rule @ruleid has been applied and not yet removed. */
+static int aai_rule_is_active(struct card_data_t *aai, int ruleid)
+{
+	return aai_rules[ruleid].active;
+}
+
+/*
+ * aai_rule_apply - apply configuration rule @ruleid unless an exclusive
+ * rule is already active.
+ *
+ * The id is validated before indexing aai_rules[]. The masked register
+ * update runs under the hw spinlock; the enable callback runs outside
+ * it (clock operations may sleep) and only when the rule was actually
+ * applied by this call -- early exits (invalid id, already active) must
+ * not re-run clk_prepare_enable() and unbalance the clock refcount.
+ *
+ * Returns 0 on success or the enable callback's error code.
+ */
+int aai_rule_apply(struct card_data_t *aai, int ruleid)
+{
+	int err = 0;
+	uint32_t reg;
+	int id;
+	int apply = 0;
+	struct aai_rule_t *excl_rule = NULL;
+	struct aai_rule_t *rule = NULL;
+	unsigned long flags = 0;
+
+	/* validate before indexing the table */
+	if (ruleid < 0 || ruleid >= RULE_END)
+		goto exit;
+
+	rule = &aai_rules[ruleid];
+
+	if (aai_rule_is_active(aai, ruleid))
+		goto exit;
+
+	apply = 1;
+	spin_lock_irqsave(&aai->hwlock, flags);
+
+	/* check exclusion rules */
+	for (id = 0; id < NB_EXCL_RULES; id++) {
+		excl_rule = &aai_rules[rule->excl[id]];
+
+		if (excl_rule->id == RULE_END)
+			break;
+
+		if (excl_rule->active) {
+			apply = 0;
+			break;
+		}
+	}
+
+	/* apply rule */
+	if (apply) {
+		if (rule->addr) {
+			reg = aai_readreg(aai, rule->addr);
+			reg &= ~(rule->mask << rule->shift);
+			reg |= (rule->value & rule->mask) << rule->shift;
+			aai_writereg(aai, reg, rule->addr, 0);
+		}
+
+		/* mark rule as active */
+		rule->active = 1;
+	}
+
+	spin_unlock_irqrestore(&aai->hwlock, flags);
+
+exit:
+	/* invoke the enable callback only for a rule we just applied */
+	if (apply && rule->apply)
+		err = rule->apply(aai);
+
+	return err;
+}
+
+/*
+ * aai_rule_remove - mark configuration rule @ruleid inactive.
+ *
+ * The id is validated before indexing aai_rules[]. Register values are
+ * deliberately left untouched.
+ *
+ * NOTE(review): the rule's remove callback (e.g. clock release) is not
+ * invoked here; if removal is meant to undo enable_* callbacks, that is
+ * handled elsewhere -- confirm.
+ *
+ * Returns 0.
+ */
+int aai_rule_remove(struct card_data_t *aai, int ruleid)
+{
+	int err = 0;
+	struct aai_rule_t *rule;
+	unsigned long flags = 0;
+
+	/* validate before indexing the table */
+	if (ruleid < 0 || ruleid >= RULE_END)
+		goto exit;
+
+	rule = &aai_rules[ruleid];
+
+	if (!aai_rule_is_active(aai, ruleid))
+		goto exit;
+
+	spin_lock_irqsave(&aai->hwlock, flags);
+
+	/* mark rule as inactive, do not change register values */
+	rule->active = 0;
+
+	spin_unlock_irqrestore(&aai->hwlock, flags);
+
+exit:
+	return err;
+}
+
+/*
+ * Select the voice-speaker sample rate rule matching the requested
+ * hw_params rate. Only 8 kHz and 16 kHz are supported.
+ */
+int aai_hw_params_voice(struct card_data_t *aai,
+			struct aai_device_t *chan,
+			struct snd_pcm_hw_params *hw_params)
+{
+	switch (params_rate(hw_params)) {
+	case 8000:
+		return aai_rule_apply(aai, RULE_VOICE_SPEAKER_8K);
+	case 16000:
+		return aai_rule_apply(aai, RULE_VOICE_SPEAKER_16K);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Drop whichever voice-speaker rate rule is currently active. */
+int aai_close_voice(struct card_data_t *aai,
+		    struct aai_device_t *chan)
+{
+	static const int speaker_rules[] = {
+		RULE_VOICE_SPEAKER_8K,
+		RULE_VOICE_SPEAKER_16K,
+	};
+	unsigned int i;
+	int status = 0;
+
+	for (i = 0; i < sizeof(speaker_rules) / sizeof(speaker_rules[0]); i++)
+		if (aai_rule_is_active(aai, speaker_rules[i]))
+			status |= aai_rule_remove(aai, speaker_rules[i]);
+
+	return status;
+}
+
+/*
+ * Configure PCM0 frame and voice-phone rate rules for the requested
+ * hw_params rate. Only 8 kHz and 16 kHz are supported.
+ */
+int aai_hw_params_pcm0(struct card_data_t *aai,
+		       struct aai_device_t *chan,
+		       struct snd_pcm_hw_params *hw_params)
+{
+	int ret;
+
+	switch (params_rate(hw_params)) {
+	case 8000:
+		ret = aai_rule_apply(aai, RULE_PCM0_8K);
+		ret |= aai_rule_apply(aai, RULE_VOICE_PHONE_8K);
+		break;
+	case 16000:
+		ret = aai_rule_apply(aai, RULE_PCM0_16K);
+		ret |= aai_rule_apply(aai, RULE_VOICE_PHONE_16K);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Configure PCM1 frame and voice-phone rate rules for the requested
+ * hw_params rate, then enable run-dual mode on the shared PCM link.
+ *
+ * Returns -EINVAL for unsupported rates without touching the PCM
+ * configuration register (the original code enabled run-dual even on
+ * the error path).
+ */
+int aai_hw_params_pcm1(struct card_data_t *aai,
+		       struct aai_device_t *chan,
+		       struct snd_pcm_hw_params *hw_params)
+{
+	int ret;
+	int rate = params_rate(hw_params);
+	uint32_t reg;
+
+	/* set rate */
+	if (rate == 8000) {
+		ret = aai_rule_apply(aai, RULE_PCM1_8K);
+		ret |= aai_rule_apply(aai, RULE_VOICE_PHONE_8K);
+	} else if (rate == 16000) {
+		ret = aai_rule_apply(aai, RULE_PCM1_16K);
+		ret |= aai_rule_apply(aai, RULE_VOICE_PHONE_16K);
+	} else {
+		return -EINVAL;
+	}
+
+	/*
+	 * pcm1 is on the same link than pcm0,
+	 * run dual must be activated.
+	 * start-two set to 0xf
+	 */
+	reg = aai_readreg(aai, AAI_AAICFG_PCM(0));
+
+	reg |= 1 << 27 | 0xf;
+	aai_writereg(aai, reg, AAI_AAICFG_PCM(0), 0);
+
+	return ret;
+}
+
+/*
+ * aai_close_pcm - tear down PCM0/PCM1 rate rules and run-dual mode.
+ *
+ * Removes whichever frame-rate rule (and its paired voice-phone rule)
+ * is active for each PCM link, then clears the run-dual bit and the
+ * start-two field of the shared PCM configuration register.
+ */
+int aai_close_pcm(struct card_data_t *aai,
+		  struct aai_device_t *chan)
+{
+	int ret = 0;
+	uint32_t reg;
+
+	/* reset rate */
+	if (aai_rule_is_active(aai, RULE_PCM0_8K)) {
+		ret |= aai_rule_remove(aai, RULE_PCM0_8K);
+		ret |= aai_rule_remove(aai, RULE_VOICE_PHONE_8K);
+	}
+
+	if (aai_rule_is_active(aai, RULE_PCM0_16K)) {
+		ret |= aai_rule_remove(aai, RULE_PCM0_16K);
+		ret |= aai_rule_remove(aai, RULE_VOICE_PHONE_16K);
+	}
+
+	if (aai_rule_is_active(aai, RULE_PCM1_8K)) {
+		ret |= aai_rule_remove(aai, RULE_PCM1_8K);
+		ret |= aai_rule_remove(aai, RULE_VOICE_PHONE_8K);
+	}
+
+	if (aai_rule_is_active(aai, RULE_PCM1_16K)) {
+		ret |= aai_rule_remove(aai, RULE_PCM1_16K);
+		ret |= aai_rule_remove(aai, RULE_VOICE_PHONE_16K);
+	}
+
+	/* disable run dual mode & start-two*/
+	reg = aai_readreg(aai, AAI_AAICFG_PCM(0));
+	reg &= ~(1 << 27);
+	/*
+	 * NOTE(review): aai_hw_params_pcm1() only sets 0xf here but 0xff
+	 * is cleared -- harmless if bits 7..4 are otherwise unused;
+	 * confirm against the PCM register layout.
+	 */
+	reg &= ~0xff;
+	aai_writereg(aai, reg, AAI_AAICFG_PCM(0), 0);
+
+	return ret;
+}
diff --git a/drivers/parrot/sound/p7_aai/aai_rules.h b/drivers/parrot/sound/p7_aai/aai_rules.h
new file mode 100644
index 00000000000000..cef7ff0a2d9cb5
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/aai_rules.h
@@ -0,0 +1,106 @@
+
+#ifndef _AAI_RULES_H_
+#define _AAI_RULES_H_
+
+#define NB_EXCL_RULES 5
+
+enum aai_rules_t {	/* routing rules handled by aai_rule_apply/remove */
+	RULE_VOICE_SPEAKER_8K,
+	RULE_VOICE_SPEAKER_16K,
+	RULE_VOICE_PHONE_8K,
+	RULE_VOICE_PHONE_16K,
+	RULE_LOOPBACK_0,
+	RULE_LOOPBACK_1,
+	RULE_LOOPBACK_2,
+	RULE_LOOPBACK_3,
+	RULE_LOOPBACK0_I2S0,	/* loopback channel N routed to I2S M */
+	RULE_LOOPBACK0_I2S1,
+	RULE_LOOPBACK0_I2S2,
+	RULE_LOOPBACK0_I2S3,
+	RULE_LOOPBACK1_I2S0,
+	RULE_LOOPBACK1_I2S1,
+	RULE_LOOPBACK1_I2S2,
+	RULE_LOOPBACK1_I2S3,
+	RULE_LOOPBACK2_I2S0,
+	RULE_LOOPBACK2_I2S1,
+	RULE_LOOPBACK2_I2S2,
+	RULE_LOOPBACK2_I2S3,
+	RULE_LOOPBACK3_I2S0,
+	RULE_LOOPBACK3_I2S1,
+	RULE_LOOPBACK3_I2S2,
+	RULE_LOOPBACK3_I2S3,
+	RULE_VOICEL0_INPUT_8K,	/* voice left/right input N at 8 or 16 kHz */
+	RULE_VOICER0_INPUT_8K,
+	RULE_VOICEL0_INPUT_16K,
+	RULE_VOICER0_INPUT_16K,
+	RULE_VOICEL1_INPUT_8K,
+	RULE_VOICER1_INPUT_8K,
+	RULE_VOICEL1_INPUT_16K,
+	RULE_VOICER1_INPUT_16K,
+	RULE_VOICEL2_INPUT_8K,
+	RULE_VOICER2_INPUT_8K,
+	RULE_VOICEL2_INPUT_16K,
+	RULE_VOICER2_INPUT_16K,
+	RULE_VOICEL3_INPUT_8K,
+	RULE_VOICER3_INPUT_8K,
+	RULE_VOICEL3_INPUT_16K,
+	RULE_VOICER3_INPUT_16K,
+	RULE_PCM0_MASTER,
+	RULE_PCM1_MASTER,
+	RULE_PCM0_8K,	/* PCM rate rules applied by aai_hw_params_pcm0/pcm1 */
+	RULE_PCM0_16K,
+	RULE_PCM1_8K,
+	RULE_PCM1_16K,
+	RULE_I2S_48K,
+	RULE_I2S_96K,
+	RULE_SPDIF_OUT,
+	RULE_SPDIF_IN0,
+	RULE_SPDIF_IN1,
+	RULE_SPDIF_IN2,
+	RULE_SPDIF_IN3,
+	RULE_I2S_IN0,
+	RULE_I2S_IN1,
+	RULE_I2S_IN2,
+	RULE_I2S_IN3,
+	RULE_TDM_OUT_OCTO,
+	RULE_TDM_IN_OCTO,
+	RULE_TDM_OUT_QUAD,
+	RULE_TDM_IN_QUAD,
+	RULE_TDM_OUT_ST,
+	RULE_TDM_IN_ST,
+	RULE_TDM_IN0,
+	RULE_TDM_IN1,
+	RULE_TDM_IN2,
+	RULE_TDM_IN3,
+	RULE_END	/* sentinel: rule count, must stay last */
+};
+
+int aai_rule_apply(struct card_data_t *aai, int ruleid);
+int aai_rule_remove(struct card_data_t *aai, int ruleid);
+
+int aai_hw_params_voice(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+int aai_close_voice(struct card_data_t *aai,
+ struct aai_device_t *chan);
+int aai_hw_params_mic0(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+int aai_close_mic0(struct card_data_t *aai,
+ struct aai_device_t *chan);
+int aai_hw_params_mic1(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+int aai_close_mic1(struct card_data_t *aai,
+ struct aai_device_t *chan);
+int aai_hw_params_pcm0(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+int aai_hw_params_pcm1(struct card_data_t *aai,
+ struct aai_device_t *chan,
+ struct snd_pcm_hw_params *hw_params);
+int aai_close_pcm(struct card_data_t *aai,
+ struct aai_device_t *chan);
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/reg_aai.h b/drivers/parrot/sound/p7_aai/reg_aai.h
new file mode 100644
index 00000000000000..5899d16949afa8
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/reg_aai.h
@@ -0,0 +1,693 @@
+/*-======================================================================-
+ * Parrot S.A. Confidential Property
+ *
+ * Project : Parrot 7
+ * File name : reg_aai.h
+ * Author : Marc Tamisier
+ *
+ * ----------------------------------------------------------------------
+ * Purpose : AAI registers definition
+ *
+ */
+#ifndef __REG_AAI_H__
+#define __REG_AAI_H__
+
+/*
+ * AAI sub-module OFFSET definition
+ */
+#define AAI_MAIN_OFFSET 0x0000
+#define AAI_AAICFG_OFFSET 0x1000
+#define AAI_FIFOCFG_OFFSET 0x2000
+#define AAI_DMACFG_OFFSET 0x3000
+#define AAI_FIFORX_OFFSET 0x6000
+#define AAI_FIFOTX_OFFSET 0x7000
+#define AAI_LBC_OFFSET 0xF000
+
+/*
+ * AAI registers address definition
+ */
+
+/*
+ * AAI MAIN sub-module registers address definition
+ */
+
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: IT */
+#define AAI_MAIN_IT (AAI_MAIN_OFFSET + 0x000)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: ITEN */
+#define AAI_MAIN_ITEN (AAI_MAIN_OFFSET + 0x004)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: ITACK */
+#define AAI_MAIN_ITACK (AAI_MAIN_OFFSET + 0x008)
+
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_OK_IT */
+#define AAI_MAIN_RX_OK_IT (AAI_MAIN_OFFSET + 0x010)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_OK_ITACK */
+#define AAI_MAIN_RX_OK_ITACK (AAI_MAIN_OFFSET + 0x014)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_OK_IT */
+#define AAI_MAIN_TX_OK_IT (AAI_MAIN_OFFSET + 0x018)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_OK_ITACK */
+#define AAI_MAIN_TX_OK_ITACK (AAI_MAIN_OFFSET + 0x01C)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_ERR_IT */
+#define AAI_MAIN_RX_ERR_IT (AAI_MAIN_OFFSET + 0x020)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_ERR_ITACK */
+#define AAI_MAIN_RX_ERR_ITACK (AAI_MAIN_OFFSET + 0x024)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_ERR_IT */
+#define AAI_MAIN_TX_ERR_IT (AAI_MAIN_OFFSET + 0x028)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_ERR_ITACK */
+#define AAI_MAIN_TX_ERR_ITACK (AAI_MAIN_OFFSET + 0x02C)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_DMA_IT */
+#define AAI_MAIN_RX_DMA_IT (AAI_MAIN_OFFSET + 0x030)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: RX_DMA_ITACK */
+#define AAI_MAIN_RX_DMA_ITACK (AAI_MAIN_OFFSET + 0x034)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_DMA_IT */
+#define AAI_MAIN_TX_DMA_IT (AAI_MAIN_OFFSET + 0x038)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TX_DMA_ITACK */
+#define AAI_MAIN_TX_DMA_ITACK (AAI_MAIN_OFFSET + 0x03C)
+
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: IT_GROUPS_Rx (0-2) */
+#define AAI_MAIN_IT_GROUPS_RX(_id_) \
+ (AAI_MAIN_OFFSET + (0x040 + 0x4*(_id_)))
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: IT_GROUPS_Tx (0-1) */
+#define AAI_MAIN_IT_GROUPS_TX(_id_) \
+ (AAI_MAIN_OFFSET + (0x050 + 0x4*(_id_)))
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: IT_GROUPS_START */
+#define AAI_MAIN_IT_GROUPS_START (AAI_MAIN_OFFSET + 0x060)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: IT_GROUPS_STOP */
+#define AAI_MAIN_IT_GROUPS_STOP (AAI_MAIN_OFFSET + 0x064)
+
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: DMA_EN_RX */
+#define AAI_MAIN_DMA_EN_RX (AAI_MAIN_OFFSET + 0x080)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: DMA_EN_TX */
+#define AAI_MAIN_DMA_EN_TX (AAI_MAIN_OFFSET + 0x084)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: FIFO_EN_RX */
+#define AAI_MAIN_FIFO_EN_RX (AAI_MAIN_OFFSET + 0x088)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: FIFO_EN_TX */
+#define AAI_MAIN_FIFO_EN_TX (AAI_MAIN_OFFSET + 0x08C)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: DMA_BUSY_RX */
+#define AAI_MAIN_DMA_BUSY_RX (AAI_MAIN_OFFSET + 0x090)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: DMA_BUSY_TX */
+#define AAI_MAIN_DMA_BUSY_TX (AAI_MAIN_OFFSET + 0x094)
+
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_IT */
+#define AAI_MAIN_TIMER_IT (AAI_MAIN_OFFSET + 0x100)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_ITACK */
+#define AAI_MAIN_TIMER_ITACK (AAI_MAIN_OFFSET + 0x104)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_EN */
+#define AAI_MAIN_TIMER_EN (AAI_MAIN_OFFSET + 0x108)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_MODE */
+#define AAI_MAIN_TIMER_MODE (AAI_MAIN_OFFSET + 0x10C)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_SCALE */
+#define AAI_MAIN_TIMER_SCALE (AAI_MAIN_OFFSET + 0x110)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_LOAD */
+#define AAI_MAIN_TIMER_LOAD (AAI_MAIN_OFFSET + 0x120)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_RESET */
+#define AAI_MAIN_TIMER_RESET (AAI_MAIN_OFFSET + 0x124)
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_SELECT */
+#define AAI_MAIN_TIMER_SELECT(timer_id) \
+ (AAI_MAIN_OFFSET + (0x140 + 0x4*(timer_id)))
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_SETVAL */
+#define AAI_MAIN_TIMER_SETVAL(timer_id) \
+ (AAI_MAIN_OFFSET + (0x180 + 0x4*(timer_id)))
+/* IP name: AAI ; IP sub-block name: MAIN ; register name: TIMER_GETVAL */
+#define AAI_MAIN_TIMER_GETVAL(timer_id) \
+ (AAI_MAIN_OFFSET + (0x1C0 + 0x4*(timer_id)))
+
+
+/*
+ * AAI Audio Channel Configuration sub-module registers address definition
+ */
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: MUS */
+#define AAI_AAICFG_MUS(mus_id) \
+ (AAI_AAICFG_OFFSET + (0x000 + 0x4*(mus_id)))
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: TDM_IN */
+#define AAI_AAICFG_TDM_IN (AAI_AAICFG_OFFSET + 0x020)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: TDM_OUT */
+#define AAI_AAICFG_TDM_OUT (AAI_AAICFG_OFFSET + 0x024)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: ADC */
+#define AAI_AAICFG_ADC (AAI_AAICFG_OFFSET + 0x040)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: DAC */
+#define AAI_AAICFG_DAC (AAI_AAICFG_OFFSET + 0x044)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: AUX */
+#define AAI_AAICFG_AUX (AAI_AAICFG_OFFSET + 0x048)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: MIC_MUX */
+#define AAI_AAICFG_MIC_MUX (AAI_AAICFG_OFFSET + 0x04C)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: VOI_MUX */
+#define AAI_AAICFG_VOI_MUX (AAI_AAICFG_OFFSET + 0x050)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: PCM */
+#define AAI_AAICFG_PCM(pcm_id) \
+ (AAI_AAICFG_OFFSET + (0x080 + 0x4 * (pcm_id)))
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SPDIFRX */
+#define AAI_AAICFG_SPDIFRX (AAI_AAICFG_OFFSET + 0x0C0)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SPDIFTX */
+#define AAI_AAICFG_SPDIFTX (AAI_AAICFG_OFFSET + 0x0C4)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: VOICE */
+#define AAI_AAICFG_VOICE(voice_id) \
+ (AAI_AAICFG_OFFSET + (0x100 + 0x4 * (voice_id)))
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: VOICE_SPEED */
+#define AAI_AAICFG_VOICE_SPEED (AAI_AAICFG_OFFSET + 0x120)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: VOICE_8K */
+#define AAI_AAICFG_VOICE_8K (AAI_AAICFG_OFFSET + 0x124)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: MUS_FILTER5 */
+#define AAI_AAICFG_MUS_FILTER5 (AAI_AAICFG_OFFSET + 0x140)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SRC_FILTER5 */
+#define AAI_AAICFG_SRC_FILTER5 (AAI_AAICFG_OFFSET + 0x180)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SRC_MODE */
+#define AAI_AAICFG_SRC_MODE (AAI_AAICFG_OFFSET + 0x184)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SRC_SPEED0 */
+#define AAI_AAICFG_SRC_SPEED0 (AAI_AAICFG_OFFSET + 0x188)
+/* IP name: AAI ; IP sub-block name: AAICFG ; register name: SRC_SPEED1 */
+#define AAI_AAICFG_SRC_SPEED1 (AAI_AAICFG_OFFSET + 0x18C)
+
+/*
+ * AAI DMA Configuration sub-module registers address definition
+ */
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CA_RX */
+#define AAI_DMACFG_CA_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x000 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CA_TX */
+#define AAI_DMACFG_CA_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x100 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NA_RX */
+#define AAI_DMACFG_NA_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x200 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NA_TX */
+#define AAI_DMACFG_NA_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x300 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CC_RX */
+#define AAI_DMACFG_CC_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x400 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CC_TX */
+#define AAI_DMACFG_CC_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x500 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NC_RX */
+#define AAI_DMACFG_NC_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x600 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NC_TX */
+#define AAI_DMACFG_NC_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x700 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CV_RX */
+#define AAI_DMACFG_CV_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x800 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: CV_TX */
+#define AAI_DMACFG_CV_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0x900 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NV_RX */
+#define AAI_DMACFG_NV_RX(rxfifo_id) \
+ (AAI_DMACFG_OFFSET + (0xa00 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: DMACFG ; register name: NV_TX */
+#define AAI_DMACFG_NV_TX(txfifo_id) \
+ (AAI_DMACFG_OFFSET + (0xb00 + 0x4 * (txfifo_id)))
+
+
+/*
+ * AAI FIFO Configuration sub-module registers address definition
+ */
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: CFG_RX */
+#define AAI_FIFOCFG_CFG_RX(rxfifo_id) \
+ (AAI_FIFOCFG_OFFSET + (0x000 + 0x4 * (rxfifo_id)))
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: CFG_TX */
+#define AAI_FIFOCFG_CFG_TX(txfifo_id) \
+ (AAI_FIFOCFG_OFFSET + (0x100 + 0x4 * (txfifo_id)))
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: SEL_RX */
+#define AAI_FIFOCFG_SEL_RX(rxchan_id) \
+ (AAI_FIFOCFG_OFFSET + (0x200 + 0x4 * (rxchan_id)))
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: SEL_TX */
+#define AAI_FIFOCFG_SEL_TX(txchan_id) \
+ (AAI_FIFOCFG_OFFSET + (0x300 + 0x4 * (txchan_id)))
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: RX_16BIT */
+#define AAI_FIFOCFG_RX_16BIT (AAI_FIFOCFG_OFFSET + 0x400)
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: TX_16BIT */
+#define AAI_FIFOCFG_TX_16BIT (AAI_FIFOCFG_OFFSET + 0x404)
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: RX_ENDIAN */
+#define AAI_FIFOCFG_RX_ENDIAN (AAI_FIFOCFG_OFFSET + 0x408)
+/* IP name: AAI ; IP sub-block name: FIFOCFG ; register name: TX_ENDIAN */
+#define AAI_FIFOCFG_TX_ENDIAN (AAI_FIFOCFG_OFFSET + 0x40C)
+
+
+/*
+ * AAI FIFO RX sub-module registers address definition
+ */
+/* IP name: AAI ; IP sub-block name: - ; register name: FIFORX */
+#define AAI_FIFORX(rxfifo_id) \
+ (AAI_FIFORX_OFFSET + (0x000 + 0x40 * (rxfifo_id)))
+
+/*
+ * AAI FIFO TX sub-module registers address definition
+ */
+/* IP name: AAI ; IP sub-block name: - ; register name: FIFOTX */
+#define AAI_FIFOTX(txfifo_id) \
+ (AAI_FIFOTX_OFFSET + (0x000 + 0x40 * (txfifo_id)))
+
+/*
+ * AAI registers bit offset definition
+ */
+
+/*
+ * AAI MAIN sub-module registers bit offset definition
+ */
+/* IP name: AAI ; register name: IT ; bits field name: RX_DONE */
+#define AAI_IT_RX_DONE_SHIFT 0
+#define AAI_IT_RX_DONE_WIDTH 1
+#define AAI_IT_RX_DONE_MSK \
+ ((uint32_t)((1 << AAI_IT_RX_DONE_WIDTH) - 1) << AAI_IT_RX_DONE_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: RX_FIFO_ERR */
+#define AAI_IT_RX_FIFO_ERR_SHIFT 1
+#define AAI_IT_RX_FIFO_ERR_WIDTH 1
+#define AAI_IT_RX_FIFO_ERR_MSK \
+ ((uint32_t)((1 << AAI_IT_RX_FIFO_ERR_WIDTH) - 1) << \
+ AAI_IT_RX_FIFO_ERR_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: RX_DMA_ERR */
+#define AAI_IT_RX_DMA_ERR_SHIFT 2
+#define AAI_IT_RX_DMA_ERR_WIDTH 1
+#define AAI_IT_RX_DMA_ERR_MSK \
+ ((uint32_t)((1 << AAI_IT_RX_DMA_ERR_WIDTH) - 1) << \
+ AAI_IT_RX_DMA_ERR_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: TX_DONE */
+#define AAI_IT_TX_DONE_SHIFT 8
+#define AAI_IT_TX_DONE_WIDTH 1
+#define AAI_IT_TX_DONE_MSK \
+ ((uint32_t)((1 << AAI_IT_TX_DONE_WIDTH) - 1) << AAI_IT_TX_DONE_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: TX_FIFO_ERR */
+#define AAI_IT_TX_FIFO_ERR_SHIFT 9
+#define AAI_IT_TX_FIFO_ERR_WIDTH 1
+#define AAI_IT_TX_FIFO_ERR_MSK \
+ ((uint32_t)((1 << AAI_IT_TX_FIFO_ERR_WIDTH) - 1) << \
+ AAI_IT_TX_FIFO_ERR_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: TX_DMA_ERR */
+#define AAI_IT_TX_DMA_ERR_SHIFT 10
+#define AAI_IT_TX_DMA_ERR_WIDTH 1
+#define AAI_IT_TX_DMA_ERR_MSK \
+ ((uint32_t)((1 << AAI_IT_TX_DMA_ERR_WIDTH) - 1) << \
+ AAI_IT_TX_DMA_ERR_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: TIMER_IT */
+#define AAI_IT_TIMER_IT_SHIFT 15
+#define AAI_IT_TIMER_IT_WIDTH 1
+#define AAI_IT_TIMER_IT_MSK \
+ ((uint32_t)((1 << AAI_IT_TIMER_IT_WIDTH) - 1) << AAI_IT_TIMER_IT_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: GROUP_ITEN */
+#define AAI_IT_GROUP_ITEN_SHIFT 16
+#define AAI_IT_GROUP_ITEN_WIDTH 15
+#define AAI_IT_GROUP_ITEN_MSK \
+ ((uint32_t)((1 << AAI_IT_GROUP_ITEN_WIDTH) - 1) << \
+ AAI_IT_GROUP_ITEN_SHIFT)
+/* IP name: AAI ; register name: IT ; bits field name: ONGOING_END_IT */
+#define AAI_IT_ONGOING_END_IT_SHIFT 31
+#define AAI_IT_ONGOING_END_IT_WIDTH 1
+#define AAI_IT_ONGOING_END_IT_MSK \
+ ((uint32_t)((1 << AAI_IT_ONGOING_END_IT_WIDTH) - 1) << \
+ AAI_IT_ONGOING_END_IT_SHIFT)
+
+/* IP name: AAI ; register name: ITEN ; bits field name: RX_DONE */
+#define AAI_ITEN_RX_DONE_SHIFT 0
+#define AAI_ITEN_RX_DONE_WIDTH 1
+#define AAI_ITEN_RX_DONE_MSK \
+ ((uint32_t)((1 << AAI_ITEN_RX_DONE_WIDTH) - 1) << \
+ AAI_ITEN_RX_DONE_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: RX_FIFO_ERR */
+#define AAI_ITEN_RX_FIFO_ERR_SHIFT 1
+#define AAI_ITEN_RX_FIFO_ERR_WIDTH 1
+#define AAI_ITEN_RX_FIFO_ERR_MSK \
+ ((uint32_t)((1 << AAI_ITEN_RX_FIFO_ERR_WIDTH) - 1) << \
+ AAI_ITEN_RX_FIFO_ERR_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: RX_DMA_ERR */
+#define AAI_ITEN_RX_DMA_ERR_SHIFT 2
+#define AAI_ITEN_RX_DMA_ERR_WIDTH 1
+#define AAI_ITEN_RX_DMA_ERR_MSK \
+ ((uint32_t)((1 << AAI_ITEN_RX_DMA_ERR_WIDTH) - 1) << \
+ AAI_ITEN_RX_DMA_ERR_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: TX_DONE */
+#define AAI_ITEN_TX_DONE_SHIFT 8
+#define AAI_ITEN_TX_DONE_WIDTH 1
+#define AAI_ITEN_TX_DONE_MSK \
+ ((uint32_t)((1 << AAI_ITEN_TX_DONE_WIDTH) - 1) << \
+ AAI_ITEN_TX_DONE_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: TX_FIFO_ERR */
+#define AAI_ITEN_TX_FIFO_ERR_SHIFT 9
+#define AAI_ITEN_TX_FIFO_ERR_WIDTH 1
+#define AAI_ITEN_TX_FIFO_ERR_MSK \
+ ((uint32_t)((1 << AAI_ITEN_TX_FIFO_ERR_WIDTH) - 1) << \
+ AAI_ITEN_TX_FIFO_ERR_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: TX_DMA_ERR */
+#define AAI_ITEN_TX_DMA_ERR_SHIFT 10
+#define AAI_ITEN_TX_DMA_ERR_WIDTH 1
+#define AAI_ITEN_TX_DMA_ERR_MSK \
+ ((uint32_t)((1 << AAI_ITEN_TX_DMA_ERR_WIDTH) - 1) << \
+ AAI_ITEN_TX_DMA_ERR_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: TIMER_IT */
+#define AAI_ITEN_TIMER_IT_SHIFT 15
+#define AAI_ITEN_TIMER_IT_WIDTH 1
+#define AAI_ITEN_TIMER_IT_MSK \
+ ((uint32_t)((1 << AAI_ITEN_TIMER_IT_WIDTH) - 1) << \
+ AAI_ITEN_TIMER_IT_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: GROUP_ITEN */
+#define AAI_ITEN_GROUP_ITEN_SHIFT 16
+#define AAI_ITEN_GROUP_ITEN_WIDTH 15
+#define AAI_ITEN_GROUP_ITEN_MSK \
+ ((uint32_t)((1 << AAI_ITEN_GROUP_ITEN_WIDTH) - 1) << \
+ AAI_ITEN_GROUP_ITEN_SHIFT)
+/* IP name: AAI ; register name: ITEN ; bits field name: ONGOING_END_IT */
+#define AAI_ITEN_ONGOING_END_IT_SHIFT 31
+#define AAI_ITEN_ONGOING_END_IT_WIDTH 1
+#define AAI_ITEN_ONGOING_END_IT_MSK \
+ ((uint32_t)((1 << AAI_ITEN_ONGOING_END_IT_WIDTH) - 1) << \
+ AAI_ITEN_ONGOING_END_IT_SHIFT)
+
+/*
+ * AAI Audio Channel Configuration sub-module registers bit offset definition
+ */
+/* IP name: AAI ; register name: MUSx ; bits field name: MUSx_CYCLES_NUM */
+#define AAI_MUSx_CYCLES_NUM_SHIFT 0
+#define AAI_MUSx_CYCLES_NUM_WIDTH 7
+#define AAI_MUSx_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_MUSx_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_MUSx_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: MATSUSHITA */
+#define AAI_MUSx_MATSUSHITA_SHIFT 7
+#define AAI_MUSx_MATSUSHITA_WIDTH 1
+#define AAI_MUSx_MATSUSHITA_MSK \
+ ((uint32_t)((1 << AAI_MUSx_MATSUSHITA_WIDTH) - 1) << \
+ AAI_MUSx_MATSUSHITA_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: LEFT_FRAME */
+#define AAI_MUSx_LEFT_FRAME_SHIFT 8
+#define AAI_MUSx_LEFT_FRAME_WIDTH 1
+#define AAI_MUSx_LEFT_FRAME_MSK \
+ ((uint32_t)((1 << AAI_MUSx_LEFT_FRAME_WIDTH) - 1) << \
+ AAI_MUSx_LEFT_FRAME_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: LSB_FIRST */
+#define AAI_MUSx_LSB_FIRST_SHIFT 9
+#define AAI_MUSx_LSB_FIRST_WIDTH 1
+#define AAI_MUSx_LSB_FIRST_MSK \
+ ((uint32_t)((1 << AAI_MUSx_LSB_FIRST_WIDTH) - 1) << \
+ AAI_MUSx_LSB_FIRST_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: RIGHT_JUST */
+#define AAI_MUSx_RIGHT_JUST_SHIFT 10
+#define AAI_MUSx_RIGHT_JUST_WIDTH 1
+#define AAI_MUSx_RIGHT_JUST_MSK \
+ ((uint32_t)((1 << AAI_MUSx_RIGHT_JUST_WIDTH) - 1) << \
+ AAI_MUSx_RIGHT_JUST_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: 24BITS */
+#define AAI_MUSx_24BITS_SHIFT 11
+#define AAI_MUSx_24BITS_WIDTH 1
+#define AAI_MUSx_24BITS_MSK \
+ ((uint32_t)((1 << AAI_MUSx_24BITS_WIDTH) - 1) << AAI_MUSx_24BITS_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: SYNCHRO_TYPE */
+#define AAI_MUSx_SYNCHRO_TYPE_SHIFT 12
+#define AAI_MUSx_SYNCHRO_TYPE_WIDTH 3
+#define AAI_MUSx_SYNCHRO_TYPE_MSK \
+ ((uint32_t)((1 << AAI_MUSx_SYNCHRO_TYPE_WIDTH) - 1) << \
+ AAI_MUSx_SYNCHRO_TYPE_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: USE_SPDIF */
+#define AAI_MUSx_USE_SPDIF_SHIFT 16
+#define AAI_MUSx_USE_SPDIF_WIDTH 1
+#define AAI_MUSx_USE_SPDIF_MSK \
+ ((uint32_t)((1 << AAI_MUSx_USE_SPDIF_WIDTH) - 1) << \
+ AAI_MUSx_USE_SPDIF_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: BIPHASE_SOURCE */
+#define AAI_MUSx_BIPHASE_SOURCE_SHIFT 17
+#define AAI_MUSx_BIPHASE_SOURCE_WIDTH 1
+#define AAI_MUSx_BIPHASE_SOURCE_MSK \
+ ((uint32_t)((1 << AAI_MUSx_BIPHASE_SOURCE_WIDTH) - 1) << \
+ AAI_MUSx_BIPHASE_SOURCE_SHIFT)
+/* IP name: AAI ; register name: MUSx ; bits field name: BINARY_ACCEPT */
+#define AAI_MUSx_BINARY_ACCEPT_SHIFT 18
+#define AAI_MUSx_BINARY_ACCEPT_WIDTH 1
+#define AAI_MUSx_BINARY_ACCEPT_MSK \
+ ((uint32_t)((1 << AAI_MUSx_BINARY_ACCEPT_WIDTH) - 1) << \
+ AAI_MUSx_BINARY_ACCEPT_SHIFT)
+
+#define AAI_MUSx_SYNCHRO_TYPE_DAC 0
+#define AAI_MUSx_SYNCHRO_TYPE_ADC 1
+#define AAI_MUSx_SYNCHRO_TYPE_AUX 2
+#define AAI_MUSx_SYNCHRO_TYPE_ASYNCH 3
+#define AAI_MUSx_SYNCHRO_TYPE_TDM0 4
+#define AAI_MUSx_SYNCHRO_TYPE_TDM1 5
+#define AAI_MUSx_SYNCHRO_TYPE_TDM2 6
+#define AAI_MUSx_SYNCHRO_TYPE_TDM3 7
+
+/* IP name: AAI ; register name:TDM_IN; bits field name: TDMxx_CYCLES_NUMS */
+#define AAI_TDM_IN_CYCLES_NUM_SHIFT 0
+#define AAI_TDM_IN_CYCLES_NUM_WIDTH 7
+#define AAI_TDM_IN_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_TDM_IN_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_TDM_IN_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name:TDM_IN; bits field name: TDMxx_LEFT_FRAME */
+#define AAI_TDM_IN_LEFT_FRAME_SHIFT 8
+#define AAI_TDM_IN_LEFT_FRAME_WIDTH 1
+#define AAI_TDM_IN_LEFT_FRAME_MSK \
+ ((uint32_t)((1 << AAI_TDM_IN_LEFT_FRAME_WIDTH) - 1) << \
+ AAI_TDM_IN_LEFT_FRAME_SHIFT)
+/* IP name: AAI ; register name:TDM_IN; bits field name: TDMxx_EN */
+#define AAI_TDM_IN_EN_SHIFT 31
+#define AAI_TDM_IN_EN_WIDTH 1
+#define AAI_TDM_IN_EN_MSK \
+ ((uint32_t)((1 << AAI_TDM_IN_EN_WIDTH) - 1) << AAI_TDM_IN_EN_SHIFT)
+
+/* IP name: AAI ; register name:TDM_OUT; bits field name: TDMxx_CYCLES_NUMS */
+#define AAI_TDM_OUT_CYCLES_NUM_SHIFT 0
+#define AAI_TDM_OUT_CYCLES_NUM_WIDTH 7
+#define AAI_TDM_OUT_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_TDM_OUT_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_TDM_OUT_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name:TDM_OUT; bits field name: TDMxx_LEFT_FRAME */
+#define AAI_TDM_OUT_LEFT_FRAME_SHIFT 8
+#define AAI_TDM_OUT_LEFT_FRAME_WIDTH 1
+#define AAI_TDM_OUT_LEFT_FRAME_MSK \
+ ((uint32_t)((1 << AAI_TDM_OUT_LEFT_FRAME_WIDTH) - 1) << \
+ AAI_TDM_OUT_LEFT_FRAME_SHIFT)
+/* IP name: AAI ; register name:TDM_OUT; bits field name: TDMxx_EN */
+#define AAI_TDM_OUT_EN_SHIFT 31
+#define AAI_TDM_OUT_EN_WIDTH 1
+#define AAI_TDM_OUT_EN_MSK \
+ ((uint32_t)((1 << AAI_TDM_OUT_EN_WIDTH) - 1) << AAI_TDM_OUT_EN_SHIFT)
+
+/* IP name: AAI ; register name:ADC; bits field name: ADC_CYCLES_NUMS */
+#define AAI_ADC_CYCLES_NUM_SHIFT 0
+#define AAI_ADC_CYCLES_NUM_WIDTH 7
+#define AAI_ADC_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_ADC_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_ADC_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name:ADC; bits field name: ADC_MATSUSHITA */
+#define AAI_ADC_MATSUSHITA_SHIFT 7
+#define AAI_ADC_MATSUSHITA_WIDTH 1
+#define AAI_ADC_MATSUSHITA_MSK \
+ ((uint32_t)((1 << AAI_ADC_MATSUSHITA_WIDTH) - 1) << \
+ AAI_ADC_MATSUSHITA_SHIFT)
+/* IP name: AAI ; register name:ADC; bits field name: ADC_LEFT_FRAME */
+#define AAI_ADC_LEFT_FRAME_SHIFT 8
+#define AAI_ADC_LEFT_FRAME_WIDTH 1
+#define AAI_ADC_LEFT_FRAME_MSK \
+ ((uint32_t)((1 << AAI_ADC_LEFT_FRAME_WIDTH) - 1) << \
+ AAI_ADC_LEFT_FRAME_SHIFT)
+
+/* IP name: AAI ; register name:DAC; bits field name: DAC_CYCLES_NUMS */
+#define AAI_DAC_CYCLES_NUM_SHIFT 0
+#define AAI_DAC_CYCLES_NUM_WIDTH 7
+#define AAI_DAC_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_DAC_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_DAC_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name:DAC; bits field name: DAC_MATSUSHITA */
+#define AAI_DAC_MATSUSHITA_SHIFT 7
+#define AAI_DAC_MATSUSHITA_WIDTH 1
+#define AAI_DAC_MATSUSHITA_MSK \
+ ((uint32_t)((1 << AAI_DAC_MATSUSHITA_WIDTH) - 1) << \
+ AAI_DAC_MATSUSHITA_SHIFT)
+/* IP name: AAI ; register name:DAC; bits field name: DAC_LEFT_FRAME */
+#define AAI_DAC_LEFT_FRAME_SHIFT 8
+#define AAI_DAC_LEFT_FRAME_WIDTH 1
+#define AAI_DAC_LEFT_FRAME_MSK \
+ ((uint32_t)((1 << AAI_DAC_LEFT_FRAME_WIDTH) - 1) << \
+ AAI_DAC_LEFT_FRAME_SHIFT)
+
+/* IP name: AAI ; register name:AUX; bits field name: AUX_CYCLES_NUMS */
+#define AAI_AUX_CYCLES_NUM_SHIFT 0
+#define AAI_AUX_CYCLES_NUM_WIDTH 7
+#define AAI_AUX_CYCLES_NUM_MSK \
+ ((uint32_t)((1 << AAI_AUX_CYCLES_NUM_WIDTH) - 1) << \
+ AAI_AUX_CYCLES_NUM_SHIFT)
+/* IP name: AAI ; register name:AUX; bits field name: AUX_HALF_FRAME */
+#define AAI_AUX_HALF_FRAME_SHIFT 24
+#define AAI_AUX_HALF_FRAME_WIDTH 1
+#define AAI_AUX_HALF_FRAME_MSK \
+ ((uint32_t)((1 << AAI_AUX_HALF_FRAME_WIDTH) - 1) << \
+ AAI_AUX_HALF_FRAME_SHIFT)
+
+/* IP name: AAI ; register name: PCM ; bits field name: START_TWO */
+#define AAI_PCM_START_TWO_SHIFT 0
+#define AAI_PCM_START_TWO_WIDTH 8
+#define AAI_PCM_START_TWO_MSK \
+ ((uint32_t)((1 << AAI_PCM_START_TWO_WIDTH) - 1) << \
+ AAI_PCM_START_TWO_SHIFT)
+/* IP name: AAI ; register name: PCM ; bits field name: CYCLES_HIGH */
+#define AAI_PCM_CYCLES_HIGH_SHIFT 8
+#define AAI_PCM_CYCLES_HIGH_WIDTH 8
+#define AAI_PCM_CYCLES_HIGH_MSK \
+ ((uint32_t)((1 << AAI_PCM_CYCLES_HIGH_WIDTH) - 1) << \
+ AAI_PCM_CYCLES_HIGH_SHIFT)
+/* IP name: AAI ; register name: PCM ; bits field name: CYCLES_LOW */
+#define AAI_PCM_CYCLES_LOW_SHIFT 16
+#define AAI_PCM_CYCLES_LOW_WIDTH 9
+#define AAI_PCM_CYCLES_LOW_MSK \
+ ((uint32_t)((1 << AAI_PCM_CYCLES_LOW_WIDTH) - 1) << \
+ AAI_PCM_CYCLES_LOW_SHIFT)
+/* IP name: AAI ; register name: PCM ; bits field name: ON */
+#define AAI_PCM_ON_SHIFT 25
+#define AAI_PCM_ON_WIDTH 1
+#define AAI_PCM_ON_MSK \
+ ((uint32_t)((1 << AAI_PCM_ON_WIDTH) - 1) << AAI_PCM_ON_SHIFT)
+/* IP name: AAI ; register name: PCM ; bits field name: OKI */
+#define AAI_PCM_OKI_SHIFT 26
+#define AAI_PCM_OKI_WIDTH 1
+#define AAI_PCM_OKI_MSK \
+ ((uint32_t)((1 << AAI_PCM_OKI_WIDTH) - 1) << AAI_PCM_OKI_SHIFT)
+/* IP name: AAI ; register name: PCM ; bits field name: RUN_DUAL */
+#define AAI_PCM_RUN_DUAL_SHIFT 27
+#define AAI_PCM_RUN_DUAL_WIDTH 1
+#define AAI_PCM_RUN_DUAL_MSK \
+ ((uint32_t)((1 << AAI_PCM_RUN_DUAL_WIDTH) - 1) << \
+ AAI_PCM_RUN_DUAL_SHIFT)
+/* Values */
+#define AAI_PCM_ON (1 << AAI_PCM_ON_SHIFT)
+#define AAI_PCM_OKI (1 << AAI_PCM_OKI_SHIFT)
+#define AAI_PCM_RUN_DUAL (1 << AAI_PCM_RUN_DUAL_SHIFT)
+
+/* IP name: AAI ; register name: VOICE_8K ; bits field name: PHONE */
+#define AAI_VOICE_8K_PHONE_SHIFT 0
+#define AAI_VOICE_8K_PHONE_WIDTH 1
+#define AAI_VOICE_8K_PHONE_MSK \
+ ((uint32_t)((1 << AAI_VOICE_8K_PHONE_WIDTH) - 1) << \
+ AAI_VOICE_8K_PHONE_SHIFT)
+/* IP name: AAI ; register name: VOICE_8K ; bits field name: SPEAKER */
+#define AAI_VOICE_8K_SPEAKER_SHIFT 1
+#define AAI_VOICE_8K_SPEAKER_WIDTH 1
+#define AAI_VOICE_8K_SPEAKER_MSK \
+ ((uint32_t)((1 << AAI_VOICE_8K_SPEAKER_WIDTH) - 1) << \
+ AAI_VOICE_8K_SPEAKER_SHIFT)
+/* Values */
+#define AAI_VOICE_8K_PHONE (1 << AAI_VOICE_8K_PHONE_SHIFT)
+#define AAI_VOICE_8K_SPEAKER (1 << AAI_VOICE_8K_SPEAKER_SHIFT)
+
+#define AAI_MUS_FILTER5_LOW_FREQ (1 << 0)
+
+
+#define AAI_VOICE_SPEED_16_8 0x00810E35	/* presumably fixed-point rate ratio — TODO confirm encoding */
+#define AAI_VOICE_SPEED_22_11 0x00B1DAC7
+
+
+#define AAI_SRC_FILTER5_0 (0 << 0)
+#define AAI_SRC_FILTER5_1 (1 << 1)	/* NOTE(review): _0 is (0 << 0) but _1 is (1 << 1) — confirm intended bit position */
+
+#define AAI_SRC_MODE_QUADRI0 (1 << 0)
+#define AAI_SRC_MODE_QUADRI1 (1 << 1)
+#define AAI_SRC_MODE_OCTOPHONY (1 << 2)
+#define AAI_SRC_MODE_RUN0 (1 << 8)
+#define AAI_SRC_MODE_RUN1 (1 << 9)
+
+/*
+ * AAI FIFO Configuration sub-module registers address definition
+ */
+/* IP name: AAI ; register name: CFG_RX ; bits field name: START */
+#define AAI_FIFOCFG_CFG_RX_START_SHIFT 0
+#define AAI_FIFOCFG_CFG_RX_START_WIDTH 12
+#define AAI_FIFOCFG_CFG_RX_START_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_RX_START_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_RX_START_SHIFT)
+/* IP name: AAI ; register name: CFG_RX ; bits field name: DEPTH */
+#define AAI_FIFOCFG_CFG_RX_DEPTH_SHIFT 16
+#define AAI_FIFOCFG_CFG_RX_DEPTH_WIDTH 8
+#define AAI_FIFOCFG_CFG_RX_DEPTH_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_RX_DEPTH_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_RX_DEPTH_SHIFT)
+/* IP name: AAI ; register name: CFG_RX ; bits field name: CHAN_NUM */
+#define AAI_FIFOCFG_CFG_RX_CHAN_NUM_SHIFT 24
+#define AAI_FIFOCFG_CFG_RX_CHAN_NUM_WIDTH 6
+#define AAI_FIFOCFG_CFG_RX_CHAN_NUM_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_RX_CHAN_NUM_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_RX_CHAN_NUM_SHIFT)
+
+/* IP name: AAI ; register name: CFG_TX ; bits field name: START */
+#define AAI_FIFOCFG_CFG_TX_START_SHIFT 0
+#define AAI_FIFOCFG_CFG_TX_START_WIDTH 12
+#define AAI_FIFOCFG_CFG_TX_START_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_TX_START_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_TX_START_SHIFT)
+/* IP name: AAI ; register name: CFG_TX ; bits field name: DEPTH */
+#define AAI_FIFOCFG_CFG_TX_DEPTH_SHIFT 16
+#define AAI_FIFOCFG_CFG_TX_DEPTH_WIDTH 8
+#define AAI_FIFOCFG_CFG_TX_DEPTH_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_TX_DEPTH_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_TX_DEPTH_SHIFT)
+/* IP name: AAI ; register name: CFG_TX ; bits field name: CHAN_NUM */
+#define AAI_FIFOCFG_CFG_TX_CHAN_NUM_SHIFT 24
+#define AAI_FIFOCFG_CFG_TX_CHAN_NUM_WIDTH 5
+#define AAI_FIFOCFG_CFG_TX_CHAN_NUM_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_CFG_TX_CHAN_NUM_WIDTH) - 1) << \
+ AAI_FIFOCFG_CFG_TX_CHAN_NUM_SHIFT)
+
+
+/* IP name: AAI ; register name: SEL_RX ; bits field name: CHAN_ROUTE */
+#define AAI_FIFOCFG_SEL_RX_CHAN_ROUTE_SHIFT 0
+#define AAI_FIFOCFG_SEL_RX_CHAN_ROUTE_WIDTH 5
+#define AAI_FIFOCFG_SEL_RX_CHAN_ROUTE_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_SEL_RX_CHAN_ROUTE_WIDTH) - 1) << \
+ AAI_FIFOCFG_SEL_RX_CHAN_ROUTE_SHIFT)
+/* IP name: AAI ; register name: SEL_RX ; bits field name: CHAN_ORDER */
+#define AAI_FIFOCFG_SEL_RX_CHAN_ORDER_SHIFT 8
+#define AAI_FIFOCFG_SEL_RX_CHAN_ORDER_WIDTH 6
+#define AAI_FIFOCFG_SEL_RX_CHAN_ORDER_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_SEL_RX_CHAN_ORDER_WIDTH) - 1) << \
+ AAI_FIFOCFG_SEL_RX_CHAN_ORDER_SHIFT)
+
+/* IP name: AAI ; register name: SEL_TX ; bits field name: CHAN_ROUTE */
+#define AAI_FIFOCFG_SEL_TX_CHAN_ROUTE_SHIFT 0
+#define AAI_FIFOCFG_SEL_TX_CHAN_ROUTE_WIDTH 4
+#define AAI_FIFOCFG_SEL_TX_CHAN_ROUTE_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_SEL_TX_CHAN_ROUTE_WIDTH) - 1) << \
+ AAI_FIFOCFG_SEL_TX_CHAN_ROUTE_SHIFT)
+/* IP name: AAI ; register name: SEL_TX ; bits field name: CHAN_ORDER */
+#define AAI_FIFOCFG_SEL_TX_CHAN_ORDER_SHIFT 8
+#define AAI_FIFOCFG_SEL_TX_CHAN_ORDER_WIDTH 5
+#define AAI_FIFOCFG_SEL_TX_CHAN_ORDER_MSK \
+ ((uint32_t)((1 << AAI_FIFOCFG_SEL_TX_CHAN_ORDER_WIDTH) - 1) << \
+ AAI_FIFOCFG_SEL_TX_CHAN_ORDER_SHIFT)
+
+/*
+ * AAI LBC Configuration sub-module registers address definition
+ */
+/* IP name: AAI ; register name: CKEN ; bits field name: MFRAME_MODE */
+#define AAI_LBC_CKEN_MFRAME_SHIFT 17
+#define AAI_LBC_CKEN_MFRAME_WIDTH 1
+#define AAI_LBC_CKEN_MFRAME_MSK \
+ ((uint32_t)((1 << AAI_LBC_CKEN_MFRAME_WIDTH) - 1) << \
+ AAI_LBC_CKEN_MFRAME_SHIFT)
+/* IP name: AAI ; register name: CKEN ; bits field name: HFRAME_MODE */
+#define AAI_LBC_CKEN_HFRAME_SHIFT 18
+#define AAI_LBC_CKEN_HFRAME_WIDTH 1
+#define AAI_LBC_CKEN_HFRAME_MSK \
+ ((uint32_t)((1 << AAI_LBC_CKEN_HFRAME_WIDTH) - 1) << \
+ AAI_LBC_CKEN_HFRAME_SHIFT)
+/* IP name: AAI ; register name: MFRAME ; bits field name: MFRAME_DIV */
+#define AAI_LBC_MFRAME_MFRAME_DIV_LOW_SHIFT 0
+#define AAI_LBC_MFRAME_MFRAME_DIV_LOW_WIDTH 16
+#define AAI_LBC_MFRAME_MFRAME_DIV_LOW_MSK \
+ ((uint32_t)((1 << AAI_LBC_MFRAME_MFRAME_DIV_LOW_WIDTH) - 1) << \
+ AAI_LBC_MFRAME_MFRAME_DIV_LOW_SHIFT)
+/* IP name: AAI ; register name: MFRAME ; bits field name: MFRAME_DIV */
+#define AAI_LBC_MFRAME_MFRAME_DIV_HIGH_SHIFT 16
+#define AAI_LBC_MFRAME_MFRAME_DIV_HIGH_WIDTH 16
+#define AAI_LBC_MFRAME_MFRAME_DIV_HIGH_MSK \
+ ((uint32_t)((1 << AAI_LBC_MFRAME_MFRAME_DIV_HIGH_WIDTH) - 1) << \
+ AAI_LBC_MFRAME_MFRAME_DIV_HIGH_SHIFT)
+/* IP name: AAI ; register name: HFRAME ; bits field name: HFRAME_DIV */
+#define AAI_LBC_HFRAME_HFRAME_DIV_LOW_SHIFT 0
+#define AAI_LBC_HFRAME_HFRAME_DIV_LOW_WIDTH 16
+#define AAI_LBC_HFRAME_HFRAME_DIV_LOW_MSK \
+ ((uint32_t)((1 << AAI_LBC_HFRAME_HFRAME_DIV_LOW_WIDTH) - 1) << \
+ AAI_LBC_HFRAME_HFRAME_DIV_LOW_SHIFT)
+/* IP name: AAI ; register name: HFRAME ; bits field name: HFRAME_DIV */
+#define AAI_LBC_HFRAME_HFRAME_DIV_HIGH_SHIFT 16
+#define AAI_LBC_HFRAME_HFRAME_DIV_HIGH_WIDTH 16
+#define AAI_LBC_HFRAME_HFRAME_DIV_HIGH_MSK \
+ ((uint32_t)((1 << AAI_LBC_HFRAME_HFRAME_DIV_HIGH_WIDTH) - 1) << \
+ AAI_LBC_HFRAME_HFRAME_DIV_HIGH_SHIFT)
+
+#endif
+
diff --git a/drivers/parrot/sound/p7_aai/reg_aai_gbc.h b/drivers/parrot/sound/p7_aai/reg_aai_gbc.h
new file mode 100644
index 00000000000000..b16fa2cf56485d
--- /dev/null
+++ b/drivers/parrot/sound/p7_aai/reg_aai_gbc.h
@@ -0,0 +1,111 @@
+/*-======================================================================-
+ * Parrot S.A. Confidential Property
+ *
+ * Project : Parrot 7
+ * File name : reg_aai_gbc.h
+ * Author : Marc Tamisier
+ *
+ * ----------------------------------------------------------------------
+ * Purpose : AAI registers definition
+ */
+#ifndef __REG_AAI_GBC_H__
+#define __REG_AAI_GBC_H__
+
+#include "reg_aai.h"
+
+#define AAI_OFFSET 0xfc020000
+
+/*
+ * General GBC definition
+ * block_id
+ */
+#define GBC_AAI 0xFFFC
+
+/* module_id */
+#define GBC_AAI0 0
+
+/* clock_id */
+#define GBC_AAI0_MCLK 0
+#define GBC_AAI0_MFRAME 1
+#define GBC_AAI0_HFRAME 2
+#define GBC_AAI0_P1CLK 8
+#define GBC_AAI0_P2CLK 9
+#define GBC_AAI0_CLK 16
+
+/* reset_id */
+#define GBC_AAI0_RESET 0
+#define GBC_AAI0_RESET_SPDIF 1
+
+
+/*
+ * Specific GBC definition
+ */
+#define AAI_GBC_OFFSET 0xF000
+#define AAI_GBC_OFFSET_R3 0xFF000
+
+/*
+ * AAI clocks enables and resets
+ * prefer to use GBC function (using block_id, module_id, clock_id)
+ */
+#define AAI_GBC_CLKEN (AAI_GBC_OFFSET + 0xFFC)
+#define AAI_GBC_CLKEN_R3 (AAI_GBC_OFFSET_R3 + 0xFFC)
+/*#define AAI_GBC_RESET (AAI_GBC_OFFSET + 0xFF8)*/
+
+/* i2s clocks configurations */
+#define AAI_GBC_MCLK (AAI_GBC_OFFSET + 0x7FC)
+#define AAI_GBC_MFRAME (AAI_GBC_OFFSET + 0x7F8)
+
+/* pcm clocks configurations */
+#define AAI_GBC_P1CLK (AAI_GBC_OFFSET + 0x7EC)
+#define AAI_GBC_P1FRAME (AAI_GBC_OFFSET + 0x7E8)
+#define AAI_GBC_P2CLK (AAI_GBC_OFFSET + 0x7E4)
+#define AAI_GBC_P2FRAME (AAI_GBC_OFFSET + 0x7E0)
+
+/* spdif clock configurations */
+#define AAI_GBC_SPDIFCLK (AAI_GBC_OFFSET + 0x7DC)
+#define AAI_GBC_SPDIFCLK_R3 (AAI_GBC_OFFSET_R3 + 0xDFC)
+
+/* Pads configurations */
+#define AAI_GBC_PADIN(pad_id) (AAI_GBC_OFFSET + (0x000 + 0x4*(pad_id)))
+#define AAI_GBC_PADOUT(pad_id) (AAI_GBC_OFFSET + (0x100 + 0x4*(pad_id)))
+
+
+/*
+ * Specific GBC definition
+ */
+#define AAI_LBC_OFFSET 0xF000
+
+/*
+ * AAI clocks enables and resets
+ * prefer to use GBC function (using block_id, module_id, clock_id)
+ */
+#define AAI_LBC_CLKEN (AAI_LBC_OFFSET + 0xFFC)
+/*#define AAI_GBC_RESET (AAI_GBC_OFFSET + 0xFF8)*/
+
+/* i2s clocks configurations */
+#define AAI_LBC_MCLK (AAI_LBC_OFFSET + 0x7FC)
+#define AAI_LBC_HFRAME (AAI_LBC_OFFSET + 0x7F4)
+#define AAI_LBC_MFRAME (AAI_LBC_OFFSET + 0x7F8)
+
+/* pcm clocks configurations */
+#define AAI_LBC_P1CLK (AAI_LBC_OFFSET + 0x7EC)
+#define AAI_LBC_P1FRAME (AAI_LBC_OFFSET + 0x7E8)
+#define AAI_LBC_P2CLK (AAI_LBC_OFFSET + 0x7E4)
+#define AAI_LBC_P2FRAME (AAI_LBC_OFFSET + 0x7E0)
+
+
+/* Pads configurations */
+#define AAI_LBC_PADIN(pad_id) (AAI_LBC_OFFSET + (0x000 + 0x4*(pad_id)))
+#define AAI_LBC_PADOUT(pad_id) (AAI_LBC_OFFSET + (0x100 + 0x4*(pad_id)))
+
+
+/*
+ * Specific GBC Value
+ */
+#define PxCLK_DIV_2MHZ (0x0184 << 0)
+#define PxCLK_DIV_1MHZ (0x030A << 0)
+#define PxCLK_DIV_512KHZ (0x05F1 << 0)
+#define PxCLK_DIV_256KHZ (0x0BE5 << 0)
+
+#endif
+
diff --git a/drivers/parrot/spi/Kconfig b/drivers/parrot/spi/Kconfig
new file mode 100644
index 00000000000000..1202e2a4f14443
--- /dev/null
+++ b/drivers/parrot/spi/Kconfig
@@ -0,0 +1,51 @@
+config SPI_PARROT7
+ tristate "Parrot7 SPI kernel driver"
+ depends on SPI
+ default n
+ default m if (ARCH_PARROT7 || ARCH_VEXPRESS_P7FPGA)
+ help
+ Select Parrot7 SPI hardware driver.
+
+config SPI_MASTER_PARROT7
+ tristate "Parrot7 SPI master driver"
+ depends on SPI_MASTER && SPI_PARROT7
+ default m
+ help
+ Select Parrot7 SPI master driver.
+
+config SPI_MASTER_PARROT7_TEST
+ tristate "Parrot7 SPI master test"
+ depends on SPI_MASTER_PARROT7
+ default n
+ help
+ Test module, sends or receives some data on a master spi.
+
+config SPI_MASTER_PARROT7_LOOPBACK_TEST
+ tristate "Enable loopback test module"
+ depends on SPI_MASTER_PARROT7
+ default n
+ help
+ Test module to test SPI slave using the loopback.
+
+config SPI_MASTER_SPARTAN6
+ tristate "Parrot7 SPI Spartan6 FPGA"
+ depends on SPI_MASTER_PARROT7
+ select SPI_SPIDEV
+ default n
+ help
+ Enables spidev's interface to Spartan6 FPGA.
+
+config SPI_SLAVE_PARROT7
+ tristate "Parrot7 SPI slave driver"
+ depends on SPI_MASTER && SPI_PARROT7
+ default m
+ help
+ Select Parrot7 SPI slave driver. Requires the master driver to be
+ selected to handle common registers.
+
+config SPI_SLAVE_PARROT7_LOOPBACK_TEST
+ tristate "Enable loopback test module"
+ depends on SPI_SLAVE_PARROT7
+ default n
+ help
+ Test module to test SPI slave using the loopback.
diff --git a/drivers/parrot/spi/Makefile b/drivers/parrot/spi/Makefile
new file mode 100644
index 00000000000000..8a388fc03f16a7
--- /dev/null
+++ b/drivers/parrot/spi/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -I$(srctree)/drivers/parrot
+ccflags-$(CONFIG_SPI_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_SPI_PARROT7) += p7-spi-common.o
+p7-spi-common-objs := p7-swb.o p7-spi.o
+obj-$(CONFIG_SPI_MASTER_PARROT7) += p7-spim.o
+obj-$(CONFIG_SPI_SLAVE_PARROT7) += p7-spis.o
+
+obj-$(CONFIG_SPI_MASTER_PARROT7_TEST) += p7-spim_test.o
+obj-$(CONFIG_SPI_MASTER_SLAVE_TEST) += p7-spis_test_p7mu.o
+
+obj-$(CONFIG_SPI_MASTER_PARROT7_LOOPBACK_TEST) += spimtest_loopback.o
+obj-$(CONFIG_SPI_SLAVE_PARROT7_LOOPBACK_TEST) += spistest_loopback.o
diff --git a/drivers/parrot/spi/p7-spi.c b/drivers/parrot/spi/p7-spi.c
new file mode 100644
index 00000000000000..316f41e583353c
--- /dev/null
+++ b/drivers/parrot/spi/p7-spi.c
@@ -0,0 +1,1522 @@
+/**
+ * linux/drivers/parrot/spi/p7-spi.c - Parrot7 SPI shared resources
+ * driver implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 04-Jul-2012
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/lcm.h>
+#include "p7-spi.h"
+#include "p7-spi_priv.h"
+
+struct p7spi_kernel p7spi_kern = {
+ .lck = __MUTEX_INITIALIZER(p7spi_kern.lck)
+};
+EXPORT_SYMBOL(p7spi_kern);
+
+#if defined(DEBUG) || defined(CONFIG_DEBUG_FS)
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+
+#define P7SPI_INIT_DBGREG(_name) \
+ { .name = __stringify(_name), .offset = P7SPI_ ## _name }
+
+static struct debugfs_reg32 p7spi_regs[] = {
+ P7SPI_INIT_DBGREG(CTRL),
+ P7SPI_INIT_DBGREG(SPEED),
+ P7SPI_INIT_DBGREG(TIMING),
+ P7SPI_INIT_DBGREG(STATUS),
+ P7SPI_INIT_DBGREG(ITEN),
+ P7SPI_INIT_DBGREG(ITACK),
+ P7SPI_INIT_DBGREG(TH_RX),
+ P7SPI_INIT_DBGREG(TH_TX),
+ P7SPI_INIT_DBGREG(TH_RXCNT),
+ P7SPI_INIT_DBGREG(TH_TXCNT),
+ P7SPI_INIT_DBGREG(RXCNT),
+ P7SPI_INIT_DBGREG(TXCNT),
+ P7SPI_INIT_DBGREG(RX_VALID_BYTES),
+ P7SPI_INIT_DBGREG(FIFO_FLUSH),
+ P7SPI_INIT_DBGREG(FIFO_RXLVL),
+ P7SPI_INIT_DBGREG(FIFO_TXLVL)
+};
+
+#endif /* #if defined(DEBUG) || defined(CONFIG_DEBUG_FS) */
+
+#ifdef DEBUG
+
+/*
+ * Dump every per-core register listed in the p7spi_regs table through
+ * dev_dbg, prefixed by the caller supplied message (DEBUG builds only).
+ */
+void p7spi_dump_regs(struct p7spi_core const* core, char const* msg)
+{
+ int r;
+
+ dev_dbg(p7spi_core_dev(core), "%s:\n", msg);
+
+ for (r = 0; r < ARRAY_SIZE(p7spi_regs); r++)
+ dev_dbg(p7spi_core_dev(core),
+ "\t%s = 0x%08x\n",
+ p7spi_regs[r].name,
+ readl(core->vaddr + p7spi_regs[r].offset));
+}
+EXPORT_SYMBOL(p7spi_dump_regs);
+
+#endif
+
+#if defined(CONFIG_DEBUG_FS)
+
+/*
+ * Create the driver-level debugfs directory and expose the shared
+ * (kernel) registers: the FIFO memory split register plus the complete
+ * switchbox routing set (16 output pads, then CLK / SS / DATA0..DATA3
+ * input pads for each of the 4 cores).
+ *
+ * Fix: the original table listed SWB_IPAD_DATAn(n) twice for cores 1,
+ * 2 and 3 and never exposed SWB_IPAD_DATA0 for them (copy-paste slip);
+ * each core now gets the full DATA0..DATA3 sequence exactly once.
+ */
+static void __devinit p7spi_init_kern_dbgfs(void)
+{
+ static struct debugfs_reg32 set[] = {
+ P7SPI_INIT_DBGREG(MEM),
+ P7SPI_INIT_DBGREG(SWB_OPAD(0)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(1)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(2)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(3)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(4)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(5)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(6)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(7)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(8)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(9)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(10)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(11)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(12)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(13)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(14)),
+ P7SPI_INIT_DBGREG(SWB_OPAD(15)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_CLK(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_SS(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA0(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA1(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA2(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA3(0)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_CLK(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_SS(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA0(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA1(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA2(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA3(1)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_CLK(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_SS(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA0(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA1(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA2(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA3(2)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_CLK(3)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_SS(3)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA0(3)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA1(3)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA2(3)),
+ P7SPI_INIT_DBGREG(SWB_IPAD_DATA3(3)),
+ };
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ p7spi_kern.dbg_dir = debugfs_create_dir(P7SPI_DRV_NAME, NULL);
+ if (! p7spi_kern.dbg_dir) {
+ dev_warn(p7spi_kern.dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ /* Register the whole set as a single read-only "common" regset file. */
+ p7spi_kern.dbg_regs.regs = set;
+ p7spi_kern.dbg_regs.nregs = ARRAY_SIZE(set);
+ p7spi_kern.dbg_regs.base = (void __iomem*) p7spi_kern.vaddr;
+ p7spi_kern.dbg_file = debugfs_create_regset32("common",
+ S_IRUGO,
+ p7spi_kern.dbg_dir,
+ &p7spi_kern.dbg_regs);
+ if (! p7spi_kern.dbg_file) {
+ debugfs_remove(p7spi_kern.dbg_dir);
+ p7spi_kern.dbg_dir = NULL;
+ dev_warn(p7spi_kern.dev, "failed to create debugfs file\n");
+ }
+}
+
+/* Tear down the debugfs file and directory created at init (if any). */
+static void __devexit p7spi_exit_kern_dbgfs(void)
+{
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ if (p7spi_kern.dbg_dir) {
+ debugfs_remove(p7spi_kern.dbg_file);
+ debugfs_remove(p7spi_kern.dbg_dir);
+ }
+}
+
+/*
+ * Expose one core's register set (p7spi_regs) as a read-only debugfs
+ * regset file, named after the core's parent device. Harmless no-op when
+ * the driver-level debugfs directory could not be created.
+ */
+static void p7spi_init_core_dbgfs(struct p7spi_core* core)
+{
+#ifdef DEBUG
+ BUG_ON(! core);
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ if (! p7spi_kern.dbg_dir) {
+ core->dbg_file = NULL;
+ return;
+ }
+
+ core->dbg_regs.regs = p7spi_regs;
+ core->dbg_regs.nregs = ARRAY_SIZE(p7spi_regs);
+ core->dbg_regs.base = (void __iomem*) core->vaddr;
+ core->dbg_file = debugfs_create_regset32(dev_name(p7spi_core_dev(core)->parent),
+ S_IRUGO,
+ p7spi_kern.dbg_dir,
+ &core->dbg_regs);
+ if (! core->dbg_file)
+ dev_warn(core->ctrl->dev.parent, "failed to create debugfs file\n");
+}
+
+/* Remove the per-core debugfs regset file created by p7spi_init_core_dbgfs. */
+static void p7spi_exit_core_dbgfs(struct p7spi_core* core)
+{
+#ifdef DEBUG
+ BUG_ON(! core);
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ if (core->dbg_file)
+ debugfs_remove(core->dbg_file);
+}
+
+#else /* ! defined(CONFIG_DEBUG_FS) */
+
+/* No-op stand-ins when debugfs support is compiled out. */
+static inline void p7spi_init_kern_dbgfs(void) {}
+static inline void p7spi_exit_kern_dbgfs(void) {}
+static inline void p7spi_init_core_dbgfs(struct p7spi_core* core) {}
+static inline void p7spi_exit_core_dbgfs(struct p7spi_core* core) {}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+/*
+ * Reserve a switchbox pad for this SPI core, in the given direction and
+ * for the given function. Thin wrapper around p7swb_request_pad.
+ */
+static int p7spi_request_pad(struct p7spi_core const * const core,
+ unsigned int pad,
+ enum p7swb_direction dir, enum p7swb_function func)
+{
+ int core_id = p7spi_core_id(core);
+
+ return p7swb_request_pad(core_id, P7SPI_CORE_SPI, core,
+ pad, dir, func);
+}
+
+/* Give back a switchbox pad previously obtained with p7spi_request_pad. */
+static void p7spi_release_pad(struct p7spi_core const * const core,
+ unsigned int pad,
+ enum p7swb_direction dir)
+{
+ p7swb_release_pad(core, pad, dir);
+}
+
+/*
+ * Translate a per-type core id (the Nth core of the given type) into an
+ * absolute index within p7spi_kern.cores. Returns -1 when no such core
+ * exists.
+ */
+int p7spi_core_index(int core_id, enum p7spi_core_type type)
+{
+ int i, same_core_type;
+
+ /*
+ * core_id is the index of cores of the same type. We need an
+ * index that is valid in p7spi_kern.cores.
+ */
+ for (i = 0, same_core_type = 0; i < p7spi_kern.num_cores; i++) {
+ if (p7spi_kern.cores[i] == type) {
+ if (same_core_type == core_id)
+ return i;
+ same_core_type++;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Decode one core's field of the MEM register into a FIFO word count:
+ * a zero field means no memory allocated (0 words), otherwise the count
+ * is 8 * 2^field words.
+ */
+static size_t p7spi_fifo_wcnt(u32 mem_reg, int core_id)
+{
+ size_t wcnt;
+
+ wcnt = (mem_reg >> (core_id * P7SPI_MEM_BITLEN) & P7SPI_MEM_MASK);
+ wcnt = ((!! wcnt) * 8U) * (1U << wcnt);
+ return wcnt;
+}
+
+/*
+ * Carve a FIFO out of the shared SPI memory for the given core.
+ *
+ * @core_id: per-type core index
+ * @type: core type (SPI or MPEG-TS)
+ * @hint: requested FIFO size in 32-bit words (rounded to 16/32/64)
+ *
+ * Returns the allocated size in words (the existing size when the core
+ * already owns memory), -EAGAIN when the shared registers are not mapped
+ * yet, -EINVAL on bad core / inconsistent accounting, -ENOMEM when the
+ * shared memory is exhausted.
+ *
+ * Fix: ret is ssize_t; it was printed with %d in both messages below,
+ * now %zd so the format matches the argument type.
+ */
+ssize_t p7spi_alloc_fifo(int core_id, enum p7spi_core_type type, size_t hint)
+{
+ int i, core_index;
+ size_t core_sz, avl_mem = p7spi_kern.fifo_wcnt;
+ u32 mem_reg;
+ ssize_t ret;
+
+ if (!p7spi_kern.vaddr)
+ return -EAGAIN;
+
+ mutex_lock(&p7spi_kern.lck);
+ mem_reg = __raw_readl(p7spi_kern.vaddr + P7SPI_MEM);
+
+ core_index = p7spi_core_index(core_id, type);
+ if (core_index == -1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* shortpath if memory already allocated for this core */
+ core_sz = p7spi_fifo_wcnt(mem_reg, core_index);
+ if (core_sz != 0) {
+ ret = core_sz;
+ goto err;
+ }
+
+ /* Account for memory already handed out to the other cores. */
+ for (i = 0; i < p7spi_kern.num_cores; i++) {
+ /* compute fifo size for this core */
+ core_sz = p7spi_fifo_wcnt(mem_reg, i);
+
+ /* SPI have TX and RX fifos, mpeg only RX */
+ if (p7spi_kern.cores[i] == P7SPI_CORE_SPI)
+ core_sz *= 2;
+
+ if (core_sz > avl_mem) {
+ ret = -EINVAL;
+ goto err;
+ } else {
+ avl_mem -= core_sz;
+ }
+ }
+
+ if (!avl_mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * try to make the fifo fit in available
+ * memory if caller asks for too much.
+ */
+ hint = max_t(size_t, hint, 16);
+ if (type == P7SPI_CORE_SPI)
+ hint *= 2;
+ hint = min_t(size_t, avl_mem, hint);
+ if (type == P7SPI_CORE_SPI)
+ hint /= 2;
+
+ /* round on valid values */
+ if (hint < 16) {
+ ret = -EINVAL;
+ goto err;
+ } else if (hint < 32) {
+ ret = 16;
+ } else if (hint < 64) {
+ ret = 32;
+ } else {
+ ret = 64;
+ }
+
+ dev_dbg(p7spi_kern.dev,
+ "fifo: set fifo size of %zd words for core %d\n",
+ ret, core_index);
+
+ /* Encode the size back into this core's MEM register field. */
+ mem_reg |= __fls(ret / 8) << (core_index * P7SPI_MEM_BITLEN);
+ writel(mem_reg, p7spi_kern.vaddr + P7SPI_MEM);
+err:
+ mutex_unlock(&p7spi_kern.lck);
+ if (ret < 0)
+ dev_err(p7spi_kern.dev,
+ "fifo: failed to allocate (%zd)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL(p7spi_alloc_fifo);
+
+/*
+ * Release the FIFO memory owned by the given core by clearing its field
+ * in the shared MEM register. Silently ignores unknown cores.
+ */
+void p7spi_free_fifo(int core_id, enum p7spi_core_type type)
+{
+ int core_index;
+ u32 mem_reg;
+
+ BUG_ON(!p7spi_kern.vaddr);
+
+ mutex_lock(&p7spi_kern.lck);
+ mem_reg = __raw_readl(p7spi_kern.vaddr + P7SPI_MEM);
+
+ core_index = p7spi_core_index(core_id, type);
+ if (core_index == -1)
+ goto err;
+
+ mem_reg &= ~(P7SPI_MEM_MASK << (core_index * P7SPI_MEM_BITLEN));
+ writel(mem_reg, p7spi_kern.vaddr + P7SPI_MEM);
+
+err:
+ mutex_unlock(&p7spi_kern.lck);
+}
+EXPORT_SYMBOL(p7spi_free_fifo);
+
+
+/*
+ * Current transfer's last unaligned bytes copy (FIFOs only support 32 bits
+ * word aligned accesses).
+ */
+/* Copy the 1 to 3 trailing bytes of a transfer byte by byte. */
+static void p7spi_memcpy_unalign(char* dst, char const* last, size_t bytes)
+{
+#ifdef DEBUG
+ BUG_ON(! dst);
+ BUG_ON(! last);
+#endif
+ switch (bytes) {
+ case 3:
+ *(dst + 2) = *(last + 2);
+ /* fallthrough */
+ case 2:
+ *(dst + 1) = *(last + 1);
+ /* fallthrough */
+ case 1:
+ *dst = *last;
+ break;
+#ifdef DEBUG
+ default:
+ BUG();
+#endif
+ }
+}
+
+/*
+ * 32 bits word aligned receive FIFO data extraction.
+ */
+/*
+ * Drain a word-aligned number of bytes from the receive FIFO into
+ * core->rx_buff, advancing the buffer pointer as it goes.
+ */
+void p7spi_readl_fifo(struct p7spi_core* core, size_t bytes)
+{
+#ifdef DEBUG
+ BUG_ON(! bytes);
+ BUG_ON(bytes > core->fifo_sz);
+ BUG_ON(! IS_ALIGNED((unsigned long) core->rx_buff, sizeof(u32)));
+ BUG_ON(! IS_ALIGNED(bytes, sizeof(u32)));
+#endif
+
+ for (; bytes; core->rx_buff += sizeof(u32), bytes -= sizeof(u32))
+ *((u32*) core->rx_buff) = __raw_readl(core->vaddr + P7SPI_DATA);
+}
+EXPORT_SYMBOL(p7spi_readl_fifo);
+
+/*
+ * Fetch input bytes from receive FIFO.
+ */
+/*
+ * Pop one 32-bit word from the receive FIFO and keep only its first
+ * `bytes` (1..3) bytes — handles a transfer's unaligned tail.
+ */
+void p7spi_readb_fifo(struct p7spi_core* core,
+ size_t bytes)
+{
+ u32 const word = __raw_readl(core->vaddr + P7SPI_DATA);
+
+ p7spi_memcpy_unalign(core->rx_buff, (char const*) &word, bytes);
+
+ core->rx_buff += bytes;
+}
+EXPORT_SYMBOL(p7spi_readb_fifo);
+
+/* Read `bytes` from the FIFO: aligned part word-wise, tail byte-wise. */
+void p7spi_read_fifo(struct p7spi_core* core, size_t bytes)
+{
+ size_t const align = round_down(bytes, sizeof(u32));
+ size_t const unalign = bytes & __round_mask(bytes, sizeof(u32));
+
+ if (align)
+ p7spi_readl_fifo(core, align);
+
+ if (unalign)
+ p7spi_readb_fifo(core, unalign);
+}
+EXPORT_SYMBOL(p7spi_read_fifo);
+
+/*
+ * Feed transmit FIFO with 32 bits word aligned data.
+ */
+/*
+ * Push a word-aligned number of bytes from core->tx_buff into the
+ * transmit FIFO, advancing the buffer pointer as it goes.
+ */
+void p7spi_writel_fifo(struct p7spi_core* core, size_t bytes)
+{
+#ifdef DEBUG
+ BUG_ON(! bytes);
+ BUG_ON(bytes > core->fifo_sz);
+ BUG_ON(! IS_ALIGNED((unsigned long) core->tx_buff, sizeof(u32)));
+ BUG_ON(! IS_ALIGNED(bytes, sizeof(u32)));
+#endif
+
+ for (; bytes; core->tx_buff += sizeof(u32), bytes -= sizeof(u32))
+ __raw_writel(*((u32 const*) core->tx_buff), core->vaddr + P7SPI_DATA);
+}
+EXPORT_SYMBOL(p7spi_writel_fifo);
+
+/*
+ * Push a transfer's unaligned tail (1..3 bytes) into the transmit FIFO,
+ * zero-padding the remainder of the 32-bit word.
+ */
+void p7spi_writeb_fifo(struct p7spi_core* core, size_t bytes)
+{
+ u32 word = 0;
+
+ p7spi_memcpy_unalign((char*) &word, core->tx_buff, bytes);
+ __raw_writel(word, core->vaddr + P7SPI_DATA);
+
+ core->tx_buff += bytes;
+}
+EXPORT_SYMBOL(p7spi_writeb_fifo);
+
+/* Write `bytes` to the FIFO: aligned part word-wise, tail byte-wise. */
+void p7spi_write_fifo(struct p7spi_core* core, size_t bytes)
+{
+ size_t const align = round_down(bytes, sizeof(u32));
+ size_t const unalign = bytes & __round_mask(bytes, sizeof(u32));
+
+ if (align)
+ p7spi_writel_fifo(core, align);
+
+ if (unalign)
+ p7spi_writeb_fifo(core, unalign);
+}
+EXPORT_SYMBOL(p7spi_write_fifo);
+
+/*
+ * Complete a transfer round and initiate a new one if needed.
+ * Should be called after a hardware completion event happened
+ * (either FIFO threshold reached, last byte transferred or last
+ * instruction completed).
+ * This is used in both polling and interrupt mode of operations.
+ *
+ * At this time:
+ * bytes left to queue:
+ * core->bytes - core->fifo_sz
+ * bytes left to send (i.e., minus bytes still present within fifo) :
+ * core->bytes - (core->fifo_sz - core->thres_sz)
+ * bytes left to dequeue:
+ * core->bytes
+ *
+ */
+#warning remove me
+/*
+ * Complete one threshold-sized PIO round: drain the rx FIFO (when
+ * receiving), account for the dequeued bytes, then refill the tx FIFO
+ * (when transmitting). Called after a hardware completion event, in
+ * both polling and interrupt modes.
+ *
+ * Fix: `thres` is size_t — the dev_dbg format used %u, now %zu.
+ */
+void p7spi_process_round(struct p7spi_core* core)
+{
+ size_t const thres = core->thres_sz;
+
+#ifdef DEBUG
+ /*
+ * Residue bytes transfer should always be carried out using a synchronized
+ * transfer.
+ */
+ BUG_ON(core->bytes < (core->fifo_sz + thres));
+#endif
+
+ if (core->rx_buff)
+ /* Fetch input bytes from receive FIFO. */
+ p7spi_readl_fifo(core, thres);
+
+ /* Update count of remaining bytes to dequeue. */
+ core->bytes -= thres;
+
+ /*
+ * Now start another round... At this time, remaining bytes to
+ * queue (bytes + thres - fifo) MUST be >= core->thres_sz
+ * since we perform threshold sized transfers only.
+ */
+ dev_dbg(p7spi_core_dev(core),
+ "\t\t\t%zuB proc round wr[%p] rd[%p]\n",
+ thres,
+ core->tx_buff,
+ core->rx_buff);
+ if (core->tx_buff)
+ /* Feed transmit FIFO. */
+ p7spi_writel_fifo(core, thres);
+}
+EXPORT_SYMBOL(p7spi_process_round);
+
+/*
+ * Check a status word for the threshold event the current transfer
+ * direction is waiting on. Returns 0 when data may be moved, -EAGAIN
+ * when nothing happened, -ENODATA / -EIO when the status does not carry
+ * the expected rx / tx threshold bit.
+ */
+int p7spi_data_ready(struct p7spi_core const* core, u32 stat)
+{
+ if (! stat)
+ /* Hugh ?! No interrupt raised / nothing special happened... */
+ return -EAGAIN;
+
+ if (core->rx_buff) {
+ if (! (stat & P7SPI_STATUS_RX_TH_REACHED))
+ /* Not the one we expected for rx and / or tx. */
+ return -ENODATA;
+ }
+ else if (core->tx_buff) {
+ /*
+ * Monitor tx status when tx only to prevent from catching
+ * both rx & tx interrupts.
+ */
+ if (! (stat & P7SPI_STATUS_TX_TH_REACHED))
+ /* Not the one we expected for tx. */
+ return -EIO;
+ }
+#ifdef DEBUG
+ else
+ BUG();
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_data_ready);
+
+/*
+ * Poll the core status register until check_stat accepts it or `tmout`
+ * jiffies elapse. Returns 0 on success, -EFAULT on FIFO access error,
+ * otherwise the last check_stat verdict. With tmout == 0, performs a
+ * single non-blocking check.
+ */
+int p7spi_poll_stat(struct p7spi_core const* core,
+ int (*check_stat)(struct p7spi_core const*, u32),
+ unsigned long tmout)
+{
+ u32 stat;
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+ BUG_ON(! core);
+ BUG_ON(! check_stat);
+#endif
+
+ stat = __raw_readl(core->vaddr + P7SPI_STATUS);
+ if (stat & (P7SPI_STATUS_RXACCESS_ERROR |
+ P7SPI_STATUS_TXACCESS_ERROR))
+ return -EFAULT;
+
+ if (! tmout)
+ /* Fast path when caller does not want to block. */
+ return (*check_stat)(core, stat);
+
+ tmout += jiffies;
+ while (1) {
+ err = (*check_stat)(core, stat);
+ if (! err)
+ break;
+
+ if (time_is_before_jiffies(tmout))
+ /*
+ * Timeout elapsed: return to prevent from waiting for a never ending
+ * transfer to prevent from excessive context switches.
+ */
+ break;
+
+ /*
+ * Give Linux a chance to preempt us if necessary. cpu_relax is an
+ * alternate solution.
+ */
+ cond_resched();
+
+ /* Fetch status again. */
+ stat = __raw_readl(core->vaddr + P7SPI_STATUS);
+ if (stat & (P7SPI_STATUS_RXACCESS_ERROR |
+ P7SPI_STATUS_TXACCESS_ERROR)) {
+ err = -EFAULT;
+ break;
+ }
+
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(p7spi_poll_stat);
+
+/*
+ * Prepare the core's internal bookkeeping (tx/rx pointers, byte count,
+ * DMA mapping policy) for one spi_transfer. Only 8-bit words are
+ * supported; returns -EINVAL otherwise, 0 on success.
+ */
+int p7spi_init_xfer(struct p7spi_core* core,
+ struct spi_transfer* xfer,
+ struct spi_message const* msg __attribute__((unused)),
+ struct spi_master const* dev __attribute__((unused)))
+{
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+ BUG_ON(! core);
+ BUG_ON(! xfer);
+ BUG_ON(! msg);
+ BUG_ON(! dev);
+
+ dev_dbg(p7spi_core_dev(core),
+ "\t%uB xfer wr[%p] rd[%p] @%uHz with cs=%d delay=%huusec\n",
+ xfer->len,
+ xfer->tx_buf,
+ xfer->rx_buf,
+ xfer->speed_hz,
+ xfer->cs_change,
+ xfer->delay_usecs);
+
+ /* All of this should be properly setup by the generic SPI layer. */
+ BUG_ON(! (xfer->tx_buf || xfer->rx_buf));
+ BUG_ON(xfer->tx_buf && (dev->flags & SPI_MASTER_NO_TX));
+ BUG_ON(xfer->rx_buf && (dev->flags & SPI_MASTER_NO_RX));
+ BUG_ON(! xfer->len);
+ if (msg->is_dma_mapped) {
+ BUG_ON((!! xfer->tx_buf) ^ (!! xfer->tx_dma));
+ BUG_ON((!! xfer->rx_buf) ^ (!! xfer->rx_dma));
+ }
+#endif
+
+ /* Setup SPI bus word width if requested on a per-transfer basis. */
+ xfer->bits_per_word = xfer->bits_per_word ? : 8;
+ if (xfer->bits_per_word != 8)
+ return -EINVAL;
+
+ /* Set internal core fields to handle current transfer rounds. */
+ core->tx_buff = xfer->tx_buf;
+ core->rx_buff = xfer->rx_buf;
+ core->bytes = (size_t) xfer->len;
+ core->stat = 0;
+
+ /* Reuse the caller's DMA mapping when it provided one. */
+ if (msg->is_dma_mapped && core->dma_chan) {
+ core->map_dma = false;
+ core->dma_addr = xfer->tx_dma ? : xfer->rx_dma;
+ }
+ else
+ core->map_dma = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_init_xfer);
+
+/*
+ * Configure the slave DMA channel for the current (half-duplex)
+ * transfer direction and re-arm the completion. Returns the
+ * dmaengine_slave_config error, or 0 on success.
+ */
+int p7spi_config_dma(struct p7spi_core* core)
+{
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+ /*
+ * Implement full duplex using dmaengine device_prep_interleaved_dma method
+ * (pl330 driver does not support this interface yet).
+ */
+ BUG_ON(! ((!! core->rx_buff) ^ (!! core->tx_buff)));
+ BUG_ON(! core->dma_chan);
+#endif
+
+ if (core->tx_buff) {
+ core->dma_cfg.direction = DMA_MEM_TO_DEV;
+ core->dma_cfg.dst_addr = core->fifo_paddr;
+ }
+ else if (core->rx_buff) {
+ core->dma_cfg.direction = DMA_DEV_TO_MEM;
+ core->dma_cfg.src_addr = core->fifo_paddr;
+ }
+
+ err = dmaengine_slave_config(core->dma_chan, &core->dma_cfg);
+ if (err)
+ return err;
+
+ INIT_COMPLETION(core->done);
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_config_dma);
+
+/*
+ * Size and (when the caller did not pre-map) DMA-map the next segment
+ * of the current transfer. The segment is core->bytes rounded down to
+ * the DMA granularity, capped by the maximum segment size. Returns the
+ * segment size in bytes, or -ENOBUFS when the mapping fails.
+ */
+ssize_t p7spi_map_dma(struct p7spi_core* core)
+{
+ char* const buff = core->tx_buff ? (char*) core->tx_buff : core->rx_buff;
+
+ core->dma_sz = min(rounddown(core->bytes, core->dma_min), core->sgm_sz);
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+ /*
+ * No support for full duplex DMA yet.
+ * Implement full duplex using dmaengine device_prep_interleaved_dma
+ * method (pl330 driver does not support this interface yet).
+ */
+ BUG_ON(! ((!! core->rx_buff) ^ (!! core->tx_buff)));
+ BUG_ON(! core->dma_chan);
+ if (core->tx_buff) {
+ BUG_ON(core->dma_cfg.direction != DMA_MEM_TO_DEV);
+ BUG_ON(core->dma_cfg.dst_addr != core->fifo_paddr);
+ }
+ else {
+ BUG_ON(core->dma_cfg.direction != DMA_DEV_TO_MEM);
+ BUG_ON(core->dma_cfg.src_addr != core->fifo_paddr);
+ }
+ BUG_ON(! core->dma_sz);
+ BUG_ON(! IS_ALIGNED((unsigned long) buff, dma_get_cache_alignment()));
+ BUG_ON(! IS_ALIGNED(core->dma_sz, dma_get_cache_alignment()));
+ BUG_ON(core->dma_sz % core->dma_thres);
+#endif
+
+ if (core->map_dma) {
+ /*
+ * Buffers submitted to SPI layer MUST be DMA-safe, i.e., appropriate for
+ * dma_map_single usage ! (see Documentation/spi/spi-summary).
+ */
+ BUG_ON(! virt_addr_valid(buff));
+ BUG_ON(! virt_addr_valid(buff + core->dma_sz - 1));
+
+ /* Map buffer for DMA usage. */
+ core->dma_addr = dma_map_single(core->dma_chan->device->dev,
+ buff,
+ core->dma_sz,
+ core->dma_cfg.direction);
+ if (unlikely(dma_mapping_error(core->dma_chan->device->dev,
+ core->dma_addr)))
+ return -ENOBUFS;
+ }
+
+ return core->dma_sz;
+}
+EXPORT_SYMBOL(p7spi_map_dma);
+
+/* As of v3.4 dmaengin_prep_slave_single is useless... */
+/*
+ * Build a one-entry scatterlist around (buf, len) and hand it to the
+ * channel's device_prep_slave_sg hook — single-buffer slave prep helper.
+ */
+inline struct dma_async_tx_descriptor*
+p7spi_dmaengine_prep_slave_single(struct dma_chan* chan,
+ dma_addr_t buf,
+ size_t len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags, NULL);
+}
+EXPORT_SYMBOL(p7spi_dmaengine_prep_slave_single);
+
+/*
+ * Prepare, submit and kick the DMA descriptor for the current segment,
+ * installing `complete` as the completion callback. Byte counters and
+ * buffer pointers are advanced up front so completion handling stays
+ * trivial. Returns 0 on success, -ENOMEM or the submit error otherwise.
+ */
+int p7spi_run_dma(struct p7spi_core* core, void (*complete)(void*))
+{
+ int err;
+ struct dma_async_tx_descriptor* const txd =
+ p7spi_dmaengine_prep_slave_single(core->dma_chan,
+ core->dma_addr,
+ core->dma_sz,
+ core->dma_cfg.direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK |
+ DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP);
+ if (! txd)
+ return -ENOMEM;
+
+ txd->callback = complete;
+ txd->callback_param = core;
+
+ core->dma_cook = dmaengine_submit(txd);
+ err = dma_submit_error(core->dma_cook);
+ if (err)
+ return err;
+
+ /*
+ * Update bytes counters ahead of time so that we don't need to manage them
+ * at DMA completion time.
+ */
+ core->bytes -= core->dma_sz;
+ if (core->tx_buff)
+ core->tx_buff += core->dma_sz;
+ if (core->rx_buff)
+ core->rx_buff += core->dma_sz;
+ if (! core->map_dma)
+ /*
+ * If buffer was already DMA mapped, update DMA bus address only
+ * once used by p7spi_dmaengine_prep_slave_single.
+ */
+ core->dma_addr += core->dma_sz;
+
+ dma_async_issue_pending(core->dma_chan);
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_run_dma);
+
+/*
+ * Wait (up to 15 seconds) for the in-flight DMA segment to complete,
+ * then propagate the channel / core status. Dumps the core registers
+ * on failure (DEBUG builds).
+ */
+int p7spi_wait_dma(struct p7spi_core* core)
+{
+ int err = 0;
+
+ if (! wait_for_completion_timeout(&core->done, 15 * HZ))
+ err = -ETIMEDOUT;
+
+ if (! err)
+ err = p7spi_check_dma(core);
+
+ if (! err)
+ err = core->stat;
+
+ if (err)
+ p7spi_dump_regs(core, "dma segment failed");
+
+ return err;
+}
+EXPORT_SYMBOL(p7spi_wait_dma);
+
+/*
+ * Per spi_device setup: validate platform / controller data and the
+ * device's mode, allocate FIFO memory, derive the wakeup threshold and
+ * DMA burst geometry, and fold the SPI mode bits into *ctrl_reg.
+ * Returns 0 on success or a negative errno.
+ */
+int p7spi_setup_core(struct p7spi_core* core,
+ struct spi_device* device,
+ u32* ctrl_reg)
+{
+ struct p7spi_plat_data const* pdata;
+ struct p7spi_ctrl_data const* cdata;
+ ssize_t fifo_sz;
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+ BUG_ON(! core);
+ BUG_ON(! device);
+ BUG_ON(! device->master);
+ BUG_ON(core->dma_cfg.src_addr_width != core->dma_cfg.dst_addr_width);
+#endif
+
+ pdata = (struct p7spi_plat_data const*) dev_get_platdata(p7spi_kern.dev);
+ cdata = (struct p7spi_ctrl_data const*) device->controller_data;
+ if (! (cdata && pdata)) {
+ dev_err(p7spi_core_dev(core),
+ "missing platform data for device %s\n",
+ dev_name(&device->dev));
+ return -EINVAL;
+ }
+
+ if (device->chip_select) {
+ dev_err(p7spi_core_dev(core),
+ "invalid slave select %d for device %s "
+ "(single device buses support only)\n",
+ device->chip_select,
+ dev_name(&device->dev));
+ return -EINVAL;
+ }
+
+ if (! device->bits_per_word)
+ /* Zero (the default) here means 8 bits */
+ device->bits_per_word = 8;
+ if (device->bits_per_word != 8) {
+ dev_err(p7spi_core_dev(core),
+ "unsupported word bit width %d for device %s",
+ device->bits_per_word,
+ dev_name(&device->dev));
+ return -EINVAL;
+ }
+
+ /* allocate some fifo */
+ fifo_sz = p7spi_alloc_fifo(p7spi_core_id(core), P7SPI_CORE_SPI,
+ cdata->fifo_wcnt);
+ if (fifo_sz < 0)
+ return fifo_sz;
+ core->fifo_sz = (size_t)fifo_sz * sizeof(u32);
+
+ /* Compute FIFO wakeup threshold in number of bytes. */
+ /* NOTE(review): thres_sz looks like a size_t (assigned from a
+ * sizeof product) while the messages below print it with %u —
+ * confirm the field type in p7-spi_priv.h and use %zu if so. */
+ core->thres_sz = cdata->thres_wcnt * sizeof(u32);
+ if (core->thres_sz > (core->fifo_sz - sizeof(u32))) {
+ size_t const thres = core->fifo_sz / 2;
+
+ dev_warn(p7spi_core_dev(core),
+ "correcting invalid specified threshold (%u -> %u)",
+ core->thres_sz,
+ thres);
+ core->thres_sz = thres;
+ }
+
+ if (core->dma_chan) {
+ /* Compute DMA burst length and threshold. */
+ core->dma_cfg.src_maxburst = min_t(u32,
+ core->thres_sz /
+ core->dma_cfg.src_addr_width,
+ 16);
+ core->dma_cfg.dst_maxburst = core->dma_cfg.src_maxburst;
+ core->dma_thres = core->dma_cfg.src_maxburst *
+ core->dma_cfg.src_addr_width;
+
+ if (core->thres_sz != core->dma_thres)
+ dev_info(p7spi_core_dev(core),
+ "aligning DMA burst threshold (%u -> %u)",
+ core->thres_sz,
+ core->dma_thres);
+
+ core->dma_min = lcm(core->dma_thres, dma_get_cache_alignment());
+ /* Compute maximum segment size. */
+ core->sgm_sz = rounddown(P7SPI_INSTR_LEN_MASK, core->dma_min);
+ }
+ else
+ core->sgm_sz = rounddown(P7SPI_INSTR_LEN_MASK, core->thres_sz);
+
+ /* Translate SPI mode flags into controller register bits. */
+ if (device->mode & SPI_CPHA)
+ *ctrl_reg |= P7SPI_CTRL_CPHA;
+ if (device->mode & SPI_CPOL)
+ *ctrl_reg |= P7SPI_CTRL_CPOL;
+ if (device->mode & SPI_LSB_FIRST)
+ *ctrl_reg |= P7SPI_CTRL_LSB;
+
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_setup_core);
+
+/*
+ * Request every pad in the -1 terminated switchbox table. On failure,
+ * release the pads acquired so far and return the error (all-or-nothing).
+ */
+static int p7spi_enable_swb(struct p7spi_core const* core,
+ struct p7spi_swb* switchbox)
+{
+ struct p7spi_swb const* swb;
+ int err = 0;
+
+ swb = switchbox;
+ while (swb->pad != -1) {
+ err = p7spi_request_pad(core, swb->pad, swb->direction, swb->function);
+ if (err) {
+ /* Roll back the pads requested before the failing one. */
+ struct p7spi_swb const* swb_rel = switchbox;
+ while (swb_rel != swb) {
+ p7spi_release_pad(core, swb_rel->pad, swb_rel->direction);
+ swb_rel++;
+ }
+
+ goto finish;
+ }
+ swb++;
+ }
+
+finish:
+ return err;
+}
+
+/* Release every pad in the -1 terminated switchbox table. */
+static void p7spi_release_swb(struct p7spi_core const* core,
+ struct p7spi_swb* switchbox)
+{
+ struct p7spi_swb const* swb;
+
+ swb = switchbox;
+ while (swb->pad != -1) {
+ p7spi_release_pad(core, swb->pad, swb->direction);
+ swb++;
+ }
+}
+
+/*
+ * Arm a core for transfers: mask interrupts, flush both FIFOs, program
+ * the tx/rx wakeup thresholds (expressed in words), then write the
+ * control register last to enable the configuration.
+ */
+void p7spi_enable_core(struct p7spi_core const* core,
+ struct spi_device const* device,
+ u32 ctrl_reg)
+{
+ size_t const wcnt = core->thres_sz / sizeof(u32);
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ __raw_writel(0, core->vaddr + P7SPI_ITEN);
+ __raw_writel(P7SPI_FIFORX_FLUSH | P7SPI_FIFOTX_FLUSH,
+ core->vaddr + P7SPI_FIFO_FLUSH);
+
+ __raw_writel((core->fifo_sz / sizeof(u32)) - wcnt,
+ core->vaddr + P7SPI_TH_TX);
+ __raw_writel(wcnt, core->vaddr + P7SPI_TH_RX);
+
+ writel(ctrl_reg, core->vaddr + P7SPI_CTRL);
+}
+EXPORT_SYMBOL(p7spi_enable_core);
+
+/* spi_master cleanup hook: free the core's FIFO memory and disable it. */
+static void p7spi_cleanup_core(struct spi_device* device)
+{
+ struct p7spi_core const* const core = spi_master_get_devdata(device->master);
+
+#ifdef DEBUG
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ p7spi_free_fifo(p7spi_core_id(core), P7SPI_CORE_SPI);
+ __raw_writel(0, core->vaddr + P7SPI_CTRL);
+}
+
+/*
+ * dma_request_channel filter: select the channel whose id matches the
+ * platform-provided DMA resource (passed as an integer through param).
+ */
+static bool p7spi_filter(struct dma_chan* chan, void* param)
+{
+ if (chan->chan_id == (unsigned int) param)
+ return true;
+
+ return false;
+}
+
+/*
+ * Bring one core up: claim its register region, map it, fetch and
+ * enable its clock, then optionally hook its interrupt and DMA channel
+ * (both fall back to PIO/polling with a warning on failure).
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the original error ladder never called iounmap, leaking the
+ * mapping when snprintf, clk_get_sys or clk_prepare_enable failed after
+ * a successful ioremap; an `unmap` label now sits between `put` and
+ * `release`.
+ */
+static int p7spi_init_core(struct p7spi_core* core,
+ struct platform_device* pdev,
+ struct spi_master* ctrl,
+ struct p7spi_ops const* ops)
+{
+ int err;
+ struct resource const* res;
+ unsigned long irqfl = 0;
+ char stringify_core[2];
+
+#ifdef DEBUG
+ BUG_ON(! core);
+ BUG_ON(! pdev);
+#endif
+
+ if (! p7spi_kern.dev)
+ return -ENODEV;
+
+ core->ctrl = ctrl;
+
+ core->irq = -1;
+ if (ops->handle_irq) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (res) {
+ core->irq = res->start;
+ if (res->flags & IORESOURCE_IRQ_SHAREABLE)
+ irqfl |= IRQF_SHARED;
+ }
+ else
+ dev_warn(&pdev->dev,
+ "failed to get core interrupt, "
+ "fall back to polling mode\n");
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (! res) {
+ dev_err(&pdev->dev, "failed to get core memory region\n");
+ return -ENXIO;
+ }
+
+ core->regs = request_mem_region(res->start,
+ resource_size(res),
+ dev_name(&pdev->dev));
+ if (! core->regs) {
+ dev_err(&pdev->dev,
+ "failed to request core memory region [%08x:%08x]\n",
+ res->start,
+ res->end);
+ return -EBUSY;
+ }
+
+ core->vaddr = (unsigned long) ioremap(res->start, resource_size(res));
+ if (! core->vaddr) {
+ dev_err(&pdev->dev,
+ "failed to remap core memory region [%08x:%08x]\n",
+ res->start,
+ res->end);
+ err = -ENXIO;
+ goto release;
+ }
+ core->fifo_paddr = res->start + P7SPI_DATA;
+
+ /* 2-byte buffer: supports single digit core ids only. */
+ err = snprintf(stringify_core, 2, "%d", p7spi_core_id(core));
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to stringify core id\n");
+ goto unmap;
+ }
+
+ core->clk = clk_get_sys(stringify_core, P7SPI_DRV_NAME);
+ if (IS_ERR(core->clk)) {
+ err = PTR_ERR(core->clk);
+ dev_err(&pdev->dev,
+ "failed to get core clock\n");
+ goto unmap;
+ }
+
+ err = clk_prepare_enable(core->clk);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable core clock\n");
+ goto put;
+ }
+
+ if (core->irq >= 0) {
+ err = request_irq(core->irq,
+ ops->handle_irq,
+ irqfl,
+ dev_name(&pdev->dev),
+ core);
+ if (err) {
+ dev_warn(&pdev->dev,
+ "failed to request irq %d, fall back to polling mode (%d)\n",
+ core->irq,
+ err);
+ core->irq = -1;
+ }
+ }
+
+ core->dma_chan = NULL;
+ if (ops->dma) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ if (ops->dma_cyclic)
+ dma_cap_set(DMA_CYCLIC, mask);
+ core->dma_chan = dma_request_channel(mask,
+ p7spi_filter,
+ (void*) res->start);
+ if (! core->dma_chan)
+ dev_warn(&pdev->dev,
+ "failed to request DMA channel %d, "
+ "fall back to PIO mode\n",
+ res->start);
+
+ /*
+ * Setup DMA configuration fields remaining constant over controller
+ * life-cycle.
+ * NOTE(review): 8-byte bus width on a 32-bit data FIFO looks
+ * suspicious — confirm against the DMA controller capabilities.
+ */
+ core->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ core->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ }
+ else
+ dev_warn(&pdev->dev,
+ "unspecified DMA channel, fall back to PIO mode\n");
+ }
+
+ init_completion(&core->done);
+
+ if (core->dma_chan)
+ dev_dbg(&pdev->dev,
+ "loaded core %d @0x%08x with irq=%d and dma channel %d\n",
+ pdev->id,
+ core->regs->start,
+ core->irq,
+ core->dma_chan->chan_id);
+ else
+ dev_dbg(&pdev->dev,
+ "loaded core %d @0x%08x with irq=%d in PIO mode\n",
+ pdev->id,
+ core->regs->start,
+ core->irq);
+ return 0;
+
+put:
+ clk_put(core->clk);
+unmap:
+ iounmap((void __iomem*) core->vaddr);
+release:
+ release_mem_region(core->regs->start, resource_size(core->regs));
+ return err;
+}
+
+/*
+ * p7spi_exit_core() - release every resource grabbed by p7spi_init_core().
+ *
+ * Teardown order mirrors init: DMA channel, IRQ, controller disable,
+ * clock, register mapping, memory region.
+ */
+static void p7spi_exit_core(struct p7spi_core* core)
+{
+#ifdef DEBUG
+ BUG_ON(! core);
+ BUG_ON(! p7spi_kern.dev);
+#endif
+
+ if (core->dma_chan)
+ dma_release_channel(core->dma_chan);
+
+ if (core->irq >= 0)
+ free_irq(core->irq, (void*) core);
+
+ /* Disable the core before gating its clock. */
+ __raw_writel(0, core->vaddr + P7SPI_CTRL);
+
+ clk_disable_unprepare(core->clk);
+ clk_put(core->clk);
+ iounmap((void*) core->vaddr);
+ release_mem_region(core->regs->start, resource_size(core->regs));
+}
+
+/* Take a runtime PM reference on our parent before messages are pumped. */
+static int p7spi_prepare_xfer(struct spi_master* ctrl)
+{
+	struct p7spi_core* const core = spi_master_get_devdata(ctrl);
+
+	pm_runtime_get_sync(p7spi_core_dev(core)->parent);
+	return 0;
+}
+
+/* Drop the runtime PM reference taken by p7spi_prepare_xfer(). */
+static int p7spi_unprepare_xfer(struct spi_master* ctrl)
+{
+	struct p7spi_core* const core = spi_master_get_devdata(ctrl);
+
+	pm_runtime_put(p7spi_core_dev(core)->parent);
+	return 0;
+}
+
+/*
+ * _p7spi_create_ctrl() - allocate and initialize the SPI controller for
+ * one core.
+ *
+ * Allocates the spi_master (with ctrl_sz bytes of sub-driver private
+ * state), wires the operations provided by the master / slave sub-driver,
+ * then initializes the underlying core, pins and switchbox routing.
+ * Registration with the SPI layer is left to p7spi_register().
+ *
+ * Returns 0 on success or a negative errno; on error everything set up so
+ * far is unwound (see labels below, in reverse acquisition order).
+ */
+int _p7spi_create_ctrl(struct platform_device* pdev,
+ struct p7spi_ops const* ops,
+ size_t ctrl_sz)
+{
+ struct spi_master* ctrl;
+ struct p7spi_core* core;
+ int err;
+
+#ifdef DEBUG
+ BUG_ON(! pdev);
+ BUG_ON(! ops);
+ BUG_ON(! ops->setup);
+ BUG_ON(! ops->xfer);
+ BUG_ON(! ctrl_sz);
+#endif
+
+ ctrl = spi_alloc_master(&pdev->dev, ctrl_sz);
+ if (! ctrl) {
+ dev_err(&pdev->dev, "failed to allocate controller memory\n");
+ return -ENOMEM;
+ }
+
+ /* Each core drives a single chip select on its own bus. */
+ ctrl->bus_num = pdev->id;
+ ctrl->num_chipselect = 1;
+ ctrl->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_3WIRE;
+ ctrl->setup = ops->setup;
+ ctrl->cleanup = p7spi_cleanup_core;
+ ctrl->prepare_transfer_hardware = p7spi_prepare_xfer;
+ ctrl->transfer_one_message = ops->xfer;
+ ctrl->unprepare_transfer_hardware = p7spi_unprepare_xfer;
+ ctrl->rt = ops->rt;
+
+ core = (struct p7spi_core*) spi_master_get_devdata(ctrl);
+ err = p7spi_init_core(core, pdev, ctrl, ops);
+ if (err)
+ goto put;
+
+ /* Activate the default pin configuration for this core. */
+ core->pctl = pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(core->pctl)) {
+ dev_err(&pdev->dev, "failed to enable pins\n");
+ err = PTR_ERR(core->pctl);
+ goto exit;
+ }
+
+ /* Route core signals through the switchbox as described by pdata. */
+ err = p7spi_enable_swb(core,
+ (struct p7spi_swb*) pdev->dev.platform_data);
+ if (err)
+ goto put_pin;
+
+ platform_set_drvdata(pdev, core);
+ p7spi_init_core_dbgfs(core);
+
+ return 0;
+
+put_pin:
+ pinctrl_put(core->pctl);
+exit:
+ p7spi_exit_core(core);
+put:
+ spi_master_put(ctrl);
+ return err;
+}
+EXPORT_SYMBOL(_p7spi_create_ctrl);
+
+/*
+ * p7spi_register() - register the master attached to @core with the SPI
+ * layer and log readiness.
+ *
+ * Returns 0 on success or the spi_register_master() error.
+ */
+int p7spi_register(struct platform_device* pdev, struct p7spi_core *core)
+{
+	int err;
+
+	err = spi_register_master(core->ctrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register controller.\n");
+		return err;
+	}
+
+	dev_info(&pdev->dev, "core %d ready\n", pdev->id);
+	return 0;
+}
+EXPORT_SYMBOL(p7spi_register);
+
+/*
+ * p7spi_abort_create() - undo a successful _p7spi_create_ctrl() for a
+ * controller that was never registered with the SPI layer.
+ *
+ * Counterpart of p7spi_destroy_ctrl() for the not-yet-registered case.
+ * NOTE(review): unlike p7spi_destroy_ctrl(), the switchbox routing set up
+ * by _p7spi_create_ctrl() is not released here — confirm this is intended.
+ */
+int p7spi_abort_create(struct platform_device* pdev)
+{
+ struct p7spi_core* const core = platform_get_drvdata(pdev);
+
+ p7spi_exit_core_dbgfs(core);
+ p7spi_exit_core(core);
+ pinctrl_put(core->pctl);
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(core->ctrl);
+
+ dev_info(&pdev->dev, "core %d removed\n", pdev->id);
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_abort_create);
+
+/*
+ * p7spi_destroy_ctrl() - tear down a controller previously created with
+ * _p7spi_create_ctrl() and registered through p7spi_register().
+ *
+ * Unregisters from the SPI layer, then releases core resources, switchbox
+ * routing and pins before dropping the master reference.
+ */
+int p7spi_destroy_ctrl(struct platform_device* pdev)
+{
+ struct p7spi_core* const core = platform_get_drvdata(pdev);
+
+#ifdef DEBUG
+ BUG_ON(! core);
+ BUG_ON(! core->ctrl);
+ BUG_ON(! spi_master_get_devdata(core->ctrl));
+#endif
+
+ p7spi_exit_core_dbgfs(core);
+
+ spi_unregister_master(core->ctrl);
+ p7spi_exit_core(core);
+ p7spi_release_swb(core, (struct p7spi_swb*) pdev->dev.platform_data);
+ pinctrl_put(core->pctl);
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(core->ctrl);
+
+ dev_info(&pdev->dev, "core %d removed\n", pdev->id);
+ return 0;
+}
+EXPORT_SYMBOL(p7spi_destroy_ctrl);
+
+/*
+ * p7spi_probe_kern() - probe the shared ("kernel") part of the P7 SPI
+ * block: common register area, clock and switchbox bookkeeping.
+ *
+ * Must complete before any per-core controller driver can init:
+ * p7spi_kern.dev is assigned last and acts as the "ready" flag tested by
+ * the cores (see p7spi_init_core()).
+ */
+static int __devinit p7spi_probe_kern(struct platform_device* pdev)
+{
+ int err, i __maybe_unused;
+ struct resource const* res;
+ char const* msg;
+ struct p7spi_plat_data const* const pdata = (struct p7spi_plat_data const*)
+ dev_get_platdata(&pdev->dev);
+
+#ifdef DEBUG
+ /*
+ * Check fifo size settings consistency for all cores.
+ */
+ BUG_ON(! pdata);
+ BUG_ON(! pdata->wcnt);
+ BUG_ON(! pdata->max_master_hz);
+ BUG_ON(! pdata->max_slave_hz);
+ BUG_ON(! pdata->min_hz);
+ BUG_ON(p7spi_kern.dev);
+#endif
+
+ /* Keep a private copy of the per-core type table. */
+ p7spi_kern.num_cores = pdata->num_cores;
+ p7spi_kern.cores = kmemdup(pdata->cores,
+ pdata->num_cores * sizeof(enum p7spi_core_type),
+ GFP_KERNEL);
+ if (!p7spi_kern.cores) {
+ err = -ENOMEM;
+ msg = "no memory to store cores";
+ goto err;
+ }
+
+ p7spi_kern.fifo_wcnt = pdata->wcnt;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (! res) {
+ err = -ENXIO;
+ msg = "failed to get common memory region";
+ goto free_cores;
+ }
+
+ p7spi_kern.res = request_mem_region(res->start,
+ resource_size(res),
+ dev_name(&pdev->dev));
+ if (! p7spi_kern.res) {
+ err = -EBUSY;
+ msg = "failed to request common memory region";
+ goto free_cores;
+ }
+
+ p7spi_kern.vaddr = (unsigned long) ioremap(res->start,
+ resource_size(res));
+ if (! p7spi_kern.vaddr) {
+ err = -ENXIO;
+ msg = "failed to remap common memory region";
+ goto release;
+ }
+
+ p7spi_kern.clk = clk_get_sys("common", P7SPI_DRV_NAME);
+ if (IS_ERR(p7spi_kern.clk)) {
+ err = PTR_ERR(p7spi_kern.clk);
+ msg = "failed to get clock";
+ goto unmap;
+ }
+
+ err = clk_prepare_enable(p7spi_kern.clk);
+ if (err) {
+ msg = "failed to enable clock";
+ goto put;
+ }
+
+ /* One switchbox descriptor per pad. */
+ p7spi_kern.swb = kzalloc(
+ pdata->num_pads * sizeof(struct p7swb_desc),
+ GFP_KERNEL);
+ if (!p7spi_kern.swb) {
+ err = -ENOMEM;
+ msg = "failed to allocate memory for switchbox";
+ goto unprepare;
+ }
+ p7spi_kern.swb_sz = pdata->num_pads;
+
+#ifdef CONFIG_PM_SLEEP
+ /*
+ * Pre-allocate buffers used to save / restore switchbox routing
+ * across system sleep (see p7spi_suspend() / p7spi_resume()).
+ */
+ p7spi_kern.nb_swb_in = 0;
+ for (i = 0; i < p7spi_kern.num_cores; i++) {
+ p7spi_kern.nb_swb_in += p7swb_core_offset(p7spi_kern.cores[i]);
+ }
+
+ p7spi_kern.saved_swb_in = kmalloc(p7spi_kern.nb_swb_in, GFP_KERNEL);
+ p7spi_kern.saved_swb_out = kmalloc(p7spi_kern.swb_sz, GFP_KERNEL);
+
+ if (!p7spi_kern.saved_swb_in || !p7spi_kern.saved_swb_out) {
+ kfree(p7spi_kern.saved_swb_in);
+ kfree(p7spi_kern.saved_swb_out);
+ kfree(p7spi_kern.swb);
+
+ err = -ENOMEM;
+ msg = "failed to allocate memory for switchbox suspend";
+ goto unprepare;
+ }
+#endif
+
+ /* Mark all fifo as available */
+ __raw_writel(0, p7spi_kern.vaddr + P7SPI_MEM);
+ p7spi_kern.rev = pdata->rev;
+ /* Publish: cores test p7spi_kern.dev to know the shared part is up. */
+ p7spi_kern.dev = &pdev->dev;
+
+ dev_info(&pdev->dev, "Loaded SPI revision %d\n", p7spi_kern.rev);
+
+ p7spi_init_kern_dbgfs();
+ return 0;
+
+unprepare:
+ clk_disable_unprepare(p7spi_kern.clk);
+put:
+ clk_put(p7spi_kern.clk);
+unmap:
+ iounmap((void*) p7spi_kern.vaddr);
+release:
+ release_mem_region(res->start, resource_size(res));
+free_cores:
+ kfree(p7spi_kern.cores);
+err:
+ dev_err(&pdev->dev, "%s\n", msg);
+ return err;
+}
+
+/*
+ * p7spi_remove_kern() - release everything acquired by p7spi_probe_kern(),
+ * in reverse order, and mark the shared part as gone so the driver can be
+ * probed again (probe asserts p7spi_kern.dev is NULL).
+ */
+static int __devexit p7spi_remove_kern(struct platform_device* pdev)
+{
+	p7spi_exit_kern_dbgfs();
+
+#ifdef CONFIG_PM_SLEEP
+	kfree(p7spi_kern.saved_swb_in);
+	kfree(p7spi_kern.saved_swb_out);
+#endif
+
+	kfree(p7spi_kern.swb);
+	/* Free the per-core type table kmemdup()ed at probe time (was
+	 * leaked by the original implementation). */
+	kfree(p7spi_kern.cores);
+
+	clk_disable_unprepare(p7spi_kern.clk);
+	clk_put(p7spi_kern.clk);
+
+	iounmap((void*) p7spi_kern.vaddr);
+	release_mem_region(p7spi_kern.res->start, resource_size(p7spi_kern.res));
+
+	/* Clear the "ready" flag tested by cores and by probe's BUG_ON. */
+	p7spi_kern.dev = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * p7spi_suspend() - save shared state lost across system sleep: the FIFO
+ * memory assignment register and the switchbox output / input pad routing
+ * (register banks at offsets 0x100 and 0x200, one 32 bits word per pad),
+ * then gate the common clock.
+ */
+static int p7spi_suspend(struct device *dev)
+{
+ int i, word_offset;
+
+ p7spi_kern.saved_fifo_sz = __raw_readl(p7spi_kern.vaddr + P7SPI_MEM);
+
+ /* Save output pad routing (P7SPI_SWB_OPAD registers). */
+ for (i = 0; i < p7spi_kern.swb_sz; i++) {
+ word_offset = i * 4;
+ p7spi_kern.saved_swb_out[i] =
+ (u8) __raw_readl(p7spi_kern.vaddr + 0x100 + word_offset);
+ }
+ /* Save input pad routing (P7SPI_SWB_IPAD registers). */
+ for (i = 0; i < p7spi_kern.nb_swb_in; i++) {
+ word_offset = i * 4;
+ p7spi_kern.saved_swb_in[i] =
+ (u8) __raw_readl(p7spi_kern.vaddr + 0x200 + word_offset);
+ }
+
+ clk_disable_unprepare(p7spi_kern.clk);
+
+ return 0;
+}
+
+/*
+ * p7spi_resume() - re-enable the common clock and restore the FIFO
+ * assignment and switchbox routing captured by p7spi_suspend().
+ */
+static int p7spi_resume(struct device *dev)
+{
+ int i, word_offset;
+
+ clk_prepare_enable(p7spi_kern.clk);
+
+ __raw_writel(p7spi_kern.saved_fifo_sz,
+ p7spi_kern.vaddr + P7SPI_MEM);
+
+ /* Restore output pad routing (P7SPI_SWB_OPAD registers). */
+ for (i = 0; i < p7spi_kern.swb_sz; i++) {
+ word_offset = i * 4;
+ __raw_writel(p7spi_kern.saved_swb_out[i],
+ p7spi_kern.vaddr + 0x100 + word_offset);
+ }
+ /* Restore input pad routing (P7SPI_SWB_IPAD registers). */
+ for (i = 0; i < p7spi_kern.nb_swb_in; i++) {
+ word_offset = i * 4;
+ __raw_writel(p7spi_kern.saved_swb_in[i],
+ p7spi_kern.vaddr + 0x200 + word_offset);
+ }
+
+ return 0;
+}
+#else
+#define p7spi_suspend NULL
+#define p7spi_resume NULL
+#endif
+
+/*
+ * System sleep hooks only. Const-qualified: the PM core takes a pointer
+ * to const and never writes through this table.
+ */
+static const struct dev_pm_ops p7spi_dev_pm_ops = {
+	.suspend = p7spi_suspend,
+	.resume  = p7spi_resume,
+};
+
+static struct platform_driver p7spi_kern_driver = {
+	.driver = {
+		.name   = P7SPI_DRV_NAME,
+		/*
+		 * Do NOT set .bus here: this is a platform driver and
+		 * platform_driver_register() binds it to the platform bus.
+		 * The original assignment of &spi_bus_type was wrong.
+		 */
+		.owner  = THIS_MODULE,
+		.pm     = &p7spi_dev_pm_ops,
+	},
+	.probe  = p7spi_probe_kern,
+	.remove = __devexit_p(p7spi_remove_kern)
+};
+module_platform_driver(p7spi_kern_driver);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("Parrot7 SPI shared resources driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/spi/p7-spi.h b/drivers/parrot/spi/p7-spi.h
new file mode 100644
index 00000000000000..9d4c562d129af2
--- /dev/null
+++ b/drivers/parrot/spi/p7-spi.h
@@ -0,0 +1,139 @@
+/**
+ * linux/drivers/parrot/spi/p7-spi.h - Parrot7 SPI shared resources
+ * driver interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 04-Jul-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7_SPI_H
+#define _P7_SPI_H
+
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <spi/p7-spi_regs.h>
+#include "p7-swb.h"
+
+#define P7SPI_DRV_NAME "p7-spi"
+
+/**************
+ * IP revisions
+ **************/
+enum spi_revision {
+ SPI_REVISION_1 = 1,
+ SPI_REVISION_2 = 2,
+ SPI_REVISION_3 = 3,
+ SPI_REVISION_NR, /* sentinel: one past the last known revision */
+};
+
+/***********
+ * Core type
+ ***********/
+/* Kind of hardware core behind a controller: plain SPI or MPEG variant. */
+enum p7spi_core_type {
+ P7SPI_CORE_SPI,
+ P7SPI_CORE_MPEG,
+};
+
+
+/****************
+ * Transfer mode
+ ****************/
+
+/*
+ * Number of data pads used per transfer. Values match the hardware
+ * TMODE / SMODE register encoding (single = 0, dual = 1, quad = 3 — see
+ * P7SPI_INSTR_SPI_TMODE_* / P7SPI_CTRL_SMODE_* in p7-spi_regs.h).
+ */
+enum p7spi_xfer_mode {
+ P7SPI_SINGLE_XFER = 0,
+ P7SPI_DUAL_XFER = 1,
+ P7SPI_QUAD_XFER = 3
+};
+
+/*************************************************************
+ * Internal multiplexing handling: switchbox and pads control
+ *************************************************************/
+
+/*
+ * One switchbox routing entry: connect pad number @pad to function
+ * @function with direction @direction. Tables built with
+ * P7SPI_INIT_SWB() are terminated by P7SPI_SWB_LAST (negative pad).
+ */
+struct p7spi_swb {
+ int pad;
+ enum p7swb_direction direction;
+ enum p7swb_function function;
+};
+
+#define P7SPI_INIT_SWB(_pad, _dir, _func) \
+ { .pad = _pad, .direction = _dir, .function = _func }
+#define P7SPI_SWB_LAST \
+ { .pad = - 1 }
+
+/**
+ * struct p7spi_ctrl_data - Parrot7 SPI controller per device platform
+ * settings
+ * @half_duplex: self explanatory
+ * @read: controller can receive data
+ * @write: controller can send data
+ * @xfer_mode: transfer mode (one of %P7SPI_SINGLE_XFER,
+ * %P7SPI_DUAL_XFER or %P7SPI_QUAD_XFER)
+ * @swb: switchbox settings (see &struct p7spi_swb)
+ * @fifo_wcnt: FIFO size in number of 32 bits words (was undocumented;
+ * presumed by symmetry with @thres_wcnt — confirm)
+ * @thres_wcnt: FIFO threshold in number of 32 bits words
+ * @tsetup_ss_ns: slave select setup time (nanoseconds)
+ * @thold_ss_ns: slave select hold time (nanoseconds)
+ * @toffclk_ns: idle time between two bytes when the slave select is
+ * maintained low (nanoseconds)
+ * @toffspi_ns: idle time between two bytes when the slave select is
+ * maintained high (nanoseconds)
+ * @tcapture_delay_ns: delay between the capture edge of SPI clock sent to the
+ * slave and the capture the incoming data (nanoseconds)
+ */
+struct p7spi_ctrl_data {
+ bool half_duplex;
+ bool read;
+ bool write;
+ enum p7spi_xfer_mode xfer_mode;
+ struct p7spi_swb const* swb;
+ size_t fifo_wcnt;
+ size_t thres_wcnt;
+ u32 tsetup_ss_ns;
+ u32 thold_ss_ns;
+ u32 toffclk_ns;
+ u32 toffspi_ns;
+ u32 tcapture_delay_ns;
+};
+
+
+/**
+ * struct p7spis_ctrl_data - Parrot7 SPI controller settings for slave
+ * (device side) operation
+ *
+ * @common: common settings shared between slave and master mode
+ * @circ_buf_period: circular buffer period length in bytes (presumably —
+ * confirm against the slave driver)
+ * @periods: number of periods in the circular buffer
+ */
+struct p7spis_ctrl_data {
+ struct p7spi_ctrl_data common;
+ size_t circ_buf_period;
+ u32 periods;
+};
+
+/***************************
+ * Internal FIFOs handling.
+ ***************************/
+
+/*
+ * struct p7spi_plat_data - platform settings for the shared SPI "kernel"
+ * driver (consumed by p7spi_probe_kern()).
+ */
+struct p7spi_plat_data {
+ /* FIFO words available (copied to p7spi_kern.fifo_wcnt). */
+ unsigned int wcnt;
+ /* Bus frequency bounds in Hz (presumably — confirm with sub-drivers). */
+ unsigned int max_master_hz;
+ unsigned int max_slave_hz;
+ unsigned int min_hz;
+ /* SPI IP revision (one of enum spi_revision). */
+ unsigned int rev;
+
+ /* variables to handle switchbox */
+ /* Number of switchbox pads (sizes p7spi_kern.swb). */
+ unsigned int num_pads;
+ /* Per-core type table, duplicated at probe time. */
+ enum p7spi_core_type* cores;
+ unsigned int num_cores;
+};
+
+/* Encode the FIFO word-count field of core _core in the P7SPI_MEM register. */
+#define P7SPI_MEM_WCNT(_core, _cnt) \
+ (P7SPI_MEM_ ## _cnt ## W << (_core * P7SPI_MEM_BITLEN))
+
+
+#endif
diff --git a/drivers/parrot/spi/p7-spi_priv.h b/drivers/parrot/spi/p7-spi_priv.h
new file mode 100644
index 00000000000000..b61e9430c030da
--- /dev/null
+++ b/drivers/parrot/spi/p7-spi_priv.h
@@ -0,0 +1,330 @@
+/**
+ *
+ * @file p7-spi_priv.h
+ *
+ * @brief
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 26-Jul-2012
+ *
+ * $Id:$
+ *
+ * Copyright (C) 2008 Parrot S.A.
+ */
+
+#ifndef _P7_SPI_PRIV_H
+#define _P7_SPI_PRIV_H
+
+#if ! defined(CONFIG_SPI_PARROT7) && ! defined(CONFIG_SPI_PARROT7_MODULE)
+#error Cannot build disabled Parrot7 SPI driver !
+#endif
+
+#include <linux/debugfs.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/dma-mapping.h>
+#include "p7-spi_regs.h"
+
+/**********************
+ * SPI kernel handling
+ **********************/
+
+struct clk;
+struct resource;
+
+/*
+ * Global state shared by every core: common register area, clock and
+ * switchbox bookkeeping. A single instance exists (p7spi_kern), owned by
+ * the shared platform driver (see p7spi_probe_kern()).
+ */
+struct p7spi_kernel {
+ /* Serializes access to shared resources (presumably — confirm). */
+ struct mutex lck;
+ /* Virtual base of the remapped common register area. */
+ unsigned long vaddr;
+ /* Shared driver device; non-NULL once probed ("ready" flag). */
+ struct device* dev;
+ /* Requested common memory region. */
+ struct resource* res;
+ /* "common" clock feeding the shared block. */
+ struct clk* clk;
+ /* SPI IP revision (from platform data). */
+ unsigned int rev;
+ /* Total FIFO words available (from platform data wcnt). */
+ unsigned int fifo_wcnt;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry* dbg_dir;
+ struct dentry* dbg_file;
+ struct debugfs_regset32 dbg_regs;
+#endif
+ /* Private copy of the per-core type table. */
+ enum p7spi_core_type* cores;
+ unsigned int num_cores;
+ /* Switchbox descriptors, one per pad (swb_sz entries). */
+ struct p7swb_desc* swb;
+ unsigned int swb_sz;
+#ifdef CONFIG_PM_SLEEP
+ /* Snapshots taken by p7spi_suspend(), restored by p7spi_resume(). */
+ u32 saved_fifo_sz;
+ unsigned int nb_swb_in;
+ u8 *saved_swb_in;
+ u8 *saved_swb_out;
+#endif
+};
+
+extern struct p7spi_kernel p7spi_kern;
+
+/*****************
+ * Cores handling
+ *****************/
+
+struct dma_chan;
+struct device;
+struct platform_device;
+struct p7spi_swb;
+
+/*
+ * Reserve enough scatter-gather entries to cope with the maximum
+ * transfer size a single instruction may handle. Each entry will
+ * be mapped onto one physical memory page on demand.
+ */
+#define P7SPI_SGNR_MAX (DIV_ROUND_UP(P7SPI_INSTR_LEN_MASK, PAGE_SIZE) + 1)
+
+/* Per-core runtime state, embedded as spi_master driver data. */
+struct p7spi_core {
+ /* Number of completed bytes for current transfer */
+ size_t bytes;
+ /* Transmit buffer for current transfer */
+ char const* tx_buff;
+ /* Receive buffer for current transfer */
+ char* rx_buff;
+ /* Virtual base of this core's register window. */
+ unsigned long vaddr;
+ /* Message completion token. */
+ struct completion done;
+ /* Physical address of the core data FIFO (P7SPI_DATA). */
+ unsigned long fifo_paddr;
+ /* DMA channel serving this core, or NULL in PIO mode. */
+ struct dma_chan* dma_chan;
+ /* True when dma_addr was mapped by us and must be unmapped. */
+ bool map_dma;
+ struct dma_slave_config dma_cfg;
+ /* Cookie of the last submitted DMA transaction. */
+ dma_cookie_t dma_cook;
+ dma_addr_t dma_addr;
+ size_t dma_sz;
+ /* DMA wakeup threshold in bytes (see p7spi_cook_dmaif()). */
+ size_t dma_thres;
+ size_t dma_min;
+ /* FIFO bytes allotted to this core (see p7spi_alloc_fifo()). */
+ size_t fifo_sz;
+ /* Transfer bytes threshold */
+ size_t thres_sz;
+ /* Maximum number of bytes that can be process by a single segment. */
+ size_t sgm_sz;
+ /* Current transfer hardware status */
+ int stat;
+ /* SPI layer master this core is attached to. */
+ struct spi_master* ctrl;
+ /* Per-core clock. */
+ struct clk* clk;
+ /* Pin control handle for this core's pads. */
+ struct pinctrl* pctl;
+ /* Requested register memory region. */
+ struct resource const* regs;
+ /* Core interrupt, or -1 when running in polling mode. */
+ int irq;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry* dbg_file;
+ struct debugfs_regset32 dbg_regs;
+#endif
+};
+
+/* Zero-based id of @core, i.e. the SPI bus number it drives. */
+static inline int p7spi_core_id(struct p7spi_core const* core)
+{
+	struct spi_master const* const master = core->ctrl;
+
+#ifdef DEBUG
+	BUG_ON(! master);
+	BUG_ON(! p7spi_kern.dev);
+#endif
+
+	return (int) master->bus_num;
+}
+
+/* Device embedded in the SPI master attached to @core. */
+static inline struct device* p7spi_core_dev(struct p7spi_core const* core)
+{
+	struct spi_master* const master = core->ctrl;
+
+#ifdef DEBUG
+	BUG_ON(! master);
+	BUG_ON(! p7spi_kern.dev);
+#endif
+
+	return &master->dev;
+}
+
+extern void p7spi_readb_fifo(struct p7spi_core*, size_t);
+extern void p7spi_writeb_fifo(struct p7spi_core*, size_t);
+extern void p7spi_readl_fifo(struct p7spi_core*, size_t);
+extern void p7spi_writel_fifo(struct p7spi_core*, size_t);
+extern void p7spi_read_fifo(struct p7spi_core*, size_t);
+extern void p7spi_write_fifo(struct p7spi_core*, size_t);
+
+#if 0
+extern ssize_t p7spi_start_dma(struct p7spi_core*,
+ struct dma_slave_config*,
+ dma_cookie_t*,
+ void (*)(void*));
+
+extern int p7spi_wait_dma(struct p7spi_core*,
+ struct dma_slave_config const*,
+ dma_cookie_t,
+ int (*check_stat)(struct p7spi_core const*, u32),
+ size_t);
+#endif
+
+extern void p7spi_enable_core(struct p7spi_core const*,
+ struct spi_device const*,
+ u32 ctrl_reg);
+
+extern int p7spi_setup_core(struct p7spi_core*,
+ struct spi_device*,
+ u32*);
+
+/*
+ * Operations a master / slave sub-driver provides to the common layer
+ * (see _p7spi_create_ctrl() and p7spi_init_core()).
+ */
+struct p7spi_ops {
+ /* spi_master setup() handler. */
+ int (*setup)(struct spi_device*);
+ /* spi_master transfer_one_message() handler. */
+ int (*xfer)(struct spi_master*, struct spi_message*);
+ /* Core interrupt handler; NULL means pure polling mode. */
+ irq_handler_t handle_irq;
+ /* Run the message pump with realtime priority (spi_master.rt). */
+ bool rt;
+ /* Request a DMA channel for this core. */
+ bool dma;
+ /* Also request the DMA_CYCLIC capability. */
+ bool dma_cyclic;
+};
+
+extern int _p7spi_create_ctrl(struct platform_device*,
+ struct p7spi_ops const*,
+ size_t);
+
+#define p7spi_create_ctrl(_pdev, _ops, _type) \
+ _p7spi_create_ctrl(_pdev, \
+ _ops, \
+ sizeof(_type))
+
+extern int p7spi_register(struct platform_device*,
+ struct p7spi_core *);
+
+extern int p7spi_abort_create(struct platform_device* pdev);
+
+extern int p7spi_destroy_ctrl(struct platform_device*);
+
+#ifdef DEBUG
+
+/* All of this should be properly setup by the generic SPI layer. */
+#define P7SPI_ASSERT_MSG(_msg, _ctrl, _spi, _dev) \
+ BUG_ON(! p7spi_kern.dev); \
+ BUG_ON(! _msg); \
+ BUG_ON(! _ctrl); \
+ BUG_ON(! _spi); \
+ BUG_ON(! _dev); \
+ BUG_ON(! (_dev)->master); \
+ BUG_ON((_dev)->master != _spi); \
+ BUG_ON((_dev)->chip_select); \
+ BUG_ON((_dev)->mode & ~(_spi)->mode_bits); \
+ BUG_ON((_dev)->bits_per_word != 8); \
+ BUG_ON(((_spi)->flags & SPI_MASTER_NO_RX) && \
+ ((_spi)->flags & SPI_MASTER_NO_TX)); \
+ BUG_ON((_msg)->actual_length); \
+ BUG_ON(list_empty(&(_msg)->transfers)); \
+ dev_dbg(p7spi_core_dev(&(_ctrl)->core), \
+ "%smsg xfer requested for device %s\n", \
+ (_msg)->is_dma_mapped ? \
+ "DMA mapped " : "", \
+ dev_name(&(_dev)->dev))
+
+
+extern void p7spi_dump_regs(struct p7spi_core const*, char const*);
+
+#else
+
+#define P7SPI_ASSERT_MSG(_msg, _ctrl, _spi, _dev)
+#define p7spi_dump_regs(_core, _msg)
+
+#endif
+
+struct spi_transfer;
+struct spi_message;
+struct spi_master;
+
+extern int p7spi_init_xfer(struct p7spi_core*,
+ struct spi_transfer*,
+ struct spi_message const* __attribute__((unused)),
+ struct spi_master const* __attribute__((unused)));
+
+extern int p7spi_data_ready(struct p7spi_core const*, u32);
+
+extern int p7spi_poll_stat(struct p7spi_core const*,
+ int (*check_stat)(struct p7spi_core const*, u32),
+ unsigned long);
+
+extern void p7spi_process_round(struct p7spi_core*);
+
+/* Tell whether @core was granted a DMA channel at init time. */
+static inline bool p7spi_has_dma(struct p7spi_core const* core)
+{
+	return core->dma_chan != NULL;
+}
+
+/* Tell whether @core runs in interrupt mode (irq is -1 when polling). */
+static inline bool p7spi_has_irq(struct p7spi_core const* core)
+{
+	return ! (core->irq < 0);
+}
+
+int p7spi_core_index(int core_id, enum p7spi_core_type type);
+extern ssize_t p7spi_alloc_fifo(int, enum p7spi_core_type, size_t);
+extern void p7spi_free_fifo(int core_id, enum p7spi_core_type type);
+
+/*
+ * Human readable name of transfer mode @mode ("single", "dual", "quad");
+ * NULL (after a debug BUG()) for any out-of-range value.
+ */
+static inline char const* p7spi_mode_name(enum p7spi_xfer_mode mode)
+{
+	if (mode == P7SPI_SINGLE_XFER)
+		return "single";
+	if (mode == P7SPI_DUAL_XFER)
+		return "dual";
+	if (mode == P7SPI_QUAD_XFER)
+		return "quad";
+
+#ifdef DEBUG
+	BUG();
+#endif
+	return NULL;
+}
+
+/*************************
+ * DMA related operations
+ *************************/
+
+extern int p7spi_config_dma(struct p7spi_core*);
+extern int p7spi_map_dma(struct p7spi_core*);
+extern int p7spi_run_dma(struct p7spi_core*, void (*complete)(void*));
+extern int p7spi_wait_dma(struct p7spi_core*);
+extern struct dma_async_tx_descriptor*
+p7spi_dmaengine_prep_slave_single(struct dma_chan* chan,
+ dma_addr_t buf,
+ size_t len,
+ enum dma_transfer_direction dir,
+ unsigned long flags);
+
+/* Undo the DMA mapping set up by p7spi_map_dma(), if we own it. */
+static inline void p7spi_unmap_dma(struct p7spi_core* core)
+{
+ if (core->map_dma)
+ dma_unmap_single(core->dma_chan->device->dev,
+ core->dma_addr,
+ core->dma_sz,
+ core->dma_cfg.direction);
+}
+
+/*
+ * Return 0 if the last submitted DMA transaction (core->dma_cook)
+ * completed successfully, -EBADFD otherwise. (DMA_SUCCESS is the
+ * pre-3.13 dmaengine name of DMA_COMPLETE.)
+ */
+static inline int p7spi_check_dma(struct p7spi_core* core)
+{
+ if (likely(dma_async_is_tx_complete(core->dma_chan,
+ core->dma_cook,
+ NULL,
+ NULL) == DMA_SUCCESS))
+ return 0;
+ return -EBADFD;
+}
+
+/* Abort every pending DMA transaction on the core's channel. */
+static inline void p7spi_cancel_dma(struct p7spi_core* core)
+{
+ dmaengine_terminate_all(core->dma_chan);
+}
+
+static inline void p7spi_cook_dmaif(struct p7spi_core const* core)
+{
+ /*
+ * Setup wakeup threshold to synchronize SPI and DMA controller transfers.
+ * Both thresholds are programmed in 32 bits words (hence the division).
+ */
+ __raw_writel(core->dma_thres / sizeof(u32),
+ core->vaddr + P7SPI_TH_RX);
+ __raw_writel((core->fifo_sz - core->dma_thres) / sizeof(u32),
+ core->vaddr + P7SPI_TH_TX);
+}
+
+static inline void p7spi_uncook_dmaif(struct p7spi_core const* core)
+{
+ /*
+ * Setup wakeup threshold for interrupt / polling mode, undoing
+ * p7spi_cook_dmaif() (again in 32 bits words).
+ */
+ __raw_writel(core->thres_sz / sizeof(u32),
+ core->vaddr + P7SPI_TH_RX);
+ __raw_writel((core->fifo_sz - core->thres_sz) / sizeof(u32),
+ core->vaddr + P7SPI_TH_TX);
+}
+
+#endif
diff --git a/drivers/parrot/spi/p7-spi_regs.h b/drivers/parrot/spi/p7-spi_regs.h
new file mode 100644
index 00000000000000..26e0b7e132f7ca
--- /dev/null
+++ b/drivers/parrot/spi/p7-spi_regs.h
@@ -0,0 +1,188 @@
+#ifndef __P7_SPIREGS_H
+#define __P7_SPIREGS_H
+
+#define P7SPI_SPEED_MAXDIV_R1 (0x03ffU) /* 10 bits on R1 */
+#define P7SPI_SPEED_MAXDIV_R23 (0xffffU) /* 16 bits on R2 and R3 */
+
+#define P7SPI_TSETUP_SHIFT (0)
+#define P7SPI_THOLD_SHIFT (4)
+#define P7SPI_TOFFCLK_SHIFT (16)
+#define P7SPI_TOFFSPI_SHIFT (20)
+#define P7SPI_TCAPTURE_DELAY_SHIFT (24)
+#define P7SPI_TCAPTURE_DELAY_MIN 0x3U
+
+#define P7SPI_CORE_OFFSET(spi_id) (0x1000*(spi_id))
+#define P7SPI_COMMON_OFFSET 0xF000
+#define P7SPI_COMMON_LEN (0x25C + 1)
+
+#define P7SPI_CTRL 0x000 // SPI Control Register
+#define P7SPI_SPEED 0x004 // SPI Speed Register
+#define P7SPI_TIMING 0x008 // SPI Timing Register
+#define P7SPI_STATUS 0x00C // SPI Status Register
+#define P7SPI_ITEN 0x010 // SPI Interrupt Enable Register
+#define P7SPI_ITACK 0x014 // SPI Interrupt Acknowledge Register
+#define P7SPI_TH_RX 0x018 // SPI FIFO RX Threshold Register
+#define P7SPI_TH_TX 0x01C // SPI FIFO TX threshold Register
+#define P7SPI_TH_RXCNT 0x020 // SPI FIFO RX Threshold Bytes Counter Register
+#define P7SPI_TH_TXCNT 0x024 // SPI FIFO TX Threshold Bytes Counter Register
+#define P7SPI_RXCNT 0x028 // SPI FIFO RX Threshold Bytes Counter Register
+#define P7SPI_TXCNT 0x02C // SPI FIFO TX Threshold Bytes Counter Register
+#define P7SPI_RX_VALID_BYTES 0x030 // Number of valid bytes in a word read from rx fifo
+#define P7SPI_FIFO_FLUSH 0x034 // Write-only register for rx/tx fifo flush
+#define P7SPI_FIFO_RXLVL 0x038
+#define P7SPI_FIFO_TXLVL 0x03c
+#define P7SPI_INSTR 0x600 // SPI Instruction Registers (first address) 0x030 - 0x5FC
+#define P7SPI_DATA 0x800 // SPI Data Registers (first address) 0x610 - 0x7FC
+
+#define P7SPI_TH_RX_MAX 0xFF
+#define P7SPI_CNT_MAX (0xFFFFU)
+
+/* FIFO Flush register SPI_FIFO_FLUSH */
+#define P7SPI_FIFORX_FLUSH (1 << 0)
+#define P7SPI_FIFOTX_FLUSH (1 << 1)
+#define P7SPI_DMA_FLUSH (1 << 2)
+#define P7SPI_FIFO_FLUSH_MASK 0x7
+
+// Status register
+#define P7SPI_STATUS_END_TRANSM (1 << 0) // End of transmission
+#define P7SPI_STATUS_INSTR_FULL (1 << 1) //
+#define P7SPI_STATUS_SPI_BUSY (1 << 4) // SPI is busy
+#define P7SPI_STATUS_RXEMPTY (1 << 8) // Rx FIFO is empty
+#define P7SPI_STATUS_RXFULL (1 << 9) // Rx FIFO is full
+#define P7SPI_STATUS_RX_TH_REACHED (1 << 10) // Rx FIFO level is high threshold
+#define P7SPI_STATUS_RXCNT_TH_REACHED (1 << 11) // Rx FIFO byte counter is higher than threshold
+#define P7SPI_STATUS_RXACCESS_ERROR (1 << 12) // Rx access error
+#define P7SPI_STATUS_TXEMPTY (1 << 16) // Tx FIFO is empty
+#define P7SPI_STATUS_TXFULL (1 << 17) // Tx FIFO is full
+#define P7SPI_STATUS_TX_TH_REACHED (1 << 18) // Tx FIFO level is high threshold
+#define P7SPI_STATUS_TXCNT_TH_REACHED (1 << 19) // Tx FIFO byte counter is higher than threshold
+#define P7SPI_STATUS_TXACCESS_ERROR (1 << 20) // Tx access error
+
+#define P7SPI_ITACK_RXCNT_ACK (1 << 11)
+#define P7SPI_ITACK_TXCNT_ACK (1 << 19)
+
+#define P7SPI_INSTR_SS_END_INSTR_HIGH (1 << 21)
+#define P7SPI_INSTR_SS_END_INSTR_LOW (0 << 21)
+#define P7SPI_INSTR_SPI_TMODE_SHIFT 22
+#define P7SPI_INSTR_SPI_TMODE_SINGLE (0 << P7SPI_INSTR_SPI_TMODE_SHIFT)
+#define P7SPI_INSTR_SPI_TMODE_DUAL (1 << P7SPI_INSTR_SPI_TMODE_SHIFT)
+#define P7SPI_INSTR_SPI_TMODE_QUAD (3 << P7SPI_INSTR_SPI_TMODE_SHIFT)
+#define P7SPI_INSTR_SPI_TMODE_MASK (3 << P7SPI_INSTR_SPI_TMODE_SHIFT)
+#define P7SPI_INSTR_DMAE_ENABLE (1 << 24)
+#define P7SPI_INSTR_DMAE_DISABLE (0 << 24)
+#define P7SPI_INSTR_SS_END_BYTE_HIGH (1 << 29)
+#define P7SPI_INSTR_SS_END_BYTE_LOW (0 << 29)
+#define P7SPI_INSTR_RW_CLK_ONLY (0 << 30)
+#define P7SPI_INSTR_RW_WRITE (1 << 30)
+#define P7SPI_INSTR_RW_READ (2 << 30)
+#define P7SPI_INSTR_RW_FULL_DUPLEX (3 << 30)
+#define P7SPI_INSTR_LEN_MASK (0xFFFFU)
+
+// Control register
+#define P7SPI_CTRL_ENABLE (1 << 0) // SPI Enabled
+#define P7SPI_CTRL_DISABLE (0 << 0) // SPI Disabled
+#define P7SPI_CTRL_CPHA (1 << 1) // Clock phase selection
+#define P7SPI_CTRL_CPHAn (0 << 1) // Clock phase selection
+#define P7SPI_CTRL_CPOL (1 << 2) // Clock polarity selection
+#define P7SPI_CTRL_CPOLn (0 << 2) // Clock polarity selection
+#define P7SPI_CTRL_MSTR (1 << 3) // Master / Slave selection
+#define P7SPI_CTRL_SLAVE (0 << 3) // Master / Slave selection
+#define P7SPI_CTRL_LSB (1 << 4) // Enable LSB first TX/RX data
+#define P7SPI_CTRL_MSB (0 << 4) // Enable MSB first TX/RX data
+#define P7SPI_CTRL_BITMSB (1 << 5) // LSB goes to MSB data pin (dual/quad modes only)
+#define P7SPI_CTRL_BITLSB (0 << 5) // LSB goes to LSB data pin (dual/quad modes only)
+#define P7SPI_CTRL_RWS_NOP (0 << 8)
+#define P7SPI_CTRL_RWS_WO (1 << 8)
+#define P7SPI_CTRL_RWS_RO (2 << 8)
+#define P7SPI_CTRL_RWS_RW (3 << 8)
+#define P7SPI_CTRL_SMODE_SHIFT 10
+#define P7SPI_CTRL_SMODE_SINGLE (0 << 10)
+#define P7SPI_CTRL_SMODE_DUAL (1 << 10)
+#define P7SPI_CTRL_SMODE_QUAD (3 << 10)
+#define P7SPI_CTRL_SSIGNS_IGNORED (1 << 12)
+#define P7SPI_CTRL_SSIGNS_NORMAL (0 << 12)
+#define P7SPI_CTRL_DMAES_ENABLED (1 << 13)
+#define P7SPI_CTRL_RX_FREE_RUNS (1 << 14)
+#define P7SPI_CTRL_TX_FREE_RUNS (1 << 15)
+#define P7SPI_CTRL_DMAES_DISABLED (0 << 13)
+#define P7SPI_CTRL_MISOSEL_DATA0 (0 << 16)
+#define P7SPI_CTRL_MISOSEL_DATA1 (1 << 16)
+#define P7SPI_CTRL_MISOSEL_DATA2 (2 << 16)
+#define P7SPI_CTRL_MISOSEL_DATA3 (3 << 16)
+#define P7SPI_CTRL_MOSISEL_DATA0 (0 << 18)
+#define P7SPI_CTRL_MOSISEL_DATA1 (1 << 18)
+#define P7SPI_CTRL_MOSISEL_DATA2 (2 << 18)
+#define P7SPI_CTRL_MOSISEL_DATA3 (3 << 18)
+
+#define P7SPI_ITEN_END_TRANSM (1 << 0) // Enable bit for IT: End of transmission
+#define P7SPI_ITEN_RX_FULL (1 << 9) // Enable bit for IT: Rx FIFO is full (mirrors status bit 9)
+#define P7SPI_ITEN_RX_TH_REACHED (1 << 10) // Enable bit for IT: Rx FIFO level is high threshold
+#define P7SPI_ITEN_RXCNT_TH_REACHED (1 << 11) // Enable bit for IT: Rx FIFO byte counter is higher than threshold
+#define P7SPI_ITEN_RXACCESS_ERROR (1 << 12) // Enable bit for IT: Rx access error
+#define P7SPI_ITEN_TXEMPTY (1 << 16) // Enable bit for IT: Tx FIFO is empty
+#define P7SPI_ITEN_TX_TH_REACHED (1 << 18) // Enable bit for IT: Tx FIFO level is high threshold
+#define P7SPI_ITEN_TXCNT_TH_REACHED (1 << 19) // Enable bit for IT: Tx FIFO byte counter is higher than threshold
+#define P7SPI_ITEN_TXACCESS_ERROR (1 << 20) // Enable bit for IT: Tx access error
+
+/*************************************************************
+ * Internal multiplexing handling: switchbox and pads control
+ *************************************************************/
+
+/*
+ * Output pad registers
+ * _pad: output pad number (0 - 15)
+ */
+#define P7SPI_SWB_OPAD(_pad) (0x100UL + (4UL * (_pad)))
+
+/*
+ * Output pad functions
+ * _core: core id (0 - 3)
+ */
+#define P7SPI_SWB_OPAD_CLK(_core) (((_core)*6) + 0)
+#define P7SPI_SWB_OPAD_SS(_core) (((_core)*6) + 1)
+#define P7SPI_SWB_OPAD_DATA0(_core) (((_core)*6) + 2)
+#define P7SPI_SWB_OPAD_DATA1(_core) (((_core)*6) + 3)
+#define P7SPI_SWB_OPAD_DATA2(_core) (((_core)*6) + 4)
+#define P7SPI_SWB_OPAD_DATA3(_core) (((_core)*6) + 5)
+#define P7SPI_SWB_OPAD_LOW 0x18
+#define P7SPI_SWB_OPAD_HIGH 0x19
+#define P7SPI_SWB_OPAD_Z 0x1a
+
+#define P7SPI_SWB_IPAD_BASE (0x200UL)
+
+/*
+ * Input pad registers
+ * _core: spi core id (0 - 3)
+ */
+#define P7SPI_SWB_IPAD_CLK(_core) (0x200UL + (24UL * (_core)))
+#define P7SPI_SWB_IPAD_SS(_core) (0x204UL + (24UL * (_core)))
+#define P7SPI_SWB_IPAD_DATA0(_core) (0x208UL + (24UL * (_core)))
+#define P7SPI_SWB_IPAD_DATA1(_core) (0x20CUL + (24UL * (_core)))
+#define P7SPI_SWB_IPAD_DATA2(_core) (0x210UL + (24UL * (_core)))
+#define P7SPI_SWB_IPAD_DATA3(_core) (0x214UL + (24UL * (_core)))
+
+/*
+ * Input pad functions
+ * _pad: input pad number (0 - 15)
+ */
+#define P7SPI_SWB_IPAD(_pad) (_pad)
+#define P7SPI_SWB_IPAD_LOW 0x10
+#define P7SPI_SWB_IPAD_HIGH 0x11
+
+/***************************
+ * Internal FIFOs handling.
+ ***************************/
+
+#define P7SPI_MEM (0x000UL)
+
+#define P7SPI_MEM_0W (0U)
+#define P7SPI_MEM_16W (1U)
+#define P7SPI_MEM_32W (2U)
+#define P7SPI_MEM_64W (3U)
+#define P7SPI_MEM_MASK (0x3U)
+#define P7SPI_MEM_BITLEN (2U)
+#define P7SPI_MEM_WCNT_MAX_R1 (0xffU)
+#define P7SPI_MEM_WCNT_MAX_R23 (0xfffU)
+#define P7SPI_MEM_MAX_WORDS 128
+
+#endif
diff --git a/drivers/parrot/spi/p7-spim.c b/drivers/parrot/spi/p7-spim.c
new file mode 100644
index 00000000000000..10eaf9f7c4da58
--- /dev/null
+++ b/drivers/parrot/spi/p7-spim.c
@@ -0,0 +1,1370 @@
+/**
+ * linux/drivers/parrot/spi/p7-spim.c - Parrot7 SPI master controller driver
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author: alvaro.moran@parrot.com
+ * @author: Gregor Boirie <gregor.boirie@parrot.com>
+ * @date: 2012-01-09
+ *
+ * This file is released under the GPL
+ *
+ * TODO:
+ * - remove unneeded timeout while polling controller status,
+ * - clean up debug / log / BUG_ON mess,
+ * - refactor with slave driver common parts,
+ * - document anatomy of a transfer (message / xfer / segments /rounds) in
+ * DMA, interrupt and polling modes,
+ * - implement full support for full duplex in PIO & DMA modes,
+ * - implement MPW2 instructions queueing ahead of time when possible,
+ * - review.
+ *
+ * Notes:
+ * ------
+ * In MPW1, FIFO TX Threshold interrupt is not generated if there is enough
+ * data in FIFO to finish the current instruction. That's why we push
+ * instructions only when we want to (en/de)queue more bytes than what was
+ * previously instructed.
+ * This forces us to maintain a count of bytes remaining to order for the
+ * current transfer (see ctrl->order). This SHOULD be fixed in MPW2.
+ *
+ * Currently, full duplex transfers are partially implemented: full duplex will
+ * work only for tx & rx buffers start @ aligned on the same boundary. Moreover,
+ * full duplex support in DMA mode cannot be implemented until pl330 driver
+ * supports interleaved mode.
+ *
+ * In MPW1, if a given instruction/frame has an unaligned number of bytes
+ * (not multiple of 4) to be received, the last 32-bit word in the RX FIFO
+ * will be merged with the byte(s) of the next instruction/frame.
+ * This prevents us from queueing instructions ahead of time and introduces
+ * sub-optimal wait states between aligned and unaligned transfers (where we
+ * have to wait for all instructions completion before firing up a new round).
+ * This MUST be fixed in MPW2.
+ *
+ * MPW1 does not provide software with a way to flush instruction FIFOs. This
+ * forces driver to poll for all instructions completion when flushing (see
+ * p7spim_flush). This MUST be fixed in MPW2.
+ *
+ * MPW1 does not provide software with a way to de-select a slave peripheral
+ * without toggling clock, MISO or MOSI pins (see p7spim_flush).
+ * This SHOULD be fixed for MPW2.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/dmaengine.h>
+#include <asm/io.h>
+#include "p7-spi.h"
+#include "p7-spim.h"
+#include "p7-spi_priv.h"
+
+static bool noirq = false;
+module_param(noirq, bool, 0644);
+MODULE_PARM_DESC(noirq, "do not use interrupt handling");
+
+static bool nodma = false;
+module_param(nodma, bool, 0644);
+MODULE_PARM_DESC(nodma, "do not use dma operations");
+
+/* The instruction bit used to release slave select (drive SS high) differs
+ * between P7 revisions; the proper value is set up at init time. */
+static u32 p7spim_SSEND = 0;
+
+/*
+ * Master core instance internal state
+ */
+struct p7spim_core {
+	/* Common SPI core instance */
+	struct p7spi_core core;
+	/* Number of bytes left to instruct for current transfer */
+	size_t order;
+	/* State of slave select when transfer ends */
+	bool drop_ss;
+	/* Hardware mode control register word for current device. */
+	uint32_t mode;
+	/* Core input clock rate in Hz */
+	unsigned int base_hz;
+	/* Controller / core / master current SPI clock rate in Hz */
+	unsigned int curr_hz;
+#ifdef CONFIG_PM_SLEEP
+	/* Saved control register across suspend */
+	u32 saved_ctrl;
+#endif
+};
+
+/*
+ * Return hardware divisor to reach SPI bus frequency passed in argument.
+ *
+ * Bus clock frequency is computed using the following formula:
+ * bus freq = parent clock freq / ((divisor + 1) * 2)
+ */
+static u32 p7spim_freq_div(struct p7spim_core const* ctrl, unsigned int hz)
+{
+	u32 const div = DIV_ROUND_UP(ctrl->base_hz, 2 * hz);
+	u32 speed_max_div;
+
+	if (p7spi_kern.rev == SPI_REVISION_1)
+		speed_max_div = P7SPI_SPEED_MAXDIV_R1;
+	else
+		speed_max_div = P7SPI_SPEED_MAXDIV_R23;
+
+	if ((div - 1) & ~speed_max_div)
+		dev_dbg(p7spi_core_dev(&ctrl->core),
+		        "unreachable clock frequency requested (%uHz)\n",
+		        hz);
+
+	return (div - 1) & speed_max_div;
+}
+
+/*
+ * Express delay passed in argument as number of bus clock ticks using the
+ * specified bus clock period (in nano seconds).
+ * The number of ticks is rounded up to the closest upper tick count.
+ */
+static u32 p7spim_delay_ticks(struct p7spim_core const* ctrl,
+                              unsigned int delay,
+                              unsigned int ns)
+{
+	u32 const ticks = DIV_ROUND_UP(delay, ns);
+
+#define P7SPI_TIMING_MAXTICKS (0xfU)
+
+	if (ticks & ~P7SPI_TIMING_MAXTICKS)
+		dev_dbg(p7spi_core_dev(&ctrl->core),
+		        "unreachable delay requested (%uns)\n",
+		        delay);
+
+	return ticks & P7SPI_TIMING_MAXTICKS;
+}
+
+/* Adjust timings given the bus clock frequency passed in argument. */
+static void p7spim_adjust_clk(struct p7spim_core* ctrl,
+                              struct p7spi_plat_data const* pdata,
+                              struct p7spi_ctrl_data const* cdata,
+                              unsigned int hz)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	u32 div_reg;
+	u32 tim_reg;
+	u32 ns;
+
+	/* Save uncorrected requested frequency. */
+	ctrl->curr_hz = hz;
+
+	/* Round requested frequency to possible limits. */
+	hz = max_t(unsigned int, hz, pdata->min_hz);
+	hz = min_t(unsigned int, hz, pdata->max_master_hz);
+
+	/* Compute divisor to reach maximum SPI master clock frequency. */
+	div_reg = p7spim_freq_div(ctrl, hz);
+
+	/* Compute timings: 'ns' first holds the effective bus rate in Hz (as printed), then the clock period in ns. */
+	ns = ctrl->base_hz / ((div_reg + 1) * 2);
+	dev_info(p7spi_core_dev(core),
+	         "bus clock frequency set to %uHz",
+	         ns);
+	ns = NSEC_PER_SEC / ns;
+	tim_reg = (p7spim_delay_ticks(ctrl,
+	                              cdata->tsetup_ss_ns,
+	                              ns) << P7SPI_TSETUP_SHIFT) |
+	          (p7spim_delay_ticks(ctrl,
+	                              cdata->thold_ss_ns,
+	                              ns) << P7SPI_THOLD_SHIFT) |
+	          (p7spim_delay_ticks(ctrl,
+	                              cdata->toffclk_ns,
+	                              ns) << P7SPI_TOFFCLK_SHIFT) |
+	          (p7spim_delay_ticks(ctrl,
+	                              cdata->toffspi_ns,
+	                              ns) << P7SPI_TOFFSPI_SHIFT) |
+	          (p7spim_delay_ticks(ctrl,
+	                              cdata->tcapture_delay_ns +
+	                              (P7SPI_TCAPTURE_DELAY_MIN *
+	                               NSEC_PER_SEC / ctrl->base_hz),
+	                              NSEC_PER_SEC / ctrl->base_hz) <<
+	           P7SPI_TCAPTURE_DELAY_SHIFT);
+
+	/* Setup master clock divisor. */
+	__raw_writel(div_reg, core->vaddr + P7SPI_SPEED);
+	/* Setup master timings. */
+	__raw_writel(tim_reg, core->vaddr + P7SPI_TIMING);
+}
+
+/*
+ * Poll callback: return 0 once all pushed instructions (i.e. the sequence of
+ * instructed transfers) have completed, -EAGAIN while still in progress.
+ */
+static int p7spim_instr_done(struct p7spi_core const* core, u32 stat)
+{
+	if (! (stat & P7SPI_STATUS_END_TRANSM))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * Order a new transfer, i.e. push a new instruction to fire up a transfer
+ * round.
+ */
+static void p7spim_push_instr(struct p7spim_core const* ctrl,
+                              size_t bytes,
+                              bool dma,
+                              bool drop_ss)
+{
+	struct p7spi_core const* const core = &ctrl->core;
+	u32 cmd = ctrl->mode | bytes;
+
+	/*
+	 * Build instruction word.
+	 */
+	if (core->tx_buff)
+		/* Write requested. */
+		cmd |= P7SPI_INSTR_RW_WRITE;
+
+	if (core->rx_buff)
+		/* Read requested. */
+		cmd |= P7SPI_INSTR_RW_READ;
+
+	if (drop_ss)
+		/* Slave de-select requested. */
+		cmd |= p7spim_SSEND;
+
+	if (dma)
+		/* DMA requested. */
+		cmd |= P7SPI_INSTR_DMAE_ENABLE;
+
+#ifdef DEBUG
+	dev_dbg(p7spi_core_dev(core), "\t\t\t%zuB instruction: 0x%08x\n", bytes, cmd);
+	BUG_ON(! ctrl);
+	BUG_ON(! bytes);
+	BUG_ON(bytes > P7SPI_INSTR_LEN_MASK);
+	BUG_ON(__raw_readl(core->vaddr + P7SPI_STATUS) &
+	       P7SPI_STATUS_INSTR_FULL);
+#endif
+	/*
+	 * Push instruction word.
+	 */
+	__raw_writel(cmd, core->vaddr + P7SPI_INSTR);
+}
+
+
+/*
+ * Limitation on r1 and r2 using DMA in TX mode.
+ * The SPI notifies DMA that it is ready to receive bytes when
+ * nb_bytes_in_instruction - TXCNT_VAL > DMA burst,
+ * and TXCNT_VAL is reset when the first byte of a new instruction is
+ * written in the FIFO. So when a first transfer completes, TXCNT_VAL
+ * is equal to the amount of bytes that have been transferred. Then, if
+ * another round tries to transfer the same amount by DMA, TXCNT_VAL
+ * is equal to nb_bytes and the DMA is not notified, so the DMA
+ * transfer can't start and TXCNT_VAL is not reset. Deadlock.
+ * The workaround is that before any DMA transfer is started in
+ * TX mode, wait for the previous instruction to complete
+ * and reset TXCNT_VAL.
+ */
+static int p7spim_reset_txcnt(struct p7spim_core const* ctrl)
+{
+	struct p7spi_core const* const core = &ctrl->core;
+
+	if (core->tx_buff) {
+		int err;
+		err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+		if (err) {
+			p7spi_dump_regs(core, "dma timeout");
+			return err;
+		}
+		__raw_writel(P7SPI_ITACK_TXCNT_ACK, core->vaddr + P7SPI_ITACK);
+		if (p7spi_kern.rev == SPI_REVISION_1) {
+			/* MPW1 needs 0 to be rewritten in the register */
+			writel(0, core->vaddr + P7SPI_ITACK);
+		}
+
+	}
+
+	return 0;
+}
+
+
+/* Tell whether the bytes being processed are the transfer's terminal ones AND
+ * the caller requested slave de-select at end of transfer. */
+static bool p7spim_must_drop_ss(struct p7spim_core* ctrl, size_t rem_bytes)
+{
+	struct p7spi_core* const core = &ctrl->core;
+
+	return (core->bytes == rem_bytes) && ctrl->drop_ss;
+}
+
+
+/*
+ * Launch a new transfer performing an alignment round.
+ * This will transfer enough bytes in polling mode to align overall transfer
+ * on a 32 bits boundary so that subsequent round may use either interrupts or
+ * DMA mode (FIFOs are 32bits wide).
+ */
+static int p7spim_start_xfer(struct p7spim_core* ctrl)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	char const* const buff = core->rx_buff ? : core->tx_buff;
+	size_t bytes = core->bytes;
+	bool ss_dropped;
+
+	if (! bytes)
+		/*
+		 * No more bytes to transfer: this may happen when slave de-select is
+		 * requested.
+		 */
+		return 0;
+
+	bytes = min(bytes, (size_t) (PTR_ALIGN(buff, sizeof(u32)) - buff));
+	if (bytes) {
+		/* Not aligned on word boundary: perform an alignment round. */
+		int err;
+
+		/*
+		 * Instruct transfer ahead of time so that bytes are transmitted as soon
+		 * as bytes are queued into FIFO. Controller will suspend transmission
+		 * (i.e., stop toggling bus clock) if FIFOs are not ready (tx empty or
+		 * rx full).
+		 *
+		 * On R1, if we have to de-select the slave, it will be done in another
+		 * function (_end_xfer). On R2 and R3, we do it in place.
+		 */
+		if ((p7spi_kern.rev != SPI_REVISION_1) &&
+		    p7spim_must_drop_ss(ctrl, bytes)) {
+			p7spim_push_instr(ctrl, bytes, false, true);
+			ss_dropped = true;
+		}
+		else {
+			p7spim_push_instr(ctrl, bytes, false, false);
+			ss_dropped = false;
+		}
+
+		if (core->tx_buff)
+			/* Feed tx FIFO. */
+			p7spi_writeb_fifo(core, bytes);
+
+		dev_dbg(p7spi_core_dev(core),
+		        "\t\t\t%zuB align %swr[%p] rd[%p]\n",
+		        bytes,
+		        ss_dropped ? "cs " : "",
+		        core->tx_buff ? (core->tx_buff - bytes) : NULL,
+		        core->rx_buff);
+
+		/* Wait for completion of instruction. */
+		err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+		if (err) {
+			p7spi_dump_regs(core, "align round failed");
+			return err;
+		}
+
+		if (core->rx_buff)
+			/* Fetch data from rx FIFO. */
+			p7spi_readb_fifo(core, bytes);
+
+		/* Update count of completed bytes. */
+		core->bytes -= bytes;
+	}
+
+	/*
+	 * Return bytes count left to transfer to inform caller whether or not it
+	 * must run additional transfer rounds.
+	 */
+	return core->bytes;
+}
+
+/*
+ * Finalize a transfer. When processing terminal bytes of a transfer, take
+ * care of slave select line as described below.
+ *
+ * As of MPW1, controller hardware only allows software to de-select slave
+ * after each transferred byte (i.e., software cannot request to de-select
+ * slave when transmitting last byte of a transfer). This is the reason why
+ * we have to build a separate transfer round to take care of slave select.
+ *
+ * MPW2 will modify this behavior.
+ */
+static int p7spim_end_xfer(struct p7spim_core* ctrl, bool drop_ss)
+{
+	int err;
+	struct p7spi_core* const core = &ctrl->core;
+
+	if (p7spi_kern.rev != SPI_REVISION_1)
+		/*
+		 * On newer versions, CS is raised at the end of the
+		 * instruction and there is no need to send a lonely byte
+		 * to do so
+		 */
+		return 0;
+
+	if (! drop_ss)
+		/* Silently ignore if slave deselect is not required. */
+		return 0;
+
+	p7spim_push_instr(ctrl, 1U, false, true);
+
+	if (core->tx_buff)
+		p7spi_writeb_fifo(core, 1U);
+
+	dev_dbg(p7spi_core_dev(core),
+	        "\t\t\t%uB cs wr[%p] rd[%p]\n",
+	        1U,
+	        core->tx_buff ? (core->tx_buff - 1U) : NULL,
+	        core->rx_buff);
+
+	err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+	if (err) {
+		p7spi_dump_regs(core, "cs round failed");
+		return err;
+	}
+
+	if (core->rx_buff)
+		p7spi_readb_fifo(core, 1U);
+
+	return 0;
+}
+
+/*
+ * Launch the very first round of the first segment of a transfer,
+ * warming up the chaining of successive transfer rounds machinery
+ * (by preloading tx FIFO if required).
+ * This is used in polling and / or interrupt mode (also when DMA is enabled
+ * if transfer size is not long enough to run the DMA controller).
+ *
+ * At this time, we are guaranteed the transfer start @ is aligned on a 32 bits
+ * boundary (see p7spim_start_xfer).
+ */
+static ssize_t p7spim_start_sgm(struct p7spim_core* ctrl)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	size_t const bytes = min(core->bytes, core->fifo_sz);
+	bool ss_dropped;
+
+#ifdef DEBUG
+	BUG_ON(! bytes);
+#endif
+
+	if (core->bytes > core->fifo_sz) {
+		/*
+		 * Compute maximum number of bytes to order
+		 * (up to maximum segment size).
+		 */
+		size_t const order = min(core->bytes, core->sgm_sz);
+
+		/*
+		 * Instruct to transfer a number of bytes rounded down to
+		 * maximum transfer segment size so that a single instruction is needed
+		 * per segment.
+		 */
+		if ((p7spi_kern.rev != SPI_REVISION_1) &&
+		    p7spim_must_drop_ss(ctrl, order)) {
+			p7spim_push_instr(ctrl, order, false, true);
+			ss_dropped = true;
+		}
+		else {
+			p7spim_push_instr(ctrl, order, false, false);
+			ss_dropped = false;
+		}
+
+		/* Update count of bytes left to order to complete current transfer. */
+		ctrl->order = core->bytes - order;
+	}
+	else {
+		/* A single round is enough to complete this segment. */
+		if ((p7spi_kern.rev != SPI_REVISION_1) && ctrl->drop_ss) {
+			p7spim_push_instr(ctrl, bytes, false, true);
+			ss_dropped = true;
+		}
+		else {
+			p7spim_push_instr(ctrl, bytes, false, false);
+			ss_dropped = false;
+		}
+	}
+
+	if (core->tx_buff) {
+		/*
+		 * Enqueue up to one FIFO depth bytes. In cases where we need multiple
+		 * rounds, this allows us to preload tx FIFO with as many bytes as we
+		 * can.
+		 */
+		p7spi_write_fifo(core, bytes);
+		dev_dbg(p7spi_core_dev(core),
+		        "\t\t\t%zuB %sproc wr[%p]\n",
+		        bytes,
+		        ss_dropped ? "cs " : "",
+		        core->tx_buff - bytes);
+	}
+
+	/*
+	 * Return bytes count left to process to inform caller whether or not it
+	 * must run additional transfer rounds.
+	 * This will return 0 as long as the number of bytes to transfer is <= FIFO
+	 * depth.
+	 */
+	return core->bytes - bytes;
+}
+
+/*
+ * Complete a transfer round and initiate a new one if needed.
+ * Should be called after a hardware completion event happened
+ * (either FIFO threshold reached, last byte transferred or last
+ * instruction completed).
+ * Transfer rounds carried out by p7spim_process_round are always threshold
+ * bytes long.
+ * This is used in both polling and interrupt mode of operations.
+ */
+static int p7spim_process_round(struct p7spim_core* ctrl)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	size_t const thres = core->thres_sz;
+	size_t const sz = core->fifo_sz + thres;
+	size_t const bytes = core->bytes;
+	bool ss_dropped = false;
+
+	/*
+	 * At this time:
+	 * bytes left to order / instruct:
+	 *     ctrl->order
+	 * bytes left to queue:
+	 *     core->bytes - core->fifo_sz
+	 * bytes left to transfer (i.e., minus bytes still present within fifo) :
+	 *     core->bytes - (core->fifo_sz - core->thres_sz)
+	 * bytes left to dequeue:
+	 *     core->bytes
+	 */
+
+	if (ctrl->order && (bytes < (ctrl->order + sz))) {
+		/*
+		 * There are still some more bytes to order and the number of bytes left
+		 * to queue at the end of this function
+		 * (core->bytes - (core->fifo_sz + core->thres_sz)) is < ctrl->order,
+		 * the current number of bytes ordered, i.e., at the
+		 * end of this function we will have queued more bytes than the current
+		 * number of ordered bytes.
+		 * Order as many bytes as we can, i.e., up to maximum segment size...
+		 */
+		size_t const order = min(ctrl->order, core->sgm_sz);
+
+		if ((p7spi_kern.rev != SPI_REVISION_1) &&
+		    (ctrl->order == order) &&
+		    ctrl->drop_ss) {
+			p7spim_push_instr(ctrl, order, false, true);
+			ss_dropped = true;
+		}
+		else {
+			p7spim_push_instr(ctrl, order, false, false);
+			ss_dropped = false;
+		}
+		ctrl->order -= order;
+	}
+
+	if (bytes <= sz)
+		/*
+		 * Remaining bytes to queue (core->bytes - core->fifo_sz) is <=
+		 * threshold.
+		 * we still have to perform an extra threshold sized fifo read to
+		 * catch up with the one we should have done just below (see
+		 * p7spim_end_sgm).
+		 * Return 0 to inform caller there is no need to perform additional
+		 * rounds.
+		 */
+		return 0;
+
+	if (core->rx_buff)
+		/* Fetch input bytes from receive FIFO. */
+		p7spi_readl_fifo(core, thres);
+
+	/* Update count of remaining bytes to dequeue. */
+	core->bytes -= thres;
+
+	/*
+	 * Now start another round... At this time, remaining bytes to
+	 * queue (core->bytes + core->thres_sz - core->fifo_sz) MUST
+	 * be >= core->thres_sz since we perform threshold sized transfers
+	 * only.
+	 */
+	if (core->tx_buff)
+		/* Feed transmit FIFO. */
+		p7spi_writel_fifo(core, thres);
+
+	dev_dbg(p7spi_core_dev(core),
+	        "\t\t\t%zuB %sproc wr[%p] rd[%p]\n",
+	        thres,
+	        ss_dropped ? "cs " : "",
+	        core->tx_buff ? (core->tx_buff - thres) : NULL,
+	        core->rx_buff ? (core->rx_buff - thres) : NULL);
+
+	/*
+	 * Return count of bytes left to process to inform caller we need to perform
+	 * additional rounds.
+	 */
+	return core->bytes;
+}
+
+/*
+ * Perform the last round of the last segment of a transfer taking care of last
+ * "residue" bytes.
+ *
+ * At this time, we are guaranteed the transfer start @ is aligned on a 32 bits
+ * boundary (see p7spim_process_round).
+ */
+static int p7spim_end_sgm(struct p7spim_core* ctrl)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	size_t const fifo = core->fifo_sz;
+	int err;
+
+	if (core->bytes > fifo) {
+		/*
+		 * We had to perform more than a full FIFO bytes long transfer, meaning
+		 * we are coming from p7spim_process_round and we still have to run an
+		 * extra threshold sized fifo read.
+		 */
+		size_t const bytes = core->bytes - fifo;
+		size_t const thres = core->thres_sz;
+
+#ifdef DEBUG
+		BUG_ON(core->bytes > (fifo + thres));
+#endif
+
+		if (core->rx_buff) {
+			/* Fetch input bytes from receive FIFO. */
+			p7spi_readl_fifo(core, thres);
+			dev_dbg(p7spi_core_dev(core),
+			        "\t\t\t%zuB proc rd[%p]\n",
+			        thres,
+			        core->rx_buff - thres);
+		}
+
+		/* Update completed bytes counter. */
+		core->bytes -= thres;
+
+		if (bytes && core->tx_buff) {
+			/* Enqueue last bytes to send if any. */
+			p7spi_write_fifo(core, bytes);
+			dev_dbg(p7spi_core_dev(core),
+			        "\t\t\t%zuB end wr[%p]\n",
+			        bytes,
+			        core->tx_buff - bytes);
+		}
+	}
+
+	/* Ensure all instructions were completed. */
+	err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+	if (err) {
+		p7spi_dump_regs(core, "end round failed");
+		return err;
+	}
+
+	if (! (core->bytes && core->rx_buff))
+		/* No more bytes to receive. */
+		return 0;
+
+	/* Fetch bytes from input FIFO. */
+	p7spi_read_fifo(core, core->bytes);
+	dev_dbg(p7spi_core_dev(core),
+	        "\t\t\t%zuB end rd[%p]\n",
+	        core->bytes,
+	        core->rx_buff - core->bytes);
+	return 0;
+}
+
+/*
+ * Perform transfer in polling mode (busy-waiting on core status registers).
+ */
+static int p7spim_xfer_bypoll(struct p7spim_core* ctrl)
+{
+	if (p7spim_start_sgm(ctrl) > 0) {
+		int err;
+		struct p7spi_core* const core = &ctrl->core;
+
+		do {
+			err = p7spi_poll_stat(core, p7spi_data_ready, 15 * HZ);
+		} while (! err && p7spim_process_round(ctrl));
+
+		if (err) {
+			p7spi_dump_regs(core, "proc round failed");
+			return err;
+		}
+	}
+
+	return p7spim_end_sgm(ctrl);
+}
+
+/*
+ * Master controller interrupt handler
+ * In interrupt mode, process threshold bytes long transfer rounds.
+ */
+static irqreturn_t p7spim_handle_irq(int irq, void* dev_id)
+{
+	struct p7spim_core* const ctrl = (struct p7spim_core*) dev_id;
+	struct p7spi_core* const core = &ctrl->core;
+
+	core->stat = p7spi_poll_stat(core, p7spi_data_ready, 0);
+	switch (core->stat) {
+	case -EAGAIN:
+		/* Status reports nothing particular: spurious interrupt ?? */
+		return IRQ_NONE;
+
+	case 0:
+		if (p7spim_process_round(ctrl))
+			/*
+			 * Round processed but there are still some more threshold bytes
+			 * long rounds to perform (i.e., remaining bytes to complete >
+			 * threshold).
+			 */
+			return IRQ_HANDLED;
+	}
+
+	/*
+	 * Disable interrupts since remaining bytes will be transferred in polling
+	 * mode.
+	 */
+	__raw_writel(0, core->vaddr + P7SPI_ITEN);
+
+	/* Notify worker we are done. */
+	complete(&core->done);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Perform transfer in interrupt mode.
+ */
+static int p7spim_xfer_byirq(struct p7spim_core* ctrl)
+{
+	struct p7spi_core* const core = &ctrl->core;
+
+	if (p7spi_has_irq(core)) {
+		/* Require being notified of FIFOs (over/under)flow (MPW2 only). */
+		u32 iten = P7SPI_ITEN_RXACCESS_ERROR |
+		           P7SPI_ITEN_TXACCESS_ERROR;
+
+		if (p7spim_start_sgm(ctrl) > 0) {
+			/*
+			 * Initialize interrupt mode completion token: we will be notified
+			 * once "residue" bytes only are left to be processed.
+			 */
+			INIT_COMPLETION(core->done);
+
+			/*
+			 * When transferring in rx & tx mode, we might catch 2 interrupts:
+			 * activate rx-side interrupt only when rx & tx required.
+			 */
+			if (core->rx_buff)
+				/* rx and / or tx requested. */
+				iten |= P7SPI_ITEN_RX_TH_REACHED;
+			else if (core->tx_buff)
+				/* tx only. */
+				iten |= P7SPI_ITEN_TX_TH_REACHED;
+			__raw_writel(iten, core->vaddr + P7SPI_ITEN);
+
+			/* Wait for end of threshold bytes long rounds processing. */
+			if (! wait_for_completion_timeout(&core->done, 15 * HZ)) {
+				p7spi_dump_regs(core, "proc round failed");
+
+				/* Disable interrupts since interrupt handler could not do it. */
+				__raw_writel(0, core->vaddr + P7SPI_ITEN);
+
+				/*
+				 * Return timeout error only if rounds processing did not
+				 * encounter any.
+				 */
+				core->stat = core->stat ? : -ETIMEDOUT;
+			}
+
+			if (core->stat)
+				return core->stat;
+		}
+
+		return p7spim_end_sgm(ctrl);
+	}
+
+	return p7spim_xfer_bypoll(ctrl);
+}
+
+/*
+ * DMA task completion handler (run in tasklet context).
+ * Signals core->done so the worker waiting for segment completion wakes up.
+ */
+static void p7spim_complete_dma(void* controller)
+{
+	struct p7spim_core* const ctrl = controller;
+	struct p7spi_core* const core = &ctrl->core;
+
+	/* Notify worker we are done. */
+	complete(&core->done);
+}
+
+/*
+ * Transfer extra bytes after DMA rounds.
+ */
+static int p7spim_xfer_dma_extra(struct p7spim_core* ctrl, size_t bytes)
+{
+	struct p7spi_core* const core = &ctrl->core;
+	int err;
+
+#ifdef DEBUG
+	BUG_ON(bytes >= core->fifo_sz);
+#endif
+
+	if (!bytes)
+		return 0;
+
+	if (core->tx_buff) {
+		err = p7spi_poll_stat(core, p7spi_data_ready, 5 * HZ);
+		if (err) {
+			p7spi_dump_regs(core, "xtra round failed");
+			return err;
+		}
+		p7spi_write_fifo(core, bytes);
+		dev_dbg(p7spi_core_dev(core),
+		        "\t\t\t%zuB xtra wr[%p]\n",
+		        bytes, core->tx_buff - bytes);
+	}
+
+	err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+	if (err) {
+		p7spi_dump_regs(core, "xtra round failed");
+		return err;
+	}
+
+	if (core->rx_buff) {
+		p7spi_read_fifo(core, bytes);
+		dev_dbg(p7spi_core_dev(core),
+		        "\t\t\t%zuB xtra rd[%p]\n",
+		        bytes, core->rx_buff - bytes);
+	}
+
+	core->bytes -= bytes;
+	return 0;
+}
+
+/*
+ * Perform transfer in DMA mode.
+ * DMA transfers must satisfy following constraints:
+ * - start @ must be aligned on a cache line boundary,
+ * - size must be a multiple of cache line size,
+ * - DMA controller <-> SPI controller exchange burst size must be equal to
+ * SPI controller FIFO threshold to properly handle DMA peripheral interface
+ * handshakes (this means transfer size must be a multiple of burst size as
+ * well).
+ * When these conditions cannot be satisfied, fall back to interrupt / polling
+ * mode.
+ */
+static int p7spim_xfer_bydma(struct p7spim_core* ctrl)
+{
+	int err = 0;
+	bool ss_dropped;
+	struct p7spi_core* const core = &ctrl->core;
+	char const* const buff = core->rx_buff ? : core->tx_buff;
+	ssize_t bytes;
+
+	bytes = min_t(ssize_t,
+	              core->bytes,
+	              PTR_ALIGN(buff, dma_get_cache_alignment()) - buff);
+
+	if (! p7spi_has_dma(core) ||
+	    ((core->bytes - (size_t) bytes) < core->dma_min))
+		/*
+		 * Cannot perform transfer using DMA because either DMA is disabled or
+		 * transfer size and / or start @ don't comply with the above
+		 * constraints. Fall back to interrupt / polling mode.
+		 */
+		return p7spim_xfer_byirq(ctrl);
+
+	if (bytes) {
+		/*
+		 * Transfer enough bytes to align start @ to cache line / burst size
+		 * boundary if needed. core->bytes content is altered to fool
+		 * interrupt / polling implementation so that it transfers required
+		 * count of bytes only.
+		 */
+		size_t const sz = core->bytes - (size_t) bytes;
+		bool save_drop_ss = ctrl->drop_ss;
+
+		/* Request p7spim_xfer_byirq to transfer sz bytes only. */
+		core->bytes = (size_t) bytes;
+		ctrl->drop_ss = false;
+
+		err = p7spim_xfer_byirq(ctrl);
+		if (err)
+			return err;
+
+		/*
+		 * Restore and update proper bytes count now that DMA operation is
+		 * possible.
+		 */
+		core->bytes = sz;
+		ctrl->drop_ss = save_drop_ss;
+	}
+
+	/*
+	 * Setup DMA operation properties common to all segments for this
+	 * transfer.
+	 */
+	err = p7spi_config_dma(core);
+	if (err)
+		return err;
+
+	/* Setup SPI controller DMA peripheral interface. */
+	p7spi_cook_dmaif(core);
+
+	do {
+		size_t extra_bytes = 0;
+		/*
+		 * map segment by segment.
+		 */
+		bytes = p7spi_map_dma(core);
+		if (bytes < 0) {
+			/*
+			 * Nothing was mapped: report the failure instead of
+			 * jumping to the exit path with err still 0, which
+			 * would silently turn this error into a success.
+			 */
+			p7spi_uncook_dmaif(core);
+			return bytes;
+		}
+		else if (core->bytes - bytes > 0 &&
+		         !core->tx_buff &&
+		         core->bytes - bytes < core->fifo_sz &&
+		         core->bytes < core->sgm_sz) {
+			/*
+			 * This is a little hack to use the same instruction
+			 * to transfer remaining bytes in RX mode.
+			 */
+			extra_bytes = core->bytes - bytes;
+			bytes = core->bytes;
+			dev_dbg(p7spi_core_dev(core),
+			        "\t\t\tusing dma instr to send %zuB extra\n",
+			        extra_bytes);
+		}
+
+		/* Submit DMA operation for current segment. */
+		err = p7spi_run_dma(core, p7spim_complete_dma);
+		if (err)
+			goto unmap;
+
+		if (p7spi_kern.rev == SPI_REVISION_1) {
+			/* we have to reset txcnt between two transfers only on R1 */
+			err = p7spim_reset_txcnt(ctrl);
+			if (err) {
+				p7spi_cancel_dma(core);
+				goto unmap;
+			}
+		}
+
+		/* Instruct SPI controller to start transfer. */
+		if ((p7spi_kern.rev != SPI_REVISION_1) &&
+		    p7spim_must_drop_ss(ctrl, extra_bytes)) {
+			/*
+			 * core->bytes is already decreased in p7spi_run_dma, so at this
+			 * point, we check if this is the last round of bytes or not.
+			 */
+			p7spim_push_instr(ctrl, (size_t) bytes, true, true);
+			ss_dropped = true;
+		}
+		else {
+			p7spim_push_instr(ctrl, (size_t) bytes, true, false);
+			ss_dropped = false;
+		}
+
+		dev_dbg(p7spi_core_dev(core),
+		        "\t\t\t%zuB dma %swr[%p] rd[%p]\n",
+		        (size_t) bytes,
+		        ss_dropped ? "cs " : "",
+		        core->tx_buff ? core->tx_buff - bytes + extra_bytes : NULL,
+		        core->rx_buff ? core->rx_buff - bytes + extra_bytes : NULL);
+
+		/* Wait for segment completion. */
+		err = p7spi_wait_dma(core);
+		if (err) {
+			/* Cancel current DMA operations if still undergoing... */
+			p7spi_cancel_dma(core);
+			goto unmap;
+		}
+
+		core->stat = p7spi_check_dma(core);
+		if (core->stat) {
+			err = core->stat;
+			goto unmap;
+		}
+
+		p7spi_unmap_dma(core);
+
+		err = p7spim_xfer_dma_extra(ctrl, extra_bytes);
+		if (err) {
+			p7spi_uncook_dmaif(core);
+			return err;
+		}
+
+
+	} while (core->bytes >= core->dma_min);
+
+unmap:
+	p7spi_uncook_dmaif(core);
+	if (err) {
+		p7spi_unmap_dma(core);
+		return err;
+	}
+
+	/*
+	 * When DMA is completed in TX mode, we don't know how many bytes are lying
+	 * in the FIFO.
+	 * As xfer_byirq or xfer_bypoll start by filling completely the FIFO, we
+	 * need to ensure that there is room for these bytes, so we just wait for
+	 * the "dma" bytes to be sent before continuing.
+	 */
+	if (core->tx_buff) {
+		err = p7spi_poll_stat(core, p7spim_instr_done, 5 * HZ);
+		if (err) {
+			p7spi_dump_regs(core, "dma wait timeout");
+			return err;
+		}
+
+		/*
+		 * After a DMA TX xfer, an extra DMA request is going to be
+		 * generated when the next instruction is queued and the "FIFO
+		 * TX threshold reached" condition is true. It triggers a bug
+		 * if the next instruction is RX DMA because in this case the
+		 * DMA controller will start too soon and access an empty FIFO.
+		 * The workaround is to send a "null" instruction and to flush
+		 * the DMA request generated by this instruction to be in a
+		 * clean state.
+		 */
+		__raw_writel(0, core->vaddr + P7SPI_INSTR);
+		writel(P7SPI_DMA_FLUSH, core->vaddr + P7SPI_FIFO_FLUSH);
+	}
+
+	if (! core->bytes)
+		return 0;
+
+	/*
+	 * Finalize transfer by handling "residue" bytes in interrupt / polling mode.
+	 */
+	return p7spim_xfer_byirq(ctrl);
+}
+
+/* Transfer strategy entry point: attempt DMA first; p7spim_xfer_bydma falls
+ * back to interrupt / polling modes when DMA cannot be used. */
+static inline int p7spim_do_xfer(struct p7spim_core* ctrl)
+{
+	return p7spim_xfer_bydma(ctrl);
+}
+
+/*
+ * Called when trying to flush FIFOs on MPW1.
+ * Since MPW1 misses a way to flush instruction FIFOs, we need to ensure data
+ * FIFOs are flushed as long as all instructions are not completed.
+ */
+static int p7spim_empty_fifo(struct p7spi_core const* core, u32 stat)
+{
+	/* Always flush receive FIFO if not empty. */
+	__raw_writel(P7SPI_FIFORX_FLUSH, core->vaddr + P7SPI_FIFO_FLUSH);
+
+	if ((stat & (P7SPI_STATUS_RXEMPTY | P7SPI_STATUS_END_TRANSM)) ==
+	    (P7SPI_STATUS_RXEMPTY | P7SPI_STATUS_END_TRANSM))
+		/*
+		 * No more instructions to process and receive FIFO is empty: flush
+		 * succeeded.
+		 */
+		return 0;
+
+	if (core->tx_buff &&
+	    ! (stat & (P7SPI_STATUS_TXFULL | P7SPI_STATUS_END_TRANSM)))
+		/* Provide tx FIFO with fake data to consume instructions. */
+		__raw_writel(0, core->vaddr + P7SPI_DATA);
+
+	/* Tell caller we still need some more work to complete flush operation. */
+	return -EAGAIN;
+}
+
+static void p7spim_flush(struct p7spim_core const* ctrl)
+{
+	struct p7spi_core const* const core = &ctrl->core;
+
+	if (p7spi_kern.rev == SPI_REVISION_1) {
+		/* MPW1: drain FIFOs until all instructions complete, then de-select. */
+		int err = p7spi_poll_stat(core, p7spim_empty_fifo, 5 * HZ);
+
+		if (err)
+			dev_err(p7spi_core_dev(core),
+			        "failed to flush FIFOs (%d)\n",
+			        err);
+
+		/*
+		 * At last, de-select slave...
+		 * As of MPW1, there is no way to alter slave select signal without toggling
+		 * clock / MISO / MOSI.
+		 * Request controller to read one byte and de-select slave just after.
+		 * This will change in MPW2 !
+		 */
+		__raw_writel(P7SPI_INSTR_RW_READ |
+		             p7spim_SSEND |
+		             ctrl->mode |
+		             1U,
+		             core->vaddr + P7SPI_INSTR);
+
+		err = p7spi_poll_stat(core, p7spim_instr_done, 5 *HZ);
+		if (err)
+			dev_err(p7spi_core_dev(core),
+			        "failed to de-select slave (%d)\n",
+			        err);
+	}
+	else {
+		/*
+		 * MPW2 implementation: instructions FIFO is flushed at reset time, which
+		 * is performed by clk implementation.
+		 */
+		clk_disable(core->clk);
+		clk_enable(core->clk);
+	}
+
+	__raw_writel(P7SPI_FIFOTX_FLUSH | P7SPI_FIFORX_FLUSH,
+	             core->vaddr + P7SPI_FIFO_FLUSH);
+}
+
+/*
+ * Process generic SPI layer messages (run in workqueue / kthread context).
+ */
+static int p7spim_xfer_msg(struct spi_master* master, struct spi_message* msg)
+{
+	struct p7spim_core* const ctrl = spi_master_get_devdata(master);
+	struct p7spi_core* const core = &ctrl->core;
+	struct spi_device const* const slave = msg->spi;
+	struct p7spi_plat_data const* const pdata = dev_get_platdata(p7spi_kern.dev);
+	struct p7spi_ctrl_data const* const cdata = slave->controller_data;
+	unsigned int const slave_hz = slave->max_speed_hz ? :
+	                              pdata->max_master_hz;
+	struct spi_transfer* xfer;
+	int err = -ENOMSG;
+
+	P7SPI_ASSERT_MSG(msg, ctrl, master, slave);
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/*
+		 * When processing the last transfer of the current message, the
+		 * slave must be kept selected across messages if cs_change is
+		 * true.
+		 * For any other transfer of the message, the slave must be
+		 * de-selected between transfers if cs_change is true (hence
+		 * the XOR below).
+		 */
+		ctrl->drop_ss = list_is_last(&xfer->transfer_list,
+		                             &msg->transfers) ^ xfer->cs_change;
+
+		err = p7spi_init_xfer(core, xfer, msg, master);
+		if (err)
+			break;
+
+		if ((p7spi_kern.rev == SPI_REVISION_1) && ctrl->drop_ss) {
+			/*
+			 * If we need to de-select slave at end of transfer, reduce size by
+			 * one so that p7spim_end_sgm may handle it properly.
+			 */
+			core->bytes--;
+		}
+
+		/* Setup SPI bus clock rate if requested on a per-transfer basis. */
+		if (!xfer->speed_hz || xfer->speed_hz > slave_hz)
+			xfer->speed_hz = slave_hz;
+		if (xfer->speed_hz != ctrl->curr_hz)
+			p7spim_adjust_clk(ctrl, pdata, cdata, xfer->speed_hz);
+
+		/* Start transfer. */
+		err = p7spim_start_xfer(ctrl);
+		if (err < 0)
+			break;
+
+		if (err > 0) {
+			/*
+			 * Run additional transfer rounds if some more bytes are left to
+			 * transfer.
+			 */
+			err = p7spim_do_xfer(ctrl);
+			if (err)
+				break;
+		}
+
+		/* At last, take care of slave select output. */
+		err = p7spim_end_xfer(ctrl, ctrl->drop_ss);
+		if (err)
+			break;
+
+		msg->actual_length += xfer->len;
+		if (xfer->delay_usecs)
+			udelay(xfer->delay_usecs);
+	}
+
+	if (err) {
+		p7spim_flush(ctrl);
+		dev_err(p7spi_core_dev(core),
+		        "failed to transfer message (%d)\n",
+		        err);
+	}
+	msg->status = err;
+	spi_finalize_current_message(master);
+	return err;
+}
+
+/*
+ * Per-device setup callback: validate the transfer capabilities declared
+ * in the board's controller_data, initialize the shared core state,
+ * program clock / timings and enable the core in master mode.
+ */
+static int p7spim_setup(struct spi_device* slave)
+{
+ struct p7spi_plat_data const* const pdata = dev_get_platdata(p7spi_kern.dev);
+ struct p7spim_core* const ctrl = spi_master_get_devdata(slave->master);
+ struct p7spi_core* const core = &ctrl->core;
+ struct p7spi_ctrl_data const* const cdata = slave->controller_data;
+ u32 ctrl_reg = 0;
+ int err;
+
+ if (cdata->half_duplex)
+ slave->master->flags |= SPI_MASTER_HALF_DUPLEX;
+ if (! cdata->read)
+ slave->master->flags |= SPI_MASTER_NO_RX;
+ if (! cdata->write)
+ slave->master->flags |= SPI_MASTER_NO_TX;
+ /* Full-duplex must be able to both read and write; reject otherwise. */
+ if (! (cdata->half_duplex || (cdata->read ^ cdata->write))) {
+ dev_err(p7spi_core_dev(core),
+ "invalid transfer mode for device %s (%s duplex, %sread, %swrite)\n",
+ dev_name(&slave->dev),
+ cdata->half_duplex ? "half" : "full",
+ cdata->read ? "" : "no ",
+ cdata->write ? "" : "no ");
+ return -EINVAL;
+ }
+
+ err = p7spi_setup_core(core, slave, &ctrl_reg);
+ /* -EAGAIN means shared resources are not ready yet: ask the SPI
+ * layer to retry setup later. */
+ if (err == -EAGAIN)
+ return -EPROBE_DEFER;
+ else if (err)
+ return err;
+
+ /* Setup clock and timings. */
+ ctrl->base_hz = clk_get_rate(p7spi_kern.clk);
+ p7spim_adjust_clk(ctrl,
+ pdata,
+ cdata,
+ slave->max_speed_hz ? : pdata->max_master_hz);
+
+ /* Setup transfer mode. */
+ ctrl->mode = cdata->xfer_mode << P7SPI_INSTR_SPI_TMODE_SHIFT;
+
+ /* Start core in master mode. */
+ ctrl_reg |= P7SPI_CTRL_MSTR | P7SPI_CTRL_ENABLE;
+ p7spi_enable_core(core, slave, ctrl_reg);
+
+
+ dev_info(p7spi_core_dev(core),
+ "enabled device %s on master core:\n"
+ "\t%s duplex/%sread/%swrite\n"
+ "\tmode: %s\n"
+ "\tclock: polarity=%d phase=%d\n"
+ "\tfifo: %uB/%uB",
+ dev_name(&slave->dev),
+ cdata->half_duplex ? "half" : "full",
+ cdata->read ? "" : "no ",
+ cdata->write ? "" : "no ",
+ p7spi_mode_name(cdata->xfer_mode),
+ !! (slave->mode & SPI_CPOL),
+ !! (slave->mode & SPI_CPHA),
+ (unsigned int) core->thres_sz,
+ (unsigned int) core->fifo_sz);
+ return 0;
+}
+
+/* Operations handed over to the common Parrot7 SPI core layer. */
+static struct p7spi_ops p7spim_ops __devinitdata = {
+ .setup = p7spim_setup,
+ .xfer = p7spim_xfer_msg,
+ .rt = false,
+ .dma_cyclic = false,
+};
+
+/*
+ * Platform probe: create the common controller state then register the
+ * core with the SPI layer, undoing creation on registration failure.
+ */
+static int __devinit p7spim_probe(struct platform_device* pdev)
+{
+ struct p7spim_core* ctrl;
+ int err;
+
+ err = p7spi_create_ctrl(pdev, &p7spim_ops, struct p7spim_core);
+ if (err)
+ return err;
+
+ ctrl = (struct p7spim_core*) platform_get_drvdata(pdev);
+ err = p7spi_register(pdev, &ctrl->core);
+ if (err)
+ p7spi_abort_create(pdev);
+
+ return err;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System suspend handler: quiesce the SPI message queue, save the control
+ * register (restored by p7spim_resume), disable the core then its clock.
+ */
+static int p7spim_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct p7spi_core* core = platform_get_drvdata(pdev);
+ struct p7spim_core* ctrl = (struct p7spim_core*) core;
+ int err;
+
+ err = spi_master_suspend(core->ctrl);
+ if (err)
+ return err;
+
+ /* Save CTRL so resume can re-enable with identical settings. */
+ ctrl->saved_ctrl = __raw_readl(core->vaddr + P7SPI_CTRL);
+ /* Writing 0 disables the core. */
+ __raw_writel(0, core->vaddr + P7SPI_CTRL);
+ clk_disable_unprepare(core->clk);
+
+ return 0;
+}
+
+/*
+ * System resume handler: re-enable the core clock, resume the generic SPI
+ * layer queue and restore the control register saved at suspend time.
+ */
+static int p7spim_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct p7spi_core* core = platform_get_drvdata(pdev);
+ struct p7spim_core* ctrl = (struct p7spim_core*) core;
+ int err;
+
+ /* clk_prepare_enable() may fail: propagate instead of ignoring. */
+ err = clk_prepare_enable(core->clk);
+ if (err)
+ return err;
+
+ err = spi_master_resume(core->ctrl);
+ if (err)
+ return err;
+
+ /* Invalidate cached bus rate so the next transfer reprograms it. */
+ ctrl->curr_hz = 0;
+
+ /* Restore the CTRL register content saved by p7spim_suspend(). */
+ p7spi_enable_core(core, NULL, ctrl->saved_ctrl);
+ return 0;
+}
+#else
+#define p7spim_suspend NULL
+#define p7spim_resume NULL
+#endif
+
+/*
+ * PM callbacks (suspend / resume compile down to NULL when
+ * CONFIG_PM_SLEEP is off). Declared const: the PM core never writes it
+ * and struct platform_driver's .pm field is a const pointer.
+ */
+static const struct dev_pm_ops p7spim_dev_pm_ops = {
+ .suspend = p7spim_suspend,
+ .resume = p7spim_resume,
+};
+
+/* Platform driver glue: probe / remove plus power-management hooks. */
+static struct platform_driver p7spim_driver = {
+ .driver = {
+ .name = P7SPIM_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &p7spim_dev_pm_ops,
+ },
+ .probe = p7spim_probe,
+ /* Teardown is shared with the common core layer. */
+ .remove = __devexit_p(p7spi_destroy_ctrl),
+};
+
+/*
+ * Module entry point: honour the "noirq" / "nodma" module parameters
+ * (declared elsewhere in this file), select the slave-select instruction
+ * flavour matching the silicon revision, then register the driver.
+ */
+static int __init p7spim_init(void)
+{
+ if (! noirq)
+ p7spim_ops.handle_irq = p7spim_handle_irq;
+
+ if (! nodma)
+ p7spim_ops.dma = true;
+
+ /* MPW1 releases slave-select at end of byte, later revisions at
+ * end of instruction. */
+ if (p7spi_kern.rev == SPI_REVISION_1)
+ p7spim_SSEND = P7SPI_INSTR_SS_END_BYTE_HIGH;
+ else
+ p7spim_SSEND = P7SPI_INSTR_SS_END_INSTR_HIGH;
+
+ return platform_driver_register(&p7spim_driver);
+}
+module_init(p7spim_init);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit p7spim_exit(void)
+{
+ platform_driver_unregister(&p7spim_driver);
+}
+module_exit(p7spim_exit);
+
+MODULE_AUTHOR("Alvaro Moran - Parrot S.A. <alvaro.moran@parrot.com>");
+MODULE_DESCRIPTION("Parrot7 SPI master controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/spi/p7-spim.h b/drivers/parrot/spi/p7-spim.h
new file mode 100644
index 00000000000000..13beaeea4c1c92
--- /dev/null
+++ b/drivers/parrot/spi/p7-spim.h
@@ -0,0 +1,24 @@
+/**
+ * linux/drivers/parrot/spi/p7-spim.h - Parrot7 SPI master driver
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author: alvaro.moran@parrot.com
+ * @date: 2012-10-01
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7_SPIM_H
+#define _P7_SPIM_H
+
+#if defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE)
+
+#define P7SPIM_DRV_NAME "p7-spim"
+
+#endif /* defined(CONFIG_SPI_MASTER_PARROT7) || \
+ defined(CONFIG_SPI_MASTER_PARROT7_MODULE) */
+
+#endif
diff --git a/drivers/parrot/spi/p7-spim_test.c b/drivers/parrot/spi/p7-spim_test.c
new file mode 100644
index 00000000000000..b5dd37ff4b40b3
--- /dev/null
+++ b/drivers/parrot/spi/p7-spim_test.c
@@ -0,0 +1,175 @@
+/*
+ * @file p7_spi_test.c
+ * @brief Driver for Parrot7 SPI controller, master mode (test module)
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author alvaro.moran@parrot.com
+ * @date 2012-06-18
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+MODULE_AUTHOR( "Alvaro Moran - Parrot S.A. <alvaro.moran@parrot.com>" );
+MODULE_DESCRIPTION( "Parrot7 SPI master test module" );
+MODULE_LICENSE( "GPL" );
+
+
+static unsigned int rx_len = 0;
+module_param(rx_len, uint, 0644);
+
+static unsigned int tx_len = 0;
+module_param(tx_len, uint, 0644);
+
+static unsigned int verbose = 1;
+module_param(verbose, uint, 0644);
+
+static unsigned int seed = 0;
+module_param(seed, uint, 0644);
+
+static unsigned int speed = 1 * 1000 * 1000;
+module_param(speed, uint, 0644);
+
+
+
+#define VPRINT(_format, ...) if (verbose) printk(_format, ## __VA_ARGS__);
+
+
+/**
+ * spi_tx_rx - half-duplex alternative to spi_write_then_read that allows
+ * bigger buffers.
+ * @spi: slave device to address
+ * @buf: holds the @n_tx bytes to send, followed by room for @n_rx
+ * received bytes
+ * @n_tx: number of bytes to transmit (0 for a pure read)
+ * @n_rx: number of bytes to receive (0 for a pure write)
+ *
+ * Exactly one of @n_tx / @n_rx may be non-zero. Returns 0 on success or
+ * a negative error code (-EINVAL rather than the former bare -1 when both
+ * directions are requested).
+ */
+int spi_tx_rx(struct spi_device *spi,
+ const u8 *buf, unsigned n_tx,
+ unsigned n_rx)
+{
+ struct spi_message message;
+ struct spi_transfer x[2];
+
+ /* This helper is half-duplex only. */
+ if (n_tx && n_rx)
+ return -EINVAL;
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof x);
+ if (n_tx) {
+ x[0].len = n_tx;
+ /* Only touch the transfer actually queued. */
+ x[0].tx_buf = buf;
+ spi_message_add_tail(&x[0], &message);
+ }
+ if (n_rx) {
+ x[1].len = n_rx;
+ x[1].speed_hz = speed;
+ x[1].rx_buf = (u8*)(buf + n_tx);
+ spi_message_add_tail(&x[1], &message);
+ }
+
+ /* do the i/o */
+ return spi_sync(spi, &message);
+}
+
+
+/*
+ * Probe doubles as the test itself: optionally transmit tx_len patterned
+ * bytes, then optionally receive rx_len bytes and dump them.
+ *
+ * Fixes over the original: the tx buffer is no longer leaked when the rx
+ * allocation fails, and transfer errors are propagated to the caller
+ * instead of always returning 0.
+ */
+static int __devinit p7_spi_test_probe(struct spi_device *spi)
+{
+ u8* tx = NULL;
+ u8* rx = NULL;
+ int ret = 0;
+ unsigned int i;
+
+ /* Only exercise bus 2, where the test slave is expected. */
+ if (spi->master->bus_num != 2)
+ return -ENODEV;
+
+ if (tx_len) {
+ tx = kmalloc(tx_len, GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ /* Init with a simple pattern */
+ for (i = 0; i < tx_len; i++)
+ tx[i] = (seed + i) & 0xff;
+ ret = spi_tx_rx(spi, tx, tx_len, 0);
+ if (ret < 0) {
+ printk("Err in tx: %d\n", ret);
+ goto exit;
+ }
+ msleep(10);
+ }
+ if (rx_len) {
+ rx = kzalloc(rx_len, GFP_KERNEL);
+ if (!rx) {
+ /* Don't leak the tx buffer allocated above. */
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = spi_tx_rx(spi, rx, 0, rx_len);
+ if (ret < 0) {
+ printk("Err in rx: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ if (tx) {
+ VPRINT("Master Transfer OK (%u bytes)\n", tx_len);
+ }
+ if (rx) {
+ VPRINT("Master Received OK (%u bytes): ", rx_len);
+ for (i = 0; i < rx_len; i++)
+ VPRINT("%02x ", rx[i]);
+ VPRINT("\n");
+ }
+
+exit:
+ /* kfree(NULL) is a no-op: no need to test first. */
+ kfree(rx);
+ kfree(tx);
+ return ret;
+}
+
+/* Nothing to tear down: probe releases all its buffers itself. */
+static int __devexit p7_spi_test_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+
+/*
+ * SPI driver glue. NOTE(review): the "spidev" name makes this test
+ * driver bind to devices declared for the stock spidev driver — confirm
+ * this shadowing is intentional on the target boards.
+ */
+static struct spi_driver p7_spi_test_driver = {
+ .driver = {
+ .name = "spidev",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = p7_spi_test_probe,
+ .remove = __devexit_p(p7_spi_test_remove),
+};
+
+/* Register the test driver with the SPI core. */
+static int __init p7_spi_test_init(void)
+{
+ return spi_register_driver(&p7_spi_test_driver);
+}
+
+/* Unregister the test driver. */
+static void __exit p7_spi_test_exit( void )
+{
+ spi_unregister_driver(&p7_spi_test_driver);
+}
+
+module_init( p7_spi_test_init );
+module_exit( p7_spi_test_exit );
diff --git a/drivers/parrot/spi/p7-spis.c b/drivers/parrot/spi/p7-spis.c
new file mode 100644
index 00000000000000..c93b9be09bdae7
--- /dev/null
+++ b/drivers/parrot/spi/p7-spis.c
@@ -0,0 +1,961 @@
+/**
+ * linux/drivers/parrot/spi/p7-spis.c - Parrot7 SPI slave controller driver
+ * implementation
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author: alvaro.moran@parrot.com
+ * @author: Gregor Boirie <gregor.boirie@parrot.com>
+ * @author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * @date: 2012-10-13
+ *
+ * This file is released under the GPL
+ *
+ * Notes:
+ * ------
+ * To see implementation details, check also 7-spim.c.
+ *
+ * DMA cyclic:
+ * One of the requirements of this driver is to handle high speed transfer
+ * rate, up to 60Mbits/s. In slave mode, we don't have any control on the
+ * clock and the FIFO is quite short (as few as 16 words). Using DMA in a
+ * typical way means that you only have the depth of the FIFO to prepare the
+ * next DMA request. We just can't do that with such a high transfer rate. The
+ * solution chosen here is to use DMA in cyclic mode. In this mode, the DMAC is
+ * given a circular buffer split into periods. At the end of each period, a
+ * completion handler is called but the DMA keeps transfering. Because the
+ * circular buffer length is configurable, time constraints are less severe.
+ *
+ * Instrumentation:
+ * Enabling SPI slave instrumentation allows to measure the time, for a
+ * period, between the DMA completion handler call and the end of the copy.
+ * It is the time for scheduling (from tasklet to kthread) + the time to
+ * make a copy (from circular buffer to SPI transfer buffer).
+ * So to get a significant measure, you must transfer one SPI message that is
+ * n-period long to get n values. You'll get min, max and mean.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/dmaengine.h>
+#include <asm/io.h>
+#include "p7-spi.h"
+#include "p7-spi_priv.h"
+#include "p7-spis.h"
+
+
+/* Module parameters override the board-provided circular buffer geometry
+ * (0 means "use the value from controller_data"). */
+static unsigned int period_len = 0;
+module_param(period_len, uint, 0644);
+MODULE_PARM_DESC(period_len, "period length in bytes");
+
+static unsigned int nb_periods = 0;
+module_param(nb_periods, uint, 0644);
+MODULE_PARM_DESC(nb_periods, "number of periods");
+
+
+/* Life cycle of one period of the DMA circular buffer. */
+enum p7spis_period_state {
+ P7SPIS_PERIOD_FILLED, /* buffer is ready for copy */
+ P7SPIS_PERIOD_CONSUMED, /* buffer has been read */
+ P7SPIS_PERIOD_PREP, /* DMA xfer in progress */
+};
+
+/* One period of the circular buffer. */
+struct p7spis_period_desc {
+ char* buf; /* start of this period's data */
+ size_t pos; /* read offset of the consumer */
+
+ enum p7spis_period_state state;
+
+ spinlock_t lock;
+};
+
+
+/*
+ * Slave core instance internal state
+ */
+struct p7spis_core {
+ /* Common SPI core instance */
+ struct p7spi_core core;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+
+ /* DMA circular buffer and its geometry. */
+ char* circ_buf;
+ size_t circ_buf_len;
+ size_t circ_buf_period_len;
+
+ /* Ring of period descriptors covering circ_buf. */
+ struct p7spis_period_desc *periods;
+ unsigned int nb_periods;
+ struct p7spis_period_desc *circ_head;
+ struct p7spis_period_desc *circ_tail;
+
+ /* Messages waiting for data, served by the DMA completion tasklet. */
+ struct list_head pending_messages;
+ bool in_tasklet;
+ bool paused;
+ u32 ctrl_reg; /* CTRL content saved across disable/enable */
+};
+
+
+
+/*
+ * Given a p7spis_core and a period descriptor, returns the following
+ * period descriptor, abstracting the way they are stored.
+ */
+static struct p7spis_period_desc* p7spis_get_next_period_desc(
+ struct p7spis_core* ctrl,
+ struct p7spis_period_desc* desc)
+{
+ /* Period descriptors form a flat array used as a ring: wrap after
+ * the last entry. */
+ int const next = (desc - ctrl->periods) + 1;
+
+ if (next == ctrl->nb_periods)
+ return &ctrl->periods[0];
+
+ return &ctrl->periods[next];
+}
+
+
+/*
+ * Copy as much data as possible from one circular buffer period into the
+ * receive buffer of an SPI transfer. rem_bytes holds the number of bytes
+ * still missing to complete the transfer and is decremented by the amount
+ * actually copied, which is also returned.
+ */
+static size_t p7spis_copy_period(struct spi_transfer *xfer,
+ size_t *rem_bytes,
+ struct p7spis_period_desc *period,
+ size_t period_len)
+{
+ /* First byte of xfer->rx_buf not yet filled. */
+ void* const dst = xfer->rx_buf + xfer->len - *rem_bytes;
+ size_t const avail = period_len - period->pos;
+ size_t const chunk = (*rem_bytes < avail) ? *rem_bytes : avail;
+
+ memcpy(dst, &period->buf[period->pos], chunk);
+ *rem_bytes -= chunk;
+
+ return chunk;
+}
+
+
+/*
+ * This function pushes the head if it has been consumed.
+ */
+static void p7spis_head_consumed(struct p7spis_core *ctrl,
+ struct p7spis_period_desc *head,
+ size_t bytes_copied)
+{
+ /* Account for the bytes just read out of this period. */
+ head->pos += bytes_copied;
+
+ /* Once fully drained, mark the period reusable and advance the
+ * ring head. */
+ if (head->pos != ctrl->circ_buf_period_len)
+ return;
+
+ head->state = P7SPIS_PERIOD_CONSUMED;
+ ctrl->circ_head = p7spis_get_next_period_desc(ctrl, head);
+}
+
+/*
+ * Flush SPI slave circular buffer content.
+ */
+void p7spis_flush(struct spi_master *master)
+{
+ struct p7spis_core * const ctrl =
+ spi_master_get_devdata(master);
+
+ /* Walk the ring from head to tail, dropping buffered data; _bh
+ * locking serializes against the DMA completion tasklet. */
+ spin_lock_bh(&ctrl->lock);
+ while (ctrl->circ_head != ctrl->circ_tail) {
+ ctrl->circ_head->state = P7SPIS_PERIOD_CONSUMED;
+ ctrl->circ_head = p7spis_get_next_period_desc(ctrl, ctrl->circ_head);
+ }
+ spin_unlock_bh(&ctrl->lock);
+}
+EXPORT_SYMBOL(p7spis_flush);
+
+
+/*
+ * This completion handler is called in a tasklet context everytime a period
+ * completes.
+ *
+ * The period is first synced, making data valid and consistent when read by
+ * CPU. The tail is then moved to the following period. Note that at this time,
+ * DMA can already have started transfering to the new tail.
+ * To handle that case, we set the period following the new tail as PREP (owned
+ * by DMA).
+ * If the head was pointing to this period, move it one period forward to
+ * prevent from pointing to a period where we can't read data.
+ */
+static void p7spis_complete_dma_cyclic(void* arg)
+{
+ struct p7spis_core* const ctrl = arg;
+ struct p7spi_core* const core = &ctrl->core;
+ struct p7spis_period_desc *desc;
+
+
+ spin_lock(&ctrl->lock);
+ ctrl->in_tasklet = true;
+ /* Hand the just-completed period back to the CPU so its content is
+ * coherent before anyone reads it. */
+ dma_sync_single_for_cpu(core->dma_chan->device->dev,
+ core->dma_addr + (ctrl->circ_tail->buf - ctrl->circ_buf),
+ ctrl->circ_buf_period_len,
+ core->dma_cfg.direction);
+ ctrl->circ_tail->state = P7SPIS_PERIOD_FILLED;
+
+ /* Move tail to the next period */
+ ctrl->circ_tail = p7spis_get_next_period_desc(ctrl, ctrl->circ_tail);
+
+ /*
+ * When this completion handler is called, the buffer just after the one
+ * that triggered the completion may already be receiving DMA transfers.
+ * That's why we get the next period and set it as busy (write in progress
+ * by DMA).
+ */
+ desc = p7spis_get_next_period_desc(ctrl, ctrl->circ_tail);
+
+ /*
+ * push head forward if necessary to prevent
+ * reader from reading out of date data
+ */
+ if (ctrl->circ_head == desc) {
+ if (desc->state == P7SPIS_PERIOD_FILLED)
+ dev_warn(p7spi_core_dev(core),
+ "overwritting a non-consumed buffer\n");
+ ctrl->circ_head = p7spis_get_next_period_desc(ctrl, desc);
+ }
+
+ /* Give the recycled period back to the device before DMA reuses it. */
+ if (desc->state == P7SPIS_PERIOD_FILLED ||
+ desc->state == P7SPIS_PERIOD_CONSUMED) {
+ dma_sync_single_for_device(core->dma_chan->device->dev,
+ core->dma_addr + (desc->buf - ctrl->circ_buf),
+ ctrl->circ_buf_period_len,
+ core->dma_cfg.direction);
+ desc->pos = 0;
+
+ }
+ desc->state = P7SPIS_PERIOD_PREP;
+
+ /*
+ * Alright ! We do the copy in tasklet if there are pending spi
+ * messages.
+ */
+ if (! list_empty(&ctrl->pending_messages)) {
+ struct spi_message *mesg, *next;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry_safe(mesg, next, &ctrl->pending_messages, queue) {
+ size_t bytes_copied = 0;
+
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ size_t bytes;
+
+ /* The copy may have been started in p7spis_transfer. Get to
+ * the first spi_transfer that is not complete */
+ if (mesg->actual_length >= (xfer->len + bytes_copied)) {
+ bytes_copied += xfer->len;
+ continue;
+ }
+
+ bytes = xfer->len - (mesg->actual_length - bytes_copied);
+ while (bytes) {
+ struct p7spis_period_desc *head = ctrl->circ_head;
+
+ if (head->state == P7SPIS_PERIOD_FILLED) {
+ size_t copy = p7spis_copy_period(xfer, &bytes,
+ head, ctrl->circ_buf_period_len);
+
+ p7spis_head_consumed(ctrl, head, copy);
+ mesg->actual_length += copy;
+ bytes_copied += copy;
+ }
+ else {
+ /* No more data available: leave the message
+ * queued and wait for the next period. */
+ spin_unlock(&ctrl->lock);
+ ctrl->in_tasklet = false;
+ return;
+ }
+ }
+ }
+
+ /* Message complete: unqueue it, then release the lock
+ * BEFORE calling complete() (see XXX note below). */
+ list_del_init(&mesg->queue);
+ spin_unlock(&ctrl->lock);
+ mesg->state = NULL;
+ mesg->status = 0;
+ /* XXX complete can call spi_async and
+ call p7spis_transfer, that will change the msg list
+ we are reading...
+ */
+ mesg->complete(mesg->context);
+ ctrl->in_tasklet = false;
+ return;
+// spin_lock(&ctrl->lock);
+ }
+ }
+
+ ctrl->in_tasklet = false;
+ spin_unlock(&ctrl->lock);
+}
+
+
+/*
+ * Config DMA. Basically, we force direction to DEV_TO_MEM and the source
+ * address as the fifo address, whatever settings are in p7spis_core.
+ */
+/*
+ * Configure the DMA channel for cyclic reception: the slave core only
+ * receives, so force device-to-memory transfers sourced at the FIFO,
+ * whatever settings p7spis_core currently holds.
+ */
+static int p7spis_config_dma_cyclic(struct p7spis_core *ctrl)
+{
+ struct p7spi_core* const core = &ctrl->core;
+
+ core->dma_cfg.direction = DMA_DEV_TO_MEM;
+ core->dma_cfg.src_addr = core->fifo_paddr;
+
+ return dmaengine_slave_config(core->dma_chan, &core->dma_cfg);
+}
+
+
+/*
+ * Map circular buffer.
+ */
+static ssize_t p7spis_map_dma_cyclic(struct p7spis_core* ctrl)
+{
+ struct p7spi_core *core = &ctrl->core;
+
+#ifdef DEBUG
+ /* Sanity checks: config/alignment invariants set up beforehand. */
+ BUG_ON(! p7spi_kern.dev);
+ BUG_ON(core->dma_cfg.direction != DMA_DEV_TO_MEM);
+ BUG_ON(core->dma_cfg.src_addr != core->fifo_paddr);
+ BUG_ON(! ctrl->circ_buf_len);
+ BUG_ON(! IS_ALIGNED((unsigned long) ctrl->circ_buf, dma_get_cache_alignment()));
+ BUG_ON(! IS_ALIGNED(ctrl->circ_buf_len, dma_get_cache_alignment()));
+ BUG_ON(ctrl->circ_buf_len % core->dma_thres);
+#endif
+
+ /* dma_map_single requires lowmem (virt_addr_valid) addresses. */
+ BUG_ON(! virt_addr_valid(ctrl->circ_buf));
+ BUG_ON(! virt_addr_valid(ctrl->circ_buf + ctrl->circ_buf_len - 1));
+
+ /* Streaming mapping of the whole circular buffer; unmapped by
+ * p7spis_unmap_dma_cyclic. */
+ core->dma_addr = dma_map_single(core->dma_chan->device->dev,
+ ctrl->circ_buf,
+ ctrl->circ_buf_len,
+ core->dma_cfg.direction);
+ if (unlikely(dma_mapping_error(core->dma_chan->device->dev,
+ core->dma_addr)))
+ return -ENOBUFS;
+
+ /* Return the mapped length (positive) on success. */
+ return ctrl->circ_buf_len;
+}
+
+
+/*
+ * Unmap circular buffer.
+ */
+/*
+ * Tear down the streaming DMA mapping installed by
+ * p7spis_map_dma_cyclic().
+ */
+static inline void p7spis_unmap_dma_cyclic(struct p7spis_core* ctrl)
+{
+ struct p7spi_core* const core = &ctrl->core;
+ struct device* const dev = core->dma_chan->device->dev;
+
+ dma_unmap_single(dev, core->dma_addr, ctrl->circ_buf_len,
+ core->dma_cfg.direction);
+}
+
+
+/*
+ * Starts the DMA cyclic operation.
+ */
+static int p7spis_run_dma_cyclic(struct p7spis_core* ctrl,
+ void (*complete)(void*))
+{
+ int err;
+ struct p7spi_core *core = &ctrl->core;
+ struct dma_chan *chan = core->dma_chan;
+ /* Prepare a cyclic descriptor covering the whole circular buffer,
+ * with one completion callback per period. */
+ struct dma_async_tx_descriptor* const txd =
+ chan->device->device_prep_dma_cyclic(chan,
+ core->dma_addr,
+ ctrl->circ_buf_len,
+ ctrl->circ_buf_period_len,
+ core->dma_cfg.direction,
+ NULL);
+
+ if (! txd)
+ return -ENOMEM;
+
+ /* complete() runs in tasklet context for every elapsed period. */
+ txd->callback = complete;
+ txd->callback_param = ctrl;
+
+ core->dma_cook = dmaengine_submit(txd);
+ err = dma_submit_error(core->dma_cook);
+ if (err)
+ return err;
+
+ dma_async_issue_pending(core->dma_chan);
+ dev_dbg(p7spi_core_dev(core), "Starting cylic DMA\n");
+
+ return 0;
+}
+
+
+/*
+ * Configure, map and start cyclic DMA reception in one go.
+ * NOTE(review): "cylic" typo kept in the name — renaming would touch
+ * every caller in this file.
+ */
+static int p7spis_prepare_dma_cylic(struct p7spis_core* ctrl)
+{
+ int err = 0;
+ ssize_t mapped;
+
+ /*
+ * Setup DMA operation properties for cyclic buffer.
+ */
+ err = p7spis_config_dma_cyclic(ctrl);
+ if (err)
+ return err;
+
+ p7spi_cook_dmaif(&ctrl->core);
+
+ /*
+ * Map circular buffer.
+ */
+ mapped = p7spis_map_dma_cyclic(ctrl);
+ if (mapped < 0)
+ return mapped;
+
+ /* Submit DMA operation. */
+ err = p7spis_run_dma_cyclic(ctrl, p7spis_complete_dma_cyclic);
+
+ if (err) {
+ /* unmap only if an error occured */
+ p7spis_unmap_dma_cyclic(ctrl);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * spi_async() entry point for the slave core (installed over the default
+ * transfer path at probe time). Serves read-only messages out of the DMA
+ * circular buffer; messages that cannot be served immediately are queued
+ * on pending_messages and completed from the DMA completion tasklet.
+ *
+ * Each transfer length must be a non-zero multiple of the period length
+ * (see deadlock note below). Fixed over the original: size_t values are
+ * now printed with %zu and the split log messages no longer run words
+ * together.
+ */
+int p7spis_transfer(struct spi_device *spi, struct spi_message *mesg)
+{
+ struct spi_master *slave = spi->master;
+ struct p7spis_core* const ctrl = spi_master_get_devdata(slave);
+ struct p7spi_core* const core = &ctrl->core;
+ struct spi_transfer* xfer;
+ int err;
+
+
+ err = 0;
+ mesg->actual_length = 0;
+
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ size_t bytes;
+
+ bytes = xfer->len;
+ if (bytes < ctrl->circ_buf_period_len) {
+ /* we need each transfert to be at least circ_buf_period_len,
+ otherwise, we can have recursive spi_async that deadlock :
+ - dma_cyclic call complete
+ - complete call spi_async (p7spis_transfer)
+ there is still data, we get data and call complete
+ - complete call spi_async that deadlock on spinlock
+ */
+ dev_err(p7spi_core_dev(core), "spi message is too short %zu. "
+ "It should be bigger than %zu\n",
+ bytes, ctrl->circ_buf_period_len);
+ err = -EINVAL;
+ goto ret;
+ }
+ if (bytes % ctrl->circ_buf_period_len) {
+ dev_err(p7spi_core_dev(core), "spi message size (%zu) need to be "
+ "multiple of %zu\n",
+ bytes, ctrl->circ_buf_period_len);
+ err = -EINVAL;
+ goto ret;
+ }
+ while (bytes) {
+ struct p7spis_period_desc *head;
+
+ spin_lock(&ctrl->lock);
+ if (ctrl->paused) {
+ spin_unlock(&ctrl->lock);
+ err = -EBUSY;
+ goto ret;
+ }
+ head = ctrl->circ_head;
+
+ if (head->state == P7SPIS_PERIOD_PREP || ctrl->in_tasklet ||
+ !list_empty(&ctrl->pending_messages)) {
+ /* This function may be called from tasklet
+ * with the following flow:
+ * - We were in tasklet
+ * -> called spi message completion
+ * -> in msg completion, message is resubmitted (spy_async)
+ * -> here we are.
+ * - If there are pending messages, we don't to steal their data,
+ * go in queue
+ * - If the period is not ready yet, go in queue as well
+ */
+ list_add_tail(&mesg->queue, &ctrl->pending_messages);
+ spin_unlock(&ctrl->lock);
+ err = 0;
+ goto ret;
+ }
+ else if (head->state == P7SPIS_PERIOD_FILLED) {
+ size_t copy;
+
+ /* make the copy without holding the lock */
+ spin_unlock(&ctrl->lock);
+
+ copy = p7spis_copy_period(xfer, &bytes, head,
+ ctrl->circ_buf_period_len);
+
+ /* check if head has changed */
+ spin_lock(&ctrl->lock);
+ if (head != ctrl->circ_head) {
+ /* damn ! DMA completion handler pushed the head forward,
+ * It means that it wants this buffer for DMA operation and
+ * that this buffer is invalid
+ */
+ spin_unlock(&ctrl->lock);
+ err = -EINVAL;
+ dev_err(p7spi_core_dev(core),
+ "head has changed, message is invalid\n");
+ goto finalize_message;
+ }
+
+ p7spis_head_consumed(ctrl, head, copy);
+ spin_unlock(&ctrl->lock);
+
+ mesg->actual_length += copy;
+ dev_dbg(p7spi_core_dev(core),
+ "xfered %zuB from buffer [%p] to spi_transfer [%p]\n",
+ copy, &head->buf[head->pos - copy],
+ xfer->rx_buf + xfer->len - bytes - copy);
+ }
+ else {
+ /*
+ * We shouldn't be here.
+ */
+ enum p7spis_period_state state = head->state;
+ spin_unlock(&ctrl->lock);
+ dev_err(p7spi_core_dev(core),
+ "Unexpected state value: 0x%X\n", state);
+ BUG();
+ }
+ }
+
+ }
+finalize_message:
+ mesg->state = NULL;
+ mesg->status = err;
+ mesg->complete(mesg->context);
+ret:
+ return err;
+}
+
+
+/*
+ * Release the circular buffer and its period descriptor array.
+ * Both pointers are reset to NULL so p7spis_setup() can safely
+ * re-allocate later and no dangling ctrl->periods is left behind
+ * (the original only cleared circ_buf).
+ */
+static void p7spis_clean_circ_buffer(struct p7spis_core *ctrl)
+{
+ /* kfree(NULL) is a no-op: no need to test first. */
+ kfree(ctrl->periods);
+ kfree(ctrl->circ_buf);
+
+ ctrl->periods = NULL;
+ ctrl->circ_buf = NULL;
+}
+
+/*
+ * Allocate and initialize the DMA circular buffer and its ring of period
+ * descriptors. Geometry comes from controller_data, overridable through
+ * the period_len / nb_periods module parameters; values are corrected to
+ * satisfy DMA constraints (at least 3 periods, period length a multiple
+ * of core->dma_min).
+ */
+static int p7spis_setup_circ_buffer(struct p7spis_core *ctrl,
+ struct p7spis_ctrl_data const * const cdata)
+{
+ struct p7spi_core* const core = &ctrl->core;
+
+ int i;
+ int err = 0;
+ struct p7spis_period_desc *p_desc;
+ unsigned int p_len;
+ unsigned periods;
+
+ if (! core->dma_chan) {
+ dev_err(p7spi_core_dev(core),
+ "can't proceed without DMA\n");
+ return -EINVAL;
+ }
+
+ /* Module parameters take precedence over board data. */
+ p_len = period_len ? period_len : cdata->circ_buf_period;
+
+ periods = nb_periods ? nb_periods : cdata->periods;
+ /* Fewer than 3 periods cannot sustain the head/tail/prep rotation. */
+ ctrl->nb_periods = max_t(unsigned int, periods, 3);
+
+ if (periods != ctrl->nb_periods) {
+ dev_warn(p7spi_core_dev(core),
+ "correcting number of periods (%d -> %d)\n",
+ periods, ctrl->nb_periods);
+ }
+
+
+
+ /* adjusts buffer size */
+ ctrl->circ_buf_period_len = roundup(max_t(size_t, 1, p_len),
+ core->dma_min);
+ ctrl->circ_buf_len = ctrl->nb_periods * ctrl->circ_buf_period_len;
+ if (ctrl->circ_buf_period_len != p_len) {
+ dev_info(p7spi_core_dev(core),
+ "correcting circular buffer period length (%dB -> %dB)"
+ " (multiple of %d)\n",
+ p_len, ctrl->circ_buf_period_len,
+ core->dma_min);
+ }
+
+ /* allocate memory for circular buffer */
+ ctrl->circ_buf = kzalloc(ctrl->circ_buf_len, GFP_KERNEL | GFP_DMA);
+ if (ctrl->circ_buf == NULL) {
+ dev_err(p7spi_core_dev(core),
+ "Failed to allocate bytes for circular buffer (len: %d)\n",
+ ctrl->circ_buf_len);
+ return -ENOMEM;
+ }
+
+ /* allocate memory for period descriptor array */
+ ctrl->periods = kzalloc(ctrl->nb_periods * sizeof(struct p7spis_period_desc),
+ GFP_KERNEL);
+ if (ctrl->periods == NULL) {
+ dev_err(p7spi_core_dev(core),
+ "Failed to allocate memory for period descriptors\n");
+ err = -ENOMEM;
+ goto free_period_desc;
+ }
+
+ /* Initialize periods */
+ for(i = 0; i < ctrl->nb_periods; i++) {
+
+ p_desc = &ctrl->periods[i];
+
+ /* Each descriptor covers one slice of the circular buffer and
+ * starts as owned by DMA. */
+ p_desc->buf = ctrl->circ_buf + i * ctrl->circ_buf_period_len;
+ p_desc->state = P7SPIS_PERIOD_PREP;
+ spin_lock_init(&p_desc->lock);
+ }
+
+ /* Empty ring: head and tail both point at the first period. */
+ ctrl->circ_head = ctrl->circ_tail = ctrl->periods;
+
+ return 0;
+
+free_period_desc:
+ p7spis_clean_circ_buffer(ctrl);
+ return err;
+}
+
+
+
+/*
+ * Disable the core, saving the control register so
+ * p7spis_enable_core() can restore it identically.
+ */
+static void p7spis_disable_core(struct p7spis_core *ctrl)
+{
+ struct p7spi_core const* const core = &ctrl->core;
+
+ ctrl->ctrl_reg = __raw_readl(core->vaddr + P7SPI_CTRL);
+ /* Writing 0 disables the core. */
+ writel(0, core->vaddr + P7SPI_CTRL);
+
+}
+
+/* Re-enable the core with the CTRL value saved by p7spis_disable_core(). */
+static void p7spis_enable_core(struct p7spis_core *ctrl)
+{
+ struct p7spi_core* const core = &ctrl->core;
+
+ p7spi_enable_core(core, NULL, ctrl->ctrl_reg);
+}
+
+
+/*
+ * Per-device setup callback for the slave core: validate the declared
+ * transfer capabilities, initialize common core state, apply a silicon
+ * workaround on revision 3, enable the core in slave mode (read-only,
+ * DMA-driven) and allocate + arm the cyclic receive buffer on first use.
+ */
+static int p7spis_setup(struct spi_device* master)
+{
+ struct p7spis_core* const ctrl = spi_master_get_devdata(master->master);
+ struct p7spi_core* const core = &ctrl->core;
+ struct p7spis_ctrl_data const* const spis_cdata = master->controller_data;
+ struct p7spi_ctrl_data const* const cdata = &spis_cdata->common;
+ u32 ctrl_reg = 0;
+ int err;
+
+ if (cdata->half_duplex)
+ master->master->flags |= SPI_MASTER_HALF_DUPLEX;
+ if (!cdata->read)
+ master->master->flags |= SPI_MASTER_NO_RX;
+ if (!cdata->write)
+ master->master->flags |= SPI_MASTER_NO_TX;
+ if (! cdata->half_duplex)
+ dev_dbg(p7spi_core_dev(core),
+ "Full duplex transfers are not reliable in slave mode");
+ if (! (cdata->half_duplex || (cdata->read ^ cdata->write))) {
+ dev_err(p7spi_core_dev(core),
+ "invalid transfer mode for device %s (%s duplex, %sread, %swrite)\n",
+ dev_name(&master->dev),
+ cdata->half_duplex ? "half" : "full",
+ cdata->read ? "" : "no ",
+ cdata->write ? "" : "no ");
+ return -EINVAL;
+ }
+
+ err = p7spi_setup_core(core, master, &ctrl_reg);
+ /* -EAGAIN: shared resources not ready yet, ask for a retry. */
+ if (err == -EAGAIN)
+ return -EPROBE_DEFER;
+ else if (err)
+ return err;
+
+ if (p7spi_kern.rev == SPI_REVISION_3) {
+ /*
+ * This is a bug due to unaligned transfer handling. It's not
+ * supposed to be enabled when in free run mode but it is,
+ * triggering a bug every 64KB (every time a 16-bit counter
+ * wraps). Moreover, The alignment info is in the instruction
+ * register which is not reset...
+ * To fix that, we must make the controller belive that it
+ * makes aligned transfers. To do so, we send an instruction
+ * to receive 4B (the size of a word), then we reset the
+ * controller (to stop the instruction, we don't want these
+ * 4B anyway). After that, we can continue as usual, transfers
+ * are now aligned.
+ */
+ u32 temp_reg = ctrl_reg;
+
+ temp_reg |= P7SPI_CTRL_ENABLE;
+ p7spi_enable_core(core, master, temp_reg);
+
+ /* 4-byte read instruction, then reset through clock cycling. */
+ writel(0x80000004, core->vaddr + P7SPI_INSTR);
+ clk_disable_unprepare(core->clk);
+ clk_prepare_enable(core->clk);
+ }
+
+
+ /* Setup transfer mode. */
+ ctrl_reg |= (cdata->xfer_mode << P7SPI_CTRL_SMODE_SHIFT);
+
+ /* Start core in slave mode. Force Read-Only and DMA */
+ ctrl_reg |= P7SPI_CTRL_ENABLE;
+ ctrl_reg |= P7SPI_CTRL_RWS_RO | P7SPI_CTRL_DMAES_ENABLED;
+ if (p7spi_kern.rev) {
+ /* for rev > R1, set RX free run flag */
+ ctrl_reg |= P7SPI_CTRL_RX_FREE_RUNS;
+ }
+ p7spi_enable_core(core, master, ctrl_reg);
+
+ /* First setup call: allocate the ring and start cyclic DMA. */
+ if (ctrl->circ_buf == NULL) {
+ err = p7spis_setup_circ_buffer(ctrl, spis_cdata);
+ if (err)
+ goto disable_core;
+
+ err = p7spis_prepare_dma_cylic(ctrl);
+ if (err)
+ goto free_circ_buf;
+
+ ctrl->paused = false;
+ }
+
+ dev_info(p7spi_core_dev(core),
+ "enabled device %s on slave core:\n"
+ "\t%s duplex/%sread/%swrite\n"
+ "\tmode: %s\n"
+ "\tclock: polarity=%d phase=%d\n"
+ "\tfifo: %uB/%uB\n"
+ "\tbuffer: [%p] %uB (%d periods)\n",
+ dev_name(&master->dev),
+ cdata->half_duplex ? "half" : "full",
+ cdata->read ? "" : "no ",
+ cdata->write ? "" : "no ",
+ p7spi_mode_name(cdata->xfer_mode),
+ !! (master->mode & SPI_CPOL),
+ !! (master->mode & SPI_CPHA),
+ (unsigned int) core->thres_sz,
+ (unsigned int) core->fifo_sz,
+ ctrl->circ_buf, ctrl->circ_buf_len, ctrl->nb_periods);
+ return 0;
+
+free_circ_buf:
+ p7spis_clean_circ_buffer(ctrl);
+disable_core:
+ p7spis_disable_core(ctrl);
+
+ dev_err(p7spi_core_dev(core),
+ "failed to enable device %s on slave core:\n",
+ dev_name(&master->dev));
+ return err;
+}
+
+/*
+ * Pause the slave core: disable it, abort cyclic DMA, reset the period
+ * ring to its empty state and fail every queued message with -ENODEV.
+ * Caller must hold ctrl->lock (see p7spis_pause).
+ */
+static int __p7spis_pause(struct spi_master* master)
+{
+ int i;
+ struct p7spis_core * const ctrl =
+ spi_master_get_devdata(master);
+
+ /*
+ * TODO:
+ * - don't pause until all pending messages are consumed
+ * - return error code when calling spi_async and core is paused
+ */
+ if (ctrl->paused) {
+ return -ESHUTDOWN;
+ }
+
+ p7spis_disable_core(ctrl);
+ dmaengine_terminate_all(ctrl->core.dma_chan);
+ /* Reset the ring: every period goes back to DMA ownership. */
+ ctrl->circ_head = ctrl->circ_tail = ctrl->periods;
+ ctrl->paused = true;
+ for(i = 0; i < ctrl->nb_periods; i++) {
+ struct p7spis_period_desc *desc = &ctrl->periods[i];
+ desc->state = P7SPIS_PERIOD_PREP;
+ desc->pos = 0;
+ }
+ /* Complete queued messages with an error so submitters unblock. */
+ if (! list_empty(&ctrl->pending_messages)) {
+ struct spi_message *mesg, *next;
+
+ list_for_each_entry_safe(mesg, next, &ctrl->pending_messages, queue) {
+ list_del_init(&mesg->queue);
+ mesg->state = NULL;
+ mesg->status = -ENODEV;
+ mesg->complete(mesg->context);
+ }
+ }
+
+ return 0;
+}
+
+/* Pause the slave core; serialized against softirq context via ctrl->lock. */
+int p7spis_pause(struct spi_master* master)
+{
+	struct p7spis_core * const ctrl = spi_master_get_devdata(master);
+	int ret;
+
+	spin_lock_bh(&ctrl->lock);
+	ret = __p7spis_pause(master);
+	spin_unlock_bh(&ctrl->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(p7spis_pause);
+
+/*
+ * Re-enable a previously paused slave core.
+ *
+ * Caller must hold ctrl->lock. Returns -EBUSY if the core is not
+ * paused, otherwise the status of the cyclic DMA setup.
+ */
+static int __p7spis_resume(struct spi_master* master)
+{
+	struct p7spis_core * const ctrl =
+			spi_master_get_devdata(master);
+	int ret = -EBUSY;
+
+	if (! ctrl->paused) {
+		return ret;
+	}
+
+	p7spis_enable_core(ctrl);
+	ret = p7spis_prepare_dma_cylic(ctrl);
+	/*
+	 * NOTE(review): paused is cleared even when the cyclic DMA setup
+	 * failed, leaving the core enabled with no transfer running --
+	 * confirm this is intended.
+	 */
+	ctrl->paused = false;
+	return ret;
+}
+
+
+/* Resume the slave core; serialized against softirq context via ctrl->lock. */
+int p7spis_resume(struct spi_master *master)
+{
+	struct p7spis_core * const ctrl = spi_master_get_devdata(master);
+	int ret;
+
+	spin_lock_bh(&ctrl->lock);
+	ret = __p7spis_resume(master);
+	spin_unlock_bh(&ctrl->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(p7spis_resume);
+EXPORT_SYMBOL(p7spis_resume);
+
+/*
+ * No-op transfer hook: a slave core never initiates transfers itself;
+ * real work happens through p7spis_transfer (installed in probe).
+ */
+static int p7spis_dummy_xfer(struct spi_master* master, struct spi_message* msg)
+{
+	return 0;
+}
+
+
+/* Slave core operations: setup is real, xfer is shunted (see probe). */
+static struct p7spi_ops p7spis_ops __devinitdata = {
+	.setup      = p7spis_setup,
+	.xfer       = p7spis_dummy_xfer,
+	.rt         = false,
+	.dma_cyclic = true,
+};
+
+/*
+ * Platform probe: instantiate a slave controller instance.
+ *
+ * Creates the generic p7spi controller, overrides its transfer hook so
+ * that spi_async() ends up in p7spis_transfer(), then registers it with
+ * the SPI core layer.
+ */
+static int __devinit p7spis_probe(struct platform_device* pdev)
+{
+	int ret;
+
+	ret = p7spi_create_ctrl(pdev, &p7spis_ops, struct p7spis_core);
+
+	if (! ret) {
+		struct p7spis_core* const ctrl =
+			(struct p7spis_core*) platform_get_drvdata(pdev);
+
+		spin_lock_init(&ctrl->lock);
+		init_waitqueue_head(&ctrl->wq);
+		INIT_LIST_HEAD(&ctrl->pending_messages);
+
+		/* shunt default transfer function. So spi_async will call
+		 * p7spis_transfer */
+		ctrl->core.ctrl->transfer = p7spis_transfer;
+
+		/*
+		 * p7spi_register calls spi_register_master internally. As soon as
+		 * this function is called, p7spis_setup can be called, so we need to
+		 * initialize ctrl->lock before that
+		 */
+		ret = p7spi_register(pdev, &ctrl->core);
+		if (ret) {
+			p7spi_abort_create(pdev);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Platform remove: stop the cyclic DMA, release the circular buffer and
+ * tear the controller down.
+ */
+static int __devexit p7spis_remove(struct platform_device *pdev)
+{
+	struct p7spis_core* const ctrl =
+		(struct p7spis_core*) platform_get_drvdata(pdev);
+
+	/* circ_buf is only set once p7spis_setup() ran successfully */
+	if (ctrl->circ_buf) {
+		dmaengine_terminate_all(ctrl->core.dma_chan);
+		p7spis_unmap_dma_cyclic(ctrl);
+		p7spis_clean_circ_buffer(ctrl);
+	}
+
+	return p7spi_destroy_ctrl(pdev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System suspend: pause the slave core, then gate its clock.
+ * NOTE(review): the clock is disabled even when p7spis_pause() failed
+ * (e.g. core already paused) -- confirm this asymmetry is intended.
+ */
+static int p7spis_pm_suspend(struct device* dev)
+{
+	struct platform_device* pdev = to_platform_device(dev);
+	struct p7spi_core* core = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = p7spis_pause(core->ctrl);
+	clk_disable_unprepare(core->clk);
+	return ret;
+}
+
+/*
+ * System resume: re-gate the core clock, then resume the slave core.
+ *
+ * Fix: clk_prepare_enable() is __must_check and can fail; its return
+ * value was previously ignored, which would let us touch an unclocked
+ * core. Propagate the error instead.
+ */
+static int p7spis_pm_resume(struct device* dev)
+{
+	struct platform_device* pdev = to_platform_device(dev);
+	struct p7spi_core* core = platform_get_drvdata(pdev);
+	int err;
+
+	err = clk_prepare_enable(core->clk);
+	if (err)
+		return err;
+
+	return p7spis_resume(core->ctrl);
+}
+#else
+#define p7spis_pm_suspend NULL
+#define p7spis_pm_resume NULL
+#endif
+
+static struct dev_pm_ops p7spis_dev_pm_ops = {
+ .suspend = p7spis_pm_suspend,
+ .resume = p7spis_pm_resume,
+};
+
+static struct platform_driver p7spis_driver = {
+ .driver = {
+ .name = P7SPIS_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &p7spis_dev_pm_ops,
+ },
+ .probe = p7spis_probe,
+ .remove = __devexit_p(p7spis_remove),
+};
+
+/* Module init: slave cores always use DMA, then register the driver. */
+static int __init p7spis_init(void)
+{
+	p7spis_ops.dma = true;
+
+	return platform_driver_register(&p7spis_driver);
+}
+module_init(p7spis_init);
+
+/* Module exit: unregister the platform driver. */
+static void __exit p7spis_exit(void)
+{
+	platform_driver_unregister(&p7spis_driver);
+}
+module_exit(p7spis_exit);
+
+MODULE_AUTHOR("Alvaro Moran - Parrot S.A. <alvaro.moran@parrot.com>");
+MODULE_DESCRIPTION("Parrot7 SPI slave controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/spi/p7-spis.h b/drivers/parrot/spi/p7-spis.h
new file mode 100644
index 00000000000000..b04b5d411693ca
--- /dev/null
+++ b/drivers/parrot/spi/p7-spis.h
@@ -0,0 +1,35 @@
+/**
+ * linux/drivers/parrot/spi/p7-spis.h - Parrot7 SPI slave driver
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author: alvaro.moran@parrot.com
+ * @date: 2012-10-01
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _P7_SPIS_H
+#define _P7_SPIS_H
+
+#if defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE)
+
+#define P7SPIS_DRV_NAME "p7-spis"
+
+struct spi_master;
+
+/*
+ * Flush RX and TX FIFOs of SPI core.
+ * This might be necessary since the slave spi keeps on receiving
+ * (thus filling the RX FIFO).
+ */
+extern void p7spis_flush(struct spi_master *master);
+extern int p7spis_pause(struct spi_master *master);
+extern int p7spis_resume(struct spi_master *master);
+
+#endif /* defined(CONFIG_SPI_SLAVE_PARROT7) || \
+ defined(CONFIG_SPI_SLAVE_PARROT7_MODULE) */
+
+#endif
diff --git a/drivers/parrot/spi/p7-swb.c b/drivers/parrot/spi/p7-swb.c
new file mode 100644
index 00000000000000..1456c126dd5070
--- /dev/null
+++ b/drivers/parrot/spi/p7-swb.c
@@ -0,0 +1,317 @@
+/**
+ * p7-swb.c - Switchbox implementation
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * author: Damien Riegel <damien.riegel.ext@parrot.com>
+ * date: 26-Jul-2013
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include "p7-swb.h"
+#include "p7-spi.h"
+#include "p7-spi_priv.h"
+
+/*
+ * Output pad mux value for each switchbox function (-1 => function is
+ * not available as an output). SPI values are base values for core 0
+ * and get a per-core offset in p7swb_funcout2val(); LOW/HIGH/Z are the
+ * R1 revision encodings (later revisions add 2, see p7swb_funcout2val).
+ */
+static int pad_out_values[] = {
+	[0 ... P7_SWB_FUNCTION_LEN] = -1,
+	[P7_SWB_SPI_CLK]   = 0x0,
+	[P7_SWB_SPI_SS]    = 0x1,
+	[P7_SWB_SPI_DATA0] = 0x2,
+	[P7_SWB_SPI_DATA1] = 0x3,
+	[P7_SWB_SPI_DATA2] = 0x4,
+	[P7_SWB_SPI_DATA3] = 0x5,
+	[P7_SWB_LOW]       = 0x18,
+	[P7_SWB_HIGH]      = 0x19,
+	[P7_SWB_Z]         = 0x1a,
+};
+
+/*
+ * Input selection register offset for each switchbox function (-1 =>
+ * function is not available as an input). Offsets are relative to the
+ * owning core's register block; p7swb_funcin2reg() adds the per-core
+ * base (4 bytes per preceding core function).
+ */
+static int pad_in_registers[] = {
+	[0 ... P7_SWB_FUNCTION_LEN] = -1,
+	[P7_SWB_SPI_CLK]     = 0x00,
+	[P7_SWB_SPI_SS]      = 0x04,
+	[P7_SWB_SPI_DATA0]   = 0x08,
+	[P7_SWB_SPI_DATA1]   = 0x0C,
+	[P7_SWB_SPI_DATA2]   = 0x10,
+	[P7_SWB_SPI_DATA3]   = 0x14,
+	[P7_SWB_MPEG_CLK]    = 0x00,
+	[P7_SWB_MPEG_DATA]   = 0x04,
+	[P7_SWB_MPEG_VALID]  = 0x08,
+	[P7_SWB_MPEG_SYNC]   = 0x0C,
+};
+
+/* Human readable name of a switchbox function ("" when unknown). */
+static char* p7swb_function2str(enum p7swb_function func)
+{
+	switch (func) {
+	case P7_SWB_SPI_CLK:
+		return "SPI_CLK";
+	case P7_SWB_SPI_SS:
+		return "SPI_SS";
+	case P7_SWB_SPI_DATA0:
+		return "SPI_DATA0";
+	case P7_SWB_SPI_DATA1:
+		return "SPI_DATA1";
+	case P7_SWB_SPI_DATA2:
+		return "SPI_DATA2";
+	case P7_SWB_SPI_DATA3:
+		return "SPI_DATA3";
+	case P7_SWB_MPEG_CLK:
+		return "MPEG_CLK";
+	case P7_SWB_MPEG_DATA:
+		return "MPEG_DATA";
+	case P7_SWB_MPEG_VALID:
+		return "MPEG_VALID";
+	case P7_SWB_MPEG_SYNC:
+		return "MPEG_SYNC";
+	default:
+		return "";
+	}
+}
+
+/*
+ * Tell whether a switchbox function may be used by a core of the given
+ * type. Static states (low/high/Z) are valid for every core type.
+ */
+static bool p7swb_func_available(enum p7swb_function func,
+                                 enum p7spi_core_type type)
+{
+	if (func > P7_SWB_FIRST_STATE && func < P7_SWB_LAST_STATE)
+		return true;
+
+	switch (type) {
+	case P7SPI_CORE_SPI:
+		return func > P7_SWB_SPI_FIRST_FUNC &&
+		       func < P7_SWB_SPI_LAST_FUNC;
+	case P7SPI_CORE_MPEG:
+		return func > P7_SWB_MPEG_FIRST_FUNC &&
+		       func < P7_SWB_MPEG_LAST_FUNC;
+	default:
+		return false;
+	}
+}
+
+/* Number of pad functions owned by one core of the given type. */
+int p7swb_core_offset(enum p7spi_core_type type)
+{
+	return (type == P7SPI_CORE_SPI) ?
+		(P7_SWB_SPI_LAST_FUNC - P7_SWB_SPI_FIRST_FUNC - 1) :
+		(P7_SWB_MPEG_LAST_FUNC - P7_SWB_MPEG_FIRST_FUNC - 1);
+}
+
+/*
+ * Translate (core, function) into the value to program into an output
+ * pad mux register. Static states (low/high/Z) are core independent
+ * but revision dependent; other functions are offset by the per-core
+ * function count. Returns the mux value, or -EINVAL when func cannot
+ * drive an output pad.
+ */
+static int p7swb_funcout2val(int core_id,
+                             enum p7spi_core_type type,
+                             enum p7swb_function func)
+{
+	int val = pad_out_values[func];
+
+	if (val < 0) {
+		dev_err(p7spi_kern.dev,
+			"switchbox: function %d is not available as output\n",
+			func);
+		return -EINVAL;
+	}
+
+	if (func > P7_SWB_FIRST_STATE && func < P7_SWB_LAST_STATE) {
+		if (p7spi_kern.rev != SPI_REVISION_1)
+			/*
+			 *          low        high   Z
+			 * R1       0x18       0x19   0x1A
+			 * R2 / R3  0x18-0x20  0x21   0x22
+			 * val is the correct value for R1, so for latter rev
+			 * add +2 to get the right value
+			 */
+			val += 2;
+		return val;
+	}
+	else {
+		/*
+		 * NOTE(review): the offset always uses P7SPI_CORE_SPI spacing
+		 * regardless of 'type' -- confirm MPEG cores share the same
+		 * per-core output value spacing.
+		 */
+		return val + core_id * p7swb_core_offset(P7SPI_CORE_SPI);
+	}
+}
+
+/*
+ * Translate (core, function) into the offset of the input selection
+ * register for that function. A core's register block sits after the
+ * blocks of all preceding cores (4 bytes per function per core).
+ * Returns the offset (relative to P7SPI_SWB_IPAD_BASE), or -EINVAL on
+ * unknown core / non-input function.
+ */
+static int p7swb_funcin2reg(int core_id,
+                            enum p7spi_core_type type,
+                            enum p7swb_function func)
+{
+	int core_index, i;
+	int address = 0;
+	int val = pad_in_registers[func];
+
+	if (val < 0) {
+		dev_err(p7spi_kern.dev,
+			"switchbox: function %d is not available as input\n",
+			func);
+		return -EINVAL;
+	}
+
+	core_index = p7spi_core_index(core_id, type);
+	if (core_index < 0) {
+		dev_err(p7spi_kern.dev,
+			"switchbox: can't find core n°%d of type %d\n",
+			core_id, type);
+		return -EINVAL;
+	}
+
+	/* skip the register blocks of all cores preceding ours */
+	for(i = 0; i < core_index; i++) {
+		address += p7swb_core_offset(p7spi_kern.cores[i]) * 4;
+	}
+
+	return address + val;
+}
+
+/* Return the descriptor for pad, or NULL when pad is out of range. */
+static struct p7swb_desc* p7swb_get_swb(unsigned int pad)
+{
+	return (pad < p7spi_kern.swb_sz) ? &p7spi_kern.swb[pad] : NULL;
+}
+
+/*
+ * Reserve one direction of pad on behalf of owner. A pad may only be
+ * held by a single owner at a time; the same owner may claim both
+ * directions. Caller must hold p7spi_kern.lck.
+ */
+static int p7swb_claim(void const * const owner, unsigned int pad, bool in_pad)
+{
+	struct p7swb_desc* const swb = p7swb_get_swb(pad);
+
+	if (!swb) {
+		/*
+		 * Maybe P7SPI_SWB_LAST is missing or the pad configuration
+		 * is incorrect in the BSP.
+		 */
+		dev_err(p7spi_kern.dev,
+		        "switchbox: can't get pad %d.\n", pad);
+		return -EINVAL;
+	}
+
+	if (swb->owner && swb->owner != owner) {
+		dev_err(p7spi_kern.dev,
+		        "pad %d already claimed\n", pad);
+		return -EBUSY;
+	}
+	swb->owner = owner;
+
+	if (in_pad)
+		swb->in_used = true;
+	else
+		swb->out_used = true;
+
+	return 0;
+}
+
+/*
+ * Drop owner's claim on one direction of pad; the descriptor is fully
+ * released once neither direction remains in use. Pads not owned by
+ * the caller are silently ignored. Caller must hold p7spi_kern.lck.
+ */
+static void p7swb_unclaim(void const * const owner, unsigned int pad, bool in_pad)
+{
+	struct p7swb_desc* swb;
+
+	swb = p7swb_get_swb(pad);
+	if (!swb || swb->owner != owner)
+		return;
+
+	if (in_pad)
+		swb->in_used = false;
+	else
+		swb->out_used = false;
+
+	dev_dbg(p7spi_kern.dev,
+	        "unclaiming pad %d (%s)\n",
+	        pad, in_pad ? "in" : "out");
+
+	/* neither direction used any more: give the pad back */
+	if ((!swb->in_used) && (!swb->out_used)) {
+		swb->owner = NULL;
+		dev_dbg(p7spi_kern.dev,
+		        "releasing pad %d\n", pad);
+	}
+}
+
+/*
+ * Route a switchbox function to a physical pad for the given core.
+ * dir selects whether the pad is driven (OUT), sampled (IN) or both.
+ * Returns 0 on success, a negative errno otherwise; on failure any
+ * claim taken earlier by this call is dropped.
+ */
+int p7swb_request_pad(int core_id, enum p7spi_core_type type,
+                      void const * const owner, unsigned int pad,
+                      enum p7swb_direction dir, enum p7swb_function func)
+{
+	int val, err = -EINVAL;
+
+	mutex_lock(&p7spi_kern.lck);
+
+	if (!p7swb_func_available(func, type))
+		goto err;
+
+	if (dir == P7_SWB_DIR_OUT || dir == P7_SWB_DIR_BOTH) {
+		val = p7swb_funcout2val(core_id, type, func);
+		if (val >= 0) {
+			err = p7swb_claim(owner, pad, false);
+			if (err)
+				goto err;
+
+			/* program the output mux for this pad */
+			__raw_writel(val, p7spi_kern.vaddr + P7SPI_SWB_OPAD(pad));
+			dev_dbg(p7spi_kern.dev,
+			        "claiming pad %d (out) for function %s\n",
+			        pad, p7swb_function2str(func));
+		}
+		else {
+			err = val;
+			goto err;
+		}
+
+	}
+
+	if (dir == P7_SWB_DIR_IN || dir == P7_SWB_DIR_BOTH) {
+		val = p7swb_funcin2reg(core_id, type, func);
+		if (val >= 0) {
+			err = p7swb_claim(owner, pad, true);
+			if (err)
+				goto release_pad;
+
+			/* select this pad as the function's input source */
+			__raw_writel(pad, p7spi_kern.vaddr +
+			             P7SPI_SWB_IPAD_BASE + val);
+			dev_dbg(p7spi_kern.dev,
+			        "claiming pad %d (in) for function %s\n",
+			        pad, p7swb_function2str(func));
+		}
+		else {
+			err = val;
+			goto release_pad;
+		}
+	}
+
+	/* success: skip the unclaim path, just unlock and return 0 */
+	if (!err)
+		goto err;
+
+release_pad:
+	/* roll back the OUT claim taken above before reporting failure */
+	if (dir == P7_SWB_DIR_OUT || dir == P7_SWB_DIR_BOTH)
+		p7swb_unclaim(owner, pad, false);
+err:
+	mutex_unlock(&p7spi_kern.lck);
+	return err;
+
+}
+EXPORT_SYMBOL(p7swb_request_pad);
+
+/*
+ * Release the direction(s) of pad previously obtained by owner through
+ * p7swb_request_pad().
+ */
+void p7swb_release_pad(void const * const owner, unsigned int pad,
+                       enum p7swb_direction dir)
+{
+	mutex_lock(&p7spi_kern.lck);
+	if (dir == P7_SWB_DIR_OUT || dir == P7_SWB_DIR_BOTH)
+		p7swb_unclaim(owner, pad, false);
+	if (dir == P7_SWB_DIR_IN || dir == P7_SWB_DIR_BOTH)
+		p7swb_unclaim(owner, pad, true);
+	mutex_unlock(&p7spi_kern.lck);
+}
+EXPORT_SYMBOL(p7swb_release_pad);
diff --git a/drivers/parrot/spi/p7-swb.h b/drivers/parrot/spi/p7-swb.h
new file mode 100644
index 00000000000000..cc56a63aeb1f44
--- /dev/null
+++ b/drivers/parrot/spi/p7-swb.h
@@ -0,0 +1,53 @@
+
+
+#ifndef _P7_SPI_SWB_H
+#define _P7_SPI_SWB_H
+
+struct p7spi_core;
+enum p7spi_core_type;
+
+struct p7swb_desc {
+ bool in_used;
+ bool out_used;
+ struct p7spi_core const * owner;
+};
+
+enum p7swb_direction {
+ P7_SWB_DIR_IN,
+ P7_SWB_DIR_OUT,
+ P7_SWB_DIR_BOTH,
+};
+
+enum p7swb_function {
+ P7_SWB_SPI_FIRST_FUNC,
+ P7_SWB_SPI_CLK,
+ P7_SWB_SPI_SS,
+ P7_SWB_SPI_DATA0,
+ P7_SWB_SPI_DATA1,
+ P7_SWB_SPI_DATA2,
+ P7_SWB_SPI_DATA3,
+ P7_SWB_SPI_LAST_FUNC,
+
+ P7_SWB_MPEG_FIRST_FUNC,
+ P7_SWB_MPEG_CLK,
+ P7_SWB_MPEG_DATA,
+ P7_SWB_MPEG_VALID,
+ P7_SWB_MPEG_SYNC,
+ P7_SWB_MPEG_LAST_FUNC,
+
+ P7_SWB_FIRST_STATE,
+ P7_SWB_LOW,
+ P7_SWB_HIGH,
+ P7_SWB_Z,
+ P7_SWB_LAST_STATE,
+
+ P7_SWB_FUNCTION_LEN,
+};
+
+int p7swb_core_offset(enum p7spi_core_type type);
+extern int p7swb_request_pad(int core_id, enum p7spi_core_type,
+ void const * const owner, unsigned int pad,
+ enum p7swb_direction dir, enum p7swb_function func);
+extern void p7swb_release_pad(void const * const owner, unsigned int pad,
+ enum p7swb_direction dir);
+#endif
diff --git a/drivers/parrot/spi/spimtest_loopback.c b/drivers/parrot/spi/spimtest_loopback.c
new file mode 100644
index 00000000000000..109d4651d71bf9
--- /dev/null
+++ b/drivers/parrot/spi/spimtest_loopback.c
@@ -0,0 +1,253 @@
+/*
+ * @file spis_22_master.c
+ * @brief Receive a big buffer by sending multiple times the same messages.
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * @author damien.riegel.ext@parrot.com
+ * @date Dec 2012
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+
+struct wrapper_spi_info {
+ struct spi_message* mesg;
+ struct spi_device* spi;
+ int mesg_position;
+ int nb_send;
+ int target_send;
+ unsigned char offset;
+ int fail_at;
+};
+
+static unsigned int nb_messages = 1;
+module_param(nb_messages, uint, 0644);
+
+static unsigned int message_size = 256;
+module_param(message_size, uint, 0644);
+
+static unsigned int total_len = 256;
+module_param(total_len, uint, 0644);
+
+static unsigned int offset = 0x2A;
+module_param(offset, uint, 0644);
+
+static unsigned int speed_hz = 26 * 1000 * 1000;
+module_param(speed_hz, uint, 0644);
+
+static struct spi_message *mesgs = NULL;
+static struct spi_transfer *xfers = NULL;
+static struct wrapper_spi_info* infos = NULL;
+static unsigned int nb_send = 2;
+static int total_messages_received;
+static struct completion spimtest_completion;
+static unsigned int should_exit = 0;
+
+#define spimtest_print(level, format, arg...) \
+ printk(level "[SPIMTEST LOOPBACK][MSTR] " format, ##arg)
+
+#define spimtest_info(format, arg...) \
+ spimtest_print(KERN_INFO, format, ##arg)
+
+#define spimtest_err(format, arg...) \
+ spimtest_print(KERN_ERR, format, ##arg)
+
+
+/*
+ * Allocate the message/transfer/bookkeeping arrays and fill every TX
+ * buffer with the test pattern (a bytewise ramp starting at "offset").
+ * Returns 0 on success, -ENOMEM otherwise; on failure every partial
+ * allocation is released and the global pointers reset to NULL so that
+ * later cleanup cannot free them twice.
+ *
+ * Fix: "infos" was allocated with sizeof(struct spi_message) instead of
+ * sizeof(struct wrapper_spi_info).
+ */
+static int spimtest_allocate(void)
+{
+	int err = -ENOMEM;
+	int i;
+
+	infos = kzalloc(nb_messages * sizeof(struct wrapper_spi_info),
+	                GFP_KERNEL);
+	if (! infos) {
+		goto exit;
+	}
+
+	mesgs = kzalloc(nb_messages * sizeof(struct spi_message), GFP_KERNEL);
+	if (! mesgs) {
+		goto free_infos;
+	}
+
+	xfers = kzalloc(nb_messages * sizeof(struct spi_transfer), GFP_KERNEL);
+	if (! xfers) {
+		goto free_mesgs;
+	}
+
+	for(i = 0; i < nb_messages; i++) {
+		int j, loc_offset;
+		u8 *buf;
+
+		spi_message_init(&mesgs[i]);
+		/* xfers came from kzalloc: already zeroed */
+		xfers[i].len = message_size;
+		xfers[i].speed_hz = speed_hz;
+
+		buf = kzalloc(message_size, GFP_KERNEL);
+		if (! buf) {
+			goto free_buffers;
+		}
+
+		/* pattern: contiguous byte ramp across all messages, mod 256 */
+		loc_offset = (offset + (i * message_size)) % 256;
+		for(j = 0; j < message_size; j++) {
+			buf[j] = loc_offset++ % 256;
+		}
+
+		xfers[i].tx_buf = buf;
+
+		spi_message_add_tail(&xfers[i], &mesgs[i]);
+	}
+
+	return 0;
+
+free_buffers:
+	/* kfree(NULL) is a no-op: unallocated tx_bufs are still NULL */
+	for(i = 0; i < nb_messages; i++) {
+		kfree(xfers[i].tx_buf);
+	}
+	kfree(xfers);
+	xfers = NULL;
+free_mesgs:
+	kfree(mesgs);
+	mesgs = NULL;
+free_infos:
+	kfree(infos);
+	infos = NULL;
+exit:
+	return err;
+}
+
+/*
+ * Completion callback: resubmit the message until it has been sent
+ * target_send times; wake the prober once every expected message has
+ * completed, or once every message slot has failed.
+ */
+static void spimtest_receive(void* context)
+{
+	struct wrapper_spi_info* info = context;
+	struct spi_message* mesg = info->mesg;
+
+	if (mesg->status) {
+		spimtest_err("-FAIL- Msg %d (round %d) received with error code %d\n",
+		             info->mesg_position, info->nb_send, mesg->status);
+		should_exit++;
+		goto completion;
+	}
+
+	info->nb_send++;
+	if (info->nb_send < info->target_send) {
+		spi_async(info->spi, mesg);
+	}
+
+completion:
+	/*
+	 * NOTE(review): counters are updated without locking; callbacks for
+	 * different messages may race -- confirm completions are serialized.
+	 */
+	total_messages_received++;
+	if (total_messages_received == nb_send || should_exit == nb_messages)
+		complete(&spimtest_completion);
+}
+
+/*
+ * Probe: validate module parameters, allocate the message pool, send
+ * every message once (further rounds are driven by the completion
+ * callback), wait for the whole test to finish, then free everything.
+ *
+ * Fix: on spimtest_allocate() failure the old code jumped to the
+ * cleanup below, dereferencing "xfers" (NULL) and freeing the already
+ * freed arrays a second time. Return immediately instead:
+ * spimtest_allocate() cleans up after itself.
+ */
+static int __devinit spimtest_probe(struct spi_device *spi)
+{
+	int err = 0, i;
+	int send_per_message;
+	int remaining_send;
+
+	total_messages_received = 0;
+	nb_send = total_len / message_size;
+	if (total_len % message_size) {
+		spimtest_err("total_len (%u) must be a multiple of message_size (%u)\n",
+		             total_len, message_size);
+		return -EINVAL;
+	}
+
+	if (((message_size * nb_messages) % 256) != 0) {
+		spimtest_err("(message_size * nb_messages) must be a multiple of 256\n");
+		return -EINVAL;
+	}
+
+	/* some messages are going to be sent "send_per_message" times */
+	send_per_message = nb_send / nb_messages;
+	/* and some "send_per_message" + 1 times */
+	remaining_send = nb_send % nb_messages;
+
+	init_completion(&spimtest_completion);
+	err = spimtest_allocate();
+	if (err) {
+		return err;
+	}
+
+	/*
+	 * Send each message once, the rest is done in completion function
+	 */
+	for(i = 0; i < nb_messages; i++) {
+		infos[i].mesg = &mesgs[i];
+		infos[i].spi = spi;
+		infos[i].nb_send = 0;
+		infos[i].target_send = send_per_message;
+		if (remaining_send) {
+			infos[i].target_send++;
+			remaining_send--;
+		}
+		infos[i].fail_at = 0;
+		infos[i].mesg_position = i;
+
+		mesgs[i].complete = spimtest_receive;
+		mesgs[i].context = &infos[i];
+		err = spi_async(spi, &mesgs[i]);
+		if (err) {
+			spimtest_err("-FAIL- Can't send a message (%d)\n",
+			             err);
+		}
+	}
+
+	wait_for_completion(&spimtest_completion);
+	spimtest_info("%s message pool %d msg. %d msgs sent\n",
+	              should_exit ? "-FAIL-" : "-PASS-",
+	              nb_messages, nb_send);
+
+	/* normal end-of-test cleanup */
+	for(i = 0; i < nb_messages; i++) {
+		kfree(xfers[i].tx_buf);
+	}
+
+	kfree(mesgs);
+	kfree(xfers);
+	kfree(infos);
+	return err;
+}
+
+/* Nothing to do: all resources are released at the end of probe(). */
+static int __devexit spimtest_remove(struct spi_device *spi)
+{
+	return 0;
+}
+
+
+static struct spi_driver spimtest_driver = {
+ .driver = {
+ .name = "spidev",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = spimtest_probe,
+ .remove = __devexit_p(spimtest_remove),
+};
+
+module_spi_driver(spimtest_driver);
+MODULE_DESCRIPTION( "spimtest loopback module" );
+MODULE_LICENSE( "GPL" );
diff --git a/drivers/parrot/spi/spistest_loopback.c b/drivers/parrot/spi/spistest_loopback.c
new file mode 100644
index 00000000000000..796515b3f1be46
--- /dev/null
+++ b/drivers/parrot/spi/spistest_loopback.c
@@ -0,0 +1,328 @@
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+
+struct spistest_core;
+
+struct wrapper_spi_info {
+ struct spi_message* mesg;
+ struct spi_device* spi;
+ int mesg_position;
+ int nb_send;
+ int target_send;
+ unsigned char offset;
+ int fail_at;
+
+ struct spistest_core* core;
+};
+
+struct spistest_core {
+ int bus_num;
+ struct spi_message *mesgs;
+ struct spi_transfer *xfers;
+ struct wrapper_spi_info *infos;
+
+ int nb_send;
+ int nb_fail;
+ int total_messages_received;
+
+ struct completion done;
+};
+
+static unsigned int nb_messages = 1;
+module_param(nb_messages, uint, 0644);
+
+static unsigned int message_size = 256;
+module_param(message_size, uint, 0644);
+
+static unsigned int total_len = 256;
+module_param(total_len, uint, 0644);
+
+static unsigned int verbose = 1;
+module_param(verbose, uint, 0644);
+
+static unsigned int offset = 0x2A;
+module_param(offset, uint, 0644);
+
+static struct spistest_core cores[2];
+
+#define spistest_print(level, core, format, arg...) \
+ printk(level "[SPISTEST LOOPBACK][SLV %d] " format, core->bus_num, ##arg)
+
+#define spistest_info(core, format, arg...) \
+ spistest_print(KERN_INFO, core, format, ##arg)
+
+#define spistest_err(core, format, arg...) \
+ spistest_print(KERN_ERR, core, format, ##arg)
+
+/*
+ * Map a SPI bus number onto one of the two static core descriptors.
+ * Hackish on purpose: two slaves may be wired to the same pads, so this
+ * driver supports up to two cores, currently expected on buses 1 and 3.
+ * Returns NULL for any other bus number.
+ */
+static struct spistest_core* get_spistest_core(int bus_num)
+{
+	struct spistest_core *core = NULL;
+
+	if (bus_num == 1)
+		core = &cores[0];
+	else if (bus_num == 3)
+		core = &cores[1];
+
+	if (core)
+		core->bus_num = bus_num;
+
+	return core;
+}
+
+/*
+ * Allocate the per-core message/transfer/bookkeeping arrays and their
+ * RX buffers. Returns 0 on success, -ENOMEM otherwise; on failure all
+ * partial allocations are released and the pointers reset to NULL.
+ *
+ * Fixes: "infos" was allocated with sizeof(struct spi_message) instead
+ * of sizeof(struct wrapper_spi_info), and core->xfers was leaked when
+ * an RX buffer allocation failed (missing kfree on the error path).
+ */
+static int spistest_allocate(struct spistest_core *core)
+{
+	int err = -ENOMEM;
+	int i;
+
+	core->infos = kzalloc(nb_messages * sizeof(struct wrapper_spi_info),
+	                      GFP_KERNEL);
+	if (! core->infos) {
+		goto exit;
+	}
+
+	core->mesgs = kzalloc(nb_messages * sizeof(struct spi_message), GFP_KERNEL);
+	if (! core->mesgs) {
+		goto free_infos;
+	}
+
+	core->xfers = kzalloc(nb_messages * sizeof(struct spi_transfer), GFP_KERNEL);
+	if (! core->xfers) {
+		goto free_mesgs;
+	}
+
+	for(i = 0; i < nb_messages; i++) {
+		spi_message_init(&core->mesgs[i]);
+		/* xfers came from kzalloc: already zeroed */
+		core->xfers[i].len = message_size;
+
+		core->xfers[i].rx_buf = kzalloc(message_size, GFP_KERNEL);
+		if (! core->xfers[i].rx_buf) {
+			goto free_buffers;
+		}
+
+		spi_message_add_tail(&core->xfers[i], &core->mesgs[i]);
+	}
+
+	return 0;
+
+free_buffers:
+	/* kfree(NULL) is a no-op: unallocated rx_bufs are still NULL */
+	for(i = 0; i < nb_messages; i++) {
+		kfree(core->xfers[i].rx_buf);
+	}
+	kfree(core->xfers);
+	core->xfers = NULL;
+free_mesgs:
+	kfree(core->mesgs);
+	core->mesgs = NULL;
+free_infos:
+	kfree(core->infos);
+	core->infos = NULL;
+exit:
+	return err;
+}
+
+
+/*
+ * Verify that buf[0..len) holds the expected byte ramp starting at
+ * "offset" (wrapping mod 256). Returns -1 when the whole buffer
+ * matches, or the index of the first mismatching byte; the following
+ * bytes are dumped to ease debugging.
+ */
+static int spistest_check(struct spistest_core *core,
+                          u8* buf, int len, unsigned char offset)
+{
+	int i, j, err = -1;
+	for(i = 0; i < len; i++) {
+		if (buf[i] != offset) {
+			spistest_err(core, "-FAIL- Unexpected value at offset %d."
+			             "Read 0x%.2X, expected 0x%.2X\n",
+			             i, buf[i], offset);
+
+			/* dump up to the next 10 bytes for context */
+			for (j = 0; j < 10; j++) {
+				if (i + j < len)
+					spistest_err(core, "-FAIL- buf[%d] = 0x%X\n",
+					             i + j, buf[i + j]);
+			}
+			return i;
+
+		}
+		/* unsigned char: wraps naturally at 256 */
+		offset++;
+	}
+
+	return err;
+}
+
+
+/*
+ * Completion callback: validate the received buffer against the
+ * expected pattern, resubmit the message until it has been received
+ * target_send times, and complete core->done once every expected
+ * message arrived or every message slot ended in failure.
+ */
+static void spistest_receive(void* context)
+{
+	int err;
+	struct wrapper_spi_info* info = context;
+	struct spistest_core* core = info->core;
+	struct spi_message* mesg = info->mesg;
+	/* single-transfer messages: fetch the first (only) transfer */
+	struct spi_transfer* xfer = list_entry(
+					mesg->transfers.next,
+					struct spi_transfer,
+					transfer_list);
+
+
+	if (mesg->status) {
+		spistest_err(core, "-FAIL- Msg %d (round %d) received with error code %d\n",
+		             info->mesg_position, info->nb_send, mesg->status);
+		core->nb_fail++;
+		/* still dump the buffer contents for post-mortem analysis */
+		spistest_check(core, xfer->rx_buf, message_size, info->offset);
+		goto completion;
+	}
+
+	/* check buffer (-1 means the whole buffer matched) */
+	err = spistest_check(core, xfer->rx_buf, message_size, info->offset);
+
+	if (err != -1) {
+		spistest_err(core, "-FAIL- Msg %d (round %d) is invalid\n",
+		             info->mesg_position, info->nb_send);
+		info->fail_at = info->nb_send;
+		core->nb_fail++;
+		goto completion;
+	}
+
+	if (verbose)
+		spistest_info(core, "-PASS- Msg %d (round %d) validated\n",
+		              info->mesg_position, info->nb_send);
+
+	if (core->nb_fail)
+		/* If this message was successful but a previous one failed,
+		 * do not resend this message to end the test asap */
+		core->nb_fail++;
+	else {
+		info->nb_send++;
+		if (info->nb_send < info->target_send) {
+			spi_async(info->spi, mesg);
+		}
+	}
+
+completion:
+
+	/* NOTE(review): counters updated without locking -- confirm
+	 * completions for one core are serialized. */
+	core->total_messages_received++;
+
+	if (core->total_messages_received == core->nb_send ||
+	    core->nb_fail == nb_messages)
+		complete(&core->done);
+}
+
+
+/*
+ * Probe: validate parameters, allocate the per-core message pool and
+ * queue every message once; further rounds are driven from the
+ * completion callback. The test result is reported (and memory freed)
+ * in spistest_remove(), which waits on core->done.
+ */
+static int __devinit spistest_probe(struct spi_device *spi)
+{
+	int err = 0, i;
+	int send_per_message;
+	int remaining_send;
+	struct spistest_core *core;
+
+	core = get_spistest_core(spi->master->bus_num);
+	if ( ! core ) {
+		printk(KERN_ERR "SPI bus number must be only 1 or 3\n");
+		return -EINVAL;
+	}
+
+	core->total_messages_received = 0;
+	core->nb_send = total_len / message_size;
+	if (total_len % message_size) {
+		spistest_err(core, "total_len must be a multiple of message_size\n");
+		return -EINVAL;
+	}
+
+	/* some messages are sent send_per_message times, some one more */
+	send_per_message = core->nb_send / nb_messages;
+	remaining_send = core->nb_send % nb_messages;
+
+	init_completion(&core->done);
+	err = spistest_allocate(core);
+	if (err) {
+		goto exit;
+	}
+
+	/*
+	 * Send each message once, the rest is done in completion function
+	 */
+	for(i = 0; i < nb_messages; i++) {
+		core->infos[i].mesg = &core->mesgs[i];
+		core->infos[i].spi = spi;
+		core->infos[i].nb_send = 0;
+		core->infos[i].target_send = send_per_message;
+		if (remaining_send) {
+			core->infos[i].target_send++;
+			remaining_send--;
+		}
+		core->infos[i].offset = (offset + (i * message_size)) % 256;
+		core->infos[i].fail_at = 0;
+		core->infos[i].mesg_position = i;
+		core->infos[i].core = core;
+
+		core->mesgs[i].complete = spistest_receive;
+		core->mesgs[i].context = &core->infos[i];
+		err = spi_async(spi, &core->mesgs[i]);
+		if (err) {
+			/*
+			 * NOTE(review): a failed submission here means fewer
+			 * completions than expected; spistest_remove() may
+			 * then wait on core->done forever -- confirm.
+			 */
+			spistest_err(core, "-FAIL- Can't send a message (%d)\n",
+			             err);
+		}
+		else {
+			spistest_info(core,
+			              "Sending msg %d [%p], size %d (total %d)\n",
+			              i, core->xfers[i].rx_buf, message_size, total_len);
+		}
+	}
+
+exit:
+	return err;
+}
+
+
+/*
+ * Remove: wait for the test started in probe() to finish, report the
+ * overall verdict and free the per-core message pool.
+ */
+static int __devexit spistest_remove(struct spi_device *spi)
+{
+	int i;
+	struct spistest_core *core;
+
+	core = get_spistest_core(spi->master->bus_num);
+	/* blocks until spistest_receive() signalled the end of the test */
+	wait_for_completion(&core->done);
+	if (! core->nb_fail) {
+		bool success = true;
+		for(i = 0; i < nb_messages; i++) {
+			if (core->infos[i].fail_at != 0)
+				success = false;
+		}
+
+		spistest_info(core, "%s message pool %d msg. %d msgs sent\n",
+		              success ? "-PASS-" : "-FAIL-",
+		              nb_messages, core->nb_send);
+	}
+	else {
+		spistest_err(core, "-FAIL- Could not send all messages\n");
+		for(i = 0; i < nb_messages; i++) {
+			spistest_err(core, "\tMsg %d sent %d time(s) successfully\n",
+			             i, core->infos[i].nb_send);
+		}
+	}
+
+	for(i = 0; i < nb_messages; i++) {
+		kfree(core->xfers[i].rx_buf);
+	}
+	kfree(core->mesgs);
+	kfree(core->xfers);
+	kfree(core->infos);
+	return 0;
+}
+
+
+static struct spi_driver spistest_driver = {
+ .driver = {
+ .name = "spistest",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = spistest_probe,
+ .remove = __devexit_p(spistest_remove),
+};
+
+module_spi_driver(spistest_driver);
+MODULE_DESCRIPTION( "spistest loopback module" );
+MODULE_LICENSE( "GPL" );
diff --git a/drivers/parrot/uio/Kconfig b/drivers/parrot/uio/Kconfig
new file mode 100644
index 00000000000000..e732c1a8ac972a
--- /dev/null
+++ b/drivers/parrot/uio/Kconfig
@@ -0,0 +1,27 @@
+config UIO_HX
+	tristate "On2 / Hantro video userspace driver support"
+	default m if ARCH_PARROT7
+	default n if !ARCH_PARROT7
+	help
+	  Enable support for On2 / Hantro video userspace drivers.
+
+config UIO_HX_DEBUG
+	bool "Enable debug for On2 / Hantro video userspace driver"
+	depends on UIO_HX
+	default n
+	help
+	  Compile On2 / Hantro video userspace drivers with debug enabled.
+
+config UIO_HX270
+ tristate "On2 / Hantro hx270 video decoder support"
+ depends on UIO_HX
+ default UIO_HX
+ help
+ Enable support for On2 / Hantro video decoder.
+
+config UIO_HX280
+ tristate "On2 / Hantro hx280 video encoder support"
+ depends on UIO_HX
+ default UIO_HX
+ help
+ Enable support for On2 / Hantro video encoder.
diff --git a/drivers/parrot/uio/Makefile b/drivers/parrot/uio/Makefile
new file mode 100644
index 00000000000000..9a79f2f322d611
--- /dev/null
+++ b/drivers/parrot/uio/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -I$(srctree)/drivers/parrot
+ccflags-$(CONFIG_UIO_HX_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_UIO_HX) += hx-uio.o
+obj-$(CONFIG_UIO_HX270) += hx270-vdec.o
+obj-$(CONFIG_UIO_HX280) += hx280-venc.o
diff --git a/drivers/parrot/uio/hx-uio.c b/drivers/parrot/uio/hx-uio.c
new file mode 100644
index 00000000000000..40fafb19d4a5bb
--- /dev/null
+++ b/drivers/parrot/uio/hx-uio.c
@@ -0,0 +1,904 @@
+/**
+ * linux/driver/parrot/uio/hx-uio.c - On2 / Hantro user I/O
+ * implementation
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 30-Aug-2011
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/ratelimit.h>
+#include <linux/poll.h>
+#include <asm/signal.h>
+#include <linux/semaphore.h>
+#include <asm/uaccess.h>
+#include "hx-uio.h"
+#if defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE)
+#include "gpu/p7ump.h"
+#endif
+
+#define DRV_NAME "hxuio"
+#define HX_DEVS_NR 2
+
+struct hx_file {
+ struct hx_device* hxdev;
+ struct hx_unit* notifier;
+ unsigned int irqcnt;
+};
+
+struct hx_vmdma {
+ void* virt;
+ dma_addr_t bus;
+ atomic_t cnt;
+};
+
+#define HX_DMA_PGNO_MAGIC PFN_DOWN(~(0UL))
+
+
+
+static int hx_major;
+static struct class* hx_class;
+static struct hx_device* hx_devices[HX_DEVS_NR];
+static DEFINE_MUTEX( hx_lock);
+static int dma_memory_used;
+
+
+static void hx_dma_mem_stat_add(struct hx_device* hxdev, int size)
+{
+ int pid = current->pid;
+ struct device* const parent = hxdev->device.parent;
+
+ dma_memory_used += size;
+ dev_dbg(parent, "dma allocated %d pid %d (%s) total %d\n", size, pid,
+ current->comm, dma_memory_used);
+}
+
+static void hx_dma_mem_stat_free_lock(struct hx_device* hxdev, int size)
+{
+ struct device* const parent = hxdev->device.parent;
+ struct mutex* const lck = &hxdev->vm_lock;
+
+ mutex_lock(lck);
+ dma_memory_used -= size;
+ mutex_unlock(lck);
+ dev_dbg(parent, "dma free %d total %d\n", size, dma_memory_used);
+}
+
+static void hx_open_vmdma(struct vm_area_struct* vma)
+{
+ struct hx_vmdma* const dma = (struct hx_vmdma*) vma->vm_private_data;
+ struct device* const parent = ((struct hx_file*) vma->vm_file->private_data)->hxdev->device.parent;
+
+ dev_dbg(parent,
+ "open vmdma %p [vma=%08lx-%08lx,dmavirt=%08lx, dmabus=%08lx] cnt=%d\n",
+ vma,
+ vma->vm_start,
+ vma->vm_end,
+ (unsigned long) dma->virt,
+ (unsigned long) dma->bus,
+ atomic_read(&dma->cnt));
+
+ atomic_inc(&dma->cnt);
+}
+
+static void hx_close_vmdma(struct vm_area_struct* vma)
+{
+ struct hx_vmdma* const dma = (struct hx_vmdma*) vma->vm_private_data;
+ struct device* const parent = ((struct hx_file*) vma->vm_file->private_data)->hxdev->device.parent;
+ struct hx_device* const hxdev = ((struct hx_file*) vma->vm_file->private_data)->hxdev;
+
+ dev_dbg(parent,
+ "close vmdma %p [vma=%08lx-%08lx,dmavirt=%08lx, dmabus=%08lx] cnt=%d\n",
+ vma,
+ vma->vm_start,
+ vma->vm_end,
+ (unsigned long) dma->virt,
+ (unsigned long) dma->bus,
+ atomic_read(&dma->cnt));
+
+ if (atomic_dec_and_test(&dma->cnt)) {
+#if defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE)
+ p7ump_remove_map(dma->bus,
+ PAGE_ALIGN(vma->vm_end - vma->vm_start));
+#endif
+ dev_dbg(parent,"Freeing hx-uio memory !\n");
+ hx_dma_mem_stat_free_lock(hxdev, PAGE_ALIGN(vma->vm_end - vma->vm_start));
+ dma_free_coherent(parent,
+ PAGE_ALIGN(vma->vm_end - vma->vm_start),
+ dma->virt,
+ dma->bus);
+
+ kfree(dma);
+ }
+}
+
+static int hx_fault_vmdma(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ /* Disable mremap. */
+ return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct const hx_vm_ops = {
+ .open = &hx_open_vmdma,
+ .close = &hx_close_vmdma,
+ .fault = &hx_fault_vmdma
+};
+
+static int hx_mmap_dma(struct vm_area_struct* vma,
+ struct hx_device* hxdev,
+ size_t size)
+{
+ struct device* const parent = hxdev->device.parent;
+ struct mutex* const lck = &hxdev->vm_lock;
+ struct hx_vmdma *dma = (struct hx_vmdma*) vma->vm_private_data;
+ int err = -ENOMEM;
+
+ if (! (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ mutex_lock(lck);
+
+ if (!dma) {
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (! dma) {
+ mutex_unlock(lck);
+ return err;
+ }
+ dev_dbg(parent,"Allocating hx-uio memory !\n");
+
+ dma->virt = dma_alloc_coherent(parent,
+ PAGE_ALIGN(size),
+ &dma->bus,
+ GFP_KERNEL);
+
+ if (! dma->virt) {
+ dev_warn(parent, "can't allocate dma memory %d\n", PAGE_ALIGN(size));
+ goto free;
+ }
+
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_private_data = dma;
+
+ atomic_set(&dma->cnt, 0);
+ }
+
+ err = dma_mmap_coherent(parent, vma, dma->virt, dma->bus, size);
+ if (err)
+ goto dma;
+
+ atomic_inc(&dma->cnt);
+
+ vma->vm_ops = &hx_vm_ops;
+
+ *((dma_addr_t*) (dma->virt)) = dma->bus;
+
+#if defined(CONFIG_UMP_PARROT7) || defined(CONFIG_UMP_PARROT7_MODULE)
+ /* Allow GPU to access this memory through UMP */
+ p7ump_add_map(dma->bus, size);
+#endif
+
+ hx_dma_mem_stat_add(hxdev, PAGE_ALIGN(size));
+
+
+ mutex_unlock(lck);
+ return 0;
+
+dma:
+ dma_free_coherent(parent,
+ PAGE_ALIGN(size),
+ dma->virt,
+ dma->bus);
+free:
+ mutex_unlock(lck);
+ kfree(dma);
+ return err;
+}
+
+static int hx_mmap(struct file* filep, struct vm_area_struct* vma)
+{
+ struct hx_device* const hxdev = ((struct hx_file*) filep->private_data)->hxdev;
+ unsigned long const pageno = vma->vm_pgoff;
+ size_t const sz = vma->vm_end - vma->vm_start;
+ int err;
+
+ if (! (vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end <= vma->vm_start) {
+ dev_dbg(hxdev->device.parent,
+ "invalid vma region: 0x%08lx-0x%08lx.\n",
+ vma->vm_start,
+ vma->vm_end);
+ return -EINVAL;
+ }
+
+ if (pageno && pageno != HX_DMA_PGNO_MAGIC) {
+ dev_dbg(hxdev->device.parent,
+ "invalid vma offset page %lu.\n",
+ pageno);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~(VM_MAYEXEC | VM_EXEC);
+ vma->vm_flags |= VM_SPECIAL;
+
+ if (unlikely(! pageno)) {
+ struct resource const* const res = hxdev->regs_res;
+
+ if (sz > resource_size(res)) {
+ dev_dbg(hxdev->device.parent,
+ "invalid vma size %u (limited to %u).\n",
+ (unsigned int) sz,
+ (unsigned int) resource_size(res));
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ err = remap_pfn_range(vma,
+ vma->vm_start,
+ PFN_DOWN(res->start),
+ sz,
+ vma->vm_page_prot);
+ }
+ else
+ err = hx_mmap_dma(vma, hxdev, sz);
+
+ if (err)
+ dev_dbg(hxdev->device.parent,
+ "failed to remap range with error %d.\n", err);
+ return err;
+}
+
+static struct hx_device* hx_get_dev(int minor)
+{
+ int d;
+ struct hx_device* hxdev = 0;
+
+ mutex_lock(&hx_lock);
+
+ for (d = 0; d < ARRAY_SIZE(hx_devices); d++) {
+ struct hx_device* const dev = hx_devices[d];
+
+ if (dev &&
+ MINOR(dev->device.devt) == minor) {
+ hxdev = dev;
+ break;
+ }
+ }
+ if (! hxdev)
+ goto unlock;
+
+ __module_get(hxdev->owner);
+
+unlock:
+ mutex_unlock(&hx_lock);
+ return hxdev;
+}
+
+static void hx_put_dev(struct hx_device const* hxdev)
+{
+ module_put(hxdev->owner);
+}
+
+static int hx_register_dev(struct hx_device* hxdev, char const* name, int id)
+{
+ int err;
+ int d;
+
+ mutex_lock(&hx_lock);
+
+ for (d = 0; d < ARRAY_SIZE(hx_devices); d++) {
+ if (! hx_devices[d])
+ break;
+ }
+ if (d == ARRAY_SIZE(hx_devices)) {
+ err = -ENXIO;
+ goto unlock;
+ }
+
+ hxdev->device.devt = MKDEV(hx_major, d);
+ err = dev_set_name(&hxdev->device, "%s%c", name, id + 'a');
+ if (err)
+ goto unlock;
+
+ err = device_register(&hxdev->device);
+ if (err)
+ goto free;
+
+ hx_devices[d] = hxdev;
+
+ mutex_unlock(&hx_lock);
+ return 0;
+
+free:
+ kfree(dev_name(&hxdev->device));
+unlock:
+ mutex_unlock(&hx_lock);
+ return err;
+}
+
+static int hx_unregister_dev(struct hx_device* hxdev)
+{
+ int d;
+ int err = 0;
+
+ mutex_lock(&hx_lock);
+
+ for (d = 0; d < ARRAY_SIZE(hx_devices); d++) {
+ if (hxdev == hx_devices[d])
+ break;
+ }
+ if (d == ARRAY_SIZE(hx_devices)) {
+ err = -ENXIO;
+ goto unlock;
+ }
+
+ device_unregister(&hx_devices[d]->device);
+ hx_devices[d] = 0;
+
+unlock:
+ mutex_unlock(&hx_lock);
+ return err;
+}
+
+static int hx_init_maps(struct hx_device* hxdev,
+ struct platform_device* pdev)
+{
+ struct device* const parent = &pdev->dev;
+ struct resource const* const regs_res = platform_get_resource(pdev,
+ IORESOURCE_MEM,
+ 0);
+ char const* msg;
+ int err = -EBUSY;
+
+ hxdev->regs_res = NULL;
+
+ if (! request_mem_region(regs_res->start,
+ resource_size(regs_res),
+ dev_name(parent))) {
+ msg = "failed to reserve registers";
+ goto err;
+ }
+
+ hxdev->regs_virt = (unsigned long) ioremap(regs_res->start,
+ resource_size(regs_res));
+ if (hxdev->regs_virt) {
+ hxdev->regs_res = regs_res;
+ return 0;
+ }
+
+ msg = "failed to remap registers";
+ release_mem_region(regs_res->start, resource_size(regs_res));
+
+err:
+ dev_dbg(parent, "%s region.\n", msg);
+ dev_err(parent, "failed to setup memory mappings (%d).\n", err);
+ return err;
+}
+
+static void hx_exit_maps(struct hx_device const* hxdev)
+{
+ if (! hxdev->regs_res)
+ return;
+
+ iounmap((void*) hxdev->regs_virt);
+ release_mem_region(hxdev->regs_res->start,
+ resource_size(hxdev->regs_res));
+}
+
+static irqreturn_t hx_handle_irq(int irq, void* dev_id)
+{
+ struct hx_device const* const hxdev = (struct hx_device*) dev_id;
+ int irqs = 0;
+ int n;
+
+ for (n = 0; n < hxdev->unit_cnt; n++) {
+ struct hx_unit* const unit = &hxdev->units[n];
+ int const happened = unit->handle_irq(hxdev);
+
+ if (! happened)
+ continue;
+
+ irqs += happened;
+ if (! unit->refcnt)
+ continue;
+
+ unit->irqcnt++;
+ wake_up_interruptible(&unit->waitq);
+ kill_fasync(&unit->asyncq, SIGIO, POLL_IN);
+ }
+
+ if (likely(irqs))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static void hx_release_dev(struct device* device)
+{
+}
+
+void hx_init_unit(struct hx_unit* unit,
+ int (*handle_irq)(struct hx_device const*),
+ void (*setup_irq)(struct hx_device const*, int))
+{
+ unit->refcnt = 0;
+ unit->irqcnt = 0;
+ unit->handle_irq = handle_irq;
+ sema_init(&unit->sem, 1);
+ unit->owner = NULL;
+ init_waitqueue_head(&unit->waitq);
+ unit->asyncq = 0;
+ unit->setup_irq = setup_irq;
+}
+EXPORT_SYMBOL(hx_init_unit);
+
+int _hx_probe(struct platform_device* pdev,
+              struct module* owner,
+              char const* name,
+              struct hx_unit* units,
+              size_t unit_count,
+              int (*init_hw)(struct hx_device const*))
+{
+	struct device* const    parent = &pdev->dev;
+	struct clk* const       clk = clk_get(&pdev->dev, NULL);
+	struct hx_device*       hxdev;
+	int                     err;
+
+	if (IS_ERR(clk)) {
+		dev_dbg(parent, "failed to get clock.\n");
+		err = PTR_ERR(clk);
+		goto err;
+	}
+
+	hxdev = kzalloc(sizeof(*hxdev), GFP_KERNEL);
+	if (! hxdev) {
+		dev_dbg(parent, "failed to allocate device.\n");
+		err = -ENOMEM;
+		goto put;
+	}
+
+	hxdev->irq = platform_get_irq(pdev, 0);
+	BUG_ON(hxdev->irq < 0);
+	err = hx_init_maps(hxdev, pdev);
+	if (err)
+		goto free;
+
+	err = clk_prepare_enable(clk);
+	if (err) {
+		dev_dbg(parent, "failed to enable clock.\n");
+		goto exit;
+	}
+
+	spin_lock_init(&hxdev->unit_lock);
+	hxdev->unit_cnt = unit_count;
+	hxdev->units = units;
+	mutex_init(&hxdev->vm_lock);
+	hxdev->device.class = hx_class;
+	hxdev->device.parent = parent;
+	hxdev->device.release = &hx_release_dev;
+	hxdev->owner = owner;
+
+	/* Give top-level driver a chance to perform specific initializations. */
+	if (init_hw) {
+		err = (*init_hw)(hxdev);
+		if (err)
+			goto clk; /* clock is enabled by now: disable it on error */
+	}
+
+	err = request_irq(hxdev->irq,
+	                  &hx_handle_irq,
+	                  0,
+	                  dev_name(parent),
+	                  hxdev);
+	if (err) {
+		dev_dbg(parent, "failed to reserve irq %d.\n", hxdev->irq);
+		goto clk;
+	}
+
+	err = hx_register_dev(hxdev, name, pdev->id);
+	if (! err) {
+		clk_put(clk);
+		parent->platform_data = hxdev;
+		return 0;
+	}
+
+clk:
+	clk_disable_unprepare(clk);
+exit:
+	hx_exit_maps(hxdev);
+free:
+	kfree(hxdev);
+put:
+	clk_put(clk);
+err:
+	return err;
+}
+EXPORT_SYMBOL(_hx_probe);
+
+int hx_remove(struct platform_device* pdev)
+{
+ struct device* const parent = &pdev->dev;
+ struct hx_device* const hxdev = (struct hx_device*) dev_get_platdata(parent);
+ struct clk* const clk = clk_get(parent, NULL);
+ int err = 0;
+
+ if (IS_ERR(clk)) {
+ dev_dbg(parent, "failed to get clock.\n");
+ return PTR_ERR(clk);
+ }
+
+ err = hx_unregister_dev(hxdev);
+ if (err) {
+ dev_dbg(parent, "failed to unregister device.\n");
+ goto put;
+ }
+
+ clk_disable_unprepare(clk);
+ free_irq(hxdev->irq, hxdev);
+ hx_exit_maps(hxdev);
+ kfree(hxdev);
+
+put:
+ clk_put(clk);
+ if (err)
+ dev_err(parent, "failed to remove device (%d).\n", err);
+ return err;
+}
+EXPORT_SYMBOL(hx_remove);
+
+int hx_suspend(struct platform_device* pdev)
+{
+ struct device* const parent = &pdev->dev;
+ struct clk* const clk = clk_get(parent, NULL);
+
+ clk_disable_unprepare(clk);
+ return 0;
+}
+EXPORT_SYMBOL(hx_suspend);
+
+int hx_resume(struct platform_device* pdev,
+ int (*init_hw)(struct hx_device const*))
+{
+ struct device* const parent = &pdev->dev;
+ struct hx_device* const hxdev = (struct hx_device*) dev_get_platdata(parent);
+ struct clk* const clk = clk_get(parent, NULL);
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (!ret && init_hw) {
+ ret = (*init_hw)(hxdev);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(hx_resume);
+
+static unsigned int hx_enable_notif(struct hx_unit* notifier,
+ struct hx_device const* hxdev)
+{
+ if (! notifier->refcnt)
+ notifier->setup_irq(hxdev, 1);
+ notifier->refcnt++;
+
+ return notifier->irqcnt;
+}
+
+
+static void hx_disable_notif(struct hx_unit* notifier,
+ struct hx_device const* hxdev)
+{
+ if (! (--notifier->refcnt))
+ notifier->setup_irq(hxdev, 0);
+}
+
+static long hx_ioctl(struct file* filep, unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct hx_unit* unit = NULL;
+ struct hx_file* const priv = (struct hx_file*) filep->private_data;
+ struct hx_device* const hxdev = priv->hxdev;
+
+ if (unlikely(arg >= hxdev->unit_cnt)) {
+ /* Fix your userland driver !! */
+ static DEFINE_RATELIMIT_STATE(rate,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rate))
+ dev_warn(hxdev->device.parent,
+ "unknown (un)lock command %lu request from %16s[%5d].\n",
+ arg,
+ current->comm,
+ task_pid_nr(current));
+
+ return -EINVAL;
+ }
+
+ unit = &hxdev->units[arg];
+
+ switch (cmd) {
+ case HX_LOCK :
+ if (unit->owner == filep->private_data)
+ return -EPERM;
+
+ err = down_interruptible(&unit->sem);
+ if (err == -EINTR)
+ return -ERESTARTSYS;
+
+ unit->owner = filep->private_data;
+
+ /* Userspace library can reserve both decoder and post-processor
+ * in pipelined mode. Since we only enable 'notification' for
+ * one unit (decoder or pp), we shall only update private file
+ * irqcnt field if unit we try to lock match the one notified.
+ */
+ if (priv->notifier == unit)
+ priv->irqcnt = unit->irqcnt;
+ break;
+
+ case HX_UNLOCK :
+ if (unit->owner != filep->private_data)
+ return -EPERM;
+
+ unit->owner = NULL;
+ up(&unit->sem);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static ssize_t hx_write(struct file* filep,
+ char const __user* buf,
+ size_t count,
+ loff_t* ppos)
+{
+ int err;
+ unsigned int command;
+ struct hx_file* const priv = filep->private_data;
+ struct hx_device* const hxdev = priv->hxdev;
+ struct hx_unit* notif = NULL;
+
+ if (count != sizeof(command))
+ return -EINVAL;
+
+ err = get_user(command, (unsigned int __user*) buf);
+ if (err)
+ return err;
+
+ if (unlikely(command > hxdev->unit_cnt)) {
+ /* Fix your userland driver !! */
+ static DEFINE_RATELIMIT_STATE(rate,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (__ratelimit(&rate))
+ dev_warn(hxdev->device.parent,
+ "unknown interrupt setup command %d request "
+ "from %16s[%5d].\n",
+ command,
+ current->comm,
+ task_pid_nr(current));
+
+ return -EIO;
+ }
+
+ if (command) {
+ notif = &hxdev->units[command - 1];
+ if (priv->notifier != notif) {
+ spin_lock_irq(&hxdev->unit_lock);
+ if (priv->notifier)
+ hx_disable_notif(priv->notifier, hxdev);
+ priv->irqcnt = hx_enable_notif(notif, hxdev);
+ spin_unlock_irq(&hxdev->unit_lock);
+ }
+ }
+ else {
+ if (priv->notifier) {
+ spin_lock_irq(&hxdev->unit_lock);
+ hx_disable_notif(priv->notifier, hxdev);
+ spin_unlock_irq(&hxdev->unit_lock);
+ }
+ }
+
+ priv->notifier = notif;
+ return sizeof(command);
+}
+
+static inline int hx_saw_irqs(unsigned int priv_count,
+ unsigned int volatile const* notif_count,
+ unsigned int* irq_count,
+ spinlock_t* lock)
+
+{
+ unsigned int seen;
+
+ spin_lock_irq(lock);
+ *irq_count = *notif_count;
+ seen = (*irq_count != priv_count);
+ spin_unlock_irq(lock);
+
+ return seen;
+}
+
+static ssize_t hx_read(struct file* filep,
+ char __user* buf,
+ size_t count,
+ loff_t* ppos)
+{
+ struct hx_file* const priv = filep->private_data;
+ struct hx_unit* const notif = priv->notifier;
+ unsigned int irqs;
+ int err;
+
+ if (count != sizeof(irqs))
+ return -EINVAL;
+
+ if (! notif)
+ return -EIO;
+
+ if (! (filep->f_flags & O_NONBLOCK)) {
+ switch (wait_event_interruptible_timeout(notif->waitq,
+ hx_saw_irqs(priv->irqcnt,
+ &notif->irqcnt,
+ &irqs,
+ &priv->hxdev->unit_lock),
+ HZ / 2)) {
+ case -ERESTARTSYS:
+ return -ERESTARTSYS;
+ case 0:
+ return -ETIMEDOUT;
+ }
+ }
+ else {
+ if (! hx_saw_irqs(priv->irqcnt,
+ &notif->irqcnt,
+ &irqs,
+ &priv->hxdev->unit_lock))
+ return -EAGAIN;
+ }
+
+ err = put_user(irqs, (unsigned int __user*) buf);
+ if (err)
+ return err;
+
+ priv->irqcnt = irqs;
+ return sizeof(irqs);
+}
+
+static int hx_release(struct inode* inode, struct file* filep)
+{
+ struct hx_file* const priv = filep->private_data;
+ struct hx_device* const hxdev = priv->hxdev;
+ unsigned int no;
+
+ for (no = 0; no < hxdev->unit_cnt; no++) {
+ struct hx_unit* const unit = &hxdev->units[no];
+
+ if (unit->owner == priv) {
+ /* We own the lock: release it. */
+ unit->owner = NULL;
+ up(&unit->sem);
+ }
+ }
+
+ if (priv->notifier) {
+ spin_lock_irq(&hxdev->unit_lock);
+ hx_disable_notif(priv->notifier, hxdev);
+ spin_unlock_irq(&hxdev->unit_lock);
+ }
+
+ hx_put_dev(hxdev);
+ kfree(priv);
+ dev_dbg(hxdev->device.parent, "device closed.\n");
+ return 0;
+}
+
+static int hx_open(struct inode* inode, struct file* filep)
+{
+ struct hx_device* const hxdev = hx_get_dev(iminor(inode));
+ struct hx_file* priv;
+
+ if (! hxdev)
+ return -ENODEV;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (unlikely(! priv)) {
+ hx_put_dev(hxdev);
+ return -ENOMEM;
+ }
+
+ priv->hxdev = hxdev;
+ priv->notifier = NULL;
+ priv->irqcnt = 0;
+ filep->private_data = priv;
+
+ dev_dbg(hxdev->device.parent, "device opened.\n");
+ return 0;
+}
+
+static unsigned int hx_poll(struct file* filep, poll_table* wait)
+{
+ struct hx_file* const priv = filep->private_data;
+ spinlock_t* const lck = &priv->hxdev->unit_lock;
+ struct hx_unit* const notif = priv->notifier;
+ unsigned int irqcnt;
+
+ if (! notif)
+ return POLLERR;
+
+ poll_wait(filep, &notif->waitq, wait);
+
+ spin_lock_irq(lck);
+ irqcnt = notif->irqcnt;
+ spin_unlock_irq(lck);
+
+ if (priv->irqcnt != irqcnt)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static int hx_fasync(int fd, struct file* filep, int on)
+{
+ struct hx_unit* const notif = ((struct hx_file*)
+ filep->private_data)->notifier;
+
+ if (! notif)
+ return -EIO;
+
+ return fasync_helper(fd, filep, on, &notif->asyncq);
+}
+
+static struct file_operations const hx_fops = {
+ .owner = THIS_MODULE,
+ .open = hx_open,
+ .release = hx_release,
+ .read = hx_read,
+ .write = hx_write,
+ .mmap = hx_mmap,
+ .poll = hx_poll,
+ .fasync = hx_fasync,
+ .unlocked_ioctl = hx_ioctl,
+};
+
+static int __init hx_init(void)
+{
+ hx_major = register_chrdev(0, DRV_NAME, &hx_fops);
+ if (hx_major < 0)
+ return hx_major;
+
+ hx_class = class_create(THIS_MODULE, DRV_NAME);
+ if (! IS_ERR(hx_class))
+ return 0;
+
+ unregister_chrdev(hx_major, DRV_NAME);
+ return PTR_ERR(hx_class);
+}
+
+static void __exit hx_exit(void)
+{
+ class_destroy(hx_class);
+ unregister_chrdev(hx_major, DRV_NAME);
+}
+
+module_init(hx_init);
+module_exit(hx_exit);
+
+MODULE_AUTHOR("Gregor Boirie");
+MODULE_DESCRIPTION("On2 / Hantro video userspace I/O platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/uio/hx-uio.h b/drivers/parrot/uio/hx-uio.h
new file mode 100644
index 00000000000000..df47e8a415f67a
--- /dev/null
+++ b/drivers/parrot/uio/hx-uio.h
@@ -0,0 +1,76 @@
+/**
+ * linux/driver/parrot/uio/hx-uio.h - On2 / Hantro user I/O
+ * interface
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 30-Aug-2011
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _HX_UIO_H
+#define _HX_UIO_H
+
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/semaphore.h>
+
+#define HX_IO_MAGIC 'k'
+
+#define HX_LOCK _IO(HX_IO_MAGIC, 1)
+#define HX_UNLOCK _IO(HX_IO_MAGIC, 2)
+
+struct hx_device;
+struct platform_device;
+
+struct hx_unit {
+ unsigned int refcnt;
+ unsigned int irqcnt;
+ int (*handle_irq)(struct hx_device const*);
+ struct semaphore sem;
+ void const* owner;
+ wait_queue_head_t waitq;
+ struct fasync_struct* asyncq;
+ void (*setup_irq)(struct hx_device const*, int);
+};
+
+struct hx_device {
+ spinlock_t unit_lock;
+ size_t unit_cnt;
+ struct hx_unit* units;
+ struct mutex vm_lock;
+ unsigned long regs_virt;
+ struct resource const* regs_res;
+ struct device device;
+ struct module* owner;
+ int irq;
+};
+
+extern void hx_init_unit(struct hx_unit*,
+ int (*)(struct hx_device const*),
+ void (*)(struct hx_device const*, int));
+
+extern int _hx_probe(struct platform_device*,
+ struct module*,
+ char const* name,
+ struct hx_unit*,
+ size_t,
+ int (*)(struct hx_device const*));
+
+#define hx_probe(_pdev, _name, _unit, _cnt, _init) \
+ _hx_probe(_pdev, THIS_MODULE, _name, _unit, _cnt, _init)
+
+int hx_remove(struct platform_device*);
+
+int hx_suspend(struct platform_device* pdev);
+int hx_resume(struct platform_device* pdev,
+ int (*init_hw)(struct hx_device const*));
+
+#endif
diff --git a/drivers/parrot/uio/hx270-vdec.c b/drivers/parrot/uio/hx270-vdec.c
new file mode 100644
index 00000000000000..80be90f6239884
--- /dev/null
+++ b/drivers/parrot/uio/hx270-vdec.c
@@ -0,0 +1,183 @@
+/**
+ * linux/driver/parrot/uio/hx270-vdec.c - On2 / Hantro user I/O video decoder
+ * implementation
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 31-Aug-2011
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "hx-uio.h"
+#include "hx270-vdec.h"
+
+#define DRV_VERSION "0.1"
+
+#define HX270_REGS_NR 101
+#define HX270_ID 0x0
+#define HX270_ID_PRODUCT_SHIFT 16
+#define HX270_DECIRQ 0x4
+#define HX270_DECIRQ_IRQDISABLE (1U << 4)
+#define HX270_DECIRQ_IRQ (1U << 8)
+#define HX270_PPIRQ 0xf0
+#define HX270_PPIRQ_IRQDISABLE (1U << 4)
+#define HX270_PPIRQ_IRQ (1U << 8)
+
+static int hx270_handle_decirq(struct hx_device const* hxdev)
+{
+ unsigned long const reg = hxdev->regs_virt + HX270_DECIRQ;
+ u32 const status = readl_relaxed(reg);
+
+ if (! (status & HX270_DECIRQ_IRQ))
+ return 0;
+
+ /* Acknowledge interrupt. */
+ writel_relaxed(status & ~HX270_DECIRQ_IRQ, reg);
+ return 1;
+}
+
+static void hx270_setup_decirq(struct hx_device const* hxdev, int command)
+{
+ unsigned long const reg = hxdev->regs_virt + HX270_DECIRQ;
+ u32 const word = readl(reg);
+
+ if (! command)
+ /* Disable interrupts generation. */
+ writel_relaxed(word | HX270_DECIRQ_IRQDISABLE, reg);
+ else
+		/* Enable interrupts generation. */
+ writel_relaxed(word & ~HX270_DECIRQ_IRQDISABLE, reg);
+}
+
+static int hx270_handle_ppirq(struct hx_device const* hxdev)
+{
+ unsigned long const reg = hxdev->regs_virt + HX270_PPIRQ;
+ u32 const status = readl_relaxed(reg);
+
+ if (! (status & HX270_PPIRQ_IRQ))
+ return 0;
+
+ /* Acknowledge interrupt. */
+ writel_relaxed(status & ~HX270_PPIRQ_IRQ, reg);
+ return 1;
+}
+
+static void hx270_setup_ppirq(struct hx_device const* hxdev, int command)
+{
+ unsigned long const reg = hxdev->regs_virt + HX270_PPIRQ;
+ u32 const word = readl(reg);
+
+ if (! command)
+ /* Disable interrupts generation. */
+ writel_relaxed(word | HX270_PPIRQ_IRQDISABLE, reg);
+ else
+		/* Enable interrupts generation. */
+ writel_relaxed(word & ~HX270_PPIRQ_IRQDISABLE, reg);
+}
+
+static int __devinit hx270_init_hw(struct hx_device const* hxdev)
+{
+ static unsigned short const ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
+ unsigned long const regs = hxdev->regs_virt;
+ u32 const id = readl(regs + HX270_ID);
+ unsigned int i;
+
+ /* Check hardware id. */
+ for (i = 0; i < ARRAY_SIZE(ids); i++) {
+ if (((unsigned short) (id >> HX270_ID_PRODUCT_SHIFT)) == ids[i])
+ break;
+ }
+ if (i >= ARRAY_SIZE(ids)) {
+ dev_err(hxdev->device.parent,
+ "failed to find valid hardware id (0x%08x).\n",
+ id);
+ return -ENODEV;
+ }
+ dev_info(hxdev->device.parent,
+ "found hx270 video decoder (0x%08x).\n",
+ id);
+
+ /* Disable & acknowledge decoder interrupts, stop decoder. */
+ writel(HX270_DECIRQ_IRQDISABLE, regs + HX270_DECIRQ);
+ /* Disable & acknowledge post-processor interrupts, stop post-processor. */
+ writel(HX270_PPIRQ_IRQDISABLE, regs + HX270_PPIRQ);
+
+ /* FIXME: is this really necessary ?
+ * Clear all remaining decoder registers... */
+ for (i = 2 * sizeof(u32);
+ i < (sizeof(u32) * 60);
+ i += sizeof(u32))
+ writel_relaxed(0, regs + i);
+ /* ...and post-processor registers. */
+ for (i = 61 * sizeof(u32);
+ i < (sizeof(u32) * HX270_REGS_NR);
+ i += sizeof(u32))
+ writel_relaxed(0, regs + i);
+ wmb();
+
+ return 0;
+}
+
+static int __devinit hx270_probe(struct platform_device* pdev)
+{
+ int err;
+ struct hx_unit* const units = kmalloc(2 * sizeof(*units), GFP_KERNEL);
+
+ if (! units)
+ return -ENOMEM;
+
+ hx_init_unit(&units[0], &hx270_handle_decirq, &hx270_setup_decirq);
+ hx_init_unit(&units[1], &hx270_handle_ppirq, &hx270_setup_ppirq);
+ err = hx_probe(pdev, "hx270", units, 2, &hx270_init_hw);
+ if (err)
+ kfree(units);
+
+ return err;
+}
+
+static int __devexit hx270_remove(struct platform_device* pdev)
+{
+ int err;
+ struct hx_unit* const units = ((struct hx_device*)
+ dev_get_platdata(&pdev->dev))->units;
+
+ err = hx_remove(pdev);
+ if (! err)
+ kfree(units);
+
+ return err;
+}
+static int hx270_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return hx_suspend(pdev);
+}
+static int hx270_resume(struct platform_device *pdev)
+{
+ return hx_resume(pdev, &hx270_init_hw);
+}
+
+static struct platform_driver hx270_driver = {
+ .probe = hx270_probe,
+ .remove = __devexit_p(hx270_remove),
+ .driver = {
+ .name = HX270_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ /* TODO: add support for optional suspend / resume. */
+ .suspend = hx270_suspend,
+ .resume = hx270_resume
+};
+module_platform_driver(hx270_driver);
+
+MODULE_AUTHOR("Gregor Boirie");
+MODULE_DESCRIPTION("On2 / Hantro video decoder / post-processor "
+ "userspace I/O platform driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/uio/hx270-vdec.h b/drivers/parrot/uio/hx270-vdec.h
new file mode 100644
index 00000000000000..0a00f26db37abe
--- /dev/null
+++ b/drivers/parrot/uio/hx270-vdec.h
@@ -0,0 +1,23 @@
+/**
+ * linux/driver/parrot/uio/hx270-vdec.h - On2 / Hantro user I/O video decoder
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 18-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _HX270_UIO_H
+#define _HX270_UIO_H
+
+#if defined(CONFIG_UIO_HX270) || \
+ defined(CONFIG_UIO_HX270_MODULE)
+
+#define HX270_DRV_NAME "hx270-vdec"
+
+#endif
+
+#endif
diff --git a/drivers/parrot/uio/hx280-venc.c b/drivers/parrot/uio/hx280-venc.c
new file mode 100644
index 00000000000000..476f83d8b0d0e2
--- /dev/null
+++ b/drivers/parrot/uio/hx280-venc.c
@@ -0,0 +1,149 @@
+/**
+ * linux/driver/parrot/uio/hx280-venc.c - On2 / Hantro user I/O video encoder
+ * implementation
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 31-Aug-2011
+ *
+ * This file is released under the GPL
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "hx-uio.h"
+#include "hx280-venc.h"
+
+#define DRV_VERSION "0.1"
+
+#define HX280_PRODUCT_ID_R1 (0x72800000U)
+#define HX280_PRODUCT_ID_R2 (0x48310000U)
+
+#define HX280_REGS_NR 14
+#define HX280_ID 0x0
+#define HX280_ID_PRODUCT_MSK (0xffff0000U)
+#define HX280_IRQ 0x4
+#define HX280_IRQ_HINTENC (1U << 0)
+#define HX280_IRQ_INTDISABLE (1U << 1)
+#define HX280_IRQ_SLICE_STATUS (1U << 8)
+#define HX280_COMCTRL 0x38
+
+static int hx280_handle_irq(struct hx_device const* hxdev)
+{
+ unsigned long const reg = hxdev->regs_virt + HX280_IRQ;
+ u32 const status = readl_relaxed(reg);
+
+ if (unlikely(! (status & HX280_IRQ_HINTENC)))
+ /* Spurious ? */
+ return 0;
+
+ writel_relaxed(status & ~(HX280_IRQ_SLICE_STATUS | HX280_IRQ_HINTENC),
+ reg);
+
+ return 1;
+}
+
+static void hx280_setup_irq(struct hx_device const* hxdev, int command)
+{
+ unsigned long const reg = hxdev->regs_virt + HX280_IRQ;
+ u32 const word = readl(reg);
+
+ if (! command)
+ /* Disable interrupts generation. */
+ writel_relaxed(word | HX280_IRQ_INTDISABLE, reg);
+ else
+		/* Enable interrupts generation. */
+ writel_relaxed(word & ~HX280_IRQ_INTDISABLE, reg);
+}
+
+static int __devinit hx280_init_hw(struct hx_device const* hxdev)
+{
+ unsigned long const regs = hxdev->regs_virt;
+ u32 const id = readl(regs + HX280_ID);
+ unsigned int r;
+
+ /* Check hardware id. */
+ if ((id & HX280_ID_PRODUCT_MSK) != HX280_PRODUCT_ID_R1 &&
+ (id & HX280_ID_PRODUCT_MSK) != HX280_PRODUCT_ID_R2) {
+ dev_err(hxdev->device.parent,
+ "failed to find valid hardware id (0x%08x).\n",
+ id);
+ return -ENODEV;
+ }
+ dev_info(hxdev->device.parent,
+ "found hx280 video encoder (0x%08x).\n",
+ id);
+
+ /* Stop processing. */
+ writel_relaxed(0, regs + HX280_COMCTRL);
+ /* Disable & acknowledge interrupts. */
+ writel(HX280_IRQ_INTDISABLE, regs + HX280_IRQ);
+ /* Clear all remaining registers. FIXME: is this really necessary ? */
+ for (r = 2 * sizeof(u32);
+ r < (sizeof(u32) * HX280_REGS_NR);
+ r += sizeof(u32))
+ writel_relaxed(0, regs + r);
+ wmb();
+
+ return 0;
+}
+
+static int __devinit hx280_probe(struct platform_device* pdev)
+{
+ int err;
+ struct hx_unit* const unit = kmalloc(sizeof(*unit), GFP_KERNEL);
+
+ if (! unit)
+ return -ENOMEM;
+
+ hx_init_unit(unit, &hx280_handle_irq, &hx280_setup_irq);
+ err = hx_probe(pdev, "hx280", unit, 1, &hx280_init_hw);
+ if (err)
+ kfree(unit);
+
+ return err;
+}
+
+static int __devexit hx280_remove(struct platform_device* pdev)
+{
+ int err;
+ struct hx_unit* const unit = ((struct hx_device*)
+ dev_get_platdata(&pdev->dev))->units;
+
+ err = hx_remove(pdev);
+ if (! err)
+ kfree(unit);
+
+ return err;
+}
+
+static int hx280_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return hx_suspend(pdev);
+}
+static int hx280_resume(struct platform_device *pdev)
+{
+ return hx_resume(pdev, &hx280_init_hw);
+}
+
+static struct platform_driver hx280_driver = {
+ .probe = hx280_probe,
+ .remove = __devexit_p(hx280_remove),
+ .driver = {
+ .name = HX280_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .suspend = &hx280_suspend,
+ .resume = &hx280_resume
+};
+module_platform_driver(hx280_driver);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("On2 / Hantro video encoder "
+ "userspace I/O platform driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/uio/hx280-venc.h b/drivers/parrot/uio/hx280-venc.h
new file mode 100644
index 00000000000000..cb0cdaaaa5831b
--- /dev/null
+++ b/drivers/parrot/uio/hx280-venc.h
@@ -0,0 +1,23 @@
+/**
+ * linux/driver/parrot/uio/hx280-venc.h - On2 / Hantro user I/O video encoder
+ * interface
+ *
+ * Copyright (C) 2012 Parrot S.A.
+ *
+ * author: Gregor Boirie <gregor.boirie@parrot.com>
+ * date: 18-Jun-2012
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _HX280_UIO_H
+#define _HX280_UIO_H
+
+#if defined(CONFIG_UIO_HX280) || \
+ defined(CONFIG_UIO_HX280_MODULE)
+
+#define HX280_DRV_NAME "hx280-venc"
+
+#endif
+
+#endif
diff --git a/drivers/parrot/usb/serial/blackberry/Kconfig b/drivers/parrot/usb/serial/blackberry/Kconfig
new file mode 100644
index 00000000000000..a33042151f289a
--- /dev/null
+++ b/drivers/parrot/usb/serial/blackberry/Kconfig
@@ -0,0 +1,6 @@
+config BLACKBERRY
+ tristate "Blackberry usb serial driver"
+ depends on USB_SERIAL
+	help
+	  Driver which provides access to a Blackberry via its proprietary USB
+	  interface using ttys
diff --git a/drivers/parrot/usb/serial/blackberry/Makefile b/drivers/parrot/usb/serial/blackberry/Makefile
new file mode 100644
index 00000000000000..701a5e209bd958
--- /dev/null
+++ b/drivers/parrot/usb/serial/blackberry/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BLACKBERRY) += blackberry.o
diff --git a/drivers/parrot/usb/serial/blackberry/blackberry.c b/drivers/parrot/usb/serial/blackberry/blackberry.c
new file mode 100644
index 00000000000000..9c6f0cb56b63fa
--- /dev/null
+++ b/drivers/parrot/usb/serial/blackberry/blackberry.c
@@ -0,0 +1,65 @@
+/*
+ * Blackberry USB Phone driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2011 Parrot <carrier.nicolas0@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * {sigh}
+ * Based on the moto-modem driver, provides a "dumb serial connection" to a
+ * Blackberry with as many ttys as half the number of endpoints. The IP modem
+ * typically corresponds to the third tty created.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+#define DRIVER_NAME "blackberry"
+#define VENDOR_ID 0x0fca
+
+/* All RIM devices (vendor 0x0fca) exposing the proprietary vendor-specific
+ * interface (class 0xff, subclass 1, protocol 0xff). */
+static struct usb_device_id id_table [] = {
+        {USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, 0x0001, 0xff, 1, 0xff)},
+        {USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, 0x0004, 0xff, 1, 0xff)},
+        {USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, 0x8001, 0xff, 1, 0xff)},
+        {USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, 0x8004, 0xff, 1, 0xff)},
+        {USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, 0x8007, 0xff, 1, 0xff)},
+        {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+/* USB-level driver: probing/disconnect are handled by the usb-serial core. */
+static struct usb_driver bb_driver = {
+        .name = DRIVER_NAME,
+        .probe = usb_serial_probe,
+        .disconnect = usb_serial_disconnect,
+        .id_table = id_table,
+        .no_dynamic_id = 1,
+};
+
+/* One tty per IN/OUT endpoint pair: half the endpoints of the current
+ * altsetting (cf. the file header comment). */
+static int bb_calc_num_ports(struct usb_serial *serial)
+{
+        struct usb_host_interface *alt = serial->interface->cur_altsetting;
+
+        return alt->desc.bNumEndpoints / 2;
+}
+
+/* usb-serial driver: everything but the port count uses generic handling. */
+static struct usb_serial_driver bb_serial_driver = {
+        .driver = {
+                .owner = THIS_MODULE,
+                .name = DRIVER_NAME,
+        },
+        .id_table = id_table,
+        .usb_driver = &bb_driver,
+        .calc_num_ports = bb_calc_num_ports,
+};
+
+/* NULL-terminated list handed to the usb-serial registration helper. */
+static struct usb_serial_driver * const serial_drivers[] = {
+        &bb_serial_driver, NULL
+};
+
+module_usb_serial_driver(bb_driver, serial_drivers);
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/Kconfig b/drivers/parrot/video/Kconfig
new file mode 100644
index 00000000000000..c0d177527e50c5
--- /dev/null
+++ b/drivers/parrot/video/Kconfig
@@ -0,0 +1,60 @@
+menuconfig AVI
+ tristate "Parrot Advanced Video Interface support"
+ default n
+
+config AVI_DEBUG
+ bool "Compile AVI for debugging purposes"
+ depends on AVI
+ default n
+ help
+ Enable this if you want AVI drivers with debug info
+
+config AVI_DEBUG_LOG
+ bool "Enable debug log"
+ depends on AVI_DEBUG
+ default n
+ help
+	  Enable this if you want AVI drivers to output debug messages and perform
+	  additional consistency checks.
+
+config AVI_DEBUGFS
+ tristate "Enable debugging info via debugfs"
+ depends on AVI_DEBUG && DEBUG_FS
+ depends on AVI
+ help
+ Enable this if you want debugging information using the debug
+ filesystem (debugfs)
+
+config AVI_LOGGER
+ bool "Log all AVI register access in debugfs"
+ depends on AVI_DEBUGFS
+ default n
+ help
+ Enable this if you want to have access to a circular buffer holding
+ all register access (R/W) through the debugfs.
+
+config FB_AVI
+ tristate "Parrot Advanced Video Interface framebuffer support"
+ depends on FB && AVI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Enable support for AVI framebuffer.
+
+config FB_AVI_PAN_WAITS_FOR_VSYNC
+ bool "Make AVI framebuffer PAN operations wait for VBL by default"
+ depends on FB_AVI
+ default y
+ help
+ Some userland applications expect the FBIOPAN_DISPLAY ioctl to block
+ until the PAN operation is effective, i.e. at the next VBL. This
+ options sets the default, it is configurable at runtime via sysfs.
+
+config VSYNC_GEN_AVI
+ tristate "Parrot Advanced Video Interface vsync generator support"
+ depends on AVI
+ help
+ Enable support for the AVI vsync generator used to synchronize slave
+ cameras.
+
diff --git a/drivers/parrot/video/Makefile b/drivers/parrot/video/Makefile
new file mode 100644
index 00000000000000..29f8db97deeb26
--- /dev/null
+++ b/drivers/parrot/video/Makefile
@@ -0,0 +1,20 @@
+ccflags-$(CONFIG_AVI_DEBUG_LOG) += -DDEBUG
+
+# avicore.ko bundles the AVI core plus optional debug/logger objects.
+avicore-objs := avi.o avi_compat.o avi_scaler.o avi_isp.o
+avicore-objs += avi_segment.o avi_cap.o avi_chroma.o
+avicore-objs += avi_dma.o avi_pixfmt.o
+avicore-objs += avi_limit.o
+
+ifeq ($(CONFIG_AVI_DEBUG),y)
+avicore-objs += avi_debug.o
+endif
+
+ifeq ($(CONFIG_AVI_LOGGER),y)
+avicore-objs += avi_logger.o
+endif
+
+obj-$(CONFIG_AVI) += avicore.o
+
+obj-$(CONFIG_AVI_DEBUGFS) += avi_debugfs.o
+obj-$(CONFIG_FB_AVI) += avifb.o
+obj-$(CONFIG_VSYNC_GEN_AVI) += avi_vsync_gen.o
diff --git a/drivers/parrot/video/avi.c b/drivers/parrot/video/avi.c
new file mode 100644
index 00000000000000..5ff2ac75cb9a7e
--- /dev/null
+++ b/drivers/parrot/video/avi.c
@@ -0,0 +1,1587 @@
+/*
+ * linux/drivers/parrot/video/avi.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 18-Feb-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <mach/irqs.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "avi_compat.h"
+#include "reg_avi.h"
+#include "avi_logger.h"
+
+/* If the AVI must share (some of) its IRQs */
+/*#define AVI_IRQ_SHARED */
+
+/* Configuration node: gate bit 0 of the first CFG enable word; it is neither
+ * a source nor a sink in the interconnect. */
+static struct avi_node avi_cfg = {
+        .type = AVI_CFG_NODE_TYPE,
+        .src_id = AVI_SRCSINK_NONE,
+        .sink_id = AVI_SRCSINK_NONE,
+        .base_off = AVI_CFG,
+        .cfg_off = 0,
+        .cfg_mask = 1UL << 0,
+        .name = "CFG",
+        .irq_offset = 0,
+        .node_id = AVI_CFG_NODE
+};
+
+/* Interconnect node: gate bit 1 of the first CFG enable word. */
+static struct avi_node avi_inter = {
+        .type = AVI_INTER_NODE_TYPE,
+        .src_id = AVI_SRCSINK_NONE,
+        .sink_id = AVI_SRCSINK_NONE,
+        .base_off = AVI_INTER,
+        .cfg_off = 0,
+        .cfg_mask = 1UL << 1,
+        .name = "INTER",
+        .irq_offset = 1,
+        .node_id = AVI_INTER_NODE
+};
+
+/* Extend avi_node to add fifo capabilities */
+struct avi_fifo
+{
+        struct avi_node avi_node;
+        /* FIFO capability flags; never written in this file — presumably
+         * filled in by platform code. TODO(review): confirm. */
+        int capabilities;
+};
+
+/* Return the struct avi_fifo wrapping an AVI_FIFO_NODE_TYPE node. */
+static __maybe_unused struct avi_fifo *avi_get_fifo(struct avi_node *n)
+{
+        /* The check used to read "BUG_ON(!n->type == AVI_FIFO_NODE_TYPE)":
+         * '!' binds tighter than '==' so it compared 0/1 against the type
+         * constant and could never trigger. Check the type the same way the
+         * LCD accessors below do. */
+        BUG_ON(n->type != AVI_FIFO_NODE_TYPE);
+        return container_of(n, struct avi_fifo, avi_node);
+}
+
+/* DMA FIFO nodes: FIFO _no gates bit (_no + 2) of the first CFG enable word
+ * and uses IRQ slot (_no + 2). Names are zero-padded to two digits. */
+#define AVI_DEFINE_FIFO(_no) \
+        struct avi_node avi_fifo ## _no = { \
+                .type = AVI_FIFO_NODE_TYPE, \
+                .src_id = _no + 1, \
+                .sink_id = _no, \
+                .base_off = AVI_FIFO ## _no, \
+                .cfg_off = 0, \
+                .cfg_mask = 1UL << ((_no) + 2), \
+                .name = (_no < 10 ? ("FIFO0" #_no) : ("FIFO" #_no)), \
+                .irq_offset = 2 + (_no), \
+                .node_id = AVI_FIFO00_NODE + _no \
+        }
+
+static AVI_DEFINE_FIFO(0);
+static AVI_DEFINE_FIFO(1);
+static AVI_DEFINE_FIFO(2);
+static AVI_DEFINE_FIFO(3);
+static AVI_DEFINE_FIFO(4);
+static AVI_DEFINE_FIFO(5);
+static AVI_DEFINE_FIFO(6);
+static AVI_DEFINE_FIFO(7);
+static AVI_DEFINE_FIFO(8);
+static AVI_DEFINE_FIFO(9);
+static AVI_DEFINE_FIFO(10);
+static AVI_DEFINE_FIFO(11);
+
+/* Chroma converters: gate bits 0-3 of the second CFG enable word (cfg_off 1),
+ * IRQ slots 14-17. */
+#define AVI_DEFINE_CONV(_no) \
+        struct avi_node avi_conv ## _no = { \
+                .type = AVI_CONV_NODE_TYPE, \
+                .src_id = _no + 0xd, \
+                .sink_id = _no + 0xc, \
+                .base_off = AVI_CONV ## _no, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << (_no), \
+                .name = "CONV" #_no, \
+                .irq_offset = 14 + (_no), \
+                .node_id = AVI_CONV0_NODE + _no \
+        }
+
+static AVI_DEFINE_CONV(0);
+static AVI_DEFINE_CONV(1);
+static AVI_DEFINE_CONV(2);
+static AVI_DEFINE_CONV(3);
+
+/* Each blender has 4 sinks. .sink_id takes the value of the first one, the
+ * others follow.*/
+#define AVI_DEFINE_BLEND(_no) \
+        struct avi_node avi_blend ## _no = { \
+                .type = AVI_BLEND_NODE_TYPE, \
+                .src_id = _no + 0x11, \
+                .sink_id = ((_no) * 4) + 0x10, \
+                .base_off = AVI_BLEND ## _no, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << ((_no) + 4), \
+                .name = "BLEND" #_no, \
+                .irq_offset = 18 + (_no), \
+                .node_id = AVI_BLEND0_NODE + _no \
+        }
+
+static AVI_DEFINE_BLEND(0);
+static AVI_DEFINE_BLEND(1);
+
+/* Extend avi_node to add lcd pixclock info */
+struct avi_lcd
+{
+        struct avi_node avi_node;
+        /* Pixel clock of this LCD output. */
+        struct clk *pixclock;
+        /* Guard used by avi_lcd_pixclock_enable/disable to keep the clock
+         * prepare/enable calls balanced. */
+        int clk_enabled;
+};
+
+/* The LCD nodes are not present in the interconnect of MPW1. They are hardwired
+ * to their respective GAM node. In this case the sink_id points to a
+ * non-existing register, but we can safely write to it (it's a NOP). So this
+ * should be backward compatible. */
+#define AVI_DEFINE_LCD(_no) \
+        struct avi_lcd avi_lcd ## _no = { \
+                .avi_node = { \
+                        .type = AVI_LCD_NODE_TYPE, \
+                        .src_id = AVI_SRCSINK_NONE, \
+                        .sink_id = _no + 0x20, \
+                        .base_off = AVI_LCD ## _no, \
+                        .cfg_off = 1, \
+                        .cfg_mask = 1UL << ((_no) + 6), \
+                        .name = "LCD" #_no, \
+                        .irq_offset = 20 + (_no), \
+                        .node_id = AVI_LCD0_NODE + _no \
+                }, \
+        }
+
+static AVI_DEFINE_LCD(0);
+static AVI_DEFINE_LCD(1);
+
+/* Camera inputs: pure sources (no sink), IRQ slots 22-27. */
+#define AVI_DEFINE_CAM(_no) \
+        struct avi_node avi_cam ## _no = { \
+                .type = AVI_CAM_NODE_TYPE, \
+                .src_id = _no + 0x13, \
+                .sink_id = AVI_SRCSINK_NONE, \
+                .base_off = AVI_CAM ## _no, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << ((_no) + 8), \
+                .name = "CAM" #_no, \
+                .irq_offset = 22 + (_no), \
+                .node_id = AVI_CAM0_NODE + _no \
+        }
+
+static AVI_DEFINE_CAM(0);
+static AVI_DEFINE_CAM(1);
+static AVI_DEFINE_CAM(2);
+static AVI_DEFINE_CAM(3);
+static AVI_DEFINE_CAM(4);
+static AVI_DEFINE_CAM(5);
+
+/* Scaler inputs: each SCALROT block exposes one source and two scaler sinks
+ * (the third sink of the cluster belongs to the rotator, see AVI_DEFINE_ROT
+ * below). Both inputs of a SCALROT share the same registers and IRQ. */
+#define AVI_DEFINE_SCAL(_scalrot, _input) \
+        struct avi_node avi_scal ## _scalrot ## _input = { \
+                .type = AVI_SCALROT_NODE_TYPE, \
+                .src_id = (_scalrot * 2) + 0x19, \
+                .sink_id = (_scalrot * 3) + _input + 0x1a, \
+                .base_off = AVI_SCAL ## _scalrot, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << (((_scalrot) * 2) + 14), \
+                .irq_offset = 28 + (_scalrot), \
+                .name = "SCAL" #_scalrot "[" #_input "]", \
+                /* '?:' binds looser than '+': the previous, unparenthesized \
+                 * form added _input only to the AVI_SCAL10_NODE branch, so \
+                 * avi_scal01 wrongly got AVI_SCAL00_NODE as its node_id. */ \
+                .node_id = ((_scalrot == 0) ? AVI_SCAL00_NODE \
+                                            : AVI_SCAL10_NODE) + _input \
+        }
+
+static AVI_DEFINE_SCAL(0, 0);
+static AVI_DEFINE_SCAL(0, 1);
+static AVI_DEFINE_SCAL(1, 0);
+static AVI_DEFINE_SCAL(1, 1);
+
+/* Rotators: third sink of each SCALROT cluster (0x1c / 0x1f); they share the
+ * IRQ slot (28 + _scalrot) with the scaler of the same cluster. */
+#define AVI_DEFINE_ROT(_scalrot) \
+        struct avi_node avi_rot ## _scalrot = { \
+                .type = AVI_SCALROT_NODE_TYPE, \
+                .src_id = (_scalrot * 2) + 0x1a, \
+                .sink_id = (_scalrot * 3) + 0x1c, \
+                .base_off = AVI_ROT ## _scalrot, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << (((_scalrot) * 2) + 15), \
+                .name = "ROT" #_scalrot, \
+                .irq_offset = 28 + (_scalrot), \
+                .node_id = (_scalrot==0)?AVI_ROT0_NODE:AVI_ROT1_NODE \
+        }
+
+static AVI_DEFINE_ROT(0);
+static AVI_DEFINE_ROT(1);
+
+/* GAMs don't have a src_id in MPW1, see comment on AVI_DEFINE_LCD above. */
+#define AVI_DEFINE_GAM(_no) \
+        struct avi_node avi_gam ## _no = { \
+                .type = AVI_GAM_NODE_TYPE, \
+                .src_id = _no + 0x1d, \
+                .sink_id = _no + 0x18, \
+                .base_off = AVI_GAM ## _no, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << ((_no) + 18), \
+                .name = "GAM" #_no, \
+                .irq_offset = 30 + (_no), \
+                .node_id = AVI_GAM0_NODE + _no \
+        }
+
+static AVI_DEFINE_GAM(0);
+static AVI_DEFINE_GAM(1);
+
+/* Bayer ISP chains: 12 gate bits each in the ISP enable word (cfg_off 3),
+ * IRQ slots 42+ stride 12. */
+#define AVI_DEFINE_ISP_CHAIN_BAYER(_no) \
+        struct avi_node avi_isp_chain_bayer ## _no = { \
+                .type = AVI_ISP_CHAIN_BAYER_NODE_TYPE, \
+                .src_id = _no + 0x2d, \
+                .sink_id = _no + 0x2a, \
+                .base_off = AVI_ISP_CHAIN_BAYER ## _no, \
+                .cfg_off = 3, \
+                .cfg_mask = 0xFFFUL << ((_no) * 12), \
+                .name = "ISP_CHAIN_BAYER" #_no, \
+                .irq_offset = 42 + 12 * (_no), \
+                .node_id = AVI_ISP_CHAIN_BAYER0_NODE + _no \
+        }
+
+static AVI_DEFINE_ISP_CHAIN_BAYER(0);
+static AVI_DEFINE_ISP_CHAIN_BAYER(1);
+
+/* Bayer statistics: source only, no own register bank (base_off 0) and no
+ * dedicated IRQ (irq_offset -1). */
+#define AVI_DEFINE_STATS_BAYER(_no) \
+        struct avi_node avi_stats_bayer ##_no = { \
+                .type = AVI_STATS_BAYER_NODE_TYPE, \
+                .src_id = _no + 0x2b, \
+                /* Always implicitly connected to \
+                   the respective CHAIN_BAYER */ \
+                .sink_id = AVI_SRCSINK_NONE, \
+                .base_off = 0, \
+                .cfg_off = 3, \
+                .cfg_mask = 0x40UL << ((_no) * 12), \
+                .name = "ISP_STATS_BAYER" #_no, \
+                .irq_offset = -1, \
+                .node_id = AVI_STATS_BAYER0_NODE + _no, \
+        }
+
+static AVI_DEFINE_STATS_BAYER(0);
+static AVI_DEFINE_STATS_BAYER(1);
+
+/* YUV statistics: gate bits 26/27 of the second CFG enable word. */
+#define AVI_DEFINE_STATS_YUV(_no) \
+        struct avi_node avi_stats_yuv ## _no = { \
+                .type = AVI_STATS_YUV_NODE_TYPE, \
+                .src_id = _no + 0x29, \
+                .sink_id = _no + 0x28, \
+                .base_off = AVI_STATS_YUV ## _no, \
+                .cfg_off = 1, \
+                .cfg_mask = 1UL << ((_no) + 26), \
+                .name = "STATS_YUV" #_no, \
+                .irq_offset = 40 + (_no), \
+                .node_id = AVI_STATS_YUV0_NODE + _no \
+        }
+
+static AVI_DEFINE_STATS_YUV(0);
+static AVI_DEFINE_STATS_YUV(1);
+
+/* YUV ISP chains: 4 gate bits each, above the bayer chains in the ISP
+ * enable word. */
+#define AVI_DEFINE_ISP_CHAIN_YUV(_no) \
+        struct avi_node avi_isp_chain_yuv ## _no = { \
+                .type = AVI_ISP_CHAIN_YUV_NODE_TYPE, \
+                .src_id = _no + 0x2f, \
+                .sink_id = _no + 0x2c, \
+                .base_off = AVI_ISP_CHAIN_YUV ## _no, \
+                .cfg_off = 3, \
+                .cfg_mask = 0xFUL << ((_no) * 4 + 24), \
+                .name = "ISP_CHAIN_YUV" #_no, \
+                .irq_offset = 66 + 4 * (_no), \
+                .node_id = AVI_ISP_CHAIN_YUV0_NODE + _no \
+        }
+
+static AVI_DEFINE_ISP_CHAIN_YUV(0);
+static AVI_DEFINE_ISP_CHAIN_YUV(1);
+
+
+/*
+ * Global AVI controller state: a spinlock protecting CFG register accesses,
+ * a mutex serializing blender setup and the node table indexed by node id.
+ */
+struct avi_controller avi_ctrl = {
+        .cfg_lck = __SPIN_LOCK_UNLOCKED(avi_ctrl.cfg_lck),
+        .blend_lck = __MUTEX_INITIALIZER(avi_ctrl.blend_lck),
+        .nodes = {
+                [AVI_FIFO00_NODE] = &avi_fifo0,
+                [AVI_FIFO01_NODE] = &avi_fifo1,
+                [AVI_FIFO02_NODE] = &avi_fifo2,
+                [AVI_FIFO03_NODE] = &avi_fifo3,
+                [AVI_FIFO04_NODE] = &avi_fifo4,
+                [AVI_FIFO05_NODE] = &avi_fifo5,
+                [AVI_FIFO06_NODE] = &avi_fifo6,
+                [AVI_FIFO07_NODE] = &avi_fifo7,
+                [AVI_FIFO08_NODE] = &avi_fifo8,
+                [AVI_FIFO09_NODE] = &avi_fifo9,
+                [AVI_FIFO10_NODE] = &avi_fifo10,
+                [AVI_FIFO11_NODE] = &avi_fifo11,
+                [AVI_CONV0_NODE] = &avi_conv0,
+                [AVI_CONV1_NODE] = &avi_conv1,
+                [AVI_CONV2_NODE] = &avi_conv2,
+                [AVI_CONV3_NODE] = &avi_conv3,
+                [AVI_BLEND0_NODE] = &avi_blend0,
+                [AVI_BLEND1_NODE] = &avi_blend1,
+                [AVI_LCD0_NODE] = &avi_lcd0.avi_node,
+                [AVI_LCD1_NODE] = &avi_lcd1.avi_node,
+                [AVI_CAM0_NODE] = &avi_cam0,
+                [AVI_CAM1_NODE] = &avi_cam1,
+                [AVI_CAM2_NODE] = &avi_cam2,
+                [AVI_CAM3_NODE] = &avi_cam3,
+                [AVI_CAM4_NODE] = &avi_cam4,
+                [AVI_CAM5_NODE] = &avi_cam5,
+                [AVI_SCAL00_NODE] = &avi_scal00,
+                [AVI_SCAL01_NODE] = &avi_scal01,
+                [AVI_ROT0_NODE] = &avi_rot0,
+                [AVI_SCAL10_NODE] = &avi_scal10,
+                [AVI_SCAL11_NODE] = &avi_scal11,
+                [AVI_ROT1_NODE] = &avi_rot1,
+                [AVI_GAM0_NODE] = &avi_gam0,
+                [AVI_GAM1_NODE] = &avi_gam1,
+                [AVI_ISP_CHAIN_BAYER0_NODE] = &avi_isp_chain_bayer0,
+                [AVI_ISP_CHAIN_BAYER1_NODE] = &avi_isp_chain_bayer1,
+                [AVI_STATS_BAYER0_NODE] = &avi_stats_bayer0,
+                [AVI_STATS_BAYER1_NODE] = &avi_stats_bayer1,
+                [AVI_STATS_YUV0_NODE] = &avi_stats_yuv0,
+                [AVI_STATS_YUV1_NODE] = &avi_stats_yuv1,
+                [AVI_ISP_CHAIN_YUV0_NODE] = &avi_isp_chain_yuv0,
+                [AVI_ISP_CHAIN_YUV1_NODE] = &avi_isp_chain_yuv1,
+                [AVI_INTER_NODE] = &avi_inter,
+                [AVI_CFG_NODE] = &avi_cfg,
+        }
+};
+EXPORT_SYMBOL(avi_ctrl);
+
+/**
+ * Copy a block of registers starting at bus address @addr into the buffer
+ * @reg_base, one 32-bit word at a time. @s is the size in bytes and must be
+ * a multiple of 4. (The old comment stated the direction backwards.)
+ */
+void memcpy_from_registers(void *reg_base, unsigned long addr, size_t s)
+{
+        u32 *reg = reg_base;
+        unsigned i;
+
+        BUG_ON(s % sizeof(u32) != 0);
+        s /= sizeof(u32); /* we read one register at a time */
+
+        for (i = 0; i < s; i++)
+                reg[i] = AVI_READ(addr + i * sizeof(u32));
+        rmb();
+}
+EXPORT_SYMBOL(memcpy_from_registers);
+
+/* Snapshot the whole CFG register bank into *regs. */
+void avi_cfg_get_registers(struct avi_cfg_regs *regs)
+{
+        memcpy_from_registers(regs, avi_node_base(&avi_cfg), sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_cfg_get_registers);
+
+/* Ungate a node's clock: set its cfg_mask bit in the relevant CFG ENABLE
+ * word. BLENDIN nodes are virtual and have no clock, hence the no-op. */
+void avi_enable_node(struct avi_node const* node)
+{
+        if (node->type != AVI_BLENDIN_NODE_TYPE) {
+                unsigned long addr;
+
+                /* ISP modules do not follow the same register map scheme. */
+                if (node->cfg_off == 3)
+                        addr = avi_ctrl.base + AVI_CFG_ISP_ENABLE3;
+                else
+                        addr = avi_ctrl.base + AVI_CFG_ENABLE1 +
+                                (node->cfg_off * sizeof(u32));
+
+                AVI_WRITE(node->cfg_mask | AVI_READ(addr), addr);
+        }
+}
+EXPORT_SYMBOL(avi_enable_node);
+
+/* Gate a node's clock: clear its cfg_mask bit in the ENABLE word. */
+void avi_disable_node(struct avi_node const* node)
+{
+        if (node->type != AVI_BLENDIN_NODE_TYPE) {
+                unsigned long addr;
+
+                /* ISP modules do not follow the same register map scheme. */
+                if (node->cfg_off == 3)
+                        addr = avi_ctrl.base + AVI_CFG_ISP_ENABLE3;
+                else
+                        addr = avi_ctrl.base + AVI_CFG_ENABLE1 +
+                                (node->cfg_off * sizeof(u32));
+
+                AVI_WRITE(AVI_READ(addr) & ~node->cfg_mask, addr);
+        }
+}
+EXPORT_SYMBOL(avi_disable_node);
+
+/* Set a node's bit in the LOCK register bank. */
+void avi_lock_node(struct avi_node const* node)
+{
+        /*
+         * Some nodes (aka AVI_BLENDIN_NODE_TYPE) are virtual nodes: they have
+         * no assigned clock.
+         */
+        if (node->type != AVI_BLENDIN_NODE_TYPE) {
+                unsigned long addr;
+
+                /* ISP modules do not follow the same register map scheme. */
+                if (node->cfg_off == 3)
+                        addr = avi_ctrl.base + AVI_CFG_ISP_LOCK3;
+                else
+                        addr = avi_ctrl.base + AVI_CFG_LOCK1 +
+                                (node->cfg_off * sizeof(u32));
+
+                AVI_WRITE(node->cfg_mask | AVI_READ(addr), addr);
+        }
+}
+EXPORT_SYMBOL(avi_lock_node);
+
+/* Clear a node's bit in the LOCK register bank. */
+void avi_unlock_node(struct avi_node const* node)
+{
+        if (node->type != AVI_BLENDIN_NODE_TYPE) {
+                unsigned long addr;
+
+                /* ISP modules do not follow the same register map scheme. */
+                if (node->cfg_off == 3)
+                        addr = avi_ctrl.base + AVI_CFG_ISP_LOCK3;
+                else
+                        addr = avi_ctrl.base + AVI_CFG_LOCK1 +
+                                (node->cfg_off * sizeof(u32));
+
+                AVI_WRITE(AVI_READ(addr) & ~node->cfg_mask, addr);
+        }
+}
+EXPORT_SYMBOL(avi_unlock_node);
+
+/**
+ * Force apply the node's shadow registers configuration.
+ */
+void avi_apply_node(struct avi_node const* node)
+{
+        if (node->type != AVI_BLENDIN_NODE_TYPE) {
+                unsigned long addr;
+
+                /* ISP modules do not follow the same register map scheme. */
+                if (node->cfg_off == 3)
+                        addr = avi_ctrl.base + AVI_CFG_ISP_APPLY3;
+                else
+                        addr = avi_ctrl.base + AVI_CFG_APPLY1 +
+                                (node->cfg_off * sizeof(u32));
+
+                /* APPLY registers are write-only triggers: no read-modify-
+                 * write needed, unlike ENABLE/LOCK above. */
+                AVI_WRITE(node->cfg_mask, addr);
+        }
+}
+EXPORT_SYMBOL(avi_apply_node);
+
+/**************
+ * FIFO config
+ **************/
+
+/* Mirror of memcpy_from_registers: write the buffer at reg_base out to the
+ * registers starting at bus address addr, one 32-bit word at a time. */
+static inline void memcpy_to_registers(unsigned long addr,
+                                       const void *reg_base,
+                                       size_t s)
+{
+        const u32 *reg = reg_base;
+        unsigned i;
+
+        BUG_ON(s % sizeof(u32) != 0);
+        s /= sizeof(u32); /* we write one register at a time */
+
+        for (i = 0; i < s; i++)
+                AVI_WRITE(reg[i], addr + i * sizeof(u32));
+        wmb();
+}
+
+/* Snapshot a FIFO node's full register bank into *regs. */
+void avi_fifo_get_registers(struct avi_node const *fifo,
+                            struct avi_fifo_registers *regs)
+{
+        memcpy_from_registers(regs, avi_node_base(fifo), sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_fifo_get_registers);
+
+/* Program a FIFO node's full register bank from *regs. */
+void avi_fifo_set_registers(struct avi_node const *fifo,
+                            struct avi_fifo_registers const *regs)
+{
+        memcpy_to_registers(avi_node_base(fifo),
+                            regs,
+                            sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_fifo_set_registers);
+
+/* Toggle the "single" bit of the FIFO CFG register (one-shot transfers). */
+void avi_fifo_set_oneshot(struct avi_node const *fifo, bool oneshot)
+{
+        unsigned long const addr = avi_node_base(fifo) + AVI_FIFO_CFG;
+        union avi_fifo_cfg cfg;
+
+        cfg._register = AVI_READ(addr);
+        cfg.single = oneshot;
+        AVI_WRITE(cfg._register, addr);
+}
+EXPORT_SYMBOL(avi_fifo_set_oneshot);
+
+/* Program the two DMA start addresses (plane 0 and plane 1) of a FIFO. */
+void avi_fifo_set_dmasa(struct avi_node const *fifo, u32 dmasa0, u32 dmasa1)
+{
+        unsigned long const base = avi_node_base(fifo);
+
+        AVI_WRITE(dmasa0, base + AVI_FIFO_DMASA0);
+        AVI_WRITE(dmasa1, base + AVI_FIFO_DMASA1);
+}
+EXPORT_SYMBOL(avi_fifo_set_dmasa);
+
+/* Program the two DMA line strides (plane 0 and plane 1) of a FIFO. */
+void avi_fifo_set_dmalstr(struct avi_node const *fifo,
+                          u32 dmalstr0,
+                          u32 dmalstr1)
+{
+        unsigned long const base = avi_node_base(fifo);
+
+        AVI_WRITE(dmalstr0, base + AVI_FIFO_DMALSTR0);
+        AVI_WRITE(dmalstr1, base + AVI_FIFO_DMALSTR1);
+}
+EXPORT_SYMBOL(avi_fifo_set_dmalstr);
+
+/* Read back the FIFO STATUS register. */
+union avi_fifo_status avi_fifo_get_status(struct avi_node const *fifo)
+{
+        unsigned long const addr = avi_node_base(fifo) + AVI_FIFO_STATUS;
+        union avi_fifo_status fifo_status;
+
+        fifo_status._register = AVI_READ(addr);
+        return fifo_status;
+}
+EXPORT_SYMBOL(avi_fifo_get_status);
+
+/* Read-modify-write the force_clear field of the FIFO VLIN register. */
+static void avi_fifo_set_force_clear(struct avi_node const *fifo,
+                                     enum avi_fifo_vlin_force_clear fc)
+{
+        unsigned long const addr = avi_node_base(fifo) + AVI_FIFO_VLIN;
+        union avi_fifo_vlin vlin;
+
+        vlin._register = AVI_READ(addr);
+        vlin.force_clear = fc;
+        AVI_WRITE(vlin._register, addr);
+}
+
+void avi_fifo_force_clear(struct avi_node const *node)
+{
+        /* Force a clear sequence on the fifo. We force the clear high, then we
+         * return to the normal clear behaviour. */
+        avi_fifo_set_force_clear(node, AVI_FIFO_VLIN_FORCE_CLEAR_ACTIVE);
+        wmb();
+        avi_fifo_set_force_clear(node, AVI_FIFO_VLIN_FORCE_CLEAR_NORMAL);
+}
+EXPORT_SYMBOL(avi_fifo_force_clear);
+
+/***************************
+ * Blenders low-level setup
+ ***************************/
+
+/* A blender has 4 inputs (see AVI_DEFINE_BLEND): the blendin index lives in
+ * the two low bits of the node's sink_id. */
+static inline int avi_blendin_id(const struct avi_node *blendin)
+{
+        return blendin->sink_id & 3;
+}
+
+/* Rewrite the blendin index encoded in the low bits of sink_id. */
+static inline void avi_blendin_set_id(struct avi_node *blendin, int id)
+{
+        blendin->sink_id &= ~3;
+        blendin->sink_id += id & 3;
+}
+
+/**
+ * avi_blendin_2cfg() - Build blending configuration.
+ * @config: pointer to configuration to apply
+ * @blendin: pointer to AVI virtual blendin node
+ * @xoffset: x coordinate of upper left corner of frame to blend
+ * @yoffset: y coordinate of upper left corner of frame to blend
+ * @alpha: alpha component used to blend plane with, or enable OSD mode if < 0
+ */
+void avi_blendin_2cfg(struct avi_blendin_cfg* config,
+                      struct avi_node const* blendin,
+                      u16 xoffset,
+                      u16 yoffset,
+                      int alpha)
+{
+        /* Pack the position: y in the high half-word, x in the low one. */
+        config->offset = (yoffset << 16) | xoffset;
+
+        /* A negative alpha selects OSD mode (AMODE = 1, i.e. bit 8 set),
+         * otherwise only the 8-bit alpha value is kept. */
+        config->alpha = (alpha < 0) ? 0x100 : (alpha & 0xff);
+}
+EXPORT_SYMBOL(avi_blendin_2cfg);
+
+/**
+ * avi_set_blendin_cfg() - Apply previously built blending configuration.
+ * @blendin: pointer to AVI virtual blendin node
+ * @config: pointer to configuration to apply
+ *
+ * OFFSET/ALPHA register pairs are laid out with a 2-word stride per input.
+ */
+void avi_set_blendin_cfg(struct avi_node const* blendin,
+                         struct avi_blendin_cfg const* config)
+{
+        unsigned long const base = avi_node_base(blendin);
+        int id = avi_blendin_id(blendin);
+
+        AVI_WRITE(config->offset,
+                  base + AVI_BLEND_OFFSET0 + (id * sizeof(u32) * 2));
+        AVI_WRITE(config->alpha,
+                  base + AVI_BLEND_ALPHA0 + (id * sizeof(u32) * 2));
+}
+EXPORT_SYMBOL(avi_set_blendin_cfg);
+
+/* config->id must be the ID of the blendin */
+void avi_get_blendin_cfg(struct avi_node const* blendin,
+                         struct avi_blendin_cfg* config)
+{
+        unsigned long const base = avi_node_base(blendin);
+        int id = avi_blendin_id(blendin);
+
+        config->offset = AVI_READ(base + AVI_BLEND_OFFSET0
+                                  + (id * sizeof(u32) * 2));
+        config->alpha = AVI_READ(base + AVI_BLEND_ALPHA0
+                                 + (id * sizeof(u32) * 2));
+}
+EXPORT_SYMBOL(avi_get_blendin_cfg);
+
+/* Program the OFFSET/ALPHA register pair of one blender input directly on a
+ * blender node (2-word stride per input, as in avi_set_blendin_cfg). A
+ * negative alpha or 0x100 selects OSD mode. */
+void avi_setup_blender_input(struct avi_node const* blender,
+                             unsigned input,
+                             unsigned x,
+                             unsigned y,
+                             int alpha)
+{
+        unsigned long base;
+        u32 offset;
+
+        base = avi_node_base(blender) + (input * sizeof(u32) * 2);
+
+        if (alpha < 0 || alpha == 0x100)
+                /* OSD mode */
+                alpha = 0x100;
+        else
+                alpha &= 0xff;
+
+        x &= 0xffff;
+        y &= 0xffff;
+
+        offset = (y << 16) | x;
+
+        AVI_WRITE(offset, base + AVI_BLEND_OFFSET0);
+        AVI_WRITE(alpha, base + AVI_BLEND_ALPHA0);
+}
+EXPORT_SYMBOL(avi_setup_blender_input);
+
+/**
+ * avi_setup_blendin() - Setup blending configuration
+ * @blendin: pointer to AVI virtual blendin node
+ * @xoffset: x coordinate of upper left corner of frame to blend
+ * @yoffset: y coordinate of upper left corner of frame to blend
+ * @alpha: alpha component used to blend plane with, or enable OSD mode if < 0
+ *
+ * Convenience wrapper: build the configuration then apply it.
+ */
+void avi_setup_blendin(struct avi_node const* blendin,
+                       u16 xoffset,
+                       u16 yoffset,
+                       int alpha)
+{
+        struct avi_blendin_cfg cfg;
+
+        avi_blendin_2cfg(&cfg, blendin, xoffset, yoffset, alpha);
+        avi_set_blendin_cfg(blendin, &cfg);
+}
+EXPORT_SYMBOL(avi_setup_blendin);
+
+/**
+ * avi_setup_blendin_alpha() - Setup blending alpha
+ * @blendin: pointer to AVI virtual blendin node
+ * @alpha: alpha component used to blend plane with, or enable OSD mode if < 0
+ *
+ * Only the ALPHA word is written; the OFFSET word is left untouched.
+ */
+void avi_setup_blendin_alpha(struct avi_node const* blendin,
+                             int alpha)
+{
+        unsigned long const base = avi_node_base(blendin);
+        int id = avi_blendin_id(blendin);
+        struct avi_blendin_cfg cfg;
+
+        avi_blendin_2cfg(&cfg, blendin, 0, 0, alpha);
+
+        /* (id * sizeof(u32)) * 2 is the same 2-word stride used by
+         * avi_set_blendin_cfg, just parenthesized differently. */
+        AVI_WRITE(cfg.alpha,
+                  base + AVI_BLEND_ALPHA0 + (id * sizeof(u32)) * 2);
+}
+EXPORT_SYMBOL(avi_setup_blendin_alpha);
+
+/**
+ * avi_setup_blend() - Setup blender configuration
+ * @blender: pointer to AVI blender node
+ * @background: background plane color
+ * @width: blender output width in pixels (must be >= 1)
+ * @height: blender output height in pixels (must be >= 1)
+ *
+ * The FRAMEOUTSIZE register is programmed with (height - 1, width - 1).
+ */
+void avi_setup_blend(struct avi_node const* blender,
+                     u32 background,
+                     u16 width,
+                     u16 height)
+{
+        unsigned long const base = avi_node_base(blender);
+
+        avi_blend_set_bg(blender, background);
+        AVI_WRITE(((height - 1) << 16) | (width - 1),
+                  base + AVI_BLEND_FRAMEOUTSIZE);
+}
+EXPORT_SYMBOL(avi_setup_blend);
+
+/* Read back the blender background color. */
+u32 avi_blend_get_bg(struct avi_node const *blender)
+{
+        unsigned long const base = avi_node_base(blender);
+
+        return AVI_READ(base + AVI_BLEND_BACKGROUND);
+}
+EXPORT_SYMBOL(avi_blend_get_bg);
+
+/* Set the blender background color; only the 24 low bits are meaningful. */
+void avi_blend_set_bg(struct avi_node const *blender, u32 bg)
+{
+        unsigned long const base = avi_node_base(blender);
+
+        AVI_WRITE(bg & 0x00ffffffUL, base + AVI_BLEND_BACKGROUND);
+}
+EXPORT_SYMBOL(avi_blend_set_bg);
+
+/************************
+ * Gamma Corrector setup
+ ***********************/
+
+/* Program the gamma corrector CONF register; the int arguments are
+ * normalized to single bits (bypass, palette mode, 10-bit components). */
+void avi_gam_setup(struct avi_node const* node,
+                   int bypass, int palette, int ten_bits)
+{
+        unsigned long const base = avi_node_base(node);
+        union avi_isp_gamma_corrector_conf conf = {{
+                .bypass = !!bypass,
+                .palette = !!palette,
+                .comp_width = !!ten_bits,
+        }};
+
+        AVI_WRITE(conf._register, base + AVI_ISP_GAMMA_CORRECTOR_CONF);
+}
+EXPORT_SYMBOL(avi_gam_setup);
+
+/* Raw read-back of the gamma corrector CONF register. */
+u32 avi_gam_get_config(struct avi_node const* node)
+{
+        unsigned long const base = avi_node_base(node);
+
+        return AVI_READ(base + AVI_ISP_GAMMA_CORRECTOR_CONF);
+}
+EXPORT_SYMBOL(avi_gam_get_config);
+
+/* Write one LUT entry: the R/Y, G/U and B/V tables are indexed by regno. */
+void avi_gam_set_entry(struct avi_node const *gam,
+                       u16 regno,
+                       u8 red, u8 green, u8 blue)
+{
+        unsigned long const base = avi_node_base(gam);
+
+        AVI_WRITE(red,
+                  base + AVI_ISP_GAMMA_CORRECTOR_RY_LUT + ((u32)regno << 2));
+        AVI_WRITE(green,
+                  base + AVI_ISP_GAMMA_CORRECTOR_GU_LUT + ((u32)regno << 2));
+        AVI_WRITE(blue,
+                  base + AVI_ISP_GAMMA_CORRECTOR_BV_LUT + ((u32)regno << 2));
+}
+EXPORT_SYMBOL(avi_gam_set_entry);
+
+/* Load a full AVI_CMAP_SZ-entry color map into the LUTs. */
+void avi_gam_set_cmap(struct avi_node const *gam, struct avi_cmap const *cmap)
+{
+        unsigned i;
+
+        for (i = 0; i < AVI_CMAP_SZ; i++)
+                avi_gam_set_entry(gam, i,
+                                  cmap->red[i],
+                                  cmap->green[i],
+                                  cmap->blue[i]);
+}
+EXPORT_SYMBOL(avi_gam_set_cmap);
+
+/* Read the full color map back from the LUTs. */
+void avi_gam_get_cmap(struct avi_node const *gam, struct avi_cmap *cmap)
+{
+        unsigned long const base = avi_node_base(gam);
+        unsigned i;
+
+        for (i = 0; i < AVI_CMAP_SZ; i++) {
+                cmap->red [i] = AVI_READ(base + (i << 2)
+                                         + AVI_ISP_GAMMA_CORRECTOR_RY_LUT);
+                cmap->green[i] = AVI_READ(base + (i << 2)
+                                          + AVI_ISP_GAMMA_CORRECTOR_GU_LUT);
+                cmap->blue [i] = AVI_READ(base + (i << 2)
+                                          + AVI_ISP_GAMMA_CORRECTOR_BV_LUT);
+        }
+}
+EXPORT_SYMBOL(avi_gam_get_cmap);
+
+/**************
+ * LCD config
+ **************/
+
+/* Snapshot an LCD node's register bank into *regs. */
+void avi_lcd_get_registers(struct avi_node const *lcd,
+                           struct avi_lcd_regs *regs)
+{
+        memcpy_from_registers(regs, avi_node_base(lcd), sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_lcd_get_registers);
+
+/* Program an LCD node's register bank from *regs. */
+void avi_lcd_set_registers(struct avi_node const *lcd,
+                           struct avi_lcd_regs const *regs)
+{
+        memcpy_to_registers(avi_node_base(lcd), regs, sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_lcd_set_registers);
+
+/* For interlaced output: bit 2 of STATUS tells which field is current. */
+enum avi_field avi_lcd_get_current_field(struct avi_node const *lcd)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_STATUS;
+
+        return (AVI_READ(addr) & 0x4) ? AVI_BOT_FIELD : AVI_TOP_FIELD;
+}
+EXPORT_SYMBOL(avi_lcd_get_current_field);
+
+/* Read the colorbar field of the LCD DPD register. */
+unsigned avi_lcd_get_colorbar(struct avi_node const *lcd)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_DPD;
+        union avi_lcd_dpd ret;
+
+        ret._register = AVI_READ(addr);
+
+        return ret.colorbar;
+}
+EXPORT_SYMBOL(avi_lcd_get_colorbar);
+
+/**
+ * avi_lcd_set_colorbar: set the colorbar register for the given LCD. Only the
+ * two LSB of colorbar will be used: (colorbar & 2) enables/disables the
+ * colorbar, (colorbar & 1) when set means RGB mode, unset means YUV mode.
+ * (The comment used to name the getter by mistake.)
+ */
+void avi_lcd_set_colorbar(struct avi_node const *lcd, unsigned colorbar)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_DPD;
+        union avi_lcd_dpd dpd;
+
+        dpd._register = AVI_READ(addr);
+
+        dpd.colorbar = colorbar;
+        AVI_WRITE(dpd._register, addr);
+}
+EXPORT_SYMBOL(avi_lcd_set_colorbar);
+
+/* Read the force_clear field of the LCD FORCE_CLEAR register. */
+unsigned avi_lcd_get_force_clear(struct avi_node const *lcd)
+{
+        union avi_lcd_force_clear clr;
+        unsigned long const addr =
+                avi_node_base(lcd) + AVI_LCD_FORCE_CLEAR;
+
+        clr._register = AVI_READ(addr);
+
+        return clr.force_clear;
+}
+EXPORT_SYMBOL(avi_lcd_get_force_clear);
+
+/* Read-modify-write the force_clear field of the LCD FORCE_CLEAR register. */
+void avi_lcd_set_force_clear(struct avi_node const *lcd, unsigned clear)
+{
+        union avi_lcd_force_clear clr;
+        unsigned long const addr =
+                avi_node_base(lcd) + AVI_LCD_FORCE_CLEAR;
+
+        clr._register = AVI_READ(addr);
+        clr.force_clear = clear;
+
+        AVI_WRITE(clr._register, addr);
+}
+EXPORT_SYMBOL(avi_lcd_set_force_clear);
+
+/* Raw read-back of the LCD STATUS register. */
+union avi_lcd_status avi_lcd_get_status(struct avi_node const *lcd)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_STATUS;
+        union avi_lcd_status ret;
+
+        ret._register = AVI_READ(addr);
+
+        return ret;
+}
+EXPORT_SYMBOL(avi_lcd_get_status);
+
+/* Read the default-pixel-data field of the LCD DPD register. */
+u32 avi_lcd_get_dpd(struct avi_node const *lcd)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_DPD;
+        union avi_lcd_dpd ret;
+
+        ret._register = AVI_READ(addr);
+
+        return ret.dpd;
+}
+EXPORT_SYMBOL(avi_lcd_get_dpd);
+
+/* Set the default pixel data (24 bits), preserving the colorbar bits. */
+void avi_lcd_set_dpd(struct avi_node const *lcd, u32 dpd)
+{
+        unsigned long const addr = avi_node_base(lcd) + AVI_LCD_DPD;
+        union avi_lcd_dpd v;
+
+        v._register = AVI_READ(addr);
+
+        v.dpd = dpd & 0xffffff;
+
+        AVI_WRITE(v._register, addr);
+}
+EXPORT_SYMBOL(avi_lcd_set_dpd);
+
+/* Prepare and enable the LCD pixel clock. Returns -EBUSY if this wrapper
+ * already enabled it (clk_enabled keeps the clk calls balanced). */
+int avi_lcd_pixclock_enable(struct avi_node const *n)
+{
+        struct avi_lcd *lcd;
+        int ret = 0;
+
+        BUG_ON(n->type != AVI_LCD_NODE_TYPE);
+
+        lcd = container_of(n, struct avi_lcd, avi_node);
+
+        if (lcd->clk_enabled)
+                ret = -EBUSY;
+        else {
+                ret = clk_prepare_enable(lcd->pixclock);
+                lcd->clk_enabled = (ret == 0);
+        }
+
+        return ret;
+}
+EXPORT_SYMBOL(avi_lcd_pixclock_enable);
+
+/* Disable and unprepare the pixel clock; -EINVAL if it was not enabled. */
+int avi_lcd_pixclock_disable(struct avi_node const *n)
+{
+        struct avi_lcd *lcd;
+        int ret = 0;
+
+        BUG_ON(n->type != AVI_LCD_NODE_TYPE);
+
+        lcd = container_of(n, struct avi_lcd, avi_node);
+
+        if (!lcd->clk_enabled)
+                ret = -EINVAL;
+        else {
+                clk_disable_unprepare(lcd->pixclock);
+                lcd->clk_enabled = 0;
+        }
+
+        return ret;
+}
+EXPORT_SYMBOL(avi_lcd_pixclock_disable);
+
+/* Thin wrapper around clk_round_rate for the LCD pixel clock. */
+unsigned long avi_lcd_pixclock_round_rate(struct avi_node const *n,
+                                          unsigned long r)
+{
+        struct avi_lcd *lcd;
+
+        BUG_ON(n->type != AVI_LCD_NODE_TYPE);
+
+        lcd = container_of(n, struct avi_lcd, avi_node);
+
+        return clk_round_rate(lcd->pixclock, r);
+}
+EXPORT_SYMBOL(avi_lcd_pixclock_round_rate);
+
+/* Thin wrapper around clk_set_rate for the LCD pixel clock. */
+int avi_lcd_pixclock_set_rate(struct avi_node const *n,
+                              unsigned long r)
+{
+        struct avi_lcd *lcd;
+
+        BUG_ON(n->type != AVI_LCD_NODE_TYPE);
+
+        lcd = container_of(n, struct avi_lcd, avi_node);
+
+        return clk_set_rate(lcd->pixclock, r);
+}
+EXPORT_SYMBOL(avi_lcd_pixclock_set_rate);
+
+/* Thin wrapper around clk_get_rate for the LCD pixel clock. */
+unsigned long avi_lcd_pixclock_get_rate(struct avi_node const *n)
+{
+        struct avi_lcd *lcd;
+
+        BUG_ON(n->type != AVI_LCD_NODE_TYPE);
+
+        lcd = container_of(n, struct avi_lcd, avi_node);
+
+        return clk_get_rate(lcd->pixclock);
+}
+EXPORT_SYMBOL(avi_lcd_pixclock_get_rate);
+
+/* Program the whole VSYNC generator register bank of LCD node @lcd from
+ * @regs. */
+void avi_vsync_gen_set_registers(struct avi_node const *lcd,
+ struct avi_lcd_avi_vsync_gen_regs const *regs)
+{
+ memcpy_to_registers(avi_node_base(lcd)
+ + AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_0,
+ regs,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_vsync_gen_set_registers);
+
+/* Read back the whole VSYNC generator register bank of LCD node @lcd
+ * into @regs. */
+void avi_vsync_gen_get_registers(struct avi_node const *lcd,
+ struct avi_lcd_avi_vsync_gen_regs *regs)
+{
+ memcpy_from_registers(regs, avi_node_base(lcd)
+ + AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_0,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_vsync_gen_get_registers);
+
+/* Note: the redundant extern re-declaration of
+ * avi_vsync_gen_set_registers that used to follow was removed; the
+ * function is defined above and declared in avi.h. */
+
+
+/* Program the whole register bank of camera node @cam from @regs. */
+void avi_cam_set_registers(struct avi_node const *cam,
+ struct avi_cam_registers const *regs)
+{
+ memcpy_to_registers(avi_node_base(cam), regs, sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_cam_set_registers);
+
+/* Read back the whole register bank of camera node @cam into @regs. */
+void avi_cam_get_registers(struct avi_node const *cam,
+ struct avi_cam_registers *regs)
+{
+ memcpy_from_registers(regs, avi_node_base(cam), sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_cam_get_registers);
+
+/* Read the status register of camera node @cam. */
+union avi_cam_status avi_cam_get_status(struct avi_node const *cam)
+{
+ unsigned long const addr = avi_node_base(cam) + AVI_CAM_STATUS;
+ union avi_cam_status ret;
+
+ ret._register = AVI_READ(addr);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_cam_get_status);
+
+/* Read the timing measurement registers of @cam into @regs.
+ * ("MESURE" spelling comes from the hardware register map.) */
+void avi_cam_get_measure(struct avi_node const *cam,
+ struct avi_cam_measure_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(cam) + AVI_CAM_MESURE_H_TIMING0,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_cam_get_measure);
+
+/* Select the interrupt source(s) of camera node @cam. */
+void avi_cam_set_itsource(struct avi_node const *cam,
+ union avi_cam_itsource itsrc)
+{
+ unsigned long const addr = avi_node_base(cam) + AVI_CAM_ITSOURCE;
+
+ AVI_WRITE(itsrc._register, addr);
+}
+EXPORT_SYMBOL(avi_cam_set_itsource);
+
+/* Program the horizontal and vertical timing registers of @cam. */
+void avi_cam_set_timings(struct avi_node const *cam,
+ union avi_cam_h_timing ht,
+ union avi_cam_v_timing vt)
+{
+ unsigned long const haddr = avi_node_base(cam) + AVI_CAM_H_TIMING;
+ unsigned long const vaddr = avi_node_base(cam) + AVI_CAM_V_TIMING;
+
+ AVI_WRITE(ht._register, haddr);
+ AVI_WRITE(vt._register, vaddr);
+}
+EXPORT_SYMBOL(avi_cam_set_timings);
+
+/* Write a raw value to the RUN register of @cam (see also avi_cam_run
+ * and avi_cam_freerun for the common cases). */
+void avi_cam_set_run(struct avi_node const *cam,
+ union avi_cam_run run)
+{
+ unsigned long const addr = avi_node_base(cam) + AVI_CAM_RUN;
+
+ AVI_WRITE(run._register, addr);
+}
+EXPORT_SYMBOL(avi_cam_set_run);
+
+/* Set only the "run" bit in the RUN register of camera node @cam. */
+void avi_cam_run(struct avi_node const *cam)
+{
+ union avi_cam_run cmd;
+
+ cmd._register = 0;
+ cmd.run = 1;
+
+ AVI_WRITE(cmd._register, avi_node_base(cam) + AVI_CAM_RUN);
+}
+EXPORT_SYMBOL(avi_cam_run);
+
+/* Set only the "free_run" bit in the RUN register of camera node @cam. */
+void avi_cam_freerun(struct avi_node const *cam)
+{
+ union avi_cam_run cmd;
+
+ cmd._register = 0;
+ cmd.free_run = 1;
+
+ AVI_WRITE(cmd._register, avi_node_base(cam) + AVI_CAM_RUN);
+}
+EXPORT_SYMBOL(avi_cam_freerun);
+
+/* Dump the interconnect routing registers, one 32-bit word per sink,
+ * into @regs (AVI_INTER_NSINK entries). */
+void avi_inter_get_registers(u32 regs[AVI_INTER_NSINK])
+{
+ memcpy_from_registers(regs,
+ avi_node_base(&avi_inter),
+ AVI_INTER_NSINK * sizeof(u32));
+}
+EXPORT_SYMBOL(avi_inter_get_registers);
+
+/***********************
+ * Rotation handling
+ ***********************/
+/* Configure rotator node @rot.
+ *
+ * @rotation:   quarter-turn angle to apply
+ * @flip:       request an additional horizontal mirror
+ * @height_out: output height (masked to AVI_ROT_HEIGHTOUT_MASK)
+ * @nstrips:    number of strips (masked to AVI_ROT_NSTRIPE_MASK)
+ */
+void avi_rotator_setup(struct avi_node const* rot,
+ enum avi_rotation rotation,
+ bool flip,
+ unsigned height_out,
+ unsigned nstrips)
+{
+ unsigned long const angle = avi_node_base(rot) + AVI_ROT_ANGLE;
+ unsigned long const size = avi_node_base(rot) + AVI_ROT_SIZE;
+ u32 angle_val;
+
+ switch(rotation)
+ {
+ case AVI_ROTATION_0:
+ angle_val = AVI_ROT_ANGLE_0|(AVI_ROT_HORIZ_FLIP*flip);
+ break;
+ case AVI_ROTATION_90:
+ angle_val = AVI_ROT_ANGLE_90|(AVI_ROT_HORIZ_FLIP*flip);
+ break;
+ case AVI_ROTATION_180:/* rotation is done by FIFOs */
+ /* Bug fix: this used to be "AVI_ROT_HORIZ_FLIP*~flip".
+ * With bool promoted to int, ~flip is -1 or -2, i.e.
+ * non-zero for *both* values of flip, so HORIZ_FLIP was
+ * always asserted. Logical negation is what was meant. */
+ angle_val = AVI_ROT_ANGLE_0|(AVI_ROT_HORIZ_FLIP*!flip);
+ break;
+ case AVI_ROTATION_270:
+ angle_val = AVI_ROT_ANGLE_270|(AVI_ROT_HORIZ_FLIP*flip);
+ break;
+ default:
+ /* Unknown rotation, there is a problem ! */
+ BUG();
+ return;
+ }
+
+ AVI_WRITE(angle_val, angle);
+
+ AVI_WRITE(((height_out & AVI_ROT_HEIGHTOUT_MASK)<<AVI_ROT_HEIGHTOUT_SHIFT)|
+ ((nstrips & AVI_ROT_NSTRIPE_MASK)<<AVI_ROT_NSTRIPE_SHIFT),
+ size);
+}
+
+/******
+ * ISP
+ ******/
+/* Inter */
+/* Program the bayer-chain interconnect registers of @chain_bayer. */
+void avi_isp_chain_bayer_inter_set_registers(
+ struct avi_node const *chain_bayer,
+ struct avi_isp_chain_bayer_inter_regs const *regs)
+{
+ memcpy_to_registers(avi_node_base(chain_bayer) +
+ AVI_ISP_CHAIN_BAYER_INTER,
+ regs,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_chain_bayer_inter_set_registers);
+
+/* Read back the bayer-chain interconnect registers of @chain_bayer. */
+void avi_isp_chain_bayer_inter_get_registers(
+ struct avi_node const *chain_bayer,
+ struct avi_isp_chain_bayer_inter_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(chain_bayer) +
+ AVI_ISP_CHAIN_BAYER_INTER,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_chain_bayer_inter_get_registers);
+
+/* Program the YUV-chain interconnect registers of @chain_yuv. */
+void avi_isp_chain_yuv_inter_set_registers(
+ struct avi_node const *chain_yuv,
+ struct avi_isp_chain_yuv_inter_regs const *regs)
+{
+ memcpy_to_registers(avi_node_base(chain_yuv) +
+ AVI_ISP_CHAIN_YUV_INTER,
+ regs,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_chain_yuv_inter_set_registers);
+
+/* Read back the YUV-chain interconnect registers of @chain_yuv. */
+void avi_isp_chain_yuv_inter_get_registers(
+ struct avi_node const *chain_yuv,
+ struct avi_isp_chain_yuv_inter_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(chain_yuv) +
+ AVI_ISP_CHAIN_YUV_INTER,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_chain_yuv_inter_get_registers);
+
+/* Bayer */
+/* Program the bayer (demosaicking) registers of @chain_bayer. */
+void avi_isp_bayer_set_registers(struct avi_node const *chain_bayer,
+ struct avi_isp_bayer_regs const *regs)
+{
+ memcpy_to_registers(avi_node_base(chain_bayer) + AVI_ISP_BAYER,
+ regs,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_bayer_set_registers);
+
+/* Read back the bayer (demosaicking) registers of @chain_bayer. */
+void avi_isp_bayer_get_registers(struct avi_node const *chain_bayer,
+ struct avi_isp_bayer_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(chain_bayer) + AVI_ISP_BAYER,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_bayer_get_registers);
+
+/* Program the bayer statistics registers of @chain_bayer. */
+void avi_isp_statistics_bayer_set_registers(
+ struct avi_node *chain_bayer,
+ const struct avi_isp_statistics_bayer_regs *regs)
+{
+ memcpy_to_registers(avi_node_base(chain_bayer) +
+ AVI_ISP_STATS_BAYER,
+ regs,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_statistics_bayer_set_registers);
+
+/* Read back the bayer statistics registers of @chain_bayer. */
+void avi_isp_statistics_bayer_get_registers(
+ struct avi_node *chain_bayer,
+ struct avi_isp_statistics_bayer_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(chain_bayer) +
+ AVI_ISP_STATS_BAYER,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_statistics_bayer_get_registers);
+
+/* Read back the pedestal (black level) registers of @chain_bayer. */
+void avi_isp_pedestal_get_registers(struct avi_node *chain_bayer,
+ struct avi_isp_pedestal_regs *regs)
+{
+ memcpy_from_registers(regs,
+ avi_node_base(chain_bayer) + AVI_ISP_PEDESTAL,
+ sizeof(*regs));
+}
+EXPORT_SYMBOL(avi_isp_pedestal_get_registers);
+
+
+/* Return 1 if @node's interrupt flag is currently raised, 0 otherwise. */
+int avi_node_get_irq_flag(struct avi_node const *node)
+{
+ unsigned long addr;
+
+ /* ISP modules do not follow the same register map scheme: their
+ * flags live in the dedicated ISP_ITFLG3 register instead of the
+ * ITFLG bank indexed by cfg_off. */
+ if (node->cfg_off == 3)
+ addr = avi_ctrl.base + AVI_CFG_ISP_ITFLG3;
+ else
+ addr = avi_ctrl.base + AVI_CFG_ITFLG1 +
+ (node->cfg_off * sizeof(u32));
+
+ return !!(node->cfg_mask & AVI_READ(addr));
+}
+EXPORT_SYMBOL(avi_node_get_irq_flag);
+
+/* Acknowledge (clear) a pending interrupt of @node by writing its mask
+ * to the relevant ITACK register. */
+void avi_ack_node_irq(struct avi_node const *node)
+{
+ unsigned long addr;
+
+ /* ISP modules do not follow the same register map scheme. */
+ if (node->cfg_off == 3)
+ addr = avi_ctrl.base + AVI_CFG_ISP_ITACK3;
+ else
+ addr = avi_ctrl.base + AVI_CFG_ITACK1 +
+ (node->cfg_off * sizeof(u32));
+
+ AVI_WRITE(node->cfg_mask, addr);
+}
+EXPORT_SYMBOL(avi_ack_node_irq);
+
+/* Unmask @node's interrupt: ack any stale pending IRQ first, then set
+ * the node's bit in the relevant ITEN register (read-modify-write) and
+ * apply the configuration node so the change takes effect. */
+void avi_enable_node_irq(struct avi_node const *node)
+{
+ unsigned long addr;
+
+ /* ISP modules do not follow the same register map scheme. */
+ if (node->cfg_off == 3)
+ addr = avi_ctrl.base + AVI_CFG_ISP_ITEN3;
+ else
+ addr = avi_ctrl.base + AVI_CFG_ITEN1 +
+ (node->cfg_off * sizeof(u32));
+
+ /* Make sure we don't have a pending IRQ */
+ avi_ack_node_irq(node);
+
+ AVI_WRITE(node->cfg_mask | AVI_READ(addr), addr);
+
+ avi_apply_node(&avi_cfg);
+}
+EXPORT_SYMBOL(avi_enable_node_irq);
+
+/* Mask @node's interrupt: clear the node's bit in the relevant ITEN
+ * register (read-modify-write) and apply the configuration node. */
+void avi_disable_node_irq(struct avi_node const *node)
+{
+ unsigned long addr;
+
+ /* ISP modules do not follow the same register map scheme. */
+ if (node->cfg_off == 3)
+ addr = avi_ctrl.base + AVI_CFG_ISP_ITEN3;
+ else
+ addr = avi_ctrl.base + AVI_CFG_ITEN1 +
+ (node->cfg_off * sizeof(u32));
+
+ AVI_WRITE(~node->cfg_mask & AVI_READ(addr), addr);
+
+ avi_apply_node(&avi_cfg);
+}
+EXPORT_SYMBOL(avi_disable_node_irq);
+
+/***************************************************
+ * Module initialization / shutdown implementation.
+ ***************************************************/
+
+/* Zero @s consecutive 32-bit registers starting at bus address @base.
+ * Note that @s is a count of u32 words, not a byte size. */
+static inline void memzero_regs(unsigned long base, size_t s)
+{
+ volatile u32 __iomem *reg = (u32 *)base;
+
+ while (s--)
+ AVI_WRITE(0, reg++);
+
+#ifdef AVI_BACKWARD_COMPAT
+ /* Because in P7R3 the ISP interconnect has been changed to 'bypass'
+ * the compatibility layer will set the P7R2 ISP interconnect to some
+ * values, so we override it here. Raw writel() is used on purpose so
+ * that the write is NOT intercepted by the AVI_WRITE compat layer.
+ * NOTE(review): writel() normally takes an __iomem pointer while
+ * "base" is an unsigned long here -- confirm this builds without a
+ * sparse/compiler warning. */
+ if (avi_ctrl.revision == AVI_REVISION_2) {
+ /* ISP Inter 0 */
+ writel(0, base + 0x200);
+ writel(0, base + 0x204);
+ /* ISP Inter 1 */
+ writel(0, base + 0x300);
+ writel(0, base + 0x304);
+ }
+#endif /* AVI_BACKWARD_COMPAT */
+
+}
+
+/*
+ * This initialization sequence makes sure the AVI is in a correct state
+ * before we start anything. After a reset, the registers are left in a
+ * non-deterministic state.
+ *
+ * Returns 0 on success or the clk framework error if the controller
+ * clock cannot be enabled.
+ */
+int avi_dereset(void)
+{
+ union avi_cfg_dmacfg dmacfg;
+ int ret;
+
+ ret = clk_prepare_enable(avi_ctrl.clock);
+ if (ret)
+ return ret;
+
+ /* Initialize all the registers to 0. */
+ memzero_regs(avi_ctrl.base,
+ resource_size(avi_ctrl.iomem) / sizeof(u32));
+
+ /* Enable all AVI nodes, then disable them again below, applying in
+ * between: this cycles every node through a known state. */
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_ENABLE1);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_ENABLE2);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_ISP_ENABLE3);
+
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_APPLY1);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_APPLY2);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_ISP_APPLY3);
+
+ /* Disable all AVI nodes */
+ AVI_WRITE(0, avi_ctrl.base + AVI_CFG_ENABLE1);
+ AVI_WRITE(0, avi_ctrl.base + AVI_CFG_ENABLE2);
+ AVI_WRITE(0, avi_ctrl.base + AVI_CFG_ISP_ENABLE3);
+
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_APPLY1);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_APPLY2);
+ AVI_WRITE(0xffffffff, avi_ctrl.base + AVI_CFG_ISP_APPLY3);
+
+ /* DMA configuration. Those are arbitrary values known to work; they
+ * will probably need some adjusting once performance measurements
+ * have been made on the ASIC for the AVI (and maybe depending on the
+ * configuration and the stress put on the AXI) */
+ dmacfg.dma_flag_timeout = 0;
+ dmacfg.dma_flag_number = 0;
+ dmacfg.dma_max_burst_number = 3;
+ dmacfg.dma_max_bandwidth = 0x0f;
+ AVI_WRITE(dmacfg._register, avi_ctrl.base + AVI_CFG_DMACFG);
+
+ /* Scaler/Rotator memory reservation
+ * Reserve memory for each scaler
+ */
+ /* Reserve SCAL0 memory slot */
+ AVI_WRITE(0x1, avi_ctrl.base + AVI_ISP_SCAL_ROT_SCAL0_RESERVE);
+ /* Reserve SCAL1 memory slot */
+ AVI_WRITE(0x1, avi_ctrl.base + AVI_ISP_SCAL_ROT_SCAL1_RESERVE);
+
+ /* Apply configuration, enable configuration and interconnect nodes */
+ avi_enable_node(&avi_inter);
+ avi_enable_node(&avi_cfg);
+ avi_apply_node(&avi_inter);
+ avi_apply_node(&avi_cfg);
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_dereset);
+EXPORT_SYMBOL(avi_dereset);
+
+/* Put the AVI back in reset by simply gating its controller clock
+ * (counterpart of avi_dereset). */
+void avi_reset(void)
+{
+ clk_disable_unprepare(avi_ctrl.clock);
+}
+EXPORT_SYMBOL(avi_reset);
+
+/* Probe: map the AVI registers, grab the controller and LCD pixel
+ * clocks, and bring the IP out of reset. On any failure avi_ctrl.base
+ * is left at 0 so the AVI sub-drivers can detect that the core is not
+ * usable (see avi_probed()). Resources are released in reverse order
+ * through the goto cleanup chain. */
+static int __devinit avi_probe(struct platform_device* pdev)
+{
+ struct resource* res;
+ const struct avi_platform_data *pdata;
+ int ret;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data found\n");
+ ret = -ENODATA;
+ goto no_pdata;
+ }
+
+ avi_ctrl.dev = &pdev->dev;
+ avi_ctrl.revision = pdata->revision;
+ dev_info(&pdev->dev, "Found AVI revision %d", avi_get_revision());
+
+ avi_ctrl.iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!avi_ctrl.iomem) {
+ dev_err(&pdev->dev, "failed to get I/O registers memory\n");
+ ret = -ENXIO;
+ goto no_iomem;
+ }
+
+ avi_ctrl.clock = clk_get(&pdev->dev, "ctrl");
+ if (IS_ERR(avi_ctrl.clock)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(avi_ctrl.clock);
+ goto no_clk;
+ }
+
+ res = request_mem_region(avi_ctrl.iomem->start,
+ resource_size(avi_ctrl.iomem),
+ pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O register memory\n");
+ ret = -EBUSY;
+ goto request_mem_failed;
+ }
+
+ avi_log_init();
+
+ avi_ctrl.irq_base = pdata->irq_base;
+
+ avi_ctrl.base = (unsigned long) ioremap(res->start, resource_size(res));
+ if (! avi_ctrl.base) {
+ dev_err(&pdev->dev, "failed to remap I/O registers memory\n");
+ ret = -ENOMEM;
+ goto remap_failed;
+ }
+
+ /* Registers are mapped: take the IP out of reset. */
+ ret = avi_dereset();
+ if (ret)
+ goto dereset_failed;
+
+ avi_lcd0.pixclock = clk_get(&pdev->dev, "lcd0");
+ if (IS_ERR(avi_lcd0.pixclock)) {
+ dev_err(&pdev->dev, "no lcd0 clk\n");
+ ret = PTR_ERR(avi_lcd0.pixclock);
+ goto no_lcd0_clk;
+ }
+
+ avi_lcd1.pixclock = clk_get(&pdev->dev, "lcd1");
+ if (IS_ERR(avi_lcd1.pixclock)) {
+ dev_err(&pdev->dev, "no lcd1 clk\n");
+ ret = PTR_ERR(avi_lcd1.pixclock);
+ goto no_lcd1_clk;
+ }
+
+ dev_info(&pdev->dev, "AVI core driver initialized\n");
+
+ return 0;
+
+ no_lcd1_clk:
+ clk_put(avi_lcd0.pixclock);
+ no_lcd0_clk:
+ avi_reset();
+ dereset_failed:
+ iounmap((void *)avi_ctrl.base);
+ remap_failed:
+ release_mem_region(avi_ctrl.iomem->start, resource_size(avi_ctrl.iomem));
+ request_mem_failed:
+ clk_put(avi_ctrl.clock);
+ no_clk:
+ no_iomem:
+ no_pdata:
+ /* Set the AVI base to 0 to indicate to the subdrivers that the avi
+ * didn't probe correctly */
+ avi_ctrl.base = 0;
+ return ret;
+}
+
+/* Remove: release everything acquired in avi_probe, in reverse order. */
+static int __devexit avi_remove(struct platform_device* pdev)
+{
+ clk_put(avi_lcd0.pixclock);
+ clk_put(avi_lcd1.pixclock);
+
+ avi_reset();
+
+ iounmap((void *)avi_ctrl.base);
+ release_mem_region(avi_ctrl.iomem->start, resource_size(avi_ctrl.iomem));
+ clk_put(avi_ctrl.clock);
+
+ return 0;
+}
+
+/* PM suspend_late hook: gate the AVI clock. Runs after the sub-drivers'
+ * own suspend callbacks (see avi_dev_pm_ops). */
+static int avi_suspend(struct device *dev)
+{
+ dev_info(dev, "AVI shutting down\n");
+
+ avi_reset();
+
+ return 0;
+}
+
+/* This function is only meant to be called from the core so I don't want to
+ * declare it in the public headers. */
+extern void avi_segment_resume(void);
+
+/* PM resume_early hook: re-run the dereset sequence and let the segment
+ * layer restore its state. Runs before the sub-drivers' resume. */
+static int avi_resume(struct device *dev)
+{
+ dev_info(dev, "AVI resuming\n");
+
+ avi_dereset();
+
+ avi_segment_resume();
+
+ return 0;
+}
+
+static struct dev_pm_ops avi_dev_pm_ops = {
+ /* I use the late/early callbacks to make sure the AVI functions are
+ * sequenced correctly with regard to the AVI subdrivers (fb, cam...) */
+ .suspend_late = &avi_suspend,
+ .resume_early = &avi_resume,
+};
+
+static struct platform_driver avi_driver = {
+ .driver = {
+ .name = "avi",
+ .owner = THIS_MODULE,
+ .pm = &avi_dev_pm_ops,
+ },
+ .probe = &avi_probe,
+ .remove = __devexit_p(&avi_remove),
+};
+
+/* Module entry point: register the platform driver. */
+static int __init avi_init(void)
+{
+ return platform_driver_register(&avi_driver);
+}
+module_init(avi_init);
+
+/* Module exit point: unregister the platform driver. */
+static void __exit avi_exit(void)
+{
+ platform_driver_unregister(&avi_driver);
+}
+module_exit(avi_exit);
+
+MODULE_AUTHOR("Gregor Boirie <gregor.boirie@parrot.com>");
+MODULE_DESCRIPTION("Parrot Advanced Video Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi.h b/drivers/parrot/video/avi.h
new file mode 100644
index 00000000000000..dca8f0824ba814
--- /dev/null
+++ b/drivers/parrot/video/avi.h
@@ -0,0 +1,481 @@
+/*
+ * linux/drivers/parrot/video/avi.h
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 18-Feb-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_H
+#define _AVI_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <asm/io.h>
+
+#include "reg_avi.h"
+
+/* Only needed if we need to support older revisions of the AVI */
+#define AVI_BACKWARD_COMPAT
+
+/*
+ * Revision of AVI module
+ */
+enum avi_revision {
+ AVI_REVISION_1 = 1,
+ AVI_REVISION_2,
+ AVI_REVISION_3,
+ AVI_REVISION_NR
+};
+
+/*
+ * Kind of AVI module instances
+ */
+enum avi_node_type {
+ AVI_CFG_NODE_TYPE,
+ AVI_INTER_NODE_TYPE,
+ AVI_FIFO_NODE_TYPE,
+ AVI_CONV_NODE_TYPE,
+ AVI_BLEND_NODE_TYPE,
+ AVI_BLENDIN_NODE_TYPE,
+ AVI_LCD_NODE_TYPE,
+ AVI_CAM_NODE_TYPE,
+ AVI_SCALROT_NODE_TYPE,
+ AVI_GAM_NODE_TYPE,
+ AVI_ISP_CHAIN_BAYER_NODE_TYPE,
+ AVI_STATS_BAYER_NODE_TYPE,
+ AVI_STATS_YUV_NODE_TYPE,
+ AVI_ISP_CHAIN_YUV_NODE_TYPE,
+ AVI_NODE_TYPE_NR,
+};
+
+/*
+ * AVI module instances identifiers
+ */
+enum avi_node_id {
+ AVI_FIFO00_NODE,
+ AVI_FIFO01_NODE,
+ AVI_FIFO02_NODE,
+ AVI_FIFO03_NODE,
+ AVI_FIFO04_NODE,
+ AVI_FIFO05_NODE,
+ AVI_FIFO06_NODE,
+ AVI_FIFO07_NODE,
+ AVI_FIFO08_NODE,
+ AVI_FIFO09_NODE,
+ AVI_FIFO10_NODE,
+ AVI_FIFO11_NODE,
+ AVI_CONV0_NODE,
+ AVI_CONV1_NODE,
+ AVI_CONV2_NODE,
+ AVI_CONV3_NODE,
+ AVI_BLEND0_NODE,
+ AVI_BLEND1_NODE,
+ AVI_LCD0_NODE,
+ AVI_LCD1_NODE,
+ AVI_CAM0_NODE,
+ AVI_CAM1_NODE,
+ AVI_CAM2_NODE,
+ AVI_CAM3_NODE,
+ AVI_CAM4_NODE,
+ AVI_CAM5_NODE,
+ AVI_SCAL00_NODE,
+ AVI_SCAL01_NODE,
+ AVI_ROT0_NODE,
+ AVI_SCAL10_NODE,
+ AVI_SCAL11_NODE,
+ AVI_ROT1_NODE,
+ AVI_GAM0_NODE,
+ AVI_GAM1_NODE,
+ AVI_ISP_CHAIN_BAYER0_NODE,
+ AVI_ISP_CHAIN_BAYER1_NODE,
+ AVI_STATS_BAYER0_NODE,
+ AVI_STATS_BAYER1_NODE,
+ AVI_STATS_YUV0_NODE,
+ AVI_STATS_YUV1_NODE,
+ AVI_ISP_CHAIN_YUV0_NODE,
+ AVI_ISP_CHAIN_YUV1_NODE,
+ AVI_INTER_NODE,
+ AVI_CFG_NODE,
+ AVI_NODE_NR,
+};
+
+struct clk;
+struct avi_node;
+
+/**
+ * struct avi_controller - AVI controller descriptor.
+ * @base:            virtual base address of the mapped registers (0 when
+ *                   the driver failed to probe, see avi_probed())
+ * @cfg_lck:         protects accesses to the shared CFG registers
+ * @blend_lck:       serializes blender usage
+ * @blend_lck_owner: task currently holding @blend_lck
+ * @blend_lck_depth: recursion depth of @blend_lck
+ * @iomem:           register memory resource
+ * @clock:           controller ("ctrl") clock
+ * @dev:             associated platform device
+ * @nodes:           table of all node descriptors, indexed by avi_node_id
+ * @revision:        hardware revision reported by the platform data
+ * @irq_base:        first IRQ number of the AVI IRQ range
+ */
+struct avi_controller {
+ unsigned long base;
+ spinlock_t cfg_lck;
+ struct mutex blend_lck;
+ struct task_struct* blend_lck_owner;
+ int blend_lck_depth;
+ struct resource* iomem;
+ struct clk* clock;
+ struct device* dev;
+ struct avi_node* const nodes[AVI_NODE_NR];
+ enum avi_revision revision;
+ unsigned irq_base;
+};
+
+extern struct avi_controller avi_ctrl;
+
+/**************************
+ * AVI nodes declarations.
+ **************************/
+
+/* Value denoting an invalid src_id or sink_id in struct avi_node */
+#define AVI_SRCSINK_NONE 0xff
+
+/**
+ * struct avi_node - AVI module instance descriptor.
+ * @base_off:      register offset of this node from the controller base
+ * @type:          one of enum avi_node_type
+ * @src_id:        interconnect source id (AVI_SRCSINK_NONE when absent)
+ * @sink_id:       interconnect sink id (AVI_SRCSINK_NONE when absent)
+ * @cfg_off:       index of the node's CFG (enable/apply/IRQ) register;
+ *                 3 denotes the separate ISP register bank
+ * @cfg_mask:      this node's bit in the CFG registers
+ * @busy:          non-zero when the node is allocated to a client
+ * @name:          human readable node name
+ * @irq_offset:    IRQ offset from avi_controller.irq_base
+ * @assigned_caps: capabilities currently assigned to this node
+ * @node_id:       the node's identifier in avi_controller.nodes
+ */
+struct avi_node {
+ u32 const base_off;
+ unsigned char const type;
+ unsigned char const src_id;
+ unsigned char sink_id;
+ unsigned char const cfg_off;
+ unsigned const cfg_mask;
+ unsigned char busy;
+ char const * const name;
+ unsigned irq_offset;
+ unsigned assigned_caps;
+ enum avi_node_id node_id;
+};
+
+/* Hardware revision reported at probe time. */
+static inline enum avi_revision avi_get_revision(void)
+{
+ return avi_ctrl.revision;
+}
+
+extern int avi_dereset(void);
+extern void avi_reset(void);
+
+/* Type (enum avi_node_type) of @node. */
+static inline unsigned int avi_node_type(struct avi_node const* node)
+{
+ return (unsigned int) node->type;
+}
+
+/* Look up a node descriptor by identifier; @id must be a valid
+ * enum avi_node_id. */
+static inline struct avi_node* avi_get_node(unsigned int id)
+{
+ BUG_ON(id >= AVI_NODE_NR);
+
+ return avi_ctrl.nodes[id];
+}
+
+/* Virtual base address of @node's register bank. */
+static inline unsigned long avi_node_base(struct avi_node const* node)
+{
+ return avi_ctrl.base + ((unsigned long) node->base_off);
+}
+
+static inline int avi_probed(void)
+{
+ /* If the probe fails avi_ctrl.base is set to 0 to indicate that the
+ * driver is KO.*/
+ return avi_ctrl.base != 0;
+}
+
+/* Linux IRQ number associated with @node. */
+static inline unsigned avi_node_irq(struct avi_node const* node)
+{
+ return avi_ctrl.irq_base + node->irq_offset;
+}
+
+extern void memcpy_from_registers(void *, unsigned long, size_t);
+extern void avi_cfg_get_registers(struct avi_cfg_regs *);
+extern void avi_apply_node(struct avi_node const*);
+extern void avi_enable_node(struct avi_node const*);
+extern void avi_disable_node(struct avi_node const*);
+extern void avi_lock_node(struct avi_node const*);
+extern void avi_unlock_node(struct avi_node const*);
+
+/**
+ * enum avi_field - Frame field.
+ */
+enum avi_field {
+ AVI_NONE_FIELD,
+ AVI_TOPBOT_FIELD,
+ AVI_BOTTOP_FIELD,
+ AVI_TOP_FIELD,
+ AVI_BOT_FIELD,
+ AVI_BAD_FIELD,
+ AVI_ANY_FIELD,
+ AVI_ALTERNATE_FIELD,
+};
+
+/*
+ * avi_handle_irq_fn can return a bitmask of the following values.
+ */
+/* IRQ handled, do not forward to other clients */
+#define AVI_IRQ_HANDLED (1UL << 0)
+/* The interrupt has been acked at the node level (for instance using
+ * avi_ack_node_irq) by the interrupt callback. There's no need to do it in the
+ * generic code. */
+#define AVI_IRQ_ACKED (1UL << 2)
+
+extern int avi_node_get_irq_flag(struct avi_node const *node);
+extern void avi_ack_node_irq(struct avi_node const *);
+extern void avi_enable_node_irq(struct avi_node const *);
+extern void avi_disable_node_irq(struct avi_node const *);
+
+/**
+ * struct avi_platform_data - AVI platform specific data.
+ */
+struct avi_platform_data {
+ enum avi_revision revision;
+ unsigned irq_base;
+};
+
+/***********
+ * FIFO API
+ ***********/
+
+extern void avi_fifo_get_registers(struct avi_node const*,
+ struct avi_fifo_registers *);
+extern void avi_fifo_set_registers(struct avi_node const*,
+ struct avi_fifo_registers const*);
+
+/* set oneshot ("single") mode in FIFO.CFG */
+extern void avi_fifo_set_oneshot(struct avi_node const *fifo, bool oneshot);
+
+extern void avi_fifo_set_dmasa(struct avi_node const *fifo,
+ u32 dmasa0,
+ u32 dmasa1);
+extern void avi_fifo_set_dmalstr(struct avi_node const *fifo,
+ u32 dmalstr0,
+ u32 dmalstr1);
+
+extern union avi_fifo_status avi_fifo_get_status(struct avi_node const* node);
+
+extern void avi_fifo_force_clear(struct avi_node const *node);
+
+/**
+ * struct avi_blendin_cfg - Blending configuration holder.
+ */
+struct avi_blendin_cfg {
+ u32 offset;
+ u32 alpha;
+};
+
+/* Per pixel transparency using alpha channel */
+#define AVI_ALPHA_OSD (-1)
+/* parameter in percent. 100% is fully opaque. */
+#define AVI_ALPHA(_p) ((_p) * 0xff / 100)
+
+
+extern void avi_blendin_2cfg(struct avi_blendin_cfg*,
+ struct avi_node const*,
+ u16,
+ u16,
+ int);
+
+extern void avi_set_blendin_cfg(struct avi_node const*,
+ struct avi_blendin_cfg const*);
+extern void avi_get_blendin_cfg(struct avi_node const*,
+ struct avi_blendin_cfg*);
+
+extern void avi_setup_blender_input(struct avi_node const*,
+ unsigned,
+ unsigned,
+ unsigned,
+ int);
+
+extern void avi_setup_blendin(struct avi_node const*, u16, u16, int);
+extern void avi_setup_blendin_alpha(struct avi_node const*, int);
+
+static inline void avi_hide_blendin(struct avi_node const *blendin)
+{
+ /* We hide the blendin by sending it completely out of frame */
+ avi_setup_blendin(blendin, 0xffff, 0xffff, 0x0);
+}
+
+extern void avi_setup_blend(struct avi_node const*, u32, u16, u16);
+
+extern u32 avi_blend_get_bg(struct avi_node const *);
+extern void avi_blend_set_bg(struct avi_node const *, u32);
+
+/**********************
+ * Gamma corrector API
+ **********************/
+
+#define AVI_CMAP_SZ 0x400
+
+struct avi_cmap
+{
+ u8 red [AVI_CMAP_SZ];
+ u8 green[AVI_CMAP_SZ];
+ u8 blue [AVI_CMAP_SZ];
+};
+
+extern void avi_gam_setup(struct avi_node const*, int, int, int);
+extern u32 avi_gam_get_config(struct avi_node const*);
+extern void avi_gam_set_entry(struct avi_node const*, u16, u8, u8, u8);
+extern void avi_gam_set_cmap(struct avi_node const*, struct avi_cmap const *);
+extern void avi_gam_get_cmap(struct avi_node const*, struct avi_cmap*);
+
+/*********************
+ * LCD controller API
+ *********************/
+
+/* Inspired by fb_videomode but duplicated here to avoid a dependency on
+ * fbdev */
+
+#define AVI_VIDEOMODE_INTERLACED (1 << 0)
+#define AVI_VIDEOMODE_HSYNC_ACTIVE_HIGH (1 << 4)
+#define AVI_VIDEOMODE_VSYNC_ACTIVE_HIGH (1 << 5)
+
+#define AVI_VIDEOMODE_SYNC_ACTIVE_HIGH \
+ (AVI_VIDEOMODE_HSYNC_ACTIVE_HIGH | \
+ AVI_VIDEOMODE_VSYNC_ACTIVE_HIGH)
+
+struct avi_videomode {
+ const char *name;
+ u32 xres;
+ u32 yres;
+ /* pixclock in kHz *unlike* fb_videomode where it's in ps. */
+ u32 pixclock;
+ u32 left_margin;
+ u32 right_margin;
+ u32 upper_margin;
+ u32 lower_margin;
+ u32 hsync_len;
+ u32 vsync_len;
+ u32 flags;
+};
+
+extern void avi_lcd_get_registers(struct avi_node const*,
+ struct avi_lcd_regs *);
+extern void avi_lcd_set_registers(struct avi_node const*,
+ struct avi_lcd_regs const*);
+extern enum avi_field avi_lcd_get_current_field(struct avi_node const*);
+
+extern unsigned avi_lcd_get_colorbar(struct avi_node const*);
+extern void avi_lcd_set_colorbar(struct avi_node const*, unsigned);
+extern unsigned avi_lcd_get_force_clear(struct avi_node const *);
+extern void avi_lcd_set_force_clear(struct avi_node const *, unsigned);
+extern union avi_lcd_status avi_lcd_get_status(struct avi_node const*);
+
+extern u32 avi_lcd_get_dpd(struct avi_node const *);
+extern void avi_lcd_set_dpd(struct avi_node const *, u32);
+
+extern int avi_lcd_pixclock_enable(struct avi_node const *);
+extern int avi_lcd_pixclock_disable(struct avi_node const *);
+extern unsigned long avi_lcd_pixclock_round_rate(struct avi_node const *,
+ unsigned long );
+extern int avi_lcd_pixclock_set_rate(struct avi_node const *,
+ unsigned long);
+extern unsigned long avi_lcd_pixclock_get_rate(struct avi_node const *);
+
+extern void avi_vsync_gen_get_registers(struct avi_node const*,
+ struct avi_lcd_avi_vsync_gen_regs *);
+extern void avi_vsync_gen_set_registers(struct avi_node const*,
+ struct avi_lcd_avi_vsync_gen_regs const*);
+
+extern union avi_cam_status avi_cam_get_status(struct avi_node const*);
+extern void avi_cam_get_measure(struct avi_node const*,
+ struct avi_cam_measure_regs *);
+extern void avi_cam_set_itsource(struct avi_node const*,
+ union avi_cam_itsource);
+extern void avi_cam_set_timings(struct avi_node const*,
+ union avi_cam_h_timing,
+ union avi_cam_v_timing);
+extern void avi_cam_set_run(struct avi_node const*, union avi_cam_run);
+extern void avi_cam_run(struct avi_node const*);
+extern void avi_cam_freerun(struct avi_node const*);
+
+extern void avi_cam_set_registers(struct avi_node const*,
+ struct avi_cam_registers const*);
+extern void avi_cam_get_registers(struct avi_node const *,
+ struct avi_cam_registers *);
+extern void avi_inter_get_registers(u32[AVI_INTER_NSINK]);
+
+/**
+ * Rotator
+ */
+enum avi_rotation
+{
+ AVI_ROTATION_0,
+ AVI_ROTATION_90,
+ AVI_ROTATION_180,
+ AVI_ROTATION_270,
+ AVI_ROTATION_NR
+};
+
+extern void avi_rotator_setup(struct avi_node const*,
+ enum avi_rotation,
+ bool,
+ unsigned height_out,
+ unsigned nstrips);
+
+/**
+ * struct avi_rect - AVI frame area / rectangle descriptor.
+ * @top: position / offset from top of containing frame in number of pixels
+ * @left: position / offset from left of containing frame in number of pixels
+ * @width: rectangle width in pixels
+ * @height: rectangle height in pixels
+ */
+struct avi_rect {
+ u16 top;
+ u16 left;
+ u16 width;
+ u16 height;
+};
+
+/**********
+ * ISP API
+ **********/
+/* Inter */
+extern void avi_isp_chain_bayer_inter_set_registers(
+ struct avi_node const *,
+ struct avi_isp_chain_bayer_inter_regs const *);
+
+extern void avi_isp_chain_bayer_inter_get_registers(
+ struct avi_node const *,
+ struct avi_isp_chain_bayer_inter_regs *);
+
+extern void avi_isp_chain_yuv_inter_set_registers(
+ struct avi_node const *,
+ struct avi_isp_chain_yuv_inter_regs const *);
+
+extern void avi_isp_chain_yuv_inter_get_registers(
+ struct avi_node const *,
+ struct avi_isp_chain_yuv_inter_regs *);
+
+/* Bayer */
+extern void avi_isp_bayer_set_registers(struct avi_node const *,
+ struct avi_isp_bayer_regs const *);
+extern void avi_isp_bayer_get_registers(struct avi_node const *,
+ struct avi_isp_bayer_regs *);
+
+/* Stats */
+extern void avi_isp_statistics_bayer_set_registers(
+ struct avi_node *,
+ const struct avi_isp_statistics_bayer_regs *);
+
+extern void avi_isp_statistics_bayer_get_registers(
+ struct avi_node *,
+ struct avi_isp_statistics_bayer_regs *);
+
+/* Pedestal */
+extern void avi_isp_pedestal_get_registers(struct avi_node *chain_bayer,
+ struct avi_isp_pedestal_regs *regs);
+
+#endif
diff --git a/drivers/parrot/video/avi_cap.c b/drivers/parrot/video/avi_cap.c
new file mode 100644
index 00000000000000..77f3e121bc7390
--- /dev/null
+++ b/drivers/parrot/video/avi_cap.c
@@ -0,0 +1,254 @@
+#include <linux/export.h>
+#include <linux/seq_file.h>
+#include "avi_compat.h"
+#include "avi_cap.h"
+
+/*******************************************************************************
+ * Helpers
+ *******************************************************************************/
+
+/* Check that caps contains at most one of the capability bits present in
+ * the exclusive mask.
+ *
+ * Returns 1 if there's a conflict, 0 otherwise.
+ */
+static inline int avi_cap_conflicts(unsigned long caps,
+ unsigned long exclusive)
+{
+ unsigned long c = caps & exclusive;
+
+ /* Here we have a conflict if more than one bit of c is set. That means
+ * the Hamming weight of c must be <= 1 but we can do simpler: if only
+ * one bit is set then c is a power of two and then we can use the
+ * simple (c & (c - 1)) == 0 test to check that. This power of two test
+ * has one false positive when c == 0, but since 0 is also acceptable in
+ * this case (not a conflict) we're all set */
+ return (c & (c - 1)) != 0;
+}
+
+/*******************************************************************************
+ * Getters
+ *******************************************************************************/
+
+/* Iterate over an inclusive range of nodes and return the first unused one.
+ *
+ * If end < start we iterate backwards.
+ *
+ * If no available node is found NULL is returned.
+ *
+ * Needs to be called with avi_ctrl.cfg_lck held.
+ */
+struct avi_node *avi_cap_find_node(enum avi_node_id start,
+ enum avi_node_id end)
+{
+ int incr = (end >= start) ? 1 : -1; /* walk direction over the ID range */
+ int i = start;
+
+ for (;;) {
+ struct avi_node *node = avi_get_node(i);
+
+ if (!node->busy)
+ return node;
+
+ if (i == end)
+ /* No node found */
+ return NULL;
+
+ i += incr;
+ }
+}
+
+inline static struct avi_node * avi_cap_find_single(enum avi_node_id id)
+{
+ return avi_cap_find_node(id,id); /* degenerate single-node range */
+}
+
+/* Return a scaler with both of its entries available, or NULL if no such
+ * scaler is found */
+static struct avi_node * avi_cap_get_scaler(void)
+{
+ struct avi_node *scal, *scal_planar;
+ enum avi_node_id id = AVI_SCAL00_NODE;
+
+ while (id <= AVI_SCAL10_NODE) {
+ scal = avi_get_node(id);
+ scal_planar = avi_get_node(id+1); /* second (planar) entry */
+
+ if ((scal->busy) || (scal_planar->busy))
+ id += (AVI_SCAL10_NODE-AVI_SCAL00_NODE); /* next scaler */
+ else
+ return scal;
+ }
+ return NULL;
+}
+
+/* FIFO */
+
+#define FIFO_NR (AVI_FIFO11_NODE - AVI_FIFO00_NODE + 1)
+
+#define FIFO_00 (1 << 0)
+#define FIFO_01 (1 << 1)
+#define FIFO_02 (1 << 2)
+#define FIFO_03 (1 << 3)
+#define FIFO_04 (1 << 4)
+#define FIFO_05 (1 << 5)
+#define FIFO_06 (1 << 6)
+#define FIFO_07 (1 << 7)
+#define FIFO_08 (1 << 8)
+#define FIFO_09 (1 << 9)
+#define FIFO_10 (1 << 10)
+#define FIFO_11 (1 << 11)
+
+/* P7 FIFOs have different capabilities with chip revision. */
+
+#define AVI_FIFO_CAPS_VL (FIFO_00 | FIFO_01 | FIFO_02 | FIFO_03 | \
+ FIFO_04 | FIFO_05 | FIFO_06 | FIFO_07 | \
+ FIFO_08 | FIFO_09 | FIFO_10 | FIFO_11)
+
+#define AVI_FIFO_CAPS_IN (FIFO_00 | FIFO_01 | FIFO_02 | FIFO_03 | \
+ FIFO_04 | FIFO_05 | FIFO_06 | FIFO_07 | \
+ FIFO_08)
+
+#define AVI_FIFO_CAPS_OUT_R1 (FIFO_00 | FIFO_01 | FIFO_02 | FIFO_03 | \
+ FIFO_04 | FIFO_09)
+
+#define AVI_FIFO_CAPS_OUT_R2 (AVI_FIFO_CAPS_OUT_R1 | (FIFO_10))
+
+#define AVI_FIFO_CAPS_OUT_R3 (AVI_FIFO_CAPS_OUT_R2 | (FIFO_05 | FIFO_11))
+
+/* Search for an available FIFO among those allowed by the caps bitmask.
+ *
+ * Restricting caps lets callers reserve several distinct FIFOs before
+ * actually using them.
+ *
+ * The search goes from top to bottom because:
+ * - lower-featured FIFOs should be used in priority;
+ * - bottom FIFOs 0 and 1 have 8kB instead of 4kB, so we may want to
+ * request them separately. */
+static struct avi_node * avi_cap_get_fifo(unsigned long caps)
+{
+ int i;
+ struct avi_node *node;
+
+ /* Starting from top, check adapted FIFO busy state */
+ for ( i = (FIFO_NR -1); i >= 0; i--) {
+ if (!((1 << i) & caps))
+ continue;
+
+ node = avi_get_node(AVI_FIFO00_NODE + i);
+ if (!node->busy)
+ return node;
+ }
+ return NULL;
+}
+
+/* Return the node of a FIFO able to perform DMA out.
+ * NULL is returned if no suitable FIFO is found.
+ */
+#ifdef AVI_BACKWARD_COMPAT
+static struct avi_node * avi_cap_get_dmaout(void)
+{
+
+ switch(avi_get_revision()) {
+ case AVI_REVISION_1:
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_OUT_R1);
+ case AVI_REVISION_2:
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_OUT_R2);
+ case AVI_REVISION_3:
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_OUT_R3);
+ default: /* unknown chip revision */
+ BUG_ON(1);
+ }
+ return NULL;
+}
+#else /* AVI_BACKWARD_COMPAT */
+static inline struct avi_node * avi_cap_get_dmaout(void)
+{
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_OUT_R3);
+}
+#endif /* AVI_BACKWARD_COMPAT */
+
+/*******************************************************************************
+ * API
+ *******************************************************************************/
+
+int avi_cap_check(unsigned long caps) /* 0 if coherent, -EINVAL on conflict */
+{
+ /* A segment can have only one input */
+ if (avi_cap_conflicts(caps, AVI_CAPS_CAM_ALL | AVI_CAP_DMA_IN))
+ return -EINVAL;
+
+ /* and one output */
+ if (avi_cap_conflicts(caps, AVI_CAPS_LCD_ALL | AVI_CAP_DMA_OUT))
+ return -EINVAL;
+
+ /* planar only makes sense for DMA input and scaler */
+ if ((caps & AVI_CAP_PLANAR) &&
+ (!(caps & AVI_CAP_DMA_IN) && !(caps & AVI_CAP_SCAL)))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_cap_check);
+
+/* Returns a non-busy node with given capability.
+ * Returns NULL if no node with that capability is available, if more than
+ * one capability bit is set, or for unknown capabilities.
+ *
+ * Like avi_cap_find_node(), needs avi_ctrl.cfg_lck held.
+ */
+struct avi_node * avi_cap_get_node(unsigned long cap)
+{
+ switch(cap) {
+ case AVI_CAP_BUFFER:
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_VL);
+ case AVI_CAP_PLANAR: /* fall through: planar uses an input FIFO */
+ case AVI_CAP_DMA_IN:
+ return avi_cap_get_fifo(AVI_FIFO_CAPS_IN);
+ case AVI_CAP_DMA_OUT:
+ return avi_cap_get_dmaout();
+ case AVI_CAP_SCAL:
+ return avi_cap_get_scaler();
+ case AVI_CAP_CONV:
+ return avi_cap_find_node(AVI_CONV0_NODE,AVI_CONV3_NODE);
+ case AVI_CAP_GAM:
+ return avi_cap_find_node(AVI_GAM0_NODE,AVI_GAM1_NODE);
+ case AVI_CAP_CAM_0:
+ return avi_cap_find_single(AVI_CAM0_NODE);
+ case AVI_CAP_CAM_1:
+ return avi_cap_find_single(AVI_CAM1_NODE);
+ case AVI_CAP_CAM_2:
+ return avi_cap_find_single(AVI_CAM2_NODE);
+ case AVI_CAP_CAM_3:
+ return avi_cap_find_single(AVI_CAM3_NODE);
+ case AVI_CAP_CAM_4:
+ return avi_cap_find_single(AVI_CAM4_NODE);
+ case AVI_CAP_CAM_5:
+ return avi_cap_find_single(AVI_CAM5_NODE);
+ case AVI_CAP_LCD_0:
+ return avi_cap_find_single(AVI_LCD0_NODE);
+ case AVI_CAP_LCD_1:
+ return avi_cap_find_single(AVI_LCD1_NODE);
+ case AVI_CAP_ISP_CHAIN_BAYER:
+ return avi_cap_find_node(AVI_ISP_CHAIN_BAYER0_NODE,
+ AVI_ISP_CHAIN_BAYER1_NODE);
+ case AVI_CAP_STATS_YUV:
+ return avi_cap_find_node(AVI_STATS_YUV0_NODE,
+ AVI_STATS_YUV1_NODE);
+ case AVI_CAP_ISP_CHAIN_YUV:
+ return avi_cap_find_node(AVI_ISP_CHAIN_YUV0_NODE,
+ AVI_ISP_CHAIN_YUV1_NODE);
+ case AVI_CAP_STATS_BAYER_0:
+ return avi_cap_find_single(AVI_STATS_BAYER0_NODE);
+ case AVI_CAP_STATS_BAYER_1:
+ return avi_cap_find_single(AVI_STATS_BAYER1_NODE);
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(avi_cap_get_node);
+
+inline struct avi_node *avi_cap_get_blender(void) /* no cap bit for blenders */
+{
+ return avi_cap_find_node(AVI_BLEND0_NODE,AVI_BLEND1_NODE);
+}
+EXPORT_SYMBOL(avi_cap_get_blender);
diff --git a/drivers/parrot/video/avi_cap.h b/drivers/parrot/video/avi_cap.h
new file mode 100644
index 00000000000000..10c613d95d9be5
--- /dev/null
+++ b/drivers/parrot/video/avi_cap.h
@@ -0,0 +1,77 @@
+#ifndef _AVI_CAP_H_
+#define _AVI_CAP_H_
+
+/*
+ * Capabilities descriptions. Each segment has a set of capabilities.
+ *
+ * Warning: if you add/delete/modify these entries be sure to update
+ * avi_caps_to_string
+ */
+#define AVI_CAP_DMA_IN (1UL << 0)
+#define AVI_CAP_DMA_OUT (1UL << 1)
+#define AVI_CAP_BUFFER (1UL << 2) /* FIFO Buffer after a scaler */
+#define AVI_CAP_CONV (1UL << 3)
+#define AVI_CAP_GAM (1UL << 4)
+#define AVI_CAP_SCAL (1UL << 5)
+#define AVI_CAP_ROT (1UL << 6)
+#define AVI_CAP_PLANAR (1UL << 7)
+/* Do not use these capabilities directly, use AVI_CAPS_ISP instead, as it does
+ * not make sense to take only a part of the ISP */
+#define AVI_CAP_ISP_CHAIN_BAYER (1UL << 8)
+#define AVI_CAP_STATS_YUV (1UL << 9)
+#define AVI_CAP_ISP_CHAIN_YUV (1UL << 10)
+
+#define AVI_CAP_STATS_BAYER_0 (1UL << 11)
+#define AVI_CAP_STATS_BAYER_1 (1UL << 12)
+
+// XXX add the remaining capabilities
+/* We need one capability for each camera and LCD to be able to pick one
+ * explicitly (it doesn't make sense to dynamically allocate them) */
+#define AVI_CAP_CAM_0 (1UL << 24)
+#define AVI_CAP_CAM_1 (1UL << 25)
+#define AVI_CAP_CAM_2 (1UL << 26)
+#define AVI_CAP_CAM_3 (1UL << 27)
+#define AVI_CAP_CAM_4 (1UL << 28)
+#define AVI_CAP_CAM_5 (1UL << 29)
+
+#define AVI_CAP_LCD_0 (1UL << 30)
+#define AVI_CAP_LCD_1 (1UL << 31)
+
+/* It does not make sense to take only a part of the ISP-chain */
+#define AVI_CAPS_ISP (AVI_CAP_ISP_CHAIN_BAYER | \
+ AVI_CAP_GAM | \
+ AVI_CAP_CONV | \
+ AVI_CAP_STATS_YUV | \
+ AVI_CAP_ISP_CHAIN_YUV )
+
+#define AVI_CAPS_CAM_ALL (AVI_CAP_CAM_0 | \
+ AVI_CAP_CAM_1 | \
+ AVI_CAP_CAM_2 | \
+ AVI_CAP_CAM_3 | \
+ AVI_CAP_CAM_4 | \
+ AVI_CAP_CAM_5 )
+
+#define AVI_CAPS_LCD_ALL (AVI_CAP_LCD_0 | AVI_CAP_LCD_1)
+
+/* Check that caps describes a coherent capability set (at most one input,
+ * at most one output, planar only together with DMA-in or scaler).
+ *
+ * Returns 0 if the set is valid, -EINVAL on conflict. */
+extern int avi_cap_check(unsigned long caps);
+
+/* Get a non-busy node with given capability.
+ *
+ * Returns NULL if no node is found or if more than one cap is requested.
+ */
+extern struct avi_node *avi_cap_get_node(unsigned long cap);
+
+/* Get a non-busy blender node.
+ *
+ * There is a separate function for blenders as no capability bit is
+ * associated with a blender.
+ *
+ * Returns NULL if no blender node is available.
+ */
+extern struct avi_node *avi_cap_get_blender(void);
+
+#endif /* _AVI_CAP_H_ */
diff --git a/drivers/parrot/video/avi_chroma.c b/drivers/parrot/video/avi_chroma.c
new file mode 100644
index 00000000000000..b9c096204f1d1b
--- /dev/null
+++ b/drivers/parrot/video/avi_chroma.c
@@ -0,0 +1,508 @@
+#include <linux/module.h>
+#include "avi_chroma.h"
+
+/************************************
+ * Chroma converters low-level setup
+ ************************************/
+
+/* The Q311 macro converts from a float to a fixed point 3.11 2's complement
+ * integer.
+ *
+ * Q311(1.0) => 0x0800
+ * Q311(0.0) => 0x0000
+ * Q311(1.42) => 0x0b5c
+ * Q311(-3.3) => 0x259a
+ */
+#define ABS(i) ((i) < 0 ? -(i) : (i))
+#define COMPLEMENT_2(i, r) (((i) >= 0) ? (r) : (~(r) + 1) & 0x3fff)
+#define Q311(i) (COMPLEMENT_2(i, (unsigned)(((ABS(i)) * (1 << 11)) + 0.5)))
+
+/*
+ * Chroma converter parameters to convert input stream from a given
+ * color space to another.
+ * See http://www.fourcc.org/fccyvrgb.php
+ */
+
+#define AVI_CONV_MATRIX(_c00, _c01, _c02, \
+ _c10, _c11, _c12, \
+ _c20, _c21, _c22) \
+ .coeff_01_00 = {{ .coeff_00 = Q311(_c00), .coeff_01 = Q311(_c01) }}, \
+ .coeff_10_02 = {{ .coeff_02 = Q311(_c02), .coeff_10 = Q311(_c10) }}, \
+ .coeff_12_11 = {{ .coeff_11 = Q311(_c11), .coeff_12 = Q311(_c12) }}, \
+ .coeff_21_20 = {{ .coeff_20 = Q311(_c20), .coeff_21 = Q311(_c21) }}, \
+ .coeff_22 = {{ .coeff_22 = Q311(_c22) }}
+
+#define AVI_CONV_OFFSETS(_ryin, _ryout, \
+ _guin, _guout, \
+ _bvin, _bvout) \
+ .offset_ry = {{ .offset_in = _ryin, .offset_out = _ryout }}, \
+ .offset_gu = {{ .offset_in = _guin, .offset_out = _guout }}, \
+ .offset_bv = {{ .offset_in = _bvin, .offset_out = _bvout }}
+
+#define AVI_CONV_CLIPS(_rymin, _rymax, \
+ _gumin, _gumax, \
+ _bvmin, _bvmax) \
+ .clip_ry = {{ .clip_min = _rymin, .clip_max = _rymax }}, \
+ .clip_gu = {{ .clip_min = _gumin, .clip_max = _gumax }}, \
+ .clip_bv = {{ .clip_min = _bvmin, .clip_max = _bvmax }}
+
+/* Identity config: no conversion */
+#define AVI_CONV_CONF_ID \
+ AVI_CONV_MATRIX(1, 0, 0, \
+ 0, 1, 0, \
+ 0, 0, 1), \
+ AVI_CONV_OFFSETS(0, 0, 0, 0, 0, 0), \
+ AVI_CONV_CLIPS(0, 0xff, 0, 0xff, 0, 0xff)
+
+struct avi_isp_chroma_regs const avi_conv_cfg[AVI_CSPACE_NR][AVI_CSPACE_NR] = {
+ /*
+ * No / transparent converter
+ * digital RGB[0-255] To digital RGB[0-255]
+ */
+ [AVI_NULL_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_JFIF_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ },
+ /*
+ * BT.601 (usually SDTV) to digital RGB
+ * Y[16-235]/CbCr[16-240] To digital RGB[0-255]
+ * R' = 1.164(Y-16) + 1.596(Cr-128)
+ * G' = 1.164(Y-16) - 0.391(Cb-128) - 0.813(Cr-128)
+ * B' = 1.164(Y-16) + 2.018(Cb-128)
+ */
+ [AVI_BT601_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_MATRIX(1.164, 0.0 , 1.596,
+ 1.164, -0.391, -0.813,
+ 1.164, 2.018, 0.0),
+ AVI_CONV_OFFSETS(16 , 0,
+ 128, 0,
+ 128, 0),
+ AVI_CONV_CLIPS(0, 0xff,
+ 0, 0xff,
+ 0, 0xff),
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_MATRIX(1.146, 0.015, 0.003,
+ 0.008, 1.153, 0.003,
+ 0.008, 0.015, 1.142),
+ AVI_CONV_OFFSETS(16 , 16 ,
+ 128, 128,
+ 128, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 16, 240,
+ 16, 240),
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_MATRIX(1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1),
+ AVI_CONV_OFFSETS(0 , 0,
+ 0, 0,
+ 0, 0),
+ AVI_CONV_CLIPS(0, 0xff,
+ 0, 0,
+ 0, 0),
+ },
+ },
+ /*
+ * BT.709 (usually HDTV) to digital RGB
+ * Y[16-235]/CbCr[16-240] To digital RGB[0-255]
+ * NOTE(review): this comment used to quote BT.709 coefficients
+ * (1.164/1.793/-0.213/-0.534/2.115), but the matrix below actually
+ * uses the full-range BT.601/JFIF ones (1.0/1.402/-0.344/-0.714/
+ * 1.772) — confirm which set is intended. */
+ [AVI_BT709_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_MATRIX(1.0, 0.0 , 1.402,
+ 1.0, -0.344, -0.714,
+ 1.0, 1.772, 0.0),
+ AVI_CONV_OFFSETS(16 , 0,
+ 128, 0,
+ 128, 0),
+ AVI_CONV_CLIPS(0, 0xff,
+ 0, 0xff,
+ 0, 0xff),
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_MATRIX( 0.873, -0.012, -0.002,
+ -0.006, 0.867, -0.002,
+ -0.006, -0.012, 0.876),
+ AVI_CONV_OFFSETS(16 , 16 ,
+ 128, 128,
+ 128, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 16, 240,
+ 16, 240),
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_MATRIX( 0.873, -0.012, -0.002,
+ -0.006, 0.867, -0.002,
+ -0.006, -0.012, 0.876),
+ AVI_CONV_OFFSETS(16 , 16 ,
+ 128, 128,
+ 128, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 0, 0,
+ 0, 0),
+ },
+ },
+ [AVI_RGB_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_MATRIX( 0.257, 0.504, 0.098,
+ -0.148, -0.291, 0.439,
+ 0.439, -0.368, -0.071),
+ AVI_CONV_OFFSETS(0, 16,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 16, 240,
+ 16, 240),
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_MATRIX( 0.213, 0.715, 0.072,
+ -0.100, -0.336, 0.436,
+ 0.615, -0.515, -0.100),
+ AVI_CONV_OFFSETS(0, 16,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 16, 240,
+ 16, 240),
+ },
+ [AVI_JFIF_CSPACE] = {
+ AVI_CONV_MATRIX( 0.299, 0.587, 0.114,
+ -0.1697, -0.3313, 0.5,
+ 0.5, -0.4187, -0.0813),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_MATRIX( 0.257, 0.504, 0.098,
+ -0.148, -0.291, 0.439,
+ 0.439, -0.368, -0.071),
+ AVI_CONV_OFFSETS(0, 16,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(16, 235,
+ 0, 0,
+ 0, 0),
+ },
+ },
+ [AVI_GREY_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0, 0,
+ 1, 0, 0,
+ 1, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 0,
+ 0, 0),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0, 0,
+ 0, 0, 0,
+ 0, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ },
+ [AVI_Y10_CSPACE] = {
+ [AVI_NULL_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ [AVI_RGB_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0.03125, 0,
+ 1, 0.03125, 0,
+ 1, 0.03125, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 0,
+ 0, 0),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_BT601_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0.03125, 0,
+ 0, 0, 0,
+ 0, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_BT709_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0.03125, 0,
+ 0, 0, 0,
+ 0, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 128,
+ 0, 128),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_GREY_CSPACE] = {
+ AVI_CONV_MATRIX( 1, 0.03125, 0,
+ 0, 0, 0,
+ 0, 0, 0),
+ AVI_CONV_OFFSETS(0, 0,
+ 0, 0,
+ 0, 0),
+ AVI_CONV_CLIPS(0, 255,
+ 0, 255,
+ 0, 255),
+ },
+ [AVI_Y10_CSPACE] = {
+ AVI_CONV_CONF_ID,
+ },
+ },
+};
+EXPORT_SYMBOL(avi_conv_cfg);
+
+/***********************
+ * Color space handling
+ ***********************/
+
+static inline void memcpy_to_registers(unsigned long addr,
+ const void *reg_base,
+ size_t s)
+{
+ const u32 *reg = reg_base;
+ unsigned i;
+
+ BUG_ON(s % sizeof(u32) != 0); /* must be whole 32-bit registers */
+ s /= sizeof(u32); /* we write one register at a time */
+
+ for (i = 0; i < s; i++)
+ AVI_WRITE(reg[i], addr + i * sizeof(u32));
+ wmb(); /* make sure all register writes are posted */
+}
+
+static void avi_setup_conv(struct avi_node const* conv,
+ struct avi_isp_chroma_regs const* regs)
+{
+ unsigned long base = avi_node_base(conv);
+
+ BUG_ON(avi_node_type(conv) != AVI_CONV_NODE_TYPE); /* conv nodes only */
+
+ memcpy_to_registers(base, regs, sizeof(*regs));
+}
+
+static const struct avi_isp_chroma_regs *
+avi_conv_get_matrix(enum avi_colorspace from,
+ enum avi_colorspace to)
+{
+ const struct avi_isp_chroma_regs *matrix;
+
+ BUG_ON(from >= AVI_CSPACE_NR);
+ BUG_ON(to >= AVI_CSPACE_NR);
+
+ matrix = &avi_conv_cfg[from][to];
+
+ /* Unsupported conversions leave coeff_00 == 0 (identity is Q311(1)) */
+ BUG_ON(matrix->coeff_01_00.coeff_00 == 0);
+
+ return matrix;
+}
+
+void avi_setup_conv_colorspace(struct avi_node const *conv,
+ enum avi_colorspace from,
+ enum avi_colorspace to) /* program conv for from -> to */
+{
+ avi_setup_conv(conv, avi_conv_get_matrix(from, to));
+}
+EXPORT_SYMBOL(avi_setup_conv_colorspace);
+
+void avi_conv_get_registers(struct avi_node const* conv,
+ struct avi_isp_chroma_regs* regs)
+{
+ memcpy_from_registers(regs, avi_node_base(conv), sizeof(*regs)); /* NOTE(review): helper declared elsewhere, presumably avi_compat.h */
+}
+EXPORT_SYMBOL(avi_conv_get_registers);
+
+/* Sign-extend Q3.11 into Q21.11 so the native two's complement
+ representation can be used. Without this, arithmetic on negative Q3.11
+ values yields incorrect results. */
+static s32 avi_q311_to_q2111(u32 q)
+{
+ /* Check if the sign bit is set, and if so extend it to 32bits */
+ if (q & (1 << 13)) {
+ q |= 0xffffc000;
+ }
+
+ /* We can now cast it as a native s32 since the two's complement
+ representation is correct. */
+ return (s32)q;
+}
+
+/* Compute the dot product of two vectors, the second one being expressed
+ in Q3.11 fixed point. The result is a Q6.11 value sign-extended over
+ 32 bits (i.e. Q22.11). */
+static s32 avi_q311_dot_product(s32 r, s32 g, s32 b,
+ u32 qr, u32 qg, u32 qb)
+{
+ s32 fr = avi_q311_to_q2111(qr);
+ s32 fg = avi_q311_to_q2111(qg);
+ s32 fb = avi_q311_to_q2111(qb);
+
+ return r * fr + g * fg + b * fb;
+}
+
+/* Round a Q3.11 value to the nearest integer (add 0.5, then truncate).
+ Not sure if that works properly for negative values but those will
+ get clipped afterwards anyway. */
+static s32 avi_q311_to_s32(s32 q) {
+ return (q + (1 << 10)) >> 11;
+}
+
+static u32 avi_conv_clip(s32 c, u32 clip_min, u32 clip_max) /* clamp c into [clip_min, clip_max] */
+{
+ u32 uc = c;
+
+ if (c <= 0) { /* negative values always clip to the minimum */
+ return clip_min;
+ }
+
+ if (uc < clip_min) {
+ return clip_min;
+ } else if (uc > clip_max) {
+ return clip_max;
+ }
+
+ return uc;
+}
+
+/* Simulate CONV color conversion in software. The only complexity
+ comes from the fact that the CONV factors are stored as Q3.11 which
+ requires a fair bit of fixed point arithmetic. I haven't tested
+ this against the actual C model so I don't guarantee bit
+ accuracy. */
+u32 avi_conv_convert_color(enum avi_colorspace from,
+ enum avi_colorspace to,
+ u32 color)
+{
+ const struct avi_isp_chroma_regs *matrix;
+
+ s32 ry = (color >> 16) & 0xff;
+ s32 gu = (color >> 8) & 0xff;
+ s32 bv = (color >> 0) & 0xff;
+
+ s32 o_ry, o_gu, o_bv;
+
+ matrix = avi_conv_get_matrix(from, to);
+
+ /* Subtract input offsets */
+ ry -= matrix->offset_ry.offset_in;
+ gu -= matrix->offset_gu.offset_in;
+ bv -= matrix->offset_bv.offset_in;
+
+ /* Handle matrix transformation */
+ o_ry = avi_q311_dot_product(ry,
+ gu,
+ bv,
+ matrix->coeff_01_00.coeff_00,
+ matrix->coeff_01_00.coeff_01,
+ matrix->coeff_10_02.coeff_02);
+
+ o_gu = avi_q311_dot_product(ry,
+ gu,
+ bv,
+ matrix->coeff_10_02.coeff_10,
+ matrix->coeff_12_11.coeff_11,
+ matrix->coeff_12_11.coeff_12);
+
+ o_bv = avi_q311_dot_product(ry,
+ gu,
+ bv,
+ matrix->coeff_21_20.coeff_20,
+ matrix->coeff_21_20.coeff_21,
+ matrix->coeff_22 .coeff_22);
+
+ /* Convert from fixed point to integer by rounding */
+ o_ry = avi_q311_to_s32(o_ry);
+ o_gu = avi_q311_to_s32(o_gu);
+ o_bv = avi_q311_to_s32(o_bv);
+
+ /* Add output offsets */
+ o_ry += matrix->offset_ry.offset_out;
+ o_gu += matrix->offset_gu.offset_out;
+ o_bv += matrix->offset_bv.offset_out;
+
+ /* Clip */
+ o_ry = avi_conv_clip(o_ry,
+ matrix->clip_ry.clip_min,
+ matrix->clip_ry.clip_max);
+ o_gu = avi_conv_clip(o_gu,
+ matrix->clip_gu.clip_min,
+ matrix->clip_gu.clip_max);
+ o_bv = avi_conv_clip(o_bv,
+ matrix->clip_bv.clip_min,
+ matrix->clip_bv.clip_max);
+
+ return (o_ry << 16) | (o_gu << 8) | o_bv;
+}
+EXPORT_SYMBOL(avi_conv_convert_color);
diff --git a/drivers/parrot/video/avi_chroma.h b/drivers/parrot/video/avi_chroma.h
new file mode 100644
index 00000000000000..2c3f568a8f31ce
--- /dev/null
+++ b/drivers/parrot/video/avi_chroma.h
@@ -0,0 +1,60 @@
+#ifndef _AVI_CHROMA_H_
+#define _AVI_CHROMA_H_
+
+#include "avi_compat.h"
+
+/*
+ * linux/drivers/parrot/video/avi_chroma.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 22-Dec-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/***********************
+ * Chroma converter API
+ ***********************/
+enum avi_colorspace {
+ AVI_NULL_CSPACE = 0,
+ AVI_RGB_CSPACE,
+ AVI_BT601_CSPACE,
+ AVI_BT709_CSPACE,
+ AVI_JFIF_CSPACE,
+ AVI_GREY_CSPACE,
+ AVI_Y10_CSPACE,
+ AVI_CSPACE_NR,
+};
+
+extern struct avi_isp_chroma_regs const avi_conv_cfg[AVI_CSPACE_NR][AVI_CSPACE_NR];
+
+extern void avi_setup_conv_colorspace(struct avi_node const *,
+ enum avi_colorspace, enum avi_colorspace);
+
+
+extern void avi_conv_get_registers(struct avi_node const*,
+ struct avi_isp_chroma_regs*);
+
+
+/* Convert a 888 YUV or RGB color (depending on colorspace `from`) to the same
+ * color in colorspace `to` */
+extern u32 avi_conv_convert_color(enum avi_colorspace from,
+ enum avi_colorspace to,
+ u32 color);
+
+#endif /* _AVI_CHROMA_H_ */
diff --git a/drivers/parrot/video/avi_compat.c b/drivers/parrot/video/avi_compat.c
new file mode 100644
index 00000000000000..93fe840293eff6
--- /dev/null
+++ b/drivers/parrot/video/avi_compat.c
@@ -0,0 +1,1182 @@
+#include <linux/module.h>
+
+#include "avi_compat.h"
+
+/***************************
+ * P7R1 Compatibility layer
+ ***************************/
+
+static void avi_compat_r1_conv_write(u32 val, u32 addr) /* R1 CONV reg shim */
+{
+ u32 off = addr & 0xff;
+
+ if (off <= AVI_ISP_CHROMA_COEFF_22) {
+ /* The coeff matrix layout didn't change */
+ __raw_writel(val, addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_RY) {
+ union avi_isp_chroma_offset_ry reg = { ._register = val};
+ u32 r1_addr = (addr & ~0xff) + 0x14; /* R1 packed offsets reg */
+ u32 r1_offsets = __raw_readl(r1_addr);
+
+ r1_offsets &= 0xff00ff;
+ r1_offsets |= (reg.offset_in & 0xff) << 8;
+ r1_offsets |= (reg.offset_out & 0xff) << 24;
+
+ __raw_writel(r1_offsets, r1_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_RY) {
+ union avi_isp_chroma_clip_ry reg = { ._register = val};
+ u32 r1_addr = (addr & ~0xff) + 0x18; /* R1 packed clip reg */
+ u32 r1_clip = __raw_readl(r1_addr);
+
+ r1_clip &= 0xffff;
+ r1_clip |= (reg.clip_min & 0xff) << 16;
+ r1_clip |= (reg.clip_max & 0xff) << 24;
+
+ __raw_writel(r1_clip, r1_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_GU) { /* Offset GU */
+ union avi_isp_chroma_offset_gu reg = { ._register = val};
+ u32 r1_addr = (addr & ~0xff) + 0x14;
+ u32 r1_offsets = __raw_readl(r1_addr);
+
+ r1_offsets &= 0xff00ff00;
+ r1_offsets |= (reg.offset_in & 0xff) << 0;
+ r1_offsets |= (reg.offset_out & 0xff) << 16;
+
+ __raw_writel(r1_offsets, r1_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_GU) {
+ union avi_isp_chroma_clip_gu reg = { ._register = val};
+ u32 r1_addr = (addr & ~0xff) + 0x18;
+ u32 r1_clip = __raw_readl(r1_addr);
+
+ r1_clip &= 0xffff0000;
+ r1_clip |= (reg.clip_min & 0xff) << 0;
+ r1_clip |= (reg.clip_max & 0xff) << 8;
+
+ __raw_writel(r1_clip, r1_addr);
+ return;
+ }
+
+ /* The Offsets and Clip values of the GU and BV components are not
+ * separate in P7R1. We use the GU value for both above, so we just
+ * ignore all writes to the BV regs */
+ return;
+}
+
+static u32 avi_compat_r1_conv_read(u32 addr) /* R1 CONV reg shim */
+{
+ u32 off = addr & 0xff;
+
+ if (off <= AVI_ISP_CHROMA_COEFF_22) {
+ /* The coeff matrix layout didn't change */
+ return __raw_readl(addr);
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_RY) { /* Offset RY */
+ union avi_isp_chroma_offset_ry reg;
+
+ u32 r1_addr = (addr & ~0xff) + 0x14; /* R1 packed offsets reg */
+ u32 r1_offsets = __raw_readl(r1_addr);
+
+ reg.offset_in = (r1_offsets >> 8) & 0xff;
+ reg.offset_out = (r1_offsets >> 24) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_RY) { /* Clip RY */
+ union avi_isp_chroma_clip_ry reg;
+
+ u32 r1_addr = (addr & ~0xff) + 0x18; /* R1 packed clip reg */
+ u32 r1_clip = __raw_readl(r1_addr);
+
+ reg.clip_min = (r1_clip >> 16) & 0xff;
+ reg.clip_max = (r1_clip >> 24) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_GU ||
+ off == AVI_ISP_CHROMA_OFFSET_BV) { /* GU/BV share R1 storage */
+ union avi_isp_chroma_offset_gu reg;
+
+ u32 r1_addr = (addr & ~0xff) + 0x14;
+ u32 r1_offsets = __raw_readl(r1_addr);
+
+ reg.offset_in = (r1_offsets >> 0) & 0xff;
+ reg.offset_out = (r1_offsets >> 16) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_GU ||
+ off == AVI_ISP_CHROMA_CLIP_BV) { /* GU/BV share R1 storage */
+ union avi_isp_chroma_clip_gu reg;
+
+ u32 r1_addr = (addr & ~0xff) + 0x18;
+ u32 r1_clip = __raw_readl(r1_addr);
+
+ reg.clip_min = (r1_clip >> 0) & 0xff;
+ reg.clip_max = (r1_clip >> 8) & 0xff;
+
+ return reg._register;
+ }
+
+ /* Unknown register */
+ return __raw_readl(addr);
+}
+
+static inline void avi_compat_r1_gam_write_reg(u32 val, u32 addr)
+{
+ /* RTL workarounds: AVI R1 slave locks when it receives bursts to the
+ * gamma CLUT. To prevent that, I added a memory barrier to force all
+ * entries to be written one by one. There's another bug that causes
+ * silent failures when accessing (read/write) the CLUT RAM. To make
+ * sure the writel is effective, I loop it until readl returns the
+ * correct value. Needless to say, since both writel *and* readl can
+ * fail, it can loop a while before settling, so expect poor
+ * performance. NOT FOOLPROOF! */
+
+ do {
+ wmb();
+ __raw_writel(val, addr);
+ wmb();
+ } while (readl(addr) != (val));
+}
+
+static void avi_compat_r1_gam_write(u32 val, u32 addr) /* R1 gamma shim */
+{
+ int gam = !!(addr & 0x4000); /* gamma corrector instance (0 or 1) */
+ u32 bank = addr & 0x3000; /* conf or one of the three LUT banks */
+ u32 off = addr & 0x0fff;
+
+ /* The LUTs are smaller in R1 because there's no 10 bit
+ * support. Therefore the offsets are different. */
+ u32 base_r1 = (addr & ~0x7fffUL) + gam * 0x1000;
+
+ switch (bank) {
+ case AVI_ISP_GAMMA_CORRECTOR_CONF:
+ if (off == AVI_ISP_GAMMA_CORRECTOR_CONF) {
+ union avi_isp_gamma_corrector_conf cfg =
+ { ._register = val };
+
+ /* R1 ISP does not support 10 bit mode */
+ BUG_ON(cfg.comp_width);
+
+ avi_compat_r1_gam_write_reg(val, base_r1);
+ break;
+ }
+ /* Nothing here */
+ BUG_ON(val);
+ break;
+ case AVI_ISP_GAMMA_CORRECTOR_RY_LUT:
+ case AVI_ISP_GAMMA_CORRECTOR_GU_LUT:
+ case AVI_ISP_GAMMA_CORRECTOR_BV_LUT:
+ /* P7 R1 has smaller LUTs */
+ if ((off >> 2) > 0xff)
+ BUG_ON(val);
+ else
+ avi_compat_r1_gam_write_reg(val,
+ base_r1 + (bank >> 2)
+ + off);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline u32 avi_compat_r1_gam_read_reg(u32 addr)
+{
+ /* RTL workaround: same problem as avi_compat_r1_gam_write_reg above. I
+ * read the register until I get twice the same value in a row. NOT
+ * FOOLPROOF! */
+ u32 r, rprev;
+
+ r = readl(addr);
+ do {
+ rprev = r;
+ r = readl(addr);
+ } while (r != rprev);
+
+ return r;
+}
+
+static u32 avi_compat_r1_gam_read(u32 addr) /* R1 gamma shim */
+{
+ int gam = !!(addr & 0x4000); /* gamma corrector instance (0 or 1) */
+ u32 bank = addr & 0x3000; /* conf or one of the three LUT banks */
+ u32 off = addr & 0x0fff;
+
+ /* The LUTs are smaller in R1 because there's no 10 bit
+ * support. Therefore the offsets are different. */
+ u32 base_r1 = (addr & ~0x7fffUL) + gam * 0x1000;
+
+ switch (bank) {
+ case AVI_ISP_GAMMA_CORRECTOR_CONF:
+ if (off == AVI_ISP_GAMMA_CORRECTOR_CONF) {
+ return avi_compat_r1_gam_read_reg(base_r1);
+ break; /* unreachable */
+ }
+ /* Nothing here */
+ return 0;
+ break; /* unreachable */
+ case AVI_ISP_GAMMA_CORRECTOR_RY_LUT:
+ case AVI_ISP_GAMMA_CORRECTOR_GU_LUT:
+ case AVI_ISP_GAMMA_CORRECTOR_BV_LUT:
+ /* P7 R1 has smaller LUTs */
+ if ((off >> 2) > 0xff)
+ return 0;
+ else
+ return avi_compat_r1_gam_read_reg(base_r1 + (bank >> 2)
+ + off);
+ break; /* unreachable */
+ default:
+ return 0;
+ }
+}
+
+void avi_compat_r1_write(u32 val, u32 addr) /* route a write via R1 shims */
+{
+ u32 block = addr & 0x1ff00;
+
+ /* Match each block in ascending order of the AVI reg map */
+ if (block < 0x100)
+ /* CFG */
+ goto write;
+ else if (block < 0x200)
+ /* INTER */
+ goto write;
+ else if (block < 0x1000)
+ /* ISP INTER, not implemented */
+ BUG_ON(val);
+ else if (block < 0x3000)
+ /* FIFO */
+ goto write;
+ else if (block < 0x4000)
+ /* CONV */
+ avi_compat_r1_conv_write(val, addr);
+ else if (block < 0x5000)
+ /* BLEND */
+ goto write;
+ else if (block < 0x6000)
+ /* LCD */
+ goto write;
+ else if (block < 0x7000)
+ /* CAM */
+ goto write;
+ else if (block < 0x8000)
+ /* SCALROT */
+ goto write;
+ else if (block < 0x10000)
+ /* GAM */
+ avi_compat_r1_gam_write(val, addr);
+ else
+ goto write;
+
+ return;
+write:
+ __raw_writel(val, addr);
+}
+EXPORT_SYMBOL(avi_compat_r1_write);
+
+u32 avi_compat_r1_read(u32 addr) /* route a read via R1 shims */
+{
+ u32 block = addr & 0x1ff00;
+
+ /* Match each block in ascending order of the AVI reg map */
+ if (block < 0x100)
+ /* CFG */
+ goto read;
+ else if (block < 0x200)
+ /* INTER */
+ goto read;
+ else if (block < 0x1000)
+ /* ISP INTER, not implemented: reads fall through to raw access */
+ goto read;
+ else if (block < 0x3000)
+ /* FIFO */
+ goto read;
+ else if (block < 0x4000)
+ /* CONV */
+ return avi_compat_r1_conv_read(addr);
+ else if (block < 0x5000)
+ /* BLEND */
+ goto read;
+ else if (block < 0x6000)
+ /* LCD */
+ goto read;
+ else if (block < 0x7000)
+ /* CAM */
+ goto read;
+ else if (block < 0x8000)
+ /* SCALROT */
+ goto read;
+ else if (block < 0x10000)
+ /* GAM */
+ return avi_compat_r1_gam_read(addr);
+ else
+ goto read;
+read:
+
+ return __raw_readl(addr);
+}
+EXPORT_SYMBOL(avi_compat_r1_read);
+
+/***************************
+ * P7R2 Compatibility layer
+ ***************************/
+union avi_cfg_r2_isp_enable3 /* R2 layout of ISP_ENABLE3 (no i3d_lut bits) */
+{
+ struct
+ {
+ u32 inter0 : 1;
+ u32 inter1 : 1;
+ u32 rip0 : 1;
+ u32 rip1 : 1;
+ u32 anti_vign0 : 1;
+ u32 anti_vign1 : 1;
+ u32 chroma_aber0 : 1;
+ u32 chroma_aber1 : 1;
+ u32 denoise_ee0 : 1;
+ u32 denoise_ee1 : 1;
+ u32 bayer0 : 1;
+ u32 bayer1 : 1;
+ u32 drop0 : 1;
+ u32 drop1 : 1;
+ };
+ u32 _register;
+};
+
+/*
+ * Write a CFG register on R2.  For the ISP control registers
+ * (ENABLE3/APPLY3/LOCK3/ITFLG3/ITEN3/ITACK3) the R3 bit layout is
+ * re-packed into the R2 one, and the i3d_lut0/1 enable bits — which
+ * have no slot in the R2 register — are mirrored into bits 28/29 of a
+ * companion register at i3d_addr (per-case offset).  Any other CFG
+ * register is written through unchanged.
+ */
+static void avi_compat_r2_cfg_write(u32 val, u32 addr)
+{
+ u32 off = addr & 0xff;
+ u32 i3d_addr = addr & ~0xff;
+
+ switch (off) {
+ case AVI_CFG_ISP_ENABLE3:
+ i3d_addr += 0x04;
+ goto write;
+ case AVI_CFG_ISP_APPLY3:
+ i3d_addr += 0x0C;
+ goto write;
+ case AVI_CFG_ISP_LOCK3:
+ i3d_addr += 0x14;
+ goto write;
+ case AVI_CFG_ISP_ITFLG3:
+ i3d_addr += 0x24;
+ goto write;
+ case AVI_CFG_ISP_ITEN3:
+ i3d_addr += 0x2C;
+ goto write;
+ case AVI_CFG_ISP_ITACK3:
+ i3d_addr += 0x34;
+write:
+ {
+ /* Unpack the R3 layout... */
+ union avi_cfg_isp_enable3 reg = { ._register = val};
+
+ /* ...and re-pack it into the R2 bit positions */
+ union avi_cfg_r2_isp_enable3 r2 = {
+ .inter0 = reg.inter0_enable,
+ .inter1 = reg.inter1_enable,
+ .rip0 = reg.rip0_enable,
+ .rip1 = reg.rip1_enable,
+ .anti_vign0 = reg.lsc0_enable,
+ .anti_vign1 = reg.lsc1_enable,
+ .chroma_aber0 = reg.chroma_aber0_enable,
+ .chroma_aber1 = reg.chroma_aber1_enable,
+ .denoise_ee0 = reg.denoise0_enable,
+ .denoise_ee1 = reg.denoise1_enable,
+ .bayer0 = reg.bayer0_enable,
+ .bayer1 = reg.bayer1_enable,
+ .drop0 = reg.drop0_enable,
+ .drop1 = reg.drop1_enable,
+ };
+
+ writel(r2._register, addr);
+ /* i3d_lut enables live in bits 28/29 of the companion reg */
+ writel(readl(i3d_addr) |
+ reg.i3d_lut0_enable << 28 |
+ reg.i3d_lut1_enable << 29, i3d_addr);
+ }
+ break;
+
+ default:
+ writel(val, addr);
+ return;
+ }
+}
+
+/*
+ * Read a CFG register on R2: inverse of avi_compat_r2_cfg_write().
+ * For the ISP control registers the R2 bits are unpacked back into
+ * the R3 layout, and the i3d_lut0/1 enables are recovered from bits
+ * 28/29 of the companion register at i3d_addr.  Any other CFG
+ * register is read through unchanged.
+ */
+static u32 avi_compat_r2_cfg_read(u32 addr)
+{
+ u32 off = addr & 0xff;
+ u32 i3d_addr = addr & ~0xff;
+
+ switch (off) {
+ case AVI_CFG_ISP_ENABLE3:
+ i3d_addr += 0x04;
+ goto read;
+ case AVI_CFG_ISP_APPLY3:
+ i3d_addr += 0x0C;
+ goto read;
+ case AVI_CFG_ISP_LOCK3:
+ i3d_addr += 0x14;
+ goto read;
+ case AVI_CFG_ISP_ITFLG3:
+ i3d_addr += 0x24;
+ goto read;
+ case AVI_CFG_ISP_ITEN3:
+ i3d_addr += 0x2C;
+ goto read;
+ case AVI_CFG_ISP_ITACK3:
+ i3d_addr += 0x34;
+read:
+ {
+ union avi_cfg_isp_enable3 reg = {
+ ._register = 0,
+ };
+
+ union avi_cfg_r2_isp_enable3 r2 = {
+ ._register = readl(addr),
+ };
+
+ reg.inter0_enable = r2.inter0;
+ reg.inter1_enable = r2.inter1;
+ reg.rip0_enable = r2.rip0;
+ reg.rip1_enable = r2.rip1;
+ reg.lsc0_enable = r2.anti_vign0;
+ reg.lsc1_enable = r2.anti_vign1;
+ reg.chroma_aber0_enable = r2.chroma_aber0;
+ reg.chroma_aber1_enable = r2.chroma_aber1;
+ reg.denoise0_enable = r2.denoise_ee0;
+ reg.denoise1_enable = r2.denoise_ee1;
+ reg.bayer0_enable = r2.bayer0;
+ reg.bayer1_enable = r2.bayer1;
+ reg.drop0_enable = r2.drop0;
+ reg.drop1_enable = r2.drop1;
+ reg.i3d_lut0_enable = (readl(i3d_addr) >> 28) & 0x1;
+ reg.i3d_lut1_enable = (readl(i3d_addr) >> 29) & 0x1;
+
+ return reg._register;
+ }
+ default:
+ return readl(addr);
+ }
+}
+
+/* Four 8-bit interconnect source IDs packed into one 32-bit register. */
+union avi_inter_ids
+{
+ u8 ids[4];
+ u32 _register;
+};
+
+/* ID Translation between P7R2 and P7R3 */
+/* R3 -> R2 direction: source IDs 0x2f/0x30 on R3 map to 0x2b/0x2c on
+ * R2.  All four packed IDs in the register are translated. */
+static inline u32 avi_compat_r2_inter_id_write(u32 val)
+{
+ int i;
+
+ union avi_inter_ids reg = { ._register = val };
+
+ for (i = 0 ; i < 4 ; i++) {
+ if (reg.ids[i] == 0x2f)
+ reg.ids[i] = 0x2b;
+ else if (reg.ids[i] == 0x30)
+ reg.ids[i] = 0x2c;
+ }
+
+ return reg._register;
+}
+
+/* R2 -> R3 direction: inverse mapping of avi_compat_r2_inter_id_write()
+ * (0x2b/0x2c back to 0x2f/0x30). */
+static inline u32 avi_compat_r2_inter_id_read(u32 val)
+{
+ int i;
+
+ union avi_inter_ids reg = { ._register = val };
+
+ for (i = 0 ; i < 4 ; i++) {
+ if (reg.ids[i] == 0x2b)
+ reg.ids[i] = 0x2f;
+ else if (reg.ids[i] == 0x2c)
+ reg.ids[i] = 0x30;
+ }
+
+ return reg._register;
+}
+
+/*
+ * Write an INTER register on R2.  Source IDs are translated to the R2
+ * numbering first.  The SINKK/SINKL registers straddle two R2
+ * registers: the R3 SINKK low half (stats_yuv) goes to the low half
+ * of the register at @addr (preserving the i3d_lut source in its high
+ * half) while its high half (isp_chain_bayer) goes to @addr + 4; the
+ * R3 SINKL low half (isp_chain_yuv) goes to the high half of the
+ * register at @addr - 4, preserving its low half.
+ */
+static void avi_compat_r2_inter_write(u32 val, u32 addr)
+{
+ u32 off = addr & 0xff;
+
+ val = avi_compat_r2_inter_id_write(val);
+
+ switch (off) {
+ case AVI_INTER_SINKK:
+ {
+ u32 r3_stats_yuv_src = val & 0xffff;
+ u32 r3_isp_chain_bayer_src = (val >> 16) & 0xffff;
+ u32 r2_i3d_lut_src = readl(addr) & 0xffff0000;
+
+ writel(r2_i3d_lut_src | r3_stats_yuv_src, addr);
+ writel(r3_isp_chain_bayer_src, addr + 4);
+ }
+ break;
+ case AVI_INTER_SINKL:
+ {
+ u32 r3_isp_chain_yuv_src = (val & 0xffff) << 16;
+ u32 r2_stats_yuv_src = readl(addr - 4) & 0xffff;
+
+ writel(r3_isp_chain_yuv_src | r2_stats_yuv_src, addr - 4);
+ }
+ break;
+ default:
+ writel(val, addr);
+ }
+}
+
+/*
+ * Read an INTER register on R2: inverse of avi_compat_r2_inter_write().
+ * SINKK is rebuilt from the registers at @addr and @addr + 4, SINKL
+ * from the high half of @addr - 4, then the IDs are translated back
+ * to the R3 numbering.
+ */
+static u32 avi_compat_r2_inter_read(u32 addr)
+{
+ u32 off = addr & 0xff;
+ u32 reg;
+
+ switch (off) {
+ case AVI_INTER_SINKK:
+ {
+ u32 r3_stats_yuv_src = readl(addr) & 0xffff;
+ u32 r3_isp_chain_bayer_src = readl(addr + 4) << 16;
+
+ reg = (r3_isp_chain_bayer_src | r3_stats_yuv_src);
+ }
+ break;
+ case AVI_INTER_SINKL:
+ {
+ u32 r3_isp_chain_yuv_src = readl(addr - 4) >> 16;
+
+ reg = r3_isp_chain_yuv_src;
+ }
+ break;
+ default:
+ reg = readl(addr);
+ }
+
+ return avi_compat_r2_inter_id_read(reg);
+}
+
+/*
+ * Write a CONV (chroma converter) register, translating the R3 split
+ * offset/clip registers into the packed R2 ones.  On R2 each channel
+ * pair has one register in the block (RY at +0x14, GU at +0x18, BV at
+ * +0x1c) packing clip_min/clip_max in the low 16 bits and
+ * offset_in/offset_out in the high 16 bits, so every access is a
+ * read-modify-write of the packed register.  The 3x3 coefficient
+ * matrix kept its layout and is written directly; writes to any other
+ * offset are silently dropped.
+ */
+static void avi_compat_r2_conv_write(u32 val, u32 addr)
+{
+ u32 off = addr & 0xff;
+
+ if (off <= AVI_ISP_CHROMA_COEFF_22) {
+ /* The coeff matrix layout didn't change */
+ writel(val, addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_RY) {
+ union avi_isp_chroma_offset_ry reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x14;
+ u32 r2_ry = readl(r2_addr);
+
+ /* keep the clip half, replace the offset half */
+ r2_ry &= 0xffff;
+ r2_ry |= (reg.offset_in & 0xff) << 16;
+ r2_ry |= (reg.offset_out & 0xff) << 24;
+
+ writel(r2_ry, r2_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_RY) {
+ union avi_isp_chroma_clip_ry reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x14;
+ u32 r2_ry = readl(r2_addr);
+
+ /* keep the offset half, replace the clip half */
+ r2_ry &= 0xffff0000;
+ r2_ry |= (reg.clip_min & 0xff) << 0;
+ r2_ry |= (reg.clip_max & 0xff) << 8;
+
+ writel(r2_ry, r2_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_GU) {
+ union avi_isp_chroma_offset_gu reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x18;
+ u32 r2_gu = readl(r2_addr);
+
+ r2_gu &= 0xffff;
+ r2_gu |= (reg.offset_in & 0xff) << 16;
+ r2_gu |= (reg.offset_out & 0xff) << 24;
+
+ writel(r2_gu, r2_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_GU) {
+ union avi_isp_chroma_clip_gu reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x18;
+ u32 r2_gu = readl(r2_addr);
+
+ r2_gu &= 0xffff0000;
+ r2_gu |= (reg.clip_min & 0xff) << 0;
+ r2_gu |= (reg.clip_max & 0xff) << 8;
+
+ writel(r2_gu, r2_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_BV) {
+ union avi_isp_chroma_offset_bv reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x1c;
+ u32 r2_bv = readl(r2_addr);
+
+ r2_bv &= 0xffff;
+ r2_bv |= (reg.offset_in & 0xff) << 16;
+ r2_bv |= (reg.offset_out & 0xff) << 24;
+
+ writel(r2_bv, r2_addr);
+ return;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_BV) {
+ union avi_isp_chroma_clip_bv reg = { ._register = val};
+ u32 r2_addr = (addr & ~0xff) + 0x1c;
+ u32 r2_bv = readl(r2_addr);
+
+ r2_bv &= 0xffff0000;
+ r2_bv |= (reg.clip_min & 0xff) << 0;
+ r2_bv |= (reg.clip_max & 0xff) << 8;
+
+ writel(r2_bv, r2_addr);
+ return;
+ }
+
+ /* Unknown register: dropped */
+ return;
+}
+
+/*
+ * Read a CONV (chroma converter) register, translating the packed R2
+ * layout (offsets and clips packed per channel at +0x14/+0x18/+0x1c)
+ * back into the R3 split registers.  The coefficient matrix is read
+ * directly; unknown offsets fall through to a plain readl.
+ *
+ * Fix: the local unions are now zero-initialised before the relevant
+ * bitfields are filled in.  Previously only two bitfields of each
+ * union were assigned and ._register was returned with its remaining
+ * bits uninitialised (indeterminate value returned to the caller).
+ */
+static u32 avi_compat_r2_conv_read(u32 addr)
+{
+ u32 off = addr & 0xff;
+
+ if (off <= AVI_ISP_CHROMA_COEFF_22) {
+ /* The coeff matrix layout didn't change */
+ return readl(addr);
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_RY) {
+ union avi_isp_chroma_offset_ry reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x14;
+ u32 r2_ry = readl(r2_addr);
+
+ reg.offset_in = (r2_ry >> 16) & 0xff;
+ reg.offset_out = (r2_ry >> 24) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_RY) {
+ union avi_isp_chroma_clip_ry reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x14;
+ u32 r2_ry = readl(r2_addr);
+
+ reg.clip_min = (r2_ry >> 0) & 0xff;
+ reg.clip_max = (r2_ry >> 8) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_GU) {
+ union avi_isp_chroma_offset_gu reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x18;
+ u32 r2_gu = readl(r2_addr);
+
+ reg.offset_in = (r2_gu >> 16) & 0xff;
+ reg.offset_out = (r2_gu >> 24) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_GU) {
+ union avi_isp_chroma_clip_gu reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x18;
+ u32 r2_gu = readl(r2_addr);
+
+ reg.clip_min = (r2_gu >> 0) & 0xff;
+ reg.clip_max = (r2_gu >> 8) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_OFFSET_BV) {
+ union avi_isp_chroma_offset_bv reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x1c;
+ u32 r2_bv = readl(r2_addr);
+
+ reg.offset_in = (r2_bv >> 16) & 0xff;
+ reg.offset_out = (r2_bv >> 24) & 0xff;
+
+ return reg._register;
+ }
+
+ if (off == AVI_ISP_CHROMA_CLIP_BV) {
+ union avi_isp_chroma_clip_bv reg = { ._register = 0 };
+
+ u32 r2_addr = (addr & ~0xff) + 0x1c;
+ u32 r2_bv = readl(r2_addr);
+
+ reg.clip_min = (r2_bv >> 0) & 0xff;
+ reg.clip_max = (r2_bv >> 8) & 0xff;
+
+ return reg._register;
+ }
+
+ /* Unknown register */
+ return readl(addr);
+}
+
+/* R2 ISP interconnect source-select registers: 3-bit source IDs, each
+ * padded to a byte boundary.  A source of 0 means "unconnected". */
+union avi_isp_inter_r2_src0
+{
+ struct
+ {
+ u32 isp_out_src : 3;
+ unsigned /*unused */ : 5;
+ u32 rip_src : 3;
+ unsigned /*unused */ : 5;
+ u32 lsc_src : 3;
+ unsigned /*unused */ : 5;
+ u32 chroma_aber_src : 3;
+ };
+ u32 _register;
+};
+
+union avi_isp_inter_r2_src1
+{
+ struct
+ {
+ u32 denoise_src : 3;
+ unsigned /*unused */ : 5;
+ u32 bayer_src : 3;
+ unsigned /*unused */ : 5;
+ u32 drop_src : 3;
+ };
+ u32 _register;
+};
+
+/*
+ * ISP interconnect in R2 is a partial interconnect, based on SRC and SINK
+ * selection (like the main interconnect), whereas in R3, it is a simple
+ * 'bypass' register.
+ *
+ * In P7R2, the ISP order is:
+ * RIP -> LSC -> Chroma Abera -> Denoise -> Bayer
+ */
+/*
+ * Present the R2 source-select state as the R3 bypass register: a
+ * module whose R2 source is 0 (unconnected) is reported as bypassed,
+ * and modules that do not exist on R2 are always reported bypassed.
+ */
+static u32 avi_compat_r2_isp_inter_read(u32 addr)
+{
+ union avi_isp_inter_r2_src0 r2_src0 = {
+ ._register = readl(addr),
+ };
+
+ union avi_isp_inter_r2_src1 r2_src1 = {
+ ._register = readl(addr + 0x4),
+ };
+
+ /* R2 non-implemented modules are considered as bypassed */
+ union avi_isp_chain_bayer_inter_module_bypass r3 = {
+ .pedestal_bypass = 1,
+ .grim_bypass = 1,
+ .color_matrix_bypass = 1,
+ };
+
+ /* We assume a simple scheme where the user won't set the sources id
+ * even if the module is unused. */
+ r3.rip_bypass = (r2_src0.rip_src == 0);
+ r3.lsc_bypass = (r2_src0.lsc_src == 0);
+ r3.chroma_aber_bypass = (r2_src0.chroma_aber_src == 0);
+ r3.denoise_bypass = (r2_src1.denoise_src == 0);
+ r3.bayer_bypass = (r2_src1.bayer_src == 0);
+
+ return r3._register;
+}
+
+/*
+ * Translate the R3 bypass register into the R2 source-select chain:
+ * walk the fixed R2 module order (RIP -> LSC -> Chroma Aber ->
+ * Denoise -> Bayer), connecting each non-bypassed module to the
+ * output of the previous one (current_src) and finally routing
+ * isp_out to the last connected module.  A fully-bypassed chain
+ * (0xFF) needs a special pass-through setup, see below.
+ */
+static void avi_compat_r2_isp_inter_write(u32 val, u32 addr)
+{
+ union avi_isp_inter_r2_src0 r2_src0 = {
+ ._register = 0,
+ };
+
+ /* Drop will be handle by YUV chain */
+ union avi_isp_inter_r2_src1 r2_src1 = {
+ ._register = 0,
+ };
+
+ union avi_isp_chain_bayer_inter_module_bypass r3 = {
+ ._register = val,
+ };
+
+ /* Default source is the input, then iterate of the ISP chain and set
+ * the corresponding source if the module is used. */
+ u32 current_src = 1;
+
+ /* In case the whole ISP is bypassed, there is a limitation in P7R2, we
+ * cannot simply connect the input to the output. Thus, we keep the
+ * 'Dead Pixel Correction' module in the chain with a pass-through
+ * configuration. */
+ if (r3._register == 0xFF) {
+ /* The mapping of ISP chain modules is a little bit 'tricky':
+ * - Instance 0:
+ * - isp_inter_0: 0x0200
+ * - dpc_0: 0xe000
+ * - Instance 1:
+ * - isp_inter_1: 0x0300
+ * - dpc_1: 0xf000
+ */
+ int inst = (addr & 0x100) != 0;
+
+ r2_src0.rip_src = 0x1;
+ r2_src0.isp_out_src = 0x2;
+
+ /* Dead Pixel Correction:
+ * the pixel at (2047, 2047) is marked as 'dead' and the
+ * correction method is 'Copy Right', which will result to a
+ * copy of itself since we are at the border of the image. */
+ writel(0xFFFFFD, addr + 0xd00 + (inst * 0xF00));
+ writel(0x0, addr + 0xd00 + (inst * 0xF00) + 0x4);
+
+ /* ISP inter */
+ writel(r2_src0._register, addr);
+ writel(r2_src1._register, addr + 0x4);
+
+ return;
+ }
+
+ /* Each bypassed module pre-wires its successor to current_src; the
+ * next step overwrites that value if the successor is used. */
+ if (r3.rip_bypass) {
+ r2_src0.rip_src = 0;
+ r2_src0.lsc_src = current_src;
+ } else {
+ r2_src0.rip_src = current_src;
+ current_src = 0x2;
+ }
+
+ if (r3.lsc_bypass) {
+ r2_src0.lsc_src = 0;
+ r2_src0.chroma_aber_src = current_src;
+ } else {
+ r2_src0.lsc_src = current_src;
+ current_src = 0x3;
+ }
+
+ if (r3.chroma_aber_bypass) {
+ r2_src0.chroma_aber_src = 0;
+ r2_src1.denoise_src = current_src;
+ } else {
+ r2_src0.chroma_aber_src = current_src;
+ current_src = 0x4;
+ }
+
+ if (r3.denoise_bypass) {
+ r2_src1.denoise_src = 0;
+ r2_src1.bayer_src = current_src;
+ } else {
+ r2_src1.denoise_src = current_src;
+ current_src = 0x5;
+ }
+
+ if (r3.bayer_bypass) {
+ r2_src1.bayer_src = 0;
+ } else {
+ r2_src1.bayer_src = current_src;
+ current_src = 0x6;
+ }
+
+ r2_src0.isp_out_src = current_src;
+
+ writel(r2_src0._register, addr);
+ writel(r2_src1._register, addr + 0x4);
+}
+
+/*
+ * Read a register of the R3 isp_chain_bayer block, remapping its
+ * sub-blocks onto their scattered R2 locations.  Sub-blocks that do
+ * not exist on R2 read back as 0.
+ */
+static u32 avi_compat_r2_isp_chain_bayer_read(u32 addr)
+{
+ u32 off = addr & 0x0ffff;
+ u32 base = addr & ~0x3ffff;
+
+ /* There are 2 instances of isp_chain_bayer:
+ * 0x20000: Instance 0
+ * 0x30000: Instance 1
+ */
+ u32 inst = (addr & 0x10000) != 0;
+
+ /* Match each ISP block in ascending order of the ISP reg map */
+ if (off < 0x1000) {
+ /* Inter */
+ u32 r2_addr = base + 0x200 + (inst * 0x100);
+
+ /* only the bypass register (offset 0) is emulated */
+ if (off != 0)
+ return 0;
+ else
+ return avi_compat_r2_isp_inter_read(r2_addr);
+
+ } else if (off < 0x2000) {
+ /* VL format 32 to 40: Not implemented in R2 */
+ return 0;
+ } else if (off < 0x4000) {
+ /* Pedestal: Not implemented in R2 */
+ return 0;
+ } else if (off < 0x6000) {
+ /* Green Imbalance: Not implemented in R2 */
+ return 0;
+ } else if (off < 0x7000) {
+ /* Dead Pixel Correction */
+ goto read;
+ } else if (off < 0x8000) {
+ /* Noise Reduction */
+ goto read;
+ } else if (off < 0xA000) {
+ /* Stats Bayer: Not implemented in R2 */
+ return 0;
+ } else if (off < 0xC000) {
+ /* Lens Shading Correction */
+ goto read;
+ } else if (off < 0xD000) {
+ /* Chromatic Aberration */
+ goto read;
+ } else if (off < 0xE000) {
+ /* Bayer */
+ u32 r2_addr = base + 0x14000 + (inst * 0x100) + (addr & 0xff);
+
+ return readl(r2_addr);
+ } else if (off < 0xF000) {
+ /* Color Correction: Not implemented in R2 */
+ return 0;
+ } else {
+ /* VL format 40 to 32: Not implemented in R2 */
+ return 0;
+ }
+
+read:
+ return readl(addr);
+}
+
+/*
+ * Write a register of the R3 isp_chain_bayer block, remapping its
+ * sub-blocks onto their R2 locations.  Writes to sub-blocks absent on
+ * R2 are dropped; for DPC/NR/LSC/CA a non-zero write triggers BUG_ON
+ * since those ranges are not remapped here.
+ */
+static void avi_compat_r2_isp_chain_bayer_write(u32 val, u32 addr)
+{
+ u32 off = addr & 0x0ffff;
+ u32 base = addr & ~0x3ffff;
+
+ /* There are 2 isp_chain_bayer:
+ * 0x20000: Instance 0
+ * 0x30000: Instance 1
+ */
+ u32 inst = (addr & 0x10000) != 0;
+
+ /* Match each ISP block in ascending order of the ISP reg map */
+ if (off < 0x1000) {
+ /* Inter */
+ u32 r2_addr = base + 0x200 + (inst * 0x100);
+
+ /* only the bypass register (offset 0) is emulated */
+ if (off != 0)
+ return;
+ else
+ avi_compat_r2_isp_inter_write(val, r2_addr);
+
+ } else if (off < 0x2000) {
+ /* VL format 32 to 40: Not implemented in R2 */
+ } else if (off < 0x4000) {
+ /* Pedestal: Not implemented in R2 */
+ } else if (off < 0x6000) {
+ /* Green Imbalance: Not implemented in R2 */
+ } else if (off < 0x7000) {
+ /* Dead Pixel Correction */
+ BUG_ON(val);
+ } else if (off < 0x8000) {
+ /* Noise Reduction */
+ BUG_ON(val);
+ } else if (off < 0xA000) {
+ /* Stats Bayer: Not implemented in R2 */
+ } else if (off < 0xC000) {
+ /* Lens Shading Correction */
+ BUG_ON(val);
+ } else if (off < 0xD000) {
+ /* Chromatic Aberration */
+ BUG_ON(val);
+ } else if (off < 0xE000) {
+ /* Bayer */
+ u32 r2_addr = base + 0x14000 + (inst * 0x100) + (addr & 0xff);
+
+ writel(val, r2_addr);
+ } else if (off < 0xF000) {
+ /* Color Correction: Not implemented in R2 */
+ } else {
+ /* VL format 40 to 32: Not implemented in R2 */
+ }
+}
+
+/*
+ * Read a register of the R3 isp_chain_yuv block, remapping the i3D
+ * LUT (R2 at base + 0x16000) and Drop (R2 at base + 0x15000)
+ * sub-blocks; ranges absent on R2 read back as 0.
+ */
+static u32 avi_compat_r2_isp_chain_yuv_read(u32 addr)
+{
+ u32 off = addr & 0x0ffff;
+ u32 base = addr & ~0x3ffff;
+
+ /* There are 2 instance of isp_chain_yuv:
+ * 0x40000: Instance 0
+ * 0x50000: Instance 1
+ */
+ u32 inst = (addr & 0x10000) != 0;
+
+ /* Match each ISP block in ascending order */
+ if (off < 0x1000) {
+ /* Inter: Not implemented in R2 */
+ return 0;
+ } else if (off < 0x2000) {
+ /* Edge enhancement: Not implemented in R2 */
+ return 0;
+ } else if (off < 0x3000) {
+ /* i3D LUT */
+ u32 r2_addr = base + 0x16000 + (inst * 0x1000) + (addr & 0xfff);
+
+ return readl(r2_addr);
+ } else {
+ /* Drop */
+ u32 r2_addr = base + 0x15000 + (inst * 0x100) + (addr & 0xff);
+
+ return readl(r2_addr);
+ }
+}
+
+/*
+ * Write a register of the R3 isp_chain_yuv block: counterpart of
+ * avi_compat_r2_isp_chain_yuv_read().  Writes to sub-blocks absent on
+ * R2 are silently dropped.
+ */
+static void avi_compat_r2_isp_chain_yuv_write(u32 val, u32 addr)
+{
+ u32 off = addr & 0x0ffff;
+ u32 base = addr & ~0x3ffff;
+
+ /* There are 2 instance of isp_chain_yuv:
+ * 0x40000: Instance 0
+ * 0x50000: Instance 1
+ */
+ u32 inst = (addr & 0x10000) != 0;
+
+ /* Match each ISP block in ascending order */
+ if (off < 0x1000) {
+ /* Inter: Not implemented in R2 */
+ } else if (off < 0x2000) {
+ /* Edge enhancement: Not implemented in R2 */
+ } else if (off < 0x3000) {
+ /* i3D LUT */
+ u32 r2_addr = base + 0x16000 + (inst * 0x1000) + (addr & 0xfff);
+
+ writel(val, r2_addr);
+ } else {
+ /* Drop */
+ u32 r2_addr = base + 0x15000 + (inst * 0x100) + (addr & 0xff);
+
+ writel(val, r2_addr);
+ }
+}
+
+/* There's a bug in R2 that can cause the AVI to drop a transaction within a
+ * burst. To work around that we simply forbid bursts by adding memory barriers
+ * after each access (that is, the non-__raw functions) */
+/*
+ * P7R2 compat register write: dispatch to the per-block translators.
+ * Uses writel (not __raw_writel) throughout so each access carries a
+ * barrier — see the R2 burst-drop workaround comment above.
+ */
+void avi_compat_r2_write(u32 val, u32 addr)
+{
+ u32 block = addr & 0x7ff00;
+
+ /* Match each block in ascending order of the AVI reg map */
+ if (block < 0x100)
+ /* CFG */
+ avi_compat_r2_cfg_write(val, addr);
+ else if (block < 0x200)
+ /* INTER */
+ avi_compat_r2_inter_write(val, addr);
+ else if (block < 0x3000)
+ /* FIFO */
+ goto write;
+ else if (block < 0x4000)
+ /* CONV */
+ avi_compat_r2_conv_write(val, addr);
+ else if (block < 0x5000)
+ /* BLEND */
+ goto write;
+ else if (block < 0x6000)
+ /* LCD */
+ goto write;
+ else if (block < 0x7000)
+ /* CAM */
+ goto write;
+ else if (block < 0x8000)
+ /* SCALROT */
+ goto write;
+ else if (block < 0x12000)
+ /* GAM */
+ /* GAM kept its R1 layout on R2, reuse the R1 translator */
+ avi_compat_r1_gam_write(val, addr);
+ else if (block < 0x20000)
+ /* STATS YUV */
+ goto write;
+ else if (block < 0x40000)
+ /* ISP CHAIN BAYER */
+ avi_compat_r2_isp_chain_bayer_write(val, addr);
+ else if (block < 0x60000)
+ /* ISP CHAIN YUV */
+ avi_compat_r2_isp_chain_yuv_write(val, addr);
+ else
+ goto write;
+
+ return;
+write:
+ writel(val, addr);
+}
+EXPORT_SYMBOL(avi_compat_r2_write);
+
+/*
+ * P7R2 compat register read: counterpart of avi_compat_r2_write().
+ * Uses readl (barriered) for the same R2 burst-drop workaround.
+ */
+u32 avi_compat_r2_read(u32 addr)
+{
+ u32 block = addr & 0x7ff00;
+
+ /* Match each block in ascending order of the AVI reg map */
+ if (block < 0x100)
+ /* CFG */
+ return avi_compat_r2_cfg_read(addr);
+ else if (block < 0x200)
+ /* INTER */
+ return avi_compat_r2_inter_read(addr);
+ else if (block < 0x3000)
+ /* FIFO */
+ goto read;
+ else if (block < 0x4000)
+ /* CONV */
+ return avi_compat_r2_conv_read(addr);
+ else if (block < 0x5000)
+ /* BLEND */
+ goto read;
+ else if (block < 0x6000)
+ /* LCD */
+ goto read;
+ else if (block < 0x7000)
+ /* CAM */
+ goto read;
+ else if (block < 0x8000)
+ /* SCALROT */
+ goto read;
+ else if (block < 0x12000)
+ /* GAM */
+ /* GAM kept its R1 layout on R2, reuse the R1 translator */
+ return avi_compat_r1_gam_read(addr);
+ else if (block < 0x20000)
+ /* STATS YUV */
+ goto read;
+ else if (block < 0x40000)
+ /* ISP CHAIN BAYER */
+ return avi_compat_r2_isp_chain_bayer_read(addr);
+ else if (block < 0x60000)
+ /* ISP CHAIN YUV */
+ return avi_compat_r2_isp_chain_yuv_read(addr);
+ else
+ goto read;
+read:
+
+ return readl(addr);
+}
+EXPORT_SYMBOL(avi_compat_r2_read);
diff --git a/drivers/parrot/video/avi_compat.h b/drivers/parrot/video/avi_compat.h
new file mode 100644
index 00000000000000..81f7a6491c4f71
--- /dev/null
+++ b/drivers/parrot/video/avi_compat.h
@@ -0,0 +1,112 @@
+#ifndef _AVI_COMPAT_H_
+#define _AVI_COMPAT_H_
+
+/*
+ * linux/drivers/parrot/video/avi_compat.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 27-Aug-2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file contains the AVI compatibility code for the various revisions of
+ * the IP.
+ */
+
+#include "avi.h"
+#include "avi_logger.h"
+
+#ifdef CONFIG_AVI_LOGGER
+/* We need the compatibility layer to log register access */
+#define AVI_BACKWARD_COMPAT
+#endif
+
+#ifdef AVI_BACKWARD_COMPAT
+
+extern void avi_compat_r1_write(u32 val, u32 addr);
+extern u32 avi_compat_r1_read(u32 addr);
+
+extern void avi_compat_r2_write(u32 val, u32 addr);
+extern u32 avi_compat_r2_read(u32 addr);
+
+/*
+ * Those functions are made so that they can be replaced by simple
+ * "__raw_writel" and "__raw_readl" if we only need to support the final version
+ * of the chip (currently P7R3).
+ */
+
+/*
+ * avi_writel() - revision-aware AVI register write.
+ * @rev: hardware revision the compat layer must emulate
+ * @val: value to write
+ * @addr: register address
+ *
+ * Logs the access, then routes it through the R1/R2 compat layer or
+ * writes directly on R3.  BUG()s on an unknown revision.
+ */
+static inline void avi_writel(enum avi_revision rev,
+ u32 val, u32 addr)
+{
+ avi_log_access(addr, val, AVI_LOG_WRITE);
+
+ switch (rev) {
+ case AVI_REVISION_1:
+ avi_compat_r1_write(val, addr);
+ break;
+ case AVI_REVISION_2:
+ avi_compat_r2_write(val, addr);
+ break;
+ case AVI_REVISION_3:
+ __raw_writel(val, addr);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * avi_readl() - revision-aware AVI register read.
+ * @rev: hardware revision the compat layer must emulate
+ * @addr: register address
+ *
+ * Reads through the matching compat layer (direct on R3), logs the
+ * value read, and returns it.  BUG()s on an unknown revision.
+ */
+static inline u32 avi_readl(enum avi_revision rev, u32 addr)
+{
+ u32 val;
+
+ switch (rev) {
+ case AVI_REVISION_1:
+ val = avi_compat_r1_read(addr);
+ break;
+ case AVI_REVISION_2:
+ val = avi_compat_r2_read(addr);
+ break;
+ case AVI_REVISION_3:
+ val = __raw_readl(addr);
+ break;
+ default:
+ BUG();
+ }
+
+ avi_log_access(addr, val, AVI_LOG_READ);
+
+
+ return val;
+
+}
+
+/* A dirty but handy macro to avoid mentioning avi_ctrl.revision for each
+ * access */
+#define AVI_WRITE(_v, _a) avi_writel(avi_ctrl.revision, (_v), (u32)(_a))
+#define AVI_READ(_a) avi_readl(avi_ctrl.revision, (u32)(_a))
+
+#else /* AVI_BACKWARD_COMPAT */
+
+#define AVI_WRITE __raw_writel
+#define AVI_READ __raw_readl
+
+#endif /* AVI_BACKWARD_COMPAT */
+
+
+#endif /* _AVI_COMPAT_H_ */
diff --git a/drivers/parrot/video/avi_debug.c b/drivers/parrot/video/avi_debug.c
new file mode 100644
index 00000000000000..5e8689c66522c0
--- /dev/null
+++ b/drivers/parrot/video/avi_debug.c
@@ -0,0 +1,177 @@
+#include <linux/module.h>
+#include "avi_debug.h"
+
+/* Look for entry v_ in string array arr_. If it's not found return string
+ * not_found_ */
+/* NOTE: this macro expands to return statements, so it must be used as
+ * the entire body of a lookup function (see the *_to_str helpers
+ * below); arr_ must be a real array for ARRAY_SIZE to work. */
+#define FIND_IN_ARRAY(arr_, v_, not_found_) do { \
+ if ((v_) >= ARRAY_SIZE(arr_) || (arr_)[(v_)] == NULL) \
+ return not_found_; \
+ return (arr_)[(v_)]; \
+ } while(0)
+
+/* Colorspace names, indexed by enum avi_colorspace value. */
+static const char *avi_debug_colorspaces[] =
+{
+#define AVI_DEBUG_CSPACE_TO_STR(_c) [AVI_ ## _c ## _CSPACE] = # _c
+ AVI_DEBUG_CSPACE_TO_STR(NULL),
+ AVI_DEBUG_CSPACE_TO_STR(RGB),
+ AVI_DEBUG_CSPACE_TO_STR(BT601),
+ AVI_DEBUG_CSPACE_TO_STR(BT709),
+ AVI_DEBUG_CSPACE_TO_STR(JFIF),
+ AVI_DEBUG_CSPACE_TO_STR(GREY),
+ AVI_DEBUG_CSPACE_TO_STR(Y10),
+#undef AVI_DEBUG_CSPACE_TO_STR
+};
+
+/* Return the printable name of @cspace, or "INVALID_COLORSPACE" for an
+ * out-of-range or unnamed value. */
+const char *avi_debug_colorspace_to_str(enum avi_colorspace cspace)
+{
+ FIND_IN_ARRAY(avi_debug_colorspaces, cspace, "INVALID_COLORSPACE");
+}
+EXPORT_SYMBOL(avi_debug_colorspace_to_str);
+
+/* Pixel-format names, generated from the AVI_PIXEL_FORMATS X-macro and
+ * indexed by the pixfmt id. */
+#define AVI_PIXEL_FORMAT_AS_STR(_x) [AVI_PIXEL_FORMAT_AS_ENUM(_x)] = # _x
+static const char *avi_debug_pixfmts[] =
+{
+ AVI_PIXEL_FORMATS(AVI_PIXEL_FORMAT_AS_STR),
+};
+
+/* Return the printable name of @pixfmt (looked up by its id field), or
+ * "INVALID_PIXFMT" for an unknown id. */
+const char *avi_debug_pixfmt_to_str(struct avi_dma_pixfmt pixfmt)
+{
+ FIND_IN_ARRAY(avi_debug_pixfmts, pixfmt.id, "INVALID_PIXFMT");
+}
+EXPORT_SYMBOL(avi_debug_pixfmt_to_str);
+
+/* Segment activation-state names, indexed by AVI_SEGMENT_* value. */
+static const char *avi_debug_active_modes[] =
+{
+#define AVI_DEBUG_ACTIVE_TO_STR(_p) [AVI_SEGMENT_ ## _p] = # _p
+ AVI_DEBUG_ACTIVE_TO_STR(DISABLED),
+ AVI_DEBUG_ACTIVE_TO_STR(ACTIVE),
+ AVI_DEBUG_ACTIVE_TO_STR(ACTIVE_ONESHOT),
+#undef AVI_DEBUG_ACTIVE_TO_STR
+};
+
+/* Return the printable name of @active, or "INVALID_ACTIVE" for an
+ * out-of-range value. */
+const char *avi_debug_activation_to_str(enum avi_segment_activation active)
+{
+ FIND_IN_ARRAY(avi_debug_active_modes, active, "INVALID_ACTIVE");
+}
+EXPORT_SYMBOL(avi_debug_activation_to_str);
+
+/* LCD force-clear state names, indexed by AVI_LCD_FORCE_CLEAR_* value.
+ * Fix: the trailing #undef previously named AVI_DEBUG_ACTIVE_TO_STR,
+ * leaving the AVI_DEBUG_CLEAR_TO_STR helper macro defined past this
+ * table. */
+static const char *avi_debug_force_clear[] =
+{
+#define AVI_DEBUG_CLEAR_TO_STR(_p) [AVI_LCD_FORCE_CLEAR_ ## _p] = # _p
+ AVI_DEBUG_CLEAR_TO_STR(NORMAL),
+ AVI_DEBUG_CLEAR_TO_STR(ACTIVE),
+ AVI_DEBUG_CLEAR_TO_STR(INACTIVE),
+#undef AVI_DEBUG_CLEAR_TO_STR
+};
+
+/* Return the printable name of LCD force-clear state @clear, or
+ * "INVALID_CLEAR" for an out-of-range value. */
+const char *avi_debug_force_clear_to_str(unsigned clear)
+{
+ FIND_IN_ARRAY(avi_debug_force_clear, clear, "INVALID_CLEAR");
+}
+EXPORT_SYMBOL(avi_debug_force_clear_to_str);
+
+/* Capability-bit to name translation table, scanned in order by
+ * avi_debug_caps_to_str(). */
+static const struct {
+ unsigned long cap;
+ const char *str;
+} avi_debug_caps[] = {
+#define MAKE_CAP_STR(c_) { .cap = AVI_CAP_ ## c_, .str = # c_ }
+ MAKE_CAP_STR(DMA_IN),
+ MAKE_CAP_STR(DMA_OUT),
+ MAKE_CAP_STR(BUFFER),
+ MAKE_CAP_STR(CONV),
+ MAKE_CAP_STR(GAM),
+ MAKE_CAP_STR(SCAL),
+ MAKE_CAP_STR(ROT),
+ MAKE_CAP_STR(PLANAR),
+ MAKE_CAP_STR(ISP_CHAIN_BAYER),
+ MAKE_CAP_STR(STATS_BAYER_0),
+ MAKE_CAP_STR(STATS_BAYER_1),
+ MAKE_CAP_STR(STATS_YUV),
+ MAKE_CAP_STR(ISP_CHAIN_YUV),
+ MAKE_CAP_STR(CAM_0),
+ MAKE_CAP_STR(CAM_1),
+ MAKE_CAP_STR(CAM_2),
+ MAKE_CAP_STR(CAM_3),
+ MAKE_CAP_STR(CAM_4),
+ MAKE_CAP_STR(CAM_5),
+ MAKE_CAP_STR(LCD_0),
+ MAKE_CAP_STR(LCD_1),
+#undef MAKE_CAP_STR
+};
+
+/*
+ * Return the name of one capability set in *caps and clear that bit
+ * from *caps; return "<UNKNOWN_CAPS>" (and clear *caps entirely) if a
+ * bit remains that matches no table entry, or NULL once *caps is
+ * empty.  Callers iterate until NULL to enumerate a full mask.
+ */
+const char *avi_debug_caps_to_str(unsigned long *caps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(avi_debug_caps); i++) {
+ if (*caps & avi_debug_caps[i].cap) {
+ /* Found a capability to return */
+ *caps &= ~(avi_debug_caps[i].cap);
+ return avi_debug_caps[i].str;
+ }
+ }
+
+ if (*caps) {
+ /* caps is not empty but nothing matches in our translation
+ * table, return an error */
+ *caps = 0;
+ return "<UNKNOWN_CAPS>";
+ }
+
+ /* No caps remain to convert */
+ return NULL;
+}
+EXPORT_SYMBOL(avi_debug_caps_to_str);
+
+/* The pleasure of string handling in C... */
+/* Bounded append: copy src to *dst if the remaining space *s can hold
+ * it plus a NUL.  On success advance *dst and shrink *s by strlen(src)
+ * — the NUL is deliberately not counted so the next append overwrites
+ * it.  Returns 0 on success, 1 if src does not fit. */
+static inline int avi_debug_safe_strcpy(char **dst, size_t *s, const char *src)
+{
+ size_t l = strlen(src);
+
+ if (*s < l + 1)
+ /* Not enough space left */
+ return 1;
+
+ strcpy(*dst, src);
+
+ *dst += l;
+ *s -= l;
+
+ return 0;
+}
+
+/*
+ * Format capability mask @caps as a " | "-separated string in @buf.
+ * On truncation the output ends with "..."; returns @buf, or NULL if
+ * @buf_size is 0.
+ *
+ * Fix: the truncation path used to do strcpy(p - s, "...") which, for
+ * small remaining space (s <= 1), placed the 4-byte "...\0" past the
+ * end of @buf (buffer overflow).  The dots are now written at the end
+ * of the data actually copied, clamped so they always fit in @buf.
+ */
+char *avi_debug_format_caps(unsigned long caps, char *buf, size_t buf_size)
+{
+ const char *str;
+ char *p = buf;
+ size_t s = buf_size;
+ int n = 0;
+
+ while ((str = avi_debug_caps_to_str(&caps))) {
+ if (n++ > 0)
+ if (avi_debug_safe_strcpy(&p, &s, " | "))
+ goto unfinished;
+
+ if (avi_debug_safe_strcpy(&p, &s, str))
+ goto unfinished;
+ }
+
+ return buf;
+
+ unfinished:
+ if (buf_size == 0)
+ return NULL;
+
+ if (buf_size >= 4) {
+ /* p points just after the copied data; clamp so that "..."
+ * and its NUL stay inside buf. */
+ char *dots = p;
+
+ if (dots > buf + buf_size - 4)
+ dots = buf + buf_size - 4;
+
+ strcpy(dots, "...");
+ } else {
+ /* Not enough for the dots */
+ *buf = '\0';
+ }
+
+ return buf;
+}
+EXPORT_SYMBOL(avi_debug_format_caps);
diff --git a/drivers/parrot/video/avi_debug.h b/drivers/parrot/video/avi_debug.h
new file mode 100644
index 00000000000000..2c25dd8328333c
--- /dev/null
+++ b/drivers/parrot/video/avi_debug.h
@@ -0,0 +1,23 @@
+#ifndef _AVI_DEBUG_H_
+#define _AVI_DEBUG_H_
+
+#include "avi_segment.h"
+#include "avi_pixfmt.h"
+
+extern const char *avi_debug_colorspace_to_str(enum avi_colorspace cspace);
+extern const char *avi_debug_pixfmt_to_str(struct avi_dma_pixfmt fmt);
+extern const char *avi_debug_activation_to_str(enum avi_segment_activation active);
+extern const char *avi_debug_force_clear_to_str(unsigned clear);
+
+/* For printing capabilities: returns the name of a capability in cap, update
+ * cap to remove the capability returned. Returns NULL when no capability
+ * remains.
+ *
+ * Basically when you want to display a set of capabilities you iterate over
+ * this function until it returns NULL */
+extern const char *avi_debug_caps_to_str(unsigned long *caps);
+
+extern char *avi_debug_format_caps(unsigned long caps, char *buf, size_t buf_size);
+
+#endif /* _AVI_DEBUG_H_ */
+
diff --git a/drivers/parrot/video/avi_debugfs.c b/drivers/parrot/video/avi_debugfs.c
new file mode 100644
index 00000000000000..9fcd9a9fcae107
--- /dev/null
+++ b/drivers/parrot/video/avi_debugfs.c
@@ -0,0 +1,1379 @@
+/*
+ * linux/drivers/parrot/video/avi_debugfs.c
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 27-Jul-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "avi_compat.h"
+#include "avi_segment.h"
+#include "avi_debug.h"
+#include "avi_logger.h"
+
+/* The root of our debugfs filesystem; all AVI entries live under it. */
+static struct dentry *avi_debugfs_root = NULL;
+/* Subdirs for ISP: one per ISP chain instance (isp0/isp1) */
+static struct dentry *avi_isp0_debugfs_root = NULL;
+static struct dentry *avi_isp1_debugfs_root = NULL;
+
+/* Human-readable names of the AVI interconnect sources, indexed by the
+ * 8-bit source-selector value read from the INTER registers (0 means the
+ * sink is unconnected). Names are space-padded so the dump lines up. */
+static const char *avi_inter_src_name[] =
+{
+ "unconnected",
+ "FIFO00 ",
+ "FIFO01 ",
+ "FIFO02 ",
+ "FIFO03 ",
+ "FIFO04 ",
+ "FIFO05 ",
+ "FIFO06 ",
+ "FIFO07 ",
+ "FIFO08 ",
+ "FIFO09 ",
+ "FIFO10 ",
+ "FIFO11 ",
+ "CONV0 ",
+ "CONV1 ",
+ "CONV2 ",
+ "CONV3 ",
+ "BLEND0 ",
+ "BLEND1 ",
+ "CAM0 ",
+ "CAM1 ",
+ "CAM2 ",
+ "CAM3 ",
+ "CAM4 ",
+ "CAM5 ",
+ "SCAL0 ",
+ "ROT0 ",
+ "SCAL1 ",
+ "ROT1 ",
+ "GAM0 ",
+ "GAM1 ",
+ "FORK0_0 ",
+ "FORK0_1 ",
+ "FORK1_0 ",
+ "FORK1_1 ",
+ "FORK2_0 ",
+ "FORK2_1 ",
+ "FORK3_0 ",
+ "FORK3_1 ",
+ "SAT_0 ",
+ "SAT_1 ",
+ "STATS_YUV0 ",
+ "STATS_YUV1 ",
+ "STATS_BAYER0 ",
+ "STATS_BAYER1 ",
+ "ISP_CHAIN_BAYER0",
+ "ISP_CHAIN_BAYER1",
+ "ISP_CHAIN_YUV0 ",
+ "ISP_CHAIN_YUV1 ",
+};
+
+/* Print one "SRC -> SINK" routing line for an interconnect sink.
+ * Nothing is printed when the sink is unconnected (selector 0); an
+ * out-of-table selector is reported as INVAL with its raw value. */
+static void avi_inter_display_sel(struct seq_file *s, const char *sink, u8 sel)
+{
+ if (sel != 0) {
+ if (sel >= ARRAY_SIZE(avi_inter_src_name))
+ seq_printf(s, "INVAL(0x%02x) -> %s\n", sel, sink);
+ else
+ seq_printf(s, "%s -> %s\n", avi_inter_src_name[sel], sink);
+ }
+}
+
+/* Dump the whole AVI interconnect routing, one "SRC -> SINK" line per
+ * connected sink. The INTER registers pack one 8-bit source selector per
+ * sink; the sink order below follows the hardware register layout and
+ * must not be changed. */
+static int avi_inter_show(struct seq_file *s, void *unused)
+{
+ /* Sink names, one per selector byte, in register order */
+ static const char *const avi_inter_sink_name[] = {
+ "FIFO00", "FIFO01", "FIFO02", "FIFO03",
+ "FIFO04", "FIFO05", "FIFO06", "FIFO07",
+ "FIFO08", "FIFO09", "FIFO10", "FIFO11",
+ "CONV0", "CONV1", "CONV2", "CONV3",
+ "BLEND0_0", "BLEND0_1", "BLEND0_2", "BLEND0_3",
+ "BLEND1_0", "BLEND1_1", "BLEND1_2", "BLEND1_3",
+ "GAM0", "GAM1",
+ "SCAL0_0", "SCAL0_1", "ROT0",
+ "SCAL1_0", "SCAL1_1", "ROT1",
+ "LCD0", "LCD1",
+ "FORK0", "FORK1", "FORK2", "FORK3",
+ "SAT0", "SAT1",
+ "STATS_YUV0", "STATS_YUV1",
+ "ISP_CHAIN_BAYER0", "ISP_CHAIN_BAYER1",
+ "ISP_CHAIN_YUV0", "ISP_CHAIN_YUV1",
+ };
+ u32 regs[AVI_INTER_NSINK];
+ const u8 *srcsel;
+ unsigned i;
+
+ avi_inter_get_registers(regs);
+
+ /* The selectors are packed 4 to a 32-bit register */
+ srcsel = (const u8 *)regs;
+
+ for (i = 0; i < ARRAY_SIZE(avi_inter_sink_name); i++)
+ avi_inter_display_sel(s, avi_inter_sink_name[i], srcsel[i]);
+
+ return 0;
+}
+
+/* Dump a chroma converter's 3x3 matrix, per-component in/out offsets and
+ * clipping bounds, drawn as the matrix equation it implements:
+ *   out = M x (in - offset_in) + offset_out, clipped to [min, max]. */
+static void avi_conv_show(struct seq_file *s, const struct avi_node *conv)
+{
+ struct avi_isp_chroma_regs regs;
+
+ avi_conv_get_registers(conv, &regs);
+
+
+ seq_printf(s, "[ %04x %04x %04x ] / [RY] [ %02x ] \\ [ %02x ];"
+ " %02x <= RY <= %02x\n",
+ regs.coeff_01_00.coeff_00,
+ regs.coeff_01_00.coeff_01,
+ regs.coeff_10_02.coeff_02,
+ regs.offset_ry.offset_in,
+ regs.offset_ry.offset_out,
+ regs.clip_ry.clip_min,
+ regs.clip_ry.clip_max);
+ seq_printf(s, "[ %04x %04x %04x ] x | [GU] - [ %02x ] | + [ %02x ];"
+ " %02x <= GU <= %02x\n",
+ regs.coeff_10_02.coeff_10,
+ regs.coeff_12_11.coeff_11,
+ regs.coeff_12_11.coeff_12,
+ regs.offset_gu.offset_in,
+ regs.offset_gu.offset_out,
+ regs.clip_gu.clip_min,
+ regs.clip_gu.clip_max);
+ seq_printf(s, "[ %04x %04x %04x ] \\ [BV] [ %02x ] / [ %02x ];"
+ " %02x <= BV <= %02x\n",
+ regs.coeff_21_20.coeff_20,
+ regs.coeff_21_20.coeff_21,
+ regs.coeff_22.coeff_22,
+ regs.offset_bv.offset_in,
+ regs.offset_bv.offset_out,
+ regs.clip_bv.clip_min,
+ regs.clip_bv.clip_max);
+}
+
+/* Generate the per-instance seq_file show() callback for converter _nr */
+#define MAKE_AVI_CONV_SHOW(_nr) \
+ static int avi_conv##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_conv_show(s, avi_ctrl.nodes[AVI_CONV##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_CONV_SHOW(0)
+MAKE_AVI_CONV_SHOW(1)
+MAKE_AVI_CONV_SHOW(2)
+MAKE_AVI_CONV_SHOW(3)
+
+/* Dump a DMA FIFO's full register state: status/interrupt flags, source
+ * and destination format configuration, frame geometry, DMA addresses and
+ * strides, chroma keying and clear settings. */
+static void avi_fifo_show(struct seq_file *s, const struct avi_node *fifo)
+{
+ struct avi_fifo_registers regs;
+ /* Chroma subsampling encodings of the SRCFORMAT/DSTFORMAT fields */
+ static const char *fifo_format_to_str[] = {
+ [0] = "4:4:4",
+ [1] = "4:2:2",
+ [2] = "INVALID",
+ [3] = "4:2:0",
+ };
+ /* Pixel bit-packing encodings of the SRCBITENC/DSTBITENC fields */
+ static const char *fifobitenc[] = {
+ [0] = "4444",
+ [1] = "565",
+ [2] = "1 LSB vote + 555",
+ [3] = "1 alpha + 555",
+ [4] = "888",
+ [5] = "INVAL",
+ [6] = "INVAL",
+ [7] = "INVAL",
+ [8] = "8888",
+ };
+/* Bounds-checked lookup: out-of-table encodings print as "INVAL" */
+#define GET_FIFOBITENC(b) (((b) >= ARRAY_SIZE(fifobitenc)) ? "INVAL" : fifobitenc[b])
+
+ avi_fifo_get_registers(fifo, &regs);
+
+ seq_printf(s, "STATUS: RUNNING: %x IT_ALL: %x\n",
+ regs.status.running,
+ regs.status.it_all);
+ seq_printf(s, "STATUS: IT_MONITOR0: %x IT_MONITOR1: %x\n",
+ regs.status.it_monitor0,
+ regs.status.it_monitor1);
+ seq_printf(s, "STATUS: IT_NL0: %x IT_NL1: %x\n",
+ regs.status.it_nl0,
+ regs.status.it_nl1);
+ seq_printf(s, "CFG: SRCTYPE: %s DSTTYPE: %s\n",
+ regs.cfg.srctype ? "DMA" : "VL",
+ regs.cfg.dsttype ? "DMA" : "VL");
+ seq_printf(s, "CFG: SRCORG: %s DSTORG: %s\n",
+ regs.cfg.srcorg ? "semi-planar" : "interleaved",
+ regs.cfg.dstorg ? "semi-planar" : "interleaved");
+ seq_printf(s, "CFG: SRCFORMAT: %s DSTFORMAT: %s\n",
+ fifo_format_to_str[regs.cfg.srcformat],
+ fifo_format_to_str[regs.cfg.dstformat]);
+ seq_printf(s, "CFG: SRCBITENC: %s DSTBITENC: %s\n",
+ GET_FIFOBITENC(regs.cfg.srcbitenc),
+ GET_FIFOBITENC(regs.cfg.dstbitenc));
+ seq_printf(s, "CFG: REORG: %x DITHERING: %x\n",
+ regs.cfg.reorg, regs.cfg.dithering);
+ seq_printf(s, "CFG: CDSDIS: %x CUSDIS: %x PHASE: %x\n",
+ regs.cfg.cdsdis, regs.cfg.cusdis, regs.cfg.phase);
+ seq_printf(s, "CFG: SCALPLANE: %x KEYING: %x\n",
+ regs.cfg.scalplane, regs.cfg.keying);
+ seq_printf(s, "CFG: ITSOURCE: %x ITLINE: %x\n",
+ regs.cfg.itsource, regs.cfg.itline);
+ seq_printf(s, "CFG: SINGLE: %x\n", regs.cfg.single);
+
+ seq_printf(s, "FRAMESIZE: %u x %u\n",
+ regs.framesize.width, regs.framesize.height);
+ seq_printf(s, "MONITOR: 0x%08x\n", regs.monitor._register);
+ /* BURST is encoded as a power-of-two multiplier of 16 x 64-bit words */
+ seq_printf(s,
+ "TIMEOUT: BURST: %d x 64bits ISSUING_CAPABILITY: %d\n",
+ 16 << regs.timeout.burst,
+ regs.timeout.issuing_capability + 1);
+ seq_printf(s, "TIMEOUT: PINCR: %d HBLANK: %d VBLANK: %d\n",
+ regs.timeout.pincr, regs.timeout.hblank,
+ regs.timeout.vblank);
+ seq_printf(s, "SWAP0: 0x%08x\n", regs.swap0._register);
+ seq_printf(s, "SWAP1: 0x%08x\n", regs.swap1._register);
+ seq_printf(s, "DMASA0: 0x%08x\n", regs.dmasa0);
+ seq_printf(s, "DMASA1: 0x%08x\n", regs.dmasa1);
+ seq_printf(s, "DMALSTR0: %d\n", regs.dmalstr0);
+ seq_printf(s, "DMALSTR1: %d\n", regs.dmalstr1);
+ seq_printf(s, "DMAFXYCFG: %u x %u [%u]\n",
+ regs.dmafxycfg.dmafxnb,
+ regs.dmafxycfg.dmafynb,
+ regs.dmafxycfg.dmafxymode);
+ seq_printf(s, "DMAFXSTR0: %d\n", regs.dmafxstr0);
+ seq_printf(s, "DMAFXSTR1: %d\n", regs.dmafxstr1);
+ seq_printf(s, "DMAFYSTR0: %d\n", regs.dmafystr0);
+ seq_printf(s, "DMAFYSTR1: %d\n", regs.dmafystr1);
+ seq_printf(s, "KEYINGMIN: #%02x%02x%02x\n",
+ regs.keyingmin.ry,
+ regs.keyingmin.gu,
+ regs.keyingmin.bv);
+ seq_printf(s, "KEYINGMAX: #%02x%02x%02x\n",
+ regs.keyingmax.ry,
+ regs.keyingmax.gu,
+ regs.keyingmax.bv);
+ seq_printf(s, "KEYINGALPHA: 0x%02x\n", regs.keyingalpha);
+ seq_printf(s, "VLIN: FORCE_CLEAR: %s[%x]\n",
+ avi_debug_force_clear_to_str(regs.vlin.force_clear),
+ regs.vlin.force_clear);
+}
+
+/* Generate the per-instance seq_file show() callback for FIFO _nr.
+ * _nr is pasted into both the function and the node identifier, so the
+ * two-digit forms (00..11) match the AVI_FIFOxx_NODE names. */
+#define MAKE_AVI_FIFO_SHOW(_nr) \
+ static int avi_fifo##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_fifo_show(s, avi_ctrl.nodes[AVI_FIFO##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_FIFO_SHOW(00)
+MAKE_AVI_FIFO_SHOW(01)
+MAKE_AVI_FIFO_SHOW(02)
+MAKE_AVI_FIFO_SHOW(03)
+MAKE_AVI_FIFO_SHOW(04)
+MAKE_AVI_FIFO_SHOW(05)
+MAKE_AVI_FIFO_SHOW(06)
+MAKE_AVI_FIFO_SHOW(07)
+MAKE_AVI_FIFO_SHOW(08)
+MAKE_AVI_FIFO_SHOW(09)
+MAKE_AVI_FIFO_SHOW(10)
+MAKE_AVI_FIFO_SHOW(11)
+
+
+
+/* Print the symbolic name of a FORMAT_CONTROL value as "NAME[value]".
+ * Unknown or out-of-range values print as "invalid[value]". */
+static void avi_format_show(struct seq_file *s, unsigned format)
+{
+ static const char *names[] = {
+ [AVI_FORMAT_CONTROL_RGB888_1X24] = "RGB888/YUV888_1X24",
+ [0x1] = "RGB888/YUV888_1X24",
+ [0x2] = "RGB888/YUV888_1X24",
+ [0x3] = "RGB888/YUV888_1X24",
+#define AVI_FORMAT_CONTROL(_name) [AVI_FORMAT_CONTROL_ ## _name] = #_name
+ AVI_FORMAT_CONTROL(YUYV_1X16),
+ AVI_FORMAT_CONTROL(YVYU_1X16),
+ AVI_FORMAT_CONTROL(UYVY_2X8),
+ AVI_FORMAT_CONTROL(VYUY_2X8),
+ AVI_FORMAT_CONTROL(YUYV_2X8),
+ AVI_FORMAT_CONTROL(YVYU_2X8),
+ AVI_FORMAT_CONTROL(RGB888_3X8),
+ AVI_FORMAT_CONTROL(BGR888_3X8),
+ AVI_FORMAT_CONTROL(RGBX8888_4X8),
+ AVI_FORMAT_CONTROL(BGRX8888_4X8),
+ AVI_FORMAT_CONTROL(RGB565_2X8),
+ AVI_FORMAT_CONTROL(BGR565_2X8),
+ AVI_FORMAT_CONTROL(RGB555_2X8),
+ AVI_FORMAT_CONTROL(BGR555_2X8),
+ AVI_FORMAT_CONTROL(RGB444_2X8),
+ AVI_FORMAT_CONTROL(BGR444_2X8),
+ };
+ const char *name;
+
+
+ /* '>=' here: format == ARRAY_SIZE(names) is already one past the
+ * end of the table (the previous '>' allowed an out-of-bounds read) */
+ if (format >= ARRAY_SIZE(names) || !names[format])
+ name = "invalid";
+ else
+ name = names[format];
+
+ seq_printf(s, "%s[%u]", name, format);
+}
+
+/* Dump a camera interface's configuration and the timings it measured on
+ * the incoming video signal, plus the derived fbdev-style margins. */
+static void avi_cam_show(struct seq_file *s, const struct avi_node *cam)
+{
+ struct avi_cam_registers regs;
+ struct avi_cam_measure_regs measures;
+
+ avi_cam_get_registers(cam, &regs);
+ avi_cam_get_measure(cam, &measures);
+
+ seq_printf(s, "STATUS: FIELD: %s, FOF: %d, DONE: %d\n",
+ regs.status.field ? "BOT" : "TOP",
+ regs.status.fof, regs.status.done);
+ seq_printf(s, "ITSOURCE: FOF: %d, DONE: %d\n",
+ regs.itsource.fof_en, regs.itsource.done_en);
+ seq_printf(s, "INTERFACE: IVS: %d, IHS: %d, IPC: %d, IOE: %d\n",
+ regs.interface.ivs,
+ regs.interface.ihs,
+ regs.interface.ipc,
+ regs.interface.ioe);
+ seq_printf(s, "INTERFACE: PSYNC_RF: %d, PSYNC_EN: %d\n",
+ regs.interface.psync_rf,
+ regs.interface.psync_en);
+ seq_printf(s, "INTERFACE: ITU656: %d, TIMING_AUTO: %d\n",
+ regs.interface.itu656,
+ regs.interface.timing_auto);
+ seq_printf(s, "INTERFACE: FORMAT_CONTROL: ");
+ avi_format_show(s, regs.interface.format_control);
+ seq_printf(s, "\n"
+ "INTERFACE: PAD_SELECT: 0x%x\n",
+ regs.interface.pad_select);
+ seq_printf(s, "INTERFACE: UNPACKER: %d, RAW10: %d, ROR_LSB: %d\n",
+ regs.interface.unpacker,
+ regs.interface.raw10,
+ regs.interface.ror_lsb);
+ seq_printf(s, "RUN: %d\n", regs.run.run);
+ seq_printf(s, "FREE_RUN: %d\n", regs.run.free_run);
+ seq_printf(s, "HACTIVE_ON: %u\n", regs.h_timing.hactive_on);
+ seq_printf(s, "HACTIVE_OFF: %u\n", regs.h_timing.hactive_off);
+ seq_printf(s, "VACTIVE_ON: %u\n", regs.v_timing.vactive_on);
+ seq_printf(s, "VACTIVE_OFF: %u\n", regs.v_timing.vactive_off);
+ seq_printf(s, "resolution: %u x %u\n",
+ regs.h_timing.hactive_off - regs.h_timing.hactive_on,
+ regs.v_timing.vactive_off - regs.v_timing.vactive_on);
+
+ /* Timings as actually measured on the wire by the hardware */
+ seq_printf(s, "\nMeasures:\n");
+ seq_printf(s, "HSYNC_OFF: %u\n", measures.hsync_off);
+ seq_printf(s, "HACTIVE_ON: %u\n", measures.hactive_on);
+ seq_printf(s, "HACTIVE_OFF: %u\n", measures.hactive_off);
+ seq_printf(s, "HTOTAL: %u\n", measures.htotal);
+ seq_printf(s, "VSYNC_HON: %u\n", measures.vsync_hon);
+ seq_printf(s, "VSYNC_VON: %u\n", measures.vsync_von);
+ seq_printf(s, "VSYNC_HOFF: %u\n", measures.vsync_hoff);
+ seq_printf(s, "VSYNC_VOFF: %u\n", measures.vsync_voff);
+ seq_printf(s, "VACTIVE_ON: %u\n", measures.vactive_on);
+ seq_printf(s, "VACTIVE_OFF: %u\n", measures.vactive_off);
+ seq_printf(s, "VTOTAL: %u\n", measures.vtotal);
+
+ /* Same measures converted to linux fbdev margin conventions */
+ seq_printf(s, "\nleft_margin/xres/right_margin/hsync_len: "
+ "%u/%u/%u/%u\n",
+ measures.hactive_on - measures.hsync_off,
+ measures.hactive_off - measures.hactive_on,
+ measures.htotal - measures.hactive_off,
+ measures.hsync_off);
+ seq_printf(s, "top_margin/yres/bot_margin/vsync_len: "
+ "%u/%u/%u/%u\n",
+ measures.vactive_on - measures.vsync_voff,
+ measures.vactive_off - measures.vactive_on,
+ measures.vtotal - measures.vactive_off,
+ measures.vsync_voff);
+}
+
+/* Generate the per-instance seq_file show() callback for camera _nr */
+#define MAKE_AVI_CAM_SHOW(_nr) \
+ static int avi_cam##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_cam_show(s, avi_ctrl.nodes[AVI_CAM##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_CAM_SHOW(0)
+MAKE_AVI_CAM_SHOW(1)
+MAKE_AVI_CAM_SHOW(2)
+MAKE_AVI_CAM_SHOW(3)
+MAKE_AVI_CAM_SHOW(4)
+MAKE_AVI_CAM_SHOW(5)
+
+/* Dump a gamma corrector's configuration and its full colour lookup
+ * table, formatted as four side-by-side columns of RY/GU/BV entries. */
+static void avi_gam_show(struct seq_file *s, const struct avi_node *gam)
+{
+ union avi_isp_gamma_corrector_conf cfg;
+ struct avi_cmap *cmap;
+ unsigned i;
+ unsigned column_sz;
+
+ /* struct avi_cmap is too big to be safely allocated on the stack (it
+ * contains all the lookup tables) */
+ cmap = kzalloc(sizeof(*cmap), GFP_KERNEL);
+
+ if (!cmap) {
+ seq_printf(s, "Error: can't allocate cmap struct\n");
+ return;
+ }
+
+ cfg._register = avi_gam_get_config(gam);
+
+ seq_printf(s, "BYPASS: %d\n", cfg.bypass);
+ seq_printf(s, "PALETTE: %d\n", cfg.palette);
+ seq_printf(s, "COMP_WIDTH: %db\n", cfg.comp_width ? 10 : 8);
+
+ avi_gam_get_cmap(gam, cmap);
+
+ seq_printf(s, "\n"
+ "ID : RY GU BV | "
+ "ID : RY GU BV | "
+ "ID : RY GU BV | "
+ "ID : RY GU BV\n");
+
+ /* Divide CLUT display in 4 columns. */
+ BUILD_BUG_ON(AVI_CMAP_SZ % 4 != 0);
+ column_sz = AVI_CMAP_SZ / 4;
+
+ for (i = 0; i < column_sz; i++) {
+ /* One entry from each quarter of the table per output line */
+ unsigned c1 = i;
+ unsigned c2 = c1 + column_sz;
+ unsigned c3 = c2 + column_sz;
+ unsigned c4 = c3 + column_sz;
+
+ seq_printf(s,
+ "%03x: %02x %02x %02x | "
+ "%03x: %02x %02x %02x | "
+ "%03x: %02x %02x %02x | "
+ "%03x: %02x %02x %02x\n",
+ c1, cmap->red[c1], cmap->green[c1], cmap->blue [c1],
+ c2, cmap->red[c2], cmap->green[c2], cmap->blue [c2],
+ c3, cmap->red[c3], cmap->green[c3], cmap->blue [c3],
+ c4, cmap->red[c4], cmap->green[c4], cmap->blue [c4]);
+ }
+
+ kfree(cmap);
+}
+
+/* Generate the per-instance seq_file show() callback for gamma unit _nr */
+#define MAKE_AVI_GAM_SHOW(_nr) \
+ static int avi_gam##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_gam_show(s, avi_ctrl.nodes[AVI_GAM##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_GAM_SHOW(0)
+MAKE_AVI_GAM_SHOW(1)
+
+/* Dump an LCD output's interface configuration, colorbar/clear state,
+ * the full TOP and BOT field timings (with derived fbdev-style margins)
+ * and the five programmable vsync generator windows. */
+static void avi_lcd_show(struct seq_file *s, const struct avi_node *lcd)
+{
+ struct avi_lcd_regs regs;
+ struct avi_lcd_avi_vsync_gen_regs vsync_gen_regs;
+
+ avi_lcd_get_registers(lcd, &regs);
+
+ seq_printf(s, "STATUS: FIELD: %s, FUF: %d, DONE: %d\n",
+ regs.status.field ? "BOT" : "TOP",
+ regs.status.fuf, regs.status.done);
+ seq_printf(s, "ITSOURCE: FUF: %d, DONE: %d\n",
+ regs.itsource.fuf_en, regs.itsource.done_en);
+ seq_printf(s, "INTERFACE: IVS: %d, IHS: %d, IPC: %d, IOE: %d\n",
+ regs.interface.ivs,
+ regs.interface.ihs,
+ regs.interface.ipc,
+ regs.interface.ioe);
+ seq_printf(s, "INTERFACE: PSYNC_RF: %d, PSYNC_EN: %d\n",
+ regs.interface.psync_rf,
+ regs.interface.psync_en);
+ seq_printf(s, "INTERFACE: ITU656: %d, PROG: %d\n",
+ regs.interface.itu656,
+ regs.interface.prog);
+ seq_printf(s, "INTERFACE: CLIP_EN: %d, FREE_RUN: %d\n",
+ regs.interface.clip_en,
+ regs.interface.free_run);
+ seq_printf(s, "INTERFACE: PAD_SELECT: %d, VSYNC_GEN: %d\n",
+ regs.interface.pad_select,
+ regs.interface.vsync_gen);
+ seq_printf(s, "DPD: 0x%06x\n", regs.dpd.dpd);
+ switch (regs.dpd.colorbar) {
+ case 0:
+ case 1:
+ seq_printf(s, "COLORBAR: NONE\n");
+ break;
+ case 2:
+ seq_printf(s, "COLORBAR: YUV\n");
+ break;
+ case 3:
+ seq_printf(s, "COLORBAR: RGB\n");
+ break;
+ }
+
+ /* FORCE_CLEAR only exists from AVI revision 3 onwards */
+#ifdef AVI_BACKWARD_COMPAT
+ if (avi_get_revision() >= AVI_REVISION_3) {
+#endif /* AVI_BACKWARD_COMPAT */
+ seq_printf(s, "FORCE_CLEAR: %s[%x]\n",
+ avi_debug_force_clear_to_str(regs.force_clear.force_clear),
+ regs.force_clear.force_clear);
+#ifdef AVI_BACKWARD_COMPAT
+ }
+#endif /* AVI_BACKWARD_COMPAT */
+
+/* Dump one field's timings; 'w' selects the top_*/bot_* register set.
+ * Note: no semicolon after while (0) so the call-site ';' closes the
+ * statement (the previous trailing ';' made the macro expand to two
+ * statements, which would break an unbraced if/else). */
+#define PRINT_TIMINGS(w) do { \
+ seq_printf(s, "FORMAT_CONTROL: "); \
+ avi_format_show(s, regs.w##_format_ctrl.w##_format_control); \
+ seq_printf(s, "\n"); \
+ seq_printf(s, "HSYNC_OFF: %u\n", \
+ regs.w##_h_timing0.w##_hsync_off); \
+ seq_printf(s, "HACTIVE_ON: %u\n", \
+ regs.w##_h_timing0.w##_hactive_on); \
+ seq_printf(s, "HACTIVE_OFF: %u\n", \
+ regs.w##_h_timing1.w##_hactive_off); \
+ seq_printf(s, "HTOTAL: %u\n", \
+ regs.w##_h_timing1.w##_htotal); \
+ seq_printf(s, "VSYNC_HON: %u\n", \
+ regs.w##_v_timing0.w##_vsync_hon); \
+ seq_printf(s, "VSYNC_VON: %u\n", \
+ regs.w##_v_timing0.w##_vsync_von); \
+ seq_printf(s, "VSYNC_HOFF: %u\n", \
+ regs.w##_v_timing1.w##_vsync_hoff); \
+ seq_printf(s, "VSYNC_VOFF: %u\n", \
+ regs.w##_v_timing1.w##_vsync_voff); \
+ seq_printf(s, "VACTIVE_ON: %u\n", \
+ regs.w##_v_timing2.w##_vactive_on); \
+ seq_printf(s, "VACTIVE_OFF: %u\n", \
+ regs.w##_v_timing2.w##_vactive_off); \
+ seq_printf(s, "VTOTAL: %u\n", \
+ regs.w##_v_timing3.w##_vtotal); \
+ seq_printf(s, "\nleft_margin/xres/right_margin/hsync_len: " \
+ "%u/%u/%u/%u\n", \
+ regs.w##_h_timing0.w##_hactive_on \
+ - regs.w##_h_timing0.w##_hsync_off, \
+ regs.w##_h_timing1.w##_hactive_off \
+ - regs.w##_h_timing0.w##_hactive_on, \
+ regs.w##_h_timing1.w##_htotal \
+ - regs.w##_h_timing1.w##_hactive_off, \
+ regs.w##_h_timing0.w##_hsync_off); \
+ seq_printf(s, "top_margin/yres/bot_margin/vsync_len: " \
+ "%u/%u/%u/%u\n", \
+ regs.w##_v_timing2.w##_vactive_on \
+ - regs.w##_v_timing1.w##_vsync_voff, \
+ regs.w##_v_timing2.w##_vactive_off \
+ - regs.w##_v_timing2.w##_vactive_on, \
+ regs.w##_v_timing3.w##_vtotal \
+ - regs.w##_v_timing2.w##_vactive_off, \
+ regs.w##_v_timing1.w##_vsync_voff); \
+ } while (0)
+
+ seq_printf(s, "\nTOP timings:\n");
+ PRINT_TIMINGS(top);
+ seq_printf(s, "\nBOT timings:\n");
+ PRINT_TIMINGS(bot);
+
+ avi_vsync_gen_get_registers(lcd, &vsync_gen_regs);
+ seq_printf(s, "\nVSync generator:\n");
+
+/* Dump one vsync window as "von:hon -> voff:hoff" */
+#define PRINT_VSYNC(_n) \
+ seq_printf(s, #_n ": %u:%u -> %u:%u\n", \
+ vsync_gen_regs.vsync_gen_on_##_n.vsync_gen_von, \
+ vsync_gen_regs.vsync_gen_on_##_n.vsync_gen_hon, \
+ vsync_gen_regs.vsync_gen_off_##_n.vsync_gen_voff, \
+ vsync_gen_regs.vsync_gen_off_##_n.vsync_gen_hoff)
+
+ PRINT_VSYNC(0);
+ PRINT_VSYNC(1);
+ PRINT_VSYNC(2);
+ PRINT_VSYNC(3);
+ PRINT_VSYNC(4);
+}
+
+/* Generate the per-instance seq_file show() callback for LCD output _nr */
+#define MAKE_AVI_LCD_SHOW(_nr) \
+ static int avi_lcd##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_lcd_show(s, avi_ctrl.nodes[AVI_LCD##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_LCD_SHOW(0)
+MAKE_AVI_LCD_SHOW(1)
+
+/* Dump a blender's background colour, output frame size and the
+ * position/alpha of its four input planes, read directly from the
+ * node's memory-mapped registers. */
+static void avi_blend_show(struct seq_file *s, const struct avi_node *blend)
+{
+ unsigned long base = avi_node_base(blend);
+ u32 val;
+ unsigned i;
+
+ seq_printf(s, "BACKGROUND: #%06x\n",
+ AVI_READ(base + AVI_BLEND_BACKGROUND));
+ /* Width in the low half-word, height in the high one */
+ val = AVI_READ(base + AVI_BLEND_FRAMEOUTSIZE);
+ seq_printf(s, "FRAMEOUTSIZE: %u x %u\n",
+ val & 0xffff, val >> 16);
+
+ /* Display the 4 plane configurations (OFFSET/ALPHA pairs are laid
+ * out 8 bytes apart per plane) */
+ for (i = 0; i < 4; i++) {
+ val = AVI_READ(base + AVI_BLEND_OFFSET0 + (i * 8));
+ seq_printf(s, "PLANE%u: x: %04x y: %04x alpha: %03x\n",
+ i, val & 0xffff, val >> 16,
+ AVI_READ(base + AVI_BLEND_ALPHA0 + (i * 8)));
+ }
+}
+
+/* Generate the per-instance seq_file show() callback for blender _nr */
+#define MAKE_AVI_BLEND_SHOW(_nr) \
+ static int avi_blend##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_blend_show(s, avi_ctrl.nodes[AVI_BLEND##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_BLEND_SHOW(0)
+MAKE_AVI_BLEND_SHOW(1)
+
+/* Print one scaler coefficient as an ASCII bar chart line: a 64-column
+ * band centered on 0 ('|' marks zero), with 'X' at the coefficient's
+ * value and '<'/'>' flagging out-of-range values. */
+static void avi_scal_coeff_histo(struct seq_file *s, unsigned tap,
+ unsigned n, u8 val)
+{
+ char histo[66];
+ /* values are stored in 8 bit in 2's complement */
+ s8 sval = (s8)val;
+ int index = sval * 2;
+
+ memset(histo, ' ', sizeof(histo));
+ histo[65] = '\0';
+ /* Zero marker; may be overwritten by 'X' below when sval == 0 */
+ histo[32] = '|';
+
+ /* Check if the value of the coeff is out of range */
+ if (index > 128)
+ histo[64] = '>';
+ else if (index < -128)
+ histo[0] = '<';
+ else
+ histo[(index + 128) / 4] = 'X';
+
+ seq_printf(s, " %u-%02d: % 4d [%s]\n", tap, n, sval, histo);
+}
+
+/* Dump one scaler dimension (horizontal or vertical): offset, increment,
+ * per-tap coefficient histograms and the per-phase coefficient sums
+ * (which should be roughly constant for a well-formed filter). */
+static void avi_scal_dump_coeff(struct seq_file *s, unsigned base, unsigned ntap)
+{
+ u32 val;
+ unsigned i;
+ unsigned j;
+ unsigned nphases;
+ union avi_scal_coeff coeff;
+
+ val = AVI_READ(base + AVI_SCAL_OFFSET) & 0xffffff;
+ seq_printf(s, "OFFSET: %u\n", val);
+
+ /* INCR is a fixed-point ratio in 1/256 units */
+ val = AVI_READ(base + AVI_SCAL_INCR) & 0xffffff;
+
+ seq_printf(s, "INCR: %u/256 (~%u)\n", val, val >> 8);
+
+ memcpy_from_registers(&coeff, base + AVI_SCAL_COEFF, sizeof(coeff));
+
+ /* The 32 coefficient slots are shared between taps and phases */
+ nphases = (32 / ntap);
+
+ for (i = 0; i < ntap; i++)
+ for (j = nphases; j--;)
+ avi_scal_coeff_histo(s,
+ i,
+ i * nphases + j,
+ coeff.bytes[i * nphases + j]);
+
+ seq_printf(s, "Phase sums:");
+ for (j = nphases; j--; ) {
+ int sum = 0;
+ for (i = 0; i < ntap; i++)
+ sum += (s8)coeff.bytes[i * nphases + j];
+ seq_printf(s, " %d", sum);
+ }
+ seq_printf(s, "\n");
+}
+
+/* Dump one scaler plane: tap/phase counts (encoded as log2 minus one in
+ * the register, hence the "2 <<" decode) and both dimensions' filters. */
+static void avi_scal_dump_plane(struct seq_file *s, unsigned base)
+{
+ u32 val;
+ unsigned ntapsx;
+ unsigned ntapsy;
+
+ val = AVI_READ(base + AVI_SCAL_NTAPS_NPHASES);
+
+ ntapsx = 2 << ((val >> AVI_SCAL_NTAPSX_SHIFT) & 3);
+ ntapsy = 2 << ((val >> AVI_SCAL_NTAPSY_SHIFT) & 3);
+
+ seq_printf(s,
+ "NTAPS: x: %u y: %u\n"
+ "NPHASES: x: %u y: %u\n",
+ ntapsx, ntapsy,
+ 2 << ((val >> AVI_SCAL_NPHASESX_SHIFT) & 3),
+ 2 << ((val >> AVI_SCAL_NPHASESY_SHIFT) & 3));
+
+ seq_printf(s, "Horizontal coefficients:\n");
+ avi_scal_dump_coeff(s, base + AVI_SCAL_HORIZ_DIM, ntapsx);
+ seq_printf(s, "Vertical coefficients:\n");
+ avi_scal_dump_coeff(s, base + AVI_SCAL_VERT_DIM, ntapsy);
+}
+
+/* Dump a scaler's internal routing, output size and both planes'
+ * (Y/AR and UV/GB) filter configurations. */
+static void avi_scal_show(struct seq_file *s, const struct avi_node *scal)
+{
+ unsigned long base = avi_node_base(scal);
+ u32 val;
+
+ /* Internal mux selector values of the scaler crossbar */
+ char const * const scal_inter[] = {
+ [0] = "Unconnected",
+ [1] = "VL Input",
+ [2] = "Horizontal scaler",
+ [3] = "Vertical scaler",
+ };
+
+ val = AVI_READ(base + AVI_SCAL_CONF);
+ seq_printf(s,
+ "CFG: OUTPUT_SRC: %s\n"
+ "CFG: HORIZONTAL_SRC: %s\n"
+ "CFG: VERTICAL_SRC: %s\n",
+ scal_inter[(val >> AVI_SCAL_CONF_OUTSRC_SHIFT) & 3],
+ scal_inter[(val >> AVI_SCAL_CONF_HORIZSRC_SHIFT) & 3],
+ scal_inter[(val >> AVI_SCAL_CONF_VERTSRC_SHIFT) & 3]);
+ /* NOTE(review): FORMATOUT is decoded from the OUTSRC shift here,
+ * not a dedicated format field -- looks suspicious, confirm against
+ * the AVI_SCAL_CONF register layout. */
+ seq_printf(s,
+ "CFG: SEMIPLANAR: %d FORMATOUT: %s\n",
+ (val >> AVI_SCAL_CONF_PLANAR_SHIFT) & 1,
+ ((val >> AVI_SCAL_CONF_OUTSRC_SHIFT) & 1) ?
+ "4:2:2" : "4:4:4");
+
+ val = AVI_READ(base + AVI_SCAL_SIZEOUT);
+ seq_printf(s,
+ "SIZEOUT: %u x %u\n", val & 0xffff, val >> 16);
+
+ seq_printf(s, "AR or Y plane:\n");
+ avi_scal_dump_plane(s, base + AVI_SCAL_PLANE0);
+ seq_printf(s, "GB or UV plane:\n");
+ avi_scal_dump_plane(s, base + AVI_SCAL_PLANE1);
+}
+
+/* Generate the per-instance seq_file show() callback for scaler _nr
+ * (note the node name is AVI_SCALx0_NODE, hence the pasted '0'). */
+#define MAKE_AVI_SCAL_SHOW(_nr) \
+ static int avi_scal##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_scal_show(s, avi_ctrl.nodes[AVI_SCAL##_nr##0_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_SCAL_SHOW(0)
+MAKE_AVI_SCAL_SHOW(1)
+
+/* Dump a rotator's angle, horizontal flip flag and output geometry. */
+static void avi_rot_show(struct seq_file *s, const struct avi_node *rot)
+{
+ unsigned long base = avi_node_base(rot);
+ u32 val;
+ /* Indexed by the hardware angle field (90 and 270 are swapped
+ * relative to numeric order on purpose, matching the enum values) */
+ char const * const angle[] = {
+ [AVI_ROT_ANGLE_0] = "0",
+ [AVI_ROT_ANGLE_90] = "90",
+ [AVI_ROT_ANGLE_270] = "270",
+ [AVI_ROT_ANGLE_180] = "180",
+ };
+
+ val = AVI_READ(base + AVI_ROT_ANGLE);
+
+ seq_printf(s, "ANGLE: %s degrees HORIZONTAL FLIP: %d\n",
+ angle[val & AVI_ROT_ANGLE_MASK],
+ !!(val & AVI_ROT_HORIZ_FLIP));
+
+ val = AVI_READ(base + AVI_ROT_SIZE);
+ seq_printf(s, "HEIGHTOUT: %d NSTRIPE: %d\n", val >> 16, val & 0xffff);
+}
+
+/* Generate the per-instance seq_file show() callback for rotator _nr */
+#define MAKE_AVI_ROT_SHOW(_nr) \
+ static int avi_rot##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_rot_show(s, avi_ctrl.nodes[AVI_ROT##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_ROT_SHOW(0)
+MAKE_AVI_ROT_SHOW(1)
+
+/* Dump the bypass flags of every module in a Bayer ISP chain (1 means
+ * the module is bypassed). */
+static void avi_isp_chain_bayer_inter_show(struct seq_file *s,
+ const struct avi_node *chain_bayer)
+{
+ struct avi_isp_chain_bayer_inter_regs regs;
+
+ avi_isp_chain_bayer_inter_get_registers(chain_bayer, &regs);
+
+ seq_printf(s, "PEDESTAL: %d\n", regs.module_bypass.pedestal_bypass);
+ seq_printf(s, "GRIM: %d\n", regs.module_bypass.grim_bypass);
+ seq_printf(s, "DPC: %d\n", regs.module_bypass.rip_bypass);
+ seq_printf(s, "DENOISE: %d\n", regs.module_bypass.denoise_bypass);
+ seq_printf(s, "LSC: %d\n", regs.module_bypass.lsc_bypass);
+ seq_printf(s, "CAC: %d\n", regs.module_bypass.chroma_aber_bypass);
+ seq_printf(s, "BAYER: %d\n", regs.module_bypass.bayer_bypass);
+ seq_printf(s, "CC: %d\n", regs.module_bypass.color_matrix_bypass);
+}
+
+/* Generate the per-instance show() callback for Bayer ISP chain _nr */
+#define MAKE_AVI_ISP_CHAIN_BAYER_INTER_SHOW(_nr) \
+ static int avi_isp_chain_bayer_inter##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_isp_chain_bayer_inter_show(s, \
+ avi_ctrl.nodes[AVI_ISP_CHAIN_BAYER##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_ISP_CHAIN_BAYER_INTER_SHOW(0)
+MAKE_AVI_ISP_CHAIN_BAYER_INTER_SHOW(1)
+
+/* Bayer colour-filter-array layout names, indexed by the 2-bit CFA
+ * register field; shared by the bayer, stats and pedestal dumpers. */
+static const char *cfa_format_to_str[] = {
+ [0] = "BGGR",
+ [1] = "RGGB",
+ [2] = "GBRG",
+ [3] = "GRBG",
+};
+
+/* Dump the demosaicing (bayer) module's CFA layout and thresholds. */
+static void avi_isp_bayer_show(struct seq_file *s,
+ const struct avi_node *chain_bayer)
+{
+ struct avi_isp_bayer_regs regs;
+
+ avi_isp_bayer_get_registers(chain_bayer, &regs);
+
+ seq_printf(s, "CFA: %s\n", cfa_format_to_str[regs.cfa.cfa]);
+ seq_printf(s, "THRESHOLD1: %d\n", regs.threshold_1.threshold_1);
+ seq_printf(s, "THRESHOLD2: %d\n", regs.threshold_2.threshold_2);
+}
+
+/* Generate the per-instance show() callback for bayer module _nr */
+#define MAKE_AVI_ISP_BAYER_SHOW(_nr) \
+ static int avi_isp_bayer##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_isp_bayer_show(s, \
+ avi_ctrl.nodes[AVI_ISP_CHAIN_BAYER##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_ISP_BAYER_SHOW(0)
+MAKE_AVI_ISP_BAYER_SHOW(1)
+
+/* Dump the Bayer statistics module: measurement window geometry, the
+ * vignetting-circle parameters (cross-checking that the "squared" fields
+ * really are the squares of the centre coordinates), sampling increments,
+ * saturation threshold and CFA layout. */
+static void avi_stats_bayer_show(struct seq_file *s,
+ struct avi_node *chain_bayer)
+{
+ struct avi_isp_statistics_bayer_regs regs;
+ unsigned x_center;
+ unsigned y_center;
+ unsigned x_squared;
+ unsigned y_squared;
+ int squared_ok;
+
+ avi_isp_statistics_bayer_get_registers(chain_bayer, &regs);
+
+ seq_printf(s, "WINDOW: %dx%d\n",
+ regs.window_x.x_width,
+ regs.window_y.y_width);
+ seq_printf(s, "OFFSETS: %dx%d\n",
+ regs.window_x.x_offset,
+ regs.window_y.y_offset);
+ seq_printf(s, "MAX_NB_WIN: %dx%d\n",
+ regs.max_nb_windows.x_window_count,
+ regs.max_nb_windows.y_window_count);
+ seq_printf(s, "MEASURE_REQ: CLEAR: %d\n",
+ regs.measure_req.clear);
+
+ x_center = regs.circle_pos_x_center.x_center;
+ y_center = regs.circle_pos_y_center.y_center;
+
+ seq_printf(s, "CIRCLE: CENTER: %dx%d\n", x_center, y_center);
+
+ x_squared = regs.circle_pos_x_squared.x_squared;
+ y_squared = regs.circle_pos_y_squared.y_squared;
+
+ /* The hardware expects software to program center^2 separately;
+ * flag a mismatch as NOK */
+ squared_ok = (x_center * x_center == x_squared) &&
+ (y_center * y_center == y_squared);
+
+ seq_printf(s, "CIRCLE: SQUARED: %dx%d [%s]\n",
+ regs.circle_pos_x_squared.x_squared,
+ regs.circle_pos_y_squared.y_squared,
+ squared_ok ? "OK" : "NOK");
+
+ seq_printf(s, "INCR_LOG2: %dx%d\n",
+ regs.increments_log2.x_log2_inc,
+ regs.increments_log2.y_log2_inc);
+ seq_printf(s, "SAT_THRESHOLD: %d\n",
+ regs.sat_threshold.threshold);
+ seq_printf(s, "CFA: %s\n",
+ cfa_format_to_str[regs.cfa.cfa]);
+}
+
+/* Generate the per-instance show() callback for Bayer stats module _nr */
+#define MAKE_AVI_STATS_BAYER_SHOW(_nr) \
+ static int avi_stats_bayer##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_stats_bayer_show(s, \
+ avi_ctrl.nodes[AVI_ISP_CHAIN_BAYER##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_STATS_BAYER_SHOW(0)
+MAKE_AVI_STATS_BAYER_SHOW(1)
+
+/* Dump the pedestal module: CFA layout and the per-channel black-level
+ * values subtracted from each Bayer component. */
+static void avi_isp_pedestal_show(struct seq_file *s,
+ struct avi_node *chain_bayer)
+{
+ struct avi_isp_pedestal_regs regs;
+
+ avi_isp_pedestal_get_registers(chain_bayer, &regs);
+
+ seq_printf(s, "CFA: %s\n", cfa_format_to_str[regs.cfa.cfa]);
+ seq_printf(s, "SUB_R: %u\n", regs.sub_r.sub_r);
+ seq_printf(s, "SUB_GB: %u\n", regs.sub_gb.sub_gb);
+ seq_printf(s, "SUB_GR: %u\n", regs.sub_gr.sub_gr);
+ seq_printf(s, "SUB_B: %u\n", regs.sub_b.sub_b);
+}
+
+/* Generate the per-instance show() callback for pedestal module _nr */
+#define MAKE_AVI_ISP_PEDESTAL_SHOW(_nr) \
+ static int avi_isp_pedestal##_nr##_show(struct seq_file *s, \
+ void *unused) \
+ { \
+ avi_isp_pedestal_show(s, \
+ avi_ctrl.nodes[AVI_ISP_CHAIN_BAYER##_nr##_NODE]); \
+ return 0; \
+ }
+
+MAKE_AVI_ISP_PEDESTAL_SHOW(0)
+MAKE_AVI_ISP_PEDESTAL_SHOW(1)
+
+/* Print the names of the modules whose bit is set in the three AVI
+ * configuration registers (used for the enable/apply/lock/itflg/iten
+ * banks). The bit-to-name tables below follow the hardware register
+ * layout; output order is identical to the former unrolled version. */
+static void avi_cfg_display(struct seq_file *s, const char *n, u32 r1, u32 r2,
+ u32 r3)
+{
+ static const char *const r1_bits[] = {
+ "CFG", "INTER", "FIFO00", "FIFO01", "FIFO02", "FIFO03",
+ "FIFO04", "FIFO05", "FIFO06", "FIFO07", "FIFO08", "FIFO09",
+ "FIFO10", "FIFO11",
+ };
+ static const char *const r2_bits[] = {
+ "CONV0", "CONV1", "CONV2", "CONV3", "BLEND0", "BLEND1",
+ "LCD0", "LCD1", "CAM0", "CAM1", "CAM2", "CAM3", "CAM4",
+ "CAM5", "SCAL0", "ROT0", "SCAL1", "ROT1", "GAM0", "GAM1",
+ "FORK0", "FORK1", "FORK2", "FORK3", "SAT0", "SAT1",
+ "STATS_YUV0", "STATS_YUV1", "I3D_LUT0", "I3D_LUT1",
+ };
+ static const char *const r3_bits[] = {
+ "ISP_BAYER_INTER0", "VLFORMAT_32TO40_0", "PEDESTAL0",
+ "GRIM0", "RIP0", "DENOISE0", "STATS_BAYER0", "LSC0",
+ "CHROMA_ABER0", "BAYER0", "COLOR_MATRIX0",
+ "VLFORMAT_40TO32_0", "ISP_BAYER_INTER1", "VLFORMAT_32TO40_1",
+ "PEDESTAL1", "GRIM1", "RIP1", "DENOISE1", "STATS_BAYER1",
+ "LSC1", "CHROMA_ABER1", "BAYER1", "COLOR_MATRIX1",
+ "VLFORMAT_40TO32_1", "ISP_YUV_INTER0", "EE_CRF0", "I3D_LUT0",
+ "DROP0", "ISP_YUV_INTER1", "EE_CRF1", "I3D_LUT1", "DROP1",
+ };
+ unsigned i;
+
+ seq_printf(s, "%s:", n);
+
+ /* 1U << i: bit 31 of r3 is used, and shifting a signed 1 into the
+ * sign bit is undefined behaviour */
+ for (i = 0; i < ARRAY_SIZE(r1_bits); i++)
+ if (r1 & (1U << i))
+ seq_printf(s, " %s", r1_bits[i]);
+
+ for (i = 0; i < ARRAY_SIZE(r2_bits); i++)
+ if (r2 & (1U << i))
+ seq_printf(s, " %s", r2_bits[i]);
+
+ for (i = 0; i < ARRAY_SIZE(r3_bits); i++)
+ if (r3 & (1U << i))
+ seq_printf(s, " %s", r3_bits[i]);
+
+ seq_printf(s, "\n");
+}
+
+/* debugfs "cfg" file: dump the AVI configuration block registers.  Each
+ * register family (enable/apply/lock/itflg/iten) is split over three 32bit
+ * words that avi_cfg_display() decodes into module names. */
+static int avi_cfg_show(struct seq_file *s, void *unused)
+{
+	struct avi_cfg_regs regs;
+
+	avi_cfg_get_registers(&regs);
+
+/* Token-pasting helper: DISPLAY(enable) expands to
+ * avi_cfg_display(s, "enable", regs.enable1._register,
+ *                 regs.enable2._register, regs.isp_enable3._register);
+ * i.e. the third word of each family lives in the "isp_"-prefixed field. */
+#define DISPLAY(_n) avi_cfg_display(s, #_n, regs._n ##1 ._register,	\
+				    regs._n ##2 ._register,	\
+				    regs.isp_ ## _n ##3 ._register)
+	DISPLAY(enable);
+	DISPLAY(apply);
+	DISPLAY(lock);
+	DISPLAY(itflg);
+	DISPLAY(iten);
+#undef DISPLAY
+	seq_printf(s, "DMACFG: FLAG_TIMEOUT:%u FLAG_NUMBER:%u\n",
+		   regs.dmacfg.dma_flag_timeout,regs.dmacfg.dma_flag_number);
+	seq_printf(s, "DMACFG: MAX_BURST_NUMBER:%u MAX_BANDWIDTH:%u\n",
+		   regs.dmacfg.dma_max_burst_number,regs.dmacfg.dma_max_bandwidth);
+	return 0;
+}
+
+/* Print a one-line human readable summary of a segment format:
+ * "<width>x<height><i|p> <colorspace>", followed, when print_dma is
+ * non-zero, by the DMA pixel format and the per-plane line sizes. */
+static void avi_debugfs_show_segment_format(struct seq_file *seq,
+					    struct avi_segment_format *fmt,
+					    int print_dma)
+{
+	char scan;
+
+	if (fmt->interlaced)
+		scan = 'i';
+	else
+		scan = 'p';
+
+	seq_printf(seq, "%ux%u%c %s",
+		   fmt->width, fmt->height, scan,
+		   avi_debug_colorspace_to_str(fmt->colorspace));
+
+	if (!print_dma)
+		return;
+
+	seq_printf(seq, " %s %uB/line@plane0 %uB/line@plane1",
+		   avi_debug_pixfmt_to_str(fmt->pix_fmt),
+		   fmt->plane0.line_size,
+		   fmt->plane1.line_size);
+}
+
+/* Print the names of the capabilities set in "caps", separated by " | ".
+ * avi_debug_caps_to_str() consumes one capability bit per call and returns
+ * NULL once caps is exhausted.  Prints "<none>" when no bit is set.
+ * Always returns 0 (seq_file show convention). */
+int avi_cap_debugfs_show(struct seq_file *s, unsigned long caps)
+{
+	const char *name;
+	const char *sep = "";
+	int	    printed = 0;
+
+	for (name = avi_debug_caps_to_str(&caps);
+	     name != NULL;
+	     name = avi_debug_caps_to_str(&caps)) {
+		seq_printf(s, "%s%s", sep, name);
+		sep = " | ";
+		printed = 1;
+	}
+
+	if (!printed)
+		seq_printf(s, "<none>");
+
+	return 0;
+}
+
+/* Per-segment dump callback used by avi_segments_show().
+ *
+ * Prints the segment id, activation state, owner, connected source/sink
+ * segments, layout, input/output formats, timing information, capabilities
+ * and the chain of AVI nodes it uses.  Always returns 0 so that
+ * avi_segment_foreach() visits every segment. */
+static int avi_segment_show(struct avi_segment *s, void *priv)
+{
+	struct seq_file *seq = priv;
+	struct avi_node *dma_in_planar;
+	int i;
+	int n;
+
+	/* Make sure the segment won't be destroyed while we're accessing it */
+	mutex_lock(&s->lock);
+
+	seq_printf(seq, "\nSegment \"%.*s\" [%s]",
+		   AVI_SEGMENT_ID_LEN,
+		   s->id,
+		   avi_debug_activation_to_str(s->active));
+
+	if (s->owner)
+		seq_printf(seq, " owned by %s.%d:\n",
+			   dev_driver_string(s->owner),
+			   s->owner->id);
+	else
+		seq_printf(seq, " orphaned:\n");
+
+	seq_printf(seq, " Source segments: ");
+
+	for (i = 0, n = 0; i < AVI_SEGMENT_MAX_SRC; i++)
+		if (s->source[i]) {
+			if (n++)
+				seq_printf(seq, ", ");
+			seq_printf(seq, "\"%.*s\" [%d]",
+				   AVI_SEGMENT_ID_LEN, s->source[i]->id, i);
+		}
+
+	if (n == 0)
+		seq_printf(seq, "<none>");
+
+	seq_printf(seq, "\n Sink segments: ");
+
+	for (i = 0, n = 0; i < AVI_SEGMENT_MAX_SINK; i++)
+		if (s->sink[i]) {
+			if (n++)
+				seq_printf(seq, ", ");
+			seq_printf(seq, "\"%.*s\" [%d]",
+				   AVI_SEGMENT_ID_LEN, s->sink[i]->id, i);
+		}
+
+	if (n == 0)
+		seq_printf(seq, "<none>");
+
+	seq_printf(seq, "\n Layout: x: %u, y: %u, alpha: %d, %s",
+		   s->layout.x, s->layout.y,
+		   s->layout.alpha,
+		   s->layout.hidden ? "hidden" : "displayed");
+
+	seq_printf(seq, "\n Input format: ");
+	avi_debugfs_show_segment_format(seq,
+					&s->input_format,
+					!!(s->caps & AVI_CAP_DMA_IN));
+
+	seq_printf(seq, "\n Output format: ");
+	avi_debugfs_show_segment_format(seq,
+					&s->output_format,
+					!!(s->caps & AVI_CAP_DMA_OUT));
+
+	/* (USEC_PER_SEC + period / 2) / period rounds the frequency to the
+	 * nearest integer */
+	if (s->stream_period_us)
+		seq_printf(seq, "\n Stream Period: %ldus (~%ldHz)",
+			   s->stream_period_us,
+			   (USEC_PER_SEC + s->stream_period_us / 2) / s->stream_period_us);
+
+	if (s->period_us) {
+		seq_printf(seq, "\n Period: %ldus (~%ldHz)",
+			   s->period_us,
+			   (USEC_PER_SEC + s->period_us / 2) / s->period_us);
+		seq_printf(seq, "\n Dropped frames: %lu", s->frame_dropped);
+	}
+
+	seq_printf(seq, "\n Capabilities: ");
+	avi_cap_debugfs_show(seq, s->caps);
+
+	seq_printf(seq, "\n AVI Nodes: ");
+
+	/* Blenders are connected in reverse order */
+	for (i = 1, n = 0; i >= 0; i--) {
+		if (s->blender[i]) {
+			if (n++)
+				seq_printf(seq, " -> ");
+
+			/* Fix: node names must not be used as format
+			 * strings, always print them through "%s" */
+			seq_printf(seq, "%s", s->blender[i]->name);
+		}
+	}
+
+	/* Plane1 input for planar formats is not in the main node array */
+	dma_in_planar = avi_segment_get_node(s, AVI_CAP_PLANAR);
+	if (dma_in_planar)
+		seq_printf(seq, "%s + ", dma_in_planar->name);
+
+	for (i = 0; i < s->nodes_nr; i++) {
+		if (n++)
+			seq_printf(seq, " -> ");
+
+		seq_printf(seq, "%s", s->nodes[i]->name);
+	}
+
+	if (n == 0)
+		seq_printf(seq, "<none>");
+
+	seq_printf(seq, "\n");
+
+	mutex_unlock(&s->lock);
+
+	return 0;
+}
+
+/* Top-level "segments" debugfs file: print a header with the number of
+ * registered segments, then dump each of them via avi_segment_show(). */
+static int avi_segments_show(struct seq_file *seq, void *unused)
+{
+	unsigned nr = avi_segment_count();
+
+	seq_printf(seq, "%d registered segment%s:\n", nr, (nr == 1) ? "" : "s");
+
+	return avi_segment_foreach(&avi_segment_show, seq);
+}
+
+/* "access" debugfs file: dump the AVI register access log maintained by the
+ * logging facility. */
+static int avi_access_show(struct seq_file *seq, void *unused)
+{
+	return avi_log_display(seq);
+}
+
+/* Generic debugfs open routine shared by every AVI file: the per-file
+ * seq_file "show" callback was stashed in inode->i_private when the file
+ * was created in avi_debugfs_init(). */
+static int avi_debugfs_open(struct inode *inode, struct file *file)
+{
+	int (*show)(struct seq_file *, void *) = inode->i_private;
+
+	return single_open(file, show, NULL);
+}
+
+/* All AVI debugfs files share these operations; only the "show" callback
+ * (recovered from inode->i_private in avi_debugfs_open()) differs. */
+static const struct file_operations avi_debugfs_fops = {
+	.open		= avi_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/* Create the "avi" debugfs hierarchy and register every per-node file.
+ *
+ * Each file stores its "show" callback in inode->i_private (see
+ * avi_debugfs_open()).  Returns 0 on success or a negative error code;
+ * fix: on any failure the whole hierarchy is now removed (the original
+ * code leaked the "avi" root directory when creating the isp0/isp1
+ * sub-directories failed). */
+static int __init avi_debugfs_init(void)
+{
+	int ret;
+
+	BUG_ON(!IS_ERR_OR_NULL(avi_debugfs_root));
+
+	avi_debugfs_root = debugfs_create_dir("avi", NULL);
+
+	if (IS_ERR_OR_NULL(avi_debugfs_root)) {
+		ret = avi_debugfs_root ? PTR_ERR(avi_debugfs_root) : -ENOMEM;
+		avi_debugfs_root = NULL;
+		return ret;
+	}
+
+	avi_isp0_debugfs_root = debugfs_create_dir("isp0", avi_debugfs_root);
+
+	if (IS_ERR_OR_NULL(avi_isp0_debugfs_root)) {
+		ret = avi_isp0_debugfs_root ? PTR_ERR(avi_isp0_debugfs_root) :
+			-ENOMEM;
+		goto err_cleanup;
+	}
+
+	avi_isp1_debugfs_root = debugfs_create_dir("isp1", avi_debugfs_root);
+
+	if (IS_ERR_OR_NULL(avi_isp1_debugfs_root)) {
+		ret = avi_isp1_debugfs_root ? PTR_ERR(avi_isp1_debugfs_root) :
+			-ENOMEM;
+		goto err_cleanup;
+	}
+
+/* Both helpers evaluate to non-zero when the file creation failed. */
+#define AVI_DEBUGFS_REGISTER(_name, _cback)				\
+	IS_ERR_OR_NULL(debugfs_create_file((_name), S_IRUGO,		\
+					   avi_debugfs_root,		\
+					   (_cback),			\
+					   &avi_debugfs_fops))
+
+#define AVI_ISP_DEBUGFS_REGISTER(_name, _nr, _cback)			\
+	IS_ERR_OR_NULL(debugfs_create_file((_name), S_IRUGO,		\
+					   avi_isp##_nr##_debugfs_root,	\
+					   (_cback),			\
+					   &avi_debugfs_fops))
+
+	if (AVI_DEBUGFS_REGISTER("inter", &avi_inter_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo00", &avi_fifo00_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo01", &avi_fifo01_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo02", &avi_fifo02_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo03", &avi_fifo03_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo04", &avi_fifo04_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo05", &avi_fifo05_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo06", &avi_fifo06_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo07", &avi_fifo07_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo08", &avi_fifo08_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo09", &avi_fifo09_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo10", &avi_fifo10_show) ||
+	    AVI_DEBUGFS_REGISTER("fifo11", &avi_fifo11_show) ||
+	    AVI_DEBUGFS_REGISTER("conv0", &avi_conv0_show) ||
+	    AVI_DEBUGFS_REGISTER("conv1", &avi_conv1_show) ||
+	    AVI_DEBUGFS_REGISTER("conv2", &avi_conv2_show) ||
+	    AVI_DEBUGFS_REGISTER("conv3", &avi_conv3_show) ||
+	    AVI_DEBUGFS_REGISTER("cam0", &avi_cam0_show) ||
+	    AVI_DEBUGFS_REGISTER("cam1", &avi_cam1_show) ||
+	    AVI_DEBUGFS_REGISTER("cam2", &avi_cam2_show) ||
+	    AVI_DEBUGFS_REGISTER("cam3", &avi_cam3_show) ||
+	    AVI_DEBUGFS_REGISTER("cam4", &avi_cam4_show) ||
+	    AVI_DEBUGFS_REGISTER("cam5", &avi_cam5_show) ||
+	    AVI_DEBUGFS_REGISTER("gam0", &avi_gam0_show) ||
+	    AVI_DEBUGFS_REGISTER("gam1", &avi_gam1_show) ||
+	    AVI_DEBUGFS_REGISTER("lcd0", &avi_lcd0_show) ||
+	    AVI_DEBUGFS_REGISTER("lcd1", &avi_lcd1_show) ||
+	    AVI_DEBUGFS_REGISTER("blend0", &avi_blend0_show) ||
+	    AVI_DEBUGFS_REGISTER("blend1", &avi_blend1_show) ||
+	    AVI_DEBUGFS_REGISTER("scal0", &avi_scal0_show) ||
+	    AVI_DEBUGFS_REGISTER("scal1", &avi_scal1_show) ||
+	    AVI_DEBUGFS_REGISTER("rot0", &avi_rot0_show) ||
+	    AVI_DEBUGFS_REGISTER("rot1", &avi_rot1_show) ||
+	    AVI_DEBUGFS_REGISTER("cfg", &avi_cfg_show) ||
+	    AVI_DEBUGFS_REGISTER("segments", &avi_segments_show) ||
+	    AVI_DEBUGFS_REGISTER("access", &avi_access_show) ||
+
+	    AVI_ISP_DEBUGFS_REGISTER("inter", 0,
+				     &avi_isp_chain_bayer_inter0_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("inter", 1,
+				     &avi_isp_chain_bayer_inter1_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("bayer", 0,
+				     &avi_isp_bayer0_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("bayer", 1,
+				     &avi_isp_bayer1_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("stats_bayer", 0,
+				     &avi_stats_bayer0_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("stats_bayer", 1,
+				     &avi_stats_bayer1_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("pedestal", 0,
+				     &avi_isp_pedestal0_show) ||
+	    AVI_ISP_DEBUGFS_REGISTER("pedestal", 1,
+				     &avi_isp_pedestal1_show)) {
+		ret = -ENOMEM;
+		goto err_cleanup;
+	}
+
+	printk(KERN_INFO "AVI DebugFS registered\n");
+
+	return 0;
+
+err_cleanup:
+	/* Removing the root recursively also removes the isp0/isp1
+	 * sub-directories and any file created so far. */
+	debugfs_remove_recursive(avi_debugfs_root);
+	avi_debugfs_root = NULL;
+	return ret;
+}
+module_init(avi_debugfs_init);
+
+/* Tear down the whole AVI debugfs hierarchy created by avi_debugfs_init().
+ * debugfs_remove_recursive() also removes the isp0/isp1 sub-directories. */
+static void __exit avi_debugfs_exit(void)
+{
+	BUG_ON(IS_ERR_OR_NULL(avi_debugfs_root));
+
+	debugfs_remove_recursive(avi_debugfs_root);
+	avi_debugfs_root = NULL;
+}
+module_exit(avi_debugfs_exit);
+
+MODULE_DESCRIPTION("AVI debugfs module");
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi_dma.c b/drivers/parrot/video/avi_dma.c
new file mode 100644
index 00000000000000..f62afaf70029e2
--- /dev/null
+++ b/drivers/parrot/video/avi_dma.c
@@ -0,0 +1,620 @@
+#include <linux/module.h>
+#include "avi_dma.h"
+#include "avi_segment.h"
+#include "avi_pixfmt.h"
+
+/**
+ * Tales of AVI: DMA
+ *
+ * Definition:
+ * - whole frame: a complete frame
+ * - stripped frame: a part of a whole frame
+ * - left address:
+ * physical address of first byte of left pixel of stripped frame
+ * - right address:
+ * physical address of last byte of right pixel of stripped frame
+ *
+ * Whole frames in memory have to be compliant to AVI constraints:
+ * - start addresses must be 64 bits aligned
+ * - line sizes must be 64 bits aligned
+ * - line sizes must be a multiple of pixel width
+ * - for specific pixel formats, width and/or height must be even
+ *
+ * Stripped frames have also to be compliant to AVI constraints:
+ * - for specific pixel formats, width and/or height must be even
+ * - for P7R1/P7R2, left and right addresses must be pixel aligned and 64 bits aligned
+ * - for P7R3, left and right addresses must be pixel aligned.
+ */
+
+/* Swap calculations:
+ *
+ * The swap value is made of 8 values, each the index of a byte to pick.
+ *
+ * The top 4 bytes are always the same as the bottom bytes + 4 (we don't
+ * have any format where we apply a different swapping for the top and bottom
+ * 32 bits).
+ *
+ * A same byte is never repeated (picked twice).
+ *
+ * Therefore, we can represent the swapping by only 3 values: the first three
+ * values for the bottom 32bits.
+ *
+ * The 4th value is found by elimination (if the values are 0, 1, 3 then the 4th
+ * can only be 2).
+ */
+
+/* Compute the FIFO swap0 register value for a pixel format.  Only three of
+ * the four 2-bit byte indices are stored in the descriptor; since all four
+ * indices are distinct and 0^1^2^3 == 0, the missing one is recovered as
+ * the XOR of the other three. */
+static inline u32 avi_dma_pixfmt_swap0(struct avi_dma_pixfmt pixfmt)
+{
+	const unsigned byte0 = pixfmt.swap0_b0;
+	const unsigned byte1 = pixfmt.swap0_b1;
+	const unsigned byte2 = pixfmt.swap0_b2;
+
+	return AVI_FIFO_MAKE_SWAP(byte0, byte1, byte2,
+				  byte0 ^ byte1 ^ byte2);
+}
+
+/* Compute the FIFO swap1 register value for a pixel format.  Same implicit
+ * fourth-byte recovery as avi_dma_pixfmt_swap0(). */
+static inline u32 avi_dma_pixfmt_swap1(struct avi_dma_pixfmt pixfmt)
+{
+	const unsigned byte0 = pixfmt.swap1_b0;
+	const unsigned byte1 = pixfmt.swap1_b1;
+	const unsigned byte2 = pixfmt.swap1_b2;
+
+	return AVI_FIFO_MAKE_SWAP(byte0, byte1, byte2,
+				  byte0 ^ byte1 ^ byte2);
+}
+
+/*
+ * Maximal usable "pincr" value for a given FIFO; it depends on the chip
+ * revision:
+ *  - P7R3 (and any unknown revision): 255 for every FIFO,
+ *  - P7R1/P7R2: 64 for FIFO00 and FIFO01, 128 for the other FIFOs.
+ */
+static inline unsigned avi_fifo_max_pincr(const struct avi_node *fifo)
+{
+	int rev = avi_get_revision();
+
+	if (rev == AVI_REVISION_1 || rev == AVI_REVISION_2) {
+		if (fifo->node_id == AVI_FIFO00_NODE ||
+		    fifo->node_id == AVI_FIFO01_NODE)
+			return 64;
+
+		return 128;
+	}
+
+	/* AVI_REVISION_3 and anything else */
+	return 255;
+}
+
+/*
+ * Convert a memory bandwidth (in bytes/s) into a FIFO "pincr" priority.
+ *
+ * pincr is an inverse priority: 1 is the highest priority (0 is invalid)
+ * and 255 the lowest.  0 B/s maps to max_pincr and anything at or above the
+ * bandwidth of 1080p60 @ 32bpp maps to 1, with a linear interpolation in
+ * between.
+ */
+static inline unsigned avi_fifo_bw_to_pincr(unsigned long bw, unsigned max_pincr)
+{
+	/* Reference bandwidth: 1080p60 at 32 bits per pixel, in KB/s */
+	static const unsigned long bwmax = 1920 * 1080 * 4 * 60 / 1024;
+	unsigned span;
+	unsigned pincr;
+
+	/* Work in KB/s so the interpolation below fits in 32 bits */
+	bw /= 1024;
+
+	if (bw >= bwmax)
+		return 1;
+
+	if (max_pincr > 255)
+		max_pincr = 255;
+
+	/* Interpolation span; the historical behaviour keeps a span of 255
+	 * (not 254) when max_pincr is exactly 255, the final clamp below
+	 * makes up for it. */
+	if (max_pincr < 255)
+		span = max_pincr - 1;
+	else
+		span = max_pincr;
+
+	pincr = max_pincr - (span * bw / bwmax);
+	if (pincr < 1)
+		pincr = 1;
+
+	return pincr;
+}
+
+/* Program the configuration registers of the input FIFO(s) for format "in".
+ *
+ * @fifo_plane0: FIFO fetching plane 0 (or the whole frame)
+ * @fifo_plane1: FIFO fetching plane 1; only used for planar formats, may be
+ *               NULL otherwise
+ * @in: input segment format
+ *
+ * The DMA start addresses (dmasa0/1) and line strides (dmalstr0/1) are NOT
+ * programmed here, see avi_dma_set_input_buffer(). */
+void avi_dma_setup_input(const struct avi_node *fifo_plane0,
+			 const struct avi_node *fifo_plane1,
+			 const struct avi_segment_format *in)
+{
+	struct avi_fifo_registers fifo_reg_0, fifo_reg_1;
+	struct avi_dma_pixfmt const pixfmt = in->pix_fmt;
+
+	/* Clear overall configuration. */
+	memset(&fifo_reg_0, 0, sizeof(fifo_reg_0));
+	memset(&fifo_reg_1, 0, sizeof(fifo_reg_1));
+
+	/* Setup interrupts. */
+	fifo_reg_0.cfg.itline = 0x0F;
+	fifo_reg_0.cfg.itsource = 1;
+
+	/* Input DMA from memory. */
+	fifo_reg_0.cfg.srctype = AVI_FIFO_CFG_DMA_TYPE;
+
+	/* Input data depicted by format. */
+	fifo_reg_0.cfg.srcorg = pixfmt.semiplanar;
+	fifo_reg_0.cfg.srcformat = pixfmt.subsampling;
+	fifo_reg_0.cfg.srcbitenc = pixfmt.bitenc;
+
+	/*
+	 * Destination is always a video link in packed 4:4:4
+	 * 32bits format.
+	 */
+	fifo_reg_0.cfg.dsttype = AVI_FIFO_CFG_VL_TYPE;
+	fifo_reg_0.cfg.dstorg = AVI_FIFO_CFG_PACKED_ORG;
+	/* Fix: this statement used to be terminated by a comma operator
+	 * instead of a semicolon (harmless here, but error-prone). */
+	fifo_reg_0.cfg.dstformat = AVI_FIFO_CFG_444_FMT;
+	fifo_reg_0.cfg.dstbitenc = AVI_FIFO_CFG_8888_BENC;
+
+	fifo_reg_0.cfg.dithering = AVI_FIFO_CFG_TRUNC_RND;
+
+	fifo_reg_0.cfg.cusdis = AVI_FIFO_CFG_DISABLE_CUS;
+	fifo_reg_0.cfg.cdsdis = AVI_FIFO_CFG_DISABLE_CDS;
+
+	fifo_reg_0.cfg.reorg = pixfmt.reorg;
+
+	fifo_reg_0.swap0._register = avi_dma_pixfmt_swap0(pixfmt);
+
+	/* swap1 only carries plane1 byte swapping for semiplanar formats */
+	if (pixfmt.semiplanar)
+		fifo_reg_0.swap1._register = avi_dma_pixfmt_swap1(pixfmt);
+	else
+		fifo_reg_0.swap1._register = 0;
+
+	/* Use some known-to-work values for the timeout settings */
+	fifo_reg_0.timeout.vblank = 0;
+	fifo_reg_0.timeout.hblank = 0;
+	/* TODO: We should probably set the pincr as well, but for that we need
+	 * the framerate */
+	fifo_reg_0.timeout.pincr = 1;
+
+	fifo_reg_0.timeout.issuing_capability = 7;
+	fifo_reg_0.timeout.burst = 0;
+
+	/* We send the whole frame at once for now */
+	fifo_reg_0.dmafxycfg.dmafxnb = 1;
+	fifo_reg_0.dmafxycfg.dmafynb = 1;
+	fifo_reg_0.dmafxycfg.dmafxymode = 0;
+	fifo_reg_0.dmafxstr0 = 0;
+	fifo_reg_0.dmafxstr1 = 0;
+	fifo_reg_0.dmafystr0 = 0;
+	fifo_reg_0.dmafystr1 = 0;
+
+	/* Framesize registers hold size - 1; interlaced frames are fetched
+	 * one field (half height) at a time */
+	if (in->interlaced)
+		fifo_reg_0.framesize.height = in->height / 2 - 1;
+	else
+		fifo_reg_0.framesize.height = in->height - 1;
+
+	if (unlikely(in->pix_fmt.id == AVI_BAYER_3X10_32_FMTID))
+		/* In this mode we put 3 10-bit pixels in one 32bit word.
+		 * TODO: this calculation is bogus, it does not work for
+		 * cropping.
+		 */
+		fifo_reg_0.framesize.width = roundup(in->width, 3) - 1;
+	else
+		fifo_reg_0.framesize.width = in->width - 1;
+
+	/* TODO : interlaced management */
+
+	if (avi_pixfmt_is_planar(in->pix_fmt) && fifo_plane1) {
+		/* use 2 FIFOs and a Scaler to manage SEMI-PLANAR formats
+		 * both FIFOs are set to AVI_FIFO_CFG_422_FMT (src/dst)
+		 * never less of desired format (420 or 422)
+		 * FIFO plane0 is set to AVI_FIFO_CFG_Y_SCALPLANE
+		 * FIFO plane1 is set to AVI_FIFO_CFG_UV_SCALPLANE
+		 * Frame height is divided by 2 for FIFO plane1 in case of 420.
+		 * This configuration allows:
+		 * - management of SEMI-PLANAR formats in P7R1/R2
+		 *   where these formats are bugged: workaround
+		 * - a better quality for chrominance (UV) scaling
+		 */
+		fifo_reg_1 = fifo_reg_0;
+
+		fifo_reg_0.cfg.dstformat = AVI_FIFO_CFG_422_FMT;
+		/* If dstorg is planar the FIFOs will only fetch the plane they
+		 * have to. FIFO0 will fetch the plane0 (DMASA0) and FIFO1 will
+		 * fetch plane1 (DMASA1) */
+		fifo_reg_0.cfg.dstorg = AVI_FIFO_CFG_PLANAR_ORG;
+		fifo_reg_0.cfg.srcformat = AVI_FIFO_CFG_422_FMT;
+		fifo_reg_0.cfg.srcorg = AVI_FIFO_CFG_PLANAR_ORG;
+		fifo_reg_0.cfg.scalplane = AVI_FIFO_CFG_Y_SCALPLANE;
+		fifo_reg_0.cfg.cusdis = AVI_FIFO_CFG_ENABLE_CUS;
+		fifo_reg_0.cfg.cdsdis = AVI_FIFO_CFG_ENABLE_CDS;
+
+		fifo_reg_1.cfg.dstformat = AVI_FIFO_CFG_422_FMT;
+		fifo_reg_1.cfg.dstorg = AVI_FIFO_CFG_PLANAR_ORG;
+		fifo_reg_1.cfg.srcformat = AVI_FIFO_CFG_422_FMT;
+		fifo_reg_1.cfg.srcorg = AVI_FIFO_CFG_PLANAR_ORG;
+		fifo_reg_1.cfg.scalplane = AVI_FIFO_CFG_UV_SCALPLANE;
+		fifo_reg_1.cfg.cusdis = AVI_FIFO_CFG_ENABLE_CUS;
+		fifo_reg_1.cfg.cdsdis = AVI_FIFO_CFG_ENABLE_CDS;
+
+		/* For 4:2:0 the chroma plane is half the frame height */
+		if (avi_pixfmt_get_packing(in->pix_fmt) ==
+		    AVI_SEMIPLANAR_YUV_420_PACKING) {
+			fifo_reg_1.framesize.width = in->width - 1;
+			fifo_reg_1.framesize.height = (in->height / 2) - 1;
+		}
+
+		avi_fifo_set_registers(fifo_plane0, &fifo_reg_0);
+		avi_fifo_set_registers(fifo_plane1, &fifo_reg_1);
+	}
+	else
+		avi_fifo_set_registers(fifo_plane0, &fifo_reg_0);
+}
+EXPORT_SYMBOL(avi_dma_setup_input);
+
+/* Program the configuration registers of the output FIFO for format "out".
+ *
+ * @fifo: FIFO node writing the frame back to memory
+ * @out: output segment format
+ *
+ * The DMA start addresses (dmasa0/1) and line strides (dmalstr0/1) are NOT
+ * programmed here, see avi_dma_set_output_buffer(). */
+void avi_dma_setup_output(const struct avi_node *fifo,
+			  const struct avi_segment_format *out)
+{
+	struct avi_fifo_registers fifo_reg;
+	struct avi_dma_pixfmt pixfmt = out->pix_fmt;
+
+	/* Clear overall configuration. */
+	memset(&fifo_reg, 0, sizeof(struct avi_fifo_registers));
+
+	/* Setup interrupts. */
+	fifo_reg.cfg.itline = 0x0F;
+	fifo_reg.cfg.itsource = 1;
+
+	/* Output DMA to memory. */
+	fifo_reg.cfg.dsttype = AVI_FIFO_CFG_DMA_TYPE;
+
+	/* Output data depicted by format. */
+	fifo_reg.cfg.dstorg = pixfmt.semiplanar;
+	fifo_reg.cfg.dstformat = pixfmt.subsampling;
+	fifo_reg.cfg.dstbitenc = pixfmt.bitenc;
+
+	/*
+	 * Source is always a video link in packed 4:4:4
+	 * 32bits format.
+	 */
+	fifo_reg.cfg.srctype = AVI_FIFO_CFG_VL_TYPE;
+	fifo_reg.cfg.srcorg = AVI_FIFO_CFG_PACKED_ORG;
+	/* Fix: this statement used to be terminated by a comma operator
+	 * instead of a semicolon (harmless here, but error-prone). */
+	fifo_reg.cfg.srcformat = AVI_FIFO_CFG_444_FMT;
+	fifo_reg.cfg.srcbitenc = AVI_FIFO_CFG_8888_BENC;
+
+	fifo_reg.cfg.dithering = AVI_FIFO_CFG_TRUNC_RND;
+
+	fifo_reg.cfg.cusdis = AVI_FIFO_CFG_DISABLE_CUS;
+	fifo_reg.cfg.cdsdis = AVI_FIFO_CFG_DISABLE_CDS;
+
+	fifo_reg.cfg.reorg = pixfmt.reorg;
+
+	if (pixfmt.semiplanar) {
+		/* if the format is semiplanar, swap0 and swap1 in pixfmt
+		 * contain the format for both planes */
+		fifo_reg.swap0._register = avi_dma_pixfmt_swap0(pixfmt);
+		fifo_reg.swap1._register = avi_dma_pixfmt_swap1(pixfmt);
+	} else {
+		/* otherwise swap0 and swap1 in pixfmt contain the dma in and
+		 * dma out swap values */
+		fifo_reg.swap0._register = avi_dma_pixfmt_swap1(pixfmt);
+		fifo_reg.swap1._register = 0;
+	}
+
+	/* Use some known-to-work values for the timeout settings */
+	fifo_reg.timeout.vblank = 0;
+	fifo_reg.timeout.hblank = 0;
+	/* TODO: We should probably set the pincr as well, but for that we need
+	 * the framerate */
+	fifo_reg.timeout.pincr = 1;
+
+	fifo_reg.timeout.issuing_capability = 0;
+	fifo_reg.timeout.burst = 0;
+
+	/* We send the whole frame at once for now */
+	fifo_reg.dmafxycfg.dmafxnb = 1;
+	fifo_reg.dmafxycfg.dmafynb = 1;
+	fifo_reg.dmafxycfg.dmafxymode = 0;
+	fifo_reg.dmafxstr0 = 0;
+	fifo_reg.dmafxstr1 = 0;
+	fifo_reg.dmafystr0 = 0;
+	fifo_reg.dmafystr1 = 0;
+
+	/* Framesize registers hold size - 1; interlaced frames are written
+	 * one field (half height) at a time */
+	if (out->interlaced)
+		fifo_reg.framesize.height = out->height / 2 - 1;
+	else
+		fifo_reg.framesize.height = out->height - 1;
+
+	if (unlikely(out->pix_fmt.id == AVI_BAYER_3X10_32_FMTID))
+		/* In this mode we put 3 10-bit pixels in one 32bit word.
+		 * TODO: this calculation is bogus, it does not work for
+		 * cropping.
+		 */
+		fifo_reg.framesize.width = roundup(out->width, 3) - 1;
+	else
+		fifo_reg.framesize.width = out->width - 1;
+
+	/* TODO : interlaced management */
+
+	avi_fifo_set_registers(fifo, &fifo_reg);
+}
+EXPORT_SYMBOL(avi_dma_setup_output);
+
+/* Minimal FIFO setup for an internal (non-DMA) buffering stage: only the
+ * source/destination bit encodings and the frame geometry are programmed,
+ * every other register is left zeroed. */
+void avi_dma_setup_inner_buffer(const struct avi_node *fifo,
+				const struct avi_segment_format *out)
+{
+	struct avi_fifo_registers regs;
+
+	/* Start from an all-zero configuration. */
+	memset(&regs, 0, sizeof(regs));
+
+	regs.cfg.srcbitenc = AVI_FIFO_CFG_8888_BENC;
+	regs.cfg.dstbitenc = AVI_FIFO_CFG_8888_BENC;
+
+	/* Framesize registers hold size - 1. */
+	regs.framesize.width  = out->width - 1;
+	regs.framesize.height = out->height - 1;
+
+	avi_fifo_set_registers(fifo, &regs);
+}
+EXPORT_SYMBOL(avi_dma_setup_inner_buffer);
+
+/**
+ * Computation of line stride
+ *
+ * Line stride is depending on several parameters:
+ * - whole frame line size (see avi_segment.planeX.line_size) in bytes
+ * - stripped frame width size: width * pixel_size
+ * - progressive or interlaced mode
+ * - left address
+ * - right address
+ *
+ * For all revision of P7, first part of computation is the same:
+ * line_stride = line_size - width_size
+ *
+ * Second part of computation only for P7R3.
+ * P7R3 is able to transfer data even if left and right addresses are not
+ * 64 bits aligned, but its internal address pointers work only
+ * on 64 bits addresses. So, line stride must be tuned to take account
+ * of these "overflows".
+ * During transfer, for each line, effective addresses start before
+ * the left address and end after the right address.
+ * The left padding is the modulo 8 of left address, (8 bytes -> 64 bits)
+ * the right padding is the complement of modulo 8 of right address.
+ * Modulo 8 of an address can have the values:
+ * - 0, 4 for 32 bits pixel format
+ * - 0, 2, 4, 6 for 16 bits pixel format
+ * - 0-7 for 8 bits pixel format
+ * For 24 bits pixel format, this is a special case: modulo 8 is replaced
+ * by a "modulo 8-24": effective addresses are 64 bits aligned (8 bytes)
+ * and 24 bits aligned (3 bytes): common multiple 24.
+ * To compute "modulo 8-24", first modulo 8 is computed as usual (result in
+ * 0-7), then it is converted to modulo 24.
+ * This "modulo 8-24" is used in place of modulo 8
+ * for left and right paddings
+ */
+
+/* Map (address % 8) to the equivalent offset modulo 24: entry i is the
+ * smallest multiple of 3 that is congruent to i modulo 8.  Used for 24bit
+ * (3 bytes/pixel) formats, whose addresses must be aligned on a boundary
+ * that is both pixel- (3 bytes) and 64bit- (8 bytes) aligned, i.e. on 24
+ * bytes. */
+static const int avi_dma_24bit_align[] =
+{
+	[0] = 0,
+	[1] = 9,
+	[2] = 18,
+	[3] = 3,
+	[4] = 12,
+	[5] = 21,
+	[6] = 6,
+	[7] = 15
+};
+
+/* Byte alignment constraint for a DMA plane: 8 bytes (64 bits) in general,
+ * but 24 bytes for 3 bytes/pixel formats so that a whole number of pixels
+ * reaches the 64bit boundary. */
+static inline int avi_dma_alignment(unsigned bytes_per_pixel)
+{
+	if (bytes_per_pixel == 3)
+		return 24;
+
+	return 8;
+}
+
+/* Program a FIFO's DMA start addresses (dmasa0/1) and line strides
+ * (dmalstr0/1) for the given format and buffer.
+ *
+ * The base stride is line_size - width_in_bytes.  On P7R3, which accepts
+ * left/right addresses that are not 64bit aligned, the stride is further
+ * reduced by the left and right paddings described in the "modulo 8-24"
+ * comment block above. */
+static void avi_dma_setup_memory_addresses(const struct avi_node *fifo,
+					   const struct avi_segment_format *fmt,
+					   const struct avi_dma_buffer *buffer)
+{
+	struct avi_dma_pixfmt pixfmt;
+	int width_bytes;
+	int pad_left = 0;
+	int pad_right = 0;
+	int tmp;
+	int align;
+	int left;
+	int right;
+	unsigned line_size0;
+	unsigned line_size1;
+	u32 dmasa0;
+	u32 dmasa1;
+	u32 dmalstr0;
+	u32 dmalstr1;
+
+	pixfmt = fmt->pix_fmt;
+
+	line_size0 = fmt->plane0.line_size;
+	line_size1 = fmt->plane1.line_size;
+
+	dmasa0 = buffer->plane0.dma_addr;
+	dmasa1 = buffer->plane1.dma_addr;
+
+	width_bytes = fmt->width * avi_pixel_size0(pixfmt);
+
+	/* Base stride: gap between the end of a line and the start of the
+	 * next one */
+	dmalstr0 = line_size0 - width_bytes;
+
+#ifdef AVI_BACKWARD_COMPAT
+	if (avi_get_revision() >= AVI_REVISION_3) {
+#endif /* AVI_BACKWARD_COMPAT */
+
+	/* The alignment constraint is 8 bytes for all modes except the
+	 * 24bit ones. There we have to have a multiple of 24bytes to
+	 * reach the correct 64bit alignment over a full pixel. */
+	align = avi_dma_alignment(avi_pixel_size0(pixfmt));
+
+	/* Left padding: distance from the previous aligned address to the
+	 * left address */
+	pad_left = 0;
+	left = dmasa0 % 8;
+	if (left != 0) {
+		if (align == 24)
+			left = avi_dma_24bit_align[left];
+		pad_left = left;
+	}
+
+	/* Right padding: distance from the right address to the next
+	 * aligned address */
+	pad_right = 0;
+	right = left + width_bytes;
+	tmp = right % 8;
+	if (tmp != 0) {
+		if (align == 24)
+			tmp = avi_dma_24bit_align[tmp];
+		pad_right = align - tmp;
+	}
+	/* The hardware pointer overshoots by the paddings on each line, so
+	 * they are deducted from the stride */
+	dmalstr0 -= pad_left;
+	dmalstr0 -= pad_right;
+#ifdef AVI_BACKWARD_COMPAT
+	}
+#endif /* AVI_BACKWARD_COMPAT */
+
+	dmalstr1 = 0;
+	if (avi_pixfmt_is_planar(fmt->pix_fmt)) {
+		width_bytes = fmt->width * avi_pixel_size1(pixfmt);
+		dmalstr1 = line_size1 - width_bytes;
+#ifdef AVI_BACKWARD_COMPAT
+		if (avi_get_revision() >= AVI_REVISION_3) {
+#endif /* AVI_BACKWARD_COMPAT */
+			align = avi_dma_alignment(avi_pixel_size1(pixfmt));
+			/* NOTE(review): unlike the plane0 path above, no
+			 * 24bit remapping (avi_dma_24bit_align) is applied
+			 * here — presumably plane1 is never 3 bytes/pixel;
+			 * confirm. */
+			pad_left = 0;
+			left = dmasa1 % 8;
+			if (left != 0)
+				pad_left = left;
+
+			pad_right = 0;
+			right = left + width_bytes;
+			tmp = right % 8;
+			if (tmp != 0) {
+				pad_right = align - tmp;
+			}
+			dmalstr1 -= pad_left;
+			dmalstr1 -= pad_right;
+#ifdef AVI_BACKWARD_COMPAT
+		}
+#endif /* AVI_BACKWARD_COMPAT */
+	}
+
+	avi_fifo_set_dmasa(fifo, dmasa0, dmasa1);
+	avi_fifo_set_dmalstr(fifo, dmalstr0, dmalstr1);
+}
+
+/* Program the DMA start addresses and line strides of the input FIFO(s)
+ * from "buff".  Counterpart of avi_dma_setup_input(), which must have been
+ * called beforehand.  fifo_plane1 is only used for planar formats. */
+void avi_dma_set_input_buffer(const struct avi_node *fifo_plane0,
+			      const struct avi_node *fifo_plane1,
+			      const struct avi_segment_format *in,
+			      const struct avi_dma_buffer *buff)
+{
+	avi_dma_setup_memory_addresses(fifo_plane0, in, buff);
+
+	/* Planar formats fetch plane1 through a second FIFO */
+	if (fifo_plane1 && avi_pixfmt_is_planar(in->pix_fmt))
+		avi_dma_setup_memory_addresses(fifo_plane1, in, buff);
+}
+EXPORT_SYMBOL(avi_dma_set_input_buffer);
+
+/* Program the DMA start addresses and line strides of the output FIFO from
+ * "buff".  Counterpart of avi_dma_setup_output(), which must have been
+ * called beforehand. */
+void avi_dma_set_output_buffer(const struct avi_node *fifo,
+			       const struct avi_segment_format *out,
+			       const struct avi_dma_buffer *buff)
+{
+	avi_dma_setup_memory_addresses(fifo, out, buff);
+}
+EXPORT_SYMBOL(avi_dma_set_output_buffer);
+
+/* Compute the per-plane line sizes for "fmt".
+ *
+ * @fmt: segment format; pix_fmt must be set, plane0/plane1 line_size are
+ *       filled in by this function
+ * @frame_width: whole frame width in pixels
+ * @requested_lnsize0: minimal plane0 line size requested by the caller (in
+ *       bytes); plane1's request is derived from it using the format's
+ *       subsampling
+ *
+ * Line sizes are rounded up so that every line start keeps the AVI
+ * alignment constraint: 64 bits in general, 24 bytes for 3 bytes/pixel
+ * formats. */
+void avi_dma_setup_line_size(struct avi_segment_format *fmt,
+			     const unsigned frame_width,
+			     const unsigned int requested_lnsize0)
+{
+	unsigned pixsz0, pixsz1, align, rem;
+
+	BUG_ON(!fmt);
+	BUG_ON(fmt->pix_fmt.id == AVI_INVALID_FMTID);
+	BUG_ON(!frame_width);
+
+	fmt->plane0.line_size = 0;
+	fmt->plane1.line_size = 0;
+
+	pixsz0 = avi_pixel_size0(fmt->pix_fmt);
+	pixsz1 = avi_pixel_size1(fmt->pix_fmt);
+
+	fmt->plane0.line_size = max(frame_width * pixsz0, requested_lnsize0);
+
+	align = avi_dma_alignment(pixsz0);
+
+	/* Pad the line up to the alignment constraint.  The remainder is
+	 * always taken modulo 8: for the 24 byte constraint the 0-7
+	 * remainder is remapped through avi_dma_24bit_align. */
+	rem = fmt->plane0.line_size % 8;
+	if (rem != 0) {
+		if (align == 24)
+			rem = avi_dma_24bit_align[rem];
+
+		fmt->plane0.line_size += align - rem;
+	}
+
+	/* Plane 1 */
+	if (pixsz1 > 0) {
+		unsigned int requested_lnsize1;
+
+		/* derive requested_lnsize1 from requested_lnsize0 using
+		 * format subsampling */
+		requested_lnsize1 = (requested_lnsize0 * pixsz1) / pixsz0;
+		fmt->plane1.line_size = max(frame_width * pixsz1,
+					    requested_lnsize1);
+		align = avi_dma_alignment(pixsz1);
+
+		/* Fix: the remainder used to be computed modulo "align",
+		 * which for align == 24 could index avi_dma_24bit_align
+		 * (an 8-entry table) out of bounds.  Compute it modulo 8
+		 * exactly like the plane0 path above. */
+		rem = fmt->plane1.line_size % 8;
+		if (rem != 0) {
+			if (align == 24)
+				rem = avi_dma_24bit_align[rem];
+
+			fmt->plane1.line_size += align - rem;
+		}
+	}
+}
+EXPORT_SYMBOL(avi_dma_setup_line_size);
+
+/* Compute the byte size of each plane of a frame.
+ *
+ * @fmt: segment format; pix_fmt and the per-plane line_size fields must be
+ *       set beforehand (see avi_dma_setup_line_size())
+ * @frame_height: whole frame height in rows
+ * @plane0_size/@plane1_size: returned plane sizes in bytes */
+void avi_dma_setup_plane_size(struct avi_segment_format *fmt,
+			      const unsigned frame_height,
+			      unsigned *plane0_size,
+			      unsigned *plane1_size)
+{
+	unsigned full_plane1;
+
+	BUG_ON(!fmt);
+
+	*plane0_size = frame_height * fmt->plane0.line_size;
+	full_plane1  = frame_height * fmt->plane1.line_size;
+
+	switch (avi_pixfmt_get_packing(fmt->pix_fmt)) {
+	case AVI_INTERLEAVED_444_PACKING:
+	case AVI_INTERLEAVED_YUV_422_PACKING:
+		/* Interleaved packings use a single plane */
+		*plane1_size = 0;
+		break;
+	case AVI_SEMIPLANAR_YUV_422_PACKING:
+		*plane1_size = full_plane1;
+		break;
+	case AVI_SEMIPLANAR_YUV_420_PACKING:
+		/* 4:2:0 chroma plane is vertically subsampled by two */
+		*plane1_size = full_plane1 / 2;
+		break;
+	default:
+		BUG();
+	}
+}
+EXPORT_SYMBOL(avi_dma_setup_plane_size);
+
+MODULE_AUTHOR("Didier Leymarie <didier.leymarie.ext@parrot.com>");
+MODULE_DESCRIPTION("Parrot Advanced Video Interface DMA layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi_dma.h b/drivers/parrot/video/avi_dma.h
new file mode 100644
index 00000000000000..3a192473af86b9
--- /dev/null
+++ b/drivers/parrot/video/avi_dma.h
@@ -0,0 +1,182 @@
+#ifndef _AVI_DMA_H_
+#define _AVI_DMA_H_
+
+/*
+ * linux/drivers/parrot/video/avi_dma.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Didier Leymarie <didier.leymarie.ext@parrot.com>
+ * @date 15-Jan-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "avi_compat.h"
+
+/* This struct has been packed to fit in 32bits. This way we can store it and
+ * pass it by value everywhere instead of using an indirection through an
+ * array. */
+struct avi_dma_pixfmt {
+	/* Unique format ID (the rest of the config might not be unique, for
+	 * instance bayer 3x10 and rgb565 share the same configuration)*/
+	unsigned id : 6;
+	/* Bit encoding (565, 888, 8888 etc...) */
+	unsigned bitenc : 4;
+	/* Chroma subsampling: 4:4:4/4:2:2/4:2:0 */
+	unsigned subsampling : 2;
+	/* Component reorganisation for 16 and 24bit formats. */
+	unsigned reorg : 2;
+	/* set if the format is semiplanar */
+	unsigned semiplanar : 1;
+	/* RAW bayer 'space' */
+	unsigned raw : 1;
+	/* Width of the pixels in both planes (in bytes - 1, i.e. a stored
+	 * value of 0 means 1 byte/pixel) */
+	unsigned bytes_per_pixel0 : 2;
+	unsigned bytes_per_pixel1 : 2;
+	/* swap value. The value of the last byte is implied as
+	 * b0 ^ b1 ^ b2.
+	 *
+	 * If the format is planar, swap0 is plane0 and swap1 is
+	 * plane1.
+	 *
+	 * Otherwise swap0 is the dma_in swap value and swap0
+	 * the dma out. That should take care of all weird
+	 * cases. */
+	unsigned swap0_b0 : 2;
+	unsigned swap0_b1 : 2;
+	unsigned swap0_b2 : 2;
+	unsigned swap1_b0 : 2;
+	unsigned swap1_b1 : 2;
+	unsigned swap1_b2 : 2;
+};
+
+/* Dummy function to validate the struct is correctly packed */
+static inline void avi_dma_check_pixfmt_packing__dummy(void)
+{
+	/* Compile-time check: the bitfields above must fit exactly in 32
+	 * bits so the format can cheaply be passed around by value */
+	BUILD_BUG_ON(sizeof(struct avi_dma_pixfmt) != sizeof(u32));
+}
+
+/* Life-cycle states of an avi_dma_buffer */
+enum avi_dma_buffer_status
+{
+	/* Zero, so a zero-initialized buffer is invalid by default */
+	AVI_BUFFER_INVAL = 0,
+	AVI_BUFFER_READY,
+	AVI_BUFFER_DONE,
+	AVI_BUFFER_ERROR,
+	AVI_BUFFER_TIMEOUT,
+};
+
+/* AVI Buffer struct */
+struct avi_dma_buffer
+{
+	/* DMA (bus) start address of each plane; plane1 is only meaningful
+	 * for planar pixel formats */
+	struct {
+		dma_addr_t dma_addr;
+	} plane0, plane1;
+
+	enum avi_dma_buffer_status status;
+
+	/* Private client data */
+	void *priv;
+};
+
+/* Forward declare avi_segment_format to prevent circular includes */
+struct avi_segment_format;
+
+/* Setup FIFO registers for DMA input according to avi_segment_format
+ * @fifo_plane0: avi_node for FIFO on Plane 0
+ * @fifo_plane1: avi_node for FIFO on Plane 1, Planar configuration, optional
+ * @in: input avi_segment_format
+ * Note: FIFOs registers StartAddress dmasa0, dmasa1
+ * and LineStride dmalstr0,dmalstr1 are not set by this function
+ */
+extern
+void avi_dma_setup_input(const struct avi_node *fifo_plane0,
+ const struct avi_node *fifo_plane1,
+ const struct avi_segment_format *in);
+
+/* Setup FIFO registers for DMA output according to avi_segment_format
+ * @fifo: avi_node for FIFO
+ * @out: output avi_segment_format
+ * Note: FIFOs registers StartAddress dmasa0, dmasa1
+ * and LineStride dmalstr0,dmalstr1 are not set by this function
+ */
+extern
+void avi_dma_setup_output(const struct avi_node *fifo,
+ const struct avi_segment_format *out);
+
+/* Setup FIFO registers for DMA input
+ * according to avi_segment_format and avi_dma_buffer
+ * @fifo_plane0: avi_node for FIFO on Plane 0
+ * @fifo_plane1: avi_node for FIFO on Plane 1, Planar configuration, optional
+ * @in: input avi_segment_format
+ * @buff: input avi_dma_buffer
+ * This function is the counterpart of avi_dma_setup_input()
+ * which must be called before.
+ * This function sets the FIFO registers StartAddress (dmasa0, dmasa1)
+ * and LineStride (dmalstr0, dmalstr1) in compliance with AVI constraints
+ */
+extern
+void avi_dma_set_input_buffer(const struct avi_node *fifo_plane0,
+ const struct avi_node *fifo_plane1,
+ const struct avi_segment_format *in,
+ const struct avi_dma_buffer *buff);
+
+/* Setup FIFO registers for DMA output
+ * according to avi_segment_format and avi_dma_buffer
+ * @fifo: avi_node for FIFO
+ * @out: output avi_segment_format
+ * @buff: output avi_dma_buffer
+ * This function is the counterpart of avi_dma_setup_output()
+ * which must be called before.
+ * This function sets the FIFO registers StartAddress (dmasa0, dmasa1)
+ * and LineStride (dmalstr0, dmalstr1) in compliance with AVI constraints
+ */
+extern
+void avi_dma_set_output_buffer(const struct avi_node *fifo,
+ const struct avi_segment_format *out,
+ const struct avi_dma_buffer *buff);
+
+/* Setup line_size fields in avi_segment_format
+ * according to avi_segment_format and AVI constraints
+ * @fmt: avi_segment_format
+ * @frame_width: width in pixels of whole frame
+ * Following fmt fields must be set before calling:
+ * pix_fmt: pixel format
+ * This function set fmt fields:
+ * plane0.line_size
+ * plane1.line_size
+ */
+extern
+void avi_dma_setup_line_size(struct avi_segment_format *fmt,
+ const unsigned frame_width,
+ const unsigned int requested_lnsize0);
+
+/* Compute plane sizes for a given avi_segment_format
+ * according AVI constraints
+ * @fmt (input): avi_segment_format
+ * @frame_height (input): height in rows of whole frame
+ * @plane0_size (output): pointer to returned size of plane 0
+ * @plane1_size (output): pointer to returned size of plane 1
+ * Following fmt fields must be set before calling (use avi_dma_setup_line_size)
+ * pix_fmt: pixel format
+ * plane0.line_size
+ * plane1.line_size
+ */
+extern
+void avi_dma_setup_plane_size(struct avi_segment_format *fmt,
+ const unsigned frame_height,
+ unsigned *plane0_size,
+ unsigned *plane1_size);
+#endif /* _AVI_DMA_H_ */
diff --git a/drivers/parrot/video/avi_isp.c b/drivers/parrot/video/avi_isp.c
new file mode 100644
index 00000000000000..4dad1cacfdebeb
--- /dev/null
+++ b/drivers/parrot/video/avi_isp.c
@@ -0,0 +1,111 @@
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include "avi_segment.h"
+#include "avi_isp.h"
+
+/* Bypass all ISP modules
+ *
+ * Sets every bypass bit of the bayer-chain inter-module register of
+ * @node, turning the whole bayer ISP chain into a pass-through.
+ */
+void avi_isp_chain_bayer_bypass(struct avi_node *node)
+{
+	struct avi_isp_chain_bayer_inter_regs regs = {
+		.module_bypass.pedestal_bypass     = 1,
+		.module_bypass.grim_bypass         = 1,
+		.module_bypass.rip_bypass          = 1,
+		.module_bypass.denoise_bypass      = 1,
+		.module_bypass.lsc_bypass          = 1,
+		.module_bypass.chroma_aber_bypass  = 1,
+		.module_bypass.bayer_bypass        = 1,
+		.module_bypass.color_matrix_bypass = 1,
+	};
+
+	avi_isp_chain_bayer_inter_set_registers(node, &regs);
+}
+EXPORT_SYMBOL(avi_isp_chain_bayer_bypass);
+
+/* Bypass all the modules of the YUV ISP chain of @node. */
+void avi_isp_chain_yuv_bypass(struct avi_node *node)
+{
+	struct avi_isp_chain_yuv_inter_regs regs = {
+		.module_bypass.ee_crf_bypass  = 1,
+		.module_bypass.i3d_lut_bypass = 1,
+		.module_bypass.drop_bypass    = 1,
+	};
+
+	avi_isp_chain_yuv_inter_set_registers(node, &regs);
+}
+EXPORT_SYMBOL(avi_isp_chain_yuv_bypass);
+
+/* Configure the bayer statistics window grid.
+ *
+ * @node:         statistics node to configure
+ * @width:        frame width in pixels
+ * @height:       frame height in lines
+ * @thumb_width:  number of statistics windows per line
+ * @thumb_height: number of statistics window rows
+ *
+ * Only the window geometry is touched: the rest of the node configuration
+ * is read back and written unmodified. Window sizes and offsets are forced
+ * even as the hardware requires, and the window grid is centered within
+ * the frame.
+ */
+void avi_statistics_bayer_configure(struct avi_node *node,
+				    unsigned width,
+				    unsigned height,
+				    unsigned thumb_width,
+				    unsigned thumb_height)
+{
+	struct avi_isp_statistics_bayer_regs stats_cfg;
+
+	/* Guard the divisions and modulos below against a zero grid */
+	if (thumb_width == 0 || thumb_height == 0)
+		return;
+
+	avi_isp_statistics_bayer_get_registers(node, &stats_cfg);
+
+	/* Update only window settings */
+	stats_cfg.max_nb_windows.x_window_count = thumb_width;
+	stats_cfg.max_nb_windows.y_window_count = thumb_height;
+
+	/* All the stats config has to be even */
+	stats_cfg.window_x.x_width = (width / thumb_width) & (~1UL);
+	stats_cfg.window_y.y_width = (height / thumb_height) & (~1UL);
+
+	/* Center the grid */
+	stats_cfg.window_x.x_offset = ((width % thumb_width) / 2) & (~1UL);
+	stats_cfg.window_y.y_offset = ((height % thumb_height) / 2) & (~1UL);
+
+	avi_isp_statistics_bayer_set_registers(node, &stats_cfg);
+}
+EXPORT_SYMBOL(avi_statistics_bayer_configure);
+
+/* Get all ISP modules offset
+ *
+ * Fills @offsets with the register base offset of every ISP node attached
+ * to segment @s: bayer chain, gamma corrector, chroma converter, YUV
+ * statistics and YUV chain.
+ *
+ * Returns 0 on success, -ENODEV if @s is NULL or if any of the required
+ * ISP capabilities is missing from the segment.
+ */
+int avi_isp_get_offsets(struct avi_segment *s, struct avi_isp_offsets *offsets)
+{
+	struct avi_node *n;
+
+	if (s == NULL)
+		return -ENODEV;
+
+	/* Look up the node providing capability _cap in the segment and
+	 * store its base offset into _off, failing if it is absent. */
+#define GET_BASE(_off, _cap) do { \
+	n = avi_segment_get_node(s, _cap); \
+	if (n == NULL) \
+		return -ENODEV; \
+	_off = n->base_off; \
+	} while(0)
+
+	GET_BASE(offsets->chain_bayer, AVI_CAP_ISP_CHAIN_BAYER);
+	GET_BASE(offsets->gamma_corrector, AVI_CAP_GAM);
+	GET_BASE(offsets->chroma, AVI_CAP_CONV);
+	GET_BASE(offsets->statistics_yuv, AVI_CAP_STATS_YUV);
+	GET_BASE(offsets->chain_yuv, AVI_CAP_ISP_CHAIN_YUV);
+
+#undef GET_BASE
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_isp_get_offsets);
+
+/* Return the capability of the bayer statistics node matching the
+ * ISP_CHAIN_BAYER instance contained in segment @s.
+ *
+ * Returns AVI_CAP_STATS_BAYER_0 or AVI_CAP_STATS_BAYER_1 depending on
+ * which bayer chain node the segment holds, or 0 if the segment has no
+ * bayer chain at all.
+ */
+unsigned long avi_isp_bayer_get_stats_cap(struct avi_segment *s)
+{
+	struct avi_node *bayer;
+
+	bayer = avi_segment_get_node(s, AVI_CAP_ISP_CHAIN_BAYER);
+
+	if (bayer == NULL)
+		return 0;
+
+	switch (bayer->node_id) {
+	case AVI_ISP_CHAIN_BAYER0_NODE:
+		return AVI_CAP_STATS_BAYER_0;
+	case AVI_ISP_CHAIN_BAYER1_NODE:
+		return AVI_CAP_STATS_BAYER_1;
+	default:
+		/* A bayer chain node must be one of the two instances */
+		BUG();
+	}
+
+	/* Not reached; silences "control reaches end of non-void function"
+	 * on toolchains that don't know BUG() never returns. */
+	return 0;
+}
+EXPORT_SYMBOL(avi_isp_bayer_get_stats_cap);
diff --git a/drivers/parrot/video/avi_isp.h b/drivers/parrot/video/avi_isp.h
new file mode 100644
index 00000000000000..4ab96c1a143ef4
--- /dev/null
+++ b/drivers/parrot/video/avi_isp.h
@@ -0,0 +1,69 @@
+#ifndef _AVI_ISP_H_
+#define _AVI_ISP_H_
+
+/*
+ * linux/drivers/parrot/video/avi_isp.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Eng-Hong Sron <eng-hong.sron@parrot.com>
+ * @date 20-Jan-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file contains the isp configuration code. It can be included from userland.
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct avi_isp_offsets {
+ __u32 chain_bayer;
+ __u32 gamma_corrector;
+ __u32 chroma;
+ __u32 statistics_yuv;
+ __u32 chain_yuv;
+};
+
+/* ISP nodes offset */
+#define AVI_ISP_IOGET_OFFSETS _IOR('F', 0x33, struct avi_isp_offsets)
+
+#ifdef __KERNEL__
+
+struct avi_node;
+struct avi_segment;
+
+extern void avi_isp_chain_bayer_bypass(struct avi_node *);
+extern void avi_isp_chain_yuv_bypass(struct avi_node *);
+
+extern void avi_statistics_bayer_configure(struct avi_node *,
+ unsigned,
+ unsigned,
+ unsigned,
+ unsigned);
+
+extern int avi_isp_get_offsets(struct avi_segment *,
+ struct avi_isp_offsets *);
+
+/* Return the capability of the stat node corresponding to an ISP_CHAIN_BAYER
+ * contained in the segment. Returns 0 if the segment doesn't contain an
+ * ISP_CHAIN_BAYER. */
+extern unsigned long avi_isp_bayer_get_stats_cap(struct avi_segment *);
+
+#endif /* __KERNEL__ */
+
+#endif /* _AVI_ISP_H_ */
diff --git a/drivers/parrot/video/avi_limit.c b/drivers/parrot/video/avi_limit.c
new file mode 100644
index 00000000000000..3eff74f9fe270d
--- /dev/null
+++ b/drivers/parrot/video/avi_limit.c
@@ -0,0 +1,258 @@
+/*
+ * linux/drivers/parrot/video/avi_limit.c
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 24-Feb-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include "avi_limit.h"
+
+#define AVI_LIMIT_MAX 0x10000U
+
+/* Limitations types according to P7 User Manual */
+enum avi_pixfmt_type {
+ AVI_LIMIT_UNKNOWN = 0,
+ AVI_LIMIT_PACKED_444_16BITS,
+ AVI_LIMIT_PACKED_444_24BITS,
+ AVI_LIMIT_PACKED_444_32BITS,
+ AVI_LIMIT_PACKED_422_16BITS,
+ AVI_LIMIT_PLANAR_422_16BITS,
+ AVI_LIMIT_PLANAR_420_16BITS,
+ AVI_LIMIT_NR,
+};
+
+enum avi_limit_type {
+ AVI_LIMIT_WIDTH = 0,
+ AVI_LIMIT_HEIGHT,
+ AVI_LIMIT_WIDTH_STRIP,
+ AVI_LIMIT_HEIGHT_STRIP,
+ AVI_LIMIT_LEFT,
+ AVI_LIMIT_TOP,
+};
+
+/* Frame and strip height limitations are the same on P7R2 and P7R3 */
+static unsigned avi_limit_height[] = {
+ [AVI_LIMIT_UNKNOWN] = 2,
+ [AVI_LIMIT_PACKED_444_16BITS] = 1,
+ [AVI_LIMIT_PACKED_444_24BITS] = 1,
+ [AVI_LIMIT_PACKED_444_32BITS] = 1,
+ [AVI_LIMIT_PACKED_422_16BITS] = 1,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 1,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 2,
+};
+
+/* Frame width limitations are the same on P7R2 and P7R3 */
+static unsigned avi_limit_width[] = {
+ [AVI_LIMIT_UNKNOWN] = 8,
+ [AVI_LIMIT_PACKED_444_16BITS] = 4,
+ [AVI_LIMIT_PACKED_444_24BITS] = 8,
+ [AVI_LIMIT_PACKED_444_32BITS] = 2,
+ [AVI_LIMIT_PACKED_422_16BITS] = 4,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 8,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 8,
+};
+
+/* Strip width */
+static unsigned avi_limit_r2_width_strip[] = {
+ [AVI_LIMIT_UNKNOWN] = 8,
+ [AVI_LIMIT_PACKED_444_16BITS] = 1,
+ [AVI_LIMIT_PACKED_444_24BITS] = 1,
+ [AVI_LIMIT_PACKED_444_32BITS] = 1,
+ [AVI_LIMIT_PACKED_422_16BITS] = 8,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 2,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 8,
+};
+
+static unsigned avi_limit_r3_width_strip[] = {
+ [AVI_LIMIT_UNKNOWN] = 8,
+ [AVI_LIMIT_PACKED_444_16BITS] = 1,
+ [AVI_LIMIT_PACKED_444_24BITS] = 1,
+ [AVI_LIMIT_PACKED_444_32BITS] = 1,
+ [AVI_LIMIT_PACKED_422_16BITS] = 2,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 2,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 2,
+};
+
+/* left constraints (derivates from start adress limitations) */
+static unsigned avi_limit_r2_left_strip[] = {
+ [AVI_LIMIT_UNKNOWN] = 8,
+ [AVI_LIMIT_PACKED_444_16BITS] = 4,
+ [AVI_LIMIT_PACKED_444_24BITS] = 8,
+ [AVI_LIMIT_PACKED_444_32BITS] = 2,
+ [AVI_LIMIT_PACKED_422_16BITS] = 4,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 4,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 4,
+};
+
+static unsigned avi_limit_r3_left_strip[] = {
+ [AVI_LIMIT_UNKNOWN] = 8,
+ [AVI_LIMIT_PACKED_444_16BITS] = 1,
+ [AVI_LIMIT_PACKED_444_24BITS] = 1,
+ [AVI_LIMIT_PACKED_444_32BITS] = 1,
+ [AVI_LIMIT_PACKED_422_16BITS] = 1,
+ [AVI_LIMIT_PLANAR_422_16BITS] = 2,
+ [AVI_LIMIT_PLANAR_420_16BITS] = 2,
+};
+
+/* Map a DMA pixel format to its limitation class (see the tables above).
+ *
+ * Returns AVI_LIMIT_UNKNOWN when the format matches no known class, which
+ * selects the most conservative limits.
+ */
+static enum avi_pixfmt_type avi_limit_get_fmttype(struct avi_dma_pixfmt pixfmt)
+{
+	if (pixfmt.semiplanar == 0) {
+		if (pixfmt.subsampling == AVI_FIFO_CFG_422_FMT)
+			return AVI_LIMIT_PACKED_422_16BITS;
+		else if (pixfmt.subsampling == AVI_FIFO_CFG_444_FMT)
+			/* Packed 4:4:4: the class depends on the pixel
+			 * width in memory */
+			switch (pixfmt.bitenc) {
+			case AVI_FIFO_CFG_4444_BENC:
+			case AVI_FIFO_CFG_565_BENC:
+			case AVI_FIFO_CFG_LSB555_BENC:
+			case AVI_FIFO_CFG_1555_BENC:
+				return AVI_LIMIT_PACKED_444_16BITS;
+			case AVI_FIFO_CFG_888_BENC:
+				return AVI_LIMIT_PACKED_444_24BITS;
+			case AVI_FIFO_CFG_8888_BENC:
+				return AVI_LIMIT_PACKED_444_32BITS;
+			default:
+				return AVI_LIMIT_UNKNOWN;
+			}
+	}
+	else if (pixfmt.semiplanar == 1) {
+		if (pixfmt.subsampling == AVI_FIFO_CFG_420_FMT)
+			return AVI_LIMIT_PLANAR_420_16BITS;
+		else if (pixfmt.subsampling == AVI_FIFO_CFG_422_FMT)
+			return AVI_LIMIT_PLANAR_422_16BITS;
+	}
+	return AVI_LIMIT_UNKNOWN;
+}
+
+/* Return the limitation table matching constraint @type.
+ *
+ * Height, strip height and top constraints share a single table valid for
+ * all revisions; strip width and left constraints differ between P7R2 and
+ * later revisions.
+ */
+static unsigned *avi_limit_get_limits(enum avi_limit_type type)
+{
+	int revision = avi_get_revision();
+
+	switch(type) {
+	case AVI_LIMIT_HEIGHT:
+	case AVI_LIMIT_HEIGHT_STRIP:
+	case AVI_LIMIT_TOP:
+		return avi_limit_height;
+	case AVI_LIMIT_WIDTH:
+		return avi_limit_width;
+	case AVI_LIMIT_WIDTH_STRIP:
+		if (revision == AVI_REVISION_2)
+			return avi_limit_r2_width_strip;
+		else
+			return avi_limit_r3_width_strip;
+	case AVI_LIMIT_LEFT:
+		if (revision == AVI_REVISION_2)
+			return avi_limit_r2_left_strip;
+		else
+			return avi_limit_r3_left_strip;
+	}
+
+	/* Every enum value is handled above: getting here means @type is
+	 * corrupted. */
+	pr_err("%s:unknown type %d with revision %d\n",
+	       __func__, type, revision);
+	BUG_ON(1);
+	return NULL;
+}
+
+/* Round down *val according to the limitations of the given pixel format
+ * and constraint type, keeping it in the valid hardware range.
+ *
+ * @pixfmt: pixel format selecting the limitation class
+ * @val:    in/out value to adjust
+ * @type:   which constraint (width/height/strip/offset) is being adjusted
+ */
+static void avi_limit_adjust(struct avi_dma_pixfmt pixfmt,
+			     unsigned *val,
+			     enum avi_limit_type type)
+{
+	enum avi_pixfmt_type fmttype = avi_limit_get_fmttype(pixfmt);
+	unsigned *limitations = avi_limit_get_limits(type);
+	unsigned limit = limitations[fmttype];
+
+	*val = min(AVI_LIMIT_MAX, round_down(*val, limit));
+
+	/* We need at least a positive value for width and height */
+	if ((type != AVI_LIMIT_LEFT) && (type != AVI_LIMIT_TOP)
+	    && (*val < limit))
+		*val = limit;
+
+	/* P7R2 semiplanar limitation : width have to be at least 112 pixels.
+	 * This covers both frame width and strip width (the original "&&"
+	 * made the condition unsatisfiable). */
+	if (AVI_REVISION_2 == avi_get_revision() && pixfmt.semiplanar
+	    && ((type == AVI_LIMIT_WIDTH) || (type == AVI_LIMIT_WIDTH_STRIP))
+	    && (*val < 112))
+		*val = 112;
+}
+
+/*******************************************************************************
+ * Provided helpers
+ ******************************************************************************/
+
+/* Clamp *h to the hardware maximum then round it down to the height
+ * granularity of @pixfmt. */
+void avi_limit_adjust_height(struct avi_dma_pixfmt pixfmt, unsigned *h)
+{
+	if (*h > (unsigned)AVI_LIMIT_MAX_HEIGHT)
+		*h = AVI_LIMIT_MAX_HEIGHT;
+
+	avi_limit_adjust(pixfmt, h, AVI_LIMIT_HEIGHT);
+}
+EXPORT_SYMBOL(avi_limit_adjust_height);
+
+/* FIXME : Add Semi-planar 420 constraint. As pixels blocs are 2 pixel high,
+ * the AVI FIFO need to contain a complete line. The minimal FIFO depth is
+ * 4Ko so we cant have more than 4096 width */
+/* Clamp *w to the hardware maximum and round it down to the width
+ * granularity of @pixfmt. */
+void avi_limit_adjust_width(struct avi_dma_pixfmt pixfmt, unsigned *w)
+{
+	/* P7R2 semiplanar limitation : width have to be at least 112 pixels */
+	if (pixfmt.semiplanar
+	    && (avi_get_revision() == AVI_REVISION_2)
+	    && (*w < 112))
+		*w = 112;
+
+	if (*w > (unsigned)AVI_LIMIT_MAX_WIDTH)
+		*w = AVI_LIMIT_MAX_WIDTH;
+
+	avi_limit_adjust(pixfmt, w, AVI_LIMIT_WIDTH);
+}
+EXPORT_SYMBOL(avi_limit_adjust_width);
+
+/* This function adjust both left and width strip values
+ *
+ * @pixfmt:  pixel format of the frame
+ * @w:       full frame width in pixels
+ * @strip_w: in/out strip width
+ * @left:    in/out horizontal offset of the strip within the frame
+ *
+ * The strip width is clamped to the frame width and rounded down to the
+ * strip-width granularity of the format; left is then reduced so the
+ * strip stays inside the frame and rounded down to the start-address
+ * granularity.
+ */
+void avi_limit_adjust_w_strip(struct avi_dma_pixfmt pixfmt,
+			      unsigned w,
+			      unsigned *strip_w,
+			      unsigned *left)
+{
+	/* We cant strip more than image size */
+	*strip_w = min(*strip_w, w);
+	avi_limit_adjust(pixfmt, strip_w, AVI_LIMIT_WIDTH_STRIP);
+
+	/* if strip is out of bounds we reduce left */
+	*left = min(*left, (w - *strip_w));
+	avi_limit_adjust(pixfmt, left, AVI_LIMIT_LEFT);
+}
+EXPORT_SYMBOL(avi_limit_adjust_w_strip);
+
+/* This function adjust both top and height strip values
+ *
+ * @pixfmt:  pixel format of the frame
+ * @h:       full frame height in lines
+ * @strip_h: in/out strip height
+ * @top:     in/out vertical offset of the strip within the frame
+ *
+ * The strip height is clamped to the frame height and rounded down to the
+ * height granularity of the format; top is then reduced so the strip
+ * stays inside the frame and rounded down as well.
+ */
+void avi_limit_adjust_h_strip(struct avi_dma_pixfmt pixfmt,
+			      unsigned h,
+			      unsigned *strip_h,
+			      unsigned *top)
+{
+	/* We cant strip more than image size*/
+	*strip_h = min(*strip_h, h);
+	avi_limit_adjust(pixfmt, strip_h, AVI_LIMIT_HEIGHT_STRIP);
+
+	/* if strip is out of bounds we reduce top */
+	*top = min(*top, (h - *strip_h));
+	avi_limit_adjust(pixfmt, top, AVI_LIMIT_TOP);
+}
+EXPORT_SYMBOL(avi_limit_adjust_h_strip);
+
+MODULE_AUTHOR("Victor Lambret <victor.lambret.ext@parrot.com>");
+MODULE_DESCRIPTION("Parrot Advanced Video Interface Limitations layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi_limit.h b/drivers/parrot/video/avi_limit.h
new file mode 100644
index 00000000000000..2983190432ac46
--- /dev/null
+++ b/drivers/parrot/video/avi_limit.h
@@ -0,0 +1,56 @@
+/*
+ * linux/drivers/parrot/video/avi_limit.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * @author Victor Lambret <victor.lambret.ext@parrot.com>
+ * @date 24-Feb-2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _AVI_LIMIT_H_
+#define _AVI_LIMIT_H_
+
+#include "avi_dma.h"
+#include "avi_pixfmt.h"
+
+/* AVI registers are 16bits wide */
+#define AVI_LIMIT_MAX_WIDTH 0xFFFF
+#define AVI_LIMIT_MAX_HEIGHT 0xFFFF
+
+/* Maximum frequency for LCD output and camera input clocks in kHz */
+#define AVI_LIMIT_MAX_PHY_FREQ 165000
+
+/* Those functions are helpers rounding down a value according to P7R2 and P7R3
+ * limitations for both the image frame and the strip used by the FIFOs */
+
+extern void avi_limit_adjust_height(struct avi_dma_pixfmt pixfmt,
+ unsigned *h);
+extern void avi_limit_adjust_width(struct avi_dma_pixfmt pixfmt,
+ unsigned *w);
+/* This function adjust both left and width strip values */
+extern void avi_limit_adjust_w_strip(struct avi_dma_pixfmt pixfmt,
+ unsigned w,
+ unsigned *strip_w,
+ unsigned *left);
+
+/* This function adjust both top and height strip values */
+extern void avi_limit_adjust_h_strip(struct avi_dma_pixfmt pixfmt,
+ unsigned h,
+ unsigned *strip_h,
+ unsigned *top);
+
+#endif /* _AVI_LIMIT_H_ */
diff --git a/drivers/parrot/video/avi_logger.c b/drivers/parrot/video/avi_logger.c
new file mode 100644
index 00000000000000..3c1e58b74a31b2
--- /dev/null
+++ b/drivers/parrot/video/avi_logger.c
@@ -0,0 +1,149 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+#include "avi_logger.h"
+
+/* Must be a power of two */
+#define AVI_LOGGER_BUF_SZ 0x4000
+
+struct avi_register_access {
+ /* I store the direction in reg's lsb */
+ u32 reg;
+ u32 val;
+};
+
+struct avi_logger {
+ unsigned long head;
+ unsigned long nwrap;
+ spinlock_t lock;
+ struct avi_register_access *log;
+};
+
+static struct avi_logger avi_logger = {
+ .head = 0,
+ .nwrap = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(avi_logger.lock),
+ .log = NULL,
+};
+
+/* Allocate and zero the register access log buffer.
+ *
+ * Returns 0 on success, -ENOMEM if the buffer cannot be allocated.
+ */
+int avi_log_init(void)
+{
+	size_t sz = AVI_LOGGER_BUF_SZ * sizeof(*avi_logger.log);
+
+	avi_logger.log = vmalloc(sz);
+	if (!avi_logger.log)
+		return -ENOMEM;
+
+	memset(avi_logger.log, 0, sz);
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_log_init);
+
+/* Record one register access in the circular log.
+ *
+ * @reg:  register address; must be 32bit aligned
+ * @val:  value read from or written to the register
+ * @type: direction of the access
+ *
+ * Safe to call from any context: the log is protected by an irqsave
+ * spinlock. The access is silently dropped when the logger was never
+ * initialized.
+ */
+void avi_log_access(u32 reg, u32 val, enum avi_log_type type)
+{
+	unsigned long flags;
+	struct avi_register_access *a;
+
+	/* The register must be 32bit aligned */
+	BUG_ON(reg & 3);
+	BUG_ON(type > 3);
+
+	/* Cram the type into the register value to save space */
+	reg |= type;
+
+	spin_lock_irqsave(&avi_logger.lock, flags);
+
+	if (avi_logger.log == NULL)
+		/* No log available */
+		goto unlock;
+
+	avi_logger.head = (avi_logger.head + 1) % AVI_LOGGER_BUF_SZ;
+
+	if (avi_logger.head == 0)
+		/* We wrapped around */
+		avi_logger.nwrap++;
+
+	a = &avi_logger.log[avi_logger.head];
+	a->reg = reg;
+	a->val = val;
+
+ unlock:
+	spin_unlock_irqrestore(&avi_logger.lock, flags);
+}
+EXPORT_SYMBOL(avi_log_access);
+
+/* Format the log snapshot @log into seq_file @s.
+ *
+ * @head:  index of the most recent entry in @log
+ * @index: monotonic sequence number of the oldest entry, used to label
+ *         the output lines
+ *
+ * Walks the ring from oldest to newest, decoding the access direction
+ * from the two low bits of each stored register address. Entries still
+ * tagged AVI_LOG_NONE (never written) are skipped. The type bits are
+ * cleared in place, which is fine since @log is a private copy made by
+ * avi_log_display().
+ */
+static void avi_log_format(struct seq_file *s,
+			   struct avi_register_access *log,
+			   unsigned long head,
+			   unsigned long index)
+{
+	struct avi_register_access *a;
+	enum avi_log_type t;
+	unsigned long i;
+
+	for (i = (head + 1) % AVI_LOGGER_BUF_SZ;
+	     i != head;
+	     i = (i + 1) % AVI_LOGGER_BUF_SZ) {
+		a = &log[i];
+
+		t = a->reg & 3;
+		a->reg &= ~3UL;
+
+		if (t != AVI_LOG_NONE)
+			seq_printf(s, "[%8lx] %c 0x%08x @ 0x%08x\n",
+				   index,
+				   (t == AVI_LOG_READ) ? 'R' : 'W',
+				   a->val,
+				   a->reg);
+
+		index++;
+	}
+}
+
+/* Dump the register access log into seq_file @s.
+ *
+ * A snapshot of the log is taken under the lock and formatted afterwards
+ * so that the spinlock is not held while printing.
+ *
+ * Returns 0 on success, -ENOMEM if the snapshot cannot be allocated or
+ * -ENODEV if the logger was never initialized.
+ */
+int avi_log_display(struct seq_file *s)
+{
+	struct avi_register_access *log;
+	/* Initialized to placate "may be used uninitialized" warnings:
+	 * they are only read when ret == 0, i.e. after being set. */
+	unsigned long head = 0;
+	unsigned long index = 0;
+	unsigned long flags;
+	int ret = 0;
+
+	/* We make a copy of the log before formating it to limit the time we
+	 * spend holding the lock */
+	log = vmalloc(AVI_LOGGER_BUF_SZ * sizeof(*avi_logger.log));
+	if (log == NULL)
+		return -ENOMEM;
+
+	memset(log, 0, AVI_LOGGER_BUF_SZ * sizeof(*log));
+
+	spin_lock_irqsave(&avi_logger.lock, flags);
+
+	if (avi_logger.log == NULL) {
+		/* No log available */
+		ret = -ENODEV;
+		goto copy_done;
+	}
+
+	memcpy(log, avi_logger.log, AVI_LOGGER_BUF_SZ * sizeof(*log));
+	head = avi_logger.head;
+	index = avi_logger.nwrap * AVI_LOGGER_BUF_SZ + avi_logger.head - 1;
+
+ copy_done:
+	spin_unlock_irqrestore(&avi_logger.lock, flags);
+
+	if (ret == 0)
+		avi_log_format(s, log, head, index);
+
+	vfree(log);
+	return ret;
+}
+EXPORT_SYMBOL(avi_log_display);
diff --git a/drivers/parrot/video/avi_logger.h b/drivers/parrot/video/avi_logger.h
new file mode 100644
index 00000000000000..753200c462fe01
--- /dev/null
+++ b/drivers/parrot/video/avi_logger.h
@@ -0,0 +1,36 @@
+#ifndef _AVI_LOGGER_H_
+#define _AVI_LOGGER_H_
+
+#include <linux/errno.h>
+
+enum avi_log_type {
+ AVI_LOG_NONE = 0,
+ AVI_LOG_READ = 1,
+ AVI_LOG_WRITE = 2,
+};
+
+#ifdef CONFIG_AVI_LOGGER
+
+extern int avi_log_init(void);
+extern void avi_log_access(u32 reg, u32 val, enum avi_log_type type);
+extern int avi_log_display(struct seq_file *s);
+
+#else /* CONFIG_AVI_LOGGER */
+
+static inline int avi_log_init(void)
+{
+ return -ENODEV;
+}
+
+static inline void avi_log_access(u32 reg, u32 val, enum avi_log_type type)
+{
+ return;
+}
+
+static inline int avi_log_display(struct seq_file *s)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AVI_LOGGER */
+
+#endif /* _AVI_LOGGER_H_ */
diff --git a/drivers/parrot/video/avi_pixfmt.c b/drivers/parrot/video/avi_pixfmt.c
new file mode 100644
index 00000000000000..d0f0f9c0a1d140
--- /dev/null
+++ b/drivers/parrot/video/avi_pixfmt.c
@@ -0,0 +1,30 @@
+#include <linux/module.h>
+
+#include "avi_pixfmt.h"
+
+#define AVI_PIXEL_FORMAT_AS_LUT(_x) \
+ [AVI_PIXEL_FORMAT_AS_ENUM(_x)] = AVI_PIXFMT_ ## _x
+static const struct avi_dma_pixfmt avi_pixfmt_lut[AVI_PIXFMT_NR] = {
+ AVI_PIXEL_FORMATS(AVI_PIXEL_FORMAT_AS_LUT),
+};
+
+/* Look up the full pixel format descriptor matching @id.
+ *
+ * Any out-of-range id yields the AVI_INVALID_FMTID descriptor.
+ */
+const struct avi_dma_pixfmt avi_pixfmt_by_id(enum avi_dma_pixfmt_ids id)
+{
+	/* The cast also rejects negative values should the enum be given
+	 * a signed representation by the compiler */
+	if ((unsigned)id >= AVI_PIXFMT_NR)
+		id = AVI_INVALID_FMTID;
+
+	return avi_pixfmt_lut[id];
+}
+EXPORT_SYMBOL(avi_pixfmt_by_id);
+
+/* Return non-zero if @pixfmt carries an alpha component. */
+int avi_pixfmt_have_alpha(struct avi_dma_pixfmt pixfmt)
+{
+	switch (pixfmt.id) {
+	case AVI_RGBA8888_FMTID:
+	case AVI_BGRA8888_FMTID:
+	case AVI_ARGB8888_FMTID:
+	case AVI_ABGR8888_FMTID:
+	case AVI_RGBA5551_FMTID:
+	case AVI_AYUV_FMTID:
+		return 1;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(avi_pixfmt_have_alpha);
diff --git a/drivers/parrot/video/avi_pixfmt.h b/drivers/parrot/video/avi_pixfmt.h
new file mode 100644
index 00000000000000..cc126aff3e93a0
--- /dev/null
+++ b/drivers/parrot/video/avi_pixfmt.h
@@ -0,0 +1,597 @@
+#ifndef _AVI_PIXFMT_H_
+#define _AVI_PIXFMT_H_
+
+#include "avi_dma.h"
+#include "reg_avi.h"
+
+#define AVI_PIXEL_FORMATS(EXPANDER) \
+ EXPANDER(INVALID), \
+ EXPANDER(RGBA8888), \
+ EXPANDER(BGRA8888), \
+ EXPANDER(ARGB8888), \
+ EXPANDER(ABGR8888), \
+ EXPANDER(RGB888), \
+ EXPANDER(BGR888), \
+ EXPANDER(RGB565), \
+ EXPANDER(BGR565), \
+ EXPANDER(RGBA5551), \
+ EXPANDER(AYUV), \
+ EXPANDER(YUYV), \
+ EXPANDER(YYUV), \
+ EXPANDER(YVYU), \
+ EXPANDER(UYVY), \
+ EXPANDER(VYUY), \
+ EXPANDER(NV16), \
+ EXPANDER(NV61), \
+ EXPANDER(NV12), \
+ EXPANDER(NV21), \
+ EXPANDER(GREY), \
+ EXPANDER(Y10), \
+ EXPANDER(BAYER_8), \
+ EXPANDER(BAYER_1X10_16), \
+ EXPANDER(BAYER_1X10_24), \
+ EXPANDER(BAYER_1X10_32), \
+ EXPANDER(BAYER_3X10_32), \
+ EXPANDER(BAYER_1X12_16)
+
+/* Do not use these tags directly, use the corresponding avi_dma_pixfmt declared
+ * below instead */
+#define AVI_PIXEL_FORMAT_AS_ENUM(_x) AVI_ ## _x ## _FMTID
+enum avi_dma_pixfmt_ids {
+ AVI_PIXEL_FORMATS(AVI_PIXEL_FORMAT_AS_ENUM),
+ AVI_PIXFMT_NR
+};
+
+/* Bytes per pixel of plane 0 (the bitfield stores size minus one). */
+static inline unsigned avi_pixel_size0(struct avi_dma_pixfmt pixfmt)
+{
+	return pixfmt.bytes_per_pixel0 + 1;
+}
+
+/* Bytes per pixel of plane 1, or 0 when the format has no second plane.
+ *
+ * Raw formats and GREY are flagged semiplanar but never use the chroma
+ * plane, so they report 0 too.
+ */
+static inline unsigned avi_pixel_size1(struct avi_dma_pixfmt pixfmt)
+{
+	/* Certain format pretend they're planar but don't actually
+	 * use the chroma plane. */
+	if (pixfmt.semiplanar) {
+		if (pixfmt.raw || pixfmt.id == AVI_GREY_FMTID)
+			return 0;
+		else
+			return pixfmt.bytes_per_pixel1 + 1;
+	}
+
+	return 0;
+}
+
+enum avi_pixel_packing {
+ AVI_INTERLEAVED_444_PACKING,
+ AVI_INTERLEAVED_YUV_422_PACKING,
+ AVI_SEMIPLANAR_YUV_420_PACKING,
+ AVI_SEMIPLANAR_YUV_422_PACKING,
+};
+
+/* Return non-zero if @pixfmt actually stores data in a second plane. */
+static inline int avi_pixfmt_is_planar(struct avi_dma_pixfmt pixfmt)
+{
+	return !!avi_pixel_size1(pixfmt);
+}
+
+/* Map @pixfmt to its memory packing class.
+ *
+ * Non-planar formats are 4:4:4 or 4:2:2 interleaved; planar formats are
+ * 4:2:2 or 4:2:0 semiplanar. A "planar 4:4:4" combination is invalid and
+ * triggers a BUG().
+ */
+static inline enum avi_pixel_packing
+avi_pixfmt_get_packing(struct avi_dma_pixfmt pixfmt)
+{
+	if (!avi_pixfmt_is_planar(pixfmt))
+		return (pixfmt.subsampling == AVI_FIFO_CFG_444_FMT)?
+			AVI_INTERLEAVED_444_PACKING:
+			AVI_INTERLEAVED_YUV_422_PACKING;
+
+	switch (pixfmt.subsampling) {
+	case AVI_FIFO_CFG_422_FMT:
+		return AVI_SEMIPLANAR_YUV_422_PACKING;
+	case AVI_FIFO_CFG_420_FMT:
+		return AVI_SEMIPLANAR_YUV_420_PACKING;
+	case AVI_FIFO_CFG_444_FMT:
+		/* Doesn't make sense to have 4:4:4 planar */
+	default:
+		BUG();
+		break;
+	}
+}
+
+extern int avi_pixfmt_have_alpha(struct avi_dma_pixfmt pixfmt);
+
+#define AVI_PIXFMT_INVALID ((struct avi_dma_pixfmt) { \
+ .id = AVI_INVALID_FMTID, \
+})
+
+#define AVI_PIXFMT_RGBA8888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_RGBA8888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 0, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 0, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BGRA8888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BGRA8888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_ARGB8888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_ARGB8888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 0, \
+ .swap0_b1 = 1, \
+ .swap0_b2 = 2, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 1, \
+ .swap1_b2 = 2, \
+})
+
+#define AVI_PIXFMT_ABGR8888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_ABGR8888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 0, \
+ .swap0_b1 = 3, \
+ .swap0_b2 = 2, \
+ \
+ .swap1_b0 = 2, \
+ .swap1_b1 = 1, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_RGB888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_RGB888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_VUY_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 3 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BGR888 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BGR888_FMTID, \
+ .bitenc = AVI_FIFO_CFG_888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 3 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_RGB565 ((struct avi_dma_pixfmt) { \
+ .id = AVI_RGB565_FMTID, \
+ .bitenc = AVI_FIFO_CFG_565_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BGR565 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BGR565_FMTID, \
+ .bitenc = AVI_FIFO_CFG_565_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_VUY_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_RGBA5551 ((struct avi_dma_pixfmt) { \
+ .id = AVI_RGBA5551_FMTID, \
+ .bitenc = AVI_FIFO_CFG_1555_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_AYUV ((struct avi_dma_pixfmt) { \
+ .id = AVI_AYUV_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 0, \
+ .swap0_b1 = 1, \
+ .swap0_b2 = 2, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 1, \
+ .swap1_b2 = 2, \
+})
+
+#define AVI_PIXFMT_YUYV ((struct avi_dma_pixfmt) { \
+ .id = AVI_YUYV_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 2, \
+ .swap0_b1 = 1, \
+ .swap0_b2 = 0, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 3, \
+ .swap1_b2 = 2, \
+})
+
+#define AVI_PIXFMT_YYUV ((struct avi_dma_pixfmt) { \
+ .id = AVI_YYUV_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 1, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 0, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 3, \
+})
+
+#define AVI_PIXFMT_YVYU ((struct avi_dma_pixfmt) { \
+ .id = AVI_YVYU_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 2, \
+ .swap0_b1 = 3, \
+ .swap0_b2 = 0, \
+ \
+ .swap1_b0 = 2, \
+ .swap1_b1 = 3, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_UYVY ((struct avi_dma_pixfmt) { \
+ .id = AVI_UYVY_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 0, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 0, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_VYUY ((struct avi_dma_pixfmt) { \
+ .id = AVI_VYUY_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_NV16 ((struct avi_dma_pixfmt) { \
+ .id = AVI_NV16_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 1 - 1, \
+ .bytes_per_pixel1 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 2, \
+ .swap1_b1 = 3, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_NV61 ((struct avi_dma_pixfmt) { \
+ .id = AVI_NV61_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 1 - 1, \
+ .bytes_per_pixel1 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_NV12 ((struct avi_dma_pixfmt) { \
+ .id = AVI_NV12_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_420_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 1 - 1, \
+ .bytes_per_pixel1 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 2, \
+ .swap1_b1 = 3, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_NV21 ((struct avi_dma_pixfmt) { \
+ .id = AVI_NV21_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_420_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 1 - 1, \
+ .bytes_per_pixel1 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_GREY ((struct avi_dma_pixfmt) { \
+ .id = AVI_GREY_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 0, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_Y10 ((struct avi_dma_pixfmt) { \
+ .id = AVI_Y10_FMTID, \
+ .bitenc = AVI_FIFO_CFG_565_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 0, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+
+
+#define AVI_PIXFMT_BAYER_8 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_8_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 1, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 1 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 0, \
+ .swap1_b2 = 0, \
+})
+
+#define AVI_PIXFMT_BAYER_1X10_16 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_1X10_16_FMTID, \
+ .bitenc = AVI_FIFO_CFG_565_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BAYER_1X10_24 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_1X10_24_FMTID, \
+ .bitenc = AVI_FIFO_CFG_888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 3 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BAYER_1X10_32 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_1X10_32_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BAYER_3X10_32 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_3X10_32_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_444_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 4 - 1, \
+ \
+ .swap0_b0 = 3, \
+ .swap0_b1 = 2, \
+ .swap0_b2 = 1, \
+ \
+ .swap1_b0 = 3, \
+ .swap1_b1 = 2, \
+ .swap1_b2 = 1, \
+})
+
+#define AVI_PIXFMT_BAYER_1X12_16 ((struct avi_dma_pixfmt) { \
+ .id = AVI_BAYER_1X12_16_FMTID, \
+ .bitenc = AVI_FIFO_CFG_8888_BENC, \
+ .subsampling = AVI_FIFO_CFG_422_FMT, \
+ .reorg = AVI_FIFO_CFG_YUV_REORG, \
+ .semiplanar = 0, \
+ .raw = 1, \
+ .bytes_per_pixel0 = 2 - 1, \
+ \
+ .swap0_b0 = 2, \
+ .swap0_b1 = 1, \
+ .swap0_b2 = 0, \
+ \
+ .swap1_b0 = 0, \
+ .swap1_b1 = 3, \
+ .swap1_b2 = 2, \
+})
+
+extern const struct avi_dma_pixfmt avi_pixfmt_by_id(enum avi_dma_pixfmt_ids id);
+
+#endif /* _AVI_PIXFMT_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_cfg.h b/drivers/parrot/video/avi_regmap/avi_cfg.h
new file mode 100644
index 00000000000000..cfdb5ec973a68f
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_cfg.h
@@ -0,0 +1,664 @@
+/*********************************************************************
+ * avi_cfg register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-01-08
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_CFG_H_
+#define _AVI_CFG_H_
+
+#define AVI_CFG_ENABLE1 0x00
+#define AVI_CFG_ENABLE2 0x04
+#define AVI_CFG_APPLY1 0x08
+#define AVI_CFG_APPLY2 0x0c
+#define AVI_CFG_LOCK1 0x10
+#define AVI_CFG_LOCK2 0x14
+#define AVI_CFG_ITFLG1 0x20
+#define AVI_CFG_ITFLG2 0x24
+#define AVI_CFG_ITEN1 0x28
+#define AVI_CFG_ITEN2 0x2c
+#define AVI_CFG_ITACK1 0x30
+#define AVI_CFG_ITACK2 0x34
+#define AVI_CFG_DMACFG 0x40
+#define AVI_CFG_ISP_ENABLE3 0x44
+#define AVI_CFG_ISP_APPLY3 0x48
+#define AVI_CFG_ISP_LOCK3 0x4c
+#define AVI_CFG_ISP_ITFLG3 0x50
+#define AVI_CFG_ISP_ITEN3 0x54
+#define AVI_CFG_ISP_ITACK3 0x58
+
+union avi_cfg_enable1
+{
+ struct
+ {
+ uint32_t cfg_enable : 1;
+ uint32_t inter_enable : 1;
+ uint32_t fifo00_enable : 1;
+ uint32_t fifo01_enable : 1;
+ uint32_t fifo02_enable : 1;
+ uint32_t fifo03_enable : 1;
+ uint32_t fifo04_enable : 1;
+ uint32_t fifo05_enable : 1;
+ uint32_t fifo06_enable : 1;
+ uint32_t fifo07_enable : 1;
+ uint32_t fifo08_enable : 1;
+ uint32_t fifo09_enable : 1;
+ uint32_t fifo10_enable : 1;
+ uint32_t fifo11_enable : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_enable2
+{
+ struct
+ {
+ uint32_t conv0_enable : 1;
+ uint32_t conv1_enable : 1;
+ uint32_t conv2_enable : 1;
+ uint32_t conv3_enable : 1;
+ uint32_t blend0_enable : 1;
+ uint32_t blend1_enable : 1;
+ uint32_t lcd0_enable : 1;
+ uint32_t lcd1_enable : 1;
+ uint32_t cam0_enable : 1;
+ uint32_t cam1_enable : 1;
+ uint32_t cam2_enable : 1;
+ uint32_t cam3_enable : 1;
+ uint32_t cam4_enable : 1;
+ uint32_t cam5_enable : 1;
+ uint32_t scal0_enable : 1;
+ uint32_t rot0_enable : 1;
+ uint32_t scal1_enable : 1;
+ uint32_t rot1_enable : 1;
+ uint32_t gam0_enable : 1;
+ uint32_t gam1_enable : 1;
+ uint32_t fork0_enable : 1;
+ uint32_t fork1_enable : 1;
+ uint32_t fork2_enable : 1;
+ uint32_t fork3_enable : 1;
+ uint32_t sat0_enable : 1;
+ uint32_t sat1_enable : 1;
+ uint32_t stats_yuv0_enable : 1;
+ uint32_t stats_yuv1_enable : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_apply1
+{
+ struct
+ {
+ uint32_t cfg_apply : 1;
+ uint32_t inter_apply : 1;
+ uint32_t fifo00_apply : 1;
+ uint32_t fifo01_apply : 1;
+ uint32_t fifo02_apply : 1;
+ uint32_t fifo03_apply : 1;
+ uint32_t fifo04_apply : 1;
+ uint32_t fifo05_apply : 1;
+ uint32_t fifo06_apply : 1;
+ uint32_t fifo07_apply : 1;
+ uint32_t fifo08_apply : 1;
+ uint32_t fifo09_apply : 1;
+ uint32_t fifo10_apply : 1;
+ uint32_t fifo11_apply : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_apply2
+{
+ struct
+ {
+ uint32_t conv0_apply : 1;
+ uint32_t conv1_apply : 1;
+ uint32_t conv2_apply : 1;
+ uint32_t conv3_apply : 1;
+ uint32_t blend0_apply : 1;
+ uint32_t blend1_apply : 1;
+ uint32_t lcd0_apply : 1;
+ uint32_t lcd1_apply : 1;
+ uint32_t cam0_apply : 1;
+ uint32_t cam1_apply : 1;
+ uint32_t cam2_apply : 1;
+ uint32_t cam3_apply : 1;
+ uint32_t cam4_apply : 1;
+ uint32_t cam5_apply : 1;
+ uint32_t scal0_apply : 1;
+ uint32_t rot0_apply : 1;
+ uint32_t scal1_apply : 1;
+ uint32_t rot1_apply : 1;
+ uint32_t gam0_apply : 1;
+ uint32_t gam1_apply : 1;
+ uint32_t fork0_apply : 1;
+ uint32_t fork1_apply : 1;
+ uint32_t fork2_apply : 1;
+ uint32_t fork3_apply : 1;
+ uint32_t sat0_apply : 1;
+ uint32_t sat1_apply : 1;
+ uint32_t stats_yuv0_apply : 1;
+ uint32_t stats_yuv1_apply : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_lock1
+{
+ struct
+ {
+ uint32_t cfg_lock : 1;
+ uint32_t inter_lock : 1;
+ uint32_t fifo00_lock : 1;
+ uint32_t fifo01_lock : 1;
+ uint32_t fifo02_lock : 1;
+ uint32_t fifo03_lock : 1;
+ uint32_t fifo04_lock : 1;
+ uint32_t fifo05_lock : 1;
+ uint32_t fifo06_lock : 1;
+ uint32_t fifo07_lock : 1;
+ uint32_t fifo08_lock : 1;
+ uint32_t fifo09_lock : 1;
+ uint32_t fifo10_lock : 1;
+ uint32_t fifo11_lock : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_lock2
+{
+ struct
+ {
+ uint32_t conv0_lock : 1;
+ uint32_t conv1_lock : 1;
+ uint32_t conv2_lock : 1;
+ uint32_t conv3_lock : 1;
+ uint32_t blend0_lock : 1;
+ uint32_t blend1_lock : 1;
+ uint32_t lcd0_lock : 1;
+ uint32_t lcd1_lock : 1;
+ uint32_t cam0_lock : 1;
+ uint32_t cam1_lock : 1;
+ uint32_t cam2_lock : 1;
+ uint32_t cam3_lock : 1;
+ uint32_t cam4_lock : 1;
+ uint32_t cam5_lock : 1;
+ uint32_t scal0_lock : 1;
+ uint32_t rot0_lock : 1;
+ uint32_t scal1_lock : 1;
+ uint32_t rot1_lock : 1;
+ uint32_t gam0_lock : 1;
+ uint32_t gam1_lock : 1;
+ uint32_t fork0_lock : 1;
+ uint32_t fork1_lock : 1;
+ uint32_t fork2_lock : 1;
+ uint32_t fork3_lock : 1;
+ uint32_t sat0_lock : 1;
+ uint32_t sat1_lock : 1;
+ uint32_t stats_yuv0_lock : 1;
+ uint32_t stats_yuv1_lock : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_itflg1
+{
+ struct
+ {
+ uint32_t cfg_itflg : 1;
+ uint32_t inter_itflg : 1;
+ uint32_t fifo00_itflg : 1;
+ uint32_t fifo01_itflg : 1;
+ uint32_t fifo02_itflg : 1;
+ uint32_t fifo03_itflg : 1;
+ uint32_t fifo04_itflg : 1;
+ uint32_t fifo05_itflg : 1;
+ uint32_t fifo06_itflg : 1;
+ uint32_t fifo07_itflg : 1;
+ uint32_t fifo08_itflg : 1;
+ uint32_t fifo09_itflg : 1;
+ uint32_t fifo10_itflg : 1;
+ uint32_t fifo11_itflg : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_itflg2
+{
+ struct
+ {
+ uint32_t conv0_itflg : 1;
+ uint32_t conv1_itflg : 1;
+ uint32_t conv2_itflg : 1;
+ uint32_t conv3_itflg : 1;
+ uint32_t blend0_itflg : 1;
+ uint32_t blend1_itflg : 1;
+ uint32_t lcd0_itflg : 1;
+ uint32_t lcd1_itflg : 1;
+ uint32_t cam0_itflg : 1;
+ uint32_t cam1_itflg : 1;
+ uint32_t cam2_itflg : 1;
+ uint32_t cam3_itflg : 1;
+ uint32_t cam4_itflg : 1;
+ uint32_t cam5_itflg : 1;
+ uint32_t scal0_itflg : 1;
+ uint32_t rot0_itflg : 1;
+ uint32_t scal1_itflg : 1;
+ uint32_t rot1_itflg : 1;
+ uint32_t gam0_itflg : 1;
+ uint32_t gam1_itflg : 1;
+ uint32_t fork0_itflg : 1;
+ uint32_t fork1_itflg : 1;
+ uint32_t fork2_itflg : 1;
+ uint32_t fork3_itflg : 1;
+ uint32_t sat0_itflg : 1;
+ uint32_t sat1_itflg : 1;
+ uint32_t stats_yuv0_itflg : 1;
+ uint32_t stats_yuv1_itflg : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_iten1
+{
+ struct
+ {
+ uint32_t cfg_iten : 1;
+ uint32_t inter_iten : 1;
+ uint32_t fifo00_iten : 1;
+ uint32_t fifo01_iten : 1;
+ uint32_t fifo02_iten : 1;
+ uint32_t fifo03_iten : 1;
+ uint32_t fifo04_iten : 1;
+ uint32_t fifo05_iten : 1;
+ uint32_t fifo06_iten : 1;
+ uint32_t fifo07_iten : 1;
+ uint32_t fifo08_iten : 1;
+ uint32_t fifo09_iten : 1;
+ uint32_t fifo10_iten : 1;
+ uint32_t fifo11_iten : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_iten2
+{
+ struct
+ {
+ uint32_t conv0_iten : 1;
+ uint32_t conv1_iten : 1;
+ uint32_t conv2_iten : 1;
+ uint32_t conv3_iten : 1;
+ uint32_t blend0_iten : 1;
+ uint32_t blend1_iten : 1;
+ uint32_t lcd0_iten : 1;
+ uint32_t lcd1_iten : 1;
+ uint32_t cam0_iten : 1;
+ uint32_t cam1_iten : 1;
+ uint32_t cam2_iten : 1;
+ uint32_t cam3_iten : 1;
+ uint32_t cam4_iten : 1;
+ uint32_t cam5_iten : 1;
+ uint32_t scal0_iten : 1;
+ uint32_t rot0_iten : 1;
+ uint32_t scal1_iten : 1;
+ uint32_t rot1_iten : 1;
+ uint32_t gam0_iten : 1;
+ uint32_t gam1_iten : 1;
+ uint32_t fork0_iten : 1;
+ uint32_t fork1_iten : 1;
+ uint32_t fork2_iten : 1;
+ uint32_t fork3_iten : 1;
+ uint32_t sat0_iten : 1;
+ uint32_t sat1_iten : 1;
+ uint32_t stats_yuv0_iten : 1;
+ uint32_t stats_yuv1_iten : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_itack1
+{
+ struct
+ {
+ uint32_t cfg_itack : 1;
+ uint32_t inter_itack : 1;
+ uint32_t fifo00_itack : 1;
+ uint32_t fifo01_itack : 1;
+ uint32_t fifo02_itack : 1;
+ uint32_t fifo03_itack : 1;
+ uint32_t fifo04_itack : 1;
+ uint32_t fifo05_itack : 1;
+ uint32_t fifo06_itack : 1;
+ uint32_t fifo07_itack : 1;
+ uint32_t fifo08_itack : 1;
+ uint32_t fifo09_itack : 1;
+ uint32_t fifo10_itack : 1;
+ uint32_t fifo11_itack : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_itack2
+{
+ struct
+ {
+ uint32_t conv0_itack : 1;
+ uint32_t conv1_itack : 1;
+ uint32_t conv2_itack : 1;
+ uint32_t conv3_itack : 1;
+ uint32_t blend0_itack : 1;
+ uint32_t blend1_itack : 1;
+ uint32_t lcd0_itack : 1;
+ uint32_t lcd1_itack : 1;
+ uint32_t cam0_itack : 1;
+ uint32_t cam1_itack : 1;
+ uint32_t cam2_itack : 1;
+ uint32_t cam3_itack : 1;
+ uint32_t cam4_itack : 1;
+ uint32_t cam5_itack : 1;
+ uint32_t scal0_itack : 1;
+ uint32_t rot0_itack : 1;
+ uint32_t scal1_itack : 1;
+ uint32_t rot1_itack : 1;
+ uint32_t gam0_itack : 1;
+ uint32_t gam1_itack : 1;
+ uint32_t fork0_itack : 1;
+ uint32_t fork1_itack : 1;
+ uint32_t fork2_itack : 1;
+ uint32_t fork3_itack : 1;
+ uint32_t sat0_itack : 1;
+ uint32_t sat1_itack : 1;
+ uint32_t stats_yuv0_itack : 1;
+ uint32_t stats_yuv1_itack : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_dmacfg
+{
+ struct
+ {
+ uint32_t dma_flag_timeout : 16;
+ uint32_t dma_flag_number : 4;
+ uint32_t dma_max_burst_number : 4;
+ uint32_t dma_max_bandwidth : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_enable3
+{
+ struct
+ {
+ uint32_t inter0_enable : 1;
+ uint32_t vlformat_32to40_0_enable : 1;
+ uint32_t pedestal0_enable : 1;
+ uint32_t grim0_enable : 1;
+ uint32_t rip0_enable : 1;
+ uint32_t denoise0_enable : 1;
+ uint32_t stats_bayer0_enable : 1;
+ uint32_t lsc0_enable : 1;
+ uint32_t chroma_aber0_enable : 1;
+ uint32_t bayer0_enable : 1;
+ uint32_t color_matrix0_enable : 1;
+ uint32_t vlformat_40to32_0_enable : 1;
+ uint32_t inter1_enable : 1;
+ uint32_t vlformat_32to40_1_enable : 1;
+ uint32_t pedestal1_enable : 1;
+ uint32_t grim1_enable : 1;
+ uint32_t rip1_enable : 1;
+ uint32_t denoise1_enable : 1;
+ uint32_t stats_bayer1_enable : 1;
+ uint32_t lsc1_enable : 1;
+ uint32_t chroma_aber1_enable : 1;
+ uint32_t bayer1_enable : 1;
+ uint32_t color_matrix1_enable : 1;
+ uint32_t vlformat_40to32_1_enable : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_enable : 1;
+ uint32_t ee_crf0_enable : 1;
+ uint32_t i3d_lut0_enable : 1;
+ uint32_t drop0_enable : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_enable : 1;
+ uint32_t ee_crf1_enable : 1;
+ uint32_t i3d_lut1_enable : 1;
+ uint32_t drop1_enable : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_apply3
+{
+ struct
+ {
+ uint32_t inter0_apply : 1;
+ uint32_t vlformat_32to40_0_apply : 1;
+ uint32_t pedestal0_apply : 1;
+ uint32_t grim0_apply : 1;
+ uint32_t rip0_apply : 1;
+ uint32_t denoise0_apply : 1;
+ uint32_t stats_bayer0_apply : 1;
+ uint32_t lsc0_apply : 1;
+ uint32_t chroma_aber0_apply : 1;
+ uint32_t bayer0_apply : 1;
+ uint32_t color_matrix0_apply : 1;
+ uint32_t vlformat_40to32_0_apply : 1;
+ uint32_t inter1_apply : 1;
+ uint32_t vlformat_32to40_1_apply : 1;
+ uint32_t pedestal1_apply : 1;
+ uint32_t grim1_apply : 1;
+ uint32_t rip1_apply : 1;
+ uint32_t denoise1_apply : 1;
+ uint32_t stats_bayer1_apply : 1;
+ uint32_t lsc1_apply : 1;
+ uint32_t chroma_aber1_apply : 1;
+ uint32_t bayer1_apply : 1;
+ uint32_t color_matrix1_apply : 1;
+ uint32_t vlformat_40to32_1_apply : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_apply : 1;
+ uint32_t ee_crf0_apply : 1;
+ uint32_t i3d_lut0_apply : 1;
+ uint32_t drop0_apply : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_apply : 1;
+ uint32_t ee_crf1_apply : 1;
+ uint32_t i3d_lut1_apply : 1;
+ uint32_t drop1_apply : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_lock3
+{
+ struct
+ {
+ uint32_t inter0_lock : 1;
+ uint32_t vlformat_32to40_0_lock : 1;
+ uint32_t pedestal0_lock : 1;
+ uint32_t grim0_lock : 1;
+ uint32_t rip0_lock : 1;
+ uint32_t denoise0_lock : 1;
+ uint32_t stats_bayer0_lock : 1;
+ uint32_t lsc0_lock : 1;
+ uint32_t chroma_aber0_lock : 1;
+ uint32_t bayer0_lock : 1;
+ uint32_t color_matrix0_lock : 1;
+ uint32_t vlformat_40to32_0_lock : 1;
+ uint32_t inter1_lock : 1;
+ uint32_t vlformat_32to40_1_lock : 1;
+ uint32_t pedestal1_lock : 1;
+ uint32_t grim1_lock : 1;
+ uint32_t rip1_lock : 1;
+ uint32_t denoise1_lock : 1;
+ uint32_t stats_bayer1_lock : 1;
+ uint32_t lsc1_lock : 1;
+ uint32_t chroma_aber1_lock : 1;
+ uint32_t bayer1_lock : 1;
+ uint32_t color_matrix1_lock : 1;
+ uint32_t vlformat_40to32_1_lock : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_lock : 1;
+ uint32_t ee_crf0_lock : 1;
+ uint32_t i3d_lut0_lock : 1;
+ uint32_t drop0_lock : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_lock : 1;
+ uint32_t ee_crf1_lock : 1;
+ uint32_t i3d_lut1_lock : 1;
+ uint32_t drop1_lock : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_itflg3
+{
+ struct
+ {
+ uint32_t inter0_itflg : 1;
+ uint32_t vlformat_32to40_0_itflg : 1;
+ uint32_t pedestal0_itflg : 1;
+ uint32_t grim0_itflg : 1;
+ uint32_t rip0_itflg : 1;
+ uint32_t denoise0_itflg : 1;
+ uint32_t stats_bayer0_itflg : 1;
+ uint32_t lsc0_itflg : 1;
+ uint32_t chroma_aber0_itflg : 1;
+ uint32_t bayer0_itflg : 1;
+ uint32_t color_matrix0_itflg : 1;
+ uint32_t vlformat_40to32_0_itflg : 1;
+ uint32_t inter1_itflg : 1;
+ uint32_t vlformat_32to40_1_itflg : 1;
+ uint32_t pedestal1_itflg : 1;
+ uint32_t grim1_itflg : 1;
+ uint32_t rip1_itflg : 1;
+ uint32_t denoise1_itflg : 1;
+ uint32_t stats_bayer1_itflg : 1;
+ uint32_t lsc1_itflg : 1;
+ uint32_t chroma_aber1_itflg : 1;
+ uint32_t bayer1_itflg : 1;
+ uint32_t color_matrix1_itflg : 1;
+ uint32_t vlformat_40to32_1_itflg : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_itflg : 1;
+ uint32_t ee_crf0_itflg : 1;
+ uint32_t i3d_lut0_itflg : 1;
+ uint32_t drop0_itflg : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_itflg : 1;
+ uint32_t ee_crf1_itflg : 1;
+ uint32_t i3d_lut1_itflg : 1;
+ uint32_t drop1_itflg : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_iten3
+{
+ struct
+ {
+ uint32_t inter0_iten : 1;
+ uint32_t vlformat_32to40_0_iten : 1;
+ uint32_t pedestal0_iten : 1;
+ uint32_t grim0_iten : 1;
+ uint32_t rip0_iten : 1;
+ uint32_t denoise0_iten : 1;
+ uint32_t stats_bayer0_iten : 1;
+ uint32_t lsc0_iten : 1;
+ uint32_t chroma_aber0_iten : 1;
+ uint32_t bayer0_iten : 1;
+ uint32_t color_matrix0_iten : 1;
+ uint32_t vlformat_40to32_0_iten : 1;
+ uint32_t inter1_iten : 1;
+ uint32_t vlformat_32to40_1_iten : 1;
+ uint32_t pedestal1_iten : 1;
+ uint32_t grim1_iten : 1;
+ uint32_t rip1_iten : 1;
+ uint32_t denoise1_iten : 1;
+ uint32_t stats_bayer1_iten : 1;
+ uint32_t lsc1_iten : 1;
+ uint32_t chroma_aber1_iten : 1;
+ uint32_t bayer1_iten : 1;
+ uint32_t color_matrix1_iten : 1;
+ uint32_t vlformat_40to32_1_iten : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_iten : 1;
+ uint32_t ee_crf0_iten : 1;
+ uint32_t i3d_lut0_iten : 1;
+ uint32_t drop0_iten : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_iten : 1;
+ uint32_t ee_crf1_iten : 1;
+ uint32_t i3d_lut1_iten : 1;
+ uint32_t drop1_iten : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_cfg_isp_itack3
+{
+ struct
+ {
+ uint32_t inter0_itack : 1;
+ uint32_t vlformat_32to40_0_itack : 1;
+ uint32_t pedestal0_itack : 1;
+ uint32_t grim0_itack : 1;
+ uint32_t rip0_itack : 1;
+ uint32_t denoise0_itack : 1;
+ uint32_t stats_bayer0_itack : 1;
+ uint32_t lsc0_itack : 1;
+ uint32_t chroma_aber0_itack : 1;
+ uint32_t bayer0_itack : 1;
+ uint32_t color_matrix0_itack : 1;
+ uint32_t vlformat_40to32_0_itack : 1;
+ uint32_t inter1_itack : 1;
+ uint32_t vlformat_32to40_1_itack : 1;
+ uint32_t pedestal1_itack : 1;
+ uint32_t grim1_itack : 1;
+ uint32_t rip1_itack : 1;
+ uint32_t denoise1_itack : 1;
+ uint32_t stats_bayer1_itack : 1;
+ uint32_t lsc1_itack : 1;
+ uint32_t chroma_aber1_itack : 1;
+ uint32_t bayer1_itack : 1;
+ uint32_t color_matrix1_itack : 1;
+ uint32_t vlformat_40to32_1_itack : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter0_itack : 1;
+ uint32_t ee_crf0_itack : 1;
+ uint32_t i3d_lut0_itack : 1;
+ uint32_t drop0_itack : 1;
+ uint32_t ee_crf_i3d_lut_drop_inter1_itack : 1;
+ uint32_t ee_crf1_itack : 1;
+ uint32_t i3d_lut1_itack : 1;
+ uint32_t drop1_itack : 1;
+ };
+ uint32_t _register;
+};
+
+struct avi_cfg_regs
+{
+ union avi_cfg_enable1 enable1; /* 0x000 */
+ union avi_cfg_enable2 enable2; /* 0x004 */
+ union avi_cfg_apply1 apply1; /* 0x008 */
+ union avi_cfg_apply2 apply2; /* 0x00c */
+ union avi_cfg_lock1 lock1; /* 0x010 */
+ union avi_cfg_lock2 lock2; /* 0x014 */
+ unsigned /*unused*/ : 32; /* 0x018 */
+ unsigned /*unused*/ : 32; /* 0x01c */
+ union avi_cfg_itflg1 itflg1; /* 0x020 */
+ union avi_cfg_itflg2 itflg2; /* 0x024 */
+ union avi_cfg_iten1 iten1; /* 0x028 */
+ union avi_cfg_iten2 iten2; /* 0x02c */
+ union avi_cfg_itack1 itack1; /* 0x030 */
+ union avi_cfg_itack2 itack2; /* 0x034 */
+ unsigned /*unused*/ : 32; /* 0x038 */
+ unsigned /*unused*/ : 32; /* 0x03c */
+ union avi_cfg_dmacfg dmacfg; /* 0x040 */
+ union avi_cfg_isp_enable3 isp_enable3; /* 0x044 */
+ union avi_cfg_isp_apply3 isp_apply3; /* 0x048 */
+ union avi_cfg_isp_lock3 isp_lock3; /* 0x04c */
+ union avi_cfg_isp_itflg3 isp_itflg3; /* 0x050 */
+ union avi_cfg_isp_iten3 isp_iten3; /* 0x054 */
+ union avi_cfg_isp_itack3 isp_itack3; /* 0x058 */
+};
+
+#endif /* _AVI_CFG_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_bayer.h b/drivers/parrot/video/avi_regmap/avi_isp_bayer.h
new file mode 100644
index 00000000000000..8f64581307bccb
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_bayer.h
@@ -0,0 +1,55 @@
+/*********************************************************************
+ * avi_isp_bayer register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-01-17
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_BAYER_H_
+#define _AVI_ISP_BAYER_H_
+
+#define AVI_ISP_BAYER_CFA 0x00
+#define AVI_ISP_BAYER_THRESHOLD_1 0x04
+#define AVI_ISP_BAYER_THRESHOLD_2 0x08
+
+union avi_isp_bayer_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_bayer_threshold_1
+{
+ struct
+ {
+ uint32_t threshold_1 : 13;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_bayer_threshold_2
+{
+ struct
+ {
+ uint32_t threshold_2 : 13;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_bayer_regs
+{
+ union avi_isp_bayer_cfa cfa; /* 0x000 */
+ union avi_isp_bayer_threshold_1 threshold_1; /* 0x004 */
+ union avi_isp_bayer_threshold_2 threshold_2; /* 0x008 */
+};
+
+#endif /* _AVI_ISP_BAYER_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_chain_bayer_inter.h b/drivers/parrot/video/avi_regmap/avi_isp_chain_bayer_inter.h
new file mode 100644
index 00000000000000..58827eef837162
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_chain_bayer_inter.h
@@ -0,0 +1,40 @@
+/*********************************************************************
+ * avi_isp_chain_bayer_inter register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-01-27
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_CHAIN_BAYER_INTER_H_
+#define _AVI_ISP_CHAIN_BAYER_INTER_H_
+
+#define AVI_ISP_CHAIN_BAYER_INTER_MODULE_BYPASS 0x00
+
+union avi_isp_chain_bayer_inter_module_bypass
+{
+ struct
+ {
+ uint32_t pedestal_bypass : 1;
+ uint32_t grim_bypass : 1;
+ uint32_t rip_bypass : 1;
+ uint32_t denoise_bypass : 1;
+ uint32_t lsc_bypass : 1;
+ uint32_t chroma_aber_bypass : 1;
+ uint32_t bayer_bypass : 1;
+ uint32_t color_matrix_bypass : 1;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_chain_bayer_inter_regs
+{
+ union avi_isp_chain_bayer_inter_module_bypass module_bypass; /* 0x000 */
+};
+
+#endif /* _AVI_ISP_CHAIN_BAYER_INTER_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_chain_yuv_inter.h b/drivers/parrot/video/avi_regmap/avi_isp_chain_yuv_inter.h
new file mode 100644
index 00000000000000..28a6190b193d06
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_chain_yuv_inter.h
@@ -0,0 +1,35 @@
+/*********************************************************************
+ * avi_isp_chain_yuv_inter register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-21
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_CHAIN_YUV_INTER_H_
+#define _AVI_ISP_CHAIN_YUV_INTER_H_
+
+#define AVI_ISP_CHAIN_YUV_INTER_MODULE_BYPASS 0x00
+
+union avi_isp_chain_yuv_inter_module_bypass
+{
+ struct
+ {
+ uint32_t ee_crf_bypass : 1;
+ uint32_t i3d_lut_bypass : 1;
+ uint32_t drop_bypass : 1;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_chain_yuv_inter_regs
+{
+ union avi_isp_chain_yuv_inter_module_bypass module_bypass; /* 0x000 */
+};
+
+#endif /* _AVI_ISP_CHAIN_YUV_INTER_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_chroma.h b/drivers/parrot/video/avi_regmap/avi_isp_chroma.h
new file mode 100644
index 00000000000000..35d0cb3889eca7
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_chroma.h
@@ -0,0 +1,163 @@
+/*********************************************************************
+ * avi_isp_chroma register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2013-09-05
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_CHROMA_H_
+#define _AVI_ISP_CHROMA_H_
+
+#define AVI_ISP_CHROMA_COEFF_01_00 0x00
+#define AVI_ISP_CHROMA_COEFF_10_02 0x04
+#define AVI_ISP_CHROMA_COEFF_12_11 0x08
+#define AVI_ISP_CHROMA_COEFF_21_20 0x0c
+#define AVI_ISP_CHROMA_COEFF_22 0x10
+#define AVI_ISP_CHROMA_OFFSET_RY 0x14
+#define AVI_ISP_CHROMA_CLIP_RY 0x18
+#define AVI_ISP_CHROMA_OFFSET_GU 0x1c
+#define AVI_ISP_CHROMA_CLIP_GU 0x20
+#define AVI_ISP_CHROMA_OFFSET_BV 0x24
+#define AVI_ISP_CHROMA_CLIP_BV 0x28
+
+union avi_isp_chroma_coeff_01_00
+{
+ struct
+ {
+ uint32_t coeff_00 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_01 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_coeff_10_02
+{
+ struct
+ {
+ uint32_t coeff_02 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_10 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_coeff_12_11
+{
+ struct
+ {
+ uint32_t coeff_11 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_12 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_coeff_21_20
+{
+ struct
+ {
+ uint32_t coeff_20 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_21 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_coeff_22
+{
+ struct
+ {
+ uint32_t coeff_22 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_offset_ry
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_clip_ry
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_offset_gu
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_clip_gu
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_offset_bv
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chroma_clip_bv
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_chroma_regs
+{
+ union avi_isp_chroma_coeff_01_00 coeff_01_00; /* 0x000 */
+ union avi_isp_chroma_coeff_10_02 coeff_10_02; /* 0x004 */
+ union avi_isp_chroma_coeff_12_11 coeff_12_11; /* 0x008 */
+ union avi_isp_chroma_coeff_21_20 coeff_21_20; /* 0x00c */
+ union avi_isp_chroma_coeff_22 coeff_22; /* 0x010 */
+ union avi_isp_chroma_offset_ry offset_ry; /* 0x014 */
+ union avi_isp_chroma_clip_ry clip_ry; /* 0x018 */
+ union avi_isp_chroma_offset_gu offset_gu; /* 0x01c */
+ union avi_isp_chroma_clip_gu clip_gu; /* 0x020 */
+ union avi_isp_chroma_offset_bv offset_bv; /* 0x024 */
+ union avi_isp_chroma_clip_bv clip_bv; /* 0x028 */
+};
+
+#endif /* _AVI_ISP_CHROMA_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_chromatic_aberration.h b/drivers/parrot/video/avi_regmap/avi_isp_chromatic_aberration.h
new file mode 100644
index 00000000000000..9efb78c3632b34
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_chromatic_aberration.h
@@ -0,0 +1,162 @@
+/*********************************************************************
+ * avi_isp_chromatic_aberration register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_CHROMATIC_ABERRATION_H_
+#define _AVI_ISP_CHROMATIC_ABERRATION_H_
+
+#define AVI_ISP_CHROMATIC_ABERRATION_RADIUS_SQUARED 0x00
+#define AVI_ISP_CHROMATIC_ABERRATION_DISPLACEMENT_COEFFS 0x04
+#define AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_X_CENTER 0xa0
+#define AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_X_SQUARED 0xa4
+#define AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_Y_CENTER 0xa8
+#define AVI_ISP_CHROMATIC_ABERRATION_CIRCLE_POS_Y_SQUARED 0xac
+#define AVI_ISP_CHROMATIC_ABERRATION_CFA 0xb0
+#define AVI_ISP_CHROMATIC_ABERRATION_GREEN_VARIATION 0xb4
+#define AVI_ISP_CHROMATIC_ABERRATION_INCREMENTS_LOG2 0xb8
+
+union avi_isp_chromatic_aberration_radius_squared
+{
+ struct
+ {
+ uint32_t radius_squared : 24;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_displacement_coeffs
+{
+ struct
+ {
+ uint32_t displacement_blue : 16;
+ uint32_t displacement_red : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_circle_pos_x_center
+{
+ struct
+ {
+ uint32_t x_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_circle_pos_x_squared
+{
+ struct
+ {
+ uint32_t x_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_circle_pos_y_center
+{
+ struct
+ {
+ uint32_t y_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_circle_pos_y_squared
+{
+ struct
+ {
+ uint32_t y_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_green_variation
+{
+ struct
+ {
+ uint32_t green_var : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_chromatic_aberration_increments_log2
+{
+ struct
+ {
+ uint32_t x_log2_inc : 3;
+ unsigned /*unused */ : 13;
+ uint32_t y_log2_inc : 3;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_chromatic_aberration_regs
+{
+ union avi_isp_chromatic_aberration_radius_squared radius_squared; /* 0x000 */
+ union avi_isp_chromatic_aberration_displacement_coeffs displacement_coeffs; /* 0x004 */
+ unsigned /*unused*/ : 32; /* 0x008 */
+ unsigned /*unused*/ : 32; /* 0x00c */
+ unsigned /*unused*/ : 32; /* 0x010 */
+ unsigned /*unused*/ : 32; /* 0x014 */
+ unsigned /*unused*/ : 32; /* 0x018 */
+ unsigned /*unused*/ : 32; /* 0x01c */
+ unsigned /*unused*/ : 32; /* 0x020 */
+ unsigned /*unused*/ : 32; /* 0x024 */
+ unsigned /*unused*/ : 32; /* 0x028 */
+ unsigned /*unused*/ : 32; /* 0x02c */
+ unsigned /*unused*/ : 32; /* 0x030 */
+ unsigned /*unused*/ : 32; /* 0x034 */
+ unsigned /*unused*/ : 32; /* 0x038 */
+ unsigned /*unused*/ : 32; /* 0x03c */
+ unsigned /*unused*/ : 32; /* 0x040 */
+ unsigned /*unused*/ : 32; /* 0x044 */
+ unsigned /*unused*/ : 32; /* 0x048 */
+ unsigned /*unused*/ : 32; /* 0x04c */
+ unsigned /*unused*/ : 32; /* 0x050 */
+ unsigned /*unused*/ : 32; /* 0x054 */
+ unsigned /*unused*/ : 32; /* 0x058 */
+ unsigned /*unused*/ : 32; /* 0x05c */
+ unsigned /*unused*/ : 32; /* 0x060 */
+ unsigned /*unused*/ : 32; /* 0x064 */
+ unsigned /*unused*/ : 32; /* 0x068 */
+ unsigned /*unused*/ : 32; /* 0x06c */
+ unsigned /*unused*/ : 32; /* 0x070 */
+ unsigned /*unused*/ : 32; /* 0x074 */
+ unsigned /*unused*/ : 32; /* 0x078 */
+ unsigned /*unused*/ : 32; /* 0x07c */
+ unsigned /*unused*/ : 32; /* 0x080 */
+ unsigned /*unused*/ : 32; /* 0x084 */
+ unsigned /*unused*/ : 32; /* 0x088 */
+ unsigned /*unused*/ : 32; /* 0x08c */
+ unsigned /*unused*/ : 32; /* 0x090 */
+ unsigned /*unused*/ : 32; /* 0x094 */
+ unsigned /*unused*/ : 32; /* 0x098 */
+ unsigned /*unused*/ : 32; /* 0x09c */
+ union avi_isp_chromatic_aberration_circle_pos_x_center circle_pos_x_center; /* 0x0a0 */
+ union avi_isp_chromatic_aberration_circle_pos_x_squared circle_pos_x_squared; /* 0x0a4 */
+ union avi_isp_chromatic_aberration_circle_pos_y_center circle_pos_y_center; /* 0x0a8 */
+ union avi_isp_chromatic_aberration_circle_pos_y_squared circle_pos_y_squared; /* 0x0ac */
+ union avi_isp_chromatic_aberration_cfa cfa; /* 0x0b0 */
+ union avi_isp_chromatic_aberration_green_variation green_variation; /* 0x0b4 */
+ union avi_isp_chromatic_aberration_increments_log2 increments_log2; /* 0x0b8 */
+};
+
+#endif /* _AVI_ISP_CHROMATIC_ABERRATION_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_color_correction.h b/drivers/parrot/video/avi_regmap/avi_isp_color_correction.h
new file mode 100644
index 00000000000000..adfb30425fbe87
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_color_correction.h
@@ -0,0 +1,163 @@
+/*********************************************************************
+ * avi_isp_color_correction register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_COLOR_CORRECTION_H_
+#define _AVI_ISP_COLOR_CORRECTION_H_
+
+#define AVI_ISP_COLOR_CORRECTION_COEFF_01_00 0x00
+#define AVI_ISP_COLOR_CORRECTION_COEFF_10_02 0x04
+#define AVI_ISP_COLOR_CORRECTION_COEFF_12_11 0x08
+#define AVI_ISP_COLOR_CORRECTION_COEFF_21_20 0x0c
+#define AVI_ISP_COLOR_CORRECTION_COEFF_22 0x10
+#define AVI_ISP_COLOR_CORRECTION_OFFSET_RY 0x14
+#define AVI_ISP_COLOR_CORRECTION_CLIP_RY 0x18
+#define AVI_ISP_COLOR_CORRECTION_OFFSET_GU 0x1c
+#define AVI_ISP_COLOR_CORRECTION_CLIP_GU 0x20
+#define AVI_ISP_COLOR_CORRECTION_OFFSET_BV 0x24
+#define AVI_ISP_COLOR_CORRECTION_CLIP_BV 0x28
+
+union avi_isp_color_correction_coeff_01_00
+{
+ struct
+ {
+ uint32_t coeff_00 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_01 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_coeff_10_02
+{
+ struct
+ {
+ uint32_t coeff_02 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_10 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_coeff_12_11
+{
+ struct
+ {
+ uint32_t coeff_11 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_12 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_coeff_21_20
+{
+ struct
+ {
+ uint32_t coeff_20 : 14;
+ unsigned /*unused */ : 2;
+ uint32_t coeff_21 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_coeff_22
+{
+ struct
+ {
+ uint32_t coeff_22 : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_offset_ry
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_clip_ry
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_offset_gu
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_clip_gu
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_offset_bv
+{
+ struct
+ {
+ uint32_t offset_in : 10;
+ unsigned /*unused */ : 6;
+ uint32_t offset_out : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_color_correction_clip_bv
+{
+ struct
+ {
+ uint32_t clip_min : 10;
+ unsigned /*unused */ : 6;
+ uint32_t clip_max : 10;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_color_correction_regs
+{
+ union avi_isp_color_correction_coeff_01_00 coeff_01_00; /* 0x000 */
+ union avi_isp_color_correction_coeff_10_02 coeff_10_02; /* 0x004 */
+ union avi_isp_color_correction_coeff_12_11 coeff_12_11; /* 0x008 */
+ union avi_isp_color_correction_coeff_21_20 coeff_21_20; /* 0x00c */
+ union avi_isp_color_correction_coeff_22 coeff_22; /* 0x010 */
+ union avi_isp_color_correction_offset_ry offset_ry; /* 0x014 */
+ union avi_isp_color_correction_clip_ry clip_ry; /* 0x018 */
+ union avi_isp_color_correction_offset_gu offset_gu; /* 0x01c */
+ union avi_isp_color_correction_clip_gu clip_gu; /* 0x020 */
+ union avi_isp_color_correction_offset_bv offset_bv; /* 0x024 */
+ union avi_isp_color_correction_clip_bv clip_bv; /* 0x028 */
+};
+
+#endif /* _AVI_ISP_COLOR_CORRECTION_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_dead_pixel_correction.h b/drivers/parrot/video/avi_regmap/avi_isp_dead_pixel_correction.h
new file mode 100644
index 00000000000000..930cc2cbe49cff
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_dead_pixel_correction.h
@@ -0,0 +1,100 @@
+/*********************************************************************
+ * avi_isp_dead_pixel_correction register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_DEAD_PIXEL_CORRECTION_H_
+#define _AVI_ISP_DEAD_PIXEL_CORRECTION_H_
+
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_LIST_MEM 0x00
+
+union avi_isp_dead_pixel_correction_list_mem
+{
+ struct
+ {
+ uint32_t op_code : 3;
+ uint32_t coord_x : 11;
+ uint32_t coord_y : 12;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_dead_pixel_correction_list_mem_regs
+{
+ union avi_isp_dead_pixel_correction_list_mem list_mem[256];
+};
+
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_CFA 0x400
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_BYPASS 0x404
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_THRESHOLD 0x408
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_CONF 0x40c
+#define AVI_ISP_DEAD_PIXEL_CORRECTION_RGRIM_GAIN 0x410
+
+union avi_isp_dead_pixel_correction_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_dead_pixel_correction_bypass
+{
+ struct
+ {
+ uint32_t list : 1;
+ uint32_t auto_detection : 1;
+ uint32_t rgrim : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_dead_pixel_correction_threshold
+{
+ struct
+ {
+ uint32_t threshold : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_dead_pixel_correction_rgrim_conf
+{
+ struct
+ {
+ uint32_t tim_1 : 8;
+ uint32_t tim_2 : 8;
+ uint32_t tsat : 8;
+ uint32_t tcon : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_dead_pixel_correction_rgrim_gain
+{
+ struct
+ {
+ uint32_t gain : 7;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_dead_pixel_correction_regs
+{
+ union avi_isp_dead_pixel_correction_cfa cfa; /* 0x400 */
+ union avi_isp_dead_pixel_correction_bypass bypass; /* 0x404 */
+ union avi_isp_dead_pixel_correction_threshold threshold; /* 0x408 */
+ union avi_isp_dead_pixel_correction_rgrim_conf rgrim_conf; /* 0x40c */
+ union avi_isp_dead_pixel_correction_rgrim_gain rgrim_gain; /* 0x410 */
+};
+
+#endif /* _AVI_ISP_DEAD_PIXEL_CORRECTION_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_denoising.h b/drivers/parrot/video/avi_regmap/avi_isp_denoising.h
new file mode 100644
index 00000000000000..d0d726bff3551f
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_denoising.h
@@ -0,0 +1,198 @@
+/*********************************************************************
+ * avi_isp_denoising register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_DENOISING_H_
+#define _AVI_ISP_DENOISING_H_
+
+#define AVI_ISP_DENOISING_CFA 0x00
+#define AVI_ISP_DENOISING_LUMOCOEFF_R_03_00 0x10
+#define AVI_ISP_DENOISING_LUMOCOEFF_R_07_04 0x14
+#define AVI_ISP_DENOISING_LUMOCOEFF_R_11_08 0x18
+#define AVI_ISP_DENOISING_LUMOCOEFF_R_13_12 0x1c
+#define AVI_ISP_DENOISING_LUMOCOEFF_B_03_00 0x20
+#define AVI_ISP_DENOISING_LUMOCOEFF_B_07_04 0x24
+#define AVI_ISP_DENOISING_LUMOCOEFF_B_11_08 0x28
+#define AVI_ISP_DENOISING_LUMOCOEFF_B_13_12 0x2c
+#define AVI_ISP_DENOISING_LUMOCOEFF_G_03_00 0x30
+#define AVI_ISP_DENOISING_LUMOCOEFF_G_07_04 0x34
+#define AVI_ISP_DENOISING_LUMOCOEFF_G_11_08 0x38
+#define AVI_ISP_DENOISING_LUMOCOEFF_G_13_12 0x3c
+
+union avi_isp_denoising_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_r_03_00
+{
+ struct
+ {
+ uint32_t lumocoeff_r_00 : 8;
+ uint32_t lumocoeff_r_01 : 8;
+ uint32_t lumocoeff_r_02 : 8;
+ uint32_t lumocoeff_r_03 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_r_07_04
+{
+ struct
+ {
+ uint32_t lumocoeff_r_04 : 8;
+ uint32_t lumocoeff_r_05 : 8;
+ uint32_t lumocoeff_r_06 : 8;
+ uint32_t lumocoeff_r_07 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_r_11_08
+{
+ struct
+ {
+ uint32_t lumocoeff_r_08 : 8;
+ uint32_t lumocoeff_r_09 : 8;
+ uint32_t lumocoeff_r_10 : 8;
+ uint32_t lumocoeff_r_11 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_r_13_12
+{
+ struct
+ {
+ uint32_t lumocoeff_r_12 : 8;
+ uint32_t lumocoeff_r_13 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_b_03_00
+{
+ struct
+ {
+ uint32_t lumocoeff_b_00 : 8;
+ uint32_t lumocoeff_b_01 : 8;
+ uint32_t lumocoeff_b_02 : 8;
+ uint32_t lumocoeff_b_03 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_b_07_04
+{
+ struct
+ {
+ uint32_t lumocoeff_b_04 : 8;
+ uint32_t lumocoeff_b_05 : 8;
+ uint32_t lumocoeff_b_06 : 8;
+ uint32_t lumocoeff_b_07 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_b_11_08
+{
+ struct
+ {
+ uint32_t lumocoeff_b_08 : 8;
+ uint32_t lumocoeff_b_09 : 8;
+ uint32_t lumocoeff_b_10 : 8;
+ uint32_t lumocoeff_b_11 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_b_13_12
+{
+ struct
+ {
+ uint32_t lumocoeff_b_12 : 8;
+ uint32_t lumocoeff_b_13 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_g_03_00
+{
+ struct
+ {
+ uint32_t lumocoeff_g_00 : 8;
+ uint32_t lumocoeff_g_01 : 8;
+ uint32_t lumocoeff_g_02 : 8;
+ uint32_t lumocoeff_g_03 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_g_07_04
+{
+ struct
+ {
+ uint32_t lumocoeff_g_04 : 8;
+ uint32_t lumocoeff_g_05 : 8;
+ uint32_t lumocoeff_g_06 : 8;
+ uint32_t lumocoeff_g_07 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_g_11_08
+{
+ struct
+ {
+ uint32_t lumocoeff_g_08 : 8;
+ uint32_t lumocoeff_g_09 : 8;
+ uint32_t lumocoeff_g_10 : 8;
+ uint32_t lumocoeff_g_11 : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_denoising_lumocoeff_g_13_12
+{
+ struct
+ {
+ uint32_t lumocoeff_g_12 : 8;
+ uint32_t lumocoeff_g_13 : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_denoising_regs
+{
+ union avi_isp_denoising_cfa cfa; /* 0x000 */
+ unsigned /*unused*/ : 32; /* 0x004 */
+ unsigned /*unused*/ : 32; /* 0x008 */
+ unsigned /*unused*/ : 32; /* 0x00c */
+ union avi_isp_denoising_lumocoeff_r_03_00 lumocoeff_r_03_00; /* 0x010 */
+ union avi_isp_denoising_lumocoeff_r_07_04 lumocoeff_r_07_04; /* 0x014 */
+ union avi_isp_denoising_lumocoeff_r_11_08 lumocoeff_r_11_08; /* 0x018 */
+ union avi_isp_denoising_lumocoeff_r_13_12 lumocoeff_r_13_12; /* 0x01c */
+ union avi_isp_denoising_lumocoeff_b_03_00 lumocoeff_b_03_00; /* 0x020 */
+ union avi_isp_denoising_lumocoeff_b_07_04 lumocoeff_b_07_04; /* 0x024 */
+ union avi_isp_denoising_lumocoeff_b_11_08 lumocoeff_b_11_08; /* 0x028 */
+ union avi_isp_denoising_lumocoeff_b_13_12 lumocoeff_b_13_12; /* 0x02c */
+ union avi_isp_denoising_lumocoeff_g_03_00 lumocoeff_g_03_00; /* 0x030 */
+ union avi_isp_denoising_lumocoeff_g_07_04 lumocoeff_g_07_04; /* 0x034 */
+ union avi_isp_denoising_lumocoeff_g_11_08 lumocoeff_g_11_08; /* 0x038 */
+ union avi_isp_denoising_lumocoeff_g_13_12 lumocoeff_g_13_12; /* 0x03c */
+};
+
+#endif /* _AVI_ISP_DENOISING_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_drop.h b/drivers/parrot/video/avi_regmap/avi_isp_drop.h
new file mode 100644
index 00000000000000..8116411bf8cf76
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_drop.h
@@ -0,0 +1,44 @@
+/*********************************************************************
+ * avi_isp_drop register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_DROP_H_
+#define _AVI_ISP_DROP_H_
+
+#define AVI_ISP_DROP_OFFSET_X 0x00
+#define AVI_ISP_DROP_OFFSET_Y 0x04
+
+union avi_isp_drop_offset_x
+{
+ struct
+ {
+ uint32_t offset_x : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_drop_offset_y
+{
+ struct
+ {
+ uint32_t offset_y : 16;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_drop_regs
+{
+ union avi_isp_drop_offset_x offset_x; /* 0x000 */
+ union avi_isp_drop_offset_y offset_y; /* 0x004 */
+};
+
+#endif /* _AVI_ISP_DROP_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_edge_enhancement_color_reduction_filter.h b/drivers/parrot/video/avi_regmap/avi_isp_edge_enhancement_color_reduction_filter.h
new file mode 100644
index 00000000000000..b81e7ec251eff7
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_edge_enhancement_color_reduction_filter.h
@@ -0,0 +1,93 @@
+/*********************************************************************
+ * avi_isp_edge_enhancement_color_reduction_filter register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_H_
+#define _AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_H_
+
+#define AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_EE_LUT 0x00
+
+union avi_isp_edge_enhancement_color_reduction_filter_ee_lut
+{
+ struct
+ {
+ uint32_t ee_lut : 6;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_edge_enhancement_color_reduction_filter_ee_lut_regs
+{
+ union avi_isp_edge_enhancement_color_reduction_filter_ee_lut ee_lut[256];
+};
+
+#define AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_EE_KERNEL_COEFF 0x400
+#define AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_CRF_KERNEL_COEFF 0x420
+#define AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_M_COEFF 0x440
+#define AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_D_COEFF 0x460
+
+union avi_isp_edge_enhancement_color_reduction_filter_ee_kernel_coeff
+{
+ struct
+ {
+ uint32_t ee_kernel_coeff : 11;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_edge_enhancement_color_reduction_filter_crf_kernel_coeff
+{
+ struct
+ {
+ uint32_t crf_kernel_coeff : 11;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_edge_enhancement_color_reduction_filter_m_coeff
+{
+ struct
+ {
+ uint32_t m_coeff : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_edge_enhancement_color_reduction_filter_d_coeff
+{
+ struct
+ {
+ uint32_t d_coeff : 11;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_edge_enhancement_color_reduction_filter_regs
+{
+ union avi_isp_edge_enhancement_color_reduction_filter_ee_kernel_coeff ee_kernel_coeff[6]; /* 0x400 - 0x414 */
+ unsigned /*unused*/ : 32; /* 0x018 */
+ unsigned /*unused*/ : 32; /* 0x01c */
+ union avi_isp_edge_enhancement_color_reduction_filter_crf_kernel_coeff crf_kernel_coeff[6]; /* 0x420 - 0x434 */
+ unsigned /*unused*/ : 32; /* 0x038 */
+ unsigned /*unused*/ : 32; /* 0x03c */
+ union avi_isp_edge_enhancement_color_reduction_filter_m_coeff m_coeff; /* 0x440 */
+ unsigned /*unused*/ : 32; /* 0x044 */
+ unsigned /*unused*/ : 32; /* 0x048 */
+ unsigned /*unused*/ : 32; /* 0x04c */
+ unsigned /*unused*/ : 32; /* 0x050 */
+ unsigned /*unused*/ : 32; /* 0x054 */
+ unsigned /*unused*/ : 32; /* 0x058 */
+ unsigned /*unused*/ : 32; /* 0x05c */
+ union avi_isp_edge_enhancement_color_reduction_filter_d_coeff d_coeff; /* 0x460 */
+};
+
+#endif /* _AVI_ISP_EDGE_ENHANCEMENT_COLOR_REDUCTION_FILTER_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_gamma_corrector.h b/drivers/parrot/video/avi_regmap/avi_isp_gamma_corrector.h
new file mode 100644
index 00000000000000..bf3c293b02bb7e
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_gamma_corrector.h
@@ -0,0 +1,83 @@
+/*********************************************************************
+ * avi_isp_gamma_corrector register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2013-09-05
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_GAMMA_CORRECTOR_H_
+#define _AVI_ISP_GAMMA_CORRECTOR_H_
+
+#define AVI_ISP_GAMMA_CORRECTOR_CONF 0x00
+
+union avi_isp_gamma_corrector_conf
+{
+ struct
+ {
+ uint32_t bypass : 1;
+ uint32_t palette : 1;
+ uint32_t comp_width : 1;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_gamma_corrector_regs
+{
+ union avi_isp_gamma_corrector_conf conf; /* 0x000 */
+};
+
+#define AVI_ISP_GAMMA_CORRECTOR_RY_LUT 0x1000
+
+union avi_isp_gamma_corrector_ry_lut
+{
+ struct
+ {
+ uint32_t ry_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_gamma_corrector_ry_lut_regs
+{
+ union avi_isp_gamma_corrector_ry_lut ry_lut[1024];
+};
+
+#define AVI_ISP_GAMMA_CORRECTOR_GU_LUT 0x2000
+
+union avi_isp_gamma_corrector_gu_lut
+{
+ struct
+ {
+ uint32_t gu_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_gamma_corrector_gu_lut_regs
+{
+ union avi_isp_gamma_corrector_gu_lut gu_lut[1024];
+};
+
+#define AVI_ISP_GAMMA_CORRECTOR_BV_LUT 0x3000
+
+union avi_isp_gamma_corrector_bv_lut
+{
+ struct
+ {
+ uint32_t bv_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_gamma_corrector_bv_lut_regs
+{
+ union avi_isp_gamma_corrector_bv_lut bv_lut[1024];
+};
+
+#endif /* _AVI_ISP_GAMMA_CORRECTOR_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_green_imbalance.h b/drivers/parrot/video/avi_regmap/avi_isp_green_imbalance.h
new file mode 100644
index 00000000000000..16d4305e703a73
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_green_imbalance.h
@@ -0,0 +1,157 @@
+/*********************************************************************
+ * avi_isp_green_imbalance register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_GREEN_IMBALANCE_H_
+#define _AVI_ISP_GREEN_IMBALANCE_H_
+
+#define AVI_ISP_GREEN_IMBALANCE_BAYER_CFA 0x00
+#define AVI_ISP_GREEN_IMBALANCE_OFFSET_X_Y 0x04
+#define AVI_ISP_GREEN_IMBALANCE_CELL_ID_X_Y 0x08
+#define AVI_ISP_GREEN_IMBALANCE_CELL_W 0x0c
+#define AVI_ISP_GREEN_IMBALANCE_CELL_H 0x10
+#define AVI_ISP_GREEN_IMBALANCE_CELL_W_INV 0x14
+#define AVI_ISP_GREEN_IMBALANCE_CELL_H_INV 0x18
+#define AVI_ISP_GREEN_IMBALANCE_ALPHA 0x1c
+#define AVI_ISP_GREEN_IMBALANCE_BETA 0x20
+
+union avi_isp_green_imbalance_bayer_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_offset_x_y
+{
+ struct
+ {
+ uint32_t offset_x : 9;
+ unsigned /*unused */ : 7;
+ uint32_t offset_y : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_cell_id_x_y
+{
+ struct
+ {
+ uint32_t cell_id_x : 4;
+ unsigned /*unused */ : 12;
+ uint32_t cell_id_y : 4;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_cell_w
+{
+ struct
+ {
+ uint32_t cell_w : 9;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_cell_h
+{
+ struct
+ {
+ uint32_t cell_h : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_cell_w_inv
+{
+ struct
+ {
+ uint32_t w_inv : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_cell_h_inv
+{
+ struct
+ {
+ uint32_t h_inv : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_alpha
+{
+ struct
+ {
+ uint32_t alpha : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_green_imbalance_beta
+{
+ struct
+ {
+ uint32_t beta : 17;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_green_imbalance_regs
+{
+ union avi_isp_green_imbalance_bayer_cfa bayer_cfa; /* 0x000 */
+ union avi_isp_green_imbalance_offset_x_y offset_x_y; /* 0x004 */
+ union avi_isp_green_imbalance_cell_id_x_y cell_id_x_y; /* 0x008 */
+ union avi_isp_green_imbalance_cell_w cell_w; /* 0x00c */
+ union avi_isp_green_imbalance_cell_h cell_h; /* 0x010 */
+ union avi_isp_green_imbalance_cell_w_inv cell_w_inv; /* 0x014 */
+ union avi_isp_green_imbalance_cell_h_inv cell_h_inv; /* 0x018 */
+ union avi_isp_green_imbalance_alpha alpha; /* 0x01c */
+ union avi_isp_green_imbalance_beta beta; /* 0x020 */
+};
+
+#define AVI_ISP_GREEN_IMBALANCE_GREEN_RED_COEFF_MEM 0x1000
+
+union avi_isp_green_imbalance_green_red_coeff_mem
+{
+ struct
+ {
+ uint32_t gr_coeff_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_green_imbalance_green_red_coeff_mem_regs
+{
+ union avi_isp_green_imbalance_green_red_coeff_mem red_coeff_mem[221];
+};
+
+#define AVI_ISP_GREEN_IMBALANCE_GREEN_BLUE_COEFF_MEM 0x1400
+
+union avi_isp_green_imbalance_green_blue_coeff_mem
+{
+ struct
+ {
+ uint32_t gb_coeff_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_green_imbalance_green_blue_coeff_mem_regs
+{
+ union avi_isp_green_imbalance_green_blue_coeff_mem green_coeff_mem[221];
+};
+
+#endif /* _AVI_ISP_GREEN_IMBALANCE_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_i3d_lut.h b/drivers/parrot/video/avi_regmap/avi_isp_i3d_lut.h
new file mode 100644
index 00000000000000..4b95322e3e47cf
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_i3d_lut.h
@@ -0,0 +1,69 @@
+/*********************************************************************
+ * avi_isp_i3d_lut register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_I3D_LUT_H_
+#define _AVI_ISP_I3D_LUT_H_
+
+#define AVI_ISP_I3D_LUT_LUT_OUTSIDE 0x00
+
+union avi_isp_i3d_lut_lut_outside
+{
+ struct
+ {
+ uint32_t bv_value : 8;
+ uint32_t gu_value : 8;
+ uint32_t ry_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_i3d_lut_lut_outside_regs
+{
+ union avi_isp_i3d_lut_lut_outside lut_outside[125];
+};
+
+#define AVI_ISP_I3D_LUT_LUT_INSIDE 0x200
+
+union avi_isp_i3d_lut_lut_inside
+{
+ struct
+ {
+ uint32_t bv_value : 8;
+ uint32_t gu_value : 8;
+ uint32_t ry_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_i3d_lut_lut_inside_regs
+{
+ union avi_isp_i3d_lut_lut_inside lut_inside[125];
+};
+
+#define AVI_ISP_I3D_LUT_CLIP_MODE 0x400
+
+union avi_isp_i3d_lut_clip_mode
+{
+ struct
+ {
+ uint32_t clip_en : 1;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_i3d_lut_regs
+{
+ union avi_isp_i3d_lut_clip_mode clip_mode; /* 0x400 */
+};
+
+#endif /* _AVI_ISP_I3D_LUT_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_lens_shading_correction.h b/drivers/parrot/video/avi_regmap/avi_isp_lens_shading_correction.h
new file mode 100644
index 00000000000000..4158909e5e83b6
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_lens_shading_correction.h
@@ -0,0 +1,199 @@
+/*********************************************************************
+ * avi_isp_lens_shading_correction register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_LENS_SHADING_CORRECTION_H_
+#define _AVI_ISP_LENS_SHADING_CORRECTION_H_
+
+#define AVI_ISP_LENS_SHADING_CORRECTION_BAYER_CFA 0x00
+#define AVI_ISP_LENS_SHADING_CORRECTION_OFFSET_X_Y 0x04
+#define AVI_ISP_LENS_SHADING_CORRECTION_CELL_ID_X_Y 0x08
+#define AVI_ISP_LENS_SHADING_CORRECTION_CELL_W 0x0c
+#define AVI_ISP_LENS_SHADING_CORRECTION_CELL_H 0x10
+#define AVI_ISP_LENS_SHADING_CORRECTION_CELL_W_INV 0x14
+#define AVI_ISP_LENS_SHADING_CORRECTION_CELL_H_INV 0x18
+#define AVI_ISP_LENS_SHADING_CORRECTION_ALPHA 0x1c
+#define AVI_ISP_LENS_SHADING_CORRECTION_BETA 0x20
+#define AVI_ISP_LENS_SHADING_CORRECTION_THRESHOLD 0x24
+#define AVI_ISP_LENS_SHADING_CORRECTION_GAIN 0x28
+
+union avi_isp_lens_shading_correction_bayer_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_offset_x_y
+{
+ struct
+ {
+ uint32_t offset_x : 9;
+ unsigned /*unused */ : 7;
+ uint32_t offset_y : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_cell_id_x_y
+{
+ struct
+ {
+ uint32_t cell_id_x : 4;
+ unsigned /*unused */ : 12;
+ uint32_t cell_id_y : 4;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_cell_w
+{
+ struct
+ {
+ uint32_t cell_w : 9;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_cell_h
+{
+ struct
+ {
+ uint32_t cell_h : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_cell_w_inv
+{
+ struct
+ {
+ uint32_t w_inv : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_cell_h_inv
+{
+ struct
+ {
+ uint32_t h_inv : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_alpha
+{
+ struct
+ {
+ uint32_t alpha : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_beta
+{
+ struct
+ {
+ uint32_t beta : 17;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_threshold
+{
+ struct
+ {
+ uint32_t threshold_r : 10;
+ uint32_t threshold_g : 10;
+ uint32_t threshold_b : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_lens_shading_correction_gain
+{
+ struct
+ {
+ uint32_t gain_r : 10;
+ uint32_t gain_g : 10;
+ uint32_t gain_b : 10;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_lens_shading_correction_regs
+{
+ union avi_isp_lens_shading_correction_bayer_cfa bayer_cfa; /* 0x000 */
+ union avi_isp_lens_shading_correction_offset_x_y offset_x_y; /* 0x004 */
+ union avi_isp_lens_shading_correction_cell_id_x_y cell_id_x_y; /* 0x008 */
+ union avi_isp_lens_shading_correction_cell_w cell_w; /* 0x00c */
+ union avi_isp_lens_shading_correction_cell_h cell_h; /* 0x010 */
+ union avi_isp_lens_shading_correction_cell_w_inv cell_w_inv; /* 0x014 */
+ union avi_isp_lens_shading_correction_cell_h_inv cell_h_inv; /* 0x018 */
+ union avi_isp_lens_shading_correction_alpha alpha; /* 0x01c */
+ union avi_isp_lens_shading_correction_beta beta; /* 0x020 */
+ union avi_isp_lens_shading_correction_threshold threshold; /* 0x024 */
+ union avi_isp_lens_shading_correction_gain gain; /* 0x028 */
+};
+
+#define AVI_ISP_LENS_SHADING_CORRECTION_RED_COEFF_MEM 0x1000
+
+union avi_isp_lens_shading_correction_red_coeff_mem
+{
+ struct
+ {
+ uint32_t r_coeff_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_lens_shading_correction_red_coeff_mem_regs
+{
+ union avi_isp_lens_shading_correction_red_coeff_mem red_coeff_mem[221];
+};
+
+#define AVI_ISP_LENS_SHADING_CORRECTION_GREEN_COEFF_MEM 0x1400
+
+union avi_isp_lens_shading_correction_green_coeff_mem
+{
+ struct
+ {
+ uint32_t g_coeff_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_lens_shading_correction_green_coeff_mem_regs
+{
+ union avi_isp_lens_shading_correction_green_coeff_mem green_coeff_mem[221];
+};
+
+#define AVI_ISP_LENS_SHADING_CORRECTION_BLUE_COEFF_MEM 0x1800
+
+union avi_isp_lens_shading_correction_blue_coeff_mem
+{
+ struct
+ {
+ uint32_t b_coeff_value : 8;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_lens_shading_correction_blue_coeff_mem_regs
+{
+ union avi_isp_lens_shading_correction_blue_coeff_mem blue_coeff_mem[221];
+};
+
+#endif /* _AVI_ISP_LENS_SHADING_CORRECTION_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_pedestal.h b/drivers/parrot/video/avi_regmap/avi_isp_pedestal.h
new file mode 100644
index 00000000000000..01e5a446f7b8f6
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_pedestal.h
@@ -0,0 +1,77 @@
+/*********************************************************************
+ * avi_isp_pedestal register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_PEDESTAL_H_
+#define _AVI_ISP_PEDESTAL_H_
+
+#define AVI_ISP_PEDESTAL_CFA 0x00
+#define AVI_ISP_PEDESTAL_SUB_R 0x04
+#define AVI_ISP_PEDESTAL_SUB_GB 0x08
+#define AVI_ISP_PEDESTAL_SUB_GR 0x0c
+#define AVI_ISP_PEDESTAL_SUB_B 0x10
+
+union avi_isp_pedestal_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_pedestal_sub_r
+{
+ struct
+ {
+ uint32_t sub_r : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_pedestal_sub_gb
+{
+ struct
+ {
+ uint32_t sub_gb : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_pedestal_sub_gr
+{
+ struct
+ {
+ uint32_t sub_gr : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_pedestal_sub_b
+{
+ struct
+ {
+ uint32_t sub_b : 10;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_pedestal_regs
+{
+ union avi_isp_pedestal_cfa cfa; /* 0x000 */
+ union avi_isp_pedestal_sub_r sub_r; /* 0x004 */
+ union avi_isp_pedestal_sub_gb sub_gb; /* 0x008 */
+ union avi_isp_pedestal_sub_gr sub_gr; /* 0x00c */
+ union avi_isp_pedestal_sub_b sub_b; /* 0x010 */
+};
+
+#endif /* _AVI_ISP_PEDESTAL_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_statistics_bayer.h b/drivers/parrot/video/avi_regmap/avi_isp_statistics_bayer.h
new file mode 100644
index 00000000000000..86164cc4334170
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_statistics_bayer.h
@@ -0,0 +1,162 @@
+/*********************************************************************
+ * avi_isp_statistics_bayer register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_STATISTICS_BAYER_H_
+#define _AVI_ISP_STATISTICS_BAYER_H_
+
+#define AVI_ISP_STATISTICS_BAYER_MEASURE_REQ 0x00
+#define AVI_ISP_STATISTICS_BAYER_WINDOW_X 0x04
+#define AVI_ISP_STATISTICS_BAYER_WINDOW_Y 0x08
+#define AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_X_CENTER 0x0c
+#define AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_X_SQUARED 0x10
+#define AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_Y_CENTER 0x14
+#define AVI_ISP_STATISTICS_BAYER_CIRCLE_POS_Y_SQUARED 0x18
+#define AVI_ISP_STATISTICS_BAYER_CIRCLE_RADIUS_SQUARED 0x1c
+#define AVI_ISP_STATISTICS_BAYER_INCREMENTS_LOG2 0x20
+#define AVI_ISP_STATISTICS_BAYER_SAT_THRESHOLD 0x24
+#define AVI_ISP_STATISTICS_BAYER_CFA 0x28
+#define AVI_ISP_STATISTICS_BAYER_MAX_NB_WINDOWS 0x2c
+
+union avi_isp_statistics_bayer_measure_req
+{
+ struct
+ {
+ uint32_t clear : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_window_x
+{
+ struct
+ {
+ uint32_t x_offset : 13;
+ unsigned /*unused */ : 3;
+ uint32_t x_width : 11;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_window_y
+{
+ struct
+ {
+ uint32_t y_offset : 13;
+ unsigned /*unused */ : 3;
+ uint32_t y_width : 11;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_circle_pos_x_center
+{
+ struct
+ {
+ uint32_t x_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_circle_pos_x_squared
+{
+ struct
+ {
+ uint32_t x_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_circle_pos_y_center
+{
+ struct
+ {
+ uint32_t y_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_circle_pos_y_squared
+{
+ struct
+ {
+ uint32_t y_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_circle_radius_squared
+{
+ struct
+ {
+ uint32_t radius_squared : 29;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_increments_log2
+{
+ struct
+ {
+ uint32_t x_log2_inc : 3;
+ unsigned /*unused */ : 13;
+ uint32_t y_log2_inc : 3;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_sat_threshold
+{
+ struct
+ {
+ uint32_t threshold : 10;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_cfa
+{
+ struct
+ {
+ uint32_t cfa : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_bayer_max_nb_windows
+{
+ struct
+ {
+ uint32_t x_window_count : 7;
+ unsigned /*unused */ : 9;
+ uint32_t y_window_count : 7;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_statistics_bayer_regs
+{
+ union avi_isp_statistics_bayer_measure_req measure_req; /* 0x000 */
+ union avi_isp_statistics_bayer_window_x window_x; /* 0x004 */
+ union avi_isp_statistics_bayer_window_y window_y; /* 0x008 */
+ union avi_isp_statistics_bayer_circle_pos_x_center circle_pos_x_center; /* 0x00c */
+ union avi_isp_statistics_bayer_circle_pos_x_squared circle_pos_x_squared; /* 0x010 */
+ union avi_isp_statistics_bayer_circle_pos_y_center circle_pos_y_center; /* 0x014 */
+ union avi_isp_statistics_bayer_circle_pos_y_squared circle_pos_y_squared; /* 0x018 */
+ union avi_isp_statistics_bayer_circle_radius_squared circle_radius_squared; /* 0x01c */
+ union avi_isp_statistics_bayer_increments_log2 increments_log2; /* 0x020 */
+ union avi_isp_statistics_bayer_sat_threshold sat_threshold; /* 0x024 */
+ union avi_isp_statistics_bayer_cfa cfa; /* 0x028 */
+ union avi_isp_statistics_bayer_max_nb_windows max_nb_windows; /* 0x02c */
+};
+
+#endif /* _AVI_ISP_STATISTICS_BAYER_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_statistics_yuv.h b/drivers/parrot/video/avi_regmap/avi_isp_statistics_yuv.h
new file mode 100644
index 00000000000000..4eba8f0cab501f
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_statistics_yuv.h
@@ -0,0 +1,227 @@
+/*********************************************************************
+ * avi_isp_statistics_yuv register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_STATISTICS_YUV_H_
+#define _AVI_ISP_STATISTICS_YUV_H_
+
+#define AVI_ISP_STATISTICS_YUV_MEASURE_REQ 0x00
+#define AVI_ISP_STATISTICS_YUV_MEASURE_STATUS 0x04
+#define AVI_ISP_STATISTICS_YUV_WINDOW_POS_X 0x08
+#define AVI_ISP_STATISTICS_YUV_WINDOW_POS_Y 0x0c
+#define AVI_ISP_STATISTICS_YUV_CIRCLE_POS_X_CENTER 0x10
+#define AVI_ISP_STATISTICS_YUV_CIRCLE_POS_X_SQUARED 0x14
+#define AVI_ISP_STATISTICS_YUV_CIRCLE_POS_Y_CENTER 0x18
+#define AVI_ISP_STATISTICS_YUV_CIRCLE_POS_Y_SQUARED 0x1c
+#define AVI_ISP_STATISTICS_YUV_CIRCLE_RADIUS_SQUARED 0x20
+#define AVI_ISP_STATISTICS_YUV_INCREMENTS_LOG2 0x24
+#define AVI_ISP_STATISTICS_YUV_AE_NB_VALID_Y 0x30
+#define AVI_ISP_STATISTICS_YUV_AWB_THRESHOLD 0x40
+#define AVI_ISP_STATISTICS_YUV_AWB_SUM_Y 0x44
+#define AVI_ISP_STATISTICS_YUV_AWB_SUM_U 0x48
+#define AVI_ISP_STATISTICS_YUV_AWB_SUM_V 0x4c
+#define AVI_ISP_STATISTICS_YUV_AWB_NB_GREY_PIXELS 0x50
+
+union avi_isp_statistics_yuv_measure_req
+{
+ struct
+ {
+ uint32_t measure_req : 1;
+ uint32_t clear : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_measure_status
+{
+ struct
+ {
+ uint32_t done : 1;
+ uint32_t error : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_window_pos_x
+{
+ struct
+ {
+ uint32_t window_x_start : 13;
+ unsigned /*unused */ : 3;
+ uint32_t window_x_end : 13;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_window_pos_y
+{
+ struct
+ {
+ uint32_t window_y_start : 13;
+ unsigned /*unused */ : 3;
+ uint32_t window_y_end : 13;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_circle_pos_x_center
+{
+ struct
+ {
+ uint32_t x_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_circle_pos_x_squared
+{
+ struct
+ {
+ uint32_t x_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_circle_pos_y_center
+{
+ struct
+ {
+ uint32_t y_center : 14;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_circle_pos_y_squared
+{
+ struct
+ {
+ uint32_t y_squared : 26;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_circle_radius_squared
+{
+ struct
+ {
+ uint32_t radius_squared : 29;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_increments_log2
+{
+ struct
+ {
+ uint32_t x_log2_inc : 3;
+ unsigned /*unused */ : 13;
+ uint32_t y_log2_inc : 3;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_ae_nb_valid_y
+{
+ struct
+ {
+ uint32_t nb_valid_y : 22;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_awb_threshold
+{
+ struct
+ {
+ uint32_t awb_threshold : 8;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_awb_sum_y
+{
+ struct
+ {
+ uint32_t awb_sum_y : 30;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_awb_sum_u
+{
+ struct
+ {
+ uint32_t awb_sum_u : 30;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_awb_sum_v
+{
+ struct
+ {
+ uint32_t awb_sum_v : 30;
+ };
+ uint32_t _register;
+};
+
+union avi_isp_statistics_yuv_awb_nb_grey_pixels
+{
+ struct
+ {
+ uint32_t nb_grey_pixels : 22;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_statistics_yuv_regs
+{
+ union avi_isp_statistics_yuv_measure_req measure_req; /* 0x000 */
+ union avi_isp_statistics_yuv_measure_status measure_status; /* 0x004 */
+ union avi_isp_statistics_yuv_window_pos_x window_pos_x; /* 0x008 */
+ union avi_isp_statistics_yuv_window_pos_y window_pos_y; /* 0x00c */
+ union avi_isp_statistics_yuv_circle_pos_x_center circle_pos_x_center; /* 0x010 */
+ union avi_isp_statistics_yuv_circle_pos_x_squared circle_pos_x_squared; /* 0x014 */
+ union avi_isp_statistics_yuv_circle_pos_y_center circle_pos_y_center; /* 0x018 */
+ union avi_isp_statistics_yuv_circle_pos_y_squared circle_pos_y_squared; /* 0x01c */
+ union avi_isp_statistics_yuv_circle_radius_squared circle_radius_squared; /* 0x020 */
+ union avi_isp_statistics_yuv_increments_log2 increments_log2; /* 0x024 */
+ unsigned /*unused*/ : 32; /* 0x028 */
+ unsigned /*unused*/ : 32; /* 0x02c */
+ union avi_isp_statistics_yuv_ae_nb_valid_y ae_nb_valid_y; /* 0x030 */
+ unsigned /*unused*/ : 32; /* 0x034 */
+ unsigned /*unused*/ : 32; /* 0x038 */
+ unsigned /*unused*/ : 32; /* 0x03c */
+ union avi_isp_statistics_yuv_awb_threshold awb_threshold; /* 0x040 */
+ union avi_isp_statistics_yuv_awb_sum_y awb_sum_y; /* 0x044 */
+ union avi_isp_statistics_yuv_awb_sum_u awb_sum_u; /* 0x048 */
+ union avi_isp_statistics_yuv_awb_sum_v awb_sum_v; /* 0x04c */
+ union avi_isp_statistics_yuv_awb_nb_grey_pixels awb_nb_grey_pixels; /* 0x050 */
+};
+
+#define AVI_ISP_STATISTICS_YUV_AE_HISTOGRAM_Y 0x400
+
+union avi_isp_statistics_yuv_ae_histogram_y
+{
+ struct
+ {
+ uint32_t histogram_y : 22;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_statistics_yuv_ae_histogram_y_regs
+{
+ union avi_isp_statistics_yuv_ae_histogram_y ae_histogram_y[256];
+};
+
+#endif /* _AVI_ISP_STATISTICS_YUV_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_vlformat_32to40.h b/drivers/parrot/video/avi_regmap/avi_isp_vlformat_32to40.h
new file mode 100644
index 00000000000000..43e8fe8ed87644
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_vlformat_32to40.h
@@ -0,0 +1,33 @@
+/*********************************************************************
+ * avi_isp_vlformat_32to40 register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_VLFORMAT_32TO40_H_
+#define _AVI_ISP_VLFORMAT_32TO40_H_
+
+#define AVI_ISP_VLFORMAT_32TO40_FORMAT 0x00
+
+union avi_isp_vlformat_32to40_format
+{
+ struct
+ {
+ uint32_t format : 3;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_vlformat_32to40_regs
+{
+ union avi_isp_vlformat_32to40_format format; /* 0x000 */
+};
+
+#endif /* _AVI_ISP_VLFORMAT_32TO40_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_isp_vlformat_40to32.h b/drivers/parrot/video/avi_regmap/avi_isp_vlformat_40to32.h
new file mode 100644
index 00000000000000..2d636e69cbf75f
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_isp_vlformat_40to32.h
@@ -0,0 +1,33 @@
+/*********************************************************************
+ * avi_isp_vlformat_40to32 register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2014-02-28
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_ISP_VLFORMAT_40TO32_H_
+#define _AVI_ISP_VLFORMAT_40TO32_H_
+
+#define AVI_ISP_VLFORMAT_40TO32_FORMAT 0x00
+
+union avi_isp_vlformat_40to32_format
+{
+ struct
+ {
+ uint32_t format : 3;
+ };
+ uint32_t _register;
+};
+
+struct avi_isp_vlformat_40to32_regs
+{
+ union avi_isp_vlformat_40to32_format format; /* 0x000 */
+};
+
+#endif /* _AVI_ISP_VLFORMAT_40TO32_H_ */
diff --git a/drivers/parrot/video/avi_regmap/avi_lcd.h b/drivers/parrot/video/avi_regmap/avi_lcd.h
new file mode 100644
index 00000000000000..cd6d8bfba43ac4
--- /dev/null
+++ b/drivers/parrot/video/avi_regmap/avi_lcd.h
@@ -0,0 +1,391 @@
+/*********************************************************************
+ * avi_lcd register map
+ *
+ * Vendor: Parrot
+ * Library: AVI
+ * Version: P7R3
+ * Gen-date: (Date of generation of this C code, not the IP-Xact file)
+ * 2013-10-02
+ *
+ * WARNING: This code is automatically generated from the hardware
+ * IP-Xact XML files. Do not edit directly.
+ *********************************************************************/
+
+#ifndef _AVI_LCD_H_
+#define _AVI_LCD_H_
+
+#define AVI_LCD_STATUS 0x00
+#define AVI_LCD_ITSOURCE 0x04
+#define AVI_LCD_INTERFACE 0x08
+#define AVI_LCD_DPD 0x0c
+#define AVI_LCD_TOP_FORMAT_CTRL 0x10
+#define AVI_LCD_TOP_H_TIMING0 0x20
+#define AVI_LCD_TOP_H_TIMING1 0x24
+#define AVI_LCD_TOP_V_TIMING0 0x30
+#define AVI_LCD_TOP_V_TIMING1 0x34
+#define AVI_LCD_TOP_V_TIMING2 0x38
+#define AVI_LCD_TOP_V_TIMING3 0x3c
+#define AVI_LCD_BOT_FORMAT_CTRL 0x40
+#define AVI_LCD_BOT_H_TIMING0 0x50
+#define AVI_LCD_BOT_H_TIMING1 0x54
+#define AVI_LCD_BOT_V_TIMING0 0x60
+#define AVI_LCD_BOT_V_TIMING1 0x64
+#define AVI_LCD_BOT_V_TIMING2 0x68
+#define AVI_LCD_BOT_V_TIMING3 0x6c
+#define AVI_LCD_FORCE_CLEAR 0x70
+
+union avi_lcd_status
+{
+ struct
+ {
+ uint32_t done : 1;
+ uint32_t fuf : 1;
+ uint32_t field : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_itsource
+{
+ struct
+ {
+ uint32_t done_en : 1;
+ uint32_t fuf_en : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_interface
+{
+ struct
+ {
+ uint32_t ivs : 1;
+ uint32_t ihs : 1;
+ uint32_t ipc : 1;
+ uint32_t ioe : 1;
+ uint32_t psync_rf : 1;
+ uint32_t psync_en : 1;
+ uint32_t itu656 : 1;
+ uint32_t prog : 1;
+ uint32_t clip_en : 1;
+ uint32_t free_run : 1;
+ uint32_t pad_select : 2;
+ uint32_t vsync_gen : 1;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_dpd
+{
+ struct
+ {
+ uint32_t dpd : 24;
+ uint32_t colorbar : 2;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_format_ctrl
+{
+ struct
+ {
+ uint32_t top_format_control : 5;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_h_timing0
+{
+ struct
+ {
+ uint32_t top_hsync_off : 16;
+ uint32_t top_hactive_on : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_h_timing1
+{
+ struct
+ {
+ uint32_t top_hactive_off : 16;
+ uint32_t top_htotal : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_v_timing0
+{
+ struct
+ {
+ uint32_t top_vsync_hon : 16;
+ uint32_t top_vsync_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_v_timing1
+{
+ struct
+ {
+ uint32_t top_vsync_hoff : 16;
+ uint32_t top_vsync_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_v_timing2
+{
+ struct
+ {
+ uint32_t top_vactive_on : 16;
+ uint32_t top_vactive_off : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_top_v_timing3
+{
+ struct
+ {
+ uint32_t top_vtotal : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_format_ctrl
+{
+ struct
+ {
+ uint32_t bot_format_control : 5;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_h_timing0
+{
+ struct
+ {
+ uint32_t bot_hsync_off : 16;
+ uint32_t bot_hactive_on : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_h_timing1
+{
+ struct
+ {
+ uint32_t bot_hactive_off : 16;
+ uint32_t bot_htotal : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_v_timing0
+{
+ struct
+ {
+ uint32_t bot_vsync_hon : 16;
+ uint32_t bot_vsync_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_v_timing1
+{
+ struct
+ {
+ uint32_t bot_vsync_hoff : 16;
+ uint32_t bot_vsync_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_v_timing2
+{
+ struct
+ {
+ uint32_t bot_vactive_on : 16;
+ uint32_t bot_vactive_off : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_bot_v_timing3
+{
+ struct
+ {
+ uint32_t bot_vtotal : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_force_clear
+{
+ struct
+ {
+ uint32_t force_clear : 2;
+ };
+ uint32_t _register;
+};
+
+struct avi_lcd_regs
+{
+ union avi_lcd_status status; /* 0x000 */
+ union avi_lcd_itsource itsource; /* 0x004 */
+ union avi_lcd_interface interface; /* 0x008 */
+ union avi_lcd_dpd dpd; /* 0x00c */
+ union avi_lcd_top_format_ctrl top_format_ctrl; /* 0x010 */
+ unsigned /*unused*/ : 32; /* 0x014 */
+ unsigned /*unused*/ : 32; /* 0x018 */
+ unsigned /*unused*/ : 32; /* 0x01c */
+ union avi_lcd_top_h_timing0 top_h_timing0; /* 0x020 */
+ union avi_lcd_top_h_timing1 top_h_timing1; /* 0x024 */
+ unsigned /*unused*/ : 32; /* 0x028 */
+ unsigned /*unused*/ : 32; /* 0x02c */
+ union avi_lcd_top_v_timing0 top_v_timing0; /* 0x030 */
+ union avi_lcd_top_v_timing1 top_v_timing1; /* 0x034 */
+ union avi_lcd_top_v_timing2 top_v_timing2; /* 0x038 */
+ union avi_lcd_top_v_timing3 top_v_timing3; /* 0x03c */
+ union avi_lcd_bot_format_ctrl bot_format_ctrl; /* 0x040 */
+ unsigned /*unused*/ : 32; /* 0x044 */
+ unsigned /*unused*/ : 32; /* 0x048 */
+ unsigned /*unused*/ : 32; /* 0x04c */
+ union avi_lcd_bot_h_timing0 bot_h_timing0; /* 0x050 */
+ union avi_lcd_bot_h_timing1 bot_h_timing1; /* 0x054 */
+ unsigned /*unused*/ : 32; /* 0x058 */
+ unsigned /*unused*/ : 32; /* 0x05c */
+ union avi_lcd_bot_v_timing0 bot_v_timing0; /* 0x060 */
+ union avi_lcd_bot_v_timing1 bot_v_timing1; /* 0x064 */
+ union avi_lcd_bot_v_timing2 bot_v_timing2; /* 0x068 */
+ union avi_lcd_bot_v_timing3 bot_v_timing3; /* 0x06c */
+ union avi_lcd_force_clear force_clear; /* 0x070 */
+};
+
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_0 0x80
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_OFF_0 0x84
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_1 0x88
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_OFF_1 0x8c
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_2 0x90
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_OFF_2 0x94
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_3 0x98
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_OFF_3 0x9c
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_ON_4 0xa0
+#define AVI_LCD_AVI_VSYNC_GEN_VSYNC_GEN_OFF_4 0xa4
+
+union avi_lcd_vsync_gen_on_0
+{
+ struct
+ {
+ uint32_t vsync_gen_hon : 16;
+ uint32_t vsync_gen_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_off_0
+{
+ struct
+ {
+ uint32_t vsync_gen_hoff : 16;
+ uint32_t vsync_gen_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_on_1
+{
+ struct
+ {
+ uint32_t vsync_gen_hon : 16;
+ uint32_t vsync_gen_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_off_1
+{
+ struct
+ {
+ uint32_t vsync_gen_hoff : 16;
+ uint32_t vsync_gen_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_on_2
+{
+ struct
+ {
+ uint32_t vsync_gen_hon : 16;
+ uint32_t vsync_gen_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_off_2
+{
+ struct
+ {
+ uint32_t vsync_gen_hoff : 16;
+ uint32_t vsync_gen_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_on_3
+{
+ struct
+ {
+ uint32_t vsync_gen_hon : 16;
+ uint32_t vsync_gen_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_off_3
+{
+ struct
+ {
+ uint32_t vsync_gen_hoff : 16;
+ uint32_t vsync_gen_voff : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_on_4
+{
+ struct
+ {
+ uint32_t vsync_gen_hon : 16;
+ uint32_t vsync_gen_von : 16;
+ };
+ uint32_t _register;
+};
+
+union avi_lcd_vsync_gen_off_4
+{
+ struct
+ {
+ uint32_t vsync_gen_hoff : 16;
+ uint32_t vsync_gen_voff : 16;
+ };
+ uint32_t _register;
+};
+
+struct avi_lcd_avi_vsync_gen_regs
+{
+ union avi_lcd_vsync_gen_on_0 vsync_gen_on_0; /* 0x080 */
+ union avi_lcd_vsync_gen_off_0 vsync_gen_off_0; /* 0x084 */
+ union avi_lcd_vsync_gen_on_1 vsync_gen_on_1; /* 0x088 */
+ union avi_lcd_vsync_gen_off_1 vsync_gen_off_1; /* 0x08c */
+ union avi_lcd_vsync_gen_on_2 vsync_gen_on_2; /* 0x090 */
+ union avi_lcd_vsync_gen_off_2 vsync_gen_off_2; /* 0x094 */
+ union avi_lcd_vsync_gen_on_3 vsync_gen_on_3; /* 0x098 */
+ union avi_lcd_vsync_gen_off_3 vsync_gen_off_3; /* 0x09c */
+ union avi_lcd_vsync_gen_on_4 vsync_gen_on_4; /* 0x0a0 */
+ union avi_lcd_vsync_gen_off_4 vsync_gen_off_4; /* 0x0a4 */
+};
+
+#endif /* _AVI_LCD_H_ */
diff --git a/drivers/parrot/video/avi_scaler.c b/drivers/parrot/video/avi_scaler.c
new file mode 100644
index 00000000000000..96da44721c88bc
--- /dev/null
+++ b/drivers/parrot/video/avi_scaler.c
@@ -0,0 +1,656 @@
+#include <linux/module.h>
+#include "avi_scaler.h"
+
+/**************************
+ * Scalers low-level setup
+ **************************/
+
+struct avi_scal_dimcfg {
+ unsigned ntaps;
+ unsigned nphases;
+ char coeffs[32];
+};
+
+/* Configuration for scaling factor 1 / 1 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 4 taps and 8 phases.
+ *
+ * +
+ * +|+
+ * | +
+ * + |
+ * |
+ * + | +
+ * |
+ * + | +
+ * |
+ * |
+ * + | +
+ * |
+ * |
+ * + | +
+ * |
+ * + | +
+ * -------+---------------+------++
+ * ++++ + | + +++
+ * ++ | ++
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_1_1 = {
+ .ntaps = AVI_SCAL_NTAPS_4,
+ .nphases = AVI_SCAL_NPHASES_8,
+ .coeffs = {
+ 0, -4, -5, -5, -4, -2, -1, -1, /* tap 0 */
+ 64, 62, 55, 46, 36, 24, 14, 6, /* tap 1 */
+ 0, 6, 15, 25, 36, 47, 56, 62, /* tap 2 */
+ 0, 0, -1, -2, -4, -5, -5, -3, /* tap 3 */
+ },
+};
+
+/* Configuration for scaling factor 1 / 2 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 8 taps and 4 phases.
+ *
+ * +++
+ * + | +
+ * + | +
+ * + | +
+ * |
+ * + | +
+ * + | +
+ * + | +
+ * ++++-+++---------------+++++++++
+ * + |
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_1_2 = {
+ .ntaps = AVI_SCAL_NTAPS_8,
+ .nphases = AVI_SCAL_NPHASES_4,
+ .coeffs = {
+ -2, -1, -1, -1, /* tap 0 */
+ 0, -2, -2, -3, /* tap 1 */
+ 18, 13, 7, 3, /* tap 2 */
+ 32, 31, 28, 23, /* tap 3 */
+ 18, 23, 28, 31, /* tap 4 */
+ 0, 3, 7, 13, /* tap 5 */
+ -1, -2, -2, -1, /* tap 6 */
+ -1, -1, -1, -1, /* tap 7 */
+ },
+};
+
+/* Configuration for scaling factor 1 / 3 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 16 taps and 2 phases.
+ *
+ * +++
+ * + | +
+ * + | +
+ * + | +
+ * + | +
+ * ++++++++++-----------+++++++++++
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_1_3 = {
+ .ntaps = AVI_SCAL_NTAPS_16,
+ .nphases = AVI_SCAL_NPHASES_2,
+ .coeffs = {
+ -1, -1, /* tap 0 */
+ -1, -1, /* tap 1 */
+ 0, 0, /* tap 2 */
+ -1, 0, /* tap 3 */
+ 0, -1, /* tap 4 */
+ 7, 3, /* tap 5 */
+ 17, 12, /* tap 6 */
+ 21, 20, /* tap 7 */
+ 17, 20, /* tap 8 */
+ 7, 12, /* tap 9 */
+ 0, 3, /* tap 10 */
+ -1, -1, /* tap 11 */
+ 0, 0, /* tap 12 */
+ 0, 0, /* tap 13 */
+ 0, -1, /* tap 14 */
+ -1, -1, /* tap 15 */
+ },
+};
+
+/* Configuration for scaling factor 2 / 3 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 8 taps and 4 phases.
+ *
+ * +
+ * +|+
+ * |
+ * + | +
+ * |
+ * + | +
+ * |
+ * + | +
+ * |
+ * + | +
+ * |
+ * +++++++-++-----------++-++++++++
+ * + | +
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_2_3 = {
+ .ntaps = AVI_SCAL_NTAPS_8,
+ .nphases = AVI_SCAL_NPHASES_4,
+ .coeffs = {
+ 0, -1, -1, -1, /* tap 0 */
+ -3, -2, 0, 0, /* tap 1 */
+ 14, 6, 0, -2, /* tap 2 */
+ 42, 40, 33, 24, /* tap 3 */
+ 14, 24, 33, 40, /* tap 4 */
+ -3, -2, 0, 6, /* tap 5 */
+ 0, 0, 0, -2, /* tap 6 */
+ 0, -1, -1, -1, /* tap 7 */
+ },
+};
+
+/* Configuration for scaling factor 3 / 4 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 8 taps and 4 phases.
+ *
+ * +
+ * +|+
+ * |
+ * + | +
+ * |
+ * |
+ * + | +
+ * |
+ * |
+ * + | +
+ * |
+ * + | +
+ * ++++++++-+-----------+-+++++++++
+ * + | +
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_3_4 = {
+ .ntaps = AVI_SCAL_NTAPS_8,
+ .nphases = AVI_SCAL_NPHASES_4,
+ .coeffs = {
+ -1, -1, -1, -1, /* tap 0 */
+ -2, 0, 0, 0, /* tap 1 */
+ 11, 2, -2, -3, /* tap 2 */
+ 48, 44, 35, 23, /* tap 3 */
+ 11, 23, 35, 44, /* tap 4 */
+ -2, -3, -2, 2, /* tap 5 */
+ 0, 0, 0, 0, /* tap 6 */
+ -1, -1, -1, -1, /* tap 7 */
+ },
+};
+
+/* Configuration for scaling factor 1 / 4 using lanczos
+ * with 2 lobes.
+ *
+ * This configuration uses 16 taps and 2 phases.
+ *
+ * +++++
+ * + | +
+ * ++ | ++
+ * ++ | ++
+ * ++++++++---------------+++++++++
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_1_4 = {
+ .ntaps = AVI_SCAL_NTAPS_16,
+ .nphases = AVI_SCAL_NPHASES_2,
+ .coeffs = {
+ -1, -1, /* tap 0 */
+ -1, -1, /* tap 1 */
+ -1, -1, /* tap 2 */
+ 0, 0, /* tap 3 */
+ 4, 2, /* tap 4 */
+ 9, 6, /* tap 5 */
+ 14, 12, /* tap 6 */
+ 16, 15, /* tap 7 */
+ 14, 15, /* tap 8 */
+ 9, 12, /* tap 9 */
+ 4, 6, /* tap 10 */
+ 0, 2, /* tap 11 */
+ 0, 0, /* tap 12 */
+ -1, -1, /* tap 13 */
+ -1, -1, /* tap 14 */
+ -1, -1, /* tap 15 */
+ },
+};
+
+#define SCAL_RATIO(_to, _from) (((_to) << 8) / (_from))
+
+struct avi_scal_ratio_config {
+ unsigned int ratio;
+ struct avi_scal_dimcfg const* cfg;
+};
+
+static const struct avi_scal_ratio_config avi_scal_ratios[] = {
+#define ADD_SCAL_CFG(_to, _from)					\
+	{ .ratio = SCAL_RATIO(_to, _from), .cfg = &avi_scalcfg_ ##_to##_##_from }
+ /* the matrix is the same for all upscaling ratios. It's also the default ratio */
+ ADD_SCAL_CFG(1, 1),
+ /* Various downscaling ratios. Sorted from smaller to bigger. */
+ ADD_SCAL_CFG(3, 4),
+ ADD_SCAL_CFG(2, 3),
+ ADD_SCAL_CFG(1, 2),
+ ADD_SCAL_CFG(1, 3),
+ ADD_SCAL_CFG(1, 4),
+#undef ADD_SCAL_CFG
+};
+
+static inline unsigned avi_scal_delta(unsigned a, unsigned b)
+{
+ if (a > b)
+ return a - b;
+
+ return b - a;
+}
+
+/*
+ * Return scaler's FIR parameters according to sizes passed in argument for one
+ * dimension.
+ */
+static struct avi_scal_dimcfg const* avi_scal_size2dimcfg(unsigned from,
+ unsigned to,
+ unsigned max_taps)
+{
+ unsigned long ratio = SCAL_RATIO(to, from);
+ const struct avi_scal_ratio_config *best = &avi_scal_ratios[0];
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(avi_scal_ratios); i++) {
+ const struct avi_scal_ratio_config *cur = &avi_scal_ratios[i];
+
+ if (cur->cfg->ntaps > max_taps)
+ continue;
+
+ if (avi_scal_delta(cur->ratio, ratio) <
+ avi_scal_delta(best->ratio, ratio))
+ best = cur;
+ }
+
+ return best->cfg;
+}
+
+static inline void avi_write_scal_coeffs(unsigned long base,
+ const char *coeffs)
+{
+#define SCAL_COEFFS_AT(_x) (((u32)(coeffs[_x]	   & 0xff))	  | \
+			    ((u32)(coeffs[_x + 1] & 0xff) <<  8)  | \
+			    ((u32)(coeffs[_x + 2] & 0xff) << 16)  | \
+			    ((u32)(coeffs[_x + 3] & 0xff) << 24))
+
+ AVI_WRITE(SCAL_COEFFS_AT(0), base + AVI_SCAL_COEFF0300);
+ AVI_WRITE(SCAL_COEFFS_AT(4), base + AVI_SCAL_COEFF0704);
+ AVI_WRITE(SCAL_COEFFS_AT(8), base + AVI_SCAL_COEFF1108);
+ AVI_WRITE(SCAL_COEFFS_AT(12), base + AVI_SCAL_COEFF1512);
+ AVI_WRITE(SCAL_COEFFS_AT(16), base + AVI_SCAL_COEFF1916);
+ AVI_WRITE(SCAL_COEFFS_AT(20), base + AVI_SCAL_COEFF2320);
+ AVI_WRITE(SCAL_COEFFS_AT(24), base + AVI_SCAL_COEFF2724);
+ AVI_WRITE(SCAL_COEFFS_AT(28), base + AVI_SCAL_COEFF3128);
+
+#undef SCAL_COEFFS_AT
+}
+
+/*
+ * Setup scaler's parameters for resizing a single plane (handling both
+ * dimensions if required).
+ * A NULL increment means no resize operation is requested for the given
+ * dimension.
+ * A NULL dimension configuration requests to leave coefficients unchanged.
+ */
+static void avi_setup_scalplane(unsigned long base,
+ u32 woffset,
+ u32 wincr,
+ struct avi_scal_dimcfg const* wcfg,
+ u32 hoffset,
+ u32 hincr,
+ struct avi_scal_dimcfg const* hcfg)
+{
+ u32 tp = 0;
+
+ if (wincr) {
+ /* Horizontal resizing requested. */
+ tp = wcfg->ntaps << AVI_SCAL_NTAPSX_SHIFT |
+ wcfg->nphases << AVI_SCAL_NPHASESX_SHIFT;
+
+ AVI_WRITE(woffset,
+ base + AVI_SCAL_HORIZ_DIM + AVI_SCAL_OFFSET);
+ AVI_WRITE(wincr, base + AVI_SCAL_HORIZ_DIM + AVI_SCAL_INCR);
+
+ /* Setup FIR coefficients for horizontal dimension. */
+ avi_write_scal_coeffs(base + AVI_SCAL_HORIZ_DIM, wcfg->coeffs);
+ }
+
+ if (hincr) {
+ /* Vertical resizing requested. */
+ tp |= hcfg->ntaps << AVI_SCAL_NTAPSY_SHIFT |
+ hcfg->nphases << AVI_SCAL_NPHASESY_SHIFT;
+
+ AVI_WRITE(hoffset,
+ base + AVI_SCAL_VERT_DIM + AVI_SCAL_OFFSET);
+ AVI_WRITE(hincr, base + AVI_SCAL_VERT_DIM + AVI_SCAL_INCR);
+
+		/* (duplicate OFFSET/INCR register writes removed: the two
+		 * registers are already programmed with the same values
+		 * just above) */
+ /* Setup FIR coefficients for vertical dimension. */
+ avi_write_scal_coeffs(base + AVI_SCAL_VERT_DIM, hcfg->coeffs);
+ }
+
+ /* Setup FIR number of phases and taps for both dimensions. */
+ AVI_WRITE(tp, base + AVI_SCAL_NTAPS_NPHASES);
+}
+
+/* Compute the base offset value in Q16.8.
+ *
+ * This value depends solely on the scaling factor.
+ */
+static inline unsigned avi_scal_offset(u16 from, u16 to)
+{
+ /* The offset is (F - 1) / 2, where F is the downscaling factor. This
+ * means that for upscaling (when F is smaller than 1) the offset is
+ * negative.
+ *
+ * The offset is stored in hardware in Q16.8, so we have to multiply of
+ * final offset value by 256. We can simplify ((F - 1) * 256 / 2) as
+ * (128 * F - 128) */
+ unsigned offset = ((from << 7) + to - 1) / to;
+
+ /* Here if offset is < 128 it means we are upscaling and the offset
+ * should be negative. However, the hardware doesn't handle negative
+ * offsets, so in this case I cheat by moving one pixel right and making
+ * the effective offset (1 + real_offset). This means we're skewed by
+ * one pixel but the phases still fall in the right places */
+ if (offset < 128)
+ return offset + 128; /* move one pixel to the right to get a
+ * positive value with the same phase */
+
+ return offset - 128; /* true value */
+}
+
+unsigned avi_scal_line_taps(unsigned buffer_line_sz)
+{
+ unsigned ntaps;
+
+ /* See how many lines we can buffer in the internal scaler RAM */
+ ntaps = AVI_SCAL_RAMSZ / (buffer_line_sz * 4);
+
+ if (ntaps < 2)
+ /* We can't scale lines that big! */
+ BUG();
+
+ if (ntaps <= 4)
+ return AVI_SCAL_NTAPS_4;
+ if (ntaps <= 8)
+ return AVI_SCAL_NTAPS_8;
+
+ return AVI_SCAL_NTAPS_16;
+}
+
+static void avi_scal_calc_cfg(struct avi_scal_cfg* config,
+ unsigned from_width,
+ unsigned to_width,
+ unsigned from_height,
+ unsigned to_height,
+ struct avi_dma_pixfmt pixfmt,
+ int have_planar)
+{
+ int planar = avi_pixfmt_is_planar(pixfmt) &&
+ have_planar;
+ enum avi_pixel_packing packing = avi_pixfmt_get_packing(pixfmt);
+ unsigned bufln_sz;
+
+ /* We can't bypass the scaler when it's configured in planar mode even
+ * if the frame size stays the same: we still have to upscale the chroma
+ * plane. */
+ if (unlikely(to_width == from_width &&
+ to_height == from_height &&
+ !planar)) {
+ /* no scaling needed, use bypass config */
+ memset(config, 0, sizeof(*config));
+
+ config->conf = AVI_SCAL_IN_SRC << AVI_SCAL_CONF_OUTSRC_SHIFT;
+
+ return;
+ }
+
+ /* The vertical scaler has a line buffer. To optimize memory usage we
+ * have to make it store the smallest lines. */
+ if (to_width > from_width) {
+ /* We are enlarging the picture, put the vertical scaler
+ * first */
+ config->conf = AVI_SCAL_IN_SRC << AVI_SCAL_CONF_VERTSRC_SHIFT
+ | AVI_SCAL_VERT_SRC << AVI_SCAL_CONF_HORIZSRC_SHIFT
+ | AVI_SCAL_HORIZ_SRC << AVI_SCAL_CONF_OUTSRC_SHIFT;
+ bufln_sz = from_width;
+ } else {
+ /* We are thinning the picture, put the vertical scaler last */
+ config->conf = AVI_SCAL_IN_SRC << AVI_SCAL_CONF_HORIZSRC_SHIFT
+ | AVI_SCAL_HORIZ_SRC << AVI_SCAL_CONF_VERTSRC_SHIFT
+ | AVI_SCAL_VERT_SRC << AVI_SCAL_CONF_OUTSRC_SHIFT;
+ bufln_sz = to_width;
+ }
+
+ config->size = (to_height - 1) << AVI_SCAL_HEIGHT_SHIFT |
+ (to_width - 1) << AVI_SCAL_WIDTH_SHIFT;
+
+ config->wcfg0 = avi_scal_size2dimcfg(from_width,
+ to_width,
+ AVI_SCAL_NTAPS_16);
+
+ config->wincr0 = (from_width - config->wcfg0->ntaps) << 8;
+ config->wincr0 /= max(to_width - 1, 1U);
+ config->woffset0 = avi_scal_offset(from_width, to_width);
+
+ config->hcfg0 = avi_scal_size2dimcfg(from_height,
+ to_height,
+ avi_scal_line_taps(bufln_sz));
+
+ config->hincr0 = (from_height - config->hcfg0->ntaps) << 8;
+ config->hincr0 /= max(to_height - 1, 1U);
+ config->hoffset0 = avi_scal_offset(from_height, to_height);
+
+ config->conf |= (!!planar) << AVI_SCAL_CONF_PLANAR_SHIFT;
+
+ if (planar) {
+ switch(packing) {
+ case AVI_INTERLEAVED_444_PACKING:
+ case AVI_INTERLEAVED_YUV_422_PACKING:
+ default:
+ config->wincr1 = 0;
+ config->wcfg1 = 0;
+ config->woffset1 = 0;
+ config->hincr1 = 0;
+ config->hcfg1 = 0;
+ config->hoffset1 = 0;
+ break;
+ case AVI_SEMIPLANAR_YUV_420_PACKING:
+ config->wincr1 = config->wincr0 / 2;
+ config->wcfg1 = config->wcfg0;
+ config->woffset1 = config->woffset0;
+ config->hincr1 = config->hincr0 / 2;
+ config->hcfg1 = config->hcfg0;
+ config->hoffset1 = config->hoffset0;
+ break;
+ /* NV16 */
+ case AVI_SEMIPLANAR_YUV_422_PACKING:
+ config->wincr1 = config->wincr0 / 2;
+ config->wcfg1 = config->wcfg0;
+ config->woffset1 = config->woffset0;
+ config->hincr1 = config->hincr0;
+ config->hcfg1 = config->hcfg0;
+ config->hoffset1 = config->hoffset0;
+ break;
+ }
+ } else {
+ config->wincr1 = 0;
+ config->wcfg1 = 0;
+ config->woffset1 = 0;
+ config->hincr1 = 0;
+ config->hcfg1 = 0;
+ config->hoffset1 = 0;
+ }
+}
+
+/**
+ * avi_scal_bypass() - Configure a scaler node as passthrough
+ */
+void avi_scal_bypass(struct avi_node const *scaler)
+{
+ unsigned long const base = avi_node_base(scaler);
+
+ AVI_WRITE(AVI_SCAL_IN_SRC << AVI_SCAL_CONF_OUTSRC_SHIFT,
+ base + AVI_SCAL_CONF);
+}
+EXPORT_SYMBOL(avi_scal_bypass);
+
+/**
+ * avi_scal_to_cfg() - Compute scaler configuration
+ * @config: the structure we'll fill with the computed config
+ * @from_width: frame's original width
+ * @to_width: width to resize frame to
+ * @from_height: frame's original height
+ * @to_height: height to resize frame to
+ */
+void avi_scal_to_cfg(struct avi_scal_cfg* config,
+ u16 from_width,
+ u16 to_width,
+ u16 from_height,
+ u16 to_height,
+ struct avi_dma_pixfmt pixfmt,
+ int have_planar)
+{
+ avi_scal_calc_cfg(config,
+ from_width, to_width,
+ from_height, to_height,
+ pixfmt,
+ have_planar);
+}
+EXPORT_SYMBOL(avi_scal_to_cfg);
+
+/**
+ * avi_scal_to_interleaced_cfg() - Compute scaler configuration for interleaced
+ * output.
+ * @top_config: the structure we'll fill with the computed config for the top
+ * field
+ * @bot_config: the structure we'll fill with the computed config for the bottom
+ * field
+ * @from_width: frame's original width
+ * @to_width: width to resize frame to
+ * @from_height: frame's original height
+ * @to_height: height to resize frame to (the complete frame size, not just the
+ * field's).
+ */
+void avi_scal_to_interleaced_cfg(struct avi_scal_cfg *top_config,
+ struct avi_scal_cfg *bot_config,
+ u16 from_width,
+ u16 to_width,
+ u16 from_height,
+ u16 to_height,
+ struct avi_dma_pixfmt pixfmt,
+ int have_planar)
+{
+ avi_scal_calc_cfg(top_config,
+ from_width, to_width,
+ from_height, roundup(to_height, 2) / 2,
+ pixfmt,
+ have_planar);
+
+ *bot_config = *top_config;
+
+ /* We offset the bottom field by one output line. Incr moves two output
+ * lines at a time (since we divided to_height by two above) so we can
+ * use that to speed up the computation. */
+ bot_config->hoffset0 += bot_config->hincr0 / 2;
+ bot_config->hoffset1 += bot_config->hincr1 / 2;
+}
+EXPORT_SYMBOL(avi_scal_to_interleaced_cfg);
+
+/*
+ * avi_setup_scal() - Apply configuration to scaler.
+ * @scaler: node pointing to the scaler to setup
+ * @width: output width after resize operation
+ * @height: output height after resize operation
+ * @config: configuration to apply
+ */
+void avi_scal_setup(struct avi_node const* scaler,
+ struct avi_scal_cfg const* config)
+{
+ unsigned long const base = avi_node_base(scaler);
+
+ /* Setup format and resizing stages. */
+ AVI_WRITE(config->conf, base + AVI_SCAL_CONF);
+
+ /* Setup dimensions sizes. */
+ AVI_WRITE(config->size, base + AVI_SCAL_SIZEOUT);
+
+ /* Setup plane 0. */
+ avi_setup_scalplane(base + AVI_SCAL_PLANE0,
+ config->woffset0,
+ config->wincr0,
+ config->wcfg0,
+ config->hoffset0,
+ config->hincr0,
+ config->hcfg0);
+
+ if (config->wincr1 || config->hincr1)
+ avi_setup_scalplane(base + AVI_SCAL_PLANE1,
+ config->woffset1,
+ config->wincr1,
+ config->wcfg1,
+ config->hoffset1,
+ config->hincr1,
+ config->hcfg1);
+ else
+ /* Otherwise we use the same config for both planes */
+ avi_setup_scalplane(base + AVI_SCAL_PLANE1,
+ config->woffset0,
+ config->wincr0,
+ config->wcfg0,
+ config->hoffset0,
+ config->hincr0,
+ config->hcfg0);
+}
+EXPORT_SYMBOL(avi_scal_setup);
+
+void avi_scal_setup_oneshot(struct avi_node const *scaler,
+ u16 from_width, u16 to_width,
+ u16 from_height, u16 to_height,
+ struct avi_dma_pixfmt pixfmt,
+ int have_planar)
+{
+ struct avi_scal_cfg scalcfg;
+
+ avi_scal_to_cfg(&scalcfg,
+ from_width, to_width,
+ from_height, to_height,
+ pixfmt,
+ have_planar);
+ avi_scal_setup(scaler, &scalcfg);
+}
+EXPORT_SYMBOL(avi_scal_setup_oneshot);
+
+void avi_scal_set_field(struct avi_node const *scaler,
+ u16 from_height,
+ u16 to_height,
+ enum avi_field field)
+{
+ /* I'm not sure if this code works for planar but I'm pretty sure the
+ * rest of the code in this file is broken for planar anyway. */
+ unsigned long base = avi_node_base(scaler) + AVI_SCAL_PLANE0;
+ unsigned hoffset;
+ u32 hincr;
+
+ hoffset = avi_scal_offset(from_height, to_height);
+
+ if (field == AVI_BOT_FIELD) {
+ hincr = AVI_READ(base + AVI_SCAL_VERT_DIM + AVI_SCAL_INCR);
+ /* Offset by one line */
+ hoffset += hincr / 2;
+ }
+
+ AVI_WRITE(hoffset, base + AVI_SCAL_VERT_DIM + AVI_SCAL_OFFSET);
+
+ /* XXX update plan1 hoffset if needed */
+}
+EXPORT_SYMBOL(avi_scal_set_field);
diff --git a/drivers/parrot/video/avi_scaler.h b/drivers/parrot/video/avi_scaler.h
new file mode 100644
index 00000000000000..829197ed4df4e2
--- /dev/null
+++ b/drivers/parrot/video/avi_scaler.h
@@ -0,0 +1,89 @@
+#ifndef _AVI_SCALER_H_
+#define _AVI_SCALER_H_
+
+/*
+ * linux/drivers/parrot/video/avi_scaler.h
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * @author Lionel Flandrin <lionel.flandrin@parrot.com>
+ * @date 17-Jun-2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file contains the scaler configuration code
+ */
+
+#include "avi_compat.h"
+#include "avi_pixfmt.h"
+
+/**************
+ * Scalers API
+ **************/
+
+struct avi_scal_dimcfg;
+
+/**
+ * struct avi_scal_cfg - Precomputed scaler configuration
+ * @conf: scaler main configuration register value
+ * @size: packed output dimensions, written to AVI_SCAL_SIZEOUT
+ * @woffset0: horizontal start offset for plane 0
+ * @wincr0: horizontal increment for plane 0
+ * @wcfg0: horizontal filter coefficients for plane 0
+ * @hoffset0: vertical start offset for plane 0
+ * @hincr0: vertical increment for plane 0
+ * @hcfg0: vertical filter coefficients for plane 0
+ * @woffset1: same fields for plane 1; when both @wincr1 and @hincr1
+ * are 0, avi_scal_setup() reuses the plane-0 configuration
+ * for plane 1
+ */
+struct avi_scal_cfg {
+ u32 conf;
+ u32 size;
+ u32 woffset0;
+ u32 wincr0;
+ struct avi_scal_dimcfg const* wcfg0;
+ u32 hoffset0;
+ u32 hincr0;
+ struct avi_scal_dimcfg const* hcfg0;
+ u32 woffset1;
+ u32 wincr1;
+ struct avi_scal_dimcfg const* wcfg1;
+ u32 hoffset1;
+ u32 hincr1;
+ struct avi_scal_dimcfg const* hcfg1;
+};
+
+extern void avi_scal_bypass(struct avi_node const *);
+extern void avi_scal_to_cfg(struct avi_scal_cfg*,
+ u16, u16, u16, u16,
+ struct avi_dma_pixfmt,
+ int);
+
+extern void avi_scal_to_interleaced_cfg(struct avi_scal_cfg*,
+ struct avi_scal_cfg*,
+ u16, u16, u16, u16,
+ struct avi_dma_pixfmt,
+ int);
+
+extern void avi_scal_setup(struct avi_node const*,
+ struct avi_scal_cfg const*);
+/* The "slow" configuration that computes a temporary config and applies it
+ * directly. If you must reconfigure the scaler several times per second you
+ * should probably precompute the configurations once and then use
+ * avi_setup_scal when needed. */
+extern void avi_scal_setup_oneshot(struct avi_node const *,
+ u16, u16, u16, u16,
+ struct avi_dma_pixfmt,
+ int);
+
+/* Function used to reconfigure the scaler between frames when it's used to drop
+ * the unnecessary field for interlaced output */
+extern void avi_scal_set_field(struct avi_node const *,
+ u16, u16,
+ enum avi_field);
+
+#endif /* _AVI_SCALER_H_ */
diff --git a/drivers/parrot/video/avi_segment.c b/drivers/parrot/video/avi_segment.c
new file mode 100644
index 00000000000000..660ad432f8477e
--- /dev/null
+++ b/drivers/parrot/video/avi_segment.c
@@ -0,0 +1,1811 @@
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include "avi_compat.h"
+#include "avi_segment.h"
+#include "avi_dma.h"
+#include "avi_debug.h"
+#include "avi_scaler.h"
+#include "avi_pixfmt.h"
+
+#ifdef DEBUG
+
+#define dprintk(format_, args...) \
+ pr_debug("avi segment: " format_, ##args)
+
+#define dprintk_s(s_, format_, args...) \
+ dev_dbg((s_)->owner, "[%s] " format_, (s_)->id, ##args)
+
+#else /* DEBUG */
+#define dprintk(format_, args...) do {} while(0)
+#define dprintk_s(s_, format_, args...) (void)s_
+#endif /* DEBUG */
+
+/* Global registry of all live segments.
+ *
+ * @lock protects @segment_list; it must be held across lookups and
+ * list insertion/removal (see avi_segment_find and
+ * avi_segment_teardown). */
+struct avi_segment_index {
+ struct mutex lock;
+ struct list_head segment_list;
+};
+
+static struct avi_segment_index avi_segment_index = {
+ .lock = __MUTEX_INITIALIZER(avi_segment_index.lock),
+ .segment_list = LIST_HEAD_INIT(avi_segment_index.segment_list),
+};
+
+/*
+ * This function contains the heuristic used to build the segments. Hopefully it
+ * should handle all the cases and the client application won't have to bother
+ * tweaking the order of the nodes within a segment.
+ *
+ * This function is called with avi_segment_index.lock held.
+ *
+ * Returns 0 on success, -ENOMEM if the node array cannot be allocated,
+ * or -ENODEV when some capabilities cannot be satisfied; in the latter
+ * case s->caps is overwritten with the mask of missing capabilities and
+ * every node claimed so far is released.
+ */
+static int avi_segment_satisfy_caps(struct avi_segment *s)
+{
+ unsigned long caps = s->caps;
+ unsigned long missing = 0;
+ int ret = 0;
+ int i = 0;
+ unsigned long flags;
+ struct avi_node *n;
+
+ /* First some basic (and probably non-exhaustive) sanity checks on the
+ * capabilities. Some combinations make no sense (like having both a
+ * DMA_OUT and an LCD or multiple LCDs for instance) */
+ BUG_ON(!caps);
+
+ BUG_ON(avi_cap_check(caps));
+
+ /* At the moment each capability has an associated node. This might
+ * change later on.
+ */
+
+ /* Play nicely with legacy code. We don't want it to allocate nodes in
+ * our back. */
+ spin_lock_irqsave(&avi_ctrl.cfg_lck, flags);
+
+ /* Planar special case:
+ * If requested caps contain DMA_IN, SCALER and PLANAR
+ * we try to find a "PLANAR" node (DMA_IN)
+ */
+ s->dma_in_planar = NULL;
+ if (caps & AVI_CAP_PLANAR) {
+ n = avi_cap_get_node(AVI_CAP_PLANAR);
+ if (n) {
+ n->busy = 1;
+ n->assigned_caps = AVI_CAP_PLANAR;
+ }
+#ifdef AVI_BACKWARD_COMPAT
+ else if ((avi_get_revision() < AVI_REVISION_3))
+ /* for AVI revision 3, DMA IN FIFO works with
+ * semi-planar pixel formats instead of revision 1 and 2
+ * where they are bugged. So in this case, a second DMA
+ * IN FIFO is mandatory for managing semi-planar */
+ missing |= (AVI_CAP_PLANAR);
+#endif /* AVI_BACKWARD_COMPAT */
+ s->dma_in_planar = n;
+ caps &= ~(AVI_CAP_PLANAR);
+ }
+ /* Unlock CFG to allow kzalloc() below with GFP_KERNEL. */
+ spin_unlock_irqrestore(&avi_ctrl.cfg_lck, flags);
+
+ /*
+ * The Hamming weight is the number of bits set in the int.
+ */
+ s->nodes_nr = hweight_long(caps);
+ s->nodes = kzalloc(s->nodes_nr * sizeof(*s->nodes), GFP_KERNEL);
+ if (!s->nodes) {
+ /* Release the planar FIFO claimed above before bailing out */
+ if (s->dma_in_planar) {
+ s->dma_in_planar->assigned_caps = 0;
+ s->dma_in_planar->busy = 0;
+ }
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ /* Play nicely with legacy code. We don't want it to allocate nodes in
+ * our back. */
+ spin_lock_irqsave(&avi_ctrl.cfg_lck, flags);
+
+#define AVI_SEGMENT_FIND_NODE(cap) do { \
+ if (caps & (cap)) { \
+ n = avi_cap_get_node(cap); \
+ if (!n) { \
+ missing |= (cap); \
+ } else { \
+ n->busy = 1; \
+ n->assigned_caps = cap; \
+ } \
+ s->nodes[i++] = n; \
+ caps &= ~(cap); \
+ } \
+ } while(0)
+
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_0);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_1);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_2);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_3);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_4);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CAM_5);
+
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_DMA_IN);
+
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_STATS_BAYER_0);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_STATS_BAYER_1);
+
+ /* Selecting one part of ISP (Bayer or YUV)
+ * implies select of all modules
+ * It does not make sense to take only a part of the ISP-chain
+ * See AVI_CAPS_ISP in avi_cap.h
+ * For the ISP chain, the order is fixed
+ */
+ if (caps & (AVI_CAP_ISP_CHAIN_BAYER|AVI_CAP_ISP_CHAIN_YUV)) {
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_ISP_CHAIN_BAYER);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_GAM);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CONV);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_STATS_YUV);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_ISP_CHAIN_YUV);
+ }
+
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_SCAL);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_BUFFER);
+
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_CONV);
+
+ /* In P7R1 the gamma and the LCD are directly connected in hardware,
+ * therefore we have to put the gamma just before the LCD here if we
+ * want to remain compatible with this version */
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_GAM);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_LCD_0);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_LCD_1);
+ AVI_SEGMENT_FIND_NODE(AVI_CAP_DMA_OUT);
+
+ /* We shouldn't have unhandled capabilities at that point. If we have it
+ * means our code is incomplete or the caller gave us garbage (undefined
+ * caps in the bitfield) */
+ BUG_ON(caps);
+
+ if (missing) {
+ int j;
+
+ /* Rollback: release every node claimed so far */
+ for (j = 0; j < s->nodes_nr; j++)
+ if (s->nodes[j]) {
+ s->nodes[j]->assigned_caps = 0;
+ s->nodes[j]->busy = 0;
+ }
+ kfree(s->nodes);
+ if (s->dma_in_planar) {
+ s->dma_in_planar->assigned_caps = 0;
+ s->dma_in_planar->busy = 0;
+ }
+ /* update s->caps with the unsatisfied capabilities */
+ s->caps = missing;
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ /* We should have filled the entire array by that point */
+ BUG_ON(i != s->nodes_nr);
+
+unlock:
+ spin_unlock_irqrestore(&avi_ctrl.cfg_lck, flags);
+
+ return ret;
+}
+
+/* Set sink's source to src_id in the AVI interconnect. If src_id is 0 it
+ * disconnects the node.
+ *
+ * Each 32-bit interconnect register packs four 8-bit source ids, hence
+ * the base rounding and the 8*(sink_id%4) shift below. */
+static void avi_segment_set_node_source(struct avi_node *inter,
+ unsigned sink_id,
+ unsigned src_id)
+{
+ unsigned base;
+ unsigned shift;
+ u32 reg;
+
+ BUG_ON(src_id == AVI_SRCSINK_NONE);
+
+ /* We have 4 src_ids per 32 bit registers. */
+ base = avi_node_base(inter) + (sink_id & ~(3UL));
+ /* Now that we have the base we need to find the layout within
+ * the register */
+ shift = (sink_id % 4) * 8;
+
+ reg = AVI_READ(base);
+
+ /* We mask the previous value of the src id */
+ reg &= ~(0xffUL << shift);
+ reg |= src_id << shift;
+
+ AVI_WRITE(reg, base);
+}
+
+/* Route src_id to one of the four inputs of a blender node. */
+static inline void avi_segment_set_blender_src_id(struct avi_node *inter,
+ struct avi_node *blender,
+ unsigned input,
+ unsigned src_id)
+{
+ BUG_ON(input >= 4);
+
+ avi_segment_set_node_source(inter, blender->sink_id + input, src_id);
+}
+
+/* Connect src to a blender input. If src is NULL it disconnects the input.
+ *
+ * A NULL or hidden source is parked at position (0xffff, 0xffff) with
+ * alpha 0 so it does not show up in the blend; otherwise the source's
+ * layout (x, y, alpha) is programmed. For interlaced output the y
+ * offset is halved since the blender works on fields, not frames. */
+static inline void avi_segment_set_blender_source(struct avi_node *inter,
+ struct avi_node *blender,
+ unsigned input,
+ struct avi_segment *src)
+{
+ unsigned src_id = 0;
+
+ /* The segment's output node is the last one of its node array */
+ if (src)
+ src_id = src->nodes[src->nodes_nr - 1]->src_id;
+
+ if (!src || src->layout.hidden) {
+ /* hide plane */
+ avi_setup_blender_input(blender, input,
+ 0xffff, 0xffff,
+ 0);
+ } else {
+ unsigned blender_y = src->layout.y;
+
+ if (src->output_format.interlaced)
+ /* If the stream is interlaced we have to use the offset
+ * in the field, not the full frame */
+ blender_y /= 2;
+
+ avi_setup_blender_input(blender, input,
+ src->layout.x, blender_y,
+ src->layout.alpha);
+ }
+
+ avi_segment_set_blender_src_id(inter, blender, input, src_id);
+}
+
+/* must be called with avi_ctrl.cfg_lck held
+ *
+ * Daisy-chains the segment's nodes in array order: each node's sink is
+ * fed by the previous node's source. When a separate planar DMA-in
+ * FIFO exists, it is wired to the scaler's second input (sink_id + 1). */
+static void avi_segment_connect_nodes(struct avi_segment *s)
+{
+ struct avi_node *inter = avi_get_node(AVI_INTER_NODE);
+ struct avi_node *scaler;
+ int i;
+
+ avi_lock_node(inter);
+
+ for (i = 1; i < s->nodes_nr; i++)
+ avi_segment_set_node_source(inter,
+ s->nodes[i]->sink_id,
+ s->nodes[i - 1]->src_id);
+
+ if (s->dma_in_planar) {
+ scaler = avi_segment_get_node(s, AVI_CAP_SCAL);
+
+ BUG_ON(!scaler);
+
+ avi_segment_set_node_source(inter,
+ scaler->sink_id + 1,
+ s->dma_in_planar->src_id);
+ }
+
+ avi_unlock_node(inter);
+}
+
+/* Must be called with avi_ctrl.cfg_lck held
+ *
+ * Undoes avi_segment_connect_nodes(): every node sink (and the planar
+ * FIFO's scaler input, if any) is set back to source id 0, then the
+ * interconnect change is applied immediately. */
+static void avi_segment_disconnect_nodes(struct avi_segment *s)
+{
+ struct avi_node *inter = avi_get_node(AVI_INTER_NODE);
+ int i;
+
+ avi_lock_node(inter);
+
+ if (s->nodes_nr)
+ for (i = 0; i < s->nodes_nr; i++)
+ avi_segment_set_node_source(inter,
+ s->nodes[i]->sink_id,
+ 0);
+
+ if (s->dma_in_planar) {
+ struct avi_node *scaler = avi_segment_get_node(s, AVI_CAP_SCAL);
+
+ BUG_ON(!scaler);
+
+ avi_segment_set_node_source(inter,
+ scaler->sink_id + 1,
+ 0);
+ }
+
+ avi_unlock_node(inter);
+
+ /* The node disconnection will only take place at the next clear, but
+ * since we're in the process of destroying the segment it won't happen
+ * any time soon. Therefore we force the interconnect change right
+ * here. */
+ avi_apply_node(inter);
+}
+
+/* Look up a segment by its unique id string.
+ *
+ * Returns NULL when no segment with that id exists. Must be called
+ * with the index lock held. */
+struct avi_segment *avi_segment_find(const char *id)
+{
+ struct avi_segment *found = NULL;
+ struct avi_segment *cur;
+
+ BUG_ON(!id || !*id);
+
+ list_for_each_entry(cur, &avi_segment_index.segment_list, list) {
+ if (!strncmp(cur->id, id, ARRAY_SIZE(cur->id))) {
+ found = cur;
+ break;
+ }
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(avi_segment_find);
+
+/* Reconfigure all nodes in the segment to sane default values.
+ *
+ * Must be called with s->lock held.
+ *
+ * FIFO nodes are configured as DMA or video-link endpoints depending on
+ * their position in the segment and its DMA capabilities; converters and
+ * gammas are set to passthrough, scalers to bypass. An unknown node
+ * type (or a rotator, which is unsupported here) triggers a BUG(). */
+static void avi_segment_reset(struct avi_segment *s)
+{
+ struct avi_node *n;
+ int i;
+
+ for (i = 0; i < s->nodes_nr; i++) {
+ n = s->nodes[i];
+
+ switch (n->type) {
+ case AVI_FIFO_NODE_TYPE:
+ {
+ struct avi_fifo_registers fifo_regs;
+
+ memset(&fifo_regs, 0, sizeof(fifo_regs));
+
+ if ((s->caps & AVI_CAP_DMA_IN) && i == 0)
+ /* DMA IN fifo */
+ fifo_regs.cfg.srctype = AVI_FIFO_CFG_DMA_TYPE;
+ else
+ fifo_regs.cfg.srctype = AVI_FIFO_CFG_VL_TYPE;
+
+
+ if ((s->caps & AVI_CAP_DMA_OUT) &&
+ i == (s->nodes_nr - 1))
+ /* DMA OUT fifo */
+ fifo_regs.cfg.dsttype = AVI_FIFO_CFG_DMA_TYPE;
+ else
+ fifo_regs.cfg.dsttype = AVI_FIFO_CFG_VL_TYPE;
+
+ fifo_regs.cfg.dstbitenc = 0x8; /* 8888 */
+ fifo_regs.cfg.srcbitenc = 0x8; /* 8888 */
+ fifo_regs.cfg.itline = 0xf;
+ fifo_regs.cfg.dithering = 1; /* MSB filling */
+
+ fifo_regs.swap0._register = AVI_FIFO_NULL_SWAP;
+ fifo_regs.swap1._register = AVI_FIFO_NULL_SWAP;
+
+ fifo_regs.timeout.pincr = 1;
+ fifo_regs.timeout.issuing_capability = 7;
+
+ fifo_regs.dmafxycfg.dmafxnb = 1;
+ fifo_regs.dmafxycfg.dmafynb = 1;
+
+ avi_fifo_set_registers(n, &fifo_regs);
+
+ /* The planar FIFO mirrors the main DMA-in FIFO */
+ if (s->dma_in_planar && i == 0)
+ avi_fifo_set_registers(s->dma_in_planar, &fifo_regs);
+ }
+ break;
+ case AVI_CONV_NODE_TYPE:
+ /* Put the identity matrix in the converter */
+ avi_setup_conv_colorspace(n,
+ AVI_NULL_CSPACE,
+ AVI_NULL_CSPACE);
+ break;
+ case AVI_GAM_NODE_TYPE:
+ /* Bypass the gamma */
+ avi_gam_setup(n, 1, 0, 0);
+ break;
+ case AVI_LCD_NODE_TYPE:
+ {
+ struct avi_lcd_regs lcd_regs;
+
+ memset(&lcd_regs, 0, sizeof(lcd_regs));
+
+ /* Default pixel colour. Ugly yellow. */
+ lcd_regs.dpd.dpd = 0xd4e523,
+ avi_lcd_set_registers(n, &lcd_regs);
+ }
+ break;
+ case AVI_CAM_NODE_TYPE:
+ {
+ struct avi_cam_registers cam_regs;
+
+ memset(&cam_regs, 0, sizeof(cam_regs));
+
+ avi_cam_set_registers(n, &cam_regs);
+ }
+ break;
+ case AVI_SCALROT_NODE_TYPE:
+ switch(n->node_id)
+ {
+ case AVI_SCAL00_NODE:/* scaler */
+ case AVI_SCAL10_NODE:
+ /* Setup scaler in bypass by default */
+ avi_scal_bypass(n);
+ break;
+ case AVI_ROT0_NODE:/* rotator */
+ case AVI_ROT1_NODE:
+ BUG();
+ break;
+ default:
+ BUG();
+ break;
+ }
+ break;
+
+ case AVI_ISP_CHAIN_BAYER_NODE_TYPE:
+ avi_isp_chain_bayer_bypass(n);
+ break;
+
+ case AVI_STATS_BAYER_NODE_TYPE:
+ /* Nop */
+ break;
+
+ case AVI_STATS_YUV_NODE_TYPE:
+ /* Nothing to be done, the module always forward the
+ * stream */
+ break;
+
+ case AVI_ISP_CHAIN_YUV_NODE_TYPE:
+ avi_isp_chain_yuv_bypass(n);
+ break;
+
+ default:
+ BUG();
+ }
+ }
+}
+
+/* Handle FIFO start address reconfiguration for interlaced video.
+ *
+ * Only relevant for interlaced DMA-in segments: for the bottom field
+ * the plane start addresses are shifted by half a line_size (the
+ * line_size is expected to already include the one-line skip). Both
+ * the main FIFO and the optional planar FIFO are updated. */
+static inline void avi_segment_update_dma_field(struct avi_segment *s)
+{
+ if (s->input_format.interlaced && (s->caps & AVI_CAP_DMA_IN)) {
+ struct avi_node *fifo0;
+ struct avi_node *fifo1;
+ dma_addr_t start0 = s->plane0_base;
+ dma_addr_t start1 = s->plane1_base;
+
+ if (s->cur_field == AVI_BOT_FIELD) {
+ /* Offset the beginning of the video buffer by one line
+ * to get the bottom field. Since line_size must already
+ * contain the offset to skip one line we must divide it
+ * by 2
+ *
+ * XXX I'm not really satisfied with this method, maybe
+ * the client driver should tell us where the bottom
+ * field is? The problem of course is that if we need to
+ * use the scaler to drop the useless field we *must* be
+ * in INTERLACED mode, ALTERNATE can't work. That
+ * constrains what we can do here. */
+ start0 += s->input_format.plane0.line_size / 2;
+ start1 += s->input_format.plane1.line_size / 2;
+ }
+
+ fifo0 = avi_segment_get_node(s, AVI_CAP_DMA_IN);
+
+ BUG_ON(fifo0 == NULL);
+
+ avi_fifo_set_dmasa(fifo0, start0, start1);
+
+ fifo1 = avi_segment_get_node(s, AVI_CAP_PLANAR);
+ if (fifo1)
+ avi_fifo_set_dmasa(fifo1, start0, start1);
+ }
+}
+
+/* Record the current field for this segment and reprogram the scaler
+ * and DMA start addresses when interlacing requires it.
+ *
+ * The field is inverted when the segment is displayed on an odd line,
+ * and AVI_NONE_FIELD short-circuits everything (progressive stream). */
+static void avi_segment_update_field(struct avi_segment *s, enum avi_field *field)
+{
+ s->cur_field = *field;
+
+ if (*field == AVI_NONE_FIELD)
+ /* Nothing left to do here */
+ return;
+
+ BUG_ON(*field != AVI_TOP_FIELD && *field != AVI_BOT_FIELD);
+
+ /* If the segment is displayed on an odd line we have to inverse the
+ * field for interlaced streams*/
+ if (s->layout.y % 2)
+ s->cur_field = (*field == AVI_BOT_FIELD) ?
+ AVI_TOP_FIELD : AVI_BOT_FIELD;
+
+ if (!s->input_format.interlaced && s->output_format.interlaced) {
+ /* We use the scaler to drop the useless field*/
+ struct avi_node *scal = avi_segment_get_node(s, AVI_CAP_SCAL);
+
+ BUG_ON(!scal);
+
+ avi_scal_set_field(scal,
+ s->input_format.height,
+ s->output_format.height,
+ s->cur_field);
+ }
+
+ avi_segment_update_dma_field(s);
+}
+
+/* I couldn't find a way to factorize these two function simply, so I use a
+ * macro instead...
+ *
+ * Expands to avi_segment_propagate_irq_<dir>(): walks every connected
+ * segment in <_member> (sink array for "forward", source array for
+ * "backward"), updates its field, deactivates it when in one-shot mode,
+ * runs its client IRQ handler and recurses unless the handler returned
+ * AVI_IRQ_HANDLED. Each branch gets a private copy of the field so one
+ * branch's changes cannot leak into the others. */
+#define AVI_SEGMENT_PROPAGATE_IRQ(_dir, _count, _member) \
+ static void avi_segment_propagate_irq_##_dir(struct avi_segment *s, \
+ enum avi_field field) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < _count; i++) { \
+ struct avi_segment *next = s->_member[i]; \
+ \
+ if (next) { \
+ /* Each branch can have its own field it can \
+ * manipulate at will without impacting the \
+ * other branches */ \
+ enum avi_field nfield = field; \
+ unsigned status = 0; \
+ \
+ avi_segment_update_field(next, &nfield); \
+ \
+ if (next->active == AVI_SEGMENT_ACTIVE_ONESHOT)\
+ avi_segment_deactivate(next); \
+ \
+ if (next->irq_handler_f) \
+ status = next->irq_handler_f(next, \
+ &nfield); \
+ \
+ if (!(status & AVI_IRQ_HANDLED)) \
+ avi_segment_propagate_irq_##_dir( \
+ next, \
+ nfield); \
+ \
+ } \
+ } \
+ }
+
+AVI_SEGMENT_PROPAGATE_IRQ(forward, AVI_SEGMENT_MAX_SINK, sink)
+AVI_SEGMENT_PROPAGATE_IRQ(backward, AVI_SEGMENT_MAX_SRC, source)
+
+/* Top-level interrupt handler shared by every segment that owns an IRQ
+ * node.
+ *
+ * Checks the node's IRQ flag (returning IRQ_NONE for stray interrupts),
+ * deactivates one-shot segments, runs the client handler if registered,
+ * acks the node IRQ unless the client already did (AVI_IRQ_ACKED) and
+ * propagates the interrupt to connected segments in both directions
+ * unless the client reported it handled (AVI_IRQ_HANDLED). */
+static irqreturn_t avi_segment_irq_handler(int irq, void *priv)
+{
+ struct avi_segment *s = priv;
+ enum avi_field field = AVI_NONE_FIELD;
+ int status = 0;
+
+ if (!avi_node_get_irq_flag(s->irq_node)) {
+ /* Use an explicit log level and the module prefix: a bare
+ * printk() falls back to KERN_DEFAULT and is easy to miss. */
+ pr_warn("avi segment: stray IRQ\n");
+ return IRQ_NONE;
+ }
+
+ if (s->active == AVI_SEGMENT_ACTIVE_ONESHOT)
+ avi_segment_deactivate(s);
+
+ if (s->irq_handler_f)
+ status = s->irq_handler_f(s, &field);
+
+ if (!(status & AVI_IRQ_ACKED))
+ avi_ack_node_irq(s->irq_node);
+
+ if (!(status & AVI_IRQ_HANDLED)) {
+ avi_segment_update_field(s, &field);
+
+ avi_segment_propagate_irq_forward (s, field);
+ avi_segment_propagate_irq_backward(s, field);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Install avi_segment_irq_handler on the segment's IRQ node, with the
+ * node IRQ disabled and acked first. Returns request_irq()'s result. */
+static int avi_segment_setup_irq(struct avi_segment *s)
+{
+ BUG_ON(!s->irq_node);
+
+ avi_disable_node_irq(s->irq_node);
+ avi_ack_node_irq(s->irq_node);
+
+ return request_irq(avi_node_irq(s->irq_node),
+ &avi_segment_irq_handler,
+ 0,
+ s->id, s);
+}
+
+/* Disable the node IRQ and release the handler installed by
+ * avi_segment_setup_irq(). */
+static void avi_segment_free_irq(struct avi_segment *s)
+{
+ BUG_ON(!s->irq_node);
+
+ avi_disable_node_irq(s->irq_node);
+ free_irq(avi_node_irq(s->irq_node), s);
+}
+
+/* Unmask the segment's IRQ node interrupt. */
+void avi_segment_enable_irq(struct avi_segment *s)
+{
+ BUG_ON(!s->irq_node);
+
+ avi_enable_node_irq(s->irq_node);
+}
+EXPORT_SYMBOL(avi_segment_enable_irq);
+
+/* Mask the segment's IRQ node interrupt. */
+void avi_segment_disable_irq(struct avi_segment *s)
+{
+ BUG_ON(!s->irq_node);
+
+ avi_disable_node_irq(s->irq_node);
+}
+EXPORT_SYMBOL(avi_segment_disable_irq);
+
+/* Register the client callback invoked from avi_segment_irq_handler(). */
+void avi_segment_register_irq(struct avi_segment *s,
+ avi_segment_irq_handler_t f)
+{
+ s->irq_handler_f = f;
+}
+EXPORT_SYMBOL(avi_segment_register_irq);
+
+/* Initial format assigned to freshly built segments: zero-sized, with
+ * no colorspace and an invalid pixel format, so nothing runs until the
+ * client configures it. */
+static const struct avi_segment_format avi_segment_default_format = {
+ .width = 0,
+ .height = 0,
+ .colorspace = AVI_NULL_CSPACE,
+ .pix_fmt = AVI_PIXFMT_INVALID,
+};
+
+/* Release every node owned by the segment, including the optional
+ * planar DMA-in FIFO, so they can be claimed again. */
+static void avi_segment_free_nodes(struct avi_segment *s)
+{
+ struct avi_node *n;
+ int i;
+
+ /* Mark the nodes as unused */
+ for (i = 0; i < s->nodes_nr; i++) {
+ n = s->nodes[i];
+ n->assigned_caps = 0;
+ n->busy = 0;
+ }
+
+ n = s->dma_in_planar;
+ if (n) {
+ n->assigned_caps = 0;
+ n->busy = 0;
+ }
+}
+
+
+/* Allocate and register a new segment with the given capabilities.
+ *
+ * On success the segment is reset to defaults, wired in the
+ * interconnect, added to the global index and returned. On failure an
+ * ERR_PTR is returned; for -ENODEV *caps is overwritten with the
+ * unsatisfied capability mask (see avi_segment_satisfy_caps). */
+static struct avi_segment *avi_segment_do_build(unsigned long *caps,
+ const char *id,
+ struct device *owner)
+{
+ struct avi_segment_index *index = &avi_segment_index;
+ struct avi_segment *new;
+ struct avi_node *out_node;
+ int ret;
+
+ /* Make sure the AVI has been successfully probed, otherwise we will
+ * crash when we attempt to access the registers. */
+ BUG_ON(!avi_probed());
+
+ mutex_lock(&index->lock);
+
+ if (avi_segment_find(id)) {
+ dprintk("duplicate segment %s\n", id);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ dprintk("can't allocate segment struct for %s", id);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ new->caps = *caps;
+ new->owner = owner;
+ new->input_format = avi_segment_default_format;
+ new->layout.alpha = 0xff;
+ new->cur_field = AVI_NONE_FIELD;
+
+ /* if the provided id is less than ARRAY_SIZE(new->id) it fills the
+ * remaining bytes with \0. If it's bigger however the buffer will not
+ * be \0 terminated. Since the buffer has a static size we could live
+ * with it but in order to save us some trouble later I force the last
+ * byte to \0. It means we effectively lose one usable byte for the
+ * id. */
+ strncpy(new->id, id, ARRAY_SIZE(new->id));
+ new->id[ARRAY_SIZE(new->id) - 1] = '\0';
+
+ mutex_init(&new->lock);
+
+ ret = avi_segment_satisfy_caps(new);
+ if (ret) {
+ if (ret == -ENODEV) {
+#ifdef DEBUG
+ char str[64];
+
+ avi_debug_format_caps(new->caps, str, sizeof(str));
+
+ dprintk_s(new, "can't satisfy caps: %s\n", str);
+#endif /* DEBUG */
+
+ /* Copy the unsatisfied capabilities back to caps */
+ *caps = new->caps;
+ } else {
+ dprintk_s (new, "satisfy caps failed [%d]\n", ret);
+ }
+ goto free;
+ }
+
+ /* If we have a LCD, CAM or a DMA_OUT, let's setup its interrupt */
+ out_node = avi_segment_get_node(new,
+ AVI_CAPS_LCD_ALL |
+ AVI_CAPS_CAM_ALL |
+ AVI_CAP_DMA_OUT);
+ if (out_node) {
+ new->irq_node = out_node;
+ ret = avi_segment_setup_irq(new);
+ if (ret) {
+ dprintk_s(new,
+ "can't setup interrupt for node %s\n",
+ new->irq_node->name);
+ new->irq_node = NULL;
+ goto free_nodes;
+ }
+ }
+
+ /* Reconfigure all nodes to sane (and hopefully harmless) default values */
+ avi_segment_reset(new);
+
+ avi_segment_connect_nodes(new);
+
+ list_add_tail(&new->list, &index->segment_list);
+
+ mutex_unlock(&index->lock);
+
+ return new;
+
+free_nodes:
+ avi_segment_free_nodes(new);
+free:
+ kfree(new);
+unlock:
+ mutex_unlock(&index->lock);
+
+ return ERR_PTR(ret);
+}
+
+/* Build a segment whose id is derived from a type string plus major
+ * (and optional minor) numbers: "type.major" or "type.major:minor".
+ * A negative minor means "no minor". */
+struct avi_segment *avi_segment_build(unsigned long *caps,
+ const char *type,
+ int major,
+ int minor,
+ struct device *owner)
+{
+ char id[AVI_SEGMENT_ID_LEN];
+
+ if (minor < 0)
+ snprintf(id, sizeof(id), "%s.%d", type, major);
+ else
+ snprintf(id, sizeof(id), "%s.%d:%d", type, major, minor);
+
+ return avi_segment_do_build(caps, id, owner);
+}
+EXPORT_SYMBOL(avi_segment_build);
+
+/* Generate a segment id of the form "<base_id>-<nonce>" in gen_id.
+ *
+ * The nonce is clamped to half the id buffer and base_id is truncated
+ * as needed so the whole result (including the trailing \0) fits in
+ * AVI_SEGMENT_ID_LEN bytes. */
+void avi_segment_gen_id(const char *base_id,
+ const char *nonce,
+ char gen_id[AVI_SEGMENT_ID_LEN])
+{
+ size_t base_len = strlen(base_id);
+ /* We need space for two more chars: the dash and the trailing \0 */
+ size_t nonce_len = strlen(nonce) + 2;
+ size_t dst_len;
+ char *p;
+
+ /* limit the size of the nonce */
+ if (nonce_len > (AVI_SEGMENT_ID_LEN / 2))
+ nonce_len = (AVI_SEGMENT_ID_LEN / 2);
+
+ dst_len = base_len + nonce_len;
+
+ if (dst_len > AVI_SEGMENT_ID_LEN)
+ dst_len = AVI_SEGMENT_ID_LEN;
+
+ memset(gen_id, 0, AVI_SEGMENT_ID_LEN);
+
+ p = gen_id;
+
+ /* Copy only the part of base_id we'll actually keep:
+ * dst_len - nonce_len is exactly base_len when nothing was clamped
+ * and smaller otherwise. The previous code copied up to
+ * AVI_SEGMENT_ID_LEN bytes of base_id, which could overwrite the
+ * final \0 set by the memset above and leave gen_id unterminated
+ * whenever strlen(base_id) >= AVI_SEGMENT_ID_LEN. */
+ memcpy(p, base_id, dst_len - nonce_len);
+
+ p += dst_len - nonce_len;
+
+ *p++ = '-';
+ memcpy(p, nonce, nonce_len - 2);
+}
+EXPORT_SYMBOL(avi_segment_gen_id);
+
+/* Return 1 if the segment has at least one connected source or sink,
+ * 0 otherwise. */
+int avi_segment_connected(struct avi_segment *s)
+{
+ int i;
+
+ for (i = 0; i < AVI_SEGMENT_MAX_SRC; i++) {
+ if (s->source[i] != NULL)
+ return 1;
+ }
+
+ for (i = 0; i < AVI_SEGMENT_MAX_SINK; i++) {
+ if (s->sink[i] != NULL)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(avi_segment_connected);
+
+/* Destroy a segment: deactivate it, release its IRQ, reset and
+ * disconnect its nodes, remove it from the global index and free it.
+ *
+ * Returns -EBUSY if the segment is still connected to another one. */
+int avi_segment_teardown(struct avi_segment *s)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /* Make sure the index is locked since we want to remove an element */
+ mutex_lock(&avi_segment_index.lock);
+ mutex_lock(&s->lock);
+
+ if (avi_segment_connected(s)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ avi_segment_deactivate(s);
+
+ if (s->irq_node)
+ avi_segment_free_irq(s);
+
+ /* Reconfigure all nodes to sane (and hopefully harmless) default values */
+ avi_segment_reset(s);
+
+ /* Use the irqsave variant for consistency with every other cfg_lck
+ * user in this file (e.g. avi_segment_reconfigure_connection); taking
+ * the lock without disabling interrupts could deadlock against an
+ * interrupt-context holder. */
+ spin_lock_irqsave(&avi_ctrl.cfg_lck, flags);
+
+ avi_segment_disconnect_nodes(s);
+
+ /* Mark the nodes as unused */
+ avi_segment_free_nodes(s);
+
+ list_del(&s->list);
+
+ spin_unlock_irqrestore(&avi_ctrl.cfg_lck, flags);
+
+ mutex_unlock(&s->lock);
+ mutex_unlock(&avi_segment_index.lock);
+
+ kfree(s);
+
+ return 0;
+
+unlock:
+ mutex_unlock(&s->lock);
+ mutex_unlock(&avi_segment_index.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_segment_teardown);
+
+/* Claim exclusive ownership of a segment on behalf of "owner".
+ *
+ * Returns -EBUSY if the segment already has an owner, 0 otherwise. */
+int avi_take_segment(struct avi_segment *s, struct device *owner)
+{
+ int ret = 0;
+
+ mutex_lock(&s->lock);
+
+ if (s->owner)
+ ret = -EBUSY;
+ else
+ s->owner = owner;
+
+ mutex_unlock(&s->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_take_segment);
+
+/* Drop ownership of a segment and clear the client context.
+ *
+ * Returns -EINVAL if the segment was not owned, 0 otherwise. */
+int avi_release_segment(struct avi_segment *s)
+{
+ int ret = 0;
+
+ mutex_lock(&s->lock);
+
+ if (!s->owner) {
+ ret = -EINVAL;
+ } else {
+ /* Disown the segment and cleanup the old client context for
+ * good measure. */
+ s->owner = NULL;
+ s->priv = NULL;
+ s->irq_handler_f = NULL;
+ }
+
+ mutex_unlock(&s->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_release_segment);
+
+/* This function attempt to reconnect all sources of s, adding and removing
+ * blenders if necessary.
+ *
+ * Must be called with the segment's lock held and interrupts disabled.
+ *
+ * Zero, one or two blenders are used depending on the number of
+ * sources (a single full-screen, opaque, unmoved source needs none;
+ * up to 4 sources need one; up to 7 need two chained blenders). On
+ * blender shortage the allocation is rolled back and -ENODEV is
+ * returned, leaving the previous configuration in place. */
+static int avi_segment_reconfigure_connection(struct avi_segment *s)
+{
+ int force_blender = 0;
+ int nsources = 0;
+ int ret = 0;
+ struct avi_node *inter = avi_get_node(AVI_INTER_NODE);
+ unsigned sink_id;
+ int nblenders = 0;
+ int i;
+ unsigned long cfg_flags;
+ struct avi_segment *dense_sources[AVI_SEGMENT_MAX_SRC] = { NULL };
+
+ BUG_ON(s->nodes_nr == 0);
+
+ sink_id = s->nodes[0]->sink_id;
+
+ /* count number of connected sources,
+ * compress segment sources into dense_sources */
+ for (i = 0; i < AVI_SEGMENT_MAX_SRC; i++) {
+ struct avi_segment *tmp;
+ tmp = s->source[i];
+ if (tmp != NULL) {
+ dense_sources[nsources] = tmp;
+ nsources ++;
+ }
+ }
+
+ if ((nsources == 1) && (dense_sources[0] != NULL)) {
+ /* Even if we only have one source we might need a
+ * blender if it's not fullscreen or needs to be moved
+ * around. */
+ if (dense_sources[0]->layout.x != 0 ||
+ dense_sources[0]->layout.y != 0 ||
+ dense_sources[0]->layout.hidden ||
+ dense_sources[0]->layout.alpha != 0xff ||
+ dense_sources[0]->output_format.width !=
+ s->input_format.width ||
+ dense_sources[0]->output_format.height !=
+ s->input_format.height)
+ force_blender = 1;
+ }
+
+ if (nsources <= 1)
+ nblenders = force_blender;
+ else if (nsources <= 4)
+ nblenders = 1;
+ else
+ nblenders = 2;
+
+ spin_lock_irqsave(&avi_ctrl.cfg_lck, cfg_flags);
+
+ for (i = 0; i < nblenders; i++)
+ if (s->blender[i] == NULL) {
+ /* We need a new blender node */
+ s->blender[i] = avi_cap_get_blender();
+ if (s->blender[i] == NULL) {
+ /* no more blenders available! */
+ ret = -ENODEV;
+ break;
+ }
+ /* We mark a different busy state in order to
+ * distinguish freshly obtained blenders and old ones.
+ * This is used to revert to previous state if an error
+ * occured. */
+ s->blender[i]->busy = 2;
+ }
+
+ if (ret) {
+ /* We couldn't get the blenders we wanted, rollback and make
+ * sure we revert to the initial state. */
+ while (i--)
+ if (s->blender[i]->busy == 2) {
+ s->blender[i]->busy = 0;
+ s->blender[i] = NULL;
+ }
+ goto unlock;
+ }
+ else {
+ /* All needed blenders are requested, so we no longer need to
+ * distinguish blenders */
+ for (i = 0; i < nblenders; i++)
+ if (s->blender[i]->busy == 2)
+ s->blender[i]->busy = 1;
+ }
+
+ avi_lock_node(inter);
+
+ if (nblenders == 0) {
+ unsigned src_id = 0;
+
+ if (dense_sources[0])
+ src_id = dense_sources[0]->
+ nodes[dense_sources[0]->nodes_nr - 1]->
+ src_id;
+
+ avi_segment_set_node_source(inter, sink_id, src_id);
+ } else if (nblenders == 1) {
+ /* Connect the blender */
+ avi_segment_set_node_source(inter, sink_id, s->blender[0]->src_id);
+
+ for (i = 0; i < 4; i++)
+ /* The blender's input are ordered from bottom to top
+ * (input 0 is under input 1 etc...). Our zorder is the
+ * other way around so we connect them starting from 3
+ * downwards. */
+ avi_segment_set_blender_source(inter,
+ s->blender[0],
+ 3 - i,
+ dense_sources[i]);
+
+ } else if (nblenders == 2) {
+ /* Connect the blenders */
+ avi_segment_set_node_source(inter,
+ sink_id,
+ s->blender[0]->src_id);
+ /* the 2nd blender's output is the first input of the other
+ * one */
+ avi_segment_set_blender_src_id(inter,
+ s->blender[0],
+ 0,
+ s->blender[1]->src_id);
+
+ /* Connect the first 3 planes on the first blender */
+ for (i = 0; i < 3; i++)
+ /* Same as above, we connect the planes in reverse
+ * order. */
+ avi_segment_set_blender_source(inter,
+ s->blender[0],
+ 3 - i,
+ dense_sources[i]);
+
+ /* Connect the last 4 planes on the 2nd blender */
+ for (i = 3; i < 7; i++) {
+ avi_segment_set_blender_source(inter,
+ s->blender[1],
+ 6 - i,
+ dense_sources[i]);
+ }
+ } else
+ BUG();
+
+ for (i = 0; i < nblenders; i++) {
+ avi_setup_blend(s->blender[i],
+ s->background,
+ s->input_format.width,
+ s->input_format.height);
+
+ avi_enable_node(s->blender[i]);
+ }
+
+ /* Free the no-longer needed blenders (if any) */
+ for (i = nblenders; i < 2; i++)
+ if (s->blender[i]) {
+ int j;
+
+ for (j = 0; j < 4; j++)
+ avi_segment_set_blender_src_id(inter,
+ s->blender[i],
+ j,
+ 0);
+
+ avi_disable_node(s->blender[i]);
+ s->blender[i]->busy = 0;
+ s->blender[i]->assigned_caps = 0;
+ s->blender[i] = NULL;
+ }
+
+ /* Reconfiguration done! */
+ avi_unlock_node(inter);
+
+ unlock:
+ spin_unlock_irqrestore(&avi_ctrl.cfg_lck, cfg_flags);
+ return ret;
+}
+
+/* Connect segment src as an input of segment dst at the given zorder
+ * (-1 means "pick the first free slot").
+ *
+ * Returns -EBUSY when src already feeds a sink (forks are not
+ * supported) or when the requested zorder is taken; on reconfiguration
+ * failure the source slot is rolled back.
+ *
+ * NOTE(review): locks are always taken in src-then-dst order here;
+ * presumably callers never connect two segments in both directions
+ * concurrently, otherwise this could ABBA-deadlock — confirm. */
+int avi_segment_connect(struct avi_segment *src,
+ struct avi_segment *dst,
+ int zorder)
+{
+ int ret;
+
+ mutex_lock(&src->lock);
+ mutex_lock(&dst->lock);
+
+ if (src->sink[0]) {
+ /* Forks are not yet supported */
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (zorder == -1) {
+ /* Look for an available zorder */
+ for (zorder = 0; zorder < AVI_SEGMENT_MAX_SRC; zorder++)
+ if (dst->source[zorder] == NULL)
+ /* We found an available source */
+ break;
+
+ if (zorder >= AVI_SEGMENT_MAX_SRC) {
+ /* No available sources remain */
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ } else if (dst->source[zorder] != NULL) {
+ /* The requested zorder is already in use */
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ dst->source[zorder] = src;
+
+ ret = avi_segment_reconfigure_connection(dst);
+
+ if (ret) {
+ /* reconfiguration failed, rollback */
+ dst->source[zorder] = NULL;
+ goto unlock;
+ }
+
+ src->sink[0] = dst;
+
+ ret = 0;
+ unlock:
+ mutex_unlock(&dst->lock);
+ mutex_unlock(&src->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_segment_connect);
+
+/* Detach src from dst and reconfigure dst's blender setup.
+ *
+ * Returns -ENODEV if src is not currently a source of dst; on
+ * reconfiguration failure the connection is restored. */
+int avi_segment_disconnect(struct avi_segment *src,
+ struct avi_segment *dst)
+{
+ int ret;
+ int zorder;
+
+ mutex_lock(&src->lock);
+ mutex_lock(&dst->lock);
+
+ /* Find the slot src occupies in dst's source array */
+ for (zorder = 0; zorder < AVI_SEGMENT_MAX_SRC; zorder++)
+ if (dst->source[zorder] == src)
+ break;
+
+ if (zorder >= AVI_SEGMENT_MAX_SRC) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ dst->source[zorder] = NULL;
+
+ ret = avi_segment_reconfigure_connection(dst);
+ if (ret) {
+ /* reconfiguration failed, rollback */
+ dst->source[zorder] = src;
+ goto unlock;
+ }
+
+ /* XXX no fork support */
+ src->sink[0] = NULL;
+ ret = 0;
+ unlock:
+ mutex_unlock(&dst->lock);
+ mutex_unlock(&src->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(avi_segment_disconnect);
+
+/* Set the segment's background colour, given as RGB.
+ *
+ * The colour is stored converted to the segment's output colorspace
+ * and pushed to any blender the segment currently owns. */
+void avi_segment_set_background(struct avi_segment *s, u32 rgb)
+{
+ int i;
+
+ s->background = avi_conv_convert_color(AVI_RGB_CSPACE,
+ s->output_format.colorspace,
+ rgb);
+
+ for (i = 0; i < 2; i++)
+ if (s->blender[i])
+ avi_blend_set_bg(s->blender[i], s->background);
+}
+EXPORT_SYMBOL(avi_segment_set_background);
+
+/* Return the segment's background colour as RGB.
+ *
+ * The stored background is kept in the output colorspace (see
+ * avi_segment_set_background), so convert it back to RGB here. */
+u32 avi_segment_get_background(struct avi_segment *s)
+{
+ return avi_conv_convert_color(s->output_format.colorspace,
+ AVI_RGB_CSPACE,
+ s->background);
+}
+EXPORT_SYMBOL(avi_segment_get_background);
+
+/* Enable all of a segment's nodes and mark the segment active.
+ *
+ * If "oneshot" is true the FIFOs are configured to process a single frame
+ * and then stop.
+ *
+ * Returns -EBUSY if the segment is already active.
+ */
+static int avi_segment_do_activate(struct avi_segment *s, bool oneshot)
+{
+
+	struct avi_node *out_node;
+	unsigned i;
+
+	if (s->active != AVI_SEGMENT_DISABLED)
+		return -EBUSY;
+
+	/* Enable every node, setting the FIFOs' oneshot mode on the way */
+	for (i = 0; i < s->nodes_nr; i++) {
+		if (s->nodes[i]->type == AVI_FIFO_NODE_TYPE)
+			avi_fifo_set_oneshot(s->nodes[i], oneshot);
+		avi_enable_node(s->nodes[i]);
+	}
+
+	/* The planar DMA input FIFO is kept outside of the nodes array */
+	if (s->dma_in_planar) {
+		avi_fifo_set_oneshot(s->dma_in_planar, oneshot);
+		avi_enable_node(s->dma_in_planar);
+	}
+
+	/* LCD and DMA_OUT nodes need an explicit apply to latch the new
+	 * configuration */
+	out_node = avi_segment_get_node(s, AVI_CAPS_LCD_ALL | AVI_CAP_DMA_OUT);
+
+	if (out_node)
+		avi_apply_node(out_node);
+
+#ifdef AVI_BACKWARD_COMPAT
+	if ((avi_get_revision() == AVI_REVISION_1) && oneshot) {
+		/* Pre-disable FIFO nodes to stop processing after one shot */
+		for (i = 0; i < s->nodes_nr; i++) {
+			if (s->nodes[i]->type == AVI_FIFO_NODE_TYPE)
+				avi_disable_node(s->nodes[i]);
+		}
+		if (s->dma_in_planar) {
+			avi_disable_node(s->dma_in_planar);
+		}
+	}
+#endif /* AVI_BACKWARD_COMPAT */
+
+	s->active = oneshot ? AVI_SEGMENT_ACTIVE_ONESHOT : AVI_SEGMENT_ACTIVE;
+
+	return 0;
+}
+
+/* Activate a segment for continuous operation */
+int avi_segment_activate(struct avi_segment *s)
+{
+	return avi_segment_do_activate(s, false);
+}
+EXPORT_SYMBOL(avi_segment_activate);
+
+/* Activate a segment for a single frame */
+int avi_segment_activate_oneshot(struct avi_segment *s)
+{
+	return avi_segment_do_activate(s, true);
+}
+EXPORT_SYMBOL(avi_segment_activate_oneshot);
+
+/* Stop a segment by disabling all of its nodes.
+ *
+ * Returns -EINVAL if the segment is not currently active. */
+int avi_segment_deactivate(struct avi_segment *s)
+{
+	unsigned node;
+
+	if (s->active == AVI_SEGMENT_DISABLED)
+		return -EINVAL;
+
+	for (node = 0; node < s->nodes_nr; node++)
+		avi_disable_node(s->nodes[node]);
+
+	/* The planar DMA input FIFO is kept outside of the nodes array */
+	if (s->dma_in_planar != NULL)
+		avi_disable_node(s->dma_in_planar);
+
+	s->active = AVI_SEGMENT_DISABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_segment_deactivate);
+
+/* Validate that the segment is capable of transforming format "in" into
+ * format "out" given its capabilities (scaler, chroma converter, ISP,
+ * DMA...).
+ *
+ * Called with the segment's lock held.
+ *
+ * Returns 0 if the transform is feasible, -EINVAL otherwise. */
+static int avi_segment_check_transform(struct avi_segment *s,
+				       const struct avi_segment_format *in,
+				       const struct avi_segment_format *out)
+{
+	dprintk_s(s,
+		  "check_transform: %ux%u%c %s [%s %u %u] ->"
+		  " %ux%u%c %s [%s %u %u]\n",
+		  in->width, in->height, in->interlaced ? 'i' : 'p',
+		  avi_debug_colorspace_to_str(in->colorspace),
+		  avi_debug_pixfmt_to_str(in->pix_fmt),
+		  in->plane0.line_size,
+		  in->plane1.line_size,
+
+		  out->width, out->height, out->interlaced ? 'i' : 'p',
+		  avi_debug_colorspace_to_str(out->colorspace),
+		  avi_debug_pixfmt_to_str(out->pix_fmt),
+		  out->plane0.line_size,
+		  out->plane1.line_size);
+
+/* Log the reason and bail out with -EINVAL */
+#define INVALID_TRANSFORM(msg_) do {			  \
+		dprintk_s(s, "bad transform: %s\n", msg_); \
+		return -EINVAL;				  \
+	} while (0)
+
+	if (in->width == 0 || out->width == 0 ||
+	    in->height == 0 || out->height == 0)
+		INVALID_TRANSFORM("null dimension in transform");
+
+	if ((in->width != out->width || in->height != out->height)
+	    && !(s->caps & AVI_CAP_SCAL))
+		INVALID_TRANSFORM("can't rescale without scaler");
+
+	if (in->interlaced && !out->interlaced)
+		INVALID_TRANSFORM("can't deinterlace");
+
+	/* Dropping a field (progressive -> interlaced) is done by the
+	 * scaler */
+	if (!in->interlaced && out->interlaced && !(s->caps & AVI_CAP_SCAL))
+		INVALID_TRANSFORM("can't drop field without scaler");
+
+	/* For RAW input, need to check that the whole ISP chain is
+	 * present and the dimensions */
+	if (in->pix_fmt.raw) {
+		if (!out->pix_fmt.raw &&
+		    (s->caps & AVI_CAPS_ISP) != AVI_CAPS_ISP)
+			INVALID_TRANSFORM("can't do ISP conversion");
+
+		if (in->width % 2)
+			INVALID_TRANSFORM("bad width for RAW colorspace");
+
+		if (in->height % 2)
+			INVALID_TRANSFORM("bad height for RAW colorspace");
+	}
+
+	if (in->colorspace != out->colorspace && !(s->caps & AVI_CAP_CONV))
+		INVALID_TRANSFORM("can't do chroma conversion");
+
+	/* A valid pixel format on input/output implies the matching DMA
+	 * capability and a non-zero line stride */
+	if (in->pix_fmt.id != AVI_INVALID_FMTID && !(s->caps & AVI_CAP_DMA_IN))
+		INVALID_TRANSFORM("pixel_format without DMA_IN");
+
+	if (in->pix_fmt.id != AVI_INVALID_FMTID && in->plane0.line_size == 0)
+		INVALID_TRANSFORM("bad line_size for DMA_IN");
+
+	/* Subsampled formats need even dimensions */
+	if (avi_pixfmt_get_packing(in->pix_fmt) != AVI_INTERLEAVED_444_PACKING
+	    && (in->width % 2))
+		INVALID_TRANSFORM("bad width for DMA_IN");
+
+	if (avi_pixfmt_get_packing(in->pix_fmt) == AVI_SEMIPLANAR_YUV_420_PACKING
+	    && (in->height % 2))
+		INVALID_TRANSFORM("bad height for DMA_IN");
+
+	if (out->pix_fmt.id != AVI_INVALID_FMTID &&
+	    !(s->caps & AVI_CAP_DMA_OUT))
+		INVALID_TRANSFORM("pixel_format without DMA_OUT");
+
+	if (out->pix_fmt.id != AVI_INVALID_FMTID && out->plane0.line_size == 0)
+		INVALID_TRANSFORM("bad line_size for DMA_OUT");
+
+	if (avi_pixfmt_get_packing(out->pix_fmt) != AVI_INTERLEAVED_444_PACKING
+	    && (out->width % 2))
+		INVALID_TRANSFORM("bad width for DMA_OUT");
+
+	if (avi_pixfmt_get_packing(out->pix_fmt) == AVI_SEMIPLANAR_YUV_420_PACKING
+	    && (out->height % 2))
+		INVALID_TRANSFORM("bad height for DMA_OUT");
+
+	/* Before revision 3 planar formats need a dedicated FIFO
+	 * (AVI_CAP_PLANAR) */
+	if ((avi_get_revision() < AVI_REVISION_3))
+		if (avi_pixfmt_is_planar(in->pix_fmt) &&
+		    !(s->caps & AVI_CAP_PLANAR))
+			INVALID_TRANSFORM("can't handle planar without CAP_PLANAR");
+
+#undef INVALID_TRANSFORM
+
+	/* All is good */
+	return 0;
+}
+
+/* Check whether the given input/output formats and layout can be applied to
+ * the segment without actually touching anything.
+ *
+ * Returns 0 when acceptable, -EINVAL for an impossible transform and -ERANGE
+ * when the positioned output would not fit inside a sink's input window. */
+int avi_segment_try_format(struct avi_segment *s,
+			   const struct avi_segment_format *in,
+			   const struct avi_segment_format *out,
+			   const struct avi_segment_layout *layout)
+{
+	unsigned needed_width;
+	unsigned needed_height;
+	int ret;
+	int i;
+
+	ret = avi_segment_check_transform(s, in, out);
+	if (ret)
+		return ret;
+
+	/* Dimensions the sinks must accommodate given our output format and
+	 * position within their window. */
+	needed_width = layout->x + out->width;
+	needed_height = layout->y + out->height;
+
+	for (i = 0; i < AVI_SEGMENT_MAX_SINK; i++) {
+		struct avi_segment *sink = s->sink[i];
+
+		if (sink == NULL)
+			continue;
+
+		if (sink->input_format.width >= needed_width &&
+		    sink->input_format.height >= needed_height)
+			continue;
+
+		dprintk_s(s, "segment position is out "
+			  "of the sink window: %ux%u @%u,%u -> %ux%u\n",
+			  out->width, out->height,
+			  layout->x, layout->y,
+			  sink->input_format.width,
+			  sink->input_format.height);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_segment_try_format);
+
+/* Propagate a format/layout change of "s" to its sink.
+ *
+ * Must be called with interrupts disabled.
+ *
+ * Returns 0 on success or the sink reconfiguration's negative errno. */
+static inline int avi_segment_reconfigure_sinks(struct avi_segment *s)
+{
+	/* XXX no fork support: only sink[0] can be populated for now */
+	struct avi_segment *sink = s->sink[0];
+
+	if (sink)
+		/* Use the local we just loaded instead of re-reading
+		 * s->sink[0] */
+		return avi_segment_reconfigure_connection(sink);
+
+	return 0;
+}
+
+/* Configure the segment's node to match the current input and output
+ * formats. This function cannot fail as avi_segment_try_format must have been
+ * called beforehand to validate.
+ *
+ * Configures, when present: the sinks, the scaler, the chroma converter, the
+ * DMA input/output FIFOs and the intermediate buffer FIFO. */
+static int avi_segment_apply_format(struct avi_segment *s)
+{
+	const struct avi_segment_format *in = &s->input_format;
+	const struct avi_segment_format *out = &s->output_format;
+	struct avi_node *n;
+	unsigned in_height;
+	unsigned out_height;
+	int ret;
+
+	in_height = in->height;
+	out_height = out->height;
+
+	/* Interlaced streams are processed one field (half-height) at a
+	 * time */
+	if (in->interlaced)
+		in_height /= 2;
+
+	if (out->interlaced)
+		out_height /= 2;
+
+	ret = avi_segment_reconfigure_sinks(s);
+	if (ret)
+		return ret;
+
+	if ((n = avi_segment_get_node(s, AVI_CAP_SCAL))) {
+
+		/* We need to configure the scaler in planar if we have a planar
+		 * pixel format and we're using two FIFOs */
+		avi_scal_setup_oneshot(n,
+				       in->width, out->width,
+				       in_height, out_height,
+				       in->pix_fmt,
+				       (s->dma_in_planar != NULL));
+	}
+
+	/* configure only chroma converter if ISP CHAIN BAYER is not present
+	 * in this case chroma converter is configured in userland
+	 */
+	if (!avi_segment_get_node(s, AVI_CAP_ISP_CHAIN_BAYER))
+		if ((n = avi_segment_get_node(s, AVI_CAP_CONV)))
+			avi_setup_conv_colorspace(n,
+						  in->colorspace,
+						  out->colorspace);
+
+	if ((n = avi_segment_get_node(s, AVI_CAP_DMA_IN))) {
+		struct avi_node *planar_fifo;
+
+		/* May be NULL when the format is not planar */
+		planar_fifo = avi_segment_get_node(s, AVI_CAP_PLANAR);
+
+		avi_dma_setup_input(n, planar_fifo, in);
+	}
+
+	if ((n = avi_segment_get_node(s, AVI_CAP_BUFFER))) {
+		/* Intermediate buffer FIFO: video-link in/out, 8888 encoding,
+		 * sized on the output frame (single DMA burst) */
+		struct avi_fifo_registers fifo_reg = {
+			.cfg = {{
+				.srctype   = AVI_FIFO_CFG_VL_TYPE,
+				.dsttype   = AVI_FIFO_CFG_VL_TYPE,
+				.srcbitenc = AVI_FIFO_CFG_8888_BENC,
+				.dstbitenc = AVI_FIFO_CFG_8888_BENC,
+			}},
+			.framesize = {{
+				.width	= out->width - 1,
+				.height = out_height - 1,
+			}},
+			.dmafxycfg = {{
+				.dmafxnb   = 1,
+				.dmafynb   = 1,
+				.dmafxymode = 0,
+			}},
+		};
+		avi_fifo_set_registers(n, &fifo_reg);
+	}
+
+	if ((n = avi_segment_get_node(s, AVI_CAP_DMA_OUT)))
+		avi_dma_setup_output(n, out);
+
+	avi_segment_reconfigure_connection(s);
+
+	return 0;
+}
+
+/* Atomically update a segment's input format, output format and layout.
+ *
+ * The new configuration is validated first; if applying it to the hardware
+ * fails the previous configuration is restored. */
+int avi_segment_set_format_and_layout(struct avi_segment *s,
+				      const struct avi_segment_format *in_fmt,
+				      const struct avi_segment_format *out_fmt,
+				      const struct avi_segment_layout *layout)
+{
+	struct avi_segment_format saved_in;
+	struct avi_segment_format saved_out;
+	struct avi_segment_layout saved_layout;
+	int ret;
+
+	ret = avi_segment_try_format(s, in_fmt, out_fmt, layout);
+	if (ret)
+		return ret;
+
+	/* Save the current configuration so we can roll back on failure */
+	saved_in     = s->input_format;
+	saved_out    = s->output_format;
+	saved_layout = s->layout;
+
+	s->input_format	 = *in_fmt;
+	s->output_format = *out_fmt;
+	s->layout	 = *layout;
+
+	ret = avi_segment_apply_format(s);
+	if (ret == 0)
+		return 0;
+
+	/* Rollback */
+	s->input_format	 = saved_in;
+	s->output_format = saved_out;
+	s->layout	 = saved_layout;
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_segment_set_format_and_layout);
+
+/* Set both the input and output formats of a segment, keeping the current
+ * layout untouched. */
+int avi_segment_set_format(struct avi_segment *s,
+			   const struct avi_segment_format *in_fmt,
+			   const struct avi_segment_format *out_fmt)
+{
+	return avi_segment_set_format_and_layout(s, in_fmt, out_fmt,
+						 &s->layout);
+}
+EXPORT_SYMBOL(avi_segment_set_format);
+
+/* Change only the input format, leaving the output format and layout as
+ * they are. */
+int avi_segment_set_input_format(struct avi_segment *s,
+				 const struct avi_segment_format *fmt)
+{
+	return avi_segment_set_format(s, fmt, &s->output_format);
+}
+EXPORT_SYMBOL(avi_segment_set_input_format);
+
+/* Change only the output format, leaving the input format and layout as
+ * they are. */
+int avi_segment_set_output_format(struct avi_segment *s,
+				  const struct avi_segment_format *fmt)
+{
+	return avi_segment_set_format(s, &s->input_format, fmt);
+}
+EXPORT_SYMBOL(avi_segment_set_output_format);
+
+/* Hide a segment by marking its layout hidden and propagating the change to
+ * the sinks. Does nothing when the segment is already hidden; restores the
+ * visible state if the sinks cannot be reconfigured. */
+int avi_segment_hide(struct avi_segment *s)
+{
+	int ret;
+
+	if (s->layout.hidden)
+		return 0;
+
+	s->layout.hidden = 1;
+
+	ret = avi_segment_reconfigure_sinks(s);
+	if (ret)
+		/* Could not apply, restore the previous state */
+		s->layout.hidden = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_segment_hide);
+
+/* Make a hidden segment visible again. Does nothing when the segment is not
+ * hidden; restores the hidden state if the sinks cannot be reconfigured. */
+int avi_segment_unhide(struct avi_segment *s)
+{
+	int ret;
+
+	if (!s->layout.hidden)
+		return 0;
+
+	s->layout.hidden = 0;
+
+	ret = avi_segment_reconfigure_sinks(s);
+	if (ret)
+		/* Could not apply, restore the previous state */
+		s->layout.hidden = 1;
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_segment_unhide);
+
+/* Change the position of a segment within its sink window.
+ *
+ * The new layout is validated (avi_segment_try_format) before being committed
+ * and the sink connection reconfigured. Does nothing when the position is
+ * unchanged.
+ *
+ * NOTE: the "inline" qualifier was dropped: the function is exported
+ * (EXPORT_SYMBOL) and declared extern in avi_segment.h; with C99 inline
+ * semantics a plain "inline" definition is not guaranteed to emit an external
+ * symbol. */
+int avi_segment_set_position(struct avi_segment *s,
+			     unsigned x,
+			     unsigned y)
+{
+	/* XXX no fork support */
+	struct avi_segment *sink = s->sink[0];
+	struct avi_segment_layout layout;
+	int ret;
+
+	if (s->layout.x == x && s->layout.y == y)
+		/* Nothing to do */
+		return 0;
+
+	layout = s->layout;
+
+	layout.x = x;
+	layout.y = y;
+
+	/* Make sure the new position keeps us inside the sink window */
+	ret = avi_segment_try_format(s,
+				     &s->input_format,
+				     &s->output_format,
+				     &layout);
+	if (ret)
+		return ret;
+
+	s->layout = layout;
+
+	if (sink)
+		return avi_segment_reconfigure_connection(sink);
+
+	return 0;
+}
+EXPORT_SYMBOL(avi_segment_set_position);
+
+/* Set the blending alpha of a segment.
+ *
+ * The sink connection is only reconfigured when the value actually changes.
+ *
+ * NOTE: the "inline" qualifier was dropped: the function is exported
+ * (EXPORT_SYMBOL) and declared extern in avi_segment.h; with C99 inline
+ * semantics a plain "inline" definition is not guaranteed to emit an external
+ * symbol. */
+int avi_segment_set_alpha(struct avi_segment *s, int alpha)
+{
+	int ret = 0;
+
+	if (s->layout.alpha != alpha) {
+		/* XXX no fork support */
+		struct avi_segment *sink = s->sink[0];
+
+		s->layout.alpha = alpha;
+		if (sink)
+			ret = avi_segment_reconfigure_connection(sink);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_segment_set_alpha);
+
+/* Copy the segment's current input format into *fmt */
+void avi_segment_get_input_format(struct avi_segment *s,
+				  struct avi_segment_format *fmt)
+{
+	*fmt = s->input_format;
+}
+EXPORT_SYMBOL(avi_segment_get_input_format);
+
+/* Copy the segment's current output format into *fmt */
+void avi_segment_get_output_format(struct avi_segment *s,
+				   struct avi_segment_format *fmt)
+{
+	*fmt = s->output_format;
+}
+EXPORT_SYMBOL(avi_segment_get_output_format);
+
+/* Copy the segment's current layout into *layout */
+void avi_segment_get_layout(struct avi_segment *s,
+			    struct avi_segment_layout *layout)
+{
+	*layout = s->layout;
+}
+EXPORT_SYMBOL(avi_segment_get_layout);
+
+/* Return the first node of the segment matching capability "cap", or NULL if
+ * none does.
+ *
+ * The planar DMA input FIFO, kept outside of the nodes array, is checked
+ * last. */
+struct avi_node *avi_segment_get_node(struct avi_segment *s,
+				      const unsigned long cap)
+{
+	/* unsigned to match s->nodes_nr and avoid a signed/unsigned
+	 * comparison */
+	unsigned pos;
+
+	/* Only search if the segment actually advertises the capability */
+	if ((s->caps & cap)) {
+		for (pos = 0; pos < s->nodes_nr; pos++)
+			if ((s->nodes[pos]->assigned_caps & cap))
+				return s->nodes[pos];
+
+		if (s->dma_in_planar && (s->dma_in_planar->assigned_caps & cap))
+			return s->dma_in_planar;
+	}
+
+	/* Not found */
+	return NULL;
+}
+EXPORT_SYMBOL(avi_segment_get_node);
+
+/* Queue a DMA input buffer on a segment.
+ *
+ * The segment must have the AVI_CAP_DMA_IN capability, the buffer must be in
+ * the READY state and, for planar pixel formats, both plane addresses must be
+ * populated (enforced by BUG_ON). */
+void avi_segment_set_input_buffer(struct avi_segment *segment,
+				  const struct avi_dma_buffer *buff)
+{
+	struct avi_node *fifo_plane0;
+	struct avi_node *fifo_plane1;
+	const struct avi_segment_format *in = &segment->input_format;
+
+	fifo_plane0 = avi_segment_get_node(segment, AVI_CAP_DMA_IN);
+
+	BUG_ON(fifo_plane0 == NULL);
+	BUG_ON(buff->status != AVI_BUFFER_READY);
+	BUG_ON(buff->plane0.dma_addr == 0);
+	BUG_ON(avi_pixfmt_is_planar(in->pix_fmt) &&
+	       buff->plane1.dma_addr == 0);
+
+	/* Remember the plane base addresses (used for field updates) */
+	segment->plane0_base = buff->plane0.dma_addr;
+	segment->plane1_base = buff->plane1.dma_addr;
+
+	/* NULL when the format is not planar */
+	fifo_plane1 = avi_segment_get_node(segment, AVI_CAP_PLANAR);
+	avi_dma_set_input_buffer(fifo_plane0, fifo_plane1, in, buff);
+
+	avi_segment_update_dma_field(segment);
+}
+EXPORT_SYMBOL(avi_segment_set_input_buffer);
+
+/* Queue a DMA output buffer on a segment.
+ *
+ * The segment must have the AVI_CAP_DMA_OUT capability and the buffer must be
+ * in the READY state (enforced by BUG_ON). */
+void avi_segment_set_output_buffer(struct avi_segment *segment,
+				   const struct avi_dma_buffer *buff)
+{
+	const struct avi_segment_format *out = &segment->output_format;
+	struct avi_node *dma_out;
+
+	dma_out = avi_segment_get_node(segment, AVI_CAP_DMA_OUT);
+
+	BUG_ON(dma_out == NULL);
+	BUG_ON(buff->status != AVI_BUFFER_READY);
+
+	avi_dma_set_output_buffer(dma_out, out, buff);
+}
+EXPORT_SYMBOL(avi_segment_set_output_buffer);
+
+/* Call "cback" on every registered segment, passing "priv" along.
+ *
+ * Iteration stops at the first callback returning non-zero and that value is
+ * returned to the caller; 0 otherwise.
+ *
+ * The callback runs with the global index lock held, so it must not call
+ * back into functions that take that lock. */
+int avi_segment_foreach(int (*cback)(struct avi_segment *, void *), void *priv)
+{
+	struct avi_segment *s;
+	int ret = 0;
+
+	mutex_lock(&avi_segment_index.lock);
+
+	list_for_each_entry(s, &avi_segment_index.segment_list, list) {
+		ret = cback(s, priv);
+
+		if (ret)
+			break;
+	}
+
+	mutex_unlock(&avi_segment_index.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(avi_segment_foreach);
+
+/* Return the number of segments currently registered in the global index */
+unsigned avi_segment_count(void)
+{
+	unsigned nr = 0;
+	struct avi_segment *cursor;
+
+	mutex_lock(&avi_segment_index.lock);
+
+	list_for_each_entry(cursor, &avi_segment_index.segment_list, list)
+		nr++;
+
+	mutex_unlock(&avi_segment_index.lock);
+
+	return nr;
+}
+EXPORT_SYMBOL(avi_segment_count);
+
+/* avi_segment_foreach callback: reset and fully reprogram one segment.
+ * Used on resume to restore the hardware routing and formats. */
+static int avi_segment_reconfigure(struct avi_segment *s, void *unused)
+{
+	int ret;
+
+	avi_segment_reset(s);
+	avi_segment_connect_nodes(s);
+
+	/* Keep the side-effecting calls out of BUG_ON(): they must run
+	 * unconditionally, BUG_ON only checks the result */
+	ret = avi_segment_apply_format(s);
+	BUG_ON(ret);
+	ret = avi_segment_reconfigure_connection(s);
+	BUG_ON(ret);
+
+	/* It's up to each individual driver (fb, cam...) to actually reactivate
+	 * the segments (enable, apply and IRQ) as well as reconfiguring the
+	 * DMA. */
+	s->active = AVI_SEGMENT_DISABLED;
+
+	return 0;
+}
+
+/* Restore the routing and format of every registered segment after a
+ * suspend. Drivers remain responsible for reactivating their own segments
+ * (see avi_segment_reconfigure). */
+void avi_segment_resume(void)
+{
+	/* Iterate over each segment, reconfiguring and reconnecting
+	 * everything. */
+	avi_segment_foreach(avi_segment_reconfigure, NULL);
+}
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Parrot Advanced Video Interface Segment layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi_segment.h b/drivers/parrot/video/avi_segment.h
new file mode 100644
index 00000000000000..95babedfa9f0f0
--- /dev/null
+++ b/drivers/parrot/video/avi_segment.h
@@ -0,0 +1,343 @@
+#ifndef _AVI_SEGMENT_H_
+#define _AVI_SEGMENT_H_
+
+#include "avi.h"
+#include "avi_isp.h"
+#include "avi_cap.h"
+#include "avi_dma.h"
+#include "avi_chroma.h"
+
+struct avi_segment;
+
+/* The interrupt callbacks are called directly from interrupt context with the
+ * segment's lock held */
+typedef unsigned (*avi_segment_irq_handler_t)(struct avi_segment *,
+ enum avi_field *);
+
+/* A segment can have up to 7 sources (2 blenders in series) */
+#define AVI_SEGMENT_MAX_SRC 7
+/* A segment can have up to 5 sinks (4 forks in series). */
+/* XXX not supported yet, change to 5 when adding fork support */
+#define AVI_SEGMENT_MAX_SINK 1
+
+/* The segment's input and output formats */
+struct avi_segment_format {
+	/* Frame dimensions in pixels */
+	unsigned width;
+	unsigned height;
+
+	/* Non-zero when the frames are interlaced */
+	int interlaced;
+
+	enum avi_colorspace colorspace;
+
+	/* For DMA IN/OUT segments */
+	struct avi_dma_pixfmt pix_fmt;
+
+	/* Per-plane line stride; plane1 is only meaningful for planar
+	 * pixel formats */
+	struct {
+		/* constraint: line_size must be multiple of 8 */
+		unsigned line_size; /* in bytes */
+	} plane0, plane1;
+};
+
+/* A segment's layout within a blender */
+struct avi_segment_layout {
+	/* Position of the top-left corner within the sink window, in
+	 * pixels */
+	unsigned x;
+	unsigned y;
+	/* Non-zero when the segment is moved out of the visible frame */
+	int hidden;
+	int alpha;
+};
+
+/* Activation state of a segment */
+enum avi_segment_activation {
+	AVI_SEGMENT_DISABLED = 0,
+	AVI_SEGMENT_ACTIVE,
+	/* One-shot mode: segment disabled during interrupt once the frame
+	 * has been processed */
+	AVI_SEGMENT_ACTIVE_ONESHOT,
+};
+
+struct avi_segment {
+	/* Upstream segments feeding this one, indexed by zorder */
+	struct avi_segment *source[AVI_SEGMENT_MAX_SRC];
+	/* Downstream segments fed by this one */
+	struct avi_segment *sink[AVI_SEGMENT_MAX_SINK];
+	/* pointers to the blenders used by this segment */
+	struct avi_node *blender[2];
+	/* pointers to the forks used by this segment */
+	struct avi_node *fork[4];
+	/* A bitmask of the segment's capabilities */
+	unsigned long caps;
+	/* Current activation state */
+	enum avi_segment_activation active;
+	/* Input and output formats */
+	struct avi_segment_format input_format;
+	struct avi_segment_format output_format;
+	/* Output offset */
+	struct avi_segment_layout layout;
+	/* Input blender background colour. 24bit 888, no alpha */
+	u32 background;
+	/* Frame period (if applicable, 0 otherwise). It might make sense to put
+	 * that in the segment format but I'm not sure if that's necessary and I
+	 * don't want to increase the structure size needlessly. */
+	long stream_period_us;
+	long period_us;
+	/* Frame dropped (if applicable, 0 otherwise). */
+	unsigned long frame_dropped;
+	/* A list of the segment's nodes ordered the way they're interconnected
+	 * in hardware */
+	struct avi_node **nodes;
+	unsigned nodes_nr;
+	/* For DMA nodes we store the base address of both planes */
+	dma_addr_t plane0_base;
+	dma_addr_t plane1_base;
+	/* Currently processed field for interlaced streams */
+	enum avi_field cur_field;
+	/* DMA IN node used by special case planar : 2xDMA_IN > SCALER */
+	struct avi_node *dma_in_planar;
+
+	/* Unique identifier, see AVI_SEGMENT_ID_LEN */
+	char id[16];
+	/* Linkage in the global segment index */
+	struct list_head list;
+	/* Lock protecting segment access when (dis)connecting or destroying the
+	 * segment */
+	struct mutex lock;
+	/* The device owning this segment */
+	struct device *owner;
+	/* Private data for use by the owner. Can be useful in the interrupt
+	 * handler. */
+	void *priv;
+	/* The interrupt callback */
+	avi_segment_irq_handler_t irq_handler_f;
+	/* Node whose interrupt drives irq_handler_f */
+	struct avi_node *irq_node;
+};
+
+/* Length of the segment id. Last byte is forced to \0 for convenience. */
+#define AVI_SEGMENT_ID_LEN (sizeof(((struct avi_segment *)0)->id))
+
+
+/* Attempt to create a segment with capabilities "cap".
+ *
+ * Use IS_ERR and PTR_ERR to check for success and retrieve the errno.
+ *
+ * If errno is ENODEV it means the AVI couldn't allocate the nodes to satisfy
+ * the requested capabilities. In this case caps is updated to contain the
+ * unsatisfied capabilities.
+ *
+ * If errno is EBUSY it means there's already a segment with that
+ * type/major/minor combination.
+ *
+ * If owner is not NULL it becomes the segment's owner, otherwise the segment's
+ * created as an orphan and avi_take_segment must be called to take ownership.
+ *
+ * The id of the resulting segment is "avi-<type>.<major>[:minor]"
+ *
+ * If minor is -1 it's ignored and not part of the id.
+ */
+extern struct avi_segment *avi_segment_build(unsigned long *caps,
+ const char *type,
+ int major,
+ int minor,
+ struct device *owner);
+
+/* Generate a new unique id based on "base_id" and "nonce". The result is stored
+ * in gen_id. This is useful for drivers that need to create segments on the
+ * fly. */
+extern void avi_segment_gen_id(const char *base_id,
+ const char *nonce,
+ char gen_id[AVI_SEGMENT_ID_LEN]);
+
+/* Return 1 if the segment has at least one source or one sink. 0 otherwise. */
+extern int avi_segment_connected(struct avi_segment *segment);
+
+/* Destroys a segment.
+ *
+ * The segment must not be connected to another segment at the moment this
+ * function is called (otherwise -EBUSY is returned).
+ *
+ * Returns negative errno on error, 0 otherwise.
+ *
+ * /!\ Locks the segment's mutex
+ */
+extern int avi_segment_teardown(struct avi_segment *segment);
+
+/* Retrieve an existing segment by id.
+ *
+ * If no segment matching id is found this returns NULL
+ */
+extern struct avi_segment *avi_segment_find(const char *id);
+
+/* Enables the segment's interrupt.
+ *
+ * A segment has an interrupt if it contains an LCD, CAM or DMA_OUT capability.
+ *
+ * It's an error to call this function on a segment that does not have any such
+ * capability.
+ *
+ * If the segment's IRQ is already enabled this does nothing.
+ */
+extern void avi_segment_enable_irq(struct avi_segment *segment);
+
+/* Disables the segment's interrupt.
+ *
+ * Same remarks as avi_segment_enable_irq.
+ */
+extern void avi_segment_disable_irq(struct avi_segment *segment);
+
+/* Registers this segment's interrupt handler.
+ *
+ * You have to own the segment before you attempt to register an interrupt
+ * handler.
+ */
+extern void avi_segment_register_irq(struct avi_segment *segment,
+ avi_segment_irq_handler_t handler);
+
+/* Request the ownership of a segment.
+ *
+ * This is mandatory before attempting to modify the configuration of the
+ * segment.
+ *
+ * owner cannot be NULL.
+ *
+ * This returns -EBUSY if the segment is already owned, 0 otherwise.
+ *
+ * /!\ Locks the segment's mutex
+ */
+extern int avi_take_segment(struct avi_segment *segment, struct device *owner);
+
+/* Release the ownership of a segment.
+ *
+ * Returns negative errno on error, 0 otherwise.
+ *
+ * This does not destroy the segment, just remove the ownership. Use
+ * avi_teardown_segment to permanently destroy a segment.
+ */
+extern int avi_release_segment(struct avi_segment *segment);
+
+/* Connect two segments together.
+ *
+ * If the source segment already has a sink a fork is added to share the output
+ * with the new destination.
+ *
+ * If the destination segment already has a source or the zorder parameter is
+ * greater than 0 a blender is added to mix the input. In this case the zorder
+ * parameter dictates the layer the source will occupy in the blender. Layer 0
+ * is the top-most layer.
+ *
+ * If zorder is -1 the first available layer is used (starting from 1 upwards).
+ *
+ * Returns -ENODEV if the connection could not be achieved by lack of fork or
+ * blender inputs available (if zorder is >= 0).
+ *
+ * Returns -EBUSY if the requested zorder is already in use.
+ *
+ * /!\ Locks both segments' mutexes
+ */
+extern int avi_segment_connect(struct avi_segment *src,
+ struct avi_segment *dst,
+ int zorder);
+
+/* Disconnect two segments
+ *
+ * Returns -ENODEV if src and dst are not currently connected together.
+ *
+ * /!\ Locks both segments' mutexes
+ */
+extern int avi_segment_disconnect(struct avi_segment *src,
+ struct avi_segment *dst);
+
+/* Set a segment layout on the screen (configures the blender) */
+extern int avi_segment_set_position(struct avi_segment *s,
+ unsigned x,
+ unsigned y);
+
+/* Set a segment alpha (configures the blender) */
+extern int avi_segment_set_alpha(struct avi_segment *s, int alpha);
+
+
+/* Set the segment background color when no video is displayed (blender
+ * background).
+ *
+ * Color is expressed as RGB and converted according to segment colorspace */
+extern void avi_segment_set_background(struct avi_segment *segment, u32 rgb);
+
+/* Return the background color converted to 24bit RGB */
+u32 avi_segment_get_background(struct avi_segment *segment);
+
+/* Enable and activate the segment in order to start processing. Depending on
+ * the capabilities of the segment that might mean enabling the pixelclock or
+ * other capability-specific actions
+ */
+extern int avi_segment_activate(struct avi_segment *segment);
+
+/*
+ * Same as above but it configures the FIFOs for a single frame and then stops
+ * the processing.
+ */
+extern int avi_segment_activate_oneshot(struct avi_segment *segment);
+
+/* Disable the segment */
+extern int avi_segment_deactivate(struct avi_segment *segment);
+
+int avi_segment_try_format(struct avi_segment *s,
+ const struct avi_segment_format *in,
+ const struct avi_segment_format *out,
+ const struct avi_segment_layout *layout);
+
+/* Configure a segment's input and output format to the same value (i.e. the
+ * segment does not modify the source or destination format). */
+extern int avi_segment_set_format(struct avi_segment *segment,
+ const struct avi_segment_format *in_fmt,
+ const struct avi_segment_format *out_fmt);
+
+/* Same as above but sets the layout as well. */
+extern int avi_segment_set_format_and_layout(struct avi_segment *segment,
+ const struct avi_segment_format *i,
+ const struct avi_segment_format *o,
+ const struct avi_segment_layout *l);
+
+/* Configure a segment's input format (i.e. the
+ * segment does not modify the source or destination format). */
+extern int avi_segment_set_input_format(struct avi_segment *segment,
+ const struct avi_segment_format *fmt);
+
+/* Configure a segment's output format (i.e. the
+ * segment does not modify the source or destination format). */
+extern int avi_segment_set_output_format(struct avi_segment *segment,
+ const struct avi_segment_format *fmt);
+
+/* Hide a segment (move it outside of the active frame). Requires a blender.*/
+extern int avi_segment_hide(struct avi_segment *segment);
+
+/* Put the segment back to its previous layout */
+extern int avi_segment_unhide(struct avi_segment *segment);
+
+/* Retrieve a copy of segment's input format */
+extern void avi_segment_get_input_format(struct avi_segment *segment,
+ struct avi_segment_format *format);
+
+/* Retrieve a copy of segment's output format */
+extern void avi_segment_get_output_format(struct avi_segment *segment,
+ struct avi_segment_format *format);
+
+
+extern void avi_segment_get_layout(struct avi_segment *segment,
+ struct avi_segment_layout *layout);
+
+/* Retrieve a node by its capability. */
+extern struct avi_node *avi_segment_get_node(struct avi_segment *s,
+ const unsigned long cap);
+
+/* Configure the DMA input buffer for an AVI_CAP_DMA_IN segment */
+void avi_segment_set_input_buffer(struct avi_segment *segment,
+ const struct avi_dma_buffer *buff);
+
+/* Configure the DMA output buffer for an AVI_CAP_DMA_OUT segment */
+void avi_segment_set_output_buffer(struct avi_segment *segment,
+ const struct avi_dma_buffer *buff);
+/********************************
+ * Public segment index function
+ ********************************/
+
+/* Iterate over all existing segments, calling "cback" on each.
+ *
+ * WARNING: The callback is called with the index lock held
+ */
+extern int avi_segment_foreach(int (*cback)(struct avi_segment *, void *),
+ void *priv);
+
+/* Return the total number of segments currently registered */
+extern unsigned avi_segment_count(void);
+
+#endif /* _AVI_SEGMENT_H_ */
diff --git a/drivers/parrot/video/avi_vsync_gen.c b/drivers/parrot/video/avi_vsync_gen.c
new file mode 100644
index 00000000000000..19c87f67efcf5f
--- /dev/null
+++ b/drivers/parrot/video/avi_vsync_gen.c
@@ -0,0 +1,461 @@
+/*
+ * P7 AVI LCD vsync generator
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "avi_vsync_gen.h"
+
+#define DEVICE_NAME "avi_vsync_gen"
+
+/* Per-device driver state for the vsync generator. */
+struct avivsync_data {
+	/* Current timing configuration (seeded from platform data,
+	 * mutable through sysfs) */
+	struct avivsync_timings timings;
+	/* Non-zero while the generator is running */
+	int enabled;
+	/* AVI LCD node used to drive the sync outputs */
+	const struct avi_node *lcd_node;
+	/* Pixel clock feeding the LCD node */
+	struct clk *pixclock;
+	/* Serializes configuration changes coming from sysfs */
+	struct mutex lock;
+	/* Pin state for the sync output pads */
+	struct pinctrl *pctl;
+};
+
+/* Program the LCD node and the pixclock from data->timings.
+ *
+ * Configures the master sync (htotal/vtotal, vsync on/off points), the
+ * per-output slave sync offsets and finally the pixel clock rate.
+ * Returns 0 on success or the clk_set_rate() error.
+ *
+ * NOTE(review): register writes happen before the clock is
+ * (re)configured; on clk_set_rate() failure the node is left with the
+ * new timings but the old clock -- confirm this is acceptable.
+ * Caller is expected to hold data->lock when changing a live
+ * configuration. */
+static int avivsync_configure(struct avivsync_data *data)
+{
+	struct avi_lcd_avi_vsync_gen_regs vsync_gen_regs;
+	u32 *regs;
+	int i;
+	int ret;
+	/* Static part of the LCD configuration: free-running
+	 * progressive output with the vsync generator enabled. */
+	struct avi_lcd_regs lcd_reg = {
+		.itsource = { ._register = 0 },
+		.interface = {{
+				.free_run = 1,
+				.itu656 = 0,
+				.ivs = 0,
+				.psync_en = 0,
+				.vsync_gen = 1,
+				.prog = 1,
+			}},
+		.top_format_ctrl = { ._register = 0 },
+		.top_h_timing0 = {{
+				.top_hactive_on = 0,
+				.top_hsync_off = 0,
+			}},
+		.top_h_timing1 = {{
+				.top_hactive_off = 0,
+			}},
+		.top_v_timing2 = {{
+				.top_vactive_on = 0,
+				.top_vactive_off = 0,
+			}},
+	};
+
+	/* XXX sanity check the values ? */
+
+	/*
+	 * Master sync configuration
+	 */
+	lcd_reg.top_h_timing1.top_htotal = data->timings.htotal;
+
+	lcd_reg.top_v_timing0.top_vsync_von = data->timings.vsync_von;
+	lcd_reg.top_v_timing0.top_vsync_hon = data->timings.vsync_hon;
+
+	lcd_reg.top_v_timing1.top_vsync_voff = data->timings.vsync_voff;
+	lcd_reg.top_v_timing1.top_vsync_hoff = data->timings.vsync_hoff;
+
+	lcd_reg.top_v_timing3.top_vtotal = data->timings.vtotal;
+
+	avi_lcd_set_registers(data->lcd_node, &lcd_reg);
+
+	/* NOTE(review): assumes struct avi_lcd_avi_vsync_gen_regs is
+	 * laid out as AVIVSYNC_OFFSET_NR consecutive on/off register
+	 * pairs -- confirm against the AVI register definitions. */
+	regs = (u32 *)&vsync_gen_regs;
+
+	/*
+	 * Slave sync configuration
+	 */
+	for (i = 0; i < AVIVSYNC_OFFSET_NR; i++) {
+		*regs++ = data->timings.offsets[i].vsync_on._register;
+		*regs++ = data->timings.offsets[i].vsync_off._register;
+	}
+
+	avi_vsync_gen_set_registers(data->lcd_node, &vsync_gen_regs);
+
+	/*
+	 * Pixclock configuration
+	 */
+	if (data->timings.pixclock_freq) {
+		/* Store back the effective rate so sysfs reports what
+		 * the clock can actually produce */
+		data->timings.pixclock_freq =
+			clk_round_rate(data->pixclock,
+				       data->timings.pixclock_freq);
+
+		ret = clk_set_rate(data->pixclock, data->timings.pixclock_freq);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Start the vsync generator: ungate the pixel clock, then enable and
+ * apply the LCD node. Returns 0 on success (including when already
+ * enabled), -EINVAL without a configured pixclock frequency, or the
+ * clk_enable() error. */
+static int avivsync_enable(struct avivsync_data *data)
+{
+	int err = 0;
+
+	if (data->enabled)
+		goto done;
+
+	if (!data->timings.pixclock_freq) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	/* We have to activate the pixclock before we enable the node
+	 * otherwise the logic won't be de-reset properly. */
+	err = clk_enable(data->pixclock);
+	if (err)
+		goto done;
+
+	avi_enable_node(data->lcd_node);
+	avi_apply_node(data->lcd_node);
+
+	data->enabled = 1;
+done:
+	return err;
+}
+
+/* Stop the vsync generator. Counterpart of avivsync_enable(); caller
+ * should hold data->lock. */
+static void avivsync_disable(struct avivsync_data *data)
+{
+	if (!data->enabled)
+		return;
+
+	/* Mirror the enable order: stop the node before gating its
+	 * clock */
+	avi_disable_node(data->lcd_node);
+	clk_disable(data->pixclock);
+
+	/* Clear the flag: without this a later avivsync_enable() was a
+	 * silent no-op and repeated disables unbalanced the clock
+	 * refcount. */
+	data->enabled = 0;
+}
+
+/*
+ * SysFS interface
+ */
+
+#define AVIVSYNC_SYSFS_ACCESSORS(_t) \
+ static ssize_t avivsync_show_##_t(struct device *device, \
+ struct device_attribute *a, \
+ char *buf) \
+ { \
+ const struct avivsync_data *data; \
+ data = dev_get_drvdata(device); \
+ \
+ return snprintf(buf, PAGE_SIZE, \
+ "%u\n", data->timings._t); \
+ } \
+ \
+ static ssize_t avivsync_store_##_t(struct device *device, \
+ struct device_attribute *a, \
+ const char *buf, \
+ size_t count) \
+ { \
+ struct avivsync_data *data; \
+ unsigned long v; \
+ char *last; \
+ \
+ data = dev_get_drvdata(device); \
+ \
+ v = simple_strtoul(buf, &last, 0); \
+ \
+ if (*last != '\0' && *last != '\n') \
+ /* invalid input */ \
+ return -EINVAL; \
+ \
+ mutex_lock(&data->lock); \
+ \
+ data->timings._t = v; \
+ avivsync_configure(data); \
+ \
+ mutex_unlock(&data->lock); \
+ \
+ return count; \
+ }
+
+#define AVIVSYNC_SYSFS_ATTRS(_t) \
+ __ATTR(_t, S_IRUGO|S_IWUSR, \
+ avivsync_show_##_t, avivsync_store_##_t)
+
+AVIVSYNC_SYSFS_ACCESSORS(htotal)
+AVIVSYNC_SYSFS_ACCESSORS(vtotal)
+AVIVSYNC_SYSFS_ACCESSORS(vsync_von)
+AVIVSYNC_SYSFS_ACCESSORS(vsync_voff)
+AVIVSYNC_SYSFS_ACCESSORS(vsync_hon)
+AVIVSYNC_SYSFS_ACCESSORS(vsync_hoff)
+AVIVSYNC_SYSFS_ACCESSORS(pixclock_freq)
+
+/* Generate the sysfs show/store accessor pair for VSync offset _n.
+ *
+ * Attribute format: "<von>:<hon>,<voff>:<hoff>".
+ *
+ * The store accessor parses into a local copy first, then commits the
+ * new offset under data->lock (consistent with the other store
+ * accessors) and rejects malformed input with -EINVAL instead of
+ * silently reporting success. */
+#define AVIVSYNC_OFFSET_SYSFS_ACCESSORS(_n)				\
+	static ssize_t avivsync_show_offset##_n(struct device *device,	\
+						struct device_attribute *a, \
+						char *buf)		\
+	{								\
+		const struct avivsync_data *data;			\
+		const struct avivsync_offsets *o;			\
+									\
+		data = dev_get_drvdata(device);				\
+		o = &data->timings.offsets[_n];				\
+									\
+		return snprintf(buf, PAGE_SIZE, "%u:%u,%u:%u\n",	\
+				o->vsync_on.vsync_gen_von,		\
+				o->vsync_on.vsync_gen_hon,		\
+				o->vsync_off.vsync_gen_voff,		\
+				o->vsync_off.vsync_gen_hoff);		\
+	}								\
+									\
+	static ssize_t avivsync_store_offset##_n(			\
+			struct device *device,				\
+			struct device_attribute *a,			\
+			const char *buf,				\
+			size_t count)					\
+	{								\
+		struct avivsync_data *data;				\
+		struct avivsync_offsets off;				\
+		char *last;						\
+									\
+		data = dev_get_drvdata(device);				\
+									\
+		off.vsync_on.vsync_gen_von =				\
+			simple_strtoul(buf, &last, 0);			\
+		if (*last != ':')					\
+			return -EINVAL;					\
+									\
+		last++;							\
+		off.vsync_on.vsync_gen_hon =				\
+			simple_strtoul(last, &last, 0);			\
+		if (*last != ',')					\
+			return -EINVAL;					\
+									\
+		last++;							\
+		off.vsync_off.vsync_gen_voff =				\
+			simple_strtoul(last, &last, 0);			\
+		if (*last != ':')					\
+			return -EINVAL;					\
+									\
+		last++;							\
+		off.vsync_off.vsync_gen_hoff =				\
+			simple_strtoul(last, &last, 0);			\
+		if (*last != '\0' && *last != '\n')			\
+			return -EINVAL;					\
+									\
+		mutex_lock(&data->lock);				\
+		data->timings.offsets[_n] = off;			\
+		avivsync_configure(data);				\
+		mutex_unlock(&data->lock);				\
+									\
+		return count;						\
+	}
+
+#define AVIVSYNC_OFFSET_SYSFS_ATTRS(_n) \
+ __ATTR(offset##_n, S_IRUGO|S_IWUSR, \
+ avivsync_show_offset##_n, avivsync_store_offset##_n)
+
+AVIVSYNC_OFFSET_SYSFS_ACCESSORS(0)
+AVIVSYNC_OFFSET_SYSFS_ACCESSORS(1)
+AVIVSYNC_OFFSET_SYSFS_ACCESSORS(2)
+AVIVSYNC_OFFSET_SYSFS_ACCESSORS(3)
+AVIVSYNC_OFFSET_SYSFS_ACCESSORS(4)
+
+/* sysfs read accessor for the "enabled" attribute ("0" or "1"). */
+static ssize_t avivsync_show_enabled(struct device *device,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	const struct avivsync_data *priv = dev_get_drvdata(device);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
+}
+
+/* sysfs write accessor for the "enabled" attribute.
+ *
+ * Accepts "0" / "1". Now propagates a failed enable (e.g. missing
+ * pixclock frequency) to userland instead of silently reporting
+ * success. */
+static ssize_t avivsync_store_enabled(struct device *device,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct avivsync_data *data = dev_get_drvdata(device);
+	int ret = 0;
+	int v;
+
+	if (buf[0] == '1')
+		v = 1;
+	else if (buf[0] == '0')
+		v = 0;
+	else
+		/* invalid input */
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	if (v)
+		ret = avivsync_enable(data);
+	else
+		avivsync_disable(data);
+
+	mutex_unlock(&data->lock);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static struct device_attribute avivsync_sysfs_attrs[] = {
+ AVIVSYNC_SYSFS_ATTRS(htotal),
+ AVIVSYNC_SYSFS_ATTRS(vtotal),
+ AVIVSYNC_SYSFS_ATTRS(vsync_von),
+ AVIVSYNC_SYSFS_ATTRS(vsync_voff),
+ AVIVSYNC_SYSFS_ATTRS(vsync_hon),
+ AVIVSYNC_SYSFS_ATTRS(vsync_hoff),
+ AVIVSYNC_SYSFS_ATTRS(pixclock_freq),
+
+ AVIVSYNC_OFFSET_SYSFS_ATTRS(0),
+ AVIVSYNC_OFFSET_SYSFS_ATTRS(1),
+ AVIVSYNC_OFFSET_SYSFS_ATTRS(2),
+ AVIVSYNC_OFFSET_SYSFS_ATTRS(3),
+ AVIVSYNC_OFFSET_SYSFS_ATTRS(4),
+
+ __ATTR(enabled, S_IRUGO|S_IWUSR,
+ avivsync_show_enabled, avivsync_store_enabled),
+};
+
+/* Probe: allocate driver state, grab the pixel clock and the pin
+ * state, apply the default timings from the platform data and expose
+ * the sysfs configuration interface. */
+static int __devinit avivsync_probe(struct platform_device *pdev)
+{
+	struct avivsync_platform_data *pdata;
+	struct avivsync_data *data;
+	int i;
+	int ret = 0;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	/* Check the pointer we actually dereference below: the
+	 * original tested pdev (which cannot be NULL here) and would
+	 * crash on a missing platform data. */
+	if (!pdata) {
+		ret = -ENODEV;
+		goto no_pdata;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		ret = -ENOMEM;
+		goto alloc_failed;
+	}
+
+	platform_set_drvdata(pdev, data);
+
+	mutex_init(&data->lock);
+
+	data->pixclock = clk_get(&pdev->dev, "lcd");
+	if (IS_ERR(data->pixclock)) {
+		ret = PTR_ERR(data->pixclock);
+		goto no_clock;
+	}
+
+	data->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(data->pctl)) {
+		ret = PTR_ERR(data->pctl);
+		goto no_pins;
+	}
+
+#warning "This code needs to be ported to the segment API"
+#if 0
+	/* chan is only used by this dead code, keep its declaration
+	 * here to avoid an unused-variable warning in the live path */
+	const struct avi_chan *chan;
+
+	if (!pdata->avi_group || pdata->avi_group->chan_nr == 0) {
+		ret = -ENODEV;
+		goto no_group;
+	}
+
+	chan = &pdata->avi_group->channels[0];
+
+	if (chan->type != AVI_VSYNC_GEN_CHAN_TYPE ||
+	    chan->nodes_nr != 1) {
+		ret = -EINVAL;
+		goto bad_chan;
+	}
+
+	data->lcd_node = avi_get_channode(chan, 0);
+	if (data->lcd_node->type != AVI_LCD_NODE_TYPE) {
+		ret = -EINVAL;
+		goto bad_chan;
+	}
+#endif
+	/* Copy the defaults */
+	data->timings = pdata->timings;
+
+	ret = avivsync_configure(data);
+	if (ret)
+		goto configure_failed;
+
+	if (pdata->enabled)
+		avivsync_enable(data);
+
+	/* Redundant with platform_set_drvdata() above but kept so the
+	 * sysfs accessors' dev_get_drvdata() is obviously paired */
+	dev_set_drvdata(&pdev->dev, data);
+
+	for (i = 0; i < ARRAY_SIZE(avivsync_sysfs_attrs); i++) {
+		ret = device_create_file(&pdev->dev, &avivsync_sysfs_attrs[i]);
+		if (ret) {
+			/* Roll back the attributes created so far */
+			for (--i; i >= 0; i--)
+				device_remove_file(&pdev->dev,
+						   &avivsync_sysfs_attrs[i]);
+			goto sysfs_register_failed;
+		}
+	}
+
+	dev_info(&pdev->dev, "VSync generator driver initialized\n");
+
+	return 0;
+
+sysfs_register_failed:
+	dev_set_drvdata(&pdev->dev, NULL);
+configure_failed:
+bad_chan:
+no_group:
+	pinctrl_put(data->pctl);
+no_pins:
+	clk_put(data->pixclock);
+no_clock:
+	mutex_destroy(&data->lock);
+	kfree(data);
+	platform_set_drvdata(pdev, NULL);
+alloc_failed:
+no_pdata:
+	return ret;
+}
+
+/* Tear the device down: drop the sysfs attributes, stop the generator
+ * and release everything acquired in probe. */
+static int __devexit avivsync_remove(struct platform_device *pdev)
+{
+	struct avivsync_data *priv = platform_get_drvdata(pdev);
+	int n;
+
+	if (!priv)
+		return 0;
+
+	for (n = 0; n < ARRAY_SIZE(avivsync_sysfs_attrs); n++)
+		device_remove_file(&pdev->dev, &avivsync_sysfs_attrs[n]);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	avivsync_disable(priv);
+	pinctrl_put(priv->pctl);
+	clk_put(priv->pixclock);
+	mutex_destroy(&priv->lock);
+	kfree(priv);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver avivsync_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = avivsync_probe,
+ .remove = __devexit_p(avivsync_remove),
+};
+
+static int __init avivsync_init(void)
+{
+ return platform_driver_register(&avivsync_driver);
+}
+
+module_init(avivsync_init);
+
+static void __exit avivsync_exit(void)
+{
+ platform_driver_unregister(&avivsync_driver);
+}
+
+module_exit(avivsync_exit);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Driver for the Advanced Video Interface VSync generator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avi_vsync_gen.h b/drivers/parrot/video/avi_vsync_gen.h
new file mode 100644
index 00000000000000..a4015af303ab73
--- /dev/null
+++ b/drivers/parrot/video/avi_vsync_gen.h
@@ -0,0 +1,79 @@
+#ifndef _AVI_VSYNC_GEN_H_
+#define _AVI_VSYNC_GEN_H_
+
+#include "avi.h"
+
+#define AVIVSYNC_OFFSET_NR 5
+
+struct avivsync_offsets {
+ union avi_lcd_vsync_gen_on_0 vsync_on;
+ union avi_lcd_vsync_gen_off_0 vsync_off;
+};
+
+struct avivsync_timings {
+ unsigned pixclock_freq;
+ /* Length of a line in pixclock */
+ unsigned htotal;
+ /* Height of the frame in lines */
+ unsigned vtotal;
+ /* Vsync specification */
+ unsigned vsync_von;
+ unsigned vsync_hon;
+ unsigned vsync_voff;
+ unsigned vsync_hoff;
+ /* VSync offsets */
+ struct avivsync_offsets offsets[AVIVSYNC_OFFSET_NR];
+};
+
+struct avivsync_platform_data {
+ struct avivsync_timings timings;
+ int enabled;
+ struct avi_group *avi_group;
+};
+
+#ifdef _SAMPLE_BSP_DO_NOT_REMOVE_
+
+#include <video/avi_vsync_gen.h>
+
+static struct pinctrl_map rnb6_avi_vsync_pins[] __initdata = {
+ P7_INIT_PINMAP(P7_LCD_0_CLK),
+ P7_INIT_PINMAP(P7_LCD_0_VS),
+ /* This is for P7R2. In P7R3 all pads are shifted by one: the "master"
+ * VSync is DATA16 and the offsets 0 to 4 are on pads DATA17 to 21. */
+ P7_INIT_PINMAP(P7_LCD_0_DATA15),
+ P7_INIT_PINMAP(P7_LCD_0_DATA16),
+ P7_INIT_PINMAP(P7_LCD_0_DATA17),
+ P7_INIT_PINMAP(P7_LCD_0_DATA18),
+ P7_INIT_PINMAP(P7_LCD_0_DATA19),
+ P7_INIT_PINMAP(P7_LCD_0_DATA20),
+};
+
+static unsigned int const rnb6_avi_vsync_nodes[] = {
+ AVI_LCD0_NODE
+};
+
+static struct avi_chan rnb6_avi_vsync_chan = {
+ .type = AVI_VSYNC_GEN_CHAN_TYPE,
+ .nodes = rnb6_avi_vsync_nodes,
+ .nodes_nr = ARRAY_SIZE(rnb6_avi_vsync_nodes),
+};
+
+static struct avi_group rnb6_avi_vsync_group = {
+ .name = "avi_vsync.lcd0",
+ .lck = __SPIN_LOCK_UNLOCKED(rnb6_avi_vsync_group.lck),
+ .channels = &rnb6_avi_vsync_chan,
+ .chan_nr = 1,
+};
+
+static struct avivsync_platform_data rnb6_avi_vsync_pdata = {
+ .avi_group = &rnb6_avi_vsync_group,
+};
+
+static struct platform_device rnb6_avi_vsync_dev = {
+ .name = "avi_vsync_gen",
+ .id = 0,
+};
+
+#endif /* _SAMPLE_BSP_DO_NOT_REMOVE_ */
+
+#endif /* _AVI_VSYNC_GEN_H_ */
diff --git a/drivers/parrot/video/avifb.c b/drivers/parrot/video/avifb.c
new file mode 100644
index 00000000000000..1ddc6d260d92d2
--- /dev/null
+++ b/drivers/parrot/video/avifb.c
@@ -0,0 +1,2575 @@
+/*
+ * Parrot AVI framebuffer driver.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/pinctrl/consumer.h>
+#include <asm/cacheflush.h>
+#include "avi_scaler.h"
+#include "avi_isp.h"
+#include "avi_limit.h"
+#include "avifb.h"
+
+/* Number of buffers reserved in the virtual framebuffer (used as a
+ * yres multiplier for panning / double buffering) */
+#define AVIFB_BUFFER_COUNT 2
+
+#define DEVICE_NAME "avifb"
+
+#ifdef DEBUG
+#define dprintk(data, format_, args...) \
+ dev_dbg((data)->dev, "%s: " format_, __func__, ##args)
+
+#else /* DEBUG */
+#define dprintk(data_, format_, args...) (void)data_
+#endif /* DEBUG */
+
+/* AVI LCD data. */
+struct avifb_data {
+ struct device *dev;
+ struct avifb_platform_data *pdata;
+ /* Used to change the videomode/pixclock/gamma... atomically, avoid
+ * incoherent configurations */
+ struct mutex lcd_lock;
+ unsigned lcd_format_control;
+ struct avi_videomode lcd_videomode;
+ struct avi_videomode lcd_default_videomode;
+ enum avi_colorspace output_cspace;
+ union avi_lcd_interface lcd_interface;
+ wait_queue_head_t wait_vbl;
+ atomic_t vbl_count;
+ struct sysfs_dirent *vbl_dirent;
+ enum avi_field next_field;
+ struct avi_segment *display_segment;
+ const struct avi_cihan *avi_chan;
+ struct avi_cmap gamma;
+ const struct avi_node *gam;
+ const struct avi_node *lcdc;
+ /* "Default Pixel Data", default color of the LCD output when there's no
+ * stream to display */
+ u32 dpd;
+ /* For interlaced modes we need to change the scaler's config depending
+ * on which field we need to display. */
+ struct avi_scal_cfg top_config;
+ struct avi_scal_cfg bot_config;
+ /* Number of clk ticks per frame */
+ unsigned long clk_per_frame;
+ struct pinctrl *pctl;
+ /* Monitor specs */
+ struct fb_monspecs monspecs;
+ /* Panel size in mm */
+ __u32 height;
+ __u32 width;
+ struct fb_info *infos[];
+};
+
+/* True when the current LCD videomode is interlaced. The body now
+ * expands the macro argument: the original hardcoded "data" and only
+ * worked because every caller happened to use that variable name. */
+#define IS_INTERLACED(_data) \
+	(!!((_data)->lcd_videomode.flags & AVI_VIDEOMODE_INTERLACED))
+/*
+ * AVI framebuffer private data. One instance per framebuffer
+ */
+struct avifb_par {
+ unsigned id;
+ struct avifb_data *data;
+ const struct avifb_overlay *overlay;
+ struct avi_segment *segment;
+ u32 pseudo_palette[16];
+ atomic_t pan_count;
+ unsigned pan_waits_for_vbl;
+ /* same as in avifb_data, in case we have to handle interlacing at the
+ * overlay level */
+ struct avi_scal_cfg top_config;
+ struct avi_scal_cfg bot_config;
+ /* If we're interlaced and there's no scaler in the way we use the input
+ * fifo to only fetch the displayed field */
+ u32 top_fifo_addr;
+ u32 bot_fifo_addr;
+};
+
+/* Empirical timeout value: 3/10th of a sec */
+#define AVIFB_VSYNC_TIMEOUT (3 * HZ / 10)
+
+/* Block until the next vertical blanking interval.
+ *
+ * Returns 0 once vbl_count moves past its value at entry,
+ * -ETIMEDOUT after AVIFB_VSYNC_TIMEOUT, or -ERESTARTSYS when
+ * interrupted by a signal. */
+static int avifb_wait_for_vbl(struct avifb_data *data)
+{
+	/* Snapshot the counter; a plain int is enough for a local
+	 * only this thread reads */
+	int seen = atomic_read(&data->vbl_count);
+	int rc;
+
+	rc = wait_event_interruptible_timeout(data->wait_vbl,
+					      atomic_read(&data->vbl_count)
+					      != seen,
+					      AVIFB_VSYNC_TIMEOUT);
+	if (rc < 0)
+		return rc;
+
+	return rc ? 0 : -ETIMEDOUT;
+}
+
+/**
+ * avifb_setcolreg - Optional function. Sets a color register.
+ * @regno: Which register in the CLUT we are programming
+ * @red: The red value which can be up to 16 bits wide
+ * @green: The green value which can be up to 16 bits wide
+ * @blue: The blue value which can be up to 16 bits wide.
+ * @transp: If supported, the alpha value which can be up to 16 bits wide.
+ * @info: frame buffer info structure
+ *
+ * Set a single color register. The values supplied have a 16 bit
+ * magnitude which needs to be scaled in this function for the hardware.
+ * Things to take into consideration are how many color registers, if
+ * any, are supported with the current color visual. With truecolor mode
+ * no color palettes are supported. Here a pseudo palette is created
+ * which we store the value in pseudo_palette in struct fb_info. For
+ * pseudocolor mode we have a limited color palette. To deal with this
+ * we can program what color is displayed for a particular pixel value.
+ * DirectColor is similar in that we can program each color field. If
+ * we have a static colormap we don't need to implement this function.
+ *
+ * Returns negative errno on error, or zero on success.
+ */
+static int avifb_setcolreg(unsigned regno, unsigned red, unsigned green,
+			   unsigned blue, unsigned transp,
+			   struct fb_info *info)
+{
+	struct avifb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct avifb_data *data = par->data;
+	int ret = 0;
+
+	/* Store the pseudopalette */
+	if (regno < 16) {
+#define TRUNCATE_COL(_c)					\
+	((_c >> (16 - var->_c.length)) << var->_c.offset)
+		/* colours are 16 bits in amplitude, so we truncate them to the
+		 * hardware (i.e. TRUECOLOR) value*/
+		((u32 *)info->pseudo_palette)[regno] =
+			TRUNCATE_COL(red) |
+			TRUNCATE_COL(green) |
+			TRUNCATE_COL(blue) |
+			TRUNCATE_COL(transp);
+#undef TRUNCATE_COL
+	}
+
+	mutex_lock(&data->lcd_lock);
+
+	if (!data->gam) {
+		/* Without a gamma node there is nowhere to store
+		 * entries above the pseudopalette */
+		if (regno >= 16)
+			ret = -EINVAL;
+
+		goto unlock;
+	}
+
+	/* We only handle 8bit colours */
+	red >>= 8;
+	green >>= 8;
+	blue >>= 8;
+
+	/* gamma correction (directcolor) cmap */
+	regno &= 0xff;
+	data->gamma.red  [regno] = red;
+	data->gamma.green[regno] = green;
+	data->gamma.blue [regno] = blue;
+	avi_gam_set_entry(data->gam, regno, red, green, blue);
+
+ unlock:
+	mutex_unlock(&data->lcd_lock);
+
+	/* Propagate the error: the original returned 0 unconditionally,
+	 * discarding the -EINVAL set above */
+	return ret;
+}
+
+/*
+ * Flush all mappings from the cache for the currently active portion of the
+ * framebuffer.
+ */
+static void avifb_flush_cache(struct fb_info *info)
+{
+	struct avifb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	unsigned long off;
+	unsigned long sz;
+
+	if (!par->overlay->cacheable)
+		/* fb is mapped as non-cacheable, nothing to be done here */
+		return;
+
+	/* Compute the offset (in pixels, then bytes) and the size of
+	 * the active portion of the framebuffer */
+	off = var->yoffset * var->xres_virtual + var->xoffset;
+	off *= var->bits_per_pixel / 8;
+	sz = var->xres_virtual * var->yres * var->bits_per_pixel / 8;
+
+	/* Flush L1 cache. __cpuc_flush_dcache_area takes a virtual address and
+	 * a size. Since the cache is physically tagged, we don't need to flush
+	 * all virtual mappings independently, instead we just use the kernel
+	 * mapping. */
+	__cpuc_flush_dcache_area(info->screen_base + off, sz);
+
+	/* Now that the L1 is clean, we can move on to the L2. outer_flush_range
+	 * takes a physical address range. */
+	outer_flush_range(info->fix.smem_start + off,
+			  info->fix.smem_start + off + sz);
+}
+
+/**
+ * Pans the display
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
+ *
+ * Pan (or wrap, depending on the `vmode' field) the display using the
+ * `xoffset' and `yoffset' fields of the `var' structure.
+ * If the values don't fit, return -EINVAL.
+ *
+ * In this implementation, the pan will only take effect in the next VBL,
+ * never immediately (the AVI FIFO will only sync their shadow registers on
+ * EOF)
+ *
+ * Returns negative errno on error, or zero on success.
+ */
+static int avifb_pan_display(struct fb_var_screeninfo *pan_var,
+ struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct avifb_par *par = info->par;
+ struct avifb_data *data = par->data;
+ struct avi_dma_buffer buf;
+
+ if (pan_var->xoffset + var->xres > var->xres_virtual)
+ return -EINVAL;
+ if (pan_var->yoffset + var->yres > var->yres_virtual)
+ return -EINVAL;
+
+ if (var->rotate != FB_ROTATE_UR)
+ {
+ dev_err(data->dev,
+ "rotation requested but not yet implemented\n");
+ return -EINVAL;
+ }
+
+ var->xoffset = pan_var->xoffset;
+ var->yoffset = pan_var->yoffset;
+
+ buf.plane0.dma_addr = var->yoffset * var->xres_virtual + var->xoffset;
+ /* Convert offset from pixels into bytes */
+ buf.plane0.dma_addr *= var->bits_per_pixel / 8;
+ /* Finally get the real physical address of the buffer */
+ buf.plane0.dma_addr += info->fix.smem_start;
+
+ /* Framebuffer is never planar */
+ buf.plane1.dma_addr = 0;
+
+ buf.priv = info;
+
+ buf.status = AVI_BUFFER_READY;
+
+ avi_segment_set_input_buffer(par->segment, &buf);
+
+ atomic_inc(&par->pan_count);
+
+ /* Ensure the coherency of the portion of the framebuffer to be
+ * displayed */
+ avifb_flush_cache(info);
+
+ if (par->pan_waits_for_vbl)
+ return avifb_wait_for_vbl(data);
+
+ return 0;
+}
+
+/* Pan without blocking for the vertical blanking: temporarily masks
+ * pan_waits_for_vbl around the regular pan path, then restores it. */
+static inline int avifb_pan_display_nowait(struct fb_var_screeninfo *pan_var,
+					   struct fb_info *info)
+{
+	struct avifb_par *par = info->par;
+	unsigned saved = par->pan_waits_for_vbl;
+	int rc;
+
+	par->pan_waits_for_vbl = 0;
+	rc = avifb_pan_display(pan_var, info);
+	par->pan_waits_for_vbl = saved;
+
+	return rc;
+}
+
+/**
+ * avifb_blank - NOT a required function. Blanks the display.
+ * @blank_mode: the blank mode we want.
+ * @info: frame buffer structure that represents a single frame buffer
+ *
+ * Blank the screen if blank_mode != FB_BLANK_UNBLANK, else unblank.
+ * Return 0 if blanking succeeded, != 0 if un-/blanking failed due to
+ * e.g. a video mode which doesn't support it.
+ *
+ * Implements VESA suspend and powerdown modes on hardware that supports
+ * disabling hsync/vsync:
+ *
+ * FB_BLANK_NORMAL = display is blanked, syncs are on.
+ * FB_BLANK_HSYNC_SUSPEND = hsync off
+ * FB_BLANK_VSYNC_SUSPEND = vsync off
+ * FB_BLANK_POWERDOWN = hsync and vsync off
+ *
+ * If implementing this function, at least support FB_BLANK_UNBLANK.
+ * Return !0 for any modes that are unimplemented.
+ *
+ */
+static int avifb_blank(int blank_mode, struct fb_info *info)
+{
+	struct avifb_par *par = info->par;
+
+	/* XXX: when do we actually blank the screen (i.e. disable the
+	 * pixclock)? When all blends are deactivated? */
+	if (blank_mode == FB_BLANK_UNBLANK) {
+		avi_segment_unhide(par->segment);
+		return 0;
+	}
+
+	if (blank_mode == FB_BLANK_NORMAL ||
+	    blank_mode == FB_BLANK_HSYNC_SUSPEND ||
+	    blank_mode == FB_BLANK_VSYNC_SUSPEND ||
+	    blank_mode == FB_BLANK_POWERDOWN) {
+		/* We cannot control hsync/vsync separately: every
+		 * blanking mode just hides the segment */
+		avi_segment_hide(par->segment);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* We reimplement our own mmap routine to remap the framebuffer memory as
+ * cacheable in userland. This is done in order to improve read/write
+ * performances on the framebuffer (the osprey won't burst on the AXI on
+ * non-cacheable memory) */
+static int avifb_mmap(struct fb_info *info, struct vm_area_struct * vma)
+{
+	struct avifb_par *par = info->par;
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long startpfn;
+
+	/* Reject offsets past the end of the framebuffer: the
+	 * subtraction below is unsigned and would wrap around,
+	 * defeating the size check and letting userland map arbitrary
+	 * physical memory past smem_start. */
+	if (off > info->fix.smem_len)
+		return -EINVAL;
+
+	/* Check if the mapping is within the framebuffer memory */
+	if ((vma->vm_end - vma->vm_start) > (info->fix.smem_len - off))
+		return -EINVAL;
+	/* Compute the absolute offset */
+	startpfn = (info->fix.smem_start + off) >> PAGE_SHIFT;
+	/* This is an IO map - tell maydump to skip this VMA */
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	if (par->overlay->cacheable)
+		/* Set mapping as device cacheable */
+		vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot,
+						    L_PTE_MT_MASK,
+						    L_PTE_MT_DEV_CACHED);
+	else
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return io_remap_pfn_range(vma, vma->vm_start, startpfn,
+				  vma->vm_end - vma->vm_start,
+				  vma->vm_page_prot);
+}
+
+/* LCD segment interrupt handler.
+ *
+ * Warns (rate-limited) on input FIFO underflow, and on end-of-frame
+ * bumps the VBL counter, works out which field comes next in
+ * interlaced modes, and wakes VBL waiters. Returns 0 to let the
+ * segment layer dispatch the EOF, AVI_IRQ_HANDLED otherwise. */
+static unsigned avifb_interrupt(struct avi_segment *s, enum avi_field *field)
+{
+	struct avifb_data *data = s->priv;
+	union avi_lcd_status status;
+
+	status = avi_lcd_get_status(data->lcdc);
+
+	if (avi_segment_connected(data->display_segment) &&
+	    status.fuf &&
+	    printk_ratelimit())
+		dev_warn(data->dev, "LCD input underflow\n");
+
+	if (status.done) {
+		atomic_inc(&data->vbl_count);
+		if (IS_INTERLACED(data)) {
+#ifdef AVI_BACKWARD_COMPAT
+			if (avi_get_revision() == AVI_REVISION_1)
+				/* In MPW1 we can't trust the "FIELD" bit given
+				 * by the hardware, so we just toggle a bit in
+				 * software and pray that we're correctly
+				 * synchronized */
+				data->next_field ^=
+					AVI_TOP_FIELD ^ AVI_BOT_FIELD;
+			else
+#endif /* AVI_BACKWARD_COMPAT */
+				/* status.field is the field currently being
+				 * displayed. We want to configure the next
+				 * field. */
+				data->next_field = status.field ?
+					AVI_TOP_FIELD : AVI_BOT_FIELD;
+
+			*field = data->next_field;
+		} else
+			*field = AVI_NONE_FIELD;
+
+		/* Wake up any task waiting for the vsync */
+		wake_up_interruptible(&data->wait_vbl);
+		/* Notify any task polling the vbl_count sysfs entry */
+		sysfs_notify_dirent(data->vbl_dirent);
+
+		return 0;
+	}
+
+	/* Do not dispatch the interruption if it's not the EOF */
+	return AVI_IRQ_HANDLED;
+}
+
+static int avifb_instance(struct avifb_data *data)
+{
+ struct platform_device *pdev;
+
+ pdev = container_of(data->dev, struct platform_device, dev);
+
+ return pdev->id;
+}
+
+/* Push the cached gamma LUT to the GAM node (no-op without one). */
+static void avifb_set_gam_config(struct avifb_data *data)
+{
+	if (!data->gam)
+		return;
+
+	avi_gam_set_cmap(data->gam, &data->gamma);
+	avi_gam_setup(data->gam, 0, 0, 0);
+}
+
+/**
+ * avifb_setup_lcd_segment
+ *
+ * Called by avifb_probe, checks channel coherency
+ */
+static int __devinit avifb_setup_lcd_segment(struct avifb_data *data)
+{
+	unsigned long caps;
+	int i;
+
+	caps = data->pdata->caps;
+
+	if (!(caps & AVI_CAPS_LCD_ALL))
+		/* We need an LCD! */
+		return -EINVAL;
+
+	data->display_segment = avi_segment_build(&caps,
+						  "lcd",
+						  avifb_instance(data),
+						  -1,
+						  data->dev);
+	if (IS_ERR(data->display_segment))
+		return PTR_ERR(data->display_segment);
+
+	data->lcdc = avi_segment_get_node(data->display_segment,
+					  AVI_CAPS_LCD_ALL);
+
+	/* NOTE(review): gam may be NULL when the segment has no gamma
+	 * node; the accessors (avifb_set_gam_config, avifb_setcolreg)
+	 * check for that */
+	data->gam = avi_segment_get_node(data->display_segment, AVI_CAP_GAM);
+
+	/* Initialize gamma LUT to a 1:1 transformation. */
+	for (i = 0; i < 0x100; i++)
+		data->gamma.red[i]
+			= data->gamma.green[i]
+			= data->gamma.blue[i]
+			= i;
+
+	avifb_set_gam_config(data);
+
+	init_waitqueue_head(&data->wait_vbl);
+
+	data->display_segment->priv = data;
+
+	avi_segment_register_irq(data->display_segment, &avifb_interrupt);
+	avi_segment_set_background(data->display_segment, data->pdata->background);
+
+	return 0;
+}
+
+/* Return the avi_pixel_format corresponding to the pixel structure in
+ * var. The framebuffer can only be in RGB format (or palette).
+ *
+ * Returns AVI_INVALID_FMT if the format is invalid or unknown.
+ */
+struct avi_dma_pixfmt avifb_get_pixfmt(struct fb_info *info,
+ struct fb_var_screeninfo *var)
+{
+ if (var->nonstd)
+ return AVI_PIXFMT_INVALID;
+
+ switch (var->bits_per_pixel) {
+ /* For 32 and 24 bpp modes we have to take the endianess into
+ * account since fbdev's pixel format is defined as bit shifts
+ * instead of byte order. The AVI_PIXFMT however follows the
+ * V4L2 convention of using the byte order. */
+ case 32:
+ if (var->red.length != 8 ||
+ var->green.length != 8 ||
+ var->blue.length != 8 ||
+ var->transp.length != 8)
+ return AVI_PIXFMT_INVALID;
+
+ if (var->transp.offset == 0) {
+ if (var->red.offset == 8 &&
+ var->green.offset == 16 &&
+ var->blue.offset == 24)
+ return AVI_PIXFMT_ARGB8888;
+
+ if (var->blue.offset == 8 &&
+ var->green.offset == 16 &&
+ var->red.offset == 24)
+ return AVI_PIXFMT_ABGR8888;
+ }
+
+ if (var->transp.offset == 24) {
+ if (var->red.offset == 0 &&
+ var->green.offset == 8 &&
+ var->blue.offset == 16)
+ return AVI_PIXFMT_RGBA8888;
+
+ if (var->blue.offset == 0 &&
+ var->green.offset == 8 &&
+ var->red.offset == 16)
+ return AVI_PIXFMT_BGRA8888;
+ }
+
+ return AVI_PIXFMT_INVALID;
+ case 24:
+ if (var->red.length != 8 ||
+ var->green.length != 8 ||
+ var->blue.length != 8 ||
+ var->transp.length != 0)
+ return AVI_PIXFMT_INVALID;
+
+
+ if (var->red.offset == 0 &&
+ var->green.offset == 8 &&
+ var->blue.offset == 16)
+ return AVI_PIXFMT_RGB888;
+
+ if (var->blue.offset == 0 &&
+ var->green.offset == 8 &&
+ var->red.offset == 16)
+ return AVI_PIXFMT_BGR888;
+
+ return AVI_PIXFMT_INVALID;
+ case 16:
+ if (var->red.length == 5 &&
+ var->green.length == 6 &&
+ var->blue.length == 5) {
+ if (var->red.offset == 11 &&
+ var->green.offset == 5 &&
+ var->blue.offset == 0)
+ return AVI_PIXFMT_RGB565;
+
+ if (var->red.offset == 0 &&
+ var->green.offset == 5 &&
+ var->blue.offset == 11)
+ return AVI_PIXFMT_BGR565;
+ }
+
+ return AVI_PIXFMT_INVALID;
+ default:
+ return AVI_PIXFMT_INVALID;
+ }
+}
+
+static void avifb_compute_format(struct fb_info *info,
+ struct avi_segment_format *in,
+ struct avi_segment_format *out)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct avifb_par *par = info->par;
+ struct avifb_data *data = par->data;
+
+ avi_segment_get_input_format(data->display_segment, out);
+
+ /* If the resolution does not match the LCD, we crop */
+ out->width = var->xres;
+ out->height = var->yres;
+
+ *in = *out;
+
+ /* Framebuffer is always RGB */
+ in->colorspace = AVI_RGB_CSPACE;
+
+ in->plane0.line_size = var->xres_virtual * var->bits_per_pixel / 8;
+
+ if (in->interlaced)
+ /* Skip the unused field */
+ in->plane0.line_size *= 2;
+
+ /* No planar input format for avifb */
+ in->plane1.line_size = 0;
+
+ in->pix_fmt = avifb_get_pixfmt(info, var);
+}
+
/*
 * fb_check_var hook: validate the user-supplied video mode and, where
 * possible, massage it into something the AVI hardware can actually scan
 * out.  Per the fbdev contract this may adjust fields of @var but must not
 * touch the hardware.
 *
 * Returns 0 on success, -EINVAL for unfixable modes, -ENOMEM when the
 * requested virtual resolution does not fit in the overlay's DMA memory.
 */
static int avifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct avifb_par *par = info->par;
	struct avifb_data *data = par->data;
	struct avi_segment_format in;
	struct avi_segment_format out;
	unsigned framebuffer_size;
	int ret;

	/*
	 * Match the in-memory resolution with the FIFO alignment requirements.
	 */
	if (!var->xres || !var->yres)
		return -EINVAL;
	/* Grow the virtual width rather than rejecting the mode */
	if (var->xres > var->xres_virtual)
		var->xres_virtual = var->xres;
	/* Save us some trouble later (especially in interlaced mode) and make
	 * sure we can pretend to have an even number of lines.  The virtual
	 * height must also hold AVIFB_BUFFER_COUNT buffers for panning. */
	if (roundup(var->yres, 2) * AVIFB_BUFFER_COUNT > var->yres_virtual)
		var->yres_virtual = roundup(var->yres, 2) * AVIFB_BUFFER_COUNT;
	/* The visible area must fit entirely within the virtual one */
	if (var->xoffset + var->xres > var->xres_virtual)
		return -EINVAL;
	if (var->yoffset + var->yres > var->yres_virtual)
		return -EINVAL;

	if (avifb_get_pixfmt(info, var).id == AVI_INVALID_FMTID) {
		/* Some utils just update bpp and expect us to fill in the rest
		 * (e.g. busybox's fbset when using the -depth option). So let's
		 * try to accommodate them. */
		switch (var->bits_per_pixel) {
		case 32:
			/* default to ARGB 8888 */
			var->transp.length = 8;
			var->red.length	   = 8;
			var->green.length  = 8;
			var->blue.length   = 8;

			var->blue.offset   = 0;
			var->green.offset  = 8;
			var->red.offset	   = 16;
			var->transp.offset = 24;
			break;
		case 24:
			/* default to RGB 888 */
			var->transp.length = 0;
			var->red.length	   = 8;
			var->green.length  = 8;
			var->blue.length   = 8;

			var->transp.offset = 0;
			var->blue.offset   = 0;
			var->green.offset  = 8;
			var->red.offset	   = 16;
			break;
		case 16:
			/* default to RGB 565 */
			var->transp.length = 0;
			var->red.length	   = 5;
			var->green.length  = 6;
			var->blue.length   = 5;

			var->transp.offset = 0;
			var->blue.offset   = 0;
			var->green.offset  = 5;
			var->red.offset	   = 11;
			break;
		default:
			/* At least we tried... */
			return -EINVAL;
		}

		/* This one has to work since the timings above are known to be
		 * valid */
		BUG_ON(avifb_get_pixfmt(info, var).id == AVI_INVALID_FMTID);
	}

	/* Dry-run the segment reconfiguration to make sure the AVI accepts
	 * the resulting formats */
	avifb_compute_format(info, &in, &out);

	ret = avi_segment_try_format(par->segment,
				     &in, &out,
				     &par->segment->layout);
	if (ret)
		return ret;

	/* Make sure the whole virtual framebuffer fits in the DMA memory
	 * reserved for this overlay */
	framebuffer_size = var->xres_virtual * var->yres_virtual
		* var->bits_per_pixel / 8;
	if (PAGE_ALIGN(framebuffer_size) >
	    resource_size(&par->overlay->dma_memory))
		return -ENOMEM;

	/* Report the physical screen dimensions (millimetres), -1 when
	 * unknown as per the fbdev convention */
	if (data->width > 0 && data->height > 0) {
		var->height = data->height;
		var->width  = data->width;
	} else {
		var->height = -1;
		var->width  = -1;
	}
	if (var->rotate != FB_ROTATE_UR) {
		dev_err(data->dev,
			"rotation requested but not yet implemented\n");
		return -EINVAL;
	}

	return 0;
}
+
+/* This function is only called after a succesful avifb_check_var, we don't have
+ * to recheck everything. */
+static int avifb_set_par(struct fb_info *info)
+{
+ struct avifb_par *par = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct avifb_data *data = par->data;
+ struct avi_segment_format in;
+ struct avi_segment_format out;
+ unsigned line_size;
+ unsigned framebuffer_size;
+ int ret;
+
+ avi_segment_get_input_format(data->display_segment, &out);
+
+ avifb_compute_format(info, &in, &out);
+
+ line_size = var->xres_virtual * var->bits_per_pixel / 8;
+ framebuffer_size = line_size * var->yres_virtual;
+
+ ret = avi_segment_set_format(par->segment, &in, &out);
+ if (ret)
+ return ret;
+
+ info->fix.smem_len = PAGE_ALIGN(framebuffer_size);
+ info->screen_size = 0;
+ info->fix.line_length = line_size;
+
+ return 0;
+}
+
/*
 * fb_ioctl hook: handles the vblank-related ioctls plus the driver-specific
 * AVI ISP offset query.  Anything else is left to the fbdev core
 * (-ENOIOCTLCMD).
 */
static int avifb_ioctl(struct fb_info *info, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct avifb_par *par = info->par;
	struct avifb_data *data = par->data;

	switch (cmd) {
	case FBIOGET_VBLANK:
	{
		struct fb_vblank vb = {
			/* we return the vblcount and we support WAITFORVSYNC */
			.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VSYNC,
			.count = atomic_read(&data->vbl_count),
		};
		if (copy_to_user(argp, &vb, sizeof(vb)))
			return -EFAULT;
		return 0;
	}
	case FBIO_WAITFORVSYNC:
		/* Flush pending CPU writes before waiting so the frame shown
		 * after the vblank is up to date */
		avifb_flush_cache(info);
		return avifb_wait_for_vbl(data);
	case AVI_ISP_IOGET_OFFSETS:
		/* NOTE(review): arg is a user pointer but is cast without the
		 * __user annotation; presumably avi_isp_get_offsets() does the
		 * copy_to_user itself — worth confirming. */
		return avi_isp_get_offsets(par->segment,
					   (struct avi_isp_offsets *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}
+
+/*
+ * sysfs: per-framebuffer entries
+ */
+static ssize_t avifb_show_pan_waits_for_vsync(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", par->pan_waits_for_vbl);
+}
+
+static ssize_t avifb_store_pan_waits_for_vsync(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(device);
+ struct avifb_par *par = info->par;
+ char *last;
+
+ par->pan_waits_for_vbl = !!simple_strtoul(buf, &last, 0);
+
+ return count;
+}
+
+static ssize_t avifb_show_fb_enabled(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ !par->segment->layout.hidden);
+}
+
+static ssize_t avifb_store_fb_enabled(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(device);
+ int enable;
+ char *last;
+
+ enable = !!simple_strtoul(buf, &last, 0);
+
+ avifb_blank(enable ? FB_BLANK_UNBLANK : FB_BLANK_NORMAL, info);
+
+ return count;
+}
+
+static ssize_t avifb_show_pan_count(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", atomic_read(&par->pan_count));
+}
+
+static ssize_t avifb_show_position(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+
+ return snprintf(buf, PAGE_SIZE, "%u,%u\n",
+ par->segment->layout.x,
+ par->segment->layout.y);
+}
+
+static ssize_t avifb_store_position(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+ char *last;
+ unsigned x, y;
+ int ret;
+
+ x = simple_strtoul(buf, &last, 0);
+ if (*last++ != ',')
+ return -EINVAL;
+
+ y = simple_strtoul(last, &last, 0);
+ if (*last != '\0' && *last != '\n')
+ return -EINVAL;
+
+ ret = avi_segment_set_position(par->segment, x, y);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t avifb_show_alpha(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+ int alpha = par->segment->layout.alpha;
+
+ if (alpha == AVI_ALPHA_OSD)
+ return snprintf(buf, PAGE_SIZE, "OSD (per-pixel alpha)\n");
+
+ return snprintf(buf, PAGE_SIZE, "0x%02x\n", alpha);
+}
+
+static ssize_t avifb_store_alpha(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(device);
+ struct avifb_par *par = info->par;
+ unsigned alpha;
+ char *last;
+
+ if (*buf == 'O' || *buf == 'o')
+ alpha = AVI_ALPHA_OSD;
+ else {
+ alpha = simple_strtoul(buf, &last, 0);
+ if (alpha > 0xff || (*last != '\0' && *last != '\n'))
+ return -EINVAL;
+
+ alpha = alpha;
+ }
+
+ avi_segment_set_alpha(par->segment, alpha);
+
+ return count;
+}
+
+static ssize_t avifb_show_timeout(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct fb_info *info = dev_get_drvdata(device);
+ const struct avifb_par *par = info->par;
+ struct avi_node *in_fifo;
+ struct avi_fifo_registers regs;
+
+ in_fifo = avi_segment_get_node(par->segment, AVI_CAP_DMA_IN);
+ if (in_fifo == NULL)
+ return -ENODEV;
+
+ avi_fifo_get_registers(in_fifo, &regs);
+ return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d,%d\n",
+ 16 << regs.timeout.burst,
+ regs.timeout.issuing_capability + 1,
+ regs.timeout.pincr,
+ regs.timeout.hblank,
+ regs.timeout.vblank);
+}
+
/*
 * sysfs write: reconfigure the input FIFO timeouts.  The expected format
 * mirrors the show callback: "burst,issuing,pincr,hblank,vblank" where
 * burst is 16, 32 or 64 bytes, issuing is in [1, 8], and pincr must be
 * non-zero.  The current registers are read first so unrelated fields are
 * preserved.
 */
static ssize_t avifb_store_timeout(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct fb_info *info = dev_get_drvdata(device);
	struct avifb_par *par = info->par;
	struct avi_node *in_fifo;
	char *last;
	unsigned tmp;
	struct avi_fifo_registers regs;

	in_fifo = avi_segment_get_node(par->segment, AVI_CAP_DMA_IN);
	if (in_fifo == NULL)
		return -ENODEV;

	/* Read-modify-write: keep the fields we don't parse intact */
	avi_fifo_get_registers(in_fifo, &regs);

	/* Field 1: burst length in bytes, encoded as 16 << burst */
	tmp = simple_strtoul(buf, &last, 0);
	if (*last++ != ',')
		return -EINVAL;

	switch (tmp) {
	case 16:
		regs.timeout.burst = 0;
		break;
	case 32:
		regs.timeout.burst = 1;
		break;
	case 64:
		regs.timeout.burst = 2;
		break;
	default:
		return -EINVAL;
	}

	/* Field 2: issuing capability, stored off-by-one in the register */
	tmp = simple_strtoul(last, &last, 0);
	if (*last++ != ',')
		return -EINVAL;

	if (tmp == 0 || tmp > 8)
		return -EINVAL;

	regs.timeout.issuing_capability = tmp - 1;

	/* Field 3: pincr, must be non-zero */
	regs.timeout.pincr = simple_strtoul(last, &last, 0);
	if (*last++ != ',')
		return -EINVAL;

	if (regs.timeout.pincr == 0)
		return -EINVAL;

	/* Fields 4 and 5: horizontal and vertical blanking timeouts */
	regs.timeout.hblank = simple_strtoul(last, &last, 0);
	if (*last++ != ',')
		return -EINVAL;

	regs.timeout.vblank = simple_strtoul(last, &last, 0);
	if (*last != '\0' && *last != '\n')
		return -EINVAL;

	avi_fifo_set_registers(in_fifo, &regs);

	return count;
}
+
/* Per-framebuffer sysfs attributes, created on each fb device at overlay
 * init time.  Read-only attributes pass NULL as the store callback. */
static struct device_attribute avifb_fb_sysfs_attrs[] = {
	__ATTR(pan_waits_for_vsync, S_IRUGO|S_IWUSR,
	       avifb_show_pan_waits_for_vsync, avifb_store_pan_waits_for_vsync),
	__ATTR(enabled, S_IRUGO|S_IWUSR,
	       avifb_show_fb_enabled, avifb_store_fb_enabled),
	__ATTR(pan_count, S_IRUGO, avifb_show_pan_count, NULL),
	__ATTR(position, S_IRUGO|S_IWUSR,
	       avifb_show_position, avifb_store_position),
	__ATTR(alpha, S_IRUGO|S_IWUSR, avifb_show_alpha, avifb_store_alpha),
	__ATTR(timeout, S_IRUGO|S_IWUSR,
	       avifb_show_timeout, avifb_store_timeout),
};
+
+static void avifb_update_period(struct avifb_data *data, unsigned long pclk)
+{
+ static const unsigned long scaling = 10000;
+ unsigned long period;
+
+ /* To achieve the best precision we should be doing:
+ *
+ * period = (clk_per_frame * USEC_PER_SEC) / pclk
+ *
+ * However the multiplication is very likely to oveflow, so instead we
+ * settle for:
+ *
+ * period = clk_per_frame / (pclk / scaling) * (USEC_PER_SEC / scaling)
+ */
+
+ pclk = (pclk + scaling / 2) / scaling;
+
+ period = (data->clk_per_frame + pclk / 2) / pclk;
+ period *= USEC_PER_SEC / scaling;
+
+ data->display_segment->period_us = period;
+}
+
/*
 * Round the requested pixel clock rate to the closest rate the LCD clock
 * can produce, program it, and refresh the derived frame period.  Rates
 * deviating more than 5% from the request are refused with -ERANGE.
 * On success *rate is updated with the rate actually set.
 *
 * Should be called with lcd_lock held.
 */
static int avifb_set_clock_rate(struct avifb_data *data, unsigned long *rate)
{
	unsigned long r = *rate;
	unsigned long pclk;
	unsigned long delta;
	unsigned long delta_percent;
	int ret;

	pclk = avi_lcd_pixclock_round_rate(data->lcdc, r);

	/* Exact match: nothing to warn about */
	if (pclk == r)
		goto set_rate;

	if (r > pclk)
		delta = r - pclk;
	else
		delta = pclk - r;

	/* Compute deviation in percents, rounding up */
	delta_percent = (delta * 100 + r - 1) / r;

	dev_warn(data->dev,
		 "pixelclock rounded to %lukHz [%lu%%]\n",
		 pclk / 1000,
		 delta_percent);

	/* Allow up to 5% deviation */
	if (delta_percent > 5) {
		dev_err(data->dev,
			"clock rate deviation too big, bailing out\n");
		return -ERANGE;
	}

 set_rate:
	ret = avi_lcd_pixclock_set_rate(data->lcdc, pclk);
	if (ret)
		return ret;

	/* The frame period depends on the pixel clock, keep it in sync */
	avifb_update_period(data, pclk);

	*rate = pclk;

	return 0;
}
+
+static unsigned avifb_get_framerate(const struct avifb_data *data)
+{
+ return (USEC_PER_SEC + data->display_segment->period_us / 2) /
+ data->display_segment->period_us;
+}
+
/*
 * Translate a struct avi_videomode into the LCDC timing registers.
 * Only the timing fields of @lcd_regs are written; the caller is expected
 * to have filled the rest.
 */
static void avifb_vmode_to_registers(struct avi_lcd_regs *lcd_regs,
				     const struct avi_videomode *vmode)
{
	/*
	 * The timings configuration follows. We configure both fields all the
	 * time to simplify the code a bit, but the LCDC will only use the
	 * top_* timings in progressive mode.
	 */

	/* The horizontal timings are expressed in pixclock ticks. TOP and BOT
	 * horizontal timings are identical, exception made of the vsync/hsync
	 * offsets */
	lcd_regs->top_h_timing0.top_hsync_off
		= lcd_regs->bot_h_timing0.bot_hsync_off
		= vmode->hsync_len;

	lcd_regs->top_h_timing0.top_hactive_on
		= lcd_regs->bot_h_timing0.bot_hactive_on
		= lcd_regs->top_h_timing0.top_hsync_off + vmode->left_margin;

	lcd_regs->top_h_timing1.top_hactive_off
		= lcd_regs->bot_h_timing1.bot_hactive_off
		= lcd_regs->top_h_timing0.top_hactive_on + vmode->xres;

	lcd_regs->top_h_timing1.top_htotal
		= lcd_regs->bot_h_timing1.bot_htotal
		= lcd_regs->top_h_timing1.top_hactive_off + vmode->right_margin;

	/* We offset the vsync_hon and hoff by half a line to denote the bottom
	 * field to the LCD. */
	lcd_regs->top_v_timing0.top_vsync_hon
		= lcd_regs->top_v_timing1.top_vsync_hoff
		= 0;

	lcd_regs->bot_v_timing0.bot_vsync_hon
		= lcd_regs->bot_v_timing1.bot_vsync_hoff
		= lcd_regs->bot_h_timing1.bot_htotal / 2;

	/* The vertical timings are expressed in lines */
	lcd_regs->top_v_timing0.top_vsync_von
		= lcd_regs->bot_v_timing0.bot_vsync_von
		= 0;

	lcd_regs->top_v_timing1.top_vsync_voff
		= lcd_regs->bot_v_timing1.bot_vsync_voff
		= vmode->vsync_len;

	lcd_regs->top_v_timing2.top_vactive_on
		= lcd_regs->top_v_timing1.top_vsync_voff + vmode->upper_margin;

	/* In interlaced mode each field carries half the (rounded-up) lines */
	if (vmode->flags & AVI_VIDEOMODE_INTERLACED)
		lcd_regs->top_v_timing2.top_vactive_off
			= lcd_regs->top_v_timing2.top_vactive_on
			+ roundup(vmode->yres, 2) / 2;
	else
		lcd_regs->top_v_timing2.top_vactive_off
			= lcd_regs->top_v_timing2.top_vactive_on
			+ vmode->yres;

	/* bottom field is offset by one line (at least, for PAL and NTSC that's
	 * how it is, I'm not sure whether it's true for all the interlaced
	 * screens. Maybe this should be made configurable, we'll see) */
	lcd_regs->bot_v_timing2.bot_vactive_on = lcd_regs->top_v_timing2.top_vactive_on + 1;
	lcd_regs->bot_v_timing2.bot_vactive_off = lcd_regs->top_v_timing2.top_vactive_off + 1;

	lcd_regs->top_v_timing3.top_vtotal
		= lcd_regs->top_v_timing2.top_vactive_off + vmode->lower_margin;
	lcd_regs->bot_v_timing3.bot_vtotal
		= lcd_regs->bot_v_timing2.bot_vactive_off + vmode->lower_margin;
}
+
+static inline int avifb_ticks_per_pixel(struct avifb_data *data)
+{
+ switch (data->lcd_format_control >> 2) {
+ case 0: /* RGB888/YUV422 1X24 */
+ case 1: /* YUV 4:2:2 1X16 */
+ return 1;
+ case 2: /* YUV 4:2:2 2X8 */
+ case 5: /* RGB565 2X8 */
+ case 6: /* RGB555 2X8 */
+ case 7: /* RGB444 2X8 */
+ /* Two clock periods per pixel */
+ return 2;
+ case 3: /* RGB888 3X8 */
+ /* Three clock periods per pixel */
+ return 3;
+ break;
+ case 4: /* RGBX8888 4X8 */
+ /* Four clock periods per pixel */
+ return 4;
+ default:
+ BUG();
+ }
+}
+
/*
 * Program the LCDC interface, format and timing registers for the current
 * videomode and set the pixel clock accordingly.  On clock failure the
 * previous clk_per_frame value is restored and the LCDC registers are left
 * untouched.  Takes lcd_lock internally.
 */
static int avifb_configure_timings(struct avifb_data *data)
{
	const struct avi_node *lcd = data->lcdc;
	const struct avi_videomode *vmode = &data->lcd_videomode;
	unsigned long ticks_per_pixel;
	unsigned long pclk;
	unsigned long old_clk_per_frame;
	int ret;

	struct avi_lcd_regs lcd_reg = {
		.itsource = {{
			.done_en	 = 1, /* EOF IRQ enabled */
		}},
		.interface = {{
			/* Sync polarities are active-low in the register */
			.ivs	   = !(vmode->flags & AVI_VIDEOMODE_VSYNC_ACTIVE_HIGH),
			.ihs	   = !(vmode->flags & AVI_VIDEOMODE_HSYNC_ACTIVE_HIGH),
			.ipc	   = data->lcd_interface.ipc,
			.ioe	   = data->lcd_interface.ioe,
			.psync_en  = data->lcd_interface.psync_en,
			.psync_rf  = data->lcd_interface.psync_rf,
			.itu656	   = data->lcd_interface.itu656,
			.clip_en   = data->lcd_interface.clip_en,
			.prog	   = !(vmode->flags & AVI_VIDEOMODE_INTERLACED),
			.free_run  = data->lcd_interface.free_run,
			.pad_select = data->lcd_interface.pad_select,
		}},
		.top_format_ctrl = {{
			.top_format_control = data->lcd_format_control,
		}},
		.bot_format_ctrl = {{
			.bot_format_control = data->lcd_format_control,
		}},
		.dpd = {{
			.dpd	  = data->dpd,
			.colorbar = 0,
		}},
	};

	avifb_vmode_to_registers(&lcd_reg, vmode);

	mutex_lock(&data->lcd_lock);

	/* vmode->pixclock is in kHz */
	pclk = vmode->pixclock * 1000UL;

	dev_info(data->dev, "Using vmode %s (PCLK at %lukHz)\n",
		 vmode->name, pclk / 1000);

	ticks_per_pixel = avifb_ticks_per_pixel(data);

	/* Narrow buses need several clock ticks per pixel */
	pclk *= ticks_per_pixel;

	old_clk_per_frame = data->clk_per_frame;

	data->clk_per_frame = lcd_reg.top_h_timing1.top_htotal
		* lcd_reg.top_v_timing3.top_vtotal
		* ticks_per_pixel;

	ret = avifb_set_clock_rate(data, &pclk);
	if (ret) {
		/* Rollback */
		data->clk_per_frame = old_clk_per_frame;
		goto unlock;
	}

	avi_lcd_set_registers(lcd, &lcd_reg);

	dev_info(data->dev,
		 "screen output configured: %dx%d%c@%dHz\n",
		 vmode->xres, vmode->yres,
		 (vmode->flags & AVI_VIDEOMODE_INTERLACED) ? 'i' : 'p',
		 avifb_get_framerate(data));

	ret = 0;

 unlock:
	mutex_unlock(&data->lcd_lock);

	return ret;
}
+
/*
 * One-time setup of the display segment: set its input/output formats from
 * the current videomode, program the timings, then enable the pixel clock,
 * the segment IRQ, and finally the segment itself.
 */
static int avifb_configure_lcd_segment(struct avifb_data *data)
{
	struct avi_segment_format lcd_fmt = {
		.width	    = data->lcd_videomode.xres,
		.height	    = data->lcd_videomode.yres,
		.interlaced = IS_INTERLACED(data),
		.colorspace = data->output_cspace,
	};
	struct avi_segment_format in_fmt = lcd_fmt;
	int ret;

	if (data->display_segment->caps & AVI_CAP_CONV)
		/* Force RGB input and do color conversion in the output segment
		 * if it has a CONV. This is pretty arbitrary but it mimicks the
		 * previous behaviour of this driver: all overlays were supposed
		 * to output in RGB. */
		in_fmt.colorspace = AVI_RGB_CSPACE;

	ret = avi_segment_set_format(data->display_segment, &in_fmt, &lcd_fmt);
	if (ret) {
		dev_err(data->dev, "failed to set LCD segment format\n");
		return ret;
	}

	ret = avifb_configure_timings(data);
	if (ret)
		return ret;

#ifdef AVI_BACKWARD_COMPAT
	/* Legacy interlaced handling starts on the bottom field */
	data->next_field = AVI_BOT_FIELD;
#endif

	avi_lcd_pixclock_enable(data->lcdc);

	avi_segment_enable_irq(data->display_segment);

	ret = avi_segment_activate(data->display_segment);
	if (ret) {
		dev_err(data->dev, "couldn't activate LCD segment\n");
		return ret;
	}

	return 0;
}
+
+/*
+ * sysfs: per-LCD entries
+ */
+static ssize_t avifb_show_vbl_count(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", atomic_read(&data->vbl_count));
+}
+
+static ssize_t avifb_show_colorbar(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+ unsigned col;
+ const char *str;
+
+ col = avi_lcd_get_colorbar(data->lcdc);
+
+ switch (col) {
+ case 0:
+ case 1:
+ str = "0";
+ break;
+ case 2:
+ str = "YUV";
+ break;
+ case 3:
+ str = "RGB";
+ break;
+ default:
+ str = "?";
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
/*
 * sysfs write: enable or disable the LCDC colorbar test pattern.  The
 * pattern colorspace (RGB or YUV) can be forced or guessed from the output
 * format.  Bit 1 of the colorbar register enables the pattern; bit 0
 * selects RGB (1) or YUV (0).
 */
static ssize_t avifb_store_colorbar(struct device *device,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct avifb_data *data = dev_get_drvdata(device);
	unsigned frm = data->lcd_format_control;
	unsigned rgb;

	/* We only consider the first letter to determine what's to be done:
	 * - If we get a '1' we try to determine which colorbar to use based on
	 *   the output colorspace and format.
	 * - If we get a 'Y' or 'y' we display the YUV colorbar.
	 * - If we get an 'R' or 'r' we display the RGB colorbar.
	 */
	switch (*buf) {
	case '1':
		/* Set the colorbar depending on format_control */
		switch (frm >> 2) {
		case 1:
		case 2:
			/* YUV colourspace */
			rgb = 0;
			break;
		case 0:
			/* This can be either 24bit RGB or 24bit YUV. Check the
			 * segment format. */
			rgb = data->display_segment->output_format.colorspace
				== AVI_RGB_CSPACE;
			break;
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		default:
			/* All remaining formats are RGB variants */
			rgb = 1;
			break;
		}
		break;
	case 'R':
	case 'r':
		/* force RGB colorbar */
		rgb = 1;
		break;
	case 'Y':
	case 'y':
		/* force YUV colorbar */
		rgb = 0;
		break;
	case '0':
	default:
		/* disable colorbar */
		avi_lcd_set_colorbar(data->lcdc, 0);
		return count;
	}

	/* Enable bit (0x2) plus the colorspace selection bit */
	avi_lcd_set_colorbar(data->lcdc, 0x2 | rgb);

	return count;
}
+
+static ssize_t avifb_show_background(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "0x%06x\n",
+ avi_segment_get_background(data->display_segment));
+}
+
+static ssize_t avifb_store_background(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+ char *last;
+ unsigned long bg;
+
+ bg = simple_strtoul(buf, &last, 0);
+
+ if (bg & 0xff000000 || (*last != '\0' && *last != '\n'))
+ return -EINVAL;
+
+ avi_segment_set_background(data->display_segment, bg);
+
+ return count;
+}
+
+static ssize_t avifb_show_framerate(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%uHz\n", avifb_get_framerate(data));
+}
+
+static ssize_t avifb_store_framerate(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+ unsigned long fps;
+ unsigned long clk;
+ char *last;
+ int ret;
+
+ fps = simple_strtoul(buf, &last, 0);
+
+ if (*last != '\0' && *last != '\n')
+ return -EINVAL;
+
+ clk = data->clk_per_frame * fps;
+
+ dev_info(data->dev, "Setting PCLK to %lukHz\n", clk / 1000);
+
+ mutex_lock(&data->lcd_lock);
+ avi_lcd_pixclock_disable(data->lcdc);
+
+ ret = avifb_set_clock_rate(data, &clk);
+
+ avi_lcd_pixclock_enable(data->lcdc);
+ mutex_unlock(&data->lcd_lock);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/* The clear register value is tristate:
+ *
+ * Normal: usual clear behaviour. triggered by the LCD state machine.
+ * Inactive: clear is forced low
+ * Active: clear is forced high
+ */
+static ssize_t avifb_store_clear(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+ unsigned clear;
+
+ switch (*buf) {
+ case 'a':
+ case 'A':
+ case '1': /* Active */
+ clear = AVI_LCD_FORCE_CLEAR_ACTIVE;
+ break;
+ case 'i':
+ case 'I': /* Inactive */
+ clear = AVI_LCD_FORCE_CLEAR_INACTIVE;
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ clear = AVI_LCD_FORCE_CLEAR_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ avi_lcd_set_force_clear(data->lcdc, clear);
+
+ return count;
+}
+
+static ssize_t avifb_show_clear(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const char *str;
+ unsigned clear;
+
+ clear = avi_lcd_get_force_clear(data->lcdc);
+
+ switch (clear) {
+ case AVI_LCD_FORCE_CLEAR_ACTIVE:
+ str = "Active";
+ break;
+ case AVI_LCD_FORCE_CLEAR_INACTIVE:
+ str = "Inactive";
+ break;
+ case AVI_LCD_FORCE_CLEAR_NORMAL:
+ str = "Normal";
+ break;
+ default:
+ str = "INVALID";
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t avifb_show_vmode(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const struct avi_videomode *vmode = &data->lcd_videomode;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", vmode->name);
+}
+
/*
 * Adjust an overlay after a videomode change so it still fits on the new
 * screen: the overlay keeps its position (reset to 0 when off-screen) and
 * is shrunk when it would overflow the new resolution.  Called with the
 * fb_info locked.
 */
static int avifb_resize_overlay(struct fb_info *info,
				const struct avi_videomode *vmode)
{
	struct avifb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct avi_segment_format in;
	struct avi_segment_format out;
	struct avi_segment_layout layout;
	unsigned max_width;
	unsigned max_height;
	int ret;

	avi_segment_get_layout(par->segment, &layout);

	/* Check if the position is outside the screen */
	if (layout.x >= vmode->xres)
		layout.x = 0;

	if (layout.y >= vmode->yres)
		layout.y = 0;

	/* Here's our heuristic: we see if the current overlay at its current
	 * position still fits in the screen, if so we do nothing. Otherwise we
	 * leave the overlay at the current position but we reduce its size to
	 * fit. */
	max_width = vmode->xres - layout.x;

	if (var->xres > max_width)
		var->xres = max_width;

	max_height = vmode->yres - layout.y;

	if (var->yres > max_height)
		var->yres = max_height;

	avifb_compute_format(info, &in, &out);

	ret = avi_segment_set_format_and_layout(par->segment,
						&in,
						&out,
						&layout);
	if (ret)
		return ret;

	/* Repan to make the (possibly resized) visible area effective */
	ret = avifb_pan_display_nowait(&info->var, info);

	return ret;
}
+
/*
 * Switch the display to a new videomode: stop the pixel clock, resize the
 * display segment, reprogram the timings and then fix up every framebuffer
 * overlay so it still fits on the new screen.  All fb_infos are locked for
 * the duration of the switch to keep userspace out.
 */
static int avifb_change_vmode(struct avifb_data *data,
			      const struct avi_videomode *vmode)
{
	const unsigned noverlays = data->pdata->overlay_nr;
	struct avi_segment_format in_fmt;
	struct avi_segment_format lcd_fmt;
	int i;

	for (i = 0; i < noverlays; i++)
		BUG_ON(!lock_fb_info(data->infos[i]));

	avi_lcd_pixclock_disable(data->lcdc);

	/* Reconfigure the LCD */
	avi_segment_get_input_format(data->display_segment, &in_fmt);
	avi_segment_get_output_format(data->display_segment, &lcd_fmt);

	in_fmt.width = lcd_fmt.width = vmode->xres;
	in_fmt.height = lcd_fmt.height = vmode->yres;
	in_fmt.interlaced = lcd_fmt.interlaced =
		vmode->flags & AVI_VIDEOMODE_INTERLACED;

	/* Only the geometry changed, so a failure here would be a driver
	 * bug */
	BUG_ON(avi_segment_set_format(data->display_segment,
				      &in_fmt,
				      &lcd_fmt));

	data->lcd_videomode = *vmode;

	/* NOTE(review): the return value of avifb_configure_timings() is
	 * ignored here — presumably the mode was validated by the caller,
	 * but worth confirming. */
	avifb_configure_timings(data);

	/* Reconfigure the FB overlays */
	for (i = 0; i < noverlays; i++)
		BUG_ON(avifb_resize_overlay(data->infos[i], vmode));

	avi_lcd_pixclock_enable(data->lcdc);

	for (i = 0; i < noverlays; i++)
		unlock_fb_info(data->infos[i]);

	return 0;
}
+
+static void avifb_from_fb_videomode(struct avi_videomode *vmode,
+ const struct fb_videomode *fb_vmode)
+{
+ vmode->name = fb_vmode->name;
+ vmode->xres = fb_vmode->xres;
+ vmode->yres = fb_vmode->yres;
+ vmode->pixclock = PICOS2KHZ(fb_vmode->pixclock);
+ vmode->left_margin = fb_vmode->left_margin;
+ vmode->right_margin = fb_vmode->right_margin;
+ vmode->upper_margin = fb_vmode->upper_margin;
+ vmode->lower_margin = fb_vmode->lower_margin;
+ vmode->hsync_len = fb_vmode->hsync_len;
+ vmode->vsync_len = fb_vmode->vsync_len;
+
+ vmode->flags = 0;
+
+ if (fb_vmode->vmode & FB_VMODE_INTERLACED)
+ vmode->flags |= AVI_VIDEOMODE_INTERLACED;
+
+ if (fb_vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+ vmode->flags |= AVI_VIDEOMODE_VSYNC_ACTIVE_HIGH;
+
+ if (fb_vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+ vmode->flags |= AVI_VIDEOMODE_HSYNC_ACTIVE_HIGH;
+}
+
/*
 * Pick the best supported mode from a monitor's mode database and switch
 * the LCD identified by @id to it.  "Best" means: the screen's preferred
 * (first detailed) mode when supported, otherwise the widest supported
 * mode, ties broken by the fastest pixel clock.
 *
 * Returns the selected mode, NULL when no mode is usable, or an ERR_PTR.
 */
struct fb_videomode *avifb_select_mode(const char *id,
				       struct fb_monspecs *monspecs)
{
	struct avi_segment *s;
	struct avifb_data *data;
	struct fb_videomode *best = NULL;
	struct avi_videomode vmode;
	unsigned long min_period;
	int i;
	int ret;
	int select_first_detailed;

	s = avi_segment_find(id);

	if (s == NULL)
		return ERR_PTR(-ENODEV);

	data = s->priv;
	if (data == NULL)
		return ERR_PTR(-ENODEV);

	/* struct fb_videomode stores the pixclock period in picoseconds
	 * (KHZ2PICOS), not the frequency.  Shorter period = faster clock, so
	 * min_period is the fastest pixel clock the hardware supports. */
	min_period = KHZ2PICOS(AVI_LIMIT_MAX_PHY_FREQ) *
		avifb_ticks_per_pixel(data);

	/* The FB_MISC_1ST_DETAIL flag is set to indicate that the screen
	 * preferred mode is the first detailed one */

	select_first_detailed = monspecs->misc & FB_MISC_1ST_DETAIL;

	for (i = 0; i < monspecs->modedb_len; i++) {
		struct fb_videomode *m = &monspecs->modedb[i];

		if (m->pixclock < min_period ||
		    m->xres > AVI_LIMIT_MAX_WIDTH ||
		    m->yres > AVI_LIMIT_MAX_HEIGHT) {
			/* We don't support this videomode */
			if (m->flag & FB_MODE_IS_DETAILED)
				/* We don't support the screen preferred mode */
				select_first_detailed = 0;
			continue;
		}

		if (select_first_detailed && (m->flag & FB_MODE_IS_DETAILED)) {
			/* Select the preferred mode if possible */
			best = m;
			break;
		}

		if (best == NULL) {
			/* Better than nothing */
			best = m;
			continue;
		}

		/* Prefer wider modes... */
		if (m->xres > best->xres) {
			best = m;
			continue;
		}

		/* ...and, at equal width, shorter periods (i.e. faster
		 * pixel clocks) */
		if (m->xres == best->xres && m->pixclock < best->pixclock) {
			best = m;
			continue;
		}
	}

#ifdef DEBUG
	dprintk(data, "Selecting mode:\n");

	for (i = 0; i < monspecs->modedb_len; i++) {
		struct fb_videomode *m = &monspecs->modedb[i];

		dprintk(data, "%c %dx%d @ %ldkHz\n",
			(m == best) ? '*' : ' ',
			m->xres, m->yres, PICOS2KHZ(m->pixclock));
	}
#endif

	if (best == NULL)
		return NULL;

	avifb_from_fb_videomode(&vmode, best);

	ret = avifb_change_vmode(data, &vmode);
	if (ret)
		return ERR_PTR(ret);

	return best;
}
EXPORT_SYMBOL(avifb_select_mode);
+
+int avifb_set_screen_size(const char* id, unsigned width, unsigned height)
+{
+ struct avi_segment *s;
+ struct avifb_data *data;
+
+ s = avi_segment_find(id);
+ if (s == NULL)
+ return -ENODEV;
+
+ data = s->priv;
+ if (data == NULL)
+ return -ENODEV;
+
+ data->width = width;
+ data->height = height;
+
+ return 0;
+}
+EXPORT_SYMBOL(avifb_set_screen_size);
+
+static ssize_t avifb_store_vmode(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct avifb_data *data = dev_get_drvdata(device);
+ size_t l = count;
+ const struct avi_videomode *vm;
+ const struct avi_videomode *vmode;
+ int i;
+ int ret;
+
+ while (buf[l] == '\n' || buf[l] == '\0')
+ l--;
+
+ vmode = NULL;
+ for (i = 0; (vm = data->pdata->lcd_videomodes[i]) != NULL; i++) {
+ if (strncmp(vm->name, buf, l) == 0) {
+ vmode = vm;
+ break;
+ }
+ }
+
+ if (!vmode) {
+ dev_warn(data->dev,
+ "unknown vmode \"%s\"\n", buf);
+ return -EINVAL;
+ }
+
+ ret = avifb_change_vmode(data, vmode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static ssize_t avifb_show_default_vmode(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const struct avi_videomode *vmode = &data->lcd_default_videomode;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", vmode->name);
+}
+
+static ssize_t avifb_show_default_resolution(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const struct avi_videomode *vmode = &data->lcd_default_videomode;
+
+ return snprintf(buf, PAGE_SIZE, "%ux%u\n", vmode->xres, vmode->yres);
+}
+
+static ssize_t avifb_show_resolution(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const struct avi_videomode *vmode = &data->lcd_videomode;
+
+ return snprintf(buf, PAGE_SIZE, "%ux%u\n", vmode->xres, vmode->yres);
+}
+
+static ssize_t avifb_show_dpd(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "0x%06x\n", data->dpd);
+}
+
/*
 * sysfs write: set the default pixel data value and push it to the LCDC
 * when one is present.
 */
static ssize_t avifb_store_dpd(struct device *device,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct avifb_data *data = dev_get_drvdata(device);
	unsigned long dpd;
	char *last;

	dpd = simple_strtoul(buf, &last, 0);

	if (*last != '\0' && *last != '\n')
		return -EINVAL;

	/* NOTE(review): this mask is 0x0FFFFFFF (28 bits, note the lowercase
	 * trailing 'f') while the show callback prints dpd with "%06x" as if
	 * it were a 24-bit value — presumably one of the two is wrong;
	 * confirm the actual register width. */
	if (dpd & ~0xFFFFFFfUL)
		return -ERANGE;

	data->dpd = dpd;

	if (data->lcdc)
		avi_lcd_set_dpd(data->lcdc, data->dpd);

	return count;
}
+
+static ssize_t avifb_show_default_screen_size(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+ const struct avifb_platform_data *pdata = data->pdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ux%u\n", pdata->width, pdata->height);
+}
+
+static ssize_t avifb_show_screen_size(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%ux%u\n", data->width, data->height);
+}
+
+void avifb_set_monspecs(const char *id, struct fb_monspecs *monspecs)
+{
+ struct avi_segment *seg;
+ struct avifb_data *data;
+
+ /* Get avi_segment associated to LCD ID */
+ seg = avi_segment_find(id);
+ if (seg == NULL)
+ return;
+
+ /* Get avifb_data from avi_segment */
+ data = seg->priv;
+ if (data == NULL)
+ return;
+
+ /* Copy monitor specs */
+ if (monspecs == NULL)
+ memset(&data->monspecs, 0, sizeof(struct fb_monspecs));
+ else
+ memcpy(&data->monspecs, monspecs, sizeof(struct fb_monspecs));
+
+ /* Reset modedb to an empty list */
+ data->monspecs.modedb = NULL;
+ data->monspecs.modedb_len = 0;
+}
+EXPORT_SYMBOL(avifb_set_monspecs);
+
+static ssize_t avifb_show_monitor_manufacturer(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data->monspecs.manufacturer);
+}
+
+
+static ssize_t avifb_show_monitor_name(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data->monspecs.monitor);
+}
+
+static ssize_t avifb_show_monitor_serial(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data->monspecs.serial_no);
+}
+
+static ssize_t avifb_show_monitor_misc(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ const struct avifb_data *data = dev_get_drvdata(device);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data->monspecs.ascii);
+}
+
/* Per-LCD sysfs attributes, created on the platform device.  Read-only
 * attributes pass NULL as the store callback. */
static struct device_attribute avifb_lcd_sysfs_attrs[] = {
	__ATTR(vbl_count, S_IRUGO, avifb_show_vbl_count, NULL),
	__ATTR(colorbar, S_IRUGO | S_IWUSR,
	       avifb_show_colorbar, avifb_store_colorbar),
	__ATTR(background, S_IRUGO | S_IWUSR,
	       avifb_show_background, avifb_store_background),
	__ATTR(framerate, S_IRUGO | S_IWUSR,
	       avifb_show_framerate, avifb_store_framerate),
	__ATTR(clear, S_IRUGO | S_IWUSR, avifb_show_clear, avifb_store_clear),
	__ATTR(vmode, S_IRUGO | S_IWUSR, avifb_show_vmode, avifb_store_vmode),
	__ATTR(default_vmode, S_IRUGO | S_IWUSR, avifb_show_default_vmode, NULL),
	__ATTR(resolution, S_IRUGO, avifb_show_resolution, NULL),
	__ATTR(default_resolution, S_IRUGO, avifb_show_default_resolution, NULL),
	__ATTR(dpd, S_IRUGO | S_IWUSR, avifb_show_dpd, avifb_store_dpd),
	/* device physical size (mm) */
	__ATTR(screen_size, S_IRUGO, avifb_show_screen_size, NULL),
	__ATTR(default_screen_size, S_IRUGO, avifb_show_default_screen_size, NULL),
	/* Monitor specs */
	__ATTR(monitor_manufacturer, S_IRUGO, avifb_show_monitor_manufacturer,
	       NULL),
	__ATTR(monitor_name, S_IRUGO, avifb_show_monitor_name, NULL),
	__ATTR(monitor_serial, S_IRUGO, avifb_show_monitor_serial, NULL),
	__ATTR(monitor_misc, S_IRUGO, avifb_show_monitor_misc, NULL),
};
+
+/*
+ * Frame buffer operations shared by every overlay fb_info.
+ */
+static struct fb_ops avifb_ops = {
+	.owner          = THIS_MODULE,
+	.fb_check_var   = avifb_check_var,
+	.fb_set_par     = avifb_set_par,
+	.fb_setcolreg   = avifb_setcolreg,
+	.fb_blank       = avifb_blank,
+	.fb_pan_display = avifb_pan_display,
+	.fb_ioctl       = avifb_ioctl,
+	.fb_mmap        = avifb_mmap,
+
+	/*
+	 * fillrect, copyarea and imageblit are mandatory. We use the generic
+	 * software based versions.
+	 */
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+};
+
+/* Used to set the basic "hardcoded" features of the hardware and driver.
+ * ypanstep = 1 allows vertical panning, used for double buffering (each
+ * overlay is created with yres_virtual = 2 * yres). */
+static const struct fb_fix_screeninfo avifb_default_fix = {
+	.id        = DEVICE_NAME,
+	.type      = FB_TYPE_PACKED_PIXELS,
+	.visual    = FB_VISUAL_TRUECOLOR,
+	.xpanstep  = 0,
+	.ypanstep  = 1,
+	.ywrapstep = 0,
+	.accel     = FB_ACCEL_NONE,
+	.mmio_start = 0,
+	.mmio_len   = 0,
+};
+
+/**
+ * avifb_overlay_init() - allocate and register one framebuffer overlay
+ * @data: LCD instance the overlay belongs to
+ * @id:   overlay index in data->pdata->overlays / data->infos
+ *
+ * Builds a DMA-capable AVI segment for the overlay, maps its DMA memory,
+ * connects it to the display segment and registers the resulting fb_info
+ * together with its sysfs attributes.
+ *
+ * Returns 0 on success or a negative errno; on failure every intermediate
+ * resource is rolled back and data->infos[id] is reset to NULL.
+ */
+static int __devinit avifb_overlay_init(struct avifb_data *data, unsigned id)
+{
+	struct fb_info *info;
+	struct avifb_par *par;
+	struct resource *vmem;
+	struct avi_segment_format lcd_fmt;
+	unsigned long caps;
+	unsigned i;
+	int ret;
+
+	info = framebuffer_alloc(sizeof(struct avifb_par), data->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto nomem;
+	}
+
+	data->infos[id] = info;
+	par = info->par;
+	par->id = id;
+	par->data = data;
+	par->overlay = &data->pdata->overlays[id];
+
+	/* Force DMA input for the overlay, it makes no sense not to have it. */
+	caps = par->overlay->caps | AVI_CAP_DMA_IN;
+
+	avi_segment_get_input_format(data->display_segment, &lcd_fmt);
+
+	/* Add a chroma converter when the display input is not RGB. */
+	if (lcd_fmt.colorspace != AVI_RGB_CSPACE)
+		caps |= AVI_CAP_CONV;
+
+	par->segment = avi_segment_build(&caps,
+	                                 "fb",
+	                                 avifb_instance(data),
+	                                 id,
+	                                 data->dev);
+	if (IS_ERR(par->segment)) {
+		ret = PTR_ERR(par->segment);
+		goto no_segment;
+	}
+
+	/* The default value for pan_waits_for_vbl is defined in the KConfig. It
+	 * can be overriden at runtime through the sysfs. */
+#ifdef CONFIG_FB_AVI_PAN_WAITS_FOR_VSYNC
+	par->pan_waits_for_vbl = 1;
+#else
+	par->pan_waits_for_vbl = 0;
+#endif
+
+	info->pseudo_palette = par->pseudo_palette;
+
+	info->fbops = &avifb_ops;
+	info->fix   = avifb_default_fix;
+
+	/* With a gamma corrector in the pipeline the palette is writable */
+	if (data->gam)
+		info->fix.visual = FB_VISUAL_DIRECTCOLOR;
+
+	info->fix.smem_start = par->overlay->dma_memory.start;
+	info->fix.smem_len   = resource_size(&par->overlay->dma_memory);
+
+	vmem = request_mem_region(info->fix.smem_start,
+	                          resource_size(&par->overlay->dma_memory),
+	                          dev_driver_string(data->dev));
+	if (!vmem) {
+		ret = -EBUSY;
+		dev_err(data->dev,
+		        "request_mem_region %08lx:%08x failed\n",
+		        info->fix.smem_start,
+		        info->fix.smem_len);
+		goto mem_request_failed;
+	}
+
+	if (par->overlay->cacheable)
+		/* Map the framebuffer DMA memory as cached. This improves the
+		 * write performances in certain cases (but also contributes to
+		 * cache corruption). */
+		info->screen_base = ioremap_cached(info->fix.smem_start,
+		                                   info->fix.smem_len);
+	else
+		info->screen_base = ioremap(info->fix.smem_start,
+		                            info->fix.smem_len);
+
+	if (!info->screen_base) {
+		ret = -ENOMEM;
+		dev_err(data->dev,
+		        "ioremap_cached %08lx:%08x failed\n",
+		        info->fix.smem_start,
+		        info->fix.smem_len);
+		goto ioremap_failed;
+	}
+
+	/* Paint the framebuffer black */
+	memset(info->screen_base, 0, info->fix.smem_len);
+
+	/* Use the specified color depth of default to 16bit RGB */
+	if (par->overlay->color_depth)
+		info->var.bits_per_pixel = par->overlay->color_depth;
+	else
+		info->var.bits_per_pixel = 16;
+
+	/* The default resolution is the display resolution */
+	info->var.xres = data->display_segment->input_format.width;
+	info->var.yres = data->display_segment->input_format.height;
+
+	/* If the BSP requires a different resolution we override it here */
+	if (par->overlay->layout.width)
+		info->var.xres = par->overlay->layout.width;
+	if (par->overlay->layout.height)
+		info->var.yres = par->overlay->layout.height;
+
+	/* Two screens worth of virtual resolution for double buffering */
+	info->var.yres_virtual = info->var.yres * 2;
+
+	ret = avifb_check_var(&info->var, info);
+	if (ret)
+		goto check_var_failed;
+
+	ret = avifb_set_par(info);
+	if (ret)
+		goto set_par_failed;
+
+	ret = avi_segment_set_position(par->segment,
+	                               par->overlay->layout.x,
+	                               par->overlay->layout.y);
+	if (ret)
+		goto set_position_failed;
+
+	ret = avifb_pan_display(&info->var, info);
+	if (ret)
+		goto pan_failed;
+
+	avi_segment_set_alpha(par->segment, par->overlay->layout.alpha);
+	if (!par->overlay->layout.enabled)
+		avi_segment_hide(par->segment);
+
+	avi_segment_activate(par->segment);
+
+	ret = avi_segment_connect(par->segment,
+	                          data->display_segment,
+	                          par->overlay->zorder);
+	if (ret) {
+		dev_err(data->dev,
+		        "Couldn't connect segment %s to %s "
+		        "(zorder: %d)\n",
+		        par->segment->id,
+		        data->display_segment->id,
+		        par->overlay->zorder);
+		goto segment_connect_failed;
+	}
+
+	ret = register_framebuffer(info);
+	if (ret)
+		goto register_failed;
+
+	for (i = 0; i < ARRAY_SIZE(avifb_fb_sysfs_attrs); i++) {
+		ret = device_create_file(info->dev, &avifb_fb_sysfs_attrs[i]);
+		if (ret) {
+			/* Remove the attributes created so far */
+			while (i--)
+				device_remove_file(info->dev,
+				                   &avifb_fb_sysfs_attrs[i]);
+			goto sysfs_failed;
+		}
+	}
+
+	dev_info(data->dev,
+	         "overlay fb%d initialized [%u x %u] (%s)\n",
+	         info->node, info->var.xres, info->var.yres,
+	         par->segment->id);
+
+	return 0;
+
+sysfs_failed:
+	unregister_framebuffer(info);
+register_failed:
+	avi_segment_disconnect(par->segment, data->display_segment);
+segment_connect_failed:
+pan_failed:
+set_position_failed:
+set_par_failed:
+check_var_failed:
+	iounmap(info->screen_base);
+ioremap_failed:
+	release_mem_region(info->fix.smem_start,
+	                   resource_size(&par->overlay->dma_memory));
+mem_request_failed:
+	avi_segment_teardown(par->segment);
+no_segment:
+	/* info was obtained from framebuffer_alloc(): it must be freed with
+	 * framebuffer_release(), not plain kfree(), or the fb core's private
+	 * allocations (e.g. apertures) are leaked. */
+	framebuffer_release(info);
+nomem:
+	data->infos[id] = NULL;
+	return ret;
+}
+
+/**
+ * avifb_overlay_destroy() - tear down one overlay created by
+ *                           avifb_overlay_init()
+ * @data: LCD instance the overlay belongs to
+ * @id:   overlay index in data->infos
+ *
+ * Safe to call on an overlay that was never initialized (or already
+ * destroyed): data->infos[id] is NULL in that case and we return early.
+ */
+static void avifb_overlay_destroy(struct avifb_data *data, unsigned id)
+{
+	struct fb_info *info;
+	struct avifb_par *par;
+	int i;
+
+	info = data->infos[id];
+	if (info == NULL)
+		return;
+
+	par = info->par;
+
+	for (i = 0; i < ARRAY_SIZE(avifb_fb_sysfs_attrs); i++)
+		device_remove_file(info->dev, &avifb_fb_sysfs_attrs[i]);
+
+	unregister_framebuffer(info);
+
+	iounmap(info->screen_base);
+	release_mem_region(info->fix.smem_start,
+	                   resource_size(&par->overlay->dma_memory));
+	avi_segment_disconnect(par->segment, data->display_segment);
+	avi_segment_teardown(par->segment);
+	/* Counterpart of framebuffer_alloc(): use framebuffer_release(),
+	 * not plain kfree(), so the fb core frees its own allocations too. */
+	framebuffer_release(info);
+	data->infos[id] = NULL;
+}
+
+/* This has to match with the pin configuration in the BSP. If the command line
+ * requests RGB888_1X24 but only 8 data bits are enabled in the pinctrl there
+ * will be a silent mismatch. */
+static const struct
+{
+	const char *name;
+	unsigned value;
+} avifb_busnames[] = {
+/* Map a bus name (as given on the command line) to its format control value */
+#define AVIFB_BUS(_n) { #_n, AVI_FORMAT_CONTROL_ ##_n }
+	AVIFB_BUS(RGB888_1X24),
+	AVIFB_BUS(YUV888_1X24),
+	AVIFB_BUS(YUYV_1X16),
+	AVIFB_BUS(YVYU_1X16),
+	AVIFB_BUS(UYVY_2X8),
+	AVIFB_BUS(VYUY_2X8),
+	AVIFB_BUS(YUYV_2X8),
+	AVIFB_BUS(YVYU_2X8),
+	AVIFB_BUS(RGB888_3X8),
+	AVIFB_BUS(BGR888_3X8),
+	AVIFB_BUS(RGBX8888_4X8),
+	AVIFB_BUS(BGRX8888_4X8),
+	AVIFB_BUS(RGB565_2X8),
+	AVIFB_BUS(BGR565_2X8),
+	AVIFB_BUS(RGB555_2X8),
+	AVIFB_BUS(BGR555_2X8),
+	AVIFB_BUS(RGB444_2X8),
+	AVIFB_BUS(BGR444_2X8),
+};
+
+/**
+ * avifb_parse_options() - parse the kernel command line for
+ *                         framebuffer-specific options.
+ * @data: LCD instance whose defaults may be overridden
+ *
+ * Recognized comma-separated options: yuv, rgb, bt656, bt601, pad=<0-2>,
+ * ipc=<0|1>, vmode=<name>, bus=<name>.
+ *
+ * Returns 0 on success (bogus entries are ignored with a warning) or
+ * -ENODEV when the command line asks to disable the device entirely.
+ */
+static int __devinit avifb_parse_options(struct avifb_data *data)
+{
+	const struct avi_videomode **vmodes = data->pdata->lcd_videomodes;
+	char *opts = NULL;
+	const char *vmode;
+	int pad;
+	const char *bus;
+	char *opt;
+	int i;
+
+	if (fb_get_options(dev_name(data->dev), &opts)) {
+		/* If fb_get_options returns 1 it means we were asked to turn
+		 * off the screen (e.g. video=avifb:off) */
+		dev_info(data->dev, "canceling probe due to command line\n");
+		return -ENODEV;
+	}
+
+	if (opts == NULL)
+		return 0;
+
+	/* For all the parsing I assume that command line errors are not fatal:
+	 * bogus entries are simply ignored (with a warning message) */
+	while ((opt = strsep(&opts, ","))) {
+		if (!opt)
+			continue;
+
+		if (!strcmp(opt, "yuv"))
+			data->output_cspace = AVI_BT709_CSPACE;
+		else if (!strcmp(opt, "rgb"))
+			data->output_cspace = AVI_RGB_CSPACE;
+		else if (!strcmp(opt, "bt656"))
+			data->lcd_interface.itu656 = 1;
+		else if (!strcmp(opt, "bt601"))
+			data->lcd_interface.itu656 = 0;
+		else if (!strncmp(opt, "pad=", strlen("pad="))) {
+			pad = *(opt + strlen("pad=")) - '0';
+
+			/* Only pad_select values 0, 1 and 2 are valid (the
+			 * original "pad >= 0 || pad <= 2" was always true) */
+			if (pad >= 0 && pad <= 2) {
+				data->lcd_interface.pad_select = pad;
+			} else
+				dev_warn(data->dev, "bogus pad value\n");
+		}
+		else if (!strncmp(opt, "ipc=", strlen("ipc=")))
+			data->lcd_interface.ipc = (*(opt + strlen("ipc=")) != '0');
+		else if (!strncmp(opt, "vmode=", strlen("vmode="))) {
+			vmode = opt + strlen("vmode=");
+
+			if (vmodes) {
+				for (i = 0; vmodes[i]; i++)
+					if (!strcmp(vmode, vmodes[i]->name))
+						break;
+
+				if (vmodes[i]) {
+					data->lcd_videomode = *vmodes[i];
+					data->lcd_default_videomode = *vmodes[i];
+					continue;
+				}
+			}
+
+			dev_warn(data->dev, "unknown vmode %s\n", vmode);
+		}
+		else if (!strncmp(opt, "bus=", strlen("bus="))) {
+			bus = opt + strlen("bus=");
+
+			for (i = 0; i < ARRAY_SIZE(avifb_busnames); i++)
+				if (!strcmp(bus, avifb_busnames[i].name))
+					break;
+
+			if (i == ARRAY_SIZE(avifb_busnames))
+				dev_warn(data->dev, "unknown bus type %s\n", bus);
+			else
+				data->lcd_format_control
+					= avifb_busnames[i].value;
+		}
+		else
+			dev_warn(data->dev, "unknown option %s\n", opt);
+	}
+
+	return 0;
+}
+
+/**
+ * avifb_probe() - platform driver probe
+ * @pdev: platform device carrying an avifb_platform_data
+ *
+ * Sets up the LCD segment, pinctrl, per-LCD sysfs attributes and one
+ * framebuffer per overlay. Returns 0 on success or a negative errno with
+ * everything rolled back.
+ */
+static int __devinit avifb_probe(struct platform_device *pdev)
+{
+	struct avifb_data *data;
+	struct avifb_platform_data *pdata;
+	unsigned i;
+	int ret;
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	if (!pdata) {
+		ret = -ENODEV;
+		goto nopdata;
+	}
+
+	/* Allocate avifb_data structure + enough memory to hold the pointers to
+	 * the fb_info structs */
+	data = kzalloc(sizeof(*data) +
+	               pdata->overlay_nr * sizeof(*(data->infos)), GFP_KERNEL);
+
+	if (!data) {
+		ret = -ENOMEM;
+		goto nomem;
+	}
+
+	data->dev   = &pdev->dev;
+	data->pdata = pdata;
+
+	mutex_init(&data->lcd_lock);
+
+	/* Load the defaults, may be overriden by the command line */
+	data->lcd_format_control    = pdata->lcd_format_control;
+	data->lcd_videomode         = *pdata->lcd_default_videomode;
+	data->lcd_default_videomode = *pdata->lcd_default_videomode;
+	data->output_cspace         = pdata->output_cspace;
+	data->lcd_interface         = pdata->lcd_interface;
+	data->width                 = pdata->width;
+	data->height                = pdata->height;
+	data->dpd                   = pdata->dpd;
+
+	if (data->output_cspace == AVI_NULL_CSPACE)
+		/* Default to RGB */
+		data->output_cspace = AVI_RGB_CSPACE;
+
+	ret = avifb_parse_options(data);
+	if (ret)
+		goto bad_option;
+
+	ret = avifb_setup_lcd_segment(data);
+	if (ret)
+		goto bad_lcd_segment;
+
+	data->pctl = pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(data->pctl)) {
+		ret = PTR_ERR(data->pctl);
+		goto nopin;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(avifb_lcd_sysfs_attrs); i++) {
+		ret = device_create_file(&pdev->dev, &avifb_lcd_sysfs_attrs[i]);
+		if (ret) {
+			/* i is unsigned: "for (--i; i >= 0; i--)" would never
+			 * terminate, use the underflow-safe form instead */
+			while (i--)
+				device_remove_file(&pdev->dev,
+				                   &avifb_lcd_sysfs_attrs[i]);
+			goto sysfs_err;
+		}
+	}
+
+	/* Retrieve the struct sysfs_dirent of the vbl_count in order to call
+	 * sysfs_notify_dirent from the irq handler */
+	data->vbl_dirent = sysfs_get_dirent(pdev->dev.kobj.sd, NULL, "vbl_count");
+	if (!data->vbl_dirent) {
+		/* ret still holds 0 here; make sure we report the failure */
+		ret = -ENOMEM;
+		goto vbl_dirent_err;
+	}
+
+	ret = avifb_configure_lcd_segment(data);
+	if (ret)
+		goto lcd_config_failed;
+
+	for (i = 0; i < pdata->overlay_nr; i++)
+		if ((ret = avifb_overlay_init(data, i))) {
+			while (i--)
+				avifb_overlay_destroy(data, i);
+			goto overlay_err;
+		}
+
+	dev_set_drvdata(&pdev->dev, data);
+
+	dev_info(&pdev->dev, "avifb initialized, screen configured\n");
+
+	return 0;
+
+overlay_err:
+	avi_lcd_pixclock_disable(data->lcdc);
+lcd_config_failed:
+	sysfs_put(data->vbl_dirent);
+vbl_dirent_err:
+	for (i = 0; i < ARRAY_SIZE(avifb_lcd_sysfs_attrs); i++)
+		device_remove_file(&pdev->dev, &avifb_lcd_sysfs_attrs[i]);
+sysfs_err:
+	pinctrl_put(data->pctl);
+nopin:
+	/* It's safe to call this function even if the pixclock's already
+	 * disabled. Don't overwrite ret: we must return the original error. */
+	avi_lcd_pixclock_disable(data->lcdc);
+	avi_segment_teardown(data->display_segment);
+bad_lcd_segment:
+bad_option:
+	kfree(data);
+nomem:
+nopdata:
+	return ret;
+}
+
+/* Undo everything avifb_probe() set up, in reverse order. */
+static int __devexit avifb_remove(struct platform_device *pdev)
+{
+	const struct avifb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct avifb_data *data = dev_get_drvdata(&pdev->dev);
+	int i;
+
+	/* Drop the vbl_count dirent reference taken in probe */
+	sysfs_put(data->vbl_dirent);
+	for (i = 0; i < pdata->overlay_nr; i++)
+		avifb_overlay_destroy(data, i);
+	for (i = 0; i < ARRAY_SIZE(avifb_lcd_sysfs_attrs); i++)
+		device_remove_file(&pdev->dev, &avifb_lcd_sysfs_attrs[i]);
+	dev_set_drvdata(&pdev->dev, NULL);
+	avi_lcd_pixclock_disable(data->lcdc);
+	pinctrl_put(data->pctl);
+	avi_segment_teardown(data->display_segment);
+	kfree(data);
+
+	return 0;
+}
+
+/* PM suspend hook: put every overlay framebuffer in suspended state and
+ * gate the LCD pixel clock. */
+static int avifb_suspend(struct device *dev)
+{
+	struct avifb_data *data = dev_get_drvdata(dev);
+	int i;
+
+	dev_info(dev, "framebuffer shutting down\n");
+
+	BUG_ON(data == NULL);
+
+	for (i = 0; i < data->pdata->overlay_nr; i++)
+		fb_set_suspend(data->infos[i], 1);
+
+	avi_lcd_pixclock_disable(data->lcdc);
+
+	return 0;
+}
+
+/* PM resume hook: reprogram the DMA addresses and segments of every
+ * overlay (hardware state was lost across suspend), then restore the
+ * gamma configuration, timings and pixel clock of the display. */
+static int avifb_resume(struct device *dev)
+{
+	struct avifb_data *data = dev_get_drvdata(dev);
+	struct fb_info *info;
+	struct avifb_par *par;
+	int i;
+
+	dev_info(dev, "framebuffer resuming\n");
+
+	BUG_ON(data == NULL);
+
+	for (i = 0; i < data->pdata->overlay_nr; i++) {
+
+		info = data->infos[i];
+		par = info->par;
+
+		/* Re-point the DMA engine at the currently displayed buffer */
+		avifb_pan_display_nowait(&info->var, info);
+
+		avi_segment_activate(par->segment);
+
+		fb_set_suspend(info, 0);
+	}
+
+	avifb_set_gam_config(data);
+
+	avifb_configure_timings(data);
+	avi_lcd_pixclock_enable(data->lcdc);
+	avi_segment_enable_irq(data->display_segment);
+	avi_segment_activate(data->display_segment);
+
+	dev_info(dev, "framebuffer reconfigured\n");
+
+	return 0;
+}
+
+/* Power management callbacks (function designators need no explicit '&') */
+static struct dev_pm_ops avifb_dev_pm_ops = {
+	.suspend = avifb_suspend,
+	.resume  = avifb_resume,
+};
+
+/* Platform driver glue; matched against the "DEVICE_NAME" platform device */
+static struct platform_driver avifb_driver = {
+	.driver         = {
+		.name   = DEVICE_NAME,
+		.owner  = THIS_MODULE,
+		.pm     = &avifb_dev_pm_ops,
+	},
+	.probe          = avifb_probe,
+	.remove         = __devexit_p(avifb_remove),
+};
+
+/* Module entry point: register the platform driver */
+static int __init avifb_init(void)
+{
+	return platform_driver_register(&avifb_driver);
+}
+module_init(avifb_init);
+
+/* Module exit point: unregister the platform driver */
+static void __exit avifb_exit(void)
+{
+	platform_driver_unregister(&avifb_driver);
+}
+module_exit(avifb_exit);
+
+MODULE_AUTHOR("Lionel Flandrin <lionel.flandrin@parrot.com>");
+MODULE_DESCRIPTION("Framebuffer driver for Parrot Advanced Video Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parrot/video/avifb.h b/drivers/parrot/video/avifb.h
new file mode 100644
index 00000000000000..fd804960b0b3a7
--- /dev/null
+++ b/drivers/parrot/video/avifb.h
@@ -0,0 +1,67 @@
+#ifndef _AVIFB_H_
+#define _AVIFB_H_
+
+#include <linux/fb.h>
+#include <linux/ioctl.h>
+#include "avi_segment.h"
+
+/* Position, size, alpha and visibility of one overlay on the display */
+struct avifb_layout {
+	unsigned x;
+	unsigned y;
+	unsigned width;
+	unsigned height;
+	int alpha;
+	int enabled;
+};
+
+/* BSP-provided description of one framebuffer overlay */
+struct avifb_overlay {
+	/* Default layout on startup */
+	struct avifb_layout layout;
+	/* Physical memory range used as the overlay's DMA framebuffer */
+	struct resource dma_memory;
+	unsigned long caps;
+	/* Blending order relative to the other overlays */
+	int zorder;
+	/* Bits per pixel; 0 selects the driver default (16 bpp) */
+	unsigned color_depth;
+	/* If cacheable is != 0 the framebuffer will be mapped in device
+	 * cacheable in both kernel and userland. */
+	int cacheable;
+};
+
+/* Per-LCD platform data supplied by the BSP */
+struct avifb_platform_data {
+	/* Output format control as defined in reg_avi.h */
+	unsigned lcd_format_control;
+	const struct avi_videomode *lcd_default_videomode;
+	const struct avi_videomode **lcd_videomodes;
+	/* Signal interface configuration; itu656 selects the mode where video
+	 * synchronisation codes are embedded in the data stream (no need for
+	 * external HS/VS/DE) */
+	union avi_lcd_interface lcd_interface;
+	unsigned long caps;
+	/* blender background colour. 24bit RGB no alpha */
+	u32 background;
+	/* Output colorspace conversion. Only possible if we have a chroma
+	 * converter in our channel. Input colorspace is always
+	 * AVI_RGB_CSPACE. */
+	enum avi_colorspace output_cspace;
+	struct avifb_overlay *overlays;
+	unsigned overlay_nr;
+	/* Default pixel data (color) for the LCD */
+	u32 dpd;
+
+	/* panel size in mm */
+	__u32 height;
+	__u32 width;
+};
+
+extern struct fb_videomode *avifb_select_mode(const char *id,
+ struct fb_monspecs *monspecs);
+
+extern int avifb_set_screen_size(const char* id, unsigned width, unsigned height);
+
+/** avifb_set_monspecs() - Set monitor specs for a specific LCD
+ * @id:       LCD segment identifier
+ * @monspecs: monitor specs
+ *
+ * Copy all specs of fb_monspecs to an internal fb_monspecs except the
+ * videomode database (modedb), which stays empty.
+ */
+extern void avifb_set_monspecs(const char *id, struct fb_monspecs *monspecs);
+
+#endif /* _AVIFB_H_ */
diff --git a/drivers/parrot/video/reg_avi.h b/drivers/parrot/video/reg_avi.h
new file mode 100644
index 00000000000000..5bcc08075f28b4
--- /dev/null
+++ b/drivers/parrot/video/reg_avi.h
@@ -0,0 +1,927 @@
+/*
+ * linux/drivers/parrot/video/reg_avi.h
+ *
+ * Copyright (C) 2011 Parrot S.A.
+ *
+ * @author Gregor Boirie <gregor.boirie@parrot.com>
+ * @date 01-Feb-2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _REG_AVI_H
+#define _REG_AVI_H
+
+#include <asm/memory.h>
+
+/***********************
+ * Configuration module
+ ***********************/
+#define AVI_CFG UL(0)
+
+#include "avi_regmap/avi_cfg.h"
+
+/**********************
+ * Interconnect module
+ **********************/
+#define AVI_INTER UL(0x100)
+
+#define AVI_INTER_SINKA    UL(0x00)
+#define AVI_INTER_SINKB    UL(0x04)
+#define AVI_INTER_SINKC    UL(0x08)
+#define AVI_INTER_SINKD    UL(0x0c)
+#define AVI_INTER_SINKE    UL(0x10)
+#define AVI_INTER_SINKF    UL(0x14)
+#define AVI_INTER_SINKG    UL(0x18)
+/* NOTE(review): "SINGH" is most likely a typo for AVI_INTER_SINKH; the name
+ * is kept as-is since other files may reference it — confirm before renaming. */
+#define AVI_INTER_SINGH    UL(0x1C)
+/* Not present in MPW1 (GAM -> LCD connection is hardwired) */
+#define AVI_INTER_SINKI    UL(0x20)
+#define AVI_INTER_SINKJ    UL(0x24)
+#define AVI_INTER_SINKK    UL(0x28)
+#define AVI_INTER_SINKL    UL(0x2C)
+#define AVI_INTER_NSINK    12
+
+#define AVI_INTER_SRC_MASK UL(0x3F)
+
+/**************
+ * FIFO module
+ **************/
+#define AVI_FIFO UL(0x1000)
+#define AVI_FIFO0 AVI_FIFO
+#define AVI_FIFO1 (AVI_FIFO + UL(0x100))
+#define AVI_FIFO2 (AVI_FIFO + UL(0x200))
+#define AVI_FIFO3 (AVI_FIFO + UL(0x300))
+#define AVI_FIFO4 (AVI_FIFO + UL(0x400))
+#define AVI_FIFO5 (AVI_FIFO + UL(0x500))
+#define AVI_FIFO6 (AVI_FIFO + UL(0x600))
+#define AVI_FIFO7 (AVI_FIFO + UL(0x700))
+#define AVI_FIFO8 (AVI_FIFO + UL(0x800))
+#define AVI_FIFO9 (AVI_FIFO + UL(0x900))
+#define AVI_FIFO10 (AVI_FIFO + UL(0xa00))
+#define AVI_FIFO11 (AVI_FIFO + UL(0xb00))
+
+#define AVI_FIFO_CFG UL(0x00)
+#define AVI_FIFO_FRAMESIZE UL(0x04)
+#define AVI_FIFO_MONITOR UL(0x08)
+#define AVI_FIFO_TIMEOUT UL(0x0C)
+#define AVI_FIFO_SWAP0 UL(0x10)
+#define AVI_FIFO_SWAP1 UL(0x14)
+#define AVI_FIFO_DMASA0 UL(0x20)
+#define AVI_FIFO_DMASA1 UL(0x24)
+#define AVI_FIFO_DMALSTR0 UL(0x28)
+#define AVI_FIFO_DMALSTR1 UL(0x2C)
+#define AVI_FIFO_DMAFXYCFG UL(0x30)
+#define AVI_FIFO_DMAFXSTR0 UL(0x34)
+#define AVI_FIFO_DMAFXSTR1 UL(0x38)
+#define AVI_FIFO_DMAFYSTR0 UL(0x3C)
+#define AVI_FIFO_DMAFYSTR1 UL(0x40)
+#define AVI_FIFO_KEYINGMIN UL(0x50)
+#define AVI_FIFO_KEYINGMAX UL(0x54)
+#define AVI_FIFO_KEYINGALPHA UL(0x58)
+#define AVI_FIFO_VLIN UL(0x60)
+#define AVI_FIFO_STATUS UL(0x64)
+#define AVI_FIFO_REG_COUNT UL(0x68/4)
+
+/*
+ * FIFO configuration register bitfields
+ */
+#define AVI_FIFO_CFG_VL_TYPE 0 /* Video link to/from other AVI module. */
+#define AVI_FIFO_CFG_DMA_TYPE 1 /* DMA to/from memory */
+
+#define AVI_FIFO_CFG_PACKED_ORG 0 /* Packed frame data organization. */
+#define AVI_FIFO_CFG_PLANAR_ORG 1 /* Semi-planar frame data organization. */
+
+#define AVI_FIFO_CFG_444_FMT 0
+#define AVI_FIFO_CFG_422_FMT 1
+#define AVI_FIFO_CFG_420_FMT 3
+
+#define AVI_FIFO_CFG_4444_BENC 0
+#define AVI_FIFO_CFG_565_BENC 1
+#define AVI_FIFO_CFG_LSB555_BENC 2
+#define AVI_FIFO_CFG_1555_BENC 3
+#define AVI_FIFO_CFG_888_BENC 4
+#define AVI_FIFO_CFG_8888_BENC 8
+
+#define AVI_FIFO_CFG_ARGB_REORG 0
+#define AVI_FIFO_CFG_RGB_REORG 0
+#define AVI_FIFO_CFG_YUV_REORG 0
+#define AVI_FIFO_CFG_ABGR_REORG 1
+#define AVI_FIFO_CFG_BGR_REORG 1
+#define AVI_FIFO_CFG_VUY_REORG 1
+#define AVI_FIFO_CFG_RGBA_REORG 2
+#define AVI_FIFO_CFG_GRB_REORG 2
+#define AVI_FIFO_CFG_UYV_REORG 2
+#define AVI_FIFO_CFG_BGRA_REORG 3
+#define AVI_FIFO_CFG_BRG_REORG 3
+#define AVI_FIFO_CFG_VYU_REORG 3
+
+/* Rounding/dithering modes (this group was duplicated verbatim in the
+ * original; the redundant second copy has been dropped) */
+#define AVI_FIFO_CFG_TRUNC_RND 0
+#define AVI_FIFO_CFG_CONV_RND 1
+#define AVI_FIFO_CFG_DYNTRUNC_RND 2
+#define AVI_FIFO_CFG_DYNCONV_RND 3
+
+#define AVI_FIFO_CFG_ENABLE_CUS 0 /* Enable chroma up-sampling. */
+#define AVI_FIFO_CFG_DISABLE_CUS 1 /* Disable chroma up-sampling. */
+
+#define AVI_FIFO_CFG_ENABLE_CDS 0 /* Enable chroma down-sampling. */
+#define AVI_FIFO_CFG_DISABLE_CDS 1 /* Disable chroma down-sampling. */
+
+#define AVI_FIFO_CFG_0_PHASE 0 /* 0° chroma up-sampling phase. */
+#define AVI_FIFO_CFG_180_PHASE 0 /* 180° chroma up-sampling phase.
+                                  * NOTE(review): same value as 0_PHASE —
+                                  * looks suspicious (expected 1?); verify
+                                  * against the AVI datasheet. */
+
+#define AVI_FIFO_CFG_Y_SCALPLANE 0
+#define AVI_FIFO_CFG_UV_SCALPLANE 1
+
+#define AVI_FIFO_CFG_BURST16 0 /* 16 words DMA bursts */
+#define AVI_FIFO_CFG_BURST32 1 /* 32 words DMA bursts */
+
+/**************************
+ * Scaler / Rotator module
+ **************************/
+
+#define AVI_SCALROT UL(0x7000)
+
+/* Offsets 0x0 -> 0x100 of the ScalRot block are used for
+ * reserving memory slots for a specific SCAL/ROT module
+ * instead of dynamic allocation */
+#define AVI_ISP_SCAL_ROT_SCAL0_RESERVE (AVI_SCALROT + UL(0x00))
+#define AVI_ISP_SCAL_ROT_ROT0_RESERVE (AVI_SCALROT + UL(0x04))
+#define AVI_ISP_SCAL_ROT_SCAL1_RESERVE (AVI_SCALROT + UL(0x08))
+#define AVI_ISP_SCAL_ROT_ROT1_RESERVE (AVI_SCALROT + UL(0x0C))
+
+#define AVI_SCALROT0 (AVI_SCALROT + UL(0x100))
+#define AVI_SCALROT1 (AVI_SCALROT + UL(0x300))
+
+/*********************
+ * Scaler sub-modules
+ *********************/
+
+#define AVI_SCAL0 (AVI_SCALROT + UL(0x100)) /* Scaler 0 */
+#define AVI_SCAL1 (AVI_SCALROT + UL(0x300)) /* Scaler 1 */
+
+/*
+ * Per-scaler register definitions
+ */
+#define AVI_SCAL_CONF UL(0x00)
+#define AVI_SCAL_CONF_OUTSRC_SHIFT 0
+#define AVI_SCAL_CONF_HORIZSRC_SHIFT 2
+#define AVI_SCAL_CONF_VERTSRC_SHIFT 4
+#define AVI_SCAL_NULL_SRC 0
+#define AVI_SCAL_IN_SRC 1
+#define AVI_SCAL_HORIZ_SRC 2
+#define AVI_SCAL_VERT_SRC 3
+#define AVI_SCAL_CONF_PLANAR_SHIFT 6
+#define AVI_SCAL_CONF_OUTFMT_SHIFT 7
+
+#define AVI_SCAL_SIZEOUT UL(0x04)
+#define AVI_SCAL_WIDTH_SHIFT 0
+#define AVI_SCAL_HEIGHT_SHIFT 16
+
+#define AVI_SCAL_PLANE0 UL(0x10)
+#define AVI_SCAL_PLANE1 UL(0x80)
+
+/*
+ * Per-plane register definitions (i.e., add plane base adress to offsets below
+ * when accessing registers).
+ */
+#define AVI_SCAL_NTAPS_NPHASES UL(0x00)
+#define AVI_SCAL_NTAPSX_SHIFT 0
+#define AVI_SCAL_NTAPSY_SHIFT 2
+#define AVI_SCAL_NPHASESX_SHIFT 4
+#define AVI_SCAL_NPHASESY_SHIFT 6
+
+#define AVI_SCAL_NTAPS_2 0
+#define AVI_SCAL_NTAPS_4 1
+#define AVI_SCAL_NTAPS_8 2
+#define AVI_SCAL_NTAPS_16 3
+
+#define AVI_SCAL_NPHASES_2 0
+#define AVI_SCAL_NPHASES_4 1
+#define AVI_SCAL_NPHASES_8 2
+#define AVI_SCAL_NPHASES_16 3
+
+#define AVI_SCAL_HORIZ_DIM UL(0x10) /* Horizontal dimension */
+#define AVI_SCAL_VERT_DIM UL(0x40) /* Vertical dimension */
+
+/*
+ * Per-dimension coefficients register definitions (i.e., add dimension base
+ * address to offsets below when accessing registers.
+ */
+#define AVI_SCAL_OFFSET UL(0x00)
+#define AVI_SCAL_INCR UL(0x04)
+#define AVI_SCAL_COEFF UL(0x08) /* Coefficients base address */
+ /* within dimension */
+#define AVI_SCAL_COEFF0300 (AVI_SCAL_COEFF + UL(0x00))
+#define AVI_SCAL_COEFF0704 (AVI_SCAL_COEFF + UL(0x04))
+#define AVI_SCAL_COEFF1108 (AVI_SCAL_COEFF + UL(0x08))
+#define AVI_SCAL_COEFF1512 (AVI_SCAL_COEFF + UL(0x0c))
+#define AVI_SCAL_COEFF1916 (AVI_SCAL_COEFF + UL(0x10))
+#define AVI_SCAL_COEFF2320 (AVI_SCAL_COEFF + UL(0x14))
+#define AVI_SCAL_COEFF2724 (AVI_SCAL_COEFF + UL(0x18))
+#define AVI_SCAL_COEFF3128 (AVI_SCAL_COEFF + UL(0x1c))
+
+/* There's a 64KB RAM shared by both scalers, when only one is in use it can
+ * take the entire memory. To simplify things a bit I just assume both scalers
+ * are always in use and can only take 32K of RAM. */
+#define AVI_SCAL_RAMSZ (32 * SZ_1K)
+
+/**********************
+ * Rotator sub-modules
+ **********************/
+
+#define AVI_ROT0 (AVI_SCALROT + UL(0x200))
+#define AVI_ROT1 (AVI_SCALROT + UL(0x400))
+
+#define AVI_ROT_ANGLE UL(0x00)
+
+#define AVI_ROT_ANGLE_0 0
+#define AVI_ROT_ANGLE_90 1
+#define AVI_ROT_ANGLE_270 2
+#define AVI_ROT_ANGLE_180 3
+#define AVI_ROT_ANGLE_MASK 0x3
+#define AVI_ROT_HORIZ_FLIP (1 << 2)
+
+#define AVI_ROT_SIZE UL(0x04)
+#define AVI_ROT_HEIGHTOUT_SHIFT 16
+#define AVI_ROT_HEIGHTOUT_MASK (0xFFFF)
+#define AVI_ROT_NSTRIPE_SHIFT 0
+#define AVI_ROT_NSTRIPE_MASK (0xFFFF)
+
+/**************************
+ * Chroma converter module
+ **************************/
+#define AVI_CONV UL(0x3000)
+#define AVI_CONV0 AVI_CONV
+#define AVI_CONV1 (AVI_CONV + UL(0x100))
+#define AVI_CONV2 (AVI_CONV + UL(0x200))
+#define AVI_CONV3 (AVI_CONV + UL(0x300))
+
+#include "avi_regmap/avi_isp_chroma.h"
+
+/*****************
+ * Blender module
+ *****************/
+#define AVI_BLEND UL(0x4000)
+#define AVI_BLEND0 AVI_BLEND
+#define AVI_BLEND1 (AVI_BLEND + UL(0x100))
+
+#define AVI_BLEND_BACKGROUND UL(0x00)
+#define AVI_BLEND_FRAMEOUTSIZE UL(0x04)
+#define AVI_BLEND_OFFSET0 UL(0x08)
+#define AVI_BLEND_ALPHA0 UL(0x0C)
+#define AVI_BLEND_OFFSET1 UL(0x10)
+#define AVI_BLEND_ALPHA1 UL(0x14)
+#define AVI_BLEND_OFFSET2 UL(0x18)
+#define AVI_BLEND_ALPHA2 UL(0x1C)
+#define AVI_BLEND_OFFSET3 UL(0x20)
+#define AVI_BLEND_ALPHA3 UL(0x24)
+
+#define AVI_BLEND_NINPUTS 4
+
+/************************
+ * LCD controller module
+ ************************/
+#define AVI_LCD UL(0x5000)
+#define AVI_LCD0 AVI_LCD
+#define AVI_LCD1 (AVI_LCD + UL(0x100))
+
+enum avi_pad_select {
+ AVI_PAD_0 = 0,
+ AVI_PAD_8 = 1,
+ AVI_PAD_16 = 2,
+};
+
+#define AVI_LCD_FORCE_CLEAR_NORMAL 0 /* Normal clear behaviour */
+#define AVI_LCD_FORCE_CLEAR_INACTIVE 1 /* Force clear low */
+#define AVI_LCD_FORCE_CLEAR_ACTIVE 2 /* Force clear high */
+
+#include "avi_regmap/avi_lcd.h"
+
+/**************************
+ * Camera / Capture module
+ **************************/
+#define AVI_CAM UL(0x6000)
+#define AVI_CAM0 AVI_CAM
+#define AVI_CAM1 (AVI_CAM + UL(0x100))
+#define AVI_CAM2 (AVI_CAM + UL(0x200))
+#define AVI_CAM3 (AVI_CAM + UL(0x300))
+#define AVI_CAM4 (AVI_CAM + UL(0x400))
+#define AVI_CAM5 (AVI_CAM + UL(0x500))
+#define AVI_CAM6 (AVI_CAM + UL(0x600))
+#define AVI_CAM7 (AVI_CAM + UL(0x700))
+
+#define AVI_CAM_STATUS UL(0x00)
+#define AVI_CAM_ITSOURCE UL(0x04)
+#define AVI_CAM_INTERFACE UL(0x08)
+#define AVI_CAM_RUN UL(0x0C)
+#define AVI_CAM_H_TIMING UL(0x10)
+#define AVI_CAM_V_TIMING UL(0x20)
+#define AVI_CAM_MESURE_H_TIMING0 UL(0x30)
+#define AVI_CAM_MESURE_H_TIMING1 UL(0x34)
+#define AVI_CAM_MESURE_V_TIMING0 UL(0x40)
+#define AVI_CAM_MESURE_V_TIMING1 UL(0x44)
+#define AVI_CAM_MESURE_V_TIMING2 UL(0x48)
+#define AVI_CAM_MESURE_V_TIMING3 UL(0x4C)
+
+#define AVI_CAP_RAW10_NONE (0)
+#define AVI_CAP_RAW10_1X10 (1)
+#define AVI_CAP_RAW10_3X10 (2)
+
+#define AVI_CAP_RAW8_NONE (0)
+#define AVI_CAP_RAW8_1X8 (1)
+#define AVI_CAP_RAW8_2X8 (2)
+#define AVI_CAP_RAW8_3X8 (3)
+
+/******************************************************
+ * Format control definitions. Used by LCDC and CAMIF.
+ ******************************************************/
+#define AVI_FORMAT_CONTROL_RGB888_1X24 ((0 << 2) | 0)
+#define AVI_FORMAT_CONTROL_YUV888_1X24 ((0 << 2) | 0)
+#define AVI_FORMAT_CONTROL_YUYV_1X16 ((1 << 2) | 0)
+#define AVI_FORMAT_CONTROL_YVYU_1X16 ((1 << 2) | 1)
+#define AVI_FORMAT_CONTROL_UYVY_2X8 ((2 << 2) | 0)
+#define AVI_FORMAT_CONTROL_VYUY_2X8 ((2 << 2) | 1)
+#define AVI_FORMAT_CONTROL_YUYV_2X8 ((2 << 2) | 2)
+#define AVI_FORMAT_CONTROL_YVYU_2X8 ((2 << 2) | 3)
+#define AVI_FORMAT_CONTROL_RGB888_3X8 ((3 << 2) | 0)
+#define AVI_FORMAT_CONTROL_BGR888_3X8 ((3 << 2) | 1)
+/* X is a dummy byte put at the end of the pixel to pad it to 32 bits */
+#define AVI_FORMAT_CONTROL_RGBX8888_4X8 ((4 << 2) | 0)
+#define AVI_FORMAT_CONTROL_BGRX8888_4X8 ((4 << 2) | 1)
+#define AVI_FORMAT_CONTROL_RGB565_2X8 ((5 << 2) | 0)
+#define AVI_FORMAT_CONTROL_BGR565_2X8 ((5 << 2) | 1)
+/* The padding bit is the MSB of the first byte. So those two formats look like
+ * [XRRRRRGG|GGGBBBBB] and [XBBBBBGG|GGGRRRRR] respectively */
+#define AVI_FORMAT_CONTROL_RGB555_2X8 ((6 << 2) | 0)
+#define AVI_FORMAT_CONTROL_BGR555_2X8 ((6 << 2) | 1)
+/* The padding nibble are the MSB of the first byte. So those two formats look
+ * like [XXXXRRRR|GGGGBBBB] and [XXXXBBBB|GGGGRRRR] respectively */
+#define AVI_FORMAT_CONTROL_RGB444_2X8 ((7 << 2) | 0)
+#define AVI_FORMAT_CONTROL_BGR444_2X8 ((7 << 2) | 1)
+
+/*************************
+ * Gamma corrector module
+ *************************/
+#define AVI_GAM UL(0x8000)
+#define AVI_GAM0 AVI_GAM
+#define AVI_GAM1 (AVI_GAM + 0x4000)
+
+#include "avi_regmap/avi_isp_gamma_corrector.h"
+
+/**
+ * Make sure the C structure representation is coherent with the AVI registers
+ */
+#define AVI_ASSERT_REGISTERS_SIZE(_struct, _nregs) \
+ static inline void __avi_check_##_struct##_size(void) \
+ { \
+ BUILD_BUG_ON(sizeof(struct _struct) != (_nregs) * sizeof(u32)); \
+ }
+
+union avi_interconnect_registers
+{
+ union
+ {
+ struct
+ {
+ u8 fifo00_src;
+ u8 fifo01_src;
+ u8 fifo02_src;
+ u8 fifo03_src;
+ };
+ u32 srcsel0;
+ };
+ union
+ {
+ struct
+ {
+ u8 fifo04_src;
+ u8 fifo05_src;
+ u8 fifo06_src;
+ u8 fifo07_src;
+ };
+ u32 srcsel1;
+ };
+ union
+ {
+ struct
+ {
+ u8 fifo08_src;
+ u8 fifo09_src;
+ u8 fifo10_src;
+ u8 fifo11_src;
+ };
+ u32 srcsel2;
+ };
+ union
+ {
+ struct
+ {
+ u8 conv0_src;
+ u8 conv1_src;
+ u8 conv2_src;
+ u8 conv3_src;
+ };
+ u32 srcsel3;
+ };
+ union
+ {
+ struct
+ {
+ u8 blend0_src0;
+ u8 blend0_src1;
+ u8 blend0_src2;
+ u8 blend0_src3;
+ };
+ u32 srcsel4;
+ };
+ union
+ {
+ struct
+ {
+ u8 blend1_src0;
+ u8 blend1_src1;
+ u8 blend1_src2;
+ u8 blend1_src3;
+ };
+ u32 srcsel5;
+ };
+ union
+ {
+ struct
+ {
+ u8 gam0_src;
+ u8 gam1_src;
+ u8 scal0_src0;
+ u8 scal0_src1;
+ };
+ u32 srcsel6;
+ };
+ union
+ {
+ struct
+ {
+ u8 rot0_src;
+ u8 scal1_src0;
+ u8 scal1_src1;
+ u8 rot1_src;
+ };
+ u32 srcsel7;
+ };
+ u32 srcsel[8];
+};
+
+/**
+ * AVI_FIFO_CFG
+ *
+ * NOTE(review): the bitfield-to-register mapping relies on the compiler
+ * allocating bitfields LSB-first within the u32 (implementation-defined in
+ * C); this matches GCC on little-endian ARM — verify if ever built with
+ * another toolchain.
+ */
+union avi_fifo_cfg
+{
+	struct
+	{
+		unsigned srctype   : 1;
+		unsigned srcorg    : 1;
+		unsigned srcformat : 2;
+		unsigned srcbitenc : 4;
+		unsigned dsttype   : 1;
+		unsigned dstorg    : 1;
+		unsigned dstformat : 2;
+		unsigned dstbitenc : 4;
+		unsigned reorg     : 2;
+		unsigned dithering : 2;
+		unsigned cdsdis    : 1;
+		unsigned cusdis    : 1;
+		unsigned phase     : 1;
+		unsigned scalplane : 1;
+		unsigned single    : 1;/* P7 Release >=2 */
+		unsigned keying    : 1;
+		unsigned itsource  : 2;
+		unsigned itline    : 4;
+	};
+	u32 _register;
+};
+
+/**
+ * AVI_FIFO_FRAMESIZE
+ */
+union avi_fifo_framesize
+{
+ struct
+ {
+ u16 width;
+ u16 height;
+ };
+ u32 _register;
+};
+
+/**
+ * AVI_FIFO_MONITOR
+ */
+union avi_fifo_monitor
+{
+ struct
+ {
+ u16 lvlstarting;
+ u16 lvlwarning;
+ };
+ u32 _register;
+};
+
+/**
+ * AVI_FIFO_TIMEOUT
+ */
+union avi_fifo_timeout
+{
+ struct
+ {
+ u8 vblank;
+ u8 hblank;
+ u8 pincr;
+ unsigned issuing_capability : 3;
+ unsigned burst : 2;
+ unsigned /* unused */ : 3;
+ };
+ u32 _register;
+};
+
+/**
+ * AVI_FIFO_SWAP{0,1}
+ */
+union avi_fifo_swap
+{
+ struct
+ {
+ unsigned sel0 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel1 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel2 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel3 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel4 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel5 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel6 : 3;
+ unsigned /* undef */ : 1;
+ unsigned sel7 : 3;
+ unsigned /* undef */ : 1;
+ };
+ u32 _register;
+};
+
+/**
+ * This macro can be used to set avi_fifo_swap._register when doing colour
+ * swapping. Output format is ARGB, so if you want to convert RGBA you can do:
+ * AVI_FIFO_SWAP(3, 0, 1, 2) (alpha is the 4th byte, red the first etc...)
+ */
+#define AVI_FIFO_MAKE_SWAP16(_a, _b, _c, _d) (_a << 12 | _b << 8 | _c << 4 | _d)
+
+#define AVI_FIFO_MAKE_SWAP(_a, _b, _c, _d) \
+ (((AVI_FIFO_MAKE_SWAP16(_a, _b, _c, _d) | 0x4444) << 16) | \
+ AVI_FIFO_MAKE_SWAP16(_a, _b, _c, _d))
+
+/* This configures a NO-OP swap */
+#define AVI_FIFO_NULL_SWAP AVI_FIFO_MAKE_SWAP(3, 2, 1, 0)
+
+/**
+ * AVI FIFO DMA FRAME XY configuration
+ */
+union avi_fifo_dmafxycfg
+{
+ struct
+ {
+ unsigned dmafxnb : 12;
+ unsigned /* unused */ : 4;
+ unsigned dmafynb : 12;
+ unsigned /* unused */ : 3;
+ unsigned dmafxymode : 1;
+ };
+ u32 _register;
+};
+
+union avi_fifo_keying
+{
+ struct
+ {
+ u8 bv;
+ u8 gu;
+ u8 ry;
+ };
+ u32 _register;
+};
+
+/**
+ * AVI FIFO Video Link In control
+ * Available in P7 Release >=3
+ */
+union avi_fifo_vlin
+{
+ struct
+ {
+ unsigned force_clear : 2;
+ unsigned /* unused */ : 30;
+ };
+ u32 _register;
+};
+/*
+ * force_clear field
+ * possible values
+ */
+enum avi_fifo_vlin_force_clear {
+ /* no force clear signal */
+ AVI_FIFO_VLIN_FORCE_CLEAR_NORMAL=0,
+ /* force clear signal to inactive level */
+ AVI_FIFO_VLIN_FORCE_CLEAR_INACTIVE=1,
+ /* force clear signal to active level */
+ AVI_FIFO_VLIN_FORCE_CLEAR_ACTIVE=2
+};
+
+/* Available in P7 Release >=3 */
+union avi_fifo_status
+{
+ struct
+ {
+ unsigned it_all:1;
+ unsigned it_nl0:1;
+ unsigned it_nl1:1;
+ unsigned :13;
+ unsigned it_monitor0:1;
+ unsigned it_monitor1:1;
+ unsigned :6;
+ unsigned running:1;
+ unsigned :7;
+ };
+ u32 _register;
+};
+
+/* Register map of one DMA FIFO. Anonymous 32-bit bitfields stand in for
+ * reserved register slots so the struct stays congruent with the hardware
+ * layout; the size is asserted against AVI_FIFO_REG_COUNT below. */
+struct avi_fifo_registers
+{
+	union avi_fifo_cfg cfg;
+	union avi_fifo_framesize framesize;
+	union avi_fifo_monitor monitor;
+	union avi_fifo_timeout timeout;
+	union avi_fifo_swap swap0;
+	union avi_fifo_swap swap1;
+	unsigned /* unused */ : 32;
+	unsigned /* unused */ : 32;
+	u32 dmasa0;
+	u32 dmasa1;
+	s32 dmalstr0;
+	s32 dmalstr1;
+	union avi_fifo_dmafxycfg dmafxycfg;
+	s32 dmafxstr0;
+	s32 dmafxstr1;
+	s32 dmafystr0;
+	s32 dmafystr1;
+	unsigned /* unused */ : 32;
+	unsigned /* unused */ : 32;
+	unsigned /* unused */ : 32;
+	union avi_fifo_keying keyingmin;
+	union avi_fifo_keying keyingmax;
+	unsigned keyingalpha : 8;
+	unsigned /* unused */ : 24;
+	unsigned /* unused */ : 32;
+	union avi_fifo_vlin vlin; /* Available in P7 Release >=3 */
+	union avi_fifo_status status; /* Available in P7 Release >=3 */
+};
+AVI_ASSERT_REGISTERS_SIZE(avi_fifo_registers, AVI_FIFO_REG_COUNT);
+
+union avi_scal_coeff {
+ struct {
+ unsigned coeff00:8;
+ unsigned coeff01:8;
+ unsigned coeff02:8;
+ unsigned coeff03:8;
+
+ unsigned coeff04:8;
+ unsigned coeff05:8;
+ unsigned coeff06:8;
+ unsigned coeff07:8;
+
+ unsigned coeff08:8;
+ unsigned coeff09:8;
+ unsigned coeff10:8;
+ unsigned coeff11:8;
+
+ unsigned coeff12:8;
+ unsigned coeff13:8;
+ unsigned coeff14:8;
+ unsigned coeff15:8;
+
+ unsigned coeff16:8;
+ unsigned coeff17:8;
+ unsigned coeff18:8;
+ unsigned coeff19:8;
+
+ unsigned coeff20:8;
+ unsigned coeff21:8;
+ unsigned coeff22:8;
+ unsigned coeff23:8;
+
+ unsigned coeff24:8;
+ unsigned coeff25:8;
+ unsigned coeff26:8;
+ unsigned coeff27:8;
+
+ unsigned coeff28:8;
+ unsigned coeff29:8;
+ unsigned coeff30:8;
+ unsigned coeff31:8;
+ };
+ struct {
+ u32 coeff0300;
+ u32 coeff0704;
+ u32 coeff1108;
+ u32 coeff1512;
+ u32 coeff1916;
+ u32 coeff2320;
+ u32 coeff2724;
+ u32 coeff3128;
+ } words;
+ u8 bytes[32];
+};
+
+union avi_cam_status
+{
+ struct
+ {
+ unsigned done : 1;
+ unsigned fof : 1;
+ unsigned field : 1;
+ };
+ u32 _register;
+};
+
+union avi_cam_itsource
+{
+ struct
+ {
+ unsigned done_en : 1;
+ unsigned fof_en : 1;
+ };
+ u32 _register;
+};
+
+union avi_cam_interface
+{
+ struct
+ {
+ unsigned ivs : 1;
+ unsigned ihs : 1;
+ unsigned ipc : 1;
+ unsigned ioe : 1;
+ unsigned psync_rf : 1;
+ unsigned psync_en : 1;
+ unsigned itu656 : 1;
+ unsigned timing_auto : 1;
+ unsigned format_control : 5;
+ unsigned pad_select : 2;
+ unsigned unpacker : 2;
+ unsigned raw10 : 2;
+ unsigned ror_lsb : 1;
+ };
+ u32 _register;
+};
+
+union avi_cam_run
+{
+ struct
+ {
+ unsigned run : 1;
+ unsigned free_run : 1;
+ };
+ u32 _register;
+};
+
+union avi_cam_h_timing
+{
+ struct
+ {
+ u16 hactive_on;
+ u16 hactive_off;
+ };
+ u32 _register;
+};
+
+union avi_cam_v_timing
+{
+ struct
+ {
+ u16 vactive_on;
+ u16 vactive_off;
+ };
+ u32 _register;
+};
+
+struct avi_cam_registers
+{
+ union avi_cam_status status;
+ union avi_cam_itsource itsource;
+ union avi_cam_interface interface;
+ union avi_cam_run run;
+ union avi_cam_h_timing h_timing;
+ unsigned /* unused */ : 32;
+ unsigned /* unused */ : 32;
+ unsigned /* unused */ : 32;
+ union avi_cam_v_timing v_timing;
+};
+AVI_ASSERT_REGISTERS_SIZE(avi_cam_registers, 0x24 / 4)
+
+/* Those are the read-only timing registers of the cam interface.
+ * Consecutive u16 pairs map onto single 32-bit registers and anonymous
+ * bitfields pad the reserved slots; the total size is asserted against
+ * the 0x30-0x50 register window below. */
+struct avi_cam_measure_regs
+{
+	/* MESURE_H_TIMING0 */
+	u16 hsync_off;
+	u16 hactive_on;
+	/* MESURE_H_TIMING1 */
+	u16 hactive_off;
+	u16 htotal;
+	unsigned /* unused */ : 32;
+	unsigned /* unused */ : 32;
+	/* MESURE_V_TIMING0 */
+	u16 vsync_hon;
+	u16 vsync_von;
+	/* MESURE_V_TIMING1 */
+	u16 vsync_hoff;
+	u16 vsync_voff;
+	/* MESURE_V_TIMING2 */
+	u16 vactive_on;
+	u16 vactive_off;
+	/* MESURE_V_TIMING3 */
+	u16 vtotal;
+	unsigned /* unused */ : 16;
+};
+AVI_ASSERT_REGISTERS_SIZE(avi_cam_measure_regs, (0x50 - 0x30) / 4)
+
+/**************
+ * ISP modules
+ **************/
+/* ISP Chain Bayer */
+#define AVI_ISP_CHAIN_BAYER UL(0x20000)
+#define AVI_ISP_CHAIN_BAYER0 AVI_ISP_CHAIN_BAYER
+#define AVI_ISP_CHAIN_BAYER1 (AVI_ISP_CHAIN_BAYER + UL(0x10000))
+
+#define AVI_ISP_CHAIN_BAYER_INTER UL(0x0000)
+#define AVI_ISP_VL_FORMAT_32_TO_40 UL(0x1000)
+#define AVI_ISP_PEDESTAL UL(0x2000)
+#define AVI_ISP_GREEN_IMBALANCE UL(0x4000)
+#define AVI_ISP_DEAD_PIXEL_CORRECTION UL(0x6000)
+#define AVI_ISP_DENOISING UL(0x7000)
+#define AVI_ISP_STATS_BAYER UL(0x8000)
+#define AVI_ISP_LENS_SHADING_CORRECTION UL(0xA000)
+#define AVI_ISP_CHROMATIC_ABERRATION UL(0xC000)
+#define AVI_ISP_BAYER UL(0xD000)
+#define AVI_ISP_COLOR_CORRECTION UL(0xE000)
+#define AVI_ISP_VL_FORMAT_40_T0_32 UL(0xF000)
+
+#include "avi_regmap/avi_isp_chain_bayer_inter.h"
+#include "avi_regmap/avi_isp_vlformat_32to40.h"
+#include "avi_regmap/avi_isp_pedestal.h"
+#include "avi_regmap/avi_isp_green_imbalance.h"
+#include "avi_regmap/avi_isp_dead_pixel_correction.h"
+#include "avi_regmap/avi_isp_denoising.h"
+#include "avi_regmap/avi_isp_statistics_bayer.h"
+#include "avi_regmap/avi_isp_lens_shading_correction.h"
+#include "avi_regmap/avi_isp_chromatic_aberration.h"
+#include "avi_regmap/avi_isp_bayer.h"
+#include "avi_regmap/avi_isp_color_correction.h"
+#include "avi_regmap/avi_isp_vlformat_40to32.h"
+
+#define AVI_STATS_YUV UL(0x12000)
+#define AVI_STATS_YUV0 AVI_STATS_YUV
+#define AVI_STATS_YUV1 (AVI_STATS_YUV + UL(0x1000))
+
+/* ISP Chain YUV */
+#define AVI_ISP_CHAIN_YUV UL(0x40000)
+#define AVI_ISP_CHAIN_YUV0 AVI_ISP_CHAIN_YUV
+#define AVI_ISP_CHAIN_YUV1 (AVI_ISP_CHAIN_YUV + UL(0x10000))
+
+#define AVI_ISP_CHAIN_YUV_INTER UL(0x0000)
+#define AVI_ISP_EDGE_ENHANCEMENT UL(0x1000)
+#define AVI_ISP_I3D_LUT UL(0x2000)
+#define AVI_ISP_DROP UL(0x3000)
+
+#include "avi_regmap/avi_isp_chain_yuv_inter.h"
+#include "avi_regmap/avi_isp_statistics_yuv.h"
+#include "avi_regmap/avi_isp_edge_enhancement_color_reduction_filter.h"
+#include "avi_regmap/avi_isp_i3d_lut.h"
+#include "avi_regmap/avi_isp_drop.h"
+
+#endif /* _REG_AVI_H */
diff --git a/drivers/parrot/video/scaler-fir-coeffs.pl b/drivers/parrot/video/scaler-fir-coeffs.pl
new file mode 100755
index 00000000000000..c3e66d7a77c0e9
--- /dev/null
+++ b/drivers/parrot/video/scaler-fir-coeffs.pl
@@ -0,0 +1,245 @@
+#! /usr/bin/env perl
+#
+# Script used to generate the FIR coeffs for the AVI scaler using lanczos.
+#
+# Author: Lionel Flandrin <lionel.flandrin@parrot.com>
+# Date: 19-Jun-2013
+
+use strict;
+use warnings;
+
+use Math::Trig;
+
+# Euclid's algorithm: greatest common divisor of $a and $b.
+# Returns $b unchanged when $a == 0.
+sub gcd
+{
+	my ($a, $b) = @_;
+
+	while ($a > 0) {
+		my $r = $b % $a;
+		$b = $a;
+		$a = $r;
+	}
+
+	return $b;
+}
+
+sub make_coprime
+{
+ my ($a, $b) = @_;
+
+ my $d = gcd($$a, $$b);
+
+ $$a /= $d;
+ $$b /= $d;
+}
+
+# Round $v up to the next power of two using the classic bit-smearing
+# trick (OR-in all lower bits, then add one).
+sub round_power2
+{
+	# Assumes $v is a 32bit integer
+	my ($v) = @_;
+
+	$v--;
+	$v |= $v >> $_ foreach (1,2,4,8,16);
+
+	return $v + 1;
+}
+
+# Normalized sinc: sin(pi * $x) / (pi * $x), with sinc(0) == 1.
+sub sinc
+{
+	my ($x) = @_;
+
+	return 1
+	    if ($x == 0);
+
+	my $xpi = pi * $x;
+
+	return sin($xpi) / $xpi;
+}
+
+# Lanczos kernel of size $n evaluated at $x: sinc(x) * sinc(x / n)
+# inside the window, 0 outside. Symmetric, so $x is folded to abs($x).
+sub lanczos
+{
+	# $n is the number of lobes (3 for lanczos 3).
+	my ($n, $x) = @_;
+
+	$x = abs($x);
+
+	# The function is 0 when we're outside of the lanczos window
+	return 0
+	    if ($x > $n);
+
+	return sinc($x) * sinc($x / $n);
+}
+
+if (@ARGV < 2) {
+	print "usage: $0 insize outsize [nlobes [blur]]\n";
+	exit 1;
+}
+
+my ($in, $out, $nlobe, $blur) = @ARGV;
+
+# Default to 2 lobes (lanczos2)
+$nlobe = 2 if (!$nlobe);
+$blur = 1.0 if (!$blur);
+
+make_coprime(\$in, \$out);
+
+# Reduction factor is 1.0 in case of upscale
+my $reduction_factor = ($out < $in) ? (($in * 1.0) / $out) : 1.0;
+
+$reduction_factor *= $blur;
+
+my $ntap = int(2 * $nlobe * $reduction_factor + 0.5);
+
+# Hardware can't handle more than 16 taps
+if ($ntap > 16) {
+	print "#warning Downscale factor $reduction_factor is too large!\n";
+	print "#warning We would need $ntap taps for good results\n";
+	$ntap = 16;
+}
+
+# ntap has to be a power of two
+$ntap = round_power2($ntap);
+
+my $nphase = 32 / $ntap;
+
+my $tapw = $ntap / 2;
+my $pincr = 1.0 / $nphase;
+
+my @tapcoeffs;
+
+# Sample the lanczos kernel for every (tap, phase) pair, widening the
+# kernel by the reduction factor when downscaling.
+for my $tap ((-$tapw + 1)..$tapw) {
+	my @coeffs;
+	for my $phase (0..($nphase - 1)) {
+		my $x = $tap - $pincr * $phase;
+		# We need to translate $x in the initial image coordinates
+		$x /= $reduction_factor;
+		push(@coeffs, lanczos($nlobe, $x));
+	}
+	push @tapcoeffs, \@coeffs;
+}
+
+# We now normalize each phase to sum to 64
+for my $phase (0..($nphase - 1)) {
+	my $sum = 0.;
+	for my $tap (0 .. ($ntap - 1)) {
+		$sum += $tapcoeffs[$tap][$phase];
+	}
+	my $factor = 64 / $sum;
+
+	$sum = 0;
+
+	for my $tap (0 .. ($ntap - 1)) {
+		my $coeff = \$tapcoeffs[$tap][$phase];
+
+		$$coeff *= $factor;
+		$$coeff = int($$coeff + 0.5);
+		$sum += $$coeff;
+	}
+
+	# If sum is too big, we lower the outer parts of the FIR
+	for (my $i = 0; ($i < $ntap) && ($sum > 64); $i++) {
+		my $tap = $i >> 1;
+		$tap = $ntap - $tap - 1
+		    if ($i & 1);
+		$tapcoeffs[$tap][$phase]--;
+		$sum--;
+	}
+
+	# If sum is too small, we raise the center parts of the FIR. The
+	# coefficients and the running sum must be incremented here:
+	# decrementing (as before) drove the phase sum further below 64.
+	for (my $i = 0; ($i < $ntap) && ($sum < 64); $i++) {
+		my $center = ($ntap - 1) / 2;
+		my $tap = $i >> 1;
+		$tap = -$tap - 1
+		    if ($i & 1);
+		$tapcoeffs[$center - $tap][$phase]++;
+		$sum++;
+	}
+}
+
+sub set_point
+{
+ my ($lines, $x, $y, $c) = @_;
+
+ substr($lines->[$y], $x, 1) = $c;
+}
+
+sub graph_taps
+{
+ my ($coeffs, $height) = @_;
+
+ $height = 64
+ if (!$height);
+
+ my @tapcoeffs = @$coeffs;
+
+ my @lines;
+
+ push @lines, ' ' x 32
+ foreach (0 .. ($height - 1));
+
+ # y-axis
+ set_point(\@lines, 15, $_, '|')
+ foreach (0 .. ($height - 1));
+
+ # x-axis
+ set_point(\@lines, $_, $height / 2, '-')
+ foreach (0 .. 31);
+
+ my $min_y = $height;
+ my $max_y = 0;
+
+ for my $tap (0 .. ($ntap - 1)) {
+ for my $p (1..$nphase) {
+ my $phase = $nphase - $p;
+ my $x = $tap * $nphase + ($p - 1);
+ my $y = $tapcoeffs[$tap][$phase] + 64;
+
+ $y = int(($y * ($height - 1)) / (2 * 64) + 0.5);
+
+ $min_y = $y
+ if ($min_y > $y);
+ $max_y = $y
+ if ($max_y < $y);
+
+ set_point(\@lines, $x, $y, '+')
+ }
+ }
+
+ foreach (1 .. $height) {
+ my $y = $height - $_;
+
+ next
+ if ($y < $min_y || $y > $max_y);
+
+ $lines[$y] =~ s/\s*$//;
+ print ' * ', $lines[$y], "\n";
+ }
+}
+
+print <<EOF;
+/* Configuration for scaling factor $out / $in using lanczos
+ * with $nlobe lobes.
+ *
+ * This configuration uses $ntap taps and $nphase phases.
+ *
+EOF
+
+graph_taps(\@tapcoeffs, 33);
+
+print <<EOF;
+ */
+static const struct avi_scal_dimcfg avi_scalcfg_${out}_$in = {
+\t.ntaps = AVI_SCAL_NTAPS_$ntap,
+\t.nphases = AVI_SCAL_NPHASES_$nphase,
+\t.coeffs = {
+EOF
+
+for my $tap (0 .. ($ntap - 1)) {
+ print("\t\t",
+ join(', ', map { sprintf("%3d", $_) } @{$tapcoeffs[$tap]}),
+ ", /* tap $tap */\n");
+}
+
+print <<EOF;
+\t},
+};
+EOF
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
new file mode 100644
index 00000000000000..baa3ea8283845d
--- /dev/null
+++ b/drivers/pwm/Kconfig
@@ -0,0 +1,42 @@
+menuconfig PWM
+ bool "Pulse-Width Modulation (PWM) Support"
+ depends on !MACH_JZ4740 && !PUV3_PWM
+ help
+ Generic Pulse-Width Modulation (PWM) support.
+
+ In Pulse-Width Modulation, a variation of the width of pulses
+ in a rectangular pulse signal is used as a means to alter the
+ average power of the signal. Applications include efficient
+ power delivery and voltage regulation. In computer systems,
+ PWMs are commonly used to control fans or the brightness of
+ display backlights.
+
+ This framework provides a generic interface to PWM devices
+ within the Linux kernel. On the driver side it provides an API
+ to register and unregister a PWM chip, an abstraction of a PWM
+ controller, that supports one or more PWM devices. Client
+ drivers can request PWM devices and use the generic framework
+ to configure as well as enable and disable them.
+
+ This generic framework replaces the legacy PWM framework which
+ allows only a single driver implementing the required API. Not
+ all legacy implementations have been ported to the framework
+ yet. The framework provides an API that is backward compatible
+ with the legacy framework so that existing client drivers
+ continue to work as expected.
+
+ If unsure, say no.
+
+config PWM_SYSFS
+	bool "Pulse-Width Modulation (PWM) SYSFS Support"
+	depends on PWM
+	default n
+	help
+	  Add SYSFS support to PWM framework
+
+config PWM_SYSFS_DEBUG
+	bool "Pulse-Width Modulation (PWM) SYSFS Support Debug"
+	depends on PWM_SYSFS
+	default n
+	help
+	  Activate DEBUG for SYSFS
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
new file mode 100644
index 00000000000000..3469c3d28b7a93
--- /dev/null
+++ b/drivers/pwm/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PWM) += core.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
new file mode 100644
index 00000000000000..aa3de0278e5e6f
--- /dev/null
+++ b/drivers/pwm/core.c
@@ -0,0 +1,1126 @@
+/*
+ * Generic pwmlib implementation
+ *
+ * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2011-2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Turn on dev_dbg() tracing when the sysfs debug option is selected. */
+#if defined(CONFIG_PWM_SYSFS_DEBUG)
+#define DEBUG
+#endif /*CONFIG_PWM_SYSFS_DEBUG*/
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/radix-tree.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define MAX_PWMS 1024
+
+static DEFINE_MUTEX(pwm_lookup_lock);
+static LIST_HEAD(pwm_lookup_list);
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_chips);
+static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
+static RADIX_TREE(pwm_tree, GFP_KERNEL);
+
+#ifdef CONFIG_PWM_SYSFS
+static int pwmchip_export(struct pwm_chip *chip);
+static int pwmchip_unexport(struct pwm_chip *chip);
+#else
+static inline int pwmchip_export(struct pwm_chip *chip)
+{
+ return 0 ;
+}
+
+static inline int pwmchip_unexport(struct pwm_chip *chip)
+{
+ return 0 ;
+}
+#endif /*CONFIG_PWM_SYSFS*/
+
+/* Look up a PWM device by its global number; NULL if none registered. */
+static struct pwm_device *pwm_to_device(unsigned int pwm)
+{
+	return radix_tree_lookup(&pwm_tree, pwm);
+}
+
+/*
+ * Reserve a contiguous range of @count PWM numbers. If @pwm is negative
+ * a free range is chosen dynamically, otherwise the range must start
+ * exactly at @pwm (-EEXIST if it cannot). Returns the first number of
+ * the range or a negative error code. Called under pwm_lock; the caller
+ * is responsible for marking the range used in the bitmap.
+ */
+static int alloc_pwms(int pwm, unsigned int count)
+{
+	unsigned int from = 0;
+	unsigned int start;
+
+	if (pwm >= MAX_PWMS)
+		return -EINVAL;
+
+	if (pwm >= 0)
+		from = pwm;
+
+	start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
+					   count, 0);
+
+	if (pwm >= 0 && start != pwm)
+		return -EEXIST;
+
+	if (start + count > MAX_PWMS)
+		return -ENOSPC;
+
+	return start;
+}
+
+/*
+ * Undo pwmchip_add()'s bookkeeping: drop every device of @chip from the
+ * radix tree, release its number range in the allocation bitmap and free
+ * the device array. Called under pwm_lock.
+ */
+static void free_pwms(struct pwm_chip *chip)
+{
+	unsigned int i;
+
+	for (i = 0; i < chip->npwm; i++) {
+		struct pwm_device *pwm = &chip->pwms[i];
+		radix_tree_delete(&pwm_tree, pwm->pwm);
+	}
+
+	bitmap_clear(allocated_pwms, chip->base, chip->npwm);
+
+	kfree(chip->pwms);
+	chip->pwms = NULL;
+}
+
+#ifdef CONFIG_OF
+static struct pwm_chip *pwmchip_find_by_name(const char *name)
+{
+ struct pwm_chip *chip;
+
+ if (!name)
+ return NULL;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list) {
+ const char *chip_name = dev_name(chip->dev);
+
+ if (chip_name && strcmp(chip_name, name) == 0) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return NULL;
+}
+#endif /*CONFIG_OF*/
+
+/*
+ * Mark @pwm as requested and attach @label to it. Fails with -EBUSY if
+ * it was already requested and -ENODEV if the owning driver module
+ * cannot be pinned; a failing chip ->request() callback is propagated
+ * (with the module reference dropped). Called under pwm_lock.
+ */
+static int pwm_device_request(struct pwm_device *pwm, const char *label)
+{
+	int err;
+
+	if (test_bit(PWMF_REQUESTED, &pwm->flags))
+		return -EBUSY;
+
+	if (!try_module_get(pwm->chip->ops->owner))
+		return -ENODEV;
+
+	if (pwm->chip->ops->request) {
+		err = pwm->chip->ops->request(pwm->chip, pwm);
+		if (err) {
+			module_put(pwm->chip->ops->owner);
+			return err;
+		}
+	}
+
+	set_bit(PWMF_REQUESTED, &pwm->flags);
+	pwm->label = label;
+
+	return 0;
+}
+
+static struct pwm_device *
+of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (pc->of_pwm_n_cells < 2)
+ return ERR_PTR(-EINVAL);
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[1]);
+
+ return pwm;
+}
+
+static void of_pwmchip_add(struct pwm_chip *chip)
+{
+ if (!chip->dev || !chip->dev->of_node)
+ return;
+
+ if (!chip->of_xlate) {
+ chip->of_xlate = of_pwm_simple_xlate;
+ chip->of_pwm_n_cells = 2;
+ }
+
+ of_node_get(chip->dev->of_node);
+}
+
+static void of_pwmchip_remove(struct pwm_chip *chip)
+{
+ if (chip->dev && chip->dev->of_node)
+ of_node_put(chip->dev->of_node);
+}
+
+/**
+ * pwm_set_chip_data() - set private chip data for a PWM
+ * @pwm: PWM device
+ * @data: pointer to chip-specific data
+ */
+int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+{
+ if (!pwm)
+ return -EINVAL;
+
+ pwm->chip_data = data;
+
+ return 0;
+}
+
+/**
+ * pwm_get_chip_data() - get private chip data for a PWM
+ * @pwm: PWM device
+ */
+void *pwm_get_chip_data(struct pwm_device *pwm)
+{
+ return pwm ? pwm->chip_data : NULL;
+}
+
+/**
+ * pwmchip_add() - register a new PWM chip
+ * @chip: the PWM chip to add
+ *
+ * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
+ * will be used.
+ */
+int pwmchip_add(struct pwm_chip *chip)
+{
+ struct pwm_device *pwm;
+ unsigned int i;
+ int ret;
+
+ if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
+ !chip->ops->enable || !chip->ops->disable)
+ return -EINVAL;
+
+ mutex_lock(&pwm_lock);
+
+ ret = alloc_pwms(chip->base, chip->npwm);
+ if (ret < 0)
+ goto out;
+
+ chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
+ if (!chip->pwms) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ chip->base = ret;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm = &chip->pwms[i];
+
+ pwm->chip = chip;
+ pwm->pwm = chip->base + i;
+ pwm->hwpwm = i;
+
+ radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
+ }
+
+ bitmap_set(allocated_pwms, chip->base, chip->npwm);
+
+ INIT_LIST_HEAD(&chip->list);
+ list_add(&chip->list, &pwm_chips);
+
+ ret = 0;
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_add(chip);
+
+out:
+ if (!ret)
+ ret = pwmchip_export(chip) ;
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_add);
+
+/**
+ * pwmchip_remove() - remove a PWM chip
+ * @chip: the PWM chip to remove
+ *
+ * Removes a PWM chip. This function may return busy if the PWM chip provides
+ * a PWM device that is still requested.
+ */
+int pwmchip_remove(struct pwm_chip *chip)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&pwm_lock);
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ list_del_init(&chip->list);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
+ free_pwms(chip);
+
+out:
+ if (!ret)
+ ret = pwmchip_unexport(chip) ;
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_remove);
+
+/**
+ * pwm_request() - request a PWM device
+ * @pwm_id: global PWM device index
+ * @label: PWM device label
+ *
+ * This function is deprecated, use pwm_get() instead.
+ */
+struct pwm_device *pwm_request(int pwm, const char *label)
+{
+ struct pwm_device *dev;
+ int err;
+
+ if (pwm < 0 || pwm >= MAX_PWMS)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+
+ dev = pwm_to_device(pwm);
+ if (!dev) {
+ dev = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ err = pwm_device_request(dev, label);
+ if (err < 0)
+ dev = ERR_PTR(err);
+
+out:
+ mutex_unlock(&pwm_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(pwm_request);
+
+/**
+ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
+ * @chip: PWM chip
+ * @index: per-chip index of the PWM to request
+ * @label: a literal description string of this PWM
+ *
+ * Returns the PWM at the given index of the given PWM chip. A negative error
+ * code is returned if the index is not valid for the specified PWM chip or
+ * if the PWM device cannot be requested.
+ */
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ struct pwm_device *pwm;
+ int err;
+
+ if (!chip || index >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+ pwm = &chip->pwms[index];
+
+ err = pwm_device_request(pwm, label);
+ if (err < 0)
+ pwm = ERR_PTR(err);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+/**
+ * pwm_free() - free a PWM device
+ * @pwm: PWM device
+ *
+ * This function is deprecated, use pwm_put() instead.
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_put(pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_free);
+
+/**
+ * pwm_config() - change a PWM device configuration
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns 0 on success or a negative error code. Negative durations and
+ * non-positive periods are rejected here so drivers never see them; the
+ * original check only caught period_ns == 0.
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+	if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
+		return -EINVAL;
+
+	return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+}
+EXPORT_SYMBOL_GPL(pwm_config);
+
+/**
+ * pwm_enable() - start a PWM output toggling
+ * @pwm: PWM device
+ *
+ * Returns 0 on success or a negative error code. If the driver's
+ * ->enable() callback fails, PWMF_ENABLED is cleared again so that a
+ * subsequent pwm_enable() retries instead of silently succeeding.
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int err;
+
+	if (!pwm)
+		return -EINVAL;
+
+	/* Already enabled: nothing to do. */
+	if (test_and_set_bit(PWMF_ENABLED, &pwm->flags))
+		return 0;
+
+	err = pwm->chip->ops->enable(pwm->chip, pwm);
+	if (err)
+		clear_bit(PWMF_ENABLED, &pwm->flags);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(pwm_enable);
+
+/**
+ * pwm_disable() - stop a PWM output toggling
+ * @pwm: PWM device
+ *
+ * No-op when @pwm is NULL or was not enabled; the driver's ->disable()
+ * callback runs only on the enabled -> disabled transition.
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
+		pwm->chip->ops->disable(pwm->chip, pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_disable);
+
+#ifdef CONFIG_OF
+static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
+{
+ struct pwm_chip *chip;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list)
+ if (chip->dev && chip->dev->of_node == np) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+#endif /*CONFIG_OF*/
+
+#ifdef CONFIG_OF
+/**
+ * of_pwm_request() - request a PWM via the PWM framework
+ * @np: device node to get the PWM from
+ * @con_id: consumer name
+ *
+ * Returns the PWM device parsed from the phandle and index specified in the
+ * "pwms" property of a device tree node or a negative error-code on failure.
+ * Values parsed from the device tree are stored in the returned PWM device
+ * object.
+ *
+ * If con_id is NULL, the first PWM device listed in the "pwms" property will
+ * be requested. Otherwise the "pwm-names" property is used to do a reverse
+ * lookup of the PWM index. This also means that the "pwm-names" property
+ * becomes mandatory for devices that look up the PWM device via the con_id
+ * parameter.
+ */
+static struct pwm_device *of_pwm_request(struct device_node *np,
+ const char *con_id)
+{
+ struct pwm_device *pwm = NULL;
+ struct of_phandle_args args;
+ struct pwm_chip *pc;
+ int index = 0;
+ int err;
+
+ if (con_id) {
+ index = of_property_match_string(np, "pwm-names", con_id);
+ if (index < 0)
+ return ERR_PTR(index);
+ }
+
+ err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index,
+ &args);
+ if (err) {
+ pr_debug("%s(): can't parse \"pwms\" property\n", __func__);
+ return ERR_PTR(err);
+ }
+
+ pc = of_node_to_pwmchip(args.np);
+ if (IS_ERR(pc)) {
+ pr_debug("%s(): PWM chip not found\n", __func__);
+ pwm = ERR_CAST(pc);
+ goto put;
+ }
+
+ if (args.args_count != pc->of_pwm_n_cells) {
+ pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name,
+ args.np->full_name);
+ pwm = ERR_PTR(-EINVAL);
+ goto put;
+ }
+
+ pwm = pc->of_xlate(pc, &args);
+ if (IS_ERR(pwm))
+ goto put;
+
+ /*
+ * If a consumer name was not given, try to look it up from the
+ * "pwm-names" property if it exists. Otherwise use the name of
+ * the user device node.
+ */
+ if (!con_id) {
+ err = of_property_read_string_index(np, "pwm-names", index,
+ &con_id);
+ if (err < 0)
+ con_id = np->name;
+ }
+
+ pwm->label = con_id;
+
+put:
+ of_node_put(args.np);
+
+ return pwm;
+}
+#endif /*CONFIG_OF*/
+
+/**
+ * pwm_add_table() - register PWM device consumers
+ * @table: array of consumers to register
+ * @num: number of consumers in table
+ */
+void __init pwm_add_table(struct pwm_lookup *table, size_t num)
+{
+ mutex_lock(&pwm_lookup_lock);
+
+ while (num--) {
+ list_add_tail(&table->list, &pwm_lookup_list);
+ table++;
+ }
+
+ mutex_unlock(&pwm_lookup_lock);
+}
+
+#ifdef CONFIG_OF
+/**
+ * pwm_get() - look up and request a PWM device
+ * @dev: device for PWM consumer
+ * @con_id: consumer name
+ *
+ * Lookup is first attempted using DT. If the device was not instantiated from
+ * a device tree, a PWM chip and a relative index is looked up via a table
+ * supplied by board setup code (see pwm_add_table()).
+ *
+ * Once a PWM chip has been found the specified PWM device will be requested
+ * and is ready to be used.
+ */
+struct pwm_device *pwm_get(struct device *dev, const char *con_id)
+{
+ struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct pwm_chip *chip = NULL;
+ unsigned int index = 0;
+ unsigned int best = 0;
+ struct pwm_lookup *p;
+ unsigned int match;
+
+ /* look up via DT first */
+ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
+ return of_pwm_request(dev->of_node, con_id);
+
+ /*
+ * We look up the provider in the static table typically provided by
+ * board setup code. We first try to lookup the consumer device by
+ * name. If the consumer device was passed in as NULL or if no match
+ * was found, we try to find the consumer by directly looking it up
+ * by name.
+ *
+ * If a match is found, the provider PWM chip is looked up by name
+ * and a PWM device is requested using the PWM device per-chip index.
+ *
+ * The lookup algorithm was shamelessly taken from the clock
+ * framework:
+ *
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following order
+ * of precedence: dev+con > dev only > con only.
+ */
+ mutex_lock(&pwm_lookup_lock);
+
+ list_for_each_entry(p, &pwm_lookup_list, list) {
+ match = 0;
+
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+
+ match += 2;
+ }
+
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+
+ match += 1;
+ }
+
+ if (match > best) {
+ chip = pwmchip_find_by_name(p->provider);
+ index = p->index;
+
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
+ }
+
+ if (chip)
+ pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id);
+
+ mutex_unlock(&pwm_lookup_lock);
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_get);
+#endif /*CONFIG_OF*/
+
+/**
+ * pwm_put() - release a PWM device
+ * @pwm: PWM device
+ *
+ * Balances pwm_device_request(): clears the REQUESTED flag, invokes the
+ * optional chip ->free() callback, drops the label and releases the
+ * driver module reference. Warns (but does nothing else) on double free.
+ */
+void pwm_put(struct pwm_device *pwm)
+{
+	if (!pwm)
+		return;
+
+	mutex_lock(&pwm_lock);
+
+	if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
+		pr_warn("PWM device already freed\n");
+		goto out;
+	}
+
+	if (pwm->chip->ops->free)
+		pwm->chip->ops->free(pwm->chip, pwm);
+
+	pwm->label = NULL;
+
+	module_put(pwm->chip->ops->owner);
+out:
+	mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL_GPL(pwm_put);
+
+
+/**************************************
+ * SYSFS
+ **************************************/
+#ifdef CONFIG_PWM_SYSFS
+
+#define PWM_SYSFS_LABEL "sysfs"
+
+static int pwmchip_export(struct pwm_chip *chip);
+static ssize_t pwm_export_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t len);
+static ssize_t pwm_unexport_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t len);
+
+static struct class_attribute pwm_class_attrs[] = {
+ __ATTR(export, S_IWUSR, NULL, pwm_export_store),
+ __ATTR(unexport, S_IWUSR, NULL, pwm_unexport_store),
+ __ATTR_NULL,
+};
+
+static struct class pwm_class = {
+ .name = "pwm",
+ .owner = THIS_MODULE,
+ .class_attrs = pwm_class_attrs,
+};
+
+/* PWM devices */
+
+/*
+ * /sys/class/pwm/pwm_N/
+ * /period_ns WR
+ * /duty_ns WR
+ * /run WR
+ */
+
+/* sysfs: read back the currently configured period in nanoseconds. */
+static ssize_t pwm_period_ns_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct pwm_device *pwm ;
+	dev_dbg(dev,"%s",__func__) ;
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	if (IS_ERR_OR_NULL(pwm))
+		return -EINVAL ;
+	return sprintf(buf, "%d\n",pwm->period);
+}
+
+/*
+ * sysfs: set the period (in ns). The value is only pushed to the hardware
+ * via pwm_config() when the PWM is currently enabled; otherwise it is
+ * cached until "run" is written.
+ */
+static ssize_t pwm_period_ns_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t count)
+{
+	long period;
+	int status;
+	struct pwm_device * pwm ;
+
+	dev_dbg(dev,"%s",__func__) ;
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	if (IS_ERR_OR_NULL(pwm)) {
+		status = -EINVAL ;
+		goto done ;
+	}
+
+	status = strict_strtol(buf, 0, &period);
+	if (status < 0) {
+		status = -EINVAL ;
+		goto done;
+	}
+
+	pwm->period = (int) period;
+	if (test_bit(PWMF_ENABLED,&pwm->flags))
+		status = pwm_config(pwm,pwm->duty,pwm->period) ;
+done:
+	if (status)
+		dev_dbg(dev,"%s: Error : status %d\n", __func__, status);
+	/* status == 0: report the whole buffer as consumed */
+	return status ? : count;
+}
+
+/* sysfs: show the cached duty cycle (in ns) of an exported PWM device. */
+static ssize_t pwm_duty_ns_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct pwm_device *pwm ;
+	dev_dbg(dev,"%s",__func__) ;
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	if (IS_ERR_OR_NULL(pwm))
+		return -EINVAL ;
+	return sprintf(buf, "%d\n",pwm->duty);
+}
+
+/*
+ * sysfs: set the duty cycle (in ns). The value is only pushed to the
+ * hardware via pwm_config() when the PWM is currently enabled; otherwise
+ * it is cached until "run" is written.
+ */
+static ssize_t pwm_duty_ns_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	long duty;
+	int status;
+	struct pwm_device *pwm;
+
+	dev_dbg(dev, "%s", __func__);
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	/*
+	 * dev_get_drvdata() may legitimately return NULL; use
+	 * IS_ERR_OR_NULL() like pwm_period_ns_store() does instead of
+	 * IS_ERR(), which lets a NULL pointer slip through.
+	 */
+	if (IS_ERR_OR_NULL(pwm)) {
+		status = -EINVAL;
+		goto done;
+	}
+
+	status = strict_strtol(buf, 0, &duty);
+	if (status < 0) {
+		status = -EINVAL;
+		goto done;
+	}
+
+	pwm->duty = (int) duty;
+	if (test_bit(PWMF_ENABLED, &pwm->flags))
+		status = pwm_config(pwm, pwm->duty, pwm->period);
+done:
+	if (status)
+		dev_dbg(dev,"%s: Error : status %d\n", __func__, status);
+	return status ? : count;
+}
+
+/* sysfs: show 1 if the PWM is currently enabled, 0 otherwise. */
+static ssize_t pwm_run_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct pwm_device *pwm;
+	int run = 0;
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	/* IS_ERR_OR_NULL: drvdata may be NULL, not just an ERR_PTR. */
+	if (IS_ERR_OR_NULL(pwm))
+		return -EINVAL;
+
+	if (test_bit(PWMF_ENABLED, &pwm->flags))
+		run = 1;
+
+	return sprintf(buf, "%d\n", run);
+}
+
+/*
+ * sysfs: start (1) or stop (0) the PWM. Starting first re-applies the
+ * cached duty/period via pwm_config(), then enables the output.
+ */
+static ssize_t pwm_run_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf,
+			     size_t count)
+{
+	long run;
+	int status;
+	struct pwm_device *pwm;
+
+	dev_dbg(dev, "%s", __func__);
+
+	pwm = (struct pwm_device *) dev_get_drvdata(dev);
+	/* IS_ERR_OR_NULL: drvdata may be NULL, not just an ERR_PTR. */
+	if (IS_ERR_OR_NULL(pwm)) {
+		status = -EINVAL;
+		goto done;
+	}
+
+	status = strict_strtol(buf, 0, &run);
+	if (status < 0) {
+		status = -EINVAL;
+		goto done;
+	}
+
+	if (0 == run)
+		pwm_disable(pwm);
+	else if (1 == run) {
+		status = pwm_config(pwm, pwm->duty, pwm->period);
+		if (status)
+			goto done;
+		status = pwm_enable(pwm);
+	} else
+		status = -EINVAL;
+
+done:
+	if (status)
+		dev_dbg(dev,"%s: Error : status %d\n", __func__, status);
+	return status ? : count;
+}
+
+static DEVICE_ATTR(period_ns, 0644, pwm_period_ns_show,pwm_period_ns_store);
+static DEVICE_ATTR(duty_ns, 0644, pwm_duty_ns_show,pwm_duty_ns_store);
+static DEVICE_ATTR(run, 0644, pwm_run_show,pwm_run_store);
+
+static const struct attribute *pwm_attrs[] = {
+ &dev_attr_period_ns.attr,
+ &dev_attr_duty_ns.attr,
+ &dev_attr_run.attr,
+ NULL,
+};
+
+static const struct attribute_group pwm_attr_group = {
+ .attrs = (struct attribute **) pwm_attrs,
+};
+
+/*
+ * sysfs: export PWM number `n` by requesting it and creating the
+ * /sys/class/pwm/pwm_N device with its attribute group.
+ */
+static ssize_t pwm_export_store(struct class *class,
+				struct class_attribute *attr,
+				const char *buf, size_t len)
+{
+	long n;
+	int status;
+	struct pwm_device *pwm;
+	struct device *pwmdev;
+
+	status = strict_strtol(buf, 0, &n);
+	if (status < 0)
+		return -EINVAL;
+
+	pwm = pwm_request(n, PWM_SYSFS_LABEL);
+	if (IS_ERR(pwm))
+		return PTR_ERR(pwm);
+
+	dev_dbg(pwm->chip->dev,"%s: receive=%ld\n",__func__,n);
+
+	pwm->duty = 0;
+	pwm->period = 0;
+	pwmdev = device_create(&pwm_class, pwm->chip->dev, MKDEV(0, 0),
+			       (void *) pwm, "pwm_%d", pwm->pwm);
+	if (IS_ERR(pwmdev)) {
+		status = PTR_ERR(pwmdev);
+		goto err_free;
+	}
+
+	status = sysfs_create_group(&pwmdev->kobj, &pwm_attr_group);
+	if (status)
+		goto err_unregister;
+
+	return len;
+
+err_unregister:
+	device_unregister(pwmdev);
+err_free:
+	/*
+	 * Don't leak the pwm_request() taken above when the export fails,
+	 * and don't pass a NULL/ERR_PTR device to dev_dbg().
+	 */
+	pwm_free(pwm);
+	pr_debug("%s: Error : status %d\n", __func__, status);
+	return status;
+}
+
+/* class_find_device() callback: match a device by its drvdata pointer. */
+static int match_export(struct device *dev, void *data)
+{
+	return dev_get_drvdata(dev) == data;
+}
+
+/*
+ * sysfs: unexport PWM number `n` - remove the pwm_N device created by
+ * pwm_export_store() and release the underlying PWM.
+ */
+static ssize_t pwm_unexport_store(struct class *class,
+				  struct class_attribute *attr,
+				  const char *buf, size_t len)
+{
+	long n;
+	int status;
+	struct pwm_device *pwm;
+	struct device *dev;
+
+	status = strict_strtol(buf, 0, &n);
+	if (status < 0)
+		return -EINVAL;
+
+	/* pwm_to_device() may hand back NULL as well as an ERR_PTR. */
+	pwm = pwm_to_device(n);
+	if (IS_ERR_OR_NULL(pwm)) {
+		status = pwm ? PTR_ERR(pwm) : -ENODEV;
+		goto done;
+	}
+
+	dev = class_find_device(&pwm_class, NULL, pwm, match_export);
+	if (dev) {
+		dev_dbg(dev,"%s\n",__func__);
+		device_unregister(dev);
+		put_device(dev);	/* drop class_find_device() reference */
+
+		pwm_free(pwm);
+		return len;
+	}
+	status = -EINVAL;
+done:
+	/* No device to log against here; use pr_debug instead of dev_dbg. */
+	pr_debug("%s: status %d\n", __func__, status);
+	return status;
+}
+
+
+/* PWM chips */
+
+/*
+ * /sys/class/pwm/pwmchipN/
+ *	/base ... matching pwm_chip.base (N)
+ *	/npwm ... matching pwm_chip.npwm
+ */
+
+/* sysfs: first PWM number handled by this chip. */
+static ssize_t chip_base_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	const struct pwm_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", chip->base);
+}
+static DEVICE_ATTR(base, 0444, chip_base_show, NULL);
+
+/* sysfs: number of PWM channels this chip provides. */
+static ssize_t chip_npwm_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	const struct pwm_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", chip->npwm);
+}
+static DEVICE_ATTR(npwm, 0444, chip_npwm_show, NULL);
+
+static const struct attribute *pwmchip_attrs[] = {
+	&dev_attr_base.attr,
+	&dev_attr_npwm.attr,
+	NULL,
+};
+
+static const struct attribute_group pwmchip_attr_group = {
+	.attrs = (struct attribute **) pwmchip_attrs,
+};
+
+/* Create the /sys/class/pwm/pwmchipN device for a registered chip. */
+static int pwmchip_export(struct pwm_chip *chip)
+{
+	int status;
+	struct device *dev;
+
+	dev_dbg(chip->dev,"%s: CHIP_BASE=%d\n",
+		__func__,
+		chip->base);
+
+	dev = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
+			    "pwmchip%d", chip->base);
+	if (IS_ERR(dev)) {
+		/*
+		 * Don't hand the ERR_PTR to dev_dbg(): printing the device
+		 * name would dereference the error pointer. Log against the
+		 * chip's own device instead.
+		 */
+		status = PTR_ERR(dev);
+		dev_dbg(chip->dev,"%s: Error, status=%d\n",__func__,status);
+		return status;
+	}
+
+	status = sysfs_create_group(&dev->kobj, &pwmchip_attr_group);
+	if (status)
+		dev_dbg(chip->dev,"%s: Error, status=%d\n",__func__,status);
+
+	return status;
+}
+
+
+/* Remove the /sys/class/pwm/pwmchipN device created by pwmchip_export(). */
+static int pwmchip_unexport(struct pwm_chip *chip)
+{
+	int status = 0;
+	struct device *dev;
+
+	dev_dbg(chip->dev,"%s: CHIP_BASE=%d\n",
+		__func__,
+		chip->base);
+
+	dev = class_find_device(&pwm_class, NULL, chip, match_export);
+	if (dev) {
+		put_device(dev);	/* drop class_find_device() reference */
+		device_unregister(dev);
+	} else {
+		status = -ENODEV;
+		/* dev is NULL here, so log against the chip's device. */
+		dev_dbg(chip->dev,"%s: pwmchip%d status %d\n", __func__,
+			chip->base, status);
+	}
+
+	return status;
+}
+
+/* Register the "pwm" sysfs class early (postcore) so chips can export. */
+static int __init pwm_init(void)
+{
+	dev_dbg(NULL,"%s\n",__func__);
+	return class_register(&pwm_class);
+}
+
+static void __exit pwm_exit(void)
+{
+	dev_dbg(NULL,"%s\n",__func__);
+	class_unregister(&pwm_class);
+}
+
+postcore_initcall(pwm_init);
+module_exit(pwm_exit);
+
+#endif /*CONFIG_PWM_SYSFS*/
+
+#ifdef CONFIG_DEBUG_FS
+/* Default debugfs dump: one line per PWM on the chip with its flags. */
+static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
+{
+	unsigned int i;
+
+	for (i = 0; i < chip->npwm; i++) {
+		struct pwm_device *pwm = &chip->pwms[i];
+
+		seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
+
+		if (test_bit(PWMF_REQUESTED, &pwm->flags))
+			seq_printf(s, " requested");
+
+		if (test_bit(PWMF_ENABLED, &pwm->flags))
+			seq_printf(s, " enabled");
+
+		seq_printf(s, "\n");
+	}
+}
+
+/* seq_file iteration over pwm_chips; pwm_lock is held for the whole walk. */
+static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
+{
+	mutex_lock(&pwm_lock);
+	/* s->private carries the separator printed before each chip. */
+	s->private = "";
+
+	return seq_list_start(&pwm_chips, *pos);
+}
+
+static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	s->private = "\n";
+
+	return seq_list_next(v, &pwm_chips, pos);
+}
+
+static void pwm_seq_stop(struct seq_file *s, void *v)
+{
+	mutex_unlock(&pwm_lock);
+}
+
+/* Print one chip header, then delegate to the chip's dbg_show if any. */
+static int pwm_seq_show(struct seq_file *s, void *v)
+{
+	struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
+
+	seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
+		   chip->dev->bus ? chip->dev->bus->name : "no-bus",
+		   dev_name(chip->dev), chip->npwm,
+		   (chip->npwm != 1) ? "s" : "");
+
+	if (chip->ops->dbg_show)
+		chip->ops->dbg_show(chip, s);
+	else
+		pwm_dbg_show(chip, s);
+
+	return 0;
+}
+
+static const struct seq_operations pwm_seq_ops = {
+	.start = pwm_seq_start,
+	.next = pwm_seq_next,
+	.stop = pwm_seq_stop,
+	.show = pwm_seq_show,
+};
+
+static int pwm_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &pwm_seq_ops);
+}
+
+static const struct file_operations pwm_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.open = pwm_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+/* Expose the chip/device listing at <debugfs>/pwm (read-only). */
+static int __init pwm_debugfs_init(void)
+{
+	debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
+			    &pwm_debugfs_ops);
+
+	return 0;
+}
+
+subsys_initcall(pwm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/staging/android/trace_persistent.c b/drivers/staging/android/trace_persistent.c
new file mode 100644
index 00000000000000..7c3e9499fba10a
--- /dev/null
+++ b/drivers/staging/android/trace_persistent.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/persistent_ram.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "../../../kernel/trace/trace.h"
+
+struct persistent_trace_record {
+ unsigned long ip;
+ unsigned long parent_ip;
+};
+
+#define REC_SIZE sizeof(struct persistent_trace_record)
+
+static struct persistent_ram_zone *persistent_trace;
+
+static int persistent_trace_enabled;
+
+static struct trace_array *persistent_trace_array;
+
+static struct ftrace_ops trace_ops;
+
+/*
+ * Tracer ->init: remember the trace_array, start cmdline recording and
+ * hook persistent_trace_call() into ftrace. The enabled flag is published
+ * with smp_wmb() barriers so the callback (which does smp_rmb()) never
+ * runs before registration is complete.
+ */
+static int persistent_tracer_init(struct trace_array *tr)
+{
+	persistent_trace_array = tr;
+	tr->cpu = get_cpu();
+	put_cpu();
+
+	tracing_start_cmdline_record();
+
+	persistent_trace_enabled = 0;
+	smp_wmb();
+
+	register_ftrace_function(&trace_ops);
+
+	smp_wmb();
+	persistent_trace_enabled = 1;
+
+	return 0;
+}
+
+/* Tracer ->reset: disable the callback before unhooking it from ftrace. */
+static void persistent_trace_reset(struct trace_array *tr)
+{
+	persistent_trace_enabled = 0;
+	smp_wmb();
+
+	unregister_ftrace_function(&trace_ops);
+
+	tracing_stop_cmdline_record();
+}
+
+/* Tracer ->start: wipe all per-CPU buffers for a fresh run. */
+static void persistent_trace_start(struct trace_array *tr)
+{
+	tracing_reset_online_cpus(tr);
+}
+
+/*
+ * ftrace callback: append one (ip, parent_ip) record to the persistent
+ * RAM zone. The CPU number is OR-ed into the low bits of ip (decoded as
+ * "rec->ip & 3" in persistent_trace_seq_show(), so only the low two bits
+ * are preserved - presumably relies on >=4-byte instruction alignment
+ * and <=4 CPUs; TODO confirm).
+ */
+static void persistent_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = persistent_trace_array;
+	struct trace_array_cpu *data;
+	long disabled;
+	struct persistent_trace_record rec;
+	unsigned long flags;
+	int cpu;
+
+	/* Pairs with the smp_wmb() in init/reset. */
+	smp_rmb();
+	if (unlikely(!persistent_trace_enabled))
+		return;
+
+	if (unlikely(oops_in_progress))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	/* per-CPU recursion guard: only record at nesting depth 1 */
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		rec.ip = ip;
+		rec.parent_ip = parent_ip;
+		rec.ip |= cpu;
+		persistent_ram_write(persistent_trace, &rec, sizeof(rec));
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_ops __read_mostly = {
+ .func = persistent_trace_call,
+ .flags = FTRACE_OPS_FL_GLOBAL,
+};
+
+static struct tracer persistent_tracer __read_mostly = {
+ .name = "persistent",
+ .init = persistent_tracer_init,
+ .reset = persistent_trace_reset,
+ .start = persistent_trace_start,
+ .wait_pipe = poll_wait_pipe,
+};
+
+struct persistent_trace_seq_data {
+ const void *ptr;
+ size_t off;
+ size_t size;
+};
+
+/*
+ * seq_file ->start: allocate a cursor over the records recovered from the
+ * previous boot. The initial offset skips any partial record at the head
+ * (old_size % REC_SIZE), then advances by *pos whole records.
+ * NOTE(review): these seq callbacks are not marked static although they
+ * are only referenced by persistent_trace_seq_ops below.
+ */
+void *persistent_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct persistent_trace_seq_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->ptr = persistent_ram_old(persistent_trace);
+	data->size = persistent_ram_old_size(persistent_trace);
+	data->off = data->size % REC_SIZE;
+
+	data->off += *pos * REC_SIZE;
+
+	/* Past the last complete record: end of sequence. */
+	if (data->off + REC_SIZE > data->size) {
+		kfree(data);
+		return NULL;
+	}
+
+	return data;
+
+}
+/* seq_file ->stop: free the cursor allocated in ->start. */
+void persistent_trace_seq_stop(struct seq_file *s, void *v)
+{
+	kfree(v);
+}
+
+/* seq_file ->next: advance the cursor one record, NULL at the end. */
+void *persistent_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct persistent_trace_seq_data *data = v;
+
+	data->off += REC_SIZE;
+
+	if (data->off + REC_SIZE > data->size)
+		return NULL;
+
+	(*pos)++;
+
+	return data;
+}
+
+/*
+ * seq_file ->show: print one recovered record as
+ * "<cpu> <ip> <parent_ip> <sym> <- <sym>". The CPU is recovered from the
+ * low two bits of ip (see persistent_trace_call()); note the bits are not
+ * masked off before symbol lookup - presumably alignment makes this
+ * harmless, TODO confirm.
+ */
+int persistent_trace_seq_show(struct seq_file *s, void *v)
+{
+	struct persistent_trace_seq_data *data = v;
+	struct persistent_trace_record *rec;
+
+	rec = (struct persistent_trace_record *)(data->ptr + data->off);
+
+	seq_printf(s, "%ld %08lx %08lx %pf <- %pF\n",
+		rec->ip & 3, rec->ip, rec->parent_ip,
+		(void *)rec->ip, (void *)rec->parent_ip);
+
+	return 0;
+}
+
+static const struct seq_operations persistent_trace_seq_ops = {
+ .start = persistent_trace_seq_start,
+ .next = persistent_trace_seq_next,
+ .stop = persistent_trace_seq_stop,
+ .show = persistent_trace_seq_show,
+};
+
+static int persistent_trace_old_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &persistent_trace_seq_ops);
+}
+
+static const struct file_operations persistent_trace_old_fops = {
+ .open = persistent_trace_old_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Platform probe: attach to the persistent RAM ringbuffer, register the
+ * "persistent" tracer, and, if data survived the last boot, expose it at
+ * <debugfs>/persistent_trace.
+ * NOTE(review): a register_tracer() failure is only logged - the probe
+ * still returns 0; confirm this best-effort behaviour is intended.
+ */
+static int __devinit persistent_trace_probe(struct platform_device *pdev)
+{
+	struct dentry *d;
+	int ret;
+
+	persistent_trace = persistent_ram_init_ringbuffer(&pdev->dev, false);
+	if (IS_ERR(persistent_trace)) {
+		pr_err("persistent_trace: failed to init ringbuffer: %ld\n",
+				PTR_ERR(persistent_trace));
+		return PTR_ERR(persistent_trace);
+	}
+
+	ret = register_tracer(&persistent_tracer);
+	if (ret)
+		pr_err("persistent_trace: failed to register tracer");
+
+	if (persistent_ram_old_size(persistent_trace) > 0) {
+		d = debugfs_create_file("persistent_trace", S_IRUGO, NULL,
+				NULL, &persistent_trace_old_fops);
+		if (IS_ERR_OR_NULL(d))
+			pr_err("persistent_trace: failed to create old file\n");
+	}
+
+	return 0;
+}
+
+static struct platform_driver persistent_trace_driver = {
+	.probe = persistent_trace_probe,
+	.driver		= {
+		.name = "persistent_trace",
+	},
+};
+
+/* Registered at core_initcall so boot-time traces are captured early. */
+static int __init persistent_trace_init(void)
+{
+	return platform_driver_register(&persistent_trace_driver);
+}
+core_initcall(persistent_trace_init);
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
new file mode 100644
index 00000000000000..470f7ad9c0730b
--- /dev/null
+++ b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
@@ -0,0 +1,6 @@
+What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ This property causes an internal calibration of the als gain trim
+ value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
new file mode 100644
index 00000000000000..b2798b258bf7bb
--- /dev/null
+++ b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
@@ -0,0 +1,13 @@
+What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
+KernelVersion: 3.3-rc1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Causes an internal calibration of the als gain trim
+ value which is later used in calculating illuminance in lux.
+
+What: /sys/bus/iio/devices/device[n]/in_proximity0_calibrate
+KernelVersion: 3.3-rc1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Causes a recalculation and adjustment to the
+ proximity_thresh_rising_value.
diff --git a/drivers/staging/iio/Documentation/lsiio.c b/drivers/staging/iio/Documentation/lsiio.c
new file mode 100644
index 00000000000000..98a0de098130c1
--- /dev/null
+++ b/drivers/staging/iio/Documentation/lsiio.c
@@ -0,0 +1,163 @@
+/*
+ * Industrial I/O utilities - lsiio.c
+ *
+ * Copyright (c) 2010 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/dir.h>
+#include "iio_utils.h"
+
+
+static enum verbosity {
+ VERBLEVEL_DEFAULT, /* 0 gives lspci behaviour */
+ VERBLEVEL_SENSORS, /* 1 lists sensors */
+} verblevel = VERBLEVEL_DEFAULT;
+
+const char *type_device = "iio:device";
+const char *type_trigger = "trigger";
+
+
+/* True iff str starts with prefix and is strictly longer than it. */
+static inline int check_prefix(const char *str, const char *prefix)
+{
+	return strlen(str) > strlen(prefix) &&
+		strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+/* True iff str ends with postfix and is strictly longer than it. */
+static inline int check_postfix(const char *str, const char *postfix)
+{
+	return strlen(str) > strlen(postfix) &&
+		strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
+}
+
+/*
+ * Print the raw input channels (in_*_raw attributes) found in a device's
+ * sysfs directory. Returns 0 on success, -errno if it cannot be opened.
+ */
+static int dump_channels(const char *dev_dir_name)
+{
+	DIR *dp;
+	const struct dirent *ent;
+
+	dp = opendir(dev_dir_name);
+	if (dp == NULL)
+		return -errno;
+	while (ent = readdir(dp), ent != NULL)
+		if (check_prefix(ent->d_name, "in_") &&
+		    check_postfix(ent->d_name, "_raw")) {
+			printf("   %-10s\n", ent->d_name);
+		}
+
+	/* The original leaked the DIR handle on every call. */
+	closedir(dp);
+	return 0;
+}
+
+/*
+ * Print "Device NNN: <name>" for one iio:deviceN sysfs directory, plus its
+ * channels when verbose. NOTE(review): the read_sysfs_string() return
+ * value is not checked - name may be stale on failure.
+ */
+static int dump_one_device(const char *dev_dir_name)
+{
+	char name[IIO_MAX_NAME_LENGTH];
+	int dev_idx;
+	int retval;
+
+	/* Parse N out of ".../iio:deviceN". */
+	retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
+			"%i", &dev_idx);
+	if (retval != 1)
+		return -EINVAL;
+	read_sysfs_string("name", dev_dir_name, name);
+	printf("Device %03d: %s\n", dev_idx, name);
+
+	if (verblevel >= VERBLEVEL_SENSORS)
+		return dump_channels(dev_dir_name);
+	return 0;
+}
+
+/* Same as dump_one_device() for ".../triggerN" directories. */
+static int dump_one_trigger(const char *dev_dir_name)
+{
+	char name[IIO_MAX_NAME_LENGTH];
+	int dev_idx;
+	int retval;
+
+	retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+			"%i", &dev_idx);
+	if (retval != 1)
+		return -EINVAL;
+	read_sysfs_string("name", dev_dir_name, name);
+	printf("Trigger %03d: %s\n", dev_idx, name);
+	return 0;
+}
+
+/*
+ * Walk the IIO sysfs directory twice: first listing devices (and their
+ * channels when verbose), then triggers.
+ */
+static void dump_devices(void)
+{
+	const struct dirent *ent;
+	DIR *dp;
+
+	dp = opendir(iio_dir);
+	if (dp == NULL) {
+		printf("No industrial I/O devices available\n");
+		return;
+	}
+
+	while (ent = readdir(dp), ent != NULL) {
+		if (check_prefix(ent->d_name, type_device)) {
+			char *dev_dir_name;
+
+			/*
+			 * On asprintf() failure dev_dir_name is
+			 * indeterminate; skip the entry (the original
+			 * ignored the return value).
+			 */
+			if (asprintf(&dev_dir_name, "%s%s", iio_dir,
+				     ent->d_name) < 0)
+				continue;
+			dump_one_device(dev_dir_name);
+			free(dev_dir_name);
+			if (verblevel >= VERBLEVEL_SENSORS)
+				printf("\n");
+		}
+	}
+	rewinddir(dp);
+	while (ent = readdir(dp), ent != NULL) {
+		if (check_prefix(ent->d_name, type_trigger)) {
+			char *dev_dir_name;
+
+			if (asprintf(&dev_dir_name, "%s%s", iio_dir,
+				     ent->d_name) < 0)
+				continue;
+			dump_one_trigger(dev_dir_name);
+			free(dev_dir_name);
+		}
+	}
+	closedir(dp);
+}
+
+/*
+ * lsiio entry point: parse options (-v may repeat to raise verbosity;
+ * -d/-D are accepted by getopt but currently unhandled) and dump devices.
+ */
+int main(int argc, char **argv)
+{
+	int c, err = 0;
+
+	while ((c = getopt(argc, argv, "d:D:v")) != EOF) {
+		switch (c) {
+		case 'v':
+			verblevel++;
+			break;
+
+		case '?':
+		default:
+			err++;
+			break;
+		}
+	}
+	/* Positional arguments are not supported. */
+	if (err || argc > optind) {
+		fprintf(stderr, "Usage: lsiio [options]...\n"
+			"List industrial I/O devices\n"
+			"  -v, --verbose\n"
+			"      Increase verbosity (may be given multiple times)\n"
+			);
+		exit(1);
+	}
+
+	dump_devices();
+
+	return 0;
+}
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192 b/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
new file mode 100644
index 00000000000000..1c35c507cc0563
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-ad7192
@@ -0,0 +1,20 @@
+What: /sys/.../iio:deviceX/ac_excitation_en
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, is used to enable the AC
+ excitation mode found on some converters. In ac excitation mode,
+ the polarity of the excitation voltage is reversed on
+ alternate cycles, to eliminate DC errors.
+
+What: /sys/.../iio:deviceX/bridge_switch_en
+KernelVersion: 3.1.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute, if available, is used to close or open the
+ bridge power down switch found on some converters.
+ In bridge applications, such as strain gauges and load cells,
+ the bridge itself consumes the majority of the current in the
+ system. To minimize the current consumption of the system,
+	the bridge can be disconnected (when it is not being used)
+	using the bridge_switch_en attribute.
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
new file mode 100644
index 00000000000000..f053535385bf94
--- /dev/null
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -0,0 +1,1684 @@
+/*
+ * Freescale i.MX28 LRADC driver
+ *
+ * Copyright (c) 2012 DENX Software Engineering, GmbH.
+ * Marek Vasut <marex@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/stmp_device.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/clk.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define DRIVER_NAME "mxs-lradc"
+
+#define LRADC_MAX_DELAY_CHANS 4
+#define LRADC_MAX_MAPPED_CHANS 8
+#define LRADC_MAX_TOTAL_CHANS 16
+
+#define LRADC_DELAY_TIMER_HZ 2000
+
+/*
+ * Make this runtime configurable if necessary. Currently, if the buffered mode
+ * is enabled, the LRADC takes LRADC_DELAY_TIMER_LOOP samples of data before
+ * triggering IRQ. The sampling happens every (LRADC_DELAY_TIMER_PER / 2000)
+ * seconds. The result is that the samples arrive every 500mS.
+ */
+#define LRADC_DELAY_TIMER_PER 200
+#define LRADC_DELAY_TIMER_LOOP 5
+
+/*
+ * Once the pen touches the touchscreen, the touchscreen switches from
+ * IRQ-driven mode to polling mode to prevent interrupt storm. The polling
+ * is realized by worker thread, which is called every 20 or so milliseconds.
+ * This gives the touchscreen enough responsiveness and does not strain the system
+ * too much.
+ */
+#define LRADC_TS_SAMPLE_DELAY_MS 5
+
+/*
+ * The LRADC reads the following amount of samples from each touchscreen
+ * channel and the driver then computes the average of these.
+ */
+#define LRADC_TS_SAMPLE_AMOUNT 4
+
+enum mxs_lradc_id {
+ IMX23_LRADC,
+ IMX28_LRADC,
+};
+
+static const char * const mx23_lradc_irq_names[] = {
+ "mxs-lradc-touchscreen",
+ "mxs-lradc-channel0",
+ "mxs-lradc-channel1",
+ "mxs-lradc-channel2",
+ "mxs-lradc-channel3",
+ "mxs-lradc-channel4",
+ "mxs-lradc-channel5",
+ "mxs-lradc-channel6",
+ "mxs-lradc-channel7",
+};
+
+static const char * const mx28_lradc_irq_names[] = {
+ "mxs-lradc-touchscreen",
+ "mxs-lradc-thresh0",
+ "mxs-lradc-thresh1",
+ "mxs-lradc-channel0",
+ "mxs-lradc-channel1",
+ "mxs-lradc-channel2",
+ "mxs-lradc-channel3",
+ "mxs-lradc-channel4",
+ "mxs-lradc-channel5",
+ "mxs-lradc-channel6",
+ "mxs-lradc-channel7",
+ "mxs-lradc-button0",
+ "mxs-lradc-button1",
+};
+
+struct mxs_lradc_of_config {
+ const int irq_count;
+ const char * const *irq_name;
+ const uint32_t *vref_mv;
+};
+
+#define VREF_MV_BASE 1850
+
+static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE * 2, /* CH6 VDDIO */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE, /* CH10 */
+ VREF_MV_BASE, /* CH11 */
+ VREF_MV_BASE, /* CH12 USB_DP */
+ VREF_MV_BASE, /* CH13 USB_DN */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
+};
+
+static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = {
+ VREF_MV_BASE, /* CH0 */
+ VREF_MV_BASE, /* CH1 */
+ VREF_MV_BASE, /* CH2 */
+ VREF_MV_BASE, /* CH3 */
+ VREF_MV_BASE, /* CH4 */
+ VREF_MV_BASE, /* CH5 */
+ VREF_MV_BASE, /* CH6 */
+ VREF_MV_BASE * 4, /* CH7 VBATT */
+ VREF_MV_BASE, /* CH8 Temp sense 0 */
+ VREF_MV_BASE, /* CH9 Temp sense 1 */
+ VREF_MV_BASE * 2, /* CH10 VDDIO */
+ VREF_MV_BASE, /* CH11 VTH */
+ VREF_MV_BASE * 2, /* CH12 VDDA */
+ VREF_MV_BASE, /* CH13 VDDD */
+ VREF_MV_BASE, /* CH14 VBG */
+ VREF_MV_BASE * 4, /* CH15 VDD5V */
+};
+
+static const struct mxs_lradc_of_config mxs_lradc_of_config[] = {
+ [IMX23_LRADC] = {
+ .irq_count = ARRAY_SIZE(mx23_lradc_irq_names),
+ .irq_name = mx23_lradc_irq_names,
+ .vref_mv = mx23_vref_mv,
+ },
+ [IMX28_LRADC] = {
+ .irq_count = ARRAY_SIZE(mx28_lradc_irq_names),
+ .irq_name = mx28_lradc_irq_names,
+ .vref_mv = mx28_vref_mv,
+ },
+};
+
+enum mxs_lradc_ts {
+ MXS_LRADC_TOUCHSCREEN_NONE = 0,
+ MXS_LRADC_TOUCHSCREEN_4WIRE,
+ MXS_LRADC_TOUCHSCREEN_5WIRE,
+};
+
+/*
+ * Touchscreen handling
+ */
+enum lradc_ts_plate {
+ LRADC_TOUCH = 0,
+ LRADC_SAMPLE_X,
+ LRADC_SAMPLE_Y,
+ LRADC_SAMPLE_PRESSURE,
+ LRADC_SAMPLE_VALID,
+};
+
+enum mxs_lradc_divbytwo {
+ MXS_LRADC_DIV_DISABLED = 0,
+ MXS_LRADC_DIV_ENABLED,
+};
+
+struct mxs_lradc_scale {
+ unsigned int integer;
+ unsigned int nano;
+};
+
+struct mxs_lradc {
+ struct device *dev;
+ void __iomem *base;
+ int irq[13];
+
+ struct clk *clk;
+
+ uint32_t *buffer;
+ struct iio_trigger *trig;
+
+ struct mutex lock;
+
+ struct completion completion;
+
+ const uint32_t *vref_mv;
+ struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2];
+ unsigned long is_divided;
+
+ /*
+ * Touchscreen LRADC channels receives a private slot in the CTRL4
+ * register, the slot #7. Therefore only 7 slots instead of 8 in the
+ * CTRL4 register can be mapped to LRADC channels when using the
+ * touchscreen.
+ *
+ * Furthermore, certain LRADC channels are shared between touchscreen
+ * and/or touch-buttons and generic LRADC block. Therefore when using
+ * either of these, these channels are not available for the regular
+ * sampling. The shared channels are as follows:
+ *
+ * CH0 -- Touch button #0
+ * CH1 -- Touch button #1
+ * CH2 -- Touch screen XPUL
+ * CH3 -- Touch screen YPLL
+ * CH4 -- Touch screen XNUL
+ * CH5 -- Touch screen YNLR
+ * CH6 -- Touch screen WIPER (5-wire only)
+ *
+ * The bitfields below represents which parts of the LRADC block are
+ * switched into special mode of operation. These channels can not
+ * be sampled as regular LRADC channels. The driver will refuse any
+ * attempt to sample these channels.
+ */
+#define CHAN_MASK_TOUCHBUTTON (0x3 << 0)
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
+ enum mxs_lradc_ts use_touchscreen;
+ bool use_touchbutton;
+
+ struct input_dev *ts_input;
+
+ enum mxs_lradc_id soc;
+ enum lradc_ts_plate cur_plate; /* statemachine */
+ bool ts_valid;
+ unsigned ts_x_pos;
+ unsigned ts_y_pos;
+ unsigned ts_pressure;
+
+ /* handle touchscreen's physical behaviour */
+ /* samples per coordinate */
+ unsigned over_sample_cnt;
+ /* time clocks between samples */
+ unsigned over_sample_delay;
+ /* time in clocks to wait after the plates where switched */
+ unsigned settling_delay;
+};
+
+#define LRADC_CTRL0 0x00
+# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE (1 << 23)
+# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE (1 << 22)
+# define LRADC_CTRL0_MX28_YNNSW /* YM */ (1 << 21)
+# define LRADC_CTRL0_MX28_YPNSW /* YP */ (1 << 20)
+# define LRADC_CTRL0_MX28_YPPSW /* YP */ (1 << 19)
+# define LRADC_CTRL0_MX28_XNNSW /* XM */ (1 << 18)
+# define LRADC_CTRL0_MX28_XNPSW /* XM */ (1 << 17)
+# define LRADC_CTRL0_MX28_XPPSW /* XP */ (1 << 16)
+
+# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE (1 << 20)
+# define LRADC_CTRL0_MX23_YM (1 << 19)
+# define LRADC_CTRL0_MX23_XM (1 << 18)
+# define LRADC_CTRL0_MX23_YP (1 << 17)
+# define LRADC_CTRL0_MX23_XP (1 << 16)
+
+# define LRADC_CTRL0_MX28_PLATE_MASK \
+ (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \
+ LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \
+ LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW)
+
+# define LRADC_CTRL0_MX23_PLATE_MASK \
+ (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \
+ LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP)
+
+#define LRADC_CTRL1 0x10
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN (1 << 24)
+#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
+#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16)
+#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ (1 << 8)
+#define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n))
+#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff
+#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
+
+#define LRADC_CTRL2 0x20
+#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
+#define LRADC_CTRL2_TEMPSENSE_PWD (1 << 15)
+
+#define LRADC_STATUS 0x40
+#define LRADC_STATUS_TOUCH_DETECT_RAW (1 << 0)
+
+#define LRADC_CH(n) (0x50 + (0x10 * (n)))
+#define LRADC_CH_ACCUMULATE (1 << 29)
+#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
+#define LRADC_CH_NUM_SAMPLES_OFFSET 24
+#define LRADC_CH_NUM_SAMPLES(x) \
+ ((x) << LRADC_CH_NUM_SAMPLES_OFFSET)
+#define LRADC_CH_VALUE_MASK 0x3ffff
+#define LRADC_CH_VALUE_OFFSET 0
+
+#define LRADC_DELAY(n) (0xd0 + (0x10 * (n)))
+#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xff << 24)
+#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
+#define LRADC_DELAY_TRIGGER(x) \
+ (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_LRADCS_MASK)
+#define LRADC_DELAY_KICK (1 << 20)
+#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16)
+#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16
+#define LRADC_DELAY_TRIGGER_DELAYS(x) \
+ (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_DELAYS_MASK)
+#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11)
+#define LRADC_DELAY_LOOP_COUNT_OFFSET 11
+#define LRADC_DELAY_LOOP(x) \
+ (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \
+ LRADC_DELAY_LOOP_COUNT_MASK)
+#define LRADC_DELAY_DELAY_MASK 0x7ff
+#define LRADC_DELAY_DELAY_OFFSET 0
+#define LRADC_DELAY_DELAY(x) \
+ (((x) << LRADC_DELAY_DELAY_OFFSET) & \
+ LRADC_DELAY_DELAY_MASK)
+
+#define LRADC_CTRL4 0x140
+#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
+#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
+
+#define LRADC_RESOLUTION 12
+#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+
+/* Set bits in an LRADC register via the hardware SET shadow register. */
+static void mxs_lradc_reg_set(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+	writel(val, lradc->base + reg + STMP_OFFSET_REG_SET);
+}
+
+/* Clear bits in an LRADC register via the hardware CLR shadow register. */
+static void mxs_lradc_reg_clear(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+	writel(val, lradc->base + reg + STMP_OFFSET_REG_CLR);
+}
+
+/* Write an LRADC register directly (no SET/CLR shadowing). */
+static void mxs_lradc_reg_wrt(struct mxs_lradc *lradc, u32 val, u32 reg)
+{
+	writel(val, lradc->base + reg);
+}
+
+static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_PLATE_MASK;
+ return LRADC_CTRL0_MX28_PLATE_MASK;
+}
+
+static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
+ return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
+}
+
+static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL1_MX23_LRADC_IRQ_MASK;
+ return LRADC_CTRL1_MX28_LRADC_IRQ_MASK;
+}
+
+static u32 mxs_lradc_touch_detect_bit(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE;
+ return LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE;
+}
+
+static u32 mxs_lradc_drive_x_plate(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_XP | LRADC_CTRL0_MX23_XM;
+ return LRADC_CTRL0_MX28_XPPSW | LRADC_CTRL0_MX28_XNNSW;
+}
+
+static u32 mxs_lradc_drive_y_plate(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_YM;
+ return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_YNNSW;
+}
+
+static u32 mxs_lradc_drive_pressure(struct mxs_lradc *lradc)
+{
+ if (lradc->soc == IMX23_LRADC)
+ return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XM;
+ return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW;
+}
+
+static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
+{
+ return !!(readl(lradc->base + LRADC_STATUS) &
+ LRADC_STATUS_TOUCH_DETECT_RAW);
+}
+
/*
 * Arm one oversampled conversion of touchscreen channel @ch.
 * Delay unit #2 burns the plate settling time, then chains into delay
 * unit #3 which triggers the actual (looped) conversion; an IRQ fires on
 * @ch when the accumulation completes.  Register write order matters.
 */
static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
{
	/*
	 * prepare for oversampling conversion
	 *
	 * from the datasheet:
	 * "The ACCUMULATE bit in the appropriate channel register
	 * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater then 0;
	 * otherwise, the IRQs will not fire."
	 */
	mxs_lradc_reg_wrt(lradc, LRADC_CH_ACCUMULATE |
			LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
			LRADC_CH(ch));

	/* from the datasheet:
	 * "Software must clear this register in preparation for a
	 * multi-cycle accumulation.
	 */
	mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch));

	/* prepare the delay/loop unit according to the oversampling count */
	mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch) |
		LRADC_DELAY_TRIGGER_DELAYS(0) |
		LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
			LRADC_DELAY(3));

	/* drop any stale IRQ status on the touchscreen channels (2..5) */
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);

	/* wake us again, when the complete conversion is done */
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
	/*
	 * after changing the touchscreen plates setting
	 * the signals need some initial time to settle. Start the
	 * SoC's delay unit and start the conversion later
	 * and automatically.
	 */
	mxs_lradc_reg_wrt(lradc,
		LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
		LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
		LRADC_DELAY_KICK |
		LRADC_DELAY_DELAY(lradc->settling_delay),
			LRADC_DELAY(2));
}
+
/*
 * Pressure detection is special:
 * We want to do both required measurements for the pressure detection in
 * one turn. Use the hardware features to chain both conversions and let the
 * hardware report one interrupt if both conversions are done
 */
static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
							unsigned ch2)
{
	u32 reg;

	/*
	 * prepare for oversampling conversion
	 *
	 * from the datasheet:
	 * "The ACCUMULATE bit in the appropriate channel register
	 * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater then 0;
	 * otherwise, the IRQs will not fire."
	 */
	reg = LRADC_CH_ACCUMULATE |
		LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1);
	mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
	mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));

	/* from the datasheet:
	 * "Software must clear this register in preparation for a
	 * multi-cycle accumulation.
	 */
	mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch1));
	mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch2));

	/* prepare the delay/loop unit according to the oversampling count */
	mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch1) |
		LRADC_DELAY_TRIGGER(1 << ch2) | /* start both channels */
		LRADC_DELAY_TRIGGER_DELAYS(0) |
		LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) |
		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
			LRADC_DELAY(3));

	/* drop any stale IRQ status on the touchscreen channels (2..5) */
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);

	/* wake us again, when the conversions are done */
	/* only ch2's IRQ is enabled: one interrupt for both conversions */
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
	/*
	 * after changing the touchscreen plates setting
	 * the signals need some initial time to settle. Start the
	 * SoC's delay unit and start the conversion later
	 * and automatically.
	 */
	mxs_lradc_reg_wrt(lradc,
		LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */
		LRADC_DELAY_TRIGGER_DELAYS(1 << 3) | /* trigger DELAY unit#3 */
		LRADC_DELAY_KICK |
		LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2));
}
+
+static unsigned mxs_lradc_read_raw_channel(struct mxs_lradc *lradc,
+ unsigned channel)
+{
+ u32 reg;
+ unsigned num_samples, val;
+
+ reg = readl(lradc->base + LRADC_CH(channel));
+ if (reg & LRADC_CH_ACCUMULATE)
+ num_samples = lradc->over_sample_cnt;
+ else
+ num_samples = 1;
+
+ val = (reg & LRADC_CH_VALUE_MASK) >> LRADC_CH_VALUE_OFFSET;
+ return val / num_samples;
+}
+
/*
 * Compute a pressure value from the chained @ch1/@ch2 conversions.
 * Returns m1/m2 scaled to the full ADC range, or mid-scale if m2 is 0.
 *
 * NOTE(review): the wait loop below busy-spins with no timeout or
 * cpu_relax(); if one channel's IRQ never latches this never returns —
 * worth confirming against the IRQ setup in setup_ts_pressure().
 */
static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
						unsigned ch1, unsigned ch2)
{
	u32 reg, mask;
	unsigned pressure, m1, m2;

	/* both per-channel IRQ bits must be latched before reading back */
	mask = LRADC_CTRL1_LRADC_IRQ(ch1) | LRADC_CTRL1_LRADC_IRQ(ch2);
	reg = readl(lradc->base + LRADC_CTRL1) & mask;

	while (reg != mask) {
		reg = readl(lradc->base + LRADC_CTRL1) & mask;
		dev_dbg(lradc->dev, "One channel is still busy: %X\n", reg);
	}

	m1 = mxs_lradc_read_raw_channel(lradc, ch1);
	m2 = mxs_lradc_read_raw_channel(lradc, ch2);

	if (m2 == 0) {
		/* avoid division by zero; report mid-scale pressure */
		dev_warn(lradc->dev, "Cannot calculate pressure\n");
		return 1 << (LRADC_RESOLUTION - 1);
	}

	/* simply scale the value from 0 ... max ADC resolution */
	pressure = m1;
	pressure *= (1 << LRADC_RESOLUTION);
	pressure /= m2;

	dev_dbg(lradc->dev, "Pressure = %u\n", pressure);
	return pressure;
}
+
/* Physical LRADC channels hard-wired to the 4-wire touchscreen plates */
#define TS_CH_XP 2
#define TS_CH_YP 3
#define TS_CH_XM 4
#define TS_CH_YM 5
+
+static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
+{
+ u32 reg;
+ int val;
+
+ reg = readl(lradc->base + LRADC_CTRL1);
+
+ /* only channels 3 to 5 are of interest here */
+ if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
+ } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
+ } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
+ LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
+ val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
+ } else {
+ return -EIO;
+ }
+
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+
+ return val;
+}
+
+/*
+ * YP(open)--+-------------+
+ * | |--+
+ * | | |
+ * YM(-)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(weak+) XM(open)
+ *
+ * "weak+" means 200k Ohm VDDIO
+ * (-) means GND
+ */
/* Release all plate drivers and arm the hardware touch-detect circuit. */
static void mxs_lradc_setup_touch_detection(struct mxs_lradc *lradc)
{
	/*
	 * In order to detect a touch event the 'touch detect enable' bit
	 * enables:
	 * - a weak pullup to the X+ connector
	 * - a strong ground at the Y- connector
	 */
	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
	mxs_lradc_reg_set(lradc, mxs_lradc_touch_detect_bit(lradc),
				LRADC_CTRL0);
}
+
+/*
+ * YP(meas)--+-------------+
+ * | |--+
+ * | | |
+ * YM(open)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(+) XM(-)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
/* Drive the X plate and sample the X position on the Y+ contact. */
static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
{
	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
	mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);

	/* state consumed by the IRQ-driven machine in handle_touch() */
	lradc->cur_plate = LRADC_SAMPLE_X;
	mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
}
+
+/*
+ * YP(+)--+-------------+
+ * | |--+
+ * | | |
+ * YM(-)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(open) XM(meas)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
/* Drive the Y plate and sample the Y position on the X- contact. */
static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
{
	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
	mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);

	/* state consumed by the IRQ-driven machine in handle_touch() */
	lradc->cur_plate = LRADC_SAMPLE_Y;
	mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
}
+
+/*
+ * YP(+)--+-------------+
+ * | |--+
+ * | | |
+ * YM(meas)--+-------------+ |
+ * +--------------+
+ * | |
+ * XP(meas) XM(-)
+ *
+ * (+) means here 1.85 V
+ * (-) means here GND
+ */
/* Drive Y+/X- and chain conversions on X+ and Y- for a pressure sample. */
static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
{
	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
	mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);

	/* state consumed by the IRQ-driven machine in handle_touch() */
	lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
	mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
}
+
/* (Re)enter the idle state: wait for a touch-detect interrupt. */
static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
{
	mxs_lradc_setup_touch_detection(lradc);

	lradc->cur_plate = LRADC_TOUCH;
	/* clear any stale status/enable, then enable the touch-detect IRQ */
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
				LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
+
/* Push the latest X/Y/pressure sample to the input subsystem as a touch. */
static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
{
	input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
	input_report_abs(lradc->ts_input, ABS_Y, lradc->ts_y_pos);
	input_report_abs(lradc->ts_input, ABS_PRESSURE, lradc->ts_pressure);
	input_report_key(lradc->ts_input, BTN_TOUCH, 1);
	input_sync(lradc->ts_input);
}
+
/*
 * Measurement cycle done: re-arm touch detection and schedule a throwaway
 * conversion on channel 5 whose sole purpose is to let the plates settle
 * before the next LRADC_SAMPLE_VALID check.
 */
static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
{
	mxs_lradc_setup_touch_detection(lradc);
	lradc->cur_plate = LRADC_SAMPLE_VALID;
	/*
	 * start a dummy conversion to burn time to settle the signals
	 * note: we are not interested in the conversion's value
	 */
	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
	mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
		LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
			LRADC_DELAY(2));
}
+
+/*
+ * in order to avoid false measurements, report only samples where
+ * the surface is still touched after the position measurement
+ */
/*
 * in order to avoid false measurements, report only samples where
 * the surface is still touched after the position measurement
 */
static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
{
	/* if it is still touched, report the sample */
	if (valid && mxs_lradc_check_touch_event(lradc)) {
		lradc->ts_valid = true;
		mxs_lradc_report_ts_event(lradc);
	}

	/* if it is even still touched, continue with the next measurement */
	if (mxs_lradc_check_touch_event(lradc)) {
		mxs_lradc_prepare_y_pos(lradc);
		return;
	}

	if (lradc->ts_valid) {
		/* signal the release */
		lradc->ts_valid = false;
		input_report_key(lradc->ts_input, BTN_TOUCH, 0);
		input_sync(lradc->ts_input);
	}

	/* if it is released, wait for the next touch via IRQ */
	lradc->cur_plate = LRADC_TOUCH;
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
+
/*
 * touchscreen's state machine, advanced from IRQ context.
 * Cycle: TOUCH (idle) -> SAMPLE_Y -> SAMPLE_X -> SAMPLE_PRESSURE ->
 * SAMPLE_VALID -> back to SAMPLE_Y (still pressed) or TOUCH (released).
 * lradc->cur_plate records which step armed the pending conversion.
 */
static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
{
	int val;

	switch (lradc->cur_plate) {
	case LRADC_TOUCH:
		/*
		 * start with the Y-pos, because it uses nearly the same plate
		 * settings like the touch detection
		 */
		if (mxs_lradc_check_touch_event(lradc)) {
			mxs_lradc_reg_clear(lradc,
					LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
					LRADC_CTRL1);
			mxs_lradc_prepare_y_pos(lradc);
		}
		/* ack the touch-detect IRQ in either case */
		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
					LRADC_CTRL1);
		return;

	case LRADC_SAMPLE_Y:
		val = mxs_lradc_read_ts_channel(lradc);
		if (val < 0) {
			mxs_lradc_enable_touch_detection(lradc); /* re-start */
			return;
		}
		lradc->ts_y_pos = val;
		mxs_lradc_prepare_x_pos(lradc);
		return;

	case LRADC_SAMPLE_X:
		val = mxs_lradc_read_ts_channel(lradc);
		if (val < 0) {
			mxs_lradc_enable_touch_detection(lradc); /* re-start */
			return;
		}
		lradc->ts_x_pos = val;
		mxs_lradc_prepare_pressure(lradc);
		return;

	case LRADC_SAMPLE_PRESSURE:
		lradc->ts_pressure =
			mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
		mxs_lradc_complete_touch_event(lradc);
		return;

	case LRADC_SAMPLE_VALID:
		val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
		mxs_lradc_finish_touch_event(lradc, 1);
		break;
	}
}
+
/*
 * Raw I/O operations
 */
/*
 * One-shot conversion of physical channel @chan through virtual slot 0.
 * Returns IIO_VAL_INT with *val filled in, -EBUSY when buffered capture
 * holds the lock, -ETIMEDOUT after 1 s, or a negative error from the
 * killable wait.
 */
static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
{
	struct mxs_lradc *lradc = iio_priv(iio_dev);
	int ret;

	/*
	 * See if there is no buffered operation in progess. If there is, simply
	 * bail out. This can be improved to support both buffered and raw IO at
	 * the same time, yet the code becomes horribly complicated. Therefore I
	 * applied KISS principle here.
	 */
	ret = mutex_trylock(&lradc->lock);
	if (!ret)
		return -EBUSY;

	reinit_completion(&lradc->completion);

	/*
	 * No buffered operation in progress, map the channel and trigger it.
	 * Virtual channel 0 is always used here as the others are always not
	 * used if doing raw sampling.
	 */
	if (lradc->soc == IMX28_LRADC)
		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
			LRADC_CTRL1);
	/* stop any scheduled conversion on all 8 virtual channels */
	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);

	/* Enable / disable the divider per requirement */
	if (test_bit(chan, &lradc->is_divided))
		mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
			LRADC_CTRL2);
	else
		mxs_lradc_reg_clear(lradc,
			1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);

	/* Clean the slot's previous content, then set new one. */
	mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
			LRADC_CTRL4);
	mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4);

	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0));

	/* Enable the IRQ and start sampling the channel. */
	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);
	mxs_lradc_reg_set(lradc, 1 << 0, LRADC_CTRL0);

	/* Wait for completion on the channel, 1 second max. */
	ret = wait_for_completion_killable_timeout(&lradc->completion, HZ);
	if (!ret)
		ret = -ETIMEDOUT;
	if (ret < 0)
		goto err;

	/* Read the data. */
	*val = readl(lradc->base + LRADC_CH(0)) & LRADC_CH_VALUE_MASK;
	ret = IIO_VAL_INT;

err:
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1);

	mutex_unlock(&lradc->lock);

	return ret;
}
+
+static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val)
+{
+ int ret, min, max;
+
+ ret = mxs_lradc_read_single(iio_dev, 8, &min);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ ret = mxs_lradc_read_single(iio_dev, 9, &max);
+ if (ret != IIO_VAL_INT)
+ return ret;
+
+ *val = max - min;
+
+ return IIO_VAL_INT;
+}
+
+static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long m)
+{
+ struct mxs_lradc *lradc = iio_priv(iio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_TEMP)
+ return mxs_lradc_read_temp(iio_dev, val);
+
+ return mxs_lradc_read_single(iio_dev, chan->channel, val);
+
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_TEMP) {
+ /* From the datasheet, we have to multiply by 1.012 and
+ * divide by 4
+ */
+ *val = 0;
+ *val2 = 253000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ *val = lradc->vref_mv[chan->channel];
+ *val2 = chan->scan_type.realbits -
+ test_bit(chan->channel, &lradc->is_divided);
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type == IIO_TEMP) {
+ /* The calculated value from the ADC is in Kelvin, we
+ * want Celsius for hwmon so the offset is
+ * -272.15 * scale
+ */
+ *val = -1075;
+ *val2 = 691699;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
/*
 * IIO write_raw callback: only IIO_CHAN_INFO_SCALE is writable.  The value
 * pair must match one of the two advertised scales, which toggles the
 * per-channel divide-by-two bit.  Returns -EBUSY while buffered capture
 * holds the lock, -EINVAL for any other value or mask.
 */
static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
			const struct iio_chan_spec *chan,
			int val, int val2, long m)
{
	struct mxs_lradc *lradc = iio_priv(iio_dev);
	struct mxs_lradc_scale *scale_avail =
			lradc->scale_avail[chan->channel];
	int ret;

	ret = mutex_trylock(&lradc->lock);
	if (!ret)
		return -EBUSY;

	switch (m) {
	case IIO_CHAN_INFO_SCALE:
		ret = -EINVAL;
		if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
		    val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
			/* divider by two disabled */
			clear_bit(chan->channel, &lradc->is_divided);
			ret = 0;
		} else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
			   val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
			/* divider by two enabled */
			set_bit(chan->channel, &lradc->is_divided);
			ret = 0;
		}

		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&lradc->lock);

	return ret;
}
+
/* Scale values are expressed as integer + nano fraction on write. */
static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev,
				       const struct iio_chan_spec *chan,
				       long m)
{
	return IIO_VAL_INT_PLUS_NANO;
}
+
/*
 * Format channel @ch's available scales ("int.nano int.nano ...\n") into
 * @buf for sysfs; returns the number of bytes written.
 */
static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev,
		struct device_attribute *attr,
		char *buf,
		int ch)
{
	struct iio_dev *iio = dev_to_iio_dev(dev);
	struct mxs_lradc *lradc = iio_priv(iio);
	int i, len = 0;

	for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++)
		len += sprintf(buf + len, "%d.%09u ",
			       lradc->scale_avail[ch][i].integer,
			       lradc->scale_avail[ch][i].nano);

	len += sprintf(buf + len, "\n");

	return len;
}
+
/* sysfs show callback; the attribute's address encodes the channel. */
static ssize_t mxs_lradc_show_scale_available(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr);

	return mxs_lradc_show_scale_available_ch(dev, attr, buf,
			iio_attr->address);
}
+
/*
 * Declare one read-only in_voltageN_scale_available attribute per voltage
 * channel.  Channels 8/9 are omitted — they back the temperature channel.
 */
#define SHOW_SCALE_AVAILABLE_ATTR(ch)					\
static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO,	\
		       mxs_lradc_show_scale_available, NULL, ch)

SHOW_SCALE_AVAILABLE_ATTR(0);
SHOW_SCALE_AVAILABLE_ATTR(1);
SHOW_SCALE_AVAILABLE_ATTR(2);
SHOW_SCALE_AVAILABLE_ATTR(3);
SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
SHOW_SCALE_AVAILABLE_ATTR(13);
SHOW_SCALE_AVAILABLE_ATTR(14);
SHOW_SCALE_AVAILABLE_ATTR(15);
+
/* Collect the scale_available attributes into the device's sysfs group. */
static struct attribute *mxs_lradc_attributes[] = {
	&iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage1_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage2_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage3_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage4_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage13_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage14_scale_available.dev_attr.attr,
	&iio_dev_attr_in_voltage15_scale_available.dev_attr.attr,
	NULL
};

static const struct attribute_group mxs_lradc_attribute_group = {
	.attrs = mxs_lradc_attributes,
};
+
/* IIO core callbacks for this device. */
static const struct iio_info mxs_lradc_iio_info = {
	.driver_module		= THIS_MODULE,
	.read_raw		= mxs_lradc_read_raw,
	.write_raw		= mxs_lradc_write_raw,
	.write_raw_get_fmt	= mxs_lradc_write_raw_get_fmt,
	.attrs			= &mxs_lradc_attribute_group,
};
+
/* input-core open callback: arm touch detection on first user. */
static int mxs_lradc_ts_open(struct input_dev *dev)
{
	struct mxs_lradc *lradc = input_get_drvdata(dev);

	/* Enable the touch-detect circuitry. */
	mxs_lradc_enable_touch_detection(lradc);

	return 0;
}
+
/* Quiesce all touchscreen IRQ sources and power down the detect circuit. */
static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
{
	/* stop all interrupts from firing */
	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
		LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
		LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
		LRADC_CTRL1);

	/* Power-down touchscreen touch-detect circuitry. */
	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
}
+
/* input-core close callback: shut the touchscreen machinery down. */
static void mxs_lradc_ts_close(struct input_dev *dev)
{
	struct mxs_lradc *lradc = input_get_drvdata(dev);

	mxs_lradc_disable_ts(lradc);
}
+
/*
 * Allocate and register the touchscreen input device.  A no-op (returns 0)
 * when the touchscreen feature is disabled in DT.  On registration failure
 * the allocated device is freed and the error returned.
 */
static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
{
	struct input_dev *input;
	struct device *dev = lradc->dev;
	int ret;

	if (!lradc->use_touchscreen)
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = DRIVER_NAME;
	input->id.bustype = BUS_HOST;
	input->dev.parent = dev;
	/* detection is only armed while a user holds the device open */
	input->open = mxs_lradc_ts_open;
	input->close = mxs_lradc_ts_close;

	__set_bit(EV_ABS, input->evbit);
	__set_bit(EV_KEY, input->evbit);
	__set_bit(BTN_TOUCH, input->keybit);
	input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
	input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
			     0, 0);

	lradc->ts_input = input;
	input_set_drvdata(input, lradc);
	ret = input_register_device(input);
	if (ret)
		input_free_device(lradc->ts_input);

	return ret;
}
+
/* Tear down the touchscreen input device (no-op if feature disabled). */
static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
{
	if (!lradc->use_touchscreen)
		return;

	mxs_lradc_disable_ts(lradc);
	input_unregister_device(lradc->ts_input);
}
+
/*
 * IRQ Handling
 */
/*
 * Shared handler for all LRADC interrupt lines: dispatches touchscreen
 * events to the state machine, wakes the buffered trigger or the raw-read
 * completion, then acknowledges every IRQ bit that was pending.
 */
static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
{
	struct iio_dev *iio = data;
	struct mxs_lradc *lradc = iio_priv(iio);
	unsigned long reg = readl(lradc->base + LRADC_CTRL1);
	/* touch-detect plus the four channels reserved for the screen */
	const uint32_t ts_irq_mask =
		LRADC_CTRL1_TOUCH_DETECT_IRQ |
		LRADC_CTRL1_LRADC_IRQ(2) |
		LRADC_CTRL1_LRADC_IRQ(3) |
		LRADC_CTRL1_LRADC_IRQ(4) |
		LRADC_CTRL1_LRADC_IRQ(5);

	if (!(reg & mxs_lradc_irq_mask(lradc)))
		return IRQ_NONE;

	if (lradc->use_touchscreen && (reg & ts_irq_mask))
		mxs_lradc_handle_touch(lradc);

	if (iio_buffer_enabled(iio))
		iio_trigger_poll(iio->trig);
	else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
		complete(&lradc->completion);

	mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
			LRADC_CTRL1);

	return IRQ_HANDLED;
}
+
/*
 * Trigger handling
 */
/*
 * Bottom half of the buffered-capture trigger: read every active virtual
 * slot, normalize the accumulated sum to one sample, re-arm the slot, and
 * push the frame (plus timestamp) into the IIO buffer.
 */
static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *iio = pf->indio_dev;
	struct mxs_lradc *lradc = iio_priv(iio);
	const uint32_t chan_value = LRADC_CH_ACCUMULATE |
		((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
	unsigned int i, j = 0;

	/* j counts virtual slots; i walks the selected physical channels */
	for_each_set_bit(i, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
		lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
		mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(j));
		lradc->buffer[j] &= LRADC_CH_VALUE_MASK;
		lradc->buffer[j] /= LRADC_DELAY_TIMER_LOOP;
		j++;
	}

	iio_push_to_buffers_with_timestamp(iio, lradc->buffer, pf->timestamp);

	iio_trigger_notify_done(iio->trig);

	return IRQ_HANDLED;
}
+
+static int mxs_lradc_configure_trigger(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *iio = iio_trigger_get_drvdata(trig);
+ struct mxs_lradc *lradc = iio_priv(iio);
+ const uint32_t st = state ? STMP_OFFSET_REG_SET : STMP_OFFSET_REG_CLR;
+
+ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_KICK, LRADC_DELAY(0) + st);
+
+ return 0;
+}
+
/* Trigger operations: only start/stop is needed. */
static const struct iio_trigger_ops mxs_lradc_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &mxs_lradc_configure_trigger,
};
+
/*
 * Allocate and register the device's own hardware trigger; stores it in
 * lradc->trig on success.  Returns -ENOMEM or the registration error.
 */
static int mxs_lradc_trigger_init(struct iio_dev *iio)
{
	int ret;
	struct iio_trigger *trig;
	struct mxs_lradc *lradc = iio_priv(iio);

	trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id);
	if (trig == NULL)
		return -ENOMEM;

	trig->dev.parent = lradc->dev;
	iio_trigger_set_drvdata(trig, iio);
	trig->ops = &mxs_lradc_trigger_ops;

	ret = iio_trigger_register(trig);
	if (ret) {
		iio_trigger_free(trig);
		return ret;
	}

	lradc->trig = trig;

	return 0;
}
+
/* Unregister and free the trigger created by mxs_lradc_trigger_init(). */
static void mxs_lradc_trigger_remove(struct iio_dev *iio)
{
	struct mxs_lradc *lradc = iio_priv(iio);

	iio_trigger_unregister(lradc->trig);
	iio_trigger_free(lradc->trig);
}
+
/*
 * Prepare buffered capture: take the raw-IO lock, allocate the sample
 * buffer, map each selected physical channel onto consecutive virtual
 * slots via CTRL4, enable their IRQs and hook them up to delay unit 0.
 * Returns -EINVAL on an empty scan mask, -EBUSY if raw IO is running,
 * -ENOMEM on allocation failure.
 */
static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
{
	struct mxs_lradc *lradc = iio_priv(iio);
	int ret = 0, chan, ofs = 0;
	unsigned long enable = 0;
	uint32_t ctrl4_set = 0;
	uint32_t ctrl4_clr = 0;
	uint32_t ctrl1_irq = 0;
	/* accumulate LRADC_DELAY_TIMER_LOOP samples per push */
	const uint32_t chan_value = LRADC_CH_ACCUMULATE |
		((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
	const int len = bitmap_weight(iio->active_scan_mask,
			LRADC_MAX_TOTAL_CHANS);

	if (!len)
		return -EINVAL;

	/*
	 * Lock the driver so raw access can not be done during buffered
	 * operation. This simplifies the code a lot.
	 */
	ret = mutex_trylock(&lradc->lock);
	if (!ret)
		return -EBUSY;

	lradc->buffer = kmalloc_array(len, sizeof(*lradc->buffer), GFP_KERNEL);
	if (!lradc->buffer) {
		ret = -ENOMEM;
		goto err_mem;
	}

	if (lradc->soc == IMX28_LRADC)
		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
							LRADC_CTRL1);
	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);

	for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
		ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
		ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
		ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
		mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(ofs));
		bitmap_set(&enable, ofs, 1);
		ofs++;
	}

	/* stop delay unit 0 before reprogramming, then re-arm it */
	mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
					LRADC_DELAY_KICK, LRADC_DELAY(0));
	mxs_lradc_reg_clear(lradc, ctrl4_clr, LRADC_CTRL4);
	mxs_lradc_reg_set(lradc, ctrl4_set, LRADC_CTRL4);
	mxs_lradc_reg_set(lradc, ctrl1_irq, LRADC_CTRL1);
	mxs_lradc_reg_set(lradc, enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET,
					LRADC_DELAY(0));

	return 0;

err_mem:
	mutex_unlock(&lradc->lock);
	return ret;
}
+
/*
 * Undo buffer_preenable(): stop delay unit 0, quiesce all channels and
 * IRQs, free the sample buffer and release the raw-IO lock.
 */
static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
{
	struct mxs_lradc *lradc = iio_priv(iio);

	mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
					LRADC_DELAY_KICK, LRADC_DELAY(0));

	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
	if (lradc->soc == IMX28_LRADC)
		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
					LRADC_CTRL1);

	kfree(lradc->buffer);
	mutex_unlock(&lradc->lock);

	return 0;
}
+
+static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
+ const unsigned long *mask)
+{
+ struct mxs_lradc *lradc = iio_priv(iio);
+ const int map_chans = bitmap_weight(mask, LRADC_MAX_TOTAL_CHANS);
+ int rsvd_chans = 0;
+ unsigned long rsvd_mask = 0;
+
+ if (lradc->use_touchbutton)
+ rsvd_mask |= CHAN_MASK_TOUCHBUTTON;
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_4WIRE)
+ rsvd_mask |= CHAN_MASK_TOUCHSCREEN_4WIRE;
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
+ rsvd_mask |= CHAN_MASK_TOUCHSCREEN_5WIRE;
+
+ if (lradc->use_touchbutton)
+ rsvd_chans++;
+ if (lradc->use_touchscreen)
+ rsvd_chans++;
+
+ /* Test for attempts to map channels with special mode of operation. */
+ if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
+ return false;
+
+ /* Test for attempts to map more channels then available slots. */
+ if (map_chans + rsvd_chans > LRADC_MAX_MAPPED_CHANS)
+ return false;
+
+ return true;
+}
+
/* Buffered-capture setup hooks; pre/postenable come from the core. */
static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
	.preenable = &mxs_lradc_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &mxs_lradc_buffer_postdisable,
	.validate_scan_mask = &mxs_lradc_validate_scan_mask,
};
+
/*
 * Driver initialization
 */

/* One indexed 12-bit voltage channel with raw and scale attributes. */
#define MXS_ADC_CHAN(idx, chan_type) {				\
	.type = (chan_type),					\
	.indexed = 1,						\
	.scan_index = (idx),					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
			      BIT(IIO_CHAN_INFO_SCALE),		\
	.channel = (idx),					\
	.address = (idx),					\
	.scan_type = {						\
		.sign = 'u',					\
		.realbits = LRADC_RESOLUTION,			\
		.storagebits = 32,				\
	},							\
}

static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
	MXS_ADC_CHAN(0, IIO_VOLTAGE),
	MXS_ADC_CHAN(1, IIO_VOLTAGE),
	MXS_ADC_CHAN(2, IIO_VOLTAGE),
	MXS_ADC_CHAN(3, IIO_VOLTAGE),
	MXS_ADC_CHAN(4, IIO_VOLTAGE),
	MXS_ADC_CHAN(5, IIO_VOLTAGE),
	MXS_ADC_CHAN(6, IIO_VOLTAGE),
	MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */
	/* Combined Temperature sensors (backed by channels 8 and 9) */
	{
		.type = IIO_TEMP,
		.indexed = 1,
		.scan_index = 8,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				      BIT(IIO_CHAN_INFO_OFFSET) |
				      BIT(IIO_CHAN_INFO_SCALE),
		.channel = 8,
		.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
	},
	MXS_ADC_CHAN(10, IIO_VOLTAGE),	/* VDDIO */
	MXS_ADC_CHAN(11, IIO_VOLTAGE),	/* VTH */
	MXS_ADC_CHAN(12, IIO_VOLTAGE),	/* VDDA */
	MXS_ADC_CHAN(13, IIO_VOLTAGE),	/* VDDD */
	MXS_ADC_CHAN(14, IIO_VOLTAGE),	/* VBG */
	MXS_ADC_CHAN(15, IIO_VOLTAGE),	/* VDD5V */
};
+
/*
 * Reset the block and bring it into a known state: delay unit 0 is set up
 * for periodic generic sampling, the other delay units are stopped, the
 * touchscreen type is programmed (i.MX28 only) and the temperature sensor
 * is powered up (all CTRL2 bits cleared, including TEMPSENSE_PWD).
 */
static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
	/* The ADC always uses DELAY CHANNEL 0. */
	const uint32_t adc_cfg =
		(1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
		(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);

	int ret = stmp_reset_block(lradc->base);

	if (ret)
		return ret;

	/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
	mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));

	/* Disable remaining DELAY CHANNELs */
	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(1));
	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));

	/* Configure the touchscreen type */
	if (lradc->soc == IMX28_LRADC) {
		mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
							LRADC_CTRL0);

		if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
			mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
					  LRADC_CTRL0);
	}

	/* Start internal temperature sensing. */
	mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);

	return 0;
}
+
/* Mask every LRADC interrupt and stop all delay units. */
static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
{
	int i;

	mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);

	for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
		mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
}
+
/* DT match table; .data carries the SoC variant enum. */
static const struct of_device_id mxs_lradc_dt_ids[] = {
	{ .compatible = "fsl,imx23-lradc", .data = (void *)IMX23_LRADC, },
	{ .compatible = "fsl,imx28-lradc", .data = (void *)IMX28_LRADC, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids);
+
/*
 * Parse the touchscreen DT properties.  Returns -ENODEV when the wires
 * property is absent (feature disabled), -EINVAL on an unsupported wire
 * count (5-wire is i.MX28 only).  Oversampling count/delay and settling
 * delay fall back to 4 / 2 / 10 when not given in DT.
 */
static int mxs_lradc_probe_touchscreen(struct mxs_lradc *lradc,
						struct device_node *lradc_node)
{
	int ret;
	u32 ts_wires = 0, adapt;

	ret = of_property_read_u32(lradc_node, "fsl,lradc-touchscreen-wires",
				&ts_wires);
	if (ret)
		return -ENODEV; /* touchscreen feature disabled */

	switch (ts_wires) {
	case 4:
		lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE;
		break;
	case 5:
		if (lradc->soc == IMX28_LRADC) {
			lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE;
			break;
		}
		/* fall through to the error message for i.MX23 */
	default:
		dev_err(lradc->dev,
			"Unsupported number of touchscreen wires (%d)\n",
			ts_wires);
		return -EINVAL;
	}

	lradc->over_sample_cnt = 4;
	ret = of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt);
	if (ret == 0)
		lradc->over_sample_cnt = adapt;

	lradc->over_sample_delay = 2;
	ret = of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt);
	if (ret == 0)
		lradc->over_sample_delay = adapt;

	lradc->settling_delay = 10;
	ret = of_property_read_u32(lradc_node, "fsl,settling", &adapt);
	if (ret == 0)
		lradc->settling_delay = adapt;

	return 0;
}
+
+/*
+ * Bind the LRADC: map registers, enable the delay-unit clock, request
+ * the per-SoC set of IRQs, set up the IIO triggered buffer and trigger,
+ * precompute the available scales, initialise the hardware and finally
+ * register the optional touchscreen input device and the IIO device.
+ */
+static int mxs_lradc_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id =
+		of_match_device(mxs_lradc_dt_ids, &pdev->dev);
+	const struct mxs_lradc_of_config *of_cfg =
+		&mxs_lradc_of_config[(enum mxs_lradc_id)of_id->data];
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct mxs_lradc *lradc;
+	struct iio_dev *iio;
+	struct resource *iores;
+	int ret = 0, touch_ret;
+	int i, s;
+	uint64_t scale_uv;
+
+	/* Allocate the IIO device. */
+	iio = devm_iio_device_alloc(dev, sizeof(*lradc));
+	if (!iio) {
+		dev_err(dev, "Failed to allocate IIO device\n");
+		return -ENOMEM;
+	}
+
+	lradc = iio_priv(iio);
+	lradc->soc = (enum mxs_lradc_id)of_id->data;
+
+	/* Grab the memory area */
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lradc->dev = &pdev->dev;
+	lradc->base = devm_ioremap_resource(dev, iores);
+	if (IS_ERR(lradc->base))
+		return PTR_ERR(lradc->base);
+
+	lradc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(lradc->clk)) {
+		dev_err(dev, "Failed to get the delay unit clock\n");
+		return PTR_ERR(lradc->clk);
+	}
+	ret = clk_prepare_enable(lradc->clk);
+	if (ret != 0) {
+		dev_err(dev, "Failed to enable the delay unit clock\n");
+		return ret;
+	}
+
+	/* Non-zero touch_ret simply means "no touchscreen configured". */
+	touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
+
+	/* Grab all IRQ sources (count and names are per-SoC). */
+	for (i = 0; i < of_cfg->irq_count; i++) {
+		lradc->irq[i] = platform_get_irq(pdev, i);
+		if (lradc->irq[i] < 0) {
+			ret = lradc->irq[i];
+			goto err_clk;
+		}
+
+		ret = devm_request_irq(dev, lradc->irq[i],
+				       mxs_lradc_handle_irq, 0,
+				       of_cfg->irq_name[i], iio);
+		if (ret)
+			goto err_clk;
+	}
+
+	lradc->vref_mv = of_cfg->vref_mv;
+
+	platform_set_drvdata(pdev, iio);
+
+	init_completion(&lradc->completion);
+	mutex_init(&lradc->lock);
+
+	iio->name = pdev->name;
+	iio->dev.parent = &pdev->dev;
+	iio->info = &mxs_lradc_iio_info;
+	iio->modes = INDIO_DIRECT_MODE;
+	iio->channels = mxs_lradc_chan_spec;
+	iio->num_channels = ARRAY_SIZE(mxs_lradc_chan_spec);
+	iio->masklength = LRADC_MAX_TOTAL_CHANS;
+
+	ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
+					 &mxs_lradc_trigger_handler,
+					 &mxs_lradc_buffer_ops);
+	if (ret)
+		goto err_clk;
+
+	ret = mxs_lradc_trigger_init(iio);
+	if (ret)
+		goto err_trig;
+
+	/* Populate available ADC input ranges */
+	for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) {
+		for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) {
+			/*
+			 * [s=0] = optional divider by two disabled (default)
+			 * [s=1] = optional divider by two enabled
+			 *
+			 * The scale is calculated by doing:
+			 *   Vref >> (realbits - s)
+			 * which multiplies by two on the second component
+			 * of the array.
+			 */
+			scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
+				   (LRADC_RESOLUTION - s);
+			lradc->scale_avail[i][s].nano =
+					do_div(scale_uv, 100000000) * 10;
+			lradc->scale_avail[i][s].integer = scale_uv;
+		}
+	}
+
+	/* Configure the hardware. */
+	ret = mxs_lradc_hw_init(lradc);
+	if (ret)
+		goto err_dev;
+
+	/* Register the touchscreen input device. */
+	if (touch_ret == 0) {
+		ret = mxs_lradc_ts_register(lradc);
+		if (ret)
+			goto err_ts_register;
+	}
+
+	/* Register IIO device. */
+	ret = iio_device_register(iio);
+	if (ret) {
+		dev_err(dev, "Failed to register IIO device\n");
+		goto err_ts;
+	}
+
+	return 0;
+
+	/* Unwind strictly in reverse order of the steps above. */
+err_ts:
+	mxs_lradc_ts_unregister(lradc);
+err_ts_register:
+	mxs_lradc_hw_stop(lradc);
+err_dev:
+	mxs_lradc_trigger_remove(iio);
+err_trig:
+	iio_triggered_buffer_cleanup(iio);
+err_clk:
+	clk_disable_unprepare(lradc->clk);
+	return ret;
+}
+
+static int mxs_lradc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *iio = platform_get_drvdata(pdev);
+	struct mxs_lradc *lradc = iio_priv(iio);
+
+	/* Tear down in strict reverse order of mxs_lradc_probe(). */
+	iio_device_unregister(iio);
+	mxs_lradc_ts_unregister(lradc);
+	mxs_lradc_hw_stop(lradc);
+	mxs_lradc_trigger_remove(iio);
+	iio_triggered_buffer_cleanup(iio);
+
+	clk_disable_unprepare(lradc->clk);
+	return 0;
+}
+
+/* Platform-driver glue; matching is DT-only via mxs_lradc_dt_ids. */
+static struct platform_driver mxs_lradc_driver = {
+	.driver	= {
+		.name	= DRIVER_NAME,
+		.of_match_table = mxs_lradc_dt_ids,
+	},
+	.probe	= mxs_lradc_probe,
+	.remove	= mxs_lradc_remove,
+};
+
+module_platform_driver(mxs_lradc_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale i.MX28 LRADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
new file mode 100644
index 00000000000000..8ad71691fc090c
--- /dev/null
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -0,0 +1,400 @@
+/*
+ * ST SPEAr ADC driver
+ *
+ * Copyright 2012 Stefan Roese <sr@denx.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* SPEAR registers definitions */
+#define SPEAR600_ADC_SCAN_RATE_LO(x)	((x) & 0xFFFF)
+#define SPEAR600_ADC_SCAN_RATE_HI(x)	(((x) >> 0x10) & 0xFFFF)
+/* 4-bit low/high phase counts of the ADC clock divider */
+#define SPEAR_ADC_CLK_LOW(x)		(((x) & 0xf) << 0)
+#define SPEAR_ADC_CLK_HIGH(x)		(((x) & 0xf) << 4)
+
+/* Bit definitions for SPEAR_ADC_STATUS */
+#define SPEAR_ADC_STATUS_START_CONVERSION	(1 << 0)
+#define SPEAR_ADC_STATUS_CHANNEL_NUM(x)		((x) << 1)
+#define SPEAR_ADC_STATUS_ADC_ENABLE		(1 << 4)
+#define SPEAR_ADC_STATUS_AVG_SAMPLE(x)		((x) << 5)
+#define SPEAR_ADC_STATUS_VREF_INTERNAL		(1 << 9)
+
+/* The converter delivers 10-bit results */
+#define SPEAR_ADC_DATA_MASK		0x03ff
+#define SPEAR_ADC_DATA_BITS		10
+
+#define SPEAR_ADC_MOD_NAME "spear-adc"
+
+#define SPEAR_ADC_CHANNEL_NUM		8
+
+/* Supported range of the programmable ADC clock, in Hz */
+#define SPEAR_ADC_CLK_MIN		2500000
+#define SPEAR_ADC_CLK_MAX		20000000
+
+/* Register layout of the ADC block on SPEAr3xx-class SoCs */
+struct adc_regs_spear3xx {
+	u32 status;
+	u32 average;
+	u32 scan_rate;
+	u32 clk;	/* Not avail for 1340 & 1310 */
+	u32 ch_ctrl[SPEAR_ADC_CHANNEL_NUM];
+	u32 ch_data[SPEAR_ADC_CHANNEL_NUM];
+};
+
+/* SPEAr600 splits 16-bit quantities over an lsb/msb register pair */
+struct chan_data {
+	u32 lsb;
+	u32 msb;
+};
+
+/* Register layout of the ADC block on SPEAr6xx SoCs */
+struct adc_regs_spear6xx {
+	u32 status;
+	u32 pad[2];
+	u32 clk;
+	u32 ch_ctrl[SPEAR_ADC_CHANNEL_NUM];
+	struct chan_data ch_data[SPEAR_ADC_CHANNEL_NUM];
+	u32 scan_rate_lo;
+	u32 scan_rate_hi;
+	struct chan_data average;
+};
+
+/*
+ * Driver instance state.  Both base pointers alias the same mapping;
+ * the compatible string in @np decides which layout is used.
+ */
+struct spear_adc_state {
+	struct device_node *np;			/* our DT node */
+	struct adc_regs_spear3xx __iomem *adc_base_spear3xx;
+	struct adc_regs_spear6xx __iomem *adc_base_spear6xx;
+	struct clk *clk;			/* APB clock feeding the ADC */
+	struct completion completion;		/* completed by the ISR */
+	u32 current_clk;			/* ADC clock actually set (Hz) */
+	u32 sampling_freq;			/* requested clock (Hz, from DT) */
+	u32 avg_samples;			/* HW averaging; 0 = single */
+	u32 vref_external;			/* external Vref (mV); 0 = internal */
+	u32 value;				/* last result, written in ISR */
+};
+
+/*
+ * Functions to access some SPEAr ADC register. Abstracted into
+ * static inline functions, because of different register offsets
+ * on different SoC variants (SPEAr300 vs SPEAr600 etc).
+ */
+static void spear_adc_set_status(struct spear_adc_state *st, u32 val)
+{
+	/* status sits at offset 0 in both the 3xx and 6xx layouts */
+	__raw_writel(val, &st->adc_base_spear6xx->status);
+}
+
+/*
+ * Program the ADC clock divider for a requested rate @val (Hz) and
+ * remember the rate actually achieved in st->current_clk.
+ */
+static void spear_adc_set_clk(struct spear_adc_state *st, u32 val)
+{
+	u32 apb_clk = clk_get_rate(st->clk);
+	u32 div, lo, hi;
+
+	/* Split the divider into low/high phases; hi takes the remainder. */
+	div = DIV_ROUND_UP(apb_clk, val);
+	lo = div / 2;
+	hi = div - lo;
+	st->current_clk = apb_clk / div;
+
+	__raw_writel(SPEAR_ADC_CLK_LOW(lo) | SPEAR_ADC_CLK_HIGH(hi),
+		     &st->adc_base_spear6xx->clk);
+}
+
+/* Write the per-channel control register for channel @n. */
+static void spear_adc_set_ctrl(struct spear_adc_state *st, int n,
+			       u32 val)
+{
+	__raw_writel(val, &st->adc_base_spear6xx->ch_ctrl[n]);
+}
+
+/*
+ * Fetch the (averaged) conversion result.  SPEAr600 keeps it in the
+ * msb half of a split register pair; other SoCs use a single register.
+ */
+static u32 spear_adc_get_average(struct spear_adc_state *st)
+{
+	void __iomem *reg;
+
+	if (of_device_is_compatible(st->np, "st,spear600-adc"))
+		reg = &st->adc_base_spear6xx->average.msb;
+	else
+		reg = &st->adc_base_spear3xx->average;
+
+	return __raw_readl(reg) & SPEAR_ADC_DATA_MASK;
+}
+
+/* Program the scan rate; SPEAr600 splits it over two 16-bit registers. */
+static void spear_adc_set_scanrate(struct spear_adc_state *st, u32 rate)
+{
+	if (!of_device_is_compatible(st->np, "st,spear600-adc")) {
+		/* Non-SPEAr600 parts take the whole rate in one register. */
+		__raw_writel(rate, &st->adc_base_spear3xx->scan_rate);
+		return;
+	}
+
+	__raw_writel(SPEAR600_ADC_SCAN_RATE_LO(rate),
+		     &st->adc_base_spear6xx->scan_rate_lo);
+	__raw_writel(SPEAR600_ADC_SCAN_RATE_HI(rate),
+		     &st->adc_base_spear6xx->scan_rate_hi);
+}
+
+/*
+ * IIO read callback: raw single conversion, scale and sampling rate.
+ * Raw reads are serialised on mlock and block until the ISR signals
+ * completion of the conversion.
+ */
+static int spear_adc_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val,
+			      int *val2,
+			      long mask)
+{
+	struct spear_adc_state *st = iio_priv(indio_dev);
+	u32 status;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+
+		/* Kick off one conversion on the requested channel. */
+		status = SPEAR_ADC_STATUS_CHANNEL_NUM(chan->channel) |
+			SPEAR_ADC_STATUS_AVG_SAMPLE(st->avg_samples) |
+			SPEAR_ADC_STATUS_START_CONVERSION |
+			SPEAR_ADC_STATUS_ADC_ENABLE;
+		if (st->vref_external == 0)
+			status |= SPEAR_ADC_STATUS_VREF_INTERNAL;
+
+		spear_adc_set_status(st, status);
+		/*
+		 * NOTE(review): no timeout here — a lost interrupt would
+		 * block the reader forever; consider
+		 * wait_for_completion_timeout().
+		 */
+		wait_for_completion(&st->completion); /* set by ISR */
+		*val = st->value;
+
+		mutex_unlock(&indio_dev->mlock);
+
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		/* scale = vref_external / 2^SPEAR_ADC_DATA_BITS */
+		*val = st->vref_external;
+		*val2 = SPEAR_ADC_DATA_BITS;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = st->current_clk;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * IIO write callback: only the sampling frequency is writable, and only
+ * within [SPEAR_ADC_CLK_MIN, SPEAR_ADC_CLK_MAX] Hz with no fractional
+ * part (val2 must be 0).
+ */
+static int spear_adc_write_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val,
+			       int val2,
+			       long mask)
+{
+	struct spear_adc_state *st = iio_priv(indio_dev);
+	int ret = 0;
+
+	if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+
+	if ((val < SPEAR_ADC_CLK_MIN) ||
+	    (val > SPEAR_ADC_CLK_MAX) ||
+	    (val2 != 0)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	spear_adc_set_clk(st, val);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
+
+/*
+ * One single-ended voltage channel per hardware input: per-channel raw
+ * value, scale shared by type, sampling frequency shared by all.
+ */
+#define SPEAR_ADC_CHAN(idx) {				\
+	.type = IIO_VOLTAGE,				\
+	.indexed = 1,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+	.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+	.channel = idx,					\
+}
+
+static const struct iio_chan_spec spear_adc_iio_channels[] = {
+	SPEAR_ADC_CHAN(0),
+	SPEAR_ADC_CHAN(1),
+	SPEAR_ADC_CHAN(2),
+	SPEAR_ADC_CHAN(3),
+	SPEAR_ADC_CHAN(4),
+	SPEAR_ADC_CHAN(5),
+	SPEAR_ADC_CHAN(6),
+	SPEAR_ADC_CHAN(7),
+};
+
+/* Conversion-done interrupt: latch the result and wake the reader. */
+static irqreturn_t spear_adc_isr(int irq, void *dev_id)
+{
+	struct spear_adc_state *st = dev_id;
+
+	/* Read value to clear IRQ */
+	st->value = spear_adc_get_average(st);
+	complete(&st->completion);	/* unblocks spear_adc_read_raw() */
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Bring the controller into a known state (everything zeroed), then
+ * program the sampling clock requested via the device tree.
+ */
+static int spear_adc_configure(struct spear_adc_state *st)
+{
+	int chan;
+
+	/* Reset ADC core */
+	spear_adc_set_status(st, 0);
+	__raw_writel(0, &st->adc_base_spear6xx->clk);
+
+	for (chan = 0; chan < SPEAR_ADC_CHANNEL_NUM; chan++)
+		spear_adc_set_ctrl(st, chan, 0);
+
+	spear_adc_set_scanrate(st, 0);
+
+	spear_adc_set_clk(st, st->sampling_freq);
+
+	return 0;
+}
+
+/* IIO callbacks: raw/scale/rate reads and sampling-frequency writes. */
+static const struct iio_info spear_adc_info = {
+	.read_raw = &spear_adc_read_raw,
+	.write_raw = &spear_adc_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Bind the ADC: map the register block, get and enable the APB clock,
+ * request the conversion IRQ, read the mandatory/optional DT properties,
+ * reset/configure the hardware and register the IIO device.
+ */
+static int spear_adc_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct spear_adc_state *st;
+	struct iio_dev *indio_dev = NULL;
+	int ret = -ENODEV;
+	int irq;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
+	if (!indio_dev) {
+		dev_err(dev, "failed allocating iio device\n");
+		return -ENOMEM;
+	}
+
+	st = iio_priv(indio_dev);
+	st->np = np;
+
+	/*
+	 * SPEAr600 has a different register layout than other SPEAr SoC's
+	 * (e.g. SPEAr3xx). Let's provide two register base addresses
+	 * to support multi-arch kernels.
+	 */
+	st->adc_base_spear6xx = of_iomap(np, 0);
+	if (!st->adc_base_spear6xx) {
+		dev_err(dev, "failed mapping memory\n");
+		return -ENOMEM;
+	}
+	/* Same mapping, viewed through the SPEAr3xx layout. */
+	st->adc_base_spear3xx =
+		(struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;
+
+	st->clk = clk_get(dev, NULL);
+	if (IS_ERR(st->clk)) {
+		dev_err(dev, "failed getting clock\n");
+		goto errout1;
+	}
+
+	ret = clk_prepare_enable(st->clk);
+	if (ret) {
+		dev_err(dev, "failed enabling clock\n");
+		goto errout2;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "failed getting interrupt resource\n");
+		ret = -EINVAL;
+		goto errout3;
+	}
+
+	ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
+			       st);
+	if (ret < 0) {
+		dev_err(dev, "failed requesting interrupt\n");
+		goto errout3;
+	}
+
+	/* "sampling-frequency" is the only mandatory DT property. */
+	if (of_property_read_u32(np, "sampling-frequency",
+				 &st->sampling_freq)) {
+		dev_err(dev, "sampling-frequency missing in DT\n");
+		ret = -EINVAL;
+		goto errout3;
+	}
+
+	/*
+	 * Optional avg_samples defaults to 0, resulting in single data
+	 * conversion
+	 */
+	of_property_read_u32(np, "average-samples", &st->avg_samples);
+
+	/*
+	 * Optional vref_external defaults to 0, resulting in internal vref
+	 * selection
+	 */
+	of_property_read_u32(np, "vref-external", &st->vref_external);
+
+	spear_adc_configure(st);
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	init_completion(&st->completion);
+
+	indio_dev->name = SPEAR_ADC_MOD_NAME;
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &spear_adc_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = spear_adc_iio_channels;
+	indio_dev->num_channels = ARRAY_SIZE(spear_adc_iio_channels);
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto errout3;
+
+	dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
+
+	return 0;
+
+	/* Error unwinding, reverse order of acquisition. */
+errout3:
+	clk_disable_unprepare(st->clk);
+errout2:
+	clk_put(st->clk);
+errout1:
+	iounmap(st->adc_base_spear6xx);
+	return ret;
+}
+
+static int spear_adc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct spear_adc_state *st = iio_priv(indio_dev);
+
+	/* Release in reverse order of spear_adc_probe(). */
+	iio_device_unregister(indio_dev);
+	clk_disable_unprepare(st->clk);
+	clk_put(st->clk);
+	iounmap(st->adc_base_spear6xx);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+/* DT match table; covers SPEAr600 (other variants share the 3xx view). */
+static const struct of_device_id spear_adc_dt_ids[] = {
+	{ .compatible = "st,spear600-adc", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spear_adc_dt_ids);
+#endif
+
+static struct platform_driver spear_adc_driver = {
+	.probe		= spear_adc_probe,
+	.remove		= spear_adc_remove,
+	.driver		= {
+		.name	= SPEAR_ADC_MOD_NAME,
+		.of_match_table = of_match_ptr(spear_adc_dt_ids),
+	},
+};
+
+module_platform_driver(spear_adc_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("SPEAr ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/frequency/Kconfig b/drivers/staging/iio/frequency/Kconfig
new file mode 100644
index 00000000000000..fc726d3c64a620
--- /dev/null
+++ b/drivers/staging/iio/frequency/Kconfig
@@ -0,0 +1,26 @@
+#
+# Direct Digital Synthesis drivers
+#
+menu "Direct Digital Synthesis"
+
+config AD9832
+ tristate "Analog Devices ad9832/5 driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices DDS chip
+ AD9832 and AD9835, provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad9832.
+
+config AD9834
+ tristate "Analog Devices AD9833/4/7/8 driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices DDS chip
+ AD9833, AD9834, AD9837 and AD9838, provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad9834.
+
+endmenu
diff --git a/drivers/staging/iio/frequency/Makefile b/drivers/staging/iio/frequency/Makefile
new file mode 100644
index 00000000000000..e5dbcfce44f98a
--- /dev/null
+++ b/drivers/staging/iio/frequency/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Direct Digital Synthesis drivers
+#
+
+obj-$(CONFIG_AD9832) += ad9832.o
+obj-$(CONFIG_AD9834) += ad9834.o
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
new file mode 100644
index 00000000000000..cf68159a584815
--- /dev/null
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -0,0 +1,352 @@
+/*
+ * AD9832 SPI DDS driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/div64.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include "dds.h"
+
+#include "ad9832.h"
+
+/*
+ * Convert an output frequency (Hz) into the AD9832's 32-bit frequency
+ * tuning word: freqreg = fout * 2^AD9832_FREQ_BITS / mclk.
+ */
+static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
+{
+	unsigned long long freqreg =
+		(unsigned long long)fout << AD9832_FREQ_BITS;
+
+	do_div(freqreg, mclk);
+	return freqreg;
+}
+
+/*
+ * Program one 32-bit frequency register, one byte per serial write.
+ * @addr is the register address of the top byte (AD9832_FREQxHM); the
+ * lower bytes live at addr-1 .. addr-3.  Rejects @fout above Nyquist
+ * (mclk/2) with -EINVAL; otherwise returns the spi_sync() result.
+ */
+static int ad9832_write_frequency(struct ad9832_state *st,
+				  unsigned addr, unsigned long fout)
+{
+	unsigned long regval;
+
+	if (fout > (st->mclk / 2))
+		return -EINVAL;
+
+	regval = ad9832_calc_freqreg(st->mclk, fout);
+
+	/* Commands alternate 8/16-bit phases per the serial protocol. */
+	st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
+					(addr << ADD_SHIFT) |
+					((regval >> 24) & 0xFF));
+	st->freq_data[1] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
+					((addr - 1) << ADD_SHIFT) |
+					((regval >> 16) & 0xFF));
+	st->freq_data[2] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
+					((addr - 2) << ADD_SHIFT) |
+					((regval >> 8) & 0xFF));
+	st->freq_data[3] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
+					((addr - 3) << ADD_SHIFT) |
+					((regval >> 0) & 0xFF));
+
+	return spi_sync(st->spi, &st->freq_msg);
+}
+
+/*
+ * ad9832_write_phase() - load one of the four phase registers.
+ * @st:    driver state
+ * @addr:  register address of the high phase byte (AD9832_PHASExH);
+ *         the low byte lives at @addr - 1
+ * @phase: phase word, 0 .. 4095 maps onto 0 .. 2*pi
+ *
+ * Returns the spi_sync() result, or -EINVAL for an out-of-range phase.
+ *
+ * Fix: the old check "phase > (1 << AD9832_PHASE_BITS)" was off by one
+ * and accepted 4096, whose 13th bit then leaked past the 12-bit phase
+ * field into the serial word; use ">=" so only values that fit in
+ * AD9832_PHASE_BITS bits pass.
+ */
+static int ad9832_write_phase(struct ad9832_state *st,
+			      unsigned long addr, unsigned long phase)
+{
+	if (phase >= (1 << AD9832_PHASE_BITS))
+		return -EINVAL;
+
+	st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
+					(addr << ADD_SHIFT) |
+					((phase >> 8) & 0xFF));
+	st->phase_data[1] = cpu_to_be16((AD9832_CMD_PHA16BITSW << CMD_SHIFT) |
+					((addr - 1) << ADD_SHIFT) |
+					(phase & 0xFF));
+
+	return spi_sync(st->spi, &st->phase_msg);
+}
+
+/*
+ * sysfs store callback shared by all AD9832 attributes; the attribute
+ * address selects which register or cached control word is updated.
+ * Serialised on the IIO device mlock.
+ */
+static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad9832_state *st = iio_priv(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+
+	mutex_lock(&indio_dev->mlock);
+	switch ((u32) this_attr->address) {
+	case AD9832_FREQ0HM:
+	case AD9832_FREQ1HM:
+		ret = ad9832_write_frequency(st, this_attr->address, val);
+		break;
+	case AD9832_PHASE0H:
+	case AD9832_PHASE1H:
+	case AD9832_PHASE2H:
+	case AD9832_PHASE3H:
+		ret = ad9832_write_phase(st, this_attr->address, val);
+		break;
+	case AD9832_PINCTRL_EN:
+		/* non-zero: control via pins; zero: control via serial word */
+		if (val)
+			st->ctrl_ss &= ~AD9832_SELSRC;
+		else
+			st->ctrl_ss |= AD9832_SELSRC;
+		st->data = cpu_to_be16((AD9832_CMD_SYNCSELSRC << CMD_SHIFT) |
+					st->ctrl_ss);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9832_FREQ_SYM:
+		/* select FREQ0 (0) or FREQ1 (1) as the active register */
+		if (val == 1) {
+			st->ctrl_fp |= AD9832_FREQ;
+		} else if (val == 0) {
+			st->ctrl_fp &= ~AD9832_FREQ;
+		} else {
+			ret = -EINVAL;
+			break;
+		}
+		st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
+					st->ctrl_fp);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9832_PHASE_SYM:
+		/* select one of the four phase registers (0..3) */
+		if (val > 3) {
+			ret = -EINVAL;
+			break;
+		}
+
+		st->ctrl_fp &= ~AD9832_PHASE(3);
+		st->ctrl_fp |= AD9832_PHASE(val);
+
+		st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
+					st->ctrl_fp);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9832_OUTPUT_EN:
+		/* enable: clear reset/sleep/clr; disable: assert reset */
+		if (val)
+			st->ctrl_src &= ~(AD9832_RESET | AD9832_SLEEP |
+					AD9832_CLR);
+		else
+			st->ctrl_src |= AD9832_RESET;
+
+		st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
+					st->ctrl_src);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	default:
+		ret = -ENODEV;
+	}
+	mutex_unlock(&indio_dev->mlock);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+/**
+ * see dds.h for further information
+ */
+
+/* Frequency tuning words (Hz); scale is fixed at 1 Hz per LSB. */
+static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ0HM);
+static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_FREQ1HM);
+static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ_SYM);
+static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
+
+/* Four 12-bit phase words; scale = 2*pi / 2^12 radians per LSB. */
+static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_PHASE0H);
+static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_PHASE1H);
+static IIO_DEV_ATTR_PHASE(0, 2, S_IWUSR, NULL, ad9832_write, AD9832_PHASE2H);
+static IIO_DEV_ATTR_PHASE(0, 3, S_IWUSR, NULL, ad9832_write, AD9832_PHASE3H);
+static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL,
+				ad9832_write, AD9832_PHASE_SYM);
+static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
+
+/* Pin-control source select and output enable switches. */
+static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+				ad9832_write, AD9832_PINCTRL_EN);
+static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL,
+				ad9832_write, AD9832_OUTPUT_EN);
+
+static struct attribute *ad9832_attributes[] = {
+	&iio_dev_attr_out_altvoltage0_frequency0.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_frequency1.dev_attr.attr,
+	&iio_const_attr_out_altvoltage0_frequency_scale.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_phase0.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_phase1.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_phase2.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_phase3.dev_attr.attr,
+	&iio_const_attr_out_altvoltage0_phase_scale.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_pincontrol_en.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_frequencysymbol.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_phasesymbol.dev_attr.attr,
+	&iio_dev_attr_out_altvoltage0_out_enable.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9832_attribute_group = {
+	.attrs = ad9832_attributes,
+};
+
+/* sysfs-only device: no read_raw/write_raw callbacks. */
+static const struct iio_info ad9832_info = {
+	.attrs = &ad9832_attribute_group,
+	.driver_module = THIS_MODULE,
+};
+
+/*
+ * Bind the AD9832/AD9835: take the (optional) vcc supply, build the
+ * default SPI messages, reset the chip and load the power-up frequency
+ * and phase words from platform data, then register the IIO device.
+ */
+static int ad9832_probe(struct spi_device *spi)
+{
+	struct ad9832_platform_data *pdata = spi->dev.platform_data;
+	struct iio_dev *indio_dev;
+	struct ad9832_state *st;
+	struct regulator *reg;
+	int ret;
+
+	if (!pdata) {
+		dev_dbg(&spi->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * NOTE(review): a failing devm_regulator_get() (including
+	 * -EPROBE_DEFER) is silently ignored and probing continues
+	 * without a supply — confirm this best-effort behaviour is
+	 * intended.
+	 */
+	reg = devm_regulator_get(&spi->dev, "vcc");
+	if (!IS_ERR(reg)) {
+		ret = regulator_enable(reg);
+		if (ret)
+			return ret;
+	}
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_reg;
+	}
+	spi_set_drvdata(spi, indio_dev);
+	st = iio_priv(indio_dev);
+	st->reg = reg;
+	st->mclk = pdata->mclk;
+	st->spi = spi;
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->info = &ad9832_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Setup default messages */
+
+	/* Single 16-bit control-word transfer. */
+	st->xfer.tx_buf = &st->data;
+	st->xfer.len = 2;
+
+	spi_message_init(&st->msg);
+	spi_message_add_tail(&st->xfer, &st->msg);
+
+	/*
+	 * Four chained 16-bit transfers for a frequency word;
+	 * cs_change keeps each word a separate chip-select cycle.
+	 */
+	st->freq_xfer[0].tx_buf = &st->freq_data[0];
+	st->freq_xfer[0].len = 2;
+	st->freq_xfer[0].cs_change = 1;
+	st->freq_xfer[1].tx_buf = &st->freq_data[1];
+	st->freq_xfer[1].len = 2;
+	st->freq_xfer[1].cs_change = 1;
+	st->freq_xfer[2].tx_buf = &st->freq_data[2];
+	st->freq_xfer[2].len = 2;
+	st->freq_xfer[2].cs_change = 1;
+	st->freq_xfer[3].tx_buf = &st->freq_data[3];
+	st->freq_xfer[3].len = 2;
+
+	spi_message_init(&st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[2], &st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[3], &st->freq_msg);
+
+	/* Two chained 16-bit transfers for a phase word. */
+	st->phase_xfer[0].tx_buf = &st->phase_data[0];
+	st->phase_xfer[0].len = 2;
+	st->phase_xfer[0].cs_change = 1;
+	st->phase_xfer[1].tx_buf = &st->phase_data[1];
+	st->phase_xfer[1].len = 2;
+
+	spi_message_init(&st->phase_msg);
+	spi_message_add_tail(&st->phase_xfer[0], &st->phase_msg);
+	spi_message_add_tail(&st->phase_xfer[1], &st->phase_msg);
+
+	/* Start with the output held in sleep/reset/clear. */
+	st->ctrl_src = AD9832_SLEEP | AD9832_RESET | AD9832_CLR;
+	st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
+					st->ctrl_src);
+	ret = spi_sync(st->spi, &st->msg);
+	if (ret) {
+		dev_err(&spi->dev, "device init failed\n");
+		goto error_disable_reg;
+	}
+
+	/* Load the power-up tuning and phase words from platform data. */
+	ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	return 0;
+
+error_disable_reg:
+	if (!IS_ERR(reg))
+		regulator_disable(reg);
+
+	return ret;
+}
+
+static int ad9832_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ad9832_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	/* st->reg may hold an error pointer if no supply was found. */
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+
+	return 0;
+}
+
+/* Both supported parts share the same register map and driver data. */
+static const struct spi_device_id ad9832_id[] = {
+	{"ad9832", 0},
+	{"ad9835", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad9832_id);
+
+static struct spi_driver ad9832_driver = {
+	.driver = {
+		.name	= "ad9832",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad9832_probe,
+	.remove		= ad9832_remove,
+	.id_table	= ad9832_id,
+};
+module_spi_driver(ad9832_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
new file mode 100644
index 00000000000000..386f4dc8c9a11b
--- /dev/null
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -0,0 +1,126 @@
+/*
+ * AD9832 SPI DDS driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DDS_AD9832_H_
+#define IIO_DDS_AD9832_H_
+
+/* Registers */
+
+/* Each 32-bit frequency word is split over four byte-wide registers
+ * (LL/HL/LM/HM); each 12-bit phase word over two (L/H).
+ */
+#define AD9832_FREQ0LL		0x0
+#define AD9832_FREQ0HL		0x1
+#define AD9832_FREQ0LM		0x2
+#define AD9832_FREQ0HM		0x3
+#define AD9832_FREQ1LL		0x4
+#define AD9832_FREQ1HL		0x5
+#define AD9832_FREQ1LM		0x6
+#define AD9832_FREQ1HM		0x7
+#define AD9832_PHASE0L		0x8
+#define AD9832_PHASE0H		0x9
+#define AD9832_PHASE1L		0xA
+#define AD9832_PHASE1H		0xB
+#define AD9832_PHASE2L		0xC
+#define AD9832_PHASE2H		0xD
+#define AD9832_PHASE3L		0xE
+#define AD9832_PHASE3H		0xF
+
+/* Pseudo-addresses used only as sysfs attribute selectors */
+#define AD9832_PHASE_SYM	0x10
+#define AD9832_FREQ_SYM		0x11
+#define AD9832_PINCTRL_EN	0x12
+#define AD9832_OUTPUT_EN	0x13
+
+/* Command Control Bits */
+
+#define AD9832_CMD_PHA8BITSW	0x1
+#define AD9832_CMD_PHA16BITSW	0x0
+#define AD9832_CMD_FRE8BITSW	0x3
+#define AD9832_CMD_FRE16BITSW	0x2
+#define AD9832_CMD_FPSELECT	0x6
+#define AD9832_CMD_SYNCSELSRC	0x8
+#define AD9832_CMD_SLEEPRESCLR	0xC
+
+/* Control-word bit positions; the serial word is cmd[15:12] addr[11:8]
+ * data[7:0].
+ */
+#define AD9832_FREQ		(1 << 11)
+#define AD9832_PHASE(x)		(((x) & 3) << 9)
+#define AD9832_SYNC		(1 << 13)
+#define AD9832_SELSRC		(1 << 12)
+#define AD9832_SLEEP		(1 << 13)
+#define AD9832_RESET		(1 << 12)
+#define AD9832_CLR		(1 << 11)
+#define CMD_SHIFT		12
+#define ADD_SHIFT		8
+#define AD9832_FREQ_BITS	32
+#define AD9832_PHASE_BITS	12
+#define RES_MASK(bits)		((1 << (bits)) - 1)
+
+/**
+ * struct ad9832_state - driver instance specific data
+ * @spi:		spi_device
+ * @reg:		supply regulator
+ * @mclk:		external master clock
+ * @ctrl_fp:		cached frequency/phase control word
+ * @ctrl_ss:		cached sync/selsrc control word
+ * @ctrl_src:		cached sleep/reset/clr word
+ * @xfer:		default spi transfer
+ * @msg:		default spi message
+ * @freq_xfer:		tuning word spi transfer
+ * @freq_msg:		tuning word spi message
+ * @phase_xfer:		tuning word spi transfer
+ * @phase_msg:		tuning word spi message
+ * @data:		spi transmit buffer
+ * @phase_data:		tuning word spi transmit buffer
+ * @freq_data:		tuning word spi transmit buffer
+ */
+
+struct ad9832_state {
+	struct spi_device		*spi;
+	struct regulator		*reg;
+	unsigned long			mclk;
+	unsigned short			ctrl_fp;
+	unsigned short			ctrl_ss;
+	unsigned short			ctrl_src;
+	struct spi_transfer		xfer;
+	struct spi_message		msg;
+	struct spi_transfer		freq_xfer[4];
+	struct spi_message		freq_msg;
+	struct spi_transfer		phase_xfer[2];
+	struct spi_message		phase_msg;
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 * Note the union: freq_data, phase_data and data overlap, so
+	 * only one kind of transfer can be in flight at a time.
+	 */
+	union {
+		__be16			freq_data[4]____cacheline_aligned;
+		__be16			phase_data[2];
+		__be16			data;
+	};
+};
+
+/*
+ * TODO: struct ad9832_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad9832_platform_data - platform specific information
+ * @mclk:		master clock in Hz
+ * @freq0:		power up freq0 tuning word in Hz
+ * @freq1:		power up freq1 tuning word in Hz
+ * @phase0:		power up phase0 value [0..4095] correlates with 0..2PI
+ * @phase1:		power up phase1 value [0..4095] correlates with 0..2PI
+ * @phase2:		power up phase2 value [0..4095] correlates with 0..2PI
+ * @phase3:		power up phase3 value [0..4095] correlates with 0..2PI
+ *
+ * Passed via spi board_info; consumed once in ad9832_probe().
+ */
+
+struct ad9832_platform_data {
+	unsigned long		mclk;
+	unsigned long		freq0;
+	unsigned long		freq1;
+	unsigned short		phase0;
+	unsigned short		phase1;
+	unsigned short		phase2;
+	unsigned short		phase3;
+};
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
new file mode 100644
index 00000000000000..5c803191c2cea9
--- /dev/null
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -0,0 +1,458 @@
+/*
+ * AD9833/AD9834/AD9837/AD9838 SPI DDS driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/div64.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include "dds.h"
+
+#include "ad9834.h"
+
+static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
+{
+ unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS);
+
+ do_div(freqreg, mclk);
+ return freqreg;
+}
+
+static int ad9834_write_frequency(struct ad9834_state *st,
+ unsigned long addr, unsigned long fout)
+{
+ unsigned long regval;
+
+ if (fout > (st->mclk / 2))
+ return -EINVAL;
+
+ regval = ad9834_calc_freqreg(st->mclk, fout);
+
+ st->freq_data[0] = cpu_to_be16(addr | (regval &
+ RES_MASK(AD9834_FREQ_BITS / 2)));
+ st->freq_data[1] = cpu_to_be16(addr | ((regval >>
+ (AD9834_FREQ_BITS / 2)) &
+ RES_MASK(AD9834_FREQ_BITS / 2)));
+
+ return spi_sync(st->spi, &st->freq_msg);
+}
+
+static int ad9834_write_phase(struct ad9834_state *st,
+ unsigned long addr, unsigned long phase)
+{
+ if (phase > (1 << AD9834_PHASE_BITS))
+ return -EINVAL;
+ st->data = cpu_to_be16(addr | phase);
+
+ return spi_sync(st->spi, &st->msg);
+}
+
+static ssize_t ad9834_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+
+ mutex_lock(&indio_dev->mlock);
+ switch ((u32) this_attr->address) {
+ case AD9834_REG_FREQ0:
+ case AD9834_REG_FREQ1:
+ ret = ad9834_write_frequency(st, this_attr->address, val);
+ break;
+ case AD9834_REG_PHASE0:
+ case AD9834_REG_PHASE1:
+ ret = ad9834_write_phase(st, this_attr->address, val);
+ break;
+ case AD9834_OPBITEN:
+ if (st->control & AD9834_MODE) {
+ ret = -EINVAL; /* AD9834 reserved mode */
+ break;
+ }
+
+ if (val)
+ st->control |= AD9834_OPBITEN;
+ else
+ st->control &= ~AD9834_OPBITEN;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_PIN_SW:
+ if (val)
+ st->control |= AD9834_PIN_SW;
+ else
+ st->control &= ~AD9834_PIN_SW;
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_FSEL:
+ case AD9834_PSEL:
+ if (val == 0)
+ st->control &= ~(this_attr->address | AD9834_PIN_SW);
+ else if (val == 1) {
+ st->control |= this_attr->address;
+ st->control &= ~AD9834_PIN_SW;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ case AD9834_RESET:
+ if (val)
+ st->control &= ~AD9834_RESET;
+ else
+ st->control |= AD9834_RESET;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t ad9834_store_wavetype(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret = 0;
+ bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch ((u32) this_attr->address) {
+ case 0:
+ if (sysfs_streq(buf, "sine")) {
+ st->control &= ~AD9834_MODE;
+ if (is_ad9833_7)
+ st->control &= ~AD9834_OPBITEN;
+ } else if (sysfs_streq(buf, "triangle")) {
+ if (is_ad9833_7) {
+ st->control &= ~AD9834_OPBITEN;
+ st->control |= AD9834_MODE;
+ } else if (st->control & AD9834_OPBITEN) {
+ ret = -EINVAL; /* AD9834 reserved mode */
+ } else {
+ st->control |= AD9834_MODE;
+ }
+ } else if (is_ad9833_7 && sysfs_streq(buf, "square")) {
+ st->control &= ~AD9834_MODE;
+ st->control |= AD9834_OPBITEN;
+ } else {
+ ret = -EINVAL;
+ }
+
+ break;
+ case 1:
+ if (sysfs_streq(buf, "square") &&
+ !(st->control & AD9834_MODE)) {
+ st->control &= ~AD9834_MODE;
+ st->control |= AD9834_OPBITEN;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
+ char *str;
+
+ if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
+ str = "sine triangle square";
+ else if (st->control & AD9834_OPBITEN)
+ str = "sine";
+ else
+ str = "sine triangle";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+
+static IIO_DEVICE_ATTR(out_altvoltage0_out0_wavetype_available, S_IRUGO,
+ ad9834_show_out0_wavetype_available, NULL, 0);
+
+static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad9834_state *st = iio_priv(indio_dev);
+ char *str;
+
+ if (st->control & AD9834_MODE)
+ str = "";
+ else
+ str = "square";
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, S_IRUGO,
+ ad9834_show_out1_wavetype_available, NULL, 0);
+
+/**
+ * see dds.h for further information
+ */
+
+static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
+static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
+static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
+static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
+
+static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
+static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
+static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
+static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
+
+static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+ ad9834_write, AD9834_PIN_SW);
+static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
+static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
+ ad9834_write, AD9834_OPBITEN);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);
+
+static struct attribute *ad9834_attributes[] = {
+ &iio_dev_attr_out_altvoltage0_frequency0.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency1.dev_attr.attr,
+ &iio_const_attr_out_altvoltage0_frequency_scale.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phase0.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phase1.dev_attr.attr,
+ &iio_const_attr_out_altvoltage0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_pincontrol_en.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequencysymbol.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out_enable.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out1_enable.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out1_wavetype.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out0_wavetype_available.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out1_wavetype_available.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute *ad9833_attributes[] = {
+ &iio_dev_attr_out_altvoltage0_frequency0.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequency1.dev_attr.attr,
+ &iio_const_attr_out_altvoltage0_frequency_scale.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phase0.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phase1.dev_attr.attr,
+ &iio_const_attr_out_altvoltage0_phase_scale.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_frequencysymbol.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_phasesymbol.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out_enable.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out0_wavetype.dev_attr.attr,
+ &iio_dev_attr_out_altvoltage0_out0_wavetype_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad9834_attribute_group = {
+ .attrs = ad9834_attributes,
+};
+
+static const struct attribute_group ad9833_attribute_group = {
+ .attrs = ad9833_attributes,
+};
+
+static const struct iio_info ad9834_info = {
+ .attrs = &ad9834_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_info ad9833_info = {
+ .attrs = &ad9833_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad9834_probe(struct spi_device *spi)
+{
+ struct ad9834_platform_data *pdata = spi->dev.platform_data;
+ struct ad9834_state *st;
+ struct iio_dev *indio_dev;
+ struct regulator *reg;
+ int ret;
+
+ if (!pdata) {
+ dev_dbg(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ reg = devm_regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+ }
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
+ st->mclk = pdata->mclk;
+ st->spi = spi;
+ st->devid = spi_get_device_id(spi)->driver_data;
+ st->reg = reg;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ switch (st->devid) {
+ case ID_AD9833:
+ case ID_AD9837:
+ indio_dev->info = &ad9833_info;
+ break;
+ default:
+ indio_dev->info = &ad9834_info;
+ break;
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Setup default messages */
+
+ st->xfer.tx_buf = &st->data;
+ st->xfer.len = 2;
+
+ spi_message_init(&st->msg);
+ spi_message_add_tail(&st->xfer, &st->msg);
+
+ st->freq_xfer[0].tx_buf = &st->freq_data[0];
+ st->freq_xfer[0].len = 2;
+ st->freq_xfer[0].cs_change = 1;
+ st->freq_xfer[1].tx_buf = &st->freq_data[1];
+ st->freq_xfer[1].len = 2;
+
+ spi_message_init(&st->freq_msg);
+ spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
+ spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
+
+ st->control = AD9834_B28 | AD9834_RESET;
+
+ if (!pdata->en_div2)
+ st->control |= AD9834_DIV2;
+
+ if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
+ st->control |= AD9834_SIGN_PIB;
+
+ st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+ ret = spi_sync(st->spi, &st->msg);
+ if (ret) {
+ dev_err(&spi->dev, "device init failed\n");
+ goto error_disable_reg;
+ }
+
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
+
+ return ret;
+}
+
+static int ad9834_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad9834_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad9834_id[] = {
+ {"ad9833", ID_AD9833},
+ {"ad9834", ID_AD9834},
+ {"ad9837", ID_AD9837},
+ {"ad9838", ID_AD9838},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad9834_id);
+
+static struct spi_driver ad9834_driver = {
+ .driver = {
+ .name = "ad9834",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad9834_probe,
+ .remove = ad9834_remove,
+ .id_table = ad9834_id,
+};
+module_spi_driver(ad9834_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
new file mode 100644
index 00000000000000..8ca6e52bae6ba6
--- /dev/null
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -0,0 +1,112 @@
+/*
+ * AD9833/AD9834/AD9837/AD9838 SPI DDS driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef IIO_DDS_AD9834_H_
+#define IIO_DDS_AD9834_H_
+
+/* Registers */
+
+#define AD9834_REG_CMD (0 << 14)
+#define AD9834_REG_FREQ0 (1 << 14)
+#define AD9834_REG_FREQ1 (2 << 14)
+#define AD9834_REG_PHASE0 (6 << 13)
+#define AD9834_REG_PHASE1 (7 << 13)
+
+/* Command Control Bits */
+
+#define AD9834_B28 (1 << 13)
+#define AD9834_HLB (1 << 12)
+#define AD9834_FSEL (1 << 11)
+#define AD9834_PSEL (1 << 10)
+#define AD9834_PIN_SW (1 << 9)
+#define AD9834_RESET (1 << 8)
+#define AD9834_SLEEP1 (1 << 7)
+#define AD9834_SLEEP12 (1 << 6)
+#define AD9834_OPBITEN (1 << 5)
+#define AD9834_SIGN_PIB (1 << 4)
+#define AD9834_DIV2 (1 << 3)
+#define AD9834_MODE (1 << 1)
+
+#define AD9834_FREQ_BITS 28
+#define AD9834_PHASE_BITS 12
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+/**
+ * struct ad9834_state - driver instance specific data
+ * @spi: spi_device
+ * @reg: supply regulator
+ * @mclk: external master clock
+ * @control: cached control word
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @data: spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9834_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned int mclk;
+ unsigned short control;
+ unsigned short devid;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[2];
+ struct spi_message freq_msg;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data ____cacheline_aligned;
+ __be16 freq_data[2];
+};
+
+
+/*
+ * TODO: struct ad9834_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad9834_platform_data - platform specific information
+ * @mclk: master clock in Hz
+ * @freq0: power up freq0 tuning word in Hz
+ * @freq1: power up freq1 tuning word in Hz
+ * @phase0: power up phase0 value [0..4095] correlates with 0..2PI
+ * @phase1: power up phase1 value [0..4095] correlates with 0..2PI
+ * @en_div2: digital output/2 is passed to the SIGN BIT OUT pin
+ * @en_signbit_msb_out: the MSB (or MSB/2) of the DAC data is connected to the
+ * SIGN BIT OUT pin. en_div2 controls whether it is the MSB
+ * or MSB/2 that is output. if en_signbit_msb_out=false,
+ * the on-board comparator is connected to SIGN BIT OUT
+ */
+
+struct ad9834_platform_data {
+ unsigned int mclk;
+ unsigned int freq0;
+ unsigned int freq1;
+ unsigned short phase0;
+ unsigned short phase1;
+ bool en_div2;
+ bool en_signbit_msb_out;
+};
+
+/**
+ * ad9834_supported_device_ids:
+ */
+
+enum ad9834_supported_device_ids {
+ ID_AD9833,
+ ID_AD9834,
+ ID_AD9837,
+ ID_AD9838,
+};
+
+#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/frequency/dds.h b/drivers/staging/iio/frequency/dds.h
new file mode 100644
index 00000000000000..611e2b0cfc4cde
--- /dev/null
+++ b/drivers/staging/iio/frequency/dds.h
@@ -0,0 +1,110 @@
+/*
+ * dds.h - sysfs attributes associated with DDS devices
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_frequencyY
+ */
+
+#define IIO_DEV_ATTR_FREQ(_channel, _num, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_frequency##_num, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_frequencyY_scale
+ */
+
+#define IIO_CONST_ATTR_FREQ_SCALE(_channel, _string) \
+ IIO_CONST_ATTR(out_altvoltage##_channel##_frequency_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_frequencysymbol
+ */
+
+#define IIO_DEV_ATTR_FREQSYMBOL(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_frequencysymbol, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_phaseY
+ */
+
+#define IIO_DEV_ATTR_PHASE(_channel, _num, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_phase##_num, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_phaseY_scale
+ */
+
+#define IIO_CONST_ATTR_PHASE_SCALE(_channel, _string) \
+ IIO_CONST_ATTR(out_altvoltage##_channel##_phase_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_phasesymbol
+ */
+
+#define IIO_DEV_ATTR_PHASESYMBOL(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_phasesymbol, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_pincontrol_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_pincontrol_en, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_pincontrol_frequency_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_FREQ_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_pincontrol_frequency_en,\
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_pincontrol_phase_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_PHASE_EN(_channel, _mode, _show, _store, _addr)\
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_pincontrol_phase_en, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_out_enable
+ */
+
+#define IIO_DEV_ATTR_OUT_ENABLE(_channel, _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_out_enable, \
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_outY_enable
+ */
+
+#define IIO_DEV_ATTR_OUTY_ENABLE(_channel, _output, \
+ _mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_out##_output##_enable,\
+ _mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_outY_wavetype
+ */
+
+#define IIO_DEV_ATTR_OUT_WAVETYPE(_channel, _output, _store, _addr) \
+ IIO_DEVICE_ATTR(out_altvoltage##_channel##_out##_output##_wavetype,\
+ S_IWUSR, NULL, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../out_altvoltageX_outY_wavetype_available
+ */
+
+#define IIO_CONST_ATTR_OUT_WAVETYPES_AVAILABLE(_channel, _output, _modes)\
+ IIO_CONST_ATTR( \
+ out_altvoltage##_channel##_out##_output##_wavetype_available, _modes)
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
new file mode 100644
index 00000000000000..e969107ddb47e1
--- /dev/null
+++ b/drivers/staging/iio/light/isl29028.c
@@ -0,0 +1,562 @@
+/*
+ * IIO driver for the light sensor ISL29028.
+ * ISL29028 is Concurrent Ambient Light and Proximity Sensor
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define CONVERSION_TIME_MS 100
+
+#define ISL29028_REG_CONFIGURE 0x01
+
+#define CONFIGURE_ALS_IR_MODE_ALS 0
+#define CONFIGURE_ALS_IR_MODE_IR BIT(0)
+#define CONFIGURE_ALS_IR_MODE_MASK BIT(0)
+
+#define CONFIGURE_ALS_RANGE_LOW_LUX 0
+#define CONFIGURE_ALS_RANGE_HIGH_LUX BIT(1)
+#define CONFIGURE_ALS_RANGE_MASK BIT(1)
+
+#define CONFIGURE_ALS_DIS 0
+#define CONFIGURE_ALS_EN BIT(2)
+#define CONFIGURE_ALS_EN_MASK BIT(2)
+
+#define CONFIGURE_PROX_DRIVE BIT(3)
+
+#define CONFIGURE_PROX_SLP_SH 4
+#define CONFIGURE_PROX_SLP_MASK (7 << CONFIGURE_PROX_SLP_SH)
+
+#define CONFIGURE_PROX_EN BIT(7)
+#define CONFIGURE_PROX_EN_MASK BIT(7)
+
+#define ISL29028_REG_INTERRUPT 0x02
+
+#define ISL29028_REG_PROX_DATA 0x08
+#define ISL29028_REG_ALSIR_L 0x09
+#define ISL29028_REG_ALSIR_U 0x0A
+
+#define ISL29028_REG_TEST1_MODE 0x0E
+#define ISL29028_REG_TEST2_MODE 0x0F
+
+#define ISL29028_NUM_REGS (ISL29028_REG_TEST2_MODE + 1)
+
+enum als_ir_mode {
+ MODE_NONE = 0,
+ MODE_ALS,
+ MODE_IR
+};
+
+struct isl29028_chip {
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ unsigned int prox_sampling;
+ bool enable_prox;
+
+ int lux_scale;
+ int als_ir_mode;
+};
+
+static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
+ unsigned int sampling)
+{
+ static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
+ int sel;
+ unsigned int period = DIV_ROUND_UP(1000, sampling);
+
+ for (sel = 0; sel < ARRAY_SIZE(prox_period); ++sel) {
+ if (period >= prox_period[sel])
+ break;
+ }
+ return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_PROX_SLP_MASK, sel << CONFIGURE_PROX_SLP_SH);
+}
+
+static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
+{
+ int ret;
+ int val = 0;
+
+ if (enable)
+ val = CONFIGURE_PROX_EN;
+ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_PROX_EN_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for conversion to be complete for first sample */
+ mdelay(DIV_ROUND_UP(1000, chip->prox_sampling));
+ return 0;
+}
+
+static int isl29028_set_als_scale(struct isl29028_chip *chip, int lux_scale)
+{
+ int val = (lux_scale == 2000) ? CONFIGURE_ALS_RANGE_HIGH_LUX :
+ CONFIGURE_ALS_RANGE_LOW_LUX;
+
+ return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_RANGE_MASK, val);
+}
+
+static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
+ enum als_ir_mode mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case MODE_ALS:
+ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_ALS);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_RANGE_MASK, CONFIGURE_ALS_RANGE_HIGH_LUX);
+ break;
+
+ case MODE_IR:
+ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_IR);
+ break;
+
+ case MODE_NONE:
+ return regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_DIS);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Enable the ALS/IR */
+ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
+ CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Need to wait for conversion time if ALS/IR mode enabled */
+ mdelay(CONVERSION_TIME_MS);
+ return 0;
+}
+
+static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
+{
+ unsigned int lsb;
+ unsigned int msb;
+ int ret;
+
+ ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error in reading register ALSIR_L err %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error in reading register ALSIR_U err %d\n", ret);
+ return ret;
+ }
+
+ *als_ir = ((msb & 0xF) << 8) | (lsb & 0xFF);
+ return 0;
+}
+
+static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Error in reading register %d, error %d\n",
+ ISL29028_REG_PROX_DATA, ret);
+ return ret;
+ }
+ *prox = data;
+ return 0;
+}
+
+static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
+{
+ int ret;
+
+ if (!chip->enable_prox) {
+ ret = isl29028_enable_proximity(chip, true);
+ if (ret < 0)
+ return ret;
+ chip->enable_prox = true;
+ }
+ return isl29028_read_proxim(chip, prox_data);
+}
+
+static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
+{
+ int ret;
+ int als_ir_data;
+
+ if (chip->als_ir_mode != MODE_ALS) {
+ ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error in enabling ALS mode err %d\n", ret);
+ return ret;
+ }
+ chip->als_ir_mode = MODE_ALS;
+ }
+
+ ret = isl29028_read_als_ir(chip, &als_ir_data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * convert als data count to lux.
+ * if lux_scale = 125, lux = count * 0.031
+ * if lux_scale = 2000, lux = count * 0.49
+ */
+ if (chip->lux_scale == 125)
+ als_ir_data = (als_ir_data * 31) / 1000;
+ else
+ als_ir_data = (als_ir_data * 49) / 100;
+
+ *als_data = als_ir_data;
+ return 0;
+}
+
+static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
+{
+ int ret;
+
+ if (chip->als_ir_mode != MODE_IR) {
+ ret = isl29028_set_als_ir_mode(chip, MODE_IR);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error in enabling IR mode err %d\n", ret);
+ return ret;
+ }
+ chip->als_ir_mode = MODE_IR;
+ }
+ return isl29028_read_als_ir(chip, ir_data);
+}
+
+/* Channel IO */
+static int isl29028_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct isl29028_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->lock);
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
+ dev_err(chip->dev,
+ "proximity: mask value 0x%08lx not supported\n",
+ mask);
+ break;
+ }
+ if (val < 1 || val > 100) {
+ dev_err(chip->dev,
+ "Samp_freq %d is not in range[1:100]\n", val);
+ break;
+ }
+ ret = isl29028_set_proxim_sampling(chip, val);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Setting proximity samp_freq fail, err %d\n",
+ ret);
+ break;
+ }
+ chip->prox_sampling = val;
+ break;
+
+ case IIO_LIGHT:
+ if (mask != IIO_CHAN_INFO_SCALE) {
+ dev_err(chip->dev,
+ "light: mask value 0x%08lx not supported\n",
+ mask);
+ break;
+ }
+ if ((val != 125) && (val != 2000)) {
+ dev_err(chip->dev,
+ "lux scale %d is invalid [125, 2000]\n", val);
+ break;
+ }
+ ret = isl29028_set_als_scale(chip, val);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Setting lux scale fail with error %d\n", ret);
+ break;
+ }
+ chip->lux_scale = val;
+ break;
+
+ default:
+ dev_err(chip->dev, "Unsupported channel type\n");
+ break;
+ }
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static int isl29028_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct isl29028_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ mutex_lock(&chip->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = isl29028_als_get(chip, val);
+ break;
+ case IIO_INTENSITY:
+ ret = isl29028_ir_get(chip, val);
+ break;
+ case IIO_PROXIMITY:
+ ret = isl29028_proxim_get(chip, val);
+ break;
+ default:
+ break;
+ }
+ if (ret < 0)
+ break;
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type != IIO_PROXIMITY)
+ break;
+ *val = chip->prox_sampling;
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type != IIO_LIGHT)
+ break;
+ *val = chip->lux_scale;
+ ret = IIO_VAL_INT;
+ break;
+
+ default:
+ dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask);
+ break;
+ }
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static IIO_CONST_ATTR(in_proximity_sampling_frequency_available,
+ "1, 3, 5, 10, 13, 20, 83, 100");
+static IIO_CONST_ATTR(in_illuminance_scale_available, "125, 2000");
+
+#define ISL29028_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
+#define ISL29028_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr)
+static struct attribute *isl29028_attributes[] = {
+ ISL29028_CONST_ATTR(in_proximity_sampling_frequency_available),
+ ISL29028_CONST_ATTR(in_illuminance_scale_available),
+ NULL,
+};
+
+static const struct attribute_group isl29108_group = {
+ .attrs = isl29028_attributes,
+};
+
+static const struct iio_chan_spec isl29028_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_INTENSITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ }
+};
+
+static const struct iio_info isl29028_info = {
+ .attrs = &isl29108_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = &isl29028_read_raw,
+ .write_raw = &isl29028_write_raw,
+};
+
+static int isl29028_chip_init(struct isl29028_chip *chip)
+{
+ int ret;
+
+ chip->enable_prox = false;
+ chip->prox_sampling = 20;
+ chip->lux_scale = 2000;
+ chip->als_ir_mode = MODE_NONE;
+
+ ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ __func__, ISL29028_REG_TEST1_MODE, ret);
+ return ret;
+ }
+ ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ __func__, ISL29028_REG_TEST2_MODE, ret);
+ return ret;
+ }
+
+ ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ __func__, ISL29028_REG_CONFIGURE, ret);
+ return ret;
+ }
+
+ ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s(): setting the proximity, err = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = isl29028_set_als_scale(chip, chip->lux_scale);
+ if (ret < 0)
+ dev_err(chip->dev, "%s(): setting als scale failed, err = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static bool is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ISL29028_REG_INTERRUPT:
+ case ISL29028_REG_PROX_DATA:
+ case ISL29028_REG_ALSIR_L:
+ case ISL29028_REG_ALSIR_U:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config isl29028_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = is_volatile_reg,
+ .max_register = ISL29028_NUM_REGS - 1,
+ .num_reg_defaults_raw = ISL29028_NUM_REGS,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int isl29028_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct isl29028_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ return -ENOMEM;
+ }
+
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->dev = &client->dev;
+ mutex_init(&chip->lock);
+
+ chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = isl29028_chip_init(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "chip initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ indio_dev->info = &isl29028_info;
+ indio_dev->channels = isl29028_channels;
+ indio_dev->num_channels = ARRAY_SIZE(isl29028_channels);
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(chip->dev, "iio registration fails with error %d\n",
+ ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int isl29028_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id isl29028_id[] = {
+ {"isl29028", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, isl29028_id);
+
+static const struct of_device_id isl29028_of_match[] = {
+ { .compatible = "isl,isl29028", },
+ { .compatible = "isil,isl29028", },/* deprecated, don't use */
+ { },
+};
+MODULE_DEVICE_TABLE(of, isl29028_of_match);
+
+static struct i2c_driver isl29028_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "isl29028",
+ .owner = THIS_MODULE,
+ .of_match_table = isl29028_of_match,
+ },
+ .probe = isl29028_probe,
+ .remove = isl29028_remove,
+ .id_table = isl29028_id,
+};
+
+module_i2c_driver(isl29028_driver);
+
+MODULE_DESCRIPTION("ISL29028 Ambient Light and Proximity Sensor driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
diff --git a/drivers/staging/iio/light/tsl2591.c b/drivers/staging/iio/light/tsl2591.c
new file mode 100644
index 00000000000000..ccb05d72cb5d7e
--- /dev/null
+++ b/drivers/staging/iio/light/tsl2591.c
@@ -0,0 +1,408 @@
+/**
+ * Kernel IIO driver for the TSL2591 illuminance I2C chip.
+ * Datasheet : http://www.adafruit.com/datasheets/TSL25911_Datasheet_EN_v1.pdf
+ *
+ * Author : Maxime Jourdan <maxime.jourdan@parrot.com>
+ * Date : 14/01/2015
+ * Author : Luis Mario Domenzain <ld@airinov.fr>
+ * Date : 12/10/2015
+ */
+#include "tsl2591.h"
+
+// Per-device state; lives in the iio_priv() area of the iio_dev.
+struct tsl2591 {
+	struct i2c_client *i2c;
+
+	// Latest raw ADC readings (C0DATA / C1DATA register pairs)
+	u16 data0;
+	u16 data1;
+
+	// Integration time in ms (100 -> 600ms, in 100 ms steps)
+	u16 int_time;
+
+	// Gain index. 0 = Low, [..] 3 = Max
+	u8 gain;
+};
+
+/*
+ * tsl2591_read8() - read one 8-bit register.
+ * @tsl2591:	device state
+ * @reg:	command/register byte to select
+ * @val:	filled with the register contents on success
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int tsl2591_read8(struct tsl2591 *tsl2591, u8 reg, u8 *val)
+{
+	int ret;
+	struct i2c_client *client = tsl2591->i2c;
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr  = client->addr,
+			.flags = 0,
+			.len   = 1,
+			.buf   = (u8 *)&reg,
+		},
+		[1] = {
+			.addr  = client->addr,
+			.flags = I2C_M_RD,
+			.len   = 1,
+			.buf   = val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
+		return ret;
+	}
+	/* i2c_transfer() returns the number of messages actually processed;
+	 * a short transfer is an error even without a negative code. */
+	if (ret != ARRAY_SIZE(msg))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * tsl2591_read_consecutive() - burst-read @bytes consecutive registers
+ * starting at @reg into @val.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int tsl2591_read_consecutive(
+		struct tsl2591 *tsl2591,
+		u8 reg,
+		u8 *val,
+		u16 bytes)
+{
+	int ret;
+	struct i2c_client *client = tsl2591->i2c;
+	struct i2c_msg msg[] = {
+		[0] = {
+			.addr  = client->addr,
+			.flags = 0,
+			.len   = 1,
+			.buf   = (u8 *)&reg,
+		},
+		[1] = {
+			.addr  = client->addr,
+			.flags = I2C_M_RD,
+			.len   = bytes,
+			.buf   = val,
+		},
+	};
+
+	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed reading %d bytes at 0x%02x!\n",
+			bytes, reg);
+		return ret;
+	}
+	/* Catch short transfers that i2c_transfer() reports as a
+	 * non-negative message count. */
+	if (ret != ARRAY_SIZE(msg))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * tsl2591_write8() - write one 8-bit register in a single I2C message.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int tsl2591_write8(struct tsl2591 *tsl2591, u8 reg, u8 val)
+{
+	int ret;
+	struct i2c_client *client = tsl2591->i2c;
+	struct i2c_msg msg;
+	struct {
+		u8 reg;
+		u8 val;
+	} __packed buf;
+
+	buf.reg = reg;
+	buf.val = val;
+
+	msg.addr  = client->addr;
+	msg.flags = 0;
+	msg.len   = 2;
+	msg.buf   = (u8 *)&buf;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
+		return ret;
+	}
+	/* One message requested; anything else is a short transfer. */
+	if (ret != 1)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * tsl2591_is_powered() - report whether PON and AEN are both set.
+ *
+ * Returns 1 when the oscillator and the ALS engine are enabled, 0
+ * otherwise. A failed register read counts as "not powered" (the
+ * original code used @val uninitialized in that case).
+ */
+static int tsl2591_is_powered(struct tsl2591 *tsl2591)
+{
+	u8 val = 0;
+	int ret;
+
+	ret = tsl2591_read8(tsl2591, TSL2591_ENABLE_RW, &val);
+	if (ret < 0)
+		return 0;
+
+	val &= TSL2591_ENABLE_PON | TSL2591_ENABLE_AEN;
+	return val == (TSL2591_ENABLE_PON | TSL2591_ENABLE_AEN);
+}
+
+/*
+ * tsl2591_set_integration() - program gain and integration time.
+ *
+ * Turn off integration, set the integration time and gain, then turn it
+ * on. This seems to reset the ADC for the next integration. Otherwise
+ * it carries over state from before and gives false results.
+ *
+ * Returns 0 on success, negative errno on failure (the original
+ * chained the writes with ||, which collapsed real error codes into 1).
+ */
+static int tsl2591_set_integration(struct tsl2591 *tsl2591)
+{
+	int ret;
+	u8 val = TSL2591_CONFIG_SET_AGAIN(tsl2591->gain) |
+		 TSL2591_CONFIG_SET_ATIME(tsl2591->int_time);
+
+	ret = tsl2591_write8(tsl2591, TSL2591_ENABLE_RW, TSL2591_ENABLE_PON);
+	if (ret)
+		return ret;
+
+	ret = tsl2591_write8(tsl2591, TSL2591_CONFIG_RW, val);
+	if (ret)
+		return ret;
+
+	return tsl2591_write8(tsl2591, TSL2591_ENABLE_RW,
+			      TSL2591_ENABLE_AEN | TSL2591_ENABLE_PON);
+}
+
+/*
+ * tsl2591_power() - power the chip up (PON + AEN, then reprogram the
+ * gain/integration settings) or down.
+ *
+ * Returns 0 on success, negative errno on failure; the original
+ * unconditionally returned 0 on the power-up path.
+ */
+static int tsl2591_power(struct tsl2591 *tsl2591, int on)
+{
+	int ret;
+
+	if (!on)
+		return tsl2591_write8(tsl2591, TSL2591_ENABLE_RW,
+				      TSL2591_ENABLE_OFF);
+
+	ret = tsl2591_write8(tsl2591, TSL2591_ENABLE_RW,
+			     TSL2591_ENABLE_PON | TSL2591_ENABLE_AEN);
+	if (ret)
+		return ret;
+
+	return tsl2591_set_integration(tsl2591);
+}
+
+/*
+ * tsl2591_get_adc() - wait for a completed integration and latch both
+ * ADC channels into tsl2591->data0/data1.
+ *
+ * Returns 0 on success, negative errno on I2C failure or when AVALID
+ * never asserts. The original looped forever on a dead chip and
+ * ignored read errors.
+ */
+static int tsl2591_get_adc(struct tsl2591 *tsl2591)
+{
+	u8 all[5];
+	int tries = 40;	/* 40 * 30 ms covers the 600 ms max int_time twice */
+	int ret;
+
+	if (!tsl2591_is_powered(tsl2591))
+		tsl2591_power(tsl2591, 1);
+
+	for (;;) {
+		//Read status and ADCs in a single transaction: 5 bytes.
+		ret = tsl2591_read_consecutive(tsl2591, TSL2591_ADCDATA_R,
+					       all, 5);
+		if (ret < 0)
+			return ret;
+		if (all[0] & TSL2591_STATUS_AVALID)
+			break;
+		if (--tries <= 0)
+			return -ETIMEDOUT;
+		msleep(30);
+	}
+
+	tsl2591->data0 = all[2] << 8 | all[1];
+	tsl2591->data1 = all[4] << 8 | all[3];
+
+	return 0;
+}
+
+/* Channel 0 carries the combined-light intensity, channel 1 the IR-only
+ * intensity (per the IIO_MOD_LIGHT_* modifiers); both expose only the
+ * raw ADC count. Channel numbers match data0/data1 in read_raw(). */
+static const struct iio_chan_spec tsl2591_channels[] = {
+	{
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.channel = 0,
+		.channel2 = IIO_MOD_LIGHT_BOTH,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	}, {
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.channel = 1,
+		.channel2 = IIO_MOD_LIGHT_IR,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	}
+};
+
+/*
+ * tsl2591_read_raw() - IIO read hook; returns the latest raw ADC count
+ * for the requested intensity channel.
+ *
+ * Returns IIO_VAL_INT with *val set, or a negative errno.
+ */
+static int tsl2591_read_raw(struct iio_dev *indio_dev,
+		struct iio_chan_spec const *chan,
+		int *val,
+		int *val2,
+		long mask)
+{
+	struct tsl2591 *tsl2591 = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:	/* the only mask the channels advertise */
+		switch (chan->type) {
+		case IIO_INTENSITY:
+			/* Propagate an ADC failure instead of exposing
+			 * stale data. */
+			ret = tsl2591_get_adc(tsl2591);
+			if (ret < 0)
+				break;
+
+			if (chan->channel == 0)
+				*val = tsl2591->data0;
+			else
+				*val = tsl2591->data1;
+
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* sysfs: show the current analog gain index (0 = low .. 3 = max). */
+static ssize_t tsl2591_gain_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2591 *chip = iio_priv(dev_get_drvdata(dev));
+
+	return sprintf(buf, "%d\n", chip->gain);
+}
+
+/* sysfs: set the analog gain index (0 = low .. 3 = max). */
+static ssize_t tsl2591_gain_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct tsl2591 *tsl2591 = iio_priv(indio_dev);
+	unsigned long value;
+	int ret;
+
+	/* kstrtoul() replaces the deprecated strict_strtoul() */
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	/* Only allow 0->3 range; value is unsigned, so the original
+	 * 'value < 0' check was always false and is dropped. */
+	if (value > 3)
+		return -EINVAL;
+
+	tsl2591->gain = value;
+	tsl2591_set_integration(tsl2591);
+
+	return len;
+}
+
+/* sysfs: show the current integration time in milliseconds. */
+static ssize_t tsl2591_int_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2591 *chip = iio_priv(dev_get_drvdata(dev));
+
+	return sprintf(buf, "%d\n", chip->int_time);
+}
+
+/* sysfs: set the integration time in ms (100..600, in steps of 100). */
+static ssize_t tsl2591_int_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct tsl2591 *tsl2591 = iio_priv(indio_dev);
+	unsigned long value;
+	int ret;
+
+	/* kstrtoul() replaces the deprecated strict_strtoul() */
+	ret = kstrtoul(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	// Only allow 100->600 range, with 100 ms step
+	if ((value < 100) || (value > 600) || (value % 100))
+		return -EINVAL;
+
+	tsl2591->int_time = value;
+	tsl2591_set_integration(tsl2591);
+
+	return len;
+}
+
+/* Extra sysfs knobs: gain index and integration time (ms). */
+static DEVICE_ATTR(illuminance0_gain, S_IRUGO | S_IWUSR,
+		tsl2591_gain_show, tsl2591_gain_store);
+
+static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
+		tsl2591_int_time_show, tsl2591_int_time_store);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&dev_attr_illuminance0_integration_time.attr,	/* I time*/
+	&dev_attr_illuminance0_gain.attr,		/* gain */
+	NULL
+};
+
+static struct attribute_group tsl2591_attribute_group = {
+	.attrs = sysfs_attrs_ctrl,
+};
+
+/* IIO hooks: raw reads plus the custom attribute group above. */
+static const struct iio_info tsl2591_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &tsl2591_read_raw,
+	.attrs = &tsl2591_attribute_group,
+};
+
+/* Verify the ID register matches the TSL2591 signature (0x50). */
+static int tsl2591_identify(struct tsl2591 *tsl2591)
+{
+	int ret;
+	u8 id = 0;
+
+	ret = tsl2591_read8(tsl2591, TSL2591_ID_R, &id);
+	if (ret)
+		return ret;
+
+	return (id == TSL2591_DEVICE_ID_VALUE) ? 0 : -ENODEV;
+}
+
+/*
+ * tsl2591_probe() - allocate state, verify the hardware, register with IIO.
+ *
+ * The chip is powered and identified BEFORE iio_device_register() so a
+ * missing/bogus chip is never exposed to userspace (the original
+ * registered first and probed second). On error the chip is powered
+ * back down and the iio_dev freed.
+ */
+static int tsl2591_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret;
+	struct tsl2591 *tsl2591;
+	struct iio_dev *indio_dev;
+
+	indio_dev = iio_device_alloc(sizeof(*tsl2591));
+	if (indio_dev == NULL) {
+		dev_err(&client->dev, "iio allocation failed\n");
+		return -ENOMEM;
+	}
+
+	tsl2591 = iio_priv(indio_dev);
+	tsl2591->i2c = client;
+	tsl2591->int_time = 100;	/* shortest integration time (ms) */
+	tsl2591->gain = 0;		/* lowest gain */
+	i2c_set_clientdata(client, indio_dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = tsl2591->i2c->name;
+	indio_dev->info = &tsl2591_info;
+	indio_dev->channels = tsl2591_channels;
+	indio_dev->num_channels = ARRAY_SIZE(tsl2591_channels);
+
+	ret = tsl2591_power(tsl2591, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "powerup failed\n");
+		goto fail_free;
+	}
+
+	ret = tsl2591_identify(tsl2591);
+	if (ret < 0) {
+		dev_err(&client->dev, "identification failed\n");
+		goto fail_power;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&client->dev, "iio registration failed\n");
+		goto fail_power;
+	}
+
+	return 0;
+
+fail_power:
+	tsl2591_power(tsl2591, 0);
+fail_free:
+	iio_device_free(indio_dev);
+	return ret;
+}
+
+/*
+ * tsl2591_remove() - unregister, power the chip down, free the iio_dev.
+ * The original left the chip running after removal.
+ */
+static int tsl2591_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+	tsl2591_power(iio_priv(indio_dev), 0);
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+/* I2C IDs, also used for module autoloading. */
+static const struct i2c_device_id tsl2591_id[] = {
+	{"tsl2591", 0},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, tsl2591_id);
+
+/* I2C driver glue; probe/remove are defined above. */
+static struct i2c_driver tsl2591_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "tsl2591",
+	},
+	.probe	= tsl2591_probe,
+	.remove	= tsl2591_remove,
+	.id_table = tsl2591_id,
+};
+
+module_i2c_driver(tsl2591_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxime Jourdan <maxime.jourdan@parrot.com>");
+MODULE_AUTHOR("Luis Mario Domenzain <ld@airinov.fr>");
+MODULE_DESCRIPTION("TSL2591 illuminance sensor driver");
diff --git a/drivers/staging/iio/light/tsl2591.h b/drivers/staging/iio/light/tsl2591.h
new file mode 100644
index 00000000000000..d10def830f5c1a
--- /dev/null
+++ b/drivers/staging/iio/light/tsl2591.h
@@ -0,0 +1,106 @@
+/**
+ * Kernel IIO driver for the TSL2591 illuminance I2C chip.
+ * Datasheet : http://www.adafruit.com/datasheets/TSL25911_Datasheet_EN_v1.pdf
+ *
+ * Author : Maxime Jourdan <maxime.jourdan@parrot.com>
+ * Date : 14/01/2015
+ * Author : Luis Mario Domenzain <ld@airinov.fr>
+ * Date : 12/10/2015
+ */
+#ifndef __H_TSL2591
+#define __H_TSL2591
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+//I2C device address:
+#define TSL2591_ADDR 0x29
+
+#define TSL2591_DEVICE_ID_VALUE 0x50
+
+#define TSL2591_COMMAND (0x1 << 7)
+
+//TYPES OF COMMAND
+#define TSL2591_ADDRESS 0x20
+#define TSL2591_SPECIAL 0xC0
+
+//ADDRESS REGISTER COMMANDS
+#define TSL2591_ENABLE_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x00)
+#define TSL2591_CONFIG_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x01)
+//ALS Interrupt Threshold Register
+#define TSL2591_AILTL_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x04)
+#define TSL2591_AILTH_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x05)
+#define TSL2591_AIHTL_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x06)
+#define TSL2591_AIHTH_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x07)
+#define TSL2591_NPAILTL_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x08)
+#define TSL2591_NPAILTH_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x09)
+#define TSL2591_NPAIHTL_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x0A)
+#define TSL2591_NPAIHTH_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x0B)
+#define TSL2591_PERSIST_RW (TSL2591_COMMAND | TSL2591_ADDRESS | 0x0C)
+#define TSL2591_PID_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x11)
+#define TSL2591_ID_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x12)
+//ADC data registers, with extra convenience address for the whole block.
+#define TSL2591_STATUS_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x13)
+#define TSL2591_C0DATAL_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x14)
+#define TSL2591_C0DATAH_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x15)
+#define TSL2591_C1DATAL_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x16)
+#define TSL2591_C1DATAH_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x17)
+#define TSL2591_ADCDATA_R (TSL2591_COMMAND | TSL2591_ADDRESS | 0x13)
+
+//SPECIAL FUNCTION COMMANDS
+#define TSL2591_SET_INT (TSL2591_COMMAND | TSL2591_SPECIAL | 0x04)
+#define TSL2591_CLR_ALSINT (TSL2591_COMMAND | TSL2591_SPECIAL | 0x06)
+#define TSL2591_CLR_ALLINT (TSL2591_COMMAND | TSL2591_SPECIAL | 0x07)
+#define TSL2591_CLR_NPINT (TSL2591_COMMAND | TSL2591_SPECIAL | 0x0A)
+
+//ENABLE REGISTER FIELDS
+#define TSL2591_ENABLE_NPIEN (0x1 << 7)
+#define TSL2591_ENABLE_SAI (0x1 << 6)
+#define TSL2591_ENABLE_AIEN (0x1 << 4)
+#define TSL2591_ENABLE_AEN (0x1 << 1)
+#define TSL2591_ENABLE_PON (0x1 << 0)
+#define TSL2591_ENABLE_OFF 0x00
+
+//CONTROL REGISTER FIELDS
+#define TSL2591_CONFIG_SRESET (0x1 << 7)
+#define TSL2591_CONFIG_SET_AGAIN(gain) ((gain&0x3) << 4)
+#define TSL2591_CONFIG_SET_ATIME(time) ((time/100-1) << 0)
+
+//CONTROL REGISTER MASKS
+#define TSL2591_CONFIG_AGAIN (0x3 << 4) /*Two bits in the middle*/
+#define TSL2591_CONFIG_ATIME (0x7 << 0) /*Three bits at LSB*/
+
+//STATUS REGISTER MASKS
+#define TSL2591_STATUS_NPINT (0x1 << 5)
+#define TSL2591_STATUS_AINT (0x1 << 4)
+#define TSL2591_STATUS_AVALID (0x1 << 0)
+
+//PERSIST REGISTER OPTIONS
+#define TSL2591_APERS_1 0x0
+#define TSL2591_APERS_2 0x1
+#define TSL2591_APERS_3 0x2
+#define TSL2591_APERS_5 0x3
+#define TSL2591_APERS_10 0x4
+#define TSL2591_APERS_15 0x5
+#define TSL2591_APERS_20 0x6
+#define TSL2591_APERS_25 0x7
+#define TSL2591_APERS_30 0x8
+#define TSL2591_APERS_35 0x9
+#define TSL2591_APERS_40 0xA
+#define TSL2591_APERS_45 0xB
+#define TSL2591_APERS_50 0xC
+#define TSL2591_APERS_55 0xD
+#define TSL2591_APERS_60 0xF
+
+//Persist register mask
+#define TSL2591_APERS 0xF
+
+#endif
diff --git a/drivers/staging/iio/light/tsl2x7x.h b/drivers/staging/iio/light/tsl2x7x.h
new file mode 100644
index 00000000000000..ecae9221121673
--- /dev/null
+++ b/drivers/staging/iio/light/tsl2x7x.h
@@ -0,0 +1,100 @@
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * and proximity (prox) within the TAOS TSL2X7X family of devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __TSL2X7X_H
+#define __TSL2X7X_H
+#include <linux/pm.h>
+
+/* Max number of segments allowable in LUX table */
+#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
+#define MAX_DEFAULT_TABLE_BYTES (sizeof(int) * TSL2X7X_MAX_LUX_TABLE_SIZE)
+
+struct iio_dev;
+
+struct tsl2x7x_lux {
+ unsigned int ratio;
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+/**
+ * struct tsl2x7x_default_settings - power on defaults unless
+ * overridden by platform data.
+ * @als_time: ALS Integration time - multiple of 50mS
+ * @als_gain: Index into the ALS gain table.
+ * @als_gain_trim: default gain trim to account for
+ * aperture effects.
+ * @wait_time: Time between PRX and ALS cycles
+ * in 2.7 periods
+ * @prx_time: 5.2ms prox integration time -
+ * decrease in 2.7ms periods
+ * @prox_gain:             Proximity gain index
+ * @prox_config: Prox configuration filters.
+ * @als_cal_target: Known external ALS reading for
+ * calibration.
+ * @interrupts_en: Enable/Disable - 0x00 = none, 0x10 = als,
+ * 0x20 = prx, 0x30 = bth
+ * @persistence: H/W Filters, Number of 'out of limits'
+ * ADC readings PRX/ALS.
+ * @als_thresh_low: CH0 'low' count to trigger interrupt.
+ * @als_thresh_high: CH0 'high' count to trigger interrupt.
+ * @prox_thres_low: Low threshold proximity detection.
+ * @prox_thres_high: High threshold proximity detection
+ * @prox_pulse_count: Number if proximity emitter pulses
+ * @prox_max_samples_cal: Used for prox cal.
+ */
+struct tsl2x7x_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int wait_time;
+ int prx_time;
+ int prox_gain;
+ int prox_config;
+ int als_cal_target;
+ u8 interrupts_en;
+ u8 persistence;
+ int als_thresh_low;
+ int als_thresh_high;
+ int prox_thres_low;
+ int prox_thres_high;
+ int prox_pulse_count;
+ int prox_max_samples_cal;
+};
+
+/**
+ * struct tsl2X7X_platform_data - Platform callback, glass and defaults
+ * @platform_power: Suspend/resume platform callback
+ * @power_on: Power on callback
+ * @power_off: Power off callback
+ * @platform_lux_table:        Device specific glass coefficients
+ * @platform_default_settings: Device specific power on defaults
+ *
+ */
+struct tsl2X7X_platform_data {
+ int (*platform_power)(struct device *dev, pm_message_t);
+ int (*power_on)(struct iio_dev *indio_dev);
+ int (*power_off)(struct i2c_client *dev);
+ struct tsl2x7x_lux platform_lux_table[TSL2X7X_MAX_LUX_TABLE_SIZE];
+ struct tsl2x7x_settings *platform_default_settings;
+};
+
+#endif /* __TSL2X7X_H */
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
new file mode 100644
index 00000000000000..423f96bdf5952c
--- /dev/null
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -0,0 +1,2036 @@
+/*
+ * Device driver for monitoring ambient light intensity in (lux)
+ * and proximity detection (prox) within the TAOS TSL2X7X family of devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include "tsl2x7x.h"
+
+/* Cal defs*/
+#define PROX_STAT_CAL 0
+#define PROX_STAT_SAMP 1
+#define MAX_SAMPLES_CAL 200
+
+/* TSL2X7X Device ID */
+#define TRITON_ID 0x00
+#define SWORDFISH_ID 0x30
+#define HALIBUT_ID 0x20
+
+/* Lux calculation constants */
+#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
+
+/* TAOS Register definitions - note:
+ * depending on device, some of these register are not used and the
+ * register address is benign.
+ */
+/* 2X7X register offsets */
+#define TSL2X7X_MAX_CONFIG_REG 16
+
+/* Device Registers and Masks */
+#define TSL2X7X_CNTRL 0x00
+#define TSL2X7X_ALS_TIME 0X01
+#define TSL2X7X_PRX_TIME 0x02
+#define TSL2X7X_WAIT_TIME 0x03
+#define TSL2X7X_ALS_MINTHRESHLO 0X04
+#define TSL2X7X_ALS_MINTHRESHHI 0X05
+#define TSL2X7X_ALS_MAXTHRESHLO 0X06
+#define TSL2X7X_ALS_MAXTHRESHHI 0X07
+#define TSL2X7X_PRX_MINTHRESHLO 0X08
+#define TSL2X7X_PRX_MINTHRESHHI 0X09
+#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
+#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
+#define TSL2X7X_PERSISTENCE 0x0C
+#define TSL2X7X_PRX_CONFIG 0x0D
+#define TSL2X7X_PRX_COUNT 0x0E
+#define TSL2X7X_GAIN 0x0F
+#define TSL2X7X_NOTUSED 0x10
+#define TSL2X7X_REVID 0x11
+#define TSL2X7X_CHIPID 0x12
+#define TSL2X7X_STATUS 0x13
+#define TSL2X7X_ALS_CHAN0LO 0x14
+#define TSL2X7X_ALS_CHAN0HI 0x15
+#define TSL2X7X_ALS_CHAN1LO 0x16
+#define TSL2X7X_ALS_CHAN1HI 0x17
+#define TSL2X7X_PRX_LO 0x18
+#define TSL2X7X_PRX_HI 0x19
+
+/* tsl2X7X cmd reg masks */
+#define TSL2X7X_CMD_REG 0x80
+#define TSL2X7X_CMD_SPL_FN 0x60
+
+#define TSL2X7X_CMD_PROX_INT_CLR 0X05
+#define TSL2X7X_CMD_ALS_INT_CLR 0x06
+#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
+
+/* tsl2X7X cntrl reg masks */
+#define TSL2X7X_CNTL_ADC_ENBL 0x02
+#define TSL2X7X_CNTL_PWR_ON 0x01
+
+/* tsl2X7X status reg masks */
+#define TSL2X7X_STA_ADC_VALID 0x01
+#define TSL2X7X_STA_PRX_VALID 0x02
+#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID |\
+ TSL2X7X_STA_PRX_VALID)
+#define TSL2X7X_STA_ALS_INTR 0x10
+#define TSL2X7X_STA_PRX_INTR 0x20
+
+/* tsl2X7X cntrl reg masks */
+#define TSL2X7X_CNTL_REG_CLEAR 0x00
+#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
+#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
+#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
+#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
+#define TSL2X7X_CNTL_PWRON 0x01
+#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
+#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
+#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
+#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
+
+/*Prox diode to use */
+#define TSL2X7X_DIODE0 0x10
+#define TSL2X7X_DIODE1 0x20
+#define TSL2X7X_DIODE_BOTH 0x30
+
+/* LED Power */
+#define TSL2X7X_mA100 0x00
+#define TSL2X7X_mA50 0x40
+#define TSL2X7X_mA25 0x80
+#define TSL2X7X_mA13 0xD0
+#define TSL2X7X_MAX_TIMER_CNT (0xFF)
+
+#define TSL2X7X_MIN_ITIME 3
+
+/* TAOS txx2x7x Device family members */
+enum {
+ tsl2571,
+ tsl2671,
+ tmd2671,
+ tsl2771,
+ tmd2771,
+ tsl2572,
+ tsl2672,
+ tmd2672,
+ tsl2772,
+ tmd2772
+};
+
+enum {
+ TSL2X7X_CHIP_UNKNOWN = 0,
+ TSL2X7X_CHIP_WORKING = 1,
+ TSL2X7X_CHIP_SUSPENDED = 2
+};
+
+struct tsl2x7x_parse_result {
+ int integer;
+ int fract;
+};
+
+/* Per-device data */
+struct tsl2x7x_als_info {
+ u16 als_ch0;
+ u16 als_ch1;
+ u16 lux;
+};
+
+struct tsl2x7x_prox_stat {
+ int min;
+ int max;
+ int mean;
+ unsigned long stddev;
+};
+
+struct tsl2x7x_chip_info {
+ int chan_table_elements;
+ struct iio_chan_spec channel[4];
+ const struct iio_info *info;
+};
+
+struct tsl2X7X_chip {
+ kernel_ulong_t id;
+ struct mutex prox_mutex;
+ struct mutex als_mutex;
+ struct i2c_client *client;
+ u16 prox_data;
+ struct tsl2x7x_als_info als_cur_info;
+ struct tsl2x7x_settings tsl2x7x_settings;
+ struct tsl2X7X_platform_data *pdata;
+ int als_time_scale;
+ int als_saturation;
+ int tsl2x7x_chip_status;
+ u8 tsl2x7x_config[TSL2X7X_MAX_CONFIG_REG];
+ const struct tsl2x7x_chip_info *chip_info;
+ const struct iio_info *info;
+ s64 event_timestamp;
+ /* This structure is intentionally large to accommodate
+ * updates via sysfs. */
+ /* Sized to 9 = max 8 segments + 1 termination segment */
+ struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
+};
+
+/* Different devices require different coefficients */
+static const struct tsl2x7x_lux tsl2x71_lux_table[] = {
+ { 14461, 611, 1211 },
+ { 18540, 352, 623 },
+ { 0, 0, 0 },
+};
+
+static const struct tsl2x7x_lux tmd2x71_lux_table[] = {
+ { 11635, 115, 256 },
+ { 15536, 87, 179 },
+ { 0, 0, 0 },
+};
+
+static const struct tsl2x7x_lux tsl2x72_lux_table[] = {
+ { 14013, 466, 917 },
+ { 18222, 310, 552 },
+ { 0, 0, 0 },
+};
+
+static const struct tsl2x7x_lux tmd2x72_lux_table[] = {
+ { 13218, 130, 262 },
+ { 17592, 92, 169 },
+ { 0, 0, 0 },
+};
+
+static const struct tsl2x7x_lux *tsl2x7x_default_lux_table_group[] = {
+ [tsl2571] = tsl2x71_lux_table,
+ [tsl2671] = tsl2x71_lux_table,
+ [tmd2671] = tmd2x71_lux_table,
+ [tsl2771] = tsl2x71_lux_table,
+ [tmd2771] = tmd2x71_lux_table,
+ [tsl2572] = tsl2x72_lux_table,
+ [tsl2672] = tsl2x72_lux_table,
+ [tmd2672] = tmd2x72_lux_table,
+ [tsl2772] = tsl2x72_lux_table,
+ [tmd2772] = tmd2x72_lux_table,
+};
+
+static const struct tsl2x7x_settings tsl2x7x_default_settings = {
+ .als_time = 219, /* 101 ms */
+ .als_gain = 0,
+ .prx_time = 254, /* 5.4 ms */
+ .prox_gain = 1,
+ .wait_time = 245,
+ .prox_config = 0,
+ .als_gain_trim = 1000,
+ .als_cal_target = 150,
+ .als_thresh_low = 200,
+ .als_thresh_high = 256,
+ .persistence = 255,
+ .interrupts_en = 0,
+ .prox_thres_low = 0,
+ .prox_thres_high = 512,
+ .prox_max_samples_cal = 30,
+ .prox_pulse_count = 8
+};
+
+static const s16 tsl2X7X_als_gainadj[] = {
+ 1,
+ 8,
+ 16,
+ 120
+};
+
+static const s16 tsl2X7X_prx_gainadj[] = {
+ 1,
+ 2,
+ 4,
+ 8
+};
+
+/* Channel variations */
+enum {
+ ALS,
+ PRX,
+ ALSPRX,
+ PRX2,
+ ALSPRX2,
+};
+
+static const u8 device_channel_config[] = {
+ ALS,
+ PRX,
+ PRX,
+ ALSPRX,
+ ALSPRX,
+ ALS,
+ PRX2,
+ PRX2,
+ ALSPRX2,
+ ALSPRX2
+};
+
+/**
+ * tsl2x7x_i2c_read() - Read a byte from a register.
+ * @client:	i2c client
+ * @reg:	device register to read from
+ * @val:	pointer to location to store register contents.
+ *
+ * Return: on success, the (non-negative) byte just read — NOT 0 — so
+ * callers must test 'ret < 0', never 'ret != 0'; negative errno on
+ * failure.
+ */
+static int
+tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+	int ret = 0;
+
+	/* select register to write */
+	ret = i2c_smbus_write_byte(client, (TSL2X7X_CMD_REG | reg));
+	if (ret < 0) {
+		dev_err(&client->dev, "%s: failed to write register %x\n"
+				, __func__, reg);
+		return ret;
+	}
+
+	/* read the data */
+	ret = i2c_smbus_read_byte(client);
+	if (ret >= 0)
+		*val = (u8)ret;
+	else
+		dev_err(&client->dev, "%s: failed to read register %x\n"
+				, __func__, reg);
+
+	return ret;
+}
+
+/**
+ * tsl2x7x_get_lux() - Reads and calculates current lux value.
+ * @indio_dev:	pointer to IIO device
+ *
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device.
+ * Time scale factor array values are adjusted based on the integration time.
+ * The raw values are multiplied by a scale factor, and device gain is obtained
+ * using gain index. Limit checks are done next, then the ratio of a multiple
+ * of ch1 value, to the ch0 value, is calculated. Array tsl2x7x_device_lux[]
+ * is then scanned to find the first ratio value that is just above the ratio
+ * we just calculated. The ch0 and ch1 multiplier constants in the array are
+ * then used along with the time scale factor array values, to calculate the
+ * lux.
+ *
+ * Return: the computed lux, the previous (cached) lux when the device is
+ * busy or reports no fresh data, or a negative errno on I2C failure.
+ */
+static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
+{
+	u16 ch0, ch1; /* separated ch0/ch1 data from device */
+	u32 lux; /* raw lux calculated from device data */
+	u64 lux64;
+	u32 ratio;
+	u8 buf[4];
+	struct tsl2x7x_lux *p;
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int i, ret;
+	u32 ch0lux = 0;
+	u32 ch1lux = 0;
+
+	/* Non-blocking: rather than waiting for a concurrent reader,
+	 * return the cached value immediately. */
+	if (mutex_trylock(&chip->als_mutex) == 0)
+		return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
+
+	if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
+		/* device is not enabled */
+		dev_err(&chip->client->dev, "%s: device is not enabled\n",
+				__func__);
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	ret = tsl2x7x_i2c_read(chip->client,
+		(TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: Failed to read STATUS Reg\n", __func__);
+		goto out_unlock;
+	}
+	/* is data new & valid */
+	if (!(buf[0] & TSL2X7X_STA_ADC_VALID)) {
+		dev_err(&chip->client->dev,
+			"%s: data not valid yet\n", __func__);
+		ret = chip->als_cur_info.lux; /* return LAST VALUE */
+		goto out_unlock;
+	}
+
+	/* Read the four channel-data bytes (ch0 lo/hi, ch1 lo/hi). */
+	for (i = 0; i < 4; i++) {
+		ret = tsl2x7x_i2c_read(chip->client,
+			(TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
+			&buf[i]);
+		if (ret < 0) {
+			dev_err(&chip->client->dev,
+				"%s: failed to read. err=%x\n", __func__, ret);
+			goto out_unlock;
+		}
+	}
+
+	/* clear any existing interrupt status */
+	ret = i2c_smbus_write_byte(chip->client,
+		(TSL2X7X_CMD_REG |
+				TSL2X7X_CMD_SPL_FN |
+				TSL2X7X_CMD_ALS_INT_CLR));
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: i2c_write_command failed - err = %d\n",
+			__func__, ret);
+		goto out_unlock; /* have no data, so return failure */
+	}
+
+	/* extract ALS/lux data */
+	ch0 = le16_to_cpup((const __le16 *)&buf[0]);
+	ch1 = le16_to_cpup((const __le16 *)&buf[2]);
+
+	chip->als_cur_info.als_ch0 = ch0;
+	chip->als_cur_info.als_ch1 = ch1;
+
+	if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) {
+		lux = TSL2X7X_LUX_CALC_OVER_FLOW;
+		goto return_max;
+	}
+
+	if (ch0 == 0) {
+		/* have no data, so return LAST VALUE */
+		ret = chip->als_cur_info.lux;
+		goto out_unlock;
+	}
+	/* calculate ratio */
+	/* NOTE(review): <<15 fixed-point scale — presumably the table's
+	 * ratio entries use the same scale; confirm against how the lux
+	 * tables were generated. */
+	ratio = (ch1 << 15) / ch0;
+	/* convert to unscaled lux using the pointer to the table */
+	p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
+	while (p->ratio != 0 && p->ratio < ratio)
+		p++;
+
+	if (p->ratio == 0) {
+		lux = 0;
+	} else {
+		ch0lux = DIV_ROUND_UP((ch0 * p->ch0),
+			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
+		ch1lux = DIV_ROUND_UP((ch1 * p->ch1),
+			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
+		lux = ch0lux - ch1lux;
+	}
+
+	/* note: lux is 31 bit max at this point */
+	if (ch1lux > ch0lux) {
+		dev_dbg(&chip->client->dev, "ch1lux > ch0lux-return last value\n");
+		ret = chip->als_cur_info.lux;
+		goto out_unlock;
+	}
+
+	/* adjust for active time scale */
+	if (chip->als_time_scale == 0)
+		lux = 0;
+	else
+		lux = (lux + (chip->als_time_scale >> 1)) /
+			chip->als_time_scale;
+
+	/* adjust for active gain scale
+	 * The tsl2x7x_device_lux tables have a factor of 256 built-in.
+	 * User-specified gain provides a multiplier.
+	 * Apply user-specified gain before shifting right to retain precision.
+	 * Use 64 bits to avoid overflow on multiplication.
+	 * Then go back to 32 bits before division to avoid using div_u64().
+	 */
+
+	lux64 = lux;
+	lux64 = lux64 * chip->tsl2x7x_settings.als_gain_trim;
+	lux64 >>= 8;
+	lux = lux64;
+	lux = (lux + 500) / 1000;
+
+	if (lux > TSL2X7X_LUX_CALC_OVER_FLOW) /* check for overflow */
+		lux = TSL2X7X_LUX_CALC_OVER_FLOW;
+
+	/* Update the structure with the latest lux. */
+return_max:
+	chip->als_cur_info.lux = lux;
+	ret = lux;
+
+out_unlock:
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+/**
+ * tsl2x7x_get_prox() - Reads proximity data registers and updates
+ * chip->prox_data.
+ *
+ * @indio_dev:	pointer to IIO device
+ *
+ * Return: the latest chip->prox_data, or -EBUSY when the mutex is held.
+ * NOTE(review): on I2C error or when the status register reports no
+ * valid sample, this falls through and returns the PREVIOUS prox_data
+ * rather than an error — looks like deliberate last-value semantics,
+ * but confirm with callers.
+ */
+static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
+{
+	int i;
+	int ret;
+	u8 status;
+	u8 chdata[2];
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+
+	if (mutex_trylock(&chip->prox_mutex) == 0) {
+		dev_err(&chip->client->dev,
+			"%s: Can't get prox mutex\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = tsl2x7x_i2c_read(chip->client,
+		(TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: i2c err=%d\n", __func__, ret);
+		goto prox_poll_err;
+	}
+
+	/* The x71 parts report prox validity via the ADC-valid bit; the
+	 * x72 parts have a dedicated prox-valid bit. */
+	switch (chip->id) {
+	case tsl2571:
+	case tsl2671:
+	case tmd2671:
+	case tsl2771:
+	case tmd2771:
+		if (!(status & TSL2X7X_STA_ADC_VALID))
+			goto prox_poll_err;
+		break;
+	case tsl2572:
+	case tsl2672:
+	case tmd2672:
+	case tsl2772:
+	case tmd2772:
+		if (!(status & TSL2X7X_STA_PRX_VALID))
+			goto prox_poll_err;
+		break;
+	}
+
+	/* Proximity result is a little-endian low/high byte pair. */
+	for (i = 0; i < 2; i++) {
+		ret = tsl2x7x_i2c_read(chip->client,
+			(TSL2X7X_CMD_REG |
+					(TSL2X7X_PRX_LO + i)), &chdata[i]);
+		if (ret < 0)
+			goto prox_poll_err;
+	}
+
+	chip->prox_data =
+			le16_to_cpup((const __le16 *)&chdata[0]);
+
+prox_poll_err:
+
+	mutex_unlock(&chip->prox_mutex);
+
+	return chip->prox_data;
+}
+
+/**
+ * tsl2x7x_defaults() - Populates the device nominal operating parameters
+ *                      with those provided by a 'platform' data struct or
+ *                      with predefined defaults.
+ *
+ * @chip:	pointer to device structure.
+ */
+static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
+{
+	/* If Operational settings defined elsewhere.. */
+	if (chip->pdata && chip->pdata->platform_default_settings)
+		memcpy(&(chip->tsl2x7x_settings),
+			chip->pdata->platform_default_settings,
+			sizeof(tsl2x7x_default_settings));
+	else
+		memcpy(&(chip->tsl2x7x_settings),
+			&tsl2x7x_default_settings,
+			sizeof(tsl2x7x_default_settings));
+
+	/* Load up the proper lux table. */
+	/* A non-zero first ratio marks a platform-supplied table; both
+	 * branches copy the full fixed-size array (the source tables are
+	 * zero-terminated well before the end). */
+	if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
+		memcpy(chip->tsl2x7x_device_lux,
+			chip->pdata->platform_lux_table,
+			sizeof(chip->pdata->platform_lux_table));
+	else
+		memcpy(chip->tsl2x7x_device_lux,
+		(struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
+				MAX_DEFAULT_TABLE_BYTES);
+}
+
+/**
+ * tsl2x7x_als_calibrate() -	Obtain single reading and calculate
+ *				the als_gain_trim.
+ *
+ * @indio_dev:	pointer to IIO device
+ *
+ * Return: the new gain trim value on success, negative errno on failure.
+ */
+static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int reg_val;
+	int gain_trim_val;
+	int ret;
+	int lux_val;
+
+	/* Select CNTRL, then read it back to check that the ADC is
+	 * enabled and the device powered. reg_val is an int so a
+	 * negative i2c_smbus_read_byte() result is not truncated. */
+	ret = i2c_smbus_write_byte(chip->client,
+			(TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to write CNTRL register, ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	reg_val = i2c_smbus_read_byte(chip->client);
+	if (reg_val < 0)
+		return reg_val;
+	if ((reg_val & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON))
+			!= (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) {
+		dev_err(&chip->client->dev,
+			"%s: failed: ADC not enabled\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Select STATUS for the ADC-valid check (the original re-selected
+	 * CNTRL here, so TSL2X7X_STA_ADC_VALID was tested against the
+	 * wrong register). */
+	ret = i2c_smbus_write_byte(chip->client,
+			(TSL2X7X_CMD_REG | TSL2X7X_STATUS));
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to write ctrl reg: ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	reg_val = i2c_smbus_read_byte(chip->client);
+	if (reg_val < 0)
+		return reg_val;
+	if ((reg_val & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) {
+		dev_err(&chip->client->dev,
+			"%s: failed: STATUS - ADC not valid.\n", __func__);
+		return -ENODATA;
+	}
+
+	lux_val = tsl2x7x_get_lux(indio_dev);
+	if (lux_val < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to get lux\n", __func__);
+		return lux_val;
+	}
+	/* A zero lux reading would divide by zero below. */
+	if (lux_val == 0)
+		return -ERANGE;
+
+	/* Scale the trim so a reading of lux_val maps onto the
+	 * calibration target; reject out-of-window results. */
+	gain_trim_val = (((chip->tsl2x7x_settings.als_cal_target)
+			* chip->tsl2x7x_settings.als_gain_trim) / lux_val);
+	if ((gain_trim_val < 250) || (gain_trim_val > 4000))
+		return -ERANGE;
+
+	chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
+	dev_info(&chip->client->dev,
+		"%s als_calibrate completed\n", chip->client->name);
+
+	return (int) gain_trim_val;
+}
+
+/*
+ * tsl2x7x_chip_on() - program the cached settings into the part and
+ * enable it.
+ *
+ * Builds the register shadow (tsl2x7x_config[]) from tsl2x7x_settings,
+ * powers the device on, writes every shadow register, waits for the
+ * supply to settle, then enables the ADC and proximity detection and,
+ * when requested, the interrupt sources.
+ *
+ * Return: 0 on success, -EINVAL if already running, or a negative errno
+ * from the underlying SMBus transfers.
+ */
+static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
+{
+	int i;
+	int ret = 0;
+	u8 *dev_reg;
+	u8 utmp;
+	int als_count;
+	int als_time;
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	u8 reg_val = 0;
+
+	if (chip->pdata && chip->pdata->power_on)
+		chip->pdata->power_on(indio_dev);
+
+	/* Non calculated parameters */
+	chip->tsl2x7x_config[TSL2X7X_PRX_TIME] =
+			chip->tsl2x7x_settings.prx_time;
+	chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] =
+			chip->tsl2x7x_settings.wait_time;
+	chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] =
+			chip->tsl2x7x_settings.prox_config;
+
+	/* 16-bit thresholds are split into lo/hi byte register pairs. */
+	chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
+		(chip->tsl2x7x_settings.als_thresh_low) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
+		(chip->tsl2x7x_settings.als_thresh_low >> 8) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
+		(chip->tsl2x7x_settings.als_thresh_high) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
+		(chip->tsl2x7x_settings.als_thresh_high >> 8) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] =
+		chip->tsl2x7x_settings.persistence;
+
+	chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
+			chip->tsl2x7x_settings.prox_pulse_count;
+	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
+			(chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
+			(chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
+			(chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+	chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
+			(chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
+
+	/* and make sure we're not already on */
+	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
+		/* if forcing a register update - turn off, then on */
+		dev_info(&chip->client->dev, "device is already enabled\n");
+		return -EINVAL;
+	}
+
+	/* determine als integration register */
+	/* presumably converts als_time (ms) into 2.7 ms integration
+	 * cycles with rounding - TODO confirm against the ATIME encoding */
+	als_count = (chip->tsl2x7x_settings.als_time * 100 + 135) / 270;
+	if (als_count == 0)
+		als_count = 1; /* ensure at least one cycle */
+
+	/* convert back to time (encompasses overrides) */
+	als_time = (als_count * 27 + 5) / 10;
+	/* ATIME is stored as (256 - number of cycles) */
+	chip->tsl2x7x_config[TSL2X7X_ALS_TIME] = 256 - als_count;
+
+	/* Set the gain based on tsl2x7x_settings struct */
+	chip->tsl2x7x_config[TSL2X7X_GAIN] =
+		(chip->tsl2x7x_settings.als_gain |
+			(TSL2X7X_mA100 | TSL2X7X_DIODE1)
+			| ((chip->tsl2x7x_settings.prox_gain) << 2));
+
+	/* set chip struct re scaling and saturation */
+	chip->als_saturation = als_count * 922; /* 90% of full scale */
+	chip->als_time_scale = (als_time + 25) / 50;
+
+	/* TSL2X7X Specific power-on / adc enable sequence
+	 * Power on the device 1st. */
+	utmp = TSL2X7X_CNTL_PWR_ON;
+	ret = i2c_smbus_write_byte_data(chip->client,
+		TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed on CNTRL reg.\n", __func__);
+		return ret;
+	}
+
+	/* Use the following shadow copy for our delay before enabling ADC.
+	 * Write all the registers. */
+	/* NOTE(review): "CMD_REG + i" relies on TSL2X7X_CMD_REG being a
+	 * high-bit flag and i staying below it, making "+" equal to "|". */
+	for (i = 0, dev_reg = chip->tsl2x7x_config;
+			i < TSL2X7X_MAX_CONFIG_REG; i++) {
+		ret = i2c_smbus_write_byte_data(chip->client,
+				TSL2X7X_CMD_REG + i, *dev_reg++);
+		if (ret < 0) {
+			dev_err(&chip->client->dev,
+			"%s: failed on write to reg %d.\n", __func__, i);
+			return ret;
+		}
+	}
+
+	mdelay(3);	/* Power-on settling time */
+
+	/* NOW enable the ADC
+	 * initialize the desired mode of operation */
+	utmp = TSL2X7X_CNTL_PWR_ON |
+			TSL2X7X_CNTL_ADC_ENBL |
+			TSL2X7X_CNTL_PROX_DET_ENBL;
+	ret = i2c_smbus_write_byte_data(chip->client,
+			TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed on 2nd CTRL reg.\n", __func__);
+		return ret;
+	}
+
+	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;
+
+	if (chip->tsl2x7x_settings.interrupts_en != 0) {
+		dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");
+
+		/* presumably 0x20/0x30 encode "prox interrupt requested";
+		 * 0x10 is the ALS interrupt enable - TODO confirm */
+		reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
+		if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
+			(chip->tsl2x7x_settings.interrupts_en == 0x30))
+			reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
+
+		reg_val |= chip->tsl2x7x_settings.interrupts_en;
+		/* NOTE(review): a failure here is logged but not returned;
+		 * execution deliberately continues to the interrupt clear. */
+		ret = i2c_smbus_write_byte_data(chip->client,
+			(TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
+		if (ret < 0)
+			dev_err(&chip->client->dev,
+				"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
+				__func__);
+
+		/* Clear out any initial interrupts  */
+		ret = i2c_smbus_write_byte(chip->client,
+			TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
+			TSL2X7X_CMD_PROXALS_INT_CLR);
+		if (ret < 0) {
+			dev_err(&chip->client->dev,
+				"%s: Failed to clear Int status\n",
+				__func__);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/* Power the sensor down: mark it suspended, clear the control register,
+ * then let the platform hook cut board power if one was supplied.
+ * Returns the result of the control-register write. */
+static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int status;
+
+	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
+
+	status = i2c_smbus_write_byte_data(chip->client,
+			TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
+
+	if (chip->pdata && chip->pdata->power_off)
+		chip->pdata->power_off(chip->client);
+
+	return status;
+}
+
+/**
+ * tsl2x7x_invoke_change() - cycle the device to latch updated settings
+ * @indio_dev:	pointer to IIO device
+ *
+ * Obtain and lock both ALS and PROX resources,
+ * determine and save device state (On/Off),
+ * cycle device to implement updated parameter,
+ * put device back into proper state, and unlock
+ * resource.
+ *
+ * Fix: the result of tsl2x7x_chip_on() was discarded, so a failed
+ * reconfiguration looked like success to every caller.  The error code
+ * is now propagated; callers that ignore the return keep working.
+ */
+static
+int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int device_status = chip->tsl2x7x_chip_status;
+	int ret;
+
+	mutex_lock(&chip->als_mutex);
+	mutex_lock(&chip->prox_mutex);
+
+	if (device_status == TSL2X7X_CHIP_WORKING)
+		tsl2x7x_chip_off(indio_dev);
+
+	ret = tsl2x7x_chip_on(indio_dev);
+
+	/* restore the pre-call power state */
+	if (device_status != TSL2X7X_CHIP_WORKING)
+		tsl2x7x_chip_off(indio_dev);
+
+	mutex_unlock(&chip->prox_mutex);
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+/**
+ * tsl2x7x_prox_calculate() - min/max/mean/stddev over a sample window
+ * @data:	array of raw proximity samples
+ * @length:	number of samples (0 is treated as 1 to avoid div-by-zero)
+ * @statP:	output statistics
+ *
+ * Fix: the standard deviation was computed as sqrt(sum)/N instead of
+ * sqrt(sum/N); the old form under-reports the spread by a factor of
+ * sqrt(N), skewing the calibration threshold derived from it.
+ */
+static
+void tsl2x7x_prox_calculate(int *data, int length,
+		struct tsl2x7x_prox_stat *statP)
+{
+	int i;
+	int sample_sum;
+	int tmp;
+
+	if (length == 0)
+		length = 1;
+
+	sample_sum = 0;
+	statP->min = INT_MAX;
+	statP->max = INT_MIN;
+	for (i = 0; i < length; i++) {
+		sample_sum += data[i];
+		statP->min = min(statP->min, data[i]);
+		statP->max = max(statP->max, data[i]);
+	}
+
+	statP->mean = sample_sum / length;
+
+	/* sum of squared deviations from the mean */
+	sample_sum = 0;
+	for (i = 0; i < length; i++) {
+		tmp = data[i] - statP->mean;
+		sample_sum += tmp * tmp;
+	}
+	statP->stddev = int_sqrt((long)sample_sum / length);
+}
+
+/**
+ * tsl2x7x_prox_cal() - Calculates std. and sets thresholds.
+ * @indio_dev:	pointer to IIO device
+ *
+ * Calculates a standard deviation based on the samples,
+ * and sets the threshold accordingly.
+ */
+static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
+{
+	int prox_history[MAX_SAMPLES_CAL + 1];
+	int i;
+	/* NOTE(review): two slots are declared but only the PROX_STAT_CAL
+	 * element is ever filled below. */
+	struct tsl2x7x_prox_stat prox_stat_data[2];
+	struct tsl2x7x_prox_stat *calP;
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	u8 tmp_irq_settings;
+	u8 current_state = chip->tsl2x7x_chip_status;
+
+	/* clamp the configured sample count to the history buffer size */
+	if (chip->tsl2x7x_settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
+		dev_err(&chip->client->dev,
+			"%s: max prox samples cal is too big: %d\n",
+			__func__, chip->tsl2x7x_settings.prox_max_samples_cal);
+		chip->tsl2x7x_settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
+	}
+
+	/* have to stop to change settings */
+	tsl2x7x_chip_off(indio_dev);
+
+	/* Enable proximity detection save just in case prox not wanted yet*/
+	tmp_irq_settings = chip->tsl2x7x_settings.interrupts_en;
+	chip->tsl2x7x_settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
+
+	/*turn on device if not already on*/
+	tsl2x7x_chip_on(indio_dev);
+
+	/*gather the samples*/
+	/* one reading every 15 ms; each read refreshes chip->prox_data */
+	for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) {
+		mdelay(15);
+		tsl2x7x_get_prox(indio_dev);
+		prox_history[i] = chip->prox_data;
+		dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
+			i, chip->prox_data);
+	}
+
+	tsl2x7x_chip_off(indio_dev);
+	calP = &prox_stat_data[PROX_STAT_CAL];
+	tsl2x7x_prox_calculate(prox_history,
+		chip->tsl2x7x_settings.prox_max_samples_cal, calP);
+	/* heuristic threshold: twice the observed max minus the mean */
+	chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
+
+	dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
+		calP->min, calP->mean, calP->max);
+	dev_info(&chip->client->dev,
+		"%s proximity threshold set to %d\n",
+		chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+
+	/* back to the way they were */
+	chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
+	if (current_state == TSL2X7X_CHIP_WORKING)
+		tsl2x7x_chip_on(indio_dev);
+}
+
+/* sysfs: report whether the chip is suspended (0) or working (1). */
+static ssize_t tsl2x7x_power_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int state = chip->tsl2x7x_chip_status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", state);
+}
+
+/* sysfs: accept a boolean string and power the chip on or off. */
+static ssize_t tsl2x7x_power_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool turn_on;
+
+	if (strtobool(buf, &turn_on) < 0)
+		return -EINVAL;
+
+	if (turn_on)
+		tsl2x7x_chip_on(indio_dev);
+	else
+		tsl2x7x_chip_off(indio_dev);
+
+	return len;
+}
+
+/* sysfs: list the supported ALS gains for this part.
+ * NOTE(review): this advertises 128 for the x7x1 parts while
+ * tsl2x7x_write_raw() rejects 128 for exactly those parts (and rejects
+ * 120 for the x7x2 parts) - one of the two tables looks inverted;
+ * verify against the datasheets. */
+static ssize_t tsl2x7x_gain_available_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	const char *gains = "1 8 16 120";
+
+	switch (chip->id) {
+	case tsl2571:
+	case tsl2671:
+	case tmd2671:
+	case tsl2771:
+	case tmd2771:
+		gains = "1 8 16 128";
+		break;
+	default:
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", gains);
+}
+
+/* sysfs: proximity gain choices are identical on all supported parts. */
+static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	static const char prox_gains[] = "1 2 4 8";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", prox_gains);
+}
+
+/* sysfs: report the ALS integration time as seconds "s.mmm".
+ * Fix: the old code did "y /= 1000" on the CYCLE COUNT rather than on
+ * the total time in ms; it only printed correct values because the
+ * total never reached one second.  Derive seconds from the total. */
+static ssize_t tsl2x7x_als_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int y, z;
+
+	/* cycles = (256 - ATIME); total time = cycles * min cycle time */
+	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
+	z = y * TSL2X7X_MIN_ITIME;
+	y = z / 1000;
+	z %= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
+}
+
+/* sysfs: set the ALS integration time from a "s.mmm" string and
+ * re-latch the device settings. */
+static ssize_t tsl2x7x_als_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	struct tsl2x7x_parse_result result;
+	int ret;
+
+	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+	if (ret)
+		return ret;
+
+	/* presumably /3 converts the fractional part (ms) into ~2.72 ms
+	 * integration cycles - TODO confirm against TSL2X7X_MIN_ITIME */
+	result.fract /= 3;
+	chip->tsl2x7x_settings.als_time =
+		(TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);
+
+	dev_info(&chip->client->dev, "%s: als time = %d",
+		__func__, chip->tsl2x7x_settings.als_time);
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	/* Fix: a sysfs store must return the number of bytes consumed (or
+	 * a negative errno); returning IIO_VAL_INT_PLUS_MICRO (2) told the
+	 * VFS the write was short. */
+	return len;
+}
+
+/* Constant sysfs attribute: integration time range in seconds, from one
+ * 2.72 ms cycle up to the full 256-cycle timer (~0.696 s). */
+static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
+		".00272 - .696");
+
+/* sysfs: show the lux value the ALS calibration aims for. */
+static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int target = chip->tsl2x7x_settings.als_cal_target;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", target);
+}
+
+/* sysfs: set the ALS calibration target (a value of 0 is ignored), then
+ * re-latch the device settings. */
+static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	unsigned long target;
+
+	if (kstrtoul(buf, 0, &target))
+		return -EINVAL;
+
+	if (target != 0)
+		chip->tsl2x7x_settings.als_cal_target = target;
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return len;
+}
+
+/* persistence settings */
+
+/* sysfs: report the ALS interrupt filter delay - the low nibble of the
+ * persistence register times one integration period - as seconds. */
+static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int period, delay;
+
+	/* one ALS integration period in ms */
+	period = ((TSL2X7X_MAX_TIMER_CNT -
+		   (u8)chip->tsl2x7x_settings.als_time) + 1) *
+		  TSL2X7X_MIN_ITIME;
+	delay = period * (chip->tsl2x7x_settings.persistence & 0x0F);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%03d\n",
+			delay / 1000, delay % 1000);
+}
+
+/* sysfs: set the ALS interrupt filter delay ("s.mmm"); the request is
+ * rounded up to whole integration periods and stored in the low nibble
+ * of the persistence register. */
+static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	struct tsl2x7x_parse_result result;
+	int y, z, filter_delay;
+	int ret;
+
+	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+	if (ret)
+		return ret;
+
+	/* one ALS integration period in ms */
+	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
+	z = y * TSL2X7X_MIN_ITIME;
+
+	filter_delay =
+		DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);
+
+	chip->tsl2x7x_settings.persistence &= 0xF0;
+	chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
+
+	dev_info(&chip->client->dev, "%s: als persistence = %d",
+		__func__, filter_delay);
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	/* Fix: sysfs stores must return bytes consumed, not the IIO
+	 * value-type code IIO_VAL_INT_PLUS_MICRO (2). */
+	return len;
+}
+
+/* sysfs: report the proximity interrupt filter delay - the high nibble
+ * of the persistence register times one proximity integration period -
+ * as seconds. */
+static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int period, delay;
+
+	/* one proximity integration period in ms */
+	period = ((TSL2X7X_MAX_TIMER_CNT -
+		   (u8)chip->tsl2x7x_settings.prx_time) + 1) *
+		  TSL2X7X_MIN_ITIME;
+	delay = period * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%03d\n",
+			delay / 1000, delay % 1000);
+}
+
+/* sysfs: set the proximity interrupt filter delay ("s.mmm"); the request
+ * is rounded up to whole proximity integration periods and stored in the
+ * high nibble of the persistence register. */
+static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	struct tsl2x7x_parse_result result;
+	int y, z, filter_delay;
+	int ret;
+
+	ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
+	if (ret)
+		return ret;
+
+	/* one proximity integration period in ms */
+	y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
+	z = y * TSL2X7X_MIN_ITIME;
+
+	filter_delay =
+		DIV_ROUND_UP(((result.integer * 1000) + result.fract), z);
+
+	chip->tsl2x7x_settings.persistence &= 0x0F;
+	chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
+
+	dev_info(&chip->client->dev, "%s: prox persistence = %d",
+		__func__, filter_delay);
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	/* Fix: sysfs stores must return bytes consumed, not the IIO
+	 * value-type code IIO_VAL_INT_PLUS_MICRO (2). */
+	return len;
+}
+
+/* sysfs: writing a truthy value runs the ALS gain-trim calibration and
+ * then re-latches the device settings. */
+static ssize_t tsl2x7x_do_calibrate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool run_cal;
+
+	if (strtobool(buf, &run_cal) < 0)
+		return -EINVAL;
+
+	if (run_cal)
+		tsl2x7x_als_calibrate(indio_dev);
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return len;
+}
+
+/* sysfs: dump the lux table as "ratio,ch0,ch1,..." terminated by the
+ * first all-zero entry. */
+static ssize_t tsl2x7x_luxtable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
+	int i = 0;
+	int offset = 0;
+
+	/* NOTE(review): the bound is 3x the table size; iteration actually
+	 * stops at the zero-ratio terminator - confirm the array length. */
+	while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
+		/* Fix: bound each snprintf by the space REMAINING in the
+		 * sysfs page, not by PAGE_SIZE, so a large table cannot
+		 * write past the end of buf. */
+		offset += snprintf(buf + offset, PAGE_SIZE - offset,
+			"%d,%d,%d,",
+			chip->tsl2x7x_device_lux[i].ratio,
+			chip->tsl2x7x_device_lux[i].ch0,
+			chip->tsl2x7x_device_lux[i].ch1);
+		if (chip->tsl2x7x_device_lux[i].ratio == 0) {
+			/* We just printed the first "0" entry.
+			 * Now get rid of the extra "," and break. */
+			offset--;
+			break;
+		}
+		i++;
+	}
+
+	offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n");
+	return offset;
+}
+
+/* sysfs: replace the lux table from a comma-separated integer list.
+ * Each table entry is three ints; the final entry must be all zeroes. */
+static ssize_t tsl2x7x_luxtable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
+	int n;
+
+	get_options(buf, ARRAY_SIZE(value), value);
+
+	/* We now have an array of ints starting at value[1], and
+	 * enumerated by value[0].
+	 * We expect each group of three ints is one table entry,
+	 * and the last table entry is all 0.
+	 */
+	n = value[0];
+	if ((n % 3) || n < 6 ||
+			n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
+		dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
+		return -EINVAL;
+	}
+
+	if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
+		dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
+		return -EINVAL;
+	}
+
+	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING)
+		tsl2x7x_chip_off(indio_dev);
+
+	/* Zero out the table */
+	memset(chip->tsl2x7x_device_lux, 0, sizeof(chip->tsl2x7x_device_lux));
+	/* Fix: copy n ints, not n*4 bytes - sizeof(int) is not 4 on every
+	 * architecture the kernel supports. */
+	memcpy(chip->tsl2x7x_device_lux, &value[1], n * sizeof(value[1]));
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return len;
+}
+
+/* sysfs: writing a truthy value runs the proximity calibration routine
+ * and then re-latches the device settings. */
+static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	bool run_cal;
+
+	if (strtobool(buf, &run_cal) < 0)
+		return -EINVAL;
+
+	if (run_cal)
+		tsl2x7x_prox_cal(indio_dev);
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return len;
+}
+
+/* IIO event-config read: report whether the ALS (bit 0x10) or proximity
+ * (bit 0x20) interrupt is currently enabled for the given channel. */
+static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
+					 const struct iio_chan_spec *chan,
+					 enum iio_event_type type,
+					 enum iio_event_direction dir)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	u8 mask = (chan->type == IIO_INTENSITY) ? 0x10 : 0x20;
+
+	return !!(chip->tsl2x7x_settings.interrupts_en & mask);
+}
+
+/* IIO event-config write: enable/disable the ALS (0x10) or proximity
+ * (0x20) interrupt for the given channel, then re-latch the settings. */
+static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
+					  const struct iio_chan_spec *chan,
+					  enum iio_event_type type,
+					  enum iio_event_direction dir,
+					  int val)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	u8 mask = (chan->type == IIO_INTENSITY) ? 0x10 : 0x20;
+
+	/* Fix: the disable path used "&= <other bit>", which only worked
+	 * because no other bits of interrupts_en were ever set; clear just
+	 * this event's bit instead of masking everything else away. */
+	if (val)
+		chip->tsl2x7x_settings.interrupts_en |= mask;
+	else
+		chip->tsl2x7x_settings.interrupts_en &= ~mask;
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return 0;
+}
+
+/* IIO event write: store a new ALS or proximity interrupt threshold
+ * (rising = high threshold, falling = low), then re-latch settings. */
+static int tsl2x7x_write_thresh(struct iio_dev *indio_dev,
+				const struct iio_chan_spec *chan,
+				enum iio_event_type type,
+				enum iio_event_direction dir,
+				enum iio_event_info info,
+				int val, int val2)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	bool is_als = (chan->type == IIO_INTENSITY);
+
+	if (dir == IIO_EV_DIR_RISING) {
+		if (is_als)
+			chip->tsl2x7x_settings.als_thresh_high = val;
+		else
+			chip->tsl2x7x_settings.prox_thres_high = val;
+	} else if (dir == IIO_EV_DIR_FALLING) {
+		if (is_als)
+			chip->tsl2x7x_settings.als_thresh_low = val;
+		else
+			chip->tsl2x7x_settings.prox_thres_low = val;
+	} else {
+		return -EINVAL;
+	}
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return 0;
+}
+
+/* IIO event read: fetch the stored ALS or proximity interrupt threshold
+ * (rising = high threshold, falling = low). */
+static int tsl2x7x_read_thresh(struct iio_dev *indio_dev,
+			       const struct iio_chan_spec *chan,
+			       enum iio_event_type type,
+			       enum iio_event_direction dir,
+			       enum iio_event_info info,
+			       int *val, int *val2)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	bool is_als = (chan->type == IIO_INTENSITY);
+
+	if (dir == IIO_EV_DIR_RISING) {
+		*val = is_als ? chip->tsl2x7x_settings.als_thresh_high
+			      : chip->tsl2x7x_settings.prox_thres_high;
+	} else if (dir == IIO_EV_DIR_FALLING) {
+		*val = is_als ? chip->tsl2x7x_settings.als_thresh_low
+			      : chip->tsl2x7x_settings.prox_thres_low;
+	} else {
+		return -EINVAL;
+	}
+
+	return IIO_VAL_INT;
+}
+
+/*
+ * tsl2x7x_read_raw() - IIO read hook: processed lux, raw ALS/prox
+ * counts, and the calibration gain/trim values.
+ */
+static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val,
+			    int *val2,
+			    long mask)
+{
+	int ret = -EINVAL;
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->type) {
+		case IIO_LIGHT:
+			tsl2x7x_get_lux(indio_dev);
+			*val = chip->als_cur_info.lux;
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->type) {
+		case IIO_INTENSITY:
+			tsl2x7x_get_lux(indio_dev);
+			if (chan->channel == 0)
+				*val = chip->als_cur_info.als_ch0;
+			else
+				*val = chip->als_cur_info.als_ch1;
+			ret = IIO_VAL_INT;
+			break;
+		case IIO_PROXIMITY:
+			tsl2x7x_get_prox(indio_dev);
+			*val = chip->prox_data;
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		/* Fix: CALIBSCALE is declared on the IIO_INTENSITY (ALS)
+		 * and IIO_PROXIMITY channels; testing for IIO_LIGHT made
+		 * the ALS branch unreachable, so reading the ALS gain
+		 * returned the proximity gain instead. */
+		if (chan->type == IIO_INTENSITY)
+			*val =
+			tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain];
+		else
+			*val =
+			tsl2X7X_prx_gainadj[chip->tsl2x7x_settings.prox_gain];
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		*val = chip->tsl2x7x_settings.als_gain_trim;
+		ret = IIO_VAL_INT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * tsl2x7x_write_raw() - IIO write hook for gain (CALIBSCALE) and gain
+ * trim (CALIBBIAS).  Maps the user-visible gain multiplier onto the
+ * 2-bit register field, then re-latches the device settings.
+ *
+ * NOTE(review): the 120/128 acceptance per chip id here is the inverse
+ * of what tsl2x7x_gain_available_show() advertises - verify which table
+ * matches the datasheets.
+ */
+static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val,
+			     int val2,
+			     long mask)
+{
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (chan->type == IIO_INTENSITY) {
+			/* ALS gain multiplier -> 2-bit register code */
+			switch (val) {
+			case 1:
+				chip->tsl2x7x_settings.als_gain = 0;
+				break;
+			case 8:
+				chip->tsl2x7x_settings.als_gain = 1;
+				break;
+			case 16:
+				chip->tsl2x7x_settings.als_gain = 2;
+				break;
+			case 120:
+				/* rejected on the x7x2 family */
+				switch (chip->id) {
+				case tsl2572:
+				case tsl2672:
+				case tmd2672:
+				case tsl2772:
+				case tmd2772:
+					return -EINVAL;
+				}
+				chip->tsl2x7x_settings.als_gain = 3;
+				break;
+			case 128:
+				/* rejected on the x7x1 family */
+				switch (chip->id) {
+				case tsl2571:
+				case tsl2671:
+				case tmd2671:
+				case tsl2771:
+				case tmd2771:
+					return -EINVAL;
+				}
+				chip->tsl2x7x_settings.als_gain = 3;
+				break;
+			default:
+				return -EINVAL;
+			}
+		} else {
+			/* proximity gain multiplier -> 2-bit register code */
+			switch (val) {
+			case 1:
+				chip->tsl2x7x_settings.prox_gain = 0;
+				break;
+			case 2:
+				chip->tsl2x7x_settings.prox_gain = 1;
+				break;
+			case 4:
+				chip->tsl2x7x_settings.prox_gain = 2;
+				break;
+			case 8:
+				chip->tsl2x7x_settings.prox_gain = 3;
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		chip->tsl2x7x_settings.als_gain_trim = val;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	tsl2x7x_invoke_change(indio_dev);
+
+	return 0;
+}
+
+/* sysfs attribute definitions; which of these are exposed for a given
+ * part is selected via the per-device-class attribute groups below. */
+static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+		tsl2x7x_power_state_show, tsl2x7x_power_state_store);
+
+static DEVICE_ATTR(in_proximity0_calibscale_available, S_IRUGO,
+		tsl2x7x_prox_gain_available_show, NULL);
+
+static DEVICE_ATTR(in_illuminance0_calibscale_available, S_IRUGO,
+		tsl2x7x_gain_available_show, NULL);
+
+static DEVICE_ATTR(in_illuminance0_integration_time, S_IRUGO | S_IWUSR,
+		tsl2x7x_als_time_show, tsl2x7x_als_time_store);
+
+static DEVICE_ATTR(in_illuminance0_target_input, S_IRUGO | S_IWUSR,
+		tsl2x7x_als_cal_target_show, tsl2x7x_als_cal_target_store);
+
+/* write-only triggers for the calibration routines */
+static DEVICE_ATTR(in_illuminance0_calibrate, S_IWUSR, NULL,
+		tsl2x7x_do_calibrate);
+
+static DEVICE_ATTR(in_proximity0_calibrate, S_IWUSR, NULL,
+		tsl2x7x_do_prox_calibrate);
+
+static DEVICE_ATTR(in_illuminance0_lux_table, S_IRUGO | S_IWUSR,
+		tsl2x7x_luxtable_show, tsl2x7x_luxtable_store);
+
+/* event "thresh_period" attributes map onto the persistence register */
+static DEVICE_ATTR(in_intensity0_thresh_period, S_IRUGO | S_IWUSR,
+		tsl2x7x_als_persistence_show, tsl2x7x_als_persistence_store);
+
+static DEVICE_ATTR(in_proximity0_thresh_period, S_IRUGO | S_IWUSR,
+		tsl2x7x_prox_persistence_show, tsl2x7x_prox_persistence_store);
+
+/* Use the default register values to identify the Taos device.
+ * Compares the high nibble of the ID register against the family
+ * expected for the probed part; returns non-zero on match, 0 on
+ * mismatch, -EINVAL for an unknown target. */
+static int tsl2x7x_device_id(unsigned char *id, int target)
+{
+	unsigned char family = *id & 0xf0;
+
+	switch (target) {
+	case tsl2571:
+	case tsl2671:
+	case tsl2771:
+		return family == TRITON_ID;
+	case tmd2671:
+	case tmd2771:
+		return family == HALIBUT_ID;
+	case tsl2572:
+	case tsl2672:
+	case tmd2672:
+	case tsl2772:
+	case tmd2772:
+		return family == SWORDFISH_ID;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * tsl2x7x_event_handler() - threaded IRQ handler.
+ *
+ * Reads the STATUS register, pushes an IIO threshold event for each
+ * pending interrupt source (refreshing the cached reading first so the
+ * sysfs/ABI value is current), then clears the device's interrupt
+ * flags.  Always reports the IRQ as handled.
+ */
+static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	s64 timestamp = iio_get_time_ns();
+	int ret;
+	u8 value;
+
+	value = i2c_smbus_read_byte_data(chip->client,
+		TSL2X7X_CMD_REG | TSL2X7X_STATUS);
+
+	/* What type of interrupt do we need to process */
+	if (value & TSL2X7X_STA_PRX_INTR) {
+		tsl2x7x_get_prox(indio_dev); /* freshen data for ABI */
+		iio_push_event(indio_dev,
+			       IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY,
+						    0,
+						    IIO_EV_TYPE_THRESH,
+						    IIO_EV_DIR_EITHER),
+						    timestamp);
+	}
+
+	if (value & TSL2X7X_STA_ALS_INTR) {
+		tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
+		iio_push_event(indio_dev,
+			       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+						    0,
+						    IIO_EV_TYPE_THRESH,
+						    IIO_EV_DIR_EITHER),
+						    timestamp);
+	}
+	/* Clear interrupt now that we have handled it. */
+	ret = i2c_smbus_write_byte(chip->client,
+				   TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
+				   TSL2X7X_CMD_PROXALS_INT_CLR);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"%s: Failed to clear irq from event handler. err = %d\n",
+			__func__, ret);
+
+	return IRQ_HANDLED;
+}
+
+/* sysfs attributes exposed for ALS-only parts */
+static struct attribute *tsl2x7x_ALS_device_attrs[] = {
+	&dev_attr_power_state.attr,
+	&dev_attr_in_illuminance0_calibscale_available.attr,
+	&dev_attr_in_illuminance0_integration_time.attr,
+	&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+	&dev_attr_in_illuminance0_target_input.attr,
+	&dev_attr_in_illuminance0_calibrate.attr,
+	&dev_attr_in_illuminance0_lux_table.attr,
+	NULL
+};
+
+/* sysfs attributes exposed for proximity-only parts */
+static struct attribute *tsl2x7x_PRX_device_attrs[] = {
+	&dev_attr_power_state.attr,
+	&dev_attr_in_proximity0_calibrate.attr,
+	NULL
+};
+
+/* sysfs attributes exposed for combined ALS + proximity parts */
+static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
+	&dev_attr_power_state.attr,
+	&dev_attr_in_illuminance0_calibscale_available.attr,
+	&dev_attr_in_illuminance0_integration_time.attr,
+	&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+	&dev_attr_in_illuminance0_target_input.attr,
+	&dev_attr_in_illuminance0_calibrate.attr,
+	&dev_attr_in_illuminance0_lux_table.attr,
+	&dev_attr_in_proximity0_calibrate.attr,
+	NULL
+};
+
+/* as PRX, plus a configurable proximity gain */
+static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
+	&dev_attr_power_state.attr,
+	&dev_attr_in_proximity0_calibrate.attr,
+	&dev_attr_in_proximity0_calibscale_available.attr,
+	NULL
+};
+
+/* as ALSPRX, plus a configurable proximity gain */
+static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
+	&dev_attr_power_state.attr,
+	&dev_attr_in_illuminance0_calibscale_available.attr,
+	&dev_attr_in_illuminance0_integration_time.attr,
+	&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
+	&dev_attr_in_illuminance0_target_input.attr,
+	&dev_attr_in_illuminance0_calibrate.attr,
+	&dev_attr_in_illuminance0_lux_table.attr,
+	&dev_attr_in_proximity0_calibrate.attr,
+	&dev_attr_in_proximity0_calibscale_available.attr,
+	NULL
+};
+
+/* event ("events" directory) attribute lists */
+static struct attribute *tsl2X7X_ALS_event_attrs[] = {
+	&dev_attr_in_intensity0_thresh_period.attr,
+	NULL,
+};
+static struct attribute *tsl2X7X_PRX_event_attrs[] = {
+	&dev_attr_in_proximity0_thresh_period.attr,
+	NULL,
+};
+
+static struct attribute *tsl2X7X_ALSPRX_event_attrs[] = {
+	&dev_attr_in_intensity0_thresh_period.attr,
+	&dev_attr_in_proximity0_thresh_period.attr,
+	NULL,
+};
+
+/* per-device-class sysfs attribute groups, indexed by chip class
+ * (ALS, PRX, ALSPRX, PRX2, ALSPRX2) */
+static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
+	[ALS] = {
+		.attrs = tsl2x7x_ALS_device_attrs,
+	},
+	[PRX] = {
+		.attrs = tsl2x7x_PRX_device_attrs,
+	},
+	[ALSPRX] = {
+		.attrs = tsl2x7x_ALSPRX_device_attrs,
+	},
+	[PRX2] = {
+		.attrs = tsl2x7x_PRX2_device_attrs,
+	},
+	[ALSPRX2] = {
+		.attrs = tsl2x7x_ALSPRX2_device_attrs,
+	},
+};
+
+/* event attribute groups ("events" sysfs directory); the PRX2/ALSPRX2
+ * classes reuse the PRX/ALSPRX entries via tsl2X7X_device_info below */
+static struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
+	[ALS] = {
+		.attrs = tsl2X7X_ALS_event_attrs,
+		.name = "events",
+	},
+	[PRX] = {
+		.attrs = tsl2X7X_PRX_event_attrs,
+		.name = "events",
+	},
+	[ALSPRX] = {
+		.attrs = tsl2X7X_ALSPRX_event_attrs,
+		.name = "events",
+	},
+};
+
+/* iio_info per device class; all classes share the same callbacks and
+ * differ only in which attribute groups they expose.  PRX2 and ALSPRX2
+ * deliberately reuse the PRX and ALSPRX event groups (same events). */
+static const struct iio_info tsl2X7X_device_info[] = {
+	[ALS] = {
+		.attrs = &tsl2X7X_device_attr_group_tbl[ALS],
+		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALS],
+		.driver_module = THIS_MODULE,
+		.read_raw = &tsl2x7x_read_raw,
+		.write_raw = &tsl2x7x_write_raw,
+		.read_event_value = &tsl2x7x_read_thresh,
+		.write_event_value = &tsl2x7x_write_thresh,
+		.read_event_config = &tsl2x7x_read_interrupt_config,
+		.write_event_config = &tsl2x7x_write_interrupt_config,
+	},
+	[PRX] = {
+		.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
+		.event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
+		.driver_module = THIS_MODULE,
+		.read_raw = &tsl2x7x_read_raw,
+		.write_raw = &tsl2x7x_write_raw,
+		.read_event_value = &tsl2x7x_read_thresh,
+		.write_event_value = &tsl2x7x_write_thresh,
+		.read_event_config = &tsl2x7x_read_interrupt_config,
+		.write_event_config = &tsl2x7x_write_interrupt_config,
+	},
+	[ALSPRX] = {
+		.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
+		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
+		.driver_module = THIS_MODULE,
+		.read_raw = &tsl2x7x_read_raw,
+		.write_raw = &tsl2x7x_write_raw,
+		.read_event_value = &tsl2x7x_read_thresh,
+		.write_event_value = &tsl2x7x_write_thresh,
+		.read_event_config = &tsl2x7x_read_interrupt_config,
+		.write_event_config = &tsl2x7x_write_interrupt_config,
+	},
+	[PRX2] = {
+		.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
+		.event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
+		.driver_module = THIS_MODULE,
+		.read_raw = &tsl2x7x_read_raw,
+		.write_raw = &tsl2x7x_write_raw,
+		.read_event_value = &tsl2x7x_read_thresh,
+		.write_event_value = &tsl2x7x_write_thresh,
+		.read_event_config = &tsl2x7x_read_interrupt_config,
+		.write_event_config = &tsl2x7x_write_interrupt_config,
+	},
+	[ALSPRX2] = {
+		.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
+		.event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
+		.driver_module = THIS_MODULE,
+		.read_raw = &tsl2x7x_read_raw,
+		.write_raw = &tsl2x7x_write_raw,
+		.read_event_value = &tsl2x7x_read_thresh,
+		.write_event_value = &tsl2x7x_write_thresh,
+		.read_event_config = &tsl2x7x_read_interrupt_config,
+		.write_event_config = &tsl2x7x_write_interrupt_config,
+	},
+};
+
+/* threshold events shared by all channels that generate interrupts:
+ * rising (upper threshold) and falling (lower threshold), each with a
+ * settable value and an enable flag */
+static const struct iio_event_spec tsl2x7x_events[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	}, {
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+/* Per-device-class channel layout.  Each class pairs a channel table
+ * with the matching tsl2X7X_device_info entry; intensity channel 0 is
+ * the visible+IR photodiode, channel 1 the IR-only photodiode. */
+static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
+	/* ALS-only: processed lux plus two raw intensity channels */
+	[ALS] = {
+		.channel = {
+			{
+			.type = IIO_LIGHT,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_CALIBSCALE) |
+				BIT(IIO_CHAN_INFO_CALIBBIAS),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 1,
+			},
+		},
+	.chan_table_elements = 3,
+	.info = &tsl2X7X_device_info[ALS],
+	},
+	/* proximity-only: one raw proximity channel with events */
+	[PRX] = {
+		.channel = {
+			{
+			.type = IIO_PROXIMITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			},
+		},
+	.chan_table_elements = 1,
+	.info = &tsl2X7X_device_info[PRX],
+	},
+	/* combined ALS + proximity */
+	[ALSPRX] = {
+		.channel = {
+			{
+			.type = IIO_LIGHT,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED)
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_CALIBSCALE) |
+				BIT(IIO_CHAN_INFO_CALIBBIAS),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 1,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			}, {
+			.type = IIO_PROXIMITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			},
+		},
+	.chan_table_elements = 4,
+	.info = &tsl2X7X_device_info[ALSPRX],
+	},
+	/* proximity-only with adjustable proximity gain */
+	[PRX2] = {
+		.channel = {
+			{
+			.type = IIO_PROXIMITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_CALIBSCALE),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			},
+		},
+	.chan_table_elements = 1,
+	.info = &tsl2X7X_device_info[PRX2],
+	},
+	/* combined ALS + proximity with adjustable proximity gain */
+	[ALSPRX2] = {
+		.channel = {
+			{
+			.type = IIO_LIGHT,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_CALIBSCALE) |
+				BIT(IIO_CHAN_INFO_CALIBBIAS),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			}, {
+			.type = IIO_INTENSITY,
+			.indexed = 1,
+			.channel = 1,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+			}, {
+			.type = IIO_PROXIMITY,
+			.indexed = 1,
+			.channel = 0,
+			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				BIT(IIO_CHAN_INFO_CALIBSCALE),
+			.event_spec = tsl2x7x_events,
+			.num_event_specs = ARRAY_SIZE(tsl2x7x_events),
+			},
+		},
+	.chan_table_elements = 4,
+	.info = &tsl2X7X_device_info[ALSPRX2],
+	},
+};
+
+static int tsl2x7x_probe(struct i2c_client *clientp,
+	const struct i2c_device_id *id) /* i2c probe: verify part id, init state, register IIO device */
+{
+	int ret;
+	unsigned char device_id;
+	struct iio_dev *indio_dev;
+	struct tsl2X7X_chip *chip;
+
+	indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip)); /* chip state lives in iio_priv() */
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+	chip->client = clientp;
+	i2c_set_clientdata(clientp, indio_dev);
+
+	ret = tsl2x7x_i2c_read(chip->client,
+		TSL2X7X_CHIPID, &device_id);
+	if (ret < 0)
+		return ret;
+
+	if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
+		(tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) { /* reject if chip id and i2c table entry disagree */
+		dev_info(&chip->client->dev,
+				"%s: i2c device found does not match expected id\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL)); /* select control reg; also proves the bus works */
+	if (ret < 0) {
+		dev_err(&clientp->dev, "%s: write to cmd reg failed. err = %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	/* ALS and PROX functions can be invoked via user space poll
+	 * or H/W interrupt. If busy return last sample. */
+	mutex_init(&chip->als_mutex);
+	mutex_init(&chip->prox_mutex);
+
+	chip->tsl2x7x_chip_status = TSL2X7X_CHIP_UNKNOWN;
+	chip->pdata = clientp->dev.platform_data;
+	chip->id = id->driver_data;
+	chip->chip_info =
+		&tsl2x7x_chip_info_tbl[device_channel_config[id->driver_data]]; /* map part id -> channel layout (ALS/PRX/combo) */
+
+	indio_dev->info = chip->chip_info->info;
+	indio_dev->dev.parent = &clientp->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = chip->client->name;
+	indio_dev->channels = chip->chip_info->channel;
+	indio_dev->num_channels = chip->chip_info->chan_table_elements;
+
+	if (clientp->irq) { /* optional: threshold events need a wired interrupt line */
+		ret = devm_request_threaded_irq(&clientp->dev, clientp->irq,
+						NULL,
+						&tsl2x7x_event_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_ONESHOT,
+						"TSL2X7X_event",
+						indio_dev);
+		if (ret) {
+			dev_err(&clientp->dev,
+				"%s: irq request failed", __func__);
+			return ret;
+		}
+	}
+
+	/* Load up the defaults */
+	tsl2x7x_defaults(chip);
+	/* Make sure the chip is on */
+	tsl2x7x_chip_on(indio_dev); /* NOTE(review): return value ignored - confirm failure here is tolerable */
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&clientp->dev,
+			"%s: iio registration failed\n", __func__);
+		return ret;
+	}
+
+	dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
+
+	return 0;
+}
+
+static int tsl2x7x_suspend(struct device *dev) /* PM: power the chip down and mark it SUSPENDED */
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+	int ret = 0;
+
+	if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
+		ret = tsl2x7x_chip_off(indio_dev); /* stop conversions before board power is cut */
+		chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
+	}
+
+	if (chip->pdata && chip->pdata->platform_power) { /* optional board-level power hook from platform data */
+		pm_message_t pmm = {PM_EVENT_SUSPEND};
+
+		chip->pdata->platform_power(dev, pmm);
+	}
+
+	return ret;
+}
+
+static int tsl2x7x_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2X7X_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->pdata && chip->pdata->platform_power) {
+ pm_message_t pmm = {PM_EVENT_RESUME};
+
+ chip->pdata->platform_power(dev, pmm);
+ }
+
+ if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_SUSPENDED)
+ ret = tsl2x7x_chip_on(indio_dev);
+
+ return ret;
+}
+
+static int tsl2x7x_remove(struct i2c_client *client) /* i2c remove: power chip down, drop IIO registration */
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	tsl2x7x_chip_off(indio_dev); /* NOTE(review): chip turned off before unregister - confirm no concurrent sysfs access */
+
+	iio_device_unregister(indio_dev);
+
+	return 0;
+}
+
+static struct i2c_device_id tsl2x7x_idtable[] = {
+ { "tsl2571", tsl2571 },
+ { "tsl2671", tsl2671 },
+ { "tmd2671", tmd2671 },
+ { "tsl2771", tsl2771 },
+ { "tmd2771", tmd2771 },
+ { "tsl2572", tsl2572 },
+ { "tsl2672", tsl2672 },
+ { "tmd2672", tmd2672 },
+ { "tsl2772", tsl2772 },
+ { "tmd2772", tmd2772 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tsl2x7x_idtable);
+
+static const struct dev_pm_ops tsl2x7x_pm_ops = {
+ .suspend = tsl2x7x_suspend,
+ .resume = tsl2x7x_resume,
+};
+
+/* Driver definition */
+static struct i2c_driver tsl2x7x_driver = {
+ .driver = {
+ .name = "tsl2x7x",
+ .pm = &tsl2x7x_pm_ops,
+ },
+ .id_table = tsl2x7x_idtable,
+ .probe = tsl2x7x_probe,
+ .remove = tsl2x7x_remove,
+};
+
+module_i2c_driver(tsl2x7x_driver);
+
+MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
+MODULE_DESCRIPTION("TAOS tsl2x7x ambient and proximity light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/staging/iio/magnetometer/hmc5843.h
new file mode 100644
index 00000000000000..b784e3eb4591ed
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/hmc5843.h
@@ -0,0 +1,59 @@
+/*
+ * Header file for hmc5843 driver
+ *
+ * Split from hmc5843.c
+ * Copyright (C) Josef Gajdusek <atx@atx.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * */
+
+
+#ifndef HMC5843_CORE_H
+#define HMC5843_CORE_H
+
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+
+#define HMC5843_CONFIG_REG_A 0x00
+#define HMC5843_CONFIG_REG_B 0x01
+#define HMC5843_MODE_REG 0x02
+#define HMC5843_DATA_OUT_MSB_REGS 0x03
+#define HMC5843_STATUS_REG 0x09
+#define HMC5843_ID_REG 0x0a
+#define HMC5843_ID_END 0x0c
+
+enum hmc5843_ids {
+ HMC5843_ID,
+ HMC5883_ID,
+ HMC5883L_ID,
+ HMC5983_ID,
+};
+
+struct hmc5843_data {
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *regmap;
+ const struct hmc5843_chip_info *variant;
+ __be16 buffer[8]; /* 3x 16-bit channels + padding + 64-bit timestamp */
+};
+
+int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
+ enum hmc5843_ids id);
+int hmc5843_common_remove(struct device *dev);
+
+int hmc5843_common_suspend(struct device *dev);
+int hmc5843_common_resume(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops,
+ hmc5843_common_suspend,
+ hmc5843_common_resume);
+#define HMC5843_PM_OPS (&hmc5843_pm_ops)
+#else
+#define HMC5843_PM_OPS NULL
+#endif
+
+#endif /* HMC5843_CORE_H */
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c
new file mode 100644
index 00000000000000..fd171d8b38fbcc
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/hmc5843_core.c
@@ -0,0 +1,638 @@
+/* Copyright (C) 2010 Texas Instruments
+ Author: Shubhrajyoti Datta <shubhrajyoti@ti.com>
+ Acknowledgement: Jonathan Cameron <jic23@kernel.org> for valuable inputs.
+
+ Support for HMC5883 and HMC5883L by Peter Meerwald <pmeerw@pmeerw.net>.
+
+ Split to multiple files by Josef Gajdusek <atx@atx.name> - 2014
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/delay.h>
+
+#include "hmc5843.h"
+
+/*
+ * Range gain settings in (+-)Ga
+ * Beware: HMC5843 and HMC5883 have different recommended sensor field
+ * ranges; default corresponds to +-1.0 Ga and +-1.3 Ga, respectively
+ */
+#define HMC5843_RANGE_GAIN_OFFSET 0x05
+#define HMC5843_RANGE_GAIN_DEFAULT 0x01
+#define HMC5843_RANGE_GAIN_MASK 0xe0
+
+/* Device status */
+#define HMC5843_DATA_READY 0x01
+#define HMC5843_DATA_OUTPUT_LOCK 0x02
+
+/* Mode register configuration */
+#define HMC5843_MODE_CONVERSION_CONTINUOUS 0x00
+#define HMC5843_MODE_CONVERSION_SINGLE 0x01
+#define HMC5843_MODE_IDLE 0x02
+#define HMC5843_MODE_SLEEP 0x03
+#define HMC5843_MODE_MASK 0x03
+
+/*
+ * HMC5843: Minimum data output rate
+ * HMC5883: Typical data output rate
+ */
+#define HMC5843_RATE_OFFSET 0x02
+#define HMC5843_RATE_DEFAULT 0x04
+#define HMC5843_RATE_MASK 0x1c
+
+/* Device measurement configuration */
+#define HMC5843_MEAS_CONF_NORMAL 0x00
+#define HMC5843_MEAS_CONF_POSITIVE_BIAS 0x01
+#define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02
+#define HMC5843_MEAS_CONF_MASK 0x03
+
+/* Scaling factors: 10000000/Gain */
+static const int hmc5843_regval_to_nanoscale[] = {
+ 6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
+};
+
+static const int hmc5883_regval_to_nanoscale[] = {
+ 7812, 9766, 13021, 16287, 24096, 27701, 32573, 45662
+};
+
+static const int hmc5883l_regval_to_nanoscale[] = {
+ 7299, 9174, 12195, 15152, 22727, 25641, 30303, 43478
+};
+
+/*
+ * From the datasheet:
+ * Value | HMC5843 | HMC5883/HMC5883L
+ * | Data output rate (Hz) | Data output rate (Hz)
+ * 0 | 0.5 | 0.75
+ * 1 | 1 | 1.5
+ * 2 | 2 | 3
+ * 3 | 5 | 7.5
+ * 4 | 10 (default) | 15
+ * 5 | 20 | 30
+ * 6 | 50 | 75
+ * 7 | Not used | Not used
+ */
+static const int hmc5843_regval_to_samp_freq[][2] = {
+ {0, 500000}, {1, 0}, {2, 0}, {5, 0}, {10, 0}, {20, 0}, {50, 0}
+};
+
+static const int hmc5883_regval_to_samp_freq[][2] = {
+ {0, 750000}, {1, 500000}, {3, 0}, {7, 500000}, {15, 0}, {30, 0},
+ {75, 0}
+};
+
+static const int hmc5983_regval_to_samp_freq[][2] = {
+ {0, 750000}, {1, 500000}, {3, 0}, {7, 500000}, {15, 0}, {30, 0},
+ {75, 0}, {220, 0}
+};
+
+/* Describe chip variants */
+struct hmc5843_chip_info {
+ const struct iio_chan_spec *channels;
+ const int (*regval_to_samp_freq)[2];
+ const int n_regval_to_samp_freq;
+ const int *regval_to_nanoscale;
+ const int n_regval_to_nanoscale;
+};
+
+/* The lower two bits contain the current conversion mode */
+static s32 hmc5843_set_mode(struct hmc5843_data *data, u8 operating_mode)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_update_bits(data->regmap, HMC5843_MODE_REG,
+ HMC5843_MODE_MASK, operating_mode);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int hmc5843_wait_measurement(struct hmc5843_data *data) /* poll status reg until a sample is ready; caller holds data->lock */
+{
+	int tries = 150; /* 150 * 20ms = ~3s worst-case wait */
+	unsigned int val;
+	int ret;
+
+	while (tries-- > 0) {
+		ret = regmap_read(data->regmap, HMC5843_STATUS_REG, &val);
+		if (ret < 0)
+			return ret;
+		if (val & HMC5843_DATA_READY)
+			break;
+		msleep(20);
+	}
+
+	if (tries < 0) { /* loop exhausted without DATA_READY */
+		dev_err(data->dev, "data not ready\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Return the measurement value from the specified channel */
+static int hmc5843_read_measurement(struct hmc5843_data *data,
+ int idx, int *val)
+{
+ __be16 values[3];
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = hmc5843_wait_measurement(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
+ values, sizeof(values));
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(be16_to_cpu(values[idx]), 15);
+ return IIO_VAL_INT;
+}
+
+/*
+ * API for setting the measurement configuration to
+ * Normal, Positive bias and Negative bias
+ *
+ * From the datasheet:
+ * 0 - Normal measurement configuration (default): In normal measurement
+ * configuration the device follows normal measurement flow. Pins BP
+ * and BN are left floating and high impedance.
+ *
+ * 1 - Positive bias configuration: In positive bias configuration, a
+ * positive current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ * 2 - Negative bias configuration. In negative bias configuration, a
+ * negative current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ */
+static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_A,
+ HMC5843_MEAS_CONF_MASK, meas_conf);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static ssize_t hmc5843_show_measurement_configuration(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &val);
+ if (ret)
+ return ret;
+ val &= HMC5843_MEAS_CONF_MASK;
+
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t hmc5843_set_measurement_configuration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ unsigned long meas_conf = 0;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &meas_conf);
+ if (ret)
+ return ret;
+ if (meas_conf >= HMC5843_MEAS_CONF_MASK)
+ return -EINVAL;
+
+ ret = hmc5843_set_meas_conf(data, meas_conf);
+
+ return (ret < 0) ? ret : count;
+}
+
+static IIO_DEVICE_ATTR(meas_conf,
+ S_IWUSR | S_IRUGO,
+ hmc5843_show_measurement_configuration,
+ hmc5843_set_measurement_configuration,
+ 0);
+
+static ssize_t hmc5843_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < data->variant->n_regval_to_samp_freq; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d.%d ", data->variant->regval_to_samp_freq[i][0],
+ data->variant->regval_to_samp_freq[i][1]);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hmc5843_show_samp_freq_avail);
+
+static int hmc5843_set_samp_freq(struct hmc5843_data *data, u8 rate)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_A,
+ HMC5843_RATE_MASK, rate << HMC5843_RATE_OFFSET);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int hmc5843_get_samp_freq_index(struct hmc5843_data *data,
+ int val, int val2)
+{
+ int i;
+
+ for (i = 0; i < data->variant->n_regval_to_samp_freq; i++)
+ if (val == data->variant->regval_to_samp_freq[i][0] &&
+ val2 == data->variant->regval_to_samp_freq[i][1])
+ return i;
+
+ return -EINVAL;
+}
+
+static int hmc5843_set_range_gain(struct hmc5843_data *data, u8 range)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = regmap_update_bits(data->regmap, HMC5843_CONFIG_REG_B,
+ HMC5843_RANGE_GAIN_MASK,
+ range << HMC5843_RANGE_GAIN_OFFSET);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static ssize_t hmc5843_show_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < data->variant->n_regval_to_nanoscale; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "0.%09d ", data->variant->regval_to_nanoscale[i]);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(scale_available, S_IRUGO,
+ hmc5843_show_scale_avail, NULL, 0);
+
+static int hmc5843_get_scale_index(struct hmc5843_data *data, int val, int val2)
+{
+ int i;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < data->variant->n_regval_to_nanoscale; i++)
+ if (val2 == data->variant->regval_to_nanoscale[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int hmc5843_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct hmc5843_data *data = iio_priv(indio_dev);
+ unsigned int rval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return hmc5843_read_measurement(data, chan->scan_index, val);
+ case IIO_CHAN_INFO_SCALE:
+ ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_B, &rval);
+ if (ret < 0)
+ return ret;
+ rval >>= HMC5843_RANGE_GAIN_OFFSET;
+ *val = 0;
+ *val2 = data->variant->regval_to_nanoscale[rval];
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &rval);
+ if (ret < 0)
+ return ret;
+ rval >>= HMC5843_RATE_OFFSET;
+ *val = data->variant->regval_to_samp_freq[rval][0];
+ *val2 = data->variant->regval_to_samp_freq[rval][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int hmc5843_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hmc5843_data *data = iio_priv(indio_dev);
+ int rate, range;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ rate = hmc5843_get_samp_freq_index(data, val, val2);
+ if (rate < 0)
+ return -EINVAL;
+
+ return hmc5843_set_samp_freq(data, rate);
+ case IIO_CHAN_INFO_SCALE:
+ range = hmc5843_get_scale_index(data, val, val2);
+ if (range < 0)
+ return -EINVAL;
+
+ return hmc5843_set_range_gain(data, range);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hmc5843_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hmc5843_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = hmc5843_wait_measurement(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto done;
+ }
+
+ ret = regmap_bulk_read(data->regmap, HMC5843_DATA_OUT_MSB_REGS,
+ data->buffer, 3 * sizeof(__be16));
+
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ goto done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+#define HMC5843_CHANNEL(axis, idx) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec hmc5843_channels[] = {
+ HMC5843_CHANNEL(X, 0),
+ HMC5843_CHANNEL(Y, 1),
+ HMC5843_CHANNEL(Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/* Beware: Y and Z are exchanged on HMC5883 and 5983 */
+static const struct iio_chan_spec hmc5883_channels[] = {
+ HMC5843_CHANNEL(X, 0),
+ HMC5843_CHANNEL(Z, 1),
+ HMC5843_CHANNEL(Y, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static struct attribute *hmc5843_attributes[] = {
+ &iio_dev_attr_meas_conf.dev_attr.attr,
+ &iio_dev_attr_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hmc5843_group = {
+ .attrs = hmc5843_attributes,
+};
+
+static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
+ [HMC5843_ID] = {
+ .channels = hmc5843_channels,
+ .regval_to_samp_freq = hmc5843_regval_to_samp_freq,
+ .n_regval_to_samp_freq =
+ ARRAY_SIZE(hmc5843_regval_to_samp_freq),
+ .regval_to_nanoscale = hmc5843_regval_to_nanoscale,
+ .n_regval_to_nanoscale =
+ ARRAY_SIZE(hmc5843_regval_to_nanoscale),
+ },
+ [HMC5883_ID] = {
+ .channels = hmc5883_channels,
+ .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
+ .n_regval_to_samp_freq =
+ ARRAY_SIZE(hmc5883_regval_to_samp_freq),
+ .regval_to_nanoscale = hmc5883_regval_to_nanoscale,
+ .n_regval_to_nanoscale =
+ ARRAY_SIZE(hmc5883_regval_to_nanoscale),
+ },
+ [HMC5883L_ID] = {
+ .channels = hmc5883_channels,
+ .regval_to_samp_freq = hmc5883_regval_to_samp_freq,
+ .n_regval_to_samp_freq =
+ ARRAY_SIZE(hmc5883_regval_to_samp_freq),
+ .regval_to_nanoscale = hmc5883l_regval_to_nanoscale,
+ .n_regval_to_nanoscale =
+ ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
+ },
+ [HMC5983_ID] = {
+ .channels = hmc5883_channels,
+ .regval_to_samp_freq = hmc5983_regval_to_samp_freq,
+ .n_regval_to_samp_freq =
+ ARRAY_SIZE(hmc5983_regval_to_samp_freq),
+ .regval_to_nanoscale = hmc5883l_regval_to_nanoscale,
+ .n_regval_to_nanoscale =
+ ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
+ }
+};
+
+static int hmc5843_init(struct hmc5843_data *data)
+{
+ int ret;
+ u8 id[3];
+
+ ret = regmap_bulk_read(data->regmap, HMC5843_ID_REG,
+ id, ARRAY_SIZE(id));
+ if (ret < 0)
+ return ret;
+ if (id[0] != 'H' || id[1] != '4' || id[2] != '3') {
+ dev_err(data->dev, "no HMC5843/5883/5883L/5983 sensor\n");
+ return -ENODEV;
+ }
+
+ ret = hmc5843_set_meas_conf(data, HMC5843_MEAS_CONF_NORMAL);
+ if (ret < 0)
+ return ret;
+ ret = hmc5843_set_samp_freq(data, HMC5843_RATE_DEFAULT);
+ if (ret < 0)
+ return ret;
+ ret = hmc5843_set_range_gain(data, HMC5843_RANGE_GAIN_DEFAULT);
+ if (ret < 0)
+ return ret;
+ return hmc5843_set_mode(data, HMC5843_MODE_CONVERSION_CONTINUOUS);
+}
+
+static const struct iio_info hmc5843_info = {
+ .attrs = &hmc5843_group,
+ .read_raw = &hmc5843_read_raw,
+ .write_raw = &hmc5843_write_raw,
+ .write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
+
+
+int hmc5843_common_suspend(struct device *dev)
+{
+	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
+			HMC5843_MODE_SLEEP); /* fix: suspend must sleep, not keep converting (was CONVERSION_CONTINUOUS) */
+}
+EXPORT_SYMBOL(hmc5843_common_suspend);
+
+int hmc5843_common_resume(struct device *dev)
+{
+	return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
+			HMC5843_MODE_CONVERSION_CONTINUOUS); /* fix: resume must restart measurements (was SLEEP) */
+}
+EXPORT_SYMBOL(hmc5843_common_resume);
+
+int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
+			 enum hmc5843_ids id) /* shared probe for i2c and spi front-ends */
+{
+	struct hmc5843_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, indio_dev);
+
+	/* default settings at probe */
+	data = iio_priv(indio_dev);
+	data->dev = dev;
+	data->regmap = regmap; /* NOTE(review): callers pass devm_regmap_init_*() result unchecked - may be ERR_PTR */
+	data->variant = &hmc5843_chip_info_tbl[id];
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &hmc5843_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = data->variant->channels;
+	indio_dev->num_channels = 4; /* X, Y, Z plus soft timestamp channel */
+	indio_dev->available_scan_masks = hmc5843_scan_masks;
+
+	ret = hmc5843_init(data); /* id check + default conf/rate/gain + continuous mode */
+	if (ret < 0)
+		return ret;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 hmc5843_trigger_handler, NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		goto buffer_cleanup;
+
+	return 0;
+
+buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev); /* undo buffer setup; devm frees the rest */
+	return ret;
+}
+EXPORT_SYMBOL(hmc5843_common_probe);
+
+int hmc5843_common_remove(struct device *dev) /* shared remove: tear down IIO, then park the sensor */
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+	iio_device_unregister(indio_dev); /* stop userspace access before powering down */
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	/* sleep mode to save power */
+	hmc5843_set_mode(iio_priv(indio_dev), HMC5843_MODE_SLEEP);
+
+	return 0;
+}
+EXPORT_SYMBOL(hmc5843_common_remove);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com>");
+MODULE_DESCRIPTION("HMC5843/5883/5883L/5983 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/staging/iio/magnetometer/hmc5843_i2c.c
new file mode 100644
index 00000000000000..6acd614cdbc652
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/hmc5843_i2c.c
@@ -0,0 +1,104 @@
+/*
+ * i2c driver for hmc5843/5843/5883/5883l/5983
+ *
+ * Split from hmc5843.c
+ * Copyright (C) Josef Gajdusek <atx@atx.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "hmc5843.h"
+
+static const struct regmap_range hmc5843_readable_ranges[] = {
+ regmap_reg_range(0, HMC5843_ID_END),
+};
+
+static struct regmap_access_table hmc5843_readable_table = {
+ .yes_ranges = hmc5843_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_readable_ranges),
+};
+
+static const struct regmap_range hmc5843_writable_ranges[] = {
+ regmap_reg_range(0, HMC5843_MODE_REG),
+};
+
+static struct regmap_access_table hmc5843_writable_table = {
+ .yes_ranges = hmc5843_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_writable_ranges),
+};
+
+static const struct regmap_range hmc5843_volatile_ranges[] = {
+ regmap_reg_range(HMC5843_DATA_OUT_MSB_REGS, HMC5843_STATUS_REG),
+};
+
+static struct regmap_access_table hmc5843_volatile_table = {
+ .yes_ranges = hmc5843_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_volatile_ranges),
+};
+
+static struct regmap_config hmc5843_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .rd_table = &hmc5843_readable_table,
+ .wr_table = &hmc5843_writable_table,
+ .volatile_table = &hmc5843_volatile_table,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int hmc5843_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id) /* thin i2c shim over hmc5843_common_probe() */
+{
+	return hmc5843_common_probe(&client->dev,
+			devm_regmap_init_i2c(client, &hmc5843_i2c_regmap_config), /* NOTE(review): may return ERR_PTR, not IS_ERR-checked downstream */
+			id->driver_data);
+}
+
+static int hmc5843_i2c_remove(struct i2c_client *client)
+{
+ return hmc5843_common_remove(&client->dev);
+}
+
+static const struct i2c_device_id hmc5843_id[] = {
+ { "hmc5843", HMC5843_ID },
+ { "hmc5883", HMC5883_ID },
+ { "hmc5883l", HMC5883L_ID },
+ { "hmc5983", HMC5983_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hmc5843_id);
+
+static const struct of_device_id hmc5843_of_match[] = {
+ { .compatible = "honeywell,hmc5843", .data = (void *)HMC5843_ID },
+ { .compatible = "honeywell,hmc5883", .data = (void *)HMC5883_ID },
+ { .compatible = "honeywell,hmc5883l", .data = (void *)HMC5883L_ID },
+ { .compatible = "honeywell,hmc5983", .data = (void *)HMC5983_ID },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hmc5843_of_match);
+
+static struct i2c_driver hmc5843_driver = {
+ .driver = {
+ .name = "hmc5843",
+ .pm = HMC5843_PM_OPS,
+ .of_match_table = hmc5843_of_match,
+ },
+ .id_table = hmc5843_id,
+ .probe = hmc5843_i2c_probe,
+ .remove = hmc5843_i2c_remove,
+};
+module_i2c_driver(hmc5843_driver);
+
+MODULE_AUTHOR("Josef Gajdusek <atx@atx.name>");
+MODULE_DESCRIPTION("HMC5843/5883/5883L/5983 i2c driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/staging/iio/magnetometer/hmc5843_spi.c
new file mode 100644
index 00000000000000..98c4b57101c9d9
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/hmc5843_spi.c
@@ -0,0 +1,100 @@
+/*
+ * SPI driver for hmc5983
+ *
+ * Copyright (C) Josef Gajdusek <atx@atx.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include "hmc5843.h"
+
+static const struct regmap_range hmc5843_readable_ranges[] = {
+ regmap_reg_range(0, HMC5843_ID_END),
+};
+
+static struct regmap_access_table hmc5843_readable_table = {
+ .yes_ranges = hmc5843_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_readable_ranges),
+};
+
+static const struct regmap_range hmc5843_writable_ranges[] = {
+ regmap_reg_range(0, HMC5843_MODE_REG),
+};
+
+static struct regmap_access_table hmc5843_writable_table = {
+ .yes_ranges = hmc5843_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_writable_ranges),
+};
+
+static const struct regmap_range hmc5843_volatile_ranges[] = {
+ regmap_reg_range(HMC5843_DATA_OUT_MSB_REGS, HMC5843_STATUS_REG),
+};
+
+static struct regmap_access_table hmc5843_volatile_table = {
+ .yes_ranges = hmc5843_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(hmc5843_volatile_ranges),
+};
+
+static struct regmap_config hmc5843_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .rd_table = &hmc5843_readable_table,
+ .wr_table = &hmc5843_writable_table,
+ .volatile_table = &hmc5843_volatile_table,
+
+ /* Autoincrement address pointer */
+ .read_flag_mask = 0xc0,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int hmc5843_spi_probe(struct spi_device *spi) /* spi shim: configure bus, then defer to common probe (HMC5983 only) */
+{
+	int ret;
+
+	spi->mode = SPI_MODE_3;
+	spi->max_speed_hz = 8000000; /* 8 MHz max per HMC5983 SPI interface */
+	spi->bits_per_word = 8;
+	ret = spi_setup(spi);
+	if (ret)
+		return ret;
+
+	return hmc5843_common_probe(&spi->dev,
+			devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config), /* NOTE(review): may return ERR_PTR, not IS_ERR-checked downstream */
+			HMC5983_ID);
+}
+
+static int hmc5843_spi_remove(struct spi_device *spi)
+{
+ return hmc5843_common_remove(&spi->dev);
+}
+
+static const struct spi_device_id hmc5843_id[] = {
+ { "hmc5983", HMC5983_ID },
+ { }
+};
+
+static struct spi_driver hmc5843_driver = {
+ .driver = {
+ .name = "hmc5843",
+ .pm = HMC5843_PM_OPS,
+ .owner = THIS_MODULE,
+ },
+ .id_table = hmc5843_id,
+ .probe = hmc5843_spi_probe,
+ .remove = hmc5843_spi_remove,
+};
+
+module_spi_driver(hmc5843_driver);
+
+MODULE_AUTHOR("Josef Gajdusek <atx@atx.name>");
+MODULE_DESCRIPTION("HMC5983 SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.h b/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
new file mode 100644
index 00000000000000..c07321f8d94c21
--- /dev/null
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
@@ -0,0 +1,24 @@
+#ifndef __IIO_BFIN_TIMER_TRIGGER_H__
+#define __IIO_BFIN_TIMER_TRIGGER_H__
+
+/**
+ * struct iio_bfin_timer_trigger_pdata - timer trigger platform data
+ * @output_enable: Enable external trigger pulse generation.
+ * @active_low: Whether the trigger pulse is active low.
+ * @duty_ns: Length of the trigger pulse in nanoseconds.
+ *
+ * This struct is used to configure the output pulse generation of the blackfin
+ * timer trigger. If output_enable is set to true an external trigger signal
+ * will generated on the pin corresponding to the timer. This is useful for
+ * converters which needs an external signal to start conversion. active_low and
+ * duty_ns are used to configure the type of the trigger pulse. If output_enable
+ * is set to false no external trigger pulse will be generated and active_low
+ * and duty_ns are ignored.
+ **/
+struct iio_bfin_timer_trigger_pdata {
+ bool output_enable;
+ bool active_low;
+ unsigned int duty_ns;
+};
+
+#endif
diff --git a/drivers/staging/ulogger/Kconfig b/drivers/staging/ulogger/Kconfig
new file mode 100644
index 00000000000000..f84566c76a0aaa
--- /dev/null
+++ b/drivers/staging/ulogger/Kconfig
@@ -0,0 +1,26 @@
+config ULOGGER
+ boolean "Ulogger, a fork of Android Logger with a few additional features"
+ default n
+
+if ULOGGER
+config ULOGGER_BUF_SHIFT
+ int "Ulogger static log buffer size (17 => 128KB, 18 => 256KB)"
+ range 16 20
+ default 18
+ help
+ Select ulogger static log (ulog_main) buffer size as a power of 2.
+ Examples:
+ 18 => 256 KB
+ 17 => 128 KB
+ 16 => 64 KB
+ 15 => 32 KB
+
+config ULOGGER_USE_MONOTONIC_CLOCK
+ bool "Use raw monotonic clock"
+ default n
+ help
+ Use raw monotonic clock (CLOCK_MONOTONIC_RAW) for timestamps instead
+ of kernel wallclock. Note that this clock may differ from the one used
+ in printk messages.
+
+endif # ULOGGER
diff --git a/drivers/staging/ulogger/Makefile b/drivers/staging/ulogger/Makefile
new file mode 100644
index 00000000000000..81cd2c164c3408
--- /dev/null
+++ b/drivers/staging/ulogger/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ULOGGER) += ulogger.o
diff --git a/drivers/staging/ulogger/ulogger.c b/drivers/staging/ulogger/ulogger.c
new file mode 100644
index 00000000000000..39c17501161d9d
--- /dev/null
+++ b/drivers/staging/ulogger/ulogger.c
@@ -0,0 +1,952 @@
+/*
+ * drivers/staging/ulogger/ulogger.c
+ *
+ * A fork of the Android Logger.
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "ulogger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct ulogger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct ulogger_log {
+ unsigned char *buffer;/* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct rt_mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct ulogger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct ulogger_reader {
+ struct ulogger_log *log; /* associated log */
+ struct list_head list; /* entry in ulogger_log's list */
+ size_t r_off; /* current read head offset */
+ bool r_all; /* reader can read all entries */
+ int r_ver; /* reader ABI version */
+};
+
+/* ulogger_offset - returns index 'n' into the log via (optimized) modulus */
+size_t ulogger_offset(struct ulogger_log *log, size_t n)
+{
+ return n & (log->size-1);
+}
+
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O operation
+ * 2) Readers need to maintain state (ulogger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->ulogger_reader->ulogger_log.
+ * For a writer, we don't want to maintain a ulogger_reader, so we just go
+ * file->ulogger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct ulogger_log *file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct ulogger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_header - returns a pointer to the ulogger_entry header within
+ * 'log' starting at offset 'off'. A temporary ulogger_entry 'scratch' must
+ * be provided. Typically the return value will be a pointer within
+ * 'log->buffer'. However, a pointer to 'scratch' may be returned if
+ * the log entry spans the end and beginning of the circular buffer.
+ */
+static struct ulogger_entry *get_entry_header(struct ulogger_log *log,
+ size_t off, struct ulogger_entry *scratch)
+{
+ size_t len = min(sizeof(struct ulogger_entry), log->size - off);
+ if (len != sizeof(struct ulogger_entry)) {
+ memcpy(((void *) scratch), log->buffer + off, len);
+ memcpy(((void *) scratch) + len, log->buffer,
+ sizeof(struct ulogger_entry) - len);
+ return scratch;
+ }
+
+ return (struct ulogger_entry *) (log->buffer + off);
+}
+
+/*
+ * get_entry_msg_len - Grabs the length of the message of the entry
+ * starting from 'off'.
+ *
+ * An entry length is 2 bytes (16 bits) in host endian order.
+ * In the log, the length does not include the size of the log entry structure.
+ * This function returns the size including the log entry structure.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_msg_len(struct ulogger_log *log, size_t off)
+{
+ struct ulogger_entry scratch;
+ struct ulogger_entry *entry;
+
+ entry = get_entry_header(log, off, &scratch);
+ return entry->len;
+}
+
+static size_t get_user_hdr_len(int ver)
+{
+ if (ver < 2)
+ return sizeof(struct user_ulogger_entry_compat);
+ else
+ return sizeof(struct ulogger_entry);
+}
+
+static ssize_t copy_header_to_user(int ver, struct ulogger_entry *entry,
+ char __user *buf)
+{
+ void *hdr;
+ size_t hdr_len;
+ struct user_ulogger_entry_compat v1;
+
+ if (ver < 2) {
+ v1.len = entry->len;
+ v1.__pad = 0;
+ v1.pid = entry->pid;
+ v1.tid = entry->tid;
+ v1.sec = entry->sec;
+ v1.nsec = entry->nsec;
+ hdr = &v1;
+ hdr_len = sizeof(struct user_ulogger_entry_compat);
+ } else {
+ hdr = entry;
+ hdr_len = sizeof(struct ulogger_entry);
+ }
+
+ return copy_to_user(buf, hdr, hdr_len);
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct ulogger_log *log,
+ struct ulogger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ struct ulogger_entry scratch;
+ struct ulogger_entry *entry;
+ size_t len;
+ size_t msg_start;
+
+ /*
+ * First, copy the header to userspace, using the version of
+ * the header requested
+ */
+ entry = get_entry_header(log, reader->r_off, &scratch);
+ if (copy_header_to_user(reader->r_ver, entry, buf))
+ return -EFAULT;
+
+ count -= get_user_hdr_len(reader->r_ver);
+ buf += get_user_hdr_len(reader->r_ver);
+ msg_start = ulogger_offset(log,
+ reader->r_off + sizeof(struct ulogger_entry));
+
+ /*
+ * We read from the msg in two disjoint operations. First, we read from
+ * the current msg head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - msg_start);
+ if (copy_to_user(buf, log->buffer + msg_start, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = ulogger_offset(log, reader->r_off +
+ sizeof(struct ulogger_entry) + count);
+
+ return count + get_user_hdr_len(reader->r_ver);
+}
+
+/*
+ * get_next_entry_by_uid - Starting at 'off', returns an offset into
+ * 'log->buffer' which contains the first entry readable by 'euid'
+ */
+static size_t get_next_entry_by_uid(struct ulogger_log *log,
+ size_t off, uid_t euid)
+{
+ while (off != log->w_off) {
+ struct ulogger_entry *entry;
+ struct ulogger_entry scratch;
+ size_t next_len;
+
+ entry = get_entry_header(log, off, &scratch);
+
+ if (entry->euid == euid)
+ return off;
+
+ next_len = sizeof(struct ulogger_entry) + entry->len;
+ off = ulogger_offset(log, off + next_len);
+ }
+
+ return off;
+}
+
+/*
+ * ulogger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t ulogger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ulogger_reader *reader = file->private_data;
+ struct ulogger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ rt_mutex_lock(&log->mutex);
+
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ ret = (log->w_off == reader->r_off);
+ rt_mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ rt_mutex_lock(&log->mutex);
+
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ rt_mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ rt_mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct ulogger_log *log, size_t off, size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = sizeof(struct ulogger_entry) +
+ get_entry_msg_len(log, off);
+ off = ulogger_offset(log, off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * is_between - is a < c < b, accounting for wrapping of a, b, and c
+ * positions in the buffer
+ *
+ * That is, if a<b, check for c between a and b
+ * and if a>b, check for c outside (not between) a and b
+ *
+ * |------- a xxxxxxxx b --------|
+ * c^
+ *
+ * |xxxxx b --------- a xxxxxxxxx|
+ * c^
+ * or c^
+ */
+static inline int is_between(size_t a, size_t b, size_t c)
+{
+ if (a < b) {
+ /* is c between a and b? */
+ if (a < c && c <= b)
+ return 1;
+ } else {
+ /* is c outside of b through a? */
+ if (c <= b || a < c)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct ulogger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = ulogger_offset(log, old + len);
+ struct ulogger_reader *reader;
+
+ if (is_between(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (is_between(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct ulogger_log *log, const void *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = ulogger_offset(log, log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer 'buf' to
+ * the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct ulogger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ /*
+ * Note that by not updating w_off, this abandons the
+ * portion of the new entry that *was* successfully
+ * copied, just above. This is intentional to avoid
+ * message corruption from missing fragments.
+ */
+ return -EFAULT;
+
+ log->w_off = ulogger_offset(log, log->w_off + count);
+
+ return count;
+}
+
+/*
+ * ulogger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t ulogger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct ulogger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct ulogger_entry header;
+ struct timespec now;
+ ssize_t ret = 0, prefix;
+ const size_t commlen = sizeof(current->comm);
+ char tcomm[commlen+4];
+ char pcomm[commlen+4];
+ size_t tlen, plen;
+
+ if (ulogger_use_monotonic_clock())
+ getrawmonotonic(&now);
+ else
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+
+ /* retrieve process (thread group leader) and thread names */
+ memcpy(pcomm, current->group_leader->comm, commlen);
+ pcomm[commlen] = '\0';
+ plen = strlen(pcomm)+1;
+ if (header.pid != header.tid) {
+ memcpy(tcomm, current->comm, commlen);
+ tcomm[commlen] = '\0';
+ tlen = strlen(tcomm)+1;
+ } else {
+ tlen = 0;
+ }
+
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.euid = current_euid();
+ header.len = min_t(size_t, plen+tlen+iocb->ki_left,
+ ULOGGER_ENTRY_MAX_PAYLOAD);
+ header.hdr_size = sizeof(struct ulogger_entry);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ rt_mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+ * entries that encroach on readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct ulogger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct ulogger_entry));
+
+ /* append null-terminated process and thread names */
+ do_write_log(log, pcomm, plen);
+ prefix = plen;
+ if (tlen) {
+ do_write_log(log, tcomm, tlen);
+ prefix += tlen;
+ }
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret - prefix);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ rt_mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ rt_mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
+
+static struct ulogger_log *get_log_from_minor(int minor);
+
+/*
+ * ulogger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int ulogger_open(struct inode *inode, struct file *file)
+{
+ struct ulogger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ if (file_private_data_has_miscdevice_pointer()) {
+ /* file->private_data points to our misc struct */
+ log = container_of(file->private_data, struct ulogger_log, misc);
+ } else {
+ /* slower version for older kernels, requires mutex locking */
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+ }
+
+ if (file->f_mode & FMODE_READ) {
+ struct ulogger_reader *reader;
+
+ reader = kmalloc(sizeof(struct ulogger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ reader->r_ver = 2;
+ reader->r_all = in_egroup_p(inode->i_gid) ||
+ capable(CAP_SYS_ADMIN);
+
+ INIT_LIST_HEAD(&reader->list);
+
+ rt_mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ rt_mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * ulogger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int ulogger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct ulogger_reader *reader = file->private_data;
+ struct ulogger_log *log = reader->log;
+
+ rt_mutex_lock(&log->mutex);
+ list_del(&reader->list);
+ rt_mutex_unlock(&log->mutex);
+
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * ulogger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int ulogger_poll(struct file *file, poll_table *wait)
+{
+ struct ulogger_reader *reader;
+ struct ulogger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ rt_mutex_lock(&log->mutex);
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ rt_mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long ulogger_set_version(struct ulogger_reader *reader, void __user *arg)
+{
+ int version;
+ if (copy_from_user(&version, arg, sizeof(int)))
+ return -EFAULT;
+
+ if ((version < 1) || (version > 2))
+ return -EINVAL;
+
+ reader->r_ver = version;
+ return 0;
+}
+
+static long ulogger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ulogger_log *log = file_get_log(file);
+ struct ulogger_reader *reader;
+ long ret = -EINVAL;
+ void __user *argp = (void __user *) arg;
+
+ rt_mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case ULOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case ULOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case ULOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+
+ if (!reader->r_all)
+ reader->r_off = get_next_entry_by_uid(log,
+ reader->r_off, current_euid());
+
+ if (log->w_off != reader->r_off)
+ ret = get_user_hdr_len(reader->r_ver) +
+ get_entry_msg_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case ULOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ case ULOGGER_GET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = reader->r_ver;
+ break;
+ case ULOGGER_SET_VERSION:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ ret = ulogger_set_version(reader, argp);
+ break;
+ }
+
+ rt_mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static const struct file_operations ulogger_fops = {
+ .owner = THIS_MODULE,
+ .read = ulogger_read,
+ .aio_write = ulogger_aio_write,
+ .poll = ulogger_poll,
+ .unlocked_ioctl = ulogger_ioctl,
+ .compat_ioctl = ulogger_ioctl,
+ .open = ulogger_open,
+ .release = ulogger_release,
+};
+
+/*
+ * Defines a static main log structure.
+ * Buffer size must be a power of two, and greater than
+ * (ULOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct ulogger_entry)).
+ */
+static unsigned char _buf_log_main[1U << CONFIG_ULOGGER_BUF_SHIFT];
+
+static struct ulogger_log log_main = {
+ .buffer = _buf_log_main,
+ .misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = ULOGGER_LOG_MAIN,
+ .fops = &ulogger_fops,
+ .parent = NULL,
+ .mode = 0666,
+ },
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(log_main.wq),
+ .readers = LIST_HEAD_INIT(log_main.readers),
+ .mutex = __RT_MUTEX_INITIALIZER(log_main.mutex),
+ .w_off = 0,
+ .head = 0,
+ .size = sizeof(_buf_log_main),
+};
+
+/*
+ * ulogger_logs - the array of currently available logging devices
+ *
+ * New devices can be added by writing to a sysfs attribute of log_main.
+ */
+static struct ulogger_log *ulogger_logs[ULOGGER_MAX_LOGS] = {&log_main};
+
+/* logs_mutex - protects access to ulogger_logs */
+static DEFINE_MUTEX(logs_mutex);
+
+static struct ulogger_log *get_log_from_minor(int minor)
+{
+ unsigned int i;
+ struct ulogger_log *log = NULL;
+
+ if (log_main.misc.minor == minor)
+ return &log_main;
+
+ mutex_lock(&logs_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(ulogger_logs); i++) {
+ if (ulogger_logs[i] == NULL)
+ break;
+ if (ulogger_logs[i]->misc.minor == minor) {
+ log = ulogger_logs[i];
+ break;
+ }
+ }
+
+ mutex_unlock(&logs_mutex);
+
+ return log;
+}
+
+static int init_log(struct ulogger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "ulogger: failed to register misc "
+ "device for log '%s'!\n", log->misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "ulogger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->misc.name);
+
+ return 0;
+}
+
+/*
+ * List all log buffers.
+ */
+static ssize_t ulogger_show_logs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int i;
+ ssize_t count = 0;
+ const unsigned int length = PAGE_SIZE/ARRAY_SIZE(ulogger_logs);
+
+ mutex_lock(&logs_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(ulogger_logs); i++) {
+ if (ulogger_logs[i] == NULL)
+ break;
+ count += scnprintf(&buf[count], length, "%s %u\n",
+ ulogger_logs[i]->misc.name,
+ ffs(ulogger_logs[i]->size)-1);
+ }
+
+ mutex_unlock(&logs_mutex);
+
+ return count;
+}
+
+/*
+ * Dynamically allocate and register a new log device.
+ */
+static ssize_t ulogger_add_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char namebuf[16];
+ int i, len, slot, ret = -EINVAL;
+ unsigned int size;
+ char *name = NULL;
+ unsigned char *buffer = NULL;
+ struct ulogger_log *log = NULL;
+
+ /* parse buffer specification */
+ if (sscanf(buf, "%15s %u", namebuf, &size) != 2)
+ goto bad_spec;
+
+ /* log name should start with prefix ulog_ */
+ if (strncmp(namebuf, "ulog_", 5) != 0)
+ goto bad_spec;
+
+ /* log name should contain readable lowercase alpha characters only */
+ len = strlen(namebuf);
+ for (i = 5; i < len; i++)
+ if (!islower(namebuf[i]))
+ goto bad_spec;
+
+ /* sanity check: 16 kB <= size <= 1 MB */
+ if ((size < 14) || (size > 20))
+ goto bad_spec;
+
+ name = kstrdup(namebuf, GFP_KERNEL);
+ size = (1U << size);
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ buffer = vmalloc(size);
+
+ if (!log || !buffer) {
+ printk(KERN_ERR "ulogger: failed to allocate log '%s' size %u\n",
+ name, size);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ log->misc.minor = MISC_DYNAMIC_MINOR;
+ log->misc.name = name;
+ log->misc.fops = &ulogger_fops;
+ log->misc.mode = 0666;
+ log->size = size;
+ log->buffer = buffer;
+ init_waitqueue_head(&log->wq);
+ INIT_LIST_HEAD(&log->readers);
+ rt_mutex_init(&log->mutex);
+
+ /* find a slot for this log device */
+ slot = -1;
+
+ mutex_lock(&logs_mutex);
+ for (i = 0; i < ARRAY_SIZE(ulogger_logs); i++) {
+ if (ulogger_logs[i] == NULL) {
+ if (init_log(log))
+ break;
+ ulogger_logs[i] = log;
+ slot = i;
+ break;
+ }
+ if (strcmp(ulogger_logs[i]->misc.name, name) == 0)
+ /* we already have a buffer with this name */
+ break;
+ }
+ mutex_unlock(&logs_mutex);
+
+ if (slot < 0)
+ /* device registration failed */
+ goto fail;
+
+ return count;
+
+bad_spec:
+ printk(KERN_ERR "ulogger: invalid buffer specification\n");
+fail:
+ kfree(name);
+ kfree(log);
+ vfree(buffer);
+ return ret;
+}
+
+static DEVICE_ATTR(logs, S_IWUSR|S_IRUGO, ulogger_show_logs, ulogger_add_log);
+
+static int __init ulogger_init(void)
+{
+ int ret;
+
+ /* static device 'main' is always present */
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ /* write-only attribute for dynamically adding new buffers */
+ ret = device_create_file(log_main.misc.this_device, &dev_attr_logs);
+out:
+ return ret;
+}
+device_initcall(ulogger_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ulogger/ulogger.h b/drivers/staging/ulogger/ulogger.h
new file mode 100644
index 00000000000000..7d4cc904b9395f
--- /dev/null
+++ b/drivers/staging/ulogger/ulogger.h
@@ -0,0 +1,90 @@
+/* drivers/staging/ulogger/ulogger.h
+ *
+ * A fork of the Android Logger.
+ *
+ * Copyright (C) 2013 Parrot S.A.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ULOGGER_H
+#define _LINUX_ULOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/version.h>
+
+/*
+ * The userspace structure for version 1 of the ulogger_entry ABI.
+ * This structure is returned to userspace unless the caller requests
+ * an upgrade to a newer ABI version.
+ */
+struct user_ulogger_entry_compat {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+/*
+ * The structure for version 2 of the ulogger_entry ABI.
+ * This structure is returned to userspace if ioctl(ULOGGER_SET_VERSION)
+ * is called with version >= 2
+ */
+struct ulogger_entry {
+ __u16 len; /* length of the payload */
+ __u16 hdr_size; /* sizeof(struct ulogger_entry) */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ uid_t euid; /* effective UID of ulogger */
+ char msg[0]; /* the entry's payload */
+};
+
+#define ULOGGER_LOG_MAIN "ulog_main" /* main log buffer */
+
+#define ULOGGER_ENTRY_MAX_PAYLOAD 4076
+
+#define ULOGGER_MAX_LOGS 16
+
+#define __ULOGGERIO 0xAE
+
+#define ULOGGER_GET_LOG_BUF_SIZE _IO(__ULOGGERIO, 21) /* size of log */
+#define ULOGGER_GET_LOG_LEN _IO(__ULOGGERIO, 22) /* used log len */
+#define ULOGGER_GET_NEXT_ENTRY_LEN _IO(__ULOGGERIO, 23) /* next entry len */
+#define ULOGGER_FLUSH_LOG _IO(__ULOGGERIO, 24) /* flush log */
+#define ULOGGER_GET_VERSION _IO(__ULOGGERIO, 25) /* abi version */
+#define ULOGGER_SET_VERSION _IO(__ULOGGERIO, 26) /* abi version */
+
+/* Can we take advantage of misc_open() setting a pointer to our device ? */
+static inline int file_private_data_has_miscdevice_pointer(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static inline int ulogger_use_monotonic_clock(void)
+{
+#if defined(CONFIG_ULOGGER_USE_MONOTONIC_CLOCK)
+ return 1;
+#else
+ return 0;
+#endif /* defined(CONFIG_ULOGGER_USE_MONOTONIC_CLOCK) */
+}
+
+#endif /* _LINUX_ULOGGER_H */
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 00000000000000..52385914b9ae64
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+ tristate "Switch class support"
+ help
+ Say Y here to enable switch class support. This allows
+ monitoring switches by userspace via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+ tristate "GPIO Switch support"
+ depends on GENERIC_GPIO
+ help
+ Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 00000000000000..f7606ed4a719b9
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH) += switch_class.o
+obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 00000000000000..f9cf2b4c16df57
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ dev_set_drvdata(sdev->dev, NULL);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 00000000000000..7e9faa211e48b8
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = __devexit_p(gpio_switch_remove),
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
new file mode 100644
index 00000000000000..c21856a1ac7482
--- /dev/null
+++ b/drivers/usb/chipidea/Kconfig
@@ -0,0 +1,40 @@
+#config USB_CHIPIDEA
+# tristate "ChipIdea Highspeed Dual Role Controller"
+# depends on USB
+# help
+# Say Y here if your system has a dual role high speed USB
+# controller based on ChipIdea silicon IP. Currently, only the
+# peripheral mode is supported.
+#
+# When compiled dynamically, the module will be called ci-hdrc.ko.
+
+if USB_CHIPIDEA
+
+config USB_CHIPIDEA_UDC
+ bool "ChipIdea device controller"
+ depends on USB_GADGET
+ select USB_GADGET_DUALSPEED
+ help
+ Say Y here to enable device controller functionality of the
+ ChipIdea driver.
+
+config USB_CHIPIDEA_HOST
+ bool "ChipIdea host controller"
+ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ help
+ Say Y here to enable host controller functionality of the
+ ChipIdea driver.
+
+config USB_CHIPIDEA_DEBUG
+ bool "ChipIdea driver debug"
+ help
+ Say Y here to enable debugging output of the ChipIdea driver.
+
+config USB_CHIPIDEA_P7_BUG_WORKAROUND
+ bool "ChipIdea p7 bug workaround"
+ help
+	  Say Y here to enable the workaround; if the bug is detected,
+	  the driver will reset the root hub.
+
+endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
new file mode 100644
index 00000000000000..cc349376972435
--- /dev/null
+++ b/drivers/usb/chipidea/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
+
+ci_hdrc-y := core.o
+ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
+ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
+ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG) += debug.o
+
+ifneq ($(CONFIG_PCI),)
+ obj-$(CONFIG_USB_CHIPIDEA) += ci13xxx_pci.o
+endif
+
+ifneq ($(CONFIG_ARCH_MSM),)
+ obj-$(CONFIG_USB_CHIPIDEA) += ci13xxx_msm.o
+endif
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
new file mode 100644
index 00000000000000..0457a90851c712
--- /dev/null
+++ b/drivers/usb/chipidea/bits.h
@@ -0,0 +1,97 @@
+/*
+ * bits.h - register bits of the ChipIdea USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_USB_CHIPIDEA_BITS_H
+#define __DRIVERS_USB_CHIPIDEA_BITS_H
+
+#include <linux/usb/ehci_def.h>
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN (0x1F << 0)
+#define DCCPARAMS_DC BIT(7)
+#define DCCPARAMS_HC BIT(8)
+
+/* TESTMODE */
+#define TESTMODE_FORCE BIT(0)
+
+/* USBCMD */
+#define USBCMD_RS BIT(0)
+#define USBCMD_RST BIT(1)
+#define USBCMD_SUTW BIT(13)
+#define USBCMD_ATDTW BIT(14)
+
+/* USBSTS & USBINTR */
+#define USBi_UI BIT(0)
+#define USBi_UEI BIT(1)
+#define USBi_PCI BIT(2)
+#define USBi_URI BIT(6)
+#define USBi_SLI BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA BIT(24)
+#define DEVICEADDR_USBADR (0x7FUL << 25)
+
+/* viewport */
+#define VIEWPORT_ULPIRUN (1 << 30)
+#define VIEWPORT_ULPIRW (1 << 29)
+#define VIEWPORT_ULPIADDR(_a) (((_a)&0xff) << 16)
+#define VIEWPORT_ULPIDATRD(_d) (((_d)&0xff) << 8)
+#define VIEWPORT_ULPIDATWR(_d) ((_d)&0xff)
+
+/* PORTSC */
+#define PORTSC_FPR BIT(6)
+#define PORTSC_SUSP BIT(7)
+#define PORTSC_HSP BIT(9)
+#define PORTSC_PTC (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD (0x03UL << 25)
+#define DEVLC_PSPD_HS (0x02UL << 25)
+
+/* OTGSC */
+#define OTGSC_IDPU BIT(5)
+#define OTGSC_ID BIT(8)
+#define OTGSC_AVV BIT(9)
+#define OTGSC_ASV BIT(10)
+#define OTGSC_BSV BIT(11)
+#define OTGSC_BSE BIT(12)
+#define OTGSC_IDIS BIT(16)
+#define OTGSC_AVVIS BIT(17)
+#define OTGSC_ASVIS BIT(18)
+#define OTGSC_BSVIS BIT(19)
+#define OTGSC_BSEIS BIT(20)
+#define OTGSC_IDIE BIT(24)
+#define OTGSC_AVVIE BIT(25)
+#define OTGSC_ASVIE BIT(26)
+#define OTGSC_BSVIE BIT(27)
+#define OTGSC_BSEIE BIT(28)
+
+/* USBMODE */
+#define USBMODE_CM (0x03UL << 0)
+#define USBMODE_CM_DC (0x02UL << 0)
+#define USBMODE_SLOM BIT(3)
+#define USBMODE_CI_SDIS BIT(4)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS BIT(0)
+#define ENDPTCTRL_RXT (0x03UL << 2)
+#define ENDPTCTRL_RXR BIT(6) /* reserved for port 0 */
+#define ENDPTCTRL_RXE BIT(7)
+#define ENDPTCTRL_TXS BIT(16)
+#define ENDPTCTRL_TXT (0x03UL << 18)
+#define ENDPTCTRL_TXR BIT(22) /* reserved for port 0 */
+#define ENDPTCTRL_TXE BIT(23)
+
+#endif /* __DRIVERS_USB_CHIPIDEA_BITS_H */
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
new file mode 100644
index 00000000000000..9eaa1a10d03413
--- /dev/null
+++ b/drivers/usb/chipidea/ci.h
@@ -0,0 +1,322 @@
+/*
+ * ci.h - common structures, functions, and macros of the ChipIdea driver
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_USB_CHIPIDEA_CI_H
+#define __DRIVERS_USB_CHIPIDEA_CI_H
+
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define TD_PAGE_COUNT 5
+#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
+#define ENDPT_MAX 32
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/**
+ * struct ci13xxx_ep - endpoint representation
+ * @ep: endpoint structure for gadget drivers
+ * @dir: endpoint direction (TX/RX)
+ * @num: endpoint number
+ * @type: endpoint type
+ * @name: string description of the endpoint
+ * @qh: queue head for this endpoint
+ * @wedge: is the endpoint wedged
+ * @udc: pointer to the controller
+ * @lock: pointer to controller's spinlock
+ * @td_pool: pointer to controller's TD pool
+ */
+struct ci13xxx_ep {
+ struct usb_ep ep;
+ u8 dir;
+ u8 num;
+ u8 type;
+ char name[16];
+ struct {
+ struct list_head queue;
+ struct ci13xxx_qh *ptr;
+ dma_addr_t dma;
+ } qh;
+ int wedge;
+
+ /* global resources */
+ struct ci13xxx *udc;
+ spinlock_t *lock;
+ struct dma_pool *td_pool;
+};
+
+enum ci_role {
+ CI_ROLE_HOST = 0,
+ CI_ROLE_GADGET,
+ CI_ROLE_END,
+};
+
+/**
+ * struct ci_role_driver - host/gadget role driver
+ * start: start this role
+ * stop: stop this role
+ * irq: irq handler for this role
+ * name: role name string (host/gadget)
+ */
+struct ci_role_driver {
+ int (*start)(struct ci13xxx *);
+ void (*stop)(struct ci13xxx *);
+ irqreturn_t (*irq)(struct ci13xxx *);
+ const char *name;
+};
+
+/**
+ * struct hw_bank - hardware register mapping representation
+ * @lpm: set if the device is LPM capable
+ * @phys: physical address of the controller's registers
+ * @abs: absolute address of the beginning of register window
+ * @cap: capability registers
+ * @op: operational registers
+ * @size: size of the register window
+ * @regmap: register lookup table
+ */
+struct hw_bank {
+ unsigned lpm;
+ resource_size_t phys;
+ void __iomem *abs;
+ void __iomem *cap;
+ void __iomem *op;
+ size_t size;
+ void __iomem **regmap;
+};
+
+/**
+ * struct ci13xxx - chipidea device representation
+ * @dev: pointer to parent device
+ * @lock: access synchronization
+ * @hw_bank: hardware register mapping
+ * @irq: IRQ number
+ * @roles: array of supported roles for this controller
+ * @role: current role
+ * @is_otg: if the device is otg-capable
+ * @work: work for role changing
+ * @wq: workqueue thread
+ * @qh_pool: allocation pool for queue heads
+ * @td_pool: allocation pool for transfer descriptors
+ * @gadget: device side representation for peripheral controller
+ * @driver: gadget driver
+ * @hw_ep_max: total number of endpoints supported by hardware
+ * @ci13xxx_ep: array of endpoints
+ * @ep0_dir: ep0 direction
+ * @ep0out: pointer to ep0 OUT endpoint
+ * @ep0in: pointer to ep0 IN endpoint
+ * @status: ep0 status request
+ * @setaddr: if we should set the address on status completion
+ * @address: usb address received from the host
+ * @remote_wakeup: host-enabled remote wakeup
+ * @suspended: suspended by host
+ * @test_mode: the selected test mode
+ * @udc_driver: platform specific information supplied by parent device
+ * @vbus_active: is VBUS active
+ * @transceiver: pointer to USB PHY, if any
+ * @hcd: pointer to usb_hcd for ehci host driver
+ */
+struct ci13xxx {
+ struct device *dev;
+ spinlock_t lock;
+ spinlock_t switch_lock;
+ struct hw_bank hw_bank;
+ int irq;
+ struct ci_role_driver *roles[CI_ROLE_END];
+ enum ci_role role;
+ enum ci_role role_itr;
+ bool is_otg;
+ struct work_struct work;
+ struct workqueue_struct *wq;
+
+ struct dma_pool *qh_pool;
+ struct dma_pool *td_pool;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ unsigned hw_ep_max;
+ struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX];
+ u32 ep0_dir;
+ struct ci13xxx_ep *ep0out, *ep0in;
+
+ struct usb_request *status;
+ bool setaddr;
+ u8 address;
+ u8 remote_wakeup;
+ u8 suspended;
+ u8 test_mode;
+
+ struct ci13xxx_udc_driver *udc_driver;
+ int vbus_active;
+ struct usb_phy *transceiver;
+ struct usb_hcd *hcd;
+};
+
+static inline struct ci_role_driver *ci_role(struct ci13xxx *ci)
+{
+ BUG_ON(ci->role >= CI_ROLE_END || !ci->roles[ci->role]);
+ return ci->roles[ci->role];
+}
+
+static inline int ci_role_start(struct ci13xxx *ci, enum ci_role role)
+{
+ int ret;
+
+ if (role >= CI_ROLE_END)
+ return -EINVAL;
+
+ if (!ci->roles[role])
+ return -ENXIO;
+
+ ret = ci->roles[role]->start(ci);
+ if (!ret)
+ ci->role = role;
+ return ret;
+}
+
+static inline void ci_role_stop(struct ci13xxx *ci)
+{
+ enum ci_role role = ci->role;
+
+ if (role == CI_ROLE_END)
+ return;
+
+ ci->role = CI_ROLE_END;
+
+ ci->roles[role]->stop(ci);
+}
+
+static inline int ci_role_switch(struct ci13xxx *ci, enum ci_role role)
+{
+ int ret;
+ unsigned long flags;
+
+ ci_role_stop(ci);
+
+ spin_lock_irqsave(&ci->switch_lock, flags);
+ ci->role_itr = role;
+ spin_unlock_irqrestore(&ci->switch_lock, flags);
+
+ ret = ci_role_start(ci, role);
+ if (ret)
+ ci->role_itr = CI_ROLE_END;
+
+ return ret;
+}
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS (32)
+
+/* register indices */
+enum ci13xxx_regs {
+ CAP_CAPLENGTH,
+ CAP_HCCPARAMS,
+ CAP_DCCPARAMS,
+ CAP_TESTMODE,
+ CAP_LAST = CAP_TESTMODE,
+ OP_USBCMD,
+ OP_USBSTS,
+ OP_USBINTR,
+ OP_DEVICEADDR,
+ OP_ENDPTLISTADDR,
+ OP_VIEWPORT,
+ OP_PORTSC,
+ OP_DEVLC,
+ OP_OTGSC,
+ OP_USBMODE,
+ OP_ENDPTSETUPSTAT,
+ OP_ENDPTPRIME,
+ OP_ENDPTFLUSH,
+ OP_ENDPTSTAT,
+ OP_ENDPTCOMPLETE,
+ OP_ENDPTCTRL,
+ /* endptctrl1..15 follow */
+ OP_LAST = OP_ENDPTCTRL + ENDPT_MAX / 2,
+};
+
+/**
+ * hw_read: reads from a hw register
+ * @reg: register index
+ * @mask: bitfield mask
+ *
+ * This function returns register contents
+ */
+static inline u32 hw_read(struct ci13xxx *udc, enum ci13xxx_regs reg, u32 mask)
+{
+ return ioread32(udc->hw_bank.regmap[reg]) & mask;
+}
+
+/**
+ * hw_write: writes to a hw register
+ * @reg: register index
+ * @mask: bitfield mask
+ * @data: new value
+ */
+static inline void hw_write(struct ci13xxx *udc, enum ci13xxx_regs reg,
+ u32 mask, u32 data)
+{
+ if (~mask)
+ data = (ioread32(udc->hw_bank.regmap[reg]) & ~mask)
+ | (data & mask);
+
+ iowrite32(data, udc->hw_bank.regmap[reg]);
+}
+
+/**
+ * hw_test_and_clear: tests & clears a hw register
+ * @reg: register index
+ * @mask: bitfield mask
+ *
+ * This function returns register contents
+ */
+static inline u32 hw_test_and_clear(struct ci13xxx *udc, enum ci13xxx_regs reg,
+ u32 mask)
+{
+ u32 val = ioread32(udc->hw_bank.regmap[reg]) & mask;
+
+ iowrite32(val, udc->hw_bank.regmap[reg]);
+ return val;
+}
+
+/**
+ * hw_test_and_write: tests & writes a hw register
+ * @reg: register index
+ * @mask: bitfield mask
+ * @data: new value
+ *
+ * This function returns register contents
+ */
+static inline u32 hw_test_and_write(struct ci13xxx *udc, enum ci13xxx_regs reg,
+ u32 mask, u32 data)
+{
+ u32 val = hw_read(udc, reg, ~0);
+
+ hw_write(udc, reg, mask, data);
+ return (val & mask) >> __ffs(mask);
+}
+
+int hw_device_reset(struct ci13xxx *ci, u32 mode);
+
+int hw_port_test_set(struct ci13xxx *ci, u8 mode);
+
+u8 hw_port_test_get(struct ci13xxx *ci);
+
+#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/ci13xxx_msm.c b/drivers/usb/chipidea/ci13xxx_msm.c
new file mode 100644
index 00000000000000..958069ef95e392
--- /dev/null
+++ b/drivers/usb/chipidea/ci13xxx_msm.c
@@ -0,0 +1,110 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+
+#define MSM_USB_BASE (udc->hw_bank.abs)
+
+static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
+{
+ struct device *dev = udc->gadget.dev.parent;
+ int val;
+
+ switch (event) {
+ case CI13XXX_CONTROLLER_RESET_EVENT:
+ dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+ writel(0, USB_AHBBURST);
+ writel(0, USB_AHBMODE);
+ break;
+ case CI13XXX_CONTROLLER_STOPPED_EVENT:
+ dev_dbg(dev, "CI13XXX_CONTROLLER_STOPPED_EVENT received\n");
+ /*
+ * Put the transceiver in non-driving mode. Otherwise host
+ * may not detect soft-disconnection.
+ */
+ val = usb_phy_io_read(udc->transceiver, ULPI_FUNC_CTRL);
+ val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+ val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+ usb_phy_io_write(udc->transceiver, val, ULPI_FUNC_CTRL);
+ break;
+ default:
+ dev_dbg(dev, "unknown ci13xxx_udc event\n");
+ break;
+ }
+}
+
+static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
+ .name = "ci13xxx_msm",
+ .flags = CI13XXX_REGS_SHARED |
+ CI13XXX_REQUIRE_TRANSCEIVER |
+ CI13XXX_PULLUP_ON_VBUS |
+ CI13XXX_DISABLE_STREAMING,
+
+ .notify_event = ci13xxx_msm_notify_event,
+};
+
+static int ci13xxx_msm_probe(struct platform_device *pdev)
+{
+ struct platform_device *plat_ci;
+ int ret;
+
+ dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
+
+ plat_ci = platform_device_alloc("ci_hdrc", -1);
+ if (!plat_ci) {
+ dev_err(&pdev->dev, "can't allocate ci_hdrc platform device\n");
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add_resources(plat_ci, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "can't add resources to platform device\n");
+ goto put_platform;
+ }
+
+ ret = platform_device_add_data(plat_ci, &ci13xxx_msm_udc_driver,
+ sizeof(ci13xxx_msm_udc_driver));
+ if (ret)
+ goto put_platform;
+
+ ret = platform_device_add(plat_ci);
+ if (ret)
+ goto put_platform;
+
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+put_platform:
+ platform_device_put(plat_ci);
+
+ return ret;
+}
+
+static struct platform_driver ci13xxx_msm_driver = {
+ .probe = ci13xxx_msm_probe,
+ .driver = { .name = "msm_hsusb", },
+};
+MODULE_ALIAS("platform:msm_hsusb");
+
+static int __init ci13xxx_msm_init(void)
+{
+ return platform_driver_register(&ci13xxx_msm_driver);
+}
+module_init(ci13xxx_msm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci13xxx_pci.c b/drivers/usb/chipidea/ci13xxx_pci.c
new file mode 100644
index 00000000000000..e3dab27f5c751c
--- /dev/null
+++ b/drivers/usb/chipidea/ci13xxx_pci.c
@@ -0,0 +1,180 @@
+/*
+ * ci13xxx_pci.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+/* driver name */
+#define UDC_DRIVER_NAME "ci13xxx_pci"
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+struct ci13xxx_udc_driver pci_driver = {
+ .name = UDC_DRIVER_NAME,
+ .capoffset = DEF_CAPOFFSET,
+};
+
+struct ci13xxx_udc_driver langwell_pci_driver = {
+ .name = UDC_DRIVER_NAME,
+ .capoffset = 0,
+};
+
+struct ci13xxx_udc_driver penwell_pci_driver = {
+ .name = UDC_DRIVER_NAME,
+ .capoffset = 0,
+ .power_budget = 200,
+};
+
+/**
+ * ci13xxx_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id: PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ci13xxx_udc_driver *driver = (void *)id->driver_data;
+ struct platform_device *plat_ci;
+ struct resource res[3];
+ int retval = 0, nres = 2;
+
+ if (!driver) {
+ dev_err(&pdev->dev, "device doesn't provide driver data\n");
+ return -ENODEV;
+ }
+
+ retval = pci_enable_device(pdev);
+ if (retval)
+ goto done;
+
+ if (!pdev->irq) {
+ dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+ retval = -ENODEV;
+ goto disable_device;
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ plat_ci = platform_device_alloc("ci_hdrc", -1);
+ if (!plat_ci) {
+ dev_err(&pdev->dev, "can't allocate ci_hdrc platform device\n");
+ retval = -ENOMEM;
+ goto disable_device;
+ }
+
+ memset(res, 0, sizeof(res));
+ res[0].start = pci_resource_start(pdev, 0);
+ res[0].end = pci_resource_end(pdev, 0);
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pdev->irq;
+ res[1].flags = IORESOURCE_IRQ;
+
+ retval = platform_device_add_resources(plat_ci, res, nres);
+ if (retval) {
+ dev_err(&pdev->dev, "can't add resources to platform device\n");
+ goto put_platform;
+ }
+
+ retval = platform_device_add_data(plat_ci, driver, sizeof(*driver));
+ if (retval)
+ goto put_platform;
+
+ dma_set_coherent_mask(&plat_ci->dev, pdev->dev.coherent_dma_mask);
+ plat_ci->dev.dma_mask = pdev->dev.dma_mask;
+ plat_ci->dev.dma_parms = pdev->dev.dma_parms;
+ plat_ci->dev.parent = &pdev->dev;
+
+ pci_set_drvdata(pdev, plat_ci);
+
+ retval = platform_device_add(plat_ci);
+ if (retval)
+ goto put_platform;
+
+ return 0;
+
+ put_platform:
+ pci_set_drvdata(pdev, NULL);
+ platform_device_put(plat_ci);
+ disable_device:
+ pci_disable_device(pdev);
+ done:
+ return retval;
+}
+
+/**
+ * ci13xxx_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci13xxx_pci_probe(),
+ * first invoking the udc_remove() and then releases
+ * all PCI resources allocated for this USB device controller
+ */
+static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
+{
+ struct platform_device *plat_ci = pci_get_drvdata(pdev);
+
+ platform_device_unregister(plat_ci);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
+ {
+ PCI_DEVICE(0x153F, 0x1004),
+ .driver_data = (kernel_ulong_t)&pci_driver,
+ },
+ {
+ PCI_DEVICE(0x153F, 0x1006),
+ .driver_data = (kernel_ulong_t)&pci_driver,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811),
+ .driver_data = (kernel_ulong_t)&langwell_pci_driver,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
+ .driver_data = (kernel_ulong_t)&penwell_pci_driver,
+ },
+ { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
+
+static struct pci_driver ci13xxx_pci_driver = {
+ .name = UDC_DRIVER_NAME,
+ .id_table = ci13xxx_pci_id_table,
+ .probe = ci13xxx_pci_probe,
+ .remove = __devexit_p(ci13xxx_pci_remove),
+};
+
+module_pci_driver(ci13xxx_pci_driver);
+
+MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
new file mode 100644
index 00000000000000..325418b384d107
--- /dev/null
+++ b/drivers/usb/chipidea/core.c
@@ -0,0 +1,668 @@
+/*
+ * core.c - ChipIdea USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: ChipIdea USB IP core family device controller
+ *
+ * This driver is composed of several blocks:
+ * - HW: hardware interface
+ * - DBG: debug facilities (optional)
+ * - UTIL: utilities
+ * - ISR: interrupts handling
+ * - ENDPT: endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS: bus glue code, bus abstraction layer
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN: non-empty bulk-in pipes cannot be halted
+ * if defined mass storage compliance succeeds but with warnings
+ * => case 4: Hi > Dn
+ * => case 5: Hi > Di
+ * => case 8: Hi <> Do
+ * if undefined usbtest 13 fails
+ * - TRACE: enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Interrupt Traffic
+ * - Handle requests which spawns into several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ * - Suspend & Remote Wakeup
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/chipidea.h>
+#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/jiffies.h>
+
+#include "ci.h"
+#include "udc.h"
+#include "bits.h"
+#include "host.h"
+#include "debug.h"
+
+/* Controller register map */
+static uintptr_t ci_regs_nolpm[] = {
+ [CAP_CAPLENGTH] = 0x000UL,
+ [CAP_HCCPARAMS] = 0x008UL,
+ [CAP_DCCPARAMS] = 0x024UL,
+ [CAP_TESTMODE] = 0x038UL,
+ [OP_USBCMD] = 0x000UL,
+ [OP_USBSTS] = 0x004UL,
+ [OP_USBINTR] = 0x008UL,
+ [OP_DEVICEADDR] = 0x014UL,
+ [OP_ENDPTLISTADDR] = 0x018UL,
+ [OP_VIEWPORT] = 0x030UL,
+ [OP_PORTSC] = 0x044UL,
+ [OP_DEVLC] = 0x084UL,
+ [OP_OTGSC] = 0x064UL,
+ [OP_USBMODE] = 0x068UL,
+ [OP_ENDPTSETUPSTAT] = 0x06CUL,
+ [OP_ENDPTPRIME] = 0x070UL,
+ [OP_ENDPTFLUSH] = 0x074UL,
+ [OP_ENDPTSTAT] = 0x078UL,
+ [OP_ENDPTCOMPLETE] = 0x07CUL,
+ [OP_ENDPTCTRL] = 0x080UL,
+};
+
+static uintptr_t ci_regs_lpm[] = {
+ [CAP_CAPLENGTH] = 0x000UL,
+ [CAP_HCCPARAMS] = 0x008UL,
+ [CAP_DCCPARAMS] = 0x024UL,
+ [CAP_TESTMODE] = 0x0FCUL,
+ [OP_USBCMD] = 0x000UL,
+ [OP_USBSTS] = 0x004UL,
+ [OP_USBINTR] = 0x008UL,
+ [OP_DEVICEADDR] = 0x014UL,
+ [OP_ENDPTLISTADDR] = 0x018UL,
+ [OP_PORTSC] = 0x044UL,
+ [OP_DEVLC] = 0x084UL,
+ [OP_OTGSC] = 0x0C4UL,
+ [OP_USBMODE] = 0x0C8UL,
+ [OP_ENDPTSETUPSTAT] = 0x0D8UL,
+ [OP_ENDPTPRIME] = 0x0DCUL,
+ [OP_ENDPTFLUSH] = 0x0E0UL,
+ [OP_ENDPTSTAT] = 0x0E4UL,
+ [OP_ENDPTCOMPLETE] = 0x0E8UL,
+ [OP_ENDPTCTRL] = 0x0ECUL,
+};
+
+static int hw_alloc_regmap(struct ci13xxx *ci, bool is_lpm)
+{
+ int i;
+
+ kfree(ci->hw_bank.regmap);
+
+ ci->hw_bank.regmap = kzalloc((OP_LAST + 1) * sizeof(void *),
+ GFP_KERNEL);
+ if (!ci->hw_bank.regmap)
+ return -ENOMEM;
+
+ for (i = 0; i < OP_ENDPTCTRL; i++)
+ ci->hw_bank.regmap[i] =
+ (i <= CAP_LAST ? ci->hw_bank.cap : ci->hw_bank.op) +
+ (is_lpm ? ci_regs_lpm[i] : ci_regs_nolpm[i]);
+
+ for (; i <= OP_LAST; i++)
+ ci->hw_bank.regmap[i] = ci->hw_bank.op +
+ 4 * (i - OP_ENDPTCTRL) +
+ (is_lpm
+ ? ci_regs_lpm[OP_ENDPTCTRL]
+ : ci_regs_nolpm[OP_ENDPTCTRL]);
+
+ return 0;
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value
+ *
+ * This function returns an error code
+ */
+int hw_port_test_set(struct ci13xxx *ci, u8 mode)
+{
+ const u8 TEST_MODE_MAX = 7;
+
+ if (mode > TEST_MODE_MAX)
+ return -EINVAL;
+
+ hw_write(ci, OP_PORTSC, PORTSC_PTC, mode << __ffs(PORTSC_PTC));
+ return 0;
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value
+ */
+u8 hw_port_test_get(struct ci13xxx *ci)
+{
+ return hw_read(ci, OP_PORTSC, PORTSC_PTC) >> __ffs(PORTSC_PTC);
+}
+
+static int hw_device_init(struct ci13xxx *ci, void __iomem *base)
+{
+ u32 reg;
+
+ /* bank is a module variable */
+ ci->hw_bank.abs = base;
+
+ ci->hw_bank.cap = ci->hw_bank.abs;
+ ci->hw_bank.cap += ci->udc_driver->capoffset;
+ ci->hw_bank.op = ci->hw_bank.cap + (ioread32(ci->hw_bank.cap) & 0xff);
+
+ hw_alloc_regmap(ci, false);
+ reg = hw_read(ci, CAP_HCCPARAMS, HCCPARAMS_LEN) >>
+ __ffs(HCCPARAMS_LEN);
+ ci->hw_bank.lpm = reg;
+ hw_alloc_regmap(ci, !!reg);
+ ci->hw_bank.size = ci->hw_bank.op - ci->hw_bank.abs;
+ ci->hw_bank.size += OP_LAST;
+ ci->hw_bank.size /= sizeof(u32);
+
+ reg = hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DEN) >>
+ __ffs(DCCPARAMS_DEN);
+ ci->hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */
+
+ if (ci->hw_ep_max > ENDPT_MAX)
+ return -ENODEV;
+
+ dev_dbg(ci->dev, "ChipIdea HDRC found, lpm: %d; cap: %p op: %p\n",
+ ci->hw_bank.lpm, ci->hw_bank.cap, ci->hw_bank.op);
+
+ /* setup lock mode ? */
+
+ /* ENDPTSETUPSTAT is '0' by default */
+
+ /* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+ return 0;
+}
+
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @ci: the controller
+ * @mode: USBMODE_CM_* controller mode to enter after the reset
+ *
+ * This function returns an error code
+ */
+int hw_device_reset(struct ci13xxx *ci, u32 mode)
+{
+	/* should flush & stop before reset */
+	hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
+	hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
+
+	hw_write(ci, OP_USBCMD, USBCMD_RST, USBCMD_RST);
+	/* NOTE(review): unbounded wait - hangs forever if RST never
+	 * self-clears; consider adding a timeout */
+	while (hw_read(ci, OP_USBCMD, USBCMD_RST))
+		udelay(10); /* not RTOS friendly */
+
+	if (ci->udc_driver->notify_event)
+		ci->udc_driver->notify_event(ci,
+			CI13XXX_CONTROLLER_RESET_EVENT);
+
+	if (ci->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
+		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
+	/* USBMODE should be configured step by step */
+	hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+	hw_write(ci, OP_USBMODE, USBMODE_CM, mode);
+	/* HW >= 2.3 */
+	hw_write(ci, OP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);
+
+	if (hw_read(ci, OP_USBMODE, USBMODE_CM) != mode) {
+		/* kernel log lines must be newline-terminated */
+		pr_err("cannot enter in %s mode\n", ci_role(ci)->name);
+		pr_err("lpm = %i\n", ci->hw_bank.lpm);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * ci_otg_role - pick role based on ID pin state
+ * @ci: the controller
+ */
+static enum ci_role ci_otg_role(struct ci13xxx *ci)
+{
+	u32 otgsc = hw_read(ci, OP_OTGSC, ~0);
+
+	/* ID pin set -> B-device (peripheral); clear -> A-device (host) */
+	if (otgsc & OTGSC_ID)
+		return CI_ROLE_GADGET;
+
+	return CI_ROLE_HOST;
+}
+
+/**
+ * ci_role_work - perform role changing based on ID pin
+ * @work: work struct
+ *
+ * Queued from ci_irq() on the "ci_otg" workqueue when an ID change is
+ * seen; acks the ID interrupt status and switches role if it differs.
+ */
+static void ci_role_work(struct work_struct *work)
+{
+	struct ci13xxx *ci = container_of(work, struct ci13xxx, work);
+	enum ci_role role = ci_otg_role(ci);
+
+	/* write-back acknowledges the ID change interrupt status */
+	hw_write(ci, OP_OTGSC, OTGSC_IDIS, OTGSC_IDIS);
+
+	if (role != ci->role) {
+		dev_dbg(ci->dev, "switching from %s to %s\n",
+			ci_role(ci)->name, ci->roles[role]->name);
+
+		ci_role_switch(ci, role);
+	}
+}
+
+/* sysfs "role" read: report the name of the currently active role */
+static ssize_t show_role(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct ci13xxx *ci = dev_get_drvdata(dev);
+	const char *name = ci_role(ci)->name;
+
+	return sprintf(buf, "%s\n", name);
+}
+
+/*
+ * sysfs "role" write: switch to the role named in @buf.
+ * Returns -EINVAL for an unknown name or if that role is already active.
+ */
+static ssize_t store_role(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct ci13xxx *ci = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	enum ci_role role;
+	int ret;
+
+	/* sysfs_streq tolerates the trailing newline "echo host > role"
+	 * writes; plain strcmp never matched in that case */
+	for (role = CI_ROLE_HOST; role < CI_ROLE_END; role++)
+		if (ci->roles[role] && sysfs_streq(buf, ci->roles[role]->name))
+			break;
+
+	if (role == CI_ROLE_END || role == ci->role)
+		return -EINVAL;
+
+	ret = ci_role_switch(ci, role);
+	if (ret)
+		return ret;
+
+	/* set vbus if the new role is host, unset if it is device.
+	 * Apple role switching doesn't like power being cut between
+	 * role stop & start, so do it here; it will not cause much harm */
+	ci->udc_driver->set_vbus(pdev, role == CI_ROLE_HOST ? 1 : 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(role, S_IRUSR | S_IWUSR, show_role, store_role);
+
+/* sysfs "vbus" write: "0" turns VBUS off, "1" turns it on */
+static ssize_t store_vbus(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct ci13xxx *ci = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+	char c = buf[0];
+
+	if (c != '0' && c != '1')
+		return -EINVAL;
+
+	ci->udc_driver->set_vbus(pdev, c == '1' ? 1 : 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(vbus, S_IWUSR, NULL, store_vbus);
+
+/*
+ * ci_irq - shared interrupt handler: defers ID-pin changes to the
+ * role-switch work and forwards everything else to the active role.
+ */
+static irqreturn_t ci_irq(int irq, void *data)
+{
+	struct ci13xxx *ci = data;
+	irqreturn_t ret = IRQ_NONE;
+
+	/* If a switch is ongoing, this is really a spurious interrupt */
+	if(!spin_trylock(&ci->switch_lock))
+		return ret;
+
+	if (ci->is_otg) {
+		u32 sts = hw_read(ci, OP_OTGSC, ~0);
+
+		/* ID changed: do the actual switch in process context */
+		if (sts & OTGSC_IDIS) {
+			queue_work(ci->wq, &ci->work);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	/* NOTE(review): the role handler's return value overwrites the
+	 * IRQ_HANDLED set above - confirm that is intended */
+	ret = ci->role_itr == CI_ROLE_END ?
+		ret : ci->roles[ci->role_itr]->irq(ci);
+
+	spin_unlock(&ci->switch_lock);
+
+	return ret;
+}
+
+/*
+ * ci_ulpi_write - write @val to ULPI PHY register @addr through the
+ * controller's ULPI viewport, polling completion for up to one second.
+ *
+ * Returns 0 on success, -1 if the viewport stayed busy past the timeout.
+ * NOTE(review): a negative errno (-ETIMEDOUT) would be more conventional;
+ * confirm callers (udc_driver->init takes this as a callback) first.
+ */
+int ci_ulpi_write(struct platform_device *pdev, u8 addr, u8 val)
+{
+	struct ci13xxx *ci = platform_get_drvdata(pdev);
+	u32 data = VIEWPORT_ULPIRUN |
+		VIEWPORT_ULPIRW |
+		VIEWPORT_ULPIADDR(addr) |
+		VIEWPORT_ULPIDATWR(val);
+	unsigned long timeout = jiffies + HZ;
+
+	hw_write(ci, OP_VIEWPORT, ~0, data);
+
+	/* ULPIRUN self-clears when the PHY access completes */
+	do {
+		data = hw_read(ci, OP_VIEWPORT, ~0);
+		schedule();
+	} while ((data & VIEWPORT_ULPIRUN)
+			&& time_before(jiffies, timeout));
+
+	return (data & VIEWPORT_ULPIRUN) ? -1 : 0;
+}
+
+/* undo ci_hdrc_gadget_init() / ci_hdrc_host_init() */
+static inline void ci_role_destroy(struct ci13xxx *ci)
+{
+	ci_hdrc_gadget_destroy(ci);
+	ci_hdrc_host_destroy(ci);
+}
+
+/*
+ * ci_hdrc_probe - bring up the ChipIdea dual-role controller.
+ *
+ * Enables the bus clock, maps the registers, initializes the role(s)
+ * selected by the platform operating_mode, and wires up the IRQ plus
+ * the "role"/"vbus" sysfs controls.
+ */
+static int __devinit ci_hdrc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ci13xxx *ci;
+	struct resource *res;
+	void __iomem *base;
+	int ret;
+
+	if (!dev->platform_data) {
+		dev_err(dev, "platform data missing\n");
+		return -ENODEV;
+	}
+
+	{
+		char clk_name[10];	/* was needlessly static */
+		struct clk *usb_clk;
+		snprintf(clk_name, sizeof(clk_name), "usb_clk.%d", pdev->id);
+
+		/* enable clock */
+		usb_clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(usb_clk)) {
+			dev_err(&pdev->dev, "failed to get clock\n");
+			return -EIO;
+		}
+		clk_prepare_enable(usb_clk);
+		clk_put(usb_clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "missing resource\n");
+		return -ENODEV;
+	}
+
+	base = devm_request_and_ioremap(dev, res);
+	if (!base) {	/* was "!res": a failed mapping went undetected */
+		dev_err(dev, "can't request and ioremap resource\n");
+		return -ENOMEM;
+	}
+
+	ci = devm_kzalloc(dev, sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		dev_err(dev, "can't allocate device\n");
+		return -ENOMEM;
+	}
+
+	ci->dev = dev;
+	ci->udc_driver = dev->platform_data;
+
+	spin_lock_init(&ci->switch_lock);
+
+	ret = hw_device_init(ci, base);
+	if (ret < 0) {
+		dev_err(dev, "can't initialize hardware\n");
+		return -ENODEV;
+	}
+
+	ci->hw_bank.phys = res->start;
+
+	ci->irq = platform_get_irq(pdev, 0);
+	if (ci->irq < 0) {
+		dev_err(dev, "missing IRQ\n");
+		return -ENODEV;
+	}
+
+	INIT_WORK(&ci->work, ci_role_work);
+	ci->wq = create_singlethread_workqueue("ci_otg");
+	if (!ci->wq) {
+		dev_err(dev, "can't create workqueue\n");
+		return -ENODEV;
+	}
+
+	/* initialize role(s) before the interrupt is requested */
+	if (ci->udc_driver->operating_mode == CI_UDC_DR_HOST ||
+	    ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_DEVICE ||
+	    ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_HOST) {
+		ret = ci_hdrc_host_init(ci);
+		if (ret)
+			dev_info(dev, "doesn't support host\n");
+	}
+
+	if (ci->udc_driver->operating_mode == CI_UDC_DR_DEVICE ||
+	    ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_DEVICE ||
+	    ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_HOST) {
+		ret = ci_hdrc_gadget_init(ci);
+		if (ret)
+			dev_info(dev, "doesn't support gadget\n");
+	}
+
+	if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
+		dev_err(dev, "no supported roles\n");
+		ret = -ENODEV;
+		goto rm_wq;
+	}
+
+	if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
+		/* ID-pin based OTG switching is deliberately not enabled;
+		 * pick the initial role from the platform operating mode */
+		if (ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_DEVICE)
+			ci->role = CI_ROLE_GADGET;
+		else
+			ci->role = CI_ROLE_HOST;
+	} else {
+		ci->role = ci->roles[CI_ROLE_HOST]
+			? CI_ROLE_HOST
+			: CI_ROLE_GADGET;
+	}
+
+	ci->vbus_active = 1;
+
+	platform_set_drvdata(pdev, ci);
+
+	/* Parrot specific init (auto calibration, p7mu ...); needs the
+	 * drvdata set just above */
+	if (ci->udc_driver->init && ci->udc_driver->init(pdev, ci_ulpi_write)) {
+		ret = -ENODEV;	/* don't return whatever ret held before */
+		goto stop;
+	}
+
+	/* workaround: the device role comes up enabled, stop it so the
+	 * host role can be started */
+	if (ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_HOST)
+		ci->roles[CI_ROLE_GADGET]->stop(ci);
+
+	/* the device role is already initialized, only start host here */
+	if (ci->udc_driver->operating_mode == CI_UDC_DR_DUAL_HOST ||
+	    ci->udc_driver->operating_mode == CI_UDC_DR_HOST) {
+		ret = ci_role_switch(ci, ci->role);
+		if (ret) {
+			dev_err(dev, "can't start host role\n");
+			ret = -ENODEV;
+			goto de_init;
+		}
+	}
+
+	ci->role_itr = ci->role;
+
+	ret = request_irq(ci->irq, ci_irq, IRQF_SHARED, ci->udc_driver->name,
+			  ci);
+	if (ret)
+		goto de_init;
+
+	ret = device_create_file(dev, &dev_attr_role);
+	if (ret)
+		goto rm_irq;
+
+	ret = device_create_file(dev, &dev_attr_vbus);
+	if (ret)
+		goto rm_attr;
+
+	if (ci->is_otg)
+		hw_write(ci, OP_OTGSC, OTGSC_IDIE, OTGSC_IDIE);
+
+	return 0;
+
+	/* unwind strictly in reverse order of acquisition; the old code
+	 * removed attributes it had not created and leaked the IRQ */
+rm_attr:
+	device_remove_file(dev, &dev_attr_role);
+rm_irq:
+	free_irq(ci->irq, ci);
+de_init:
+	if (ci->udc_driver->exit)
+		ci->udc_driver->exit(pdev);
+stop:
+	ci_role_destroy(ci);
+rm_wq:
+	flush_workqueue(ci->wq);
+	destroy_workqueue(ci->wq);
+
+	return ret;
+}
+
+/* tear the controller down in reverse order of ci_hdrc_probe() */
+static int __devexit ci_hdrc_remove(struct platform_device *pdev)
+{
+	struct ci13xxx *ci = platform_get_drvdata(pdev);
+
+	/* quiesce the interrupt first so the handler cannot run against
+	 * state freed below (the old code freed it last) */
+	free_irq(ci->irq, ci);
+	device_remove_file(ci->dev, &dev_attr_role);
+	device_remove_file(ci->dev, &dev_attr_vbus);
+	flush_workqueue(ci->wq);
+	destroy_workqueue(ci->wq);
+	ci_role_destroy(ci);
+	if (ci->udc_driver->exit)
+		ci->udc_driver->exit(pdev);
+
+	{
+		char clk_name[10];	/* was needlessly static */
+		struct clk *usb_clk;
+		snprintf(clk_name, sizeof(clk_name), "usb_clk.%d", pdev->id);
+
+		/* disable clock */
+		usb_clk = clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(usb_clk)) {
+			dev_err(&pdev->dev, "failed to get clock\n");
+			return -EIO;
+		}
+		clk_disable_unprepare(usb_clk);
+		clk_put(usb_clk);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* system suspend: quiesce the active role(s), then gate the bus clock */
+int ci_hdrc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ci13xxx *ci = platform_get_drvdata(pdev);
+	char clk_name[10];	/* was static: unnecessary and not reentrant */
+	struct clk *usb_clk;
+
+	dev_dbg(dev, "suspending...\n");
+
+	ci_hdrc_host_suspend(ci);
+	ci_hdrc_gadget_suspend(ci);
+
+	snprintf(clk_name, sizeof(clk_name), "usb_clk.%d", pdev->id);
+
+	usb_clk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(usb_clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return -EIO;
+	}
+
+	clk_disable_unprepare(usb_clk);
+	clk_put(usb_clk);
+
+	return 0;
+}
+
+/* system resume: ungate the bus clock first, then restart the role(s) */
+int ci_hdrc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ci13xxx *ci = platform_get_drvdata(pdev);
+	char clk_name[10];	/* was static: unnecessary and not reentrant */
+	struct clk *usb_clk;
+
+	dev_dbg(dev, "resuming...\n");
+
+	snprintf(clk_name, sizeof(clk_name), "usb_clk.%d", pdev->id);
+
+	usb_clk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(usb_clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return -EIO;
+	}
+
+	clk_prepare_enable(usb_clk);
+	clk_put(usb_clk);
+
+	ci_hdrc_host_resume(ci);
+	ci_hdrc_gadget_resume(ci);
+
+	return 0;
+}
+
+/* const: the pm ops table is never modified, and driver.pm expects
+ * a pointer to const */
+static const struct dev_pm_ops ci_hdrc_dev_pm_ops = {
+	.suspend = ci_hdrc_suspend,
+	.resume = ci_hdrc_resume,
+};
+#endif
+
+
+static struct platform_driver ci_hdrc_driver = {
+ .probe = ci_hdrc_probe,
+ .remove = __devexit_p(ci_hdrc_remove),
+ .driver = {
+ .name = "ci_hdrc",
+#ifdef CONFIG_PM
+ .pm = &ci_hdrc_dev_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(ci_hdrc_driver);
+
+MODULE_ALIAS("platform:ci_hdrc");
+MODULE_ALIAS("platform:ci13xxx");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("ChipIdea HDRC Driver");
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
new file mode 100644
index 00000000000000..95ad5407db7bea
--- /dev/null
+++ b/drivers/usb/chipidea/debug.c
@@ -0,0 +1,446 @@
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+#include "udc.h"
+#include "bits.h"
+#include "debug.h"
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf: destination buffer
+ * @size: buffer size, in 32-bit words
+ *
+ * This function returns number of registers read
+ */
+static size_t hw_register_read(struct ci13xxx *udc, u32 *buf, size_t size)
+{
+	unsigned i;
+
+	/* clamp to the size of the mapped register bank */
+	if (size > udc->hw_bank.size)
+		size = udc->hw_bank.size;
+
+	for (i = 0; i < size; i++)
+		buf[i] = hw_read(udc, i * sizeof(u32), ~0);
+
+	return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address (rounded down to 32-bit alignment)
+ * @data: register value
+ *
+ * This function returns an error code
+ */
+static int hw_register_write(struct ci13xxx *udc, u16 addr, u32 data)
+{
+	u16 idx = addr / sizeof(u32);	/* word index into the bank */
+
+	if (idx >= udc->hw_bank.size)
+		return -EINVAL;
+
+	/* write at the word-aligned offset */
+	hw_write(udc, idx * sizeof(u32), ~0, data);
+	return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(struct ci13xxx *udc, int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	/* mask the interrupt, then write the bit back to clear its status */
+	hw_write(udc, OP_USBINTR, BIT(n), 0);
+	hw_write(udc, OP_USBSTS, BIT(n), BIT(n));
+	return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ * interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(struct ci13xxx *udc, int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	/* TESTMODE_FORCE gates the forced status write; restore it after */
+	hw_write(udc, CAP_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+	hw_write(udc, OP_USBINTR, BIT(n), BIT(n));
+	hw_write(udc, OP_USBSTS, BIT(n), BIT(n));
+	hw_write(udc, CAP_TESTMODE, TESTMODE_FORCE, 0);
+	return 0;
+}
+
+/**
+ * show_device: prints information about device capabilities and status
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget *gadget = &udc->gadget;
+	int n = 0;
+
+	/* defensive NULL checks; returns 0 (empty read) rather than erroring */
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
+		       gadget->speed);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
+		       gadget->max_speed);
+	/* TODO: Scheduled for removal in 3.8. */
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
+		       gadget_is_dualspeed(gadget));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
+		       gadget->is_otg);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
+		       gadget->is_a_peripheral);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n",
+		       gadget->b_hnp_enable);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n",
+		       gadget->a_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+		       gadget->a_alt_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n",
+		       (gadget->name ? gadget->name : ""));
+
+	return n;
+}
+static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget_driver *driver = udc->driver;
+	int n = 0;
+
+	/* NOTE(review): logs on "dev" while siblings use "udc->dev" */
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	if (driver == NULL)
+		return scnprintf(buf, PAGE_SIZE,
+				 "There is no gadget attached!\n");
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
+		       (driver->function ? driver->function : ""));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+		       driver->max_speed);
+
+	return n;
+}
+static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "EINVAL\n");
+		return 0;
+	}
+
+	/* read PORTSC.PTC under the udc lock */
+	spin_lock_irqsave(&udc->lock, flags);
+	mode = hw_port_test_get(udc);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &mode) != 1) {
+		/* usage hint; newline-terminated like every log line */
+		dev_err(udc->dev, "<mode>: set port test mode\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (hw_port_test_set(udc, mode))
+		dev_err(udc->dev, "invalid mode\n");
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
+		   show_port_test, store_port_test);
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	/* endpoints are laid out RX first, TX in the upper half */
+	spin_lock_irqsave(&udc->lock, flags);
+	for (i = 0; i < udc->hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx =
+			&udc->ci13xxx_ep[i + udc->hw_ep_max/2];
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "EP=%02i: RX=%08X TX=%08X\n",
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
+		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       " %04X: %08X %08X\n", j,
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
+		}
+	}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Check "device.h" for details
+ */
+#define DUMP_ENTRIES	512
+static ssize_t show_registers(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 *dump;
+	unsigned i, k, n = 0;
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	/* snapshot into a scratch buffer so the lock is held only while
+	 * touching the hardware, not while formatting */
+	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
+	if (!dump) {
+		dev_err(udc->dev, "%s: out of memory\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	k = hw_register_read(udc, dump, DUMP_ENTRIES);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	for (i = 0; i < k; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "reg[0x%04X] = 0x%08X\n",
+			       i * (unsigned)sizeof(u32), dump[i]);
+	}
+	kfree(dump);
+
+	return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long addr, data, flags;
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	/* expects "<addr> <data>"; %li accepts decimal, hex (0x) or octal */
+	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+		dev_err(udc->dev,
+			"<addr> <data>: write data to register address\n");
+		goto done;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (hw_register_write(udc, addr, data))
+		dev_err(udc->dev, "invalid address range\n");
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
+		   show_registers, store_registers);
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	struct list_head *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+	if (attr == NULL || buf == NULL) {
+		dev_err(udc->dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	for (i = 0; i < udc->hw_ep_max; i++)
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
+
+			/* "%" binds tighter than "/": parenthesize so the
+			 * endpoint number wraps at hw_ep_max/2 as intended
+			 * (the old code printed i/2 instead) */
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       "EP=%02i: TD=%08X %s\n",
+				       i % (udc->hw_ep_max/2), (u32)req->dma,
+				       ((i < udc->hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+					       " %04X: %08X\n", j,
+					       *((u32 *)req->ptr + j));
+		}
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ *
+ * On any failure the labels below unwind every attribute created so
+ * far, in reverse creation order.
+ */
+int dbg_create_files(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+	retval = device_create_file(dev, &dev_attr_device);
+	if (retval)
+		goto done;
+	retval = device_create_file(dev, &dev_attr_driver);
+	if (retval)
+		goto rm_device;
+	retval = device_create_file(dev, &dev_attr_port_test);
+	if (retval)
+		goto rm_driver;
+	retval = device_create_file(dev, &dev_attr_qheads);
+	if (retval)
+		goto rm_port_test;
+	retval = device_create_file(dev, &dev_attr_registers);
+	if (retval)
+		goto rm_qheads;
+	retval = device_create_file(dev, &dev_attr_requests);
+	if (retval)
+		goto rm_registers;
+	return 0;
+
+ rm_registers:
+	device_remove_file(dev, &dev_attr_registers);
+ rm_qheads:
+	device_remove_file(dev, &dev_attr_qheads);
+ rm_port_test:
+	device_remove_file(dev, &dev_attr_port_test);
+ rm_driver:
+	device_remove_file(dev, &dev_attr_driver);
+ rm_device:
+	device_remove_file(dev, &dev_attr_device);
+ done:
+	return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ *
+ * Removal is in reverse order of dbg_create_files().
+ */
+int dbg_remove_files(struct device *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+	device_remove_file(dev, &dev_attr_requests);
+	device_remove_file(dev, &dev_attr_registers);
+	device_remove_file(dev, &dev_attr_qheads);
+	device_remove_file(dev, &dev_attr_port_test);
+	device_remove_file(dev, &dev_attr_driver);
+	device_remove_file(dev, &dev_attr_device);
+	return 0;
+}
diff --git a/drivers/usb/chipidea/debug.h b/drivers/usb/chipidea/debug.h
new file mode 100644
index 00000000000000..425f1ff6284aa5
--- /dev/null
+++ b/drivers/usb/chipidea/debug.h
@@ -0,0 +1,31 @@
+/*
+ * debug.h - ChipIdea USB driver debug interfaces
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_USB_CHIPIDEA_DEBUG_H
+#define __DRIVERS_USB_CHIPIDEA_DEBUG_H
+
+#ifdef CONFIG_USB_CHIPIDEA_DEBUG
+int dbg_create_files(struct device *dev);
+int dbg_remove_files(struct device *dev);
+#else
+/* no-op stubs so callers need no #ifdefs when debugging is disabled */
+static inline int dbg_create_files(struct device *dev)
+{
+	return 0;
+}
+
+static inline int dbg_remove_files(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#endif /* __DRIVERS_USB_CHIPIDEA_DEBUG_H */
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
new file mode 100644
index 00000000000000..5f6ed3d6b85db2
--- /dev/null
+++ b/drivers/usb/chipidea/host.c
@@ -0,0 +1,207 @@
+/*
+ * host.c - ChipIdea USB host controller driver
+ *
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * Author: Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/chipidea.h>
+
+#define CHIPIDEA_EHCI
+#include "../host/ehci-hcd.c"
+
+#include "ci.h"
+#include "bits.h"
+#include "host.h"
+
+/* hc_driver .reset hook: run common EHCI setup, then power the port down */
+static int ci_ehci_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int ret;
+
+	/* the controller has an embedded transaction translator */
+	hcd->has_tt = 1;
+
+	ret = ehci_setup(hcd);
+	if (ret)
+		return ret;
+
+	ehci_port_power(ehci, 0);
+
+	return ret;
+}
+
+/* EHCI host-controller operations, mostly the stock ehci-hcd handlers */
+static const struct hc_driver ci_ehci_hc_driver = {
+	.description	= "ehci_hcd",
+	.product_desc	= "ChipIdea HDRC EHCI",
+	.hcd_priv_size	= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq	= ehci_irq,
+	.flags	= HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset		= ci_ehci_setup,
+	.start		= ehci_run,
+	.stop		= ehci_stop,
+	.shutdown	= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue	= ehci_urb_enqueue,
+	.urb_dequeue	= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset	= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control	= ehci_hub_control,
+	.bus_suspend	= ehci_bus_suspend,
+	.bus_resume	= ehci_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+/* role irq hook: hand the shared interrupt to the EHCI core */
+static irqreturn_t host_irq(struct ci13xxx *ci)
+{
+	return usb_hcd_irq(ci->irq, ci->hcd);
+}
+
+/*
+ * host_start - role start hook: create and register an EHCI hcd on top
+ * of the already-mapped register bank.
+ *
+ * Returns 0 on success, -ENODEV when USB is disabled ("nousb"),
+ * -ENOMEM or the usb_add_hcd() error otherwise.
+ */
+static int host_start(struct ci13xxx *ci)
+{
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	hcd = usb_create_hcd(&ci_ehci_hc_driver, ci->dev, dev_name(ci->dev));
+	if (!hcd)
+		return -ENOMEM;
+
+	dev_set_drvdata(ci->dev, ci);
+	/* reuse the bank the core already requested and mapped */
+	hcd->rsrc_start = ci->hw_bank.phys;
+	hcd->rsrc_len = ci->hw_bank.size;
+	hcd->regs = ci->hw_bank.abs;
+	hcd->has_tt = 1;
+
+	hcd->power_budget = ci->udc_driver->power_budget;
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = ci->hw_bank.cap;
+	ehci->has_hostpc = ci->hw_bank.lpm;
+#ifdef CONFIG_USB_CHIPIDEA_P7_BUG_WORKAROUND
+	ehci->reset_nb = ci->udc_driver->workaround_reset_nb;
+#endif
+	/* publish ci->hcd before the hcd goes live (host_irq reads it) */
+	ci->hcd = hcd;
+	barrier();
+
+	ret = usb_add_hcd(hcd, 0, 0);
+	if (ret)
+		usb_put_hcd(hcd);
+
+	return ret;
+}
+
+/* role stop hook: unregister and release the hcd, if one was created */
+static void host_stop(struct ci13xxx *ci)
+{
+	struct usb_hcd *hcd = ci->hcd;
+
+	/* hcd is NULL when booted with "nousb" - nothing to tear down */
+	if (!hcd)
+		return;
+
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+}
+
+/* tear the host role down, but only if it is the one currently active */
+void ci_hdrc_host_destroy(struct ci13xxx *ci)
+{
+	if (ci->role == CI_ROLE_HOST)
+		host_stop(ci);
+}
+
+/*
+ * ci_hdrc_host_init - register the host role driver.
+ *
+ * Returns -ENXIO when the silicon has no host capability (DCCPARAMS.HC),
+ * -ENOMEM on allocation failure, 0 on success.
+ */
+int ci_hdrc_host_init(struct ci13xxx *ci)
+{
+	struct ci_role_driver *rdrv;
+
+	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_HC))
+		return -ENXIO;
+
+	/* devm allocation: freed automatically with the device */
+	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
+	if (!rdrv)
+		return -ENOMEM;
+
+	rdrv->start = host_start;
+	rdrv->stop = host_stop;
+	rdrv->irq = host_irq;
+	rdrv->name = "host";
+	ci->roles[CI_ROLE_HOST] = rdrv;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* halt the EHCI controller for system suspend (host role only) */
+void ci_hdrc_host_suspend(struct ci13xxx *ci)
+{
+	struct ehci_hcd *ehci;
+
+	if (ci->role != CI_ROLE_HOST)
+		return;
+
+	ehci = hcd_to_ehci(ci->hcd);
+	ehci_halt(ehci);
+}
+
+/*
+ * Bring the host side back after system resume: reset the controller,
+ * re-arm the root-hub ports, and tell usbcore power was lost so every
+ * attached device gets re-enumerated.
+ */
+void ci_hdrc_host_resume(struct ci13xxx *ci)
+{
+	struct usb_hcd *hcd = ci->hcd;
+	struct ehci_hcd *ehci;
+
+	if (ci->role != CI_ROLE_HOST)
+		return;
+
+	ehci = hcd_to_ehci(hcd);
+
+	ehci_reset(ehci);
+	ehci_prepare_ports_for_controller_resume(ehci);
+
+	usb_hcd_resume_root_hub(hcd);
+	usb_root_hub_lost_power(hcd->self.root_hub);
+
+	return;
+}
+#endif
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
new file mode 100644
index 00000000000000..ab167403b892fe
--- /dev/null
+++ b/drivers/usb/chipidea/host.h
@@ -0,0 +1,42 @@
+#ifndef __DRIVERS_USB_CHIPIDEA_HOST_H
+#define __DRIVERS_USB_CHIPIDEA_HOST_H
+
+#ifdef CONFIG_USB_CHIPIDEA_HOST
+
+int ci_hdrc_host_init(struct ci13xxx *ci);
+void ci_hdrc_host_destroy(struct ci13xxx *ci);
+
+#ifdef CONFIG_PM
+void ci_hdrc_host_suspend(struct ci13xxx *ci);
+void ci_hdrc_host_resume(struct ci13xxx *ci);
+#endif
+
+#else
+
+/* host support compiled out: stubs so the core needs no #ifdefs */
+struct ci13xxx;
+
+static inline int ci_hdrc_host_init(struct ci13xxx *ci)
+{
+	/* -ENXIO tells the core this build has no host role */
+	return -ENXIO;
+}
+
+static inline void ci_hdrc_host_destroy(struct ci13xxx *ci)
+{
+
+}
+
+#ifdef CONFIG_PM
+static inline void ci_hdrc_host_suspend(struct ci13xxx *ci)
+{
+}
+
+static inline void ci_hdrc_host_resume(struct ci13xxx *ci)
+{
+}
+
+#endif
+
+
+#endif
+
+#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
new file mode 100644
index 00000000000000..0f9820561cf14a
--- /dev/null
+++ b/drivers/usb/chipidea/udc.c
@@ -0,0 +1,1902 @@
+/*
+ * udc.c - ChipIdea UDC driver
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+#include "udc.h"
+#include "bits.h"
+#include "debug.h"
+
+/*
+ * NOTE(review): need_pullup appears to latch a pullup request made before
+ * the UDC is fully started — confirm against its users later in this file.
+ */
+static int need_pullup = 0;
+/* control endpoint description */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_out_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* ep0 IN half: same contract as the OUT half, opposite direction */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns bit number
+ * (RX endpoints map to bits 0..15, TX endpoints to bits 16..31)
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+	return num + (dir ? 16 : 0);
+}
+
+/*
+ * ep_to_bit: map a logical endpoint index to its register bit.
+ * @udc: device controller
+ * @n:   logical index (0..hw_ep_max-1, RX half first then TX half)
+ *
+ * On controllers with fewer than 16 endpoints per direction the TX bits
+ * still start at bit 16, so indices in the TX half are shifted up by the
+ * number of missing RX slots.
+ */
+static inline int ep_to_bit(struct ci13xxx *udc, int n)
+{
+	int fill = 16 - udc->hw_ep_max / 2;
+
+	if (n >= udc->hw_ep_max / 2)
+		n += fill;
+
+	return n;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts (execute without interruption)
+ * @udc: device controller
+ * @dma: 0 => disable, !0 => enable and set dma engine
+ *       (the non-zero value is the physical address of the endpoint list)
+ *
+ * This function returns an error code
+ */
+static int hw_device_state(struct ci13xxx *udc, u32 dma)
+{
+	if (dma) {
+		hw_write(udc, OP_ENDPTLISTADDR, ~0, dma);
+		/* interrupt, error, port change, reset, sleep/suspend */
+		hw_write(udc, OP_USBINTR, ~0,
+			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+		hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
+	} else {
+		hw_write(udc, OP_USBINTR, ~0, 0);
+	}
+	return 0;
+}
+
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_flush(struct ci13xxx *udc, int num, int dir)
+{
+	int n = hw_ep_bit(num, dir);
+
+	/* re-flush if a transfer got (re)primed while we were flushing */
+	do {
+		/* flush any pending transfer */
+		hw_write(udc, OP_ENDPTFLUSH, ~0, BIT(n));
+		while (hw_read(udc, OP_ENDPTFLUSH, BIT(n)))
+			cpu_relax();
+	} while (hw_read(udc, OP_ENDPTSTAT, BIT(n)));
+
+	return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * Flushes the FIFO first so no transfer is in flight when the
+ * enable bit is cleared.  This function returns an error code
+ */
+static int hw_ep_disable(struct ci13xxx *udc, int num, int dir)
+{
+	hw_ep_flush(udc, num, dir);
+	hw_write(udc, OP_ENDPTCTRL + num,
+		 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+	return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @type: endpoint type (USB_ENDPOINT_XFER_*)
+ *
+ * Programs type, clears any stall, resets the data toggle and sets the
+ * enable bit for the given direction in one ENDPTCTRL write.
+ * This function returns an error code
+ */
+static int hw_ep_enable(struct ci13xxx *udc, int num, int dir, int type)
+{
+	u32 mask, data;
+
+	if (dir) {
+		mask  = ENDPTCTRL_TXT;  /* type    */
+		data  = type << __ffs(mask);
+
+		mask |= ENDPTCTRL_TXS;  /* unstall */
+		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
+		data |= ENDPTCTRL_TXR;
+		mask |= ENDPTCTRL_TXE;  /* enable  */
+		data |= ENDPTCTRL_TXE;
+	} else {
+		mask  = ENDPTCTRL_RXT;  /* type    */
+		data  = type << __ffs(mask);
+
+		mask |= ENDPTCTRL_RXS;  /* unstall */
+		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
+		data |= ENDPTCTRL_RXR;
+		mask |= ENDPTCTRL_RXE;  /* enable  */
+		data |= ENDPTCTRL_RXE;
+	}
+	hw_write(udc, OP_ENDPTCTRL + num, mask, data);
+	return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted
+ */
+static int hw_ep_get_halt(struct ci13xxx *udc, int num, int dir)
+{
+	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+	return hw_read(udc, OP_ENDPTCTRL + num, mask) ? 1 : 0;
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ *                                 interruption)
+ * @udc: device controller
+ * @n: logical endpoint index (translated to a register bit via ep_to_bit)
+ *
+ * This function returns setup status
+ */
+static int hw_test_and_clear_setup_status(struct ci13xxx *udc, int n)
+{
+	n = ep_to_bit(udc, n);
+	return hw_test_and_clear(udc, OP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * Aborts with -EAGAIN if a new SETUP packet arrives for a control-OUT
+ * endpoint either before or while priming, so the caller can restart.
+ * This function returns an error code
+ */
+static int hw_ep_prime(struct ci13xxx *udc, int num, int dir, int is_ctrl)
+{
+	int n = hw_ep_bit(num, dir);
+
+	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	hw_write(udc, OP_ENDPTPRIME, ~0, BIT(n));
+
+	while (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
+		cpu_relax();
+	if (is_ctrl && dir == RX && hw_read(udc, OP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	/* status should be tested according with manual but it doesn't work */
+	return 0;
+}
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ *                 without interruption)
+ * @udc: device controller
+ * @num: endpoint number
+ * @dir: endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * Retries until the controller reflects the requested halt state.
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(struct ci13xxx *udc, int num, int dir, int value)
+{
+	if (value != 0 && value != 1)
+		return -EINVAL;
+
+	do {
+		enum ci13xxx_regs reg = OP_ENDPTCTRL + num;
+		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+		/* data toggle - reserved for EP0 but it's in ESS */
+		hw_write(udc, reg, mask_xs|mask_xr,
+			  value ? mask_xs : mask_xr);
+	} while (value != hw_ep_get_halt(udc, num, dir));
+
+	return 0;
+}
+
+/**
+ * hw_is_port_high_speed: test if port is high speed
+ * @udc: device controller
+ *
+ * LPM-capable cores report port speed in DEVLC, older ones in PORTSC.
+ * This function returns true if high speed port
+ */
+static int hw_port_is_high_speed(struct ci13xxx *udc)
+{
+	return udc->hw_bank.lpm ? hw_read(udc, OP_DEVLC, DEVLC_PSPD) :
+		hw_read(udc, OP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register
+ * @udc: device controller
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_enable(struct ci13xxx *udc)
+{
+	return hw_read(udc, OP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register
+ * @udc: device controller
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_status(struct ci13xxx *udc)
+{
+	return hw_read(udc, OP_USBSTS, ~0);
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ *                             interruption)
+ * @udc: device controller
+ * @n: logical endpoint index (translated to a register bit via ep_to_bit)
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(struct ci13xxx *udc, int n)
+{
+	n = ep_to_bit(udc, n);
+	return hw_test_and_clear(udc, OP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ *                                without interruption)
+ * @udc: device controller
+ *
+ * This function returns active interrupts (status masked by enables)
+ */
+static u32 hw_test_and_clear_intr_active(struct ci13xxx *udc)
+{
+	u32 reg = hw_read_intr_status(udc) & hw_read_intr_enable(udc);
+
+	/* USBSTS bits are write-1-to-clear */
+	hw_write(udc, OP_USBSTS, ~0, reg);
+	return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ *                                interruption)
+ * @udc: device controller
+ *
+ * This function returns guard value (non-zero means no new SETUP arrived
+ * while the tripwire was armed, i.e. the copied packet is consistent)
+ */
+static int hw_test_and_clear_setup_guard(struct ci13xxx *udc)
+{
+	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ *                              interruption)
+ * @udc: device controller
+ *
+ * Arms the SETUP tripwire (SUTW) before copying a setup packet.
+ * This function returns guard value
+ */
+static int hw_test_and_set_setup_guard(struct ci13xxx *udc)
+{
+	return hw_test_and_write(udc, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @udc: device controller
+ * @value: new USB address
+ *
+ * This function explicitly sets the address, without the "USBADRA" (advance)
+ * feature, which is not supported by older versions of the controller.
+ */
+static void hw_usb_set_address(struct ci13xxx *udc, u8 value)
+{
+	hw_write(udc, OP_DEVICEADDR, DEVICEADDR_USBADR,
+		 value << __ffs(DEVICEADDR_USBADR));
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ *               interruption)
+ * @udc: device controller
+ *
+ * Clears the address, flushes all endpoints, clears setup/complete
+ * status and waits for any pending prime to drain.
+ * This function returns an error code
+ */
+static int hw_usb_reset(struct ci13xxx *udc)
+{
+	hw_usb_set_address(udc, 0);
+
+	/* ESS flushes only at end?!? */
+	hw_write(udc, OP_ENDPTFLUSH, ~0, ~0);
+
+	/* clear setup token semaphores */
+	hw_write(udc, OP_ENDPTSETUPSTAT, 0, 0);
+
+	/* clear complete status */
+	hw_write(udc, OP_ENDPTCOMPLETE, 0, 0);
+
+	/* wait until all bits cleared */
+	while (hw_read(udc, OP_ENDPTPRIME, ~0))
+		udelay(10); /* not RTOS friendly */
+
+	/* reset all endpoints ? */
+
+	/* reset internal status and wait for further instructions
+	   no need to verify the port reset status (ESS does it) */
+
+	return 0;
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep: endpoint
+ *
+ * Returns the bEndpointAddress-style byte: direction bit 7 plus number.
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
+}
+
+/**
+ * _hardware_queue: configures a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request to program into the endpoint's TD list
+ *
+ * Builds the transfer descriptor (plus an extra zero-length TD when a ZLP
+ * is needed), links it behind any request already queued — using the
+ * ATDTW tripwire to add to a possibly-active list — or primes the
+ * endpoint directly through its queue head.
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	struct ci13xxx *udc = mEp->udc;
+	unsigned i;
+	int ret = 0;
+	unsigned length = mReq->req.length;
+
+	/* don't queue twice */
+	if (mReq->req.status == -EALREADY)
+		return -EALREADY;
+
+	mReq->req.status = -EALREADY;
+
+	/* a max-packet-multiple transfer with req.zero needs a trailing ZLP TD */
+	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
+		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
+					   &mReq->zdma);
+		if (mReq->zptr == NULL)
+			return -ENOMEM;
+
+		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
+		mReq->zptr->next    = cpu_to_le32(TD_TERMINATE);
+		mReq->zptr->token   = cpu_to_le32(TD_STATUS_ACTIVE);
+		if (!mReq->req.no_interrupt)
+			mReq->zptr->token |= cpu_to_le32(TD_IOC);
+	}
+	ret = usb_gadget_map_request(&udc->gadget, &mReq->req, mEp->dir);
+	if (ret)
+		return ret;
+
+	/*
+	 * TD configuration
+	 * TODO - handle requests which spawns into several TDs
+	 */
+	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+	mReq->ptr->token    = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
+	mReq->ptr->token   &= cpu_to_le32(TD_TOTAL_BYTES);
+	mReq->ptr->token   |= cpu_to_le32(TD_STATUS_ACTIVE);
+	if (mReq->zptr) {
+		mReq->ptr->next    = cpu_to_le32(mReq->zdma);
+	} else {
+		mReq->ptr->next    = cpu_to_le32(TD_TERMINATE);
+		if (!mReq->req.no_interrupt)
+			mReq->ptr->token  |= cpu_to_le32(TD_IOC);
+	}
+	mReq->ptr->page[0]  = cpu_to_le32(mReq->req.dma);
+	for (i = 1; i < TD_PAGE_COUNT; i++) {
+		u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
+		page &= ~TD_RESERVED_MASK;
+		mReq->ptr->page[i] = cpu_to_le32(page);
+	}
+
+	if (!list_empty(&mEp->qh.queue)) {
+		struct ci13xxx_req *mReqPrev;
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		int tmp_stat;
+		u32 next = mReq->dma & TD_ADDR_MASK;
+
+		/* link the new TD behind the last queued one */
+		mReqPrev = list_entry(mEp->qh.queue.prev,
+				struct ci13xxx_req, queue);
+		if (mReqPrev->zptr)
+			mReqPrev->zptr->next = cpu_to_le32(next);
+		else
+			mReqPrev->ptr->next = cpu_to_le32(next);
+		wmb();
+		if (hw_read(udc, OP_ENDPTPRIME, BIT(n)))
+			goto done;
+		/* ATDTW tripwire: sample ENDPTSTAT atomically w.r.t. the hw */
+		do {
+			hw_write(udc, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
+			tmp_stat = hw_read(udc, OP_ENDPTSTAT, BIT(n));
+		} while (!hw_read(udc, OP_USBCMD, USBCMD_ATDTW));
+		hw_write(udc, OP_USBCMD, USBCMD_ATDTW, 0);
+		if (tmp_stat)
+			goto done;
+	}
+
+	/* QH configuration */
+	mEp->qh.ptr->td.next   = cpu_to_le32(mReq->dma);    /* TERMINATE = 0 */
+	mEp->qh.ptr->td.token &=
+		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
+
+	if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
+		u32 mul = mReq->req.length / mEp->ep.maxpacket;
+
+		if (mReq->req.length % mEp->ep.maxpacket)
+			mul++;
+		mEp->qh.ptr->cap |= mul << __ffs(QH_MULT);
+	}
+
+	wmb();   /* synchronize before ep prime */
+
+	ret = hw_ep_prime(udc, mEp->num, mEp->dir,
+			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+done:
+	return ret;
+}
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp: endpoint
+ * @mReq: request whose TD is inspected/retired
+ *
+ * Returns -EBUSY while the TD (or its trailing ZLP TD) is still active,
+ * otherwise unmaps the request, translates the TD token into req.status
+ * and returns the number of bytes actually transferred (or an error code).
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	u32 tmptoken = le32_to_cpu(mReq->ptr->token);
+
+	if (mReq->req.status != -EALREADY)
+		return -EINVAL;
+
+	if ((TD_STATUS_ACTIVE & tmptoken) != 0)
+	{
+		int n = hw_ep_bit(mEp->num, mEp->dir);
+		/* Workaround for bug : 9000531823 Adding a dTD to a Primed Endpoint May Not Get Recognized */
+		/* This bug don't occur on last chipidea core (2.50a) */
+
+		/* The workaround is: check when the last token is still active if the transfer is still in progress */
+		/* if not then the bug was triggered and the transfer needs to be reprimed */
+
+		if(!hw_read(mEp->udc, OP_ENDPTSTAT, BIT(n))) /* TD ACTIVE but status is inactive*/
+		{
+			/* Reprime */
+			mEp->qh.ptr->td.next   = cpu_to_le32(mReq->dma);    /* TERMINATE = 0 */
+			mEp->qh.ptr->td.token &=
+				cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
+
+			//TODO Check if needed
+			if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
+				u32 mul = mReq->req.length / mEp->ep.maxpacket;
+
+				if (mReq->req.length % mEp->ep.maxpacket)
+					mul++;
+				mEp->qh.ptr->cap |= mul << __ffs(QH_MULT);
+			}
+
+			wmb();   /* synchronize before ep prime */
+
+			hw_ep_prime(mEp->udc, mEp->num, mEp->dir,
+				mEp->type == USB_ENDPOINT_XFER_CONTROL);
+		}
+
+		return -EBUSY;
+	}
+
+	if (mReq->zptr) {
+		if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0)
+			return -EBUSY;
+		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+		mReq->zptr = NULL;
+	}
+
+	/* NOTE(review): immediately overwritten below from the TD token */
+	mReq->req.status = 0;
+
+	usb_gadget_unmap_request(&mEp->udc->gadget, &mReq->req, mEp->dir);
+
+	/* fold hardware error bits into a single -1 status */
+	mReq->req.status = tmptoken & TD_STATUS;
+	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+
+	/* TOTAL_BYTES counts down: actual = requested - remaining */
+	mReq->req.actual   = tmptoken & TD_TOTAL_BYTES;
+	mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
+	mReq->req.actual   = mReq->req.length - mReq->req.actual;
+	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+
+	return mReq->req.actual;
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * Flushes the FIFO, then completes every queued request with -ESHUTDOWN.
+ * This function returns an error code
+ * Caller must hold lock (it is dropped around each complete() callback)
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	if (mEp == NULL)
+		return -EINVAL;
+
+	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
+
+	while (!list_empty(&mEp->qh.queue)) {
+
+		/* pop oldest request */
+		struct ci13xxx_req *mReq = \
+			list_entry(mEp->qh.queue.next,
+				   struct ci13xxx_req, queue);
+
+		/* free any trailing zero-length TD */
+		if (mReq->zptr) {
+			dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
+			mReq->zptr = NULL;
+		}
+
+		list_del_init(&mReq->queue);
+		mReq->req.status = -ESHUTDOWN;
+
+		if (mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			mReq->req.complete(&mEp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * Also notifies the gadget driver of the disconnect and releases the
+ * ep0 status request.  This function returns an error code
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+{
+	struct usb_ep *ep;
+	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->remote_wakeup = 0;
+	udc->suspended = 0;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	/* flush all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_fifo_flush(ep);
+	}
+	/* ep0 halves are not in the gadget's ep list: flush them explicitly */
+	usb_ep_fifo_flush(&udc->ep0out->ep);
+	usb_ep_fifo_flush(&udc->ep0in->ep);
+
+	if (udc->driver)
+		udc->driver->disconnect(gadget);
+
+	/* make sure to disable all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_disable(ep);
+	}
+
+	if (udc->status != NULL) {
+		usb_ep_free_request(&udc->ep0in->ep, udc->status);
+		udc->status = NULL;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets USB engine after a bus reset occurred:
+ * stops all activity, reprograms the controller and reallocates the
+ * ep0 status request.  Called with udc->lock held; drops it while
+ * calling out of the driver.
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	int retval;
+
+	spin_unlock(&udc->lock);
+	retval = _gadget_stop_activity(&udc->gadget);
+	if (retval)
+		goto done;
+
+	retval = hw_usb_reset(udc);
+	if (retval)
+		goto done;
+
+	udc->status = usb_ep_alloc_request(&udc->ep0in->ep, GFP_ATOMIC);
+	if (udc->status == NULL)
+		retval = -ENOMEM;
+
+	usb_gadget_set_state(&udc->gadget, USB_STATE_DEFAULT);
+
+done:
+	spin_lock(&udc->lock);
+
+	if (retval)
+		dev_err(udc->dev, "error: %i\n", retval);
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Frees the buffer and request allocated by isr_get_status_response().
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	if (ep == NULL || req == NULL)
+		return;
+
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+ /**
+ * _ep_queue: queues (submits) an I/O request to an endpoint
+ * @ep:  endpoint
+ * @req: request
+ * @gfp_flags: unused (hardware programming is GFP_ATOMIC)
+ *
+ * Caller must hold lock
+ */
+static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
+		    gfp_t __maybe_unused gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	struct ci13xxx *udc = mEp->udc;
+	int retval = 0;
+
+	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+		return -EINVAL;
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+		/* data phase goes on the half matching the current direction */
+		if (req->length)
+			mEp = (udc->ep0_dir == RX) ?
+				udc->ep0out : udc->ep0in;
+		if (!list_empty(&mEp->qh.queue)) {
+			_ep_nuke(mEp);
+			retval = -EOVERFLOW;
+			dev_warn(mEp->udc->dev, "endpoint ctrl %X nuked\n",
+				 _usb_addr(mEp));
+		}
+	}
+
+	if (usb_endpoint_xfer_isoc(mEp->ep.desc) &&
+	    mReq->req.length > (1 + mEp->ep.mult) * mEp->ep.maxpacket) {
+		dev_err(mEp->udc->dev, "request length too big for isochronous\n");
+		return -EMSGSIZE;
+	}
+
+	/* first nuke then test link, e.g. previous status has not sent */
+	if (!list_empty(&mReq->queue)) {
+		dev_err(mEp->udc->dev, "request already in queue\n");
+		return -EBUSY;
+	}
+
+	if (req->length > (TD_PAGE_COUNT - 1) * CI13XXX_PAGE_SIZE) {
+		dev_err(mEp->udc->dev, "request bigger than one td\n");
+		return -EMSGSIZE;
+	}
+
+	/* push request */
+	mReq->req.status = -EINPROGRESS;
+	mReq->req.actual = 0;
+
+	retval = _hardware_enqueue(mEp, mReq);
+
+	if (retval == -EALREADY)
+		retval = 0;
+	if (!retval)
+		list_add_tail(&mReq->queue, &mEp->qh.queue);
+
+	return retval;
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @udc: udc struct
+ * @setup: setup request packet
+ *
+ * Allocates a 2-byte response for GET_STATUS (device: remote-wakeup bit;
+ * endpoint: halt bit) and queues it on ep0-in.
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx *udc,
+				   struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_ep *mEp = udc->ep0in;
+	struct usb_request *req = NULL;
+	gfp_t gfp_flags = GFP_ATOMIC;
+	int dir, num, retval;
+
+	if (mEp == NULL || setup == NULL)
+		return -EINVAL;
+
+	/* drop the lock around allocation callouts */
+	spin_unlock(mEp->lock);
+	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
+	spin_lock(mEp->lock);
+	if (req == NULL)
+		return -ENOMEM;
+
+	req->complete = isr_get_status_complete;
+	req->length   = 2;
+	req->buf      = kzalloc(req->length, gfp_flags);
+	if (req->buf == NULL) {
+		retval = -ENOMEM;
+		goto err_free_req;
+	}
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* Assume that device is bus powered for now. */
+		*(u16 *)req->buf = udc->remote_wakeup << 1;
+		retval = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK) \
+		   == USB_RECIP_ENDPOINT) {
+		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+			TX : RX;
+		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		*(u16 *)req->buf = hw_ep_get_halt(udc, num, dir);
+	}
+	/* else do nothing; reserved for future use */
+
+	retval = _ep_queue(&mEp->ep, req, gfp_flags);
+	if (retval)
+		goto err_free_buf;
+
+	return 0;
+
+ err_free_buf:
+	kfree(req->buf);
+ err_free_req:
+	spin_unlock(mEp->lock);
+	usb_ep_free_request(&mEp->ep, req);
+	spin_lock(mEp->lock);
+	return retval;
+}
+
+/**
+ * isr_setup_status_complete: setup_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Caller must release lock. Put the port in test mode if test mode
+ * feature is selected.  A deferred SET_ADDRESS is also committed here,
+ * once the status stage has gone out on the wire.
+ */
+static void
+isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx *udc = req->context;
+	unsigned long flags;
+
+	if (udc->setaddr) {
+		hw_usb_set_address(udc, udc->address);
+		udc->setaddr = false;
+		if (udc->address)
+			usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (udc->test_mode)
+		hw_port_test_set(udc, udc->test_mode);
+	spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transaction
+ * @udc: udc struct
+ *
+ * The status stage runs opposite to the data stage direction, hence the
+ * inverted ep0 half selection below.
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx *udc)
+{
+	int retval;
+	struct ci13xxx_ep *mEp;
+
+	mEp = (udc->ep0_dir == TX) ? udc->ep0out : udc->ep0in;
+	udc->status->context = udc;
+	udc->status->complete = isr_setup_status_complete;
+
+	retval = _ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * Retires every finished request on the endpoint and runs its completion
+ * callback (lock dropped around the callout).  Stops at the first request
+ * still owned by the hardware (-EBUSY, reported as success).
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_req *mReq, *mReqTemp;
+	struct ci13xxx_ep *mEpTemp = mEp;
+	int retval = 0;
+
+	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
+			queue) {
+		retval = _hardware_dequeue(mEp, mReq);
+		if (retval < 0)
+			break;
+		list_del_init(&mReq->queue);
+		if (mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			/* ep0 data-stage completions are reported on ep0in */
+			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
+					mReq->req.length)
+				mEpTemp = mEp->udc->ep0in;
+			mReq->req.complete(&mEpTemp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+
+	if (retval == -EBUSY)
+		retval = 0;
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events: retires completed transfers on
+ * every configured endpoint, then decodes any pending SETUP packet on
+ * ep0, handling CLEAR/SET_FEATURE, GET_STATUS and SET_ADDRESS itself and
+ * delegating everything else to the gadget driver.  A failed request is
+ * answered by stalling the endpoint.  Called with udc->lock held; the
+ * lock is dropped around all callouts into the gadget stack.
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	unsigned i;
+	u8 tmode = 0;
+
+	for (i = 0; i < udc->hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
+		int type, num, dir, err = -EINVAL;
+		struct usb_ctrlrequest req;
+
+		if (mEp->ep.desc == NULL)
+			continue;   /* not configured */
+
+		if (hw_test_and_clear_complete(udc, i)) {
+			err = isr_tr_complete_low(mEp);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+				if (err > 0)   /* needs status phase */
+					err = isr_setup_status_phase(udc);
+				if (err < 0) {
+					spin_unlock(&udc->lock);
+					if (usb_ep_set_halt(&mEp->ep))
+						dev_err(udc->dev,
+							"error: ep_set_halt\n");
+					spin_lock(&udc->lock);
+				}
+			}
+		}
+
+		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+		    !hw_test_and_clear_setup_status(udc, i))
+			continue;
+
+		if (i != 0) {
+			dev_warn(udc->dev, "ctrl traffic at endpoint %d\n", i);
+			continue;
+		}
+
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(udc->ep0out);
+		_ep_nuke(udc->ep0in);
+
+		/* read_setup_packet: SUTW tripwire guarantees a consistent copy */
+		do {
+			hw_test_and_set_setup_guard(udc);
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+		} while (!hw_test_and_clear_setup_guard(udc));
+
+		type = req.bRequestType;
+
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
+
+		switch (req.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir  = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += udc->hw_ep_max/2;
+				/* wedged endpoints ignore CLEAR_HALT */
+				if (!udc->ci13xxx_ep[num].wedge) {
+					spin_unlock(&udc->lock);
+					err = usb_ep_clear_halt(
+						&udc->ci13xxx_ep[num].ep);
+					spin_lock(&udc->lock);
+					if (err)
+						break;
+				}
+				err = isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
+					le16_to_cpu(req.wValue) ==
+					USB_DEVICE_REMOTE_WAKEUP) {
+				if (req.wLength != 0)
+					break;
+				udc->remote_wakeup = 0;
+				err = isr_setup_status_phase(udc);
+			} else {
+				goto delegate;
+			}
+			break;
+		case USB_REQ_GET_STATUS:
+			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
+			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 2 ||
+			    le16_to_cpu(req.wValue)  != 0)
+				break;
+			err = isr_get_status_response(udc, &req);
+			break;
+		case USB_REQ_SET_ADDRESS:
+			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 0 ||
+			    le16_to_cpu(req.wIndex)  != 0)
+				break;
+			/* committed in isr_setup_status_complete() */
+			udc->address = (u8)le16_to_cpu(req.wValue);
+			udc->setaddr = true;
+			err = isr_setup_status_phase(udc);
+			break;
+		case USB_REQ_SET_FEATURE:
+			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+					le16_to_cpu(req.wValue) ==
+					USB_ENDPOINT_HALT) {
+				if (req.wLength != 0)
+					break;
+				num  = le16_to_cpu(req.wIndex);
+				dir  = num & USB_ENDPOINT_DIR_MASK;
+				num &= USB_ENDPOINT_NUMBER_MASK;
+				if (dir) /* TX */
+					num += udc->hw_ep_max/2;
+
+				spin_unlock(&udc->lock);
+				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+				spin_lock(&udc->lock);
+				if (!err)
+					isr_setup_status_phase(udc);
+			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
+				if (req.wLength != 0)
+					break;
+				switch (le16_to_cpu(req.wValue)) {
+				case USB_DEVICE_REMOTE_WAKEUP:
+					udc->remote_wakeup = 1;
+					err = isr_setup_status_phase(udc);
+					break;
+				case USB_DEVICE_TEST_MODE:
+					tmode = le16_to_cpu(req.wIndex) >> 8;
+					switch (tmode) {
+					case TEST_J:
+					case TEST_K:
+					case TEST_SE0_NAK:
+					case TEST_PACKET:
+					case TEST_FORCE_EN:
+						udc->test_mode = tmode;
+						err = isr_setup_status_phase(
+								udc);
+						break;
+					default:
+						break;
+					}
+					/*
+					 * Fix: without this break the request
+					 * fell through to "goto delegate" and
+					 * an already-handled TEST_MODE setup
+					 * was re-delivered to the gadget
+					 * driver, whose failure then stalled
+					 * ep0 during compliance testing.
+					 */
+					break;
+				default:
+					goto delegate;
+				}
+			} else {
+				goto delegate;
+			}
+			break;
+		default:
+delegate:
+			if (req.wLength == 0)   /* no data phase */
+				udc->ep0_dir = TX;
+
+			spin_unlock(&udc->lock);
+			err = udc->driver->setup(&udc->gadget, &req);
+			spin_lock(&udc->lock);
+			break;
+		}
+
+		if (err < 0) {
+			spin_unlock(&udc->lock);
+			if (usb_ep_set_halt(&mEp->ep))
+				dev_err(udc->dev, "error: ep_set_halt\n");
+			spin_lock(&udc->lock);
+		}
+	}
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Programs the queue head (maxpacket, ZLT, IOS for control endpoints)
+ * and enables the hardware endpoint (ep0 is always enabled).
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+		     const struct usb_endpoint_descriptor *desc)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int retval = 0;
+	unsigned long flags;
+	u32 cap = 0;
+
+	if (ep == NULL || desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should enable ctrl endpts */
+
+	mEp->ep.desc = desc;
+
+	if (!list_empty(&mEp->qh.queue))
+		dev_warn(mEp->udc->dev, "enabling a non-empty endpoint!\n");
+
+	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+	mEp->num  = usb_endpoint_num(desc);
+	mEp->type = usb_endpoint_type(desc);
+
+	/* low 11 bits of wMaxPacketSize are the packet size itself */
+	mEp->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
+	mEp->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		cap |= QH_IOS;
+
+	cap |= QH_ZLT;
+	cap |= (mEp->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
+
+	mEp->qh.ptr->cap = cpu_to_le32(cap);
+
+	mEp->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
+
+	/*
+	 * Enable endpoints in the HW other than ep0 as ep0
+	 * is always enabled
+	 */
+	if (mEp->num)
+		retval |= hw_ep_enable(mEp->udc, mEp->num, mEp->dir, mEp->type);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Nukes all queued requests and disables the hardware endpoint; for a
+ * control endpoint both directions are processed (the do/while flips dir).
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL)
+		return -EINVAL;
+	else if (mEp->ep.desc == NULL)
+		return -EBUSY;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should disable ctrl endpts */
+
+	direction = mEp->dir;
+	do {
+		retval |= _ep_nuke(mEp);
+		retval |= hw_ep_disable(mEp->udc, mEp->num, mEp->dir);
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	mEp->ep.desc = NULL;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Allocates the wrapper plus its DMA-coherent transfer descriptor; on TD
+ * allocation failure the wrapper is rolled back and NULL returned.
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = NULL;
+
+	if (ep == NULL)
+		return NULL;
+
+	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+	if (mReq != NULL) {
+		INIT_LIST_HEAD(&mReq->queue);
+
+		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+					   &mReq->dma);
+		if (mReq->ptr == NULL) {
+			kfree(mReq);
+			mReq = NULL;
+		}
+	}
+
+	return (mReq == NULL) ? NULL : &mReq->req;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Refuses to free a request still linked on an endpoint queue.
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	if (ep == NULL || req == NULL) {
+		return;
+	} else if (!list_empty(&mReq->queue)) {
+		dev_err(mEp->udc->dev, "freeing queued request\n");
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mReq->ptr)
+		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+	kfree(mReq);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Locked wrapper around _ep_queue().
+ * Check usb_ep_queue()* at usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+		    gfp_t __maybe_unused gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	int retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL || req == NULL || mEp->ep.desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+	retval = _ep_queue(ep, req, gfp_flags);
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Only requests already handed to the hardware (status == -EALREADY) can
+ * be cancelled; the FIFO is flushed, the request unmapped and completed
+ * with -ECONNRESET.
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
+		mEp->ep.desc == NULL || list_empty(&mReq->queue) ||
+		list_empty(&mEp->qh.queue))
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
+
+	/* pop request */
+	list_del_init(&mReq->queue);
+
+	usb_gadget_unmap_request(&mEp->udc->gadget, req, mEp->dir);
+
+	req->status = -ECONNRESET;
+
+	if (mReq->req.complete != NULL) {
+		spin_unlock(mEp->lock);
+		mReq->req.complete(&mEp->ep, &mReq->req);
+		spin_lock(mEp->lock);
+	}
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return 0;
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ *
+ * Stalls (value != 0) or un-stalls (value == 0) the endpoint. For
+ * control endpoints the halt is applied to both directions via the
+ * do/while direction-toggling loop below. Isochronous endpoints cannot
+ * halt per the USB spec.
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL || mEp->ep.desc == NULL)
+		return -EINVAL;
+
+	if (usb_endpoint_xfer_isoc(mEp->ep.desc))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+	    !list_empty(&mEp->qh.queue)) {
+		spin_unlock_irqrestore(mEp->lock, flags);
+		return -EAGAIN;
+	}
+#endif
+
+	/* remember the starting direction so the loop runs exactly once
+	 * for non-control endpoints, twice (both dirs) for control EPs */
+	direction = mEp->dir;
+	do {
+		retval |= hw_ep_set_halt(mEp->udc, mEp->num, mEp->dir, value);
+
+		/* clearing halt also clears a previous wedge */
+		if (!value)
+			mEp->wedge = 0;
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ *
+ * Marks the endpoint wedged (so host CLEAR_FEATURE requests are
+ * ignored elsewhere in the driver) and then stalls it via the normal
+ * halt path.
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	if (ep == NULL || mEp->ep.desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+	mEp->wedge = 1;
+	spin_unlock_irqrestore(mEp->lock, flags);
+
+	/* actual stall is applied by the regular halt operation */
+	return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ *
+ * Discards any data buffered in the endpoint FIFO by flushing the
+ * hardware endpoint under the endpoint lock.
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	/*
+	 * Bail out silently on a NULL endpoint: the previous error print
+	 * dereferenced mEp->udc->dev, but mEp is derived from the NULL
+	 * 'ep' pointer, so the dev_err() itself would oops.
+	 */
+	if (ep == NULL)
+		return;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	hw_ep_flush(mEp->udc, mEp->num, mEp->dir);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware
+ * Check "usb_gadget.h" for details
+ *
+ * Operation table handed to the gadget core for every endpoint created
+ * in init_eps().
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+	.enable	       = ep_enable,
+	.disable       = ep_disable,
+	.alloc_request = ep_alloc_request,
+	.free_request  = ep_free_request,
+	.queue	       = ep_queue,
+	.dequeue       = ep_dequeue,
+	.set_halt      = ep_set_halt,
+	.set_wedge     = ep_set_wedge,
+	.fifo_flush    = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+/*
+ * ci13xxx_vbus_session: VBUS state change notification
+ * @is_active: non-zero when VBUS is present
+ *
+ * Records the VBUS state and, when a gadget driver is bound, powers the
+ * controller up/down accordingly. Only meaningful on controllers using
+ * the CI13XXX_PULLUP_ON_VBUS policy.
+ */
+static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int gadget_ready = 0;
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	udc->vbus_active = is_active;
+	if (udc->driver)
+		gadget_ready = 1;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	if (gadget_ready) {
+		if (is_active) {
+			/* VBUS appeared: reset + start device controller */
+			pm_runtime_get_sync(&_gadget->dev);
+			hw_device_reset(udc, USBMODE_CM_DC);
+			hw_device_state(udc, udc->ep0out->qh.dma);
+			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+		} else {
+			/* VBUS gone: stop controller, flush activity */
+			hw_device_state(udc, 0);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_STOPPED_EVENT);
+			_gadget_stop_activity(&udc->gadget);
+			pm_runtime_put_sync(&_gadget->dev);
+			usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ci13xxx_wakeup: initiate remote wakeup
+ *
+ * Asserts Force Port Resume (FPR) when the host has enabled remote
+ * wakeup and the port is actually suspended; -EOPNOTSUPP / -EINVAL
+ * otherwise.
+ */
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	if (!udc->remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	/* remote wakeup only makes sense while suspended */
+	if (!hw_read(udc, OP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	hw_write(udc, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return ret;
+}
+
+/*
+ * ci13xxx_vbus_draw: report configured current draw to the transceiver
+ * @mA: current in milliamps
+ *
+ * Delegates to the PHY when one is present; -ENOTSUPP otherwise.
+ */
+static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+
+	if (udc->transceiver)
+		return usb_phy_set_power(udc->transceiver, mA);
+	return -ENOTSUPP;
+}
+
+/*
+ * Change the D+ pullup status.
+ * Used by usb_gadget_connect()/usb_gadget_disconnect().
+ */
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
+{
+	struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+
+	/* remember the request so it can be re-applied in udc_start() */
+	need_pullup = is_on ? 1 : 0;
+
+	/* the Run/Stop bit controls the pullup on this controller */
+	hw_write(ci, OP_USBCMD, USBCMD_RS, is_on ? USBCMD_RS : 0);
+
+	return 0;
+}
+
+static int ci13xxx_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver);
+static int ci13xxx_stop(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver);
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o)
+ * Check "usb_gadget.h" for details
+ *
+ * Registered with the gadget core via udc_start().
+ */
+static const struct usb_gadget_ops usb_gadget_ops = {
+	.vbus_session	= ci13xxx_vbus_session,
+	.wakeup		= ci13xxx_wakeup,
+	.pullup		= ci13xxx_pullup,
+	.vbus_draw	= ci13xxx_vbus_draw,
+	.udc_start	= ci13xxx_start,
+	.udc_stop	= ci13xxx_stop,
+};
+
+/*
+ * init_eps: build the endpoint array and allocate per-EP queue heads
+ *
+ * Endpoints are laid out as two halves of ci13xxx_ep[]: indices
+ * [0, hw_ep_max/2) are OUT (RX), [hw_ep_max/2, hw_ep_max) are IN (TX).
+ * ep0 in/out get shorthand pointers and are kept off the gadget's
+ * ep_list. Returns 0 or -ENOMEM.
+ *
+ * NOTE(review): on a qh dma_pool_alloc failure the loop keeps going and
+ * only records -ENOMEM; destroy_eps() is expected to clean up -- confirm
+ * dma_pool_free() tolerates the NULL qh.ptr left behind.
+ */
+static int init_eps(struct ci13xxx *udc)
+{
+	int retval = 0, i, j;
+
+	for (i = 0; i < udc->hw_ep_max/2; i++)
+		for (j = RX; j <= TX; j++) {
+			/* k: flat index -- OUT block first, then IN block */
+			int k = i + j * udc->hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
+
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX)  ? "in" : "out");
+
+			mEp->udc          = udc;
+			mEp->lock         = &udc->lock;
+			mEp->td_pool      = udc->td_pool;
+
+			mEp->ep.name      = mEp->name;
+			mEp->ep.ops       = &usb_ep_ops;
+			/*
+			 * for ep0: maxP defined in desc, for other
+			 * eps, maxP is set by epautoconfig() called
+			 * by gadget layer
+			 */
+			mEp->ep.maxpacket = (unsigned short)~0;
+
+			INIT_LIST_HEAD(&mEp->qh.queue);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+						     &mEp->qh.dma);
+			if (mEp->qh.ptr == NULL)
+				retval = -ENOMEM;
+			else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/*
+			 * set up shorthands for ep0 out and in endpoints,
+			 * don't add to gadget's ep_list
+			 */
+			if (i == 0) {
+				if (j == RX)
+					udc->ep0out = mEp;
+				else
+					udc->ep0in = mEp;
+
+				mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+				continue;
+			}
+
+			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
+
+	return retval;
+}
+
+/*
+ * destroy_eps: release every endpoint's queue head back to the qh pool
+ *
+ * Inverse of init_eps(); iterates the full flat endpoint array.
+ */
+static void destroy_eps(struct ci13xxx *udc)
+{
+	int i;
+
+	for (i = 0; i < udc->hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+		dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+	}
+}
+
+/**
+ * ci13xxx_start: register a gadget driver
+ * @gadget: our gadget
+ * @driver: the driver being registered
+ *
+ * Interrupts are enabled here.
+ *
+ * Enables both ep0 endpoints, records the driver and, unless the
+ * controller is waiting for VBUS (CI13XXX_PULLUP_ON_VBUS), starts the
+ * device controller.
+ */
+static int ci13xxx_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int retval = -ENOMEM;
+
+	if (driver->disconnect == NULL)
+		return -EINVAL;
+
+	udc->ep0out->ep.desc = &ctrl_endpt_out_desc;
+	retval = usb_ep_enable(&udc->ep0out->ep);
+	if (retval)
+		return retval;
+
+	udc->ep0in->ep.desc = &ctrl_endpt_in_desc;
+	retval = usb_ep_enable(&udc->ep0in->ep);
+	if (retval) {
+		/* don't leave ep0out enabled behind on failure */
+		usb_ep_disable(&udc->ep0out->ep);
+		return retval;
+	}
+
+	udc->driver = driver;
+	pm_runtime_get_sync(&udc->gadget.dev);
+	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
+		if (udc->vbus_active) {
+			/* lock is released on both exits below */
+			spin_lock_irqsave(&udc->lock, flags);
+			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
+				hw_device_reset(udc, USBMODE_CM_DC);
+		} else {
+			/* no VBUS yet: defer start to the VBUS notifier */
+			pm_runtime_put_sync(&udc->gadget.dev);
+			return retval;
+		}
+	} else {
+		spin_lock_irqsave(&udc->lock, flags);
+	}
+
+	retval = hw_device_state(udc, udc->ep0out->qh.dma);
+	spin_unlock_irqrestore(&udc->lock, flags);
+	if (retval)
+		pm_runtime_put_sync(&udc->gadget.dev);
+
+	return retval;
+}
+
+/**
+ * ci13xxx_stop: unregister a gadget driver
+ *
+ * Stops the device controller (when it was actually running) and
+ * detaches the gadget driver. The lock is dropped across
+ * _gadget_stop_activity() because request completions run there.
+ */
+static int ci13xxx_stop(struct usb_gadget *gadget,
+			struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
+			udc->vbus_active) {
+		hw_device_state(udc, 0);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_STOPPED_EVENT);
+		spin_unlock_irqrestore(&udc->lock, flags);
+		_gadget_stop_activity(&udc->gadget);
+		spin_lock_irqsave(&udc->lock, flags);
+		pm_runtime_put(&udc->gadget.dev);
+	}
+
+	udc->driver = NULL;
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: udc interrupt handler
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ *
+ * Dispatches, in priority order: bus reset (URI), port change (PCI),
+ * transfer completion (UI) and suspend (SLI). Gadget-driver callbacks
+ * (resume/suspend) are invoked with the lock dropped.
+ */
+static irqreturn_t udc_irq(struct ci13xxx *udc)
+{
+	irqreturn_t retval;
+	u32 intr;
+
+	if (udc == NULL)
+		return IRQ_HANDLED;
+
+	spin_lock(&udc->lock);
+
+	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
+		/* shared registers: ignore IRQs raised while in host mode */
+		if (hw_read(udc, OP_USBMODE, USBMODE_CM) !=
+				USBMODE_CM_DC) {
+			spin_unlock(&udc->lock);
+			return IRQ_NONE;
+		}
+	}
+	intr = hw_test_and_clear_intr_active(udc);
+
+	if (intr) {
+		/* order defines priority - do NOT change it */
+		if (USBi_URI & intr)
+			isr_reset_handler(udc);
+
+		if (USBi_PCI & intr) {
+			/* port change: latch the negotiated speed */
+			udc->gadget.speed = hw_port_is_high_speed(udc) ?
+				USB_SPEED_HIGH : USB_SPEED_FULL;
+			if (udc->suspended && udc->driver->resume) {
+				spin_unlock(&udc->lock);
+				udc->driver->resume(&udc->gadget);
+				spin_lock(&udc->lock);
+				udc->suspended = 0;
+			}
+		}
+
+		if (USBi_UI  & intr)
+			isr_tr_complete_handler(udc);
+
+		if (USBi_SLI & intr) {
+			/* bus suspend: notify the gadget driver */
+			if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+			    udc->driver->suspend) {
+				udc->suspended = 1;
+				spin_unlock(&udc->lock);
+				udc->driver->suspend(&udc->gadget);
+				usb_gadget_set_state(&udc->gadget,
+						USB_STATE_SUSPENDED);
+				spin_lock(&udc->lock);
+			}
+		}
+		retval = IRQ_HANDLED;
+	} else {
+		retval = IRQ_NONE;
+	}
+	spin_unlock(&udc->lock);
+
+	return retval;
+}
+
+/**
+ * udc_release: driver release function
+ * @dev: device
+ *
+ * Currently does nothing; exists because the device core requires a
+ * release callback for udc->gadget.dev.
+ */
+static void udc_release(struct device *dev)
+{
+}
+
+/**
+ * udc_start: initialize gadget role
+ * @udc: chipidea controller
+ *
+ * Sets up the gadget object, allocates the qh/td DMA pools and the
+ * endpoint array, grabs the (optional) transceiver, resets the
+ * controller when registers are not shared, and registers the UDC.
+ * Returns 0 on success or a negative errno with everything unwound.
+ */
+static int udc_start(struct ci13xxx *udc)
+{
+	struct device *dev = udc->dev;
+	int retval = 0;
+
+	spin_lock_init(&udc->lock);
+
+	udc->gadget.ops          = &usb_gadget_ops;
+	udc->gadget.speed        = USB_SPEED_UNKNOWN;
+	udc->gadget.max_speed    = USB_SPEED_HIGH;
+	udc->gadget.is_otg       = 0;
+	udc->gadget.name         = udc->udc_driver->name;
+
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+	dev_set_name(&udc->gadget.dev, "gadget");
+	udc->gadget.dev.dma_mask = dev->dma_mask;
+	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
+	udc->gadget.dev.parent   = dev;
+	udc->gadget.dev.release  = udc_release;
+
+	/* alloc resources */
+	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
+				       sizeof(struct ci13xxx_qh),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->qh_pool == NULL)
+		return -ENOMEM;
+
+	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
+				       sizeof(struct ci13xxx_td),
+				       64, CI13XXX_PAGE_SIZE);
+	if (udc->td_pool == NULL) {
+		retval = -ENOMEM;
+		goto free_qh_pool;
+	}
+
+	retval = init_eps(udc);
+	if (retval)
+		goto free_pools;
+
+	udc->gadget.ep0 = &udc->ep0in->ep;
+
+	udc->transceiver = usb_get_transceiver();
+
+	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+		if (udc->transceiver == NULL) {
+			retval = -ENODEV;
+			goto destroy_eps;
+		}
+	}
+
+	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
+		retval = hw_device_reset(udc, USBMODE_CM_DC);
+		if (retval)
+			goto put_transceiver;
+	}
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval) {
+		put_device(&udc->gadget.dev);
+		goto put_transceiver;
+	}
+
+	retval = dbg_create_files(&udc->gadget.dev);
+	if (retval)
+		goto unreg_device;
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver->otg,
+						&udc->gadget);
+		/*
+		 * If we implement all USB functions using chipidea drivers,
+		 * it doesn't need to call above API, meanwhile, if we only
+		 * use gadget function, calling above API is useless.
+		 */
+		if (retval && retval != -ENOTSUPP)
+			goto remove_dbg;
+	}
+
+	retval = usb_add_gadget_udc(dev, &udc->gadget);
+	if (retval)
+		goto remove_trans;
+
+	/* re-apply a pullup request issued before the UDC existed */
+	if (need_pullup)
+		hw_write(udc, OP_USBCMD, USBCMD_RS, USBCMD_RS);
+
+	pm_runtime_no_callbacks(&udc->gadget.dev);
+	pm_runtime_enable(&udc->gadget.dev);
+
+	return retval;
+
+remove_trans:
+	/*
+	 * Only undo otg_set_peripheral() here; the transceiver reference
+	 * is dropped once at the put_transceiver label below. The old
+	 * code also called usb_put_transceiver() here and then fell
+	 * through to put_transceiver, dropping the reference twice.
+	 */
+	if (udc->transceiver)
+		otg_set_peripheral(udc->transceiver->otg, NULL);
+
+	dev_err(dev, "error = %i\n", retval);
+remove_dbg:
+	dbg_remove_files(&udc->gadget.dev);
+unreg_device:
+	device_unregister(&udc->gadget.dev);
+put_transceiver:
+	if (udc->transceiver)
+		usb_put_transceiver(udc->transceiver);
+destroy_eps:
+	destroy_eps(udc);
+free_pools:
+	dma_pool_destroy(udc->td_pool);
+free_qh_pool:
+	dma_pool_destroy(udc->qh_pool);
+	return retval;
+}
+
+/**
+ * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
+ *
+ * No interrupts active, the IRQ has been released
+ *
+ * Tears down everything udc_start() created, in reverse order.
+ * No-op when the gadget role was never initialized.
+ */
+void ci_hdrc_gadget_destroy(struct ci13xxx *udc)
+{
+	if (!udc->roles[CI_ROLE_GADGET])
+		return;
+
+	usb_del_gadget_udc(&udc->gadget);
+
+	destroy_eps(udc);
+
+	dma_pool_destroy(udc->td_pool);
+	dma_pool_destroy(udc->qh_pool);
+
+	if (udc->transceiver) {
+		otg_set_peripheral(udc->transceiver->otg, NULL);
+		usb_put_transceiver(udc->transceiver);
+	}
+	dbg_remove_files(&udc->gadget.dev);
+	device_unregister(&udc->gadget.dev);
+}
+
+/* role-switch hook: entering device role acts like a VBUS-on event */
+static int udc_id_switch_for_device(struct ci13xxx *udc)
+{
+	ci13xxx_vbus_session(&udc->gadget, 1);
+
+	return 0;
+}
+
+/* role-switch hook: leaving device role acts like a VBUS-off event */
+static void udc_id_switch_for_host(struct ci13xxx *udc)
+{
+	ci13xxx_vbus_session(&udc->gadget, 0);
+}
+
+/**
+ * ci_hdrc_gadget_init - initialize device related bits
+ * ci: the controller
+ *
+ * This function initializes the gadget, if the device is "device capable".
+ *
+ * Registers the gadget role driver (start/stop/irq hooks) and then runs
+ * udc_start(). Returns -ENXIO when the silicon has no device controller
+ * (DCCPARAMS.DC clear).
+ */
+int ci_hdrc_gadget_init(struct ci13xxx *ci)
+{
+	struct ci_role_driver *rdrv;
+
+	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
+		return -ENXIO;
+
+	rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
+	if (!rdrv)
+		return -ENOMEM;
+
+	rdrv->start	= udc_id_switch_for_device;
+	rdrv->stop	= udc_id_switch_for_host;
+	rdrv->irq	= udc_irq;
+	rdrv->name	= "gadget";
+	ci->roles[CI_ROLE_GADGET] = rdrv;
+
+	return udc_start(ci);
+}
+
+/* system suspend: stop the gadget role (only when it is the active role) */
+void ci_hdrc_gadget_suspend(struct ci13xxx *ci)
+{
+	if (ci->role == CI_ROLE_GADGET)
+		ci->roles[CI_ROLE_GADGET]->stop(ci);
+}
+
+/* system resume: restart the gadget role (only when it is the active role) */
+void ci_hdrc_gadget_resume(struct ci13xxx *ci)
+{
+	if (ci->role == CI_ROLE_GADGET)
+		ci->roles[CI_ROLE_GADGET]->start(ci);
+}
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
new file mode 100644
index 00000000000000..90759b1db8eab4
--- /dev/null
+++ b/drivers/usb/chipidea/udc.h
@@ -0,0 +1,114 @@
+/*
+ * udc.h - ChipIdea UDC structures
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_USB_CHIPIDEA_UDC_H
+#define __DRIVERS_USB_CHIPIDEA_UDC_H
+
+#include <linux/list.h>
+
+#define CTRL_PAYLOAD_MAX 64
+#define RX 0 /* similar to USB_DIR_OUT but can be used as an index */
+#define TX 1 /* similar to USB_DIR_IN but can be used as an index */
+
+/* DMA layout of transfer descriptors */
+struct ci13xxx_td {
+	/* 0 - next TD pointer (5-bit aligned) or terminate marker */
+	u32 next;
+#define TD_TERMINATE          BIT(0)
+#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
+	/* 1 - status/control token written back by the controller */
+	u32 token;
+#define TD_STATUS             (0x00FFUL <<  0)
+#define TD_STATUS_TR_ERR      BIT(3)
+#define TD_STATUS_DT_ERR      BIT(5)
+#define TD_STATUS_HALTED      BIT(6)
+#define TD_STATUS_ACTIVE      BIT(7)
+#define TD_MULTO              (0x0003UL << 10)
+#define TD_IOC                BIT(15)
+#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
+	/* 2 - buffer page pointers (up to 5 pages per TD) */
+	u32 page[5];
+#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
+#define TD_FRAME_NUM          (0x07FFUL <<  0)
+#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
+} __attribute__ ((packed));
+
+/* DMA layout of queue heads */
+struct ci13xxx_qh {
+	/* 0 - endpoint capabilities/characteristics */
+	u32 cap;
+#define QH_IOS                BIT(15)
+#define QH_MAX_PKT            (0x07FFUL << 16)
+#define QH_ZLT                BIT(29)
+#define QH_MULT               (0x0003UL << 30)
+#define QH_ISO_MULT(x)		((x >> 11) & 0x03)
+	/* 1 - current TD pointer (maintained by the controller) */
+	u32 curr;
+	/* 2 - 8 - overlay area: TD executed in place by the hardware */
+	struct ci13xxx_td        td;
+	/* 9 */
+	u32 RESERVED;
+	/* setup packet buffer for control endpoints */
+	struct usb_ctrlrequest   setup;
+} __attribute__ ((packed));
+
+/**
+ * struct ci13xxx_req - usb request representation
+ * @req: request structure for gadget drivers
+ * @queue: link to QH list
+ * @ptr: transfer descriptor for this request
+ * @dma: dma address for the transfer descriptor
+ * @zptr: transfer descriptor for the zero packet
+ * @zdma: dma address of the zero packet's transfer descriptor
+ *
+ * Driver-private wrapper around struct usb_request; allocated by
+ * ep_alloc_request() and freed by ep_free_request().
+ */
+struct ci13xxx_req {
+	struct usb_request	req;
+	struct list_head	queue;
+	struct ci13xxx_td	*ptr;
+	dma_addr_t		dma;
+	struct ci13xxx_td	*zptr;
+	dma_addr_t		zdma;
+};
+
+#ifdef CONFIG_USB_CHIPIDEA_UDC
+
+int ci_hdrc_gadget_init(struct ci13xxx *ci);
+void ci_hdrc_gadget_destroy(struct ci13xxx *ci);
+
+#ifdef CONFIG_PM
+void ci_hdrc_gadget_suspend(struct ci13xxx *ci);
+void ci_hdrc_gadget_resume(struct ci13xxx *ci);
+#endif
+
+#else
+
+static inline int ci_hdrc_gadget_init(struct ci13xxx *ci)
+{
+ return -ENXIO;
+}
+
+static inline void ci_hdrc_gadget_destroy(struct ci13xxx *ci)
+{
+
+}
+
+#ifdef CONFIG_PM
+static inline void ci_hdrc_gadget_suspend(struct ci13xxx *ci)
+{
+}
+static inline void ci_hdrc_gadget_resume(struct ci13xxx *ci)
+{
+}
+#endif
+
+#endif
+
+#endif /* __DRIVERS_USB_CHIPIDEA_UDC_H */
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 00000000000000..e515fb3a01440e
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1581 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ * Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+#include "f_fs.c"
+#ifdef CONFIG_SND /* if ALSA used */
+#include "f_audio_source.c"
+#endif /* CONFIG_SND */
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_adb.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID 0x18D1
+#define PRODUCT_ID 0x0001
+
+struct android_usb_function {
+ char *name;
+ void *config;
+
+ struct device *dev;
+ char *dev_name;
+ struct device_attribute **attributes;
+
+ /* for android_dev.enabled_functions */
+ struct list_head enabled_list;
+
+ /* Optional: initialization during gadget bind */
+ int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+ /* Optional: cleanup during gadget unbind */
+ void (*cleanup)(struct android_usb_function *);
+ /* Optional: called when the function is added the list of
+ * enabled functions */
+ void (*enable)(struct android_usb_function *);
+ /* Optional: called when it is removed */
+ void (*disable)(struct android_usb_function *);
+
+ int (*bind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+
+ /* Optional: called when the configuration is removed */
+ void (*unbind_config)(struct android_usb_function *,
+ struct usb_configuration *);
+ /* Optional: handle ctrl requests before the device is configured */
+ int (*ctrlrequest)(struct android_usb_function *,
+ struct usb_composite_dev *,
+ const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+ struct android_usb_function **functions;
+ struct list_head enabled_functions;
+ struct usb_composite_dev *cdev;
+ struct device *dev;
+
+ bool enabled;
+ int disable_depth;
+ struct mutex mutex;
+ bool connected;
+ bool sw_connected;
+ struct work_struct work;
+ char ffs_aliases[256];
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_SERIAL_IDX 2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_string,
+ [STRING_PRODUCT_IDX].s = product_string,
+ [STRING_SERIAL_IDX].s = serial_string,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof(device_desc),
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+ .idVendor = __constant_cpu_to_le16(VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(PRODUCT_ID),
+ .bcdDevice = __constant_cpu_to_le16(0xffff),
+ .bNumConfigurations = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+ .label = "android",
+ .unbind = android_unbind_config,
+ .bConfigurationValue = 1,
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 0xFA, /* 500ma */
+};
+
+/*
+ * android_work: report USB state changes to userspace
+ *
+ * Workqueue handler that translates the composite device state into a
+ * USB_STATE uevent (CONFIGURED / CONNECTED / DISCONNECTED). An event is
+ * only sent when the connection state actually changed since the last
+ * report (sw_connected tracks what userspace last saw).
+ */
+static void android_work(struct work_struct *data)
+{
+	struct android_dev *dev = container_of(data, struct android_dev, work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	char **uevent_envp = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		uevent_envp = configured;
+	else if (dev->connected != dev->sw_connected)
+		uevent_envp = dev->connected ? connected : disconnected;
+	dev->sw_connected = dev->connected;
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (uevent_envp) {
+		kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+		pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+	} else {
+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+			 dev->connected, dev->sw_connected, cdev->config);
+	}
+}
+
+/*
+ * android_enable: decrement the disable depth; activate on reaching 0
+ *
+ * Counterpart of android_disable(). When the last disabler goes away,
+ * the configuration is re-added and a disconnect/connect cycle forces
+ * the host to re-enumerate.
+ *
+ * NOTE(review): the usb_add_config() return value is ignored -- a bind
+ * failure would go unnoticed here; confirm whether that is acceptable.
+ */
+static void android_enable(struct android_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	if (WARN_ON(!dev->disable_depth))
+		return;
+
+	if (--dev->disable_depth == 0) {
+		usb_add_config(cdev, &android_config_driver,
+					android_bind_config);
+		/* force reenumeration */
+		usb_gadget_disconnect(cdev->gadget);
+		msleep(10);
+		usb_gadget_connect(cdev->gadget);
+	}
+}
+
+/*
+ * android_disable: increment the disable depth; deactivate on first call
+ *
+ * Only the 0 -> 1 transition tears the gadget down; nested disables
+ * just bump the counter so android_enable() can balance them.
+ */
+static void android_disable(struct android_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	if (dev->disable_depth++ == 0) {
+		usb_gadget_disconnect(cdev->gadget);
+		/* Cancel pending control requests */
+		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+		usb_remove_config(cdev, &android_config_driver);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+/* per-gadget FunctionFS state: function can be enabled before userspace
+ * has opened the ffs endpoint files ('opened'), and vice versa */
+struct functionfs_config {
+	bool opened;
+	bool enabled;
+	struct ffs_data *data;
+};
+
+/* allocate the ffs config state and initialize the FunctionFS core.
+ * NOTE(review): f->config is not freed when functionfs_init() fails --
+ * it appears to be released only via ffs_function_cleanup(); confirm. */
+static int ffs_function_init(struct android_usb_function *f,
+			     struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return functionfs_init();
+}
+
+/* inverse of ffs_function_init(): tear down FunctionFS, free the state */
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+	functionfs_cleanup();
+	kfree(f->config);
+}
+
+/* mark the function enabled; keep the gadget down until userspace opens
+ * the ffs files (functionfs_ready_callback balances this) */
+static void ffs_function_enable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = true;
+
+	/* Disable the gadget until the function is ready */
+	if (!config->opened)
+		android_disable(dev);
+}
+
+/* mark the function disabled; re-balance the disable depth if the
+ * enable path had pushed it while ffs was not yet opened */
+static void ffs_function_disable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = false;
+
+	/* Balance the disable that was called in closed_callback */
+	if (!config->opened)
+		android_enable(dev);
+}
+
+/* bind the FunctionFS instance (opened by userspace) into this config */
+static int ffs_function_bind_config(struct android_usb_function *f,
+				    struct usb_configuration *c)
+{
+	struct functionfs_config *config = f->config;
+	return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+/* sysfs read: current ffs function-name aliases, newline terminated */
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = _android_dev;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+	ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+/* sysfs write: replace the alias list (trimmed); refused while the
+ * gadget is enabled because functions are already bound */
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct android_dev *dev = _android_dev;
+	char buff[256];
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	strlcpy(buff, buf, sizeof(buff));
+	strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+ ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+ &dev_attr_aliases,
+ NULL
+};
+
+static struct android_usb_function ffs_function = {
+ .name = "ffs",
+ .init = ffs_function_init,
+ .enable = ffs_function_enable,
+ .disable = ffs_function_disable,
+ .cleanup = ffs_function_cleanup,
+ .bind_config = ffs_function_bind_config,
+ .attributes = ffs_function_attributes,
+};
+
+/* called by FunctionFS when userspace has written the descriptors:
+ * bind it to the composite device and bring the gadget up if the
+ * function was already enabled from sysfs */
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	ret = functionfs_bind(ffs, dev->cdev);
+	if (ret)
+		goto err;
+
+	config->data = ffs;
+	config->opened = true;
+
+	if (config->enabled)
+		android_enable(dev);
+
+err:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+/* called by FunctionFS when userspace closes the endpoint files:
+ * take the gadget down (if enabled) and unbind the instance */
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+
+	mutex_lock(&dev->mutex);
+
+	if (config->enabled)
+		android_disable(dev);
+
+	config->opened = false;
+	config->data = NULL;
+
+	functionfs_unbind(ffs);
+
+	mutex_unlock(&dev->mutex);
+}
+
+/* any ffs mount device name is accepted on this platform */
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+	return 0;
+}
+
+
+/* adb state: mirrors functionfs_config -- the gadget stays down until
+ * both the function is enabled and adbd has opened the device */
+struct adb_data {
+	bool opened;
+	bool enabled;
+};
+
+/* allocate adb state and initialize the adb function driver */
+static int
+adb_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct adb_data), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return adb_setup();
+}
+
+/* inverse of adb_function_init() */
+static void adb_function_cleanup(struct android_usb_function *f)
+{
+	adb_cleanup();
+	kfree(f->config);
+}
+
+/* add the adb function to the configuration being bound */
+static int
+adb_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return adb_bind_config(c);
+}
+
+/* mark adb enabled; hold the gadget down until adbd opens the device */
+static void adb_android_function_enable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = f->config;
+
+	data->enabled = true;
+
+	/* Disable the gadget until adbd is ready */
+	if (!data->opened)
+		android_disable(dev);
+}
+
+/* mark adb disabled; balance the disable taken while adbd was closed */
+static void adb_android_function_disable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = f->config;
+
+	data->enabled = false;
+
+	/* Balance the disable that was called in closed_callback */
+	if (!data->opened)
+		android_enable(dev);
+}
+
+static struct android_usb_function adb_function = {
+ .name = "adb",
+ .enable = adb_android_function_enable,
+ .disable = adb_android_function_disable,
+ .init = adb_function_init,
+ .cleanup = adb_function_cleanup,
+ .bind_config = adb_function_bind_config,
+};
+
+/* called by f_adb when adbd opens the device: bring the gadget up if
+ * the function was already enabled */
+static void adb_ready_callback(void)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = adb_function.config;
+
+	mutex_lock(&dev->mutex);
+
+	data->opened = true;
+
+	if (data->enabled)
+		android_enable(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+/* called by f_adb when adbd closes the device: take the gadget down if
+ * the function is still enabled */
+static void adb_closed_callback(void)
+{
+	struct android_dev *dev = _android_dev;
+	struct adb_data *data = adb_function.config;
+
+	mutex_lock(&dev->mutex);
+
+	data->opened = false;
+
+	if (data->enabled)
+		android_disable(dev);
+
+	mutex_unlock(&dev->mutex);
+}
+
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+ int instances;
+};
+
+/* allocate acm state and set up the serial layer with the maximum
+ * number of ports; actual instance count is chosen via sysfs */
+static int
+acm_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES);
+}
+
+/* inverse of acm_function_init() */
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+	gserial_cleanup();
+	kfree(f->config);
+	f->config = NULL;
+}
+
+/* bind the configured number of ACM instances; stops at first failure */
+static int
+acm_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct acm_function_config *config = f->config;
+
+	for (i = 0; i < config->instances; i++) {
+		ret = acm_bind_config(c, i);
+		if (ret) {
+			pr_err("Could not bind acm%u config\n", i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* sysfs read: number of ACM instances to bind */
+static ssize_t acm_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+/*
+ * sysfs write: set the number of ACM instances, clamped to
+ * [0, MAX_ACM_INSTANCES]. Rejects non-numeric input with -EINVAL;
+ * previously the sscanf() result was ignored, so a parse failure used
+ * an uninitialized stack value, and negative counts were accepted.
+ */
+static ssize_t acm_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if (value < 0)
+		return -EINVAL;
+	if (value > MAX_ACM_INSTANCES)
+		value = MAX_ACM_INSTANCES;
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+						 acm_instances_store);
+/* NULL-terminated attribute list exposed under the f_acm device node. */
+static struct device_attribute *acm_function_attributes[] = {
+	&dev_attr_instances,
+	NULL
+};
+
+/* ACM (CDC serial) function descriptor for the android composite driver. */
+static struct android_usb_function acm_function = {
+	.name		= "acm",
+	.init		= acm_function_init,
+	.cleanup	= acm_function_cleanup,
+	.bind_config	= acm_function_bind_config,
+	.attributes	= acm_function_attributes,
+};
+
+
+/* Set up the MTP driver state (also used by the PTP variant below). */
+static int
+mtp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+	mtp_cleanup();
+}
+
+/* Bind as MTP: second argument selects PTP descriptors when true. */
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	/* nothing to do - initialization is handled by mtp_function_init */
+	return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+	/* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+/* Bind as PTP: same driver as MTP with PTP interface descriptors. */
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, true);
+}
+
+/* Forward class-specific control requests to the MTP driver. */
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+					struct usb_composite_dev *cdev,
+					const struct usb_ctrlrequest *c)
+{
+	return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+	.name		= "mtp",
+	.init		= mtp_function_init,
+	.cleanup	= mtp_function_cleanup,
+	.bind_config	= mtp_function_bind_config,
+	.ctrlrequest	= mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+	.name		= "ptp",
+	.init		= ptp_function_init,
+	.cleanup	= ptp_function_cleanup,
+	.bind_config	= ptp_function_bind_config,
+};
+
+
+/* Per-function state for RNDIS, all tunable from sysfs before enabling. */
+struct rndis_function_config {
+	u8      ethaddr[ETH_ALEN];	/* host-visible MAC address */
+	u32     vendorID;
+	char	manufacturer[256];
+	/* "Wireless" RNDIS; auto-detected by Windows */
+	bool	wceis;
+};
+
+/* Allocate the (zeroed) RNDIS config; network setup happens at bind time. */
+static int
+rndis_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+/*
+ * Bind RNDIS into the configuration: bring up the ethernet link layer with
+ * the sysfs-configured MAC, optionally rewrite the descriptors so Windows
+ * auto-detects the interface as "wireless" RNDIS, then bind the function
+ * with the configured vendor ID and manufacturer string.
+ */
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int ret;
+	struct rndis_function_config *rndis = f->config;
+
+	if (!rndis) {
+		pr_err("%s: rndis_pdata\n", __func__);
+		return -1;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+	ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+	if (ret) {
+		pr_err("%s: gether_setup failed\n", __func__);
+		return ret;
+	}
+
+	if (rndis->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	}
+
+	return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+					   rndis->manufacturer);
+}
+
+/* Undo gether_setup_name() from bind; descriptor edits are left in place. */
+static void rndis_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	gether_cleanup();
+}
+
+/* sysfs: show the RNDIS manufacturer string. */
+static ssize_t rndis_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+/*
+ * sysfs: set the RNDIS manufacturer string (a single whitespace-free
+ * token; %s stops at whitespace and the length guard bounds the copy).
+ *
+ * Fix vs. original: parse failure returned a bare -1, which userspace
+ * sees as -EPERM; report -EINVAL instead.
+ */
+static ssize_t rndis_manufacturer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+
+	if (size >= sizeof(config->manufacturer))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->manufacturer) == 1)
+		return size;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+						    rndis_manufacturer_store);
+
+/* sysfs: show whether "wireless" RNDIS descriptors will be used. */
+static ssize_t rndis_wceis_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->wceis);
+}
+
+/* sysfs: any non-zero value selects "wireless" RNDIS at bind time. */
+static ssize_t rndis_wceis_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->wceis = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+					     rndis_wceis_store);
+
+/* sysfs: show the host-visible MAC address in colon-separated hex. */
+static ssize_t rndis_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+	return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+/*
+ * sysfs: set the host-visible MAC address ("aa:bb:cc:dd:ee:ff").
+ *
+ * Fix vs. original: it cast &u8 elements to (int *) and let sscanf("%02x")
+ * store a full int through each, writing 4 bytes per element into the
+ * 6-byte ethaddr array -- an out-of-bounds write whose result also depended
+ * on endianness.  Parse into full-width ints and narrow explicitly.
+ */
+static ssize_t rndis_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+	int addr[ETH_ALEN];
+	int i;
+
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		   &addr[0], &addr[1], &addr[2],
+		   &addr[3], &addr[4], &addr[5]) != 6)
+		return -EINVAL;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rndis->ethaddr[i] = (u8)addr[i];
+	return size;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+					       rndis_ethaddr_store);
+
+/* sysfs: show the RNDIS vendor ID as four hex digits. */
+static ssize_t rndis_vendorID_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+/* sysfs: set the RNDIS vendor ID from hex input. */
+static ssize_t rndis_vendorID_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%04x", &value) == 1) {
+		config->vendorID = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+						rndis_vendorID_store);
+
+/* NULL-terminated attribute list exposed under the f_rndis device node. */
+static struct device_attribute *rndis_function_attributes[] = {
+	&dev_attr_manufacturer,
+	&dev_attr_wceis,
+	&dev_attr_ethaddr,
+	&dev_attr_vendorID,
+	NULL
+};
+
+static struct android_usb_function rndis_function = {
+	.name		= "rndis",
+	.init		= rndis_function_init,
+	.cleanup	= rndis_function_cleanup,
+	.bind_config	= rndis_function_bind_config,
+	.unbind_config	= rndis_function_unbind_config,
+	.attributes	= rndis_function_attributes,
+};
+
+
+/* Per-function state for USB mass storage: fsg config plus shared state. */
+struct mass_storage_function_config {
+	struct fsg_config fsg;
+	struct fsg_common *common;
+};
+
+/*
+ * Set up the mass-storage function: one removable LUN, shared fsg state,
+ * and a "lun" sysfs link under the function's device node.
+ *
+ * Fix vs. original: when sysfs_create_link() failed, the reference taken
+ * by fsg_common_init() was leaked; drop it with fsg_common_put() before
+ * freeing the config.
+ */
+static int mass_storage_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct mass_storage_function_config *config;
+	struct fsg_common *common;
+	int err;
+
+	config = kzalloc(sizeof(struct mass_storage_function_config),
+								GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	config->fsg.nluns = 1;
+	config->fsg.luns[0].removable = 1;
+
+	common = fsg_common_init(NULL, cdev, &config->fsg);
+	if (IS_ERR(common)) {
+		kfree(config);
+		return PTR_ERR(common);
+	}
+
+	err = sysfs_create_link(&f->dev->kobj,
+				&common->luns[0].dev.kobj,
+				"lun");
+	if (err) {
+		fsg_common_put(common);
+		kfree(config);
+		return err;
+	}
+
+	config->common = common;
+	f->config = config;
+	return 0;
+}
+
+/*
+ * Free the config allocated in init.
+ * NOTE(review): the fsg_common reference held in config->common does not
+ * appear to be dropped here -- verify whether this leaks on teardown.
+ */
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+/* Bind the mass-storage function using the shared fsg state. */
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct mass_storage_function_config *config = f->config;
+	return fsg_bind_config(c->cdev, c, config->common);
+}
+
+/* sysfs: show the SCSI INQUIRY string reported to the host. */
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+/* sysfs: set the INQUIRY string (single token; bounded by the size check). */
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	if (size >= sizeof(config->common->inquiry_string))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+		return -EINVAL;
+	return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+					mass_storage_inquiry_show,
+					mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+	&dev_attr_inquiry_string,
+	NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+	.name		= "mass_storage",
+	.init		= mass_storage_function_init,
+	.cleanup	= mass_storage_function_cleanup,
+	.bind_config	= mass_storage_function_bind_config,
+	.attributes	= mass_storage_function_attributes,
+};
+
+
+/* Android Open Accessory (AOA): thin wrappers around the f_accessory driver. */
+static int accessory_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+	acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	return acc_bind_config(c);
+}
+
+/* Forward accessory-specific control requests (see also android_setup()). */
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+	.name		= "accessory",
+	.init		= accessory_function_init,
+	.cleanup	= accessory_function_cleanup,
+	.bind_config	= accessory_function_bind_config,
+	.ctrlrequest	= accessory_function_ctrlrequest,
+};
+
+#ifdef CONFIG_SND /* if ALSA used */
+/*
+ * Audio source function: streams audio to the host; the ALSA card/device
+ * numbers become known only after bind and are published via the "pcm"
+ * attribute (-1 -1 while unbound).
+ */
+static int audio_source_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct audio_source_config *config;
+
+	config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+	config->card = -1;
+	config->device = -1;
+	f->config = config;
+	return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	return audio_source_bind_config(c, config);
+}
+
+/* On unbind the ALSA identifiers are no longer valid; reset to -1. */
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	config->card = -1;
+	config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct audio_source_config *config = f->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO | S_IWUSR, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+static struct android_usb_function audio_source_function = {
+	.name		= "audio_source",
+	.init		= audio_source_function_init,
+	.cleanup	= audio_source_function_cleanup,
+	.bind_config	= audio_source_function_bind_config,
+	.unbind_config	= audio_source_function_unbind_config,
+	.attributes	= audio_source_function_attributes,
+};
+#endif /* CONFIG_SND */
+
+/*
+ * Master list of functions selectable via the "functions" sysfs attribute;
+ * order here fixes the minor-number/index order used at init time.
+ */
+static struct android_usb_function *supported_functions[] = {
+	&ffs_function,
+	&adb_function,
+	&acm_function,
+	&mtp_function,
+	&ptp_function,
+	&rndis_function,
+	&mass_storage_function,
+	&accessory_function,
+#ifdef CONFIG_SND /* if ALSA used */
+	&audio_source_function,
+#endif /* CONFIG_SND */
+	NULL
+};
+
+
+/*
+ * Create a class device for every supported function, run its ->init hook
+ * and publish its sysfs attributes.  Returns 0 or the first error.
+ *
+ * Fix vs. original: 'err' was never initialized, so for any function with
+ * no ->init callback the attribute loop and the 'if (err)' check below
+ * read an indeterminate value (undefined behaviour that could spuriously
+ * abort registration).  It is now reset at the top of each iteration.
+ *
+ * NOTE(review): on failure, only the current function's device/name are
+ * released; devices created for earlier functions are not unwound here --
+ * confirm whether the caller relies on android_cleanup_functions() for that.
+ */
+static int android_init_functions(struct android_usb_function **functions,
+				  struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct android_usb_function *f;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+	int index = 0;
+
+	for (; (f = *functions++); index++) {
+		err = 0;
+		f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+		f->dev = device_create(android_class, dev->dev,
+				MKDEV(0, index), f, f->dev_name);
+		if (IS_ERR(f->dev)) {
+			pr_err("%s: Failed to create dev %s", __func__,
+							f->dev_name);
+			err = PTR_ERR(f->dev);
+			goto err_create;
+		}
+
+		if (f->init) {
+			err = f->init(f, cdev);
+			if (err) {
+				pr_err("%s: Failed to init %s", __func__,
+								f->name);
+				goto err_out;
+			}
+		}
+
+		attrs = f->attributes;
+		if (attrs) {
+			while ((attr = *attrs++) && !err)
+				err = device_create_file(f->dev, attr);
+		}
+		if (err) {
+			pr_err("%s: Failed to create function %s attributes",
+					__func__, f->name);
+			goto err_out;
+		}
+	}
+	return 0;
+
+err_out:
+	device_destroy(android_class, f->dev->devt);
+err_create:
+	kfree(f->dev_name);
+	return err;
+}
+
+/* Destroy every function's class device and run its ->cleanup hook. */
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+	struct android_usb_function *f;
+
+	while (*functions) {
+		f = *functions++;
+
+		if (f->dev) {
+			device_destroy(android_class, f->dev->devt);
+			kfree(f->dev_name);
+		}
+
+		if (f->cleanup)
+			f->cleanup(f);
+	}
+}
+
+/* Bind every currently-enabled function into config c; stop on first error. */
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+	int ret;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		ret = f->bind_config(f, c);
+		if (ret) {
+			pr_err("%s: %s failed", __func__, f->name);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* Give every enabled function a chance to undo its bind (hook is optional). */
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->unbind_config)
+			f->unbind_config(f, c);
+	}
+}
+
+/* Move the named function onto the enabled list; -EINVAL if unknown. */
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+	struct android_usb_function **functions = dev->functions;
+	struct android_usb_function *f;
+	while ((f = *functions++)) {
+		if (!strcmp(name, f->name)) {
+			list_add_tail(&f->enabled_list,
+						&dev->enabled_functions);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+/* sysfs: list enabled function names, comma-separated, newline-terminated. */
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_usb_function *f;
+	char *buff = buf;
+
+	mutex_lock(&dev->mutex);
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+		buff += sprintf(buff, "%s,", f->name);
+
+	mutex_unlock(&dev->mutex);
+
+	/* replace the trailing comma with a newline (if anything was printed) */
+	if (buff != buf)
+		*(buff-1) = '\n';
+	return buff - buf;
+}
+
+/*
+ * sysfs: set the function list from a comma-separated string, e.g.
+ * "mtp,adb".  Rejected with -EBUSY while the gadget is enabled.  Names
+ * matching one of the FunctionFS aliases enable the single "ffs" function
+ * instead (at most once).  Unknown names are logged and skipped; the
+ * write still succeeds.
+ */
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+			       const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	char *name;
+	char buf[256], *b;
+	char aliases[256], *a;
+	int err;
+	int is_ffs;
+	int ffs_enabled = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	/* start from an empty list; previous selections are discarded */
+	INIT_LIST_HEAD(&dev->enabled_functions);
+
+	strlcpy(buf, buff, sizeof(buf));
+	b = strim(buf);
+
+	while (b) {
+		name = strsep(&b, ",");
+		if (!name)
+			continue;
+
+		/* does this name match one of the configured ffs aliases? */
+		is_ffs = 0;
+		strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+		a = aliases;
+
+		while (a) {
+			char *alias = strsep(&a, ",");
+			if (alias && !strcmp(name, alias)) {
+				is_ffs = 1;
+				break;
+			}
+		}
+
+		if (is_ffs) {
+			if (ffs_enabled)
+				continue;
+			err = android_enable_function(dev, "ffs");
+			if (err)
+				pr_err("android_usb: Cannot enable ffs (%d)",
+									err);
+			else
+				ffs_enabled = 1;
+			continue;
+		}
+
+		err = android_enable_function(dev, name);
+		if (err)
+			pr_err("android_usb: Cannot enable '%s' (%d)",
+							   name, err);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+/* sysfs: show whether the gadget is currently enabled (0/1). */
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	return sprintf(buf, "%d\n", dev->enabled);
+}
+
+/*
+ * sysfs: enable (1) or disable (0) the gadget.  On enable, the descriptor
+ * fields staged through the idVendor/idProduct/... attributes are copied
+ * into the composite device before the pull-up is turned on.
+ * NOTE(review): the sscanf() result is unchecked; non-numeric input leaves
+ * enabled == 0 and is treated as a disable request.
+ */
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+			    const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_usb_function *f;
+	int enabled = 0;
+
+
+	if (!cdev)
+		return -ENODEV;
+
+	mutex_lock(&dev->mutex);
+
+	sscanf(buff, "%d", &enabled);
+	if (enabled && !dev->enabled) {
+		/*
+		 * Update values in composite driver's copy of
+		 * device descriptor.
+		 */
+		cdev->desc.idVendor = device_desc.idVendor;
+		cdev->desc.idProduct = device_desc.idProduct;
+		cdev->desc.bcdDevice = device_desc.bcdDevice;
+		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			if (f->enable)
+				f->enable(f);
+		}
+		android_enable(dev);
+		dev->enabled = true;
+	} else if (!enabled && dev->enabled) {
+		android_disable(dev);
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			if (f->disable)
+				f->disable(f);
+		}
+		dev->enabled = false;
+	} else {
+		pr_err("android_usb: already %s\n",
+				dev->enabled ? "enabled" : "disabled");
+	}
+
+	mutex_unlock(&dev->mutex);
+	return size;
+}
+
+/* sysfs: report DISCONNECTED / CONNECTED / CONFIGURED for userspace. */
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!cdev)
+		goto out;
+
+	/* lock: cdev->config and dev->connected change from interrupt context */
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+/*
+ * Generate show/store handlers and a DEVICE_ATTR for one integer field of
+ * the staged device descriptor (applied to the live descriptor on enable).
+ *
+ * Fix vs. original: the store handler returned a bare -1 (seen by
+ * userspace as -EPERM) on unparseable input; report -EINVAL instead.
+ */
+#define DESCRIPTOR_ATTR(field, format_string)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, format_string, device_desc.field);		\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	int value;							\
+	if (sscanf(buf, format_string, &value) == 1) {			\
+		device_desc.field = value;				\
+		return size;						\
+	}								\
+	return -EINVAL;							\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+/*
+ * Same as above for a fixed-size string buffer; the length guard keeps
+ * strlcpy() within bounds.
+ */
+#define DESCRIPTOR_STRING_ATTR(field, buffer)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, "%s", buffer);				\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	if (size >= sizeof(buffer))					\
+		return -EINVAL;						\
+	return strlcpy(buffer, buf, sizeof(buffer));			\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
+/* Instantiate the staged-descriptor attributes declared by the macros above. */
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+						 functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+/* Everything published under /sys/class/android_usb/android0/. */
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_idVendor,
+	&dev_attr_idProduct,
+	&dev_attr_bcdDevice,
+	&dev_attr_bDeviceClass,
+	&dev_attr_bDeviceSubClass,
+	&dev_attr_bDeviceProtocol,
+	&dev_attr_iManufacturer,
+	&dev_attr_iProduct,
+	&dev_attr_iSerial,
+	&dev_attr_functions,
+	&dev_attr_enable,
+	&dev_attr_state,
+	NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+/* Configuration bind hook: bind whatever functions are currently enabled. */
+static int android_bind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+	int ret = 0;
+
+	ret = android_bind_enabled_functions(dev, c);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/* Configuration unbind hook: mirror of android_bind_config(). */
+static void android_unbind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+
+	android_unbind_enabled_functions(dev, c);
+}
+
+/*
+ * Composite bind: initialize all functions, allocate string descriptor IDs,
+ * set default strings, and fill in bcdDevice from the controller number.
+ * NOTE(review): error returns after android_init_functions() succeeds do
+ * not call android_cleanup_functions() -- confirm whether the composite
+ * core's unbind path covers that.
+ */
+static int android_bind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_gadget *gadget = cdev->gadget;
+	int gcnum, id, ret;
+
+	/*
+	 * Start disconnected. Userspace will connect the gadget once
+	 * it is done configuring the functions.
+	 */
+	usb_gadget_disconnect(gadget);
+
+	ret = android_init_functions(dev->functions, cdev);
+	if (ret)
+		return ret;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_MANUFACTURER_IDX].id = id;
+	device_desc.iManufacturer = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_PRODUCT_IDX].id = id;
+	device_desc.iProduct = id;
+
+	/* Default strings - should be updated by userspace */
+	strncpy(manufacturer_string, "Android", sizeof(manufacturer_string)-1);
+	strncpy(product_string, "Android", sizeof(product_string) - 1);
+	strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_SERIAL_IDX].id = id;
+	device_desc.iSerialNumber = id;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
+	else {
+		pr_warning("%s: controller '%s' not recognized\n",
+			longname, gadget->name);
+		/* 0x9999 marks an unrecognized controller */
+		device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+	}
+
+	usb_gadget_set_selfpowered(gadget);
+	dev->cdev = cdev;
+
+	return 0;
+}
+
+/* Composite unbind: flush the state-change work, then tear down functions. */
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+
+	cancel_work_sync(&dev->work);
+	android_cleanup_functions(dev->functions);
+	return 0;
+}
+
+/* Composite driver descriptor; .bind is supplied to usb_composite_probe(). */
+static struct usb_composite_driver android_usb_driver = {
+	.name		= "android_usb",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.unbind		= android_usb_unbind,
+	.max_speed	= USB_SPEED_HIGH,
+};
+
+/*
+ * ep0 setup override (installed over composite_driver.setup in init()):
+ * offer the request to each enabled function's ->ctrlrequest first, then
+ * to the accessory driver, and finally to the standard composite handler.
+ * Also tracks connect state and schedules the uevent work item.
+ */
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	struct usb_request *req = cdev->req;
+	struct android_usb_function *f;
+	int value = -EOPNOTSUPP;
+	unsigned long flags;
+
+	req->zero = 0;
+	req->complete = composite_setup_complete;
+	req->length = 0;
+	gadget->ep0->driver_data = cdev;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->ctrlrequest) {
+			value = f->ctrlrequest(f, cdev, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+	/* Special case the accessory function.
+	 * It needs to handle control requests before it is enabled.
+	 */
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	/* notify userspace on first contact and on (re)configuration */
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!dev->connected) {
+		dev->connected = 1;
+		schedule_work(&dev->work);
+	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&dev->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+/*
+ * Disconnect override: run the normal composite disconnect, tell the
+ * accessory driver, then mark us disconnected and notify userspace.
+ */
+static void android_disconnect(struct usb_gadget *gadget)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	unsigned long flags;
+
+	composite_disconnect(gadget);
+	/* accessory HID support can be active while the
+	   accessory function is not actually enabled,
+	   so we need to inform it when we are disconnected.
+	 */
+	acc_disconnect();
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	dev->connected = 0;
+	schedule_work(&dev->work);
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+/*
+ * Create the "android0" class device and its attribute files.
+ * NOTE(review): if device_create_file() fails partway, previously created
+ * files are dropped implicitly by device_destroy() -- confirm.
+ */
+static int android_create_device(struct android_dev *dev)
+{
+	struct device_attribute **attrs = android_usb_attributes;
+	struct device_attribute *attr;
+	int err;
+
+	dev->dev = device_create(android_class, NULL,
+					MKDEV(0, 0), NULL, "android0");
+	if (IS_ERR(dev->dev))
+		return PTR_ERR(dev->dev);
+
+	dev_set_drvdata(dev->dev, dev);
+
+	while ((attr = *attrs++)) {
+		err = device_create_file(dev->dev, attr);
+		if (err) {
+			device_destroy(android_class, dev->dev->devt);
+			return err;
+		}
+	}
+	return 0;
+}
+
+
+/*
+ * Module init: create the android_usb class and android0 device, override
+ * the composite setup/disconnect hooks, register the composite driver, and
+ * keep the D+ pull-up off until userspace enables a function set.
+ *
+ * Fixes vs. original: android_class was leaked when the kzalloc() failed,
+ * and the device/class/dev allocation were all leaked when
+ * usb_composite_probe() failed.
+ */
+static int __init init(void)
+{
+	struct android_dev *dev;
+	int err;
+
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		class_destroy(android_class);
+		return -ENOMEM;
+	}
+
+	dev->disable_depth = 1;
+	dev->functions = supported_functions;
+	INIT_LIST_HEAD(&dev->enabled_functions);
+	INIT_WORK(&dev->work, android_work);
+	mutex_init(&dev->mutex);
+
+	err = android_create_device(dev);
+	if (err) {
+		class_destroy(android_class);
+		kfree(dev);
+		return err;
+	}
+
+	_android_dev = dev;
+
+	/* Override composite driver functions */
+	composite_driver.setup = android_setup;
+	composite_driver.disconnect = android_disconnect;
+
+	err = usb_composite_probe(&android_usb_driver, android_bind);
+	if (err) {
+		device_destroy(android_class, dev->dev->devt);
+		class_destroy(android_class);
+		_android_dev = NULL;
+		kfree(dev);
+		return err;
+	}
+
+	/*
+	 * The driver starts with no functions configured, so keep the
+	 * pull-up disabled until userspace sets something up.
+	 */
+	usb_gadget_disconnect(dev->cdev->gadget);
+
+	return 0;
+}
+module_init(init);
+
+/* Module exit: unregister the composite driver and free global state. */
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&android_usb_driver);
+	class_destroy(android_class);
+	kfree(_android_dev);
+	_android_dev = NULL;
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/carplay.c b/drivers/usb/gadget/carplay.c
new file mode 100644
index 00000000000000..13643fc9d92c4a
--- /dev/null
+++ b/drivers/usb/gadget/carplay.c
@@ -0,0 +1,260 @@
+/*
+ * carplay.c -- CDC Composite driver, with NCM and ACM (serial) support
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+#include "u_ether.h"
+#include "u_serial.h"
+
+
+#define DRIVER_DESC "NCM + iAP2"
+#define DRIVER_VERSION "King Kamehameha Day 2008"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
+ * Instead: allocate your own, using normal USB-IF procedures.
+ */
+
+/* Parrot-allocated IDs for this composite configuration (the file was
+ * derived from the NetChip-donated cdc2 example, but these IDs are
+ * Parrot's own).
+ */
+#define CDC_VENDOR_NUM		0x19cf	/* Parrot SA */
+#define CDC_PRODUCT_NUM		0xa4aa	/* Composite: NCM + ACM */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#define CARPLAY_SPECIFIC_CODE 1 // define for f_serial.c
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "u_serial.c"
+#include "f_serial.c"
+#include "f_ncm.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* Device descriptor; class 0 so class information lives per-interface. */
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16(0x0200),
+
+	.bDeviceClass =		0x00,
+	.bDeviceSubClass =	0x00,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id can be overridden by module parameters. */
+	.idVendor =		cpu_to_le16(CDC_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(CDC_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+/* OTG descriptor, attached to the config only on OTG-capable hardware. */
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+/* filled in by cdc_bind() from utsname + controller name */
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = "Parrot",
+	[STRING_PRODUCT_IDX].s = "Generic",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+/* host-side MAC address, filled in by gether_setup() */
+static u8 hostaddr[ETH_ALEN];
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Build the single configuration: one generic serial (ACM-style) port
+ * followed by one CDC NCM network interface.
+ */
+static int __init cdc_do_config(struct usb_configuration *c)
+{
+	int	status;
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+
+	status = gser_bind_config(c, 0);
+	if (status < 0)
+		return status;
+
+	status = ncm_bind_config(c, hostaddr);
+	if (status < 0)
+		return status;
+
+	return 0;
+}
+
+/* The one (self-powered) configuration this gadget offers. */
+static struct usb_configuration cdc_config_driver = {
+	.label			= "Digital iPod Out",
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Composite bind: bring up the network and serial link layers, fill in
+ * bcdDevice and the dynamic string IDs, then register the configuration.
+ * Unwinds via fail1 (serial) / fail0 (ether) on error.
+ */
+static int __init cdc_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	if (!can_support_ecm(cdev->gadget)) {
+		dev_err(&gadget->dev, "controller '%s' not usable\n",
+				gadget->name);
+		return -EINVAL;
+	}
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 1);
+	if (status < 0)
+		goto fail0;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		WARNING(cdev, "controller '%s' not recognized; trying %s\n",
+				gadget->name,
+				cdc_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail1;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	/* register our configuration */
+	status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
+	if (status < 0)
+		goto fail1;
+
+	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
+			DRIVER_DESC);
+
+	return 0;
+
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+	return status;
+}
+
+/* Composite unbind: tear down the serial and network link layers. */
+static int __exit cdc_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	return 0;
+}
+
+static struct usb_composite_driver cdc_driver = {
+	.name		= "g_cdc",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.max_speed	= USB_SPEED_HIGH,
+	.unbind		= __exit_p(cdc_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
+
+/* Module init: register the composite driver; cdc_bind() does the work. */
+static int __init init(void)
+{
+	return usb_composite_probe(&cdc_driver, cdc_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&cdc_driver);
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 00000000000000..a244265c114348
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,1180 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE 16384
+#define ACC_STRING_SIZE 256
+
+#define PROTOCOL_VERSION 2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+ struct list_head list;
+ struct hid_device *hid;
+ struct acc_dev *dev;
+ /* accessory defined ID */
+ int id;
+ /* HID report descriptor */
+ u8 *report_desc;
+ /* length of HID report descriptor */
+ int report_desc_len;
+ /* number of bytes of report_desc we have received so far */
+ int report_desc_offset;
+};
+
+struct acc_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+
+ /* set to 1 when we connect */
+ int online:1;
+ /* Set to 1 when we disconnect.
+ * Not cleared until our file is closed.
+ */
+ int disconnected:1;
+
+ /* strings sent by the host */
+ char manufacturer[ACC_STRING_SIZE];
+ char model[ACC_STRING_SIZE];
+ char description[ACC_STRING_SIZE];
+ char version[ACC_STRING_SIZE];
+ char uri[ACC_STRING_SIZE];
+ char serial[ACC_STRING_SIZE];
+
+ /* for acc_complete_set_string */
+ int string_index;
+
+ /* set to 1 if we have a pending start request */
+ int start_requested;
+
+ int audio_mode;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+
+ struct list_head tx_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* delayed work for handling ACCESSORY_START */
+ struct delayed_work start_work;
+
+ /* worker for registering and unregistering hid devices */
+ struct work_struct hid_work;
+
+ /* list of active HID devices */
+ struct list_head hid_list;
+
+ /* list of new HID devices to register */
+ struct list_head new_hid_list;
+
+ /* list of dead HID devices to unregister */
+ struct list_head dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+ (struct usb_descriptor_header *) &acc_interface_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+ (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+ NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+ [INTERFACE_STRING_INDEX].s = "Android Accessory Interface",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+ &acc_string_table,
+ NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+/* Map a usb_function back to its enclosing acc_dev. */
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+/* Allocate a usb_request on @ep together with a buffer_size byte data
+ * buffer.  Returns NULL if either allocation fails (nothing is leaked).
+ */
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		req = NULL;
+	}
+	return req;
+}
+
+/* Free a request and its data buffer; tolerates req == NULL. */
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (!req)
+		return;
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* add a request to the tail of a list (dev->lock protects the list) */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Remove and return the request at the head of @head, or NULL when the
+ * list is empty.  dev->lock serializes against req_put() and the
+ * completion handlers.
+ */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		/* NULL, not 0: this is a pointer (kernel coding style) */
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+/* Mark the link down.  disconnected stays set until the device file is
+ * re-opened (see acc_open).
+ */
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->online = 0;
+	dev->disconnected = 1;
+}
+
+/* Bulk IN completion: recycle the request onto tx_idle and wake any
+ * writer blocked in acc_write().  A non-zero status marks us offline.
+ */
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+/* Bulk OUT completion: flag rx_done and wake the reader blocked in
+ * acc_read().  A non-zero status marks us offline.
+ */
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		acc_set_disconnected(dev);
+
+	wake_up(&dev->read_wq);
+}
+
+/* ep0 data-stage completion for ACCESSORY_SEND_STRING: copy the received
+ * string into the slot selected by dev->string_index, which was recorded
+ * in acc_ctrlrequest() before this transfer was queued.
+ */
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		/* clamp so the NUL written below stays inside the buffer */
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+/* ep0 data-stage completion for ACCESSORY_SET_HID_REPORT_DESC: append the
+ * received chunk to the HID report descriptor.  Bounds were validated in
+ * acc_ctrlrequest() (offset/length checked against report_desc_len)
+ * before this transfer was queued.
+ */
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+/* ep0 data-stage completion for ACCESSORY_SEND_HID_EVENT: forward the
+ * received bytes to the HID core as an input report.
+ */
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+/* hid_ll_driver .parse hook: hand the descriptor that was received over
+ * USB to the HID core.  Propagate hid_parse_report() failures instead of
+ * discarding them, so a malformed descriptor is not reported as success.
+ */
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	return hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+}
+
+/* The remaining hid_ll_driver hooks are mandatory but have no work to do
+ * here: transport setup/teardown is handled by the gadget layer.
+ */
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+ .parse = acc_hid_parse,
+ .start = acc_hid_start,
+ .stop = acc_hid_stop,
+ .open = acc_hid_open,
+ .close = acc_hid_close,
+};
+
+/* Allocate a tracking struct for an accessory HID device plus a buffer
+ * for its report descriptor.  GFP_ATOMIC because this runs from the
+ * ep0 control-request path (acc_register_hid) while dev->lock is held.
+ */
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+/* Find the HID entry with accessory-assigned @id on @list, or NULL.
+ * Caller must hold dev->lock (all call sites take it).
+ */
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
+/* Handle ACCESSORY_REGISTER_HID: queue a new HID device (identified by
+ * the accessory-chosen @id) for registration by acc_hid_work().  An
+ * existing device with the same id is moved to the dead list first.
+ * Returns 0, -EINVAL on a non-positive descriptor length, or -ENOMEM.
+ */
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+/* Handle ACCESSORY_UNREGISTER_HID: move the HID device with @id (live or
+ * not-yet-registered) to the dead list and let acc_hid_work() destroy it.
+ * Returns -EINVAL if no such id exists.
+ */
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+/* Autoconfigure the bulk IN/OUT endpoints and preallocate their transfer
+ * requests.  Returns 0 on success or a negative errno; on allocation
+ * failure everything allocated so far is freed.
+ *
+ * Fix: the original called usb_ep_autoconfig() for the OUT descriptor
+ * twice, claiming a second endpoint from the UDC and overwriting
+ * dev->ep_out; the duplicate block is removed.  The failure path now
+ * returns -ENOMEM instead of the bare -1 (== -EPERM).
+ */
+static int create_bulk_endpoints(struct acc_dev *dev,
+		struct usb_endpoint_descriptor *in_desc,
+		struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -ENOMEM;
+}
+
+/* Read one bulk OUT transfer into the caller's buffer.  Blocks until the
+ * gadget is online, queues rx_req[0], then sleeps until acc_complete_out
+ * sets rx_done.  Returns bytes read or a negative errno.
+ *
+ * Fix: pr_debug() used %d for the size_t argument 'count'; that is a
+ * format-specifier mismatch (undefined behavior, wrong output on 64-bit).
+ * Use %zu.
+ */
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected)
+		return -ENODEV;
+
+	/* cap to the size of the preallocated request buffer */
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %d\n", r);
+	return r;
+}
+
+/* Write the caller's buffer out over bulk IN, splitting into
+ * BULK_BUFFER_SIZE chunks and blocking for an idle tx request per chunk.
+ * Returns bytes accepted or a negative errno.
+ *
+ * Fixes: %d used for the size_t 'count' in pr_debug (format mismatch);
+ * pointer locals assigned 0 instead of NULL.
+ */
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = NULL;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected)
+		return -ENODEV;
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE)
+			xfer = BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	/* a request we pulled off tx_idle but never queued goes back */
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %d\n", r);
+	return r;
+}
+
+/* ioctl on /dev/usb_accessory: return one of the host-supplied strings
+ * (copied to the user pointer in 'value'; returns strlen+1 on success),
+ * or the start-requested / audio-mode flags directly as the return value.
+ */
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	/* include the terminating NUL in the copy and the return value */
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+/* Open /dev/usb_accessory.  open_excl enforces a single opener; opening
+ * clears the sticky 'disconnected' flag left by a previous session.
+ */
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+/* Release /dev/usb_accessory: drop the exclusive-open flag and clear the
+ * sticky disconnect marker for the next opener.
+ */
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	_acc_dev->disconnected = 0;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+ .owner = THIS_MODULE,
+ .read = acc_read,
+ .write = acc_write,
+ .unlocked_ioctl = acc_ioctl,
+ .open = acc_open,
+ .release = acc_release,
+};
+
+/* hid_driver probe: parse the report descriptor (via acc_hid_parse
+ * through our ll_driver) and start the device with default connect mask.
+ */
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usb_accessory",
+ .fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+ { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+ { }
+};
+
+static struct hid_driver acc_hid_driver = {
+ .name = "USB accessory",
+ .id_table = acc_hid_table,
+ .probe = acc_hid_probe,
+};
+
+/* Handle the vendor control requests of the Android Open Accessory
+ * protocol on ep0.  Runs in the composite driver's setup path (atomic
+ * context).  Returns the data-stage length queued, 0 for status-only
+ * requests, or a negative errno (-EOPNOTSUPP for unrecognized requests,
+ * which the composite core then handles/stalls).
+ */
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev *dev = _acc_dev;
+	int value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			/* defer the uevent so it is sent from process
+			 * context, slightly after the request completes */
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			/* string data arrives in the OUT data stage;
+			 * record which slot it targets for the completion */
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			offset = w_index;
+			/* chunks must arrive in order and stay within the
+			 * descriptor buffer allocated at register time */
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			value = sizeof(u16);
+
+			/* clear any string left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	/* queue the data/status stage for any request we accepted */
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+
+/* usb_function bind: register our HID glue driver, claim an interface
+ * number and the bulk endpoints.
+ *
+ * Fix: the original leaked the registered hid_driver when
+ * usb_interface_id() or create_bulk_endpoints() failed afterwards
+ * (acc_function_unbind is not called for a failed bind); unwind it on
+ * the error paths.
+ */
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev *dev = func_to_dev(f);
+	int id;
+	int ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0) {
+		ret = id;
+		goto err_unregister_hid;
+	}
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		goto err_unregister_hid;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+
+err_unregister_hid:
+	hid_unregister_driver(&acc_hid_driver);
+	return ret;
+}
+
+/* Move every live and pending HID device onto the dead list and let
+ * acc_hid_work() destroy them.  Uses list_move() instead of the
+ * open-coded list_del() + list_add() pair (same behavior, standard
+ * idiom from <linux/list.h>).
+ */
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_move(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_move(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+/* Unregister our hid_driver and queue destruction of all HID devices. */
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+/* usb_function unbind: free every preallocated transfer request and tear
+ * down the HID side (reverse of acc_function_bind).
+ */
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev *dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+
+	acc_hid_unbind(dev);
+}
+
+/* Delayed work for ACCESSORY_START: notify userspace via a uevent on the
+ * misc device (process context, unlike the ep0 path that scheduled us).
+ */
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+/* Allocate and register a hid_device for one accessory HID entry,
+ * wired to our ll_driver.  On success stores the handle in hdev->hid;
+ * on failure the hid_device is destroyed and an errno returned.
+ */
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+/* Free one acc_hid_dev and its report descriptor buffer. */
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
+/* Worker that registers pending HID devices and destroys dead ones.
+ * Lists are snapshotted under dev->lock, then the (sleeping) HID core
+ * calls run lock-free on the private copies.
+ */
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = _acc_dev;
+	struct list_head *entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head new_list, dead_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&new_list);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
+	/* splice dead_hid_list onto the local dead_list by hand; the
+	 * empty case is special because the pointer fixups below would
+	 * otherwise leave dead_list pointing at the (re-initialized)
+	 * dev->dead_hid_list head
+	 */
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list */
+		dead_list.prev = dev->dead_hid_list.prev;
+		dead_list.next = dev->dead_hid_list.next;
+		dead_list.next->prev = &dead_list;
+		dead_list.prev->next = &dead_list;
+		INIT_LIST_HEAD(&dev->dead_hid_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+}
+
+/* usb_function set_alt: configure and enable both bulk endpoints for the
+ * negotiated speed, then mark the device online and wake blocked readers.
+ */
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev *dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		/* don't leave the IN endpoint half-enabled */
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+/* usb_function disable: mark the link down, disable both endpoints and
+ * wake any reader so it can observe the disconnect.
+ */
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev *dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+/* Add the accessory function to configuration @c, allocating the
+ * interface string id once.
+ *
+ * Fix: the assignment to dev->function.strings was terminated with a
+ * comma instead of a semicolon, turning the following assignment into
+ * a comma-operator expression — accidentally harmless, but a typo.
+ */
+static int acc_bind_config(struct usb_configuration *c)
+{
+	struct acc_dev *dev = _acc_dev;
+	int ret;
+
+	printk(KERN_INFO "acc_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		acc_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.bind = acc_function_bind;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+/* One-time driver setup: allocate the singleton acc_dev, initialize its
+ * locks, queues, lists and work items, then register the misc device.
+ *
+ * Fix: the error path freed dev but left the global _acc_dev pointing at
+ * the freed memory; clear it before freeing so later callers (acc_open,
+ * acc_disconnect, acc_cleanup) cannot dereference a dangling pointer.
+ */
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	_acc_dev = NULL;
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void acc_disconnect(void)
+{
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(_acc_dev);
+}
+
+/* Driver teardown: remove the misc device and free the singleton. */
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
new file mode 100644
index 00000000000000..2e84f4f0eef41c
--- /dev/null
+++ b/drivers/usb/gadget/f_adb.c
@@ -0,0 +1,622 @@
+/*
+ * Gadget Driver for Android ADB
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#define ADB_BULK_BUFFER_SIZE 4096
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX CONFIG_USB_G_ANDROID_ADB_TX_REQ_MAX
+
+static const char adb_shortname[] = "android_adb";
+
/* Per-gadget state for the ADB function; a single instance (_adb_dev)
 * is shared between the char device and the USB function callbacks. */
struct adb_dev {
	struct usb_function function;	/* composite-framework function */
	struct usb_composite_dev *cdev;
	spinlock_t lock;		/* protects tx_idle list */

	struct usb_ep *ep_in;		/* bulk IN (device -> host) */
	struct usb_ep *ep_out;		/* bulk OUT (host -> device) */

	int online;			/* set once set_alt enabled the eps */
	int error;			/* latched I/O error; cleared on open */

	/* single-user exclusion flags for read/write/open paths */
	atomic_t read_excl;
	atomic_t write_excl;
	atomic_t open_excl;

	struct list_head tx_idle;	/* free TX requests */

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	struct usb_request *rx_req;	/* the single RX request */
	int rx_done;			/* RX completion flag for read_wq */
};
+
/* Vendor-specific interface with subclass 0x42 / protocol 1 — the
 * interface triple adbd on the host matches against. */
static struct usb_interface_descriptor adb_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 2,
	.bInterfaceClass        = 0xFF,
	.bInterfaceSubClass     = 0x42,
	.bInterfaceProtocol     = 1,
};

static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor adb_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

/* Full-speed variants: wMaxPacketSize is left 0 and filled in by
 * usb_ep_autoconfig()/config_ep_by_speed(). */
static struct usb_endpoint_descriptor adb_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor adb_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_adb_descs[] = {
	(struct usb_descriptor_header *) &adb_interface_desc,
	(struct usb_descriptor_header *) &adb_fullspeed_in_desc,
	(struct usb_descriptor_header *) &adb_fullspeed_out_desc,
	NULL,
};

static struct usb_descriptor_header *hs_adb_descs[] = {
	(struct usb_descriptor_header *) &adb_interface_desc,
	(struct usb_descriptor_header *) &adb_highspeed_in_desc,
	(struct usb_descriptor_header *) &adb_highspeed_out_desc,
	NULL,
};

/* Implemented by the android composite glue (open/close notifications). */
static void adb_ready_callback(void);
static void adb_closed_callback(void);

/* temporary variable used between adb_open() and adb_gadget_bind() */
static struct adb_dev *_adb_dev;
+
/* Recover the adb_dev that embeds the given usb_function. */
static inline struct adb_dev *func_to_adb(struct usb_function *f)
{
	return container_of(f, struct adb_dev, function);
}
+
+
+static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void adb_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static inline int adb_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
/* Release ownership taken by a successful adb_lock(). */
static inline void adb_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
+
+/* add a request to the tail of a list */
/* add a request to the tail of a list; dev->lock serializes list access
 * against adb_req_get() and the completion handlers */
void adb_req_put(struct adb_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}
+
+/* remove a request from the head of a list */
+struct usb_request *adb_req_get(struct adb_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
/* TX completion: latch errors, recycle the request onto tx_idle and
 * wake any writer waiting for a free request. Runs in irq context. */
static void adb_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct adb_dev *dev = _adb_dev;

	if (req->status != 0)
		dev->error = 1;

	adb_req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}

/* RX completion: mark the single rx_req done and wake the reader.
 * -ECONNRESET (dequeue) is not treated as a device error. */
static void adb_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct adb_dev *dev = _adb_dev;

	dev->rx_done = 1;
	if (req->status != 0 && req->status != -ECONNRESET)
		dev->error = 1;

	wake_up(&dev->read_wq);
}
+
+static int adb_create_bulk_endpoints(struct adb_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ /* now allocate requests for our endpoints */
+ req = adb_request_new(dev->ep_out, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_out;
+ dev->rx_req = req;
+
+ for (i = 0; i < TX_REQ_MAX; i++) {
+ req = adb_request_new(dev->ep_in, ADB_BULK_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = adb_complete_in;
+ adb_req_put(dev, &dev->tx_idle, req);
+ }
+
+ return 0;
+
+fail:
+ printk(KERN_ERR "adb_bind() could not allocate requests\n");
+ return -1;
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req;
+ int r = count, xfer;
+ int ret;
+
+ pr_debug("adb_read(%d)\n", count);
+ if (!_adb_dev)
+ return -ENODEV;
+
+ if (count > ADB_BULK_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (adb_lock(&dev->read_excl))
+ return -EBUSY;
+
+ /* we will block until we're online */
+ while (!(dev->online || dev->error)) {
+ pr_debug("adb_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ (dev->online || dev->error));
+ if (ret < 0) {
+ adb_unlock(&dev->read_excl);
+ return ret;
+ }
+ }
+ if (dev->error) {
+ r = -EIO;
+ goto done;
+ }
+
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req;
+ req->length = count;
+ dev->rx_done = 0;
+ ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_read: failed to queue req %p (%d)\n", req, ret);
+ r = -EIO;
+ dev->error = 1;
+ goto done;
+ } else {
+ pr_debug("rx %p queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+ if (ret < 0) {
+ if (ret != -ERESTARTSYS)
+ dev->error = 1;
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ if (!dev->error) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ pr_debug("rx %p %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+
+ } else
+ r = -EIO;
+
+done:
+ adb_unlock(&dev->read_excl);
+ pr_debug("adb_read returning %d\n", r);
+ return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct adb_dev *dev = fp->private_data;
+ struct usb_request *req = 0;
+ int r = count, xfer;
+ int ret;
+
+ if (!_adb_dev)
+ return -ENODEV;
+ pr_debug("adb_write(%d)\n", count);
+
+ if (adb_lock(&dev->write_excl))
+ return -EBUSY;
+
+ while (count > 0) {
+ if (dev->error) {
+ pr_debug("adb_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = adb_req_get(dev, &dev->tx_idle)) || dev->error);
+
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ if (req != 0) {
+ if (count > ADB_BULK_BUFFER_SIZE)
+ xfer = ADB_BULK_BUFFER_SIZE;
+ else
+ xfer = count;
+ if (copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_debug("adb_write: xfer error %d\n", ret);
+ dev->error = 1;
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+ }
+
+ if (req)
+ adb_req_put(dev, &dev->tx_idle, req);
+
+ adb_unlock(&dev->write_excl);
+ pr_debug("adb_write returning %d\n", r);
+ return r;
+}
+
/* Open /dev/android_adb: enforce single-open, clear the latched error
 * and notify the android gadget glue that adbd is attached. */
static int adb_open(struct inode *ip, struct file *fp)
{
	pr_info("adb_open\n");
	if (!_adb_dev)
		return -ENODEV;

	if (adb_lock(&_adb_dev->open_excl))
		return -EBUSY;

	fp->private_data = _adb_dev;

	/* clear the error latch */
	_adb_dev->error = 0;

	adb_ready_callback();

	return 0;
}

/* Release /dev/android_adb: notify the glue and drop the open lock. */
static int adb_release(struct inode *ip, struct file *fp)
{
	pr_info("adb_release\n");

	if (!_adb_dev)
		return -ENODEV;

	adb_closed_callback();

	adb_unlock(&_adb_dev->open_excl);
	return 0;
}
+
/* file operations for ADB device /dev/android_adb */
static const struct file_operations adb_fops = {
	.owner = THIS_MODULE,
	.read = adb_read,
	.write = adb_write,
	.open = adb_open,
	.release = adb_release,
};

/* Misc char device exposing the function to adbd in userspace. */
static struct miscdevice adb_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = adb_shortname,
	.fops = &adb_fops,
};
+
+
+
+
/* usb_function .bind: allocate the interface ID, autoconfigure the two
 * bulk endpoints and pre-allocate their requests. The high-speed
 * descriptors reuse the endpoint addresses picked for full speed. */
static int
adb_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct adb_dev *dev = func_to_adb(f);
	int id;
	int ret;

	dev->cdev = cdev;
	DBG(cdev, "adb_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	adb_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = adb_create_bulk_endpoints(dev, &adb_fullspeed_in_desc,
			&adb_fullspeed_out_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		adb_highspeed_in_desc.bEndpointAddress =
			adb_fullspeed_in_desc.bEndpointAddress;
		adb_highspeed_out_desc.bEndpointAddress =
			adb_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
+
/* usb_function .unbind: mark the device offline/errored, wake any
 * blocked reader and free every pre-allocated request. */
static void
adb_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct adb_dev *dev = func_to_adb(f);
	struct usb_request *req;


	dev->online = 0;
	dev->error = 1;

	/* readers blocked in adb_read() must see error and bail out */
	wake_up(&dev->read_wq);

	adb_request_free(dev->rx_req, dev->ep_out);
	while ((req = adb_req_get(dev, &dev->tx_idle)))
		adb_request_free(req, dev->ep_in);
}
+
+static int adb_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct adb_dev *dev = func_to_adb(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->online = 1;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
/* usb_function .disable: take the function offline, latch an error so
 * in-flight readers/writers fail, and disable both endpoints. */
static void adb_function_disable(struct usb_function *f)
{
	struct adb_dev *dev = func_to_adb(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "adb_function_disable cdev %p\n", cdev);
	dev->online = 0;
	dev->error = 1;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}
+
/* Wire the singleton adb_dev into a composite configuration: fill in
 * the usb_function callbacks/descriptors and add the function. */
static int adb_bind_config(struct usb_configuration *c)
{
	struct adb_dev *dev = _adb_dev;

	printk(KERN_INFO "adb_bind_config\n");

	dev->cdev = c->cdev;
	dev->function.name = "adb";
	dev->function.descriptors = fs_adb_descs;
	dev->function.hs_descriptors = hs_adb_descs;
	dev->function.bind = adb_function_bind;
	dev->function.unbind = adb_function_unbind;
	dev->function.set_alt = adb_function_set_alt;
	dev->function.disable = adb_function_disable;

	return usb_add_function(c, &dev->function);
}
+
+static int adb_setup(void)
+{
+ struct adb_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->read_excl, 0);
+ atomic_set(&dev->write_excl, 0);
+
+ INIT_LIST_HEAD(&dev->tx_idle);
+
+ _adb_dev = dev;
+
+ ret = misc_register(&adb_device);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(dev);
+ printk(KERN_ERR "adb gadget driver failed to initialize\n");
+ return ret;
+}
+
/* Driver teardown: unregister the misc device and free the singleton.
 * Mirror of adb_setup(). */
static void adb_cleanup(void)
{
	misc_deregister(&adb_device);

	kfree(_adb_dev);
	_adb_dev = NULL;
}
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 00000000000000..c757409edf942f
--- /dev/null
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE 0
+#define AUDIO_AS_INTERFACE 1
+#define AUDIO_NUM_INTERFACES 2
+
/* B.3.1 Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
};

DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);

#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
/* 1 input terminal, 1 output terminal and 1 feature unit */
#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
	+ UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2 Class-Specific AC Interface Descriptor */
static struct uac1_ac_header_descriptor_2 ac_header_desc = {
	.bLength =		UAC_DT_AC_HEADER_LENGTH,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_HEADER,
	.bcdADC =		__constant_cpu_to_le16(0x0100),
	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
	.bInCollection =	AUDIO_NUM_INTERFACES,
	.baInterfaceNr = {
		[0] =		AUDIO_AC_INTERFACE,
		[1] =		AUDIO_AS_INTERFACE,
	}
};

/* Topology: microphone (input terminal) -> feature unit -> streaming
 * output terminal. */
#define INPUT_TERMINAL_ID	1
static struct uac_input_terminal_descriptor input_terminal_desc = {
	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
	.bTerminalID =		INPUT_TERMINAL_ID,
	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
	.bAssocTerminal =	0,
	.wChannelConfig =	0x3,	/* left front + right front */
};

DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);

#define FEATURE_UNIT_ID		2
static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
	.bUnitID		= FEATURE_UNIT_ID,
	.bSourceID		= INPUT_TERMINAL_ID,
	.bControlSize		= 2,
};

#define OUTPUT_TERMINAL_ID	3
static struct uac1_output_terminal_descriptor output_terminal_desc = {
	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
	.bTerminalID		= OUTPUT_TERMINAL_ID,
	.wTerminalType		= UAC_TERMINAL_STREAMING,
	.bAssocTerminal		= FEATURE_UNIT_ID,
	.bSourceID		= FEATURE_UNIT_ID,
};
+
/* B.4.1 Standard AS Interface Descriptor */
static struct usb_interface_descriptor as_interface_alt_0_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,	/* alt 0: zero-bandwidth idle setting */
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

static struct usb_interface_descriptor as_interface_alt_1_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	1,
	.bNumEndpoints =	1,	/* alt 1: the active iso IN endpoint */
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

/* B.4.2 Class-Specific AS Interface Descriptor */
static struct uac1_as_header_descriptor as_header_desc = {
	.bLength =		UAC_DT_AS_HEADER_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_AS_GENERAL,
	.bTerminalLink =	INPUT_TERMINAL_ID,
	.bDelay =		1,
	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
};

DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);

/* 16-bit PCM, one discrete sample rate (filled in audio_build_desc) */
static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
	.bFormatType =		UAC_FORMAT_TYPE_I,
	.bSubframeSize =	2,
	.bBitResolution =	16,
	.bSamFreqType =		1,
};

/* Standard ISO IN Endpoint Descriptor for highspeed */
static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
				| USB_ENDPOINT_XFER_ISOC,
	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
	.bInterval =		4, /* poll 1 per millisecond */
};

/* Standard ISO IN Endpoint Descriptor for fullspeed */
static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
				| USB_ENDPOINT_XFER_ISOC,
	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
	.bInterval =		1, /* poll 1 per millisecond */
};

/* Class-specific AS ISO IN Endpoint Descriptor */
static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
	.bDescriptorType =	USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype =	UAC_EP_GENERAL,
	.bmAttributes =		1,
	.bLockDelayUnits =	1,
	.wLockDelay =		__constant_cpu_to_le16(1),
};
+
/* Full descriptor sets (AC + AS interfaces) per speed. */
static struct usb_descriptor_header *hs_audio_desc[] = {
	(struct usb_descriptor_header *)&ac_interface_desc,
	(struct usb_descriptor_header *)&ac_header_desc,

	(struct usb_descriptor_header *)&input_terminal_desc,
	(struct usb_descriptor_header *)&output_terminal_desc,
	(struct usb_descriptor_header *)&feature_unit_desc,

	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
	(struct usb_descriptor_header *)&as_header_desc,

	(struct usb_descriptor_header *)&as_type_i_desc,

	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
	(struct usb_descriptor_header *)&as_iso_in_desc,
	NULL,
};

static struct usb_descriptor_header *fs_audio_desc[] = {
	(struct usb_descriptor_header *)&ac_interface_desc,
	(struct usb_descriptor_header *)&ac_header_desc,

	(struct usb_descriptor_header *)&input_terminal_desc,
	(struct usb_descriptor_header *)&output_terminal_desc,
	(struct usb_descriptor_header *)&feature_unit_desc,

	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
	(struct usb_descriptor_header *)&as_header_desc,

	(struct usb_descriptor_header *)&as_type_i_desc,

	(struct usb_descriptor_header *)&fs_as_in_ep_desc,
	(struct usb_descriptor_header *)&as_iso_in_desc,
	NULL,
};

/* ALSA PCM capabilities advertised to userspace: stereo S16_LE at the
 * single fixed SAMPLE_RATE. */
static struct snd_pcm_hardware audio_hw_info = {
	.info =			SNDRV_PCM_INFO_MMAP |
				SNDRV_PCM_INFO_MMAP_VALID |
				SNDRV_PCM_INFO_BATCH |
				SNDRV_PCM_INFO_INTERLEAVED |
				SNDRV_PCM_INFO_BLOCK_TRANSFER,

	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
	.channels_min		= 2,
	.channels_max		= 2,
	.rate_min		= SAMPLE_RATE,
	.rate_max		= SAMPLE_RATE,

	.buffer_bytes_max =	1024 * 1024,
	.period_bytes_min =	64,
	.period_bytes_max =	512 * 1024,
	.periods_min =		2,
	.periods_max =		1024,
};
+
+/*-------------------------------------------------------------------------*/
+
/* ALSA card/device numbers reported back to the android gadget glue. */
struct audio_source_config {
	int	card;
	int	device;
};

/* State for the audio-source function: bridges an ALSA playback PCM to
 * the isochronous IN endpoint. */
struct audio_dev {
	struct usb_function		func;
	struct snd_card			*card;
	struct snd_pcm			*pcm;
	struct snd_pcm_substream *substream;

	struct list_head		idle_reqs;	/* free iso requests */
	struct usb_ep			*in_ep;

	spinlock_t			lock;	/* protects idle_reqs + buffer_* */

	/* beginning, end and current position in our buffer */
	void				*buffer_start;
	void				*buffer_end;
	void				*buffer_pos;

	/* byte size of a "period" */
	unsigned int			period;
	/* bytes sent since last call to snd_pcm_period_elapsed */
	unsigned int			period_offset;
	/* time we started playing */
	ktime_t				start_time;
	/* number of frames sent since start_time */
	s64				frames_sent;
};

/* Recover the audio_dev that embeds the given usb_function. */
static inline struct audio_dev *func_to_audio(struct usb_function *f)
{
	return container_of(f, struct audio_dev, func);
}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+ req->length = buffer_size;
+ return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
/* Return a request to the idle pool; audio->lock serializes against
 * audio_req_get() and the iso completion handler. */
static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&audio->lock, flags);
	list_add_tail(&req->list, &audio->idle_reqs);
	spin_unlock_irqrestore(&audio->lock, flags);
}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&audio->lock, flags);
+ if (list_empty(&audio->idle_reqs)) {
+ req = 0;
+ } else {
+ req = list_first_entry(&audio->idle_reqs, struct usb_request,
+ list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&audio->lock, flags);
+ return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+ struct snd_pcm_runtime *runtime;
+ struct usb_request *req;
+ int length, length1, length2, ret;
+ s64 msecs;
+ s64 frames;
+ ktime_t now;
+
+ /* audio->substream will be null if we have been closed */
+ if (!audio->substream)
+ return;
+ /* audio->buffer_pos will be null if we have been stopped */
+ if (!audio->buffer_pos)
+ return;
+
+ runtime = audio->substream->runtime;
+
+ /* compute number of frames to send */
+ now = ktime_get();
+ msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+ do_div(msecs, 1000000);
+ frames = msecs * SAMPLE_RATE;
+ do_div(frames, 1000);
+
+ /* Readjust our frames_sent if we fall too far behind.
+ * If we get too far behind it is better to drop some frames than
+ * to keep sending data too fast in an attempt to catch up.
+ */
+ if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+ audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+ frames -= audio->frames_sent;
+
+ /* We need to send something to keep the pipeline going */
+ if (frames <= 0)
+ frames = FRAMES_PER_MSEC;
+
+ while (frames > 0) {
+ req = audio_req_get(audio);
+ if (!req)
+ break;
+
+ length = frames_to_bytes(runtime, frames);
+ if (length > IN_EP_MAX_PACKET_SIZE)
+ length = IN_EP_MAX_PACKET_SIZE;
+
+ if (audio->buffer_pos + length > audio->buffer_end)
+ length1 = audio->buffer_end - audio->buffer_pos;
+ else
+ length1 = length;
+ memcpy(req->buf, audio->buffer_pos, length1);
+ if (length1 < length) {
+ /* Wrap around and copy remaining length
+ * at beginning of buffer.
+ */
+ length2 = length - length1;
+ memcpy(req->buf + length1, audio->buffer_start,
+ length2);
+ audio->buffer_pos = audio->buffer_start + length2;
+ } else {
+ audio->buffer_pos += length1;
+ if (audio->buffer_pos >= audio->buffer_end)
+ audio->buffer_pos = audio->buffer_start;
+ }
+
+ req->length = length;
+ ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("usb_ep_queue failed ret: %d\n", ret);
+ audio_req_put(audio, req);
+ break;
+ }
+
+ frames -= bytes_to_frames(runtime, length);
+ audio->frames_sent += bytes_to_frames(runtime, length);
+ }
+}
+
/* ep0 control completion: no bookkeeping required. */
static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* nothing to do here */
}

/* Iso IN completion: recycle the request, advance period accounting,
 * notify ALSA when a full period has gone out, and refill the pipeline.
 * Runs in irq context. */
static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct audio_dev *audio = req->context;

	pr_debug("audio_data_complete req->status %d req->actual %d\n",
		req->status, req->actual);

	audio_req_put(audio, req);

	/* stopped (buffer released) or transfer error: don't account it */
	if (!audio->buffer_start || req->status)
		return;

	audio->period_offset += req->actual;
	if (audio->period_offset >= audio->period) {
		snd_pcm_period_elapsed(audio->substream);
		audio->period_offset = 0;
	}
	audio_send(audio);
}
+
/* Handle class-specific SET_* endpoint requests. The device advertises
 * a single fixed sample rate, so the data phase is accepted (returning
 * its length) and otherwise ignored. Returns -EOPNOTSUPP to stall. */
static int audio_set_endpoint_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	int value = -EOPNOTSUPP;
	u16 ep = le16_to_cpu(ctrl->wIndex);
	u16 len = le16_to_cpu(ctrl->wLength);
	u16 w_value = le16_to_cpu(ctrl->wValue);

	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
			ctrl->bRequest, w_value, len, ep);

	switch (ctrl->bRequest) {
	case UAC_SET_CUR:
	case UAC_SET_MIN:
	case UAC_SET_MAX:
	case UAC_SET_RES:
		value = len;
		break;
	default:
		break;
	}

	return value;
}
+
/* Handle class-specific GET_* endpoint requests: report the fixed
 * SAMPLE_RATE as a 3-byte little-endian value in cdev->req->buf.
 * Returns the number of bytes to send, or -EOPNOTSUPP to stall. */
static int audio_get_endpoint_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	int value = -EOPNOTSUPP;
	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
	u16 len = le16_to_cpu(ctrl->wLength);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u8 *buf = cdev->req->buf;

	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
			ctrl->bRequest, w_value, len, ep);

	/* only the sampling-frequency control is implemented */
	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
		switch (ctrl->bRequest) {
		case UAC_GET_CUR:
		case UAC_GET_MIN:
		case UAC_GET_MAX:
		case UAC_GET_RES:
			/* return our sample rate */
			buf[0] = (u8)SAMPLE_RATE;
			buf[1] = (u8)(SAMPLE_RATE >> 8);
			buf[2] = (u8)(SAMPLE_RATE >> 16);
			value = 3;
			break;
		default:
			break;
		}
	}

	return value;
}
+
/* usb_function .setup: dispatch class-specific endpoint control
 * requests and queue the ep0 data/status phase. A negative return
 * makes the composite core stall ep0. */
static int
audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything; interface
	 * activation uses set_alt().
	 */
	switch (ctrl->bRequestType) {
	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
		value = audio_set_endpoint_req(f, ctrl);
		break;

	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
		value = audio_get_endpoint_req(f, ctrl);
		break;
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		req->complete = audio_control_complete;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			pr_err("audio response on err %d\n", value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct audio_dev *audio = func_to_audio(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+ if (ret)
+ return ret;
+
+ usb_ep_enable(audio->in_ep);
+ return 0;
+}
+
/* usb_function .disable: stop streaming by disabling the iso endpoint. */
static void audio_disable(struct usb_function *f)
{
	struct audio_dev	*audio = func_to_audio(f);

	pr_debug("audio_disable\n");
	usb_ep_disable(audio->in_ep);
}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+ u8 *sam_freq;
+ int rate;
+
+ /* Set channel numbers */
+ input_terminal_desc.bNrChannels = 2;
+ as_type_i_desc.bNrChannels = 2;
+
+ /* Set sample rates */
+ rate = SAMPLE_RATE;
+ sam_freq = as_type_i_desc.tSamFreq[0];
+ memcpy(sam_freq, &rate, 3);
+}
+
/* audio function driver setup/binding */
static int
audio_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct audio_dev *audio = func_to_audio(f);
	int status;
	struct usb_ep *ep;
	struct usb_request *req;
	int i;

	audio_build_desc(audio);

	/* allocate instance-specific interface IDs, and patch descriptors */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ac_interface_desc.bInterfaceNumber = status;

	/* both alternate settings share the AS interface number */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	as_interface_alt_0_desc.bInterfaceNumber = status;
	as_interface_alt_1_desc.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate our endpoint */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
	if (!ep)
		goto fail;
	audio->in_ep = ep;
	ep->driver_data = audio; /* claim */

	/* high speed reuses the endpoint address chosen for full speed */
	if (gadget_is_dualspeed(c->cdev->gadget))
		hs_as_in_ep_desc.bEndpointAddress =
			fs_as_in_ep_desc.bEndpointAddress;

	f->descriptors = fs_audio_desc;
	f->hs_descriptors = hs_audio_desc;

	/* pre-allocate the pool of iso requests */
	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
		if (req) {
			req->context = audio;
			req->complete = audio_data_complete;
			audio_req_put(audio, req);
		} else
			status = -ENOMEM;
	}

fail:
	return status;
}
+
/* usb_function .unbind: release all iso requests, schedule the ALSA
 * card for teardown and drop the cached pointers. */
static void
audio_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct audio_dev *audio = func_to_audio(f);
	struct usb_request *req;

	while ((req = audio_req_get(audio)))
		audio_request_free(req, audio->in_ep);

	/* the card may still be open in userspace; free it when closed */
	snd_card_free_when_closed(audio->card);
	audio->card = NULL;
	audio->pcm = NULL;
	audio->substream = NULL;
	audio->in_ep = NULL;
}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	/* reset accounting, then kick off the first IN transfer */
+	audio->frames_sent = 0;
+	audio->start_time = ktime_get();
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long irqflags;
+
+	/* clear the buffer pointers under the lock so the completion
+	 * handler sees a consistent "stopped" state */
+	spin_lock_irqsave(&audio->lock, irqflags);
+	audio->buffer_start = 0;
+	audio->buffer_end = 0;
+	audio->buffer_pos = 0;
+	spin_unlock_irqrestore(&audio->lock, irqflags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	/* expose our fixed hardware capabilities to ALSA */
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long irqflags;
+
+	/* detach under the lock so in-flight completions stop touching
+	 * the substream */
+	spin_lock_irqsave(&audio->lock, irqflags);
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, irqflags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	/* only stereo at the single compiled-in SAMPLE_RATE is supported */
+	if (params_rate(params) != SAMPLE_RATE ||
+	    params_channels(params) != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+/* Release the vmalloc PCM buffer allocated in audio_pcm_hw_params(). */
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	size_t buf_bytes = snd_pcm_lib_buffer_bytes(substream);
+
+	/* latch the ALSA buffer bounds and period size for the send path */
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start + buf_bytes;
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	/* report the offset of the next frame to fill, in frames */
+	return bytes_to_frames(runtime,
+			audio->buffer_pos - audio->buffer_start);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+
+	/* map the ALSA trigger command onto our start/stop helpers */
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/* The single static audio function instance. */
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+/* ALSA playback callbacks; capture is not supported. */
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
+
+/* Create the ALSA card/PCM pair backing the audio source function and add
+ * the function to the configuration.  On success, config->card/device are
+ * filled in for userspace.  Returns 0 or a negative errno.
+ */
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	/* invalid until we are fully registered */
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+	/* audio->card is only assigned on success, so free the local card
+	 * handle here (freeing audio->card would free NULL/stale memory
+	 * and leak the new card) */
+	snd_card_free(card);
+	audio->pcm = NULL;
+	return err;
+}
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 00000000000000..1638977a54105c
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1283 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE 16384
+#define INTR_BUFFER_SIZE 28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE 0 /* initial state, disconnected */
+#define STATE_READY 1 /* ready for userspace calls */
+#define STATE_BUSY 2 /* processing userspace calls */
+#define STATE_CANCELED 3 /* transaction canceled by host */
+#define STATE_ERROR 4 /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID 0xEE
+
+/* MTP class reqeusts */
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK 0x2001
+#define MTP_RESPONSE_DEVICE_BUSY 0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+/* Per-function state for the MTP gadget; a single static instance
+ * (_mtp_dev) is shared between the usb_function and /dev/mtp_usb paths.
+ */
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;	/* guards state and the request lists */
+
+	struct usb_ep *ep_in;	/* bulk IN (device-to-host) */
+	struct usb_ep *ep_out;	/* bulk OUT (host-to-device) */
+	struct usb_ep *ep_intr;	/* interrupt IN, for MTP events */
+
+	int state;		/* one of the STATE_* values above */
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;	/* idle bulk-IN requests */
+	struct list_head intr_idle;	/* idle interrupt requests */
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;		/* set by mtp_complete_out */
+
+	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	unsigned xfer_send_header;
+	uint16_t xfer_command;
+	uint32_t xfer_transaction_id;
+	int xfer_result;	/* result handed back to mtp_ioctl */
+};
+
+/* MTP presents as a vendor-specific interface; bInterfaceNumber is
+ * patched at bind time. */
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+/* PTP variant: still-image class, same endpoints. */
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+/* full-speed wMaxPacketSize is left for usb_ep_autoconfig() to fill in */
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+/* Descriptor lists handed to the composite framework; the MTP and PTP
+ * variants share the same endpoint descriptors. */
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String, served at string index MTP_OS_STRING_ID (0xEE);
+ * Windows probes it to discover the vendor code used for the extended
+ * configuration descriptor request. */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__u16	bcdVersion;	/* NOTE(review): spec says little-endian; declared __u16 here */
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor, returned for the Microsoft
+ * vendor request so Windows binds the MTP driver automatically. */
+struct {
+	struct mtp_ext_config_desc_header header;
+	struct mtp_ext_config_desc_function function;
+} mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		/* bCount is a single byte -- must not be byte-swapped as a
+		 * 16-bit value (the old __constant_cpu_to_le16(1) truncated
+		 * to the wrong byte on big-endian builds) */
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+/* Payload for the MTP_REQ_GET_DEVICE_STATUS class request. */
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+/* Recover the mtp_dev from its embedded usb_function. */
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+/* Allocate a usb_request plus a kernel buffer of buffer_size bytes;
+ * returns NULL if either allocation fails. */
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (req->buf)
+		return req;
+
+	/* buffer allocation failed: release the bare request */
+	usb_ep_free_request(ep, req);
+	return NULL;
+}
+
+/* Free a request and its buffer; tolerates NULL. */
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (!req)
+		return;
+
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* Try to take an exclusion counter; 0 on success, -1 if already held. */
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+	/* someone else holds it: undo our increment */
+	atomic_dec(excl);
+	return -1;
+}
+
+/* Release an exclusion counter taken with mtp_lock(). */
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* Append a request to the tail of a list, under the device lock. */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->lock, irqflags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, irqflags);
+}
+
+/* Pop the request at the head of a list, or NULL if the list is empty. */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	struct usb_request *req = NULL;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->lock, irqflags);
+	if (!list_empty(head)) {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, irqflags);
+
+	return req;
+}
+
+/* Bulk-IN completion: recycle the request and wake any writer. */
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	/* a failed transfer latches the error state */
+	if (req->status)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+	wake_up(&dev->write_wq);
+}
+
+/* Bulk-OUT completion: flag rx_done and wake any reader. */
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+/* Interrupt-IN completion: recycle the request and wake event senders. */
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	/* a failed transfer latches the error state */
+	if (req->status)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+	wake_up(&dev->intr_wq);
+}
+
+/* Claim the IN, OUT and interrupt endpoints and preallocate the request
+ * pools.  Each endpoint is configured exactly once: the original code
+ * called usb_ep_autoconfig() twice for the OUT descriptor, claiming a
+ * second bulk-OUT endpoint that was never released or used.
+ * Returns 0, -ENODEV if an endpoint is unavailable, or -ENOMEM.
+ */
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+	/* return a real errno instead of the old bare -1 */
+	return -ENOMEM;
+}
+
+/* Read one bulk-OUT transfer into the userspace buffer.
+ * Blocks until the device is online, queues a single rx request and waits
+ * for its completion.  Returns bytes read, -ECANCELED if the host
+ * cancelled the transaction, or a negative errno.
+ * NOTE(review): only rx_req[0] is used here; the remaining rx requests
+ * are cycled by receive_file_work().
+ */
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = 0;
+
+	DBG(cdev, "mtp_read(%d)\n", count);
+
+	/* a single request buffer bounds the transfer size */
+	if (count > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		/* interrupted: take the request back off the endpoint */
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	/* report cancellation, otherwise return to READY unless offline */
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_read returning %d\n", r);
+	return r;
+}
+
+/* Write the userspace buffer out over bulk-IN, splitting it into
+ * MTP_BULK_BUFFER_SIZE chunks.  A zero-length packet is appended when the
+ * total length is a multiple of the endpoint's max packet size so the
+ * host can detect end-of-transfer.  Returns bytes written, -ECANCELED on
+ * host cancel, or a negative errno.
+ */
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "mtp_write(%d)\n", count);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			/* interrupted or state changed before we got one */
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	/* a req still held here was never queued: return it to the pool */
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_write returning %d\n", r);
+	return r;
+}
+
+/* read from a local file and write to USB.
+ * Runs on dev->wq; parameters (xfer_file, offset, length, header fields)
+ * are published by mtp_ioctl() before queueing, hence the smp_rmb()
+ * pairing with its smp_wmb().  Optionally prepends an MTP data header to
+ * the first chunk.  The result is left in dev->xfer_result.
+ */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	struct mtp_data_header *header;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret, hdr_size;
+	int r = 0;
+	int sendZLP = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	if (dev->xfer_send_header) {
+		hdr_size = sizeof(struct mtp_data_header);
+		/* the header counts toward the total transfer length */
+		count += hdr_size;
+	} else {
+		hdr_size = 0;
+	}
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+
+		if (hdr_size) {
+			/* prepend MTP data header */
+			header = (struct mtp_data_header *)req->buf;
+			header->length = __cpu_to_le32(count);
+			header->type = __cpu_to_le16(2); /* data packet */
+			header->command = __cpu_to_le16(dev->xfer_command);
+			header->transaction_id =
+					__cpu_to_le32(dev->xfer_transaction_id);
+		}
+
+		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+								&offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		xfer = ret + hdr_size;
+		/* header only goes on the first chunk */
+		hdr_size = 0;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	/* a req still held here was never queued: return it to the pool */
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "send_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file.
+ * Runs on dev->wq; parameters are published by mtp_ioctl() (smp_rmb()
+ * pairs with its smp_wmb()).  Double-buffers across rx_req[]: while one
+ * request is queued on the wire, the previous one is written to the file.
+ * The result is left in dev->xfer_result.
+ */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			read_req->length = (count > MTP_BULK_BUFFER_SIZE
+					? MTP_BULK_BUFFER_SIZE : count);
+			dev->rx_done = 0;
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			/* flush the previously completed request to disk
+			 * while the new one is on the wire */
+			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED) {
+				r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/*
+				 * short packet is used to signal EOF for
+				 * sizes > 4 gig
+				 */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* Send an MTP event to the host over the interrupt endpoint.
+ * Copies event->data from userspace into an idle interrupt request and
+ * queues it.  Waits up to one second for an idle request; returns 0,
+ * -EINVAL for a bad length, -ENODEV when offline, -ETIME on timeout,
+ * -EFAULT on a bad user pointer, or the usb_ep_queue() error.
+ */
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+			(req = mtp_req_get(dev, &dev->intr_idle)),
+			msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		/* return the request to the pool on failure */
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
+/* ioctl entry point for /dev/mtp_usb.  File transfers run synchronously
+ * on dev->wq (kernel context is required for vfs_read/vfs_write on our
+ * buffers); MTP_SEND_EVENT is handled inline.  Only one ioctl may run at
+ * a time (ioctl_excl).
+ * NOTE(review): an unrecognized code leaves ret == -EINVAL and falls
+ * through to the fail: path below, which also resets dev->state.
+ */
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl))
+		return -EBUSY;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+	{
+		struct mtp_file_range	mfr;
+		struct work_struct *work;
+
+		spin_lock_irq(&dev->lock);
+		if (dev->state == STATE_CANCELED) {
+			/* report cancelation to userspace */
+			dev->state = STATE_READY;
+			spin_unlock_irq(&dev->lock);
+			ret = -ECANCELED;
+			goto out;
+		}
+		if (dev->state == STATE_OFFLINE) {
+			spin_unlock_irq(&dev->lock);
+			ret = -ENODEV;
+			goto out;
+		}
+		dev->state = STATE_BUSY;
+		spin_unlock_irq(&dev->lock);
+
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		/* hold a reference to the file while we are working with it */
+		filp = fget(mfr.fd);
+		if (!filp) {
+			ret = -EBADF;
+			goto fail;
+		}
+
+		/* write the parameters; smp_wmb() pairs with the smp_rmb()
+		 * in the work functions */
+		dev->xfer_file = filp;
+		dev->xfer_file_offset = mfr.offset;
+		dev->xfer_file_length = mfr.length;
+		smp_wmb();
+
+		if (code == MTP_SEND_FILE_WITH_HEADER) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 1;
+			dev->xfer_command = mfr.command;
+			dev->xfer_transaction_id = mfr.transaction_id;
+		} else if (code == MTP_SEND_FILE) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 0;
+		} else {
+			work = &dev->receive_file_work;
+		}
+
+		/* We do the file transfer on a work queue so it will run
+		 * in kernel context, which is necessary for vfs_read and
+		 * vfs_write to use our buffers in the kernel address space.
+		 */
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
+		fput(filp);
+
+		/* read the result */
+		smp_rmb();
+		ret = dev->xfer_result;
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	event;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		goto out;
+	}
+	}
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
+
+/* Open /dev/mtp_usb; only one opener is allowed at a time. */
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	printk(KERN_INFO "mtp_open\n");
+	if (mtp_lock(&dev->open_excl))
+		return -EBUSY;
+
+	/* clear any error condition, unless we are disconnected */
+	if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+
+	fp->private_data = dev;
+	return 0;
+}
+
+/* Release /dev/mtp_usb and drop the open-exclusion lock. */
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_release\n");
+	mtp_unlock(&_mtp_dev->open_excl);
+
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+/* misc device registered as /dev/mtp_usb with a dynamic minor */
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
+/* Handle ep0 control requests addressed to the MTP function: the
+ * Microsoft OS string, the vendor extended-configuration descriptor, and
+ * the MTP class requests CANCEL and GET_DEVICE_STATUS.  Returns the
+ * number of bytes queued on ep0, or -EOPNOTSUPP for requests we do not
+ * handle.
+ */
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		/* bRequest 1 is the vendor code advertised in mtp_os_string */
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+		}
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 * bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancelation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
/*
 * Bind the MTP function to configuration @c: claim an interface number,
 * allocate the bulk IN/OUT and interrupt endpoints, and propagate the
 * full-speed endpoint addresses into the high-speed descriptors.
 * Returns 0 on success or a negative errno.
 */
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev *dev = func_to_mtp(f);
	int id;
	int ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	mtp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
+
/*
 * Unbind: free every idle USB request on all three endpoints and mark
 * the device offline.  The TX and interrupt pools are linked lists
 * drained via mtp_req_get(); RX requests live in a fixed-size array.
 */
static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_request *req;
	int i;

	while ((req = mtp_req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	while ((req = mtp_req_get(dev, &dev->intr_idle)))
		mtp_request_free(req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
}
+
+static int mtp_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_intr);
+ if (ret) {
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->state = STATE_READY;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
/*
 * Called by the composite core when the host deconfigures or switches
 * away from us: mark the device offline, disable all endpoints (which
 * completes pending transfers with an error) and wake blocked readers.
 */
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}
+
/*
 * Wire the singleton MTP device into configuration @c.  When
 * @ptp_config is true the PTP descriptor set is exposed instead of the
 * MTP one (same endpoints, different interface descriptors).
 * Returns 0 on success or a negative errno.
 */
static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
{
	struct mtp_dev *dev = _mtp_dev;
	int ret = 0;

	printk(KERN_INFO "mtp_bind_config\n");

	/* allocate a string ID for our interface (only on the first bind) */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			return ret;
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	dev->cdev = c->cdev;
	dev->function.name = "mtp";
	dev->function.strings = mtp_strings;
	if (ptp_config) {
		dev->function.descriptors = fs_ptp_descs;
		dev->function.hs_descriptors = hs_ptp_descs;
	} else {
		dev->function.descriptors = fs_mtp_descs;
		dev->function.hs_descriptors = hs_mtp_descs;
	}
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;

	return usb_add_function(c, &dev->function);
}
+
/*
 * Allocate and initialize the singleton mtp_dev: locks, wait queues,
 * request lists, the file-transfer workqueue and the misc character
 * device.  On failure everything allocated so far is unwound through
 * the goto chain and a negative errno is returned.
 */
static int mtp_setup(void)
{
	struct mtp_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_wq);
	init_waitqueue_head(&dev->write_wq);
	init_waitqueue_head(&dev->intr_wq);
	atomic_set(&dev->open_excl, 0);
	atomic_set(&dev->ioctl_excl, 0);
	INIT_LIST_HEAD(&dev->tx_idle);
	INIT_LIST_HEAD(&dev->intr_idle);

	/* dedicated single-threaded workqueue for file-transfer work items */
	dev->wq = create_singlethread_workqueue("f_mtp");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err1;
	}
	INIT_WORK(&dev->send_file_work, send_file_work);
	INIT_WORK(&dev->receive_file_work, receive_file_work);

	_mtp_dev = dev;

	ret = misc_register(&mtp_device);
	if (ret)
		goto err2;

	return 0;

err2:
	destroy_workqueue(dev->wq);
err1:
	_mtp_dev = NULL;
	kfree(dev);
	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
	return ret;
}
+
+static void mtp_cleanup(void)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (!dev)
+ return;
+
+ misc_deregister(&mtp_device);
+ destroy_workqueue(dev->wq);
+ _mtp_dev = NULL;
+ kfree(dev);
+}
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
new file mode 100644
index 00000000000000..05d39c6e836ae4
--- /dev/null
+++ b/drivers/usb/misc/ehset.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/ch11.h>
+
/*
 * USB-IF Embedded Host High-Speed Electrical Test (EHSET) fixture
 * product IDs (paired with vendor ID 0x1a0a in the id table below);
 * each PID selects one electrical test mode.
 */
#define TEST_SE0_NAK_PID 0x0101
#define TEST_J_PID 0x0102
#define TEST_K_PID 0x0103
#define TEST_PACKET_PID 0x0104
#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
+
/*
 * Runs when an EHSET test fixture with one of the magic product IDs is
 * attached.  The PID chooses which test mode to put the *parent* hub
 * port (or root-hub port) into, via hub-class SetPortFeature(PORT_TEST)
 * and suspend/resume requests.  Returns 0 on success, negative errno
 * from the last control transfer otherwise.
 */
static int ehset_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	int ret = -EINVAL;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usb_device *hub_udev = dev->parent;
	struct usb_device_descriptor *buf;
	u8 portnum = dev->portnum;
	u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);

	/*
	 * NOTE(review): for the four signal-quality modes the parent port is
	 * suspended first when the fixture sits behind an external (non-root)
	 * hub — presumably required before entering test mode on external
	 * hubs; confirm against the EHSET specification.  The return value of
	 * this suspend request is deliberately ignored.
	 */
	switch (test_pid) {
	case TEST_SE0_NAK_PID:
	case TEST_J_PID:
	case TEST_K_PID:
	case TEST_PACKET_PID:
		if(dev->bus->root_hub != hub_udev)
			usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
					USB_REQ_SET_FEATURE, USB_RT_PORT,
					USB_PORT_FEAT_SUSPEND,
					portnum,
					NULL, 0, 1000);
		break;
	default: break;
	}

	switch (test_pid) {
	case TEST_SE0_NAK_PID:
		/* drive the SE0/NAK test pattern on the port under test */
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_TEST,
				(TEST_SE0_NAK << 8) | portnum,
				NULL, 0, 1000);
		break;
	case TEST_J_PID:
		/* drive the continuous J state test pattern */
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_TEST,
				(TEST_J << 8) | portnum,
				NULL, 0, 1000);
		break;
	case TEST_K_PID:
		/* drive the continuous K state test pattern */
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_TEST,
				(TEST_K << 8) | portnum,
				NULL, 0, 1000);
		break;
	case TEST_PACKET_PID:
		/* transmit the standard compliance test packet */
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_TEST,
				(TEST_PACKET << 8) | portnum,
				NULL, 0, 1000);
		break;
	case TEST_HS_HOST_PORT_SUSPEND_RESUME:
		/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
		msleep(15 * 1000);
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_SUSPEND, portnum,
				NULL, 0, 1000);
		if (ret < 0)
			break;

		msleep(15 * 1000);
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_SUSPEND, portnum,
				NULL, 0, 1000);
		break;
	case TEST_SINGLE_STEP_GET_DEV_DESC:
		/* Test: wait for 15secs -> GetDescriptor request */
		msleep(15 * 1000);
		buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
				USB_DT_DEVICE << 8, 0,
				buf, USB_DT_DEVICE_SIZE,
				USB_CTRL_GET_TIMEOUT);
		kfree(buf);
		break;
	case TEST_SINGLE_STEP_SET_FEATURE:
		/*
		 * GetDescriptor SETUP request -> 15secs delay -> IN & STATUS
		 *
		 * Note, this test is only supported on root hubs since the
		 * SetPortFeature handling can only be done inside the HCD's
		 * hub_control callback function.
		 */
		if (hub_udev != dev->bus->root_hub) {
			dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
			break;
		}

		/*
		 * Test selector 6 — presumably the EHSET SINGLE_STEP_SET_FEATURE
		 * port test value; confirm against the EHSET specification.
		 * 60s timeout covers the mandated 15s delay inside the HCD.
		 */
		ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
				USB_REQ_SET_FEATURE, USB_RT_PORT,
				USB_PORT_FEAT_TEST,
				(6 << 8) | portnum,
				NULL, 0, 60 * 1000);

		break;
	default:
		dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
				__func__, test_pid);
	}

	return (ret < 0) ? ret : 0;
}
+
/* Nothing to undo on disconnect: probe allocates no per-device state. */
static void ehset_disconnect(struct usb_interface *intf)
{
}
+
/* Match table: the USB-IF EHSET test vendor ID with each test-mode PID. */
static const struct usb_device_id ehset_id_table[] = {
	{ USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
	{ USB_DEVICE(0x1a0a, TEST_J_PID) },
	{ USB_DEVICE(0x1a0a, TEST_K_PID) },
	{ USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
	{ USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ehset_id_table);
+
/* Standard USB interface driver glue; no suspend/resume support needed. */
static struct usb_driver ehset_driver = {
	.name =		"usb_ehset_test",
	.probe =	ehset_probe,
	.disconnect =	ehset_disconnect,
	.id_table =	ehset_id_table,
};

module_usb_driver(ehset_driver);

MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c
new file mode 100644
index 00000000000000..e17e27299062c7
--- /dev/null
+++ b/drivers/usb/otg/otg-wakelock.c
@@ -0,0 +1,170 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
/* Duration (ms) the timed wakelock stays held after transient events. */
#define TEMPORARY_HOLD_TIME 2000

/* Module parameter: when false, the VBUS wakelock is never held. */
static bool enabled = true;
/* Transceiver our notifier is registered on; NULL until init succeeds. */
static struct usb_phy *otgwl_xceiv;
static struct notifier_block otgwl_nb;

/*
 * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
 * held field is updated to match.
 */

static DEFINE_SPINLOCK(otgwl_spinlock);

/*
 * A named wakelock plus a flag tracking whether we currently hold it.
 * Only one lock, but since these 3 fields are associated with each other...
 */

struct otgwl_lock {
	char name[40];
	struct wake_lock wakelock;
	bool held;
};

/*
 * VBUS present lock.  Also used as a timed lock on charger
 * connect/disconnect and USB host disconnect, to allow the system
 * to react to the change in power.
 */

static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+ if (!lock->held) {
+ wake_lock(&lock->wakelock);
+ lock->held = true;
+ }
+}
+
/*
 * Grab the wakelock for TEMPORARY_HOLD_TIME ms; it self-releases, so the
 * held flag is cleared immediately (we no longer own it indefinitely).
 */
static void otgwl_temporary_hold(struct otgwl_lock *lock)
{
	wake_lock_timeout(&lock->wakelock,
			  msecs_to_jiffies(TEMPORARY_HOLD_TIME));
	lock->held = false;
}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+ if (lock->held) {
+ wake_unlock(&lock->wakelock);
+ lock->held = false;
+ }
+}
+
/*
 * Map an OTG event onto the VBUS wakelock policy under otgwl_spinlock:
 * VBUS/enumeration hold the lock, disconnect/ID/charger events take a
 * timed hold so the system can react before suspending, and everything
 * else is ignored.  When the module is disabled the lock is dropped.
 */
static void otgwl_handle_event(unsigned long event)
{
	unsigned long irqflags;

	spin_lock_irqsave(&otgwl_spinlock, irqflags);

	if (!enabled) {
		otgwl_drop(&vbus_lock);
		spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
		return;
	}

	switch (event) {
	case USB_EVENT_VBUS:
	case USB_EVENT_ENUMERATED:
		otgwl_hold(&vbus_lock);
		break;

	case USB_EVENT_NONE:
	case USB_EVENT_ID:
	case USB_EVENT_CHARGER:
		otgwl_temporary_hold(&vbus_lock);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
}
+
/* Notifier-chain thunk: forward the OTG event to the policy handler. */
static int otgwl_otg_notifications(struct notifier_block *nb,
				   unsigned long event, void *unused)
{
	otgwl_handle_event(event);
	return NOTIFY_OK;
}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+ int rv = param_set_bool(val, kp);
+
+ if (rv)
+ return rv;
+
+ if (otgwl_xceiv)
+ otgwl_handle_event(otgwl_xceiv->last_event);
+
+ return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+ .set = set_enabled,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
/*
 * Late-init hook: find the USB transceiver, create the VBUS wakelock
 * (named "vbus-<phy device>") and register the OTG notifier.  On
 * notifier-registration failure the wakelock is destroyed and
 * otgwl_xceiv reset so later parameter writes are no-ops.  Finally the
 * transceiver's last event is replayed to seed the lock state.
 */
static int __init otg_wakelock_init(void)
{
	int ret;

	otgwl_xceiv = usb_get_transceiver();

	if (!otgwl_xceiv) {
		pr_err("%s: No USB transceiver found\n", __func__);
		return -ENODEV;
	}

	snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
		 dev_name(otgwl_xceiv->dev));
	wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
		       vbus_lock.name);

	otgwl_nb.notifier_call = otgwl_otg_notifications;
	ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);

	if (ret) {
		pr_err("%s: usb_register_notifier on transceiver %s"
		       " failed\n", __func__,
		       dev_name(otgwl_xceiv->dev));
		otgwl_xceiv = NULL;
		wake_lock_destroy(&vbus_lock.wakelock);
		return ret;
	}

	otgwl_handle_event(otgwl_xceiv->last_event);
	return ret;
}

late_initcall(otg_wakelock_init);
diff --git a/drivers/video/cea861_modedb.h b/drivers/video/cea861_modedb.h
new file mode 100644
index 00000000000000..9828ce3a945956
--- /dev/null
+++ b/drivers/video/cea861_modedb.h
@@ -0,0 +1 @@
/* CEA-861 video mode table; defined elsewhere in drivers/video — TODO confirm location. */
extern const struct fb_videomode cea_modes[];
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
new file mode 100644
index 00000000000000..9e758a8f890d3d
--- /dev/null
+++ b/drivers/video/hdmi.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/hdmi.h>
+#include <linux/string.h>
+
+static void hdmi_infoframe_checksum(void *buffer, size_t size)
+{
+ u8 *ptr = buffer;
+ u8 csum = 0;
+ size_t i;
+
+ /* compute checksum */
+ for (i = 0; i < size; i++)
+ csum += ptr[i];
+
+ ptr[3] = 256 - csum;
+}
+
+/**
+ * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
/**
 * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
 * @frame: HDMI AVI infoframe
 *
 * Zeroes the structure and sets the AVI type, version 2 and the
 * standard AVI payload length.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_AVI;
	frame->version = 2;
	frame->length = HDMI_AVI_INFOFRAME_SIZE;

	return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+
+/**
+ * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
/**
 * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
 * @frame: HDMI AVI infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure.
 */
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
				size_t size)
{
	u8 *ptr = buffer;
	size_t length;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	memset(buffer, 0, size);

	/* 4-byte header: type, version, payload length, checksum slot */
	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum */

	/* start infoframe payload */
	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);

	/*
	 * Data byte 1, bit 4 has to be set if we provide the active format
	 * aspect ratio
	 */
	if (frame->active_aspect & 0xf)
		ptr[0] |= BIT(4);

	/* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
	if (frame->top_bar || frame->bottom_bar)
		ptr[0] |= BIT(3);

	if (frame->left_bar || frame->right_bar)
		ptr[0] |= BIT(2);

	ptr[1] = ((frame->colorimetry & 0x3) << 6) |
		 ((frame->picture_aspect & 0x3) << 4) |
		 (frame->active_aspect & 0xf);

	ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
		 ((frame->quantization_range & 0x3) << 2) |
		 (frame->nups & 0x3);

	/* IT content flag */
	if (frame->itc)
		ptr[2] |= BIT(7);

	ptr[3] = frame->video_code & 0x7f;

	ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
		 ((frame->content_type & 0x3) << 4) |
		 (frame->pixel_repeat & 0xf);

	/* bar positions are little-endian 16-bit values */
	ptr[5] = frame->top_bar & 0xff;
	ptr[6] = (frame->top_bar >> 8) & 0xff;
	ptr[7] = frame->bottom_bar & 0xff;
	ptr[8] = (frame->bottom_bar >> 8) & 0xff;
	ptr[9] = frame->left_bar & 0xff;
	ptr[10] = (frame->left_bar >> 8) & 0xff;
	ptr[11] = frame->right_bar & 0xff;
	ptr[12] = (frame->right_bar >> 8) & 0xff;

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
+
+/**
+ * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ * @vendor: vendor string
+ * @product: product string
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
/**
 * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
 * @frame: HDMI SPD infoframe
 * @vendor: vendor string
 * @product: product string
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
			    const char *vendor, const char *product)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_SPD;
	frame->version = 1;
	frame->length = HDMI_SPD_INFOFRAME_SIZE;

	/*
	 * NOTE: strncpy() does not NUL-terminate when the source fills the
	 * field; that is fine here because hdmi_spd_infoframe_pack() copies
	 * the fields with memcpy(sizeof(...)), never treating them as
	 * C strings.  Longer inputs are silently truncated.
	 */
	strncpy(frame->vendor, vendor, sizeof(frame->vendor));
	strncpy(frame->product, product, sizeof(frame->product));

	return 0;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+
+/**
+ * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
/**
 * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
 * @frame: HDMI SPD infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure.
 */
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
				size_t size)
{
	u8 *ptr = buffer;
	size_t length;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	memset(buffer, 0, size);

	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum */

	/* start infoframe payload */
	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	/* fixed-width vendor (8 bytes) and product (16 bytes) fields */
	memcpy(ptr, frame->vendor, sizeof(frame->vendor));
	memcpy(ptr + 8, frame->product, sizeof(frame->product));

	/* source device information code */
	ptr[24] = frame->sdi;

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
+
+/**
+ * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
/**
 * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
 * @frame: HDMI audio infoframe
 *
 * Zeroes the structure and sets the audio type, version 1 and the
 * standard audio payload length.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
	frame->version = 1;
	frame->length = HDMI_AUDIO_INFOFRAME_SIZE;

	return 0;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+
+/**
+ * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * @frame: HDMI audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
/**
 * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
 * @frame: HDMI audio infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure.
 */
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
				  void *buffer, size_t size)
{
	unsigned char channels;
	u8 *ptr = buffer;
	size_t length;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	memset(buffer, 0, size);

	/*
	 * The CC field encodes channel count minus one; 0 is the "refer to
	 * stream header" code, used when fewer than 2 channels are given.
	 */
	if (frame->channels >= 2)
		channels = frame->channels - 1;
	else
		channels = 0;

	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum */

	/* start infoframe payload */
	ptr += HDMI_INFOFRAME_HEADER_SIZE;

	ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
	ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
		 (frame->sample_size & 0x3);
	ptr[2] = frame->coding_type_ext & 0x1f;
	ptr[3] = frame->channel_allocation;
	ptr[4] = (frame->level_shift_value & 0xf) << 3;

	if (frame->downmix_inhibit)
		ptr[4] |= BIT(7);

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
+
+/**
+ * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
+ * @frame: HDMI vendor infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
/**
 * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
 * @frame: HDMI vendor infoframe
 *
 * Zeroes the structure, sets type/version, the HDMI IEEE OUI, and marks
 * the 3D structure field as "not set".
 *
 * Returns 0 on success or a negative error code on failure.
 */
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));

	frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
	frame->version = 1;

	frame->oui = HDMI_IEEE_OUI;

	/*
	 * 0 is a valid value for s3d_struct, so we use a special "not set"
	 * value
	 */
	frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;

	return 0;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+
+/**
+ * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
/**
 * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
 * @frame: HDMI infoframe
 * @buffer: destination buffer
 * @size: size of buffer
 *
 * Packs the information contained in the @frame structure into a binary
 * representation that can be written into the corresponding controller
 * registers. Also computes the checksum as required by section 5.3.5 of
 * the HDMI 1.4 specification.
 *
 * Exactly one of @frame->vic and @frame->s3d_struct must be supplied;
 * -EINVAL is returned otherwise.
 *
 * Returns the number of bytes packed into the binary buffer or a negative
 * error code on failure.
 */
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
				   void *buffer, size_t size)
{
	u8 *ptr = buffer;
	size_t length;

	/* empty info frame */
	if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
		return -EINVAL;

	/* only one of those can be supplied */
	if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
		return -EINVAL;

	/* for side by side (half) we also need to provide 3D_Ext_Data */
	if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
		frame->length = 6;
	else
		frame->length = 5;

	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;

	if (size < length)
		return -ENOSPC;

	memset(buffer, 0, size);

	ptr[0] = frame->type;
	ptr[1] = frame->version;
	ptr[2] = frame->length;
	ptr[3] = 0; /* checksum */

	/* HDMI OUI (0x000c03, transmitted least-significant byte first) */
	ptr[4] = 0x03;
	ptr[5] = 0x0c;
	ptr[6] = 0x00;

	if (frame->vic) {
		ptr[7] = 0x1 << 5;	/* video format */
		ptr[8] = frame->vic;
	} else {
		ptr[7] = 0x2 << 5;	/* video format */
		ptr[8] = (frame->s3d_struct & 0xf) << 4;
		if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
			ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
	}

	hdmi_infoframe_checksum(buffer, length);

	return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+}
+
+/**
+ * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
diff --git a/drivers/video/logo/logo_parrot_clut224.ppm b/drivers/video/logo/logo_parrot_clut224.ppm
new file mode 100644
index 00000000000000..656b9927767c2c
--- /dev/null
+++ b/drivers/video/logo/logo_parrot_clut224.ppm
@@ -0,0 +1,592 @@
+P3
+# 224-color Parrot logo
+80 80
+255
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+16 43 24 0 113 58 0 122 64 0 151 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 154 80 0 122 64 0 120 62 0 120 62 8 85 44 8 85 44 11 73 39 16 49 29 0 14 7
+0 14 7 0 14 7 0 14 7 0 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 49 29
+0 113 58 0 158 82 0 161 84 0 161 84 0 160 83 0 155 81 0 153 80 0 152 79
+0 154 80 0 154 80 0 154 80 0 155 81 0 155 81 0 156 81 0 156 81 0 157 82
+0 152 79 0 146 76 0 133 70 0 122 64 0 120 62 7 100 53 11 73 39 16 43 24
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 16 43 24 11 73 39 0 122 64 0 151 79
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 153 80 0 156 81
+0 158 82 0 161 84 3 130 68 8 85 44 0 14 7 0 11 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 0 133 70 0 154 80
+0 160 83 0 157 82 0 156 81 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 154 80 0 156 81 0 158 82
+0 133 70 11 73 39 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 49 29 0 113 58 0 113 58 0 113 58 0 113 58 0 155 81
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 155 81 0 122 64 0 3 1 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7
+7 100 53 0 133 70 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 152 79 0 156 81 3 130 68 0 14 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 155 81 0 152 79 0 152 79 0 143 75
+0 152 79 0 153 80 0 153 80 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79
+0 113 58 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 49 29 3 130 68 0 152 79 0 154 80 0 158 82 0 161 84
+0 145 76 0 122 64 0 120 62 7 100 53 7 100 53 0 122 64 0 133 70 0 153 80
+0 159 83 0 154 80 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 160 83 11 73 39 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 49 29 0 153 80 0 154 80
+0 156 81 3 130 68 11 73 39 0 14 7 0 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 3 1 16 49 29 0 143 75 0 155 81 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 151 79 0 14 7 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 8 85 44 16 49 29
+0 3 1 0 11 5 16 43 24 11 73 39 7 100 53 0 133 70 0 145 76 0 161 84 0 161 84
+0 161 84 3 130 68 8 85 44 16 49 29 0 11 5 16 43 24 0 113 58 0 154 80 0 154 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+7 100 53 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 3 1 11 73 39 0 133 70 0 145 76 0 150 78 0 154 80 0 153 80 0 153 80
+0 153 80 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 145 76
+8 85 44 0 0 0 0 14 7 0 143 75 0 153 80 0 152 79 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 159 83 0 14 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 14 7 16 49 29 0 113 58 0 161 84 0 158 82 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 152 79 0 156 81 0 150 78 16 43 24 0 0 0 7 100 53
+0 152 79 0 153 80 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 154 80
+0 113 58 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 113 58 0 143 75
+0 154 80 0 154 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 154 80 0 120 62 0 0 0 16 43 24 0 159 83
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 145 76 0 14 7 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 11 5 16 43 24 0 122 64 0 160 83 0 154 80 0 153 80 0 153 80 0 152 79
+0 153 80 0 152 80 0 152 80 0 151 80 0 152 80 0 153 79 0 152 80 0 152 80
+0 153 79 0 153 79 0 153 80 0 152 80 0 152 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 154 80 0 150 78 0 14 7 0 14 7 0 143 75 0 154 80
+0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 16 43 24 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 85 44 0 133 70
+0 156 81 0 154 80 0 152 79 0 153 80 0 153 80 0 157 82 0 160 83 0 143 75
+40 167 76 113 176 60 177 182 53 103 170 61 0 150 81 0 150 81 103 170 61
+58 161 68 0 153 79 0 151 80 0 151 80 5 154 79 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 152 79 0 153 80 0 146 76 16 49 29 0 11 5 0 133 70
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 7 100 53 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 5 11 73 39 0 150 78 0 155 81
+0 152 79 0 153 80 0 154 80 0 153 79 0 145 76 7 139 70 43 88 46 134 81 37
+220 191 46 255 224 44 255 219 44 229 207 47 75 162 69 165 179 52 247 214 45
+75 162 69 0 153 80 23 159 77 75 162 69 58 161 68 0 151 80 2 153 79 2 153 79
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 154 80
+16 43 24 0 3 1 0 133 70 0 154 80 0 153 80 0 153 80 0 153 80 0 159 83 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 0 14 7 0 14 7 16 49 29 0 14 7 0 11 5
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39
+0 133 70 0 157 82 0 153 80 0 152 79 0 158 82 0 150 78 30 110 54 92 51 36
+136 4 37 186 95 36 220 191 46 255 221 44 255 228 45 247 212 45 254 217 44
+255 218 44 255 220 43 255 219 44 251 217 45 202 194 50 202 194 50 251 217 45
+40 167 76 14 156 77 103 170 61 122 174 62 5 154 79 0 151 80 0 151 80 0 153 79
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 16 49 29 0 11 5 3 130 68
+0 152 79 0 153 80 0 152 79 0 159 83 16 43 24 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7
+0 113 58 0 150 78 0 161 84 0 161 84 0 160 83 0 143 75 8 85 44 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 16 43 24 0 157 82 0 154 80 0 152 79
+0 153 80 0 151 79 2 150 78 61 77 45 148 5 34 172 6 37 170 6 37 186 95 36
+255 226 45 252 215 44 247 212 45 186 95 36 220 191 46 255 218 45 255 218 45
+252 215 44 247 212 45 255 218 45 255 218 45 255 218 45 254 217 45 255 220 43
+255 220 43 247 214 45 0 151 80 7 156 79 64 163 71 75 162 69 0 151 80 0 152 80
+0 153 79 0 153 80 0 153 80 0 153 80 0 152 79 0 151 79 0 14 7 0 14 7 0 150 78
+0 153 80 0 153 80 0 155 81 8 85 44 0 0 0 0 0 0 0 0 0 0 14 7 0 152 79 0 153 80
+61 77 45 92 51 36 61 77 45 0 145 76 0 153 80 2 150 78 11 73 39 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 43 24 0 120 62 0 159 83 0 153 80 0 156 81 0 160 83
+30 110 54 92 51 36 160 5 35 174 6 37 167 5 37 164 0 36 186 95 36 196 173 49
+186 95 36 174 6 37 186 95 36 252 215 44 255 228 45 255 226 45 196 173 49
+220 191 46 247 212 45 255 218 45 255 222 45 252 215 44 254 217 44 255 218 45
+255 218 44 251 217 45 237 211 46 251 217 45 113 176 60 0 150 81 0 150 81
+40 167 76 0 152 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 150 78
+0 14 7 0 14 7 0 143 75 0 152 79 0 152 79 0 122 64 0 0 0 0 0 0 0 3 1 0 143 75
+0 150 78 118 32 31 196 173 49 90 70 36 186 95 36 142 18 31 44 103 53 176 158 43
+220 191 46 13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 85 44 0 156 81 0 153 80 0 152 79
+0 152 79 7 100 53 95 5 23 166 5 36 167 6 37 167 6 37 166 5 37 167 6 37
+167 6 37 166 5 36 166 5 36 167 6 37 164 0 36 186 95 36 220 191 46 186 95 36
+166 5 36 196 173 49 255 219 45 254 217 44 254 217 45 196 173 49 220 191 46
+255 218 45 255 218 45 255 218 45 254 217 44 255 218 45 255 220 43 122 174 62
+122 174 62 208 203 51 17 157 77 0 152 80 0 152 80 0 153 79 0 153 80 0 153 80
+0 153 80 0 152 79 0 146 76 0 11 5 16 49 29 0 155 81 0 153 80 0 133 70 0 3 1
+0 0 0 0 133 70 0 160 83 92 51 36 186 95 36 220 191 46 0 0 0 86 55 31 166 6 36
+202 186 45 254 217 44 255 225 46 125 106 44 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 0 122 64 0 161 84
+0 158 82 0 122 64 11 73 39 69 24 21 156 5 35 172 6 37 171 6 37 168 6 36
+136 4 37 148 5 34 167 6 37 167 6 37 166 5 37 168 6 36 166 5 37 167 6 37
+164 0 36 161 0 36 164 0 36 166 5 36 186 95 36 186 95 36 186 95 36 174 6 37
+220 191 46 255 228 45 255 226 45 247 212 45 220 191 46 255 219 45 255 218 45
+254 217 45 255 220 43 255 222 43 40 167 76 0 150 81 14 156 77 14 156 77
+0 152 80 0 153 80 0 153 80 0 153 80 0 153 80 0 154 80 0 143 75 0 0 0 11 73 39
+0 154 80 0 143 75 0 11 5 16 43 24 0 153 80 3 130 68 171 6 37 186 95 36
+255 228 45 176 158 43 196 173 49 186 95 36 255 226 45 255 218 45 255 221 44
+176 158 43 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 3 1 0 146 76 0 154 80 0 113 58 16 43 24 31 9 9 163 5 38 166 5 36
+168 6 37 168 6 37 97 2 24 54 1 16 160 5 35 166 5 36 167 6 36 167 5 37 168 6 37
+148 5 34 168 6 36 167 6 37 167 6 37 167 6 37 167 6 37 167 6 37 166 5 36
+166 5 36 166 5 36 186 95 36 220 191 46 220 191 46 186 95 36 186 95 36
+254 217 44 255 218 45 254 217 45 255 218 45 254 217 45 255 219 44 147 184 56
+147 184 56 183 189 50 0 152 80 0 152 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 152 79 0 153 80 0 120 62 0 0 0 0 120 62 0 159 83 0 14 7 7 100 53 0 153 80
+61 77 45 167 5 37 166 5 36 174 6 37 186 95 36 142 18 31 247 212 45 255 218 45
+255 218 45 255 225 46 134 81 37 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 3 1 0 0 0 13 0 3 125 4 28
+113 5 27 95 5 23 54 1 16 0 0 0 78 3 19 174 6 37 168 6 36 174 6 37 156 5 35
+115 3 30 113 5 27 143 6 32 167 5 37 166 5 36 168 6 37 166 6 36 167 5 37
+167 6 37 167 6 37 167 6 37 167 6 37 164 0 36 161 0 36 161 0 36 186 95 36
+255 224 44 255 223 45 255 222 45 220 191 46 247 212 45 255 218 45 254 217 45
+255 220 43 237 211 46 5 154 79 0 152 80 0 152 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 152 79 0 159 83 8 85 44 0 3 1 16 43 24 0 11 5 0 158 82
+0 153 80 90 70 36 167 6 37 167 6 37 166 5 36 164 0 36 134 81 37 176 158 43
+247 212 45 255 221 44 255 225 46 31 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 8 0 1 168 6 36 174 6 37 148 5 34 62 5 15 7 0 2 115 3 30
+165 5 35 167 6 37 167 6 37 172 6 37 139 4 30 115 3 30 169 6 37 167 6 37
+167 6 37 167 6 37 167 6 37 167 6 37 167 6 37 164 0 36 186 95 36 220 191 46
+186 95 36 174 6 37 220 191 46 254 217 44 254 217 45 255 218 45 255 218 44
+208 203 51 40 167 76 40 167 76 14 156 77 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 152 79 0 153 80 7 100 53 11 73 39 0 143 75 0 153 80
+0 152 79 44 103 53 170 6 37 166 5 36 167 6 37 148 5 34 232 205 46 232 205 46
+35 27 16 176 158 43 176 158 43 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 0 2 8 0 1 0 0 0 0 0 0 80 1 21 167 6 36 160 5 35 143 6 32
+115 3 30 54 1 16 104 4 26 170 6 37 166 5 36 167 5 37 170 6 37 156 5 35
+168 6 38 167 6 37 167 6 37 167 6 37 164 0 36 161 0 36 164 0 36 196 173 49
+255 221 44 255 219 45 255 219 45 232 205 46 255 219 45 255 218 44 255 222 43
+233 209 46 14 156 77 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 152 79 0 156 81 0 158 82 0 153 80 0 153 80 0 153 80 0 156 81
+118 32 31 171 6 37 167 5 37 134 81 37 255 228 45 255 219 45 202 186 45
+13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 8 0 1 13 0 3 13 0 3 7 0 2 0 0 0 125 4 28 169 6 37
+169 6 37 174 6 37 169 6 37 97 2 24 163 5 38 167 6 37 167 6 37 167 6 37
+167 6 37 167 6 37 166 5 36 174 6 37 255 231 45 255 228 45 220 191 46
+186 95 36 247 212 45 255 218 45 255 218 45 255 221 43 23 159 77 0 152 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 7 139 70 142 18 31
+164 0 36 196 173 49 255 228 45 255 222 45 125 106 44 8 0 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 7 0 2 136 4 37 128 4 30 102 2 25 55 2 14 13 0 3 148 5 34
+167 6 36 166 5 37 167 6 37 168 6 37 167 5 37 166 6 36 167 6 37 166 5 36
+174 6 37 174 6 37 172 6 37 252 215 44 255 218 45 254 217 44 255 218 45
+255 222 43 58 161 68 23 159 77 0 152 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 154 80 43 88 46 134 81 37 125 106 44 8 0 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 13 0 3
+156 5 35 168 6 36 167 5 37 170 6 37 168 6 36 115 3 30 165 5 35 167 6 37
+167 6 37 166 5 36 166 5 36 171 6 37 232 205 46 255 219 45 254 217 44
+255 226 45 247 212 45 220 191 46 255 218 44 208 203 51 0 150 81 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 154 80 0 156 81 0 160 83 7 100 53 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 82 3 18 156 5 35 156 5 35 148 5 34 102 2 25 62 5 15 160 5 35 167 5 37
+166 6 36 167 6 37 167 6 37 167 6 37 174 6 37 247 212 45 247 212 45 232 205 46
+186 95 36 186 95 36 255 228 45 255 218 44 208 203 51 0 150 81 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 152 79 0 153 80 0 150 78 25 140 67 39 114 57 28 91 46 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 0 2 13 0 3 13 0 3 7 0 2 74 7 16 166 5 36 167 6 37 167 6 37 166 5 37
+169 6 37 166 5 36 167 6 37 167 6 37 174 6 37 174 6 37 161 0 36 186 95 36
+255 222 45 254 217 44 255 218 44 147 184 56 0 151 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 155 81
+0 133 70 165 179 52 232 205 46 255 218 44 31 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 31 9 9 169 5 39 167 6 37 167 6 37 166 6 36 160 5 35 128 4 30
+169 5 39 166 6 36 167 6 37 167 6 37 166 5 36 186 95 36 255 231 45 254 217 44
+255 218 45 255 218 45 64 163 71 0 153 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 152 79 0 153 80 0 122 64 196 173 49 255 219 45
+255 219 45 196 173 49 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 97 2 24
+174 6 37 174 6 37 136 4 37 78 3 19 97 2 24 159 4 34 166 5 36 167 6 37 167 6 37
+167 6 37 174 6 37 247 212 45 255 218 45 255 223 45 255 218 45 186 95 36
+14 156 77 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 152 79 0 154 80 7 139 70 202 186 45 255 223 45 254 217 44 255 225 46
+35 27 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 74 7 16 167 6 37 166 5 36 167 6 37 166 5 37 167 6 36 167 6 36 174 6 37
+247 212 45 247 212 45 196 173 49 174 6 37 61 77 45 0 156 81 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 145 76 176 158 43
+255 218 45 255 218 45 255 218 45 176 158 43 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 74 7 16 174 6 37 167 5 37 167 6 37
+167 6 36 172 6 37 163 5 38 160 5 35 166 5 36 161 0 36 161 0 36 164 0 36
+142 18 31 0 152 78 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 151 79 107 141 56 255 228 45 254 217 44 254 217 44
+255 225 46 57 37 24 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 13 0 3 156 5 35 167 5 37 166 5 37 167 6 36 169 6 37 97 2 24
+113 5 27 168 6 37 167 6 37 167 6 37 166 6 36 167 5 37 44 103 53 0 153 80
+0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79
+5 154 79 220 191 46 255 218 45 255 218 45 255 222 45 121 125 48 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 0 2 97 2 24
+125 4 28 115 3 30 80 1 21 0 0 0 91 3 20 174 6 37 166 5 36 167 6 37 167 6 37
+171 6 37 92 51 36 0 156 81 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 156 81 57 118 51 255 218 44 255 218 45 254 217 44
+220 191 46 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 95 5 23 169 6 37 166 5 36 167 6 37
+167 6 37 167 6 37 142 18 31 2 150 78 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 0 155 84 176 158 43 255 219 45
+254 217 44 255 231 45 37 15 12 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 62 5 15 172 6 37
+166 5 36 167 6 37 167 6 37 167 5 37 159 4 34 3 130 68 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 120 62 255 219 45 255 218 45 252 215 44 86 55 31 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 160 5 35 166 5 37 167 6 37 166 5 37 167 6 36 165 5 35 30 110 54
+0 155 81 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 67 147 57 255 223 45 255 231 45 57 37 24 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 62 5 15 174 6 37 170 6 37 167 6 37 148 5 34 115 3 30
+28 91 46 0 154 80 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 130 165 55 220 191 46 37 15 12
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 0 1 74 7 16 74 7 16 36 0 7 13 0 3 8 85 44
+0 156 81 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 152 79 66 93 42 13 0 3 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 156 81 0 152 79
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 152 79 0 154 80 0 133 70 0 11 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 157 82 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 152 79
+0 156 81 0 113 58 0 11 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 16 43 24 0 157 82 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 151 79 7 100 53
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39
+0 159 83 0 152 79 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80 0 153 80
+0 153 80 0 153 80 0 156 81 0 160 83 0 113 58 16 43 24 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 158 82 0 153 80
+0 153 79 0 152 80 0 152 80 0 152 80 0 152 80 0 153 79 2 153 79 2 150 78
+2 150 78 30 110 54 0 14 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 85 44 0 156 81 0 152 79 0 153 79 0 152 80
+5 154 79 103 170 61 113 176 60 122 174 62 183 189 50 176 158 43 142 18 31
+156 5 35 39 0 8 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 100 53 0 154 80 0 153 80 0 153 80 0 152 80 14 156 77
+223 201 47 255 218 44 254 217 45 255 218 44 220 191 46 168 6 37 171 6 37
+36 0 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 3 1 7 100 53 0 156 81 0 152 79 0 153 79 0 150 81 40 167 76 229 207 47
+255 218 44 254 217 45 255 222 45 220 191 46 174 6 37 160 5 35 40 1 11 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 133 70
+0 153 80 0 152 79 0 153 79 0 152 80 75 162 69 247 214 45 255 218 44 254 217 44
+255 222 45 220 191 46 169 6 37 156 5 35 13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 0 143 75 0 155 81 0 153 80
+0 153 79 0 152 80 103 170 61 255 218 44 254 217 45 254 217 44 255 221 44
+186 95 36 164 0 36 148 5 34 31 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 145 76 0 153 80 0 152 79 0 153 79
+0 150 81 165 179 52 254 217 45 255 218 44 255 218 45 255 231 45 186 95 36
+166 5 36 148 5 34 13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 43 24 0 159 83 0 152 79 0 153 79 0 152 80 14 156 77
+202 194 50 255 220 43 254 217 45 255 219 45 247 212 45 186 95 36 164 0 36
+127 4 30 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 11 73 39 0 152 79 0 153 80 0 153 80 0 152 80 23 159 77 223 201 47
+255 218 44 254 217 45 255 218 44 247 212 45 174 6 37 174 6 37 78 3 19 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39
+0 155 81 0 152 79 0 153 79 0 152 80 40 167 76 247 214 45 255 218 44 254 217 44
+255 219 45 220 191 46 174 6 37 163 5 38 54 1 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 100 53 0 156 81 0 153 80
+0 153 79 0 150 81 103 170 61 251 217 45 255 218 44 254 217 44 255 231 45
+186 95 36 169 6 37 156 5 35 36 0 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 113 58 0 155 81 0 153 79 0 152 80
+0 153 79 122 174 62 255 221 43 254 217 45 255 218 45 252 215 44 186 95 36
+164 0 36 148 5 34 7 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 11 5 0 122 64 0 156 81 0 152 79 0 152 80 7 156 79 183 189 50
+255 219 44 254 217 45 255 219 45 232 205 46 186 95 36 174 6 37 95 5 23 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7
+0 150 78 0 153 80 0 153 79 0 152 80 20 160 78 229 207 47 255 218 44 254 217 44
+255 218 44 232 205 46 172 6 37 163 5 38 62 5 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 133 70 0 153 80
+0 153 79 0 151 80 64 163 71 233 209 46 255 218 44 255 218 45 255 228 45
+186 95 36 164 0 36 139 3 31 36 0 7 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 151 79 0 153 80 0 153 79
+0 152 80 64 163 71 255 218 44 255 218 44 254 217 44 254 217 44 186 95 36
+167 5 37 128 4 30 7 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 152 79 0 153 80 0 152 80 0 152 80 147 184 56
+255 222 43 254 217 45 255 222 45 220 191 46 174 6 37 169 5 39 78 3 19 0 3 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+11 73 39 0 153 80 0 153 80 0 153 79 0 152 80 202 194 50 255 218 44 254 217 45
+255 223 45 220 191 46 170 6 37 156 5 35 13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 100 53 0 161 84
+0 152 79 0 151 80 23 159 77 208 203 51 255 218 44 255 219 45 254 217 45
+186 95 36 164 0 36 125 4 28 13 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 0 113 58 0 154 80 0 152 79 0 152 80
+40 167 76 237 211 46 255 218 44 255 218 45 247 212 45 174 6 37 174 6 37
+80 1 21 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 14 7 0 120 62 0 155 81 0 152 79 0 152 80 75 162 69
+255 222 43 255 218 44 255 219 45 220 191 46 174 6 37 143 6 32 36 0 7 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 14 7 0 133 70 0 155 81 0 152 80 0 150 81 147 184 56 255 218 44 255 218 45
+255 231 45 186 95 36 171 6 37 102 2 25 8 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 0 146 76 0 153 80
+0 152 80 17 157 77 183 189 50 255 220 43 255 221 44 247 212 45 174 6 37
+161 0 36 80 1 21 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 145 76 0 154 80 0 152 80 40 167 76
+229 207 47 255 218 44 255 223 45 176 158 43 118 13 28 97 2 24 13 0 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 16 49 29 0 161 84 0 153 79 0 152 80 40 167 76 254 217 45 255 218 45
+252 216 44 125 106 44 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 73 39 0 153 80
+0 152 79 0 150 81 113 176 60 255 218 44 255 223 45 232 205 46 51 57 28
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 85 44 0 155 81 0 153 80 0 150 78 130 165 55
+254 217 44 247 212 45 196 173 49 0 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 1 7 100 53
+0 158 82 0 154 80 0 143 75 16 43 24 0 0 0 0 3 1 18 20 12 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 122 64 0 153 80 0 153 80 0 133 70 0 11 5 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 7 0 122 64 0 157 82 0 157 82 7 100 53
+0 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 5 0 150 78 0 153 80
+0 160 83 11 73 39 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 11 5
+0 150 78 0 158 82 0 133 70 16 49 29 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 16 43 24 0 14 7 0 11 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0
diff --git a/drivers/video/mxc/Kconfig b/drivers/video/mxc/Kconfig
new file mode 100644
index 00000000000000..5adc5975940b15
--- /dev/null
+++ b/drivers/video/mxc/Kconfig
@@ -0,0 +1,116 @@
+config FB_MXC
+ tristate "MXC Framebuffer support"
+# depends on FB && (MXC_IPU || ARCH_MX21 || ARCH_MX27 || ARCH_MX25)
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS
+ default y
+ help
+ This is a framebuffer device for the MXC LCD Controller.
+ See <http://www.linux-fbdev.org/> for information on framebuffer
+ devices.
+
+ If you plan to use the LCD display with your MXC system, say
+ Y here.
+
+config FB_MXC_SYNC_PANEL
+ depends on FB_MXC
+ tristate "Synchronous Panel Framebuffer"
+ default y
+
+config FB_MXC_EPSON_VGA_SYNC_PANEL
+ depends on FB_MXC_SYNC_PANEL
+ tristate "Epson VGA Panel"
+ default n
+
+config FB_MXC_TVOUT_TVE
+ tristate "MXC TVE TV Out Encoder"
+ depends on FB_MXC_SYNC_PANEL
+ depends on MXC_IPU_V3
+
+config FB_MXC_LDB
+ tristate "MXC LDB"
+ depends on FB_MXC_SYNC_PANEL
+ depends on MXC_IPU_V3
+
+config FB_MXC_CLAA_WVGA_SYNC_PANEL
+ depends on FB_MXC_SYNC_PANEL
+ tristate "CLAA WVGA Panel"
+
+config FB_MXC_SIIHDMI
+ tristate "SII9xxx HDMI Interface Chips"
+# depends on FB_MXC_SYNC_PANEL && MACH_MX51_EFIKAMX
+ depends on FB_AVI
+ select HDMI
+ select SWITCH
+
+config FB_MXC_MTL017
+ depends on FB_MXC_SYNC_PANEL && MACH_MX51_EFIKASB
+ tristate "Myson MTL017 LVDS Controller"
+ default n
+
+config FB_MXC_CH7026
+ depends on FB_MXC_SYNC_PANEL
+ tristate "Chrontel CH7026 VGA Interface Chip"
+
+config FB_MXC_TVOUT_CH7024
+ tristate "CH7024 TV Out Encoder"
+ depends on FB_MXC_SYNC_PANEL
+
+config FB_MXC_LOW_PWR_DISPLAY
+ bool "Low Power Display Refresh Mode"
+ depends on FB_MXC_SYNC_PANEL && MXC_FB_IRAM
+ default y
+
+config FB_MXC_INTERNAL_MEM
+ bool "Framebuffer in Internal RAM"
+ depends on FB_MXC_SYNC_PANEL && MXC_FB_IRAM
+ default y
+
+config FB_MXC_ASYNC_PANEL
+ depends on FB_MXC
+ bool "Asynchronous Panels"
+ default n
+
+menu "Asynchronous Panel Type"
+ depends on FB_MXC_ASYNC_PANEL && FB_MXC
+
+config FB_MXC_EPSON_PANEL
+ depends on FB_MXC_ASYNC_PANEL
+ default n
+ bool "Epson 176x220 Panel"
+
+endmenu
+
+config FB_MXC_EINK_PANEL
+ depends on FB_MXC
+ depends on DMA_ENGINE
+ select FB_DEFERRED_IO
+ tristate "E-Ink Panel Framebuffer"
+
+config FB_MXC_EINK_AUTO_UPDATE_MODE
+ bool "E-Ink Auto-update Mode Support"
+ default n
+ depends on FB_MXC_EINK_PANEL
+
+config FB_MXC_ELCDIF_FB
+ depends on FB && ARCH_MXC
+ tristate "Support MXC ELCDIF framebuffer"
+
+choice
+ prompt "Async Panel Interface Type"
+ depends on FB_MXC_ASYNC_PANEL && FB_MXC
+ default FB_MXC_ASYNC_PANEL_IFC_16_BIT
+
+config FB_MXC_ASYNC_PANEL_IFC_8_BIT
+ bool "8-bit Parallel Bus Interface"
+
+config FB_MXC_ASYNC_PANEL_IFC_16_BIT
+ bool "16-bit Parallel Bus Interface"
+
+config FB_MXC_ASYNC_PANEL_IFC_SERIAL
+ bool "Serial Bus Interface"
+
+endchoice
diff --git a/drivers/video/mxc/Makefile b/drivers/video/mxc/Makefile
new file mode 100644
index 00000000000000..5bf77810318837
--- /dev/null
+++ b/drivers/video/mxc/Makefile
@@ -0,0 +1,26 @@
+#ifeq ($(CONFIG_ARCH_MX21)$(CONFIG_ARCH_MX27)$(CONFIG_ARCH_MX25),y)
+# obj-$(CONFIG_FB_MXC_TVOUT) += fs453.o
+# obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mx2fb.o mxcfb_modedb.o
+# obj-$(CONFIG_FB_MXC_EPSON_PANEL) += mx2fb_epson.o
+#else
+#ifeq ($(CONFIG_MXC_IPU_V1),y)
+# obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxcfb.o mxcfb_modedb.o
+#else
+# obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_ipuv3_fb.o
+#endif
+# obj-$(CONFIG_FB_MXC_EPSON_PANEL) += mxcfb_epson.o
+# obj-$(CONFIG_FB_MXC_EPSON_QVGA_PANEL) += mxcfb_epson_qvga.o
+# obj-$(CONFIG_FB_MXC_TOSHIBA_QVGA_PANEL) += mxcfb_toshiba_qvga.o
+# obj-$(CONFIG_FB_MXC_SHARP_128_PANEL) += mxcfb_sharp_128x128.o
+#endif
+#obj-$(CONFIG_FB_MXC_EPSON_VGA_SYNC_PANEL) += mxcfb_epson_vga.o
+#obj-$(CONFIG_FB_MXC_CLAA_WVGA_SYNC_PANEL) += mxcfb_claa_wvga.o
+#obj-$(CONFIG_FB_MXC_TVOUT_CH7024) += ch7024.o
+#obj-$(CONFIG_FB_MXC_TVOUT_TVE) += tve.o
+#obj-$(CONFIG_FB_MXC_LDB) += ldb.o
+#obj-$(CONFIG_FB_MXC_CH7026) += mxcfb_ch7026.o
+ccflags-y += -I$(srctree)/drivers/parrot
+obj-$(CONFIG_FB_MXC_SIIHDMI) += siihdmi.o
+#obj-$(CONFIG_FB_MXC_MTL017) += mxcfb_mtl017.o
+#obj-$(CONFIG_FB_MXC_EINK_PANEL) += mxc_epdc_fb.o
+#obj-$(CONFIG_FB_MXC_ELCDIF_FB) += mxc_elcdif_fb.o
diff --git a/drivers/video/mxc/siihdmi.c b/drivers/video/mxc/siihdmi.c
new file mode 100644
index 00000000000000..fa208b94e6edeb
--- /dev/null
+++ b/drivers/video/mxc/siihdmi.c
@@ -0,0 +1,1742 @@
+/* vim: set noet ts=8 sts=8 sw=8 : */
+/*
+ * Copyright Âİ 2010 Saleem Abdulrasool <compnerd@compnerd.org>.
+ * Copyright Âİ 2010 Genesi USA, Inc. <matt@genesi-usa.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/console.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/edid.h>
+#include <linux/cea861.h>
+#include <video/avifb.h>
+#include <linux/switch.h>
+#include <linux/hdmi.h>
+#include "../cea861_modedb.h"
+#include "siihdmi.h"
+
+
+/* logging helpers */
+#define CONTINUE(fmt, ...) printk(KERN_CONT fmt, ## __VA_ARGS__)
+#define DEBUG(fmt, ...) printk(KERN_DEBUG "SIIHDMI: " fmt, ## __VA_ARGS__)
+#define ERROR(fmt, ...) printk(KERN_ERR "SIIHDMI: " fmt, ## __VA_ARGS__)
+#define WARNING(fmt, ...) printk(KERN_WARNING "SIIHDMI: " fmt, ## __VA_ARGS__)
+#define INFO(fmt, ...) printk(KERN_INFO "SIIHDMI: " fmt, ## __VA_ARGS__)
+
+/* DDC segment for 4 block read */
+#define EDID_I2C_DDC_DATA_SEGMENT 0x30
+
+/* Default Monspecs when EDID is not available or no format is available in EDID */
+static struct fb_monspecs default_monspecs = {
+ .modedb = (struct fb_videomode *) &cea_modes[1],
+ .modedb_len = 1,
+};
+
+/* Aspect ratio extracted from CEA-861-D: needed for AVI Infoframe */
+static enum hdmi_picture_aspect cea_ratios[65] = {
+ HDMI_PICTURE_ASPECT_NONE, /* No VIC: default mode */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 1: 640x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 2: 720x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 3: 720x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 4: 1280x720p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 5: 1920x1080i @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 6: 720(1440)x480i @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 7: 720(1440)x480i @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 8: 720(1440)x240p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 9: 720(1440)x240p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 10: 2880x480i @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 11: 2880x480i @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 12: 2880x240p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 13: 2880x240p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 14: 1440x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 15: 1440x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 16: 1920x1080p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 17: 720x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 18: 720x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 19: 1280x720p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 20: 1920x1080i @ 50Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 21: 720(1440)x576i @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 22: 720(1440)x576i @ 50Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 23: 720(1440)x288p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 24: 720(1440)x288p @ 50Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 25: 2880x576i @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 26: 2880x576i @ 50Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 27: 2880x288p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 28: 2880x288p @ 50Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 29: 1440x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 30: 1440x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 31: 1920x1080p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 32: 1920x1080p @ 23.97Hz/24Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 33: 1920x1080p @ 25Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 34: 1920x1080p @ 29.97Hz/30Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 35: 2880x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 36: 2880x480p @ 59.94Hz/60Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 37: 2880x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 38: 2880x576p @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 39: 1920x1080i (1250 total) @ 50Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 40: 1920x1080i @ 100Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 41: 1280x720p @ 100Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 42: 720x576p @ 100Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 43: 720x576p @ 100Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 44: 720(1440)x576i @ 100Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 45: 720(1440)x576i @ 100Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 46: 1920x1080i @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 47: 1280x720p @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 48: 720x480p @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 49: 720x480p @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 50: 720(1440)x480i @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 51: 720(1440)x480i @ 119.88/120Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 52: 720x576p @ 200Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 53: 720x576p @ 200Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 54: 720(1440)x576i @ 200Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 55: 720(1440)x576i @ 200Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 56: 720x480p @ 239.76/240Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 57: 720x480p @ 239.76/240Hz */
+ HDMI_PICTURE_ASPECT_4_3, /* VIC 58: 720(1440)x480i @ 239.76/240Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 59: 720(1440)x480i @ 239.76/240Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 60: 1280x720p @ 24Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 61: 1280x720p @ 25Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 62: 1280x720p @ 30Hz */
+ HDMI_PICTURE_ASPECT_16_9, /* VIC 63: 1920x1080p @ 120Hz */
+ HDMI_PICTURE_ASPECT_16_9 /* VIC 64: 1920x1080p @ 100Hz */
+};
+
+/* module parameters */
+static unsigned int bus_timeout = 50;
+module_param(bus_timeout, uint, 0644);
+MODULE_PARM_DESC(bus_timeout, "bus timeout in milliseconds");
+
+static unsigned int seventwenty = 1;
+module_param(seventwenty, uint, 0644);
+MODULE_PARM_DESC(seventwenty, "attempt to use 720p mode");
+
+static unsigned int teneighty = 0;
+module_param(teneighty, uint, 0644);
+MODULE_PARM_DESC(teneighty, "attempt to use 1080p mode");
+
+static unsigned int useitmodes = 1;
+module_param(useitmodes, uint, 0644);
+MODULE_PARM_DESC(useitmodes, "prefer IT modes over CEA modes when sanitizing the modelist");
+
+static unsigned int modevic = 0;
+module_param_named(vic, modevic, uint, 0644);
+MODULE_PARM_DESC(modevic, "CEA VIC to try and match before autodetection");
+
+static int siihdmi_detect_revision(struct siihdmi_tx *tx)
+{
+ u8 data;
+ unsigned long start;
+
+ start = jiffies;
+ do {
+ data = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_DEVICE_ID);
+ } while (data != SIIHDMI_DEVICE_ID_902x &&
+ !time_after(jiffies, start + bus_timeout));
+
+ if (data != SIIHDMI_DEVICE_ID_902x)
+ return -ENODEV;
+
+ INFO("Device ID: %#02x", data);
+
+ data = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_DEVICE_REVISION);
+ if (data)
+ CONTINUE(" (rev %01u.%01u)",
+ (data >> 4) & 0xf, (data >> 0) & 0xf);
+
+ data = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_TPI_REVISION);
+ CONTINUE(" (%s",
+ (data & SIIHDMI_VERSION_FLAG_VIRTUAL) ? "Virtual " : "");
+ data &= ~SIIHDMI_VERSION_FLAG_VIRTUAL;
+ data = data ? data : SIIHDMI_BASE_TPI_REVISION;
+ CONTINUE("TPI revision %01u.%01u)",
+ (data >> 4) & 0xf, (data >> 0) & 0xf);
+
+ data = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_HDCP_REVISION);
+ if (data)
+ CONTINUE(" (HDCP version %01u.%01u)",
+ (data >> 4) & 0xf, (data >> 0) & 0xf);
+
+ CONTINUE("\n");
+
+ return 0;
+}
+
+static inline int siihdmi_power_up(struct siihdmi_tx *tx)
+{
+ int ret;
+
+ DEBUG("Powering up transmitter\n");
+
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_PWR_STATE,
+ SIIHDMI_POWER_STATE_D0);
+ if (ret < 0)
+ ERROR("unable to power up transmitter\n");
+
+ return ret;
+}
+
+static inline int siihdmi_power_down(struct siihdmi_tx *tx)
+{
+ int ret;
+ u8 ctrl;
+
+ DEBUG("Powering down transmitter\n");
+
+#ifdef SIIHDMI_USE_FB
+ memset((void *) &tx->sink.current_mode, 0, sizeof(struct fb_videomode));
+#endif
+
+ ctrl = SIIHDMI_SYS_CTRL_TMDS_OUTPUT_POWER_DOWN;
+ if (tx->sink.type == SINK_TYPE_HDMI)
+ ctrl |= SIIHDMI_SYS_CTRL_OUTPUT_MODE_SELECT_HDMI;
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL, ctrl);
+ if (ret < 0) {
+ ERROR("unable to power down transmitter\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_PWR_STATE,
+ SIIHDMI_POWER_STATE_D2);
+ if (ret < 0) {
+ ERROR("unable to set transmitter into D2\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int siihdmi_check_sync_mux_mode(struct siihdmi_tx *tx)
+{
+ int ret;
+ u8 value;
+
+ /* Configure only when mux */
+ if (tx->platform->input_format != YUV_422_8_MUX)
+ return 0;
+
+ /* Check sync mux mode activated */
+ value = i2c_smbus_read_byte_data(tx->client,
+ 0x60);
+ if ((value & 1 << 5) && (value & 1 << 7))
+ return 0;
+
+ /* Restore explicitly, if necessary, TPI 0x60[5] to enable YC Mux mode */
+ value |= 1 << 5; /* YC mux mode one-to-two data channel demux enable */
+ value |= 1 << 7; /* External sync method */
+ ret = i2c_smbus_write_byte_data(tx->client,
+ 0x60,
+ value);
+ if (ret < 0) {
+ WARNING("failed to configure syn mux mode: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int siihdmi_check_embsync_extract(struct siihdmi_tx *tx)
+{
+ int ret;
+ u8 value;
+
+ /* Configure only when mux */
+ if (tx->platform->input_format != YUV_422_8_MUX)
+ return 0;
+
+ siihdmi_check_sync_mux_mode(tx);
+
+ /* Check embedded sync extraction */
+ value = i2c_smbus_read_byte_data(tx->client,
+ 0x63);
+ if (value & 1 << 6)
+ return 0;
+
+ value |= 1 << 6;
+ ret = i2c_smbus_write_byte_data(tx->client,
+ 0x63,
+ value);
+ if (ret < 0) {
+ WARNING("failed to activate de generator: %d\n", ret);
+ return ret;
+ }
+
+ /* Check again .. */
+ siihdmi_check_sync_mux_mode(tx);
+ return 0;
+}
+
+static int siihdmi_initialise(struct siihdmi_tx *tx)
+{
+ int ret;
+ u8 value;
+
+ ret = i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_RQB, 0x00);
+ if (ret < 0) {
+ WARNING("unable to initialise device to TPI mode\n");
+ return ret;
+ }
+
+ /* step 2: detect revision */
+ if ((ret = siihdmi_detect_revision(tx)) < 0) {
+ DEBUG("unable to detect device revision\n");
+ return ret;
+ }
+
+ /* step 3: power up transmitter */
+ if ((ret = siihdmi_power_up(tx)) < 0)
+ return ret;
+
+ /* step 3.5: enable source termination
+ * (recommanded for pixclock > 100 MHz)
+ * 1. Set to internal page 0 (0xBC = 0x01)
+ * 2. Set to indexed register 130 (0xBD = 0x82)
+ * 3. Read value of register (0xBE)
+ * 4. Set bit 0 to 1 in order to enable source termination
+ * 5. Write value to register (0xBE)
+ */
+ ret = i2c_smbus_write_byte_data(tx->client, 0xBC, 0x01);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(tx->client, 0xBD, 0x82);
+ if (ret < 0)
+ return ret;
+ value = i2c_smbus_read_byte_data(tx->client, 0xBE);
+ value |= 0x01;
+ ret = i2c_smbus_write_byte_data(tx->client, 0xBE, value);
+ if (ret < 0)
+ return ret;
+
+ /* step 4: configure input bus and pixel repetition */
+ if (tx->platform->input_format == YUV_422_8_MUX) {
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_INPUT_BUS_PIXEL_REPETITION,
+ 0x30);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* step 5: select YC input mode */
+
+ /* step 6: configure sync methods */
+
+ /* step 7: configure explicit sync DE generation */
+
+ /* step 8: configure embedded sync extraction */
+
+ /* step 8.5: power down trasnmitter since interrupt power up transmitter
+ * if a display is attached
+ */
+ value = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_ISR);
+ if (!(value & SIIHDMI_ISR_DISPLAY_ATTACHED))
+ siihdmi_power_down(tx);
+
+ /* step 9: setup interrupt service */
+ if (tx->hotplug.enabled) {
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_IER,
+ SIIHDMI_IER_HOT_PLUG_EVENT);
+ if (ret < 0)
+ WARNING("unable to setup interrupt request\n");
+ }
+
+ return ret;
+}
+
+/*
+ * siihdmi_read_edid() - fetch one EDID block from the attached sink via DDC.
+ * @tx: transmitter state
+ * @edid: destination buffer for the block
+ * @block: EDID block number (0 = base block, >= 1 = extensions)
+ * @size: number of bytes to read (normally EDID_BLOCK_SIZE)
+ *
+ * The TPI must first be asked to release the DDC lines to the host, then
+ * the EDID is read directly from the monitor, and finally the bus is handed
+ * back to the transmitter.  Returns the i2c_transfer() result (message
+ * count on success) or a negative errno.
+ */
+static int siihdmi_read_edid(struct siihdmi_tx *tx, u8 *edid, unsigned int block, size_t size)
+{
+ /* offset is deliberately u8: block * 128 wraps to the byte offset
+ * within the 256-byte EDID segment selected just below */
+ u8 offset = block * EDID_BLOCK_SIZE;
+ u8 segment = block >> 1;
+ /* blocks 0 and 1 live in segment 0, so the segment-pointer write
+ * (first message) is only needed for block >= 2 */
+ u8 xfers = segment ? 3 : 2;
+ u8 ctrl;
+ int ret;
+ unsigned long start;
+
+ struct i2c_msg request[] = {
+ { .addr = EDID_I2C_DDC_DATA_SEGMENT,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment, },
+ { .addr = EDID_I2C_DDC_DATA_ADDRESS,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset, },
+ { .addr = EDID_I2C_DDC_DATA_ADDRESS,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = edid, },
+ };
+
+ /* step 1: (potentially) disable HDCP */
+
+ /* step 2: request the DDC bus */
+ /* NOTE(review): i2c_smbus_read_byte_data() returns int; a negative
+ * error here would be truncated into ctrl unnoticed — confirm this is
+ * acceptable on this bus */
+ ctrl = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_SYS_CTRL);
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL,
+ ctrl | SIIHDMI_SYS_CTRL_DDC_BUS_REQUEST);
+ if (ret < 0) {
+ DEBUG("unable to request DDC bus\n");
+ return ret;
+ }
+
+ /* step 3: poll for bus grant, bounded by bus_timeout jiffies */
+ start = jiffies;
+ do {
+ ctrl = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL);
+ } while ((~ctrl & SIIHDMI_SYS_CTRL_DDC_BUS_GRANTED) &&
+ !time_after(jiffies, start + bus_timeout));
+
+ if (~ctrl & SIIHDMI_SYS_CTRL_DDC_BUS_GRANTED)
+ goto relinquish;
+
+ /* step 4: take ownership of the DDC bus */
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL,
+ SIIHDMI_SYS_CTRL_DDC_BUS_REQUEST |
+ SIIHDMI_SYS_CTRL_DDC_BUS_OWNER_HOST);
+ if (ret < 0) {
+ DEBUG("unable to take ownership of the DDC bus\n");
+ goto relinquish;
+ }
+
+ /* step 5: read edid */
+ /* skip the leading segment-pointer message when segment == 0 by
+ * starting at &request[3 - xfers] */
+ ret = i2c_transfer(tx->client->adapter, &request[3 - xfers], xfers);
+ if (ret != xfers)
+ DEBUG("unable to read EDID block\n");
+
+relinquish:
+ /* step 6: relinquish ownership of the DDC bus (poll until the grant
+ * bit clears or the timeout expires) */
+ start = jiffies;
+ do {
+ i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL,
+ 0x00);
+ ctrl = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL);
+ } while ((ctrl & SIIHDMI_SYS_CTRL_DDC_BUS_GRANTED) &&
+ !time_after(jiffies, start + bus_timeout));
+
+ /* step 7: (potentially) enable HDCP */
+
+ return ret;
+}
+
+/* Handle a CEA-861 Vendor Specific Data Block: an HDMI-OUI VSDB marks the
+ * sink as HDMI and may lower the usable TMDS clock ceiling. */
+static inline void _process_cea861_vsdb(struct siihdmi_tx *tx,
+ const struct hdmi_vsdb * const vsdb)
+{
+ unsigned int tmds_limit;
+
+ /* Ignore any vendor block that does not carry the HDMI OUI. */
+ if (memcmp(vsdb->ieee_registration, CEA861_OUI_REGISTRATION_ID_HDMI_LSB,
+ sizeof(vsdb->ieee_registration)) != 0)
+ return;
+
+ /* Sink type is HDMI when VSDB is available in one of extensions */
+ tx->sink.type = SINK_TYPE_HDMI;
+
+ DEBUG("HDMI VSDB detected (basic audio %ssupported)\n",
+ tx->audio.available ? "" : "not ");
+ INFO("HDMI port configuration: %u.%u.%u.%u\n",
+ vsdb->port_configuration_a, vsdb->port_configuration_b,
+ vsdb->port_configuration_c, vsdb->port_configuration_d);
+
+ tmds_limit = KHZ2PICOS(vsdb->max_tmds_clock * 200);
+ if (tmds_limit && tmds_limit < tx->platform->pixclock) {
+ INFO("maximum TMDS clock limited to %u by device\n", tmds_limit);
+ tx->platform->pixclock = tmds_limit;
+ }
+}
+
+/*
+ * _process_cea861_video() - import the Short Video Descriptors of a CEA-861
+ * video data block into the framebuffer modelist.
+ *
+ * Each SVD byte carries a VIC (with an optional native flag) which indexes
+ * the cea_modes table directly.
+ */
+static inline void _process_cea861_video(struct siihdmi_tx *tx,
+ const struct cea861_video_data_block * const video)
+{
+ const struct cea861_data_block_header * const header =
+ (struct cea861_data_block_header *) video;
+ u8 i, count;
+
+ for (i = 0, count = 0; i < header->length; i++) {
+ const int vic = video->svd[i] & ~CEA861_SVD_NATIVE_FLAG;
+
+ /* cea_modes is indexed directly by VIC, so the last valid
+ * index is ARRAY_SIZE(cea_modes) - 1; the previous '<='
+ * bound read one entry past the end of the array. */
+ if (vic && vic < ARRAY_SIZE(cea_modes)) {
+#ifdef SIIHDMI_USE_FB
+ fb_add_videomode(&cea_modes[vic], &tx->info->modelist);
+#endif
+ count++;
+ }
+ }
+
+ DEBUG("%u modes parsed from CEA video data block\n", count);
+}
+
+/* Handle a CEA-861 extended data block; only the video capability block
+ * (scanning behaviour) is reported, all other tags are ignored. */
+static inline void _process_cea861_extended(struct siihdmi_tx *tx,
+ const struct cea861_data_block_extended *ext)
+{
+ static const char * const scannings[] = {
+ [SCAN_INFORMATION_UNKNOWN] = "unknown",
+ [SCAN_INFORMATION_OVERSCANNED] = "overscanned",
+ [SCAN_INFORMATION_UNDERSCANNED] = "underscanned",
+ [SCAN_INFORMATION_RESERVED] = "reserved",
+ };
+ const struct cea861_video_capability_block *vcb;
+
+ if (ext->extension_tag != CEA861_DATA_BLOCK_EXTENSION_VIDEO_CAPABILITY)
+ return;
+
+ vcb = (const struct cea861_video_capability_block *) ext;
+ INFO("CEA video capability (scanning behaviour):\n"
+ " Preferred Mode: %s\n"
+ " VESA/PC Mode: %s\n"
+ " CEA/TV Mode: %s\n",
+ scannings[vcb->pt_overunder_behavior],
+ scannings[vcb->it_overunder_behavior],
+ scannings[vcb->ce_overunder_behavior]);
+}
+
+/*
+ * siihdmi_parse_cea861_timing_block() - walk a CEA-861 EDID extension.
+ *
+ * Records basic-audio and underscan capabilities, then iterates the data
+ * block collection (the bytes between the fixed header and the detailed
+ * timing descriptors) dispatching each block by its tag.
+ */
+static void siihdmi_parse_cea861_timing_block(struct siihdmi_tx *tx,
+ const struct edid_extension *ext)
+{
+ const struct cea861_timing_block * const cea = (struct cea861_timing_block *) ext;
+ /* size of the data block collection: everything before the DTDs */
+ const u8 size = cea->dtd_offset - offsetof(struct cea861_timing_block, data);
+ u8 index;
+
+ BUILD_BUG_ON(sizeof(*cea) != sizeof(*ext));
+
+ tx->audio.available = cea->basic_audio_supported;
+ if (cea->underscan_supported)
+ tx->sink.scanning = SCANNING_UNDERSCANNED;
+
+ if (cea->dtd_offset == CEA861_NO_DTDS_PRESENT)
+ return;
+
+ index = 0;
+ while (index < size) {
+ const struct cea861_data_block_header * const header =
+ (struct cea861_data_block_header *) &cea->data[index];
+
+ switch (header->tag) {
+ case CEA861_DATA_BLOCK_TYPE_VENDOR_SPECIFIC:
+ _process_cea861_vsdb(tx, (struct hdmi_vsdb *) header);
+ break;
+#ifdef SIIHDMI_USE_FB
+ case CEA861_DATA_BLOCK_TYPE_VIDEO:
+ _process_cea861_video(tx, (struct cea861_video_data_block *) header);
+ break;
+#endif
+ case CEA861_DATA_BLOCK_TYPE_EXTENDED:
+ _process_cea861_extended(tx, (struct cea861_data_block_extended *) header);
+ break;
+ }
+
+ /* advance past this block: header byte + payload length */
+ index = index + header->length + sizeof(*header);
+ }
+}
+
+/*
+ * siihdmi_set_vmode_registers() - program the TPI video mode data, input
+ * format and output format registers for @mode, plus the embedded-sync
+ * register set when the input bus is 8-bit muxed YUV 4:2:2.
+ */
+static void siihdmi_set_vmode_registers(struct siihdmi_tx *tx,
+ const struct fb_videomode *mode)
+{
+ /* layout of the 4 little-endian u16 values written to
+ * SIIHDMI_TPI_REG_VIDEO_MODE_DATA_BASE */
+ enum basic_video_mode_fields {
+ PIXEL_CLOCK,
+ REFRESH_RATE,
+ X_RESOLUTION,
+ Y_RESOLUTION,
+ FIELDS,
+ };
+
+ u16 vmode[FIELDS];
+ u32 pixclk, htotal, vtotal, refresh;
+ u8 format;
+ int ret;
+ u16 hsync_len, vsync_len;
+
+ BUILD_BUG_ON(sizeof(vmode) != 8);
+
+ BUG_ON(mode->pixclock == 0);
+ pixclk = PICOS2KHZ(mode->pixclock);
+
+ htotal = mode->xres + mode->left_margin + mode->hsync_len + mode->right_margin;
+ vtotal = mode->yres + mode->upper_margin + mode->vsync_len + mode->lower_margin;
+
+ /* explicitly use 64-bit division to avoid overflow truncation */
+ refresh = (u32) div_u64(pixclk * 100000ull, htotal * vtotal);
+
+ /* basic video mode data */
+ /* register expects the dot clock in units of 10 kHz */
+ vmode[PIXEL_CLOCK] = (u16) (pixclk / 10);
+ /* on the muxed bus two bus clocks are needed per pixel */
+ if (tx->platform->input_format == YUV_422_8_MUX)
+ vmode[PIXEL_CLOCK] *= 2;
+
+ /*
+ Silicon Image example code implies refresh to be 6000 for 60Hz?
+ This may work simply because we only test it on little-endian :(
+ */
+ vmode[REFRESH_RATE] = (u16) refresh;
+ vmode[X_RESOLUTION] = (u16) htotal;
+ vmode[Y_RESOLUTION] = (u16) vtotal;
+
+ ret = i2c_smbus_write_i2c_block_data(tx->client,
+ SIIHDMI_TPI_REG_VIDEO_MODE_DATA_BASE,
+ sizeof(vmode),
+ (u8 *) vmode);
+ if (ret < 0)
+ DEBUG("unable to write video mode data\n");
+
+ /* input format */
+ format = SIIHDMI_INPUT_VIDEO_RANGE_EXPANSION_AUTO
+ | SIIHDMI_INPUT_COLOR_DEPTH_8BIT;
+
+ switch (tx->platform->input_format) {
+ case YUV_422_8_MUX:
+ format |= SIIHDMI_INPUT_COLOR_SPACE_YUV_422;
+ break;
+ case RGB_24:
+ default :
+ format |= SIIHDMI_INPUT_COLOR_SPACE_RGB;
+ break;
+ }
+
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_AVI_INPUT_FORMAT,
+ format);
+ if (ret < 0)
+ DEBUG("unable to set input format\n");
+
+ /* output format */
+ format = SIIHDMI_OUTPUT_VIDEO_RANGE_COMPRESSION_AUTO
+ | SIIHDMI_OUTPUT_COLOR_STANDARD_BT601
+ | SIIHDMI_OUTPUT_COLOR_DEPTH_8BIT;
+
+ if (tx->sink.type == SINK_TYPE_HDMI)
+ format |= SIIHDMI_OUTPUT_FORMAT_HDMI_RGB;
+ else
+ format |= SIIHDMI_OUTPUT_FORMAT_DVI_RGB;
+
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_AVI_OUTPUT_FORMAT,
+ format);
+ if (ret < 0)
+ DEBUG("unable to set output format\n");
+
+ /* If format is not muxed, terminated */
+ if (tx->platform->input_format != YUV_422_8_MUX)
+ return;
+
+ /* Default values from hdmi_1024x768p60_video_mode */
+ hsync_len = mode->hsync_len;
+ vsync_len = mode->vsync_len;
+
+ /* Embedded sync register set */
+ /* NOTE(review): raw register offsets 0x62..0x69 — presumably the
+ * SiI902x embedded-sync extraction registers; verify against the
+ * datasheet before touching the magic values */
+ siihdmi_check_sync_mux_mode(tx);
+ i2c_smbus_write_byte_data(tx->client, 0x62, 0x01);
+ i2c_smbus_write_byte_data(tx->client, 0x64, 0x00);
+ i2c_smbus_write_byte_data(tx->client, 0x65, 0x02);
+ i2c_smbus_write_byte_data(tx->client, 0x66, hsync_len & 0xFF);
+ i2c_smbus_write_byte_data(tx->client, 0x67, (hsync_len >> 8) & 0x03);
+ i2c_smbus_write_byte_data(tx->client, 0x68, 0x01);
+ i2c_smbus_write_byte_data(tx->client, 0x69, vsync_len & 0x3F);
+ siihdmi_check_sync_mux_mode(tx);
+}
+
+/* Zero the TPI AVI InfoFrame registers; only valid for DVI sinks (an HDMI
+ * sink gets a real AVI InfoFrame instead). */
+static int siihdmi_clear_avi_info_frame(struct siihdmi_tx *tx)
+{
+ static const u8 zeroes[SIIHDMI_TPI_REG_AVI_INFO_FRAME_LENGTH];
+ int status;
+
+ BUG_ON(tx->sink.type != SINK_TYPE_DVI);
+
+ status = i2c_smbus_write_i2c_block_data(tx->client,
+ SIIHDMI_TPI_REG_AVI_INFO_FRAME_BASE,
+ sizeof(zeroes), zeroes);
+ if (status < 0)
+ DEBUG("unable to clear avi info frame\n");
+
+ return status;
+}
+
+/*
+ * siihdmi_set_avi_info_frame() - build and program the AVI InfoFrame.
+ *
+ * Determines the CEA VIC and picture aspect ratio matching the currently
+ * selected mode, fills in scan information and pixel repetition, then
+ * writes the packed frame to the TPI AVI InfoFrame registers.
+ */
+static int siihdmi_set_avi_info_frame(struct siihdmi_tx *tx)
+{
+ int ret = 0, i;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ int min_refresh, max_refresh;
+ struct hdmi_avi_infoframe infoframe;
+ struct edid_block0 *block0;
+ struct fb_videomode *m;
+ unsigned ratio;
+ enum {
+ ASPECT_RATIO_4_3 = 1,
+ ASPECT_RATIO_16_9 = 2
+ } modes = 0;
+
+ hdmi_avi_infoframe_init(&infoframe);
+
+ infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+
+ /* Fill Video Code and Aspect ratio */
+ if (tx->selected_mode != NULL) {
+ /* Vertical refresh is +/- 0.5% in CEA spec */
+ min_refresh = tx->selected_mode->refresh * 995 / 1000;
+ max_refresh = tx->selected_mode->refresh * 1005 / 1000;
+
+ /* Find correct VIC in CEA
+ * Interlaced video are not supported
+ * Vertical refresh is +/-0.5%
+ * The name is NULL or equal to name in cea_modes (some format
+ * have same timings but not same VIC and aspect ratio)
+ *
+ * NOTE(review): the name checks below compare pointers, not
+ * contents — this relies on modes sharing the cea_modes
+ * name strings; confirm that is always the case.
+ *
+ * cea_modes is indexed by VIC, so the last valid index is
+ * ARRAY_SIZE(cea_modes) - 1; the previous '<=' bound read
+ * one entry past the end of the table.
+ */
+ for (i = 1; i < ARRAY_SIZE(cea_modes); i++) {
+ m = (struct fb_videomode *) &cea_modes[i];
+ if (!(m->vmode & FB_VMODE_INTERLACED) &&
+ tx->selected_mode->xres == m->xres &&
+ tx->selected_mode->yres == m->yres &&
+ min_refresh <= m->refresh &&
+ max_refresh >= m->refresh &&
+ tx->selected_mode->hsync_len == m->hsync_len &&
+ (tx->selected_mode->name == NULL ||
+ tx->selected_mode->name == m->name)) {
+ infoframe.video_code = i;
+ break;
+ }
+ }
+
+ /* Get aspect ratio from CEA ratio table */
+ if (infoframe.video_code > 1 && infoframe.video_code < 64 &&
+ (fb_mode_is_equal(&cea_modes[infoframe.video_code],
+ &cea_modes[infoframe.video_code+1]) ||
+ fb_mode_is_equal(&cea_modes[infoframe.video_code],
+ &cea_modes[infoframe.video_code-1]))) {
+ /* Video format has two aspect ratio in CEA table
+ * Get correct VIC from E-EDID (supported CEA modes)
+ * If none or both aspect ratio are available, ratio of
+ * screen is calculated and correct VIC is selected.
+ */
+ /* normalise to the 4:3 member of the VIC pair */
+ if (cea_ratios[infoframe.video_code] ==
+ HDMI_PICTURE_ASPECT_16_9)
+ infoframe.video_code--;
+
+ /* Find in E-EDID which VIC are available */
+ for (i = 0; i < tx->monspecs.modedb_len; i++) {
+ if (tx->monspecs.modedb[i].name ==
+ cea_modes[infoframe.video_code].name)
+ modes |= ASPECT_RATIO_4_3;
+ else if (tx->monspecs.modedb[i].name ==
+ cea_modes[infoframe.video_code+1].name)
+ modes |= ASPECT_RATIO_16_9;
+ }
+
+ /* Calculate ratio from EDID */
+ if ((modes == 0 ||
+ modes == (ASPECT_RATIO_4_3 | ASPECT_RATIO_16_9)) &&
+ tx->edid.data != NULL) {
+ block0 = (struct edid_block0 *) tx->edid.data;
+ ratio = block0->maximum_horizontal_image_size *
+ 100 / block0->maximum_vertical_image_size;
+ /* 16:9 is ~177; pick the 16:9 VIC when wider */
+ if (ratio >= 177)
+ infoframe.video_code++;
+ }
+ else if (modes == ASPECT_RATIO_16_9)
+ infoframe.video_code++;
+
+ /* Select an active aspect ratio:
+ * The CEA modes with two aspect ratio (4:3 and 16:9)
+ * for the same resolution as 480p or 576p, imply that
+ * video must be streched to 4:3 when aspect ratio is
+ * 16:9 since the screen will rescale the image to fit
+ * 16:9. To bypass the streching process, an active
+ * aspect ratio can be specified for both formats in
+ * order to pass HDMI certification: if the active
+ * aspect ratio field is not "As the coded frame"
+ * (= 8), the aspect ratio of displayed picture is not
+ * verified and the 16:9 case doesn't make issue.
+ */
+ if (cea_ratios[infoframe.video_code] ==
+ HDMI_PICTURE_ASPECT_4_3)
+ infoframe.active_aspect =
+ HDMI_ACTIVE_ASPECT_16_9;
+ else
+ infoframe.active_aspect =
+ HDMI_ACTIVE_ASPECT_16_9_CENTER;
+ }
+ infoframe.picture_aspect = cea_ratios[infoframe.video_code];
+
+ /* Set pixel repetition */
+ if (tx->selected_mode->vmode & FB_VMODE_DOUBLE)
+ infoframe.pixel_repeat = 1;
+
+ /* Log VIC format used */
+ if (infoframe.video_code > 0)
+ INFO("Use VIC %d\n", infoframe.video_code);
+ }
+
+ switch (tx->sink.scanning) {
+ case SCANNING_UNDERSCANNED:
+ infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+ break;
+ case SCANNING_OVERSCANNED:
+ infoframe.scan_mode = HDMI_SCAN_MODE_OVERSCAN;
+ break;
+ default:
+ infoframe.scan_mode = HDMI_SCAN_MODE_NONE;
+ }
+
+ hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+
+ /* skip the header bytes the TPI supplies itself */
+ ret = i2c_smbus_write_i2c_block_data(tx->client,
+ SIIHDMI_TPI_REG_AVI_INFO_FRAME_BASE,
+ sizeof(buffer) - SIIHDMI_AVI_INFO_FRAME_OFFSET,
+ (buffer) + SIIHDMI_AVI_INFO_FRAME_OFFSET);
+
+ if (ret < 0)
+ DEBUG("unable to write avi info frame\n");
+
+ return ret;
+}
+
+/*
+ * siihdmi_set_audio_info_frame() - write the CEA-861 audio InfoFrame.
+ *
+ * All fields defer to the stream header (2-channel stereo), as required
+ * by CEA-861-D for basic audio.  Only valid for HDMI sinks.
+ */
+static int siihdmi_set_audio_info_frame(struct siihdmi_tx *tx)
+{
+ int ret;
+ struct siihdmi_audio_info_frame packet = {
+ .header = {
+ .info_frame = SIIHDMI_INFO_FRAME_AUDIO,
+ .repeat = false,
+ /* only transmit the frame when the sink has audio */
+ .enable = tx->audio.available,
+ },
+
+ .info_frame = {
+ .header = {
+ .type = INFO_FRAME_TYPE_AUDIO,
+ .version = CEA861_AUDIO_INFO_FRAME_VERSION,
+ .length = sizeof(packet.info_frame) - sizeof(packet.info_frame.header),
+ },
+
+ .channel_count = CHANNEL_COUNT_REFER_STREAM_HEADER,
+ .channel_allocation = CHANNEL_ALLOCATION_STEREO,
+
+ .format_code_extension = CODING_TYPE_REFER_STREAM_HEADER,
+
+ /* required to Refer to Stream Header by CEA861-D */
+ .coding_type = CODING_TYPE_REFER_STREAM_HEADER,
+
+ .sample_size = SAMPLE_SIZE_REFER_STREAM_HEADER,
+ .sample_frequency = FREQUENCY_REFER_STREAM_HEADER,
+ },
+ };
+
+ BUG_ON(tx->sink.type != SINK_TYPE_HDMI);
+
+ cea861_checksum_hdmi_info_frame((u8 *) &packet.info_frame);
+
+ BUILD_BUG_ON(sizeof(packet) != SIIHDMI_TPI_REG_AUDIO_INFO_FRAME_LENGTH);
+ ret = i2c_smbus_write_i2c_block_data(tx->client,
+ SIIHDMI_TPI_REG_MISC_INFO_FRAME_BASE,
+ sizeof(packet),
+ (u8 *) &packet);
+ if (ret < 0)
+ DEBUG("unable to write audio info frame\n");
+
+ return ret;
+}
+
+/*
+ * siihdmi_set_spd_info_frame() - write the Source Product Description
+ * InfoFrame (vendor/product strings from platform data).  HDMI sinks only.
+ */
+static int siihdmi_set_spd_info_frame(struct siihdmi_tx *tx)
+{
+ int ret;
+ struct siihdmi_spd_info_frame packet = {
+ .header = {
+ .info_frame = SIIHDMI_INFO_FRAME_SPD_ACP,
+ .repeat = false,
+ .enable = true,
+ },
+
+ .info_frame = {
+ .header = {
+ .type = INFO_FRAME_TYPE_SOURCE_PRODUCT_DESCRIPTION,
+ .version = CEA861_SPD_INFO_FRAME_VERSION,
+ .length = sizeof(packet.info_frame) - sizeof(packet.info_frame.header),
+ },
+
+ .source_device_info = SPD_SOURCE_PC_GENERAL,
+ },
+ };
+
+ BUG_ON(tx->sink.type != SINK_TYPE_HDMI);
+
+ /* strncpy is intentional here: the SPD fields are fixed-width and
+ * need not be NUL-terminated (unused bytes are zero-padded) */
+ strncpy(packet.info_frame.vendor, tx->platform->vendor,
+ sizeof(packet.info_frame.vendor));
+
+ strncpy(packet.info_frame.description, tx->platform->description,
+ sizeof(packet.info_frame.description));
+
+ cea861_checksum_hdmi_info_frame((u8 *) &packet.info_frame);
+
+ BUILD_BUG_ON(sizeof(packet) != SIIHDMI_TPI_REG_MISC_INFO_FRAME_LENGTH);
+ ret = i2c_smbus_write_i2c_block_data(tx->client,
+ SIIHDMI_TPI_REG_MISC_INFO_FRAME_BASE,
+ sizeof(packet),
+ (u8 *) &packet);
+ if (ret < 0)
+ DEBUG("unable to write SPD info frame\n");
+
+ return ret;
+}
+
+/* Set the audio mute bit, preserving the rest of the packet layout
+ * control register (read-modify-write). */
+static inline void siihdmi_audio_mute(struct siihdmi_tx *tx)
+{
+ u8 layout;
+
+ layout = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL);
+ layout |= SIIHDMI_AUDIO_MUTE;
+
+ i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL,
+ layout);
+}
+
+/* Clear the audio mute bit, preserving the rest of the packet layout
+ * control register (read-modify-write). */
+static inline void siihdmi_audio_unmute(struct siihdmi_tx *tx)
+{
+ u8 layout;
+
+ layout = i2c_smbus_read_byte_data(tx->client,
+ SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL);
+ layout &= ~SIIHDMI_AUDIO_MUTE;
+
+ i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL,
+ layout);
+}
+
+/*
+ * siihdmi_configure_audio() - enable S/PDIF audio input on the TPI.
+ *
+ * Audio is muted around the reconfiguration, then the audio InfoFrame is
+ * refreshed to match.
+ */
+static inline void siihdmi_configure_audio(struct siihdmi_tx *tx)
+{
+ siihdmi_audio_mute(tx);
+
+ /* S/PDIF input, mute still asserted while reprogramming */
+ i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL, SIIHDMI_AUDIO_SPDIF_ENABLE | SIIHDMI_AUDIO_MUTE);
+ i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_I2S_AUDIO_SAMPLING_HBR, 0);
+ i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_I2S_ORIGINAL_FREQ_SAMPLE_LENGTH, SIIHDMI_AUDIO_HANDLING_DOWN_SAMPLE);
+
+ siihdmi_audio_unmute(tx);
+ siihdmi_set_audio_info_frame(tx);
+}
+
+/*
+ * siihdmi_print_modeline() - log one video mode in X-style modeline form.
+ *
+ * The leading flag character marks the sink's preferred mode ('*') or a
+ * CEA mode ('C'); an optional @message is appended in parentheses.
+ */
+static void siihdmi_print_modeline(const struct siihdmi_tx *tx,
+ const struct fb_videomode *mode,
+ const char * const message)
+{
+ const bool interlaced = (mode->vmode & FB_VMODE_INTERLACED);
+ const bool double_scan = (mode->vmode & FB_VMODE_DOUBLE);
+
+ u32 pixclk = mode->pixclock ? PICOS2KHZ(mode->pixclock) : 0;
+ char flag = ' ';
+
+ /* doublescan modes effectively halve the dot clock */
+ pixclk >>= (double_scan ? 1 : 0);
+
+#ifdef SIIHDMI_USE_FB
+ if (fb_mode_is_equal(&tx->sink.preferred_mode, mode))
+ flag = '*';
+#endif
+
+#ifdef SIIHDMI_USE_FB
+ if (mode->flag & FB_MODE_IS_CEA)
+ flag = 'C';
+#endif
+ INFO(" %c \"%dx%d@%d%s\" %lu.%.2lu %u %u %u %u %u %u %u %u %chsync %cvsync",
+ /* CEA or preferred status of modeline */
+ flag,
+
+ /* mode name */
+ mode->xres, mode->yres,
+ mode->refresh << (interlaced ? 1 : 0),
+ interlaced ? "i" : (double_scan ? "d" : ""),
+
+ /* dot clock frequency (MHz) */
+ pixclk / 1000ul,
+ pixclk % 1000ul,
+
+ /* horizontal timings */
+ mode->xres,
+ mode->xres + mode->right_margin,
+ mode->xres + mode->right_margin + mode->hsync_len,
+ mode->xres + mode->right_margin + mode->hsync_len + mode->left_margin,
+
+ /* vertical timings */
+ mode->yres,
+ mode->yres + mode->lower_margin,
+ mode->yres + mode->lower_margin + mode->vsync_len,
+ mode->yres + mode->lower_margin + mode->vsync_len + mode->upper_margin,
+
+ /* sync direction */
+ (mode->sync & FB_SYNC_HOR_HIGH_ACT) ? '+' : '-',
+ (mode->sync & FB_SYNC_VERT_HIGH_ACT) ? '+' : '-');
+
+ if (message)
+ CONTINUE(" (%s)", message);
+
+ CONTINUE("\n");
+}
+
+/*
+ * siihdmi_set_resolution() - drive the TPI resolution-change sequence.
+ *
+ * Follows the documented step order: mute/power-down TMDS, reprogram the
+ * video mode registers and InfoFrames, then re-enable the output.  The
+ * order of the register writes is significant; do not reorder.
+ */
+static int siihdmi_set_resolution(struct siihdmi_tx *tx,
+ const struct fb_videomode *mode)
+{
+ u8 ctrl;
+ int ret;
+
+#ifdef SIIHDMI_USE_FB
+ /* don't care if config differs from FB */
+ if (0 == memcmp((void *) &tx->sink.current_mode, (void *) mode, sizeof(struct fb_videomode)))
+ {
+ return 0;
+ }
+#endif
+
+ INFO("selected configuration: \n");
+ siihdmi_print_modeline(tx, mode, NULL);
+
+ ctrl = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_SYS_CTRL);
+
+ /* setup the sink type */
+ if (tx->sink.type == SINK_TYPE_DVI)
+ ctrl &= ~SIIHDMI_SYS_CTRL_OUTPUT_MODE_SELECT_HDMI;
+ else
+ ctrl |= SIIHDMI_SYS_CTRL_OUTPUT_MODE_SELECT_HDMI;
+
+ /* step 1: (potentially) disable HDCP */
+
+ /* step 2: (optionally) blank the display */
+ /*
+ * Note that if we set the AV Mute, switching to DVI could result in a
+ * permanently muted display until a hardware reset. Thus only do this
+ * if the sink is a HDMI connection
+ */
+ if (tx->sink.type == SINK_TYPE_HDMI)
+ ctrl |= SIIHDMI_SYS_CTRL_AV_MUTE_HDMI;
+ /* optimisation: merge the write into the next one */
+
+ /* step 3: prepare for resolution change */
+ ctrl |= SIIHDMI_SYS_CTRL_TMDS_OUTPUT_POWER_DOWN;
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL,
+ ctrl);
+ if (ret < 0)
+ DEBUG("unable to prepare for resolution change\n");
+
+ /* give the TPI time to flush any pending InfoFrames */
+ msleep(SIIHDMI_CTRL_INFO_FRAME_DRAIN_TIME);
+
+ /* step 4: change video resolution */
+
+ /* step 5: set the vmode registers */
+ siihdmi_set_vmode_registers(tx, mode);
+ siihdmi_check_embsync_extract(tx);
+
+ /*
+ * step 6:
+ * [DVI] clear AVI InfoFrame
+ * [HDMI] set AVI InfoFrame
+ */
+ if (tx->sink.type == SINK_TYPE_HDMI)
+ siihdmi_set_avi_info_frame(tx);
+ else
+ siihdmi_clear_avi_info_frame(tx);
+ siihdmi_check_embsync_extract(tx);
+
+ /* step 7: [HDMI] set new audio information */
+ if (tx->sink.type == SINK_TYPE_HDMI) {
+ if (tx->audio.available)
+ siihdmi_configure_audio(tx);
+ siihdmi_set_spd_info_frame(tx);
+ }
+
+ /* step 8: enable display */
+ ctrl &= ~SIIHDMI_SYS_CTRL_TMDS_OUTPUT_POWER_DOWN;
+ /* optimisation: merge the write into the next one */
+
+ /* step 9: (optionally) un-blank the display */
+ ctrl &= ~SIIHDMI_SYS_CTRL_AV_MUTE_HDMI;
+ ret = i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_SYS_CTRL,
+ ctrl);
+ if (ret < 0)
+ DEBUG("unable to enable the display\n");
+
+ /* step 10: (potentially) enable HDCP */
+
+#ifdef SIIHDMI_USE_FB
+ /* remember the programmed mode so identical requests short-circuit */
+ memcpy((void *) &tx->sink.current_mode, mode, sizeof(struct fb_videomode));
+#endif
+
+ return ret;
+}
+
+#ifdef SIIHDMI_USE_FB
+/* Log every mode currently present in the framebuffer's modelist. */
+static void siihdmi_dump_modelines(struct siihdmi_tx *tx)
+{
+ const struct fb_modelist *item;
+
+ INFO("supported modelines:\n");
+ list_for_each_entry(item, &tx->info->modelist, list)
+ siihdmi_print_modeline(tx, &item->mode, NULL);
+}
+#endif
+
+/* Walk every EDID extension block after block 0, warning on checksum
+ * failures and parsing the CEA-861 blocks we understand. */
+static inline void siihdmi_process_extensions(struct siihdmi_tx *tx)
+{
+ const struct edid_block0 * const block0 =
+ (struct edid_block0 *) tx->edid.data;
+ const struct edid_extension * const extensions =
+ (struct edid_extension *) (tx->edid.data + sizeof(*block0));
+ u8 i;
+
+ for (i = 0; i < block0->extensions; i++) {
+ const struct edid_extension * const extension = &extensions[i];
+
+ if (!edid_verify_checksum((u8 *) extension))
+ WARNING("EDID block %u CRC mismatch\n", i);
+
+ /* only CEA-861 extension blocks carry data we use */
+ if (extension->tag == EDID_EXTENSION_CEA)
+ siihdmi_parse_cea861_timing_block(tx, extension);
+ }
+}
+
+#ifdef SIIHDMI_USE_FB
+
+/*
+ * _find_similar_mode() - find another modelist entry with the same x/y
+ * resolution as @mode.
+ *
+ * Returns the first entry whose resolution matches @mode but which is not
+ * @mode itself, or NULL when none exists.
+ */
+static const struct fb_videomode *
+_find_similar_mode(const struct fb_videomode * const mode, struct list_head *head)
+{
+ const struct fb_modelist *entry;
+
+ /* nothing is removed while walking, so the plain (non-_safe)
+ * iterator is the correct idiom; the old 'next' cursor was unused */
+ list_for_each_entry(entry, head, list) {
+ if (fb_res_is_equal(mode, &entry->mode) && (mode != &entry->mode))
+ return &entry->mode;
+ }
+
+ return NULL;
+}
+
+/*
+ * siihdmi_sanitize_modelist() - remove modes the transmitter cannot (or
+ * should not) display from the framebuffer modelist.
+ *
+ * Drops interlaced, doublescan, over-clocked and insufficient-vsync modes,
+ * and de-duplicates modes present as both detailed/CEA and standard
+ * timings.  The best remaining display mode is recorded as the sink's
+ * preferred mode.
+ */
+static void siihdmi_sanitize_modelist(struct siihdmi_tx * const tx)
+{
+ struct list_head *modelist = &tx->info->modelist;
+ const struct fb_modelist *entry, *next;
+ const struct fb_videomode *mode;
+ int num_removed = 0;
+
+ if ((mode = fb_find_best_display(&tx->info->monspecs, modelist)))
+ tx->sink.preferred_mode = *mode;
+
+ list_for_each_entry_safe(entry, next, modelist, list) {
+ const char *reason = NULL;
+ mode = &entry->mode;
+
+ if (mode->vmode & FB_VMODE_INTERLACED) {
+ reason = "interlaced";
+ } else if (mode->vmode & FB_VMODE_DOUBLE) {
+ reason = "doublescan";
+ } else if (mode->pixclock < tx->platform->pixclock) {
+ /* fb pixclock is in picoseconds: smaller means faster */
+ reason = "pixel clock exceeded";
+ } else if ((tx->sink.type == SINK_TYPE_HDMI) && mode->lower_margin < 2) {
+ /*
+ * HDMI spec (§ 5.1.2) stipulates >= 2 lines of vsync
+ *
+ * We do not care so much on DVI, although it may be that the SII9022 cannot
+ * actually display this mode. Requires testing!!
+ */
+ reason = "insufficient margin";
+ } else {
+ const struct fb_videomode *match = _find_similar_mode(mode, modelist);
+
+ if (match) {
+ /*
+ * Prefer detailed timings found in EDID. Certain sinks support slight
+ * variations of VESA/CEA timings, and using those allows us to support
+ * a wider variety of monitors.
+ */
+ if ((~(mode->flag) & FB_MODE_IS_DETAILED) &&
+ (match->flag & FB_MODE_IS_DETAILED)) {
+ reason = "detailed match present";
+ } else if ((~(mode->flag) & FB_MODE_IS_CEA) &&
+ (match->flag & FB_MODE_IS_CEA)) {
+ if ((tx->sink.type == SINK_TYPE_HDMI) && !useitmodes) {
+ /*
+ * for HDMI connections we want to remove any detailed timings
+ * and leave in CEA mode timings. This is on the basis that you
+ * would expect HDMI monitors to do better with CEA (TV) modes
+ * than you would PC modes. No data is truly lost: these modes
+ * are duplicated in terms of size and refresh but may have
+ * subtle differences insofaras more compatible timings.
+ *
+ * That is, unless we want to prefer IT modes, since most TVs
+ * will overscan CEA modes (720p, 1080p) by default, but display
+ * IT (PC) modes to the edge of the screen.
+ */
+ reason = "CEA match present";
+ } else {
+ /*
+ * DVI connections are the opposite to the above; remove CEA
+ * modes which duplicate normal modes, on the basis that a
+ * DVI sink will better display a standard EDID mode but may
+ * not be fully compatible with CEA timings. This is the
+ * behavior on HDMI sinks if we want to prefer IT modes.
+ *
+ * All we do is copy the matched mode into the mode value
+ * such that we remove the correct mode below.
+ */
+ mode = match;
+ reason = "IT match present";
+ }
+ }
+ }
+ }
+
+ if (reason) {
+ /* recover the allocation that embeds 'mode'; this may
+ * differ from 'entry' when a duplicate match is the
+ * entry being removed */
+ struct fb_modelist *victim =
+ container_of(mode, struct fb_modelist, mode);
+
+ if (num_removed == 0) { // first time only
+ INFO("Unsupported modelines:\n");
+ }
+
+ siihdmi_print_modeline(tx, mode, reason);
+
+ /* NOTE(review): if the victim is the pre-fetched 'next'
+ * entry, 'next' dangles after this free — confirm a
+ * match can never be the immediately following entry. */
+ list_del(&victim->list);
+ /* free the container itself, not the address of its
+ * embedded list head (the old code only worked because
+ * 'list' happens to be the first member) */
+ kfree(victim);
+ num_removed++;
+ }
+ }
+
+ if (num_removed > 0) {
+ INFO("discarded %u incompatible modes\n", num_removed);
+ }
+}
+
+/* Pick the best mode not exceeding the requested one; fall back to the
+ * nearest mode overall when nothing smaller or equal exists. */
+static inline const struct fb_videomode *_match(const struct fb_videomode * const mode,
+ struct list_head *modelist)
+{
+ const struct fb_videomode *best;
+
+ best = fb_find_best_mode_at_most(mode, modelist);
+ if (best != NULL)
+ return best;
+
+ return fb_find_nearest_mode(mode, modelist);
+}
+#endif
+
+/* Remove video modes we do not support from the monspecs mode database,
+ * compacting the surviving entries in place. */
+static void siihdmi_filter_monspecs(struct fb_monspecs *monspecs) {
+ int src, dst;
+
+ for (src = 0, dst = 0; src < monspecs->modedb_len; src++) {
+ struct fb_videomode *candidate = &monspecs->modedb[src];
+
+ /* SIIHDMI doesn't handle interlaced video */
+ if (candidate->vmode & FB_VMODE_INTERLACED)
+ continue;
+
+ /* keep this mode: shift it down over any removed entries */
+ memmove(&monspecs->modedb[dst], candidate, sizeof(*candidate));
+ dst++;
+ }
+
+ monspecs->modedb_len = dst;
+}
+
+/*
+ * siihdmi_setup_display() - (re)detect the sink and program a mode.
+ *
+ * Reads the full E-EDID over DDC, builds the mode database, lets the
+ * display layer pick a mode and programs it.  Falls back to a default
+ * mode when no valid EDID can be read, and powers the transmitter down
+ * when no display is attached.
+ */
+static int siihdmi_setup_display(struct siihdmi_tx *tx)
+{
+#ifdef SIIHDMI_USE_FB
+ struct fb_var_screeninfo var = {0};
+#endif
+ struct edid_block0 *block0;
+ unsigned width;
+ unsigned height;
+ int ret;
+ u8 isr;
+ u8 *new;
+ int i;
+
+ BUILD_BUG_ON(sizeof(struct edid_block0) != EDID_BLOCK_SIZE);
+ BUILD_BUG_ON(sizeof(struct edid_extension) != EDID_BLOCK_SIZE);
+
+ /* defaults */
+ tx->sink.scanning = SCANNING_EXACT;
+ tx->sink.type = SINK_TYPE_DVI;
+ tx->audio.available = false;
+
+ isr = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_ISR);
+ DEBUG("hotplug: display %s, powered %s\n",
+ (isr & SIIHDMI_ISR_DISPLAY_ATTACHED) ? "attached" : "detached",
+ (isr & SIIHDMI_ISR_RECEIVER_SENSE) ? "on" : "off");
+
+ /* nothing attached: save power and stop here */
+ if (~isr & (SIIHDMI_ISR_DISPLAY_ATTACHED))
+ return siihdmi_power_down(tx);
+
+ /* Free previous video mode database and selected mode */
+ if (tx->monspecs.modedb != NULL)
+ {
+ fb_destroy_modedb(tx->monspecs.modedb);
+ tx->selected_mode = NULL;
+ tx->monspecs.modedb = NULL;
+ tx->monspecs.modedb_len = 0;
+ }
+
+ /* Allocate EDID buffer for one block (kfree(NULL) is a no-op) */
+ kfree(tx->edid.data);
+ tx->edid.data = kzalloc(EDID_BLOCK_SIZE, GFP_KERNEL);
+ if (!tx->edid.data)
+ return -ENOMEM;
+
+ /* use EDID to detect sink characteristics */
+ ret = siihdmi_read_edid(tx, tx->edid.data, 0, EDID_BLOCK_SIZE);
+ block0 = (struct edid_block0 *) tx->edid.data;
+
+ if (ret < 0 || !edid_verify_checksum((u8 *) block0)) {
+ WARNING("couldn't read EDID, selecting default vmode\n");
+ /* Set display to 640x480p @ 60Hz */
+ tx->selected_mode = avifb_select_mode(
+ tx->platform->lcd_id,
+ &default_monspecs);
+ return siihdmi_set_resolution(tx, tx->selected_mode);
+ }
+
+ /* Reallocate space for block 0 as well as the extensions; only
+ * update edid.length once the reallocation has succeeded so the
+ * length always matches the buffer */
+ new = krealloc(tx->edid.data,
+ (block0->extensions + 1) * EDID_BLOCK_SIZE, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ tx->edid.data = new;
+ tx->edid.length = (block0->extensions + 1) * EDID_BLOCK_SIZE;
+ block0 = (struct edid_block0 *) new;
+
+ /* create monspecs from EDID for the basic stuff */
+ fb_edid_to_monspecs(tx->edid.data, &tx->monspecs);
+
+ /* Read all E-EDID */
+ for (i = 1; i <= block0->extensions; i++)
+ {
+ /* Read an E-EDID block */
+ new = tx->edid.data + (i * EDID_BLOCK_SIZE);
+ ret = siihdmi_read_edid(tx, new, i, EDID_BLOCK_SIZE);
+ if (ret >= 0)
+ {
+ /* Add monspecs from E-EDID */
+ fb_edid_add_monspecs(new, &tx->monspecs);
+ }
+ }
+
+ if (block0->extensions)
+ siihdmi_process_extensions(tx);
+
+ /* Filter unsupported monspecs */
+ siihdmi_filter_monspecs(&tx->monspecs);
+
+ tx->selected_mode = avifb_select_mode(tx->platform->lcd_id,
+ tx->monspecs.modedb_len > 0 ?
+ &tx->monspecs :
+ &default_monspecs);
+
+ if (IS_ERR_OR_NULL(tx->selected_mode))
+ return tx->selected_mode ? PTR_ERR(tx->selected_mode) : -ENODEV;
+
+ /* Width and height in the EDID block are in cm */
+ width = block0->maximum_horizontal_image_size * 10;
+ height = block0->maximum_vertical_image_size * 10;
+
+ if (avifb_set_screen_size(tx->platform->lcd_id,
+ width,
+ height) < 0)
+ return -ENODEV;
+
+ /* Set monitor specs in avifb */
+ avifb_set_monspecs(tx->platform->lcd_id, &tx->monspecs);
+
+ ret = siihdmi_set_resolution(tx, tx->selected_mode);
+ if (ret < 0)
+ return ret;
+
+#ifdef SIIHDMI_USE_FB
+ /* activate the framebuffer (the old code referenced an undeclared
+ * 'mode' here, which failed to compile with SIIHDMI_USE_FB set) */
+ fb_videomode_to_var(&var, tx->selected_mode);
+ var.activate = FB_ACTIVATE_ALL;
+
+ console_lock();
+ tx->info->flags |= FBINFO_MISC_USEREVENT;
+ fb_set_var(tx->info, &var);
+ tx->info->flags &= ~FBINFO_MISC_USEREVENT;
+ console_unlock();
+#endif
+
+ return 0;
+}
+
+#ifdef SIIHDMI_USE_FB
+
+/* Force video blanking via the RQB register (read-modify-write). */
+static int siihdmi_blank(struct siihdmi_tx *tx)
+{
+ u8 rqb;
+
+ rqb = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_RQB);
+ rqb |= SIIHDMI_RQB_FORCE_VIDEO_BLANK;
+
+ return i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_RQB,
+ rqb);
+}
+
+/* Release forced video blanking via the RQB register (read-modify-write). */
+static int siihdmi_unblank(struct siihdmi_tx *tx)
+{
+ u8 rqb;
+
+ rqb = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_RQB);
+ rqb &= ~SIIHDMI_RQB_FORCE_VIDEO_BLANK;
+
+ return i2c_smbus_write_byte_data(tx->client,
+ SIIHDMI_TPI_REG_RQB,
+ rqb);
+}
+
+/*
+ * siihdmi_fb_event_handler() - framebuffer notifier callback.
+ *
+ * Reacts only to events for the framebuffer named in platform data:
+ * re-runs display setup on (un)registration, reprograms the resolution on
+ * mode changes, and maps blank levels onto TPI video blanking.
+ */
+static int siihdmi_fb_event_handler(struct notifier_block *nb,
+ unsigned long val,
+ void *v)
+{
+ const struct fb_event * const event = v;
+ struct siihdmi_tx * const tx = container_of(nb, struct siihdmi_tx, nb);
+
+ /* ignore events for framebuffers we do not drive */
+ if (strcmp(event->info->fix.id, tx->platform->framebuffer))
+ return 0;
+
+ switch (val) {
+ case FB_EVENT_FB_REGISTERED:
+ case FB_EVENT_FB_UNREGISTERED:
+ return siihdmi_setup_display(tx);
+
+ case FB_EVENT_MODE_CHANGE:
+ if (event->info->mode)
+ return siihdmi_set_resolution(tx, event->info->mode);
+ /* fallthrough when no mode is attached to the event */
+ case FB_EVENT_MODE_CHANGE_ALL:
+ /* is handled above, removes a "unhandled event" warning in dmesg */
+ break;
+ case FB_EVENT_BLANK:
+ switch (*((int *) event->data)) {
+ case FB_BLANK_POWERDOWN:
+ /* do NOT siihdmi_power_down() here */
+ /* fallthrough: all non-unblank levels just blank video */
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ return siihdmi_blank(tx);
+ case FB_BLANK_UNBLANK:
+ return siihdmi_unblank(tx);
+ }
+ break;
+ default:
+ DEBUG("unhandled fb event 0x%lx", val);
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * siihdmi_hotplug_event() - threaded IRQ handler for hotplug/ISR events.
+ *
+ * On a hot-plug event, powers the transmitter up and reconfigures the
+ * display (or powers down on unplug), then reports the new state through
+ * the switch device and the optional platform callback.  The ISR bits are
+ * written back at the end to acknowledge the interrupt.
+ */
+static irqreturn_t siihdmi_hotplug_event(int irq, void *dev_id)
+{
+ struct siihdmi_tx *tx = dev_id;
+ u8 isr;
+
+ isr = i2c_smbus_read_byte_data(tx->client, SIIHDMI_TPI_REG_ISR);
+ /* not a hot-plug interrupt: just acknowledge and leave */
+ if (~isr & SIIHDMI_ISR_HOT_PLUG_EVENT)
+ goto complete;
+
+ DEBUG("hotplug: display %s, powered %s\n",
+ (isr & SIIHDMI_ISR_DISPLAY_ATTACHED) ? "attached" : "detached",
+ (isr & SIIHDMI_ISR_RECEIVER_SENSE) ? "on" : "off");
+
+ if (isr & SIIHDMI_ISR_DISPLAY_ATTACHED) {
+ siihdmi_power_up(tx);
+ siihdmi_setup_display(tx);
+ } else {
+ siihdmi_power_down(tx);
+ }
+
+ if (isr & SIIHDMI_ISR_HOT_PLUG_EVENT) {
+ switch_set_state(&tx->sdev,!!(isr & SIIHDMI_ISR_DISPLAY_ATTACHED));
+ if (tx->platform->on_hotplug)
+ tx->platform->on_hotplug(!!(isr & SIIHDMI_ISR_DISPLAY_ATTACHED));
+ }
+
+complete:
+ /* clear the interrupt */
+ i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_ISR, isr);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_SYSFS)
+/* sysfs binary read: expose the cached raw EDID bytes to userspace. */
+static ssize_t siihdmi_sysfs_read_edid(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t offset, size_t count)
+{
+ const struct siihdmi_tx *tx =
+ container_of(bin_attr, struct siihdmi_tx, edid.attributes);
+ const void *data = tx->edid.data;
+ size_t length = tx->edid.length;
+
+ return memory_read_from_buffer(buf, count, &offset, data, length);
+}
+
+/* sysfs binary read: report the active audio source as a short string. */
+static ssize_t siihdmi_sysfs_read_audio(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ static const char * const sources[] = { "none\n", "hdmi\n" };
+ const struct siihdmi_tx *tx =
+ container_of(bin_attr, struct siihdmi_tx, audio.attributes);
+ const char *source = sources[tx->audio.available];
+
+ return memory_read_from_buffer(buf, count, &off,
+ source, strlen(source));
+}
+#endif
+
+/*
+ * Probe: detect the chip (after an optional platform reset), register the
+ * hotplug switch device and threaded IRQ, initialise the transmitter and
+ * expose the EDID/audio sysfs attributes.
+ *
+ * Returns 0 on success or a negative errno; all partially-acquired
+ * resources are released on failure.
+ */
+static int __devinit siihdmi_probe(struct i2c_client *client,
+				   const struct i2c_device_id *id)
+{
+	struct siihdmi_tx *tx;
+	int ret;
+
+	tx = kzalloc(sizeof(struct siihdmi_tx), GFP_KERNEL);
+	if (!tx)
+		return -ENOMEM;
+
+	tx->client = client;
+	tx->platform = client->dev.platform_data;
+
+	i2c_set_clientdata(client, tx);
+
+	/* Check if device is present (chip must be reset before checking I2C) */
+	if (tx->platform->reset)
+		tx->platform->reset();
+	ret = i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_RQB, 0x00);
+	if (ret < 0) {
+		/* fix: this path used to return directly, leaking tx and
+		 * leaving stale clientdata behind */
+		ret = -ENODEV;
+		goto switch_dev_register_failed;
+	}
+
+	tx->sdev.name = "hdmi";
+	ret = switch_dev_register(&tx->sdev);
+	if (ret)
+		goto switch_dev_register_failed;
+
+	/* i2c_client irq is set in parrot_init_i2c_slave_shared_gpio */
+	tx->platform->hotplug.start = client->irq;
+	ret = request_threaded_irq(tx->platform->hotplug.start, NULL,
+				   siihdmi_hotplug_event,
+				   tx->platform->hotplug.flags,
+				   tx->platform->hotplug.name, tx);
+	if (ret < 0)
+		WARNING("failed to setup hotplug interrupt: %d\n", ret);
+	else
+		tx->hotplug.enabled = true;
+
+	/* reinitialise the device to configure irq */
+	/* step 1: reset and initialise */
+	if (tx->platform->reset)
+		tx->platform->reset();
+	/* Do not use spinlock here, the siihdmi_initialise
+	 * will call scheduler (need to wait the chip) */
+	ret = siihdmi_initialise(tx);
+	if (ret < 0)
+		goto irq_initialize_failed;
+	/* Do nothing on the chip configuration because an
+	 * interrupt can be raised at anytime.
+	 * This will totally break the chip */
+
+#if defined(CONFIG_SYSFS)
+	/* /sys/<>/edid */
+	tx->edid.attributes.attr.name = "edid";
+	tx->edid.attributes.attr.mode = 0444;
+
+	/* maximum size of EDID, not necessarily the size of our data */
+	tx->edid.attributes.size = SZ_32K;
+	tx->edid.attributes.read = siihdmi_sysfs_read_edid;
+
+	sysfs_attr_init(&tx->edid.attributes.attr);
+	if (sysfs_create_bin_file(&tx->client->dev.kobj, &tx->edid.attributes) < 0)
+		WARNING("unable to construct attribute for sysfs exported EDID\n");
+
+	/* /sys/<>/audio */
+	tx->audio.attributes.attr.name = "audio";
+	tx->audio.attributes.attr.mode = 0444;
+
+	/* we only want to return the value "hdmi" or "none" */
+	tx->audio.attributes.size = 5;
+	tx->audio.attributes.read = siihdmi_sysfs_read_audio;
+
+	sysfs_attr_init(&tx->audio.attributes.attr);
+	if (sysfs_create_bin_file(&tx->client->dev.kobj, &tx->audio.attributes) < 0)
+		WARNING("unable to construct attribute for sysfs exported audio\n");
+#endif
+
+	return 0;
+
+irq_initialize_failed:
+	/* fix: free the IRQ only if request_threaded_irq() succeeded; the
+	 * old check on hotplug.start could free_irq() an unrequested irq */
+	if (tx->hotplug.enabled)
+		free_irq(tx->platform->hotplug.start, tx);
+	switch_dev_unregister(&tx->sdev);
+switch_dev_register_failed:
+	i2c_set_clientdata(client, NULL);
+	kfree(tx);
+	return ret;
+}
+
+/*
+ * Remove: power the transmitter down and release everything acquired in
+ * probe (IRQ, sysfs attributes, cached EDID, switch device, context).
+ */
+static int __devexit siihdmi_remove(struct i2c_client *client)
+{
+	struct siihdmi_tx *tx;
+
+	tx = i2c_get_clientdata(client);
+	if (!tx)
+		return -ENODEV;
+
+	siihdmi_power_down(tx);
+
+	/* fix: free the IRQ only when it was actually requested in probe;
+	 * hotplug.start is set even when request_threaded_irq() failed */
+	if (tx->hotplug.enabled)
+		free_irq(tx->platform->hotplug.start, tx);
+
+#if defined(CONFIG_SYSFS)
+	sysfs_remove_bin_file(&tx->client->dev.kobj, &tx->edid.attributes);
+	sysfs_remove_bin_file(&tx->client->dev.kobj, &tx->audio.attributes);
+#endif
+
+	/* kfree(NULL) is a no-op, so the previous NULL guard was redundant */
+	kfree(tx->edid.data);
+
+	switch_dev_unregister(&tx->sdev);
+	i2c_set_clientdata(client, NULL);
+	kfree(tx);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend is intentionally a no-op: the chip is fully re-programmed in
+ * siihdmi_resume(), so there is no state worth saving here. */
+static int siihdmi_suspend(struct i2c_client *client, pm_message_t state)
+{
+	DEBUG("suspend\n");
+	return 0;
+}
+
+/*
+ * Resume: re-enter TPI mode, power the transmitter back up, re-arm the
+ * hotplug interrupt (if it was enabled at probe time) and restore HDMI
+ * output mode.  Uses the fast path instead of a full siihdmi_initialise()
+ * (the old #if 0 slow path has been dropped as dead code).
+ */
+static int siihdmi_resume(struct i2c_client *client)
+{
+	int ret;
+	struct siihdmi_tx *tx;
+
+	DEBUG("resume\n");
+
+	tx = i2c_get_clientdata(client);
+	if (tx == NULL) {
+		ERROR("Unconfigured siihdmi\n");
+		return 1;
+	}
+
+	/* Try to initialize faster: force TPI mode directly */
+	ret = i2c_smbus_write_byte_data(tx->client, SIIHDMI_TPI_REG_RQB, 0x00);
+	if (ret < 0) {
+		WARNING("unable to initialise device to TPI mode\n");
+		return ret;
+	}
+
+	/* power up transmitter */
+	if ((ret = siihdmi_power_up(tx)) < 0)
+		return ret;
+
+	if (tx->hotplug.enabled) {
+		/* Re-enable interrupt service */
+		ret = i2c_smbus_write_byte_data(tx->client,
+						SIIHDMI_TPI_REG_IER,
+						SIIHDMI_IER_HOT_PLUG_EVENT);
+		if (ret < 0)
+			WARNING("unable to setup interrupt request\n");
+	}
+
+	ret = i2c_smbus_write_byte_data(tx->client,
+					SIIHDMI_TPI_REG_SYS_CTRL,
+					SIIHDMI_SYS_CTRL_OUTPUT_MODE_SELECT_HDMI);
+	/* fix: the result of this final write was previously discarded and
+	 * the function unconditionally returned 0 */
+	return (ret < 0) ? ret : 0;
+}
+#else
+#define siihdmi_suspend NULL
+#define siihdmi_resume NULL
+#endif
+
+/* I2C device ids this driver binds to (modalias "siihdmi") */
+static const struct i2c_device_id siihdmi_device_table[] = {
+	{ "siihdmi", 0 },
+	{ },
+};
+
+/* Uses the legacy i2c-driver suspend/resume hooks (pre dev_pm_ops) */
+static struct i2c_driver siihdmi_driver = {
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "siihdmi",
+	},
+	.suspend = siihdmi_suspend,
+	.resume	= siihdmi_resume,
+	.probe	= siihdmi_probe,
+	.remove	= __devexit_p(siihdmi_remove),
+	.id_table = siihdmi_device_table,
+};
+
+#ifdef MODULE
+module_i2c_driver(siihdmi_driver);
+#else
+/* Built-in: register via late_initcall so the I2C buses and framebuffer
+ * core are available before this driver probes. */
+static int __init siihdmi_init(void)
+{
+	return i2c_add_driver(&siihdmi_driver);
+}
+
+late_initcall(siihdmi_init);
+#endif
+
+/* Module Information */
+MODULE_AUTHOR("Saleem Abdulrasool <compnerd@compnerd.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Silicon Image SiI9xxx TMDS Driver");
+MODULE_DEVICE_TABLE(i2c, siihdmi_device_table);
diff --git a/drivers/video/mxc/siihdmi.h b/drivers/video/mxc/siihdmi.h
new file mode 100644
index 00000000000000..e6aca6102605ac
--- /dev/null
+++ b/drivers/video/mxc/siihdmi.h
@@ -0,0 +1,422 @@
+/* vim: set noet ts=8 sts=8 sw=8 : */
+/*
+ * Copyright © 2010 Saleem Abdulrasool <compnerd@compnerd.org>.
+ * Copyright © 2010 Genesi USA, Inc. <matt@genesi-usa.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LINUX_DRIVERS_VIDEO_SIIHDMI_H
+#define LINUX_DRIVERS_VIDEO_SIIHDMI_H
+
+#include <linux/cea861.h>
+#include <linux/ioport.h>
+#include <linux/switch.h>
+/* TPI registers */
+#define SIIHDMI_TPI_REG_VIDEO_MODE_DATA_BASE (0x00)
+#define SIIHDMI_TPI_REG_PIXEL_CLOCK_LSB (0x00)
+#define SIIHDMI_TPI_REG_PIXEL_CLOCK_MSB (0x01)
+#define SIIHDMI_TPI_REG_VFREQ_LSB (0x02)
+#define SIIHDMI_TPI_REG_VFREQ_MSB (0x03)
+#define SIIHDMI_TPI_REG_PIXELS_LSB (0x04)
+#define SIIHDMI_TPI_REG_PIXELS_MSB (0x05)
+#define SIIHDMI_TPI_REG_LINES_LSB (0x06)
+#define SIIHDMI_TPI_REG_LINES_MSB (0x07)
+#define SIIHDMI_TPI_REG_INPUT_BUS_PIXEL_REPETITION (0x08)
+#define SIIHDMI_TPI_REG_AVI_INPUT_FORMAT (0x09)
+#define SIIHDMI_TPI_REG_AVI_OUTPUT_FORMAT (0x0a)
+#define SIIHDMI_TPI_REG_YC_INPUT_MODE (0x0b)
+#define SIIHDMI_TPI_REG_AVI_DBYTE0 (0x0c)
+#define SIIHDMI_TPI_REG_AVI_DBYTE1 (0x0d)
+#define SIIHDMI_TPI_REG_AVI_DBYTE2 (0x0e)
+#define SIIHDMI_TPI_REG_AVI_DBYTE3 (0x0f)
+#define SIIHDMI_TPI_REG_AVI_DBYTE4 (0x10)
+#define SIIHDMI_TPI_REG_AVI_DBYTE5 (0x11)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_TOP_BAR_LSB (0x12)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_TOP_BAR_MSB (0x13)
+#define SIIHDMI_TPI_REG_AVI_INFO_START_BOTTOM_BAR_LSB (0x14)
+#define SIIHDMI_TPI_REG_AVI_INFO_START_BOTTOM_BAR_MSB (0x15)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_LEFT_BAR_LSB (0x16)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_LEFT_BAR_MSB (0x17)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_RIGHT_BAR_LSB (0x18)
+#define SIIHDMI_TPI_REG_AVI_INFO_END_RIGHT_BAR_MSB (0x19)
+#define SIIHDMI_TPI_REG_SYS_CTRL (0x1a)
+#define SIIHDMI_TPI_REG_DEVICE_ID (0x1b)
+#define SIIHDMI_TPI_REG_DEVICE_REVISION (0x1c)
+#define SIIHDMI_TPI_REG_TPI_REVISION (0x1d)
+#define SIIHDMI_TPI_REG_PWR_STATE (0x1e)
+#define SIIHDMI_TPI_REG_I2S_ENABLE_MAPPING (0x1f)
+#define SIIHDMI_TPI_REG_I2S_INPUT_CONFIGURATION (0x20)
+#define SIIHDMI_TPI_REG_I2S_STREAM_HEADER_SETTINGS_BASE (0x21)
+#define SIIHDMI_TPI_REG_I2S_CHANNEL_STATUS (0x21)
+#define SIIHDMI_TPI_REG_I2S_CATEGORY_CODE (0x22)
+#define SIIHDMI_TPI_REG_I2S_SOURCE_CHANNEL (0x23)
+#define SIIHDMI_TPI_REG_I2S_ACCURACY_SAMPLING_FREQUENCY (0x24)
+#define SIIHDMI_TPI_REG_I2S_ORIGINAL_FREQ_SAMPLE_LENGTH (0x25)
+#define SIIHDMI_TPI_REG_I2S_AUDIO_CONFIGURATION_BASE (0x26)
+#define SIIHDMI_TPI_REG_I2S_AUDIO_PACKET_LAYOUT_CTRL (0x26)
+#define SIIHDMI_TPI_REG_I2S_AUDIO_SAMPLING_HBR (0x27)
+#define SIIHDMI_TPI_REG_I2S_AUDIO_RESERVED (0x28)
+#define SIIHDMI_TPI_REG_HDCP_QUERY_DATA (0x29)
+#define SIIHDMI_TPI_REG_HDCP_CONTROL_DATA		(0x2a)
+#define SIIHDMI_TPI_REG_HDCP_BKSV_1 (0x2b)
+#define SIIHDMI_TPI_REG_HDCP_BKSV_2 (0x2c)
+#define SIIHDMI_TPI_REG_HDCP_BKSV_3 (0x2d)
+#define SIIHDMI_TPI_REG_HDCP_BKSV_4 (0x2e)
+#define SIIHDMI_TPI_REG_HDCP_BKSV_5 (0x2f)
+#define SIIHDMI_TPI_REG_HDCP_REVISION (0x30)
+#define SIIHDMI_TPI_REG_HDCP_KSV_V_STAR_VALUE (0x31)
+#define SIIHDMI_TPI_REG_HDCP_V_STAR_VALUE_1 (0x32)
+#define SIIHDMI_TPI_REG_HDCP_V_STAR_VALUE_2 (0x33)
+#define SIIHDMI_TPI_REG_HDCP_V_STAR_VALUE_3 (0x34)
+#define SIIHDMI_TPI_REG_HDCP_V_STAR_VALUE_4 (0x35)
+#define SIIHDMI_TPI_REG_HDCP_AKSV_1 (0x36)
+#define SIIHDMI_TPI_REG_HDCP_AKSV_2 (0x37)
+#define SIIHDMI_TPI_REG_HDCP_AKSV_3 (0x38)
+#define SIIHDMI_TPI_REG_HDCP_AKSV_4 (0x39)
+#define SIIHDMI_TPI_REG_HDCP_AKSV_5 (0x3a)
+
+#define SIIHDMI_TPI_REG_IER (0x3c)
+#define SIIHDMI_TPI_REG_ISR (0x3d)
+
+#define SIIHDMI_INTERNAL_REG_SET_PAGE (0xbc)
+#define SIIHDMI_INTERNAL_REG_SET_OFFSET (0xbd)
+#define SIIHDMI_INTERNAL_REG_ACCESS (0xbe)
+
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT (0xbf)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_TYPE (0xc0)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_VERSION (0xc1)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_LEN (0xc2)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE0 (0xc3)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE1 (0xc4)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE2 (0xc5)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE3 (0xc6)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE4 (0xc7)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE5 (0xc8)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE6 (0xc9)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE7 (0xca)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE8 (0xcb)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE9 (0xcc)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE10 (0xcd)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE11 (0xce)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE12 (0xcf)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE13 (0xd0)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE14 (0xd1)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE15 (0xd2)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE16 (0xd3)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE17 (0xd4)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE18 (0xd5)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE19 (0xd6)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE20 (0xd7)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE21 (0xd8)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE22 (0xd9)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE23 (0xda)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE24 (0xdb)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE25 (0xdc)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE26 (0xdd)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE27 (0xde)
+
+#define SIIHDMI_TPI_REG_RQB (0xc7)
+
+/* driver constants */
+#define SIIHDMI_DEVICE_ID_902x (0xb0)
+#define SIIHDMI_BASE_TPI_REVISION (0x29)
+#define SIIHDMI_CTRL_INFO_FRAME_DRAIN_TIME (0x80)
+
+#define SIIHDMI_HOTPLUG_HANDLER_TIMEOUT (0x32)
+
+#define SIIHDMI_VERSION_FLAG_VIRTUAL (1 << 7)
+
+/* Input Bus and Pixel Repetition */
+#define SIIHDMI_PIXEL_REPETITION_DUAL (1 << 0)
+#define SIIHDMI_PIXEL_REPETITION_QUAD (3 << 0)
+#define SIIHDMI_INPUT_BUS_EDGE_SELECT_RISING (1 << 4)
+#define SIIHDMI_INPUT_BUS_SELECT_FULL_PIXEL_WIDTH (1 << 5)
+#define SIIHDMI_INPUT_BUS_TMDS_CLOCK_RATIO_1X (1 << 6)
+#define SIIHDMI_INPUT_BUS_TMDS_CLOCK_RATIO_2X (2 << 6)
+#define SIIHDMI_INPUT_BUS_TMDS_CLOCK_RATIO_4X (3 << 6)
+
+/* Input Format */
+#define SIIHDMI_INPUT_COLOR_SPACE_RGB (0 << 0)
+#define SIIHDMI_INPUT_COLOR_SPACE_YUV_444 (1 << 0)
+#define SIIHDMI_INPUT_COLOR_SPACE_YUV_422 (2 << 0)
+#define SIIHDMI_INPUT_COLOR_SPACE_BLACK (3 << 0)
+#define SIIHDMI_INPUT_VIDEO_RANGE_EXPANSION_AUTO (0 << 2)
+#define SIIHDMI_INPUT_VIDEO_RANGE_EXPANSION_ON (1 << 2)
+#define SIIHDMI_INPUT_VIDEO_RANGE_EXPANSION_OFF (2 << 2)
+#define SIIHDMI_INPUT_COLOR_DEPTH_8BIT (0 << 6)
+#define SIIHDMI_INPUT_COLOR_DEPTH_10BIT (2 << 6)
+#define SIIHDMI_INPUT_COLOR_DEPTH_12BIT (3 << 6)
+#define SIIHDMI_INPUT_COLOR_DEPTH_16BIT (1 << 6)
+
+/* Output Format */
+#define SIIHDMI_OUTPUT_FORMAT_HDMI_RGB (0 << 0)
+#define SIIHDMI_OUTPUT_FORMAT_HDMI_YUV_444 (1 << 0)
+#define SIIHDMI_OUTPUT_FORMAT_HDMI_YUV_422 (2 << 0)
+#define SIIHDMI_OUTPUT_FORMAT_DVI_RGB (3 << 0)
+#define SIIHDMI_OUTPUT_VIDEO_RANGE_COMPRESSION_AUTO (0 << 2)
+#define SIIHDMI_OUTPUT_VIDEO_RANGE_COMPRESSION_ON (1 << 2)
+#define SIIHDMI_OUTPUT_VIDEO_RANGE_COMPRESSION_OFF (2 << 2)
+#define SIIHDMI_OUTPUT_COLOR_STANDARD_BT601 (0 << 4)
+#define SIIHDMI_OUTPUT_COLOR_STANDARD_BT709 (1 << 4)
+#define SIIHDMI_OUTPUT_DITHERING (1 << 5)
+#define SIIHDMI_OUTPUT_COLOR_DEPTH_8BIT (0 << 6)
+#define SIIHDMI_OUTPUT_COLOR_DEPTH_10BIT (2 << 6)
+#define SIIHDMI_OUTPUT_COLOR_DEPTH_12BIT (3 << 6)
+#define SIIHDMI_OUTPUT_COLOR_DEPTH_16BIT (1 << 6)
+
+/* System Control */
+#define SIIHDMI_SYS_CTRL_OUTPUT_MODE_SELECT_HDMI (1 << 0)
+#define SIIHDMI_SYS_CTRL_DDC_BUS_OWNER_HOST (1 << 1)
+#define SIIHDMI_SYS_CTRL_DDC_BUS_GRANTED (1 << 1)
+#define SIIHDMI_SYS_CTRL_DDC_BUS_REQUEST (1 << 2)
+#define SIIHDMI_SYS_CTRL_AV_MUTE_HDMI (1 << 3)
+#define SIIHDMI_SYS_CTRL_TMDS_OUTPUT_POWER_DOWN (1 << 4)
+#define SIIHDMI_SYS_CTRL_DYNAMIC_LINK_INTEGRITY (1 << 6)
+
+/* Device Power State Control Data */
+#define SIIHDMI_POWER_STATE_D0 (0 << 0)
+#define SIIHDMI_POWER_STATE_D1 (1 << 0)
+#define SIIHDMI_POWER_STATE_D2 (2 << 0)
+#define SIIHDMI_POWER_STATE_D3 (3 << 0)
+#define SIIHDMI_WAKEUP_STATE_COLD (1 << 2)
+
+/* Audio Frequency Sampling Length */
+#define SIIHDMI_AUDIO_HANDLING_PASS_BASIC_AUDIO (0 << 0)
+#define SIIHDMI_AUDIO_HANDLING_PASS_ALL_AUDIO_MODES (1 << 0)
+#define SIIHDMI_AUDIO_HANDLING_DOWN_SAMPLE (2 << 0)
+#define SIIHDMI_AUDIO_HANDLING_UNCHECKED (3 << 0)
+
+/* Audio Interface Control */
+#define SIIHDMI_AUDIO_MUTE (1 << 4)
+#define SIIHDMI_AUDIO_DISABLE (0 << 6)
+#define SIIHDMI_AUDIO_I2S_ENABLE (2 << 6)
+#define SIIHDMI_AUDIO_SPDIF_ENABLE (1 << 6)
+
+/* Audio Sampling HBR */
+#define SIIHDMI_AUDIO_SAMPLING_HBR_ENABLE (1 << 2)
+
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_AUTO (0 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_32_KHZ (1 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_44_1_KHZ (2 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_48_KHZ (3 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_88_2_KHZ (4 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_96_KHZ (5 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_176_4_KHZ (6 << 3)
+#define SIIHDMI_AUDIO_SAMPLING_FREQUENCY_192_KHZ (7 << 3)
+
+#define SIIHDMI_AUDIO_SAMPLING_DEPTH_AUTO (0 << 6)
+#define SIIHDMI_AUDIO_SAMPLING_DEPTH_16_BIT (1 << 6)
+#define SIIHDMI_AUDIO_SAMPLING_DEPTH_20_BIT (2 << 6)
+#define SIIHDMI_AUDIO_SAMPLING_DEPTH_24_BIT (3 << 6)
+
+/* I²S Enable and Mapping */
+#define SIIHDMI_I2S_MAPPING_SELECT_SD_CHANNEL_0 (0 << 0)
+#define SIIHDMI_I2S_MAPPING_SELECT_SD_CHANNEL_1 (1 << 0)
+#define SIIHDMI_I2S_MAPPING_SELECT_SD_CHANNEL_2 (2 << 0)
+#define SIIHDMI_I2S_MAPPING_SELECT_SD_CHANNEL_3 (3 << 0)
+#define SIIHDMI_I2S_MAPPING_SWAP_CHANNELS (1 << 3)
+#define SIIHDMI_I2S_ENABLE_BASIC_AUDIO_DOWNSAMPLING (1 << 4)
+#define SIIHDMI_I2S_MAPPING_SELECT_FIFO_0 (0 << 5)
+#define SIIHDMI_I2S_MAPPING_SELECT_FIFO_1 (1 << 5)
+#define SIIHDMI_I2S_MAPPING_SELECT_FIFO_2 (2 << 5)
+#define SIIHDMI_I2S_MAPPING_SELECT_FIFO_3 (3 << 5)
+#define SIIHDMI_I2S_ENABLE_SELECTED_FIFO (1 << 7)
+
+/* I²S Input Configuration */
+#define SIIHDMI_I2S_WS_TO_SD_FIRST_BIT_SHIFT (1 << 0)
+#define SIIHDMI_I2S_SD_LSB_FIRST (1 << 1)
+#define SIIHDMI_I2S_SD_RIGHT_JUSTIFY_DATA (1 << 2)
+#define SIIHDMI_I2S_WS_POLARITY_HIGH (1 << 3)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_128 (0 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_256 (1 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_384 (2 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_512 (3 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_768 (4 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_1024 (5 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_1152 (6 << 4)
+#define SIIHDMI_I2S_MCLK_MULTIPLIER_192 (7 << 4)
+#define SIIHDMI_I2S_SCK_SAMPLE_RISING_EDGE (1 << 7)
+
+/* Interrupt Enable */
+#define SIIHDMI_IER_HOT_PLUG_EVENT (1 << 0)
+#define SIIHDMI_IER_RECEIVER_SENSE_EVENT (1 << 1)
+#define SIIHDMI_IER_CTRL_BUS_EVENT (1 << 2)
+#define SIIHDMI_IER_CPI_EVENT (1 << 3)
+#define SIIHDMI_IER_AUDIO_EVENT (1 << 4)
+#define SIIHDMI_IER_SECURITY_STATUS_CHANGE (1 << 5)
+#define SIIHDMI_IER_HDCP_VALUE_READY (1 << 6)
+#define SIIHDMI_IER_HDCP_AUTHENTICATION_STATUS_CHANGE (1 << 7)
+
+/* Interrupt Status */
+#define SIIHDMI_ISR_HOT_PLUG_EVENT (1 << 0)
+#define SIIHDMI_ISR_RECEIVER_SENSE_EVENT (1 << 1)
+#define SIIHDMI_ISR_CTRL_BUS_EVENT (1 << 2)
+#define SIIHDMI_ISR_DISPLAY_ATTACHED (1 << 2)
+#define SIIHDMI_ISR_CPI_EVENT (1 << 3)
+#define SIIHDMI_ISR_RECEIVER_SENSE (1 << 3)
+#define SIIHDMI_ISR_AUDIO_EVENT (1 << 4)
+#define SIIHDMI_ISR_SECURITY_STATUS_CHANGED (1 << 5)
+#define SIIHDMI_ISR_HDCP_VALUE_READY (1 << 6)
+#define SIIHDMI_ISR_HDCP_AUTHENTICATION_STATUS_CHANGED (1 << 7)
+
+/* Request/Grant/Black Mode */
+#define SIIHDMI_RQB_DDC_BUS_REQUEST (1 << 0)
+#define SIIHDMI_RQB_DDC_BUS_GRANTED (1 << 1)
+#define SIIHDMI_RQB_I2C_ACCESS_DDC_BUS (1 << 2)
+#define SIIHDMI_RQB_FORCE_VIDEO_BLANK (1 << 5)
+#define SIIHDMI_RQB_TPI_MODE_DISABLE (1 << 7)
+
+/*
+ * SII HDMI chips have two ways to send HDMI InfoFrames:
+ * a) AVI InfoFrame is sent via a dedicated TPI register block
+ * b) Other InfoFrames are sent via a misc TPI register block with a header to
+ *    identify the InfoFrame data
+ */
+
+/* InfoFrame blocks */
+#define SIIHDMI_TPI_REG_AVI_INFO_FRAME_BASE (SIIHDMI_TPI_REG_AVI_DBYTE0)
+#define SIIHDMI_TPI_REG_AVI_INFO_FRAME_LENGTH (SIIHDMI_TPI_REG_AVI_INFO_END_RIGHT_BAR_MSB - SIIHDMI_TPI_REG_AVI_DBYTE0 + 1)
+
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_BASE (SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT)
+#define SIIHDMI_TPI_REG_MISC_INFO_FRAME_LENGTH (SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE27 - SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT + 1)
+
+#define SIIHDMI_TPI_REG_AUDIO_INFO_FRAME_BASE (SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT)
+#define SIIHDMI_TPI_REG_AUDIO_INFO_FRAME_LENGTH (SIIHDMI_TPI_REG_MISC_INFO_FRAME_DBYTE10 - SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT + 1)
+
+/*
+ * AVI InfoFrame is handled specially. The type, version, and length fields (1
+ * byte each) are skipped in transmission. We must offset into the standard
+ * structure to skip these fields.
+ */
+#define SIIHDMI_AVI_INFO_FRAME_OFFSET (0x03)
+
+
+/* Selector values written to SIIHDMI_TPI_REG_MISC_INFO_FRAME_SELECT to
+ * choose which InfoFrame the misc buffer carries. */
+enum siihdmi_info_frame_type {
+	SIIHDMI_INFO_FRAME_NONE,
+	SIIHDMI_INFO_FRAME_SPD_ACP,
+	SIIHDMI_INFO_FRAME_AUDIO,
+	SIIHDMI_INFO_FRAME_MPEG_GBD,
+	SIIHDMI_INFO_FRAME_GENERIC1_ISRC1,
+	SIIHDMI_INFO_FRAME_GENERIC2_ISRC2,
+};
+
+/* Header byte prepended to misc InfoFrame data: selects the InfoFrame
+ * slot (enum siihdmi_info_frame_type) and whether transmission is
+ * one-shot or repeated every frame.  Bitfield order depends on host
+ * endianness, hence the two layouts. */
+struct __packed info_frame_buffer_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned info_frame : 3;
+	unsigned : 1;
+	unsigned : 2;
+	unsigned repeat : 1;
+	unsigned enable : 1;
+#else
+	unsigned enable : 1;
+	unsigned repeat : 1;
+	unsigned : 2;
+	unsigned : 1;
+	unsigned info_frame : 3;
+#endif
+};
+
+/* SPD InfoFrame as laid out in the misc TPI buffer: header byte followed
+ * by the CEA-861 SPD payload, padded to the register block size. */
+struct siihdmi_spd_info_frame {
+	struct info_frame_buffer_header header;
+	struct spd_info_frame info_frame;
+	u8 padding[2];
+};
+
+/* Audio InfoFrame as laid out in the misc TPI buffer. */
+struct siihdmi_audio_info_frame {
+	struct info_frame_buffer_header header;
+	struct audio_info_frame info_frame;
+};
+
+/* Board-specific configuration handed to the driver through
+ * i2c_client->dev.platform_data; every callback may be NULL. */
+struct siihdmi_platform_data {
+	/* reset function */
+	void (*reset)(void);
+
+	/* HDMI SPD InfoFrame Data */
+	char *vendor;
+	char *description;
+
+	/* id of the lcd segment */
+	const char *lcd_id;
+
+	/* hotplug IRQ */
+	struct resource hotplug;
+
+	/* maximum pixel clock rate */
+	int pixclock;
+
+	/* pixel bus format fed to the transmitter */
+	enum {
+		RGB_24,
+		YUV_422_8_MUX,
+	} input_format;
+
+	/* Specific method called when a hotplug event is detected*/
+	void (*on_hotplug)(int screen_attached);
+};
+
+/* Per-device driver state, allocated in probe and stored as i2c client
+ * data.  Holds the cached EDID, the selected video mode and the sysfs
+ * attributes exported for user space. */
+struct siihdmi_tx {
+	struct i2c_client *client;
+	struct siihdmi_platform_data *platform;
+	struct fb_monspecs monspecs;
+	const struct fb_videomode *selected_mode;
+	struct switch_dev sdev;
+	/* true only when request_threaded_irq() succeeded in probe */
+	struct {
+		bool enabled;
+	} hotplug;
+
+	/* EDID blob read from the sink, exported via the "edid" attribute */
+	struct {
+		u8 *data;
+		u32 length;
+#if defined(CONFIG_SYSFS)
+		struct bin_attribute attributes;
+#endif
+	} edid;
+
+	/* audio capability flag, exported via the "audio" attribute */
+	struct {
+		bool available;
+#if defined(CONFIG_SYSFS)
+		struct bin_attribute attributes;
+#endif
+	} audio;
+
+	/* properties of the attached sink */
+	struct {
+		enum {
+			SINK_TYPE_DVI,
+			SINK_TYPE_HDMI,
+		} type;
+
+		enum {
+			SCANNING_EXACT,
+			SCANNING_UNDERSCANNED,
+			SCANNING_OVERSCANNED,
+		} scanning;
+
+	} sink;
+};
+
+#endif
+
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 00000000000000..635414059997d1
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,161 @@
+#
+# YAFFS file system configurations
+#
+
+config YAFFS_FS
+ tristate "YAFFS2 file system support"
+ default n
+ depends on MTD_BLOCK
+ select YAFFS_YAFFS1
+ select YAFFS_YAFFS2
+ help
+ YAFFS2, or Yet Another Flash Filing System, is a filing system
+ optimised for NAND Flash chips.
+
+ To compile the YAFFS2 file system support as a module, choose M
+ here: the module will be called yaffs2.
+
+ If unsure, say N.
+
+ Further information on YAFFS2 is available at
+ <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+ bool "512 byte / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS1 support -- yaffs for 512 byte / page devices
+
+ Not needed for 2K-page devices.
+
+ If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+ bool "Use older-style on-NAND data format with pageStatus byte"
+ depends on YAFFS_YAFFS1
+ default n
+ help
+
+ Older-style on-NAND data format has a "pageStatus" byte to record
+ chunk/page state. This byte is zero when the page is discarded.
+ Choose this option if you have existing on-NAND data using this
+ format that you need to continue to support. New data written
+ also uses the older-style format. Note: Use of this option
+ generally requires that MTD's oob layout be adjusted to use the
+ older-style format. See notes on tags formats and MTD versions
+ in yaffs_mtdif1.c.
+
+ If unsure, say N.
+
+config YAFFS_DOES_ECC
+ bool "Lets Yaffs do its own ECC"
+ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This enables Yaffs to use its own ECC functions instead of using
+ the ones from the generic MTD-NAND driver.
+
+ If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+ default n
+ help
+ This makes yaffs_ecc.c use the same ecc byte order as Steven
+ Hill's nand_ecc.c. If not set, then you get the same ecc byte
+ order as SmartMedia.
+
+ If unsure, say N.
+
+config YAFFS_YAFFS2
+ bool "2048 byte (or larger) / page devices"
+ depends on YAFFS_FS
+ default y
+ help
+ Enable YAFFS2 support -- yaffs for >= 2K bytes per page devices
+
+ If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+ bool "Autoselect yaffs2 format"
+ depends on YAFFS_YAFFS2
+ default y
+ help
+	  Without this, you need to explicitly use yaffs2 as the file
+ system type. With this, you can say "yaffs" and yaffs or yaffs2
+ will be used depending on the device page size (yaffs on
+ 512-byte page devices, yaffs2 on 2K page devices).
+
+ If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+ bool "Disable YAFFS from doing ECC on tags by default"
+ depends on YAFFS_FS && YAFFS_YAFFS2
+ default n
+ help
+ This defaults Yaffs to using its own ECC calculations on tags instead of
+ just relying on the MTD.
+ This behavior can also be overridden with tags_ecc_on and
+ tags_ecc_off mount options.
+
+ If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+ bool "Force chunk erase check"
+ depends on YAFFS_FS
+ default n
+ help
+ Normally YAFFS only checks chunks before writing until an erased
+ chunk is found. This helps to detect any partially written
+ chunks that might have happened due to power loss.
+
+ Enabling this forces on the test that chunks are erased in flash
+ before writing to them. This takes more time but is potentially
+ a bit more secure.
+
+ Suggest setting Y during development and ironing out driver
+ issues etc. Suggest setting to N if you want faster writing.
+
+ If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+ bool "Empty lost and found on boot"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is enabled then the contents of lost and found is
+ automatically dumped at mount.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+ bool "Disable yaffs2 block refreshing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then block refreshing is disabled.
+ Block refreshing infrequently refreshes the oldest block in
+ a yaffs2 file system. This mechanism helps to refresh flash to
+ mitigate against data loss. This is particularly useful for MLC.
+
+ If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+ bool "Disable yaffs2 background processing"
+ depends on YAFFS_FS
+ default n
+ help
+ If this is set, then background processing is disabled.
+ Background processing makes many foreground activities faster.
+
+ If unsure, say N.
+
+config YAFFS_XATTR
+ bool "Enable yaffs2 xattr support"
+ depends on YAFFS_FS
+ default y
+ help
+ If this is set then yaffs2 will provide xattr support.
+ If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 00000000000000..e63a28aa3ed691
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsvalidity.o
+yaffs-y += yaffs_mtdif.o yaffs_mtdif1.o yaffs_mtdif2.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 00000000000000..f9cd5becd8f470
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,396 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+#ifdef CONFIG_YAFFS_KMALLOC_ALLOCATOR
+
+/* kmalloc-allocator variant: nothing to tear down, since every tnode and
+ * object is allocated and freed individually with kmalloc()/kfree(). */
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ dev = dev; /* silence unused-parameter warning */
+}
+
+/* kmalloc-allocator variant: no per-device state to set up. */
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ dev = dev; /* silence unused-parameter warning */
+}
+
+/* kmalloc-allocator variant: allocate one tnode of dev->tnode_size bytes
+ * straight from the kernel heap. Returns NULL on allocation failure.
+ * Note: the explicit cast of kmalloc()'s return was dropped -- kmalloc()
+ * returns void *, which converts implicitly in C.
+ */
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+	return kmalloc(dev->tnode_size, GFP_NOFS);
+}
+
+/* kmalloc-allocator variant: return one tnode to the kernel heap. */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ dev = dev; /* silence unused-parameter warning */
+ kfree(tn);
+}
+
+/* kmalloc-allocator variant: no object-pool state to initialise. */
+void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev; /* silence unused-parameter warning */
+}
+
+/* kmalloc-allocator variant: no object-pool state to tear down. */
+void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ dev = dev; /* silence unused-parameter warning */
+}
+
+/* kmalloc-allocator variant: allocate one yaffs_obj straight from the
+ * kernel heap. Returns NULL on allocation failure.
+ *
+ * Fix: kmalloc() takes (size, gfp_t flags); the flags argument was
+ * missing, which would not compile. Use GFP_NOFS like every other
+ * allocation in this file.
+ */
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+	dev = dev; /* silence unused-parameter warning */
+	return kmalloc(sizeof(struct yaffs_obj), GFP_NOFS);
+}
+
+/* kmalloc-allocator variant: return one object to the kernel heap. */
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ dev = dev; /* silence unused-parameter warning */
+ kfree(obj);
+}
+
+#else
+
+/* One batch of tnodes allocated together; batches are chained so they
+ * can all be kfree()d at deinit time. */
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+/* One batch of objects allocated together; same lifetime scheme. */
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+/* Per-device pool allocator: free lists of tnodes/objects plus the
+ * batch lists used to release everything on shutdown. */
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct yaffs_obj *free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+/* Free every batch of tnodes on alloc_tnode_list and reset the tnode
+ * accounting. Tnodes are only ever freed batch-wise, here. */
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+/* Reset the tnode pool to empty. The allocator struct itself must
+ * already have been attached to dev->allocator. */
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+ } else {
+ YBUG();
+ }
+}
+
+/* Grow the tnode pool by n_tnodes entries: allocate one contiguous chunk,
+ * thread the entries together through internal[0] and push them onto the
+ * free list. Returns YAFFS_OK or YAFFS_FAIL. */
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ YBUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes: tnode size is runtime-determined, so
+ * step through the chunk in byte units of dev->tnode_size, linking
+ * each entry to the next via internal[0]. */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ /* Splice the new chain onto the front of the free list. */
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,"Tnodes added");
+
+ return YAFFS_OK;
+}
+
+/* Pop a tnode off the free list, refilling the pool first if it is
+ * empty. Returns NULL if the pool could not be refilled. */
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ YBUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* Return a tnode to the free list (NULL is tolerated). Also zeroes
+ * checkpoint_blocks_required so the checkpoint size estimate is redone. */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/* Reset the object pool to empty; dev->allocator must already exist. */
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (allocator) {
+ allocator->allocated_obj_list = NULL;
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ } else {
+ YBUG();
+ }
+}
+
+/* Free every batch of objects on allocated_obj_list and reset the
+ * object accounting. Objects are only ever freed batch-wise, here. */
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ YBUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+
+ allocator->allocated_obj_list = tmp;
+ }
+
+ allocator->free_objs = NULL;
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+/* Grow the object pool by n_obj entries: allocate one contiguous array,
+ * chain its entries onto the free list via siblings.next, and record the
+ * batch on allocated_obj_list so it can be released at deinit time.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ *
+ * Cleanup: dropped the redundant NULL guards on the failure path --
+ * kfree(NULL) is a documented no-op.
+ */
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	int i;
+	struct yaffs_obj *new_objs;
+	struct yaffs_obj_list *list;
+
+	if (!allocator) {
+		YBUG();
+		return YAFFS_FAIL;
+	}
+
+	if (n_obj < 1)
+		return YAFFS_OK;
+
+	/* Allocate the object array and its batch-tracking node. */
+	new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+	list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+	if (!new_objs || !list) {
+		/* kfree(NULL) is a no-op, so no per-pointer guards needed. */
+		kfree(new_objs);
+		kfree(list);
+		yaffs_trace(YAFFS_TRACE_ALLOCATE,
+			"Could not allocate more objects");
+		return YAFFS_FAIL;
+	}
+
+	/* Hook them into the free list through siblings.next. */
+	for (i = 0; i < n_obj - 1; i++) {
+		new_objs[i].siblings.next =
+		    (struct list_head *)(&new_objs[i + 1]);
+	}
+
+	new_objs[n_obj - 1].siblings.next = (void *)allocator->free_objs;
+	allocator->free_objs = new_objs;
+	allocator->n_free_objects += n_obj;
+	allocator->n_obj_created += n_obj;
+
+	/* Now add this bunch of Objects to a list for freeing up. */
+
+	list->objects = new_objs;
+	list->next = allocator->allocated_obj_list;
+	allocator->allocated_obj_list = list;
+
+	return YAFFS_OK;
+}
+
+/* Pop an object off the free list, refilling the pool first if it is
+ * empty. Returns NULL if the pool could not be refilled. */
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ YBUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_objs)
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (allocator->free_objs) {
+ obj = allocator->free_objs;
+ allocator->free_objs =
+ (struct yaffs_obj *)(allocator->free_objs->siblings.next);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+/* Return an object to the free list. NOTE(review): unlike the tnode
+ * variant, obj is dereferenced unconditionally -- callers must not pass
+ * NULL here. */
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator)
+ YBUG();
+ else {
+ /* Link into the free list. */
+ obj->siblings.next = (struct list_head *)(allocator->free_objs);
+ allocator->free_objs = obj;
+ allocator->n_free_objects++;
+ }
+}
+
+/* Tear down both pools and release the allocator struct itself.
+ * Safe to call only after yaffs_init_raw_tnodes_and_objs() succeeded. */
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ if (dev->allocator) {
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+ } else {
+ YBUG();
+ }
+}
+
+/* Create the per-device allocator and initialise both pools.
+ * On kmalloc failure dev->allocator stays NULL; later pool calls will
+ * YBUG() rather than crash on a NULL dereference. */
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (!dev->allocator) {
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+ } else {
+ YBUG();
+ }
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 00000000000000..4d5f2aec89ff64
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 00000000000000..9b47d376310c5a
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+/* Copy ownership, timestamps and device number from an on-flash object
+ * header into the in-memory object. */
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+/* Inverse of yaffs_load_attribs(): copy the in-memory object's
+ * ownership, timestamps and device number into an object header about
+ * to be written to flash. */
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+
+}
+
+/* Stamp mtime with the current time; optionally mirror it into atime
+ * (do_a) and/or ctime (do_c). */
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+}
+
+/* Initialise a fresh object's attributes: all three timestamps set to
+ * now, plus the given owner, group and device number. */
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+}
+
+/* Size of an object as reported to stat(): file length for regular
+ * files, alias length for symlinks, 0 for everything else. Hardlinks
+ * are resolved to their target first. */
+loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
+/* Apply the fields flagged in attr->ia_valid to the object (setattr
+ * path), resize the file when ATTR_SIZE is set, then rewrite the
+ * object header so the change hits flash. Always returns YAFFS_OK. */
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ /* Persist the new attributes in the object header. */
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+/* Fill an iattr from the object (getattr path), marking every field it
+ * sets in ia_valid. Y_TIME_CONVERT is used as an lvalue here --
+ * presumably it expands to the appropriate member of the kernel time
+ * struct; confirm against yportenv.h. Always returns YAFFS_OK. */
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 00000000000000..33d541d69441b4
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 00000000000000..7df42cd0066ed8
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,98 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+/* Return a pointer to the chunk-in-use bitmap for block blk. The map
+ * is one chunk_bit_stride-byte row per block, indexed from
+ * internal_start_block. Out-of-range blocks trip YBUG() but the
+ * (out-of-bounds) pointer is still computed and returned. */
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+/* Sanity-check a (block, chunk) pair; logs and YBUG()s when either the
+ * block is outside the internal range or the chunk index is outside
+ * the per-block chunk count. Does not abort the caller. */
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ YBUG();
+ }
+}
+
+/* Mark every chunk of block blk as not-in-use. */
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+/* Clear the in-use bit for one chunk of block blk. */
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+/* Set the in-use bit for one chunk of block blk. */
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+/* Return 1 if the given chunk of block blk is marked in-use, else 0. */
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+/* Return 1 if any chunk of block blk is still marked in-use, else 0. */
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+	u8 *bits = yaffs_block_bits(dev, blk);
+	int byte;
+
+	for (byte = 0; byte < dev->chunk_bit_stride; byte++) {
+		if (bits[byte])
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Count how many chunks of block blk are marked in-use. */
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+	u8 *bits = yaffs_block_bits(dev, blk);
+	int total = 0;
+	int byte;
+
+	/* Sum the population count of each bitmap byte. */
+	for (byte = 0; byte < dev->chunk_bit_stride; byte++)
+		total += hweight8(bits[byte]);
+
+	return total;
+}
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 00000000000000..cf9ea58da0d93c
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 00000000000000..4e40f437e655e8
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,415 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+/* Return 1 when at least one erased block is available beyond the
+ * reserved pool -- i.e. there is room to write a checkpoint. */
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+	int spare = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpt blocks_avail = %d", spare);
+
+	return spare > 0;
+}
+
+/* Erase every block currently holding checkpoint data. Successfully
+ * erased blocks become EMPTY and are returned to the free pool; erase
+ * failures are reported via bad_block_fn and marked DEAD. Returns 1 on
+ * completion, 0 if no erase_fn is configured.
+ * NOTE(review): bad_block_fn is called unconditionally here -- callers
+ * (yaffs2_checkpt_open) verify it is non-NULL first. */
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->param.erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ if (dev->param.
+ erase_fn(dev,
+ i - dev->block_offset /* realign */ )) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->param.bad_block_fn(dev, i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+/* Find the next EMPTY block at or after checkpt_next_block to write
+ * checkpoint data into. On success sets checkpt_cur_block (and advances
+ * checkpt_next_block); on failure sets both to -1. */
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+/* Read-side counterpart of yaffs2_checkpt_find_erased_block(): scan
+ * forward from checkpt_next_block for a block whose first chunk carries
+ * the checkpoint sequence marker, record it in checkpt_block_list, and
+ * use the stored obj_id as the hint for the following search. Sets
+ * checkpt_cur_block/-next_block to -1 when no more blocks are found. */
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ int realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk,
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
+ i, tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+ dev->checkpt_block_list[dev->
+ blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+/* Open the checkpoint stream for writing (writing != 0) or reading.
+ * Verifies the NAND callbacks exist, allocates the chunk-sized transfer
+ * buffer, and resets the stream counters. Write mode erases the old
+ * checkpoint first; read mode allocates the block list used by
+ * yaffs_checkpt_close(). Returns 1 on success, 0 on failure. */
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->param.write_chunk_tags_fn ||
+ !dev->param.read_chunk_tags_fn ||
+ !dev->param.erase_fn || !dev->param.bad_block_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ dev->checkpt_byte_offs = 0;
+ return yaffs_checkpt_erase(dev);
+ } else {
+ int i;
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 block is (hopefully)
+ * going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 +
+ 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+ }
+
+ return 1;
+}
+
+/* Fold the running additive checksum and xor checksum into a single
+ * 32-bit value for the caller. Always returns 1. */
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+	*sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xFF);
+	return 1;
+}
+
+/* Flush the (partially) filled checkpoint buffer to NAND as one chunk.
+ * Allocates a fresh erased block when none is current, tags the chunk
+ * with the checkpoint sequence marker plus a hint to the next block,
+ * writes it, then zeroes the buffer for the next fill. Returns 1 on
+ * success, 0 if no erased block could be found.
+ *
+ * Fix: corrected "wite" -> "write" in the trace message.
+ * NOTE(review): tags is only partially initialised here; presumably
+ * write_chunk_tags_fn ignores the remaining fields -- confirm.
+ */
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+	int chunk;
+	int realigned_chunk;
+
+	struct yaffs_ext_tags tags;
+
+	if (dev->checkpt_cur_block < 0) {
+		yaffs2_checkpt_find_erased_block(dev);
+		dev->checkpt_cur_chunk = 0;
+	}
+
+	if (dev->checkpt_cur_block < 0)
+		return 0;
+
+	tags.is_deleted = 0;
+	tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+	tags.chunk_id = dev->checkpt_page_seq + 1;
+	tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+	tags.n_bytes = dev->data_bytes_per_chunk;
+	if (dev->checkpt_cur_chunk == 0) {
+		/* First chunk we write for the block? Set block state to
+		   checkpoint */
+		struct yaffs_block_info *bi =
+		    yaffs_get_block_info(dev, dev->checkpt_cur_block);
+		bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		dev->blocks_in_checkpt++;
+	}
+
+	chunk =
+	    dev->checkpt_cur_block * dev->param.chunks_per_block +
+	    dev->checkpt_cur_chunk;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
+		chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+		tags.obj_id, tags.chunk_id);
+
+	realigned_chunk = chunk - dev->chunk_offset;
+
+	dev->n_page_writes++;
+
+	dev->param.write_chunk_tags_fn(dev, realigned_chunk,
+				       dev->checkpt_buffer, &tags);
+	dev->checkpt_byte_offs = 0;
+	dev->checkpt_page_seq++;
+	dev->checkpt_cur_chunk++;
+	if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+		dev->checkpt_cur_chunk = 0;
+		dev->checkpt_cur_block = -1;
+	}
+	memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+	return 1;
+}
+
+/* Append n_bytes of data to the checkpoint stream, updating the running
+ * sum/xor checksums and flushing the buffer each time a chunk fills.
+ * Returns the number of bytes consumed (which may be short if a flush
+ * fails), 0 if the stream is not open, or -1 if it is open read-only. */
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+/* Read n_bytes from the checkpoint stream into data, refilling the
+ * buffer from NAND chunk by chunk and validating each chunk's sequence
+ * number, chunk id and ECC result. Updates the same running checksums
+ * as the write path so the final sums can be compared. Returns the
+ * number of bytes delivered (short on validation failure or end of
+ * checkpoint), 0 if the stream is not open, or -1 if open for write. */
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+
+ int chunk;
+ int realigned_chunk;
+
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ ok = 0;
+ else {
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->param.read_chunk_tags_fn(dev,
+ realigned_chunk,
+ dev->
+ checkpt_buffer,
+ &tags);
+
+ /* Reject chunks that are out of sequence,
+ * unrecoverably corrupt, or not checkpoint
+ * data at all. */
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1)
+ || tags.ecc_result > YAFFS_ECC_RESULT_FIXED
+ || tags.seq_number !=
+ YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ ok = 0;
+
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+ }
+ }
+
+ if (ok) {
+ *data_bytes =
+ dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+ }
+
+ return i;
+}
+
+/* Close the checkpoint stream. Write mode: flush any buffered tail.
+ * Read mode: re-mark the blocks found during reading as CHECKPOINT
+ * state and release the block list. Either way, adjust the free-space
+ * accounting for the blocks the checkpoint occupies and free the
+ * transfer buffer. Returns 1 if a buffer was freed, else 0. */
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs != 0)
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ int i;
+ for (i = 0;
+ i < dev->blocks_in_checkpt
+ && dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+ if (dev->internal_start_block <= blk
+ && blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ else {
+ /* Todo this looks odd... */
+ }
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,"checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Invalidate the stored checkpoint by erasing its blocks, so the next
+ * mount cannot restore from stale data. Returns yaffs_checkpt_erase()'s
+ * result (1 on success, 0 if no erase_fn is configured). */
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 00000000000000..361c6067717eeb
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 00000000000000..e95a8069a8c5bc
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,298 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore
+ * this byte's influence on the line parity.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* Swap the bytes into the wrong order */
+ t = ecc[0];
+ ecc[0] = ecc[1];
+ ecc[1] = t;
+#endif
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
+ /* swap the bytes to correct for the wrong order */
+ unsigned char t;
+
+ t = d0;
+ d0 = d1;
+ d1 = t;
+#endif
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+ /* Recoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+ /* Recoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 00000000000000..b0c461d699e674
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
+ * blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_cacl(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 00000000000000..d87acbde997cf7
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ YBUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 00000000000000..f4ae9deed727bb
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5164 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_tagsvalidity.h"
+#include "yaffs_getblockinfo.h"
+
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_nand.h"
+
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+
+#include "yaffs_attribs.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve);
+
+
+
+/* Function to calculate chunk and offset */
+
+static void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 * offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
+ u8 *buf = (u8 *) 1;
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].line = 0; /* not in use */
+ dev->temp_buffer[i].buffer = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
+{
+ int i, j;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].line == 0) {
+ dev->temp_buffer[i].line = line_no;
+ if ((i + 1) > dev->max_temp) {
+ dev->max_temp = i + 1;
+ for (j = 0; j <= i; j++)
+ dev->temp_buffer[j].max_line =
+ dev->temp_buffer[j].line;
+ }
+
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Out of temp buffers at line %d, other held by lines:",
+ line_no);
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ yaffs_trace(YAFFS_TRACE_BUFFERS," %d", dev->temp_buffer[i].line);
+
+ /*
+ * If we got here then we have to allocate an unmanaged one
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].line = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer in line %d",
+ line_no);
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 * buffer)
+{
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: unmaged buffer detected.");
+ return 0;
+}
+
+/*
+ * Functions for robustisizing TODO
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ data = data;
+ tags = tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ tags = tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+ bi->needs_retiring = 1; /* Too many stikes, so retire this */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ n = abs(n);
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 * buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xFF)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
+ struct yaffs_ext_tags tags;
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS, "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+ int result;
+
+ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ ((reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block);
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks" );
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* Not enough space to allocate unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR, "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!" );
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ if (dev->alloc_block > 0) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 * data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retired_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ yaffs_init_tags(&tags);
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR * name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ const YUCHAR *bname = (const YUCHAR *)name;
+ if (bname) {
+ while ((*bname) && (i < (YAFFS_MAX_NAME_LENGTH / 2))) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*bname) & 0x1f) * i;
+ i++;
+ bname++;
+ }
+ }
+ return sum;
+}
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+ if (name &&
+ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH)
+ strcpy(obj->short_name, name);
+ else
+ obj->short_name[0] = _Y('\0');
+#endif
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
+void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;;
+ mask =
+ dev->tnode_mask >> ( /*dev->tnode_width - */ bit_in_word);
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ dev = dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
+
+/* AddOrFindLevel0Tnode finds the level 0 tnode if it exists, otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added otherwise the specified tn will
+ * be plugged into the ttree.
+ */
+
+/* yaffs_add_find_tnode_0: return the level-0 tnode for chunk_id, growing the
+ * tree (taller and/or wider) as needed. If passed_tn is non-NULL it is plugged
+ * in as the level-0 tnode, replacing (and freeing) any existing one.
+ * Returns NULL on bad arguments or tnode exhaustion.
+ */
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0
+ || file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ /* New root: old tree becomes child 0 */
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, then release it. */
+ if (tn->internal[x])
+ yaffs_free_tnode(dev,
+ tn->
+ internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0: copy passed tnode's payload over it and free it */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+/* Return 1 when tags identify live (not deleted) data for this
+ * object-id / in-file chunk-id pair, else 0. */
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id && !tags->is_deleted) ? 1 : 0;
+
+}
+
+/* Scan a chunk group starting at the_chunk for the NAND chunk matching
+ * obj_id/inode_chunk. When the group size is 1 the chunk-bit check alone is
+ * conclusive; otherwise the on-NAND tags are read and compared.
+ * Returns the NAND chunk number, or -1 if not found.
+ */
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags, obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+/* Find the NAND chunk backing logical chunk inode_chunk of file 'in'.
+ * tags may be NULL (a local buffer is used). Returns the NAND chunk number
+ * or -1 if the chunk is not present.
+ */
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /*Get the Tnode, then get the level 0 offset chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ }
+ return ret_val;
+}
+
+/* Like yaffs_find_chunk_in_file, but additionally zeroes the level-0 tnode
+ * entry when the chunk is found, detaching it from the file structure.
+ * Returns the NAND chunk number or -1.
+ */
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /* Get the Tnode, then get the level 0 offset chunk offset */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (tn) {
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val =
+ yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+ }
+
+ return ret_val;
+}
+
+/* yaffs_put_chunk_in_file: record that logical chunk inode_chunk of file 'in'
+ * now lives at nand_chunk, handling duplicate detection during scanning.
+ * Returns YAFFS_OK or YAFFS_FAIL (tnode exhaustion).
+ * Fix: local variable "existing_cunk" renamed to "existing_chunk" (typo).
+ */
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_chunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+ /* Just ignore an attempt at putting a chunk into a non-file during scanning
+ * If it is not during Scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy:attempt to put data chunk into a non-file"
+ );
+ YBUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates
+ * NB This does not need to be efficient since it should only ever
+ * happen when the power fails during a write, then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags so this is quite cheap.
+ */
+
+ if (existing_chunk > 0) {
+ /* NB Right now existing chunk will not be real chunk_id if the chunk group size > 1
+ * thus we have to do a FindChunkInFile to get the real chunk id.
+ *
+ * We have a duplicate now we need to decide which one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what we use, dump the new one.
+ * Forward scanning YAFFS2: The new one is what we use, dump the old one.
+ * YAFFS1: Get both sets of tags and compare serial numbers.
+ */
+
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_chunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_chunk <= 0) {
+ /*Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise the chunks will
+ * not be loaded during a scan
+ */
+
+ /* Serials are only read when forward scanning; they are
+ * also only consumed below when in_scan > 0, so neither
+ * is used uninitialised. */
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_chunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to update the tnode
+ */
+ yaffs_chunk_del(dev, existing_chunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the existing one
+ * Use existing.
+ * Delete the new one and return early so that the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_chunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
+
+/* Soft-delete one chunk: bump its block's soft-deleted page count and the
+ * device free-chunk count, and refresh the oldest-dirty sequence tracking.
+ * The chunk itself is not erased here.
+ */
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pulls the chunk out
+ * of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks are soft deleted.
+ */
+
+/* Recursive worker: walk the tnode tree backwards, soft-deleting every chunk
+ * at level 0 and freeing interior tnodes whose subtrees are fully done.
+ * Returns 1 when the subtree rooted at tn has been completely processed.
+ */
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = YAFFS_NTNODES_INTERNAL - 1; all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in,
+ tn->internal
+ [i],
+ level - 1,
+ (chunk_offset
+ <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ if (all_done) {
+ yaffs_free_tnode(dev,
+ tn->internal
+ [i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Hoosterman... how could this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ } else if (level == 0) {
+
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ /* Note this does not find the real chunk, only the chunk group.
+ * We make an assumption that a chunk group is not larger than
+ * a block.
+ */
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+
+ }
+ return 1;
+
+ }
+
+ }
+
+ return 1;
+
+}
+
+/* Detach obj from its parent directory's child list (notifying the
+ * OS-glue remove_obj_fn hook first) and clear its parent pointer.
+ */
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+/* Move obj into 'directory': unlink it from its current parent then add it
+ * to the new directory's child list. Objects placed in the unlinked or
+ * deleted pseudo-directories are flagged unlinked and barred from rename.
+ */
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ YBUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ YBUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ YBUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+/* Rename/move obj to new_dir under new_name, writing an updated object
+ * header. Duplicate names are tolerated for unlink/delete pseudo-directory
+ * moves, forced renames, and shadowing. Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR * new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ YBUG();
+ }
+
+ /* TODO: Do we need this different handling for YAFFS2 and YAFFS1?? */
+ if (obj->my_dev->param.is_yaffs2)
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ else
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir
+ && obj->variant_type == YAFFS_OBJECT_TYPE_FILE);
+
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are allowed.
+ * else only proceed if the new name does not exist and if we're putting
+ * it into a directory.
+ */
+ if ((unlink_op ||
+ del_op ||
+ force ||
+ (shadows > 0) ||
+ !existing_target) &&
+ new_dir->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc purposes. */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >=
+ 0)
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ----------------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a limited number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
+
+/* Return 1 if any short-op cache entry for obj is dirty, else 0. */
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write out every dirty, unlocked short-op cache page belonging to obj,
+ * lowest chunk id first, freeing each entry as it goes. Stops early (and
+ * traces an error) if a write fails, e.g. the device is full.
+ */
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ struct yaffs_cache *cache;
+ int chunk_written = 0;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches > 0) {
+ do {
+ cache = NULL;
+
+ /* Find the dirty cache for this object with the lowest chunk id. */
+ for (i = 0; i < n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].dirty) {
+ if (!cache
+ || dev->cache[i].chunk_id <
+ lowest) {
+ cache = &dev->cache[i];
+ lowest = cache->chunk_id;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+
+ chunk_written =
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+
+ } while (cache && chunk_written > 0);
+
+ if (cache)
+ /* Hoosterman, disk full while writing cache out. */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no space during cache write");
+
+ }
+
+}
+
+/*yaffs_flush_whole_cache(dev)
+ *
+ *
+ */
+
+/* Flush the short-op cache for every object that has dirty pages,
+ * one object at a time, until no dirty entries remain.
+ */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj);
+
+ } while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+/* Return the first unused short-op cache entry, or NULL if all are in use. */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+
+ return NULL;
+}
+
+/* Grab a short-op cache entry: prefer a free one; otherwise evict the least
+ * recently used unlocked entry (flushing its owner's dirty pages first).
+ * May return NULL (caching disabled, or every entry is locked).
+ * Fix: guard the flush with a NULL check - when every entry is locked,
+ * the scan leaves the_obj NULL and the original code passed NULL into
+ * yaffs_flush_file_cache, which dereferences obj->my_dev.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ struct yaffs_obj *the_obj;
+ int usage;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ /* Try find a non-dirty one... */
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (!cache) {
+ /* They were all in use: find the least recently used unlocked
+ * entry, flush its owning object's cache pages, then retry.
+ * NB this is approximate - we flush by object, keyed on the
+ * least recently used page.
+ */
+
+ /* With locking we can't assume we can use entry zero */
+
+ the_obj = NULL;
+ usage = -1;
+ cache = NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage
+ || !cache)) {
+ usage = dev->cache[i].last_use;
+ the_obj = dev->cache[i].object;
+ cache = &dev->cache[i];
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ if (the_obj)
+ yaffs_flush_file_cache(the_obj);
+ cache = yaffs_grab_chunk_worker(dev);
+ }
+
+ }
+ return cache;
+}
+
+/* Find the cached page for (obj, chunk_id); bumps the hit counter on
+ * success. Returns NULL if not cached or caching is disabled. */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm.
+ * Stamps the entry with a monotonically increasing use counter (reset
+ * periodically to avoid overflow) and marks it dirty on writes. */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+
+ if (dev->param.n_caches > 0) {
+ if (dev->cache_last_use < 0 || dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ int i;
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_last_use++;
+
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+ }
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ if (object->my_dev->param.n_caches > 0) {
+ struct yaffs_cache *cache =
+ yaffs_find_chunk_cache(object, chunk_id);
+
+ /* Dropping the owner pointer is enough to mark the entry free */
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+/* Remove obj from its object-id hash bucket (if linked) and decrement
+ * the bucket count. */
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+/* FreeObject frees up a Object and puts it back on the free list.
+ * If the object is still attached to a cached inode it is only marked for
+ * deferred freeing; yaffs_handle_defered_free completes the job later.
+ * Fix: the sanity checks now run BEFORE any dereference - the original
+ * traced obj->my_inode and read obj->my_dev before testing obj for NULL.
+ */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj)
+ YBUG();
+ if (obj->parent)
+ YBUG();
+ if (!list_empty(&obj->siblings))
+ YBUG();
+
+ dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/* Complete a deferred free once the cached inode has let go of the object. */
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+/* Generic object deletion: drop cached data, record the deletion in the
+ * 'deleted' pseudo-directory (YAFFS2), remove from the parent directory,
+ * delete the header chunk and free the object. Always returns YAFFS_OK.
+ */
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+
+ /* First off, invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && (in->parent != in->my_dev->del_dir)) {
+ /* Move to the unlinked directory so we have a record that it was deleted. */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+/* Soft-delete a deleted file: an empty file is destroyed immediately;
+ * otherwise all its chunks are soft-deleted via the worker and the object
+ * is flagged soft_del. No-op unless obj is a deleted, not-yet-soft-deleted
+ * file.
+ */
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (obj->deleted &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_FILE && !obj->soft_del) {
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev,
+ obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.
+ file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+/* Recursive pruning worker: free any subtree holding no data. del0 controls
+ * whether an empty child-0 subtree may be freed (the root path keeps its
+ * child 0 so yaffs_prune_tree can shorten the tree afterwards).
+ * Returns the (possibly freed-and-NULLed) tnode pointer.
+ */
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (tn) {
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i ==
+ 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ /* Level 0: scan the raw tnode words for any non-zero entry */
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+
+ }
+
+ return tn;
+
+}
+
+/* Prune the file's tnode tree after a shrink: drop empty subtrees, then
+ * repeatedly lower the tree height while the root has only child 0 in use.
+ * Returns YAFFS_OK.
+ */
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level > 0) {
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top,
+ file_struct->top_level, 0);
+
+ /* Now we have a tree with all the non-zero branches NULL but the height
+ * is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the tnode is in use
+ * (ie all the non-zero are NULL)
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* AllocateEmptyObject gets us a clean Object. Tries to allocate more if we run out.
+ * The new object is zeroed, given sane list heads, parented under root
+ * (when root exists) and parked in lost-n-found until properly placed.
+ */
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (obj) {
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+/* Pick a lightly loaded hash bucket: probe up to 10 buckets round-robin,
+ * stopping early if one holds fewer than 5 entries; return its index.
+ */
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+
+ }
+
+ return l;
+}
+
+/* Allocate a fresh object id: choose a lightly loaded bucket, then step
+ * through ids congruent to that bucket (mod YAFFS_NOBJECT_BUCKETS) until one
+ * is found that no object in the bucket already uses.
+ * Fix: removed the dead "if (1 || count > 0)" wrapper - the condition was
+ * always true, so the guard was vestigial.
+ */
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
+
+ int found = 0;
+ struct list_head *i;
+
+ u32 n = (u32) bucket;
+
+ /* yaffs_check_obj_hash_sane(); */
+
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+
+ return n;
+}
+
+/* Insert 'in' into the object-id hash table and bump the bucket count. */
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+/* Look up an object by id in the hash table. Returns NULL if absent, or if
+ * the object is marked for deferred freeing (hidden from the VFS).
+ */
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ if (i) {
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+
+ /* Don't tell the VFS about this one if it is defered free */
+ if (in->defered_free)
+ return NULL;
+
+ return in;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Create a new object of the given type with the given id (a negative
+ * number means "allocate an id"). Files get a fresh level-0 tnode up front
+ * so creation fails cleanly on tnode exhaustion. Returns NULL on failure.
+ * Fix: removed the dead "if (the_obj)" wrapper - the preceding NULL check
+ * already returned, so the body was unconditionally reached.
+ */
+struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size = ~0; /* max */
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* todo this should not happen */
+ break;
+ }
+
+ return the_obj;
+}
+
+/* Create a pseudo-directory (root, lost-n-found, deleted, unlinked): it has
+ * no NAND presence and cannot be renamed or unlinked. Returns NULL on
+ * allocation failure.
+ */
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (obj) {
+ obj->fake = 1; /* it is fake so it might have no NAND presence... */
+ obj->rename_allowed = 0; /* ... and we're not allowed to rename it... */
+ obj->unlink_allowed = 0; /* ... or unlink it */
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ }
+
+ return obj;
+
+}
+
+
+/* Reset object/tnode accounting, initialise the raw allocators and empty
+ * every hash bucket. Called during device initialisation. */
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+/* Return the existing object with this id, or create a new one of the given
+ * type if none exists. Returns NULL on allocation failure. */
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+/* Duplicate str (NULL maps to the empty string) into a freshly kmalloc'd,
+ * NUL-terminated buffer, truncated at YAFFS_MAX_ALIAS_LENGTH. Caller frees.
+ * Returns NULL on allocation failure.
+ */
+YCHAR *yaffs_clone_str(const YCHAR * str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ strncpy(new_str, str, len);
+ new_str[len] = 0; /* strnlen result may omit the terminator */
+ }
+ return new_str;
+
+}
+/*
+ *yaffs_update_parent() handles fixing a directories mtime and ctime when a new
+ * link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is defered then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+/* Mark a directory's times dirty after a link is created or removed in it.
+ * With defered_dir_update the directory is queued on dev->dirty_dirs for a
+ * later yaffs_update_dirty_dirs pass; otherwise the header is written now.
+ */
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ /* Only queue once; a non-empty link means already queued */
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/* Drain dev->dirty_dirs, writing an updated object header for each queued
+ * directory. The containing object is recovered from the embedded dirty
+ * list head via nested list_entry steps (dir_var -> obj_var -> obj).
+ */
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+/* Mknod (create) a new object under parent with the given name/attributes.
+ * equiv_obj is only meaningful for hard links, alias_str for symlinks and
+ * rdev for special (device) objects. Fails (NULL) on duplicate name,
+ * allocation failure, or a failed object-header write.
+ * Fix: removed the dead "if (in)" wrapper (the NULL case already returned)
+ * and the redundant NULL guard around kfree (kfree(NULL) is a no-op).
+ */
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR * name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR * alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists. If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ kfree(str);
+ return NULL;
+ }
+
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id =
+ equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail the creation */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ yaffs_update_parent(parent);
+
+ return in;
+}
+
+/* Convenience wrapper: create a regular file under parent. */
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+/* Convenience wrapper: create a directory under parent. */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+/* Create a special node (device etc.) under parent; rdev is stored via
+ * yaffs_attribs_init in yaffs_create_obj. */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+/* Create a symlink under parent; the alias string is cloned by
+ * yaffs_create_obj, so the caller keeps ownership of 'alias'. */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj: create a hard link named 'name' under parent pointing at
+ * equiv_obj. Returns the equivalent (real) object on success, NULL on
+ * failure. */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link as an equivalent object */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj
+ (YAFFS_OBJECT_TYPE_HARDLINK, parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0)) {
+ return equiv_obj;
+ } else {
+ return NULL;
+ }
+
+}
+
+
+
+/*------------------------- Block Management and Page Allocation ----------------*/
+
+/*
+ * yaffs_init_blocks: allocate and zero the per-block bookkeeping arrays:
+ *  - dev->block_info: one yaffs_block_info per internal block,
+ *  - dev->chunk_bits: a bitmap of chunk_bit_stride bytes per block.
+ * Each array is tried with kmalloc first, falling back to vmalloc; the
+ * *_alt flags record which allocator succeeded so yaffs_deinit_blocks
+ * can free with the matching routine.
+ * Returns YAFFS_OK on success, YAFFS_FAIL if either allocation failed.
+ */
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (dev->block_info) {
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ }
+
+ if (dev->block_info && dev->chunk_bits) {
+ memset(dev->block_info, 0,
+ n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+ }
+
+ /* One or both allocations failed; partial allocations are released
+ * later by yaffs_deinit_blocks. */
+ return YAFFS_FAIL;
+}
+
+/*
+ * yaffs_deinit_blocks: free the block_info and chunk_bits arrays set up
+ * by yaffs_init_blocks, using vfree or kfree according to the *_alt
+ * flags, and reset the pointers/flags so a re-init is safe.
+ */
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else if (dev->block_info)
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else if (dev->chunk_bits)
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+/*
+ * yaffs_block_became_dirty: called when a block no longer holds any live
+ * chunks. If the block is healthy it is erased and returned to the empty
+ * pool (optionally verifying every chunk reads as erased); if it needs
+ * retiring, or the erase fails, it is retired as a bad block and the
+ * device loses a block's worth of free chunks.
+ */
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+
+ int erased_ok = 0;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing this block */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ /* Erasing invalidates any checkpoint on flash. */
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ /* Optional paranoia: verify every chunk actually reads as erased. */
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE)
+ || !yaffs_skip_verification(dev))) {
+ int i;
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased
+ (dev, block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (erased_ok) {
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE,
+ "Erased block %d", block_no);
+ } else {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ }
+}
+
+
+
+/*
+ * yaffs_gc_block: garbage collect one block.
+ * Copies every live chunk (bounded by max_copies unless whole_block) to a
+ * new location, discards chunks belonging to soft-deleted files, then
+ * deletes the old chunks. If the whole block empties it becomes dirty and
+ * gets erased via yaffs_block_became_dirty/yaffs_chunk_del. gc_disable is
+ * held over the copy loop to prevent recursive gc from the chunk writes.
+ * Returns YAFFS_OK, or YAFFS_FAIL if a copy could not be written.
+ */
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int new_chunk;
+ int mark_flash;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int matching_chunk;
+ int max_copies;
+
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+
+ struct yaffs_ext_tags tags;
+
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ struct yaffs_obj *object;
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
+
+ dev->gc_disable = 1;
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ /* Nothing live to copy: the block can go straight to dirty. */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ /* Passive gc copies at most 5 chunks per call; whole-block gc
+ * is allowed to copy everything. */
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ /* dev->gc_chunk persists across calls so a partial gc resumes
+ * where it left off. */
+ for ( /* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0; dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+
+ /* This page is in use and might need to be copied off */
+
+ max_copies--;
+
+ mark_flash = 1;
+
+ yaffs_init_tags(&tags);
+
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ /* Verification: the chunk should map back to the
+ * same position in its owner's tnode tree. */
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ matching_chunk = old_chunk; /* Defeat the test */
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file, throw it away
+ * It's a soft deleted data chunk,
+ * No need to copy this, just forget about it and
+ * fix up the object.
+ */
+
+ /* Free chunks already includes softdeleted chunks.
+ * How ever this chunk is going to soon be really deleted
+ * which will increment free chunks.
+ * We have to decrement free chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up the object */
+ dev->gc_cleanup_list[dev->
+ n_clean_ups]
+ = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (0) {
+ /* Todo object && object->deleted && object->n_data_chunks == 0 */
+ /* Deleted object header with no data chunks.
+ * Can be discarded and the file deleted.
+ * NOTE(review): deliberately disabled dead code
+ * (the "else if (0)" guard).
+ */
+ object->hdr_chunk = 0;
+ yaffs_free_tnode(object->my_dev,
+ object->
+ variant.file_variant.
+ top);
+ object->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(object);
+
+ } else if (object) {
+ /* It's either a data chunk in a live file or
+ * an ObjectHeader, so we're interested in it.
+ * NB Need to keep the ObjectHeaders of deleted files
+ * until the whole file has been deleted off
+ */
+ tags.serial_number++;
+
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object Id,
+ * We need to nuke the shrinkheader flags first
+ * Also need to clean up shadowing.
+ * We no longer want the shrink_header flag since its work is done
+ * and if it is left in place it will mess up scanning.
+ */
+
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *)
+ buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ oh->file_size =
+ object->variant.
+ file_variant.
+ file_size;
+ tags.extra_length =
+ oh->file_size;
+ }
+
+ yaffs_verify_oh(object, oh,
+ &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ (u8 *)
+ oh,
+ &tags,
+ 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev,
+ buffer,
+ &tags,
+ 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Ok, now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk =
+ new_chunk;
+ object->serial =
+ tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ /* NOTE(review): 'ok' is assigned
+ * but never checked here. */
+ int ok;
+ ok = yaffs_put_chunk_in_file(object, tags.chunk_id, new_chunk, 0);
+ }
+ }
+ }
+
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk,
+ mark_flash, __LINE__);
+
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.
+ file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ /* Reset gc state so the next pass picks a fresh block. */
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * yaffs_find_gc_block is used to select the dirtiest block (or close
+ * enough) for garbage collection.
+ *
+ * Selection order:
+ *  1. a prioritised block (unless doing aggressive gc), falling back to
+ *     the oldest dirty block if prioritised blocks are gummed up;
+ *  2. a scan of up to 'iterations' blocks for the dirtiest FULL block
+ *     whose used-page count is under a threshold (aggressive gc scans
+ *     everything and accepts any FULL block);
+ *  3. after repeated failures, the oldest dirty block (yaffs2 only).
+ * Returns the selected block number, or 0 if nothing was selected.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block gumming
+ * up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty block, and
+ * search harder.
+ * else (we're doing a leisurely gc), then we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ /* Aggressive: scan every block, accept any FULL one. */
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ /* Background gc tolerates dirtier blocks than
+ * foreground passive gc. */
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ /* Threshold grows with each failed attempt so gc
+ * eventually makes progress. */
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ /* gc_block_finder rotates through the blocks across calls so
+ * the scan is spread out over the whole array. */
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1
+ || pages_used < dev->gc_pages_in_use)
+ && yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try selecting the oldest dirty
+ * because that's gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and will only accept more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ *
+ * Returns YAFFS_OK unless an aggressive gc pass failed.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ /* Honour an external gc-control hook: bit 0 clear means gc is off. */
+ if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable) {
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+ }
+
+ /* This loop should pass the first time.
+ * We'll only see looping here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ /* Keep enough erased blocks for the reserve plus a checkpoint. */
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ /* gc_skip rations passive gc so it isn't run on
+ * every call. */
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ /* Aggressive gc collects the whole block in one go. */
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks)
+ && dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ * NOTE(review): erased_chunks is sampled before the gc pass runs, so the
+ * return value compares the pre-gc erased count against the post-gc
+ * n_free_chunks.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+/*
+ * yaffs_rd_data_obj: read one data chunk of a file into 'buffer'.
+ * inode_chunk is the chunk index within the file. If the chunk is not
+ * mapped (a hole), the buffer is zero-filled and 0 is returned; otherwise
+ * the NAND read result is returned.
+ */
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
+/*
+ * yaffs_chunk_del: delete one NAND chunk from the management structures.
+ * On yaffs1 (and only when not collecting), the chunk is also marked
+ * deleted on flash; yaffs2 relies on sequence numbers instead. Clearing
+ * the chunk bit may leave the block fully dirty, which triggers an erase
+ * via yaffs_block_became_dirty. 'lyn' is the caller's source line, used
+ * only for tracing.
+ */
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ /* yaffs1: write a deletion marker to the chunk's tags. */
+ yaffs_init_tags(&tags);
+
+ tags.is_deleted = 1;
+
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+
+ yaffs_clear_chunk_bit(dev, block, page);
+
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ yaffs_block_became_dirty(dev, block);
+ }
+
+ }
+
+}
+
+/*
+ * yaffs_wr_data_obj: write one data chunk of a file.
+ * Writes 'n_bytes' from 'buffer' as chunk 'inode_chunk' of file 'in':
+ * a new NAND chunk is written, patched into the file's tnode tree, and
+ * any previous chunk at that position is deleted.
+ * Returns the new chunk id (> 0) on success, <= 0 on failure.
+ */
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 * buffer, int n_bytes, int use_reserve)
+{
+ /* Find old chunk Need to do this to get serial number
+ * Write new one and patch into tree.
+ * Invalidate old tags.
+ */
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ /* Writing consumes space, so give gc a chance to run first. */
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ yaffs_init_tags(&new_tags);
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ /* Bump the serial so this chunk supersedes the previous one. */
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ YBUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
+/*
+ * yaffs_do_xattrib_mod: set (set=1) or delete (set=0) an extended
+ * attribute on obj. Packs the request into a yaffs_xattr_mod and routes
+ * it through yaffs_update_oh, which calls yaffs_apply_xattrib_mod while
+ * rewriting the object header.
+ * Returns the nval_* result captured in xmod.result, or -ENOSPC if the
+ * header could not be rewritten.
+ */
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR * name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+/*
+ * yaffs_apply_xattrib_mod: apply a queued xattrib set/delete to the
+ * object-header buffer being rewritten. Xattribs live in the chunk space
+ * after the yaffs_obj_hdr. Updates obj->has_xattr/xattr_known and stores
+ * the nval_* result in xmod->result.
+ */
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ /* Cache whether any xattribs remain so reads can short-circuit. */
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+
+ xmod->result = retval;
+
+ return retval;
+}
+
+/*
+ * yaffs_do_xattrib_fetch: read xattrib data from obj's header chunk.
+ * With a name: fetch that attribute's value (nval_get); with name==NULL:
+ * list all attribute names (nval_list). Returns nval result, or -ENODATA
+ * if the object has no header / no xattribs, -ENOMEM if no temp buffer,
+ * -ENOENT if the header chunk could not be read.
+ */
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR * name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+
+ char *x_buffer;
+
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA;
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ /* First look at this header: cache the has_xattr flag. */
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
+ return retval;
+}
+
+/* Public API: set an extended attribute on obj. */
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+/* Public API: remove an extended attribute from obj. */
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+/* Public API: fetch the value of one extended attribute. */
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+/* Public API: list all extended attribute names into buffer. */
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
+
+/*
+ * yaffs_check_obj_details_loaded: lazily load an object's full details
+ * (mode, attribs, name, symlink alias) from its header chunk on NAND.
+ * No-op unless the object was lazy-loaded during scanning and has a
+ * valid header chunk.
+ */
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *chunk_data;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ /* NOTE(review): 'result' and 'alloc_failed' are set but never
+ * checked; read/alloc failures here are silently ignored. */
+ int result;
+ int alloc_failed = 0;
+
+ if (!in)
+ return;
+
+ dev = in->my_dev;
+
+ if (in->lazy_loaded && in->hdr_chunk > 0) {
+ in->lazy_loaded = 0;
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, chunk_data,
+ &tags);
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.symlink_variant.alias)
+ alloc_failed = 1; /* Not returned to caller */
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+ }
+}
+
+/*
+ * yaffs_load_name_from_oh: copy a name out of an on-NAND object header
+ * into 'name'. With auto-unicode, a header name whose first YCHAR is
+ * non-zero is stored as ASCII and widened byte-by-byte; a leading zero
+ * YCHAR marks a unicode name stored from the second YCHAR onward.
+ * Without auto-unicode it is a plain bounded copy.
+ */
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR * name,
+ const YCHAR * oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* Unicode name: stored after the leading 0 marker. */
+ strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+/*
+ * yaffs_load_oh_from_name: store a name into an object header buffer,
+ * the inverse of yaffs_load_name_from_oh. With auto-unicode, a name that
+ * fits in ASCII is narrowed byte-by-byte; otherwise a 0 marker YCHAR is
+ * written first and the unicode name stored from the second YCHAR.
+ */
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR * oh_name,
+ const YCHAR * name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so do a unicode to ascii conversion */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* It is a unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ strncpy(oh_name + 1, name,
+ YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ {
+#endif
+ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ *
+ * Builds a fresh object header (mode, attribs, name, type-specific data,
+ * optional xattrib modification via xmod), writes it as a new chunk with
+ * extra tag info, then deletes the previous header chunk.
+ * Returns the new header chunk id (>= 0), or 0 if nothing was written
+ * (fake object with no force/xmod), or a negative chunk-write failure.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+
+ struct yaffs_dev *dev = in->my_dev;
+
+ int prev_chunk_id;
+ int ret_val = 0;
+ int result = 0;
+
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj_hdr *oh = NULL;
+
+ /* Placeholder so old_name is defined even without a prior header. */
+ strcpy(old_name, _Y("silly old name"));
+
+ /* Fake objects (e.g. pseudo-dirs) have no NAND presence unless
+ * forced or carrying an xattrib change. */
+ if (!in->fake || in == dev->root_dir ||
+ force || xmod) {
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ /* Read the old header to preserve the name (and
+ * xattrib area) when no new name is supplied. */
+ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xFF, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xFF, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ /* Type-specific header fields. */
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ oh->file_size =
+ (oh->parent_obj_id == YAFFS_OBJECTID_DELETED
+ || oh->parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) ? 0 : in->
+ variant.file_variant.file_size;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ yaffs_init_tags(&new_tags);
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_length = oh->file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (new_chunk_id >= 0) {
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0) {
+ yaffs_chunk_del(dev, prev_chunk_id, 1,
+ __LINE__);
+ }
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.
+ chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+ }
+
+ ret_val = new_chunk_id;
+
+ }
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+
+ return ret_val;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+/* yaffs_file_rd: read n_bytes from file object 'in' into 'buffer', starting
+ * at file position 'offset'. Partial chunks (or all chunks when inband tags
+ * are in use) are serviced through the chunk cache, or through a temporary
+ * buffer when caching is disabled; whole aligned chunks are read straight
+ * into the caller's buffer. Returns the number of bytes actually copied.
+ */
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ /* chunk = offset / dev->data_bytes_per_chunk + 1; */
+ /* start = offset % dev->data_bytes_per_chunk; */
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ /* File chunk numbering is 1-based; addr_to_chunk is 0-based. */
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than a whole chunk
+ * or we're using inband tags then use the cache (if there is caching)
+ * else bypass the cache.
+ */
+ if (cache || n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache, then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ /* Lock across the memcpy so the cache entry
+ * is not recycled while we read from it. */
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy.. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+ }
+
+ } else {
+
+ /* A full chunk. Read directly into the supplied buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+
+ }
+
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+
+ }
+
+ return n_done;
+}
+
+/* yaffs_do_file_wr: write n_bytes from 'buffer' into file object 'in' at
+ * file position 'offset'. Partial chunks (or all chunks with inband tags)
+ * go through the chunk cache or a temp buffer using read-modify-write;
+ * full aligned chunks are written directly. Updates file_size and marks
+ * the object dirty. Returns the number of bytes written (may be short if
+ * a chunk write fails).
+ * 'write_trhrough' (sic - typo preserved from the original API): when set,
+ * cached chunks are also flushed to NAND immediately.
+ * NOTE(review): 'start_write' is declared int but assigned from a loff_t
+ * offset - truncates on files >2GB; confirm against upstream fix.
+ */
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ int n_writeback;
+ int start_write = offset;
+ int chunk_written = 0;
+ u32 n_bytes_read;
+ u32 chunk_start;
+
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ /* chunk_written goes negative on a failed chunk write: abort loop. */
+ while (n > 0 && chunk_written >= 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+ /* Sanity check the chunk/start decomposition of offset. */
+ if (chunk * dev->data_bytes_per_chunk + start != offset ||
+ start >= dev->data_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "AddrToChunk of offset %d gives chunk %d start %d",
+ (int)offset, chunk, start);
+ }
+ chunk++; /* File pos to chunk in file offset */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->data_bytes_per_chunk) {
+ n_copy = n;
+
+ /* Now folks, to calculate how many bytes to write back....
+ * If we're overwriting and not writing to then end of file then
+ * we need to write back as much as was there before.
+ */
+
+ chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
+
+ if (chunk_start > in->variant.file_variant.file_size)
+ n_bytes_read = 0; /* Past end of file */
+ else
+ n_bytes_read =
+ in->variant.file_variant.file_size -
+ chunk_start;
+
+ if (n_bytes_read > dev->data_bytes_per_chunk)
+ n_bytes_read = dev->data_bytes_per_chunk;
+
+ /* Write back whichever is larger: pre-existing data
+ * in this chunk, or the region we are now writing. */
+ n_writeback =
+ (n_bytes_read >
+ (start + n)) ? n_bytes_read : (start + n);
+
+ if (n_writeback < 0
+ || n_writeback > dev->data_bytes_per_chunk)
+ YBUG();
+
+ } else {
+ n_copy = dev->data_bytes_per_chunk - start;
+ n_writeback = dev->data_bytes_per_chunk;
+ }
+
+ if (n_copy != dev->data_bytes_per_chunk
+ || dev->param.inband_tags) {
+ /* An incomplete start or end chunk (or maybe both start and end chunk),
+ * or we're using inband tags, so we want to use the cache buffers.
+ */
+ if (dev->param.n_caches > 0) {
+ struct yaffs_cache *cache;
+ /* If we can't find the data in the cache, then load the cache */
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ if (!cache
+ && yaffs_check_alloc_available(dev, 1)) {
+ cache = yaffs_grab_chunk_cache(dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ } else if (cache &&
+ !cache->dirty &&
+ !yaffs_check_alloc_available(dev,
+ 1)) {
+ /* Drop the cache if it was a read cache item and
+ * no space check has been made for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_use_cache(dev, cache, 1);
+ cache->locked = 1;
+
+ memcpy(&cache->data[start], buffer,
+ n_copy);
+
+ cache->locked = 0;
+ cache->n_bytes = n_writeback;
+
+ if (write_trhrough) {
+ chunk_written =
+ yaffs_wr_data_obj
+ (cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ }
+
+ } else {
+ chunk_written = -1; /* fail the write */
+ }
+ } else {
+ /* An incomplete start or end chunk (or maybe both start and end chunk)
+ * Read into the local buffer then copy, then copy over and write back.
+ */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev, __LINE__);
+
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(&local_buffer[start], buffer, n_copy);
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk,
+ local_buffer,
+ n_writeback, 0);
+
+ yaffs_release_temp_buffer(dev, local_buffer,
+ __LINE__);
+
+ }
+
+ } else {
+ /* A full chunk. Write directly from the supplied buffer. */
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk, buffer,
+ dev->data_bytes_per_chunk, 0);
+
+ /* Since we've overwritten the cached data, we better invalidate it. */
+ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+ if (chunk_written >= 0) {
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+
+ }
+
+ /* Update file object */
+
+ if ((start_write + n_done) > in->variant.file_variant.file_size)
+ in->variant.file_variant.file_size = (start_write + n_done);
+
+ in->dirty = 1;
+
+ return n_done;
+}
+
+/* yaffs_wr_file: public write entry point. First plugs any hole between the
+ * current end of file and 'offset' (yaffs2 sparse-write handling), then
+ * delegates the actual write to yaffs_do_file_wr.
+ */
+int yaffs_wr_file(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough)
+{
+ yaffs2_handle_hole(in, offset);
+ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_trhrough);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+/* yaffs_prune_chunks: delete all data chunks beyond 'new_size' when a file
+ * is shrunk. Chunks are deleted from the end of the file backwards so that
+ * a power loss mid-operation cannot leave a hole in the middle of the file.
+ */
+static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
+{
+
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ /* Last (1-based) chunk currently holding data. */
+ int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
+
+ /* First chunk that lies entirely beyond the new size. */
+ int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
+ dev->data_bytes_per_chunk;
+ int i;
+ int chunk_id;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = last_del; i >= start_del; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_chunk_del
+ */
+
+ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+ if (chunk_id > 0) {
+ /* Sanity check: chunk id must fall inside the
+ * device's internal block range. */
+ if (chunk_id <
+ (dev->internal_start_block *
+ dev->param.chunks_per_block)
+ || chunk_id >=
+ ((dev->internal_end_block +
+ 1) * dev->param.chunks_per_block)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Found daft chunk_id %d for %d",
+ chunk_id, i);
+ } else {
+ in->n_data_chunks--;
+ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+ }
+ }
+ }
+
+}
+
+/* yaffs_resize_file_down: shrink a file to 'new_size'. Prunes the chunks
+ * beyond the new size, then - if the new size ends part-way through a chunk -
+ * rewrites that last chunk zero-padded to the chunk boundary, and finally
+ * prunes now-empty tnode levels from the file's tnode tree.
+ */
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+ int new_full;
+ u32 new_partial;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+ yaffs_prune_chunks(obj, new_size);
+
+ if (new_partial != 0) {
+ int last_chunk = 1 + new_full;
+ u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Rewrite the last chunk with its new size and zero pad */
+ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+ memset(local_buffer + new_partial, 0,
+ dev->data_bytes_per_chunk - new_partial);
+
+ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+ new_partial, 1);
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+ }
+
+ obj->variant.file_variant.file_size = new_size;
+
+ yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+/* yaffs_resize_file: set a file's size to 'new_size' (grow or shrink).
+ * Flushes and invalidates the file's cache first. Growing is handled as a
+ * hole; shrinking prunes chunks. Writes a fresh object header afterwards
+ * unless the object is unlinked/deleted or shadowed.
+ * Returns YAFFS_OK or YAFFS_FAIL (non-file object).
+ */
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+ struct yaffs_dev *dev = in->my_dev;
+ int old_size = in->variant.file_variant.file_size;
+
+ yaffs_flush_file_cache(in);
+ yaffs_invalidate_whole_cache(in);
+
+ yaffs_check_gc(dev, 0);
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ if (new_size == old_size)
+ return YAFFS_OK;
+
+ if (new_size > old_size) {
+ yaffs2_handle_hole(in, new_size);
+ in->variant.file_variant.file_size = new_size;
+ } else {
+ /* new_size < old_size */
+ yaffs_resize_file_down(in, new_size);
+ }
+
+ /* Write a new object header to reflect the resize.
+ * show we've shrunk the file, if need be
+ * Do this only if the file is not in the deleted directories
+ * and is not shadowed.
+ */
+ if (in->parent &&
+ !in->is_shadowed &&
+ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+ return YAFFS_OK;
+}
+
+/* yaffs_flush_file: flush a dirty file's cached chunks to NAND.
+ * data_sync: only the data is synced; the object header is not rewritten.
+ * update_time: refresh the object's timestamps before writing the header.
+ * Returns YAFFS_OK, or YAFFS_FAIL if the header update fails.
+ */
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+ int ret_val;
+ if (in->dirty) {
+ yaffs_flush_file_cache(in);
+ if (data_sync) /* Only sync data */
+ ret_val = YAFFS_OK;
+ else {
+ if (update_time)
+ yaffs_load_current_time(in, 0, 0);
+
+ ret_val = (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >=
+ 0) ? YAFFS_OK : YAFFS_FAIL;
+ }
+ } else {
+ ret_val = YAFFS_OK;
+ }
+
+ return ret_val;
+
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+/* yaffs_unlink_file_if_needed: move a file being deleted into either the
+ * 'deleted' pseudo-directory (when no in-memory inode still references it,
+ * so deletion can proceed immediately) or the 'unlinked' pseudo-directory
+ * (inode still open; real deletion is deferred until last close).
+ * Immediate deletion soft-deletes the data, resizing to zero first when
+ * soft-delete is disabled or this is a yaffs2 device.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+
+ int ret_val;
+ int del_now = 0;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!in->my_inode)
+ del_now = 1;
+
+ if (del_now) {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->del_dir,
+ _Y("deleted"), 0, 0);
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: immediate deletion of file %d",
+ in->obj_id);
+ in->deleted = 1;
+ in->my_dev->n_deleted_files++;
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+ yaffs_soft_del_file(in);
+ } else {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+
+ return ret_val;
+}
+
+/* yaffs_del_file: delete a file's data and inode (links are handled by the
+ * callers). Files that still hold data chunks are soft-deleted via
+ * yaffs_unlink_file_if_needed; files with no data (e.g. already resized to
+ * zero) are torn down immediately.
+ */
+int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+ int deleted; /* Need to cache value on stack if in is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+/* yaffs_is_non_empty_dir: true iff obj is a directory with at least one
+ * child. NULL and non-directory objects return false.
+ */
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+/* yaffs_del_dir: delete a directory object. Fails (YAFFS_FAIL) if the
+ * directory still has children; otherwise falls through to the generic
+ * object deletion path.
+ */
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+/* yaffs_del_symlink: free the symlink's alias string, then delete the
+ * object through the generic deletion path.
+ */
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ if (in->variant.symlink_variant.alias)
+ kfree(in->variant.symlink_variant.alias)
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+/* yaffs_del_link: delete a hardlink object. Unhooks it from the hard-link
+ * list of its equivalent object, then deletes it generically.
+ */
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* remove this hardlink from the list assocaited with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+/* yaffs_del_obj: delete any object, dispatching on its variant type.
+ * Directories are first removed from the dirty-directories list.
+ * Returns YAFFS_OK/YAFFS_FAIL from the type-specific path, or 0 for the
+ * should-not-happen UNKNOWN type.
+ */
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+
+ return ret_val;
+}
+
+/* yaffs_unlink_worker: core unlink logic. Handles four cases:
+ * - obj is itself a hardlink: just delete the link;
+ * - obj has hardlinks pointing at it: promote one hardlink to become the
+ *   object (rename trick described inline) and delete that link;
+ * - no open inode (del_now): delete immediately by type;
+ * - otherwise: park the object in the unlinked directory for deferred
+ *   deletion on last close.
+ * NOTE(review): 'obj' is dereferenced (obj->my_inode) before the
+ * 'if (obj)' null check below - the check is ineffective as written;
+ * confirm against upstream.
+ */
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+
+ int del_now = 0;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ if (obj)
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+ * The Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory (so that the rename can work)
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+/* yaffs_unlink_obj: guard wrapper around yaffs_unlink_worker. Refuses
+ * (YAFFS_FAIL) for NULL objects and objects that do not allow unlinking
+ * (e.g. the pseudo-directories).
+ */
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+
+}
+
+/* yaffs_unlinker: unlink the object called 'name' inside directory 'dir'.
+ * Lookup failure propagates as YAFFS_FAIL via yaffs_unlink_obj(NULL).
+ */
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+/* yaffs_rename_obj: rename old_dir/old_name to new_dir/new_name.
+ * If old_name is NULL, old_dir itself is the object to rename (and its
+ * parent becomes the effective old_dir). An existing target is nuked
+ * first via shadowing, with GC disabled so shadowing is not disturbed.
+ * Fails on over-long names, non-renameable objects, or a non-empty
+ * target directory (ENOTEMPTY case).
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+ /* Special case for case insemsitive systems.
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if(old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+ else{
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+
+ if (obj && obj->rename_allowed) {
+
+ /* Now do the handling for an existing target, if there is one */
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)){
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc otherwise it can mess up the shadowing.
+ *
+ */
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+/* yaffs_handle_shadowed_obj: during scanning, handle an object id reported
+ * as shadowed. In yaffs2 backward scanning, an already-known object is
+ * left alone. Otherwise the object is created (as a file, so shrinking
+ * works), marked shadowed, parked in the unlinked dir for post-scan
+ * cleanup, and marked valid so no further header info is read for it.
+ */
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (!backward_scanning) {
+ /* Handle YAFFS1 forward scanning case
+ * For YAFFS1 we always do the deletion
+ */
+
+ } else {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that it can do shrinking etc.
+ * We put it in unlinked dir to be cleaned up after the scanning
+ */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info for this file */
+
+}
+
+/* yaffs_link_fixup: after scanning, walk the temporary list of hardlink
+ * objects (chained through hard_links.next) and hook each one onto the
+ * hard-link list of its equivalent object. Hardlinks whose target does
+ * not exist are left with a NULL equiv_obj and an empty list.
+ */
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list)
+{
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
+ while (hard_list) {
+ hl = hard_list;
+ /* The list pointer doubles as the 'next' link during scan. */
+ hard_list = (struct yaffs_obj *)(hard_list->hard_links.next);
+
+ in = yaffs_find_by_number(dev,
+ hl->variant.
+ hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+ /* Todo Need to report/handle this better.
+ * Got a problem... hardlink to a non-existant object
+ */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+
+ }
+ }
+}
+
+/* yaffs_strip_deleted_objs: post-scan cleanup - delete everything left in
+ * the 'unlinked' and 'deleted' pseudo-directories. Skipped on read-only
+ * mounts.
+ */
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+ }
+
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships between
+ * directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been deleted
+ * leaving the object "hanging" without being rooted in the directory tree.
+ */
+
+/* yaffs_has_null_parent: true for the three objects whose parent is NULL
+ * by design: the deleted, unlinked and root directories.
+ */
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+/* yaffs_fix_hanging_objs: walk every object bucket and re-root any object
+ * that is not (directly or transitively) under root, deleted, or unlinked,
+ * moving such "hanging" objects into lost+found. Parent-chain walking is
+ * bounded by a depth limit of 100 to guard against cycles.
+ * Skipped on read-only mounts.
+ */
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY
+ && depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found,
+ obj);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+/* yaffs_del_dir_contents: recursively unlink everything inside 'dir'.
+ * Subdirectories are emptied first. Uses yaffs_unlink_obj rather than a
+ * direct delete so hardlinked objects are handled correctly.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ YBUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ if (lh) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+
+ /* Need to use UnlinkObject since Delete would not handle
+ * hardlinked objects correctly.
+ */
+ yaffs_unlink_obj(obj);
+ }
+ }
+
+}
+
+/* yaffs_empty_l_n_f: convenience wrapper - empty the lost+found directory. */
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+/* yaffs_find_by_name: look up a child called 'name' in 'directory'.
+ * Uses the cached name checksum as a fast filter, then confirms with a
+ * full name compare (also re-checking objects with no header chunk, whose
+ * sum may be stale). Returns the object or NULL.
+ */
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR * name)
+{
+ int sum;
+
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ YBUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ YBUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ if (i) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ YBUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum
+ || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (strncmp
+ (name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
+ return l;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+/* yaffs_get_equivalent_obj: dereference a hardlink to the real object it
+ * points at; non-hardlink objects pass through unchanged.
+ */
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ /* We want the object id of the equivalent object, not this one */
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode are stored slightly differently...
+ * - If the name can fit in the ASCII character space then they are saved as
+ * ascii names as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ * starting at oh->name[1].
+
+ */
+/* yaffs_fix_null_name: if 'name' is empty, synthesize one of the form
+ * <lost+found prefix><obj_id decimal> and copy it into 'name' (bounded by
+ * buffer_size). Used so every object always has a presentable name.
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR * name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
+ /* Render obj_id as decimal digits, building backwards. */
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ strcat(local_name, x);
+ strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+/* yaffs_get_obj_name: copy the object's name into 'name' (buffer_size
+ * chars, zero-filled first). Sources in order: the fixed lost+found name,
+ * the in-memory short name, or the name stored in the object header chunk
+ * on NAND; falls back to a synthesized objNNN name if all else is empty.
+ * Returns the resulting name length.
+ * NOTE(review): 'result' from yaffs_rd_chunk_tags_nand is never checked;
+ * on a failed read the name is loaded from a zeroed buffer - confirm
+ * whether upstream intended to handle the error.
+ */
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+
+ yaffs_check_obj_details_loaded(obj);
+
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ }
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ else if (obj->short_name[0]) {
+ strcpy(name, obj->short_name);
+ }
+#endif
+ else if (obj->hdr_chunk > 0) {
+ int result;
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+/* yaffs_get_obj_length: size reported for an object - file size for files,
+ * alias length for symlinks, one chunk's worth for directories (the only
+ * other type expected to reach the fallback).
+ */
+int yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+/* yaffs_get_obj_link_count: link count for stat - the object itself
+ * (unless it is unlinked) plus one per hardlink attached to it.
+ */
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+/* yaffs_get_obj_inode: inode number for stat - the object id of the
+ * (hardlink-dereferenced) object.
+ */
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+/* yaffs_get_obj_type: map the object's variant type (and, for SPECIAL
+ * objects, its yst_mode bits) to a dirent DT_* type code.
+ * NOTE(review): the SPECIAL case intentionally falls through into
+ * 'default' (DT_REG) when no S_IS* test matches - there is no break
+ * before 'default:'; flag for an explicit fallthrough marker.
+ */
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+/* yaffs_get_symlink_alias: return a freshly allocated copy of the
+ * symlink's alias string (caller frees), or a clone of "" if the object
+ * is not a symlink.
+ */
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+/* yaffs_check_dev_fns: validate that the device was given a coherent set
+ * of driver callbacks - either the complete "with tags" interface
+ * (yaffs1 or yaffs2) or the complete "spare" interface (yaffs1 only),
+ * but never a mix of both. Returns 1 if usable, 0 if not.
+ */
+static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
+{
+
+ /* Common functions, gotta have */
+ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+ if (dev->param.write_chunk_tags_fn &&
+ dev->param.read_chunk_tags_fn &&
+ !dev->param.write_chunk_fn &&
+ !dev->param.read_chunk_fn &&
+ dev->param.bad_block_fn && dev->param.query_block_fn)
+ return 1;
+#endif
+
+ /* Can use the "spare" style interface for yaffs1 */
+ if (!dev->param.is_yaffs2 &&
+ !dev->param.write_chunk_tags_fn &&
+ !dev->param.read_chunk_tags_fn &&
+ dev->param.write_chunk_fn &&
+ dev->param.read_chunk_fn &&
+ !dev->param.bad_block_fn && !dev->param.query_block_fn)
+ return 1;
+
+ return 0; /* bad */
+}
+
+/* yaffs_create_initial_dir: create the four fake directories every yaffs
+ * device needs - unlinked, deleted, root and lost+found - and hook
+ * lost+found under root. Returns YAFFS_OK only if all four were created.
+ */
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost and found directories */
+
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()" );
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. One or two more checks happen later on too. */
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
+
+ /*
+ * Calculate chunk_grp_bits.
+ * We need to find the next power of 2 > than internal_end_block
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* OK, we've finished verifying the device, lets continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1; /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted checkpoint load
+ * and scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retired_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+
+}
+
+/* Tear down a mounted yaffs device: release block/tnode/object RAM
+ * structures, the short-op cache, the GC cleanup list and the temporary
+ * buffers, then mark the device unmounted and give the flash driver a
+ * chance to clean up. A no-op if the device is not mounted.
+ */
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+	if (dev->is_mounted) {
+		int i;
+
+		yaffs_deinit_blocks(dev);
+		yaffs_deinit_tnodes_and_objs(dev);
+		if (dev->param.n_caches > 0 && dev->cache) {
+
+			for (i = 0; i < dev->param.n_caches; i++) {
+				/* kfree(NULL) is a no-op, so no guard needed. */
+				kfree(dev->cache[i].data);
+				dev->cache[i].data = NULL;
+			}
+
+			kfree(dev->cache);
+			dev->cache = NULL;
+		}
+
+		/* Null freed pointers: the same dev structure may be
+		 * re-initialised by a later mount.
+		 */
+		kfree(dev->gc_cleanup_list);
+		dev->gc_cleanup_list = NULL;
+
+		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+			kfree(dev->temp_buffer[i].buffer);
+			dev->temp_buffer[i].buffer = NULL;
+		}
+
+		dev->is_mounted = 0;
+
+		if (dev->param.deinitialise_flash_fn)
+			dev->param.deinitialise_flash_fn(dev);
+	}
+}
+
+/* Walk every managed block and total the reclaimable chunks:
+ * the unused pages plus the soft-deleted pages of each block that is
+ * in a state where its space can still be used or recovered.
+ */
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+	int total = 0;
+	int block;
+	struct yaffs_block_info *info = dev->block_info;
+
+	for (block = dev->internal_start_block;
+	     block <= dev->internal_end_block; block++, info++) {
+		switch (info->block_state) {
+		case YAFFS_BLOCK_STATE_EMPTY:
+		case YAFFS_BLOCK_STATE_ALLOCATING:
+		case YAFFS_BLOCK_STATE_COLLECTING:
+		case YAFFS_BLOCK_STATE_FULL:
+			total += dev->param.chunks_per_block -
+			    info->pages_in_use + info->soft_del_pages;
+			break;
+		default:
+			/* Dirty, dead, checkpoint etc. blocks contribute
+			 * nothing. */
+			break;
+		}
+	}
+
+	return total;
+}
+
+/* Free-space figure reported to the outside world.
+ * Starts from the raw free-chunk count, credits files awaiting deletion,
+ * then debits dirty cache pages, the reserved-block pool (plus one block
+ * for allocation) and the space a checkpoint would need. Clamped at zero.
+ */
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+	int reported;
+	int dirty_caches = 0;
+	int checkpt_blocks;
+	int i;
+
+	reported = dev->n_free_chunks + dev->n_deleted_files;
+
+	/* Each dirty cache page will consume a chunk when flushed. */
+	for (i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].dirty)
+			dirty_caches++;
+	}
+	reported -= dirty_caches;
+
+	/* Hold back the reserved blocks plus one for allocation. */
+	reported -=
+	    (dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block;
+
+	/* And the blocks needed to store a checkpoint. */
+	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+	reported -= checkpt_blocks * dev->param.chunks_per_block;
+
+	if (reported < 0)
+		reported = 0;
+
+	return reported;
+
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 00000000000000..307eba28676f5c
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,915 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xFF
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941FF53
+
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 6
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+#endif
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+#define YAFFS_MAX_CHUNK_ID 0x000FFFFF
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE -1)
+
+#define YAFFS_CHECKPOINT_VERSION 4
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_SB_HEADER 0x10
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks per second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xEFFFFF00
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
+
+/* ChunkCache is used for short read/write operations.
+ * Each entry caches one chunk's worth of data belonging to one object.
+ */
+struct yaffs_cache {
+	struct yaffs_obj *object;	/* Owning object; set to NULL when the entry is unused */
+	int chunk_id;	/* Which chunk of the object is cached */
+	int last_use;	/* Use counter at last access — presumably for LRU eviction; confirm */
+	int dirty;	/* Non-zero: data differs from flash and needs flushing */
+	int n_bytes;	/* Only valid if the cache is dirty */
+	int locked;	/* Can't push out or flush while locked. */
+	u8 *data;	/* Chunk-sized data buffer */
+};
+
+/* Tags structures in RAM
+ * NB This uses bitfield. Bitfields should not straddle a u32 boundary otherwise
+ * the structure size will get blown out.
+ */
+
+#ifndef CONFIG_YAFFS_NO_YAFFS1
+struct yaffs_tags {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes_lsb:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
+
+#endif
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+
+ unsigned validity0;
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 then this is not part of an object (unused) */
+ unsigned chunk_id; /* If 0 then this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* There is extra info available if this is not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ unsigned extra_length; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object Id if it is a hard link */
+
+ unsigned validity1;
+
+};
+
+/* Spare structure for YAFFS1: layout of the 16-byte NAND spare (OOB) area. */
+struct yaffs_spare {
+	u8 tb0;	/* tb0..tb7: tag bytes, presumably the packed yaffs_tags — confirm against tag packing code */
+	u8 tb1;
+	u8 tb2;
+	u8 tb3;
+	u8 page_status;	/* set to 0 to delete the chunk */
+	u8 block_status;	/* NOTE(review): looks like the bad-block marker byte — confirm */
+	u8 tb4;
+	u8 tb5;
+	u8 ecc1[3];	/* ECC bytes — assumed to cover the first half of the data area; TODO confirm */
+	u8 tb6;
+	u8 tb7;
+	u8 ecc2[3];	/* ECC bytes — assumed to cover the second half of the data area; TODO confirm */
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING,
+ /* The block might have something on it (ie it is allocating or full, perhaps empty)
+ * but it needs to be scanned to determine its true state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of deciding
+ * However, if this state is returned on a YAFFS2 device, then we expect a sequence number
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. NB use unsigned because enum is sometimes an int */
+ u32 needs_retiring:1; /* Data has failed on this block, need to get valid data off */
+ /* and retire the block. */
+ u32 skip_erased_check:1; /* If this is set we can skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed on this block.
+ It should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc failures on this block and tried to reuse it */
+
+#ifdef CONFIG_YAFFS_YAFFS2
+ u32 has_shrink_hdr:1; /* This block has at least one shrink object header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+#endif
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to directories, files, symlinks - not hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ int file_size;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* device stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 reserved[2];
+ int shadows_obj; /* This object header shadows the specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we shrink the file (ie resize) */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has children links
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ u32 file_size;
+ u32 scanned_size;
+ u32 shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file. The file should be in the unlinked directory. */
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects are not allowed to be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available (ie. file data records appear before the header).
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and is missing some detail */
+
+ u8 defered_free:1; /* For Linux kernel. Object is removed from NAND, but is
+ * still in the inode cache. Free of object is defered.
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created so skip some checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way to being renamed. */
+
+ u8 xattr_known:1; /* We know if this has object has xattribs or not. */
+ u8 has_xattr:1; /* This object has xattribs. Valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND. Cached here */
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in this hash bucket */
+
+ struct list_head hard_links; /* all the equivalent hard linked objects */
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks attached to the file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+#ifndef CONFIG_YAFFS_NO_SHORT_NAMES
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ u32 size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int line; /* track from whence this buffer was allocated */
+ int max_line;
+};
+
+/*----------------- Device ---------------------------------*/
+
+/* Device configuration supplied by the OS glue before yaffs_guts_initialise()
+ * is called. Yaffs fills in the runtime fields of struct yaffs_dev from this.
+ */
+struct yaffs_param {
+	const YCHAR *name;
+
+	/*
+	 * Entry parameters set up way early. Yaffs sets up the rest.
+	 * The structure should be zeroed out before use so that unused
+	 * and default values are zero.
+	 */
+
+	int inband_tags;	/* Use inband tags */
+	u32 total_bytes_per_chunk;	/* Should be >= 512, does not need to be a power of 2 */
+	int chunks_per_block;	/* does not need to be a power of 2 */
+	int spare_bytes_per_chunk;	/* spare area size */
+	int start_block;	/* Start block we're allowed to use */
+	int end_block;	/* End block we're allowed to use */
+	int n_reserved_blocks;	/* We want this tuneable so that we can reduce */
+	/* reserved blocks on NOR and RAM. */
+
+	int n_caches;	/* If <= 0, then short op caching is disabled, else
+			 * the number of short op caches (don't use too many).
+			 * 10 to 20 is a good bet.
+			 */
+	int use_nand_ecc;	/* Flag to decide whether or not to use NANDECC on data (yaffs1) */
+	int no_tags_ecc;	/* Flag to decide whether or not to do ECC on packed tags (yaffs2) */
+
+	int is_yaffs2;	/* Use yaffs2 mode on this device */
+
+	int empty_lost_n_found;	/* Auto-empty lost+found directory on mount */
+
+	int refresh_period;	/* How often we should check to do a block refresh */
+
+	/* Checkpoint control. Can be set before or after initialisation */
+	u8 skip_checkpt_rd;
+	u8 skip_checkpt_wr;
+
+	int enable_xattr;	/* Enable xattribs */
+
+	/* NAND access functions (Must be set before calling YAFFS) */
+
+	int (*write_chunk_fn) (struct yaffs_dev * dev,
+			       int nand_chunk, const u8 * data,
+			       const struct yaffs_spare * spare);
+	int (*read_chunk_fn) (struct yaffs_dev * dev,
+			      int nand_chunk, u8 * data,
+			      struct yaffs_spare * spare);
+	int (*erase_fn) (struct yaffs_dev * dev, int flash_block);
+	int (*initialise_flash_fn) (struct yaffs_dev * dev);
+	int (*deinitialise_flash_fn) (struct yaffs_dev * dev);
+
+#ifdef CONFIG_YAFFS_YAFFS2
+	int (*write_chunk_tags_fn) (struct yaffs_dev * dev,
+				    int nand_chunk, const u8 * data,
+				    const struct yaffs_ext_tags * tags);
+	int (*read_chunk_tags_fn) (struct yaffs_dev * dev,
+				   int nand_chunk, u8 * data,
+				   struct yaffs_ext_tags * tags);
+	int (*bad_block_fn) (struct yaffs_dev * dev, int block_no);
+	int (*query_block_fn) (struct yaffs_dev * dev, int block_no,
+			       enum yaffs_block_state * state,
+			       u32 * seq_number);
+#endif
+
+	/* The remove_obj_fn function must be supplied by OS flavours that
+	 * need it.
+	 * yaffs direct uses it to implement the faster readdir.
+	 * Linux uses it to protect the directory during unlocking.
+	 */
+	void (*remove_obj_fn) (struct yaffs_obj * obj);
+
+	/* Callback to mark the superblock dirty */
+	void (*sb_dirty_fn) (struct yaffs_dev * dev);
+
+	/* Callback to control garbage collection. */
+	unsigned (*gc_control) (struct yaffs_dev * dev);
+
+	/* Debug control flags. Don't use unless you know what you're doing */
+	int use_header_file_size;	/* Flag to determine if we should use file sizes from the header */
+	int disable_lazy_load;	/* Disable lazy loading on this device */
+	int wide_tnodes_disabled;	/* Set to disable wide tnodes */
+	int disable_soft_del;	/* yaffs 1 only: Set to disable the use of softdeletion. */
+
+	int defered_dir_update;	/* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	int auto_unicode;
+#endif
+	int always_check_erased;	/* Force chunk erased check always on */
+};
+
+/* Per-device state: the caller-supplied parameters plus all runtime
+ * state that yaffs maintains for a mounted device.
+ */
+struct yaffs_dev {
+	struct yaffs_param param;
+
+	/* Context storage. Holds extra OS specific data for this device */
+
+	void *os_context;
+	void *driver_context;
+
+	struct list_head dev_list;
+
+	/* Runtime parameters. Set up by YAFFS. */
+	int data_bytes_per_chunk;
+
+	/* Non-wide tnode stuff */
+	u16 chunk_grp_bits;	/* Number of bits that need to be resolved if
+				 * the tnodes are not wide enough.
+				 */
+	u16 chunk_grp_size;	/* == 2^^chunk_grp_bits */
+
+	/* Stuff to support wide tnodes */
+	u32 tnode_width;
+	u32 tnode_mask;
+	u32 tnode_size;
+
+	/* Stuff for figuring out file offset to chunk conversions */
+	u32 chunk_shift;	/* Shift value */
+	u32 chunk_div;	/* Divisor after shifting: 1 for power-of-2 sizes */
+	u32 chunk_mask;	/* Mask to use for power-of-2 case */
+
+	int is_mounted;
+	int read_only;
+	int is_checkpointed;
+
+	/* Stuff to support block offsetting to support start block zero */
+	int internal_start_block;
+	int internal_end_block;
+	int block_offset;
+	int chunk_offset;
+
+	/* Runtime checkpointing stuff */
+	int checkpt_page_seq;	/* running sequence number of checkpoint pages */
+	int checkpt_byte_count;
+	int checkpt_byte_offs;
+	u8 *checkpt_buffer;
+	int checkpt_open_write;
+	int blocks_in_checkpt;
+	int checkpt_cur_chunk;
+	int checkpt_cur_block;
+	int checkpt_next_block;
+	int *checkpt_block_list;
+	int checkpt_max_blocks;
+	u32 checkpt_sum;
+	u32 checkpt_xor;
+
+	int checkpoint_blocks_required;	/* Number of blocks needed to store current checkpoint set */
+
+	/* Block Info */
+	struct yaffs_block_info *block_info;
+	u8 *chunk_bits;	/* bitmap of chunks in use */
+	unsigned block_info_alt:1;	/* was allocated using alternative strategy */
+	unsigned chunk_bits_alt:1;	/* was allocated using alternative strategy */
+	int chunk_bit_stride;	/* Number of bytes of chunk_bits per block.
+				 * Must be consistent with chunks_per_block.
+				 */
+
+	int n_erased_blocks;
+	int alloc_block;	/* Current block being allocated off */
+	u32 alloc_page;
+	int alloc_block_finder;	/* Used to search for next allocation block */
+
+	/* Object and Tnode memory management */
+	void *allocator;
+	int n_obj;
+	int n_tnodes;
+
+	int n_hardlinks;
+
+	struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+	u32 bucket_finder;
+
+	int n_free_chunks;
+
+	/* Garbage collection control */
+	u32 *gc_cleanup_list;	/* objects to delete at the end of a GC. */
+	u32 n_clean_ups;
+
+	unsigned has_pending_prioritised_gc;	/* We think this device might have pending prioritised gcs */
+	unsigned gc_disable;
+	unsigned gc_block_finder;
+	unsigned gc_dirtiest;
+	unsigned gc_pages_in_use;
+	unsigned gc_not_done;
+	unsigned gc_block;
+	unsigned gc_chunk;
+	unsigned gc_skip;
+
+	/* Special directories */
+	struct yaffs_obj *root_dir;
+	struct yaffs_obj *lost_n_found;
+
+	/* Buffer areas for storing data to recover from write failures TODO
+	 * u8 buffered_data[YAFFS_CHUNKS_PER_BLOCK][YAFFS_BYTES_PER_CHUNK];
+	 * struct yaffs_spare buffered_spare[YAFFS_CHUNKS_PER_BLOCK];
+	 */
+
+	int buffered_block;	/* Which block is buffered here? */
+	int doing_buffered_block_rewrite;
+
+	struct yaffs_cache *cache;
+	int cache_last_use;
+
+	/* Stuff for background deletion and unlinked files. */
+	struct yaffs_obj *unlinked_dir;	/* Directory where unlinked and deleted files live. */
+	struct yaffs_obj *del_dir;	/* Directory where deleted objects are sent to disappear. */
+	struct yaffs_obj *unlinked_deletion;	/* Current file being background deleted. */
+	int n_deleted_files;	/* Count of files awaiting deletion; */
+	int n_unlinked_files;	/* Count of unlinked files. */
+	int n_bg_deletions;	/* Count of background deletions. */
+
+	/* Temporary buffer management */
+	struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+	int max_temp;
+	int temp_in_use;
+	int unmanaged_buffer_allocs;
+	int unmanaged_buffer_deallocs;
+
+	/* yaffs2 runtime stuff */
+	unsigned seq_number;	/* Sequence number of currently allocating block */
+	unsigned oldest_dirty_seq;
+	unsigned oldest_dirty_block;
+
+	/* Block refreshing */
+	int refresh_skip;	/* A skip down counter. Refresh happens when this gets to zero. */
+
+	/* Dirty directory handling */
+	struct list_head dirty_dirs;	/* List of dirty directories */
+
+	/* Statistics */
+	u32 n_page_writes;
+	u32 n_page_reads;
+	u32 n_erasures;
+	u32 n_erase_failures;
+	u32 n_gc_copies;
+	u32 all_gcs;
+	u32 passive_gc_count;
+	u32 oldest_dirty_gc_count;
+	u32 n_gc_blocks;
+	u32 bg_gcs;
+	u32 n_retired_writes;
+	u32 n_retired_blocks;
+	u32 n_ecc_fixed;
+	u32 n_ecc_unfixed;
+	u32 n_tags_ecc_fixed;
+	u32 n_tags_ecc_unfixed;
+	u32 n_deletions;
+	u32 n_unmarked_deletions;
+	u32 refresh_count;
+	u32 cache_hits;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes at runtime and
+ * must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion; */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+int yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+		  int n_bytes);
+/* Declaration parameter names are documentation only in C, so the
+ * misspelled "write_trhrough" is corrected here without ABI impact. */
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+		  int n_bytes, int write_through);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR * name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR * name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, const YCHAR * alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR * name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 * buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 * buffer, int line_no);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR * str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct yaffs_obj *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR * name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 00000000000000..3b508cbc4e8a87
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,41 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+ struct list_head context_list; /* List of these we have mounted */
+ struct yaffs_dev *dev;
+ struct super_block *super;
+ struct task_struct *bg_thread; /* Background thread for this device */
+ int bg_running;
+ struct mutex gross_lock; /* Gross locking mutex*/
+ u8 *spare_buffer; /* For mtdif2 use. Don't know the size of the buffer
+ * at compile time so we have to allocate it.
+ */
+ struct list_head search_contexts;
+ void (*put_super_fn) (struct super_block * sb);
+
+ struct task_struct *readdir_process;
+ unsigned mount_id;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 00000000000000..7cf53b3d91be50
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,54 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+#include "yaffs_linux.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u32 addr =
+ ((loff_t) block_no) * dev->param.total_bytes_per_chunk
+ * dev->param.chunks_per_block;
+ struct erase_info ei;
+
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ retval = mtd->erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_initialise(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 00000000000000..666507417fece1
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no);
+int nandmtd_initialise(struct yaffs_dev *dev);
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif1.c b/fs/yaffs2/yaffs_mtdif1.c
new file mode 100644
index 00000000000000..51083695eb3347
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -0,0 +1,330 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This module provides the interface between yaffs_nand.c and the
+ * MTD API. This version is used when the MTD interface supports the
+ * 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
+ * and we have small-page NAND device.
+ *
+ * These functions are invoked via function pointers in yaffs_nand.c.
+ * This replaces functionality provided by functions in yaffs_mtdif.c
+ * and the yaffs_tags compatibility functions in yaffs_tagscompat.c that are
+ * called in yaffs_mtdif.c when the function pointers are NULL.
+ * We assume the MTD layer is performing ECC (use_nand_ecc is true).
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_packedtags1.h"
+#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
+#include "yaffs_linux.h"
+
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+#include "linux/mtd/mtd.h"
+
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+# define YTAG1_SIZE 8
+#else
+# define YTAG1_SIZE 9
+#endif
+
+/* Write a chunk (page) of data to NAND.
+ *
+ * Caller always provides ExtendedTags data which are converted to a more
+ * compact (packed) form for storage in NAND. A mini-ECC runs over the
+ * contents of the tags meta-data; used to validate the tags when read.
+ *
+ * - Pack ExtendedTags to packed_tags1 form
+ * - Compute mini-ECC for packed_tags1
+ * - Write data and packed tags to NAND.
+ *
+ * Note: Due to the use of the packed_tags1 meta-data which does not include
+ * a full sequence number (as found in the larger packed_tags2 form) it is
+ * necessary for Yaffs to re-write a chunk/page (just once) to mark it as
+ * discarded and dirty. This is not ideal: newer NAND parts are supposed
+ * to be written just once. When Yaffs performs this operation, this
+ * function is called with a NULL data pointer -- calling MTD write_oob
+ * without data is valid usage (2.6.17).
+ *
+ * Any underlying MTD error results in YAFFS_FAIL.
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ const struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+
+ /* we assume that packed_tags1 and struct yaffs_tags are compatible */
+ compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
+ compile_time_assertion(sizeof(struct yaffs_tags) == 8);
+
+ yaffs_pack_tags1(&pt1, etags);
+ yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);
+
+ /* When deleting a chunk, the upper layer provides only skeletal
+ * etags, one with is_deleted set. However, we need to update the
+ * tags, not erase them completely. So we use the NAND write property
+ * that only zeroed-bits stick and set tag bytes to all-ones and
+ * zero just the (not) deleted bit.
+ */
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* clear delete status bit to indicate deleted */
+ pt1.deleted = 0;
+ }
+#else
+ ((u8 *) & pt1)[8] = 0xff;
+ if (etags->is_deleted) {
+ memset(&pt1, 0xff, 8);
+ /* zero page_status byte to indicate deleted */
+ ((u8 *) & pt1)[8] = 0;
+ }
+#endif
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ retval = mtd->write_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "write_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+ return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Return with empty ExtendedTags but add ecc_result.
+ */
+static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval)
+{
+ if (etags) {
+ memset(etags, 0, sizeof(*etags));
+ etags->ecc_result = ecc_result;
+ }
+ return retval;
+}
+
+/* Read a chunk (page) from NAND.
+ *
+ * Caller expects ExtendedTags data to be usable even on error; that is,
+ * all members except ecc_result and block_bad are zeroed.
+ *
+ * - Check ECC results for data (if applicable)
+ * - Check for blank/erased block (return empty ExtendedTags if blank)
+ * - Check the packed_tags1 mini-ECC (correct if necessary/possible)
+ * - Convert packed_tags1 to ExtendedTags
+ * - Update ecc_result and block_bad members to reflect state.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
+ int nand_chunk, u8 * data,
+ struct yaffs_ext_tags *etags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_bytes = dev->data_bytes_per_chunk;
+ loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
+ int eccres = YAFFS_ECC_RESULT_NO_ERROR;
+ struct mtd_oob_ops ops;
+ struct yaffs_packed_tags1 pt1;
+ int retval;
+ int deleted;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = MTD_OOB_AUTO;
+ ops.len = (data) ? chunk_bytes : 0;
+ ops.ooblen = YTAG1_SIZE;
+ ops.datbuf = data;
+ ops.oobbuf = (u8 *) & pt1;
+
+ /* Read page and oob using MTD.
+ * Check status and determine ECC result.
+ */
+ retval = mtd->read_oob(mtd, addr, &ops);
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "read_oob failed, chunk %d, mtd error %d",
+ nand_chunk, retval);
+ }
+
+ switch (retval) {
+ case 0:
+ /* no error */
+ break;
+
+ case -EUCLEAN:
+ /* MTD's ECC fixed the data */
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ break;
+
+ case -EBADMSG:
+ /* MTD's ECC could not fix the data */
+ dev->n_ecc_unfixed++;
+ /* fall into... */
+ default:
+ rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
+ etags->block_bad = (mtd->block_isbad) (mtd, addr);
+ return YAFFS_FAIL;
+ }
+
+ /* Check for a blank/erased chunk.
+ */
+ if (yaffs_check_ff((u8 *) & pt1, 8)) {
+ /* when blank, upper layers want ecc_result to be <= NO_ERROR */
+ return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
+ }
+#ifndef CONFIG_YAFFS_9BYTE_TAGS
+	/* Read deleted status (bit) then return it to its non-deleted
+ * state before performing tags mini-ECC check. pt1.deleted is
+ * inverted.
+ */
+ deleted = !pt1.deleted;
+ pt1.deleted = 1;
+#else
+ deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7);
+#endif
+
+ /* Check the packed tags mini-ECC and correct if necessary/possible.
+ */
+ retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
+ switch (retval) {
+ case 0:
+ /* no tags error, use MTD result */
+ break;
+ case 1:
+ /* recovered tags-ECC error */
+ dev->n_tags_ecc_fixed++;
+ if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
+ eccres = YAFFS_ECC_RESULT_FIXED;
+ break;
+ default:
+ /* unrecovered tags-ECC error */
+ dev->n_tags_ecc_unfixed++;
+ return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
+ }
+
+ /* Unpack the tags to extended form and set ECC result.
+ * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
+ */
+ pt1.should_be_ff = 0xFFFFFFFF;
+ yaffs_unpack_tags1(etags, &pt1);
+ etags->ecc_result = eccres;
+
+ /* Set deleted state */
+ etags->is_deleted = deleted;
+ return YAFFS_OK;
+}
+
+/* Mark a block bad.
+ *
+ * This is a persistent state.
+ * Use of this function should be rare.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "marking block %d bad", block_no);
+
+ retval = mtd->block_markbad(mtd, (loff_t) blocksize * block_no);
+ return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+/* Check any MTD prerequisites.
+ *
+ * Returns YAFFS_OK or YAFFS_FAIL.
+ */
+static int nandmtd1_test_prerequists(struct mtd_info *mtd)
+{
+ /* 2.6.18 has mtd->ecclayout->oobavail */
+ /* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
+ int oobavail = mtd->ecclayout->oobavail;
+
+ if (oobavail < YTAG1_SIZE) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "mtd device has only %d bytes for tags, need %d",
+ oobavail, YTAG1_SIZE);
+ return YAFFS_FAIL;
+ }
+ return YAFFS_OK;
+}
+
+/* Query for the current state of a specific block.
+ *
+ * Examine the tags of the first chunk of the block and return the state:
+ * - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
+ * - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
+ * - YAFFS_BLOCK_STATE_EMPTY, the block is clean
+ *
+ * Always returns YAFFS_OK.
+ */
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state_ptr, u32 * seq_ptr)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int chunk_num = block_no * dev->param.chunks_per_block;
+ loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk;
+ struct yaffs_ext_tags etags;
+ int state = YAFFS_BLOCK_STATE_DEAD;
+ int seqnum = 0;
+ int retval;
+
+	/* We don't yet have a good place to test for MTD config prerequisites.
+ * Do it here as we are called during the initial scan.
+ */
+ if (nandmtd1_test_prerequists(mtd) != YAFFS_OK)
+ return YAFFS_FAIL;
+
+ retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags);
+ etags.block_bad = (mtd->block_isbad) (mtd, addr);
+ if (etags.block_bad) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is marked bad", block_no);
+ state = YAFFS_BLOCK_STATE_DEAD;
+ } else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
+ /* bad tags, need to look more closely */
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else if (etags.chunk_used) {
+ state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ seqnum = etags.seq_number;
+ } else {
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+
+ *state_ptr = state;
+ *seq_ptr = seqnum;
+
+ /* query always succeeds */
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_mtdif1.h b/fs/yaffs2/yaffs_mtdif1.h
new file mode 100644
index 00000000000000..07ce4524f0f6a3
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif1.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF1_H__
+#define __YAFFS_MTDIF1_H__
+
+int nandmtd1_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+
+int nandmtd1_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+
+int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no);
+
+int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif2.c b/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 00000000000000..d1643df2c381b8
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,225 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_packedtags2.h"
+
+#include "yaffs_linux.h"
+
+/* NB For use with inband tags....
+ * We assume that the data buffer is of size total_bytes_per_chunk so that we can also
+ * use it to load the tags.
+ */
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+ int retval = 0;
+
+ loff_t addr;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_write_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd->write_oob(mtd, addr, &ops);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+
+ size_t dummy;
+ int retval = 0;
+ int local_data = 0;
+
+ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_read_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev, __LINE__);
+ }
+
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = mtd->read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OOB_AUTO;
+ ops.ooblen = packed_tags_size;
+ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+ ops.oobbuf = yaffs_dev_to_lc(dev)->spare_buffer;
+ retval = mtd->read_oob(mtd, addr, &ops);
+ }
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)&data[dev->
+ data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+ memcpy(packed_tags_ptr,
+ yaffs_dev_to_lc(dev)->spare_buffer,
+ packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data, __LINE__);
+
+ if (tags && retval == -EBADMSG
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+ if (tags && retval == -EUCLEAN
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_mark_block_bad %d", block_no);
+
+ retval =
+ mtd->block_markbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+
+}
+
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "nandmtd2_query_block %d", block_no);
+ retval =
+ mtd->block_isbad(mtd,
+ block_no * dev->param.chunks_per_block *
+ dev->param.total_bytes_per_chunk);
+
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *seq_number = 0;
+ } else {
+ struct yaffs_ext_tags t;
+ nandmtd2_read_chunk_tags(dev, block_no *
+ dev->param.chunks_per_block, NULL, &t);
+
+ if (t.chunk_used) {
+ *seq_number = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+ } else {
+ *seq_number = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "block is bad seq %d state %d", *seq_number, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
diff --git a/fs/yaffs2/yaffs_mtdif2.h b/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 00000000000000..d82112610d0430
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,29 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 * data,
+ const struct yaffs_ext_tags *tags);
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int nandmtd2_mark_block_bad(struct yaffs_dev *dev, int block_no);
+int nandmtd2_query_block(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 * seq_number);
+
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 00000000000000..daa36f989d318b
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,201 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of values and fits
+ * into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * strnlen+1 bytes name null terminated.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR * name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (strncmp
+ ((YCHAR *) (xb + pos + sizeof(int)), name, size) == 0) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -1;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR * name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+ /* Find size, shift rest over this record, then zero out the rest of buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+ } else {
+ return -ENODATA;
+ }
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags)
+{
+ int pos;
+ int namelen = strnlen(name, xb_size);
+ int reclen;
+ int size_exist = 0;
+ int space;
+ int start;
+
+ pos = nval_find(xb, xb_size, name, &size_exist);
+
+ if (flags & XATTR_CREATE && pos >= 0)
+ return -EEXIST;
+ if (flags & XATTR_REPLACE && pos < 0)
+ return -ENODATA;
+
+ start = nval_used(xb, xb_size);
+ space = xb_size - start + size_exist;
+
+ reclen = (sizeof(int) + namelen + 1 + bsize);
+
+ if (reclen > space)
+ return -ENOSPC;
+
+ if (pos >= 0) {
+ nval_del(xb, xb_size, name);
+ start = nval_used(xb, xb_size);
+ }
+
+ pos = start;
+
+ memcpy(xb + pos, &reclen, sizeof(int));
+ pos += sizeof(int);
+ strncpy((YCHAR *) (xb + pos), name, reclen);
+ pos += (namelen + 1);
+ memcpy(xb + pos, buf, bsize);
+ return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /*Advance over NUL */
+ pos++;
+ size--;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+
+ }
+ if (pos >= 0)
+ return -ERANGE;
+ else
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) && size <= xb_size && (pos + size) < xb_size
+ && !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 00000000000000..2bb02b62762835
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 00000000000000..e816cabf43f8e9
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,127 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsvalidity.h"
+
+#include "yaffs_getblockinfo.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+
+ int realigned_chunk = nand_chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided, use local tags to get prioritised gc working */
+ if (!tags)
+ tags = &local_tags;
+
+ if (dev->param.read_chunk_tags_fn)
+ result =
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk, buffer,
+ tags);
+ else
+ result = yaffs_tags_compat_rd(dev,
+ realigned_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags)
+{
+
+ dev->n_page_writes++;
+
+ nand_chunk -= dev->chunk_offset;
+
+ if (tags) {
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ if (!yaffs_validate_tags(tags)) {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing uninitialised tags");
+ YBUG();
+ }
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ YBUG();
+ }
+
+ if (dev->param.write_chunk_tags_fn)
+ return dev->param.write_chunk_tags_fn(dev, nand_chunk, buffer,
+ tags);
+ else
+ return yaffs_tags_compat_wr(dev, nand_chunk, buffer, tags);
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.bad_block_fn)
+ return dev->param.bad_block_fn(dev, block_no);
+ else
+ return yaffs_tags_compat_mark_bad(dev, block_no);
+}
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+ block_no -= dev->block_offset;
+
+ if (dev->param.query_block_fn)
+ return dev->param.query_block_fn(dev, block_no, state,
+ seq_number);
+ else
+ return yaffs_tags_compat_query_block(dev, block_no,
+ state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block)
+{
+ int result;
+
+ flash_block -= dev->block_offset;
+
+ dev->n_erasures++;
+
+ result = dev->param.erase_fn(dev, flash_block);
+
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->param.initialise_flash_fn)
+ return dev->param.initialise_flash_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 00000000000000..543f1987124e6a
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 00000000000000..a77f0954fc135a
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,53 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xFFFFFFFF;
+
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+ static const u8 all_ff[] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff
+ };
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xFFFFFFFF)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 00000000000000..d6861ff505e457
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 00000000000000..8e7fea3d286037
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,196 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_tagsvalidity.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed scanning
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xF0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0F) << EXTRA_OBJECT_TYPE_SHIFT)
+
+static void yaffs_dump_packed_tags2_tags_only(const struct
+ yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ if (t->chunk_id == 0 && t->extra_available) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = t->extra_length;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ yaffs_init_tags(t);
+
+ if (ptt->seq_number != 0xFFFFFFFF) {
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id =
+ ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink =
+ (ptt->chunk_id & EXTRA_SHRINK_FLAG) ? 1 : 0;
+ t->extra_shadows =
+ (ptt->chunk_id & EXTRA_SHADOWS_FLAG) ? 1 : 0;
+ t->extra_obj_type =
+ ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_length = ptt->n_bytes;
+ }
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xFFFFFFFF && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct
+ yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 00000000000000..f3296697bc0c51
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC) for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 00000000000000..7578075d9ac10a
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+void yaffs_calc_ecc(const u8 * data, struct yaffs_spare *spare)
+{
+ yaffs_ecc_cacl(data, spare->ecc1);
+ yaffs_ecc_cacl(&data[256], spare->ecc2);
+}
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+
+ tags->ecc = ecc;
+
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+		/* Now recalc the ecc */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+		/* Weird ecc failure value */
+		/* TODO Need to do something here */
+ return -1; /* unrecovered error */
+ }
+
+ return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xFF, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 * data,
+ struct yaffs_spare *spare)
+{
+ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d is not valid",
+ nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+
+ if (!spare && data) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+
+ if (!dev->param.use_nand_ecc) {
+ ret_val =
+ dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && correct_errors) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ yaffs_ecc_cacl(data, calc_ecc);
+ ecc_result1 =
+ yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_cacl(&data[256], calc_ecc);
+ ecc_result2 =
+ yaffs_ecc_correct(&data[256], spare->ecc2,
+ calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+ ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
+ (struct yaffs_spare *)
+ &nspare);
+ memcpy(spare, &nspare, sizeof(struct yaffs_spare));
+ if (data && correct_errors) {
+ if (nspare.eccres1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ } else if (nspare.eccres1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ }
+
+ if (nspare.eccres2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ } else if (nspare.eccres2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+ return ret_val;
+}
+
+/*
+ * Functions for improving robustness
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev,
+ flash_block + dev->block_offset)->needs_retiring =
+ 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & 0x3ff;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data)
+ yaffs_calc_ecc(data, &spare);
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+
+ }
+
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *ext_tags)
+{
+
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+
+ static struct yaffs_spare spare_ff;
+ static int init;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (yaffs_rd_chunk_nand(dev, nand_chunk, data, &spare, &ecc_result, 1)) {
+ /* ext_tags may be NULL */
+ if (ext_tags) {
+
+ int deleted =
+ (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ (memcmp(&spare_ff, &spare, sizeof(spare_ff)) !=
+ 0) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.
+ n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+ }
+
+ return YAFFS_OK;
+ } else {
+ return YAFFS_FAIL;
+ }
+}
+
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number)
+{
+
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xFF, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 1);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 00000000000000..8cd35dcd3cab37
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,36 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 * data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 * data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 * seq_number);
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+int yaffs_count_bits(u8 byte);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsvalidity.c b/fs/yaffs2/yaffs_tagsvalidity.c
new file mode 100644
index 00000000000000..4358d79d4bece9
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.c
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_tagsvalidity.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags)
+{
+ memset(tags, 0, sizeof(struct yaffs_ext_tags));
+ tags->validity0 = 0xAAAAAAAA;
+ tags->validity1 = 0x55555555;
+}
+
+int yaffs_validate_tags(struct yaffs_ext_tags *tags)
+{
+ return (tags->validity0 == 0xAAAAAAAA && tags->validity1 == 0x55555555);
+
+}
diff --git a/fs/yaffs2/yaffs_tagsvalidity.h b/fs/yaffs2/yaffs_tagsvalidity.h
new file mode 100644
index 00000000000000..36a021fc8fa8bf
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsvalidity.h
@@ -0,0 +1,23 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGS_VALIDITY_H__
+#define __YAFFS_TAGS_VALIDITY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_tags(struct yaffs_ext_tags *tags);
+int yaffs_validate_tags(struct yaffs_ext_tags *tags);
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 00000000000000..6273dbf9f63f15
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000F0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xF0000000
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 00000000000000..738c7f69a5ec1d
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,535 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
/* Human-readable names for block states, indexed by bi->block_state.
 * NOTE(review): order is assumed to match the YAFFS_BLOCK_STATE_* enum
 * (YAFFS_NUMBER_OF_BLOCK_STATES entries) declared in yaffs_guts.h —
 * confirm if that enum changes. */
static const char *block_state_name[] = {
	"Unknown",
	"Needs scanning",
	"Scanning",
	"Empty",
	"Allocating",
	"Full",
	"Dirty",
	"Checkpoint",
	"Collecting",
	"Dead"
};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCANNING:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0
+ || bi->pages_in_use > dev->param.chunks_per_block
+ || bi->soft_del_pages < 0
+ || bi->soft_del_pages > dev->param.chunks_per_block
+ || actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
/*
 * Verify every internal block of the device, then cross-check the
 * aggregate per-state counts against the device-level counters
 * (blocks_in_checkpt, n_erased_blocks) and the one-at-a-time invariants
 * for Allocating and Collecting blocks. Reporting only; no repair.
 */
void yaffs_verify_blocks(struct yaffs_dev *dev)
{
	int i;
	int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
	int illegal_states = 0;

	if (yaffs_skip_verification(dev))
		return;

	memset(state_count, 0, sizeof(state_count));

	/* First pass: verify each block and tally states. */
	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
		yaffs_verify_blk(dev, bi, i);

		if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
			state_count[bi->block_state]++;
		else
			illegal_states++;	/* out-of-range state value */
	}

	yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");

	yaffs_trace(YAFFS_TRACE_VERIFY,
		"%d blocks have illegal states",
		illegal_states);
	/* Only one block may be in the Allocating state at any time. */
	if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Too many allocating blocks");

	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"%s %d blocks",
			block_state_name[i], state_count[i]);

	/* Device counters must match the per-state tallies. */
	if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Checkpoint block count wrong dev %d count %d",
			dev->blocks_in_checkpt,
			state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);

	if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Erased block count wrong dev %d count %d",
			dev->n_erased_blocks,
			state_count[YAFFS_BLOCK_STATE_EMPTY]);

	/* Likewise at most one block may be undergoing collection. */
	if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Too many collecting blocks %d (max is 1)",
			state_count[YAFFS_BLOCK_STATE_COLLECTING]);
}
+
/*
 * Verify the object header. oh must be valid, but obj and tags may be NULL in which
 * case those tests will not be performed.
 *
 * Checks performed: header type in legal range, tags/object id agreement,
 * parent-id consistency (only when parent_check is set; never for the
 * root object, obj_id <= 1), and that the name is neither empty nor
 * trashed (0xFF fill). All findings are trace-reported only.
 */
void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
		     struct yaffs_ext_tags *tags, int parent_check)
{
	if (obj && yaffs_skip_verification(obj->my_dev))
		return;

	/* Despite the comment above, all three of tags/obj/oh must be
	 * non-NULL to proceed; otherwise just report what was passed. */
	if (!(tags && obj && oh)) {
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Verifying object header tags %p obj %p oh %p",
			tags, obj, oh);
		return;
	}

	if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
	    oh->type > YAFFS_OBJECT_TYPE_MAX)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header type is illegal value 0x%x",
			tags->obj_id, oh->type);

	if (tags->obj_id != obj->obj_id)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header mismatch obj_id %d",
			tags->obj_id, obj->obj_id);

	/*
	 * Check that the object's parent ids match if parent_check requested.
	 *
	 * Tests do not apply to the root object.
	 */

	if (parent_check && tags->obj_id > 1 && !obj->parent)
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header mismatch parent_id %d obj->parent is NULL",
			tags->obj_id, oh->parent_obj_id);

	/* Unlinked-vs-deleted mismatch is tolerated (shadowing). */
	if (parent_check && obj->parent &&
	    oh->parent_obj_id != obj->parent->obj_id &&
	    (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
	     obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header mismatch parent_id %d parent_obj_id %d",
			tags->obj_id, oh->parent_obj_id,
			obj->parent->obj_id);

	if (tags->obj_id > 1 && oh->name[0] == 0)	/* Null name */
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header name is NULL",
			obj->obj_id);

	if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff)	/* Trashed name */
		yaffs_trace(YAFFS_TRACE_VERIFY,
			"Obj %d header name is 0xFF",
			obj->obj_id);
}
+
/*
 * Verify a file object: compute the tnode-tree depth implied by the file
 * size, then (when NAND verification is enabled) re-read every mapped
 * chunk's tags from NAND and check they name this object and chunk.
 */
void yaffs_verify_file(struct yaffs_obj *obj)
{
	int required_depth;
	int actual_depth;
	u32 last_chunk;
	u32 x;
	u32 i;
	struct yaffs_dev *dev;
	struct yaffs_ext_tags tags;
	struct yaffs_tnode *tn;
	u32 obj_id;

	if (!obj)
		return;

	if (yaffs_skip_verification(obj->my_dev))
		return;

	dev = obj->my_dev;
	obj_id = obj->obj_id;

	/* Check file size is consistent with tnode depth */
	last_chunk =
	    obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1;
	x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
	required_depth = 0;
	while (x > 0) {
		x >>= YAFFS_TNODES_INTERNAL_BITS;
		required_depth++;
	}

	actual_depth = obj->variant.file_variant.top_level;
	/* NOTE(review): required_depth and actual_depth are computed but
	 * never compared here, so the depth check announced above is
	 * effectively a no-op — confirm whether a comparison/trace was
	 * dropped from this version. */

	/* Check that the chunks in the tnode tree are all correct.
	 * We do this by scanning through the tnode tree and
	 * checking the tags for every chunk match.
	 */

	if (yaffs_skip_nand_verification(dev))
		return;

	for (i = 1; i <= last_chunk; i++) {
		tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);

		if (tn) {
			u32 the_chunk = yaffs_get_group_base(dev, tn, i);
			if (the_chunk > 0) {
				/* Re-read tags from NAND and cross-check. */
				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
							 &tags);
				if (tags.obj_id != obj_id || tags.chunk_id != i)
					yaffs_trace(YAFFS_TRACE_VERIFY,
						"Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
						obj_id, i, the_chunk,
						tags.obj_id, tags.chunk_id);
			}
		}
	}
}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
/* Verify a symlink object. Currently a stub: only the skip guard runs;
 * the symlink target string is not yet checked. */
void yaffs_verify_symlink(struct yaffs_obj *obj)
{
	if (obj && yaffs_skip_verification(obj->my_dev))
		return;

	/* Verify symlink string */
}
+
/* Verify a special (device/fifo/socket) object. Stub: no checks yet. */
void yaffs_verify_special(struct yaffs_obj *obj)
{
	if (obj && yaffs_skip_verification(obj->my_dev))
		return;
}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ u32 chunk_min;
+ u32 chunk_max;
+
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer, __LINE__);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent
+ && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has illegaltype %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+ }
+}
+
/*
 * Verify that obj is correctly linked into its parent directory: the
 * parent exists and is a directory, and obj appears exactly once in the
 * parent's children list (every sibling is also verified along the way).
 * Structural failures raise YBUG().
 */
void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
{
	struct list_head *lh;
	struct yaffs_obj *list_obj;

	int count = 0;

	if (!obj) {
		yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
		YBUG();
		return;
	}

	if (yaffs_skip_verification(obj->my_dev))
		return;

	if (!obj->parent) {
		yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent" );
		YBUG();
		return;
	}

	if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
		YBUG();
	}

	/* Iterate through the objects in each hash entry */

	/* Count how many times obj occurs among its parent's children;
	 * anything other than exactly once is a corruption. */
	list_for_each(lh, &obj->parent->variant.dir_variant.children) {
		if (lh) {
			list_obj = list_entry(lh, struct yaffs_obj, siblings);
			yaffs_verify_obj(list_obj);
			if (obj == list_obj)
				count++;
		}
	}

	if (count != 1) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"Object in directory %d times",
			count);
		YBUG();
	}
}
+
/*
 * Verify a directory object: it has the directory variant type and every
 * child on its children list points back to it as parent; each child is
 * then checked with yaffs_verify_obj_in_dir(). Only runs under full
 * verification (YAFFS_TRACE_VERIFY_FULL). Failures raise YBUG().
 */
void yaffs_verify_dir(struct yaffs_obj *directory)
{
	struct list_head *lh;
	struct yaffs_obj *list_obj;

	if (!directory) {
		YBUG();
		return;
	}

	if (yaffs_skip_full_verification(directory->my_dev))
		return;

	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"Directory has wrong type: %d",
			directory->variant_type);
		YBUG();
	}

	/* Iterate through the objects in each hash entry */

	list_for_each(lh, &directory->variant.dir_variant.children) {
		if (lh) {
			list_obj = list_entry(lh, struct yaffs_obj, siblings);
			/* Child must point back at this directory. */
			if (list_obj->parent != directory) {
				yaffs_trace(YAFFS_TRACE_ALWAYS,
					"Object in directory list has wrong parent %p",
					list_obj->parent);
				YBUG();
			}
			yaffs_verify_obj_in_dir(list_obj);
		}
	}
}
+
/* Running count of free-chunk verification failures (diagnostic only;
 * never read back within this file as far as is visible here). */
static int yaffs_free_verification_failures;

/*
 * Verify that the device's cached free-chunk count (dev->n_free_chunks)
 * matches a fresh count from yaffs_count_free_chunks(); mismatches are
 * trace-reported and tallied in yaffs_free_verification_failures.
 */
void yaffs_verify_free_chunks(struct yaffs_dev *dev)
{
	int counted;
	int difference;

	if (yaffs_skip_verification(dev))
		return;

	counted = yaffs_count_free_chunks(dev);

	difference = dev->n_free_chunks - counted;

	if (difference) {
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			"Freechunks verification failure %d %d %d",
			dev->n_free_chunks, counted, difference);
		yaffs_free_verification_failures++;
	}
}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+ in = in;
+ return YAFFS_OK;
+}
+
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 00000000000000..cc6f88999305dc
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 00000000000000..d5b875314005a3
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,2792 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ * this superblock
+ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
+ * superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * NB There are two variants of Linux VFS glue code. This variant supports
+ * a single version and should not include any multi-version code.
+ */
+#include <linux/version.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include <linux/exportfs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+
+#include <asm/div64.h>
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+
+#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
+
+#define YPROC_ROOT NULL
+
+#define Y_INIT_TIMER(a) init_timer_on_stack(a)
+
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+ uint64_t result = partition_size;
+ do_div(result, block_size);
+ return (uint32_t) result;
+}
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif1.h"
+#include "yaffs_mtdif2.h"
+
+unsigned int yaffs_trace_mask = YAFFS_TRACE_BAD_BLOCKS | YAFFS_TRACE_ALWAYS;
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+
+/* Module Parameters */
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+
+
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#define yaffs_inode_to_obj(iptr) ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
+
+#define update_dir_time(dir) do {\
+ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+ } while(0)
+
+
/* Garbage-collection control callback handed to the yaffs core; simply
 * reports the current value of the yaffs_gc_control module parameter.
 * dev is unused but required by the callback signature. */
static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
{
	return yaffs_gc_control;
}
+
/* Take the per-device "gross" mutex serialising all yaffs core calls.
 * Lock/locked traces bracket the acquisition so contention is visible
 * under YAFFS_TRACE_LOCK. */
static void yaffs_gross_lock(struct yaffs_dev *dev)
{
	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
	mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
}
+
/* Release the per-device gross mutex taken by yaffs_gross_lock(). */
static void yaffs_gross_unlock(struct yaffs_dev *dev)
{
	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
	mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
}
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj);
+
/*
 * Look up (or create and fill) the VFS inode for yaffs object number ino.
 * Returns the inode, or ERR_PTR(-ENOMEM) if iget_locked() fails.
 * Must be called WITHOUT the gross lock held: it takes the lock itself
 * to fill a newly created inode (see comment below).
 */
static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	struct yaffs_obj *obj;
	struct yaffs_dev *dev = yaffs_super_to_dev(sb);

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	/* Cache hit: an already-initialised inode needs no filling. */
	if (!(inode->i_state & I_NEW))
		return inode;

	/* NB This is called as a side effect of other functions, but
	 * we had to release the lock to prevent deadlocks, so
	 * need to lock again.
	 */

	yaffs_gross_lock(dev);

	obj = yaffs_find_by_number(dev, inode->i_ino);

	yaffs_fill_inode_from_obj(inode, obj);

	yaffs_gross_unlock(dev);

	unlock_new_inode(inode);
	return inode;
}
+
/*
 * Get the VFS inode corresponding to a yaffs object, creating it via
 * yaffs_iget() if needed. Returns NULL on any failure (NULL sb/obj, or
 * iget error). The mode and dev parameters are accepted but not used
 * here; the inode is filled from the object itself.
 * Caller must NOT hold the gross lock (see note below).
 */
struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
			      struct yaffs_obj *obj)
{
	struct inode *inode;

	if (!sb) {
		yaffs_trace(YAFFS_TRACE_OS,
			"yaffs_get_inode for NULL super_block!!");
		return NULL;

	}

	if (!obj) {
		yaffs_trace(YAFFS_TRACE_OS,
			"yaffs_get_inode for NULL object!!");
		return NULL;

	}

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_get_inode for object %d",
		obj->obj_id);

	inode = yaffs_iget(sb, obj->obj_id);
	if (IS_ERR(inode))
		return NULL;

	/* NB Side effect: iget calls back to yaffs_read_inode(). */
	/* iget also increments the inode's i_count */
	/* NB You can't be holding gross_lock or deadlock will happen! */

	return inode;
}
+
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+{
+ struct inode *inode;
+
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_dev *dev;
+
+ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+ int error = -ENOSPC;
+ uid_t uid = current->cred->fsuid;
+ gid_t gid =
+ (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;
+
+ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+ mode |= S_ISGID;
+
+ if (parent) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: parent object %d type %d",
+ parent->obj_id, parent->variant_type);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: could not get parent object");
+ return -EPERM;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod: making oject for %s, mode %x dev %x",
+ dentry->d_name.name, mode, rdev);
+
+ dev = parent->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ switch (mode & S_IFMT) {
+ default:
+ /* Special (socket, fifo, device...) */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+ obj =
+ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+ gid, old_encode_dev(rdev));
+ break;
+ case S_IFREG: /* file */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+ gid);
+ break;
+ case S_IFDIR: /* directory */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+ uid, gid);
+ break;
+ case S_IFLNK: /* symlink */
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+ obj = NULL; /* Do we ever get here? */
+ break;
+ }
+
+ /* Can not call yaffs_get_inode() with gross lock held */
+ yaffs_gross_unlock(dev);
+
+ if (obj) {
+ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+ d_instantiate(dentry, inode);
+ update_dir_time(dir);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_mknod created object %d count = %d",
+ obj->obj_id, atomic_read(&inode->i_count));
+ error = 0;
+ yaffs_fill_inode_from_obj(dir, parent);
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+ error = -ENOMEM;
+ }
+
+ return error;
+}
+
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *n)
+{
+ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
/*
 * VFS link: create a hardlink named by dentry in dir to the object behind
 * old_dentry. Directories are refused. Returns 0 on success, -EPERM if
 * the yaffs core could not make the link.
 * NOTE(review): writes i_nlink directly — valid for the kernel version
 * this was written against, newer kernels require set_nlink(); confirm.
 */
static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct yaffs_obj *obj = NULL;
	struct yaffs_obj *link = NULL;
	struct yaffs_dev *dev;

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");

	obj = yaffs_inode_to_obj(inode);
	dev = obj->my_dev;

	yaffs_gross_lock(dev);

	if (!S_ISDIR(inode->i_mode))	/* Don't link directories */
		link =
		    yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
				   obj);

	if (link) {
		/* Sync VFS link count with yaffs and share the inode. */
		old_dentry->d_inode->i_nlink = yaffs_get_obj_link_count(obj);
		d_instantiate(dentry, old_dentry->d_inode);
		atomic_inc(&old_dentry->d_inode->i_count);
		yaffs_trace(YAFFS_TRACE_OS,
			"yaffs_link link count %d i_count %d",
			old_dentry->d_inode->i_nlink,
			atomic_read(&old_dentry->d_inode->i_count));
	}

	yaffs_gross_unlock(dev);

	if (link) {
		update_dir_time(dir);
		return 0;
	}

	return -EPERM;
}
+
/*
 * VFS symlink: create a symbolic link named by dentry in dir, pointing
 * at symname. Returns 0 on success, -ENOMEM if the yaffs object could
 * not be created. Gross lock is dropped before yaffs_get_inode().
 */
static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct yaffs_obj *obj;
	struct yaffs_dev *dev;
	uid_t uid = current->cred->fsuid;
	gid_t gid =
	    (dir->i_mode & S_ISGID) ? dir->i_gid : current->cred->fsgid;

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");

	dev = yaffs_inode_to_obj(dir)->my_dev;
	yaffs_gross_lock(dev);
	obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
				   S_IFLNK | S_IRWXUGO, uid, gid, symname);
	yaffs_gross_unlock(dev);

	if (obj) {
		struct inode *inode;

		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
		d_instantiate(dentry, inode);
		update_dir_time(dir);
		yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
		return 0;
	} else {
		yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
	}

	return -ENOMEM;
}
+
/*
 * VFS lookup: find the child of dir named by dentry. Always returns NULL
 * (success with d_add) per the VFS lookup contract; a negative dentry is
 * added when the name does not exist.
 * The gross lock is skipped when the caller IS the readdir process — the
 * lock is already held by readdir and re-taking it would deadlock.
 */
static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *n)
{
	struct yaffs_obj *obj;
	struct inode *inode = NULL;

	struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;

	if (current != yaffs_dev_to_lc(dev)->readdir_process)
		yaffs_gross_lock(dev);

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_lookup for %d:%s",
		yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);

	obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);

	obj = yaffs_get_equivalent_obj(obj);	/* in case it was a hardlink */

	/* Can't hold gross lock when calling yaffs_get_inode() */
	if (current != yaffs_dev_to_lc(dev)->readdir_process)
		yaffs_gross_unlock(dev);

	if (obj) {
		yaffs_trace(YAFFS_TRACE_OS,
			"yaffs_lookup found %d", obj->obj_id);

		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);

		if (inode) {
			yaffs_trace(YAFFS_TRACE_OS, "yaffs_loookup dentry");
			d_add(dentry, inode);
			/* return dentry; */
			return NULL;
		}

	} else {
		yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");

	}

	/* Not found (or inode creation failed): add a negative dentry. */
	d_add(dentry, inode);

	return NULL;
}
+
/*
 * VFS unlink (also used as rmdir via the ops table): remove the child of
 * dir named by dentry. Returns 0 on success, -ENOTEMPTY on failure —
 * the yaffs core refuses to unlink non-empty directories, hence the
 * errno choice. Decrements i_nlink directly on success.
 */
static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
{
	int ret_val;

	struct yaffs_dev *dev;
	struct yaffs_obj *obj;

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_unlink %d:%s",
		(int)(dir->i_ino), dentry->d_name.name);
	obj = yaffs_inode_to_obj(dir);
	dev = obj->my_dev;

	yaffs_gross_lock(dev);

	ret_val = yaffs_unlinker(obj, dentry->d_name.name);

	if (ret_val == YAFFS_OK) {
		dentry->d_inode->i_nlink--;
		dir->i_version++;
		yaffs_gross_unlock(dev);
		mark_inode_dirty(dentry->d_inode);
		update_dir_time(dir);
		return 0;
	}
	yaffs_gross_unlock(dev);
	return -ENOTEMPTY;
}
+
/*
 * VFS fsync: flush the yaffs object backing file to NAND. datasync
 * selects data-only sync (passed straight to yaffs_flush_file).
 * Always returns 0; the flush result is not propagated.
 */
static int yaffs_sync_object(struct file *file, int datasync)
{

	struct yaffs_obj *obj;
	struct yaffs_dev *dev;
	struct dentry *dentry = file->f_path.dentry;

	obj = yaffs_dentry_to_obj(dentry);

	dev = obj->my_dev;

	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC, "yaffs_sync_object");
	yaffs_gross_lock(dev);
	yaffs_flush_file(obj, 1, datasync);
	yaffs_gross_unlock(dev);
	return 0;
}
/*
 * The VFS layer already does all the dentry stuff for rename.
 *
 * NB: POSIX says you can rename an object over an old object of the same name
 *
 * Returns 0 on success, -ENOTEMPTY when the target exists and is a
 * non-empty directory (or when the core rename fails for any reason —
 * the errno is not differentiated here).
 */
static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct yaffs_dev *dev;
	int ret_val = YAFFS_FAIL;
	struct yaffs_obj *target;

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
	dev = yaffs_inode_to_obj(old_dir)->my_dev;

	yaffs_gross_lock(dev);

	/* Check if the target is an existing directory that is not empty. */
	target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
				    new_dentry->d_name.name);

	if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
	    !list_empty(&target->variant.dir_variant.children)) {

		yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");

		ret_val = YAFFS_FAIL;
	} else {
		/* Now does unlinking internally using shadowing mechanism */
		yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");

		ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
					   old_dentry->d_name.name,
					   yaffs_inode_to_obj(new_dir),
					   new_dentry->d_name.name);
	}
	yaffs_gross_unlock(dev);

	if (ret_val == YAFFS_OK) {
		if (target) {
			/* The overwritten target lost a link. */
			new_dentry->d_inode->i_nlink--;
			mark_inode_dirty(new_dentry->d_inode);
		}

		update_dir_time(old_dir);
		if (old_dir != new_dir)
			update_dir_time(new_dir);
		return 0;
	} else {
		return -ENOTEMPTY;
	}
}
+
/*
 * VFS setattr: apply attribute changes (mode, ownership, times, size) to
 * the yaffs object. Resizes >= 2GB are rejected with -EINVAL (yaffs file
 * sizes are 31-bit here). Returns 0 on success, -EPERM if the core
 * refused the change.
 */
static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct yaffs_dev *dev;

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_setattr of object %d",
		yaffs_inode_to_obj(inode)->obj_id);

	/* Fail if a requested resize >= 2GB */
	if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
		error = -EINVAL;

	if (error == 0)
		error = inode_change_ok(inode, attr);
	if (error == 0) {
		int result;
		/* NOTE(review): error is known to be 0 here, so this inner
		 * check is always true — looks like residue of an earlier
		 * structure. Left as-is to preserve behavior. */
		if (!error) {
			setattr_copy(inode, attr);
			yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
			if (attr->ia_valid & ATTR_SIZE) {
				truncate_setsize(inode, attr->ia_size);
				/* 512-byte blocks, rounded up. */
				inode->i_blocks = (inode->i_size + 511) >> 9;
			}
		}
		dev = yaffs_inode_to_obj(inode)->my_dev;
		if (attr->ia_valid & ATTR_SIZE) {
			yaffs_trace(YAFFS_TRACE_OS, "resize to %d(%x)",
				(int)(attr->ia_size),
				(int)(attr->ia_size));
		}
		yaffs_gross_lock(dev);
		result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
		if (result == YAFFS_OK) {
			error = 0;
		} else {
			error = -EPERM;
		}
		yaffs_gross_unlock(dev);

	}

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);

	return error;
}
+
+#ifdef CONFIG_YAFFS_XATTR
/*
 * VFS setxattr: store extended attribute name=value on the object.
 * Returns 0 on success or a negative errno from the yaffs core.
 * NOTE(review): error starts at 0, so the `if (error == 0)` guard is
 * always taken — shared boilerplate with the other xattr handlers.
 */
static int yaffs_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct yaffs_dev *dev;
	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);

	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);

	if (error == 0) {
		int result;
		dev = obj->my_dev;
		yaffs_gross_lock(dev);
		result = yaffs_set_xattrib(obj, name, value, size, flags);
		if (result == YAFFS_OK)
			error = 0;
		else if (result < 0)
			error = result;
		yaffs_gross_unlock(dev);

	}
	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);

	return error;
}
+
/*
 * VFS getxattr: read extended attribute name into buff (up to size
 * bytes). Returns the attribute length or a negative errno, as produced
 * by yaffs_get_xattrib().
 */
static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name, void *buff,
			      size_t size)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct yaffs_dev *dev;
	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_getxattr \"%s\" from object %d",
		name, obj->obj_id);

	/* error is always 0 here; guard kept for symmetry with siblings. */
	if (error == 0) {
		dev = obj->my_dev;
		yaffs_gross_lock(dev);
		error = yaffs_get_xattrib(obj, name, buff, size);
		yaffs_gross_unlock(dev);

	}
	yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);

	return error;
}
+
/*
 * VFS removexattr: delete extended attribute name from the object.
 * Returns 0 on success or a negative errno from the yaffs core.
 */
static int yaffs_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct yaffs_dev *dev;
	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_removexattr of object %d", obj->obj_id);

	/* error is always 0 here; guard kept for symmetry with siblings. */
	if (error == 0) {
		int result;
		dev = obj->my_dev;
		yaffs_gross_lock(dev);
		result = yaffs_remove_xattrib(obj, name);
		if (result == YAFFS_OK)
			error = 0;
		else if (result < 0)
			error = result;
		yaffs_gross_unlock(dev);

	}
	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_removexattr done returning %d", error);

	return error;
}
+
/*
 * VFS listxattr: list all extended attribute names of the object into
 * buff (up to size bytes). Returns the list length or a negative errno,
 * as produced by yaffs_list_xattrib().
 */
static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct yaffs_dev *dev;
	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);

	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_listxattr of object %d", obj->obj_id);

	/* error is always 0 here; guard kept for symmetry with siblings. */
	if (error == 0) {
		dev = obj->my_dev;
		yaffs_gross_lock(dev);
		error = yaffs_list_xattrib(obj, buff, size);
		yaffs_gross_unlock(dev);

	}
	yaffs_trace(YAFFS_TRACE_OS,
		"yaffs_listxattr done returning %d", error);

	return error;
}
+
+#endif
+
+/* Inode operations for yaffs directories. Note rmdir is serviced by
+ * yaffs_unlink as well; the xattr handlers are compiled in only when
+ * CONFIG_YAFFS_XATTR is enabled.
+ */
+static const struct inode_operations yaffs_dir_inode_operations = {
+ .create = yaffs_create,
+ .lookup = yaffs_lookup,
+ .link = yaffs_link,
+ .unlink = yaffs_unlink,
+ .symlink = yaffs_symlink,
+ .mkdir = yaffs_mkdir,
+ .rmdir = yaffs_unlink,
+ .mknod = yaffs_mknod,
+ .rename = yaffs_rename,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+/*-----------------------------------------------------------------*/
+/* Directory search context allows us to unlock access to yaffs during
+ * filldir without causing problems with the directory being modified.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
+
+struct yaffs_search_context {
+ struct yaffs_dev *dev; /* device this search belongs to */
+ struct yaffs_obj *dir_obj; /* directory being iterated */
+ struct yaffs_obj *next_return; /* next object readdir reports; NULL at end */
+ struct list_head others; /* link in the device's search_contexts list */
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+/* Allocate and initialise a search context for directory @dir and hook
+ * it onto the device's search_contexts list. next_return starts at the
+ * first child (NULL for an empty directory). Returns NULL on allocation
+ * failure. Caller must hold the yaffs lock (see header comment above).
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+ struct yaffs_dev *dev = dir->my_dev;
+ struct list_head *children = &dir->variant.dir_variant.children;
+ struct yaffs_search_context *sc;
+
+ sc = kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+ if (!sc)
+ return NULL;
+
+ sc->dir_obj = dir;
+ sc->dev = dev;
+ if (list_empty(children))
+ sc->next_return = NULL;
+ else
+ sc->next_return = list_entry(children->next,
+ struct yaffs_obj, siblings);
+ INIT_LIST_HEAD(&sc->others);
+ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+
+ return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+/* Unhook a search context from the device list and free it.
+ * NULL is tolerated.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+ if (!sc)
+ return;
+ list_del(&sc->others);
+ kfree(sc);
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+/* Step a search context to the next sibling in the directory, setting
+ * next_return to NULL once the iteration wraps back to the list head
+ * (end of directory) or the directory has become empty.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+ struct list_head *head, *next;
+
+ if (!sc)
+ return;
+
+ head = &sc->dir_obj->variant.dir_variant.children;
+
+ if (sc->next_return == NULL || list_empty(head)) {
+ sc->next_return = NULL;
+ return;
+ }
+
+ next = sc->next_return->siblings.next;
+ if (next == head)
+ sc->next_return = NULL; /* wrapped to head: end of list */
+ else
+ sc->next_return = list_entry(next, struct yaffs_obj, siblings);
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+/* Called when @obj is unlinked. Any open search context currently
+ * pointing at @obj is advanced past it so no context is left holding
+ * a dangling pointer.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+ struct yaffs_search_context *sc;
+ struct list_head *search_contexts =
+ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+ list_for_each_entry(sc, search_contexts, others) {
+ if (sc->next_return == obj)
+ yaffs_search_advance(sc);
+ }
+}
+
+/* readdir implementation. A search context (see above) remembers the
+ * iteration position so the gross lock can be dropped around every
+ * filldir() callback without concurrent directory changes corrupting
+ * the walk. Offsets 0 and 1 are the synthetic "." and ".." entries.
+ */
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ struct yaffs_search_context *sc;
+ struct inode *inode = f->f_dentry->d_inode;
+ unsigned long offset, curoffs;
+ struct yaffs_obj *l;
+ int ret_val = 0;
+
+ char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ /* Record the task doing readdir; cleared again at "out" below.
+ * (Consumers of readdir_process are outside this view.)
+ */
+ yaffs_dev_to_lc(dev)->readdir_process = current;
+
+ offset = f->f_pos;
+
+ sc = yaffs_new_search(obj);
+ if (!sc) {
+ ret_val = -ENOMEM;
+ goto out;
+ }
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: starting at %d", (int)offset);
+
+ if (offset == 0) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry . ino %d",
+ (int)inode->i_ino);
+ /* drop the gross lock around filldir: it may sleep/copy out */
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+ if (offset == 1) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: entry .. ino %d",
+ (int)f->f_dentry->d_parent->d_inode->i_ino);
+ yaffs_gross_unlock(dev);
+ if (filldir(dirent, "..", 2, offset,
+ f->f_dentry->d_parent->d_inode->i_ino,
+ DT_DIR) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+ yaffs_gross_lock(dev);
+ offset++;
+ f->f_pos++;
+ }
+
+ curoffs = 1;
+
+ /* If the directory has changed since the open or last call to
+ readdir, rewind to after the 2 canned entries. */
+ if (f->f_version != inode->i_version) {
+ offset = 2;
+ f->f_pos = offset;
+ f->f_version = inode->i_version;
+ }
+
+ while (sc->next_return) {
+ curoffs++;
+ l = sc->next_return;
+ if (curoffs >= offset) {
+ int this_inode = yaffs_get_obj_inode(l);
+ int this_type = yaffs_get_obj_type(l);
+
+ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readdir: %s inode %d",
+ name, yaffs_get_obj_inode(l));
+
+ /* again unlock around filldir; the search context keeps
+ * our place valid even if entries are removed meanwhile */
+ yaffs_gross_unlock(dev);
+
+ if (filldir(dirent,
+ name,
+ strlen(name),
+ offset, this_inode, this_type) < 0) {
+ yaffs_gross_lock(dev);
+ goto out;
+ }
+
+ yaffs_gross_lock(dev);
+
+ offset++;
+ f->f_pos++;
+ }
+ yaffs_search_advance(sc);
+ }
+
+out:
+ yaffs_search_end(sc);
+ yaffs_dev_to_lc(dev)->readdir_process = NULL;
+ yaffs_gross_unlock(dev);
+
+ return ret_val;
+}
+
+/* File operations for yaffs directories. */
+static const struct file_operations yaffs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = yaffs_readdir,
+ .fsync = yaffs_sync_object,
+ .llseek = generic_file_llseek,
+};
+
+
+
+/* flush file op: push the object's cached data out to flash under the
+ * gross lock. Always reports success.
+ */
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_flush object %d (%s)",
+ obj->obj_id, obj->dirty ? "dirty" : "clean");
+
+ yaffs_gross_lock(dev);
+ yaffs_flush_file(obj, 1, 0);
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+/* File operations for regular yaffs files: generic page-cache based
+ * read/write/mmap paths plus yaffs-specific flush and fsync.
+ */
+static const struct file_operations yaffs_file_operations = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .mmap = generic_file_mmap,
+ .flush = yaffs_file_flush,
+ .fsync = yaffs_sync_object,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/* ExportFS support */
+/* exportfs helper: look up an inode by number for NFS file handles.
+ * NOTE(review): @generation is ignored here - no staleness check is
+ * performed against the object; confirm that is acceptable for NFS use.
+ */
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+ uint32_t generation)
+{
+ return yaffs_iget(sb, ino);
+}
+
+/* exportfs fh_to_dentry: decode a file handle via the generic helper,
+ * using yaffs2_nfs_get_inode for the inode lookup.
+ */
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+/* exportfs fh_to_parent: decode the parent directory from a file
+ * handle via the generic helper.
+ */
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ yaffs2_nfs_get_inode);
+}
+
+/* exportfs get_parent: return a dentry for the parent of @dentry.
+ *
+ * Fix: d_obtain_alias() never returns NULL - it returns an ERR_PTR on
+ * failure and consumes the inode reference itself in that case. The
+ * previous !IS_ERR(parent) test therefore fired on every SUCCESSFUL
+ * alias, throwing the dentry away (leak), returning -ENOMEM, and
+ * calling iput() on an inode whose reference the alias already owned
+ * (reference underflow). Simply propagate d_obtain_alias()'s result.
+ */
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct dentry *parent = ERR_PTR(-ENOENT);
+ struct inode *inode;
+ unsigned long parent_ino;
+ struct yaffs_obj *d_obj;
+ struct yaffs_obj *parent_obj;
+
+ d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+ if (d_obj) {
+ parent_obj = d_obj->parent;
+ if (parent_obj) {
+ parent_ino = yaffs_get_obj_inode(parent_obj);
+ inode = yaffs_iget(sb, parent_ino);
+
+ if (IS_ERR(inode))
+ parent = ERR_CAST(inode);
+ else
+ /* on failure this returns an ERR_PTR and
+ * drops the inode reference for us */
+ parent = d_obtain_alias(inode);
+ }
+ }
+
+ return parent;
+}
+
+/* Just declare a zero structure as a NULL value implies
+ * using the default functions of exportfs.
+ */
+
+/* NFS export operations; hooks not listed fall back to the exportfs
+ * defaults (see the comment above).
+ */
+static struct export_operations yaffs_export_ops = {
+ .fh_to_dentry = yaffs2_fh_to_dentry,
+ .fh_to_parent = yaffs2_fh_to_parent,
+ .get_parent = yaffs2_get_parent,
+};
+
+
+/*-----------------------------------------------------------------*/
+
+/* readlink inode op: fetch the symlink target (allocated by
+ * yaffs_get_symlink_alias) under the gross lock, copy it to userspace
+ * via vfs_readlink(), then free it.
+ */
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+ int buflen)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(dentry);
+ struct yaffs_dev *dev = obj->my_dev;
+ unsigned char *alias;
+ int ret;
+
+ yaffs_gross_lock(dev);
+ alias = yaffs_get_symlink_alias(obj);
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return -ENOMEM;
+
+ ret = vfs_readlink(dentry, buffer, buflen, alias);
+ kfree(alias);
+ return ret;
+}
+
+/* follow_link inode op: hand the symlink target to the VFS via
+ * nd_set_link(). The alias buffer is returned as the cookie and freed
+ * later by yaffs_put_link().
+ */
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(dentry);
+ struct yaffs_dev *dev = obj->my_dev;
+ unsigned char *alias;
+
+ yaffs_gross_lock(dev);
+ alias = yaffs_get_symlink_alias(obj);
+ yaffs_gross_unlock(dev);
+
+ if (!alias)
+ return ERR_PTR(-ENOMEM);
+
+ nd_set_link(nd, alias);
+ return (void *)alias;
+}
+
+/* put_link inode op: free the alias buffer handed out as the cookie by
+ * yaffs_follow_link() once the VFS is done with it.
+ */
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+ kfree(alias);
+}
+
+
+/* Break the two-way inode <-> yaffs_obj association and complete any
+ * object free that was deferred until the inode went away.
+ */
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+ /* Clear the association between the inode and
+ * the struct yaffs_obj.
+ */
+ obj->my_inode = NULL;
+ yaffs_inode_to_obj_lv(inode) = NULL;
+
+ /* If the object freeing was deferred, then the real
+ * free happens now.
+ * This should fix the inode inconsistency problem.
+ */
+ yaffs_handle_defered_free(obj);
+}
+
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ * Ordering matters: the page cache is truncated and writeback ended
+ * before the yaffs object is deleted (link count 0) and finally
+ * unstitched from the inode.
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+ int deleteme = 0;
+
+ obj = yaffs_inode_to_obj(inode);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_evict_inode: ino %d, count %d %s",
+ (int)inode->i_ino,
+ atomic_read(&inode->i_count),
+ obj ? "object exists" : "null object");
+
+ /* last link gone and inode healthy: delete backing object too */
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ deleteme = 1;
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
+ if (deleteme && obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_del_obj(obj);
+ yaffs_gross_unlock(dev);
+ }
+ if (obj) {
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+ yaffs_unstitch_obj(inode, obj);
+ yaffs_gross_unlock(dev);
+ }
+
+}
+
+/* Mark the superblock dirty so write_super will eventually run.
+ * Tolerates a device that has no superblock attached yet.
+ */
+static void yaffs_touch_super(struct yaffs_dev *dev)
+{
+ struct super_block *sb = yaffs_dev_to_lc(dev)->super;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_touch_super() sb = %p", sb);
+ if (!sb)
+ return;
+ sb->s_dirt = 1;
+}
+
+/* Read one page of file data into @pg (which must already be locked)
+ * and set its uptodate/error bits accordingly. Returns 0 on success or
+ * the negative error from yaffs_file_rd(). Does NOT unlock the page -
+ * see yaffs_readpage_unlock().
+ */
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+ /* Lifted from jffs2 */
+
+ struct yaffs_obj *obj;
+ unsigned char *pg_buf;
+ int ret;
+
+ struct yaffs_dev *dev;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_readpage_nolock at %08x, size %08x",
+ (unsigned)(pg->index << PAGE_CACHE_SHIFT),
+ (unsigned)PAGE_CACHE_SIZE);
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ BUG_ON(!PageLocked(pg));
+
+ pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
+
+ yaffs_gross_lock(dev);
+
+ ret = yaffs_file_rd(obj, pg_buf,
+ pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+ yaffs_gross_unlock(dev);
+
+ /* a positive return is the byte count - map any success to 0 */
+ if (ret >= 0)
+ ret = 0;
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
+ }
+
+ flush_dcache_page(pg);
+ kunmap(pg);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+ return ret;
+}
+
+/* Read a page and then unlock it, as the VFS readpage contract expects. */
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+ int ret = yaffs_readpage_nolock(f, pg);
+ UnlockPage(pg);
+ return ret;
+}
+
+/* address_space readpage hook: thin tracing wrapper around
+ * yaffs_readpage_unlock().
+ */
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+ int ret;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+ ret = yaffs_readpage_unlock(f, pg);
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+ return ret;
+}
+
+/* writepage inspired by/stolen from smbfs */
+
+/* address_space writepage hook: write one page-cache page out to flash.
+ * Pages wholly beyond i_size are zeroed and completed without writing;
+ * a partial last page is written only up to i_size (tail zeroed).
+ * Returns 0 on a full write, -ENOSPC on a short one.
+ */
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct yaffs_dev *dev;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ unsigned long end_index;
+ char *buffer;
+ struct yaffs_obj *obj;
+ int n_written = 0;
+ unsigned n_bytes;
+ loff_t i_size;
+
+ if (!mapping)
+ BUG();
+ inode = mapping->host;
+ if (!inode)
+ BUG();
+ i_size = i_size_read(inode);
+
+ end_index = i_size >> PAGE_CACHE_SHIFT;
+
+ /* decide how much of this page is actually inside the file */
+ if (page->index < end_index)
+ n_bytes = PAGE_CACHE_SIZE;
+ else {
+ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (page->index > end_index || !n_bytes) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, inode size = %08x!!!",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT),
+ (unsigned)inode->i_size);
+ yaffs_trace(YAFFS_TRACE_OS,
+ " -> don't care!!");
+
+ /* page entirely past EOF: complete it without writing */
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ return 0;
+ }
+ }
+
+ if (n_bytes != PAGE_CACHE_SIZE)
+ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+ get_page(page);
+
+ buffer = kmap(page);
+
+ obj = yaffs_inode_to_obj(inode);
+ dev = obj->my_dev;
+ yaffs_gross_lock(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_writepage at %08x, size %08x",
+ (unsigned)(page->index << PAGE_CACHE_SHIFT), n_bytes);
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag0: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ n_written = yaffs_wr_file(obj, buffer,
+ page->index << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+ /* flash contents changed: mark superblock dirty for write_super */
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "writepag1: obj = %05x, ino = %05x",
+ (int)obj->variant.file_variant.file_size, (int)inode->i_size);
+
+ yaffs_gross_unlock(dev);
+
+ kunmap(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ end_page_writeback(page);
+ put_page(page);
+
+ return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/* Space holding and freeing is done to ensure we have space available for
+ * write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * Todo: need to do this with a counter to handle parallel reads better.
+ */
+
+/* Crude free-space gate for write_begin: succeed (1) only while the
+ * device still has more than a small headroom of free chunks. See the
+ * Todo above about replacing this with a real reservation counter.
+ */
+static ssize_t yaffs_hold_space(struct file *f)
+{
+ struct yaffs_obj *obj = yaffs_dentry_to_obj(f->f_dentry);
+ struct yaffs_dev *dev = obj->my_dev;
+ int n_free_chunks;
+
+ yaffs_gross_lock(dev);
+ n_free_chunks = yaffs_get_n_free_chunks(dev);
+ yaffs_gross_unlock(dev);
+
+ return (n_free_chunks > 20) ? 1 : 0;
+}
+
+/* Counterpart of yaffs_hold_space(). No reservation is actually
+ * recorded yet, so the body is deliberately just a lock/unlock pair;
+ * kept so the write_begin/write_end paths stay symmetric.
+ */
+static void yaffs_release_space(struct file *f)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ yaffs_gross_unlock(dev);
+}
+
+/* address_space write_begin hook: grab (and lock) the page for the
+ * write, reserve space via yaffs_hold_space(), and bring the page
+ * uptodate if needed. On failure the held space is released and the
+ * page unlocked/dropped; on success the page is returned locked in
+ * *pagep for yaffs_write_end() to finish.
+ */
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct page *pg = NULL;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ int ret = 0;
+ int space_held = 0;
+
+ /* Get a page */
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+
+ *pagep = pg;
+ if (!pg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ yaffs_trace(YAFFS_TRACE_OS,
+ "start yaffs_write_begin index %d(%x) uptodate %d",
+ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+ /* Get fs space */
+ space_held = yaffs_hold_space(filp);
+
+ if (!space_held) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Update page if required */
+
+ if (!Page_Uptodate(pg))
+ ret = yaffs_readpage_nolock(filp, pg);
+
+ if (ret)
+ goto out;
+
+ /* Happy path return */
+ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+ return 0;
+
+out:
+ yaffs_trace(YAFFS_TRACE_OS,
+ "end yaffs_write_begin fail returning %d", ret);
+ if (space_held)
+ yaffs_release_space(filp);
+ if (pg) {
+ unlock_page(pg);
+ page_cache_release(pg);
+ }
+ return ret;
+}
+
+/* Core write path used by yaffs_write_end(): write @n bytes from @buf
+ * to the file at *@pos (or at EOF for O_APPEND on non-block inodes),
+ * update *pos/i_size/i_blocks on success, and return the byte count,
+ * -ENOSPC on a zero-length outcome of a non-empty request, or -EINVAL
+ * if the dentry has no yaffs object.
+ *
+ * Fix: the NULL check on obj used to come after obj->my_dev had
+ * already been dereferenced, so it could never trigger. Bail out
+ * before touching the object.
+ */
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+ loff_t * pos)
+{
+ struct yaffs_obj *obj;
+ int n_written, ipos;
+ struct inode *inode;
+ struct yaffs_dev *dev;
+
+ obj = yaffs_dentry_to_obj(f->f_dentry);
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: hey obj is null!");
+ return -EINVAL;
+ }
+
+ dev = obj->my_dev;
+
+ yaffs_gross_lock(dev);
+
+ inode = f->f_dentry->d_inode;
+
+ /* O_APPEND writes to regular files always start at current EOF */
+ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+ ipos = inode->i_size;
+ else
+ ipos = *pos;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %d(%x)",
+ (unsigned)n, (unsigned)n, obj->obj_id, ipos, ipos);
+
+ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+ /* flash changed: mark the superblock dirty */
+ yaffs_touch_super(dev);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write: %d(%x) bytes written",
+ (unsigned)n, (unsigned)n);
+
+ if (n_written > 0) {
+ ipos += n_written;
+ *pos = ipos;
+ if (ipos > inode->i_size) {
+ inode->i_size = ipos;
+ inode->i_blocks = (ipos + 511) >> 9;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_file_write size updated to %d bytes, %d blocks",
+ ipos, (int)(inode->i_blocks));
+ }
+
+ }
+ yaffs_gross_unlock(dev);
+ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+/* address_space write_end hook: push the bytes the VFS copied into the
+ * page through yaffs_file_write(), then release the space hold and the
+ * page taken in yaffs_write_begin(). Returns the number written (or
+ * the error from yaffs_file_write()).
+ */
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *pg, void *fsdadata)
+{
+ int ret = 0;
+ void *addr, *kva;
+ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+ kva = kmap(pg);
+ addr = kva + offset_into_page;
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end addr %p pos %x n_bytes %d",
+ addr, (unsigned)pos, copied);
+
+ ret = yaffs_file_write(filp, addr, copied, &pos);
+
+ if (ret != copied) {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_write_end not same size ret %d copied %d",
+ ret, copied);
+ SetPageError(pg);
+ }
+
+ kunmap(pg);
+
+ yaffs_release_space(filp);
+ unlock_page(pg);
+ page_cache_release(pg);
+ return ret;
+}
+
+/* statfs hook: report device capacity and free space in units of
+ * sb->s_blocksize. Three cases: non-power-of-two chunk sizes use
+ * 64-bit byte arithmetic with do_div(); otherwise the chunk/block
+ * ratio is applied directly depending on which size is larger.
+ */
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+ struct super_block *sb = dentry->d_sb;
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+ yaffs_gross_lock(dev);
+
+ buf->f_type = YAFFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = 255;
+
+ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+ /* Do this if chunk size is not a power of 2 */
+
+ uint64_t bytes_in_dev;
+ uint64_t bytes_free;
+
+ bytes_in_dev =
+ ((uint64_t)
+ ((dev->param.end_block - dev->param.start_block +
+ 1))) * ((uint64_t) (dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk));
+
+ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
+ buf->f_blocks = bytes_in_dev;
+
+ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+ ((uint64_t) (dev->data_bytes_per_chunk));
+
+ do_div(bytes_free, sb->s_blocksize);
+
+ buf->f_bfree = bytes_free;
+
+ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) /
+ (sb->s_blocksize / dev->data_bytes_per_chunk);
+ } else {
+ buf->f_blocks =
+ (dev->param.end_block - dev->param.start_block + 1) *
+ dev->param.chunks_per_block *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+ buf->f_bfree =
+ yaffs_get_n_free_chunks(dev) *
+ (dev->data_bytes_per_chunk / sb->s_blocksize);
+ }
+
+ /* yaffs has no static inode limit */
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_bavail = buf->f_bfree;
+
+ yaffs_gross_unlock(dev);
+ return 0;
+}
+
+/* Flush every yaffs-backed inode attached to @sb out to flash.
+ * Inodes without a yaffs object are skipped.
+ */
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+ struct inode *iptr;
+ struct yaffs_obj *obj;
+
+ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+ obj = yaffs_inode_to_obj(iptr);
+ if (!obj)
+ continue;
+ yaffs_trace(YAFFS_TRACE_OS,
+ "flushing obj %d", obj->obj_id);
+ yaffs_flush_file(obj, 1, 0);
+ }
+}
+
+/* Flush everything for @sb: all inodes, dirty directories and the
+ * whole short-op cache, optionally saving a checkpoint afterwards.
+ */
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ if (!dev)
+ return;
+
+ yaffs_flush_inodes(sb);
+ yaffs_update_dirty_dirs(dev);
+ yaffs_flush_whole_cache(dev);
+
+ if (do_checkpoint)
+ yaffs_checkpoint_save(dev);
+}
+
+/* Rate how urgently background garbage collection is needed:
+ * 0 = none (bg thread stopped, little scatter, or plenty erased),
+ * 1 = mild, 2 = urgent. "Scattered" chunks are free chunks that do
+ * not sit in a fully erased block.
+ */
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+ unsigned scattered = 0; /* Free chunks not in an erased block */
+
+ if (erased_chunks < dev->n_free_chunks)
+ scattered = dev->n_free_chunks - erased_chunks;
+
+ if (!context->bg_running)
+ return 0;
+ if (scattered < (dev->param.chunks_per_block * 2))
+ return 0;
+ if (erased_chunks > dev->n_free_chunks / 2)
+ return 0;
+ if (erased_chunks > dev->n_free_chunks / 4)
+ return 1;
+ return 2;
+}
+
+/* Common worker for sync_fs/write_super: flush the superblock when it
+ * is dirty or a checkpoint is due. A requested checkpoint is skipped
+ * while GC is urgent; bit 2 of yaffs_auto_checkpoint forces a one-shot
+ * checkpoint and is cleared once taken.
+ */
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+ int do_checkpoint;
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+ gc_urgent,
+ sb->s_dirt ? "dirty" : "clean",
+ request_checkpoint ? "checkpoint requested" : "no checkpoint",
+ oneshot_checkpoint ? " one-shot" : "");
+
+ yaffs_gross_lock(dev);
+ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+ oneshot_checkpoint) && !dev->is_checkpointed;
+
+ if (sb->s_dirt || do_checkpoint) {
+ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+ sb->s_dirt = 0;
+ if (oneshot_checkpoint)
+ yaffs_auto_checkpoint &= ~4;
+ }
+ yaffs_gross_unlock(dev);
+
+ return 0;
+}
+
+/*
+ * yaffs background thread functions .
+ * yaffs_bg_thread_fn() the thread function
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after the yaffs is initialised
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is in read only.
+ */
+
+/* Timer callback used by the background thread: wake the sleeping
+ * task whose task_struct pointer is packed into @data.
+ */
+void yaffs_background_waker(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+/* Background kthread body: periodically updates dirty directories and
+ * runs garbage collection, with the GC interval shortened as urgency
+ * rises. Sleeps between passes using a one-shot timer that wakes this
+ * task, so it reacts to kthread_stop() promptly. Freezer-aware.
+ */
+static int yaffs_bg_thread_fn(void *data)
+{
+ struct yaffs_dev *dev = (struct yaffs_dev *)data;
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ unsigned long now = jiffies;
+ unsigned long next_dir_update = now;
+ unsigned long next_gc = now;
+ unsigned long expires;
+ unsigned int urgency;
+
+ int gc_result;
+ struct timer_list timer;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "yaffs_background starting for dev %p", (void *)dev);
+
+ set_freezable();
+ while (context->bg_running) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ yaffs_gross_lock(dev);
+
+ now = jiffies;
+
+ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+ yaffs_update_dirty_dirs(dev);
+ next_dir_update = now + HZ;
+ }
+
+ if (time_after(now, next_gc) && yaffs_bg_enable) {
+ if (!dev->is_checkpointed) {
+ urgency = yaffs_bg_gc_urgency(dev);
+ /* gc_result is currently unused beyond this call */
+ gc_result = yaffs_bg_gc(dev, urgency);
+ if (urgency > 1)
+ next_gc = now + HZ / 20 + 1;
+ else if (urgency > 0)
+ next_gc = now + HZ / 10 + 1;
+ else
+ next_gc = now + HZ * 2;
+ } else {
+ /*
+ * gc not running so set to next_dir_update
+ * to cut down on wake ups
+ */
+ next_gc = next_dir_update;
+ }
+ }
+ yaffs_gross_unlock(dev);
+ /* sleep until whichever of the two deadlines comes first */
+ expires = next_dir_update;
+ if (time_before(next_gc, expires))
+ expires = next_gc;
+ if (time_before(expires, now))
+ expires = now + HZ;
+
+ Y_INIT_TIMER(&timer);
+ timer.expires = expires + 1;
+ timer.data = (unsigned long)current;
+ timer.function = yaffs_background_waker;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+ }
+
+ return 0;
+}
+
+/* Launch the background thread for @dev. Refuses on read-only mounts
+ * (returns -1); otherwise returns 0 on success or the PTR_ERR from
+ * kthread_run(), with the context rolled back on failure.
+ */
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+ int retval = 0;
+
+ if (dev->read_only)
+ return -1;
+
+ context->bg_running = 1;
+ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+ (void *)dev, "yaffs-bg-%d",
+ context->mount_id);
+
+ if (IS_ERR(context->bg_thread)) {
+ retval = PTR_ERR(context->bg_thread);
+ context->bg_thread = NULL;
+ context->bg_running = 0;
+ }
+ return retval;
+}
+
+/* Stop the background thread: clear the running flag first so the
+ * thread's loop exits, then join it via kthread_stop().
+ */
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+ ctxt->bg_running = 0;
+
+ if (!ctxt->bg_thread)
+ return;
+
+ kthread_stop(ctxt->bg_thread);
+ ctxt->bg_thread = NULL;
+}
+
+/* Superblock write_super hook: checkpoint only when auto-checkpoint
+ * level is 2 or above.
+ */
+static void yaffs_write_super(struct super_block *sb)
+{
+ unsigned want_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+ "yaffs_write_super%s",
+ want_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, want_checkpoint);
+}
+
+/* Superblock sync_fs hook. The @wait flag is ignored; auto-checkpoint
+ * level 1 or above requests a checkpoint.
+ */
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+{
+ unsigned want_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+ "yaffs_sync_fs%s", want_checkpoint ? " checkpt" : "");
+
+ yaffs_do_sync_fs(sb, want_checkpoint);
+ return 0;
+}
+
+
+/* List of all mounted yaffs contexts, protected by yaffs_context_lock. */
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+
+
+/* Parsed mount options; the *_overridden flags record that the mount
+ * line explicitly set the paired value (so defaults are not applied).
+ */
+struct yaffs_options {
+ int inband_tags; /* store tags in the data area */
+ int skip_checkpoint_read; /* ignore checkpoint on mount */
+ int skip_checkpoint_write; /* don't save checkpoint */
+ int no_cache; /* disable short-op cache */
+ int tags_ecc_on;
+ int tags_ecc_overridden;
+ int lazy_loading_enabled;
+ int lazy_loading_overridden;
+ int empty_lost_and_found;
+ int empty_lost_and_found_overridden;
+};
+
+#define MAX_OPT_LEN 30
+/* Parse the comma separated mount-option string into @options.
+ * Tokens longer than MAX_OPT_LEN are truncated (and so won't match).
+ * Returns 0 on success, 1 on the first unrecognised option.
+ */
+static int yaffs_parse_options(struct yaffs_options *options,
+ const char *options_str)
+{
+ char cur_opt[MAX_OPT_LEN + 1];
+ int p;
+ int error = 0;
+
+ while (options_str && *options_str && !error) {
+ /* extract the next token into cur_opt */
+ memset(cur_opt, 0, MAX_OPT_LEN + 1);
+ p = 0;
+
+ while (*options_str == ',')
+ options_str++;
+
+ while (*options_str && *options_str != ',') {
+ if (p < MAX_OPT_LEN)
+ cur_opt[p++] = *options_str;
+ options_str++;
+ }
+
+ if (!strcmp(cur_opt, "inband-tags")) {
+ options->inband_tags = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
+ options->tags_ecc_on = 0;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
+ options->tags_ecc_on = 1;
+ options->tags_ecc_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
+ options->lazy_loading_enabled = 0;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
+ options->lazy_loading_enabled = 1;
+ options->lazy_loading_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+ options->empty_lost_and_found = 0;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+ options->empty_lost_and_found = 1;
+ options->empty_lost_and_found_overridden = 1;
+ } else if (!strcmp(cur_opt, "no-cache")) {
+ options->no_cache = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+ options->skip_checkpoint_read = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+ options->skip_checkpoint_write = 1;
+ } else if (!strcmp(cur_opt, "no-checkpoint")) {
+ options->skip_checkpoint_read = 1;
+ options->skip_checkpoint_write = 1;
+ } else {
+ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+ cur_opt);
+ error = 1;
+ }
+ }
+
+ return error;
+}
+
+/* Page-cache operations for regular yaffs files. */
+static struct address_space_operations yaffs_file_address_operations = {
+ .readpage = yaffs_readpage,
+ .writepage = yaffs_writepage,
+ .write_begin = yaffs_write_begin,
+ .write_end = yaffs_write_end,
+};
+
+
+
+/* Inode operations for regular yaffs files. */
+static const struct inode_operations yaffs_file_inode_operations = {
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+/* Inode operations for yaffs symlinks. */
+static const struct inode_operations yaffs_symlink_inode_operations = {
+ .readlink = yaffs_readlink,
+ .follow_link = yaffs_follow_link,
+ .put_link = yaffs_put_link,
+ .setattr = yaffs_setattr,
+#ifdef CONFIG_YAFFS_XATTR
+ .setxattr = yaffs_setxattr,
+ .getxattr = yaffs_getxattr,
+ .listxattr = yaffs_listxattr,
+ .removexattr = yaffs_removexattr,
+#endif
+};
+
+/* Populate @inode from the on-flash state of @obj and stitch the two
+ * together. Repairs a mode whose S_IFMT bits disagree with the yaffs
+ * variant type, then selects the inode/file/address-space ops by file
+ * type. Both pointers must be non-NULL or the call is a traced no-op.
+ */
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+ struct yaffs_obj *obj)
+{
+ if (inode && obj) {
+
+ /* Check mode against the variant type and attempt to repair if broken. */
+ u32 mode = obj->yst_mode;
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (!S_ISREG(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFREG;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (!S_ISLNK(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFLNK;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!S_ISDIR(mode)) {
+ obj->yst_mode &= ~S_IFMT;
+ obj->yst_mode |= S_IFDIR;
+ }
+
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ default:
+ /* TODO? */
+ break;
+ }
+
+ inode->i_flags |= S_NOATIME;
+
+ inode->i_ino = obj->obj_id;
+ inode->i_mode = obj->yst_mode;
+ inode->i_uid = obj->yst_uid;
+ inode->i_gid = obj->yst_gid;
+
+ inode->i_rdev = old_decode_dev(obj->yst_rdev);
+
+ /* yaffs keeps whole-second timestamps only */
+ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+ inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+ inode->i_mtime.tv_nsec = 0;
+ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_size = yaffs_get_obj_length(obj);
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ inode->i_nlink = yaffs_get_obj_link_count(obj);
+
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode mode %x uid %d gid %d size %d count %d",
+ inode->i_mode, inode->i_uid, inode->i_gid,
+ (int)inode->i_size, atomic_read(&inode->i_count));
+
+ /* wire up the per-type operations */
+ switch (obj->yst_mode & S_IFMT) {
+ default: /* fifo, device or socket */
+ init_special_inode(inode, obj->yst_mode,
+ old_decode_dev(obj->yst_rdev));
+ break;
+ case S_IFREG: /* file */
+ inode->i_op = &yaffs_file_inode_operations;
+ inode->i_fop = &yaffs_file_operations;
+ inode->i_mapping->a_ops =
+ &yaffs_file_address_operations;
+ break;
+ case S_IFDIR: /* directory */
+ inode->i_op = &yaffs_dir_inode_operations;
+ inode->i_fop = &yaffs_dir_operations;
+ break;
+ case S_IFLNK: /* symlink */
+ inode->i_op = &yaffs_symlink_inode_operations;
+ break;
+ }
+
+ /* two-way association, undone by yaffs_unstitch_obj() */
+ yaffs_inode_to_obj_lv(inode) = obj;
+
+ obj->my_inode = inode;
+
+ } else {
+ yaffs_trace(YAFFS_TRACE_OS,
+ "yaffs_fill_inode invalid parameters");
+ }
+}
+
+/* Superblock put_super hook: stop the background thread, flush with a
+ * final checkpoint, run the low-level put_super (e.g. MTD release),
+ * deinitialise yaffs, unhook the mount from the global context list
+ * and free the per-mount buffers and device.
+ */
+static void yaffs_put_super(struct super_block *sb)
+{
+ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+ yaffs_trace(YAFFS_TRACE_OS, "yaffs_put_super");
+
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "Shutting down yaffs background thread");
+ yaffs_bg_stop(dev);
+ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+ "yaffs background thread shut down");
+
+ yaffs_gross_lock(dev);
+
+ yaffs_flush_super(sb, 1);
+
+ if (yaffs_dev_to_lc(dev)->put_super_fn)
+ yaffs_dev_to_lc(dev)->put_super_fn(sb);
+
+ yaffs_deinitialise(dev);
+
+ yaffs_gross_unlock(dev);
+ mutex_lock(&yaffs_context_lock);
+ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+ mutex_unlock(&yaffs_context_lock);
+
+ if (yaffs_dev_to_lc(dev)->spare_buffer) {
+ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+ }
+
+ kfree(dev);
+}
+
+/* Driver-specific put_super for MTD-backed mounts: flush the device if it
+ * provides a sync hook, then release the reference taken at mount time. */
+static void yaffs_mtd_put_super(struct super_block *sb)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+	if (mtd->sync)
+		mtd->sync(mtd);
+
+	put_mtd_device(mtd);
+}
+
+/* Superblock operations handed to the VFS for both yaffs and yaffs2. */
+static const struct super_operations yaffs_super_ops = {
+	.statfs = yaffs_statfs,
+	.put_super = yaffs_put_super,
+	.evict_inode = yaffs_evict_inode,
+	.sync_fs = yaffs_sync_fs,
+	.write_super = yaffs_write_super,
+};
+
+/*
+ * Core mount routine shared by the yaffs1 and yaffs2 entry points.
+ *
+ * Validates that sb refers to an MTD NAND device with the capabilities the
+ * requested yaffs_version needs, allocates and configures the yaffs_dev /
+ * yaffs_linux_context pair, initialises the guts and creates the root inode.
+ * Returns sb on success, NULL on failure.
+ *
+ * Fixes over the original: the !sb check now happens before sb is
+ * dereferenced; the MTD reference from get_mtd_device() is dropped on all
+ * early failure paths; the spare-buffer allocation is checked.
+ */
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+						     struct super_block *sb,
+						     void *data, int silent)
+{
+	int n_blocks;
+	struct inode *inode = NULL;
+	struct dentry *root;
+	struct yaffs_dev *dev = NULL;
+	char devname_buf[BDEVNAME_SIZE + 1];
+	struct mtd_info *mtd;
+	int err;
+	char *data_str = (char *)data;
+	struct yaffs_linux_context *context = NULL;
+	struct yaffs_param *param;
+
+	int read_only = 0;
+
+	struct yaffs_options options;
+
+	unsigned mount_id;
+	int found;
+	struct yaffs_linux_context *context_iterator;
+	struct list_head *l;
+
+	/* Check sb BEFORE writing through it; the original set s_magic etc.
+	 * first, which made this check unreachable on a NULL sb. */
+	if (!sb) {
+		printk(KERN_INFO "yaffs: sb is NULL\n");
+		return NULL;
+	}
+
+	sb->s_magic = YAFFS_MAGIC;
+	sb->s_op = &yaffs_super_ops;
+	sb->s_flags |= MS_NOATIME;
+
+	read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+	sb->s_export_op = &yaffs_export_ops;
+
+	if (!sb->s_dev)
+		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+	else if (!yaffs_devname(sb, devname_buf))
+		printk(KERN_INFO "yaffs: devname is NULL\n");
+	else
+		printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+		       sb->s_dev,
+		       yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+	if (!data_str)
+		data_str = "";
+
+	printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+	memset(&options, 0, sizeof(options));
+
+	if (yaffs_parse_options(&options, data_str)) {
+		/* Option parsing failed */
+		return NULL;
+	}
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: Using yaffs%d", yaffs_version);
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"Attempting MTD mount of %u.%u,\"%s\"",
+		MAJOR(sb->s_dev), MINOR(sb->s_dev),
+		yaffs_devname(sb, devname_buf));
+
+	/* Check it's an mtd device..... */
+	if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
+		return NULL;	/* This isn't an mtd device */
+
+	/* Get the device.  From here on we hold a reference on the MTD and
+	 * must drop it (put_mtd_device) on every failure path; the original
+	 * returned NULL directly and leaked the reference. */
+	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+	if (!mtd) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"MTD device #%u doesn't appear to exist",
+			MINOR(sb->s_dev));
+		return NULL;
+	}
+	/* Check it's NAND */
+	if (mtd->type != MTD_NANDFLASH) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"MTD device is not NAND it's type %d",
+			mtd->type);
+		goto out_put_mtd;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, " erase %p", mtd->erase);
+	yaffs_trace(YAFFS_TRACE_OS, " read %p", mtd->read);
+	yaffs_trace(YAFFS_TRACE_OS, " write %p", mtd->write);
+	yaffs_trace(YAFFS_TRACE_OS, " readoob %p", mtd->read_oob);
+	yaffs_trace(YAFFS_TRACE_OS, " writeoob %p", mtd->write_oob);
+	yaffs_trace(YAFFS_TRACE_OS, " block_isbad %p", mtd->block_isbad);
+	yaffs_trace(YAFFS_TRACE_OS, " block_markbad %p", mtd->block_markbad);
+	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+
+#ifdef CONFIG_YAFFS_AUTO_YAFFS2
+
+	/* Large-page devices cannot hold yaffs1 tags in OOB: switch up. */
+	if (yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+		yaffs_version = 2;
+	}
+
+	/* Added NCB 26/5/2006 for completeness */
+	if (yaffs_version == 2 && !options.inband_tags
+	    && WRITE_SIZE(mtd) == 512) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+		yaffs_version = 1;
+	}
+#endif
+
+	if (yaffs_version == 2) {
+		/* Check for version 2 style functions */
+		if (!mtd->erase ||
+		    !mtd->block_isbad ||
+		    !mtd->block_markbad ||
+		    !mtd->read ||
+		    !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support required functions");
+			goto out_put_mtd;
+		}
+
+		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+		     mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+		    !options.inband_tags) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes");
+			goto out_put_mtd;
+		}
+	} else {
+		/* Check for V1 style functions */
+		if (!mtd->erase ||
+		    !mtd->read ||
+		    !mtd->write || !mtd->read_oob || !mtd->write_oob) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support required functions");
+			goto out_put_mtd;
+		}
+
+		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes");
+			goto out_put_mtd;
+		}
+	}
+
+	/* OK, so if we got here, we have an MTD that's NAND and looks
+	 * like it has the right capabilities
+	 * Set the struct yaffs_dev up for mtd
+	 */
+
+	if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+		read_only = 1;
+		printk(KERN_INFO
+		       "yaffs: mtd is read only, setting superblock read only");
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+	context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+	if (!dev || !context) {
+		/* kfree(NULL) is a no-op, so no need to guard each one. */
+		kfree(dev);
+		kfree(context);
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs_read_super failed trying to allocate yaffs_dev");
+		goto out_put_mtd;
+	}
+
+	memset(dev, 0, sizeof(struct yaffs_dev));
+	param = &(dev->param);
+
+	memset(context, 0, sizeof(struct yaffs_linux_context));
+	dev->os_context = context;
+	INIT_LIST_HEAD(&(context->context_list));
+	context->dev = dev;
+	context->super = sb;
+
+	dev->read_only = read_only;
+
+	sb->s_fs_info = dev;
+
+	dev->driver_context = mtd;
+	param->name = mtd->name;
+
+	/* Set up the memory size parameters.... */
+
+	n_blocks =
+	    YCALCBLOCKS(mtd->size,
+			(YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK));
+
+	param->start_block = 0;
+	param->end_block = n_blocks - 1;
+	param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+	param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+	param->n_reserved_blocks = 5;
+	param->n_caches = (options.no_cache) ? 0 : 10;
+	param->inband_tags = options.inband_tags;
+
+#ifdef CONFIG_YAFFS_DISABLE_LAZY_LOAD
+	param->disable_lazy_load = 1;
+#endif
+#ifdef CONFIG_YAFFS_XATTR
+	param->enable_xattr = 1;
+#endif
+	if (options.lazy_loading_overridden)
+		param->disable_lazy_load = !options.lazy_loading_enabled;
+
+#ifdef CONFIG_YAFFS_DISABLE_TAGS_ECC
+	param->no_tags_ecc = 1;
+#endif
+
+#ifndef CONFIG_YAFFS_DISABLE_BACKGROUND
+	param->defered_dir_update = 1;
+#endif
+
+	if (options.tags_ecc_overridden)
+		param->no_tags_ecc = !options.tags_ecc_on;
+
+#ifdef CONFIG_YAFFS_EMPTY_LOST_AND_FOUND
+	param->empty_lost_n_found = 1;
+#endif
+
+#ifdef CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING
+	param->refresh_period = 0;
+#else
+	param->refresh_period = 500;
+#endif
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	param->always_check_erased = 1;
+#endif
+
+	if (options.empty_lost_and_found_overridden)
+		param->empty_lost_n_found = options.empty_lost_and_found;
+
+	/* ... and the functions. */
+	if (yaffs_version == 2) {
+		param->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+		param->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+		param->bad_block_fn = nandmtd2_mark_block_bad;
+		param->query_block_fn = nandmtd2_query_block;
+		yaffs_dev_to_lc(dev)->spare_buffer =
+		    kmalloc(mtd->oobsize, GFP_NOFS);
+		if (!yaffs_dev_to_lc(dev)->spare_buffer) {
+			/* The original never checked this allocation and
+			 * would have oopsed later on a NULL spare buffer. */
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"yaffs_read_super failed trying to allocate yaffs_dev");
+			sb->s_fs_info = NULL;
+			kfree(context);
+			kfree(dev);
+			goto out_put_mtd;
+		}
+		param->is_yaffs2 = 1;
+		param->total_bytes_per_chunk = mtd->writesize;
+		param->chunks_per_block = mtd->erasesize / mtd->writesize;
+		n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+		param->start_block = 0;
+		param->end_block = n_blocks - 1;
+	} else {
+		/* use the MTD interface in yaffs_mtdif1.c */
+		param->write_chunk_tags_fn = nandmtd1_write_chunk_tags;
+		param->read_chunk_tags_fn = nandmtd1_read_chunk_tags;
+		param->bad_block_fn = nandmtd1_mark_block_bad;
+		param->query_block_fn = nandmtd1_query_block;
+		param->is_yaffs2 = 0;
+	}
+	/* ... and common functions */
+	param->erase_fn = nandmtd_erase_block;
+	param->initialise_flash_fn = nandmtd_initialise;
+
+	yaffs_dev_to_lc(dev)->put_super_fn = yaffs_mtd_put_super;
+
+	param->sb_dirty_fn = yaffs_touch_super;
+	param->gc_control = yaffs_gc_control_callback;
+
+	yaffs_dev_to_lc(dev)->super = sb;
+
+#ifndef CONFIG_YAFFS_DOES_ECC
+	param->use_nand_ecc = 1;
+#endif
+
+	param->skip_checkpt_rd = options.skip_checkpoint_read;
+	param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+	mutex_lock(&yaffs_context_lock);
+	/* Get a mount id: lowest id not used by any live context.
+	 * O(n^2) but the number of mounts is tiny. */
+	found = 0;
+	for (mount_id = 0; !found; mount_id++) {
+		found = 1;
+		list_for_each(l, &yaffs_context_list) {
+			context_iterator =
+			    list_entry(l, struct yaffs_linux_context,
+				       context_list);
+			if (context_iterator->mount_id == mount_id)
+				found = 0;
+		}
+	}
+	context->mount_id = mount_id;
+
+	list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+		      &yaffs_context_list);
+	mutex_unlock(&yaffs_context_lock);
+
+	/* Directory search handling... */
+	INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+	param->remove_obj_fn = yaffs_remove_obj_callback;
+
+	mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+	yaffs_gross_lock(dev);
+
+	err = yaffs_guts_initialise(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: guts initialised %s",
+		(err == YAFFS_OK) ? "OK" : "FAILED");
+
+	if (err == YAFFS_OK)
+		yaffs_bg_start(dev);
+
+	if (!context->bg_thread)
+		param->defered_dir_update = 0;
+
+	/* Release lock before yaffs_get_inode() */
+	yaffs_gross_unlock(dev);
+
+	/* Create root inode */
+	if (err == YAFFS_OK)
+		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+	/* NOTE(review): from here on, failure leaks dev/context/mtd since a
+	 * full unwind would need yaffs_deinitialise + context-list removal;
+	 * kept as in the original, flagged for follow-up. */
+	if (!inode)
+		return NULL;
+
+	inode->i_op = &yaffs_dir_inode_operations;
+	inode->i_fop = &yaffs_dir_operations;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+	root = d_alloc_root(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: d_alloc_root done");
+
+	if (!root) {
+		iput(inode);
+		return NULL;
+	}
+	sb->s_root = root;
+	sb->s_dirt = !dev->is_checkpointed;
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs_read_super: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+	return sb;
+
+out_put_mtd:
+	/* Drop the reference taken by get_mtd_device() above. */
+	put_mtd_device(mtd);
+	return NULL;
+}
+
+/* fill-super callback for the "yaffs" (version 1) filesystem type:
+ * translate the superblock-pointer result into the 0 / -EINVAL
+ * convention expected by get_sb_bdev(). */
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+					 int silent)
+{
+	if (yaffs_internal_read_super(1, sb, data, silent))
+		return 0;
+	return -EINVAL;
+}
+
+/* .get_sb hook for the "yaffs" filesystem type: mount the named block
+ * device via get_sb_bdev() using the yaffs1 fill-super callback. */
+static int yaffs_read_super(struct file_system_type *fs,
+			    int flags, const char *dev_name,
+			    void *data, struct vfsmount *mnt)
+{
+
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs_internal_read_super_mtd, mnt);
+}
+
+/* Registration record for the "yaffs" (version 1) filesystem. */
+static struct file_system_type yaffs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs",
+	.get_sb = yaffs_read_super,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+
+#ifdef CONFIG_YAFFS_YAFFS2
+
+/* fill-super callback for the "yaffs2" filesystem type; 0 on success,
+ * -EINVAL when the core mount routine fails. */
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+					  int silent)
+{
+	if (!yaffs_internal_read_super(2, sb, data, silent))
+		return -EINVAL;
+	return 0;
+}
+
+/* .get_sb hook for the "yaffs2" filesystem type: mount the named block
+ * device via get_sb_bdev() using the yaffs2 fill-super callback. */
+static int yaffs2_read_super(struct file_system_type *fs,
+			     int flags, const char *dev_name, void *data,
+			     struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs2_internal_read_super_mtd, mnt);
+}
+
+/* Registration record for the "yaffs2" filesystem (CONFIG_YAFFS_YAFFS2). */
+static struct file_system_type yaffs2_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs2",
+	.get_sb = yaffs2_read_super,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+#endif /* CONFIG_YAFFS_YAFFS2 */
+
+static struct proc_dir_entry *my_proc_entry;
+
+/* Append a textual dump of the static configuration (struct yaffs_param)
+ * of @dev to @buf; returns the advanced write pointer.  Used by the
+ * /proc/yaffs read handler (even steps). */
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+	struct yaffs_param *param = &dev->param;
+	buf += sprintf(buf, "start_block........... %d\n", param->start_block);
+	buf += sprintf(buf, "end_block............. %d\n", param->end_block);
+	buf += sprintf(buf, "total_bytes_per_chunk. %d\n",
+		       param->total_bytes_per_chunk);
+	buf += sprintf(buf, "use_nand_ecc.......... %d\n",
+		       param->use_nand_ecc);
+	buf += sprintf(buf, "no_tags_ecc........... %d\n", param->no_tags_ecc);
+	buf += sprintf(buf, "is_yaffs2............. %d\n", param->is_yaffs2);
+	buf += sprintf(buf, "inband_tags........... %d\n", param->inband_tags);
+	buf += sprintf(buf, "empty_lost_n_found.... %d\n",
+		       param->empty_lost_n_found);
+	buf += sprintf(buf, "disable_lazy_load..... %d\n",
+		       param->disable_lazy_load);
+	buf += sprintf(buf, "refresh_period........ %d\n",
+		       param->refresh_period);
+	buf += sprintf(buf, "n_caches.............. %d\n", param->n_caches);
+	buf += sprintf(buf, "n_reserved_blocks..... %d\n",
+		       param->n_reserved_blocks);
+	buf += sprintf(buf, "always_check_erased... %d\n",
+		       param->always_check_erased);
+
+	return buf;
+}
+
+/* Append a textual dump of @dev's runtime counters and statistics to @buf;
+ * returns the advanced write pointer.  Used by the /proc/yaffs read
+ * handler (odd steps). */
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+	buf +=
+	    sprintf(buf, "data_bytes_per_chunk.. %d\n",
+		    dev->data_bytes_per_chunk);
+	buf += sprintf(buf, "chunk_grp_bits........ %d\n", dev->chunk_grp_bits);
+	buf += sprintf(buf, "chunk_grp_size........ %d\n", dev->chunk_grp_size);
+	buf +=
+	    sprintf(buf, "n_erased_blocks....... %d\n", dev->n_erased_blocks);
+	buf +=
+	    sprintf(buf, "blocks_in_checkpt..... %d\n", dev->blocks_in_checkpt);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_tnodes.............. %d\n", dev->n_tnodes);
+	buf += sprintf(buf, "n_obj................. %d\n", dev->n_obj);
+	buf += sprintf(buf, "n_free_chunks......... %d\n", dev->n_free_chunks);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_page_writes......... %u\n", dev->n_page_writes);
+	buf += sprintf(buf, "n_page_reads.......... %u\n", dev->n_page_reads);
+	buf += sprintf(buf, "n_erasures............ %u\n", dev->n_erasures);
+	buf += sprintf(buf, "n_gc_copies........... %u\n", dev->n_gc_copies);
+	buf += sprintf(buf, "all_gcs............... %u\n", dev->all_gcs);
+	buf +=
+	    sprintf(buf, "passive_gc_count...... %u\n", dev->passive_gc_count);
+	buf +=
+	    sprintf(buf, "oldest_dirty_gc_count. %u\n",
+		    dev->oldest_dirty_gc_count);
+	buf += sprintf(buf, "n_gc_blocks........... %u\n", dev->n_gc_blocks);
+	buf += sprintf(buf, "bg_gcs................ %u\n", dev->bg_gcs);
+	buf +=
+	    sprintf(buf, "n_retired_writes...... %u\n", dev->n_retired_writes);
+	buf +=
+	    sprintf(buf, "n_retired_blocks...... %u\n", dev->n_retired_blocks);
+	buf += sprintf(buf, "n_ecc_fixed........... %u\n", dev->n_ecc_fixed);
+	buf += sprintf(buf, "n_ecc_unfixed......... %u\n", dev->n_ecc_unfixed);
+	buf +=
+	    sprintf(buf, "n_tags_ecc_fixed...... %u\n", dev->n_tags_ecc_fixed);
+	buf +=
+	    sprintf(buf, "n_tags_ecc_unfixed.... %u\n",
+		    dev->n_tags_ecc_unfixed);
+	buf += sprintf(buf, "cache_hits............ %u\n", dev->cache_hits);
+	buf +=
+	    sprintf(buf, "n_deleted_files....... %u\n", dev->n_deleted_files);
+	buf +=
+	    sprintf(buf, "n_unlinked_files...... %u\n", dev->n_unlinked_files);
+	buf += sprintf(buf, "refresh_count......... %u\n", dev->refresh_count);
+	buf += sprintf(buf, "n_bg_deletions........ %u\n", dev->n_bg_deletions);
+
+	return buf;
+}
+
+/*
+ * Legacy procfs read handler for /proc/yaffs.
+ * Each call emits one "step" of output: step 0 is the build banner,
+ * step 1 a blank line, then each mounted device takes two steps
+ * (even = configuration via part0, odd = statistics via part1).
+ */
+static int yaffs_proc_read(char *page,
+			   char **start,
+			   off_t offset, int count, int *eof, void *data)
+{
+	struct list_head *item;
+	char *buf = page;
+	int step = offset;
+	int n = 0;
+
+	/* Get proc_file_read() to step 'offset' by one on each successive call.
+	 * We use 'offset' (*ppos) to indicate where we are in dev_list.
+	 * This also assumes the user has posted a read buffer large
+	 * enough to hold the complete output; but that's life in /proc.
+	 */
+
+	/* Storing 1 through the char** makes proc_file_read() advance the
+	 * offset by one per call (see the comment above). */
+	*(int *)start = 1;
+
+	/* Print header first */
+	if (step == 0)
+		buf += sprintf(buf, "YAFFS built:" __DATE__ " " __TIME__ "\n");
+	else if (step == 1)
+		buf += sprintf(buf, "\n");
+	else {
+		step -= 2;
+
+		mutex_lock(&yaffs_context_lock);
+
+		/* Locate and print the Nth entry. Order N-squared but N is small. */
+		list_for_each(item, &yaffs_context_list) {
+			struct yaffs_linux_context *dc =
+			    list_entry(item, struct yaffs_linux_context,
+				       context_list);
+			struct yaffs_dev *dev = dc->dev;
+
+			/* Two steps per device; skip devices before ours. */
+			if (n < (step & ~1)) {
+				n += 2;
+				continue;
+			}
+			if ((step & 1) == 0) {
+				buf +=
+				    sprintf(buf, "\nDevice %d \"%s\"\n", n,
+					    dev->param.name);
+				buf = yaffs_dump_dev_part0(buf, dev);
+			} else {
+				buf = yaffs_dump_dev_part1(buf, dev);
+			}
+
+			break;
+		}
+		mutex_unlock(&yaffs_context_lock);
+	}
+
+	return buf - page < count ? buf - page : count;
+}
+
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ */
+
+/* Trace-flag name -> bit mapping used when parsing text written to
+ * /proc/yaffs (see yaffs_proc_write_trace_options).  Terminated by a
+ * NULL name entry. */
+static struct {
+	char *mask_name;
+	unsigned mask_bitfield;
+} mask_flags[] = {
+	{"allocate", YAFFS_TRACE_ALLOCATE},
+	{"always", YAFFS_TRACE_ALWAYS},
+	{"background", YAFFS_TRACE_BACKGROUND},
+	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+	{"buffers", YAFFS_TRACE_BUFFERS},
+	{"bug", YAFFS_TRACE_BUG},
+	{"checkpt", YAFFS_TRACE_CHECKPOINT},
+	{"deletion", YAFFS_TRACE_DELETION},
+	{"erase", YAFFS_TRACE_ERASE},
+	{"error", YAFFS_TRACE_ERROR},
+	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
+	{"gc", YAFFS_TRACE_GC},
+	{"lock", YAFFS_TRACE_LOCK},
+	{"mtd", YAFFS_TRACE_MTD},
+	{"nandaccess", YAFFS_TRACE_NANDACCESS},
+	{"os", YAFFS_TRACE_OS},
+	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+	{"scan", YAFFS_TRACE_SCAN},
+	{"mount", YAFFS_TRACE_MOUNT},
+	{"tracing", YAFFS_TRACE_TRACING},
+	{"sync", YAFFS_TRACE_SYNC},
+	{"write", YAFFS_TRACE_WRITE},
+	{"verify", YAFFS_TRACE_VERIFY},
+	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
+	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
+	{"all", 0xffffffff},
+	{"none", 0},
+	{NULL, 0},
+};
+
+#define MAX_MASK_NAME_LENGTH 40
+/*
+ * Parse trace options written to /proc/yaffs and update yaffs_trace_mask.
+ * Accepts whitespace-separated tokens: a numeral or a name from
+ * mask_flags[], each optionally prefixed by '+' (set), '-' (clear) or
+ * '=' (assign); no prefix behaves like '+'.  Always returns @count.
+ *
+ * NOTE(review): buf is dereferenced directly; a procfs write_proc handler
+ * normally receives a __user pointer, so confirm a copy_from_user happens
+ * (or is needed) in this kernel's procfs glue.
+ */
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+					  unsigned long count, void *data)
+{
+	unsigned rg = 0, mask_bitfield;
+	char *end;
+	char *mask_name;
+	const char *x;
+	char substring[MAX_MASK_NAME_LENGTH + 1];
+	int i;
+	int done = 0;
+	int add, len = 0;
+	int pos = 0;
+
+	rg = yaffs_trace_mask;
+
+	while (!done && (pos < count)) {
+		done = 1;
+		while ((pos < count) && isspace(buf[pos]))
+			pos++;
+
+		/* Skipping trailing whitespace can exhaust the buffer; the
+		 * original fell through here and read buf[count], one byte
+		 * past the end of the supplied data. */
+		if (pos >= count)
+			break;
+
+		/* Optional operator prefix; default is "add". */
+		switch (buf[pos]) {
+		case '+':
+		case '-':
+		case '=':
+			add = buf[pos];
+			pos++;
+			break;
+
+		default:
+			add = ' ';
+			break;
+		}
+		mask_name = NULL;
+
+		/* First try a numeric mask... */
+		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+		if (end > buf + pos) {
+			mask_name = "numeral";
+			len = end - (buf + pos);
+			pos += len;
+			done = 0;
+		} else {
+			/* ...otherwise collect a [a-z_] name and look it up. */
+			for (x = buf + pos, i = 0;
+			     (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+			     i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+				substring[i] = *x;
+			substring[i] = '\0';
+
+			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+				if (strcmp(substring, mask_flags[i].mask_name)
+				    == 0) {
+					mask_name = mask_flags[i].mask_name;
+					mask_bitfield =
+					    mask_flags[i].mask_bitfield;
+					done = 0;
+					break;
+				}
+			}
+		}
+
+		if (mask_name != NULL) {
+			done = 0;
+			switch (add) {
+			case '-':
+				rg &= ~mask_bitfield;
+				break;
+			case '+':
+				rg |= mask_bitfield;
+				break;
+			case '=':
+				rg = mask_bitfield;
+				break;
+			default:
+				rg |= mask_bitfield;
+				break;
+			}
+		}
+	}
+
+	/* "always" can never be switched off. */
+	yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+	printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+	if (rg & YAFFS_TRACE_ALWAYS) {
+		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+			char flag;
+			flag = ((rg & mask_flags[i].mask_bitfield) ==
+				mask_flags[i].mask_bitfield) ? '+' : '-';
+			printk(KERN_DEBUG "%c%s\n", flag,
+			       mask_flags[i].mask_name);
+		}
+	}
+
+	return count;
+}
+
+/* /proc/yaffs write handler: all written text is treated as trace-mask
+ * options and forwarded to the option parser. */
+static int yaffs_proc_write(struct file *file, const char *buf,
+			    unsigned long count, void *data)
+{
+	return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+ struct file_system_type *fst;
+ int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+ {&yaffs_fs_type, 0},
+ {&yaffs2_fs_type, 0},
+ {NULL, 0}
+};
+
+/*
+ * Module init: create the /proc/yaffs entry and register every filesystem
+ * flavour in fs_to_install[].  On any registration error, everything
+ * registered so far is rolled back.  Returns 0 or a negative errno.
+ */
+static int __init init_yaffs_fs(void)
+{
+	int error = 0;
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"\n\nYAFFS-WARNING CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED selected.\n\n\n");
+#endif
+
+	mutex_init(&yaffs_context_lock);
+
+	/* Install the proc_fs entries */
+	my_proc_entry = create_proc_entry("yaffs",
+					  S_IRUGO | S_IFREG, YPROC_ROOT);
+
+	if (my_proc_entry) {
+		my_proc_entry->write_proc = yaffs_proc_write;
+		my_proc_entry->read_proc = yaffs_proc_read;
+		my_proc_entry->data = NULL;
+	} else {
+		return -ENOMEM;
+	}
+
+	/* Now add the file system entries */
+
+	fsinst = fs_to_install;
+
+	while (fsinst->fst && !error) {
+		error = register_filesystem(fsinst->fst);
+		if (!error)
+			fsinst->installed = 1;
+		fsinst++;
+	}
+
+	/* Any errors? Unregister what was installed, and also drop the proc
+	 * entry (the original leaked /proc/yaffs on this path). */
+	if (error) {
+		fsinst = fs_to_install;
+
+		while (fsinst->fst) {
+			if (fsinst->installed) {
+				unregister_filesystem(fsinst->fst);
+				fsinst->installed = 0;
+			}
+			fsinst++;
+		}
+		remove_proc_entry("yaffs", YPROC_ROOT);
+	}
+
+	return error;
+}
+
+/* Module exit: remove the /proc/yaffs entry, then unregister every
+ * filesystem flavour that init_yaffs_fs() managed to install. */
+static void __exit exit_yaffs_fs(void)
+{
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " removing.");
+
+	remove_proc_entry("yaffs", YPROC_ROOT);
+
+	for (fsinst = fs_to_install; fsinst->fst; fsinst++) {
+		if (!fsinst->installed)
+			continue;
+		unregister_filesystem(fsinst->fst);
+		fsinst->installed = 0;
+	}
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2010");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 00000000000000..9eb60308254751
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,433 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+/*
+ * Mount-time scan for yaffs1 layouts.
+ *
+ * Pass 1 queries every block's state and sequence number; pass 2 walks
+ * each chunk of the scannable blocks, rebuilding objects, file extents
+ * and the directory tree.  Hardlinks are chained onto hard_list and fixed
+ * up at the end; shadowed objects are resolved via a shadow_fixers list.
+ * Returns YAFFS_OK, or YAFFS_FAIL if an in-memory allocation failed.
+ */
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+	struct yaffs_ext_tags tags;
+	int blk;
+	int result;
+
+	int chunk;
+	int c;
+	int deleted;
+	enum yaffs_block_state state;
+	struct yaffs_obj *hard_list = NULL;
+	struct yaffs_block_info *bi;
+	u32 seq_number;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_obj *in;
+	struct yaffs_obj *parent;
+
+	int alloc_failed = 0;
+
+	struct yaffs_shadow_fixer *shadow_fixers = NULL;
+
+	u8 *chunk_data;
+
+	yaffs_trace(YAFFS_TRACE_SCAN,
+		"yaffs1_scan starts intstartblk %d intendblk %d...",
+		dev->internal_start_block, dev->internal_end_block);
+
+	chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+	/* Scan all the blocks to determine their state */
+	bi = dev->block_info;
+	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+	     blk++) {
+		yaffs_clear_chunk_bits(dev, blk);
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+
+		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+		bi->block_state = state;
+		bi->seq_number = seq_number;
+
+		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+			bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+			"Block scanning block %d state %d seq %d",
+			blk, state, seq_number);
+
+		if (state == YAFFS_BLOCK_STATE_DEAD) {
+			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+				"block %d is bad", blk);
+		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+			dev->n_erased_blocks++;
+			dev->n_free_chunks += dev->param.chunks_per_block;
+		}
+		bi++;
+	}
+
+	/* For each block.... */
+	for (blk = dev->internal_start_block;
+	     !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+		cond_resched();
+
+		bi = yaffs_get_block_info(dev, blk);
+		state = bi->block_state;
+
+		deleted = 0;
+
+		/* For each chunk in each block that needs scanning.... */
+		for (c = 0; !alloc_failed && c < dev->param.chunks_per_block &&
+		     state == YAFFS_BLOCK_STATE_NEEDS_SCANNING; c++) {
+			/* Read the tags and decide what to do */
+			chunk = blk * dev->param.chunks_per_block + c;
+
+			/* NOTE(review): result is not inspected here; the
+			 * decision is made from tags.ecc_result instead --
+			 * confirm that is intentional. */
+			result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+							  &tags);
+
+			/* Let's have a good look at this chunk... */
+
+			if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED
+			    || tags.is_deleted) {
+				/* YAFFS1 only...
+				 * A deleted chunk
+				 */
+				deleted++;
+				dev->n_free_chunks++;
+				/*T((" %d %d deleted\n",blk,c)); */
+			} else if (!tags.chunk_used) {
+				/* An unassigned chunk in the block
+				 * This means that either the block is empty or
+				 * this is the one being allocated from
+				 */
+
+				if (c == 0) {
+					/* We're looking at the first chunk in the block so the block is unused */
+					state = YAFFS_BLOCK_STATE_EMPTY;
+					dev->n_erased_blocks++;
+				} else {
+					/* this is the block being allocated from */
+					yaffs_trace(YAFFS_TRACE_SCAN,
+						" Allocating from %d %d",
+						blk, c);
+					state = YAFFS_BLOCK_STATE_ALLOCATING;
+					dev->alloc_block = blk;
+					dev->alloc_page = c;
+					dev->alloc_block_finder = blk;
+					/* Set block finder here to encourage the allocator to go forth from here. */
+
+				}
+
+				dev->n_free_chunks +=
+				    (dev->param.chunks_per_block - c);
+			} else if (tags.chunk_id > 0) {
+				/* chunk_id > 0 so it is a data chunk... */
+				unsigned int endpos;
+
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				in = yaffs_find_or_create_by_number(dev,
+								    tags.obj_id,
+								    YAFFS_OBJECT_TYPE_FILE);
+				/* PutChunkIntoFile checks for a clash (two data chunks with
+				 * the same chunk_id).
+				 */
+
+				if (!in)
+					alloc_failed = 1;
+
+				if (in) {
+					if (!yaffs_put_chunk_in_file
+					    (in, tags.chunk_id, chunk, 1))
+						alloc_failed = 1;
+				}
+
+				/* Grow the file's known extent to cover this chunk. */
+				endpos =
+				    (tags.chunk_id -
+				     1) * dev->data_bytes_per_chunk +
+				    tags.n_bytes;
+				if (in
+				    && in->variant_type ==
+				    YAFFS_OBJECT_TYPE_FILE
+				    && in->variant.file_variant.scanned_size <
+				    endpos) {
+					in->variant.file_variant.scanned_size =
+					    endpos;
+					if (!dev->param.use_header_file_size) {
+						in->variant.
+						    file_variant.file_size =
+						    in->variant.
+						    file_variant.scanned_size;
+					}
+
+				}
+				/* T((" %d %d data %d %d\n",blk,c,tags.obj_id,tags.chunk_id)); */
+			} else {
+				/* chunk_id == 0, so it is an ObjectHeader.
+				 * Thus, we read in the object header and make the object
+				 */
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				result = yaffs_rd_chunk_tags_nand(dev, chunk,
+								  chunk_data,
+								  NULL);
+
+				oh = (struct yaffs_obj_hdr *)chunk_data;
+
+				in = yaffs_find_by_number(dev, tags.obj_id);
+				if (in && in->variant_type != oh->type) {
+					/* This should not happen, but somehow
+					 * Wev'e ended up with an obj_id that has been reused but not yet
+					 * deleted, and worse still it has changed type. Delete the old object.
+					 */
+
+					yaffs_del_obj(in);
+
+					in = 0;
+				}
+
+				in = yaffs_find_or_create_by_number(dev,
+								    tags.obj_id,
+								    oh->type);
+
+				if (!in)
+					alloc_failed = 1;
+
+				/* Record a pending rename: the shadowed object
+				 * is resolved after the scan completes. */
+				if (in && oh->shadows_obj > 0) {
+
+					struct yaffs_shadow_fixer *fixer;
+					fixer =
+					    kmalloc(sizeof
+						    (struct yaffs_shadow_fixer),
+						    GFP_NOFS);
+					if (fixer) {
+						fixer->next = shadow_fixers;
+						shadow_fixers = fixer;
+						fixer->obj_id = tags.obj_id;
+						fixer->shadowed_id =
+						    oh->shadows_obj;
+						yaffs_trace(YAFFS_TRACE_SCAN,
+							" Shadow fixer: %d shadows %d",
+							fixer->obj_id,
+							fixer->shadowed_id);
+
+					}
+
+				}
+
+				if (in && in->valid) {
+					/* We have already filled this one. We have a duplicate and need to resolve it. */
+
+					unsigned existing_serial = in->serial;
+					unsigned new_serial =
+					    tags.serial_number;
+
+					/* Serial numbers wrap modulo 4. */
+					if (((existing_serial + 1) & 3) ==
+					    new_serial) {
+						/* Use new one - destroy the exisiting one */
+						yaffs_chunk_del(dev,
+								in->hdr_chunk,
+								1, __LINE__);
+						in->valid = 0;
+					} else {
+						/* Use existing - destroy this one. */
+						yaffs_chunk_del(dev, chunk, 1,
+								__LINE__);
+					}
+				}
+
+				if (in && !in->valid &&
+				    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				     tags.obj_id ==
+				     YAFFS_OBJECTID_LOSTNFOUND)) {
+					/* We only load some info, don't fiddle with directory structure */
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+				} else if (in && !in->valid) {
+					/* we need to load this info */
+
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+					yaffs_set_obj_name_from_oh(in, oh);
+					in->dirty = 0;
+
+					/* directory stuff...
+					 * hook up to parent
+					 */
+
+					parent =
+					    yaffs_find_or_create_by_number
+					    (dev, oh->parent_obj_id,
+					     YAFFS_OBJECT_TYPE_DIRECTORY);
+					if (!parent)
+						alloc_failed = 1;
+					if (parent && parent->variant_type ==
+					    YAFFS_OBJECT_TYPE_UNKNOWN) {
+						/* Set up as a directory */
+						parent->variant_type =
+						    YAFFS_OBJECT_TYPE_DIRECTORY;
+						INIT_LIST_HEAD(&parent->
+							       variant.dir_variant.children);
+					} else if (!parent
+						   || parent->variant_type !=
+						   YAFFS_OBJECT_TYPE_DIRECTORY) {
+						/* Hoosterman, another problem....
+						 * We're trying to use a non-directory as a directory
+						 */
+
+						yaffs_trace(YAFFS_TRACE_ERROR,
+							"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+							);
+						parent = dev->lost_n_found;
+					}
+
+					yaffs_add_obj_to_dir(parent, in);
+
+					/* Dead code: the if (0) makes this
+					 * branch unreachable. */
+					if (0 && (parent == dev->del_dir ||
+						  parent ==
+						  dev->unlinked_dir)) {
+						in->deleted = 1;	/* If it is unlinked at start up then it wants deleting */
+						dev->n_deleted_files++;
+					}
+					/* Note re hardlinks.
+					 * Since we might scan a hardlink before its equivalent object is scanned
+					 * we put them all in a list.
+					 * After scanning is complete, we should have all the objects, so we run through this
+					 * list and fix up all the chains.
+					 */
+
+					switch (in->variant_type) {
+					case YAFFS_OBJECT_TYPE_UNKNOWN:
+						/* Todo got a problem */
+						break;
+					case YAFFS_OBJECT_TYPE_FILE:
+						if (dev->param.
+						    use_header_file_size)
+
+							in->variant.
+							    file_variant.file_size
+							    = oh->file_size;
+
+						break;
+					case YAFFS_OBJECT_TYPE_HARDLINK:
+						in->variant.
+						    hardlink_variant.equiv_id =
+						    oh->equiv_id;
+						/* Chain onto hard_list via the
+						 * list_head cast; undone by
+						 * yaffs_link_fixup() below. */
+						in->hard_links.next =
+						    (struct list_head *)
+						    hard_list;
+						hard_list = in;
+						break;
+					case YAFFS_OBJECT_TYPE_DIRECTORY:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SPECIAL:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SYMLINK:
+						in->variant.symlink_variant.
+						    alias =
+						    yaffs_clone_str(oh->alias);
+						if (!in->variant.
+						    symlink_variant.alias)
+							alloc_failed = 1;
+						break;
+					}
+
+				}
+			}
+		}
+
+		if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+			/* If we got this far while scanning, then the block is fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+		}
+
+		if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+			/* If the block was partially allocated then treat it as fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+
+		bi->block_state = state;
+
+		/* Now let's see if it was dirty */
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+			yaffs_block_became_dirty(dev, blk);
+		}
+
+	}
+
+	/* Ok, we've done all the scanning.
+	 * Fix up the hard link chains.
+	 * We should now have scanned all the objects, now it's time to add these
+	 * hardlinks.
+	 */
+
+	yaffs_link_fixup(dev, hard_list);
+
+	/* Fix up any shadowed objects */
+	{
+		struct yaffs_shadow_fixer *fixer;
+		struct yaffs_obj *obj;
+
+		while (shadow_fixers) {
+			fixer = shadow_fixers;
+			shadow_fixers = fixer->next;
+			/* Complete the rename transaction by deleting the shadowed object
+			 * then setting the object header to unshadowed.
+			 */
+			obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+			if (obj)
+				yaffs_del_obj(obj);
+
+			obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+			if (obj)
+				yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+			kfree(fixer);
+		}
+	}
+
+	yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+	if (alloc_failed)
+		return YAFFS_FAIL;
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 00000000000000..db23e04973ba98
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 00000000000000..33397af7003d1f
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1598 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block && b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad. (ie. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this one that have
+ * discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev * dev)
+{
+ u32 b;
+
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks =
+ (dev->param.end_block - dev->param.start_block + 1);
+
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) +
+ sizeof(u32)) * (dev->n_obj);
+ n_bytes += (dev->tnode_size + sizeof(u32)) * (dev->n_tnodes);
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up and add 2 blocks to allow for some bad blocks, so add 3 */
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
+
+/*--------------------- Checkpointing --------------------*/
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
/* Snapshot the live device runtime counters into a checkpoint record.
 * Mirror image of yaffs_checkpt_dev_to_dev() below. */
static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
				      struct yaffs_dev *dev)
{
	/* Allocator state. */
	cp->n_erased_blocks = dev->n_erased_blocks;
	cp->alloc_block = dev->alloc_block;
	cp->alloc_page = dev->alloc_page;
	cp->n_free_chunks = dev->n_free_chunks;

	/* Deletion bookkeeping and the monotonic block sequence number. */
	cp->n_deleted_files = dev->n_deleted_files;
	cp->n_unlinked_files = dev->n_unlinked_files;
	cp->n_bg_deletions = dev->n_bg_deletions;
	cp->seq_number = dev->seq_number;

}
+
/* Restore the device runtime counters from a checkpoint record.
 * Mirror image of yaffs2_dev_to_checkpt_dev() above. */
static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
				     struct yaffs_checkpt_dev *cp)
{
	/* Allocator state. */
	dev->n_erased_blocks = cp->n_erased_blocks;
	dev->alloc_block = cp->alloc_block;
	dev->alloc_page = cp->alloc_page;
	dev->n_free_chunks = cp->n_free_chunks;

	/* Deletion bookkeeping and the monotonic block sequence number. */
	dev->n_deleted_files = cp->n_deleted_files;
	dev->n_unlinked_files = cp->n_unlinked_files;
	dev->n_bg_deletions = cp->n_bg_deletions;
	dev->seq_number = cp->seq_number;
}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ /* Write block info */
+ if (ok) {
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) ==
+ n_bytes);
+ }
+
+ /* Write chunk bits */
+ if (ok) {
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) ==
+ n_bytes);
+ }
+ return ok ? 1 : 0;
+
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
/*
 * Restore one object from its checkpoint record: verify the type,
 * reattach it to its parent directory and copy back the state flags.
 * Returns 1 on success, 0 on a type/parent mismatch.
 *
 * NOTE(review): "taffs2" looks like a typo for "yaffs2"; kept as-is
 * because the caller in this file uses the same name.
 */
static int taffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
				     struct yaffs_checkpt_obj *cp)
{

	struct yaffs_obj *parent;

	/* The object may pre-exist (e.g. root); its type must agree. */
	if (obj->variant_type != cp->variant_type) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"Checkpoint read object %d type %d chunk %d does not match existing object type %d",
			cp->obj_id, cp->variant_type, cp->hdr_chunk,
			obj->variant_type);
		return 0;
	}

	obj->obj_id = cp->obj_id;

	/* parent_id == 0 means no parent (e.g. fake/root objects). */
	if (cp->parent_id)
		parent = yaffs_find_or_create_by_number(obj->my_dev,
							cp->parent_id,
							YAFFS_OBJECT_TYPE_DIRECTORY);
	else
		parent = NULL;

	if (parent) {
		if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
				cp->obj_id, cp->parent_id,
				cp->variant_type, cp->hdr_chunk,
				parent->variant_type);
			return 0;
		}
		yaffs_add_obj_to_dir(parent, obj);
	}

	/* Copy back the persisted object state. */
	obj->hdr_chunk = cp->hdr_chunk;
	obj->variant_type = cp->variant_type;
	obj->deleted = cp->deleted;
	obj->soft_del = cp->soft_del;
	obj->unlinked = cp->unlinked;
	obj->fake = cp->fake;
	obj->rename_allowed = cp->rename_allowed;
	obj->unlink_allowed = cp->unlink_allowed;
	obj->serial = cp->serial;
	obj->n_data_chunks = cp->n_data_chunks;

	/* size_or_equiv_obj: file size for files, equiv id for hardlinks. */
	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
		obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
		obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;

	/* A stored header chunk lets the rest be lazy-loaded later. */
	if (obj->hdr_chunk > 0)
		obj->lazy_loaded = 1;
	return 1;
}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+
+ if (tn) {
+ if (level > 0) {
+
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (tn->internal[i]) {
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->
+ internal
+ [i],
+ level -
+ 1,
+ (chunk_offset
+ <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ }
+ }
+ } else if (level == 0) {
+ u32 base_offset =
+ chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr
+ (dev, &base_offset,
+ sizeof(base_offset)) == sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (dev, tn,
+ dev->tnode_size) == dev->tnode_size);
+ }
+ }
+
+ return ok;
+
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr
+ (obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+ }
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_stuct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn) {
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ } else {
+ ok = 0;
+ }
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_stuct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ if (lh) {
+ obj =
+ list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr
+ (dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok
+ && obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes
+ (obj);
+ }
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xFF, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ struct yaffs_obj *hard_list = NULL;
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = taffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj->hard_links.next =
+ (struct list_head *)hard_list;
+ hard_list = obj;
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
/*
 * Write a complete checkpoint in its fixed order: head validity marker,
 * device state, all objects, tail validity marker, checksum.  Each
 * stage only runs if everything before it succeeded.  Sets and returns
 * dev->is_checkpointed.
 */
static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
{
	int ok = 1;

	if (!yaffs2_checkpt_required(dev)) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"skipping checkpoint write");
		ok = 0;
	}

	if (ok)
		ok = yaffs2_checkpt_open(dev, 1); /* 1 == open for writing */

	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"write checkpoint validity");
		ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"write checkpoint device");
		ok = yaffs2_wr_checkpt_dev(dev);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"write checkpoint objects");
		ok = yaffs2_wr_checkpt_objs(dev);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"write checkpoint validity");
		ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
	}

	if (ok)
		ok = yaffs2_wr_checkpt_sum(dev);

	/* Close unconditionally; a failed close invalidates the whole
	 * checkpoint even if all writes appeared to succeed. */
	if (!yaffs_checkpt_close(dev))
		ok = 0;

	if (ok)
		dev->is_checkpointed = 1;
	else
		dev->is_checkpointed = 0;

	return dev->is_checkpointed;
}
+
/*
 * Read back a checkpoint in the same fixed order it was written: head
 * validity marker, device state, objects, tail validity marker and
 * checksum.  Each stage only runs if everything before it succeeded.
 * Sets dev->is_checkpointed and returns 1 on a fully valid checkpoint.
 */
static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
{
	int ok = 1;

	if (!dev->param.is_yaffs2)
		ok = 0;

	if (ok && dev->param.skip_checkpt_rd) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"skipping checkpoint read");
		ok = 0;
	}

	if (ok)
		ok = yaffs2_checkpt_open(dev, 0);	/* open for read */

	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"read checkpoint validity");
		ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"read checkpoint device");
		ok = yaffs2_rd_checkpt_dev(dev);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"read checkpoint objects");
		ok = yaffs2_rd_checkpt_objs(dev);
	}
	if (ok) {
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"read checkpoint validity");
		ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
	}

	if (ok) {
		ok = yaffs2_rd_checkpt_sum(dev);
		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
			"read checkpoint checksum %d", ok);
	}

	/* Close unconditionally; a failed close invalidates the read. */
	if (!yaffs_checkpt_close(dev))
		ok = 0;

	if (ok)
		dev->is_checkpointed = 1;
	else
		dev->is_checkpointed = 0;

	return ok ? 1 : 0;

}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* if new_size > old_file_size.
+ * We're going to be writing a hole.
+ * If the hole is small then write zeros otherwise write a start of hole marker.
+ */
+
+ loff_t old_file_size;
+ int increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+
+ u8 *local_buffer = NULL;
+
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ int pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
+
+ /* If we were out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+
+}
+
/* Pairing of a block number with its sequence number, used to sort
 * blocks into sequence order before the backward scan. */
struct yaffs_block_index {
	int seq;
	int block;
};

/* sort() comparator: ascending sequence number, ties broken by
 * ascending block number. */
static int yaffs2_ybicmp(const void *a, const void *b)
{
	const struct yaffs_block_index *x = a;
	const struct yaffs_block_index *y = b;

	if (x->seq != y->seq)
		return x->seq - y->seq;
	return x->block - y->block;
}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+
+ int chunk;
+ int result;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ struct yaffs_obj *hard_list = NULL;
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int is_unlinked;
+ u8 *chunk_data;
+
+ int file_size;
+ int is_shrink;
+ int found_chunks;
+ int equiv_id;
+ int alloc_failed = 0;
+
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ block_index = kmalloc(n_blocks * sizeof(struct yaffs_block_index),
+ GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev, __LINE__);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+
+ n_to_scan++;
+
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter; !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+
+ bi = yaffs_get_block_info(dev, blk);
+
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ for (c = dev->param.chunks_per_block - 1;
+ !alloc_failed && c >= 0 &&
+ (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing the erased
+ * check. Just skip it so that it can be deleted.
+ * But, more typically, We get here when this is an unallocated
+ * chunk and his means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (found_chunks) {
+ /* This is a chunk that was skipped due to failing the erased check */
+ } else if (c == 0) {
+ /* We're looking at the first chunk in the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCANNING
+ || state ==
+ YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number ==
+ bi->seq_number) {
+ /* this is the block being allocated from */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+
+ state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->
+ alloc_block_finder =
+ blk;
+ } else {
+ /* This is a partially written block that is not
+ * the current allocation block.
+ */
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, c);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ (tags.chunk_id > 0
+ && tags.n_bytes > dev->data_bytes_per_chunk)
+ || tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, c, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ dev->n_free_chunks++;
+
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+ u32 chunk_base =
+ (tags.chunk_id -
+ 1) * dev->data_bytes_per_chunk;
+
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in) {
+ /* Out of memory */
+ alloc_failed = 1;
+ }
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE
+ && chunk_base <
+ in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by a resize */
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, -1)) {
+ alloc_failed = 1;
+ }
+
+ /* File size is calculated by looking at the data chunks if we have not
+ * seen an object header yet. Stop this practice once we find an object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid && /* have not got an object header yet */
+ in->variant.file_variant.
+ scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+
+ } else if (in) {
+ /* This chunk has been invalidated by a resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make the object
+ */
+ found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.
+ obj_id,
+ tags.
+ extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load)
+ || tags.extra_shadows || (!in->valid
+ && (tags.obj_id ==
+ YAFFS_OBJECTID_ROOT
+ || tags.
+ obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)))
+ {
+
+ /* If we don't have valid info then we need to read the chunk
+ * TODO In future we can probably defer reading the chunk and
+ * living with invalid data until needed.
+ */
+
+ result = yaffs_rd_chunk_tags_nand(dev,
+ chunk,
+ chunk_data,
+ NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if they got corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev, tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ continue;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be discarded, but
+ * we first have to suck out resize info if it is a file.
+ */
+
+ if ((in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) && ((oh
+ &&
+ oh->
+ type
+ ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ||
+ (tags.
+ extra_available
+ &&
+ tags.
+ extra_obj_type
+ ==
+ YAFFS_OBJECT_TYPE_FILE)))
+ {
+ u32 this_size =
+ (oh) ? oh->
+ file_size :
+ tags.extra_length;
+ u32 parent_obj_id =
+ (oh) ? oh->parent_obj_id :
+ tags.extra_parent_id;
+
+ is_shrink =
+ (oh) ? oh->
+ is_shrink :
+ tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked at start also means deleted)
+ * we treat the file size as being zeroed at this point.
+ */
+ if (parent_obj_id ==
+ YAFFS_OBJECTID_DELETED
+ || parent_obj_id ==
+ YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink
+ && in->variant.file_variant.
+ shrink_size > this_size)
+ in->variant.
+ file_variant.
+ shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ?
+ oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+
+ if (oh) {
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj
+ (dev,
+ oh->shadows_obj,
+ 1);
+
+ yaffs_set_obj_name_from_oh(in,
+ oh);
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ file_size = oh->file_size;
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+
+ } else {
+ in->variant_type =
+ tags.extra_obj_type;
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_length;
+ is_shrink =
+ tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent
+ || parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * We're trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir)
+ || (parent == dev->unlinked_dir);
+
+ if (is_shrink) {
+ /* Mark the block as having a shrink header */
+ bi->has_shrink_hdr = 1;
+ }
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent object is scanned
+ * we put them all in a list.
+ * After scanning is complete, we should have all the objects, so we run
+ * through this list and fix up all the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+
+ if (in->variant.
+ file_variant.scanned_size <
+ file_size) {
+ /* This covers the case where the file size is greater
+ * than where the data is
+ * This will happen if the file is resized to be larger
+ * than its current data extents.
+ */
+ in->variant.
+ file_variant.
+ file_size =
+ file_size;
+ in->variant.
+ file_variant.
+ scanned_size =
+ file_size;
+ }
+
+ if (in->variant.file_variant.
+ shrink_size > file_size)
+ in->variant.
+ file_variant.
+ shrink_size =
+ file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ if (!is_unlinked) {
+ in->variant.
+ hardlink_variant.
+ equiv_id = equiv_id;
+ in->hard_links.next =
+ (struct list_head *)
+ hard_list;
+ hard_list = in;
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ if (oh) {
+ in->variant.
+ symlink_variant.
+ alias =
+ yaffs_clone_str(oh->
+ alias);
+ if (!in->variant.
+ symlink_variant.
+ alias)
+ alloc_failed =
+ 1;
+ }
+ break;
+ }
+
+ }
+
+ }
+
+ } /* End of scanning for each chunk */
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
+ /* If we got this far while scanning, then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, now it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data, __LINE__);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 00000000000000..e1a9287fc5065b
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 00000000000000..8183425448cde0
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,70 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2010 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_LINUX_H__
+#define __YPORTENV_LINUX_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+
+#define YAFFS_ROOT_MODE 0755
+#define YAFFS_LOSTNFOUND_MODE 0700
+
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+
+#define compile_time_assertion(assertion) \
+ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#ifndef Y_DUMP_STACK
+#define Y_DUMP_STACK() dump_stack()
+#endif
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if(yaffs_trace_mask & (msk)) \
+ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while(0)
+
+#ifndef YBUG
+#define YBUG() do {\
+ yaffs_trace(YAFFS_TRACE_BUG,\
+ "bug " __FILE__ " %d",\
+ __LINE__);\
+ Y_DUMP_STACK();\
+} while (0)
+#endif
+
+#endif
diff --git a/include/linux/akm8975.h b/include/linux/akm8975.h
new file mode 100644
index 00000000000000..6a7c432600184a
--- /dev/null
+++ b/include/linux/akm8975.h
@@ -0,0 +1,87 @@
+/*
+ * Definitions for akm8975 compass chip.
+ */
+#ifndef AKM8975_H
+#define AKM8975_H
+
+#include <linux/ioctl.h>
+
+/*! \name AK8975 operation mode
+ \anchor AK8975_Mode
+ Defines an operation mode of the AK8975.*/
+/*! @{*/
+#define AK8975_MODE_SNG_MEASURE 0x01
+#define AK8975_MODE_SELF_TEST 0x08
+#define AK8975_MODE_FUSE_ACCESS 0x0F
+#define AK8975_MODE_POWER_DOWN 0x00
+/*! @}*/
+
+#define RBUFF_SIZE 8 /* Rx buffer size */
+
+/*! \name AK8975 register address
+\anchor AK8975_REG
+Defines a register address of the AK8975.*/
+/*! @{*/
+#define AK8975_REG_WIA 0x00
+#define AK8975_REG_INFO 0x01
+#define AK8975_REG_ST1 0x02
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+#define AK8975_REG_ST2 0x09
+#define AK8975_REG_CNTL 0x0A
+#define AK8975_REG_RSV 0x0B
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+#define AK8975_REG_I2CDIS 0x0F
+/*! @}*/
+
+/*! \name AK8975 fuse-rom address
+\anchor AK8975_FUSE
+Defines a read-only address of the fuse ROM of the AK8975.*/
+/*! @{*/
+#define AK8975_FUSE_ASAX 0x10
+#define AK8975_FUSE_ASAY 0x11
+#define AK8975_FUSE_ASAZ 0x12
+/*! @}*/
+
+#define AKMIO 0xA1
+
+/* IOCTLs for AKM library */
+#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5])
+#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5])
+#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE])
+#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12])
+#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int)
+#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int)
+#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short)
+
+/* IOCTLs for APPs */
+#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short)
+#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short)
+#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short)
+#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short)
+#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short)
+#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY
+/* Set raw magnetic vector flag */
+#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short)
+/* Get raw magnetic vector flag */
+#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short)
+#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short)
+
+
+struct akm8975_platform_data {
+ int intr;
+
+ int (*init)(void);
+ void (*exit)(void);
+ int (*power_on)(void);
+ int (*power_off)(void);
+};
+
+#endif
+
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 00000000000000..0f904b3ba7f07a
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_NET_BT_ADMIN 3001
+#define AID_NET_BT 3002
+#define AID_INET 3003
+#define AID_NET_RAW 3004
+#define AID_NET_ADMIN 3005
+#define AID_NET_BW_STATS 3006 /* read bandwidth statistics */
+#define AID_NET_BW_ACCT 3007 /* change bandwidth statistics accounting */
+
+#endif
diff --git a/include/linux/cea861.h b/include/linux/cea861.h
new file mode 100644
index 00000000000000..57e466022f6801
--- /dev/null
+++ b/include/linux/cea861.h
@@ -0,0 +1,444 @@
+/* vim: set noet ts=8 sts=8 sw=8 : */
+/*
+ * Copyright © 2010 Saleem Abdulrasool <compnerd@compnerd.org>.
+ * Copyright © 2010 Genesi USA, Inc. <matt@genesi-usa.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Header file for EIA CEA 861-E structures and definitions */
+
+#ifndef LINUX_CEA861_H
+#define LINUX_CEA861_H
+
+#include <linux/edid.h>
+
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD)
+#warning "structures defined and packed for little-endian byte order"
+#endif
+
+
+#define CEA861_NO_DTDS_PRESENT (0x04)
+
+static const u8 CEA861_OUI_REGISTRATION_ID_HDMI_LSB[] = { 0x03, 0x0C, 0x00 };
+
+
+enum cea861_data_block_type {
+ CEA861_DATA_BLOCK_TYPE_RESERVED0,
+ CEA861_DATA_BLOCK_TYPE_AUDIO,
+ CEA861_DATA_BLOCK_TYPE_VIDEO,
+ CEA861_DATA_BLOCK_TYPE_VENDOR_SPECIFIC,
+ CEA861_DATA_BLOCK_TYPE_SPEAKER_ALLOCATION,
+ CEA861_DATA_BLOCK_TYPE_VESA_DTC,
+ CEA861_DATA_BLOCK_TYPE_RESERVED6,
+ CEA861_DATA_BLOCK_TYPE_EXTENDED,
+};
+
+
+struct __packed cea861_data_block_header {
+ unsigned length : 5;
+ unsigned tag : 3;
+};
+
+/* if header->tag is not 0x7 this will work without any definition */
+struct __packed cea861_data_block_generic {
+ struct cea861_data_block_header header;
+ u8 data[31];
+};
+
+struct __packed cea861_vendor_specific_data_block {
+ struct cea861_data_block_header header;
+
+ u8 ieee_registration[3];
+ u8 data[28];
+};
+
+#define CEA861_SVD_NATIVE_FLAG (1 << 7)
+
+struct __packed cea861_video_data_block {
+ struct cea861_data_block_header header;
+
+ /*
+ * actual length in header, the index stops us from walking out of
+ * spec but not out of bounds
+ */
+ u8 svd[31];
+};
+
+struct __packed cea861_timing_block {
+ /* CEA Extension Header */
+ u8 tag;
+ u8 revision;
+ u8 dtd_offset;
+
+ /* Global Declarations */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned native_dtds : 4;
+ unsigned yuv_422_supported : 1;
+ unsigned yuv_444_supported : 1;
+ unsigned basic_audio_supported : 1;
+ unsigned underscan_supported : 1;
+#else
+ unsigned underscan_supported : 1;
+ unsigned basic_audio_supported : 1;
+ unsigned yuv_444_supported : 1;
+ unsigned yuv_422_supported : 1;
+ unsigned native_dtds : 4;
+#endif
+
+ u8 data[123];
+
+ u8 checksum;
+};
+
+
+/* HDMI Constants and Structures */
+
+#define HDMI_PACKET_TYPE_INFO_FRAME (0x80)
+#define HDMI_PACKET_CHECKSUM (0x100)
+
+struct __packed hdmi_vsdb {
+ struct cea861_data_block_header header;
+
+ u8 ieee_registration[3];
+ unsigned port_configuration_b : 4;
+ unsigned port_configuration_a : 4;
+ unsigned port_configuration_d : 4;
+ unsigned port_configuration_c : 4;
+
+ /* extension fields */
+ unsigned dvi_dual_link : 1;
+ unsigned : 1;
+ unsigned : 1;
+ unsigned yuv_444_supported : 1;
+ unsigned colour_depth_30_bit : 1;
+ unsigned colour_depth_36_bit : 1;
+ unsigned colour_depth_48_bit : 1;
+ unsigned audio_info_supported : 1;
+
+ u8 max_tmds_clock;
+
+ unsigned : 1;
+ unsigned : 1;
+ unsigned : 1;
+ unsigned : 1;
+ unsigned : 1;
+ unsigned : 1;
+ unsigned interlaced_latency_fields : 1;
+ unsigned latency_fields : 1;
+
+ u8 video_latency;
+ u8 audio_latency;
+ u8 interlaced_video_latency;
+ u8 interlaced_audio_latency;
+
+ u8 reserved[];
+};
+
+
+
+
+/* if header->tag == 0x7 then extended_tag is valid so we can cast the header to this,
+ * find the tag and then recast it to the appropriate structure (ugh!)
+ */
+struct __packed cea861_data_block_extended {
+ struct cea861_data_block_header header;
+ u8 extension_tag;
+};
+
+/* we're missing a few.. */
+enum cea861_data_block_extension_type {
+ CEA861_DATA_BLOCK_EXTENSION_VIDEO_CAPABILITY,
+ CEA861_DATA_BLOCK_EXTENSION_COLORIMETRY = 5,
+ CEA861_DATA_BLOCK_EXTENSION_CEA_MISC_AUDIO = 16,
+};
+
+struct __packed cea861_video_capability_block {
+ struct cea861_data_block_extended header;
+ unsigned ce_overunder_behavior : 2;
+ unsigned it_overunder_behavior : 2;
+ unsigned pt_overunder_behavior : 2;
+ unsigned quant_range_rgb : 1;
+ unsigned quant_range_ycc : 1;
+};
+
+
+
+/* InfoFrame type constants */
+enum info_frame_type {
+ INFO_FRAME_TYPE_RESERVED,
+ INFO_FRAME_TYPE_VENDOR_SPECIFIC,
+ INFO_FRAME_TYPE_AUXILIARY_VIDEO_INFO,
+ INFO_FRAME_TYPE_SOURCE_PRODUCT_DESCRIPTION,
+ INFO_FRAME_TYPE_AUDIO,
+ INFO_FRAME_TYPE_MPEG,
+};
+
+/* Common InfoFrame header information */
+struct __packed info_frame_header {
+ u8 type;
+ u8 version;
+ u8 length;
+ u8 chksum;
+};
+
+/* AVI InfoFrame (v2) */
+#define CEA861_AVI_INFO_FRAME_VERSION (0x02)
+
+enum scan_information {
+ SCAN_INFORMATION_UNKNOWN,
+ SCAN_INFORMATION_OVERSCANNED,
+ SCAN_INFORMATION_UNDERSCANNED,
+ SCAN_INFORMATION_RESERVED,
+};
+
+enum bar_info {
+ BAR_INFO_INVALID,
+ BAR_INFO_VERTICAL,
+ BAR_INFO_HORIZONTAL,
+ BAR_INFO_BOTH,
+};
+
+enum pixel_format {
+ PIXEL_FORMAT_RGB, /* default */
+ PIXEL_FORMAT_YUV_422,
+ PIXEL_FORMAT_YUV_444,
+};
+
+enum active_format_description {
+ ACTIVE_FORMAT_DESCRIPTION_UNSCALED = 0x08,
+ ACTIVE_FORMAT_DESCRIPTION_4_3_CENTERED = 0x09,
+ ACTIVE_FORMAT_DESCRIPTION_16_9_CENTERED = 0x10,
+ ACTIVE_FORMAT_DESCRIPTION_14_9_CENTERED = 0x11,
+};
+
+enum picture_aspect_ratio {
+ PICTURE_ASPECT_RATIO_UNSCALED,
+ PICTURE_ASPECT_RATIO_4_3,
+ PICTURE_ASPECT_RATIO_16_9,
+};
+
+enum colorimetry {
+ COLORIMETRY_UNKNOWN,
+ COLORIMETRY_BT601,
+ COLORIMETRY_BT709,
+ COLORIMETRY_EXTENDED,
+};
+
+enum non_uniform_picture_scaling {
+ NON_UNIFORM_PICTURE_SCALING_NONE,
+ NON_UNIFORM_PICTURE_SCALING_HORIZONTAL,
+ NON_UNIFORM_PICTURE_SCALING_VERTICAL,
+ NON_UNIFORM_PICTURE_SCALING_BOTH,
+};
+
+/* quantization range are the same flag values for RGB and YCC */
+enum quantization_range {
+ QUANTIZATION_RANGE_LIMITED,
+ QUANTIZATION_RANGE_FULL,
+};
+
+enum extended_colorimetry {
+ EXTENDED_COLORIMETRY_BT601,
+ EXTENDED_COLORIMETRY_BT709,
+};
+
+enum video_format {
+ VIDEO_FORMAT_UNKNOWN,
+};
+
+struct __packed avi_info_frame {
+ struct info_frame_header header;
+
+ unsigned scan_information : 2;
+ unsigned bar_info : 2;
+ unsigned active_format_info_valid : 1;
+ unsigned pixel_format : 2;
+ unsigned dbyte1_reserved0 : 1;
+
+ unsigned active_format_description : 4;
+ unsigned picture_aspect_ratio : 2;
+ unsigned colorimetry : 2;
+
+ unsigned non_uniform_picture_scaling : 2;
+ unsigned rgb_quantization_range : 2;
+ unsigned extended_colorimetry : 3;
+ unsigned it_content_present : 1;
+
+ unsigned video_format : 7;
+ unsigned dbyte4_reserved0 : 1;
+
+ unsigned pixel_repetition_factor : 4; /* value - 1 */
+ unsigned content_type : 2;
+ unsigned ycc_quantizaton_range : 2;
+
+ u16 end_of_top_bar;
+ u16 start_of_bottom_bar;
+ u16 end_of_left_bar;
+ u16 start_of_right_bar;
+};
+
+
+/* SPD InfoFrame */
+#define CEA861_SPD_INFO_FRAME_VERSION (0x01)
+
+enum spd_source_information {
+ SPD_SOURCE_UNKNOWN,
+ SPD_SOURCE_DIGITAL_STB,
+ SPD_SOURCE_DVD_PLAYER,
+ SPD_SOURCE_D_VHS,
+ SPD_SOURCE_HDD_VIDEORECORDER,
+ SPD_SOURCE_DVC,
+ SPD_SOURCE_DSC,
+ SPD_SOURCE_VIDEOCD,
+ SPD_SOURCE_GAME,
+ SPD_SOURCE_PC_GENERAL,
+ SPD_SOURCE_BLURAY_SACD,
+ SPD_SOURCE_HD_DVD,
+ SPD_SOURCE_PMP,
+};
+
+struct __packed spd_info_frame {
+ struct info_frame_header header;
+
+ u8 vendor[8];
+ u8 description[16];
+ u8 source_device_info;
+};
+
+
+/* Audio InfoFrame */
+#define CEA861_AUDIO_INFO_FRAME_VERSION (0x01)
+
+enum audio_coding_type {
+ CODING_TYPE_REFER_STREAM_HEADER,
+ CODING_TYPE_PCM,
+ CODING_TYPE_AC3,
+ CODING_TYPE_MPEG1_LAYER12,
+ CODING_TYPE_MP3,
+ CODING_TYPE_MPEG2,
+ CODING_TYPE_AAC_LC,
+ CODING_TYPE_DTS,
+ CODING_TYPE_ATRAC,
+ CODING_TYPE_DSD,
+ CODING_TYPE_E_AC3,
+ CODING_TYPE_DTS_HD,
+ CODING_TYPE_MLP,
+ CODING_TYPE_DST,
+ CODING_TYPE_WMA_PRO,
+ CODING_TYPE_EXTENDED,
+};
+
+enum audio_sample_frequency {
+ FREQUENCY_REFER_STREAM_HEADER,
+ FREQUENCY_32_KHZ,
+ FREQUENCY_44_1_KHZ,
+ FREQUENCY_CD = FREQUENCY_44_1_KHZ,
+ FREQUENCY_48_KHZ,
+ FREQUENCY_88_2_KHZ,
+ FREQUENCY_96_KHZ,
+ FREQUENCY_176_4_KHZ,
+ FREQUENCY_192_KHZ,
+};
+
+enum audio_sample_size {
+ SAMPLE_SIZE_REFER_STREAM_HEADER,
+ SAMPLE_SIZE_16_BIT,
+ SAMPLE_SIZE_20_BIT,
+ SAMPLE_SIZE_24_BIT,
+};
+
+enum audio_coding_extended_type {
+ CODING_TYPE_HE_AAC = 1,
+ CODING_TYPE_HE_AACv2,
+ CODING_TYPE_MPEG_SURROUND,
+};
+
+/* TODO define speaker allocation bits */
+
+#define CHANNEL_COUNT_REFER_STREAM_HEADER (0x00)
+#define CHANNEL_ALLOCATION_STEREO (0x00)
+
+enum audio_downmix {
+ DOWNMIX_PERMITTED,
+ DOWNMIX_PROHIBITED,
+};
+
+enum audio_lfe_level {
+ LFE_LEVEL_UNKNOWN,
+ LFE_LEVEL_0dB,
+ LFE_LEVEL_PLUS10dB,
+ LFE_LEVEL_RESERVED,
+};
+
+struct __packed audio_info_frame {
+ struct info_frame_header header;
+
+ unsigned channel_count : 3;
+ unsigned future13 : 1;
+ unsigned coding_type : 4;
+
+ unsigned sample_size : 2;
+ unsigned sample_frequency : 3;
+ unsigned future25 : 1;
+ unsigned future26 : 1;
+ unsigned future27 : 1;
+
+ unsigned format_code_extension : 5;
+ unsigned future35 : 1;
+ unsigned future36 : 1;
+ unsigned future37 : 1;
+
+ u8 channel_allocation;
+
+ unsigned lfe_playback_level : 2;
+ unsigned future52 : 1;
+ unsigned level_shift : 4; /* 0-15dB */
+ unsigned down_mix_inhibit : 1;
+
+ u8 future_byte_6_10[5];
+};
+
+static inline void cea861_checksum_hdmi_info_frame(u8 * const info_frame)
+{
+ struct info_frame_header * const header =
+ (struct info_frame_header *) info_frame;
+
+ int i;
+ u8 crc;
+
+ crc = (HDMI_PACKET_TYPE_INFO_FRAME + header->type) +
+ header->version + (header->length - 1);
+
+ for (i = 1; i < header->length; i++)
+ crc += info_frame[i];
+
+ header->chksum = HDMI_PACKET_CHECKSUM - crc;
+}
+
+#endif /* LINUX_CEA861_H */
+
diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h
new file mode 100755
index 00000000000000..8343b817af31b8
--- /dev/null
+++ b/include/linux/earlysuspend.h
@@ -0,0 +1,56 @@
+/* include/linux/earlysuspend.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_EARLYSUSPEND_H
+#define _LINUX_EARLYSUSPEND_H
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/list.h>
+#endif
+
+/* The early_suspend structure defines suspend and resume hooks to be called
+ * when the user visible sleep state of the system changes, and a level to
+ * control the order. They can be used to turn off the screen and input
+ * devices that are not used for wakeup.
+ * Suspend handlers are called in low to high level order, resume handlers are
+ * called in the opposite order. If, when calling register_early_suspend,
+ * the suspend handlers have already been called without a matching call to the
+ * resume handlers, the suspend handler will be called directly from
+ * register_early_suspend. This direct call can violate the normal level order.
+ */
+enum {
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+ EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100,
+ EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+struct early_suspend {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct list_head link;
+ int level;
+ void (*suspend)(struct early_suspend *h);
+ void (*resume)(struct early_suspend *h);
+#endif
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void register_early_suspend(struct early_suspend *handler);
+void unregister_early_suspend(struct early_suspend *handler);
+#else
+#define register_early_suspend(handler) do { } while (0)
+#define unregister_early_suspend(handler) do { } while (0)
+#endif
+
+#endif
+
diff --git a/include/linux/edid.h b/include/linux/edid.h
new file mode 100644
index 00000000000000..6f97ed63f1fb1b
--- /dev/null
+++ b/include/linux/edid.h
@@ -0,0 +1,373 @@
+/* vim: set noet ts=8 sts=8 sw=8 : */
+/*
+ * Copyright © 2010 Saleem Abdulrasool <compnerd@compnerd.org>.
+ * Copyright © 2010 Genesi USA, Inc. <matt@genesi-usa.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LINUX_EDID_H
+#define LINUX_EDID_H
+
+/* EDID constants and Structures */
+#define EDID_I2C_DDC_DATA_ADDRESS (0x50)
+
+#define EDID_BLOCK_SIZE (0x80)
+#define EDID_MAX_EXTENSIONS (0xfe)
+
+
+static const u8 EDID_HEADER[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+
+enum edid_extension_type {
+ EDID_EXTENSION_TIMING = 0x01, // Timing Extension
+ EDID_EXTENSION_CEA = 0x02, // Additional Timing Block Data (CEA EDID Timing Extension)
+ EDID_EXTENSION_VTB = 0x10, // Video Timing Block Extension (VTB-EXT)
+ EDID_EXTENSION_EDID_2_0 = 0x20, // EDID 2.0 Extension
+ EDID_EXTENSION_DI = 0x40, // Display Information Extension (DI-EXT)
+ EDID_EXTENSION_LS = 0x50, // Localised String Extension (LS-EXT)
+ EDID_EXTENSION_MI = 0x60, // Microdisplay Interface Extension (MI-EXT)
+ EDID_EXTENSION_DTCDB_1 = 0xa7, // Display Transfer Characteristics Data Block (DTCDB)
+ EDID_EXTENSION_DTCDB_2 = 0xaf,
+ EDID_EXTENSION_DTCDB_3 = 0xbf,
+ EDID_EXTENSION_BLOCK_MAP = 0xf0, // Block Map
+ EDID_EXTENSION_DDDB = 0xff, // Display Device Data Block (DDDB)
+};
+
+enum edid_display_type {
+ EDID_DISPLAY_TYPE_MONOCHROME,
+ EDID_DISPLAY_TYPE_RGB,
+ EDID_DISPLAY_TYPE_NON_RGB,
+ EDID_DISPLAY_TYPE_UNDEFINED,
+};
+
+enum edid_aspect_ratio {
+ EDID_ASPECT_RATIO_16_10,
+ EDID_ASPECT_RATIO_4_3,
+ EDID_ASPECT_RATIO_5_4,
+ EDID_ASPECT_RATIO_16_9,
+};
+
+enum edid_monitor_descriptor_type {
+ EDID_MONITOR_DESCRIPTOR_STANDARD_TIMING_IDENTIFIERS = 0xfa,
+ EDID_MONITOR_DESCRIPTOR_COLOR_POINT = 0xfb,
+ EDID_MONITOR_DESCRIPTOR_MONITOR_NAME = 0xfc,
+ EDID_MONITOR_DESCRIPTOR_MONITOR_RANGE_LIMITS = 0xfd,
+ EDID_MONITOR_DESCRIPTOR_MONITOR_SERIAL_NUMBER = 0xff,
+};
+
+
+struct __packed edid_detailed_timing_descriptor {
+ u16 pixel_clock; /* = value * 10000 */
+
+ u8 horizontal_active_lo;
+ u8 horizontal_blanking_lo;
+
+ unsigned horizontal_blanking_hi : 4;
+ unsigned horizontal_active_hi : 4;
+
+ u8 vertical_active_lo;
+ u8 vertical_blanking_lo;
+
+ unsigned vertical_blanking_hi : 4;
+ unsigned vertical_active_hi : 4;
+
+ u8 horizontal_sync_offset_lo;
+ u8 horizontal_sync_pulse_width_lo;
+
+ unsigned vertical_sync_pulse_width_lo : 4;
+ unsigned vertical_sync_offset_lo : 4;
+
+ unsigned vertical_sync_pulse_width_hi : 2;
+ unsigned vertical_sync_offset_hi : 2;
+ unsigned horizontal_sync_pulse_width_hi : 2;
+ unsigned horizontal_sync_offset_hi : 2;
+
+ u8 horizontal_image_size_lo;
+ u8 vertical_image_size_lo;
+
+ unsigned vertical_image_size_hi : 4;
+ unsigned horizontal_image_size_hi : 4;
+
+ u8 horizontal_border;
+ u8 vertical_border;
+
+ u8 flags;
+};
+
+
+static inline u32 /* was u16: value * 10000 overflows 16 bits for any value > 6 */
+edid_timing_pixel_clock(const struct edid_detailed_timing_descriptor * const dtb)
+{
+	return (u32)dtb->pixel_clock * 10000;
+}
+
+static inline u16
+edid_timing_horizontal_blanking(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->horizontal_blanking_hi << 8) | dtb->horizontal_blanking_lo;
+}
+
+static inline u16
+edid_timing_horizontal_active(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->horizontal_active_hi << 8) | dtb->horizontal_active_lo;
+}
+
+static inline u16
+edid_timing_vertical_blanking(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->vertical_blanking_hi << 8) | dtb->vertical_blanking_lo;
+}
+
+static inline u16
+edid_timing_vertical_active(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->vertical_active_hi << 8) | dtb->vertical_active_lo;
+}
+
+static inline u8
+edid_timing_vertical_sync_offset(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->vertical_sync_offset_hi << 4) | dtb->vertical_sync_offset_lo;
+}
+
+static inline u8
+edid_timing_vertical_sync_pulse_width(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->vertical_sync_pulse_width_hi << 4) | dtb->vertical_sync_pulse_width_lo;
+}
+
+static inline u8
+edid_timing_horizontal_sync_offset(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->horizontal_sync_offset_hi << 4) | dtb->horizontal_sync_offset_lo;
+}
+
+static inline u8
+edid_timing_horizontal_sync_pulse_width(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->horizontal_sync_pulse_width_hi << 4) | dtb->horizontal_sync_pulse_width_lo;
+}
+
+static inline u16
+edid_timing_horizontal_image_size(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->horizontal_image_size_hi << 8) | dtb->horizontal_image_size_lo;
+}
+
+static inline u16
+edid_timing_vertical_image_size(const struct edid_detailed_timing_descriptor * const dtb)
+{
+ return (dtb->vertical_image_size_hi << 8) | dtb->vertical_image_size_lo;
+}
+
+
+struct __packed edid_monitor_descriptor {
+ u16 flag0;
+ u8 flag1;
+ u8 tag;
+ u8 flag2;
+ u8 data[13];
+};
+
+struct __packed edid_standard_timing_descriptor {
+ u8 horizontal_active_pixels; /* = (value + 31) * 8 */
+
+ unsigned refresh_rate : 6; /* = value + 60 */
+ unsigned image_aspect_ratio : 2;
+};
+
+struct __packed edid_block0 {
+ /* header information */
+ u8 header[8];
+
+ /* vendor/product identification */
+ struct __packed {
+ unsigned id2 : 5;
+ unsigned id1 : 5;
+ unsigned id0 : 5;
+ unsigned zero : 1;
+ } manufacturer;
+
+ u8 product[2];
+ u8 serial_number[4];
+ u8 manufacture_week;
+ u8 manufacture_year;
+
+ /* EDID version */
+ u8 version;
+ u8 revision;
+
+ /* basic display parameters and features */
+ struct __packed {
+ unsigned dfp_1x_vsync_serration : 1; /* VESA DFP 1.x */
+ unsigned green_video_sync : 1;
+ unsigned composite_sync : 1;
+ unsigned separate_sync : 1;
+ unsigned blank_to_black_setup : 1;
+ unsigned signal_level_standard : 2;
+ unsigned digital : 1;
+ } video_input_definition;
+
+ u8 maximum_horizontal_image_size; /* cm */
+ u8 maximum_vertical_image_size; /* cm */
+
+ u8 display_transfer_characteristics; /* gamma = (value + 100) / 100 */
+
+ struct __packed {
+ unsigned default_gtf : 1; /* generalised timing formula */
+ unsigned preferred_timing_mode : 1;
+ unsigned standard_default_color_space : 1;
+ unsigned display_type : 2;
+ unsigned active_off : 1;
+ unsigned suspend : 1;
+ unsigned standby : 1;
+ } feature_support;
+
+ /* color characteristics block */
+ unsigned green_y_low : 2;
+ unsigned green_x_low : 2;
+ unsigned red_y_low : 2;
+ unsigned red_x_low : 2;
+
+ unsigned white_y_low : 2;
+ unsigned white_x_low : 2;
+ unsigned blue_y_low : 2;
+ unsigned blue_x_low : 2;
+
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+
+ /* established timings */
+ struct __packed {
+ unsigned timing_800x600_60 : 1;
+ unsigned timing_800x600_56 : 1;
+ unsigned timing_640x480_75 : 1;
+ unsigned timing_640x480_72 : 1;
+ unsigned timing_640x480_67 : 1;
+ unsigned timing_640x480_60 : 1;
+ unsigned timing_720x400_88 : 1;
+ unsigned timing_720x400_70 : 1;
+
+ unsigned timing_1280x1024_75 : 1;
+ unsigned timing_1024x768_75 : 1;
+ unsigned timing_1024x768_70 : 1;
+ unsigned timing_1024x768_60 : 1;
+ unsigned timing_1024x768_87 : 1;
+ unsigned timing_832x624_75 : 1;
+ unsigned timing_800x600_75 : 1;
+ unsigned timing_800x600_72 : 1;
+ } established_timings;
+
+ struct __packed {
+ unsigned reserved : 7;
+ unsigned timing_1152x870_75 : 1;
+ } manufacturer_timings;
+
+ /* standard timing id */
+ struct edid_standard_timing_descriptor standard_timing_id[8];
+
+ /* detailed timing */
+ union {
+ struct edid_detailed_timing_descriptor detailed_timing[4];
+ struct edid_monitor_descriptor monitor_descriptor[4];
+ } detailed_timings;
+
+ u8 extensions;
+ u8 checksum;
+};
+
+struct __packed edid_color_characteristics_data {
+ struct {
+ u16 x;
+ u16 y;
+ } red, green, blue, white;
+};
+
+
+static inline u16 edid_gamma(const struct edid_block0 * const block0)
+{
+ return (block0->display_transfer_characteristics + 100) / 100;
+}
+
+static inline struct edid_color_characteristics_data
+edid_color_characteristics(const struct edid_block0 * const block0)
+{
+ struct edid_color_characteristics_data characteristics = {
+ .red = {
+ .x = (block0->red_x << 8) | block0->red_x_low,
+ .y = (block0->red_y << 8) | block0->red_y_low,
+ },
+ .green = {
+ .x = (block0->green_x << 8) | block0->green_x_low,
+ .y = (block0->green_y << 8) | block0->green_y_low,
+ },
+ .blue = {
+ .x = (block0->blue_x << 8) | block0->blue_x_low,
+ .y = (block0->blue_y << 8) | block0->blue_y_low,
+ },
+ .white = {
+ .x = (block0->white_x << 8) | block0->white_x_low,
+ .y = (block0->white_y << 8) | block0->white_y_low,
+ },
+ };
+
+ return characteristics;
+}
+
+struct __packed edid_block_map {
+ u8 tag;
+ u8 extension_tag[126];
+ u8 checksum;
+};
+
+struct __packed edid_extension {
+ u8 tag;
+ u8 revision;
+ u8 extension_data[125];
+ u8 checksum;
+};
+
+static inline bool edid_verify_checksum(const u8 * const block)
+{
+ u8 checksum = 0;
+ int i;
+
+ for (i = 0; i < EDID_BLOCK_SIZE; i++)
+ checksum += block[i];
+
+ return (checksum == 0);
+}
+
+#endif
+
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 00000000000000..2613fc5e4a93c0
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+ int count;
+ struct input_dev *dev[];
+};
+enum {
+ GPIO_EVENT_FUNC_UNINIT = 0x0,
+ GPIO_EVENT_FUNC_INIT = 0x1,
+ GPIO_EVENT_FUNC_SUSPEND = 0x2,
+ GPIO_EVENT_FUNC_RESUME = 0x3,
+};
+struct gpio_event_info {
+ int (*func)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, int func);
+ int (*event)(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info,
+ void **data, unsigned int dev, unsigned int type,
+ unsigned int code, int value); /* out events */
+ bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+ const char *name;
+ struct gpio_event_info **info;
+ size_t info_count;
+ int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+ const char *names[]; /* If name is NULL, names contain a NULL */
+ /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+ /* unset: drive active output low, set: drive active output high */
+ GPIOKPF_ACTIVE_HIGH = 1U << 0,
+ GPIOKPF_DEBOUNCE = 1U << 1,
+ GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+ GPIOKPF_REMOVE_PHANTOM_KEYS = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+ GPIOKPF_DEBOUNCE,
+ GPIOKPF_DRIVE_INACTIVE = 1U << 3,
+ GPIOKPF_LEVEL_TRIGGERED_IRQ = 1U << 4,
+ GPIOKPF_PRINT_UNMAPPED_KEYS = 1U << 16,
+ GPIOKPF_PRINT_MAPPED_KEYS = 1U << 17,
+ GPIOKPF_PRINT_PHANTOM_KEYS = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS	(10)
+#define MATRIX_KEY_MASK		((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+	(((dev) << MATRIX_CODE_BITS) | ((code) & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+ /* initialize to gpio_event_matrix_func */
+ struct gpio_event_info info;
+ /* size must be ninputs * noutputs */
+ const unsigned short *keymap;
+ unsigned int *input_gpios;
+ unsigned int *output_gpios;
+ unsigned int ninputs;
+ unsigned int noutputs;
+ /* time to wait before reading inputs after driving each output */
+ ktime_t settle_time;
+ /* time to wait before scanning the keypad a second time */
+ ktime_t debounce_delay;
+ ktime_t poll_time;
+ unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+ GPIOEDF_ACTIVE_HIGH = 1U << 0,
+/* GPIOEDF_USE_DOWN_IRQ = 1U << 1, */
+/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+ GPIOEDF_PRINT_KEYS = 1U << 8,
+ GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9,
+ GPIOEDF_PRINT_KEY_UNSTABLE = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+ uint32_t gpio:16;
+ uint32_t code:10;
+ uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+ /* initialize to gpio_event_input_func */
+ struct gpio_event_info info;
+ ktime_t debounce_time;
+ ktime_t poll_time;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data,
+ unsigned int dev, unsigned int type,
+ unsigned int code, int value);
+struct gpio_event_output_info {
+ /* initialize to gpio_event_output_func and gpio_event_output_event */
+ struct gpio_event_info info;
+ uint16_t flags;
+ uint16_t type;
+ const struct gpio_event_direct_entry *keymap;
+ size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+ GPIOEAF_PRINT_UNKNOWN_DIRECTION = 1U << 16,
+ GPIOEAF_PRINT_RAW = 1U << 17,
+ GPIOEAF_PRINT_EVENT = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+ struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+ /* initialize to gpio_event_axis_func */
+ struct gpio_event_info info;
+ uint8_t count; /* number of gpios for this axis */
+ uint8_t dev; /* device index when using multiple input devices */
+ uint8_t type; /* EV_REL or EV_ABS */
+ uint16_t code;
+ uint16_t decoded_size;
+ uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+ uint32_t *gpio;
+ uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+ struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
new file mode 100644
index 00000000000000..11c0182a153b8a
--- /dev/null
+++ b/include/linux/hdmi.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_HDMI_H_
+#define __LINUX_HDMI_H_
+
+#include <linux/types.h>
+
+enum hdmi_infoframe_type {
+ HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
+ HDMI_INFOFRAME_TYPE_AVI = 0x82,
+ HDMI_INFOFRAME_TYPE_SPD = 0x83,
+ HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+};
+
+#define HDMI_IEEE_OUI 0x000c03
+#define HDMI_INFOFRAME_HEADER_SIZE 4
+#define HDMI_AVI_INFOFRAME_SIZE 13
+#define HDMI_SPD_INFOFRAME_SIZE 25
+#define HDMI_AUDIO_INFOFRAME_SIZE 10
+
+#define HDMI_INFOFRAME_SIZE(type) \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+};
+
+enum hdmi_colorspace {
+ HDMI_COLORSPACE_RGB,
+ HDMI_COLORSPACE_YUV422,
+ HDMI_COLORSPACE_YUV444,
+};
+
+enum hdmi_scan_mode {
+ HDMI_SCAN_MODE_NONE,
+ HDMI_SCAN_MODE_OVERSCAN,
+ HDMI_SCAN_MODE_UNDERSCAN,
+};
+
+enum hdmi_colorimetry {
+ HDMI_COLORIMETRY_NONE,
+ HDMI_COLORIMETRY_ITU_601,
+ HDMI_COLORIMETRY_ITU_709,
+ HDMI_COLORIMETRY_EXTENDED,
+};
+
+enum hdmi_picture_aspect {
+ HDMI_PICTURE_ASPECT_NONE,
+ HDMI_PICTURE_ASPECT_4_3,
+ HDMI_PICTURE_ASPECT_16_9,
+};
+
+enum hdmi_active_aspect {
+ HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
+ HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
+ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
+ HDMI_ACTIVE_ASPECT_PICTURE = 8,
+ HDMI_ACTIVE_ASPECT_4_3 = 9,
+ HDMI_ACTIVE_ASPECT_16_9 = 10,
+ HDMI_ACTIVE_ASPECT_14_9 = 11,
+ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
+ HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
+ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
+};
+
+enum hdmi_extended_colorimetry {
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
+ HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+};
+
+enum hdmi_quantization_range {
+ HDMI_QUANTIZATION_RANGE_DEFAULT,
+ HDMI_QUANTIZATION_RANGE_LIMITED,
+ HDMI_QUANTIZATION_RANGE_FULL,
+};
+
+/* non-uniform picture scaling */
+enum hdmi_nups {
+ HDMI_NUPS_UNKNOWN,
+ HDMI_NUPS_HORIZONTAL,
+ HDMI_NUPS_VERTICAL,
+ HDMI_NUPS_BOTH,
+};
+
+enum hdmi_ycc_quantization_range {
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
+ HDMI_YCC_QUANTIZATION_RANGE_FULL,
+};
+
+enum hdmi_content_type {
+ HDMI_CONTENT_TYPE_NONE,
+ HDMI_CONTENT_TYPE_PHOTO,
+ HDMI_CONTENT_TYPE_CINEMA,
+ HDMI_CONTENT_TYPE_GAME,
+};
+
+struct hdmi_avi_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_colorspace colorspace;
+ enum hdmi_scan_mode scan_mode;
+ enum hdmi_colorimetry colorimetry;
+ enum hdmi_picture_aspect picture_aspect;
+ enum hdmi_active_aspect active_aspect;
+ bool itc;
+ enum hdmi_extended_colorimetry extended_colorimetry;
+ enum hdmi_quantization_range quantization_range;
+ enum hdmi_nups nups;
+ unsigned char video_code;
+ enum hdmi_ycc_quantization_range ycc_quantization_range;
+ enum hdmi_content_type content_type;
+ unsigned char pixel_repeat;
+ unsigned short top_bar;
+ unsigned short bottom_bar;
+ unsigned short left_bar;
+ unsigned short right_bar;
+};
+
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_spd_sdi {
+ HDMI_SPD_SDI_UNKNOWN,
+ HDMI_SPD_SDI_DSTB,
+ HDMI_SPD_SDI_DVDP,
+ HDMI_SPD_SDI_DVHS,
+ HDMI_SPD_SDI_HDDVR,
+ HDMI_SPD_SDI_DVC,
+ HDMI_SPD_SDI_DSC,
+ HDMI_SPD_SDI_VCD,
+ HDMI_SPD_SDI_GAME,
+ HDMI_SPD_SDI_PC,
+ HDMI_SPD_SDI_BD,
+ HDMI_SPD_SDI_SACD,
+ HDMI_SPD_SDI_HDDVD,
+ HDMI_SPD_SDI_PMP,
+};
+
+struct hdmi_spd_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ char vendor[8];
+ char product[16];
+ enum hdmi_spd_sdi sdi;
+};
+
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product);
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_audio_coding_type {
+ HDMI_AUDIO_CODING_TYPE_STREAM,
+ HDMI_AUDIO_CODING_TYPE_PCM,
+ HDMI_AUDIO_CODING_TYPE_AC3,
+ HDMI_AUDIO_CODING_TYPE_MPEG1,
+ HDMI_AUDIO_CODING_TYPE_MP3,
+ HDMI_AUDIO_CODING_TYPE_MPEG2,
+ HDMI_AUDIO_CODING_TYPE_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_DTS,
+ HDMI_AUDIO_CODING_TYPE_ATRAC,
+ HDMI_AUDIO_CODING_TYPE_DSD,
+ HDMI_AUDIO_CODING_TYPE_EAC3,
+ HDMI_AUDIO_CODING_TYPE_DTS_HD,
+ HDMI_AUDIO_CODING_TYPE_MLP,
+ HDMI_AUDIO_CODING_TYPE_DST,
+ HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_SIZE_STREAM,
+ HDMI_AUDIO_SAMPLE_SIZE_16,
+ HDMI_AUDIO_SAMPLE_SIZE_20,
+ HDMI_AUDIO_SAMPLE_SIZE_24,
+};
+
+enum hdmi_audio_sample_frequency {
+ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
+};
+
+enum hdmi_audio_coding_type_ext {
+ HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+};
+
+struct hdmi_audio_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char channels;
+ enum hdmi_audio_coding_type coding_type;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_sample_frequency sample_frequency;
+ enum hdmi_audio_coding_type_ext coding_type_ext;
+ unsigned char channel_allocation;
+ unsigned char level_shift_value;
+ bool downmix_inhibit;
+
+};
+
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+
+enum hdmi_3d_structure {
+ HDMI_3D_STRUCTURE_INVALID = -1,
+ HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
+ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
+ HDMI_3D_STRUCTURE_L_DEPTH,
+ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
+ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
+};
+
+
+struct hdmi_vendor_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ u8 vic;
+ enum hdmi_3d_structure s3d_struct;
+ unsigned int s3d_ext_data;
+};
+
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+
+union hdmi_vendor_any_infoframe {
+ struct {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ } any;
+ struct hdmi_vendor_infoframe hdmi;
+};
+
+/**
+ * union hdmi_infoframe - overall union of all abstract infoframe representations
+ * @any: generic infoframe
+ * @avi: avi infoframe
+ * @spd: spd infoframe
+ * @vendor: union of all vendor infoframes
+ * @audio: audio infoframe
+ *
+ * This is used by the generic pack function. This works since all infoframes
+ * have the same header which also indicates which type of infoframe should be
+ * packed.
+ */
+union hdmi_infoframe {
+ struct hdmi_any_infoframe any;
+ struct hdmi_avi_infoframe avi;
+ struct hdmi_spd_infoframe spd;
+ union hdmi_vendor_any_infoframe vendor;
+ struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+
+#endif /* __LINUX_HDMI_H_ */
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
new file mode 100644
index 00000000000000..51f7ccadf923c3
--- /dev/null
+++ b/include/linux/hid-sensor-hub.h
@@ -0,0 +1,232 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _HID_SENSORS_HUB_H
+#define _HID_SENSORS_HUB_H
+
+#include <linux/hid.h>
+#include <linux/hid-sensor-ids.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+/**
+ * struct hid_sensor_hub_attribute_info - Attribute info
+ * @usage_id: Parent usage id of a physical device.
+ * @attrib_id: Attribute id for this attribute.
+ * @report_id: Report id in which this information resides.
+ * @index: Field index in the report.
+ * @units: Measurement unit for this attribute.
+ * @unit_expo: Exponent used in the data.
+ * @size: Size in bytes for data size.
+ */
+struct hid_sensor_hub_attribute_info {
+ u32 usage_id;
+ u32 attrib_id;
+ s32 report_id;
+ s32 index;
+ s32 units;
+ s32 unit_expo;
+ s32 size;
+ s32 logical_minimum;
+ s32 logical_maximum;
+};
+
+/**
+ * struct hid_sensor_hub_device - Stores the hub instance data
+ * @hdev: Stores the hid instance.
+ * @vendor_id: Vendor id of hub device.
+ * @product_id: Product id of hub device.
+ * @start_collection_index: Starting index for a phy type collection
+ * @end_collection_index: Last index for a phy type collection
+ */
+struct hid_sensor_hub_device {
+ struct hid_device *hdev;
+ u32 vendor_id;
+ u32 product_id;
+ int start_collection_index;
+ int end_collection_index;
+};
+
+/**
+ * struct hid_sensor_hub_callbacks - Client callback functions
+ * @pdev: Platform device instance of the client driver.
+ * @suspend: Suspend callback.
+ * @resume: Resume callback.
+ * @capture_sample: Callback to get a sample.
+ * @send_event: Send notification to indicate all samples are
+ * captured, process and send event
+ */
+struct hid_sensor_hub_callbacks {
+ struct platform_device *pdev;
+ int (*suspend)(struct hid_sensor_hub_device *hsdev, void *priv);
+ int (*resume)(struct hid_sensor_hub_device *hsdev, void *priv);
+ int (*capture_sample)(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id, size_t raw_len, char *raw_data,
+ void *priv);
+ int (*send_event)(struct hid_sensor_hub_device *hsdev, u32 usage_id,
+ void *priv);
+};
+
+/**
+* sensor_hub_device_open() - Open hub device
+* @hsdev: Hub device instance.
+*
+* Used to open hid device for sensor hub.
+*/
+int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
+
+/**
+* sensor_hub_device_close() - Close hub device
+* @hsdev: Hub device instance.
+*
+* Used to close hid device for sensor hub.
+*/
+void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
+
+/* Registration functions */
+
+/**
+* sensor_hub_register_callback() - Register client callbacks
+* @hsdev: Hub device instance.
+* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+* @usage_callback: Callback function storage
+*
+* Used to register callbacks by client processing drivers. Sensor
+* hub core driver will call these callbacks to offload processing
+* of data streams and notifications.
+*/
+int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_hub_callbacks *usage_callback);
+
+/**
+* sensor_hub_remove_callback() - Remove client callbacks
+* @hsdev: Hub device instance.
+* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+*
+* If there is a callback registered, this call will remove that
+* callback, so that it will stop data and event notifications.
+*/
+int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id);
+
+
+/* Hid sensor hub core interfaces */
+
+/**
+* sensor_hub_input_get_attribute_info() - Get an attribute information
+* @hsdev: Hub device instance.
+* @type: Type of this attribute, input/output/feature
+* @usage_id: Attribute usage id of parent physical device as per spec
+* @attr_usage_id: Attribute usage id as per spec
+* @info: return information about attribute after parsing report
+*
+* Parses report and returns the attribute information such as report id,
+* field index, units and exponent etc.
+*/
+int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
+ u8 type,
+ u32 usage_id, u32 attr_usage_id,
+ struct hid_sensor_hub_attribute_info *info);
+
+/**
+* sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @usage_id: Attribute usage id of parent physical device as per spec
+* @attr_usage_id: Attribute usage id as per spec
+* @report_id: Report id to look for
+*
+* Issues a synchronous read request for an input attribute. Returns
+* data up to 32 bits. Since clients can get events, this call should
+* not be used for data paths, as this will impact performance.
+*/
+
+int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ u32 attr_usage_id, u32 report_id);
+/**
+* sensor_hub_set_feature() - Feature set request
+* @report_id: Report id to look for
+* @field_index: Field index inside a report
+* @value: Value to set
+*
+* Used to set a field in feature report. For example this can set polling
+* interval, sensitivity, activate/deactivate state.
+*/
+int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+ u32 field_index, s32 value);
+
+/**
+* sensor_hub_get_feature() - Feature get request
+* @report_id: Report id to look for
+* @field_index: Field index inside a report
+* @value: Place holder for return value
+*
+* Used to get a field in feature report. For example this can get polling
+* interval, sensitivity, activate/deactivate state.
+*/
+int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+ u32 field_index, s32 *value);
+
+/* hid-sensor-attributes */
+
+/* Common hid sensor iio structure */
+struct hid_sensor_common {
+ struct hid_sensor_hub_device *hsdev;
+ struct platform_device *pdev;
+ unsigned usage_id;
+ atomic_t data_ready;
+ struct iio_trigger *trigger;
+ struct hid_sensor_hub_attribute_info poll;
+ struct hid_sensor_hub_attribute_info report_state;
+ struct hid_sensor_hub_attribute_info power_state;
+ struct hid_sensor_hub_attribute_info sensitivity;
+};
+
+/* Convert from hid unit expo to regular exponent */
+static inline int hid_sensor_convert_exponent(int unit_expo)
+{
+ if (unit_expo < 0x08)
+ return unit_expo;
+ else if (unit_expo <= 0x0f)
+ return -(0x0f-unit_expo+1);
+ else
+ return 0;
+}
+
+int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_common *st);
+int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
+ int val1, int val2);
+int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
+int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
+ int val1, int val2);
+int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
+
+int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev,
+ u32 report_id, int field_index, u32 usage_id);
+
+int hid_sensor_format_scale(u32 usage_id,
+ struct hid_sensor_hub_attribute_info *attr_info,
+ int *val0, int *val1);
+
+s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
+
+#endif
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
new file mode 100644
index 00000000000000..109f0e633e01f1
--- /dev/null
+++ b/include/linux/hid-sensor-ids.h
@@ -0,0 +1,155 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _HID_SENSORS_IDS_H
+#define _HID_SENSORS_IDS_H
+
+#define HID_MAX_PHY_DEVICES 0xFF
+
+/* Accel 3D (200073) */
+#define HID_USAGE_SENSOR_ACCEL_3D 0x200073
+#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452
+#define HID_USAGE_SENSOR_ACCEL_X_AXIS 0x200453
+#define HID_USAGE_SENSOR_ACCEL_Y_AXIS 0x200454
+#define HID_USAGE_SENSOR_ACCEL_Z_AXIS 0x200455
+
+/* ALS (200041) */
+#define HID_USAGE_SENSOR_ALS 0x200041
+#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
+#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+
+/* PROX (200011) */
+#define HID_USAGE_SENSOR_PROX 0x200011
+#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0
+#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1
+
+/* Pressure (200031) */
+#define HID_USAGE_SENSOR_PRESSURE 0x200031
+#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430
+#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431
+
+/* Gyro 3D: (200076) */
+#define HID_USAGE_SENSOR_GYRO_3D 0x200076
+#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS 0x200457
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
+
+/* ORIENTATION: Compass 3D: (200083) */
+#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
+#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Y 0x200473
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Z 0x200474
+
+#define HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH 0x200475
+#define HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH 0x200476
+#define HID_USAGE_SENSOR_ORIENT_MAGN_NORTH 0x200477
+#define HID_USAGE_SENSOR_ORIENT_TRUE_NORTH 0x200478
+
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE 0x200479
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_X 0x20047A
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Y 0x20047B
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Z 0x20047C
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_OUT_OF_RANGE 0x20047D
+
+/* ORIENTATION: Inclinometer 3D: (200086) */
+#define HID_USAGE_SENSOR_INCLINOMETER_3D 0x200086
+#define HID_USAGE_SENSOR_ORIENT_TILT 0x20047E
+#define HID_USAGE_SENSOR_ORIENT_TILT_X 0x20047F
+#define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480
+#define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481
+
+#define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A
+#define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482
+#define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484
+
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS 0x200485
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS 0x200486
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS 0x200487
+
+/* Time (2000a0) */
+#define HID_USAGE_SENSOR_TIME 0x2000a0
+#define HID_USAGE_SENSOR_TIME_YEAR 0x200521
+#define HID_USAGE_SENSOR_TIME_MONTH 0x200522
+#define HID_USAGE_SENSOR_TIME_DAY 0x200523
+#define HID_USAGE_SENSOR_TIME_HOUR 0x200525
+#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526
+#define HID_USAGE_SENSOR_TIME_SECOND 0x200527
+
+/* Units */
+#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00
+#define HID_USAGE_SENSOR_UNITS_LUX 0x01
+#define HID_USAGE_SENSOR_UNITS_KELVIN 0x01000100
+#define HID_USAGE_SENSOR_UNITS_FAHRENHEIT 0x03000100
+#define HID_USAGE_SENSOR_UNITS_PASCAL 0xF1E1
+#define HID_USAGE_SENSOR_UNITS_NEWTON 0x11E1
+#define HID_USAGE_SENSOR_UNITS_METERS_PER_SECOND 0x11F0
+#define HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD 0x11E0
+#define HID_USAGE_SENSOR_UNITS_FARAD 0xE14F2000
+#define HID_USAGE_SENSOR_UNITS_AMPERE 0x01001000
+#define HID_USAGE_SENSOR_UNITS_WATT 0x21d1
+#define HID_USAGE_SENSOR_UNITS_HENRY 0x21E1E000
+#define HID_USAGE_SENSOR_UNITS_OHM 0x21D1E000
+#define HID_USAGE_SENSOR_UNITS_VOLT 0x21D1F000
+#define HID_USAGE_SENSOR_UNITS_HERTZ 0x01F0
+#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SEC_SQRD 0x14E0
+#define HID_USAGE_SENSOR_UNITS_RADIANS 0x12
+#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND 0x12F0
+#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SEC_SQRD 0x12E0
+#define HID_USAGE_SENSOR_UNITS_SECOND 0x0110
+#define HID_USAGE_SENSOR_UNITS_GAUSS 0x01E1F000
+#define HID_USAGE_SENSOR_UNITS_GRAM 0x0101
+#define HID_USAGE_SENSOR_UNITS_CENTIMETER 0x11
+#define HID_USAGE_SENSOR_UNITS_G 0x1A
+#define HID_USAGE_SENSOR_UNITS_MILLISECOND 0x19
+#define HID_USAGE_SENSOR_UNITS_PERCENT 0x17
+#define HID_USAGE_SENSOR_UNITS_DEGREES 0x14
+#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
+
+/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_REL_PCT 0x200311
+#define HID_USAGE_SENSOR_PROP_ACCURACY 0x200312
+#define HID_USAGE_SENSOR_PROP_RESOLUTION 0x200313
+#define HID_USAGE_SENSOR_PROP_RANGE_MAXIMUM 0x200314
+#define HID_USAGE_SENSOR_PROP_RANGE_MINIMUM 0x200315
+#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
+/*
+ * NOTE(review): "PROY" below is a typo for "PROP". The misspelled name is
+ * deliberately left unchanged because other source files may already
+ * reference it; rename it together with all users in a follow-up change.
+ */
+#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
+
+/* Per data field properties */
+#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+
+/* Power state enumerations */
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x200851
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x200852
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x200853
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x200854
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x200855
+
+/* Report State enumerations */
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+
+#endif
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 00000000000000..c06bd6c8ba26d5
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+/*
+ * Socket address for a PPP-on-L2TP-Access-Concentrator (PPPoLAC) session
+ * (RFC 2661). Used with AF_PPPOX sockets, protocol PX_PROTO_OLAC.
+ */
+struct sockaddr_pppolac {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OLAC */
+ int udp_socket; /* presumably the fd of the UDP socket carrying L2TP — verify */
+ struct __attribute__((packed)) {
+ __u16 tunnel, session; /* L2TP tunnel and session identifiers */
+ } local, remote;
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 00000000000000..0cf34b4d551fed
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+/*
+ * Socket address for a PPP-on-PPTP-Network-Server (PPPoPNS) session
+ * (RFC 2637). Used with AF_PPPOX sockets, protocol PX_PROTO_OPNS.
+ */
+struct sockaddr_pppopns {
+ sa_family_t sa_family; /* AF_PPPOX */
+ unsigned int sa_protocol; /* PX_PROTO_OPNS */
+ int tcp_socket; /* presumably the fd of the PPTP control TCP socket — verify */
+ __u16 local; /* local call id */
+ __u16 remote; /* remote call id */
+} __attribute__((packed));
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/iio/Kbuild b/include/linux/iio/Kbuild
new file mode 100644
index 00000000000000..86b0b8348b1fb2
--- /dev/null
+++ b/include/linux/iio/Kbuild
@@ -0,0 +1,3 @@
+header-y += events.h
+header-y += types.h
+
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
new file mode 100644
index 00000000000000..fd1d540ea62d22
--- /dev/null
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -0,0 +1,22 @@
+/*
+ * KXCJK-1013 3-axis accelerometer Interface
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IIO_KXCJK_1013_H__
+#define __IIO_KXCJK_1013_H__
+
+/* Board-specific configuration for the KXCJK-1013 accelerometer driver. */
+struct kxcjk_1013_platform_data {
+ bool active_high_intr; /* true if the interrupt line is active-high */
+};
+
+#endif
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
new file mode 100644
index 00000000000000..e7fdec4db9dac5
--- /dev/null
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -0,0 +1,173 @@
+/*
+ * Support code for Analog Devices Sigma-Delta ADCs
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __AD_SIGMA_DELTA_H__
+#define __AD_SIGMA_DELTA_H__
+
+/* Operating modes selectable through ad_sigma_delta_info::set_mode. */
+enum ad_sigma_delta_mode {
+ AD_SD_MODE_CONTINUOUS = 0, /* continuous conversion */
+ AD_SD_MODE_SINGLE = 1, /* single conversion */
+ AD_SD_MODE_IDLE = 2, /* idle */
+ AD_SD_MODE_POWERDOWN = 3, /* power-down */
+};
+
+/**
+ * struct ad_sd_calib_data - Calibration data for Sigma Delta devices
+ * @mode: Calibration mode.
+ * @channel: Calibration channel.
+ */
+struct ad_sd_calib_data {
+ unsigned int mode;
+ unsigned int channel;
+};
+
+struct ad_sigma_delta;
+struct iio_dev;
+
+/**
+ * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
+ * @set_channel: Will be called to select the current channel, may be NULL.
+ * @set_mode: Will be called to select the current mode, may be NULL.
+ * @postprocess_sample: Is called for each sampled data word, can be used to
+ * modify or drop the sample data, it may be NULL.
+ * @has_registers: true if the device has writable and readable registers, false
+ * if there is just one read-only sample data shift register.
+ * @addr_shift: Shift of the register address in the communications register.
+ * @read_mask: Mask for the communications register having the read bit set.
+ */
+struct ad_sigma_delta_info {
+ int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
+ bool has_registers;
+ unsigned int addr_shift;
+ unsigned int read_mask;
+};
+
+/**
+ * struct ad_sigma_delta - Sigma Delta device struct
+ * @spi: The spi device associated with the Sigma Delta device.
+ * @trig: The IIO trigger associated with the Sigma Delta device.
+ *
+ * Most of the fields are private to the sigma delta library code and should not
+ * be accessed by individual drivers.
+ */
+struct ad_sigma_delta {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+
+/* private: */
+ struct completion completion; /* completion object (library internal) */
+ bool irq_dis; /* presumably tracks whether the IRQ is disabled — verify in library code */
+
+ bool bus_locked; /* presumably tracks SPI bus-lock state — verify in library code */
+
+ uint8_t comm; /* cached communications-register value, set by ad_sd_set_comm() */
+
+ const struct ad_sigma_delta_info *info; /* driver callbacks and options */
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ uint8_t data[4] ____cacheline_aligned;
+};
+
+/*
+ * Select the active channel via the driver's set_channel callback.
+ * A missing callback is treated as success (returns 0).
+ */
+static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
+ unsigned int channel)
+{
+ if (sd->info->set_channel)
+ return sd->info->set_channel(sd, channel);
+
+ return 0;
+}
+
+/*
+ * Select the operating mode via the driver's set_mode callback.
+ * A missing callback is treated as success (returns 0).
+ * Note: @mode is declared unsigned int but is implicitly converted to
+ * enum ad_sigma_delta_mode when passed to the callback.
+ */
+static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
+ unsigned int mode)
+{
+ if (sd->info->set_mode)
+ return sd->info->set_mode(sd, mode);
+
+ return 0;
+}
+
+/*
+ * Run the driver's per-sample postprocess_sample callback on @raw_sample.
+ * A missing callback is treated as success (returns 0).
+ */
+static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd,
+ unsigned int raw_sample)
+{
+ if (sd->info->postprocess_sample)
+ return sd->info->postprocess_sample(sd, raw_sample);
+
+ return 0;
+}
+
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm);
+int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ unsigned int size, unsigned int val);
+int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ unsigned int size, unsigned int *val);
+
+int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
+ const struct ad_sd_calib_data *cd, unsigned int n);
+int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct ad_sigma_delta_info *info);
+
+int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
+void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+
+int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
+
+#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE)
+
+#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE)
+
+#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE)
+
+#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_TEMP)
+
+#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE)
+
+#endif
diff --git a/include/linux/iio/bldc/parrot_bldc_cypress_iio.h b/include/linux/iio/bldc/parrot_bldc_cypress_iio.h
new file mode 100644
index 00000000000000..c32fc54ae9bd35
--- /dev/null
+++ b/include/linux/iio/bldc/parrot_bldc_cypress_iio.h
@@ -0,0 +1,31 @@
+/**
+ ************************************************
+ * @file parrot_bldc_cypress_iio.h
+ * @brief BLDC cypress IIO driver interface
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+#ifndef __IIO_PARROT_BLDC_CYPRESS_H__
+#define __IIO_PARROT_BLDC_CYPRESS_H__
+
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+/* NOTE(review): duplicate of the <linux/i2c.h> include above; harmless
+ * thanks to header guards, but it should be removed. */
+#include <linux/i2c.h>
+
+int bldc_cypress_probe(struct iio_dev *indio_dev);
+void bldc_cypress_remove(struct iio_dev *indio_dev);
+irqreturn_t bldc_cypress_read_fifo(int irq, void *p);
+
+#endif
diff --git a/include/linux/iio/bldc/parrot_bldc_iio.h b/include/linux/iio/bldc/parrot_bldc_iio.h
new file mode 100644
index 00000000000000..6af924acace719
--- /dev/null
+++ b/include/linux/iio/bldc/parrot_bldc_iio.h
@@ -0,0 +1,145 @@
+/**
+ ************************************************
+ * @file parrot_bldc_iio.h
+ * @brief Parrot BLDC IIO driver common definitions
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * @author Karl Leplat <karl.leplat@parrot.com>
+ * @date 2015-06-22
+ *************************************************
+ */
+#ifndef __IIO_PARROT_BLDC_H__
+#define __IIO_PARROT_BLDC_H__
+
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <iio/platform_data/bldc_cypress.h>
+
+/* NOTE(review): duplicate of the <linux/i2c.h> include above; harmless
+ * thanks to header guards, but it should be removed. */
+#include <linux/i2c.h>
+
+#define PARROT_BLDC_REG_REF_SPEED 2
+#define PARROT_BLDC_REG_OBS_DATA 32
+#define PARROT_BLDC_REG_START_PROP 64
+#define PARROT_BLDC_REG_STOP_PROP 96
+#define PARROT_BLDC_REG_CLEAR_ERROR 128
+#define PARROT_BLDC_REG_LED 77
+#define PARROT_BLDC_REG_INFO 160
+
+#define PARROT_BLDC_GET_INFO_LENGTH 14
+#define PARROT_BLDC_OBS_DATA_LENGTH 15
+
+/* scan element definition */
+enum {
+ PARROT_BLDC_SCAN_OBS_DATA_SPEED_MOTOR1,
+ PARROT_BLDC_SCAN_OBS_DATA_SPEED_MOTOR2,
+ PARROT_BLDC_SCAN_OBS_DATA_SPEED_MOTOR3,
+ PARROT_BLDC_SCAN_OBS_DATA_SPEED_MOTOR4,
+ PARROT_BLDC_SCAN_OBS_DATA_BATT_VOLTAGE,
+ PARROT_BLDC_SCAN_OBS_DATA_STATUS,
+ PARROT_BLDC_SCAN_OBS_DATA_ERRNO,
+ PARROT_BLDC_SCAN_OBS_DATA_FAULT_MOTOR,
+ PARROT_BLDC_SCAN_OBS_DATA_TEMP,
+ PARROT_BLDC_SCAN_TIMESTAMP,
+ PARROT_BLDC_SCAN_MAX,
+};
+
+#define PARROT_BLDC_OBS_NAME_SPEED_MOTOR1 "sm1"
+#define PARROT_BLDC_OBS_NAME_SPEED_MOTOR2 "sm2"
+#define PARROT_BLDC_OBS_NAME_SPEED_MOTOR3 "sm3"
+#define PARROT_BLDC_OBS_NAME_SPEED_MOTOR4 "sm4"
+#define PARROT_BLDC_OBS_NAME_BATT_VOLTAGE "batt"
+#define PARROT_BLDC_OBS_NAME_STATUS "status"
+#define PARROT_BLDC_OBS_NAME_ERRNO "errno"
+#define PARROT_BLDC_OBS_NAME_FAULT_MOTOR "fm"
+#define PARROT_BLDC_OBS_NAME_TEMP "temp"
+
+#define PARROT_BLDC_OBS_CHAN_EXT_NAME(_mod, _channel, _type, _bits, _storage) \
+{ \
+ .type = _type, \
+ .modified = 0, \
+ .extend_name = PARROT_BLDC_OBS_NAME_ ## _mod, \
+ .channel = _channel, \
+ .scan_index = PARROT_BLDC_SCAN_OBS_DATA_ ## _mod, \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storage), \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define PARROT_BLDC_OBS_CHAN(_mod, _channel, _type, _bits, _storage) \
+{ \
+ .type = _type, \
+ .modified = 0, \
+ .channel = _channel, \
+ .scan_index = PARROT_BLDC_SCAN_OBS_DATA_ ## _mod, \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storage), \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+
+/* IIO attribute address */
+enum PARROT_BLDC_IIO_ATTR_ADDR {
+ ATTR_START_MOTORS,
+ ATTR_START_MOTORS_WITH_DIR,
+ ATTR_STOP_MOTORS,
+ ATTR_SPIN_DIRECTION_MOTORS,
+ ATTR_LED,
+ ATTR_REBOOT,
+ ATTR_CLEAR_ERROR,
+ ATTR_MOTORS_SPEED,
+ ATTR_INFO_SOFT_VERSION,
+ ATTR_INFO_FT_NB_FLIGHTS,
+ ATTR_INFO_FT_PREVIOUS_TIME,
+ ATTR_INFO_FT_TOTAL_TIME,
+ ATTR_INFO_FT_LAST_ERROR,
+ ATTR_INFO_SPIN_DIR,
+ ATTR_OBS_MOTORS,
+ ATTR_RC
+};
+
+/*
+ * Bus-abstraction callbacks used by the BLDC core to access device
+ * registers independently of the underlying transport (e.g. I2C).
+ */
+struct bldc_transfer_function {
+ /* Read @len bytes starting at @reg_addr into @data. */
+ int (*read_multiple_byte) (struct device *dev,
+ u8 reg_addr,
+ int len,
+ u8 *data);
+ /* Write @len bytes from @data starting at @reg_addr. */
+ int (*write_multiple_byte) (struct device *dev,
+ u8 reg_addr,
+ int len,
+ u8 *data);
+};
+
+/* Driver state shared between the BLDC core and its bus glue. */
+struct bldc_state {
+ struct device *dev; /* underlying bus device */
+ struct iio_trigger *trig; /* IIO trigger for buffered capture */
+ struct iio_chan_spec channels[PARROT_BLDC_SCAN_MAX]; /* channel table */
+ const struct bldc_transfer_function *tf; /* bus access callbacks */
+ struct bldc_cypress_platform_data pdata; /* board-specific data */
+ struct mutex mutex; /* presumably serialises device access — verify in core */
+ u8 *buffer; /* scan buffer — ownership/size set by the core, verify there */
+ int is_overflow; /* presumably an overflow flag — verify in core */
+};
+
+void bldc_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct bldc_state *st);
+
+#endif
+
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
new file mode 100644
index 00000000000000..22d1b5d93161f8
--- /dev/null
+++ b/include/linux/iio/buffer.h
@@ -0,0 +1,253 @@
+/* The industrial I/O core - generic buffer interfaces.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_BUFFER_GENERIC_H_
+#define _IIO_BUFFER_GENERIC_H_
+#include <linux/sysfs.h>
+#include <linux/iio/iio.h>
+#include <linux/kref.h>
+
+#ifdef CONFIG_IIO_BUFFER
+
+struct iio_buffer;
+
+/**
+ * struct iio_buffer_access_funcs - access functions for buffers.
+ * @store_to: actually store stuff to the buffer
+ * @store_to_buffer: store @num samples to the buffer in one call (extension
+ * over the upstream API; exact semantics not documented here — see
+ * its implementations), may be NULL.
+ * @read_first_n: try to get a specified number of bytes (must exist)
+ * @data_available: indicates whether data for reading from the buffer is
+ * available.
+ * @request_update: if a parameter change has been marked, update underlying
+ * storage.
+ * @get_bytes_per_datum:get current bytes per datum
+ * @set_bytes_per_datum:set number of bytes per datum
+ * @get_length: get number of datums in buffer
+ * @set_length: set number of datums in buffer
+ * @release: called when the last reference to the buffer is dropped,
+ * should free all resources allocated by the buffer.
+ *
+ * The purpose of this structure is to make the buffer element
+ * modular as event for a given driver, different usecases may require
+ * different buffer designs (space efficiency vs speed for example).
+ *
+ * It is worth noting that a given buffer implementation may only support a
+ * small proportion of these functions. The core code 'should' cope fine with
+ * any of them not existing.
+ **/
+struct iio_buffer_access_funcs {
+ int (*store_to)(struct iio_buffer *buffer, const void *data);
+ int (*store_to_buffer)(struct iio_buffer *buffer, const void *data, int num);
+ int (*read_first_n)(struct iio_buffer *buffer,
+ size_t n,
+ char __user *buf);
+ bool (*data_available)(struct iio_buffer *buffer);
+
+ int (*request_update)(struct iio_buffer *buffer);
+
+ int (*get_bytes_per_datum)(struct iio_buffer *buffer);
+ int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
+ int (*get_length)(struct iio_buffer *buffer);
+ int (*set_length)(struct iio_buffer *buffer, int length);
+
+ void (*release)(struct iio_buffer *buffer);
+};
+
+/**
+ * struct iio_buffer - general buffer structure
+ * @length: [DEVICE] number of datums in buffer
+ * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
+ * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
+ * control method is used
+ * @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_timestamp: [INTERN] does the scan mode include a timestamp
+ * @access: [DRIVER] buffer access functions associated with the
+ * implementation.
+ * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @scan_el_group: [DRIVER] attribute group for those attributes not
+ * created from the iio_chan_info array.
+ * @pollq: [INTERN] wait queue to allow for polling on the buffer.
+ * @stufftoread: [INTERN] flag to indicate new data.
+ * @attrs: [INTERN] extra attribute group for the buffer (not part of
+ * the original kernel-doc; role inferred — verify against users).
+ * @demux_list: [INTERN] list of operations required to demux the scan.
+ * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
+ * @buffer_list: [INTERN] entry in the devices list of current buffers.
+ * @ref: [INTERN] reference count of the buffer.
+ */
+struct iio_buffer {
+ int length;
+ int bytes_per_datum;
+ struct attribute_group *scan_el_attrs;
+ long *scan_mask;
+ bool scan_timestamp;
+ const struct iio_buffer_access_funcs *access;
+ struct list_head scan_el_dev_attr_list;
+ struct attribute_group scan_el_group;
+ wait_queue_head_t pollq;
+ bool stufftoread;
+ const struct attribute_group *attrs;
+ struct list_head demux_list;
+ void *demux_bounce;
+ struct list_head buffer_list;
+ struct kref ref;
+};
+
+/**
+ * iio_update_buffers() - add or remove buffer from active list
+ * @indio_dev: device to add buffer to
+ * @insert_buffer: buffer to insert
+ * @remove_buffer: buffer_to_remove
+ *
+ * Note this will tear down the all buffering and build it up again
+ */
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer);
+
+/**
+ * iio_buffer_init() - Initialize the buffer structure
+ * @buffer: buffer to be initialized
+ **/
+void iio_buffer_init(struct iio_buffer *buffer);
+
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @indio_dev IIO device structure
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ **/
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
+
+/**
+ * iio_push_to_buffers() - push to a registered buffer.
+ * @indio_dev: iio_dev structure for device.
+ * @data: Full scan.
+ */
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+
+int iio_push_to_buffers_n(struct iio_dev *indio_dev, const void *data, int n);
+
+/**
+ * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: sample data
+ * @timestamp: timestamp for the sample data
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be indio->scan_bytes bytes large).
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
+ void *data, int64_t timestamp)
+{
+ if (indio_dev->scan_timestamp) {
+ /* Timestamp occupies the last 64-bit slot of the scan. */
+ size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
+ ((int64_t *)data)[ts_offset] = timestamp;
+ }
+
+ return iio_push_to_buffers(indio_dev, data);
+}
+
+int iio_update_demux(struct iio_dev *indio_dev);
+
+/**
+ * iio_buffer_register() - register the buffer with IIO core
+ * @indio_dev: device with the buffer to be registered
+ * @channels: the channel descriptions used to construct buffer
+ * @num_channels: the number of channels
+ **/
+int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels);
+
+/**
+ * iio_buffer_unregister() - unregister the buffer from IIO core
+ * @indio_dev: the device with the buffer to be unregistered
+ **/
+void iio_buffer_unregister(struct iio_dev *indio_dev);
+
+/**
+ * iio_buffer_read_length() - attr func to get number of datums in the buffer
+ **/
+ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+/**
+ * iio_buffer_write_length() - attr func to set number of datums in the buffer
+ **/
+ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+/**
+ * iio_buffer_store_enable() - attr to turn the buffer on
+ **/
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len);
+/**
+ * iio_buffer_show_enable() - attr to see if the buffer is on
+ **/
+ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
+ iio_buffer_read_length, \
+ iio_buffer_write_length)
+
+#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
+ iio_buffer_show_enable, \
+ iio_buffer_store_enable)
+
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+ const unsigned long *mask);
+
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
+void iio_buffer_put(struct iio_buffer *buffer);
+
+/**
+ * iio_device_attach_buffer - Attach a buffer to a IIO device
+ * @indio_dev: The device the buffer should be attached to
+ * @buffer: The buffer to attach to the device
+ *
+ * This function attaches a buffer to a IIO device. The buffer stays attached to
+ * the device until the device is freed. The function should only be called at
+ * most once per device.
+ *
+ * The device keeps its own reference on @buffer (taken via iio_buffer_get(),
+ * see struct iio_buffer::ref), so the caller may drop its reference afterwards.
+ */
+static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ indio_dev->buffer = iio_buffer_get(buffer);
+}
+
+#else /* CONFIG_IIO_BUFFER */
+
+/* Stubs used when CONFIG_IIO_BUFFER is disabled. Their signatures must
+ * mirror the real API above so callers compile either way. */
+static inline int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels)
+{
+ return 0;
+}
+
+static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
+{}
+
+/* The real iio_buffer_get() returns the buffer; returning void here would
+ * break any caller that uses the result (e.g. assigning it as the device
+ * buffer), so pass the pointer through unchanged. */
+static inline struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
+{
+ return buffer;
+}
+static inline void iio_buffer_put(struct iio_buffer *buffer) {}
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* _IIO_BUFFER_GENERIC_H_ */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
new file mode 100644
index 00000000000000..2c476acb87d927
--- /dev/null
+++ b/include/linux/iio/common/st_sensors.h
@@ -0,0 +1,290 @@
+/*
+ * STMicroelectronics sensors library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_H
+#define ST_SENSORS_H
+
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/irqreturn.h>
+#include <linux/iio/trigger.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/platform_data/st_sensors_pdata.h>
+
+#define ST_SENSORS_TX_MAX_LENGTH 2
+#define ST_SENSORS_RX_MAX_LENGTH 6
+
+#define ST_SENSORS_ODR_LIST_MAX 10
+#define ST_SENSORS_FULLSCALE_AVL_MAX 10
+
+#define ST_SENSORS_NUMBER_ALL_CHANNELS 4
+#define ST_SENSORS_ENABLE_ALL_AXIS 0x07
+#define ST_SENSORS_SCAN_X 0
+#define ST_SENSORS_SCAN_Y 1
+#define ST_SENSORS_SCAN_Z 2
+#define ST_SENSORS_DEFAULT_POWER_ON_VALUE 0x01
+#define ST_SENSORS_DEFAULT_POWER_OFF_VALUE 0x00
+#define ST_SENSORS_DEFAULT_WAI_ADDRESS 0x0f
+#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20
+#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07
+#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3
+
+#define ST_SENSORS_MAX_NAME 17
+#define ST_SENSORS_MAX_4WAI 7
+
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+{ \
+ .type = device_type, \
+ .modified = mod, \
+ .info_mask_separate = mask, \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = index, \
+ .channel2 = ch2, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = s, \
+ .realbits = rbits, \
+ .shift = sbits - rbits, \
+ .storagebits = sbits, \
+ .endianness = endian, \
+ }, \
+}
+
+#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
+ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
+ st_sensors_sysfs_sampling_frequency_avail)
+
+#define ST_SENSORS_DEV_ATTR_SCALE_AVAIL(name) \
+ IIO_DEVICE_ATTR(name, S_IRUGO, \
+ st_sensors_sysfs_scale_avail, NULL , 0);
+
+struct st_sensor_odr_avl {
+ unsigned int hz;
+ u8 value;
+};
+
+struct st_sensor_odr {
+ u8 addr;
+ u8 mask;
+ struct st_sensor_odr_avl odr_avl[ST_SENSORS_ODR_LIST_MAX];
+};
+
+struct st_sensor_power {
+ u8 addr;
+ u8 mask;
+ u8 value_off;
+ u8 value_on;
+};
+
+struct st_sensor_axis {
+ u8 addr;
+ u8 mask;
+};
+
+struct st_sensor_fullscale_avl {
+ unsigned int num;
+ u8 value;
+ unsigned int gain;
+ unsigned int gain2;
+};
+
+struct st_sensor_fullscale {
+ u8 addr;
+ u8 mask;
+ struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
+};
+
+/**
+ * struct st_sensor_bdu - ST sensor device block data update
+ * @addr: address of the register.
+ * @mask: mask to write the block data update flag.
+ */
+struct st_sensor_bdu {
+ u8 addr;
+ u8 mask;
+};
+
+/**
+ * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
+ * @addr: address of the register.
+ * @mask_int1: mask to enable/disable IRQ on INT1 pin.
+ * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * struct ig1 - represents the Interrupt Generator 1 of sensors.
+ * @en_addr: address of the enable ig1 register.
+ * @en_mask: mask to write the on/off value for enable.
+ */
+struct st_sensor_data_ready_irq {
+ u8 addr;
+ u8 mask_int1;
+ u8 mask_int2;
+ struct {
+ u8 en_addr;
+ u8 en_mask;
+ } ig1;
+};
+
+/**
+ * struct st_sensor_transfer_buffer - ST sensor device I/O buffer
+ * @buf_lock: Mutex to protect rx and tx buffers.
+ * @tx_buf: Buffer used by SPI transfer function to send data to the sensors.
+ * This buffer is used to avoid DMA not-aligned issue.
+ * @rx_buf: Buffer used by SPI transfer to receive data from sensors.
+ * This buffer is used to avoid DMA not-aligned issue.
+ */
+struct st_sensor_transfer_buffer {
+ struct mutex buf_lock;
+ u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH];
+ u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+/**
+ * struct st_sensor_transfer_function - ST sensor device I/O function
+ * @read_byte: Function used to read one byte.
+ * @write_byte: Function used to write one byte.
+ * @read_multiple_byte: Function used to read multiple bytes.
+ */
+struct st_sensor_transfer_function {
+ int (*read_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte);
+ int (*write_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data);
+ int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, int len, u8 *data,
+ bool multiread_bit);
+};
+
+/**
+ * struct st_sensor_settings - ST specific sensor settings
+ * @wai: Contents of WhoAmI register.
+ * @sensors_supported: List of supported sensors by struct itself.
+ * @ch: IIO channels for the sensor.
+ * @odr: Output data rate register and ODR list available.
+ * @pw: Power register of the sensor.
+ * @enable_axis: Enable one or more axis of the sensor.
+ * @fs: Full scale register and full scale list available.
+ * @bdu: Block data update register.
+ * @drdy_irq: Data ready register of the sensor.
+ * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
+ * @bootime: samples to discard when the sensor passes from power-down to power-up.
+ */
+struct st_sensor_settings {
+ u8 wai;
+ char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
+ struct iio_chan_spec *ch;
+ int num_ch;
+ struct st_sensor_odr odr;
+ struct st_sensor_power pw;
+ struct st_sensor_axis enable_axis;
+ struct st_sensor_fullscale fs;
+ struct st_sensor_bdu bdu;
+ struct st_sensor_data_ready_irq drdy_irq;
+ bool multi_read_bit;
+ unsigned int bootime;
+};
+
+/**
+ * struct st_sensor_data - ST sensor device status
+ * @dev: Pointer to instance of struct device (I2C or SPI).
+ * @trig: The trigger in use by the core driver.
+ * @sensor_settings: Pointer to the specific sensor settings in use.
+ * @current_fullscale: Maximum range of measure by the sensor.
+ * @vdd: Pointer to sensor's Vdd power supply
+ * @vdd_io: Pointer to sensor's Vdd-IO power supply
+ * @enabled: Status of the sensor (false->off, true->on).
+ * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread.
+ * @buffer_data: Data used by buffer part.
+ * @odr: Output data rate of the sensor [Hz].
+ * @num_data_channels: Number of data channels used in buffer.
+ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
+ * @tf: Transfer function structure used by I/O operations.
+ * @tb: Transfer buffers and mutex used by I/O operations.
+ */
+struct st_sensor_data {
+ struct device *dev;
+ struct iio_trigger *trig;
+ struct st_sensor_settings *sensor_settings;
+ struct st_sensor_fullscale_avl *current_fullscale;
+ struct regulator *vdd;
+ struct regulator *vdd_io;
+
+ bool enabled;
+ bool multiread_bit;
+
+ char *buffer_data;
+
+ unsigned int odr;
+ unsigned int num_data_channels;
+
+ u8 drdy_int_pin;
+
+ unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
+
+ const struct st_sensor_transfer_function *tf;
+ struct st_sensor_transfer_buffer tb;
+};
+
+#ifdef CONFIG_IIO_BUFFER
+irqreturn_t st_sensors_trigger_handler(int irq, void *p);
+
+int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf);
+#endif
+
+#ifdef CONFIG_IIO_TRIGGER
+int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops);
+
+void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
+
+#else
+static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops)
+{
+ return 0;
+}
+static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
+{
+ return;
+}
+#endif
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
+
+int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
+
+int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
+
+void st_sensors_power_enable(struct iio_dev *indio_dev);
+
+void st_sensors_power_disable(struct iio_dev *indio_dev);
+
+int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr);
+
+int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable);
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale);
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val);
+
+int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list, const struct st_sensor_settings *sensor_settings);
+
+ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
new file mode 100644
index 00000000000000..1796af09336819
--- /dev/null
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -0,0 +1,31 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_I2C_H
+#define ST_SENSORS_I2C_H
+
+#include <linux/i2c.h>
+#include <linux/iio/common/st_sensors.h>
+#include <linux/of.h>
+
+void st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct st_sensor_data *sdata);
+
+#ifdef CONFIG_OF
+void st_sensors_of_i2c_probe(struct i2c_client *client,
+ const struct of_device_id *match);
+#else
+static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
+ const struct of_device_id *match)
+{
+}
+#endif
+
+#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h
new file mode 100644
index 00000000000000..d964a3563dc62b
--- /dev/null
+++ b/include/linux/iio/common/st_sensors_spi.h
@@ -0,0 +1,20 @@
+/*
+ * STMicroelectronics sensors spi library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_SPI_H
+#define ST_SENSORS_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/iio/common/st_sensors.h>
+
+void st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi, struct st_sensor_data *sdata);
+
+#endif /* ST_SENSORS_SPI_H */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
new file mode 100644
index 00000000000000..651f9a0e2765fa
--- /dev/null
+++ b/include/linux/iio/consumer.h
@@ -0,0 +1,199 @@
+/*
+ * Industrial I/O in kernel consumer interface
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_INKERN_CONSUMER_H_
+#define _IIO_INKERN_CONSUMER_H_
+
+#include <linux/types.h>
+#include <linux/iio/types.h>
+
+struct iio_dev;
+struct iio_chan_spec;
+struct device;
+
+/**
+ * struct iio_channel - everything needed for a consumer to use a channel
+ * @indio_dev: Device on which the channel exists.
+ * @channel: Full description of the channel.
+ * @data: Data about the channel used by consumer.
+ */
+struct iio_channel {
+ struct iio_dev *indio_dev;
+ const struct iio_chan_spec *channel;
+ void *data;
+};
+
+/**
+ * iio_channel_get() - get description of all that is needed to access channel.
+ * @dev: Pointer to consumer device. Device name must match
+ * the name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channels use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+struct iio_channel *iio_channel_get(struct device *dev,
+ const char *consumer_channel);
+
+/**
+ * iio_channel_release() - release channels obtained via iio_channel_get
+ * @chan: The channel to be released.
+ */
+void iio_channel_release(struct iio_channel *chan);
+
+/**
+ * iio_channel_get_all() - get all channels associated with a client
+ * @dev: Pointer to consumer device.
+ *
+ * Returns an array of iio_channel structures terminated with one with
+ * null iio_dev pointer.
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ */
+struct iio_channel *iio_channel_get_all(struct device *dev);
+
+/**
+ * iio_channel_release_all() - reverse iio_channel_get_all
+ * @chan: Array of channels to be released.
+ */
+void iio_channel_release_all(struct iio_channel *chan);
+
+struct iio_cb_buffer;
+/**
+ * iio_channel_get_all_cb() - register callback for triggered capture
+ * @dev: Pointer to client device.
+ * @cb: Callback function.
+ * @private: Private data passed to callback.
+ *
+ * NB right now we have no ability to mux data from multiple devices.
+ * So if the channels requested come from different devices this will
+ * fail.
+ */
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
+ int (*cb)(const void *data,
+ void *private),
+ void *private);
+/**
+ * iio_channel_release_all_cb() - release and unregister the callback.
+ * @cb_buffer: The callback buffer that was allocated.
+ */
+void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buffer);
+
+/**
+ * iio_channel_start_all_cb() - start the flow of data through callback.
+ * @cb_buff: The callback buffer we are starting.
+ */
+int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff);
+
+/**
+ * iio_channel_stop_all_cb() - stop the flow of data through the callback.
+ * @cb_buff: The callback buffer we are stopping.
+ */
+void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
+
+/**
+ * iio_channel_cb_get_channels() - get access to the underlying channels.
+ * @cb_buff: The callback buffer from whom we want the channel
+ * information.
+ *
+ * This function allows one to obtain information about the channels.
+ * Whilst this may allow direct reading if all buffers are disabled, the
+ * primary aim is to allow drivers that are consuming a channel to query
+ * things like scaling of the channel.
+ */
+struct iio_channel
+*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer);
+
+/**
+ * iio_read_channel_raw() - read from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units required.
+ */
+int iio_read_channel_raw(struct iio_channel *chan,
+ int *val);
+
+/**
+ * iio_read_channel_average_raw() - read from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units required.
+ *
+ * In contrast to the normal iio_read_channel_raw, this function
+ * returns the average of multiple reads.
+ */
+int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
+
+/**
+ * iio_read_channel_processed() - read processed value from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. A processed value
+ * means that this value will have the correct unit and not some device internal
+ * representation. If the device does not support reporting a processed value
+ * the function will query the raw value and the channels scale and offset and
+ * do the appropriate transformation.
+ */
+int iio_read_channel_processed(struct iio_channel *chan, int *val);
+
+/**
+ * iio_get_channel_type() - get the type of a channel
+ * @channel: The channel being queried.
+ * @type: The type of the channel.
+ *
+ * returns the enum iio_chan_type of the channel
+ */
+int iio_get_channel_type(struct iio_channel *channel,
+ enum iio_chan_type *type);
+
+/**
+ * iio_read_channel_scale() - read the scale value for a channel
+ * @chan: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_scale(struct iio_channel *chan, int *val,
+ int *val2);
+
+/**
+ * iio_convert_raw_to_processed() - Converts a raw value to a processed value
+ * @chan: The channel being queried
+ * @raw: The raw IIO to convert
+ * @processed: The result of the conversion
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function converts a raw value to processed value for a specific channel.
+ * A raw value is the device internal representation of a sample and the value
+ * returned by iio_read_channel_raw, so the unit of that value is device
+ * dependent. A processed value, on the other hand, is a value that has a normed
+ * unit in accordance with the IIO specification.
+ *
+ * The scale factor allows one to increase the precision of the returned value. For
+ * a scale factor of 1 the function will return the result in the normal IIO
+ * unit for the channel type. E.g. millivolt for voltage channels, if you want
+ * nanovolts instead pass 1000 as the scale factor.
+ */
+int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
+ int *processed, unsigned int scale);
+
+#endif
diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h
new file mode 100644
index 00000000000000..8fd8f057a890b5
--- /dev/null
+++ b/include/linux/iio/dac/ad5421.h
@@ -0,0 +1,28 @@
+#ifndef __IIO_DAC_AD5421_H__
+#define __IIO_DAC_AD5421_H__
+
+/**
+ * enum ad5421_current_range - Current range the AD5421 is configured for.
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00)
+ * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1)
+ * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10)
+ */
+
+enum ad5421_current_range {
+ AD5421_CURRENT_RANGE_4mA_20mA,
+ AD5421_CURRENT_RANGE_3mA8_21mA,
+ AD5421_CURRENT_RANGE_3mA2_24mA,
+};
+
+/**
+ * struct ad5421_platform_data - AD5421 DAC driver platform data
+ * @external_vref: whether an external reference voltage is used or not
+ * @current_range: Current range the AD5421 is configured for
+ */
+
+struct ad5421_platform_data {
+ bool external_vref;
+ enum ad5421_current_range current_range;
+};
+
+#endif
diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h
new file mode 100644
index 00000000000000..43895376a9cab0
--- /dev/null
+++ b/include/linux/iio/dac/ad5504.h
@@ -0,0 +1,16 @@
+/*
+ * AD5504 SPI DAC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef SPI_AD5504_H_
+#define SPI_AD5504_H_
+
+struct ad5504_platform_data {
+ u16 vref_mv;
+};
+
+#endif /* SPI_AD5504_H_ */
diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h
new file mode 100644
index 00000000000000..45ee281c6660b7
--- /dev/null
+++ b/include/linux/iio/dac/ad5791.h
@@ -0,0 +1,25 @@
+/*
+ * AD5791 SPI DAC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef SPI_AD5791_H_
+#define SPI_AD5791_H_
+
+/**
+ * struct ad5791_platform_data - platform specific information
+ * @vref_pos_mv: Vdd Positive Analog Supply Voltage (mV)
+ * @vref_neg_mv: Vdd Negative Analog Supply Voltage (mV)
+ * @use_rbuf_gain2: ext. amplifier connected in gain of two configuration
+ */
+
+struct ad5791_platform_data {
+ u16 vref_pos_mv;
+ u16 vref_neg_mv;
+ bool use_rbuf_gain2;
+};
+
+#endif /* SPI_AD5791_H_ */
diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h
new file mode 100644
index 00000000000000..f6d1d252f08d31
--- /dev/null
+++ b/include/linux/iio/dac/max517.h
@@ -0,0 +1,15 @@
+/*
+ * MAX517 DAC driver
+ *
+ * Copyright 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DAC_MAX517_H_
+#define IIO_DAC_MAX517_H_
+
+struct max517_platform_data {
+ u16 vref_mv[2];
+};
+
+#endif /* IIO_DAC_MAX517_H_ */
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
new file mode 100644
index 00000000000000..91530e6611e9cc
--- /dev/null
+++ b/include/linux/iio/dac/mcp4725.h
@@ -0,0 +1,16 @@
+/*
+ * MCP4725 DAC driver
+ *
+ * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef IIO_DAC_MCP4725_H_
+#define IIO_DAC_MCP4725_H_
+
+struct mcp4725_platform_data {
+ u16 vref_mv;
+};
+
+#endif /* IIO_DAC_MCP4725_H_ */
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
new file mode 100644
index 00000000000000..7dfb10ee26698b
--- /dev/null
+++ b/include/linux/iio/driver.h
@@ -0,0 +1,31 @@
+/*
+ * Industrial I/O in kernel access map interface.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_INKERN_H_
+#define _IIO_INKERN_H_
+
+struct iio_map;
+
+/**
+ * iio_map_array_register() - tell the core about inkernel consumers
+ * @indio_dev: provider device
+ * @map: array of mappings specifying association of channel with client
+ */
+int iio_map_array_register(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+/**
+ * iio_map_array_unregister() - tell the core to remove consumer mappings for
+ * the given provider device
+ * @indio_dev: provider device
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev);
+
+#endif
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
new file mode 100644
index 00000000000000..03fa332ad2a8ce
--- /dev/null
+++ b/include/linux/iio/events.h
@@ -0,0 +1,87 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_EVENTS_H_
+#define _IIO_EVENTS_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/iio/types.h>
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id: event identifier
+ * @timestamp: best estimate of time of event occurrence (often from
+ * the interrupt handler)
+ */
+struct iio_event_data {
+ __u64 id;
+ __s64 timestamp;
+};
+
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+
+/**
+ * IIO_EVENT_CODE() - create event identifier
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @diff: Whether the event is for a differential channel or not.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @chan: Channel number for non-differential channels.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ */
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
+ ((u16)chan))
+
+
+/**
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
+ type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+
+/**
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
+
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+
+/* Event code number extraction depends on which type of event we have.
+ * Perhaps review this function in the future*/
+#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)(mask & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
+
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
+
+#endif
diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h
new file mode 100644
index 00000000000000..12ce3ee427fdd0
--- /dev/null
+++ b/include/linux/iio/frequency/ad9523.h
@@ -0,0 +1,195 @@
+/*
+ * AD9523 SPI Low Jitter Clock Generator
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_FREQUENCY_AD9523_H_
+#define IIO_FREQUENCY_AD9523_H_
+
+enum outp_drv_mode {
+ TRISTATE,
+ LVPECL_8mA,
+ LVDS_4mA,
+ LVDS_7mA,
+ HSTL0_16mA,
+ HSTL1_8mA,
+ CMOS_CONF1,
+ CMOS_CONF2,
+ CMOS_CONF3,
+ CMOS_CONF4,
+ CMOS_CONF5,
+ CMOS_CONF6,
+ CMOS_CONF7,
+ CMOS_CONF8,
+ CMOS_CONF9
+};
+
+enum ref_sel_mode {
+ NONEREVERTIVE_STAY_ON_REFB,
+ REVERT_TO_REFA,
+ SELECT_REFA,
+ SELECT_REFB,
+ EXT_REF_SEL
+};
+
+/**
+ * struct ad9523_channel_spec - Output channel configuration
+ *
+ * @channel_num: Output channel number.
+ * @divider_output_invert_en: Invert the polarity of the output clock.
+ * @sync_ignore_en: Ignore chip-level SYNC signal.
+ * @low_power_mode_en: Reduce power used in the differential output modes.
+ * @use_alt_clock_src: Channel divider uses alternative clk source.
+ * @output_dis: Disables, powers down the entire channel.
+ * @driver_mode: Output driver mode (logic level family).
+ * @divider_phase: Divider initial phase after a SYNC. Range 0..63
+ LSB = 1/2 of a period of the divider input clock.
+ * @channel_divider: 10-bit channel divider.
+ * @extended_name: Optional descriptive channel name.
+ */
+
+struct ad9523_channel_spec {
+ unsigned channel_num;
+ bool divider_output_invert_en;
+ bool sync_ignore_en;
+ bool low_power_mode_en;
+ /* CH0..CH3 VCXO, CH4..CH9 VCO2 */
+ bool use_alt_clock_src;
+ bool output_dis;
+ enum outp_drv_mode driver_mode;
+ unsigned char divider_phase;
+ unsigned short channel_divider;
+ char extended_name[16];
+};
+
+enum pll1_rzero_resistor {
+ RZERO_883_OHM,
+ RZERO_677_OHM,
+ RZERO_341_OHM,
+ RZERO_135_OHM,
+ RZERO_10_OHM,
+ RZERO_USE_EXT_RES = 8,
+};
+
+enum rpole2_resistor {
+ RPOLE2_900_OHM,
+ RPOLE2_450_OHM,
+ RPOLE2_300_OHM,
+ RPOLE2_225_OHM,
+};
+
+enum rzero_resistor {
+ RZERO_3250_OHM,
+ RZERO_2750_OHM,
+ RZERO_2250_OHM,
+ RZERO_2100_OHM,
+ RZERO_3000_OHM,
+ RZERO_2500_OHM,
+ RZERO_2000_OHM,
+ RZERO_1850_OHM,
+};
+
+enum cpole1_capacitor {
+ CPOLE1_0_PF,
+ CPOLE1_8_PF,
+ CPOLE1_16_PF,
+ CPOLE1_24_PF,
+ _CPOLE1_24_PF, /* place holder */
+ CPOLE1_32_PF,
+ CPOLE1_40_PF,
+ CPOLE1_48_PF,
+};
+
+/**
+ * struct ad9523_platform_data - platform specific information
+ *
+ * @vcxo_freq: External VCXO frequency in Hz
+ * @refa_diff_rcv_en: REFA differential/single-ended input selection.
+ * @refb_diff_rcv_en: REFB differential/single-ended input selection.
+ * @zd_in_diff_en: Zero Delay differential/single-ended input selection.
+ * @osc_in_diff_en: OSC differential/ single-ended input selection.
+ * @refa_cmos_neg_inp_en: REFA single-ended neg./pos. input enable.
+ * @refb_cmos_neg_inp_en: REFB single-ended neg./pos. input enable.
+ * @zd_in_cmos_neg_inp_en: Zero Delay single-ended neg./pos. input enable.
+ * @osc_in_cmos_neg_inp_en: OSC single-ended neg./pos. input enable.
+ * @refa_r_div: PLL1 10-bit REFA R divider.
+ * @refb_r_div: PLL1 10-bit REFB R divider.
+ * @pll1_feedback_div: PLL1 10-bit Feedback N divider.
+ * @pll1_charge_pump_current_nA: Magnitude of PLL1 charge pump current (nA).
+ * @zero_delay_mode_internal_en: Internal, external Zero Delay mode selection.
+ * @osc_in_feedback_en: PLL1 feedback path, local feedback from
+ * the OSC_IN receiver or zero delay mode
+ * @pll1_loop_filter_rzero: PLL1 Loop Filter Zero Resistor selection.
+ * @ref_mode: Reference selection mode.
+ * @pll2_charge_pump_current_nA: Magnitude of PLL2 charge pump current (nA).
+ * @pll2_ndiv_a_cnt: PLL2 Feedback N-divider, A Counter, range 0..4.
+ * @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63.
+ * @pll2_freq_doubler_en: PLL2 frequency doubler enable.
+ * @pll2_r2_div: PLL2 R2 divider, range 0..31.
+ * @pll2_vco_diff_m1: VCO1 divider, range 3..5.
+ * @pll2_vco_diff_m2: VCO2 divider, range 3..5.
+ * @rpole2: PLL2 loop filter Rpole resistor value.
+ * @rzero: PLL2 loop filter Rzero resistor value.
+ * @cpole1: PLL2 loop filter Cpole capacitor value.
+ * @rzero_bypass_en: PLL2 loop filter Rzero bypass enable.
+ * @num_channels: Array size of struct ad9523_channel_spec.
+ * @channels: Pointer to channel array.
+ * @name: Optional alternative iio device name.
+ */
+
+struct ad9523_platform_data {
+ unsigned long vcxo_freq;
+
+ /* Differential/ Single-Ended Input Configuration */
+ bool refa_diff_rcv_en;
+ bool refb_diff_rcv_en;
+ bool zd_in_diff_en;
+ bool osc_in_diff_en;
+
+ /*
+ * Valid if differential input disabled
+ * if false defaults to pos input
+ */
+ bool refa_cmos_neg_inp_en;
+ bool refb_cmos_neg_inp_en;
+ bool zd_in_cmos_neg_inp_en;
+ bool osc_in_cmos_neg_inp_en;
+
+ /* PLL1 Setting */
+ unsigned short refa_r_div;
+ unsigned short refb_r_div;
+ unsigned short pll1_feedback_div;
+ unsigned short pll1_charge_pump_current_nA;
+ bool zero_delay_mode_internal_en;
+ bool osc_in_feedback_en;
+ enum pll1_rzero_resistor pll1_loop_filter_rzero;
+
+ /* Reference */
+ enum ref_sel_mode ref_mode;
+
+ /* PLL2 Setting */
+ unsigned int pll2_charge_pump_current_nA;
+ unsigned char pll2_ndiv_a_cnt;
+ unsigned char pll2_ndiv_b_cnt;
+ bool pll2_freq_doubler_en;
+ unsigned char pll2_r2_div;
+ unsigned char pll2_vco_diff_m1; /* 3..5 */
+ unsigned char pll2_vco_diff_m2; /* 3..5 */
+
+ /* Loop Filter PLL2 */
+ enum rpole2_resistor rpole2;
+ enum rzero_resistor rzero;
+ enum cpole1_capacitor cpole1;
+ bool rzero_bypass_en;
+
+ /* Output Channel Configuration */
+ int num_channels;
+ struct ad9523_channel_spec *channels;
+
+ char name[SPI_NAME_SIZE];
+};
+
+#endif /* IIO_FREQUENCY_AD9523_H_ */
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
new file mode 100644
index 00000000000000..ffd8c8f90928cf
--- /dev/null
+++ b/include/linux/iio/frequency/adf4350.h
@@ -0,0 +1,128 @@
+/*
+ * ADF4350/ADF4351 SPI PLL driver
+ *
+ * Copyright 2012-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_PLL_ADF4350_H_
+#define IIO_PLL_ADF4350_H_
+
+/* Registers */
+#define ADF4350_REG0 0
+#define ADF4350_REG1 1
+#define ADF4350_REG2 2
+#define ADF4350_REG3 3
+#define ADF4350_REG4 4
+#define ADF4350_REG5 5
+
+/* REG0 Bit Definitions */
+#define ADF4350_REG0_FRACT(x) (((x) & 0xFFF) << 3)
+#define ADF4350_REG0_INT(x) (((x) & 0xFFFF) << 15)
+
+/* REG1 Bit Definitions */
+#define ADF4350_REG1_MOD(x) (((x) & 0xFFF) << 3)
+#define ADF4350_REG1_PHASE(x) (((x) & 0xFFF) << 15)
+#define ADF4350_REG1_PRESCALER (1 << 27)
+
+/* REG2 Bit Definitions */
+#define ADF4350_REG2_COUNTER_RESET_EN (1 << 3)
+#define ADF4350_REG2_CP_THREESTATE_EN (1 << 4)
+#define ADF4350_REG2_POWER_DOWN_EN (1 << 5)
+#define ADF4350_REG2_PD_POLARITY_POS (1 << 6)
+#define ADF4350_REG2_LDP_6ns (1 << 7)
+#define ADF4350_REG2_LDP_10ns (0 << 7)
+#define ADF4350_REG2_LDF_FRACT_N (0 << 8)
+#define ADF4350_REG2_LDF_INT_N (1 << 8)
+#define ADF4350_REG2_CHARGE_PUMP_CURR_uA(x) (((((x)-312) / 312) & 0xF) << 9)
+#define ADF4350_REG2_DOUBLE_BUFF_EN (1 << 13)
+#define ADF4350_REG2_10BIT_R_CNT(x) ((x) << 14)
+#define ADF4350_REG2_RDIV2_EN (1 << 24)
+#define ADF4350_REG2_RMULT2_EN (1 << 25)
+#define ADF4350_REG2_MUXOUT(x) ((x) << 26)
+#define ADF4350_REG2_NOISE_MODE(x) (((unsigned)(x)) << 29)
+#define ADF4350_MUXOUT_THREESTATE 0
+#define ADF4350_MUXOUT_DVDD 1
+#define ADF4350_MUXOUT_GND 2
+#define ADF4350_MUXOUT_R_DIV_OUT 3
+#define ADF4350_MUXOUT_N_DIV_OUT 4
+#define ADF4350_MUXOUT_ANALOG_LOCK_DETECT 5
+#define ADF4350_MUXOUT_DIGITAL_LOCK_DETECT 6
+
+/* REG3 Bit Definitions */
+#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
+#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
+#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
+#define ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH (1 << 23)
+
+/* REG4 Bit Definitions */
+#define ADF4350_REG4_OUTPUT_PWR(x) ((x) << 3)
+#define ADF4350_REG4_RF_OUT_EN (1 << 5)
+#define ADF4350_REG4_AUX_OUTPUT_PWR(x) ((x) << 6)
+#define ADF4350_REG4_AUX_OUTPUT_EN (1 << 8)
+#define ADF4350_REG4_AUX_OUTPUT_FUND (1 << 9)
+#define ADF4350_REG4_AUX_OUTPUT_DIV (0 << 9)
+#define ADF4350_REG4_MUTE_TILL_LOCK_EN (1 << 10)
+#define ADF4350_REG4_VCO_PWRDOWN_EN (1 << 11)
+#define ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(x) ((x) << 12)
+#define ADF4350_REG4_RF_DIV_SEL(x) ((x) << 20)
+#define ADF4350_REG4_FEEDBACK_DIVIDED (0 << 23)
+#define ADF4350_REG4_FEEDBACK_FUND (1 << 23)
+
+/* REG5 Bit Definitions */
+#define ADF4350_REG5_LD_PIN_MODE_LOW (0 << 22)
+#define ADF4350_REG5_LD_PIN_MODE_DIGITAL (1 << 22)
+#define ADF4350_REG5_LD_PIN_MODE_HIGH (3 << 22)
+
+/* Specifications */
+#define ADF4350_MAX_OUT_FREQ 4400000000ULL /* Hz */
+#define ADF4350_MIN_OUT_FREQ 137500000 /* Hz */
+#define ADF4351_MIN_OUT_FREQ 34375000 /* Hz */
+#define ADF4350_MIN_VCO_FREQ 2200000000ULL /* Hz */
+#define ADF4350_MAX_FREQ_45_PRESC 3000000000ULL /* Hz */
+#define ADF4350_MAX_FREQ_PFD 32000000 /* Hz */
+#define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */
+#define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */
+#define ADF4350_MAX_MODULUS 4095
+#define ADF4350_MAX_R_CNT 1023
+
+
+/**
+ * struct adf4350_platform_data - platform specific information
+ * @name: Optional device name.
+ * @clkin: REFin frequency in Hz.
+ * @channel_spacing: Channel spacing in Hz (influences MODULUS).
+ * @power_up_frequency: Optional, If set in Hz the PLL tunes to the desired
+ * frequency on probe.
+ * @ref_div_factor: Optional, if set the driver skips dynamic calculation
+ * and uses this default value instead.
+ * @ref_doubler_en: Enables reference doubler.
+ * @ref_div2_en: Enables reference divider.
+ * @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2.
+ * @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3.
+ * @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4.
+ * @gpio_lock_detect: Optional, if set with a valid GPIO number,
+ * pll lock state is tested upon read.
+ * If not used - set to -1.
+ */
+
+struct adf4350_platform_data {
+ char name[32];
+ unsigned long clkin;
+ unsigned long channel_spacing;
+ unsigned long long power_up_frequency;
+
+ unsigned short ref_div_factor; /* 10-bit R counter */
+ bool ref_doubler_en;
+ bool ref_div2_en;
+
+ unsigned r2_user_settings;
+ unsigned r3_user_settings;
+ unsigned r4_user_settings;
+ int gpio_lock_detect;
+};
+
+#endif /* IIO_PLL_ADF4350_H_ */
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
new file mode 100644
index 00000000000000..2a820850f284f0
--- /dev/null
+++ b/include/linux/iio/gyro/itg3200.h
@@ -0,0 +1,154 @@
+/*
+ * itg3200.h -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef I2C_ITG3200_H_
+#define I2C_ITG3200_H_
+
+#include <linux/iio/iio.h>
+
+/* Register with I2C address (34h) */
+#define ITG3200_REG_ADDRESS 0x00
+
+/* Sample rate divider
+ * Range: 0 to 255
+ * Default value: 0x00 */
+#define ITG3200_REG_SAMPLE_RATE_DIV 0x15
+
+/* Digital low pass filter settings */
+#define ITG3200_REG_DLPF 0x16
+/* DLPF full scale range */
+#define ITG3200_DLPF_FS_SEL_2000 0x18
+/* Bandwidth (Hz) and internal sample rate
+ * (kHz) of DLPF */
+#define ITG3200_DLPF_256_8 0x00
+#define ITG3200_DLPF_188_1 0x01
+#define ITG3200_DLPF_98_1 0x02
+#define ITG3200_DLPF_42_1 0x03
+#define ITG3200_DLPF_20_1 0x04
+#define ITG3200_DLPF_10_1 0x05
+#define ITG3200_DLPF_5_1 0x06
+
+#define ITG3200_DLPF_CFG_MASK 0x07
+
+/* Configuration for interrupt operations */
+#define ITG3200_REG_IRQ_CONFIG 0x17
+/* Logic level */
+#define ITG3200_IRQ_ACTIVE_LOW 0x80
+#define ITG3200_IRQ_ACTIVE_HIGH 0x00
+/* Drive type */
+#define ITG3200_IRQ_OPEN_DRAIN 0x40
+#define ITG3200_IRQ_PUSH_PULL 0x00
+/* Latch mode */
+#define ITG3200_IRQ_LATCH_UNTIL_CLEARED 0x20
+#define ITG3200_IRQ_LATCH_50US_PULSE 0x00
+/* Latch clear method */
+#define ITG3200_IRQ_LATCH_CLEAR_ANY 0x10
+#define ITG3200_IRQ_LATCH_CLEAR_STATUS 0x00
+/* Enable interrupt when device is ready */
+#define ITG3200_IRQ_DEVICE_RDY_ENABLE 0x04
+/* Enable interrupt when data is available */
+#define ITG3200_IRQ_DATA_RDY_ENABLE 0x01
+
+/* Determine the status of ITG-3200 interrupts */
+#define ITG3200_REG_IRQ_STATUS 0x1A
+/* Status of 'device is ready'-interrupt */
+#define ITG3200_IRQ_DEVICE_RDY_STATUS 0x04
+/* Status of 'data is available'-interrupt */
+#define ITG3200_IRQ_DATA_RDY_STATUS 0x01
+
+/* Sensor registers */
+#define ITG3200_REG_TEMP_OUT_H 0x1B
+#define ITG3200_REG_TEMP_OUT_L 0x1C
+#define ITG3200_REG_GYRO_XOUT_H 0x1D
+#define ITG3200_REG_GYRO_XOUT_L 0x1E
+#define ITG3200_REG_GYRO_YOUT_H 0x1F
+#define ITG3200_REG_GYRO_YOUT_L 0x20
+#define ITG3200_REG_GYRO_ZOUT_H 0x21
+#define ITG3200_REG_GYRO_ZOUT_L 0x22
+
+/* Power management */
+#define ITG3200_REG_POWER_MANAGEMENT 0x3E
+/* Reset device and internal registers to the
+ * power-up-default settings */
+#define ITG3200_RESET 0x80
+/* Enable low power sleep mode */
+#define ITG3200_SLEEP 0x40
+/* Put the corresponding gyroscope axis in standby mode */
+#define ITG3200_STANDBY_GYRO_X 0x20
+#define ITG3200_STANDBY_GYRO_Y 0x10
+#define ITG3200_STANDBY_GYRO_Z 0x08
+/* Determine the device clock source */
+#define ITG3200_CLK_INTERNAL 0x00
+#define ITG3200_CLK_GYRO_X 0x01
+#define ITG3200_CLK_GYRO_Y 0x02
+#define ITG3200_CLK_GYRO_Z 0x03
+#define ITG3200_CLK_EXT_32K 0x04
+#define ITG3200_CLK_EXT_19M 0x05
+
+
+/**
+ * struct itg3200 - device instance specific data
+ * @i2c: actual i2c_client
+ * @trig: data ready trigger from itg3200 pin
+ **/
+struct itg3200 {
+ struct i2c_client *i2c;
+ struct iio_trigger *trig;
+};
+
+enum ITG3200_SCAN_INDEX {
+ ITG3200_SCAN_TEMP,
+ ITG3200_SCAN_GYRO_X,
+ ITG3200_SCAN_GYRO_Y,
+ ITG3200_SCAN_GYRO_Z,
+ ITG3200_SCAN_ELEMENTS,
+};
+
+int itg3200_write_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 val);
+
+int itg3200_read_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 *val);
+
+
+#ifdef CONFIG_IIO_BUFFER
+
+void itg3200_remove_trigger(struct iio_dev *indio_dev);
+int itg3200_probe_trigger(struct iio_dev *indio_dev);
+
+int itg3200_buffer_configure(struct iio_dev *indio_dev);
+void itg3200_buffer_unconfigure(struct iio_dev *indio_dev);
+
+#else /* CONFIG_IIO_BUFFER */
+
+static inline void itg3200_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int itg3200_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline int itg3200_buffer_configure(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
+{
+}
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* I2C_ITG3200_H_ */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
new file mode 100644
index 00000000000000..c8f27c236d8870
--- /dev/null
+++ b/include/linux/iio/iio.h
@@ -0,0 +1,633 @@
+
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _INDUSTRIAL_IO_H_
+#define _INDUSTRIAL_IO_H_
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/iio/types.h>
+#include <linux/of.h>
+/* IIO TODO LIST */
+/*
+ * Provide means of adjusting timer accuracy.
+ * Currently assumes nano seconds.
+ */
+
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+};
+
+enum iio_shared_by {
+ IIO_SEPARATE,
+ IIO_SHARED_BY_TYPE,
+ IIO_SHARED_BY_DIR,
+ IIO_SHARED_BY_ALL
+};
+
+enum iio_endian {
+ IIO_CPU,
+ IIO_BE,
+ IIO_LE,
+};
+
+struct iio_chan_spec;
+struct iio_dev;
+
+/**
+ * struct iio_chan_spec_ext_info - Extended channel info attribute
+ * @name: Info attribute name
+ * @shared: Whether this attribute is shared between all channels.
+ * @read: Read callback for this info attribute, may be NULL.
+ * @write: Write callback for this info attribute, may be NULL.
+ * @private: Data private to the driver.
+ */
+struct iio_chan_spec_ext_info {
+ const char *name;
+ enum iio_shared_by shared;
+ ssize_t (*read)(struct iio_dev *, uintptr_t private,
+ struct iio_chan_spec const *, char *buf);
+ ssize_t (*write)(struct iio_dev *, uintptr_t private,
+ struct iio_chan_spec const *, const char *buf,
+ size_t len);
+ uintptr_t private;
+};
+
+/**
+ * struct iio_enum - Enum channel info attribute
+ * @items: An array of strings.
+ * @num_items: Length of the item array.
+ * @set: Set callback function, may be NULL.
+ * @get: Get callback function, may be NULL.
+ *
+ * The iio_enum struct can be used to implement enum style channel attributes.
+ * Enum style attributes are those which have a set of strings which map to
+ * unsigned integer values. The IIO enum helper code takes care of mapping
+ * between value and string as well as generating a "_available" file which
+ * contains a list of all available items. The set callback will be called when
+ * the attribute is updated. The last parameter is the index to the newly
+ * activated item. The get callback will be used to query the currently active
+ * item and is supposed to return the index for it.
+ */
+struct iio_enum {
+ const char * const *items;
+ unsigned int num_items;
+ int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
+ int (*get)(struct iio_dev *, const struct iio_chan_spec *);
+};
+
+ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
+ssize_t iio_enum_read(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
+ssize_t iio_enum_write(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
+ size_t len);
+
+/**
+ * IIO_ENUM() - Initialize enum extended channel attribute
+ * @_name: Attribute name
+ * @_shared: Whether the attribute is shared between all channels
+ * @_e: Pointer to an iio_enum struct
+ *
+ * This should usually be used together with IIO_ENUM_AVAILABLE()
+ */
+#define IIO_ENUM(_name, _shared, _e) \
+{ \
+ .name = (_name), \
+ .shared = (_shared), \
+ .read = iio_enum_read, \
+ .write = iio_enum_write, \
+ .private = (uintptr_t)(_e), \
+}
+
+/**
+ * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
+ * @_name: Attribute name ("_available" will be appended to the name)
+ * @_e: Pointer to an iio_enum struct
+ *
+ * Creates a read only attribute which lists all the available enum items in a
+ * space separated list. This should usually be used together with IIO_ENUM()
+ */
+#define IIO_ENUM_AVAILABLE(_name, _e) \
+{ \
+ .name = (_name "_available"), \
+ .shared = IIO_SHARED_BY_TYPE, \
+ .read = iio_enum_available_read, \
+ .private = (uintptr_t)(_e), \
+}
+
+/**
+ * struct iio_event_spec - specification for a channel event
+ * @type: Type of the event
+ * @dir: Direction of the event
+ * @mask_separate: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be registered per channel.
+ * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type.
+ * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type and
+ * direction.
+ * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by all channels.
+ */
+struct iio_event_spec {
+ enum iio_event_type type;
+ enum iio_event_direction dir;
+ unsigned long mask_separate;
+ unsigned long mask_shared_by_type;
+ unsigned long mask_shared_by_dir;
+ unsigned long mask_shared_by_all;
+};
+
+/**
+ * struct iio_chan_spec - specification of a single channel
+ * @type: What type of measurement is the channel making.
+ * @channel: What number do we wish to assign the channel.
+ * @channel2: If there is a second number for a differential
+ * channel then this is it. If modified is set then the
+ * value here specifies the modifier.
+ * @address: Driver specific identifier.
+ * @scan_index: Monotonic index to give ordering in scans when read
+ * from a buffer.
+ * @scan_type: Sign: 's' or 'u' to specify signed or unsigned
+ * realbits: Number of valid bits of data
+ * storage_bits: Realbits + padding
+ * shift: Shift right by this before masking out
+ * realbits.
+ * endianness: little or big endian
+ * repeat: Number of times real/storage bits
+ * repeats. When the repeat element is
+ * more than 1, then the type element in
+ * sysfs will show a repeat value.
+ * Otherwise, the number of repetitions is
+ * omitted.
+ * @info_mask_separate: What information is to be exported that is specific to
+ * this channel.
+ * @info_mask_shared_by_type: What information is to be exported that is shared
+ * by all channels of the same type.
+ * @info_mask_shared_by_dir: What information is to be exported that is shared
+ * by all channels of the same direction.
+ * @info_mask_shared_by_all: What information is to be exported that is shared
+ * by all channels.
+ * @event_spec: Array of events which should be registered for this
+ * channel.
+ * @num_event_specs: Size of the event_spec array.
+ * @ext_info: Array of extended info attributes for this channel.
+ * The array is NULL terminated, the last element should
+ * have its name field set to NULL.
+ * @extend_name: Allows labeling of channel attributes with an
+ * informative name. Note this has no effect on event codes
+ * etc., unlike modifiers.
+ * @datasheet_name: A name used in in-kernel mapping of channels. It should
+ * correspond to the first name that the channel is referred
+ * to by in the datasheet (e.g. IND), or the nearest
+ * possible compound name (e.g. IND-INC).
+ * @modified: Does a modifier apply to this channel. What these are
+ * depends on the channel type. Modifier is set in
+ * channel2. Examples are IIO_MOD_X for axial sensors about
+ * the 'x' axis.
+ * @indexed: Specify the channel has a numerical index. If not,
+ * the channel index number will be suppressed for sysfs
+ * attributes but not for event codes.
+ * @output: Channel is output.
+ * @differential: Channel is differential.
+ */
+struct iio_chan_spec {
+ enum iio_chan_type type;
+ int channel;
+ int channel2;
+ unsigned long address;
+ int scan_index;
+ struct {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+ } scan_type;
+ long info_mask_separate;
+ long info_mask_shared_by_type;
+ long info_mask_shared_by_dir;
+ long info_mask_shared_by_all;
+ const struct iio_event_spec *event_spec;
+ unsigned int num_event_specs;
+ const struct iio_chan_spec_ext_info *ext_info;
+ const char *extend_name;
+ const char *datasheet_name;
+ unsigned modified:1;
+ unsigned indexed:1;
+ unsigned output:1;
+ unsigned differential:1;
+};
+
+
+/**
+ * iio_channel_has_info() - Checks whether a channel supports a info attribute
+ * @chan: The channel to be queried
+ * @type: Type of the info attribute to be checked
+ *
+ * Returns true if the channels supports reporting values for the given info
+ * attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
+ enum iio_chan_info_enum type)
+{
+ return (chan->info_mask_separate & BIT(type)) |
+ (chan->info_mask_shared_by_type & BIT(type)) |
+ (chan->info_mask_shared_by_dir & BIT(type)) |
+ (chan->info_mask_shared_by_all & BIT(type));
+}
+
+#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
+ .type = IIO_TIMESTAMP, \
+ .channel = -1, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 64, \
+ .storagebits = 64, \
+ }, \
+}
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ **/
+extern ktime_t ktime_get_real(void);
+static inline s64 iio_get_time_ns(void)
+{
+ return ktime_to_ns(ktime_get_real());
+}
+
+/* Device operating modes */
+#define INDIO_DIRECT_MODE 0x01
+#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_HARDWARE 0x08
+
+#define INDIO_ALL_BUFFER_MODES \
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
+
+#define INDIO_MAX_RAW_ELEMENTS 4
+
+struct iio_trigger; /* forward declaration */
+struct iio_dev;
+
+/**
+ * struct iio_info - constant information about device
+ * @driver_module: module structure used to ensure correct
+ * ownership of chrdevs etc
+ * @event_attrs: event control attributes
+ * @attrs: general purpose device attributes
+ * @read_raw: function to request a value from the device.
+ * mask specifies which value. Note 0 means a reading of
+ * the channel in question. Return value will specify the
+ * type of value returned by the device. val and val2 will
+ * contain the elements making up the returned value.
+ * @read_raw_multi: function to return values from the device.
+ * mask specifies which value. Note 0 means a reading of
+ * the channel in question. Return value will specify the
+ * type of value returned by the device. vals pointer
+ * contain the elements making up the returned value.
+ * max_len specifies maximum number of elements
+ * vals pointer can contain. val_len is used to return
+ * length of valid elements in vals.
+ * @write_raw: function to write a value to the device.
+ * Parameters are the same as for read_raw.
+ * @write_raw_get_fmt: callback function to query the expected
+ * format/precision. If not set by the driver, write_raw
+ * returns IIO_VAL_INT_PLUS_MICRO.
+ * @read_event_config: find out if the event is enabled.
+ * @write_event_config: set if the event is enabled.
+ * @read_event_value: read a configuration value associated with the event.
+ * @write_event_value: write a configuration value for the event.
+ * @validate_trigger: function to validate the trigger when the
+ * current trigger gets changed.
+ * @update_scan_mode: function to configure device and scan buffer when
+ * channels have changed
+ * @debugfs_reg_access: function to read or write register value of device
+ * @of_xlate: function pointer to obtain channel specifier index.
+ * When #iio-cells is greater than '0', the driver could
+ * provide a custom of_xlate function that reads the
+ * *args* and returns the appropriate index in registered
+ * IIO channels array.
+ **/
+struct iio_info {
+ struct module *driver_module;
+ struct attribute_group *event_attrs;
+ const struct attribute_group *attrs;
+
+ int (*read_raw)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask);
+
+ int (*read_raw_multi)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int max_len,
+ int *vals,
+ int *val_len,
+ long mask);
+
+ int (*write_raw)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask);
+
+ int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask);
+
+ int (*read_event_config)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir);
+
+ int (*write_event_config)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state);
+
+ int (*read_event_value)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2);
+
+ int (*write_event_value)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2);
+
+ int (*validate_trigger)(struct iio_dev *indio_dev,
+ struct iio_trigger *trig);
+ int (*update_scan_mode)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+ int (*debugfs_reg_access)(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval);
+ int (*of_xlate)(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec);
+};
+
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ * @validate_scan_mask: [DRIVER] function callback to check whether a given
+ * scan mask is valid for the device.
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
+ bool (*validate_scan_mask)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+};
+
+/**
+ * struct iio_dev - industrial I/O device
+ * @id: [INTERN] used to identify device internally
+ * @modes: [DRIVER] operating modes supported by device
+ * @currentmode: [DRIVER] current operating mode
+ * @dev: [DRIVER] device structure, should be assigned a parent
+ * and owner
+ * @event_interface: [INTERN] event chrdevs associated with interrupt lines
+ * @buffer: [DRIVER] any buffer present
+ * @buffer_list: [INTERN] list of all buffers currently attached
+ * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
+ * @mlock: [INTERN] lock used to prevent simultaneous device state
+ * changes
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @masklength: [INTERN] the length of the mask established from
+ * channels
+ * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
+ * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
+ * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
+ * @trig: [INTERN] current device trigger (buffer modes)
+ * @pollfunc: [DRIVER] function run on trigger being received
+ * @channels: [DRIVER] channel specification structure table
+ * @num_channels: [DRIVER] number of channels specified in @channels.
+ * @channel_attr_list: [INTERN] keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: [INTERN] group for all attrs in base directory
+ * @name: [DRIVER] name of the device.
+ * @info: [DRIVER] callbacks and constant info from driver
+ * @info_exist_lock: [INTERN] lock to prevent use during removal
+ * @setup_ops: [DRIVER] callbacks to call before and after buffer
+ * enable/disable
+ * @chrdev: [INTERN] associated character device
+ * @groups: [INTERN] attribute groups
+ * @groupcounter: [INTERN] index of next attribute group
+ * @flags: [INTERN] file ops related flags including busy flag.
+ * @debugfs_dentry: [INTERN] device specific debugfs dentry.
+ * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ */
+struct iio_dev {
+ int id;
+
+ int modes;
+ int currentmode;
+ struct device dev;
+
+ struct iio_event_interface *event_interface;
+
+ struct iio_buffer *buffer;
+ struct list_head buffer_list;
+ int scan_bytes;
+ struct mutex mlock;
+
+ const unsigned long *available_scan_masks;
+ unsigned masklength;
+ const unsigned long *active_scan_mask;
+ bool scan_timestamp;
+ unsigned scan_index_timestamp;
+ struct iio_trigger *trig;
+ struct iio_poll_func *pollfunc;
+
+ struct iio_chan_spec const *channels;
+ int num_channels;
+
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ const char *name;
+ const struct iio_info *info;
+ struct mutex info_exist_lock;
+ const struct iio_buffer_setup_ops *setup_ops;
+ struct cdev chrdev;
+#define IIO_MAX_GROUPS 6
+ const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
+ int groupcounter;
+
+ unsigned long flags;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+#endif
+};
+
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
+int iio_device_register(struct iio_dev *indio_dev);
+void iio_device_unregister(struct iio_dev *indio_dev);
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+
+extern struct bus_type iio_bus_type;
+
+/**
+ * iio_device_put() - reference counted deallocation of struct device
+ * @indio_dev: IIO device structure containing the device
+ **/
+static inline void iio_device_put(struct iio_dev *indio_dev)
+{
+ if (indio_dev)
+ put_device(&indio_dev->dev);
+}
+
+/**
+ * dev_to_iio_dev() - Get IIO device struct from a device struct
+ * @dev: The device embedded in the IIO device
+ *
+ * Note: The device must be a IIO device, otherwise the result is undefined.
+ */
+static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
+{
+ return container_of(dev, struct iio_dev, dev);
+}
+
+/**
+ * iio_device_get() - increment reference count for the device
+ * @indio_dev: IIO device structure
+ *
+ * Returns: The passed IIO device
+ **/
+static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
+{
+ return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
+}
+
+
+/**
+ * iio_device_set_drvdata() - Set device driver data
+ * @indio_dev: IIO device structure
+ * @data: Driver specific data
+ *
+ * Allows to attach an arbitrary pointer to an IIO device, which can later be
+ * retrieved by iio_device_get_drvdata().
+ */
+static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
+{
+ dev_set_drvdata(&indio_dev->dev, data);
+}
+
+/**
+ * iio_device_get_drvdata() - Get device driver data
+ * @indio_dev: IIO device structure
+ *
+ * Returns the data previously set with iio_device_set_drvdata()
+ */
+static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+{
+ return dev_get_drvdata(&indio_dev->dev);
+}
+
+/* Can we make this smaller? */
+#define IIO_ALIGN L1_CACHE_BYTES
+struct iio_dev *iio_device_alloc(int sizeof_priv);
+
+static inline void *iio_priv(const struct iio_dev *indio_dev)
+{
+ return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
+}
+
+static inline struct iio_dev *iio_priv_to_dev(void *priv)
+{
+ return (struct iio_dev *)((char *)priv -
+ ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
+}
+
+void iio_device_free(struct iio_dev *indio_dev);
+struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
+void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
+struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
+ const char *fmt, ...);
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
+
+/**
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev: IIO device structure for device
+ **/
+static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
+{
+ return indio_dev->currentmode
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
+}
+
+/**
+ * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
+ * @indio_dev: IIO device structure for device
+ **/
+#if defined(CONFIG_DEBUG_FS)
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return indio_dev->debugfs_dentry;
+}
+#else
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return NULL;
+}
+#endif
+
+int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+ int *fract);
+
+/**
+ * IIO_DEGREE_TO_RAD() - Convert degree to rad
+ * @deg: A value in degree
+ *
+ * Returns the given value converted from degree to rad
+ */
+#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
+
+/**
+ * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+ * @g: A value in g
+ *
+ * Returns the given value converted from g to meter / second**2
+ */
+#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+
+#endif /* _INDUSTRIAL_IO_H_ */
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
new file mode 100644
index 00000000000000..fa2d01ef8f55aa
--- /dev/null
+++ b/include/linux/iio/imu/adis.h
@@ -0,0 +1,283 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __IIO_ADIS_H__
+#define __IIO_ADIS_H__
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/iio/types.h>
+
+#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
+#define ADIS_READ_REG(reg) ((reg) & 0x7f)
+
+#define ADIS_PAGE_SIZE 0x80
+#define ADIS_REG_PAGE_ID 0x00
+
+struct adis;
+
+/**
+ * struct adis_data - ADIS chip variant specific data
+ * @read_delay: SPI delay for read operations in us
+ * @write_delay: SPI delay for write operations in us
+ * @glob_cmd_reg: Register address of the GLOB_CMD register
+ * @msc_ctrl_reg: Register address of the MSC_CTRL register
+ * @diag_stat_reg: Register address of the DIAG_STAT register
+ * @status_error_msgs: Array of error messages
+ * @status_error_mask: Bitmask of errors supported by the device
+ */
+struct adis_data {
+ unsigned int read_delay;
+ unsigned int write_delay;
+
+ unsigned int glob_cmd_reg;
+ unsigned int msc_ctrl_reg;
+ unsigned int diag_stat_reg;
+
+ unsigned int self_test_mask;
+ unsigned int startup_delay;
+
+ const char * const *status_error_msgs;
+ unsigned int status_error_mask;
+
+ int (*enable_irq)(struct adis *adis, bool enable);
+
+ bool has_paging;
+};
+
+struct adis {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+
+ const struct adis_data *data;
+
+ struct mutex txrx_lock;
+ struct spi_message msg;
+ struct spi_transfer *xfer;
+ unsigned int current_page;
+ void *buffer;
+
+ uint8_t tx[10] ____cacheline_aligned;
+ uint8_t rx[4];
+};
+
+int adis_init(struct adis *adis, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct adis_data *data);
+int adis_reset(struct adis *adis);
+
+int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size);
+int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size);
+
+/**
+ * adis_write_reg_8() - Write single byte to a register
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @val: The value to write
+ */
+static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
+ uint8_t val)
+{
+ return adis_write_reg(adis, reg, val, 1);
+}
+
+/**
+ * adis_write_reg_16() - Write 2 bytes to a pair of registers
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: Value to be written
+ */
+static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t val)
+{
+ return adis_write_reg(adis, reg, val, 2);
+}
+
+/**
+ * adis_write_reg_32() - write 4 bytes to four registers
+ * @adis: The adis device
+ * @reg: The address of the lower of the four register
+ * @val: Value to be written
+ */
+static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t val)
+{
+ return adis_write_reg(adis, reg, val, 4);
+}
+
+/**
+ * adis_read_reg_16() - read 2 bytes from a 16-bit register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis_read_reg(adis, reg, &tmp, 2);
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * adis_read_reg_32() - read 4 bytes from a 32-bit register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis_read_reg(adis, reg, &tmp, 4);
+ *val = tmp;
+
+ return ret;
+}
+
+int adis_enable_irq(struct adis *adis, bool enable);
+int adis_check_status(struct adis *adis);
+
+int adis_initial_startup(struct adis *adis);
+
+int adis_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int error_mask,
+ int *val);
+
+#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan), \
+ .extend_name = name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \
+ ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits)
+
+#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \
+ ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits)
+
+#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ info_sep, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+
+#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
+
+int adis_setup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
+void adis_cleanup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev);
+
+int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
+void adis_remove_trigger(struct adis *adis);
+
+int adis_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+
+#else /* !CONFIG_IIO_ADIS_LIB_BUFFER */
+
+static inline int adis_setup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
+{
+ return 0;
+}
+
+static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis_remove_trigger(struct adis *adis)
+{
+}
+
+#define adis_update_scan_mode NULL
+
+#endif /* CONFIG_IIO_ADIS_LIB_BUFFER */
+
+#ifdef CONFIG_DEBUG_FS
+
+int adis_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval, unsigned int *readval);
+
+#else
+
+#define adis_debugfs_reg_access NULL
+
+#endif
+
+#endif
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
new file mode 100644
index 00000000000000..25eeac762e849d
--- /dev/null
+++ b/include/linux/iio/kfifo_buf.h
@@ -0,0 +1,11 @@
+#ifndef __LINUX_IIO_KFIFO_BUF_H__
+#define __LINUX_IIO_KFIFO_BUF_H__
+
+#include <linux/kfifo.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+
+struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
+void iio_kfifo_free(struct iio_buffer *r);
+
+#endif
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
new file mode 100644
index 00000000000000..1601a2a63a7291
--- /dev/null
+++ b/include/linux/iio/machine.h
@@ -0,0 +1,31 @@
+/*
+ * Industrial I/O in kernel access map definitions for board files.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IIO_MACHINE_H__
+#define __LINUX_IIO_MACHINE_H__
+
+/**
+ * struct iio_map - description of link between consumer and device channels
+ * @adc_channel_label: Label used to identify the channel on the provider.
+ * This is matched against the datasheet_name element
+ * of struct iio_chan_spec.
+ * @consumer_dev_name: Name to uniquely identify the consumer device.
+ * @consumer_channel: Unique name used to identify the channel on the
+ * consumer side.
+ * @consumer_data: Data about the channel for use by the consumer driver.
+ */
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+ void *consumer_data;
+};
+
+#endif
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
new file mode 100644
index 00000000000000..8a1d18640ab94d
--- /dev/null
+++ b/include/linux/iio/sysfs.h
@@ -0,0 +1,127 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * General attributes
+ */
+
+#ifndef _INDUSTRIAL_IO_SYSFS_H_
+#define _INDUSTRIAL_IO_SYSFS_H_
+
+struct iio_chan_spec;
+
+/**
+ * struct iio_dev_attr - iio specific device attribute
+ * @dev_attr: underlying device attribute
+ * @address: associated register address
+ * @l: list head for maintaining list of dynamically created attrs.
+ */
+struct iio_dev_attr {
+ struct device_attribute dev_attr;
+ u64 address;
+ struct list_head l;
+ struct iio_chan_spec const *c;
+};
+
+#define to_iio_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_dev_attr, dev_attr)
+
+ssize_t iio_read_const_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *len);
+
+/**
+ * struct iio_const_attr - constant device specific attribute
+ * often used for things like available modes
+ * @string: attribute string
+ * @dev_attr: underlying device attribute
+ */
+struct iio_const_attr {
+ const char *string;
+ struct device_attribute dev_attr;
+};
+
+#define to_iio_const_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_const_attr, dev_attr)
+
+/* Some attributes will be hard coded (device dependent) and not require an
+   address; in these cases, pass a negative value. */
+#define IIO_ATTR(_name, _mode, _show, _store, _addr) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .address = _addr }
+
+#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
+
+#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_vname \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
+
+#define IIO_CONST_ATTR(_name, _string) \
+ struct iio_const_attr iio_const_attr_##_name \
+ = { .string = _string, \
+ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+
+#define IIO_CONST_ATTR_NAMED(_vname, _name, _string) \
+ struct iio_const_attr iio_const_attr_##_vname \
+ = { .string = _string, \
+ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+
+/* Generic attributes of one type or another */
+
+/**
+ * IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency
+ * @_mode: sysfs file mode/permissions
+ * @_show: output method for the attribute
+ * @_store: input method for the attribute
+ **/
+#define IIO_DEV_ATTR_SAMP_FREQ(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies
+ * @_show: output method for the attribute
+ *
+ * May be mode dependent on some devices
+ **/
+#define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \
+ IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_CONST_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies
+ * @_string: frequency string for the attribute
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
+ IIO_CONST_ATTR(sampling_frequency_available, _string)
+
+/**
+ * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_show: output method for the attribute
+ **/
+#define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \
+ IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_string: integration time string for the attribute
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \
+ IIO_CONST_ATTR(integration_time_available, _string)
+
+#define IIO_DEV_ATTR_TEMP_RAW(_show) \
+ IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0)
+
+#define IIO_CONST_ATTR_TEMP_OFFSET(_string) \
+ IIO_CONST_ATTR(in_temp_offset, _string)
+
+#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
+ IIO_CONST_ATTR(in_temp_scale, _string)
+
+#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
new file mode 100644
index 00000000000000..fa76c79a52a1bf
--- /dev/null
+++ b/include/linux/iio/trigger.h
@@ -0,0 +1,149 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#ifndef _IIO_TRIGGER_H_
+#define _IIO_TRIGGER_H_
+
+#ifdef CONFIG_IIO_TRIGGER
+struct iio_subirq {
+ bool enabled;
+};
+
+/**
+ * struct iio_trigger_ops - operations structure for an iio_trigger.
+ * @owner: used to monitor usage count of the trigger.
+ * @set_trigger_state: switch on/off the trigger on demand
+ * @try_reenable: function to reenable the trigger when the
+ * use count is zero (may be NULL)
+ * @validate_device: function to validate the device when the
+ * current trigger gets changed.
+ *
+ * This is typically static const within a driver and shared by
+ * instances of a given device.
+ **/
+struct iio_trigger_ops {
+ struct module *owner;
+ int (*set_trigger_state)(struct iio_trigger *trig, bool state);
+ int (*try_reenable)(struct iio_trigger *trig);
+ int (*validate_device)(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
+};
+
+
+/**
+ * struct iio_trigger - industrial I/O trigger device
+ * @ops: [DRIVER] operations structure
+ * @id: [INTERN] unique id number
+ * @name: [DRIVER] unique name
+ * @dev: [DRIVER] associated device (if relevant)
+ * @list: [INTERN] used in maintenance of global trigger list
+ * @alloc_list: [DRIVER] used for driver specific trigger list
+ * @use_count: use count for the trigger
+ * @subirq_chip: [INTERN] associate 'virtual' irq chip.
+ * @subirq_base: [INTERN] base number for irqs provided by trigger.
+ * @subirqs: [INTERN] information about the 'child' irqs.
+ * @pool: [INTERN] bitmap of irqs currently in use.
+ * @pool_lock: [INTERN] protection of the irq pool.
+ **/
+struct iio_trigger {
+ const struct iio_trigger_ops *ops;
+ int id;
+ const char *name;
+ struct device dev;
+
+ struct list_head list;
+ struct list_head alloc_list;
+ atomic_t use_count;
+
+ struct irq_chip subirq_chip;
+ int subirq_base;
+
+ struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
+ unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
+ struct mutex pool_lock;
+};
+
+
+static inline struct iio_trigger *to_iio_trigger(struct device *d)
+{
+ return container_of(d, struct iio_trigger, dev);
+}
+
+static inline void iio_trigger_put(struct iio_trigger *trig)
+{
+ module_put(trig->ops->owner);
+ put_device(&trig->dev);
+}
+
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
+{
+ get_device(&trig->dev);
+ __module_get(trig->ops->owner);
+
+ return trig;
+}
+
+/**
+ * iio_trigger_set_drvdata() - Set trigger driver data
+ * @trig: IIO trigger structure
+ * @data: Driver specific data
+ *
+ * Allows attaching an arbitrary pointer to an IIO trigger, which can later be
+ * retrieved by iio_trigger_get_drvdata().
+ */
+static inline void iio_trigger_set_drvdata(struct iio_trigger *trig, void *data)
+{
+ dev_set_drvdata(&trig->dev, data);
+}
+
+/**
+ * iio_trigger_get_drvdata() - Get trigger driver data
+ * @trig: IIO trigger structure
+ *
+ * Returns the data previously set with iio_trigger_set_drvdata()
+ */
+static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
+{
+ return dev_get_drvdata(&trig->dev);
+}
+
+/**
+ * iio_trigger_register() - register a trigger with the IIO core
+ * @trig_info: trigger to be registered
+ **/
+int iio_trigger_register(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_unregister() - unregister a trigger from the core
+ * @trig_info: trigger to be unregistered
+ **/
+void iio_trigger_unregister(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_poll() - called on a trigger occurring
+ * @trig: trigger which occurred
+ *
+ * Typically called in relevant hardware interrupt handler.
+ **/
+void iio_trigger_poll(struct iio_trigger *trig);
+void iio_trigger_poll_chained(struct iio_trigger *trig);
+
+irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
+
+__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+void iio_trigger_free(struct iio_trigger *trig);
+
+#else
+struct iio_trigger;
+struct iio_trigger_ops;
+#endif
+#endif /* _IIO_TRIGGER_H_ */
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
new file mode 100644
index 00000000000000..c4f8c7409666e3
--- /dev/null
+++ b/include/linux/iio/trigger_consumer.h
@@ -0,0 +1,63 @@
+/* The industrial I/O core, trigger consumer functions
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__
+#define __LINUX_IIO_TRIGGER_CONSUMER_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+struct iio_dev;
+struct iio_trigger;
+
+/**
+ * struct iio_poll_func - poll function pair
+ *
+ * @indio_dev: data specific to device (passed into poll func)
+ * @h: the function that is actually run on trigger
+ * @thread: threaded interrupt part
+ * @type: the type of interrupt (basically if oneshot)
+ * @name: name used to identify the trigger consumer.
+ * @irq: the corresponding irq as allocated from the
+ * trigger pool
+ * @timestamp: some devices need a timestamp grabbed as soon
+ * as possible after the trigger - hence handler
+ * passes it via here.
+ **/
+struct iio_poll_func {
+ struct iio_dev *indio_dev;
+ irqreturn_t (*h)(int irq, void *p);
+ irqreturn_t (*thread)(int irq, void *p);
+ int type;
+ char *name;
+ int irq;
+ s64 timestamp;
+};
+
+
+struct iio_poll_func
+*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ int type,
+ struct iio_dev *indio_dev,
+ const char *fmt,
+ ...);
+void iio_dealloc_pollfunc(struct iio_poll_func *pf);
+irqreturn_t iio_pollfunc_store_time(int irq, void *p);
+
+void iio_trigger_notify_done(struct iio_trigger *trig);
+
+/*
+ * Two functions for common case where all that happens is a pollfunc
+ * is attached and detached from a trigger
+ */
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
+
+#endif
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
new file mode 100644
index 00000000000000..c378ebec605ec6
--- /dev/null
+++ b/include/linux/iio/triggered_buffer.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
+#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+
+#include <linux/interrupt.h>
+
+struct iio_dev;
+struct iio_buffer_setup_ops;
+
+int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*pollfunc_bh)(int irq, void *p),
+ irqreturn_t (*pollfunc_th)(int irq, void *p),
+ const struct iio_buffer_setup_ops *setup_ops);
+void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
+
+#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
new file mode 100644
index 00000000000000..4a2af8adf87461
--- /dev/null
+++ b/include/linux/iio/types.h
@@ -0,0 +1,94 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TYPES_H_
+#define _IIO_TYPES_H_
+
+enum iio_chan_type {
+ IIO_VOLTAGE,
+ IIO_CURRENT,
+ IIO_POWER,
+ IIO_ACCEL,
+ IIO_ANGL_VEL,
+ IIO_MAGN,
+ IIO_LIGHT,
+ IIO_INTENSITY,
+ IIO_PROXIMITY,
+ IIO_TEMP,
+ IIO_INCLI,
+ IIO_ROT,
+ IIO_ANGL,
+ IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
+ IIO_ALTVOLTAGE,
+ IIO_CCT,
+ IIO_PRESSURE,
+ IIO_HUMIDITYRELATIVE,
+};
+
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_AND_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+ IIO_MOD_ROOT_SUM_SQUARED_X_Y,
+ IIO_MOD_SUM_SQUARED_X_Y_Z,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_RED,
+ IIO_MOD_LIGHT_GREEN,
+ IIO_MOD_LIGHT_BLUE,
+ IIO_MOD_QUATERNION,
+ IIO_MOD_TEMP_AMBIENT,
+ IIO_MOD_TEMP_OBJECT,
+ IIO_MOD_NORTH_MAGN,
+ IIO_MOD_NORTH_TRUE,
+ IIO_MOD_NORTH_MAGN_TILT_COMP,
+ IIO_MOD_NORTH_TRUE_TILT_COMP
+};
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
+
+enum iio_event_info {
+ IIO_EV_INFO_ENABLE,
+ IIO_EV_INFO_VALUE,
+ IIO_EV_INFO_HYSTERESIS,
+ IIO_EV_INFO_PERIOD,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
+#define IIO_VAL_INT 1
+#define IIO_VAL_INT_PLUS_MICRO 2
+#define IIO_VAL_INT_PLUS_NANO 3
+#define IIO_VAL_INT_PLUS_MICRO_DB 4
+#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_FRACTIONAL 10
+#define IIO_VAL_FRACTIONAL_LOG2 11
+
+#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/input/akm8975.h b/include/linux/input/akm8975.h
new file mode 100644
index 00000000000000..e709983f103431
--- /dev/null
+++ b/include/linux/input/akm8975.h
@@ -0,0 +1,60 @@
+/*
+ * Partly modified 2012 Sony Mobile Communications AB.
+ */
+/*
+ * Definitions for akm8975 compass chip.
+ */
+#ifndef AKM8975_H
+#define AKM8975_H
+#define AKM8975_I2C_NAME "akm8975"
+/* \name AK8975 operation mode
+ \anchor AK8975_Mode
+ Defines an operation mode of the AK8975.
+*/
+#define AK8975_MODE_SNG_MEASURE 0x01
+#define AK8975_MODE_SELF_TEST 0x08
+#define AK8975_MODE_FUSE_ACCESS 0x0F
+#define AK8975_MODE_POWERDOWN 0x00
+#define SENSOR_DATA_SIZE 8 /* Rx buffer size */
+#define RWBUF_SIZE 16 /* Read/Write buffer size.*/
+#define FUSEROM_SIZE 3 /* Read only fuse ROM area size. */
+/* \name AK8975 register address
+ \anchor AK8975_REG
+ Defines a register address of the AK8975.
+*/
+#define AK8975_REG_WIA 0x00
+#define AK8975_REG_INFO 0x01
+#define AK8975_REG_ST1 0x02
+#define AK8975_REG_HXL 0x03
+#define AK8975_REG_HXH 0x04
+#define AK8975_REG_HYL 0x05
+#define AK8975_REG_HYH 0x06
+#define AK8975_REG_HZL 0x07
+#define AK8975_REG_HZH 0x08
+#define AK8975_REG_ST2 0x09
+#define AK8975_REG_CNTL 0x0A
+#define AK8975_REG_RSV 0x0B
+#define AK8975_REG_ASTC 0x0C
+#define AK8975_REG_TS1 0x0D
+#define AK8975_REG_TS2 0x0E
+#define AK8975_REG_I2CDIS 0x0F
+#define AK8975_DEVICE_ID 0x48
+#define AK8975_ST1_NORMAL 0x00
+#define AK8975_ST1_DATA_READY 0x01
+#define UCHAR_MIN 0
+#define UCHAR_MAX 255
+/* \name AK8975 fuse-rom address
+ \anchor AK8975_FUSE
+ Defines a read-only address of the fuse ROM of the AK8975.
+*/
+#define AK8975_FUSE_ASAX 0x10
+#define AK8975_FUSE_ASAY 0x11
+#define AK8975_FUSE_ASAZ 0x12
+
+struct akm8975_platform_data {
+ int (*setup)(void);
+ void (*shutdown)(void);
+ void (*hw_config)(int enable);
+ void (*power_mode)(int enable);
+};
+#endif
diff --git a/include/linux/input/mpu6050.h b/include/linux/input/mpu6050.h
new file mode 100644
index 00000000000000..09e57036fb9f0d
--- /dev/null
+++ b/include/linux/input/mpu6050.h
@@ -0,0 +1,66 @@
+/*
+ * INVENSENSE MPU6050 driver
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Kishore Kadiyala <kishore.kadiyala@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_MPU6050_H
+#define _LINUX_MPU6050_H
+
+#define MPU6050_NAME "mpu6050"
+
+/* Sensor features enable flags */
+#define MPU6050_PASS_THROUGH_EN (1 << 0)
+
+/* Accelerometer operational modes */
+#define MPU605_MODE_FF 0 /*Free Fall mode*/
+#define MPU605_MODE_MD 1 /*Motion Detection mode*/
+#define MPU605_MODE_ZD 2 /*Zero Motion Detection mode*/
+
+/* Accelerometer full scale range */
+#define MPU6050_RANGE_2G 0
+#define MPU6050_RANGE_4G 1
+#define MPU6050_RANGE_8G 2
+#define MPU6050_RANGE_16G 3
+
+/* Gyroscope full scale range */
+#define MPU6050_GYRO_FSR_250 0
+#define MPU6050_GYRO_FSR_500 1
+#define MPU6050_GYRO_FSR_1000 2
+#define MPU6050_GYRO_FSR_2000 3
+
+/**
+ * struct mpu6050_platform_data - MPU6050 Platform data
+ * @fifo_mode: FIFO Mode enable/disable
+ * @flags: sensor feature flags (e.g. MPU6050_PASS_THROUGH_EN)
+ * @x_axis: X axis configuration (presumably orientation mapping -- verify with driver)
+ * @y_axis: Y axis configuration (presumably orientation mapping -- verify with driver)
+ * @z_axis: Z axis configuration (presumably orientation mapping -- verify with driver)
+ * @accel_fsr: accelerometer full scale range (MPU6050_RANGE_*)
+ * @gyro_fsr: gyroscope full scale range (MPU6050_GYRO_FSR_*)
+ */
+
+struct mpu6050_platform_data {
+ uint8_t fifo_mode;
+ uint8_t flags;
+ int x_axis;
+ int y_axis;
+ int z_axis;
+ uint8_t accel_fsr;
+ uint8_t gyro_fsr;
+};
+
+#endif
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 00000000000000..440f4b3b0a4ad8
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,376 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+struct ion_handle;
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+ are at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
+ at mmap time, if this is set
+ caches must be managed manually */
+
+#ifdef __KERNEL__
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's that would otherwise have to be converted. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for heap. When allocating higher numbers
+ * will be allocated from first. At allocation these are passed
+ * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and return it
+ *
+ * @dev: the global ion device
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* __KERNEL__ */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ struct ion_handle *handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ struct ion_handle *handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
+ *
+ * Deprecated in favor of using the dma_buf api's correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary should be used after touching a cached buffer from the cpu,
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _LINUX_ION_H */
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 00000000000000..856a5850217b24
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ * Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION 1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed. A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+ /* should be KEYCHORD_VERSION */
+ __u16 version;
+ /*
+ * client specified ID, returned from read()
+ * when this keychord is pressed.
+ */
+ __u16 id;
+
+ /* number of keycodes in this keychord */
+ __u16 count;
+
+ /* variable length array of keycodes */
+ __u16 keycodes[];
+};
+
+#endif /* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 00000000000000..a2ac49e5b684c7
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+ int (*reset_fn)(void);
+ int *keys_up;
+ int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 00000000000000..594bc591f256e4
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+ struct device *dev;
+
+ struct regmap *regmap;
+
+ int gpio_hwen;
+ int irq;
+
+ unsigned have_als:1;
+ unsigned have_backlights:1;
+ unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+ struct lm3533 *lm3533;
+ struct device *dev;
+ int id;
+};
+
+struct lm3533_als_platform_data {
+ unsigned pwm_mode:1; /* PWM input mode (default analog) */
+ u8 r_select; /* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+ char *name;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 default_brightness; /* 0 - 255 */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+ char *name;
+ const char *default_trigger;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+ LM3533_BOOST_FREQ_500KHZ,
+ LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+ LM3533_BOOST_OVP_16V,
+ LM3533_BOOST_OVP_24V,
+ LM3533_BOOST_OVP_32V,
+ LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+ int gpio_hwen;
+
+ enum lm3533_boost_ovp boost_ovp;
+ enum lm3533_boost_freq boost_freq;
+
+ struct lm3533_als_platform_data *als;
+
+ struct lm3533_bl_platform_data *backlights;
+ int num_backlights;
+
+ struct lm3533_led_platform_data *leds;
+ int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+ u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_LM3533_H */
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 00000000000000..ca60fbdec2f3b0
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID XT_OWNER_UID
+#define XT_QTAGUID_GID XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 00000000000000..eadc6903314e77
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 1 << 0,
+ XT_QUOTA_GROW = 1 << 1,
+ XT_QUOTA_PACKET = 1 << 2,
+ XT_QUOTA_NO_CHANGE = 1 << 3,
+ XT_QUOTA_MASK = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+ char name[15];
+ u_int8_t flags;
+
+ /* Comparison-invariant */
+ aligned_u64 quota;
+
+ /* Used internally by the kernel */
+ struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/persistent_ram.h b/include/linux/persistent_ram.h
new file mode 100644
index 00000000000000..f41e2086c645a3
--- /dev/null
+++ b/include/linux/persistent_ram.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_PERSISTENT_RAM_H__
+#define __LINUX_PERSISTENT_RAM_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct persistent_ram_buffer;
+
+struct persistent_ram_descriptor {
+ const char *name;
+ phys_addr_t size;
+};
+
+struct persistent_ram {
+ phys_addr_t start;
+ phys_addr_t size;
+
+ int num_descs;
+ struct persistent_ram_descriptor *descs;
+
+ struct list_head node;
+};
+
+struct persistent_ram_zone {
+ struct list_head node;
+ void *vaddr;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
+
+ /* ECC correction */
+ bool ecc;
+ char *par_buffer;
+ char *par_header;
+ struct rs_control *rs_decoder;
+ int corrected_bytes;
+ int bad_blocks;
+ int ecc_block_size;
+ int ecc_size;
+ int ecc_symsize;
+ int ecc_poly;
+
+ char *old_log;
+ size_t old_log_size;
+ size_t old_log_footer_size;
+ bool early;
+};
+
+int persistent_ram_early_init(struct persistent_ram *ram);
+
+struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
+ bool ecc);
+
+int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
+ unsigned int count);
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
+void *persistent_ram_old(struct persistent_ram_zone *prz);
+void persistent_ram_free_old(struct persistent_ram_zone *prz);
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len);
+
+#endif
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
new file mode 100644
index 00000000000000..bd712bd4b94ec3
--- /dev/null
+++ b/include/linux/platform_data/ad5449.h
@@ -0,0 +1,40 @@
+/*
+ * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
+ * Converter driver.
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
+#define __LINUX_PLATFORM_DATA_AD5449_H__
+
+/**
+ * enum ad5449_sdo_mode - AD5449 SDO pin configuration
+ * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
+ * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with reduced drive strength.
+ * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
+ * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
+ * read back from the device.
+ */
+enum ad5449_sdo_mode {
+ AD5449_SDO_DRIVE_FULL = 0x0,
+ AD5449_SDO_DRIVE_WEAK = 0x1,
+ AD5449_SDO_OPEN_DRAIN = 0x2,
+ AD5449_SDO_DISABLED = 0x3,
+};
+
+/**
+ * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
+ * @sdo_mode: SDO pin mode
+ * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
+ * outputs to midscale (true) or to zero scale (false).
+ */
+struct ad5449_platform_data {
+ enum ad5449_sdo_mode sdo_mode;
+ bool hardware_clear_to_midscale;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
new file mode 100644
index 00000000000000..a5a1cb751874d1
--- /dev/null
+++ b/include/linux/platform_data/ad5755.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
+#define __LINUX_PLATFORM_DATA_AD5755_H__
+
+enum ad5755_mode {
+ AD5755_MODE_VOLTAGE_0V_5V = 0,
+ AD5755_MODE_VOLTAGE_0V_10V = 1,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
+ AD5755_MODE_CURRENT_4mA_20mA = 4,
+ AD5755_MODE_CURRENT_0mA_20mA = 5,
+ AD5755_MODE_CURRENT_0mA_24mA = 6,
+};
+
+enum ad5755_dc_dc_phase {
+ AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
+ AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
+ AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
+ AD5755_DC_DC_PHASE_90_DEGREE = 3,
+};
+
+enum ad5755_dc_dc_freq {
+ AD5755_DC_DC_FREQ_250kHZ = 0,
+ AD5755_DC_DC_FREQ_410kHZ = 1,
+ AD5755_DC_DC_FREQ_650kHZ = 2,
+};
+
+enum ad5755_dc_dc_maxv {
+ AD5755_DC_DC_MAXV_23V = 0,
+ AD5755_DC_DC_MAXV_24V5 = 1,
+ AD5755_DC_DC_MAXV_27V = 2,
+ AD5755_DC_DC_MAXV_29V5 = 3,
+};
+
+enum ad5755_slew_rate {
+ AD5755_SLEW_RATE_64k = 0,
+ AD5755_SLEW_RATE_32k = 1,
+ AD5755_SLEW_RATE_16k = 2,
+ AD5755_SLEW_RATE_8k = 3,
+ AD5755_SLEW_RATE_4k = 4,
+ AD5755_SLEW_RATE_2k = 5,
+ AD5755_SLEW_RATE_1k = 6,
+ AD5755_SLEW_RATE_500 = 7,
+ AD5755_SLEW_RATE_250 = 8,
+ AD5755_SLEW_RATE_125 = 9,
+ AD5755_SLEW_RATE_64 = 10,
+ AD5755_SLEW_RATE_32 = 11,
+ AD5755_SLEW_RATE_16 = 12,
+ AD5755_SLEW_RATE_8 = 13,
+ AD5755_SLEW_RATE_4 = 14,
+ AD5755_SLEW_RATE_0_5 = 15,
+};
+
+enum ad5755_slew_step_size {
+ AD5755_SLEW_STEP_SIZE_1 = 0,
+ AD5755_SLEW_STEP_SIZE_2 = 1,
+ AD5755_SLEW_STEP_SIZE_4 = 2,
+ AD5755_SLEW_STEP_SIZE_8 = 3,
+ AD5755_SLEW_STEP_SIZE_16 = 4,
+ AD5755_SLEW_STEP_SIZE_32 = 5,
+ AD5755_SLEW_STEP_SIZE_64 = 6,
+ AD5755_SLEW_STEP_SIZE_128 = 7,
+ AD5755_SLEW_STEP_SIZE_256 = 8,
+};
+
+/**
+ * struct ad5755_platform_data - AD5755 DAC driver platform data
+ * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
+ * compensation resistor is used.
+ * @dc_dc_phase: DC-DC converter phase.
+ * @dc_dc_freq: DC-DC converter frequency.
+ * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
+ * @dac.mode: The mode to be used for the DAC output.
+ * @dac.ext_current_sense_resistor: Whether an external current sense resistor
+ * is used.
+ * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
+ * @dac.slew.enable: Whether to enable digital slew.
+ * @dac.slew.rate: Slew rate of the digital slew.
+ * @dac.slew.step_size: Slew step size of the digital slew.
+ **/
+struct ad5755_platform_data {
+ bool ext_dc_dc_compenstation_resistor;
+ enum ad5755_dc_dc_phase dc_dc_phase;
+ enum ad5755_dc_dc_freq dc_dc_freq;
+ enum ad5755_dc_dc_maxv dc_dc_maxv;
+
+ struct {
+ enum ad5755_mode mode;
+ bool ext_current_sense_resistor;
+ bool enable_voltage_overrange;
+ struct {
+ bool enable;
+ enum ad5755_slew_rate rate;
+ enum ad5755_slew_step_size step_size;
+ } slew;
+ } dac[4];
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h
new file mode 100644
index 00000000000000..eabfdcb26992fb
--- /dev/null
+++ b/include/linux/platform_data/ad7266.h
@@ -0,0 +1,54 @@
+/*
+ * AD7266/65 SPI ADC driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_ADC_AD7266_H__
+#define __IIO_ADC_AD7266_H__
+
+/**
+ * enum ad7266_range - AD7266 reference voltage range
+ * @AD7266_RANGE_VREF: Device is configured for input range 0V - VREF
+ * (RANGE pin set to low)
+ * @AD7266_RANGE_2VREF: Device is configured for input range 0V - 2VREF
+ * (RANGE pin set to high)
+ */
+enum ad7266_range {
+ AD7266_RANGE_VREF,
+ AD7266_RANGE_2VREF,
+};
+
+/**
+ * enum ad7266_mode - AD7266 sample mode
+ * @AD7266_MODE_DIFF: Device is configured for full differential mode
+ * (SGL/DIFF pin set to low, AD0 pin set to low)
+ * @AD7266_MODE_PSEUDO_DIFF: Device is configured for pseudo differential mode
+ * (SGL/DIFF pin set to low, AD0 pin set to high)
+ * @AD7266_MODE_SINGLE_ENDED: Device is configured for single-ended mode
+ * (SGL/DIFF pin set to high)
+ */
+enum ad7266_mode {
+ AD7266_MODE_DIFF,
+ AD7266_MODE_PSEUDO_DIFF,
+ AD7266_MODE_SINGLE_ENDED,
+};
+
+/**
+ * struct ad7266_platform_data - Platform data for the AD7266 driver
+ * @range: Reference voltage range the device is configured for
+ * @mode: Sample mode the device is configured for
+ * @fixed_addr: Whether the address pins are hard-wired
+ * @addr_gpios: GPIOs used for controlling the address pins, only used if
+ * fixed_addr is set to false.
+ */
+struct ad7266_platform_data {
+ enum ad7266_range range;
+ enum ad7266_mode mode;
+ bool fixed_addr;
+ unsigned int addr_gpios[3];
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h
new file mode 100644
index 00000000000000..bbd89fa51188e6
--- /dev/null
+++ b/include/linux/platform_data/ad7291.h
@@ -0,0 +1,12 @@
+#ifndef __IIO_AD7291_H__
+#define __IIO_AD7291_H__
+
+/**
+ * struct ad7291_platform_data - AD7291 platform data
+ * @use_external_ref: Whether to use an external or internal reference voltage
+ */
+struct ad7291_platform_data {
+ bool use_external_ref;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
new file mode 100644
index 00000000000000..fbf8adf1363a18
--- /dev/null
+++ b/include/linux/platform_data/ad7298.h
@@ -0,0 +1,20 @@
+/*
+ * AD7298 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
+#define __LINUX_PLATFORM_DATA_AD7298_H__
+
+/**
+ * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
+ * @ext_ref: Whether to use an external reference voltage.
+ **/
+struct ad7298_platform_data {
+ bool ext_ref;
+};
+
+#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
new file mode 100644
index 00000000000000..de6a7a6b4bbf89
--- /dev/null
+++ b/include/linux/platform_data/ad7303.h
@@ -0,0 +1,21 @@
+/*
+ * Analog Devices AD7303 DAC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_ADC_AD7303_H__
+#define __IIO_ADC_AD7303_H__
+
+/**
+ * struct ad7303_platform_data - AD7303 platform data
+ * @use_external_ref: If set to true use an external voltage reference connected
+ * to the REF pin, otherwise use the internal reference derived from Vdd.
+ */
+struct ad7303_platform_data {
+ bool use_external_ref;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7791.h b/include/linux/platform_data/ad7791.h
new file mode 100644
index 00000000000000..f9e4db1b82aec7
--- /dev/null
+++ b/include/linux/platform_data/ad7791.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_PLATFORM_DATA_AD7791__
+#define __LINUX_PLATFORM_DATA_AD7791__
+
+/**
+ * struct ad7791_platform_data - AD7791 device platform data
+ * @buffered: If set to true configure the device for buffered input mode.
+ * @burnout_current: If set to true the 100 nA burnout current is enabled.
+ * @unipolar: If set to true sample in unipolar mode, if set to false sample in
+ * bipolar mode.
+ */
+struct ad7791_platform_data {
+ bool buffered;
+ bool burnout_current;
+ bool unipolar;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h
new file mode 100644
index 00000000000000..7ea6751aae6de4
--- /dev/null
+++ b/include/linux/platform_data/ad7793.h
@@ -0,0 +1,112 @@
+/*
+ * AD7792/AD7793 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD7793_H__
+#define __LINUX_PLATFORM_DATA_AD7793_H__
+
+/**
+ * enum ad7793_clock_source - AD7793 clock source selection
+ * @AD7793_CLK_SRC_INT: Internal 64 kHz clock, not available at the CLK pin.
+ * @AD7793_CLK_SRC_INT_CO: Internal 64 kHz clock, available at the CLK pin.
+ * @AD7793_CLK_SRC_EXT: Use external clock.
+ * @AD7793_CLK_SRC_EXT_DIV2: Use external clock divided by 2.
+ */
+enum ad7793_clock_source {
+ AD7793_CLK_SRC_INT,
+ AD7793_CLK_SRC_INT_CO,
+ AD7793_CLK_SRC_EXT,
+ AD7793_CLK_SRC_EXT_DIV2,
+};
+
+/**
+ * enum ad7793_bias_voltage - AD7793 bias voltage selection
+ * @AD7793_BIAS_VOLTAGE_DISABLED: Bias voltage generator disabled
+ * @AD7793_BIAS_VOLTAGE_AIN1: Bias voltage connected to AIN1(-).
+ * @AD7793_BIAS_VOLTAGE_AIN2: Bias voltage connected to AIN2(-).
+ * @AD7793_BIAS_VOLTAGE_AIN3: Bias voltage connected to AIN3(-).
+ * Only valid for AD7795/AD7796.
+ */
+enum ad7793_bias_voltage {
+ AD7793_BIAS_VOLTAGE_DISABLED,
+ AD7793_BIAS_VOLTAGE_AIN1,
+ AD7793_BIAS_VOLTAGE_AIN2,
+ AD7793_BIAS_VOLTAGE_AIN3,
+};
+
+/**
+ * enum ad7793_refsel - AD7793 reference voltage selection
+ * @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+)
+ * and REFIN1(-).
+ * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and
+ * REFIN2(-). Only valid for AD7795/AD7796.
+ * @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference.
+ */
+enum ad7793_refsel {
+ AD7793_REFSEL_REFIN1 = 0,
+ AD7793_REFSEL_REFIN2 = 1,
+ AD7793_REFSEL_INTERNAL = 2,
+};
+
+/**
+ * enum ad7793_current_source_direction - AD7793 excitation current direction
+ * @AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2: Current source IEXC1 connected to pin
+ * IOUT1, current source IEXC2 connected to pin IOUT2.
+ * @AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1: Current source IEXC2 connected to pin
+ * IOUT1, current source IEXC1 connected to pin IOUT2.
+ * @AD7793_IEXEC1_IEXEC2_IOUT1: Both current sources connected to pin IOUT1.
+ * Only valid when the current sources are set to 10 uA or 210 uA.
+ * @AD7793_IEXEC1_IEXEC2_IOUT2: Both current sources connected to Pin IOUT2.
+ * Only valid when the current sources are set to 10 uA or 210 uA.
+ */
+enum ad7793_current_source_direction {
+ AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2 = 0,
+ AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1 = 1,
+ AD7793_IEXEC1_IEXEC2_IOUT1 = 2,
+ AD7793_IEXEC1_IEXEC2_IOUT2 = 3,
+};
+
+/**
+ * enum ad7793_excitation_current - AD7793 excitation current selection
+ * @AD7793_IX_DISABLED: Excitation current Disabled.
+ * @AD7793_IX_10uA: Enable 10 micro-ampere excitation current.
+ * @AD7793_IX_210uA: Enable 210 micro-ampere excitation current.
+ * @AD7793_IX_1mA: Enable 1 milli-Ampere excitation current.
+ */
+enum ad7793_excitation_current {
+ AD7793_IX_DISABLED = 0,
+ AD7793_IX_10uA = 1,
+ AD7793_IX_210uA = 2,
+ AD7793_IX_1mA = 3,
+};
+
+/**
+ * struct ad7793_platform_data - AD7793 platform data
+ * @clock_src: Clock source selection
+ * @burnout_current: If set to true the 100nA burnout current is enabled.
+ * @boost_enable: Enable boost for the bias voltage generator.
+ * @buffered: If set to true configure the device for buffered input mode.
+ * @unipolar: If set to true sample in unipolar mode, if set to false sample in
+ * bipolar mode.
+ * @refsel: Reference voltage selection
+ * @bias_voltage: Bias voltage selection
+ * @exitation_current: Excitation current selection
+ * @current_source_direction: Excitation current direction selection
+ */
+struct ad7793_platform_data {
+ enum ad7793_clock_source clock_src;
+ bool burnout_current;
+ bool boost_enable;
+ bool buffered;
+ bool unipolar;
+
+ enum ad7793_refsel refsel;
+ enum ad7793_bias_voltage bias_voltage;
+ enum ad7793_excitation_current exitation_current;
+ enum ad7793_current_source_direction current_source_direction;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_AD7793_H__ */
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
new file mode 100644
index 00000000000000..1e06eac3174dd6
--- /dev/null
+++ b/include/linux/platform_data/ad7887.h
@@ -0,0 +1,26 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_ADC_AD7887_H_
+#define IIO_ADC_AD7887_H_
+
+/**
+ * struct ad7887_platform_data - AD7887 ADC driver platform data
+ * @en_dual: Whether to use dual channel mode. If set to true AIN1 becomes the
+ * second input channel, and Vref is internally connected to Vdd. If set to
+ * false the device is used in single channel mode and AIN1/Vref is used as
+ * VREF input.
+ * @use_onchip_ref: Whether to use the onchip reference. If set to true the
+ * internal 2.5V reference is used. If set to false a external reference is
+ * used.
+ */
+struct ad7887_platform_data {
+ bool en_dual;
+ bool use_onchip_ref;
+};
+
+#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h
new file mode 100644
index 00000000000000..a81766cae230eb
--- /dev/null
+++ b/include/linux/platform_data/adau17x1.h
@@ -0,0 +1,109 @@
+/*
+ * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs
+ *
+ * Copyright 2011-2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__
+#define __LINUX_PLATFORM_DATA_ADAU17X1_H__
+
+/**
+ * enum adau17x1_micbias_voltage - Microphone bias voltage
+ * @ADAU17X1_MICBIAS_0_90_AVDD: 0.9 * AVDD
+ * @ADAU17X1_MICBIAS_0_65_AVDD: 0.65 * AVDD
+ */
+enum adau17x1_micbias_voltage {
+ ADAU17X1_MICBIAS_0_90_AVDD = 0,
+ ADAU17X1_MICBIAS_0_65_AVDD = 1,
+};
+
+/**
+ * enum adau1761_digmic_jackdet_pin_mode - Configuration of the JACKDET/MICIN pin
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: Disable the pin
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: Configure the pin for usage as
+ * digital microphone input.
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: Configure the pin for jack
+ * insertion detection.
+ */
+enum adau1761_digmic_jackdet_pin_mode {
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE,
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC,
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT,
+};
+
+/**
+ * adau1761_jackdetect_debounce_time - Jack insertion detection debounce time
+ * @ADAU1761_JACKDETECT_DEBOUNCE_5MS: 5 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_10MS: 10 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_20MS: 20 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_40MS: 40 milliseconds
+ */
+enum adau1761_jackdetect_debounce_time {
+ ADAU1761_JACKDETECT_DEBOUNCE_5MS = 0,
+ ADAU1761_JACKDETECT_DEBOUNCE_10MS = 1,
+ ADAU1761_JACKDETECT_DEBOUNCE_20MS = 2,
+ ADAU1761_JACKDETECT_DEBOUNCE_40MS = 3,
+};
+
+/**
+ * enum adau1761_output_mode - Output mode configuration
+ * @ADAU1761_OUTPUT_MODE_HEADPHONE: Headphone output
+ * @ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: Capless headphone output
+ * @ADAU1761_OUTPUT_MODE_LINE: Line output
+ */
+enum adau1761_output_mode {
+ ADAU1761_OUTPUT_MODE_HEADPHONE,
+ ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS,
+ ADAU1761_OUTPUT_MODE_LINE,
+};
+
+/**
+ * struct adau1761_platform_data - ADAU1761 Codec driver platform data
+ * @input_differential: If true the input pins will be configured in
+ * differential mode.
+ * @lineout_mode: Output mode for the LOUT/ROUT pins
+ * @headphone_mode: Output mode for the LHP/RHP pins
+ * @digmic_jackdetect_pin_mode: JACKDET/MICIN pin configuration
+ * @jackdetect_debounce_time: Jack insertion detection debounce time.
+ * Note: This value will only be used, if the JACKDET/MICIN pin is configured
+ * for jack insertion detection.
+ * @jackdetect_active_low: If true the jack insertion detection is active low.
+ * Otherwise it will be active high.
+ * @micbias_voltage: Microphone voltage bias
+ */
+struct adau1761_platform_data {
+ bool input_differential;
+ enum adau1761_output_mode lineout_mode;
+ enum adau1761_output_mode headphone_mode;
+
+ enum adau1761_digmic_jackdet_pin_mode digmic_jackdetect_pin_mode;
+
+ enum adau1761_jackdetect_debounce_time jackdetect_debounce_time;
+ bool jackdetect_active_low;
+
+ enum adau17x1_micbias_voltage micbias_voltage;
+};
+
+/**
+ * struct adau1781_platform_data - ADAU1781 Codec driver platform data
+ * @left_input_differential: If true configure the left input as
+ * differential input.
+ * @right_input_differential: If true configure the right input as differential
+ * input.
+ * @use_dmic: If true configure the MIC pins as digital microphone pins instead
+ * of analog microphone pins.
+ * @micbias_voltage: Microphone voltage bias
+ */
+struct adau1781_platform_data {
+ bool left_input_differential;
+ bool right_input_differential;
+
+ bool use_dmic;
+
+ enum adau17x1_micbias_voltage micbias_voltage;
+};
+
+#endif
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
new file mode 100644
index 00000000000000..0a811e144b365d
--- /dev/null
+++ b/include/linux/platform_data/adau1977.h
@@ -0,0 +1,46 @@
+/*
+ * ADAU1977/ADAU1978/ADAU1979 driver
+ *
+ * Copyright 2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
+#define __LINUX_PLATFORM_DATA_ADAU1977_H__
+
+/**
+ * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
+ * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
+ * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
+ * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
+ * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
+ * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
+ * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
+ * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
+ * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
+ * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
+ */
+enum adau1977_micbias {
+ ADAU1977_MICBIAS_5V0 = 0x0,
+ ADAU1977_MICBIAS_5V5 = 0x1,
+ ADAU1977_MICBIAS_6V0 = 0x2,
+ ADAU1977_MICBIAS_6V5 = 0x3,
+ ADAU1977_MICBIAS_7V0 = 0x4,
+ ADAU1977_MICBIAS_7V5 = 0x5,
+ ADAU1977_MICBIAS_8V0 = 0x6,
+ ADAU1977_MICBIAS_8V5 = 0x7,
+ ADAU1977_MICBIAS_9V0 = 0x8,
+};
+
+/**
+ * struct adau1977_platform_data - Platform configuration data for the ADAU1977
+ * @micbias: Specifies the voltage for the MICBIAS pin
+ */
+struct adau1977_platform_data {
+ enum adau1977_micbias micbias;
+ int reset_gpio;
+};
+
+#endif
diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h
new file mode 100644
index 00000000000000..3245f45f9d7733
--- /dev/null
+++ b/include/linux/platform_data/ads7828.h
@@ -0,0 +1,29 @@
+/*
+ * TI ADS7828 A/D Converter platform data definition
+ *
+ * Copyright (c) 2012 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * For further information, see the Documentation/hwmon/ads7828 file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PDATA_ADS7828_H
+#define _PDATA_ADS7828_H
+
+/**
+ * struct ads7828_platform_data - optional ADS7828 connectivity info
+ * @diff_input: Differential input mode.
+ * @ext_vref: Use an external voltage reference.
+ * @vref_mv: Voltage reference value, if external.
+ */
+struct ads7828_platform_data {
+ bool diff_input;
+ bool ext_vref;
+ unsigned int vref_mv;
+};
+
+#endif /* _PDATA_ADS7828_H */
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
new file mode 100644
index 00000000000000..ad3aa7b95f35f6
--- /dev/null
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __INV_MPU6050_PLATFORM_H_
+#define __INV_MPU6050_PLATFORM_H_
+
+/**
+ * struct inv_mpu6050_platform_data - Platform data for the mpu driver
+ * @orientation: Orientation matrix of the chip
+ *
+ * Contains platform specific information on how to configure the MPU6050 to
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ */
+struct inv_mpu6050_platform_data {
+ __s8 orientation[9];
+};
+
+#endif
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
new file mode 100644
index 00000000000000..c90d7a09dda7bf
--- /dev/null
+++ b/include/linux/platform_data/tsl2563.h
@@ -0,0 +1,8 @@
+#ifndef __LINUX_TSL2563_H
+#define __LINUX_TSL2563_H
+
+struct tsl2563_platform_data {
+ int cover_comp_gain;
+};
+
+#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/sw_sync.h b/include/linux/sw_sync.h
new file mode 100644
index 00000000000000..bd6f2089e77f3c
--- /dev/null
+++ b/include/linux/sw_sync.h
@@ -0,0 +1,58 @@
+/*
+ * include/linux/sw_sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SW_SYNC_H
+#define _LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#include <linux/sync.h>
+
+struct sw_sync_timeline {
+ struct sync_timeline obj;
+
+ u32 value;
+};
+
+struct sw_sync_pt {
+ struct sync_pt pt;
+
+ u32 value;
+};
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+
+#endif /* __KERNEL__ */
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+
+#endif /* _LINUX_SW_SYNC_H */
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644
index 00000000000000..3e4c748e343a24
--- /dev/null
+++ b/include/linux/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h
new file mode 100644
index 00000000000000..5539cc5207796f
--- /dev/null
+++ b/include/linux/synaptics_i2c_rmi.h
@@ -0,0 +1,55 @@
+/*
+ * include/linux/synaptics_i2c_rmi.h - platform data structure for Synaptics RMI touchscreen
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNAPTICS_I2C_RMI_H
+#define _LINUX_SYNAPTICS_I2C_RMI_H
+
+#define SYNAPTICS_I2C_RMI_NAME "synaptics-rmi-ts"
+
+enum {
+ SYNAPTICS_FLIP_X = 1UL << 0,
+ SYNAPTICS_FLIP_Y = 1UL << 1,
+ SYNAPTICS_SWAP_XY = 1UL << 2,
+ SYNAPTICS_SNAP_TO_INACTIVE_EDGE = 1UL << 3,
+};
+
+struct synaptics_i2c_rmi_platform_data {
+ uint32_t version; /* Use this entry for panels with */
+ /* (major << 8 | minor) version or above. */
+ /* If non-zero another array entry follows */
+ int (*power)(int on); /* Only valid in first array entry */
+ uint32_t flags;
+ unsigned long irqflags;
+ uint32_t inactive_left; /* 0x10000 = screen width */
+ uint32_t inactive_right; /* 0x10000 = screen width */
+ uint32_t inactive_top; /* 0x10000 = screen height */
+ uint32_t inactive_bottom; /* 0x10000 = screen height */
+ uint32_t snap_left_on; /* 0x10000 = screen width */
+ uint32_t snap_left_off; /* 0x10000 = screen width */
+ uint32_t snap_right_on; /* 0x10000 = screen width */
+ uint32_t snap_right_off; /* 0x10000 = screen width */
+ uint32_t snap_top_on; /* 0x10000 = screen height */
+ uint32_t snap_top_off; /* 0x10000 = screen height */
+ uint32_t snap_bottom_on; /* 0x10000 = screen height */
+ uint32_t snap_bottom_off; /* 0x10000 = screen height */
+ uint32_t fuzz_x; /* 0x10000 = screen width */
+ uint32_t fuzz_y; /* 0x10000 = screen height */
+ int fuzz_p;
+ int fuzz_w;
+ int8_t sensitivity_adjust;
+};
+
+#endif /* _LINUX_SYNAPTICS_I2C_RMI_H */
diff --git a/include/linux/sync.h b/include/linux/sync.h
new file mode 100644
index 00000000000000..5f493638148d79
--- /dev/null
+++ b/include/linux/sync.h
@@ -0,0 +1,427 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name: name of the implementation
+ * @dup: duplicate a sync_pt
+ * @has_signaled: returns:
+ * 1 if pt has signaled
+ * 0 if pt has not signaled
+ * <0 on error
+ * @compare: returns:
+ * 1 if b will signal before a
+ * 0 if a and b will signal at the same time
+ * -1 if a will signal before b
+ * @free_pt: called before sync_pt is freed
+ * @release_obj: called before sync_timeline is freed
+ * @print_obj: deprecated
+ * @print_pt: deprecated
+ * @fill_driver_data: write implementation specific driver data to data.
+ * should return an error if there is not enough room
+ * as specified by size. This information is returned
+ * to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str: fill str with the value of the sync_pt
+ */
+struct sync_timeline_ops {
+ const char *driver_name;
+
+ /* required */
+ struct sync_pt *(*dup)(struct sync_pt *pt);
+
+ /* required */
+ int (*has_signaled)(struct sync_pt *pt);
+
+ /* required */
+ int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+ /* optional */
+ void (*free_pt)(struct sync_pt *sync_pt);
+
+ /* optional */
+ void (*release_obj)(struct sync_timeline *sync_timeline);
+
+ /* deprecated */
+ void (*print_obj)(struct seq_file *s,
+ struct sync_timeline *sync_timeline);
+
+ /* deprecated */
+ void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
+
+ /* optional */
+ int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+ /* optional */
+ void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+ int size);
+
+ /* optional */
+ void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @ops: ops that define the implementation of the sync_timeline
+ * @name: name of the sync_timeline. Useful for debugging
+ * @destroyed: set when sync_timeline is destroyed
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head, destroyed, and
+ * sync_pt.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ const struct sync_timeline_ops *ops;
+ char name[32];
+
+ /* protected by child_list_lock */
+ bool destroyed;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+ spinlock_t active_list_lock;
+
+ struct list_head sync_timeline_list;
+};
+
+/**
+ * struct sync_pt - sync point
+ * @parent: sync_timeline to which this sync_pt belongs
+ * @child_list: membership in sync_timeline.child_list_head
+ * @active_list: membership in sync_timeline.active_list_head
+ * @signaled_list: membership in temporary signaled_list on stack
+ * @fence: sync_fence to which the sync_pt belongs
+ * @pt_list: membership in sync_fence.pt_list_head
+ * @status: 1: signaled, 0:active, <0: error
+ * @timestamp: time which sync_pt status transitioned from active to
+ * signaled or error.
+ */
+struct sync_pt {
+ struct sync_timeline *parent;
+ struct list_head child_list;
+
+ struct list_head active_list;
+ struct list_head signaled_list;
+
+ struct sync_fence *fence;
+ struct list_head pt_list;
+
+ /* protected by parent->active_list_lock */
+ int status;
+
+ ktime_t timestamp;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_fence. Useful for debugging
+ * @pt_list_head: list of sync_pts in this fence. immutable once fence
+ * is created
+ * @waiter_list_head: list of asynchronous waiters on this fence
+ * @waiter_list_lock: lock protecting @waiter_list_head and @status
+ * @status: 1: signaled, 0:active, <0: error
+ *
+ * @wq: wait queue for fence signaling
+ * @sync_fence_list: membership in global fence list
+ */
+struct sync_fence {
+ struct file *file;
+ struct kref kref;
+ char name[32];
+
+ /* this list is immutable once the fence is created */
+ struct list_head pt_list_head;
+
+ struct list_head waiter_list_head;
+ spinlock_t waiter_list_lock; /* also protects status */
+ int status;
+
+ wait_queue_head_t wq;
+
+ struct list_head sync_fence_list;
+};
+
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @waiter_list: membership in sync_fence.waiter_list_head
+ * @callback: function pointer to call when fence signals
+ * @callback_data: pointer to pass to @callback
+ */
+struct sync_fence_waiter {
+ struct list_head waiter_list;
+
+ sync_callback_t callback;
+};
+
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+ sync_callback_t callback)
+{
+ waiter->callback = callback;
+}
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops: specifies the implementation ops for the object
+ * @size: size to allocate for this obj
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+ int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj: sync_timeline to destroy
+ *
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent: sync_pt's parent sync_timeline
+ * @size: size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt: sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name: name of fence to create
+ * @pt: sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
+
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name: name of new fence
+ * @a: fence a
+ * @b: fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b. @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+ struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference on a sync fence
+ * @fence: fence to put
+ *
+ * Puts a reference on @fence. If this is the last reference, the fence and
+ * all its sync_pts will be freed
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence: fence to install
+ * @fd: file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd. @fd's should be acquired through get_unused_fd().
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence: fence to wait on
+ * @waiter: waiter callback struct
+ *
+ * returns 0 if waiter was removed from fence's async waiter list.
+ * returns -ENOENT if waiter was not found on fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence: fence to wait on
+ * @timeout: timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error. Waits indefinitely
+ * if @timeout < 0
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
+#endif /* __KERNEL__ */
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+ __s32 fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer its passing in.
+ * ioctl returns length of sync_fence_data returned to userspace
+ * including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _LINUX_SYNC_H */
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 00000000000000..9c6974f169661d
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,104 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+ UHID_CREATE,
+ UHID_DESTROY,
+ UHID_START,
+ UHID_STOP,
+ UHID_OPEN,
+ UHID_CLOSE,
+ UHID_OUTPUT,
+ UHID_OUTPUT_EV,
+ UHID_INPUT,
+ UHID_FEATURE,
+ UHID_FEATURE_ANSWER,
+};
+
+struct uhid_create_req {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+ __u8 __user *rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+#define UHID_DATA_MAX 4096
+
+enum uhid_report_type {
+ UHID_FEATURE_REPORT,
+ UHID_OUTPUT_REPORT,
+ UHID_INPUT_REPORT,
+};
+
+struct uhid_input_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+} __attribute__((__packed__));
+
+struct uhid_output_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_output_ev_req {
+ __u16 type;
+ __u16 code;
+ __s32 value;
+} __attribute__((__packed__));
+
+struct uhid_feature_req {
+ __u32 id;
+ __u8 rnum;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_feature_answer_req {
+ __u32 id;
+ __u16 err;
+ __u16 size;
+ __u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_event {
+ __u32 type;
+
+ union {
+ struct uhid_create_req create;
+ struct uhid_input_req input;
+ struct uhid_output_req output;
+ struct uhid_output_ev_req output_ev;
+ struct uhid_feature_req feature;
+ struct uhid_feature_answer_req feature_answer;
+ } u;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 00000000000000..6bd6c4e52d17c6
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
new file mode 100644
index 00000000000000..0dc87fe8b4d9b7
--- /dev/null
+++ b/include/linux/usb/chipidea.h
@@ -0,0 +1,50 @@
+/*
+ * Platform data for the chipidea USB dual role controller
+ */
+
+#ifndef __LINUX_USB_CHIPIDEA_H
+#define __LINUX_USB_CHIPIDEA_H
+
+#include <linux/platform_device.h>
+
+enum ci_udc_operating_modes {
+ CI_UDC_DR_HOST = 0,
+ CI_UDC_DR_DEVICE,
+ CI_UDC_DR_OTG,
+ /*
+ * "static OTG": driver modules for host/device
+ * can be changed after linux boot
+ */
+ CI_UDC_DR_DUAL_HOST, // dual_mode booting in host
+ CI_UDC_DR_DUAL_DEVICE,// dual_mode booting in device
+
+ CI_UDC_DR_DISABLE_HOST_WORKAROUND = 0x100, // Will do a reset if 10 "null" transfers are detected
+};
+
+
+struct ci13xxx;
+struct ci13xxx_udc_driver {
+ const char *name;
+ /* offset of the capability registers */
+ uintptr_t capoffset;
+ unsigned power_budget;
+ unsigned long flags;
+#define CI13XXX_REGS_SHARED BIT(0)
+#define CI13XXX_REQUIRE_TRANSCEIVER BIT(1)
+#define CI13XXX_PULLUP_ON_VBUS BIT(2)
+#define CI13XXX_DISABLE_STREAMING BIT(3)
+
+#define CI13XXX_CONTROLLER_RESET_EVENT 0
+#define CI13XXX_CONTROLLER_STOPPED_EVENT 1
+ enum ci_udc_operating_modes operating_mode;
+ u8 workaround_reset_nb;
+ void (*notify_event) (struct ci13xxx *udc, unsigned event);
+ int (*init)(struct platform_device *, int (*ulpi_write)(struct platform_device *pdev, u8 addr, u8 val));
+ void (*exit)(struct platform_device *);
+ void (*set_vbus)(struct platform_device *, int value);
+};
+
+/* Default offset of capability registers */
+#define DEF_CAPOFFSET 0x100
+
+#endif
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 00000000000000..61ebe0aabc5ba4
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER 0
+#define ACCESSORY_STRING_MODEL 1
+#define ACCESSORY_STRING_DESCRIPTION 2
+#define ACCESSORY_STRING_VERSION 3
+#define ACCESSORY_STRING_URI 4
+#define ACCESSORY_STRING_SERIAL 5
+
+/* Control request for retrieving device's protocol version
+ *
+ * requestType: USB_DIR_IN | USB_TYPE_VENDOR
+ * request: ACCESSORY_GET_PROTOCOL
+ * value: 0
+ * index: 0
+ * data version number (16 bits little endian)
+ * 1 for original accessory support
+ * 2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL 51
+
+/* Control request for host to send a string to the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_STRING
+ * value: 0
+ * index: string ID
+ * data zero terminated UTF8 string
+ *
+ * The device can later retrieve these strings via the
+ * ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING 52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_START
+ * value: 0
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_START 53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_REGISTER_HID_DEVICE
+ * value: Accessory assigned ID for the HID device
+ * index: total length of the HID report descriptor
+ * data none
+ */
+#define ACCESSORY_REGISTER_HID 54
+
+/* Control request for unregistering a HID device.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_REGISTER_HID
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_UNREGISTER_HID 55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_HID_REPORT_DESC
+ * value: Accessory assigned ID for the HID device
+ * index: offset of data in descriptor
+ * (needed when HID descriptor is too big for one packet)
+ * data the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC 56
+
+/* Control request for sending HID events.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SEND_HID_EVENT
+ * value: Accessory assigned ID for the HID device
+ * index: 0
+ * data the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT 57
+
+/* Control request for setting the audio mode.
+ *
+ * requestType: USB_DIR_OUT | USB_TYPE_VENDOR
+ * request: ACCESSORY_SET_AUDIO_MODE
+ * value: 0 - no audio
+ * 1 - device to host, 44100 16-bit stereo PCM
+ * index: 0
+ * data none
+ */
+#define ACCESSORY_SET_AUDIO_MODE 58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE _IO('M', 8)
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 00000000000000..72a432e2fcdd66
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,75 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ uint32_t length;
+ /* container type (2 for data packet) */
+ uint16_t type;
+ /* MTP command code */
+ uint16_t command;
+ /* MTP transaction ID */
+ uint32_t transaction_id;
+};
+
+#endif /* __KERNEL__ */
+
+struct mtp_file_range {
+ /* file descriptor for file to transfer */
+ int fd;
+ /* offset in file for start of transfer */
+ loff_t offset;
+ /* number of bytes to transfer */
+ int64_t length;
+ /* MTP command ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint16_t command;
+ /* MTP transaction ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint32_t transaction_id;
+};
+
+struct mtp_event {
+ /* size of the event */
+ size_t length;
+ /* event data to send */
+ void *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range)
+
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/v4l2-common.h b/include/linux/v4l2-common.h
new file mode 100644
index 00000000000000..0fa8b64c3cdbd1
--- /dev/null
+++ b/include/linux/v4l2-common.h
@@ -0,0 +1,71 @@
+/*
+ * include/linux/v4l2-common.h
+ *
+ * Common V4L2 and V4L2 subdev definitions.
+ *
+ * Users are advised to #include this file either through videodev2.h
+ * (V4L2) or through v4l2-subdev.h (V4L2 subdev) rather than to refer
+ * to this file directly.
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __V4L2_COMMON__
+#define __V4L2_COMMON__
+
+/*
+ *
+ * Selection interface definitions
+ *
+ */
+
+/* Current cropping area */
+#define V4L2_SEL_TGT_CROP 0x0000
+/* Default cropping area */
+#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
+/* Cropping bounds */
+#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Current composing area */
+#define V4L2_SEL_TGT_COMPOSE 0x0100
+/* Default composing area */
+#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
+/* Composing bounds */
+#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
+/* Current composing area plus all padding pixels */
+#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
+
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL \
+ V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL \
+ V4L2_SEL_TGT_COMPOSE
+
+/* Selection flags */
+#define V4L2_SEL_FLAG_GE (1 << 0)
+#define V4L2_SEL_FLAG_LE (1 << 1)
+#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+
+#endif /* __V4L2_COMMON__ */
diff --git a/include/linux/v4l2-dv-timings.h b/include/linux/v4l2-dv-timings.h
new file mode 100644
index 00000000000000..6c8f159e416eea
--- /dev/null
+++ b/include/linux/v4l2-dv-timings.h
@@ -0,0 +1,913 @@
+/*
+ * V4L2 DV timings header.
+ *
+ * Copyright (C) 2012 Hans Verkuil <hans.verkuil@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _V4L2_DV_TIMINGS_H
+#define _V4L2_DV_TIMINGS_H
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
+/* Sadly gcc versions older than 4.6 have a bug in how they initialize
+ anonymous unions where they require additional curly brackets.
+ This violates the C1x standard. This workaround adds the curly brackets
+ if needed. */
+#define V4L2_INIT_BT_TIMINGS(_width, args...) \
+ { .bt = { _width , ## args } }
+#else
+#define V4L2_INIT_BT_TIMINGS(_width, args...) \
+ .bt = { _width , ## args }
+#endif
+
+/* CEA-861-E timings (i.e. standard HDTV timings) */
+
+#define V4L2_DV_BT_CEA_640X480P59_94 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 25175000, 16, 96, 48, 10, 2, 33, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+/* Note: these are the nominal timings, for HDMI links this format is typically
+ * double-clocked to meet the minimum pixelclock requirements. */
+#define V4L2_DV_BT_CEA_720X480I59_94 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
+ 13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_720X480P59_94 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
+ 27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+/* Note: these are the nominal timings, for HDMI links this format is typically
+ * double-clocked to meet the minimum pixelclock requirements. */
+#define V4L2_DV_BT_CEA_720X576I50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
+ 13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_720X576P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
+ 27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 59400000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1280X720P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080I50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080I60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HALF_LINE) \
+}
+
+#define V4L2_DV_BT_CEA_1920X1080P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_3840X2160P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_3840X2160P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_3840X2160P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_3840X2160P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_3840X2160P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_4096X2160P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_4096X2160P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_4096X2160P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+#define V4L2_DV_BT_CEA_4096X2160P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, 0) \
+}
+
+#define V4L2_DV_BT_CEA_4096X2160P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+}
+
+
+/* VESA Discrete Monitor Timings as per version 1.0, revision 12 */
+
+#define V4L2_DV_BT_DMT_640X350P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 350, 0, V4L2_DV_HSYNC_POS_POL, \
+ 31500000, 32, 64, 96, 32, 3, 60, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X400P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 400, 0, V4L2_DV_VSYNC_POS_POL, \
+ 31500000, 32, 64, 96, 1, 3, 41, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_720X400P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 400, 0, V4L2_DV_VSYNC_POS_POL, \
+ 35500000, 36, 72, 108, 1, 3, 42, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* VGA resolutions */
+#define V4L2_DV_BT_DMT_640X480P60 V4L2_DV_BT_CEA_640X480P59_94
+
+#define V4L2_DV_BT_DMT_640X480P72 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 31500000, 24, 40, 128, 9, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X480P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 31500000, 16, 64, 120, 1, 3, 16, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_640X480P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
+ 36000000, 56, 56, 80, 1, 3, 25, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* SVGA resolutions */
+#define V4L2_DV_BT_DMT_800X600P56 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 36000000, 24, 72, 128, 1, 2, 22, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 40000000, 40, 128, 88, 1, 4, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P72 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 50000000, 56, 120, 64, 37, 6, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 49500000, 16, 80, 160, 1, 3, 21, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 56250000, 32, 64, 152, 1, 3, 27, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_800X600P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(800, 600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 73250000, 48, 32, 80, 3, 4, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_848X480P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(848, 480, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 33750000, 16, 112, 112, 6, 8, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768I43 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 44900000, 8, 176, 56, 0, 4, 20, 0, 4, 21, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+/* XGA resolutions */
+#define V4L2_DV_BT_DMT_1024X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, 0, \
+ 65000000, 24, 136, 160, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P70 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, 0, \
+ 75000000, 24, 136, 144, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 78750000, 16, 96, 176, 1, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 94500000, 48, 96, 208, 1, 3, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1024X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1024, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 115500000, 48, 32, 80, 3, 4, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* XGA+ resolution */
+#define V4L2_DV_BT_DMT_1152X864P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1152, 864, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 64, 128, 256, 1, 3, 32, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X720P60 V4L2_DV_BT_CEA_1280X720P60
+
+/* WXGA resolutions */
+#define V4L2_DV_BT_DMT_1280X768P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 68250000, 48, 32, 80, 3, 7, 12, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 79500000, 64, 128, 192, 3, 7, 20, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 102250000, 80, 128, 208, 3, 7, 27, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_VSYNC_POS_POL, \
+ 117500000, 80, 136, 216, 3, 7, 31, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 140250000, 48, 32, 80, 3, 7, 35, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_HSYNC_POS_POL, \
+ 71000000, 48, 32, 80, 3, 6, 14, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 83500000, 72, 128, 200, 3, 6, 22, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 106500000, 80, 128, 208, 3, 6, 29, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_VSYNC_POS_POL, \
+ 122500000, 80, 136, 216, 3, 6, 34, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X800P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 800, 0, V4L2_DV_HSYNC_POS_POL, \
+ 146250000, 48, 32, 80, 3, 6, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 96, 112, 312, 1, 3, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 64, 160, 224, 1, 3, 47, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X960P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 960, 0, V4L2_DV_HSYNC_POS_POL, \
+ 175500000, 48, 32, 80, 3, 4, 50, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* SXGA resolutions */
+#define V4L2_DV_BT_DMT_1280X1024P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 48, 112, 248, 1, 3, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 135000000, 16, 144, 248, 1, 3, 38, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 157500000, 64, 160, 224, 1, 3, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1280X1024P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1280, 1024, 0, V4L2_DV_HSYNC_POS_POL, \
+ 187250000, 48, 32, 80, 3, 7, 50, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1360X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1360, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 85500000, 64, 112, 256, 3, 6, 18, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1360X768P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1360, 768, 0, V4L2_DV_HSYNC_POS_POL, \
+ 148250000, 48, 32, 80, 3, 5, 37, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1366X768P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 85500000, 70, 143, 213, 3, 3, 24, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1366X768P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 72000000, 14, 56, 64, 1, 3, 28, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* SXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1400X1050P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 101000000, 48, 32, 80, 3, 4, 23, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 121750000, 88, 144, 232, 3, 4, 32, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 156000000, 104, 144, 248, 3, 4, 42, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 179500000, 104, 152, 256, 3, 4, 48, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1400X1050P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1400, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 208000000, 48, 32, 80, 3, 4, 55, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1440X900P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_HSYNC_POS_POL, \
+ 88750000, 48, 32, 80, 3, 6, 17, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 106500000, 80, 152, 232, 3, 6, 25, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 136750000, 96, 152, 248, 3, 6, 33, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_VSYNC_POS_POL, \
+ 157000000, 104, 152, 256, 3, 6, 39, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1440X900P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1440, 900, 0, V4L2_DV_HSYNC_POS_POL, \
+ 182750000, 48, 32, 80, 3, 6, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1600X900P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 900, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 108000000, 24, 80, 96, 1, 3, 96, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* UXGA resolutions */
+#define V4L2_DV_BT_DMT_1600X1200P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 162000000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P65 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 175500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P70 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 189000000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 202500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 229500000, 64, 192, 304, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1600X1200P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1600, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 268250000, 48, 32, 80, 3, 4, 64, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WSXGA+ resolutions */
+#define V4L2_DV_BT_DMT_1680X1050P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 119000000, 48, 32, 80, 3, 6, 21, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 146250000, 104, 176, 280, 3, 6, 30, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 187000000, 120, 176, 296, 3, 6, 40, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_VSYNC_POS_POL, \
+ 214750000, 128, 176, 304, 3, 6, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1680X1050P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1680, 1050, 0, V4L2_DV_HSYNC_POS_POL, \
+ 245500000, 48, 32, 80, 3, 6, 53, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_VSYNC_POS_POL, \
+ 204750000, 128, 200, 328, 1, 3, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_VSYNC_POS_POL, \
+ 261000000, 96, 216, 352, 1, 3, 69, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1792X1344P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1792, 1344, 0, V4L2_DV_HSYNC_POS_POL, \
+ 333250000, 48, 32, 80, 3, 4, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_VSYNC_POS_POL, \
+ 218250000, 96, 224, 352, 1, 3, 43, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_VSYNC_POS_POL, \
+ 288000000, 128, 224, 352, 1, 3, 104, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1856X1392P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1856, 1392, 0, V4L2_DV_HSYNC_POS_POL, \
+ 356500000, 48, 32, 80, 3, 4, 75, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1080P60 V4L2_DV_BT_CEA_1920X1080P60
+
+/* WUXGA resolutions */
+#define V4L2_DV_BT_DMT_1920X1200P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 154000000, 48, 32, 80, 3, 6, 26, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 193250000, 136, 200, 336, 3, 6, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 245250000, 136, 208, 344, 3, 6, 46, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_VSYNC_POS_POL, \
+ 281250000, 144, 208, 352, 3, 6, 53, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1200P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1200, 0, V4L2_DV_HSYNC_POS_POL, \
+ 317000000, 48, 32, 80, 3, 6, 62, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_VSYNC_POS_POL, \
+ 234000000, 128, 208, 344, 1, 3, 56, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_VSYNC_POS_POL, \
+ 297000000, 144, 224, 352, 1, 3, 56, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_1920X1440P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1440, 0, V4L2_DV_HSYNC_POS_POL, \
+ 380500000, 48, 32, 80, 3, 4, 78, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_2048X1152P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1152, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 162000000, 26, 80, 96, 1, 3, 44, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT, V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* WQXGA resolutions */
+#define V4L2_DV_BT_DMT_2560X1600P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 268500000, 48, 32, 80, 3, 6, 37, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 348500000, 192, 280, 472, 3, 6, 49, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P75 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 443250000, 208, 280, 488, 3, 6, 63, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P85 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_VSYNC_POS_POL, \
+ 505250000, 208, 280, 488, 3, 6, 73, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, 0) \
+}
+
+#define V4L2_DV_BT_DMT_2560X1600P120_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2560, 1600, 0, V4L2_DV_HSYNC_POS_POL, \
+ 552750000, 48, 32, 80, 3, 6, 85, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+/* 4K resolutions */
+#define V4L2_DV_BT_DMT_4096X2160P60_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 556744000, 8, 32, 40, 48, 8, 6, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#define V4L2_DV_BT_DMT_4096X2160P59_94_RB { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ 556188000, 8, 32, 40, 48, 8, 6, 0, 0, 0, \
+ V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT, \
+ V4L2_DV_FL_REDUCED_BLANKING) \
+}
+
+#endif
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644
index 00000000000000..f4a698a2288037
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+ WAKE_LOCK_SUSPEND, /* Prevent suspend */
+ WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+ struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+ const char *name)
+{
+ wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+ wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+ __pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+ __pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+ __pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+ return lock->ws.active;
+}
+
+#endif
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644
index 00000000000000..f07e0679fb8281
--- /dev/null
+++ b/include/linux/wifi_tiwlan.h
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS 3
+#define WMPA_NUMBER_OF_BUFFERS 160
+#define WMPA_SECTION_HEADER 24
+#define WMPA_SECTION_SIZE_0 (WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1 (WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2 (WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif
diff --git a/include/linux/wl127x-rfkill.h b/include/linux/wl127x-rfkill.h
new file mode 100644
index 00000000000000..9057ec63d5d333
--- /dev/null
+++ b/include/linux/wl127x-rfkill.h
@@ -0,0 +1,35 @@
+/*
+ * Bluetooth TI wl127x rfkill power control via GPIO
+ *
+ * Copyright (C) 2009 Motorola, Inc.
+ * Copyright (C) 2008 Texas Instruments
+ * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _LINUX_WL127X_RFKILL_H
+#define _LINUX_WL127X_RFKILL_H
+
+#include <linux/rfkill.h>
+
+struct wl127x_rfkill_platform_data {
+ int nshutdown_gpio;
+
+ struct rfkill *rfkill; /* for driver only */
+};
+
+#endif
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 00000000000000..40ec3482d1efbc
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,27 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+ int (*set_power)(int val);
+ int (*set_reset)(int val);
+ int (*set_carddetect)(int val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+
+#endif
diff --git a/include/media/adv7391.h b/include/media/adv7391.h
new file mode 100644
index 00000000000000..3d17c40a1834d0
--- /dev/null
+++ b/include/media/adv7391.h
@@ -0,0 +1,64 @@
+/*
+ * adv7391.h
+ *
+ * Driver for Analog device's 7391
+ *
+ * Author : Nicolas Laclau <nicolas.laclau@parrot.com>
+ *
+ * Date : 22 Jan. 2015
+ *
+ */
+
+#ifndef ADV7391_H_
+#define ADV7391_H_
+
+#define ADV7391_NAME "adv7391"
+
+/* registers list */
+#define ADV7391_REG_PWR_MODE (0x00)
+#define ADV7391_REG_MODE_SEL (0x01)
+#define ADV7391_REG_DAC_OUT_LVL (0x0B)
+#define ADV7391_REG_SW_RESET (0x17)
+#define ADV7391_REG_SD_MODE1 (0x80)
+#define ADV7391_REG_SD_MODE2 (0x82)
+#define ADV7391_REG_SD_MODE3 (0x83)
+#define ADV7391_REG_SD_MODE6 (0x87)
+#define ADV7391_REG_SD_BRIGHT (0xA1)
+#define ADV7391_REG_SD_SCL_LSB (0x9C)
+#define ADV7391_REG_SD_Y_SCL (0x9D)
+#define ADV7391_REG_SD_CB_SCL (0x9E)
+#define ADV7391_REG_SD_CR_SCL (0x9F)
+
+/* flags for ADV7391_REG_PWR_MODE */
+#define ADV7391_FLG_DAC1_PWR (0x10)
+#define ADV7391_FLG_DAC2_PWR (0x08)
+#define ADV7391_FLG_DAC3_PWR (0x04)
+
+/* flags for ADV7391_REG_SW_RESET */
+#define ADV7391_FLG_SW_RESET (0x02)
+
+/* flags for ADV7391_REG_SD_MODE1 */
+#define ADV7391_FLG_LUMA_SSAF (0x10)
+
+/* flags for ADV7391_REG_SD_MODE2 */
+#define ADV7391_FLG_PRPB_SSAF_FILT (0x01)
+#define ADV7391_FLG_DAC_OUT1 (0x02)
+#define ADV7391_FLG_PEDESTAL (0x08)
+#define ADV7391_FLG_SQ_PIXEL (0x10)
+#define ADV7391_FLG_VCR_FF_FR_SYNC (0x20)
+#define ADV7391_FLG_PIX_DT_VALID (0x40)
+#define ADV7391_FLG_VID_EDG_CTL (0x80)
+
+/* flags for ADV7391_REG_SD_MODE3 */
+#define ADV7391_FLG_Y_OUT_LVL (0x02) /*(dflt:700/300mV, set:714/286mV)*/
+
+struct adv7391_platform_data {
+ int dac_pwr;
+ int dac_gain;
+ int sd_mode1;
+ int sd_mode2;
+ int sd_mode3;
+ int scale_lsb;
+};
+
+#endif /* ADV7391_H_ */
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
new file mode 100644
index 00000000000000..4f06a127a7f8e7
--- /dev/null
+++ b/include/media/adv7604.h
@@ -0,0 +1,175 @@
+/*
+ * adv7604 - Analog Devices ADV7604 video decoder driver
+ *
+ * Copyright 2012 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ADV7604_
+#define _ADV7604_
+
+/* Analog input muxing modes (AFE register 0x02, [2:0]) */
+enum adv7604_ain_sel {
+ ADV7604_AIN1_2_3_NC_SYNC_1_2 = 0,
+ ADV7604_AIN4_5_6_NC_SYNC_2_1 = 1,
+ ADV7604_AIN7_8_9_NC_SYNC_3_1 = 2,
+ ADV7604_AIN10_11_12_NC_SYNC_4_1 = 3,
+ ADV7604_AIN9_4_5_6_SYNC_2_1 = 4,
+};
+
+/* Bus rotation and reordering (IO register 0x04, [7:5]) */
+enum adv7604_op_ch_sel {
+ ADV7604_OP_CH_SEL_GBR = 0,
+ ADV7604_OP_CH_SEL_GRB = 1,
+ ADV7604_OP_CH_SEL_BGR = 2,
+ ADV7604_OP_CH_SEL_RGB = 3,
+ ADV7604_OP_CH_SEL_BRG = 4,
+ ADV7604_OP_CH_SEL_RBG = 5,
+};
+
+/* Input Color Space (IO register 0x02, [7:4]) */
+enum adv7604_inp_color_space {
+ ADV7604_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV7604_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV7604_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV7604_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV7604_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV7604_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV7604_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV7604_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV7604_INP_COLOR_SPACE_AUTO = 0xf,
+};
+
+/* Select output format (IO register 0x03, [7:0]) */
+enum adv7604_op_format_sel {
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_8 = 0x00,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_10 = 0x01,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_12_MODE0 = 0x02,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_12_MODE1 = 0x06,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_12_MODE2 = 0x0a,
+ ADV7604_OP_FORMAT_SEL_DDR_422_8 = 0x20,
+ ADV7604_OP_FORMAT_SEL_DDR_422_10 = 0x21,
+ ADV7604_OP_FORMAT_SEL_DDR_422_12_MODE0 = 0x22,
+ ADV7604_OP_FORMAT_SEL_DDR_422_12_MODE1 = 0x23,
+ ADV7604_OP_FORMAT_SEL_DDR_422_12_MODE2 = 0x24,
+ ADV7604_OP_FORMAT_SEL_SDR_444_24 = 0x40,
+ ADV7604_OP_FORMAT_SEL_SDR_444_30 = 0x41,
+ ADV7604_OP_FORMAT_SEL_SDR_444_36_MODE0 = 0x42,
+ ADV7604_OP_FORMAT_SEL_DDR_444_24 = 0x60,
+ ADV7604_OP_FORMAT_SEL_DDR_444_30 = 0x61,
+ ADV7604_OP_FORMAT_SEL_DDR_444_36 = 0x62,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_16 = 0x80,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_20 = 0x81,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_24_MODE0 = 0x82,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_24_MODE1 = 0x86,
+ ADV7604_OP_FORMAT_SEL_SDR_ITU656_24_MODE2 = 0x8a,
+};
+
+enum adv7604_int1_config {
+ ADV7604_INT1_CONFIG_OPEN_DRAIN,
+ ADV7604_INT1_CONFIG_ACTIVE_LOW,
+ ADV7604_INT1_CONFIG_ACTIVE_HIGH,
+ ADV7604_INT1_CONFIG_DISABLED,
+};
+
+/* Platform dependent definition */
+struct adv7604_platform_data {
+ /* connector - HDMI or DVI? */
+ unsigned connector_hdmi:1;
+
+ /* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
+ unsigned disable_pwrdnb:1;
+
+ /* DIS_CABLE_DET_RST: 1 if the 5V pins are unused and unconnected */
+ unsigned disable_cable_det_rst:1;
+
+ /* Force input color space */
+ enum adv7604_inp_color_space inp_color_space;
+
+ /* Analog input muxing mode */
+ enum adv7604_ain_sel ain_sel;
+
+ /* Bus rotation and reordering */
+ enum adv7604_op_ch_sel op_ch_sel;
+
+ /* Select output format */
+ enum adv7604_op_format_sel op_format_sel;
+
+ /* Configuration of the INT1 pin */
+ enum adv7604_int1_config int1_config;
+
+ /* IO register 0x02 */
+ unsigned alt_gamma:1;
+ unsigned op_656_range:1;
+ unsigned rgb_out:1;
+ unsigned alt_data_sat:1;
+
+ /* IO register 0x05 */
+ unsigned blank_data:1;
+ unsigned insert_av_codes:1;
+ unsigned replicate_av_codes:1;
+ unsigned invert_cbcr:1;
+
+ /* IO register 0x30 */
+ unsigned output_bus_lsb_to_msb:1;
+
+ /* Free run */
+ unsigned hdmi_free_run_mode;
+
+ /* i2c addresses: 0 == use default */
+ u8 i2c_avlink;
+ u8 i2c_cec;
+ u8 i2c_infoframe;
+ u8 i2c_esdp;
+ u8 i2c_dpp;
+ u8 i2c_afe;
+ u8 i2c_repeater;
+ u8 i2c_edid;
+ u8 i2c_hdmi;
+ u8 i2c_test;
+ u8 i2c_cp;
+ u8 i2c_vdp;
+
+ /* Default values are necessary
+ with soc camera architecture */
+ u32 default_width;
+ u32 default_height;
+
+ int cam_it;
+
+ int (*power_on) (void);
+ int (*power_off)(void);
+};
+
+/*
+ * Mode of operation.
+ * This is used as the input argument of the s_routing video op.
+ */
+enum adv7604_mode {
+ ADV7604_MODE_COMP,
+ ADV7604_MODE_GR,
+ ADV7604_MODE_HDMI,
+};
+
+#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
+#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001)
+#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
+
+/* notify events */
+#define ADV7604_HOTPLUG 1
+#define ADV7604_FMT_CHANGE 2
+
+#endif
diff --git a/include/media/ar0330.h b/include/media/ar0330.h
new file mode 100644
index 00000000000000..bf802234cf2171
--- /dev/null
+++ b/include/media/ar0330.h
@@ -0,0 +1,26 @@
+#ifndef AR0330_H
+#define AR0330_H
+
+struct clk;
+struct v4l2_subdev;
+
+enum {
+ AR0330_HW_BUS_PARALLEL,
+ AR0330_HW_BUS_MIPI,
+ AR0330_HW_BUS_HISPI,
+};
+
+struct ar0330_platform_data {
+ struct clk *clock;
+ u32 clk_rate;
+ u32 max_vco_rate;
+ u32 max_op_clk_rate;
+ int (*set_power)(int on);
+ unsigned int reset;
+ u32 hw_bus;
+};
+
+#define AR0330_POWER_ON 1
+#define ARO330_POWER_OFF 0 /* sic: "ARO330" (letter O, not zero) — typo kept for source compatibility with existing users */
+
+#endif
diff --git a/include/media/ar1820.h b/include/media/ar1820.h
new file mode 100644
index 00000000000000..45eb9117b91dd2
--- /dev/null
+++ b/include/media/ar1820.h
@@ -0,0 +1,18 @@
+/*
+ * ar1820 - Aptina CMOS Digital Image Sensor
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Tue Aug 26 15:55:49 CEST 2014
+ *
+ */
+#ifndef __AR1820_H__
+#define __AR1820_H__
+
+struct ar1820_platform_data {
+ int (*set_power)(int on);
+
+ u32 ext_clk;
+};
+
+#endif /* __AR1820_H__ */
diff --git a/include/media/as3636.h b/include/media/as3636.h
new file mode 100644
index 00000000000000..4389e7692c3ff0
--- /dev/null
+++ b/include/media/as3636.h
@@ -0,0 +1,54 @@
+/*
+ * include/media/as3636.h
+ *
+ * Copyright (C) 2014 Parrot S.A.
+ *
+ * Contact: Maxime Jourdan <maxime.jourdan@parrot.com>
+ *
+ * Inspired by the AS3636 driver written by Laurent Pinchart
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __AS3636_H__
+#define __AS3636_H__
+
+#include <media/v4l2-subdev.h>
+
+#define AS3636_NAME "as3636"
+#define AS3636_I2C_ADDR (0x28)
+
+#define V4L2_CID_PRIVATE_AS3636_TEST (V4L2_CID_FLASH_CLASS_BASE + 0x99)
+#define V4L2_CID_PRIVATE_AS3636_STDBY (V4L2_CID_FLASH_CLASS_BASE + 0x100)
+
+/* as3636_platform_data - Flash controller platform data
+ * @set_power: Set power callback
+ * @xenon_pulse_length: [us] 1-255 ; 0 = not used
+ * @switch_selection: [mA] 375-900, 75mA step
+ * @charge_max_voltage: [dV] 285-348 Capacitor's max charge voltage, 285 = 28.5V
+ * @dcdc_peak: [mA] 250-400, 50mA step. DCDC Boost Coil Peak current setting
+ * @auto_charge: [bool] Automatic charging of the capacitor after a flash
+ */
+struct as3636_platform_data {
+ int (*set_power)(struct v4l2_subdev *subdev, int on);
+ u8 xenon_pulse_length;
+ u16 switch_selection;
+ u16 charge_max_voltage;
+ u16 dcdc_peak;
+ u8 auto_charge;
+};
+
+#endif /* __AS3636_H__ */
diff --git a/include/media/galileo1.h b/include/media/galileo1.h
new file mode 100644
index 00000000000000..a6b2d2a04b65a8
--- /dev/null
+++ b/include/media/galileo1.h
@@ -0,0 +1,28 @@
+/*
+ * galileo1 - Galileo1 image sensor platform data
+ *
+ * Platform data for the Galileo1 sensor driver: sensor and PMIC I2C
+ * addresses, power states and the set_power()/refclk/lane settings below.
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Wed Jul 2 09:16:13 CEST 2014
+ *
+ */
+#ifndef __GALILEO1_H__
+#define __GALILEO1_H__
+
+struct galileo1_platform_data {
+ int (*set_power)(int on);
+
+ unsigned long refclk;
+ unsigned int lanes; /* Number of CSI-2 lanes */
+};
+
+#define GALILEO1_SENSOR_I2C_ADDR 0x10
+#define GALILEO1_PMIC_I2C_ADDR 0x0e
+
+#define GALILEO1_POWER_ON 1
+#define GALILEO1_POWER_OFF 0
+
+#endif /* __GALILEO1_H__ */
diff --git a/include/media/mt9f002.h b/include/media/mt9f002.h
new file mode 100644
index 00000000000000..99a9c07c17dcc3
--- /dev/null
+++ b/include/media/mt9f002.h
@@ -0,0 +1,43 @@
+/*
+ * Header for mt9f002 driver
+ *
+ *
+ * Author : Karl Leplat
+ *
+ *
+ */
+#ifndef __MT9F002_H__
+#define __MT9F002_H__
+
+ enum mt9f002_interface {
+ MT9F002_Parallel,
+ MT9F002_MIPI,
+ MT9F002_HiSPi,
+ };
+
+ enum mt9f002_pixel_depth {
+ MT9F002_8bits = 8,
+ MT9F002_10bits = 10,
+ MT9F002_12bits = 12,
+ };
+
+enum mt9f002_lane {
+ MT9F002_1lane = 1,
+ MT9F002_2lane = 2,
+ MT9F002_3lane = 3,
+ MT9F002_4lane = 4,
+};
+
+struct mt9f002_platform_data {
+ enum mt9f002_interface interface;
+ enum mt9f002_pixel_depth pixel_depth;
+ enum mt9f002_lane number_of_lanes;
+ uint64_t ext_clk_freq_mhz;
+ uint64_t output_clk_freq_mhz;
+ int (*set_power)(int on);
+};
+
+#define MT9F002_POWER_ON 1
+#define MT9F002_POWER_OFF 0
+
+#endif
diff --git a/include/media/mt9m021.h b/include/media/mt9m021.h
new file mode 100644
index 00000000000000..df8537aaf74f16
--- /dev/null
+++ b/include/media/mt9m021.h
@@ -0,0 +1,20 @@
+/*
+ * mt9m021 - Aptina CMOS Digital Image Sensor
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Tue Aug 26 15:55:49 CEST 2014
+ *
+ */
+#ifndef __MT9M021_H__
+#define __MT9M021_H__
+
+struct mt9m021_platform_data {
+ int (*set_power)(int on);
+
+ u32 ext_clock;
+ u32 pix_clock;
+ bool no_soft_reset;
+};
+
+#endif /* __MT9M021_H__ */
diff --git a/include/media/mt9v117.h b/include/media/mt9v117.h
new file mode 100644
index 00000000000000..db390f2ff0207f
--- /dev/null
+++ b/include/media/mt9v117.h
@@ -0,0 +1,20 @@
+/*
+ * Header for mt9v117 driver
+ *
+ *
+ * Author : Julien BERAUD
+ *
+ *
+ */
+#ifndef __MT9V117_H__
+#define __MT9V117_H__
+
+struct mt9v117_platform_data {
+ unsigned int ext_clk_freq_hz;
+ int (*set_power)(int on);
+};
+
+#define MT9V117_POWER_ON 1
+#define MT9V117_POWER_OFF 0
+
+#endif
diff --git a/include/media/mx25l6435e.h b/include/media/mx25l6435e.h
new file mode 100644
index 00000000000000..52ed676f54710d
--- /dev/null
+++ b/include/media/mx25l6435e.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright (C) 2013 Parrot S.A.
+ * @file mx25l6435e.h
+ * @author n.laclau
+ * @date 26 June 2013
+ */
+
+
+#ifndef MX25L6435E_H_
+#define MX25L6435E_H_
+
+#define MX25L6435E_READ 0x03 /* normal read */
+#define MX25L6435E_FREAD 0x0B /* fast read data */
+#define MX25L6435E_RDSFDP 0x5A /* Read SFDP */
+#define MX25L6435E_2READ 0xBB /* 2 x I/O read command */
+#define MX25L6435E_DREAD 0x3B /* 1I/2O read command */
+#define MX25L6435E_W4READ 0xE7 /* ... */
+#define MX25L6435E_4READ 0xEB /* 4 x I/O read command */
+#define MX25L6435E_QREAD 0x6B /* ... */
+#define MX25L6435E_WREN 0x06 /* write enable */
+#define MX25L6435E_WRDI 0x04 /* write disable */
+#define MX25L6435E_RDSR 0x05 /* read status register */
+#define MX25L6435E_RDCR 0x15 /* read configuration register */
+#define MX25L6435E_4PP 0x38 /* quad (4 x I/O) page program */
+#define MX25L6435E_SE 0x20 /* Sector erase */
+#define MX25L6435E_BE32K 0x52 /* Block erase: 32Ko */
+#define MX25L6435E_BE64K 0xD8 /* Block erase: 64Ko */
+#define MX25L6435E_CE_1 0x60 /* Chip erase, or */
+#define MX25L6435E_CE_2 0xC7 /* Chip erase */
+#define MX25L6435E_PP 0x02 /* Page program */
+#define MX25L6435E_CP 0xAD /* Continuous program */
+#define MX25L6435E_DP 0xB9 /* Deep power down */
+#define MX25L6435E_RDP 0xAB /* release from deep power down */
+#define MX25L6435E_RDID 0x9F /* Read identification */
+#define MX25L6435E_RES 0xAB /* Read electronic ID */
+#define MX25L6435E_REMS 0x90 /* Read electronic manufacturer & device ID */
+#define MX25L6435E_REMS2 0xEF /* Read electronic manufacturer & device ID */
+#define MX25L6435E_REMS4 0xDF /* Read electronic manufacturer & device ID */
+#define MX25L6435E_ENSO 0xB1 /* Enter secured OTP */
+#define MX25L6435E_EXSO 0xC1 /* Exit secured OTP */
+#define MX25L6435E_RDSCUR 0x2B /* Read security register */
+#define MX25L6435E_WRSCUR 0x2F /* Write security register */
+#define MX25L6435E_SBLK 0x36 /* Single block lock */
+#define MX25L6435E_SBULK 0x39 /* Single block unlock */
+#define MX25L6435E_RDBLOCK 0x3C /* Block protect read */
+#define MX25L6435E_GBLK 0x7E /* Gang block lock */
+#define MX25L6435E_GBULK 0x98 /* Gang block unlock */
+#define MX25L6435E_NOP 0x00 /* No operation */
+#define MX25L6435E_RSTEN 0x66 /* Reset enable */
+#define MX25L6435E_RST 0x99 /* Reset memory */
+#define MX25L6435E_WPSEL 0x68 /* Write protect selection */
+#define MX25L6435E_ESRY 0x70 /* Enable SO to output RY/BY# */
+#define MX25L6435E_DSRY 0x80 /* Disable SO to output RY/BY# */
+#define MX25L6435E_RLZ_RENH 0xFF /* Release Read Enhanced */
+
+#endif /* MX25L6435E_H_ */
diff --git a/include/media/ov16825.h b/include/media/ov16825.h
new file mode 100644
index 00000000000000..38d4b731fe2879
--- /dev/null
+++ b/include/media/ov16825.h
@@ -0,0 +1,13 @@
+#ifndef __OV16825_H__
+#define __OV16825_H__
+
+#define OV16825_I2C_ADDR 0x36
+
+struct ov16825_platform_data {
+ int (*set_power)(int on);
+
+ u32 ext_clk;
+ unsigned int lanes; /* Number of CSI-2 lanes */
+};
+
+#endif /* __OV16825_H__ */
diff --git a/include/media/ov7740.h b/include/media/ov7740.h
new file mode 100644
index 00000000000000..7769c91ac4c1e9
--- /dev/null
+++ b/include/media/ov7740.h
@@ -0,0 +1,16 @@
+/*
+ * Header for ov7740 driver
+ *
+ *
+ * Author : Maxime JOURDAN
+ *
+ *
+ */
+#ifndef __OV7740_H__
+#define __OV7740_H__
+
+struct ov7740_platform_data {
+ int (*set_power)(int on);
+};
+
+#endif \ No newline at end of file
diff --git a/include/media/tc358746a.h b/include/media/tc358746a.h
new file mode 100644
index 00000000000000..aa75ddac59c310
--- /dev/null
+++ b/include/media/tc358746a.h
@@ -0,0 +1,36 @@
+/*
+ * tc358746a - Toshiba device TC358746A CSI2.0 <-> Parallel bridge
+ *
+ * It can perform protocol conversion in both direction, MIPI to parallel or
+ * parallel to MIPI.
+ *
+ * Author : Eng-Hong SRON <eng-hong.sron@parrot.com>
+ *
+ * Date : Wed Jul 2 09:16:13 CEST 2014
+ *
+ */
+#ifndef __TC358746A_H__
+#define __TC358746A_H__
+
+struct tc358746a_platform_data {
+ int (*set_power)(int on);
+
+ /* External clock in Hz */
+ unsigned long refclk;
+ /* Number of CSI-2 lanes */
+ unsigned int lanes;
+ /* Used to force a different pixelcode for the bridge. By default it
+ * copies the code from the sensor subdevice */
+ enum v4l2_mbus_pixelcode force_subdev_pixcode;
+ /* Length of the calibration delay loop (0 means default)*/
+ unsigned calibration_delay_ms;
+ /* If different from zero the bridge will use this value directly
+ * instead of using the (slow) calibration loop */
+ unsigned phytimdly;
+};
+
+#define TC358746A_I2C_ADDR 0x0e
+#define TC358746A_POWER_ON 1
+#define TC358746A_POWER_OFF 0
+
+#endif /* __TC358746A_H__ */
diff --git a/include/media/tc358764.h b/include/media/tc358764.h
new file mode 100644
index 00000000000000..a2db21d58d7501
--- /dev/null
+++ b/include/media/tc358764.h
@@ -0,0 +1,9 @@
+#ifndef _TC358764_H_
+#define _TC358764_H_
+
+struct tc358764_pdata {
+ // Called with 1 to power on, 0 to power off
+ int (*set_power)(int);
+};
+
+#endif /* _TC358764_H_ */
diff --git a/include/media/tw8834.h b/include/media/tw8834.h
new file mode 100644
index 00000000000000..3d1efee1802cfb
--- /dev/null
+++ b/include/media/tw8834.h
@@ -0,0 +1,22 @@
+/*
+ * Header for tw8834 driver
+ *
+ *
+ * Author : Julien BERAUD
+ *
+ *
+ */
+#ifndef __TW8834_H__
+#define __TW8834_H__
+
+struct tw8834_platform_data {
+ int (*set_power)(int on);
+};
+
+enum {
+ TW8834_POWER_OFF,
+ TW8834_POWER_ON,
+ TW8834_POWER_INPUT_DISABLE,
+ TW8834_POWER_INPUT_ENABLE,
+};
+#endif
diff --git a/include/media/tw9990.h b/include/media/tw9990.h
new file mode 100644
index 00000000000000..d1dc4d5e8410c9
--- /dev/null
+++ b/include/media/tw9990.h
@@ -0,0 +1,47 @@
+/*
+ * Header for tw9990 driver
+ *
+ *
+ * Author : Julien BERAUD
+ *
+ *
+ */
+#ifndef __TW9990_H__
+#define __TW9990_H__
+
+struct tw9990_platform_data {
+ int (*set_power)(int on);
+ int differential_input;
+ int anti_aliasing;
+ int power_saving;
+ int synct_mode; /* default or forced (forced use sync pulse value) */
+ int sync_pulse_amplitude; /* default=56 range=[127..0] */
+ int common_mode_clamp;
+};
+
+/* Differential input flags */
+#define TW9990_DIFFERENTIAL_ENABLED 1
+#define TW9990_DIFFERENTIAL_DISABLED 0
+
+/* Anti-aliasing flags */
+#define TW9990_ANTI_ALIAS_ENABLED 1
+#define TW9990_ANTI_ALIAS_DISABLED 0
+
+/* power saving flags */
+#define TW9990_POWER_SAVE_ENABLED 1
+#define TW9990_POWER_SAVE_DISABLED 0
+
+/* Synct mode flags */
+#define TW9990_SYNCT_DEFAULT 1
+#define TW9990_SYNCT_FORCED 0
+
+/* Common Mode Clamp flags */
+#define TW9990_CM_TOGGLE (0x40)
+#define TW9990_CM_CUR_MODE (0x20)
+#define TW9990_CM_RSTR_FULL_TIME (0x10)
+#define TW9990_CM_RESTORE (0x04)
+
+#define TW9990_POWER_ON 1
+#define TW9990_POWER_OFF 0
+
+#endif
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644
index 00000000000000..10e4c1506eeb2f
--- /dev/null
+++ b/include/net/activity_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef __activity_stats_h
+#define __activity_stats_h
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update(void) {}
+#endif
+
+#endif /* __activity_stats_h */
diff --git a/include/parrot/avicam_dummy_dev.h b/include/parrot/avicam_dummy_dev.h
new file mode 100644
index 00000000000000..88a32f8ea7a884
--- /dev/null
+++ b/include/parrot/avicam_dummy_dev.h
@@ -0,0 +1,70 @@
+
+/*
+ * Avicam dummy device driver
+ *
+ * Based(copied) on soc_camera_platform
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __AVICAM_DUMMY_DEV_H__
+#define __AVICAM_DUMMY_DEV_H__
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/video/avicam.h>
+#include <media/v4l2-mediabus.h>
+
+struct device;
+
+struct avicam_dummy_info {
+ struct v4l2_device *v4l2_dev;
+ int dev_id;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_subdev_frame_interval fi;
+ struct media_pad pad;
+};
+
+static inline void avicam_platform_release(struct device *pdev)
+{
+ dev_info(pdev, "release device\n");
+}
+
+static inline int avicam_dummy_add(struct v4l2_device *v4l2_dev,
+ struct platform_device **pdev,
+ struct avicam_dummy_info *info)
+{
+ int ret;
+
+ if (*pdev)
+ return -EBUSY;
+
+ *pdev = platform_device_alloc(AVICAM_DUMMY_NAME, info->dev_id);
+ if (!*pdev)
+ return -ENOMEM;
+
+ info->v4l2_dev = v4l2_dev;
+
+ (*pdev)->dev.platform_data = info;
+ (*pdev)->dev.release = avicam_platform_release;
+
+ ret = platform_device_add(*pdev);
+ if (ret < 0) {
+ platform_device_put(*pdev);
+ *pdev = NULL;
+ }
+
+ return ret;
+}
+
+static inline void avicam_dummy_del(struct platform_device *pdev)
+{
+ if (!pdev)
+ return;
+
+ platform_device_unregister(pdev);
+}
+
+#endif /* __AVICAM_DUMMY_DEV_H__ */
diff --git a/include/parrot/mfd/dib0700_mfd.h b/include/parrot/mfd/dib0700_mfd.h
new file mode 100644
index 00000000000000..ca7bafbde856cc
--- /dev/null
+++ b/include/parrot/mfd/dib0700_mfd.h
@@ -0,0 +1,20 @@
+/**
+ * linux/drivers/parrot/mfd/dib0700_mfd.h
+ *
+ * Copyright (C) 2015 Parrot S.A.
+ *
+ * author: Ronan Chauvin <ronan.chauvin@parrot.com>
+ * date: 14-Sep-2014
+ *
+ * This file is released under the GPL
+ */
+
+#ifndef _MFD_DIB0700_H
+#define _MFD_DIB0700_H
+
+#include "dib07x0.h"
+
+extern int dib0700_probe(struct usb_interface *intf, int *gpio_base);
+extern void dib0700_device_exit(struct usb_interface *intf);
+
+#endif
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 00000000000000..0791f2728c1bc0
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq),
+
+ TP_STRUCT__entry(
+ __field( u32, cpu_id )
+ __field(unsigned long, targfreq )
+ __field(unsigned long, actualfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = (u32) cpu_id;
+ __entry->targfreq = targfreq;
+ __entry->actualfreq = actualfreq;
+ ),
+
+ TP_printk("cpu=%u targ=%lu actual=%lu",
+ __entry->cpu_id, __entry->targfreq,
+ __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_up,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_down,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu_id )
+ __field(unsigned long, load )
+ __field(unsigned long, curfreq )
+ __field(unsigned long, targfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->load = load;
+ __entry->curfreq = curfreq;
+ __entry->targfreq = targfreq;
+ ),
+
+ TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curfreq,
+ __entry->targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+ TP_PROTO(char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+ TP_printk("%s", __entry->s)
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+ TP_PROTO(char *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+ TP_printk("%s", __entry->s)
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/parrot_trace.h b/include/trace/events/parrot_trace.h
new file mode 100644
index 00000000000000..f40093016bc145
--- /dev/null
+++ b/include/trace/events/parrot_trace.h
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM parrot_trace
+
+#if !defined(_PARROT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _PARROT_TRACE_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(user_kevent_start,
+
+ TP_PROTO(int evt),
+
+ TP_ARGS(evt),
+
+ TP_STRUCT__entry(
+ __field( int, event_start)
+ ),
+
+ TP_fast_assign(
+ __entry->event_start = evt;
+ ),
+
+ TP_printk("start = %d", __entry->event_start)
+)
+
+TRACE_EVENT(user_kevent_stop,
+
+ TP_PROTO(int evt),
+
+ TP_ARGS(evt),
+
+ TP_STRUCT__entry(
+ __field( int, event_stop)
+ ),
+
+ TP_fast_assign(
+ __entry->event_stop = evt;
+ ),
+
+ TP_printk("stop = %d", __entry->event_stop)
+)
+
+TRACE_EVENT(user_kmessage,
+
+ TP_PROTO(char *str),
+
+ TP_ARGS(str),
+
+ TP_STRUCT__entry(
+ __string( message, str)
+ ),
+
+ TP_fast_assign(
+ __assign_str(message, str);
+ ),
+
+ TP_printk("message = %s", __get_str(message))
+)
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sync.h b/include/trace/events/sync.h
new file mode 100644
index 00000000000000..f31bc63ca65d36
--- /dev/null
+++ b/include/trace/events/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include <linux/sync.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ if (timeline->ops->timeline_value_str) {
+ timeline->ops->timeline_value_str(timeline,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+ TP_PROTO(struct sync_fence *fence, int begin),
+
+ TP_ARGS(fence, begin),
+
+ TP_STRUCT__entry(
+ __string(name, fence->name)
+ __field(s32, status)
+ __field(u32, begin)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, fence->name);
+ __entry->status = fence->status;
+ __entry->begin = begin;
+ ),
+
+ TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+ __get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+ TP_PROTO(struct sync_pt *pt),
+
+ TP_ARGS(pt),
+
+ TP_STRUCT__entry(
+ __string(timeline, pt->parent->name)
+ __array(char, value, 32)
+ ),
+
+ TP_fast_assign(
+ __assign_str(timeline, pt->parent->name);
+ if (pt->parent->ops->pt_value_str) {
+ pt->parent->ops->pt_value_str(pt,
+ __entry->value,
+ sizeof(__entry->value));
+ } else {
+ __entry->value[0] = '\0';
+ }
+ ),
+
+ TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
+ );
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
new file mode 100644
index 00000000000000..ca304046d9e2b6
--- /dev/null
+++ b/kernel/power/autosleep.c
@@ -0,0 +1,127 @@
+/*
+ * kernel/power/autosleep.c
+ *
+ * Opportunistic sleep support.
+ *
+ * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm_wakeup.h>
+
+#include "power.h"
+
+static suspend_state_t autosleep_state;
+static struct workqueue_struct *autosleep_wq;
+/*
+ * Note: it is only safe to mutex_lock(&autosleep_lock) if a wakeup_source
+ * is active, otherwise a deadlock with try_to_suspend() is possible.
+ * Alternatively mutex_lock_interruptible() can be used. This will then fail
+ * if an auto_sleep cycle tries to freeze processes.
+ */
+static DEFINE_MUTEX(autosleep_lock);
+static struct wakeup_source *autosleep_ws;
+
+static void try_to_suspend(struct work_struct *work)
+{
+ unsigned int initial_count, final_count;
+
+ if (!pm_get_wakeup_count(&initial_count, true))
+ goto out;
+
+ mutex_lock(&autosleep_lock);
+
+ if (!pm_save_wakeup_count(initial_count)) {
+ mutex_unlock(&autosleep_lock);
+ goto out;
+ }
+
+ if (autosleep_state == PM_SUSPEND_ON) {
+ mutex_unlock(&autosleep_lock);
+ return;
+ }
+ if (autosleep_state >= PM_SUSPEND_MAX)
+ hibernate();
+ else
+ pm_suspend(autosleep_state);
+
+ mutex_unlock(&autosleep_lock);
+
+ if (!pm_get_wakeup_count(&final_count, false))
+ goto out;
+
+ /*
+ * If the wakeup occurred for an unknown reason, wait to prevent the
+ * system from trying to suspend and waking up in a tight loop.
+ */
+ if (final_count == initial_count)
+ schedule_timeout_uninterruptible(HZ / 2);
+
+ out:
+ queue_up_suspend_work();
+}
+
+static DECLARE_WORK(suspend_work, try_to_suspend);
+
+void queue_up_suspend_work(void)
+{
+ if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
+ queue_work(autosleep_wq, &suspend_work);
+}
+
+suspend_state_t pm_autosleep_state(void)
+{
+ return autosleep_state;
+}
+
+int pm_autosleep_lock(void)
+{
+ return mutex_lock_interruptible(&autosleep_lock);
+}
+
+void pm_autosleep_unlock(void)
+{
+ mutex_unlock(&autosleep_lock);
+}
+
+int pm_autosleep_set_state(suspend_state_t state)
+{
+
+#ifndef CONFIG_HIBERNATION
+ if (state >= PM_SUSPEND_MAX)
+ return -EINVAL;
+#endif
+
+ __pm_stay_awake(autosleep_ws);
+
+ mutex_lock(&autosleep_lock);
+
+ autosleep_state = state;
+
+ __pm_relax(autosleep_ws);
+
+ if (state > PM_SUSPEND_ON) {
+ pm_wakep_autosleep_enabled(true);
+ queue_up_suspend_work();
+ } else {
+ pm_wakep_autosleep_enabled(false);
+ }
+
+ mutex_unlock(&autosleep_lock);
+ return 0;
+}
+
+int __init pm_autosleep_init(void)
+{
+ autosleep_ws = wakeup_source_register("autosleep");
+ if (!autosleep_ws)
+ return -ENOMEM;
+
+ autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
+ if (autosleep_wq)
+ return 0;
+
+ wakeup_source_unregister(autosleep_ws);
+ return -ENOMEM;
+}
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 00000000000000..d2a65da9f22c15
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ seq_printf(s, "time (secs) count\n");
+ seq_printf(s, "------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (time_in_suspend_bins[bin] == 0)
+ continue;
+ seq_printf(s, "%4d - %4d %4u\n",
+ bin ? 1 << (bin - 1) : 0, 1 << bin,
+ time_in_suspend_bins[bin]);
+ }
+ return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+ .open = suspend_time_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+ &suspend_time_debug_fops);
+ if (!d) {
+ pr_err("Failed to create suspend_time debug file\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+ read_persistent_clock(&suspend_time_before);
+
+ return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+ struct timespec after;
+
+ read_persistent_clock(&after);
+
+ after = timespec_sub(after, suspend_time_before);
+
+ time_in_suspend_bins[fls(after.tv_sec)]++;
+
+ pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+ after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+ .suspend = suspend_time_syscore_suspend,
+ .resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+ register_syscore_ops(&suspend_time_syscore_ops);
+
+ return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+ unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 00000000000000..579700665e8cea
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,215 @@
+/*
+ * kernel/power/wakelock.c
+ *
+ * User space wakeup sources support.
+ *
+ * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * This code is based on the analogous interface allowing user space to
+ * manipulate wakelocks on Android.
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+
+#define WL_NUMBER_LIMIT 100
+#define WL_GC_COUNT_MAX 100
+#define WL_GC_TIME_SEC 300
+
+static DEFINE_MUTEX(wakelocks_lock);
+
+struct wakelock {
+ char *name;
+ struct rb_node node;
+ struct wakeup_source ws;
+ struct list_head lru;
+};
+
+static struct rb_root wakelocks_tree = RB_ROOT;
+static LIST_HEAD(wakelocks_lru_list);
+static unsigned int number_of_wakelocks;
+static unsigned int wakelocks_gc_count;
+
+ssize_t pm_show_wakelocks(char *buf, bool show_active)
+{
+ struct rb_node *node;
+ struct wakelock *wl;
+ char *str = buf;
+ char *end = buf + PAGE_SIZE;
+
+ mutex_lock(&wakelocks_lock);
+
+ for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
+ wl = rb_entry(node, struct wakelock, node);
+ if (wl->ws.active == show_active)
+ str += scnprintf(str, end - str, "%s ", wl->name);
+ }
+ if (str > buf)
+ str--;
+
+ str += scnprintf(str, end - str, "\n");
+
+ mutex_unlock(&wakelocks_lock);
+ return (str - buf);
+}
+
+static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
+ bool add_if_not_found)
+{
+ struct rb_node **node = &wakelocks_tree.rb_node;
+ struct rb_node *parent = *node;
+ struct wakelock *wl;
+
+ while (*node) {
+ int diff;
+
+ parent = *node;
+ wl = rb_entry(*node, struct wakelock, node);
+ diff = strncmp(name, wl->name, len);
+ if (diff == 0) {
+ if (wl->name[len])
+ diff = -1;
+ else
+ return wl;
+ }
+ if (diff < 0)
+ node = &(*node)->rb_left;
+ else
+ node = &(*node)->rb_right;
+ }
+ if (!add_if_not_found)
+ return ERR_PTR(-EINVAL);
+
+ if (number_of_wakelocks > WL_NUMBER_LIMIT)
+ return ERR_PTR(-ENOSPC);
+
+ /* Not found, we have to add a new one. */
+ wl = kzalloc(sizeof(*wl), GFP_KERNEL);
+ if (!wl)
+ return ERR_PTR(-ENOMEM);
+
+ wl->name = kstrndup(name, len, GFP_KERNEL);
+ if (!wl->name) {
+ kfree(wl);
+ return ERR_PTR(-ENOMEM);
+ }
+ wl->ws.name = wl->name;
+ wakeup_source_add(&wl->ws);
+ rb_link_node(&wl->node, parent, node);
+ rb_insert_color(&wl->node, &wakelocks_tree);
+ list_add(&wl->lru, &wakelocks_lru_list);
+ number_of_wakelocks++;
+ return wl;
+}
+
+int pm_wake_lock(const char *buf)
+{
+ const char *str = buf;
+ struct wakelock *wl;
+ u64 timeout_ns = 0;
+ size_t len;
+ int ret = 0;
+
+ while (*str && !isspace(*str))
+ str++;
+
+ len = str - buf;
+ if (!len)
+ return -EINVAL;
+
+ if (*str && *str != '\n') {
+ /* Find out if there's a valid timeout string appended. */
+ ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
+ if (ret)
+ return -EINVAL;
+ }
+
+ mutex_lock(&wakelocks_lock);
+
+ wl = wakelock_lookup_add(buf, len, true);
+ if (IS_ERR(wl)) {
+ ret = PTR_ERR(wl);
+ goto out;
+ }
+ if (timeout_ns) {
+ u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
+
+ do_div(timeout_ms, NSEC_PER_MSEC);
+ __pm_wakeup_event(&wl->ws, timeout_ms);
+ } else {
+ __pm_stay_awake(&wl->ws);
+ }
+
+ list_move(&wl->lru, &wakelocks_lru_list);
+
+ out:
+ mutex_unlock(&wakelocks_lock);
+ return ret;
+}
+
+static void wakelocks_gc(void)
+{
+ struct wakelock *wl, *aux;
+ ktime_t now = ktime_get();
+
+ list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
+ u64 idle_time_ns;
+ bool active;
+
+ spin_lock_irq(&wl->ws.lock);
+ idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
+ active = wl->ws.active;
+ spin_unlock_irq(&wl->ws.lock);
+
+ if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
+ break;
+
+ if (!active) {
+ wakeup_source_remove(&wl->ws);
+ rb_erase(&wl->node, &wakelocks_tree);
+ list_del(&wl->lru);
+ kfree(wl->name);
+ kfree(wl);
+ number_of_wakelocks--;
+ }
+ }
+ wakelocks_gc_count = 0;
+}
+
+int pm_wake_unlock(const char *buf)
+{
+ struct wakelock *wl;
+ size_t len;
+ int ret = 0;
+
+ len = strlen(buf);
+ if (!len)
+ return -EINVAL;
+
+ if (buf[len-1] == '\n')
+ len--;
+
+ if (!len)
+ return -EINVAL;
+
+ mutex_lock(&wakelocks_lock);
+
+ wl = wakelock_lookup_add(buf, len, false);
+ if (IS_ERR(wl)) {
+ ret = PTR_ERR(wl);
+ goto out;
+ }
+ __pm_relax(&wl->ws);
+ list_move(&wl->lru, &wakelocks_lru_list);
+ if (++wakelocks_gc_count > WL_GC_COUNT_MAX)
+ wakelocks_gc();
+
+ out:
+ mutex_unlock(&wakelocks_lock);
+ return ret;
+}
diff --git a/lucie/Config.in b/lucie/Config.in
new file mode 100644
index 00000000000000..0cbf541f64e6d4
--- /dev/null
+++ b/lucie/Config.in
@@ -0,0 +1,3 @@
+config BR2_PACKAGE_LINUX_PRESENT
+ bool
+ default y
diff --git a/lucie/linux.mk b/lucie/linux.mk
new file mode 100644
index 00000000000000..f2726f4521e1f4
--- /dev/null
+++ b/lucie/linux.mk
@@ -0,0 +1,27 @@
+#############################################################
+#
+# linux
+#
+#############################################################
+
+LINUX_DIR:=$(PACKAGE_DIR)/linux
+LINUX_EXPORTED_HEADERS := \
+ $(LINUX_DIR)/drivers/parrot/sound/p7_aai/aai_ioctl.h \
+# $(LINUX_DIR)/drivers/parrot/video/p6fb_ioctl.h \
+# $(LINUX_DIR)/drivers/parrot/char/dmamem_ioctl.h \
+# $(LINUX_DIR)/drivers/parrot/char/pwm/pwm_ioctl.h
+
+ifndef LINUXCONF_MK
+ $(error LINUXCONF_MK is not defined)
+endif
+
+include $(LINUXCONF_MK)
+
+#XXX overide toolchain
+LINUX_MAKE_ARGS= \
+ ARCH=$(ARCH) \
+ CROSS_COMPILE=/opt/arm-2012.03/bin/arm-none-linux-gnueabi- \
+ -C $(LINUX_DIR) \
+ INSTALL_MOD_PATH=$(TARGET_DIR) \
+ INSTALL_MOD_STRIP=1 \
+ O=$(LINUX_DIR_BUILD)
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 00000000000000..8a3e93470069cf
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,115 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+ int i;
+ unsigned long flags;
+ ktime_t now;
+ s64 delta;
+
+ spin_lock_irqsave(&activity_lock, flags);
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+ for (i = BUCKET_MAX - 1; i >= 0; i--) {
+ /*
+ * Check if the time delta between network activity is within the
+ * minimum bucket range.
+ */
+ if (delta < (1000000000ULL << i))
+ continue;
+
+ activity_stats[i]++;
+ last_transmit = now;
+ break;
+ }
+ spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i;
+ int len;
+ char *p = page;
+
+ /* Only print if offset is 0, or we have enough buffer space */
+ if (off || count < (30 * BUCKET_MAX + 22))
+ return -ENOMEM;
+
+ len = snprintf(p, count, "Min Bucket(sec) Count\n");
+ count -= len;
+ p += len;
+
+ for (i = 0; i < BUCKET_MAX; i++) {
+ len = snprintf(p, count, "%15d %lu\n", 1 << i, activity_stats[i]);
+ count -= len;
+ p += len;
+ }
+ *eof = 1;
+
+ return p - page;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ suspend_time = ktime_get_real();
+ break;
+
+ case PM_POST_SUSPEND:
+ suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+ last_transmit = ktime_sub(last_transmit, suspend_time);
+ }
+
+ return 0;
+}
+
+static struct notifier_block activity_stats_notifier_block = {
+ .notifier_call = activity_stats_notifier,
+};
+
+static int __init activity_stats_init(void)
+{
+ create_proc_read_entry("activity", S_IRUGO,
+ init_net.proc_net_stat, activity_stats_read_proc, NULL);
+ return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 00000000000000..0cbbf10026a67c
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+/*
+ * Generate a sysfs show/store attribute pair (mode 0644) backed by the
+ * given int lvalue.  The store handler accepts only a non-negative
+ * decimal value; anything else yields -EINVAL.
+ */
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+			struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+			struct kobj_attribute *attr, \
+			const char *buf, size_t count) \
+{ \
+	int val, ret; \
+	ret = sscanf(buf, "%d", &val); \
+	if (ret != 1) \
+		return -EINVAL; \
+	if (val < 0) \
+		return -EINVAL; \
+	_var = val; \
+	return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+/* min/default/max send-buffer knobs mirroring sysctl_tcp_wmem[]. */
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+/* min/default/max receive-buffer knobs mirroring sysctl_tcp_rmem[]. */
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+/* All knobs published together under /sys/kernel/ipv4. */
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+/*
+ * Create /sys/kernel/ipv4 and populate it with the TCP buffer knobs.
+ * The kobject is intentionally never released on success: the files
+ * live for the lifetime of the kernel.
+ */
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *kobj;
+	int err;
+
+	kobj = kobject_create_and_add("ipv4", kernel_kobj);
+	if (kobj == NULL)
+		return -ENOMEM;
+
+	err = sysfs_create_group(kobj, &ipv4_attr_group);
+	if (err)
+		kobject_put(kobj);
+
+	return err;
+}
+
+subsys_initcall(sysfs_ipv4_init);
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 00000000000000..7efbab9b4bed92
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,2977 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+	((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+/* Permissions for the per-interface proc files; tunable at module load. */
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+/* On Android the ctrl file is world-writable; gid checks gate real access. */
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+#else
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUSR;
+#endif
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+static gid_t proc_stats_readall_gid = AID_NET_BW_STATS;
+static gid_t proc_ctrl_write_gid = AID_NET_BW_ACCT;
+#else
+/* 0 means, don't limit anybody */
+static gid_t proc_stats_readall_gid;
+static gid_t proc_ctrl_write_gid;
+#endif
+module_param_named(stats_readall_gid, proc_stats_readall_gid, uint,
+		S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_gid, proc_ctrl_write_gid, uint,
+		S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ * - the iface stats handling will not act on notifications.
+ * - iptables matches will never match.
+ * - ctrl commands silently succeed.
+ * - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ * - don't create proc specific structs to track tags
+ * - don't check that active tag stats exceed some limits.
+ * - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+		S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+/* Bitmask selecting the *_DEBUG() categories; see xt_qtaguid_internal.h. */
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets used to the new fields
+ * that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+
+/*
+ * Ordering of locks:
+ * outer locks:
+ * iface_stat_list_lock
+ * sock_tag_list_lock
+ * inner locks:
+ * uid_tag_data_tree_lock
+ * tag_counter_set_list_lock
+ * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock
+ * is acquired.
+ *
+ * Call tree with all lock holders as of 2012-04-27:
+ *
+ * iface_stat_fmt_proc_read()
+ * iface_stat_list_lock
+ * (struct iface_stat)
+ *
+ * qtaguid_ctrl_proc_read()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * (struct proc_qtu_data->sock_tag_list)
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * (proc_qtu_data_tree)
+ * iface_stat_list_lock
+ *
+ * qtaguid_stats_proc_read()
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ *
+ * qtudev_open()
+ * uid_tag_data_tree_lock
+ *
+ * qtudev_release()
+ * sock_tag_data_list_lock
+ * uid_tag_data_tree_lock
+ * prdebug_full_state()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ * iface_stat_list_lock
+ *
+ * iface_netdev_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inetaddr_event_handler()
+ * iface_stat_create()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * iface_inet6addr_event_handler()
+ * iface_stat_create_ipv6()
+ * iface_stat_list_lock
+ * iface_stat_update()
+ * iface_stat_list_lock
+ *
+ * qtaguid_mt()
+ * account_for_uid()
+ * if_tag_stat_update()
+ * get_sock_stat()
+ * sock_tag_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ * tag_stat_update()
+ * get_active_counter_set()
+ * tag_counter_set_list_lock
+ *
+ *
+ * qtaguid_ctrl_parse()
+ * ctrl_cmd_delete()
+ * sock_tag_list_lock
+ * tag_counter_set_list_lock
+ * iface_stat_list_lock
+ * struct iface_stat->tag_stat_list_lock
+ * uid_tag_data_tree_lock
+ * ctrl_cmd_counter_set()
+ * tag_counter_set_list_lock
+ * ctrl_cmd_tag()
+ * sock_tag_list_lock
+ * (sock_tag_tree)
+ * get_tag_ref()
+ * uid_tag_data_tree_lock
+ * (uid_tag_data_tree)
+ * uid_tag_data_tree_lock
+ * (proc_qtu_data_tree)
+ * ctrl_cmd_untag()
+ * sock_tag_list_lock
+ * uid_tag_data_tree_lock
+ *
+ */
+/* All known interfaces, appended to by iface_alloc(). */
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+/* Tagged sockets, keyed by struct sock pointer. */
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+/* Active counter-set selection per (uid) tag. */
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+/* Per-UID tag bookkeeping, keyed by uid. */
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
+/*
+ * True when the caller may tag sockets on behalf of other UIDs:
+ * root (fsuid 0), no gid restriction configured, or membership in
+ * the configured ctrl_write gid.
+ */
+static bool can_manipulate_uids(void)
+{
+	/* root pwnd */
+	return unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_gid)
+		|| in_egroup_p(proc_ctrl_write_gid);
+}
+
+/* A caller may always act as itself; otherwise the ctrl-gid rules apply. */
+static bool can_impersonate_uid(uid_t uid)
+{
+	return uid == current_fsuid() || can_manipulate_uids();
+}
+
+/*
+ * True when the caller may read stats belonging to 'uid': root, the
+ * owner itself, no gid restriction configured, or membership in the
+ * stats_readall gid.
+ */
+static bool can_read_other_uid_stats(uid_t uid)
+{
+	/* root pwnd */
+	return unlikely(!current_fsuid()) || uid == current_fsuid()
+		|| unlikely(!proc_stats_readall_gid)
+		|| in_egroup_p(proc_stats_readall_gid);
+}
+
+/* Accumulate bytes/packets into one (set, direction, protocol) counter. */
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+				enum ifs_tx_rx direction,
+				enum ifs_proto ifs_proto,
+				int bytes,
+				int packets)
+{
+	counters->bpc[set][direction][ifs_proto].bytes += bytes;
+	counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+/* Total byte count across TCP, UDP and "other" for one set/direction. */
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+				int set,
+				enum ifs_tx_rx direction)
+{
+	uint64_t total = counters->bpc[set][direction][IFS_TCP].bytes;
+
+	total += counters->bpc[set][direction][IFS_UDP].bytes;
+	total += counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+	return total;
+}
+
+/* Total packet count across TCP, UDP and "other" for one set/direction. */
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+				int set,
+				enum ifs_tx_rx direction)
+{
+	uint64_t total = counters->bpc[set][direction][IFS_TCP].packets;
+
+	total += counters->bpc[set][direction][IFS_UDP].packets;
+	total += counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+	return total;
+}
+
+/* Look up the tag_node keyed by 'tag'; NULL when absent. */
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct rb_node *pos;
+
+	for (pos = root->rb_node; pos; ) {
+		struct tag_node *entry = rb_entry(pos, struct tag_node, node);
+		int cmp;
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " node=%p data=%p\n", tag, pos, entry);
+		cmp = tag_compare(tag, entry->tag);
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " data.tag=0x%llx (uid=%u) res=%d\n",
+			 tag, entry->tag, get_uid_from_tag(entry->tag), cmp);
+		if (cmp < 0)
+			pos = pos->rb_left;
+		else if (cmp > 0)
+			pos = pos->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+/* Insert a tag_node keyed by data->tag; a duplicate key is a bug. */
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+	struct rb_node *parent = NULL;
+	struct rb_node **link = &root->rb_node;
+
+	/* Descend to the leaf position where data->tag belongs. */
+	while (*link) {
+		struct tag_node *entry = rb_entry(*link, struct tag_node,
+						  node);
+		int cmp = tag_compare(data->tag, entry->tag);
+		RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+			 " (uid=%u)\n", __func__,
+			 entry->tag,
+			 get_uid_from_tag(entry->tag));
+		parent = *link;
+		if (cmp < 0)
+			link = &(*link)->rb_left;
+		else if (cmp > 0)
+			link = &(*link)->rb_right;
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, link);
+	rb_insert_color(&data->node, root);
+}
+
+/* Insert a tag_stat via its embedded tag_node. */
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+/* Look up a tag_stat by tag; NULL when absent. */
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *found = tag_node_tree_search(root, tag);
+
+	return found ? rb_entry(&found->node, struct tag_stat, tn.node) : NULL;
+}
+
+/* Insert a tag_counter_set via its embedded tag_node. */
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+					struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+/* Look up a tag_counter_set by tag; NULL when absent. */
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+							tag_t tag)
+{
+	struct tag_node *found = tag_node_tree_search(root, tag);
+
+	return found ? rb_entry(&found->node, struct tag_counter_set, tn.node)
+		     : NULL;
+}
+
+/* Insert a tag_ref via its embedded tag_node. */
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+/* Look up a tag_ref by tag; NULL when absent. */
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *found = tag_node_tree_search(root, tag);
+
+	return found ? rb_entry(&found->node, struct tag_ref, tn.node) : NULL;
+}
+
+/* Find the sock_tag for a socket, keyed by struct sock pointer value. */
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+					const struct sock *sk)
+{
+	struct rb_node *pos;
+
+	for (pos = root->rb_node; pos; ) {
+		struct sock_tag *entry = rb_entry(pos, struct sock_tag,
+						  sock_node);
+		if (sk < entry->sk)
+			pos = pos->rb_left;
+		else if (sk > entry->sk)
+			pos = pos->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+/* Insert a sock_tag keyed by its struct sock pointer; duplicates are a bug. */
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+	struct rb_node *parent = NULL;
+	struct rb_node **link = &root->rb_node;
+
+	/* Descend to the leaf position where data->sk belongs. */
+	while (*link) {
+		struct sock_tag *entry = rb_entry(*link, struct sock_tag,
+						  sock_node);
+		parent = *link;
+		if (data->sk < entry->sk)
+			link = &(*link)->rb_left;
+		else if (data->sk > entry->sk)
+			link = &(*link)->rb_right;
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->sock_node, parent, link);
+	rb_insert_color(&data->sock_node, root);
+}
+
+/*
+ * Free every sock_tag in the given tree, dropping the struct socket
+ * reference each entry holds (taken when the socket was tagged).
+ * The next node is fetched before erasing the current one.
+ */
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+	struct rb_node *node;
+	struct sock_tag *st_entry;
+
+	node = rb_first(st_to_free_tree);
+	while (node) {
+		st_entry = rb_entry(node, struct sock_tag, sock_node);
+		node = rb_next(node);
+		CT_DEBUG("qtaguid: %s(): "
+			 "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+			 st_entry->sk,
+			 st_entry->tag,
+			 get_uid_from_tag(st_entry->tag));
+		rb_erase(&st_entry->sock_node, st_to_free_tree);
+		sockfd_put(st_entry->socket);
+		kfree(st_entry);
+	}
+}
+
+/* Find the per-process qtu data for 'pid'; NULL when absent. */
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+						const pid_t pid)
+{
+	struct rb_node *pos;
+
+	for (pos = root->rb_node; pos; ) {
+		struct proc_qtu_data *entry = rb_entry(pos,
+						       struct proc_qtu_data,
+						       node);
+		if (pid < entry->pid)
+			pos = pos->rb_left;
+		else if (pid > entry->pid)
+			pos = pos->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+/* Insert per-process qtu data keyed by pid; duplicates are a bug. */
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+				struct rb_root *root)
+{
+	struct rb_node *parent = NULL;
+	struct rb_node **link = &root->rb_node;
+
+	/* Descend to the leaf position where data->pid belongs. */
+	while (*link) {
+		struct proc_qtu_data *entry = rb_entry(*link,
+						       struct proc_qtu_data,
+						       node);
+		parent = *link;
+		if (data->pid < entry->pid)
+			link = &(*link)->rb_left;
+		else if (data->pid > entry->pid)
+			link = &(*link)->rb_right;
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, link);
+	rb_insert_color(&data->node, root);
+}
+
+/* Insert per-UID tag data keyed by uid; duplicates are a bug. */
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+				struct rb_root *root)
+{
+	struct rb_node *parent = NULL;
+	struct rb_node **link = &root->rb_node;
+
+	/* Descend to the leaf position where data->uid belongs. */
+	while (*link) {
+		struct uid_tag_data *entry = rb_entry(*link,
+						      struct uid_tag_data,
+						      node);
+		parent = *link;
+		if (data->uid < entry->uid)
+			link = &(*link)->rb_left;
+		else if (data->uid > entry->uid)
+			link = &(*link)->rb_right;
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, link);
+	rb_insert_color(&data->node, root);
+}
+
+/* Find the per-UID tag data for 'uid'; NULL when absent. */
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+						uid_t uid)
+{
+	struct rb_node *pos;
+
+	for (pos = root->rb_node; pos; ) {
+		struct uid_tag_data *entry = rb_entry(pos,
+						      struct uid_tag_data,
+						      node);
+		if (uid < entry->uid)
+			pos = pos->rb_left;
+		else if (uid > entry->uid)
+			pos = pos->rb_right;
+		else
+			return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failures, and lock is not held.
+ * If found is not NULL:
+ *   sets *found to true if not allocated (entry already existed).
+ *   sets *found to false if allocated.
+ * NOTE(review): GFP_ATOMIC and the unsynchronized tree walk imply the
+ * caller holds uid_tag_data_tree_lock — confirm at each call site.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+	struct uid_tag_data *utd_entry;
+
+	/* Look for top level uid_tag_data for the UID */
+	utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+	DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+	/* Pointer-to-bool conversion: true iff the entry already existed. */
+	if (found_res)
+		*found_res = utd_entry;
+	if (utd_entry)
+		return utd_entry;
+
+	utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+	if (!utd_entry) {
+		pr_err("qtaguid: get_uid_data(%u): "
+		       "tag data alloc failed\n", uid);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	utd_entry->uid = uid;
+	utd_entry->tag_ref_tree = RB_ROOT;
+	uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+	DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+	return utd_entry;
+}
+
+/*
+ * Allocate and insert a tag_ref for new_tag under the given UID entry,
+ * enforcing the per-UID max_sock_tags quota.
+ * Never returns NULL. Either PTR_ERR or a valid ptr.
+ * Caller is expected to hold uid_tag_data_tree_lock (hence GFP_ATOMIC).
+ */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+				struct uid_tag_data *utd_entry)
+{
+	struct tag_ref *tr_entry;
+	int res;
+
+	/* Quota check before allocating anything. */
+	if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+		pr_info("qtaguid: new_tag_ref(0x%llx): "
+			"tag ref alloc quota exceeded. max=%d\n",
+			new_tag, max_sock_tags);
+		res = -EMFILE;
+		goto err_res;
+
+	}
+
+	tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+	if (!tr_entry) {
+		pr_err("qtaguid: new_tag_ref(0x%llx): "
+		       "tag ref alloc failed\n",
+		       new_tag);
+		res = -ENOMEM;
+		goto err_res;
+	}
+	tr_entry->tn.tag = new_tag;
+	/* tr_entry->num_sock_tags handled by caller */
+	utd_entry->num_active_tags++;
+	tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+	DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+		 " inserted new tag ref %p\n",
+		 new_tag, tr_entry);
+	return tr_entry;
+
+err_res:
+	return ERR_PTR(res);
+}
+
+/*
+ * Find the tag_ref for full_tag under its owning UID's entry,
+ * creating the UID entry itself if missing.  Returns NULL when the
+ * tag_ref does not exist (or when UID entry creation failed, in which
+ * case *utd_res carries the PTR_ERR).
+ */
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+				struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+	bool found_utd;
+	uid_t uid = get_uid_from_tag(full_tag);
+
+	DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+		 full_tag, uid);
+
+	utd_entry = get_uid_data(uid, &found_utd);
+	if (IS_ERR_OR_NULL(utd_entry)) {
+		if (utd_res)
+			*utd_res = utd_entry;
+		return NULL;
+	}
+
+	tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/*
+ * Find-or-create the tag_ref for full_tag, taking
+ * uid_tag_data_tree_lock around the lookup/creation.
+ * Never returns NULL. Either PTR_ERR or a valid ptr.
+ */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+				struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+		 full_tag);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+	/* The UID entry must exist here: lookup creates it or errors out. */
+	BUG_ON(IS_ERR_OR_NULL(utd_entry));
+	if (!tr_entry)
+		tr_entry = new_tag_ref(full_tag, utd_entry);
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/*
+ * Checks and maybe frees the UID Tag Data entry: freed only once it
+ * holds no tag refs and no proc_qtu_data references it.  The BUG_ONs
+ * enforce the invariant that num_active_tags matches the ref tree.
+ */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+	/* Are we done with the UID tag data entry? */
+	if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+		!utd_entry->num_pqd) {
+		DR_DEBUG("qtaguid: %s(): "
+			 "erase utd_entry=%p uid=%u "
+			 "by pid=%u tgid=%u uid=%u\n", __func__,
+			 utd_entry, utd_entry->uid,
+			 current->pid, current->tgid, current_fsuid());
+		BUG_ON(utd_entry->num_active_tags);
+		rb_erase(&utd_entry->node, &uid_tag_data_tree);
+		kfree(utd_entry);
+	} else {
+		DR_DEBUG("qtaguid: %s(): "
+			 "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+			 __func__, utd_entry, utd_entry->num_active_tags,
+			 utd_entry->num_pqd);
+		BUG_ON(!(utd_entry->num_active_tags ||
+			 utd_entry->num_pqd));
+	}
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements refcount of utd_entry, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees.
+ * (No-op while num_sock_tags is still non-zero.)
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+					struct uid_tag_data *utd_entry)
+{
+	DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+		 tr_entry, tr_entry->tn.tag,
+		 get_uid_from_tag(tr_entry->tn.tag));
+	if (!tr_entry->num_sock_tags) {
+		BUG_ON(!utd_entry->num_active_tags);
+		utd_entry->num_active_tags--;
+		rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+		DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+		kfree(tr_entry);
+	}
+}
+
+/*
+ * Drop unused tag refs under a UID entry.  If full_tag carries no
+ * acct-tag component, every unused ref is dropped; otherwise only the
+ * ref matching full_tag exactly.  The iterator fetches rb_next()
+ * before the current node may be freed.
+ */
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+	struct rb_node *node;
+	struct tag_ref *tr_entry;
+	tag_t acct_tag;
+
+	DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+		 full_tag, get_uid_from_tag(full_tag));
+	acct_tag = get_atag_from_tag(full_tag);
+	node = rb_first(&utd_entry->tag_ref_tree);
+	while (node) {
+		tr_entry = rb_entry(node, struct tag_ref, tn.node);
+		node = rb_next(node);
+		if (!acct_tag || tr_entry->tn.tag == full_tag)
+			free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+	}
+}
+
+/*
+ * Legacy procfs read handler printing the u64 that the entry's ->data
+ * points at, followed by a newline.
+ */
+static int read_proc_u64(char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len;
+	uint64_t value;
+	char *p = page;
+	uint64_t *iface_entry = data;
+
+	if (!data)
+		return 0;
+
+	value = *iface_entry;
+	p += sprintf(p, "%llu\n", value);
+	/* Legacy procfs convention: report only the bytes past 'off'. */
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/*
+ * Legacy procfs read handler printing the bool that the entry's ->data
+ * points at as "0" or "1", followed by a newline.
+ */
+static int read_proc_bool(char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	int len;
+	bool value;
+	char *p = page;
+	bool *bool_entry = data;
+
+	if (!data)
+		return 0;
+
+	value = *bool_entry;
+	p += sprintf(p, "%u\n", value);
+	/* Legacy procfs convention: report only the bytes past 'off'. */
+	len = (p - page) - off;
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+	return len;
+}
+
+/*
+ * Return the counter set currently selected for the tag's UID
+ * (0 when none was explicitly configured).
+ */
+static int get_active_counter_set(tag_t tag)
+{
+	int active_set = 0;
+	struct tag_counter_set *tcs;
+
+	MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+		 " (uid=%u)\n",
+		 tag, get_uid_from_tag(tag));
+	/* For now we only handle UID tags for active sets */
+	tag = get_utag_from_tag(tag);
+	spin_lock_bh(&tag_counter_set_list_lock);
+	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (tcs)
+		active_set = tcs->active_set;
+	spin_unlock_bh(&tag_counter_set_list_lock);
+	return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+	struct iface_stat *entry;
+
+	if (ifname == NULL) {
+		pr_info("qtaguid: iface_stat: get() NULL device name\n");
+		return NULL;
+	}
+
+	/* Linear scan over the known interfaces; first name match wins. */
+	list_for_each_entry(entry, &iface_stat_list, list) {
+		if (strcmp(ifname, entry->ifname) == 0)
+			return entry;
+	}
+
+	return NULL;
+}
+
+/*
+ * /proc read handler for iface_stat_all (fmt 1) and iface_stat_fmt
+ * (fmt 2).  Emits one line per interface; fmt 2 also prints a header
+ * line.  items_to_skip supports resumed reads.  Returns the number of
+ * bytes written; on buffer exhaustion the partial last line is dropped
+ * and *eof stays clear so the caller can re-issue with a skip count.
+ */
+static int iface_stat_fmt_proc_read(char *page, char **num_items_returned,
+				off_t items_to_skip, int char_count,
+				int *eof, void *data)
+{
+	char *outp = page;
+	int item_index = 0;
+	int len;
+	/*
+	 * The data is just 1 (old) or 2 (uses fmt).  Cast through
+	 * uintptr_t: a direct pointer-to-int cast truncates on 64-bit
+	 * and triggers -Wpointer-to-int-cast.
+	 */
+	int fmt = (int)(uintptr_t)data;
+	struct iface_stat *iface_entry;
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct rtnl_link_stats64 no_dev_stats = {0};
+
+	if (unlikely(module_passive)) {
+		*eof = 1;
+		return 0;
+	}
+
+	CT_DEBUG("qtaguid:proc iface_stat_fmt "
+		 "pid=%u tgid=%u uid=%u "
+		 "page=%p *num_items_returned=%p off=%ld "
+		 "char_count=%d *eof=%d\n",
+		 current->pid, current->tgid, current_fsuid(),
+		 page, *num_items_returned,
+		 items_to_skip, char_count, *eof);
+
+	if (*eof)
+		return 0;
+
+	/* Header line for the fmt variant, counted as item 0. */
+	if (fmt == 2 && item_index++ >= items_to_skip) {
+		len = snprintf(outp, char_count,
+			       "ifname "
+			       "total_skb_rx_bytes total_skb_rx_packets "
+			       "total_skb_tx_bytes total_skb_tx_packets\n"
+			);
+		if (len >= char_count) {
+			*outp = '\0';
+			return outp - page;
+		}
+		outp += len;
+		char_count -= len;
+		(*num_items_returned)++;
+	}
+
+	/*
+	 * This lock will prevent iface_stat_update() from changing active,
+	 * and in turn prevent an interface from unregistering itself.
+	 */
+	spin_lock_bh(&iface_stat_list_lock);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		if (item_index++ < items_to_skip)
+			continue;
+
+		/* Inactive interfaces report zeroed device stats. */
+		if (iface_entry->active) {
+			stats = dev_get_stats(iface_entry->net_dev,
+					      &dev_stats);
+		} else {
+			stats = &no_dev_stats;
+		}
+		/*
+		 * If the meaning of the data changes, then update the fmtX
+		 * string.
+		 */
+		if (fmt == 1) {
+			len = snprintf(
+				outp, char_count,
+				"%s %d "
+				"%llu %llu %llu %llu "
+				"%llu %llu %llu %llu\n",
+				iface_entry->ifname,
+				iface_entry->active,
+				iface_entry->totals_via_dev[IFS_RX].bytes,
+				iface_entry->totals_via_dev[IFS_RX].packets,
+				iface_entry->totals_via_dev[IFS_TX].bytes,
+				iface_entry->totals_via_dev[IFS_TX].packets,
+				stats->rx_bytes, stats->rx_packets,
+				stats->tx_bytes, stats->tx_packets
+				);
+		} else {
+			len = snprintf(
+				outp, char_count,
+				"%s "
+				"%llu %llu %llu %llu\n",
+				iface_entry->ifname,
+				iface_entry->totals_via_skb[IFS_RX].bytes,
+				iface_entry->totals_via_skb[IFS_RX].packets,
+				iface_entry->totals_via_skb[IFS_TX].bytes,
+				iface_entry->totals_via_skb[IFS_TX].packets
+				);
+		}
+		if (len >= char_count) {
+			spin_unlock_bh(&iface_stat_list_lock);
+			*outp = '\0';
+			return outp - page;
+		}
+		outp += len;
+		char_count -= len;
+		(*num_items_returned)++;
+	}
+	spin_unlock_bh(&iface_stat_list_lock);
+
+	*eof = 1;
+	return outp - page;
+}
+
+/*
+ * Workqueue callback creating /proc/net/xt_qtaguid/iface_stat/<iface>/
+ * and its counter files.  Runs in process context because the address
+ * notifiers that trigger it are atomic.  Frees the work item when done.
+ * NOTE(review): failures of the individual create_proc_read_entry()
+ * calls are not checked.
+ */
+static void iface_create_proc_worker(struct work_struct *work)
+{
+	struct proc_dir_entry *proc_entry;
+	struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+						   iface_work);
+	struct iface_stat *new_iface = isw->iface_entry;
+
+	/* iface_entries are not deleted, so safe to manipulate. */
+	proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+	if (IS_ERR_OR_NULL(proc_entry)) {
+		pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+		kfree(isw);
+		return;
+	}
+
+	new_iface->proc_ptr = proc_entry;
+
+	create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry,
+			read_proc_u64,
+			&new_iface->totals_via_dev[IFS_TX].bytes);
+	create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry,
+			read_proc_u64,
+			&new_iface->totals_via_dev[IFS_RX].bytes);
+	create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry,
+			read_proc_u64,
+			&new_iface->totals_via_dev[IFS_TX].packets);
+	create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry,
+			read_proc_u64,
+			&new_iface->totals_via_dev[IFS_RX].packets);
+	create_proc_read_entry("active", proc_iface_perms, proc_entry,
+			read_proc_bool, &new_iface->active);
+
+	IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+		 "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+	kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ * On deactivate the net_dev argument is only used for the debug
+ * refcount print; the entry's own pointer is cleared first.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+				struct net_device *net_dev,
+				bool activate)
+{
+	if (activate) {
+		entry->net_dev = net_dev;
+		entry->active = true;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "enable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 percpu_read(*net_dev->pcpu_refcnt));
+	} else {
+		entry->active = false;
+		entry->net_dev = NULL;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "disable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 percpu_read(*net_dev->pcpu_refcnt));
+
+	}
+}
+
+/*
+ * Caller must hold iface_stat_list_lock — hence all GFP_ATOMIC
+ * allocations.  Allocates and activates a new iface_stat for net_dev,
+ * links it into iface_stat_list, and defers /proc entry creation to a
+ * workqueue.  Returns NULL on any allocation failure (fully unwound).
+ */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+	struct iface_stat *new_iface;
+	struct iface_stat_work *isw;
+
+	new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+	if (new_iface == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "iface_stat alloc failed\n", net_dev->name);
+		return NULL;
+	}
+	new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+	if (new_iface->ifname == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "ifname alloc failed\n", net_dev->name);
+		kfree(new_iface);
+		return NULL;
+	}
+	spin_lock_init(&new_iface->tag_stat_list_lock);
+	new_iface->tag_stat_tree = RB_ROOT;
+	_iface_stat_set_active(new_iface, net_dev, true);
+
+	/*
+	 * ipv6 notifier chains are atomic :(
+	 * No create_proc_read_entry() for you!
+	 */
+	isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+	if (!isw) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "work alloc failed\n", new_iface->ifname);
+		_iface_stat_set_active(new_iface, net_dev, false);
+		kfree(new_iface->ifname);
+		kfree(new_iface);
+		return NULL;
+	}
+	isw->iface_entry = new_iface;
+	INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+	schedule_work(&isw->iface_work);
+	list_add(&new_iface->list, &iface_stat_list);
+	return new_iface;
+}
+
+/*
+ * Detect a device whose kernel stats went backwards (e.g. driver
+ * reload reset them).  When detected, fold the last snapshot taken
+ * before the reset into the running totals so accounting stays
+ * monotonic, and invalidate the snapshot.
+ */
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+					struct iface_stat *iface)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	bool stats_rewound;
+
+	stats = dev_get_stats(net_dev, &dev_stats);
+	/* No empty packets */
+	stats_rewound =
+		(stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+		|| (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+	IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+		 "bytes rx/tx=%llu/%llu "
+		 "active=%d last_known=%d "
+		 "stats_rewound=%d\n", __func__,
+		 net_dev ? net_dev->name : "?",
+		 iface, net_dev,
+		 stats->rx_bytes, stats->tx_bytes,
+		 iface->active, iface->last_known_valid, stats_rewound);
+
+	if (iface->active && iface->last_known_valid && stats_rewound) {
+		pr_warn_once("qtaguid: iface_stat: %s(%s): "
+			     "iface reset its stats unexpectedly\n", __func__,
+			     net_dev->name);
+
+		iface->totals_via_dev[IFS_TX].bytes +=
+			iface->last_known[IFS_TX].bytes;
+		iface->totals_via_dev[IFS_TX].packets +=
+			iface->last_known[IFS_TX].packets;
+		iface->totals_via_dev[IFS_RX].bytes +=
+			iface->last_known[IFS_RX].bytes;
+		iface->totals_via_dev[IFS_RX].packets +=
+			iface->last_known[IFS_RX].packets;
+		iface->last_known_valid = false;
+		IF_DEBUG("qtaguid: %s(%s): iface=%p "
+			 "used last known bytes rx/tx=%llu/%llu\n", __func__,
+			 iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+			 iface->last_known[IFS_TX].bytes);
+	}
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ * If no ifa is supplied, the device's address list is scanned for one
+ * whose label matches the device name.  Loopback addresses deactivate
+ * (or never create) tracking.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+			struct in_ifaddr *ifa)
+{
+	struct in_device *in_dev = NULL;
+	const char *ifname;
+	struct iface_stat *entry;
+	__be32 ipaddr = 0;
+	struct iface_stat *new_iface;
+
+	IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+		 net_dev ? net_dev->name : "?",
+		 ifa, net_dev);
+	if (!net_dev) {
+		pr_err("qtaguid: iface_stat: create(): no net dev\n");
+		return;
+	}
+
+	ifname = net_dev->name;
+	if (!ifa) {
+		/* in_dev_get() takes a reference; dropped at done_put. */
+		in_dev = in_dev_get(net_dev);
+		if (!in_dev) {
+			pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+			       ifname);
+			return;
+		}
+		IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+			 ifname, in_dev);
+		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+			IF_DEBUG("qtaguid: iface_stat: create(%s): "
+				 "ifa=%p ifa_label=%s\n",
+				 ifname, ifa,
+				 ifa->ifa_label ? ifa->ifa_label : "(null)");
+			if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+				break;
+		}
+	}
+
+	if (!ifa) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+			 ifname);
+		goto done_put;
+	}
+	ipaddr = ifa->ifa_local;
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(ifname);
+	if (entry != NULL) {
+		/* Existing entry: refresh reset detection and active state. */
+		bool activate = !ipv4_is_loopback(ipaddr);
+		IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+			 ifname, entry);
+		iface_check_stats_reset_and_adjust(net_dev, entry);
+		_iface_stat_set_active(entry, net_dev, activate);
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "tracking now %d on ip=%pI4\n", __func__,
+			 entry->ifname, activate, &ipaddr);
+		goto done_unlock_put;
+	} else if (ipv4_is_loopback(ipaddr)) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): "
+			 "ignore loopback dev. ip=%pI4\n", ifname, &ipaddr);
+		goto done_unlock_put;
+	}
+
+	new_iface = iface_alloc(net_dev);
+	IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+		 "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+	spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+	if (in_dev)
+		in_dev_put(in_dev);
+}
+
/*
 * IPv6 counterpart of iface_stat_create(): start or (re)activate tracking
 * for @net_dev when it gets an IPv6 address @ifa.
 * NOTE(review): this grabs the IPv4 in_device with in_dev_get() even though
 * the address is IPv6 — presumably just to pin the device; confirm intent.
 */
static void iface_stat_create_ipv6(struct net_device *net_dev,
				   struct inet6_ifaddr *ifa)
{
	struct in_device *in_dev;
	const char *ifname;
	struct iface_stat *entry;
	struct iface_stat *new_iface;
	int addr_type;

	IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
		 ifa, net_dev, net_dev ? net_dev->name : "");
	if (!net_dev) {
		pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
		return;
	}
	ifname = net_dev->name;

	/* Reference dropped at done_put. */
	in_dev = in_dev_get(net_dev);
	if (!in_dev) {
		pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
		       ifname);
		return;
	}

	IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
		 ifname, in_dev);

	if (!ifa) {
		IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
			 ifname);
		goto done_put;
	}
	addr_type = ipv6_addr_type(&ifa->addr);

	spin_lock_bh(&iface_stat_list_lock);
	entry = get_iface_entry(ifname);
	if (entry != NULL) {
		/*
		 * Already tracked: absorb any device-side counter reset,
		 * then (re)activate unless the address is loopback.
		 */
		bool activate = !(addr_type & IPV6_ADDR_LOOPBACK);
		IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
			 ifname, entry);
		iface_check_stats_reset_and_adjust(net_dev, entry);
		_iface_stat_set_active(entry, net_dev, activate);
		IF_DEBUG("qtaguid: %s(%s): "
			 "tracking now %d on ip=%pI6c\n", __func__,
			 entry->ifname, activate, &ifa->addr);
		goto done_unlock_put;
	} else if (addr_type & IPV6_ADDR_LOOPBACK) {
		/* Never start tracking loopback. */
		IF_DEBUG("qtaguid: %s(%s): "
			 "ignore loopback dev. ip=%pI6c\n", __func__,
			 ifname, &ifa->addr);
		goto done_unlock_put;
	}

	/* New non-loopback interface: allocate and register a tracker. */
	new_iface = iface_alloc(net_dev);
	IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
		 "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);

done_unlock_put:
	spin_unlock_bh(&iface_stat_list_lock);
done_put:
	in_dev_put(in_dev);
}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+ MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+ return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+ struct sock_tag *sock_tag_entry;
+ MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+ if (!sk)
+ return NULL;
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(sk);
+ spin_unlock_bh(&sock_tag_list_lock);
+ return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ int thoff, tproto;
+
+ switch (par->family) {
+ case NFPROTO_IPV6:
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ if (tproto < 0)
+ MT_DEBUG("%s(): transport header not found in ipv6"
+ " skb=%p\n", __func__, skb);
+ break;
+ case NFPROTO_IPV4:
+ tproto = ip_hdr(skb)->protocol;
+ break;
+ default:
+ tproto = IPPROTO_RAW;
+ }
+ return tproto;
+}
+
+static void
+data_counters_update(struct data_counters *dc, int set,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+ break;
+ case IPPROTO_UDP:
+ dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+ break;
+ case IPPROTO_IP:
+ default:
+ dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+ 1);
+ break;
+ }
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when an device is being unregistered.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+ struct rtnl_link_stats64 dev_stats, *stats;
+ struct iface_stat *entry;
+
+ stats = dev_get_stats(net_dev, &dev_stats);
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(net_dev->name);
+ if (entry == NULL) {
+ IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ net_dev->name, entry);
+ if (!entry->active) {
+ IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+ net_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ if (stash_only) {
+ entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+ entry->last_known[IFS_TX].packets = stats->tx_packets;
+ entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+ entry->last_known[IFS_RX].packets = stats->rx_packets;
+ entry->last_known_valid = true;
+ IF_DEBUG("qtaguid: %s(%s): "
+ "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+ entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+ entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+ entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+ entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+ /* We don't need the last_known[] anymore */
+ entry->last_known_valid = false;
+ _iface_stat_set_active(entry, net_dev, false);
+ IF_DEBUG("qtaguid: %s(%s): "
+ "disable tracking. rx/tx=%llu/%llu\n", __func__,
+ net_dev->name, stats->rx_bytes, stats->tx_bytes);
+ spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called on each sk.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ struct iface_stat *entry;
+ const struct net_device *el_dev;
+ enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+ int bytes = skb->len;
+
+ if (!skb->dev) {
+ MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+ el_dev = par->in ? : par->out;
+ } else {
+ const struct net_device *other_dev;
+ el_dev = skb->dev;
+ other_dev = par->in ? : par->out;
+ if (el_dev != other_dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+ "par->(in/out)=%p %s\n",
+ par->hooknum, el_dev, el_dev->name, other_dev,
+ other_dev->name);
+ }
+ }
+
+ if (unlikely(!el_dev)) {
+ pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ } else if (unlikely(!el_dev->name)) {
+ pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ } else {
+ int proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto);
+ }
+
+ spin_lock_bh(&iface_stat_list_lock);
+ entry = get_iface_entry(el_dev->name);
+ if (entry == NULL) {
+ IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+ __func__, el_dev->name);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return;
+ }
+
+ IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ el_dev->name, entry);
+
+ entry->totals_via_skb[direction].bytes += bytes;
+ entry->totals_via_skb[direction].packets++;
+ spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+ enum ifs_tx_rx direction, int proto, int bytes)
+{
+ int active_set;
+ active_set = get_active_counter_set(tag_entry->tn.tag);
+ MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+ "dir=%d proto=%d bytes=%d)\n",
+ tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+ active_set, direction, proto, bytes);
+ data_counters_update(&tag_entry->counters, active_set, direction,
+ proto, bytes);
+ if (tag_entry->parent_counters)
+ data_counters_update(tag_entry->parent_counters, active_set,
+ direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+ tag_t tag)
+{
+ struct tag_stat *new_tag_stat_entry = NULL;
+ IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+ " (uid=%u)\n", __func__,
+ iface_entry, tag, get_uid_from_tag(tag));
+ new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+ if (!new_tag_stat_entry) {
+ pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+ goto done;
+ }
+ new_tag_stat_entry->tn.tag = tag;
+ tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+ return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+ const struct sock *sk, enum ifs_tx_rx direction,
+ int proto, int bytes)
+{
+ struct tag_stat *tag_stat_entry;
+ tag_t tag, acct_tag;
+ tag_t uid_tag;
+ struct data_counters *uid_tag_counters;
+ struct sock_tag *sock_tag_entry;
+ struct iface_stat *iface_entry;
+ struct tag_stat *new_tag_stat = NULL;
+ MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+ "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+ ifname, uid, sk, direction, proto, bytes);
+
+
+ iface_entry = get_iface_entry(ifname);
+ if (!iface_entry) {
+ pr_err("qtaguid: iface_stat: stat_update() %s not found\n",
+ ifname);
+ return;
+ }
+ /* It is ok to process data when an iface_entry is inactive */
+
+ MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+ ifname, iface_entry);
+
+ /*
+ * Look for a tagged sock.
+ * It will have an acct_uid.
+ */
+ sock_tag_entry = get_sock_stat(sk);
+ if (sock_tag_entry) {
+ tag = sock_tag_entry->tag;
+ acct_tag = get_atag_from_tag(tag);
+ uid_tag = get_utag_from_tag(tag);
+ } else {
+ acct_tag = make_atag_from_value(0);
+ tag = combine_atag_with_uid(acct_tag, uid);
+ uid_tag = make_tag_from_uid(uid);
+ }
+ MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+ " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+ tag, get_uid_from_tag(tag), iface_entry);
+ /* Loop over tag list under this interface for {acct_tag,uid_tag} */
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ tag);
+ if (tag_stat_entry) {
+ /*
+ * Updating the {acct_tag, uid_tag} entry handles both stats:
+ * {0, uid_tag} will also get updated.
+ */
+ tag_stat_update(tag_stat_entry, direction, proto, bytes);
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ return;
+ }
+
+ /* Loop over tag list under this interface for {0,uid_tag} */
+ tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+ uid_tag);
+ if (!tag_stat_entry) {
+ /* Here: the base uid_tag did not exist */
+ /*
+ * No parent counters. So
+ * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
+ */
+ new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+ uid_tag_counters = &new_tag_stat->counters;
+ } else {
+ uid_tag_counters = &tag_stat_entry->counters;
+ }
+
+ if (acct_tag) {
+ /* Create the child {acct_tag, uid_tag} and hook up parent. */
+ new_tag_stat = create_if_tag_stat(iface_entry, tag);
+ new_tag_stat->parent_counters = uid_tag_counters;
+ } else {
+ /*
+ * For new_tag_stat to be still NULL here would require:
+ * {0, uid_tag} exists
+ * and {acct_tag, uid_tag} doesn't exist
+ * AND acct_tag == 0.
+ * Impossible. This reassures us that new_tag_stat
+ * below will always be assigned.
+ */
+ BUG_ON(!new_tag_stat);
+ }
+ tag_stat_update(new_tag_stat, direction, proto, bytes);
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+}
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr) {
+ struct net_device *dev = ptr;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+ "ev=0x%lx/%s netdev=%p->name=%s\n",
+ event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+ switch (event) {
+ case NETDEV_UP:
+ iface_stat_create(dev, NULL);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_create_ipv6(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->idev);
+ dev = (struct net_device *)ifa->idev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *dev;
+
+ if (unlikely(module_passive))
+ return NOTIFY_DONE;
+
+ IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+ "ev=0x%lx/%s ifa=%p\n",
+ event, netdev_evt_str(event), ifa);
+
+ switch (event) {
+ case NETDEV_UP:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_create(dev, ifa);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ case NETDEV_DOWN:
+ case NETDEV_UNREGISTER:
+ BUG_ON(!ifa || !ifa->ifa_dev);
+ dev = ifa->ifa_dev->dev;
+ iface_stat_update(dev, event == NETDEV_DOWN);
+ atomic64_inc(&qtu_events.iface_events);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
/* Notifier blocks registered in iface_stat_init(). */
static struct notifier_block iface_netdev_notifier_blk = {
	.notifier_call = iface_netdev_event_handler,
};

static struct notifier_block iface_inetaddr_notifier_blk = {
	.notifier_call = iface_inetaddr_event_handler,
};

static struct notifier_block iface_inet6addr_notifier_blk = {
	.notifier_call = iface_inet6addr_event_handler,
};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+ int err;
+
+ iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+ if (!iface_stat_procdir) {
+ pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+ err = -1;
+ goto err;
+ }
+
+ iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename,
+ proc_iface_perms,
+ parent_procdir);
+ if (!iface_stat_all_procfile) {
+ pr_err("qtaguid: iface_stat: init "
+ " failed to create stat_old proc entry\n");
+ err = -1;
+ goto err_zap_entry;
+ }
+ iface_stat_all_procfile->read_proc = iface_stat_fmt_proc_read;
+ iface_stat_all_procfile->data = (void *)1; /* fmt1 */
+
+ iface_stat_fmt_procfile = create_proc_entry(iface_stat_fmt_procfilename,
+ proc_iface_perms,
+ parent_procdir);
+ if (!iface_stat_fmt_procfile) {
+ pr_err("qtaguid: iface_stat: init "
+ " failed to create stat_all proc entry\n");
+ err = -1;
+ goto err_zap_all_stats_entry;
+ }
+ iface_stat_fmt_procfile->read_proc = iface_stat_fmt_proc_read;
+ iface_stat_fmt_procfile->data = (void *)2; /* fmt2 */
+
+
+ err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register dev event handler\n");
+ goto err_zap_all_stats_entries;
+ }
+ err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv4 dev event handler\n");
+ goto err_unreg_nd;
+ }
+
+ err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+ if (err) {
+ pr_err("qtaguid: iface_stat: init "
+ "failed to register ipv6 dev event handler\n");
+ goto err_unreg_ip4_addr;
+ }
+ return 0;
+
+err_unreg_ip4_addr:
+ unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+ unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+ remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+ remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+ remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+ return err;
+}
+
/*
 * Try to find the owning socket of @skb via the xt_socket helpers.
 * Returns a referenced sock (caller must xt_socket_put_sk()) or NULL when
 * the hook/family is unsupported, nothing matched, or the socket is in
 * TCP_TIME_WAIT.
 */
static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
				    struct xt_action_param *par)
{
	struct sock *sk;
	unsigned int hook_mask = (1 << par->hooknum);

	MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
		 par->hooknum, par->family);

	/*
	 * Let's not abuse the xt_socket_get*_sk(), or else it will
	 * return garbage SKs.
	 */
	if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
		return NULL;

	switch (par->family) {
	case NFPROTO_IPV6:
		sk = xt_socket_get6_sk(skb, par);
		break;
	case NFPROTO_IPV4:
		sk = xt_socket_get4_sk(skb, par);
		break;
	default:
		return NULL;
	}

	/*
	 * Seems to be issues on the file ptr for TCP_TIME_WAIT SKs.
	 * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959
	 * Not fixed in 3.0-r3 :(
	 */
	if (sk) {
		MT_DEBUG("qtaguid: %p->sk_proto=%u "
			 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
		if (sk->sk_state == TCP_TIME_WAIT) {
			/* Drop the reference taken by the getter above. */
			xt_socket_put_sk(sk);
			sk = NULL;
		}
	}
	return sk;
}
+
/*
 * Attribute @skb's bytes to @uid on the device the packet traversed.
 * @alternate_sk is used when skb->sk is absent (e.g. sk found via
 * connection tracking).  Silently does nothing if no device can be
 * determined.
 */
static void account_for_uid(const struct sk_buff *skb,
			    const struct sock *alternate_sk, uid_t uid,
			    struct xt_action_param *par)
{
	const struct net_device *el_dev;

	if (!skb->dev) {
		/* Fall back to the hook's in/out device. */
		MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
		el_dev = par->in ? : par->out;
	} else {
		const struct net_device *other_dev;
		el_dev = skb->dev;
		other_dev = par->in ? : par->out;
		if (el_dev != other_dev) {
			MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
				 "par->(in/out)=%p %s\n",
				 par->hooknum, el_dev, el_dev->name, other_dev,
				 other_dev->name);
		}
	}

	if (unlikely(!el_dev)) {
		pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
	} else if (unlikely(!el_dev->name)) {
		pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
	} else {
		int proto = ipx_proto(skb, par);
		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
			 par->hooknum, el_dev->name, el_dev->type,
			 par->family, proto);

		/* par->in set => RX direction. */
		if_tag_stat_update(el_dev->name, uid,
				   skb->sk ? skb->sk : alternate_sk,
				   par->in ? IFS_RX : IFS_TX,
				   proto, skb->len);
	}
}
+
/*
 * The xt_qtaguid match function: accounts the packet against its owning
 * uid/tag and evaluates the uid/gid/socket match conditions from @info.
 * In pre/post-routing hooks only iface stats are updated.  Returns the
 * match result (honoring info->invert).
 */
static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_qtaguid_match_info *info = par->matchinfo;
	const struct file *filp;
	bool got_sock = false;
	struct sock *sk;
	uid_t sock_uid;
	bool res;

	if (unlikely(module_passive))
		return (info->match ^ info->invert) == 0;

	MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
		 par->hooknum, skb, par->in, par->out, par->family);

	atomic64_inc(&qtu_events.match_calls);
	if (skb == NULL) {
		res = (info->match ^ info->invert) == 0;
		goto ret_res;
	}

	switch (par->hooknum) {
	case NF_INET_PRE_ROUTING:
	case NF_INET_POST_ROUTING:
		atomic64_inc(&qtu_events.match_calls_prepost);
		iface_stat_update_from_skb(skb, par);
		/*
		 * We are done in pre/post. The skb will get processed
		 * further later.
		 */
		res = (info->match ^ info->invert);
		goto ret_res;
		break;
	/* default: Fall through and do UID related work */
	}

	sk = skb->sk;
	if (sk == NULL) {
		/*
		 * A missing sk->sk_socket happens when packets are in-flight
		 * and the matching socket is already closed and gone.
		 */
		sk = qtaguid_find_sk(skb, par);
		/*
		 * If we got the socket from the find_sk(), we will need to put
		 * it back, as nf_tproxy_get_sock_v4() got it.
		 */
		got_sock = sk;
		if (sk)
			atomic64_inc(&qtu_events.match_found_sk_in_ct);
		else
			atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
	} else {
		atomic64_inc(&qtu_events.match_found_sk);
	}
	MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
		 par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
	if (sk != NULL) {
		MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
			 par->hooknum, sk, sk->sk_socket,
			 sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
		MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
			 par->hooknum, filp ? filp->f_cred->fsuid : -1);
	}

	if (sk == NULL || sk->sk_socket == NULL) {
		/*
		 * Here, the qtaguid_find_sk() using connection tracking
		 * couldn't find the owner, so for now we just count them
		 * against the system.
		 */
		/*
		 * TODO: unhack how to force just accounting.
		 * For now we only do iface stats when the uid-owner is not
		 * requested.
		 */
		if (!(info->match & XT_QTAGUID_UID))
			account_for_uid(skb, sk, 0, par);
		MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
			 par->hooknum,
			 sk ? sk->sk_socket : NULL);
		res = (info->match ^ info->invert) == 0;
		atomic64_inc(&qtu_events.match_no_sk);
		goto put_sock_ret_res;
	} else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
		/* Socket exists but "! --socket-exists" was requested. */
		res = false;
		goto put_sock_ret_res;
	}
	filp = sk->sk_socket->file;
	if (filp == NULL) {
		MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
		account_for_uid(skb, sk, 0, par);
		res = ((info->match ^ info->invert) &
		       (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
		atomic64_inc(&qtu_events.match_no_sk_file);
		goto put_sock_ret_res;
	}
	sock_uid = filp->f_cred->fsuid;
	/*
	 * TODO: unhack how to force just accounting.
	 * For now we only do iface stats when the uid-owner is not requested
	 */
	if (!(info->match & XT_QTAGUID_UID))
		account_for_uid(skb, sk, sock_uid, par);

	/*
	 * The following two tests fail the match when:
	 *    id not in range AND no inverted condition requested
	 * or id     in range AND    inverted condition requested
	 * Thus (!a && b) || (a && !b) == a ^ b
	 */
	if (info->match & XT_QTAGUID_UID)
		if ((filp->f_cred->fsuid >= info->uid_min &&
		     filp->f_cred->fsuid <= info->uid_max) ^
		    !(info->invert & XT_QTAGUID_UID)) {
			MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
				 par->hooknum);
			res = false;
			goto put_sock_ret_res;
		}
	if (info->match & XT_QTAGUID_GID)
		if ((filp->f_cred->fsgid >= info->gid_min &&
		     filp->f_cred->fsgid <= info->gid_max) ^
		    !(info->invert & XT_QTAGUID_GID)) {
			MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
				 par->hooknum);
			res = false;
			goto put_sock_ret_res;
		}

	MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
	res = true;

put_sock_ret_res:
	/* Only drop the ref we took ourselves via qtaguid_find_sk(). */
	if (got_sock)
		xt_socket_put_sk(sk);
ret_res:
	MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
	return res;
}
+
#ifdef DDEBUG
/* This function is not in xt_qtaguid_print.c because of locks visibility */
/*
 * Dump the full module state (sock tags, uid tag data, proc qtu data,
 * iface stats) to the kernel log, prefixed by a printf-style header.
 * Only active when DDEBUG_MASK is set in qtaguid_debug_mask.
 */
static void prdebug_full_state(int indent_level, const char *fmt, ...)
{
	va_list args;
	char *fmt_buff;
	char *buff;

	/* NOTE(review): hint placement — reads as !(unlikely(cond)). */
	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
		return;

	/* Build "qtaguid: <func>(): <fmt> {\n" then format the varargs. */
	fmt_buff = kasprintf(GFP_ATOMIC,
			     "qtaguid: %s(): %s {\n", __func__, fmt);
	BUG_ON(!fmt_buff);
	va_start(args, fmt);
	buff = kvasprintf(GFP_ATOMIC,
			  fmt_buff, args);
	BUG_ON(!buff);
	pr_debug("%s", buff);
	kfree(fmt_buff);
	kfree(buff);
	va_end(args);

	spin_lock_bh(&sock_tag_list_lock);
	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
	spin_unlock_bh(&sock_tag_list_lock);

	/* uid/proc trees are printed under both locks, in this order. */
	spin_lock_bh(&sock_tag_list_lock);
	spin_lock_bh(&uid_tag_data_tree_lock);
	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
	spin_unlock_bh(&uid_tag_data_tree_lock);
	spin_unlock_bh(&sock_tag_list_lock);

	spin_lock_bh(&iface_stat_list_lock);
	prdebug_iface_stat_list(indent_level, &iface_stat_list);
	spin_unlock_bh(&iface_stat_list_lock);

	pr_debug("qtaguid: %s(): }\n", __func__);
}
#else
/* No-op stub when deep debugging is compiled out. */
static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
#endif
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ char *outp = page;
+ int len;
+ uid_t uid;
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ int item_index = 0;
+ int indent_level = 0;
+ long f_count;
+
+ if (unlikely(module_passive)) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (*eof)
+ return 0;
+
+ CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u "
+ "page=%p off=%ld char_count=%d *eof=%d\n",
+ current->pid, current->tgid, current_fsuid(),
+ page, items_to_skip, char_count, *eof);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ for (node = rb_first(&sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ if (item_index++ < items_to_skip)
+ continue;
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ uid = get_uid_from_tag(sock_tag_entry->tag);
+ CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+ "pid=%u\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag,
+ uid,
+ sock_tag_entry->pid
+ );
+ f_count = atomic_long_read(
+ &sock_tag_entry->socket->file->f_count);
+ len = snprintf(outp, char_count,
+ "sock=%p tag=0x%llx (uid=%u) pid=%u "
+ "f_count=%lu\n",
+ sock_tag_entry->sk,
+ sock_tag_entry->tag, uid,
+ sock_tag_entry->pid, f_count);
+ if (len >= char_count) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ if (item_index++ >= items_to_skip) {
+ len = snprintf(outp, char_count,
+ "events: sockets_tagged=%llu "
+ "sockets_untagged=%llu "
+ "counter_set_changes=%llu "
+ "delete_cmds=%llu "
+ "iface_events=%llu "
+ "match_calls=%llu "
+ "match_calls_prepost=%llu "
+ "match_found_sk=%llu "
+ "match_found_sk_in_ct=%llu "
+ "match_found_no_sk_in_ct=%llu "
+ "match_no_sk=%llu "
+ "match_no_sk_file=%llu\n",
+ atomic64_read(&qtu_events.sockets_tagged),
+ atomic64_read(&qtu_events.sockets_untagged),
+ atomic64_read(&qtu_events.counter_set_changes),
+ atomic64_read(&qtu_events.delete_cmds),
+ atomic64_read(&qtu_events.iface_events),
+ atomic64_read(&qtu_events.match_calls),
+ atomic64_read(&qtu_events.match_calls_prepost),
+ atomic64_read(&qtu_events.match_found_sk),
+ atomic64_read(&qtu_events.match_found_sk_in_ct),
+ atomic64_read(
+ &qtu_events.match_found_no_sk_in_ct),
+ atomic64_read(&qtu_events.match_no_sk),
+ atomic64_read(&qtu_events.match_no_sk_file));
+ if (len >= char_count) {
+ *outp = '\0';
+ return outp - page;
+ }
+ outp += len;
+ char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ /* Count the following as part of the last item_index */
+ if (item_index > items_to_skip) {
+ prdebug_full_state(indent_level, "proc ctrl");
+ }
+
+ *eof = 1;
+ return outp - page;
+}
+
+/*
+ * Delete socket tags, and stat tags associated with a given
+ * accouting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+ char cmd;
+ uid_t uid;
+ uid_t entry_uid;
+ tag_t acct_tag;
+ tag_t tag;
+ int res, argc;
+ struct iface_stat *iface_entry;
+ struct rb_node *node;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct tag_stat *ts_entry;
+ struct tag_counter_set *tcs_entry;
+ struct tag_ref *tr_entry;
+ struct uid_tag_data *utd_entry;
+
+ argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+ "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err;
+ }
+ if (argc < 3) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_delete(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err;
+ }
+
+ tag = combine_atag_with_uid(acct_tag, uid);
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "looking for tag=0x%llx (uid=%u)\n",
+ input, tag, uid);
+
+ /* Delete socket tags */
+ spin_lock_bh(&sock_tag_list_lock);
+ node = rb_first(&sock_tag_tree);
+ while (node) {
+ st_entry = rb_entry(node, struct sock_tag, sock_node);
+ entry_uid = get_uid_from_tag(st_entry->tag);
+ node = rb_next(node);
+ if (entry_uid != uid)
+ continue;
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+ input, st_entry->tag, entry_uid);
+
+ if (!acct_tag || st_entry->tag == tag) {
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+ tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+ BUG_ON(tr_entry->num_sock_tags <= 0);
+ tr_entry->num_sock_tags--;
+ /*
+ * TODO: remove if, and start failing.
+ * This is a hack to work around the fact that in some
+ * places we have "if (IS_ERR_OR_NULL(pqd_entry))"
+ * and are trying to work around apps
+ * that didn't open the /dev/xt_qtaguid.
+ */
+ if (st_entry->list.next && st_entry->list.prev)
+ list_del(&st_entry->list);
+ }
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ /* Delete tag counter-sets */
+ spin_lock_bh(&tag_counter_set_list_lock);
+ /* Counter sets are only on the uid tag, not full tag */
+ tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+ if (tcs_entry) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+ input,
+ tcs_entry->tn.tag,
+ get_uid_from_tag(tcs_entry->tn.tag),
+ tcs_entry->active_set);
+ rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+ kfree(tcs_entry);
+ }
+ spin_unlock_bh(&tag_counter_set_list_lock);
+
+ /*
+ * If acct_tag is 0, then all entries belonging to uid are
+ * erased.
+ */
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(iface_entry, &iface_stat_list, list) {
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ node = rb_first(&iface_entry->tag_stat_tree);
+ while (node) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "ts tag=0x%llx (uid=%u)\n",
+ input, ts_entry->tn.tag, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ if (!acct_tag || ts_entry->tn.tag == tag) {
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "erase ts: %s 0x%llx %u\n",
+ input, iface_entry->ifname,
+ get_atag_from_tag(ts_entry->tn.tag),
+ entry_uid);
+ rb_erase(&ts_entry->tn.node,
+ &iface_entry->tag_stat_tree);
+ kfree(ts_entry);
+ }
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ /* Cleanup the uid_tag_data */
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ node = rb_first(&uid_tag_data_tree);
+ while (node) {
+ utd_entry = rb_entry(node, struct uid_tag_data, node);
+ entry_uid = utd_entry->uid;
+ node = rb_next(node);
+
+ CT_DEBUG("qtaguid: ctrl_delete(%s): "
+ "utd uid=%u\n",
+ input, entry_uid);
+
+ if (entry_uid != uid)
+ continue;
+ /*
+ * Go over the tag_refs, and those that don't have
+ * sock_tags using them are freed.
+ */
+ put_tag_ref_tree(tag, utd_entry);
+ put_utd_entry(utd_entry);
+ }
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ atomic64_inc(&qtu_events.delete_cmds);
+ res = 0;
+
+err:
+ return res;
+}
+
/*
 * Switch the active counter set for a uid.
 * Input format: "s <counter_set> <uid>"; requires uid-manipulation
 * privilege.  Creates the tag_counter_set entry on first use.
 * Returns 0 or a negative errno.
 */
static int ctrl_cmd_counter_set(const char *input)
{
	char cmd;
	uid_t uid = 0;
	tag_t tag;
	int res, argc;
	struct tag_counter_set *tcs;
	int counter_set;

	argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
	CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
		 "set=%d uid=%u\n", input, argc, cmd,
		 counter_set, uid);
	if (argc != 3) {
		res = -EINVAL;
		goto err;
	}
	if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
		pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
			input);
		res = -EINVAL;
		goto err;
	}
	if (!can_manipulate_uids()) {
		pr_info("qtaguid: ctrl_counterset(%s): "
			"insufficient priv from pid=%u tgid=%u uid=%u\n",
			input, current->pid, current->tgid, current_fsuid());
		res = -EPERM;
		goto err;
	}

	/* Counter sets are keyed by the bare uid tag (acct_tag == 0). */
	tag = make_tag_from_uid(uid);
	spin_lock_bh(&tag_counter_set_list_lock);
	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
	if (!tcs) {
		/* GFP_ATOMIC: allocating under the spinlock. */
		tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
		if (!tcs) {
			spin_unlock_bh(&tag_counter_set_list_lock);
			pr_err("qtaguid: ctrl_counterset(%s): "
			       "failed to alloc counter set\n",
			       input);
			res = -ENOMEM;
			goto err;
		}
		tcs->tn.tag = tag;
		tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
		CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
			 "(uid=%u) set=%d\n",
			 input, tag, get_uid_from_tag(tag), counter_set);
	}
	tcs->active_set = counter_set;
	spin_unlock_bh(&tag_counter_set_list_lock);
	atomic64_inc(&qtu_events.counter_set_changes);
	res = 0;

err:
	return res;
}
+
+static int ctrl_cmd_tag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ uid_t uid = 0;
+ tag_t acct_tag = make_atag_from_value(0);
+ tag_t full_tag;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *uid_tag_data_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ /* Unassigned args will get defaulted later. */
+ argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+ CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+ "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+ acct_tag, uid);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+ " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+ input, sock_fd, res, current->pid, current->tgid,
+ current_fsuid());
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ if (argc < 3) {
+ acct_tag = make_atag_from_value(0);
+ } else if (!valid_atag(acct_tag)) {
+ pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+ res = -EINVAL;
+ goto err_put;
+ }
+ CT_DEBUG("qtaguid: ctrl_tag(%s): "
+ "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+ "in_group=%d in_egroup=%d\n",
+ input, current->pid, current->tgid, current_uid(),
+ current_euid(), current_fsuid(),
+ in_group_p(proc_ctrl_write_gid),
+ in_egroup_p(proc_ctrl_write_gid));
+ if (argc < 4) {
+ uid = current_fsuid();
+ } else if (!can_impersonate_uid(uid)) {
+ pr_info("qtaguid: ctrl_tag(%s): "
+ "insufficient priv from pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+ res = -EPERM;
+ goto err_put;
+ }
+ full_tag = combine_atag_with_uid(acct_tag, uid);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+ if (IS_ERR(tag_ref_entry)) {
+ res = PTR_ERR(tag_ref_entry);
+ spin_unlock_bh(&sock_tag_list_lock);
+ goto err_put;
+ }
+ tag_ref_entry->num_sock_tags++;
+ if (sock_tag_entry) {
+ struct tag_ref *prev_tag_ref_entry;
+
+ CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+ "st@%p ...->f_count=%ld\n",
+ input, el_socket->sk, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ /*
+ * This is a re-tagging, so release the sock_fd that was
+ * locked at the time of the 1st tagging.
+ * There is still the ref from this call's sockfd_lookup() so
+ * it can be done within the spinlock.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+ &uid_tag_data_entry);
+ BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+ BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+ prev_tag_ref_entry->num_sock_tags--;
+ sock_tag_entry->tag = full_tag;
+ } else {
+ CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+ input, el_socket->sk);
+ sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+ GFP_ATOMIC);
+ if (!sock_tag_entry) {
+ pr_err("qtaguid: ctrl_tag(%s): "
+ "socket tag alloc failed\n",
+ input);
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -ENOMEM;
+ goto err_tag_unref_put;
+ }
+ sock_tag_entry->sk = el_socket->sk;
+ sock_tag_entry->socket = el_socket;
+ sock_tag_entry->pid = current->tgid;
+ sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+ uid);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once(
+ "qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid,
+ current_fsuid());
+ else
+ list_add(&sock_tag_entry->list,
+ &pqd_entry->sock_tag_list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+
+ sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+ atomic64_inc(&qtu_events.sockets_tagged);
+ }
+ spin_unlock_bh(&sock_tag_list_lock);
+ /* We keep the ref to the socket (file) until it is untagged */
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count));
+ return 0;
+
+err_tag_unref_put:
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ tag_ref_entry->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+ return res;
+}
+
+static int ctrl_cmd_untag(const char *input)
+{
+ char cmd;
+ int sock_fd = 0;
+ struct socket *el_socket;
+ int res, argc;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+
+ argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+ input, argc, cmd, sock_fd);
+ if (argc < 2) {
+ res = -EINVAL;
+ goto err;
+ }
+ el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
+ if (!el_socket) {
+ pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+ " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+ input, sock_fd, res, current->pid, current->tgid,
+ current_fsuid());
+ goto err;
+ }
+ CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+ input, atomic_long_read(&el_socket->file->f_count),
+ el_socket->sk);
+ spin_lock_bh(&sock_tag_list_lock);
+ sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+ if (!sock_tag_entry) {
+ spin_unlock_bh(&sock_tag_list_lock);
+ res = -EINVAL;
+ goto err_put;
+ }
+ /*
+ * The socket already belongs to the current process
+ * so it can do whatever it wants to it.
+ */
+ rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+ tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+ BUG_ON(!tag_ref_entry);
+ BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+ pqd_entry = proc_qtu_data_tree_search(
+ &proc_qtu_data_tree, current->tgid);
+ /*
+ * TODO: remove if, and start failing.
+ * At first, we want to catch user-space code that is not
+ * opening the /dev/xt_qtaguid.
+ */
+ if (IS_ERR_OR_NULL(pqd_entry))
+ pr_warn_once("qtaguid: %s(): "
+ "User space forgot to open /dev/xt_qtaguid? "
+ "pid=%u tgid=%u uid=%u\n", __func__,
+ current->pid, current->tgid, current_fsuid());
+ else
+ list_del(&sock_tag_entry->list);
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ /*
+ * We don't free tag_ref from the utd_entry here,
+ * only during a cmd_delete().
+ */
+ tag_ref_entry->num_sock_tags--;
+ spin_unlock_bh(&sock_tag_list_lock);
+ /*
+ * Release the sock_fd that was grabbed at tag time,
+ * and once more for the sockfd_lookup() here.
+ */
+ sockfd_put(sock_tag_entry->socket);
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+ input, sock_tag_entry,
+ atomic_long_read(&el_socket->file->f_count) - 1);
+ sockfd_put(el_socket);
+
+ kfree(sock_tag_entry);
+ atomic64_inc(&qtu_events.sockets_untagged);
+
+ return 0;
+
+err_put:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+ input, atomic_long_read(&el_socket->file->f_count) - 1);
+ /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+ sockfd_put(el_socket);
+ return res;
+
+err:
+ CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+ return res;
+}
+
+static int qtaguid_ctrl_parse(const char *input, int count)
+{
+ char cmd;
+ int res;
+
+ CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+ input, current->pid, current->tgid, current_fsuid());
+
+ cmd = input[0];
+ /* Collect params for commands */
+ switch (cmd) {
+ case 'd':
+ res = ctrl_cmd_delete(input);
+ break;
+
+ case 's':
+ res = ctrl_cmd_counter_set(input);
+ break;
+
+ case 't':
+ res = ctrl_cmd_tag(input);
+ break;
+
+ case 'u':
+ res = ctrl_cmd_untag(input);
+ break;
+
+ default:
+ res = -EINVAL;
+ goto err;
+ }
+ if (!res)
+ res = count;
+err:
+ CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res);
+ return res;
+}
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+ if (unlikely(module_passive))
+ return count;
+
+ if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+ return -EINVAL;
+
+ if (copy_from_user(input_buf, buffer, count))
+ return -EFAULT;
+
+ input_buf[count] = '\0';
+ return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+ char *outp;
+ char **num_items_returned;
+ struct iface_stat *iface_entry;
+ struct tag_stat *ts_entry;
+ int item_index;
+ int items_to_skip;
+ int char_count;
+};
+
+static int pp_stats_line(struct proc_print_info *ppi, int cnt_set)
+{
+ int len;
+ struct data_counters *cnts;
+
+ if (!ppi->item_index) {
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ len = snprintf(ppi->outp, ppi->char_count,
+ "idx iface acct_tag_hex uid_tag_int cnt_set "
+ "rx_bytes rx_packets "
+ "tx_bytes tx_packets "
+ "rx_tcp_bytes rx_tcp_packets "
+ "rx_udp_bytes rx_udp_packets "
+ "rx_other_bytes rx_other_packets "
+ "tx_tcp_bytes tx_tcp_packets "
+ "tx_udp_bytes tx_udp_packets "
+ "tx_other_bytes tx_other_packets\n");
+ } else {
+ tag_t tag = ppi->ts_entry->tn.tag;
+ uid_t stat_uid = get_uid_from_tag(tag);
+ /* Detailed tags are not available to everybody */
+ if (get_atag_from_tag(tag)
+ && !can_read_other_uid_stats(stat_uid)) {
+ CT_DEBUG("qtaguid: stats line: "
+ "%s 0x%llx %u: insufficient priv "
+ "from pid=%u tgid=%u uid=%u\n",
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag), stat_uid,
+ current->pid, current->tgid, current_fsuid());
+ return 0;
+ }
+ if (ppi->item_index++ < ppi->items_to_skip)
+ return 0;
+ cnts = &ppi->ts_entry->counters;
+ len = snprintf(
+ ppi->outp, ppi->char_count,
+ "%d %s 0x%llx %u %u "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu "
+ "%llu %llu\n",
+ ppi->item_index,
+ ppi->iface_entry->ifname,
+ get_atag_from_tag(tag),
+ stat_uid,
+ cnt_set,
+ dc_sum_bytes(cnts, cnt_set, IFS_RX),
+ dc_sum_packets(cnts, cnt_set, IFS_RX),
+ dc_sum_bytes(cnts, cnt_set, IFS_TX),
+ dc_sum_packets(cnts, cnt_set, IFS_TX),
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+ cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+ }
+ return len;
+}
+
+static bool pp_sets(struct proc_print_info *ppi)
+{
+ int len;
+ int counter_set;
+ for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+ counter_set++) {
+ len = pp_stats_line(ppi, counter_set);
+ if (len >= ppi->char_count) {
+ *ppi->outp = '\0';
+ return false;
+ }
+ if (len) {
+ ppi->outp += len;
+ ppi->char_count -= len;
+ (*ppi->num_items_returned)++;
+ }
+ }
+ return true;
+}
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols tx/rx bytes.
+ */
+static int qtaguid_stats_proc_read(char *page, char **num_items_returned,
+ off_t items_to_skip, int char_count, int *eof,
+ void *data)
+{
+ struct proc_print_info ppi;
+ int len;
+
+ ppi.outp = page;
+ ppi.item_index = 0;
+ ppi.char_count = char_count;
+ ppi.num_items_returned = num_items_returned;
+ ppi.items_to_skip = items_to_skip;
+
+ if (unlikely(module_passive)) {
+ len = pp_stats_line(&ppi, 0);
+ /* The header should always be shorter than the buffer. */
+ BUG_ON(len >= ppi.char_count);
+ (*num_items_returned)++;
+ *eof = 1;
+ return len;
+ }
+
+ CT_DEBUG("qtaguid:proc stats pid=%u tgid=%u uid=%u "
+ "page=%p *num_items_returned=%p off=%ld "
+ "char_count=%d *eof=%d\n",
+ current->pid, current->tgid, current_fsuid(),
+ page, *num_items_returned,
+ items_to_skip, char_count, *eof);
+
+ if (*eof)
+ return 0;
+
+ /* The idx is there to help debug when things go belly up. */
+ len = pp_stats_line(&ppi, 0);
+ /* Don't advance the outp unless the whole line was printed */
+ if (len >= ppi.char_count) {
+ *ppi.outp = '\0';
+ return ppi.outp - page;
+ }
+ if (len) {
+ ppi.outp += len;
+ ppi.char_count -= len;
+ (*num_items_returned)++;
+ }
+
+ spin_lock_bh(&iface_stat_list_lock);
+ list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) {
+ struct rb_node *node;
+ spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ for (node = rb_first(&ppi.iface_entry->tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ if (!pp_sets(&ppi)) {
+ spin_unlock_bh(
+ &ppi.iface_entry->tag_stat_list_lock);
+ spin_unlock_bh(&iface_stat_list_lock);
+ return ppi.outp - page;
+ }
+ }
+ spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock);
+ }
+ spin_unlock_bh(&iface_stat_list_lock);
+
+ *eof = 1;
+ return ppi.outp - page;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+ struct proc_qtu_data *new_pqd_entry;
+ int res;
+ bool utd_entry_found;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+ current->pid, current->tgid, current_fsuid());
+
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ /* Look for existing uid data, or alloc one. */
+ utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
+ if (IS_ERR_OR_NULL(utd_entry)) {
+ res = PTR_ERR(utd_entry);
+ goto err_unlock;
+ }
+
+ /* Look for existing PID based proc_data */
+ pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+ current->tgid);
+ if (pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u "
+ "%s already opened\n",
+ current->pid, current->tgid, current_fsuid(),
+ QTU_DEV_NAME);
+ res = -EBUSY;
+ goto err_unlock_free_utd;
+ }
+
+ new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+ if (!new_pqd_entry) {
+ pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+ "proc data alloc failed\n",
+ current->pid, current->tgid, current_fsuid());
+ res = -ENOMEM;
+ goto err_unlock_free_utd;
+ }
+ new_pqd_entry->pid = current->tgid;
+ INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+ new_pqd_entry->parent_tag_data = utd_entry;
+ utd_entry->num_pqd++;
+
+ proc_qtu_data_tree_insert(new_pqd_entry,
+ &proc_qtu_data_tree);
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+ current_fsuid(), new_pqd_entry);
+ file->private_data = new_pqd_entry;
+ return 0;
+
+err_unlock_free_utd:
+ if (!utd_entry_found) {
+ rb_erase(&utd_entry->node, &uid_tag_data_tree);
+ kfree(utd_entry);
+ }
+err_unlock:
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+ struct proc_qtu_data *pqd_entry = file->private_data;
+ struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data;
+ struct sock_tag *st_entry;
+ struct rb_root st_to_free_tree = RB_ROOT;
+ struct list_head *entry, *next;
+ struct tag_ref *tr;
+
+ if (unlikely(qtu_proc_handling_passive))
+ return 0;
+
+ /*
+ * Do not trust the current->pid, it might just be a kworker cleaning
+ * up after a dead proc.
+ */
+ DR_DEBUG("qtaguid: qtudev_release(): "
+ "pid=%u tgid=%u uid=%u "
+ "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+ current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+ pqd_entry, pqd_entry->pid, utd_entry,
+ utd_entry->num_active_tags);
+
+ spin_lock_bh(&sock_tag_list_lock);
+ spin_lock_bh(&uid_tag_data_tree_lock);
+
+ list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+ st_entry = list_entry(entry, struct sock_tag, list);
+ DR_DEBUG("qtaguid: %s(): "
+ "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+ __func__,
+ st_entry, st_entry->sk,
+ current->pid, current->tgid,
+ pqd_entry->parent_tag_data->uid);
+
+ utd_entry = uid_tag_data_tree_search(
+ &uid_tag_data_tree,
+ get_uid_from_tag(st_entry->tag));
+ BUG_ON(IS_ERR_OR_NULL(utd_entry));
+ DR_DEBUG("qtaguid: %s(): "
+ "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+ st_entry->tag, utd_entry);
+ tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+ st_entry->tag);
+ BUG_ON(!tr);
+ BUG_ON(tr->num_sock_tags <= 0);
+ tr->num_sock_tags--;
+ free_tag_ref_from_utd_entry(tr, utd_entry);
+
+ rb_erase(&st_entry->sock_node, &sock_tag_tree);
+ list_del(&st_entry->list);
+ /* Can't sockfd_put() within spinlock, do it later. */
+ sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+ /*
+ * Try to free the utd_entry if no other proc_qtu_data is
+ * using it (num_pqd is 0) and it doesn't have active tags
+ * (num_active_tags is 0).
+ */
+ put_utd_entry(utd_entry);
+ }
+
+ rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+ BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+ pqd_entry->parent_tag_data->num_pqd--;
+ put_utd_entry(pqd_entry->parent_tag_data);
+ kfree(pqd_entry);
+ file->private_data = NULL;
+
+ spin_unlock_bh(&uid_tag_data_tree_lock);
+ spin_unlock_bh(&sock_tag_list_lock);
+
+
+ sock_tag_tree_erase(&st_to_free_tree);
+
+ prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+ current->pid, current->tgid);
+ return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+ .owner = THIS_MODULE,
+ .open = qtudev_open,
+ .release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = QTU_DEV_NAME,
+ .fops = &qtudev_fops,
+ /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+ int ret;
+ *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+ if (!*res_procdir) {
+ pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+ ret = -ENOMEM;
+ goto no_dir;
+ }
+
+ xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms,
+ *res_procdir);
+ if (!xt_qtaguid_ctrl_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+ " file\n");
+ ret = -ENOMEM;
+ goto no_ctrl_entry;
+ }
+ xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read;
+ xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write;
+
+ xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms,
+ *res_procdir);
+ if (!xt_qtaguid_stats_file) {
+ pr_err("qtaguid: failed to create xt_qtaguid/stats "
+ "file\n");
+ ret = -ENOMEM;
+ goto no_stats_entry;
+ }
+ xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read;
+ /*
+ * TODO: add support counter hacking
+ * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+ */
+ return 0;
+
+no_stats_entry:
+ remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+ remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+ return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+ /*
+ * This module masquerades as the "owner" module so that iptables
+ * tools can deal with it.
+ */
+ .name = "owner",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .match = qtaguid_mt,
+ .matchsize = sizeof(struct xt_qtaguid_match_info),
+ .me = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+ if (qtaguid_proc_register(&xt_qtaguid_procdir)
+ || iface_stat_init(xt_qtaguid_procdir)
+ || xt_register_match(&qtaguid_mt_reg)
+ || misc_register(&qtu_device))
+ return -1;
+ return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 00000000000000..d79f8383abf4b1
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,333 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do { \
+ if (unlikely(qtaguid_debug_mask & (mask))) \
+ pr_debug(__VA_ARGS__); \
+ } while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and can not be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a: {acct_tag=1, uid_tag=10003}
+ * b: {acct_tag=2, uid_tag=10003}
+ * c: {acct_tag=3, uid_tag=10003}
+ * d: {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t; /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+ return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+ return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+ return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+ return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+ return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+ return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+ return (uint64_t)value << 32;
+}
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+ IFS_TX,
+ IFS_RX,
+ IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+ IFS_TCP,
+ IFS_UDP,
+ IFS_PROTO_OTHER,
+ IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+ uint64_t bytes;
+ uint64_t packets;
+};
+
+struct data_counters {
+ struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+/* Generic X based nodes used as a base for rb_tree ops */
+struct tag_node {
+ struct rb_node node;
+ tag_t tag;
+};
+
+struct tag_stat {
+ struct tag_node tn;
+ struct data_counters counters;
+ /*
+ * If this tag is acct_tag based, we need to count against the
+ * matching parent uid_tag.
+ */
+ struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+ struct list_head list; /* in iface_stat_list */
+ char *ifname;
+ bool active;
+ /* net_dev is only valid for active iface_stat */
+ struct net_device *net_dev;
+
+ struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+ struct byte_packet_counters totals_via_skb[IFS_MAX_DIRECTIONS];
+ /*
+ * We keep the last_known, because some devices reset their counters
+ * just before NETDEV_UP, while some will reset just before
+ * NETDEV_REGISTER (which is more normal).
+ * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+ * its current dev stats smaller that what was previously known, we
+ * assume an UNREGISTER and just use the last_known.
+ */
+ struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+ /* last_known is usable when last_known_valid is true */
+ bool last_known_valid;
+
+ struct proc_dir_entry *proc_ptr;
+
+ struct rb_root tag_stat_tree;
+ spinlock_t tag_stat_list_lock;
+};
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+ struct work_struct iface_work;
+ struct iface_stat *iface_entry;
+};
+
+/*
+ * Track tag that this socket is transferring data for, and not necessarily
+ * the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+ struct rb_node sock_node;
+ struct sock *sk; /* Only used as a number, never dereferenced */
+ /* The socket is needed for sockfd_put() */
+ struct socket *socket;
+ /* Used to associate with a given pid */
+ struct list_head list; /* in proc_qtu_data.sock_tag_list */
+ pid_t pid;
+
+ tag_t tag;
+};
+
+struct qtaguid_event_counts {
+ /* Various successful events */
+ atomic64_t sockets_tagged;
+ atomic64_t sockets_untagged;
+ atomic64_t counter_set_changes;
+ atomic64_t delete_cmds;
+ atomic64_t iface_events; /* Number of NETDEV_* events handled */
+
+ atomic64_t match_calls; /* Number of times iptables called mt */
+ /* Number of times iptables called mt from pre or post routing hooks */
+ atomic64_t match_calls_prepost;
+ /*
+ * match_found_sk_*: numbers related to the netfilter matching
+ * function finding a sock for the sk_buff.
+ * Total skbs processed is sum(match_found*).
+ */
+ atomic64_t match_found_sk; /* An sk was already in the sk_buff. */
+ /* The connection tracker had or didn't have the sk. */
+ atomic64_t match_found_sk_in_ct;
+ atomic64_t match_found_no_sk_in_ct;
+ /*
+ * No sk could be found. No apparent owner. Could happen with
+ * unsolicited traffic.
+ */
+ atomic64_t match_no_sk;
+ /*
+ * The file ptr in the sk_socket wasn't there.
+ * This might happen for traffic while the socket is being closed.
+ */
+ atomic64_t match_no_sk_file;
+};
+
+/* Track the set active_set for the given tag. */
+struct tag_counter_set {
+ struct tag_node tn;
+ int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resource will be counted to prevent further rogue allocations,
+ * some will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+ struct rb_node node;
+ uid_t uid;
+
+ /*
+ * For the uid, how many accounting tags have been set.
+ */
+ int num_active_tags;
+ /* Track the number of proc_qtu_data that reference it */
+ int num_pqd;
+ struct rb_root tag_ref_tree;
+ /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+ struct tag_node tn;
+
+ /*
+ * This tracks the number of active sockets that have a tag on them
+ * which matches this tag_ref.tn.tag.
+ * A tag ref can live on after the sockets are untagged.
+ * A tag ref can only be removed during a tag delete command.
+ */
+ int num_sock_tags;
+};
+
+struct proc_qtu_data {
+ struct rb_node node;
+ pid_t pid;
+
+ struct uid_tag_data *parent_tag_data;
+
+ /* Tracks the sock_tags that need freeing upon this proc's death */
+ struct list_head sock_tag_list;
+ /* No spinlock_t sock_tag_list_lock; use the global one. */
+};
+
+/*----------------------------------------------*/
+#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 00000000000000..8cbd8e42bcc43e
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,564 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+ if (IS_ERR_OR_NULL(ptr)) {
+ pr_err("qtaguid: kmalloc failed\n");
+ BUG();
+ }
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+ char *res;
+
+ if (!tag)
+ res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC,
+ "tag_t@%p{tag=0x%llx, uid=%u}",
+ tag, *tag, get_uid_from_tag(*tag));
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ char *res;
+
+ if (!dc)
+ res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+ else if (showValues)
+ res = kasprintf(
+ GFP_ATOMIC, "data_counters@%p{"
+ "set0{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}, "
+ "set1{"
+ "rx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}, "
+ "tx{"
+ "tcp{b=%llu, p=%llu}, "
+ "udp{b=%llu, p=%llu},"
+ "other{b=%llu, p=%llu}}}}",
+ dc,
+ dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_RX][IFS_TCP].packets,
+ dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_RX][IFS_UDP].packets,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[0][IFS_TX][IFS_TCP].packets,
+ dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[0][IFS_TX][IFS_UDP].packets,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_RX][IFS_TCP].packets,
+ dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_RX][IFS_UDP].packets,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+ dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+ dc->bpc[1][IFS_TX][IFS_TCP].packets,
+ dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+ dc->bpc[1][IFS_TX][IFS_UDP].packets,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+ dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+ else
+ res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+ char *tag_str;
+ char *res;
+
+ if (!tn) {
+ res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&tn->tag);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_node@%p{tag=%s}",
+ tn, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+ char *tn_str;
+ char *res;
+
+ if (!tr) {
+ res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&tr->tn);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_ref@%p{%s, num_sock_tags=%d}",
+ tr, tn_str, tr->num_sock_tags);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+ char *tn_str;
+ char *counters_str;
+ char *parent_counters_str;
+ char *res;
+
+ if (!ts) {
+ res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tn_str = pp_tag_node(&ts->tn);
+ counters_str = pp_data_counters(&ts->counters, true);
+ parent_counters_str = pp_data_counters(ts->parent_counters, false);
+ res = kasprintf(GFP_ATOMIC,
+ "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+ ts, tn_str, counters_str, parent_counters_str);
+ _bug_on_err_or_null(res);
+ kfree(tn_str);
+ kfree(counters_str);
+ kfree(parent_counters_str);
+ return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+ char *res;
+ if (!is)
+ res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+ "list=list_head{...}, "
+ "ifname=%s, "
+ "total_dev={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "total_skb={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "last_known_valid=%d, "
+ "last_known={rx={bytes=%llu, "
+ "packets=%llu}, "
+ "tx={bytes=%llu, "
+ "packets=%llu}}, "
+ "active=%d, "
+ "net_dev=%p, "
+ "proc_ptr=%p, "
+ "tag_stat_tree=rb_root{...}}",
+ is,
+ is->ifname,
+ is->totals_via_dev[IFS_RX].bytes,
+ is->totals_via_dev[IFS_RX].packets,
+ is->totals_via_dev[IFS_TX].bytes,
+ is->totals_via_dev[IFS_TX].packets,
+ is->totals_via_skb[IFS_RX].bytes,
+ is->totals_via_skb[IFS_RX].packets,
+ is->totals_via_skb[IFS_TX].bytes,
+ is->totals_via_skb[IFS_TX].packets,
+ is->last_known_valid,
+ is->last_known[IFS_RX].bytes,
+ is->last_known[IFS_RX].packets,
+ is->last_known[IFS_TX].bytes,
+ is->last_known[IFS_TX].packets,
+ is->active,
+ is->net_dev,
+ is->proc_ptr);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+ char *tag_str;
+ char *res;
+
+ if (!st) {
+ res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ tag_str = pp_tag_t(&st->tag);
+ res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+ "sock_node=rb_node{...}, "
+ "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+ "pid=%u, tag=%s}",
+ st, st->sk, st->socket, atomic_long_read(
+ &st->socket->file->f_count),
+ st->pid, tag_str);
+ _bug_on_err_or_null(res);
+ kfree(tag_str);
+ return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+ char *res;
+
+ if (!utd)
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+ else
+ res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+ "uid=%u, num_active_acct_tags=%d, "
+ "num_pqd=%d, "
+ "tag_node_tree=rb_root{...}, "
+ "proc_qtu_data_tree=rb_root{...}}",
+ utd, utd->uid,
+ utd->num_active_tags, utd->num_pqd);
+ _bug_on_err_or_null(res);
+ return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ char *parent_tag_data_str;
+ char *res;
+
+ if (!pqd) {
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+ _bug_on_err_or_null(res);
+ return res;
+ }
+ parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+ res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+ "node=rb_node{...}, pid=%u, "
+ "parent_tag_data=%s, "
+ "sock_tag_list=list_head{...}}",
+ pqd, pqd->pid, parent_tag_data_str
+ );
+ _bug_on_err_or_null(res);
+ kfree(parent_tag_data_str);
+ return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+ struct rb_node *node;
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(sock_tag_tree)) {
+ str = "sock_tag_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(sock_tag_tree);
+ node;
+ node = rb_next(node)) {
+ sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+ struct sock_tag *sock_tag_entry;
+ char *str;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(sock_tag_list)) {
+ str = "sock_tag_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "sock_tag_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+ str = pp_sock_tag(sock_tag_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct proc_qtu_data *proc_qtu_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+ str = "proc_qtu_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "proc_qtu_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(proc_qtu_data_tree);
+ node;
+ node = rb_next(node)) {
+ proc_qtu_data_entry = rb_entry(node,
+ struct proc_qtu_data,
+ node);
+ str = pp_proc_qtu_data(proc_qtu_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ indent_level++;
+ prdebug_sock_tag_list(indent_level,
+ &proc_qtu_data_entry->sock_tag_list);
+ indent_level--;
+
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_ref *tag_ref_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_ref_tree)) {
+ str = "tag_ref_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_ref_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_ref_tree);
+ node;
+ node = rb_next(node)) {
+ tag_ref_entry = rb_entry(node,
+ struct tag_ref,
+ tn.node);
+ str = pp_tag_ref(tag_ref_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct uid_tag_data *uid_tag_data_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+ str = "uid_tag_data_tree=rb_root{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "uid_tag_data_tree=rb_root{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(uid_tag_data_tree);
+ node;
+ node = rb_next(node)) {
+ uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+ node);
+ str = pp_uid_tag_data(uid_tag_data_entry);
+ pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+ kfree(str);
+ if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+ indent_level++;
+ prdebug_tag_ref_tree(indent_level,
+ &uid_tag_data_entry->tag_ref_tree);
+ indent_level--;
+ }
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+ char *str;
+ struct rb_node *node;
+ struct tag_stat *ts_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (RB_EMPTY_ROOT(tag_stat_tree)) {
+ str = "tag_stat_tree{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "tag_stat_tree{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ for (node = rb_first(tag_stat_tree);
+ node;
+ node = rb_next(node)) {
+ ts_entry = rb_entry(node, struct tag_stat, tn.node);
+ str = pp_tag_stat(ts_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level,
+ str);
+ kfree(str);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+ char *str;
+ struct iface_stat *iface_entry;
+
+ if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+ return;
+
+ if (list_empty(iface_stat_list)) {
+ str = "iface_stat_list=list_head{}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ return;
+ }
+
+ str = "iface_stat_list=list_head{";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ indent_level++;
+ list_for_each_entry(iface_entry, iface_stat_list, list) {
+ str = pp_iface_stat(iface_entry);
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+ kfree(str);
+
+ spin_lock_bh(&iface_entry->tag_stat_list_lock);
+ if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+ indent_level++;
+ prdebug_tag_stat_tree(indent_level,
+ &iface_entry->tag_stat_tree);
+ indent_level--;
+ }
+ spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+ }
+ indent_level--;
+ str = "}";
+ pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+ "netdev_unknown",
+ "NETDEV_UP",
+ "NETDEV_DOWN",
+ "NETDEV_REBOOT",
+ "NETDEV_CHANGE",
+ "NETDEV_REGISTER",
+ "NETDEV_UNREGISTER",
+ "NETDEV_CHANGEMTU",
+ "NETDEV_CHANGEADDR",
+ "NETDEV_GOING_DOWN",
+ "NETDEV_CHANGENAME",
+ "NETDEV_FEAT_CHANGE",
+ "NETDEV_BONDING_FAILOVER",
+ "NETDEV_PRE_UP",
+ "NETDEV_PRE_TYPE_CHANGE",
+ "NETDEV_POST_TYPE_CHANGE",
+ "NETDEV_POST_INIT",
+ "NETDEV_UNREGISTER_BATCH",
+ "NETDEV_RELEASE",
+ "NETDEV_NOTIFY_PEERS",
+ "NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+ if (netdev_event < 0
+ || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+ return "bad event num";
+ return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 00000000000000..b63871a0be5a7a
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+ return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+ return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+ return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+ return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+ return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+ return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+ return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+ return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+ return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+ struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+ struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+ struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+ struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+ struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+ struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 00000000000000..fb2ef46b2b84dd
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,382 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * netfilter module to enforce network quotas
+ * Sam Johnston <samj@samj.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either
+ * version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+#include <linux/netfilter_ipv4/ipt_ULOG.h>
+#endif
+
+/**
+ * @lock: lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+ u_int64_t quota;
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t ref;
+ char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+ struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.C */
+static int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+ "Event number for NETLINK_NFLOG message. 0 disables log."
+ "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid = 0;
+static unsigned int quota_list_gid = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+ ulog_packet_msg_t *pm;
+ struct sk_buff *log_skb;
+ size_t size;
+ struct nlmsghdr *nlh;
+
+ if (!qlog_nl_event)
+ return;
+
+ size = NLMSG_SPACE(sizeof(*pm));
+ size = max(size, (size_t)NLMSG_GOODSIZE);
+ log_skb = alloc_skb(size, GFP_ATOMIC);
+ if (!log_skb) {
+ pr_err("xt_quota2: cannot alloc skb for logging\n");
+ return;
+ }
+
+ /* NLMSG_PUT() uses "goto nlmsg_failure" */
+ nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+ sizeof(*pm));
+ pm = NLMSG_DATA(nlh);
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ pm->data_len = 0;
+ pm->hook = hooknum;
+ if (prefix != NULL)
+ strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+ else
+ *(pm->prefix) = '\0';
+ if (in)
+ strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+ else
+ pm->indev_name[0] = '\0';
+
+ if (out)
+ strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+ else
+ pm->outdev_name[0] = '\0';
+
+ NETLINK_CB(log_skb).dst_group = 1;
+ pr_debug("throwing 1 packets to netlink group 1\n");
+ netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+
+nlmsg_failure: /* Used within NLMSG_PUT() */
+ pr_debug("xt_quota2: error during NLMSG_PUT\n");
+}
+#else
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+}
+#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static int quota_proc_read(char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ struct xt_quota_counter *e = data;
+ int ret;
+
+ spin_lock_bh(&e->lock);
+ ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota);
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static int quota_proc_write(struct file *file, const char __user *input,
+ unsigned long size, void *data)
+{
+ struct xt_quota_counter *e = data;
+ char buf[sizeof("18446744073709551616")];
+
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+ if (copy_from_user(buf, input, size) != 0)
+ return -EFAULT;
+ buf[sizeof(buf)-1] = '\0';
+
+ spin_lock_bh(&e->lock);
+ e->quota = simple_strtoull(buf, NULL, 0);
+ spin_unlock_bh(&e->lock);
+ return size;
+}
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+ struct xt_quota_counter *e;
+ unsigned int size;
+
+ /* Do not need all the procfs things for anonymous counters. */
+ size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+ e = kmalloc(size, GFP_KERNEL);
+ if (e == NULL)
+ return NULL;
+
+ e->quota = q->quota;
+ spin_lock_init(&e->lock);
+ if (!anon) {
+ INIT_LIST_HEAD(&e->list);
+ atomic_set(&e->ref, 1);
+ strlcpy(e->name, q->name, sizeof(e->name));
+ }
+ return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name: name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+ struct proc_dir_entry *p;
+ struct xt_quota_counter *e = NULL;
+ struct xt_quota_counter *new_e;
+
+ if (*q->name == '\0')
+ return q2_new_counter(q, true);
+
+ /* No need to hold a lock while getting a new counter */
+ new_e = q2_new_counter(q, false);
+ if (new_e == NULL)
+ goto out;
+
+ spin_lock_bh(&counter_list_lock);
+ list_for_each_entry(e, &counter_list, list)
+ if (strcmp(e->name, q->name) == 0) {
+ atomic_inc(&e->ref);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(new_e);
+ pr_debug("xt_quota2: old counter name=%s", e->name);
+ return e;
+ }
+ e = new_e;
+ pr_debug("xt_quota2: new_counter name=%s", e->name);
+ list_add_tail(&e->list, &counter_list);
+ /* The entry having a refcount of 1 is not directly destructible.
+ * This func has not yet returned the new entry, thus iptables
+ * holds no references for destroying this entry.
+ * For another rule to try to destroy it, this func would first need
+ * to be re-invoked to acquire a new ref for the same named quota.
+ * Nobody will access the e->procfs_entry either.
+ * So release the lock. */
+ spin_unlock_bh(&counter_list_lock);
+
+ /* create_proc_entry() is not spin_lock happy */
+ p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms,
+ proc_xt_quota);
+
+ if (IS_ERR_OR_NULL(p)) {
+ spin_lock_bh(&counter_list_lock);
+ list_del(&e->list);
+ spin_unlock_bh(&counter_list_lock);
+ goto out;
+ }
+ p->data = e;
+ p->read_proc = quota_proc_read;
+ p->write_proc = quota_proc_write;
+ p->uid = quota_list_uid;
+ p->gid = quota_list_gid;
+ return e;
+
+ out:
+ kfree(e);
+ return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+ pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+ if (q->flags & ~XT_QUOTA_MASK)
+ return -EINVAL;
+
+ q->name[sizeof(q->name)-1] = '\0';
+ if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+ printk(KERN_ERR "xt_quota.3: illegal name\n");
+ return -EINVAL;
+ }
+
+ q->master = q2_get_counter(q);
+ if (q->master == NULL) {
+ printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+
+ if (*q->name == '\0') {
+ kfree(e);
+ return;
+ }
+
+ spin_lock_bh(&counter_list_lock);
+ if (!atomic_dec_and_test(&e->ref)) {
+ spin_unlock_bh(&counter_list_lock);
+ return;
+ }
+
+ list_del(&e->list);
+ remove_proc_entry(e->name, proc_xt_quota);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+ bool ret = q->flags & XT_QUOTA_INVERT;
+
+ spin_lock_bh(&e->lock);
+ if (q->flags & XT_QUOTA_GROW) {
+ /*
+ * While no_change is pointless in "grow" mode, we will
+ * implement it here simply to have a consistent behavior.
+ */
+ if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+ e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ }
+ ret = true;
+ } else {
+ if (e->quota >= skb->len) {
+ if (!(q->flags & XT_QUOTA_NO_CHANGE))
+ e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ ret = !ret;
+ } else {
+ /* We are transitioning, log that fact. */
+ if (e->quota) {
+ quota2_log(par->hooknum,
+ skb,
+ par->in,
+ par->out,
+ q->name);
+ }
+ /* we do not allow even small packets from now on */
+ e->quota = 0;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV4,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV6,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init quota_mt2_init(void)
+{
+ int ret;
+ pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+ nflognl = netlink_kernel_create(&init_net,
+ NETLINK_NFLOG, 1, NULL,
+ NULL, THIS_MODULE);
+ if (!nflognl)
+ return -ENOMEM;
+#endif
+
+ proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+ if (proc_xt_quota == NULL)
+ return -EACCES;
+
+ ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ if (ret < 0)
+ remove_proc_entry("xt_quota", init_net.proc_net);
+ pr_debug("xt_quota2: init() %d", ret);
+ return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+ xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/sound/soc/codecs/adau1977.c b/sound/soc/codecs/adau1977.c
new file mode 100644
index 00000000000000..339ab3f8ec5056
--- /dev/null
+++ b/sound/soc/codecs/adau1977.c
@@ -0,0 +1,1119 @@
+/*
+ * ADAU1977/ADAU1978/ADAU1979 driver
+ *
+ * Copyright 2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/platform_data/adau1977.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+
+#include "adau1977.h"
+
+#define ADAU1977_REG_POWER 0x00
+#define ADAU1977_REG_PLL 0x01
+#define ADAU1977_REG_BOOST 0x02
+#define ADAU1977_REG_MICBIAS 0x03
+#define ADAU1977_REG_BLOCK_POWER_SAI 0x04
+#define ADAU1977_REG_SAI_CTRL0 0x05
+#define ADAU1977_REG_SAI_CTRL1 0x06
+#define ADAU1977_REG_CMAP12 0x07
+#define ADAU1977_REG_CMAP34 0x08
+#define ADAU1977_REG_SAI_OVERTEMP 0x09
+#define ADAU1977_REG_POST_ADC_GAIN(x) (0x0a + (x))
+#define ADAU1977_REG_MISC_CONTROL 0x0e
+#define ADAU1977_REG_DIAG_CONTROL 0x10
+#define ADAU1977_REG_STATUS(x) (0x11 + (x))
+#define ADAU1977_REG_DIAG_IRQ1 0x15
+#define ADAU1977_REG_DIAG_IRQ2 0x16
+#define ADAU1977_REG_ADJUST1 0x17
+#define ADAU1977_REG_ADJUST2 0x18
+#define ADAU1977_REG_ADC_CLIP 0x19
+#define ADAU1977_REG_DC_HPF_CAL 0x1a
+
+#define ADAU1977_POWER_RESET BIT(7)
+#define ADAU1977_POWER_PWUP BIT(0)
+
+#define ADAU1977_PLL_CLK_S BIT(4)
+#define ADAU1977_PLL_MCS_MASK 0x7
+
+#define ADAU1977_MICBIAS_MB_VOLTS_MASK 0xf0
+#define ADAU1977_MICBIAS_MB_VOLTS_OFFSET 4
+
+#define ADAU1977_BLOCK_POWER_SAI_LR_POL BIT(7)
+#define ADAU1977_BLOCK_POWER_SAI_BCLK_EDGE BIT(6)
+#define ADAU1977_BLOCK_POWER_SAI_LDO_EN BIT(5)
+
+#define ADAU1977_SAI_CTRL0_FMT_MASK (0x3 << 6)
+#define ADAU1977_SAI_CTRL0_FMT_I2S (0x0 << 6)
+#define ADAU1977_SAI_CTRL0_FMT_LJ (0x1 << 6)
+#define ADAU1977_SAI_CTRL0_FMT_RJ_24BIT (0x2 << 6)
+#define ADAU1977_SAI_CTRL0_FMT_RJ_16BIT (0x3 << 6)
+
+#define ADAU1977_SAI_CTRL0_SAI_MASK (0x7 << 3)
+#define ADAU1977_SAI_CTRL0_SAI_I2S (0x0 << 3)
+#define ADAU1977_SAI_CTRL0_SAI_TDM_2 (0x1 << 3)
+#define ADAU1977_SAI_CTRL0_SAI_TDM_4 (0x2 << 3)
+#define ADAU1977_SAI_CTRL0_SAI_TDM_8 (0x3 << 3)
+#define ADAU1977_SAI_CTRL0_SAI_TDM_16 (0x4 << 3)
+
+#define ADAU1977_SAI_CTRL0_FS_MASK (0x7)
+#define ADAU1977_SAI_CTRL0_FS_8000_12000 (0x0)
+#define ADAU1977_SAI_CTRL0_FS_16000_24000 (0x1)
+#define ADAU1977_SAI_CTRL0_FS_32000_48000 (0x2)
+#define ADAU1977_SAI_CTRL0_FS_64000_96000 (0x3)
+#define ADAU1977_SAI_CTRL0_FS_128000_192000 (0x4)
+
+#define ADAU1977_SAI_CTRL1_SLOT_WIDTH_MASK (0x3 << 5)
+#define ADAU1977_SAI_CTRL1_SLOT_WIDTH_32 (0x0 << 5)
+#define ADAU1977_SAI_CTRL1_SLOT_WIDTH_24 (0x1 << 5)
+#define ADAU1977_SAI_CTRL1_SLOT_WIDTH_16 (0x2 << 5)
+#define ADAU1977_SAI_CTRL1_DATA_WIDTH_MASK (0x1 << 4)
+#define ADAU1977_SAI_CTRL1_DATA_WIDTH_16BIT (0x1 << 4)
+#define ADAU1977_SAI_CTRL1_DATA_WIDTH_24BIT (0x0 << 4)
+#define ADAU1977_SAI_CTRL1_LRCLK_PULSE BIT(3)
+#define ADAU1977_SAI_CTRL1_MSB BIT(2)
+#define ADAU1977_SAI_CTRL1_BCLKRATE_16 (0x1 << 1)
+#define ADAU1977_SAI_CTRL1_BCLKRATE_32 (0x0 << 1)
+#define ADAU1977_SAI_CTRL1_BCLKRATE_MASK (0x1 << 1)
+#define ADAU1977_SAI_CTRL1_MASTER BIT(0)
+
+#define ADAU1977_SAI_OVERTEMP_DRV_C(x) BIT(4 + (x))
+#define ADAU1977_SAI_OVERTEMP_DRV_HIZ BIT(3)
+
+#define ADAU1977_MISC_CONTROL_SUM_MODE_MASK (0x3 << 6)
+#define ADAU1977_MISC_CONTROL_SUM_MODE_1CH (0x2 << 6)
+#define ADAU1977_MISC_CONTROL_SUM_MODE_2CH (0x1 << 6)
+#define ADAU1977_MISC_CONTROL_SUM_MODE_4CH (0x0 << 6)
+#define ADAU1977_MISC_CONTROL_MMUTE BIT(4)
+#define ADAU1977_MISC_CONTROL_DC_CAL BIT(0)
+
+#define ADAU1977_CHAN_MAP_SECOND_SLOT_OFFSET 4
+#define ADAU1977_CHAN_MAP_FIRST_SLOT_OFFSET 0
+
+struct adau1977 {
+ struct regmap *regmap;
+ bool right_j;
+ unsigned int sysclk;
+ enum adau1977_sysclk_src sysclk_src;
+ enum adau1977_type type;
+
+ struct regulator *avdd_reg;
+ struct regulator *dvdd_reg;
+
+ struct snd_pcm_hw_constraint_list constraints;
+
+ struct device *dev;
+ void (*switch_mode)(struct device *dev);
+
+ unsigned int max_fs;
+ unsigned int slot_width;
+ bool enabled;
+ bool master;
+};
+
+static const struct reg_default adau1977_reg_defaults[] = {
+ { 0x00, 0x00 },
+ { 0x01, 0x41 },
+ { 0x02, 0x4a },
+ { 0x03, 0x7d },
+ { 0x04, 0x3d },
+ { 0x05, 0x02 },
+ { 0x06, 0x00 },
+ { 0x07, 0x10 },
+ { 0x08, 0x32 },
+ { 0x09, 0xf0 },
+ { 0x0a, 0xa0 },
+ { 0x0b, 0xa0 },
+ { 0x0c, 0xa0 },
+ { 0x0d, 0xa0 },
+ { 0x0e, 0x02 },
+ { 0x10, 0x0f },
+ { 0x15, 0x20 },
+ { 0x16, 0x00 },
+ { 0x17, 0x00 },
+ { 0x18, 0x00 },
+ { 0x1a, 0x00 },
+};
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(adau1977_adc_gain, -3562, 6000);
+
+static const struct snd_soc_dapm_widget adau1977_micbias_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("MICBIAS", ADAU1977_REG_MICBIAS,
+ 3, 0, NULL, 0)
+};
+
+static const struct snd_soc_dapm_widget adau1977_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("Vref", ADAU1977_REG_BLOCK_POWER_SAI,
+ 4, 0, NULL, 0),
+
+ SND_SOC_DAPM_ADC("ADC1", "Capture", ADAU1977_REG_BLOCK_POWER_SAI, 0, 0),
+ SND_SOC_DAPM_ADC("ADC2", "Capture", ADAU1977_REG_BLOCK_POWER_SAI, 1, 0),
+ SND_SOC_DAPM_ADC("ADC3", "Capture", ADAU1977_REG_BLOCK_POWER_SAI, 2, 0),
+ SND_SOC_DAPM_ADC("ADC4", "Capture", ADAU1977_REG_BLOCK_POWER_SAI, 3, 0),
+
+ SND_SOC_DAPM_INPUT("AIN1"),
+ SND_SOC_DAPM_INPUT("AIN2"),
+ SND_SOC_DAPM_INPUT("AIN3"),
+ SND_SOC_DAPM_INPUT("AIN4"),
+
+ SND_SOC_DAPM_OUTPUT("VREF"),
+};
+
+static const struct snd_soc_dapm_route adau1977_dapm_routes[] = {
+ { "ADC1", NULL, "AIN1" },
+ { "ADC2", NULL, "AIN2" },
+ { "ADC3", NULL, "AIN3" },
+ { "ADC4", NULL, "AIN4" },
+
+ { "ADC1", NULL, "Vref" },
+ { "ADC2", NULL, "Vref" },
+ { "ADC3", NULL, "Vref" },
+ { "ADC4", NULL, "Vref" },
+
+ { "VREF", NULL, "Vref" },
+};
+
+#define ADAU1977_VOLUME(x) \
+ SOC_SINGLE_TLV("ADC" #x " Capture Volume", \
+ ADAU1977_REG_POST_ADC_GAIN((x) - 1), \
+ 0, 255, 1, adau1977_adc_gain)
+
+#define ADAU1977_HPF_SWITCH(x) \
+ SOC_SINGLE("ADC" #x " Highpass-Filter Capture Switch", \
+ ADAU1977_REG_DC_HPF_CAL, (x) - 1, 1, 0)
+
+#define ADAU1977_DC_SUB_SWITCH(x) \
+ SOC_SINGLE("ADC" #x " DC Substraction Capture Switch", \
+ ADAU1977_REG_DC_HPF_CAL, (x) + 3, 1, 0)
+
+static const struct snd_kcontrol_new adau1977_snd_controls[] = {
+ ADAU1977_VOLUME(1),
+ ADAU1977_VOLUME(2),
+ ADAU1977_VOLUME(3),
+ ADAU1977_VOLUME(4),
+
+ ADAU1977_HPF_SWITCH(1),
+ ADAU1977_HPF_SWITCH(2),
+ ADAU1977_HPF_SWITCH(3),
+ ADAU1977_HPF_SWITCH(4),
+
+ ADAU1977_DC_SUB_SWITCH(1),
+ ADAU1977_DC_SUB_SWITCH(2),
+ ADAU1977_DC_SUB_SWITCH(3),
+ ADAU1977_DC_SUB_SWITCH(4),
+};
+
+static int adau1977_reset(struct adau1977 *adau1977)
+{
+ int ret;
+
+ /*
+ * The reset bit is obviously volatile, but we need to be able to cache
+ * the other bits in the register, so we can't just mark the whole
+ * register as volatile. Since this is the only place where we'll ever
+ * touch the reset bit just bypass the cache for this operation.
+ */
+ regcache_cache_bypass(adau1977->regmap, true);
+ ret = regmap_write(adau1977->regmap, ADAU1977_REG_POWER,
+ ADAU1977_POWER_RESET);
+ regcache_cache_bypass(adau1977->regmap, false);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int adau1977_lookup_mcs(struct adau1977 *adau1977, unsigned int rate)
+{
+ int mcs;
+
+ if (rate >= 8000 && rate <= 12000)
+ rate *= 512;
+ else if (rate >= 16000 && rate <= 24000)
+ rate *= 256;
+ else if (rate >= 32000 && rate <= 48000)
+ rate *= 128;
+ else if (rate >= 64000 && rate <= 96000)
+ rate *= 64;
+ else if (rate >= 128000 && rate <= 192000)
+ rate *= 32;
+ else
+ return -EINVAL;
+
+ if (adau1977->sysclk % rate != 0)
+ return -EINVAL;
+
+ mcs = adau1977->sysclk / rate;
+
+ /* The factors configured by MCS are 1, 2, 3, 4, 6 */
+ if (mcs < 1 || mcs > 6 || mcs == 5)
+ return -EINVAL;
+
+ mcs = mcs - 1;
+ if (mcs == 5)
+ mcs = 4;
+
+ return mcs;
+}
+
/*
 * DAI hw_params callback: program FS range, MCS divider, data format
 * and (in master mode) the data width / bitclock rate for the stream.
 * Returns 0 on success or a negative error code.
 */
static int adau1977_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(codec);
	unsigned int rate = params_rate(params);
	unsigned int slot_width;
	unsigned int ctrl0, ctrl0_mask;
	unsigned int ctrl1;
	int mcs;
	int ret;

	/* Only the MCLK source needs an explicit sysclk/rate divider */
	if (adau1977->sysclk_src == ADAU1977_SYSCLK_SRC_MCLK) {
		mcs = adau1977_lookup_mcs(adau1977, rate);
		if (mcs < 0)
			return mcs;
	} else {
		mcs = 0;
	}

	ctrl0_mask = ADAU1977_SAI_CTRL0_FS_MASK;

	/* Map the rate onto one of the five hardware FS bands */
	if (rate >= 8000 && rate <= 12000)
		ctrl0 = ADAU1977_SAI_CTRL0_FS_8000_12000;
	else if (rate >= 16000 && rate <= 24000)
		ctrl0 = ADAU1977_SAI_CTRL0_FS_16000_24000;
	else if (rate >= 32000 && rate <= 48000)
		ctrl0 = ADAU1977_SAI_CTRL0_FS_32000_48000;
	else if (rate >= 64000 && rate <= 96000)
		ctrl0 = ADAU1977_SAI_CTRL0_FS_64000_96000;
	else if (rate >= 128000 && rate <= 192000)
		ctrl0 = ADAU1977_SAI_CTRL0_FS_128000_192000;
	else
		return -EINVAL;

	/* Right-justified mode encodes the sample width in the format field */
	if (adau1977->right_j) {
		switch (params_width(params)) {
		case 16:
			ctrl0 |= ADAU1977_SAI_CTRL0_FMT_RJ_16BIT;
			break;
		case 24:
			ctrl0 |= ADAU1977_SAI_CTRL0_FMT_RJ_24BIT;
			break;
		default:
			return -EINVAL;
		}
		ctrl0_mask |= ADAU1977_SAI_CTRL0_FMT_MASK;
	}

	/* As clock master we also pick the data width and bitclock rate */
	if (adau1977->master) {
		switch (params_width(params)) {
		case 16:
			ctrl1 = ADAU1977_SAI_CTRL1_DATA_WIDTH_16BIT;
			slot_width = 16;
			break;
		case 24:
		case 32:
			ctrl1 = ADAU1977_SAI_CTRL1_DATA_WIDTH_24BIT;
			slot_width = 32;
			break;
		default:
			return -EINVAL;
		}

		/* In TDM mode there is a fixed slot width */
		if (adau1977->slot_width)
			slot_width = adau1977->slot_width;

		if (slot_width == 16)
			ctrl1 |= ADAU1977_SAI_CTRL1_BCLKRATE_16;
		else
			ctrl1 |= ADAU1977_SAI_CTRL1_BCLKRATE_32;

		ret = regmap_update_bits(adau1977->regmap,
			ADAU1977_REG_SAI_CTRL1,
			ADAU1977_SAI_CTRL1_DATA_WIDTH_MASK |
			ADAU1977_SAI_CTRL1_BCLKRATE_MASK,
			ctrl1);
		if (ret < 0)
			return ret;
	}

	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_CTRL0,
				ctrl0_mask, ctrl0);
	if (ret < 0)
		return ret;

	return regmap_update_bits(adau1977->regmap, ADAU1977_REG_PLL,
				ADAU1977_PLL_MCS_MASK, mcs);
}
+
+static int adau1977_set_power(struct adau1977 *adau1977, bool enable)
+{
+ unsigned int val;
+ struct adau1977_platform_data *pdata = adau1977->dev->platform_data;
+ int ret = 0;
+
+ if (enable == adau1977->enabled)
+ return 0;
+
+ if (!enable) {
+ ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_POWER,
+ ADAU1977_POWER_PWUP, 0);
+ regcache_mark_dirty(adau1977->regmap);
+ } else {
+ ret = regulator_enable(adau1977->avdd_reg);
+ if (ret)
+ return ret;
+ if (adau1977->dvdd_reg) {
+ ret = regulator_enable(adau1977->dvdd_reg);
+ if (ret)
+ goto err_disable_avdd;
+ }
+ }
+
+ if (gpio_is_valid(pdata->reset_gpio)) {
+ if (pdata->reset_gpio)
+ gpio_set_value(pdata->reset_gpio, enable);
+ }
+
+ regcache_cache_only(adau1977->regmap, !enable);
+
+ if (enable) {
+ if (adau1977->switch_mode)
+ adau1977->switch_mode(adau1977->dev);
+
+ ret = adau1977_reset(adau1977);
+ if (ret)
+ goto err_disable_dvdd;
+ ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_POWER,
+ ADAU1977_POWER_PWUP, ADAU1977_POWER_PWUP);
+ if (ret)
+ goto err_disable_dvdd;
+ ret = regcache_sync(adau1977->regmap);
+ if (ret)
+ goto err_disable_dvdd;
+
+ /*
+ * The PLL register is not affected by the software reset. It is
+ * possible that the value of the register was changed to the default
+ * value while we were in cache only mode. In this case regcache_sync
+ * will skip over it and we have to manually sync it.
+ */
+ ret = regmap_read(adau1977->regmap, ADAU1977_REG_PLL, &val);
+ if (ret)
+ goto err_disable_dvdd;
+ if (val == 0x41) {
+ regcache_cache_bypass(adau1977->regmap, true);
+ ret = regmap_write(adau1977->regmap, ADAU1977_REG_PLL, 0x41);
+ if (ret)
+ goto err_disable_dvdd;
+ regcache_cache_bypass(adau1977->regmap, false);
+ }
+ } else {
+ regulator_disable(adau1977->avdd_reg);
+ if (adau1977->dvdd_reg)
+ regulator_disable(adau1977->dvdd_reg);
+ }
+
+ adau1977->enabled = enable;
+
+ return ret;
+
+err_disable_dvdd:
+ if (adau1977->dvdd_reg)
+ regulator_disable(adau1977->dvdd_reg);
+err_disable_avdd:
+ regulator_disable(adau1977->avdd_reg);
+ return ret;
+}
+
+static int adau1977_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ ret = adau1977_set_power(adau1977, true);
+ break;
+ case SND_SOC_BIAS_OFF:
+ ret = adau1977_set_power(adau1977, false);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ codec->dapm.bias_level = level;
+
+ return 0;
+}
+
/*
 * DAI set_tdm_slot callback. slots == 0 restores plain I2S mode;
 * otherwise configures TDM with the given slot count/width and maps up
 * to four active rx slots onto the four ADC channels. tx_mask must be
 * 0 (capture-only device). Returns 0 or a negative error code.
 */
static int adau1977_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
	unsigned int rx_mask, int slots, int width)
{
	struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(dai->codec);
	unsigned int ctrl0, ctrl1, drv;
	unsigned int slot[4];
	unsigned int i;
	int ret;

	if (slots == 0) {
		/* 0 = No fixed slot width */
		adau1977->slot_width = 0;
		adau1977->max_fs = 192000;
		return regmap_update_bits(adau1977->regmap,
			ADAU1977_REG_SAI_CTRL0, ADAU1977_SAI_CTRL0_SAI_MASK,
			ADAU1977_SAI_CTRL0_SAI_I2S);
	}

	if (rx_mask == 0 || tx_mask != 0)
		return -EINVAL;

	/* Collect up to 4 slot indices; enable the output driver per channel */
	drv = 0;
	for (i = 0; i < 4; i++) {
		slot[i] = __ffs(rx_mask);
		drv |= ADAU1977_SAI_OVERTEMP_DRV_C(i);
		rx_mask &= ~(1 << slot[i]);
		if (slot[i] >= slots)
			return -EINVAL;
		if (rx_mask == 0)
			break;
	}

	/* More than 4 slots requested: this is a 4-channel ADC */
	if (rx_mask != 0)
		return -EINVAL;

	switch (width) {
	case 16:
		ctrl1 = ADAU1977_SAI_CTRL1_SLOT_WIDTH_16;
		break;
	case 24:
		/* We can only generate 16 bit or 32 bit wide slots */
		if (adau1977->master)
			return -EINVAL;
		ctrl1 = ADAU1977_SAI_CTRL1_SLOT_WIDTH_24;
		break;
	case 32:
		ctrl1 = ADAU1977_SAI_CTRL1_SLOT_WIDTH_32;
		break;
	default:
		return -EINVAL;
	}

	switch (slots) {
	case 2:
		ctrl0 = ADAU1977_SAI_CTRL0_SAI_TDM_2;
		break;
	case 4:
		ctrl0 = ADAU1977_SAI_CTRL0_SAI_TDM_4;
		break;
	case 8:
		ctrl0 = ADAU1977_SAI_CTRL0_SAI_TDM_8;
		break;
	case 16:
		ctrl0 = ADAU1977_SAI_CTRL0_SAI_TDM_16;
		break;
	default:
		return -EINVAL;
	}

	/* Drive SDATA only during our own slots; tristate the rest */
	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_OVERTEMP,
		ADAU1977_SAI_OVERTEMP_DRV_C(0) |
		ADAU1977_SAI_OVERTEMP_DRV_C(1) |
		ADAU1977_SAI_OVERTEMP_DRV_C(2) |
		ADAU1977_SAI_OVERTEMP_DRV_C(3), drv);
	if (ret)
		return ret;

	/* Channel-to-slot mapping, two channels per register */
	ret = regmap_write(adau1977->regmap, ADAU1977_REG_CMAP12,
		(slot[1] << ADAU1977_CHAN_MAP_SECOND_SLOT_OFFSET) |
		(slot[0] << ADAU1977_CHAN_MAP_FIRST_SLOT_OFFSET));
	if (ret)
		return ret;

	ret = regmap_write(adau1977->regmap, ADAU1977_REG_CMAP34,
		(slot[3] << ADAU1977_CHAN_MAP_SECOND_SLOT_OFFSET) |
		(slot[2] << ADAU1977_CHAN_MAP_FIRST_SLOT_OFFSET));
	if (ret)
		return ret;

	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_CTRL0,
		ADAU1977_SAI_CTRL0_SAI_MASK, ctrl0);
	if (ret)
		return ret;

	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_CTRL1,
		ADAU1977_SAI_CTRL1_SLOT_WIDTH_MASK, ctrl1);
	if (ret)
		return ret;

	adau1977->slot_width = width;

	/* In master mode the maximum bitclock is 24.576 MHz */
	adau1977->max_fs = min(192000, 24576000 / width / slots);

	return 0;
}
+
+
+static int adau1977_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(dai->codec);
+ unsigned int val;
+
+ if (mute)
+ val = ADAU1977_MISC_CONTROL_MMUTE;
+ else
+ val = 0;
+
+ return regmap_update_bits(adau1977->regmap, ADAU1977_REG_MISC_CONTROL,
+ ADAU1977_MISC_CONTROL_MMUTE, val);
+}
+
/*
 * DAI set_fmt callback: configure master/slave role, clock polarity and
 * data format across BLOCK_POWER_SAI, SAI_CTRL0 and SAI_CTRL1.
 * Returns 0 or a negative error code.
 */
static int adau1977_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(dai->codec);
	unsigned int ctrl0 = 0, ctrl1 = 0, block_power = 0;
	bool invert_lrclk;
	int ret;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		adau1977->master = false;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		ctrl1 |= ADAU1977_SAI_CTRL1_MASTER;
		adau1977->master = true;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		invert_lrclk = false;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		block_power |= ADAU1977_BLOCK_POWER_SAI_BCLK_EDGE;
		invert_lrclk = false;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		invert_lrclk = true;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		block_power |= ADAU1977_BLOCK_POWER_SAI_BCLK_EDGE;
		invert_lrclk = true;
		break;
	default:
		return -EINVAL;
	}

	adau1977->right_j = false;
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		ctrl0 |= ADAU1977_SAI_CTRL0_FMT_I2S;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		ctrl0 |= ADAU1977_SAI_CTRL0_FMT_LJ;
		/* LJ/RJ use the opposite LRCLK polarity to I2S */
		invert_lrclk = !invert_lrclk;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		ctrl0 |= ADAU1977_SAI_CTRL0_FMT_RJ_24BIT;
		adau1977->right_j = true;
		invert_lrclk = !invert_lrclk;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		ctrl1 |= ADAU1977_SAI_CTRL1_LRCLK_PULSE;
		ctrl0 |= ADAU1977_SAI_CTRL0_FMT_I2S;
		/* DSP modes use a LRCLK pulse; polarity is fixed */
		invert_lrclk = false;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		ctrl1 |= ADAU1977_SAI_CTRL1_LRCLK_PULSE;
		ctrl0 |= ADAU1977_SAI_CTRL0_FMT_LJ;
		invert_lrclk = false;
		break;
	default:
		return -EINVAL;
	}

	if (invert_lrclk)
		block_power |= ADAU1977_BLOCK_POWER_SAI_LR_POL;

	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_BLOCK_POWER_SAI,
		ADAU1977_BLOCK_POWER_SAI_LR_POL |
		ADAU1977_BLOCK_POWER_SAI_BCLK_EDGE, block_power);
	if (ret)
		return ret;

	ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_CTRL0,
		ADAU1977_SAI_CTRL0_FMT_MASK,
		ctrl0);
	if (ret)
		return ret;

	return regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_CTRL1,
		ADAU1977_SAI_CTRL1_MASTER | ADAU1977_SAI_CTRL1_LRCLK_PULSE,
		ctrl1);
}
+
/*
 * DAI startup callback: constrain the runtime's rates and formats to
 * what the current sysclk, TDM slot width and master-mode limits allow.
 */
static int adau1977_startup(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(dai->codec);
	u64 formats = 0;

	/* Narrow sample formats to those that fit the configured slot width */
	if (adau1977->slot_width == 16)
		formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE;
	else if (adau1977->right_j || adau1977->slot_width == 24)
		formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE;

	/* Rate list set by adau1977_set_sysclk() via constraints.mask */
	snd_pcm_hw_constraint_list(substream->runtime, 0,
		SNDRV_PCM_HW_PARAM_RATE, &adau1977->constraints);

	/* max_fs caps the rate in master mode (bitclock limit) */
	if (adau1977->master)
		snd_pcm_hw_constraint_minmax(substream->runtime,
			SNDRV_PCM_HW_PARAM_RATE, 8000, adau1977->max_fs);

	if (formats != 0)
		snd_pcm_hw_constraint_mask64(substream->runtime,
			SNDRV_PCM_HW_PARAM_FORMAT, formats);

	return 0;
}
+
+static int adau1977_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(dai->codec);
+ unsigned int val;
+
+ if (tristate)
+ val = ADAU1977_SAI_OVERTEMP_DRV_HIZ;
+ else
+ val = 0;
+
+ return regmap_update_bits(adau1977->regmap, ADAU1977_REG_SAI_OVERTEMP,
+ ADAU1977_SAI_OVERTEMP_DRV_HIZ, val);
+}
+
+#define ADAU1977_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
/* DAI callbacks; the device is a capture-only 4-channel ADC. */
static const struct snd_soc_dai_ops adau1977_dai_ops = {
	.startup = adau1977_startup,
	.hw_params = adau1977_hw_params,
	.digital_mute = adau1977_mute,
	.set_fmt = adau1977_set_dai_fmt,
	.set_tdm_slot = adau1977_set_tdm_slot,
	.set_tristate = adau1977_set_tristate,
};
+
/*
 * Single capture DAI. Rates are advertised as KNOT because the usable
 * set depends on the sysclk; adau1977_startup() applies the real
 * constraint list at stream open.
 */
static struct snd_soc_dai_driver adau1977_dai = {
	.name = "adau1977-hifi",
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 4,
		.rates = SNDRV_PCM_RATE_KNOT,
		.formats = ADAU1977_FORMATS,
		.sig_bits = 24,	/* ADC resolution: 24 valid bits */
	},
	.ops = &adau1977_dai_ops,
};
+
/*
 * Supported rates, grouped by base-rate family per row (1x/2x/4x/8x/16x
 * of 8000, 11025 and 12000 Hz). The ADAU1977_RATE_CONSTRAINT_MASK_*
 * values below index into this table bit by bit, so the order matters.
 *
 * Fix: 172400 was a typo for 176400 (4 x 44100), which the 44100
 * constraint mask (0x03e0, bits 5..9) expects at index 9.
 */
static const unsigned int adau1977_rates[] = {
	8000, 16000, 32000, 64000, 128000,
	11025, 22050, 44100, 88200, 176400,
	12000, 24000, 48000, 96000, 192000,
};
+
+#define ADAU1977_RATE_CONSTRAINT_MASK_32000 0x001f
+#define ADAU1977_RATE_CONSTRAINT_MASK_44100 0x03e0
+#define ADAU1977_RATE_CONSTRAINT_MASK_48000 0x7c00
+/* All rates >= 32000 */
+#define ADAU1977_RATE_CONSTRAINT_MASK_LRCLK 0x739c
+
/*
 * Check whether the given MCLK can be divided down to 128 * base_freq
 * using one of the MCS dividers supported by the hardware (1, 2, 3, 4
 * or 6).
 */
static bool adau1977_check_sysclk(unsigned int mclk, unsigned int base_freq)
{
	unsigned int unit = base_freq * 128;

	if (mclk % unit)
		return false;

	switch (mclk / unit) {
	case 1:
	case 2:
	case 3:
	case 4:
	case 6:
		return true;
	default:
		return false;
	}
}
+
+static int adau1977_set_sysclk(struct snd_soc_codec *codec,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(codec);
+ unsigned int mask = 0;
+ unsigned int clk_src;
+ unsigned int ret;
+
+ if (dir != SND_SOC_CLOCK_IN)
+ return -EINVAL;
+
+ if (clk_id != ADAU1977_SYSCLK)
+ return -EINVAL;
+
+ switch (source) {
+ case ADAU1977_SYSCLK_SRC_MCLK:
+ clk_src = 0;
+ break;
+ case ADAU1977_SYSCLK_SRC_LRCLK:
+ clk_src = ADAU1977_PLL_CLK_S;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ if (freq != 0 && source == ADAU1977_SYSCLK_SRC_MCLK) {
+ if (freq < 4000000 || freq > 36864000)
+ return -EINVAL;
+
+ if (adau1977_check_sysclk(freq, 32000))
+ mask |= ADAU1977_RATE_CONSTRAINT_MASK_32000;
+ if (adau1977_check_sysclk(freq, 44100))
+ mask |= ADAU1977_RATE_CONSTRAINT_MASK_44100;
+ if (adau1977_check_sysclk(freq, 48000))
+ mask |= ADAU1977_RATE_CONSTRAINT_MASK_48000;
+
+ if (mask == 0)
+ return -EINVAL;
+ } else if (source == ADAU1977_SYSCLK_SRC_LRCLK) {
+ mask = ADAU1977_RATE_CONSTRAINT_MASK_LRCLK;
+ }
+
+ ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_PLL,
+ ADAU1977_PLL_CLK_S, clk_src);
+ if (ret)
+ return ret;
+
+ adau1977->constraints.mask = mask;
+ adau1977->sysclk_src = source;
+ adau1977->sysclk = freq;
+
+ return 0;
+}
+
+static int adau1977_codec_probe(struct snd_soc_codec *codec)
+{
+ struct adau1977 *adau1977 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (adau1977->type) {
+ case ADAU1977:
+ ret = snd_soc_dapm_new_controls(&codec->dapm,
+ adau1977_micbias_dapm_widgets,
+ ARRAY_SIZE(adau1977_micbias_dapm_widgets));
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/* Codec driver ops; idle_bias_off lets the device fully power down. */
static struct snd_soc_codec_driver adau1977_codec_driver = {
	.probe = adau1977_codec_probe,
	.set_bias_level = adau1977_set_bias_level,
	.set_sysclk = adau1977_set_sysclk,
	.idle_bias_off = true,

	.controls = adau1977_snd_controls,
	.num_controls = ARRAY_SIZE(adau1977_snd_controls),
	.dapm_widgets = adau1977_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(adau1977_dapm_widgets),
	.dapm_routes = adau1977_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(adau1977_dapm_routes),
};
+
+static int adau1977_setup_micbias(struct adau1977 *adau1977)
+{
+ struct adau1977_platform_data *pdata = adau1977->dev->platform_data;
+ struct device_node *np = adau1977->dev->of_node;
+ u32 micbias = ADAU1977_MICBIAS_8V5;
+
+ if (pdata) {
+ micbias = pdata->micbias;
+ } else if (np) {
+ of_property_read_u32(np, "adi,micbias", &micbias);
+ }
+
+ if (micbias > ADAU1977_MICBIAS_9V0)
+ return -EINVAL;
+
+ return regmap_update_bits(adau1977->regmap, ADAU1977_REG_MICBIAS,
+ ADAU1977_MICBIAS_MB_VOLTS_MASK,
+ micbias << ADAU1977_MICBIAS_MB_VOLTS_OFFSET);
+}
+
+int adau1977_probe(struct device *dev, struct regmap *regmap,
+ enum adau1977_type type, void (*switch_mode)(struct device *dev))
+{
+ unsigned int power_off_mask;
+ struct adau1977 *adau1977;
+ struct adau1977_platform_data *pdata = dev->platform_data;
+ int ret;
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ adau1977 = devm_kzalloc(dev, sizeof(*adau1977), GFP_KERNEL);
+ if (adau1977 == NULL)
+ return -ENOMEM;
+
+ adau1977->dev = dev;
+ adau1977->type = type;
+ adau1977->regmap = regmap;
+ adau1977->switch_mode = switch_mode;
+ adau1977->max_fs = 192000;
+
+ adau1977->constraints.list = adau1977_rates;
+ adau1977->constraints.count = ARRAY_SIZE(adau1977_rates);
+
+ adau1977->avdd_reg = devm_regulator_get(dev, "AVDD");
+ if (IS_ERR(adau1977->avdd_reg))
+ return PTR_ERR(adau1977->avdd_reg);
+
+ /* Optional */
+ adau1977->dvdd_reg = devm_regulator_get(dev, "DVDD");
+ if (IS_ERR(adau1977->dvdd_reg)) {
+ adau1977->dvdd_reg = NULL;
+ }
+
+ if (gpio_is_valid(pdata->reset_gpio)) {
+ ret = gpio_request_one(pdata->reset_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "reset");
+
+ if (ret) {
+ dev_err(dev, "failed to request GPIO CONVST\n");
+ return -EIO;
+ }
+
+ msleep(1);
+ }
+
+ dev_set_drvdata(dev, adau1977);
+
+ ret = adau1977_set_power(adau1977, true);
+ if (ret)
+ return ret;
+
+ if (type == ADAU1977) {
+ ret = adau1977_setup_micbias(adau1977);
+ if (ret)
+ goto err_poweroff;
+ }
+
+ if (adau1977->dvdd_reg)
+ power_off_mask = ~0;
+ else
+ power_off_mask = ~ADAU1977_BLOCK_POWER_SAI_LDO_EN;
+
+ ret = regmap_update_bits(adau1977->regmap, ADAU1977_REG_BLOCK_POWER_SAI,
+ power_off_mask, 0x00);
+ if (ret)
+ goto err_poweroff;
+
+ ret = adau1977_set_power(adau1977, false);
+ if (ret)
+ return ret;
+
+ return snd_soc_register_codec(dev, &adau1977_codec_driver,
+ &adau1977_dai, 1);
+
+err_poweroff:
+ adau1977_set_power(adau1977, false);
+ return ret;
+
+}
+
+static bool adau1977_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADAU1977_REG_STATUS(0):
+ case ADAU1977_REG_STATUS(1):
+ case ADAU1977_REG_STATUS(2):
+ case ADAU1977_REG_STATUS(3):
+ case ADAU1977_REG_ADC_CLIP:
+ return true;
+ }
+
+ return false;
+}
+
/*
 * Shared regmap configuration. The I2C/SPI bus glue copies this and
 * fills in reg_bits/val_bits (and read_flag_mask for SPI) before
 * creating the regmap.
 */
const struct regmap_config adau1977_regmap_config = {
	.max_register = ADAU1977_REG_DC_HPF_CAL,
	.volatile_reg = adau1977_register_volatile,

	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = adau1977_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(adau1977_reg_defaults),
};
+
+static int adau1977_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap_config config;
+
+ config = adau1977_regmap_config;
+ config.val_bits = 8;
+ config.reg_bits = 8;
+
+ return adau1977_probe(&client->dev,
+ devm_regmap_init_i2c(client, &config),
+ id->driver_data, NULL);
+}
+
+static int adau1977_i2c_remove(struct i2c_client *client)
+{
+ struct adau1977_platform_data *pdata = client->dev.platform_data;
+ gpio_free(pdata->reset_gpio);
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
static const struct i2c_device_id adau1977_i2c_ids[] = {
	{ "adau1977", ADAU1977 },
	{ "adau1978", ADAU1978 },
	/* NOTE(review): adau1979 is mapped to the ADAU1978 driver data --
	 * presumably register compatible; confirm against the datasheet. */
	{ "adau1979", ADAU1978 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adau1977_i2c_ids);
+
/* I2C bus glue for the shared adau1977 codec driver. */
static struct i2c_driver adau1977_i2c_driver = {
	.driver = {
		.name = "adau1977",
		.owner = THIS_MODULE,
	},
	.probe = adau1977_i2c_probe,
	.remove = adau1977_i2c_remove,
	.id_table = adau1977_i2c_ids,
};
+
static const struct spi_device_id adau1977_spi_ids[] = {
	{ "adau1977", ADAU1977 },
	{ "adau1978", ADAU1978 },
	/* NOTE(review): adau1979 is mapped to the ADAU1978 driver data --
	 * presumably register compatible; confirm against the datasheet. */
	{ "adau1979", ADAU1978 },
	{ }
};
MODULE_DEVICE_TABLE(spi, adau1977_spi_ids);
+
/*
 * Called on power-up (via adau1977->switch_mode) to move the device
 * from its default control interface into SPI mode.
 */
static void adau1977_spi_switch_mode(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/*
	 * To get the device into SPI mode CLATCH has to be pulled low three
	 * times. Do this by issuing three dummy reads.
	 */
	spi_w8r8(spi, 0x00);
	spi_w8r8(spi, 0x00);
	spi_w8r8(spi, 0x00);
}
+
+static int adau1977_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap_config config;
+
+ config = adau1977_regmap_config;
+ config.val_bits = 8;
+ config.reg_bits = 16;
+ config.read_flag_mask = 0x1;
+
+ return adau1977_probe(&spi->dev,
+ devm_regmap_init_spi(spi, &config),
+ id->driver_data, adau1977_spi_switch_mode);
+}
+
+static int adau1977_spi_remove(struct spi_device *spi)
+{
+ struct adau1977_platform_data *pdata = spi->dev.platform_data;
+ gpio_free(pdata->reset_gpio);
+ snd_soc_unregister_codec(&spi->dev);
+ return 0;
+}
+
/* SPI bus glue for the shared adau1977 codec driver. */
static struct spi_driver adau1977_spi_driver = {
	.driver = {
		.name = "adau1977",
		.owner = THIS_MODULE,
	},
	.probe = adau1977_spi_probe,
	.remove = adau1977_spi_remove,
	.id_table = adau1977_spi_ids,
};
+
+
+static int __init adau1977_modinit(void)
+{
+ int ret = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&adau1977_i2c_driver);
+ if (ret) {
+ printk(KERN_ERR "Failed to register adau1977 I2C driver: %d\n",
+ ret);
+ }
+#endif
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&adau1977_spi_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register adau1977 SPI driver: %d\n",
+ ret);
+ }
+#endif
+ return ret;
+}
+
/* Unregister whichever bus front-ends were built in. */
static void __exit adau1977_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&adau1977_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&adau1977_spi_driver);
#endif
}
+
+module_init(adau1977_modinit);
+module_exit(adau1977_exit);
+
+MODULE_DESCRIPTION("ASoC ADAU1977/ADAU1978/ADAU1979 driver");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau1977.h b/sound/soc/codecs/adau1977.h
new file mode 100644
index 00000000000000..973221441750b5
--- /dev/null
+++ b/sound/soc/codecs/adau1977.h
@@ -0,0 +1,37 @@
+/*
+ * ADAU1977/ADAU1978/ADAU1979 driver
+ *
+ * Copyright 2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
#ifndef __SOUND_SOC_CODECS_ADAU1977_H__
#define __SOUND_SOC_CODECS_ADAU1977_H__

#include <linux/regmap.h>

struct device;

/* Supported device variants; the ADAU1977 additionally has MICBIAS. */
enum adau1977_type {
	ADAU1977,
	ADAU1978,
	ADAU1979,
};

/*
 * Common probe entry point shared by the I2C and SPI bus glue.
 * switch_mode, if non-NULL, is called on power-up to switch the device
 * onto the given control interface (used by SPI).
 */
int adau1977_probe(struct device *dev, struct regmap *regmap,
	enum adau1977_type type, void (*switch_mode)(struct device *dev));

/* Template regmap config; bus glue fills in reg/val sizes. */
extern const struct regmap_config adau1977_regmap_config;

/* Clock IDs for snd_soc_codec_set_sysclk() */
enum adau1977_clk_id {
	ADAU1977_SYSCLK,
};

/* Sysclk source: external MCLK or clock recovered from LRCLK */
enum adau1977_sysclk_src {
	ADAU1977_SYSCLK_SRC_MCLK,
	ADAU1977_SYSCLK_SRC_LRCLK,
};

#endif
diff --git a/sound/soc/codecs/wau8822.c b/sound/soc/codecs/wau8822.c
new file mode 100644
index 00000000000000..ac9ffd3965143a
--- /dev/null
+++ b/sound/soc/codecs/wau8822.c
@@ -0,0 +1,884 @@
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "wau8822.h"
+
+#define CODEC_NAME "wau8822"
+
+#define wau8822_reset(c) wau8822_write(c, WAU8822_RESET, 0)
+
+/* Per-device driver state. */
+struct wau8822_priv {
+	enum snd_soc_control_type control_type;	/* SND_SOC_I2C (set in i2c probe) */
+	void *control_data;	/* bus handle: the struct i2c_client * */
+	u16 suspended_cache[WAU8822_CACHEREG_NUM];	/* register snapshot taken on suspend */
+};
+
+/*
+ * wau8822 default cfg values
+ *
+ * Power-on register defaults indexed by register address; seeds the
+ * ASoC register cache (.reg_cache_default).
+ * Fix: entry 61 was written as the malformed literal "0x000 " —
+ * normalised to 0x0000 (identical value).
+ */
+static const u16 wau8822_reg[WAU8822_CACHEREG_NUM] = {
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0050, 0x0000, 0x0140, 0x0000,
+	0x0000, 0x0080, 0x0000, 0x00ff,
+	0x00ff, 0x0000, 0x0100, 0x00ff,
+	0x00ff, 0x0000, 0x012c, 0x002c,
+	0x002c, 0x002c, 0x002c, 0x0000,
+	0x0032, 0x0000, 0x0000, 0x0000,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0038, 0x000b, 0x0032, 0x0010,
+	0x0008, 0x000c, 0x0093, 0x00e9,
+	0x0000, 0x0000, 0x0000, 0x0000,
+	0x0033, 0x0010, 0x0010, 0x0100,
+	0x0100, 0x0002, 0x0001, 0x0001,
+	0x0039, 0x0039, 0x0000, 0x0000,
+	0x0001, 0x0001, 0x0000, 0x0000,
+	0x0020, 0x0000, 0x0000, 0x001A,
+};
+
+/* wau8822 custom defaults values */
+static const struct reg_default wau8822_reg_defaults[] = {
+ /* starting values other than default ones */
+ {WAU8822_POWER1, 0x001f}, /*
+ * VREF 3kohm nominal impedance
+ * tie-off buffer power on
+ * bias buffer power on
+ * micbias enabled
+ */
+ {WAU8822_POWER2, 0x01bf}, /*
+ * left ADC on
+ * right ADC on
+ * left PGA on
+ * right PGA on
+ * left ADC mix / boost on
+ * right ADC mix / boost on
+ * left headphone on
+ * right headphone on
+ */
+ {WAU8822_POWER3, 0x006f}, /*
+ * left DAC on
+ * right DAC on
+ * left main mixer on
+ * right main mixer on
+ * left spk on
+ * right spk on
+ */
+ {WAU8822_IFACE, 0x0070}, /* standard i2s */
+ {WAU8822_CLOCK1, 0x0000}, /* divider for fs set to 1 */
+ {WAU8822_GPIO, 0x0004}, /* output divided PLL clock */
+ {WAU8822_DAC, 0x0008}, /* 128x oversampling */
+ {WAU8822_ADCVOLL, 0x01ff}, /* left adc set to 0dB */
+ {WAU8822_ADCVOLR, 0x01ff}, /* right adc set to 0dB */
+ {WAU8822_INPUT, 0x0000}, /* disconnect all input */
+ {WAU8822_INPGAL, 0x0110}, /* left PGA 0dB */
+ {WAU8822_INPGAR, 0x0110}, /* right PGA 0dB */
+ {WAU8822_ADCBOOSTL, 0x0055}, /*
+ * left line-in 0dB
+ * left aux-in 0dB
+ */
+ {WAU8822_ADCBOOSTR, 0x0055}, /*
+ * right line-in 0dB
+ * right aux-in 0dB
+ */
+};
+
+/*
+ * Read a register value from the local shadow cache.
+ * The RESET register is write-only and always reads back 0; an
+ * out-of-range index yields (unsigned int)-1.
+ */
+static inline unsigned int wau8822_read_reg_cache(struct snd_soc_codec *codec,
+	unsigned int reg)
+{
+	u16 *cache = codec->reg_cache;
+	if (reg == WAU8822_RESET)
+		return 0;
+	if (reg >= WAU8822_CACHEREG_NUM)
+		return -1;
+	return cache[reg];
+}
+/* Store a value in the shadow cache; out-of-range indices are ignored. */
+static inline void wau8822_write_reg_cache(struct snd_soc_codec *codec,
+	u16 reg, unsigned int value)
+{
+	u16 *shadow = codec->reg_cache;
+
+	if (reg < WAU8822_CACHEREG_NUM)
+		shadow[reg] = value;
+}
+
+/*
+ * Write a register: update the shadow cache first, then push the value
+ * to the chip via snd_soc_write().  Returns 0 on success, -EIO on bus
+ * failure.  Note the cache is updated even when the hardware write
+ * fails, so cache and chip can diverge on I/O errors.
+ */
+static int wau8822_write(struct snd_soc_codec *codec,
+	unsigned int reg,
+	unsigned int value)
+{
+	int ret = 0;
+	wau8822_write_reg_cache(codec, reg, value);
+
+	ret = snd_soc_write(codec, reg, value);
+	if (!ret) {
+		dev_dbg(codec->dev, "%s: reg = 0x%x ; value = 0x%x success\n",
+			__func__, reg, value);
+	} else {
+		dev_err(codec->dev, "%s: reg = 0x%x ; value = 0x%x error\n",
+			__func__, reg, value);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static const char * const wau8822_pga_input_sel_text[] = {
+ "None", "Line", "Mic"
+};
+
+static const struct soc_enum wau8822_pga_input_sel_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wau8822_pga_input_sel_text),
+ wau8822_pga_input_sel_text);
+
+/*
+ * put() handler shared by the volume controls: same logic as
+ * snd_soc_put_volsw() except that bit 8 (0x100) is OR-ed into every
+ * write — presumably the chip's volume-update latch so both channels
+ * take effect together; confirm against the datasheet.
+ *
+ * Fixes: given static linkage (only referenced by this file's control
+ * table, matching the static wm8594 twin) and bool initialised with
+ * false instead of 0.
+ */
+static int wau8822_vol_update(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	unsigned int reg = mc->reg;
+	unsigned int reg2 = mc->rreg;
+	unsigned int shift = mc->shift;
+	unsigned int rshift = mc->rshift;
+	int max = mc->max;
+	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int invert = mc->invert;
+	int err;
+	bool type_2r = false;
+	unsigned int val2 = 0;
+	unsigned int val, val_mask;
+
+	val = (ucontrol->value.integer.value[0] & mask);
+	if (invert)
+		val = max - val;
+	val_mask = mask << shift;
+	val = val << shift;
+	if (snd_soc_volsw_is_stereo(mc)) {
+		val2 = (ucontrol->value.integer.value[1] & mask);
+		if (invert)
+			val2 = max - val2;
+		if (reg == reg2) {
+			/* both channels share one register */
+			val_mask |= mask << rshift;
+			val |= val2 << rshift;
+		} else {
+			/* separate L/R registers, same field position */
+			val2 = val2 << shift;
+			type_2r = true;
+		}
+	}
+
+	err = snd_soc_update_bits_locked(codec,
+					 reg,
+					 val_mask | 0x100,
+					 val | 0x100);
+	if (err < 0)
+		return err;
+
+	if (type_2r)
+		err = snd_soc_update_bits_locked(codec,
+						 reg2,
+						 val_mask | 0x100,
+						 val2 | 0x100);
+
+	return err;
+}
+
+/*
+ * Report the current PGA routing (INPUT reg: bits [2:0] = left,
+ * [6:4] = right) as a "PGA Input Selection" enum item:
+ * 0 = None, 1 = Line (0x4/0x4), 2 = Mic (0x3/0x3).
+ * Asymmetric or unknown routings are rejected with -EINVAL.
+ */
+static int wau8822_pga_input_get_switch(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 input_left = wau8822_read_reg_cache(codec, WAU8822_INPUT) & 0x7;
+	u16 input_right = (wau8822_read_reg_cache(codec, WAU8822_INPUT) >> 4)
+		& 0x7;
+
+	if (!input_left && !input_right) {
+		/* PGA input none */
+		ucontrol->value.integer.value[0] = 0;
+	} else if (input_left == 0x3 && input_right == 0x3) {
+		/* PGA input is Mic */
+		ucontrol->value.integer.value[0] = 2;
+	} else if (input_left == 0x4 && input_right == 0x4) {
+		/* PGA input is Line-in */
+		ucontrol->value.integer.value[0] = 1;
+	} else {
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * Select the PGA input pair for both channels: 0 = none, 1 = line-in
+ * (0x44: bit 2 of each channel nibble), 2 = mic (0x33: bits 0-1 of
+ * each nibble).  Fix: removed the unreachable break after goto.
+ */
+static int wau8822_pga_input_set_switch(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 input = wau8822_read_reg_cache(codec, WAU8822_INPUT) & ~0x77;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		/* disconnect both PGAs */
+		break;
+	case 1:
+		/* Line-in */
+		input |= 0x44;
+		break;
+	case 2:
+		/* Mic */
+		input |= 0x33;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		goto error;
+	}
+
+	wau8822_write(codec, WAU8822_INPUT, input);
+
+error:
+	return ret;
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -1200, 75, 0);
+static const DECLARE_TLV_DB_SCALE(hp_tlv, -5700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(boost_tlv, -1500, 300, 0);
+
+static const struct snd_kcontrol_new wau8822_snd_controls[] = {
+ SOC_SINGLE("Digital Loopback Switch", WAU8822_COMP, 0, 1, 0),
+
+ SOC_DOUBLE_R_EXT_TLV("DAC Playback Volume",
+ WAU8822_DACVOLL, WAU8822_DACVOLR, 0, 255, 0,
+ snd_soc_get_volsw, wau8822_vol_update, dac_tlv),
+
+ SOC_DOUBLE_R_EXT_TLV("ADC Capture Volume",
+ WAU8822_ADCVOLL, WAU8822_ADCVOLR, 0, 255, 0,
+ snd_soc_get_volsw, wau8822_vol_update, dac_tlv),
+
+ SOC_DOUBLE_R_EXT_TLV("PGA Capture Volume",
+ WAU8822_INPGAL, WAU8822_INPGAR, 0, 63, 0,
+ snd_soc_get_volsw, wau8822_vol_update, pga_tlv),
+
+ SOC_DOUBLE_R_EXT_TLV("Headphone Playback Volume",
+ WAU8822_HPVOLL, WAU8822_HPVOLR, 0, 63, 0,
+ snd_soc_get_volsw, wau8822_vol_update, hp_tlv),
+ SOC_DOUBLE_R("Headphone Playback Switch",
+ WAU8822_HPVOLL, WAU8822_HPVOLR, 6, 1, 1),
+
+ SOC_DOUBLE_R_EXT_TLV("Speaker Playback Volume",
+ WAU8822_SPKVOLL, WAU8822_SPKVOLR, 0, 63, 0,
+ snd_soc_get_volsw, wau8822_vol_update, hp_tlv),
+ SOC_DOUBLE_R("Speaker Playback Switch",
+ WAU8822_SPKVOLL, WAU8822_SPKVOLR, 6, 1, 1),
+
+ SOC_DOUBLE_R_TLV("Line In Capture Volume", WAU8822_ADCBOOSTL,
+ WAU8822_ADCBOOSTR, 4, 7, 0, boost_tlv),
+ SOC_DOUBLE_R_TLV("Aux In Capture Volume", WAU8822_ADCBOOSTL,
+ WAU8822_ADCBOOSTR, 0, 7, 0, boost_tlv),
+ SOC_DOUBLE_R("Capture Boost(+20dB)",
+ WAU8822_ADCBOOSTL, WAU8822_ADCBOOSTR, 8, 1, 0),
+
+ SOC_ENUM_EXT("PGA Input Selection", wau8822_pga_input_sel_enum,
+ wau8822_pga_input_get_switch,
+ wau8822_pga_input_set_switch),
+};
+
+/*
+ * hw_params: program the word length field of IFACE and the
+ * sample-rate filter selection field of CLOCK2.
+ *
+ * Bug fix: the rate switch previously compared params_rate() — which
+ * returns the rate in Hz — against the SNDRV_PCM_RATE_* *bit-mask*
+ * constants, so no case ever matched and CLOCK2 always kept its
+ * 44.1/48 kHz setting.  Compare against plain Hz values instead.
+ */
+static int wau8822_pcm_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params,
+	struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+
+	u16 iface = wau8822_read_reg_cache(codec, WAU8822_IFACE) & 0x19f;
+	u16 clk2 = wau8822_read_reg_cache(codec, WAU8822_CLOCK2) & 0x1f1;
+
+	/* set word length (IFACE bits [6:5]) */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= 0x0020;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= 0x0040;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= 0x0060;
+		break;
+	}
+
+	/* set sample rate field (44.1/48 kHz class = 0) */
+	switch (params_rate(params)) {
+	case 8000:
+		clk2 |= 0x5 << 1;
+		break;
+	case 11025:
+		clk2 |= 0x4 << 1;
+		break;
+	case 16000:
+		clk2 |= 0x3 << 1;
+		break;
+	case 22050:
+		clk2 |= 0x2 << 1;
+		break;
+	case 32000:
+		clk2 |= 0x1 << 1;
+		break;
+	case 44100:
+	case 48000:
+		break;
+	}
+
+	wau8822_write(codec, WAU8822_IFACE, iface);
+	wau8822_write(codec, WAU8822_CLOCK2, clk2);
+	return 0;
+}
+
+/*
+ * set_fmt: program master/slave mode (CLOCK1 bit 0) plus the format
+ * and clock-inversion fields of IFACE.
+ * Fix: the DSP_A case used the over-long literal 0x00018; written as
+ * 0x0018 (identical value).
+ */
+static int wau8822_set_dai_fmt(struct snd_soc_dai *codec_dai,
+	unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 iface = wau8822_read_reg_cache(codec, WAU8822_IFACE) & 0x67;
+	u16 clk1 = wau8822_read_reg_cache(codec, WAU8822_CLOCK1) & 0x1fe;
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		clk1 |= 0x0001;
+		break;
+
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format (IFACE bits [4:3]) */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= 0x0010;
+		break;
+
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= 0x0008;
+		break;
+
+	case SND_SOC_DAIFMT_DSP_A:
+		iface |= 0x0018;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* clock inversion (IFACE bits [8:7]) */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= 0x0180;
+		break;
+
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= 0x0100;
+		break;
+
+	case SND_SOC_DAIFMT_NB_IF:
+		iface |= 0x0080;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	wau8822_write(codec, WAU8822_IFACE, iface);
+	wau8822_write(codec, WAU8822_CLOCK1, clk1);
+	return 0;
+}
+
+/*
+ * set_clkdiv: merge a divider value into the matching register field.
+ * The caller passes `div` already shifted into field position; each
+ * mask below clears the corresponding field first.
+ */
+static int wau8822_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
+	int div_id, int div)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 reg;
+
+	switch (div_id) {
+	case WAU8822_OPCLKDIV:
+		reg = wau8822_read_reg_cache(codec, WAU8822_GPIO) & 0x1cf;
+		wau8822_write(codec, WAU8822_GPIO, reg | div);
+		break;
+	case WAU8822_MCLKDIV:
+		reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1) & 0x11f;
+		wau8822_write(codec, WAU8822_CLOCK1, reg | div);
+		break;
+	case WAU8822_ADCCLK:
+		reg = wau8822_read_reg_cache(codec, WAU8822_ADC) & 0x1f7;
+		wau8822_write(codec, WAU8822_ADC, reg | div);
+		break;
+	case WAU8822_DACCLK:
+		reg = wau8822_read_reg_cache(codec, WAU8822_DAC) & 0x1f7;
+		wau8822_write(codec, WAU8822_DAC, reg | div);
+		break;
+	case WAU8822_BCLKDIV:
+		reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1) & 0x1e3;
+		wau8822_write(codec, WAU8822_CLOCK1, reg | div);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* PLL configuration computed by pll_factors(). */
+struct pll_ {
+	unsigned int pre_div:4; /* prescale - 1 (1 = input clock halved) */
+	unsigned int n:4;	/* integer part of the multiplier */
+	unsigned int k;		/* 24-bit fractional part */
+};
+
+/*
+ * NOTE(review): file-scope scratch shared by pll_factors() and
+ * wau8822_set_dai_pll() — not per-device and not locked.
+ */
+static struct pll_ pll_div;
+
+/*
+ * The size in bits of the pll divide multiplied by 10
+ * to allow rounding later
+ */
+#define FIXED_PLL_SIZE ((1 << 24) * 10)
+
+/*
+ * Pick pre_div, N and K so that target == source' * (N + K / 2^24),
+ * where source' is source halved when the raw N would fall below 6.
+ * Results are stored in the file-scope pll_div.
+ */
+static void pll_factors(unsigned int target, unsigned int source)
+{
+	unsigned long long Kpart;
+	unsigned int K, Ndiv, Nmod;
+
+	Ndiv = target / source;
+	if (Ndiv < 6) {
+		source >>= 1;
+		pll_div.pre_div = 1;
+		Ndiv = target / source;
+	} else {
+		pll_div.pre_div = 0;
+	}
+
+	if ((Ndiv < 6) || (Ndiv > 12))
+		pr_warn("WAU8822 N value %d outwith recommended range!\n",
+			Ndiv);
+
+	pll_div.n = Ndiv;
+	Nmod = target % source;
+	Kpart = FIXED_PLL_SIZE * (long long)Nmod;
+
+	do_div(Kpart, source);
+
+	K = Kpart & 0xffffffff;
+
+	/* Check if we need to round */
+	if ((K % 10) >= 5)
+		K += 5;
+
+	/* Move down to proper range now rounding is done */
+	K /= 10;
+
+	pll_div.k = K;
+
+	pr_debug(CODEC_NAME "%s: pll_factor, freq_in %d, freq_out %d, "
+		"pre_div %d, Ndiv %d, K 0x%x\n", __func__,
+		source, target, pll_div.pre_div, pll_div.n, pll_div.k);
+}
+/*
+ * Configure the on-chip PLL.  freq_in/freq_out == 0 reverts the codec
+ * to raw MCLK and powers the PLL down.  Otherwise the PLL is set up
+ * to produce freq_out * 8 from freq_in and CLOCK1 is switched to the
+ * PLL path.  Also forces a BCLK divider suited to 16-bit streams.
+ */
+static int wau8822_set_dai_pll(struct snd_soc_dai *codec_dai,
+	int pll_id, int source,
+	unsigned int freq_in, unsigned int freq_out)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 reg;
+
+	dev_dbg(codec->dev, "%s: pll_id %d, freq_in %d, frq_out %d\n",
+		__func__, pll_id, freq_in, freq_out);
+
+	if (freq_in == 0 || freq_out == 0) {
+		/* Clock CODEC directly from MCLK, MCLK is master clock */
+		reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1);
+		wau8822_write(codec, WAU8822_CLOCK1, reg & 0x0ff);
+
+		/* Turn off PLL */
+		reg = wau8822_read_reg_cache(codec, WAU8822_POWER1);
+		wau8822_write(codec, WAU8822_POWER1, reg & 0x1df);
+		return 0;
+	}
+
+	/* compute PLL factors */
+	pll_factors(freq_out * 8, freq_in);
+
+	wau8822_write(codec, WAU8822_PLLN, (pll_div.pre_div << 4) | pll_div.n);
+	/* 24-bit K split over three 9-bit registers */
+	wau8822_write(codec, WAU8822_PLLK1, pll_div.k >> 18);
+	wau8822_write(codec, WAU8822_PLLK2, (pll_div.k >> 9) & 0x1ff);
+	wau8822_write(codec, WAU8822_PLLK3, pll_div.k & 0x1ff);
+
+	/* Turn on PLL */
+	reg = wau8822_read_reg_cache(codec, WAU8822_POWER1);
+	wau8822_write(codec, WAU8822_POWER1, reg | 0x020);
+
+	/* Run CODEC from PLL instead of MCLK */
+	reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1) & 0x11f;
+	wau8822_write(codec, WAU8822_CLOCK1, reg | 0x100);
+
+	/* adapt the MCLK divider to the prescale chosen by pll_factors() */
+	reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1);
+	wau8822_write(codec, WAU8822_CLOCK1, reg | (0x020 << pll_div.pre_div));
+
+	/* to try to put the out in aiz (high impedance) */
+	wau8822_write(codec, WAU8822_MISC, 0x0);
+
+	/*
+	 * to adapt bclk - McBSP driver currently
+	 * only support 16bits data
+	 */
+	reg = wau8822_read_reg_cache(codec, WAU8822_CLOCK1) & 0x1e3;
+
+	/*
+	 * if divided by 8 -> 16BCLK per channel
+	 * if divided by 4 -> 32BCLK per channel
+	 * wau8822_write(codec, WAU8822_CLOCK1, reg | WAU8822_BCLKDIV_8);
+	 */
+	wau8822_write(codec, WAU8822_CLOCK1, reg | (2 << 2));
+
+	return 0;
+}
+
+/* digital_mute callback: toggle the DAC soft-mute bit (DAC reg, bit 6). */
+static int wau8822_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 val = wau8822_read_reg_cache(codec, WAU8822_DAC) & ~0x40;
+
+	if (mute)
+		val |= 0x40;
+	wau8822_write(codec, WAU8822_DAC, val);
+
+	return 0;
+}
+
+/* DAI callbacks (used for both the playback and capture directions). */
+static struct snd_soc_dai_ops wau8822_dai_ops_playback = {
+	.hw_params	= wau8822_pcm_hw_params,
+	.set_fmt	= wau8822_set_dai_fmt,
+	.set_clkdiv	= wau8822_set_dai_clkdiv,
+	.set_pll	= wau8822_set_dai_pll,
+	.digital_mute	= wau8822_mute,
+};
+
+/*
+ * DAPM bias management.  POWER1 bits [1:0] select the VMID divider
+ * impedance (cleared first via ~0x3), higher bits keep reference
+ * buffers powered.
+ */
+static int wau8822_set_bias_level(struct snd_soc_codec *codec,
+	enum snd_soc_bias_level level)
+{
+	u16 power1 = wau8822_read_reg_cache(codec, WAU8822_POWER1) & ~0x3;
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+	case SND_SOC_BIAS_PREPARE:
+		power1 |= 0x1; /* VMID 50k */
+		wau8822_write(codec, WAU8822_POWER1, power1);
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		/* presumably bias/tie-off buffer enables — confirm vs datasheet */
+		power1 |= 0x08 | 0x04;
+
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			/* Initial cap charge at VMID 5k */
+			wau8822_write(codec, WAU8822_POWER1, power1 | 0x3);
+			/* NOTE(review): busy-waits 100 ms; msleep() may be preferable */
+			mdelay(100);
+		}
+
+		power1 |= 0x2; /* VMID 500k */
+		wau8822_write(codec, WAU8822_POWER1, power1);
+		break;
+
+	case SND_SOC_BIAS_OFF:
+		/* cut every power domain */
+		wau8822_write(codec, WAU8822_POWER1, 0);
+		wau8822_write(codec, WAU8822_POWER2, 0);
+		wau8822_write(codec, WAU8822_POWER3, 0);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+/*
+ * Staged full power-down: outputs muted, all three power registers
+ * cleared, ending in SND_SOC_BIAS_OFF.
+ */
+static int wau8822_power_down(struct snd_soc_codec *codec)
+{
+	/* power2 shutdown */
+	wau8822_write(codec, WAU8822_POWER2, 0x0);
+	/* power3 shutdown except main mixer */
+	wau8822_write(codec, WAU8822_POWER3, 0xc);
+	/* power3 complete shutdown */
+	wau8822_write(codec, WAU8822_POWER3, 0x0);
+	/* power1 shutdown (original comment wrongly said power2) */
+	wau8822_write(codec, WAU8822_POWER1, 0x0);
+	/* headphone output mute */
+	wau8822_write(codec, WAU8822_HPVOLL, 0x140);
+	wau8822_write(codec, WAU8822_HPVOLR, 0x140);
+
+	return wau8822_set_bias_level(codec, SND_SOC_BIAS_OFF);
+}
+
+/*
+ * Codec-level probe: set up register I/O (7-bit register address,
+ * 9-bit value), reset the chip, bring it to STANDBY, apply the custom
+ * defaults, read the chip back into the cache and register the mixer
+ * controls.
+ */
+static int wau8822_probe(struct snd_soc_codec *codec)
+{
+	struct wau8822_priv *wau8822 = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+	int i;
+	unsigned int value = 0;
+
+	/* NOTE(review): message lacks a trailing newline */
+	dev_info(codec->dev, "WAU8822 Audio Codec");
+
+	codec->control_data = wau8822->control_data;
+	/* 7-bit register index, 9-bit register value on the wire */
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wau8822->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		goto error;
+	}
+
+	/* Reset */
+	ret = wau8822_reset(codec);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
+		goto error;
+	}
+
+	/* power on device */
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	wau8822_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* initialize codec register */
+	for (i = 0; i < ARRAY_SIZE(wau8822_reg_defaults); i++) {
+		wau8822_write(codec, wau8822_reg_defaults[i].reg,
+			wau8822_reg_defaults[i].def);
+	}
+
+	/* update cache with the values actually on the chip */
+	for (i = 0; i < WAU8822_CACHEREG_NUM; i++) {
+		value = snd_soc_read(codec, i);
+		wau8822_write_reg_cache(codec, i, value);
+	}
+
+	/* init controls */
+	dev_dbg(codec->dev, "init controls\n");
+	snd_soc_add_codec_controls(codec, wau8822_snd_controls,
+			ARRAY_SIZE(wau8822_snd_controls));
+
+error:
+	return ret;
+
+}
+
+/* Codec-level remove: drop to full power-down / BIAS_OFF. */
+static int wau8822_remove(struct snd_soc_codec *codec)
+{
+	return wau8822_power_down(codec);
+}
+
+/*
+ * Suspend: snapshot the register cache into suspended_cache (replayed
+ * by wau8822_resume()), then power the chip down completely.
+ */
+static int wau8822_suspend(struct snd_soc_codec *codec)
+{
+	struct wau8822_priv *wau8822 = snd_soc_codec_get_drvdata(codec);
+	u16 *cache = codec->reg_cache;
+
+	/* we only need to suspend if we are a valid card */
+	if (!codec->card)
+		return 0;
+
+	/* save cache for suspend */
+	memcpy(wau8822->suspended_cache, cache,
+		WAU8822_CACHEREG_NUM * sizeof(u16));
+
+	return wau8822_power_down(codec);
+}
+
+/*
+ * Resume: reset the chip, repeat the power-up/default sequence, then
+ * replay the register snapshot taken in wau8822_suspend().  Returns
+ * the reset status; the later register writes are best-effort.
+ */
+static int wau8822_resume(struct snd_soc_codec *codec)
+{
+	int ret;
+	int i;
+	u16 *cache = codec->reg_cache;
+	struct wau8822_priv *wau8822 = snd_soc_codec_get_drvdata(codec);
+
+	/* we only need to resume if we are a valid card */
+	if (!codec->card)
+		return 0;
+
+	/* Reset */
+	ret = wau8822_reset(codec);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
+		goto error;
+	}
+
+	/* power on device */
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	wau8822_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* initialize codec register (direct writes, bypassing the cache helper) */
+	for (i = 0; i < ARRAY_SIZE(wau8822_reg_defaults); i++) {
+		snd_soc_write(codec, wau8822_reg_defaults[i].reg,
+			wau8822_reg_defaults[i].def);
+	}
+
+	/* restore suspended bias level */
+	wau8822_set_bias_level(codec, codec->dapm.suspend_bias_level);
+
+	/* restore cached regs, skipping the write-only reset register */
+	memcpy(cache, wau8822->suspended_cache,
+		WAU8822_CACHEREG_NUM * sizeof(u16));
+	for (i = 0; i < ARRAY_SIZE(wau8822_reg); i++) {
+		if (i == WAU8822_RESET)
+			continue;
+		wau8822_write(codec, i, cache[i]);
+	}
+
+error:
+	return ret;
+}
+
+/* Only the write-only RESET trigger register is volatile. */
+static int wau8822_volatile_register(struct snd_soc_codec *codec,
+	unsigned int reg)
+{
+	if (reg == WAU8822_RESET)
+		return 1;
+	return 0;
+}
+
+/*
+ * ASoC codec driver description.  The 16-bit register cache is seeded
+ * from wau8822_reg; only the RESET register is volatile.
+ *
+ * Fixes: declared static (referenced only from this file) and the
+ * cache default passed as the decayed array (same address as the old
+ * &wau8822_reg, conventional type).
+ * NOTE(review): reg_cache_size is usually the register *count*
+ * (ARRAY_SIZE) rather than the byte size — verify against this
+ * kernel's soc-cache implementation before changing.
+ */
+static struct snd_soc_codec_driver soc_codec_dev_wau8822 = {
+	.probe = wau8822_probe,
+	.remove = wau8822_remove,
+	.suspend = wau8822_suspend,
+	.resume = wau8822_resume,
+	.set_bias_level = wau8822_set_bias_level,
+	.volatile_register = wau8822_volatile_register,
+	.reg_cache_size = sizeof(wau8822_reg),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wau8822_reg,
+};
+
+#define WAU8822_RATES (SNDRV_PCM_RATE_8000 | \
+ SNDRV_PCM_RATE_11025 | \
+ SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define WAU8822_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+/*
+ * Single stereo hifi DAI, symmetric playback/capture capabilities.
+ * NOTE(review): WAU8822_RATES omits 32000 although hw_params and the
+ * CLOCK2 field support it — confirm whether that is intentional.
+ */
+static struct snd_soc_dai_driver wau8822_dai[] = {
+	{
+		.name = "wau8822-hifi",
+		.id = 0,
+		.playback = {
+			.stream_name = "Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = WAU8822_RATES,
+			.formats = WAU8822_FORMATS,
+		},
+		.capture = {
+			.stream_name = "Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = WAU8822_RATES,
+			.formats = WAU8822_FORMATS,
+		},
+		.ops = &wau8822_dai_ops_playback,
+	},
+};
+
+/*
+ * I2C probe: allocate the per-device state, stash it as client data
+ * and register the codec with the ASoC core.  The allocation is freed
+ * here on registration failure, otherwise in wau8822_i2c_remove().
+ */
+static int wau8822_i2c_probe(struct i2c_client *i2c,
+	const struct i2c_device_id *id)
+{
+	struct wau8822_priv *wau8822;
+	int ret;
+
+	wau8822 = kzalloc(sizeof(struct wau8822_priv), GFP_KERNEL);
+	if (wau8822 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, wau8822);
+	wau8822->control_data = i2c;
+	wau8822->control_type = SND_SOC_I2C;
+
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wau8822,
+			wau8822_dai, ARRAY_SIZE(wau8822_dai));
+
+	if (ret < 0) {
+		kfree(wau8822);
+		pr_err(CODEC_NAME ": %s failed: ret = %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * I2C remove: unregister the codec first, then free the private data.
+ *
+ * Bug fix: the client data (set to the wau8822_priv in probe) was
+ * mislabelled as a snd_soc_codec and kfree()d *before*
+ * snd_soc_unregister_codec(), leaving a window where the still
+ * registered codec could touch freed memory.
+ */
+static int wau8822_i2c_remove(struct i2c_client *client)
+{
+	struct wau8822_priv *wau8822 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+	kfree(wau8822);
+	return 0;
+}
+
+/*
+ * WAU8822 2 wire address is determined by A1 pin
+ * state during powerup.
+ * low = 0x1a
+ * high = 0x1b
+ */
+static const struct i2c_device_id wau8822_i2c_table[] = {
+ {"wau8822", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, wau8822_i2c_table);
+
+/* i2c codec control layer */
+static struct i2c_driver wau8822_i2c_driver = {
+ .driver = {
+ .name = "wau8822",
+ .owner = THIS_MODULE,
+ },
+ .probe = wau8822_i2c_probe,
+ .remove = wau8822_i2c_remove,
+ .id_table = wau8822_i2c_table,
+};
+
+/*
+ * Module init: register the I2C control-bus driver.
+ * NOTE(review): ret stays 0 when CONFIG_I2C is disabled, so the
+ * module would load with no bus driver registered.
+ */
+static int __init wau8822_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	pr_info(CODEC_NAME ": module init\n");
+	ret = i2c_add_driver(&wau8822_i2c_driver);
+	if (ret != 0)
+		pr_err(CODEC_NAME
+			": Failed to register wau8822 I2C driver:%d\n",
+			ret);
+#endif
+
+	return ret;
+}
+module_init(wau8822_modinit);
+
+/* Module unload: unregister the I2C driver registered at init. */
+static void __exit wau8822_modexit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	pr_info(CODEC_NAME ": module exit\n");
+	i2c_del_driver(&wau8822_i2c_driver);
+#endif
+}
+module_exit(wau8822_modexit);
+
+MODULE_DESCRIPTION("ASoC wau8822 driver");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/codecs/wau8822.h b/sound/soc/codecs/wau8822.h
new file mode 100644
index 00000000000000..94356e50294020
--- /dev/null
+++ b/sound/soc/codecs/wau8822.h
@@ -0,0 +1,81 @@
+
+#ifndef _WAU8822_H_
+#define _WAU8822_H_
+
+/* wau8822 register space */
+#define WAU8822_RESET 0x00
+#define WAU8822_POWER1 0x01
+#define WAU8822_POWER2 0x02
+#define WAU8822_POWER3 0x03
+#define WAU8822_IFACE 0x04
+#define WAU8822_COMP 0x05
+#define WAU8822_CLOCK1 0x06
+#define WAU8822_CLOCK2 0x07
+#define WAU8822_GPIO 0x08
+#define WAU8822_JACK1 0x09
+#define WAU8822_DAC 0x0a
+#define WAU8822_DACVOLL 0x0b
+#define WAU8822_DACVOLR 0x0c
+#define WAU8822_JACK2 0x0d
+#define WAU8822_ADC 0x0e
+#define WAU8822_ADCVOLL 0x0f
+#define WAU8822_ADCVOLR 0x10
+/* 0x11 is reserved */
+#define WAU8822_EQ1 0x12
+#define WAU8822_EQ2 0x13
+#define WAU8822_EQ3 0x14
+#define WAU8822_EQ4 0x15
+#define WAU8822_EQ5 0x16
+/* 0x17 is reserved */
+#define WAU8822_DACLIM1 0x18
+#define WAU8822_DACLIM2 0x19
+/* 0x1a is reserved */
+#define WAU8822_NOTCH1 0x1b
+#define WAU8822_NOTCH2 0x1c
+#define WAU8822_NOTCH3 0x1d
+#define WAU8822_NOTCH4 0x1e
+/* 0x1f is reserved */
+#define WAU8822_ALC1 0x20
+#define WAU8822_ALC2 0x21
+#define WAU8822_ALC3 0x22
+#define WAU8822_NGATE 0x23
+#define WAU8822_PLLN 0x24
+#define WAU8822_PLLK1 0x25
+#define WAU8822_PLLK2 0x26
+#define WAU8822_PLLK3 0x27
+/* 0x28 is reserved */
+#define WAU8822_3D 0x29
+/* 0x2a is reserved */
+#define WAU8822_RSPKSUBMIX 0x2b
+#define WAU8822_INPUT 0x2c
+#define WAU8822_INPGAL 0x2d
+#define WAU8822_INPGAR 0x2e
+#define WAU8822_ADCBOOSTL 0x2f
+#define WAU8822_ADCBOOSTR 0x30
+#define WAU8822_OUTPUT 0x31
+#define WAU8822_MIXL 0x32
+#define WAU8822_MIXR 0x33
+#define WAU8822_HPVOLL 0x34
+#define WAU8822_HPVOLR 0x35
+#define WAU8822_SPKVOLL 0x36
+#define WAU8822_SPKVOLR 0x37
+#define WAU8822_AUX2MIX 0x38
+#define WAU8822_AUX1MIX 0x39
+#define WAU8822_POWER4 0x3a
+#define WAU8822_TIMESLOTL 0x3b
+#define WAU8822_MISC 0x3c
+#define WAU8822_TIMESLOTR 0x3d
+#define WAU8822_REV 0x3e
+#define WAU8822_ID 0x3f
+
+#define WAU8822_CACHEREG_NUM 64
+
+/* Clock divider Id's */
+#define WAU8822_OPCLKDIV 0
+#define WAU8822_MCLKDIV 1
+#define WAU8822_ADCCLK 2
+#define WAU8822_DACCLK 3
+#define WAU8822_BCLKDIV 4
+
+#endif
+
diff --git a/sound/soc/codecs/wm8594.c b/sound/soc/codecs/wm8594.c
new file mode 100644
index 00000000000000..3889438029d58f
--- /dev/null
+++ b/sound/soc/codecs/wm8594.c
@@ -0,0 +1,1124 @@
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "wm8594.h"
+
+#define CODEC_NAME "wm8594"
+
+#define wm8594_reset(c) wm8594_write(c, WM8594_RESET, 0)
+
+/* Per-device driver state: control bus type and its client handle. */
+struct wm8594_priv {
+	enum snd_soc_control_type control_type;
+	void *control_data;
+};
+
+/*
+ * wm8594 default cfg values
+ *
+ * Power-on register defaults indexed by register address; seeds the
+ * ASoC register cache.  Entry 0 reads 0x8594 — presumably the chip
+ * ID/reset register value; confirm against the datasheet.
+ */
+static const u16 wm8594_reg[WM8594_CACHEREG_NUM] = {
+	0x8594, 0x0000, 0x008a, 0x0000,
+	0x0000, 0x00c8, 0x00c8, 0x008a,
+	0x0000, 0x0000, 0x00c8, 0x00c8,
+	0x0000, 0x200a, 0x0000, 0x0000,
+	0x00c3, 0x00c3, 0x000c, 0x000c,
+	0x000c, 0x000c, 0x000c, 0x000c,
+	0x0003, 0x007e, 0x0048, 0x0000,
+	0x0000, 0x0008, 0x0000, 0x0088,
+	0x0163, 0x0040, 0x0010, 0x0002
+
+};
+
+static const struct reg_default wm8594_reg_defaults[] = {
+ {WM8594_DAC1_CTRL1, 0x008e}, /*
+ * DAC1 cross enable
+ * 32 bits i2s format
+ */
+ {WM8594_DAC1_CTRL2, 0x001b}, /*
+ * MCLK = 256 * Fs
+ * BLCK = 64 * Fs
+ */
+ {WM8594_ADC_CTRL1, 0x200a}, /*
+ * ADC cross enable
+ * 32 bits i2s format
+ */
+ {WM8594_DAC2_CTRL1, 0x008e}, /*
+ * DAC2 cross enable
+ * 32 bits i2s format
+ */
+ {WM8594_DAC2_CTRL2, 0x001b}, /*
+ * MCLK = 256 * Fs
+ * BLCK = 64 * Fs
+ */
+ {WM8594_INPUT_CTRL1, 0x09a9},
+ {WM8594_INPUT_CTRL2, 0x0a9a}, /*
+ * connect PGA1 and PGA2
+ * to DAC1
+ */
+ {WM8594_INPUT_CTRL3, 0x0080}, /* connect ADC to VIN1 */
+};
+
+/*
+ * Fetch a register value from the local shadow cache; an out-of-range
+ * index yields (unsigned int)-1.
+ */
+static inline unsigned int wm8594_read_reg_cache(struct snd_soc_codec *codec,
+	unsigned int reg)
+{
+	const u16 *shadow = codec->reg_cache;
+
+	return (reg < WM8594_CACHEREG_NUM) ? shadow[reg] : -1;
+}
+
+/* Store a value in the shadow cache; out-of-range indices are ignored. */
+static inline void wm8594_write_reg_cache(struct snd_soc_codec *codec,
+	u16 reg, unsigned int value)
+{
+	u16 *shadow = codec->reg_cache;
+
+	if (reg < WM8594_CACHEREG_NUM)
+		shadow[reg] = value;
+}
+
+/*
+ * Write a register: update the shadow cache first, then push the value
+ * to the chip.  Returns 0 on success, -EIO on bus failure; the cache
+ * is updated even when the hardware write fails.
+ */
+static int wm8594_write(struct snd_soc_codec *codec,
+	unsigned int reg,
+	unsigned int value)
+{
+	int ret = 0;
+	wm8594_write_reg_cache(codec, reg, value);
+
+	ret = snd_soc_write(codec, reg, value);
+	if (!ret) {
+		dev_dbg(codec->dev, "%s: reg = 0x%x ; value = 0x%x success\n",
+			__func__, reg, value);
+	} else {
+		dev_err(codec->dev, "%s: reg = 0x%x ; value = 0x%x error\n",
+			__func__, reg, value);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static const char * const wm8594_adc_input_sel_text[] = {
+ "VIN1", "VIN2", "VIN3", "VIN4", "VIN5"
+};
+
+static const char * const wm8594_pga_sel_text[] = {
+ "No Input", "VIN1", "VIN2", "VIN3", "VIN4", "VIN5", "DAC1", "DAC2"
+};
+
+static const struct soc_enum wm8594_adc_input_sel_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wm8594_adc_input_sel_text),
+ wm8594_adc_input_sel_text);
+
+static const struct soc_enum wm8594_pga_sel_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wm8594_pga_sel_text),
+ wm8594_pga_sel_text);
+
+/*
+ * put() handler for the wm8594 volume controls: same logic as
+ * snd_soc_put_volsw() except bit 8 (0x100) is OR-ed into every write —
+ * presumably a volume-update latch bit; confirm against the datasheet.
+ * Duplicates the pattern of wau8822_vol_update() in wau8822.c.
+ */
+static int wm8594_vol_update(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	unsigned int reg = mc->reg;
+	unsigned int reg2 = mc->rreg;
+	unsigned int shift = mc->shift;
+	unsigned int rshift = mc->rshift;
+	int max = mc->max;
+	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int invert = mc->invert;
+	int err;
+	bool type_2r = 0;
+	unsigned int val2 = 0;
+	unsigned int val, val_mask;
+
+	val = (ucontrol->value.integer.value[0] & mask);
+	if (invert)
+		val = max - val;
+	val_mask = mask << shift;
+	val = val << shift;
+	if (snd_soc_volsw_is_stereo(mc)) {
+		val2 = (ucontrol->value.integer.value[1] & mask);
+		if (invert)
+			val2 = max - val2;
+		if (reg == reg2) {
+			/* both channels share one register */
+			val_mask |= mask << rshift;
+			val |= val2 << rshift;
+		} else {
+			/* separate L/R registers, same field position */
+			val2 = val2 << shift;
+			type_2r = 1;
+		}
+	}
+
+	err = snd_soc_update_bits_locked(codec,
+					 reg,
+					 val_mask | 0x100,
+					 val | 0x100);
+	if (err < 0)
+		return err;
+
+	if (type_2r)
+		err = snd_soc_update_bits_locked(codec,
+						 reg2,
+						 val_mask | 0x100,
+						 val2 | 0x100);
+
+	return err;
+}
+
+
+/*
+ * Read back the ADC input mux (INPUT_CTRL3: bits [3:0] = left,
+ * [7:4] = right) and map it onto the VINn enum items.
+ *
+ * Fix: removed the stray null statements (`};`) after both switch
+ * blocks (no behaviour change).
+ * NOTE(review): both switches store into value[0] (the control is a
+ * mono enum), so the right-channel decode overwrites the left one; an
+ * inverted L/R routing is reported as the right channel's source —
+ * confirm this is intended.
+ */
+static int wm8594_adc_input_get_switch(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 lsel = wm8594_read_reg_cache(codec, WM8594_INPUT_CTRL3) & 0x000f;
+	u16 rsel = wm8594_read_reg_cache(codec, WM8594_INPUT_CTRL3) & 0x00f0;
+
+	/*
+	 * here we expect that no inversion has been made
+	 * left input <-> left adc
+	 * right input <-> right adc
+	 */
+	switch (lsel) {
+	case 0x0:
+		/* VIN1L */
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case 0x1:
+		/* VIN2L */
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case 0x2:
+		/* VIN3L */
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case 0x3:
+		/* VIN4L */
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case 0x4:
+		/* VIN5L */
+		ucontrol->value.integer.value[0] = 4;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		break;
+	}
+
+	switch (rsel) {
+	case 0x80:
+		/* VIN1R */
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case 0x90:
+		/* VIN2R */
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case 0xa0:
+		/* VIN3R */
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case 0xb0:
+		/* VIN4R */
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case 0xc0:
+		/* VIN5R */
+		ucontrol->value.integer.value[0] = 4;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Route the selected VINn pair to the ADC: each value programs both
+ * the left ([3:0]) and right ([7:4]) mux fields of INPUT_CTRL3.
+ * Fix: removed the unreachable break after goto.
+ */
+static int wm8594_adc_input_set_switch(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 sel = wm8594_read_reg_cache(codec, WM8594_INPUT_CTRL3) & ~0x00ff;
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		/* VIN1 */
+		sel |= 0x80;
+		break;
+	case 1:
+		/* VIN2 */
+		sel |= 0x91;
+		break;
+	case 2:
+		/* VIN3 */
+		sel |= 0xa2;
+		break;
+	case 3:
+		/* VIN4 */
+		sel |= 0xb3;
+		break;
+	case 4:
+		/* VIN5 */
+		sel |= 0xc4;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		goto error;
+	}
+
+	wm8594_write(codec, WM8594_INPUT_CTRL3, sel);
+
+error:
+	return ret;
+}
+
+/*
+ * Shared helper for the PGA source enum get() callbacks: decode the
+ * 4-bit source field of a PGA at (reg, shift) for each channel and
+ * map it onto the wm8594_pga_sel_text items (0 = no input,
+ * 1-5 = VIN1..VIN5, 6 = DAC1, 7 = DAC2).  The per-control wrappers
+ * supplying reg/shift are presumably defined later in this file.
+ *
+ * Fix: removed the stray null statements (`};`) after both switch
+ * blocks (no behaviour change).
+ * NOTE(review): both switches store into value[0], so the right
+ * channel's decode overwrites the left one — confirm intended.
+ */
+static int wm8594_pga_sel_get_switch(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol,
+	u16 reg_left, u16 shift_left,
+	u16 reg_right, u16 shift_right)
+{
+	int ret = 0;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 lsel = (wm8594_read_reg_cache(codec, reg_left) >> shift_left)
+		& 0xf;
+	u16 rsel = (wm8594_read_reg_cache(codec, reg_right) >> shift_right)
+		& 0xf;
+
+	/*
+	 * here we expect that no inversion has been made
+	 */
+	switch (lsel) {
+	case 0x0:
+		/* no input */
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case 0x1:
+		/* VIN1L */
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case 0x2:
+		/* VIN2L */
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case 0x3:
+		/* VIN3L */
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case 0x4:
+		/* VIN4L */
+		ucontrol->value.integer.value[0] = 4;
+		break;
+	case 0x5:
+		/* VIN5L */
+		ucontrol->value.integer.value[0] = 5;
+		break;
+	case 0x9:
+		/* DAC1L */
+		ucontrol->value.integer.value[0] = 6;
+		break;
+	case 0xb:
+		/* DAC2L */
+		ucontrol->value.integer.value[0] = 7;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		break;
+	}
+
+	switch (rsel) {
+	case 0x0:
+		/* no input */
+		ucontrol->value.integer.value[0] = 0;
+		break;
+	case 0x1:
+		/* VIN1R */
+		ucontrol->value.integer.value[0] = 1;
+		break;
+	case 0x2:
+		/* VIN2R */
+		ucontrol->value.integer.value[0] = 2;
+		break;
+	case 0x3:
+		/* VIN3R */
+		ucontrol->value.integer.value[0] = 3;
+		break;
+	case 0x4:
+		/* VIN4R */
+		ucontrol->value.integer.value[0] = 4;
+		break;
+	case 0x5:
+		/* VIN5R */
+		ucontrol->value.integer.value[0] = 5;
+		break;
+	case 0xa:
+		/* DAC1R */
+		ucontrol->value.integer.value[0] = 6;
+		break;
+	case 0xc:
+		/* DAC2R */
+		ucontrol->value.integer.value[0] = 7;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * wm8594_pga_sel_set_switch() - program one stereo PGA input multiplexer
+ *
+ * @reg_left/@shift_left and @reg_right/@shift_right locate the 4-bit
+ * source-select fields of the left and right channel.  The control value
+ * indexes the source tables below (0 = none, 1-5 = VIN1-VIN5, 6 = DAC1,
+ * 7 = DAC2); left and right use different codes for the DAC sources.
+ * When both fields share one register only a single write is issued.
+ * Returns 0 on success or -EINVAL for an out-of-range control value.
+ */
+static int wm8594_pga_sel_set_switch(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol,
+				     u16 reg_left, u16 shift_left,
+				     u16 reg_right, u16 shift_right)
+{
+	/* mux codes: none, VIN1..VIN5, DAC1, DAC2 */
+	static const u16 lcode[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x9, 0xb };
+	static const u16 rcode[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0xa, 0xc };
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	long val = ucontrol->value.integer.value[0];
+	u16 lsel, rsel;
+
+	if (val < 0 || (unsigned long)val >= ARRAY_SIZE(lcode)) {
+		dev_err(codec->dev, "%s: unexpected control value\n", __func__);
+		return -EINVAL;
+	}
+
+	lsel = wm8594_read_reg_cache(codec, reg_left) & ~(0xf << shift_left);
+	rsel = wm8594_read_reg_cache(codec, reg_right) & ~(0xf << shift_right);
+
+	if (reg_left == reg_right) {
+		/* both fields live in the same register: single write */
+		lsel &= ~(0xf << shift_right);
+		lsel |= lcode[val] << shift_left | rcode[val] << shift_right;
+		wm8594_write(codec, reg_left, lsel);
+	} else {
+		lsel |= lcode[val] << shift_left;
+		rsel |= rcode[val] << shift_right;
+		wm8594_write(codec, reg_left, lsel);
+		wm8594_write(codec, reg_right, rsel);
+	}
+
+	return 0;
+}
+
+
+/* PGA1 source mux: left field = INPUT_CTRL1 bits [3:0], right = bits [7:4] */
+static int wm8594_pga1_sel_get_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_get_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL1, 0,
+					 WM8594_INPUT_CTRL1, 4);
+}
+
+/* PGA1 source mux setter: same field layout as the getter above */
+static int wm8594_pga1_sel_set_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_set_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL1, 0,
+					 WM8594_INPUT_CTRL1, 4);
+}
+
+/* PGA2 source mux: left = INPUT_CTRL1 bits [11:8], right = INPUT_CTRL2 bits [3:0] */
+static int wm8594_pga2_sel_get_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_get_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL1, 8,
+					 WM8594_INPUT_CTRL2, 0);
+}
+
+/* PGA2 source mux setter: fields span INPUT_CTRL1 and INPUT_CTRL2 */
+static int wm8594_pga2_sel_set_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_set_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL1, 8,
+					 WM8594_INPUT_CTRL2, 0);
+}
+
+/* PGA3 source mux: left = INPUT_CTRL2 bits [7:4], right = INPUT_CTRL2 bits [11:8] */
+static int wm8594_pga3_sel_get_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_get_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL2, 4,
+					 WM8594_INPUT_CTRL2, 8);
+}
+
+/* PGA3 source mux setter: both fields live in INPUT_CTRL2 */
+static int wm8594_pga3_sel_set_switch(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	return wm8594_pga_sel_set_switch(kcontrol, ucontrol,
+					 WM8594_INPUT_CTRL2, 4,
+					 WM8594_INPUT_CTRL2, 8);
+}
+
+/*
+ * Volume TLV scales, all in 0.5 dB steps: DACs from -100 dB, ADC from
+ * -97.5 dB, PGAs from -74 dB.
+ * NOTE(review): DECLARE_TLV_DB_SCALE's third argument is a 0/1 mute
+ * flag; 0xa0 here looks like a copy-slip (any non-zero value means
+ * "minimum is mute") -- confirm intent against the other codecs.
+ */
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 50, 0);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -7400, 50, 0xa0);
+
+/* mixer controls: volumes, mutes and input routing for DACs, ADC, PGAs */
+static const struct snd_kcontrol_new wm8594_snd_controls[] = {
+	SOC_DOUBLE_R_EXT_TLV("DAC1 Playback Volume",
+			WM8594_DAC1L_VOL, WM8594_DAC1R_VOL, 0, 224, 0,
+			snd_soc_get_volsw, wm8594_vol_update, dac_tlv),
+	SOC_SINGLE("DAC1 Mute Switch", WM8594_DAC1_CTRL1, 9, 1, 1),
+
+	SOC_DOUBLE_R_EXT_TLV("DAC2 Playback Volume",
+			WM8594_DAC2L_VOL, WM8594_DAC2R_VOL, 0, 224, 0,
+			snd_soc_get_volsw, wm8594_vol_update, dac_tlv),
+	SOC_SINGLE("DAC2 Mute Switch", WM8594_DAC2_CTRL1, 9, 1, 1),
+
+	SOC_DOUBLE_R_EXT_TLV("ADC Capture Volume",
+			WM8594_ADCL_VOL, WM8594_ADCR_VOL, 0, 255, 0,
+			snd_soc_get_volsw, wm8594_vol_update, adc_tlv),
+
+	SOC_DOUBLE_R_EXT_TLV("PGA1 Volume",
+			WM8594_PGA1L_VOL, WM8594_PGA1R_VOL, 0, 160, 1,
+			snd_soc_get_volsw, wm8594_vol_update, pga_tlv),
+	SOC_DOUBLE("PGA1 Mute Switch", WM8594_PGA_CTRL2, 1, 2, 1, 1),
+	SOC_ENUM_EXT("PGA1 Input Selection", wm8594_pga_sel_enum,
+		     wm8594_pga1_sel_get_switch, wm8594_pga1_sel_set_switch),
+
+	SOC_DOUBLE_R_EXT_TLV("PGA2 Volume",
+			WM8594_PGA2L_VOL, WM8594_PGA2R_VOL, 0, 160, 1,
+			snd_soc_get_volsw, wm8594_vol_update, pga_tlv),
+	SOC_DOUBLE("PGA2 Mute Switch", WM8594_PGA_CTRL2, 3, 4, 1, 1),
+	SOC_ENUM_EXT("PGA2 Input Selection", wm8594_pga_sel_enum,
+		     wm8594_pga2_sel_get_switch, wm8594_pga2_sel_set_switch),
+
+	SOC_DOUBLE_R_EXT_TLV("PGA3 Volume",
+			WM8594_PGA3L_VOL, WM8594_PGA3R_VOL, 0, 160, 1,
+			snd_soc_get_volsw, wm8594_vol_update, pga_tlv),
+	/*
+	 * PGA mute bits in PGA_CTRL2: bit 0 = mute all, PGA1 = bits 1/2,
+	 * PGA2 = bits 3/4, so PGA3 = bits 5/6 (the mute-all mask 0x7f
+	 * written in wm8594_power_down() covers bits 0-6).  The original
+	 * used shifts "5, 4", colliding with PGA2's right-channel bit.
+	 */
+	SOC_DOUBLE("PGA3 Mute Switch", WM8594_PGA_CTRL2, 5, 6, 1, 1),
+	SOC_ENUM_EXT("PGA3 Input Selection", wm8594_pga_sel_enum,
+		     wm8594_pga3_sel_get_switch, wm8594_pga3_sel_set_switch),
+
+	SOC_SINGLE("PGA Mute All Switch", WM8594_PGA_CTRL2, 0, 1, 1),
+
+	SOC_SINGLE("ADC Gain Control", WM8594_INPUT_CTRL3, 8, 3, 0),
+	SOC_ENUM_EXT("ADC Input Selection", wm8594_adc_input_sel_enum,
+		     wm8594_adc_input_get_switch, wm8594_adc_input_set_switch),
+};
+
+/*
+ * wm8594_pcm_hw_params() - configure word length and sample rate
+ *
+ * Sets the DAC word-length fields (DACx_CTRL1 bits [3:2]) from the
+ * stream format and the rate field (ADD_CTRL1 bits [6:4]) from the
+ * stream rate.  Returns 0 on success, -EINVAL on unsupported format.
+ */
+static int wm8594_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+
+	u16 dac1_ctrl1 = wm8594_read_reg_cache(codec, WM8594_DAC1_CTRL1) & ~0xc;
+	u16 dac2_ctrl1 = wm8594_read_reg_cache(codec, WM8594_DAC2_CTRL1) & ~0xc;
+	u16 add = wm8594_read_reg_cache(codec, WM8594_ADD_CTRL1) & ~0x70;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		dac1_ctrl1 |= 0x0004;
+		dac2_ctrl1 |= 0x0004;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		dac1_ctrl1 |= 0x0008;
+		dac2_ctrl1 |= 0x0008;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dac1_ctrl1 |= 0x000c;
+		dac2_ctrl1 |= 0x000c;
+		break;
+	default:
+		/* the DAI only advertises the formats above */
+		return -EINVAL;
+	}
+
+	/*
+	 * set sample rate
+	 *
+	 * params_rate() returns the rate in Hz; the original code compared
+	 * it against the SNDRV_PCM_RATE_* bit-mask constants, so every
+	 * stream fell through to the 48 kHz default.
+	 */
+	switch (params_rate(params)) {
+	case 32000:
+		break;
+	case 44100:
+		add |= 0x10;
+		break;
+	case 88200:
+		add |= 0x30;
+		break;
+	case 96000:
+		add |= 0x40;
+		break;
+	case 176400:
+		add |= 0x50;
+		break;
+	case 192000:
+		add |= 0x60;
+		break;
+	/* 48000 is the default value */
+	case 48000:
+	default:
+		add |= 0x20;
+		break;
+	}
+
+	wm8594_write(codec, WM8594_DAC1_CTRL1, dac1_ctrl1);
+	wm8594_write(codec, WM8594_DAC2_CTRL1, dac2_ctrl1);
+	wm8594_write(codec, WM8594_ADD_CTRL1, add);
+	return 0;
+}
+
+/*
+ * wm8594_set_dai_fmt() - apply DAI format to DAC1, DAC2 and the ADC
+ *
+ * The three interfaces share the same bit layout: format/inversion bits
+ * in the CTRL1 registers (mask 0x33) and the master-mode bit in the
+ * CTRL3 registers (bit 0), so the settings are computed once and then
+ * applied to each block.  Returns -EINVAL for unsupported combinations
+ * before touching any register.
+ */
+static int wm8594_set_dai_fmt(struct snd_soc_dai *codec_dai,
+			      unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 iface = 0;	/* CTRL1 format + clock-inversion bits */
+	u16 master = 0;	/* CTRL3 master-mode bit */
+
+	/* master/slave selection */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = 0x0001;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format (right justified is the register default) */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= 0x0002;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= 0x0001;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		iface |= 0x0003;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* clock inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= 0x0030;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= 0x0010;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		iface |= 0x0020;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* propagate the shared settings to all three audio interfaces */
+	wm8594_write(codec, WM8594_DAC1_CTRL1,
+		     (wm8594_read_reg_cache(codec, WM8594_DAC1_CTRL1) & ~0x33)
+		     | iface);
+	wm8594_write(codec, WM8594_DAC1_CTRL3,
+		     (wm8594_read_reg_cache(codec, WM8594_DAC1_CTRL3) & ~0x1)
+		     | master);
+
+	wm8594_write(codec, WM8594_DAC2_CTRL1,
+		     (wm8594_read_reg_cache(codec, WM8594_DAC2_CTRL1) & ~0x33)
+		     | iface);
+	wm8594_write(codec, WM8594_DAC2_CTRL3,
+		     (wm8594_read_reg_cache(codec, WM8594_DAC2_CTRL3) & ~0x1)
+		     | master);
+
+	wm8594_write(codec, WM8594_ADC_CTRL1,
+		     (wm8594_read_reg_cache(codec, WM8594_ADC_CTRL1) & ~0x33)
+		     | iface);
+	wm8594_write(codec, WM8594_ADC_CTRL3,
+		     (wm8594_read_reg_cache(codec, WM8594_ADC_CTRL3) & ~0x1)
+		     | master);
+	return 0;
+}
+
+/*
+ * digital_mute DAI op: intentionally a no-op; muting is exposed through
+ * the "DACx Mute Switch" mixer controls instead.
+ */
+static int wm8594_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+	return 0;
+}
+
+/* DAI ops; despite the name these serve both playback and capture streams */
+static struct snd_soc_dai_ops wm8594_dai_ops_playback = {
+	.hw_params	= wm8594_pcm_hw_params,
+	.set_fmt	= wm8594_set_dai_fmt,
+	.digital_mute	= wm8594_mute,
+};
+
+/*
+ * wm8594_power_up() - anti-pop power-up sequence
+ *
+ * Brings the codec up with the fast bias and buffered outputs first,
+ * precharges the output coupling capacitors to DACVMID, then switches
+ * to the master bias, enables the audio blocks and unmutes the PGAs.
+ * Runs in process context (probe/bias-level transitions).
+ */
+static int wm8594_power_up(struct snd_soc_codec *codec)
+{
+	u16 bias = 0;
+	u16 dac1 = wm8594_read_reg_cache(codec, WM8594_DAC1_CTRL1) & ~0x100;
+	u16 dac2 = wm8594_read_reg_cache(codec, WM8594_DAC2_CTRL1) & ~0x100;
+	u16 adc = wm8594_read_reg_cache(codec, WM8594_ADC_CTRL1) & ~0x40;
+
+	/*
+	 * set up initial biases
+	 * SOFT_ST | FAST_EN | POBCTRL | BUFIO_EN
+	 */
+	bias |= 0x1d;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * enable output drivers to allow the AC coupling capacitors at the
+	 * output charge to be precharged to DACVMID
+	 * VOUTxL_EN | VOUTxR_EN
+	 */
+	wm8594_write(codec, WM8594_OUTPUT_CTRL3, 0x1fc0);
+
+	/*
+	 * enable DACVMID, 750k selected for pop reduction
+	 * VMID_SEL = 10
+	 */
+	bias |= 0x80;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * wait until DACVMID has fully charged; msleep() instead of
+	 * mdelay() -- a 1.5 s busy wait would needlessly hog the CPU
+	 */
+	msleep(1500);
+
+	/*
+	 * enable master bias
+	 * BIAS_EN
+	 */
+	bias |= 0x20;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * switch output drivers to use the master bias instead of the
+	 * power-up (fast) bias
+	 * POBCTRL = 0
+	 */
+	bias &= ~0x1;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * enable all function required for use:
+	 * - enable all inputs
+	 * - enable codec
+	 * - enable dac1, dac2 and adc
+	 */
+	wm8594_write(codec, WM8594_INPUT_CTRL4, 0xff);
+	wm8594_write(codec, WM8594_ENABLE, 0x1);
+	dac1 |= 0x100;
+	wm8594_write(codec, WM8594_DAC1_CTRL1, dac1);
+	dac2 |= 0x100;
+	wm8594_write(codec, WM8594_DAC2_CTRL1, dac2);
+	adc |= 0x40;
+	wm8594_write(codec, WM8594_ADC_CTRL1, adc);
+
+	/*
+	 * unmute PGAs and switch DACVMID to 75k for normal operation
+	 *
+	 * The updated bias value was computed but never written in the
+	 * original code; write it so VMID_SEL actually changes.
+	 */
+	bias &= ~0xc0;
+	bias |= 0x40;
+	wm8594_write(codec, WM8594_BIAS, bias);
+	wm8594_write(codec, WM8594_PGA_CTRL2, 0x0);
+
+	return 0;
+}
+
+/*
+ * wm8594_power_down() - anti-pop power-down sequence
+ *
+ * Mutes the PGAs, moves the outputs back to the fast bias, disables the
+ * audio blocks, discharges DACVMID and finally clamps and powers down
+ * the output drivers.  Runs in process context.
+ */
+static int wm8594_power_down(struct snd_soc_codec *codec)
+{
+	u16 bias;
+	u16 dac1 = wm8594_read_reg_cache(codec, WM8594_DAC1_CTRL1) & ~0x100;
+	u16 dac2 = wm8594_read_reg_cache(codec, WM8594_DAC2_CTRL1) & ~0x100;
+	u16 adc = wm8594_read_reg_cache(codec, WM8594_ADC_CTRL1) & ~0x40;
+	/* presumably keeps the drivers enabled while clearing one clamp/
+	 * enable bit (0x40) for the first OUTPUT_CTRL3 write below --
+	 * TODO confirm against the datasheet */
+	u16 output = wm8594_read_reg_cache(codec, WM8594_OUTPUT_CTRL3) & ~0x40;
+
+	/*
+	 * mute all PGAs
+	 */
+	wm8594_write(codec, WM8594_PGA_CTRL2, 0x7f);
+
+	/*
+	 * set biases for power down mode
+	 * FAST_EN = 1
+	 * VMID_SEL = 01
+	 * BIAS_EN = 1
+	 * BUFIO_EN = 1
+	 * VMIDTOG = 0
+	 * SOFT_ST = 1
+	 */
+	bias = 0x7c;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * switch outputs to fast bias
+	 * POBCTRL = 1
+	 *
+	 * NOTE(review): wm8594_power_up() treats bit 0 as POBCTRL
+	 * (it clears 0x1), while this sets bit 1 -- confirm which bit
+	 * is POBCTRL against the datasheet.
+	 */
+	bias |= 0x2;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * power down
+	 */
+	wm8594_write(codec, WM8594_DAC1_CTRL1, dac1);
+	wm8594_write(codec, WM8594_DAC2_CTRL1, dac2);
+	wm8594_write(codec, WM8594_ADC_CTRL1, adc);
+	wm8594_write(codec, WM8594_INPUT_CTRL4, 0x00);
+	wm8594_write(codec, WM8594_ENABLE, 0x0);
+
+	/*
+	 * power down VMID
+	 * VMIDSEL = 00
+	 */
+	bias &= ~0xc0;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	/*
+	 * wait for DACVMID to be discharged; msleep() instead of a
+	 * 1.5 s mdelay() busy wait
+	 */
+	msleep(1500);
+
+	/*
+	 * clamp outputs to ground
+	 * power down outputs
+	 */
+	wm8594_write(codec, WM8594_OUTPUT_CTRL3, output);
+	wm8594_write(codec, WM8594_OUTPUT_CTRL3, 0);
+
+	/*
+	 * disable remaining bias controls
+	 */
+	bias = 0;
+	wm8594_write(codec, WM8594_BIAS, bias);
+
+	return 0;
+}
+
+/*
+ * wm8594_set_bias_level() - DAPM bias transition handler
+ *
+ * The full power-up sequence runs only when leaving OFF for STANDBY;
+ * the power-down sequence runs on every transition to OFF.  PREPARE
+ * and ON need no register work.
+ */
+static int wm8594_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	if (level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+			wm8594_power_up(codec);
+	} else if (level == SND_SOC_BIAS_OFF) {
+		wm8594_power_down(codec);
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+/*
+ * wm8594_probe() - codec-level initialisation
+ *
+ * Configures register I/O, resets the part, programs the default
+ * register set, brings the codec to STANDBY, refreshes the register
+ * cache from the hardware and registers the mixer controls.
+ */
+static int wm8594_probe(struct snd_soc_codec *codec)
+{
+	int i;
+	int ret = 0;
+	unsigned int value = 0;
+	struct wm8594_priv *wm8594 = snd_soc_codec_get_drvdata(codec);
+
+	/* original message lacked the terminating newline */
+	dev_info(codec->dev, "WM8594 Audio Codec\n");
+
+	codec->control_data = wm8594->control_data;
+	ret = snd_soc_codec_set_cache_io(codec, 8, 16, wm8594->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		goto error;
+	}
+
+	ret = wm8594_reset(codec);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
+		goto error;
+	}
+
+	/* initialize codec register */
+	for (i = 0; i < ARRAY_SIZE(wm8594_reg_defaults); i++) {
+		wm8594_write(codec, wm8594_reg_defaults[i].reg,
+			     wm8594_reg_defaults[i].def);
+	}
+
+	/* power on device */
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	wm8594_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* update cache with the values actually held by the hardware */
+	for (i = 0; i < WM8594_CACHEREG_NUM; i++) {
+		value = snd_soc_read(codec, i);
+		wm8594_write_reg_cache(codec, i, value);
+	}
+
+	/* init controls */
+	snd_soc_add_codec_controls(codec, wm8594_snd_controls,
+				   ARRAY_SIZE(wm8594_snd_controls));
+
+error:
+	return ret;
+}
+
+/* codec-level teardown: drop the part to full power-off */
+static int wm8594_remove(struct snd_soc_codec *codec)
+{
+	wm8594_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+/* suspend hook: power the codec fully down; register contents survive
+ * in the cache and are replayed by wm8594_resume() */
+static int wm8594_suspend(struct snd_soc_codec *codec)
+{
+	/* we only need to suspend if we are a valid card */
+	if (!codec->card)
+		return 0;
+
+	wm8594_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+/*
+ * wm8594_resume() - restore the hardware from the register cache
+ *
+ * Replays every cached register (except the reset register) into the
+ * chip, then restores the bias level that was active before suspend.
+ */
+static int wm8594_resume(struct snd_soc_codec *codec)
+{
+	int i;
+	u16 *cache = codec->reg_cache;
+
+	/* we only need to resume if we are a valid card */
+	if (!codec->card)
+		return 0;
+
+	/* Sync reg_cache with the hardware */
+	for (i = 0; i < ARRAY_SIZE(wm8594_reg); i++) {
+		if (i == WM8594_RESET)
+			continue;
+		/*
+		 * snd_soc_write() takes the codec itself; the original
+		 * passed codec->control_data, which only compiled
+		 * because control_data is a void pointer.
+		 */
+		snd_soc_write(codec, i, cache[i]);
+	}
+
+	wm8594_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	wm8594_set_bias_level(codec, codec->dapm.suspend_bias_level);
+	return 0;
+}
+
+/* codec driver description registered from the i2c probe */
+struct snd_soc_codec_driver soc_codec_dev_wm8594 = {
+	.probe =	wm8594_probe,
+	.remove =	wm8594_remove,
+	.suspend =	wm8594_suspend,
+	.resume =	wm8594_resume,
+	.set_bias_level = wm8594_set_bias_level,
+	.reg_cache_size = sizeof(wm8594_reg),
+	.reg_word_size = sizeof(u16),
+	/* NOTE(review): &wm8594_reg yields a pointer-to-array; it has the
+	 * same address but the plain array name is the usual idiom here */
+	.reg_cache_default = &wm8594_reg,
+};
+
+/*
+ * Rates/formats advertised to ALSA.
+ * NOTE(review): wm8594_pcm_hw_params() also has register settings for
+ * 88.2/96/176.4/192 kHz, yet those rates are not advertised here --
+ * confirm whether the omission is intentional.
+ */
+#define WM8594_RATES (SNDRV_PCM_RATE_8000  | \
+		      SNDRV_PCM_RATE_11025 | \
+		      SNDRV_PCM_RATE_16000 | \
+		      SNDRV_PCM_RATE_22050 | \
+		      SNDRV_PCM_RATE_44100 | \
+		      SNDRV_PCM_RATE_48000)
+
+#define WM8594_FORMATS (SNDRV_PCM_FMTBIT_S16_LE  | \
+			SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE  | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+/* single stereo DAI exposing both playback and capture */
+static struct snd_soc_dai_driver wm8594_dai[] = {
+	{
+		.name = "wm8594-hifi",
+		.id = 0,
+		.playback = {
+			.stream_name  = "Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates        = WM8594_RATES,
+			.formats      = WM8594_FORMATS,
+		},
+		.capture = {
+			.stream_name  = "Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates        = WM8594_RATES,
+			.formats      = WM8594_FORMATS,
+		},
+		.ops = &wm8594_dai_ops_playback,
+	},
+};
+
+/*
+ * i2c probe: allocate the driver state, stash it as clientdata and
+ * register the codec with the ASoC core.  The private struct is freed
+ * in wm8594_i2c_remove().
+ */
+static int wm8594_i2c_probe(struct i2c_client *i2c,
+			    const struct i2c_device_id *id)
+{
+	struct wm8594_priv *wm8594;
+	int ret;
+
+	wm8594 = kzalloc(sizeof(struct wm8594_priv), GFP_KERNEL);
+	if (wm8594 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, wm8594);
+	wm8594->control_data = i2c;
+	wm8594->control_type = SND_SOC_I2C;
+
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8594,
+				     wm8594_dai, ARRAY_SIZE(wm8594_dai));
+
+	if (ret < 0) {
+		/* NOTE(review): clientdata is left pointing at the freed
+		 * allocation; consider i2c_set_clientdata(i2c, NULL) here */
+		kfree(wm8594);
+		pr_err(CODEC_NAME ": %s failed: ret = %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * i2c remove: unregister the codec and free the private state.
+ * clientdata was set to the wm8594_priv in probe, not to a codec, so
+ * retrieve it with the correct type before freeing.
+ */
+static int wm8594_i2c_remove(struct i2c_client *client)
+{
+	struct wm8594_priv *wm8594 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+	kfree(wm8594);
+	return 0;
+}
+
+
+/*
+ * WM8594 2 wires address is determined by pin 45
+ * state during powerup.
+ * low = 0x1a
+ * high = 0x1c
+ */
+/* matched against i2c board-info entries declaring a "wm8594" device */
+static const struct i2c_device_id wm8594_i2c_table[] = {
+	{"wm8594", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, wm8594_i2c_table);
+
+/* i2c codec control layer */
+static struct i2c_driver wm8594_i2c_driver = {
+ .driver = {
+ .name = "wm8594",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8594_i2c_probe,
+ .remove = wm8594_i2c_remove,
+ .id_table = wm8594_i2c_table,
+};
+
+/* module entry point: register the i2c driver when I2C is available */
+static int __init wm8594_modinit(void)
+{
+	int err = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	pr_info(CODEC_NAME ": module init\n");
+	err = i2c_add_driver(&wm8594_i2c_driver);
+	if (err)
+		pr_err(CODEC_NAME
+		       ": Failed to register wm8594 I2C driver:%d\n",
+		       err);
+#endif
+
+	return err;
+}
+module_init(wm8594_modinit);
+
+/* module exit: unregister the i2c driver registered in wm8594_modinit() */
+static void __exit wm8594_modexit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	pr_info(CODEC_NAME ": module exit\n");
+	i2c_del_driver(&wm8594_i2c_driver);
+#endif
+}
+module_exit(wm8594_modexit);
+
+MODULE_DESCRIPTION("ASoC wm8594 driver");
+MODULE_AUTHOR("Adrien Charruel");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/codecs/wm8594.h b/sound/soc/codecs/wm8594.h
new file mode 100644
index 00000000000000..519205e6a52734
--- /dev/null
+++ b/sound/soc/codecs/wm8594.h
@@ -0,0 +1,47 @@
+
+#ifndef _WM8594_H_
+#define _WM8594_H_
+
+/*
+ * WM8594 register map (8-bit addresses, 16-bit values).
+ * Address 0x00 is shared: presumably a write resets the chip while a
+ * read returns the device ID -- confirm against the datasheet.
+ */
+#define WM8594_RESET		0x00
+#define WM8594_DEVICE_ID	0x00
+#define WM8594_REVISION		0x01
+#define WM8594_DAC1_CTRL1	0x02
+#define WM8594_DAC1_CTRL2	0x03
+#define WM8594_DAC1_CTRL3	0x04
+#define WM8594_DAC1L_VOL	0x05
+#define WM8594_DAC1R_VOL	0x06
+#define WM8594_DAC2_CTRL1	0x07
+#define WM8594_DAC2_CTRL2	0x08
+#define WM8594_DAC2_CTRL3	0x09
+#define WM8594_DAC2L_VOL	0x0a
+#define WM8594_DAC2R_VOL	0x0b
+#define WM8594_ENABLE		0x0c
+#define WM8594_ADC_CTRL1	0x0d
+#define WM8594_ADC_CTRL2	0x0e
+#define WM8594_ADC_CTRL3	0x0f
+#define WM8594_ADCL_VOL		0x10
+#define WM8594_ADCR_VOL		0x11
+/* register 0x12 reserved */
+#define WM8594_PGA1L_VOL	0x13
+#define WM8594_PGA1R_VOL	0x14
+#define WM8594_PGA2L_VOL	0x15
+#define WM8594_PGA2R_VOL	0x16
+#define WM8594_PGA3L_VOL	0x17
+#define WM8594_PGA3R_VOL	0x18
+#define WM8594_PGA_CTRL1	0x19
+#define WM8594_PGA_CTRL2	0x1a
+#define WM8594_ADD_CTRL1	0x1b
+#define WM8594_INPUT_CTRL1	0x1c
+#define WM8594_INPUT_CTRL2	0x1d
+#define WM8594_INPUT_CTRL3	0x1e
+#define WM8594_INPUT_CTRL4	0x1f
+#define WM8594_OUTPUT_CTRL1	0x20
+#define WM8594_OUTPUT_CTRL2	0x21
+#define WM8594_OUTPUT_CTRL3	0x22
+#define WM8594_BIAS		0x23
+#define WM8594_PGA_CTRL3	0x24
+
+/*
+ * NOTE(review): registers 0x00-0x24 are 37 entries; a count of 36
+ * excludes PGA_CTRL3 (0x24) from cache-wide loops -- confirm whether
+ * that is intentional.
+ */
+#define WM8594_CACHEREG_NUM	36
+
+#endif
+
diff --git a/sound/soc/parrot/Kconfig b/sound/soc/parrot/Kconfig
new file mode 100644
index 00000000000000..ca121cd5d3cf89
--- /dev/null
+++ b/sound/soc/parrot/Kconfig
@@ -0,0 +1,9 @@
+config SND_SOC_PARROT_FC7100
+ tristate "Support for the Parrot FC7100 board"
+ depends on I2C && MACH_PARROT_FC7100_FAMILY
+ select SND_SOC_WAU8822
+ select SND_SOC_WM8594
+ select SND_SOC_ADAU1977
+ help
+ Say Y if you want to add support for the Parrot FC7100
+ board.
diff --git a/sound/soc/parrot/Makefile b/sound/soc/parrot/Makefile
new file mode 100644
index 00000000000000..1159fcd09078ba
--- /dev/null
+++ b/sound/soc/parrot/Makefile
@@ -0,0 +1,8 @@
+# Parrot Platform Support
+# Use a dummy platform. The Alsa standard AAI driver is used
+
+# Parrot Machine Support
+snd-soc-parrot-fc7100-objs := parrot-fc7100.o
+
+obj-$(CONFIG_SND_SOC_PARROT_FC7100) += snd-soc-parrot-fc7100.o
+
diff --git a/sound/soc/parrot/parrot-fc7100.c b/sound/soc/parrot/parrot-fc7100.c
new file mode 100644
index 00000000000000..ec6d90a84321fb
--- /dev/null
+++ b/sound/soc/parrot/parrot-fc7100.c
@@ -0,0 +1,92 @@
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+/*
+ * Digital audio interface glue - connects codec <--> CPU
+ * Here the purpose is to simply declare the codec, no real connection is made
+ * between codec DAI and CPU DAI. Hence we use a dummy cpu dai.
+ */
+
+/* Audio machine driver */
+
+/*
+ * parrot_fc7100_soc_probe() - register one ASoC card for the dai_link
+ * passed in as platform data.  The card and its name are allocated
+ * here and released either on the failure path below or in
+ * parrot_fc7100_soc_remove().
+ */
+static int parrot_fc7100_soc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card;
+
+	/* the dai_link dereference below would oops on missing pdata */
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "missing dai_link platform data\n");
+		return -EINVAL;
+	}
+
+	card = kzalloc(sizeof(struct snd_soc_card), GFP_KERNEL);
+	if (!card) {
+		dev_err(&pdev->dev, "memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	card->dai_link = pdev->dev.platform_data;
+	card->num_links = 1;
+	card->dev = &pdev->dev;
+	card->owner = THIS_MODULE;
+
+	card->name = kasprintf(GFP_KERNEL, "codec control %s", card->dai_link->name);
+	if (!card->name)
+		dev_warn(&pdev->dev, "failed to set card name for codec '%s'\n",
+			 card->dai_link->name);
+
+	/* remove() retrieves the card through drvdata */
+	platform_set_drvdata(pdev, card);
+
+	dev_info(&pdev->dev, "Parrot FC7100 SoC probe '%s'\n", card->name);
+	ret = snd_soc_register_card(card);
+	if (ret) {
+		dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n",
+			ret);
+		/* original leaked both allocations on this path */
+		kfree(card->name);
+		kfree(card);
+	}
+
+	return ret;
+}
+
+/* undo probe(): unregister the card, then free the name and the card */
+static int parrot_fc7100_soc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(&pdev->dev);
+	snd_soc_unregister_card(card);
+	kfree(card->name);
+	kfree(card);
+	return 0;
+}
+
+/* PM hook: forward suspend to the ASoC core (no-op without PM_SLEEP);
+ * static -- only referenced by the driver structure below */
+static int parrot_fc7100_soc_suspend(struct platform_device *pdev,
+				     pm_message_t state)
+{
+#ifdef CONFIG_PM_SLEEP
+	return snd_soc_suspend(&pdev->dev);
+#else
+	return 0;
+#endif
+}
+
+/* PM hook: forward resume to the ASoC core (no-op without PM_SLEEP);
+ * static -- only referenced by the driver structure below */
+static int parrot_fc7100_soc_resume(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_SLEEP
+	return snd_soc_resume(&pdev->dev);
+#else
+	return 0;
+#endif
+}
+
+/* platform driver bound to the "parrot-fc7100-audio" device */
+static struct platform_driver parrot_fc7100_soc_driver = {
+	.driver = {
+		.name = "parrot-fc7100-audio",
+		.owner = THIS_MODULE,
+	},
+	.probe = parrot_fc7100_soc_probe,
+	.remove = parrot_fc7100_soc_remove,
+	.suspend = parrot_fc7100_soc_suspend,
+	.resume = parrot_fc7100_soc_resume,
+};
+
+module_platform_driver(parrot_fc7100_soc_driver);
+
+MODULE_AUTHOR("Adrien Charruel <adrien.charruel@parrot.com>");
+MODULE_DESCRIPTION("ALSA SoC Parrot FC7100 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:parrot-fc7100");